column               type      min length   max length
query_id             string    32           32
query                string    9            4.01k
positive_passages    list      1            1
negative_passages    list      88           101
90da290424ff6c0b0ec779d425a34e69
Tells if the cover is closed or not.
[ { "docid": "94be06c29ccedfac3b40d3085291a36e", "score": "0.7914196", "text": "def is_closed(self) -> bool | None:\n return self.coordinator.get_cover_state(self._device.endpoint_id).position == 0", "title": "" } ]
[ { "docid": "6bc50b64b9fd4d10972a8ac37d487483", "score": "0.83833516", "text": "def is_closed(self) -> bool:\n return self.current_cover_position == 0", "title": "" }, { "docid": "5b2d57a7fa5eef813ddc71cf7b48801e", "score": "0.8137329", "text": "def is_closing(self) -> bool | None:\n return self.__check_cover_status(CoverStatus.DOWN)", "title": "" }, { "docid": "ae0dc033dd77ddcdd9c39a1b5838a599", "score": "0.80841476", "text": "def is_closed(self) -> bool | None:\n if self.current_cover_position is not None:\n return self.current_cover_position == 0\n return None", "title": "" }, { "docid": "a051f04d0aed488ad0881aee4f1332c2", "score": "0.7309144", "text": "def is_opening(self) -> bool | None:\n return self.__check_cover_status(CoverStatus.UP)", "title": "" }, { "docid": "b8fb7e2659d0d78f4f966d6b801a4013", "score": "0.7265644", "text": "def is_closed(self) -> bool:\n return self.color == RED", "title": "" }, { "docid": "052fbf8ac849c5407942aa162e85ec84", "score": "0.70936316", "text": "def is_closed(self) -> bool | None:\n return cast(bool, self.status[\"state\"] == \"closed\")", "title": "" }, { "docid": "6472ae536b4e27c2707f9cbde2c55ac4", "score": "0.70303845", "text": "def is_closed(self):\n return self._state == STATE_CLOSED", "title": "" }, { "docid": "418bb7d6182c8d9117320d96d8a5a97f", "score": "0.6982686", "text": "def is_closed(self):\n\n return self._closed or super().is_closed", "title": "" }, { "docid": "b37cfb474cee362eb0bc3bba91f457f0", "score": "0.697572", "text": "def isClosed(self):\n return self._closed", "title": "" }, { "docid": "9b17ff5e3abf95db597d3d60d96543cd", "score": "0.68995726", "text": "def closed(self) -> bool:\n return self.state is State.CLOSED", "title": "" }, { "docid": "7948af233ba91ed1e4a86e21170e98fe", "score": "0.68962204", "text": "def closed(self):\n return self._mode == _MODE_CLOSED", "title": "" }, { "docid": "ae59ff1fa344fa930a07e892ed4fa87b", "score": "0.6888515", "text": "def is_closed(self):\n return self._is_closed", "title": "" }, { "docid": "354c8b913805c7934a30f98069781be4", "score": "0.6851635", "text": "def is_closed(self) -> bool:\n return self.current_position == 0", "title": "" }, { "docid": "354c8b913805c7934a30f98069781be4", "score": "0.6851635", "text": "def is_closed(self) -> bool:\n return self.current_position == 0", "title": "" }, { "docid": "ad0d0c39fee3bb29041385204240c4ee", "score": "0.6831432", "text": "def IsClosed(*args):\n return _BRep.BRep_Tool_IsClosed(*args)", "title": "" }, { "docid": "3412cd50f4a3b8c36781044644a4daf7", "score": "0.6827712", "text": "def is_closed(self):\n if (self._state == STATE_CLOSED):\n return True\n elif (self._state == STATE_OPEN):\n return False\n return None", "title": "" }, { "docid": "10100776a3a8ef09be0b168486c38f84", "score": "0.67811185", "text": "def is_closed(self) -> bool:\n if self.control_result:\n return cast(bool, self.control_result[\"current_pos\"] == 0)\n\n return cast(int, self.block.rollerPos) == 0", "title": "" }, { "docid": "2c4bda7ceb26c2519d48e559b6dbbc24", "score": "0.67665577", "text": "def closed(self) -> bool:\n raise NotImplementedError(\"closed must be implemented\")", "title": "" }, { "docid": "fa12e098f62a0cb272e8e83507d442b2", "score": "0.66787016", "text": "def closed(self):\n\t\treturn self._is_closed", "title": "" }, { "docid": "f876dac863d3951c0e11f7aaace206cb", "score": "0.66104144", "text": "def is_closing(self) -> bool:\n if self.control_result:\n return cast(bool, self.control_result[\"state\"] == \"close\")\n\n return self.block.roller == \"close\"", 
"title": "" }, { "docid": "240784711669f6d3a2756d9afc6496e0", "score": "0.66036606", "text": "async def test_close_cover(hass: HomeAssistant, setup_comp) -> None:\n state = hass.states.get(ENTITY_COVER)\n assert state.state == STATE_OPEN\n assert state.attributes[ATTR_CURRENT_POSITION] == 70\n\n await hass.services.async_call(\n DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True\n )\n state = hass.states.get(ENTITY_COVER)\n assert state.state == STATE_CLOSING\n for _ in range(7):\n future = dt_util.utcnow() + timedelta(seconds=1)\n async_fire_time_changed(hass, future)\n await hass.async_block_till_done()\n\n state = hass.states.get(ENTITY_COVER)\n assert state.state == STATE_CLOSED\n assert state.attributes[ATTR_CURRENT_POSITION] == 0", "title": "" }, { "docid": "e4f54f049dc1e4e545fc09ecb73df7a0", "score": "0.6595445", "text": "def is_closed_icon_present_in_course_page(self):\r\n try:\r\n self.find_element_by_xpath(cpl.CLOSED_COURSE_ICON)\r\n return True\r\n except TimeoutException:\r\n return False", "title": "" }, { "docid": "8656ea2b3f78b72a6629a309a02e0baf", "score": "0.65616363", "text": "def is_closing(self) -> bool:\n return cast(bool, self.status[\"state\"] == \"closing\")", "title": "" }, { "docid": "9cbf879c8aedfa1f3e5e887ae09365b8", "score": "0.6502258", "text": "def can_be_closed(self):\n return True", "title": "" }, { "docid": "f487dbe531e78d741d9d31afb51be9b4", "score": "0.649051", "text": "def is_closeable(self) -> bool:\n return True", "title": "" }, { "docid": "fd9522594a85b3e3d8b18d49128b84a2", "score": "0.64184785", "text": "def is_closed(self):\n if self._state in [STATE_UNKNOWN, STATE_OFFLINE]:\n return None\n return self._state in [STATE_CLOSED, STATE_OPENING]", "title": "" }, { "docid": "004bdc86ed7122de443bb8b2f14f33f6", "score": "0.6367387", "text": "def detectorCoverClose():\n yield from bps.mv(cover_detector.close, 1)\n \n while cover_detector.status.get() == 1:\n #print(cover_detector.status.get())\n time.sleep(0.5)\n \n return", "title": "" }, { "docid": "004bdc86ed7122de443bb8b2f14f33f6", "score": "0.6367387", "text": "def detectorCoverClose():\n yield from bps.mv(cover_detector.close, 1)\n \n while cover_detector.status.get() == 1:\n #print(cover_detector.status.get())\n time.sleep(0.5)\n \n return", "title": "" }, { "docid": "afe3cbdc3aaad214208928c349868bf0", "score": "0.635566", "text": "def should_close(self) -> bool:\n return False", "title": "" }, { "docid": "13f8719efc97c285ea6dcae88b7b88b3", "score": "0.6334148", "text": "def canClose(self):\n return True", "title": "" }, { "docid": "c1a55234b6ab9bb8445d940ba81068a4", "score": "0.62947834", "text": "def door_closed(self) -> bool:\n return self._implementation.door_closed()", "title": "" }, { "docid": "e3b3d8da1de033861ec708257873b6b7", "score": "0.6237161", "text": "def BRep_Tool_IsClosed(*args):\n return _BRep.BRep_Tool_IsClosed(*args)", "title": "" }, { "docid": "6321e95cbc9afd7f4fb3a74e02aae78d", "score": "0.6180495", "text": "def is_closing(self):\n if (self._state == STATE_CLOSING):\n return True\n return False", "title": "" }, { "docid": "4adda113af04abfd4955175b2cc58867", "score": "0.6178453", "text": "def is_open(self) -> bool:\n return self.color == GREEN", "title": "" }, { "docid": "175191faacb2139875a1368e81ee7036", "score": "0.6173124", "text": "def is_open(self):\n return self.is_still_open and (self.openseats > 0)", "title": "" }, { "docid": "4caab7990b4d642c7b9bdb55c9e7e721", "score": "0.6100001", "text": "async def async_close_cover(self, **kwargs: Any) -> 
None:\n data = {ATTR_ENTITY_ID: self._covers[KEY_OPEN_CLOSE]}\n await self.hass.services.async_call(\n DOMAIN, SERVICE_CLOSE_COVER, data, blocking=True, context=self._context\n )", "title": "" }, { "docid": "0693863c018856539d9c05f2e55b1319", "score": "0.60844606", "text": "def gripper_is_closed(self):\n return self.switch_is_closed[self.MOTOR_GRIPPER]", "title": "" }, { "docid": "b6c9cceee95ed4b67cd50a24d7ac814d", "score": "0.60843784", "text": "def has_been_closed_by (self, hmate):\n closing = TopNoticeClosing.objects.filter(top_notice=self, hmate=hmate)\n return int(closing.count()) > 0", "title": "" }, { "docid": "f17cb47d4a3b3e4c6c90648cc45c06ea", "score": "0.6080632", "text": "def is_closed(self):\n mesh = self.to_mesh()\n return mesh.is_closed()", "title": "" }, { "docid": "aa591b3a9c1bdb2ca971079f5f664474", "score": "0.60748357", "text": "def is_close(window):\n return lib.is_close(window)", "title": "" }, { "docid": "c40f6530ef69fdde0608a8296945a267", "score": "0.60292906", "text": "def close():\n if Box.isOpen == \"YES\":\n Box.isOpen = \"NO\"\n return True\n else:\n return False", "title": "" }, { "docid": "53537d39c7c21b7e977c8cc9a33b9a43", "score": "0.6007625", "text": "def is_last_testcase_closed (self):\n ret = True\n if self.testcases:\n if not self.testcases[-1].isstopped():\n ret = False\n return ret", "title": "" }, { "docid": "3055565f51ddd631c60c4c29ee409d27", "score": "0.5998788", "text": "def isClosing(self):\n return ruv.isClosing(self.handle)", "title": "" }, { "docid": "9ca299d506d6ce78a546295137874321", "score": "0.59895986", "text": "def detectorCoverOpen():\n yield from bps.mv(cover_detector.open, 1)\n \n while cover_detector.status.get() != 1:\n #print(cover_detector.status.get())\n time.sleep(0.5)\n \n return", "title": "" }, { "docid": "9ca299d506d6ce78a546295137874321", "score": "0.59895986", "text": "def detectorCoverOpen():\n yield from bps.mv(cover_detector.open, 1)\n \n while cover_detector.status.get() != 1:\n #print(cover_detector.status.get())\n time.sleep(0.5)\n \n return", "title": "" }, { "docid": "82cad5e59701b1925c3279749ecd863a", "score": "0.59853697", "text": "def closed(self):\n return self._closed", "title": "" }, { "docid": "82cad5e59701b1925c3279749ecd863a", "score": "0.59853697", "text": "def closed(self):\n return self._closed", "title": "" }, { "docid": "82cad5e59701b1925c3279749ecd863a", "score": "0.59853697", "text": "def closed(self):\n return self._closed", "title": "" }, { "docid": "c1779b5a9ecd9721b83842d3311015f2", "score": "0.5960251", "text": "def getDateClosed(self):\r\n return self.date_closed", "title": "" }, { "docid": "af76ddceac7a150d48bff8282ac236cf", "score": "0.5957455", "text": "def is_closed(self, new_is_closed):\n self.__dict__[\"is_closed\"] = new_is_closed\n self._update_edges()", "title": "" }, { "docid": "5312fc342c14a75d5b5bcf167fbf75d2", "score": "0.5945965", "text": "def snowcover(self):\n return self.swe > 0.0", "title": "" }, { "docid": "7ec0298419d9be5b56e7e1b5436ceb1e", "score": "0.5942662", "text": "def is_opening(self) -> bool:\n if self.control_result:\n return cast(bool, self.control_result[\"state\"] == \"open\")\n\n return self.block.roller == \"open\"", "title": "" }, { "docid": "21a1123af6771f9483f98f28009bba5f", "score": "0.5895655", "text": "def is_open(self) -> bool:\r\n pass", "title": "" }, { "docid": "fbf6b02edb1465b537a1710fc444549a", "score": "0.5887907", "text": "def is_last_testsection_closed (self):\n ret = True\n if self.commonSetup:\n if not self.commonSetup[-1].isstopped():\n ret = 
False\n if self.commonVerify:\n if not self.commonVerify[-1].isstopped():\n ret = False\n if self.commonCleanup:\n if not self.commonVerify[-1].isstopped():\n ret = False\n if self.commonModify:\n if not self.commonModify[-1].isstopped():\n ret = False\n return ret", "title": "" }, { "docid": "b18e28208426eeeb57c5d21c14e3624d", "score": "0.5884256", "text": "def is_opened(self) -> bool:", "title": "" }, { "docid": "df89e49f03cd8804ff38b2c83b464842", "score": "0.58509237", "text": "def opens_before_closes(self):\n return self.registration_open_time <= self.registration_close_time", "title": "" }, { "docid": "5a9cdc6c3e2f3c97d2c9fca683cd2a4d", "score": "0.5847244", "text": "def is_opening(self) -> bool:\n return cast(bool, self.status[\"state\"] == \"opening\")", "title": "" }, { "docid": "19bc52ea059286bc6c21805eeed4e47b", "score": "0.582886", "text": "def isOpen(self):\n return self.opened", "title": "" }, { "docid": "61734201761e10c6f061973d8ead10df", "score": "0.5804572", "text": "async def test_open_cover(hass: HomeAssistant, setup_comp) -> None:\n state = hass.states.get(ENTITY_COVER)\n assert state.state == STATE_OPEN\n assert state.attributes[ATTR_CURRENT_POSITION] == 70\n await hass.services.async_call(\n DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True\n )\n state = hass.states.get(ENTITY_COVER)\n assert state.state == STATE_OPENING\n for _ in range(7):\n future = dt_util.utcnow() + timedelta(seconds=1)\n async_fire_time_changed(hass, future)\n await hass.async_block_till_done()\n\n state = hass.states.get(ENTITY_COVER)\n assert state.state == STATE_OPEN\n assert state.attributes[ATTR_CURRENT_POSITION] == 100", "title": "" }, { "docid": "2a706beabbe8ac8903e754864d88be91", "score": "0.5797081", "text": "def getTopCoverStatus():", "title": "" }, { "docid": "892585923f8063be98fdef48ccb96e07", "score": "0.5796299", "text": "def isOpen(self):\r\n return self._opened", "title": "" }, { "docid": "9a26bd2e1ed6950e95ec8db7ee0392c9", "score": "0.57852983", "text": "def test_must_close_drawer_to_count(self) -> None:\n drawer: CashDrawer = CashDrawer()\n drawer.open(0.0)\n with pytest.raises(RuntimeError) as e:\n drawer.get_count(CashDenomination.PENNY)\n assert_that(str(e.value), is_(\"Cash drawer must be closed to count.\"))\n with pytest.raises(RuntimeError) as e:\n drawer.get_total()\n assert_that(str(e.value), is_(\"Cash drawer must be closed to count.\"))", "title": "" }, { "docid": "a38ec740f1cee3b21d91179eb0c2b11e", "score": "0.57772726", "text": "async def async_open_cover(self, **kwargs: Any) -> None:\n data = {ATTR_ENTITY_ID: self._covers[KEY_OPEN_CLOSE]}\n await self.hass.services.async_call(\n DOMAIN, SERVICE_OPEN_COVER, data, blocking=True, context=self._context\n )", "title": "" }, { "docid": "fa860e02c9a1b4c60efab58cfe1bca4b", "score": "0.5769212", "text": "def IsCurveOnClosedSurface(self, *args):\n return _BRep.BRep_CurveRepresentation_IsCurveOnClosedSurface(self, *args)", "title": "" }, { "docid": "0c89f69ce5941369b5db02a3e87154f1", "score": "0.57683986", "text": "def closed(self) -> bool:\n with self._lock:\n return self._handle is None", "title": "" }, { "docid": "f230b077397f7c85891a6a6e39682e43", "score": "0.5760645", "text": "def close_cover(self, **kwargs) -> None:\n\n # Connect to device\n if self._connect() == False:\n self._state = None\n return\n\n # Change the state on Home Assistant\n self._state = STATE_CLOSING\n\n # Control the device\n _LOGGER.info('Moving the mornin to close the curtain %s...', self._mac_address)\n if 
(self._reverse):\n self._mornin_device.char_write(CONTROL_SERVICE_CONTROL_UUID, CONTROL_SERVICE_CONTROL_OPEN_VALUE, True)\n else:\n self._mornin_device.char_write(CONTROL_SERVICE_CONTROL_UUID, CONTROL_SERVICE_CONTROL_CLOSE_VALUE, True)\n self._sleep(CONTROL_SERVICE_CONTROL_WAIT_SEC)\n\n # Change the state on Home Assistant\n self._state = STATE_CLOSED", "title": "" }, { "docid": "6ed45c54a4487503e1a3717473ea7ef0", "score": "0.5740872", "text": "def is_open(self):\n return self.__opened", "title": "" }, { "docid": "9ca1e1c17bb7814eb07a2dca33f401ab", "score": "0.57358736", "text": "def can_cancel(self):\n return self.is_open", "title": "" }, { "docid": "d06b98b18f07ed4e859e1beec57a9047", "score": "0.57297635", "text": "def is_open(self):\n pass", "title": "" }, { "docid": "d1f7f243256cff9a21fd85da504e60ad", "score": "0.5726513", "text": "def closed_plan(self):\n\t\treturn self.plan.closed", "title": "" }, { "docid": "91a1d421f945f28dc9193f17c4527e39", "score": "0.57178307", "text": "def closed(self):\n return self._sink.closed", "title": "" }, { "docid": "cac7552d21e473ffa7aad367c43be844", "score": "0.57112247", "text": "def is_open(self):\n raise NotImplementedError('You must override this')", "title": "" }, { "docid": "297db98e67dd830d5edd5e321f6bc819", "score": "0.57103306", "text": "def good(self):\n return self.open", "title": "" }, { "docid": "104dee2148714659ea88d79678317512", "score": "0.5703047", "text": "def close_allowed(self) -> bool:\n return self.device_json[\"state\"].get(\"is_unattended_close_allowed\") is True", "title": "" }, { "docid": "8931b8ab00778955ee8a0209d221975d", "score": "0.5700205", "text": "def should_close(self):\r\n if self.headers.get('connection') == 'close':\r\n return True\r\n elif self.headers.get('VERSION') == 'HTTP/1.0':\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "2b5a7d27ee8f28caf291b3d5ad4dfc5f", "score": "0.5698994", "text": "def closes_ticket(self):\r\n try:\r\n changes = self.changes.get(option=\"Status\")\r\n if changes.to_text == \"Closed\":\r\n return True\r\n except TicketChangeItem.DoesNotExist:\r\n return False", "title": "" }, { "docid": "7229e520be7fa61ac9549a061ca7a44d", "score": "0.56967074", "text": "def is_open(self):\n return self._is_open", "title": "" }, { "docid": "741007822953f67e3097e177292b79ef", "score": "0.56931204", "text": "def closed(self):\n return self._file.closed", "title": "" }, { "docid": "741007822953f67e3097e177292b79ef", "score": "0.56931204", "text": "def closed(self):\n return self._file.closed", "title": "" }, { "docid": "741007822953f67e3097e177292b79ef", "score": "0.56931204", "text": "def closed(self):\n return self._file.closed", "title": "" }, { "docid": "e7e2b30e49181aa34831652505e3b4ac", "score": "0.56912845", "text": "def is_open(self):\n return True", "title": "" }, { "docid": "e7e2b30e49181aa34831652505e3b4ac", "score": "0.56912845", "text": "def is_open(self):\n return True", "title": "" }, { "docid": "f08bb4ab1f450c4ade198adb535a874d", "score": "0.56783867", "text": "def is_open(self):\n\t\treturn not self.bitmex.ws.exited", "title": "" }, { "docid": "f90b402d3a6357ad39bffec4e2bc2e3d", "score": "0.56739974", "text": "def is_waiting_for_close(self):\n return does_object_pass_filter(self, self.is_waiting_for_close_filter())", "title": "" }, { "docid": "8ceb1d53fc96692ef52393e1647cfa73", "score": "0.5653662", "text": "def is_open(self):\n return self.status in SalesOrderStatusGroups.OPEN", "title": "" }, { "docid": "0332672e8dfc4b164b7df53649e70e5e", "score": "0.56421036", 
"text": "async def test_toggle_cover(hass: HomeAssistant, setup_comp) -> None:\n # Start open\n await hass.services.async_call(\n DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True\n )\n for _ in range(7):\n future = dt_util.utcnow() + timedelta(seconds=1)\n async_fire_time_changed(hass, future)\n await hass.async_block_till_done()\n\n state = hass.states.get(ENTITY_COVER)\n assert state.state == STATE_OPEN\n assert state.attributes[\"current_position\"] == 100\n # Toggle closed\n await hass.services.async_call(\n DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True\n )\n for _ in range(10):\n future = dt_util.utcnow() + timedelta(seconds=1)\n async_fire_time_changed(hass, future)\n await hass.async_block_till_done()\n\n state = hass.states.get(ENTITY_COVER)\n assert state.state == STATE_CLOSED\n assert state.attributes[ATTR_CURRENT_POSITION] == 0\n # Toggle open\n await hass.services.async_call(\n DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True\n )\n for _ in range(10):\n future = dt_util.utcnow() + timedelta(seconds=1)\n async_fire_time_changed(hass, future)\n await hass.async_block_till_done()\n\n state = hass.states.get(ENTITY_COVER)\n assert state.state == STATE_OPEN\n assert state.attributes[ATTR_CURRENT_POSITION] == 100", "title": "" }, { "docid": "04949a29125838339ecfa679a907066a", "score": "0.5639083", "text": "def closed(self):\r\n return self._stream.closed", "title": "" }, { "docid": "86464ac9e3f0da73f6ad71bf3af6abdd", "score": "0.56380004", "text": "def is_open_icon_present_in_course_page(self):\r\n try:\r\n self.find_element_by_xpath(cpl.OPEN_COURSE_ICON)\r\n return True\r\n except TimeoutException:\r\n return False", "title": "" }, { "docid": "0f049918e7ee9c3e6d40febe63a083b9", "score": "0.5621163", "text": "def is_opening(self):\n if (self._state == STATE_OPENING):\n return True\n return False", "title": "" }, { "docid": "4c3d469538f9803525b733a90fc63c97", "score": "0.5602785", "text": "def is_opened(self):\n return self.opened", "title": "" }, { "docid": "d10ee2ed619bb21f46f0d88df12d2116", "score": "0.55972993", "text": "def gop_closed_cadence(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"gop_closed_cadence\")", "title": "" }, { "docid": "d10ee2ed619bb21f46f0d88df12d2116", "score": "0.55972993", "text": "def gop_closed_cadence(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"gop_closed_cadence\")", "title": "" }, { "docid": "9bae41e4341206287b5083ed8dec0d75", "score": "0.55961293", "text": "def isOpen(self):\n return self._isOpen", "title": "" }, { "docid": "726803b9149ca6489cb0687e192645db", "score": "0.55922115", "text": "def IsEnclosed(self, *args):\n return _GccEnt.GccEnt_QualifiedCirc_IsEnclosed(self, *args)", "title": "" }, { "docid": "ddc54eb9de8dd6bb309f8464ff6fb0c5", "score": "0.55792904", "text": "def is_open(self):\n return self.status in ReturnOrderStatusGroups.OPEN", "title": "" }, { "docid": "2b42586b90a7b0b89e1f662613c91670", "score": "0.557229", "text": "def AlreadyClosedDesc(self):\r\n\r\n return SCase(\"%s %s already closed.\" % (self.TheDesc(), Be(self)))", "title": "" }, { "docid": "5c7b6ce60756d3d71a2958c87b3d988f", "score": "0.5571315", "text": "def is_on(self):\n return self._chart.open_house", "title": "" }, { "docid": "8e3bfa74e970dcbe0292a1267c46e257", "score": "0.55691195", "text": "def comments_are_open(self):\n if AUTO_CLOSE_COMMENTS_AFTER and self.comment_enabled:\n return (datetime.now() - self.start_publication).days < \\\n 
AUTO_CLOSE_COMMENTS_AFTER\n return self.comment_enabled", "title": "" }, { "docid": "d392705181bb464deba1fd751fdf9f92", "score": "0.55654424", "text": "def is_door_open(self):\n return self.motion_state == 3 # Open", "title": "" }, { "docid": "1e3b9a0ac3c3b614aaa1978e33dda328", "score": "0.55360585", "text": "async def test_stop_cover(hass: HomeAssistant, setup_comp) -> None:\n state = hass.states.get(ENTITY_COVER)\n assert state.attributes[ATTR_CURRENT_POSITION] == 70\n await hass.services.async_call(\n DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True\n )\n future = dt_util.utcnow() + timedelta(seconds=1)\n async_fire_time_changed(hass, future)\n await hass.async_block_till_done()\n await hass.services.async_call(\n DOMAIN, SERVICE_STOP_COVER, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True\n )\n async_fire_time_changed(hass, future)\n await hass.async_block_till_done()\n state = hass.states.get(ENTITY_COVER)\n assert state.attributes[ATTR_CURRENT_POSITION] == 80", "title": "" } ]
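A minimal illustrative sketch of the check this record's query describes, assuming only a plain position value (0 means fully closed, None means unknown) rather than any specific cover integration; the names here are made up for illustration and are not taken from the passages above:

def is_closed(position: int | None) -> bool | None:
    """Tell whether a cover is closed, in the spirit of the query above.

    position -- current cover position, where 0 means fully closed;
                None means the position is unknown.
    """
    if position is None:
        return None  # unknown state, hence the bool | None return type
    return position == 0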
71c049b254e653c937237ed6a9da8422
getVocabCount returns the number of unique keys within a master dictionary.
[ { "docid": "f67ac3977e8f08ef4acbe84d187a2d7b", "score": "0.81111103", "text": "def getVocabCount(data):\r\n return len(data.keys())", "title": "" } ]
[ { "docid": "f33fcc86d003bd7cd3980d7071d16b9b", "score": "0.703855", "text": "def vocab_count(corpus):\n\n flatten_sequence=[i for j in corpus for i in j]\n vocab_counter=Counter(flatten_sequence)\n return vocab_counter", "title": "" }, { "docid": "12139c8bea7b2e2b2212431dd452c485", "score": "0.6564975", "text": "def num_vocab():\n return len(symbols)", "title": "" }, { "docid": "4530436df66bbfeafb76e57bf60f6c2d", "score": "0.6488135", "text": "def get_vocab_size(self):\n # Vocab Sizes\n return len(self.unique_tokens)", "title": "" }, { "docid": "f922620d375d508e3eca66bdaa23eea8", "score": "0.6193528", "text": "def vocabulary_size(self):\n return self._index_lookup_layer.vocabulary_size()", "title": "" }, { "docid": "f4a772c91a2ef8bdfb6d6d90301dca1a", "score": "0.6099508", "text": "def get_vocabulary_length():\n\n X,_ = load_tweets('../data/clean/train.txt',True)\n\n all_word = []\n for elem in X:\n for w in elem.split(' '):\n all_word.append(w)\n all_word = list(set(all_word))\n vocab_length = len(all_word) + 13\n\n return vocab_length", "title": "" }, { "docid": "dc9300162698ed0a88f760a95d3b8f03", "score": "0.6093063", "text": "def frequencies(cls) -> Dict[str, int]:\n\n return cls.vocab_", "title": "" }, { "docid": "dc9300162698ed0a88f760a95d3b8f03", "score": "0.6093063", "text": "def frequencies(cls) -> Dict[str, int]:\n\n return cls.vocab_", "title": "" }, { "docid": "dc9300162698ed0a88f760a95d3b8f03", "score": "0.6093063", "text": "def frequencies(cls) -> Dict[str, int]:\n\n return cls.vocab_", "title": "" }, { "docid": "5254ce7ff365d84353f223e925a7eaad", "score": "0.60661745", "text": "def count_unique_answers(answers_counter: Counter) -> int:\n return len(answers_counter.keys())", "title": "" }, { "docid": "6c92863af7d6c639ef9f714c5a7d54e4", "score": "0.6046621", "text": "def get_class_count(word_dict, _kls):\n keys = set(word_dict.keys())\n _kls_set = set(_kls)\n intersection = keys & _kls_set\n return sum([word_dict[key] for key in intersection])", "title": "" }, { "docid": "e324c9353ef7f26b84c0ecfa825b2923", "score": "0.60361344", "text": "def num_of_keys(self):\n return len(self.get_key_list())", "title": "" }, { "docid": "293198e7e919349601ee863990cfbec3", "score": "0.6028108", "text": "def count(ngram_dict, key):\n if key in ngram_dict:\n return ngram_dict[key]\n else:\n return 0", "title": "" }, { "docid": "1891e9ba0b1564258c6f9ec561e08663", "score": "0.60174805", "text": "def compute_vocabulary_size(files):\n vocabulary = set()\n for f in files:\n for row in f:\n for integer in row:\n if integer not in vocabulary:\n vocabulary.add(integer)\n return max(vocabulary)+1", "title": "" }, { "docid": "acdc61564870236a085dfbfdb7669ba7", "score": "0.5979016", "text": "def n_unique_words(self) -> int:\n if self._n_unique_words is None:\n self._n_unique_words = basics.n_unique_words(self.words)\n return self._n_unique_words", "title": "" }, { "docid": "bc2359d51168481fc3f26fe1b276d00b", "score": "0.5978727", "text": "def unique_words(histogram):\n total_count = 0\n for k,v in word_histogram.items():\n if v == 1:\n total_count +=1\n \n print(total_count)", "title": "" }, { "docid": "0f9788cc929a4bc8332a3f6d953ea04d", "score": "0.59778035", "text": "def vocab_size(self) -> int:\n return len(self._word2index)", "title": "" }, { "docid": "0d32a15ad88f14b1a54fad682cc9b570", "score": "0.5961781", "text": "def _number_of_keys(self):\n return self.ram[0x66E]", "title": "" }, { "docid": "4b4749b0ebb20d10394a365fba5dc73a", "score": "0.59456867", "text": "def __len__(self):\n return sum(1 for _ 
in self.keys())", "title": "" }, { "docid": "342a8ea05e29409d505c1a358738dd25", "score": "0.5940414", "text": "def numKeys(path):\n\n prof = openProfiles(path)\n if type(prof) == dict:\n keys = prof.keys()\n return len(list(keys))\n else:\n return -1", "title": "" }, { "docid": "50dac38c284cfbb99724f0573181a93c", "score": "0.59350204", "text": "def count(self):\n return sum(1 for tag in self.AcDbDictinary if tag.code == ENTRY_NAME_CODE)", "title": "" }, { "docid": "0c022df4ded68056375ee017b90f8ba6", "score": "0.5923824", "text": "def cntKes(self, key):\n return self.cntIoVals(self.kels, key)", "title": "" }, { "docid": "7c0cc7b1c924931d81675d25456c4c40", "score": "0.59056294", "text": "def get_count_in_relevant_docs(self, term, relevant_docs):\n return len([key for key, val in self.training_data.items() if term in val.keys() if key in relevant_docs])", "title": "" }, { "docid": "63ae4df7f4d8689bcd305ed6f72140a4", "score": "0.5904525", "text": "def count_words(word_list):\n wc_dict = {}\n for word in word_list:\n if word in wc_dict:\n wc_dict[word] += 1\n else:\n wc_dict[word] = 1\n return wc_dict", "title": "" }, { "docid": "a26494932b4139d392be90edf9d2aa19", "score": "0.5895889", "text": "def get_vocabulary(fobj, is_dict_style=False):\n vocab = Counter()\n for line in fobj:\n if is_dict_style:\n word, count = line.strip().split()\n vocab[word] = int(count)\n else:\n vocab.update([_ for _ in line.strip().split() if len(_) > 0])\n return vocab", "title": "" }, { "docid": "92b8beb448cdff3e1ba3677dc192c396", "score": "0.589127", "text": "def build_vocab(lines: List[List[str]], unk_threshold = 2.0) -> Counter:\n counts = Counter()\n for line in lines:\n for word in line:\n counts[word] += 1.0\n pruned_counts = Counter({k: counts[k] for k in counts if counts[k] > unk_threshold})\n print(\"Kept %i words out of %i possible\" % (len(pruned_counts), len(counts)))\n return pruned_counts", "title": "" }, { "docid": "d008df281142ab671c8deb9c5691ff00", "score": "0.58793753", "text": "def num_words(self):\n return len(self.inverse_vocab)", "title": "" }, { "docid": "a4cbdeadfcf4eb7ba63de1a1d8bb7658", "score": "0.58535147", "text": "def get_num_keys(self):\n return 0", "title": "" }, { "docid": "a4cbdeadfcf4eb7ba63de1a1d8bb7658", "score": "0.58535147", "text": "def get_num_keys(self):\n return 0", "title": "" }, { "docid": "e9859c17a8da36ac4adcb89c3782bb6d", "score": "0.5846477", "text": "def computeCountDict(tfDict):\n countDict = {}\n for review in tfDict:\n for word in review:\n if word in countDict:\n countDict[word] += 1\n else:\n countDict[word] = 1\n return countDict", "title": "" }, { "docid": "6a9905f0c92491ae4eb5f867ff7e8e2c", "score": "0.5845988", "text": "def wordCount():", "title": "" }, { "docid": "5c3137e68e967ee3694ae3fb1fc585d1", "score": "0.5842564", "text": "def vocab_size(self):\n try:\n return self._vocab_size\n except AttributeError:\n logging.error('Please tokenize the corpus first')", "title": "" }, { "docid": "40f86fba529283604833631c5be06986", "score": "0.5827235", "text": "def create_vocab(vocab_size):\n vocab_dict = tff.simulation.datasets.stackoverflow.load_word_counts()\n return list(vocab_dict.keys())[:vocab_size]", "title": "" }, { "docid": "d2852485d777b4072d386c7e7cdaf59b", "score": "0.5806862", "text": "def num_qubits(words):\n nvn_space_size = len(words['nouns'])**2 * len(words['verbs'])\n req_qubits_n = int(np.ceil(np.log2(len(words['nouns']))))\n req_qubits_v = int(np.ceil(np.log2(len(words['verbs']))))\n \n #req_qubits = int(np.ceil(np.log2(nvn_space_size)))\n 
req_qubits = req_qubits_n*2 + req_qubits_v\n \n print(\"Unique states:\",nvn_space_size,\"\\tRequired qubits total:\", req_qubits, \"\\tRequired qubits nouns:\", req_qubits_n, \"\\tRequired qubits verbs:\", req_qubits_v)\n return (nvn_space_size, req_qubits, req_qubits_n, req_qubits_v)", "title": "" }, { "docid": "c0ad08626ca0fe9627b62b2ae4a763e0", "score": "0.5785523", "text": "def count_words(cls, dictionary_data):\n counter_dictionary = {}\n for single_dictionary in dictionary_data:\n if ctag := WordCounter.is_ctag_correct(single_dictionary['ctag']):\n try:\n counter_dictionary[f\"{single_dictionary['base']} {ctag}\"] += 1\n except KeyError:\n counter_dictionary[f\"{single_dictionary['base']} {ctag}\"] = 1\n return counter_dictionary", "title": "" }, { "docid": "dad1a7fbe738af43d1613912c7a78fb6", "score": "0.57582873", "text": "def word_counter(file_name):\n word_counts = {} \n\n for word in get_word_list(file_name):\n word_counts[word] = word_counts.get(word, 0) + 1\n\n for word, count in word_counts.items():\n print(f\"{word} {count}\")\n\n return word_counts", "title": "" }, { "docid": "4794dd664aa7e590a7978162387c9579", "score": "0.57547605", "text": "def build_vocabulary(texts: Iterable[List[str]]) -> Dict[str, int]:\n vocab = {PAD_TOKEN: 0, UNK_TOKEN: 1}\n i = 2\n for line in texts:\n for word in line:\n if word not in vocab:\n vocab[word] = i\n i += 1\n print(f\"Vocabulary size = {len(vocab)} tokens\")\n return vocab", "title": "" }, { "docid": "391c8392d28f02346247e6404ed12fc6", "score": "0.5728117", "text": "def scopes_size(scopes: Scopes) -> Counter:\n \n return counter([len(scope) for scope in scopes.values()])", "title": "" }, { "docid": "6172ac890d00e27bfc471fec47cba792", "score": "0.57153106", "text": "def word_count(self):\n return", "title": "" }, { "docid": "905c80906e8363f68df9396d210166ce", "score": "0.5714818", "text": "def get_word_count(self):\n\t\treturn len(self.word_list)", "title": "" }, { "docid": "0221ff5dc70b58f635fcf9283ce2b6cf", "score": "0.57032824", "text": "def __len__(self):\n return self.vocab_size + len(self.added_tokens_encoder)", "title": "" }, { "docid": "881c864ea4360f4ab3c19157819f18ae", "score": "0.56965435", "text": "def cntUwes(self, key):\n return self.cntIoVals(self.uwes, key)", "title": "" }, { "docid": "45a9078aec3a6e7c647d9cc9baef07d8", "score": "0.56926906", "text": "def word_count(self):\r\n count = 0\r\n for entry in self.all_entries:\r\n count += entry.word_count\r\n return count", "title": "" }, { "docid": "305058f2e9f83a9305cf07dbbf4f4c39", "score": "0.5650426", "text": "def __len__(self):\n return len(self.keys())", "title": "" }, { "docid": "3e5bc03ef16c05ec27d2f8d7eebaf9ba", "score": "0.5622591", "text": "def getVocabularySize(self):\n return len(self.word2index)", "title": "" }, { "docid": "f40cc41553ce0158fc39dab1698be93c", "score": "0.5619694", "text": "def count_words(words):\n word_count = {}\n for word in words:\n try:\n count = word_count.setdefault(word, 0)\n except TypeError:\n #if 'word' is not hashable, skip to next word.\n pass\n word_count[word] += 1\n return word_count", "title": "" }, { "docid": "756ea210db6d1488072ee1bdcee848bc", "score": "0.56109506", "text": "def get_word_count(dictionary, text):\n count = 0\n for word in dictionary:\n #print word, count\n #print word, \"----\", count\n if word in text:\n #print word\n count += 1\n return count", "title": "" }, { "docid": "42095877a4ad842cc1f8637d6ce3687d", "score": "0.5610086", "text": "def __TermCount(self):\n termDict = CountDict()\n for term in 
self._termVec:\n termDict.AddOne(term)\n return termDict", "title": "" }, { "docid": "ddd73c262704932d8762f1278dd00e6c", "score": "0.5605298", "text": "def known_subset(self, counter):\n return Counter(dict([\n (k, v)\n for (k, v) in counter.items()\n if k in self._known_vocab\n ]))", "title": "" }, { "docid": "398182437e3e84113ad14313e21f13b2", "score": "0.5587377", "text": "def term_counts(self):\n\n return [(key, len(self.termvalues[key])) for key in self.termvalues]", "title": "" }, { "docid": "03798a842ec01b50a30a6d594fd62569", "score": "0.5582387", "text": "def get_total_docs_containing_term(self, term):\n return len([1 for val in self.training_data.values() if term in val.keys()])", "title": "" }, { "docid": "c3beb2a4f695680ae975a98720c11726", "score": "0.55786735", "text": "def size(self):\n\n return len(self.keys())", "title": "" }, { "docid": "179b48d7e7c94795bd90d2136926eac2", "score": "0.55672914", "text": "def num_unique_elements(self):\n\t\treturn len(self._dict)", "title": "" }, { "docid": "1ae74406039eed942e21ae69694ec400", "score": "0.55630696", "text": "def word_count(fn):\r\n\tcnt = dict()\r\n\twith open(fn, 'r') as fd:\r\n\t\tfor word in re.split('\\W+', fd.read()):\r\n\t\t\tw = word.lower()\r\n\t\t\tif w in cnt:\r\n\t\t\t\tcnt[w] += 1\r\n\t\t\telse:\r\n\t\t\t\tcnt[w] = 1\r\n\tdel cnt['']\r\n\treturn cnt", "title": "" }, { "docid": "d64a7174433cc74f5b4c4c4d62ab0251", "score": "0.5561961", "text": "def _get_wordcount_counter(corpus: Corpus):\n corpus_counter = Counter()\n for current_document in corpus:\n document_counter = current_document.get_wordcount_counter()\n corpus_counter += document_counter\n return corpus_counter", "title": "" }, { "docid": "064ce10da3c645676dd59163071de491", "score": "0.5559577", "text": "def voc_settle_count(self): # IVS1\n return self._voc_settle_count", "title": "" }, { "docid": "cdaa049ff156cde70ab8e5aedf6ac296", "score": "0.5551283", "text": "def build_vocabulary(self):\n vocabulary_index = 0\n for pos, book in enumerate(self.Books):\n l = book.get_words_count() \n for word, count in l.items():\n if word not in self.vocabulary: #remove duplicate words over the corpus\n self.vocabulary[word]= vocabulary_index\n vocabulary_index += 1", "title": "" }, { "docid": "ffe972396fcbd5262092d31383e46890", "score": "0.5544315", "text": "def get_count(self):\n return len(self.krs)", "title": "" }, { "docid": "4fae9a5b68f8bc1c2d828945e8d6bb90", "score": "0.5523956", "text": "def vocab_size_relations(self) -> int:\n if hasattr(self, 'bpe_vocab'):\n return self.bpe_vocab.num_rel_subtokens\n else:\n return self.num_tokens_relations()", "title": "" }, { "docid": "6f2e9438fc48758fcfb50cf898fa6950", "score": "0.5521989", "text": "def get_word_counts(words):\n return dict(Counter(words))", "title": "" }, { "docid": "be8a1846455921518d15c9348081585e", "score": "0.55200243", "text": "def dict_counts(self, dictionary):\n return sorted(dictionary.iteritems(), key=operator.itemgetter(0))", "title": "" }, { "docid": "f537cdbe45b51a1e98113cce90383351", "score": "0.5517906", "text": "def count_windows(TE_pos):\n count = 0\n for chrom in TE_pos.keys():\n count += len(TE_pos[chrom].keys()) \n\n return count", "title": "" }, { "docid": "7afadfdec1b13e8b2586ecc2490dd577", "score": "0.5514635", "text": "def count_values(sample_dict: Dict) -> int:\n unique_values = []\n for k,v in sample_dict.items():\n if v not in unique_values:\n unique_values.append(v)\n\n return len(unique_values)", "title": "" }, { "docid": "c78ade0d1c4330c14d49e9f5fc7cba18", "score": "0.5509104", 
"text": "def create_word_vocab(input_data):\n word_vocab = {}\n for sentence in input_data:\n words = sentence.strip().split(' ')\n for word in words:\n if word not in word_vocab:\n word_vocab[word] = 1\n else:\n word_vocab[word] += 1\n \n return word_vocab", "title": "" }, { "docid": "c78ade0d1c4330c14d49e9f5fc7cba18", "score": "0.5509104", "text": "def create_word_vocab(input_data):\n word_vocab = {}\n for sentence in input_data:\n words = sentence.strip().split(' ')\n for word in words:\n if word not in word_vocab:\n word_vocab[word] = 1\n else:\n word_vocab[word] += 1\n \n return word_vocab", "title": "" }, { "docid": "4a67f58d75a90b3f0dc3415fb1605077", "score": "0.5500539", "text": "def length(self):\n # Count number of key-value entries in each of the buckets\n count = 0\n for bucket in self.buckets:\n count += bucket.length()\n return count\n pass", "title": "" }, { "docid": "71f1c7ad1a8efe289fb38c9e2d27b790", "score": "0.5489641", "text": "def length(self):\n if self.bucket_size is not None:\n return self.bucket_size\n return self.lookup_config.vocab_size + self.lookup_config.num_oov_buckets", "title": "" }, { "docid": "f099f95d4c63dc44dd410da6a441331d", "score": "0.5473232", "text": "def getWordCount(self):\n return self.wordCount", "title": "" }, { "docid": "7964012a78a3e5483def3700f059f462", "score": "0.5466468", "text": "def word_count(documents):\n return Counter(word for document in documents \n for word in tokenize(document))", "title": "" }, { "docid": "c5c464f1d77d643bea9e1766ac5b595f", "score": "0.544723", "text": "def get_catalog_counts(catalog):\n # Make sure we don't count the header\n return (len(catalog),\n len([m for m in catalog if m.string and m.id]),\n len([m for m in catalog if m.string and m.id and m.fuzzy]))", "title": "" }, { "docid": "db687e7733975aee39ab19061c080fdb", "score": "0.5446152", "text": "def cntWigs(self, key):\n return self.cntVals(self.wigs, key)", "title": "" }, { "docid": "73053b46528646998d726785bd67fd55", "score": "0.54457396", "text": "def get_total_docs(self):\n return len(self.total_words.keys())", "title": "" }, { "docid": "0d3610bafb1f1189620eda2dfd29c78c", "score": "0.5443372", "text": "def __len__(self):\n return sum(self._counts.itervalues())", "title": "" }, { "docid": "870f05ec029e5dab82fea90db7a13520", "score": "0.5442347", "text": "def count(self) -> int:\n total_count = 0\n for gem in self.d.keys():\n total_count += len(self.d[gem])\n return total_count", "title": "" }, { "docid": "360f0d08cee8f9629111a21104e3357f", "score": "0.54414326", "text": "def unigram_counts(text):\n counts = {}\n for word in text:\n counts[word] = counts.get(word,0) + 1\n return counts", "title": "" }, { "docid": "9a05c7b22c48e0c37645d8ea26f9d7e0", "score": "0.54403967", "text": "def count(self, value):\n\t\treturn self._dict.get(value, 0)", "title": "" }, { "docid": "4d724d2c3ca497fdba3329bbb5737ca0", "score": "0.5439982", "text": "def cntVrcs(self, key):\n return self.cntVals(self.vrcs, key)", "title": "" }, { "docid": "3a9631c7d382d7e938e797c0f3eeee9d", "score": "0.5427885", "text": "def vocab_size(cls) -> int:\n sz: int = (\n cls.embeddings.shape[0] if cls.embeddings is not None else len(cls.vocab_)\n )\n\n return sz", "title": "" }, { "docid": "265cd10cbfc8cdbb61c1634142dbfc35", "score": "0.54257244", "text": "def count_words(subreddit, word_list):\n hotp = requests.get('https://www.reddit.com/r/{}/hot.json'\n .format(subreddit),\n headers={\"User-Agent\": \"Simon & Lennox\"},\n allow_redirects=False,\n params={'limit': 100})\n if hotp.status_code 
!= 200:\n return\n else:\n my_dict = {}\n hotp = hotp.json()\n for word in word_list:\n counter = 0\n for post in hotp['data']['children']:\n counter += post['data']['title'].count(word)\n if counter != 0:\n my_dict[word] = counter\n for word in sorted(my_dict.keys()):\n print(\"{}: {}\".format(word, my_dict[word]))", "title": "" }, { "docid": "c710d22cbc1f1ca0315ccdc4c06c9797", "score": "0.54143393", "text": "def get_qty_of_contigs(self):\n qty_contigs = len(self.dict_fasta_data)\n return qty_contigs", "title": "" }, { "docid": "f2028263ed679a934e1440b62605b58f", "score": "0.539638", "text": "def __len__(self) -> int:\n return len(self.data.keys())", "title": "" }, { "docid": "4610870d6873ecf9a4897fd2adb8e2af", "score": "0.53888136", "text": "def create_counts_dict(corpus_iterator):\n counts_dict = defaultdict(int)\n for word, ne_tag in corpus_iterator:\n counts_dict[word] += 1\n return counts_dict", "title": "" }, { "docid": "6b7b15d50dbce39d8a16819d5d95b497", "score": "0.5375414", "text": "def count_words(subreddit, word_list):", "title": "" }, { "docid": "a9bca6ab39ffa9a6dcf5fa43077ce972", "score": "0.5373257", "text": "def word_counts(self):\n\n return dict(Counter(word for word, _ in self.words))", "title": "" }, { "docid": "c99c294fa2347dcbb008c83e8f144d50", "score": "0.53696513", "text": "def count_vocab_items(self, counter, sentences):\n\n if self.is_counting:\n for sentence in sentences:\n for value in sentence[self.source_key].values():\n counter[self.counter_namespace][str(value)] += 1\n\n logger.info(\"Count sentences {} to update counter namespace {} successfully.\".format(\n self.source_key, self.counter_namespace))", "title": "" }, { "docid": "95534be66c803150bceaba4b05511cde", "score": "0.53662395", "text": "def count_terms(self, exp):\n return len(self.extract_terms(exp))", "title": "" }, { "docid": "186b8140f61f24b44a8637fe8654f255", "score": "0.5357132", "text": "def num_set_elements(self):\n return self.__hashmap.size()", "title": "" }, { "docid": "bab54f8284fec6383a3a02ac2c980f64", "score": "0.53563017", "text": "def create_vocabulary(data):\n vocab = {}\n for line in data:\n for item in line:\n if item in vocab:\n vocab[item] += 1\n else:\n vocab[item] = 1\n vocab_list = _START_VOCAB + sorted(vocab)\n vocab = dict([(x, y) for (y, x) in enumerate(vocab_list)])\n return vocab", "title": "" }, { "docid": "3d8d6049e8227ff0835f441f8490a5bc", "score": "0.5347672", "text": "def key_and_chordnum():\n count_max = 0\n empty_dict = chordlist(notes_summarized)\n array = chordarray()\n for key,value in empty_dict.items():\n for chord in array:\n for val in range(len(value)):\n if chord == value[val]:\n count_max+=1\n empty_dict[key] = count_max\n count_max = 0\n return empty_dict", "title": "" }, { "docid": "066dc8d26afe4064848c96837dc3b8d1", "score": "0.53460795", "text": "def getDocumentFrequencies(corpus, keyedVectors):\n n_words = len(keyedVectors.vocab)\n freqs = np.zeros(n_words)\n\n for lineNo, line in enumerate(corpus):\n if lineNo % 10000 == 0:\n log.info(\"Processing line {}...\".format(lineNo))\n\n words = set(line.split())\n for word in words:\n if word in keyedVectors:\n freqs[keyedVectors.vocab[word].index] += 1.0\n\n return freqs", "title": "" }, { "docid": "3edf6790019a3efb9e4c90e78733a725", "score": "0.5344988", "text": "def vocabulary_size(self) -> TYPE_INT:\n return TYPE_INT(len(self.vocabulary))", "title": "" }, { "docid": "fb3d33c50a8774e554b905c144e8db82", "score": "0.534474", "text": "def create_words_dict(words_list):\n word_count = {}\n\n for w in 
words_list:\n if w in word_count:\n word_count[w] += 1\n else:\n word_count[w] = 1\n\n return word_count", "title": "" }, { "docid": "f2fec4f242678258342c99bf8cbd09af", "score": "0.53418845", "text": "def bag_of_words(text):\n bag = {}\n for word in text.lower().split():\n bag[word] = bag.get(word, 0) + 1\n return bag", "title": "" }, { "docid": "ef1cd808aa135e27746b708d4263f228", "score": "0.53396267", "text": "def unigram_counts(sequence):\n return Counter(sequence)", "title": "" }, { "docid": "c01f48fe5ce6e3a7d2bc959513550250", "score": "0.53343356", "text": "def getTotalUniqueWords(text):\n return len(set(text.split()))", "title": "" }, { "docid": "74bd3aedcad68288963b520ae870a18b", "score": "0.5324283", "text": "def load_word_counts(cache_dir=None, vocab_size: Optional[int] = None):\n if vocab_size is not None:\n if not isinstance(vocab_size, int):\n raise TypeError(\n f'vocab_size should be None or int, got {type(vocab_size)}.'\n )\n if vocab_size <= 0:\n raise ValueError(f'vocab_size must be positive, got {vocab_size}.')\n\n path = tf.keras.utils.get_file(\n 'stackoverflow.word_count.tar.bz2',\n origin='https://storage.googleapis.com/tff-datasets-public/stackoverflow.word_count.tar.bz2',\n file_hash=(\n '1dc00256d6e527c54b9756d968118378ae14e6692c0b3b6cad470cdd3f0c519c'\n ),\n hash_algorithm='sha256',\n extract=True,\n archive_format='tar',\n cache_dir=cache_dir,\n )\n\n word_counts = collections.OrderedDict()\n dir_path = os.path.dirname(path)\n file_path = os.path.join(dir_path, 'stackoverflow.word_count')\n with open(file_path) as f:\n for line in f:\n word, count = line.split()\n word_counts[word] = int(count)\n if vocab_size is not None and len(word_counts) >= vocab_size:\n break\n return word_counts", "title": "" }, { "docid": "161620a28a3fa30e25924860797de14b", "score": "0.53217626", "text": "def countWords(arr): \n counts=dict()\n for word in arr:\n if word in counts:\n counts[word]+=1\n \n else:\n counts[word]=1\n return counts", "title": "" }, { "docid": "7d2c98a038373f78a598f87e25de701c", "score": "0.53189933", "text": "def create_char_vocab(input_data):\n char_vocab = {}\n for sentence in input_data:\n words = sentence.strip().split(' ')\n for word in words:\n chars = list(word)\n for ch in chars:\n if ch not in char_vocab:\n char_vocab[ch] = 1\n else:\n char_vocab[ch] += 1\n \n return char_vocab", "title": "" }, { "docid": "7d2c98a038373f78a598f87e25de701c", "score": "0.53189933", "text": "def create_char_vocab(input_data):\n char_vocab = {}\n for sentence in input_data:\n words = sentence.strip().split(' ')\n for word in words:\n chars = list(word)\n for ch in chars:\n if ch not in char_vocab:\n char_vocab[ch] = 1\n else:\n char_vocab[ch] += 1\n \n return char_vocab", "title": "" }, { "docid": "289d4c78986b6323cc8f1ef26e4f0111", "score": "0.5316994", "text": "def get_known_vocabulary(text,vocab_threshold=1):\n lexicon = Counter() \n for line in text:\n if type(line) == str:\n lexicon.update(line.split())\n else:\n lexicon.update(line)\n \n return set([word for word, counts in lexicon.items() if counts > vocab_threshold])", "title": "" }, { "docid": "51beb21f1db0c1a669c5549ac55111bc", "score": "0.5314778", "text": "def __len__(self):\r\n i = 0\r\n for cookie in self: i = i + 1\r\n return i", "title": "" } ]
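For illustration only, a short sketch of the behaviour this record's query describes, with a made-up master dictionary: dictionary keys are unique by construction, so counting them gives the vocabulary size directly.

def get_vocab_count(data: dict) -> int:
    # dict keys are unique by construction, so their count is the vocab size
    return len(data.keys())

master = {"the": 120, "cat": 7, "sat": 3, "mat": 3}
print(get_vocab_count(master))  # prints 4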
cb2bee471fe9022f5ccc69799cb945ac
Visualizes the board by printing the contents of the cells ++ |B T| < That is a cell; T represents the item on top ++ B represents the item on the bottom
[ { "docid": "9652f13837aa4d3ed343007b1b611e42", "score": "0.77480453", "text": "def draw(self):\n border = \"+---+\"\n item = \"|{} {}|\"\n\n def draw_all_border():\n \"\"\"\n prints '+---+' times the number of cols\n specified in NUM_COLS\n \"\"\"\n line = \"\" # buffer string\n for _ in range(self.NUM_COLS):\n line = string_builder(line, border) # whole border and moves to new line\n print(line)\n\n\n def string_builder(*arg):\n \"\"\"\n faster string concat\n arguments: any number of strings to concat\n return one string with all others joined\n \"\"\"\n return ''.join(arg)\n\n\n for i in range(self.NUM_ROWS):\n buffer_text = \"\"\n draw_all_border()\n for j in range(self.NUM_COLS):\n if self.board[i][j].is_empty(): # if empty print an empty cell\n buffer_text = string_builder(buffer_text, item.format(' ', ' '))\n else: # not empty; must peek at all the items and grab individual items\n con_of_stack = self.board[i][j].peek_all()\n # only bottom item of stack because only two items can be in this stack\n if self.board[i][j].get_size() == 1:\n # string of cell with only btm item of stack\n single_item = item.format(con_of_stack[0], ' ')\n buffer_text = string_builder(buffer_text, single_item)\n else: # stack is full, so print both of the contents\n # string of cell with both items in stack\n both_items = item.format(con_of_stack[0], con_of_stack[1])\n buffer_text = string_builder(buffer_text, both_items)\n # print the buffer text\n print(buffer_text)\n draw_all_border() # end this row of cells", "title": "" } ]
[ { "docid": "83d45ba96060002fd66381146b94dcc9", "score": "0.79420996", "text": "def render_board(self):\n\n # this prints the top and bottom boundaries,\n # an in between renders '*' + row cells + '*'\n # for each row on the board\n print('*'*(self.width+2))\n for row in self.seed:\n print(''.join(['*']+['#' if cell else ' ' for cell in row]+['*']))\n print('*'*(self.width+2))", "title": "" }, { "docid": "31d2ced58ba5f6677f7eceea16163e91", "score": "0.7757172", "text": "def display_board(self):\n\n print(' ', end='')\n for n in range(self.width):\n print(' {} '.format(n), end=''),\n print('\\n', end='')\n for i, row in enumerate(self.board):\n print(i, end='')\n for j, col in enumerate(self.board[i]):\n if self.board[i][j].clicked:\n if self.board[i][j]._mine:\n print('[*]', end='')\n else:\n mines = self._count_touching_mines(i, j)\n print(' {} '.format(mines), end='')\n elif self.board[i][j].flagged:\n print('[?]', end='')\n else:\n print('[ ]', end='')\n print('\\n', end='')", "title": "" }, { "docid": "88bebe1794668111e7ac15db09fabcf7", "score": "0.7647937", "text": "def print_board(self):\n assert self.size_ == 9\n for i in range(self.size_):\n base = i * self.size_\n row = self.data[base:base + 3] + ['|'] + self.data[base +\n 3:base + 6] + ['|'] + self.data[base + 6:base + 9]\n print(\" \".join(map(str, row)))\n if (i + 1) % 3 == 0:\n print(\"\") # Just an empty line", "title": "" }, { "docid": "7df461aeab65f90ef6d509edfc0c65f4", "score": "0.75273716", "text": "def displayGrid(self):\n for i in range(self.size):\n for j in range(self.size):\n v = self.tile[i][j]\n print(\"\\x1b[%sm %s \\x1b[0m\" % (colorMap[v], str(v).center(5, \" \")), end=' ')\n print(\" \")\n if i % 4 != -1:\n print(\"\")", "title": "" }, { "docid": "032be671908e6741211d7b20ded8af8c", "score": "0.74921674", "text": "def display(self):\n b = list(self.board)\n for i,v in enumerate(b):\n if v == None:\n b[i] = \" \"\n\n print(\"+-----+-----+-----+\")\n print(\"| \",b[0],\" | \",b[1],\" | \",b[2],\" |\")\n print(\"+-----+-----+-----+\")\n print(\"| \",b[3],\" | \",b[4],\" | \",b[5],\" |\")\n print(\"+-----+-----+-----+\")\n print(\"| \",b[6],\" | \",b[7],\" | \",b[8],\" |\")\n print(\"+-----+-----+-----+\")", "title": "" }, { "docid": "0f4dc8836e9dcb3fe202d59cd754195d", "score": "0.7438464", "text": "def display_board(self, board):\n from colorama import Fore, Back, Style\n index = 1\n print(Fore.YELLOW + \" --a----b----c----d----e----f----g----h----i--\"\n + Style.RESET_ALL)\n for row in board:\n if index == 6:\n print(\" \", Fore.YELLOW + Back.BLUE +\n \"~~~~~~~~~~~~~~~~~~~~RIVER~~~~~~~~~~~~~~~~~~~~\" +\n Style.RESET_ALL)\n if index == 10:\n print(Fore.YELLOW + str(index) + \"|\", end=\"\" + Style.RESET_ALL)\n else:\n print(Fore.YELLOW + \" \" + str(index) + \"|\",\n end=\"\" + Style.RESET_ALL)\n for i in range(len(row)):\n if row[i] == None:\n print(Fore.WHITE + Back.YELLOW + \"[ ]\",\n end=\" \" + Style.RESET_ALL)\n else:\n columns = 1\n team = row[i].get_team()[0]\n type = row[i].get_type()\n piece = team.upper() + type\n if team == \"r\":\n print(Fore.RED + Back.YELLOW +\n \"[\" + piece + \"]\", end=\" \" + Style.RESET_ALL)\n elif team == \"b\":\n print(Fore.LIGHTBLUE_EX + Back.YELLOW +\n \"[\" + piece + \"]\", end=\" \" + Style.RESET_ALL)\n columns += 1\n print(Fore.YELLOW + \"|\" + str(index) + Style.RESET_ALL)\n index += 1\n print(Fore.YELLOW + \" --a----b----c----d----e----f----g----h----i--\"\n + Style.RESET_ALL)", "title": "" }, { "docid": "3538d8a3873816d53e5010842d5f0933", "score": "0.74235016", 
"text": "def draw(self):\n for i in xrange(self.width):\n output = []\n for j in xrange(self.height):\n cell = self.cells[(i, j)]\n output.append(cell.draw())\n print(''.join(output))", "title": "" }, { "docid": "8e0d0c20205dee05560ba650d7081fea", "score": "0.7412777", "text": "def print_board(self):\r\n for i in range(self.height):\r\n print(self.spacing + \"|\", end=\"\")\r\n\r\n for j in range(self.width):\r\n print(self.matrix[i][j] + \"|\", end=\"\")\r\n\r\n print()", "title": "" }, { "docid": "9bcbdacfe2a356a41b96e1540a98aee0", "score": "0.74043685", "text": "def printBoard(self) -> None:\n\n # Print x heading\n print(f\"|{' ':^3}|\", end='')\n for i in range(len(self.map[0])):\n print(f'{i+1:^3}|', end='')\n # Print rows with y heading\n for i in range(len(self.map)):\n print(f'\\n|{i+1:^3}|', end='')\n for j in range(len(self.map[i])):\n print(f'{self.map[i][j]:^3}|', end='')\n return", "title": "" }, { "docid": "47c94215446b0f56e0ee6ace034e4036", "score": "0.73989767", "text": "def print_board(self) -> None:\n print(\" = \" * self.board_size)\n for row in self.board:\n print(row)\n print(\" = \" * self.board_size)", "title": "" }, { "docid": "2c1afb69d466a1e153e2741e689e8340", "score": "0.73749506", "text": "def print_board(self):\n\n index_columns = \" \"\n for j in range(self.WIDTH):\n index_columns += \" \" + str(j) + \" \" + str(j) + \" \"\n print(index_columns)\n\n norm_line = \" |---|---|---|---|---|---|---|---|\"\n print(norm_line)\n\n for j in range(self.HEIGHT):\n temp_line = str(j) + \" \"\n if j % 2 == 1:\n temp_line += \"|///|\"\n else:\n temp_line += \"|\"\n for i in range(self.WIDTH):\n temp_line = temp_line + \" \" + self.get_symbol([j, i]) + \" |\"\n if i != 3 or j % 2 != 1: # TODO should figure out if this 3 should be changed to self.WIDTH-1\n temp_line = temp_line + \"///|\"\n print(temp_line)\n print(norm_line)", "title": "" }, { "docid": "7ded2e4783fb431b2ee5f6f5965ba086", "score": "0.73658276", "text": "def display(self):\n cnt = 0\n for i in range(3):\n print('{}{}Board #{}{}'.format(Back.WHITE, Fore.BLACK, i + 1, Style.RESET_ALL))\n for row in range(3):\n larr = []\n for col in range(3):\n cell_position = i*9 + row*3 + col\n cell = self.board[cell_position]\n bg = Back.RED\n if self.winner and (cell_position in self.winning_combo):\n bg = Back.BLUE\n if cell in self.players:\n s = '{}{:>2}{}'.format(bg, cell * 2, Style.RESET_ALL)\n else:\n s = '{:>2}'.format(cell)\n larr.append(s)\n print(' '.join(larr))", "title": "" }, { "docid": "a66720733e02929482ff236b97610787", "score": "0.7354732", "text": "def display(self):\n for i in range(self.game.height):\n print '\\n'\n for j in range(self.game.width):\n print self.board[(i, j)],\n print '\\n--------\\n'", "title": "" }, { "docid": "383f462eefc9113072f1dba8f468a9c5", "score": "0.7344172", "text": "def print(self):\n for row in self.grid:\n p = []\n for c in row:\n if c.piece:\n p.append(\"%d\"%c.piece)\n else:\n p.append(\" \")\n print(\"|\"+\"|\".join(p)+\"|\")", "title": "" }, { "docid": "d8d640236d2c7a82722f0f179f0f85e1", "score": "0.7334446", "text": "def drawBoard(self):\n print('|', self.board[0], '|', self.board[1], '|', self.board[2], '|')\n print('|', self.board[3], '|', self.board[4], '|', self.board[5], '|')\n print('|', self.board[6], '|', self.board[7], '|', self.board[8], '|')\n print()", "title": "" }, { "docid": "bfc9910ebdc1ce4d1838668328b58875", "score": "0.7319626", "text": "def display_board():\n\tprint(\"\\n\\t\", board[0], \"|\", board[1], \"|\", board[2])\n\tprint(\"\\t\", 
\"---------\")\n\tprint(\"\\t\", board[3], \"|\", board[4], \"|\", board[5])\n\tprint(\"\\t\", \"---------\")\n\tprint(\"\\t\", board[6], \"|\", board[7], \"|\", board[8], \"\\n\")", "title": "" }, { "docid": "111c87e7dc6758f9cbfce85777b50e21", "score": "0.73105884", "text": "def print_board(self):\n print(\"\\033[32m--------------------------------------------\")\n print(\"\\033[32m |\", end=' ')\n for i in range( self.boardSize ):\n print(\"%0.0d |\"%i, end=' ')\n print(\"\\n\\033[32m--------------------------------------------\")\n\n for i in range(self.boardSize):\n print( \"|\\033[32m %0.0d|\" %i , end=' ' )\n for j in range(self.boardSize):\n if self.hit_map[i,j] ==1 and self.board[i,j]!=0:\n print(\"\\033[31m%0.0d \\033[32m|\" % self.board[i][j] , end=' ')\n elif self.hit_map[i,j] !=1 and self.board[i,j]!=0:\n print(\"\\033[34m%0.0d \\033[32m|\" % self.board[i][j], end=' ')\n elif self.hit_map[i,j] ==1 and self.board[i,j]==0:\n print(\"\\033[37mX \\033[32m|\", end=' ')\n else :\n print(\" \\033[32m|\", end=\" \")\n print(\"\\n\\033[32m--------------------------------------------\")\n print(\"\\033[0m\")", "title": "" }, { "docid": "d110aaeaa3408174f1ea4528de185675", "score": "0.72612524", "text": "def display(self):\n print(\"\\n\\t\", self.current_board[0], \"|\", self.current_board[1], \"|\", self.current_board[2])\n print(\"\\t\",\"---------\")\n print(\"\\t\",self.current_board[3], \"|\", self.current_board[4], \"|\", self.current_board[5])\n print(\"\\t\",\"---------\")\n print(\"\\t\",self.current_board[6], \"|\", self.current_board[7], \"|\", self.current_board[8])", "title": "" }, { "docid": "a77f5d0c0e33bdb9046191f0bf610ddc", "score": "0.7260707", "text": "def print_board(self):\n for i in range(len(self._board)):\n for j in range(len(self._board[i])):\n print(self._board[i][j], end=\" \")\n print()", "title": "" }, { "docid": "7ce8b290f4e39f62cb94cb23e8ed364b", "score": "0.72408444", "text": "def show(self):\n\n for r in range(len(self.board)):\n for c in range(len(self.board[0])):\n if self.board[r][c] in self.borders:\n print(self.board[r][c], end=\"\")\n elif self.board[r][c] == 0:\n print(\" \", end=\"\")\n else:\n print(\"*\", end=\"\")\n print(\"\")", "title": "" }, { "docid": "93f4e5aa74d5bb7185ee46a2ca251bac", "score": "0.72339934", "text": "def display(self):\n for row in self.grid:\n for mark in row:\n print(mark, end='')\n print()\n print()", "title": "" }, { "docid": "7ea646786a29be9e2fd6cbc06593a548", "score": "0.7178229", "text": "def print_board():\n print(' ')\n print(' A B C')\n print('1 {} | {} | {} '.format(BOARD[0][0], BOARD[0][1], BOARD[0][2]))\n print(' ---+---+---')\n print('2 {} | {} | {} '.format(BOARD[1][0], BOARD[1][1], BOARD[1][2]))\n print(' ---+---+---')\n print('3 {} | {} | {} '.format(BOARD[2][0], BOARD[2][1], BOARD[2][2]))\n print('')", "title": "" }, { "docid": "e97efff081632b1c7f3fdaa6a0e4f8d0", "score": "0.7173047", "text": "def __repr__(self):\n '''\n count=1\n for row in self.board:\n print(f'{count}-{row}')\n count+=1\n temp=[chr(ord('A')+i) for i in range(8)]\n return temp\n '''\n\n tempBoard=[[k for k in i] for i in self.board] \n #Add column labeling\n columnLetters = [chr(ord('A')+i) for i in range(8)]\n columnLetters.insert(0,'_')\n tempBoard.insert(0,columnLetters)\n \n #add row labeling\n count=1\n for i in range(1,len(tempBoard)):\n tempBoard[i].insert(0,count)\n count+=1\n #Format board display\n # COde found @ https://stackoverflow.com/questions/13214809/pretty-print-2d-python-list\n s = [[str(e) for e in row] for row in 
tempBoard]\n lens = [max(map(len, col)) for col in zip(*s)]\n fmt = '\\t'.join('{{:{}}}'.format(x) for x in lens)\n table = [fmt.format(*row) for row in s]\n return '\\n'.join(table)", "title": "" }, { "docid": "a5ac9f3b2251398285a25181a95059d6", "score": "0.71729416", "text": "def draw_board():\r\n print(board)", "title": "" }, { "docid": "4f154c0190326e062909b446583f7097", "score": "0.7172764", "text": "def print_board(self):\n div = int(math.sqrt(self.BoardSize))\n dash = \"\"\n space = \"\"\n line = \"+\"\n sep = \"|\"\n for i in range(div):\n dash += \"----\"\n space += \" \"\n for i in range(div):\n line += dash + \"+\"\n sep += space + \"|\"\n for i in range(-1, self.BoardSize):\n if i != -1:\n print \"|\",\n for j in range(self.BoardSize):\n if self.CurrentGameBoard[i][j] > 9:\n print self.CurrentGameBoard[i][j],\n elif self.CurrentGameBoard[i][j] > 0:\n print \"\", self.CurrentGameBoard[i][j],\n else:\n print \" \",\n if (j+1 != self.BoardSize):\n if ((j+1)//div != j/div):\n print \"|\",\n else:\n print \"\",\n else:\n print \"|\"\n if ((i+1)//div != i/div):\n print line\n else:\n print sep", "title": "" }, { "docid": "1c54c4fa69894f790b702a556dbf90cc", "score": "0.71664995", "text": "def displayBoard(board):\n print(colored('##############################', 'magenta'))\n rows = [colored('## ', 'magenta'), 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', colored(' ##', 'magenta')]\n for i in range(len(rows)):\n st = colored(rows[i], 'blue')\n print(st, end=' ', sep=' ')\n print()\n cprint('## ##', color='magenta')\n for row in range(len(board)):\n st1 = colored('## ', 'magenta')\n st2 = colored(str(row), 'cyan')\n st3 = colored(' ', 'blue')\n st4 = st1 + st2 + st3\n print(st4, end='')\n for column in range(len(board[row])):\n val = board[row][column]\n if isinstance(val, int):\n print(val, end=' ')\n elif isinstance(val, str):\n if val == '_':\n print(val, end=' ')\n elif val == '*':\n cprint(val, color='red', end=' ')\n else:\n cprint(val, color='green', end=' ')\n else:\n cprint(val, color='green', end=' ')\n\n print(' ##')\n print()", "title": "" }, { "docid": "e80ccc20130885ee5768453c311978db", "score": "0.71567", "text": "def show(self):\n print()\n print(' 1 2 3 4 5 6 7 8 9 ')\n for i in range(9):\n if i % 3 == 0:\n print(' +-----+-----+-----+')\n print(f'{i + 1} |', end='')\n for j in range(9):\n if self.grid[i][j] == 0:\n print(end=' ')\n else:\n print(f'{self.grid[i][j]}', end='')\n if j % 3 != 2:\n print(end=' ')\n else:\n print('|', end='')\n print()\n print(' +-----+-----+-----+')\n print()", "title": "" }, { "docid": "d7b6e95140124595a6e0348ceab4da8c", "score": "0.71506906", "text": "def print_board(self):\n for j in range(self.rows-1, -1, -1):\n for i in range(self.rows):\n print(self.board[i][j], end='')\n print(\"\")", "title": "" }, { "docid": "713d7846e70f4d20767705c5cb38d3d3", "score": "0.7141998", "text": "def printBoard(self): \n # Print the column numbers\n print(' ',end='')\n for j in range(self.boardSize):\n print(\" \"+str(j+1), end='')\n \n \n # Print the rows with marks\n print(\"\")\n for i in range(self.boardSize):\n # Print the line separating the row\n print(\" \",end='')\n for j in range(self.boardSize):\n print(\"--\",end='')\n \n print(\"-\")\n\n # Print the row number\n print(i+1,end='')\n \n # Print the marks on self row\n for j in range(self.boardSize):\n print(\"|\"+self.marks[i][j],end='')\n \n print(\"|\")\n \n \n # Print the line separating the last row\n print(\" \",end='')\n for j in range(self.boardSize):\n print(\"--\",end='')\n \n print(\"-\")", "title": ""
}, { "docid": "367a38ded778cf60f04f031a1980aef8", "score": "0.7141429", "text": "def print_board(self):\n i = 0\n for row in self.__board:\n for item in row:\n if item == 0:\n print(\" \", end=' ')\n elif 100 <= item <= 999:\n print(item, end=' ')\n elif 10 <= item <= 99:\n print(str(item).rjust(3), end=' ')\n else:\n print(str(item).rjust(3), end=' ')\n print()\n i += 1", "title": "" }, { "docid": "b59f9ccd7e7f212ff92c1ab1f08dce00", "score": "0.71403", "text": "def print_board(self):\n char1 = \" \"\n for i in range(19):\n char1 += char_list[i] + \" \"\n char1 += \" \"\n print char1\n num = 0\n for x in self.board:\n if num < 10:\n char = str(19 - num) + \" \"\n else:\n char = str(19 - num) + \" \"\n for y in x:\n char = char + y.value + \" \"\n #print y, y.pos\n #print self.board[m][n].value, self.board[m][n].pos\n print char + str(19 - num)\n num += 1\n print char1\n #print self.board[3][3].value", "title": "" }, { "docid": "e75bf1a6a4edc0a44d6a0bc8c84958cd", "score": "0.7140018", "text": "def display_board(board):\n print(\"\\n\\t\", board[0], \"|\", board[1], \"|\", board[2])\n print(\"\\t\", \"---------\")\n print(\"\\t\", board[3], \"|\", board[4], \"|\", board[5])\n print(\"\\t\", \"---------\")\n print(\"\\t\", board[6], \"|\", board[7], \"|\", board[8], \"\\n\")", "title": "" }, { "docid": "ea7bbf4966c73ddeba66d5e3844ef35a", "score": "0.7139497", "text": "def print_board(self):\n div = int(math.sqrt(self.BoardSize))\n dash = \"\"\n space = \"\"\n line = \"+\"\n sep = \"|\"\n for i in range(div):\n dash += \"----\"\n space += \" \"\n for i in range(div):\n line += dash + \"+\"\n sep += space + \"|\"\n for i in range(-1, self.BoardSize):\n if i != -1:\n print \"|\",\n for j in range(self.BoardSize):\n if self.CurrentGameBoard[i][j] > 9:\n print self.CurrentGameBoard[i][j],\n elif self.CurrentGameBoard[i][j] > 0:\n print \"\", self.CurrentGameBoard[i][j],\n else:\n print \" \",\n if (j + 1 != self.BoardSize):\n if ((j + 1) // div != j / div):\n print \"|\",\n else:\n print \"\",\n else:\n print \"|\"\n if ((i + 1) // div != i / div):\n print line\n else:\n print sep", "title": "" }, { "docid": "6605a28fc535f60675c7b3b64d9cfd6d", "score": "0.71370125", "text": "def show(self, screen):\n for elt in self.cells:\n x_display = elt.xy_position[0] * constants.CELL_SIZE\n y_display = elt.xy_position[1] * constants.CELL_SIZE\n screen.blit(elt.cell_image, (x_display, y_display))", "title": "" }, { "docid": "470b2dfbd7209b3476c66e8742ae38ac", "score": "0.71259606", "text": "def display_board(board):\n print(\"\\n\\t\", board[0], '|', board[1], '|', board[2])\n print(\"\\t\", \"---------\")\n print(\"\\n\\t\", board[3], '|', board[4], '|', board[5])\n print(\"\\t\", \"---------\")\n print(\"\\n\\t\", board[6], '|', board[7], '|', board[8], \"\\n\")\n print(\"\\t\", \"---------\")", "title": "" }, { "docid": "4c73f61f5013fcada446deae144cc707", "score": "0.7119194", "text": "def print_board(self):\n print(\" %s | %s | %s \" % (self.__board[0], self.__board[1], self.__board[2]))\n print(\"--------------\")\n print(\" %s | %s | %s \" % (self.__board[3], self.__board[4], self.__board[5]))\n print(\"--------------\")\n print(\" %s | %s | %s \\n\" % (self.__board[6], self.__board[7], self.__board[8]))", "title": "" }, { "docid": "4dd900bcb5518ed7a09545089caa8fda", "score": "0.7113528", "text": "def display_board(board):\n print(\"\\n\\t\", board[0], \"|\", board[1], \"|\", board[2]) \n print(\"\\t\", \"---------\")\n print(\"\\t\", board[3], \"|\", board[4], \"|\", board[5]) \n print(\"\\t\", 
\"---------\")\n print(\"\\t\", board[6], \"|\", board[7], \"|\", board[8], \"\\n\")", "title": "" }, { "docid": "0a94f7fd63921792976d885a9b1bb7ed", "score": "0.7109522", "text": "def display( self ):\n if not self.__grid:\n print \"No solution\"\n return\n n = self.__boxSize\n n2 = n * n\n inset = n * \" \"\n fmt = '|'+('|'.join([' %s ' * n] * n)) + '|'\n sep = '+'+('+'.join(['---' * n] * n)) + '+' \n print inset, sep\n for i in range(n):\n for j in range(n):\n offset = (i * n + j) * n2\n cells = \"\"\n for x in self.__grid[offset : offset + n2]:\n if type(x) == list:\n x = x[0]\n cells = cells + x\n print inset, fmt % tuple(cells)\n if i != n - 1:\n print inset, sep\n print inset, sep", "title": "" }, { "docid": "3034c7201e5374b4100076518a7f005e", "score": "0.7101523", "text": "def print(self):\n for i in range(self.height):\n print(\"--\" * self.width + \"-\")\n for j in range(self.width):\n if self.board[i][j]:\n print(\"|X\", end=\"\")\n else:\n print(\"| \", end=\"\")\n print(\"|\")\n print(\"--\" * self.width + \"-\")", "title": "" }, { "docid": "3034c7201e5374b4100076518a7f005e", "score": "0.7101523", "text": "def print(self):\n for i in range(self.height):\n print(\"--\" * self.width + \"-\")\n for j in range(self.width):\n if self.board[i][j]:\n print(\"|X\", end=\"\")\n else:\n print(\"| \", end=\"\")\n print(\"|\")\n print(\"--\" * self.width + \"-\")", "title": "" }, { "docid": "3034c7201e5374b4100076518a7f005e", "score": "0.7101523", "text": "def print(self):\n for i in range(self.height):\n print(\"--\" * self.width + \"-\")\n for j in range(self.width):\n if self.board[i][j]:\n print(\"|X\", end=\"\")\n else:\n print(\"| \", end=\"\")\n print(\"|\")\n print(\"--\" * self.width + \"-\")", "title": "" }, { "docid": "ea0a0591dc4a72c4da9b3ed1cbbc98c1", "score": "0.7100069", "text": "def print_board(self):\r\n self.update_board()\r\n # transform the bord to string\r\n str_for_printing = \"\"\r\n for i in range(self.board_row_size):\r\n str_for_printing += \" \".join(self.bord[i]) + \"\\n\"\r\n return(str_for_printing)", "title": "" }, { "docid": "f2e8aaeb5a6e4812cccf320176daeff9", "score": "0.70990866", "text": "def display(self):\n\n for newline in range(self.y):\n print()\n for row in range(self.height):\n for spaces in range(self.x):\n print(\" \", end=\"\")\n for col in range(self.width):\n print(\"#\", end=\"\")\n print()", "title": "" }, { "docid": "809aaddd17e1285aee0696e060755f0a", "score": "0.7098599", "text": "def print_board (board):\n for row in range(1, GRID_SIZE + 1):\n for column in range(1, GRID_SIZE + 1):\n print ' ' + get_mark(board, column, row),\n print ''", "title": "" }, { "docid": "4a4d702290c8a88358ee8e72a13f248b", "score": "0.7090552", "text": "def display(self, colored=False):\n if not colored:\n i = 0\n print(' ', *list(range(7)), sep=' ')\n for row in self.board:\n print(i, end=' ')\n for column in row:\n print(column, end=' ')\n i +=1\n print()\n return\n\n new_board = []\n for row in self.board:\n new_row = []\n for column in row:\n if column == 'W':\n new_row.append(self._WHITEBG + 'W' + self._ENDC)\n elif column == 'B':\n new_row.append(self._BLACKBG + 'B' + self._ENDC)\n elif column == 'R':\n new_row.append(self._REDBG + 'R' + self._ENDC)\n else:\n new_row.append(' ')\n new_board.append(new_row)\n\n print(' ', '-'*16)\n print(' ', '|', *list(range(7)), '|', sep=' ')\n print(' ', '-'*16)\n i = 0\n for row in new_board:\n print(i, '|', end=' ')\n for column in row:\n print(column, end=' ')\n i += 1\n print('|', end=' ')\n print()\n print(' ', 
'-'*16)", "title": "" }, { "docid": "e9062b83ed7f1ce8cbeacee867fbddd3", "score": "0.7084782", "text": "def display(self):\n for y in range(self.__y):\n print()\n for i_row in range(self.__height):\n for x in range(self.__x):\n print(\" \", end=\"\")\n for j_colum in range(self.__width):\n print(\"#\", end=\"\")\n print()", "title": "" }, { "docid": "4cbbe822ba3752d5a94fc3a7e74daf49", "score": "0.7081497", "text": "def display(self, state: GameState) -> None:\n\n # construct a dictionary mapping board positions to symbols\n board_dict = {}\n for row in range(self.rows):\n for col in range(self.columns):\n if state.player1_board & self._actions_to_binary[(row, col)]:\n board_dict[(row, col)] = \"x\"\n elif state.player2_board & self._actions_to_binary[(row, col)]:\n board_dict[(row, col)] = \"o\"\n else:\n board_dict[(row, col)] = \" \"\n\n divider = \"\\n\" + \"+\".join([\"---\"] * self.columns) + \"\\n\"\n\n # construct the string for each row\n row_strings = []\n for row in range(self.rows):\n row_string = \"|\".join([\" {} \".format(board_dict[(row, col)])\n for col in range(self.columns)])\n row_strings.append(row_string)\n\n ascii_grid = divider.join(row_strings)\n print(ascii_grid)", "title": "" }, { "docid": "b132b0cfc26b519eb73f8c6e536b9ecf", "score": "0.708135", "text": "def display_board(self):\n line_number = 20\n for row in reversed(self._board):\n print(str(row) + ' ' + str(line_number))\n line_number -= 1\n\n print(' a ', ' b ', ' c ', ' d ', ' e ', \\\n ' f ', ' g ', ' h ', ' i ', ' j ', ' k ' \\\n , ' l ', ' m ', ' n ', ' o ', ' p ', \\\n ' q ', ' r ', ' s ', ' t ')", "title": "" }, { "docid": "a8b5ab9befc52ff54ae4750bd5bacd0f", "score": "0.7077757", "text": "def print(self):\n for i in range(self.height):\n row_values = [str(cell).ljust(27, ' ') for cell in self.grid[i*self.width:(i+1)*self.width]]\n print(' '.join(row_values))", "title": "" }, { "docid": "b8e44982c5638b67d189e9320f44e577", "score": "0.70672286", "text": "def display(self):\n for pos_y in range(self.__y):\n print()\n for row in range(self.__height):\n for pos_x in range(self.__x):\n print(' ', end='')\n for column in range(self.__width):\n print('#', end='')\n print()", "title": "" }, { "docid": "9c0cf62b0705f05bd50af9c77e080e46", "score": "0.70672", "text": "def print_board(self):\n for row in self.board:\n for item in row:\n if item != \"_\":\n item = self.cars_dict[item].description\n print(f'{item} ', end=\"\")\n print()", "title": "" }, { "docid": "fb27c693edc974c2d33dddacf8c8f282", "score": "0.7066166", "text": "def showBoard(self):\n print(\" 0, 1, 2, 3, 4, 5, 6, 7, 8 \")\n print(\" ---------------------------\")\n for row in range(0, self.sudokuBoard.__len__()):\n print(row, \": \", self.sudokuBoard[row])", "title": "" }, { "docid": "f91cf093214b9a455785090d2145eb89", "score": "0.7064421", "text": "def drawGrid(self):\n\n for i in range(self.row):\n line = '\\t|'\n for j in range(self.col):\n if not self.getCell((i * self.row) + j):\n line += ' '.center(5) + '|'\n else:\n line += str(self.getCell((i * self.row) + \n j)).center(5) + '|'\n print(line)\n print()", "title": "" }, { "docid": "55750c7352f6a54f291d655844f82611", "score": "0.70583093", "text": "def displayBoard(board):\n print(\"\\n\"*100)\n print(\"\\t\"+board[7]+\"|\"+board[8]+\"|\"+board[9])\n print(\"\\t-----\")\n print(\"\\t\"+board[4]+\"|\"+board[5]+\"|\"+board[6])\n print(\"\\t-----\")\n print(\"\\t\"+board[1]+\"|\"+board[2]+\"|\"+board[3])\n print(\"\\n\"*3)", "title": "" }, { "docid": "d771687990161833e679fd4d7b223b9b", "score": 
"0.70491195", "text": "def display(self):\n # Display the border of the board, not filled in\n rectangle(self.screen, \n BOARD_Y, BOARD_X - 1, \n BOARD_Y + BOARD_HEIGHT, BOARD_X + BOARD_WIDTH + 1)\n\n # Displays solidifed tiles\n for y in range(ROWS):\n for x in range(10):\n if (self.board[y][x] == 0 or y > 19):\n continue\n else:\n # Find the bottom left corner of the rectangle on the visual board representing each x, y coordinate\n offset_x = (x * tile_width) + BOARD_X + 1\n offset_y = ((20 - y) * tile_height) + BOARD_Y - 1\n # Draw a rectangle for each tile\n for _y in range(offset_y, offset_y + tile_height):\n for _x in range(offset_x, offset_x + tile_width):\n self.screen.addch(_y, _x, \"█\")\n \n # Create a piece representing the preview and display it\n lowest_y = self.get_lowest_valid_y()\n _piece = deepcopy(self.active_piece)\n _piece.y = lowest_y\n _piece.show(self.screen, \"*\")\n \n self.active_piece.show(self.screen, \"#\")\n \n # Display the hold box and held piece\n rectangle(self.screen, \n BOARD_Y, hold_rect_x,\n BOARD_Y + hold_rect_height, hold_rect_x + hold_rect_width)\n if self.held_piece:\n self.held_piece.show(self.screen, \"█\")\n\n #Display the next 5 pieces\n rectangle(self.screen,\n BOARD_Y, queue_rect_x,\n BOARD_Y + queue_rect_height, queue_rect_x + queue_rect_width)\n for i, name in enumerate([*self.bag, *self.next_bag][:5]):\n _piece = piece.Piece(name)\n _piece.x = 12\n _piece.y = 18 - (3 * i)\n _piece.show(self.screen, \"█\")\n\n self.screen.refresh()", "title": "" }, { "docid": "da34065bc87f4c3e30802284cb36d226", "score": "0.70422727", "text": "def draw_grid(grid):\r\n print '---------------'\r\n for row in grid:\r\n print '|',\r\n for element in row:\r\n print element, '\\t|',\r\n print\r\n print '---------------'", "title": "" }, { "docid": "fb950110e85daf8313a1c6b5afa21551", "score": "0.70388895", "text": "def show_board(self):\n print('-------')\n for i in range(3):\n print('|', TicTac.print_mapping[self.board[3*i]],\n '|', TicTac.print_mapping[self.board[3*i + 1]],\n '|', TicTac.print_mapping[self.board[3*i + 2]], '|', sep='')\n print('-------')", "title": "" }, { "docid": "f960b60c6196f15aa293d244fe4ca235", "score": "0.703137", "text": "def printGrid(self) -> None:\n for row, i in enumerate(self.grid):\n for col, j in enumerate(i):\n print(j, end = \" \")\n if ((col + 1) % 3 == 0 and col != 8):\n print('|', end=\" \")\n print('\\n', end = \"\")\n if (row + 1) % 3 == 0 and row != 8:\n print('------+-------+-------')", "title": "" }, { "docid": "0ed359b6210de1338c04ca6568bf2440", "score": "0.70300084", "text": "def display(self, board):\n b = Board(self.n)\n n = board.shape\n print(\"\\nReserve Black (X): \", board[0][0])\n # label top\n print(\" \", end=\"\")\n for y in range(n[1]):\n if y < 4:\n print(GipfGame.alpha[y]+(str(y+4)), end=\" \")\n else:\n print(GipfGame.alpha[y]+(str(10-y)), end=\" \")\n print(\"\")\n # board\n for x in range(n[0]):\n print(\" \", end=\"\")\n for y in range(n[1]):\n if (x,y) in b.get_startingPoints():\n print(\" \"+chr(9679), end=\" \")\n elif (x,y) in b.get_actBoard():\n piece = board[x][y] # get the piece to print\n print(\"(\"+GipfGame.spot_content[piece]+\")\", end=\"\")\n else:\n print(\" \", end=\"\")\n print(\"\")\n # label bottom\n print(\" \", end=\"\")\n for y in range(n[1]):\n print(GipfGame.alpha[y]+\"1\", end=\" \")\n print(\"\")\n print(\"Reserve White (O): \", board[0][2])\n print(\"\")", "title": "" }, { "docid": "88e40b33b95fb4f86b3456926c42645b", "score": "0.70263845", "text": "def my_print(self):\n if 
self.size:\n for pos_y in range(self.position[1]):\n print()\n for row in range(self.size):\n for pos_x in range(self.position[0]):\n print(\" \", end='')\n for col in range(self.size):\n print(\"#\", end='')\n print()\n else:\n print()", "title": "" }, { "docid": "f512bcd3c38c9d0627b01edf33debd2a", "score": "0.70250636", "text": "def show_board(board):\n\n # Create new black image of entire board\n width, height = 400, 400\n\n img = Image.new(\"RGB\", (width, height))\n draw = ImageDraw.Draw(img)\n\n tile_size = 20\n # Draw squares on odd rows\n for x_pos in range(0, width, 2 * tile_size):\n for y_pos in range(0, height, 2 * tile_size):\n draw.rectangle(\n [(x_pos, y_pos), (x_pos + tile_size - 1, y_pos + tile_size - 1)],\n fill=\"white\",\n )\n\n # Draw squares on even rows\n for x_pos in range(tile_size, width, 2 * tile_size):\n for y_pos in range(tile_size, height, 2 * tile_size):\n draw.rectangle(\n [(x_pos, y_pos), (x_pos + tile_size - 1, y_pos + tile_size - 1)],\n fill=\"white\",\n )\n\n # Add queens to board\n font = ImageFont.truetype(\"./arialbd.ttf\", 18)\n for x_pos, row in enumerate(board):\n for y_pos, square in enumerate(row):\n if square == 1:\n draw.text(\n (x_pos * tile_size + 3, y_pos * tile_size),\n \"Q\",\n font=font,\n fill=\"red\",\n )\n\n img.save(\"./board.png\")\n img.show()\n sys.exit(0)", "title": "" }, { "docid": "ec00b63e151b487f00383834a6edfb37", "score": "0.7023297", "text": "def displayBoard(board):\n print(\"\\n\\t\",board[0],\"|\",board[1],\"|\",board[2])\n print(\"\\t\",\"---------\")\n print(\"\\n\\t\",board[3],\"|\",board[4],\"|\",board[5])\n print(\"\\t\",\"---------\")\n print(\"\\n\\t\",board[6],\"|\",board[7],\"|\",board[8],\"\\n\")", "title": "" }, { "docid": "e6b0e7349f12137f8053cdce25e3035b", "score": "0.70220125", "text": "def print_board(self):\n\n t1 = time.time()\n self.get_board()\n print(f\"Getting board took: {time.time() - t1} seconds\")\n\n for row in range(16):\n for col in range(30):\n print(f\"{'{0:0=2d}'.format(int(self.board[row, col]))} \", end='')\n print(\"\")", "title": "" }, { "docid": "5405739f1b52d29d4d891106d0f4850e", "score": "0.701105", "text": "def print_board(board):\n win = GraphWin('N-Rainhas', 850, 650)\n win.setBackground(color_rgb(188, 237, 145))\n title = Text(Point(400, 30), \"N-Rainhas\")\n title.setSize(20)\n title.draw(win)\n\n # Desenha tabuleiro principal\n rect = Rectangle(\n Point(150 - 5, 100 - 5),\n Point(650 + 5, 600 + 5)\n )\n rect.setFill('brown')\n rect.draw(win)\n\n # Desenha as casas no tabuleiro\n square = 500 / N\n for i in range(N):\n for j in range(N):\n if (i + j) % 2 == 0:\n x = 150 + i * square\n y = 100 + j * square\n rect = Rectangle(\n Point(x, y),\n Point(x + square, y + square)\n )\n rect.setFill('gray')\n rect.draw(win)\n\n # Desenha as peças no tabuleiro\n x = 150 + i * square\n y = 100 + board[i] * square\n cir = Circle(\n Point(x + 0.5 * square, y + 0.5 * square), 160 / N\n )\n cir.setFill('blue')\n cir.draw(win)\n\n win.getMouse()\n win.close()", "title": "" }, { "docid": "4a38d6c584b036541497c7a43c7473c5", "score": "0.7010936", "text": "def print_board(self):\n for row in self.board:\n print(row)", "title": "" }, { "docid": "097795c470ad5c54e257ca84d8973871", "score": "0.7008945", "text": "def display(self):\n for i in range(self.y):\n print()\n for i in range(0, self.height):\n for j in range(self.x):\n print(' ', end='')\n for j in range(0, self.width):\n print('#', end='')\n print()", "title": "" }, { "docid": "c5db10736708076c9f7078661ace854c", "score": "0.70041984", "text": "def
print_board(board):\n for rows in range(len(board)):\n for cols in range(len(board[rows])):\n print(str(board[rows][cols])+ \" \",end='')\n print(\"\\n\")", "title": "" }, { "docid": "572672e31e6f0175602657546a3c549b", "score": "0.7003478", "text": "def printBoard(board):\r\n dividers = \"|--------------------------------|\"\r\n print(dividers)\r\n for rows in range(0,9):\r\n for columns in range(0,9):\r\n if (rows == 3 or rows == 6) and columns == 0:\r\n print(dividers)\r\n if columns == 0 or columns == 3 or columns == 6:\r\n print(\"|\", end=\" \")\r\n print(\" \" + str(board[rows][columns]), end=\" \")\r\n if columns == 8:\r\n print(\"|\")\r\n print(dividers)\r\n return", "title": "" }, { "docid": "15df6ef6b505a385217ce078b2756102", "score": "0.6998638", "text": "def print_board (self):\n\n self.printboard = '\\n\\n'.join([' '.join(['{}'.format(item) for item in row]) for row in self.board])\n return self.printboard", "title": "" }, { "docid": "0c6fa4a62cb2bc0cf0d52dda2dd6314a", "score": "0.69947696", "text": "def print_board(board):\r\n pprint(board, width = 55)", "title": "" }, { "docid": "f09c0777866f30195b3a3e0bf0d809a8", "score": "0.69942194", "text": "def print_board_repr(self):\n for line in self.board :\n for char in line :\n sys.stdout.write(str(char))\n print(\"\\n\")", "title": "" }, { "docid": "a54922c9d8a514cf87d2b60626145805", "score": "0.69893", "text": "def draw_grid(self):\n os.system(\"clear\")\n print(\" A | B | C \")\n print(\"1 {} | {} | {}\".format(*self.grid[0]))\n print(\"2 {} | {} | {}\".format(*self.grid[1]))\n print(\"3 {} | {} | {}\".format(*self.grid[2]))", "title": "" }, { "docid": "d9eaca40d347af77bb6ce38e74a4cf42", "score": "0.698742", "text": "def display(self, board):\n # Clear the screen\n os.system('clear')\n # Print who's turn it is\n print('Player: ' + str(self.player) + '\\n')\n # These numbers go at the top and are useful in understanding the coordinate output\n print(' 0 1 2 3 4 5 6 7')\n # The line at the top\n print(' ------------------')\n # For the numbers going down the left side\n row = 0\n # Print every item of the board\n for i in board:\n # Print the row index and border\n print(str(row) + '| ', end='', flush=True)\n # Print the pieces\n for j in i:\n # Set the actual piece to a 2-char block character of the appropriate color\n piece = (bcolors.BLACK + '██') if j == 1 else ((bcolors.FAIL + '██') if j == 2 else (bcolors.GRAY + '██' if j == 3 else (bcolors.RED + '██' if j == 4 else bcolors.WHITE + ' ')))\n # Print it\n print(piece, end='', flush=True)\n # Reset the colors and print the border\n print(bcolors.ENDC, end='', flush=True)\n print(' |')\n row += 1\n # Print the bottom border\n print(' ------------------')", "title": "" }, { "docid": "49195803d7e9e4b5087ec36cbe966b23", "score": "0.697707", "text": "def drawGrid(self):\n\n for i in range(self.row):\n line = '\\t|'\n for j in range(self.col):\n if not self.getCell((i * self.row) + j): \n line += ' '.center(5) + '|'\n else:\n line += str(self.getCell((i * self.row) + j)).center(5) + '|' \n print(line)\n print()", "title": "" }, { "docid": "c3526ff18f1417206b8aa9390bfb6f86", "score": "0.69665295", "text": "def show_board(self):\n for row in range(0, BoardParameters['ROWS']):\n for col in range(0, BoardParameters['COLS']):\n pygame.draw.rect(self.destination,\n self.board.get_cell_color_at(row, col),\n [col * BoardParameters['CELL_WIDTH'],\n row * BoardParameters['CELL_HEIGHT'],\n BoardParameters['CELL_WIDTH'],\n BoardParameters['CELL_HEIGHT']])\n piece = self.board.get_piece_at(row, 
col)\n if piece != Pieces.NONE:\n self.destination.blit(self.pieces_graphics[piece],\n (col * BoardParameters['CELL_HEIGHT'],\n row * BoardParameters['CELL_WIDTH']))\n pygame.display.update()", "title": "" }, { "docid": "616a562962e1171b50235e1cb119c45a", "score": "0.6961917", "text": "def print_board(self):\n cols = len(self.board)\n rows = 0\n if cols:\n rows = len(self.board[0])\n for j in range(rows):\n for i in range(cols):\n if(self.board[i][j].isBomb == True and self.board[i][j].isVisible==True):\n print(\"*\", end=\" \")\n elif(self.board[i][j].isVisible==True):\n print(self.board[i][j].adjBomb, end=\" \")\n else:\n print(\"?\", end=\" \")\n print()\n print()\n return self.board", "title": "" }, { "docid": "33fa7dc195b261b3238fe7c39bae70cb", "score": "0.6961245", "text": "def print_board(self):\n for i in range(self.N):\n print(self.board[i])", "title": "" }, { "docid": "e047acee923c3290fae045e833fd4127", "score": "0.69567883", "text": "def display(self):\n for ejey in range(0, self.__y):\n print(\"\")\n for q in range(0, self.__height):\n for ejex in range(0, self.__x):\n print(\" \", end=\"\")\n for p in range(0, self.__width):\n print(\"#\", end=\"\")\n print(\"\")", "title": "" }, { "docid": "98ce4b5aafb828a60a2e8661eba7b61e", "score": "0.69564074", "text": "def board_display(board):\n spaces = \" \" * 10\n line_space = spaces + \" | | \"\n line_horiz = spaces + \"—\" * 11\n\n clear_screen()\n print(\"\\n\\n{}\".format(line_horiz))\n for i in range(0,9,3):\n print(\"{}\\n{} {} | {} | {} \".format(line_space, spaces, board[i+1], board[i+2], board[i+3]))\n print(\"{}\\n{}\".format(line_space, line_horiz))", "title": "" }, { "docid": "0bcf8bb148c7a9a13668976672e25d94", "score": "0.6952317", "text": "def display_board(board):\n print(\"\\n\\t\", board[0],\"║\", board[1], \"║\", board[2])\n print(\"\\t\", \"══╬═══╬══ 012\")\n print(\"\\t\", board[3],\"║\", board[4], \"║\", board[5], \" 345\")\n print(\"\\t\", \"══╬═══╬══ 678\")\n print(\"\\t\", board[6],\"║\", board[7], \"║\", board[8])", "title": "" }, { "docid": "5565a1f033aec4450534f9625fc8e27c", "score": "0.6942078", "text": "def print_board(self):\n return_string = \"{:<8} {:<8} {:<8} {:<8} {:<8} {:<8} {:<8} {:<8}\\n{:<8} {:<8} {:<8} {:<8} {:<8} {:<8} {:<8} {:<8}\\n\" \\\n \"{:<8} {:<8} {:<8} {:<8} {:<8} {:<8} {:<8} {:<8}\\n{:<8} {:<8} {:<8} {:<8} {:<8} {:<8} {:<8} {:<8}\\n\" \\\n \"{:<8} {:<8} {:<8} {:<8} {:<8} {:<8} {:<8} {:<8}\\n{:<8} {:<8} {:<8} {:<8} {:<8} {:<8} {:<8} {:<8}\\n\" \\\n \"{:<8} {:<8} {:<8} {:<8} {:<8} {:<8} {:<8} {:<8}\\n{:<8} {:<8} {:<8} {:<8} {:<8} {:<8} {:<8} {:<8}\"\n print(return_string.format(str(self.board[0]), str(self.board[1]), str(self.board[2]), str(self.board[3]), str(self.board[4]),\n str(self.board[5]), str(self.board[6]), str(self.board[7]),\n str(self.board[8]), str(self.board[9]), str(self.board[10]), str(self.board[11]), str(self.board[12]),\n str(self.board[13]), str(self.board[14]), str(self.board[15]),\n str(self.board[16]), str(self.board[17]), str(self.board[18]), str(self.board[19]), str(self.board[20]),\n str(self.board[21]), str(self.board[22]), str(self.board[23]),\n str(self.board[24]), str(self.board[25]), str(self.board[26]), str(self.board[27]), str(self.board[28]),\n str(self.board[29]), str(self.board[30]), str(self.board[31]),\n str(self.board[32]), str(self.board[33]), str(self.board[34]), str(self.board[35]), str(self.board[36]),\n str(self.board[37]), str(self.board[38]), str(self.board[39]),\n str(self.board[40]), str(self.board[41]), str(self.board[42]), 
str(self.board[43]), str(self.board[44]),\n str(self.board[45]), str(self.board[46]), str(self.board[47]),\n str(self.board[48]), str(self.board[49]), str(self.board[50]), str(self.board[51]), str(self.board[52]),\n str(self.board[53]), str(self.board[54]), str(self.board[55]),\n str(self.board[56]), str(self.board[57]), str(self.board[58]), str(self.board[59]), str(self.board[60]),\n str(self.board[61]), str(self.board[62]), str(self.board[63])))", "title": "" }, { "docid": "493279d75847882dc3803e99be3d7494", "score": "0.6930201", "text": "def draw_board(size):\n h_element = ' ---'\n v_element = '| '\n for i in range(size):\n print(h_element * (size))\n print(v_element * (size+1))\n print(h_element * (size))", "title": "" }, { "docid": "4604d78a59e69f04551428ddcb27b7fd", "score": "0.69255614", "text": "def print_board(board):\n\n colors = {\n '*': None,\n '2': 'red',\n '4': 'green',\n '8': 'yellow',\n '16': 'blue',\n '32': 'magenta',\n '64': 'cyan',\n '128': 'grey',\n '256': 'white',\n '512': 'green',\n '1024': 'red',\n '2048': 'blue',\n '4096': 'magenta'\n };\n header = \"Use the arrows keys to play 2048! Press q to quit\";\n print(header);\n N = len(board);\n vertical_edge = \"\";\n for i in range(N + 2):\n vertical_edge += \"-\\t\";\n print(vertical_edge);\n for y in range(N):\n row = \"\";\n for x in board[y]:\n\n # Handling installation fail (no colors printed)\n if termcolor is not None:\n row += termcolor.colored(x, colors[x]);\n else:\n row += x\n\n row += \"\\t\";\n print(\"|\\t\" + row + \"|\");\n if y is not N - 1: print(\"\")\n print(vertical_edge);\n\n if GUI_runnable:\n gui.update_grid(board)\n gui.update()", "title": "" }, { "docid": "d93193cad3a3414442f0d5684be7615e", "score": "0.69198704", "text": "def show_board(b):\r\n\tfor i in range(3):\r\n\t\tprint \"| {} | {} | {} |\".format( *b[i*3: i*3+3] )\r\n\t\tif i in (0, 1):\r\n\t\t\tprint \"- - - - - - -\"", "title": "" }, { "docid": "b7a8c74cf45ffa1d34fa981eafad8de2", "score": "0.6915727", "text": "def draw(self):\n for i in range(9):\n for j in range(9):\n sq = self.cells[i][j]\n # Cells with fixed value have a colour in them to indicate that they are fixed.\n color = mauve if (self.board_copy[i][j] != 0) else white\n sq.draw_square(color)\n self.draw_thicker_lines()", "title": "" }, { "docid": "7a915a45056ff290222a8cf95b6cda40", "score": "0.69150984", "text": "def printGrid():\n print(\"-------\")\n print(\"|\" + grid[0] + \"|\" + grid[1] + \"|\" + grid[2] + \"|\")\n print(\"-------\")\n print(\"|\" + grid[3] + \"|\" + grid[4] + \"|\" + grid[5] + \"|\")\n print(\"-------\")\n print(\"|\" + grid[6] + \"|\" + grid[7] + \"|\" + grid[8] + \"|\")\n print(\"-------\")\n print(\"\\033[9A\") # Moves the cursor up 9 lines", "title": "" }, { "docid": "6f7bb814ea34fb0bed5970abc43848d3", "score": "0.6909626", "text": "def display(self) -> None:\r\n for row in range(3):\r\n print('| ', end=\"\")\r\n for col in range(3):\r\n print(f\"{self.state[row][col]} |\", end=\" \")\r\n print(\"\\n\")", "title": "" }, { "docid": "356cfaa985ffce49b3ada50a6dd80c27", "score": "0.6900502", "text": "def display(self):\n\t\tprint (\"\\n \", end='')\n\t\tfor i in range (self.columns):\n\t\t\tprint (str(i) + \" \", end='')\n\t\tprint(\"\\n\")\n\t\tfor i in range (self.rows):\n\t\t\tfor j in range (self.columns):\n\t\t\t\tif j == 0:\n\t\t\t\t\tprint (str(i) + \" \", end='')\n\t\t\t\tprint (self.get_letter(i,j) + \" \", end='')\n\t\t\t\tif j == self.rows - 1:\n\t\t\t\t\tprint (str(i), end='')\n\t\t\tprint(\"\\n\")\n\t\tprint (\" \", end='')\n\t\tfor i in 
range (self.columns):\n\t\t\tprint (str(i) + \" \", end='')\n\t\tprint(\"\\n\")", "title": "" }, { "docid": "d4cf09fdd99d902ad624c0f87ecdb12c", "score": "0.6900333", "text": "def _draw_board_item(self) -> None:\r\n\r\n for row in range (self._state._row):\r\n for col in range (self._state._column):\r\n\r\n if \"[\" in str(self._state._board[col][row]):\r\n self._draw_colors_with_frame(col, row)\r\n\r\n elif \"*\" in str(self._state._board[col][row]):\r\n self._draw_color_with_rect(MATCH_COLOR, col, row)\r\n\r\n elif \"|\" in str(self._state._board[col][row]):\r\n self._draw_colors_with_frame(col, row)\r\n self._draw_inner_rect(col, row)\r\n\r\n\r\n if self._state._board[col][row] == \" S \":\r\n self._draw_color_with_rect(S_COLOR, col, row)\r\n elif self._state._board[col][row] == \" T \":\r\n self._draw_color_with_rect(T_COLOR, col, row)\r\n elif self._state._board[col][row] == \" V \":\r\n self._draw_color_with_rect(V_COLOR, col, row)\r\n elif self._state._board[col][row] == \" W \":\r\n self._draw_color_with_rect(W_COLOR, col, row)\r\n elif self._state._board[col][row] == \" X \":\r\n self._draw_color_with_rect(X_COLOR, col, row)\r\n elif self._state._board[col][row] == \" Y \":\r\n self._draw_color_with_rect(Y_COLOR, col, row)\r\n elif self._state._board[col][row] == \" Z \":\r\n self._draw_color_with_rect(Z_COLOR, col, row)", "title": "" }, { "docid": "26c198acbcd1e8668c41ef1118c30805", "score": "0.6880866", "text": "def show(self):\n print(\n \"\\n\"\n + \"\\n\".join(\n [\" 1 2 3 4 5 6 7 8\"]\n + [\n \"ABCDEFGH\"[k]\n + \" \"\n + \" \".join(\n [\n [\".\", \"1\", \"2\", \"X\"][self.board[k, i]]\n for i in range(self.board_size[0])\n ]\n )\n for k in range(self.board_size[1])\n ]\n + [\"\"]\n )\n )", "title": "" }, { "docid": "cdfe103d00ea0c729efe01c1940aea10", "score": "0.68766457", "text": "def display_board(board):\n print(' | |')\n print(' ' + board[7] + ' | ' + board[8] + ' | ' + board[9])\n print(' | |')\n print('-----------')\n print(' | |')\n print(' ' + board[4] + ' | ' + board[5] + ' | ' + board[6])\n print(' | |')\n print('-----------')\n print(' | |')\n print(' ' + board[1] + ' | ' + board[2] + ' | ' + board[3])\n print(' | |')", "title": "" }, { "docid": "060e23a074ef4f7e92ae1ed9f8eb0a65", "score": "0.6876126", "text": "def display_board(self, game):\n\n # Define elements for drawing the board\n hbx3 = BOX[\"hb\"] * 3\n top = BOX[\"ul\"] + (hbx3+BOX[\"nt\"]) * 7 + hbx3 + BOX[\"ur\"]\n mid = BOX[\"wt\"] + (hbx3+BOX[\"cc\"]) * 7 + hbx3 + BOX[\"et\"]\n bot = BOX[\"ll\"] + (hbx3+BOX[\"st\"]) * 7 + hbx3 + BOX[\"lr\"]\n # Construct the board\n board_view = top + \"\\n\"\n for row in range(1, 8):\n board_view = board_view + self.get_row(row) + \"\\n\" + mid + \"\\n\"\n board_view = board_view + self.get_row(8) + \"\\n\" + bot\n # Clear the screen and display the player names and board\n os.system('clear')\n print(\"Black: {}\".format(game.black_name))\n print(board_view)\n print(\"White: {}\\n\".format(game.white_name))", "title": "" }, { "docid": "665b52e868344fc194da6042dc0f4e5a", "score": "0.6855913", "text": "def print_grid(self):\n for y in range(3):\n for x in range(3):\n print(self.tic_tac_toe_grid[x][y], end='')\n if x == 2:\n print(\"\")", "title": "" }, { "docid": "8ad1bb23df197339d42888c4b8fabd5c", "score": "0.6852709", "text": "def print_board(x_max=32,y_max=32,print_bottom=True):\n for y in range(y_max):\n print \" \", \n for x in range(x_max):\n print \"%2x|\" % self.get_tile(x,y)[0],\n print \"\\n\",\n if print_bottom:\n print \" \",\n for x in range(x_max):\n 
print \"%2x|\" % self.get_tile(x,y)[1],\n print \"\\n\",", "title": "" }, { "docid": "cdb8c6ce2cb3a3351cf8c4fdfe0ff6f6", "score": "0.6849329", "text": "def __render_board(self):\n for row in range(self.__DIMENSION):\n for col in range(self.__DIMENSION):\n if (row + col) % 2 != 0:\n p.draw.rect(self.__screen, p.Color('grey'), p.Rect(\n col*self.__SQ_SIZE, row*self.__SQ_SIZE, self.__SQ_SIZE, self.__SQ_SIZE))\n else:\n p.draw.rect(self.__screen, p.Color('white'), p.Rect(\n col*self.__SQ_SIZE, row*self.__SQ_SIZE, self.__SQ_SIZE, self.__SQ_SIZE))", "title": "" }, { "docid": "25c3774f99600c395336db5758b808f5", "score": "0.68458056", "text": "def display(self):\n for r in range(len(self.grid)):\n for c in range(len(self.grid[r])):\n if (r, c) == self.location:\n print('O', end=' ')\n else:\n print(self.grid[r][c], end=' ')\n print()\n print()", "title": "" }, { "docid": "25e990d0bc7bb2d8d6ac5870fd45ccf3", "score": "0.6845362", "text": "def printBoard(board):\r\n\tprint(\" \")\r\n\tprint(\" 1 2 3\")\r\n\tprint(\"---------\")\r\n\tprint(\"1 |\", board[0][0],board[0][1],board[0][2])\r\n\tprint(\"2 |\", board[1][0],board[1][1],board[1][2])\r\n\tprint(\"3 |\", board[2][0],board[2][1],board[2][2])", "title": "" }, { "docid": "b4e78e7558c3f81a249d3ac350bfc26b", "score": "0.6828926", "text": "def display(self):\n mTop = \"\\n\" * self.__y\n mLeft = \" \" * self.__x\n row = (\"#\" * self.__width) + \"\\n\"\n print(mTop + ((mLeft + row) * self.__height), end=\"\")", "title": "" }, { "docid": "2be3fba82e93903bd7b6d5040d66373c", "score": "0.68281925", "text": "def render(self):\n for i in range(3):\n print('\\n|', end=\"\")\n for j in range(3):\n if self.board[i][j] == 1:\n print(' X |', end=\"\")\n elif self.board[i][j] == 0:\n print(' |', end=\"\")\n else:\n print(' O |', end=\"\")\n print('\\n')", "title": "" }, { "docid": "03ca04c3a40261c4bbb9d4a752bece8c", "score": "0.68257415", "text": "def printBoardHidden(self) -> None:\n #temporary for debugging. remove for production\n self.printBoard()\n return\n # Print x heading\n print(f\"|{' ':^3}|\", end='')\n for i in range(len(self.map[0])):\n print(f'{i+1:^3}|', end='')\n # Print rows with y heading\n for i in range(len(self.map)):\n print(f'\\n|{i+1:^3}|', end='')\n for j in range(len(self.map[i])):\n if (self.map[i][j] == 'H' or self.map[i][j] == 'M'):\n print(f'{self.map[i][j]:^3}|', end='')\n else:\n print(f\"{'#':^3}|\", end='')\n return", "title": "" } ]
74553c2a9c9e131ccdb7c35572865c13
Exit when the Python version is too low.
[ { "docid": "0e40d912964040f4de81dfc0a64f14d6", "score": "0.81366885", "text": "def check_python_version():\n if sys.version_info < MINIMUM_PYTHON_VERSION:\n sys.exit(\"Python {}.{}+ is required.\".format(*MINIMUM_PYTHON_VERSION))", "title": "" } ]
[ { "docid": "be7a125e9493d1fb55458bf7b214668a", "score": "0.8035704", "text": "def verify_python_version(major_version: int, minor_revision: int)-> None:\n if (major_version, minor_revision) > sys.version_info:\n message: str = f\"This program can only be run on Python {major_version}\" \\\n f\".{minor_version} or newer, you are trying to use Python \" \\\n f\"{sys.version_info[0]}.{sys.version_info[1]} instead.\\nAborting \" \\\n \"execution.\"\n sys.exit(message)", "title": "" }, { "docid": "d4586fa7dac6f01b302b538559a21df8", "score": "0.7949835", "text": "def check_py_version():\n\timport sys\n\tvers = sys.version_info\n\tif sys.version_info<(3,0):\n\t\texit(\"sorry, you need to run this with some version of python3\")", "title": "" }, { "docid": "5cbfa92f8656f04ffd1acc29db867b1c", "score": "0.7778712", "text": "def assert_python_version():\n min_py_version = (3, 8, 0)\n\n py_version = sys.version_info\n if py_version < min_py_version:\n py_version = \".\".join(map(str, py_version[0:3]))\n min_py_version = \".\".join(map(str, min_py_version))\n print(f\"ERROR: You are currently using Python {py_version}\")\n print(f\"Please run this program using Python {min_py_version} or greater\")\n exit(1)", "title": "" }, { "docid": "1ef9be364ed98c20f7d4522c5615584f", "score": "0.75972563", "text": "def validate_python() -> None:\n if sys.version_info[:3] < REQUIRED_PYTHON_VER:\n print(\n \"Home Assistant requires at least Python {}.{}.{}\".format(\n *REQUIRED_PYTHON_VER\n )\n )\n sys.exit(1)", "title": "" }, { "docid": "a195e30ee70ebaf7b8123c26f573c391", "score": "0.7262684", "text": "def _verify_python(self):\n if sys.version_info < (2, 7) or sys.version_info >= (2, 8):\n raise ValidationFailure(\"Egg generation required version 2.7 of python\")", "title": "" }, { "docid": "a50c7e924d236c92876009b2a4ff5a09", "score": "0.72609884", "text": "def checkPythonVersion() :\n\t\n\tif sys.version_info[0] < 2 or (sys.version_info[0] > 2 and sys.version_info[1] < 7) :\n\t\treturn False\n\treturn True", "title": "" }, { "docid": "2a552bdc3a92868321794000b2ba2e32", "score": "0.7079327", "text": "def check_python():\n major = sys.version_info.major\n minor = sys.version_info.minor\n if not (major == 3 and minor >= 7):\n sys.exit('\\nRMG-Py requires Python 3.7 or higher. 
You are using Python {0}.{1}.\\n\\n'\n 'If you are using Anaconda, you should create a new environment using\\n\\n'\n ' conda env create -f environment.yml\\n\\n'\n 'If you have an existing rmg_env, you can remove it using\\n\\n'\n ' conda remove --name rmg_env --all\\n'.format(major, minor))", "title": "" }, { "docid": "5d12788419e3114b4d386298459e2ec4", "score": "0.6949776", "text": "def is_old_python():\n\n return _sys.version_info.major < 3", "title": "" }, { "docid": "b9cb47fdb7d6891ecda2f277fb490545", "score": "0.6896203", "text": "def test_python_version(self):\n self.assertGreaterEqual(sys.version_info[0], 3)", "title": "" }, { "docid": "27122d6c1e264d829b9d5b640c9dd0d1", "score": "0.6666842", "text": "def check_python_ver(min_py_ver=MIN_PYTHON_VER_DEF):\n display(HTML(\"Checking Python kernel version...\"))\n if sys.version_info < min_py_ver:\n display(\n HTML(\n \"\"\"\n <h3><font color='red'>This notebook requires a different notebook\n (Python) kernel version.</h3></font>\n From the Notebook menu (above), choose <b>Kernel</b> then\n <b>Change Kernel...</b> from the menu.<br>\n Select a <b>Python %s.%s</b> (or later) version kernel and then re-run\n this cell.<br><br>\n \"\"\"\n % min_py_ver\n )\n )\n display(\n HTML(\n \"\"\"\n Please see the <b><a href=\"./TroubleShootingNotebooks.ipynb\">\n TroubleShootingNotebooks</a></b>\n in this folder for more information<br><br><hr>\n \"\"\"\n )\n )\n raise RuntimeError(\"Python %s.%s or later kernel is required.\" % min_py_ver)\n\n display(\n HTML(\n \"Python kernel version %s.%s.%s OK\"\n % (sys.version_info[0], sys.version_info[1], sys.version_info[2])\n )\n )\n\n _check_nteract()", "title": "" }, { "docid": "82e577f1ffbfcd916c99185062645c4a", "score": "0.6656511", "text": "def test_python_version(self):\n version = get_envar(\"PYTHON_VERSION\")\n if not version:\n raise RuntimeError(\"No 'PYTHON_VERSION' envar has been set\")\n\n # remove any pre-release suffix\n version = version.split(\"-\")[0]\n version = tuple([int(vv) for vv in version.split(\".\")])\n assert version[:2] == sys.version_info[:2]", "title": "" }, { "docid": "580ae993f8709f2672f3d4fbb23caf02", "score": "0.64764005", "text": "def new_enough(majver, minver):\n if majver > sys.version_info[0]:\n sentinel = False\n elif majver == sys.version_info[0] and minver > sys.version_info[1]:\n sentinel = False\n else:\n sentinel = True\n return sentinel", "title": "" }, { "docid": "4c2e34b85a4057d39b240609047a4b8d", "score": "0.64026815", "text": "def _check_version(min_version_info, msg=None): # reliably restored by inspect\n pass", "title": "" }, { "docid": "dcf5955e71081b09972b4160f3f8e487", "score": "0.63901204", "text": "def validate_version(python_version):\n if len(python_version) != 2:\n # This is typically validated in the option parser, but check here too in\n # case we get python_version via a different entry point.\n raise UsageError(\"python_version must be <major>.<minor>: %r\" %\n format_version(python_version))\n if (3, 0) <= python_version <= (3, 3):\n # These have odd __build_class__ parameters, store co_code.co_name fields\n # as unicode, and don't yet have the extra qualname parameter to\n # MAKE_FUNCTION. Jumping through these extra hoops is not worth it, given\n # that typing.py isn't introduced until 3.5, anyway.\n raise UsageError(\n \"Python versions 3.0 - 3.3 are not supported. 
Use 3.4 and higher.\")\n if python_version > (3, 6):\n # We have an explicit per-minor-version mapping in opcodes.py\n raise UsageError(\"Python versions > 3.6 are not yet supported.\")", "title": "" }, { "docid": "66b47a65eb33386e8a4486d465ab127a", "score": "0.62481856", "text": "def CheckToolkitVersion(self, major, minor):", "title": "" }, { "docid": "503ee4bff555837dea5f6d9e5b127d05", "score": "0.62315845", "text": "def test_package_version_python():\n assert tuple(map(int, get_package_version('python').split('.'))) > (3, 5, 0)", "title": "" }, { "docid": "85acdcb524b9a6e5f718bff57f3a0f88", "score": "0.6194659", "text": "def is_python27():\n import sys\n return sys.version_info <= (2, 7)", "title": "" }, { "docid": "25e838ead0ab7d3923119ecdc16b39bd", "score": "0.6117221", "text": "def validate_python_version(config, actual_py_version=None):\n # type: (Config, Optional[str]) -> None\n lambda_version = config.lambda_python_version\n if actual_py_version is None:\n actual_py_version = 'python%s.%s' % sys.version_info[:2]\n if actual_py_version != lambda_version:\n # We're not making this a hard error for now, but we may\n # turn this into a hard fail.\n warnings.warn(\"You are currently running %s, but the closest \"\n \"supported version on AWS Lambda is %s\\n\"\n \"Please use %s, otherwise you may run into \"\n \"deployment issues. \" %\n (actual_py_version, lambda_version, lambda_version),\n stacklevel=2)", "title": "" }, { "docid": "2c98a0ca33c965e9ffc81b8b0fd8aeb8", "score": "0.60797554", "text": "def version():\n sys.exit(__version__)", "title": "" }, { "docid": "30dd8055ca45f43d1fd8bc55012f7fea", "score": "0.60484034", "text": "def check_python_exe_or_die(required) -> List[str]:\n error = []\n if sys.platform == \"win32\":\n possible_exes = ([\"py\", f\"-{required}\"], [\"py3\"], [\"py\"])\n else:\n possible_exes = ([f\"python{required}\"], [\"python3\"], [\"python\"])\n for exe in possible_exes:\n valid, out = check_python_version(exe, required)\n if valid:\n return exe\n elif out:\n error.append(out)\n logging.critical(\n \"Could not find a valid python%s interpreter in path (found %s)\",\n required, \", \".join(sorted(set(error))))\n sys.exit(1)", "title": "" }, { "docid": "31656999acd98dd727fef423469ffc73", "score": "0.60255", "text": "def is_python3(self):\n # type: () -> bool\n return sys.version_info.major > 2", "title": "" }, { "docid": "f489c8de09f0878c8e135a796e292ad2", "score": "0.60024446", "text": "def check_python_version(exe: List[str], required):\n try:\n # python --version outputs to stderr for earlier versions\n _, out, err = runner.BinaryRun(exe + [\"--version\"]).communicate() # pylint: disable=unpacking-non-sequence\n version = out or err\n version = version.decode(\"utf-8\")\n if version.startswith(f\"Python {required}\"):\n return True, None\n else:\n return False, version.rstrip()\n except OSError:\n return False, None", "title": "" }, { "docid": "0f61d1746fee177ed5531c9706902ad0", "score": "0.5987796", "text": "def version_check(required_version):\n current_version = weechat.info_get(\"version_number\", \"\") or 0\n if int(current_version) < required_version:\n text = \"{name} requires WeeChat >= 1.3.0\".format(name=SCRIPT_NAME)\n weechat.prnt(\"\", text)\n\n import sys\n sys.exit(weechat.WEECHAT_RC_ERROR)", "title": "" }, { "docid": "d5d8352e8743b3646f7b1dd2f1e31385", "score": "0.59577376", "text": "def check_version():\n err = \"PaddlePaddle version 1.6 or higher is required, \" \\\n \"or a suitable develop version is satisfied as well. 
\\n\" \\\n \"Please make sure the version is good with your code.\" \\\n\n try:\n fluid.require_version('1.6.0')\n except Exception as e:\n logger.error(err)\n sys.exit(1)", "title": "" }, { "docid": "9269acd0028f48e3ce54d8362b705853", "score": "0.5951916", "text": "def version_check():\n print(\"setuptools version:\", setuptools.__version__)", "title": "" }, { "docid": "82dac8fc5be21d1cb970156e2acf4935", "score": "0.59262145", "text": "def self_checking(self):\n if PYTHON_VERSION in (2.7, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8):\n self.version = PYTHON_VERSION\n else:\n assert PYTHON_VERSION == 2.7\n self.version = 2.7\n\n path = osp.join(srcdir, \"bytecode-%s\" % self.version, parent_function_name() + \".pyc\")\n self.assert_runs_ok(path, arg_type=\"bytecode-file\")", "title": "" }, { "docid": "954a1353aa5505274f11ac84132bb74e", "score": "0.5900853", "text": "def _warn_deprecated_python():\n deprecated_versions = {\n (2, 7): {\"date\": DEPRECATION_DATE_MAP[\"2.x\"]},\n (3, 4): {\"date\": DEPRECATION_DATE_MAP[\"2.x\"]},\n (3, 5): {\"date\": \"2021-11-10\"},\n }\n py_version = (sys.version_info.major, sys.version_info.minor)\n minimum_version = (3, 6)\n\n if py_version in deprecated_versions:\n params = deprecated_versions[py_version]\n warning = (\n \"aws-encryption-sdk will no longer support Python {}.{} \"\n \"starting {}. To continue receiving service updates, \"\n \"bug fixes, and security updates please upgrade to Python {}.{} or \"\n \"later. For more information, see SUPPORT_POLICY.rst: \"\n \"https://github.com/aws/aws-encryption-sdk-python/blob/master/SUPPORT_POLICY.rst\"\n ).format(py_version[0], py_version[1], params[\"date\"], minimum_version[0], minimum_version[1])\n warnings.warn(warning, DeprecationWarning)", "title": "" }, { "docid": "efbae16ec84d1e7c88a06dc5ffb0294e", "score": "0.58912295", "text": "def python_version():\n pybin = os.path.basename(sys.executable)\n debug(\n \"Use {pybin}, version {v[0]}.{v[1]}.{v[2]} on process {pid}@{server}\",\n pybin=pybin,\n v=sys.version_info[0:3],\n pid=os.getpid(),\n server=platform.uname().node,\n )", "title": "" }, { "docid": "635d1045cc3f8905d0af840b059562d8", "score": "0.58544433", "text": "def check_version(log=None, sysmodule=None, target_version=None):\n if log is None:\n log = _log\n\n (comparison, current_version, target_version) = \\\n compare_version(sysmodule, target_version)\n\n if comparison >= 0:\n # Then the current version is at least the minimum version.\n return True\n\n message = (\"WebKit Python scripts do not support your current Python \"\n \"version (%s). 
The minimum supported version is %s.\\n\"\n \" See the following page to upgrade your Python version:\\n\\n\"\n \" http://trac.webkit.org/wiki/PythonGuidelines\\n\"\n % (current_version, target_version))\n log.warn(message)\n return False", "title": "" }, { "docid": "e5e8f85d118f3503440442b3199e372b", "score": "0.5831753", "text": "def opt_version(self) -> None:\n exit(ExitStatus.EX_OK, f\"{version}\")", "title": "" }, { "docid": "7387b18dbdf1b5dc9f47de7a66043a3e", "score": "0.58301646", "text": "def python_is_supported():\n # Any change to this must be copied to pwb.py\n return (PYTHON_VERSION >= (3, 3, 0) or\n (PY2 and PYTHON_VERSION >= (2, 7, 2)) or\n (PY26 and PYTHON_VERSION >= (2, 6, 5)))", "title": "" }, { "docid": "9a8ea46d71c17bc62f82d3ddddacd686", "score": "0.5816669", "text": "def check_version(switches):\n if switches.has_key('-v'):\n version()\n sys.exit(0)", "title": "" }, { "docid": "d1d178f2074e5a7456129579bd917521", "score": "0.5815192", "text": "def isPython3():\n return sys.version_info[0] == 3", "title": "" }, { "docid": "8dc1fd994e1824b7818a18aa1b24d82a", "score": "0.5805148", "text": "def pyv():\n global _py\n if _py is None:\n try:\n res = check_output([\n 'python', '-c',\n 'import sys; sys.stdout.write(str(sys.version_info.major))'\n ]).strip()\n val = str(bytes(res).decode('utf-8'))\n except:\n val = ''\n if val == '': _py = sys.version_info.major\n else: _py = int(val)\n if _py <= 0: raise SystemError('invalid python version', _py)\n return _py", "title": "" }, { "docid": "feed7088309e7a33823a9df1cd8acf47", "score": "0.57939905", "text": "def CheckOSVersion(self, major, minor):", "title": "" }, { "docid": "e934e2d026310cf8cf099ec1d9030fe0", "score": "0.57933474", "text": "def _CheckVisualStudioVersion(self, line):\n return False", "title": "" }, { "docid": "5c48c0e887c733d0378bbf2a8c37c520", "score": "0.57925713", "text": "def test_raise(self):\n\n manifest_input = self.manifest_tmpl % self.uses_sdk(min='27')\n expected = self.manifest_tmpl % self.uses_sdk(min='28', target='28')\n output = self.raise_min_sdk_version_test(manifest_input, '28', '28', False)\n self.assertEqual(output, expected)", "title": "" }, { "docid": "5659bbaea8c56f813ffe9488f43a5b09", "score": "0.569665", "text": "def check_version(self, env):\n return env.check_program('hg')", "title": "" }, { "docid": "ddbaef0b933ae413e68fa67e61700927", "score": "0.5665958", "text": "def version_happiness():\n if version_info.major == 2:\n return ':-('\n # elif version_info.major == 7:\n # raise Exception('wut?')\n else:\n return ':-)'", "title": "" }, { "docid": "a76a680abb31777487da5cd55b958696", "score": "0.5653877", "text": "def python_version(name):\n from platform import python_compiler\n\n version_str = python_compiler()\n if name not in version_str.lower():\n return 0.0\n return float(version_str.split(\" \", 2)[1].rsplit(\".\", 1)[0])", "title": "" }, { "docid": "f40b6a3c9d71356eef0f97d8bfd00fa8", "score": "0.5646651", "text": "def checkVersion():\n return xmlsecmod.checkVersion()", "title": "" }, { "docid": "d926a3939a40e6284ff0dc000c618273", "score": "0.56431687", "text": "def _check_pypi_version(self):\n try:\n check_version()\n except VersionException as err:\n print(str(err), file=sys.stderr)\n time.sleep(TWO_SECONDS)", "title": "" }, { "docid": "f52a8a4fe957c00b7912770ed1d6e5fd", "score": "0.56350756", "text": "def check_os_and_abort():\n result = try_and_print(\n message='OS support status...',\n function=check_os_support_status,\n cs='GOOD',\n )\n if not result['CS'] and 'Unsupported' in 
result['Error']:\n print_warning('OS version not supported by this script')\n if not ask('Continue anyway? (NOT RECOMMENDED)'):\n abort()", "title": "" }, { "docid": "d3f5aec5c0a0daefbf47d232a486bc3f", "score": "0.5631134", "text": "def test_raise_min(self):\n\n manifest_input = self.manifest_tmpl % self.uses_sdk(min='27')\n expected = self.manifest_tmpl % self.uses_sdk(min='28', target='28')\n output = self.raise_min_sdk_version_test(manifest_input, '28', '28', False)\n self.assertEqual(output, expected)", "title": "" }, { "docid": "5b152944fae549a53f55d21d92cb5e10", "score": "0.5612155", "text": "def _store_python_version(self, python_version):\n if python_version:\n if isinstance(python_version, str):\n version = utils.version_from_string(python_version)\n else:\n version = python_version\n else:\n version = sys.version_info[:2]\n if len(version) != 2:\n self.error(\n f\"--python_version must be <major>.<minor>: {python_version!r}\")\n # Check that we have a version supported by pytype.\n utils.validate_version(version)\n self.output_options.python_version = version\n\n try:\n self.output_options.python_exe = compiler.get_python_executable(version)\n except compiler.PythonNotFoundError:\n self.error(\"Need a valid python%d.%d executable in $PATH\" % version)", "title": "" }, { "docid": "d0d36137c3fecd6b045575e194ea48b4", "score": "0.5600134", "text": "def get_python_version():\n import platform\n versions = {'2' : '2.7.15',\n '3' : '3.6.5'}\n return versions[platform.python_version_tuple()[0]]", "title": "" }, { "docid": "0c77496db44d774df69b44aff5d21918", "score": "0.55695623", "text": "def check_version(self, env):\n return env.check_program('bzr', version_arg='--version',\n version_regexp='(\\d+)\\.(\\d+)',\n version_required=(2, 1))", "title": "" }, { "docid": "81294f8baa6ae31771afa2e586900cf4", "score": "0.5566428", "text": "def main():\n\n if sys.version_info[0] > 2 and sys.version_info[1] > 5:\n take_input(2)\n else:\n raise Exception(\"Python 3.6 or a more recent version is required.\")", "title": "" }, { "docid": "02f0b64e5966ea9a8a3a256909847b70", "score": "0.5564169", "text": "def get_python_api_ver():\n ver = sys.version_info[0:2]\n if ver == (2,6):\n return (2,5)\n else:\n return ver", "title": "" }, { "docid": "08bb931d37630f98fa53817f00049208", "score": "0.5543243", "text": "def get_python_version():\n return sys.version", "title": "" }, { "docid": "37624f7a50433e60478d9d72d4346262", "score": "0.55149937", "text": "def test_no_raise_min(self):\n\n manifest_input = self.manifest_tmpl % self.uses_sdk(min='28')\n expected = self.manifest_tmpl % self.uses_sdk(min='28', target='27')\n output = self.raise_min_sdk_version_test(manifest_input, '27', '27', False)\n self.assertEqual(output, expected)", "title": "" }, { "docid": "ec4b9b91a1032f417c1211276bbfaa7c", "score": "0.5503418", "text": "def upgrade_python(c):", "title": "" }, { "docid": "0f52abe48fb822d5a77bf03ba31de13b", "score": "0.54934067", "text": "def check_version():\n if \"3\" not in sys.version:\n print(\"[Info] : You are running python version \" + sys.version\n + \". We recommend python 3.7.2 for expected performance.\")\n else:\n print(\"[Info] : You are running python version \" + sys.version + \".\")\n\n if \"3\" not in cv2.__version__:\n print(\"[Info] : You are running cv2 version \" + cv2.__version__\n + \". 
We recommend cv2 3.4.2 for expected performance.\")\n else:\n print(\"[Info] : You are running cv2 version \" + cv2.__version__ + \".\")", "title": "" }, { "docid": "63351d2c644bd328030be3b956d79cef", "score": "0.5482215", "text": "def test_raise_codename(self):\n\n manifest_input = self.manifest_tmpl % self.uses_sdk(min='28')\n expected = self.manifest_tmpl % self.uses_sdk(min='P', target='P')\n output = self.raise_min_sdk_version_test(manifest_input, 'P', 'P', False)\n self.assertEqual(output, expected)", "title": "" }, { "docid": "8dd0d05e84b77b2b08d2d7a71dc51fca", "score": "0.5479583", "text": "def python_major_version(request):\n # https://docs.python.org/2/library/sys.html#sys.version_info\n return sys.version_info.major", "title": "" }, { "docid": "de84276b1a65d780eb94821a5f9b32e5", "score": "0.5470223", "text": "def check_python_plug_section(plugin_info: PluginInfo) -> bool:\n version = plugin_info.python_version\n\n # if the plugin doesn't restric anything, assume it is ok and try to load it.\n if not version:\n return True\n\n sys_version = sys.version_info[:3]\n if version < (3, 0, 0):\n log.error('Plugin %s is made for python 2 only and Errbot is not compatible with Python 2 anymore.',\n plugin_info.name)\n log.error('Please contact the plugin developer or try to contribute to port the plugin.')\n return False\n\n if version >= sys_version:\n log.error('Plugin %s requires python >= %s and this Errbot instance runs %s.',\n plugin_info.name, '.'.join(str(v) for v in version), '.'.join(str(v) for v in sys_version))\n log.error('Upgrade your python interpreter if you want to use this plugin.')\n return False\n\n return True", "title": "" }, { "docid": "3da711618a22de334f0fcf3243ba0069", "score": "0.5446665", "text": "def test_all_package_versions_include_python_version():\n assert 'python' in all_package_versions(include_python_version=True)\n assert 'python' not in all_package_versions(include_python_version=False)", "title": "" }, { "docid": "6b24c4a807fb11c534896b67e9d54cc4", "score": "0.54226345", "text": "def test_python_version(image):\n ctx = Context()\n container_name = get_container_name(\"py-version\", image)\n\n py_version = \"\"\n for tag_split in image.split(\"-\"):\n if tag_split.startswith(\"py\"):\n if len(tag_split) > 3:\n py_version = f\"Python {tag_split[2]}.{tag_split[3]}\"\n else:\n py_version = f\"Python {tag_split[2]}\"\n start_container(container_name, image, ctx)\n output = run_cmd_on_container(container_name, ctx, \"python --version\")\n\n # Due to py2 deprecation, Python2 version gets streamed to stderr. 
Python installed via Conda also appears to\n # stream to stderr (in some cases).\n container_py_version = output.stdout + output.stderr\n\n assert py_version in container_py_version, f\"Cannot find {py_version} in {container_py_version}\"", "title": "" }, { "docid": "e70e0623c32f7c12e354eeb1d4a687a0", "score": "0.5411615", "text": "def check_version(self, env):\n# \n# distributions = [\n# ['debian', 'apt-get'],\n# ['ubuntu', 'apt-get'],\n# ['fedora', 'yum'],\n# ['redhat', 'yum'],\n# ['centos', 'yum'],\n# ['suse', 'yast'],\n# ['darwin', 'port'],\n# ]\n#\n# import platform \n \n# (distribution, version, version_id) = platform.linux_distribution()\n# if not distribution:\n# distribution = 'darwin' # osName\n# else:\n# distribution = distribution.lower()\n \n program = self.source_systemtool()\n \n if not program == '':\n return env.check_program(program)\n# for dist, program in distributions:\n# if distribution.startswith(dist):\n# return env.check_program(program)\n return False", "title": "" }, { "docid": "84388ce3606c656167e8cfd2faf0beec", "score": "0.5408296", "text": "def check_mp_ver(min_msticpy_ver=MSTICPY_REQ_VERSION):\n display(HTML(\"Checking msticpy version...\"))\n try:\n import msticpy\n wrong_ver_err = \"msticpy %s.%s.%s or later is needed.\" % min_msticpy_ver\n mp_version = tuple([int(v) for v in msticpy.__version__.split(\".\")])\n if mp_version < min_msticpy_ver:\n raise ImportError(wrong_ver_err)\n\n except ImportError:\n display(HTML(MISSING_PKG_ERR.format(package=\"msticpy\")))\n resp = input(\"Install? (y/n)\") # nosec\n if resp.casefold().startswith(\"y\"):\n raise ImportError(\"Install msticpy\")\n\n display(\n HTML(\n \"\"\"\n <h3><font color='red'>The notebook cannot be run without\n the correct version of '<b>%s</b>' (%s.%s.%s or later)\n </font></h3>\n Please see the <b><a href=\"./TroubleShootingNotebooks.ipynb\">\n TroubleShootingNotebooks</a></b>\n in this folder for more information<br><br><hr>\n \"\"\"\n % (\"msticpy\", *min_msticpy_ver)\n )\n )\n raise RuntimeError(wrong_ver_err)\n\n display(HTML(\"msticpy version %s.%s.%s OK\" % mp_version))", "title": "" }, { "docid": "6219f304311390cda694e447679d7cad", "score": "0.5399641", "text": "def set_version_error_level(level:int=1):\n global _VER_ERROR_LVL\n _VER_ERROR_LVL = level", "title": "" }, { "docid": "e4410f9aa395c0ac3c66030fb844774c", "score": "0.5383972", "text": "def python_version(self):\n return sys.version.replace(\"\\n\", \"\")", "title": "" }, { "docid": "353639496ddaeb0b9e73c39f4d0b9968", "score": "0.53585654", "text": "def priority(self):\n try:\n __import__('argon2.low_level')\n except ImportError: # pragma: no cover\n raise RuntimeError(\"argon2_cffi package required\")\n try:\n __import__('Crypto.Cipher.AES')\n except ImportError: # pragma: no cover\n raise RuntimeError(\"PyCryptodome package required\")\n if not json: # pragma: no cover\n raise RuntimeError(\"JSON implementation such as simplejson \"\n \"required.\")\n return 2.5", "title": "" }, { "docid": "10835daaac8c1d1a0934d90a86b9537e", "score": "0.5357149", "text": "def test_no_raise_codename(self):\n\n manifest_input = self.manifest_tmpl % self.uses_sdk(min='P')\n expected = self.manifest_tmpl % self.uses_sdk(min='P', target='28')\n output = self.raise_min_sdk_version_test(manifest_input, '28', '28', False)\n self.assertEqual(output, expected)", "title": "" }, { "docid": "5da127c3731d0d55336fdce7446bbcee", "score": "0.53567344", "text": "def opt_version(self):\n self._sys_module.stdout.write(__version__.encode('utf-8') + b'\\n')\n raise 
SystemExit(0)", "title": "" }, { "docid": "22e6457ab8dd6ed7d91ceee8aae9ba4e", "score": "0.534527", "text": "def check_args():\r\n if len(sys.argv) > 1:\r\n print(\"\\n\\n\"\r\n \"No arguments needed for this version. Simply run and enjoy.\"\r\n \"\\n\\n\")\r\n sys.exit()", "title": "" }, { "docid": "c7d83476ab0577f3f18b4a61b0e91345", "score": "0.5340226", "text": "def test_version(capsys):\n with pytest.raises(SystemExit) as exinfo:\n main(['--version'])\n capture = capsys.readouterr()\n assert capture.out == f\"{__version__}\\n\"\n assert capture.err == \"\"\n assert exinfo.value.code == 0", "title": "" }, { "docid": "e0d9ea3da3e5aa43c761495dad814ade", "score": "0.53352773", "text": "def is_py3() -> bool:\n return sys.version_info[0] == 3", "title": "" }, { "docid": "106a1f19b1d6fa206a945be3e9cab37b", "score": "0.53208786", "text": "def _check_valid_version():\n\n # Locate the full path to npm\n npm_path = salt.utils.path.which(\"npm\")\n\n # pylint: disable=no-member\n res = salt.modules.cmdmod.run(\n \"{npm} --version\".format(npm=npm_path), output_loglevel=\"quiet\"\n )\n npm_version = Version(res)\n valid_version = Version(\"1.2\")\n # pylint: enable=no-member\n if npm_version < valid_version:\n raise CommandExecutionError(\n \"'npm' is not recent enough({} < {}). Please Upgrade.\".format(\n npm_version, valid_version\n )\n )", "title": "" }, { "docid": "c1bdfc4aed3b90d1adbf9be4e9024641", "score": "0.5320596", "text": "def isPython2():\n return sys.version_info[0] == 2", "title": "" }, { "docid": "f3f1016011064759395b7f3b9f740cdb", "score": "0.5309727", "text": "def require_minimum_bitcoind_version(min_version):\n networkinfo = bitcoin_cli_json(\"getnetworkinfo\")\n\n if int(networkinfo[\"version\"]) < min_version:\n print(\"ERROR: Your bitcoind version is too old. You have {}, I need {} or newer. Exiting...\".format(networkinfo[\"version\"], min_version))\n sys.exit()", "title": "" }, { "docid": "7efe9deb8e283d0c83ad74ed2211ca04", "score": "0.53046757", "text": "def test_get_version_user_abort(versioner, mock_input_no):\n\n with pytest.raises(SystemExit) as my_exit:\n\n versioner.get_version()\n assert my_exit.type == SystemExit\n assert my_exit.exception.args[0] == \"Version number not accepted. User abort\"", "title": "" }, { "docid": "d07ab4041b9f659ba78122f0c5f95fc5", "score": "0.53033596", "text": "def check_downgrade(self, engine):", "title": "" }, { "docid": "5cb2b78fbde7caa399416418308275ac", "score": "0.5293463", "text": "def confirm_psycopg2_version():\r\n\r\n ver_str = psycopg2.__version__\r\n ver = [int(i) for i in ver_str.partition(' ')[0].split('.')]\r\n min_ver = [2, 4, 5]\r\n min_ver_str = '.'.join(str(v) for v in min_ver)\r\n\r\n if ver < min_ver:\r\n msg = ('The installed psycopg2 version ({0}) is older than the'\r\n ' minimally required version ({1}). 
Its path is {2}.'\r\n ).format(ver_str, min_ver_str, psycopg2.__path__)\r\n exit(msg) # Return code is 1.\r", "title": "" }, { "docid": "31dd6802148f1471259cf52da9681680", "score": "0.5288207", "text": "def mustBeSmall(n):\n print \"Unable to handle inputs >=\",n,\"with these settings.\"\n sys.exit(0)", "title": "" }, { "docid": "2344b938940747c486d5ffb491b43151", "score": "0.5280438", "text": "def _env_threadless_compliant() -> bool:\n return not IS_WINDOWS and sys.version_info >= (3, 8)", "title": "" }, { "docid": "be36041d2914c4f8b684e540dba2375f", "score": "0.5274581", "text": "def test_python_version(image):\n ctx = Context()\n container_name = f\"py-version-{image.split('/')[-1].replace('.', '-').replace(':', '-')}\"\n\n py_version = \"\"\n for tag_split in image.split('-'):\n if tag_split.startswith('py'):\n if len(tag_split) > 3:\n py_version = f\"Python {tag_split[2]}.{tag_split[3]}\"\n else:\n py_version = f\"Python {tag_split[2]}\"\n _start_container(container_name, image, ctx)\n output = _run_cmd_on_container(container_name, ctx, \"python --version\")\n\n container_py_version = output.stdout\n # Due to py2 deprecation, Python2 version gets streamed to stderr. Python installed via Conda also appears to\n # stream to stderr, hence the pytorch condition.\n if \"Python 2\" in py_version or \"pytorch\" in image:\n container_py_version = output.stderr\n\n assert py_version in container_py_version, f\"Cannot find {py_version} in {container_py_version}\"", "title": "" }, { "docid": "e71fa047a0ee025a0080907f979bac8a", "score": "0.5273992", "text": "def checkVersionExact():\n return xmlsecmod.checkVersionExact()", "title": "" }, { "docid": "5aa69dc6a08f6cd58003dac6697dac85", "score": "0.5271911", "text": "def check_distribution():\n if settings.OPENSTACK_RELEASE not in (settings.OPENSTACK_RELEASE_CENTOS,\n settings.OPENSTACK_RELEASE_UBUNTU):\n error_msg = (\"{0} distribution is not supported!\".format(\n settings.OPENSTACK_RELEASE))\n logger.error(error_msg)\n raise Exception(error_msg)", "title": "" }, { "docid": "2a9b8f9b492d99b51efb337eb1c57559", "score": "0.5249613", "text": "def check_version(cls):\r\n return float(1.0)", "title": "" }, { "docid": "51a7a74082944266e6abacc3c0e62da4", "score": "0.52464616", "text": "def IsCompatible(ver):", "title": "" }, { "docid": "f209a60b54f7f7b587593f37e9261d73", "score": "0.52423763", "text": "def opt_version(self):\n sys.stdout.write(__version__.encode('utf-8') + b'\\n')\n sys.exit(0)", "title": "" }, { "docid": "a1f752e7cbca05323b32c6e8d02e19f9", "score": "0.5239127", "text": "def check_relenv(self, version):\n relenv = False\n if packaging.version.parse(version) >= packaging.version.parse(\"3006.0\"):\n relenv = True\n return relenv", "title": "" }, { "docid": "77f9b54fdab1210510dcbcb75e2c8755", "score": "0.52181643", "text": "def _detect_runtime_attributes(self):\n self._python_version = \".\".join([str(i) for i in sys.version_info][0:3])", "title": "" }, { "docid": "fae560c7eb539123d622a457816df358", "score": "0.52111626", "text": "def compare_setuptools_version(required: Tuple[int, ...]) -> bool:\n current = tuple(map(int, setuptools.__version__.split('.')[:2]))\n return current >= required", "title": "" }, { "docid": "8f2f789a65b23b0f67b9934615163acf", "score": "0.5173488", "text": "def __init__(self):\n # Initialize key variables\n self.username = getpass.getuser()\n valid = True\n major = 3\n minor = 5\n major_installed = sys.version_info[0]\n minor_installed = sys.version_info[1]\n\n # Exit if python version is too low\n if 
major_installed < major:\n valid = False\n elif major_installed == major and minor_installed < minor:\n valid = False\n if valid is False:\n log_message = (\n 'Required python version must be >= {}.{}. '\n 'Python version {}.{} installed'\n ''.format(major, minor, major_installed, minor_installed))\n log.log2die_safe(1027, log_message)", "title": "" }, { "docid": "6c8a95327f4e0fe4e460bb86d9ff0519", "score": "0.5161312", "text": "def check_version(self, env):\n return env.check_program('git')", "title": "" }, { "docid": "c6bc5e8c7407f5a0df66c349b5611f6f", "score": "0.51526415", "text": "def exception():\n print(\"Enter a valid language. For help, type '<app> -i'\")\n sys.exit(0)", "title": "" }, { "docid": "ff3c6ade9e0f550596790c00e18b56ce", "score": "0.51508546", "text": "def check_versions(min_versions):\n from distutils.version import StrictVersion\n\n try:\n import scipy\n spversion = scipy.__version__\n except ImportError:\n raise ImportError(\"LIMIX requires scipy\")\n\n try:\n import numpy\n npversion = numpy.__version__\n except ImportError:\n raise ImportError(\"LIMIX requires numpy\")\n \n try:\n import pandas\n pandasversion = pandas.__version__\n except ImportError:\n raise ImportError(\"LIMIX requires pandas\")\n try:\n import SCons\n sconsversion = SCons.__version__\n except ImportError:\n raise ImportError(\"LIMIX requires scons\")\n\n #match version numbers\n try:\n assert StrictVersion(strip_rc(npversion)) >= min_versions['numpy']\n except AssertionError:\n raise ImportError(\"Numpy version is %s. Requires >= %s\" %\n (npversion, min_versions['numpy']))\n try:\n assert StrictVersion(strip_rc(spversion)) >= min_versions['scipy']\n except AssertionError:\n raise ImportError(\"Scipy version is %s. Requires >= %s\" %\n (spversion, min_versions['scipy']))\n try:\n assert StrictVersion(strip_rc(pandasversion)) >= min_versions['pandas']\n except AssertionError:\n raise ImportError(\"pandas version is %s. Requires >= %s\" %\n (pandasversion, min_versions['pandas']))\n try:\n assert StrictVersion(strip_rc(sconsversion)) >= min_versions['scons']\n except AssertionError:\n raise ImportError(\"scons version is %s. Requires >= %s\" %\n (sconsversion, min_versions['scons']))", "title": "" }, { "docid": "df696fad76124c4d8757aa9cebcbbd61", "score": "0.51277286", "text": "def needs_backport(version_info: List[Any]) -> bool:\n major, minor, *rest = version_info\n\n if major < 4 and minor < 5:\n return True\n\n return False", "title": "" }, { "docid": "98673f389cd416ca0b1f5cb46dd5b317", "score": "0.51204884", "text": "def get_python_major_version(python_bin_path):\n return run_shell(\n [python_bin_path, '-c', 'import sys; print(sys.version[0])'])", "title": "" }, { "docid": "6bf93bd038ab431c9bb807b6964601ce", "score": "0.5098482", "text": "def checkEnvironment():\n\n if sys.version_info[0:2] < (2, 4):\n fatal(\"This script must be run with Python 2.4 or later\")\n\n if platform.system() != 'Darwin':\n fatal(\"This script should be run on a Mac OS X 10.4 (or later) system\")\n\n if int(platform.release().split('.')[0]) < 8:\n fatal(\"This script should be run on a Mac OS X 10.4 (or later) system\")\n\n if not os.path.exists(SDKPATH):\n fatal(\"Please install the latest version of Xcode and the %s SDK\"%(\n os.path.basename(SDKPATH[:-4])))\n\n # Because we only support dynamic load of only one major/minor version of\n # Tcl/Tk, ensure:\n # 1. 
there are no user-installed frameworks of Tcl/Tk with version\n # higher than the Apple-supplied system version in\n # SDKROOT/System/Library/Frameworks\n # 2. there is a user-installed framework (usually ActiveTcl) in (or linked\n # in) SDKROOT/Library/Frameworks with the same version as the system\n # version. This allows users to choose to install a newer patch level.\n\n frameworks = {}\n for framework in ['Tcl', 'Tk']:\n fwpth = 'Library/Frameworks/%s.framework/Versions/Current' % framework\n sysfw = os.path.join(SDKPATH, 'System', fwpth)\n libfw = os.path.join(SDKPATH, fwpth)\n usrfw = os.path.join(os.getenv('HOME'), fwpth)\n frameworks[framework] = os.readlink(sysfw)\n if not os.path.exists(libfw):\n fatal(\"Please install a link to a current %s %s as %s so \"\n \"the user can override the system framework.\"\n % (framework, frameworks[framework], libfw))\n if os.readlink(libfw) != os.readlink(sysfw):\n fatal(\"Version of %s must match %s\" % (libfw, sysfw) )\n if os.path.exists(usrfw):\n fatal(\"Please rename %s to avoid possible dynamic load issues.\"\n % usrfw)\n\n if frameworks['Tcl'] != frameworks['Tk']:\n fatal(\"The Tcl and Tk frameworks are not the same version.\")\n\n # add files to check after build\n EXPECTED_SHARED_LIBS['_tkinter.so'] = [\n \"/Library/Frameworks/Tcl.framework/Versions/%s/Tcl\"\n % frameworks['Tcl'],\n \"/Library/Frameworks/Tk.framework/Versions/%s/Tk\"\n % frameworks['Tk'],\n ]\n\n # Remove inherited environment variables which might influence build\n environ_var_prefixes = ['CPATH', 'C_INCLUDE_', 'DYLD_', 'LANG', 'LC_',\n 'LD_', 'LIBRARY_', 'PATH', 'PYTHON']\n for ev in list(os.environ):\n for prefix in environ_var_prefixes:\n if ev.startswith(prefix) :\n print(\"INFO: deleting environment variable %s=%s\" % (\n ev, os.environ[ev]))\n del os.environ[ev]\n\n base_path = '/bin:/sbin:/usr/bin:/usr/sbin'\n if 'SDK_TOOLS_BIN' in os.environ:\n base_path = os.environ['SDK_TOOLS_BIN'] + ':' + base_path\n # Xcode 2.5 on OS X 10.4 does not include SetFile in its usr/bin;\n # add its fixed location here if it exists\n OLD_DEVELOPER_TOOLS = '/Developer/Tools'\n if os.path.isdir(OLD_DEVELOPER_TOOLS):\n base_path = base_path + ':' + OLD_DEVELOPER_TOOLS\n os.environ['PATH'] = base_path\n print(\"Setting default PATH: %s\"%(os.environ['PATH']))\n # Ensure ws have access to hg and to sphinx-build.\n # You may have to create links in /usr/bin for them.\n runCommand('hg --version')\n runCommand('sphinx-build --version')", "title": "" }, { "docid": "d0e09864d3a7adcd025efbfd90f22b19", "score": "0.5098038", "text": "def _exceptionwarning(ui):\n\n # For compatibility checking, we discard the portion of the hg\n # version after the + on the assumption that if a \"normal\n # user\" is running a build with a + in it the packager\n # probably built from fairly close to a tag and anyone with a\n # 'make local' copy of hg (where the version number can be out\n # of date) will be clueful enough to notice the implausible\n # version number and try updating.\n ct = util.versiontuple(n=2)\n worst = None, ct, b'', b''\n if ui.config(b'ui', b'supportcontact') is None:\n for name, mod in extensions.extensions():\n # 'testedwith' should be bytes, but not all extensions are ported\n # to py3 and we don't want UnicodeException because of that.\n testedwith = stringutil.forcebytestr(\n getattr(mod, 'testedwith', b'')\n )\n version = extensions.moduleversion(mod)\n report = getattr(mod, 'buglink', _(b'the extension author.'))\n if not testedwith.strip():\n # We found an untested extension. 
It's likely the culprit.\n worst = name, b'unknown', report, version\n break\n\n # Never blame on extensions bundled with Mercurial.\n if extensions.ismoduleinternal(mod):\n continue\n\n tested = [util.versiontuple(t, 2) for t in testedwith.split()]\n if ct in tested:\n continue\n\n lower = [t for t in tested if t < ct]\n nearest = max(lower or tested)\n if worst[0] is None or nearest < worst[1]:\n worst = name, nearest, report, version\n if worst[0] is not None:\n name, testedwith, report, version = worst\n if not isinstance(testedwith, (bytes, str)):\n testedwith = b'.'.join(\n [stringutil.forcebytestr(c) for c in testedwith]\n )\n extver = version or _(b\"(version N/A)\")\n warning = _(\n b'** Unknown exception encountered with '\n b'possibly-broken third-party extension \"%s\" %s\\n'\n b'** which supports versions %s of Mercurial.\\n'\n b'** Please disable \"%s\" and try your action again.\\n'\n b'** If that fixes the bug please report it to %s\\n'\n ) % (name, extver, testedwith, name, stringutil.forcebytestr(report))\n else:\n bugtracker = ui.config(b'ui', b'supportcontact')\n if bugtracker is None:\n bugtracker = _(b\"https://mercurial-scm.org/wiki/BugTracker\")\n warning = (\n _(\n b\"** unknown exception encountered, \"\n b\"please report by visiting\\n** \"\n )\n + bugtracker\n + b'\\n'\n )\n sysversion = pycompat.sysbytes(sys.version).replace(b'\\n', b'')\n\n def ext_with_ver(x):\n ext = x[0]\n ver = extensions.moduleversion(x[1])\n if ver:\n ext += b' ' + ver\n return ext\n\n warning += (\n (_(b\"** Python %s\\n\") % sysversion)\n + (_(b\"** Mercurial Distributed SCM (version %s)\\n\") % util.version())\n + (\n _(b\"** Extensions loaded: %s\\n\")\n % b\", \".join(\n [ext_with_ver(x) for x in sorted(extensions.extensions())]\n )\n )\n )\n return warning", "title": "" }, { "docid": "20c4c876af34dec54fc2b6c3320c1e7f", "score": "0.50936615", "text": "def test_entrypoints_python_syntax_errors():\n pass # NOQA", "title": "" }, { "docid": "cbe67619552253b710dc11c2dce73dc0", "score": "0.50904626", "text": "def show_version():\n\n\tprint \" %s version %s\" % (APPNAME, VERSION)\n\tsys.exit(0)", "title": "" }, { "docid": "7a8094b949e974dd6b87be9fe0e55649", "score": "0.5082099", "text": "def minimum_cross_python_versions(package_name, request_min=None, refresh=False):\n if request_min is not None:\n request_min = Version(request_min)\n\n new_table, hacked_pyver_to_pkgvers = build_package_table(package_name, refresh)\n\n import rich\n rich.print(new_table.to_string())\n rich.print(new_table['min_pyver'].unique())\n rich.print(new_table['max_pyver'].unique())\n\n summarize_package_availability(package_name)\n\n chosen_minmax_for = {}\n chosen_minimum_for = {}\n\n # groups = dict(list(new_table.groupby('min_pyver')))\n _grouped = sorted(new_table.groupby('min_pyver'), key=lambda t: Version(t[0]))\n\n for min_pyver, subdf in _grouped:\n # print('--- min_pyver = {!r} --- '.format(min_pyver))\n\n if 'version' in subdf.columns:\n version_to_support = dict(list(subdf.groupby('version')))\n else:\n version_to_support = dict(list(subdf.groupby('pkg_version')))\n\n cand_to_score = {}\n try:\n version_to_support = ub.sorted_keys(version_to_support, key=Version)\n except Exception:\n maybe_bad_keys = list(version_to_support.keys())\n print('version_to_support = {!r}'.format(maybe_bad_keys))\n maybe_ok_keys = [k for k in maybe_bad_keys if '.dev0' not in k]\n version_to_support = ub.dict_subset(version_to_support, maybe_ok_keys)\n\n if 'os' in subdf.columns and 'arch' in subdf.columns:\n combo_values = 
{\n ('linux', 'x86_64'): 101,\n ('macosx', 'x86_64'): 5,\n ('win', 'x86_64'): 11,\n }\n for cand, support in version_to_support.items():\n has_combos = support.value_counts(['os', 'arch']).index.tolist()\n total_have = sum(combo_values.get(k, 0) for k in has_combos)\n score = total_have\n cand_to_score[cand] = score\n\n cand_to_score = ub.sorted_vals(cand_to_score)\n cand_to_score = ub.sorted_keys(cand_to_score, key=Version)\n\n # Filter to only the versions we requested, but if\n # none exist, return something\n if request_min is not None:\n valid_cand = [cand for cand in cand_to_score if Version(cand) >= request_min]\n else:\n valid_cand = [cand for cand in cand_to_score]\n if len(valid_cand) == 0:\n valid_cand = list(cand_to_score)\n cand_to_score = {c: cand_to_score[c] for c in valid_cand}\n\n # This is a proxy metric, but a pretty good one in 2021\n if len(cand_to_score) == 0:\n ...\n # print('no cand for')\n # print(f'min_pyver={min_pyver}')\n else:\n max_score = max(cand_to_score.values())\n min_cand = min(cand_to_score.keys())\n\n best_cand = min([\n cand for cand, score in cand_to_score.items()\n if score == max_score\n ], key=Version)\n max_cand = max([\n cand for cand, score in cand_to_score.items()\n ], key=Version)\n # print('best_cand = {!r}'.format(best_cand))\n # print('max_cand = {!r}'.format(max_cand))\n chosen_minmax_for[min_pyver] = {\n 'min': min_cand,\n 'best': best_cand,\n 'max': max_cand\n }\n\n # For each Python version find the minimum and maximum Package version it\n # can handle\n # TODO: implement this\n python_versions = PythonVersions()\n for pyver in python_versions.python_vstrings:\n ...\n\n # TODO better logic:\n # FOR EACH PYTHON VERSION\n # find the minimum version that will work with that Python version.\n rich.print('chosen_minmax_for = {}'.format(ub.repr2(chosen_minmax_for, nl=1)))\n\n chosen_minimum_for = {k: t['best'] for k, t in chosen_minmax_for.items()}\n\n # HACK because our other logic is wrong too\n if 1:\n for pyver, pkgvers in hacked_pyver_to_pkgvers.items():\n if pyver not in chosen_minimum_for:\n chosen_minimum_for[pyver] = min(pkgvers, key=Version)\n\n chosen_python_versions = sorted(chosen_minimum_for, key=Version)\n lines = []\n for cur_pyver, next_pyver in ub.iter_window(chosen_python_versions, 2):\n pkg_ver = chosen_minimum_for[cur_pyver]\n if not pkg_ver.startswith('stdlib'):\n line = f\"{package_name}>={pkg_ver:<8} ; python_version < {next_pyver!r:<6} and python_version >= {cur_pyver!r:<6} # Python {cur_pyver}\"\n lines.append(line)\n else:\n line = f\"# {package_name}>={pkg_ver:<8} is in the stdlib for python_version < '{next_pyver}' and python_version >= '{cur_pyver}' # Python {cur_pyver}\"\n lines.append(line)\n # last\n # https://peps.python.org/pep-0508/\n if len(chosen_python_versions):\n cur_pyver = chosen_python_versions[-1]\n pkg_ver = chosen_minimum_for[cur_pyver]\n if not pkg_ver.startswith('stdlib'):\n # line = f\"{package_name}>={pkg_ver:<8} ; python_version >= {cur_pyver!r:<6} # Python {cur_pyver}+\"\n next_pyver = '4.0'\n line = f\"{package_name}>={pkg_ver:<8} ; python_version < '{next_pyver}' and python_version >= {cur_pyver!r:<6} # Python {cur_pyver}+\"\n lines.append(line)\n else:\n line = f\"# {package_name}>={pkg_ver:<8} is in the stdlib for python_version < '{next_pyver}' and python_version >= '{cur_pyver}' # Python {cur_pyver}\"\n lines.append(line)\n text = '\\n'.join(lines[::-1])\n rich.print(text)", "title": "" }, { "docid": "e5c1d662e4f30bc9c7065e56b041f8d5", "score": "0.5073007", "text": "def 
_check_dist_requires_python(\n dist: BaseDistribution,\n version_info: Tuple[int, int, int],\n ignore_requires_python: bool = False,\n) -> None:\n # This idiosyncratically converts the SpecifierSet to str and let\n # check_requires_python then parse it again into SpecifierSet. But this\n # is the legacy resolver so I'm just not going to bother refactoring.\n try:\n requires_python = str(dist.requires_python)\n except FileNotFoundError as e:\n raise NoneMetadataError(dist, str(e))\n try:\n is_compatible = check_requires_python(\n requires_python,\n version_info=version_info,\n )\n except specifiers.InvalidSpecifier as exc:\n logger.warning(\n \"Package %r has an invalid Requires-Python: %s\", dist.raw_name, exc\n )\n return\n\n if is_compatible:\n return\n\n version = \".\".join(map(str, version_info))\n if ignore_requires_python:\n logger.debug(\n \"Ignoring failed Requires-Python check for package %r: %s not in %r\",\n dist.raw_name,\n version,\n requires_python,\n )\n return\n\n raise UnsupportedPythonVersion(\n \"Package {!r} requires a different Python: {} not in {!r}\".format(\n dist.raw_name, version, requires_python\n )\n )", "title": "" }, { "docid": "6b67b0946ff82efc5fae0b38173d53e0", "score": "0.5068802", "text": "def get_python_version():\n python_version = sys.version.split(\" \")[0]\n return \"Python version: {}.\".format(python_version)", "title": "" }, { "docid": "e113b53b934dff980c8434e81f6cc0a4", "score": "0.506378", "text": "def GetToolkitMinorVersion(self):", "title": "" }, { "docid": "475a29bfdbeb97dc5806fd10b8f5f889", "score": "0.5060644", "text": "def _determine_version(self, version_path):\n branch, number = cpath.split(version_path)\n\n if number == 'CHECKEDOUT':\n return sys.maxint\n\n return int(number)", "title": "" } ]
1de74d3420e875c832d2504b4f8f75b4
send request to gateway and see what happens
[ { "docid": "819dd0c5c231a4caec4b1d38d80b0574", "score": "0.5543378", "text": "def _send_request(gateway_port, protocol, request_size=1):\n c = Client(host='localhost', port=gateway_port, protocol=protocol)\n res = c.post('/foo', inputs=DocumentArray.empty(2), request_size=request_size)\n assert len(res) == 2\n return res", "title": "" } ]
[ { "docid": "d7599c2466337c11497f121f43b7029e", "score": "0.68202937", "text": "def send(self, request : str):\n pass", "title": "" }, { "docid": "2102a2b11d82925d33626cb88db9f718", "score": "0.66711557", "text": "def _send_in_request(self):\n try:\n req_params = urllib.urlencode(self._params)\n except Exception as ex:\n raise ProxyError('Error signing request string') \n \n try:\n self.logger.debug('Send api request to: %s' % self._api_url)\n self.logger.debug('Request params: %s' % req_params)\n self.logger.debug('Request timeout: %s' % self._timeout)\n if len(self._params) > 0:\n f = urllib2.urlopen(self._api_url, req_params, self._timeout)\n response = f.read()\n self.logger.debug('Response length: %s' % len(response))\n f.close() \n return response\n else:\n return \"{'command':'ping', 'message':'ok'}\" \n except (urllib2.URLError) as ex:\n self._error = json.loads(ex.fp.readline()).values()\n raise ProxyResponseError()\n except (IOError) as ex:\n raise ProxyError(ex)", "title": "" }, { "docid": "9c14c0174afd5737be6d03efb3d6bafd", "score": "0.6612925", "text": "def _send_request(self):\n route_chosen = self.comboBox_route_list.currentText()\n route_id = route_chosen.split(',')[0] #to get the id of the route\n trip_headsign_chosen = self.comboBox_trip_headsign_list.currentText()\n stop_chosen = self.comboBox_stop_list.currentText()\n self.request(route_id, trip_headsign_chosen, stop_chosen)", "title": "" }, { "docid": "b5fd862ba0a3f6eb37cc201e3edae392", "score": "0.65615106", "text": "def _send_request(self) -> None:\n logger.debug(f'Sent: {self.request.hex()} to {self.transport.get_extra_info(\"peername\")}')\n self.transport.sendto(self.request)\n asyncio.get_event_loop().call_later(self._retry_timeout, self.retry_mechanism)", "title": "" }, { "docid": "64f58aa8a11b4f7aa9b2f0ad816a9337", "score": "0.6471462", "text": "def send(self):\n url = \"{}:{}\".format(self.url, self.port)\n headers = dict(self.request.get_headers())\n body = self.request.get_body()\n self.response = requests.post(url, data=body, headers=headers)", "title": "" }, { "docid": "73e0852acddef735497d7b22cf48b32f", "score": "0.64319456", "text": "def send(self, request: Request, **requests_kwargs) -> Response:", "title": "" }, { "docid": "a7597244b8bb2ce1093bb02d34c4b272", "score": "0.6307366", "text": "def send_request(self, function_name, body):\n pass", "title": "" }, { "docid": "acae61c65da7820c5385bacb0ec22ed8", "score": "0.62872076", "text": "def exec_request(self, request: Request, expected_response: ExpectedResponse):\n url = self.get_url_for_endpoint(\n endpoint=request.endpoint,\n method=request.method,\n object_id=request.object_id,\n )\n url_params = request.url_params.copy()\n\n step_name = f\"Send {request.method.name} {url.replace(self._base_url, '')}\"\n if url_params:\n step_name += f\"?{urlencode(url_params)}\"\n with allure.step(step_name):\n response = request.method.function(\n url=url,\n params=url_params,\n json=request.data,\n headers=request.headers,\n )\n\n attach_request_log(response)\n\n status_code_should_be(\n response=response, status_code=expected_response.status_code\n )\n\n if expected_response.body is not None:\n body_should_be(response=response, expected_body=expected_response.body)\n\n return response", "title": "" }, { "docid": "8d309ebc4a096a11717e144d0a25fe94", "score": "0.62871194", "text": "def handle(self):\n data = self.request.recv(1024)\n self.request.send(data)", "title": "" }, { "docid": "261c4ef6ea34c82807c362380ade7f78", "score": "0.61477804", "text": "def send(self) 
-> None:\n\n payload = self.get_payload()\n try:\n self.response = requests.get(url=FAST_API, params=payload)\n except requests.exceptions.ConnectionError:\n print(f\"requests.exceptions.ConnectionError! Trying again in 5 seconds...\")\n sleep(5)\n self.send()", "title": "" }, { "docid": "33a6d2c2bf8ed6da2136703feeedff42", "score": "0.6145818", "text": "def request() -> None:\n\t_flag.set()", "title": "" }, { "docid": "30d219e1c02ca4edd88c1b044a0e650f", "score": "0.614521", "text": "def perform_request(request):\n info = evmapy.util.get_app_info()\n try:\n return send_request(request)\n except (ConnectionRefusedError, FileNotFoundError):\n exit(\"No %s instance is running as %s\" % (\n info['name'], info['user'].pw_name\n ))\n except TimeoutError:\n exit(\"Timeout waiting for a response from %s\" % info['name'])", "title": "" }, { "docid": "5af288073fda15a5f37ea3bc03dce80f", "score": "0.609736", "text": "def send_request(self, request):\n # Below line is a debug to show what the full request URL is. Useful in testing multitenancy API calls\n #print(\"KARTIK : CONN OBJECT : send_request called with URL: '\"+self._url_prefix + request.endpoint+\"'\")\n #POORVA: changed url-prefix because only admin has right to update spot-region in geo-fabric present in any non-mm tenant\n if '_tenant' in request.endpoint and '_fabric' in request.endpoint:\n find_url = self._url_prefix.find('/_tenant')\n find_url += 1\n url = self._url_prefix[0:find_url]\n final_url = url + request.endpoint\n else:\n final_url = self._url_prefix + request.endpoint\n\n return self._http_client.send_request(\n method=request.method,\n url=final_url,\n params=request.params,\n data=request.data,\n headers=request.headers,\n auth=self._auth,\n )", "title": "" }, { "docid": "c4e3b127c6152ed66568ace1858930fe", "score": "0.60967714", "text": "def _request(self, *args):\n self._silent_request(*args)\n return self._get_response()", "title": "" }, { "docid": "feb61361caffee40f9713e1247c82f63", "score": "0.6076453", "text": "def run(send_to_node=False):\n\n # Signed request\n sk = read_signing_key_file(os.path.join(SIGNING_KEY_DIR, 'bank_02_nid'))\n signed_request = generate_signed_request(\n data={\n 'ip_address': '104.131.41.225',\n 'port': None,\n 'protocol': 'http'\n },\n nid_signing_key=sk\n )\n\n if send_to_node:\n send_request_to_node(signed_request, live_pv=True)\n\n write_json(\n os.path.join(SIGNED_REQUESTS_DIR, 'connection-request.json'),\n signed_request\n )", "title": "" }, { "docid": "e64c0f8a57b1f0e0be09fb8d15904031", "score": "0.60684776", "text": "def request( key, server, node, netrc=os.getenv('NETRC', os.path.join(os.path.expanduser('~'), '.netrc')), verbose=False ):\n ### format and send the packet\n packet = Packet(server, node, ptype='request', key=key)\n if verbose:\n print( \"%s->%s : %s\"%(server, node, packet.dumps()) )\n send( packet, server, node, netrc, verbose=verbose )", "title": "" }, { "docid": "bed4e13fc92b992bb1bc17a9027ed057", "score": "0.6065804", "text": "def test_send_network(self) :\n symbol = 'A' \n oProtocol = Protocol(symbol,mode=\"client\",debug=self.debug)\n command = \"N200\"\n message = oProtocol.send(command)\n #if message['status'] is False :\n #print(\"\\n*** ERROR : test_send_network : {}\".format(message['notify']))\n\n #Pour enregistrer les traces d'appels de fonctions dans le fichier log/client_calltrack_sorted.txt\n client_tracker_print()\n self.assertTrue( (message['status'] is not True) )", "title": "" }, { "docid": "bdef504c22b716fa91e35b588cbe81cb", "score": "0.60542214", 
"text": "def __call__(self):\n if grinder.runNumber == 0: self.initialSleep()\n (param1, param2) = self.getParam()\n self.request1(param1, param2)", "title": "" }, { "docid": "bc5f3745f7c199ae3c64acf74b65fab8", "score": "0.604769", "text": "def send(msg):\n r = \"No response\"\n try:\n r = requests.post(\"http://127.0.0.1:8000\", data=msg)\n except:\n pass\n #print(r.status_code, r.reason)\n print(r.text[:300] + '...')", "title": "" }, { "docid": "a238038cb023591ce38531c18e8114d7", "score": "0.6022776", "text": "def send_req(self):\n raise NotImplementedError", "title": "" }, { "docid": "83dfeaac21286143ff1082d12d2ad8ca", "score": "0.5988743", "text": "def send(self, response):\n self.mh.send_message(response)", "title": "" }, { "docid": "59c5cddeebd92ef4f9f9bc5aa52e4694", "score": "0.5968064", "text": "def send_request(self, message):\n try:\n self.transport.write(message.encode())\n _LOGGER.debug('ROTEL Data sent: {!r}'.format(message))\n except:\n _LOGGER.debug('ROTEL : transport not ready !')", "title": "" }, { "docid": "7c8fd40a416893bb5a40e12801890fd4", "score": "0.59499323", "text": "def request(self, flow: mitmproxy.http.HTTPFlow):", "title": "" }, { "docid": "7c8fd40a416893bb5a40e12801890fd4", "score": "0.59499323", "text": "def request(self, flow: mitmproxy.http.HTTPFlow):", "title": "" }, { "docid": "e6cc5f296f104ba82a5b058b268140c2", "score": "0.59137785", "text": "def make_request_thread(self, service, request):\n requestResponse = self.callbacks.makeHttpRequest(service, request)\n print self.helpers.analyzeRequest(requestResponse).getUrl().toString()", "title": "" }, { "docid": "17d872c48b97a3382337681b91288f1d", "score": "0.59092957", "text": "def send_request(self):\n\n # Get a formatted version of the request\n self.last_sent_request = self.request.format_request()\n\n # Send request in a byte-encoded format\n self.socket.sendall(self.last_sent_request.encode(\"utf-8\"))\n\n # If POST method is made, params are also sent\n if self.request.method.upper() == \"POST\":\n self.socket.sendall(self.request.params.encode('utf-8'))\n\n return self.get_server_response()", "title": "" }, { "docid": "ded9d5f0197104fd897725836dc4d0d9", "score": "0.59062433", "text": "def send(self):\n \n # Generate the URL to call\n url = self._url + self._generate_query_string()\n logger.info('Sending request: %s' % url)\n \n # Generate GET request\n req = urllib2.Request(url=url)\n \n if not self._service.debug:\n try:\n f = urllib2.urlopen(req)\n data = f.read()\n f.close()\n \n # Log raw response\n logger.info('Raw response: %s' % data)\n \n except Exception, err:\n logger.exception('Request failed.')\n data = None\n else:\n # Debug data\n data = 'OK\\r\\nMessageID=1234'\n \n return self.parse_response(data)", "title": "" }, { "docid": "2a78089a8c1a1dd8fc64fd5f61835cd6", "score": "0.58932704", "text": "def test_basic_asgi_call(self):\n app = otel_asgi.OpenTelemetryMiddleware(simple_asgi)\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs)", "title": "" }, { "docid": "2a78089a8c1a1dd8fc64fd5f61835cd6", "score": "0.58932704", "text": "def test_basic_asgi_call(self):\n app = otel_asgi.OpenTelemetryMiddleware(simple_asgi)\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs)", "title": "" }, { "docid": "7471f333ad691a7c947ada6c56100eb7", "score": "0.5872071", "text": "def test_single_request(self):\n s = self.api.session()\n s.request(\"1.2.3.4\", \"mozilla\", 
\"/foo/bar\").end()\n s.end()\n data = self.connector.transcription()\n assert len(data) == 2\n assert data[0].get('action') == \"session_start\"\n assert data[1].get('action') == \"session_end\"", "title": "" }, { "docid": "1ac2df9de61ff0b2ffa257c16960c861", "score": "0.58563906", "text": "def __send_response(self, response):\n logger.debug(' >>> %s', binascii.b2a_qp(response[0]))\n self.request.send(struct.pack('!I', len(response)))\n self.request.send(response)", "title": "" }, { "docid": "7209be07465e7095c44fecde829304ca", "score": "0.58538073", "text": "def handle_one_request(self):\n import socket\n try:\n self.raw_requestline = self.rfile.readline(65537)\n if len(self.raw_requestline) > 65536:\n self.requestline = ''\n self.request_version = ''\n self.command = ''\n self.send_error(414)\n return\n if not self.raw_requestline:\n self.close_connection = 1\n return\n if not self.parse_request():\n # An error code has been sent, just exit\n return\n\n ##### Customization\n # origin\n \"\"\"\n mname = 'do_' + self.command\n if not hasattr(self, mname):\n self.send_error(501, \"Unsupported method (%r)\" % self.command)\n return\n method = getattr(self, mname)\n method()\n \"\"\"\n # now\n #import pdb; pdb.set_trace()\n self.delegate(self.get_environ(), self.gen_response, self.send_error)\n\n self.wfile.flush() #actually send the response if not already done.\n except socket.timeout, e:\n #a read or a write timed out. Discard this connection\n self.log_error(\"Request timed out: %r\", e)\n self.close_connection = 1\n return", "title": "" }, { "docid": "a020751580bdf1eace2407791088f998", "score": "0.5825972", "text": "def handle(self):\n socket = self.request[1]\n data = self.request[0].strip()\n logger.info(\"Address {} at {} wrote: '{}'\".format(self.client_address[1], self.client_address[0], data))\n cmd_strn, ret = self.command_service(data)\n print(ret)\n self.command_response(cmd_strn, ret, self.request[1], self.client_address[0],\n self.mapInterface.router[cmd_strn])", "title": "" }, { "docid": "d16e7ed61f796dd22c2e05a3b3df2ef3", "score": "0.58130175", "text": "def __send_request(self, msg, sock):\n if type(msg) != bytes:\n response = bytes(f\"{msg}\", \"ascii\")\n print(f\"--> Sending: {msg}\")\n sock.sendall(response)", "title": "" }, { "docid": "d651a75933e6b9e32e8ef49945bbbad2", "score": "0.5804456", "text": "def request(self, msg):\n\t\tif msg.command in ('AUTH', 'EXIT', 'GET', 'SET', 'VERSION', 'COMMAND', 'UPLOAD'):\n\t\t\tmethod = 'handle_request_%s' % (msg.command.lower(),)\n\t\telse:\n\t\t\tmethod = 'handle_request_unknown'\n\n\t\tself.execute(method, msg)", "title": "" }, { "docid": "3e2c05a6d3ec1f1da2603bd12754f496", "score": "0.57987964", "text": "def send_request_to_node(signed_request, live_pv=False):\n\n if live_pv:\n node_address = format_address(\n ip_address='64.225.47.205',\n port=None,\n protocol='http'\n )\n else:\n node_address = format_address(\n ip_address='192.168.1.75',\n port=8000,\n protocol='http'\n )\n\n url = f'{node_address}/connection_requests'\n results = post(url=url, body=signed_request)\n\n if isinstance(results, dict):\n for k, v in results.items():\n print(f'{k}: {v}')\n\n print(results)", "title": "" }, { "docid": "a8808fec00e4e7047bf9f08fd7c90f6c", "score": "0.5791291", "text": "def test_request(comms):\n kernel_comm, frontend_comm = comms\n\n def handler(a, b):\n return a + b\n\n kernel_comm.register_call_handler('test_request', handler)\n\n res = frontend_comm.remote_call(blocking=True).test_request('a', b='b')\n\n assert res == 'ab'", 
"title": "" }, { "docid": "5829762604e9c5d37ba1dbcf86d8be5c", "score": "0.5765756", "text": "def request(self, flow: mitmproxy.http.HTTPFlow):\n pass", "title": "" }, { "docid": "524c68d70c027e04c479e7cacbb1e034", "score": "0.5738168", "text": "def _send_request(self):\n url = self.config['url']\n agent = Agent(reactor)\n response = (yield agent.request(\n 'GET',\n url.encode(\"ASCII\"),\n ))\n\n d = defer.Deferred()\n response.deliverBody(ReceiveBody(d))\n defer.returnValue((yield d))", "title": "" }, { "docid": "691f573d54cabd0230ed5ab146b40b93", "score": "0.57332647", "text": "def send_request(request: blpapi.request.Request, **kwargs):\n logger = logs.get_logger(send_request, **kwargs)\n try:\n bbg_session(**kwargs).sendRequest(request=request)\n except blpapi.InvalidStateException as e:\n logger.exception(e)\n\n # Delete existing connection and send again\n port = kwargs.get('port', _PORT_)\n con_sym = f'{_CON_SYM_}//{port}'\n if con_sym in globals(): del globals()[con_sym]\n\n # No error handler for 2nd trial\n bbg_session(**kwargs).sendRequest(request=request)", "title": "" }, { "docid": "65cbbaccafaf35bfe0747b7353a78547", "score": "0.5729618", "text": "def handle_one_request(self):\n \n try:\n \n self.raw_requestline = self.rfile.readline(65537)\n \n if len(self.raw_requestline) > 65536:\n \n self.requestline = ''\n \n self.request_version = ''\n \n self.command = ''\n \n self.send_error(414)\n \n return\n \n if not self.raw_requestline:\n \n self.close_connection = 1\n \n return\n \n if not self.parse_request():\n \n # An error code has been sent, just exit\n \n return\n \n mname = 'do_' + self.command\n \n if not hasattr(self, mname):\n \n self.send_error(501, \"Unsupported method (%r)\" % self.command)\n \n return\n \n method = getattr(self, mname)\n \n print \"before call do_Get\"\n \n method()\n \n #增加 debug info 及 wfile 判断是否已经 close\n \n print \"after call do_Get\"\n \n if not self.wfile.closed:\n self.wfile.flush() #actually send the response if not already done.\n \n print \"after wfile.flush()\"\n \n except socket.timeout, e:\n \n #a read or a write timed out. 
Discard this connection\n self.log_error(\"Request timed out: %r\", e)\n self.close_connection = 1\n return", "title": "" }, { "docid": "1b003a86f60bea94a0d5f6fa50f95f4c", "score": "0.5726766", "text": "def test_start_post(self):\n response = self.client.open('/start',\n method='POST')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "title": "" }, { "docid": "ce0659133e0e36d4d14044b63cd64908", "score": "0.5705143", "text": "async def handle_request(self, request: aioweb.request.Request):", "title": "" }, { "docid": "0a2321b632d3cc8902119a98320f6383", "score": "0.5697896", "text": "def sendRequest(self, param1, param2):\n time.sleep(random.randint(190, 200) / 1000.0)\n print param1, param2\n result = self.app.callAdd(param1, param2)\n print \"result:\", result\n # 注意param1和param2都是int类型,需要转换成str\n if result.find(str(param1) + ' + ' + str(param2)) == -1:\n print 'Failed!'\n info = time.strftime(ISOTIMEFORMAT,\n time.localtime(time.time())) + ' Test:' + TEST_NAME + ' [[]] Params:' + str(param1) + \\\n ',' + str(param2) + ' [[]] Result:' + result\n logfile.write(info + '\\n')\n grinder.getStatistics().getForCurrentTest().setSuccess(False)", "title": "" }, { "docid": "cd68d8939df4f3591a3b9129daa9040d", "score": "0.56687605", "text": "def basicRequest(self):\n endpoint = \"/foo\"\n\n def verify(request):\n o(request.method).equals(\"GET\")(\"Checking basic request method.\")\n o(request.url).equals(endpoint)(\"Checking basic request url.\")\n request.respond(200)\n self.testServer.respondWith(verify)\n\n server.request(endpoint)\n self.testServer.respond()", "title": "" }, { "docid": "5fd63cd22be8b5142d43031bd5c0d671", "score": "0.5656457", "text": "def send_request(data, url, proxy, headers=None):\n session = requests.Session()\n session.trust_env = False\n session.proxies = { 'http': proxy } if proxy else {}\n session.headers = HEADERS if headers is None else headers\n r = session.post(url=url, data=data)\n print r.status_code", "title": "" }, { "docid": "9d56d9ab840641af1c33688c5d837b8e", "score": "0.56485164", "text": "def _assemble_and_send_request(self):\r\n # Fire off the query.\r\n response = self.client.service.processShipment(WebAuthenticationDetail=self.WebAuthenticationDetail,\r\n ClientDetail=self.ClientDetail,\r\n TransactionDetail=self.TransactionDetail,\r\n Version=self.VersionId,\r\n RequestedShipment=self.RequestedShipment)\r\n return response", "title": "" }, { "docid": "c1388c8066ca5822563c84132e550774", "score": "0.56472117", "text": "def processRequest():\n # if we are not in the address list, then this is not an initialized connection\n if request.remote_addr not in addressList:\n # if the address is not in the list and it is not a market\n # request, then it is web gallery traffic\n if not urlEncode.isMarket(request.url):\n sendToImageGallery(request)\n return\n # if this is a market request, then proceed with new session initialization\n else:\n encoded = {'url':request.url, 'cookie':[]}\n decoded = urlEncode.decode(encoded)\n sender, receiver = frame.initServerConnection(decoded, PASSWORDS, callback)\n # if the client sent a bad password, print an error message\n # and return an empty image\n if sender == False:\n print \"Bad password entered\"\n return sendToImageGallery(request)\n # Note: this will need to change to accomodate multiple client sessions\n htptObject.assembler = sender\n htptObject.disassembler = receiver\n addressList.append(request.remote_addr)\n #send back a blank image with the new session id\n framed = 
htptObject.assembler.assemble('')\n image = imageEncode.encode(framed, 'png')\n return serveImage(image)\n #TODO\n #setup some way to maintain a single Internet connection per client\n # if this is an initialized client, then receive the data and see\n # if we have anything to send\n else:\n #receive the data\n decoded = urlEncode.decode({'url':request.url, 'cookie':request.cookies})\n htptObject.disassembler.disassemble(decoded)\n # see if we have any data to return\n readyToRead, readyToWrite, inError = \\\n select.select([htptObject.torSock], [], [], 0)\n # if we have received data from the Tor network for the Tor\n # client, then send it\n if readyToRead != []:\n # get up to a megabyte\n dataToSend = readyToRead[0].recv(1024*1000)\n# print \"Server Sending: {}\".format(dataToSend)\n else:\n dataToSend = ''\n # put the headers on the data (not the actual function name)\n framed = htptObject.assembler.assemble(dataToSend)\n # encode the data\n encoded = imageEncode.encode(framed, 'png')\n # send the data with apache\n return serveImage(encoded)", "title": "" }, { "docid": "676c1e5c727d82d85d35a3334be009b5", "score": "0.56410146", "text": "def handle_request(self):\n\t\ttry:\n\t\t\tr,w,e=select.select([self.socket],[],[], 1.0)\n\t\t\tif not r:\n\t\t\t\treturn\n\t\t\trequest, client_address=self.socket.accept()\n\t\texcept:\n\t\t\treturn\t\t\n\t\t\n\t\ttry:\n\t\t\tif self.debug:\n\t\t\t\tprint \"got request\"\n\t\t\tself.process_request(request, client_address)\n\t\texcept:\n\t\t\tself.handle_error(request, client_address)", "title": "" }, { "docid": "5d837541f9f9e0dec602062939ab8a5f", "score": "0.56395954", "text": "def send_req(self):\n self.n_send_req += 1", "title": "" }, { "docid": "a59e6d1feae27d21a0cf68780f551c4d", "score": "0.56326485", "text": "def send_request(request):\n info = evmapy.util.get_app_info()\n client_socket_name = '%s-client.%d.socket' % (info['name'], os.getpid())\n client_socket_path = os.path.join(info['config_dir'], client_socket_name)\n try:\n client_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)\n client_socket.bind(client_socket_path)\n os.chmod(client_socket_path, stat.S_IRUSR | stat.S_IWUSR)\n request_data = json.dumps(request).encode()\n client_socket.sendto(request_data, _get_control_socket_path())\n if request['wait']:\n (read_fds, _, _) = select.select([client_socket], [], [], 1.0)\n if read_fds:\n data = client_socket.recv(1024)\n return json.loads(data.decode())\n else:\n raise TimeoutError\n finally:\n os.remove(client_socket_path)", "title": "" }, { "docid": "c369a291f72850c7b7abc76691d201f1", "score": "0.56316733", "text": "def send_request_to_bank(signed_request):\n\n node_address = format_address(\n ip_address='192.168.1.232',\n port=8000,\n protocol='http'\n )\n url = f'{node_address}/validator_confirmation_services'\n results = post(url=url, body=signed_request)\n\n if isinstance(results, dict):\n for k, v in results.items():\n print(f'{k}: {v}')\n\n print(results)\n\n write_json(\n os.path.join(SIGNED_REQUESTS_DIR, 'signed-validator-confirmation-services-response.json'),\n results\n )", "title": "" }, { "docid": "6fb3c2f47a44ac3056797b0a38be9938", "score": "0.56216186", "text": "def send_returns(returns):\n #Build the header\n head = {'Content-Type':'application/json'}\n head.update(auth_head)\n r = pool.urlopen('POST',server_address+'/command_returns?client_name='+client_name,headers=head,body=json.dumps(returns))\n if r.data!=\"Processed\":\n print \"Something went horribly wrong on the other end. 
Here are the bits of information to retry later with...\"\n print json.dumps(returns)", "title": "" }, { "docid": "7b5d25cdddb7053a21e25ed4ea3eb0de", "score": "0.561687", "text": "def do_POST(self): # pylint: disable=invalid-name\n self.handle_request()", "title": "" }, { "docid": "438d0e5ca4e29f90d1dc5a178e5295d1", "score": "0.5614876", "text": "def send_request_and_get_response(self, request):\n try:\n node_id = self.get_controller()\n except Exception:\n self.module.fail_json(\n msg='Cannot determine a controller for your current Kafka '\n 'server. Is your Kafka server running and available on '\n '\\'%s\\' with security protocol \\'%s\\'?' % (\n self.client.config['bootstrap_servers'],\n self.client.config['security_protocol']\n )\n )\n\n if self.connection_check(node_id):\n future = self.client.send(node_id, request)\n self.client.poll(future=future)\n if future.succeeded():\n return future.value\n else:\n self.close()\n self.module.fail_json(\n msg='Error while sending request %s to Kafka server: %s.'\n % (request, future.exception)\n )\n else:\n self.close()\n self.module.fail_json(\n msg='Connection is not ready, please check your client '\n 'and server configurations.'\n )", "title": "" }, { "docid": "94dd0b0fbc91906fa1ac54b5d9a4ab7d", "score": "0.5614774", "text": "def run(self):\n self.initialize()\n\n # run the start callback\n tools.run_callback(\"start\", {'request': self._request})\n\n data = self._request.getData()\n pyhttp = self._request.getHttp()\n config = self._request.getConfiguration()\n\n # allow anyone else to handle the request at this point\n handled = tools.run_callback(\"handle\", \n {'request': self._request},\n mappingfunc=lambda x,y:x,\n donefunc=lambda x:x)\n\n if not handled == 1:\n blosxom_handler(self._request)\n\n # do end callback\n tools.run_callback(\"end\", {'request': self._request})", "title": "" }, { "docid": "4d28937edaf49baad34231978ac56a37", "score": "0.5609847", "text": "def send_request(self, request, listen_port):\n\n request = self.adapt_request(request, listen_port)\n\n self.chaussette.send(request)", "title": "" }, { "docid": "0d6676050a46385fbd39a1fb99c17a69", "score": "0.55988383", "text": "def _send(self, payload: dict) -> dict:\n logging.debug(f\"posting request with payload {payload}\")\n\n r = self.session.post(url=self._api_url, json=payload)\n\n r.raise_for_status()\n\n response = r.json()\n\n # check if successful\n if str.lower(response[\"status\"]) != \"success\":\n self.__exit__()\n raise APIException(response[\"longmessage\"])\n\n # return the actual information\n logging.debug(f\"request returned success with response {response}\")\n return response[\"responsedata\"]", "title": "" }, { "docid": "0050a38708300899b051cd04fded5870", "score": "0.5589857", "text": "def test_http_request(self):\n\n response = requests.get(self.live_server_url)\n assert response.status_code == 200", "title": "" }, { "docid": "d399bb91b71d47674a7e7406be104472", "score": "0.5587394", "text": "def handle_response(self, order):\n print config.RESP_PROMPT + \" sending results of order %s...\" % (order.uuid)\n node = order.node\n responder_type = node[config.BEACON_TYPE_IND]\n params = node[config.PARAMS_IND]\n \n ip = params.get(config.NODE_IP_KEY)\n port = params.get(config.NODE_PORT_KEY)\n \n responder_class = self.response_map.get(responder_type) # get this from the beacon map based on beacon type\n responder = responder_class() # instantiate the object\n try:\n success = responder.send_response(params, order.response)\n except Exception, e:\n print 
\"%s Error connecting to %s:%s (%s)\" % (config.RESP_PROMPT, ip, port, e)\n success = False\n \n return success", "title": "" }, { "docid": "b6c036dff79ba136439067887694d782", "score": "0.5573435", "text": "async def send(self):", "title": "" }, { "docid": "02750e4cd3901d4eb7e3804c16365ad6", "score": "0.5570495", "text": "def __call__(self):\n hub.sleep(random.randint(1, self.interval))\n while True:\n self.send_req()\n self.reply_pending = True\n hub.sleep(self.interval)\n if self.reply_pending:\n self.no_response()", "title": "" }, { "docid": "fc4f306e81575e4f7cc43bfe0d4729fe", "score": "0.55691123", "text": "def send_request(request):\n auth()\n response = urllib2.urlopen(request)\n\n return BeautifulSoup(response).resultmessage.string", "title": "" }, { "docid": "90e908a583f092a878aca6e350a0c50d", "score": "0.55673015", "text": "def request(ctx, flow):\n ctx.log(\"request\")\n #print \"REQUEST:\"\n #print flow.request._assemble()\n #print str(flow.request.headers[\"Host\"][0])\n try:\n # no windows update\n if str(flow.request.headers[\"Host\"][0]).endswith('windowsupdate.com'):\n flow.request.host = \"127.0.0.1\"\n flow.request.headers[\"Host\"] = [\"127.0.0.1\"]\n\n file = open(\"data/urls.txt\", \"a\")\n if flow.request.port == 443:\n file.write(\"HTTPS \" + str(flow.request.headers[\"Host\"][0]) + \"\\n\")\n else:\n file.write(\"http \" + str(flow.request.headers[\"Host\"][0]) + \"\\n\")\n file.close()\n\n #if 'Accept-Encoding' in flow.request.headers:\n flow.request.headers[\"Accept-Encoding\"] = ['none']\n\n form = flow.request.get_form_urlencoded()\n if form:\n file = open(\"data/forms.txt\", \"a\")\n file.write(flow.request.path + \"\\n\")\n file.write(str(form))\n file.close()\n\n except Exception as ee:\n ctx.log(str(ee))", "title": "" }, { "docid": "8df585bd22cc6c3b76fdaac4cf422fd7", "score": "0.55634326", "text": "def do_request(self, request_wrapper):\n assert isinstance(request_wrapper, RequestWrapper)\n logger.debug('Request to %s (thread %s)', request_wrapper.url,\n threading.current_thread().name)\n start_time = time.time()\n try:\n response = request_wrapper.session.request(\n *request_wrapper.args, **request_wrapper.kwargs)\n except Exception as exc:\n logger.error('Request exception: %s\\n%s',\n exc, '\\n'.join(traceback.format_tb(exc.__traceback__)))\n with self.lock:\n self.failure_count += 1\n response = exc\n else:\n request_time = time.time() - start_time\n logger.debug('Response from %s in %.3f s. 
(thread %s)', request_wrapper.url,\n request_time, threading.current_thread().name)\n\n with self.lock:\n self.request_count += 1\n self.total_request_time += request_time\n\n response_wrapper = ResponseWrapper(response=response, request_wrapper=request_wrapper)\n self.response_queue.put(response_wrapper)", "title": "" }, { "docid": "45cf8ab3043767a43ad70283f660b67a", "score": "0.5563403", "text": "def request_handler(self, client_connection):\n request = client_connection.recv(1024)\n\n #Make sure we recieved some data before proceeding\n if not request:\n response = 'Empty request'\n http_code = 400\n else:\n response, http_code = self.parse_request(request)\n\n #print response\n self.send_response(client_connection, response, http_code)", "title": "" }, { "docid": "802688564ad24a3cdc673b9a4794ddf6", "score": "0.5563066", "text": "def perform_request(self,\n request: RequestBase,\n method: str='POST'\n ):\n headers = {\n 'Accept': 'application/json',\n 'User-Agent': self.user_agent()\n }\n if APIAuthentication.use_http_auth:\n headers['Authorization'] = 'Basic {auth}'.format(auth=self.get_auth())\n\n # Lazy loader for api credentials.\n if request.requires_api_token() and ParamValidator.is_empty(request.api_token)\\\n and ParamValidator.not_empty(APIAuthentication.api_token):\n request.api_token = APIAuthentication.api_token\n if request.requires_service_id() and ParamValidator.is_empty(request.service_id)\\\n and ParamValidator.not_empty(APIAuthentication.service_id):\n request.service_id = APIAuthentication.service_id\n\n # Build url\n url = \"{0}/{1}\".format(PAYNL_END_POINT, request.get_url())\n parameters = request.get_parameters()\n if APIAuthentication.use_http_auth and 'token' in parameters:\n del parameters['token']\n\n if self.print_debug:\n print(\"Calling {} using {}\".format(url, method))\n print(\"HTTP Headers: {}\".format(json.dumps(headers)))\n print(\"Params: {}\".format(json.dumps(parameters)))\n\n if method.upper() == 'GET':\n response = requests.get(url, verify=True, headers=headers, params=parameters)\n else:\n response = requests.post(url, verify=True, headers=headers, data=parameters)\n\n if response.status_code not in self.__supported_status_codes:\n response.raise_for_status()\n\n if self.print_debug:\n print(\"Response object: {}\".format(response))\n print(\"Raw response: {}\".format(response.text))\n\n # Now the we have a response, let the request class handle the response.\n request.raw_response = response.text\n\n if self.print_debug:\n print(type(request.response))\n\n if request.response.is_error():\n raise ErrorException(request.response.request)", "title": "" }, { "docid": "66ce354c26022ffc589f480e5c33e8b4", "score": "0.556228", "text": "def connectionMade(self):\n print \"connection received from\", self.addr", "title": "" }, { "docid": "e33ac89ab524d5a9381a5584c2125e11", "score": "0.55598384", "text": "def process_request(t):\n time.sleep(t)", "title": "" }, { "docid": "e33ac89ab524d5a9381a5584c2125e11", "score": "0.55598384", "text": "def process_request(t):\n time.sleep(t)", "title": "" }, { "docid": "e33ac89ab524d5a9381a5584c2125e11", "score": "0.55598384", "text": "def process_request(t):\n time.sleep(t)", "title": "" }, { "docid": "f42fdacf73f3e26f87581dfbf67608ce", "score": "0.55586785", "text": "def send_request(bytestr, mode, tag=''):\n init = Initializer.create_init()\n queue = init.queue\n\n addr = queue.get()\n client = ipc.HTTPTransceiver(addr, 12345)\n requestor = ipc.Requestor(PROTOCOL, client)\n\n data = dict()\n data['input'] = 
bytestr\n data['next'] = mode\n data['tag'] = tag\n\n start = time.time()\n requestor.request('forward', data)\n end = time.time()\n\n init.node_timer(mode, end - start)\n\n client.close()\n queue.put(addr)", "title": "" }, { "docid": "51f5e5a82f29260a408173430e85c051", "score": "0.55576026", "text": "def log_request(self, code='-', size='-'):\n print self._heading(\"HTTP Request\")\n #First, print the resource identifier and desired operation.\n print self.raw_requestline,\n #Second, print the request metadata\n for header, value in self.headers.items(): \n print header + \":\", value", "title": "" }, { "docid": "a6d162ce0e6fd9c0ed50e73220c589f0", "score": "0.55549246", "text": "def call(self, request):\n if not self.address and not request.address:\n raise ValueError(\"Request message has no address: %s\" % request)\n request.reply_to = self.reply_to\n request.correlation_id = correlation_id = self.correlation_id.next()\n self.sender.send(request)\n def wakeup():\n return self.response and (self.response.correlation_id == correlation_id)\n self.connection.wait(wakeup, msg=\"Waiting for response\")\n response = self.response\n self.response = None # Ready for next response.\n self.receiver.flow(1) # Set up credit for the next response.\n return response", "title": "" }, { "docid": "a68f005a31bc41c9a76292d8ad3303b2", "score": "0.5533119", "text": "async def test_send(self):\n await self.webhook_connection.connect()\n assert self.webhook_connection.is_connected is True\n\n http_message = HttpMessage(\n dialogue_reference=(\"\", \"\"),\n target=0,\n message_id=1,\n performative=HttpMessage.Performative.REQUEST,\n method=\"get\",\n url=\"/\",\n headers=\"\",\n body=\"\",\n version=\"\",\n )\n envelope = Envelope(\n to=\"addr\",\n sender=\"my_id\",\n message=http_message,\n )\n with patch.object(self.webhook_connection.logger, \"warning\") as mock_logger:\n await self.webhook_connection.send(envelope)\n await asyncio.sleep(0.01)\n mock_logger.assert_any_call(\n RegexComparator(\n \"Dropping envelope=.* as sending via the webhook is not possible!\"\n )\n )", "title": "" }, { "docid": "323b3b34009bf26924a349af16409752", "score": "0.55295384", "text": "def test_t10_message():\n send_json_message_to_t10(\"10.89.130.68\", \"cisco\", \"cisco\", request.get_json())\n return \"ok\"", "title": "" }, { "docid": "e48c94979653312bfd043b1cb08514f2", "score": "0.55265045", "text": "def _send_request(self, method, url, params=None, data=None, headers=None, json=None,\n http_status=2, parse_json=False, error_processors=[],\n allow_redirects=None, cookies=None, stream=False):\n call_time = now()\n if self.last_call_time:\n if self.request_wait_seconds:\n delta = (call_time - self.last_call_time).total_seconds()\n if delta < self.request_wait_seconds:\n self.sleep(self.request_wait_seconds - delta,\n log_reason='request wait')\n self.last_call_time = now()\n else:\n self.last_call_time = call_time\n else:\n self.last_call_time = call_time\n else:\n self.first_call_time = self.last_call_time = call_time\n\n if not urlparse(url).scheme and self.base_url:\n url = urljoin(self.base_url, url)\n\n if allow_redirects is None:\n allow_redirects = self.allow_redirects\n\n if self.debug_level >= 5:\n self.logger.debug(\n _color_em('REQUEST %s' % method) + ' ' + url + (' params=%s' % params)\n + '\\n' + _color_em('REQUEST HEADERS:', back=colorama.Back.BLUE) + '\\n'\n + pprint(headers, print_=False)\n + (('\\n' + _color_em('REQUEST BODY:', back=colorama.Back.BLUE) + '\\n'\n + pprint(data or json, print_=False)) if (data or 
json) else '')\n )\n\n try:\n kwargs = dict(params=params, data=data, json=json, headers=headers,\n allow_redirects=allow_redirects, proxies=self.proxy,\n verify=self.ssl_verify, cookies=cookies, stream=stream)\n if self.timeout is not None:\n # Allow session (ConfigurableSession for example) to handle timeout\n kwargs['timeout'] = self.timeout\n response = self.session.request(method, url, **kwargs)\n except Exception as exc:\n self.error_processor(exc, error_processors)\n raise\n finally:\n if self.request_wait_since_response:\n self.last_call_time = now()\n\n if self.debug_level >= 5:\n self.logger.debug(\n _color_em('RESPONSE %s' % response.request.method, back=colorama.Back.GREEN)\n + colorama.Style.RESET_ALL + colorama.Style.BRIGHT + (' %s ' % response.status_code)\n + colorama.Style.RESET_ALL + response.url\n + '\\n' + _color_em('RESPONSE HEADERS:', back=colorama.Back.GREEN) + '\\n'\n + pprint(response.headers, print_=False)\n + '\\n' + _color_em('RESPONSE BODY:', back=colorama.Back.GREEN) + '\\n'\n + (stream and '<stream>' or pprint(response.text, print_=False))\n )\n\n elapsed_seconds = response.elapsed.total_seconds()\n if elapsed_seconds > self.request_warn_elapsed_seconds:\n self.logger.warning('Request %s %s took %s seconds after calls(%s/%s) since(%s)',\n response.request.method, response.request.url,\n elapsed_seconds, self.calls_count, self.calls_elapsed_seconds,\n self.first_call_time)\n self.calls_elapsed_seconds += elapsed_seconds\n self.calls_count += 1\n self.last_response = response # NOTE: only for debug purposes!\n\n if (http_status and not check_http_status(response.status_code, http_status)):\n self.set_response_json_data(response, parse_json, raise_=False)\n exc = self.HTTPError(response, expected_status=http_status)\n self.error_processor(exc, error_processors)\n raise exc\n\n try:\n self.set_response_json_data(response, parse_json, raise_=True)\n except _JSONDecodeError as exc:\n exc = self.JSONDecodeError(response, exc)\n self.error_processor(exc, error_processors)\n raise exc\n\n return response", "title": "" }, { "docid": "2beb2d5e6b1d0f5a8ef606cea238f9d6", "score": "0.552576", "text": "def call(self, request, expect=error.OK):\n response = self.client.call(request)\n self.check_response(response, expect=expect)\n return response", "title": "" }, { "docid": "8e07ba45b34180c193543a32dcc74840", "score": "0.552529", "text": "def handle_request(self):\n\t\ttry:\n\t\t\trequest, client_address = self.get_request()\n\t\texcept socket.error:\n\t\t\treturn\n\t\tif self.verify_request(request, client_address):\n\t\t\ttry:\n\t\t\t\tself.process_request(request, client_address)\n\t\t\texcept:\n\t\t\t\tself.handle_error(request, client_address)\n\t\t\t\tself.close_request(request)", "title": "" }, { "docid": "bb619e3de5a239f1b127810dfad4b94b", "score": "0.551526", "text": "def postTo(self,conn,data):\n #log(\"postTo: \"+str(conn))\n conn.request(self.command,self.path,data,self.headers)\n resp = conn.getresponse()\n log(\"postTo: \"+str(resp.status)+\", \"+str(resp.reason)+\", \"+str(resp.version))\n return resp", "title": "" }, { "docid": "47eacf7ac65cf906845810473379319f", "score": "0.5501415", "text": "def start_request(self,request_handler,client_address):\n logger.debug('start_request(%s:%s)' % client_address)", "title": "" }, { "docid": "3865a3208098da150a4e30c14b896774", "score": "0.5498369", "text": "def run(self):\n # client -> server\n self.client.send_message(\n new_order_message(self.client,\n symbol='abc',\n side='0',\n order_type='1',\n extra_tags=[(38, 
100), # orderQty\n (44, 10), ])) # price\n\n # server <- client\n message = self.server.wait_for_message('waiting for new order')\n assert_is_not_none(message)\n\n # server -> client\n self.server.send_message(\n execution_report(self.server,\n message,\n exec_trans_type='0',\n exec_type='0',\n ord_status='0',\n symbol='abc',\n side='0',\n leaves_qty='100',\n cum_qty='0',\n avg_px='0'))\n\n # client <- server\n message = self.client.wait_for_message('waiting for new order ack')\n assert_is_not_none(message)", "title": "" }, { "docid": "f3e390f3f722cc42fa114474a2239a16", "score": "0.54967743", "text": "def send_final_request(self):\n with open(self.output_path, \"r\") as text_file:\n data = json.load(text_file)\n print self.request_handler.send(data)", "title": "" }, { "docid": "56eccd92ef0928519322588b391d256f", "score": "0.54956967", "text": "def send_request(self, method, params):\n path = reverse('api_v1_mountpoint')\n\n req = {\n 'jsonrpc': '1.0',\n 'id': 'jsonrpc',\n 'method': method,\n 'params': params,\n }\n\n req_json = json.dumps(req)\n return self.client.post(path, req_json, content_type='text/plain; charset=UTF-8')", "title": "" }, { "docid": "fe3485ecbe50d654de534fa90b84944d", "score": "0.54944015", "text": "def do_request(\n self,\n version: str,\n action: str,\n protocol: str,\n method: str,\n pathname: str,\n request: dict,\n headers: Dict[str, str],\n runtime: util_models.RuntimeOptions,\n ) -> dict:\n runtime.validate()\n _runtime = {\n 'timeouted': 'retry',\n 'readTimeout': UtilClient.default_number(runtime.read_timeout, self._read_timeout),\n 'connectTimeout': UtilClient.default_number(runtime.connect_timeout, self._connect_timeout),\n 'httpProxy': UtilClient.default_string(runtime.http_proxy, self._http_proxy),\n 'httpsProxy': UtilClient.default_string(runtime.https_proxy, self._https_proxy),\n 'noProxy': UtilClient.default_string(runtime.no_proxy, self._no_proxy),\n 'maxIdleConns': UtilClient.default_number(runtime.max_idle_conns, self._max_idle_conns),\n 'maxIdleTimeMillis': self._max_idle_time_millis,\n 'keepAliveDurationMillis': self._keep_alive_duration_millis,\n 'maxRequests': self._max_requests,\n 'maxRequestsPerHost': self._max_requests_per_host,\n 'retry': {\n 'retryable': runtime.autoretry,\n 'maxAttempts': UtilClient.default_number(runtime.max_attempts, 3)\n },\n 'backoff': {\n 'policy': UtilClient.default_string(runtime.backoff_policy, 'no'),\n 'period': UtilClient.default_number(runtime.backoff_period, 1)\n },\n 'ignoreSSL': runtime.ignore_ssl,\n # 链上交易中的事件\n }\n _last_request = None\n _last_exception = None\n _now = time.time()\n _retry_times = 0\n while TeaCore.allow_retry(_runtime.get('retry'), _retry_times, _now):\n if _retry_times > 0:\n _backoff_time = TeaCore.get_backoff_time(_runtime.get('backoff'), _retry_times)\n if _backoff_time > 0:\n TeaCore.sleep(_backoff_time)\n _retry_times = _retry_times + 1\n try:\n _request = TeaRequest()\n _request.protocol = UtilClient.default_string(self._protocol, protocol)\n _request.method = method\n _request.pathname = pathname\n _request.query = {\n 'method': action,\n 'version': version,\n 'sign_type': 'HmacSHA1',\n 'req_time': AntchainUtils.get_timestamp(),\n 'req_msg_id': AntchainUtils.get_nonce(),\n 'access_key': self._access_key_id,\n 'base_sdk_version': 'TeaSDK-2.0',\n 'sdk_version': '1.3.1',\n '_prod_code': 'BAASDATAGW',\n '_prod_channel': 'undefined'\n }\n if not UtilClient.empty(self._security_token):\n _request.query['security_token'] = self._security_token\n _request.headers = TeaCore.merge({\n 'host': 
UtilClient.default_string(self._endpoint, 'openapi.antchain.antgroup.com'),\n 'user-agent': UtilClient.get_user_agent(self._user_agent)\n }, headers)\n tmp = UtilClient.anyify_map_value(RPCUtilClient.query(request))\n _request.body = UtilClient.to_form_string(tmp)\n _request.headers['content-type'] = 'application/x-www-form-urlencoded'\n signed_param = TeaCore.merge(_request.query,\n RPCUtilClient.query(request))\n _request.query['sign'] = AntchainUtils.get_signature(signed_param, self._access_key_secret)\n _last_request = _request\n _response = TeaCore.do_action(_request, _runtime)\n raw = UtilClient.read_as_string(_response.body)\n obj = UtilClient.parse_json(raw)\n res = UtilClient.assert_as_map(obj)\n resp = UtilClient.assert_as_map(res.get('response'))\n if AntchainUtils.has_error(raw, self._access_key_secret):\n raise TeaException({\n 'message': resp.get('result_msg'),\n 'data': resp,\n 'code': resp.get('result_code')\n })\n return resp\n except Exception as e:\n if TeaCore.is_retryable(e):\n _last_exception = e\n continue\n raise e\n raise UnretryableException(_last_request, _last_exception)", "title": "" }, { "docid": "c7f60d470fe2d7784adf84128c77b7de", "score": "0.5493666", "text": "def Check_Gateway(gateway):\n\n global STATUS\n logging.info(\"Pinging gateway\")\n if ping_machine(gateway) != 0:\n add_info(gateway, GATEWAY, \"unpingable\")\n STATUS = 1 # can't work w/out gateway\n return 1\n else:\n add_info(gateway, GATEWAY, \"OK\")\n return 0", "title": "" }, { "docid": "ebb51571c774737ad4c3ce3832021388", "score": "0.54916763", "text": "def post(self):\n policies = json.loads(self.request.get('policies'))\n request = json.loads(self.request.get('request_json'))\n response = json.loads(self.request.get('response_json'))\n\n maybe_notify_backend('LEASED', response['hostname'], policies)\n maybe_notify_lessee(request, response)", "title": "" }, { "docid": "48ee7794b4a1ebf35cf48e545921ea92", "score": "0.5480641", "text": "def send_request(self, request):\n json_results = requests.get(request).json()\n\n status = json_results['status']\n\n if status == const.STATUS_OK:\n return json_results['results']\n\n self.log.warning(self.get_status_code(status))", "title": "" }, { "docid": "831ae19a01527ead5a1bd979bc51bf2a", "score": "0.5480119", "text": "def call(self):\n # if this is a POST request, process data\n if self.data:\n post_json = json.dumps(self.data)\n values = {'json': post_json, 'apikey': API_KEY}\n post = urllib.parse.urlencode(values)\n\n else:\n post = None\n\n req = urllib.request.Request(self.url, post)\n\n try:\n self.response = urllib.request.urlopen(req, timeout=self.timeout)\n\n except (URLError, HTTPError, timeout) as error:\n self.response = error", "title": "" }, { "docid": "eb693c25a32dcdd52a06b8b02d6d42c5", "score": "0.5478504", "text": "def test_returns_200(self):\r\n c = Client()\r\n response = c.post('/sg/send', {\r\n 'username': 'foo',\r\n 'password': 'bar',\r\n 'from': 'from@from.com',\r\n 'to': 'to@to.com',\r\n 'subj': 'subj text',\r\n 'body': 'body text',\r\n })\r\n\r\n self.assertEqual(200,response.status_code)", "title": "" }, { "docid": "1c4ed34d53dbae482ff4dbdbac61eed9", "score": "0.5475592", "text": "def test_goaway_frame_in_request(self):\n client = self.get_client(\"deproxy\")\n\n self.start_all_services()\n self.initiate_h2_connection(client)\n\n # Client opens many streams and does not close them\n for stream_id in range(1, 6, 2):\n client.stream_id = stream_id\n client.make_request(request=self.post_request, end_stream=False)\n\n # Client send 
GOAWAY frame with PROTOCOL_ERROR as bytes\n # because `_terminate_connection` method changes state machine to closed\n client.send_bytes(b\"\\x00\\x00\\x08\\x07\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x01\")\n\n # Client sends frames in already open streams.\n # Tempesta must handle these frames and must not close streams,\n # because sender closes connection, but not receiver.\n for stream_id in range(1, 6, 2):\n client.stream_id = stream_id\n client.make_request(request=\"asd\", end_stream=True)\n\n self.assertTrue(\n client.wait_for_response(), \"Tempesta closed connection after receiving GOAWAY frame.\"\n )", "title": "" }, { "docid": "82f7bb4b866805f212f3ec9e3fad37c9", "score": "0.5468208", "text": "def handle(self):\n self.request.recv(1024)\n self.request.sendall(pickle.dumps(self.server.lymphocytes_getter()))", "title": "" }, { "docid": "0929fde5a9efb8ba6a130447d4505dd2", "score": "0.54652846", "text": "def do_POST(self):\r\n self.do_GET()", "title": "" }, { "docid": "84e2e9c7ccbfac27d6902e60fa2f56bb", "score": "0.5456939", "text": "def _send_and_response(self, addr, msg):\n self._namefixer(msg)\n return send_and_receive(addr, msg, 30) # manual timeout !!!!! fix it!", "title": "" }, { "docid": "aed3089f2720e989e88298b62a5c8c3b", "score": "0.54537135", "text": "async def request(self):\n # TODO: validate the state\n message = Message(self.name_path)\n await self.issue_command(Command(message))", "title": "" }, { "docid": "1609a244248df9008f61694cb714d8b3", "score": "0.5453205", "text": "def test_save_send(self):\r\n # Don't really know how to test this effectively...\r\n # Would require to simulate a blocking socket on the recipient side...\r\n pass", "title": "" }, { "docid": "c8b89b843adb3d585341404c0c178d21", "score": "0.54483306", "text": "def send_request(self, request):\n assert isinstance(request, Request)\n if request.block1 or (request.payload is not None and len(request.payload) > defines.MAX_PAYLOAD):\n host, port = request.destination\n key_token = hash(str(host) + str(port) + str(request.token))\n if request.block1:\n num, m, size = request.block1\n else:\n num = 0\n m = 1\n size = defines.MAX_PAYLOAD\n\n self._block1_sent[key_token] = BlockItem(size, num, m, size, request.payload, request.content_type)\n request.payload = request.payload[0:size]\n del request.block1\n request.block1 = (num, m, size)\n elif request.block2:\n host, port = request.destination\n key_token = hash(str(host) + str(port) + str(request.token))\n num, m, size = request.block2\n item = BlockItem(size, num, m, size, \"\", None)\n self._block2_sent[key_token] = item\n return request\n return request", "title": "" }, { "docid": "15639bf8506361a19c0d59e9b8346d22", "score": "0.5447594", "text": "def PostRequest(self):\n if self.__Payload: \n self.__Answer = requests.post(self.__URL, data = self.__Payload, headers = self.__Headers)\n Logs.WriteToLog(\"Data transited to web server\")\n else:\n Logs.WriteToLog(\"No payload in HTTP request\")\n raise Exception(\"Payload must be setted\")", "title": "" }, { "docid": "340d69e7d4789173d79bb7e3a1b2891b", "score": "0.544591", "text": "def test_get_request_output(self):\n pass", "title": "" }, { "docid": "8dd22c463a850bd2565535331ec8cc9f", "score": "0.54423547", "text": "def _send_request(self, method='post', headers=None, json=None):\n response = getattr(requests, method)(self.url, headers=headers, json=json)\n return response", "title": "" } ]
ce64364992c6bc20f154f3764ce0f11e
prints info about the state
[ { "docid": "9b891587e8aaf264f0bf683b7daea014", "score": "0.0", "text": "def print_info(self, as_str: bool = False, file: TextIO = None) -> str:", "title": "" } ]
[ { "docid": "6b63db4d1dd56ad5a154cab28942a6a9", "score": "0.8899063", "text": "def print_state():\n ...", "title": "" }, { "docid": "a2ad2d1917f408c900da9218030ba604", "score": "0.84972256", "text": "def print_state(self):\n print(self._state)", "title": "" }, { "docid": "a7a85020f536e2a52e8f49f9da70fd43", "score": "0.8091616", "text": "def show_state(self):\r\n print(\"I don't know how to show_state.\")", "title": "" }, { "docid": "9a35858052162d3022ada6124b2606bb", "score": "0.80087197", "text": "def print_state(self):\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(self.get_state())", "title": "" }, { "docid": "f8cc27ac503e5656e22166e3d4905147", "score": "0.79866695", "text": "def show_state(self) -> None:\n log.info(\"-\" * 40)\n for army in self.__armies:\n log.info(f'state: {army.state()}')\n log.info(\"\\n\" + \"=\" * 100)\n log.info(\"\\n\")", "title": "" }, { "docid": "bb352e472f1ab717d326696650ae6783", "score": "0.795555", "text": "def cmd_state(self, args):\r\n print \"\\n%s\\n\" % (self.statestr())", "title": "" }, { "docid": "8a15005435e474cb407ee5514bb369e1", "score": "0.783787", "text": "def print_state(state):\n print(\"\")\n print(\"Current player: {}\".format(state.cur_player()))\n print(state)\n\n # Example of more queries to provide more about this state. For\n # example, bots could use these methods to to get information\n # about the state in order to act accordingly.\n print(\"### Information about the state retrieved separately ###\")\n print(\"### Information tokens: {}\".format(state.information_tokens()))\n print(\"### Life tokens: {}\".format(state.life_tokens()))\n print(\"### Fireworks: {}\".format(state.fireworks()))\n print(\"### Deck size: {}\".format(state.deck_size()))\n print(\"### Discard pile: {}\".format(str(state.discard_pile())))\n print(\"### Player hands: {}\".format(str(state.player_hands())))\n print(\"\")", "title": "" }, { "docid": "92514daa1d65612aef48c92923e63d17", "score": "0.77830446", "text": "async def print_state(self):\n try:\n async for current_state in self.state:\n if self.should_exit:\n break\n else:\n state = current_state[self.symbol]\n logger.info(state[\"spread\"])\n # prevent blocking\n await asyncio.sleep(0)\n except Exception as e:\n log_exception(logger, e)", "title": "" }, { "docid": "7526fe53ca92465665c4ea44ab0dcf95", "score": "0.75745803", "text": "def show_state(self):\n return self.state_t", "title": "" }, { "docid": "4d76065057df9fa1360ea27dc048598f", "score": "0.7564376", "text": "def describe(self):\n for state in self.definition:\n print(\"State Index/Label: \", state[0], \" , \", state[1])", "title": "" }, { "docid": "2eed0bcbab06d544ae4db282ec6284fe", "score": "0.7543665", "text": "def _print_state(self):\n flowers_alive = len(list(filter(lambda f: not f.is_dead(), self._flowers)))\n print('\\tHealthy flowers:\\t{num}'.format(num=flowers_alive))\n print('\\tDead flowers:\\t\\t{num}'.format(num=len(self._flowers) - flowers_alive))\n print('=> Flower Details')\n list(map(lambda flower: print(f'\\t {flower}'.format(flower=flower)), self._flowers))\n print(\"\\n\")", "title": "" }, { "docid": "7231e9d5d8fc8c97b2b93483ca0f673b", "score": "0.75087017", "text": "def show_state(self):\n return 0", "title": "" }, { "docid": "3f79472b8d09b0ea368d4d675f0fcfe7", "score": "0.74658585", "text": "def printState(self):\n\t\tprint(\"t: {2} x: {0:.0f} y: {1:.0f} Vx: {3:.0f} Vy: {4:.0f}\".\n\t\t\tformat(self.x, self.y, self.t, self.Vx, self.Vy))", "title": "" }, { "docid": "1a5264267c26db3fd9b12c31ddae7abe", "score": 
"0.7392016", "text": "def printer(self):\n print \" ID \" + repr(self.id)\n if self.type == 0:\n print \" Tag: - \"\n print \" Start State - \"\n elif self.type == 1:\n print \" Push \" + repr(self.sym)\n elif self.type == 2:\n print \" Pop State \" + repr(self.sym)\n elif self.type == 3:\n print \" Read State \" + repr(self.sym)\n elif self.type == 4:\n print \" Stop State \" + repr(self.sym)\n for j in self.trans:\n if len(self.trans[j]) > 1 or (len(self.trans[j]) == 1):\n for symbol in self.trans[j]:\n print \" On Symbol \" + repr(symbol) + \" Transition To State \" + repr(j)", "title": "" }, { "docid": "39126cd2706c7952d05a745f9743232d", "score": "0.7315346", "text": "def print_switch_info(state, inp):\n print('in state: ' + str(state))\n print('input detected:')\n print(str(inp))", "title": "" }, { "docid": "09274211f551807442727d4543c676fa", "score": "0.7307894", "text": "def print_state(state,indent=4):\n if state != False:\n for (name,val) in vars(state).items():\n if name != '__name__':\n for x in range(indent): sys.stdout.write(' ')\n sys.stdout.write(state.__name__ + '.' + name)\n print(' =', val)\n else: print('False')", "title": "" }, { "docid": "a699f6ce3e96a6ff07a3e36aad1ff8d0", "score": "0.7286546", "text": "def state(self, argv):\n self._print(self._client.state)", "title": "" }, { "docid": "b4c9954af47f4db80c3cfb749f1148ca", "score": "0.7222205", "text": "def print(self):\r\n print(f\"Initial state: {self.initial_state}\")", "title": "" }, { "docid": "cf75d707489f2b3bebfe5715fef0f5f4", "score": "0.7196031", "text": "def show(self):\n\n print (\"\\n---------------------------------------------------------\")\n\n print (\"\\n{0}\".format(self.name))\n print (\"\\n\\tGating the following Mechanism InputStates and/or OutputStates:\".format(self.name))\n # Sort for consistency of output:\n state_names_sorted = sorted(self.output_states)\n for state_name in state_names_sorted:\n for projection in self.output_states[state_name].efferents:\n print (\"\\t\\t{0}: {1}\".format(projection.receiver.owner.name, projection.receiver.name))\n print (\"\\n---------------------------------------------------------\")", "title": "" }, { "docid": "df5dfa3d704974eb1c81de7e11918d33", "score": "0.7169179", "text": "def __repr__(self):\n return str(self.state)", "title": "" }, { "docid": "1b1c0d3bdc092ec8fb9a5f33b3f9143c", "score": "0.7163882", "text": "def print_status(self):\n print(f\"depth: {len(self.game.get_moves()):<12}\",\n f\"iterations: {self.iterations:<12}\",\n f\"archive size: {len(self.archive):<12}\",\n f\"{self.states.__class__.__name__} size: {self.states.qsize():<12}\",\n f\"time elapsed: {time() - self.start:.1f} s\", end=\"\\r\")", "title": "" }, { "docid": "474c5b247000d25b7169462623bef559", "score": "0.7148422", "text": "def state_info(self):\n return self.state.name", "title": "" }, { "docid": "331a0c30df9d9ccccdf24594bd9ff2d4", "score": "0.7072376", "text": "def debugPrintState(self):\n logLine = None\n for address in range( 0, self.SIZE ):\n if address % 64 == 0:\n logLine = '$%04x : ' % address\n logLine += '%d' % self._state[address]\n if address % 64 == 63:\n self._printer.printStatus( 1, logLine )\n logLine = None", "title": "" }, { "docid": "511a71122f401f6bea010fe7c194ba76", "score": "0.70717597", "text": "def debug(cls, state):", "title": "" }, { "docid": "503428a33932057bb28a2b1c9173bdb6", "score": "0.70669806", "text": "def print_info(self) -> None:", "title": "" }, { "docid": "2b7cd6a6aa72cdae1feb4db11a6a0085", "score": "0.703368", "text": "def 
__printState(self):\n\n today = datetime.date.today()\n print()\n print( today.strftime(\"MFND - %B %d, %Y\") )\n print()\n\n print(str(self.taskTree))", "title": "" }, { "docid": "eef7ae0c6d9ee96a723f987a3c9b05da", "score": "0.702371", "text": "def print_info(self):\r\n pass", "title": "" }, { "docid": "ea6472573edec351173c082429bcc82a", "score": "0.7021639", "text": "def dump(state, action):\n\n print('State {0}'.format(state)),\n print('Action input {0} last_output {1} '.format(action.input_val,\n action.last_output)),\n print('output_val {0} next_state {1}'.format(action.output_val,\n action.next_state))", "title": "" }, { "docid": "1028c59145fcc5419188d6327d966d81", "score": "0.70153034", "text": "def _print_state(satellite):\n\n print \" >> Cartesian: \"\n print \" R: \" + str(satellite.abs_state.R) + \" [km]\"\n print \" V: \" + str(satellite.abs_state.V) + \" [km/s]\"\n print \"\"\n\n kep_osc = satellite.get_osc_oe()\n\n print \" >> Osculating orbital elements: \"\n print \" a: \" + str(kep_osc.a)\n print \" e: \" + str(kep_osc.e)\n print \" i: \" + str(kep_osc.i)\n print \" O: \" + str(kep_osc.O)\n print \" w: \" + str(kep_osc.w)\n print \" v: \" + str(kep_osc.v)\n print \"\"\n\n kep_mean = satellite.get_mean_oe()\n\n print \" >> Mean orbital elements: \"\n print \" a: \" + str(kep_mean.a)\n print \" e: \" + str(kep_mean.e)\n print \" i: \" + str(kep_mean.i)\n print \" O: \" + str(kep_mean.O)\n print \" w: \" + str(kep_mean.w)\n print \" v: \" + str(kep_mean.v)\n\n if hasattr(satellite, 'rel_state'):\n print \"\"\n print \" >> Cartesian LVLH: \"\n print \" R: \" + str(satellite.rel_state.R) + \" [km]\"\n print \" V: \" + str(satellite.rel_state.V) + \" [km/s]\"", "title": "" }, { "docid": "4106ce73c7d6378b786ee160ea09a60d", "score": "0.69965607", "text": "def __repr__(self):\n return self.mdp.state_string(self.state)", "title": "" }, { "docid": "f1ad053adf1807f3884505490513dfd5", "score": "0.69880587", "text": "def state(self, message):\n self._print(message, self._state_file)", "title": "" }, { "docid": "6dbd15b919a8402a50434070d0e3b7b5", "score": "0.6949616", "text": "def print_state(self):\n print('--------------------')\n for row in self.board:\n print(\" \".join(row))\n print('--------------------')", "title": "" }, { "docid": "fbbd9d7600d6e150ef18c3c549eb8dcc", "score": "0.6933243", "text": "def print_state(self):\n for model_name, model in six.iteritems(self.scalar_models):\n print(model.get_state())", "title": "" }, { "docid": "fbbd9d7600d6e150ef18c3c549eb8dcc", "score": "0.6933243", "text": "def print_state(self):\n for model_name, model in six.iteritems(self.scalar_models):\n print(model.get_state())", "title": "" }, { "docid": "51705bb8e061de2b31eec9f7d98a43f3", "score": "0.6926977", "text": "def printer(self):\n i = 0\n while i < self.n + 1:\n print \"--------- State No --------\" + repr(i)\n self.s[i].printer()\n i = i + 1", "title": "" }, { "docid": "6ae9ab26ac7036e98a99ec0a05b02d55", "score": "0.690894", "text": "def state(self, message):\n self._print(message, 7)", "title": "" }, { "docid": "d16e969cafdce86110cbdd623f32469e", "score": "0.68980056", "text": "def __repr__(self):\n return f'{self.name} - {self.state}'", "title": "" }, { "docid": "9a0ad38a8ba297ccd9aca6e2803b1766", "score": "0.689123", "text": "def show_state(self, state: np.ndarray):\n raise NotImplementedError", "title": "" }, { "docid": "254382e9f1d954279325e8f87eddd5ee", "score": "0.6879125", "text": "def __str__(self):\n return self.state()", "title": "" }, { "docid": 
"fe66b1bd9a18ea2f91839d48fb69b468", "score": "0.68742865", "text": "def printme(self):\n print self.Region + \" : \"+ \" <<Name>> \" +self.Name + \" <<instance ID>> \" + \\\n self.ID+ \" <<with the state>> \" + self.State + \" <<DNS>> \" + self.PublicDns", "title": "" }, { "docid": "dc594bf704f46a9d0ea6b200b8cd8a4e", "score": "0.6850899", "text": "def state_details(self) -> str:\n return pulumi.get(self, \"state_details\")", "title": "" }, { "docid": "62d5ab5d6efc18207be2f457d25dab19", "score": "0.6844621", "text": "def __init__(self):\n print('Current State: ' + str(self))", "title": "" }, { "docid": "8e5d58447979989106708c4989837fa8", "score": "0.6834074", "text": "def __str__(self):\n return \"State{\\n %s \\n}\" % (\n \"\\n\".join([\"\\t{}: {}\".format(k, str(i)) for k, i in self.__dict__.items() if not k.startswith(\"_\")]))", "title": "" }, { "docid": "c42fd18df64cd6260a7a745d4fcc7e92", "score": "0.6780822", "text": "def info(self):\n print(\"Grrrr..I am an Orc Wolf Rider. Don't mess with me.\")", "title": "" }, { "docid": "738215edb24694477d9bae1e72b0d19c", "score": "0.67762876", "text": "def _print_summary(self):\n time_label = datetime.datetime.now().strftime(\"%T\")\n description = self._describe_states(self.states, self.threads)\n print_msg(\"\\n%s Pipeline: %s\" % (time_label, description))\n logfile = pypeline.logger.get_logfile()\n if logfile:\n print_debug(\"Log-file located at %r\" % (logfile,))", "title": "" }, { "docid": "06fa6a062a8986ef5f3103774bb6d967", "score": "0.67636764", "text": "def print_fsm():\n non_terminals = set(t.src for t in transitions)\n for s in states:\n shape = 'circle' if s in non_terminals else 'doublecircle'\n print(\"\\t{} [ label = \\\"State: {}\\\\nLine: {}\\\\nPoll: {}\\\" shape = {} ];\".format(s, s, s.linestate, s.poll_time, shape))\n\n for t in transitions:\n label = \"\"\n if t.is_event:\n label += \"!\"\n label += t.comment\n label += \"\\\\n\"\n label += \"Line == {}\".format(t.linestate)\n if t.timer_wait:\n label += \" && timer == 0\"\n if t.timer_set != UNCHANGED:\n label += \" | set timer = {}\".format(t.timer_set)\n\n print(\"\\t{} -> {} [label = \\\"{}\\\"];\".format(t.src, t.tgt, label))", "title": "" }, { "docid": "79badc6c918c13d0577770ef6793ae8d", "score": "0.6761274", "text": "def print_state(state: List[int]) -> None:\n for i in state:\n print(\"{:02x}\".format(i), end=\" \")\n\n print()", "title": "" }, { "docid": "1310ff59f7fc1f98b5f87bf842ce7407", "score": "0.6756879", "text": "def post_log(self):\n if self.composite != '':\n print((self.indent)*4*' ' + ' |-- ' + self.name + ' ' + self.composite + \" --> \" + self.state_info())\n else:\n print((self.indent)*4*' ' + 5*' ' + self.func_info + \" --> \" + self.state_info())", "title": "" }, { "docid": "2dad788335ee74970582f610ff4d4e01", "score": "0.6742542", "text": "def dumpStateNames(self):\n for x in Setup_getSetupNames():\n print x", "title": "" }, { "docid": "2dad788335ee74970582f610ff4d4e01", "score": "0.6742542", "text": "def dumpStateNames(self):\n for x in Setup_getSetupNames():\n print x", "title": "" }, { "docid": "ecdf07d00d34f63cfcd849d63498dd09", "score": "0.67416286", "text": "def dumpStateNames(self):\n try:\n for x in Setup_getSetupNames():\n print x\n except Exception, e:\n print \"initOracle::DumpState : \",e.getMessage()\n return", "title": "" }, { "docid": "ecdf07d00d34f63cfcd849d63498dd09", "score": "0.67416286", "text": "def dumpStateNames(self):\n try:\n for x in Setup_getSetupNames():\n print x\n except Exception, e:\n print \"initOracle::DumpState : 
\",e.getMessage()\n return", "title": "" }, { "docid": "09c6b71e750548f3684db9c082d91778", "score": "0.67290837", "text": "def printListNextState(self):\n for i in range(len(self.nextStateList)):\n print('{} {} {}'.format(self.nextStateList[i][0],\n self.lookup[self.nextStateList[i][1]],\n self.nextStateList[i][2]))", "title": "" }, { "docid": "a80337ae3aef6a557cbd4e827fb43147", "score": "0.67247987", "text": "def __print_state(lines_to_move_up):\n print_info(terminal.MOVE_UP * lines_to_move_up, end='')\n print_info('\\n'.join([f'{item} ... {state}' for [item, state] in data]))", "title": "" }, { "docid": "d182b5b74ff6f96d9ddfa047abc0c522", "score": "0.6709298", "text": "def __print_current_state(p_round: PartialRound) -> str:\n info_str = [CommandLinePlayer.__current_position(p_round.index),\n CommandLinePlayer.__current_hand(p_round.hand),\n CommandLinePlayer.__flipped_card(p_round.flipped_card)]\n if p_round.trump is not None:\n info_str.append(CommandLinePlayer.__trump_status(p_round))\n for trick in p_round.tricks[0:-1]:\n info_str.append(CommandLinePlayer.__trick_result(trick, p_round.index))\n if len(p_round.tricks) > 0:\n info_str.append(CommandLinePlayer.__trump_status(p_round))\n\n return \"\\n\".join(info_str)", "title": "" }, { "docid": "4beefdc24150eb28b338fc8b16f83e4d", "score": "0.66997147", "text": "def display_final_state_message(self):\n print(self.get_final_state_message())", "title": "" }, { "docid": "bf69171dc5597f7d0fd282d5861e0fc0", "score": "0.66930944", "text": "def _show(self, indent = 0):\n\n col = self.resource_type\n print(\" \" * indent, \"Type:\", col)\n print(\" \" * indent, \"Application:\", self.application)\n print(\" \" * indent, \"Name:\", self.res_name)\n\n print(\" \" * indent, \"Current state:\")\n self.current_state.show(indent + 2)\n\n print(\" \" * indent, \"Expected state:\")\n self.expected_state.show(indent + 2)", "title": "" }, { "docid": "9acce0f7faa2e9ecc4eb2f90abf3cd7d", "score": "0.6691656", "text": "def print_stats(cls):\n\n states = set()\n stack_symbols = set()\n for a, b in cls.transitions.items():\n try:\n (state_name, character, stack_top_or_None) = a\n (new_state, direction, stack_op, stack_value) = b\n except ValueError:\n print('Incorrectly formatted transition:', a, b)\n raise\n\n if not isinstance(character, bytes):\n raise ValueError(a, b)\n\n states.add(state_name)\n states.add(new_state)\n stack_symbols.add(stack_top_or_None)\n stack_symbols.add(stack_value)\n\n print(f'Stats about {cls.name} 2PDA:')\n print('Number of states:', len(states))\n print('Number of transitions:', len(cls.transitions))\n print('Number of stack symbols:', len(stack_symbols))", "title": "" }, { "docid": "7402ee1596193465c87ca0179eea05a0", "score": "0.6684796", "text": "def info() -> None:\n __init__conf__.print_info()", "title": "" }, { "docid": "a7a0d9995ea536af37282392f573201d", "score": "0.66798437", "text": "def print_state(state, num):\n grid_range = range(5) # A range variable used to iterate through the grid\n\n print(\"\\nSTATE %s:\" %(str(num)))\n print(\"-----------\")\n for i in grid_range:\n print('|', end='')\n for j in grid_range:\n square = state[i][j]\n if square == -2:\n print('M', end='')\n elif square == -1:\n print('?', end='')\n else:\n print(str(square), end='')\n if j < len(state[i]) - 1:\n print(\" \", end='')\n print('|')\n print(\"-----------\\n\")", "title": "" }, { "docid": "1f86d95d5249896833066c514aa3cc41", "score": "0.6675583", "text": "def DebugOutput(self):\n depth = self.HSM.GetMaxNestingDepth()\n for x in 
range(0,depth):\n for i in self.HSM.StateLevelList[x]:\n for key, value in dict.items(i.Dict):\n # let's format the output a little bit\n print '{:} {:12} {:>3} {:<15} '.format(\"-\".rjust(i.Dict['NestLevel'] * 2), key,\":\", value)", "title": "" }, { "docid": "16af8b176f5197a4568617fff19a2e13", "score": "0.6669293", "text": "def printStatus():", "title": "" }, { "docid": "4d40898e2f9cce581c488a72961b711d", "score": "0.66638124", "text": "def display(self, state):\n print(\"Total turns: \" + str(state.turns_taken))\n print(state.to_move + \"'s turn\")\n boardutils.print_board(state.board)", "title": "" }, { "docid": "c13051cafc7aecc3369a1709cf1e3861", "score": "0.6659937", "text": "def log(self):\n indent = ' '\n for s in self._states:\n logger.info(f\"State {s} {indent}\")\n for a in self._actions:\n logger.info(f\"{indent} Action {a}\")\n for ss in self._states:\n if self.P[s, a, ss] > 0.0:\n logger.info(f'{2 * indent} transition to {ss} '\n f'with prob {self.P[s, a, ss]: .2f}')\n logger.info(\"~~~~~~~~~~~~~~~~~~~~\")", "title": "" }, { "docid": "f275dd21058e506dc42c8ed74009c3d7", "score": "0.6655012", "text": "def __repr__( self ):\n\n return self.__class__.__name__ + \"( \" + repr(self.state) + \")\";", "title": "" }, { "docid": "03e0d742ba5e4a1d3956ff5c6dcadc63", "score": "0.66319954", "text": "def house_state(self):\r\n print(\"House ##,\\tcolor,\\tresident,\\tpet,\\tdrink,\\tsmoke\")\r\n for house in self.houses:\r\n house.describe() \r\n print()", "title": "" }, { "docid": "70ddab088f903c549bb74b3415aee658", "score": "0.6625113", "text": "def _info(self, n, d, e):\n\n print(\"Layer {}:\".format(n))\n print(\"d: \", d)\n print(\"e: \", e)\n print(\"CTE: \", self.CTE[n])\n print()", "title": "" }, { "docid": "ad2d0c2e8fa6016cdd7f237fd7bc5d2e", "score": "0.6623914", "text": "def info(self):", "title": "" }, { "docid": "ad2d0c2e8fa6016cdd7f237fd7bc5d2e", "score": "0.6623914", "text": "def info(self):", "title": "" }, { "docid": "6e843b9bffb44c16a63df08b90cecbb8", "score": "0.6607457", "text": "def get_state(self):\n state = \"\"\n state += '\\nname: {}'.format(self.name)\n state += '\\nis_plotting: {}'.format(self.is_plotting)\n state += '\\ncan_plot: {}'.format(self.can_plot)\n state += '\\nline_artist: {}'.format(self.line_artist)\n return state", "title": "" }, { "docid": "d0ab28e82320447867d0135beb518956", "score": "0.6586651", "text": "def states(**kwargs):\n # fetch data from server\n client = Client()\n\n entries = client.SystemStates.instances()\n\n # present results\n if kwargs.pop('long_info'):\n print_items(STATE_FIELDS, client.SystemStates, None, entries,\n PrintMode.LONG)\n else:\n print_items(STATE_FIELDS, client.SystemStates, None, entries,\n PrintMode.TABLE)", "title": "" }, { "docid": "3db91b4359a91642ef622d6e54ff836d", "score": "0.65843564", "text": "def debug(self):\n perc_seen = (self.seen / self.visited) * 100\n perc_unseen = (self.unseen / self.visited) * 100\n\n print(\n f\"Episode: {self.episode}\\t\"\n f\"Total reward: {self.total_reward:.2f}\\t\"\n f\"Success: {self.success}\\t\"\n f\"Ep. size: {self.visited}\\t\"\n f\"({perc_seen:.2f}% seen / {perc_unseen:.2f}% unseen)\\t\"\n f\"States: {len(self.states)}\"\n )", "title": "" }, { "docid": "5359aba9bb73f064d849dbd5ff8bbe9e", "score": "0.657683", "text": "def print_info(self):\n print('%s to move.' % self.turn.upper())\n if self.castling == '-':\n print('Neither side can castle.')\n else:\n print('Castling: %s' % self.castling)\n if self.en_passant != '-':\n print('En passant available on %s.' 
% self.en_passant)\n print('White king is at %s.' % str(self.white_king))\n print('Black king is at %s.' % str(self.black_king))", "title": "" }, { "docid": "a27f26a878e52ca87d16838e1fea83dc", "score": "0.65735465", "text": "def info(self):\r\n pprint(self.__dict__, indent=2)", "title": "" }, { "docid": "2836284b76af6e7ec217e008ccefb7bc", "score": "0.65697306", "text": "def actionStatus(state, nextState, record, fields):\n global lineNumber\n global stack\n return \"line \" + str(lineNumber) + \" state: \" + str(state) + \" nextState: \" + str(nextState) \\\n + \" record: \" + str(record) + \"\\n stack: \" + str(stack) + \" \\nfields: \" + str(fields)", "title": "" }, { "docid": "8f8d05e6a461dc8581608cafc31774cf", "score": "0.6564057", "text": "def report(self):\n from pprint import pprint\n print('s2n')\n pprint(self.symbol2number)\n print('n2s')\n pprint(self.number2symbol)\n print('states')\n pprint(self.states)\n print('dfas')\n pprint(self.dfas)\n print('labels')\n pprint(self.labels)\n print('start', self.start)", "title": "" }, { "docid": "3a6c4ec872787ae565f75d67227f17e8", "score": "0.65592325", "text": "def get_state_display(self, obj):\n return obj.get_state_display()", "title": "" }, { "docid": "a94740380fd4aa2ddb646e06ba71ecde", "score": "0.6554352", "text": "def displayStatus(self):\n print(\"Health: \", self.health)\n print(\"Hunger: \", self.hunger)\n print(\"Hygiene:\", self.hygiene)", "title": "" }, { "docid": "156524ee69954032cbd575a0992e435f", "score": "0.65540063", "text": "def print_state(state, message=None):\n setting_words = state.current_setting.replace('_', ' ').split(' ')\n\n buffer_lines = (pretty_line_break(abbreviate(setting_words), 20) + [''] * 4)[:4]\n buffer_lines[2] = '-' * 20\n\n final_line = '{:^20}'.format(\n '{}{}{}'.format(\n state.current_value,\n ' ({:+})'.format(state.current_increment) if state.current_increment != 0 else '',\n ' ({})'.format(message) if message is not None else ''\n )\n )\n buffer_lines[3] = final_line\n\n __BUFFER.clear()\n __BUFFER.extend(buffer_lines)\n\n _flush()", "title": "" }, { "docid": "f2bf66efeaca37028d6481c8365a1acb", "score": "0.6527651", "text": "def GetState(self):", "title": "" }, { "docid": "f2bf66efeaca37028d6481c8365a1acb", "score": "0.6527651", "text": "def GetState(self):", "title": "" }, { "docid": "329895e350621aefd7b4c71577ad2500", "score": "0.6525194", "text": "def short_state_info(self) -> str:\n items = []\n\n if self._last_xbdm_execution_state:\n items.append(self._last_xbdm_execution_state)\n\n if self._active_thread_id is not None:\n tid: int = self._active_thread_id or 0 # Suppress pylint warning.\n items.append(\"TID[%d]\" % tid)\n\n return \" \".join(items)", "title": "" }, { "docid": "d8402856c1a1838e6eb2d3552d4cb07a", "score": "0.6502043", "text": "def __print_state(self, q: str, prettify: bool):\n cells = \" | \".join(\"%6s\" % self.__cell_to_str(q, c, prettify) for c in self.alphabet)\n print(\"| %6s |\" % q, cells, \"|\")", "title": "" }, { "docid": "d836fbea66561a322b701f890d5bba71", "score": "0.65006304", "text": "def __str__(self):\r\n try:\r\n output = f\"{self.label:s}: {self.get_state()[1]:s} @ {self.get_pos():.3f}\"\r\n except:\r\n output = f\"{self.label:s}: Unknown state. 
Unknown pos\"\r\n raise\r\n return output", "title": "" }, { "docid": "6393612cbd9f7ffae5160cb4696aa0d1", "score": "0.64930993", "text": "def info(self):\n pass", "title": "" }, { "docid": "6393612cbd9f7ffae5160cb4696aa0d1", "score": "0.64930993", "text": "def info(self):\n pass", "title": "" }, { "docid": "6393612cbd9f7ffae5160cb4696aa0d1", "score": "0.64930993", "text": "def info(self):\n pass", "title": "" }, { "docid": "6393612cbd9f7ffae5160cb4696aa0d1", "score": "0.64930993", "text": "def info(self):\n pass", "title": "" }, { "docid": "6393612cbd9f7ffae5160cb4696aa0d1", "score": "0.64930993", "text": "def info(self):\n pass", "title": "" }, { "docid": "6393612cbd9f7ffae5160cb4696aa0d1", "score": "0.64930993", "text": "def info(self):\n pass", "title": "" }, { "docid": "7b8384df0f06e52c5cf66516d07c93b2", "score": "0.64907897", "text": "def printState(state):\n stateCopy = deepcopy(state)\n maxLen = max([len(peg) for peg in stateCopy])\n for peg in stateCopy:\n while len(peg) < maxLen:\n peg.insert(0, ' ')\n for i in range(maxLen):\n for peg in stateCopy:\n print(peg[i], end=' ')\n print()\n print('-'*6)", "title": "" }, { "docid": "75e3814a8772ab6e308c02fb30ee46f8", "score": "0.6490504", "text": "def debug(self):\n episode_size = len(self.visited)\n\n perc_seen = (self.seen / episode_size) * 100\n perc_unseen = (self.unseen / episode_size) * 100\n\n print(\n f\"Episode: {self.episode}\\t\"\n f\"Total reward: {self.total_reward:.2f}\\t\"\n f\"Success: {self.success}\\t\"\n f\"Ep. size: {episode_size}\\t\"\n f\"({perc_seen:.2f}% seen / {perc_unseen:.2f}% unseen)\\t\"\n f\"States: {len(self.states)}\"\n )", "title": "" }, { "docid": "1f42d7a00b1b48c1edc2c6073c34c35a", "score": "0.6483491", "text": "def state_description(self):\n return self.STATES[self.state]", "title": "" }, { "docid": "65211dee934d5046e4d50681a9dde9ee", "score": "0.6483382", "text": "def info():\n pass", "title": "" }, { "docid": "65211dee934d5046e4d50681a9dde9ee", "score": "0.6483382", "text": "def info():\n pass", "title": "" }, { "docid": "a55cc9e5a2c4044ee26269266de7b27e", "score": "0.64805275", "text": "def __str__(self):\n return \"Level %s (Alive: %s)\" % (str(self.Level), str(self.State))", "title": "" }, { "docid": "bf726dca9dd91908a676e888fa17879a", "score": "0.647555", "text": "def print_status(self):\n\t\tprint 'Step {0:d}'.format(self.iteration)\n\t\tprint 'Initial roll: {0:2.3f} degrees'.format(self.starting_roll)\n\t\tprint 'Turbulence change: {0:2.3f}'.format(self.wind.tilt_backlog[-1])\n\t\tprint 'Compensation: {0:2.1f} %'.format(self.correction_backlog[-1])\n\t\tprint 'Final roll: {0:2.3f} degrees'.format(self.current_roll)", "title": "" }, { "docid": "bb4f141c2fd305170312ebffbd82059b", "score": "0.64702284", "text": "def info(self):\n print(\"I am a Knight!\")", "title": "" }, { "docid": "1b4e339e61500fd681ad945066ec81c4", "score": "0.6465549", "text": "def printState(state):\n if type(state) is not list:\n print(\"Not a valid list\")\n\n # Substitute '' for 0, to make the puzzle look better\n display = [x if x != 0 else '' for x in state]\n sqrt = squareOfState(state)\n\n # Iterate through the list, display tab-delimited\n for x in range(sqrt):\n print(*display[(x * sqrt):(x * sqrt) + sqrt], sep='\\t')", "title": "" }, { "docid": "315b9e2dee4ae992629189b3b92b50af", "score": "0.6462789", "text": "def print_status(self):\n if not self.game.status:\n return \n print ' '.join([fmt % args for (fmt, args) in self.game.status]),", "title": "" }, { "docid": "6fb3e869a3b7c76246e90c7c6c954100", "score": 
"0.6449615", "text": "def state(self):", "title": "" } ]
bcc11dc5d3ed124862ba585c4e4fd20e
Should answer whether the single point (x, y) is on black.
[ { "docid": "7e9f388570f8d4b23da6bdcf5714d9a2", "score": "0.0", "text": "def onBlack(self, p, path=None):", "title": "" } ]
[ { "docid": "e049a1e2d55a6aa4ff11eeaa97d59aa3", "score": "0.6627004", "text": "def IsByColor(self) -> bool:", "title": "" }, { "docid": "e049a1e2d55a6aa4ff11eeaa97d59aa3", "score": "0.6627004", "text": "def IsByColor(self) -> bool:", "title": "" }, { "docid": "6ba50191159d10ee8fa7e4cd2a62204d", "score": "0.65876037", "text": "def isWater(x, y):\n if interface.pix[x, y][:3] == (0,0,255):\n return True\n return False", "title": "" }, { "docid": "411ed7d5cbc4e2514551c5a09dac5742", "score": "0.6576384", "text": "def isGreen( pixel ):\n r, g, b = pixel\n if g>= 220 and r <= 125 and b <= 125:\n return True\n return False", "title": "" }, { "docid": "0f16e29c952cb56f1596fcacf1fad161", "score": "0.6518451", "text": "def hasColor(self):\n \n pass", "title": "" }, { "docid": "0f16e29c952cb56f1596fcacf1fad161", "score": "0.6518451", "text": "def hasColor(self):\n \n pass", "title": "" }, { "docid": "c76ef72ba338b17abb00b7e4d59f25f5", "score": "0.64755803", "text": "def isPixelValid(self, point):\n # this check approximately doubles the total time of this method\n # this function is (or was) about 15% of total execution time\n #if point[0] < 0 or point[1] < 0 or point[0] > xyMax or point[1] > xyMax:\n # raise ValueError(\"the supplied point:\", point, \n # \"is outside the sky map\")\n\n # doing mask[p[0],p[1]] is about 50% faster than mask[tuple(p)]\n # this function is (or was) about 15% of total execution time\n return self.validMask[point[0],point[1]]", "title": "" }, { "docid": "d46285312eaf19dc751b6b8389cf0ab7", "score": "0.63158756", "text": "def _green_screen_check(rgb, sensibility, reverse=False):\n if sensibility is None:\n return False\n op = operator.gt if reverse else operator.le\n for x in rgb:\n if not op(x, sensibility):\n return False\n return True", "title": "" }, { "docid": "a6eef9b429992c46c990dbdaeec641ff", "score": "0.6198835", "text": "def TrueColor(self) -> int:", "title": "" }, { "docid": "61e1ed83e720da37395479dc16deaa90", "score": "0.619602", "text": "def turn_pixel_neighbors_black(image, x, y):\n \n pixels_to_check = [(x, y)]\n \n while len(pixels_to_check) > 0:\n (x, y) = pixels_to_check.pop(0)\n \n pixel = image.getpixel((x, y))\n \n if pixel == WHITE:\n image.putpixel((x, y), BLACK)\n \n if x < image.width - 1:\n pixels_to_check.append((x+1, y))\n \n if x > 0:\n pixels_to_check.append((x-1, y))\n \n if y < image.height - 1:\n pixels_to_check.append((x, y+1))\n \n if y > 0:\n pixels_to_check.append((x, y-1))", "title": "" }, { "docid": "99170d834629f878277f20beea0df91a", "score": "0.615546", "text": "def is_at_point(self, x, y, cm_threshold=8):\n return self.displacement_to_point(x, y) < cm_threshold", "title": "" }, { "docid": "028dc7ccaab215a74e9c68e2beae6a65", "score": "0.61316705", "text": "def IsByPen(self) -> bool:", "title": "" }, { "docid": "028dc7ccaab215a74e9c68e2beae6a65", "score": "0.61316705", "text": "def IsByPen(self) -> bool:", "title": "" }, { "docid": "853da11532526bc92498eb4734e77ebc", "score": "0.60442865", "text": "def check_coloring(self) -> bool:\n if self.color == 1:\n if color(self.left) == 1 or color(self.right) == 1:\n return False\n if self.left and not self.left.check_coloring():\n return False\n if self.right and not self.right.check_coloring():\n return False\n return True", "title": "" }, { "docid": "52343cb52faf549f21e16bbebc8d8dc1", "score": "0.6015768", "text": "def _is_bright(rgb):\n L = 0\n for c, coeff in zip(rgb, (0.2126, 0.7152, 0.0722)):\n if c <= 0.03928:\n c = c / 12.92\n else:\n c = ((c + 0.055) / 1.055) ** 2.4\n L += c * 
coeff\n if (L + 0.05) / (0.0 + 0.05) > (1.0 + 0.05) / (L + 0.05):\n return True", "title": "" }, { "docid": "30fb4bbc983240dfeeb63620a1dead2d", "score": "0.60116017", "text": "def bright_spot(location):\n bright_value = 210\n \n x = location.getX()\n y = location.getY()\n r,g,b = img.getPixel(x,y)\n return to_grey(r,g,b) > bright_value", "title": "" }, { "docid": "23ad1a57869a3b2a683f9e4fd1c1c9ac", "score": "0.6006406", "text": "def border_check(x_, y_):\n return 0 <= x_ < board.width and 0 <= y_ < board.height", "title": "" }, { "docid": "46391232fe88d9d33a22acbaa417f54c", "score": "0.5961372", "text": "def confirm_continuing_dark_at(image,y,threshold=200):\n current_y = y\n miss_count = 0\n for x in range(image.size[0]-5):\n if current_y >=2:\n p0 = image.getpixel((x,current_y-2))\n else:\n p0 = (255,255,255)\n if current_y >=1:\n p1 = image.getpixel((x,current_y-1))\n else:\n p1 = (255,255,255)\n if current_y >=0:\n p2 = image.getpixel((x,current_y))\n else:\n p2 = (255,255,255)\n if current_y < (image.size[1]-2):\n p3 = image.getpixel((x,current_y+1))\n else:\n p3 = (255,255,255)\n\n if p1[0]<threshold or p2[0]<threshold:\n # line ok\n pass\n elif p0[0]<threshold:\n current_y = current_y - 1\n elif p3[0]<threshold:\n current_y = current_y + 1\n else:\n miss_count = miss_count + 1\n if miss_count > 15:\n return False\n return True", "title": "" }, { "docid": "01667eaa6d866cf8a2317edc00c1ec7d", "score": "0.5950173", "text": "def is_lighter_than(pix,threshold):\n return (pix[0]>threshold and pix[1]>threshold and pix[2]>threshold)", "title": "" }, { "docid": "6033e8113695ba1094d64d72729ee0a2", "score": "0.5940742", "text": "def isGrey(img: np.ndarray):\n if len(img.shape) == 2:\n return True\n else:\n return False", "title": "" }, { "docid": "95ba6e017ce3792cacb8b88920c68e14", "score": "0.5932977", "text": "def __is_inside(self, pixel):\n return self.mask[pixel] == 1", "title": "" }, { "docid": "4284a701f0fb17d6399a48ce3f1f0b09", "score": "0.5927695", "text": "def check_point(self, x, y):\n\t\treturn self.x1 <= x <= self.x2 and self.y1 <= y <= self.y2", "title": "" }, { "docid": "850715854d9c4167866e3f6c99ad6d11", "score": "0.58956796", "text": "def is_on_board(x, y):\n return 0 <= x < BOARDWIDTH and y < BOARDHEIGHT", "title": "" }, { "docid": "b722719277950f708fa879ac5243fe1a", "score": "0.5894406", "text": "def has_color(self):\n return self.voxelcloud.has_color()", "title": "" }, { "docid": "8afd53dc4b9014fc93c9ebececcd0bd1", "score": "0.5893134", "text": "def contains_point(self, x, y):\n return (y * y - ((x * x + self.__a) * x + self.__b)) % self.__p == 0", "title": "" }, { "docid": "11a9732c8ba484f24a5ea412c71d7aaf", "score": "0.5860548", "text": "def segment_contains_solid_color(video_segment):\n first_frame_is_solid_color = False\n last_frame_is_solid_color = False\n first_frame = video_segment.get_frame(t='00:00:00')\n last_frame = video_segment.get_frame(t=video_segment.duration)\n\n #Check first frame\n frame_image = Image.fromarray(first_frame)\n extrema = frame_image.convert(\"L\").getextrema()\n if abs(extrema[1] - extrema[0]) <= s.MIN_EXTREMA_RANGE:\n first_frame_is_solid_color = True\n\n #Check last frame\n frame_image = Image.fromarray(last_frame)\n extrema = frame_image.convert(\"L\").getextrema()\n if abs(extrema[1] - extrema[0]) <= s.MIN_EXTREMA_RANGE:\n last_frame_is_solid_color = True\n\n return True if first_frame_is_solid_color or last_frame_is_solid_color else False", "title": "" }, { "docid": "58a208430553805e3692257a0a6b0dbe", "score": "0.58575183", "text": "def 
hitTest(self, x, y):\n return (x >= 0 and x < self.width and y >= 0 and y < self.height)", "title": "" }, { "docid": "a9c9ec25cacd3ce9da16c22df6f8fc2e", "score": "0.5854938", "text": "def miss_pixel(patch):\n return np.argwhere(patch == -100).shape[0] > 0", "title": "" }, { "docid": "c20b4202f47b3272e923c533a5800ea8", "score": "0.5845442", "text": "def blackAndWhite(image):\n\n def change(triple):\n \"\"\"Converts a pixel to black and white.\"\"\"\n (r, g, b) = triple\n average = (r + g + b) // 3\n if average < 128:\n return (0, 0, 0)\n else:\n return (255, 255, 255)\n\n transform(image, change)", "title": "" }, { "docid": "3daf2fce1c49754b44c25ae03073f180", "score": "0.5843489", "text": "def contains_point(self, x, y):\n # Convert coordinate to image coordinate\n row, col = self.xy_to_imagecoord(x, y)\n\n # Check bounds\n if row < 0 or row > (self.ny - 1):\n return False\n if col < 0 or col > (self.nx - 1):\n return False\n return True", "title": "" }, { "docid": "c11aeb0836b7027b6fca143453d92d0b", "score": "0.58416915", "text": "def pointInside((x, y), evenOdd=0):", "title": "" }, { "docid": "afca7fd98cf6cdd9219671383e22b6c2", "score": "0.5838347", "text": "def all_black(M,r,c,D): \n for i in range(r,r+D):\n for j in range(c,c+D):\n if M[i][j] != BLACK:\n return False\n return True", "title": "" }, { "docid": "46514e1a1921dc54d1747c85b6e6db60", "score": "0.5825409", "text": "def in_square(self, c_x, c_y):\r\n\r\n is_in_x = self.x_1 <= c_x <= self.x_2\r\n is_in_y = self.y_1 <= c_y <= self.y_2\r\n return is_in_x and is_in_y", "title": "" }, { "docid": "80559d2c8bcdf11c22c08d712ebc7dcc", "score": "0.5804871", "text": "def sunset(self, pixel):\n state = pixel.state\n if state[\"red\"] > 200 and state[\"green\"] > 200 and state[\"blue\"] > 200:\n self.bright_warm(pixel)", "title": "" }, { "docid": "7063930d427f634512c9b7c6d493836a", "score": "0.58044225", "text": "def is_overlap(self, x, y, color):\n\n\t\tfor piece in (self.get_pieces()):\n\t\t\tif (piece.get_x() == x) and (piece.get_y() == y):\n\t\t\t\tif (piece.get_color() == color):\n\t\t\t\t\treturn 1\n\t\t\t\t\n\t\t\t\treturn 0\n\t\t\n\t\treturn -1", "title": "" }, { "docid": "6cd35eaa228276a2a23310c37653018a", "score": "0.58033353", "text": "def constraint(self, color1, color2): \n \n return color1 != color2", "title": "" }, { "docid": "37374fca689ca5af0256c4b722539fca", "score": "0.5786151", "text": "def boundary_check(self,x,y):\n if x<0 or x>4 or y<0 or y>4:\n return False\n else:\n return True", "title": "" }, { "docid": "3f17f1afe5e316df5a49e0983c98ad06", "score": "0.5781506", "text": "def process_background_is_white(self, image):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # convert it to grayscale\n image = cv2.threshold(image, 200, 255, cv2.THRESH_BINARY)[1] # thresholding\n n_white_pix = np.sum(image == 255)\n n_black_pix = np.sum(image == 0)\n return image, n_white_pix > n_black_pix", "title": "" }, { "docid": "bc701f4ce9de48e8bfb066daa68d1f3e", "score": "0.5763697", "text": "def are_same_color(piece1, piece2):\n return color_of(piece1) == color_of(piece2)", "title": "" }, { "docid": "a4808b24503d8715cb558fbf731aea4d", "score": "0.57615733", "text": "def approximately_black(im, mean_threshold=1, std_threshold=1, count_threshold=3):\n im = to_uint8(im)\n return np.mean(im) <= mean_threshold \\\n and np.std(im) <= std_threshold \\\n and np.count_nonzero(im) <= count_threshold", "title": "" }, { "docid": "bd74efb89af056e57980347b2032aaf1", "score": "0.5760531", "text": "def 
should_change_PhotometricInterpretation_to_RGB(ds):\r\n return False", "title": "" }, { "docid": "b44c83cbc22e34bb03dd5b06453c520f", "score": "0.5754795", "text": "def is_coloring(self):\n return self._coloring", "title": "" }, { "docid": "3c1314581330c94a10f6c5ef23233639", "score": "0.57352924", "text": "def in_palace(self, color, coordinates):\n if color == 'RED':\n return coordinates in {3, 4, 5, 13, 14, 15, 23, 24, 25}\n else:\n return coordinates in {73, 74, 75, 83, 84, 85, 93, 94, 95}", "title": "" }, { "docid": "b5b8dc53e2095895b25f6d0a4831de50", "score": "0.572342", "text": "def surrounded(self, x :int, y :int) -> bool:\n # return which stone with 0 perimeter\n Index = self.xyToIndex(x,y)\n surrounded = self.Perimeters[self.root(Index)] == 0\n return surrounded", "title": "" }, { "docid": "6585633b09e1699f58d924486250801b", "score": "0.57196105", "text": "def find_pixel(x, y):\n while x % 5 != 0:\n x -= 1\n while y % 5 != 0:\n y -= 1\n return x, y", "title": "" }, { "docid": "0bea791dbc221e53c7b07b12df72ec2c", "score": "0.57047397", "text": "def rectilinear(self):\n return self.m[0, 1] == 0 and self.m[1, 0] == 0", "title": "" }, { "docid": "9a01efba5a730a3cd3236a5790e2c8d1", "score": "0.570049", "text": "def get_pixel(self, x, y):", "title": "" }, { "docid": "85342b90a96c759a805c03f37f536d5f", "score": "0.56962353", "text": "def hasWon(board):\r\n color = board[0][0]\r\n for x in range(COLS):\r\n for y in range(ROWS):\r\n if board[x][y] != color:\r\n return False\r\n return True", "title": "" }, { "docid": "66b0f72965abe64da84edf4bc8a37554", "score": "0.5691584", "text": "def color_equal(v1=None, v2=None): # real signature unknown; restored from __doc__\n return 0", "title": "" }, { "docid": "c51675a718dfdf95627d251351ac63d4", "score": "0.56835324", "text": "def dark(self):\n return not (cave.bitset(self.loc, Cond.LIGHT) or\n (self.prop[Item.LAMP] and self.here(Item.LAMP)))", "title": "" }, { "docid": "2f6eabca792b271a3d94c13bfc261951", "score": "0.5683298", "text": "def check_for_different_colour(self, arg1_position, arg2):\n return (not self.check_position_free(arg1_position)) and (not self.check_for_same_colour(arg1_position, arg2))", "title": "" }, { "docid": "4dd2aae38aaa1de77e99c7b6b5ff85c3", "score": "0.5680681", "text": "def isUsedAsColor(self):\n \n pass", "title": "" }, { "docid": "c198ec47f5ae2c5d2339b4492ea95f97", "score": "0.56683487", "text": "def is_opaque(self):\n if self._image.get_alpha() >= 255:\n return True\n else:\n return False", "title": "" }, { "docid": "4237b33b130aa8275808615987404da3", "score": "0.56626856", "text": "def GetPixelColour(self, x, y):\n img = self.GetAsBitmap().ConvertToImage()\n red = img.GetRed(x, y)\n green = img.GetGreen(x, y)\n blue = img.GetBlue(x, y)\n return wx.Colour(red, green, blue)", "title": "" }, { "docid": "9c185478c9dd63007ccff1753b9fdd9a", "score": "0.565523", "text": "def point_is_inside_mask(self, another_point):\r\n # only areas that have a value == 255 can't be placed\r\n return image_utils. 
\\\r\n point_is_inside_mask(self.raw_mask, another_point, self.error_threshold, value=0)", "title": "" }, { "docid": "f0d02dd8a7183ba63d5749c075b6d00e", "score": "0.56512547", "text": "def isOnBoard(self, x, y):\n return 0 <= x < BOARD_WIDTH and 0 <= y < BOARD_HEIGHT", "title": "" }, { "docid": "606ff7aabc15883b54fa653d974b46bb", "score": "0.5646437", "text": "def is_patch_white(hsv_patch):\n\n sat = hsv_patch[:,:,1] # in range [0,1]\n percent = np.mean(sat < SAT_THRESHOLD)\n\n return (percent > PERCENT_WHITE_PIXELS_THRESHOLD)", "title": "" }, { "docid": "0f16586d898e12142454e9d40248f581", "score": "0.5645323", "text": "def is_point(self, x, y):\n if isinstance(x, int):\n x = self.finite_field.get_element(x)\n if isinstance(y, int):\n y = self.finite_field.get_element(y)\n return y ** 2 == x ** 3 + self.a * x + self.b", "title": "" }, { "docid": "c6cf04da6ed2885dd2261fac8e218693", "score": "0.5640128", "text": "def is_rgb(self):\n\t\treturn self.num_channels == 3", "title": "" }, { "docid": "4fd82eeab92a231ae4d2f670d30922e1", "score": "0.5639914", "text": "def isOnTheDartBoard():\n xCoord = generateCoordinate()\n yCoord = generateCoordinate()\n # checks if the co-ordinates fall in the circle\n if ((xCoord ** 2) + (yCoord ** 2)) < 1:\n return True # inside the circle - area of circle = piR^2 = pi(0.5)(0.5)\n else:\n return False # outside of the cirle but still inside the 1 x 1 square = 1", "title": "" }, { "docid": "8b572107bc4c16363360f9373f0712e4", "score": "0.5638851", "text": "def isColor(mode)->bool:\n if not isinstance(mode,str):\n mode=imageMode(mode)\n return mode[0]!='L'", "title": "" }, { "docid": "cad621c310e5b106c1364915c27c351b", "score": "0.5631962", "text": "def get_white(self):\n return self.rectangles_unknown", "title": "" }, { "docid": "266723b4a6cb32688b1848fcd17ca41a", "score": "0.56200296", "text": "def inside(self,x,y):\n return 0<=x<self.get_width() and 0<=y<self.get_height()\n print(x,y)", "title": "" }, { "docid": "59f3683cea7d9f6a291afae8b130b079", "score": "0.5617721", "text": "def blackwhite(im):\n maxval = im.getextrema()[1]\n return Image.eval(im, lambda x: 255 if x >= maxval/2 else 0)", "title": "" }, { "docid": "5560a7f77b1040be3c2cb4c05c7ed7e1", "score": "0.56096685", "text": "def is_point_tiling(self) -> bool:\n return self.dimensions == (1, 1) and (0, 0) in self.point_cells", "title": "" }, { "docid": "4b70ed914ecac547c146f3041dba54dd", "score": "0.560931", "text": "def usesAlpha(self):\n max = 1.0 if self.color.dtype.kind == 'f' else 255\n return np.any(self.color[:,3] != max)", "title": "" }, { "docid": "56f081654d08f3d3ab3bcbbfd4a8ec29", "score": "0.56087303", "text": "def black_box_function(x, y):\n \n \n return -x ** 2 - (y - 1) ** 2 + 1", "title": "" }, { "docid": "a8b6b97d749d1718ac544fe8c997b878", "score": "0.5608727", "text": "def contains_high_colors(self):\n for frame in self.frames:\n for line in frame.newColorMap:\n for pair in line:\n if pair[0] > 16:\n return True\n return False", "title": "" }, { "docid": "0dfa3b24ea5f051273be4449e94ed26d", "score": "0.5603763", "text": "def is_on_curve(self, point):\n\n if point is None:\n # None represents the point at infinity.\n return True\n\n x, y = point\n\n return (y * y - x * x * x - self.a * x - self.b) % self.p == 0", "title": "" }, { "docid": "668e1c07c9327ebb111b32c5c031dbb3", "score": "0.5600666", "text": "def pick_point(px, py, x, y, size=2):\r\n return abs(x-px)<=size and abs(y-py)<=size", "title": "" }, { "docid": "611aa3ffd06ed270aca06ea942058b42", "score": "0.55976003", "text": "def 
action_needed(self):\n pixel = self.board.image[854:855, 715:716]\n if not (pixel[0, 0, 0] < 50 and pixel[0, 0, 1] and \n pixel[0, 0, 2] < 50):\n return True\n\n return False", "title": "" }, { "docid": "f095e9ac1a7b9b4e4abed31812bd84bf", "score": "0.55972683", "text": "def is_colinear(self, point):\n pass", "title": "" }, { "docid": "93ca722f5da2d5ba7af05ffc17917ae6", "score": "0.5579334", "text": "def __contains__(self, point):\n\n val = self.surface.evaluate(point)\n return val >= 0. if self.side == '+' else val < 0.", "title": "" }, { "docid": "f233e22dcdd86b2015432467a6432067", "score": "0.55744237", "text": "def is_parm_tuple_color(parm_tuple: hou.ParmTuple) -> bool:\n parm_template = parm_tuple.parmTemplate()\n\n return parm_template.look() == hou.parmLook.ColorSquare", "title": "" }, { "docid": "d2346083fa5920928a88f3510650dc7a", "score": "0.55688256", "text": "def supports_xyy_color(self) -> bool:\n return self.xyy_color.initialized", "title": "" }, { "docid": "c8482290f41c47575668917d7065a7ed", "score": "0.55681115", "text": "def test_bad_pixels(self) :\n\n # check channel 7\n bt = self.get_ch(7)\n bad = bt.quality.get_pixels_from_condition('NO_VALUE')\n self._mask_out_pixels(bad, 'MISSING_CH7')\n bad = bt.quality.get_pixels_from_condition('OUT_OF_RANGE')\n self._mask_out_pixels(bad, 'SAT_CH7')\n\n # check channel 14\n bt = self.get_ch(14)\n bad = bt.quality.get_pixels_from_condition('NO_VALUE')\n self._mask_out_pixels(bad, 'MISSING_CH14')\n bad = bt.quality.get_pixels_from_condition('OUT_OF_RANGE')\n self._mask_out_pixels(bad, 'SAT_CH14')", "title": "" }, { "docid": "d030ab3d205b7eab8c8765275063c3f9", "score": "0.55662566", "text": "def use_white_text(color):\n luminance = 0.2126 * color[0] + 0.7152 * color[1] + 0.0722 * color[2]\n return luminance <= 0.179", "title": "" }, { "docid": "44baf930219d26c7b5c8a085eedd1186", "score": "0.5563374", "text": "def checkforsaturation(data):\n param = get_params()\n return True if np.any(data >= param['well']) else False", "title": "" }, { "docid": "97af0c7c3eff2372a9385446238576fa", "score": "0.55631495", "text": "def get_coordinate_value(x, y, grid):\n x, y = wrap_border(x, y, grid)\n\n if grid[y][x] == alive_c:\n return True\n\n return False", "title": "" }, { "docid": "b4c6da1926536ca4dd6a4a0b5cf66aa6", "score": "0.5562954", "text": "def __contains__(self, x_y_tuple):\n x, y = x_y_tuple\n return self.is_chip_at(x, y)", "title": "" }, { "docid": "bd5a3419f18a75a8692ea295aff32d1a", "score": "0.55624515", "text": "def is_dark(self) -> bool:\n return self.lightness() < 128", "title": "" }, { "docid": "deb04222d43e2718c7f387b219e99b6f", "score": "0.5558512", "text": "def inside_walls(self, point):\n EPS = 1e-4\n return (EPS <= point[0] < self.size[0] - EPS and\n EPS <= point[1] < self.size[1] - EPS)", "title": "" }, { "docid": "9a1d9a267145052c8b0d9efbee15fb0b", "score": "0.55558383", "text": "def inClass(self, x, y):\n\t\tposition = (x-self.cx)**2 + (y-self.cy)**2\n\t\treturn (position > self.r**2) and (position < (self.r+self.deltaR)**2)", "title": "" }, { "docid": "5d72bd3a27e8c7a18d215e0c140759cf", "score": "0.5546215", "text": "def is_light_color(self):\n return self.get_color_lightness() > 127", "title": "" }, { "docid": "47ed84554ad0e6625d2511aa0839d11b", "score": "0.5545403", "text": "def is_gray(self):\n\n raise NotImplementedError", "title": "" }, { "docid": "5259ebec0c1510bda84be191b31693a4", "score": "0.55385005", "text": "def check_in_pixel(self, ra, dec, pix):\n for val, name in zip([ra, dec], self.names):\n if (val < 
self.bounds[name][0]) or (val > self.bounds[name][1]):\n return False\n phi, theta = ra, 0.5 * np.pi - dec\n pixel = self.hp.ang2pix(self.nside, theta, phi)\n return pix == pixel", "title": "" }, { "docid": "efd47363d1001bf6deec6b866d84c91a", "score": "0.5525002", "text": "def transparent(self) -> bool:", "title": "" }, { "docid": "446ecc6d24dbd7603fb864beb1d0ce32", "score": "0.5520495", "text": "def is_legal(self, point, color):\n assert is_black_white(color)\n # Special cases\n if self.board[point] != EMPTY:\n return False \n \n # General case: deal with captures, suicide\n opp_color = GoBoardUtil.opponent(color)\n self.board[point] = color\n neighbors = self.neighbors[point]\n # Captur \n for nb in neighbors:\n if self.board[nb] == opp_color:\n if self._detect_capture(nb):\n self.board[point] = EMPTY\n return False \n # Sucide\n if not self._stone_has_liberty(point):\n # check suicide of whole block\n block = self._block_of(point)\n if not self._has_liberty(block): # undo suicide move\n self.board[point] = EMPTY\n return False \n # Undo \n self.board[point] = EMPTY\n return True", "title": "" }, { "docid": "b3db9089cee4f08d6a78067b16829000", "score": "0.5518534", "text": "def on_board(x):\n\n return 0 <= x[0] < 8 and 0 <= x[1] < 8", "title": "" }, { "docid": "2b45ef2e59b44fe92c88eef5da8cb009", "score": "0.55077195", "text": "def pixelate_frame(xy):\n x = xy[0]\n y = xy[1]\n\n sq_x = (np.arange(px) - x)**2\n sq_y = (np.arange(py) - y)**2\n\n sq = sq_x.reshape(1,-1) + sq_y.reshape(-1,1)\n\n image = 1*(sq < rr)\n\n return image", "title": "" }, { "docid": "21e3817ff5fae04b226aca6792cbbdfa", "score": "0.5505648", "text": "def draw_black():\n return random.randint(1, 10)", "title": "" }, { "docid": "02ec796fe9ded142ed7be32cfdb8b426", "score": "0.5500279", "text": "def under_mouse(self, x, y):\n\t\tedge_left = self.x - self.get_sprite().width // 2\n\t\tedge_right = self.x + self.get_sprite().width // 2\n\t\tedge_top = self.y + self.get_sprite().height // 2\n\t\tedge_bottom = self.y - self.get_sprite().height // 2 \n\t\treturn (x in range(edge_left, edge_right + 1)) and (y in range(edge_bottom, edge_top + 1))", "title": "" }, { "docid": "791d0b3eafd65214d4e4415bedd1993b", "score": "0.54978293", "text": "def supports_color(self) -> bool:\n return self.color.initialized or all(\n c.brightness.initialized for c in (self.red, self.green, self.blue)\n )", "title": "" }, { "docid": "9d783d15b2f8c760c88d7f9b4c7e02af", "score": "0.54974276", "text": "def color_calculated(self, myPerceptron):\n # let the critter guess whether they are\n # under the line\n critter_guess = self.guess()\n # is their guess correct?\n answer = myPerceptron.under_the_line(self.x, self.y)\n if answer != critter_guess:\n self.color = RED\n else:\n self.color = BLUE", "title": "" }, { "docid": "23785ae3f2518c028357d2aa0ce08aae", "score": "0.5495149", "text": "def is_contain(self, x, y):\n return (self.point1[0] - x) * (self.point2[0] - x) + (self.point1[1] - y) * (self.point2[1] - y) <= 0", "title": "" }, { "docid": "37599f2ff88f0c94d1869b236f95edc9", "score": "0.54894495", "text": "def color_match(target, color, threshold=30):\n global obj_threshold\n return (obj_threshold[target][0].r < color.r and color.r < obj_threshold[target][1].r \n and obj_threshold[target][0].g < color.g and color.g < obj_threshold[target][1].g \n and obj_threshold[target][0].b < color.b and color.b < obj_threshold[target][1].b )", "title": "" }, { "docid": "a66cfc5558364ada380a5f940e5cdf2b", "score": "0.54817235", "text": "def is_not_wall(self, x, 
y):\n if self.structure[x][y] != 'm':\n return True\n else:\n return False", "title": "" }, { "docid": "00322bc180d36843d4d4de0725d32bf5", "score": "0.547551", "text": "def badPixels():\n badpix_x = np.array([234,423,965, 1492, 1525, 1364])\n badpix_y = np.array([881, 1205, 808, 1043, 829, 746])\n return badpix_x, badpix_y", "title": "" }, { "docid": "d3afdca384ab9288bcee95e82c20bb8f", "score": "0.54703367", "text": "def is_luminous(rgb):\n new_color = []\n\n for c in rgb:\n if c <= 0.03928:\n new_color.append(c / 12.92)\n else:\n new_color.append(((c + 0.055) / 1.055) ** 2.4)\n L = sum([x * y for x, y in zip([0.2126, 0.7152, 0.0722], new_color)])\n\n return True if L < 0.179 else False", "title": "" }, { "docid": "b62259a7cbf7ce06da1f5654746ceb08", "score": "0.5470017", "text": "def _get_colour(self, x: int, y: int) -> str:\n kind = self._field.get_square_kind(x,y)\n if kind == \"J\":\n return \"blue\"\n elif kind == \"L\":\n return \"orange\"\n elif kind == \"O\":\n return \"yellow\"\n elif kind == \"I\":\n return \"cyan\"\n elif kind == \"S\":\n return \"green\"\n elif kind == \"Z\":\n return \"red\"\n elif kind == \"T\":\n return \"purple\"\n elif kind == None:\n return \"black\"", "title": "" }, { "docid": "8bae4bb819478ee9dc86d6b2e264634f", "score": "0.546559", "text": "def contains(self, x):\n\n return np.all(point > 0.) and np.all(point < 1.)", "title": "" } ]
8141ad562afe95aa356d6a6fbfb33095
Return the index of the last occurrence of obj in seq
[ { "docid": "252c38dee4f187c3fdc4cbe11e021502", "score": "0.86840785", "text": "def lastIndexOf(obj, seq):\n try:\n if __version__ < 2.6:\n for index in (i for i in xrange(len(seq) - 1, -1, -1) if seq[i] == obj):\n return index\n else:\n return next((i for i in xrange(len(seq) - 1, -1, -1) if seq[i] == obj), None)\n except:\n raise", "title": "" } ]
[ { "docid": "6d108c14eef57a621782cb5c243aeb17", "score": "0.6792807", "text": "def indexOf(obj, seq):\n try:\n if __version__ < 2.6:\n for index in (i for i in xrange(len(seq)) if seq[i] == obj):\n return index\n else:\n return next((i for i in xrange(len(seq)) if seq[i] == obj), None)\n except:\n raise", "title": "" }, { "docid": "69604df915a3eadd475d4bccf6c7e99e", "score": "0.6576846", "text": "def last_in_chain(self, start):\n for idx in self.record_list(start):\n pass\n return idx", "title": "" }, { "docid": "3212406e64bd969d41f20571fc7c3fad", "score": "0.6495377", "text": "def _index(item, seq):\n return seq.index(item) if item in seq else -1", "title": "" }, { "docid": "af4b764d178d4dfd428f8885e42421a7", "score": "0.62736386", "text": "def last(it):\n return it[-1]", "title": "" }, { "docid": "b9803a5bb3b513491b36921d2cc13af9", "score": "0.61658967", "text": "def find_last_value(self, value):\n arr = self.to_array()\n indices = np.argwhere(arr == value)\n if not indices:\n raise ValueError('value not found')\n return indices[-1, 0]", "title": "" }, { "docid": "7129ea328ce027a4baf74b074786b50d", "score": "0.61593133", "text": "def find_last(self, term):\n\n term = term.lower()\n if term not in self.index:\n return float('inf')\n \n return self.index[term][-1]", "title": "" }, { "docid": "6a75ac774033f70fc0425a85346aba8b", "score": "0.6102559", "text": "def find(nda, obj):\n for i in range(0, len(nda)):\n if nda[i] == obj:\n return i\n return -1", "title": "" }, { "docid": "50574cc89a26e24c61a042d9ee0cff07", "score": "0.60844", "text": "def test_last_index(self):\n vowels = 'aeiou'\n is_vowel = lambda x: x in vowels\n s1 = 'ebcua'\n s2 = 'bcbaef'\n s3 = ''\n s4 = 'cbd'\n self.assertEqual(last_index(s1, is_vowel), 4)\n self.assertEqual(last_index(s2, is_vowel), 4)\n self.assertEqual(last_index(s3, is_vowel), None)\n self.assertEqual(last_index(s4, is_vowel), None)", "title": "" }, { "docid": "af09c3d8304ec4f74369310e2dfb6e45", "score": "0.6048306", "text": "def find(self, obj: Any) -> int:\n curr_node = self.head\n index = 0\n while curr_node is not None:\n if curr_node.value == obj:\n return index\n curr_node = curr_node.nextNode\n index += 1\n return -1", "title": "" }, { "docid": "4cd81dc334e31ac57cadb8a17d8df5d3", "score": "0.6032641", "text": "def best_index(self):\n return self.order[-1]", "title": "" }, { "docid": "3d11464fa31e270a26ddfb7b64a329a8", "score": "0.6022257", "text": "def _latest_index(self):\n if self._last_index:\n return self._last_index\n if not self._stream:\n raise OutOfRange()\n return self._stream[-1].index", "title": "" }, { "docid": "e0be892ad703c0f2c3bcbea670637c42", "score": "0.6001756", "text": "def _last(self):\n return len(self._keys)-1", "title": "" }, { "docid": "2274a68b9ea99cbdf18651fbff36a534", "score": "0.5994692", "text": "def retrieve_last_sequence_number(match_history):\n matches = match_history['matches']\n return matches[len(matches) - 1]['match_seq_num']", "title": "" }, { "docid": "30fff688eaa73a699831d0961728c923", "score": "0.59687585", "text": "def get_last_coordinate(l):\n return l[-1]", "title": "" }, { "docid": "c2c7bde32207318fdf42acce9d2db896", "score": "0.5965175", "text": "def last_pos(self):\n return self.route[len(self.route) - 1]", "title": "" }, { "docid": "4410a0d3096073927f1578db6d209f4c", "score": "0.59641963", "text": "def __get_index(self, index):\n return (self.__len__() - 1 - index) % self.__len__()", "title": "" }, { "docid": "8076ce35524b9e67768ca24366aaf058", "score": "0.596233", "text": "def rindex(self, val):\n 
for n in range(len(self.array)-1, -1, -1):\n if self.array[n] == val:\n return n\n return None", "title": "" }, { "docid": "b9b6cbb55e2d3571b14543b791fc5261", "score": "0.5957747", "text": "def last_seq(self):\n return self._last_seq", "title": "" }, { "docid": "6fbcb79d3eb686734b37faee7252b2db", "score": "0.59033054", "text": "def lastIndexOf(self, searchvalue, start=0):\n s = str_(self)\n return s.rfind(searchvalue, start)", "title": "" }, { "docid": "e0f6404ab1c86f9f17501ec2f24f6695", "score": "0.5865014", "text": "def find(self, obj):\n for k in range(self._n):\n if self[k] == obj:\n return k\n return None", "title": "" }, { "docid": "ba069a7d41a818258cdb35d237c20a63", "score": "0.5853777", "text": "def _isLater(idx, ref):\n refSlice = ref[idx:]\n refSlice[0] = -1\n try:\n return refSlice.index(ref[idx]) + idx\n except ValueError as e:\n return -1", "title": "" }, { "docid": "d89f9b3060a4fb99df9aa2f68133d4b7", "score": "0.58477706", "text": "def last(self):\r\n return self.make_position(self.trailer.prev)", "title": "" }, { "docid": "9ea1cf9e6882d44f2f0308974e53cd80", "score": "0.583224", "text": "def find(seq, target):\n for i, value in enumerate(seq):\n if value == target:\n break\n else:\n return -1\n\n return i", "title": "" }, { "docid": "5ec0a14b60985678e1fa1f6fa50ab4f3", "score": "0.57939756", "text": "def last(t):\n v = store.last(t)\n if v:\n return v[-1]", "title": "" }, { "docid": "4597db1f67be17fdfbc276b0f14a1ea5", "score": "0.5789687", "text": "def last(self):\n sz = len(self._annotations)\n if sz == 0:\n raise Exception(\"Empty set, there is no last annotation\")\n elif sz == 1:\n return next(iter(self._annotations.values()))\n self._create_index_by_offset()\n _, _, annid = next(self._index_by_offset.irange(reverse=True))\n return self._annotations[annid]", "title": "" }, { "docid": "3eee1172240b783fae104db3a9916493", "score": "0.57750523", "text": "def end(self):\n return self.length-1", "title": "" }, { "docid": "ba55080034f770f25c1bcb727771dedd", "score": "0.5768853", "text": "def end(self):\n if self.size == 0:\n raise Exception(\"Annotation set is empty, cannot determine end offset\")\n self._create_index_by_offset()\n return self._index_by_offset.max_end()", "title": "" }, { "docid": "100dcabbdf00bc22b35021c37d6320d3", "score": "0.575981", "text": "def last(self):\n return self._make_position(self._trailer.previous)", "title": "" }, { "docid": "1c3a4bd3c0abebfb1334dcc311582b98", "score": "0.57427657", "text": "def _find_chain_end(parm, endidx, template) :\n for idx in range(endidx-1, 2, -1) :\n a = _get_atom(parm, \"%s%d\"%(template, idx))\n if a is not None :\n return idx\n return -1", "title": "" }, { "docid": "18c3f3b1af6994f796347281bd86ee64", "score": "0.5741472", "text": "def findLastEntry(entryList):\n\ti = 0\n\tfor t in entryList:\n\t\tif i == 0:\n\t\t\tidVal = (t['id'])\n\t\t\ti += 1\n\t\telif i > 0:\n\t\t\tif t['id'] > idVal:\n\t\t\t\tidVal = t['id']\n\t\t\t\t#print(i)\n\t\t\t\ti += 1\n\treturn i-1", "title": "" }, { "docid": "15b274512bb779a7e4bc68d27fe89dd6", "score": "0.5716856", "text": "def whichmax(seq):\n return max([(b, a) for a, b in enumerate(seq)])[1]", "title": "" }, { "docid": "68f70b81eb68d9d2328d473825c60842", "score": "0.5697099", "text": "def getLast(self):\n return self.items[self.size - 1]", "title": "" }, { "docid": "e829840b7bb7c522b17e4240b87c6b2b", "score": "0.5692506", "text": "def get_last_item(li):\n return (li[-1])", "title": "" }, { "docid": "fe787bddef9ae824f5b697cc5a23edbc", "score": "0.56910384", "text": "def 
back(self):\n\t\t# runtime O(1) retrieving value at an index\n\t\treturn self.list[-1]", "title": "" }, { "docid": "8edd7aa498ccf6b6bfbaeed272ba55f4", "score": "0.567729", "text": "def get_last_document_number(self):\n return self.sequence_last", "title": "" }, { "docid": "4e72ca097814230720b3ecc6f7ec5a26", "score": "0.5673417", "text": "def test_last_index_in_set(self):\n vowels = 'aeiou'\n s1 = 'ebcua'\n s2 = 'bcbaef'\n s3 = ''\n s4 = 'cbd'\n self.assertEqual(last_index_in_set(s1, vowels), 4)\n self.assertEqual(last_index_in_set(s2, vowels), 4)\n self.assertEqual(last_index_in_set(s3, vowels), None)\n self.assertEqual(last_index_in_set(s4, vowels), None)", "title": "" }, { "docid": "6d117a7f24a8fa11d9873a66d76cc5c8", "score": "0.5670909", "text": "def getlast(self):\n return self.getvallist()[-1]", "title": "" }, { "docid": "8b1377bc6034806857c288dccaf3b011", "score": "0.56646484", "text": "def argmax(sequence):\n # YOUR CODE HERE\n max_num=0\n max_index=-1\n for index,item in enumerate(sequence):\n if max_num<item:\n max_num=item\n max_index=index\n return max_index", "title": "" }, { "docid": "636c6520426b124cae247cd6474ee9a2", "score": "0.5641788", "text": "def last_record(self):\n num_records = self.current_size // self.recordsize\n return self.record_number(num_records-1)", "title": "" }, { "docid": "881e8e3ae88a38b8b00cd38a2b8eea8e", "score": "0.5619034", "text": "def getNextObjNo(self):\n\t\tvReturn = self.nextObjNo\n\t\tself.nextObjNo = self.nextObjNo + 1\n\t\treturn vReturn", "title": "" }, { "docid": "9101a4c5b6e285f7c42c7cf0d68ce446", "score": "0.56180775", "text": "def first_searching_round(sequence):\n for i in range(len(sequence)-1):\n if sequence[i] >= sequence[i+1]:\n return i\n return -1", "title": "" }, { "docid": "2c22aba5baeab75fd2a095c1afd18e68", "score": "0.56073856", "text": "def ordinal_or_last(m) -> str:\n if m[0] == \"last\":\n return -1\n return m.ordinals - 1", "title": "" }, { "docid": "2f8efcc719f1fb4c80073bff0c8b068a", "score": "0.5607357", "text": "def findlast(s, substrs):\n i = -1\n result = None\n for substr in substrs:\n pos = s.rfind(substr)\n if pos > i:\n i = pos\n result = substr\n return result", "title": "" }, { "docid": "980cf1d5ab5a1c387271c6726925cf36", "score": "0.55875015", "text": "def getLast(self) -> object:\n ...", "title": "" }, { "docid": "dfaa0f20d545186e030b74f49d233a2b", "score": "0.55863565", "text": "def index(elem, seq):\n if seq == []:\n return 0\n if seq == (''):\n return 0\n else:\n rest = index(elem, seq[1:])\n if seq[0] == elem:\n return 0\n else:\n return 1 + rest", "title": "" }, { "docid": "7634d2ca0df609f485a2357adc676302", "score": "0.55762243", "text": "def last(self):\n\n return self.guard.prev", "title": "" }, { "docid": "8b1fa65d806bd6635e551588b4f0d8dc", "score": "0.5566034", "text": "def student_index(self, student_obj):\r\n i = 0\r\n for student in self.students:\r\n if student.name == student_obj.name:\r\n return i \r\n i += 1\r\n \r\n return -1", "title": "" }, { "docid": "f8ccb0ebb30ddd897dc825e0f8f8d164", "score": "0.5564308", "text": "def getNextRecordIdx(self):", "title": "" }, { "docid": "b4a0c1765e580ad93bc3d55592a28be2", "score": "0.5560225", "text": "def last_end(self):\r\n return max([end for start, end in self.spans])", "title": "" }, { "docid": "6dc76661b8d6afa1c08ad9ddbe80e468", "score": "0.5546248", "text": "def buffer_idx(self, seq):\n return seq % self.buffer_size", "title": "" }, { "docid": "9148e32516804dd0f30e2408dafc6e87", "score": "0.5545109", "text": "def index(self, item):\n counter = 0\n 
for r in self.records:\n assert isinstance(r, Record)\n v = r.index(item)\n if v is not None:\n return counter + v\n counter += len(r)\n\n raise ValueError(f\"{item} not found\")", "title": "" }, { "docid": "194b4cec7f0a8cada69b1bbecea99154", "score": "0.55374986", "text": "def last_position(self):\n if len(self):\n last = len(self) - 1\n return [last] + self[last].last_position()\n else:\n return []", "title": "" }, { "docid": "2d9a2ce7b79cbe74b9dc82f7cea135df", "score": "0.55329347", "text": "def find_1st_xind(elem, l):\n\ti=0\n\tfor u in l:\n\t\tif u>elem:\n\t\t\tif (i>0):\n\t\t\t\treturn i-1\n\t\t\telse:\n\t\t\t\treturn 0\n\t\ti+=1\n\treturn -1", "title": "" }, { "docid": "27c96b8d81053c51e499ac4c4b15c927", "score": "0.55275977", "text": "def get_sequence_end(mmap, start):\n return mmap.find(b\">\", start + 1)", "title": "" }, { "docid": "55fd3210172ffd4ac0ee936d4979cf6b", "score": "0.5524161", "text": "def last_event(self):\n last = -np.inf\n for series in self.data:\n if series[-1] > last:\n last = series[-1]\n return last", "title": "" }, { "docid": "9f2ca0cbd0aaf15a7befba7a6e84a9f7", "score": "0.5523845", "text": "def _find_item_at_position(self, l, pos):\n i=0\n for x in l:\n if x.pos[0] == pos[0] and x.pos[1] == pos[1]:\n return i\n else :\n i += 1\n return i", "title": "" }, { "docid": "3089cfb346f18c464b71d1bd8f370eb2", "score": "0.551699", "text": "def find_first(self,item, vec):\n for i, cum_p in enumerate(vec):\n if item <= cum_p:\n return i\n return -1", "title": "" }, { "docid": "913ceaa4967f51a7520b78a2c82b8851", "score": "0.5505722", "text": "def index(self,item):\n if self._seq is not None:\n return self._seq.index(item)\n raise ValueError(str(item)+\" is not in the sequence\")", "title": "" }, { "docid": "5fbe6f2e891b1568ba2f1a13da67f544", "score": "0.5504572", "text": "def _return_longest_sequence(self, sequence):\n if len(sequence) == 0:\n raise ValueError(\"Can not find the longest sequence in an empty sequence\")\n\n if len(sequence) == 1: # single value sequence\n return 0, 1 # index, length_of_longest_sequence\n\n start_index_of_longest_sequence = None\n length_of_longest_sequence = 1 # a single element \"sequence\" has the length 1\n\n start_of_current_sequence = None\n length_of_current_sequence = 1\n\n for index, element in enumerate(sequence):\n\n if index != len(sequence) - 1: # for all elements except the last one\n if element + 1 == sequence[index + 1]: # if we have a sequence\n if start_of_current_sequence is None: # if the first element in a new sequence\n start_of_current_sequence = index\n length_of_current_sequence += 1\n else: # not the first index in the curren sequence\n length_of_current_sequence += 1\n\n # Compare the Sequence to the longest one\n if length_of_longest_sequence < length_of_current_sequence:\n length_of_longest_sequence = length_of_current_sequence\n start_index_of_longest_sequence = start_of_current_sequence\n\n else: # if no sequence detected or if sequence has stopped\n start_of_current_sequence = None\n length_of_current_sequence = 1\n\n return start_index_of_longest_sequence, length_of_longest_sequence", "title": "" }, { "docid": "41f8f366deabc558578661d83cd13ed3", "score": "0.55012506", "text": "def tuple_find_last(\n tuple: MaybeSequence[Union[int, float, str]],\n to_find: MaybeSequence[Union[int, float, str]]\n) -> int:\n with HalconOperator(128) as proc:\n proc.set_input_tuple(0, tuple)\n proc.set_input_tuple(1, to_find)\n proc.init_oct(0)\n proc.execute()\n index = proc.get_output_tuple_s(0)\n return index # type: 
ignore", "title": "" }, { "docid": "d776b02cfd864cff64b9f7ee76d4600f", "score": "0.5481025", "text": "def _find_position(self, elem):\n walker = self._data.first()\n while walker is not None and walker.element()._value != elem:\n walker = self._data.after(walker)\n return walker", "title": "" }, { "docid": "79763b58bd01dbecac0b4ac009999c5e", "score": "0.547724", "text": "def find_last_bid(lmsg):\n for i in range(len(lmsg) - 1, 0, -1):\n if lmsg[i][1] in ['A', 'F', 'U']:\n return i", "title": "" }, { "docid": "8374b43b32b85a470d7b693dc114ace7", "score": "0.5468662", "text": "def safe_index(l, e):\n try:\n return l[0].index(e) + l[1]\n except:\n return len(l[0]) - 1 + l[1]", "title": "" }, { "docid": "a5966cbb6d2af5cb056f3a9d2d35f978", "score": "0.5467961", "text": "def lastindexof(string, substring):\n raise RuntimeError('lastindexof' + error_message)", "title": "" }, { "docid": "50864cb6e796b15682e3dcffc5abe2ed", "score": "0.5465984", "text": "def last(self):\n if self.n:\n return self.full_arr[self.n - 1]\n else:\n raise IndexError(\"This numbavec has no elements: cannot return 'last'.\")", "title": "" }, { "docid": "da402ab0d2a6c850951bf2dfdcbabeda", "score": "0.5464215", "text": "def last(arr: List):\r\n if len(arr) == 0:\r\n return None\r\n else:\r\n return arr[len(arr) - 1]", "title": "" }, { "docid": "5c9b22b1943a6ea26542d8ad17b17f46", "score": "0.54636824", "text": "def last_el(x):\r\n if np.isscalar(x): return x\r\n else: return x[-1]", "title": "" }, { "docid": "3d6b834a5281619a8f0fcbd120b3fed5", "score": "0.54610807", "text": "def one(idx):\r\n return idx - 1", "title": "" }, { "docid": "09fe0a7c19d5e6daa185ce8d0a4a6153", "score": "0.5433566", "text": "def _idIndex(lst, el):\n \n for i, e in enumerate(lst):\n if e is el:\n return i\n raise ValueError('element %r not found' % el)", "title": "" }, { "docid": "62f54c20ac9114a1103663fecbc80956", "score": "0.543234", "text": "def last(self):\n return self.store.get('counter')", "title": "" }, { "docid": "6087db55093168dcbc7f8358ba831f6f", "score": "0.54318774", "text": "def index(self, item):\n\n current = self.head\n pos = 0\n found = False\n while current is not None and not found:\n if current.get_data() is item:\n found = True\n else:\n current = current.get_next()\n pos += 1\n if found:\n pass\n else:\n pos = None\n\n return pos", "title": "" }, { "docid": "fa42b2a8b9fe802d00926c954e582294", "score": "0.54203147", "text": "def _find_position(self, e):\n\t\twalk = self._data.first()\n\t\twhile walk is not None and walk.element()._value != e:\n\t\t\twalk = self._data.after(walk)\n\t\treturn walk", "title": "" }, { "docid": "704be6810232b1d3e85d10a143f5b5d9", "score": "0.5420285", "text": "def cursor_word(words, cursor_pos):\n words = [Word(*word) for word in words]\n\n for i, word in enumerate(words):\n if cursor_pos < word.start:\n return i - 1\n elif cursor_pos <= word.end:\n return i\n\n return len(words) - 1", "title": "" }, { "docid": "d4e9760a210c4dc71e94d00cc13f8cd3", "score": "0.54107773", "text": "def push_tail(self, el):\n # Subtracting 1 so that we get the real index within the sequence\n return self._client.rpush(self.key, el) - 1", "title": "" }, { "docid": "8b9cef2a31205a6b88622588f8e5c7f0", "score": "0.54064983", "text": "def current_index(self):\n\n if self._current_index is None:\n return\n now = time.time()\n while self.ordered[self._current_index]['end'] < now:\n if self._current_index == len(self.ordered) - 1:\n self._current_index = None\n break\n self._current_index += 1\n return self._current_index", "title": 
"" }, { "docid": "ef41bde823f26b4797d64142dfaaba70", "score": "0.5405126", "text": "def __get_last_item(ser):\n ser_ = ser.dropna()\n if ser_.last_valid_index() is None:\n return np.nan\n else:\n return ser_.values[-1]", "title": "" }, { "docid": "be2c15fb4ac923636b1069748e5bb90e", "score": "0.54013085", "text": "def max_index(data):\n index = None\n ###########################################################################\n # Please finish the code here\n # Can you implement the function with just one line of code?\n\n ###########################################################################\n return index", "title": "" }, { "docid": "67f7ede1bc6658c8e166804027d8fa34", "score": "0.5397681", "text": "def index(self, item):\n try:\n return list.index(self, item)\n except ValueError:\n pass\n return -1", "title": "" }, { "docid": "330776ff7d18ceb986c82cd4eb0e6e79", "score": "0.5389465", "text": "def last(word):\n return word[-1]", "title": "" }, { "docid": "330776ff7d18ceb986c82cd4eb0e6e79", "score": "0.5389465", "text": "def last(word):\n return word[-1]", "title": "" }, { "docid": "fe39670ce7da370dd7111c67beb65ae7", "score": "0.5388555", "text": "def __len__(self):\n i = 0\n last = self.first\n while last.next_item != self.first:\n i += 1\n last = last.get_next()\n return i + 1", "title": "" }, { "docid": "12775c8ba63c6438c67dbd4fd1451623", "score": "0.53849953", "text": "def last_index_of(self, item: ItemType) -> Future[int]:\n check_not_none(item, \"Value can't be None\")\n try:\n item_data = self._to_data(item)\n except SchemaNotReplicatedError as e:\n return self._send_schema_and_retry(e, self.last_index_of, item)\n\n request = list_last_index_of_codec.encode_request(self.name, item_data)\n return self._invoke(request, list_last_index_of_codec.decode_response)", "title": "" }, { "docid": "4725a884046d852473baa6d82e26799a", "score": "0.5377175", "text": "def lastMove(self):\n if self.lastmove == 0:\n return 1\n else:\n return -1", "title": "" }, { "docid": "4010e008588fa3cd977a26df99b13636", "score": "0.5373272", "text": "def last(self, s, check=all):\n ret = self.get(s, check)\n return ret[-1] if ret else None", "title": "" }, { "docid": "9f2fce6deb72278db3b9e71cda34bb44", "score": "0.53702676", "text": "def get_longest_called(self):\n sort_store = sorted(self.store, key = lambda x:x[1])\n return sort_store[-1][0]", "title": "" }, { "docid": "af3145917ed5a7bc67c2255e07c153e2", "score": "0.5369215", "text": "def last(self):\n\n if self:\n cell = list.__getitem__(self, -1)\n return cell\n\n return None", "title": "" }, { "docid": "57bb3827f9deb5f5fa23731f3a5b6594", "score": "0.53678954", "text": "def elem_index(self, item):\n if self.inf:\n raise Warning(\n \"Calling elemIndex on infinite list! 
Might run forever...\")\n i = 0\n while True:\n try:\n if self[i] == item:\n return i\n except IndexError:\n return -1\n i += 1", "title": "" }, { "docid": "c115896777abf9f61e1cd6db11ce066e", "score": "0.5367519", "text": "def end(self):\n return max(seg.start + seg.length for seg in self)", "title": "" }, { "docid": "f29f09c60eb2f98ce2c79d8aaeedbc77", "score": "0.5366642", "text": "def getOccurence(self) -> int:\n ...", "title": "" }, { "docid": "eaeb891050a5c29f31a7d87effd8a765", "score": "0.53603053", "text": "def find_validator_membership_history_index(self, epoch: int, history: List[dict]) -> int:\n rev_history = reversed(history)\n rev_index = None\n for ind, el in enumerate(rev_history):\n if el['epoch'] <= epoch:\n rev_index = ind\n break\n\n return -1 if rev_index == None else len(history) - rev_index - 1", "title": "" }, { "docid": "19fadf2c0a4beb911f2acc86bc36090a", "score": "0.5358826", "text": "def last_log_index(self):\n last_log_index = len(self.cache)\n logger.debug(f'{self.id} last_log_index at: {last_log_index}.')\n return (last_log_index)", "title": "" }, { "docid": "79f2af2a6f03707c1ff41bddd7ab4ec2", "score": "0.5355557", "text": "def last(self):\n return self._last", "title": "" }, { "docid": "781b00f71eb73f8a3fe332ea7bbd96e6", "score": "0.53512615", "text": "def last_unique_element(L):\n\n # First, determine the unique elements of the array.\n # Keep track of 'uniqueness' elements in a dictionary (hashtable).\n unique_dict = {}\n\n for element in L:\n # Element has appeared before and cannot be unique\n if element in unique_dict:\n unique_dict[element] = False\n # Element has not appeared before and may be unique\n else: \n unique_dict[element] = True\n\n # Second, go through the array backward until a unique value is found\n for element in reversed(L):\n if unique_dict[element]:\n return element\n\n # In the event that there are no unique elements, return null\n return None", "title": "" }, { "docid": "1eace3b5b708f63366a026113077a0f9", "score": "0.53389037", "text": "def eos_index(self):\n return self._token_to_index[self._eos]", "title": "" }, { "docid": "222f992dbc1e83a4a0954589b03fbafc", "score": "0.53295726", "text": "def get_last_sample_ind(self):\n return self.get_num_samples() - 1", "title": "" }, { "docid": "8495353400bfcc527bafcd948ad40a62", "score": "0.5325855", "text": "def find_rightmost_index(lst, end_date):\n i = bisect.bisect_left(lst, end_date)\n if i:\n return i\n raise ValueError", "title": "" }, { "docid": "a3e50295d24ca7029b549a2b10a7fbf1", "score": "0.5310841", "text": "def get_index(line):\n for dummy_i in range(0,len(line) - 1):\n if line[dummy_i] !=0 and line[dummy_i] == line[dummy_i+1]:\n return dummy_i", "title": "" }, { "docid": "367ab439370a90a4149a8857e15122b2", "score": "0.5302287", "text": "def get_index_from_oid(self, oid, length=1):\n\n length = int(length)\n oid = utils.parse_oid(oid)\n\n if length == 1:\n return oid[-1]\n else:\n return oid[-length:]", "title": "" }, { "docid": "03b87942e0406a17ad4cac73f94effd1", "score": "0.52950394", "text": "def Index(self) -> int:", "title": "" }, { "docid": "d9fd26c3f1a7a1a2f84cd2ee1d806574", "score": "0.52935165", "text": "def GetLastItem(self, flags=PG_ITERATE_DEFAULT):", "title": "" } ]
4b94c59b0a2448d4d51330e6a6f17910
Cuts the string at the last dot '.'. The return has a proper ending.
[ { "docid": "ecbea9709a6f94d312c1fe08e166c133", "score": "0.7466881", "text": "def endSentence(string): \n\tind = string.rfind('.')\n\treturn string if ind == -1 else string[0:ind+1]", "title": "" } ]
[ { "docid": "91a5a97e7735f1fa13c411c46141835d", "score": "0.69080025", "text": "def get_suffix(filename, has_dot=False):\n\tpos = filename.rfind('.')\n\tif 0 < pos < len(filename) - 1:\n\t\tindex = pos if has_dot else pos + 1\n\t\treturn filename[index:]\n\telse:\n\t\treturn ''", "title": "" }, { "docid": "0233999463e61f344a1d25f8f8fccbc3", "score": "0.6644992", "text": "def _findSuffix(self, gname):\n\t\ti = gname.find(\".\")\n\t\tif i != -1 and i != 0:\n\t\t\treturn gname[i+1:]\n\t\telse:\n\t\t\treturn None", "title": "" }, { "docid": "976299343f76079c44acc02363b3cc0c", "score": "0.66254175", "text": "def utlRemoveEnd(self, thestring, ending):\n \t\tif thestring.endswith(ending):\n \t\t\treturn thestring[:-len(ending)]\n \t\treturn thestring", "title": "" }, { "docid": "782516dd2ddddf1c4fb6fb18d5d0e44e", "score": "0.6506025", "text": "def strip_suffix(filename):\n\n dotidx = filename.rfind(\".\")\n return filename[:dotidx] if dotidx != -1 else filename", "title": "" }, { "docid": "f487fbb190ed4de7ad0eaf641d116969", "score": "0.6474608", "text": "def remove_ext(string, ext=None):\n if ext is None:\n if string.rfind('.') >= 0:\n ext = string[string.rfind('.'):]\n else:\n ext =\"\"\n if string.lower().endswith(ext.lower()):\n return string[:-len(ext)]\n else:\n return string", "title": "" }, { "docid": "6518d67a040cd4a93317612309886709", "score": "0.63603675", "text": "def strip_end(h, s):\n if h.endswith(s):\n h = h[:-len(s)]\n return h", "title": "" }, { "docid": "455b82326d08940f3a670384c7f2649f", "score": "0.63524777", "text": "def elide_end(text: str, max_n_chars: int) -> str:\n if len(text) <= max_n_chars:\n return text\n if max_n_chars <= 3:\n return \".\" * max_n_chars\n\n n_first_chars = max_n_chars - 3\n return f\"{text[:n_first_chars]}...\"", "title": "" }, { "docid": "34207197aff08146c49fde9ead9622c7", "score": "0.63411057", "text": "def _fix_filename(filename):\n return filename.split('..')[-1].lstrip('./')", "title": "" }, { "docid": "64f0788b32a84e55794d5109d906541b", "score": "0.6314211", "text": "def str_after_last(src, sub):\n idx = src.rfind(sub)\n return src[idx + len(sub):] if idx >= 0 else \"\"", "title": "" }, { "docid": "d1db0e5dd8ee6c6b7307993748d4d3fe", "score": "0.6284773", "text": "def end_on_period(words):\n # get the last word of the of the capitalized touple/key\n # assign it to a variable\n last_word = words[-1]\n\n # get the last letter of the of the capitalized touple/key\n last_letter = last_word[-1]\n\n # if the last letter ends in a period or a questions mark or\n # exlamation point\n # return true\n # otherwise return false\n if last_letter == \".\" or last_letter == \"?\" or last_letter == \"!\":\n return True \n\n return False", "title": "" }, { "docid": "aea9c0f0a1c90f09778d2a0e859ecdfc", "score": "0.62044156", "text": "def get_domain_ending(url: str) -> str:\n netloc = urlparse(url).netloc\n match = re.search(r'\\.(.+?)$', netloc)\n return match.group(1)", "title": "" }, { "docid": "6040207c173e564020459d511bb53eaa", "score": "0.61705035", "text": "def strip_optional_suffix(string, suffix):\n if string.endswith(suffix):\n string = string[:-len(suffix)]\n return string", "title": "" }, { "docid": "7298950e740d2eaa8ac7698eb7eb5baa", "score": "0.60978043", "text": "def remove_end(text, end):\n if text.endswith(end):\n return text[:-len(end)], True\n else:\n return text, False", "title": "" }, { "docid": "ff395b9ef9ad4286eddd6a2d628a3471", "score": "0.60557723", "text": "def str_before_last(src, sub):\n idx = src.rfind(sub)\n return src[:idx] if idx >= 
0 else \"\"", "title": "" }, { "docid": "85adb7060820a72fdc95aad00f70e1e4", "score": "0.6045822", "text": "def strip_suffix(string, suffix):\n assert string.endswith(suffix)\n return string[:-len(suffix)]", "title": "" }, { "docid": "5e8afb84db9a5e901eec3f78874ca111", "score": "0.6036715", "text": "def str_trunc_end(S, L):\n if len(S) > L:\n return S[:max(L-3,0)] + \"...\"\n else:\n return S", "title": "" }, { "docid": "de0aa5fd96b0f56dd69ce1249638f694", "score": "0.6031136", "text": "def before_last(path):\n return path[-2]", "title": "" }, { "docid": "41911bc3a49aaeb838952559fb089852", "score": "0.60064465", "text": "def CutEnd(rawstring, remove):\n\t\n\trawstring_enddigit = rawstring.rfind(remove)\n\trawstring = rawstring[:rawstring_enddigit]\n\t\n\treturn rawstring", "title": "" }, { "docid": "b987742030395644b30adad9ac9e08d9", "score": "0.59972733", "text": "def get_the_last_part(data):\n if len(data) == 0:\n return []\n\n mc = re.search('\\/', data[0])\n if mc is None:\n return data\n else:\n clipped = []\n for ent in data:\n atemp = re.split('\\/', ent)\n clipped.append(atemp[-1])\n\n return clipped", "title": "" }, { "docid": "28809b82cd9ce35520fe402884af745e", "score": "0.59848046", "text": "def last_char(word):\n return word[-1:]", "title": "" }, { "docid": "69ca698a1ba2a278920f24eb21887040", "score": "0.5970547", "text": "def _suffix():\n return \"\"", "title": "" }, { "docid": "8b8e4cde0ceedd572f577c6915e3e269", "score": "0.59466946", "text": "def _url_last_chunk(url: str) -> str:\n return url[url.rfind(\"/\")+1:]", "title": "" }, { "docid": "869a6302b71ebacfe91bc039fe0df68a", "score": "0.59421486", "text": "def clean(base_name):\r\n \r\n return base_name.split('.')[0]", "title": "" }, { "docid": "25964c43a24cbb647264815be6a4049c", "score": "0.5933292", "text": "def _last(txt, length):\n try:\n return \"\\n\".join(txt.splitlines()[(-1) * int(length) :])\n except ValueError:\n return txt", "title": "" }, { "docid": "b55d98a1db7e94a8e975bae2d8f2a2fa", "score": "0.5923157", "text": "def _ends_in_by(word):\n return word[-3:] == \".by\"", "title": "" }, { "docid": "a18f5b68a4f1b470666097e879d96637", "score": "0.59183866", "text": "def unqualify(name: str) -> str:\n return name.rsplit(\".\", maxsplit=1)[-1]", "title": "" }, { "docid": "8caa5eb2554cceeba148de27718c5b74", "score": "0.5909491", "text": "def cut_suffix(s, suffix):\n ...", "title": "" }, { "docid": "7399fadd4d7df142c4a51dd8df038a26", "score": "0.58932227", "text": "def extension(self) -> str:\n return self.name.rsplit(\".\", 1)[-1]", "title": "" }, { "docid": "a68f4dcff5c4c41124e0cea96926511b", "score": "0.5891819", "text": "def ending(self, ending = None, dontWrite = False):\n\n if ending == None: \n return self._ending\n else:\n if ending[-1:] != \".\":\n ending = \".\" + ending\n self._ending = ending\n self._changeFilePath(self.filePath(), ending, dontWrite)", "title": "" }, { "docid": "2bb654ea4480ddbbc7840630e669eb32", "score": "0.5876808", "text": "def remove_suffix(name):\n return name[:name.index('[')] if '[' in name else None", "title": "" }, { "docid": "dc06aee214ed34c7610e0cc38f829bf3", "score": "0.5868694", "text": "def desentencify(s):\n assert isinstance(s, str)\n if len(s) >= 2 and s[0].isupper() and not s[1].isupper():\n s = s[0].lower() + s[1:]\n if s.endswith(\".\"):\n return s[:-1]\n else:\n return s", "title": "" }, { "docid": "e4f10acacf7ef68a77512d5fc4769977", "score": "0.58384734", "text": "def rep_end(self, word, suffix, rep = ''):\n if word.endswith(suffix):\n return word[:len(word) - 
len(suffix)] + rep\n return word", "title": "" }, { "docid": "92185f8c212a320be408a49772e232c2", "score": "0.58359456", "text": "def remove_from_end(string, text_to_remove):\n if string is not None and string.endswith(text_to_remove):\n return string[:-len(text_to_remove)]\n return string", "title": "" }, { "docid": "0664b6a1aef939fdfe71ea2373166aea", "score": "0.5806978", "text": "def get_proper_str(str_option, end=False):\n if len(str_option) > 32:\n if end:\n return '...'+str_option[-29:]\n else:\n return str_option[:29]+'...'\n else:\n return str_option", "title": "" }, { "docid": "0d698d6f3ef551756bcc7e13dac4840e", "score": "0.5787785", "text": "def strip_post_release(v: str) -> str:\r\n\r\n if len(v.split(\".\")) > 3:\r\n return '.'.join(v.split('.')[:3])\r\n else:\r\n return v", "title": "" }, { "docid": "7962eb30ab750b819f9b22303d129fdc", "score": "0.5766014", "text": "def _strip_trailing_zeros(s, remove_decimal_point=True):\n if '.' not in s:\n return s\n s = s.rstrip(u'0')\n if remove_decimal_point and s[-1] == u'.':\n s = s[:-1]\n if not s:\n return u'0'\n return s", "title": "" }, { "docid": "7a4fbcae77b887e0a2f2c71565b62a44", "score": "0.57582706", "text": "def name_without_extension(self) -> str:\n return self.name.rsplit(\".\", 1)[0]", "title": "" }, { "docid": "ce08be4a98618409a83ee53089f9dadb", "score": "0.574536", "text": "def get_oid_last_octet(oid):\r\n \r\n octets = oid.split('.')\r\n return octets[-1]", "title": "" }, { "docid": "b872c6d5c4bffe913d787b00eb506646", "score": "0.57330996", "text": "def nolast(l):\n return l[:-1]", "title": "" }, { "docid": "160e732851162776625c94aea5ebc7d8", "score": "0.5728994", "text": "def add_stop(utt):\n if utt[-1] not in '.,!:;?':\n utt = utt + ' .'\n return utt", "title": "" }, { "docid": "85758e06267f6818854afa44d4a0c85e", "score": "0.57149434", "text": "def rchop(original_string, substring):\n if original_string.endswith(substring):\n return original_string[:-len(substring)]\n return original_string", "title": "" }, { "docid": "426df459bbbe36106c9c137e6b7f870b", "score": "0.5711546", "text": "def is_final_word(string):\n # TODO: this sucks, change into a regexp\n return string[-1] in \".!?:;\"", "title": "" }, { "docid": "e8fb27287fb16c17aeddcccbbc20da8f", "score": "0.5710262", "text": "def removeFileExt(file_name):\n split_num = file_name.strip().split('.');\n if len(split_num) > 0:\n return split_num[0];\n else:\n return file_name;", "title": "" }, { "docid": "330776ff7d18ceb986c82cd4eb0e6e79", "score": "0.56811243", "text": "def last(word):\n return word[-1]", "title": "" }, { "docid": "330776ff7d18ceb986c82cd4eb0e6e79", "score": "0.56811243", "text": "def last(word):\n return word[-1]", "title": "" }, { "docid": "d0b0033168bc8a864e72c2b96bf99f62", "score": "0.5675095", "text": "def ends_in_regular(string):\n string = string.strip().split()[-1]\n return string is 'Regular'", "title": "" }, { "docid": "8eb3ec47424f086ee401e0bb2479eecc", "score": "0.5673251", "text": "def segment_terminator_suffix(self) -> pulumi.Input['SegmentTerminatorSuffix']:\n return pulumi.get(self, \"segment_terminator_suffix\")", "title": "" }, { "docid": "8eb3ec47424f086ee401e0bb2479eecc", "score": "0.5673251", "text": "def segment_terminator_suffix(self) -> pulumi.Input['SegmentTerminatorSuffix']:\n return pulumi.get(self, \"segment_terminator_suffix\")", "title": "" }, { "docid": "8eb3ec47424f086ee401e0bb2479eecc", "score": "0.5673251", "text": "def segment_terminator_suffix(self) -> pulumi.Input['SegmentTerminatorSuffix']:\n return pulumi.get(self, 
\"segment_terminator_suffix\")", "title": "" }, { "docid": "8eb3ec47424f086ee401e0bb2479eecc", "score": "0.5673251", "text": "def segment_terminator_suffix(self) -> pulumi.Input['SegmentTerminatorSuffix']:\n return pulumi.get(self, \"segment_terminator_suffix\")", "title": "" }, { "docid": "05ba30422b1536eb1109889939fa6099", "score": "0.5664935", "text": "def _clean_response(response):\n return response.lstrip(\".\")", "title": "" }, { "docid": "c284bcbfabe3dd50cb9ca32be31c2ada", "score": "0.56615925", "text": "def is_final_word(string):\n\n if re.match(r\".+[.!?]$\", string):\n return True\n else:\n return False", "title": "" }, { "docid": "c1b5a49e0672333f0398af9f3ee2c9f2", "score": "0.5658688", "text": "def _last_char(word):\n for i in range(len(word)):\n if word[len(word)-1-i].isalpha() or word[len(word)-1-i].isdigit():\n return len(word) - 1 - i\n return -1", "title": "" }, { "docid": "932a0b1b410cdfe9dc9e3d136d33e187", "score": "0.5652709", "text": "def splitext(fn):\n s = fn.rfind(\"/\")\n if s == -1:\n s = 0\n i = fn[s:].find(\".\")\n if i == -1:\n return fn, ''\n else:\n return fn[:s+i], fn[s+i:]", "title": "" }, { "docid": "dafa5b1165ed57db6d1c433a10224864", "score": "0.56381", "text": "def _clean_response(response):\n return response.lstrip('.')", "title": "" }, { "docid": "40f77b74b8aaa52f61966ece1d306a69", "score": "0.5631113", "text": "def _last_version_tag():\n re_num = re.compile('[^0-9.]')\n\n tags = sorted([map(int, re_num.sub('', t).split('.')) for t in _get_tags()])\n print tags\n if tags:\n return '.'.join(map(str, tags[-1]))\n return None", "title": "" }, { "docid": "2e3a36dbb601de96fe3dc014077057e5", "score": "0.56211525", "text": "def has_failure(string:str, sz:int=1) -> bool:\n sz = min(len(string), sz)\n return string[-sz:] != '.'*sz", "title": "" }, { "docid": "c72c416be31f08e396bbd14d0c4a26c2", "score": "0.561229", "text": "def smet(string: str, end: str=\"/\") -> str:\n return string if string.endswith(end) else string + end", "title": "" }, { "docid": "2686647035d61f3e393bf10642981c7e", "score": "0.56074214", "text": "def strip_py2_long_suffix(value_str):\n if value_str[-1] in 'lL':\n return value_str[:-1]\n return value_str", "title": "" }, { "docid": "7ecbcaa7b1cb81e0fbd3e0934f12a590", "score": "0.5593231", "text": "def curtail_string(s: str, length: int = 20) -> str:\n if len(s) > length:\n return s[:length] + \"...\"\n else:\n return s", "title": "" }, { "docid": "ef9e17b637e91d77231727aaa39307b7", "score": "0.5591488", "text": "def Get_Base_Name( file_name ):\n end_index = file_name.rfind( \".\" )\n base_name = file_name[ 0 : end_index ]\n return( base_name )", "title": "" }, { "docid": "f6fee1bfa230f644c00695dd82c3870c", "score": "0.5590364", "text": "def last(word):\n\treturn word[-1]", "title": "" }, { "docid": "72c2c8787634b006413f11992772130d", "score": "0.5590148", "text": "def getFilenameNoext(file):\n rlt = file\n ret = file.rfind('/')\n if ret != -1:\n rlt = file[ret + 1:]\n ret = file.rfind('.')\n if ret != -1:\n return rlt\n return rlt[:ret]", "title": "" }, { "docid": "9ae6791f9a48236b02055816492ae2aa", "score": "0.5579895", "text": "def trimmed_base1_end(self):\n return self.end", "title": "" }, { "docid": "97f23119f907d3fd2f0aa4dd4bb1d04d", "score": "0.55565697", "text": "def fix_missing_period(line):\n\n dm_single_close_quote = u'\\u2019' # unicode\n dm_double_close_quote = u'\\u201d'\n END_TOKENS = ['.', '!', '?', '...', \"'\", \"`\", '\"', dm_single_close_quote, dm_double_close_quote,\n \")\"] # acceptable ways to end a sentence\n\n if 
\"@highlight\" in line: return line\n if line == \"\": return line\n if line[-1] in END_TOKENS: return line\n\n return line + \" .\"", "title": "" }, { "docid": "bf594b75623b5f66c11db890e178645c", "score": "0.5556431", "text": "def get_file_name(raw_name):\n return raw_name.split('.')[-2]", "title": "" }, { "docid": "6829b546a44d2b135b7a35039216604b", "score": "0.55412185", "text": "def is_suffix(self) -> bool:\n return self._lema.endswith('-')", "title": "" }, { "docid": "4644ea3e9dcb369bc7201f4cca1dc174", "score": "0.55355495", "text": "def remove_extension(sFilename):\n # Remove the extension\n nIndice = sFilename.rfind('.')\n if nIndice < 0:\n sFilenameShort = sFilename\n else:\n sFilenameShort = sFilename[:sFilename.rfind('.')]\n\n return sFilenameShort", "title": "" }, { "docid": "a6b4a9a47c9a143a4d312469d06f323a", "score": "0.55350345", "text": "def _last_name(self, full_name):\n name_partition = full_name.partition(u',')\n no_suffix = name_partition[0].strip()\n suffix = name_partition[2].strip()\n name_parts = no_suffix.split()\n part_count = len(name_parts)\n if part_count == 1 or part_count == 2:\n return name_parts[-1], suffix\n else:\n assert part_count > 2\n if name_parts[-2].islower():\n return u' '.join(name_parts[-2:]), suffix\n else:\n return name_parts[-1], suffix", "title": "" }, { "docid": "41816f1b9e76c719097b6b5dbd5959f1", "score": "0.55350107", "text": "def suffix(self) -> str:\n return self.relative_path.suffix", "title": "" }, { "docid": "f20942688d8698e9d187e3af877a4dc7", "score": "0.55202645", "text": "def hw_2_2_6(number):\n number = str(number)\n index_point = number.find('.')\n number = number[index_point + 1]\n return number", "title": "" }, { "docid": "3e0bd25705880edceaf761f540dd62db", "score": "0.5519798", "text": "def __ext(self,fname):\n\t\treturn fname.split('.')[-1].strip()", "title": "" }, { "docid": "e1f61f2ebbac47327f770264fae20491", "score": "0.5513818", "text": "def extension(self, path):\n return os.path.splitext(self.check_path(path))[-1]", "title": "" }, { "docid": "5b24a0369c03adfc75915b9dadc423c9", "score": "0.5511297", "text": "def test_get_suffix(self) -> None:\n n = SCons.Node.Node()\n s = n.get_suffix()\n assert s == '', s", "title": "" }, { "docid": "bc786de4363eba6415bb8a4d2e760cdb", "score": "0.5505208", "text": "def remove_extension(str1):\n\n file_name = str1.split('.')\n return file_name[0]", "title": "" }, { "docid": "c115c3fcdb277b2c5d197ab612eaa70f", "score": "0.5502113", "text": "def __trim(s):\n return s if len(s) <= 200 else s[:197] + \"...\"", "title": "" }, { "docid": "2b1428da299637c547393adea6c3e93c", "score": "0.5498329", "text": "def remove_dbl_last_char(self, cond = True):\n if cond and self.stemmed[-2] == self.stemmed[-1]:\n self.stemmed = self.stemmed[:-1]", "title": "" }, { "docid": "58e06850ae3a4658eddf4af88c4f0cf7", "score": "0.54947734", "text": "def fix_missing_period(line):\n if \"@highlight\" in line:\n return line\n if line == \"\":\n return line\n if line[-1] in END_TOKENS:\n return line\n return line + \" .\"", "title": "" }, { "docid": "9a39998fc2586ecfedaf5212203d5476", "score": "0.54945123", "text": "def blast(self):\n import re\n try:\n ln = self.lineageName()\n x = re.split(\"[. 
]\", ln)\n return x[0]\n except Exception:\n return \"\"", "title": "" }, { "docid": "c23c5e067d917af0d7cc182460f2861b", "score": "0.54899126", "text": "def getFileExt(fileName):\n\n lastDot = fileName.rindex('.')\n return fileName[lastDot:]", "title": "" }, { "docid": "ac09e0c4735c1fb1d20dcd992badee27", "score": "0.548516", "text": "def fix_missing_period(line):\n if \"@highlight\" in line: return line\n if line==\"\": return line\n if line[-1] in END_TOKENS: return line\n # print line[-1]\n return line + \" .\"", "title": "" }, { "docid": "706ab83e18cc539f58cfbee6f87890fe", "score": "0.54817986", "text": "def dvzn(stri):\r\n istr = stri.find('.')\r\n stri1 = stri[0:(istr+4)]\r\n return stri1", "title": "" }, { "docid": "b4021af1b8bc72fd571b4a3a4c8a5bc6", "score": "0.547959", "text": "def fix_missing_period(line):\n if \"@highlight\" in line: return line\n if line==\"\": return line\n if line[-1] in END_TOKENS: return line\n # print line[-1]\n return line + \".\"", "title": "" }, { "docid": "4198b89fe6f0e3e06eca7ff7b39ece24", "score": "0.54668826", "text": "def remove_suffix(str1, suffix):\n strData = str1\n for elm in suffix:\n if strData.endswith(elm): # Check if it starts with specified suffix\n strData = strData[:len(strData)-len(elm)] # return string without the specified string\n return strData", "title": "" }, { "docid": "c04b36ba021fc4f007d9ab4ef7e767eb", "score": "0.5466599", "text": "def hw_2_2_5(number):\n number = str(number)\n index_point = number.find('.')\n number = number[index_point + 1:]\n return number", "title": "" }, { "docid": "957fffc4ff42bad1c2c48b02aed7fa85", "score": "0.54640067", "text": "def _split_suffix(filename, suffix_list):\r\n name = os.path.splitext(filename)[0]\r\n for suffix in suffix_list:\r\n if name.endswith(suffix):\r\n cut_pos = len(suffix)\r\n cut_filename = name[:-cut_pos]\r\n return cut_filename\r\n return False", "title": "" }, { "docid": "b1fb8f3b6796c64a89e26bac3d72577e", "score": "0.5463681", "text": "def trim(s):\n return s if len(s) <= 80 else s[:77] + \"...\"", "title": "" }, { "docid": "b1fb8f3b6796c64a89e26bac3d72577e", "score": "0.5463681", "text": "def trim(s):\n return s if len(s) <= 80 else s[:77] + \"...\"", "title": "" }, { "docid": "b1fb8f3b6796c64a89e26bac3d72577e", "score": "0.5463681", "text": "def trim(s):\n return s if len(s) <= 80 else s[:77] + \"...\"", "title": "" }, { "docid": "b1fb8f3b6796c64a89e26bac3d72577e", "score": "0.5463681", "text": "def trim(s):\n return s if len(s) <= 80 else s[:77] + \"...\"", "title": "" }, { "docid": "d6b49385e25152f9b56e0e3032e1357b", "score": "0.54418814", "text": "def ends_withs(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"ends_withs\")", "title": "" }, { "docid": "d6b49385e25152f9b56e0e3032e1357b", "score": "0.54418814", "text": "def ends_withs(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"ends_withs\")", "title": "" }, { "docid": "c1ae2659ecbd8d02b32d6aae507682e1", "score": "0.54410833", "text": "def basename(file):\n fileParts = file.split(\".\")\n basename = \".\".join(fileParts[:-1])\n return basename", "title": "" }, { "docid": "46c2e10f94354c115b84bbc541baa485", "score": "0.54396313", "text": "def suffix(self):\n return pathlib.Path(str(self)).suffix", "title": "" }, { "docid": "e884b6d2228718539b7146a22ab1025c", "score": "0.5438317", "text": "def fix_missing_period(line):\n if \"@highlight\" in line:\n return line\n if line==\"\":\n return line\n if line[-1] in config.END_TOKENS:\n return line\n return line + \" .\"", "title": "" }, { "docid": 
"907a6c6bbf3e500a5d2c055f2fdd6e9b", "score": "0.5438267", "text": "def extract_suffix(name):\n return name[name.index('['):] if '[' in name else None", "title": "" }, { "docid": "8226f7a1233b2667c987162d5f799bf5", "score": "0.5429417", "text": "def fix_missing_period(line):\r\n if \"@highlight\" in line: return line\r\n if line==\"\": return line\r\n if line[-1] in END_TOKENS: return line\r\n # print line[-1]\r\n return line + \" .\"", "title": "" }, { "docid": "cc3b95755f7a380d2f0cc788915a14d9", "score": "0.54278207", "text": "def get_extension(self):\n ext = os.path.splitext(self.img.name)[1]\n if ext:\n # Remove period from extension\n return ext[1:]\n return ext", "title": "" }, { "docid": "f9040d8aacb0904390a4e32c3cbe593a", "score": "0.54267836", "text": "def _clean_name(name):\n pattern = r'(\\b[A-Za-z]\\.? ?\\b)|\\.'\n return regex.sub(pattern, '', name).strip()", "title": "" }, { "docid": "33ace5d9085617faf02c2e61cc2fa662", "score": "0.5423073", "text": "def str_rstrip(S, substr):\n # This is akin to ${VAR%$substr} in Bourne-shell dialect.\n if len(substr) > 0 and S.endswith(substr):\n return S[:-len(substr)]\n else:\n return S", "title": "" } ]
768a424a7fbaf8e532540aedfefb3526
Load rules from database
[ { "docid": "df8400c592d652b7f4d401077a1a95a7", "score": "0.6805147", "text": "def load_rules(self):\n self.logger.debug(\"Loading rules\")\n self.rules = {}\n self.back_rules = {}\n nr = 0\n nbr = 0\n for c in EventClass.objects.all():\n if c.disposition:\n r = []\n for dr in c.disposition:\n rule = Rule(c, dr)\n r += [rule]\n nr += 1\n if dr.combo_condition != \"none\" and dr.combo_window:\n for cc in dr.combo_event_classes:\n try:\n self.back_rules[cc.id] += [dr]\n except KeyError:\n self.back_rules[cc.id] = [dr]\n nbr += 1\n self.rules[c.id] = r\n self.logger.debug(\"%d rules are loaded. %d combos\" % (nr, nbr))", "title": "" } ]
[ { "docid": "a468dffbce4892037f26c4c154e990e6", "score": "0.6632937", "text": "def load_rules(self, force_reload=False):\n self.enforcer.load_rules(force_reload)", "title": "" }, { "docid": "7042a162636269b04044cde3ba146696", "score": "0.66135895", "text": "def handle_rules():\n Rule.config_into_rules()\n create_new_rules()\n return Rule.load_rules()", "title": "" }, { "docid": "331c71541bb944a16154cc9038b60196", "score": "0.6574628", "text": "def reload(self):\n li = readcsv('rules', self._dir)\n self._list = []\n for r in li:\n desc = r[8] if len(r) > 8 else ''\n rule = Rule(enabled=r[0], name=r[1], src=r[2],\n src_serv=r[3], dst=r[4], dst_serv=r[5], action=r[6],\n log_level=r[7], desc=desc)\n self._list.append(rule)", "title": "" }, { "docid": "1089c425665c8ab8f8ef02c0ca9055b8", "score": "0.65646553", "text": "def parse_rules(path_to_rules):\n # Read Data\n print(\"Reading file: \", path_to_rules, \"...\")\n data_file = open(path_to_rules,'r')\n lines = data_file.readlines()\n data_file.close()\n\n # Parse Rules\n rules = {}\n for line in lines:\n rule = Rule(line)\n rules[rule.outer_bag] = rule \n\n return rules", "title": "" }, { "docid": "06bf7a0b7f176a97e1bb65ca20708bd4", "score": "0.65192515", "text": "def load_rules_from_mysql(configs: dict) -> RuleBunch:\n import pymysql\n\n for key in [\"host\", \"port\", \"user\", \"password\", \"db\", \"table\", \"customer\"]:\n assert key in configs, \"mysql configs error!\"\n\n words_rules = []\n context_rules = []\n intent_labels = []\n\n db = pymysql.connect(host=configs[\"host\"], port=configs[\"port\"],\n user=configs[\"user\"], password=configs[\"password\"])\n cursor = db.cursor()\n sql = \"select words_rule, context_rule, intent_labels \" \\\n \"from {db}.{table} \" \\\n \"where in_use=1 and customer='{customer}'\". 
\\\n format(db=configs[\"db\"], table=configs[\"table\"],\n customer=configs[\"customer\"])\n cursor.execute(sql)\n for words_rule, context_rule, intent_label in cursor.fetchall():\n if not intent_label or not intent_label.strip():\n continue\n if not words_rule and (not context_rule or context_rule.strip() == \"{}\"):\n continue\n words_rules.append(words_rule) if words_rule \\\n else words_rules.append(\"\")\n context_rules.append(context_rule) if context_rule \\\n else context_rules.append({})\n intent_labels.append([label.stip() for label in intent_label.split(\",\")])\n\n cursor.close()\n db.close()\n\n return RuleBunch(words_rules=words_rules, context_rules=context_rules,\n intent_labels=intent_labels)", "title": "" }, { "docid": "b0a6e5114c248481151e6122374f6313", "score": "0.6499273", "text": "def parse_rules(path_to_rules):\n # Read Data\n data_file = open(path_to_rules,'r')\n lines = data_file.readlines()\n data_file.close()\n\n # Parse Rules\n rules = {}\n for line in lines:\n rule = Rule(line)\n rules[rule.outer_bag] = rule \n\n return rules", "title": "" }, { "docid": "b8b5d868f24f5e982e93b60d443e5a47", "score": "0.6471951", "text": "def _load_validaton_rules(self) -> None:\n # self.specs_version\n rules_file = Path(*self.PATH_TO_RULES_FILE)\n\n with open(rules_file) as f:\n csv_reader = csv.reader(f, delimiter=\",\", quotechar='\"')\n for line in csv_reader:\n spec_version = line[1]\n if spec_version == self.specs_version:\n rule = KeyValueValidationRule.from_rule(line)\n self._rules[rule.keyword] = rule\n\n if rule.mandatory_keyword:\n self.mandatory_keywords.append(rule.keyword)", "title": "" }, { "docid": "37d93c2ad9043ac2db578c8082ef0a1e", "score": "0.64711934", "text": "def test_load_available_rules():\n rules_dir = \"rules\"\n rule_files = pkg_resources.resource_listdir(seabird.__name__, rules_dir)\n rule_files = [f for f in rule_files if re.match(\"^(?!refnames).*json$\", f)]\n for rule_file in rule_files:\n print(\"loading rule: %s\", (rule_file))\n text = pkg_resources.resource_string(\n seabird.__name__, os.path.join(rules_dir, rule_file)\n )\n rule = json.loads(text.decode(\"utf-8\"))\n assert type(rule) == dict\n assert len(rule.keys()) > 0", "title": "" }, { "docid": "957807c10e8970bb2fb33567462923a0", "score": "0.6444081", "text": "def load_rules(self, force_reload=False):\r\n\r\n if force_reload:\r\n self.use_conf = force_reload\r\n\r\n if self.use_conf:\r\n if not self.policy_path:\r\n self.policy_path = self._get_policy_path()\r\n\r\n reloaded, data = fileutils.read_cached_file(\r\n self.policy_path, force_reload=force_reload)\r\n if reloaded or not self.rules:\r\n rules = Rules.load_json(data, self.default_rule)\r\n self.set_rules(rules)\r\n LOG.debug(\"Rules successfully reloaded\")", "title": "" }, { "docid": "6860385f0b8763a560ab69739a55a114", "score": "0.6423734", "text": "def _get_rules_from_database(fsort_code):\n if Rule: # running under Django so use Django ORM, otherwise use psycopg2 direct access\n dbrules = Rule.objects.filter(start_sort__lte=fsort_code, end_sort__gte=fsort_code).values()\n else:\n dbrules = []\n cursor.execute('SELECT * from rules_rule where start_sort <=%s and end_sort >=%s', (fsort_code, fsort_code))\n for row in cursor:\n rules.append(row)\n return dbrules", "title": "" }, { "docid": "3af9fee61884d3bb8e605300236cdc12", "score": "0.6400004", "text": "def rules(self):\n if self._rules is None:\n self._rules = [x['id'] for x in self.db.execute(\"SELECT id FROM rules\").fetchall()]\n return self._rules", "title": "" }, { "docid": 
"5b5b3602fd78f70b13fff4f2e9472226", "score": "0.63580424", "text": "def load_rules(self, fn):\n if not os.path.exists(fn):\n return\n fd = open(fn, 'r')\n lines = []\n if fd:\n lines = fd.readlines()\n fd.close()\n\n for line in lines:\n if line.startswith('#'):\n pass\n elif line.startswith('-A '):\n # remove head\n ss = line[2:]\n # remove comment\n ss = ss.split('#')[0]\n #\n m = r'(\\w*)\\s*:\\s*(\\d*)'\n res = re.findall(m, ss)\n if len(res):\n k, v = res[0]\n self._user2id[k] = v\n\n elif line.startswith('-T '):\n # remove head\n ss = line[2:]\n # remove comment\n ss = ss.split('#')[0]\n #\n m = r'([\\w\\.]*)\\s*:\\s*(.*)'\n res = re.findall(m, ss)\n if len(res):\n #print res\n k, v = res[0]\n self._tst2user[k] = v\n elif line.startswith('-V '):\n # remove head\n ss = line[2:]\n # remove comment\n ss = ss.split('#')[0]\n #\n m = r'([\\w\\.]*)\\s*:\\s*(.*)'\n res = re.findall(m, ss)\n if len(res):\n #print res\n k, v = res[0]\n if v and len(v):\n self._cfg[k] = v\n print 'load env :', k, v\n elif line.startswith('-P '):\n # remove head\n ss = line[2:]\n # remove comment\n ss = ss.split('#')[0]\n #\n m = r'([\\w\\.]*)\\s*:\\s*(.*)'\n res = re.findall(m, ss)\n if len(res):\n #print res\n k, v = res[0]\n if v and len(v):\n self._dut2pid[k] = v\n elif line.startswith('-B '):\n # remove head\n ss = line[2:]\n # remove comment\n ss = ss.split('#')[0]\n #\n m = r'([\\w\\.]*)\\s*:\\s*(.*)'\n res = re.findall(m, ss)\n if len(res):\n #print res\n k, v = res[0]\n if v and len(v):\n self._tb2author[k] = v\n else:\n pass\n #\n #pprint(self._user2id)\n #pprint(self._tst2user)\n #pprint(self._cfg)\n #exit(0)", "title": "" }, { "docid": "6827ebe998ab9fed45a1c5d187ba4935", "score": "0.6308695", "text": "def _read_rules(self):\n import xlrd\n workbook = xlrd.open_workbook(self._RulesPath)\n content = workbook.sheet_by_index(0)\n self._RulesNum = content.nrows\n for i in range(0, self._RulesNum):\n premise = []\n j = 1\n while j < content.ncols and content.cell_value(i, j) != '':\n premise.append(content.cell_value(i, j))\n j += 1\n conclusion = content.cell_value(i, 0)\n self._Rules.append({conclusion : set(premise)})\n print('Read successfully~')\n return self._Rules", "title": "" }, { "docid": "d7550ff9d0ca8b0817b9b4dea2d215ab", "score": "0.62851757", "text": "def LoadRules(cls, dirname):\n # check if this directory has already been loaded\n rules_file = os.path.join(dirname, 'RULES')\n if not os.path.isfile(rules_file):\n TermColor.Error('Cannot find file: %s' % rules_file)\n return\n\n if rules_file in cls.loaded:\n return\n\n with cls.LOAD_LOCK:\n cls.loaded |= set([rules_file])\n # Save basedir for restoration later.\n oldbasedir = cls.basedir\n cls.basedir = dirname\n TermColor.VInfo(5, 'Reading %s' % rules_file)\n exec(compile(open(rules_file).read(), rules_file, 'exec'))\n cls.basedir = oldbasedir", "title": "" }, { "docid": "d89ee38291237ed0b6a4caffcf78d3b4", "score": "0.6182408", "text": "def _get_rules(self, sort_code ):\n def _get_rules_from_database(fsort_code):\n \"\"\" get rules from Django ORM or psycopg depending if run from command line or Django\n :param fsort_code: str\n :rtype: rules as list[int][str]\n \"\"\"\n if Rule: # running under Django so use Django ORM, otherwise use psycopg2 direct access\n dbrules = Rule.objects.filter(start_sort__lte=fsort_code, end_sort__gte=fsort_code).values()\n else:\n dbrules = []\n cursor.execute('SELECT * from rules_rule where start_sort <=%s and end_sort >=%s', (fsort_code, fsort_code))\n for row in cursor:\n rules.append(row)\n return 
dbrules\n\n if not self._CACHE:\n return _get_rules_from_database(sort_code)\n\n rule_does_not_exist = 'DoesNotExist'\n cached_rules = cache.get(sort_code)\n if cached_rules == rule_does_not_exist:\n print('cache get DOES_NOT_EXIST')\n return None\n elif cached_rules is not None:\n rules = {}\n for i, obj in enumerate(serializers.deserialize(\"json\", cached_rules)):\n rules[i]=obj.object\n print('cache get {start} {end}'.format(start=obj.object['start_sort'], end=obj.object['end_sort']))\n else:\n rules = _get_rules_from_database(sort_code)\n if rules:\n data = serializers.serialize(\"json\", rules)\n cache.set(sort_code, data)\n print('db get and cached {start}'.format(start=rules[0]['start_sort']))\n else:\n cache.set(sort_code, rule_does_not_exist)\n print('db get doesnt exit - set cache does not exist')\n return rules", "title": "" }, { "docid": "27f5ffb5bf80cfa7b29d9e0d261e5093", "score": "0.61659503", "text": "def load_file(file):\n with open(file) as f:\n rules = f.readlines()\n return AdblockRules(rules)", "title": "" }, { "docid": "15c77fb15d3e93f8feed94a168fb9481", "score": "0.6149682", "text": "def _load_to_database(self):\n pass", "title": "" }, { "docid": "70055b261b9bdd7fdbbe56478d0ae98a", "score": "0.6116085", "text": "def _insert_rules(self, data, rdkit_func):\n n_before = self.db.execute(\"SELECT count(*) FROM rules\").fetchone()['count(*)']\n # First, convert data to a Dict-like structure with key as identifiers\n if not isinstance(data, collections.abc.Mapping):\n offset = RuleBurner._get_highest_int(self.rules) + 1 if self.rules else 0\n data = {k+offset: v for k, v in enumerate(data)}\n # Sniff the structure of data values (Dict-like or plain text)\n example = data[next(iter(data))]\n if isinstance(example, collections.abc.Mapping):\n other_colnames = [x for x in example.keys() if x != 'rd_rule']\n else:\n other_colnames = []\n # Standardize items and generate the RDKit objects\n logger.debug(f\"Inserting {len(data)} RDKit rules into the database.\")\n cols_str = ','.join(['id', 'rd_rule'] + other_colnames)\n values_str = ','.join(['?'] * (2 + len(other_colnames)))\n self.db.executemany(f\"insert into rules ({cols_str}) values ({values_str})\",\n self._gen_records(data, rdkit_func, 'rd_rule', other_colnames))\n self.db.commit()\n self._rules = None # Reset the list of identifiers\n n_after = self.db.execute(\"SELECT count(*) FROM rules\").fetchone()['count(*)']\n return n_after - n_before", "title": "" }, { "docid": "a763aa8c334b2284b40972ea9f1b6a7e", "score": "0.61017674", "text": "def load_rules(self, force_reload=False, overwrite=True):\n\n # double-checked locking\n if self.load_once and self._policy_loaded:\n return\n with self._load_lock:\n if self.load_once and self._policy_loaded:\n return\n\n reloaded, data = _cache.read_file(\n self.policy_file, force_reload=force_reload)\n self._policy_loaded = True\n if reloaded or not self.rules:\n rules = Rules.load_json(data, self.default_rule, self.raise_error)\n self._set_rules(rules, overwrite=overwrite)\n LOG.debug('Reload policy file: %s', self.policy_file)", "title": "" }, { "docid": "1cddc646d2040cb0fa17b4dc8575adba", "score": "0.60963845", "text": "def load_from_sql(self):\n pass", "title": "" }, { "docid": "f26581a2224fbec33048a294296ca346", "score": "0.60534805", "text": "def load_urirules() :\n pass", "title": "" }, { "docid": "441ac03cb3631c16dd168bd883b544d7", "score": "0.60518074", "text": "def load_rca_rules(self):\n self.logger.info(\"Loading RCA Rules\")\n n = 0\n self.rca_forward = {}\n 
self.rca_reverse = {}\n for a in AlarmClass.objects.filter(root_cause__0__exists=True):\n if not a.root_cause:\n continue\n self.rca_forward[a.id] = []\n for c in a.root_cause:\n rc = RCACondition(a, c)\n self.rca_forward[a.id] += [rc]\n if rc.root.id not in self.rca_reverse:\n self.rca_reverse[rc.root.id] = []\n self.rca_reverse[rc.root.id] += [rc]\n n += 1\n self.logger.info(\"%d RCA Rules have been loaded\" % n)", "title": "" }, { "docid": "3d50fc90d88b435d13296c610dea4598", "score": "0.598082", "text": "def load_data(database_filepath):", "title": "" }, { "docid": "3d50fc90d88b435d13296c610dea4598", "score": "0.598082", "text": "def load_data(database_filepath):", "title": "" }, { "docid": "3d50fc90d88b435d13296c610dea4598", "score": "0.598082", "text": "def load_data(database_filepath):", "title": "" }, { "docid": "5cdd07e44220515cdb52322c64e9f3d6", "score": "0.5976721", "text": "def get_rules_from_db(tenantId):\n import MySQLdb as mysql\n conn = mysql.connect(charset=DB_CHARSET, use_unicode=True,\n host=DB_HOST, user=DB_USER, passwd=DB_PASSWD, db=DB_NAME)\n #conn = db.connect(\"cloto.db\")\n #conn.row_factory = db.Row\n cur = conn.cursor()\n SQL = \"SELECT * FROM %s.cloto_specificrule WHERE specificRule_Id IN \" \\\n \"(SELECT ruleId FROM %s.cloto_subscription WHERE %s IN \" \\\n \"(SELECT %s FROM %s.cloto_entity WHERE tenantId='%s'))\" % (DB_NAME, DB_NAME, SERVERID, SERVERID, DB_NAME, tenantId)\n cur.execute(SQL)\n while True:\n r = cur.fetchone()\n if not r:\n conn.close()\n break\n else:\n rule_name = r[2]\n rule_cond = r[5]\n rule_action = r[6]\n #rule_name = r['name']\n #rule_cond = r['condition']\n #rule_action = r['action']\n e1.BuildRule(rule_name, rule_cond, rule_action)", "title": "" }, { "docid": "fbe535a6ee74001d7ae8331d501c5e5a", "score": "0.59619826", "text": "def read_rules(rules_file):\n rows = []\n f = open(rules_file, 'rb')\n reader = csv.reader(f, delimiter='_', quoting=csv.QUOTE_NONE)\n reader.next() #skip headings\n for r in reader:\n #skip emtpy rules\n if \"\" == r[0] or \"\" == r[1]:\n continue\n rows.append((r[0], r[1]))\n f.close()\n return rows", "title": "" }, { "docid": "4f2af410adab36202ff38abd2c60335f", "score": "0.5934355", "text": "def _load_pre_generated_assumption_rules():\n _assume_rules=FactRules._from_python(_assumptions)\n return _assume_rules", "title": "" }, { "docid": "f865fdfd251806efbe46fa7036d1b3b5", "score": "0.59111065", "text": "def load(self):\n c = self.connection.cursor()\n\n # Now we need to simply load all data from each table\n # we are interested in. 
To prevent serious code duplication, we\n # create some objects that group everything needed for extraction.\n class Extractor:\n def __init__(self, db_name, associated_dict, db_type):\n self.db_name = db_name\n self.associtaed_dict = associated_dict\n self.db_type = db_type\n\n extractors = [\n Extractor(\"Category\", self.categories, Category),\n Extractor(\"exercise\", self.exercises, Exercise),\n Extractor(\"Routine\", self.routines, Routine),\n Extractor(\"RoutineSection\", self.routine_sections, RoutineSection),\n Extractor(\"RoutineSectionExercise\", self.routine_section_exercises, RoutineSectionExercise),\n Extractor(\"RoutineSectionExerciseSet\", self.routine_sets, RoutineSectionExerciseSet)\n ]\n # Iterate all tables\n for extr in extractors:\n # Iterate all entries\n for row in c.execute('SELECT * FROM {}'.format(extr.db_name)):\n # Create new object of correct type and assert that it is base type DbObject\n o = extr.db_type()\n assert isinstance(o, DbObject)\n # Load data from row\n o.load(row)\n # Add to dict\n extr.associtaed_dict[o.id] = o", "title": "" }, { "docid": "dd64ff7fb0b26fb963a4c9f09418d1f6", "score": "0.58943504", "text": "def load_patterns():\n\n print \"Patterns\"\n\n Pattern.query.delete()\n\n for row in open(\"seed_data/patterns.txt\"):\n row = row.rstrip()\n pattern_id, pattern_name, pattern_link, pattern_pdf, chosen, group_id = row.split(\"|\")\n\n pattern = Pattern(pattern_id=pattern_id,\n pattern_name=pattern_name,\n pattern_link=pattern_link,\n pattern_pdf=pattern_pdf,\n chosen=chosen,\n group_id=group_id\n )\n\n db.session.add(pattern)\n\n db.session.commit()", "title": "" }, { "docid": "6a9de9de61ef57dde7d0a692ff818089", "score": "0.5888926", "text": "def load_database(self):\n with open(self.filename, 'r') as f:\n for line in f.readlines()[1:]:\n _, position, name, salary, hourly, hours_accrued = line.split(',')\n if position == 'Doctor':\n self.append(Doctor(name, int(salary)))\n elif position == 'Receptionist':\n self.append(Receptionist(name, int(salary), hourly, int(hours_accrued)))", "title": "" }, { "docid": "372095a4f4eb29cfc61d3a4e515d159b", "score": "0.58780694", "text": "def load_rules(rulefile, use_json=False):\n if use_json:\n import json\n with open(rulefile) as fin:\n return json.load(fin)\n else:\n import yaml\n with open(rulefile) as fin:\n return yaml.load(fin)", "title": "" }, { "docid": "1a48fb8c159b1268ddd85cc56e91775a", "score": "0.58744746", "text": "def read_rules(self):\n dict_rules = {} # dictionnaire contenant les valeurs de naissance, survie et mort des cellules\n fr = open(\"rules.txt\", 'r')\n for line in fr:\n line = line.strip() # retirer le \\n final\n color, rules = line.split(':')\n list_rules = rules.split(',')\n list_rules = [int(i) for i in list_rules]\n dict_rules[color] = list_rules # ajouter un element {color : [list[0], list[1], ..., list[n]]}\n fr.close()\n return dict_rules", "title": "" }, { "docid": "e6de8053c0591c4a10dae02664c66ba0", "score": "0.5868904", "text": "def getRules(self):\n try:\n with open(self.rulesFile, 'r') as f:\n rules = [line.rstrip('\\n') for line in f]\n except Exception as e:\n QtGui.QMessageBox.warning(self, self.tr('Warning!'), self.tr('Problem reading file!'))\n return\n \n ret = list()\n for line in rules:\n split = line.split(',')\n layer1 = split[0] \n necessity = self.necessity[int(split[1].split('_')[0])]\n predicate = self.predicates[int(split[2].split('_')[0])]\n layer2 = split[3]\n cardinality = split[4]\n min_card = cardinality.split('..')[0]\n max_card = 
cardinality.split('..')[1]\n rule = split[1]+' '+split[2]\n ret.append((layer1, necessity, predicate, layer2, min_card, max_card, rule))\n \n return ret", "title": "" }, { "docid": "26b9fed6cee5430d27880225965b8ec0", "score": "0.58318293", "text": "def _from_catalog(self):\n for rule in self.fetch():\n do_loc = rule.definition.index(' DO ')\n if 'WHERE' in rule.definition:\n rule.condition = rule.definition[rule.definition.index(\n ' WHERE ') + 7:do_loc]\n if hasattr(rule, 'instead') and rule.instead:\n do_loc += 8\n rule.actions = rule.definition[do_loc + 4:-1]\n del rule.definition\n self[rule.key()] = rule", "title": "" }, { "docid": "32d99e07d0d4efaab6903c97bb7a522d", "score": "0.58120155", "text": "def read_rules(rules):\n\n\twith open(rules, 'r') as file:\n\t\tlines = file.readlines()\n\n\trules = {'colon': {}, 'cervix': {}, 'celiac': {}, 'lung': {}}\n\tfor line in lines:\n\t\ttrigger, candidates, position, mode, use_cases = line.strip().split('\\t')\n\t\tuse_cases = use_cases.split(',')\n\t\tfor use_case in use_cases:\n\t\t\trules[use_case][trigger] = (candidates.split(','), position, mode)\n\treturn rules", "title": "" }, { "docid": "22dfbad0c55ac18d7811a503912bae8b", "score": "0.57797337", "text": "def load_style_rules(self):\n if \"word_substitutions_file\" in self.config:\n with open(self.config[\"word_substitutions_file\"], \"r\", encoding=\"utf-8\") as f:\n self.disallowed_words = yaml.load(f)\n else:\n logger.warning(\"No 'word_substitutions_file' found in config.\")\n self.disallowed_words = {}\n\n if \"phrase_substitutions_file\" in self.config:\n with open(self.config[\"phrase_substitutions_file\"], \"r\", encoding=\"utf-8\") as f:\n self.disallowed_phrases = yaml.load(f)\n else:\n logger.warning(\"No 'phrase_substitutions_file' found in config.\")\n self.disallowed_phrases = {}", "title": "" }, { "docid": "b4196d35dadf893519ef3238584656f9", "score": "0.5764781", "text": "def load_records_from_file_to_db(self):\n self.service.load_records()", "title": "" }, { "docid": "9a67c7ba2cce6bcba7641d4fba349f08", "score": "0.5756084", "text": "def load_static_rules(self):\n\n valid_entries = defaultdict(list)\n\n for switch in Configuration.get('switches'):\n if \"static_rules\" in switch and Configuration.get('static_rules') == True:\n data = Configuration.load(switch['static_rules'])[\"entries\"]\n\n for entry in data:\n if entry['table'] != \"ingress.ipv4_c.ipv4\":\n continue\n\n e = TableEntry(switch=entry[\"switch\"],\n match_fields={\"hdr.ipv4.dstAddr\": (str(entry[\"match_fields\"][0]), int(entry[\"match_fields\"][1])),\n \"meta.ports.status\": (BierComputation.id_to_bitstring(id=int(entry[\"match_fields\"][2])), int(entry[\"match_fields\"][3]))},\n action_name=entry[\"action_name\"],\n action_params={\"port\": int(entry[\"action_params\"])},\n priority=1\n )\n\n\n TableEntryManager.handle_table_entry(self.table_manager,\n table_name=entry[\"table\"],\n table_entry=e)\n\n valid_entries[entry[\"switch\"]].append(e.match_fields)\n\n Log.async_info(\"Static rules for IPv4 loaded.\")\n\n return valid_entries", "title": "" }, { "docid": "898aeff0c4b264f985a278a27cd146ec", "score": "0.5754242", "text": "def _load_file(filename):\n path = pathlib.Path(__file__).parent / 'rules' / filename\n with path.open(encoding='utf-8') as f:\n return set(line.strip() for line in f)", "title": "" }, { "docid": "eec5b60b574404f0045a25de6e84835d", "score": "0.57064897", "text": "def load_data() -> tuple[dict[str, str], dict[str, str], dict[str, str]]:\n with open('rules.json') as f:\n data = 
json.load(f)\n rules = data['rules']\n categories = data['categories']\n searchable_categories = {\n make_string_searchable(name): number\n for number, name in categories.items()\n }\n return rules, categories, searchable_categories", "title": "" }, { "docid": "d94b5ef48d2357ccf214056ec111b614", "score": "0.57035416", "text": "def set_rules(self):\n\n FILEOPENOPTIONS = dict(title='Choose rule book', initialfile='', filetypes=[('Comma Separated Values', ('.csv')), ('Text files', ('.txt')), ('All files', ('*.*'))])\n fhandle = filedialog.askopenfile(**FILEOPENOPTIONS)\n\n with open(fhandle.name, 'rt', encoding='utf8') as rulebook:\n tmpcontent = rulebook.readlines()\n csvReader = csv.reader(filter(lambda row:row[0]!='#', tmpcontent))\n self.rule = [row for row in csvReader]\n self.rule_path.set(\"Rule: \"+fhandle.name)", "title": "" }, { "docid": "b4096b5df1e8b51beed2ec0a8b183640", "score": "0.5694539", "text": "def LoadRules(path):\n config = yaml.load(open(path))\n if (CONFIG_GROUPS not in config) or (CONFIG_RULES not in config):\n raise ValueError('Syntax error in %s' % path)\n\n groups = config[CONFIG_GROUPS]\n rules = {}\n for key, value in config[CONFIG_RULES].items():\n # Expand value into imports\n imports = []\n for package in value:\n match = re.match(CONFIG_GROUP_PATTERN, package)\n if match:\n imports += groups[match.group(1)]\n else:\n imports.append(package)\n\n match = re.match(CONFIG_GROUP_PATTERN, key)\n if match:\n # Duplicate multiple rules\n for module in groups[match.group(1)]:\n rules[module] = imports\n else:\n rules[key] = imports\n\n def RulePriority(key):\n \"\"\"Priority of a rule.\n\n Larger number means more strict and should be used first.\n \"\"\"\n if key.startswith('='):\n return 4\n if key.endswith('.*'):\n return 2\n if key == '*':\n return 1\n return 3\n\n return sorted(rules.items(),\n key=lambda k_v: (RulePriority(k_v[0]), k_v[0]),\n reverse=True)", "title": "" }, { "docid": "198fd4d595c25d2c49c350dd7b8d51ba", "score": "0.5694064", "text": "def load(self):\n script = 'SELECT * FROM settings'\n data = self.db.get(script)\n self.ids = {}\n for line in data:\n setattr(self, line[1], line[2])\n self.ids[line[1]] = line[0]", "title": "" }, { "docid": "5154d592e2449da3592e8ff8748b98dd", "score": "0.56868285", "text": "def load_case_100() -> None:\n with open(cypher_file('case_100', 'i1'), \"r\") as f:\n Trinity().clean().run(f.read()).create_constraints()", "title": "" }, { "docid": "2cd5e6ba477a8cec77b4a6ac32b4787b", "score": "0.56645215", "text": "def load_logic(self):\n for logic in models.Logic.objects.all():\n try:\n expression = self.parse_logic(logic)\n except ParseError:\n continue\n if logic.symmetric:\n self.logic_to_expr[logic] = (expression, )\n else:\n expression2 = self.parse_logic(logic, switch_sides=True)\n self.logic_to_expr[logic] = (expression, expression2)", "title": "" }, { "docid": "203b5ddae12783a65690f9fd90b39273", "score": "0.56606114", "text": "def load_rules(self, rule_dir):\r\n rule_list = {}\r\n for rule_file in listdir(rule_dir):\r\n if rule_file.endswith('.json'):\r\n #aspect为community,compatibility等\r\n aspect = rule_file[:-5]\r\n rule_list[aspect] = []\r\n for rule_line in open(join(rule_dir, rule_file)).readlines():\r\n rule_json = json.loads(rule_line.strip())\r\n rule_json['aspect'] = aspect\r\n #rule_list[community]=\r\n #rule_list[compatibility]=\r\n rule_list[aspect].append(rule_json)\r\n return rule_list", "title": "" }, { "docid": "cb2b14cef90c76b73954dc597aab084b", "score": "0.5649893", "text": "async def rules(self, 
ctx):\n\t\treturn", "title": "" }, { "docid": "1feb09d48d292a825a04c76bbda62131", "score": "0.5640476", "text": "def implement_rules(df: pd.DataFrame, rules_path: str = 'rules.txt') -> pd.DataFrame:\n df = df.reset_index()\n\n with open(rules_path) as f:\n for line in f:\n\n if not line.strip():\n continue\n\n flag = line[:2]\n\n if flag == '-s':\n df = set_flag(df, line)\n elif flag == '-l':\n df = like_flag(df, line)\n elif flag == '-a':\n df = array_flag(df, line)\n elif flag == '-r':\n df = raw_flag(df, line)\n else:\n print(\"No flags in line to process in rules.txt\")\n\n return df", "title": "" }, { "docid": "e6b753b05486d69bacc35a1b27c9097a", "score": "0.5638967", "text": "def _load_operators(self, rule_path):\n skipped = 0\n with open(rule_path) as infile:\n # Get all reaction rules from tsv file and store in dict (rdr)\n rdr = csv.DictReader((row for row in infile if not\n row.startswith('#')), delimiter='\\t')\n for rule in rdr:\n try:\n # Get reactants and products for each reaction into list\n # form (not ; delimited string)\n rule['Reactants'] = rule['Reactants'].split(';')\n rule['Products'] = rule['Products'].split(';')\n # Ensure that all coreactants are known and accounted for\n all_rules = rule['Reactants'] + rule['Products']\n for coreactant_name in all_rules:\n if ((coreactant_name not in self.coreactants\n and coreactant_name != 'Any')):\n raise ValueError(\n \"Undefined coreactant:{coreactant_name}\"\n )\n # Create ChemicalReaction object from SMARTS string\n rxn = AllChem.ReactionFromSmarts(rule['SMARTS'])\n rule.update({'_id': rule['Name'],\n 'Reactions_predicted': 0,\n 'SMARTS': rule['SMARTS']})\n # Ensure that we have number of expected reactants for\n # each rule\n if rxn.GetNumReactantTemplates() != len(rule['Reactants'])\\\n or rxn.GetNumProductTemplates() != \\\n len(rule['Products']):\n skipped += 1\n print(\"The number of coreactants does not match the \"\n \"number of compounds in the SMARTS for reaction \"\n \"rule: \" + rule['Name'])\n if rule['Name'] in self.operators:\n raise ValueError(\"Duplicate reaction rule name\")\n # Update reaction rules dictionary\n self.operators[rule['Name']] = (rxn, rule)\n except Exception as e:\n raise ValueError(str(e) \n + f\"\\nFailed to parse {rule['Name']}\")\n if skipped:\n print(\"WARNING: {skipped} rules skipped\")", "title": "" }, { "docid": "fac0289b189eab1d59689f16dfe6f270", "score": "0.56253815", "text": "def _init_rules(self, f):\n rules_file = codecs.open(f, \"r\", 'utf-8')\n\n # compile the orthography rules\n for line in rules_file:\n line = line.strip()\n\n # skip any comments\n if line.startswith(\"#\") or line == \"\":\n continue\n\n line = unicodedata.normalize(\"NFD\", line)\n rule, replacement = line.split(\",\")\n rule = rule.strip() # just in case there's trailing whitespace\n replacement = replacement.strip() # because there's probably trailing whitespace!\n self.op_rules.append(re.compile(rule))\n self.op_replacements.append(replacement)\n rules_file.close()\n\n # check that num rules == num replacements; if not fail\n if len(self.op_rules) != len(self.op_replacements):\n raise ValueError(\"[i] Number of inputs does not match number of outputs in the rules file.\")", "title": "" }, { "docid": "3278ecc215b798f889edbdefb264c2ac", "score": "0.561341", "text": "def getRules(self):\n try:\n while i < len(list_):\n rules.add(GdlFactory.create(list_.get(i)))\n i += 1\n return rules\n except GdlFormatException as e:\n e.printStackTrace()\n return None\n except SymbolFormatException as e:\n 
e.printStackTrace()\n return None", "title": "" }, { "docid": "5a42122cae1547fbcb51948fc9a87600", "score": "0.56125367", "text": "def _load_cosmosdb_virtual_network_rules(\n neo4j_session: neo4j.Session, database_account: Dict, azure_update_tag: int,\n) -> None:\n if 'virtual_network_rules' in database_account and len(database_account['virtual_network_rules']) > 0:\n database_account_id = database_account['id']\n virtual_network_rules = database_account['virtual_network_rules']\n\n ingest_virtual_network_rules = \"\"\"\n UNWIND $virtual_network_rules_list AS vnr\n MERGE (rules:AzureCosmosDBVirtualNetworkRule{id: vnr.id})\n ON CREATE SET rules.firstseen = timestamp()\n SET rules.lastupdated = $azure_update_tag,\n rules.ignoremissingvnetserviceendpoint = vnr.ignore_missing_v_net_service_endpoint\n WITH rules\n MATCH (d:AzureCosmosDBAccount{id: $DatabaseAccountId})\n MERGE (d)-[r:CONFIGURED_WITH]->(rules)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = $azure_update_tag\n \"\"\"\n\n neo4j_session.run(\n ingest_virtual_network_rules,\n virtual_network_rules_list=virtual_network_rules,\n DatabaseAccountId=database_account_id,\n azure_update_tag=azure_update_tag,\n )", "title": "" }, { "docid": "750c0a504ffdc3b65880cfc1a387265a", "score": "0.5608313", "text": "def load(self):\n with sqlite3.connect(self._dbFilename) as db:\n cursor = db.cursor()\n try:\n self._readSystemTable(cursor)\n except (sqlite3.OperationalError, sqlite3.DatabaseError):\n raise exceptions.RepoInvalidDatabaseException(\n self._dbFilename)\n self._readOntologyTable(cursor)\n self._readReferenceSetTable(cursor)\n self._readReferenceTable(cursor)\n self._readDatasetTable(cursor)\n self._readReadGroupSetTable(cursor)\n self._readReadGroupTable(cursor)\n self._readVariantSetTable(cursor)\n self._readCallSetTable(cursor)\n self._readVariantAnnotationSetTable(cursor)\n self._readFeatureSetTable(cursor)\n self._readBioSampleTable(cursor)\n self._readIndividualTable(cursor)\n self._readPhenotypeAssociationSetTable(cursor)\n self._readRnaQuantificationSetTable(cursor)", "title": "" }, { "docid": "b079cb79fea604d5e7752083c34e1ad9", "score": "0.5608214", "text": "def load_schema(self):", "title": "" }, { "docid": "bc4bdc79c82fc8118ff692e9ce773a71", "score": "0.55886734", "text": "def read_rules(grammar_filename):\n rules = set()\n with open(grammar_filename) as f:\n for rule in f.readlines():\n rule = rule.strip()\n log_prob, lhs, rhs = rule.split('\\t')\n rhs = tuple(rhs.split(' '))\n assert rhs and rhs[0], rule\n rules.add(GrammarRule(lhs, rhs, math.log(float(log_prob))))\n return rules", "title": "" }, { "docid": "bc4bdc79c82fc8118ff692e9ce773a71", "score": "0.55886734", "text": "def read_rules(grammar_filename):\n rules = set()\n with open(grammar_filename) as f:\n for rule in f.readlines():\n rule = rule.strip()\n log_prob, lhs, rhs = rule.split('\\t')\n rhs = tuple(rhs.split(' '))\n assert rhs and rhs[0], rule\n rules.add(GrammarRule(lhs, rhs, math.log(float(log_prob))))\n return rules", "title": "" }, { "docid": "54c0284c8dfadb75e3390cff4897ab8e", "score": "0.55471975", "text": "def list_rules():\n\n sess = obtain_session()\n all_rules = sess.query(Rule).all()\n rules_schema = RuleSchema(many=True)\n result = rules_schema.dump(all_rules)\n logger.debug(f\"Successfully fetched all the actions.\")\n return make_response(jsonify(result), status.HTTP_200_OK)", "title": "" }, { "docid": "49d65934be4501426b9ae1e5f9714371", "score": "0.5532508", "text": "def get_scraping_rules(self):\r\n if not 
self.conn.table_already_exists(\"shownews_scrapingrule\"):\r\n scraper_utils.log_warning(\r\n \"Table 'shownews_scrapingrule' does not exists in the database.\"\r\n )\r\n return {}\r\n\r\n rules_rows = self.conn.get_fields_by_conditions(\r\n \"shownews_scrapingrule\",\r\n (\"*\",)\r\n )\r\n\r\n rules_map = {\r\n rule_id: ScrapingRule(name=rule_name, is_active=is_active)\r\n for (rule_id, is_active, rule_name) in rules_rows\r\n }\r\n\r\n for rule_id, keyword_name, to_include in self._get_keywords_info():\r\n rules_map[rule_id].add_keyword(keyword_name, to_include)\r\n\r\n for rule_id, tag_name in self._get_tags_info():\r\n rules_map[rule_id].tags.add(tag_name)\r\n\r\n return rules_map", "title": "" }, { "docid": "f2c3c03cd7dcba98b4e49158c5290605", "score": "0.55066884", "text": "def rules(self, rules):\n\n self._rules = rules", "title": "" }, { "docid": "74d3c071044492260fff13f2db55c935", "score": "0.54963946", "text": "def load_ages():\n\n #print \"Ages\"\n\n for row in open('seed_data/u.age'):\n row = row.rstrip()\n age_id, age_category = row.split(\"|\")\n\n age = Age(age_category=age_category)\n\n db.session.add(age)\n db.session.commit()", "title": "" }, { "docid": "80a0e1d1cd678d0c884fa9de22092c05", "score": "0.5472692", "text": "def __init__(self, rules=[]):\n self.rules = rules", "title": "" }, { "docid": "26c9d7a50ed9cb60e0b420f131e3a08f", "score": "0.54593134", "text": "def __init__(self, rules):\r\n\r\n self.rules = rules", "title": "" }, { "docid": "26c9d7a50ed9cb60e0b420f131e3a08f", "score": "0.54593134", "text": "def __init__(self, rules):\r\n\r\n self.rules = rules", "title": "" }, { "docid": "006e43803e5df97887f87b892576efaf", "score": "0.5455256", "text": "def _load_rules(\n rawrules: List[JsonDict],\n enabled_map: Dict[str, bool],\n experimental_config: ExperimentalConfig,\n) -> FilteredPushRules:\n\n ruleslist = [\n PushRule.from_db(\n rule_id=rawrule[\"rule_id\"],\n priority_class=rawrule[\"priority_class\"],\n conditions=rawrule[\"conditions\"],\n actions=rawrule[\"actions\"],\n )\n for rawrule in rawrules\n ]\n\n push_rules = PushRules(ruleslist)\n\n filtered_rules = FilteredPushRules(\n push_rules,\n enabled_map,\n msc1767_enabled=experimental_config.msc1767_enabled,\n msc3664_enabled=experimental_config.msc3664_enabled,\n msc3381_polls_enabled=experimental_config.msc3381_polls_enabled,\n )\n\n return filtered_rules", "title": "" }, { "docid": "eb04fc6e99d5d8d6660ed861b9e2fd59", "score": "0.5440558", "text": "def load(self):\n try:\n infile=open(self.database_file, \"rb\")\n loaded = pickle.load(infile)\n self.operations=loaded['database']\n self.persons=loaded['whos']\n self.categories=loaded['categories']\n self.db_changed = False\n except:\n print(\"could not open\", self.database_file)", "title": "" }, { "docid": "08b03649daeb0db51c8e78c58b825972", "score": "0.5437856", "text": "def __init__(self, rules):\n\n self.setModel(rules)", "title": "" }, { "docid": "f873ed58941eb0b0a875c8d2f8ef4470", "score": "0.54282916", "text": "def _get_rules(self) -> list:\n if not self._rules:\n self._rules = []\n return self._rules", "title": "" }, { "docid": "edc8ab381b5d106c2faa068acf1206a9", "score": "0.54217976", "text": "def load():\n\n print('Loading data from the CSV files. 
Please note that this might take about 5 minutes')\n with open('courses.csv', 'r') as file_in:\n result = map(str, csv.reader(file_in))\n # match '[\"ZZZZ9999 99z9\"]' like string and get the\n # alphanumeric\n pattern = r'..([A-Z]{4}[0-9]{4})..\\s.([0-9]{2}[a-z][0-9])..'\n result = [re.match(pattern, i).group(1) + \" \" + re.match(pattern, i).group(2)\n for i in result if re.match(pattern, i)]\n\n for course in result:\n try:\n c = Course(course_code=course)\n db.session.add(c)\n db.session.commit()\n except:\n db.session.rollback()\n\n with open('passwords.csv', 'r') as file_in:\n result = csv.reader(file_in)\n for user in result:\n try:\n role = Role.query.filter_by(name=user[2]).first()\n new_user = User(username=str(\n user[0]), password=user[1], role=role)\n db.session.add(new_user)\n db.session.commit()\n except:\n db.session.rollback()\n\n with open('enrolments.csv', 'r') as file_in:\n result = csv.reader(file_in)\n for enrolment in result:\n user = User.get_by_name(enrolment[0])\n course = enrolment[1] + \" \" + enrolment[2]\n user.add_course(course)\n print('Data successfully loaded into database.')", "title": "" }, { "docid": "06043671a3d29501850c8505c690f560", "score": "0.54214823", "text": "def _load_db(self):\n raise NotImplementedError('Please override this method in the child class')", "title": "" }, { "docid": "07a212e0dc1d3e1390c20c23b8c81b14", "score": "0.5414849", "text": "def readRules(rulesPath, outputPatterns):\n global _rulesCache\n\n with READ_RULES_CALLS.time():\n mtime = os.path.getmtime(rulesPath)\n key = (rulesPath, mtime)\n if _rulesCache[:2] == key:\n _, _, rulesN3, ruleStore = _rulesCache\n else:\n rulesN3 = open(rulesPath, 'rb').read() # for web display\n\n ruleStore = ConjunctiveGraph()\n _loadAndEscape(ruleStore, rulesN3, outputPatterns)\n log.debug('%s rules' % len(ruleStore))\n\n _rulesCache = key + (rulesN3, ruleStore)\n return rulesN3, ruleStore", "title": "" }, { "docid": "eb5328f3c738429cd28c72b729af040d", "score": "0.5413819", "text": "def load(self, file, validate=True):\n \n def generateTryAssign(xmlElement):\n attrib = xmlElement.attrib\n def tryAssign(key, default=None):\n try:\n if default is not None:\n if attrib[key] is None:\n return default\n else:\n return self._TextToBoolMap[attrib[key]]\n else:\n return attrib[key]\n except KeyError:\n return default\n return tryAssign\n \n \n with open(file, 'r') as f:\n tree = lxml.etree.parse(f)\n xmlDatabase = tree.getroot() \n \n if validate:\n dtd = lxml.etree.DTD(urllib.urlopen(self.dtd))\n if not dtd.validate(xmlDatabase):\n raise ValueError, dtd.error_log.filter_from_errors()\n \n tryAssign = generateTryAssign(xmlDatabase)\n \n database = torque.model.Database()\n database.name = tryAssign('name')\n database.interfaceName = tryAssign('interfaceName')\n database.description = tryAssign('description')\n \n # Tables, Columns, Indicies\n \n for xmlTable in xmlDatabase.iterchildren('table'):\n tryAssign = generateTryAssign(xmlTable)\n \n table = torque.model.Table()\n database.table.append(table)\n table.name = tryAssign('name')\n table.interfaceName = tryAssign('interfaceName')\n table.description = tryAssign('description')\n \n for xmlColumn in xmlTable.iterchildren('column'):\n tryAssign = generateTryAssign(xmlColumn)\n \n column = torque.model.Column()\n table.appendColumn(column)\n column.name = tryAssign('name')\n column.interfaceName = tryAssign('interfaceName')\n column.description = tryAssign('description')\n column.default = tryAssign('default')\n column.length = tryAssign('length')\n 
column.precision = tryAssign('precision')\n column.scale = tryAssign('scale')\n column.autoIncrement = tryAssign('autoIncrement', False)\n column.primaryKey = tryAssign('primaryKey', False)\n column.unique = tryAssign('unique', False)\n column.nullable = tryAssign('nullable', False)\n column.defaultIsNull = tryAssign('defaultIsNull', False)\n datatype = tryAssign('type')\n column.datatype = self._TextToTypeMap[datatype]\n \n for xmlIndex in xmlTable.iterchildren('index'):\n tryAssign = generateTryAssign(xmlIndex)\n \n index = torque.model.Index()\n table.index.append(index)\n index.name = tryAssign('name')\n index.description = tryAssign('description')\n index.unique = tryAssign('unique', False)\n index.ascendingOrder = tryAssign('ascendingOrder', False)\n \n for xmlIndexColumn in xmlIndex.iterchildren('index-column'):\n a = xmlIndexColumn.attrib\n indexColumn = [c for c in table.column \n if c.name == a['name']][0]\n index.column.append(indexColumn)\n \n # Foreign Keys\n \n for xmlTable in xmlDatabase.iterchildren('table'):\n restrictedTable = [i for i in database.table \n if i.name == xmlTable.attrib['name']][0]\n \n for xmlKey in xmlTable.iterchildren('foreign-key'):\n a = xmlKey.attrib\n \n referencedTable = [i for i in database.table\n if i.name == a['referencedTable']][0]\n \n foreignKey = torque.model.ForeignKey()\n restrictedTable.foreignKey.append(foreignKey)\n foreignKey.name = a['name']\n foreignKey.onDelete = self._TextToActionMap[a['onDelete']]\n foreignKey.onUpdate = self._TextToActionMap[a['onUpdate']]\n \n foreignKey.restrictedColumn = [i for i in restrictedTable.column\n if i.name == a['restrictedColumn']][0]\n foreignKey.referencedColumn = [i for i in referencedTable.column\n if i.name == a['referencedColumn']][0]\n \n return database", "title": "" }, { "docid": "6b7db9775bb8f7f931012e15a7645ef8", "score": "0.54112375", "text": "def _load_normalizers_db(self, load_normalizer_db):\n if load_normalizer_db:\n load_disease = load_therapy = load_gene = True\n else:\n load_disease = self._check_normalizer(\n DiseaseDatabase(), {src.value for src in DiseaseSources})\n load_therapy = self._check_normalizer(\n TherapyDatabase(), {src for src in TherapySources})\n load_gene = self._check_normalizer(\n GeneDatabase(), {src.value for src in GeneSources})\n\n for load_source, normalizer_cli in [\n (load_disease, DiseaseCLI), (load_therapy, TherapyCLI),\n (load_gene, GeneCLI)\n ]:\n name = \\\n str(normalizer_cli).split()[1].split('.')[0][1:].capitalize()\n self._update_normalizer_db(name, load_source, normalizer_cli)\n echo_info(\"Normalizers database loaded.\\n\")", "title": "" }, { "docid": "5bbef614f2736e544f76512aeebc7270", "score": "0.54110503", "text": "def load(self, filename):\n self.ldb = sa.listdb_load_from_file(filename)", "title": "" }, { "docid": "ac98b13058526b0aff2b0425f941e4b6", "score": "0.5410607", "text": "def load_all(self):\r\n\t\tself.load_aliases()\r\n\t\tself.all_assoc_for_respondents()\r\n\t\tself.all_assoc_for_patients()\r\n\t\tself.all_assoc_for_quotes()\r\n\t\tself.all_assoc_for_patient_quotes()\r\n\t\t##load_respondents_x_conditions()\r\n\t\t#self.load_patients_x_conditions()\r\n\t\tself.load_testimonials_x_conditions()\r\n\t\t#self.load_conditions()\r", "title": "" }, { "docid": "07f4ec8cfacfcbdccf2dc3ec5a3983ec", "score": "0.5409357", "text": "def test_get_rules(self):\n pass", "title": "" }, { "docid": "cae287e0d9b96140fee561babd162b78", "score": "0.539817", "text": "def read_rules(data):\n rules = []\n for line in data.splitlines():\n in_pattern, 
out_pattern = line.split(' => ')\n in_rule, out_rule = [], []\n for row in in_pattern.split('/'):\n in_rule.append(list(row))\n for row in out_pattern.split('/'):\n out_rule.append(list(row))\n rules.append((in_rule, out_rule))\n return rules", "title": "" }, { "docid": "5ec8c9780da97434e01260deebc12ffa", "score": "0.5396327", "text": "def load(self):\r\n for egg in self.eggs:\r\n egg.load(self.concepts)\r\n server.db.session.commit()", "title": "" }, { "docid": "a79ca9d2d6f417dcbbea2c0ad59a96fa", "score": "0.53844714", "text": "def list_rules(self):\n return self.rules", "title": "" }, { "docid": "eee9059bce1c3991befa0062d1385c61", "score": "0.537352", "text": "def __init__(self,rules_list):\n self.rules = []\n rnumber = 1\n for r in rules_list:\n try: \n rex = r[0]\n funct = r[1]\n except IndexError:\n raise LexicalError(rnumber,r)\n try: rec = re.compile(rex)\n except TypeError:\n raise LexicalRulesErrorRE(rex,rnumber)\n try:\n op,prec,assoc = r[2]\n if not self.__dict__.has_key(\"operators\"):\n self.operators = {}\n if not self.operators.has_key(op):\n self.operators[op] = (prec,assoc)\n except IndexError:\n pass\n self.rules.append((rec,funct))\n \n rnumber = rnumber + 1 \n if _DEBUG and self.__dict__.has_key(\"operators\"):\n print \"operators %s\" %self.operators", "title": "" }, { "docid": "af8cd477cb9778cd447c3157993a4915", "score": "0.53571475", "text": "def load_users_from_db(self):", "title": "" }, { "docid": "4af074e23def31ea90965a1ae2d8a3df", "score": "0.5341844", "text": "def load_routes():\n\n print \"Buses\"\n\n # Read the routes.text file to \n\n Bus.query.delete()\n for row in open(\"seed_data/routes.txt\"):\n row = row.rstrip()\n bus_code, city, bus_name, bus_lname = row.split(\",\")[:4]\n\n bus = Bus(bus_code=bus_code, city=city, bus_name=bus_name, bus_lname=bus_lname)\n print bus\n\n\n\n db.session.add(bus)\n\n db.session.commit()", "title": "" }, { "docid": "125153593449dffe6137cad2a1881fd0", "score": "0.5335667", "text": "def rules(self):\n return self.__rules", "title": "" }, { "docid": "a8a17c70671568817c9858cf531c1fba", "score": "0.5325333", "text": "def load(self):\n try:\n with open(settings.DB_PATH) as db_file:\n db_json = db_file.read()\n except IOError:\n # TODO: handle other file errors\n raise errors.PasswordFilePermission()\n\n try:\n db = json.loads(db_json)\n except ValueError:\n raise errors.PasswordFileNotJSON()\n\n # load to modules\n KEYMAN.load(db['key data'])\n TABLE.load(db['table'])", "title": "" }, { "docid": "5888ed70201d2472b8c0ddff3cab2ae6", "score": "0.532278", "text": "def hrzn_load_db():\n # Get a list of all the TXT files with Horizons data to be loaded\n runs = ['planets/daily', 'moons/daily', 'moons/weekly', 'asteroids/weekly']\n files = []\n for run in runs:\n files += sorted(glob.glob(f'../data/jpl/horizons/{run}/*.txt'))\n\n # Do all the database operations using just one DB connection\n with db_engine.connect() as conn:\n # Truncate the JPL.HorizonsImport table\n sql = \"truncate table JPL.HorizonsImport;\"\n conn.execute(sql)\n\n # Iterate through all the text files in order; load each one\n for fname_txt in tqdm(files):\n hrzn_txt2db(fname_txt, conn)", "title": "" }, { "docid": "37fd5c3a7703b86a630012bc0a384df2", "score": "0.5309598", "text": "def test_by_sql_file(self):\n udb = UnittestDB()\n metadata = udb.load_from_file('starting_db.yaml')\n connection = self.engine.connect()\n metadata.bind = connection\n metadata.drop_all()\n connection.close()\n\n udb = UnittestDB()\n metadata = udb.read_from_oracle('oracle.sql')\n# for 
table in metadata.sorted_tables:\n# print table.name\n# for col in table.columns:\n# print col\n metadata.bind = self.engine\n metadata.create_all()\n udb.fill_tables(metadata, 10)\n metadata.drop_all()", "title": "" }, { "docid": "123b342be94bb4f164e3539fe102e38c", "score": "0.53080535", "text": "def ReadRules(filestart):\n global Rule, Cat\n Rule = []\n Cat = []\n\n # Open the file\n filename = '%s.sc'%filestart\n with open(filename, 'rt') as f:\n for line in f.readlines():\n line = line.rstrip()\n if line[0] != '*':\n if '/' in line:\n Rule.append(line)\n elif '=' in line:\n Cat.append(line)\n\n if Cat:\n print \"%d categories found\"%len(Cat)\n\n if PRINT_RULES:\n for cat in Cat:\n print cat\n print\n else:\n print \"No rules were found.\"\n print\n\n if Rule:\n print \"%d rules found\"%len(Rule)\n\n if PRINT_RULES:\n for rule in Rule:\n print rule\n\n print\n else:\n print \"No rules were found.\\n\"\n\n return len(Rule)", "title": "" }, { "docid": "5e9d2fcbbc4b8c7acd2af34cb9d6bd14", "score": "0.5305386", "text": "def load_category_table():\n categories = ['Soccer',\n 'Basketball',\n 'Baseball',\n 'Frisbee',\n 'Snowboarding',\n 'Rock Climbing',\n 'Football',\n 'Skating',\n 'Hockey']\n\n for category in categories:\n db.session.add(Category(name=category))\n db.session.commit()", "title": "" }, { "docid": "7ec4e928f548a6b1526035daac4b2434", "score": "0.5304789", "text": "def load_file_db():\n\n return 0", "title": "" }, { "docid": "659b8584ea472bf181ca418abe1344b0", "score": "0.5293496", "text": "def read_input_file():\n print input_file_name\n with open(input_file_name, 'r') as fh: \n for line in fh:\n rules.append(line.strip())", "title": "" }, { "docid": "0b9554ccb6f4c306457e9c126089b17c", "score": "0.52831346", "text": "def get_rules(datadir=None):\n if not datadir:\n datadir = Path.cwd()\n rulefile_chain = _find_rulefile_chain(datadir)\n rule_component_lists_aggregated = []\n for rulefile in rulefile_chain:\n rule_component_lists = _read_components_from_rulefile(rulefile)\n rule_component_lists_aggregated.extend(rule_component_lists)\n rules = _get_ruleobjs_from_components(rule_component_lists_aggregated)\n return rules", "title": "" }, { "docid": "d080c5a50f8a0a4946e3d29afc73de4d", "score": "0.52779454", "text": "def induce_rules(self):\n \n # induce the entire rules\n rules = None\n for tree in tqdm(self.listTrees):\n if rules:\n rules.extend(self.induce_rules_from_a_tree(tree))\n else:\n rules = self.induce_rules_from_a_tree(tree)\n \n # cast the induced rules into DataFrame\n for r in tqdm(rules):\n index = r.pop('index')\n pdRule = pd.DataFrame(index=index)\n for k in r.keys():\n pdRule[k]=r[k]\n if 'child' in k: # set a child to None if it is not visited next\n for i in range(len(index)-1):\n if index[i+1]!=pdRule.loc[index[i],k]:\n pdRule.loc[index[i],k] = None\n \n self.rules.append(pdRule)\n \n return self.rules", "title": "" }, { "docid": "530d8b72fcf958d90c6cad29fadc3f3f", "score": "0.52724624", "text": "def _refreshRuleList(self):\n self.rules.clean()\n command = \"ip -6 rule list\"\n cmdOut = externalCommand(command, False)\n out, err = cmdOut.communicate()\n if cmdOut.returncode != 0:\n raise FailedRoutingCommand(f\"Failed to get rule list. 
Out: {out}, Err: {err}\")\n for line in out.split(b'\\n'):\n match = re.match(br'(\\d+):[ \\t]+from ([^ \\t]+) lookup ([^ \\t]+)$', line)\n if match:\n matched = match.groups()\n self.rules.add_rule(matched[0], matched[1], None, matched[2])\n continue\n match = re.match(br'(\\d+):[ \\t]+from ([^ \\t]+) to ([^ \\t]+) lookup ([^ \\t]+)', line)\n if match:\n matched = match.groups()\n self.rules.add_rule(matched[0], matched[1], matched[2], matched[3])", "title": "" }, { "docid": "a0a29e7f190e35d62eab4f714a98dcdb", "score": "0.526849", "text": "def load(self):\n\n # Prevent loading the same grammar multiple times.\n if self._loaded: return\n self._log_load.debug(\"Grammar %s: loading.\" % self._name)\n\n self._engine.load_natlink_grammar(self, all_results=True)\n self._loaded = True\n self._in_context = False\n\n # Update all lists loaded in this grammar.\n for rule in self._rules:\n if rule.active != False:\n rule.activate()\n # Update all lists loaded in this grammar.\n for lst in self._lists:\n lst._update()", "title": "" }, { "docid": "76101550f502b25d40889aaa5c4d886e", "score": "0.5267323", "text": "def __init__(self, rules):\n self._rules = {}\n for uri, app in rules.items():\n self._rules[uri] = {'app': app, 'regex': re.compile(uri)}", "title": "" }, { "docid": "1e7ae8f2c7c525657fb25d61887120e5", "score": "0.52592474", "text": "def rules(self):\n return list(self._rules)", "title": "" }, { "docid": "2cdc9928be37ec652a5a70764bdc6cca", "score": "0.52572125", "text": "def load_rescues():\n\n #print \"Rescues\"\n\n for row in open('seed_data/u.rescue'):\n row = row.rstrip()\n rescue_id, name, phone, address, email = row.split(\"|\")\n\n rescue = Rescue(name=name,\n phone=phone,\n address=address,\n email=email)\n\n db.session.add(rescue)\n db.session.commit()", "title": "" }, { "docid": "d80162644cee4819642e7abf9819a1e3", "score": "0.5249263", "text": "def do_load(self,c):\n c = c.split()\n if len(c) >1:\n utils.newline_msg(\"ERR\", \"only one db can be loaded at a time\", 2)\n return\n ret = self.get_db_from_cmdline(c[0])\n if ret:\n self.current_param_db = ret \n print \" --- loaded db '%s'\"%self.shorten_name( self.current_param_db.full_name )\n else: \n utils.newline_msg(\"ERR\", \"db does not exist\", 2)", "title": "" }, { "docid": "bbe8e9121ccfdab4bf1229be1f06cdf5", "score": "0.52428734", "text": "def load(self, path):\n Database.load(self, path, local_context={'SoluteData': SoluteData}, global_context={})", "title": "" }, { "docid": "82b8b05f4ae58f8d7767c287107ec3ff", "score": "0.5237872", "text": "def rules(cls):\n rules_Base = {\"data_path\": {\"type\": (str, None)},\n \"batch_size\": {\"type\": int},\n \"num_workers\": {\"type\": int},\n \"shuffle\": {\"type\": bool},\n \"distributed\": {\"type\": bool},\n \"download\": {\"type\": bool},\n \"pin_memory\": {\"type\": bool},\n \"drop_last\": {\"type\": bool},\n \"transforms\": {\"type\": list},\n }\n return rules_Base", "title": "" } ]
1bf5a427d9751c3d3ea5fc7078d6316d
Initial method for PFEMElementCompressible
[ { "docid": "1c4bd45509485b8c3ba8ce1f0be94cbf", "score": "0.0", "text": "def __init__(self, osi, ele_nodes, rho, mu, b1, b2, thickness, kappa):\n self.osi = osi\n self.ele_node_tags = [x.tag for x in ele_nodes]\n self.ele_nodes = ele_nodes\n self.rho = float(rho)\n self.mu = float(mu)\n self.b1 = float(b1)\n self.b2 = float(b2)\n self.thickness = float(thickness)\n self.kappa = float(kappa)\n osi.n_ele += 1\n self._tag = osi.n_ele\n self._parameters = [self.op_type, self._tag, *self.ele_node_tags, self.rho, self.mu, self.b1, self.b2, self.thickness, self.kappa]\n self.to_process(osi)", "title": "" } ]
[ { "docid": "44a31e94dea9ad3f6f0a37de5a8aeb34", "score": "0.6386662", "text": "def compress(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "6f4c1934e03e72884567ccec72b36dec", "score": "0.6303826", "text": "def TransformCompress(deck, entity):", "title": "" }, { "docid": "fbc9ce463e1d2e4be6340c256e8159ad", "score": "0.61409944", "text": "def compress(self):\r\n self.compressed = True", "title": "" }, { "docid": "46830a35ec38b78b2f323f959a9e6266", "score": "0.60635996", "text": "def compress(self):\n self.compressed=None\n return self.compressed", "title": "" }, { "docid": "f0ada3f118f65adf415515aff24af29d", "score": "0.60619026", "text": "def Compress(fields, deck):", "title": "" }, { "docid": "91dd3d81b149eb56f1f41f6fd8629bf1", "score": "0.5940891", "text": "def compress(self):\n self.unit = compress(self.unit)\n return self", "title": "" }, { "docid": "ac0d9e6a21a2baa0b3e100b9f14d7e88", "score": "0.5903741", "text": "def _pfp__pack(self):\n\t\tpass", "title": "" }, { "docid": "a7cfa17f073d92da11807a65fc0e6415", "score": "0.5770782", "text": "def compressionAlgorithmValidity(self):", "title": "" }, { "docid": "0252428f0aee38236987fadf0b45e5be", "score": "0.5709427", "text": "def compress(self, condition, axis=None, out=None):\n return None", "title": "" }, { "docid": "e25e392198cdf9353b2ad097a5e47d06", "score": "0.5705086", "text": "def compress(self, z, parent=None):\n raise NotImplementedError()", "title": "" }, { "docid": "515f40e0cec1d7e65d6dda561e4cd11a", "score": "0.56728274", "text": "def Pack(self):\n pass", "title": "" }, { "docid": "fd3d5f0f16429e8e1f7a3a7eaad34d32", "score": "0.56283116", "text": "def compress(parsed, context, token):\r\n return parsed", "title": "" }, { "docid": "b712f4efccb49a5342d70d91936ff6d6", "score": "0.5591279", "text": "def compress(self, condition, axis=None, out=None):\n return None", "title": "" }, { "docid": "b712f4efccb49a5342d70d91936ff6d6", "score": "0.5591279", "text": "def compress(self, condition, axis=None, out=None):\n return None", "title": "" }, { "docid": "b712f4efccb49a5342d70d91936ff6d6", "score": "0.5591279", "text": "def compress(self, condition, axis=None, out=None):\n return None", "title": "" }, { "docid": "b712f4efccb49a5342d70d91936ff6d6", "score": "0.5588903", "text": "def compress(self, condition, axis=None, out=None):\n return None", "title": "" }, { "docid": "b712f4efccb49a5342d70d91936ff6d6", "score": "0.5588903", "text": "def compress(self, condition, axis=None, out=None):\n return None", "title": "" }, { "docid": "31e1dd15005c627d9c355097bd242a47", "score": "0.55197054", "text": "def CompressTree(tree):\t\r\n\r\n\tif tree is not None:\r\n\t\ttree.recursiveCompression(None)\r\n\treturn tree", "title": "" }, { "docid": "ba04a790689e684b283a5e5c4e669e39", "score": "0.54829323", "text": "def _pack_data(self):\n return NotImplementedError", "title": "" }, { "docid": "7a2739598d0308582e65c31479cda5f4", "score": "0.5481345", "text": "def compress(self):\n if not self.value:\n return self\n q = []\n curr = self.value[0]\n for i in range(1,len(self.value)):\n if positively_parallel_weights(curr,self.value[i]):\n curr = curr + self.value[i]\n else:\n q.append(curr)\n curr = self.value[i]\n q.append(curr)\n return self.parent()(tuple(q))", "title": "" }, { "docid": "3edfb32d57a887263439dd16faf5f325", "score": "0.5479268", "text": "def compression ( self ) :\n return self.__compression", "title": "" }, { "docid": "7f770bf205877c1e9c5c25fbe3d7b8b5", "score": "0.5458809", "text": "def packs(self):\n 
pass", "title": "" }, { "docid": "bf09e352b10e14a9bc6f089e84bd85b6", "score": "0.54197747", "text": "def test_compressed(self):\r\n try:\r\n import zlib\r\n except ImportError:\r\n self.skipTest('zlib is missing')\r\n\r\n ba = amf3.ByteArray()\r\n\r\n self.assertFalse(ba.compressed)\r\n\r\n z = zlib.compress('b' * 100)\r\n ba = amf3.ByteArray(z)\r\n\r\n self.assertTrue(ba.compressed)\r\n\r\n z = zlib.compress('\\x00' * 100)\r\n ba = amf3.ByteArray(z)\r\n\r\n self.assertTrue(ba.compressed)", "title": "" }, { "docid": "4e1290eb46f298d1b652067ee8b695d1", "score": "0.53986704", "text": "def pack(self, x):\n pass # pragma: no cover", "title": "" }, { "docid": "dd2a973516831e277a95c304a8add3af", "score": "0.5398055", "text": "def compressor(__gas_composition, __pv1, __tv1, __mv1, __piv, __etav, __menext=0):\n\n m_eq = Symbol('m_eq')\n\n md = __menext / 100\n\n eq1 = sym.Eq((__mv1 / m_eq - 1), md)\n\n mv1_eq = solve(eq1, m_eq)[0]\n\n gas_phase = gas_object(__gas_composition, __tv1, __pv1)\n\n compressor_inlet_bulk = ct.Quantity(gas_phase, mass=__mv1)\n\n compressor_outlet_bulk = ct.Quantity(gas_phase, mass=__mv1)\n\n pv2 = __piv * __pv1\n sv1 = compressor_outlet_bulk.phase.s\n hv1 = compressor_outlet_bulk.phase.h\n\n compressor_outlet_bulk.SP = sv1, pv2\n tv2_is = compressor_outlet_bulk.phase.T\n hv2_is = compressor_outlet_bulk.phase.h\n hv2 = (hv2_is - hv1) / __etav + hv1\n\n compressor_outlet_bulk.HP = hv2, pv2\n tv2 = compressor_outlet_bulk.phase.T\n compressor_power = - __mv1 * (hv2 - hv1) / 1000\n compressor_power_equivalent = - mv1_eq * (hv2 - hv1) / 1000\n\n bulk_vector = [compressor_inlet_bulk, compressor_outlet_bulk]\n\n attribute_vector = [__mv1, mv1_eq, __pv1, pv2, __piv, __tv1, tv2, tv2_is, compressor_power, compressor_power_equivalent]\n\n df1 = create_state_dataframe(compressor_inlet_bulk, \"Compressor Inlet\")\n df2 = create_state_dataframe(compressor_outlet_bulk, \"Compressor Outlet\")\n gas_properties = pd.concat([df1, df2], axis=1)\n\n return bulk_vector, attribute_vector, gas_properties", "title": "" }, { "docid": "a034330e2b4322094640924eba6cebc4", "score": "0.5395532", "text": "def compressSH(self, X, SHparam):\n\n if X.ndim == 1:\n X = X.reshape((1, -1))\n\n Nsamples, Ndim = X.shape\n nbits = SHparam['nbits']\n\n X = X.dot(SHparam['pc'])\n X = X - SHparam['mn'].reshape((1, -1))\n omega0 = np.pi / (SHparam['mx'] - SHparam['mn'])\n omegas = SHparam['modes'] * omega0.reshape((1, -1))\n\n U = np.zeros((Nsamples, nbits))\n for i in range(nbits):\n omegai = omegas[i, :]\n ys = np.sin(X * omegai + np.pi/2)\n yi = np.prod(ys, 1)\n U[:, i] = yi\n\n b = np.require(U > 0, dtype=np.int)\n #B = compactbit(b)\n return b, U", "title": "" }, { "docid": "42a045600a74da1d25bbf6a6ab52d417", "score": "0.5355002", "text": "def compress(items, flags):\n return list(it.compress(items, flags))", "title": "" }, { "docid": "689b2f6bb0fb0fb375b6ce679e4ed893", "score": "0.53339994", "text": "def _compress(data: NamedTag) -> bytes:\n data = data.save_to(compressed=False)\n return b\"\\x02\" + zlib.compress(data)", "title": "" }, { "docid": "87280128992c52afa7edec3bf39da818", "score": "0.52913034", "text": "def transform(self):", "title": "" }, { "docid": "cb9e577a88f430667f46253e23d8c5fb", "score": "0.5279791", "text": "def compress(self, chunk):\n return _compress(self._context, chunk)", "title": "" }, { "docid": "0e4e1241a55ab35e3278b31c05c78dd2", "score": "0.52240896", "text": "def prepare_compressor_(self):\n\n # make sure that the coder is available\n self.undo_pickable_()\n\n # mae sure that the 
parameters for compressing are available\n self.update(force=True)", "title": "" }, { "docid": "6127bd8174d1d66f670ea6b7950ee838", "score": "0.52214175", "text": "def compress(self, grid) :\n gi = self.get_grid_indices() \n\n v=grid[gi]\n v = self.mask_vec(v)\n\n return v", "title": "" }, { "docid": "9a2849d87e201cd95ca8eb46704dfc5c", "score": "0.5214988", "text": "def _encode(self):\n _code = []\n _compror = []\n\n if not self.comp_attributes['compror']:\n j = 0\n else:\n j = self.comp_attributes['compror'][-1][0]\n\n i = j\n while j < self.statistics['n_states'] - 1:\n while self._condition(i, j):\n i += 1\n if i == j:\n i += 1\n _code.append([0, i])\n _compror.append([i, 0])\n else:\n _code.append(\n [i - j, self.basic_attributes['sfx'][i] - i + j + 1])\n _compror.append([i, i - j])\n j = i\n return _code, _compror", "title": "" }, { "docid": "bd4ff5cb424832573f9c30a7f96be454", "score": "0.51341116", "text": "def encode(self, msg):\n\n compressed_msg = _compressor(pickle.dumps(msg, protocol=settings.PICKLE_PROTOCOL))\n compressed_msg = crypt.encrypt(compressed_msg)\n # Check compressed task size.\n if len(compressed_msg) >= _task_size_limit:\n task_id = msg.get('task_id')\n raise exceptions.ChubbyTaskException(\n 'Task %s is over the limit of %d bytes.' % (task_id,\n _task_size_limit))\n\n return compressed_msg", "title": "" }, { "docid": "78de2c7e7c5b83a902fe0ff9ab770580", "score": "0.5133051", "text": "def fmri_compression(data, labels, n_clusters):\n labels = _check_parcelation_results(labels, n_clusters)\n fmri_reduced = fmri_reduction(data, labels)\n fmri_compressed = np.array(fmri_reduced.T[labels])\n return fmri_compressed", "title": "" }, { "docid": "bc2efc3413edb4bd690c792813000fea", "score": "0.51240623", "text": "def compress_message(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"compress_message\")", "title": "" }, { "docid": "7ff43d3552a3b7165b49cabbdb51565c", "score": "0.51052815", "text": "def process(self):\n logger.info('Encoder processing')\n self._encode()", "title": "" }, { "docid": "884cc2dfdae25f629930422770899c96", "score": "0.51008457", "text": "def perform_compression(task_id):\n raise NotImplementedError", "title": "" }, { "docid": "ee9d130fab1feae0dc17187cfa91f5f9", "score": "0.5083107", "text": "def _compress_content(self, content):\n zbuf = StringIO()\n zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)\n try:\n zfile.write(content.read())\n finally:\n zfile.close()\n content.file = zbuf\n content.seek(0)\n return content", "title": "" }, { "docid": "1d06519e1c1571d457c0ca3d8f5b4975", "score": "0.5079332", "text": "def transform(\n self,\n data_pack: 'mz.DataPack',\n verbose: int = 1\n ) -> 'mz.DataPack':", "title": "" }, { "docid": "3bdc9fdc0cf99fc61edb1a6df894843f", "score": "0.5055561", "text": "def CompressUserDefinedAttributes():", "title": "" }, { "docid": "b518d7a0b99ca959c9836dafc554b1f6", "score": "0.50422466", "text": "def _pack(self) -> None:\n\n body = self._body()\n\n # Prepend last 3 bits of previous odd byte so we can correctly\n # decode the effective colours at the beginning of the 22-bit tuple\n prev_col = np.roll(body, 1, axis=1).astype(np.uint64)\n header = self._make_header(prev_col)\n # Don't leak header across page boundaries\n header[:, 0] = 0\n\n # Append first 3 bits of next even byte so we can correctly\n # decode the effective colours at the end of the 22-bit tuple\n next_col = np.roll(body, -1, axis=1).astype(np.uint64)\n footer = self._make_footer(next_col)\n # Don't leak footer across page boundaries\n 
footer[:, -1] = 0\n\n self.packed = header ^ body ^ footer", "title": "" }, { "docid": "5bd368c573a300af6d14532900e6d663", "score": "0.50399965", "text": "def byte_(self):", "title": "" }, { "docid": "30febac25b18359459aa36faab7d772f", "score": "0.503313", "text": "def compact(self) -> None:\n ...", "title": "" }, { "docid": "aaaf953a3d1f622ab228448456ccbbe4", "score": "0.5031542", "text": "def pack_data(self):\n raise NotImplementedError", "title": "" }, { "docid": "9672ca559a82aed8ea1fd658fb29dd05", "score": "0.50293195", "text": "def trial_execute_compress(cls):\n pass", "title": "" }, { "docid": "a8cdcdbcd1dcc032d8f11985acc5be26", "score": "0.50251526", "text": "def save(self, *args, **kwargs):\n if self.picture != self._original_picture:\n new_picture = self.compress(self.picture)\n self.compressed_picture = new_picture\n super().save(*args, **kwargs)", "title": "" }, { "docid": "61c33a1c4699ac4878e27c8bec1fd1e3", "score": "0.50197196", "text": "def compressed(x):\n return asanyarray(x).compressed()", "title": "" }, { "docid": "ffc7581b7ae936e1735dbe87045d3b1d", "score": "0.5018791", "text": "def test_repr_compress(self):\n self.assertEqual(\n \"<Message v0 CODEC_GZIP value=None>\",\n repr(Message(0, CODEC_GZIP, None, None)),\n )\n self.assertEqual(\n \"<Message v0 CODEC_SNAPPY value=None>\",\n repr(Message(0, CODEC_SNAPPY, None, None)),\n )\n self.assertEqual(\"<Message v0 CODEC_LZ4 value=None>\", repr(Message(0, CODEC_LZ4, None, None)))", "title": "" }, { "docid": "9614b5e5c9375b554d556fea3250ea30", "score": "0.5003834", "text": "def compress_image(self, original, image, imtype, original_shape):\n # Finds the best DWT Compression that Fits\n DWT_result = None\n DWT_thresh = 127 #XXX\n DWT_result_size = TRANS_SIZE - 1\n thresh_increment = -1\n\n while DWT_result_size < TRANS_SIZE and DWT_thresh > 0: #max values for int8\n DWT_result = haar_downsample(image, DWT_thresh)\n DWT_result_encode = rle_encode(DWT_result)\n DWT_result_size = DWT_result_encode.size\n print('DWT Threshold: ' + str(DWT_thresh) + \" DWT_Size: \" + str(DWT_result_size))\n if DWT_result_size > TRANS_SIZE and DWT_thresh == 127:\n DWT_result = None\n break;\n if DWT_result_size > TRANS_SIZE:\n DWT_thresh += 1\n DWT_result = haar_downsample(image, DWT_thresh)\n DWT_result_encode = rle_encode(DWT_result)\n DWT_result_size = DWT_result_encode.size\n break;\n #thresh_increment = (DWT_result_size - TRANS_SIZE) // 1000\n #thresh_increment = 1 if thresh_increment < 1 else thresh_increment\n DWT_thresh += thresh_increment\n\n\n # Finds the best DCT Compression that Fits\n DCT_result = None\n DCT_thresh = 127 #XXX\n DCT_result_size = TRANS_SIZE - 1\n thresh_increment = -1\n\n while DCT_result_size < TRANS_SIZE and DCT_thresh > 0: #max values for int8\n DCT_result = dct_downsample(image, DCT_thresh)\n DCT_result_encode = rle_encode(DCT_result)\n DCT_result_size = DCT_result_encode.size\n print('DCT Threshold: ' + str(DCT_thresh) + \" DCT_Size: \" + str(DCT_result_size))\n if DCT_result_size > TRANS_SIZE and DCT_thresh == 127:\n DCT_result = None\n break;\n if DCT_result_size > TRANS_SIZE:\n DCT_thresh += 1\n DCT_result = dct_downsample(image, DCT_thresh)\n DCT_result_encode = rle_encode(DCT_result)\n DCT_result_size = DCT_result_encode.size\n break;\n #thresh_increment = (DCT_result_size - TRANS_SIZE) // 5000\n #thresh_increment = 1 if thresh_increment < 1 else thresh_increment\n DCT_thresh += thresh_increment\n\n # Finds the best DECIMATE Compression that Fits\n DECIMATE_result = decimate_downsample(image, imtype)\n 
#RLE_result = self.run_length_encoding_compression(image)\n NO_COMPRESSION_result = None\n if image.size < TRANS_SIZE:\n NO_COMPRESSION_result = image\n\n DWT_PSNR = 0.0\n if DWT_result != None:\n DWT_PSNR = PSNR(original, haar_upsample(DWT_result, original_shape, imtype))\n DCT_PSNR = 0.0\n if DCT_result != None:\n DCT_PSNR = PSNR(original, dct_upsample(DCT_result, original_shape, imtype))\n DECIMATE_PSNR = PSNR(original, decimate_upsample(DECIMATE_result, imtype, original_shape))\n #RLE_PSNR = PSNR(image, self.run_length_encoding_decompression(RLE_result, imtype))\n NO_COMPRESSION_PSNR = PSNR(original, NO_COMPRESSION_result)\n\n psnr_arr = np.array([DWT_PSNR, DCT_PSNR, DECIMATE_PSNR, NO_COMPRESSION_PSNR])\n print(\"PSNR VALUES: w, c, deci, none \", psnr_arr)\n\n result_arr = [({'values' : DWT_result_encode, 'shape' : original_shape}, DWT), \\\n ({'values' : DCT_result_encode, 'shape' : original_shape}, DCT), \\\n ({'values' : DECIMATE_result, 'shape': original_shape}, DECIMATE), \\\n ({'values' : image, 'shape': original_shape}, NO_COMPRESSION)]\n\n return result_arr[np.argmax(psnr_arr)]", "title": "" }, { "docid": "feb4d1b8e7d0563f8972352a67e164a1", "score": "0.5003405", "text": "def CompressMaterials(DECK, MATERIAL_MATRIX, COMPARE_BY_NAMES, COMPARE_BY_VALUES, DELETE_DUPLICATED):", "title": "" }, { "docid": "3af57e95af25e5f939446432ffa26a60", "score": "0.4996571", "text": "def _tocomplex(self, arr):\n return None", "title": "" }, { "docid": "1a35e8f0983a5dcfb0343a210ece6af6", "score": "0.49788636", "text": "def compress(condition, a=None, axis=None, out=None):\n return ndarray()", "title": "" }, { "docid": "482679fb443f32595ff1d8df133512a7", "score": "0.49768326", "text": "def compress_layer(raw_state, phys_dim, threshold, compressed_state=0, plot=0):\n if compressed_state == 0:\n compressed_state = init.initialize_random_MPS_with_changing_phys_dim(phys_dim,\n num_sites=len(raw_state),\n bond_dim=1)\n bond_dim_raw_state = raw_state[math.ceil(len(raw_state)/2)].shape[0]\n max_bond_dim = 1\n\n # Initialize accuracy metrics\n dist = [] # Frobenius norm\n sim = [] # Cosine similarity (Scalar product)\n dist.append(metrics.overlap(compressed_state, raw_state))\n sim.append(metrics.scalar_product(compressed_state, raw_state))\n best_dist = []\n best_sim = []\n compressions = []\n # We sweep left to right and then back right to left across the mixed state\n while True:\n # Left->right sweep\n for site in range(0, len(raw_state)-1):\n compressed_state[site], compressed_state[site+1] = comp.update_site(compressed_state, raw_state,\n site=site, dir='right')\n # Right->left sweep\n for site in range(len(raw_state)-1, 0, -1):\n compressed_state[site], compressed_state[site-1] = comp.update_site(compressed_state, raw_state,\n site=site, dir='left')\n\n # Metrics taken after each sweep\n dist.append(metrics.overlap(compressed_state, raw_state))\n sim.append(metrics.similarity(compressed_state, raw_state))\n # Check if sweeps are still working\n if np.abs(dist[-2]-dist[-1]) < threshold:\n # Normalize to maintain length and update metrics\n #compressed_state, _ = can.left_normalize(compressed_state)\n best_dist.append(dist[-1])\n best_sim.append(sim[-1])\n print(\"Sim:\", best_sim[-1], \"Dist:\", best_dist[-1], \"BondDim:\", max_bond_dim)\n compressions.append(compressed_state[:])\n\n # Break if we cannot increase bond dimension anymore\n if max_bond_dim+1 == bond_dim_raw_state:\n break\n\n # Break if changing bond dimension did not do enough\n if len(best_dist) > 1 and 
np.abs(best_dist[-2]-best_dist[-1] < threshold):\n break\n\n # Update each tensor by increasing bond dimension\n for i, tensor in enumerate(compressed_state):\n if tensor.ndim == 2:\n new_tensor = np.zeros((tensor.shape[0], tensor.shape[1]+1))\n new_tensor[:tensor.shape[0], :tensor.shape[1]] = tensor\n compressed_state[i] = new_tensor\n\n elif tensor.ndim == 3:\n new_tensor = np.zeros((tensor.shape[0]+1, tensor.shape[1]+1, tensor.shape[2]))\n new_tensor[:tensor.shape[0], :tensor.shape[1], :tensor.shape[2]] = tensor\n compressed_state[i] = new_tensor\n max_bond_dim = compressed_state[math.ceil(len(compressed_state)/2)].shape[0]\n\n if plot == 1:\n max_bond_dim = range(1, len(best_dist)+1)\n fig, ax1 = plt.subplots()\n\n color = 'tab:blue'\n ax1.set_xlabel('Compressed Dimension')\n ax1.set_ylabel('Cosine Similarity', color=color)\n ax1.plot(max_bond_dim, best_sim, color=color)\n ax1.tick_params(axis='y', labelcolor=color)\n\n ax2 = ax1.twinx()\n color = 'tab:red'\n ax2.set_ylabel('Euclidean Distance', color=color)\n ax2.plot(max_bond_dim, best_dist, color=color)\n ax2.tick_params(axis='y', labelcolor=color)\n\n plt.title('Metrics vs. Compressed Dimension')\n\n fig.tight_layout()\n plt.show()\n\n plt.figure()\n plt.title(\"Cosine Similarity vs. Euclidean Distance\")\n plt.xlabel(\"Euclidean Distance\")\n plt.ylabel(\"Cosine Similarity\")\n\n plt.plot(best_dist, best_sim)\n\n return compressions, best_dist, best_sim", "title": "" }, { "docid": "e22f0848e287768ea1fdffe77a36b223", "score": "0.4972513", "text": "def transform(self):\r\n raise NotImplementedError", "title": "" }, { "docid": "2593796a5c1b35f40e4a8d0e81ef4501", "score": "0.49659356", "text": "def compress_replace_branch(self, compress_new):\n self.compress = copy.deepcopy(compress_new)\n if type(self.compress) is list or type(self.compress) is tuple:\n self.compress = nn.ModuleList(self.compress)", "title": "" }, { "docid": "fbd787faa5ee1349665c810c5a4010ed", "score": "0.49254817", "text": "def test_compression(tmp_path):\n\n uncompressed = (\n pe.Node(\n SimpleShowMaskRPT(\n generate_report=True,\n background_file=str(\n get_template(\"OASIS30ANTs\", resolution=1, desc=None, suffix=\"T1w\")\n ),\n mask_file=str(\n get_template(\n \"OASIS30ANTs\",\n resolution=1,\n desc=\"BrainCerebellumRegistration\",\n suffix=\"mask\",\n )\n ),\n compress_report=False,\n ),\n name=\"uncompressed\",\n base_dir=str(tmp_path),\n )\n .run()\n .outputs.out_report\n )\n\n compressed = (\n pe.Node(\n SimpleShowMaskRPT(\n generate_report=True,\n background_file=str(\n get_template(\"OASIS30ANTs\", resolution=1, desc=None, suffix=\"T1w\")\n ),\n mask_file=str(\n get_template(\n \"OASIS30ANTs\",\n resolution=1,\n desc=\"BrainCerebellumRegistration\",\n suffix=\"mask\",\n )\n ),\n compress_report=True,\n ),\n name=\"compressed\",\n base_dir=str(tmp_path),\n )\n .run()\n .outputs.out_report\n )\n\n size = int(os.stat(uncompressed).st_size)\n size_compress = int(os.stat(compressed).st_size)\n assert size >= size_compress, (\n \"The uncompressed report is smaller (%d)\"\n \"than the compressed report (%d)\" % (size, size_compress)\n )", "title": "" }, { "docid": "292981900034edf4e8773aeb0cd6d72a", "score": "0.49061888", "text": "def compress(parser, token):\r\n from compressor.templatetags.compress import compress\r\n return compress(parser, token)", "title": "" }, { "docid": "4874fac772200b4e6e0af72e9ce473b7", "score": "0.48848227", "text": "def processed(self):", "title": "" }, { "docid": "42c43db5b62ebac1128a879f8d7a27ff", "score": "0.48826417", "text": "def 
_compress_fragments(self, fragments):\n compressed_fragments = [zlib.compress(fragment) for fragment in fragments]\n return compressed_fragments", "title": "" }, { "docid": "61774aa9d52175a307b81d68e17157ae", "score": "0.48791492", "text": "def __reduce_ex__(self, proto): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "785dd747d8586347022e2de2a1173492", "score": "0.48764634", "text": "def pack(self):\r\n\r\n func = \"packLcpPolicyMleLegacyElement\"\r\n print(\"%s packing MLE hdr\" % (func)) # DBGDBG\r\n\r\n # Initialize to None to check for supported HashAlg.\r\n hashAlgName = None\r\n try:\r\n # reverse lookup of the hash algorithm name(key) for the given HashAlg value\r\n hashAlgName = (key for key,val in DEFINES.TPM_ALG_HASH.items() if (val == self.HashAlg)).next()\r\n hashAlgName = 'SHA1'\r\n except StopIteration:\r\n print (\"MLE elements with unsupported hash algorithm, aborting build\")\r\n print(\"%s - build failed, see status bar\" % (func)) # DBGDBG\r\n return\r\n \r\n if(self.HashAlg != DEFINES.TPM_ALG_HASH['SHA1_LEGACY']):\r\n #elementFileFormat = MleDataSha256HashFormatString\r\n print (\"MLE Legacy elements only support SHA1, aborting build\")\r\n return\r\n\r\n # pack the element based on its type and return the binary string\r\n elementData = pack( self.MleDataFormatString,\r\n self.ElementSize, self.ElementType,\r\n self.PolEltControl, self.SINITMinVersion,\r\n self.HashAlg, self.NumHashes)\r\n print(\"%s PolEltControl=%d, SINITMinVersion=%d\" %(func, self.PolEltControl, self.SINITMinVersion)) # DBGDBG\r\n \r\n fileCnt = 0\r\n while(fileCnt < self.NumHashes):\r\n print(\"%s packing MLE hash %d\" % (func, fileCnt)) # DBGDBG\r\n elementData += self.packHash(hashAlgName, self.Hashes[fileCnt])\r\n fileCnt += 1\r\n\r\n return( elementData )", "title": "" }, { "docid": "1e44df6ab70620b4260167d83269b0dd", "score": "0.48668435", "text": "def pack(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "b83e16ca2748511674a00034b8e89a2f", "score": "0.48633954", "text": "def test_compress(row_data_ishan: List, ishan: Entity) -> None:\n reference_tensor = REPT(rows=row_data_ishan)\n\n output = reference_tensor.compress([0, 1])\n\n target_output = list()\n for row in row_data_ishan:\n assert row.entity == ishan\n new_row = row.compress([0, 1])\n assert new_row.entity == ishan\n target_output.append(new_row)\n\n for result, target in zip(output, target_output):\n assert isinstance(result.child, SEPT)\n assert isinstance(target, SEPT)\n assert result.child == target, \"Compress operation failed\"", "title": "" }, { "docid": "2bfa30525189a83300fd993a63da8073", "score": "0.48446357", "text": "def Massless():", "title": "" }, { "docid": "e443f179011d211b9c0992eddf02451c", "score": "0.48443186", "text": "def preprocess(self, x):", "title": "" }, { "docid": "33c5bf08552499629f544479ca29ec2d", "score": "0.48322847", "text": "def decomposed(self):\n ...", "title": "" }, { "docid": "843f2bca4dd8034b4cf185adb5216e52", "score": "0.4826001", "text": "def semantic_compression(alert_dict, schema_map) -> dict:\n\n source = alert_dict[schema_map[\"source\"]]\n\n source_dict = {\n \"jd\": source[\"jd\"],\n \"sourceId\": source[schema_map[\"sourceId\"]],\n \"ra\": source[\"ra\"],\n \"dec\": source[\"dec\"],\n # for classify_snn\n \"mag\": source[schema_map[\"mag\"]],\n \"magerr\": source[schema_map[\"magerr\"]],\n \"magzp\": source[schema_map[\"magzp\"]],\n \"magzpsciunc\": source[\"magzpsciunc\"],\n \"diffmaglim\": source[\"diffmaglim\"],\n 
# for tag\n \"isdiffpos\": source[\"isdiffpos\"],\n \"rb\": source[\"rb\"],\n \"drb\": source[\"drb\"],\n \"nbad\": source[\"nbad\"],\n \"fwhm\": source[\"fwhm\"],\n \"elong\": source[\"elong\"],\n \"magdiff\": source[\"magdiff\"],\n \"filter\": source[schema_map[\"filter\"]],\n }\n\n access_prev = alert_dict[schema_map[\"prvSources\"]]\n\n prev_sources = []\n\n for prv_s in access_prev:\n\n prev_source_dict = {\n \"jd\": prv_s[\"jd\"],\n \"sourceId\": prv_s[schema_map[\"sourceId\"]],\n \"ra\": prv_s[\"ra\"],\n \"dec\": prv_s[\"dec\"],\n # for classify_snn\n \"mag\": prv_s[schema_map[\"mag\"]],\n \"magerr\": prv_s[schema_map[\"magerr\"]],\n \"magzp\": prv_s[schema_map[\"magzp\"]],\n \"magzpsciunc\": prv_s[\"magzpsciunc\"],\n \"diffmaglim\": prv_s[\"diffmaglim\"],\n # for tag\n \"isdiffpos\": prv_s[\"isdiffpos\"],\n \"rb\": prv_s[\"rb\"],\n \"nbad\": prv_s[\"nbad\"],\n \"fwhm\": prv_s[\"fwhm\"],\n \"elong\": prv_s[\"elong\"],\n \"magdiff\": prv_s[\"magdiff\"],\n \"filter\": prv_s[schema_map[\"filter\"]],\n }\n\n prev_sources.append(prev_source_dict)\n\n xmatch = {\n \"ssdistnr\": source[\"ssdistnr\"],\n \"ssmagnr\": source[\"ssmagnr\"],\n \"objectidps1\": source[\"objectidps1\"],\n \"distpsnr1\": source[\"distpsnr1\"],\n \"sgscore1\": source[\"sgscore1\"],\n \"objectidps2\": source[\"objectidps2\"],\n \"distpsnr2\": source[\"distpsnr2\"],\n \"sgscore2\": source[\"sgscore2\"],\n \"objectidps3\": source[\"objectidps3\"],\n \"distpsnr3\": source[\"distpsnr3\"],\n \"sgscore3\": source[\"sgscore3\"],\n }\n\n alert_lite = {\n \"alertIds\": types.AlertIds(schema_map, alert_dict=alert_dict).ids._asdict(),\n \"source\": source_dict,\n \"prvSources\": tuple(prev_sources),\n \"xmatch\": xmatch,\n }\n\n return alert_lite", "title": "" }, { "docid": "a73dc78635f3839866659a7a19604585", "score": "0.48249876", "text": "def compress(uncompressed):\n \n # Build the dictionary.\n dict_size = 256\n #dictionary = dict((chr(i), i) for i in range(dict_size))\n dictionary = {chr(i): i for i in range(dict_size)}\n \n w = \"\"\n result = []\n for c in uncompressed:\n wc = w + c\n if wc in dictionary:\n w = wc\n else:\n result.append(dictionary[w])\n # Add wc to the dictionary.\n dictionary[wc] = dict_size\n dict_size += 1\n w = c\n \n # Output the code for w.\n if w:\n result.append(dictionary[w])\n return result", "title": "" }, { "docid": "34973636de2585eec011f8908dd2b19d", "score": "0.48227435", "text": "def process(self, unprocessed):\n if isinstance(unprocessed, int) and not isinstance(unprocessed, bool):\n return unprocessed\n\n value = self._serializer.serialize(unprocessed)\n return self._compressor.compress(value)", "title": "" }, { "docid": "33a14670c662b57b4088d0db793bf107", "score": "0.48105252", "text": "def transform(self):\n pass", "title": "" }, { "docid": "0a9b7263957bd0692e04109e98d6750c", "score": "0.4798625", "text": "def compress_layer_single_dim(raw_state, phys_dim, threshold, compressed_dim):\n compressed_state = init.initialize_random_MPS_with_changing_phys_dim(phys_dim,\n num_sites=len(raw_state),\n bond_dim=compressed_dim)\n bond_dim_raw_state = raw_state[math.ceil(len(raw_state)/2)].shape[0]\n\n # Initialize accuracy metrics\n dist = [] # Frobenius norm\n sim = [] # Cosine similarity (Scalar product)\n dist.append(metrics.overlap(compressed_state, raw_state))\n sim.append(metrics.similarity(compressed_state, raw_state))\n # We sweep left to right and then back right to left across the mixed state\n while True:\n # Left->right sweep\n for site in range(0, len(raw_state)-1):\n 
compressed_state[site], compressed_state[site+1] = comp.update_site(compressed_state, raw_state,\n site=site, dir='right')\n # Right->left sweep\n for site in range(len(raw_state)-1, 0, -1):\n compressed_state[site], compressed_state[site-1] = comp.update_site(compressed_state, raw_state,\n site=site, dir='left')\n\n # Metrics taken after each sweep\n dist.append(metrics.overlap(compressed_state, raw_state))\n sim.append(metrics.similarity(compressed_state, raw_state))\n # Check if sweeps are still working\n if np.abs(dist[-2]-dist[-1]) < threshold:\n print(\"Sim:\", sim[-1], \"Dist:\", dist[-1], \"BondDim:\", compressed_dim)\n break\n\n return compressed_state, dist, sim", "title": "" }, { "docid": "9eace7aa33474439d23c923971065a42", "score": "0.47976196", "text": "def get_modules_to_compress(self):\n return self.modules_to_compress", "title": "" }, { "docid": "2498bfad2034b0c9b6285d3b7ce37d17", "score": "0.47959524", "text": "def compress_packet_data(self, data):\n\n if self.comp_enabled:\n data_len = 0\n\n if len(data) > self.comp_threshold:\n data_len = len(data)\n data = zlib.compress(data)\n\n data = VarInt.pack(data_len, ctx=self.ctx) + data\n\n return data", "title": "" }, { "docid": "9ed86a863d004bc84c56555345d55c2c", "score": "0.47858927", "text": "def compress(self, axis, condition):\r\n axis, axis_index = self._axis_and_index(axis)\r\n new_axis = axis.compress(condition)\r\n axes = self._axes.replace(axis_index, new_axis)\r\n values = self._values.compress(condition, axis_index)\r\n return self.__class__(values, axes)", "title": "" }, { "docid": "a82ca883e5e580f89a178f76d6d85e57", "score": "0.47841462", "text": "def transform(self, A):", "title": "" }, { "docid": "0650515bd34570ccce7fa03dd4856865", "score": "0.47783414", "text": "def compression(self):\n return self.reader.compression()", "title": "" }, { "docid": "fabb068f9b6d87f8b092f6b40a9294f1", "score": "0.47778228", "text": "def compress_fun(local_compress_module, local_update_model_grad_dict: dict):\n local_compress_grad_dict = local_compress_module.compress(local_update_model_grad_dict)\n local_recon_grad_dict = local_compress_module.reconstruct(local_compress_grad_dict)\n return local_recon_grad_dict", "title": "" }, { "docid": "86799268512754ad94b1a2d7719a3b9a", "score": "0.47773013", "text": "def calculateCompressibility(virial):\r\n\r\n compressibility = 1 - 0.5*virial/(3*vr.N*vr.T_initial)\r\n return compressibility", "title": "" }, { "docid": "09743d34d4d810045216abcb215ee0ed", "score": "0.47709844", "text": "def build():\r\n\r\n compress()\r\n demo()", "title": "" }, { "docid": "56bdac507a77979439c08b425ed60c74", "score": "0.47642747", "text": "def byte(self):", "title": "" }, { "docid": "748c9ead2495121853ca9ba6363697d8", "score": "0.4752668", "text": "def pack(self):\r\n func = 'packLcpPolicyMleElement'\r\n\r\n print(\"%s packing MLE hdr\" % (func)) # DBGDBG\r\n \r\n # Initialize to None to check for supported HashAlg.\r\n hashAlgName = None\r\n # reverse lookup of the hash algorithm name(key) for the given HashAlg value\r\n hashAlgName = (key for key,val in DEFINES.TPM_ALG_HASH.items() if (val == self.HashAlg)).next()\r\n if (hashAlgName == None):\r\n print (\"MLE elements with unsupported hash algorithm, aborting build\")\r\n print(\"%s - build failed, see status bar\" % (func)) # DBGDBG\r\n return\r\n\r\n # pack the element based on its type and return the binary string\r\n elementData = pack(self.MleDataFormatString,\r\n self.ElementSize, self.ElementType,\r\n self.PolEltControl, self.SINITMinVersion, 
0,\r\n self.HashAlg, self.NumHashes)\r\n \r\n print(\"%s PolEltControl=%d, SINITMinVersion=%d\" %(func, self.PolEltControl, self.SINITMinVersion)) # DBGDBG\r\n fileCnt = 0\r\n while(fileCnt < self.NumHashes):\r\n print(\"%s packing MLE hash %d\" % (func, fileCnt)) # DBGDBG\r\n elementData += self.packHash(hashAlgName, self.Hashes[fileCnt])\r\n fileCnt += 1\r\n\r\n return( elementData )", "title": "" }, { "docid": "324e12b55ee64babb847952010d1d4d9", "score": "0.47475386", "text": "def compressed(self):\n data = ndarray.ravel(self._data)\n if self._mask is not nomask:\n data = data.compress(np.logical_not(ndarray.ravel(self._mask)))\n return data", "title": "" }, { "docid": "83ba27ab5fa124db05c8ee6c461900fa", "score": "0.47412497", "text": "def Serialized(self):\n raise NotImplementedError", "title": "" }, { "docid": "2ae65810cd3e7148f7fef6adab6d67e4", "score": "0.47347236", "text": "def compress(self, name):\n am.compress_path(name, self.ood_path)", "title": "" }, { "docid": "e1c994ae262b533fab0a241d06ce9909", "score": "0.47229466", "text": "def _encode(self, x):\n count = 0\n if x <= self.inter[0]:\n pass\n elif x >= self.inter[-1]:\n count = len(self.inter)\n pass\n else:\n for first, second in zip(self.inter, self.inter[1:]):\n count += 1\n if x > first and x < second:\n break \n \n #x = torch.zeros(self.cf[\"n_in\"],1)\n #x[count] = 1.0\n x = torch.zeros(1,1,self.LAYERS[0])\n x[0,0,count] = 1.0\n return x", "title": "" }, { "docid": "6771e2fce0a6a4a8546ac1400e896ade", "score": "0.47162858", "text": "def compress(self: \"Options\") -> bool:\n return self._compress", "title": "" }, { "docid": "b79a1b2a10e1ecb8045f5e07e7dea61a", "score": "0.47045892", "text": "def compress_replace(self, compress_new):\n self.compress = copy.deepcopy(compress_new)\n if type(self.compress) is list or type(self.compress) is tuple:\n for idx, module in enumerate(self.compress):\n self.add_module(\"compress\" + str(idx), module)", "title": "" }, { "docid": "109e19cba3c03a9ababd5cd5b0738003", "score": "0.4698967", "text": "def compress(self, condition, axis=None, out=None):\n # Get the basic components\n (_data, _mask) = (self._data, self._mask)\n\n # Force the condition to a regular ndarray and forget the missing\n # values.\n condition = np.asarray(condition)\n\n _new = _data.compress(condition, axis=axis, out=out).view(type(self))\n _new._update_from(self)\n if _mask is not nomask:\n _new._mask = _mask.compress(condition, axis=axis)\n return _new", "title": "" }, { "docid": "292b4622ffd80140fb441e93cd41f94c", "score": "0.46961123", "text": "def compress(in_file, out_file):\n with open(in_file, \"rb\") as f1:\n text = f1.read()\n freq = make_freq_dict(text)\n tree = huffman_tree(freq)\n codes = get_codes(tree)\n number_nodes(tree)\n print(\"Bits per symbol:\", avg_length(tree, freq))\n result = (num_nodes_to_bytes(tree) + tree_to_bytes(tree) +\n size_to_bytes(len(text)))\n result += generate_compressed(text, codes)\n with open(out_file, \"wb\") as f2:\n f2.write(result)", "title": "" }, { "docid": "b69c5d85bd31a23bbe866a8d74ab82d7", "score": "0.46943521", "text": "def __init__(self, result_jet):\n _fastjet.MassDropTaggerStructure_swiginit(self, _fastjet.new_MassDropTaggerStructure(result_jet))", "title": "" }, { "docid": "a430206f0815691b68c304f19c7eae37", "score": "0.4691554", "text": "def checkCompression(testCase, additionalData):\n for plane in (\"image\", \"mask\", \"variance\"):\n for entry in (\"compression.algorithm\",\n \"compression.columns\",\n \"compression.rows\",\n 
\"compression.quantizeLevel\",\n \"scaling.algorithm\",\n \"scaling.bitpix\",\n \"scaling.maskPlanes\",\n \"scaling.seed\",\n \"scaling.quantizeLevel\",\n \"scaling.quantizePad\",\n \"scaling.fuzz\",\n \"scaling.bscale\",\n \"scaling.bzero\",\n ):\n additionalData.getScalar(plane + \".\" + entry)\n testCase.assertNotEqual(additionalData.getScalar(plane + \".scaling.seed\"), 0)", "title": "" }, { "docid": "68f89d0f2e1f8e63ea74e29351f96309", "score": "0.46895358", "text": "def __isQueueDueForCompression(self):\n return self.__size <= (len(self.__data) // 4)", "title": "" }, { "docid": "5eb2633e06acf931179931fda4d75a29", "score": "0.4676754", "text": "def compress(params):\n compressed = []\n for p in params:\n t = p.view(-1)\n i = t.nonzero().view(1, -1)\n compressed.append((i.to(torch.int32),\n t[i].view(-1), t.size(), t.dtype))\n return compressed", "title": "" }, { "docid": "7a501f89c081574752178dc9b65c2450", "score": "0.46744144", "text": "def test_enable_compression(self):\n self.cdmr.deflate = 4\n d = self.cdmr.fetch_data(latitude=[slice(None)])\n assert d", "title": "" }, { "docid": "45d9281681f97248cd88b637062c782e", "score": "0.46642527", "text": "def testCompression(self):\n compression_average = self.getCurrentCompressionMetricsWithRetry()\n self.fetchFromGoogleStorage()\n data = {}\n with open(DATA_FILE, 'r') as data_fp:\n data = json.load(data_fp)\n if self.updateDataObject(compression_average, data):\n with open(DATA_FILE, 'w') as data_fp:\n json.dump(data, data_fp)\n self.uploadToGoogleStorage()", "title": "" }, { "docid": "3428e24b7bf6d80d66326d6b3ac68b4d", "score": "0.46563104", "text": "def serialize(self):", "title": "" }, { "docid": "3428e24b7bf6d80d66326d6b3ac68b4d", "score": "0.46563104", "text": "def serialize(self):", "title": "" }, { "docid": "17e5a65871d1c99c067367acd6b0a8b5", "score": "0.4655155", "text": "def mt2pcm(mt, bits):\n \n quantisation_level = np.power(2,bits) # number of quantization level\n bits=bits-1 # number of bits is reduced by one to incorporate the sign of number\n \n if abs(min(mt)) > abs(max(mt)): # defining the delta in uniform fashion\n delt = 2*abs(min(mt))/quantisation_level\n else:\n delt = 2*abs(max(mt))/quantisation_level\n\n pow = np.power(2.0,1+arange(-bits,0)) # making array of increasing negative power of 2\n array_code = array([]) # initialisation of code_array\n mt_quantized = array([])\n for val in mt: # starting the loop to check each values in mt\n if val > 0:\n x = floor(val/delt)\n if x == val/delt: # if val is equal maximum quantization level, it's reduced by 1 to encode it\n x = x-1\n mt_quantized = np.append(mt_quantized,array([x*delt+0.5*delt])) # making of new quantized mt\n x_bin = array(mod(floor(outer(array([x]),pow)),2),int)\n array_code = np.append(array_code,hstack((array([[0]]),x_bin))) # making of code array\n elif val < 0:\n x = floor(abs(val)/delt)\n if x == abs(val)/delt: # if val is equal maximum quantization level, it's reduced by 1 to encode it\n x = x-1\n mt_quantized = np.append(mt_quantized,array([-x*delt-0.5*delt])) ## making of new quantized mt\n x_bin = array(mod(floor(outer(array([x]),pow)),2),int)\n array_code = np.append(array_code,hstack((array([[1]]),x_bin))) # making of code array\n else:\n mt_quantized = np.append(mt_quantized,array([0])) ## making of new quantized mt\n x_bin = array(mod(floor(outer(array([0]),pow)),2),int)\n array_code = np.append(array_code,hstack((array([[0]]),x_bin))) # making of code array\n \n return mt_quantized, array(array_code,int8)", "title": "" }, { "docid": 
"8e7d56b1890d62c3a0f880c2897cde1a", "score": "0.4643873", "text": "def Archive(self):\n pass", "title": "" }, { "docid": "aa1a11aef7a14ac398968e88fea927a2", "score": "0.46435738", "text": "def test_compress():\n # Spec test\n assert_equals('c1o17l1k1a1n1g1a1r1o3',\n hw1.compress('cooooooooooooooooolkangarooo'))\n assert_equals('a3', hw1.compress('aaa'))\n assert_equals('', hw1.compress(''))\n # Addtional test\n assert_equals('c1a1b2a1g1e1', hw1.compress('cabbage'))\n assert_equals('b4a2o1r1o3', hw1.compress('bbbbaaorooo'))", "title": "" } ]
36aed014ebb5272053f13d612b4487c6
A header with the contents of headerName must exist. The match takes place whether or not the request's header has a value.
[ { "docid": "290bfb9ada89baa1712c649d5090e93b", "score": "0.0", "text": "def present_match(self) -> Optional[bool]:\n return pulumi.get(self, \"present_match\")", "title": "" } ]
[ { "docid": "a03941438ea712611c36539805b0d33a", "score": "0.7539977", "text": "def verify_header(headers, header_name, header_value):\n matching_headers = list()\n\n for header in headers:\n if header[0] == header_name:\n matching_headers.append(header)\n\n # Hmmm....\n if len(matching_headers) != 1:\n if len(matching_headers) == 0:\n msg = \"Header `{}` has not been found\".format(header_name)\n elif len(matching_headers) > 1:\n msg = \"More than one `{}` header has been found\".format(header_name)\n\n assert len(matching_headers) == 1, msg\n\n assert matching_headers[0][1] == header_value", "title": "" }, { "docid": "5124e4f631c24bdf828a385cd37e9e68", "score": "0.7028525", "text": "def _validate_header(self, header: str) -> None:", "title": "" }, { "docid": "7766a825f5a7f4d4cfde77c53f39c41d", "score": "0.6970077", "text": "def header(context, header, value):\n assert context.response.headers[header] == value", "title": "" }, { "docid": "7330e9c2c26dcc57f7c1abbf5114736a", "score": "0.6893718", "text": "def _checkHeader(self, header):\r\n return True", "title": "" }, { "docid": "cbebf638f8a4d041496ea865346c131c", "score": "0.6891734", "text": "def check(header):\n return request.headers[header] if header in request.headers else \"\"", "title": "" }, { "docid": "451ae179c4f80064a6cb688f54d94946", "score": "0.6813507", "text": "def test_header_value(response, key, value):\n\tactual_value = get_header_value(response, key)\n\tif actual_value is not None:\n\t\treturn value == actual_value\n\treturn False", "title": "" }, { "docid": "3c045131db8820729af6523779938fbc", "score": "0.6740637", "text": "def check_header(self, path, header, content):\n try:\n r = get(self.make_url(path), headers={'host': self.host},\n allow_redirects=False)\n except ConnectionError as e:\n return (False, str(e))\n if header not in r.headers:\n return (False, \"{} not found in headers\".format(header))\n if content in r.headers[header]:\n return (True, \"\")\n return(False, \"'{}' not found in {}\".format(content, header))", "title": "" }, { "docid": "2812f3d2cb8cd86ddd782187cb3df8bd", "score": "0.667172", "text": "def get_header(self, name):\r\n name = name.lower()\r\n for key, value in self._inheaders:\r\n if key == name:\r\n return value\r\n return None", "title": "" }, { "docid": "70481d400fcd62d6bf1a6c4108e834bc", "score": "0.659035", "text": "def match_headers(endpoint):\n if 'headers' not in endpoint and 'absent_headers' not in endpoint:\n return True\n headers = dict(flask.request.headers)\n if 'headers' in endpoint:\n for (key, val) in endpoint['headers'].items():\n if val != headers.get(key):\n return False\n # Enforce that certain headers must be absent\n if 'absent_headers' in endpoint:\n header_keys = set(key.lower() for key in headers.keys())\n print('headers are', headers)\n for key in endpoint['absent_headers']:\n print('checking absent', key)\n if key.lower() in header_keys:\n return False\n return True", "title": "" }, { "docid": "d388aa0a8f15c768737d0d0bb5af89fd", "score": "0.6560017", "text": "def __headerMatches(self, h):\n rv=None\n for header in self.headers:\n if h.lower().find(header.lower()) == 0:\n rv=1\n return rv", "title": "" }, { "docid": "a7b33f1c0edc16c37054f9c40979ebed", "score": "0.6546098", "text": "def match_header(self, header_line):\n raise NotYetImplementedError", "title": "" }, { "docid": "7948d46be752b4e9c2bd23bc1c90e331", "score": "0.6497373", "text": "def header_is_absent(headers, header_name):\n for header in headers:\n assert header[0] != header_name", "title": "" }, { 
"docid": "4516216d537210c9294abee783e37121", "score": "0.6479443", "text": "def check_header(self, header):\r\n\r\n status = True\r\n required_params = ['DATE-OBS', 'EXPTIME', 'LATITUDE', 'LONGITUD', 'HEIGHT']\r\n for par in required_params:\r\n if par not in header.keys():\r\n status = False\r\n return status", "title": "" }, { "docid": "6a3a26c93ec142dd203ed168e1725a18", "score": "0.64290357", "text": "def get_header(event, name, required=False):\n\n if name in event.get(\"headers\", {}):\n return event[\"headers\"][name]\n\n lower_name = name.lower()\n for key in event.get(\"headers\", {}).keys():\n if lower_name == key.lower():\n return event[\"headers\"][key]\n\n if required:\n raise KeyError(name)\n\n return None", "title": "" }, { "docid": "b17b75703d0482a8bad4b2b059b1d275", "score": "0.6383328", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "5fd9636d0be0fd863d2793438c60a99e", "score": "0.6378348", "text": "def get_header(val, headers):\n for h in headers:\n k = h.key.get_generator({})\n if len(k) == len(val) and k[:].lower() == val.lower():\n return h\n return None", "title": "" }, { "docid": "bcc4f6872df4e1d39467ed5dd4c7685c", "score": "0.6350375", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "b24d50691322e223610d7bd038a0d663", "score": "0.6326261", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementNotStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "232deab53dc126c9d2cc300c6b161d6a", "score": "0.63072467", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementNotStatementStatementNotStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "8fe0d4e82064fb0500b16443c14aaa0a", "score": "0.6288755", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementAndStatementStatementNotStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "a4d3614be694f45e990dd5b2d00827e3", "score": "0.62874675", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementAndStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "68825d7bce56577661ac8014ca3dd715", "score": "0.6279807", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementOrStatementStatementNotStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "47b5cfb4001c573b5014332669ac4e67", "score": "0.6277703", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementOrStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "1d788e302565fc870416bdbdd7776e1a", "score": "0.6272355", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementOrStatementStatementNotStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return 
pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "33041497606aeb03d9a419be6ef99ec7", "score": "0.6270448", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementAndStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "f3a09896b045d02c3d82bb2ece5dde9b", "score": "0.6268649", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementAndStatementStatementNotStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "7c05ea2cb0a4dac06f73b3e47f4832fe", "score": "0.62615633", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementNotStatementStatementOrStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "b12fd85050e6d6fc5df13e061dfc8172", "score": "0.6261076", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementNotStatementStatementAndStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "5cf1e39bee9e5d7dddf2ba30cd9d251c", "score": "0.62588435", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementOrStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "afb283525319e0217844a3f4666afdaa", "score": "0.6253941", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementAndStatementStatementNotStatementStatementNotStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "a31a155e5fcf69260faec18cb44c9824", "score": "0.62533593", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementOrStatementStatementOrStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "e20d35e3b93a27b223017e17113e3508", "score": "0.6243499", "text": "def test_request_headers_methods(self):\n url = \"http://example.com\"\n req = Request(url, headers={\"Spam-eggs\": \"blah\"})\n self.assertTrue(req.has_header(\"Spam-eggs\"))\n self.assertEqual(req.header_items(), [('Spam-eggs', 'blah')])\n\n req.add_header(\"Foo-Bar\", \"baz\")\n self.assertEqual(sorted(req.header_items()),\n [('Foo-bar', 'baz'), ('Spam-eggs', 'blah')])\n self.assertFalse(req.has_header(\"Not-there\"))\n self.assertIsNone(req.get_header(\"Not-there\"))\n self.assertEqual(req.get_header(\"Not-there\", \"default\"), \"default\")\n\n req.remove_header(\"Spam-eggs\")\n self.assertFalse(req.has_header(\"Spam-eggs\"))\n\n req.add_unredirected_header(\"Unredirected-spam\", \"Eggs\")\n self.assertTrue(req.has_header(\"Unredirected-spam\"))\n\n req.remove_header(\"Unredirected-spam\")\n self.assertFalse(req.has_header(\"Unredirected-spam\"))", "title": "" }, { "docid": "f15d321b687676a1ae4e0413b7440939", "score": "0.6240447", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementOrStatementStatementNotStatementStatementNotStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "f160b8a4b4db64ae71a04a8f89801381", "score": "0.62404126", "text": "def single_header(self) -> 
Optional['outputs.WebAclRuleStatementOrStatementStatementNotStatementStatementOrStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "681ced340bc96117d44d6dd6b0a99707", "score": "0.6238815", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementAndStatementStatementAndStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "6c254569ed439ed49f384b943fa29de3", "score": "0.62303483", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementAndStatementStatementNotStatementStatementAndStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "1b90ab64836f2ca979f32303197058e0", "score": "0.6222756", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementOrStatementStatementAndStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "2d0b87d80d59fb87b245d04c04ca410c", "score": "0.62131596", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementAndStatementStatementNotStatementStatementOrStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "27604bd7a95c9f3021b14eaa3f5cda61", "score": "0.62114704", "text": "def test_check_for_invalid_header(test_input: str, should_match: bool) -> None:\r\n actual = check_for_invalid_header(test_input)\r\n expected = FormattingIssue.INVALID_HEADER if should_match else None\r\n assert actual == expected", "title": "" }, { "docid": "f15314c4955cac9e4a51c45259e24bb5", "score": "0.6201364", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementOrStatementStatementOrStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "0c76f0d774ff7792a4145b2bcabc3c68", "score": "0.6198842", "text": "def single_header(self) -> Optional['outputs.RuleGroupRuleStatementNotStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "4d6f68ee1376fbc40a5958d570ffb72a", "score": "0.6196632", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementAndStatementStatementOrStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "b7cfc741b1ee09f5225f8c9f2e37399d", "score": "0.61962575", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementAndStatementStatementAndStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "94db2e0ce911e78f7e5b08d7a61f30ba", "score": "0.619388", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementOrStatementStatementNotStatementStatementAndStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "ec009c813d8867527a3c417c3758530f", "score": "0.61859596", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementAndStatementStatementAndStatementStatementNotStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", 
"title": "" }, { "docid": "3cbdc1ae86f859266d0eedd6e6db3d40", "score": "0.6184554", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementOrStatementStatementOrStatementStatementNotStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "6aa0392d42678a8410f4e1feb4a86747", "score": "0.6179455", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementOrStatementStatementAndStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "20da4d41caa293922e75477eeb8f61ac", "score": "0.61792123", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementSqliMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "a9ed5591f515c203528cd48a099ceece", "score": "0.6176439", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementOrStatementStatementOrStatementStatementOrStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "d9ca1dd16593e866ccf6e625b5e30978", "score": "0.61742306", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementByteMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "fc99198b24a645e552af00b5dc059acd", "score": "0.6166086", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementAndStatementStatementOrStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "bfba33cc8bc703800684521de0962d39", "score": "0.61643374", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementOrStatementStatementAndStatementStatementNotStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "1446e53540b875183057eeb7222928e7", "score": "0.61616725", "text": "def single_header(self) -> Optional['outputs.RuleGroupRuleStatementNotStatementStatementNotStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "323f44dd9462fab18d0ec12e6078b6b7", "score": "0.6158034", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementAndStatementStatementOrStatementStatementNotStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "384ff4d8b7f94cbcbd4dddc3fcf9f1dd", "score": "0.6143775", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementAndStatementStatementAndStatementStatementAndStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "b713be362fd3e43764e013c3aaf0e948", "score": "0.61411524", "text": "def raise_if_missing_required_headers(\n headers, required_headers=[], case_sensitive=False\n):\n if not case_sensitive:\n headers = [h.lower() if h else None for h in headers]\n missing_headers = [h for h in required_headers if h.lower() not in headers]\n else:\n missing_headers = [h for h in required_headers if h not in headers]\n\n if missing_headers:\n raise MissingHeaders(missing_headers)", "title": "" }, { "docid": "b65c38f552700cca82abd4fcad21679f", "score": "0.61402404", 
"text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementNotStatementStatementSqliMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "236a4adb40c34a4304e9d06fc6f5cbc4", "score": "0.6138567", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementNotStatementStatementByteMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "1a9f16aa71d9571b8a56c5393e0cda74", "score": "0.61348003", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementNotStatementStatementNotStatementStatementSqliMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "ed08704903bc33a31033deb7f092b0c7", "score": "0.61314565", "text": "def single_header(self) -> Optional['outputs.RuleGroupRuleStatementAndStatementStatementNotStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "bde2a708ff13dea7b613cd75971b6f24", "score": "0.6130021", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementNotStatementStatementOrStatementStatementByteMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "0741040ff925008d2eed58b97f91ae71", "score": "0.6129586", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementOrStatementStatementByteMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "5301b8a56d3f76a58e398844f8a4713f", "score": "0.61277366", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementOrStatementStatementOrStatementStatementAndStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "7db992600e05e5ec10e07a2f92082c0c", "score": "0.6127499", "text": "def single_header(self) -> Optional['outputs.RuleGroupRuleStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "7be2435d7275dc86921707bef0c22e23", "score": "0.61257905", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementAndStatementStatementOrStatementStatementOrStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "cddedd80d297caba583a371e24f99799", "score": "0.61250055", "text": "def test_custom_header(self) -> None:\n\n response = self.request(\n \"/\",\n headers={\"Special_Header\": \"Special Value\"},\n accept=\"json\"\n )\n\n pair = next(\n pair\n for pair in response.json\n if pair[0] == \"Special_Header\"\n )\n\n self.assertEqual(pair[1], \"Special Value\")", "title": "" }, { "docid": "7c66e5d69cdbfd104580931559dae9e9", "score": "0.61233956", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementOrStatementStatementAndStatementStatementOrStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "4ae2a02f449dca27eb69b11f118e9e6b", "score": "0.61230433", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementNotStatementStatementNotStatementStatementByteMatchStatementFieldToMatchSingleHeader']:\n 
return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "27277c039a1b72f5f785e30e5b88a88f", "score": "0.6122833", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementOrStatementStatementAndStatementStatementAndStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "b478c89c52206125d236606f18aa0a4c", "score": "0.6122253", "text": "def single_header(self) -> Optional['outputs.RuleGroupRuleStatementOrStatementStatementNotStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "5fae64b952f0c8f321c89ce68bc7ebfd", "score": "0.61218154", "text": "def single_header(self) -> Optional['outputs.RuleGroupRuleStatementNotStatementStatementAndStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "165da0beb17c017054ae9d1842ffb2ae", "score": "0.6120075", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementOrStatementStatementOrStatementStatementByteMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "543a07686a6cacd6a8536ed0c20f5f3f", "score": "0.6119539", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementAndStatementStatementNotStatementStatementSqliMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "20cabddf6d1c5768a840a2a6d9db450f", "score": "0.6119333", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementNotStatementStatementOrStatementStatementSqliMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "a0ab65ae8a325819fc14b30e714e7003", "score": "0.6118359", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementByteMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "a26c6247bcdfbbd5dfe680aaca53d238", "score": "0.61176646", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementAndStatementStatementSqliMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "3cabd9edad9c55389c8c29fb74dc9390", "score": "0.6117381", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementAndStatementStatementByteMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "960cd0d3637bb29ed84b9021432aa342", "score": "0.6117214", "text": "def single_header(self) -> Optional['outputs.RuleGroupRuleStatementNotStatementStatementOrStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "8310c0c75a5a591b8f3fc56d2e1ff25c", "score": "0.61166924", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementOrStatementStatementSqliMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "f23efd2a7f73b146127afdcda971947f", "score": "0.6113494", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementOrStatementStatementNotStatementStatementSqliMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, 
\"single_header\")", "title": "" }, { "docid": "083be31d6be58e310f028fc1e1c9cad3", "score": "0.61129767", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementOrStatementStatementNotStatementStatementByteMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "e593b28ba1e145f1114a8accb84d08bc", "score": "0.6112687", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementSqliMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "170f131da9fb1d7e5d88fb31e5d9a83a", "score": "0.6112018", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementNotStatementStatementAndStatementStatementSqliMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "3530398840f139dc1dc7abb400b5c67f", "score": "0.6109268", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementNotStatementStatementAndStatementStatementByteMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "6eab092e33b2474ac5cf20ec7f0e45a4", "score": "0.610843", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementOrStatementStatementOrStatementStatementSqliMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "8b610cda08524c5be3a998b4e1f84f48", "score": "0.6108396", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementOrStatementStatementNotStatementStatementByteMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "309fd5c1afd8b2477ad2bcb4ad7dce80", "score": "0.61069626", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementOrStatementStatementNotStatementStatementSqliMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "58a96b6e5511c0e01a1df7163589d1ee", "score": "0.61025596", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementAndStatementStatementNotStatementStatementByteMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "10fa316a4ce454985908f5b0bd0b2661", "score": "0.6100184", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementAndStatementStatementNotStatementStatementSqliMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "2c1929d595313c1215f5e9b96fa7ef1c", "score": "0.6098221", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementAndStatementStatementOrStatementStatementAndStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "c238a8ea368a4ce1c25675837fd999f5", "score": "0.6096368", "text": "def single_header(self) -> Optional['outputs.RuleGroupRuleStatementAndStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "3ddde309bdee376e3edd1b4e6d5543d0", "score": "0.60949314", "text": "def single_header(self) -> 
Optional['outputs.WebAclRuleStatementOrStatementStatementNotStatementStatementOrStatementStatementSqliMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "680d115c2cc8ea19906754f0c5b3dbb5", "score": "0.60929126", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementAndStatementStatementNotStatementStatementNotStatementStatementSqliMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "c180c841b00cf4aa6142fc2d4732376e", "score": "0.6092361", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementOrStatementStatementNotStatementStatementOrStatementStatementByteMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "d5fb8ce212a0237d44dc34e04308d939", "score": "0.6088101", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementAndStatementStatementAndStatementStatementOrStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "df6e71d0403b8fe1f5a5b0b8a5104a56", "score": "0.6086879", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementAndStatementStatementNotStatementStatementByteMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "5fa5e390416611e3480ad0cb0f564c85", "score": "0.6086842", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementOrStatementStatementNotStatementStatementNotStatementStatementSqliMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "44b1d912d47dfea1286093c878c5dd85", "score": "0.608305", "text": "def single_header(self) -> Optional['outputs.RuleGroupRuleStatementOrStatementStatementXssMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "69cfaa15969ce57e0a9a38fb8ebbef20", "score": "0.60796624", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementOrStatementStatementAndStatementStatementSqliMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "584eb21a266cbf1bf006766da3595dfa", "score": "0.60785913", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementOrStatementStatementAndStatementStatementByteMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" }, { "docid": "0b7e164d3b04e72dacd8740dfbffb45d", "score": "0.6074986", "text": "def single_header(self) -> Optional['outputs.WebAclRuleStatementNotStatementStatementAndStatementStatementAndStatementStatementByteMatchStatementFieldToMatchSingleHeader']:\n return pulumi.get(self, \"single_header\")", "title": "" } ]
bad8a2eb03ed3c9eac0d82fdf186d7d7
Passes an XML file to get_hosts_details_list() and expects a dictionary. For nodes on which sgeexecd isn't running, np_load_avg is ''; this function checks for such instances.
[ { "docid": "a5b253859bb36d436f01120784621e21", "score": "0.5170468", "text": "def seek_ooc_nodes(xmlfile):\n\n # Passes XML file get_hosts_details_list() for parsing.\n hosts_lists = get_hosts_details_list(xmlfile)\n\n # Dictionary of out of circulation nodes.\n ooc_nodes = {}\n\n for node in hosts_lists.keys():\n # Omits out global.\n # Omits out Login nodes, admin nodes and head nodes.\n if node not in ('global', 'admin.default.domain', 'aquilah1.default.domain', 'aquilah2.default.domain'):\n\n # Get np_load_avg(load average)\n np_load_avg = hosts_lists[node]['hostvalue']['np_load_avg']\n\n # If value of hosts_lists[node]['hostvalue']['np_load_avg'] is a dash (-).\n if (np_load_avg == '-'):\n ooc_nodes[node] = hosts_lists[node]['hostvalue']\n\n # Passes dictionary out to writeout to generate HTML file.\n writeout.write_to_html('ooc',ooc_nodes)\n # If there are any nodes which are out of circulation, call send_email.py to send e-mail notification.\n if len(ooc_nodes) > 0:\n send_email.ooc_email()\n return True", "title": "" } ]
[ { "docid": "4df47758b7d2ee9b8c0d07074529331c", "score": "0.62492794", "text": "def get_hosts_details_list(xmlfile):\n\n findall_hosts = []\n\n try:\n dom = ElementTree.parse(xmlfile)\n except Exception, e:\n print(e)\n else:\n # Find all hosts in dom.\n findall_hosts = dom.findall('host')\n\n cluster = {}\n for h in findall_hosts:\n # Create a key in cluster dictionary, named by each unique host in the cluster.\n cluster[h.attrib['name']] = {}\n # Loop through the list of hostvalues under host entity\n # hv is the memory element of the hostvalue (e.g. <Element 'hostvalue' at 0x10dd30c50>), hence we break the element down into two sections, hv.attrib['name'] (the name of the hostvalue) and hv.text (the value of the hostvalue), put these two values into a set, and put these sets into a list.\n # Example - [('arch_string', 'lx-amd64'), ('num_proc', '40'), ('m_socket', '2'), ('m_core', '20'), ('m_thread', '40'), ('np_load_avg', '0.32'), ('mem_total', '125.9G'), ('mem_used', '4.7G'), ('swap_total', '7.8G'), ('swap_used', '15.5M')]\n all_hostvalue = [(hv.attrib['name'], hv.text) for hv in h.findall('.//hostvalue')]\n # Next, we create a sub dictionary key call 'hostvalue' under the host key.\n cluster[h.attrib['name']]['hostvalue'] = {}\n # We loop through the hostvalue list and populate the values for the hostvalue key under the host key with the name of the hostvalue and the value of the hostvalue.\n for hv in all_hostvalue:\n cluster[h.attrib['name']]['hostvalue'][hv[0]] = hv[1]\n # Each host is usually assigned to 1 or more queue, here, we identify the complete list of queues, which each particular host can be a part of, or not, and create sub keys under the host key.\n all_queue = [q.attrib['name'] for q in h.findall('.//queue')]\n # Next, we loop through the whole list of qname from `all_queue`, and compare with the list of qname in h.findall('.//queue/queuevalue'), when there is a match (there's always a match), take the names and the values under matched qname and add to qname sub key.\n for qname in all_queue:\n cluster[h.attrib['name']][qname] = {}\n for qv in h.findall('.//queue/queuevalue'):\n if qname == qv.attrib['qname']:\n cluster[h.attrib['name']][qname][qv.attrib['name']] = qv.text\n return cluster", "title": "" }, { "docid": "1aaf42241632699dc960ed2c2112edda", "score": "0.60220104", "text": "def seek_heavy_load(xmlfile):\n\n hosts_lists = get_hosts_details_list(xmlfile)\n\n heavy_load = {}\n\n for node in hosts_lists.keys():\n # Omits out global.\n # Omits out Login nodes, admin nodes and head nodes.\n\n if node not in ('global', 'admin.default.domain', 'aquilah1.default.domain', 'aquilah2.default.domain'):\n \n # Check if node is down, as marked by a dash (-) in NLOAD, MEMUS and SWAPUS. \n if (hosts_lists[node]['hostvalue']['mem_used']) == '-':\n pass\n else:\n # Get mem_used and mem_total. 
Convert it to float.\n memory_used = float(hosts_lists[node]['hostvalue']['mem_used'].rstrip(\"G\"))\n memory_total = float(hosts_lists[node]['hostvalue']['mem_total'].rstrip(\"G\"))\n\n # Get np_load_avg(load average)\n np_load_avg = hosts_lists[node]['hostvalue']['np_load_avg']\n\n # Calculate memory (RSS) usage percentage.\n mem_used_percentage = (float(memory_used)/float(memory_total))*100\n \n # Verbose printing of memory (RSS) usage.\n # print(\"{0}, {1:2.2f}%\").format(node, mem_used_percentage)\n\n # If node has high load average AND high memory usage.\n if (float(np_load_avg) >= 0.80) and (mem_used_percentage >= 80.0):\n hosts_lists[node]['hostvalue']['np_load_avg'] = \"<font style=\\\"color:red;\\\">\" + hosts_lists[node]['hostvalue']['np_load_avg']\n hosts_lists[node]['hostvalue']['mem_used'] = \"<font style=\\\"color:red;\\\">\" + hosts_lists[node]['hostvalue']['mem_used']\n # If node has high load average ONLY.\n elif (float(np_load_avg) >= 0.80) and (mem_used_percentage < 80.0):\n hosts_lists[node]['hostvalue']['np_load_avg'] = \"<font style=\\\"color:red;\\\">\" + hosts_lists[node]['hostvalue']['np_load_avg']\n heavy_load[node] = hosts_lists[node]['hostvalue']\n # If node has high memory usage ONLY.\n elif (float(np_load_avg) < 0.80) and (mem_used_percentage >= 80.0):\n hosts_lists[node]['hostvalue']['mem_used'] = \"<font style=\\\"color:red;\\\">\" + hosts_lists[node]['hostvalue']['mem_used']\n heavy_load[node] = hosts_lists[node]['hostvalue']\n else:\n pass\n\n # Passes dictionary out to writeout.py to generate HTML file.\n writeout.write_to_html('hl',heavy_load)\n return True", "title": "" }, { "docid": "6a25cae13c4a34d6ff62c3299fc1525a", "score": "0.56235987", "text": "def parse_nmap_xml(self, xml, baseline={}):\n\n hosts = {}\n\n try:\n root = Et.fromstring(xml)\n except:\n raise Exception(\"Unable to parse nmap output\")\n\n # Get every host found\n for host in root.findall(\"host\"):\n # Get the IP\n ip = host.find('address').get('addr')\n\n # Get the hostname if defined (user selected), otherwise use the IP address as the hostname\n hostname = ''\n hostnames = []\n for hn in host.find('hostnames'):\n if hn.get('type') == 'user':\n hostname = hn.get('name')\n else:\n hostnames.append(hn.get('name'))\n if not hostname:\n hostname = ip\n\n hosts[hostname] = {}\n hosts[hostname]['hostnames'] = hostnames\n hosts[hostname]['ip'] = ip\n hosts[hostname]['ports'] = {}\n\n # Get open ports, and stuff their data into our hosts data structure\n ports = host.find('ports')\n for port in ports.findall('port'):\n portid = port.get('portid')\n state = port.find('state').get('state')\n\n service = port.find('service')\n product = service.get('product')\n version = service.get('version')\n\n if product and version:\n product = product + ' ' + version\n\n if 'open' in state:\n hosts[hostname]['ports'][portid] = {\n 'protocol': port.get('protocol'),\n 'state': state,\n 'product': product,\n 'name': service.get('name'),\n }\n\n\n # If a baseline has been passed in, we need to prune the output of all those entries\n if baseline:\n # Let's remove the __ALLHOSTS__ entry from the baseline, turn into things we can use\n allhosts = baseline.pop('__ALLHOSTS__', {'ports': [], 'products': []})\n allhosts_ports = set()\n for port in allhosts['ports']:\n allhosts_ports.update(self._generate_port_range(port))\n allhosts_products = [re.compile('^' + x, re.IGNORECASE) for x in allhosts['products']]\n\n # \"Deserialize\" the baseline, where you can't use periods in key entries (hostnames)\n for 
baseline_host in baseline.iterkeys():\n baseline[ baseline_host.replace('_', '.') ] = baseline.pop(baseline_host)\n\n\n for host in hosts.iterkeys():\n for port in hosts[host]['ports'].keys():\n # If a port or product is in the __ALLHOSTS__ entry, it shouldn't show up in the results\n if (int(port) in allhosts_ports or\n [True for x in allhosts_products if x.search(str(hosts[host]['ports'][port]['product']))]):\n del(hosts[host]['ports'][port])\n continue\n\n # If a host -> port -> state and host -> port -> product match their mirror in the baseline, remove them\n # from the output\n if ((hosts[host]['ports'][port]['state'] ==\n baseline.get(host, {})\n .get('ports', {})\n .get(port, {})\n .get('state', None)) and\n (re.search('^' + str(\n baseline.get(host, {})\n .get('ports', {})\n .get(port, {})\n .get('product', '')), str(hosts[host]['ports'][port]['product']), re.IGNORECASE))):\n del(hosts[host]['ports'][port])\n\n return hosts", "title": "" }, { "docid": "ac0aa14add31d2be06040c81204aae3b", "score": "0.55152714", "text": "def check_data_nodes(self, config):\n ip_addresses = {}\n datanodes_ip = []\n for ds in config.datasources:\n try:\n ip = getHostByName(ds.title.split(':')[0])\n except Exception:\n continue\n datanodes_ip.append(ip)\n ip_addresses.update({ds.title.split(':')[0]: ds})\n if ds.manageIp not in datanodes_ip:\n ip_addresses.update({ds.manageIp: None})\n return ip_addresses", "title": "" }, { "docid": "4b4d836cc8913f85ed46b1601817997f", "score": "0.54728377", "text": "def polling_metrics(self, hosts, hosts_map):\n def _all_instance_details():\n response = self.nv_client.instance_get_all()\n servers = [server.to_dict() for server in response]\n if not servers:\n LOG.warning(\"Servers list is empry,\"\n \"skip to update zabbix values\")\n return servers\n\n All_INSTANCES = _all_instance_details() or []\n for instance in All_INSTANCES:\n if instance['id'] in hosts and utils.is_active(instance):\n LOG.info(\"Start Checking host : %s\"\n % hosts_map[instance['id']][1])\n # Get links for instance compute metrics\n resources = self.gnocchi_client.list_resources(resource_type='instance',\n resource_id=instance['id'])\n \n # Add a new instance and its metrics\n if instance['id'] not in METRIC_CACEHES.keys():\n rs_items = {}\n print(resources[\"metrics\"])\n\n # for rs in resources[\"metrics\"]:\n # if rs.resource_id.startswith('instance'):\n # rs_items[rs.resource_id] = NETWORK_METRICS\n # # NOTE:remove disk metrics\n # elif utils.endswith_words(rs.resource_id):\n # pass\n # else:\n # rs_items[rs.resource_id] = INSTANCE_METRICS\n #METRIC_CACEHES[instance['id']] = rs_items\n # Update metric_caches where instance_in exists.For the case:\n # instance add/remove a nic\n # instance add/remove a volume\n else:\n rs_items = METRIC_CACEHES[instance['id']]\n rs_item_keys = rs_items.keys()\n for rs in resources:\n if rs.resource_id not in rs_item_keys and \\\n rs.resource_id.startswith('instance'):\n rs_items[rs.resource_id] = NETWORK_METRICS\n METRIC_CACEHES[instance['id']] = rs_items\n # NOTE:remove disk metrics\n elif rs.resource_id not in rs_item_keys and \\\n utils.endswith_words(rs.resource_id):\n pass\n else:\n continue\n LOG.info(\"Starting to polling %s(%s) metric into zabbix\"\n % (instance.get('name'), instance.get('id')))\n pxy = self.zabbix_hdl.get_by_proxyid(\n hosts_map[instance['id']][2])\n if pxy:\n proxy_name = pxy['host']\n else:\n LOG.warning(\"Can't find the proxy:%s,Skip to polling \"\n \"instance_id %s metrics.\"\n % (hosts_map[instance['id']][2],\n instance['id']))\n 
continue\n # Polling Gnocchi the latest sample into zabbix\n # CLI:Gnocchi statistics -m {...} -q resource_id={...} -p ..\n self._polling_metrics(instance['id'],\n proxy_name)\n LOG.debug(\"Finshed to polling %s(%s) metric into zabbix\"\n % (instance.get('name'), instance.get('id')))\n else:\n LOG.debug(\"Can't find the instance : %s(%s), \"\n \"or the status of %s is not active\"\n % (instance.get('name'),\n instance.get('id'),\n instance.get('name'))\n )", "title": "" }, { "docid": "dff28faad4b3c5de7cf877d8c41a2e46", "score": "0.5330758", "text": "def test_hadoop_hosts_file(self):\n hadoop_nodes = HadoopCluster().nodes\n\n for node in hadoop_nodes:\n self.assertEqual(node.ip_address, self.ec2.getInstance(node.name).ip_address)\n self.assertEqual(node.private_ip_address, self.ec2.getInstance(node.name).private_ip_address)\n self.assertEqual(node.dns_name, self.ec2.getInstance(node.name).dns_name)", "title": "" }, { "docid": "920325b51618515c0ecd097780cb8d16", "score": "0.5254477", "text": "def _check_hosts_and_ports(executive, xml):\n log.debug('In \"_check_hosts_and_ports\"')\n if executive is None:\n log.debug('executive is None - all good')\n return True\n root = ET.fromstring(xml)\n context = root.find('.//{' + CONFIG.xdaqxmlnamespace + '}Context')\n contexturl = context.attrib['url'].split(':')\n contexthost = contexturl[-2][2:] # drop two slashes after 'http:'\n contextport = int(contexturl[-1])\n endpoint = context.find('.//{' + CONFIG.xdaqxmlnamespace + '}Endpoint')\n endpointhost = endpoint.attrib['hostname']\n endpointport = int(endpoint.attrib['port'])\n log.debug(\n 'executive.host: {}, executive.port: {}, context.host: {}, '\n 'context.port: {}, endpoint.host: {}, endpoint.port: {}'.format(\n executive['host'], executive['port'], contexthost, contextport,\n endpointhost, endpointport))\n if executive['host'] == contexthost and executive['host'] == endpointhost:\n if executive['port'] == contextport and contextport != endpointport:\n return True\n raise err.ConfiguratorUserError(\n 'Failed hosts&ports check',\n details=(\n 'Some of the following violated:\\n'\n '1) Executive.host ({}) = Context.host ({}) = Endpoint.host ({})\\n'\n '2) Executive.port ({}) = Context.port ({})\\n'\n '3) Context.port ({}) != Endpoint.port ({})').format(\n executive['host'], contexthost, endpointhost,\n executive['port'], contextport, contextport, endpointport))", "title": "" }, { "docid": "90b99c7354143a8559288a8b42516cbb", "score": "0.517839", "text": "def load_instances():\n ec2 = boto3.resource('ec2')\n ec2_instances = ec2.instances.all()\n instances = list()\n not_running = 0\n no_key = 0\n\n for i in ec2_instances:\n if i.key_pair:\n n = i.key_pair.name\n if n in pkeys:\n if i.state['Name'] == 'running':\n mi = MyInstance(i.id, find_name(i), i.private_ip_address, n)\n instances.append(mi)\n else:\n not_running += 1\n else:\n no_key += 1\n else:\n no_key += 1\n\n print \"{} instances not running\".format(not_running)\n print \"{} instances without valid keypair\".format(no_key)\n return instances", "title": "" }, { "docid": "eb38f92f35bd88ae290cbf5f8a55b275", "score": "0.51634264", "text": "def validate_sdx_nodes_number_and_content(sdx_topo, expected_nodes):\\\n\n if len(sdx_topo['nodes']) == 0: return Exception(\"List of nodes is EMPTY \"\n \"after Napp initialization\")\n\n if isinstance(sdx_topo, dict):\n if len(sdx_topo['nodes']) == expected_nodes:\n print(\"Success\")\n else:\n raise Exception(\"The returned Amlight-SDX topology is not a dictionary\")", "title": "" }, { "docid": 
"fc7ced797c94324711791752c171afeb", "score": "0.5071314", "text": "def parse_xml():\n\n try:\n tree = ET.parse('config.xml')\n except ET.ParseError:\n print('Error at parsing the config.xml file. Please check the file')\n exit()\n\n root = tree.getroot()\n\n for child in root:\n user = child.attrib['username']\n host = child.attrib['ip']\n pasw = child.attrib['password']\n mail = child.attrib['mail']\n\n ssh = config_ssh()\n\n try:\n ssh.connect(host, username=user, password=pasw)\n except ssh_auth_exceptions:\n print('SSH authentication problem on', host)\n continue\n except ssh_conn_exceptions:\n print('Connection problem to host', host)\n continue\n\n isWin = is_win(ssh)\n\n data = upload_and_run(ssh, isWin, pasw)\n\n insertValues = [host]\n for i in range(0, 3):\n insertValues.append(float(data[i]))\n\n query = 'INSERT INTO systems (ip, memory_usage, cpu_usage, uptime) '\n query += 'VALUES (?, ?, ?, ?)'\n\n if isWin:\n query = 'INSERT INTO systems'\n query += '(ip, memory_usage, cpu_usage, uptime, events)'\n query += 'VALUES (?, ?, ?, ?, ?)'\n\n events = ''\n for i in range(3, len(data)):\n events += data[i] + ', '\n insertValues.append(events)\n\n save_to_database(query, insertValues, cursor)\n\n for alert in child:\n email_alert(host, alert, data, smtpObj, mail)\n\n ssh.close()", "title": "" }, { "docid": "b6a55bdc1a200571b4ceb859f48a3869", "score": "0.506782", "text": "def load(self):\n with open(self.filename) as f:\n content = f.readlines()\n content = [\" \".join(x.split()) .strip('\\n').lstrip().split(' ', 1) for x in content] \n # removes duplicated spaces, and splits in two fields, removes leading spaces\n hosts = {}\n host = \"NA\"\n for line in content:\n if line[0].startswith('#') or line[0] is '':\n pass # ignore line\n else:\n attribute = line[0]\n value = line[1]\n if attribute in ['Host']:\n host = value\n hosts[host] = {'host': host}\n else:\n hosts[host][attribute] = value\n pass\n self.hosts = hosts", "title": "" }, { "docid": "ec98a05625cfd5f4a7b41b486491a520", "score": "0.5032809", "text": "def load():\n\tload = None\n\tuptime_out = run(\"uptime\")\n\tnodes = re.split('[ ,:]+', uptime_out.strip())\n\tprint nodes\n\tis_load = False\n\tfor node in nodes: #picking next node after average keyword\n\t\tif is_load:\n\t\t\tis_load = False\n\t\t\tload = node\n\t\tif node == \"average\":\n\t\t\tis_load = True\n\t_extend_summary(env.host,\"load\",load)\n\tprint load", "title": "" }, { "docid": "fa01f4b5f1fcb6d2b350940f639bba64", "score": "0.50218165", "text": "def nodelist():\n process = sp.Popen(['pbsnodes -av'], stdout=sp.PIPE, stderr=sp.PIPE, shell=True)\n out, err = process.communicate()\n lines, types, status, used_cores, cores, used_memory, mem = [], [], [], [], [], [], []\n nodes_list = []\n node = ''\n for l in out.splitlines():\n l = l.decode('utf-8')\n node += l\n if l == '':\n nodes_list.append(node)\n node = ''\n\n for node in nodes_list:\n type_match = re.search(r'(\\w\\w\\w)\\d\\d\\d', node)\n\n cpu_match = re.search(r'resources_available.ncpus = (\\w+)', node)\n used_cpu_match = re.search(r'resources_assigned.ncpus = (\\w+)', node)\n\n mem_match = re.search(r'resources_available.mem = (\\w+)kb', node)\n used_mem_match = re.search(r'resources_assigned.mem = (\\w+)kb', node)\n\n status_match = re.search(r'state = ([a-z\\-]+ [a-z]*)', node)\n if type_match and cpu_match and mem_match and status_match:\n type = type_match.group(1)\n total_cpu = int(cpu_match.group(1))\n used_cpu = int(used_cpu_match.group(1))\n total_mem = int(int(mem_match.group(1)) / 1024 / 
1024)\n used_mem = int(int(used_mem_match.group(1)) / 1024 / 1024)\n node_status = status_match.group(1).strip()\n if node_status == 'free' and used_cpu != 0:\n node_status = 'partially free'\n\n types.append(type)\n status.append(node_status)\n cores.append(total_cpu)\n mem.append(total_mem)\n used_cores.append(used_cpu)\n used_memory.append(used_mem)\n df = pd.DataFrame(dict(\n type=types,\n cores=cores,\n used_cores=used_cores,\n memory=mem,\n used_memory=used_memory,\n status=status\n ))\n return df", "title": "" }, { "docid": "b606d4280c8d840b0442d53830a07e14", "score": "0.5008108", "text": "def __readConfigInfo(self):\n self.logger.debug(\"Begin read config file...\")\n try:\n self.clusterInfo = dbClusterInfo()\n self.clusterInfo.initFromXml(self.xmlfile)\n hostName = socket.gethostname()\n self.dbNodeInfo = self.clusterInfo.getDbNodeByName(hostName)\n if (self.dbNodeInfo is None):\n self.logger.logExit(\"Get local instance info failed!There is no host named %s!\" % hostName)\n except Exception, e:\n self.logger.logExit(str(e))\n\n self.logger.debug(\"Instance info on local node:\\n%s\" % str(self.dbNodeInfo))\n self.logger.debug(\"End read config file\")", "title": "" }, { "docid": "d1a797141100e9a9fe1e19dbf9027e68", "score": "0.500438", "text": "def list_nodes_hostnames(self):\n\t\treturn self.stats_colector.list_nodes_hostnames()", "title": "" }, { "docid": "9c3c24ff062354aa7ef3082b4eb88833", "score": "0.49821407", "text": "def ex_describe_addresses(self, nodes):\r\n if not nodes:\r\n return {}\r\n\r\n params = {'Action': 'DescribeAddresses'}\r\n\r\n if len(nodes) == 1:\r\n self._add_instance_filter(params, nodes[0])\r\n\r\n result = self.connection.request(self.path, params=params).object\r\n\r\n node_instance_ids = [node.id for node in nodes]\r\n nodes_elastic_ip_mappings = {}\r\n\r\n # We will set only_associated to True so that we only get back\r\n # IPs which are associated with instances\r\n only_associated = True\r\n\r\n for node_id in node_instance_ids:\r\n nodes_elastic_ip_mappings.setdefault(node_id, [])\r\n for addr in self._to_addresses(result,\r\n only_associated):\r\n\r\n instance_id = addr.instance_id\r\n\r\n if node_id == instance_id:\r\n nodes_elastic_ip_mappings[instance_id].append(\r\n addr.ip)\r\n\r\n return nodes_elastic_ip_mappings", "title": "" }, { "docid": "1ab4f9e712a39497fefa691567e51926", "score": "0.49488676", "text": "def get_bcbio_nodes(path):\n with open(path, 'r') as file_handle:\n hosts = collections.defaultdict(dict)\n for line in file_handle:\n matches = re.search(r'\\]\\s([^:]+):', line)\n if not matches:\n continue\n # Format of the record will be \"[Date] host: Timing: Step\" if distributed,\n # otherwise the host will be missing and it means its a local run, we can stop\n elif 'Timing: ' in line and line.split(': ')[1] != 'Timing':\n hosts = collections.defaultdict(dict, {socket.gethostname() : {}})\n break\n\n hosts[matches.group(1)]\n\n return hosts", "title": "" }, { "docid": "3e9c765b7b08dd591c532a046348cd85", "score": "0.4944694", "text": "def create_execute_nodes(instances, config:Munch, execute_config_file:str=None, nr:int=1):\n\n nr = 1\n\n node_names = []\n\n\n for i in range(0, nr ):\n\n cloud_name = None\n clouds = list(instances.clouds().keys())\n\n clouds_usable = []\n \n for cloud_name in clouds:\n cloud = instances.get_cloud( cloud_name )\n resources = cloud.get_resources_available()\n if ( resources['ram'] > config.daemon.min_ram*1024 and\n resources['cores'] > config.daemon.min_cores and\n resources['instances'] > 
config.daemon.min_instances):\n clouds_usable.append( cloud_name )\n\n clouds = clouds_usable\n \n if ( clouds == []):\n logger.warn('No resources available to make a new node')\n return\n \n # for round-robin\n ### find the next cloud name\n if 'node_allocation' not in config.daemon:\n config.daemon['node_allocation'] = 'round-robin'\n\n if ( config.daemon.node_allocation == 'round-robin'):\n\n nodes_created = len( instances.get_nodes())\n\n if nodes_created == 0:\n cloud_name = clouds[ 0 ]\n else:\n cloud_name = clouds[ nodes_created%len( clouds )]\n \n node_name = make_node_name(config.ehos.project_prefix, \"execute\")\n\n elif ( config.ehos.deamon.node_allocation == 'random'):\n cloud_name = random.choice( clouds )\n elif ( config.ehos.deamon.node_allocation == 'fill first'):\n cloud_name = clouds[ 0 ]\n\n else:\n logger.critical(\"Unknown node allocation method ({})\".format( config.daemon.node_allocation ))\n raise RuntimeError(\"Unknown node allocation method ({})\".format( config.daemon.node_allocation ))\n\n\n cloud = instances.get_cloud( cloud_name )\n\n logger.debug( \"Using image {}\".format( config.clouds[ cloud_name ].image ))\n \n try:\n config.ehos.image= config.clouds[ cloud_name ].image\n \n node_id = cloud.server_create( name=node_name,\n userdata_file=execute_config_file,\n **config.ehos )\n\n if ( 'scratch_size' in config.ehos and\n config.ehos.scratch_size is not None and\n config.ehos.scratch_size != 'None'):\n\n try:\n volume_id = cloud.volume_create(size=config.ehos.scratch_size, name=node_name)\n cloud.attach_volume( node_id, volume_id=volume_id)\n except:\n logger.warning(\"Could not create execute server, not enough disk available, deleting the instance.\")\n cloud.server_delete( node_id )\n\n\n instances.add_node( id=node_id, name=node_name, cloud=cloud_name, node_state='node_starting', vm_state='vm_booting')\n logger.debug(\"Execute server {}/{} is vm_booting\".format( node_id, node_name))\n node_names.append(node_name)\n\n except Exception as e:\n logger.warning(\"Could not create execute server\")\n logger.debug(\"Error: {}\".format(e))\n\n \n\n return node_names", "title": "" }, { "docid": "208d7224e713ea72ca067cd8478b7819", "score": "0.493363", "text": "def describe_addresses(self, xml_bytes):\n results = []\n root = XML(xml_bytes)\n for address_data in root.find(\"addressesSet\"):\n address = address_data.findtext(\"publicIp\")\n instance_id = address_data.findtext(\"instanceId\")\n results.append((address, instance_id))\n return results", "title": "" }, { "docid": "17f8b5ed2f3e771a36facfa4b6763fca", "score": "0.4858748", "text": "def check(self):\n # reread conf file every time so we can see changes\n success = True\n for cluster_name, monitors in list(self.cluster_monitors.items()):\n if self.is_active(cluster_name):\n unreachable = 0\n for monitor in monitors:\n try:\n success = monitor.check()\n except IOError:\n try:\n # check the network connection\n urlopen('http://google.com')\n except IOError:\n logger.log('no network connection')\n else:\n unreachable += 1\n logger.error('Failed to get %s' % monitor.url)\n\n # we've tried at least one monitor by this point\n # if it was reachable (or the netowrk is down) so\n # we don't want to check any more monitors\n if unreachable == 0:\n break\n\n if unreachable >= (len(monitors) + 1) // 2:\n sent = self.send_email('More than half of HAProxy instances are'\n 'unreachable on %s' % cluster_name + \". 
Please refer to doc https://docs.google.com/a/room77.com/document/d/1Ii1cxpIucAU3Qb63Zv3Cc-Ymf9WX6a945guZ_Cg01NI/edit#heading=h.7pw52dk9gnzc\", logger)\n success = False\n return success", "title": "" }, { "docid": "efc7cd384aa14a47fe0a7feab78f63fe", "score": "0.48480535", "text": "def validate_topology_configuration(self):\n\n node_with_busybox = self.topology_helper.deploy_busybox()\n sleep_time = 30\n logger.info(f\"give {sleep_time}sec to render on ODF Topology view\")\n time.sleep(sleep_time)\n\n self.read_all_topology()\n logger.info(\"\\n\" + self.get_topology_str())\n\n topology_cli_df = self.topology_helper.read_topology_cli_all()\n logger.debug(self.topology_helper.get_topology_cli_str())\n\n topology_deviation = dict()\n\n node_names = get_node_names()\n random_node_name = random.choice(node_names)\n navigation_bar_check = self.validate_topology_navigation_bar(random_node_name)\n if not navigation_bar_check:\n logger.error(\"search bar validation check failed\")\n topology_deviation[\"topology_navigation_bar_select_fail\"] = True\n\n logger.info(\"check node bar filtering functionality\")\n deployment_view = self.nodes_view.nav_into_node(\n node_name_option=random_node_name\n )\n another_random_node = random.choice(\n [node_name for node_name in node_names if node_name != random_node_name]\n )\n\n deployment_view.filter_node_by_toggle_from_deployments_level(\n another_random_node\n )\n node_selected = (\n deployment_view.get_current_selected_node_from_deployments_level()\n )\n deployment_view.nav_back_main_topology_view()\n\n if node_selected != another_random_node:\n logger.error(\"search bar navigate to another node check failed\")\n topology_deviation[\n \"search_bar_navigate_to_another_node_check_failed\"\n ] = True\n\n topology_ui_df = self.get_topology_df()\n\n ceph_cluster = OCP(\n kind=\"CephCluster\", namespace=config.ENV_DATA[\"cluster_namespace\"]\n )\n cluster_app_name_cli = (\n ceph_cluster.get().get(\"items\")[0].get(\"metadata\").get(\"labels\").get(\"app\")\n )\n cluster_name_ui = self.nodes_view.get_cluster_name()\n\n if cluster_app_name_cli != cluster_name_ui:\n logger.error(\n \"cluster app name from UI and from CLI are not identical\\n\"\n f\"cluster_app_name_cli = '{cluster_app_name_cli}'\"\n f\"cluster_name_ui = '{cluster_name_ui}'\"\n )\n topology_deviation[\"cluster_app_name_not_equal\"] = True\n\n storage_cluster = OCP(\n kind=constants.STORAGECLUSTER,\n namespace=config.ENV_DATA[\"cluster_namespace\"],\n )\n groups_cli = (\n storage_cluster.get()\n .get(\"items\")[0]\n .get(\"status\")\n .get(\"failureDomainValues\")\n )\n\n # zoom out to read rack/zone label\n zoom_out_times = 1 if len(node_names) < 4 else 2\n for i in range(1, zoom_out_times + 1):\n self.nodes_view.zoom_out_view()\n groups_ui = self.nodes_view.get_group_names()\n\n # check group names such as racks or zones from ODF Topology UI and CLI are identical\n if not sorted(groups_cli) == sorted(groups_ui):\n logger.error(\n f\"group names for worker nodes (labels) of the cluster {cluster_app_name_cli} \"\n \"from UI and from CLI are not identical\\n\"\n f\"groups_cli = {sorted(groups_cli)}\\n\"\n f\"groups_ui = {sorted(groups_ui)}\"\n )\n topology_deviation[\"worker_group_labels_not_equal\"] = True\n\n # check node names from ODF Topology UI and CLI are identical\n if not sorted(list(topology_ui_df[\"entity_name\"])) == sorted(\n list(topology_cli_df.columns)\n ):\n logger.error(\n f\"nodes of the cluster {cluster_app_name_cli} from UI and from CLI are not identical\\n\"\n 
f\"deployments_list_cli = {sorted(list(topology_ui_df['entity_name']))}\\n\"\n f\"deployments_list_ui = {sorted(list(topology_cli_df.columns))}\"\n )\n topology_deviation[\"nodes_not_equal\"] = True\n\n for index, row in topology_ui_df.iterrows():\n\n node_name = row[\"entity_name\"]\n # comment left here for further usage as a point where we can work with states of deployments iteratively\n # node_status = row[\"entity_status\"]\n\n deployments_names_list_cli = (\n self.topology_helper.get_deployment_names_from_node_df_cli(node_name)\n )\n deployments_names_list_ui = list(row[\"nested_deployments\"][\"entity_name\"])\n\n if not sorted(deployments_names_list_cli) == sorted(\n deployments_names_list_ui\n ):\n logger.error(\n f\"deployments of the node '{node_name}' from UI do not match deployments from CLI\\n\"\n f\"deployments_list_cli = '{sorted(deployments_names_list_cli)}'\\n\"\n f\"deployments_list_ui = '{sorted(deployments_names_list_ui)}'\"\n )\n topology_deviation[f\"{node_name}__deployments_not_equal\"] = True\n\n busybox_depl_name = self.topology_helper.get_busybox_depl_name()\n if node_name == node_with_busybox and (\n busybox_depl_name not in deployments_names_list_ui\n ):\n logger.error(\n f\"busybox deployment '{busybox_depl_name}' deployed on the node '{node_with_busybox}' \"\n f\"during the test was not found in UI\"\n )\n topology_deviation[\"added_deployment_not_found\"] = True\n elif node_name == node_with_busybox and (\n busybox_depl_name in deployments_names_list_ui\n ):\n self.topology_helper.delete_busybox()\n sleep_time = 30\n logger.info(\n f\"delete '{busybox_depl_name}' deployment from cluster, give {sleep_time}sec to update ODF \"\n \"Topology and verify deployment was removed\"\n )\n time.sleep(sleep_time)\n\n deployment_topology = self.nodes_view.nav_into_node(\n node_name_option=node_with_busybox\n )\n\n # zoom out Topology view before trying to find busybox deployment\n if len(deployments_names_list_ui) < 6:\n zoom_out_times = 1\n elif len(deployments_names_list_ui) < 12:\n zoom_out_times = 2\n else:\n zoom_out_times = 3\n for i in range(1, zoom_out_times + 1):\n self.zoom_out_view()\n\n # check deployed during the test deployment is present\n if deployment_topology.is_entity_present(busybox_depl_name):\n logger.info(\n f\"Deployment '{busybox_depl_name}' was successfully removed from ODF Topology view\"\n )\n else:\n logger.error(\n f\"busybox deployment '{busybox_depl_name}' deployed on the node '{node_with_busybox}' \"\n f\"during the test was not rempoved from ODF Topology\"\n )\n topology_deviation[f\"{busybox_depl_name}__not_removed\"] = True\n deployment_topology.nav_back_main_topology_view()\n return topology_deviation", "title": "" }, { "docid": "f9ca9bd504bcd52cc0bbb947269d3fff", "score": "0.4839296", "text": "def print_instances_grid():\n refresh_rate = 30\n\n global_state.set_ec2_attributes()\n\n name_len = global_state.df_ec2_attributes.Name.astype(str).map(len).max()\n uptime_len = global_state.df_ec2_attributes.Uptime.astype(str).map(len).max()\n type_len = global_state.df_ec2_attributes.InstanceType.astype(str).map(len).max()\n fqdn_len = global_state.df_ec2_attributes.FQDN.astype(str).map(len).max()\n\n if uptime_len == 0:\n uptime_len = 1\n\n click.clear()\n print(\n f\"{arrow.now().format('YYYY-MM-DD HH:mm:ss')} Press: CTRL-C for all interactions\"\n )\n\n for cntr, row in global_state.df_ec2_attributes.iterrows():\n # default values\n color = \"white\"\n cpu_for_instance = \"\"\n reverse = False\n\n if (\n global_state.search_match_string 
!= \"\"\n and re.search(global_state.search_match_string, row[\"Name\"], re.IGNORECASE)\n ) or (\n global_state.search_match_string != \"\"\n and re.search(\n global_state.search_match_string, row[\"InstanceId\"], re.IGNORECASE\n )\n ):\n reverse = True\n\n if row[\"State\"] == \"running\":\n color = \"green\"\n cpu_for_instance = \"\\u2589\" * int(\n global_state.cpu_for_instance.get(row[\"InstanceId\"], 0) // 10\n )\n elif row[\"State\"] in (\"pending\", \"stopping\"):\n color = \"yellow\"\n refresh_rate = 3\n\n table_line = (\n f\"{cntr:3}|{row['Name']:<{name_len}}|{row['EMRNodeType']}|{row['PrivateIpAddress']:<15}|\"\n f\"{row['PublicIp']:<15}|{row['FQDN']:<{fqdn_len}}|\"\n f\"{row['State']:<14}|{row['LocalLaunchTime'].strftime('%d/%m/%y %H:%M')}|\"\n f\"{row['Uptime']:{uptime_len}}|{row['InstanceId']:<19}|\"\n f\"{row['InstanceType']:{type_len}}|{cpu_for_instance:<10}|\"\n )\n\n click.echo(click.style(table_line, fg=color, reverse=reverse))\n\n return refresh_rate", "title": "" }, { "docid": "408779a8ae383dac4dae70720ed73d31", "score": "0.48361766", "text": "def inspect_host(self, host):\n url = \"%s/v2/%s/os-hosts/%s\" % (OpenstackConf.NOVA_URL, self.tenantId, host)\n results = self.get_rest_data(url)['host']\n assert results[0]['resource']['project'] == '(total)'\n assert results[1]['resource']['project'] == '(used_now)'\n info = dict()\n info['cpu'] = {'total': results[0]['resource']['cpu'], 'used': results[1]['resource']['cpu']}\n info['mem'] = {'total': results[0]['resource']['memory_mb'], 'used': results[1]['resource']['memory_mb']}\n info['disk'] = {'total': results[0]['resource']['disk_gb'], 'used': results[1]['resource']['disk_gb']}\n return info", "title": "" }, { "docid": "eec03d53c8d7e4f9cba7a4392588d3cb", "score": "0.48265722", "text": "def _vmware_get_hosts(self, host_metrics):\n logging.info(\"Starting host metrics collection\")\n\n if self.fetch_tags:\n results, host_labels, host_tags = yield parallelize(\n self.host_system_inventory,\n self.host_labels,\n self.host_tags\n )\n\n else:\n results, host_labels = yield parallelize(self.host_system_inventory, self.host_labels)\n\n # fetch Custom Attributes Labels (\"values\")\n customAttributes = {}\n customAttributesLabelNames = {}\n if self.fetch_custom_attributes:\n customAttributes = yield self.hostsCustomAttributes\n customAttributesLabelNames = yield self.hostsCustomAttributesLabelNames\n\n # Insert custom attributes names as metric labels\n self.updateMetricsLabelNames(host_metrics, ['hosts'])\n\n for host_id, host in results.items():\n try:\n labels = host_labels[host_id]\n\n if self.fetch_tags:\n tags = host_tags.get(host_id, [])\n tags = ','.join(tags)\n if not tags:\n tags = 'n/a'\n\n labels += [tags]\n\n customLabels = []\n for labelName in customAttributesLabelNames:\n customLabels.append(customAttributes[host_id].get(labelName))\n\n labels += customLabels\n\n except KeyError as e:\n logging.info(\n \"Key error, unable to register host {error}, host labels are {host_labels}\".format(\n error=e, host_labels=host_labels\n )\n )\n continue\n\n \"\"\"\n filter red and yellow alarms\n \"\"\"\n if self.fetch_alarms:\n alarms = [a for a in host.get('triggeredAlarmState', '').split(',') if ':' in a]\n\n # Red alarms\n red_alarms = [':'.join(a.split(':')[:-1]) for a in alarms if a.split(':')[-1] == 'red']\n red_alarms_label = ','.join(red_alarms) if red_alarms else 'n/a'\n host_metrics['vmware_host_red_alarms'].add_metric(\n labels + [red_alarms_label],\n len(red_alarms)\n )\n\n # Yellow alarms\n yellow_alarms = 
[':'.join(a.split(':')[:-1]) for a in alarms if a.split(':')[-1] == 'yellow']\n yellow_alarms_label = ','.join(yellow_alarms) if yellow_alarms else 'n/a'\n host_metrics['vmware_host_yellow_alarms'].add_metric(\n labels + [yellow_alarms_label],\n len(yellow_alarms)\n )\n\n # Numeric Sensor Info\n sensors = host.get('runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfo', '').split(',') + \\\n host.get('runtime.healthSystemRuntime.hardwareStatusInfo.cpuStatusInfo', '').split(',') + \\\n host.get('runtime.healthSystemRuntime.hardwareStatusInfo.memoryStatusInfo', '').split(',')\n\n sensors = [s for s in sensors if ':' in s]\n\n for s in sensors:\n sensor = dict(item.split(\"=\") for item in re.split(r':(?=\\w+=)', s)[1:])\n\n if not all(key in sensor for key in ['sensorStatus', 'name', 'type', 'unit', 'value']):\n continue\n\n sensor_status = {\n 'red': 0,\n 'yellow': 1,\n 'green': 2,\n 'unknown': 3,\n }[sensor['sensorStatus'].lower()]\n\n host_metrics['vmware_host_sensor_state'].add_metric(\n labels + [sensor['name'], sensor['type']],\n sensor_status\n )\n\n # FAN speed\n if sensor[\"unit\"] == 'rpm':\n host_metrics['vmware_host_sensor_fan'].add_metric(\n labels + [sensor['name']],\n int(sensor['value']) * (10 ** (int(sensor['unitModifier'])))\n )\n\n # Temperature\n if sensor[\"unit\"] == 'degrees c':\n host_metrics['vmware_host_sensor_temperature'].add_metric(\n labels + [sensor['name']],\n int(sensor['value']) * (10 ** (int(sensor['unitModifier'])))\n )\n\n # Power Voltage\n if sensor[\"unit\"] == 'volts':\n host_metrics['vmware_host_sensor_power_voltage'].add_metric(\n labels + [sensor['name']],\n int(sensor['value']) * (10 ** (int(sensor['unitModifier'])))\n )\n\n # Power Current\n if sensor[\"unit\"] == 'amps':\n host_metrics['vmware_host_sensor_power_current'].add_metric(\n labels + [sensor['name']],\n int(sensor['value']) * (10 ** (int(sensor['unitModifier'])))\n )\n\n # Power Watt\n if sensor[\"unit\"] == 'watts':\n host_metrics['vmware_host_sensor_power_watt'].add_metric(\n labels + [sensor['name']],\n int(sensor['value']) * (10 ** (int(sensor['unitModifier'])))\n )\n\n # Redundancy\n if sensor[\"unit\"] == 'redundancy-discrete':\n host_metrics['vmware_host_sensor_redundancy'].add_metric(\n labels + [sensor['name']],\n int(sensor['value'])\n )\n\n # Standby Mode\n standby_mode = 1 if host.get('runtime.standbyMode') == 'in' else 0\n standby_mode_state = host.get('runtime.standbyMode', 'unknown')\n host_metrics['vmware_host_standby_mode'].add_metric(\n labels + [standby_mode_state],\n standby_mode\n )\n\n # Power state\n power_state = 1 if host['runtime.powerState'] == 'poweredOn' else 0\n host_metrics['vmware_host_power_state'].add_metric(labels, power_state)\n\n # Host connection state (connected, disconnected, notResponding)\n connection_state = host.get('runtime.connectionState', 'unknown')\n host_metrics['vmware_host_connection_state'].add_metric(\n labels + [connection_state],\n 1\n )\n\n # Host in maintenance mode?\n if 'runtime.inMaintenanceMode' in host:\n host_metrics['vmware_host_maintenance_mode'].add_metric(\n labels,\n host['runtime.inMaintenanceMode'] * 1,\n )\n\n if not power_state:\n continue\n\n if host.get('runtime.bootTime'):\n # Host uptime\n host_metrics['vmware_host_boot_timestamp_seconds'].add_metric(\n labels,\n self._to_epoch(host['runtime.bootTime'])\n )\n\n # CPU Usage (in Mhz)\n if 'summary.quickStats.overallCpuUsage' in host:\n host_metrics['vmware_host_cpu_usage'].add_metric(\n labels,\n host['summary.quickStats.overallCpuUsage'],\n )\n\n 
cpu_core_num = host.get('summary.hardware.numCpuCores')\n if cpu_core_num:\n host_metrics['vmware_host_num_cpu'].add_metric(labels, cpu_core_num)\n\n cpu_mhz = host.get('summary.hardware.cpuMhz')\n if cpu_core_num and cpu_mhz:\n cpu_total = cpu_core_num * cpu_mhz\n host_metrics['vmware_host_cpu_max'].add_metric(labels, cpu_total)\n\n # Memory Usage (in MB)\n if 'summary.quickStats.overallMemoryUsage' in host:\n host_metrics['vmware_host_memory_usage'].add_metric(\n labels,\n host['summary.quickStats.overallMemoryUsage']\n )\n\n if 'summary.hardware.memorySize' in host:\n host_metrics['vmware_host_memory_max'].add_metric(\n labels,\n float(host['summary.hardware.memorySize']) / 1024 / 1024\n )\n\n config_ver = host.get('summary.config.product.version', 'unknown')\n build_ver = host.get('summary.config.product.build', 'unknown')\n host_metrics['vmware_host_product_info'].add_metric(\n labels + [config_ver, build_ver],\n 1\n )\n\n hardware_cpu_model = host.get('summary.hardware.cpuModel', 'unknown')\n hardware_model = host.get('summary.hardware.model', 'unknown')\n host_metrics['vmware_host_hardware_info'].add_metric(\n labels + [hardware_model, hardware_cpu_model],\n 1\n )\n logging.info(\"Finished host metrics collection\")\n return results", "title": "" }, { "docid": "e6556746662ba29609eca2573f2ae6df", "score": "0.4823964", "text": "def list_nodes(self, ex_node_ids=None):\r\n params = {'Action': 'DescribeInstances'}\r\n if ex_node_ids:\r\n params.update(self._pathlist('InstanceId', ex_node_ids))\r\n elem = self.connection.request(self.path, params=params).object\r\n nodes = []\r\n for rs in findall(element=elem, xpath='reservationSet/item',\r\n namespace=NAMESPACE):\r\n nodes += self._to_nodes(rs, 'instancesSet/item')\r\n\r\n nodes_elastic_ips_mappings = self.ex_describe_addresses(nodes)\r\n for node in nodes:\r\n ips = nodes_elastic_ips_mappings[node.id]\r\n node.public_ips.extend(ips)\r\n return nodes", "title": "" }, { "docid": "94c0fd757e4d97ef813bb0f43adab681", "score": "0.48220718", "text": "def instances_status(cfg: Config):\n print_instances(Instance.elb_instances(target_group_arn_for(cfg)), number=False)", "title": "" }, { "docid": "cf4eaf4303ad4efb5234f4414ed3d185", "score": "0.47893077", "text": "def ex_describe_addresses(self, nodes):\r\n nodes_elastic_ip_mappings = {}\r\n for node in nodes:\r\n # empty list per node\r\n nodes_elastic_ip_mappings[node.id] = []\r\n return nodes_elastic_ip_mappings", "title": "" }, { "docid": "54daae5eaa7a170fdc945109e114fb36", "score": "0.47810307", "text": "def test_cpu_pinning_one_numa_cell(\n self, env, os_conn, networks, flavors, security_group,\n aggregate):\n hosts = aggregate.hosts\n vms = []\n network_for_instances = [networks[0], networks[1], networks[0]]\n hosts_for_instances = [hosts[0], hosts[1], hosts[0]]\n cpus = get_cpu_distribition_per_numa_node(env)\n\n for i in range(2):\n vms.append(os_conn.create_server(\n name='vm{}'.format(i),\n flavor=flavors[0].id,\n nics=[{'net-id': network_for_instances[i]}],\n availability_zone='nova:{}'.format(hosts_for_instances[i]),\n security_groups=[security_group.id]))\n\n for vm in vms:\n host = getattr(vm, \"OS-EXT-SRV-ATTR:host\")\n assert host in hosts\n self.check_cpu_for_vm(os_conn, vm, 1, cpus[host])\n\n network_checks.check_vm_connectivity(env, os_conn)", "title": "" }, { "docid": "d505f453236e287071a26837f6c84097", "score": "0.47763905", "text": "def verify_nodes_added(self, hosts):\n timeout = 600\n ocp_obj = ocp.OCP(kind='node')\n node_info = ocp_obj.get()\n for i in 
range(len(hosts)):\n for entry in node_info['items']:\n for each in entry['status']['addresses']:\n if each['type'] == 'Hostname':\n if each['address'] in hosts:\n logging.info(\n f\"Checking status for {each['address']}\"\n )\n sample = TimeoutSampler(\n timeout, 3,\n self.get_ready_status, entry\n )\n try:\n assert sample.wait_for_func_status(result=True)\n except AssertionError:\n raise exceptions.FailedToAddNodeException(\n \"Failed to add RHEL node\"\n )", "title": "" }, { "docid": "3730fc83eda77439f7ff9441024155dd", "score": "0.4775826", "text": "def check_affinity(vm_details,host):\n try:\n logger.debug(\"Entering into check_affinity for vm:\"+str(vm_details.vm_name)) \n if(vm_details.vm_name in ('superopt','largevm','NeuroImaging2','sniper-big','csl788-1','NeuroImaging','sniper-large', 'mooc_6')):\n return False \n else:\n return True \n except:\n logger.exception('Exception in check_affinity') \n return False", "title": "" }, { "docid": "b40190fcf1c9eb7a5b850bdc57227dbd", "score": "0.47755665", "text": "def get_hosts(self, vim_tenant):\n r, hype_dict = self.get_hosts_info()\n if r < 0:\n return r, hype_dict\n hypervisors = hype_dict[\"hosts\"]\n try:\n servers = self.nova.servers.list()\n for hype in hypervisors:\n for server in servers:\n if server.to_dict()['OS-EXT-SRV-ATTR:hypervisor_hostname'] == hype['hypervisor_hostname']:\n if 'vm' in hype:\n hype['vm'].append(server.id)\n else:\n hype['vm'] = [server.id]\n return 1, hype_dict\n except nvExceptions.NotFound as e:\n error_value = -vimconn.HTTP_Not_Found\n error_text = (str(e) if len(e.args) == 0 else str(e.args[0]))\n except (ksExceptions.ClientException, nvExceptions.ClientException) as e:\n error_value = -vimconn.HTTP_Bad_Request\n error_text = type(e).__name__ + \": \" + (str(e) if len(e.args) == 0 else str(e.args[0]))\n # TODO insert exception vimconn.HTTP_Unauthorized\n # if reaching here is because an exception\n self.logger.debug(\"get_hosts \" + error_text)\n return error_value, error_text", "title": "" }, { "docid": "184e2d8d6421c3bf0d6e579dcf08b7bc", "score": "0.47722316", "text": "def _load_hosts():\n\n hosts = {}\n\n for f in os.listdir(HOSTS_DIR):\n f_path = os.path.join(HOSTS_DIR, f)\n if os.path.isfile(f_path):\n with open(f_path, 'r') as fp:\n for line in fp:\n line = line.strip()\n if line == '' or line.startswith('#'):\n continue\n\n ip, hostname = line.split()\n\n hostname = _validate_hostname(hostname)\n ip = _validate_ip(ip)\n\n assert hostname not in hosts, f\"Hostname {hostname} duplicated\"\n hosts[hostname] = ip\n\n return hosts", "title": "" }, { "docid": "c07e85aa96c369efc400b4ac7949f72a", "score": "0.4758956", "text": "def host_nodes(self):\n if self._docker_nodes == []:\n if j.core.db.get(\"gcc.host_nodes\") is None:\n self.init()\n nodes = j.core.db.get(\"gcc.host_nodes\").decode()\n self._host_nodes = self._parseNode(nodes)\n return self._host_nodes", "title": "" }, { "docid": "52368fdaef0610cb1e840b92939c215c", "score": "0.47277254", "text": "def getComponentLayoutValidations(self, services, hosts):\n items = super(HDF20StackAdvisor, self).getComponentLayoutValidations(services, hosts)\n\n # Use a set for fast lookup\n hostsSet = set(super(HDF20StackAdvisor, self).getActiveHosts([host[\"Hosts\"] for host in hosts[\"items\"]])) #[host[\"Hosts\"][\"host_name\"] for host in hosts[\"items\"]]\n hostsCount = len(hostsSet)\n\n componentsListList = [service[\"components\"] for service in services[\"services\"]]\n componentsList = [item for sublist in componentsListList for item in sublist]\n\n # 
Validating cardinality\n for component in componentsList:\n if component[\"StackServiceComponents\"][\"cardinality\"] is not None:\n componentName = component[\"StackServiceComponents\"][\"component_name\"]\n componentDisplayName = component[\"StackServiceComponents\"][\"display_name\"]\n componentHosts = []\n if component[\"StackServiceComponents\"][\"hostnames\"] is not None:\n componentHosts = [componentHost for componentHost in component[\"StackServiceComponents\"][\"hostnames\"] if componentHost in hostsSet]\n componentHostsCount = len(componentHosts)\n cardinality = str(component[\"StackServiceComponents\"][\"cardinality\"])\n # cardinality types: null, 1+, 1-2, 1, ALL\n message = None\n if \"+\" in cardinality:\n hostsMin = int(cardinality[:-1])\n if componentHostsCount < hostsMin:\n message = \"At least {0} {1} components should be installed in cluster.\".format(hostsMin, componentDisplayName)\n elif \"-\" in cardinality:\n nums = cardinality.split(\"-\")\n hostsMin = int(nums[0])\n hostsMax = int(nums[1])\n if componentHostsCount > hostsMax or componentHostsCount < hostsMin:\n message = \"Between {0} and {1} {2} components should be installed in cluster.\".format(hostsMin, hostsMax, componentDisplayName)\n elif \"ALL\" == cardinality:\n if componentHostsCount != hostsCount:\n message = \"{0} component should be installed on all hosts in cluster.\".format(componentDisplayName)\n else:\n if componentHostsCount != int(cardinality):\n message = \"Exactly {0} {1} components should be installed in cluster.\".format(int(cardinality), componentDisplayName)\n\n if message is not None:\n items.append({\"type\": 'host-component', \"level\": 'ERROR', \"message\": message, \"component-name\": componentName})\n\n # Validating host-usage\n usedHostsListList = [component[\"StackServiceComponents\"][\"hostnames\"] for component in componentsList if not self.isComponentNotValuable(component)]\n usedHostsList = [item for sublist in usedHostsListList for item in sublist]\n nonUsedHostsList = [item for item in hostsSet if item not in usedHostsList]\n for host in nonUsedHostsList:\n items.append( { \"type\": 'host-component', \"level\": 'ERROR', \"message\": 'Host is not used', \"host\": str(host) } )\n\n return items", "title": "" }, { "docid": "3d48ef12348aa5ab990f0afbd86bc15b", "score": "0.47254387", "text": "def is_supervisor_node(inv_files, hostname):\n dut_vars = get_host_visible_vars(inv_files, hostname)\n if 'type' in dut_vars and dut_vars['type'] == 'supervisor':\n return True\n return False", "title": "" }, { "docid": "de7a12fd101de412ab0f3c27ac38ccf2", "score": "0.47200584", "text": "def get_details(self):\n\n full_page = urllib.request.urlopen(\"https://mon.crc.nd.edu/xymon-cgi/svcstatus.sh?HOST={0}&SERVICE=cpu\".format(self.exec_host))\n pageStr = full_page.read().decode(\"utf-8\") # getting all html into a string\n full_page.close()\n del full_page\n\n # Grabbing the first 5 lines as one single string for $(top) header\n self.host_top = \"\\n\".join(pageStr.split('\\n')[0:5])\n # Each line below will be a line in Top for processes\n top_bool = True\n for index, line in enumerate(pageStr.split('\\n')):\n if top_bool:\n if \"load average\" in line:\n top_bool = False\n top_index = index\n \n if self.user in line:\n tmp_proc = {}\n lineSplit = line.split()\n tmp_proc[\"PID\"] = lineSplit[0]\n tmp_proc[\"RESMEM\"] = lineSplit[5]\n tmp_proc[\"CPU%\"] = lineSplit[8]\n tmp_proc[\"TIME\"] = lineSplit[10]\n tmp_proc[\"PNAME\"] = lineSplit[11]\n self.details.append(tmp_proc)\n\n self.host_top = 
\"\\n\".join(pageStr.split('\\n')[top_index:(top_index + 5)])\n\n return", "title": "" }, { "docid": "00f208598c45024eacda149fc16f5cb5", "score": "0.47162923", "text": "def test_parse_single_host_invalid_config(self):\n hosts = '192.168.1.10:fred,192.168.1.11,192.168.1.12:1236'\n with ExpectedException(vpp_exc.InvalidEtcHostConfig):\n self.parse_config_test_run(hosts, OVERRIDE_PORT)", "title": "" }, { "docid": "3e981dab6bd084cdc99dde401341c00b", "score": "0.47092074", "text": "def test_vms_connectivity_after_evacuation(self, env, os_conn, networks,\n flavors, aggregate,\n security_group, devops_env):\n cpus = get_cpu_distribition_per_numa_node(env)\n hosts = aggregate.hosts\n vms = []\n\n for i in range(2):\n vm = os_conn.create_server(\n name='vm{}'.format(i), flavor=flavors[i].id,\n nics=[{'net-id': networks[i]}],\n availability_zone='nova:{}'.format(hosts[i]),\n security_groups=[security_group.id])\n vms.append(vm)\n network_checks.check_vm_connectivity(env, os_conn)\n self.check_cpu_for_vm(os_conn, vms[0], 1, cpus[hosts[0]])\n\n with self.change_compute_state_to_down(os_conn, devops_env, hosts[0]):\n vm0_new = self.evacuate(os_conn, devops_env, vms[0])\n new_host = getattr(vm0_new, \"OS-EXT-SRV-ATTR:host\")\n assert new_host in hosts, \"Unexpected host after evacuation\"\n assert new_host != hosts[0], \"Host didn't change after evacuation\"\n os_conn.wait_servers_ssh_ready(vms)\n network_checks.check_vm_connectivity(env, os_conn)\n self.check_cpu_for_vm(os_conn, vm0_new, 1, cpus[new_host])\n\n old_hv = os_conn.nova.hypervisors.find(hypervisor_hostname=hosts[0])\n assert old_hv.running_vms == 0, (\n \"Old hypervisor {0} shouldn't have running vms\").format(hosts[0])\n\n instance_name = getattr(vm0_new, \"OS-EXT-SRV-ATTR:instance_name\")\n assert instance_name in self.get_instances(os_conn, new_host), (\n \"Instance should be in the list of instances on the new host\")\n assert instance_name not in self.get_instances(os_conn, hosts[0]), (\n \"Instance shouldn't be in the list of instances on the old host\")", "title": "" }, { "docid": "73e061d445fb3f4f4bd27de228b27dd3", "score": "0.47047168", "text": "def storage_params_extract(config_3par, system_params, system_params_add, pattern_dct): \n\n # file name\n configname = os.path.basename(config_3par)\n # search control dictionary. 
continue to check file until all parameters groups are found\n collected = {'system': False, 'ip': False, 'port': False, 'host': False}\n\n # initialize structures to store collected data for current storage\n # dictionary to store all DISCOVERED parameters\n showsys_dct = {}\n showsys_lst = []\n # if lists remains empty after file parsing than status_info shows NO_DATA for current file\n port_lst = []\n host_lst = []\n # Storage IP address\n ip_addr = None\n \n with open(config_3par, encoding='utf-8', errors='ignore') as file:\n # check file until all groups of parameters extracted\n while not all(collected.values()):\n line = file.readline()\n if not line:\n break\n # showsys section start\n if re.search(pattern_dct['showsys_header'], line) and not collected['system']:\n collected['system'] = True\n line = reop.extract_key_value_from_line(showsys_dct, pattern_dct, line, file, \n extract_pattern_name='parameter_value_pair', \n stop_pattern_name='section_end')\n # showsys section end\n # port section start\n elif re.search(pattern_dct['showport_header'], line) and not collected['port']:\n collected['port'] = True\n line = reop.extract_list_from_line(port_lst, pattern_dct, \n line, file, \n extract_pattern_name= 'port_line', \n stop_pattern_name='section_end', \n first_line_skip=False, line_add_values=configname)\n # port section end\n # host section start\n elif re.search(pattern_dct['showhost_header'], line) and not collected['host']:\n collected['host'] = True\n line = reop.extract_list_from_line(host_lst, pattern_dct, \n line, file, \n extract_pattern_name= 'host_line', \n stop_pattern_name='section_end', \n first_line_skip=False, line_add_values=configname)\n # host section end\n # ip_address section start\n elif re.search(pattern_dct['ip_address'], line) and not collected['ip']:\n collected['ip'] = True\n ip_addr = re.search(pattern_dct['ip_address'], line).group(1)\n # ip_address section end\n\n # additional values which need to be added to the switch params dictionary \n # switch_params_add order ('configname', 'chassis_name', 'switch_index', 'ls_mode')\n # values axtracted in manual mode. 
if change values order change keys order in init.xlsx switch tab \"params_add\" column\n showsys_values = (configname, ip_addr)\n\n if showsys_dct:\n # adding additional parameters and values to the parameters dct\n dsop.update_dct(system_params_add, showsys_values, showsys_dct) \n # creating list with REQUIRED parameters for the current system.\n # if no value in the dct for the parameter then None is added \n # and appending this list to the list of all systems \n showsys_lst.append([showsys_dct.get(param) for param in system_params])\n return showsys_lst, port_lst, host_lst", "title": "" }, { "docid": "b5a37d2e218513289a2243f9ea6c46f1", "score": "0.47010845", "text": "def __get_cluster_hosts(self):\n try:\n host_file = open(os.environ['FABRIC_HOST_FILE'], 'r+')\n ClusterController.hosts = [h.strip('\\n') for h in host_file.readlines()]\n host_file.close()\n msg = \"Hosts read from FABRIC_HOST_FILE: %s\" % '\\n'.join(ClusterController.hosts)\n LOG.debug(msg)\n self.local = False\n except Exception as e:\n LOG.error(\"No host file specified, running command on localhost\")\n LOG.error(\"Error: %s\" % e)", "title": "" }, { "docid": "192c11e25c9276291ec31c34326b81df", "score": "0.46959335", "text": "def test_get_cluster_nodes_available(self):\n pass", "title": "" }, { "docid": "91c4b200155c3984cf5cf490fc8eae22", "score": "0.4687427", "text": "def node_list_option(self):\n if (self.node_list is not None and\n self.host_file is not None and\n self.node_list != self.host_file):\n raise SyntaxError('<geopm> geopmpy.launcher: Node list and host name cannot both be specified.')\n\n result = []\n if self.node_list is not None:\n result = ['-hosts', self.node_list]\n elif self.host_file is not None:\n result = ['-f', self.host_file]\n else:\n # If this error is encountered, without the is_once check it will be displayed 3 times per run:\n # 1. For the PlatformTopo cache creation.\n # 2. For the call to 'lscpu --hex' used in the Launcher itself.\n # 3. For actually running the app requested.\n if IMPIExecLauncher._is_once:\n sys.stderr.write('Warning: <geopm> geopmpy.launcher: Hosts not defined, GEOPM may fail to start. 
'\n 'Use \"-f <host_file>\" or \"-hosts\" to specify the hostnames of the compute nodes.\\n')\n IMPIExecLauncher._is_once = False\n\n # If the user specified the bootstrap option, assume they know better than we do\n if not any(aa.startswith('-bootstrap') for aa in self.argv):\n if self.is_slurm_enabled:\n server = 'slurm'\n if os.getenv('SLURM_CLUSTER_NAME') == 'endeavour':\n server = 'ssh'\n result += ['-bootstrap', server]\n\n return result", "title": "" }, { "docid": "92be6e9e05078af8dbedde773d6c5616", "score": "0.46847212", "text": "def test_get_topologyitems_with_host_regexes(self):\n instance = {'name': 'vsphere_mock', 'host': \"ESXi\", \"host_include_only_regex\": \"localhost\"}\n config = {}\n self.load_check(config)\n # self.check._is_excluded = MagicMock(return_value=False)\n\n server_mock = MagicMock()\n server_mock.configure_mock(**{'RetrieveContent.return_value': self.mock_content(\"host\")})\n self.check._get_server_instance = MagicMock(return_value=server_mock)\n\n # mock the vpshere client connect\n self.check.vsphere_client_connect = MagicMock()\n # get the client\n client = vsphere_client()\n client.tagging.TagAssociation.list_attached_tags = MagicMock(return_value=[])\n self.check.client = client\n\n topo_dict = self.check.get_topologyitems_sync(instance)\n self.assertEqual(len(topo_dict[\"hosts\"]), 1)\n self.assertEqual(len(topo_dict[\"hosts\"][0]['topo_tags']['identifiers']), 0)", "title": "" }, { "docid": "e0a3c2808cd5e3dec31119f0ac4a4d3a", "score": "0.46589443", "text": "def list_nodes(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"The list_nodes function must be called with -f or --function.\"\n )\n\n ret = {}\n nodes = list_nodes_full()\n if \"error\" in nodes:\n raise SaltCloudSystemExit(\n \"An error occurred while listing nodes: {}\".format(\n nodes[\"error\"][\"Errors\"][\"Error\"][\"Message\"]\n )\n )\n for node in nodes:\n ret[node] = {\n \"id\": nodes[node][\"hostname\"],\n \"ram\": nodes[node][\"memoryCount\"],\n \"cpus\": nodes[node][\"processorPhysicalCoreAmount\"],\n }\n if \"primaryIpAddress\" in nodes[node]:\n ret[node][\"public_ips\"] = nodes[node][\"primaryIpAddress\"]\n if \"primaryBackendIpAddress\" in nodes[node]:\n ret[node][\"private_ips\"] = nodes[node][\"primaryBackendIpAddress\"]\n return ret", "title": "" }, { "docid": "d5a169b9cf5c49afd5f69754e834a9a9", "score": "0.46451515", "text": "def get_ec2_instances(region):\n\n # get ec2 connection by region\n conn = ec2.connect_to_region(region)\n # get all instances\n reservations = conn.get_all_instances()\n\n ####\n # Getting a dictionary of all the isntances.\n # Key is instance name, value is an array, of which each element is a id, ip tuple\n #\n # Example:\n # {\n # \"name1\": [\n # [\"id1\", \"ip1\"],\n # [\"id2\", \"ip2\"],\n # ...\n # ],\n # \"name2\": [\n # [\"id3\", \"ip3\"],\n # ...\n # ],\n # }\n ####\n instances_dict = defaultdict(lambda: [])\n\n for res in reservations:\n for inst in res.instances:\n\n # ignore terminated instances\n if inst.state == 'terminated':\n continue\n # key 'Nagios' value 'ignore' means do not add it into nagios\n if IGNORE_NAGIOS_TAG_INSTANCE and 'Nagios' in inst.tags and inst.tags['Nagios'] == 'ignore':\n continue\n # ignore instances that has 'test' as part of its name tag\n if IGNORE_TEST_INSTANCE and 'Name' in inst.tags and 'test' in inst.tags['Name'].lower():\n continue\n\n name = inst.tags['Name'] + CONCAT_CHAR + inst.id if 'Name' in inst.tags else inst.id\n name = name.replace(' ', CONCAT_CHAR)\n name = re.sub(r\"[\\(\\)]\", 
\"\", name)\n name = re.sub(r\"[^-_a-zA-Z0-9]\", CONCAT_CHAR, name)\n instances_dict[name].append([inst.id, inst.private_ip_address])\n\n ####\n # Solve duplicate instance names\n # The idea is adding the last section of IP as part of the name, if the name is duplicated.\n ####\n instance_list = []\n\n for name, id_ip_list in instances_dict.items():\n if len(id_ip_list) == 1:\n inst = id_ip_list[0]\n instance_list.append([inst[0], name, inst[1]])\n else:\n for id, ip in id_ip_list:\n instance_list.append([id, name + CONCAT_CHAR + ip.split('.')[-1], ip])\n\n return instance_list", "title": "" }, { "docid": "9fc73231b233e314216bdff6b3e4fb06", "score": "0.46422812", "text": "def test_parse_multi_host_invalid_config(self):\n hosts = '192.168.1.10:fred,192.168.1.11,192.168.1.12:1236'\n with ExpectedException(vpp_exc.InvalidEtcHostConfig):\n self.parse_config_test_run(hosts, OVERRIDE_PORT)", "title": "" }, { "docid": "e88ecafca0161edfb5e9dcd8c235f7eb", "score": "0.46307158", "text": "def _get_compute_nodes(keypath, user, controller):\n nodes = []\n if keypath is None or user is None:\n return nodes\n cmd = \"nova-manage service list | grep ^nova-compute\"\n lines = stress.utils.ssh(keypath, user, controller, cmd).split('\\n')\n # For example: nova-compute xg11eth0 nova enabled :-) 2011-10-31 18:57:46\n # This is fragile but there is, at present, no other way to get this info.\n for line in lines:\n words = line.split()\n if len(words) > 0 and words[4] == \":-)\":\n nodes.append(words[1])\n return nodes", "title": "" }, { "docid": "7643285b9a4cd1395286f50e0c567926", "score": "0.46297336", "text": "def test_get_nodes_provision_status(drydock):\n drydock.get_nodes.return_value = NODE_LIST\n nodes_provision_status = sh.get_nodes_provision_status(drydock)\n actual = nodes_provision_status.get('nodes_provision_status')\n expected = MACH_STATUS_DICT.get('nodes_provision_status')\n\n assert actual == sorted(expected, key=lambda x: x['hostname'])", "title": "" }, { "docid": "6d701b03aeb01629613ff01a71e5509f", "score": "0.46258628", "text": "def test_collect_hosts(self, m):\n self._set_http_responses(m, hosts=_read_test_file(\"host_response.json\"))\n\n self.check.url = self.instance.get('url')\n\n self.check.run()\n\n topo_instances = topology.get_snapshot(self.check.check_id)\n actual_topology = _read_data(\"host_response_topology.json\")\n\n # sort the keys of components and relations, so we match it in actual\n components, relations = sort_topology_data(topo_instances)\n actual_components, actual_relations = sort_topology_data(actual_topology)\n\n self.assertEqual(components, actual_components)\n self.assertEqual(len(relations), len(actual_relations))\n for relation in relations:\n self.assertIn(relation, actual_relations)", "title": "" }, { "docid": "55038cf79cc309621cb80f8dae7c0e1a", "score": "0.46229857", "text": "def gethostcfg():\n url = 'http://localhost:8500/v1/kv/' + PREFIX + '?recurse'\n try:\n hostjson = urllib.request.urlopen(url).read().decode('utf-8')\n except urllib.error.HTTPError as exc:\n if exc.code == 404:\n print('Cluster not started')\n return {}\n else:\n raise\n hostkvs = json.loads(hostjson)\n\n hostcfg = {}\n for kv_detail in hostkvs:\n key = kv_detail['Key']\n val = base64.b64decode(kv_detail['Value']).decode('utf-8')\n assert key.startswith(PREFIX)\n key = key[len(PREFIX):]\n hostcfg[key] = val\n return hostcfg", "title": "" }, { "docid": "dadb3facf846cec20d520e24d4b07a65", "score": "0.4621811", "text": "def get_flavors_vms_and_nodes(conf):\n src = conf['src']\n username = 
src['user']\n password = src['password']\n tenant = src['tenant']\n auth_url = src['auth_url']\n region = src.get('region')\n\n dst_comp = conf['dst_compute']\n core_ratio = dst_comp['cpu_allocation_ratio']\n ram_ratio = dst_comp['ram_allocation_ratio']\n\n cli = client.Client(2, username, password, tenant, auth_url,\n region_name=region)\n servers = cli.servers.list(search_opts={\"all_tenants\": True})\n nova_flavors = cli.flavors.list()\n\n flavors = {\n i.id: {\n \"fl_id\": i.id,\n \"core\": i.vcpus,\n \"name\": i.name,\n \"ram\": i.ram,\n \"ephemeral\": i.ephemeral,\n \"swap\": i.swap\n } for i in nova_flavors\n }\n\n hypervisors = {}\n\n down_hosts = set([service.host for service in cli.services.findall(\n binary='nova-compute', state='down')])\n\n def vm_host_is_up(vm):\n host_is_up = (getattr(vm, nova_compute.INSTANCE_HOST_ATTRIBUTE)\n not in down_hosts)\n if not host_is_up:\n LOG.warning(\"VM '%s' is running on a down host! Skipping.\", vm.id)\n\n return host_is_up\n\n def vm_is_in_valid_state(vm):\n return vm.status in nova_compute.ALLOWED_VM_STATUSES\n\n vms = {\n vm.id: {\n \"id\": vm.id,\n \"flavor\": vm.flavor.get(\"id\"),\n \"host\": getattr(vm,\n nova_compute.INSTANCE_HOST_ATTRIBUTE)\n } for vm in servers if vm_host_is_up(vm) and vm_is_in_valid_state(vm)\n }\n\n for hypervisor in cli.hypervisors.list():\n host = hypervisor.hypervisor_hostname\n if host not in down_hosts:\n hypervisors[host] = {\n 'core': hypervisor.vcpus,\n 'ram': hypervisor.memory_mb,\n 'core_ratio': core_ratio,\n 'ram_ratio': ram_ratio}\n\n return flavors, vms, hypervisors", "title": "" }, { "docid": "105210f97767adf07a13e0f22325c0e5", "score": "0.4618463", "text": "def parse_instances(test_or_dev_set, conn, in_file_fname):\n with open(in_file_fname, \"r\") as in_file:\n instances = []\n for line in in_file:\n instances.append(json.loads(line))\n\n training_instances = []\n\n for i in trange(instances):\n instance = instances[i]\n if test_or_dev_set or instance['verifiable'] != 'NOT VERIFIABLE':\n claim = instance['claim']\n claim_id = instance['id']\n docs = instance['predicted_pages']\n if not test_or_dev_set:\n gold_docs = get_golden_docs(instance['evidence'])\n for gold_doc in gold_docs:\n if gold_doc not in docs:\n docs.append(gold_doc) # make sure all positive examples are added to the data\n\n for doc_id in docs:\n doc_raw = get_doc_text(conn, doc_id)[0]\n\n doc_sentences = parse_doc(doc_raw)\n doc_as_string = ' '.join(doc_sentences)\n context = doc_as_string\n\n if not test_or_dev_set:\n if doc_id in gold_docs:\n label = 1\n else:\n label = 0\n else:\n label = None\n training_instances.append([label, claim, context, claim_id, doc_id])\n return training_instances", "title": "" }, { "docid": "6af207a2800b124780292dfa500f6e26", "score": "0.4594571", "text": "def test_hosts(self):\n eq_(list(self.data.hosts()), ['localhost'])", "title": "" }, { "docid": "6d36594e22be587f0cb437b10af38123", "score": "0.45906076", "text": "def discovery_ip(ip):\n default = {}\n\n # Getting host information for ip\n try:\n logger.info(\"Running port scan...\")\n subprocess.getoutput(\"nmap -oX host.xml -O -sV -p 1-1023 \" + ip)\n logger.info(\"Examining port scan results...\")\n\n # Need to do make before...\n binary_location = PROJECT_ROOT + \"/build_topology\"\n host_json = subprocess.getoutput(binary_location + \" host.xml\")\n if \"Aborted\" in host_json:\n return default\n else:\n return json.loads(host_json)\n except Exception as e:\n return default", "title": "" }, { "docid": "dd3e1cf471935361cb0df2d5dafcd34c", 
"score": "0.45782053", "text": "def get_available_instances():\n resources = query_openvz(False, \"ctid,hostname\")\n candidates = {}\n for r in resources:\n cid, hn = r\n candidates[int(cid)] = \"%s (%s)\" % (hn, cid)\n return candidates", "title": "" }, { "docid": "88d7def42031effabd0769cd72cead8d", "score": "0.45756683", "text": "def _ClassifyHosts(self):\n logging.info('Classifying hosts')\n one_hour_ago = _Now() - datetime.timedelta(hours=1)\n for host in self.hosts:\n host.extra_info = host.extra_info or {}\n loas_info = host.extra_info.get('gcertstatus', '')\n if host.timestamp is None or host.timestamp < one_hour_ago:\n self.hosts_checkin.append(host)\n elif loas_info and _GetLoasSeconds(loas_info) <= WEEK_TO_SECONDS:\n self.hosts_loas.append(host)\n logging.info('Offline hosts %d', len(self.hosts_checkin))\n logging.info('Hosts needing LOAS renewal %d', len(self.hosts_checkin))", "title": "" }, { "docid": "6896e7c01e534faeae346afcc2d695df", "score": "0.45726743", "text": "def find_application_instances(app_guid):\n cfg = Config()\n\n # for each instance, find information about where it is hosted and its connected ports\n instances = []\n raw_apps = bosh.get_apps()\n if not raw_apps:\n logger.warning(\"No application instances found for %s.\", app_guid)\n return None\n for instance in raw_apps:\n if instance['app_guid'] != app_guid:\n continue\n if instance['state'] != 'RUNNING':\n continue\n diego_ip = instance['address']\n cont_ip = instance['instance_address']\n diego_id = 'diego_cell/' + instance['cell_id']\n app_ports = set() # ports the application is listening on within the container\n\n for ports in instance['ports']:\n diego_port = ports['host_port'] # node port on the diego-cell\n cont_port = ports['container_port'] # port the application is listening on in the container\n\n add_diego_port = diego_port not in cfg['host-port-whitelist']\n add_cont_port = cont_port not in cfg['container-port-whitelist']\n if add_diego_port and add_cont_port:\n app_ports.add((diego_port, cont_port))\n logger.debug('Found application at %s:%d with container port %d', diego_ip, diego_port, cont_port)\n\n diego_tls_port = ports.get('host_tls_proxy_port')\n cont_tls_port = ports.get('container_tls_proxy_port')\n\n add_diego_tls_port = diego_tls_port is not None and diego_tls_port not in cfg['host-port-whitelist']\n add_cont_tls_port = cont_tls_port is not None and cont_tls_port not in cfg['container-port-whitelist']\n if add_diego_tls_port and add_cont_tls_port:\n app_ports.add((diego_tls_port, cont_tls_port))\n logger.debug('Found application at %s:%d with tls container port %d', diego_ip, diego_tls_port, cont_tls_port)\n\n # Lookup the virtual network interface\n _, stdout, _ = monarch.pcf.util.run_cmd_on_diego_cell(diego_id, 'ip a')\n stdout = util.group_lines_by_hanging_indent(stdout)\n index = util.find_string_in_grouping(stdout, cont_ip.replace('.', r'\\.'))\n if not index:\n logger.warning(\"Could not find virtual interface!\")\n diego_vi = None\n else:\n diego_vi = stdout[index[0]][0] # want to get parent of the match\n match = re.match(r'\\d+: ([\\w-]+)(@[\\w-]+)?:', diego_vi)\n assert match # This should never fail, so the regex must be wrong!\n diego_vi = match[1]\n logger.debug(\"Hosting diego-cell Virtual Interface: %s\", diego_vi)\n\n # Lookup the Container ID\n cmd = \"sudo cat /var/vcap/sys/log/rep/rep.stdout.log | grep {} | tail -n 1\".format(cont_ip)\n rcode, stdout, _ = monarch.pcf.util.run_cmd_on_diego_cell(diego_id, cmd)\n if rcode:\n logger.error(\"Failed retrieving 
container GUID from %s.\", diego_id)\n cont_id = None\n else:\n cont_id = util.extract_json(stdout)[0]['data']['container-guid']\n logger.debug(\"Hosting container GUID: %s.\", cont_id)\n\n # Record the app instance information\n app_instance = AppInstance(\n diego_id=diego_id,\n diego_ip=diego_ip,\n cont_id=cont_id,\n cont_ip=cont_ip,\n app_ports=app_ports,\n diego_vi=diego_vi\n )\n instances.append(app_instance)\n logger.info(\"Found instance: %s\", app_instance)\n return instances", "title": "" }, { "docid": "f5dd3a4ab9233a9564edc0f934405b60", "score": "0.45688975", "text": "def loadInstances(self, toscaDef, tpl):\n node_templates = toscaDef[\"topology_template\"][\"node_templates\"]\n for name, impl in tpl.get(\"installers\", {}).items():\n if name not in node_templates:\n node_templates[name] = dict(type=self.InstallerType, properties=impl)\n else:\n raise UnfurlValidationError(\n 'can not add installer \"%s\", there is already a node template with that name'\n % name\n )\n\n for name, impl in tpl.get(\"instances\", {}).items():\n if name not in node_templates and impl is not None:\n node_templates[name] = self.loadInstance(impl.copy())\n\n if \"discovered\" in tpl:\n # node templates added dynamically by configurators\n self.discovered = tpl[\"discovered\"]\n for name, impl in tpl[\"discovered\"].items():\n if name not in node_templates:\n node_templates[name] = impl", "title": "" }, { "docid": "00da6ebbd47548897942f0951b89f0b0", "score": "0.45668766", "text": "def config_exists(cls, instance_name: str, environment: str = N_ENV,\n system_folders: bool = N_FOL) -> bool:\n return super().config_exists(\"node\", instance_name,\n environment=environment,\n system_folders=system_folders)", "title": "" }, { "docid": "a838e1ba4592c45719ef020ef9c58062", "score": "0.45578372", "text": "def test_get_cluster_external_ips(self):\n pass", "title": "" }, { "docid": "4d01d31aa6bf2d956f72d56ddae81595", "score": "0.45569932", "text": "def run_instances(self):\n # create an entry in the s3 log for the start of this task \n self.log_to_s3('run-instances-start.log', 'start')\n\n session = botocore.session.get_session()\n client = session.create_client('ec2', region_name=self.aws_region)\n\n # convert user-data to base64\n user_data = ''\n # NOTE conversion of file to string, then string to bytes, the bytes encoded \n # base64 - then decode the base64 bytes into base64 string\n with open(self.ec2_user_data, 'r') as f:\n user_data = base64.b64encode(bytes(f.read(), \"utf-8\")).decode(\"utf-8\")\n\n if self.ec2_type in (CONST.VALID_EC2_INSTANCE_TYPES_EBS_ONLY).split('|'):\n # block device mapping for ebs backed instances\n # creates an ephemeral EBS volume (delete on terminate)\n # Note that gp2 instance type is EBS SSD\n custom_block_device_mapping = [{\n 'DeviceName': '/dev/sdb',\n 'VirtualName': 'ephemeral0',\n 'Ebs':{\n 'VolumeSize': self.ec2_ebs_only_volume_size,\n 'VolumeType': self.ec2_ebs_only_volume_type,\n },\n }]\n else:\n # block device mapping allows for 2 extra drives\n # - works for either single ssd or 2 ssd's\n custom_block_device_mapping = [ \n {\n 'DeviceName': '/dev/sdb',\n 'VirtualName': 'ephemeral0'\n },\n {\n 'DeviceName': '/dev/sdc',\n 'VirtualName': 'ephemeral1'\n }\n ]\n\n r = client.request_spot_instances(\n InstanceCount=self.ec2_count,\n SpotPrice=self.ec2_spot_price,\n LaunchSpecification= {\n 'SecurityGroupIds': [\n self.ec2_security_group_id,\n ],\n 'SecurityGroups': [\n self.ec2_security_groups,\n ],\n 'Placement': {\n 'AvailabilityZone': self.ec2_availability_zone,\n },\n 
'BlockDeviceMappings': custom_block_device_mapping,\n 'IamInstanceProfile': {\n 'Arn': self.ec2_arn_id,\n },\n 'UserData': user_data,\n 'ImageId': self.ec2_image_id,\n 'InstanceType': self.ec2_type,\n 'KeyName': self.ec2_security_key,\n },\n )\n\n # get the spot instance request ids\n spot_ids = []\n lgr.debug(CONST.DEBUG + colour_msg(Colour.CYAN, 'Spot request ids:'))\n for i, spot_inst in enumerate(r['SpotInstanceRequests']):\n inst_str = '[' + str(i) + ']'\n lgr.debug(CONST.DEBUG + colour_msg(Colour.PURPLE, \n inst_str + '\\t' + spot_inst['SpotInstanceRequestId']))\n spot_ids.append(spot_inst['SpotInstanceRequestId'])\n utility.list_to_file(CONST.SPOT_REQUEST_IDS, spot_ids)\n\n # create a list of spot instance statuses - so we can print out\n # some updates to the user\n spot_status = ['']*len(spot_ids)\n # Expecting status codes of \"pending-evaluation\", \"pending-fulfillment\", or \n # fulfilled. Any other status-code should be printed out & the program \n # terminated.\n expected_status = ['fulfilled', 'pending-evaluation', 'pending-fulfillment']\n instance_ids = [None]*len(spot_ids)\n\n # check the status of the spot requests\n while True:\n fulfilled = 0\n for i, id in enumerate(spot_ids):\n inst_str = '[' + str(i) + ']'\n r = client.describe_spot_instance_requests(SpotInstanceRequestIds=[id])\n status_code = r['SpotInstanceRequests'][0]['Status']['Code']\n if status_code not in expected_status:\n lgr.error(CONST.ERROR + \n colour_msg(Colour.CYAN, 'Unexpected status for spot request ') +\n colour_msg(Colour.PURPLE, id) +\n colour_msg(Colour.CYAN, ': ') +\n colour_msg(Colour.PURPLE, status_code))\n sys.exit(1)\n if status_code != spot_status[i]:\n lgr.debug(CONST.DEBUG + \n colour_msg(Colour.CYAN, 'Spot instance request: ') +\n colour_msg(Colour.PURPLE, inst_str) +\n colour_msg(Colour.CYAN, '\\tStatus: ') +\n colour_msg(Colour.PURPLE, status_code))\n spot_status[i] = status_code\n if status_code == 'fulfilled':\n fulfilled += 1\n # record the instance id\n instance_ids[i] = r['SpotInstanceRequests'][0]['InstanceId']\n if fulfilled == len(spot_ids):\n break\n time.sleep(1)\n\n utility.list_to_file(CONST.INSTANCE_IDS, instance_ids)\n lgr.debug(CONST.DEBUG + colour_msg(Colour.CYAN, 'Instance Ids:'))\n for i, id in enumerate(instance_ids):\n inst_str = '[' + str(i) + ']'\n lgr.debug(CONST.DEBUG + colour_msg(Colour.PURPLE, inst_str + '\\t' + id))\n tag_val = self.ec2_instance_tag + str(i)\n client.create_tags(Resources=[id], Tags=[{'Key':'Name', 'Value':tag_val}])\n\n # monitor the instances until all running\n instance_states = ['']*len(instance_ids)\n expected_states = ['running', 'pending']\n instance_ips = [None]*len(instance_ids)\n running = 0\n while True:\n running = 0\n for i, id in enumerate(instance_ids):\n inst_str = '[' + str(i) + ']'\n r = client.describe_instances(InstanceIds=[id])\n state = r['Reservations'][0]['Instances'][0]['State']['Name']\n if state not in expected_states:\n lgr.error(CONST.ERROR + \n colour_msg(Colour.CYAN, \n 'Unexpected instance state for instance-id ') +\n colour_msg(Colour.PURPLE, id) +\n colour_msg(Colour.CYAN, ': \\t') +\n colour_msg(Colour.PURPLE, state))\n sys.exit(1)\n if state != instance_states[i]:\n lgr.debug(CONST.DEBUG + \n colour_msg(Colour.CYAN, 'Instance id: ') +\n colour_msg(Colour.PURPLE, inst_str) +\n colour_msg(Colour.CYAN, '\\tState: ') +\n colour_msg(Colour.PURPLE, state))\n instance_states[i] = state\n if state == 'running':\n running += 1\n # record the instance id\n instance_ips[i] = 
r['Reservations'][0]['Instances'][0]['PublicDnsName']\n if running == len(instance_ids):\n break\n time.sleep(10)\n\n lgr.debug(CONST.DEBUG + colour_msg(Colour.CYAN, 'Instance Ips:'))\n for i, id in enumerate(instance_ips):\n inst_str = '[' + str(i) + ']'\n lgr.debug(CONST.DEBUG + colour_msg(Colour.PURPLE, inst_str + '\\t' + id))\n \n utility.list_to_file(CONST.INSTANCE_IPS_FILE, instance_ips)\n # need to at least wait until all the instances are reachable\n # possible statuses: (passed | failed | initializing | insufficient-data )\n reachability = ['']*len(instance_ids)\n while True:\n passed = 0\n for i, id in enumerate(instance_ids):\n inst_str = '[' + str(i) + ']'\n r = client.describe_instance_status(InstanceIds=[id])\n state = r['InstanceStatuses'][0]['InstanceStatus']['Details'][0]['Status']\n if state != reachability[i]:\n lgr.debug(CONST.DEBUG + \n colour_msg(Colour.CYAN, 'Instance id: ') +\n colour_msg(Colour.PURPLE, inst_str) +\n colour_msg(Colour.CYAN, '\\tReachability: ') +\n colour_msg(Colour.PURPLE, state))\n reachability[i] = state\n if state == 'passed':\n passed += 1\n if passed == len(instance_ids):\n break\n time.sleep(10)\n \n lgr.info(CONST.INFO + colour_msg(Colour.GREEN, 'Instances are reachable'))\n \n # if user-data configuration file supplied - check that it has worked\n # Note that this checker is run once on each instance\n if self.ec2_user_data:\n lgr.info(CONST.INFO + colour_msg(Colour.CYAN, \n 'Starting job to monitor user-data configuration...'))\n # at the moment is calling a local script that does the checking\n result = subprocess.call('./' + self.ec2_user_data_check) \n if result:\n lgr.error(CONST.ERROR + colour_msg(Colour.CYAN, \n 'user data checker FAILED'))\n sys.exit(1)\n\n # create an entry in the s3 log for finish this task \n self.log_to_s3('run-instances-finish.log', 'finish')\n\n # return the list of ip's for the newly created instances\n return utility.file_to_list(CONST.INSTANCE_IPS_FILE)", "title": "" }, { "docid": "600eef654d130f43ab429abcff5f5808", "score": "0.45511362", "text": "def isNodeHealthy(self):\n instances = self.coordinators + self.gtms + self.datanodes\n \n for inst in instances:\n if (not inst.isInstanceHealthy()):\n return False\n \n return True", "title": "" }, { "docid": "a7f46cf313288f133bb602b32c397c85", "score": "0.4545711", "text": "def _is_elastic_launched():\n required_env_vars = {'RANK', 'GROUP_RANK', 'LOCAL_RANK', 'LOCAL_WORLD_SIZE'}\n return required_env_vars.issubset(os.environ.keys())", "title": "" }, { "docid": "a23037d0568f764c354b56f56fbf5465", "score": "0.45423296", "text": "def check_for_running_hostengine_and_log_details(quiet):\n\n header = \"*************** List of nv-hostengine processes ***************\"\n ps_output = check_output([\"ps\", \"-ef\"])\n processes_list = ps_output.split(\"\\n\")\n process_ids = []\n for process in processes_list:\n if \"nv-hostengine\" in process:\n if header != None:\n if not quiet:\n logger.info(header)\n header = None\n if not quiet:\n logger.info(process)\n fields = process.split(' ')\n if len(fields) > 1 and fields[1]:\n process_ids.append(fields[1])\n \n if header is None:\n if not quiet:\n logger.info(\"*************** End list of nv-hostengine processes ***************\")\n elif not quiet: \n logger.info(\"No hostengine process found\")\n\n return process_ids", "title": "" }, { "docid": "6635e3b435dca1a2a8f1dab096d25a0d", "score": "0.45341045", "text": "def collect_load_balanced_instances():\r\n\r\n require('load_balancer')\r\n require('ec2_connection')\r\n 
require('elb_connection')\r\n instance_states = env.elb_connection.describe_instance_health(\r\n env.load_balancer)\r\n ids = []\r\n for instance in instance_states:\r\n print(\"Adding instance %s\" % instance.instance_id)\r\n ids.append(instance.instance_id)\r\n instances = None\r\n instance_fqdns = []\r\n if ids:\r\n instances = env.ec2_connection.get_all_instances(instance_ids=ids)\r\n for instance in instances:\r\n if (instance.instances[0].update() == 'running'\r\n and instance.instances[0].dns_name):\r\n instance_fqdns.append(\r\n '%s:%d' % (instance.instances[0].dns_name, env.ssh_port))\r\n print(\"Found instances %s behind load balancer\" % instance_fqdns)\r\n return instance_fqdns", "title": "" }, { "docid": "bbc365074a3206a77dda55d90359aca7", "score": "0.45306373", "text": "def run_instances(self, xml_bytes):\n root = XML(xml_bytes)\n # Get the security group information.\n groups = []\n for group_data in root.find(\"groupSet\"):\n group_id = group_data.findtext(\"groupId\")\n groups.append(group_id)\n # Create a reservation object with the parsed data.\n reservation = model.Reservation(\n reservation_id=root.findtext(\"reservationId\"),\n owner_id=root.findtext(\"ownerId\"),\n groups=groups)\n # Get the list of instances.\n instances = self.instances_set(root, reservation)\n return instances", "title": "" }, { "docid": "c77d043c5984f94cb5f4e3bc430cc034", "score": "0.45290348", "text": "def hyperv_check(ip):\r\n hostip = hostd[ip][\"hyperv\"]\r\n log_msg(INFO, \"Remote HyperV Check: \" + str(hostip))\r\n if hostip not in hvdict.keys():\r\n hvdict[hostip] = {}\r\n try:\r\n url = \"https://\" + hostip + \":5986/wsman\"\r\n global psClient\r\n psClient = Protocol(endpoint=url, transport=\"ntlm\", username=wdusername, password=wdpassword, server_cert_validation=\"ignore\")\r\n # Get the hostname\r\n cmd = \"hostname\"\r\n op = rpscmd(cmd)\r\n hostname = \"\"\r\n for line in op:\r\n hostname = line.strip()\r\n log_msg(INFO, \"Host Name: \" + str(hostname))\r\n hvdict[hostip][\"HostName\"] = {\"Status\": str(hostname), \"Result\": \"Check if the hostname is defined.\"}\r\n\r\n # 1) Failover cluster manager role is enabled\r\n cmd = \"Get-WindowsFeature Failover*\"\r\n op = rpscmd(cmd)\r\n flchk = \"\"\r\n for line in op:\r\n if \"Installed\" in line:\r\n flchk = \"Installed\"\r\n elif \"Not Installed\" in line:\r\n flchk = \"Not Installed\"\r\n hvdict[hostip][\"Cluster Failover\"] = {\"Status\": flchk,\r\n \"Result\": \"Check if the Failover Cluster Manager feature is installed.\"}\r\n\r\n # 2) Hyper-V Manager role / feature enabled\r\n cmd = \"Get-WindowsFeature Hyper-V\"\r\n op = rpscmd(cmd)\r\n fechk = \"\"\r\n for line in op:\r\n if \"Installed\" in line:\r\n fechk = \"Installed\"\r\n elif \"Not Installed\" in line:\r\n fechk = \"Not Installed\"\r\n hvdict[hostip][\"Hyper-V Role\"] = {\"Status\": fechk,\r\n \"Result\": \"Check if the Hyper-V Manager feature is installed.\"}\r\n\r\n # 3) Check Node State\r\n cmd = \"Get-ClusterNode\"\r\n op = rpscmd(cmd)\r\n ndstate = \"\"\r\n for line in op:\r\n if hostname in line:\r\n if \"up\" in line.lower():\r\n ndstate = \"PASS\"\r\n elif \"down\" in line.lower():\r\n ndstate = \"Fail\"\r\n if ndstate == \"PASS\":\r\n hvdict[hostip][\"Node State\"] = {\"Status\": \"PASS\", \"Result\": \"Check the Node State.\"}\r\n else:\r\n hvdict[hostip][\"Node State\"] = {\"Status\": ndstate,\r\n \"Result\": \"Please check the Cluster Failover status.\"}\r\n\r\n # 4) Check network interfaces state\r\n cmd = \"Get-ClusterNetwork\"\r\n op = rpscmd(cmd)\r\n 
nwstate = \"PASS\"\r\n for line in op:\r\n if \"down\" in line.lower():\r\n nwstate = \"FAIL\"\r\n break\r\n hvdict[hostip][\"Network Interfaces State\"] = {\"Status\": nwstate,\r\n \"Result\": \"Check the Network Interfaces State.\"}\r\n\r\n # 5) Check Remote Management is enabled\r\n cmd = \"Get-Service WinRM\"\r\n op = rpscmd(cmd)\r\n rmstate = \"\"\r\n for line in op:\r\n if \"running\" in line.lower() and \"winrm\" in line.lower():\r\n rmstate = \"PASS\"\r\n break\r\n hvdict[hostip][\"Remote Management Enabled\"] = {\"Status\": nwstate,\r\n \"Result\": \"Check if the Remote Management is enabled on the node.\"}\r\n\r\n # 6) Check the Domain and forest details\r\n cmd = \"\"\"Get-WmiObject Win32_NTDomain -Filter \\\"DnsForestName = '$( (Get-WmiObject Win32_ComputerSystem).Domain)'\\\"\"\"\"\r\n op = rpscmd(cmd)\r\n fdetails = []\r\n for line in op:\r\n if \"Description\" in line:\r\n fdetails.append(line.strip())\r\n elif \"DnsForestName\" in line:\r\n fdetails.append(line.strip())\r\n elif \"DomainControllerAddress\" in line:\r\n fdetails.append(line.strip())\r\n elif \"DomainControllerName\" in line:\r\n fdetails.append(line.strip())\r\n elif \"DomainName\" in line:\r\n fdetails.append(line.strip())\r\n elif \"Status\" in line:\r\n fdetails.append(line.strip())\r\n hvdict[hostip][\"Check the Domain and forest details\"] = {\"Status\": \"\\n\".join(fdetails),\r\n \"Result\": \"Check the Domain and forest details of the cluster.\"}\r\n\r\n # 7) Check host file entries\r\n cmd = \"Get-Content $env:SystemRoot\\System32\\Drivers\\etc\\hosts\"\r\n op = rpscmd(cmd)\r\n hdetails = []\r\n for line in op:\r\n if not line.startswith(\"#\"):\r\n hdetails.append(line.strip())\r\n hvdict[hostip][\"Check host file entries\"] = {\"Status\": \"\\n\".join(hdetails),\r\n \"Result\": \"Check if the host file have correct entries.\"}\r\n\r\n # 8) Check Adapter details\r\n cmd = \"Get-NetIPConfiguration | Format-Table InterfaceAlias, InterfaceDescription, IPv4Address -auto\"\r\n adetails = rpscmd(cmd)\r\n hvdict[hostip][\"Check Adapter details\"] = {\"Status\": \"\\n\".join(adetails),\r\n \"Result\": \"Check Adapter details of the node.\"}\r\n\r\n # 9) Check MTU for Storage Data Network\r\n cmd = \"Get-NetIPInterface -AddressFamily IPv4 -InterfaceAlias vswitch-hx-storage-data | select NlMtu*\"\r\n op = rpscmd(cmd)\r\n mtu = \"\"\r\n for line in op:\r\n if (line.strip()).isdigit():\r\n mtu = line.strip()\r\n hvdict[hostip][\"MTU for Storage Data Network\"] = {\"Status\": str(mtu),\r\n \"Result\": \"Check MTU for the Storage Data Network.\"}\r\n\r\n # 10) Check the status minifilter driver\r\n cmd = \"fltmc\"\r\n drivertails = rpscmd(cmd)\r\n hvdict[hostip][\"Drivers test\"] = {\"Status\": \"\\n\".join(drivertails),\r\n \"Result\": \"Check the status of minifilter drivers.\"}\r\n\r\n # 11) Virtual Machine Management service check\r\n vmmCheck = \"\"\r\n con = 0\r\n cmd = \"Get-Process vmms | Format-list Name, Id, Responding\"\r\n op = rpscmd(cmd)\r\n for line in op:\r\n if \"Id\" in line:\r\n m = re.search(r\":\\s(\\d+)\", line)\r\n if m:\r\n con = 1\r\n elif \"Responding\" in line:\r\n if \"True\" in line and con:\r\n vmmCheck = \"PASS\"\r\n else:\r\n vmmCheck = \"FAIL\"\r\n if vmmCheck == \"FAIL\":\r\n hvdict[hostip][\"Virtual Machine Management service check\"] = {\"Status\": \"FAIL\",\r\n \"Result\": \"Please manually verify the status of VMMS service.\"}\r\n else:\r\n hvdict[hostip][\"Virtual Machine Management service check\"] = {\"Status\": vmmCheck,\r\n \"Result\": \"Checking if VMMS service is Up 
and Running.\"}\r\n\r\n # 12) SMB Test\r\n smbtest = []\r\n smbfqdn = \"\"\r\n smbResult = []\r\n if hdetails:\r\n for line in hdetails:\r\n l = line.split()\r\n if len(l) == 2:\r\n smbfqdn = l[1].strip()\r\n break\r\n if smbfqdn:\r\n try:\r\n psClient = Protocol(endpoint=url, transport=\"ntlm\", username=wdusername, password=wdpassword, server_cert_validation=\"ignore\")\r\n for ds in datastorelist:\r\n cmd = r\"test-path \\\\{}\\{}\".format(smbfqdn, ds)\r\n op = rpscmd(cmd)\r\n if op:\r\n smbResult.append(str(op[0]))\r\n rs = cmd + \" \" + str(op[0])\r\n smbtest.append(rs)\r\n except Exception as eps:\r\n log_msg(ERROR, str(eps))\r\n log_msg(INFO, \"SMB Test:\" + str(smbResult))\r\n if smbResult:\r\n if \"False\" in smbResult:\r\n hvdict[hostip][\"SMB Test\"] = {\"Status\": \"FAIL\", \"Result\": \"\\n\".join(smbtest)}\r\n else:\r\n hvdict[hostip][\"SMB Test\"] = {\"Status\": \"PASS\", \"Result\": \"Checking SMB reachability of node.\"}\r\n else:\r\n hvdict[hostip][\"SMB Test\"] = {\"Status\": \"\", \"Result\": \"Checking SMB reachability of node.\"}\r\n\r\n log_msg(INFO, \"Remote HyperV Check Complete:\" + str(hostip))\r\n\r\n except Exception as er:\r\n log_msg(INFO, \"Not able to connect remote Hyper-V host: \" + str(hostip))\r\n log_msg(INFO, \"\\r\\nInvalid Hyper-V password\\r\")\r\n log_msg(ERROR, str(er))", "title": "" }, { "docid": "76d0d569c2a0cedf3c34ad6eabed8ea6", "score": "0.45275927", "text": "def get_hosts_info(self):\n if self.debug:\n print(\"osconnector: Getting Host info from VIM\")\n try:\n h_list = []\n self._reload_connection()\n hypervisors = self.nova.hypervisors.list()\n for hype in hypervisors:\n h_list.append(hype.to_dict())\n return 1, {\"hosts\": h_list}\n except nvExceptions.NotFound as e:\n error_value = -vimconn.HTTP_Not_Found\n error_text = (str(e) if len(e.args) == 0 else str(e.args[0]))\n except (ksExceptions.ClientException, nvExceptions.ClientException) as e:\n error_value = -vimconn.HTTP_Bad_Request\n error_text = type(e).__name__ + \": \" + (str(e) if len(e.args) == 0 else str(e.args[0]))\n # TODO insert exception vimconn.HTTP_Unauthorized\n # if reaching here is because an exception\n self.logger.debug(\"get_hosts_info \" + error_text)\n return error_value, error_text", "title": "" }, { "docid": "77f45fe30ef1f04273a892d8d1131d42", "score": "0.45270738", "text": "def list(ctx):\r\n config = ctx.obj['config']\r\n config.validate()\r\n host = config.get_active_host()\r\n instances = host.get_instances()\r\n logger.info(\"Instances on: %s\", host.name)\r\n outputters.table([x.dump() for x in instances])", "title": "" }, { "docid": "106a63df282e027493dce2d922c64487", "score": "0.45203173", "text": "def is_nfs_have_host_with_host_obj(nfs_details):\n host_obj_params = ('no_access_hosts', 'read_only_hosts',\n 'read_only_root_access_hosts', 'read_write_hosts',\n 'root_access_hosts')\n for host_obj_param in host_obj_params:\n if nfs_details.get(host_obj_param):\n return True\n return False", "title": "" }, { "docid": "d2d49cf78924f1e935af761b543dfa8b", "score": "0.45201692", "text": "def _load_instance_conf_envs(self, cname):\n if PER_INSTANCE_ENV_CONFIGURATION_FOLDER is None:\n return dict()\n try:\n path = os.path.expanduser(PER_INSTANCE_ENV_CONFIGURATION_FOLDER)\n path = os.path.join(path, \"{}.env.yml\".format(cname))\n res = load_yaml(path)\n LOG.info(\"Loaded instance-specific env file for '{}': {}\"\n .format(cname, res))\n return res\n except BaseException as ex:\n LOG.info(\"No instance-specific env file found for: {}\"\n .format(cname))\n del ex\n 
return dict()", "title": "" }, { "docid": "375905844a4ba76f38eada938e9f41fc", "score": "0.45145303", "text": "def build_hostlist():\n\n # by default, we just assume that we are running\n # on one node (i.e. the one we are running on)\n nodes = ['localhost'] # [socket.gethostname()]\n\n slurm_nodelist = os.environ.get(\"SLURM_NODELIST\", None)\n if slurm_nodelist is not None:\n o, e, dt = calcit.process.execute(\"scontrol show hostname\")\n # in python3 we might get a series of bytes and not\n # a regular python string from the above, thus\n # we need to decode it\n nodes = [n.decode('utf-8') for n in set(o.split())]\n\n return nodes", "title": "" }, { "docid": "59c97dbcd1b6830d659e8a2b89af341b", "score": "0.45110297", "text": "def check_predefined_conditions():\n try:\n node_info = json.loads(\n current_k8s_corev1_api_client.list_node(\n _preload_content=False\n ).data.decode()\n )\n for node in node_info[\"items\"]:\n # check based on the predefined conditions about the\n # node status: MemoryPressure, OutOfDisk, KubeletReady\n # DiskPressure, PIDPressure,\n for condition in node.get(\"status\", {}).get(\"conditions\", {}):\n if not condition.get(\"status\"):\n return False\n except ApiException as e:\n log.error(\"Something went wrong while getting node information.\")\n log.error(e)\n return False\n return True", "title": "" }, { "docid": "0857bc3d2c768e5a3120e3d28489f716", "score": "0.44985753", "text": "def get_machine_proc_stats(hostname, filepath):\n cat_output = ssh_machine_proc_stats(hostname, filepath).split('\\n')\n if 'disk' in filepath:\n parsed_stats = parse_disk(cat_output)\n else:\n parsed_stats = parse_net(cat_output)\n\n return parsed_stats", "title": "" }, { "docid": "2b87aa9ae1044a4a3fc74e33a9ecbe50", "score": "0.44941208", "text": "def _check_conf_file(self):\n root = self.parse_xml_file(self.xml_file_path)\n check_list = ['Storage/ControllerIP0', 'Storage/ControllerIP1',\n 'Storage/UserName', 'Storage/UserPassword']\n for item in check_list:\n if not self.is_xml_item_exist(root, item):\n err_msg = (_('_check_conf_file: Config file invalid. '\n '%s must be set.') % item)\n LOG.error(err_msg)\n raise exception.InvalidInput(reason=err_msg)\n\n # Make sure storage pool is set.\n if not self.is_xml_item_exist(root, 'LUN/StoragePool', 'Name'):\n err_msg = _('_check_conf_file: Config file invalid. '\n 'StoragePool must be set.')\n LOG.error(err_msg)\n raise exception.InvalidInput(reason=err_msg)\n\n # If setting os type, make sure it valid.\n if self.is_xml_item_exist(root, 'Host', 'OSType'):\n os_list = constants.OS_TYPE.keys()\n if not self.is_xml_item_valid(root, 'Host', os_list, 'OSType'):\n err_msg = (_('_check_conf_file: Config file invalid. 
'\n 'Host OSType is invalid.\\n'\n 'The valid values are: %(os_list)s')\n % {'os_list': os_list})\n LOG.error(err_msg)\n raise exception.InvalidInput(reason=err_msg)", "title": "" }, { "docid": "81e624e1b9f312f82752cc88d751eab0", "score": "0.44932422", "text": "def describe_instances(self, xml_bytes):\n root = XML(xml_bytes)\n results = []\n # May be a more elegant way to do this:\n for reservation_data in root.find(\"reservationSet\"):\n # Create a reservation object with the parsed data.\n reservation = model.Reservation(\n reservation_id=reservation_data.findtext(\"reservationId\"),\n owner_id=reservation_data.findtext(\"ownerId\"))\n # Get the list of instances.\n instances = self.instances_set(\n reservation_data, reservation)\n results.extend(instances)\n return results", "title": "" }, { "docid": "ddb194272151e6be506bd163b0502a41", "score": "0.44923005", "text": "def create_node(self, **kwargs):\r\n image = kwargs[\"image\"]\r\n size = kwargs[\"size\"]\r\n params = {\r\n 'Action': 'RunInstances',\r\n 'ImageId': image.id,\r\n 'MinCount': str(kwargs.get('ex_mincount', '1')),\r\n 'MaxCount': str(kwargs.get('ex_maxcount', '1')),\r\n 'InstanceType': size.id\r\n }\r\n\r\n if 'ex_security_groups' in kwargs and 'ex_securitygroup' in kwargs:\r\n raise ValueError('You can only supply ex_security_groups or'\r\n ' ex_securitygroup')\r\n\r\n # ex_securitygroup is here for backward compatibility\r\n ex_security_groups = kwargs.get('ex_security_groups', None)\r\n ex_securitygroup = kwargs.get('ex_securitygroup', None)\r\n security_groups = ex_security_groups or ex_securitygroup\r\n\r\n if security_groups:\r\n if not isinstance(security_groups, (tuple, list)):\r\n security_groups = [security_groups]\r\n\r\n for sig in range(len(security_groups)):\r\n params['SecurityGroup.%d' % (sig + 1,)] =\\\r\n security_groups[sig]\r\n\r\n if 'location' in kwargs:\r\n availability_zone = getattr(kwargs['location'],\r\n 'availability_zone', None)\r\n if availability_zone:\r\n if availability_zone.region_name != self.region_name:\r\n raise AttributeError('Invalid availability zone: %s'\r\n % (availability_zone.name))\r\n params['Placement.AvailabilityZone'] = availability_zone.name\r\n\r\n if 'auth' in kwargs and 'ex_keyname' in kwargs:\r\n raise AttributeError('Cannot specify auth and ex_keyname together')\r\n\r\n if 'auth' in kwargs:\r\n auth = self._get_and_check_auth(kwargs['auth'])\r\n key = self.ex_find_or_import_keypair_by_key_material(auth.pubkey)\r\n params['KeyName'] = key['keyName']\r\n\r\n if 'ex_keyname' in kwargs:\r\n params['KeyName'] = kwargs['ex_keyname']\r\n\r\n if 'ex_userdata' in kwargs:\r\n params['UserData'] = base64.b64encode(b(kwargs['ex_userdata']))\\\r\n .decode('utf-8')\r\n\r\n if 'ex_clienttoken' in kwargs:\r\n params['ClientToken'] = kwargs['ex_clienttoken']\r\n\r\n if 'ex_blockdevicemappings' in kwargs:\r\n params.update(self._get_block_device_mapping_params(\r\n kwargs['ex_blockdevicemappings']))\r\n\r\n if 'ex_iamprofile' in kwargs:\r\n if not isinstance(kwargs['ex_iamprofile'], basestring):\r\n raise AttributeError('ex_iamprofile not string')\r\n\r\n if kwargs['ex_iamprofile'].startswith('arn:aws:iam:'):\r\n params['IamInstanceProfile.Arn'] = kwargs['ex_iamprofile']\r\n else:\r\n params['IamInstanceProfile.Name'] = kwargs['ex_iamprofile']\r\n\r\n if 'ex_ebs_optimized' in kwargs:\r\n params['EbsOptimized'] = kwargs['ex_ebs_optimized']\r\n\r\n object = self.connection.request(self.path, params=params).object\r\n nodes = self._to_nodes(object, 'instancesSet/item')\r\n\r\n for node in 
nodes:\r\n tags = {'Name': kwargs['name']}\r\n if 'ex_metadata' in kwargs:\r\n tags.update(kwargs['ex_metadata'])\r\n\r\n try:\r\n self.ex_create_tags(resource=node, tags=tags)\r\n except Exception:\r\n continue\r\n\r\n node.name = kwargs['name']\r\n node.extra.update({'tags': tags})\r\n\r\n if len(nodes) == 1:\r\n return nodes[0]\r\n else:\r\n return nodes", "title": "" }, { "docid": "0c9710e1a60ea981ad302e3cce8e213a", "score": "0.44854483", "text": "def _ReadInstances(input_file=None, data_format=None):\n instances = []\n line_num = 0\n\n for line_num, line in enumerate(input_file):\n line_content = line.rstrip('\\n')\n if not line_content:\n raise InvalidInstancesFileError('Empty line is not allowed in the '\n 'instances file.')\n if line_num > 100:\n raise InvalidInstancesFileError(\n 'Online prediction can process no more than 100 '\n 'instances per file. Please use batch prediction instead.')\n if data_format == 'json':\n try:\n instances.append(json.loads(line_content))\n except ValueError:\n raise InvalidInstancesFileError(\n 'Input instances are not in JSON format. '\n 'See \"gcloud beta ml predict --help\" for details.')\n elif data_format == 'text':\n instances.append(line_content)\n\n if not instances:\n raise InvalidInstancesFileError('No valid instance was found.')\n\n return instances", "title": "" }, { "docid": "c186b2c44d01e1df2630c863aeeae701", "score": "0.44849432", "text": "def get_all_instances():\n resources = query_openvz(False, \"ctid,hostname,status\")\n candidates = {}\n for r in resources:\n candidates[r[0]] = {\"vm_id\": r[0],\n \"name\": r[1],\n \"status\": r[2],\n \"memory\": get_memory(r[0]) / 1024,\n \"disk\": get_diskspace(r[0]) / 1024,\n \"vcpu\": get_vcpu(r[0]),\n \"vm_type\": \"openvz\"\n }\n return candidates", "title": "" }, { "docid": "0a18d081290e57989da95a77a52c0c34", "score": "0.44831207", "text": "def auto_check_avg_cpu_utilization():\n\n with app.app_context():\n autoScalingConfig = AutoScalingConfig.query.first()\n print(\"auto config: \" + str(autoScalingConfig))\n if not autoScalingConfig:\n return\n\n if autoScalingConfig.isOn and not has_pending_instances():\n print(\"auto scaling on\")\n # only getting the instances that are serving the app\n _, num_workers = get_serving_instances()\n _, num_running_instances = get_running_instances()\n\n if num_workers != num_running_instances:\n return\n print('all the created instances in service now!')\n _, num_non_terminated_instances = get_non_terminated_instances()\n # avg util > expand_threshold\n all_has_cpu_util, avg_cpu_util = all_instance_has_valid_cpu_util()\n if not all_has_cpu_util:\n print('newly created worker has no cpu util yet, wait!')\n return\n if avg_cpu_util > autoScalingConfig.expand_threshold:\n if num_non_terminated_instances >= 8:\n print('number of instances created reaches limit !')\n return\n to_create = int(\n math.ceil((autoScalingConfig.expand_ratio - 1) * num_workers))\n if to_create + num_non_terminated_instances >= 8:\n to_create = max(8 - num_non_terminated_instances, 0)\n print(\"max number of workers reached! 
only creating {} additional workers\".format(\n to_create))\n print(\"CPU expand threshold: {} reached ---- creating {} new instances --- expand ratio: {}\".format(\n autoScalingConfig.expand_threshold, to_create, autoScalingConfig.expand_ratio))\n for i in range(to_create):\n celery_create_worker()\n\n elif avg_cpu_util < autoScalingConfig.shrink_threshold:\n to_destroy = int(autoScalingConfig.shrink_ratio * num_workers)\n if to_destroy > 0:\n print(\"CPU shrink threshold: {} reached ---- destorying {} instances --- shrink ratio: {}\".format(\n autoScalingConfig.shrink_threshold, to_destroy, autoScalingConfig.shrink_ratio))\n random_destroy_worker(to_destroy)\n else:\n print(\"CPU utilization within range\")\n\n elif has_pending_instances():\n print('there are pending instances')\n else:\n print('auto config is off')", "title": "" }, { "docid": "9387d331fdbc78295030da41b3226a4f", "score": "0.448063", "text": "def check_min_healthy_instances(min_healthy):\n healthy = ec2_utils.GetELBInstancesByHealth(env.region, node_types=[env.nodetype])\n num_healthy = len(healthy['InService'])\n assert num_healthy >= min_healthy, 'Not enough backends with healthy ELB status (%d vs %d)' % \\\n (num_healthy, min_healthy)", "title": "" }, { "docid": "869fd127e0285ecd37f61e2a5d72c872", "score": "0.4476563", "text": "def _get_instances_info(self, instance_ids: list):\n instances = []\n partial_instance_ids = instance_ids\n\n retry = 4\n # Wait for instances to be available in EC2\n time.sleep(0.1)\n while retry > 0 and partial_instance_ids:\n complete_instances, partial_instance_ids = self._retrieve_instances_info_from_ec2(partial_instance_ids)\n instances.extend(complete_instances)\n retry = retry - 1\n if retry > 0:\n time.sleep(0.3)\n\n return instances, partial_instance_ids", "title": "" }, { "docid": "72e247422e6056355d1d2a4b5135d5c5", "score": "0.44728583", "text": "def hostname_params(self):\n HOSTNAME = {}\n if self.GENERAL_SNAP:\n hostname_params = self.__snap_stanza_read(self.GENERAL_SNAP, 'lsattr -El inet0')\n if hostname_params:\n for record in hostname_params:\n if 'hostname' in record:\n HOSTNAME.update({'hostname' : record.split()[1]})\n else:\n return None\n return HOSTNAME", "title": "" }, { "docid": "5ccd5b4b39d4f7e81ce6a30b4bdec1af", "score": "0.4462372", "text": "def test_list_instances(self):\n # Run test to list available instances\n instances = self.RSD.list_instances()\n\n # Confirm the result matches the internal list\n self.assertEqual(instances, {self.inst1.uuid: self.inst1})", "title": "" }, { "docid": "8c59b10a8ac083e693ac0c6a85448dba", "score": "0.4443987", "text": "def _resume(arg_nodes, resume_config, slurm_resume):\n # Check heartbeat\n current_time = datetime.now(tz=timezone.utc)\n if not is_clustermgtd_heartbeat_valid(\n current_time, resume_config.clustermgtd_timeout, resume_config.clustermgtd_heartbeat_file_path\n ):\n log.error(\n \"No valid clustermgtd heartbeat detected, clustermgtd is down!\\n\"\n \"Please check clustermgtd log for error.\\n\"\n \"Not launching nodes %s\",\n arg_nodes,\n )\n _handle_failed_nodes(arg_nodes)\n return\n log.info(\"Launching EC2 instances for the following Slurm nodes: %s\", arg_nodes)\n node_list = []\n node_list_with_status = []\n for node in get_nodes_info(arg_nodes):\n node_list.append(node.name)\n node_list_with_status.append((node.name, node.state_string))\n log.info(\"Current state of Slurm nodes to resume: %s\", node_list_with_status)\n\n instance_manager = InstanceManagerFactory.get_manager(\n region=resume_config.region,\n 
cluster_name=resume_config.cluster_name,\n boto3_config=resume_config.boto3_config,\n table_name=resume_config.dynamodb_table,\n hosted_zone=resume_config.hosted_zone,\n dns_domain=resume_config.dns_domain,\n use_private_hostname=resume_config.use_private_hostname,\n head_node_private_ip=resume_config.head_node_private_ip,\n head_node_hostname=resume_config.head_node_hostname,\n fleet_config=resume_config.fleet_config,\n run_instances_overrides=resume_config.run_instances_overrides,\n create_fleet_overrides=resume_config.create_fleet_overrides,\n job_level_scaling=resume_config.job_level_scaling,\n )\n instance_manager.add_instances(\n slurm_resume=slurm_resume,\n node_list=node_list,\n launch_batch_size=resume_config.launch_max_batch_size,\n assign_node_batch_size=resume_config.assign_node_max_batch_size,\n terminate_batch_size=resume_config.terminate_max_batch_size,\n update_node_address=resume_config.update_node_address,\n all_or_nothing_batch=resume_config.all_or_nothing_batch,\n )\n failed_nodes = set().union(*instance_manager.failed_nodes.values())\n success_nodes = [node for node in node_list if node not in failed_nodes]\n log.info(\"Successfully launched nodes %s\", print_with_count(success_nodes))\n\n if failed_nodes:\n log.error(\n \"Failed to launch following nodes, setting nodes to down: %s\",\n print_with_count(failed_nodes),\n )\n for error_code, node_list in instance_manager.failed_nodes.items():\n _handle_failed_nodes(node_list, reason=f\"(Code:{error_code})Failure when resuming nodes\")\n\n event_publisher = ClusterEventPublisher.create_with_default_publisher(\n event_logger,\n resume_config.cluster_name,\n \"HeadNode\",\n \"slurm-resume\",\n resume_config.head_node_instance_id,\n )\n event_publisher.publish_node_launch_events(instance_manager.failed_nodes)", "title": "" }, { "docid": "77029afa465074d5e8f18ac7f3ce7d0c", "score": "0.4432957", "text": "def validateHiveInteractiveSiteConfigurationsFromHDP25(self, properties, recommendedDefaults, configurations, services, hosts):\n validationItems = []\n hsi_hosts = self.getHostsForComponent(services, \"HIVE\", \"HIVE_SERVER_INTERACTIVE\")\n llap_queue_name = None\n llap_queue_cap_perc = None\n MIN_ASSUMED_CAP_REQUIRED_FOR_SERVICE_CHECKS = 512\n llap_queue_cap = None\n hsi_site = self.getServicesSiteProperties(services, self.HIVE_INTERACTIVE_SITE)\n\n if len(hsi_hosts) == 0:\n return []\n\n # Get total cluster capacity\n node_manager_host_list = self.getHostsForComponent(services, \"YARN\", \"NODEMANAGER\")\n node_manager_cnt = len(node_manager_host_list)\n yarn_nm_mem_in_mb = self.get_yarn_nm_mem_in_mb(services, configurations)\n total_cluster_cap = node_manager_cnt * yarn_nm_mem_in_mb\n capacity_scheduler_properties, received_as_key_value_pair = self.getCapacitySchedulerProperties(services)\n\n if not capacity_scheduler_properties:\n self.logger.warning(\"Couldn't retrieve 'capacity-scheduler' properties while doing validation checks for Hive Server Interactive.\")\n return []\n\n if hsi_site:\n if \"hive.llap.daemon.queue.name\" in hsi_site and hsi_site['hive.llap.daemon.queue.name']:\n llap_queue_name = hsi_site['hive.llap.daemon.queue.name']\n llap_queue_cap = self.__getSelectedQueueTotalCap(capacity_scheduler_properties, llap_queue_name, total_cluster_cap)\n\n if llap_queue_cap:\n llap_queue_cap_perc = float(llap_queue_cap * 100 / total_cluster_cap)\n min_reqd_queue_cap_perc = self.min_queue_perc_reqd_for_llap_and_hive_app(services, hosts, configurations)\n\n # Validate that the selected queue in 
'hive.llap.daemon.queue.name' should be sized >= to minimum required\n # to run LLAP and Hive2 app.\n if llap_queue_cap_perc < min_reqd_queue_cap_perc:\n errMsg1 = \"Selected queue '{0}' capacity ({1}%) is less than minimum required capacity ({2}%) for LLAP \" \\\n \"app to run\".format(llap_queue_name, llap_queue_cap_perc, min_reqd_queue_cap_perc)\n validationItems.append({\"config-name\": \"hive.llap.daemon.queue.name\", \"item\": self.getErrorItem(errMsg1)})\n else:\n self.logger.error(\"Couldn't retrieve '{0}' queue's capacity from 'capacity-scheduler' while doing validation checks for \"\n \"Hive Server Interactive.\".format(llap_queue_name))\n\n # Validate that current selected queue in 'hive.llap.daemon.queue.name' state is not STOPPED.\n llap_selected_queue_state = self.__getQueueStateFromCapacityScheduler(capacity_scheduler_properties, llap_queue_name)\n if llap_selected_queue_state:\n if llap_selected_queue_state == \"STOPPED\":\n errMsg2 = \"Selected queue '{0}' current state is : '{1}'. It is required to be in 'RUNNING' state for LLAP to run\"\\\n .format(llap_queue_name, llap_selected_queue_state)\n validationItems.append({\"config-name\": \"hive.llap.daemon.queue.name\",\"item\": self.getErrorItem(errMsg2)})\n else:\n self.logger.error(\"Couldn't retrieve '{0}' queue's state from 'capacity-scheduler' while doing validation checks for \"\n \"Hive Server Interactive.\".format(llap_queue_name))\n else:\n self.logger.error(\"Couldn't retrieve 'hive.llap.daemon.queue.name' config from 'hive-interactive-site' while doing \"\n \"validation checks for Hive Server Interactive.\")\n\n # Validate that 'hive.server2.enable.doAs' config is not set to 'true' for Hive2.\n if 'hive.server2.enable.doAs' in hsi_site and hsi_site['hive.server2.enable.doAs'] == \"true\":\n validationItems.append({\"config-name\": \"hive.server2.enable.doAs\", \"item\": self.getErrorItem(\"Value should be set to 'false' for Hive2.\")})\n\n # Validate that 'Maximum Total Concurrent Queries'(hive.server2.tez.sessions.per.default.queue) is not consuming more that\n # 50% of selected queue for LLAP.\n if llap_queue_cap and 'hive.server2.tez.sessions.per.default.queue' in hsi_site:\n num_tez_sessions = hsi_site['hive.server2.tez.sessions.per.default.queue']\n if num_tez_sessions:\n num_tez_sessions = long(num_tez_sessions)\n yarn_min_container_size = long(self.get_yarn_min_container_size(services, configurations))\n tez_am_container_size = self.calculate_tez_am_container_size(services, long(total_cluster_cap))\n normalized_tez_am_container_size = self._normalizeUp(tez_am_container_size, yarn_min_container_size)\n llap_selected_queue_cap_remaining = llap_queue_cap - (normalized_tez_am_container_size * num_tez_sessions)\n if llap_selected_queue_cap_remaining <= llap_queue_cap/2:\n errMsg3 = \" Reducing the 'Maximum Total Concurrent Queries' (value: {0}) is advisable as it is consuming more than 50% of \" \\\n \"'{1}' queue for LLAP.\".format(num_tez_sessions, llap_queue_name)\n validationItems.append({\"config-name\": \"hive.server2.tez.sessions.per.default.queue\",\"item\": self.getWarnItem(errMsg3)})\n\n # Validate that 'remaining available capacity' in cluster is at least 512 MB, after 'llap' queue is selected,\n # in order to run Service Checks.\n if llap_queue_name and llap_queue_cap_perc and llap_queue_name == self.AMBARI_MANAGED_LLAP_QUEUE_NAME:\n curr_selected_queue_for_llap_cap = float(llap_queue_cap_perc) / 100 * total_cluster_cap\n available_cap_in_cluster = total_cluster_cap - 
curr_selected_queue_for_llap_cap\n if available_cap_in_cluster < MIN_ASSUMED_CAP_REQUIRED_FOR_SERVICE_CHECKS:\n errMsg4 = \"Capacity used by '{0}' queue is '{1}'. Service checks may not run as remaining available capacity \" \\\n \"({2}) in cluster is less than 512 MB.\".format(self.AMBARI_MANAGED_LLAP_QUEUE_NAME, curr_selected_queue_for_llap_cap, available_cap_in_cluster)\n validationItems.append({\"config-name\": \"hive.llap.daemon.queue.name\",\"item\": self.getWarnItem(errMsg4)})\n\n validationProblems = self.toConfigurationValidationProblems(validationItems, \"hive-interactive-site\")\n return validationProblems", "title": "" }, { "docid": "c6646c67b086b1ea605cd89f873f2b02", "score": "0.4426133", "text": "def load_instances():\n # TODO: get this properly\n # pylint: disable=line-too-long\n instance_data = [\n {\n \"name\": \"bookwyrm.social\",\n \"path\": \"https://bookwyrm.social/\",\n \"logo\": \"https://bookwyrm-social.sfo3.digitaloceanspaces.com/static/images/logo.png\",\n \"contact_name\": \"@tripofmice@friend.camp\",\n \"contact_link\": \"https://friend.camp/@tripofmice\",\n \"description\": \"Flagship instance, general purpose\",\n },\n {\n \"name\": \"wyrms.de\",\n \"path\": \"https://wyrms.de/\",\n \"logo\": \"https://wyrms.de/images/logos/wyrm_bright_300.png\",\n \"contact_name\": \"@tofuwabohu@subversive.zone\",\n \"contact_link\": \"https://subversive.zone/@tofuwabohu\",\n \"description\": \"The Dispossessed (Le Guin) and everything else\",\n },\n {\n \"name\": \"cutebook.club\",\n \"path\": \"https://cutebook.club/\",\n \"logo\": \"https://cutebook.club/images/logos/logo.png\",\n \"contact_name\": \"@allie@tech.lgbt\",\n \"contact_link\": \"https://tech.lgbt/@allie\",\n \"description\": \"General purpose\",\n },\n {\n \"name\": \"在我书目/Dans Mon Catalogue\",\n \"path\": \"https://book.dansmonorage.blue/\",\n \"logo\": \"https://book.dansmonorage.blue/images/logos/BC12B463-A984-4E92-8A30-BC2E9280A331_1.jpg\",\n \"contact_name\": \"@faketaoist@mstd.dansmonorage.blue\",\n \"contact_link\": \"https://mstd.dansmonorage.blue/@faketaoist\",\n \"description\": \"General purpose\",\n },\n {\n \"name\": \"Y Not Read\",\n \"path\": \"https://yyyyy.club/\",\n \"logo\": \"https://mastomedia.fra1.digitaloceanspaces.com/static/images/logo.png\",\n \"contact_name\": \"yyyyyadmin@protonmail.com\",\n \"contact_link\": \"mailto:yyyyyadmin@protonmail.com\",\n \"description\": \"General purpose\",\n }\n ]\n print(\" Fetching instance statistics:\")\n for instance in instance_data:\n print(\" - Fetching: %s\" % instance[\"name\"])\n try:\n response = requests.get(\"{:s}nodeinfo/2.0\".format(instance[\"path\"]),\n timeout=15)\n data = response.json()\n instance[\"users\"] = data[\"usage\"][\"users\"][\"activeMonth\"]\n instance[\"open_registration\"] = data[\"openRegistrations\"]\n except Exception as e: # pylint: disable=broad-except\n print(\" ! %s\" % str(e))\n print(\" - Site could possibly be down. 
Please check it manually:\")\n print(\" - Site url: %s\" % instance[\"path\"])\n instance[\"skip\"] = True\n return instance_data", "title": "" }, { "docid": "59db06899e493f83521ef50204f38dd0", "score": "0.44254866", "text": "def _parseXML(self):\n try:\n self.doc = wps.CreateFromDocument(file(self.__xmlFileName).read())\n\t \n if len(self.doc.ProcessDescription) > 1:\n raise IOError(\"Only one Process is supported\")\n \n for process in self.doc.ProcessDescription:\n\t proc = {} \n\n\t proc['processVersion'] = str(process.processVersion)\n\t proc['storeSupported'] = bool(process.storeSupported)\n\t proc['statusSupported'] = bool(process.statusSupported)\n\t\tproc[\"Identifier\"] = str(process.Identifier.value())\n\n if process.Metadata != None:\n metaData = []\n for meta in process.Metadata:\n content = {}\n if meta.title != None:\n content[\"title\"] = str(meta.title)\n if meta.about != None:\n content[\"about\"] = str(meta.about)\n if meta.arcrole != None:\n content[\"arcrole\"] = str(meta.arcrole)\n if meta.actuate != None:\n content[\"actuate\"] = str(meta.actuate)\n if meta.href != None:\n content[\"href\"] = str(meta.href)\n if meta.role != None:\n content[\"role\"] = str(meta.role)\n if meta.type != None:\n content[\"type\"] = str(meta.type)\n if meta.show != None:\n content[\"show\"] = str(meta.show)\n metaData.append(content)\n proc[\"Metadata\"] = metaData\n\n\n\t ita = self._getTitleAbstract(process)\n\t for key in ita.keys():\n\t\t proc[key] = ita[key] \n\t proc[\"DataInputs\"] = self._getDataInputs(process)\n\t proc[\"ProcessOutputs\"] = self._getProcessOutputs(process)\n\t self._content[\"ProcessDescription\"] = proc\n except:\n raise", "title": "" }, { "docid": "579d23a5bb0ba0719a5c2ea969011e22", "score": "0.44196713", "text": "def has_nodes(self):\n pass", "title": "" }, { "docid": "7de78519f045d580cd4add1180b0451e", "score": "0.44093195", "text": "def list_hosts(configfile, **kwargs):\n\n run_list_hosts(configfile)", "title": "" }, { "docid": "6746b98a9824bd29ba66e09074336784", "score": "0.44058445", "text": "def terminate_instances(self, xml_bytes):\n root = XML(xml_bytes)\n result = []\n # May be a more elegant way to do this:\n instances = root.find(\"instancesSet\")\n if instances is not None:\n for instance in instances:\n instanceId = instance.findtext(\"instanceId\")\n previousState = instance.find(\"previousState\").findtext(\n \"name\")\n currentState = instance.find(\"currentState\").findtext(\n \"name\")\n result.append((instanceId, previousState, currentState))\n return result", "title": "" }, { "docid": "e41509a145314b7ecdb5cdb2e637cace", "score": "0.44050357", "text": "def main():\n\n # the AnsibleModule object will be our abstraction for working with Ansible.\n # This includes instantiation, a couple of common attr that will be the\n # args/params passed to the execution, as well as if the module\n # supports check mode\n module = AnsibleModule(\n argument_spec=dict(\n hostvars=dict(type='raw', required=True),\n report_timestamp=dict(type=str, required=False, default=''),\n registered_dict_name=dict(type=str, required=False, default=\"get_sas_host_details_results\"),\n include_hotfix_report=dict(type=bool, required=False, default=True),\n hotfix_url = dict(type=str, required=True)\n ),\n supports_check_mode=True\n )\n\n # get module parameters\n hostvars = module.params['hostvars']\n report_timestamp = module.params['report_timestamp']\n registered_dict_name = module.params['registered_dict_name']\n include_hotfix_report = 
module.params['include_hotfix_report']\n hotfix_url = module.params['hotfix_url']\n\n # Starting in Ansible 2.8.1, there is the potential for hostvars\n # to be passed as a byte string, if the dict is too large\n # This will convert the str back to a dict before proceeding\n if isinstance(hostvars, str):\n hostvars = ast.literal_eval(hostvars.decode())\n\n results = dict()\n results['sas_hosts'] = dict()\n results['created'] = report_timestamp\n\n for inventory_hostname, host_vars in hostvars.items():\n\n # set up returnable values\n unreachable = True\n failed = True\n failure_details = dict(\n msg=\"\",\n rc=0,\n stderr=\"\",\n stdout=\"\",\n )\n\n # get the host details dict\n host_details = host_vars.get(registered_dict_name)\n\n # check if the host has the registered dict\n if host_details is not None:\n\n # host details exist, so host was reachable\n unreachable = False\n\n # check if the host failed\n failed = host_details['failed']\n\n # if the module reported a failure, collect details\n if failed:\n failure_details['msg'] = host_details['msg']\n failure_details['rc'] = host_details['rc']\n failure_details['stderr'] = host_details['module_stderr']\n failure_details['stdout'] = host_details['module_stdout']\n else:\n # get module results\n host_results = host_details.get('sas_host_details')\n\n if host_results is not None:\n results['sas_hosts'].update(host_results)\n else:\n failed = True\n\n # if the results dict could not be found, mark the host as unreachable\n if failed or unreachable:\n host_groups = host_vars.get('group_names')\n\n if host_groups is not None and 'sas_all' in host_groups:\n hostname = host_vars.get('ansible_fqdn')\n if hostname is None or hostname == \"\":\n hostname = host_vars.get('ansible_hostname')\n if hostname is None or hostname == \"\":\n hostname = host_vars.get('ansible_host')\n if hostname is None or hostname == \"\":\n hostname = host_vars.get('inventory_hostname')\n if hostname is None or hostname == \"\":\n hostname = inventory_hostname\n\n try:\n host_groups.remove('sas_all')\n host_groups.remove('sas-all')\n except ValueError:\n pass # do nothing\n\n results['sas_hosts'][hostname] = dict(\n _id=hostname.replace('.', '-'),\n _unreachable=unreachable,\n _failed=failed,\n _failure_details=failure_details,\n ansible_host_groups=host_groups\n )\n else:\n pass # this host isn't in sas_all so there's no need to try and report on it\n\n ##################################################################################\n # This section will find all of the hotfixes available and add them to the report.\n ##################################################################################\n #\n # There are a few data structures that are complicated enough to warrant a description:\n # fullReport\n # This will hold all of the data in a format condusive to printing it out in the final report. 
This is how\n # It is structured:\n # fullReport (dict):\n # key=Hot Fix Name, point to another dict:\n # key=\"released\", points to a string containing the release date of the hotfix.\n # key= \"installed\", points to a boolean that will reflect whether any of the packages used by this hotfix are installed on any of the machines in the deployment.\n # key=\"upToDate\", point to a boolean that will reflest whether ALL of the packages used by this hotfix are up to date on ALL of the machines in the deployment.\n # key=\"sasnote\", points to another dict:\n # key=SASNote number, points to the description of the SASNote.\n # key=\"package\", points to another dict:\n # key=\"platform\" , points to another dict:\n # key=OS, points to another dict:\n # key=\"version\", points to the string of the version of the package.\n # key=\"installed\", points to a boolean which reflects whether this package is installed on any machine in the deployment.\n # key=\"upToDate\", points to a boolean which reflects whether this package is up to data on ALL of the machines in the deployment.\n # key=\"os\", points to the fully qualified name of the operating system.\n # key=\"arch\", points to the architecture of the OS (NOTE: This does not exist on Windows systems.)\n # key=\"alreadyUpdated\", points to a boolean, which is used to keep track of whether the upToDate has already been set.\n # key=\"installedVersions\", points to another dict:\n # key=machineName, points to a 2 element list:\n # [0]=string containing package version that is currently installed.\n # [1]=boolean reflecting whether this version is at or above the package delevered in this hotfix.\n #\n ###########################################################################\n #\n # packageToHotFix\n # This will hold a dict of lists:\n # key: package name, pointing to a 2 element list:\n # [0] OS\n # [1] The Hotfix that this package is associated with.\n #\n ###########################################################################\n #\n # environmentReportDict\n # This is inherited from the environment report, but it's probably worth documenting what it looks like.\n # There is a lot of data inerherited, and I'm only describing what is used in this script.\n # environmentReportDict\n # key=hostname (for each machine in the deployment), pointing to another dict:\n # key=\"OS\", pointing to string for the OS family.\n # key=\"arch\", pointing to the string for the architecture of the host.\n # key=\"sas_packages\", pointing to another dict:\n # key=package number, pointing to another dict:\n # key=\"attributes\", pointing to another dict:\n # key=\"version\", pointing to a string of the package versions currently installed on the host.\n ############################################################################\n\n\n # These properties will determine whether sections in the ansible output should be displayed or not.\n results[\"legacy_products_found\"] = False\n results[\"hotfix_legacy_products\"] = \"There is no hotfix data available for the following products due to their age:\\n\"\n results[\"no_hotfixes_available\"] = False\n results[\"no_hotfix_products\"] = \"The following products are installed, but there are no associated hotfixes for them:\\n\"\n\n results[\"include_hotfix_report\"] = include_hotfix_report\n files_to_scan = []\n if include_hotfix_report:\n\n # Constants for the RPMs.\n VA_RPM='sas-sasvisualanalytics'\n ESM_RPM='sas-esm-service'\n ESP_RPM='sas-espcondb'\n IIM_RPM='sas-svi-intelligence-management'\n 
VI_RPM='sas-svi-visual-investigator'\n SPRE_RPM='sas-basecfg1'\n\n # The following list contains the RPMs that are unique to each of the product sets defined\n # in the default baseURL\n key_rpms = [VA_RPM,\n ESM_RPM,\n ESP_RPM,\n IIM_RPM,\n VI_RPM\n ]\n # This is a list of the files on the hotfix website to use, depending on what is currently installed.\n # This is a dictionary of all rpms to their versions, across all machines in the deployment.\n all_installed_rpms = {}\n\n # Walk through all of the machines in the deployment. Build the list of RPM -> Version.\n # If there is more than one copy of an RPM (expected on multi-machine deployments) and\n # there is a difference in version (which there should NOT be, though it is possible),\n # use the lowest version possible.\n for current_machine in results['sas_hosts']:\n if not results['sas_hosts'][current_machine][\"_unreachable\"] and not results['sas_hosts'][current_machine][\n \"_failed\"] and results['sas_hosts'][current_machine]['_sas_installed']:\n for current_rpm in results['sas_hosts'][current_machine]['sas_packages']:\n # Skip any \"noarch\" packages, as they are not needed.\n if results['sas_hosts'][current_machine]['sas_packages'][current_rpm]['attributes']['arch'].find('noarch') == -1:\n current_rpm_version = \\\n results['sas_hosts'][current_machine]['sas_packages'][current_rpm]['attributes']['version']\n if current_rpm in all_installed_rpms.keys():\n if compare_versions(all_installed_rpms[current_rpm], current_rpm_version) < 0:\n all_installed_rpms[current_rpm] = current_rpm_version\n else:\n all_installed_rpms[current_rpm] = current_rpm_version\n\n # Loop through the key RPM list. If a key RPM exists, check the version and then add it to the list to be checked.\n for current_rpm in key_rpms:\n if current_rpm in all_installed_rpms.keys():\n rpm_version = all_installed_rpms[current_rpm]\n if current_rpm == VA_RPM:\n # Viya 3.5 shipped with sas-visualanalytics at version 2.5.10. If the version is at or above this,\n # use the VA 3.5 hotfix file.\n if compare_versions(rpm_version, \"2.5.10\", False) >= 0:\n files_to_scan.append(\"Viya_3_5_lax_home.xml\")\n # Viya 3.4 (19w34) shipped with sas-visualanalytics at version 1.9.543. If the version is at or\n # above this, use the VA 3.4 hotfix file.\n elif compare_versions(rpm_version, \"1.9.543\", False) >= 0:\n files_to_scan.append(\"Viya_3_4_lax_0819_home.xml\")\n # Viya 3.4 (18w30) shipped at version 1.4.244. However, there was a refresh at 19w21, but VA was not\n # refreshed. So, an additional check will need to be made to see if we are before or after the\n # 19w21 refresh.\n elif (compare_versions(rpm_version, \"1.4.244\", False) >= 0):\n # basecfg1 was updated at 19w21. So, that is what will be looked at to see if the deployment is\n # at least 19w21.\n basecfg1_version = all_installed_rpms[SPRE_RPM]\n if compare_versions(basecfg1_version, \"3.19\", False) >= 0:\n files_to_scan.append(\"Viya_3_4_lax_0519_home.xml\")\n else:\n files_to_scan.append(\"Viya_3_4_lax_home.xml\")\n # Viya 3.3 shipped with sas-visualanalytics at version 1.2.557. If the version is at or\n # above this, use the VA 3.3 hotfix file.\n elif (compare_versions(rpm_version, \"1.2.557\", False) >= 0):\n files_to_scan.append(\"Viya_3_3_home.xml\")\n # Viya 3.2 shipped with sas-visualanalytics at version 1.2.557. 
If the version is at or\n # above this, use the VA 3.3 hotfix file.\n elif (compare_versions(rpm_version, \"1.0.328\", False) >= 0):\n files_to_scan.append(\"Viya_3_2_home.xml\")\n # Otherwise, the version is too old. Just note that this DU, though deployed, is too old and it\n # won't be reported on.\n else:\n results[\"legacy_products_found\"] = True\n results[\"hotfix_legacy_products\"] = results[\"hotfix_legacy_products\"] + \" \" + current_rpm + \\\n \" is at version \" + str(rpm_version) + \\\n \", but the minimum reported version is 1.0.328.\\n\"\n elif current_rpm == ESM_RPM:\n # ESM 6.2 shipped with sas-esm-service at version 6.2.7. If the version is at or above this,\n # use the ESM 6.2 hotfix file.\n if compare_versions(rpm_version, \"6.2.7\", False) >= 0:\n files_to_scan.append('Viya_ESM_6_2_home.xml')\n # ESM 6.1 shipped with sas-esm-service at version 6.1.76. If the version is at or above this,\n # use the ESM 6.1 hotfix file.\n elif compare_versions(rpm_version, \"6.1.76\", False) >= 0:\n files_to_scan.append('Viya_ESM_6_1_home.xml')\n # ESM 5.2 shipped with sas-esm-service at version 5.2.40. If the version is at or above this,\n # use the ESM 5.2 hotfix file.\n elif compare_versions(rpm_version, \"5.2.40\", False) >= 0:\n files_to_scan.append('Viya_ESM_5_2_home.xml')\n # ESM 5.1 shipped with sas-esm-service at version 5.1.13. If the version is at or above this,\n # use the ESM 5.1 hotfix file.\n elif compare_versions(rpm_version, \"5.1.13\", False) >= 0 :\n files_to_scan.append('Viya_ESM_5_1_home.xml')\n # ESM 4.3 shipped with sas-esm-service at version 4.3.20. If the version is at or above this,\n # use the ESM 4.3 hotfix file.\n elif compare_versions(rpm_version, \"4.3.20\", False) >= 0:\n files_to_scan.append('Viya_ESM_4_3_home.xml')\n # Otherwise, the version is too old. Just note that this DU, though deployed, is too old and it\n # won't be reported on.\n else:\n results[\"legacy_products_found\"] = True\n results[\"hotfix_legacy_products\"] = results[\"hotfix_legacy_products\"] + \" \" + current_rpm + \\\n \" is at version \" + str(rpm_version) + \\\n \", but the minimum reported version is 6.1.76.\\n\"\n elif current_rpm == ESP_RPM:\n # ESP 6.2 shipped with sas-espcondb at version 6.2.0. If the version is at or above this,\n # use the ESP 6.2 hotfix file.\n if compare_versions(rpm_version, \"6.2.0\", False) >= 0:\n files_to_scan.append(\"Viya_ESP_6_2_home.xml\")\n # ESP 6.1 shipped with sas-espcondb at version 6.1.0. If the version is at or above this,\n # use the ESP 6.1 hotfix file.\n elif compare_versions(rpm_version, \"6.1.0\", False) >= 0:\n files_to_scan.append(\"Viya_ESP_6_1_home.xml\")\n # ESP 5.2 shipped with sas-espcondb at version 5.2.0. If the version is at or above this,\n # use the ESP 5.2 hotfix file.\n elif compare_versions(rpm_version, \"5.2.0\", False) >= 0:\n files_to_scan.append(\"Viya_ESP_5_2_home.xml\")\n # ESP 5.1 shipped with sas-espcondb at version 5.1.0. If the version is at or above this,\n # use the ESP 5.1 hotfix file.\n elif compare_versions(rpm_version, \"5.1.0\", False) >= 0:\n files_to_scan.append(\"Viya_ESP_5_1_home.xml\")\n # ESP 4.3 shipped with sas-espcondb at version 4.3.0. If the version is at or above this,\n # use the ESP 4.3 hotfix file.\n elif compare_versions(rpm_version, \"4.3.0\", False) >= 0:\n files_to_scan.append(\"Viya_ESP_4_3_home.xml\")\n # Otherwise, the version is too old. 
Just note that this DU, though deployed, is too old and it\n # won't be reported on.\n else:\n results[\"legacy_products_found\"] = True\n results[\"hotfix_legacy_products\"] = results[\"hotfix_legacy_products\"] + \" \" + current_rpm + \\\n \" is at version \" + str(rpm_version) + \\\n \", but the minimum reported version is 4.3.0.\\n\"\n elif current_rpm == IIM_RPM:\n # IIM 1.5 shipped with sas-svi-intelligence-management at version 1.5.11. If the version is at or\n # above this, use the IIM 1.5 hotfix file.\n if compare_versions(rpm_version, \"1.5.11\", False) >= 0:\n files_to_scan.append(\"Viya_IIM_1_5_home.xml\")\n # IIM 1.4 shipped with sas-svi-intelligence-management at version 1.4.7. If the version is at or\n # above this, use the IIM 1.4 hotfix file.\n elif compare_versions(rpm_version, \"1.4.7\", False) >= 0:\n files_to_scan.append(\"Viya_IIM_1_4_home.xml\")\n # IIM 1.3 shipped with sas-svi-intelligence-management at version 1.3.10. If the version is at or\n # above this, use the IIM 1.3 hotfix file.\n elif compare_versions(rpm_version, \"1.3.10\", False) >= 0:\n files_to_scan.append('Viya_IIM_1_3_home.xml')\n # Otherwise, the version is too old. Just note that this DU, though deployed, is too old and it\n # won't be reported on.\n else:\n results[\"legacy_products_found\"] = True\n results[\"hotfix_legacy_products\"] = results[\"hotfix_legacy_products\"] + \" \" + current_rpm + \\\n \" is at version \" + str(rpm_version) + \\\n \", but the minimum reported version is 1.3.10.\\n\"\n elif current_rpm == VI_RPM:\n # VI 10.6 shipped with sas-svi-visual-investigator at version 8.2.72. If the version is at or\n # above this, use the VI 10.6 hotfix file.\n if compare_versions(rpm_version, \"8.2.72\", False) >= 0:\n files_to_scan.append(\"Viya_VI_10_6_home.xml\")\n # VI 10.5.1 shipped with sas-svi-visual-investigator at version 7.5.129. If the version is at or\n # above this, use the VI 10.5.1 hotfix file.\n elif compare_versions(rpm_version, \"7.5.129\", False) >= 0:\n files_to_scan.append(\"Viya_VI_10_5_1_home.xml\")\n # VI 10.5 shipped with sas-svi-visual-investigator at version 7.4.22. If the version is at or\n # above this, use the VI 10.5 hotfix file.\n elif compare_versions(rpm_version, \"7.4.22\", False) >= 0:\n files_to_scan.append(\"Viya_VI_10_5_home.xml\")\n # VI 10.4 shipped with sas-svi-visual-investigator at version 7.1.51. If the version is at or\n # above this, use the VI 10.4 hotfix file.\n elif compare_versions(rpm_version, \"7.1.51\", False) >= 0:\n files_to_scan.append(\"Viya_VI_10_4_home.xml\")\n # VI 10.3.1 shipped with sas-svi-visual-investigator at version 6.4.6. If the version is at or\n # above this, use the VI 10.3.1 hotfix file.\n elif compare_versions(rpm_version, \"6.4.6\", False) >= 0:\n files_to_scan.append(\"Viya_VI_10_3_1_home.xml\")\n # VI 10.3 shipped with sas-svi-visual-investigator at version 6.3.2. If the version is at or\n # above this, use the VI 10.3 hotfix file.\n elif compare_versions(rpm_version, \"6.3.2\", False) >= 0:\n files_to_scan.append(\"Viya_VI_10_3_home.xml\")\n # Otherwise, the version is too old. 
Just note that this DU, though deployed, is too old and it\n # won't be reported on.\n else:\n results[\"legacy_products_found\"] = True\n results[\"hotfix_legacy_products\"] = results[\"hotfix_legacy_products\"] + \" \" + current_rpm + \\\n \" is at version \" + str(rpm_version) + \\\n \", but the minimum reported version is 6.3.2.\\n\"\n\n # This is the URL base from which to pull the hotfix files.\n # Because the user can specify hotfix_url, we need to check to see if the trailing slash is there. If not,\n # add it.\n if hotfix_url[-1:] == '/':\n baseURL = hotfix_url\n else:\n baseURL = hotfix_url + '/'\n # This is the top level object to store the hotfix report information (see above).\n fullReport = {}\n # This is a dict of package to hotfixes (see above).\n packageToHotfix = {}\n # This boolean will help with debugging.\n debug = False\n\n # Check to see if the base site can be reached. If not, an error will be displayed in the deployment report\n # itself. Note: We don't actually care about the content. This check is just to see if the page can be\n # reached.\n results[\"master_website\"] = baseURL\n try:\n landing_page = web_request.urlopen(baseURL)\n http_output = landing_page.read()\n results[\"contact_hotfix_website\"] = True\n except web_error.URLError :\n results[\"contact_hotfix_website\"] = False\n if debug:\n print(\"***** Error parsing \" + baseURL)\n print(traceback.format_exc())\n print(\"***** No hot fix information obtained. Skipping hot fix report.\\n\\n\")\n\n files_to_remove = []\n if len(files_to_scan) > 0:\n for currentFile in files_to_scan:\n fileToParse = baseURL + currentFile\n # Retrieve each file.\n # Inside of each file, the lines are keyed by the hot fix id. There are three types of lines, in order:\n # 1) id and release date\n # 2) id, sasnote, sasnotetitle\n # 3) id, OS, package.\n # This script loops through to build a dictionary of dictonaries with the basic structure:\n # ID\n # Release Date\n # SASNotes\n # SASNote and Title\n # ...\n # Packages\n # Package Name, Version, and OS\n try:\n currentFileXML = web_request.urlopen(fileToParse)\n currentFileRoot = ET.fromstring(currentFileXML.read())\n updateID = \"\"\n for update_tag in currentFileRoot.findall('update'):\n currentUpdate = update_tag.get('id')\n releaseDate = update_tag.get('released')\n # To get the top level Dictionary seeded with the hot fix Name and release date.\n if releaseDate is not None:\n if currentUpdate in fullReport:\n if debug:\n print(\"WARNING! Hot Fix \" + currentUpdate + \" already discovered. Skipping\")\n updateID = \"DUPLICATE-SKIP\"\n else:\n # The SCXXXX hot fixes are special. The package files are only included in\n # Viya_<version>_<platform>_home.xml files. 
So, the entries in the\n # scheduled_update_<platform>_<shipevent>.xml files can be skipped.\n if currentUpdate.startswith(\"SC\") and currentFile.find(\"scheduled_update_\") < 0:\n continue\n updateID = currentUpdate\n fullReport[updateID] = {}\n fullReport[updateID][\"release_date\"] = releaseDate\n fullReport[updateID][\"installed\"] = False\n fullReport[updateID][\"upToDate\"] = False\n # To get the SASNote information under the hot fix\n else:\n if updateID == \"DUPLICATE-SKIP\":\n continue\n sasNote = update_tag.get('sasnote')\n sasNoteTitle = update_tag.get('sasnoteTitle')\n if sasNote is not None:\n if \"sasnote\" not in fullReport[updateID]:\n fullReport[updateID][\"sasnote\"] = {}\n # This string needs to be encoded because some non-ASCII characters are\n # in some of the titles.\n fullReport[updateID][\"sasnote\"][sasNote] = sasNoteTitle.encode('utf-8')\n # To get the Package information under the hot fix.\n else:\n os = update_tag.get(\"os\")\n fullPackage = update_tag.get(\"package\")\n if fullPackage is not None:\n if \"package\" not in fullReport[updateID]:\n fullReport[updateID][\"package\"] = {}\n\n lastPeriodIndex = fullPackage.rfind(\".\")\n # Format the package information.\n # Windows does not have a dash in the version; Linux does. So, we need to break differently,\n # depending on the OS.\n if os.lower().find(\"windows\") >= 0:\n versionStartIndex = fullPackage.rfind(\"-\")\n achitectureStartIndex = -1\n versionEndIndex = lastPeriodIndex\n osFamily = \"Windows\"\n else:\n versionStartIndex = fullPackage.rfind(\"-\", 0, fullPackage.rfind(\"-\"))\n # Linux has architecture in the package. This will be stored in its own key.\n achitectureStartIndex = fullPackage.rfind(\".\", 0, lastPeriodIndex)\n # SLES has the string 'suse' in its package. 
This will strip it out (as well as an extra .).\n if os.lower().find(\"suse\") >= 0:\n versionEndIndex = achitectureStartIndex - 5\n osFamily = \"Suse\"\n else:\n if os.lower().find(\"yocto\") >= 0:\n versionEndIndex = achitectureStartIndex - 6\n osFamily = \"Yocto\"\n else:\n if os.lower().find(\"ubuntu\") >= 0:\n versionStartIndex = fullPackage.rfind(\"_\", 0, fullPackage.rfind(\"_\"))\n versionEndIndex = fullPackage.rfind(\"_\")\n achitectureStartIndex = versionEndIndex\n osFamily = \"Ubuntu\"\n else:\n if os.lower().find(\"red hat enterprise linux 7\") >= 0:\n versionStartIndex = versionStartIndex = fullPackage.rfind(\":\")\n versionEndIndex = len(fullPackage)\n achitectureStartIndex = -1\n osFamily = \"RedHat\"\n else:\n versionEndIndex = achitectureStartIndex\n osFamily = \"RedHat\"\n package = fullPackage[:versionStartIndex]\n packageVersion = fullPackage[versionStartIndex + 1:versionEndIndex]\n architecture = fullPackage[achitectureStartIndex + 1:lastPeriodIndex]\n\n if package not in fullReport[updateID][\"package\"]:\n fullReport[updateID][\"package\"][package] = {}\n if \"platform\" not in fullReport[updateID][\"package\"][package]:\n fullReport[updateID][\"package\"][package][\"platform\"] = {}\n if osFamily not in fullReport[updateID][\"package\"][package][\"platform\"]:\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily] = {}\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"version\"] = packageVersion\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"installed\"] = False\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"upToDate\"] = False\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"os\"] = os\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"installedVersions\"] = {}\n if achitectureStartIndex != -1:\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"arch\"] = architecture\n # This property is used to make sure that when evaluating the installed packages,\n # the upToDate=false does not get overridden by a True at the end.\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"alreadyUpdated\"] = False\n\n # Add to the package to hot fix dict.\n if package not in packageToHotfix:\n packageToHotfix[package] = []\n packageToHotfix[package].append([osFamily, updateID])\n\n except ET.ParseError:\n if debug:\n print(\"***** Error parsing \" + fileToParse)\n print(traceback.format_exc())\n print(\"***** Skipping file.\\n\\n\")\n except web_error.HTTPError:\n results[\"no_hotfixes_available\"] = True\n results[\"no_hotfix_products\"] = results[\"no_hotfix_products\"] + \" \" + currentFile + \"\\n\"\n files_to_remove.append(currentFile)\n if debug:\n print(\"***** Cannot access \" + fileToParse)\n print(traceback.format_exc())\n print(\"***** Skipping the file.\\n\\n\")\n except:\n if debug:\n print(\"***** Error encountered with \" + fileToParse)\n print(traceback.format_exc())\n print(\"***** Skipping the file.\\n\\n\")\n\n # Remove files that have been flagged as not existing\n for this_file in files_to_remove:\n files_to_scan.remove(this_file)\n\n if debug:\n print(\"**** Build complete. 
Here are the hot fixes:\")\n print_Full_Report(fullReport)\n print(\"***********************************************************************************\")\n print(\"**** Here is the package to hot fix dict:\")\n print(\"***********************************************************************************\")\n for current_package in packageToHotfix:\n print(\" \" + current_package)\n for machine_list in packageToHotfix[current_package]:\n print(\" \" + machine_list[0] + \" @ \" + machine_list[1] + \".\")\n print(\"***********************************************************************************\")\n print(\"Report built.\")\n print(\"Accessing environment Data.\")\n\n for currentMachine in results['sas_hosts']:\n if not results['sas_hosts'][currentMachine][\"_unreachable\"] and not results['sas_hosts'][currentMachine][\"_failed\"]\\\n and results['sas_hosts'][currentMachine]['_sas_installed']:\n currentOS = results['sas_hosts'][currentMachine]['os']['family']\n for currentPackage in results['sas_hosts'][currentMachine]['sas_packages']:\n if currentPackage in packageToHotfix:\n for osHotfix in packageToHotfix[currentPackage]:\n if osHotfix[0] == currentOS:\n currentHotfix = osHotfix[1]\n installedVersion = \\\n results['sas_hosts'][currentMachine]['sas_packages'][currentPackage]['attributes']['version']\n if installedVersion.endswith('.suse'):\n installedVersion = installedVersion[:-5]\n else:\n if installedVersion.endswith('.yocto'):\n installedVersion = installedVersion[:-6]\n else:\n if '_' in installedVersion:\n installedVersion = installedVersion[0:installedVersion.rfind(\"_\")]\n hotfixVersion = fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"version\"]\n upToDate = compare_versions(installedVersion, hotfixVersion) >= 0\n fullReport[currentHotfix][\"installed\"] = True\n fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"installed\"] = True\n # If a previous package marked updateToDate=True, it can still be pulled back to false if another package isn't\n # up to date. 
If the previous package was marked upToDate=false, the hotfix cannot be marked true.\n if not fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"alreadyUpdated\"] or \\\n (fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"alreadyUpdated\"] and\n fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"upToDate\"]):\n fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"upToDate\"] = upToDate\n fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"alreadyUpdated\"] = True\n fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"installedVersions\"][currentMachine] = [installedVersion, upToDate]\n\n if debug:\n print(\"Comparing evironment data to hotfix data.\")\n for currentHotFix in fullReport:\n cumulativeOverallUpToDate = True\n # This will only allow the top level \"upToDate\" property to be set, if there is a package installed on this OS.\n allowTopLevelUpdate = False\n for currentPackage in fullReport[currentHotFix][\"package\"]:\n cumulativeOSUpToDate = True\n for currentOS in fullReport[currentHotFix][\"package\"][currentPackage][\"platform\"]:\n if len(fullReport[currentHotFix][\"package\"][currentPackage][\"platform\"][currentOS][\"installedVersions\"]) > 0:\n cumulativeOSUpToDate = cumulativeOSUpToDate and \\\n fullReport[currentHotFix][\"package\"][currentPackage][\"platform\"][currentOS][\n \"upToDate\"]\n allowTopLevelUpdate = True\n\n cumulativeOverallUpToDate = cumulativeOverallUpToDate and cumulativeOSUpToDate\n if allowTopLevelUpdate:\n fullReport[currentHotFix][\"upToDate\"] = cumulativeOverallUpToDate\n\n # Now that the fullReport has been updated, go back and add to results, for the final report.\n results[\"available_hotfixes\"] = {}\n results[\"installed_hotfixes\"] = {}\n\n for currentHotfix in fullReport:\n if not fullReport[currentHotfix][\"installed\"]:\n continue\n if fullReport[currentHotfix][\"upToDate\"]:\n hotfix_dict_to_use = \"installed_hotfixes\"\n else:\n hotfix_dict_to_use = \"available_hotfixes\"\n results[hotfix_dict_to_use][currentHotfix] = {}\n results[hotfix_dict_to_use][currentHotfix][\"release_date\"] = fullReport[currentHotfix][\"release_date\"]\n results[hotfix_dict_to_use][currentHotfix][\"packages\"] = []\n for currentPackage in fullReport[currentHotfix][\"package\"]:\n for currentOS in fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"]:\n if not fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"installed\"]:\n continue\n for currentHost in fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"installedVersions\"]:\n temp_dict = {}\n temp_dict[\"hostname\"] = currentHost\n temp_dict[\"package\"] = currentPackage\n temp_dict[\"installed_version\"] = fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"installedVersions\"][currentHost][0]\n temp_dict[\"hotfix_version\"] = fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"version\"]\n temp_dict[\"up_to_date\"] = fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"installedVersions\"][currentHost][1]\n results[hotfix_dict_to_use][currentHotfix][\"packages\"].append(temp_dict)\n # Format the SAS Note description so that we can respect any HTML tags that are included in the text.\n results[hotfix_dict_to_use][currentHotfix][\"sas_notes\"] = {}\n if \"sasnote\" in 
fullReport[currentHotfix]:\n for current_number in fullReport[currentHotfix][\"sasnote\"]:\n # Honor any html that is coming through.\n temp_sasnote_description = fullReport[currentHotfix][\"sasnote\"][current_number].decode('utf-8')\n temp_sasnote_description = temp_sasnote_description.replace(\"&lt;\", \"<\")\n temp_sasnote_description = temp_sasnote_description.replace(\"&gt;\", \">\")\n # Build a link to the URL for the SAS Note.\n hot_fix_prefix = current_number[:2]\n hot_fix_postfix = current_number[2:]\n sas_note_url = \"http://support.sas.com/kb/\" + hot_fix_prefix + \"/\" + hot_fix_postfix + \".html\"\n sas_note_html_link = \"<a href=\\\"\" + sas_note_url + \"\\\"\\>\" + current_number + \"</a>\"\n results[hotfix_dict_to_use][currentHotfix][\"sas_notes\"][current_number] = {\"sas_note_link\":sas_note_html_link, \"description\":temp_sasnote_description}\n\n if len(files_to_scan) == 0:\n formatted_file_output = \"Installed products analyzed; no applicable hotfix files to report on.\\n\"\n else:\n formatted_file_output = \"Installed Products analyzed; hotfix files used in report:\\n\"\n current_file_number = 1\n for current_file in files_to_scan:\n formatted_file_output = formatted_file_output + \" \" + current_file_number.__str__() + \") \" + baseURL + current_file + \"\\n\"\n current_file_number += 1\n\n results[\"hotfix_scanned_files\"] = formatted_file_output\n\n # in the event of a successful module execution, you will want to\n # simple AnsibleModule.exit_json(), passing the key/value results\n #\n # changed will always be 'False' since we'll never alter state on a host\n module.exit_json(changed=False, processed_host_details=results)", "title": "" }, { "docid": "54126a47aa48c3b80f173fb823b53aa4", "score": "0.43976724", "text": "def get_instance_info(self):\n params = {}\n results = []\n\n try:\n v_ids = {}\n response = self.get_status('DescribeInstances', params)\n results.append(response)\n \n except Exception as ex: \n error_code = ex.error_code\n error_msg = ex.message\n results.append({\"Error Code\": error_code, \"Error Message\": error_msg})\n\n return results", "title": "" }, { "docid": "82ff844451b08aba8336f67d6476e41e", "score": "0.43954968", "text": "def do_printInstances(self,args):\n parser = CommandArgumentParser(\"printInstances\")\n parser.add_argument(dest='filters',nargs='*',default=[\"*\"],help='Filter instances');\n parser.add_argument('-a','--addresses',action='store_true',dest='addresses',help='list all ip addresses');\n parser.add_argument('-t','--tags',action='store_true',dest='tags',help='list all instance tags');\n parser.add_argument('-d','--allDetails',action='store_true',dest='details',help='print all instance details');\n parser.add_argument('-r','--refresh',action='store_true',dest='refresh',help='refresh');\n parser.add_argument('-z','--zones',dest='availabilityZones',nargs='+',help='Only include specified availability zones');\n args = vars(parser.parse_args(args))\n \n client = AwsConnectionFactory.getEc2Client()\n\n filters = args['filters']\n addresses = args['addresses']\n tags = args['tags']\n details = args['details']\n availabilityZones = args['availabilityZones']\n needDescription = addresses or tags or details\n\n if args['refresh']:\n self.scalingGroupDescription = self.client.describe_auto_scaling_groups(AutoScalingGroupNames=[self.scalingGroup])\n \n # print \"AutoScaling Group:{}\".format(self.scalingGroup)\n print \"=== Instances ===\"\n instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances']\n\n instances = 
filter( lambda x: fnmatches(x['InstanceId'],filters),instances)\n if availabilityZones:\n instances = filter( lambda x: fnmatches(x['AvailabilityZone'],availabilityZones),instances)\n \n index = 0\n for instance in instances:\n instance['index'] = index\n print \"* {0:3d} {1} {2} {3}\".format(index,instance['HealthStatus'],instance['AvailabilityZone'],instance['InstanceId'])\n description = None\n if needDescription:\n description = client.describe_instances(InstanceIds=[instance['InstanceId']])\n if addresses:\n networkInterfaces = description['Reservations'][0]['Instances'][0]['NetworkInterfaces']\n number = 0\n print \" Network Interfaces:\"\n for interface in networkInterfaces:\n print \" * {0:3d} {1}\".format(number, interface['PrivateIpAddress'])\n number +=1\n if tags:\n tags = description['Reservations'][0]['Instances'][0]['Tags']\n print \" Tags:\"\n for tag in tags:\n print \" * {0} {1}\".format(tag['Key'],tag['Value'])\n if details:\n pprint(description)\n \n index += 1", "title": "" }, { "docid": "4c0c604cac22b7ea138b7a836a40596a", "score": "0.43935823", "text": "def _nfvi_host_get_callback(nfvi_host_uuid, nfvi_host_name):\n DLOG.debug(\"Host get, nfvi_host_uuid=%s, nfvi_host_name=%s.\"\n % (nfvi_host_uuid, nfvi_host_name))\n\n instances = 0\n instances_failed = 0\n instances_stopped = 0\n\n host_table = tables.tables_get_host_table()\n host = host_table.get(nfvi_host_name, None)\n if host is not None:\n instance_table = tables.tables_get_instance_table()\n for instance in instance_table.on_host(host.name):\n if instance.is_deleting() or instance.is_deleted():\n continue\n\n if instance.is_failed():\n instances_failed += 1\n\n if instance.is_locked():\n instances_stopped += 1\n\n instances += 1\n\n DLOG.info(\"Host %s has %s instances, failed=%s, stopped=%s.\"\n % (host.name, instances, instances_failed, instances_stopped))\n return True, instances, instances_failed, instances_stopped", "title": "" }, { "docid": "3dd0a4f0b7aacd2764cc2c8f157519ca", "score": "0.43895268", "text": "def get_instances(self):\n return list(self._raw[\"hosts\"][\"instances\"].keys())", "title": "" }, { "docid": "2a43694b38047b54ed55e466390e9255", "score": "0.438775", "text": "def test_confidential_vm(self):\n for instance in self.all_instances:\n instance_name = instance.get('name')\n # Verify confidentialInstanceConfig exists\n self.assertIn('confidentialInstanceConfig', instance,\n 'Confidential Computing not enabled in instance {instance_name}.'\n .format(instance_name=instance_name))\n # Verify Confidential Computing is Enabled\n self.assertTrue(\n instance['confidentialInstanceConfig']['enableConfidentialCompute'],\n 'Confidential Computing not enabled in instance {instance_name}.'\n .format(instance_name=instance_name))", "title": "" }, { "docid": "af93670e9f4bdc880237bf6381073e52", "score": "0.43862942", "text": "def parse_client_topology():\n nodes = {}\n count = 0\n for machine in get_topology():\n if not is_attribute_declared(key='public-ip', some_dict=machine):\n count += 1\n for component in machine.get('cloud-components'):\n if component == 'clc':\n topology['clc-1'] = get_component_ip(machine, count)\n elif component == 'cc':\n check_cluster_definition(machine=machine, component='cc-1', count=count)\n elif component == 'sc':\n check_cluster_definition(machine=machine, component='sc-1', count=count)\n elif component == 'walrus':\n topology['walrus'] = get_component_ip(machine, count)\n elif component == 'riakcs':\n topology['riakcs'] = {}\n elif component == 'ufs':\n if 
is_attribute_declared(key=\"user-facing\", some_dict=topology):\n topology['user-facing'].append(get_component_ip(machine, count))\n else:\n topology['user-facing'] = [get_component_ip(machine, count)]\n elif component == 'nc':\n if not is_attribute_declared(key=\"clusters\", some_dict=topology):\n nodes[machine.get('cluster-name')] = [get_component_ip(machine, count)]\n topology['clusters'] = {machine.get('cluster-name'): {}}\n topology['clusters'][machine.get('cluster-name')]['nodes'] = \" \".join(nodes[machine.get('cluster-name')])\n elif machine.get('cluster-name') in topology['clusters'].keys():\n if not is_attribute_declared(key=machine.get('cluster-name'), some_dict=nodes):\n nodes[machine.get('cluster-name')] = [get_component_ip(machine, count)]\n else:\n nodes[machine.get('cluster-name')].append(get_component_ip(machine, count))\n topology['clusters'][machine.get('cluster-name')]['nodes'] = \" \".join(nodes[machine.get('cluster-name')])\n else:\n if not is_attribute_declared(key=machine.get('cluster-name'), some_dict=nodes):\n nodes[machine.get('cluster-name')] = [get_component_ip(machine, count)]\n else:\n nodes[machine.get('cluster-name')].append(get_component_ip(machine, count))\n topology['clusters'][machine.get('cluster-name')] = {}\n topology['clusters'][machine.get('cluster-name')]['nodes'] = \" \".join(nodes[machine.get('cluster-name')])\n return", "title": "" }, { "docid": "29272c3212bb1b70716e7c9a6db775a4", "score": "0.4384945", "text": "def checkhost(hostname):\n\n\t#print \"checking hostname\",hostname\n\tfor host in hostname:\n\t\t#print \"host is\",host\n\t\tobj4 = {\"jsonrpc\": \"2.0\",\"method\": \"host.exists\",\"params\": {\"host\": host},\"auth\": hash_pass,\"id\": 3}\n\t\tdata4 = json.dumps(obj4)\n\t\trequest4 = urllib2.Request(url, data4, {'Content-Type': 'application/json'})\n\t\tresponse4 = urllib2.urlopen(request4)\n\t\tres4 = json.load(response4)\n\t\t#print \"hostname check results - \",res4[\"result\"]\n\t\tif res4[\"result\"] != True:\n\t\t\tprint \"%s - This host doesn't exist in zabbix\" %(host)", "title": "" }, { "docid": "f976a63af90f3a7b2e98cc90e37aef1a", "score": "0.43843043", "text": "def validate_nodes_count(namespace):\r\n if namespace.cpu is not None:\r\n if namespace.cpu < 1 or namespace.cpu > 4:\r\n raise CLIError('--cpu must be in the range [1,4]')\r\n if namespace.memory is not None:\r\n if namespace.memory < 1 or namespace.memory > 8:\r\n raise CLIError('--memory must be in the range [1,8]')\r\n if namespace.instance_count is not None:\r\n if namespace.instance_count < 1 or namespace.instance_count > 20:\r\n raise CLIError('--instance-count must be in the range [1,20]')", "title": "" } ]
4260c860a2adc3fb1078bafeeb794104
transfer time_str to seconds
[ { "docid": "b2e90d66a3adcda65cc4efdb98bc1a62", "score": "0.751273", "text": "def to_second(time_str):\n format_time_str = \"%s-%s-%s %s:%s:%s\"%(\n time_str[0:4],\n time_str[4:6],\n time_str[6:8],\n time_str[8:10],\n time_str[10:12],\n time_str[12:]\n )\n seconds = datetime.strptime(format_time_str, \"%Y-%m-%d %H:%M:%S\")\n return time.mktime(seconds.timetuple())", "title": "" } ]
[ { "docid": "f14a4ed4850f99c6f69590e97d5ffd04", "score": "0.8461396", "text": "def seconds_from_string(time_str):\n h, m, s = time_str.split(\":\")\n return int(h) * 3600 + int(m) * 60 + int(s)", "title": "" }, { "docid": "57a8ff0914734f63ca6a05afa09ad6dc", "score": "0.7974478", "text": "def time_str(time_str):\n\n m = re.search('([0-9]+)s', time_str) # match seconds\n m2 = re.search('([0-9]+)m', time_str) # match minutes\n m3 = re.search('([0-9]+)h', time_str) # match hours\n\n if m:\n seconds = m.group(1)\n else: seconds = 0\n\n if m2:\n minutes = m2.group(1)\n else: minutes = 0\n\n if m3:\n hours = m3.group(1)\n else: hours = 0\n\n seconds_total = int(seconds) + (int(minutes) * 60) + (int(hours) * 3600)\n\n return(seconds_total)", "title": "" }, { "docid": "4a4cb5167b68075a88c33410d93448ef", "score": "0.7882141", "text": "def _time_str_to_secs(time_str='30 min'):\n\n s1 = str(time_str).replace('_', ' ') + \" min\"\n time_part = float((s1.split(\" \")[0]))\n text_part = s1.split(\" \")[1]\n\n if text_part == 'sec':\n time_sec = time_part\n elif text_part == 'min':\n time_sec = time_part * 60\n elif text_part == 'hrs':\n time_sec = time_part * 3600\n elif text_part in ('hr', 'hrs'):\n time_sec = time_part * 3600\n else:\n time_sec = 1200 #default to 20 minutes\n\n return time_sec", "title": "" }, { "docid": "85dd1dfa3c7789a4e3e8e70407967060", "score": "0.78543735", "text": "def _convert_to_seconds(self, time):\r\n return 3600*time[0] + 60*time[1] + time[0]", "title": "" }, { "docid": "e2b98391df5575574d7a117e7e322e26", "score": "0.7655791", "text": "def get_sec(time_str):\n h, m, s = time_str.split(':')\n return int(h) * 3600 + int(m) * 60 + int(s)", "title": "" }, { "docid": "7b5426bbb97521ad1c6525e75950b23c", "score": "0.75356346", "text": "def _timedelta_to_sec(str_time):\n sec = 0\n try:\n if str_time.endswith('d'):\n sec = float(str_time.replace('d', ''))*86400\n elif str_time.endswith('h'):\n sec = float(str_time.replace('h', ''))*3600\n elif str_time.endswith('m'):\n sec = float(str_time.replace('m', ''))*60 \n elif str_time.endswith('s'):\n sec = float(str_time.replace('s', ''))\n else:\n sec = -1\n except ValueError:\n sec = 0\n return sec", "title": "" }, { "docid": "ec46bafb51aa3af6344de34eb2ea0388", "score": "0.75246954", "text": "def string_to_secs(time):\n cdef str time_string = str(time).upper()\n\n if time_string.endswith('MS') or time_string.endswith('MSEC'):\n time_string = ''.join(i for i in time_string if not i.isalpha())\n return float(time_string) / 1000.0\n\n elif time_string.endswith('S') or time_string.endswith('SEC'):\n time_string = ''.join(i for i in time_string if not i.isalpha())\n return float(time_string)\n\n elif 'D' in time_string:\n time_string = ''.join(i for i in time_string if not i.isalpha())\n return float(time_string) * 86400\n\n elif 'H' in time_string:\n time_string = ''.join(i for i in time_string if not i.isalpha())\n return float(time_string) * 3600\n\n elif 'M' in time_string:\n time_string = ''.join(i for i in time_string if not i.isalpha())\n return float(time_string) * 60\n\n elif not time_string or time_string == 'NONE':\n return 0.0\n\n else:\n try:\n return float(time_string)\n except:\n return 0.0", "title": "" }, { "docid": "376d849199440e5fd1fa06247f51d46e", "score": "0.7518198", "text": "def to_seconds(tstr):\n try:\n val, suffix = re.match(\"^([0-9]+)([HhMmSs]?)$\", tstr).groups()\n except:\n alohomora.die(\"Can't parse duration '%s'\" % tstr)\n scale = {'h': 3600, 'm': 60}.get(suffix.lower(), 1)\n\n return int(val) * scale", 
"title": "" }, { "docid": "a8157dd985608b0427d572d544613dae", "score": "0.73962015", "text": "def time_str_to_seconds(s):\n match = _pattern.match(s)\n if match:\n return sum(\n _multipliers[k] * float(v)\n for k, v in match.groupdict().items()\n if v and k in _multipliers\n )\n raise ValueError('Unknown time format: \"{}\"'.format(s))", "title": "" }, { "docid": "4220bb8e1513a29e53cddbc521d329f5", "score": "0.7272327", "text": "def Time2Seconds( self, t ):\n\t\t\n\t\tt = str( t )\n\t\t\n\t\tif \" \" in t:\n\t\t\tdate,t = str( t ).split(\" \")\n\t\t\tyy,mo,dd = date.split(\"-\")\n\t\telse:\n\t\t\tyy=2000\n\t\t\tmo=dd= 1\n\t\t\t\n\t\tif \".\" in t:\n\t\t\tt,ignore = t.split(\".\")\n\t\thh,mm,ss = t.split(\":\")\n\t\t\n\t\treturn time.mktime(( int( yy ), int( mo ), int( dd ), int( hh ), int( mm ), int( ss ), 0, 1, -1 ))", "title": "" }, { "docid": "d00d5b6dd6b60630a440f7129b13508e", "score": "0.72024816", "text": "def _get_seconds(cls, text):\n if RE_S.match(text.lower()):\n numbers = RE_NUM.findall(text)\n return int(numbers[0])\n elif RE_M.match(text.lower()):\n numbers = RE_NUM.findall(text)\n return int(numbers[0]) * 60 # Min to sec.\n raise TimeException()", "title": "" }, { "docid": "4d11ab1cc1ac8a45048a9f414784ff95", "score": "0.71745586", "text": "def get_sec(time): #helper function to convert serial output time string into int, just for testing convenience\n time = str(time)[2:-1]\n h, m, s = str(time).split(':')\n ts = int(h) * 3600 + int(m) * 60 + float(s)\n return ts", "title": "" }, { "docid": "8e903edb260507f49a743e825b5f9ee6", "score": "0.71642584", "text": "def cvsecs(time):\n\n if is_string(time):\n if (',' not in time) and ('.' not in time):\n time = time + '.0'\n expr = r\"(\\d+):(\\d+):(\\d+)[,|.](\\d+)\"\n finds = re.findall(expr, time)[0]\n nums = list( map(float, finds) )\n return ( 3600*int(finds[0])\n + 60*int(finds[1])\n + int(finds[2])\n + nums[3]/(10**len(finds[3])))\n\n elif isinstance(time, tuple):\n if len(time)== 3:\n hr, mn, sec = time\n elif len(time)== 2:\n hr, mn, sec = 0, time[0], time[1]\n return 3600*hr + 60*mn + sec\n\n else:\n return time", "title": "" }, { "docid": "1c3d2a39867e79955881d46241e92f13", "score": "0.7105693", "text": "def time_to_number(time_str):\r\n \r\n t = time_str.split(\":\")\r\n t = int(t[0])*60 + int(t[1])\r\n \r\n return t", "title": "" }, { "docid": "a1c339933c239774babc8cf26711cb0e", "score": "0.7098537", "text": "def convert_time(time_str):\n try:\n if (type(time_str) == float):\n local_time = ctime(time_str)\n else :\n local_time = ctime(float(time_str))\n return(local_time)\n except:\n return (time_str)", "title": "" }, { "docid": "1fa562ac956396394ece6d55b4314b78", "score": "0.7064129", "text": "def parse_time(time_slot):\n convert = {\n 'D': lambda x: 24*60*60*x,\n 'H': lambda x: 60*60*x,\n 'M': lambda x: 60*x,\n 'S': lambda x: x\n }\n seconds = 0\n for i, c in enumerate(time_slot):\n if c.isdigit() and (time_slot[i-1].isalpha() or i == 0):\n if time_slot[i+1].isdigit():\n seconds += convert[time_slot[i+2]](int(time_slot[i:i+2]))\n else:\n seconds += convert[time_slot[i+1]](int(c))\n else:\n pass\n return str(seconds)", "title": "" }, { "docid": "8a72982298a4d4d905723647de71fbeb", "score": "0.7030402", "text": "def text_to_seconds(text):\n dt = datetime.strptime(text.split('.')[0], '%Y-%m-%d %H:%M:%S')\n return int(dt.timestamp())", "title": "" }, { "docid": "aca772b31808a85c3543fd1c523999b4", "score": "0.6958745", "text": "def toSeconds(time):\n\tminutes = np.floor(time)\n\tseconds = time - minutes\n\treturn (minutes*60) 
+ (seconds*100)", "title": "" }, { "docid": "478f45e15fde661117398ea63c2cd0d3", "score": "0.6931637", "text": "def getMiliSec(time_str: float) -> int:\n# if(type(time_str) == \"float\"):\n# if(type(time_str) is types.Float):\n# elif isinstance(obj, np.floating):\n\n time_str = str(time_str)\n if(len(time_str.split(':')) ==3 ):\n h, m,s = time_str.split(':')\n else:\n # print(len(time_str.split(':')))\n # print(time_str)\n h, m = time_str.split(':')\n s= \"00\"\n \n return int(h) * 3600 + int(m) * 60 + int(s)", "title": "" }, { "docid": "884439a0623dfec48c26052fce9bc097", "score": "0.6931253", "text": "def convert_subriptime_to_seconds(obj):\r\n\r\n return 3600 * obj.hours + 60 * obj.minutes + obj.seconds", "title": "" }, { "docid": "04b3d413020e1189db69a784490b2730", "score": "0.6871826", "text": "def seconds_in(s):\n return s - (hours_in(s) * 3600) - (minutes_in(s) * 60)", "title": "" }, { "docid": "cdf7199b26b41cd97b704713df55e28f", "score": "0.6860123", "text": "def isotimetoseconds(durationvalue):\n try:\n val = time.strptime(durationvalue, \"%H:%M:%S\")\n timeinseconds = datetime.timedelta(hours=val.tm_hour, minutes=val.tm_min, seconds=val.tm_sec).seconds\n return timeinseconds\n except:\n print \"Error in converting iso time time to seconds\"\n return None", "title": "" }, { "docid": "167461d95f05fac304e4ed011c3f1b52", "score": "0.6850567", "text": "def dhms_secs(timestr):\n period = int(timestr[0:-1])\n unit = timestr[-1].lower()\n if unit == \"d\":\n return period * 24 * 60 * 60\n elif unit == \"h\":\n return period * 60 * 60\n elif unit == \"m\":\n return period * 60\n elif unit == \"s\":\n return period\n raise ValueError(\"%s: Unknown time period char\" % unit)", "title": "" }, { "docid": "74c7f7e8dd27c8bac8c7b6cf7f74e234", "score": "0.6802868", "text": "def convert_time(self, time):\r\n time = time.split(':')[0:2]\r\n hours = int(time[0])\r\n minutes = int(time[1])\r\n return minutes + hours * 60", "title": "" }, { "docid": "b9be93187d3ac594f5cdd6f35b3356cc", "score": "0.6783047", "text": "def _convertToSeconds(timeSpan, start=None):\n if isinstance(timeSpan, str):\n return timeSpan\n else:\n if isinstance(timeSpan, datetime.datetime):\n seconds = (timeSpan - start).total_seconds()\n elif isinstance(timeSpan, datetime.timedelta):\n seconds = timeSpan.total_seconds()\n else:\n seconds = timeSpan\n return str(seconds) + 's'", "title": "" }, { "docid": "8c1d9448a07392d96e280a827952be95", "score": "0.6782899", "text": "def to_time(str_time):\n return datetime.strptime(str_time, '%H:%M').time()", "title": "" }, { "docid": "269c829f20a205d61496d7b9ac07b9d8", "score": "0.67590344", "text": "def time_to_int(time):\n minutes = time.hour * 60 + time.minute\n seconds = minutes * 60 + time.second\n return seconds", "title": "" }, { "docid": "34d4f6621d30de67fb96b5603606518f", "score": "0.6716479", "text": "def time2number(time):\n\n arr = time.split(':')\n\n min = int(arr[0])\n sec = int(arr[1])\n\n timer = (min * 60) + sec\n\n return timer", "title": "" }, { "docid": "3e43abb33a739ac0cc27253fe20ad647", "score": "0.6695175", "text": "def valToSeconds(value):\n \n if re.search('[mshdwcyMSHDWCY]', value):\n if int(value):\n return int(value)\n elif float(value) and not int(value):\n return int(str(value).split('.')[0])\n timeCh = value[-1]\n value = value[:-1]\n try:\n mult = atom(timeCh)\n except Exception as e:\n naAwait(log.logEvent.debug(e)); pass\n if mult == None:\n return None\n elif mult == 0:\n return 0\n return Main.atoi(value)*mult", "title": "" }, { "docid": 
"cadde7e9f6ec0c2ba4689ec04d8d2574", "score": "0.66767746", "text": "def time_to_int(str_time):\n dt = time.mktime(\n datetime.datetime.strptime(str_time, \"%Y-%m-%dT%H:%M:%S\").timetuple()\n )\n return dt", "title": "" }, { "docid": "d74de11a05fc7f790592802c9105d8f8", "score": "0.6663057", "text": "def validate_time(time) -> int:\n\tif time.find('.') == -1:\n\t\treturn int(time)\n\telse:\n\t\tsplited_time = time.split('.')\n\t\tseconds = int(splited_time[0]) * 60 + int(splited_time[1])\n\treturn seconds", "title": "" }, { "docid": "797beb610c20f71c8f0132a1e609495f", "score": "0.66520655", "text": "def time_call(time):\n time = re.split(r'\\:', time)\n time = int(time[0]) * 3600 + int(time[1]) * 60 + int(time[2])\n return time", "title": "" }, { "docid": "ffdfd7593fcf38ff13fa80c9f2f707d3", "score": "0.6645444", "text": "def time(s):\n return datetime.datetime.strptime(s, TIMEFMT)", "title": "" }, { "docid": "b113d8b51f8c6e977e8cb5c9249c26ec", "score": "0.66368103", "text": "def convertTime(seconds):\n hour=seconds//3600\n m=seconds//60\n m=m%60\n sec=seconds%60\n print(\"{} seconds = {} hours, {} minutes, {} seconds\".format(seconds, hour, m, sec))", "title": "" }, { "docid": "4036d462bec4de405294744deb526e70", "score": "0.66180664", "text": "def convert_time(time):\n time_string = time.split(':')\n return timedelta(hours=int(time_string[0]),\n minutes=int(time_string[1]),\n seconds=int(time_string[2])\n )", "title": "" }, { "docid": "3a2bd81c9c45c09917f79d099d173d70", "score": "0.66156983", "text": "def test_format_time_seconds(self):\n from natcap.invest.utils import _format_time\n\n seconds = 7\n self.assertEqual(_format_time(seconds), '7s')", "title": "" }, { "docid": "93c32e74f14d5281dfdfd08f910dffc2", "score": "0.6608167", "text": "def convert(time_string, from_seconds_to_minute, to_seconds_from_minute):\n get_minutes = int(time_string[0:2]) # get minutes as string value and convert to integer\n get_seconds = int(time_string[-2:]) # get seconds as string value and convert to integer\n a = possible(get_minutes, get_seconds, from_seconds_to_minute)\n if 0 <= a:\n return formating(a, to_seconds_from_minute)\n else:\n return", "title": "" }, { "docid": "5cabc1ca2aab289858c89102c59bf2c1", "score": "0.6588355", "text": "def test_duration_to_seconds_with_invalid_str(self):\n time = {\n \"duration\": \"3hh30m\",\n \"project\": \"ganeti-web-manager\",\n \"user\": \"example-user\",\n \"activities\": [\"documenting\"],\n \"notes\": \"Worked on docs\",\n \"issue_uri\": \"https://github.com/\",\n \"date_worked\": \"2014-04-17\",\n }\n\n self.assertEquals(self.ts._TimeSync__duration_to_seconds\n (time['duration']),\n [{self.ts.error:\n \"time object: invalid duration string\"}])", "title": "" }, { "docid": "f245a0d177688ceb130f7f3b4a31bcef", "score": "0.6585535", "text": "def TimestampToSeconds(iso_time_string):\n timestamp = time.strptime(iso_time_string, '%Y-%m-%dT%H:%M:%S+00:00')\n return calendar.timegm(timestamp)", "title": "" }, { "docid": "7b10bbaf437d57155f41cacd6b3a0481", "score": "0.65637964", "text": "def string_to_unix_sec(time_string, time_directive):\n\n error_checking.assert_is_string(time_string)\n error_checking.assert_is_string(time_directive)\n return calendar.timegm(time.strptime(time_string, time_directive))", "title": "" }, { "docid": "3765a858722af234ae1f723eaddf9567", "score": "0.6541727", "text": "def sec_str_to_ts(sec_str):\n sec_res = sec_str.split('.')\n if not len(sec_res) == 2:\n sys.exit(\"Incorrect input, it should be like secs.nsecs\")\n else:\n secs = sec_res[0]\n 
nsecs = sec_res[1]\n assert len(nsecs) == 9\n\n return secs, nsecs", "title": "" }, { "docid": "9791b1ec2614a9b2a86244e847766f22", "score": "0.6536011", "text": "def convert_termination_time_to_seconds(termination_time: Tuple[float, str]):\n unit_conversion = {'micro-s': 1e-6,\n 'ms': 1e-3,\n 's': 1,\n 'hrs': 3600,\n 'hours': 3600,\n 'days': 3600*24,\n }\n t_final, units = termination_time\n t_final = t_final * unit_conversion[units]\n return t_final", "title": "" }, { "docid": "3c9203878f4dc43f113bd6796ec3cd93", "score": "0.6535947", "text": "def get_millisec(time_str):\n m, sms = time_str.split(':')\n s, ms = sms.split('.')\n return int(m) * 60000 + int(s) * 1000 + int(ms) * 10", "title": "" }, { "docid": "0baf4fc1f1d93237fde4a0b79ccf6b6c", "score": "0.65280265", "text": "def convert_time(seconds):\n seconds = int(seconds) # removing ms\n minutes, seconds = divmod(seconds, 60)\n hours, minutes = divmod(minutes, 60)\n days, hours = divmod(hours, 24)\n return days, hours, minutes, seconds", "title": "" }, { "docid": "309487e88ba143fe259ba278211e1a2a", "score": "0.65263313", "text": "def time_to_int(self):\n minutes=self.hour*60+self.minute;\n seconds=minutes*60+self.second;\n return seconds;", "title": "" }, { "docid": "cffaf183a9f66e56e7c70d4eb84dd7dc", "score": "0.6525884", "text": "def sec2time(seconds):\n m, s = divmod(seconds, 60)\n h, m = divmod(m, 60)\n return \"%d:%02d:%02d\" % (h, m, s)", "title": "" }, { "docid": "b1913ff5092f754e7d967416e803df40", "score": "0.6518661", "text": "def ascetime(sec):\r\n\th = sec / 60**2\r\n\tm = 60 * (h - h // 1)\r\n\ts = 60 * (m - m // 1)\r\n\treturn \"%dh%02d:%02d\" % (h, m, s)", "title": "" }, { "docid": "26b3e478e017c690a73e8ac56a598383", "score": "0.65128255", "text": "def ts_to_sec(ts):\n return ts.secs + ts.nsecs / float(1e9)", "title": "" }, { "docid": "88d7eb9bc145117fc45dd6d9a4b19736", "score": "0.6497451", "text": "def parse_time(str_time,filepath = ''):\n pattern_time = r\"(?P<h1>\\d+):(?P<m1>\\d+):(?P<s1>\\d+),(?P<ms1>\\d+)\\W*-->\\W*(?P<h2>\\d+):(?P<m2>\\d+):(?P<s2>\\d+),(?P<ms2>\\d+)$\"\n try:\n d = re.match(pattern_time, str_time.strip()).groupdict()\n except:\n message = u\"Invalid string format '%s' , expect hh:mm:ss,msc --> hh:mm:ss,msc\" % str_time\n return None,None\n markInBlackList(filepath)\n raise SrtFormatError(message)\n get_ms = lambda h, m, s, ms: (int(s) + int(m) * 60 + int(h) * 60 * 60) * 1000 + int(ms)\n return get_ms(d['h1'], d['m1'], d['s1'], d['ms1']), get_ms(d['h2'], d['m2'], d['s2'], d['ms2'])", "title": "" }, { "docid": "9788f7a854da21f01fea742a5b907740", "score": "0.64882576", "text": "def parse_hms(t: Union[int, float, str]) -> Union[int, float]:\n if isinstance(t, (float, int)):\n return t\n\n # Try : and ; separators\n for sep in ':;':\n if sep not in t:\n continue\n\n sec = 0\n\n for s in t.split(sep):\n sec *= 60\n\n try:\n sec += str2num(s)\n except ValueError:\n raise ValueError('cannot parse seconds from %s' % repr(t))\n\n return sec\n try:\n return str2num(t)\n except ValueError:\n raise ValueError('cannot parse seconds from %s' % repr(t))", "title": "" }, { "docid": "1877e0317eb2128a59b4a36e1ae0ee5f", "score": "0.6480132", "text": "def convert_datetime(datestr,x):\n start = datetime.strptime(datestr, \"%Y-%m-%d %H:%M:%S.%f\")\n return start + timedelta(seconds=x)", "title": "" }, { "docid": "04d9545dc13793574f625d71c0d8c66c", "score": "0.64683545", "text": "def string_to_timedelta(self, time):\n parts = time.split(\":\")\n try:\n seconds = int(parts[-1].split(\".\")[0])\n except (IndexError, 
ValueError):\n seconds, milliseconds = 0, 0\n try:\n milliseconds = int(parts[-1].split(\".\")[1])\n except (IndexError, ValueError):\n milliseconds = 0\n try:\n minutes = int(parts[-2])\n except (IndexError, ValueError):\n minutes = 0\n try:\n hours = int(parts[-3])\n except (IndexError, ValueError):\n hours = 0\n return timedelta(\n hours=hours, minutes=minutes, seconds=seconds, milliseconds=milliseconds\n )", "title": "" }, { "docid": "16604da4f8b52546d0d3c201d70b471a", "score": "0.64669657", "text": "def process_time(time):\n return float(time[0]) + round(float(time[1][:-2]) / 60.0, 2)", "title": "" }, { "docid": "4d17562fba5419215cb8a283870760a7", "score": "0.64538777", "text": "def str_to_ts_s(s: str, mask='%Y-%m-%d %H:%M:%S') -> int:\n return int(time.mktime(time.strptime(s, mask)))", "title": "" }, { "docid": "fd340bab07b91a202d61058ddd93c872", "score": "0.64521176", "text": "def ascetime(sec):\n h = sec / 60**2\n m = 60 * (h - h // 1)\n s = 60 * (m - m // 1)\n return \"%dh%02d:%02d\" % (h, m, s)", "title": "" }, { "docid": "f6a91c79ef40e5d5eb719c877729c53b", "score": "0.6433904", "text": "def convert_time_text_format_to_microsec(time_text):\n before_dec_point, after_dec_point = time_text.split('.')\n d_sec = datetime.datetime.strptime(\n before_dec_point, '%Y-%m-%d %H:%M:%S')\n assert len(after_dec_point) == 6\n # instead of use .%f, , add msec part (digits after the decimal)\n m_seconds = datetime.timedelta(\n microseconds=int(after_dec_point))\n result_d = d_sec + m_seconds\n return result_d", "title": "" }, { "docid": "6ecf31e1662b5f77754e160330d30f78", "score": "0.6421107", "text": "def time_in_seconds(arg):\n try:\n return int(arg)\n except ValueError:\n i = pytimeparse.timeparse.timeparse(arg)\n if i is None or isinstance(i, float):\n raise argparse.ArgumentTypeError(\"'%s' is not a valid time\" % arg)\n else:\n return i", "title": "" }, { "docid": "69ae140377467d7da0535762a41d630a", "score": "0.64126444", "text": "def _seconds_from_value(value):\n if isinstance(value, int) and value != 0:\n return value * 3600\n elif not isinstance(value, basestring):\n return None\n\n try:\n suffix = value[-1].lower()\n value = value[:-1]\n if suffix == 's':\n return int(value)\n elif suffix == 'm':\n return int(value) * 60\n elif suffix == 'h':\n return int(value) * 3600\n elif suffix == 'd':\n return int(value) * 3600 * 24\n else:\n return None\n except:\n return None", "title": "" }, { "docid": "349ffd1eb21e21dbac7dec557bd37ff1", "score": "0.64068455", "text": "def convert_time(self, period, time):\n colon_idx = time.index(\":\")\n if period < 3:\n result = -2400 + ((period - 1) * 1200) # Period 1 -> 2400, period 2 -> 1200\n mins_to_secs = 1200 - int(time[:colon_idx]) * 60 # 20 mins -> 0, 0 mins -> 1200\n secs_to_secs = int(time[colon_idx + 1:])\n self.time_converted = result + mins_to_secs - secs_to_secs\n else:\n # TODO: Check if this works\n result = (period - 3) * 5 * 60\n mins_to_secs = 300 - int(time[:colon_idx]) * 60\n secs_to_secs = int(time[colon_idx + 1:])\n self.time_converted = result + mins_to_secs - secs_to_secs", "title": "" }, { "docid": "b21b606e718a0e742c87f9568fd5059c", "score": "0.6403776", "text": "def durationToSeconds(self, duration):\n split = duration.split('T')\n period = split[0]\n time = split[1]\n timeD = {}\n\n # days & weeks\n if len(period) > 1:\n timeD['days'] = int(period[-2:-1])\n if len(period) > 3:\n timeD['weeks'] = int(period[:-3].replace('P', ''))\n\n # hours, minutes & seconds\n if len(time.split('H')) > 1:\n timeD['hours'] = 
int(time.split('H')[0])\n time = time.split('H')[1]\n if len(time.split('M')) > 1:\n timeD['minutes'] = int(time.split('M')[0])\n time = time.split('M')[1]\n if len(time.split('S')) > 1:\n timeD['seconds'] = int(time.split('S')[0])\n\n # convert to seconds\n timeS = timeD.get('weeks', 0) * (7 * 24 * 60 * 60) + \\\n timeD.get('days', 0) * (24 * 60 * 60) + \\\n timeD.get('hours', 0) * (60 * 60) + \\\n timeD.get('minutes', 0) * (60) + \\\n timeD.get('seconds', 0)\n\n return timeS", "title": "" }, { "docid": "3494da39d9359e89a4d2d9338ae09eef", "score": "0.6402125", "text": "def int_to_time(seconds):\n time=Time();\n minutes,time.second=divmod(seconds,60);\n time.hour,time.minute=divmod(minutes,60);\n return time;", "title": "" }, { "docid": "51801705890ff6cf082b4630f8517c66", "score": "0.6400741", "text": "def time_converter(time_string):\n hour,minute,second=time_string.split(\":\")\n hour=int(hour)\n day=1\n if hour>23:\n hour=hour%24\n day+=1\n dt=datetime(year=1970,month=1,day=day,hour=hour,minute=int(minute),second=int(second))\n return int(dt.replace(tzinfo=timezone.utc).timestamp())", "title": "" }, { "docid": "7c13f2a99bb6402dd55ca520e7b0120e", "score": "0.6399199", "text": "def parse_time(time_str):\n parts = timeregex.match(time_str)\n assert parts is not None, \"Could not parse any time information from '{}'. Examples of valid strings: '8h', '2d8h5m20s', '2m4s'\".format(time_str)\n time_params = {name: float(param) for name, param in parts.groupdict().items() if param}\n return timedelta(**time_params)", "title": "" }, { "docid": "0db451545fd246fee3314ca1631ebe21", "score": "0.6393288", "text": "def parse_time(self):\n time_split = self.timestamp.split()\n formatted = \" \".join(time_split[i] for i in [1, 2, 3, 5])\n date = datetime.strptime(formatted, '%b %d %H:%M:%S %Y')\n return (date - START_DEBATE).seconds", "title": "" }, { "docid": "5d338f6fe012460723ab4c10ec275513", "score": "0.6391121", "text": "def convert_compact_duration_to_seconds(compact_duration: str) -> float:\n try:\n hours, minutes, seconds = compact_duration.split(\":\")\n except ValueError:\n raise ValueError(f\"Duration is in an invalid format: {compact_duration}\")\n\n try:\n hours, minutes, seconds = map(float, [hours, minutes, seconds])\n except ValueError:\n raise ValueError(f\"Duration has an invalid value: {compact_duration}\")\n\n return (hours * 3600) + (minutes * 60) + seconds", "title": "" }, { "docid": "d06e42726658c47d0566a378eb7e4e9a", "score": "0.63859004", "text": "def test_time_string_to_unix_sec(self):\n\n this_time_unix_sec = storm_events_io._time_string_to_unix_sec(\n TIME_STRING)\n self.assertTrue(this_time_unix_sec == TIME_UNIX_SEC)", "title": "" }, { "docid": "c1c53045d1a7733d8947d6d92a2c883f", "score": "0.6374778", "text": "def seconds_to_time(total_seconds):\n\n hours = total_seconds // 3600\n minutes = (total_seconds % 3600) // 60\n seconds = total_seconds % 60\n\n return time(hours, minutes, seconds)", "title": "" }, { "docid": "f8c653c7073847db6aea01f8e447f00c", "score": "0.6373065", "text": "def _secs_to_time_str(time_sec):\n\n if time_sec < 60:\n time_str = str(round(time_sec, 0)) + \" sec\"\n elif time_sec < 3600:\n time_str = str(round(time_sec/60, 1)) + \" min\"\n elif time_sec == 3600:\n time_str = \"1 hr\"\n else:\n time_str = str(round(time_sec/3600, 1)) + \" hrs\"\n\n # xx.0 min/hr --> xx min/hr\n time_str = time_str.replace('.0 ', ' ')\n return time_str", "title": "" }, { "docid": "1bfc86fe292b01211f2b32d1b3fbbe4f", "score": "0.63721716", "text": "def operation_p(time_str):\n 
this_time = datetime.strptime(time_str[:4], '%H%M')\n if time_str[-1] == 'H':\n this_time = this_time + timedelta(seconds=30)\n return this_time", "title": "" }, { "docid": "b1e05089166d41b14992f8ff7dddfe45", "score": "0.637204", "text": "def int_to_time(seconds):\n return Time(0, 0, seconds)", "title": "" }, { "docid": "f5bac3f27b23617f958d659b82fc2ac7", "score": "0.63714373", "text": "def _convert_time_string_to_float(time_string):\n time_object = timeutils.parse_isotime(time_string)\n return (timeutils.normalize_time(time_object) -\n datetime.datetime.utcfromtimestamp(0)).total_seconds()", "title": "" }, { "docid": "9727da8a0075948e8f055afc8a95cd2d", "score": "0.6347886", "text": "def forward(self, time_string: str) -> timedelta:\n hh = int(time_string[:2])\n mm = int(time_string[-2:])\n td: timedelta = timedelta(hours=hh, minutes=mm)\n self.dt += td\n return td", "title": "" }, { "docid": "6848b3b8d29c2d990410890962c134d9", "score": "0.6347542", "text": "def cert_time_to_seconds(cert_time):\n from time import strptime\n from calendar import timegm\n\n months = (\n \"Jan\",\"Feb\",\"Mar\",\"Apr\",\"May\",\"Jun\",\n \"Jul\",\"Aug\",\"Sep\",\"Oct\",\"Nov\",\"Dec\"\n )\n time_format = ' %d %H:%M:%S %Y GMT' # NOTE: no month, fixed GMT\n try:\n month_number = months.index(cert_time[:3].title()) + 1\n except ValueError:\n raise ValueError('time data %r does not match '\n 'format \"%%b%s\"' % (cert_time, time_format))\n else:\n # found valid month\n tt = strptime(cert_time[3:], time_format)\n # return an integer, the previous mktime()-based implementation\n # returned a float (fractional seconds are always zero here).\n return timegm((tt[0], month_number) + tt[2:6])", "title": "" }, { "docid": "d8d2a9fba890b2c4dca953073707bea7", "score": "0.63302374", "text": "def time_to_number(time):\n hours, minutes, seconds = time.split(':')\n hours, minutes, seconds = int(hours) * 60, int(minutes), float(seconds) / 60\n return hours + minutes + seconds", "title": "" }, { "docid": "d986d87904549d0e33b586309135b35c", "score": "0.6327099", "text": "def convert_time(string, format_time):\n dt = datetime.fromtimestamp(int(string, 16) // 1000000000)\n return dt.strftime(format_time)", "title": "" }, { "docid": "2ce84ae0c641fae272a5573e6c8c1b4f", "score": "0.6325385", "text": "def seconds_from_millis(time):\n return time/1000.0", "title": "" }, { "docid": "06947513924e0a31bb09acbef13060b8", "score": "0.6321681", "text": "def readSeconds(self, seconds: float, **kwargs) -> str:\n _UNITS = [\"day\", \"hour\", \"minute\", \"second\"]\n _CONVERSION = {\n \"day\": 60*60*24,\n \"hour\": 60*60,\n \"minute\": 60,\n \"second\": 1\n }\n\n # set default value\n decimals = kwargs.get(\"decimals\", 2)\n seconds = round(seconds, decimals)\n\n v = 0\n i = -1\n while not v:\n i += 1\n if i == len(_UNITS):\n return \"%s seconds\" % seconds\n unit = _UNITS[i]\n v = seconds // _CONVERSION[unit]\n res = \"\"\n for unit in _UNITS[i:]:\n v = seconds // _CONVERSION[unit]\n seconds = seconds % _CONVERSION[unit]\n v = int(v)\n if unit == \"second\":\n v = v + seconds\n v = round(v, decimals)\n if not v:\n continue\n if v != 1:\n res += \"%s %ss \" % (v, unit)\n else:\n res += \"%s %s \" % (v, unit)\n return res.strip()", "title": "" }, { "docid": "f7e99a5dcf9a729f0a83cfc614f05f08", "score": "0.6304759", "text": "def date_to_sec(time_struct):\n return (time_struct.hour * 3600.0 +\n time_struct.minute * 60.0 +\n time_struct.second * 1.0 +\n time_struct.millisecond * 1e-3)", "title": "" }, { "docid": "86803a4caba847a207d63230cbdc6205", 
"score": "0.6300735", "text": "def date2secs(date_string):\n tokens = date_string.split(\".\")\n prefix = tokens[0]\n suffix = tokens[1]\n d = datetime.strptime(prefix, time_fmt)\n secs = (d - epoch).total_seconds()\n microsecs = int(suffix)\n return secs + microsecs/1000000.0", "title": "" }, { "docid": "aeeca37bd9410aba754bae7c49deb258", "score": "0.6300287", "text": "def convert_to_data_time_format_microsec(time_text):\n time_cols = [int(txt) for txt in time_text.split('_')]\n \n txt = '%02d:%02d:%02d' % (time_cols[0], time_cols[1], time_cols[2])\n d_sec = datetime.datetime.strptime(txt, '%H:%M:%S')\n # original is 7 digits after the decimal point,\n # and change to 6 digits after the decimal point\n m_seconds = datetime.timedelta(\n microseconds=int(time_cols[3]//10))\n result_d = d_sec + m_seconds\n return result_d", "title": "" }, { "docid": "669e46284a331960ce3dce77b009e1db", "score": "0.628012", "text": "def toSeconds(td):\n return td.seconds + td.days * 86400", "title": "" }, { "docid": "21fb5c9f5d30c2a68ce2b4178d0a4225", "score": "0.6269789", "text": "def timeToInt(self):\n assert self.isValid()\n return self.hour * 3600 + self.minute * 60 + self.second # totalseconds", "title": "" }, { "docid": "09a84c8d17793ce4d9e55312e0018dfe", "score": "0.6267843", "text": "def convert_to_seconds(minutes):\n return int(minutes) * 60", "title": "" }, { "docid": "6afff78790b2bb8666f177622428bf71", "score": "0.62630576", "text": "def parse_time(value):\n value = value.strip()\n\n if value == '':\n return 0\n\n hours = minutes = seconds = fraction = 0\n parsed = False\n\n m = re.match(r'(\\d+)[:](\\d\\d?)[:](\\d\\d?)([.]\\d+)?$', value)\n if not parsed and m:\n hours, minutes, seconds, fraction = m.groups()\n fraction = float(fraction or 0.0)\n parsed = True\n\n m = re.match(r'(\\d+)[:](\\d\\d?)([.]\\d+)?$', value)\n if not parsed and m:\n minutes, seconds, fraction = m.groups()\n fraction = float(fraction or 0.0)\n parsed = True\n\n m = re.match(r'(\\d+)([.]\\d+)?$', value)\n if not parsed and m:\n seconds, fraction = m.groups()\n fraction = float(fraction or 0.0)\n parsed = True\n\n if not parsed:\n try:\n seconds = int(value)\n except ValueError:\n logger.warning('Could not parse time value: \"%s\"', value)\n return 0\n\n return (int(hours) * 60 + int(minutes)) * 60 + int(seconds)", "title": "" }, { "docid": "d8cd8968c8345a5b1f51ac7e2f366f00", "score": "0.62614715", "text": "def convertor(time_string):\n return datetime.strptime(time_string, timeformating)", "title": "" }, { "docid": "0597b045f6e4365b16010165b29f8767", "score": "0.6259681", "text": "def parse_time(value):\n if value == '':\n return 0\n\n if not value:\n raise ValueError('Invalid value: %s' % (str(value),))\n\n m = re.match(r'(\\d+)[:.](\\d\\d?)[:.](\\d\\d?)', value)\n if m:\n hours, minutes, seconds = m.groups()\n return (int(hours) * 60 + int(minutes)) * 60 + int(seconds)\n\n m = re.match(r'(\\d+)[:.](\\d\\d?)', value)\n if m:\n minutes, seconds = m.groups()\n return int(minutes) * 60 + int(seconds)\n\n return int(value)", "title": "" }, { "docid": "db376db788df176b8b6cd3f2f440c2fe", "score": "0.62533325", "text": "def compute_actual_seconds(self):\r\n \r\n if self.output == \"00:00\":\r\n raise ValueError(\"Please input a time greater than 00:00\")\r\n self.list_output = list(self.output)\r\n if \":\" in self.list_output:\r\n self.list_output.remove(\":\")\r\n \r\n minutes = self.list_output[0] + self.list_output[1]\r\n seconds = self.list_output[2] + self.list_output[3]\r\n \r\n self.actual_seconds = float((int(minutes) * 
60) + int(seconds))\r\n \r\n return self.actual_seconds", "title": "" }, { "docid": "a18fc00a6cfaef872b547bc6c0e6936a", "score": "0.6251116", "text": "def fromStamp(time_string):\n try:\n return datetime.datetime.strptime(time_string, STAMPFORMAT).replace(microsecond=999999)\n except:\n return getTime()", "title": "" }, { "docid": "6433b6cb95c47ce327ca386d4906eeb1", "score": "0.62281704", "text": "def ts_to_sec_str(ts):\n # return \"{}.{:9d}\".format(ts.secs, ts.nsecs)\n sec = ts.secs + ts.nsecs / float(1e9)\n return \"{:.9f}\".format(sec)", "title": "" }, { "docid": "4c8fc6ac75863321ecb869d6a36d77e6", "score": "0.6221761", "text": "def test_duration_to_seconds(self):\n time = {\n \"duration\": \"3h30m\",\n \"project\": \"ganeti-web-manager\",\n \"user\": \"example-user\",\n \"activities\": [\"documenting\"],\n \"notes\": \"Worked on docs\",\n \"issue_uri\": \"https://github.com/\",\n \"date_worked\": \"2014-04-17\",\n }\n\n self.assertEquals(self.ts._TimeSync__duration_to_seconds\n (time['duration']), 12600)", "title": "" }, { "docid": "9f5878afadfe0acb6a8e4f13b6630a0b", "score": "0.6216447", "text": "def StartTimeHandler(self, timeStr):\n standardTime = 0.0\n baseYear = 2011\n baseMonth = 1\n baseDay = 1\n\n # parse the string\n res = timeStr.split(' ')\n date = res[0]\n clock = res[1]\n\n # parse date part\n res = date.split('/')\n year = res[0]\n month = res[1]\n day = res[2]\n diffYear = int(year) - baseYear\n diffMonth = int(month) - baseMonth + diffYear * 12\n diffDay = int(day) - baseDay + diffMonth * 31\n standardTime += diffDay * 24 * 60 * 60\n\n # parse clock part\n res = clock.split(':')\n hour = res[0]\n minute = res[1]\n second = res[2] \n standardTime += int(hour) * 60 * 60 + int(minute) * 60 + float(second)\n\n return standardTime", "title": "" }, { "docid": "cc83610837a57ceb7b5620919a262642", "score": "0.62163854", "text": "def int_to_time(seconds):\n time = Time()\n minutes, time.second = divmod(seconds, 60)\n time.hour, time.minute = divmod(minutes, 60)\n return time", "title": "" }, { "docid": "5396a84c5131366940170a0555059b47", "score": "0.6211538", "text": "def seconds_to_time(s: int, date: datetime.datetime) -> datetime.datetime:\n return datetime.datetime.combine(date, datetime.datetime.min.time()) - datetime.timedelta(seconds=s)", "title": "" }, { "docid": "fd443f29ab7405866193030d0db28b25", "score": "0.62110656", "text": "def _slurmtimesecs (elapsed_time) :\n \n multipliers4 = [86400,3600,60,1]\n multipliers3 = [3600,60,1]\n seconds = None\n d_h_m_s = re.findall('\\d+', elapsed_time)\n if len(d_h_m_s) == 4 :\n seconds = sum([a*b for a,b in zip(multipliers4, map(int,d_h_m_s))])\n elif len(d_h_m_s) == 3 :\n seconds = sum([a*b for a,b in zip(multipliers3, map(int,d_h_m_s))])\n return seconds", "title": "" }, { "docid": "51d3d697aef1bf209050e8e18f69b3a4", "score": "0.62076896", "text": "def to_seconds(self):\n return self.hours * 3600 + self.minutes * 60 + self.seconds", "title": "" }, { "docid": "3aa167bb4286a491556a31c48a605414", "score": "0.62073123", "text": "def ssm_to_time(ssm):\n secs = int(ssm)\n usecs = round((ssm % 1) * 1e6)\n return time(secs // 3600, secs % 3600 // 60, secs % 60, usecs)", "title": "" }, { "docid": "eff8a45b0c2c2f164b4879cc7b9ddd73", "score": "0.62039834", "text": "def parseTime(time):\n if isinstance(time, datetime.time):\n return 60 * time.hour + time.minute\n elif isinstance(time, (str, unicode)):\n h, m = time.split(':', 1)\n return 60*int(h) + int(m)\n else:\n return time", "title": "" }, { "docid": "5c5065c6f4e32cfa7eb181f5e2753484", 
"score": "0.6183113", "text": "def seconds_from_hours(hours):\n\n return (60*60)*hours", "title": "" }, { "docid": "052f3ce358a6c6a186164cc5a284caf9", "score": "0.6177616", "text": "def Convert_to_seconds(minutes):\n seconds = minutes * 60\n return seconds", "title": "" }, { "docid": "69770a13fec0fc6cc5886b6b49b4f588", "score": "0.6176292", "text": "def MyTime(self,b):\n\n \n s=b.decode('ascii')\n \n a =md.date2num(datetime.datetime.strptime(s,'%H:%M:%S')) \n \n return a", "title": "" } ]
bd7aea159980b3602622f3dfae95db5f
Ensure the storage directories have the same tags as specified for the table in src_table_tags. The storage location is looked up in the Hive server.
[ { "docid": "2b4e011e0066b1b71fda618179c169c4", "score": "0.61331445", "text": "def sync_table_storage_tags(self, src_table_tags, clear_not_listed=False):\n self.worklog = {}\n run = 0\n while True:\n try:\n run += 1\n self.ensure_tags_in_atlas(src_table_tags)\n if clear_not_listed:\n schemas = schemas_from_src(src_table_tags)\n src_tables = tables_from_src(src_table_tags)\n atlas_tables = self.get_tables_for_schema_from_atlas(schemas)\n tables_only_known_by_atlas = set(atlas_tables.keys())-src_tables\n if len(tables_only_known_by_atlas) != 0:\n for t in tables_only_known_by_atlas:\n (schema, table) = t.split(\".\")\n src_table_tags.append({'schema': schema, 'table': table, 'tags': ''})\n for s in src_table_tags:\n self.worklog.update(\n self._sync_tags_for_one_tables_storage(s['schema'], s['table'], _tags_as_set(s)))\n return self.worklog\n except (SyncError, IOError, AtlasError, HiveError) as e:\n if run > self.retries:\n raise e\n time.sleep(self.retry_delay)", "title": "" } ]
[ { "docid": "3c70b31b4047e9d45943652a4e2857e5", "score": "0.7110176", "text": "def _sync_tags_for_one_tables_storage(self, schema, table, expected_tags):\n storage_url = self.hive_client.get_location(schema, table)\n if storage_url is not None:\n guid = self.atlas_client.add_hdfs_path(storage_url)\n\n tags_on_storage = self.atlas_client.get_tags_on_guid(guid)\n tags_to_add = expected_tags-tags_on_storage\n tags_to_delete = tags_on_storage-expected_tags\n if len(tags_to_add) != 0:\n self.atlas_client.add_tags_on_guid(guid, list(tags_to_add))\n self.worklog['{} added tag'.format(storage_url)] = tags_to_add\n if len(tags_to_delete) != 0:\n self.atlas_client.delete_tags_on_guid(guid, list(tags_to_delete))\n self.worklog['{} deleted tag'.format(storage_url)] = tags_to_delete\n else:\n self.worklog['{}.{} is a view, not doing any hdfs tagging for it.'.format(schema, table)] = ''\n return self.worklog", "title": "" }, { "docid": "ce6ca8d2ce4f998887dc80b1d8237cf3", "score": "0.5437353", "text": "def test_table_location_path(sdc_builder, sdc_executor, deltalake, aws):\n table_name = f'stf_{get_random_string()}'\n table_location = '/deltalake/'\n\n engine = deltalake.engine\n DATA = '\\n'.join(json.dumps(rec) for rec in ROWS_IN_DATABASE)\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n # Dev raw data source\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='JSON', raw_data=DATA, stop_after_first_batch=True)\n\n # AWS S3 destination\n s3_key = f'stf-deltalake/{get_random_string()}'\n\n # Databricks Delta lake destination stage\n databricks_deltalake = pipeline_builder.add_stage(name=DESTINATION_STAGE_NAME)\n databricks_deltalake.set_attributes(staging_location='AWS_S3',\n stage_file_prefix=s3_key)\n databricks_deltalake.set_attributes(table_name=table_name,\n purge_stage_file_after_ingesting=True,\n table_location_path=table_location)\n\n dev_raw_data_source >> databricks_deltalake\n\n pipeline = pipeline_builder.build().configure_for_environment(deltalake, aws)\n\n try:\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_finished(timeout_sec=180)\n\n # Assert data from deltalake table is same as what was input.\n connection = deltalake.connect_engine(engine)\n result = connection.execute(f'select * from {table_name}')\n data_from_database = sorted(result.fetchall())\n\n expected_data = [tuple(v for v in d.values()) for d in ROWS_IN_DATABASE]\n\n assert len(data_from_database) == len(expected_data)\n\n assert expected_data == [(record['title'], record['author'], record['genre'], record['publisher'])\n for record in data_from_database]\n\n result = connection.execute(f'Show create table {table_name}')\n\n assert f'dbfs:{table_location}{table_name}' in str(result.fetchall()[0])\n result.close()\n finally:\n _clean_up_databricks(deltalake, table_name)", "title": "" }, { "docid": "11de67a64a04234bc699183194c3572e", "score": "0.5432228", "text": "def local_inventory(source_dir, bucketname):\n global dblock\n if os.path.exists(options.inventory_db):\n response = None \n while response not in ('y','n'):\n response = raw_input(\"Database exists at %s. Clobber it? 
(y/n)\" % options.inventory_db)\n if response == 'n':\n return False\n inventory_path = options.inventory_path\n ensure_exists(inventory_path)\n conn = sqlite3.connect(options.inventory_db)\n dblock.acquire()\n cursor = conn.cursor()\n cursor.execute(\"DROP TABLE IF EXISTS files;\")\n cursor.execute(\"DROP INDEX IF EXISTS idx_path;\")\n cursor.execute(\"\"\"\n CREATE TABLE files (\n id INTEGER PRIMARY KEY,\n path TEXT NOT NULL,\n local_hash TEXT,\n remote_hash TEXT,\n remote_exists INTEGER,\n transferred INTEGER,\n local_deleted INTEGER DEFAULT 0,\n );\n \"\"\")\n conn.commit()\n\n count = 0\n for (dirpath, dirnames, filenames) in os.walk(source_dir):\n for filename in filenames:\n filepath = os.path.join(dirpath, filename)\n relpath = os.path.relpath(filepath, source_dir)\n #filehash = md5_digest(filepath)\n #cursor.execute('INSERT INTO files (path, local_hash) VALUES (?, ?);', (relpath, filehash))\n cursor.execute('INSERT INTO files (path) VALUES (?);', (relpath, ))\n count += 1\n if count % DB_TRANSACTION_SIZE == 0:\n conn.commit()\n print \"%s: %d inserts.\" % (dirpath, count)\n else:\n conn.commit()\n print \"Creating index...\",\n cursor.execute('CREATE INDEX IF NOT EXISTS idx_path ON files(path);')\n cursor.execute('CREATE INDEX IF NOT EXISTS idx_remote_exists ON files(remote_exists);')\n cursor.execute('CREATE INDEX IF NOT EXISTS idx_transferred ON files(transferred);')\n cursor.execute('CREATE INDEX IF NOT EXISTS idx_delete ON files(local_deleted);')\n print \"Done.\"\n dblock.release()", "title": "" }, { "docid": "67c3227b684ae3bf5f2935bd017f38c1", "score": "0.5301467", "text": "def check_tables(self, dcon, tables):\r\n\r\n dcur = dcon.cursor()\r\n for tbl, inf in tables.items():\r\n if skytools.exists_table(dcur, tbl):\r\n continue\r\n\r\n sql = self.part_template\r\n sql = sql.replace('_DEST_TABLE', skytools.quote_fqident(inf['table']))\r\n sql = sql.replace('_PARENT', skytools.quote_fqident(inf['parent']))\r\n sql = sql.replace('_PKEY', inf['key_list'])\r\n # be similar to table_dispatcher\r\n schema_table = inf['table'].replace(\".\", \"__\")\r\n sql = sql.replace('_SCHEMA_TABLE', skytools.quote_ident(schema_table))\r\n\r\n dcur.execute(sql)\r\n dcon.commit()\r\n self.log.info('%s: Created table %s' % (self.job_name, tbl))", "title": "" }, { "docid": "0ea7236a28ff10a869bf8acbbcfbe1cb", "score": "0.52050793", "text": "def check_tables(self, dcon, tables):\r\n\r\n dcur = dcon.cursor()\r\n for tbl in tables.keys():\r\n if not skytools.exists_table(dcur, tbl):\r\n if not self.part_template:\r\n raise Exception('Dest table does not exists and no way to create it.')\r\n\r\n sql = self.part_template\r\n sql = sql.replace(DEST_TABLE, skytools.quote_fqident(tbl))\r\n\r\n # we do this to make sure that constraints for \r\n # tables who contain a schema will still work\r\n schema_table = tbl.replace(\".\", \"__\")\r\n sql = sql.replace(SCHEMA_TABLE, skytools.quote_ident(schema_table))\r\n\r\n dcur.execute(sql)\r\n dcon.commit()\r\n self.log.info('%s: Created table %s' % (self.job_name, tbl))", "title": "" }, { "docid": "fe09988818874d5cd13742a3bbc47f8d", "score": "0.50892246", "text": "def load_staging_tables(cur, conn): \n #Iterate over the copy_table_queries list and execute them and populate the staging_songs\n #and staging_events table\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "816b490d459f7cb4856ad2b419b072b9", "score": "0.5076124", "text": "def createStagingTable(self, strTableName: str) -> bool:\n strStagingTable = 
\"staging_\" + strTableName\n\n cursor = self.objConn.cursor()\n strQuery = \"create table \" + strStagingTable + \" (like \" + strTableName + \")\"\n\n try:\n cursor.execute(strQuery)\n self.objConn.commit()\n\n self.lstSwapTables.append(strTableName)\n except Exception as error:\n print(\"creating staging table failed with error:\" + str(error))\n return False\n\n cursor.close()\n\n return True", "title": "" }, { "docid": "25eba6baa5b9d70318c490f19d35985e", "score": "0.5033682", "text": "def load_to_df_staging_tbl(self): \r\n try:\r\n for filename in os.listdir(self.get_local_config['stagingpath']):\r\n \r\n with open(os.path.join(self.get_local_config['stagingpath'],filename), 'r') as f_read:\r\n bottom = f_read.readlines()[-1]\r\n df = pd.read_csv(os.path.join(self.get_local_config['stagingpath'],filename),sep=self.data_attribute_details['source_detail']['delimiter'],lineterminator= self.data_attribute_details['source_detail']['line_terminator'] )\r\n #The row count also includes the trailer record. \r\n row_count = df.shape[0]\r\n \r\n #Drop the trailer record after taking the count\r\n if self.data_attribute_details['source_detail']['tail_record_count_pattern']:\r\n df = df[:-1] \r\n \r\n #if self.check_file_integrity(filename, bottom,row_count):\r\n self.write_df_to_db(self,df)\r\n except:\r\n raise Exception('Error while writing to staging table')", "title": "" }, { "docid": "bde25c580b43cdb9864934455b9a0aba", "score": "0.5017078", "text": "def check_table(self, t1, t2, lock_db, src_db, dst_db, setup_db):\r\n\r\n src_tbl = t1.dest_table\r\n dst_tbl = t2.dest_table\r\n\r\n lock_curs = lock_db.cursor()\r\n src_curs = src_db.cursor()\r\n dst_curs = dst_db.cursor()\r\n\r\n if not skytools.exists_table(src_curs, src_tbl):\r\n self.log.warning(\"Table %s does not exist on provider side\", src_tbl)\r\n return\r\n if not skytools.exists_table(dst_curs, dst_tbl):\r\n self.log.warning(\"Table %s does not exist on subscriber side\", dst_tbl)\r\n return\r\n\r\n # lock table against changes\r\n try:\r\n if self.provider_info['node_type'] == 'root':\r\n self.lock_table_root(lock_db, setup_db, dst_db, src_tbl, dst_tbl)\r\n else:\r\n self.lock_table_branch(lock_db, setup_db, dst_db, src_tbl, dst_tbl)\r\n\r\n # take snapshot on provider side\r\n src_db.commit()\r\n src_curs.execute(\"SELECT 1\")\r\n\r\n # take snapshot on subscriber side\r\n dst_db.commit()\r\n dst_curs.execute(\"SELECT 1\")\r\n finally:\r\n # release lock\r\n if self.provider_info['node_type'] == 'root':\r\n self.unlock_table_root(lock_db, setup_db)\r\n else:\r\n self.unlock_table_branch(lock_db, setup_db)\r\n\r\n # do work\r\n bad = self.process_sync(t1, t2, src_db, dst_db)\r\n if bad:\r\n self.bad_tables += 1\r\n\r\n # done\r\n src_db.commit()\r\n dst_db.commit()", "title": "" }, { "docid": "e5c744768a7133a38196f019cb0f34d2", "score": "0.49569878", "text": "def check_existing_tables(self):\n cursor = self.connection.cursor()\n cursor.execute(\"SELECT name FROM sqlite_master WHERE type='table'\")\n tables = [row[0] for row in cursor.fetchall()]\n for name in tables:\n self.tables[name] = None", "title": "" }, { "docid": "dc74e3511367206c018e2887b08a152f", "score": "0.48652664", "text": "def loadStagingTable(self, strTableName: str, strBucketName: str, strRemoteFile: str) -> bool:\n strStagingTable = \"staging_\" + strTableName\n\n cursor = self.objConn.cursor()\n\n strQuery = \"copy \" + strStagingTable + \" from 's3://\" + strBucketName + \"/\" + strRemoteFile + \"' \\\n IGNOREHEADER 1 credentials 'aws_iam_role=\" + 
self.strIAmRole + \"' \\\n CSV region '\" + self.strAWSRegion + \"';\\\n \"\n try:\n cursor.execute(strQuery)\n\n self.objConn.commit()\n except Exception as error:\n print(\"loading staging table failed with error:\" + str(error))\n return False\n\n cursor.close()\n\n return True", "title": "" }, { "docid": "3f17ce3373458cbcd59ff197e45b94e5", "score": "0.4863153", "text": "def loadTables(self, dicLoadInfo: dict, strS3BucketName: str, strS3Folder: str) -> bool:\n #create and load staging tables\n for strKey, lstTables in dicLoadInfo.items():\n for dicTableInfo in lstTables:\n if not self.createStagingTable(dicTableInfo[\"table\"]):\n raise ValueError(\"creation of staging table for \" + dicTableInfo[\"table\"] + \" failed\")\n\n if not self.loadStagingTable(dicTableInfo[\"table\"], strS3BucketName, os.path.join(strS3Folder, dicTableInfo[\"file\"])):\n raise ValueError(\"loading of staging table for \" + dicTableInfo[\"table\"] + \" failed\")\n\n self.swapTables()\n\n return True", "title": "" }, { "docid": "d0fec60ef37b592583c75faf36d4b5f4", "score": "0.48350033", "text": "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n print(\"Staging tables query:\", query)\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "9f7e3f803c6eb047bb599d7e5c5371b3", "score": "0.48322934", "text": "def upload_chunked_table(df, full_table_name, gbucket_dir, gbucket_subdir, local_dir='/tmp', clustering_fields=None):\n import pyarrow\n import subprocess\n\n assert gbucket_dir.startswith('gs://')\n project, dataset, table = full_table_name.split('.')\n os.makedirs(f\"{local_dir}/{table}\")\n\n bytes_per_row = df.memory_usage().iloc[1:].sum() // len(df)\n chunk_bytes = 200 * (2**20)\n chunk_rows = chunk_bytes // bytes_per_row\n chunks = iter_batches(df, chunk_rows)\n digits = len(str(len(chunks)-1))\n for i, chunk_df in enumerate(chunks):\n name = f\"{local_dir}/{table}/{i:0{digits}d}.parquet\"\n t = pyarrow.Table.from_pandas(chunk_df)\n pyarrow.parquet.write_table(t, name)\n\n cp_cmd = f\"gsutil -q cp -r {local_dir}/{table} {gbucket_dir}/{gbucket_subdir}/\"\n logger.info(cp_cmd)\n subprocess.run(cp_cmd, shell=True, check=True)\n\n ls_cmd = f\"gsutil ls -d {gbucket_dir}/{gbucket_subdir}/{table}\"\n logger.info(ls_cmd)\n subprocess.run(ls_cmd, shell=True, check=True)\n\n bq_cmd = f\"bq load --source_format=PARQUET --project_id={project}\"\n\n # schema_types = {\n # object: 'STRING',\n # **{np.dtype(dtype): 'FLOAT' for dtype in ('float32', 'float64')},\n # **{np.dtype(f'{s}int{w}'): 'INTEGER' for s,w in product(('u', ''), (8,16,32,64))},\n # }\n # schema = \",\".join(f\"{col}:{schema_types[dtype]}\" for col, dtype in df.dtypes.items())\n # bq_cmd = f\"{bq_cmd} --schema={schema}\"\n\n if clustering_fields:\n if isinstance(clustering_fields, str):\n clustering_fields = [clustering_fields]\n bq_cmd = f\"{bq_cmd} --clustering_fields={','.join(clustering_fields)}\"\n bq_cmd = f\"{bq_cmd} {dataset}.{table} {gbucket_dir}/{gbucket_subdir}/{table}/*.parquet\"\n\n logger.info(bq_cmd)\n subprocess.run(bq_cmd, shell=True, check=True)", "title": "" }, { "docid": "d7531fd15dc2547e4602764cb7a173aa", "score": "0.4812548", "text": "def create_std_table(connection):\n check_table_query = '''\n SELECT table_name\n FROM information_schema.tables\n WHERE table_schema='std'\n AND table_type='BASE TABLE';\n ''' \n\n try: \n cursor = execute_query(connection, check_table_query)\n exist_table_list = cursor.fetchall()\n std_dir_path = '../migration/standard/'\n std_dir_file = os.listdir(std_dir_path)\n\n 
for file_name in sorted(std_dir_file):\n if ('_'.join(file_name.split('_')[3:]).replace('.sql', ''), ) not in exist_table_list: \n with open(std_dir_path+file_name) as create_file:\n create_query = \"\".join(create_file.readlines())\n cursor = execute_query(connection, create_query)\n print(\"Successfully created {} table.\".format(file_name.replace('create_table_', '').replace('.sql', '')))\n\n except Exception as e:\n print(\"An error occurred: {}\".format(e))", "title": "" }, { "docid": "cf87639424267de38ca492c69e69a1f1", "score": "0.47887084", "text": "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "cf87639424267de38ca492c69e69a1f1", "score": "0.47887084", "text": "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "cf87639424267de38ca492c69e69a1f1", "score": "0.47887084", "text": "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "cf87639424267de38ca492c69e69a1f1", "score": "0.47887084", "text": "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "cf87639424267de38ca492c69e69a1f1", "score": "0.47887084", "text": "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "cf87639424267de38ca492c69e69a1f1", "score": "0.47887084", "text": "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "cf87639424267de38ca492c69e69a1f1", "score": "0.47887084", "text": "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "1a8fb39fe7e8f170a23fe58b76fec623", "score": "0.47875553", "text": "def createTablesIfNeeded(self):\n self.sql.execute('''CREATE TABLE IF NOT EXISTS _file (\n id integer NOT NULL PRIMARY KEY,\n name text,\n dir text\n )''')\n self.sql.execute('''CREATE TABLE IF NOT EXISTS _comp (\n id integer NOT NULL PRIMARY KEY,\n fid integer NOT NULL REFERENCES _file(id),\n name text\n )''')\n self.sql.execute('''CREATE TABLE IF NOT EXISTS _lang (\n id integer NOT NULL PRIMARY KEY,\n name text\n )''')\n self.sql.execute('''CREATE TABLE IF NOT EXISTS _trans (\n fid integer NOT NULL REFERENCES _file(id),\n cid integer NOT NULL REFERENCES _comp(id),\n lid integer NOT NULL REFERENCES _lang(id),\n key text,\n value text\n )''')", "title": "" }, { "docid": "e0a61126ad5e7cfe7e9aaa350568f5fa", "score": "0.4782434", "text": "def test_overcloud_images(staged_env):\n config, stage_info = staged_env\n # Check images subtree, all full hases should be there\n overcloud_images_path = config.qcow_server['root']\n base_path = os.path.join(\n overcloud_images_path,\n config['distro'],\n config['release'],\n 'rdo_trunk',\n )\n # Check stage_info has the requred attributes\n overcloud_images = stage_info['overcloud_images']\n attributes = [\n 'user',\n 'key_path',\n 'root'\n ]\n for attribute in attributes:\n assert attribute in overcloud_images\n check_paths = []\n existing_paths = []\n for commit in stage_info['dlrn']['promotions'].values():\n dlrn_hash = DlrnHash(source=commit)\n # check commit attributes are there\n hash_path = os.path.join(base_path, dlrn_hash.full_hash)\n check_paths.append(hash_path)\n\n # We don't block at the 
first path found, I want to see all\n # the missing paths\n try:\n os.stat(hash_path)\n existing_paths.append(hash_path)\n except OSError:\n raise\n\n assert check_paths == existing_paths\n\n # check if we have a leaf with the symbolic link\n # and the dir linked exists\n promotion_commit = \\\n stage_info['dlrn']['promotions']['currently_promoted']\n promotion_name = promotion_commit['name']\n promotion_link = os.path.join(base_path, promotion_name)\n promotion_target = os.readlink(promotion_link)\n # The fist commit is \"the current promotion link\"\n dlrn_hash = DlrnHash(source=promotion_commit)\n sample_path = \\\n os.path.join(base_path, dlrn_hash.full_hash)\n assert promotion_target == sample_path", "title": "" }, { "docid": "89cb354d746d1dac192833b007a2d784", "score": "0.47777647", "text": "def load_staging_tables(cur, conn):\n print('Loading Staging Tables')\n for query in copy_table_queries:\n try:\n cur.execute(query)\n conn.commit()\n except Exception as e:\n print('Error in: ', query)\n print(e)", "title": "" }, { "docid": "02ff7f3535abdf39c783f39661da4b24", "score": "0.47758514", "text": "def test_folder_or_parent(self):\n with pytest.raises(ValueError):\n self.syn.create_s3_storage_location()", "title": "" }, { "docid": "6ae999b0a7f3c499c967493b13e81021", "score": "0.47593686", "text": "def _recreate_tables(self):\r\n self.con.executescript(\"\"\"\r\n DROP TABLE IF EXISTS tags;\r\n CREATE TABLE tags ( \r\n id INTEGER PRIMARY KEY, \r\n tag_name TEXT NOT NULL UNIQUE\r\n );\r\n \r\n DROP TABLE IF EXISTS files;\r\n CREATE TABLE files ( \r\n id INTEGER PRIMARY KEY, \r\n file_path TEXT NOT NULL, \r\n file_name TEXT NOT NULL, \r\n file_extension TEXT NOT NULL, \r\n file_tag_id INTEGER NOT NULL\r\n ); \r\n \r\n DROP TABLE IF EXISTS files_tags;\r\n CREATE TABLE files_tags ( \r\n file_id INTEGER NOT NULL, \r\n tag_id INTEGER NOT NULL, \r\n PRIMARY KEY (file_id, tag_id)\r\n );\r\n \"\"\")", "title": "" }, { "docid": "39d11a2400561f21cd5541ab5f473caf", "score": "0.47442085", "text": "def _is_lun_valid_on_storage(self, lun):\n if self.volume_list:\n lun_vol = lun.get_metadata_property('Volume')\n if lun_vol not in self.volume_list:\n return False\n return True", "title": "" }, { "docid": "01b5c91bc0b78c2c65a1bb8923ab2efe", "score": "0.47269472", "text": "def _add_table_if_present(self, path, files):\n if self.table_file_name not in files:\n logger.debug('No tables found in directory %s', path)\n return\n #self._cutoff += 1\n current_table_path = os.path.join(path, self.table_file_name)\n this_table = Table.read(current_table_path, format='ascii')\n self._add_to_table(this_table, path)\n self._current_table_path = None", "title": "" }, { "docid": "c779b7703026fb2b4d2c228695b5eb90", "score": "0.47207093", "text": "def _validate_storage_classes(self, client: 'KubernetesClient'):\n super()._validate_storage_classes(client)\n STORAGE_CLASS_ERROR = \"Storage class '{}' does not exist\"\n\n datalogs = getattr(self.spec.storage, \"datalogs\", None)\n if datalogs and datalogs.volumes:\n for v in datalogs.volumes:\n if v.className and not client.storage_class_exists(v.className):\n raise ValueError(STORAGE_CLASS_ERROR.format(v.className))", "title": "" }, { "docid": "e5dd7cea41e572091d9a75bfcd3afe98", "score": "0.4717614", "text": "def load_staging_tables(cur, conn, KEYSECRET, fname): \n\n \"\"\"\n print('\\r{:5}* {}'.format('',schema_queries[2]['message'])) \n cur.execute(schema_queries[2]['query'])\n \n for o in copy_table_queries:\n print('\\r{:5}* {}'.format('',o['message'])) \n try:\n 
cur.execute(o['query'])\n conn.commit()\n except psycopg2.Error as e:\n print(e)\n conn.close()\n \"\"\" \n print('\\r{:5}* {}'.format('',schema_queries[2]['message'])) \n cur.execute(schema_queries[2]['query'])\n \n table_data = json.load(open(fname))\n for table, data in table_data.items(): \n if(data == 'ignore'):\n continue \n \n print('Loading {} with data from {}'.format(table, data))\n copy_stmt = make_copy_statement(table, data, KEYSECRET)\n try:\n cur.execute(copy_stmt)\n conn.commit()\n except psycopg2.Error as e:\n print(e)\n conn.close()", "title": "" }, { "docid": "f13633ef7747d1d65aec1dfa0f5c8332", "score": "0.47113544", "text": "def load_staging_tables(cur, conn):\n \n for query in copy_table_queries:\n print(query)\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "a25994e6741d9219e20a7cd419a31068", "score": "0.47082603", "text": "def match_files_in_land_to_config(config: dict) -> dict:\n\n land_base_path = config[\"land-base-path\"]\n if land_base_path.startswith(\"s3://\"):\n land_files = get_filepaths_from_s3_folder(land_base_path)\n else:\n land_files = get_filepaths_from_local_folder(land_base_path)\n\n if not land_files and config.get(\"fail-no-files\", False):\n raise FileNotFoundError(f\"No files found in the path: {land_base_path}\")\n else:\n total_files = len(land_files)\n log.info(f\"Found {total_files} in {land_base_path}\")\n\n # Check for requrired tables\n all_matched = []\n for table_name, table_params in config[\"tables\"].items():\n if table_params.get(\"pattern\"):\n table_params[\"matched_files\"] = [\n land_file\n for land_file in land_files\n if re.match(\n table_params.get(\"pattern\"), land_file.replace(land_base_path, \"\")\n )\n ]\n else:\n table_params[\"matched_files\"] = [\n land_file\n for land_file in land_files\n if land_file.replace(land_base_path, \"\").startswith(table_name)\n ]\n\n if not table_params[\"matched_files\"] and table_params.get(\"required\"):\n raise FileNotFoundError(\n f\"Config states file for {table_name} must exist but no files matched.\"\n )\n\n all_matched.extend(table_params[\"matched_files\"])\n\n if len(all_matched) != len(set(all_matched)):\n large_error_traceback = \"\"\n for table_name, table_params in config[\"tables\"].items():\n large_error_traceback += f\"{table_name}: {table_params['matched_files']} \\n\"\n raise FileExistsError(\n f\"We matched the same files to multiple tables.\\n{large_error_traceback}\"\n )\n\n # Fail if expecting no unknown files\n if \"fail-unknown-files\" in config:\n file_exeptions = config[\"fail-unknown-files\"].get(\"exceptions\", [])\n land_diff = set(land_files).difference(all_matched)\n land_diff = land_diff.difference(file_exeptions)\n if land_diff:\n raise FileExistsError(\n \"Config states no unknown should exist. 
\"\n f\"The following were unmatched: {land_diff}\"\n )\n\n return config", "title": "" }, { "docid": "89e9cfda91121c48408f76a5b3f0e232", "score": "0.47079644", "text": "def check_storage():\n rep = subprocess.Popen('df -kh /dev/sda1', shell=True, stdout=subprocess.PIPE)\n rep = rep.communicate()[0].decode().split(' ')\n for elt in reversed(rep):\n if elt.endswith('%'):\n global status_info\n status_info['storage'] = elt[0:-1]\n break", "title": "" }, { "docid": "a816c6849adbf4da8d08b7a929d4b88e", "score": "0.46811625", "text": "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n logging.info(f'Executing query:\\n {query}')\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "3aca236e0c10836d2e05308fa19f2f36", "score": "0.46741658", "text": "def testHasTable(self):\n with shared_test_lib.TempDirectory() as temp_directory:\n temp_file = os.path.join(temp_directory, 'plaso.sqlite')\n storage_file = sqlite_file.SQLiteStorageFile()\n storage_file.Open(path=temp_file, read_only=False)\n\n result = storage_file._HasTable(storage_file._CONTAINER_TYPE_EVENT_DATA)\n self.assertTrue(result)\n\n result = storage_file._HasTable('bogus')\n self.assertFalse(result)\n\n storage_file.Close()", "title": "" }, { "docid": "9a6067bbc8c7c45ca6c08208248be4c7", "score": "0.46659133", "text": "def get_storage_lookup(resources):\n buckets = resources.values_list(\"bucket\", flat=True).distinct()\n # This is to avoid a circular import\n Bucket = resources.model._meta.get_field(\"bucket\").related_model\n bucket_lookup = {\n bucket: get_tator_store(Bucket.objects.get(pk=bucket)) if bucket else get_tator_store()\n for bucket in buckets\n }\n return {\n resource.path: bucket_lookup[resource.bucket.pk] if resource.bucket else bucket_lookup[None]\n for resource in list(resources)\n }", "title": "" }, { "docid": "fbfaed1baa026e5de52138c86099ba4e", "score": "0.46649554", "text": "def load_staging_table(cur, conn):\n print(\"Loading staging table\")\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "542244148ccd3b382aa3364c80f18a71", "score": "0.4659642", "text": "def load_staging_i94_table(cur, conn, bucket_name): \n\n print('\\r{:5}* {}'.format('',schema_queries[2]['message'])) \n cur.execute(schema_queries[2]['query'])\n \n table = 'staging_ids'\n IAM_ROLE = 'arn:aws:iam::164084742828:role/dwhRole'\n \n #Parquet Issues : Null column are not written to output\n #copy_stmt = \"COPY {} FROM '{}' IAM_ROLE '{}' FORMAT AS PARQUET ;\".format(table, \"s3://sushanth-dend-capstone-files/i94-apr16.parquet/\", IAM_ROLE)\n\n copy_stmt = \"COPY {} FROM '{}' IAM_ROLE '{}' csv gzip IGNOREHEADER 1 region 'us-west-2' ;\".format(table, bucket_name, IAM_ROLE)\n print(copy_stmt)\n #print('Loading {} with data from {}'.format(table, data)) \n try:\n cur.execute(copy_stmt)\n conn.commit()\n except psycopg2.Error as e:\n print(e)\n conn.close()", "title": "" }, { "docid": "a123e5ee2d7825f9607ebd45c835860d", "score": "0.46567413", "text": "def create_table_glue(self, target_S3URI, name_crawler, Role, DatabaseName,\n TablePrefix, from_athena=True, update_schema=None):\n\n # Get connection\n #client_glue = client['glue']\n\n table_name = '{}{}'.format(TablePrefix, os.path.basename(target_S3URI).lower())\n\n ## Remove table if exist\n try:\n response = self.client['glue'].delete_table(\n DatabaseName=DatabaseName,\n Name=table_name\n )\n except Exception as e:\n print(e)\n pass\n\n # Remove if exist\n try:\n self.client['glue'].delete_crawler(\n Name=name_crawler\n 
)\n except Exception as e:\n print(e)\n pass\n\n self.client['glue'].create_crawler(\n Name=name_crawler,\n Role=Role,\n DatabaseName=DatabaseName,\n #Description='Parse the symbols filtered by a given timestamp',\n Targets={\n 'S3Targets': [\n {\n 'Path': target_S3URI,\n 'Exclusions': [\n '*.csv.metadata',\n ]\n },\n ],\n },\n TablePrefix=TablePrefix,\n SchemaChangePolicy={\n 'UpdateBehavior': 'LOG',\n 'DeleteBehavior': 'LOG'\n },\n )\n\n # Wait until job is done\n self.client['glue'].start_crawler(Name=name_crawler)\n status = None\n while status == 'RUNNING' or status == None:\n response_crawler = self.client['glue'].get_crawler(Name=name_crawler)\n status = response_crawler['Crawler']['State']\n if (status == 'RUNNING' or status == None):\n time.sleep(10)\n\n # Update Schema\n if from_athena:\n # update schema serde\n\n response = self.client['glue'].get_table(\n DatabaseName=DatabaseName,\n Name=table_name\n )['Table']\n\n serde = {\n \"SerializationLibrary\": \"org.apache.hadoop.hive.serde2.OpenCSVSerde\",\n \"Parameters\": {\n \"escapeChar\": \"\\\\\",\n \"quoteChar\": '\"',\n \"separatorChar\": \",\",\n \"serialization.format\": \"1\",\n },\n }\n\n response['StorageDescriptor']['SerdeInfo'] = serde\n key_to_remove = ['DatabaseName', 'CreateTime', 'UpdateTime', 'CreatedBy',\n 'IsRegisteredWithLakeFormation', 'CatalogId']\n for key in key_to_remove:\n response.pop(key, None)\n\n self.client['glue'].update_table(\n DatabaseName=DatabaseName,\n TableInput=response\n )\n\n # Update schema\n\n if update_schema != None:\n\n self.update_schema_table(\n database=DatabaseName, table=table_name, schema=update_schema)\n\n response = self.client['glue'].get_table(\n DatabaseName=DatabaseName,\n Name=table_name\n )['Table']\n\n return response", "title": "" }, { "docid": "3c94c0288234f46850a6246dde1e615d", "score": "0.46482816", "text": "def validate_storage_with_head(self):\n if self.manager.storage.current is None:\n self.get_storage_list()\n\n if len(self.manager.storage.current) == 0:\n self.get_storage_list()\n\n if self.manager.storage.orphaned is None:\n self.manager.storage.orphaned = []\n\n # Note: This version relies on Deuce to tell us that a block is\n # orphaned so it is the most accurate at the time this\n # function is called. 
However, there is still the chance that\n # a block has a change of state between when we access it here\n # and when it actually gets cleaned up\n for storage_id in self.manager.storage.current:\n\n self.log.debug('Project ID {0}, Vault {1} - '\n 'Validating Storage Block: {2}'\n .format(self.vault.project_id,\n self.vault.vault_id,\n storage_id))\n try:\n block = self.deuceclient.HeadBlockStorage(self.vault,\n self.vault.\n storageblocks[\n storage_id])\n except Exception as ex:\n # if there was a problem just mark the block as None so it\n # get ignored for this iteration of the loop\n self.log.warn('Project ID {0}, Vault {1} - '\n 'Storage Block {2} error heading block ({3}): {4}'\n .format(self.vault.project_id,\n self.vault.vault_id,\n storage_id,\n type(ex),\n str(ex)))\n block = None\n\n # if there was a problem then go to the next block_id\n if block is None:\n self.log.warn('Project ID {0}, Vault {1} - '\n 'Storage Block {2} no block data to analyze'\n .format(self.vault.project_id,\n self.vault.vault_id,\n storage_id))\n continue\n\n if block.block_orphaned:\n self.log.info('Project ID {0}, Vault {1} - '\n 'Found Orphaned Storage Block {2}'\n .format(\n self.vault.project_id,\n self.vault.vault_id,\n storage_id))\n self.manager.storage.orphaned.append(storage_id)\n\n block_size = len(block)\n\n self.log.info('Storage Block ID {0} - block size {1}'\n .format(storage_id, block_size))\n\n if block_size == 0:\n mid, sid = storage_id.split('_')\n\n self.log.info('\\tBlock ID: {0}'.format(mid))\n self.log.info('\\tStorage UUID: {0}'.format(sid))\n\n if mid in self.vault.blocks:\n self.log.info('\\tBlock ID {0} in Vault'.format(mid))\n\n self.log.info('Project ID {0}, Vault {1} - '\n 'Located block {2} matching '\n 'orphaned block {3}. Using for '\n 'block size'.format(\n self.vault.project_id,\n self.vault.vault_id,\n mid,\n storage_id))\n self.log.info('\\tUpdating Block Size from {0} to {1}'\n .format(block_size,\n len(self.vault.blocks[mid])))\n block_size = len(self.vault.blocks[mid])\n self.manager.orphaned_counter.add(1, block_size)", "title": "" }, { "docid": "8d2679c24ae2e7fb3d99d33502b6c4e6", "score": "0.46305004", "text": "def create_non_exist_tables(self, table_diff):\n if len(table_diff) > 0:\n for table in table_diff:\n Table_Init.create_table(table, self.global_config)\n self.log.info('Updated database schema, table names now match configuration.')\n else:\n self.log.info('Database Schema and Configuration table names already match.')", "title": "" }, { "docid": "bb640d5ec7c2e8c1a5b23b1307ebec09", "score": "0.4630255", "text": "def create_new_table_from_old(self, table_name, old_table_name):\n if table_name == \"\":\n return False\n if table_name in self.available_tables():\n return False\n dic = self.get_table(old_table_name)\n dic['_owner'] = COMPUTER_NAME\n dic['_plays'] = 0\n dic['table_name'] = table_name\n self.db[TABLES_COLLECTION].insert_one(dic)\n return True", "title": "" }, { "docid": "ec30a803c5d9337264ddd24645919c84", "score": "0.46291238", "text": "def check_stored_cv_files(dataset_name=\"basil\"):\n if \"basil\" == dataset_name:\n cv_url = \"gs://nkem/basil_4k_oldnet/region_graph/\"\n elif \"pinky40\" == dataset_name:\n cv_url = \"gs://nkem/pinky40_v11/mst_trimmed_sem_remap/region_graph/\"\n elif \"pinky100\" == dataset_name:\n cv_url = \"gs://nkem/pinky100_v0/region_graph/\"\n else:\n raise Exception(\"Could not identify region graph ressource\")\n\n with storage.SimpleStorage(cv_url) as cv_st:\n dir_path = creator_utils.dir_from_layer_name(\n 
creator_utils.layer_name_from_cv_url(cv_st.layer_path))\n\n file_paths = list(cv_st.list_files())\n\n c = 0\n n_file_paths = len(file_paths)\n time_start = time.time()\n for i_fp, fp in enumerate(file_paths):\n if i_fp % 1000 == 1:\n dt = time.time() - time_start\n eta = dt / i_fp * n_file_paths - dt\n print(\"%d / %d - dt: %.3fs - eta: %.3fs\" % (\n i_fp, n_file_paths, dt, eta))\n\n if not os.path.exists(dir_path + fp[:-4] + \".h5\"):\n print(dir_path + fp[:-4] + \".h5\")\n c += 1\n\n print(\"%d files were missing\" % c)", "title": "" }, { "docid": "0a6900facab9c77ae8e55628e63fe9fe", "score": "0.46216965", "text": "def clean_temp_storage_dirs(self):\n curr_tmp_dir = os.path.join('/tmp', 'owtf', str(self.config.owtf_pid))\n new_tmp_dir = os.path.join('/tmp', 'owtf', 'old-%d' % self.config.owtf_pid)\n if os.path.exists(curr_tmp_dir) and os.access(curr_tmp_dir, os.W_OK):\n os.rename(curr_tmp_dir, new_tmp_dir)", "title": "" }, { "docid": "cc0bc829c35812f27d351751b191f460", "score": "0.4609658", "text": "def get_table_location(self, database: str, table: str):\n res: Dict = self._client_glue.get_table(DatabaseName=database, Name=table)\n try:\n return res[\"Table\"][\"StorageDescriptor\"][\"Location\"]\n except KeyError:\n raise InvalidTable(f\"{database}.{table}\")", "title": "" }, { "docid": "3470da81db20a0548faa723797f0adce", "score": "0.46041432", "text": "def serialize_df_to_gcs_parquet(self, source_tbl: pd.DataFrame, dest_table: str) -> str:\n LOG.info(f\"Serializing {source_tbl.shape[0]} rows...\")\n arrow_tbl = pa.Table.from_pandas(source_tbl, preserve_index=False)\n tmp_file = tempfile.TemporaryFile()\n pq_writer = pq.ParquetWriter(tmp_file, arrow_tbl.schema, version='2.0',\n use_deprecated_int96_timestamps=True)\n try:\n pq_writer.write_table(arrow_tbl, 50000)\n except Exception as err:\n LOG.error(f'Serlialiation error: {arrow_tbl.schema}!')\n raise err\n pq_writer.close()\n tmp_file.seek(0)\n\n LOG.info(f'Uploading file {tmp_file} to gcs...')\n tstamp = dt.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')\n client = storage.Client()\n bucket = client.get_bucket(self.bucket_id)\n blob = bucket.blob(f'etl/{dest_table}/{tstamp}.parquet')\n blob.upload_from_file(tmp_file)\n tmp_file.close()\n dest_path = f\"gs://{self.bucket_id}/{blob.name}\"\n LOG.info(f\"Upload complete...destination is: {dest_path}...\")\n return dest_path", "title": "" }, { "docid": "3fac6602bd53afb2359ad71b09e790bc", "score": "0.45872152", "text": "def verify_temp_storage_path():\n global TEMP_STORAGE_PATH\n if not os.path.exists(TEMP_STORAGE_PATH):\n os.makedirs(TEMP_STORAGE_PATH)\n return True", "title": "" }, { "docid": "35d9025bbc3cb9be9823ddab53ad05ef", "score": "0.45780593", "text": "def db_to_storage(bucket):\n with tqdm(total=total_files) as pbar:\n query = (\"SELECT filename FROM os_migration.dzis WHERE copied IS NULL OR copied = 0\")\n cursor.execute(query)\n for file in cursor:\n print(file[0])\n client.upload_file(file[0], bucket, file[0].replace(\"/mnt/dzis/\",\"\" ), ExtraArgs={'ACL':'public-read'})\n update_query = (\"UPDATE os_migration.dzis SET copied = 1 WHERE filename LIKE '%s'\" % file[0])\n cursor1.execute(update_query)\n cnx1.commit()\n pbar.update(1)", "title": "" }, { "docid": "6f2772fb8760283cf2a0eafd4c1408d6", "score": "0.4576871", "text": "def extract_table(table_name, src, dst, search_string):\n # find files\n fnames = list(src.rglob(search_string))\n # user input\n is_ok = ask_user_go_ahead(fnames)\n # exit early if user aborted\n if not is_ok:\n print('Nothing done')\n return\n # main 
loop\n for fname in tqdm(fnames):\n dst_fname = dst / fname.relative_to(src)\n dst_fname.parent.mkdir(parents=True, exist_ok=True)\n copy_table(table_name, fname, dst_fname)", "title": "" }, { "docid": "c6667cb9619f4da187b66644fc51e8c1", "score": "0.45749632", "text": "def _checktables(self):\n self._cur.execute(\"PRAGMA TABLE_INFO(NODES)\")\n if (self._cur.fetchone() is None):\n # table doesn't exist, create it\n # SQLite does have constraints implemented at the moment\n # so datatype will just be a string\n self._cur.execute(\"CREATE TABLE NODES\"\n + \"(ID INTEGER PRIMARY KEY AUTOINCREMENT,\"\n + \"DATA BLOB NOT NULL)\")\n self._cur.execute(\"CREATE TABLE TAGS\"\n + \"(ID INTEGER PRIMARY KEY AUTOINCREMENT,\"\n + \"DATA BLOB NOT NULL UNIQUE)\")\n self._cur.execute(\"CREATE TABLE LOOKUP\"\n + \"(NODE INTEGER NOT NULL, TAG INTEGER NOT NULL,\"\n + \" PRIMARY KEY(NODE, TAG))\")\n\n self._cur.execute(\"CREATE TABLE KEY\"\n + \"(THEKEY TEXT NOT NULL DEFAULT '')\")\n self._cur.execute(\"INSERT INTO KEY VALUES('')\")\n\n try:\n self._con.commit()\n except DatabaseException as e:\n self._con.rollback()\n raise e", "title": "" }, { "docid": "444867df3ebc83d6f3998b79a6effa4c", "score": "0.45730546", "text": "def contains_storage_reference(trace):\n for t_constraint in trace.tran_constraints:\n if any(map(lambda n: n.startswith(\"storage[\"), t_constraint.slot_names)):\n return True\n return False", "title": "" }, { "docid": "053b3cdb2d6e0d360b2a12de9aab8188", "score": "0.45681733", "text": "def test_process_usage_and_storage_with_invalid_data(self):\n pod_report = f\"{self.temp_dir}/e6b3701e-1e91-433b-b238-a31e49937558_February-2019-my-ocp-cluster-1-invalid.csv\"\n storage_report = f\"{self.temp_dir}/e6b3701e-1e91-433b-b238-a31e49937558_storage-invalid.csv\"\n\n pod_data = []\n storage_data = []\n with open(self.test_report_path) as f:\n reader = csv.DictReader(f)\n for row in reader:\n row[\"node\"] = None\n pod_data.append(row)\n\n header = pod_data[0].keys()\n with open(pod_report, \"w\") as f:\n writer = csv.DictWriter(f, fieldnames=header)\n writer.writeheader()\n writer.writerows(pod_data)\n\n with open(self.storage_report_path) as f:\n reader = csv.DictReader(f)\n for row in reader:\n row[\"persistentvolume\"] = None\n storage_data.append(row)\n\n header = storage_data[0].keys()\n with open(storage_report, \"w\") as f:\n writer = csv.DictWriter(f, fieldnames=header)\n writer.writeheader()\n writer.writerows(storage_data)\n\n storage_processor = OCPReportProcessor(\n schema_name=\"acct10001\",\n report_path=storage_report,\n compression=UNCOMPRESSED,\n provider_uuid=self.ocp_provider_uuid,\n )\n\n report_db = self.accessor\n table_name = OCP_REPORT_TABLE_MAP[\"storage_line_item\"]\n report_schema = report_db.report_schema\n table = getattr(report_schema, table_name)\n with schema_context(self.schema):\n storage_before_count = table.objects.count()\n\n storage_processor.process()\n\n with schema_context(self.schema):\n storage_after_count = table.objects.count()\n self.assertEqual(storage_after_count, storage_before_count)\n\n processor = OCPReportProcessor(\n schema_name=\"acct10001\",\n report_path=pod_report,\n compression=UNCOMPRESSED,\n provider_uuid=self.ocp_provider_uuid,\n )\n\n report_db = self.accessor\n table_name = OCP_REPORT_TABLE_MAP[\"line_item\"]\n report_schema = report_db.report_schema\n table = getattr(report_schema, table_name)\n with schema_context(self.schema):\n before_count = table.objects.count()\n\n processor.process()\n\n with schema_context(self.schema):\n 
after_count = table.objects.count()\n self.assertEqual(after_count, before_count)", "title": "" }, { "docid": "9687d135bd8919beb1ef0b6cc83c193d", "score": "0.456417", "text": "def test_directories_upsert_directory(self):\n pass", "title": "" }, { "docid": "a7aa65e8b843ec66b52695f6a0c6c990", "score": "0.45477638", "text": "def check_disk_usage():\n csv_sizes = {}\n total_size = 0\n for dir_name in os.listdir(os.path.join(settings.MEDIA_ROOT, 'tiles')):\n size = get_directory_size(os.path.join(settings.MEDIA_ROOT, 'tiles', dir_name))\n total_size = size + total_size\n csv_sizes[dir_name] = size\n\n if total_size < max_usage:\n return\n accesses = {}\n\n for csv_name in csv_sizes:\n doc = Document.objects.get(file_name=csv_name+'.csv')\n accesses[doc.last_access] = csv_name\n\n while total_size > max_usage:\n oldest = accesses.pop(min(accesses.keys()))\n shutil.rmtree(os.path.join(settings.MEDIA_ROOT, 'tiles', oldest))\n total_size = total_size - csv_sizes.pop(oldest)", "title": "" }, { "docid": "98e48513f91b108fce0a392cebab9fa7", "score": "0.45466164", "text": "def _findTable(self, tableName):\n def has_table(tableName, schema):\n identifier_preparer = self.engine.dialect.identifier_preparer\n qschema = identifier_preparer.quote_identifier(schema)\n tableNames = self.selectScalars(\n text(\"SELECT name FROM %s.sqlite_master\" % qschema))\n return tableName in tableNames\n\n import sys\n if sys.platform == 'win32' and self.engine.name == 'sqlite':\n # work around bug http://bugs.python.org/issue8192\n hasTable = has_table\n else:\n hasTable = self.engine.has_table\n if hasTable(tableName, schema=self._mainSchema):\n return self._mainSchema\n else:\n for schema in self.attached.values():\n if hasTable(tableName, schema=schema):\n return schema\n return None", "title": "" }, { "docid": "6579bda1f44ccea58229b71f06d26644", "score": "0.45174897", "text": "def upload_and_write_to_bq(self,\n source_tbl: pd.DataFrame,\n dest_table: str,\n append: bool = False) -> None:\n t1 = time.time()\n dest_path = self.serialize_df_to_gcs_parquet(source_tbl, dest_table)\n job_config = bigquery.LoadJobConfig()\n job_config.source_format = bigquery.SourceFormat.PARQUET\n if append:\n job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND\n else:\n job_config.write_disposition = bigquery.WriteDisposition.WRITE_TRUNCATE\n\n job = self.bqclient.load_table_from_uri(dest_path, dest_table,\n job_config=job_config)\n job.result()\n if job.errors:\n LOG.error(job.errors)\n raise JobInsertError\n LOG.info(f'Load job complete in {round(time.time()-t1)} seconds...')", "title": "" }, { "docid": "1ca6c7f899b5a265d7cd2639f6af612a", "score": "0.45104846", "text": "def load_staging_tables(cur, conn):\n print('Start loading staging tables...')\n \n for query in copy_table_queries:\n print('executing',query,'this process might takes several minutes...be patient.')\n cur.execute(query)\n conn.commit()\n \n print('All staging tables loaded')", "title": "" }, { "docid": "fbed739934a78a0e2a82e84ac3841581", "score": "0.4505371", "text": "def testCheckStorageMetadata(self):\n with shared_test_lib.TempDirectory():\n storage_file = sqlite_file.SQLiteStorageFile()\n\n metadata_values = {\n 'compression_format': definitions.COMPRESSION_FORMAT_ZLIB,\n 'format_version': '{0:d}'.format(storage_file._FORMAT_VERSION),\n 'serialization_format': definitions.SERIALIZER_FORMAT_JSON,\n 'storage_type': definitions.STORAGE_TYPE_SESSION}\n storage_file._CheckStorageMetadata(metadata_values)\n\n metadata_values['format_version'] = 'bogus'\n 
with self.assertRaises(IOError):\n storage_file._CheckStorageMetadata(metadata_values)\n\n metadata_values['format_version'] = '1'\n with self.assertRaises(IOError):\n storage_file._CheckStorageMetadata(metadata_values)\n\n metadata_values['format_version'] = '{0:d}'.format(\n storage_file._FORMAT_VERSION)\n metadata_values['compression_format'] = None\n with self.assertRaises(IOError):\n storage_file._CheckStorageMetadata(metadata_values)\n\n metadata_values['compression_format'] = (\n definitions.COMPRESSION_FORMAT_ZLIB)\n metadata_values['serialization_format'] = None\n with self.assertRaises(IOError):\n storage_file._CheckStorageMetadata(metadata_values)\n\n metadata_values['serialization_format'] = (\n definitions.SERIALIZER_FORMAT_JSON)\n metadata_values['storage_type'] = None\n with self.assertRaises(IOError):\n storage_file._CheckStorageMetadata(metadata_values)", "title": "" }, { "docid": "e4e90dcfe81c28e01854d675f249f640", "score": "0.45035547", "text": "def test_pep8_conformance_db_storage(self):\n pep8s = pep8.StyleGuide(quiet=True)\n result = pep8s.check_files(['models/engine/db_storage.py'])\n self.assertEqual(result.total_errors, 0,\n \"Found code style errors (and warnings).\")", "title": "" }, { "docid": "e4e90dcfe81c28e01854d675f249f640", "score": "0.45035547", "text": "def test_pep8_conformance_db_storage(self):\n pep8s = pep8.StyleGuide(quiet=True)\n result = pep8s.check_files(['models/engine/db_storage.py'])\n self.assertEqual(result.total_errors, 0,\n \"Found code style errors (and warnings).\")", "title": "" }, { "docid": "a90b20c57daf3e9e4de39a00ea5a2352", "score": "0.45004964", "text": "def search_sha224_exist(self, shaHD=None):\n for dirpath in list(self.image_paths.keys()):\n if self.fexistSha(dirpath + os.sep, sha_hd=shaHD):\n return (True, dirpath)\n return (False, None)", "title": "" }, { "docid": "541825ed153c6fe4122c40b9e2a7b1e5", "score": "0.4496844", "text": "def master_backup(self):\r\n\r\n self.remote_xlock()\r\n errors = False\r\n\r\n try:\r\n self.pg_start_backup(\"FullBackup\")\r\n self.remote_walmgr(\"xrotate\")\r\n\r\n data_dir = self.cf.getfile(\"master_data\")\r\n dst_loc = self.cf.getfile(\"full_backup\")\r\n if dst_loc[-1] != \"/\":\r\n dst_loc += \"/\"\r\n\r\n master_spc_dir = os.path.join(data_dir, \"pg_tblspc\")\r\n slave_spc_dir = dst_loc + \"tmpspc\"\r\n\r\n # copy data\r\n self.chdir(data_dir)\r\n cmdline = [\r\n \"--delete\",\r\n \"--exclude\", \".*\",\r\n \"--exclude\", \"*.pid\",\r\n \"--exclude\", \"*.opts\",\r\n \"--exclude\", \"*.conf\",\r\n \"--exclude\", \"pg_xlog\",\r\n \"--exclude\", \"pg_tblspc\",\r\n \"--exclude\", \"pg_log\",\r\n \"--exclude\", \"base/pgsql_tmp\",\r\n \"--copy-unsafe-links\",\r\n \".\", dst_loc]\r\n self.exec_big_rsync(cmdline)\r\n\r\n # copy tblspc first, to test\r\n if os.path.isdir(master_spc_dir):\r\n self.log.info(\"Checking tablespaces\")\r\n list = os.listdir(master_spc_dir)\r\n if len(list) > 0:\r\n self.remote_mkdir(slave_spc_dir)\r\n for tblspc in list:\r\n if tblspc[0] == \".\":\r\n continue\r\n tfn = os.path.join(master_spc_dir, tblspc)\r\n if not os.path.islink(tfn):\r\n self.log.info(\"Suspicious pg_tblspc entry: %s\", tblspc)\r\n continue\r\n spc_path = os.path.realpath(tfn)\r\n self.log.info(\"Got tablespace %s: %s\", tblspc, spc_path)\r\n dstfn = slave_spc_dir + \"/\" + tblspc\r\n\r\n try:\r\n os.chdir(spc_path)\r\n except Exception, det:\r\n self.log.warning(\"Broken link: %s\", det)\r\n continue\r\n cmdline = [ \"--delete\", \"--exclude\", \".*\", \"--copy-unsafe-links\", \".\", dstfn]\r\n 
self.exec_big_rsync(cmdline)\r\n\r\n # copy the pg_log and pg_xlog directories, these may be\r\n # symlinked to nonstandard location, so pay attention\r\n self.rsync_log_directory(os.path.join(data_dir, \"pg_log\"), dst_loc)\r\n self.rsync_log_directory(os.path.join(data_dir, \"pg_xlog\"), dst_loc)\r\n\r\n # copy config files\r\n conf_dst_loc = self.cf.getfile(\"config_backup\", \"\")\r\n if conf_dst_loc:\r\n master_conf_dir = os.path.dirname(self.cf.getfile(\"master_config\"))\r\n self.log.info(\"Backup conf files from %s\", master_conf_dir)\r\n self.chdir(master_conf_dir)\r\n cmdline = [\r\n \"--include\", \"*.conf\",\r\n \"--exclude\", \"*\",\r\n \".\", conf_dst_loc]\r\n self.exec_big_rsync(cmdline)\r\n\r\n self.remote_walmgr(\"xpurgewals\")\r\n except Exception, e:\r\n self.log.error(e)\r\n errors = True\r\n finally:\r\n try:\r\n self.pg_stop_backup()\r\n except:\r\n pass\r\n\r\n try:\r\n self.remote_walmgr(\"xrelease\")\r\n except:\r\n pass\r\n\r\n if not errors:\r\n self.log.info(\"Full backup successful\")\r\n else:\r\n self.log.error(\"Full backup failed.\")", "title": "" }, { "docid": "e89d654b88aa29e4ce7ed7532bb225ea", "score": "0.44964465", "text": "def cleanup_file_storage(self):\n gsettings = GlobalSettings(self.context)\n storage_loc = gsettings.storage_location\n if not os.path.exists(storage_loc):\n return 'storage location path \"%s\" does not exist' % storage_loc\n\n catalog = getToolByName(self.context, 'portal_catalog')\n number = self.clean_folder(catalog, storage_loc)\n return 'cleaned %i' % number", "title": "" }, { "docid": "d5828c1468961b5a51aa9774ef6fdf06", "score": "0.44921824", "text": "def table_coverage_check():\n seen = {}\n for group in model_group:\n for cls in model_group[group]['classes']:\n tbl = cls.__tablename__\n seen[tbl] = 1\n for tbl in Base.metadata.tables:\n if tbl in seen:\n del seen[tbl]\n else:\n seen[tbl] = 0\n for tbl in seen:\n print \"Table %s is missing from this script\" % tbl", "title": "" }, { "docid": "19daa70c5146989030fa68d15aebd106", "score": "0.44848758", "text": "def create_raw_table(connection):\n check_table_query = '''\n SELECT table_name\n FROM information_schema.tables\n WHERE table_schema='raw'\n AND table_type='BASE TABLE';\n ''' \n\n try: \n cursor = execute_query(connection, check_table_query)\n exist_table_list = cursor.fetchall()\n raw_dir_path = '../migration/raw/'\n raw_dir_file = os.listdir(raw_dir_path)\n\n for file_name in raw_dir_file:\n if (file_name.replace('create_table_', '').replace('.sql', ''),) not in exist_table_list:\n with open(raw_dir_path+file_name) as create_file:\n create_query = \"\".join(create_file.readlines())\n cursor = execute_query(connection, create_query)\n print(\"Successfully created {} table.\".format(file_name.replace('create_table_', '').replace('.sql', '')))\n\n except Exception as e:\n print(\"An error occurred: {}\".format(e))", "title": "" }, { "docid": "a1916a5d7094bb933f455bc74d4c52a6", "score": "0.44784948", "text": "def sync_bucket(self, schema_name, destination_bucket_name, date_range):", "title": "" }, { "docid": "4721e626a6e8d84561b596a7117f8a83", "score": "0.44708285", "text": "def copy_data(self):\n\n bucket_id = self.get_bucket_id()\n seen = set()\n\n def copy_to_workspace(value):\n if isinstance(value, str) and value.startswith('gs://'):\n src = getblob(value)\n destpath = 'gs://{}/{}'.format(bucket_id, src.name)\n if not (src.bucket.name == bucket_id or destpath in seen):\n try:\n copyblob(src, destpath)\n seen.add(destpath)\n return destpath\n except:\n print(\"Failed 
to copy\", value, file=sys.stderr)\n return value\n\n with self.hound.with_reason(\"<AUTOMATED>: Migrating files into workspace bucket\"):\n attributes = self.get_attributes()\n updated_attributes = {\n key:copy_to_workspace(value) for key,value in attributes.items()\n }\n # To save avoid redundant updates, only post updated attributes\n self.update_attributes({\n key:value for key,value in updated_attributes.items()\n if value != attributes[key]\n })\n\n for etype in self.get_entity_types():\n entity_df = self._get_entities_internal(etype).dropna(axis='columns', how='all')\n updated_df = entity_df.copy().applymap(copy_to_workspace)\n update_mask = (entity_df == updated_df).all()\n # Here's some crazy pandas operations, but ultimately, it just\n # grabs columns with at least one changed entity\n self.update_entity_attributes(etype, updated_df[update_mask[~update_mask].index])", "title": "" }, { "docid": "7e46a62901faed3bfb39205c844f7577", "score": "0.44690555", "text": "def test_storages_fstab(self, context: dict, cloud: Cloud, servers: dict):\n server = servers['M1']\n mount_table = lifecycle.get_mount_table(cloud, server)\n context['M1_mount_table'] = mount_table\n lifecycle.assert_mount_point_in_fstab(cloud, server,\n mount_table=mount_table,\n mount_point='/media/diskmount')", "title": "" }, { "docid": "723d564ab63a0ce21815b43c07f96143", "score": "0.4465439", "text": "def table_ddl_if_exists(self):\n\n return only_on([\"postgresql\", \"mysql\", \"mariadb\", \"sqlite\"])", "title": "" }, { "docid": "091466b6f6a40b5ab817d60f6b33a6d0", "score": "0.44538397", "text": "def create_db_config(connection):\n check_table_query = '''\n SELECT table_name\n FROM information_schema.tables\n WHERE table_schema='public'\n AND table_type='BASE TABLE';\n ''' \n\n try: \n cursor = execute_query(connection, check_table_query)\n exist_table_list = cursor.fetchall()\n file_dir_path = '../migration/system/'\n file_dir_file = os.listdir(file_dir_path)\n\n for file_name in file_dir_file:\n if (file_name.replace('.sql', '').replace('create_table_', ''), ) not in exist_table_list: \n with open(file_dir_path+file_name) as create_file:\n create_query = \"\".join(create_file.readlines())\n cursor = execute_query(connection, create_query)\n print(\"Successfully created {} table.\".format(file_name.replace('create_table_', '').replace('.sql', '')))\n\n except Exception as e:\n print(\"An error occurred: {}\".format(e))", "title": "" }, { "docid": "fb40e46d6da406ccda57276cabd1de6d", "score": "0.4453249", "text": "def _Check2():\r\n self.RunGsUtil(['rsync', '-d', tmpdir, suri(bucket_uri)])\r\n listing1 = _TailSet(tmpdir, self._FlatListDir(tmpdir))\r\n listing2 = _TailSet(suri(bucket_uri), self._FlatListBucket(bucket_uri))\r\n # Dir should have un-altered content.\r\n self.assertEquals(\r\n listing1, set(['/obj1', '/obj2', '/subdir/obj3', '/symlink']))\r\n # Bucket should have content like dir but without the symlink, and\r\n # without subdir objects synchronized.\r\n self.assertEquals(\r\n listing2, set(['/obj1', '/obj2', '/subdir/obj5', '/symlink']))\r\n self.assertEquals('obj1', self.RunGsUtil(\r\n ['cat', suri(bucket_uri, 'symlink')], return_stdout=True))", "title": "" }, { "docid": "d617b0f749f316e0fbd54dfa2efd9b43", "score": "0.44516984", "text": "def _Check2():\r\n self.RunGsUtil(['rsync', '-d', '-c', suri(bucket_uri), tmpdir])\r\n listing1 = _TailSet(suri(bucket_uri), self._FlatListBucket(bucket_uri))\r\n listing2 = _TailSet(tmpdir, self._FlatListDir(tmpdir))\r\n # Bucket should have un-altered content.\r\n 
self.assertEquals(listing1, set(['/obj1', '/obj2', '/subdir/obj3']))\r\n # Dir should have content like bucket but without the subdir objects\r\n # synchronized.\r\n self.assertEquals(listing2, set(['/obj1', '/obj2', '/subdir/obj5']))\r\n # Assert that the src/dest objects that had same length but different\r\n # content were synchronized (bucket to dir sync with -c uses checksums).\r\n self.assertEquals('obj2', self.RunGsUtil(\r\n ['cat', suri(bucket_uri, 'obj2')], return_stdout=True))\r\n with open(os.path.join(tmpdir, 'obj2')) as f:\r\n self.assertEquals('obj2', '\\n'.join(f.readlines()))", "title": "" }, { "docid": "b571ebc0eb9875e14660bc873ae27b87", "score": "0.4450228", "text": "def table_location(connection, tableName):\n desc = desc_table(connection, tableName, True)\n\n location = util.search_with_regex(desc, \"location\\\\s*:\\\\s*[^,]+\", 0)\n location = util.replace_with_regex(\"^location\\\\s*:(\\\\s*)\", \"\", location)\n location = util.replace_with_regex(\"^[A-Za-z ]*://[^:/]*(:[0-9]+)?\", \"\", location)\n\n return location", "title": "" }, { "docid": "00919d61140da9bf44450ce6e96d9836", "score": "0.44489363", "text": "def PrepareMergeTaskStorage(self, unsused_task_name):\n raise NotImplementedError()", "title": "" }, { "docid": "066c5dd6e1f113d2f67e622b32cd4deb", "score": "0.4447711", "text": "def test_getNewAndOldTables(self):\n\n self.backup.cs.getStorageFolders = mock.MagicMock(\n return_value=['deleteddir','table1'])\n self.backup.bq.getAllTables = mock.MagicMock(\n return_value=['table1', 'newtable'])\n\n new_tables = ['newtable']\n existing_tables = ['table1']\n\n self.assertEqual((new_tables, existing_tables),\n self.backup.getNewAndOldTables('some path'))", "title": "" }, { "docid": "caedd55f86905e45cc05cd67bb4fb102", "score": "0.44460967", "text": "def _rename_hstore_required(\n self, old_table_name, new_table_name, old_field, new_field, key\n ):\n\n old_name = self._required_constraint_name(\n old_table_name, old_field, key\n )\n new_name = self._required_constraint_name(\n new_table_name, new_field, key\n )\n\n sql = self.sql_hstore_required_rename.format(\n table=self.quote_name(new_table_name),\n old_name=self.quote_name(old_name),\n new_name=self.quote_name(new_name),\n )\n self.execute(sql)", "title": "" }, { "docid": "1d25ce007df13116ccde2ded905ba55e", "score": "0.44412425", "text": "def create_vocab_tables(src_vocab_file, tgt_vocab_file, share_vocab):\n src_vocab_table = lookup_ops.index_table_from_file(\n src_vocab_file, default_value=UNK_ID)\n if share_vocab:\n tgt_vocab_table = src_vocab_table\n else:\n tgt_vocab_table = lookup_ops.index_table_from_file(\n tgt_vocab_file, default_value=UNK_ID)\n return src_vocab_table, tgt_vocab_table", "title": "" }, { "docid": "12b52a4965e300ef6b3ef81bc9bebe79", "score": "0.44304895", "text": "def write_upload_table(table_dict):\n df = table_dict['df']\n name = table_dict['name']\n print(name)\n df.write.mode('overwrite').format('delta').save(name)\n aurora_insight_upload(name)", "title": "" }, { "docid": "09efedd1a1e84e20d156c83b140f8a73", "score": "0.4429989", "text": "def volume_file_exists(self, relative_file_path):", "title": "" }, { "docid": "afa23649b930fd06a83ff9b9c0bbb6e3", "score": "0.44265682", "text": "def table_location(self):\n return url_path_join(self.warehouse_path, self.table) + '/'", "title": "" }, { "docid": "afa23649b930fd06a83ff9b9c0bbb6e3", "score": "0.44265682", "text": "def table_location(self):\n return url_path_join(self.warehouse_path, self.table) + '/'", "title": "" }, { "docid": 
"25722fd758dec746ff75be6c19823b44", "score": "0.44212493", "text": "def validate_system_storage_config(self):\n self.error_message = None\n\n statvfs = os.statvfs(os.getcwd())\n available_disk_space = (statvfs.f_frsize * statvfs.f_bavail ) / (2 ** 30)\n if not available_disk_space >= Settings.get('MINIMUM_STORAGE', None):\n self.error_message = K.INVALID_SYSTEM_STORAGE_CONFIG + str(Settings.get('MINIMUM_STORAGE', None))\n return K.NOT_VALID\n return K.VALID", "title": "" }, { "docid": "8e00f413295057f2fa1051bb0869f6ab", "score": "0.44186908", "text": "def scan_storage(self):\n self.storage = []\n for el in os.listdir(self.videodir):\n file, ext = os.path.splitext(self.videodir + \"/\" + el)\n _, file = os.path.split(file)\n self.storage.append((file, ext))", "title": "" }, { "docid": "0e63161764637fbf17c21c5fcdc9240e", "score": "0.44168815", "text": "def _Check1():\r\n self.RunGsUtil(['rsync', '-d', suri(bucket_uri), tmpdir])\r\n listing1 = _TailSet(suri(bucket_uri), self._FlatListBucket(bucket_uri))\r\n listing2 = _TailSet(tmpdir, self._FlatListDir(tmpdir))\r\n # Bucket should have un-altered content.\r\n self.assertEquals(listing1, set(['/obj1', '/obj2', '/subdir/obj3']))\r\n # Dir should have content like bucket but without the subdir objects\r\n # synchronized.\r\n self.assertEquals(listing2, set(['/obj1', '/obj2', '/subdir/obj5']))\r\n # Assert that the src/dest objects that had same length but different\r\n # content were not synchronized (bucket to dir sync doesn't use checksums\r\n # unless you specify -c).\r\n self.assertEquals('obj2', self.RunGsUtil(\r\n ['cat', suri(bucket_uri, 'obj2')], return_stdout=True))\r\n with open(os.path.join(tmpdir, 'obj2')) as f:\r\n self.assertEquals('OBJ2', '\\n'.join(f.readlines()))", "title": "" }, { "docid": "cb7d4caec4776798fd8d8f67aed61ce9", "score": "0.44164097", "text": "def _import_tags_tables(self):\n self._positive_tags = self.data_menager.positive_tags_en\n self._negative_tags = self.data_menager.negative_tags_en", "title": "" }, { "docid": "ea1ce8d68fbe7991d3e9414245abaaaa", "score": "0.44115278", "text": "def test_tables_correct(self):\n for suffix in SORTED_TABLE_NAMES:\n _, html, csv_table, _ = generate_table(\n results=self.results,\n best_results=self.best_results,\n options=self.options,\n group_dir=\"group_dir\",\n fig_dir=self.fig_dir,\n pp_locations=[\"pp_1\", \"pp_2\"],\n table_name=\"table_name\",\n suffix=suffix)\n html_table_name = os.path.join(self.expected_results_dir,\n f\"{suffix}.html\")\n csv_table_name = os.path.join(self.expected_results_dir,\n f\"{suffix}.csv\")\n for f, t in zip([html_table_name, csv_table_name],\n [html[\"table\"], csv_table]):\n self.compare_files(f, t)", "title": "" }, { "docid": "4e595fe9b127435314aa38320a718f69", "score": "0.44112322", "text": "def test_empty_location(self):\n storage = self.storage_class(location='')\n self.assertEqual(storage.base_location, '')\n self.assertEqual(storage.location, os.getcwd())", "title": "" }, { "docid": "9d45450ed8ab8dc354ea610866b488a2", "score": "0.44077817", "text": "def _Check2():\r\n listing1 = self.RunGsUtil(['ls', '-la', suri(bucket1_uri)],\r\n return_stdout=True).split('\\n')\r\n listing2 = self.RunGsUtil(['ls', '-la', suri(bucket2_uri)],\r\n return_stdout=True).split('\\n')\r\n # 2 lines of listing output, 1 summary line, 1 empty line from \\n split.\r\n self.assertEquals(len(listing1), 4)\r\n self.assertEquals(len(listing2), 4)\r\n\r\n # First object in each bucket should match in size and version-less name.\r\n size1, _, uri_str1, _ = 
listing1[0].split()\r\n self.assertEquals(size1, str(len('data0')))\r\n self.assertEquals(storage_uri(uri_str1).object_name, 'k')\r\n size2, _, uri_str2, _ = listing2[0].split()\r\n self.assertEquals(size2, str(len('data0')))\r\n self.assertEquals(storage_uri(uri_str2).object_name, 'k')\r\n\r\n # Similarly for second object in each bucket.\r\n size1, _, uri_str1, _ = listing1[1].split()\r\n self.assertEquals(size1, str(len('longer_data1')))\r\n self.assertEquals(storage_uri(uri_str1).object_name, 'k')\r\n size2, _, uri_str2, _ = listing2[1].split()\r\n self.assertEquals(size2, str(len('longer_data1')))\r\n self.assertEquals(storage_uri(uri_str2).object_name, 'k')", "title": "" }, { "docid": "c1cd100582ebbe5ba0c7e8ebfb78f32c", "score": "0.44051558", "text": "def mnt_sshfs(self,mnt_src,mnt_as_vol):\n\n sshfs = self.T.os_environ['SSHFS']+' '\n\n cmds = ['sudo umount -f %s > /dev/null 2>&1;' % mnt_as_vol]\n (_out,_err) = self.T.exec_cmds({'cmds':cmds})\n\n cmds = ['mkdir -p %s;' % mnt_as_vol,\n '%s %s %s -o ConnectTimeout=5 2>&1;' % (sshfs,mnt_src,mnt_as_vol),]\n\n err = False\n try:\n (_out,_err) = self.T.exec_cmds({'cmds':cmds})\n if _out.count('Connection reset'):\n err = True\n except:\n err = True\n\n if err:\n t = mnt_as_vol.split('/')[-1]\n serv_is_low_prod_use = len(self.s[ (self.s.tag==t) &\n (self.s.production_usage=='low') ] )\n if not serv_is_low_prod_use:\n i_trace()\n print mnt_src,'mount failed'\n (_out,_err) = self.T.exec_cmds({'cmds':['rm -fR %s ; ' % mnt_as_vol]})\n assert _err is None\n\n return", "title": "" }, { "docid": "87d3d910ff3166c878330475f6b4d463", "score": "0.4405074", "text": "def setUp(self):\n for f in [fastq1, fastq2]:\n if not os.path.exists(f):\n os.symlink(os.path.join(indir, os.path.basename(f)), f)", "title": "" }, { "docid": "87d3d910ff3166c878330475f6b4d463", "score": "0.4405074", "text": "def setUp(self):\n for f in [fastq1, fastq2]:\n if not os.path.exists(f):\n os.symlink(os.path.join(indir, os.path.basename(f)), f)", "title": "" }, { "docid": "87d3d910ff3166c878330475f6b4d463", "score": "0.4405074", "text": "def setUp(self):\n for f in [fastq1, fastq2]:\n if not os.path.exists(f):\n os.symlink(os.path.join(indir, os.path.basename(f)), f)", "title": "" }, { "docid": "87d3d910ff3166c878330475f6b4d463", "score": "0.4405074", "text": "def setUp(self):\n for f in [fastq1, fastq2]:\n if not os.path.exists(f):\n os.symlink(os.path.join(indir, os.path.basename(f)), f)", "title": "" }, { "docid": "a9c66df205ee19be2be8f5bb853dac28", "score": "0.4404362", "text": "def storage_path(store, rc):\n name, url = store['name'], store['url']\n for db in rc.databases:\n if db['name'] == name and db['url'] == url:\n path = os.path.join(rc.builddir, '_dbs', name, store['path'])\n break\n else:\n path = os.path.join(rc.builddir, '_stores', name, store['path'])\n os.makedirs(path, exist_ok=True)\n return path", "title": "" }, { "docid": "297df7a4ad6ea8ab620f5958e09f6ccf", "score": "0.44034997", "text": "def assertStagingEquals(self, expected):\n all_files = []\n for root, _, files in os.walk(self.tempdir):\n rel_root_posix = PosixRelPath(root, self.tempdir)\n for f in files:\n path = posixpath.join(rel_root_posix, StripSo(f))\n if path.startswith('./'):\n path = path[2:]\n all_files.append(path)\n self.assertEqual(set(expected), set(all_files))", "title": "" }, { "docid": "46698b4d99732c735beefbe9e7eecc7e", "score": "0.44000757", "text": "def test_hdu_index_table(hdu_index_table):\n location = hdu_index_table.hdu_location(obs_id=42, hdu_type=\"events\")\n assert 
location.path().as_posix() == \"a/b\"\n\n location = hdu_index_table.hdu_location(obs_id=42, hdu_type=\"bkg\")\n assert location is None\n\n assert hdu_index_table.summary().startswith(\"HDU index table\")", "title": "" }, { "docid": "95adb15dd70172bf907f524cd55e8ad7", "score": "0.4398608", "text": "def dump_compare(self, tbl, src_fn, dst_fn):\r\n self.log.info(\"Comparing dumps: %s\", tbl)\r\n self.cnt_insert = 0\r\n self.cnt_update = 0\r\n self.cnt_delete = 0\r\n self.total_src = 0\r\n self.total_dst = 0\r\n f1 = open(src_fn, \"r\", 64*1024)\r\n f2 = open(dst_fn, \"r\", 64*1024)\r\n src_ln = f1.readline()\r\n dst_ln = f2.readline()\r\n if src_ln: self.total_src += 1\r\n if dst_ln: self.total_dst += 1\r\n\r\n fix = \"fix.%s.sql\" % tbl\r\n if os.path.isfile(fix):\r\n os.unlink(fix)\r\n\r\n while src_ln or dst_ln:\r\n keep_src = keep_dst = 0\r\n if src_ln != dst_ln:\r\n src_row = self.get_row(src_ln)\r\n dst_row = self.get_row(dst_ln)\r\n\r\n diff = self.cmp_keys(src_row, dst_row)\r\n if diff > 0:\r\n # src > dst\r\n self.got_missed_delete(tbl, dst_row)\r\n keep_src = 1\r\n elif diff < 0:\r\n # src < dst\r\n self.got_missed_insert(tbl, src_row)\r\n keep_dst = 1\r\n else:\r\n if self.cmp_data(src_row, dst_row) != 0:\r\n self.got_missed_update(tbl, src_row, dst_row)\r\n\r\n if not keep_src:\r\n src_ln = f1.readline()\r\n if src_ln: self.total_src += 1\r\n if not keep_dst:\r\n dst_ln = f2.readline()\r\n if dst_ln: self.total_dst += 1\r\n\r\n self.log.info(\"finished %s: src: %d rows, dst: %d rows,\"\r\n \" missed: %d inserts, %d updates, %d deletes\",\r\n tbl, self.total_src, self.total_dst,\r\n self.cnt_insert, self.cnt_update, self.cnt_delete)", "title": "" }, { "docid": "953098218c6baf59e9753caf82c06684", "score": "0.4397646", "text": "def test_bucket_to_dir_minus_d(self):\r\n # Create bucket and dir with 1 overlapping object, 1 extra object at root\r\n # level in each, and 1 extra object 1 level down in each. 
Make the\r\n # overlapping objects named the same but with different content, to test\r\n # that we detect and properly copy in that case.\r\n bucket_uri = self.CreateBucket()\r\n tmpdir = self.CreateTempDir()\r\n subdir = os.path.join(tmpdir, 'subdir')\r\n os.mkdir(subdir)\r\n self.CreateObject(bucket_uri=bucket_uri, object_name='obj1',\r\n contents='obj1')\r\n self.CreateObject(bucket_uri=bucket_uri, object_name='obj2',\r\n contents='obj2')\r\n self.CreateObject(bucket_uri=bucket_uri, object_name='subdir/obj3',\r\n contents='subdir/obj3')\r\n self.CreateTempFile(tmpdir=tmpdir, file_name='obj2', contents='OBJ2')\r\n self.CreateTempFile(tmpdir=tmpdir, file_name='obj4', contents='obj4')\r\n self.CreateTempFile(tmpdir=subdir, file_name='obj5', contents='subdir/obj5')\r\n\r\n # Use @Retry as hedge against bucket listing eventual consistency.\r\n @Retry(AssertionError, tries=3, timeout_secs=1)\r\n def _Check1():\r\n \"\"\"Tests rsync works as expected.\"\"\"\r\n self.RunGsUtil(['rsync', '-d', suri(bucket_uri), tmpdir])\r\n listing1 = _TailSet(suri(bucket_uri), self._FlatListBucket(bucket_uri))\r\n listing2 = _TailSet(tmpdir, self._FlatListDir(tmpdir))\r\n # Bucket should have un-altered content.\r\n self.assertEquals(listing1, set(['/obj1', '/obj2', '/subdir/obj3']))\r\n # Dir should have content like bucket but without the subdir objects\r\n # synchronized.\r\n self.assertEquals(listing2, set(['/obj1', '/obj2', '/subdir/obj5']))\r\n # Assert that the src/dest objects that had same length but different\r\n # content were not synchronized (bucket to dir sync doesn't use checksums\r\n # unless you specify -c).\r\n self.assertEquals('obj2', self.RunGsUtil(\r\n ['cat', suri(bucket_uri, 'obj2')], return_stdout=True))\r\n with open(os.path.join(tmpdir, 'obj2')) as f:\r\n self.assertEquals('OBJ2', '\\n'.join(f.readlines()))\r\n _Check1()\r\n\r\n # Check that re-running the same rsync command causes no more changes.\r\n self.assertEquals(NO_CHANGES, self.RunGsUtil(\r\n ['rsync', '-d', suri(bucket_uri), tmpdir], return_stderr=True))\r\n\r\n # Now rerun the sync with the -c option.\r\n # Use @Retry as hedge against bucket listing eventual consistency.\r\n @Retry(AssertionError, tries=3, timeout_secs=1)\r\n def _Check2():\r\n \"\"\"Tests rsync -c works as expected.\"\"\"\r\n self.RunGsUtil(['rsync', '-d', '-c', suri(bucket_uri), tmpdir])\r\n listing1 = _TailSet(suri(bucket_uri), self._FlatListBucket(bucket_uri))\r\n listing2 = _TailSet(tmpdir, self._FlatListDir(tmpdir))\r\n # Bucket should have un-altered content.\r\n self.assertEquals(listing1, set(['/obj1', '/obj2', '/subdir/obj3']))\r\n # Dir should have content like bucket but without the subdir objects\r\n # synchronized.\r\n self.assertEquals(listing2, set(['/obj1', '/obj2', '/subdir/obj5']))\r\n # Assert that the src/dest objects that had same length but different\r\n # content were synchronized (bucket to dir sync with -c uses checksums).\r\n self.assertEquals('obj2', self.RunGsUtil(\r\n ['cat', suri(bucket_uri, 'obj2')], return_stdout=True))\r\n with open(os.path.join(tmpdir, 'obj2')) as f:\r\n self.assertEquals('obj2', '\\n'.join(f.readlines()))\r\n _Check2()\r\n\r\n # Check that re-running the same rsync command causes no more changes.\r\n self.assertEquals(NO_CHANGES, self.RunGsUtil(\r\n ['rsync', '-d', '-c', suri(bucket_uri), tmpdir], return_stderr=True))\r\n\r\n # Now add and remove some objects in dir and bucket and test rsync -r.\r\n self.CreateObject(bucket_uri=bucket_uri, object_name='obj6',\r\n contents='obj6')\r\n 
self.CreateTempFile(tmpdir=tmpdir, file_name='obj7', contents='obj7')\r\n self.RunGsUtil(['rm', suri(bucket_uri, 'obj1')])\r\n os.unlink(os.path.join(tmpdir, 'obj2'))\r\n\r\n # Use @Retry as hedge against bucket listing eventual consistency.\r\n @Retry(AssertionError, tries=3, timeout_secs=1)\r\n def _Check3():\r\n self.RunGsUtil(['rsync', '-d', '-r', suri(bucket_uri), tmpdir])\r\n listing1 = _TailSet(suri(bucket_uri), self._FlatListBucket(bucket_uri))\r\n listing2 = _TailSet(tmpdir, self._FlatListDir(tmpdir))\r\n # Bucket should have un-altered content.\r\n self.assertEquals(listing1, set(['/obj2', '/obj6', '/subdir/obj3']))\r\n # Dir should have content like bucket but without the subdir objects\r\n # synchronized.\r\n self.assertEquals(listing2, set(['/obj2', '/obj6', '/subdir/obj3']))\r\n _Check3()\r\n\r\n # Check that re-running the same rsync command causes no more changes.\r\n self.assertEquals(NO_CHANGES, self.RunGsUtil(\r\n ['rsync', '-d', '-r', suri(bucket_uri), tmpdir], return_stderr=True))", "title": "" }, { "docid": "2d1d7b4ecad4f8ff7fbcb91d78adb42a", "score": "0.43964177", "text": "def create_table(self):\n try:\n self.c.execute('''CREATE TABLE files (owner TEXT, user_file_name TEXT, server_file_name TEXT)''')\n return True\n except Exception:\n return False", "title": "" } ]
7c446390108b5b78d1dda6462a45bbe7
import_list takes an uploaded file and inserts it into a previously created list. It requires an account id, list id, fields, data id, delimiter, and, if using an Excel file, the sheet name.
[ { "docid": "fa93b532e1ea93d2c7fab4b532fb56de", "score": "0.81622994", "text": "def import_list(accountId,listId,fields,dataId,delimiter,sheetName='0'):\n\tn = 1\n\tglobal importRequestBody\n\timportRequestBody = '<Parameters><DataId>'+dataId+'</DataId><FieldMappings>'\n\tfor field in fields:\n\t\timportRequestBody += '<FieldMapping><DestinationFieldName>'+field\n\t\timportRequestBody += '</DestinationFieldName><SourceFieldPosition>'\n\t\timportRequestBody += str(n)+'</SourceFieldPosition></FieldMapping>'\n\t\tn = n + 1\n\tif delimiter == 'Excel':\n\t\timportRequestBody += '</FieldMappings><ImportOptions><ExcelOptions><WorksheetName>'+sheetName+'</WorksheetName></ExcelOptions><Format>Excel</Format></ImportOptions></Parameters>'\n\telse:\n\t\timportRequestBody += '</FieldMappings><ImportOptions><CharacterSeperatedOptions><Delimiter>'+delimiter+'</Delimiter></CharacterSeperatedOptions><Format>CharacterSeperated</Format></ImportOptions></Parameters>'\n\tservice_request(service.listImport+accountId+'/'+listId,'POST',importRequestBody,accountLogin.apiUname,accountLogin.password)\n\txmldoc = minidom.parse(response)\n\timportId = xmldoc.getElementsByTagName('Id')[0].firstChild.nodeValue\n\treturn importId", "title": "" } ]
[ { "docid": "402435b65c3e242f259e34c97077a19b", "score": "0.706584", "text": "def list_from_file(accountId,fileName,fileFormat):\n\tglobal fields\n\tglobal listId\n\tglobal listName\n\tglobal delimiter\n\tlistName = fileName.split(\"/\")\n\tlistName = listName[1].rstrip('.csvxtl')\n\tif fileFormat == 'csv':\n\t\tfile = open(fileName,'r')\n\t\thead = file.readline().rstrip()\n\t\tfields = head.split(',')\n\t\tfieldsStr = \" \".join(fields)\n\t\tdelimiter = 'Comma'\n\telif fileFormat == 'tab':\n\t\tprint \"Sorry, support for tab-delimited files is not available at this time\"\n\t\tsys.exit(1)\n\t\t#file = open(fileName,'r')\n\t\t#head = file.readline().rstrip()\n\t\t#fields = head.split('\\t')\n\t\t#fieldsStr = \" \".join(fields)\n\t\t#delimiter = 'Tab'\n\telif fileFormat == 'xls':\n\t\timport xlrd\n\t\tcfileName = fileName.replace('\\\\','/')\n\t\tbook = xlrd.open_workbook(cfileName)\n\t\tsheetName = book.sheet_names(0)\n\t\tsheet = book.sheet_by_index(0)\n\t\tfields = sheet.row_values(0)\n\t\tfieldsStr = \" \".join(fields)\n\t\tdelimiter = 'Excel'\n\tprint \"Creating list %s\" % listName\n\tprint \"with fields %s\" % fields\n\tcreate_list(accountId,listName,fieldsStr)\n\tupload_file(fileName)\n\timport_list(accountId,listId,fields,dataId,delimiter,sheetName)", "title": "" }, { "docid": "9162066f884d5fd3a67058db6a3a7fdf", "score": "0.6164388", "text": "def import_data(self):\n try:\n with open(self.my_list, newline='') as csvfile:\n spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in spamreader:\n self.data_list.append(row)\n except IOError:\n print(\"file doesn't exist\")\n else:\n return self.data_list", "title": "" }, { "docid": "d992938afeeb6bbbe99381c544efb673", "score": "0.5858418", "text": "def importXls():\n try:\n fila = None\n # Abrimos el fichero excel\n document = xlrd.open_workbook(\"listadoclientes.xlsx\")\n\n # Guarda cada una de las hojas y el numero indica la hoja\n clientes = document.sheet_by_index(0)\n\n cont = 0\n\n for i in range(clientes.nrows):\n if cont != 0:\n dni = clientes.cell_value(rowx=i, colx=0)\n apellidos = clientes.cell_value(rowx=i, colx=1)\n nombre = clientes.cell_value(rowx=i, colx=2)\n fechatabla = xlrd.xldate_as_datetime(clientes.cell_value(rowx=i, colx=3), document.datemode)\n fecha = datetime.date(fechatabla)\n\n fila = (str(dni), str(apellidos), str(nombre), str(fecha))\n\n if fila is not None:\n conexion.cur.execute('insert into clientes (dni,apel,nome, data) values(?,?,?,?)', fila)\n conexion.conex.commit()\n\n funcionescli.listadocli(variables.listclientes)\n\n else:\n cont = 1\n\n\n except Exception as e:\n print(\"Error posible fallo \", e)", "title": "" }, { "docid": "f1d73d28b7522d2efd021801def6677a", "score": "0.5803383", "text": "def import_records(import_file, headers):\n db = sqlite3.connect('split.db')\n db.execute('drop table if exists records;')\n db.execute('create table records ({0});'.format(new_dbfields(headers)))\n\n print('Importing {}'.format(import_file))\n\n with open(import_file, 'r') as f:\n read = csv.reader(f, delimiter=g.del_type)\n for n, row in enumerate(read, 1):\n\n inserts = '\",\"'.join([\"%s\"] * len(row))\n fields = tuple(row)\n\n query = ('INSERT INTO records VALUES (\"' + inserts + '\");') % (fields)\n\n db.execute(query)\n\n if g.query_head:\n db.execute(\"DELETE FROM records WHERE rowid = 1;\")\n\n db.commit()\n db.close()", "title": "" }, { "docid": "936f20e5f1c1bfd14e42efadca441d8b", "score": "0.56179035", "text": "def importdata(request, formclass=ImportDataForm):\n error = None\n 
form = formclass(request.POST, request.FILES)\n if form.is_valid():\n try:\n reader = csv.reader(request.FILES['sourcefile'],\n delimiter=form.cleaned_data['sepchar'])\n except csv.Error, e:\n error = str(e)\n\n if error is None:\n try:\n cpt = 0\n for row in reader:\n if not row:\n continue\n try:\n fct = globals()[\"import_%s\" % row[0].strip()]\n except KeyError:\n continue\n try:\n fct(request.user, row, form.cleaned_data)\n except IntegrityError, e:\n if form.cleaned_data[\"continue_if_exists\"]:\n continue\n raise ModoboaException(_(\"Object already exists: %s\" % row))\n cpt += 1\n msg = _(\"%d objects imported successfully\" % cpt)\n return render(request, \"admin/import_done.html\", {\n \"status\": \"ok\", \"msg\": msg\n })\n except (ModoboaException), e:\n error = str(e)\n\n return render(request, \"admin/import_done.html\", {\n \"status\": \"ko\", \"msg\": error\n })", "title": "" }, { "docid": "e7d692a18b6ea7a35acdaf65ee82210a", "score": "0.56049746", "text": "def import_file(self):\n if not self.file:\n raise ValidationError('Debe subir un archivo para realizar la importacion.')\n book = self.open_excel_book()\n self.create_lines(self.sheet_to_array(book.sheet_by_index(0)), book.datemode)", "title": "" }, { "docid": "b49660e13a5eeb69b854cbb890f036d2", "score": "0.5570826", "text": "def import_tasks(self, list_id, import_content):\n list_url = 'http://checkvist.com/checklists/' + list_id + '/import.json'\n payload = {'token': self.api_token, 'import_content': import_content}\n r = requests.post(list_url, data=payload)\n parsed_json = json.loads(r.content)\n if self.bugger: \n pp.pprint(parsed_json)\n if r.status_code == requests.codes.ok: \n return parsed_json\n else: return False", "title": "" }, { "docid": "bece438eab253b1f83c171c52da93d72", "score": "0.55618244", "text": "def import_cards(self, lst):\n from trelloapps.models import Card\n from trelloapps.models import Member\n\n cards = lst.list_cards(card_filter='all')\n \n # UPDATE THE CARDS INFO #####\n for c in cards:\n try:\n card = Card.objects.get(remoteid=c.id)\n except Card.DoesNotExist:\n card = Card(remoteid = c.id)\n\n card.name = c.name\n card.desc = c.description\n card.closed = c.closed\n card.position = c.pos\n card.boardlist = self\n card.last_activity = c.date_last_activity\n card.save()\n\n card.members.through.objects.all().delete()\n for mid in c.member_id:\n try:\n member = Member.objects.get(remoteid=mid)\n except Member.DoesNotExist:\n m = lst.client.get_member(mid)\n member = Member(remoteid=m.id, name=m.username)\n member.save()\n card.members.add(member)", "title": "" }, { "docid": "b2418cfecad535eff8649f43abab5a7d", "score": "0.55617607", "text": "def action_import(self):\n ctx = self._context\n product_obj = self.env['gondola']\n if not self.data:\n raise exceptions.Warning(_(\"You need to select a file!\"))\n # Decode the file data\n data = base64.b64decode(self.data)\n file_input = cStringIO.StringIO(data)\n file_input.seek(0)\n\n if self.delimeter:\n delimeter = str(self.delimeter)\n else:\n delimeter = ','\n\n reader = csv.reader(file_input, delimiter=delimeter,lineterminator='\\r\\n')\n\n for row in reader:\n product_ids = product_obj.search([('code','=', row[0])])\n if not product_ids:\n vals = {}\n vals.update({'code': row[0]})\n vals.update({'name': str(row[1])})\n res = self.env['gondola'].create(vals)", "title": "" }, { "docid": "2ce7c8a1a01c41a2f3de17229ff2b554", "score": "0.5489033", "text": "def import_file(self, request):\n uploaded_file = request.FILES['uploaded_file']\n try:\n 
run_import(uploaded_file, self.importer)\n msg = _('Successfully processed %s.') % uploaded_file\n self.message_user(request, msg)\n except Exception, e:\n msg = _('Import failed: %s') % e\n self.message_user(request, msg, level=messages.ERROR)\n return redirect(self.change_list_url_name)", "title": "" }, { "docid": "af25790a0aa643caf04ef93db8778212", "score": "0.5468085", "text": "def post(self, file):\r\n file_1 = file.pop('file_1') # file_1 is required, so is always here\r\n\r\n try:\r\n missing_columns = None # stores the missing columns of a parser\r\n\r\n for Parser in (TruckAvailabilityParser, # pragma: no branch\r\n OrderListParser):\r\n\r\n parser = Parser(file_1) # instantiate the parser\r\n\r\n # Check if all required columns are in the excel sheet\r\n if len(parser.check_required_columns()) != 0:\r\n\r\n if missing_columns:\r\n # If the second parser also failed to parse\r\n\r\n # Find the parser with the least missing columns\r\n if len(parser.check_required_columns()) \\\r\n < len(missing_columns):\r\n missing_columns = parser.check_required_columns()\r\n\r\n # If there are 5 or more missing columns, we report\r\n # that we didn't recognize the spreadsheet\r\n if len(missing_columns) >= 5:\r\n abort(400,\r\n message=\"Spreadsheet is not recognized.\",\r\n status=\"Bad Request\")\r\n\r\n # If there are less than 5 columns missing, we report\r\n # the missing columns.\r\n else:\r\n abort(422, # pragma: no branch\r\n errors={i: \"Column is missing.\"\r\n for i in missing_columns},\r\n status=\"Unprocessable Entity\"\r\n )\r\n else:\r\n # store the missing columns and try the next parser\r\n missing_columns = parser.check_required_columns()\r\n continue\r\n\r\n # check if the unique columns contain duplicate values\r\n if len(parser.check_unique_columns()) != 0:\r\n abort(422, # pragma: no branch\r\n errors={i: \"Column contains duplicate values.\"\r\n for i in parser.check_unique_columns()},\r\n status=\"Unprocessable Entity\"\r\n )\r\n\r\n # parse the data using a Marshmallow schema\r\n data = parser.parse()\r\n\r\n sheet = parser.sheet_table()\r\n orders = [parser.row_table(**row) for row in data]\r\n sheet.add_rows(orders)\r\n db.session.add(sheet)\r\n db.session.commit()\r\n return sheet, 200\r\n\r\n except XLRDError:\r\n # The file could not be read by the spreadsheet parser\r\n abort(400,\r\n message=\"File type not supported.\"\r\n )\r\n except ValidationError as e:\r\n # The data in the spreadsheet does not have the right type\r\n # or is missing\r\n abort(\r\n 422,\r\n errors=e.normalized_messages(),\r\n status=\"Unprocessable Entity\"\r\n )", "title": "" }, { "docid": "5d25b5aa59df7b77c342f2cd313a9194", "score": "0.5344601", "text": "def import_items(import_file, item_create_func, item_add_func, allow_fabricator=False, quiet=False):\n if not quiet:\n print(' - Importing items from {}'.format(import_file))\n added_count = 0\n with open(import_file) as df:\n for line in df:\n itemline = line.strip()\n if itemline.lower().startswith('bl3(') and itemline.endswith(')'):\n new_item = item_create_func(itemline)\n if not allow_fabricator:\n # Report these regardless of `quiet`\n if not new_item.eng_name:\n print(' - NOTICE: Skipping unknown item import because --allow-fabricator is not set')\n continue\n if new_item.balance_short.lower() == 'balance_eridian_fabricator':\n print(' - NOTICE: Skipping Fabricator import because --allow-fabricator is not set')\n continue\n item_add_func(new_item)\n if not quiet:\n if new_item.eng_name:\n print(' + {} ({})'.format(new_item.eng_name, 
new_item.get_level_eng()))\n else:\n print(' + unknown item')\n added_count += 1\n if not quiet:\n print(' - Added Item Count: {}'.format(added_count))", "title": "" }, { "docid": "dda27fa40e983e45d0ca7df88f1806c6", "score": "0.533679", "text": "def FileImportBatch(self,pName,pBatchOptions,pReference):\n pass", "title": "" }, { "docid": "56907eeda2bc1ff9810ea6c77671384e", "score": "0.53279924", "text": "def import_file(self, db):\n\n pass", "title": "" }, { "docid": "66cf35922c986ca53a92657cd10a6599", "score": "0.5315606", "text": "def read_list(file_list):\n pass", "title": "" }, { "docid": "9980800cb3b2c7fed8e5122a70625026", "score": "0.5314842", "text": "def importFileToList(f):\r\n with open(f) as file:\r\n res = []\r\n for line in file:\r\n res.append(line.strip())\r\n return res", "title": "" }, { "docid": "679025eb89506e30c97571a368ce93f4", "score": "0.5269769", "text": "def import_files(import_path, server, database=None, schema=None, table=None, delimiter=None, config=None):\n db = Database(server, database, config)\n\n if path.isfile(import_path):\n db.import_file(import_path, table, schema, delimiter=delimiter)\n return\n\n for fpath in [path.join(import_path, p) for p in os.listdir(import_path)]:\n if not fpath.endswith('.csv'):\n continue\n logger.info(\"Importing: %s\" % fpath)\n db.import_file(fpath, None, schema, delimiter=delimiter)", "title": "" }, { "docid": "0c1576070243f701ad81d70db58f8b55", "score": "0.5241329", "text": "def import_from_csv(file):\r\n conn = create_connection()\r\n\r\n if conn is not None:\r\n table = basename(file).lower()[:-4]\r\n with open(file, \"r\") as csvfile:\r\n reader = csv_DictReader(csvfile)\r\n first_row = next(reader)\r\n headers = [header for header, value in first_row.items()]\r\n headers[0] = \"part_num PRIMARY KEY\"\r\n\r\n create_table(\r\n \"CREATE TABLE IF NOT EXISTS \" + table + \"(\" + \",\".join(headers) + \");\"\r\n )\r\n\r\n to_import = [list(row.values()) for row in reader]\r\n to_import.append(list(first_row.values()))\r\n\r\n for list_item in to_import:\r\n for item in list_item:\r\n if item.endswith(\"\\xa0\"):\r\n index_of_list = to_import.index(list_item)\r\n index_of_item = list_item.index(item)\r\n to_import[index_of_list][index_of_item] = item.strip()\r\n\r\n cur = conn.cursor()\r\n columns = [\"?\" for item in headers]\r\n cur.executemany(\r\n \"INSERT OR IGNORE INTO \"\r\n + table\r\n + \" VALUES (\"\r\n + \",\".join(columns)\r\n + \")\",\r\n to_import,\r\n )\r\n\r\n close_connection(conn)\r\n else:\r\n print(\"Error! 
Unable to connect to the database.\")", "title": "" }, { "docid": "a4b947346c9b2785676f810757d138cd", "score": "0.5235016", "text": "def import_file(file_import):\n headers = get_header_csv(file_import)\n g.original_filename = file_import\n g.db_header = headers\n g.get_query_fields()\n\n if headers:\n import_records(file_import, headers)\n proof_records()\n export_proof_records()\n return True", "title": "" }, { "docid": "4ed5e8a191b81899a63346c33e842da0", "score": "0.523385", "text": "def handle_uploaded_file(inputf):\n\n #TODO Better file format checking!\n if 'Ranked.csv' not in inputf.name:\n return ['Incorrect file format provided.']\n\n input_fstring=u''\n #Chunks for handling larger files - will essentially just have a long string of char's after this\n for chunk in inputf.chunks():\n input_fstring += chunk.decode('utf-8','replace') #Replace accented characters with unicode equivalents\n\n input_fbytes = io.BytesIO(input_fstring.encode('utf-8')) # Python 2's csv.reader doesn't support unicode, only utf-8 encoded bytes\n results = csv.DictReader(input_fbytes)\n\n dne_list = [] #Hold \"list of errors\" to be placed on template. Called \"Does Not Exist (DNE) list\"\n\n ref_num_header = results.fieldnames[0]\n if 'Score' not in results.fieldnames:\n dne_list.append('Could not find score column. It should have the heading \"Score\".')\n if 'Rank' not in results.fieldnames:\n dne_list.append('Could not find rank column. It should have the heading \"Rank\".')\n if len(dne_list) > 0:\n return dne_list\n\n for result in results:\n ref_num = (result[ref_num_header]\n .strip('\"') # Sometimes the ref_num is quoted\n .zfill(7) # Adds back leading zeroes which Excel can remove\n )\n score = result[\"Score\"]\n rank = result[\"Rank\"]\n\n try:\n student = SchoolStudent.objects.get(reference=ref_num)\n student.score = float(score)\n student.rank = rank\n student.award = ''\n student.save()\n # Individual exceptions: using get() generates exceptions\n except ObjectDoesNotExist:\n dne_list.append('Reference number: %s not found in database.' % (ref_num,))\n # Not a fatal error; continue with import\n except ValueError:\n dne_list.append('Reference number: %s contains a data-input error.' % (ref_num,))\n # Not a fatal error; continue with import\n except exceptions.MultipleObjectsReturned:\n dne_list.append('ERROR. Import halted. Two students with the same reference: %s were found in the file. 
Please ensure that, if the file contains information for PAIRS that PR is present in the filename.'%{ref_num})\n return dne_list # Fatal error; STOP IMPORT where the error occured\n\n #Return error list\n return dne_list", "title": "" }, { "docid": "35dc31fada67d0c9a0d7109546bd6fe9", "score": "0.52298886", "text": "def create_list(accountId,listName,fields):\n\tglobal listId\n\tfields = fields.split(\" \")\n\tcreateListBody = '<ListProperties><Fields>'\n\tfor field in fields:\n\t\tcreateListBody += '<Field>'+field+'</Field>'\n\tcreateListBody += '</Fields><Name>'+listName+'</Name></ListProperties>'\n\tservice_request(service.createList+accountId,'POST',createListBody,accountLogin.apiUname,accountLogin.password)\n\txmldoc = minidom.parse(response)\n\tlistId = xmldoc.getElementsByTagName('Id')[0].firstChild.nodeValue\n\treturn listId", "title": "" }, { "docid": "a766de7f85fbe5453f098db6bfc547bf", "score": "0.52260894", "text": "def importCSV():\n print(\">>>>> importCSV <<<<<\")\n\n if request.method == \"POST\":\n # Read CSV file\n print('>>>>> POST CSV <<<<<')\n if not request.files[\"fileX\"]:\n flash('Missing import file, please select a CSV-file from your computer', 'danger')\n return render_template(\"import.html\") \n try:\n data = pd.read_csv(request.files[\"fileX\"], sep=\";\")\n print('>>>>> STORE ROWS IN DB<<<<<')\n processImport(data)\n print('>>>>> DONE STORING ROWS IN DB<<<<<')\n flash('Transactions are stored, you can now process them', 'success')\n except Exception as e:\n print(\">>>>> exception <<<<<\", e)\n flash('Invalid file or a database error, send an email to slackbyte8@gmail.com', 'danger')\n return redirect(\"/\")\n else:\n print(\">>> /GET <<<\")\n flash('Select your csv-file to import the suspense-accounts to be matched', 'info') \n return render_template(\"import.html\")", "title": "" }, { "docid": "e853ce4b2b57d0e206ad9b5c51725ad8", "score": "0.521949", "text": "def import_order_items(data_file):\n client = MongoClient('localhost', 27017)\n\n db = client.you\n order_items = db.order_items\n\n with open(data_file, 'r') as source_file:\n reader = csv.reader(source_file, delimiter=',')\n keys = reader.__next__()\n for row in reader:\n order_item = dict(zip(keys, row))\n try:\n # Might be better to use insert_many() instead\n order_item_id = order_items.insert_one(order_item).inserted_id\n print('insert success: {}'.format(order_item_id))\n except:\n print('insert failed: {}'.format(order_item))\n\n print('order items import successful')\n client.close()", "title": "" }, { "docid": "11e9b8857af338e6430e93a07ae0b8d8", "score": "0.5205327", "text": "def import_eemaill(self, name, hash_id):\n\t\treturn self._create_list(name, creator_email=None, hash_id=hash_id)", "title": "" }, { "docid": "77f45f94e87e9a8d01950951cd944f62", "score": "0.5195844", "text": "def spreadsheet_and_get_table2(cls, file_: FileStorage) -> T.List[T.List[str]]:\n\n # Something in the combination of the Python version used (3.8.3), and the fact\n # that it is containerized(run inside Docker) neccesitates this.\n try:\n df: pd.DataFrame = pd.read_csv(file_, na_filter=False, header=None, encoding='utf-8') # type: ignore\n except UnicodeDecodeError:\n try:\n df: pd.DataFrame = pd.read_excel(file_, na_filter=False, header=None, encoding='utf-8') # type: ignore\n except UnicodeDecodeError:\n raise BadRequest(\n \"The uploaded file type could not be inferred.\"\n \" Perhaps using a different browser might help.\"\n )\n except BaseException:\n raise BadRequest(\"The uploaded spreadsheet could not be 
parsed.\")\n except Exception as e:\n current_app.logger.info(f\"Invalid excel file: {e}\")\n raise BadRequest(\"Uploaded text file is not in valid .xls format.\")\n \n except BaseException:\n raise BadRequest(\"The uploaded spreadsheet could not be parsed.\")\n except Exception as e:\n current_app.logger.info(f\"Invalid CSV file: {e}\")\n raise BadRequest(\"Uploaded text file is not in valid CSV format.\")\n table: T.List[T.List[str]] = df.to_numpy().tolist() # type: ignore\n if table == []:\n raise BadRequest(\"An empty file was uploaded.\")\n # strip blanks\n table = [[cell.strip() for cell in row] for row in table]\n return table", "title": "" }, { "docid": "5862a3a2b212ad9fe0ed99d51b852ac8", "score": "0.5184643", "text": "def insert_data(file_list=data_files):\n # initialize the db connection & cursor\n conn = sqlite3.connect(os.path.join(data_path, 'news.db'))\n\n # add manipulation code here\n # (tqdm call used to indicate progress in CLI)\n for data in tqdm(file_list, desc='Dumping Progress'):\n # load file into df object\n df = pd.read_csv(data, sep='\\t')\n # use the connection to insert the df into our db table\n df.to_sql('news', conn, if_exists='append')\n # Save changes (necessary?)\n conn.commit()\n\n # close out connection\n conn.close()", "title": "" }, { "docid": "697a461c8f9828313ec482a11715bbcf", "score": "0.51830244", "text": "def data_import(self, request, *args, **kwargs):\n # pylint: disable=attribute-defined-outside-init\n self.object = self.get_object()\n resp = {}\n if request.method == \"GET\":\n try:\n resp.update(\n get_async_csv_submission_status(\n request.query_params.get(\"job_uuid\")\n )\n )\n self.last_modified_date = timezone.now()\n except ValueError as e:\n raise ParseError(\n (\n \"The instance of the result is not a \"\n \"basestring; the job_uuid variable might \"\n \"be incorrect\"\n )\n ) from e\n else:\n csv_file = request.FILES.get(\"csv_file\", None)\n xls_file = request.FILES.get(\"xls_file\", None)\n\n if csv_file is None and xls_file is None:\n resp.update({\"error\": \"csv_file and xls_file field empty\"})\n\n elif xls_file and xls_file.name.split(\".\")[-1] not in XLS_EXTENSIONS:\n resp.update({\"error\": \"xls_file not an excel file\"})\n\n elif csv_file and csv_file.name.split(\".\")[-1] != CSV_EXTENSION:\n resp.update({\"error\": \"csv_file not a csv file\"})\n\n else:\n if xls_file and xls_file.name.split(\".\")[-1] in XLS_EXTENSIONS:\n csv_file = submission_xls_to_csv(xls_file)\n overwrite = request.query_params.get(\"overwrite\")\n overwrite = (\n overwrite.lower() == \"true\"\n if isinstance(overwrite, str)\n else overwrite\n )\n size_threshold = settings.CSV_FILESIZE_IMPORT_ASYNC_THRESHOLD\n try:\n csv_size = csv_file.size\n except AttributeError:\n csv_size = csv_file.__sizeof__()\n if csv_size < size_threshold:\n resp.update(\n submit_csv(\n request.user.username, self.object, csv_file, overwrite\n )\n )\n else:\n csv_file.seek(0)\n file_name = getattr(csv_file, \"name\", xls_file and xls_file.name)\n upload_to = os.path.join(\n request.user.username, \"csv_imports\", file_name\n )\n file_name = default_storage.save(upload_to, csv_file)\n task = submit_csv_async.delay(\n request.user.username, self.object.pk, file_name, overwrite\n )\n if task is None:\n raise ParseError(\"Task not found\")\n resp.update({\"task_id\": task.task_id})\n\n return Response(\n data=resp,\n status=status.HTTP_200_OK\n if resp.get(\"error\") is None\n else status.HTTP_400_BAD_REQUEST,\n )", "title": "" }, { "docid": "a8ae371dd900629527f824597a02f04e", 
"score": "0.51585484", "text": "def import_subscriber(list_id, custom_fields=None, resubscribe=True, **params):\n subscriber = createsend.Subscriber(CS_AUTH, list_id=list_id)\n subscriber.add(list_id=list_id, custom_fields=custom_fields,\n resubscribe=resubscribe, **params)", "title": "" }, { "docid": "47b1c376fd7de16d47f9d03d51a4f8b8", "score": "0.5141401", "text": "def createLeads(sf, users, wb, accounts):\n ws = wb[\"Leads\"] # Gets the Leads sheet\n recordTypeMap = getRecordTypes(sf, ws, 1, \"Lead\")\n insertLeads = []\n try:\n logInfo(\"Reading Leads\")\n for row in ws.iter_rows(min_row=2, values_only=True):\n if (row[0] == None):\n continue\n insertLeads.append(\n {'RecordTypeId': recordTypeMap.get(row[0]), 'OwnerId': users.get(row[1]),\n 'Salutation': u\"\" if row[2] is None else row[2], 'FirstName': u\"\" if row[3] is None else row[3], 'LastName': u\"\"\n if row[4] is None else row[4], 'MiddleName': u\"\" if row[5] is None else row[5], 'Suffix': u\"\" if row[6] is None else row[6],\n 'EEP_Preferred_Name__c': u\"\" if row[7] is None else row[7], 'Company': row[8], 'EEP_Gender__c': row[9], 'Email': row[10],\n 'phone': row[11], 'MobilePhone': row[12], 'EEP_Preferred_Day__c': row[13],\n 'EEP_Producer_Account_Tax_Id__c': u\"\" if row[15] is None else row[15], 'EEP_National_Producer_Number__c': u\"\" if row[16] is None else row[16],\n 'EEP_Producer_CBU__c': row[17], 'EEP_Producer_Distribution_Channel__c': u\"\" if row[18] is None else row[18],\n 'Status': row[19], 'EEP_Closed_Lost_Reason__c': row[20],\n 'LeadSource': row[21], 'EEP_Source_Campaign__c': u\"\" if row[22] is None else row[22],\n 'EEP_Restricted_Access__c': row[23], 'EEP_Firm_Segment__c': row[24], 'HasOptedOutOfEmail': row[25],\n 'Street': u\"\" if row[26] is None else row[26], 'City': u\"\" if row[27] is None else row[27],\n 'State': u\"\" if row[28] is None else row[28], 'PostalCode': u\"\" if row[29] is None else row[29],\n 'Country': u\"\" if row[30] is None else row[30], 'FinServ__RelatedAccount__c': accounts.get(row[31]),\n 'FinServ__ReferredByUser__c': users.get(row[32]), 'EEP_Date_Of_Birth__c': \"1970-05-09\"})\n logging.info(insertLeads)\n except Exception as ex:\n logError(\"Could not read Leads\", ex)\n\n try:\n logInfo(\"Creating Leads\")\n Leads = sf.bulk.Lead.insert(insertLeads, batch_size=100)\n logInfo(\"Created Leads\")\n logging.info(Leads)\n except Exception as ex:\n logError(\"Could not create Leads\", ex)", "title": "" }, { "docid": "0517761947a53f3c3d2b938a7ac2cd43", "score": "0.5130956", "text": "def _batch_upload(self, filelist):\n if not filelist:\n return\n\n token = self.auth.upload_token(self.bucketname)\n params = {'x:a': 'a'}\n\n for file in filelist:\n self._upload_file(token, file, self.encoding(file), params)", "title": "" }, { "docid": "1cbbefc7718a93da0a5792454e0c2970", "score": "0.5112426", "text": "def import_data(self, csv_file):\n\n csv_reader = csv.DictReader(f=csv_file, dialect=csv.excel)\n\n self._data_controller.start_import()\n\n for row in csv_reader:\n self._data_controller.process_record(row)\n\n upload_id = self._data_controller.upload_id\n return upload_id", "title": "" }, { "docid": "3ec107e6e6803ece58ebe0d64fb6ce85", "score": "0.5110719", "text": "def _import(ctx,\n csv_file: click.File,\n skip_lines: int):\n # get our guest list\n guest_list = _get_guest_list_from_file(csv_file, skip_lines)\n\n click.echo(\"About to import {guests} guests from {filename}\".format(guests=len(guest_list),\n filename=csv_file.name))\n click.confirm('Do you want to continue?', abort=True)\n\n 
for guest in tqdm(guest_list):\n if _add_guest(ctx.obj['WM_API_URL'],\n ctx.obj['WM_API_USER'],\n ctx.obj['WM_API_PASS'],\n guest[0],\n guest[1],\n guest[2],\n guest[3],\n int(guest[4])):\n tqdm.write(\"Added {}\".format(guest))\n else:\n tqdm.write(\"Failed to add {}\".format(guest))", "title": "" }, { "docid": "c549f67b7b32cc6d17d718030fc3df6a", "score": "0.50911885", "text": "def inputImportInventory(user, xfile):", "title": "" }, { "docid": "46e9a3b4fb2d6ae80492405a6d1607a3", "score": "0.5088903", "text": "def LoadExistingList(FileName): # Task 1\n lstData = [] # A row of data separated into elements of a list\n dicTable = {} # A dictionary that acts as a 'table' of rows\n\n objFile = open(FileName, \"r\")\n for line in objFile:\n strData = line # reading data\n lstData = strData.split(\",\") # splitting into 2 elements\n dicTable[lstData[0].strip()] = lstData[1].strip()\n objFile.close()\n return dicTable", "title": "" }, { "docid": "ab803bb2129d05fdb7dd5f32bb4e1616", "score": "0.50750166", "text": "def _batch_upload(self, filelist):\n if not filelist:\n return\n\n token = self.auth.upload_token(self.bucketname)\n params = {'x:a': 'a'}\n\n for key in filelist:\n filename = QiniuBackup.__encode_spec_character(key)\n self._upload_file(token, key, filename, params)", "title": "" }, { "docid": "334069527c2c9bb714be60ba72402251", "score": "0.50741017", "text": "def import_data(file_path, control_list=None, exception_list=None, size=1):\n ctrl_info = fileUtils.jsonUtils.read(file_path)\n build_data(ctrl_info, control_list=control_list, exception_list=exception_list, size=size)", "title": "" }, { "docid": "a2df7900a06025e54b1b8f793fdfeb34", "score": "0.5026664", "text": "def importPointListFromDict(filename,listName):\n f = open(filename,'r')\n points = [];\n while True:\n line = f.readline(); # read line by line\n if line == '': # check for EoF\n break;\n \n if line != '\\n': # skip empty lines\n \n if line.split()[0] == listName:\n while(True):\n line = f.readline();\n if line.split()[0] == ');':\n break;\n elif line != '\\n' and len(line.split()) > 1:\n points.append([float(line.split()[1]),float(line.split()[2]),float(line.split()[3])])\n \n points = np.array(points);\n return points;", "title": "" }, { "docid": "27e40718124b9eb045f089a0fd856ff8", "score": "0.5021445", "text": "def ingest_contents(self, listfullnames, **kwargs):\n #starttime = time.time()\n assert isinstance(listfullnames, list)\n\n for fname in listfullnames:\n _ = dfiutils.datafile_ingest_main(self.dbh, self.filetype, fname,\n self.tablename, self.didatadefs)\n #if numrows == None or numrows == 0:\n # miscutils.fwdebug_print(\"WARN: 0 rows ingested from %s\" % fname)\n #elif miscutils.fwdebug_check(1, 'FTMGMT_DEBUG'):\n # miscutils.fwdebug_print(\"INFO: %s rows ingested from %s\" % (numrows, fname))", "title": "" }, { "docid": "a0723a91236fd64a21db6892aad9f5f6", "score": "0.50184464", "text": "def _process_upload_file(lc, dataset, upload_file, geno):\n owner_org = dataset['organization']['name']\n\n expected_sheet_names = dict(\n (resource['name'], resource['id'])\n for resource in dataset['resources'])\n\n upload_data = read_excel(upload_file)\n while True:\n try:\n sheet_name, org_name, column_names, rows = next(upload_data)\n except StopIteration:\n return\n except:\n # XXX bare except because this can fail in all sorts of ways\n if asbool(config.get('debug', False)):\n # on debug we want the real error\n raise\n raise BadExcelData(\n _(\"The server encountered a problem processing the file \"\n \"uploaded. 
Please try copying your data into the latest \"\n \"version of the template and uploading again. If this \"\n \"problem continues, send your Excel file to \"\n \"open-ouvert@tbs-sct.gc.ca so we may investigate.\"))\n\n if sheet_name not in expected_sheet_names:\n raise BadExcelData(_('Invalid file for this data type. ' +\n 'Sheet must be labeled \"{0}\", ' +\n 'but you supplied a sheet labeled \"{1}\"').format(\n '\"/\"'.join(sorted(expected_sheet_names)),\n sheet_name))\n\n if org_name != owner_org:\n raise BadExcelData(_(\n 'Invalid sheet for this organization. ' +\n 'Sheet must be labeled for {0}, ' +\n 'but you supplied a sheet for {1}').format(\n owner_org, org_name))\n\n # custom styles or other errors cause columns to be read\n # that actually have no data. strip them here to avoid error below\n while column_names[-1] is None:\n column_names.pop()\n\n chromo = get_chromo(sheet_name)\n expected_columns = [f['datastore_id'] for f in chromo['fields']]\n if column_names != expected_columns:\n raise BadExcelData(\n _(\"This template is out of date. \"\n \"Please try copying your data into the latest \"\n \"version of the template and uploading again. If this \"\n \"problem continues, send your Excel file to \"\n \"open-ouvert@tbs-sct.gc.ca so we may investigate.\"))\n\n records = get_records(rows, chromo['fields'])\n method = 'upsert' if chromo.get('datastore_primary_key') else 'insert'\n lc.action.datastore_upsert(\n method=method,\n resource_id=expected_sheet_names[sheet_name],\n records=records)", "title": "" }, { "docid": "d24c183ac9de3ff9e6cbd9482a81721f", "score": "0.50173044", "text": "def import_data(delimited_file):\n with open(delimited_file, 'rb') as csvfile:\n all_data = list(csv.reader(csvfile, delimiter=','))\n return all_data", "title": "" }, { "docid": "50e7a11e76323343482c2ec9276e28e1", "score": "0.49894586", "text": "def get_from_file(self, list_from_file):\n\n logging.info(\"Формирование списка GoodInfoList\")\n\n if len(list_from_file) == 0:\n logging.error(\"После прочтений из файла получился пустой список\")\n return False\n\n for product in list_from_file:\n product_data = product.split(\":\")\n \n if len(product_data) != 5:\n logging.error(\"Следующая строка не была обработана1: {product}\".format(\n product=product))\n continue\n\n name_product = product_data[0]\n price_product = product_data[1]\n product_amount = product_data[2]\n product_date = product_data[3]\n shelf_life = product_data[4]\n date_manufacture = product_data[3]\n\n db_worker = DB_Worker()\n\n db_worker.add(GoodInfo(name_product, \n price_product, \n product_amount, \n product_date, \n shelf_life, \n date_manufacture))\n \n return True", "title": "" }, { "docid": "77db0487def56d691c516169a318b8bf", "score": "0.4985924", "text": "def loadCSVFile (file, sep=\";\"):\n #lst = lt.newList(\"ARRAY_LIST\") #Usando implementacion arraylist\n lst = lt.newList() #Usando implementacion linkedlist\n print(\"Cargando archivo ....\")\n t1_start = process_time() #tiempo inicial\n dialect = csv.excel()\n dialect.delimiter=sep\n try:\n with open(file, encoding=\"utf-8\") as csvfile:\n spamreader = csv.DictReader(csvfile, dialect=dialect)\n for row in spamreader: \n lt.addLast(lst,row)\n except:\n print(\"Hubo un error con la carga del archivo\")\n t1_stop = process_time() #tiempo final\n print(\"Tiempo de ejecución \",t1_stop-t1_start,\" segundos\")\n return lst", "title": "" }, { "docid": "09cdfddc1febf65ac861cb9518de9f09", "score": "0.49746695", "text": "def open_list(file):\r\n #makes sure the input filename 
has '.csv' extension\r\n if '.csv' in file:\r\n filename = file\r\n else:\r\n filename = file + '.csv'\r\n \r\n #lists for database information \r\n tickers = []\r\n \r\n #opens file\r\n with open(filename,'r') as csvfile:\r\n securities = csv.reader(csvfile, delimiter=',', quotechar='|')\r\n \r\n #populates the various data lists with proper entries\r\n for row in securities:\r\n tickers.append(row[0])\r\n \r\n #assign headers to headers list\r\n headers = [tickers[0]] \r\n \r\n #remove headers from other lists\r\n tickers.remove(tickers[0])\r\n \r\n \r\n #remove quotation marks from strings\r\n for i in range(len(tickers)):\r\n tickers[i] = tickers[i].replace('\"',\"\")\r\n \r\n return tickers", "title": "" }, { "docid": "d1a69fddb0a18136de41c3fc096979bf", "score": "0.49723586", "text": "def cmd_import(arg):\n if not arg:\n raise UsageError('DECK_ID')\n dl = deck.scrapeDeckListing(arg)\n if dl is None:\n return\n cmd_deck(dl.pop(0))\n assert_activedeck()\n pile = active_deck.deck\n i = 0\n tot = len(dl)-1\n for cardset in dl:\n m = re.match('(\\d+)\\s+(.*)$', cardset)\n if m:\n num = int(m.group(1))\n cname = m.group(2)\n sys.stdout.write(' Importing... {0:.0f}% complete\\r' \n .format(float(i)/tot*100))\n sys.stdout.flush()\n if not pile.add(cname, num):\n print('Unable to find card data for \\'' + cname + '\\'.')\n i += 1\n elif re.match('Sideboard$', cardset):\n pile = active_deck.sideboard\n else:\n print('Problem parsing \\'' + cardset + '\\'.')\n cmd_listall('')", "title": "" }, { "docid": "41bd0136b408d2ce6b56163db05984f3", "score": "0.49674347", "text": "def list_accounts_from_file(file_name):\n print(\"Extracting Accounts via File Input\")\n accounts = []\n\n with open(file_name) as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n x = 0\n # keep track of the positions, since this is a user defined file\n accountIdPos = None\n for row in readCSV:\n # read in the headers\n if x == 0:\n for y in range(len(row)):\n if row[y].lower() == 'accountid':\n accountIdPos = y\n else:\n if accountIdPos is None:\n print(\"Input needs to have at least 1 field: accountid\")\n raise ValueError(\"Insufficient fields in input file\")\n else:\n if len(row[accountIdPos].strip()) == 12:\n accounts.append(row[accountIdPos].strip())\n else:\n print(f\"Line {x} contains an invalid 12-digit accountid\")\n raise ValueError(\"Invalid values in input file\")\n\n x = x + 1\n return accounts", "title": "" }, { "docid": "3d2a35949b34ed7ce9f9f79a0b310583", "score": "0.49643067", "text": "def import_data(db, ref, data_file):\n try:\n data = read_csv(\n data_file,\n header=0,\n skip_blank_lines=True, \n encoding='latin-1'\n )\n except:\n try:\n data = read_csv(data_file, sep=' ', header=None)\n except:\n try:\n data = read_csv(\n data_file,\n header=0,\n skip_blank_lines=True, \n encoding='utf-16',\n sep='\\t',\n )\n except:\n data = read_excel(data_file, header=0)\n data.columns = map(snake_case, data.columns)\n data = data.where(notnull(data), None)\n data_ref = create_reference(db, ref)\n if isinstance(data_ref, CollectionReference):\n for index, values in data.iterrows():\n doc_id = str(index)\n doc_data = values.to_dict()\n data_ref.document(doc_id).set(doc_data, merge=True)\n else:\n doc_data = data.to_dict(orient='index')\n data_ref.set(doc_data, merge=True)", "title": "" }, { "docid": "98a42f0356cac46239069f541b806ce3", "score": "0.49513617", "text": "def spreadsheet_and_get_table(cls, file_: FileStorage) -> T.List[T.List[str]]:\n\n # Something in the combination of the Python version used 
(3.8.3), and the fact\n # that it is containerized(run inside Docker) neccesitates this.\n mimetypes.add_type(\n \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\", \".xlsx\"\n )\n\n file_type = mimetypes.guess_extension(file_.mimetype)\n if not file_type:\n raise BadRequest(\n \"The uploaded file type could not be inferred.\"\n \" Perhaps using a different browser might help.\"\n )\n\n if file_type in Settings.SUPPORTED_NON_CSV_FORMATS:\n try:\n df: pd.DataFrame = pd.read_excel(file_, na_filter=False, header=None) # type: ignore\n except BaseException:\n raise BadRequest(\"The uploaded spreadsheet could not be parsed.\")\n else:\n df = df.astype(str)\n table: T.List[T.List[str]] = df.to_numpy().tolist() # type: ignore\n elif file_type in [\".csv\", \".txt\"]:\n try:\n # TODO: Check if the file size is too large\n text_stream = io.TextIOWrapper(T.cast(io.BytesIO, file_))\n table = list(csv.reader(text_stream))\n except Exception as e:\n current_app.logger.info(f\"Invalid CSV file: {e}\")\n raise BadRequest(\"Uploaded text file is not in valid CSV format.\")\n else:\n if table == []:\n raise BadRequest(\"An empty file was uploaded.\")\n # strip blanks\n table = [[cell.strip() for cell in row] for row in table]\n text_stream.close()\n else:\n raise BadRequest(\n f\"File type {file_type} was not understood as a valid spreadhseet type,\"\n \"please upload one of the following file formats: \"\n + \", \".join(Settings.SUPPORTED_NON_CSV_FORMATS | {\".csv\"})\n )\n\n return table", "title": "" }, { "docid": "2b034db3c6a58c4ca6ec86d4ed70bbc0", "score": "0.49499393", "text": "def account_list(self, account_list):\n\n self._account_list = account_list", "title": "" }, { "docid": "d8ef1ae06a51c09c655f132ecaec295f", "score": "0.49250564", "text": "def add_list_to_row(self, reference, items, selected_sheet=\"active_\", workbook=\"default_\", cell_type=\"\"):\n\n\t\tself.edit.add_list_to_row(reference, items, selected_sheet, workbook, cell_type)", "title": "" }, { "docid": "a061c2230374426fd8baf15c4ed6a0a1", "score": "0.49235165", "text": "def write_list_to_jsonl_and_upload(api_params, bq_params, prefix, record_list, local_filepath=None):\n if not local_filepath:\n jsonl_filename = get_filename(api_params,\n file_extension='jsonl',\n prefix=prefix)\n local_filepath = get_scratch_fp(bq_params, jsonl_filename)\n\n write_list_to_jsonl(local_filepath, record_list)\n upload_to_bucket(bq_params, local_filepath, delete_local=True)", "title": "" }, { "docid": "f36d546d752bde0659df31cef8be5e09", "score": "0.49187812", "text": "def import_data(import_file):\n\n mask = '9s14s5s'\n data = []\n count = 0\n with open(import_file, 'rb') as f:\n for line in f:\n count += 1\n # unpack line to tuple\n fields = struct.Struct(mask).unpack_from(line)\n # strip any whitespace for each field\n # pack everything in a list and add to full dataset\n data.append(list([f.strip() for f in fields]))\n\n if count == 1000:\n break\n return data", "title": "" }, { "docid": "ea583db84369100a3ec13af54db59a2a", "score": "0.49145195", "text": "def putRecordToList(file_name, expression, to_list):\n fp = open_file(file_name)\n if not fp:\n log(\"fail to open file %s\"%file_name)\n exit()\n while True:\n line = fp.readline()\n if not line:\n break\n if isIgnored(line):\n continue\n line = line.strip()\n list_entry = entry(line, expression)\n if not list_entry.getValidData():\n continue\n to_list.append(list_entry)\n fp.close()", "title": "" }, { "docid": "98a09f18e12f6691fbacc6ff550797f7", "score": "0.49125665", "text": 
"def importAnswers(id, list):\n answer = []\n lines = list.splitlines(True)\n for line in lines:\n answer.append(u'\"%s\",%s' % (id,line))\n from tempfile import TemporaryFile\n csv = TemporaryFile()\n csv.write(u'\"complete_id\",\"question_code\",\"value\"\\n')\n csv.writelines(answer)\n csv.seek(0)\n xsl = os.path.join(\"applications\",\n request.application,\n \"static\",\n \"formats\",\n \"s3csv\",\n \"survey\",\n \"answer.xsl\")\n resource = s3mgr.define_resource(\"survey\", \"answer\")\n resource.import_xml(csv, stylesheet = xsl, format=\"csv\",)", "title": "" }, { "docid": "7a7d28665f129c59d3f0f036d2f4fb63", "score": "0.49076718", "text": "def import_data(self, import_fn, *args, **kwargs):\n exception_queue = Queue.Queue()\n import_thd = workerthread.WorkerThread(exception_queue=exception_queue,\n target=import_fn, *args, **kwargs)\n import_thd.start()\n while True:\n import_thd.join(0.125)\n if not import_thd.is_alive():\n try:\n exc_type, exc = exception_queue.get(block=False)\n err_str = str(exc)\n if len(err_str) == 0:\n err_str = exc_type.__name__\n module_logger.error(\"Error importing text file: {0}\".format(err_str))\n err_msg = \"An error occurred during import:\\n{0}\".format(err_str)\n err_dlg = wx.MessageDialog(self.view, message=err_msg,\n caption=\"Unable To Import File\", style=wx.ICON_ERROR)\n err_dlg.ShowModal()\n except Queue.Empty:\n pass\n gc.collect()\n break\n wx.GetApp().Yield()", "title": "" }, { "docid": "56930bae42cb45c420d34b4b1c510be5", "score": "0.4904011", "text": "def load_file(filename):\n inputList = []\n print(f\"Loading list from file... {filename}\") \n with open(filename) as file:\n inputList = [int(x) for x in file.read().split(\",\")]\n\n return inputList", "title": "" }, { "docid": "c3fe6d797ad00f5c4c7f974810a91a65", "score": "0.48980817", "text": "def import_data(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n \n order = self.browse(cr,uid,ids[0],context=context)\n #get the uploaded data\n import_data = base64.decodestring(order.import_file)\n excel_data = xlrd.open_workbook(file_contents=import_data)\n sheet = excel_data.sheets()[0]\n row_cnt = sheet.nrows\n \n '''\n level_no: the root product will be ‘root’, other follows the #.#.# format, and must be unique in one excel.\n erp_part_no: the part# of erp\n quantity:the quantity\n '''\n #1 check the column list: 'level_no,part_no,bom_name,quantity' must be in the list\n row_data = sheet.row_values(0);\n level_no_idx = 0\n part_no_idx = 0\n bom_name_idx = 0\n quantity_idx = 0\n route_name_idx = 0\n sequence_idx = -1\n id_idx = -1\n direct_bom_find_idx = -1\n \n try:\n # get the data column index\n level_no_idx = row_data.index('level_no')\n part_no_idx = row_data.index('part_no')\n part_name_idx = row_data.index('part_name')\n bom_name_idx = row_data.index('bom_name')\n quantity_idx = row_data.index('quantity')\n route_name_idx = row_data.index('route_name')\n no_consume_idx = row_data.index('no_consume')\n except Exception:\n raise osv.except_osv(_('Error!'), _('please make sure the \"level_no, part_no, part_name, bom_name, quantity, route_name, no_consume\" columns in the column list.'))\n try:\n # get the 'id' column index\n id_idx = row_data.index('id')\n except Exception:\n id_idx = -1 \n try:\n sequence_idx = row_data.index('sequence')\n except Exception:\n sequence_idx = -1 \n try:\n direct_bom_find_idx = row_data.index('direct_bom_find')\n except Exception:\n direct_bom_find_idx = -1 \n \n header_idxs={'level_no_idx':level_no_idx,\n 
'part_no_idx':part_no_idx,\n 'part_name_idx':part_name_idx,\n 'bom_name_idx':bom_name_idx,\n 'quantity_idx':quantity_idx,\n 'route_name_idx':route_name_idx,\n 'sequence_idx':sequence_idx,\n 'id_idx':id_idx,\n 'direct_bom_find_idx':direct_bom_find_idx,\n 'no_consume_idx':no_consume_idx}\n \n #2 find all of the rows with level_no starting with 'root_'\n root_level_prefix = 'root_'\n #root bom row range, format:{rboom1_level_no:(row_start,row_end),...rboomn_level_no:(row_start,row_end)}\n root_boms = {}\n curr_root_level = None\n curr_root_row_start = None\n for i in range(1,row_cnt):\n row_data = sheet.row_values(i);\n level_no = row_data[level_no_idx]\n #first line level_no must be a root boom\n if i == 1 and not level_no.startswith(root_level_prefix):\n raise osv.except_osv(_('Error!'), _('First row must be a root boom, the level_no should start with \"root_\"'))\n if level_no.startswith(root_level_prefix):\n if root_boms.get(level_no,False):\n raise osv.except_osv(_('Error!'), _('Root BOM level_no \"%s\" is duplicated with others'%(level_no,)))\n if curr_root_row_start:\n root_boms[curr_root_level]=(curr_root_row_start, i-1)\n curr_root_level = level_no\n curr_root_row_start = i\n if curr_root_level and not root_boms.get(curr_root_level,False):\n root_boms[curr_root_level]=(curr_root_row_start, row_cnt-1)\n #for the existing boom, only add import one root boom\n if order.mrp_bom_id and len(root_boms) > 1:\n raise osv.except_osv(_('Error!'), _('Only can import one bom to existing BOM!'))\n \n #loop the root boom to import boom\n mrp_bom_obj = self.pool.get('mrp.bom')\n new_bom_ids = []\n for rbom_level_no, rbom_row in root_boms.items():\n parsed_rows = self._parse_bom(cr, uid, sheet, rbom_level_no, rbom_row[0], rbom_row[1], header_idxs, context=context)\n #having mrp_bom_id: do top BOM updating\n bom_master = parsed_rows[rbom_level_no]\n if order.mrp_bom_id:\n #if from an existing bom ID, then close the window, and refresh parent to show the new data\n mrp_bom_obj.write(cr, uid, order.mrp_bom_id.id, bom_master, context=context) \n #for existing bomm updating, there will be only one BOOM, so return abort loop and return direct \n return {'type': 'ir.actions.act_window_close'} \n else:\n #Show the created new BOM in list view\n new_bom_id = mrp_bom_obj.create(cr, uid, bom_master, context=context)\n new_bom_ids.append(new_bom_id)\n\n #Show the created new BOM in list view\n return {\n 'domain': \"[('id', '=', %s)]\"%(new_bom_ids,),\n 'name': _('MRP BOM'),\n 'view_type':'form',\n 'view_mode':'tree,form',\n 'res_model': 'mrp.bom',\n 'type':'ir.actions.act_window',\n 'context':context,\n }", "title": "" }, { "docid": "0018a4c0a9409cdb40c15c39cdc83692", "score": "0.48974344", "text": "def load_invoice_entry_list_from_file(self):\n return self.load_item_list_from_file(\n path=self.data_path + self.invoice_entry_dir,\n ending='.flentry'\n )", "title": "" }, { "docid": "809df2cc7f91db4b4ae47a0af793d7f4", "score": "0.48940027", "text": "def submit_import(self, to_import, dryrun=False):\n cell_id = str(uuid4())\n run_id = str(uuid4())\n\n function_name = \"kb_uploadmethods.upload_metagenome_fasta_gff_file\"\n app_id = function_name.replace(\".\", \"/\")\n job_meta = {\"cell_id\": cell_id, \"run_id\": run_id}\n\n # plist is the parameter list for the batch import\n plist = []\n for sam in to_import:\n print(\"Import: %s\" % (sam.oid))\n imprt = self.mapping['Imports'][sam.type]\n iv = {\"workspace_name\": self.workspace_name}\n for k, v in imprt['default_params'].items():\n iv[k] = v\n\n for k, v in 
sam.params.items():\n iv[k] = v\n param = {\n \"method\": function_name,\n \"params\": [iv],\n \"app_id\": app_id,\n \"meta\": job_meta,\n }\n plist.append(param)\n\n # Batch Param.. Just the workspace id\n bp = {'wsid': self.wsid}\n if dryrun:\n return\n resp = self.ee.run_job_batch(plist, bp)\n job_id = resp['batch_id']\n _debug(cell_id)\n _debug(job_id)\n if not dryrun:\n self.add_batch_cell(to_import, cell_id, run_id, job_id)", "title": "" }, { "docid": "4db8300e6b3a989f1bcd626b8bcdd66f", "score": "0.4892109", "text": "def import_file(self, endurl, file, apikey=''):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/action/importFile/', {'endurl': endurl, 'file': file, 'apikey': apikey})))", "title": "" }, { "docid": "3cb3280c0ad66652364c334f7d12831b", "score": "0.48920113", "text": "def load(cls,loinc_list):\n\n # Open data file and read in the first (header) record\n loincs = csv.reader(open(LOINC_FILE,'U'),dialect='excel-tab')\n header = next(loincs)\n # Now, read in loinc codes:\n for loinc in loincs:\n l = dict(zip(header, loinc)) # build row dictionary of values\n if l['LOINC_NUM'] in loinc_list: # See if we're interested in this code\n cls(l) # If so, create a loinc instance and store it in Loinc.info", "title": "" }, { "docid": "eb44ddd93a71d1ea330c837dd8b00d98", "score": "0.48807117", "text": "def csv_import(filename=None):\n if not filename:\n filename = os.path.join(os.getcwd(), \"scotts.csv\")\n columns = _columns\n new_item_count, existing_item_count = 0, 0\n with open(filename, newline='') as csvfile:\n spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for line in spamreader:\n if not new_item_count: # skip the row labels\n new_item_count = 1\n continue\n row = {label: item for label, item in zip(columns, line)}\n # check that item is not already in database\n existing_item = Phone.query.filter_by(MEID=row['MEID']).first()\n if existing_item:\n existing_item_count += 1\n print(\"Item exists {}\".format(row['MEID']))\n continue\n\n print(\"import: {}\".format(row))\n new_device = Phone(OEM=row['OEM'],\n MEID=row['MEID'],\n SKU=row['SKU'],\n MODEL=row['MODEL'],\n Serial_Number=row['Serial_Number'],\n Hardware_Version=row['Hardware_Version'],\n MSL=row['MSL'].strip('\"'),\n History=pickle.dumps([(row['DVT_Admin'], datetime.utcnow())]),\n Comment=row['Comment'].replace(os.linesep, ' '),\n In_Date=datefix(row['In_Date']),\n Archived=bool(row['Archived']),\n TesterId=row['TesterId'],\n DVT_Admin=row['DVT_Admin'])\n try:\n db.session.add(new_device)\n new_item_count += 1\n except Exception as e:\n print(\"ER: {}, {}\".format(e, new_device))\n\n db.session.commit()\n print(\"imported {} items\".format(new_item_count - 1))\n print(\"ignored {} existing items\".format(existing_item_count))\n return True", "title": "" }, { "docid": "99cc848d84660cd4fa079c3bee496a20", "score": "0.48798615", "text": "def import_csv(self, request):\n if request.method == \"POST\":\n uploaded_csv = request.FILES[\"access_code_csv\"]\n return_url = HttpResponseRedirect(\"..\")\n\n if not uploaded_csv.name.endswith(\".csv\"):\n messages.error(request, \"File must be a csv\")\n return return_url\n\n # Check against the maximum upload size (2.5mb by default)\n if uploaded_csv.multiple_chunks():\n messages.error(request, \"Uploaded file is too large.\")\n return return_url\n\n file_data = uploaded_csv.read().decode(\"utf-8\")\n\n lines = file_data.split(\"\\n\")\n\n skipped_codes = 0\n num_codes = 0\n\n for line_num, line in enumerate(lines):\n fields = 
line.split(\",\")\n num_columns = len(fields)\n # Skip any blank lines. Not an error, can just be ignored.\n if line == \"\":\n continue\n if num_columns != 2:\n messages.error(\n request,\n \"Line {line_num} has {num_columns} columns. \"\n \"Expected 2.\".format(\n line_num=line_num + 1, num_columns=num_columns\n ),\n )\n return return_url\n\n access_code = fields[0].strip()\n\n if len(access_code) > 60:\n messages.error(\n request,\n \"Access code on line {line_num} is \"\n \"too long for the database field.\".format(\n line_num=line_num + 1\n ),\n )\n return return_url\n\n try:\n partner_pk = int(fields[1].strip())\n except ValueError:\n messages.error(\n request,\n \"Second column should only contain \"\n \"numbers. Error on line {line_num}.\".format(\n line_num=line_num + 1\n ),\n )\n return return_url\n\n try:\n Partner.even_not_available.get(pk=partner_pk)\n except ObjectDoesNotExist:\n messages.error(\n request,\n \"File contains reference to invalid \"\n \"partner ID on line {line_num}\".format(line_num=line_num + 1),\n )\n return return_url\n\n # Now that we've verified all access codes are valid, let's try to\n # actually upload them.\n for line in lines:\n if line == \"\":\n continue\n fields = line.split(\",\")\n access_code = fields[0].strip()\n partner_pk = int(fields[1].strip())\n\n # Only upload this code if it doesn't already exist. If it does,\n # increment a counter so we can report that.\n access_code_partner_check = AccessCode.objects.filter(\n code=access_code, partner=partner_pk\n ).count()\n if access_code_partner_check != 0:\n skipped_codes += 1\n else:\n new_access_code = AccessCode()\n new_access_code.code = access_code\n new_access_code.partner = Partner.even_not_available.get(\n pk=partner_pk\n )\n new_access_code.save()\n num_codes += 1\n\n if num_codes > 0:\n messages.info(\n request,\n \"{num_codes} access codes successfully \"\n \"uploaded!\".format(num_codes=num_codes),\n )\n if skipped_codes > 0:\n messages.info(\n request,\n \"{num_duplicates} access codes ignored \"\n \"as duplicates.\".format(num_duplicates=skipped_codes),\n )\n return HttpResponseRedirect(\"admin\")\n return render(request, \"resources/csv_form.html\")", "title": "" }, { "docid": "7cf9bdec93aa9b1b81821d511599110b", "score": "0.4879056", "text": "def test_repeat_import(device_association_list_importer, logger, mocked_statsd, db_conn, metadata_db_conn,\n mocked_config, tmpdir):\n expect_success(device_association_list_importer, 5, db_conn, logger)\n\n # importing same file\n with get_importer(DeviceAssociationListImporter,\n db_conn,\n metadata_db_conn,\n mocked_config.db_config,\n tmpdir,\n logger,\n mocked_statsd,\n DeviceAssociationListParams(filename='sample_associationlist.csv')) as imp:\n expect_success(imp, 5, db_conn, logger)", "title": "" }, { "docid": "8ed5142a8b95c26aa72d2830a14c04c3", "score": "0.4872855", "text": "def importFile(file):", "title": "" }, { "docid": "998581ac9bec4fb2161373e08effe160", "score": "0.4868441", "text": "def import_ldb_file(self, filename):\n fo = open(filename, \"r\")\n lines = fo.readlines()\n single_part = []\n for line in lines:\n if line[0] == '!':\n if single_part:\n part = SwdesPart(\n single_part[0], \n single_part[1], \n single_part[2:])\n if part.parameter == 'Q':\n # simple: we only store Qs\n if not self.data.has_key(part.location):\n self.data[part.location] = {}\n self.data[part.location].update(part.data)\n if not self.start_date or part.start_date < self.start_date:\n self.start_date = part.start_date\n if not self.end_date or 
part.end_date > self.end_date:\n self.end_date = part.end_date\n \n # Ignore and reset\n single_part = []\n else:\n single_part.append(line.strip())\n\n fo.close()", "title": "" }, { "docid": "ce0b9f3e90bcdfe5f45e1f89802a280d", "score": "0.48619184", "text": "def FileImport(self,pFilename,pMatchModels,pCreateUnmatchedModels):\n pass", "title": "" }, { "docid": "d2ae0cfc655c968e50b0cfd1daff1349", "score": "0.48598197", "text": "def test_csv_import_4(self):\n self.assertEqual(Part.query.count(), 0)\n csv = io.StringIO(\"name,barcode\\nhello,world\\nfoo,\\n,456\\n,433,\\nfoo,bar\\n\")\n Part._import_csv_content(csv)\n self.assertEqual(Part.query.count(), 2)", "title": "" }, { "docid": "7f1d7a23e809b9e82ff63510bc245f31", "score": "0.48589978", "text": "def run_import(csv_input_file, db_host, db_port, db_name, db_collection):\n\n print('Started CSV import - {0}'.format(datetime.now()))\n\n collection = utils.get_collection(db_host, db_port, db_name, db_collection)\n\n with open(csv_input_file, encoding='utf-8', errors='ignore') as csv_file:\n reader = csv.DictReader(csv_file)\n\n for json_obj in create_json(reader):\n collection.insert_one(json_obj)\n\n print('Finished: CSV import - {0}'.format(datetime.now()))", "title": "" }, { "docid": "8d8712b08ad96e937c8cc7b2f10758f7", "score": "0.4850952", "text": "def read_data_from_file(file_name_input, list_of_rows):\r\n list_of_rows.clear() # clear current data\r\n reader = open(file_name_input, \"r\")\r\n for line in reader:\r\n task_input, priority_input = line.split(\",\")\r\n row = {\"Task\": task_input.strip(), \"Priority\": priority_input.strip()}\r\n list_of_rows.append(row)\r\n reader.close()\r\n return list_of_rows, 'Success'", "title": "" }, { "docid": "a34ac727253936bc62d955e9c83684e8", "score": "0.48463002", "text": "def lists_bulk_lookup(T, infile, outfile, hide_progress, **kwargs):\n\n kwargs = _process_expansions_shortcuts(kwargs)\n\n with FileLineProgressBar(infile, outfile, disable=hide_progress) as progress:\n for list_id in infile:\n progress.update()\n\n if \"https\" in list_id:\n list_id = list_id.split(\"/\")[-1]\n if not re.match(\"^\\d+$\", list_id):\n click.echo(\n click.style(\"Skipping invalid List URL or ID: {line}\", fg=\"red\"),\n err=True,\n )\n continue\n result = T.list_lookup(list_id.strip(), **kwargs)\n _write(result, outfile)", "title": "" }, { "docid": "363fbd6b8c71a5afa808fda9437b81c3", "score": "0.48213428", "text": "def test_csv_re_import(self):\n self.assertEqual(Part.query.count(), 0)\n csv1 = io.StringIO(\"name,barcode\\nhello,world\\nfoo,bar\\n123,456\\n\")\n Part._import_csv_content(csv1)\n self.assertEqual(Part.query.count(), 3)\n csv2 = io.StringIO(\"name,barcode\\nhello,world\\nfoo,bar\\n\")\n Part._import_csv_content(csv2)\n self.assertEqual(Part.query.filter(Part.hidden == False).count(), 2)\n self.assertEqual(Part.query.count(), 3)", "title": "" }, { "docid": "01d60b38e9167223745a16aafad635b8", "score": "0.4817864", "text": "def prepare_project_import(self, f):\n val = self._perform_json_upload(\n \"POST\", \"/projects/import/upload\",\n \"tmp-import.zip\", f)\n return TemporaryImportHandle(self, val.json()[\"id\"])", "title": "" }, { "docid": "8b1b4132d342c246dcd0c2e01e0e2661", "score": "0.48142952", "text": "def __importData(self): \n\t\t\n\t\tfor aIdxRow in xrange(self.__sheetNutriments.nrows):\n\t\t\tif aIdxRow > 3 : # Pull title rows\n\t\t\t\taTestNum = self.__sheetNutriments.cell(rowx=aIdxRow,colx=self.__valuesColsNutriments[\"numero\"]).value\n\t\t\t\ttry: # Test to avoid other 
titles\n\t\t\t\t\tint(aTestNum)\n\t\t\t\texcept:\n\t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t\taIngredient = Ingredient()\n\t\t\t\t\n\t\t\t\t# Set of each nutriment\n\t\t\t\taIngredient.numero = self.__sheetNutriments.cell(rowx=aIdxRow,colx=self.__valuesColsNutriments[\"numero\"]).value\n\t\t\t\taIngredient.nom = (self.__sheetNutriments.cell(rowx=aIdxRow,colx=self.__valuesColsNutriments[\"nom\"]).value).encode('utf-8')\n\t\t\t\taIngredient.calories = self.__sheetNutriments.cell(rowx=aIdxRow,colx=self.__valuesColsNutriments[\"calories\"]).value\n\t\t\t\taIngredient.carbo = self.__sheetNutriments.cell(rowx=aIdxRow,colx=self.__valuesColsNutriments[\"carbo\"]).value\n\t\t\t\taIngredient.proteine = self.__sheetNutriments.cell(rowx=aIdxRow,colx=self.__valuesColsNutriments[\"proteine\"]).value\n\t\t\t\taIngredient.lipide = self.__sheetNutriments.cell(rowx=aIdxRow,colx=self.__valuesColsNutriments[\"lipide\"]).value\n\t\t\t\taIngredient.fibre = self.__sheetNutriments.cell(rowx=aIdxRow,colx=self.__valuesColsNutriments[\"fibre\"]).value\n\t\t\t\taIngredient.sodium = self.__sheetNutriments.cell(rowx=aIdxRow,colx=self.__valuesColsNutriments[\"sodium\"]).value\n\t\t\t\t\n\t\t\t\t# Append to the imported data list\n\t\t\t\tself.__ingredients.append(aIngredient)\n\t\t\t\t\n\t\tfor aIdxRow in xrange(self.__sheetGrease.nrows):\n\t\t\tif aIdxRow > 3 : # Go to the beginning\n\t\t\t\taTestNum = self.__sheetGrease.cell(rowx=aIdxRow,colx=self.__valuesColsGrease[\"numero\"]).value\n\t\t\t\ttry:\n\t\t\t\t\tint(aTestNum) # Test to avoid Excel titles\n\t\t\t\texcept:\n\t\t\t\t\tcontinue\n\t\t\t\taNumSature = self.__sheetGrease.cell(rowx=aIdxRow,colx=self.__valuesColsGrease[\"numero\"]).value\n\t\t\t\tself.__ingredients[int(aNumSature)-1].sature = self.__sheetGrease.cell(rowx=aIdxRow,colx=self.__valuesColsGrease[\"sature\"]).value\n\t\t\n\t\taDataFile = open(self.__dataFilePath, 'w')\n\t\t\n\t\tpickle.dump(self.__ingredients, aDataFile)\n\t\t\n\t\taDataFile.close()", "title": "" }, { "docid": "0d8edca4405328e584f79d600fb522f1", "score": "0.48058257", "text": "def import_file(self, filepath, table_name=None, schema=None, delimiter=',', truncate=False):\n\n try:\n if is_excel(filepath):\n self.import_excel(filepath, table_name, schema, truncate)\n else:\n if not table_name:\n (table_name, ext) = path.splitext(path.basename(filepath))\n\n if not self.table_exists(table_name, schema):\n columns = generate_schema_from_csv(filepath, delimiter)\n self.create_table(table_name, columns, schema=schema)\n\n table = sqlalchemy.Table(table_name, self.metadata, autoload=True, schema=schema)\n if truncate:\n self.engine.execute(table.delete())\n\n if filepath.endswith('.xlsx'):\n rows = self.store_xlsx(filepath, table)\n else:\n rows = self.store_data(filepath, table, delimiter)\n return rows\n\n except:\n logger.exception(\"Had some trouble storing %s\" % filepath)", "title": "" }, { "docid": "3c27fce29b83bc8874457e393db5f7ee", "score": "0.48018527", "text": "def _insert_filedata(self, filedata_list, member):\n ph_path = '/assets/img/%s' % member.name[len(self.base) + 1:]\n path = self.fs.physical_to_logical(ph_path)\n\n if self.fs.isfile(path):\n self.journal.append('Updating file \\'%s\\'' % ph_path)\n else:\n self.journal.append('Inserting file \\'%s\\'' % ph_path)\n\n if member.size > MAX_ASSET_UPLOAD_SIZE_K * 1024:\n raise BadImportException(\n 'Cannot upload files bigger than %s K' %\n MAX_ASSET_UPLOAD_SIZE_K)\n\n if self.dry_run:\n return\n\n filedata_list.append((path, self.archive.extractfile(member)))", "title": "" }, { 
"docid": "0ad25b757fdebc91fbe42899c59a8016", "score": "0.47938004", "text": "def process_data(self, list_data, file_path_object):", "title": "" }, { "docid": "7d344a000cc0ee402446ece1fe5159ab", "score": "0.47837532", "text": "def _bulk_import_reports(self, reports):\n self.db.engine.execute(CrimeReport.__table__.insert(), reports)", "title": "" }, { "docid": "edb21dab523ec31dd25220e47a492503", "score": "0.4776904", "text": "def import_users(self, users, hash_alg=None):\n try:\n if not users or len(users) > MAX_IMPORT_USERS_SIZE:\n raise ValueError(\n 'Users must be a non-empty list with no more than {0} elements.'.format(\n MAX_IMPORT_USERS_SIZE))\n if any([not isinstance(u, _user_import.ImportUserRecord) for u in users]):\n raise ValueError('One or more user objects are invalid.')\n except TypeError:\n raise ValueError('users must be iterable')\n\n payload = {'users': [u.to_dict() for u in users]}\n if any(['passwordHash' in u for u in payload['users']]):\n if not isinstance(hash_alg, _user_import.UserImportHash):\n raise ValueError('A UserImportHash is required to import users with passwords.')\n payload.update(hash_alg.to_dict())\n body, http_resp = self._make_request('post', '/accounts:batchCreate', json=payload)\n if not isinstance(body, dict):\n raise _auth_utils.UnexpectedResponseError(\n 'Failed to import users.', http_response=http_resp)\n return body", "title": "" }, { "docid": "5d4c94be94b872b785b8a35deb589e2d", "score": "0.4770939", "text": "def process_upload(typo, site_set, file_list):\n out_file = typo[3]\n for site in site_set: \n new_data = typo[0](site, file_list,typo[1]())\n\n #see what is in site set\n print(\"\\nFor table \" + typo[2] + \":\")\n\n #loop over the site set\n full_file = None\n for site in site_set:\n print(\"processing \" + site)\n site_file = typo[0](site,file_list,typo[1]())\n if not(site_file is None):\n if full_file is None:\n full_file = site_file\n else:\n full_file = full_file.append(site_file)\n else:\n print(\"No file detected for \"+site) \n\n #output file if possible\n if not(full_file is None):\n full_file['Hour'] = pd.DatetimeIndex(full_file['Time']).hour # adding an hour and date column\n full_file['Date'] = pd.DatetimeIndex(full_file['Time']).date\n full_file.to_csv(out_file, index = False)\n bulk_upload(data_path, domain, database, typo[2], out_file, user, password)\n else:\n print(\"No cleaned data to upload. 
If data was expected, please check that names match\")", "title": "" }, { "docid": "22464a14a04cef3bcb7eddbdb18286aa", "score": "0.47675395", "text": "def import_inventory(inventory, filename):\n pass", "title": "" }, { "docid": "212872df2469af7f73a553619ddee79a", "score": "0.47636592", "text": "def test_csv_import_3(self):\n self.assertEqual(Part.query.count(), 0)\n csv_content = \"name,barcode\\nhello,world\\nfoo,bar\\n123,456\\n\"\n Part._import_csv_content(io.StringIO(csv_content))\n self.assertEqual(Part.query.count(), 3)\n csv_content = \"name,barcode\\nfoo,bar\\n123,456\\nfoo2,bar2\\n324,561\\n\"\n Part._import_csv_content(io.StringIO(csv_content))\n self.assertEqual(Part.query.filter(Part.hidden == False).count(), 4)", "title": "" }, { "docid": "fa9fbf4ef5e6af69ec94780dd71359b4", "score": "0.476206", "text": "def insertData(tableName, dbFileName, dataList):\n conn = sqlite3.connect(dbFileName)\n try:\n cols = len(dataList[0])\n query = f\"insert into {tableName} values ({'?, '*(cols - 1)} ?)\"\n for row in dataList:\n try:\n with conn:\n conn.execute(query, row)\n except sqlite3.IntegrityError as e:\n print ('Error code: ', e)\n conn.close()\n except Exception as e:\n print('Неправильный список!!', e)", "title": "" }, { "docid": "5b898a045a42d05ee3e99f99234418e5", "score": "0.4760199", "text": "def handle_import_data(self, data):\n for entry in data:\n entry['value'] = entry['value'] if type(entry['value']) is str else json_tricks.dumps(entry['value'])\n entry['value'] = json_tricks.loads(entry['value'])\n self.tuner.import_data(data)", "title": "" }, { "docid": "8b028e45d481836d8ecc74c66221e83e", "score": "0.47580254", "text": "def loadSet(self, list, data):\n for line in list:\n line = line.split('#', 1)[0].strip()\n if line:\n item = line.split(None, 1) + [''] # add value if blank\n data[item[0]] = item[1].strip()", "title": "" }, { "docid": "d7588866910b282dc0d558c0b990cca6", "score": "0.47522423", "text": "def handle(self, *args, **kwargs):\n wishes = []\n with open(kwargs['path'], encoding=\"utf8\") as csv_file:\n wish_rows = list(csv.DictReader(csv_file))\n for row in tqdm(wish_rows, total=len(wish_rows)):\n wish = Wishlist.objects.create(\n supplier=row.get('SUPPLIER', row['SUPPLIER']), trade_name=row['TRADE_NAME'], language=row['LANGUAGE']\n )\n # if created:\n wishes.append(wish)\n\n Wishlist.objects.bulk_create(wishes)\n print('csv_file_path: ', kwargs['path'])", "title": "" }, { "docid": "6eb5520339cdbcef49083e1324021d29", "score": "0.4739044", "text": "def __init__(self, configuration, start_index=0, chunk_size=0):\n\n assert start_index >= 0\n assert chunk_size >= 0\n\n self.configuration = configuration\n # list that stores entity objects\n self.list = []\n # session for data retrieval\n self.session = requests.Session()\n # index of first element to import from input_file (default: 0)\n self.start_index = start_index\n # number of elements to import from input_file (default: 0, meaning max.)\n self.chunk_size = chunk_size", "title": "" }, { "docid": "4ae36b38d4e4feb2b661ad39570b4eca", "score": "0.4735238", "text": "def _upload_listing_to_postgres(postgres_conn_id: str,\n schema: str = 'raw',\n **kwargs) -> None:\n df = kwargs['ti'].xcom_pull(task_ids='load_from_file')\n execution_date = kwargs['execution_date'].strftime('%Y%m')\n table_name = f'airbnb_listing_{execution_date}'\n\n pg_hook = PostgresHook(postgres_conn_id=postgres_conn_id)\n logging.info(f'pg_hook: {pg_hook}')\n logging.info(f'postgres_conn_id: {postgres_conn_id}')\n conn_pg_hook = 
pg_hook.get_conn()\n\n # Create table\n query_create = f\"\"\"\n CREATE TABLE {schema}.{table_name} ( \n id BIGINT, \n \texecution_date TIMESTAMP WITHOUT TIME ZONE,\n listing_url TEXT, \n scrape_id BIGINT, \n last_scraped TEXT, \n name TEXT, \n description TEXT, \n neighborhood_overview TEXT, \n picture_url TEXT, \n host_id BIGINT, \n host_url TEXT, \n host_name TEXT, \n host_since TEXT, \n host_location TEXT, \n host_about TEXT, \n host_response_time TEXT, \n host_response_rate TEXT, \n host_acceptance_rate TEXT, \n host_is_superhost TEXT, \n host_thumbnail_url TEXT, \n host_picture_url TEXT, \n host_neighbourhood TEXT, \n host_listings_count FLOAT(53), \n host_total_listings_count FLOAT(53), \n host_verifications TEXT, \n host_has_profile_pic TEXT, \n host_identity_verified TEXT, \n neighbourhood TEXT, \n neighbourhood_cleansed TEXT, \n neighbourhood_group_cleansed FLOAT(53), \n latitude FLOAT(53), \n longitude FLOAT(53), \n property_type TEXT, \n room_type TEXT, \n accommodates BIGINT, \n bathrooms FLOAT(53), \n bathrooms_text TEXT, \n bedrooms FLOAT(53), \n beds FLOAT(53), \n amenities TEXT, \n price TEXT, \n minimum_nights BIGINT, \n maximum_nights BIGINT, \n minimum_minimum_nights BIGINT, \n maximum_minimum_nights BIGINT, \n minimum_maximum_nights BIGINT, \n maximum_maximum_nights BIGINT, \n minimum_nights_avg_ntm FLOAT(53), \n maximum_nights_avg_ntm FLOAT(53), \n calendar_updated FLOAT(53), \n has_availability TEXT, \n availability_30 BIGINT, \n availability_60 BIGINT, \n availability_90 BIGINT, \n availability_365 BIGINT, \n calendar_last_scraped TEXT, \n number_of_reviews BIGINT, \n number_of_reviews_ltm BIGINT, \n number_of_reviews_l30d BIGINT, \n first_review TEXT, \n last_review TEXT, \n review_scores_rating FLOAT(53), \n review_scores_accuracy FLOAT(53), \n review_scores_cleanliness FLOAT(53), \n review_scores_checkin FLOAT(53), \n review_scores_communication FLOAT(53), \n review_scores_location FLOAT(53), \n review_scores_value FLOAT(53), \n license FLOAT(53), \n instant_bookable TEXT, \n calculated_host_listings_count BIGINT, \n calculated_host_listings_count_entire_homes BIGINT, \n calculated_host_listings_count_private_rooms BIGINT, \n calculated_host_listings_count_shared_rooms BIGINT, \n reviews_per_month FLOAT(53),\n PRIMARY KEY(id, execution_date)\n )\n \"\"\"\n # Create the table\n conn_string = get_connection_string(schema=schema, db_name='airflow')\n engine = sa.create_engine(conn_string)\n engine.connect().execute(query_create)\n\n # Insert the values\n values = df.to_dict('split')['data']\n columns = stringify_columns(df.columns)\n query_insert = f\"\"\"\n INSERT INTO {schema}.{table_name} ({columns}) VALUES %s\n \"\"\"\n result = execute_values(conn_pg_hook.cursor(),\n query_insert,\n values,\n page_size=len(df))\n conn_pg_hook.commit()", "title": "" }, { "docid": "fc67503dce11256034b127738197860a", "score": "0.47277293", "text": "def update_file(origin_path, new_list):\n with open(origin_path, 'w', newline='') as f:\n csv_writer = csv.writer(f)\n csv_writer.writerows(new_list)\n print('\\n Employees csv file has been updated.')", "title": "" }, { "docid": "f66fafd9c5ee896769022facd083d49e", "score": "0.47246623", "text": "def __call__(self, importer, file, imported_entries, existing_entries):\n raise NotImplementedError", "title": "" }, { "docid": "b9d0ef666cb883408cb7935ee4cc2311", "score": "0.47211596", "text": "def importFileset(self, fs, settings, current=None):\n pass", "title": "" }, { "docid": "82027480c0fb0e7fce7071d0ae67bee0", "score": "0.46987247", 
"text": "def populate_my_db_list(db_file, db_list):\n with open(db_file, \"r\") as fh:\n for line in fh:\n db_list.append(line)\n return sorted(db_list)", "title": "" }, { "docid": "c1db488429a9f8d25789c55b8e623824", "score": "0.46928424", "text": "def process():\n if request.method == 'POST':\n files = request.form.getlist('files')\n if files:\n try:\n # Get the platform slug a.k.a module name\n slug = Platform.query.get_or_404(\n session['import']['platform']).slug\n # Import the module\n mod = import_module(\n \".indexing\", 'app.importer.platforms.' + slug)\n # Indexing function\n index_function = 'index_bulk' # 'index'\n # get a reference to the init function\n init = getattr(mod, index_function)\n # create a threaded job to index uploaded data according to\n # it's platform\n\n thread = Thread(target=init, args=[files, session['import']])\n # thread.daemon = True\n thread.start()\n # thread.join(1)\n # print(active_count())\n except:\n raise\n else:\n session['import']['files'] = []\n session.modified = True\n flash('Please upload some files before getting here', 'success')\n return redirect(url_for('importer.records'))\n\n return render_template('importer/porcess.html')", "title": "" }, { "docid": "3f97e554e14fa6b31da3b816d5e4153f", "score": "0.46915397", "text": "def import_audit(data, imports):\n if not isinstance(data, GoLismeroAuditData):\n raise TypeError(\"Expected GoLismeroAuditData, got '%s' instead\" % type(data))\n\n config = data.to_json_console\n print config\n\n # Set command\n config[\"command\"] = \"IMPORT\"\n # Set BBDD store location\n config[\"audit_db\"] = \"%s.db\" % join(data.store_path,config['audit_name'])\n\n # Config the plu\n config[\"enable_plugins\"] += \",import\" # Add import plugins\n config[\"disable_plugins\"] = ['all']\n\n print \"-\" * 90\n print config\n # Config the file imports\n config[\"imports\"] = imports\n\n if not BRIDGE.SIMULATE:\n try:\n BRIDGE.RPC.call(\"audit/create\", config)\n except Exception,e:\n raise ExceptionAudit(e)", "title": "" }, { "docid": "c9c63e62d36990c2831b53f7b65f11ea", "score": "0.46859044", "text": "def parseFile(filename):\n # Return list\n tmp_list = []\n c = 0\n\n reqs = [['cli_ref'],['client_id'],['name'],['desk'],\n ['prefix_1'],['phone_no_1'],['extenstion_1'],\n ['invoice_no'],['invoice_amount'],['invoice_due'],['invoice_date'],['invoice_due_date'],\n ['prefix_2'],['phone_no_2'],['extenstion_2'],\n ['full_name'],['contct'],['address'],['email'],['contry'],['timezone']]\n # Parse the trust file\n rows = csv.reader(codecs.open(filename, 'rU', 'utf-16'), delimiter=\",\")\n\n header = rows.__next__()\n h = flexibleHeaderMap(header, reqs)\n\n for l in rows:\n c += 1\n if len(l[h['cli_ref']]) > 1:\n # Required fields\n cli_ref = l[h['cli_ref']]\n client_id = l[h['client_id']]\n name = l[h['name']]\n desk = l[h['desk']]\n prefix_1 = l[h['prefix_1']]\n phone_no_1 = l[h['phone_no_1']]\n invoice_no = l[h['invoice_no']]\n invoice_amount = float(l[h['invoice_amount']])\n invoice_due = float(l[h['invoice_due']])\n invoice_date = l[h['invoice_date']]\n\n if 'extenstion_1' in h:\n extenstion_1 = l[h['extenstion_1']]\n else:\n extenstion_1 = ' '\n\n if 'invoice_due_date' in h:\n invoice_due_date = l[h['invoice_due_date']]\n else:\n invoice_due_date = invoice_date\n\n if 'prefix_2' in h:\n prefix_2 = l[h['prefix_2']]\n else:\n prefix_2 = ' '\n\n if 'phone_no_2' in h:\n phone_no_2 = l[h['phone_no_2']]\n else:\n phone_no_2 = ' '\n\n if 'extenstion_2' in h:\n extenstion_2 = l[h['extenstion_2']]\n else:\n extenstion_2 = ' '\n\n if 
'full_name' in h:\n full_name = l[h['full_name']]\n else:\n full_name = ' '\n\n if 'contct' in h:\n contct = l[h['contct']]\n else:\n contct = ' '\n\n if 'address' in h:\n address = l[h['address']]\n else:\n address = ' '\n\n if 'email' in h:\n email = l[h['email']]\n else:\n email = ' '\n\n if 'contry' in h:\n contry = l[h['contry']]\n else:\n contry = ' '\n\n if 'timezone' in h:\n timezone = l[h['timezone']]\n else:\n timezone = ' '\n\n tmp_list.append([cli_ref, client_id, name, desk, prefix_1, phone_no_1, extenstion_1, prefix_2, phone_no_2, extenstion_2, invoice_no, invoice_amount, invoice_due, invoice_date, invoice_due_date, full_name, contct, address, email, contry, timezone])\n\n return c, tmp_list", "title": "" }, { "docid": "1eb28a41c8bc0a22c52ad4e2e3e3bf23", "score": "0.4681935", "text": "def yp_get_competitors(self, business_list):\n file_path = fp.yp_raw_competitors(self.data_path)\n index_list = []\n existing_list = [] \n \"\"\"\n if os.path.exists(file_path):\n with open(file_path, 'r') as f:\n current_file = f.readlines()\n if len(current_file) > 0:\n existing_list = json.loads(current_file[0])\n index_list = [_business[\"alias\"] for _business in existing_list]\n logger.info(f\"existing file found: {len(index_list)} total entries\")\n \"\"\"\n with open(file_path, 'w') as f:\n # find businesses\n for business in business_list:\n new_list = []\n \n try:\n logger.info(f\"import started for : {business}\")\n branch = self.yelp_api.business_query(business)\n offset = 0\n while(True):\n try:\n # status scheme available at: # https://www.yelp.com/developers/documentation/v3/business_search\n competitors = self.yelp_api.search_query(\n longitude=branch['coordinates']['longitude'],\n latitude=branch['coordinates']['latitude'],\n radius=40000,\n # categories='bars,french'\n sort_by='distance',\n limit=50,\n offset=offset)\n \n # add alias name for distance measurement as dist_to_alias\n businesses = competitors[\"businesses\"]\n [i.update({\"dist_to_alias\": business}) for i in businesses] \n\n for i in businesses:\n if i['alias'] not in index_list:\n new_list.append(i)\n index_list.append(i['alias'])\n \n offset = offset + 50\n except self.yelp_api.YelpAPIError:\n break\n \n finally:\n existing_list.extend(new_list)\n logger.info(f\"import completed. 
existing: {len(existing_list)} new: {len(new_list)}\")\n \n # saving into file\n json.dump(existing_list, f)", "title": "" }, { "docid": "6f95a53e69aaec77581d1e7f5f5d4fbb", "score": "0.4672903", "text": "def test_process_import_file(self):\n notify_path = \"generic_import/FBA-OD-generic.xlsx.ws-ready-payload.json\"\n ready_msg = 'Your file \"FBA-OD-generic.xlsx\" is ready to import'\n self._test_successful_processing(\n 13, CONTEXT_PATH, SERIES_PATH, notify_path, ready_msg, page_count=2\n )", "title": "" }, { "docid": "40d1c4dfbfdf8f06b9f04f1917f27a85", "score": "0.46722814", "text": "def add_words(list, filename):\n\n with open(filename, 'r') as f:\n for line in f:\n for word in line.split():\n list.insert(word)", "title": "" }, { "docid": "c197919bd410fefcff12eb7235ccb3d9", "score": "0.4672102", "text": "def loadcsv(cursor, table, filename):\n GenerateAllowedCSVColumns()\n c = open(filename)\n f = csv.reader(c)\n c.seek(0)\n header = f.next()\n\n # numfields = len(header)\n\n query = buildInsertCmd(table)\n\n for line in f:\n vals = nullify(sanitize(line))\n cursor.execute(query, vals)\n\n return", "title": "" }, { "docid": "ec81122d2ccb7fa651c68a1a862a83fc", "score": "0.46687734", "text": "def import_file():\n temp_output = \"\"\n with open(\"week8-10final/text_files/customer_export.txt\") as text_file:\n data = text_file.read()\n data = data.replace(\"#\", \"\")\n data = data.replace(\"|\", \",\")\n temp_output = temp_output + data\n with open(\"week8-10final/docs/customer_export.txt\", \"w\") as file_output:\n file_output.write(temp_output)\n \"\"\"csv backup area. will create backups if the file already exists \"\"\"\n if os.path.isfile(\"week8-10final/docs/customers.csv\"):\n shutil.copy2(\"week8-10final/docs/customers.csv\", \"week8-10final/docs/customers.csv.backup\" + str(time.time()))\n with open(\"week8-10final/docs/customers.csv\", \"w\") as file_output:\n file_output.write(temp_output)\n\n \"\"\"reads customer_export.txt, and formats the data into a json format \"\"\"\n with open('week8-10final/docs/customer_export.txt', 'r') as csvfile:\n filereader = csv.reader(csvfile, delimiter=',')\n data_list = list()\n for row in filereader:\n data_list.append(row)\n data = [dict(zip(data_list[0],row)) for row in data_list]\n data.pop(0)\n \"\"\"json backup area. 
will create backups if the file already exists \"\"\"\n if os.path.isfile(\"week8-10final/docs/customers.json\"):\n shutil.copy2(\"week8-10final/docs/customers.json\", \"week8-10final/docs/customers.json.backup\" + str(time.time()))\n with open(\"week8-10final/docs/customers.json\", \"w\") as file_output:\n json.dump(data, file_output)\n\n \"\"\"reads bad characters in customers.csv, and formats the data for use in sql insert statements \"\"\"\n with open('week8-10final/docs/customers.csv', 'r') as csvfile:\n my_db.executeQuery(\"TRUNCATE TABLE crm_data;\")\n my_db.executeQuery(\"TRUNCATE TABLE mailings;\")\n csv_data = csv.reader(csvfile)\n next(csv_data)\n for row in csv_data:\n insert_crm_statement = ('INSERT INTO crm_data (f_name,l_name,company,address,city,state,zip,primary_phone,secondary_phone,email_address) VALUES (\\\"' + str(row[0]) + \" \\\",\\\"\" + str(row[1]) + \"\\\" ,\\\" \" + str(row[2]) + \"\\\",\\\"\" + str(row[3]) + \" \\\",\\\"\" + str(row[4]) + \"\\\",\\\"\" + str(row[6]) + \"\\\",\\\"\" + str(row[7]) + \" \\\",\\\"\"+ str(row[8]) + \"\\\",\\\"\" + str(row[9]) + \"\\\" ,\\\"\" + str(row[10]) + \"\\\");\")\n insert_mailings_statement = ('INSERT INTO mailings(name,company,address) VALUES (\\\"' + str(row[0] + \" \" + row[1]) + \"\\\" ,\\\" \" + str(row[2]) + \"\\\" ,\\\"\" + str(row[3]) + \"\\\");\")\n my_db.executeQuery(insert_crm_statement)\n my_db.executeQuery(insert_mailings_statement)\n my_db.conn.commit()\n \n print(\"IMPORT COMPLETE\")", "title": "" }, { "docid": "f30cf20fb2c844f2fc7bf298e6f460ec", "score": "0.46638638", "text": "def import_file(\n self,\n record: str,\n field: str,\n file_name: str,\n file_object: \"TextIOWrapper\",\n event: Optional[str] = None,\n repeat_instance: Optional[Union[int, str]] = None,\n ) -> EmptyJson:\n self._check_file_field(field)\n # load up payload\n payload: Dict[str, Any] = self._initialize_payload(content=\"file\")\n payload[\"action\"] = \"import\"\n payload[\"field\"] = field\n payload[\"record\"] = record\n if event:\n payload[\"event\"] = event\n if repeat_instance:\n payload[\"repeat_instance\"] = repeat_instance\n file_upload_dict: FileUpload = {\"file\": (file_name, file_object)}\n\n return cast(\n EmptyJson,\n self._call_api(\n payload=payload, return_type=\"empty_json\", file=file_upload_dict\n ),\n )", "title": "" } ]
64445d11317cada3dd6527bb5d7d08db
Change the theme of the UI. Settings are saved in settings,prop `Press Ctrl + D`
[ { "docid": "82e3f70bf59b9b6b496daf00c2ccbbb8", "score": "0.75060755", "text": "def changeTheme(self):\n dump(not dark, \"settings.prop\")\n self.__init__()", "title": "" } ]
[ { "docid": "c51fe62d907454b4e0ecd0cc6a0ca4b7", "score": "0.7390407", "text": "def themes_button_pushed():\n theme = self.settings.value(\"theme\", \"dark\")\n if theme == \"dark\":\n theme = \"light\"\n self.settings.setValue(\"theme\", theme)\n elif theme == \"light\":\n theme = \"dark\"\n self.settings.setValue(\"theme\", theme)\n else: \n self.settings.setValue(\"theme\", \"light\")\n self.setStyleSheet(open(\"style/\" + theme + \"theme.css\").read())\n self.add_path_dialog.setStyleSheet(open(\"style/\" + theme + \"theme.css\").read())", "title": "" }, { "docid": "cb1bde5d8084426fc53ccb1c3d744da6", "score": "0.7364013", "text": "def change_ttk_theme(event):\n\n if style.theme.name == 'litera':\n new = 'darkly'\n else:\n new = 'litera'\n style.theme_use(new)", "title": "" }, { "docid": "364b432cfc02daea9a96f943a71c1e84", "score": "0.73118764", "text": "def setTheme(self,theme):\n pass", "title": "" }, { "docid": "eb8bb7de96b27941c8e44c845eeff318", "score": "0.71936226", "text": "def change_theme(self, *args):\n new_theme = self.theme.get()\n self.theme_use(new_theme)\n # Redefine Entry layout to permit fieldbackground color change\n if new_theme in (\"vista\", \"xpnative\"):\n try:\n self.element_create(\"clam.field\", \"from\", \"clam\")\n except TclError:\n pass\n self.layout(\n \"Box.TEntry\",\n [\n (\n \"Entry.clam.field\",\n {\n \"sticky\": \"nswe\",\n \"border\": \"1\",\n \"children\": [\n (\n \"Entry.padding\",\n {\n \"sticky\": \"nswe\",\n \"children\": [\n (\"Entry.textarea\", {\"sticky\": \"nswe\"})\n ],\n },\n )\n ],\n },\n )\n ],\n )\n self.configure(\n \"Box.TEntry\",\n bordercolor=\"grey\",\n background=\"grey\",\n foreground=\"black\",\n fieldbackground=\"white\",\n )\n if new_theme == \"vista\":\n self.map(\n \"Box.TEntry\",\n bordercolor=[\n (\"hover\", \"!focus\", \"!disabled\", \"black\"),\n (\"focus\", \"dodger blue\"),\n ],\n background=[\n (\"hover\", \"!focus\", \"!disabled\", \"black\"),\n (\"focus\", \"dodger blue\"),\n ],\n )\n # Define style for boxes with highlighted digits or digits' conflicts\n self.configure(\"Box.TEntry\", fieldbackground=\"white\")\n self.map(\"Box.TEntry\", fieldbackground=[(\"disabled\", \"light grey\")])\n self.configure(\"HighlightArea.Box.TEntry\", fieldbackground=\"light cyan\")\n self.map(\n \"HighlightArea.Box.TEntry\",\n fieldbackground=[(\"disabled\", \"light steel blue\")],\n )\n self.configure(\n \"HighlightBox.HighlightArea.Box.TEntry\", foreground=\"midnight blue\"\n )\n self.map(\n \"HighlightBox.HighlightArea.Box.TEntry\", foreground=[(\"disabled\", \"blue\")]\n )\n self.configure(\"ErrorArea.Box.TEntry\", fieldbackground=\"khaki\")\n self.map(\"ErrorArea.Box.TEntry\", fieldbackground=[(\"disabled\", \"dark khaki\")])\n self.configure(\"ErrorBox.ErrorArea.Box.TEntry\", foreground=\"red\")\n self.map(\"ErrorBox.ErrorArea.Box.TEntry\", foreground=[(\"disabled\", \"red\")])", "title": "" }, { "docid": "d3bb839c92538af0208fcb3c7d378f6d", "score": "0.7140083", "text": "def change_theme(self):\n global THEME\n if THEME == dark_blue:\n THEME = default\n else:\n THEME = dark_blue\n self.set_theme()\n\n # Next-Button is by default deactivated following activates it\n if len(PLAYERS) == self.ui.spinBox_Quantity.value():\n self.ui.pushButton_Next.setStyleSheet(THEME.mainButton_Style)", "title": "" }, { "docid": "774012775181f58900b5b2a71c5f41b9", "score": "0.6786582", "text": "def set_theme(self):\n self.ui.frame_Background.setStyleSheet(THEME.windowBackground_Style)\n 
self.ui.frame_Close.setStyleSheet(THEME.transparentBackground)\n\n self.ui.frame_NextScreen.setStyleSheet(THEME.transparentBackground)\n self.ui.pushButton_Start.setStyleSheet(THEME.mainButton_Style)\n self.ui.pushButton_Next.setStyleSheet(THEME.passiveButton_Style)\n\n self.ui.label_Title.setStyleSheet(THEME.titelLabel_Style)\n self.ui.label_Subtitle.setStyleSheet(THEME.subtitleLabel_Style)\n self.ui.label_Info.setStyleSheet(THEME.infoLabel_Style)\n\n self.ui.frame_PlayerQuantity.setStyleSheet(THEME.transparentBackground)\n self.ui.label_Quantity.setStyleSheet(THEME.entryLabels_Style)\n self.ui.pushButton_QuantityPlus.setStyleSheet(THEME.pulsMinusButton_Style)\n self.ui.pushButton_QuantityMinus.setStyleSheet(THEME.pulsMinusButton_Style)\n self.ui.spinBox_Quantity.setStyleSheet(THEME.spinBox_Style)\n\n self.ui.frame_PlayerEntry.setStyleSheet(THEME.transparentBackground)\n self.ui.label_Entry.setStyleSheet(THEME.entryLabels_Style)\n self.ui.lineEdit_EnterName.setStyleSheet(THEME.playerNameEntry_Style)\n self.ui.pushButton_SelectColor.setStyleSheet(THEME.colorButton_Style)\n self.ui.pushButton_ReturnEntry.setStyleSheet(THEME.okButton_Style)\n self.ui.label_Result.setStyleSheet(THEME.entryLabels_Style)\n\n self.ui.frame_ResetThemeInfo.setStyleSheet(THEME.transparentBackground)\n self.ui.pushButton_Reset.setStyleSheet(THEME.theme_ResetButton_Style)\n self.ui.pushButton_Theme.setStyleSheet(THEME.theme_ResetButton_Style)", "title": "" }, { "docid": "11efab99923f233de7ee2ec8eca7fd8b", "score": "0.67702156", "text": "def apply_theme(self) -> None:\n palette = QPalette()\n for role in self.color_roles:\n palette.setColor(role, Themer.instance.colors[role]) # Set each role to its corresponding color.\n QApplication.setPalette(palette)", "title": "" }, { "docid": "784a7da8e52afe7cea10804122854dee", "score": "0.6762593", "text": "def ui_preset(self):\n self.set_theme()\n self.setWindowTitle(self.titel)\n self.setWindowIcon(QtGui.QIcon(self.icon))\n self.hide_unnecessary()", "title": "" }, { "docid": "9dd60f901effb419b0bddb3b92aef45e", "score": "0.6686928", "text": "def set_theme(theme):\n global DEFAULT_THEME\n if theme.lower() in Themes.keys():\n DEFAULT_THEME = theme.lower()\n else:\n raise ValueError(\"theme '%s' unavailable. 
use `availabale_themes()`\" % theme)", "title": "" }, { "docid": "b8de6230484bc515d30df7f6d8dad3c5", "score": "0.66753846", "text": "def set_theme(self, theme):\n self._params.theme = theme\n return self", "title": "" }, { "docid": "4f4cdf3de3cc5cf23832225a8415ab75", "score": "0.66477954", "text": "def changeAppearanceSlot(self) -> None:\n if self.styleSheet is style.breezeDark:\n self.setupStylesheet(style.breezeLight)\n else:\n self.setupStylesheet(style.breezeDark)", "title": "" }, { "docid": "2f9001994f566bf3e5eaab809c51b283", "score": "0.662961", "text": "def OnChangeTheme( self, event ) :\n\n print( event.GetString())\n evId = self.themeDict[event.GetString()]\n print (evId)\n\n all_panes = self.aui_mgr.GetAllPanes()\n\n for pane in all_panes :\n\n if isinstance( pane.window, aui.AuiNotebook ) :\n nb = pane.window\n\n if evId == ID_NotebookArtGloss :\n\n nb.SetArtProvider( aui.AuiDefaultTabArt() )\n self._notebook_theme = 0\n\n elif evId == ID_NotebookArtSimple :\n nb.SetArtProvider( aui.AuiSimpleTabArt() )\n self._notebook_theme = 1\n\n elif evId == ID_NotebookArtVC71 :\n nb.SetArtProvider( aui.VC71TabArt() )\n self._notebook_theme = 2\n\n elif evId == ID_NotebookArtFF2 :\n nb.SetArtProvider( aui.FF2TabArt() )\n self._notebook_theme = 3\n\n elif evId == ID_NotebookArtVC8 :\n nb.SetArtProvider( aui.VC8TabArt() )\n self._notebook_theme = 4\n\n elif evId == ID_NotebookArtChrome :\n nb.SetArtProvider( aui.ChromeTabArt() )\n self._notebook_theme = 5\n\n #nb.SetWindowStyleFlag( self._notebook_style )\n nb.Refresh()\n nb.Update()\n \n #end if\n \n # end for", "title": "" }, { "docid": "d7fdf6083541f39f4c3317a406b012df", "score": "0.65618575", "text": "def run(self, color, theme):\n\n # Get needed theme attributes etc.\n pref = sublime.load_settings(PREFERENCES)\n plug = sublime.load_settings(PLUGIN_SETTINGS)\n\n themes = plug.get(\"themes\", {})\n\n # Inherit color from alternate theme if possible\n current = detect_current_theme(pref, themes)\n\n colors = themes.get(theme, {}).get(\"colors\", [])\n theme_file = get_theme(themes.get(theme, {}), None)\n widget_settings = themes.get(theme, {}).get(\"widget_settings\", None)\n widget_scheme = themes.get(theme, {}).get(\"widget_scheme\", None)\n if theme_file.replace(\".sublime-theme\", '').endswith((\"@st3\", \"@st2\")):\n special = \"@st3\" if ST3 else \"@st2\"\n parts = os.path.splitext(widget_settings)\n widget_settings = parts[0] + special + parts[1]\n color_key = themes.get(theme, {}).get(\"color_key\", None)\n\n # See if it is okay to continue\n if (\n color not in colors or color_key is None or widget_scheme is None or\n theme_file is None or widget_settings is None\n ):\n return\n\n # Setup theme\n pref.set(\"theme\", theme_file)\n clear_all_theme_colors(pref, themes, \"colors\", \"color_key\")\n pref.set(color_key % color, True)\n sublime.save_settings(PREFERENCES)\n\n # Setup theme widget\n widget = sublime.load_settings(widget_settings)\n widget.set(\"color_scheme\", widget_scheme % color)\n widget.set(\"draw_shadows\", False)\n sublime.save_settings(widget_settings)\n\n self.set_theme_color(current, theme)", "title": "" }, { "docid": "259d6b1a6bb8840c8a266eed51fda523", "score": "0.6530919", "text": "def set_sel_theme(self):\n\n for idx, theme in enumerate(THEMES):\n if theme == self._theme.get():\n self.lbx_theme.activate(idx)\n self.lbx_theme.see(idx)\n break", "title": "" }, { "docid": "1711b42e945d2b106845327c000daf97", "score": "0.6523286", "text": "def set_theme_color(self, current_theme, new_theme):\n\n if current_theme is 
not None and current_theme != new_theme:\n sublime.run_command(\n \"inherhit_aprosopo_dirty_color\",\n {\"old_theme\": current_theme, \"new_theme\": new_theme}\n )\n elif current_theme is None:\n sublime.run_command(\n \"set_aprosopo_theme_dirty\",\n {\"color\": \"red\", \"theme\": new_theme}\n )", "title": "" }, { "docid": "3ac51cfdbf8f83d6a87d21a78605419e", "score": "0.64402485", "text": "def set_theme(self):\n self.ui.frame_background.setStyleSheet(THEME.gameWindowBackground_Style)\n self.ui.frame_talons.setStyleSheet(THEME.transparentBackground)\n self.ui.frame_player_hand.setStyleSheet(THEME.transparentBackground)\n\n self.ui.frame_uno_name.setStyleSheet(THEME.transparentBackground)\n self.ui.pushButton_uno.setStyleSheet(THEME.uno_SkipButton_Style)\n\n self.ui.frame_skip.setStyleSheet(THEME.transparentBackground)\n self.ui.pushButton_skip.setStyleSheet(THEME.uno_SkipButton_Style)\n\n self.ui.frame_wild_color_choice.setStyleSheet(THEME.transparentBackground)\n self.ui.pushButton_wild_blue.setStyleSheet(THEME.blueChoice_Style)\n self.ui.pushButton_wild_green.setStyleSheet(THEME.greenChoice_Style)\n self.ui.pushButton_wild_yellow.setStyleSheet(THEME.yellowChoice_Style)\n self.ui.pushButton_wild_red.setStyleSheet(THEME.redChoice_Style)\n\n self.ui.frame_left_last.setStyleSheet(THEME.transparentBackground)\n self.ui.frame_right_next.setStyleSheet(THEME.transparentBackground)", "title": "" }, { "docid": "01a5af81a201896c400ae76f98cef07c", "score": "0.6437444", "text": "def set_theme(self):\n self.ui.frame_Background.setStyleSheet(THEME.gameWindowBackground_Style)\n self.ui.label_gameOverMessage.setStyleSheet(THEME.gameOverMassage_Style)\n self.ui.label_Victor.setStyleSheet(\"background-color: rgba(255, 255, 255, 0);\\n\"\n f\"color: {PLAYERS[0].color};\")\n\n self.ui.frame_gameOverMenu.setStyleSheet(THEME.transparentBackground)\n self.ui.pushButton_PlayAgain.setStyleSheet(THEME.playAgainButton_Style)\n self.ui.pushButton_Menu.setStyleSheet(THEME.menu_QuitButton_Style)\n self.ui.pushButton_Quit.setStyleSheet(THEME.menu_QuitButton_Style)", "title": "" }, { "docid": "da3af4beaca01773a650da72fa1b12ab", "score": "0.6333269", "text": "def unotheme(bot, trigger):\n UnoBot.set_card_theme(bot, trigger)", "title": "" }, { "docid": "f953d5453680890e7ebf45be5644c165", "score": "0.63283736", "text": "def get_sel_theme(self, *args, **kwargs):\n\n idx = self.lbx_theme.curselection()\n if idx == \"\":\n idx = 0\n self._theme.set(self.lbx_theme.get(idx))", "title": "" }, { "docid": "1507c7edf3c7b053b4db5d23e8aa3330", "score": "0.6296296", "text": "def setAppearance():\n style,font = widgets.AppearenceDialog().getResult()\n if style:\n # Get style name, strip off the leading 'Q' and trailing 'Style'\n stylename = style.metaObject().className()[1:-5]\n GD.cfg['gui/style'] = stylename\n GD.gui.setStyle(stylename)\n if font:\n setFont(font)", "title": "" }, { "docid": "1507c7edf3c7b053b4db5d23e8aa3330", "score": "0.6296296", "text": "def setAppearance():\n style,font = widgets.AppearenceDialog().getResult()\n if style:\n # Get style name, strip off the leading 'Q' and trailing 'Style'\n stylename = style.metaObject().className()[1:-5]\n GD.cfg['gui/style'] = stylename\n GD.gui.setStyle(stylename)\n if font:\n setFont(font)", "title": "" }, { "docid": "74957f3610da00e7214327723866fac5", "score": "0.6285962", "text": "def toggle_dark(self):\n do_dark = self.toolbar.action_dark.isChecked()\n self.settings.setValue('toggles/darktheme', do_dark)\n if do_dark:\n 
self.app.setStyleSheet(qdarkgraystyle.load_stylesheet_pyqt5())\n else:\n self.app.setStyleSheet('')\n self.display.updateText()", "title": "" }, { "docid": "b3875c8e0f775a7a8bb3737bfcf2492e", "score": "0.62826914", "text": "def execute(self, name):\n if name not in self._themes:\n self._session.logger.warning(\"no theme named \\\"%s\\\"\" % name)\n return\n\n from Qt.QtWidgets import QApplication\n \n style_sheet = self._themes[name]()\n\n app = QApplication.instance()\n app.setStyleSheet(style_sheet)", "title": "" }, { "docid": "8f039833c692c0d1b4e2f2d85478cd87", "score": "0.62061405", "text": "def changeStyle(self):\n self.trace('Initializing application style...')\n\n if QT_VERSION_STR.startswith(\"4.\"):\n style = Settings.instance().readValue( key = 'Common/style-%s' % sys.platform )\n else:\n style = Settings.instance().readValue( key = 'Common/style-qt5' )\n QApplication.setStyle( style )", "title": "" }, { "docid": "5de2f6c54c0e35a887a755ed9b9391a6", "score": "0.6134668", "text": "def update_theme_and_fire_event():\n name = hass.data[DATA_DEFAULT_THEME]\n themes = hass.data[DATA_THEMES]\n if name != DEFAULT_THEME and PRIMARY_COLOR in themes[name]:\n MANIFEST_JSON['theme_color'] = themes[name][PRIMARY_COLOR]\n else:\n MANIFEST_JSON['theme_color'] = DEFAULT_THEME_COLOR\n hass.bus.async_fire(EVENT_THEMES_UPDATED, {\n 'themes': themes,\n 'default_theme': name,\n })", "title": "" }, { "docid": "45d2707c568122be7c3ddc9c269d75b6", "score": "0.61337173", "text": "def setGuiStyle(self, style):\n\t\tself._gui_style = style", "title": "" }, { "docid": "09944d2cfddb33e14b4719689642ed45", "score": "0.6133337", "text": "def theme(self, theme):\n allowed_values = [\"Default\", \"Light\", \"Dark\", \"Orange\", \"Material\", \"Joyness\", \"White\"] # noqa: E501\n if theme not in allowed_values:\n raise ValueError(\n \"Invalid value for `theme` ({0}), must be one of {1}\" # noqa: E501\n .format(theme, allowed_values)\n )\n\n self._theme = theme", "title": "" }, { "docid": "aec0a8962fde3ad488db025e2c6e4a49", "score": "0.61046755", "text": "def on_load_theme (self):\n\t\tpass", "title": "" }, { "docid": "ba24a0454f077643c4d8a4944f13b316", "score": "0.60986954", "text": "def themes(self):\n return [\"default\", \"gnome\"]", "title": "" }, { "docid": "4af1c35d0fd35feb1417e55a8b52276f", "score": "0.60096955", "text": "def setPalette(self, theme, appObject):\n # DARK THEME\n if theme:\n # CREATE QPALETTE\n palette = QPalette()\n palette.setColor(QPalette.Window, QColor(\"#161616\")) # MAIN WINDOW BACKGROUND COLOR\n palette.setColor(QPalette.WindowText, QColor(\"#fafafa\")) # TEXT COLOR\n palette.setColor(QPalette.Base,QColor(\"#343434\")) # TEXT ENTRY BACKGROUND COLOR\n palette.setColor(QPalette.Text,QColor(\"#fafafa\")) # TEXT ENTRY COLOR\n palette.setColor(QPalette.Button,QColor(\"#353535\")) # BUTTON COLOR\n palette.setColor(QPalette.ButtonText,QColor(\"#fafafa\")) # BUTTON TEXT COLOR \n\n # LIGHT THEME\n else:\n # CREATE QPALETTE\n palette = QPalette()\n palette.setColor(QPalette.Window, QColor(\"#e8e8e8\")) # MAIN WINDOW BACKGROUND COLOR\n palette.setColor(QPalette.WindowText, QColor(\"#212121\")) # TEXT COLOR\n palette.setColor(QPalette.Base,QColor(\"#ffffff\")) # TEXT ENTRY BACKGROUND COLOR\n palette.setColor(QPalette.Text,QColor(\"#212121\")) # TEXT ENTRY COLOR\n palette.setColor(QPalette.Button,QColor(\"#eeeeee\")) # BUTTON COLOR\n palette.setColor(QPalette.ButtonText,QColor(\"#212121\")) # BUTTON TEXT COLOR \n\n # APPLY CUSTOM COLOR PALETTE\n appObject.setPalette(palette)", "title": "" }, { 
"docid": "02af30ad65e4de942c7422f54da076b3", "score": "0.6009443", "text": "def set_theme(self, theme_name=None):\n set_params(self, theme_name)\n\n return self", "title": "" }, { "docid": "112e5c6bc5973014ccae4af933022215", "score": "0.6008791", "text": "def when_change_background_button_pressed(self):\r\n self.theme.change_background()", "title": "" }, { "docid": "78b45f7f0442f6fb07f020c8e825cf1d", "score": "0.59548384", "text": "def on_prefs_update(self):\n print('Updated')\n #util.generate_color_scheme()", "title": "" }, { "docid": "737821ab8d6c2709323afeb1fcb9d173", "score": "0.5936168", "text": "def theme_use(_: Node, key: str, __: Any):\n ttk_style = Style()\n ttk_style.theme_use(key)", "title": "" }, { "docid": "7f31320cf9a73f8670164a3540aecbe6", "score": "0.5926523", "text": "def setup_theme(self):\n\n # Create theme\n self.theme = arcade.gui.Theme()\n\n # Set up font color of theme\n self.theme.font.color = arcade.color.WHITE\n\n # Set up textures of buttons\n normal = \":resources:gui_themes/Fantasy/Buttons/Normal.png\"\n hover = \":resources:gui_themes/Fantasy/Buttons/Hover.png\"\n clicked = \":resources:gui_themes/Fantasy/Buttons/Clicked.png\"\n locked = \":resources:gui_themes/Fantasy/Buttons/Locked.png\"\n self.theme.add_button_textures(normal, hover, clicked, locked)", "title": "" }, { "docid": "2407e3adb3428070a4883aac84da312b", "score": "0.592447", "text": "def theme_init():\n theme_config_create_dir()\n filename = theme_config_get_backup()\n if not os.path.isfile(filename):\n theme = Theme()\n theme.save(filename)", "title": "" }, { "docid": "7de11c5a737726b94058e4adaac27064", "score": "0.5912829", "text": "def _theme_update(self, _):\n\t\tself._active_cell_style, self._active_row_style = self._load_active_cell_style()\n\n\t\tif not self.frames:\n\t\t\treturn\n\n\t\tconf = self._get_listbox_conf(self.frames[0][1])\n\t\tfor f in self.frames:\n\t\t\tf[1].configure(**conf)\n\n\t\tself._redraw_active_cell()", "title": "" }, { "docid": "53afb7eb48232a9219eb6639168ed01f", "score": "0.5895044", "text": "def apply( self ):\n font = self.value('font')\n \n try:\n font.setPointSize(self.value('fontSize'))\n \n # errors in linux for some reason\n except TypeError:\n pass\n \n palette = self.value('colorSet').palette()\n \n if ( qt.unwrapVariant(QApplication.instance().property('useScheme')) ):\n QApplication.instance().setFont(font)\n QApplication.instance().setPalette(palette)\n \n # hack to support MDI Areas\n for widget in QApplication.topLevelWidgets():\n for area in widget.findChildren(QMdiArea):\n area.setPalette(palette)\n else:\n logger.debug('The application doesnt have the useScheme property.')", "title": "" }, { "docid": "bc3c17e587b6c9b0c2e4734a48f96f21", "score": "0.5872174", "text": "def apply_theme(theme_name):\n theme = ThemeManager.get_theme_code(theme_name)\n color_scheme = ThemeManager.get_color_scheme_code(theme_name)\n\n subprocess.run(\n [\n \"spicetify\",\n \"config\",\n \"current_theme\",\n theme,\n \"color_scheme\",\n color_scheme,\n ],\n check=True,\n )\n subprocess.run([\"spicetify\", \"apply\"], check=True)", "title": "" }, { "docid": "1a740f9aa778901b1089c73744dddae8", "score": "0.58622605", "text": "def changeStyle(self, newStyle: dict) -> None:\n\t\tself.textBox.config(bg=newStyle[\"textBoxbg\"], fg=newStyle[\"textBoxfg\"])\n\t\tself.sendButton.config(bg=newStyle[\"sendButtonbg\"], fg=newStyle[\"sendButtonfg\"])\n\t\tself.chatOutput.config(bg=newStyle[\"chatOutputbg\"], 
fg=newStyle[\"chatOutputfg\"])\n\t\tself.userList.config(bg=newStyle[\"userListbg\"], fg=newStyle[\"userListfg\"])\n\t\tself.upperSide.config(bg=newStyle[\"upperSidebg\"])\n\t\tself.bottomSide.config(bg=newStyle[\"bottomSidebg\"])", "title": "" }, { "docid": "2b1b1c9886374aa4a3a172875f9ab9d0", "score": "0.58519053", "text": "def dark_button(tVIM):\n \n tVIM.scroll_window.config(background=\"gray17\", foreground=\"white\")", "title": "" }, { "docid": "e885f60a502739e3dc71d4d4d7c1b427", "score": "0.58493507", "text": "def change_theme(request):\n theme = request.GET.get('newTheme', None)\n id = request.GET.get('id', None)\n if not id or not theme:\n return respondWithError(\"An error occured while changing theme. Please try again later. \", True)\n response = changeTheme(id, theme)\n if not response:\n respondWithError(\"An error occured while changing theme. Please try again later. \", True)\n return respondWithSuccess(response)", "title": "" }, { "docid": "2256c3cd75bea6b7aea5481eb5675172", "score": "0.5832901", "text": "def applyTheme(self):\n\t\t# main\n\t\tmainstyle = 'QWidget{{background-color: {col1};}}' \\\n\t\t\t\t\t'QScrollBar:vertical{{width: 14px;}}' \\\n\t\t\t\t\t'QScrollBar:horizontal{{height: 14px;}}' \n\t\tmainstyle = mainstyle.format(col1 = self.parent.listcolors[0],\n\t\t\t\t\t\t\t\t\tcol2 = self.parent.listcolors[1],\n\t\t\t\t\t\t\t\t\tcol3 = self.parent.listcolors[2],\n\t\t\t\t\t\t\t\t\tcol4 = self.parent.listcolors[3], \n\t\t\t\t\t\t\t\t\tcol5 = self.parent.listcolors[4])\n\t\tself.setStyleSheet(mainstyle)", "title": "" }, { "docid": "481f21df43de76b09f7674834768605f", "score": "0.58228356", "text": "def set_default(self):\n\n self._choice['M_BG_COLOUR'] = sppasWxOption('wx.Colour', sppasTheme.COLOR1_BG, \"Main background color\")\n self._choice['M_FG_COLOUR'] = sppasWxOption('wx.Colour', sppasTheme.COLOR1_FG, \"Main foreground color\")\n self._choice['M_FONT'] = sppasWxOption('wx.Font', sppasTheme.MAIN_FONT, \"Font\")\n\n self._choice['M_TIPS'] = sppasWxOption('bool', True, 'Show tips at start-up')\n self._choice['M_OUTPUT_EXT'] = sppasWxOption('str', '.xra', \"Output file format\")\n self._choice['M_ICON_THEME'] = sppasWxOption('str', 'Default', \"Icons theme\")\n\n self._choice['M_BGD_COLOUR'] = sppasWxOption('wx.Colour', sppasTheme.COLOR2_FG, \"Secondary main background color\")\n self._choice['M_FGD_COLOUR'] = sppasWxOption('wx.Colour', sppasTheme.COLOR2_BG, \"Secondary main foreground color\")\n\n self._choice['F_SPACING'] = sppasWxOption('int', 2)\n\n self._choice['M_BUTTON_ICONSIZE'] = sppasWxOption('int', 32)\n self._choice['M_TREE_ICONSIZE'] = sppasWxOption('int', 16)\n\n # Menu\n self._choice['M_BGM_COLOUR'] = sppasWxOption('wx.Colour', sppasTheme.COLOR2_BG, \"Menu background color\")\n self._choice['M_FGM_COLOUR'] = sppasWxOption('wx.Colour', sppasTheme.COLOR2_FG, \"Menu foreground color\")\n self._choice['M_MENU_ICONSIZE'] = sppasWxOption('int', 32)\n\n # Toolbar\n self._choice['M_TOOLBAR_ICONSIZE'] = sppasWxOption('int', 24)\n self._choice['M_TOOLBAR_FONT'] = sppasWxOption('wx.Font', sppasTheme.TOOLBAR_FONT, \"Font\")\n\n # Title\n self._choice['M_HEADER_FONT'] = sppasWxOption('wx.Font', sppasTheme.HEADER_FONT, \"Font\")", "title": "" }, { "docid": "2bc591c758ed64df403a1375e6eda8c9", "score": "0.5819709", "text": "def apply_theme(self, theme):\n if \"fill_color\" not in self.library_specific_params:\n self.library_specific_params[\"fill_color\"] = theme.chart_color", "title": "" }, { "docid": "88019eec8c73709048d7c915b1872e30", "score": "0.5773321", "text": 
"def test_changing_theme(make_test_viewer):\n viewer = make_test_viewer()\n viewer.add_points(data=None)\n assert viewer.palette['folder'] == 'dark'\n\n screenshot_dark = viewer.screenshot(canvas_only=False)\n\n viewer.theme = 'light'\n assert viewer.palette['folder'] == 'light'\n\n screenshot_light = viewer.screenshot(canvas_only=False)\n equal = (screenshot_dark == screenshot_light).min(-1)\n\n # more than 99.5% of the pixels have changed\n assert (np.count_nonzero(equal) / equal.size) < 0.05, \"Themes too similar\"\n\n with pytest.raises(ValueError):\n viewer.theme = 'nonexistent_theme'", "title": "" }, { "docid": "0b63463f7d40fdcaf874182eeb0c461a", "score": "0.5757985", "text": "def darkmode_switch():\n\n # Check current bg colour\n current_bg = root.cget('bg')\n \n # If current_bg is original, change new_bg to dark (vice versa)\n if current_bg == original_bg:\n new_bg = dark_bg\n darkmodetxt_label.config(text=\"Dark Mode: ON\", bg=new_bg)\n darkmode_btn.config(image=onImg, bg=new_bg, activebackground=new_bg)\n elif current_bg == dark_bg:\n new_bg = original_bg\n darkmodetxt_label.config(text=\"Dark Mode: OFF\", bg=new_bg)\n darkmode_btn.config(image=offImg, bg=new_bg, activebackground=new_bg)\n \n # Set bg to new_bg, fg to current_bg\n \n root.config(bg=new_bg)\n for item in all_objects:\n item.config(bg=new_bg, fg=current_bg)", "title": "" }, { "docid": "da120e7fc0e1bbc178e24c4a84988e6e", "score": "0.57042176", "text": "def setupTheme(cls):\n base = DataAccess(DataAccess.engineDatabase)\n themeSettings = base.getTheme(base.getSavedThemeName())\n return themeSettings", "title": "" }, { "docid": "c67953cb201b150d9c647b55c9a5fdc9", "score": "0.5642868", "text": "def setSavedThemeName(cls, themeName):\n cls._setSavedSetting('themes_themeName',themeName)", "title": "" }, { "docid": "2802e9967f7e795fc838b4e9f99a2556", "score": "0.56147903", "text": "def _set_mode_default(cls):\n cls._disconnect_buttons()\n\n cls.btn_settings.setText(\"Settings\")\n cls.btn_settings.setIcon(QIcon.fromTheme(\"preferences-system\"))\n cls.btn_settings.setWhatsThis(\"View and edit settings.\")\n cls.btn_settings.clicked.connect(cls.settings)\n\n cls.btn_about.setText(\"About\")\n cls.btn_about.setIcon(QIcon.fromTheme(\"help-about\"))\n cls.btn_about.setWhatsThis(\"View Timecard credits and license.\")\n cls.btn_about.clicked.connect(cls.about)\n\n cls.btn_help.setText(\"Help\")\n cls.btn_help.setIcon(QIcon.fromTheme(\"help-contents\"))\n cls.btn_help.setWhatsThis(\"Display help for a clicked item.\")\n cls.btn_help.clicked.connect(cls.help)\n\n cls.btn_quit.setText(\"Quit\")\n cls.btn_quit.setIcon(QIcon.fromTheme(\"application-exit\"))\n cls.btn_quit.setWhatsThis(\"Quit Timecard.\")\n cls.btn_quit.clicked.connect(cls.quit)", "title": "" }, { "docid": "efc99dccfd90f99f369869cf0e251305", "score": "0.56115645", "text": "def test_theme(self):\n self._test_env_setting(\n env_variable=\"THEME\",\n env_value=\"material\",\n config_name=\"theme\",\n config_value={\"name\": \"material\"},\n loaded_type=theme.Theme)", "title": "" }, { "docid": "d5e6763e9ffd1d9b22153dc7faf866db", "score": "0.56114715", "text": "def set_default_style(style):\n global default_style\n if style == 'dark':\n style = default_dark_style\n elif style == 'light':\n style = default_light_style\n\n if not issubclass(style, Style):\n raise TypeError(\n \"style must be a subclass of pygments.styles.Style or \"\n \"one of 'dark', 'light'. 
Got {}\".format(repr(style))\n )\n default_style = style", "title": "" }, { "docid": "94362e099123d6c31eebf2d8fcaf42da", "score": "0.5589799", "text": "def setStyle(self,style):\n style = QtGui.QStyleFactory().create(style)\n pf.app.setStyle(style)\n self.update()", "title": "" }, { "docid": "d760c2c8205d665f7252167e2d5d1c8a", "score": "0.5580002", "text": "def ui_switch(self, *args):\r\n\r\n # pylint: disable=unused-argument\r\n # Positional arguments are used to provide compatibility with\r\n # TKinter.\r\n\r\n if self.ui_mode == 'light':\r\n self.ui_mode = 'dark'\r\n self.ui_state.set('DM')\r\n else:\r\n self.ui_mode = 'light'\r\n self.ui_state.set('LM')\r\n self.ui_refresh()", "title": "" }, { "docid": "619f19a80bf6ef3b7faa5e4019ae29c8", "score": "0.5562514", "text": "def _handleDwadasottariDasaGraphicsItemTextColorResetButtonClicked(self):\n\n value = \\\n PriceBarChartSettings.\\\n defaultDwadasottariDasaGraphicsItemTextColor\n \n self.dwadasottariDasaGraphicsItemTextColorEditButton.setColor(value)", "title": "" }, { "docid": "8c68b8b1fd52f6995a358c42636439c9", "score": "0.5554875", "text": "def _handleDwisaptatiSamaDasaGraphicsItemTextColorResetButtonClicked(self):\n\n value = \\\n PriceBarChartSettings.\\\n defaultDwisaptatiSamaDasaGraphicsItemTextColor\n \n self.dwisaptatiSamaDasaGraphicsItemTextColorEditButton.setColor(value)", "title": "" }, { "docid": "53502cdc50bb18e4f48eb73e8ebf1aa3", "score": "0.55313313", "text": "def set_style(self):\n self.style = ttk.Style()\n font = (\"TkDefaultFont {} bold italic\"\n .format(max(int(round(self.ivs2.x_pixels / 43.0)), 19)))\n self.style.configure(\"go_stop_button.TButton\",\n foreground=\"red\",\n padding=4,\n font=font)\n self.style.configure(\"go_stop_button_disabled.TButton\",\n foreground=\"black\",\n padding=4,\n font=font)", "title": "" }, { "docid": "32fdf5a4a5780fd384137d9734e38d1b", "score": "0.5504677", "text": "def on_style(self, style):\n self.log.debug(\"Switching ui style ...\")\n if style == 'default':\n self.setStyleSheet(\"\")\n else:\n self.setStyleSheet(self.applyStyle(styleName=style))\n self._currentStyle = style\n self.log.debug(\"Apply style: %s\" % style)", "title": "" }, { "docid": "9fdff882c8f7db04ba4b5384a76b9fed", "score": "0.5496832", "text": "def update_style(self):\n pass", "title": "" }, { "docid": "753ac1e7c4204ebc0c92a6332ed73eea", "score": "0.5473503", "text": "def day_button(tVIM):\n \n tVIM.scroll_window.config(background=\"steel blue\", foreground=\"white\")", "title": "" }, { "docid": "c47ce4da80b8a3d3bb253171fbe7b450", "score": "0.5460665", "text": "def themeble(value):\n return (value.replace('THEME', config.CURRENT_THEME or '')\n .replace('DEFAULT_THEME', config.DEFAULT_THEME or ''))", "title": "" }, { "docid": "a513f3f09c3b4899c9d6aa1af5663eef", "score": "0.5453293", "text": "def __init__(self) -> None:\n with open(FileManager().get_theme_file(), \"r\") as file:\n json_doc = load(file)\n self.colors = {QPalette.Window: QColor(json_doc[\"colors\"][\"window\"]),\n QPalette.WindowText: QColor(json_doc[\"colors\"][\"windowtext\"]),\n QPalette.Base: QColor(json_doc[\"colors\"][\"base\"]),\n QPalette.AlternateBase: QColor(json_doc[\"colors\"][\"altbase\"]),\n QPalette.Text: QColor(json_doc[\"colors\"][\"text\"]),\n QPalette.BrightText: QColor(json_doc[\"colors\"][\"brighttext\"]),\n QPalette.Button: QColor(json_doc[\"colors\"][\"button\"]),\n QPalette.ButtonText: QColor(json_doc[\"colors\"][\"buttontext\"]),\n QPalette.ToolTipBase: QColor(json_doc[\"colors\"][\"tooltipbase\"]),\n 
QPalette.ToolTipText: QColor(json_doc[\"colors\"][\"tooltiptext\"]),\n QPalette.Light: QColor(json_doc[\"colors\"][\"light\"]),\n QPalette.Midlight: QColor(json_doc[\"colors\"][\"midlight\"]),\n QPalette.Mid: QColor(json_doc[\"colors\"][\"mid\"]),\n QPalette.Dark: QColor(json_doc[\"colors\"][\"dark\"]),\n QPalette.Shadow: QColor(json_doc[\"colors\"][\"shadow\"]),\n QPalette.Highlight: QColor(json_doc[\"colors\"][\"highlight\"]),\n QPalette.HighlightedText: QColor(json_doc[\"colors\"][\"highlightedtext\"]),\n QPalette.Link: QColor(json_doc[\"colors\"][\"link\"]),\n QPalette.LinkVisited: QColor(json_doc[\"colors\"][\"linkvisited\"])}", "title": "" }, { "docid": "d46f57ec396aecf659f88ceac919c2d6", "score": "0.54491067", "text": "def clear_all_themes(pref, themes):\n\n for v in themes.values():\n theme = v.get(\"theme\")\n if theme is not None:\n if is_valid_theme(pref.get(\"theme\", None), theme):\n pref.erase(\"theme\")", "title": "" }, { "docid": "27141d39bfddd0a8a522a8c7c4bdcb2d", "score": "0.5445602", "text": "def _handleGannFanGraphicsItemDefaultTextColorResetButtonClicked(self):\n\n value = \\\n PriceBarChartSettings.\\\n defaultGannFanGraphicsItemDefaultTextColor\n\n self.gannFanGraphicsItemDefaultTextColorEditButton.\\\n setColor(value)", "title": "" }, { "docid": "6a490eb370d17d2b363eac99f8312444", "score": "0.54360366", "text": "def update_color_scheme(self):\n self.foreground = self.White if self.gui.config.get_bool(\"dark_background\") else self.Black\n self.background = self.Black if self.gui.config.get_bool(\"dark_background\") else self.White\n self.bgcolor = self.background\n self.visual_perspective_info.color = np.append(self.foreground[:3], 0.6)", "title": "" }, { "docid": "bb4abd28e317682148653ab846abc6fd", "score": "0.54205704", "text": "def _handleFibFanGraphicsItemDefaultTextColorResetButtonClicked(self):\n\n value = \\\n PriceBarChartSettings.\\\n defaultFibFanGraphicsItemDefaultTextColor\n\n self.fibFanGraphicsItemDefaultTextColorEditButton.\\\n setColor(value)", "title": "" }, { "docid": "52d6589373e141dafaff34e0acd39e93", "score": "0.5407343", "text": "def _handleShashtihayaniDasaGraphicsItemTextColorResetButtonClicked(self):\n\n value = \\\n PriceBarChartSettings.\\\n defaultShashtihayaniDasaGraphicsItemTextColor\n \n self.shashtihayaniDasaGraphicsItemTextColorEditButton.setColor(value)", "title": "" }, { "docid": "f87dd5f0dfb8f16744fb5de49b517d52", "score": "0.5402439", "text": "def show_themes():\n print(\"Theme tree is:\\n%s\" % (main_dic['themes'].to_string(),))", "title": "" }, { "docid": "0295a938391ed27ac656bf176faf8aa6", "score": "0.5385248", "text": "def _handleOctaveFanGraphicsItemTextColorResetButtonClicked(self):\n\n value = \\\n PriceBarChartSettings.\\\n defaultOctaveFanGraphicsItemTextColor\n \n self.octaveFanGraphicsItemTextColorEditButton.setColor(value)", "title": "" }, { "docid": "474064d939564d1efdef05d4380ca823", "score": "0.53849614", "text": "def _handleShodasottariDasaGraphicsItemTextColorResetButtonClicked(self):\n\n value = \\\n PriceBarChartSettings.\\\n defaultShodasottariDasaGraphicsItemTextColor\n \n self.shodasottariDasaGraphicsItemTextColorEditButton.setColor(value)", "title": "" }, { "docid": "5bb18d2985e3ba27aa2aafa95899bef8", "score": "0.5379839", "text": "def _theme_faders(self, mode_index):\n for index, fader in enumerate(self.control_surface.fader_elements):\n if mode_index > 0:\n fader.set_theme(mode_colors[mode_index], \"fill\")\n else:\n if index < 4:\n fader.set_theme(mode_colors[mode_index], \"spread\")\n else:\n 
fader.set_theme(mode_colors[::-1][mode_index], \"fill\")", "title": "" }, { "docid": "3f8e7df0c6f5891f8e80c434ea105fee", "score": "0.53796935", "text": "def add_theme(self, theme_name):\n if theme_name in list_themes():\n choice = input(f\"A theme named {theme_name} already exists, would you like to overwrite? [y/n]\")\n if \"y\" in choice.lower():\n self.set_updated_rcparams()\n with open(path.dirname(path.abspath(__file__))+'/themes/'+theme_name+'.txt', 'w') as file:\n print(str(self.updated_params).replace(', ', ',\\n'), file=file)\n\n print(f\"Theme {theme_name} successfully overwritten\")\n else:\n print(\"please use a different theme name\")\n\n else:\n self.set_updated_rcparams()\n with open(path.dirname(path.abspath(__file__))+'/themes/'+theme_name+'.txt', 'w') as file:\n print(str(self.updated_params).replace(', ', ',\\n'), file=file)\n\n print(f\"Theme {theme_name} successfully added locally\")", "title": "" }, { "docid": "10189b3b7ef32220fe9b7c18842260d4", "score": "0.5359463", "text": "def _handleSataabdikaDasaGraphicsItemTextColorResetButtonClicked(self):\n\n value = \\\n PriceBarChartSettings.\\\n defaultSataabdikaDasaGraphicsItemTextColor\n \n self.sataabdikaDasaGraphicsItemTextColorEditButton.setColor(value)", "title": "" }, { "docid": "3fe0892c96a62c87fe7af7a2a8e2da8e", "score": "0.53546226", "text": "def onEntryChanged(self):\n self.entry.configure(foreground=\"\")", "title": "" }, { "docid": "c3af01f873873aad4142a08a4799264e", "score": "0.5347663", "text": "def set_window_frame(self):\n self.window.title('TRIPLET CHECKER')\n self.window.geometry('900x600+200+200')\n self.window.resizable(False, False)\n # theme setting\n style = ThemedStyle(self.window)\n style.set_theme('breeze')", "title": "" }, { "docid": "74852d55529bbd6fbdcf5ec8662bdd57", "score": "0.53457123", "text": "def change_background(self):\n if self.color == \"Blue\":\n self.background_color = THEME.blue\n elif self.color == \"Green\":\n self.background_color = THEME.green\n elif self.color == \"Yellow\":\n self.background_color = THEME.yellow\n elif self.color == \"Red\":\n self.background_color = THEME.red\n else:\n self.background_color = THEME.black", "title": "" }, { "docid": "7fa0466ce725136cdcab06a95a0f934b", "score": "0.53249013", "text": "def set_ui_settings(self, widget, settings):\n\n # the default implementation does not show any editable widgets, so this\n # is a no-op. 
this method is required to be defined in order for the\n # custom UI to show up in the app\n pass", "title": "" }, { "docid": "9e88f00216cce83579c289a97151f63b", "score": "0.53243685", "text": "def vocabResearchTheme(self):\n return RESEARCH_THEME", "title": "" }, { "docid": "6724f33cb7da1e358141657fe41b661e", "score": "0.5313952", "text": "def themefile(show_all=False):\n configfile_print_scope('theme', show_all)", "title": "" }, { "docid": "a3f98e485e0e7e95bfa9de561ba9f340", "score": "0.53034806", "text": "def gray_button(tVIM):\n \n tVIM.scroll_window.config(background=\"gray\", foreground=\"white\")", "title": "" }, { "docid": "eaab720d82a0f91f5efa56a3b23ef167", "score": "0.5286622", "text": "def build(self):\n self.theme_cls.primary_palette = \"Teal\"\n self.theme_cls.accent_palette = \"Cyan\"\n self.theme_cls.theme_style = \"Dark\"\n return self.screen", "title": "" }, { "docid": "0fce06c0d759b3a3f713b22c0b7d75b4", "score": "0.52751267", "text": "def set_wizard_style(self, style: WizardStyleStr | mod.WizardStyle):\n self.setWizardStyle(WIZARD_STYLE.get_enum_value(style))", "title": "" }, { "docid": "a6465bf2869df0db23ae2e25a967e553", "score": "0.52639997", "text": "def getTheme(self):\n return {}", "title": "" }, { "docid": "a77ab09faf2d249ecc31db5cae6f8700", "score": "0.5252308", "text": "def setColor(self,idx, color):\n self.mainWindow.setColor(idx, color)", "title": "" }, { "docid": "305a32b02b2bead06d6a22955b3e5550", "score": "0.5249598", "text": "def _handleAshtottariDasaGraphicsItemTextColorResetButtonClicked(self):\n\n value = \\\n PriceBarChartSettings.\\\n defaultAshtottariDasaGraphicsItemTextColor\n \n self.ashtottariDasaGraphicsItemTextColorEditButton.setColor(value)", "title": "" }, { "docid": "8dd3edd97f31343f7049e0e3c1a689f3", "score": "0.52470034", "text": "def _handlePanchottariDasaGraphicsItemTextColorResetButtonClicked(self):\n\n value = \\\n PriceBarChartSettings.\\\n defaultPanchottariDasaGraphicsItemTextColor\n \n self.panchottariDasaGraphicsItemTextColorEditButton.setColor(value)", "title": "" }, { "docid": "c07f234ccc00b72ab05260f08ba2e0b6", "score": "0.5244198", "text": "def applyTheme(self):\n\t\t# main\n\t\tmainstyle = 'QWidget{{background-color: {col1};}}' \\\n\t\t\t\t\t'QComboBox{{background-color: {col2};}}' \\\n\t\t\t\t\t'QScrollBar:vertical{{width: 14px;}}' \\\n\t\t\t\t\t'QScrollBar:horizontal{{height: 14px;}}' \\\n\t\t\t\t\t'QTableView{{alternate-background-color: {col3};background-color: {col4};}}' \\\n\t\t\t\t\t'QTableWidget::item:selected{{ background-color:{col5}; color:white;}}'\n\t\tmainstyle = mainstyle.format(col1 = self.parent.listcolors[0],\n\t\t\t\t\t\t\t\t\tcol2 = self.parent.listcolors[1],\n\t\t\t\t\t\t\t\t\tcol3 = self.parent.listcolors[2],\n\t\t\t\t\t\t\t\t\tcol4 = self.parent.listcolors[3], \n\t\t\t\t\t\t\t\t\tcol5 = self.parent.listcolors[4])\n\t\tself.setStyleSheet(mainstyle)\n\t\tgridstyle = 'QHeaderView::section{{background-color: {col2};border-radius:1px;margin: 1px;padding: 2px;}}'\n\t\tgridstyle = gridstyle.format(col2 = self.parent.listcolors[1])\t\t\t\t\t\t\n\t\tself.tableWidget_general.setStyleSheet(gridstyle)\n\t\tself.tableWidget_envt.setStyleSheet(gridstyle)\n\t\tself.tableWidget_category.setStyleSheet(gridstyle)", "title": "" }, { "docid": "27bf87765bcf5edf605bdb13eb9f43d3", "score": "0.52395827", "text": "def setAppearence(self):\n style = pf.cfg['gui/style']\n font = pf.cfg['gui/font']\n family = pf.cfg['gui/fontfamily']\n size = pf.cfg['gui/fontsize']\n if style:\n self.setStyle(style)\n if font:\n self.setFont(font)\n 
if family:\n self.setFontFamily(family)\n if size:\n self.setFontSize(size)", "title": "" }, { "docid": "385acd9beb193f9c98fe89db29ef2b4c", "score": "0.5230997", "text": "def __str__(self):\n return self.theme", "title": "" }, { "docid": "9b0b6bd9306689f0ca0233d2eb8c744e", "score": "0.5230963", "text": "def _handleDwisaptatiSamaDasaGraphicsItemColorResetButtonClicked(self):\n\n value = \\\n PriceBarChartSettings.\\\n defaultDwisaptatiSamaDasaGraphicsItemBarColor\n \n self.dwisaptatiSamaDasaGraphicsItemColorEditButton.setColor(value)", "title": "" }, { "docid": "5a7ce440848941405491a973343b880d", "score": "0.52280205", "text": "def h_switch_to_settings(self, char):\n self.parentApp.switchForm(\"SETTINGS\")", "title": "" }, { "docid": "777299fe20763e0e4545f3740b343c1a", "score": "0.52221304", "text": "def _handleYoginiDasaGraphicsItemTextColorResetButtonClicked(self):\n\n value = \\\n PriceBarChartSettings.\\\n defaultYoginiDasaGraphicsItemTextColor\n \n self.yoginiDasaGraphicsItemTextColorEditButton.setColor(value)", "title": "" }, { "docid": "1f53f791919b52758c26a4ebe80c1f92", "score": "0.5218918", "text": "def setup(self):\n self.setup_theme()\n self.set_buttons()", "title": "" }, { "docid": "756ee89f514fb15c860742566abe2c0d", "score": "0.5214766", "text": "def onOptionsPress(self):\n\t\tself.changesRequireRestart = False\n\t\tself.isSetToDefault = False\n\n\t\tself.OptionsDlg = self._loadWidget(self._settings_gui_xml)\n\t\tself.OptionsDlg.stylize(self._gui_style)\n\t\tself.fillWidgets()\n\t\tself.OptionsDlg.mapEvents({\n\t\t\t'okButton' : self.applySettings,\n\t\t\t'cancelButton' : self.OptionsDlg.hide,\n\t\t\t'defaultButton' : self.setDefaults\n\t\t})\n\t\tself.OptionsDlg.show()", "title": "" }, { "docid": "ddd1b273d881986f5847629e54a1038c", "score": "0.52103335", "text": "def change_color(self, *args):\n color = QtGui.QColorDialog.getColor()\n if color.isValid():\n self.lbl_color.setStyleSheet(\"background-color: %s\" % color.name())\n self.region.update_color(color.getRgb())", "title": "" }, { "docid": "e9d6cfd7b6addec50158cad0580f0aa1", "score": "0.520826", "text": "def _handleDwadasottariDasaGraphicsItemColorResetButtonClicked(self):\n\n value = \\\n PriceBarChartSettings.\\\n defaultDwadasottariDasaGraphicsItemBarColor\n \n self.dwadasottariDasaGraphicsItemColorEditButton.setColor(value)", "title": "" }, { "docid": "b71058aaf3d2cf2d3f17057d8f18108f", "score": "0.5204946", "text": "def reset( self ):\n self.setValue('colorSet', XPaletteColorSet())\n self.setValue('font', QApplication.font())\n self.setValue('fontSize', QApplication.font().pointSize())", "title": "" }, { "docid": "b2b98dd828d01a58bfe71518fd64739f", "score": "0.52018887", "text": "def updateColor(self, name):\n if name not in self.confvar:\n return\n txt = self.confvar[name]['entry']\n if not self.ischanged(name):\n txt['fg'] = 'grey'\n else:\n txt['fg'] = 'black'", "title": "" }, { "docid": "79b1cfb7dc35d6f85483c13e229abefd", "score": "0.51930517", "text": "def _handleVimsottariDasaGraphicsItemTextColorResetButtonClicked(self):\n\n value = \\\n PriceBarChartSettings.\\\n defaultVimsottariDasaGraphicsItemTextColor\n \n self.vimsottariDasaGraphicsItemTextColorEditButton.setColor(value)", "title": "" }, { "docid": "bee9edbd28a8867cc4076b10c3dd00a9", "score": "0.5191334", "text": "def __apply_preferences(self, wx_object):\n\n font = self._preferences.GetValue('M_FONT')\n wx_object.SetFont(font)\n wx_object.SetForegroundColour(self._preferences.GetValue('M_FG_COLOUR'))\n 
wx_object.SetBackgroundColour(self._preferences.GetValue('M_BG_COLOUR'))", "title": "" } ]
e1a9830fb6a965c312254b2ffd48274b
convert v at DH grid points to v in SH
[ { "docid": "a56c130749e423fb5b19c7cbab86ff38", "score": "0.7078962", "text": "def v2vSH(self, v, l_max=None):\n lm = (v.shape[0]-2)//2\n if l_max is not None:\n lm = min(l_max, lm)\n return _sht.shtools.SHExpandDH(v, norm=1, sampling=2, csphase=1)[:,:lm+1,:lm+1]", "title": "" } ]
[ { "docid": "363f89814917abaaee26ce35a157690e", "score": "0.69157463", "text": "def vSH2v(self, vSH, l_max=None, Nth=None):\n if Nth is None and l_max is None:\n l_max = self._get_lmax(vSH)\n Nth = l_max * 2 + 2\n lm = l_max\n elif Nth is None:\n lm = l_max\n else:\n lm = (Nth-2)/2\n SH = self._convert_SHin(vSH, l_max=l_max)\n return _sht.shtools.MakeGridDH(SH, norm=1, sampling=2, csphase=1, lmax_calc=l_max, lmax=lm)", "title": "" }, { "docid": "9d0457dc13c18c823b31ecced731153b", "score": "0.64291507", "text": "def p2v(self, polSH, l_max=14):\n z = self._convert_SHin(polSH, l_max=l_max)\n _,_,_,_,dtr,dpr = _sht.shtools.MakeGravGradGridDH(z, 3480,3480, sampling=2)\n vthp = dtr\n vphp = dpr\n return vthp, vphp", "title": "" }, { "docid": "a72281b904df9dac7773371b614e046d", "score": "0.63679534", "text": "def H(v):\n return round(v * H_SCALE_FACTOR)", "title": "" }, { "docid": "d5f1f8570a16313fc5519907868ba6e2", "score": "0.62672263", "text": "def HF(v):\n return v * H_SCALE_FACTOR", "title": "" }, { "docid": "bf98871ab20dcee8f270247efbb2b970", "score": "0.6217968", "text": "def transform_grid(u_range, v_range, H):\n\n\n\n grid_u, grid_v = np.meshgrid( u_range, v_range )\n\n u_flat = np.expand_dims(np.ndarray.flatten(grid_u), 1)\n v_flat = np.expand_dims(np.ndarray.flatten(grid_v), 1)\n points = np.concatenate([u_flat, v_flat],1)\n \n return points, transform_points(points, H)", "title": "" }, { "docid": "fd5a971e3470fcbe933284dc9f3d6c18", "score": "0.6207214", "text": "def kep_h_vec(x, y, z, u, v, w):\n return np.array((y*w-z*v, z*u-x*w, x*v-y*u))", "title": "" }, { "docid": "643bb2a6e7cde83ddc9eab2e483bbbeb", "score": "0.61572105", "text": "def gas_h(v,l):\n return l*vdwg_f(v) + (1-l)*ig_g(v)", "title": "" }, { "docid": "9dabb9a03155f7f724def804549dcacb", "score": "0.61046153", "text": "def t2v(self, torSH, l_max=14):\n z = self._convert_SHin(torSH, l_max=l_max)\n _, dth, dph, _ = _sht.shtools.MakeGravGridDH(z, 3480, 3480, lmax=l_max, sampling=2)\n vtht = -dph\n vpht = dth\n return vtht, vpht", "title": "" }, { "docid": "7706c7331c9b0aa8cfa617cc0e3d03e9", "score": "0.6003059", "text": "def dph_v(self, vSH, l_max=14, Nth=None):\n if Nth is None:\n lm = l_max\n else:\n lm = (Nth-2)/2\n SH = self._convert_SHin(vSH, l_max=l_max)\n out = _sht.shtools.MakeGravGridDH(SH, 3480, 3480, sampling=2, normal_gravity=0, lmax_calc=l_max, lmax=lm)\n dph_v = out[2]\n return dph_v", "title": "" }, { "docid": "fbe066d307df53f631916d66c72aadd5", "score": "0.59562176", "text": "def v2vSH_allT(self, v_t, l_max=None):\n lm = (v_t.shape[1]-2)//2\n if l_max is not None:\n lm = min(l_max, lm)\n vSH_t = _np.empty((v_t.shape[0],2,lm+1,lm+1))\n for i in range(v_t.shape[0]):\n vSH_t[i,:,:,:] = self.v2vSH(v_t[i,:,:], l_max=lm)\n return vSH_t", "title": "" }, { "docid": "1c52b08f1ed26652e66e8c7bb73e2163", "score": "0.5905538", "text": "def buildLocalH(Jh, hz):\n dLoc = int(np.sqrt(d))\n Sx, Sy = np.array([[0., 1.], [1., 0.]]), np.array([[0., -1.j], [1.j, 0.]])\n Sz, Id = np.diag([1., -1.]), np.eye(dLoc)\n bulkLocH = Jh * np.kron(Sx, Sx) + Jh * np.kron(Sy, Sy) \\\n + (hz / 2.) * (np.kron(Sz, Id) + np.kron(Id, Sz))\n lBryLocH = Jh * np.kron(Sx, Sx) + Jh * np.kron(Sy, Sy) \\\n + hz * np.kron(Sz, Id) + (hz / 2.) * np.kron(Id, Sz)\n rBryLocH = Jh * np.kron(Sx, Sx) + Jh * np.kron(Sy, Sy) \\\n + (hz / 2.) 
* np.kron(Sz, Id) + hz * np.kron(Id, Sz)\n\n h = [bulkLocH.real.reshape(dLoc, dLoc, dLoc, dLoc) for n in range(length-1-2)]\n h = [lBryLocH.real.reshape(dLoc, dLoc, dLoc, dLoc)] + h \\\n + [rBryLocH.real.reshape(dLoc, dLoc, dLoc, dLoc)]\n\n print \"theH\", type(h), len(h), \"dLoc\", dLoc, \"Jex\", Jh, \"hz\", hz\n for n in range(length-1): print h[n].reshape(dLoc * dLoc, dLoc * dLoc)\n\n #return h\n\n SxId, IdSx = np.kron(Sx, Id), np.kron(Id, Sx)\n SyId, IdSy = np.kron(Sy, Id), np.kron(Id, Sy)\n SzId, IdSz = np.kron(Sz, Id), np.kron(Id, Sz)\n IdId = np.kron(Id, Id)\n\n bulkHl = Jh * np.kron(SxId, SxId) + Jh * np.kron(SyId, SyId) \\\n + (hz / 2.) * (np.kron(SzId, IdId) + np.kron(IdId, SzId))\n lBryHl = Jh * np.kron(SxId, SxId) + Jh * np.kron(SyId, SyId) \\\n + hz * np.kron(SzId, IdId) + (hz / 2.) * np.kron(IdId, SzId)\n rBryHl = Jh * np.kron(SxId, SxId) + Jh * np.kron(SyId, SyId) \\\n + (hz / 2.) * np.kron(SzId, IdId) + hz * np.kron(IdId, SzId)\n\n bulkHr = Jh * np.kron(IdSx, IdSx) + Jh * np.kron(IdSy, IdSy) \\\n + (hz / 2.) * (np.kron(IdSz, IdId) + np.kron(IdId, IdSz))\n lBryHr = Jh * np.kron(IdSx, IdSx) + Jh * np.kron(IdSy, IdSy) \\\n + hz * np.kron(IdSz, IdId) + (hz / 2.) * np.kron(IdId, IdSz)\n rBryHr = Jh * np.kron(IdSx, IdSx) + Jh * np.kron(IdSy, IdSy) \\\n + (hz / 2.) * np.kron(IdSz, IdId) + hz * np.kron(IdId, IdSz)\n\n hMpo = [(bulkHl + bulkHr).real.reshape(d, d, d, d) for n in range(length-1-2)]\n hMpo = [(lBryHl + lBryHr).real.reshape(d, d, d, d)] + hMpo \\\n + [(rBryHl + rBryHr).real.reshape(d, d, d, d)]\n\n print \"Hmpo\", type(hMpo), len(hMpo), \"d\", d, \"Jex\", Jh, \"hz\", hz\n for n in range(length-1): print hMpo[n].reshape(d * d, d * d)\n\n return hMpo", "title": "" }, { "docid": "df5f31655c583af77893b7c4307922a7", "score": "0.58849996", "text": "def HHL_creator(Hm=250.,nx=300,ny=300,nz=51,a=20.,ay=False,surftopo='cos2'):\n \n # Namelist constants \n ivctype = 2\n vcflat = 6000.\n zz_top = 9999.\n\n \n x = np.arange(1,nx+1)\n y = np.arange(1,ny+1)\n _ks = np.arange(0,nz)\n zak = _ks * 0.\n zbk = _ks * 0.\n\n HHL = np.zeros((nz,nx,ny))\n \n X,Y = np.meshgrid(x,y)\n c = nx/2.+0.5\n R2=(X-c)**2+(Y-c)**2\n hsurf=np.zeros(R2.shape)\n \n \n if surftopo=='gauss':\n #hsurf = Hm*(2**(-(R2)/(a**2))) #symmetric Gaussian\n if not ay: ay = a\n hsurf = Hm*(2.**(-((X-c)/a)**2-((Y-c)/ay)**2))\n if surftopo=='bell':\n hsurf = Hm/(1+(R2)/(a**2)) \n if surftopo=='bell1.5':\n hsurf = Hm/(1+(R2)/(a**2))**1.5 \n if surftopo=='cos2':\n hsurf = Hm*np.cos(np.pi/4.*(R2)/(a**2))**2\n hsurf[R2>2*a**2] = 0.\n \n #hsurf[np.where(hsurf < 0.001)] = 0.\n HHL[-1,:,:] = hsurf \n\n kflat = 0\n # Inverse coordinate transfromation to obtain zak and zbk\n for k in _ks:\n if vcoordvec[k] >= vcflat:\n zak[k] = vcoordvec[k]\n zbk[k] = 0.\n kflat = k\n else:\n zak[k] = vcoordvec[k]\n zbk[k] = (vcflat -vcoordvec[k])/vcflat\n\n # Calcualte HHL\n for k in range(nz-1):\n HHL[k,:,:] = zak[k]+zbk[k]*HHL[-1,:,:]\n\n print kflat\n return HHL", "title": "" }, { "docid": "ec92c2cd0ed229297c0b0f4b3a0e88b1", "score": "0.5883868", "text": "def dth_v(self, vSH, l_max=14, Nth=None):\n\n if Nth is None:\n lm = l_max\n else:\n lm = (Nth-2)/2\n SH = self._convert_SHin(vSH, l_max=l_max)\n out = _sht.shtools.MakeGravGridDH(SH, 3480, 3480, sampling=2, normal_gravity=0, lmax_calc=l_max, lmax=lm)\n dth_v = out[1]\n return dth_v", "title": "" }, { "docid": "7e915d2b297451e392cf021d41eec6f9", "score": "0.5848832", "text": "def SVh(P, h, bw):\n\n pd = squareform(pdist(P[:,:2]))\n\n N = pd.shape[0]\n\n Z = list()\n\n for i in range(N):\n\n 
for j in range(i+1, N):\n\n if (pd[i,j] >= h-bw) and (pd[i,j] <= h+bw):\n\n Z.append(pd[i,2] - pd[j,2])\n\n\n return np.sum(Z)/(2*len(Z))", "title": "" }, { "docid": "01903bc3a8cb3f01bec9232aba007fa0", "score": "0.5840856", "text": "def simpleVoigt(vsh,vsv):\n return (vsh+vsv)*0.5", "title": "" }, { "docid": "cd0ef93d0ca393a92b8ecc2adc069795", "score": "0.5840297", "text": "def uv_transform(self, u, v):\r\n vprime = np.zeros_like(v)\r\n for i, av in enumerate(v):\r\n vprime[i] = brentq(lambda vp: self.dcopula_spline(u[i], vp) - av, 0, 1)\r\n return {'u':u, 'vprime':vprime}", "title": "" }, { "docid": "df26f51f4a7065ecb8d1b05c36f74e11", "score": "0.5838944", "text": "def h2d(h): \n return b2d(h2b(h))", "title": "" }, { "docid": "54f15ac1c2d14fb13ae4b0dbbeba3a70", "score": "0.5808614", "text": "def homography_solve():\n v = np.array([[674, 318],\n [546, 454],\n [676, 580],\n [800, 452]])\n\n u = np.array([[942, 301],\n [453, 402],\n [944, 517],\n [1426, 402]])\n\n U = np.zeros((8, 2*u.shape[0]))\n for k in range(2*u.shape[1]):\n U[2*k,:] = [u[k,0], u[k,1], 1, 0, 0, 0, -v[k, 0]*u[k, 0], -v[k, 0]*u[k, 1]]\n U[2*k+1,:] = [0, 0, 0, u[k,0], u[k,1], 1, -v[k, 1]*u[k, 0], -v[k, 1]*u[k, 1]]\n\n V = []\n for k in range(2*v.shape[1]):\n V.append(v[k,0])\n V.append(v[k,1])\n\n V = np.array(V)\n\n h = np.matmul(np.linalg.inv(np.matmul(U.T, U)), np.matmul(U.T, V))\n\n H = np.array([[h[0], h[1], h[2]],\n [h[3], h[4], h[5]],\n [h[6], h[7], 1]])\n return H", "title": "" }, { "docid": "3a22c210e94f7c1e18e05edc62f4f9aa", "score": "0.579553", "text": "def axial_to_cube(h):\n q, r = h\n return q, -q - r, r", "title": "" }, { "docid": "b0a0fa9b4739f32b2b2bdb2f3954a60e", "score": "0.57834774", "text": "def sh(val):\n return val*1.0/height", "title": "" }, { "docid": "9d807fb1f94a13903ef64895a9d1a1d3", "score": "0.57762897", "text": "def vSH2v_allT(self, vSH_t, l_max=None, Nth=None):\n if Nth is None and l_max is None:\n l_max = self._get_lmax(vSH_t[0])\n Nth = l_max * 2 + 2\n lm = l_max\n elif Nth is None:\n lm = l_max\n Nth = lm*2+2\n else:\n lm = (Nth-2)/2\n v_t = _np.empty((len(vSH_t), Nth, Nth*2))\n for i,vSH in enumerate(vSH_t):\n SH = self._convert_SHin(vSH, l_max=l_max)\n v_t[i,:,:] = _sht.shtools.MakeGridDH(SH, norm=1, sampling=2, csphase=1, lmax_calc=l_max, lmax=lm)\n return v_t", "title": "" }, { "docid": "952adb80295f01336876b77039daea7a", "score": "0.5763026", "text": "def hsv_standardization(H, S, V):\n return int(H)*2, int(S)/255, int(V)/255", "title": "" }, { "docid": "a7a08bc3de5c27847eda41a181bc1591", "score": "0.57597345", "text": "def get_zw(zeta, h, vgrid):\n\n ti = zeta.shape[0]\n zw = np.empty((ti, vgrid.Np) + h.shape, 'd')\n if vgrid.Vtrans == 1:\n for n in range(ti):\n for k in range(vgrid.Np):\n z0 = vgrid.hc * vgrid.s_w[k] + (h - vgrid.hc) * vgrid.Cs_w[k]\n zw[n, k, :] = z0 + zeta[n, :] * (1.0 + z0 / h)\n elif vgrid.Vtrans == 2 or vgrid.Vtrans == 4 or vgrid.Vtrans == 5:\n for n in range(ti):\n for k in range(vgrid.Np):\n z0 = (vgrid.hc * vgrid.s_w[k] + h * vgrid.Cs_w[k]) / (vgrid.hc + h)\n zw[n, k, :] = zeta[n, :] + (zeta[n, :] + h) * z0\n\n return zw", "title": "" }, { "docid": "ae63930de38f6f416037ef1be670aa61", "score": "0.57507485", "text": "def _uvh_to_llh(self, index):\n longitude = (lerp(self._west, self._east, old_div(float(self.u[index]), MAX)))\n latitude = (lerp(self._south, self._north, old_div(float(self.v[index]), MAX)))\n height = self._dequantize_height(self.h[index])\n return longitude, latitude, height", "title": "" }, { "docid": "994ce61a1a42015f7aa9094c5f380c7c", 
"score": "0.5696659", "text": "def hsv_to_hsl(cls, hsv):\n h, s, v = hsv\n\n s = float(s)/100\n v = float(v)/100\n l = 0.5 * v * (2 - s) # noqa: E741\n\n if l in [0, 1]:\n s = 0\n else:\n s = v * s / (1 - math.fabs(2 * l - 1))\n return (int(c) for c in [h, s * 100, l * 100])", "title": "" }, { "docid": "ce79db5c78fcdf2776cd49440090c6e3", "score": "0.56956947", "text": "def gs_rb_step(v,f,h2):\n \n u = v.copy()\n res = np.empty_like(v)\n \n res[1:-1:2,1:-1:2]=(u[0:-2:2,1:-1:2]+u[2: :2,1:-1:2]+\n u[1:-1:2,0:-2:2]+u[1:-1:2,2: :2]-\n 4*u[1:-1:2,1:-1:2])/h2 +\\\n u[1:-1:2,1:-1:2]**2-f[1:-1:2,1:-1:2]\n \n \n u[1:-1:2,1:-1:2]-=res[1:-1:2,1:-1:2]/(\n -4.0/h2+2*u[1:-1:2,1:-1:2])\n \n res[2:-2:2,2:-2:2]=(u[1:-3:2,2:-2:2]+u[3:-1:2,2:-2:2]+\n u[2:-2:2,1:-3:2]+u[2:-2:2,3:-1:2]-\n 4*u[2:-2:2,2:-2:2])/h2 +\\\n u[2:-2:2,2:-2:2]**2-f[2:-2:2,2:-2:2]\n \n u[2:-2:2,2:-2:2]-=res[2:-2:2,2:-2:2]/(\n -4.0/h2+2*u[2:-2:2,2:-2:2])\n \n res[2:-2:2,1:-1:2]=(u[1:-3:2,1:-1:2]+u[3:-1:2,1:-1:2]+\n u[2:-2:2,0:-2:2]+u[2:-2:2,2: :2]-\n 4*u[2:-2:2,1:-1:2])/h2 +\\\n u[2:-2:2,1:-1:2]**2-f[2:-2:2,1:-1:2]\n u[2:-2:2,1:-1:2]-=res[2:-2:2,1:-1:2]/(\n -4.0/h2+2*u[2:-2:2,1:-1:2])\n res[1:-1:2,2:-2:2]=(u[0:-2:2,2:-2:2]+u[2: :2,2:-2:2]+\n u[1:-1:2,1:-3:2]+u[1:-1:2,3:-1:2]-\n 4*u[1:-1:2,2:-2:2])/h2 +\\\n u[1:-1:2,2:-2:2]**2-f[1:-1:2,2:-2:2]\n u[1:-1:2,2:-2:2]-=res[1:-1:2,2:-2:2]/(\n -4.0/h2+2*u[1:-1:2,2:-2:2])\n return u", "title": "" }, { "docid": "1753008cb2042eb54d4096226add0d87", "score": "0.56858146", "text": "def filter_hsv_to_h(hsv):\n h = hsv[:, :, 0]\n h = h.flatten()\n h *= 360\n h = h.astype(np.uint8)\n return h", "title": "" }, { "docid": "50efdfa01f09e12c1e807760c78b28ca", "score": "0.5645766", "text": "def h4(bigr, vbar, vstar, sigma, z, s):\n\n return (np.log((bigr * (vbar / z))) + (vstar + sigma ** 2) * s) / (sigma * np.sqrt(s))", "title": "" }, { "docid": "42747dd99b6f978560dba2a71d97c881", "score": "0.56292456", "text": "def H(state_vector: np.ndarray) -> np.ndarray:\n px, py, vx, vy = state_vector\n rho = np.sqrt(np.square(px) + np.square(py))\n phi = np.arctan2(py, px)\n rho_dot = (px * vx + py * vy) / rho\n\n return np.array([rho, phi, rho_dot])", "title": "" }, { "docid": "68f54e55ae8c10fd955c89750602d5df", "score": "0.56262845", "text": "def NN_H(deng_x_at_x0, deng_x_at_xnew, s, inv_H):\n\n direction = -np.dot(inv_H, deng_x_at_x0.flatten())\n return direction.reshape(-1, 3), inv_H", "title": "" }, { "docid": "197855c7c792241bd72b1ddde7ae2742", "score": "0.5618397", "text": "def make_grid_hermitian(guv):\n\n # Make mirror image, then add its conjugate to the original grid.\n # This is not the same concept as hermitian matrices, as:\n # 1) Not the same symetry as transposition\n # 2) We mirror on the zero point, which is off-center if the grid\n # has even size\n if guv.shape[0] % 2 == 0:\n guv[1:,1:] += numpy.conj(guv[:0:-1,:0:-1])\n # Note that we ignore the high frequencies here\n else:\n guv += numpy.conj(guv[::-1,::-1])\n return guv", "title": "" }, { "docid": "02a3575a67ab30557864bf792c5b006a", "score": "0.56180036", "text": "def P_RVH2HRV():\n\treturn np.array([[0, 1, 0],\n\t\t\t\t\t [0, 0, 1],\n\t\t\t\t\t [1, 0, 0]])", "title": "" }, { "docid": "ae20619f4f777869c84abac4fdd49890", "score": "0.56104875", "text": "def sph_convert(v):\n x2 = ek.pow(v.x, 2)\n y2 = ek.pow(v.y, 2)\n z2 = ek.pow(v.z, 2)\n\n r = ek.sqrt(x2+y2+z2)\n phi = ek.atan2(v.y, v.x)\n theta = ek.atan2(ek.sqrt(x2+y2), v.z)\n\n return r, theta, phi", "title": "" }, { "docid": "d369947880f0b2f21106f71186a72472", "score": "0.5610251", "text": "def 
warp_image(image, H):\n\n H_inv = np.linalg.inv(H) \n H_inv = H_inv / H_inv[2,2]\n # u == x\n # v == y\n \n\n orig_u_range = np.arange(image.shape[1])\n orig_v_range = np.arange(image.shape[0])\n\n _, transformed_image, = transform_grid(orig_u_range, orig_v_range, H)\n \n min_u=int(np.min(transformed_image[:,0]))\n max_u=int(np.max(transformed_image[:,0]))\n min_v=int(np.min(transformed_image[:,1]))\n max_v=int(np.max(transformed_image[:,1]))\n\n mapped_u_range = np.arange(min_u, max_u)\n mapped_v_range = np.arange(min_v, max_v)\n \n \n\n target_image = np.zeros((max_v-min_v, max_u-min_u,3))\n\n\n transformed_points, inv_transformed_image = transform_grid(mapped_u_range, mapped_v_range, H_inv)\n\n def fill_channel(target, channel, batch_size=64):\n I_cont = RectBivariateSpline(orig_v_range, orig_u_range, image[:,:,channel])\n\n n_iters =int( len(inv_transformed_image) / batch_size )\n \n for i in range(n_iters + 1):\n start = i * batch_size\n end = (i+1) * batch_size\n \n mapped_u_batch = inv_transformed_image[start:end, 0].ravel()\n mapped_v_batch = inv_transformed_image[start:end, 1].ravel()\n \n u_batch = transformed_points[start:end, 0].ravel()\n v_batch = transformed_points[start:end, 1].ravel()\n\n target[v_batch-min_v, u_batch-min_u, channel] = I_cont(mapped_v_batch, mapped_u_batch, grid=False)\n\n fill_channel(target_image, 0)\n fill_channel(target_image, 1)\n fill_channel(target_image, 2)\n\n return target_image, min_u, min_v", "title": "" }, { "docid": "ed1edefb1fd2bd49e0e0cad57e41812d", "score": "0.55999047", "text": "def vh(self):\n return self.veto[:, 1]", "title": "" }, { "docid": "ed1edefb1fd2bd49e0e0cad57e41812d", "score": "0.55999047", "text": "def vh(self):\n return self.veto[:, 1]", "title": "" }, { "docid": "ab3fb1b1baf448ffd2cdcb47a3ea6e78", "score": "0.55882305", "text": "def _buildS(vs, dims, s, h): \n if (len(dims) == 1): return [vs[0][0]]\n res = []\n ctr = 0\n for i in range(len(dims)):\n for j in range(i + 1, len(dims)):\n res.append(s[i]*h[i]*vs[ctr][0] + s[j]*h[j]*vs[ctr][1])\n ctr += 1\n return res", "title": "" }, { "docid": "70bcdc6af81768c791f44ff44edbee6b", "score": "0.55776536", "text": "def csHvp(obj,x,v,eps=1e-20, test = False):\n y, cg = obj(x+eps*1.0j*v)\n if test:\n dy = np.imag(y)/eps\n g = np.real(cg)\n assert abs(g@v - dy) < np.sqrt( eps * (g@g) * (v@v) )\n return np.imag(cg)/eps", "title": "" }, { "docid": "f0aa08b2d5cd638ff7d5b3f841429245", "score": "0.55746555", "text": "def funcGH(p, x):\n A, xo, s, h3, h4, zerolev= p\n \n return gausshermiteh3h4(x, A, xo, s, h3, h4) + zerolev", "title": "" }, { "docid": "8731c2c6fd2cc28a02531fc44b17a9b8", "score": "0.55685675", "text": "def uv2ws(u,v):\n ws=np.sqrt(u*u+v*v)\n return(ws)", "title": "" }, { "docid": "54fa3e08092c6486678fe9e1d34e3af1", "score": "0.5559359", "text": "def kep_h_norm(x, y, z, u, v, w):\n return np.sqrt( (y*w-z*v)**2 + (z*u-x*w)**2 + (x*v-y*u)**2 )", "title": "" }, { "docid": "cb4ccbafd6df9a773453634049d909a9", "score": "0.5554707", "text": "def convgridone(a, pi, fi, gcf, v):\n \tsx, sy= gcf[0][0].shape[0]/2, gcf[0][0].shape[1]/2", "title": "" }, { "docid": "5815a7e24e8ef8399f372e2dfe0b15bc", "score": "0.55542046", "text": "def v(i, j, d):\n return 81 * (i - 1) + 9 * (j - 1) + d", "title": "" }, { "docid": "5815a7e24e8ef8399f372e2dfe0b15bc", "score": "0.55542046", "text": "def v(i, j, d):\n return 81 * (i - 1) + 9 * (j - 1) + d", "title": "" }, { "docid": "5815a7e24e8ef8399f372e2dfe0b15bc", "score": "0.55542046", "text": "def v(i, j, d):\n return 81 * (i - 1) + 9 * (j - 1) + d", 
"title": "" }, { "docid": "6ac00c2a0bb33480150c213b2e4a5d1a", "score": "0.554963", "text": "def hess(self, x, z):\r\n return self.H", "title": "" }, { "docid": "0ba52505506db36e8e17afba57756f35", "score": "0.55351645", "text": "def get_U(h):\n return m*g*h", "title": "" }, { "docid": "9608747fdcabc6fedbb4813705b18b63", "score": "0.55350894", "text": "def H_ss(self,i,j):\n if i==0 and j==0:\n assert(self.np>0)\n return self.H[0:self.np, 0:self.np]\n elif i==1 and j==0:\n assert(self.np>0)\n assert(self.nq>0)\n return self.H[self.np:self.np+self.nq , 0:self.np]\n elif i==0 and j==1:\n assert(self.np>0)\n assert(self.nq>0)\n return self.H[0:self.np , self.np:self.np+self.nq]\n elif i==1 and j==1:\n assert(self.nq>0)\n return self.H[self.np:self.np+self.np+self.nq, self.np:self.np+self.nq]", "title": "" }, { "docid": "4f7d7398e31a95f4131dc404fc928f35", "score": "0.55130386", "text": "def get_h2d(nx, ny, l=700, z=0.5, dx=0.8, dy=0.8):\n k = 2.0 * np.pi / l\n\n x_vec = (np.arange(1, nx+1).reshape(1, -1) - nx/2)*dx\n x = np.dot(np.ones((nx, 1)), x_vec)\n y_vec = (np.arange(1, ny+1).reshape(-1, 1) - ny/2)*dy\n y = y_vec * np.ones((1, ny))\n \n #k = 2.0 * np.pi / l\n return np.exp(1j * k * z) / (1j * l * z) * np.exp((1j * k / (2 * z)) * \n (np.power(x, 2) + np.power(y, 2)))", "title": "" }, { "docid": "a6b61b8a4d467f7fd58dcd3006799893", "score": "0.5499626", "text": "def toHSL(self):\r\n L = (2 - self.S) * self.V / 2\r\n if L != 0:\r\n if L == 1:\r\n s = 0\r\n elif L < 0.5:\r\n S = self.S * self.V / (L * 2)\r\n else:\r\n S = self.S * self.V / (2 - L * 2)\r\n\r\n return colour([self.H, S, L])", "title": "" }, { "docid": "b3b0fe07026cf8635c4728affc8d3d47", "score": "0.549856", "text": "def zprimeuvcoordmap(u, v):\n return int(n * (n + 1) + (v * (v + 1)) / 2 + u)", "title": "" }, { "docid": "1f2637017079624c684f326b5fd53bdf", "score": "0.54801065", "text": "def _update_H(self):\n numerator = np.abs(self.W.T @ (self.S - self.X)) - (self.W.T @ (self.S - self.X))\n denominator = 2 * self.W.T @ self.W @ self.H\n self.H = self.H * numerator / denominator", "title": "" }, { "docid": "9b84218c16ff1063ffed33f7010b7403", "score": "0.54800856", "text": "def depth(vtransform=1, h=None, hc=100, scoord=None,\n stretching=None, zeta=0, w_grid=False):\n if h is None or scoord is None or stretching is None:\n raise AttributeError(\"you must supply h, scoord, and stretching\")\n if scoord.size != stretching.size:\n raise ValueError(\n \"the stretching and scoord arrays must be the same size\")\n N = scoord.size\n hinv = 1 / h\n h = np.asanyarray(h)\n wk = 0\n r = range(N)\n if w_grid:\n N = N + 1\n wk = 1\n z = np.zeros(np.hstack((N, h.shape)))\n\n if vtransform == 1:\n cff = hc * (scoord - stretching)\n for k in r:\n z0 = cff[k] + stretching[k] * h\n z[k + wk, :] = z0 + zeta * (1.0 + z0 * hinv)\n elif vtransform == 2:\n cff = 1 / (hc + h)\n for k in r:\n cff1 = hc * scoord[k] + h * stretching[k]\n z[k + wk, :] = zeta + (zeta + h) * cff * cff1\n else:\n raise ValueError(\"transform value must be between 1 and 2\")\n if w_grid:\n z[0, :] = -h\n\n return z", "title": "" }, { "docid": "9b84218c16ff1063ffed33f7010b7403", "score": "0.54800856", "text": "def depth(vtransform=1, h=None, hc=100, scoord=None,\n stretching=None, zeta=0, w_grid=False):\n if h is None or scoord is None or stretching is None:\n raise AttributeError(\"you must supply h, scoord, and stretching\")\n if scoord.size != stretching.size:\n raise ValueError(\n \"the stretching and scoord arrays must be the same size\")\n N = scoord.size\n hinv = 1 / h\n h 
= np.asanyarray(h)\n wk = 0\n r = range(N)\n if w_grid:\n N = N + 1\n wk = 1\n z = np.zeros(np.hstack((N, h.shape)))\n\n if vtransform == 1:\n cff = hc * (scoord - stretching)\n for k in r:\n z0 = cff[k] + stretching[k] * h\n z[k + wk, :] = z0 + zeta * (1.0 + z0 * hinv)\n elif vtransform == 2:\n cff = 1 / (hc + h)\n for k in r:\n cff1 = hc * scoord[k] + h * stretching[k]\n z[k + wk, :] = zeta + (zeta + h) * cff * cff1\n else:\n raise ValueError(\"transform value must be between 1 and 2\")\n if w_grid:\n z[0, :] = -h\n\n return z", "title": "" }, { "docid": "2f5a0b4558f42c11306196a6de4d9093", "score": "0.54766303", "text": "def get_h_given_v_dir(self,visible_minibatch):\n \n assert self.weight_v_to_h is not None\n\n n_samples = visible_minibatch.shape[0]\n\n # [TODO TASK 4.2] perform same computation as the function 'get_h_given_v' but with directed connections (replace the zeros below) \n \n return np.zeros((n_samples,self.ndim_hidden)), np.zeros((n_samples,self.ndim_hidden))", "title": "" }, { "docid": "46ac6d85ab50b463e2cdc8bf41288844", "score": "0.547306", "text": "def SV(P, hs, bw):\n\n sv = list()\n\n for h in hs:\n sv.append(SVh(P, h, bw))\n\n sv = [[hs[i], sv[i]] for i in range(len(hs)) if sv[i] > 0]\n\n return np.array(sv).T", "title": "" }, { "docid": "d2fbf1d6bf4e128283df23c90980b2e1", "score": "0.5471187", "text": "def gs_rb_step(v,f,h2):\n u=v.copy()\n \n res=np.empty_like(v)\n \n res[1:-1:2,1:-1:2]=(\n u[0:-2:2,1:-1:2] + u[2: :2,1:-1:2] + \n u[1:-1:2,0:-2:2] + u[1:-1:2,2: :2] - \n 4*u[1:-1:2,1:-1:2]\n ) / h2 + \\\n u[1:-1:2,1:-1:2]**2 - f[1:-1:2,1:-1:2]\n \n u[1:-1:2,1:-1:2] -= res[1:-1:2,1:-1:2] / (\n -4.0/h2+2*u[1:-1:2,1:-1:2])\n \n res[2:-2:2,2:-2:2] = (u[1:-3:2,2:-2:2] + u[3:-1:2,2:-2:2] + \n u[2:-2:2,1:-3:2]+u[2:-2:2,3:-1:2] - \n 4*u[2:-2:2,2:-2:2])/h2 +\\\n u[2:-2:2,2:-2:2]**2 - f[2:-2:2,2:-2:2]\n \n u[2:-2:2,2:-2:2] -= res[2:-2:2,2:-2:2] / (\n -4.0/h2+2*u[2:-2:2,2:-2:2])\n \n res[2:-2:2,1:-1:2]=(u[1:-3:2,1:-1:2]+u[3:-1:2,1:-1:2]+\n u[2:-2:2,0:-2:2]+u[2:-2:2,2: :2]-\n 4*u[2:-2:2,1:-1:2])/h2 +\\\n u[2:-2:2,1:-1:2]**2-f[2:-2:2,1:-1:2]\n \n u[2:-2:2,1:-1:2]-=res[2:-2:2,1:-1:2]/(\n -4.0/h2+2*u[2:-2:2,1:-1:2])\n \n res[1:-1:2,2:-2:2]=(u[0:-2:2,2:-2:2]+u[2: :2,2:-2:2]+\n u[1:-1:2,1:-3:2]+u[1:-1:2,3:-1:2]-\n 4*u[1:-1:2,2:-2:2])/h2 +\\\n u[1:-1:2,2:-2:2]**2-f[1:-1:2,2:-2:2]\n \n u[1:-1:2,2:-2:2]-=res[1:-1:2,2:-2:2]/(\n -4.0/h2+2*u[1:-1:2,2:-2:2])\n \n return u", "title": "" }, { "docid": "931576962a4eb1a163281beeddade6f8", "score": "0.5464731", "text": "def get_h_given_v_dir(self, visible_minibatch):\n assert self.weight_v_to_h is not None\n\n # [TODO TASK 4.2] perform same computation as the function 'get_h_given_v' but with directed connections (replace the zeros below)\n p_h = sigmoid(self.bias_h + visible_minibatch @ self.weight_v_to_h)\n h = sample_binary(p_h)\n return p_h, h", "title": "" }, { "docid": "ea2eb7151b8cc78079a773193c404f36", "score": "0.5452016", "text": "def scale(tl, br, vhc_tl):\n\n tl_h_new = tl[0] + vhc_tl[0]\n tl_w_new = tl[1] + vhc_tl[1]\n br_h_new = br[0] + vhc_tl[0]\n br_w_new = br[1] + vhc_tl[1]\n\n return (tl_h_new, tl_w_new), (br_h_new, br_w_new)", "title": "" }, { "docid": "b68580ce7e4f172e8aee77692f7ea15e", "score": "0.54490983", "text": "def toHSL(self):\r\n\r\n # Luv -> XYZ now:\r\n newXYZ = CIELuv.toCIEXYZ(self)\r\n # Luv -> XYZ completed\r\n\r\n # XYZ -> sRGB now:\r\n newsRGB = newXYZ.tosRGB()\r\n # XYZ -> sRGB completed\r\n\r\n # sRGB -> HSL now:\r\n newHSL = newsRGB.toHSL()\r\n # sRGB -> HSL completed\r\n\r\n return newHSL", "title": "" }, 
{ "docid": "c6b33e88010cfbec87ef3543964d603d", "score": "0.54408807", "text": "def set_vector_B(data, h):\n\treturn [((data[i+2][1] - data[i + 1][1])/h[i+1]) - ((data[i + 1][1] - data[i][1]) / h[i]) for i in range(len(h)-1)]", "title": "" }, { "docid": "d8180e3737da91f2d566cf656d6a9d2c", "score": "0.54406184", "text": "def get_h2d_inv(nx, ny, l=700, z=0.5, dx=0.8, dy=0.8):\n k = 2.0 * np.pi / l\n\n x_vec = (np.arange(1, nx+1).reshape(1, -1) - nx/2)*dx\n x = np.dot(np.ones((nx, 1)), x_vec)\n y_vec = (np.arange(1, ny+1).reshape(-1, 1) - ny/2)*dy\n y = y_vec * np.ones((1, ny))\n \n #k = 2.0 * np.pi / l\n return np.exp(-1j * k * z) / (1j * l * z) * np.exp((-1j * k / (2 * z)) * \n (np.power(x, 2) + np.power(y, 2)))", "title": "" }, { "docid": "8c470dc74925a2add1aba6f18722ad6e", "score": "0.543531", "text": "def gauss_seidel(h, k):\n\n m = int(1/h - 1) # The number of interior lattice points\n # in each direction\n U = np.zeros((m+2, m+2), dtype=float) # Initial solution (approx) array with\n # boundary conditions\n\n for k in range(1, k+1): # Gauss-Seidel method\n for j in range(1, m+1):\n for i in range(1, m+1):\n U[i,j] = 0.25*((U[i-1,j]+U[i+1,j]+U[i,j-1]+U[i,j+1])-h**2*f(i*h,j*h))\n\n return U", "title": "" }, { "docid": "79b8a881e0c179288b703b11f193cb56", "score": "0.54349786", "text": "def build_Hv(blocks,tb_l, tb_r,j12,v):\n # {{{\n\n n_blocks = len(blocks)\n assert(n_blocks == tb_l.n_blocks)\n assert(n_blocks == tb_r.n_blocks)\n H_dim_layout = [] # dimensions of Ham block as a tensor (d1,d2,..,d1',d2',...)\n H_dim_layout = np.append(tb_l.block_dims,tb_r.block_dims)\n \n \"\"\"\n form one-block and two-block terms of H separately\n 1-body\n\n for each block, form Hamiltonian in subspace, and combine\n with identity on other blocks\n\n 2-body\n \n for each block-dimer, form Hamiltonian in subspace, and combine\n with identity on other blocks\n \"\"\"\n # How many blocks are different between left and right?\n different = []\n for bi in range(0,n_blocks):\n if tb_l.address[bi] != tb_r.address[bi]:\n different.append(bi)\n #if len(different) > 2:\n # print \" Nothing to do, why are we here?\"\n # exit(-1)\n \n n_sig = v.shape[1] # number of sigma vectors \n\n Hv = np.zeros((tb_l.full_dim,n_sig))\n S2v = np.zeros((tb_l.full_dim,n_sig))\n\n\n # Add up all the one-body contributions, making sure that the results is properly dimensioned for the \n # target subspace\n\n if len(different) == 0:\n\n assert(tb_l.full_dim == tb_r.full_dim)\n full_dim = tb_l.full_dim\n #<abcd|H1+H2+H3+H4|abcd>\n #\n # <a|H1|a> Ib Ic Id\n # + Ia <b|H1|b> Ic Id + etc\n \n\n for bi in range(0,n_blocks):\n Bi = blocks[bi]\n dim_e = full_dim / tb_l.block_dims[bi] \n \n h1 = Bi.H_ss(tb_l.address[bi],tb_r.address[bi])\n s1 = Bi.S2_ss(tb_l.address[bi],tb_r.address[bi])\n\n #h = np.kron(h1,np.eye(dim_e)) \n h1.shape = (tb_l.block_dims[bi],tb_r.block_dims[bi])\n s1.shape = (tb_l.block_dims[bi],tb_r.block_dims[bi])\n\n \n #print \n #print tb_l, \" ||| \", tb_r\n # \n # restructure incoming trial vectors as a tensor\n v_ind = cp.deepcopy(tb_r.block_dims)\n v_ind.extend([n_sig])\n v_tens = v.reshape(v_ind)\n v_tens = v_tens.swapaxes(bi,0)\n\n h1v = np.tensordot(h1,v_tens,axes=(0,0) )\n s1v = np.tensordot(s1,v_tens,axes=(0,0) )\n h1v = h1v.swapaxes(bi,0)\n s1v = s1v.swapaxes(bi,0)\n \n Hv += h1v.reshape(tb_l.full_dim, n_sig)\n S2v += s1v.reshape(tb_l.full_dim, n_sig)\n \"\"\"\n tens_ind = []\n for bj in range(0,n_blocks):\n if bi!=bj:\n tens_ind.extend([bj])\n tens_ind.extend([bi])\n h1v = h1v.transpose(tens_ind)\n print 
h1v.shape\n \"\"\"\n \n\n # <ab|H12|ab> Ic Id\n # + <ac|H13|ac> Ib Id\n # + Ia <bc|H23|bc> Id + etc\n \n for bi in range(0,n_blocks):\n for bj in range(bi+1,n_blocks):\n Bi = blocks[bi]\n Bj = blocks[bj]\n dim_e = full_dim / tb_l.block_dims[bi] / tb_l.block_dims[bj]\n\n #build full Hamiltonian on sublattice\n h2,s2 = build_dimer_H(tb_l, tb_r, Bi, Bj, j12)\n h2.shape = (tb_l.block_dims[bi],tb_l.block_dims[bj],tb_r.block_dims[bi],tb_r.block_dims[bj])\n s2.shape = (tb_l.block_dims[bi],tb_l.block_dims[bj],tb_r.block_dims[bi],tb_r.block_dims[bj])\n\n # \n # restructure incoming trial vectors as a tensor\n # \n # <abcdef| h24 |abcdef> = <ce|h24|ce> I0 I1 I3 I4 I5\n #\n # v(0,1,2,3,4,5) => v(2,1,0,3,4,5) \n # => v(2,4,0,3,1,5) * h(2,4,2,4) = sig(2,4,0,3,1,5)\n # => sig(0,4,2,3,1,5)\n # => sig(0,1,2,3,4,5)\n #\n v_ind = cp.deepcopy(tb_r.block_dims)\n v_ind.extend([n_sig])\n v_tens = v.reshape(v_ind)\n \n sort_ind = [bi,bj]\n for bk in range(0,n_blocks+1):\n if bk != bi and bk != bj:\n sort_ind.extend([bk])\n v_tens = v_tens.transpose(sort_ind)\n \n sort_ind = np.argsort(sort_ind)\n\n h2v = np.tensordot(h2,v_tens,axes=([0,1],[0,1]) )\n s2v = np.tensordot(s2,v_tens,axes=([0,1],[0,1]) )\n \n h2v = h2v.transpose(sort_ind)\n s2v = s2v.transpose(sort_ind)\n\n Hv += h2v.reshape(tb_l.full_dim, n_sig)\n S2v += s2v.reshape(tb_l.full_dim, n_sig)\n \n \n \n elif len(different) == 1:\n\n full_dim_l = tb_l.full_dim\n full_dim_r = tb_r.full_dim\n #<abcd|H1+H2+H3+H4|abcd>\n #\n # <a|H1|a> Ib Ic Id , for block 1 being different\n\n\n bi = different[0] \n\n Bi = blocks[bi]\n dim_e_l = full_dim_l / tb_l.block_dims[bi] \n dim_e_r = full_dim_r / tb_r.block_dims[bi] \n h1 = Bi.H_ss(tb_l.address[bi],tb_r.address[bi])\n s1 = Bi.S2_ss(tb_l.address[bi],tb_r.address[bi])\n\n h1.shape = (tb_l.block_dims[bi],tb_r.block_dims[bi])\n s1.shape = (tb_l.block_dims[bi],tb_r.block_dims[bi])\n\n assert(dim_e_l == dim_e_r)\n dim_e = dim_e_l\n \n \n v_ind = cp.deepcopy(tb_r.block_dims)\n v_ind.extend([n_sig])\n v_tens = v.reshape(v_ind)\n \n sort_ind = [bi]\n for bk in range(0,n_blocks+1):\n if bk != bi:\n sort_ind.extend([bk])\n v_tens = v_tens.transpose(sort_ind)\n \n h1v = np.tensordot(h1,v_tens,axes=([1],[0]) )\n s1v = np.tensordot(s1,v_tens,axes=([1],[0]) )\n sort_ind = np.argsort(sort_ind)\n\n h1v = h1v.transpose(sort_ind)\n s1v = s1v.transpose(sort_ind)\n Hv += h1v.reshape(tb_l.full_dim, n_sig)\n S2v += s1v.reshape(tb_l.full_dim, n_sig)\n \n # <ab|H12|Ab> Ic Id\n # + <ac|H13|Ac> Ib Id\n # + <ad|H13|Ad> Ib Id\n \n for bj in range(0,bi):\n Bj = blocks[bj]\n dim_e_l = full_dim_l / tb_l.block_dims[bi] / tb_l.block_dims[bj]\n dim_e_r = full_dim_r / tb_r.block_dims[bi] / tb_r.block_dims[bj]\n \n assert(dim_e_l == dim_e_r)\n dim_e = dim_e_l\n \n #build full Hamiltonian on sublattice\n #h12 = build_dimer_H(tb_l, tb_r, Bi, Bj, j12)\n h2,s2 = build_dimer_H(tb_l, tb_r, Bj, Bi, j12)\n \n h2.shape = (tb_l.block_dims[bj],tb_l.block_dims[bi],tb_r.block_dims[bj],tb_r.block_dims[bi])\n s2.shape = (tb_l.block_dims[bj],tb_l.block_dims[bi],tb_r.block_dims[bj],tb_r.block_dims[bi])\n \n v_ind = cp.deepcopy(tb_r.block_dims)\n v_ind.extend([n_sig])\n v_tens = v.reshape(v_ind)\n \n sort_ind = [bj,bi]\n for bk in range(0,n_blocks+1):\n if bk != bi and bk != bj:\n sort_ind.extend([bk])\n v_tens = v_tens.transpose(sort_ind)\n \n h2v = np.tensordot(h2,v_tens,axes=([2,3],[0,1]) )\n s2v = np.tensordot(s2,v_tens,axes=([2,3],[0,1]) )\n\n sort_ind = np.argsort(sort_ind)\n \n h2v = h2v.transpose(sort_ind)\n s2v = s2v.transpose(sort_ind)\n\n Hv += 
h2v.reshape(tb_l.full_dim, n_sig)\n S2v += s2v.reshape(tb_l.full_dim, n_sig)\n \n for bj in range(bi+1, n_blocks):\n Bj = blocks[bj]\n dim_e_l = full_dim_l / tb_l.block_dims[bi] / tb_l.block_dims[bj]\n dim_e_r = full_dim_r / tb_r.block_dims[bi] / tb_r.block_dims[bj]\n \n assert(dim_e_l == dim_e_r)\n dim_e = dim_e_l\n \n #build full Hamiltonian on sublattice\n #h12 = build_dimer_H(tb_l, tb_r, Bi, Bj, j12)\n h2,s2 = build_dimer_H(tb_l, tb_r, Bi, Bj, j12)\n \n h2.shape = (tb_l.block_dims[bi],tb_l.block_dims[bj],tb_r.block_dims[bi],tb_r.block_dims[bj])\n s2.shape = (tb_l.block_dims[bi],tb_l.block_dims[bj],tb_r.block_dims[bi],tb_r.block_dims[bj])\n \n v_ind = cp.deepcopy(tb_r.block_dims)\n v_ind.extend([n_sig])\n v_tens = v.reshape(v_ind)\n \n sort_ind = [bi,bj]\n for bk in range(0,n_blocks+1):\n if bk != bi and bk != bj:\n sort_ind.extend([bk])\n v_tens = v_tens.transpose(sort_ind)\n \n h2v = np.tensordot(h2,v_tens,axes=([2,3],[0,1]) )\n s2v = np.tensordot(s2,v_tens,axes=([2,3],[0,1]) )\n\n sort_ind = np.argsort(sort_ind)\n \n h2v = h2v.transpose(sort_ind)\n s2v = s2v.transpose(sort_ind)\n\n Hv += h2v.reshape(tb_l.full_dim, n_sig)\n S2v += s2v.reshape(tb_l.full_dim, n_sig)\n \n elif len(different) == 2:\n \n full_dim_l = tb_l.full_dim\n full_dim_r = tb_r.full_dim\n #<abcd|H1+H2+H3+H4|abcd> = 0\n\n\n bi = different[0] \n bj = different[1] \n\n Bi = blocks[bi]\n Bj = blocks[bj]\n\n dim_e_l = full_dim_l / tb_l.block_dims[bi] / tb_l.block_dims[bj] \n dim_e_r = full_dim_r / tb_r.block_dims[bi] / tb_r.block_dims[bj] \n\n assert(dim_e_l == dim_e_r)\n dim_e = dim_e_l\n \n \n # <ac|H13|Ac> Ib Id for 1 3 different\n \n #build full Hamiltonian on sublattice\n #h12 = build_dimer_H(tb_l, tb_r, Bi, Bj, j12)\n h2,s2 = build_dimer_H(tb_l, tb_r, Bi, Bj, j12)\n \n h2.shape = (tb_l.block_dims[bi],tb_l.block_dims[bj],tb_r.block_dims[bi],tb_r.block_dims[bj])\n s2.shape = (tb_l.block_dims[bi],tb_l.block_dims[bj],tb_r.block_dims[bi],tb_r.block_dims[bj])\n \n v_ind = cp.deepcopy(tb_r.block_dims)\n v_ind.extend([n_sig])\n v_tens = v.reshape(v_ind)\n \n sort_ind = [bi,bj]\n for bk in range(0,n_blocks+1):\n if bk != bi and bk != bj:\n sort_ind.extend([bk])\n v_tens = v_tens.transpose(sort_ind)\n \n h2v = np.tensordot(h2,v_tens,axes=([2,3],[0,1]) )\n s2v = np.tensordot(s2,v_tens,axes=([2,3],[0,1]) )\n \n sort_ind = np.argsort(sort_ind)\n \n h2v = h2v.transpose(sort_ind)\n s2v = s2v.transpose(sort_ind)\n\n Hv += h2v.reshape(tb_l.full_dim, n_sig)\n S2v += s2v.reshape(tb_l.full_dim, n_sig)\n\n return Hv,S2v", "title": "" }, { "docid": "1bf5f2b68961338d49e55857f84e455b", "score": "0.54311264", "text": "def rp1_to_s1(v):\n x, y = v[0], v[1]\n\n return np.row_stack([\n 2*x*y / (x*x + y*y),\n (x*x - y*y) / (x*x + y*y)\n ])", "title": "" }, { "docid": "2e7e50509553eafae6b6ad31e54952db", "score": "0.54266065", "text": "def _buildS2(vs, dims, s, h): \n if (len(dims) == 1): return [vs[0][0]]\n res = []\n ctr = 0\n for i in range(len(dims)):\n for j in range(i + 1, len(dims)):\n res.append(s[i]*(1-2.0*h[i])*vs[ctr][0] + s[j]*(1-2.0*h[j])*vs[ctr][1])\n ctr += 1\n return res", "title": "" }, { "docid": "ce2c91fa9d62c0974ee65994f1744594", "score": "0.54170257", "text": "def gen_hcurve(size):\n\n if np.floor(np.log2(size)) != np.ceil(np.log2(size)):\n raise ValueError(\"Input must be a power of 2\")\n herp = np.zeros((size,size),dtype=np.int32)\n\n rx = 0\n ry = 0\n s = 0\n for y in range(size):\n for x in range(size):\n xloc = x # Avoid screwing up the loop counters\n yloc = y\n d = 0\n s = size//2\n while s > 0:\n rx = 1 if xloc & s > 
0 else 0\n ry = 1 if yloc & s > 0 else 0\n d += s * s * ((3 * rx) ^ ry)\n (xloc,yloc) = rot(s, xloc, yloc, rx, ry);\n s //= 2\n herp[x,y] = d\n return herp", "title": "" }, { "docid": "7a8e7a1df1467969117f883247d81664", "score": "0.5415452", "text": "def gqhh_vect(self):\n self.addToInvariantsDict(self.raw_housing, None, \"gqhh_vect\", groupings=None)", "title": "" }, { "docid": "e0118a2bac4ca4921c360148971210b5", "score": "0.5408607", "text": "def despill_algorithm0():\n hd, sd, vd, hu, su, vu = 36, 23, 203, 55, 225, 255\n H_mid, S_mid, V_mid = (hd+hu)/2, (sd+su)/2, (vd+vu)/2\n\n my_hsv = np.ones((720, 1280, 3), np.uint8)*0\n my_hsv[:,:,0] = H_mid\n my_hsv[:,:,1] = S_mid\n my_hsv[:,:,2] = V_mid\n\n # opencv convert\n bgr_from_hsv_opencv = cv2.cvtColor(my_hsv, cv2.COLOR_HSV2BGR)\n cv2.imshow('bgr image convert from my_hsv use opencv', bgr_from_hsv_opencv)\n print('opencv convert:',\n bgr_from_hsv_opencv[0,0,0],\n bgr_from_hsv_opencv[0,0,1],\n bgr_from_hsv_opencv[0,0,2],\n )\n\n # my convert\n H, S, V = hsv_standardization(H_mid, S_mid, V_mid)\n R, G, B = hsv2rgb(H, S, V)\n print('my convert', B, G, R)\n\n bgr_from_hsv_mine = create_colorful_image(R, G, B)\n cv2.imshow('bgr image convert from my_hsv use mine', bgr_from_hsv_mine)\n cv2.waitKey(0)", "title": "" }, { "docid": "d79706d73a709d2f3a2abceff4cd9fc0", "score": "0.5395183", "text": "def hsv(h, s, v):\n c = _checkHSV(h, s, v)\n return hsvToRgb(c[0], c[1], c[2])", "title": "" }, { "docid": "be8b1c6c8b3d6774f6ca8b802d124d25", "score": "0.5391297", "text": "def P_HRV2RVH():\n\treturn np.array([[0, 0, 1],\n\t\t\t\t\t [1, 0, 0],\n\t\t\t\t\t [0, 1, 0]])", "title": "" }, { "docid": "543c90e9e2666449fe0d82f686ec4657", "score": "0.5387486", "text": "def calc_sxv(p):\r\n\t\r\n\ts = random.getrandbits(256)\r\n\tx = calc_x(s,p)\r\n\tv = calc_v(x)\r\n\treturn (s,v)", "title": "" }, { "docid": "873f328849f2f35e6eb73da962ccb370", "score": "0.53869236", "text": "def H (coordinates):\n q = coordinates[0,:]\n p = coordinates[1,:]\n return KeplerNd.K(p) + KeplerNd.V(q)", "title": "" }, { "docid": "ff6f3054212ac7b6e89144a6d6eb077c", "score": "0.5377635", "text": "def Upper_Half_Space(grid, dim, value):\n data = np.zeros(grid.pts_each_dim)\n for i in range(grid.dims):\n if i == dim:\n data += -grid.vs[i] + value\n return data", "title": "" }, { "docid": "54c4ac6cf7f6597e6bedb42070f4a08e", "score": "0.53759974", "text": "def P_GEO2HRV(gamma):\n\treturn P_GEO2RVH(gamma).dot(P_RVH2HRV())", "title": "" }, { "docid": "a0ac5f25c312945c57ab50b52b1fd7fe", "score": "0.5372884", "text": "def _h(self, v, u, rotation=0, *theta):\n UU = np.asarray([u])\n VV = np.asarray([v])\n\n h3 = UU ** theta[0]\n h4 = VV ** theta[1]\n\n h3Mask = (h3 > h4)\n h4Mask = (h3 < h4)\n\n uu = np.zeros(len(UU))\n uu[h3Mask] = UU[h3Mask] ** (1. - theta[0])\n uu[h4Mask] = (1. 
- theta[1]) * UU[h4Mask] * VV[h4Mask] ** (-theta[1])\n return uu", "title": "" }, { "docid": "de24bf74cb16c88949932bbb02cccb1a", "score": "0.53658694", "text": "def __call__(self, D):\n return unshear(D, self.g)", "title": "" }, { "docid": "8be8b2a4a00bb77a2cedcb16d8e74612", "score": "0.5363109", "text": "def sfera(h):\r\n def sfera0(point):\r\n u,v,z = point\r\n fx=z*SIN(u)*COS(v)\r\n fy=z*SIN(u)*SIN(v)\r\n fz=h*COS(u)\r\n return fx,fy,fz\r\n return sfera0", "title": "" }, { "docid": "8ba3ad3b7503f672f9c64a89f6c0c618", "score": "0.5359351", "text": "def P_GEO2RVH(gamma):\n\treturn np.array([[np.cos(gamma), -np.sin(gamma), 0],\n\t\t \t\t\t [np.sin(gamma), np.cos(gamma), 0],\n\t\t \t\t\t [\t\t\t0,\t\t\t 0, 1]])", "title": "" }, { "docid": "3fe3525fcb964d7ef7bff2f21392b16e", "score": "0.53569645", "text": "def hsv_to_rgb(h, s, v):\n c = s * v\n m = v - c\n dh = (h % 1.) * 6.\n fmodu = dh % 2.\n x = c * (1 - jnp.abs(fmodu - 1))\n hcat = jnp.floor(dh).astype(jnp.int32)\n rr = jnp.where(\n (hcat == 0) | (hcat == 5), c, jnp.where(\n (hcat == 1) | (hcat == 4), x, 0)) + m\n gg = jnp.where(\n (hcat == 1) | (hcat == 2), c, jnp.where(\n (hcat == 0) | (hcat == 3), x, 0)) + m\n bb = jnp.where(\n (hcat == 3) | (hcat == 4), c, jnp.where(\n (hcat == 2) | (hcat == 5), x, 0)) + m\n return rr, gg, bb", "title": "" }, { "docid": "d497bd4a58dda49b4f2452dc3016d6b3", "score": "0.5355568", "text": "def g(v, y, d):\n return ((v, i / d) for i in range(y))", "title": "" }, { "docid": "b7c5ce44dd0d32c7fd393c3b438a690a", "score": "0.53553164", "text": "def tp2v(self, torSH, polSH, l_max=14):\n vtht, vpht = self.t2v(torSH, l_max=l_max)\n vthp, vphp = self.p2v(polSH, l_max=l_max)\n vth = vtht + vthp\n vph = vpht + vphp\n return vth, vph", "title": "" }, { "docid": "85f8658974302914dad368add7898be6", "score": "0.53530073", "text": "def h_proj1(state, fline, walls):\n global g_fline, g_walls\n ((x,y),(u,v)) = state\n \n # if there are no walls between state and finish, use h_esdist\n if edistw_to_finish((x,y), fline, walls) != infinity:\n return h_esdist(state, fline, walls)\n # update cache if necessary\n if fline != g_fline or walls != g_walls or grid == []:\n edist_grid(fline, walls)\n hval = float(grid[x][y])\n \n # add a small penalty to favor short stopping distances\n au = abs(u); av = abs(v); \n sdu = au*(au-1)/2.0\n sdv = av*(av-1)/2.0\n sd = max(sdu,sdv)\n penalty = sd/10.0\n\n # compute location after fastest stop, and add a penalty if it goes through a wall\n if u < 0: sdu = -sdu\n if v < 0: sdv = -sdv\n sx = x + sdu\n sy = y + sdv\n if rt.crash([(x,y),(sx,sy)],walls):\n penalty += math.sqrt(au**2 + av**2)\n hval = max(hval+penalty,sd)\n return hval*3", "title": "" }, { "docid": "436b044d0af399374927f3611225175e", "score": "0.53484124", "text": "def hd(x,y):\n return hw(x ^ y)", "title": "" }, { "docid": "bbead06fa7844151b9301ea88d15c5ff", "score": "0.53323394", "text": "def h(x):\n temp = np.zeros((x, x))\n temp[:(x - 1), :(x - 1)] = (np.sqrt(2.0 / (x * (x - 1))) *\n np.identity(x - 1))\n temp[x - 1, x - 1] = np.sqrt(2.0 / (x * (x - 1))) * (1 - x)\n return temp", "title": "" }, { "docid": "4b21b2a8f28eebf78282cb5dda46036a", "score": "0.53268033", "text": "def vsh(s, m, n, theta, phi, kr=0.05152015*(0.99993)):\r\n\r\n # Convert variable into numpy array\r\n if not isinstance(theta, np.ndarray):\r\n theta = np.array([theta])\r\n if not isinstance(phi, np.ndarray):\r\n phi = np.array([phi])\r\n\r\n # Dirty method to avoid dividing by 0 errors\r\n theta = theta + 0.0000001\r\n phi = phi + 0.00000001\r\n\r\n \t# 
Keeps last coord term from exceeding pi or 2pi boundaries\r\n if len(theta) > 1:\r\n theta[-1] = theta[-1] - 0.0000001 * 2\r\n if len(phi) > 1:\r\n phi[-1] = phi[-1] - 0.0000001 * 2\r\n\r\n # Calculate the first term (note the fudge factor kr)\r\n A = np.power(-1, (1 - s)) * np.power(1j, n - s)/kr #* np.exp(-1j * kr) / kr\r\n B = (2 * n + 1) / (4 * np.pi * n * (n + 1))\r\n C = np.math.factorial(n - m) / np.math.factorial(n + m)\r\n\r\n # Calculate the legendre values\r\n Pmn = scipy.special.lpmv(m, n, np.cos(theta))\r\n Pmn_d1 = np.sqrt(1 - np.power(np.cos(theta), 2)) * scipy.special.lpmv(m + 1, n, np.cos(theta))\r\n Pmn_d2 = m * np.cos(theta) * Pmn\r\n Pmn_d3 = np.power(np.cos(theta), 2) - 1\r\n Pmn_d = (Pmn_d1 + Pmn_d2) / Pmn_d3\r\n\r\n # Calculate the middle term\r\n D = ((1j * m) / np.sin(theta)) * Pmn\r\n E = np.sin(theta) * Pmn_d\r\n\r\n # Depending on the mode a certain combination of the inner term is used\r\n if s == 0:\r\n phase_corr = 1j\r\n vsh = A * np.sqrt(B * C) * np.exp(1j * m * phi)\r\n vsh_theta = np.power(-1,np.abs(m))*-phase_corr * vsh * D\r\n vsh_phi = np.power(-1,np.abs(m))*-phase_corr * vsh * E\r\n else:\r\n phase_corr = 1j\r\n vsh = A * np.sqrt(B * C) * np.exp(1j * m * phi)\r\n vsh_theta = np.power(-1, np.abs(m))*-1j * vsh * E\r\n vsh_phi = -1j * vsh * D\r\n\r\n return vsh_theta, vsh_phi", "title": "" }, { "docid": "c23b52a3d6bd0800c29a3d469473beb1", "score": "0.53236526", "text": "def H4(vec_x, h, k1, k2, k3, beta=1., g_inv=0.):\n b2 = beta*beta\n x = vec_x[0]\n px = vec_x[1]\n y = vec_x[2]\n py = vec_x[3]\n ps = vec_x[5]\n x2 = x*x\n y2 = y*y\n px2 = px*px\n py2 = py*py\n ps2 = ps*ps\n x1 = (px*(2.*ps2 - 2.*h*ps*x*beta + (px2 + py2)*b2))/(2.*b2)\n px1 = (-(3.*h*k2 + k3)*x2*x + 3.*(2.*h*k2 + k3)*x*y2 + (3.*h*ps*(px2 + py2))/beta)/6.\n y1 = py*(2.*ps2 - 2.*h*ps*x*beta + (px2 + py2)*b2)/(2.*b2)\n py1 = y*(3.*(2.*h*k2 + k3)*x2 + (h**2*k1 - h*k2 - k3)*y2)/6.\n sigma1 = ps*(px2 + py2)/(2.*b2) - (px2 + py2)*(h*x - ps/beta)/(2.*beta) + g_inv*g_inv*ps/b2\n return array([x1, px1, y1, py1, sigma1, 0.])", "title": "" }, { "docid": "f124a21a52986d870ea4fbffbfb87054", "score": "0.532218", "text": "def sphfn(vnu):\n \n # Code adapted Anna's f90 PROFILE (gridder.f90) code\n # which was adapted from Tim Cornwell's C++ SphFuncVisGridder\n # developed for CONRAD for ASKAP. **This seems to be commented\n # out of the currect ASKAPsoft code... not sure why**\n #\n # Stole this back from Anna!\n n_p = 4\n n_q = 2\n \n p = numpy.zeros((2, 5))\n q = numpy.zeros((2, 3))\n \n p[0, 0] = 8.203343e-2\n p[0, 1] = -3.644705e-1\n p[0, 2] = 6.278660e-1\n p[0, 3] = -5.335581e-1\n p[0, 4] = 2.312756e-1\n p[1, 0] = 4.028559e-3\n p[1, 1] = -3.697768e-2\n p[1, 2] = 1.021332e-1\n p[1, 3] = -1.201436e-1\n p[1, 4] = 6.412774e-2\n \n q[0, 0] = 1.0000000\n q[0, 1] = 8.212018e-1\n q[0, 2] = 2.078043e-1\n q[1, 0] = 1.0000000\n q[1, 1] = 9.599102e-1\n q[1, 2] = 2.918724e-1\n \n value = 0.\n \n if (vnu >= 0.) 
and (vnu < 0.75):\n part = 0\n nuend = 0.75\n elif (vnu >= 0.75) and (vnu <= 1.):\n part = 1\n nuend = 1.0\n else:\n value = 0.\n # nasty fortran-esque exit statement:\n return value\n \n top = p[part, 0]\n bot = q[part, 0]\n delnusq = vnu ** 2 - nuend ** 2\n \n for k in range(1, n_p + 1):\n factor = delnusq ** k\n top += p[part, k] * factor\n \n for k in range(1, n_q + 1):\n factor = delnusq ** k\n bot += q[part, k] * factor\n \n if bot != 0.:\n value = top / bot\n else:\n value = 0.\n \n if value < 0.: value = 0.\n \n return value", "title": "" }, { "docid": "7a197a20469ecd2388e339fa5b82f4ad", "score": "0.5310874", "text": "def h_esdist(state, fline, walls):\n ((x,y),(u,v)) = state\n ((x1,y1),(x2,y2)) = fline\n if ((x==x1 and y==y1)or (x==x2 and y==y2)) and u == 0 and v == 0:\n return 0\n m = math.sqrt(u**2 + v**2)\n stop_dist = m*(m-1)/2.0 + 1\n return max(h_edist(state, fline, walls)+stop_dist/10.0,stop_dist)*3", "title": "" }, { "docid": "65f345da9385cfa594b262dbff5ae5cc", "score": "0.53098017", "text": "def qkhfs( w, h ):\n g = 9.81\n x = w**2.0 *h/g\n y = sqrt(x) * (x<1.) + x *(x>=1.)\n # is this faster than a loop?\n t = tanh( y )\n y = y-( (y*t -x)/(t+y*(1.0-t**2.0)))\n t = tanh( y )\n y = y-( (y*t -x)/(t+y*(1.0-t**2.0)))\n t = tanh( y )\n y = y-( (y*t -x)/(t+y*(1.0-t**2.0)))\n kh = y\n return kh", "title": "" }, { "docid": "a61b575f64f43e000632f22c75955899", "score": "0.53097284", "text": "def h(self, s, sa):\n dx = abs(s.x - sa.x)\n dy = abs(s.y - sa.y)\n return self.STRAIGHT_COST * (dx + dy) + (self.DIAGONAL_COST - 2 * self.STRAIGHT_COST) * min(dx, dy)", "title": "" }, { "docid": "e6abb48a9314c747eb033a2442afd98d", "score": "0.5302184", "text": "def hsv_to_rgb(h, s, v):\n if s == 0.0:\n return v, v, v\n i = int(h * 6.0)\n f = (h * 6.0) - i\n p = v * (1.0 - s)\n q = v * (1.0 - s * f)\n t = v * (1.0 - s * (1.0 - f))\n i = i % 6\n\n v = int(v * 255)\n t = int(t * 255)\n p = int(p * 255)\n q = int(q * 255)\n\n if i == 0:\n return v, t, p\n if i == 1:\n return q, v, p\n if i == 2:\n return p, v, t\n if i == 3:\n return p, q, v\n if i == 4:\n return t, p, v\n if i == 5:\n return v, p, q", "title": "" }, { "docid": "279b890607f3e3469386490522737c9f", "score": "0.53008187", "text": "def snaptogrid(x, y, D):\n\n xgrid = np.arange(np.ceil(x[0]), np.floor(x[-1]), 1 / D)\n ygrid = np.zeros_like(xgrid)\n lastinrange = 0\n for i in range(x.size - 1):\n ycurr = y[i : (i + 2)]\n a = [[1, x[i]], [1, x[i + 1]]]\n ainv = np.linalg.inv(a)\n coeffs = np.matmul(ainv, ycurr)\n while xgrid[lastinrange] < x[i + 1]:\n ygrid[lastinrange] = coeffs[0] + coeffs[1] * xgrid[lastinrange]\n lastinrange += 1\n if lastinrange >= xgrid.size - 1:\n break\n if lastinrange >= xgrid.size - 1:\n break\n return xgrid, ygrid", "title": "" }, { "docid": "7effc185a1ffce3c3ee21a9a561a652d", "score": "0.53003985", "text": "def PW_S(self, z: float, horizontal: bool) -> float:\n ...", "title": "" }, { "docid": "7338e2b671dbfdf29b1f47e12897418e", "score": "0.5298363", "text": "def scale(v, s):\n\treturn Vektor(v[\"x\"]/v.lenght()*s, v[\"y\"]/v.lenght()*s)", "title": "" }, { "docid": "da693b182361b0b6fa3dd2f37f064113", "score": "0.52963024", "text": "def mse_times_horiz_divg(temp, hght, sphum, u, v, radius, dp):\n return field_times_horiz_divg(mse(temp, hght, sphum), u, v, radius, dp)", "title": "" }, { "docid": "6722922e5e84aa1992e40fd06374558d", "score": "0.5296009", "text": "def ssim(x, y):", "title": "" }, { "docid": "cd8199c30bde7a423491e147613ec9d2", "score": "0.52944094", "text": "def hsv2rgb(h,s,v):\n h = float(h)\n s = 
float(s)\n v = float(v)\n v *= 255.\n if s == 0:\n r = v\n g = v\n b = v\n else:\n if h == 360:\n h = 0\n elif h > 360:\n h -= 360\n elif h < 0:\n h += 360\n h /= 60.\n i = floor(h)\n f = h - i\n p = v * (1. - s)\n q = v * (1. - s * f)\n t = v * (1. - s * (1. - f))\n i = int(i)\n if i == 0:\n r = v\n g = t\n b = p\n elif i == 1:\n r = q\n g = v\n b = p\n elif i == 2:\n r = p\n g = v\n b = t\n elif i == 3:\n r = p\n g = q\n b = v\n elif i == 4:\n r = t\n g = p\n b = v\n elif i == 5:\n r = v\n g = p\n b = q\n r = round(r)\n g = round(g)\n b = round(b)\n # out\n return (int(r), int(g), int(b))", "title": "" }, { "docid": "2261a604250c408a435e4c33c74625e3", "score": "0.52926314", "text": "def VpH(pH=7):\n conc = np.exp(-pH*np.log(10)) # Concentration\n return st.V_N()*np.log(conc)", "title": "" }, { "docid": "ffe054dd5f82b50dc1289c5e12d4a9fa", "score": "0.5286481", "text": "def _init_h(self):\n alpha, gamma, beta = self.alpha, self.gamma, self.beta\n grid, grid_size = self.grid, self.grid_size\n\n h = np.empty(grid_size)\n\n for i, y in enumerate(grid):\n # == u'(G(y,z)) G(y,z) == #\n integrand = lambda z: (y**alpha * z)**(1 - gamma)\n h[i] = beta * self.integrate(integrand)\n\n return h", "title": "" } ]
6e0f3a9a52edcea73856d83f939359f1
Hit the CKAN REST API for an ISO 19139 XML representation of a package with data uploaded into the datastore.
[ { "docid": "6ecbc6fab3063de593ebbb7655738681", "score": "0.5573738", "text": "def get_record(context, repo, ckan_url, ckan_id, ckan_info):\n query = ckan_url + 'package_iso/object/%s'\n url = query % ckan_info['id']\n response = requests.get(url)\n try:\n xml = etree.parse(io.BytesIO(response.content))\n except Exception, err:\n log.error('Could not pass xml doc from %s, Error: %s' % (ckan_id, err))\n return\n try:\n record = metadata.parse_record(context, xml, repo)[0]\n except Exception, err:\n log.error('Could not extract metadata from %s, Error: %s' % (ckan_id, err))\n return\n return record", "title": "" } ]
[ { "docid": "983b2cf26fb95740b519f798a2f78874", "score": "0.5985983", "text": "def package(request, name):\n raw_data = api_data.data_cve_circl(name=name)\n fields_names = ['id', 'summary', 'cvss']\n extracted_data = api_data.extract_fields(raw_data, fields_names)\n\n return HttpResponse(json.dumps(extracted_data))", "title": "" }, { "docid": "900f6da1400c16e7442828b83f3b1a2a", "score": "0.53386295", "text": "def download_initial_xml():\r\n\r\n logging.info('Downloading initial XML as feed.xml')\r\n\r\n url = 'https://registers.esma.europa.eu/solr/esma_registers_firds_files/select?q=*&fq=publication_' \\\r\n 'date:%5B2021-01-17T00:00:00Z+TO+2021-01-19T23:59:59Z%5D&wt=xml&indent=true&start=0&rows=100'\r\n\r\n response = requests.get(url)\r\n\r\n with open('feed.xml', 'wb') as file:\r\n file.write(response.content)", "title": "" }, { "docid": "cf95a7f8fcee100950840c16c3534fa4", "score": "0.52594256", "text": "def post(self, dataset_id=None):\n if not dataset_id:\n self.error(400)\n return\n\n dataset = APIData.get_by_id(int(dataset_id))\n if not dataset:\n self.error(400)\n return\n\n if self.POST(\"status\"):\n if self.POST(\"status\") == \"PUBLISHED\":\n dataset_ckan = {}\n dataset_ckan[\"name\"] = dataset.additional_data[\"dataset_ckan_name\"].lower()\n dataset_ckan[\"owner_org\"] = self.POST(\"owner_org\")\n # dataset_ckan[\"metadata_created\"] = time.mktime(dataset.created_time.timetuple())\n # dataset_ckan[\"metadata_updated\"] = time.mktime(dataset.updated_time.timetuple())\n # dataset_ckan[\"is_open\"] = True\n dataset_ckan[\"extras\"] = []\n\n for key, value in dataset.additional_data.items():\n # key = key.replace(\"user_id\", \"creator_user_id\")\n key = key.replace(\"dataset_description\", \"notes\")\n key = key.replace(\"dataset_title\", \"title\")\n key = key.replace(\"maintainer_name\", \"maintainer\")\n key = key.replace(\"author_name\", \"author\")\n\n if key in [\"status\", \"id\", \"comment\", \"uacs_id\"]:\n continue\n\n if key == \"CKAN_ID\":\n key = key.replace(\"CKAN_ID\", \"id\")\n\n if key == \"type\":\n value = value.lower()\n\n if key in [\"temporal_date\", \"granularity\", \"frequency_update\"]:\n dataset_ckan[\"extras\"].append({\n \"key\": key.replace(\"_\", \" \").title(),\n \"value\": value\n })\n else:\n dataset_ckan[key] = value\n\n try:\n if dataset.additional_data[\"CKAN_ID\"]:\n url = \"http://api.data.gov.ph/catalogue/api/action/package_update\"\n else:\n url = \"http://api.data.gov.ph/catalogue/api/action/package_create\"\n except Exception, e:\n url = \"http://api.data.gov.ph/catalogue/api/action/package_create\"\n\n\n dataset_ckan = json.dumps(dataset_ckan)\n\n logging.info(dataset_ckan)\n dataset_ckan_string = urllib.quote(dataset_ckan)\n headers = {\"Authorization\": CKAN_API_KEY}\n content = urlfetch.fetch(\n url=url,\n method=urlfetch.POST,\n payload=dataset_ckan_string,\n headers=headers,\n deadline=15).content\n\n logging.info(content)\n try:\n content = json.loads(content)\n except Exception, e:\n logging.exception(e)\n return\n\n if not content[\"success\"]:\n if \"That URL is already in use.\" in content[\"error\"][\"name\"]:\n error = \"A dataset with name \"\n error += dataset_ckan[\"name\"]\n error += \" already exists in the CKAN API. Please choose a different name \"\n error += \" or delete the existing dataset.\"\n else:\n error = \"An error occured. 
The dataset was not published.\"\n # try:\n # if dataset.additional_data[\"CKAN_ID\"]:\n # url = \"http://api.data.gov.ph/catalogue/api/action/package_update\"\n # else:\n # url = \"http://api.data.gov.ph/catalogue/api/action/package_create\"\n # except Exception, e:\n # url = \"http://api.data.gov.ph/catalogue/api/action/package_create\"\n error_message(self, error)\n self.redirect(\"/dataset/\" + str(dataset.key.id()))\n return\n\n dataset.additional_data[\"CKAN_ID\"] = content[\"result\"][\"id\"]\n\n resource = dataset.get_all_resource()\n if resource:\n for r in resource:\n payload = {\n \"package_id\": content[\"result\"][\"id\"],\n \"url\": r.additional_data[\"dataset_data\"][\"file_url\"],\n \"name\": r.additional_data[\"file_name\"],\n \"description\": r.additional_data[\"file_description\"]\n }\n\n headers={\"X-CKAN-API-Key\": CKAN_API_KEY}\n # payload = {\"package_id\": \"26c4b1b3-51e6-4810-9a50-450cffd29711\"}\n resource = urlfetch.fetch(\n url='http://api.data.gov.ph/catalogue/api/action/resource_create',\n method=urlfetch.POST,\n payload=json.dumps(payload),\n headers=headers,\n deadline=15).content\n logging.info(resource)\n\n result = json.loads(resource)\n\n if not result[\"success\"]:\n return\n\n r.additional_data[\"CKAN_ID\"] = result[\"result\"][\"id\"]\n r.put()\n\n log = \"ODTF-Admin published \" + dataset.additional_data[\"dataset_title\"] + \" dataset.\"\n Logs.add_log(\n data={\"action\": log, \"icon\": \"check\", \"color\": \"info\" },\n user=self.user.key,\n uacs_id=dataset.additional_data[\"uacs_id\"],\n dataset=dataset.key)\n\n success = \"The dataset has been published.\"\n success_message(self, success)\n self.redirect(\"/dataset/\" + str(dataset.key.id()))\n elif self.POST(\"status\") == \"SENT BACK\":\n log = \"ODTF-Admin sent back \" + dataset.additional_data[\"dataset_title\"] + \" dataset.\"\n Logs.add_log(\n data={\"action\": log, \"icon\": \"reply\", \"color\": \"danger\" },\n user=self.user.key,\n uacs_id=dataset.additional_data[\"uacs_id\"],\n dataset=dataset.key)\n elif self.POST(\"status\") == \"FOR CLEAN UP\":\n log = \"ODTF-Admin cleaning up \" + dataset.additional_data[\"dataset_title\"] + \" dataset.\"\n Logs.add_log(\n data={\"action\": log, \"icon\": \"pencil\", \"color\": \"warning\" },\n user=self.user.key,\n uacs_id=dataset.additional_data[\"uacs_id\"],\n dataset=dataset.key)\n\n if self.POST(\"comment\"):\n dataset.additional_data[\"comment_for_sent_back\"] = self.POST(\"comment\").strip()\n try:\n dataset.additional_data[\"comment\"].append({\n \"comment\": self.POST(\"comment\").strip(),\n \"comment_date\": global_vars.datetime_now_adjusted.strftime(\"%b %d, %Y %I:%H:%S %p\"),\n \"comment_author\": \"ODTF-Admin\"\n })\n except Exception, e:\n dataset.additional_data[\"comment\"] = []\n dataset.additional_data[\"comment\"].append({\n \"comment\": self.POST(\"comment\").strip(),\n \"comment_date\": global_vars.datetime_now_adjusted.strftime(\"%b %d, %Y %I:%H:%S %p\"),\n \"comment_author\": \"ODTF-Admin\"\n })\n\n for key, value in dataset.additional_data.items():\n if key == \"status\":\n dataset.additional_data[key] = self.POST(\"status\").upper()\n\n try:\n tag = create_indexed_tag(\"status\", self.POST(\"status\").upper())\n for indexed_list in dataset.indexed_data:\n if indexed_list.startswith(\"STATUS\"):\n dataset.indexed_data.remove(indexed_list)\n\n dataset.indexed_data.append(tag)\n except Exception, e:\n logging.exception(e)\n\n dataset.put()", "title": "" }, { "docid": "42460099759e597b884ce0b42c780e4a", "score": "0.5220378", 
"text": "def test_updateMetadataWithPOSTto_SE_IRI(self):\n # create a new module\n self._setupRhaptos()\n self.folder.manage_addProduct['CMFPlone'].addPloneFolder('workspace') \n filename = 'entry.xml'\n module = self._createModule(self.folder.workspace, filename)\n uploadrequest = self.createUploadRequest(\n 'entry.xml',\n module,\n CONTENT_DISPOSITION='attachment; filename=entry.xml',\n )\n adapter = getMultiAdapter(\n (module, uploadrequest), Interface, 'sword')\n xml = adapter()", "title": "" }, { "docid": "6c07a16c3b68261cb22d3ea15c234656", "score": "0.5181066", "text": "def load(pycsw_config, ckan_url):\n\n def parse_datastore(ckan_url):\n \"\"\"\n Scrape and return every resource ID in the datastore database, accessing the information through\n CKAN's REST API.\n\n @param ckan_url: e.g. http://127.0.0.1:5000\n @return: a list of datastored resource object IDs\n \"\"\"\n api_query = 'api/3/action/datastore_search?resource_id=_table_metadata'\n ignore_names = ['_table_metadata', 'geography_columns', 'geometry_columns', 'spatial_ref_sys']\n url = ckan_url + api_query\n response = requests.get(url)\n listing = response.json()\n if not isinstance(listing, dict):\n raise RuntimeError, 'Wrong API response: %s' % listing\n results = listing['result']['records']\n resource_names = []\n # Should use a list/dict comprehension here\n for result in results:\n if not result['name'] in ignore_names:\n resource_names.append(result['name'])\n return resource_names\n\n def parse_resource(resource_id, ckan_url):\n \"\"\"\n CKAN's search API doesn't allow querying packages by their resources. Thankfully,\n each resource is returned with a URL which contains the package id between the\n paths \"dataset\" and \"resource\", (at least for datastore items) so we can use a RegEx\n to figure out what the package of a resource is. This is not an ideal solution, but\n it's the cleanest way to solve the problem until the CKAN team decides to organize\n their data in a less authoritative manner.\n\n @param resource_id: the id of a datastored resource object\n @param ckan_url: http://127.0.0.1:5000\n \"\"\"\n api_query = 'api/3/action/resource_show?id=%s' % resource_id\n url = ckan_url + api_query\n response = requests.get(url)\n listing = response.json()\n if not isinstance(listing, dict):\n raise RuntimeError, 'Wrong API response: %s' % listing\n # skip Authorization Error, most likely due to deleted packages.\n if 'error' in listing:\n if (\"Not Found Error\" == listing['error']['__type']) or (\"Authorization Error\" == listing['error']['__type']):\n return None\n log.info('listing is %r' % listing )\n if listing['result']:\n package_url = listing['result']['url']\n else:\n return None\n\n # Here's that RegEx. Ugh.\n package_id = re.findall('dataset/(.*?)/resource', package_url, re.DOTALL)\n if package_id:\n return package_id[0]\n else:\n return None\n\n def get_record(context, repo, ckan_url, ckan_id, ckan_info):\n \"\"\"\n Hit the CKAN REST API for an ISO 19139 XML representation of a package with data\n uploaded into the datastore.\n\n @param context: Vanilla-CKAN auth noise\n @param repo: PyCSW repository (database)\n @param ckan_url: e.g. 
http://127.0.0.1:5000\n @param ckan_id: Package ID\n @param ckan_info: Package data\n @return: ISO 19139 XML data\n \"\"\"\n query = ckan_url + 'package_iso/object/%s'\n url = query % ckan_info['id']\n response = requests.get(url)\n try:\n xml = etree.parse(io.BytesIO(response.content))\n except Exception, err:\n log.error('Could not pass xml doc from %s, Error: %s' % (ckan_id, err))\n return\n try:\n record = metadata.parse_record(context, xml, repo)[0]\n except Exception, err:\n log.error('Could not extract metadata from %s, Error: %s' % (ckan_id, err))\n return\n return record\n\n\n # Now that we've defined the local functions, let's actually run the parent function\n database = pycsw_config.get('repository', 'database')\n table_name = pycsw_config.get('repository', 'table', 'records')\n\n context = pycsw.config.StaticContext()\n repo = repository.Repository(database, context, table=table_name)\n\n log.info('Started gathering CKAN datasets identifiers: {0}'.format(str(datetime.datetime.now())))\n\n gathered_records = {}\n\n results = parse_datastore(ckan_url)\n package_ids = []\n for result in results:\n package_id = parse_resource(result, ckan_url)\n if not package_id in package_ids:\n package_ids.append(package_id)\n\n for id in package_ids:\n api_query = 'api/3/action/package_show?id=%s' % id\n url = ckan_url + api_query\n response = requests.get(url)\n listing = response.json()\n if not isinstance(listing, dict):\n raise RuntimeError, 'Wrong API response: %s' % listing\n # skip Not Found Error, most likely due to deleted packages.\n if 'error' in listing \\\n and \"Not Found Error\" == listing['error']['__type']:\n continue\n result = listing['result']\n gathered_records[result['id']] = {\n 'metadata_modified': result['metadata_modified'],\n 'id': result['id'],\n }\n\n\n log.info('Gather finished ({0} datasets): {1}'.format(\n len(gathered_records.keys()),\n str(datetime.datetime.now())\n ))\n\n existing_records = {}\n skipped_records = {}\n\n query = repo.session.query(repo.dataset.ckan_id, repo.dataset.ckan_modified, repo.dataset.type)\n for row in query:\n existing_records[row[0]] = row[1]\n # skip records loaded by pycsw\n # TODO is empty type an valid criteria?\n if row[2]:\n skipped_records[row[0]] = row[1]\n repo.session.close()\n\n new = set(gathered_records) - set(existing_records)\n deleted = set(existing_records) - set(skipped_records) - set(gathered_records)\n changed = set()\n\n for key in set(gathered_records) & (set(existing_records) - set(skipped_records)):\n if gathered_records[key]['metadata_modified'] > existing_records[key]:\n changed.add(key)\n\n for ckan_id in deleted:\n try:\n repo.session.begin()\n repo.session.query(repo.dataset.ckan_id).filter_by(\n ckan_id = ckan_id\n ).delete()\n log.info('Deleted %s' % ckan_id)\n repo.session.commit()\n except Exception, err:\n repo.session.rollback()\n raise\n\n for ckan_id in new:\n ckan_info = gathered_records[ckan_id]\n record = get_record(context, repo, ckan_url, ckan_id, ckan_info)\n if not record:\n log.info('Skipped record %s' % ckan_id)\n continue\n try:\n repo.insert(record, 'local', util.get_today_and_now())\n log.info('Inserted %s' % ckan_id)\n except Exception, err:\n log.error('ERROR: not inserted %s Error:%s' % (ckan_id, err))\n\n for ckan_id in changed:\n ckan_info = gathered_records[ckan_id]\n record = get_record(context, repo, ckan_url, ckan_id, ckan_info)\n if not record:\n continue\n update_dict = dict([(getattr(repo.dataset, key), getattr(record, key))\n for key in record.__dict__.keys() if key != 
'_sa_instance_state'])\n\n try:\n repo.session.begin()\n repo.session.query(repo.dataset).filter_by(\n ckan_id = ckan_id\n ).update(update_dict)\n repo.session.commit()\n log.info('Changed %s' % ckan_id)\n except Exception, err:\n repo.session.rollback()\n raise RuntimeError, 'ERROR: %s' % str(err)", "title": "" }, { "docid": "d4405f808216a2db69634bf4fcf28fca", "score": "0.5149377", "text": "def retrieve(self):\n os.system(\"wget %s -O %s.pisi-index.xml.xz\" % (self.url, self.name))\n os.system(\"xz -f -d %s.pisi-index.xml.xz\" % self.name)", "title": "" }, { "docid": "629de9ba1267573a053870d72b68c8f2", "score": "0.51459587", "text": "def package_version(request, name, version):\n raw_data = api_data.data_cve_circl(name=name, version=version)\n fields_names = ['id', 'summary', 'cvss']\n extracted_data = api_data.extract_fields(raw_data, fields_names)\n\n return HttpResponse(json.dumps(extracted_data))", "title": "" }, { "docid": "f5f61ca70dcb3a5a3305cbf02e44b945", "score": "0.51048213", "text": "def register(self, package):\n key = 'datamosru_' + package['id'].replace('.', '_')\n try:\n r = self.ckan.package_entity_get(key)\n status = 0\n except ckanclient.CkanApiNotFoundError, e:\n status = 404\n tags = [u'Москва', u'официально', package['theme'].lower(), u'datamosru']\n resources = [{'name': package['name'], 'format': '', 'url': package['url'],\n 'description': u'Страница на сайте data.mos.ru'},\n {'name': package['name'], 'format': 'CSV', 'url': DIRECT_DOWNLOAD_URLPAT % package['id'],\n 'description': u'Данные в формате CSV на data.mos.ru'}]\n # Add direct url to the CSV file and url to HTML\n\n the_package = { 'name' : key, 'title' : package['name'], 'url' : package['url'],\n 'notes' : u'Данные города Москвы.\\n\\n'.encode('utf8') + package['name'],\n 'tags' : tags,\n 'state' : 'active',\n 'resources': resources,\n 'group' : 'moscow',\n 'author' : 'Ivan Begtin',\n 'author_email' : 'ibegtin@infoculture.ru',\n 'maintainer' : 'Ivan Begtin',\n 'maintainer_email' : 'ibegtin@infoculture.ru',\n 'extras':\n {'govbody' : package['source']}\n }\n if status != 404:\n self.ckan.package_entity_delete(r['name'])\n\n if True:\n try:\n self.ckan.package_register_post(the_package)\n rname = 'thedata/%s.csv' %(package['id'])\n self.ckan.upload_file(rname)\n self.ckan.add_package_resource(the_package['name'], rname, resource_type=\"data\", name=package['name'], description=u\"Данные в формате CSV\")\n except Exception, e:\n print key\n print package['url']\n return key\n pass\n print \"Imported\", key\n else:\n package_entity = self.ckan.last_message\n if type(package_entity) == type(''): return None\n package_entity.update(the_package)\n for k in ['id', 'ratings_average', 'relationships', 'ckan_url', 'ratings_count']:\n del package_entity[k]\n self.ckan.package_entity_put(package_entity)\n print \"Updated\", key\n# print self.ckan.last_message\n return key", "title": "" }, { "docid": "fb65270baba3ff62239fb87a266afed7", "score": "0.5064838", "text": "def start_package(self, args):\n uri = PKGMAN\n data = {'action': 'apply',\n 'operation': 'start',\n 'packagename': args.package}\n resp = self._get(uri, data)\n if resp['success']:\n print 'Done'\n else:\n print('Failed to start package %s', args.pacakge)", "title": "" }, { "docid": "12e51ff255f2c53bd4b19b765ae3aded", "score": "0.5011484", "text": "def package(self):\n return self._run_action(\"package\")", "title": "" }, { "docid": "e9db442543aba80ccded1a7d8ea0fe98", "score": "0.5007653", "text": "def loadxml_from_site():\n url = 
\"http://data.treasury.gov/feed.svc/DailyTreasuryYieldCurveRateData?$filter=month(NEW_DATE)%20eq%206%20and%20year(NEW_DATE)%20eq%202018\"\n resp = requests.get(url)\n with open(_XML_FILE_NAME,'wb') as data:\n data.write(resp.content)", "title": "" }, { "docid": "8ac72f3ebf25832c530015817bf386ed", "score": "0.4988576", "text": "def package(self) -> pulumi.Output['outputs.PackageOccurrenceResponse']:\n return pulumi.get(self, \"package\")", "title": "" }, { "docid": "a8e5254fd55b0517b536eb670ecc9555", "score": "0.49330157", "text": "def PrintPackageInfo(self):\n print self.client.GetPackageInfo(self.config['package'])", "title": "" }, { "docid": "cdfdd8d85bc054bbb1083db8047b8b76", "score": "0.4918931", "text": "def DownloadPackage(self):\n filename = self.config['package']\n self.client.DownloadPackage(filename)\n print 'Package successfully downloaded!'", "title": "" }, { "docid": "246cb48f02e77633be143e6f7117ccc0", "score": "0.49187642", "text": "def PrintPackageMetadata(self):\n print 'List packages on Simian....'\n install_types = self.config['install_types']\n catalogs = self.config['catalogs']\n filename = self.config['package']\n print self.client.GetPackageMetadata(install_types, catalogs, filename)\n print 'Complete!'", "title": "" }, { "docid": "96274fda385c959959b5dee1af6181a0", "score": "0.48810133", "text": "def contents(self, package):\n # don't know anything\n raise KeyError(package)", "title": "" }, { "docid": "036437ff6d4aedd1e72ff005bc9ff5cf", "score": "0.4847293", "text": "def download_xml(request):\n xmlID = request.GET[\"doc_id\"]\n xmltree = retrieve_xml(xmlID)\n xml = etree.fromstring(xmltree)\n json_content = etree.tostring(xml,pretty_print=True)\n\n return HttpResponse(json_content, HTTP_200_OK)", "title": "" }, { "docid": "7ec7cdeccf93fcb52f7c6213e0060823", "score": "0.48443574", "text": "def xml_node_package(self, doc):\n (pack, version, node) = self.xml_node(doc)\n retval = doc.createElement('package')\n retval.setAttribute('name', pack)\n retval.setAttribute('version', version)\n retval.appendChild(node)\n return retval", "title": "" }, { "docid": "71f22980efbdfccb0262c359b16f0cce", "score": "0.48395035", "text": "def get_latest_orbit_copernicus_api(sat_ab,start_time,end_time,orbit_type):\n # modified by E. Lindsey, April 2021\n \n # some hard-coded URLs to make the API work\n scihub_url='https://scihub.copernicus.eu/gnss/odata/v1/Products'\n # these are from the namespaces of the XML file returned in the query. 
Hopefully not subject to change?\n w3_url='{http://www.w3.org/2005/Atom}'\n m_url='{http://schemas.microsoft.com/ado/2007/08/dataservices/metadata}'\n d_url='{http://schemas.microsoft.com/ado/2007/08/dataservices}'\n\n # compose search filter\n filterstring = f\"startswith(Name,'S1{sat_ab}') and substringof('{orbit_type}',Name) and ContentDate/Start lt datetime'{start_time}' and ContentDate/End gt datetime'{end_time}'\"\n \n # create HTTPS request and get response\n params = { '$top': 1, '$orderby': 'ContentDate/Start asc', '$filter': filterstring }\n search_response = requests.get(url='https://scihub.copernicus.eu/gnss/odata/v1/Products', params=params, auth=('gnssguest','gnssguest'))\n search_response.raise_for_status()\n\n # parse XML tree from response\n tree = ElementTree.fromstring(search_response.content)\n \n #extract w3.org URL that gets inserted into all sub-element names for some reason\n w3url=tree.tag.split('feed')[0]\n \n # extract the product's hash-value ID\n product_ID=tree.findtext(f'.//{w3_url}entry/{m_url}properties/{d_url}Id')\n product_url = f\"{scihub_url}('{product_ID}')/$value\"\n product_name=tree.findtext(f'./{w3url}entry/{w3url}title')\n\n # return the orbit name, type, and download URL\n if product_ID is not None:\n orbit={'name':product_name, 'orbit_type':orbit_type, 'remote_url':product_url}\n else:\n orbit=None\n return orbit", "title": "" }, { "docid": "1d292489f5092416a0e096de7ff2f751", "score": "0.48381197", "text": "def package(self) -> 'outputs.PackageNoteResponse':\n return pulumi.get(self, \"package\")", "title": "" }, { "docid": "196729fe48d269b0a0cc1503e496fa4d", "score": "0.48314953", "text": "def write_xml(self):\n extent_desc = OD(\n [('/gmd:title' + CS, (\n 'ANZLIC Geographic Extent Name Register',\n 'gmd:identificationInfo/gmd:MD_DataIdentification')),\n ('/gmd:date/gmd:CI_Date/gmd:date' + DT, ('2006-10-10', None)),\n ('/gmd:date/gmd:CI_Date/gmd:dateType/gmd:CI_DateTypeCode',\n ('publication', None)),\n ('/gmd:edition' + CS, ('Version 2', None)),\n ('/gmd:editionDate' + DT, ('2001-02', None)),\n ('/gmd:identifier/gmd:MD_Identifier/gmd:code' + CS,\n ('http://asdd.ga.gov.au/asdd/profileinfo/anzlic-allgens.xml' +\n '#new_zealand', None)),\n ('/gmd:citedResponsibleParty/gmd:CI_ResponsibleParty/gmd:' +\n 'organisationName' + CS,\n ('ANZLIC the Spatial Information Council', None)),\n ('/gmd:citedResponsibleParty/gmd:CI_ResponsibleParty/gmd:role/' +\n 'gmd:CI_RoleCode', ('custodian', None))])\n extent_desv1 = OD(\n [('/gmd:title' + CS, (\n 'ANZMet Lite Country codelist',\n 'gmd:identificationInfo/gmd:MD_DataIdentification')),\n ('/gmd:date/gmd:CI_Date/gmd:date' + DT, ('2009-03-31', None)),\n ('/gmd:date/gmd:CI_Date/gmd:dateType/gmd:CI_DateTypeCode',\n ('publication', None)),\n ('/gmd:edition' + CS, ('Version 1.0', None)),\n ('/gmd:editionDate' + DT, ('2009-03-31', None)),\n ('/gmd:identifier/gmd:MD_Identifier/gmd:code' + CS,\n ('http://asdd.ga.gov.au/asdd/profileinfo/anzlic-country.xml' +\n '#Country', None)),\n ('/gmd:citedResponsibleParty/gmd:CI_ResponsibleParty/gmd:' +\n 'organisationName' + CS,\n ('ANZLIC the Spatial Information Council', None)),\n ('/gmd:citedResponsibleParty/gmd:CI_ResponsibleParty/gmd:role/' +\n 'gmd:CI_RoleCode', ('custodian', None))])\n keyword_dict = OD(\n [('/gmd:title' + CS, ('ANZLIC Jursidictions', None)),\n ('/gmd:date/gmd:CI_Date/gmd:date' + DT, ('2008-10-29', None)),\n ('/gmd:date/gmd:CI_Date/gmd:dateType/gmd:CI_DateTypeCode',\n ('revision', None)),\n ('/gmd:edition' + CS, ('Version 2.1', None)),\n ('/gmd:editionDate' 
+ DT, ('2008-10-29', None)),\n ('/gmd:identifier/gmd:MD_Identifier/gmd:code' + CS,\n ('http://asdd.ga.gov.au/asdd/profileinfo/anzlic-jurisdic.xml#' +\n 'anzlic-jurisdic', None)),\n ('/gmd:citedResponsibleParty/gmd:CI_ResponsibleParty/gmd:' +\n 'organisationName' + CS,\n ('ANZLIC the Spatial Information Council', None)),\n ('/gmd:citedResponsibleParty/gmd:CI_ResponsibleParty/gmd:role/' +\n 'gmd:CI_RoleCode', ('custodian', None))])\n iden = ID.split(MD+'/')[1]\n url = 'http://standards.iso.org/ittf/PubliclyAvailableStandards/' + \\\n 'ISO_19139_Schemas/resources/Codelist/gmxCodelists.xml#'\n try:\n md = EL('{http://www.isotc211.org/2005/gmd}MD_Metadata', nsmap=NSX)\n tree = ELT(md)\n for i in FIELDS:\n\n # File Identifier\n if i == FID and self.dlg.outputFile.text() != '':\n tree = self.write(i, tree, str(uuid.uuid4()))\n\n # Date Stamp\n elif i == DSTAMP and self.dlg.outputFile.text() != '':\n date = QDate.currentDate().toString('yyyy-MM-dd')\n tree = self.write(i, tree, date, iden)\n\n # Scale\n elif i == SCALE:\n if self.dlg.scale.isChecked() and \\\n self.dlg.scaleRadioButton.isChecked() and \\\n self.dlg.scaleWidget.scaleString() != '0':\n tx = self.dlg.scaleWidget.scaleString().replace(',', '')\n tree = self.write(i, tree, tx.split('1:')[1])\n\n # Resolution\n elif i == RESOLUTION:\n if self.dlg.scale.isChecked() and \\\n self.dlg.resolutionRadioButton.isChecked():\n if self.dlg.rUnits.currentText() == '' or \\\n self.dlg.resolutionText.text() == '':\n raise Exception('Write Warning: Resolution not ' +\n 'written to XML as missing fields.')\n else:\n text = self.dlg.resolutionText.text()\n tree = self.write(i, tree, text)\n for j in self.rcode:\n if self.rcode[j] == self.dlg.rUnits.currentText(\n ):\n res = tree.find(RESOLUTION[16:],\n namespaces=NSX)\n res.attrib['uom'] = j\n\n # Extent Description\n elif i == EXTENTDES:\n ex = 'gmd:EX_Extent/gmd:geographicElement/gmd:EX_' + \\\n 'GeographicDescription/gmd:geographicIdentifier/' + \\\n 'gmd:MD_Identifier/gmd:authority/gmd:CI_Citation'\n idn = md.find('gmd:identificationInfo/gmd:MD_Data' +\n 'Identification', namespaces=NSX)\n if idn is None:\n idn = SE(md, QN(NSX['gmd'], 'identificationInfo'))\n idn = SE(idn, QN(NSX['gmd'], 'MD_DataIdentification'))\n for s in self.dlg.geogDesList.selectedItems():\n ex_desc = extent_desc\n if s.text() == 'nzl':\n ex_desc = extent_desv1\n extent = SE(idn, QN(NSX['gmd'], 'extent'))\n for value in ex_desc:\n base, found = '', None\n for val in (ex+value).split('/'):\n if extent.find(\n base + val, namespaces=NSX) is not None:\n found = base + val\n base += (val + '/')\n if found:\n f = extent.find(found, namespaces=NSX)\n search = (ex+value).split(found)[1]\n else:\n search = (ex+value)\n f = extent\n splitb = search.split('/')\n for sb in splitb:\n if sb != '':\n splitn = sb.split(':')\n for num, j in enumerate(\n splitn[:len(splitn) / 2]):\n element = SE(\n f, QN(NSX[j], splitn[num + 1]))\n f = element\n if splitn[num+1] == 'authority':\n el = EL(QN(NSX['gmd'], 'code'))\n sel = SE(el, QN(\n NSX['gco'], 'CharacterString'))\n sel.text = s.text()\n f.addnext(el)\n num += 1\n f.text = ex_desc[value][0]\n if 'gco' not in f.tag:\n f.attrib['codeList'] = url + f.tag[f.tag.rfind(\n '}') + 1:]\n f.attrib['codeListValue'] = ex_desc[value][0]\n\n # Reference System\n elif i == RS:\n if self.dlg.referenceSys.crs().authid() != \"\":\n text = self.dlg.referenceSys.crs().authid()\n tree = self.write(i, tree, text.split(':')[1])\n\n # Other Text Fields\n elif i in self.tF:\n if self.tF[i].toPlainText() != '':\n tree = 
self.write(i, tree, self.tF[i].toPlainText())\n\n # Other Combo - Contact Fields\n elif i in self.cFS:\n if self.cFS[i].currentText() != '':\n tree = self.write(i, tree, self.cFS[i].currentText())\n\n # Other Combo Fields\n elif i in self.cF:\n if self.cF[i][0].currentText() != '':\n tree = self.write(i, tree, self.cF[i][0].currentText())\n\n # Hierarchy Level Description/ Scope/ Scope Description\n elif i == HLEVELNAME or i == SCOPE or i == SCOPEDESC:\n if self.cF[HLEVEL][0].currentText() != '':\n text = self.cF[HLEVEL][0].currentText()\n tree = self.write(i, tree, text)\n\n # Topic Category\n elif i == TOPIC:\n for j in self.dlg.topicCategory.selectedItems():\n tree = self.write(i, tree, j.text(), iden)\n\n # Resource Maintenance Date\n elif i == RMAINTDATE:\n if self.dlg.date.isEnabled():\n date = self.dlg.date.date().toString('yyyy-MM')\n tree = self.write(i, tree, date)\n\n # Resource Limitation\n elif i == RESOURCELIMIT:\n if self.dlg.resourceConLicense.toPlainText() != '':\n tx = self.dlg.resourceConLicense.toPlainText()\n tree = self.write(i, tree, tx, iden, con='license')\n if self.dlg.resourceConCopyright.toPlainText() != '':\n tx = self.dlg.resourceConCopyright.toPlainText()\n tree = self.write(i, tree, tx, iden, con='copyright')\n\n # Metadata Limitation\n elif i == METALIMIT:\n if self.dlg.metadataConLicense.toPlainText() != '':\n tx = self.dlg.metadataConLicense.toPlainText()\n tree = self.write(i, tree, tx, tree.getroot(),\n con='license')\n if self.dlg.metadataConCopyright.toPlainText() != '':\n tx = self.dlg.metadataConCopyright.toPlainText()\n tree = self.write(i, tree, tx, tree.getroot(),\n con='copyright')\n\n # Temporal Single/ Begin Range\n elif i == TSINGLE or i == TBEGIN:\n if i == TSINGLE and not \\\n self.dlg.endDateCheck.isChecked() or i == TBEGIN \\\n and self.dlg.endDateCheck.isChecked():\n if self.dlg.temporalCheck.isChecked():\n date = self.dlg.startDate.date().toString(\n 'yyyy-MM-dd')\n if self.dlg.startTimeCheck.isChecked():\n date += 'T' + self.dlg.startTime.time(\n ).toString('hh:mm:ss')\n tree = self.write(i, tree, date, iden)\n\n # Temporal End Range\n elif i == TEND:\n tend = None\n if self.dlg.temporalCheck.isChecked() and \\\n self.dlg.endDateCheck.isChecked():\n date = self.dlg.endDate.date().toString('yyyy-MM-dd')\n if self.dlg.endTimeCheck.isChecked():\n date += 'T' + self.dlg.endTime.time().toString(\n 'hh:mm:ss')\n tend = TEND.split('/gml:endPosition')[\n 0].split(MD+'/')[1]\n tree = self.write(i, tree, date, tend)\n\n # Keywords/ Keyword Type\n elif i == KEYWORDS or i == KEYWORDSTYPE:\n mdk = None\n if i == KEYWORDS:\n keyword_url = url + 'MD_KeywordTypeCode'\n mt = KEYWORDS.split('/gmd:keyword')[0].split(MD)[1]\n tree = self.write(i, tree, 'New Zealand', mt[1:])\n\n kt = EL(QN(NSX['gmd'], 'type'))\n ktc = SE(kt, QN(NSX['gmd'], 'MD_KeywordTypeCode'))\n ktc.attrib['codeList'] = keyword_url\n ktc.attrib['codeListValue'] = 'theme'\n h = tree.find(i[17:], namespaces=NSX)\n h.getparent().addnext(kt)\n\n path = MD + mt + '/gmd:thesaurusName/gmd:CI_Citation'\n\n for k in keyword_dict:\n tree = self.write(path + k, tree,\n keyword_dict[k][0],\n keyword_dict[k][1])\n found = False\n if len(self.dlg.keywordList.selectedItems()) > 0:\n desc = tree.find(iden + '/gmd:descriptiveKeywords',\n namespaces=NSX)\n desckey = EL(QN(NSX['gmd'], 'descriptiveKeywords'))\n desc.addnext(desckey)\n mdk = SE(desckey, QN(NSX['gmd'], 'MD_Keywords'))\n for j in self.dlg.keywordList.selectedItems():\n found = True\n key = EL(QN(NSX['gmd'], 'keyword'))\n cs = SE(key, 
QN(NSX['gco'], 'CharacterString'))\n cs.text = j.text().replace(' ', '-')\n mdk.append(key)\n\n if found:\n kt = EL(QN(NSX['gmd'], 'type'))\n ktc = SE(kt, QN(NSX['gmd'], 'MD_KeywordTypeCode'))\n ktc.attrib['codeList'] = keyword_url\n ktc.attrib['codeListValue'] = 'theme'\n t = tree.findall(i[17:], namespaces=NSX)\n t = t[len(tree.findall(i[17:], namespaces=NSX)) - 1]\n t.getparent().addnext(kt)\n\n # Key Resource Date & Type\n elif i == CITDATE or i == CITDATETYPE:\n if i == CITDATE:\n date_url = url + 'CI_DateTypeCode'\n\n code_list_val = {self.dlg.resourceCreateCheck: (\n 'creation', self.dlg.resourceCreate),\n self.dlg.resourceUpdateCheck: (\n 'revision', self.dlg.resourceUpdate),\n self.dlg.resourcePublishCheck: (\n 'publication',\n self.dlg.resourcePublish)}\n\n for val in (self.dlg.resourceCreateCheck,\n self.dlg.resourcePublishCheck,\n self.dlg.resourceUpdateCheck):\n if val.isChecked():\n date = code_list_val[val][1].date()\n date = date.toString('yyyy-MM-dd')\n tree = self.write(\n i, tree, date,\n iden + '/gmd:citation/gmd:CI_Citation')\n cit_d = EL(QN(NSX['gmd'], 'dateType'))\n cit_d_code = SE(cit_d, QN(NSX['gmd'],\n 'CI_DateTypeCode'))\n cit_d_code.attrib['codeList'] = date_url\n cit_d_code.attrib[\n 'codeListValue'] = code_list_val[val][0]\n cit_d_code.text = code_list_val[val][0]\n t = tree.findall(i[17:], namespaces=NSX)\n t = t[len(tree.findall(\n i[17:], namespaces=NSX)) - 1]\n t.getparent().addnext(cit_d)\n\n elif EXTENTBB in i:\n if self.dlg.northExtent.displayText() != '' and \\\n self.dlg.southExtent.displayText() != '' and \\\n self.dlg.eastExtent.displayText() != '' and \\\n self.dlg.westExtent.displayText() != '':\n if 'north' in i:\n tree = self.write(\n i, tree, self.dlg.northExtent.displayText())\n elif 'south' in i:\n tree = self.write(\n i, tree, self.dlg.southExtent.displayText())\n elif 'east' in i:\n tree = self.write(\n i, tree, self.dlg.eastExtent.displayText())\n elif 'west' in i:\n tree = self.write(\n i, tree, self.dlg.westExtent.displayText(),\n iden)\n\n # Default Values\n elif i in DEFAULTTEXT:\n tree = self.write(i, tree, DEFAULTTEXT[i])\n\n elif i in self.MDTEXT and (i == FID or i == DSTAMP or\n 'extent' in i or 'URL' in i):\n # Update Any other fields that are not empty.\n if type(self.MDTEXT[i]) == tuple:\n val = self.MDTEXT[i][0]\n else:\n val = self.MDTEXT[i]\n if val != '':\n if 'extent' in i:\n tree = self.write(i, tree, self.MDTEXT[i], iden)\n else:\n tree = self.write(i, tree, self.MDTEXT[i])\n\n # Create New Metadata File\n md_text = TS(md, pretty_print=True, xml_declaration=True,\n encoding='utf-8')\n\n # Write to Temp File & Create XML Summary & Formatted Summary\n with open(TEMPFILE, 'wb') as f:\n f.write(md_text)\n self.dlg.summary.setText(md_text)\n self.format_summary(tree)\n\n except Exception as e:\n raise Exception(\"Write Error: \" + str(e))", "title": "" }, { "docid": "3955c2b0bc02eba47095546364f14b1f", "score": "0.48272693", "text": "def ServicePack(self) -> str:", "title": "" }, { "docid": "505768f7abdcf408358cde45be6b5269", "score": "0.48236328", "text": "def get(self):\n\n self.response.headers['Content-Type'] = \"application/xml\"\n self.response.out.write(export())", "title": "" }, { "docid": "69953eb6aa31f16613e09b69e7796cb4", "score": "0.48221", "text": "def UploadPackage(self):\n raise NotImplementedError", "title": "" }, { "docid": "77aa4ea834b5e55da0b15b6681b6041b", "score": "0.48200604", "text": "def bcdc_package_show(package):\n params = {\"id\": package}\n r = requests.get(bcdata.BCDC_API_URL + \"package_show\", 
params=params)\n if r.status_code != 200:\n raise ValueError(\"{d} is not present in DataBC API list\".format(d=package))\n return r.json()[\"result\"]", "title": "" }, { "docid": "bd3e17eb4805a62fd56088b83a75290b", "score": "0.4816351", "text": "def parse(self, cik, name, form, date, filing, return_all=False):\n accession = filing.split('/')[-1].replace('.txt', '')\n self.accession = accession\n self.annual = True if 'K' in form else False\n index = '/'.join([self.edgar, 'data', cik, accession.replace('-', ''), \n ''.join([accession, '-index.htm'])])\n index_links = self.get_index(index)\n xbrl = [x[2] for x in index_links if x[0].endswith('.INS')]\n if not xbrl:\n xbrl = [x[2] for x in index_links if x[0] == 'XML'] # inline\n if not xbrl:\n with open(os.path.join(os.path.realpath('.'), 'missing.txt'), 'a') as f:\n f.write('{}\\n'.format(index))\n return \n instance = '{}/data/{}/{}/{}'.format(self.edgar, cik, \n accession.replace('-', ''), \n xbrl[0])\n try:\n tree = etree.parse(openurl(instance))\n except etree.XMLSyntaxError:\n tree = etree.parse(openurl(instance),\n parser=etree.XMLParser(recover=True))\n except IncompleteRead:\n tree = etree.parse(openurl(instance))\n except ConnectionResetError:\n time.sleep(500)\n tree = etree.parse(openurl(instance))\n\n # pull acceptance datetime and zip\n sgml = index.replace('-index.htm', '.hdr.sgml')\n c = openurl(sgml)\n lines = c.read()\n c.close()\n acceptance = lines.decode().split('<ACCEPTANCE-DATETIME>')[-1].split('<')[0].strip()\n zipcode = format_zip(lines.decode().split('<ZIP>')[-1].split('<')[0].strip())\n\n # build context refs dict\n defs = defaultdict(dict)\n for x in tree.iter():\n if 'id' in x.attrib:\n for xx in x.iterdescendants():\n if xx.text and xx.text.strip() and \\\n 'identifier' not in str(xx.tag):\n key = etree.QName(xx.tag).localname.strip()\n try:\n possible_dt = xx.text.strip()[:10]\n val = datetime.datetime.strptime(possible_dt, \n '%Y-%m-%d').date()\n except ValueError:\n val = xx.text.split(':')[-1].strip()\n defs[x.attrib['id'].strip()][key] = val\n if xx.attrib and xx.attrib.keys() != ['scheme']:\n for row in xx.attrib.items():\n key = row[1].split(':')[-1].strip()\n val = xx.text.split(':')[-1].strip()\n defs[x.attrib['id'].strip()][key] = val\n\n # return list of all elements with context\n if return_all:\n crap = []\n for x in tree.iter(tag=etree.Element):\n if x.text and x.text.strip():\n if 'xbrl.org' and 'instance' in str(x.tag):\n pass\n elif not x.text.startswith('<'):\n tag = etree.QName(x.tag).localname.strip()\n val = x.text.split(':')[-1].strip()\n crap.append(dict(defs[x.attrib.get('contextRef')].items() +\n {'tag': tag, 'val': val}.items()))\n return crap\n\n # general\n self.tree = tree\n self.context = defs\n self.entity = None\n ticker = self.pull('TradingSymbol', None, history=False)\n if ticker is not None:\n ticker = clean_ticker(ticker)\n if ticker is None:\n ticker = clean_ticker(xbrl[0].split('-')[0])\n fiscal_year = self.pull('DocumentFiscalYearFocus', None, history=False)\n fiscal_period = self.pull('DocumentFiscalPeriodFocus', None, history=False)\n if fiscal_year is not None and fiscal_period is not None:\n focus = '{}{}'.format(fiscal_year, fiscal_period)\n else:\n focus = self.datapath.split('/')[-1]\n formdate = self.pull('DocumentPeriodEndDate', None, history=False)\n\n # check for multiple legal entities\n check = tree.xpath(\"//*[local-name()='EntityCentralIndexKey']\")\n if len(check) > 1:\n entity = [defs[x.attrib.get('contextRef')] for x in check if \n int(x.text) == 
int(cik)]\n if entity and 'LegalEntityAxis' in entity[0]:\n self.entity = entity[0]['LegalEntityAxis']\n\n # balance sheet\n bs_assets = None\n for key in ['Assets', 'AssetsNet']:\n if bs_assets is None:\n bs_assets = self.pull(key, 'bs_assets')\n\n bs_cash = None \n for key in ['CashAndDueFromBanks', 'CashAndCashEquivalents',\n 'CashAndCashEquivalentsAtCarryingValue']:\n if bs_cash is None:\n bs_cash = self.pull(key, 'bs_cash')\n\n bs_currentassets = self.pull('AssetsCurrent', 'bs_currentassets')\n\n bs_ppenet = None\n for key in ['PropertyPlantAndEquipmentNet',\n 'PublicUtilitiesPropertyPlantAndEquipmentNet']:\n if bs_ppenet is None:\n bs_ppenet = self.pull(key, 'bs_ppenet')\n \n bs_ppegross = self.pull('PropertyPlantAndEquipmentGross', 'bs_ppegross')\n \n bs_currentliabilities = self.pull('LiabilitiesCurrent', \n 'bs_currentliabilities')\n\n bs_liabilities = self.pull('Liabilities', 'bs_liabilities')\n\n bs_longtermdebtnoncurrent = self.pull('LongTermDebtNoncurrent', \n 'bs_longtermdebtnoncurrent')\n if bs_longtermdebtnoncurrent is None:\n\n # build long term notes and loans\n bs_longtermdebtnoncurrent = self.pull('LongTermNotesAndLoans', None, \n history=False)\n if bs_longtermdebtnoncurrent is None:\n notes_payable = self.pull('LongTermNotesPayable', None, \n history=False)\n if notes_payable is None:\n notes_payable = 0\n for key in ['MediumtermNotesNoncurrent', \n 'JuniorSubordinatedLongTermNotes',\n 'SeniorLongTermNotes',\n 'ConvertibleLongTermNotesPayable',\n 'NotesPayableToBankNoncurrent', \n 'OtherLongTermNotesPayable']:\n tmp = self.pull(key, None, history=False)\n if tmp is not None:\n notes_payable += int(tmp)\n\n loans_payable = self.pull('LongTermLoansPayable', None, \n history=False)\n if loans_payable is None:\n loans_payable = 0\n for key in ['LongTermLoansFromBank',\n 'OtherLoansPayableLongTerm']:\n tmp = self.pull(key, None, history=False)\n if tmp is not None:\n loans_payable += int(tmp)\n\n bs_longtermdebtnoncurrent = int(notes_payable) + int(loans_payable)\n else:\n bs_longtermdebtnoncurrent = int(bs_longtermdebtnoncurrent)\n\n # add other elements\n for key in ['LongTermLineOfCredit', 'CommercialPaperNoncurrent',\n 'ConstructionLoanNoncurrent', 'SecuredLongTermDebt',\n 'SubordinatedLongTermDebt', 'UnsecuredLongTermDebt', \n 'ConvertibleDebtNoncurrent', \n 'ConvertibleSubordinatedDebtNoncurrent', \n 'LongTermTransitionBond', 'LongTermPollutionControlBond', \n 'JuniorSubordinatedDebentureOwedToUnconsolidatedSubsidiaryTrustNoncurrent',\n 'SpecialAssessmentBondNoncurrent',\n 'LongtermFederalHomeLoanBankAdvancesNoncurrent',\n 'OtherLongTermDebtNoncurrent']:\n tmp = self.pull(key, None, history=False)\n if tmp is not None:\n bs_longtermdebtnoncurrent += int(tmp)\n\n bs_longtermdebtcurrent = self.pull('LongTermDebtCurrent', \n 'bs_longtermdebtcurrent')\n\n bs_longtermdebt = None\n for key in ['LongTermDebtAndCapitalLeaseObligations', 'LongTermDebt',\n 'LongTermDebtNetAlternative']:\n if bs_longtermdebt is None:\n bs_longtermdebt = self.pull(key, 'bs_longtermdebt')\n\n bs_equity = None\n for key in ['StockholdersEquity', \n 'StockholdersEquityIncludingPortionAttributableToNoncontrollingInterest',\n 'PartnersCapital', 'CommonStockholdersEquity', \n 'PartnersCapitalIncludingPortionAttributableToNoncontrollingInterest',\n 'MemberEquity', 'AssetsNet']:\n if bs_equity is None:\n bs_equity = self.pull(key, 'bs_equity')\n\n # income statement\n is_sales = None\n for key in ['SalesRevenueNet', 'Revenues', 'SalesRevenueGoodsNet',\n 'SalesRevenueServicesNet']:\n if is_sales is 
None:\n is_sales = self.pull(key, 'is_sales')\n\n is_cogs = None\n for key in ['CostOfGoodsAndServicesSold', 'CostOfGoodsSold', 'CostOfServices', \n 'CostOfGoodsSoldExcludingDepreciationDepletionAndAmortization',\n 'CostOfRevenue']:\n if is_cogs is None:\n is_cogs = self.pull(key, 'is_cogs')\n\n is_grossprofit = self.pull('GrossProfit', 'is_grossprofit')\n \n is_research = None\n for key in ['ResearchAndDevelopmentExpense',\n 'ResearchAndDevelopmentExpenseExcludingAcquiredInProcessCost']:\n if is_research is None:\n is_research = self.pull(key, 'is_research')\n\n is_sga = None\n for key in ['SellingGeneralAndAdministrativeExpense', \n 'SellingGeneralAndAdministrativeExpenses']:\n if is_sga is None:\n is_sga = self.pull(key, 'is_sga')\n\n is_opexpenses = None\n for key in ['OperatingCostsAndExpenses', 'OperatingExpenses', \n 'CostsAndExpenses']:\n if is_opexpenses is None:\n is_opexpenses = self.pull(key, 'is_opexpenses')\n\n is_incometax = self.pull('IncomeTaxExpenseBenefit', 'is_incometax')\n if is_incometax is None:\n is_incometax = self.pull('IncomeTaxExpenseBenefitContinuingOperations',\n 'is_incometax')\n\n is_netincome = None\n for key in ['NetIncomeLoss',\n 'NetIncomeLossAvailableToCommonStockholdersBasic',\n 'ProfitLoss',\n 'NetIncomeLossAvailableToCommonStockholdersDiluted']:\n if is_netincome is None:\n is_netincome = self.pull(key, 'is_netincome')\n\n is_opincome = None\n for key in ['OperatingIncomeLoss', \n 'IncomeLossFromContinuingOperationsBeforeIncomeTaxesExtraordinaryItemsNoncontrollingInterest']:\n if is_opincome is None:\n is_opincome = self.pull(key, 'is_opincome')\n\n # cash flow\n cf_operating = None\n for key in ['NetCashProvidedByUsedInOperatingActivities', \n 'NetCashProvidedByUsedInOperatingActivitiesContinuingOperations']:\n if cf_operating is None:\n cf_operating = self.pull(key, 'cf_operating')\n\n cf_depreciation = self.pull('Depreciation', 'cf_depreciation')\n\n cf_depreciationamortization = None\n for key in ['DepreciationAmortizationAndAccretionNet',\n 'DepreciationAndAmortization', \n 'DepreciationDepletionAndAmortization']:\n if cf_depreciationamortization is None:\n cf_depreciationamortization = self.pull(key, 'cf_depreciationamortization')\n\n cf_investing = None\n for key in ['NetCashProvidedByUsedInInvestingActivities', \n 'NetCashProvidedByUsedInInvestingActivitiesContinuingOperations']:\n if cf_investing is None:\n cf_investing = self.pull(key, 'cf_investing')\n\n cf_ppe = None\n for key in ['GainLossOnSaleOfPropertyPlantEquipment']:\n if cf_ppe is None:\n cf_ppe = self.pull(key, 'cf_ppe', history=False)\n\n cf_financing = None\n for key in ['NetCashProvidedByUsedInFinancingActivities',\n 'NetCashProvidedByUsedInFinancingActivitiesContinuingOperations']:\n if cf_financing is None:\n cf_financing = self.pull(key, 'cf_financing')\n\n cf_dividends = None\n for key in ['PaymentsOfDividends']:\n if cf_dividends is None:\n cf_dividends = self.pull(key, 'cf_dividends')\n\n cf_cashchange = self.pull('CashAndCashEquivalentsPeriodIncreaseDecrease',\n 'cf_cashchange')\n if cf_cashchange is None and (cf_operating or cf_investing or cf_financing):\n cf_cashchange = sum([int(x) for x in [\n cf_operating, cf_investing, cf_financing] if x])\n exchange = None\n for key in ['EffectOfExchangeRateOnCashAndCashEquivalents', \n 'EffectOfExchangeRateOnCashAndCashEquivalentsContinuingOperations']:\n if exchange is None:\n exchange = self.pull(key, None, history=False)\n if exchange is not None:\n cf_cashchange -= int(exchange)\n if cf_cashchange is None:\n cf_cashchange = 
self.pull('CashPeriodIncreaseDecrease', 'cf_cashchange')\n\n # write data to file\n with open(self.datapath, 'a') as f:\n f.write('{}\\n'.format('|'.join(\n str(x) if x is not None else '' for x in [\n focus, ticker, cik, zipcode, form, formdate,\n date, acceptance, accession, name,\n bs_assets, bs_cash, bs_currentassets, bs_ppenet, \n bs_ppegross, bs_currentliabilities, bs_liabilities, \n bs_longtermdebtnoncurrent, bs_longtermdebtcurrent, \n bs_longtermdebt, bs_equity, \n is_sales, is_cogs, is_grossprofit, \n is_research, is_sga, is_opexpenses,\n is_incometax, is_netincome, is_opincome, \n cf_operating, cf_depreciation, \n cf_depreciationamortization, cf_investing,\n cf_financing, cf_dividends, cf_cashchange])))", "title": "" }, { "docid": "a957423acee10d5bc6fab300d83b47a9", "score": "0.4806609", "text": "def useDataManager(self):\n\n payload = ('<?xml version=\"1.0\" ?>\\n' +\n '<S:Envelope xmlns:S=\"http://schemas.xmlsoap.org/soap/envelope/\">\\n' +\n '<S:Body> <ns3:QueryPackageFileElement\\n' +\n 'xmlns:ns4=\"http://dataManagerService\"\\n' +\n 'xmlns:ns3=\"http://gov/nih/ndar/ws/datamanager/server/bean/jaxb\"\\n' +\n 'xmlns:ns2=\"http://dataManager/transfer/model\">\\n' +\n '<packageId>' + self.package + '</packageId>\\n' +\n '<associated>true</associated>\\n' +\n '</ns3:QueryPackageFileElement>\\n' +\n '</S:Body>\\n' +\n '</S:Envelope>')\n\n\n response, session = api_request(self, \"POST\", self.url, data=payload)\n\n root = ET.fromstring(response.text)\n packageFiles = root.findall(\".//queryPackageFiles\")\n for element in packageFiles:\n associated = element.findall(\".//isAssociated\")\n path = element.findall(\".//path\")\n alias = element.findall(\".//alias\")\n for a in associated:\n if a.text == 'false':\n for p in path:\n file = 's3:/' + p.text\n self.path_list.add(file)\n for al in alias:\n alias_path = al.text\n\n self.local_file_names[file] = alias_path\n\n self.verbose_print('Downloading package files for package {}.'.format(self.package))", "title": "" }, { "docid": "cf35e1290a90a113775fefa2a8492e6f", "score": "0.47939947", "text": "def create_package_container(self, project, package):\n dst_meta = '<package name=\"{}\"><title/><description/></package>'\n dst_meta = dst_meta.format(package)\n\n url = makeurl(self.apiurl, ['source', project, package, '_meta'])\n logging.debug(\"create %s/%s\", project, package)\n http_PUT(url, data=dst_meta)", "title": "" }, { "docid": "f803442bc019836ab7aad960d6340e5a", "score": "0.47916466", "text": "def from_ckan(*, base_url, dataset_id, api_key=None):\n return Package.from_storage(\n system.create_storage(\n \"ckan_datastore\",\n base_url=base_url,\n dataset_id=dataset_id,\n api_key=api_key,\n )\n )", "title": "" }, { "docid": "51154bf0b778bf5634ab6eb60790fa4f", "score": "0.4789387", "text": "def test_pkg_get(self):\n\n output = self.app.get('/api/v1/guake')\n self.assertEqual(output.status_code, 301)\n\n output = self.app.get('/api/v1/guake/')\n self.assertEqual(output.status_code, 404)\n output = json.loads(output.data)\n self.assertEqual(output['output'], 'notok')\n self.assertEqual(output['error'], 'Package \"guake\" not found')\n\n create_package(self.session)\n\n output = self.app.get('/api/v1/guake/')\n self.assertEqual(output.status_code, 200)\n output = json.loads(output.data)\n self.assertEqual(output['name'], 'guake')\n self.assertEqual(output['summary'], u'drop-down terminal for gnóme')\n self.assertEqual(output['icon'], 'https://apps.fedoraproject.org/'\n 'packages/images/icons/guake.png')\n 
self.assertEqual(output['rating'], -1)\n self.assertEqual(output['usage'], 0)\n self.assertEqual(output['tags'], [])", "title": "" }, { "docid": "ee865f36ad7a1c12b92ce0e6b29851b1", "score": "0.47835317", "text": "def get_root_catalog(self):\n return json.load(get_response(self.api + \"/api/catalog\"))", "title": "" }, { "docid": "e03da9820cb48aac6759c8a214e63ec5", "score": "0.47677845", "text": "def get(self, packID):\r\n db = PackageDB()\r\n return jsonify(db.find(packID))", "title": "" }, { "docid": "4c99ac505ec405ccf84e20d9c0a3c98b", "score": "0.47357753", "text": "def parse(self):\n from lxml import objectify as obj\n tree = obj.fromstring(self.content)\n for c in tree.getchildren():\n if c.tag == \"Package\":\n for d in c.getchildren():\n if d.tag == \"PackageHash\":\n self.packages[pname].hash = d.text\n if d.tag == \"Name\":\n pname = d.text\n self.packages[pname] = Pkg(self.base, pname)\n if d.tag == \"PackageURI\":\n self.packages[pname].setFilename(d.text)\n if d.tag == \"History\":\n found = False\n for e in d.getchildren():\n if found == True:\n break\n if e.tag == \"Update\":\n self.packages[pname].setRelease(int(e.attrib[\"release\"]))\n found = True\n if d.tag == \"DeltaPackages\":\n for e in d.getchildren():\n if e.tag == \"Delta\":\n for f in e.getchildren():\n if f.tag == \"PackageURI\":\n self.packages[pname].addDelta(f.text)", "title": "" }, { "docid": "b425b552cac3082cd7ce6c2405a9e1a0", "score": "0.47278172", "text": "def ping_package():\n link = request.args.get('link', '') # ex: http://eric.rsshub.org/\n name = name_from_link(link)\n handle_pack_file(name)\n obj = {\n 'whenLastUpdate': build_timestamp(),\n 'urlRedirect': 'http://dir.rsshub.org/%s/' % name,\n 'ctUpdates': redis.incr('counter:%s' % name),\n }\n redis.hmset('names:%s' % name, obj)\n return build_response({\n 'url': link,\n })", "title": "" }, { "docid": "c6fd58ceecc7bd20bea4bdb82f5b0c7d", "score": "0.47134325", "text": "def testMetadata(self):\n self._setupRhaptos()\n self.folder.manage_addProduct['CMFPlone'].addPloneFolder('workspace') \n uploadrequest = self.createUploadRequest(\n 'entry.xml',\n self.folder.workspace,\n CONTENT_DISPOSITION='attachment; filename=entry.xml',\n )\n\n # Call the sword view on this request to perform the upload\n adapter = getMultiAdapter(\n (self.folder.workspace, uploadrequest), Interface, 'sword')\n xml = adapter()\n dom = parseString(xml)\n returned_depositreceipt = dom.toxml()\n\n file = open(os.path.join(\n DIRNAME, 'data', 'unittest', 'entry_depositreceipt_firstpost.xml'), 'r')\n dom = parse(file)\n file.close()\n\n module = self.folder.workspace.objectValues()[0]\n self.assertEqual(module.message, 'Created module')\n mid = dom.getElementsByTagName('id')\n for element in mid:\n element.firstChild.nodeValue = module.id\n dates = dom.getElementsByTagName('updated')\n dates[0].firstChild.nodeValue = module.revised\n created = dom.getElementsByTagName('dcterms:created')\n for element in created:\n element.firstChild.nodeValue = module.created\n modified = dom.getElementsByTagName('dcterms:modified')\n for element in modified:\n element.firstChild.nodeValue = module.revised\n identifiers = dom.getElementsByTagName('dcterms:identifier')\n for identifier in identifiers:\n if identifier.getAttribute('xsi:type') == \"dcterms:URI\":\n identifier.firstChild.nodeValue = module.absolute_url()\n\n reference_depositreceipt = dom.toxml()\n reference_depositreceipt = reference_depositreceipt.replace('__MODULE_ID__', module.id)\n\n assert bool(xml), \"Upload view does not return a 
result\"\n assert \"<sword:error\" not in xml, xml\n self.assertEqual(returned_depositreceipt, reference_depositreceipt,\n 'Result does not match reference doc: \\n\\n%s' % diff(\n returned_depositreceipt, reference_depositreceipt))", "title": "" }, { "docid": "c572836aea1cfcca73e1499e99c88e72", "score": "0.47077116", "text": "def spdx_package(self) -> 'outputs.PackageInfoOccurrenceResponse':\n return pulumi.get(self, \"spdx_package\")", "title": "" }, { "docid": "7d264173cbdb56c20900abd2b32a7290", "score": "0.47064915", "text": "def test_upload_package(self):\n self.navigate_to('Manage')\n self.go_to_submenu('Package Definitions')\n\n self.driver.find_element_by_id(c.UploadPackage).click()\n el = self.driver.find_element_by_css_selector(\n \"input[name='upload-package']\")\n el.send_keys(self.archive)\n self.driver.find_element_by_xpath(c.InputSubmit).click()\n\n # No application data modification is needed\n self.driver.find_element_by_xpath(c.InputSubmit).click()\n self.driver.find_element_by_xpath(c.InputSubmit).click()\n\n self.wait_for_alert_message()\n\n self.check_package_parameter_by_name(self.archive_name,\n 'Active',\n 'True')\n self.check_package_parameter_by_name(self.archive_name,\n 'Public',\n 'False')\n self.check_package_parameter_by_name(self.archive_name,\n 'Tenant Name',\n cfg.common.tenant)", "title": "" }, { "docid": "f9e36780cc175cb7b1d009bf25fb7287", "score": "0.46981248", "text": "def postXml(url, data):\n import requests\n response = requests.post(url, data=data, auth = (odl_user, odl_password),headers= {'Content-Type': 'application/xml'})\n print response.text\n #f = urllib2.urlopen(req)\n #response = f.read()\n #f.close()\n return response.json()", "title": "" }, { "docid": "7aca782969496f5f86516820d1c757b6", "score": "0.46801066", "text": "def test_custom_action_response_descriptor_octopus_server_web_api_actions_package_search_action(self):\n pass", "title": "" }, { "docid": "c573def00a8a096f1e21c609849e9183", "score": "0.46713343", "text": "def test_pkg_get_tag(self):\n\n output = self.app.get('/api/v1/guake/tag')\n self.assertEqual(output.status_code, 301)\n\n output = self.app.get('/api/v1/guake/tag/')\n self.assertEqual(output.status_code, 404)\n output = json.loads(output.data)\n self.assertEqual(output['output'], 'notok')\n self.assertEqual(output['error'], 'Package \"guake\" not found')\n\n create_package(self.session)\n\n output = self.app.get('/api/v1/guake/tag/')\n self.assertEqual(output.status_code, 200)\n output = json.loads(output.data)\n self.assertEqual(output['name'], 'guake')\n self.assertEqual(output['tags'], [])\n\n create_tag(self.session)\n\n output = self.app.get('/api/v1/guake/tag/')\n self.assertEqual(output.status_code, 200)\n output = json.loads(output.data)\n self.assertEqual(output['name'], 'guake')\n self.assertEqual(output['tags'][0]['tag'], u'gnóme')\n self.assertEqual(output['tags'][0]['votes'], 2)\n self.assertEqual(output['tags'][0]['like'], 2)\n self.assertEqual(output['tags'][0]['dislike'], 0)\n self.assertEqual(output['tags'][1]['tag'], 'terminal')\n self.assertEqual(output['tags'][0]['votes'], 2)\n self.assertEqual(output['tags'][0]['like'], 2)\n self.assertEqual(output['tags'][0]['dislike'], 0)", "title": "" }, { "docid": "180d2b7c9b4bcefe7e379ecb6d124d5d", "score": "0.4651839", "text": "def grab_xml(self, url):\n r = requests.get(url)\n return r.content", "title": "" }, { "docid": "3224e7a22df3690eba43fc3774fe2eea", "score": "0.46476457", "text": "def parse_xml(self, xml_file, check_version=True, check_root=True,\n 
encoding=None):\n root = mixbox.xml.get_etree_root(xml_file, encoding=encoding)\n\n stix_package_obj = self.parse_xml_to_obj(\n xml_file=root,\n check_version=check_version,\n check_root=check_root,\n encoding=encoding\n )\n \n from stix.core import STIXPackage # resolve circular dependencies\n stix_package = STIXPackage().from_obj(stix_package_obj)\n\n self._apply_input_namespaces(root, stix_package)\n self._apply_input_schemalocations(root, stix_package)\n \n return stix_package", "title": "" }, { "docid": "fdbbfabbe1cb482aa23e68feec8b75bb", "score": "0.46423078", "text": "def getContentXml():", "title": "" }, { "docid": "873b5139bbacd963088f06b6a44e749c", "score": "0.4636437", "text": "def publish(self, dataset):\n file_path = os.path.join(self.inventory.external,\n self.file_name)\n file_response = requests.get(self.file_url)\n with open(file_path, 'wb') as file:\n file.write(file_response.content)\n return self", "title": "" }, { "docid": "f693c4cdac732e7b638c0abb9ac84e41", "score": "0.46361214", "text": "def package(self, package):\n\n self._package = package", "title": "" }, { "docid": "cecbd8d6288dfc4efbb767fb01cb2b67", "score": "0.4632708", "text": "def publish(ctx, username, password, server, debug):\n dp = validate()\n\n if not (username and password):\n secho('Error', fg='red', nl=False)\n echo(': missing user credentials. \\n\\nTo enter your credentials please run:')\n echo('\\n dpmpy configure\\n')\n sys.exit(1)\n\n echo('Authenicating ... ', nl=False)\n authresponse = request('POST', url='%s/api/auth/token' % server,\n json={'username': username, 'secret': password})\n\n token = authresponse.json().get('token')\n if not token:\n secho('FAIL\\n', fg='red')\n echo('Error: server did not return auth token\\n')\n sys.exit(1)\n secho('ok', fg='green')\n\n echo('Uploading datapackage.json ... ', nl=False)\n response = request('PUT',\n '%s/api/package/%s/%s' % (server, username, dp.descriptor['name']),\n json=dp.descriptor,\n headers={'Authorization': 'Bearer %s' % token})\n secho('ok', fg='green')\n\n for resource in dp.resources:\n echo('Uploading resource %s' % resource.local_data_path)\n\n # Ask the server for s3 put url for a resource.\n response = request('POST',\n '%s/api/auth/bitstore_upload' % (server),\n json={\n 'publisher': username,\n 'package': dp.descriptor['name'],\n 'path': basename(resource.local_data_path)\n },\n headers={'Authorization': 'Bearer %s' % token})\n puturl = response.json().get('key')\n if not puturl:\n secho('ERROR ', fg='red', nl=False)\n echo('server did not return resource put url\\n')\n sys.exit(1)\n\n filestream = ChunkReader(resource.local_data_path)\n\n if debug:\n echo('Uploading to %s' % puturl)\n echo('File size %d' % filestream.len)\n\n with progressbar(length=filestream.len, label=' ') as bar:\n filestream.on_progress = bar.update\n response = requests.put(puturl, data=filestream)\n\n echo('Finalizing ... 
', nl=False)\n response = request('GET',\n '%s/api/package/%s/%s/finalize' % (server, username, dp.descriptor['name']),\n headers={'Authorization': 'Bearer %s' % token})\n secho('ok', fg='green')", "title": "" }, { "docid": "4bd3a62d3ad1c3c70af91aead09f3155", "score": "0.46322834", "text": "def main():\n # https://stackoverflow.com/questions/6028000/how-to-read-a-static-file-from-inside-a-python-package\n # Read configurations from config file\n cfg = ConfigParser()\n # cfg.read_string(resources.read_text(\"layer\", \"config.txt\"))\n cfg.read_string(resources.read_text(__package__, \"config.txt\"))\n url = cfg.get(\"api\", \"endpoint\")\n\n # If an article ID is given, show the article\n if len(sys.argv) > 1:\n if sys.argv[1] == 'map':\n print(\"This sub package contains all the map api related functions.\")\n if sys.argv[1] == 'layer':\n print(\"This sub package contains all the layer api related functions.\")\n if sys.argv[1] == 'common':\n print(\"This sub package contains various common functions useful accross sub packages.\")\n if sys.argv[1] == 'extras':\n print(\"This sub package contains helpful functions which are not part of core functionality but still good to have them!\")\n \n # If no ID is given, show a list of all articles\n else:\n print(\"This package contains 4 sub packages to accomplish different functions :\")\n print(\" 1. map\")\n print(\" 2. layer\")\n print(\" 3. common\")\n print(\" 4. extras\")\n print(\" To know more about individual sub package, you can run package with respective sub package. e.g. python -m themap common\")", "title": "" }, { "docid": "5b187ec50b4a9250a565fe2f26e21b84", "score": "0.4626973", "text": "def Package():\n pass", "title": "" }, { "docid": "b2e76bea69940cdc3c4b851c49a4316c", "score": "0.46223205", "text": "def test_tag_export(self):\n output = self.app.get('/api/v1/tag/export/')\n self.assertEqual(output.status_code, 200)\n target = {'packages': []}\n self.assertEqual(json.loads(output.data), target)\n\n create_package(self.session)\n create_tag(self.session)\n\n output = self.app.get('/api/v1/tag/export/')\n self.assertEqual(output.status_code, 200)\n target = {\n u'packages': [\n {\n u'guake': [{\n u'tag': u'gnóme',\n u'total': 2,\n }, {\n u'tag': u'terminal',\n u'total': 2,\n }]\n }, {\n u'geany': [{\n u'tag': u'gnóme',\n u'total': 2,\n }, {\n u'tag': u'ide',\n u'total': 2,\n }]\n }, {\n u'gitg': [],\n }\n ]\n }\n self.assertEqual(json.loads(output.data), target)", "title": "" }, { "docid": "fb32c50d96a245da8659190e58b88f2d", "score": "0.46046114", "text": "def root():\n return 'Sciris API v.0.0.0'", "title": "" }, { "docid": "5a544566c8c4b5bf42576151cbac3cca", "score": "0.46006453", "text": "def parse_xml_to_obj(self, xml_file, check_version=True, check_root=True,\n encoding=None):\n root = mixbox.xml.get_etree_root(xml_file, encoding=encoding)\n\n if check_version:\n self._check_version(root)\n\n if check_root:\n self._check_root(root)\n\n import stix.bindings.stix_core as stix_core_binding \n stix_package_obj = stix_core_binding.STIXType().factory()\n stix_package_obj.build(root)\n\n return stix_package_obj", "title": "" }, { "docid": "07b528cc733a651e88cf53d05aa66803", "score": "0.45935777", "text": "def dp_import(xmlfile):", "title": "" }, { "docid": "ba0686f2f7fa72bd3ba9ff14dbdbfc10", "score": "0.45810187", "text": "def query(self, pacer_case_id):\n timeout = (60, 300)\n url = get_docketxml_url(self.court_id, pacer_case_id)\n logger.info(\"GETting docket XML at URL: %s\")\n r = self.session.get(url, timeout=timeout)\n 
self.response = r\n self.parse()", "title": "" }, { "docid": "23fc2602ec8a7fd93cb700b65ea029f5", "score": "0.45759457", "text": "def test_upload_package(self):\n self.navigate_to('Manage')\n self.go_to_submenu('Packages')\n\n self.driver.find_element_by_id(c.UploadPackage).click()\n el = self.driver.find_element_by_css_selector(\n \"input[name='upload-package']\")\n el.send_keys(self.archive)\n self.driver.find_element_by_xpath(c.InputSubmit).click()\n\n # No application data modification is needed\n self.driver.find_element_by_xpath(c.InputSubmit).click()\n self.driver.find_element_by_xpath(c.InputSubmit).click()\n\n self.wait_for_alert_message()\n\n self.check_package_parameter_by_name(self.archive_name,\n 'Active',\n 'True')\n self.check_package_parameter_by_name(self.archive_name,\n 'Public',\n 'False')\n self.check_package_parameter_by_name(self.archive_name,\n 'Tenant Name',\n cfg.common.tenant)", "title": "" }, { "docid": "d59d889d190a6c567789ef6baf580664", "score": "0.45748687", "text": "def run_pack():\n wgt = nuke2pack.PackageDialog()\n wgt.exec_()", "title": "" }, { "docid": "50a34e7dc830ceb70ef56bd12f2524f6", "score": "0.45745155", "text": "def _process_informationfile(self, file, netloc, url):\n try:\n root = ET.parse(file).getroot()\n except ET.ParseError as e:\n QgsMessageLog.logMessage(\n self.tr(\n \"Could not parse ilidata file `{file}` ({exception})\".format(\n file=file, exception=str(e)\n )\n ),\n self.tr(\"QGIS Model Baker\"),\n )\n return\n\n self.repositories[netloc] = list()\n repo_files = list()\n for repo in root.iter(\n \"{http://www.interlis.ch/INTERLIS2.3}DatasetIdx16.DataIndex\"\n ):\n for topping_metadata in repo.findall(\n \"ili23:DatasetIdx16.DataIndex.DatasetMetadata\", self.ns\n ):\n dataset_id = \"ilidata:{}\".format(\n topping_metadata.find(\"ili23:id\", self.ns).text\n )\n if dataset_id in self.file_ids:\n for files_element in topping_metadata.findall(\n \"ili23:files\", self.ns\n ):\n for data_file in files_element.findall(\n \"ili23:DatasetIdx16.DataFile\", self.ns\n ):\n for file_element in data_file.findall(\n \"ili23:file\", self.ns\n ):\n for file in file_element.findall(\n \"ili23:DatasetIdx16.File\", self.ns\n ):\n path = file.find(\"ili23:path\", self.ns).text\n\n toppingfile = dict()\n toppingfile[\"id\"] = dataset_id\n\n version = topping_metadata.find(\n \"ili23:version\", self.ns\n )\n if version is not None:\n toppingfile[\"version\"] = version.text\n else:\n toppingfile[\"version\"] = None\n\n owner = topping_metadata.find(\n \"ili23:owner\", self.ns\n )\n if owner is not None:\n toppingfile[\"owner\"] = owner.text\n else:\n toppingfile[\"owner\"] = None\n\n toppingfile[\"repository\"] = netloc\n # relative_file_path like qml/something.qml\n toppingfile[\"relative_file_path\"] = path\n # url like http://models.opengis.ch or /home/nyuki/folder\n toppingfile[\"url\"] = url\n toppingfile[\"local_file_path\"] = self.download_file(\n netloc, url, path, dataset_id\n )\n repo_files.append(toppingfile)\n\n self.repositories[netloc] = sorted(\n repo_files, key=lambda m: m[\"version\"] if m[\"version\"] else 0, reverse=True\n )\n\n self.model.set_repositories(self.repositories)", "title": "" }, { "docid": "0092349eb6c38a56b5632c6f646d8b26", "score": "0.45724946", "text": "def get_package(self, name):\n i = self.info(name)\n if i:\n return AurPackage(i, self.url)", "title": "" }, { "docid": "d8586c1c2419838a987a473ffb073e14", "score": "0.45697972", "text": "def curl_pkg(self, pkg_name, pkg_path, jamf_url, enc_creds, obj_id):\n url = 
f\"{jamf_url}/dbfileupload\"\n additional_headers = [\n \"--header\",\n \"DESTINATION: 0\",\n \"--header\",\n f\"OBJECT_ID: {obj_id}\",\n \"--header\",\n \"FILE_TYPE: 0\",\n \"--header\",\n f\"FILE_NAME: {pkg_name}\",\n \"--max-time\",\n str(\"3600\"),\n ]\n r = self.curl(\"POST\", url, enc_creds, pkg_path, additional_headers)\n self.output(f\"HTTP response: {r.status_code}\", verbose_level=1)\n return r", "title": "" }, { "docid": "90ed65c63e338692835c342497b662bb", "score": "0.45644206", "text": "def publish_events(self):\n\n request = Request(self.baseurl)\n request.add_header(\"sense_key\", self.api_key)\n request.add_header(\"content-type\", \"application/json\")\n request.add_data(json.dumps(self.data))\n\n if settings.DEBUG:\n print request.data\n\n try:\n response = urlopen(request)\n if settings.DEBUG:\n print response.read()\n except HTTPError, e:\n print \"Some sort of HTTP style error occurred: Code: %s, reason: %s\" % ( \n e.code, e.reason)\n print \"Package was: %s\" % json.dumps(self.data)\n\n finally:\n self.data = []\n\n return response.code if \"response\" in vars() else e.code", "title": "" }, { "docid": "8115471a6bc5591919186abab026728b", "score": "0.4552929", "text": "def open(self):\n self.xmlrpc.connect()", "title": "" }, { "docid": "430c627643214997384f8755cb7ae11b", "score": "0.45500743", "text": "def __init__(self, gs_axx, dico_gs, tipo, txt=''):\n # connection\n cat = Catalog(gs_axx[0], gs_axx[1], gs_axx[2],\n disable_ssl_certificate_validation=gs_axx[3])\n # print(dir(cat))\n\n\n\n # -- WORKSPACES -------------------------------------------------------\n workspaces = cat.get_workspaces()\n for wk in workspaces:\n # print(wk.name, wk.enabled, wk.resource_type, wk.wmsstore_url)\n dico_gs[wk.name] = wk.href, {}\n # print(dir(wk))\n\n # -- STORES -----------------------------------------------------------\n # stores = cat.get_stores()\n # for st in stores:\n # # print(st.name, st.enabled, st.href, st.resource_type)\n # if hasattr(st, 'url'):\n # url = st.url\n # elif hasattr(st, 'resource_url'):\n # url = st.resource_url\n # else:\n # print(dir(st))\n\n # dico_gs.get(st.workspace.name)[1][st.name] = {\"ds_type\": st.type,\n # \"ds_url\": url}\n\n # print(dir(st))\n # print(st.url)\n # print(st.workspace)\n # print(dir(st.workspace))\n # print(st.workspace.name)\n\n # -- LAYERS -----------------------------------------------------------\n # resources_target = cat.get_resources(workspace='ayants-droits')\n layers = cat.get_layers()\n logging.info(\"{} layers found\".format(len(layers)))\n dico_layers = OrderedDict()\n for layer in layers:\n # print(layer.resource_type)\n lyr_title = layer.resource.title\n lyr_name = layer.name\n lyr_wkspace = layer.resource._workspace.name\n if type(layer.resource) is Coverage:\n lyr_type = \"coverage\"\n elif type(layer.resource) is FeatureType:\n lyr_type = \"vector\"\n else:\n lyr_type = type(layer.resource)\n\n # a log handshake\n logging.info(\"{} | {} | {} | {}\".format(layers.index(layer),\n lyr_type,\n lyr_name,\n lyr_title))\n\n # # METADATA LINKS #\n # download link\n md_link_dl = \"{0}/geoserver/{1}/ows?request=GetFeature\"\\\n \"&service=WFS&typeName={1}%3A{2}&version=2.0.0\"\\\n \"&outputFormat=SHAPE-ZIP\"\\\n .format(url_base,\n lyr_wkspace,\n lyr_name)\n\n # mapfish links\n md_link_mapfish_wms = \"{0}/mapfishapp/?layername={1}\"\\\n \"&owstype=WMSLayer&owsurl={0}/\"\\\n \"geoserver/{2}/ows\"\\\n .format(url_base,\n lyr_name,\n lyr_wkspace)\n\n md_link_mapfish_wfs = \"{0}/mapfishapp/?layername={1}\"\\\n 
\"&owstype=WFSLayer&owsurl={0}/\"\\\n \"geoserver/{2}/ows\"\\\n .format(url_base,\n lyr_name,\n lyr_wkspace)\n\n md_link_mapfish_wcs = \"{0}/mapfishapp/?cache=PreferNetwork\"\\\n \"&crs=EPSG:2154&format=GeoTIFF\"\\\n \"&identifier={1}:{2}\"\\\n \"&url={0}/geoserver/ows?\"\\\n .format(url_base,\n lyr_wkspace,\n lyr_name)\n\n # OC links\n md_link_oc_wms = \"{0}/geoserver/{1}/wms?layers={1}:{2}\"\\\n .format(url_base,\n lyr_wkspace,\n lyr_name)\n md_link_oc_wfs = \"{0}/geoserver/{1}/ows?typeName={1}:{2}\"\\\n .format(url_base,\n lyr_wkspace,\n lyr_name)\n\n md_link_oc_wcs = \"{0}/geoserver/{1}/ows?typeName={1}:{2}\"\\\n .format(url_base,\n lyr_wkspace,\n lyr_name)\n\n # CSW Querier links\n md_link_csw_wms = \"{0}/geoserver/ows?service=wms&version=1.3.0\"\\\n \"&request=GetCapabilities\".format(url_base)\n\n md_link_csw_wfs = \"{0}/geoserver/ows?service=wfs&version=2.0.0\"\\\n \"&request=GetCapabilities\".format(url_base)\n\n # # # SERVICE LINKS #\n # GeoServer Edit links\n gs_link_edit = \"{}/geoserver/web/?wicket:bookmarkablePage=\"\\\n \":org.geoserver.web.data.resource.\"\\\n \"ResourceConfigurationPage\"\\\n \"&name={}\"\\\n \"&wsName={}\".format(url_base,\n lyr_wkspace,\n lyr_name)\n\n # Metadata links (service => metadata)\n if is_uuid(dict_match_gs_md.get(lyr_name)):\n # HTML metadata\n md_uuid_pure = dict_match_gs_md.get(lyr_name)\n srv_link_html = \"{}/portail/geocatalogue?uuid={}\"\\\n .format(url_base, md_uuid_pure)\n\n # XML metadata\n md_uuid_formatted = \"{}-{}-{}-{}-{}\".format(md_uuid_pure[:8],\n md_uuid_pure[8:12],\n md_uuid_pure[12:16],\n md_uuid_pure[16:20],\n md_uuid_pure[20:])\n srv_link_xml = \"http://services.api.isogeo.com/ows/s/\"\\\n \"{1}/{2}?\"\\\n \"service=CSW&version=2.0.2&request=GetRecordById\"\\\n \"&id=urn:isogeo:metadata:uuid:{0}&\"\\\n \"elementsetname=full&outputSchema=\"\\\n \"http://www.isotc211.org/2005/gmd\"\\\n .format(md_uuid_formatted,\n csw_share_id,\n csw_share_token)\n # add to GeoServer layer\n rzourc = cat.get_resource(lyr_name,\n store=layer.resource._store.name)\n rzourc.metadata_links = [('text/html', 'ISO19115:2003', srv_link_html),\n ('text/xml', 'ISO19115:2003', srv_link_xml),\n ('text/html', 'TC211', srv_link_html),\n ('text/xml', 'TC211', srv_link_xml)]\n # rzourc.metadata_links.append(('text/html', 'other', 'hohoho'))\n cat.save(rzourc)\n\n else:\n logging.info(\"Service without metadata: {} ({})\".format(lyr_name,\n dict_match_gs_md.get(lyr_name)))\n pass\n\n dico_layers[layer.name] = {\"title\": lyr_title,\n \"workspace\": lyr_wkspace,\n \"store_name\": layer.resource._store.name,\n \"store_type\": layer.resource._store.type,\n \"lyr_type\": lyr_type,\n \"md_link_dl\": md_link_dl,\n \"md_link_mapfish\": md_link_mapfish_wms,\n \"md_link_mapfish_wms\": md_link_mapfish_wms,\n \"md_link_mapfish_wfs\": md_link_mapfish_wfs,\n \"md_link_mapfish_wcs\": md_link_mapfish_wcs,\n \"md_link_oc_wms\": md_link_oc_wms,\n \"md_link_oc_wfs\": md_link_oc_wfs,\n \"md_link_oc_wcs\": md_link_oc_wcs,\n \"md_link_csw_wms\": md_link_csw_wms,\n \"md_link_csw_wfs\": md_link_csw_wfs,\n \"gs_link_edit\": gs_link_edit,\n \"srv_link_html\": srv_link_html,\n \"srv_link_xml\": srv_link_xml,\n \"md_id_matching\": md_uuid_pure\n }\n\n # mem clean up\n # del dico_layer\n\n # print(dico_gs.get(layer.resource._workspace.name)[1][layer.resource._store.name])\n # print(dir(layer.resource))\n # print(dir(layer.resource.writers))\n # print(dir(layer.resource.writers.get(\"metadataLinks\").func_dict))\n # print(layer.resource.metadata)\n # 
print(layer.resource.metadata_links)\n\n dico_gs[\"layers\"] = dico_layers", "title": "" }, { "docid": "b03d5034a04e65c89c619a6b2795ba1d", "score": "0.45442826", "text": "def get_xml_from_server(self, url, post_data=False):\n response = requests.get(url) if \\\n not post_data else \\\n requests.post(\n url,\n data=post_data,\n headers={\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'User-Agent': 'HBS Cli'\n })\n\n if response.status_code == 403:\n raise FinderInsideProException(\n FinderInsideProException.EXCEPTION_TEXT_WRONG_KEY, FinderInsideProException.TYPE_KEY_IS_WRONG)\n elif response.status_code == 400 and response.text == FinderInsideProException.SESSION_ERROR_TEXT:\n raise FinderInsideProException(\n FinderInsideProException.SESSION_ERROR_TEXT, FinderInsideProException.TYPE_SESSION_IS_WRONG)\n elif response.status_code != 200:\n raise FinderInsideProException(\n FinderInsideProException.REQUEST_ERROR_TEXT.format(response.status_code, response.text))\n\n return response.text", "title": "" }, { "docid": "5a08b9848bdcae493275a90e68c4e7a2", "score": "0.45433757", "text": "def test_custom_action_response_descriptor_octopus_server_web_api_actions_package_version_search_action(self):\n pass", "title": "" }, { "docid": "c908c428c0e451974bb01686b8031563", "score": "0.45433527", "text": "def prepare_data(self) -> Payload:\n result = self.valiant.get_package_metadata(\n package_name=self.argument(\"package\"),\n package_version=self.argument(\"version\"),\n repository_name=self.option(\"repository\"),\n )\n\n if not result:\n raise ValueError(\"Package details could not be loaded.\")\n\n return Payload(metadata=result.package_metadata)", "title": "" }, { "docid": "ba72d0cf20e8a656723c08c230d023bf", "score": "0.4531972", "text": "def parse_dataset_results(self, results):\n\n # handle results (list of dicts):\n # [{'id': 'package_id', 'package': 'package_json'},]\n datasets = []\n for result in results:\n #print(\"id: {id}\".format(id=result['id']))\n #print(\"package: {package}\".format(package=result['package']))\n\n # for this action, we just want to extract some attributes of the dataset and dump to .csv:\n # ['id']: dataset id\n # ['name']: used to contstruct a URL\n # ['dataset_url']: CKAN catalog URL for the dataset (contstructed from 'name')\n # ['title']: the real 'name'\n # ['harvest_object_url']: CKAN harvest object URL (stored ISO XML)\n # ['waf_location']: URL to the orignal harvested XML file\n # ['type']: usually 'dataset', but whatever\n # ['num_resources']: number of associated resources\n # ['num_tags']: number of associated tags\n # ['bbox']: the bounding box JSON (extracted from an 'extra' of the dataset with key='spatial')\n # ['resources']['format']: resource format\n # ['organization']['title']: the dataset's organization title\n parsed_url = urlparse(self.catalog_api_url, allow_fragments=False)\n try:\n bbox = [extra['value'] for extra in result['package']['extras'] if extra['key'] == \"spatial\"][0]\n except IndexError:\n bbox = \"\"\n try:\n harvest_object_id = [extra['value'] for extra in result['package']['extras'] if extra['key'] == \"harvest_object_id\"][0]\n harvest_object_url = \"{scheme}://{netloc}/harvest/object/{id}\".format(scheme=parsed_url.scheme, netloc=parsed_url.netloc, id=harvest_object_id)\n except IndexError:\n harvest_object_url = \"\"\n try:\n waf_location = [extra['value'] for extra in result['package']['extras'] if extra['key'] == \"waf_location\"][0]\n except IndexError:\n waf_location = \"\"\n dataset_url = 
\"{scheme}://{netloc}/dataset/{name}\".format(scheme=parsed_url.scheme, netloc=parsed_url.netloc, name=result['package']['name'])\n # necessary to quote (\"\") any fields that may have commas or semicolons for CSV output:\n if any(x in result['package']['title'] for x in [\",\",\";\"]):\n title = \"\\\"{title}\\\"\".format(title=result['package']['title'])\n else:\n title = result['package']['title']\n resource_formats = [resource['format'] for resource in result['package']['resources']]\n #formats_list = \"\\\"{list}\\\"\".format(list=\",\".join(resource_formats))\n formats_list = \"-\".join(resource_formats)\n organization = result['package']['organization']['title']\n datasets.append({\n 'id': result['package']['id'],\n 'name': result['package']['name'],\n 'dataset_url': dataset_url,\n 'title': title,\n 'organization': organization,\n 'harvest_object_url': harvest_object_url,\n 'waf_location': waf_location,\n 'type': result['package']['type'],\n 'num_resources': result['package']['num_resources'],\n 'num_tags': result['package']['num_tags'],\n 'formats': formats_list,\n 'bbox': bbox\n })\n\n # do something with results:\n for dataset in datasets:\n self.out.write(json.dumps(dataset, indent=2, sort_keys=True, ensure_ascii=False))\n\n if \"name\" in self.query_params.keys():\n print(\"Found {count} packages belonging to {org} from {action} query action\".format(count=len(datasets), org=self.query_params.get(\"name\"), action=self.action_name))\n self.out.write(u\"\\nFound {count} packages belonging to {org} from {action} query action\".format(count=len(datasets), org=self.query_params.get(\"name\"), action=self.action_name))\n else:\n print(\"Found {count} packages from {action} query action\".format(count=len(datasets), action=self.action_name))\n self.out.write(u\"\\nFound {count} packages from {action} query action\".format(count=len(datasets), action=self.action_name))\n\n return datasets", "title": "" }, { "docid": "fceb2dc22cc4ccb22abfe4365b28c6fa", "score": "0.45314497", "text": "def _submit(self):\n self.oc.sdc.vsp.submit_software_product(**self.attributes, action=\"Submit\")\n self.oc.sdc.vsp.package_software_product(**self.attributes, action=\"Create_Package\")\n\n vsp = self.oc.sdc.vsp.get_software_product(**self.attributes)\n self.attributes[\"tosca\"] = vsp.response_data\n\n self.oc.cache(\"vsp\", self.software_product_name, \"tosca\", self.tosca)\n self.oc.cache(\"vsp\", self.software_product_name, \"owner\", self.owner)", "title": "" }, { "docid": "c1fce313c4d17ef6c24cff4cfbca1bd6", "score": "0.45307684", "text": "def test_tag_get(self):\n\n output = self.app.get(u'/api/v1/tag/gnóme')\n self.assertEqual(output.status_code, 301)\n\n output = self.app.get(u'/api/v1/tag/gnóme/')\n self.assertEqual(output.status_code, 404)\n output = json.loads(output.data)\n self.assertEqual(output['output'], 'notok')\n self.assertEqual(output['error'], u'Tag \"gnóme\" not found')\n\n create_package(self.session)\n create_tag(self.session)\n\n output = self.app.get(u'/api/v1/tag/gnóme/')\n self.assertEqual(output.status_code, 200)\n output = json.loads(output.data)\n self.assertEqual(output['tag'], u'gnóme')\n self.assertEqual(len(output['packages']), 2)\n self.assertEqual(output['packages'][0]['package'], 'guake')", "title": "" }, { "docid": "3add40217f65576e91de44805ec49775", "score": "0.45296776", "text": "def _extract_xml_data(self):\n\t\ttree = ElementTree.parse(urllib2.urlopen(self._url))\n\t\txml_data = tree.getroot()\n\t\treturn xml_data", "title": "" }, { "docid": 
"2a7f6b2e078063250600fe876092a0e2", "score": "0.45238218", "text": "def _parse_package_data(self, package_id):\n package = self.client['Product_Package']\n\n results = {\n 'categories': {},\n 'locations': []\n }\n\n # First pull the list of available locations. We do it with the\n # getObject() call so that we get access to the delivery time info.\n object_data = package.getRegions(id=package_id)\n\n for loc in object_data:\n details = loc['location']['locationPackageDetails'][0]\n\n results['locations'].append({\n 'delivery_information': details.get('deliveryTimeInformation'),\n 'keyname': loc['keyname'],\n 'long_name': loc['description'],\n })\n\n mask = 'mask[itemCategory[group]]'\n\n for config in package.getConfiguration(id=package_id, mask=mask):\n code = config['itemCategory']['categoryCode']\n group = utils.NestedDict(config['itemCategory']) or {}\n category = {\n 'sort': config['sort'],\n 'step': config['orderStepId'],\n 'is_required': config['isRequired'],\n 'name': config['itemCategory']['name'],\n 'group': group['group']['name'],\n 'items': [],\n }\n\n results['categories'][code] = category\n\n # Now pull in the available package item\n for category in package.getCategories(id=package_id):\n code = category['categoryCode']\n items = []\n\n for group in category['groups']:\n for price in group['prices']:\n items.append({\n 'id': price['itemId'],\n 'description': price['item']['description'],\n 'sort': price['sort'],\n 'price_id': price['id'],\n 'recurring_fee': price.get('recurringFee'),\n 'setup_fee': price.get('setupFee'),\n 'hourly_recurring_fee':\n price.get('hourlyRecurringFee'),\n 'one_time_fee': price.get('oneTimeFee'),\n 'labor_fee': price.get('laborFee'),\n 'capacity': float(price['item'].get('capacity', 0)),\n })\n results['categories'][code]['items'] = items\n\n return results", "title": "" }, { "docid": "c2b038e3fe859bee99407c7a376edca9", "score": "0.45223686", "text": "def get_products_xml(adobeurl):\n print('Source URL is: ' + adobeurl)\n return ET.fromstring(r(adobeurl))", "title": "" }, { "docid": "93ec42e1849867e59697ea3eb70c641e", "score": "0.45219937", "text": "def get_element_info():\n client_id = get_client_id()\n # Get all elements that hav \"precipitation\" in their id\n url = r\"https://frost.met.no/elements/v0.jsonld?ids=*precipitation*&lang=en-US\"\n\n rsp = requests.get(url, auth=(client_id, ''))\n\n print(rsp.text)", "title": "" }, { "docid": "6c3b08cbc78691a7f4bda6a1d3be0719", "score": "0.4517148", "text": "def _xml_request(self, method, url, data, **kwargs) -> dict:\n kwargs[\"headers\"] = {\n \"Accept\": \"application/xml; charset=utf-8\",\n \"Content-Type\": \"application/xml; charset=utf-8\",\n }\n kwargs[\"data\"] = xmltodict.unparse(data)\n\n response = self._http_session.request(method, url, **kwargs)\n return xmltodict.parse(response.text)", "title": "" }, { "docid": "106f6252bbc469a1c87877902acf857e", "score": "0.45128554", "text": "def publish(self):\n # NOTE: Currently Commented Out - Not tested.\n publish = '''\n try:\n xmlfile = self.dlg.OUTPUTFILE\n publisher = koordinates.Publish()\n res = set_metadata(self.layer, xmlfile, publisher)\n if res:\n #r = self.client.publishing.create(publisher)\n self.dlg.validationLog.setText('Publication Complete - ' + \n self.lidpub + ' - ' + self.title)\n else:\n raise Exception('Error Getting Draft Metadata')\n except Exception as e:\n self.dlg.validationLog.setText('Publication Failure: ' + str(e))\n '''\n print (publish)\n xmlfile = self.dlg.OUTPUTFILE\n layer_id = self.lidpub\n print (xmlfile, 
layer_id)\n text = 'Publication Complete - ' + self.lidpub + ' - ' + self.title\n self.dlg.validationLog.setText(text)\n self.publishDialog.close()", "title": "" }, { "docid": "3de459957cd827c369d5262182b81eba", "score": "0.45010993", "text": "def main():\n packages = get_from_db()\n logging.info(\"Fetched %i packages.\", len(packages))\n for pkg in packages:\n package_analysis.main(pkg[\"name\"], pkg[\"url\"], pkg[\"id\"])\n logging.info(\"Package '%s' done.\", pkg[\"name\"])", "title": "" }, { "docid": "1ddb62910ad7fc3cd70988649e469b51", "score": "0.44921377", "text": "def get(self, request, *args, **kwargs):\n datasets = []\n\n if self.q:\n os_api = settings.OS_API\n # TODO: they really use a different base URL for search.\n # This is a stupid hack to mimic this change without defining additional API URLs\n os_api = os_api.replace(\"api/3\", \"/search/package\")\n r = requests.get(os_api, params={'q': '\"' + self.q + '\"', 'size': self.MAX_COMPLETIONS})\n\n for dataset in r.json():\n title = dataset['package']['title']\n id = dataset['id']\n datasets.append(dict(id=id, text=title))\n\n return http.HttpResponse(json.dumps({\n 'results': datasets\n }))", "title": "" }, { "docid": "766f4f25790c1fc4f5cee333ca0f8481", "score": "0.4484108", "text": "def push_xml(self, url, **args):\n\n exporter = self.exporter.xml\n\n return self.push(url, exporter=exporter, **args)", "title": "" }, { "docid": "b2022dd1e5670654b2e754ac02191581", "score": "0.44824344", "text": "def get(self, xml, ncreq):\n # Remove the <data/> element\n xml = xml[0]\n get_rsp = yield from self.netconf_mgr.get(('subtree', xml))\n\n logger.debug('GET rpc response: %s', get_rsp.xml)\n\n if get_rsp.ok:\n # Don't have to strip the data tag, it will be ignored by the pretty\n # printer \n root = get_rsp.data_ele\n rsp_xml = lxml.etree.tostring(root) if len(root) else ''\n else:\n # Error\n rsp_xml = get_rsp.xml\n\n logger.debug('GET response xml: %s', rsp_xml)\n self.stats.gets += 1\n return rsp_xml", "title": "" }, { "docid": "9506d60b5ceb383bdd6885ee1053ccb5", "score": "0.44817826", "text": "def make_request(endpoint, **kwargs):\n data = kwargs.get('json', [])\n package = kwargs.get('package', None)\n method = kwargs.get('method', 'GET')\n\n function = getattr(requests, method.lower())\n\n try:\n if package:\n response = function(endpoint, data=data,\n files={'file': package})\n else:\n response = function(endpoint, json=data)\n except requests.exceptions.ConnectionError:\n LOG.error(\"Couldn't connect to NApps server %s.\", endpoint)\n sys.exit(1)\n\n return response", "title": "" }, { "docid": "7d12c81fddccec546f5e751a9b8fda90", "score": "0.44815633", "text": "def process_package(distro, package_group, package, version, url):\n logger.info(\"Processing package %s-%s\", package, version)\n # Example url: https://github.com/ros2-gbp/ament_cmake-release/archive/release/dashing/ament_cmake/0.7.3-1.zip\n url_template_variables = {\"version\": version, \"package\": package}\n url = url.format_map(url_template_variables)\n logger.debug(\"Final url: %s\", url)\n\n ros_package = RosPackage(distro, package_group, package, version, url)\n\n source_zip_data = download_file(url)\n ros_package.md5sum = hashlib.md5(source_zip_data).hexdigest()\n ros_package.sha256sum = hashlib.sha256(source_zip_data).hexdigest()\n zip_archive = zipfile.ZipFile(io.BytesIO(source_zip_data))\n\n # {{ package_group }}-release-release-{{ distro }}-{{ package }}-{{ version }}\n top_folder = \"{}-release-release-{}-{}-{}\".format(\n package_group, distro, package, 
version\n )\n if top_folder not in zip_archive.namelist():\n # logger.warning('non-regular top folder: %s', top_folder)\n top_folder = os.path.dirname(zip_archive.namelist()[0])\n package_xml_filename = os.path.join(top_folder, \"package.xml\")\n\n with zip_archive.open(package_xml_filename) as f:\n ros_package.license_line, ros_package.license_md5 = extract_license(f)\n\n with io.TextIOWrapper(zip_archive.open(package_xml_filename)) as f:\n tree = ElementTree.parse(f)\n\n root = tree.getroot()\n\n # Extract different xml elements:\n ros_package.description = normalize_description(root.find(\"description\").text)\n license_element = root.find(\"license\")\n ros_package.license_text = (\n None if license_element is None else validate_license(license_element.text)\n )\n\n # Build tool:\n build_type_element = root.find(\"export/build_type\")\n if build_type_element is None:\n logger.warning(\"No build tool set, assuming ament.\")\n build_type = \"ament_cmake\"\n else:\n build_type = build_type_element.text\n ros_package.build_type = build_type\n\n # print(build_tool)\n\n # See also: ros.org/reps/rep-0140.html.\n # Add the different dependencies:\n dep_tags = [\n \"exec_depend\",\n \"build_depend\",\n \"build_export_depend\",\n \"buildtool_depend\",\n \"buildtool_export_depend\",\n \"test_depend\",\n \"doc_depend\",\n ]\n dependencies = {}\n for dep_tag in dep_tags:\n deps = [e.text for e in root.findall(dep_tag)]\n dependencies[dep_tag] = deps\n\n # Special case 'depend' expands to 'exec_depend', 'build_depend' and 'build_export_depend'\n deps = [e.text for e in root.findall(\"depend\")]\n dependencies[\"exec_depend\"].extend(deps)\n dependencies[\"build_depend\"].extend(deps)\n dependencies[\"build_export_depend\"].extend(deps)\n\n ros_package.dependencies = dependencies\n return ros_package", "title": "" }, { "docid": "9b434e5bfbde615ee09493a3a4a2e912", "score": "0.44806567", "text": "def opendata(self):\n self.tree = ElementTree.parse(self.xmlfile)", "title": "" }, { "docid": "8c6acee207ab096e50cf539ba0f3ec5f", "score": "0.44804975", "text": "def scrap(URL : str = \"https://www.nbp.pl/kursy/xml/lasta.xml\"):\r\n try:\r\n req.urlretrieve(URL, \"./currency_list.xml\")\r\n except Exception as e:\r\n print(f\"Wystąpił błąd: {e}\")\r\n exit(1)", "title": "" }, { "docid": "be4787ad09b7b7d0d706b1b564556f41", "score": "0.44801578", "text": "def retrieve(self, request, *args, **kwargs):\n instance = self.get_object()\n serializer = self.get_serializer(instance)\n nsinfo_object = serializer.data\n response = {\n \"links\": [],\n \"nodes\": []\n }\n link_count = 0\n if nsinfo_object['nsInfo']:\n if nsinfo_object['nsInfo']['nsInstanceName'] != None:\n response['nodes'].append({\n \"id\": nsinfo_object['nssiId'],\n \"name\": nsinfo_object['nsInfo']['nsInstanceName'],\n \"symbolSize\": 10,\n \"symbol\": \"roundRect\",\n \"attributes\": {\n \"modularity_class\": 0\n }\n })\n else:\n response['nodes'].append({\n \"id\": nsinfo_object['nssiId'],\n \"name\": nsinfo_object['nsInfo']['nsInstanceDescription'],\n \"symbolSize\": 10,\n \"symbol\": \"roundRect\",\n \"attributes\": {\n \"modularity_class\": 0\n }\n })\n nsinfo = eval(nsinfo_object['nsInfo']['vnfInstance'])\n if 'nsInstanceName' in nsinfo_object['nsInfo']:\n for _ in nsinfo:\n addresses = str()\n cp_id = str()\n vnf_state =_['instantiatedVnfInfo']['vnfState']\n for extCpInfo in _['instantiatedVnfInfo']['extCpInfo']:\n cp_id = extCpInfo['id']\n cp_protocol_info = extCpInfo['cpProtocolInfo']\n ip_over_ethernet = 
cp_protocol_info[0]['ipOverEthernet']\n ip_addresses = ip_over_ethernet['ipAddresses']\n if ip_addresses[0]['isDynamic']:\n addresses = ip_addresses[0]['addresses']\n\n # Consist topology VNF Instance\n if _['vnfProductName'] == \"upf\":\n response['nodes'].append({\n \"id\": _['id'],\n \"name\": _['vnfProductName'],\n \"instantiationState\": _['instantiationState'],\n \"vnfState\": vnf_state,\n \"symbolSize\": 10,\n \"symbol\": \"image://https://lh3.googleusercontent.com/tDq5MNIeqdyUibCoHGUFhFvTi5JSM6-PZ6qec5_yBrGx0fBELl0tYrnWcvOn3TpLeWzcP-qxISW9BHYvFkF6CMREi-tJcmO2eMxLTgPvSBSYX8MZWWjNJd6WFQF-iEXW7oWy476RVA=w2400\",\n \"attributes\": {\n \"modularity_class\": 1\n },\n \"address\": addresses\n })\n elif _['vnfProductName'] == \"hss\":\n response['nodes'].append({\n \"id\": _['id'],\n \"name\": _['vnfProductName'],\n \"instantiationState\": _['instantiationState'],\n \"vnfState\": vnf_state,\n \"symbolSize\": 10,\n \"symbol\": \"image://https://lh3.googleusercontent.com/6D1Mz9o4gBb4g-MSN4p0mKCWz4-PXk-K_ZkAcTQLR1YUyS_PCX-pORu6X9uyRJ_Ve1GlBX4ZL2Bb00sdymga2jRcCOG3nPPVte4JBeoW8cQxaju4BuNFhSkKAeXB0OxYW2HUVEXSHg=w2400\",\n \"attributes\": {\n \"modularity_class\": 1\n },\n \"address\": addresses\n })\n elif _['vnfProductName'] == \"amf\":\n response['nodes'].append({\n \"id\": _['id'],\n \"name\": _['vnfProductName'],\n \"instantiationState\": _['instantiationState'],\n \"vnfState\": vnf_state,\n \"symbolSize\": 10,\n \"symbol\": \"image://https://lh3.googleusercontent.com/4lyf2bW5dKmJ9ygvXL6a9nd8SNP1RABQtcsS6nFLWyeb3W3y27gay4ujcPmmFhKj737C60IZoUcfnn8eUosl_h_qQoIMnQJmbssSwkQ4I3rC8lReRSfcZjuGbj8Xpgpb9PS2nvYWew=w2400\",\n \"attributes\": {\n \"modularity_class\": 1\n },\n \"address\": addresses\n })\n elif _['vnfProductName'] == \"smf\":\n response['nodes'].append({\n \"id\": _['id'],\n \"name\": _['vnfProductName'],\n \"instantiationState\": _['instantiationState'],\n \"vnfState\": vnf_state,\n \"symbolSize\": 10,\n \"symbol\": \"image://https://lh3.googleusercontent.com/i5IXbaptWGdvp74uIEzVfN6nMu6YmblUSA_iPy68sD1FOL31VZuvuE0RQB83iQ-CxptFdkM-ku1ey7tSVzaro2jjIBTIOOpfNzEQA_f84YeJwbP1Fwr7xJOB_r6Tls99c5iOO3WAPg=w2400\",\n \"attributes\": {\n \"modularity_class\": 1\n },\n \"address\": addresses\n })\n elif _['vnfProductName'] == \"mongodb\":\n response['nodes'].append({\n \"id\": _['id'],\n \"name\": _['vnfProductName'],\n \"instantiationState\": _['instantiationState'],\n \"vnfState\": vnf_state,\n \"symbolSize\": 10,\n \"symbol\": \"image://https://lh3.googleusercontent.com/xwIxuvxi4_wFwaMEw8mi2FqpI1K_SCGy1DGQsedj-aYAfPvjkEtYFmjKu_nrBJck-WxhcJifG6QdC4PY5jdqt3zkIER058P0f1QLS-rvdwOeOkmz9OaZeRLppwd4k3YyJgl36aiq-A=w2400\",\n \"attributes\": {\n \"modularity_class\": 1\n },\n \"address\": addresses\n })\n elif _['vnfProductName'] == \"webui\":\n response['nodes'].append({\n \"id\": _['id'],\n \"name\": _['vnfProductName'],\n \"instantiationState\": _['instantiationState'],\n \"vnfState\": vnf_state,\n \"symbolSize\": 10,\n \"symbol\": \"image://https://lh3.googleusercontent.com/_p7-L94qZANyy4rN_ygdTHYJU7aYLwAUok5EY5VhdbJhESShkMAcctymFhYzX3nM9MccqG2hJLrJ1618ZMz2fWefgz0_RTPl8LWvhb3eNoziJpHHwTai0t8xymvS3JRmjFGuqoFJQA=w2400\",\n \"attributes\": {\n \"modularity_class\": 1\n },\n \"address\": addresses\n })\n elif _['vnfProductName'] == \"pcrf\":\n response['nodes'].append({\n \"id\": _['id'],\n \"name\": _['vnfProductName'],\n \"instantiationState\": _['instantiationState'],\n \"vnfState\": vnf_state,\n \"symbolSize\": 10,\n \"symbol\": 
\"image://https://lh3.googleusercontent.com/dp_CZZ6Nn3AKb1WUsawXnvWcIWxz4iXvn4vx0wGjidaV1wSTld3CPfGAZTc-8RqLIX-xeodWKAzuHO5btB37PMbJFYu3J7cwuXD2ya2w0U9D4bIazhK4SrABzr8x-8wRHkz0iI_1fA=w2400\",\n \"attributes\": {\n \"modularity_class\": 1\n },\n \"address\": addresses\n })\n elif _['vnfProductName'] == \"ausf\":\n response['nodes'].append({\n \"id\": _['id'],\n \"name\": _['vnfProductName'],\n \"instantiationState\": _['instantiationState'],\n \"vnfState\": vnf_state,\n \"symbolSize\": 10,\n \"symbol\": \"image://https://lh3.googleusercontent.com/-R2wrpgEVBdXZfpujbLQkhuIWgaHQi4Vka-BLLDxzX8J4XYNRF-HJx3TsAoBXQHuskFJveYx9v1lQij37730EJKUraPlR2mWYt7OLoa8m1bmH2coQzAN2WGGo3htq6GdJyNkJHLUnA=s314-p-k\",\n \"attributes\": {\n \"modularity_class\": 1\n },\n \"address\": addresses\n })\n elif _['vnfProductName'] == \"nssf\":\n response['nodes'].append({\n \"id\": _['id'],\n \"name\": _['vnfProductName'],\n \"instantiationState\": _['instantiationState'],\n \"vnfState\": vnf_state,\n \"symbolSize\": 10,\n \"symbol\": \"image://https://lh3.googleusercontent.com/mYCmjpPMPhdWG_34KLVqEeTUM5DXQ8u1EK4lMCyiXaa4W-fCioeNgxbgzuQS8j-vcCn6Cnh2r7zGNNpdnAA3VjzgPykGrHbPCvM3NfMzgxf_1lW379FEkcOjqNMa1QzVUSHEam2ykw=s314-p-k\",\n \"attributes\": {\n \"modularity_class\": 1\n },\n \"address\": addresses\n })\n elif _['vnfProductName'] == \"udm\":\n response['nodes'].append({\n \"id\": _['id'],\n \"name\": _['vnfProductName'],\n \"instantiationState\": _['instantiationState'],\n \"vnfState\": vnf_state,\n \"symbolSize\": 10,\n \"symbol\": \"image://https://lh3.googleusercontent.com/iSgkP7Bfr0HWSSUkEt521Ka6rhbn9PmbNUb_fy0Ck3KD4Vn0YbV5egWqaqfGfvMk87DLpLywheYa6BBzkeffMfJNdFRkbr3nBTd-kJRyEp0Dl29egXQz9Kkr-WeFO3CslXxX1cnJHw=s314-p-k\",\n \"attributes\": {\n \"modularity_class\": 1\n },\n \"address\": addresses\n })\n elif _['vnfProductName'] == \"udr\":\n response['nodes'].append({\n \"id\": _['id'],\n \"name\": _['vnfProductName'],\n \"instantiationState\": _['instantiationState'],\n \"vnfState\": vnf_state,\n \"symbolSize\": 10,\n \"symbol\": \"image://https://lh3.googleusercontent.com/ygnGeIaEUK7Y4e5T96FBOgfWehLURMaIH6Ev_PKoOv1MbDZoH0lM_cHNkskRo9C1CpsMWsgqYaKuvk-xO-X0GtxNNZKphkaicPfztQhkzV_vZdndvrfQZIanbcALElNWroEHwef2yg=s314-p-k\",\n \"attributes\": {\n \"modularity_class\": 1\n },\n \"address\": addresses\n })\n elif _['vnfProductName'] == \"nrf\":\n response['nodes'].append({\n \"id\": _['id'],\n \"name\": _['vnfProductName'],\n \"instantiationState\": _['instantiationState'],\n \"vnfState\": vnf_state,\n \"symbolSize\": 10,\n \"symbol\": \"image://https://lh3.googleusercontent.com/ygnGeIaEUK7Y4e5T96FBOgfWehLURMaIH6Ev_PKoOv1MbDZoH0lM_cHNkskRo9C1CpsMWsgqYaKuvk-xO-X0GtxNNZKphkaicPfztQhkzV_vZdndvrfQZIanbcALElNWroEHwef2yg=s314-p-k\",\n \"attributes\": {\n \"modularity_class\": 1\n },\n \"address\": addresses\n })\n elif _['vnfProductName'] == \"pcf\":\n response['nodes'].append({\n \"id\": _['id'],\n \"name\": _['vnfProductName'],\n \"instantiationState\": _['instantiationState'],\n \"vnfState\": vnf_state,\n \"symbolSize\": 10,\n \"symbol\": \"image://https://lh3.googleusercontent.com/ygnGeIaEUK7Y4e5T96FBOgfWehLURMaIH6Ev_PKoOv1MbDZoH0lM_cHNkskRo9C1CpsMWsgqYaKuvk-xO-X0GtxNNZKphkaicPfztQhkzV_vZdndvrfQZIanbcALElNWroEHwef2yg=s314-p-k\",\n \"attributes\": {\n \"modularity_class\": 1\n },\n \"address\": addresses\n })\n else:\n response['nodes'].append({\n \"id\": _['id'],\n \"name\": _['vnfProductName'],\n \"instantiationState\": _['instantiationState'],\n \"vnfState\": vnf_state,\n \"symbolSize\": 10,\n \"symbol\": 
\"image://https://lh3.googleusercontent.com/p-qlD6cG49XFnGtZVmrtr7TfmdEjMSkBYkVZvl_Al6xC1pK87EGDUhoo2EcJBHY6DKIPLE8P9PxqF_ps1AFnu4P5DSFQdbEAUd_QYbzmF_Iu1Xs7XZ3umSpDD3VibL3fKJ9GicqQew=s315-p-k\",\n \"attributes\": {\n \"modularity_class\": 1\n },\n \"address\": addresses\n })\n\n # Consist topology relation VNF Instance <-> Network Service Instance\n response['links'].append({\n \"id\": str(link_count),\n \"source\": nsinfo_object['nssiId'],\n \"target\": _['id']\n })\n link_count += 1\n else:\n print('Tacker Topology')\n for _ in nsinfo:\n response['nodes'].append({\n \"id\": nsinfo[_],\n \"name\": _,\n \"instantiationState\": None,\n \"vnfState\": None,\n \"symbolSize\": 10,\n \"symbol\": \"image://https://lh3.googleusercontent.com/p-qlD6cG49XFnGtZVmrtr7TfmdEjMSkBYkVZvl_Al6xC1pK87EGDUhoo2EcJBHY6DKIPLE8P9PxqF_ps1AFnu4P5DSFQdbEAUd_QYbzmF_Iu1Xs7XZ3umSpDD3VibL3fKJ9GicqQew=s315-p-k\",\n \"attributes\": {\n \"modularity_class\": 1\n },\n \"address\": None\n })\n response['links'].append({\n \"id\": str(link_count),\n \"source\": nsinfo_object['nssiId'],\n \"target\": nsinfo[_]\n })\n link_count += 1\n return response_cors(request.method, JsonResponse(response))", "title": "" }, { "docid": "948a2d53eba20bec5d67bd5003028fc2", "score": "0.44702992", "text": "def _call(self, data, format = 'json', parseJSON = True):\n try:\n if data['content'] in self.supported_actions:\n data['token'] = self._token\n data['format'] = format\n callResult = _requests.post(self._url, data)\n callResult.raise_for_status()\n if format=='json' and parseJSON: \n return callResult.json()\n else:\n return callResult.text\n raise NotImplementedError('Your content type is not supported yet')\n except Exception as e:\n raise REDCapError('Something went wrong when calling the REDCap Project API') from e", "title": "" }, { "docid": "d5405cbe2cad568240eaa26b7cc0be1d", "score": "0.4466604", "text": "def callback(self, root, raw):\n # Not interested, will be handled by NCClient RPC\n pass", "title": "" }, { "docid": "6afa16cd9ab40fc49f64b050ad0e4fc9", "score": "0.44648024", "text": "def access_sfopendata_api():\n\n return render_template(\"api-search.html\")", "title": "" }, { "docid": "596f79cf60373c5bfde6546f3448136a", "score": "0.44599816", "text": "def GeneratePkgInfoXML(self):\n try:\n dir_output = os.path.dirname(self.xml_path_)\n if not os.path.exists(dir_output):\n os.makedirs(dir_output)\n output_file = open(self.xml_path_, 'w')\n document = minidom.Document()\n manifest = self.__CreateNode(document, document, 'manifest')\n self.__SetAttribute(manifest, 'xmlns', 'http://tizen.org/ns/packages')\n self.__SetAttribute(manifest, 'package', self.package_id_)\n if 'version' in self.data_:\n self.__SetAttribute(manifest, 'version', self.data_['version'])\n\n label = self.__CreateNode(document, manifest, 'label')\n if 'name' in self.data_:\n self.__CreateTextNode(document, label, self.data_['name'])\n\n description = self.__CreateNode(document, manifest, 'description')\n if 'description' in self.data_:\n self.__CreateTextNode(document, description, self.data_['description'])\n\n ui_application = self.__CreateNode(document, manifest, 'ui-application')\n self.__SetAttribute(ui_application, 'appid',\n self.package_id_ + '.' 
+ self.stripped_name_)\n self.__SetAttribute(ui_application, 'exec', self.execute_path_)\n # Set application type to 'c++app' for now,\n # to differentiate from 'webapp' used by legacy Tizen web applications.\n self.__SetAttribute(ui_application, 'type', 'c++app')\n self.__SetAttribute(ui_application, 'taskmanage', 'true')\n\n ui_label = self.__CreateNode(document, ui_application, 'label')\n if 'name' in self.data_:\n self.__CreateTextNode(document, ui_label, self.data_['name'])\n\n if ('name' in self.data_ and\n 'icons' in self.data_ and\n '128' in self.data_['icons'] and\n os.path.exists(self.app_path_ + self.data_['icons']['128'])):\n icon = self.__CreateNode(document, ui_application, 'icon')\n self.__CreateTextNode(\n document,\n icon,\n self.package_id_ + '.' + self.stripped_name_ + '.png')\n\n text_re = re.compile('>\\n\\s+([^<>\\s].*?)\\n\\s+</', re.DOTALL)\n pretty_xml = text_re.sub('>\\g<1></', document.toprettyxml(indent=' '))\n output_file.write(pretty_xml)\n print('Converting manifest.json into %s.xml for installation[DONE]'\n % self.package_id_)\n except IOError:\n traceback.print_exc()", "title": "" }, { "docid": "246a8ad7ccbc1eb59c66d238c2c15067", "score": "0.44581676", "text": "def EditPackageInfo(self):\n raise NotImplementedError", "title": "" }, { "docid": "dacf4c750af9c9d7098ac16175cdedc2", "score": "0.44516173", "text": "def Assets(lat, lon, begin, end):\n request='https://api.data.gov/nasa/planetary/earth/assets'\n key=''\n form={'lat': str(lat), 'lon': str(lon), 'begin': begin, 'end': end, 'api_key': key}\n query=urllib.urlencode(form)\n landsat8_request_string=urllib.urlopen(request+\"?\"+query)\n print(landsat8_request_string.info())\n response=json.loads(landsat8_request_string.read())\n #print(response.keys())\n #print(response.values())\n print('From: '+ begin)\n print('To: ' + end)\n print('Number of Satellite passes: ' + str(response['count']))\n print('Planet: ' + response['resource']['planet'])\n print('Data Set: ' + response['resource']['dataset'])\n for item in response['results']:\n print('date: ' + item['date'])\n print('id: ' + item['id'])", "title": "" }, { "docid": "33ac703bc5b9af1f8424ff634dfdcbf2", "score": "0.44507802", "text": "def assemble_xml(metadata, doi):\n import base64\n import xml.etree.ElementTree as ET\n\n # get directory to find xml example\n tree = ET.parse(datacite_xml)\n root = tree.getroot()\n # resource_id = root[0].tag.split('}')[0].lstrip('{')\n\n # set doi identifier\n for node in root.iter('{http://datacite.org/schema/kernel-3}identifier'):\n node.text = str(doi)\n # set title\n for node in root.iter('{http://datacite.org/schema/kernel-3}title'):\n node.text = metadata['name'].item()\n # set pub year\n for node in root.iter('{http://datacite.org/schema/kernel-3}publicationYear'):\n node.text = str(metadata['year'].item())\n # trying to fix name display for citations\n if metadata['department'].item().split()[0] == 'Austin' and metadata['department'].item().split()[-1] != 'Department':\n fixed_name = metadata['department'].item().split(' ', 1)[1] + ' Department'\n for node in root.iter('{http://datacite.org/schema/kernel-3}creatorName'):\n node.text = fixed_name\n elif metadata['department'].item().split()[0] == 'Austin' and metadata['department'].item().split()[-1] == 'Department':\n fixed_name = metadata['department'].item().split(' ', 1)[1]\n for node in root.iter('{http://datacite.org/schema/kernel-3}creatorName'):\n node.text = fixed_name\n else:\n for node in root.iter('{http://datacite.org/schema/kernel-3}creatorName'):\n 
node.text = metadata['department'].item()\n\n # set description\n for node in root.iter('{http://datacite.org/schema/kernel-3}description'):\n node.text = metadata['desc'].item()\n # set resource type\n for node in root.iter('{http://datacite.org/schema/kernel-3}resourceType'):\n node.text = metadata['type'].item()\n node.set('resourceTypeGeneral', metadata['type'].item())\n\n xmlstr = ET.tostring(root, encoding='utf-8', method='xml')\n xml_encoded = base64.b64encode(xmlstr)\n\n return xml_encoded.decode('utf-8')", "title": "" }, { "docid": "568e1a0e4252237f620043265e60e465", "score": "0.44502753", "text": "def update_url_in_package_xml():\n\n def insert_url(file_name, url):\n with open(file_name, 'r') as f:\n contents = f.readlines()\n click.clear()\n for index, item in enumerate(contents):\n click.echo(\"{0}: {1}\".format(index, item[:-1]))\n linenumber = click.prompt(\"\\n\\nPlease specify the line to insert the url in\", type=click.INT)\n contents.insert(linenumber, ' <url type=\"repository\">{0}</url>\\n'.format(url))\n contents = \"\".join(contents)\n with open(file_name, 'w') as f:\n f.write(contents)\n click.clear()\n if click.confirm(\"OK, did that. Commit these changes?\"):\n org_dir = os.getcwd()\n os.chdir(os.path.dirname(file_name))\n subprocess.call(\"git add {0}\".format(file_name), shell=True)\n subprocess.call(\"git commit -m 'Added repository url to package.xml'\", shell=True)\n os.chdir(org_dir)\n\n ws = Workspace()\n ws.catkin_pkg_names = ws.get_catkin_package_names()\n ws.config = wstool_config.Config([], ws.src)\n ws.cd_src()\n\n for pkg_name in ws.catkin_pkg_names:\n filename = os.path.join(ws.src, pkg_name, \"package.xml\")\n # Try reading it from git repo\n try:\n # TODO Maybe try to always get the https/ssh url? Right now, it is only checked against how YOU have it\n # configured.\n with open(pkg_name + \"/.git/config\", 'r') as f:\n git_url = next(line[7:-1] for line in f if line.startswith(\"\\turl\"))\n except (IOError, StopIteration):\n git_url = None\n\n # Try to read it from package xml\n try:\n if len(ws.catkin_pkgs[pkg_name].urls) > 1:\n raise IndexError\n xml_url = ws.catkin_pkgs[pkg_name].urls[0].url\n except IndexError:\n xml_url = None\n\n # Testing all cases:\n if xml_url is not None and git_url is not None:\n if xml_url != git_url:\n click.secho(\"WARNING in {0}: URL declared in src/{1}/package.xml, differs from the git repo url for {\"\n \"0}!\".format(pkg_name.upper(), pkg_name),\n fg=\"red\")\n click.echo(\"PackageXML: {0}\".format(xml_url))\n click.echo(\"Git repo : {0}\".format(git_url))\n if click.confirm(\"Replace the url in package.xml with the correct one?\"):\n subprocess.call(\"sed -i -e '/ <url/d' {0}\".format(filename), shell=True)\n insert_url(filename, git_url)\n if xml_url is not None and git_url is None:\n click.secho(\"WARNING in {0}: URL declared in package.xml, but {1} does not seem to be a remote \"\n \"repository!\".format(pkg_name.upper(), pkg_name), fg=\"yellow\")\n if click.confirm(\"Remove the url in package.xml?\"):\n click.secho(\"Fixing...\", fg=\"green\")\n subprocess.call(\"sed -i -e '/ <url/d' {0}\".format(filename), shell=True)\n if xml_url is None and git_url is not None:\n click.secho(\"WARNING in {0}: No URL (or multiple) defined in package.xml!\".format(pkg_name.upper()),\n fg=\"yellow\")\n if click.confirm(\"Insert (Replace) the url in package.xml with the correct one?\"):\n subprocess.call(\"sed -i -e '/ <url/d' {0}\".format(filename), shell=True)\n insert_url(filename, git_url)\n if xml_url is None and git_url 
is None:\n click.secho(\"INFO in {0}: Does not seem to be a git repository. You should use Version Control for your \"\n \"code!\".format(pkg_name.upper()), fg=\"cyan\")\n\n if git_url is not None:\n ws.add(pkg_name, git_url, update=False)\n\n ws.write()", "title": "" }, { "docid": "284a14b3919270343b62dea020aed211", "score": "0.44493705", "text": "def test_import_package_by_url(self):\n\n pkg_name = \"dummy_package\"\n self._compose_app(pkg_name)\n\n self.navigate_to('Manage')\n self.go_to_submenu('Package Definitions')\n self.driver.find_element_by_id(c.UploadPackage).click()\n sel = self.driver.find_element_by_css_selector(\n \"select[name='upload-import_type']\")\n sel = ui.Select(sel)\n sel.select_by_value(\"by_url\")\n\n el = self.driver.find_element_by_css_selector(\n \"input[name='upload-url']\")\n el.send_keys(\"http://127.0.0.1:8099/apps/{0}.zip\".format(pkg_name))\n self.driver.find_element_by_xpath(c.InputSubmit).click()\n\n # No application data modification is needed\n self.driver.find_element_by_xpath(c.InputSubmit).click()\n self.driver.find_element_by_xpath(c.InputSubmit).click()\n\n self.wait_for_alert_message()\n self.check_element_on_page(\n by.By.XPATH, c.AppPackageDefinitions.format(pkg_name))", "title": "" }, { "docid": "1aeb3d22d034a1148b0a72d66cd4ffda", "score": "0.44403973", "text": "def request_response(self) -> dict:\n\n result = requests.get(self.url_download + self.file_id + \"/\" + self.compressed_suffix)\n if result.status_code != 200:\n raise Exception('Clarin backend response with non 2xx response: {}'.format(result.text))\n\n response = xmltodict.parse(gzip.decompress(result.content))\n return response", "title": "" }, { "docid": "479673069ef9ba66003260732d4ca438", "score": "0.4425186", "text": "def __init__read(self, filename):\n self.filename = filename\n try:\n # Read the container\n f = self.read(\"META-INF/container.xml\")\n except KeyError:\n # By specification, there MUST be a container.xml in EPUB\n print \"The %s file is not a valid OCF.\" % str(filename)\n raise InvalidEpub\n try:\n # There MUST be a full path attribute on first grandchild...\n self.opf_path = ET.fromstring(f)[0][0].get(\"full-path\")\n except IndexError:\n # ...else the file is invalid.\n print \"The %s file is not a valid OCF.\" % str(filename)\n raise InvalidEpub\n\n # NEW: json-able info tree\n self.info = {\"metadata\": {},\n \"manifest\": [],\n \"spine\": [],\n \"guide\": []}\n\n self.root_folder = os.path.dirname(self.opf_path) # Used to compose absolute paths for reading in zip archive\n self.opf = ET.fromstring(self.read(self.opf_path)) # OPF tree\n\n ns = re.compile(r'\\{.*?\\}') # RE to strip {namespace} mess\n\n # Iterate over <metadata> section, fill EPUB.info[\"metadata\"] dictionary\n for i in self.opf.find(\"{0}metadata\".format(NAMESPACE[\"opf\"])):\n tag = ns.sub('', i.tag)\n if tag not in self.info[\"metadata\"]:\n self.info[\"metadata\"][tag] = i.text or i.attrib\n else:\n self.info[\"metadata\"][tag] = [self.info[\"metadata\"][tag], i.text or i.attrib]\n\n # Get id of the cover in <meta name=\"cover\" />\n try:\n coverid = self.opf.find('.//{0}meta[@name=\"cover\"]'.format(NAMESPACE[\"opf\"])).get(\"content\")\n except AttributeError:\n # It's a facultative field, after all\n coverid = None\n self.cover = coverid # This is the manifest ID of the cover\n\n self.info[\"manifest\"] = [{\"id\": x.get(\"id\"), # Build a list of manifest items\n \"href\": x.get(\"href\"),\n \"mimetype\": x.get(\"media-type\")}\n for x in 
self.opf.find(\"{0}manifest\".format(NAMESPACE[\"opf\"])) if x.get(\"id\")]\n\n self.info[\"spine\"] = [{\"idref\": x.get(\"idref\")} # Build a list of spine items\n for x in self.opf.find(\"{0}spine\".format(NAMESPACE[\"opf\"])) if x.get(\"idref\")]\n try:\n self.info[\"guide\"] = [{\"href\": x.get(\"href\"), # Build a list of guide items\n \"type\": x.get(\"type\"),\n \"title\": x.get(\"title\")}\n for x in self.opf.find(\"{0}guide\".format(NAMESPACE[\"opf\"])) if x.get(\"href\")]\n except TypeError: # The guide element is optional\n self.info[\"guide\"] = None\n\n # Document identifier\n try:\n self.id = self.opf.find('.//{0}identifier[@id=\"{1}\"]'.format(NAMESPACE[\"dc\"],\n self.opf.get(\"unique-identifier\"))).text\n except AttributeError:\n raise InvalidEpub # Cannot process an EPUB without unique-identifier\n # attribute of the package element\n # Get and parse the TOC\n toc_id = self.opf[2].get(\"toc\")\n expr = \".//{0}item[@id='{1:s}']\".format(NAMESPACE[\"opf\"], toc_id)\n toc_name = self.opf.find(expr).get(\"href\")\n self.ncx_path = os.path.join(self.root_folder, toc_name)\n self.ncx = ET.fromstring(self.read(self.ncx_path))\n self.contents = [{\"name\": i[0][0].text or \"None\", # Build a list of toc elements\n \"src\": os.path.join(self.root_folder, i[1].get(\"src\")),\n \"id\":i.get(\"id\")}\n for i in self.ncx.iter(\"{0}navPoint\".format(NAMESPACE[\"ncx\"]))] # The iter method\n # loops over nested\n # navPoints", "title": "" }, { "docid": "72d9c5e3562efaa8a81d3e90fb5f93bd", "score": "0.44236836", "text": "def __request_solrxml(self, sbid):\n SHOW_FUNCTION = '_design/scielobooks/_show/solr_xml'\n request_url = '/'.join([self.__db_uri, self.__db_name, SHOW_FUNCTION, sbid])\n\n return urllib2.urlopen(request_url).read()", "title": "" }, { "docid": "0485b770c8bfdc277eb04b5c9a90ea48", "score": "0.44230056", "text": "def request(self, manager, stanza):\n pass", "title": "" }, { "docid": "d87a0fccb21344f0a404d54ac6ac7250", "score": "0.44217244", "text": "def upload_file_to_ckan(url, dataset_name, file_path):\n\n assert os.environ['CKAN_API_KEY']\n\n api_key = os.environ['CKAN_API_KEY']\n file_name = file_path.split('/')[-1]\n resource_id = find_resource_id_if_exists(url, dataset_name, file_name)\n\n if resource_id:\n data_upload_url = url+'/api/action/resource_update'\n data = {\"id\":resource_id}\n else:\n data_upload_url = url+'/api/action/resource_create'\n data={\"package_id\": dataset_name,\n \"name\":file_name}\n logger.info('Uploading {}...'.format(file_name))\n\n response = requests.post(data_upload_url,\n data=data,\n headers={\"X-CKAN-API-Key\": api_key},\n files=[('upload', open(file_path, 'rb'))]\n )\n assert response.status_code == requests.codes.ok\n logger.info('Uploaded {} to https://api.data.amsterdam.nl/catalogus/dataset/{}'.format(file_name, dataset_name))", "title": "" }, { "docid": "efec3ffa2530fca8d00f437b6bcabce9", "score": "0.44196934", "text": "def request_symbol(self, symbol):\n\n # make the parser, and send the xml to be parsed\n result = urllib2.urlopen(self.url % symbol).read()\n result = result.replace('\\\\x', '\\\\u00')\n stock = json.loads(result[4:])\n return stock[0]", "title": "" }, { "docid": "700e5cbc01de7066bca528c1c3d7c7f5", "score": "0.44182292", "text": "def rpc(self, xml, ncreq):\n # Remove the <data/> element\n xml = xml[0]\n rpc_rsp = yield from self.netconf_mgr.dispatch(xml)\n logger.debug('RPC response xml: %s', rpc_rsp.xml)\n\n self.stats.rpcs += 1\n return rpc_rsp.xml", "title": "" }, { "docid": 
"426eb88bb7eff74efe72bb7119b0691d", "score": "0.44098938", "text": "def call_api(self, *args, **kwargs):\n try:\n output_format = kwargs.pop('output_format')\n except KeyError:\n output_format = self.output_format\n url_string = \"/\".join([x.strip() for x in args])\n url_options = urlencode(kwargs)\n if url_options:\n url_string = self.base_url + url_string + \"?format=json&\" + url_options\n else: url_string = self.base_url + url_string + \"?format=json\"\n print url_string\n #json_data = urlopen(url_string).read()\n #data = self._format_data(output_format, xml_data)\n #return data", "title": "" }, { "docid": "8306bf62d95bdbbcf93c2cf2a606033d", "score": "0.44081908", "text": "def _kazan_retrieve(GAV,ArtifactClassifier,ArtifactPackaging):\n import urlgrabber\n\n # NEXUS_BASE = \"http://ganesh-code.mpht.priv.atos.fr/nexus\"\n NEXUS_BASE = \"https://kazan.priv.atos.fr/nexus\"\n REST_PATH = \"/service/local\"\n ART_REDIR = \"/artifact/maven/redirect\"\n if sys.platform.startswith('win'):\n output_dir = \"D:\\\\Temp\\\\software\\\\kazan\"\n if sys.platform.startswith('linux'):\n output_dir = \"/DATA/software/kazan\"\n\n\n if not re.match(\"(war|tar.gz|jar)\",ArtifactPackaging):\n print \"ArtifactPackaging mus be <war|tar.gz>\"\n sys.exit()\n\n GAV_list = GAV.split(\":\")\n CLASSIFIER_list = ArtifactClassifier.split(\":\")\n params_url = { 'g' : GAV_list[0] ,\n 'a' : GAV_list[1],\n 'v' : GAV_list[2],\n 'p' : ArtifactPackaging,\n }\n\n if re.match(\".*SNAPSHOT\",params_url[\"v\"]):\n #params_url[\"r\"] = \"ganeshrepository-snapshot\"\n params_url[\"r\"] = \"snapshots\"\n else:\n #params_url[\"r\"] = \"ganeshrepository\"\n params_url[\"r\"] = \"releases\"\n\n url = NEXUS_BASE + REST_PATH + ART_REDIR + '?'\n uri = \"\"\n for param in params_url.iterkeys():\n url+=param+'='+params_url[param]+'&'\n url=re.sub(\"&$\",\"\",url)\n\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n\n saved_files = []\n # save on local file\n for classifier in CLASSIFIER_list:\n if not re.match(\"(war|jar)\",ArtifactPackaging,flags=re.IGNORECASE):\n cl=\"&c=\"+classifier\n classifier=\"-\"+classifier\n else:\n cl=\"\"\n classifier=\"\"\n cl_url = url+cl\n print \"\\nTrying to fetch url <\"+cl_url+\">\\n\"\n try:\n if sys.platform.startswith('win'):\n #filename = urlgrabber.urlgrab(cl_url,filename=output_dir+\"\\\\\"+params_url[\"a\"]+\"-\"+params_url[\"v\"]+classifier+\".\"+params_url[\"p\"])\n filename = output_dir+\"\\\\\"+params_url[\"a\"]+\"-\"+params_url[\"v\"]+classifier+\".\"+params_url[\"p\"]\n if sys.platform.startswith('linux'):\n #filename = urlgrabber.urlgrab(cl_url,filename=output_dir+\"/\"+params_url[\"a\"]+\"-\"+params_url[\"v\"]+classifier+\".\"+params_url[\"p\"])\n filename = output_dir+\"/\"+params_url[\"a\"]+\"-\"+params_url[\"v\"]+classifier+\".\"+params_url[\"p\"]\n distantfile = cl_url\n import urllib2\n #import M2Crypto\n\n proxy = urllib2.ProxyHandler({})\n opener = urllib2.build_opener(proxy)\n urllib2.install_opener(opener)\n\n dfile = urllib2.urlopen(distantfile)\n output = open(filename,'wb')\n output.write(dfile.read())\n output.close()\n\n print \"File <\"+filename+\"> successfully uploaded\"\n saved_files.append(filename)\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise\n return saved_files", "title": "" } ]
7193ad479e923837663da261e3082151
Create and return a new event_book
[ { "docid": "f7d629676eb2c844f672df6e3a2c4758", "score": "0.8405833", "text": "def create_event_book(event, book):\n\n event_book = EventBook(isbn=book.isbn, event_id=event.id)\n\n db.session.add(event_book)\n db.session.commit()\n\n return event_book", "title": "" } ]
[ { "docid": "0d292365bac8582a6c8931c41917df02", "score": "0.6702231", "text": "def create_book(self, title):\n new_thing = Book(NumbID.new_id(), title)\n self.item_list.add_item(new_thing)\n pass", "title": "" }, { "docid": "0d7fc8fb973dd13dfc3a4eff6b4d5358", "score": "0.66807735", "text": "def create_event():\n event_id = str(uuid.uuid4())\n color_choice = picker(COLORS)\n\n return (event_id, {\n \"id\": event_id,\n \"timestamp\": datetime.datetime.now().isoformat(),\n \"eventType\": color_choice()\n })", "title": "" }, { "docid": "7769f383d159e9667d460acc22c458a9", "score": "0.65890706", "text": "def create(self, validated_data):\n\t\treturn Book.objects.create(**validated_data)", "title": "" }, { "docid": "a3caa0e697f8cd15e77322122a67cc66", "score": "0.65867823", "text": "def _add_book():\n call_number = input(\"Enter Call Number: \")\n title = input(\"Enter title: \")\n num_copies = int(input(\"Enter number of copies \"\n \"(positive number): \"))\n book_data = (call_number, title, num_copies)\n author = input(\"Enter Author Name: \")\n new_book = Book(book_data[0], book_data[1], book_data[2], author)\n return new_book", "title": "" }, { "docid": "aacf330e67d7e41eeb6e6a80a1b51e8b", "score": "0.6462375", "text": "def create_book(isbn, title, author, description, page_length, image):\n\n book = Book(isbn=isbn, \n title=title, \n author=author, \n description=description,\n page_length=page_length, \n image=image)\n\n db.session.add(book)\n db.session.commit()\n\n return book", "title": "" }, { "docid": "98655c548cbe09a9040ad81b22543297", "score": "0.64233315", "text": "def create(cls, event, email, context = None):\n return cls.objects.create(\n event = event,\n email = email,\n context = context or dict()\n )", "title": "" }, { "docid": "0c57deb74d53c48c2b6c9d96a40d1665", "score": "0.6419937", "text": "def create_event():\n incoming = request.get_json()\n try:\n start_time = incoming['start_time'][:-1].replace('T', ' ')\n except:\n start_time = None\n try:\n end_time = incoming['end_time'][:-1].replace('T', ' ')\n except:\n end_time = None\n event = Event(\n room_id = incoming['room_id'],\n name = incoming['event_name'],\n location = incoming['location'],\n start_time = start_time,\n end_time = end_time,\n description = incoming['description']\n )\n db.session.add(event)\n db.session.commit()\n return jsonify(results = event.room_id)", "title": "" }, { "docid": "2b6c6d52783de146a5c133ca2b55939c", "score": "0.6409536", "text": "def create_booking(self, data, **kwargs):\n return Booking(**data)", "title": "" }, { "docid": "dd4b00509d63302e13e60691e0e0e37d", "score": "0.6310821", "text": "def test_create_book_obj_new_book(self):\n data = {\n 'title': 'Hobbit',\n 'authors': [\n 'Henryk Sienkiewicz',\n ],\n 'pageCount': 32,\n 'publishedDate': '2018',\n 'language': 'pl',\n 'imageLinks': {\n 'thumbnail': 'https://cover_link.com'\n },\n 'industryIdentifiers': [\n {\n 'type': 'ISBN_13',\n 'identifier': '1234567890123'\n },\n ],\n }\n created = self.external_api._create_book_obj(data)\n exists = Book.objects.filter(title='Hobbit').exists()\n self.assertTrue(created)\n self.assertTrue(exists)", "title": "" }, { "docid": "2391682e3afa3f41b1bc2539e7246120", "score": "0.6158129", "text": "def create(self, validated_data):\n actor_data = validated_data.pop('actor')\n repo_data = validated_data.pop('repo')\n type_data = validated_data.pop('type')\n event_obj = Event.objects.create(**validated_data)\n actor_obj, _ = Actor.objects.get_or_create(**actor_data)\n repo_obj, _ = 
Repo.objects.get_or_create(**repo_data)\n type_obj, _ = Type.objects.get_or_create(**type_data)\n event_obj.actor = actor_obj\n event_obj.repo = repo_obj\n event_obj.type = type_obj\n event_obj.save()\n return event_obj", "title": "" }, { "docid": "c3ac240c15f8c2aafe1878cfc0da2dc7", "score": "0.6147889", "text": "def post(self):\n print('Received POST on resource /book')\n request_body = request.get_json()\n a_book = BookChecker.create_book(request_body)\n return a_book, 201", "title": "" }, { "docid": "9083f700c0e62b230824abbccd96b540", "score": "0.61245894", "text": "def _create_event(\n session,\n label: str,\n body: str,\n start_time: str,\n end_time: str,\n calendar_id: str,\n user_id: str,\n):\n logger.info(\"Creating new event\")\n\n event_id = uuid.uuid4()\n new_event = Event(\n id=event_id,\n label=label,\n body=body,\n start_time=start_time,\n end_time=end_time,\n calendar_id=calendar_id,\n user_id=user_id,\n )\n\n session.add(new_event)\n\n logger.info(f\"Finished creating new event with ID {event_id}\")\n\n return new_event", "title": "" }, { "docid": "068781a599b02e0d0b543470f447d223", "score": "0.61221606", "text": "def create_gradebook(self, gradebook_form):\n return # osid.grading.Gradebook", "title": "" }, { "docid": "3f958a86823ba80d99bf9c130962d16a", "score": "0.6101399", "text": "def add_book():\n author_id, book_id = get_storage().add_book(app.current_request.json_body)\n return {'Authors.AuthorID': author_id, 'Books.BookID': book_id}", "title": "" }, { "docid": "dd06c43277eeeb548dec658cea317c77", "score": "0.6078337", "text": "def event_creation(self):\n event = {\n \"date\": \"6th JAN 2017\",\n \"description\": \"Swim for the first time in a lake\",\n \"id\": 7,\n \"image_url\": \"https://www.google.com\",\n \"location\": \"Naivasha\",\n \"time\": \"10:00AM\",\n \"title\": \"swimming in lake turkana\",\n \"event_category\": 1\n }\n\n # obtain the access token\n access_token = self.get_access_token()\n self.create_event_cartegory()\n\n self.email_verification()\n\n # ensure the request has an authorization header set with the access token in it\n res = self.client().post(\n '/api/events',\n headers=dict(Authorization=\"Bearer \" + access_token),\n data=event)\n return res", "title": "" }, { "docid": "aaf240b1b38994d5bff6666b7029284c", "score": "0.60517937", "text": "def api_add_book():\n book = request.get_json()\n return jsonify(insert_book(book))", "title": "" }, { "docid": "1f21a88d7dfba6930f387c6c5a81d173", "score": "0.60475713", "text": "def post(self):\n book_json = json.loads(self.request.body)\n book = Book(title=book_json['title'],\n isbn=book_json['isbn'],\n genre=book_json['genre'],\n author=book_json['author'],\n checkedIn=book_json.get('checkedIn', True))\n book.put()\n book_d = book.to_dict()\n book_d['id'] = book.key.id()\n # respond with resource creation code\n self.response.status = 201\n self.response.write(json.dumps(book_d))", "title": "" }, { "docid": "6a6ca0ad418f62e774aac69c2d266235", "score": "0.5989098", "text": "def create_event(serializer):\n event_obj = create_instance(serializer)\n if event_obj.is_offline:\n OfflineEvent.objects.get_or_create(event=event_obj)\n\n # Sets foreign key relation to `Quiz` objects, with the appropriate label,\n # for the newly created Event objects\n _slug = event_obj.slug\n event_obj.registration_quiz = _create_quiz(_slug, 'registration')\n event_obj.feedback_quiz = _create_quiz(_slug, 'feedback')\n event_obj.save()\n return event_obj", "title": "" }, { "docid": "21277b8e87c1d86702c78a7d4dfe1ec1", "score": 
"0.5961599", "text": "def create_event(request, vendor):\n data = json.loads(request.body)\n\n title = data.get(\"title\")\n category_tittle = data.get(\"category\")\n summary = data.get(\"summary\")\n discount = data.get(\"discount\")\n price = data.get(\"price\")\n tag_titles = data.get(\"tags\")\n address_text = data.get(\"address\")\n\n event_tags = list()\n for tag_title in tag_titles:\n tag = Tag.objects.filter(name=tag_title).first()\n if tag:\n event_tags.append(tag)\n else:\n tag = Tag.objects.create(name=tag_title)\n tag.save()\n event_tags.append(tag)\n\n category = EventCategory.objects.filter(title=category_tittle).first()\n if not category:\n category = EventCategory.objects.create(title=category_tittle)\n category.save()\n\n address = Address.objects.create(address_text=address_text)\n address.save()\n\n event = Event.objects.create(vendor=vendor,\n title=title,\n category=category,\n summary=summary,\n discount=discount,\n price=price,\n address=address)\n event.save()\n\n for tag in event_tags:\n event.tags.add(tag)\n event.save()\n\n return JsonResponse({\"status\": \"Successfully created event.\",\n \"creator_id\": vendor.user.pk,\n \"event id\": event.pk,\n \"name\": event.title,\n \"price\": event.price,\n \"category\": event.category.title,\n \"discount\": event.discount,\n \"tags\": [tag_title.name for tag_title in event_tags],\n })", "title": "" }, { "docid": "e5bce69abb73d1bc772f9e9fb778246e", "score": "0.5896276", "text": "def create_event():\n global events, queue\n event = request.json\n events.append(event)\n # Should we make this non-blocking?\n queue.put(event)\n return json.dumps({'id': str(len(events) - 1)})", "title": "" }, { "docid": "ffd023b9346e8734c4c643af5147831d", "score": "0.58768684", "text": "def post(self):\n data = request.json\n\n book_id = data.get('id')\n title = data.get('title')\n isbn = data.get('isbn')\n pub_date = date.fromtimestamp(data.get(\"pub_date_timestamp\"))\n\n author_id = data.get('author_id')\n author_obj = Author.query.filter(Author.id == author_id).one()\n\n book_obj = Book(title, isbn, [author_obj], pub_date)\n book_obj.authors.append(author_obj)\n if book_id:\n book_obj.id = author_id\n\n db.session.add(book_obj)\n db.session.commit()\n return None, 201", "title": "" }, { "docid": "8a281b04ee738a65bbd4db8f3e1fa05b", "score": "0.5866709", "text": "def create_event_db_object(form, current_user_id):\n event = {'title': form['title'],\n 'start_time': form['date'] + 'T' + form['start_time'],\n 'end_time': form['date'] + 'T' + form['end_time'],\n 'location': form['location'],\n 'description': form['description'],\n 'rsvp_date': form['rsvp_deadline'],\n 'rsvp_email_url': form['rsvp_email_url'],\n 'rsvp_required': (form['rsvp_email_url'] != ''),\n 'ticketed_event': (form['ticketed_event_instructions'] != ''),\n 'ticketed_event_instructions': form['ticketed_event_instructions'],\n 'contact_name': form['contact_name'],\n 'contact_email': form['contact_email'],\n 'source': 'user',\n 'source_id': current_user_id}\n for field in event:\n event[field] = None if event[field] == '' else event[field]\n return event", "title": "" }, { "docid": "b816d369c3350276ca2da7a1de41f323", "score": "0.5860473", "text": "def create(cls, user, book, content, parent=None):\n pass", "title": "" }, { "docid": "0ce939f0f9f51d22fabb96076eb70ec3", "score": "0.5834263", "text": "def test_add_book(self):\n body = Book()\n response = self.client.open(\n '/v2/book',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n 
self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "title": "" }, { "docid": "16f4443ed1e2364b5c010c01a181f9f1", "score": "0.581321", "text": "def create(self, validated_data):\n client_data = self.initial_data.get('client')\n client_instance = ClientSerializer.Meta.model.get_or_create_client(client_data)\n validated_data.update({'client': client_instance, 'room_id': self.room_id})\n return Booking.objects.create(**validated_data)", "title": "" }, { "docid": "766b2380c3fdebfd172bd60b1b80f38b", "score": "0.57978415", "text": "def test_book_create(self):\n country = CountryFactory()\n publisher = PublisherFactory()\n data = {\n 'name': 'My New Book',\n 'isbn': 'isbn-9989',\n 'authors': ['Awais Jibran'],\n 'country': country.name,\n 'number_of_pages': 32,\n 'publisher': publisher.name,\n 'release_date': '2019-05-19'\n }\n response_data = self.make_api_get_request(self.books_api_url)\n pre_create_book_count = len(response_data['data'])\n\n response = self.client.post(self.books_api_url, data=data, format='json')\n response_data = response.json()\n self.assertEqual(response_data['data'], {'book': data})\n\n response_data = self.make_api_get_request(self.books_api_url)\n post_create_book_count = len(response_data['data'])\n\n self.assertGreater(post_create_book_count, pre_create_book_count)\n self.assertEqual(post_create_book_count, pre_create_book_count + 1)", "title": "" }, { "docid": "c30feb5440776bdff5ec9b5397fc3e4a", "score": "0.57805526", "text": "def apigw_create_event():\n\n return {\n \"StackId\": \"somestack/testme\",\n \"RequestId\": \"1234\",\n \"LogicalResourceId\": \"1234\",\n \"RequestType\": \"create\",\n \"ResponseURL\": \"http://someaddress.com\",\n \"ResourceProperties\": {\n \"DestBucketName\": \"aaa\",\n \"DestBucketRegion\": \"bbb\"\n }\n }", "title": "" }, { "docid": "74e1d0862e3b861f417105e24681e760", "score": "0.5771165", "text": "def createEvent(name, owner, event_type, description=None, start_time=None, end_time=None, parent=None, password=None, page=None, position_x=None, position_y=None):\n if not parent is None and Event.query.filter_by(id=parent).first() is None:\n raise ValueError(\"Parent, if used, must be a valid event\")\n if event_type == EventType.ENCRYPTED:\n if description is None:\n raise TypeError(\"Encrypted notes must have a discription\")\n if password is None:\n raise TypeError(\"Encrypted notes must have a password\")\n elif not password is None:\n raise TypeError(\"Only encrypted notes can have a password\")\n if type(owner) == user.User:\n owner = owner.id\n elif type(owner) != int:\n raise TypeError(\"Owner must be a user id or a User\")\n if type(page) == Page:\n page = page.id\n\n if event_type == EventType.ENCRYPTED:\n description = encrypt(description, password)\n\n newEvent = Event(owner_id=owner, name=name, event_type=event_type, description=description, start_time=start_time, end_time=end_time, parent_id=parent, page_id=page, position_x=position_x, position_y=position_y) \n if checkEventAttributes(newEvent):\n db.session.add(newEvent)\n db.session.commit()\n return Event.query.filter_by(owner_id=owner, name=name, event_type=event_type, description=description, start_time=start_time, end_time=end_time, parent_id=parent, page_id=page, position_x=position_x, position_y=position_y).first()", "title": "" }, { "docid": "0570c6b3c070457bace0137a849f9e75", "score": "0.57678527", "text": "def add():\n try:\n isbn = request.form['isbn']\n except KeyError:\n abort(404)\n\n user = session['user']\n if user == None:\n 
abort(404)\n\n try:\n metadata = get_book(isbn)\n except KeyError:\n flash('ISBN not found')\n return redirect(url_for('home', id=user.id))\n\n # see if user already got this books stored\n conn = pyelasticsearch.ElasticSearch(app.config['ELASTICSEARCH_URL'])\n results = conn.search(\"%s AND user_id:%s\" % (isbn, user.id))\n\n if len(results['hits']['hits']) > 0:\n metadata = results['hits']['hits'][0]['_source']['metadata']\n doc_id = results['hits']['hits'][0]['_source']['_id']\n return jsonify(key=apikey, status='200', \n isbn=isbn, message='OK', metadata=metadata, doc_id=doc_id)\n\n # if not found, store it in CouchDB\n store = get_store()\n\n doc = {\n \"user_id\" : user.id,\n \"metadata\" : metadata\n }\n doc_id, doc_rev = store.save(doc)\n\n # add to social stream\n event = Event(user_id=user.id, event='book added', time=time.time(),\n data=json.dumps({\n \"verbose\" : \"%s added %s to their collection\" % (\n user.username, metadata['title']),\n \"metadata\" : metadata,\n \"doc_id\" : doc_id,\n }))\n db.session.add(event)\n db.session.commit()\n\n flash('Successfully added book')\n return redirect(url_for('home', id=user.id))", "title": "" }, { "docid": "ef561140a23f23225cb64c87213ca3f2", "score": "0.57495356", "text": "def create_event(self):\n\t\tif not CalendarCredentials.logged_in():\n\t\t\traise Exception(\"ERROR. User must be logged in to create an event.\")\n\n\t\tevent = CalendarCredentials.service.events().insert(calendarId=self.calendar_id, body=self.options).execute()\n\t\tprint(\"Event created: {}\".format(event.get('htmlLink')))", "title": "" }, { "docid": "6370292cedb84961ae2050de78f25921", "score": "0.5739657", "text": "def createEvent(self, key, name=None):\n self.Events.createEvent(key, name)\n self._events = self.Events.events\n return self._events[key]", "title": "" }, { "docid": "c66e1213f3119221d7658d4d85605f35", "score": "0.57341415", "text": "def AddBook(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "9b42370bdb95b39fcb3416f9ba432acc", "score": "0.5732589", "text": "def test_create_event(self):\n pass", "title": "" }, { "docid": "48f6e64b51a9c112dc1aeafcefd78119", "score": "0.5730512", "text": "def create(self, request):\n if not set(request.data) <= {\"room\", \"args\", \"event_type\", \"parent_event\"}:\n return Response({\"details\": \"bad arguments\"}, status=400)\n room = Room.objects.get(pk=request.data[\"room\"])\n if not room:\n return Response(\n {\"details\": \"room does not exist\"}, status=status.HTTP_400_BAD_REQUEST\n )\n event = Event(\n author=request.user,\n room=room,\n event_type=request.data[\"event_type\"],\n args=request.data[\"args\"],\n )\n if \"parent_event\" in request.data:\n parent_event = Event.is_valid_parent(room, request.data[\"parent_event\"])\n if parent_event:\n event.parent_event = parent_event[0]\n else:\n return Response(\n {\"details\": \"invalid parent event\"},\n status=status.HTTP_400_BAD_REQUEST,\n )\n handler = Handler(event, request.user)\n handle_event = getattr(handler, \"handle_\" + event.event_type)\n result = handle_event()\n return result", "title": "" }, { "docid": "30d06bf7b0d526c5f929995a889417f1", "score": "0.5717656", "text": "def add_a_book(self, params: list):\n title = input('title name = ')\n description = input('book description = ')\n author = input('book author = ')\n self.__books_service.create_book(title, description, author)", "title": "" 
}, { "docid": "b4e1d5096a0e2b3271075cc1d92ebc95", "score": "0.5695564", "text": "def create(self,validated_data):\n author_data = validated_data.pop('author')\n author_exists = Author.objects.filter(first_name=author_data['first_name'], last_name=author_data['last_name'], birth_date=author_data['birth_date'])\n if (author_exists.count() > 0):\n new_author = Author.objects.get(first_name=author_data['first_name'], last_name=author_data['last_name'], birth_date=author_data['birth_date'])\n else:\n new_author = Author.objects.create(**author_data)\n new_author.save()\n book = Book.objects.create(author=new_author,**validated_data) \n return book", "title": "" }, { "docid": "d6737f25e43b0fff821d2c3ce238b750", "score": "0.5686803", "text": "def create_event(oauth_client_id, event_name, venue_id=None, \n price=None, units=None, currency=\"EUR\",\n account_id=None):\n em = logic.EventManager()\n tm = logic.TicketManager()\n sm = logic.SecurityManager()\n # create event\n try:\n if account_id is None:\n account = sm.lookup_account_for_client(oauth_client_id)\n account_id = account.id\n event = em.start_event(account_id, event_name)\n # add default eventpart\n event.venue_id = venue_id\n eventpart = em.add_eventpart(event.id, venue_id=venue_id)\n # add default ticket type\n tm.create_ticket_type(eventpart.id, price, units, currency=currency)\n except ex.TickeeError, e:\n transaction.abort()\n return marshalling.error(e)\n else:\n result = marshalling.created_success_dict.copy()\n result['event'] = marshalling.event_to_dict(event)\n transaction.commit()\n return result", "title": "" }, { "docid": "b607ae5aca1b4199ba120df541a0e046", "score": "0.56840384", "text": "def handle_create_event(command):\r\n\r\n event_string = command.split(CREATE_EVENT_COMMAND,1)[1].strip()\r\n if \"desc:\" in event_string and \"loc:\" in event_string and \"date:\" in event_string and \"time:\" in event_string:\r\n description = ((event_string.split(\"desc:\"))[1].split(\",\")[0]).strip()\r\n location = ((event_string.split(\"loc:\"))[1].split(\",\")[0]).strip()\r\n times = ((event_string.split(\"time:\"))[1].split(\",\")[0]).strip().split(\"-\")\r\n start_time = handle_time(times[0].strip())\r\n end_time = handle_time(times[1].strip())\r\n date = handle_date(((event_string.split(\"date:\"))[1].split(\",\")[0]).strip())\r\n\r\n if len(Event.query.all()) > 0:\r\n id_val = find_largest_id() + 1\r\n else:\r\n id_val = 1\r\n\r\n return Event(id = id_val, description = description, start_time = start_time, end_time = end_time, \r\n date = date, location = location)\r\n else:\r\n raise ValueError('Bad Event. You are missing an input. Remember to specify *desc:* , *loc:*, *date:*, and *time:*')\r\n return None", "title": "" }, { "docid": "35469c31ef33adf78ef17c63d3b02b34", "score": "0.56610245", "text": "def add_book():\n form = AddBook()\n if form.validate_on_submit():\n title_str = form.title.data\n description_str = form.description.data\n a_list_str = [auth.strip() for auth in form.author.data.split(\",\")]\n c_list_str = form.category.data.split(\", \")\n add_to_db(title_str, description_str, a_list_str, c_list_str)\n flash(\n \"Wygląda na to, że wszystko poszło dobrze i dodałaś/eś książkę do \\\n biblioteki. 
Możesz ją teraz wyszukać w zakładce 'dostępne' lub dodać kolejną.\",\n \"success\",\n )\n return redirect(url_for(\"add_book\"))\n return render_template(\"add_book.html\", title=\"Add new book\", form=form)", "title": "" }, { "docid": "1ce6d86059cfdb4f4f7c334f372d6cce", "score": "0.56590974", "text": "def create_new_event(name, descr, host, location):\n conn = sqlite3.connect(DB_NAME)\n cursor = conn.cursor()\n cursor.execute(\"INSERT INTO {} VALUES(?, ?, ?, ?)\".format(\n \"Events(eventname,eventdescription,hostname,location)\"), (name, descr, host, location))\n\n conn.commit()\n print \"successfully created event\"", "title": "" }, { "docid": "df81ce07893b0c3d27e68d8e488c3767", "score": "0.5652176", "text": "def create_event():\n os.environ['region'] = 'us-west-2'\n os.environ['profile'] = 'default'\n os.environ['prefix1'] = 'sjd'\n os.environ['prefix2'] = 'illon'\n os.environ['env'] = 'dev'\n prefix1 = os.environ['prefix1']\n prefix2 = os.environ['prefix2']\n env = os.environ['env']\n region = os.environ['region']\n\n event = dict()\n event['region'] = os.environ['region']\n # 0 sleep for mocked unit tests\n event['sleep_seconds'] = 0\n bucket_format = \"s3-{prefix1}-{prefix2}-{env}-{region}-data\"\n event['bucket'] = bucket_format.format(prefix1=prefix1,\n prefix2=prefix2,\n env=env,\n region=region)\n event['s3_query_results_path'] = \"awsathenadata/queryresults\"\n return event", "title": "" }, { "docid": "5a06e92689760409cfa2083f89fe4d9a", "score": "0.56467605", "text": "def create_event(event_name, start_date, end_date, pattern):\n\n event = Event(event_name=event_name, start_date=start_date, end_date=end_date, pattern=pattern)\n\n db.session.add(event)\n db.session.commit()\n\n return event", "title": "" }, { "docid": "bba5d31260a9d49a5d4f84ef9b10212f", "score": "0.5644966", "text": "def create(self, **kwargs):\n\t\tfrom ..services import events\n\t\tfile = self.save(self.new(**kwargs))\n\t\tevent = events.create(file_id=file.id, user_id=current_user.id, type='created')\n\t\treturn file", "title": "" }, { "docid": "8bf77d42f524b601109941eb300df912", "score": "0.56431323", "text": "def createNewEvent(self,\r\n title: str,\r\n start,\r\n Nhours: int = 1,\r\n description: str = None,\r\n location: str = None,\r\n attendees=None\r\n ):\r\n\r\n # may update for https://google-calendar-simple-api.readthedocs.io/en/latest/attachments.html\r\n end = start + Nhours*hours\r\n print(\"New event is created at\", start)\r\n self.calendar.add_event(\r\n event=Event(\r\n title,\r\n start=start,\r\n end=end,\r\n description=description,\r\n location=location,\r\n attendees=attendees\r\n )\r\n )", "title": "" }, { "docid": "31fe6f0cb9d99df38fcacd3c2377af85", "score": "0.56354356", "text": "def test_create_event(self):\n\n response = DispatchTestHelpers.create_event(self.client, location='Ubyssey', category='sports')\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n # Check the data\n self.assertEqual(response.data['title'], 'Test event')\n self.assertEqual(response.data['description'], 'Test description')\n self.assertEqual(response.data['host'], 'test host')\n self.assertEqual(response.data['location'], 'Ubyssey')\n self.assertEqual(response.data['category'], 'sports')", "title": "" }, { "docid": "3d4a88f135b614981d26c44a94c38ecd", "score": "0.56295556", "text": "def create_event(self, description: str, assignee: List[str] = None) -> Dict:\n # The service endpoint to request from\n suffix = 'event'\n # Dictionary of params for the request\n params = 
assign_params(description=description, assignee=assignee)\n # Send a request using our http_request wrapper\n return self._http_request('POST', suffix, params=params)", "title": "" }, { "docid": "3c5090caea0bde465b65caa35f624883", "score": "0.56202567", "text": "def create(self) -> None:", "title": "" }, { "docid": "3c5090caea0bde465b65caa35f624883", "score": "0.56202567", "text": "def create(self) -> None:", "title": "" }, { "docid": "0d314c96ec00c36a8d9d615b29ab2c2f", "score": "0.561132", "text": "def add_book(self, book):\n\t\tself.books.append(book)", "title": "" }, { "docid": "7f3777ccd4fbafb04052fbc10b5964da", "score": "0.56109667", "text": "def manage_existing_book(self):\n self.update_existing_book()", "title": "" }, { "docid": "a6e91a0b16161763a58e78eee303ab34", "score": "0.5607758", "text": "def new_book(self,\n title: str,\n subject: str,\n isbn: str,\n authors: str,\n ddsnumber=\"\"):\n sort = \"Book\"\n self.sort = sort\n self.isbn = isbn\n self.authors = authors\n self.ddsnumber = ddsnumber\n self.library_items.append([LibraryItems(title, subject), sort, isbn, authors, ddsnumber])", "title": "" }, { "docid": "2bfe4d851857a0bdbd50d7da7e834305", "score": "0.5600295", "text": "def test_bookings_id_events_post(self):\n body = BookingOperation()\n headers = [('accept_language', 'accept_language_example'),\n ('api', 'api_example'),\n ('api_version', 'api_version_example'),\n ('maas_id', 'maas_id_example'),\n ('addressed_to', 'addressed_to_example')]\n response = self.client.open(\n '/bookings/{id}/events'.format(id='id_example'),\n method='POST',\n data=json.dumps(body),\n headers=headers,\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "title": "" }, { "docid": "fc57efb576e5d6bb3a8a8eaa78de0bbd", "score": "0.55946", "text": "def event_insert(self, request):\n if request.from_datastore:\n my_event = request\n else:\n my_event = Event(parent=main.PARENT_KEY, date=request.date, time=request.time, title=request.title, description=request.description)\n my_event.put()\n return my_event", "title": "" }, { "docid": "85d4a9f93875580079de4d2dde73ce80", "score": "0.558266", "text": "def create_event() -> Event:\n return get_asynclib().Event()", "title": "" }, { "docid": "5d146e3f259b4c450d38b52a00eaf007", "score": "0.556535", "text": "def create(self, request):\n try:\n # If there is no author ID in request data or there's no author\n # with supplied ID, exceptions are thrown.\n author_id = int(request.data['author'])\n author = Author.objects.get(id=author_id)\n except KeyError:\n return Response({'author': 'Field required'}, status=status.HTTP_400_BAD_REQUEST)\n except Author.DoesNotExist:\n return Response({'author': 'Not Found'}, status=status.HTTP_404_NOT_FOUND)\n except Exception:\n return Response({'body': 'Incorrect format'}, status=status.HTTP_400_BAD_REQUEST)\n\n book_serializer = BookSerializer(data=request.data)\n # Checks if book request data is valid.\n if book_serializer.is_valid():\n # We can create the book and assign it the author instance\n book = Book.objects.create(\n title = request.data['title'],\n author = author,\n isbn = request.data['isbn'],\n published = request.data['published']\n )\n book_serializer = BookSerializer(book)\n return Response(book_serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(book_serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "cdead7f3c74622e68c6102fc4940201c", "score": "0.55628055", "text": "def 
test_event_model_creation(self):\n event = models.Event.objects.create(\n created_at='1526123095',\n email='diego@chefhero.com',\n environment='production',\n component='orders',\n message='the buyer # 123456 has placed an order successfully',\n data_payload='{ \"order_id\": 123, \"amount\":300 }'\n )\n\n self.assertEqual(event.__str__(), 'diego@chefhero.com')\n self.assertNotEqual(event.__str__(), 'hellochef@chefhero.com')\n self.assertEqual(event.component, 'orders')\n self.assertNotEqual(event.component, 'orders123')", "title": "" }, { "docid": "df80e4fc54f9d0f92ef742c176ed19b3", "score": "0.5543091", "text": "def create_event(snip):\r\n creds = None\r\n # The file token.pickle stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('token2.pickle'):\r\n with open('token2.pickle', 'rb') as token:\r\n creds = pickle.load(token)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n 'C:\\credentials.json', SCOPES)\r\n creds = flow.run_local_server(port=0)\r\n # Save the credentials for the next run\r\n with open('token2.pickle', 'wb') as token:\r\n pickle.dump(creds, token)\r\n\r\n service = build('calendar', 'v3', credentials=creds)\r\n\r\n # Call the Calendar API\r\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\r\n print('Getting the upcoming 10 events')\r\n events_result = service.events().list(calendarId='primary', timeMin=now,\r\n maxResults=10, singleEvents=True,\r\n orderBy='startTime').execute()\r\n events = events_result.get('items', [])\r\n\r\n notFound = True\r\n\r\n if not events:\r\n print('No upcoming events found.')\r\n for event in events:\r\n start = event['start'].get('dateTime', event['start'].get('date'))\r\n if ted_session in event['summary']:\r\n notFound = False\r\n\r\n# Refer to the Python quickstart on how to setup the environment:\r\n# https://developers.google.com/calendar/quickstart/python\r\n# Change the scope to 'https://www.googleapis.com/auth/calendar' and delete any\r\n# stored credentials.\r\n \r\n if notFound:\r\n if (is_valid_info(snip)):\r\n event = get_ev_spec(get_ev_params_ted(snip))\r\n event = service.events().insert(calendarId='primary', body=event).execute()\r\n print('Event created: %s' % (event.get('htmlLink')))\r\n else:\r\n print(snip+ ' << text did not pass verification to create event xgcalendar.py:118')", "title": "" }, { "docid": "1f95f95f5baa0aaa8c0147a878464d9f", "score": "0.55211794", "text": "def create_phonebook(phonebook_name):\n pass", "title": "" }, { "docid": "ac4a5628cd828f4b08308811cc69d4d6", "score": "0.55173385", "text": "def add_book():\n if not session.get('username'):\n return make_response(jsonify(\n {'message':'you are not logged in'}\n )), 403\n else:\n if request.method == 'POST':\n author = request.json.get('author')\n title = request.json.get('title')\n publisher = request.json.get('publisher')\n edition = request.json.get('edition')\n category = request.json.get('category')\n response = admin_user.add_book(author, title, publisher, edition, category)\n if len(author) == 0 or len(title) == 0 or len(publisher) == 0 or len(edition) == 0 or len(category) == 0:\n return make_response(jsonify(\n {'message':'no empty inputs allowed'}\n )), 409\n if author.isdigit() or 
title.isdigit() or publisher.isdigit() or category.isdigit():\n return make_response(jsonify(\n {'message':'book details must be alphabet'}\n )), 409 \n if not re.findall(r'(^[A-Za-z]+\\s[A-Za-z]+$)', author):\n return make_response(jsonify(\n {'message':'author must be in form of Evalyn James'}\n )), 409 \n \n if response == \"book with similar details exists\":\n return make_response(jsonify(\n {'message':response}\n )), 409\n if response == \"book created\":\n return make_response(jsonify(\n {\n 'author':author,\n 'title':title,\n 'publisher':publisher,\n 'edition':edition,\n 'category':category\n }\n )), 201", "title": "" }, { "docid": "b04c55fd447a9796fd679f038c0f4c53", "score": "0.55110025", "text": "def create_event(name, price, author, participants=None, investors=None,\n date=date.today()):\n # event should have at least one investor\n if investors is None or len(investors.keys()) == 0:\n investors = {author: price}\n\n # TODO: check existance of investors, participants, author\n result_event = Event.objects.create(name=name, price=price, author=author,\n date=date)\n\n if investors:\n add_investors(result_event, investors)\n\n if participants:\n add_participants(result_event, participants)\n\n return result_event", "title": "" }, { "docid": "0c23a71098ef096dc76b76488c569e64", "score": "0.5505475", "text": "def create():", "title": "" }, { "docid": "8f6bd887e8b6c6ae7706b9053198b75c", "score": "0.5502997", "text": "def test_already_existing_event(self):\n event = {\n \"date\": \"6th JAN 2017\",\n \"description\": \"Swim for the first time in a lake\",\n \"id\": 7,\n \"image_url\": \"https://www.google.com\",\n \"location\": \"Naivasha\",\n \"time\": \"10:00AM\",\n \"title\": \"swimming in lake turkana\",\n \"event_category\": 1\n }\n self.event_creation()\n access_token = self.get_access_token()\n res = self.client().post(\n '/api/events',\n headers=dict(Authorization=\"Bearer \" + access_token),\n data=event)\n self.assertEqual(res.status_code, 401)", "title": "" }, { "docid": "f84159a93e33c94238a14e0b444346c6", "score": "0.5490352", "text": "def create_event(snip):\r\n creds = None\r\n # The file token.pickle stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('token2.pickle'):\r\n with open('token2.pickle', 'rb') as token:\r\n creds = pickle.load(token)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n 'C:\\credentials.json', SCOPESCAL)\r\n creds = flow.run_local_server(port=0)\r\n # Save the credentials for the next run\r\n with open('token2.pickle', 'wb') as token:\r\n pickle.dump(creds, token)\r\n\r\n service = build('calendar', 'v3', credentials=creds)\r\n\r\n # Call the Calendar API\r\n now = datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\r\n print('Checking upcoming 10 events [173]')\r\n events_result = service.events().list(calendarId='primary', timeMin=now,\r\n maxResults=10, singleEvents=True,\r\n orderBy='startTime').execute()\r\n events = events_result.get('items', [])\r\n\r\n notFound = True\r\n\r\n if not events:\r\n print('No upcoming events found.')\r\n for event in events:\r\n start = event['start'].get('dateTime', event['start'].get('date'))\r\n if ted_session in event['summary']:\r\n notFound = False\r\n\r\n# Refer to the 
Python quickstart on how to setup the environment:\r\n# https://developers.google.com/calendar/quickstart/python\r\n# Change the scope to 'https://www.googleapis.com/auth/calendar' and delete any\r\n# stored credentials.\r\n \r\n if notFound:\r\n if (is_valid_info(snip)):\r\n event = get_ev_spec(get_ev_params_ted(snip))\r\n event = service.events().insert(calendarId='primary', body=event).execute()\r\n print('Event created: %s' % (event.get('htmlLink')))\r\n else:\r\n print(snip+ ' << text did not pass verification to create event xgcalendar.py:118')\r\n else:\r\n print(ted_session + ' Event is already in Calendar')", "title": "" }, { "docid": "405340fde20228726fcad92b9c2481d6", "score": "0.5479034", "text": "def create_event(request):\n\n requestor_phone = request.values['From']\n requestor_id = db.session.query(User).filter_by(phone=requestor_phone).one().id\n\n if requestor_id == 1:\n event = Event(status=0, owner=1)\n db.session.add(event)\n db.session.commit()\n print('created new event')\n\n responseText = \"Ok! What's a good title for the event? Say something like 'Abby's quincinera'\"\n\n else:\n responseText = \"Please ask Zach to help add an event :)\"\n\n return responseText", "title": "" }, { "docid": "27fa397244ae44ab66d87084ae8520e4", "score": "0.54749817", "text": "def addBook(self, data, u_id):\n response = []\n if not data['title']:\n response.append('Book must have a title!')\n if 'author_s' not in data and not data['author_t']:\n response.append('Book must have an author')\n if not data['review']:\n response.append('Book must have at least one review to be added!')\n if 'rating' not in data:\n response.append('Book must have a rating!')\n if len(response) == 0:\n author_name = None\n if 'author_s' not in data:\n author_name = data['author_t']\n else:\n author_name = data['author_s']\n if len(Authors.objects.filter(name=author_name)) == 0:\n Authors.objects.create(name=author_name)\n\n this_book = Books.objects.create(\n title = data['title'],\n author = Authors.objects.get(name=author_name)\n )\n Reviews.objects.create(\n review = data['review'],\n rating = data['rating'],\n user = Users.objects.get(id=u_id),\n book = this_book\n )\n return response", "title": "" }, { "docid": "720c5257c52e33a366a12f34be995fd2", "score": "0.5473747", "text": "def book(title, pages_read, total_pages=100, book_format=\"book\"):\n if total_pages < pages_read:\n raise ArgTypeError(\"pages_read_more_than_total_pages\")\n elif pages_read < 0 or total_pages < 0:\n raise ArgTypeError(\"negative_pages\")\n if book_format == \"ebook\":\n new_book = Ebook(title, pages_read)\n elif book_format == \"audiobook\":\n new_book = Audiobook(title, pages_read, total_pages)\n else:\n new_book = Book(title, pages_read, total_pages)\n return new_book", "title": "" }, { "docid": "6ca09e76d6c27c9de47b35a896dfec73", "score": "0.547086", "text": "def new(self, event):\n self._dbtool.update(event.vevent.to_ical(),\n self.name,\n href='',\n etag=event.etag,\n status=NEW)", "title": "" }, { "docid": "cd5664ebcc21f190f01b4538355a8b39", "score": "0.54596764", "text": "def create_event(year, month, day, start_time, end_time, time_zone):\n\tevent = {\n\t\t'summary' : \"Study\",\n\t\t'start' : {'dateTime' : year + \"-\" + month + \"-\" + day + \"T\" + start_time + \":00\" , 'timeZone' : time_zone},\n\t\t'end' : {'dateTime' : year + \"-\" + month + \"-\" + day + \"T\" + end_time + \":00\" , 'timeZone' : time_zone},\n 'reminders' : {'useDefault' : False},\n\t}\n\treturn event", "title": "" }, { "docid": 
"46c64bc758cb731aa1b35ab85706666f", "score": "0.5453602", "text": "def __new__(cls, **info):\n if 'event_id' not in info:\n info['event_id'] = None\n if '_id' in info: # found DB-generated ID, override given one\n info['event_id'] = info['_id']\n del info['_id']\n try:\n return super(Event, cls).__new__(cls, **info)\n except TypeError:\n raise ValueError('Event info was formatted incorrectly.')", "title": "" }, { "docid": "06e0e968eb622efd498e381a4fa41c58", "score": "0.5452457", "text": "def create_event(host_id, city, event_date, start_time, end_time, state=None):\n\n event = Event(host_id=host_id, city=city, state=state, event_date=event_date,\n start_time=start_time, end_time=end_time)\n\n db.session.add(event)\n db.session.commit()\n\n return event", "title": "" }, { "docid": "e0441ebc49c8527c57febb235b751ea7", "score": "0.54405814", "text": "def _create_event(self, index, period, ticker, row):\n try:\n open_price = PriceParser.parse(row['open'])\n high_price = PriceParser.parse(row['high'])\n low_price = PriceParser.parse(row['low'])\n close_price = PriceParser.parse(row['close'])\n volume = PriceParser.parse(row['volume'])\n\n # Create the bar event for the queue\n bev = BarEvent(ticker, index, period, open_price, high_price, low_price, close_price, volume)\n\n except ValueError:\n raise EmptyBarEvent('row {0} {1} {2} {3} can\\'t be convert to BarEvent'.format(index, period, ticker, row))", "title": "" }, { "docid": "dc9f6c0270156cefff7a917172855414", "score": "0.54261744", "text": "def create_event_command(client: Client, args: Dict) -> Tuple[str, Dict, Dict]:\n # Get arguments from user\n description = args.get('description', '')\n assignee = argToList(demisto.args().get('assignee', ''))\n # Make request and get raw response\n raw_response = client.create_event(description, assignee)\n events = raw_response.get('event')\n # Parse response into context & content entries\n if events:\n event = events[0]\n event_id: str = event.get('eventId', '')\n title = f'{INTEGRATION_NAME} - Event `{event_id}` has been created.'\n context_entry = raw_response_to_context(event)\n context = {\n f'{INTEGRATION_CONTEXT_NAME}.Event(val.ID && val.ID === obj.ID)': context_entry\n }\n human_readable = tableToMarkdown(title, context_entry)\n return human_readable, context, raw_response\n else:\n raise DemistoException(f'{INTEGRATION_NAME} - Could not create new event.')", "title": "" }, { "docid": "1922244158d70d4f7a7ac20e74329e17", "score": "0.54163104", "text": "def add_events():\n\n ### ADD EVENT 1 ###\n event1 = Event(\n name=\"Woodstock Festival\",\n start_time=datetime(1999, 7, 23, 12),\n end_time=datetime(1999, 7, 25, 12),\n )\n premium = TicketType(name='Premium')\n regular = TicketType(name='Regular')\n event1.ticket_types.append(premium)\n event1.ticket_types.append(regular)\n\n for i in range(10):\n ticket = Ticket()\n event1.tickets.append(ticket)\n premium.tickets.append(ticket)\n\n for i in range(5):\n ticket = Ticket()\n event1.tickets.append(ticket)\n regular.tickets.append(ticket)\n\n ### ADD EVENT 2 ###\n\n event2 = Event(\n name=\"Cirque du Soleil\",\n start_time=datetime(2019, 10, 23, 20, 0),\n end_time=datetime(2019, 10, 23, 23, 0),\n )\n\n premium = TicketType(name='Premium')\n regular = TicketType(name='Regular')\n vip = TicketType(name='VIP')\n\n event2.ticket_types.append(premium)\n event2.ticket_types.append(regular)\n event2.ticket_types.append(vip)\n\n for i in range(5):\n ticket = Ticket()\n event2.tickets.append(ticket)\n premium.tickets.append(ticket)\n\n for i in range(2):\n 
ticket = Ticket()\n event2.tickets.append(ticket)\n regular.tickets.append(ticket)\n\n for i in range(1):\n ticket = Ticket(token='35dd559e-43c1-414a-87e6-031d3fd70960')\n event2.tickets.append(ticket)\n vip.tickets.append(ticket)\n\n db.session.add(event1)\n db.session.add(event2)\n\n ticket = db.session.query(Ticket).filter_by(\n ticket_type=vip).first()\n reservation = Reservation(\n end_time=datetime.utcnow() + timedelta(minutes=120),\n ticket=ticket\n )\n\n db.session.commit()", "title": "" }, { "docid": "8ba753fb8f642bb15ddbe7e3d916cca3", "score": "0.5408198", "text": "def create_book_table(cls) -> None:\n\n with DatabaseConnection(cls.DB_HOST) as connection:\n cursor = connection.cursor()\n cursor.execute(\"CREATE TABLE IF NOT EXISTS books(name text primary key, author text, read integer)\")", "title": "" }, { "docid": "7e471a0ed802783fe9ce367198783c1d", "score": "0.540629", "text": "def add(self, event):\n print(f'adding {event}')\n\n body = build_event_body(event)\n\n request = self.calendar.events().insert(calendarId=self.calendar_id, body=body)\n request.execute()", "title": "" }, { "docid": "174bfec845172c386696cd7080c4e958", "score": "0.5401979", "text": "def create():\n pass", "title": "" }, { "docid": "d680beec8a8b5350b80974035796d95d", "score": "0.5386353", "text": "def add_book(self, book=None):\n if book is None:\n book = Book(bookshelves=[self])\n self.books.append(book)\n self.filtered_books.append(book)\n self.books_count += 1\n print(f\"\\n{book.get_book_info()}\\n\\n{book} added to {self.name}\")", "title": "" }, { "docid": "207c096a33a0ebdb07050fbd04cf3aae", "score": "0.537517", "text": "def test_add_book(self):\n # Test to add book without access token\n response = self.client.post('/api/v2/books', data=json.dumps(self.book), content_type=\"application/json\")\n self.assertEqual(response.status_code, 401)\n self.assertIn('Missing Authorization Header', str(response.data))\n\n # Test to add book with admin access token\n access_token = self.register_login_admin()\n\n # Add a new book with an admin access token\n response = self.client.post(\n '/api/v2/books', data=json.dumps(self.book),\n headers={'content-type': 'application/json',\n 'Authorization': 'Bearer {}'.format(access_token)})\n self.assertEqual(response.status_code, 201)\n self.assertIn('Book added successfully.', str(response.data))\n\n # Add a book that already exist\n response = self.client.post(\n '/api/v2/books', data=json.dumps(self.book),\n headers={'content-type': 'application/json',\n 'Authorization': 'Bearer {}'.format(access_token)})\n\n # Test if the same book can be added again\n self.assertIn('Book with that title already exists.', str(response.data))", "title": "" }, { "docid": "bec1dda30411497f423e3dc4931efef4", "score": "0.537157", "text": "def new_event():\n form = EventForm(request.form)\n\n if request.method == 'POST':\n if \"cancel\" in request.form:\n return redirect('/events')\n \n elif \"submit\" in request.form and form.validate():\n add_event(form, new=True)\n flash('Event created successfully!')\n return redirect('/events')\n \n # except IntegrityError as error:\n # if \"duplicate key value\" in str(error):\n # # flash('Rule created successfully!')\n # print \"Event alread exists!\"\n\n\n return render_template('events/event_add.html', form=form)", "title": "" }, { "docid": "78d702aa3457ae9da300cae42db57b9c", "score": "0.53711754", "text": "def test_create_event_successful(self):\n payload = {\n 'title': fake.text(max_nb_chars=255),\n 'description': 
fake.text(max_nb_chars=2000),\n 'organizer': self.organizer.id,\n 'image': '',\n 'event_time': make_aware(datetime.datetime.now()),\n 'address': fake.address(),\n 'fee': 500,\n 'status': '1'\n }\n res = self.client.post(EVENT_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)", "title": "" }, { "docid": "181a51ce4dbfc6276f9bf8c0a2dd8737", "score": "0.5358202", "text": "def create(access_token, payload):\n response = post(Config().get_base_url() + '/api/v2/events', headers={\n \"Authorization\": \"Bearer \" + access_token,\n \"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\"\n }, data=dumps(payload))\n return response", "title": "" }, { "docid": "6b4c421e46cc963db1a547d1a873dbd6", "score": "0.5357047", "text": "def test_create_invalid_event(self):\n payload = {'kind': 'Normal', 'price': 55.55, 'quantity': 10, 'event': 5}\n response = self.client.post(TICKET_INFO_URL, payload)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "8a5ae0a9e6f973d4c1ce647c038dc205", "score": "0.5356033", "text": "def newBook (row):\n book = {\"book_id\": row['book_id'], \"title\":row['title'], \"average_rating\":row['average_rating'], \"ratings_count\":row['ratings_count']}\n return book\n\n\n movie= {\"id\": row['id'], \"title\":row['title'], \"vote_average\": float(row['vote_average']),\"vote_count\": float(row[\"vote_count\"]) ,\"genres\":row['genres']}\n return movie", "title": "" }, { "docid": "65bee19d34bad89e107d8869ac8db723", "score": "0.5355314", "text": "def _create_calendar(session, label):\n logger.info(\"Creating new calendar\")\n\n calendar_id = uuid.uuid4()\n new_calendar = Calendar(id=calendar_id, label=label)\n\n session.add(new_calendar)\n\n logger.info(f\"Finished creating new calendar with ID {calendar_id}\")\n\n return new_calendar", "title": "" }, { "docid": "d36a48f8208dbb70d81b0ca7119ee424", "score": "0.5347974", "text": "def create_book_category(book, category):\n\n book.categories.append(category)\n db.session.commit()\n\n return category.books", "title": "" }, { "docid": "f3da6f906eb3bffdf4bb316e0cf34191", "score": "0.53444654", "text": "def add_book(self, title_as_string, author_name_as_string):\n current_book_id = self._book_repository.get_next_book_id()\n current_book = Book(current_book_id, title_as_string, author_name_as_string)\n self._book_validator.validate_book(current_book)\n self._book_repository.add_book(current_book)", "title": "" }, { "docid": "d51879827fec1f25f0ba99f8192b15a4", "score": "0.53413266", "text": "def create_event(self, start, end, attendees, summary):\n event = {\n 'summary': summary,\n 'start': {\n 'dateTime': start,\n 'timeZone': self.time_zone\n },\n 'end': {\n 'dateTime': end,\n 'timeZone': self.time_zone\n },\n 'attendees': [\n {'email': _} for _ in attendees\n ]\n }\n\n # pylint: disable=maybe-no-member\n self._service.events().insert(\n calendarId=self.calendar_id,\n body=event\n ).execute()", "title": "" }, { "docid": "def40f5b8c68d6625174a63a869ee164", "score": "0.5338221", "text": "def create_event(\n cls,\n event_type: Optional[EventType] = None,\n event_cla_group_id: Optional[str] = None,\n event_project_id: Optional[str] = None,\n event_company_id: Optional[str] = None,\n event_project_name: Optional[str] = None,\n event_company_name: Optional[str] = None,\n event_data: Optional[str] = None,\n event_summary: Optional[str] = None,\n event_user_id: Optional[str] = None,\n event_user_name: Optional[str] = None,\n contains_pii: bool = False,\n dry_run: 
bool = False\n ):\n try:\n event = cls()\n if event_project_name is None:\n event_project_name = \"undefined\"\n if event_company_name is None:\n event_company_name = \"undefined\"\n\n # Handle case where teh event_project_id == CLA Group ID or SalesForce ID\n if event_project_id and is_uuidv4(event_project_id): # cla group id in the project_id field\n Event.set_cla_group_details(event, event_project_id)\n elif event_project_id and not is_uuidv4(event_project_id): # external SFID\n Event.set_project_details(event, event_project_id)\n\n # if the caller has given us a CLA Group ID\n if event_cla_group_id is not None: # cla_group_id\n Event.set_cla_group_details(event, event_cla_group_id)\n\n if event_company_id:\n try:\n company = Company()\n company.load(str(event_company_id))\n event_company_name = company.get_company_name()\n event.set_event_company_id(event_company_id)\n except DoesNotExist as err:\n return {\"errors\": {\"event_company_id\": str(err)}}\n\n if event_user_id:\n try:\n user = User()\n user.load(str(event_user_id))\n event.set_event_user_id(event_user_id)\n user_name = user.get_user_name()\n if user_name is not None:\n event.set_event_user_name(user_name)\n except DoesNotExist as err:\n return {\"errors\": {\"event_\": str(err)}}\n\n if event_user_name:\n event.set_event_user_name(event_user_name)\n\n event.set_event_id(str(uuid.uuid4()))\n if event_type:\n event.set_event_type(event_type.name)\n event.set_event_project_name(event_project_name) # potentially overrides the SF Name\n event.set_event_summary(event_summary)\n event.set_event_company_name(event_company_name)\n event.set_event_data(event_data)\n event.set_event_date_and_contains_pii(contains_pii)\n if not dry_run:\n event.save()\n return {\"data\": event.to_dict()}\n\n except Exception as err:\n return {\"errors\": {\"event_id\": str(err)}}", "title": "" }, { "docid": "bec2db103edfba91c7a11cfbc6af4431", "score": "0.5333979", "text": "def __newBook(self):\n self.isActive = 1 # BOOK is active\n self.btnRun.setEnabled(False)\n self.btnBook.setEnabled(False)\n self.cardDropSpot.updateImage(json.dumps({\"suit\": 10, \"value\": 0})) # new book image\n return", "title": "" }, { "docid": "9e5bde464d170174a63a6c7cd4e76433", "score": "0.53293705", "text": "def test_create_book_obj_book_exists(self):\n language = create_sample_language()\n author = create_sample_author(\n first_name='Adam',\n second_name='',\n last_name='Mickiewicz'\n )\n book = create_sample_book(\n title='Pan Tadeusz',\n cover_link='https://cover_link.com',\n language=language,\n authors=[author]\n )\n data = {\n 'title': 'Pan Tadeusz',\n 'authors': [\n 'Adam Mickiewicz',\n ],\n 'pageCount': 32,\n 'publishedDate': '2008',\n 'language': 'pl',\n 'imageLinks': {\n 'thumbnail': 'https://cover_link.com'\n },\n 'industryIdentifiers': [\n {\n 'type': 'ISBN_13',\n 'identifier': '9788372783301'\n },\n ],\n }\n created = self.external_api._create_book_obj(data)\n exists = Book.objects.filter(title='Pan Tadeusz').exists()\n self.assertFalse(created)\n self.assertTrue(exists)", "title": "" }, { "docid": "23276203ab9fa28ab0c15590dac52dcd", "score": "0.532447", "text": "def create(self, context=None):\n values = self.obj_get_changes()\n db_response = self.dbapi.create_response(values)\n self._from_db_object(self, db_response)", "title": "" }, { "docid": "7b799f2e94aadd91951516db00da7dd6", "score": "0.53221166", "text": "def __init__(self):\n self.__index = BookBuilder().new_book()[\"index\"]", "title": "" }, { "docid": "e0349e14a3293956f8b750d9198c71d2", "score": 
"0.53164035", "text": "def logbook_create(name, lb_id=None):\n # Create the LogBook model\n lb = memory.MemoryLogBook(name, lb_id)\n\n # Store it in the LockingDict for LogBooks\n logbooks[lb_id] = lb", "title": "" }, { "docid": "5f52b757fb2f30f9b11ea22ffcab9b14", "score": "0.5315941", "text": "def create(self):\n pass", "title": "" }, { "docid": "5f52b757fb2f30f9b11ea22ffcab9b14", "score": "0.5315941", "text": "def create(self):\n pass", "title": "" }, { "docid": "71b3b118d2505ece222b4eb1044b6b65", "score": "0.53133297", "text": "def _create(cls, doc):\n\n assert '_from' in doc and '_to' in doc, \\\n \"You must create an edge ether by calling __init__ \" \\\n \"with _from and _to args or an appropriate dict!\"\n\n return cls.api.create(\n cls.__collection_name__,\n doc['_from'],\n doc['_to'],\n doc)", "title": "" }, { "docid": "e77ae5087bf970d89f2c7509ef7d727d", "score": "0.530287", "text": "def create(self):", "title": "" }, { "docid": "e77ae5087bf970d89f2c7509ef7d727d", "score": "0.530287", "text": "def create(self):", "title": "" }, { "docid": "e77ae5087bf970d89f2c7509ef7d727d", "score": "0.530287", "text": "def create(self):", "title": "" }, { "docid": "6bf0e2f9af6d1d077d1059da828b953f", "score": "0.53013307", "text": "def from_dict(event_data):\n applicationId = Event._check_key(event_data, 'applicationId')\n eventTypeId = Event._check_key(event_data, 'eventTypeId')\n headline = Event._check_key(event_data, 'headline')\n body = Event._check_key(event_data, 'body')\n eventTime = event_data.get('eventTime', None)\n entities = event_data.get('relatedEntities', {})\n relatedEntities = []\n for entityType in entities:\n for entityId in entities[entityType]:\n ent = Entity(entityType, entityId)\n relatedEntities.append(ent)\n evt = Event(applicationId, eventTypeId, headline, body,\n eventTime=eventTime,\n relatedEntities=relatedEntities)\n return evt", "title": "" } ]
c8fc14f8dd58de6ef45b4f8ce90e979c
Fetches a list of all networks for a tenant
[ { "docid": "f7cd8cb047bcf67a3e404398ccc8b010", "score": "0.7156412", "text": "def list_networks(self):\n return self.do_request(\"GET\", self.networks_path)", "title": "" } ]
[ { "docid": "5484cad849024a381fec2e39ae0ed61d", "score": "0.72490734", "text": "def networks_get(self, max_retries=REST_API_MAX_RETRIES):\n return self.__get(\"%s/networks\" % self.__base_mgmt_url,\n max_retries=max_retries)", "title": "" }, { "docid": "0081c7e4551977b5a99856ed1c2623ac", "score": "0.6919345", "text": "def get_list_networks(**_):\n azure_config = utils.get_azure_config(ctx)\n\n subscription_id = azure_config[constants.SUBSCRIPTION_KEY]\n resource_group_name = azure_config[constants.RESOURCE_GROUP_KEY]\n api_version = constants.AZURE_API_VERSION_05_preview\n\n connect = connection.AzureConnectionClient()\n\n response = connect.azure_get(ctx,\n (\"subscriptions/{}\" +\n \"/resourceGroups/{}\"\n \"/providers/microsoft.network\" +\n \"/virtualnetworks\" +\n \"?api-version={}\").format(\n subscription_id,\n resource_group_name,\n api_version\n )\n )\n\n ctx.logger.debug(response.json()['value'])\n return response.json()['value']", "title": "" }, { "docid": "cb29c2acb5a5257c9d5e1d7b7b88f438", "score": "0.6890732", "text": "def get_networks(org_list):\n\tglobal org\n\tfor org in org_list:\n\t\t#print('Collecting network list for {}'.format(org['name']))\n\t\turl = 'https://api.meraki.com/api/v0/organizations/{}/networks'.format(org['id'])\n\t\tresp = requests.get(url, headers=hdr)\n\t\tnet_list = resp.json()\n\n\t\t#print_net_info(net_list)\n\n\t\tget_devices(net_list)", "title": "" }, { "docid": "412c812be458f67d9938539d5715cefc", "score": "0.67078185", "text": "def getNetworks(db_conn = None):\n if db_conn is None:\n conn = usernameUtilities.log2nordb()\n else:\n conn = db_conn\n\n cur = conn.cursor()\n\n cur.execute(\"SELECT network FROM network\")\n ans = cur.fetchall()\n\n if len(ans) == 0:\n return []\n\n if db_conn is None:\n conn.close()\n\n return [network[0] for network in ans]", "title": "" }, { "docid": "36e7d3c8ca63aa47a222dddc702b4a46", "score": "0.6606199", "text": "def list(self):\n return self._list(\"/os-networks\", \"networks\")", "title": "" }, { "docid": "4abde808dd7e6bd68e8a8c37d977ca59", "score": "0.6574225", "text": "def getNetworkList(self):\n\n session = DbManager().openSession()\n\n try:\n dbList = self._networksDbHandler.getNetworkList(session)\n\n return self.getTortugaObjectList(Network, dbList)\n except TortugaException as ex:\n raise\n except Exception as ex:\n self.getLogger().exception('%s' % ex)\n raise\n finally:\n DbManager().closeSession()", "title": "" }, { "docid": "497a6fec38f4443629f80cf4b700138c", "score": "0.6566191", "text": "def _get_list_of_networks(self, detail=False):\n if detail:\n self.info(\"Getting detailed list of networks\")\n else:\n self.info(\"Getting simple list of networks\")\n return self.clients.network.list_networks(detail=detail)", "title": "" }, { "docid": "2cb77bbeec6bd7b779505717fbaab96f", "score": "0.64088386", "text": "def list_networks():\n return __sets.keys()", "title": "" }, { "docid": "558a8af7f3b34e9acf50d9fa2d1e1a36", "score": "0.63675797", "text": "def list_networks(self, zone_id=None, domain=None, domain_id=None, \n account=None, net_id=None, name=None):\n params = {'command':'listNetworks',\n 'listall':'true'}\n\n if zone_id is not None:\n params['zoneid'] = zone_id \n if domain is not None:\n params['domainid'] = self.get_domain_id(domain)\n if domain_id is not None:\n params['domainid'] = domain_id\n if account is not None:\n params['account'] = account\n if net_id is not None:\n params['id'] = net_id\n if name is not None:\n params['name'] = name \n\n try:\n response = self.send_request(params)\n 
res = json.loads(response)['listnetworksresponse']\n if len(res) > 0:\n data = res['network']\n else:\n return []\n except KeyError as ex:\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n raise ClskError(ex)\n \n networks = []\n for item in data:\n # create Network instance\n network = Network(self, item)\n networks.append(network)\n \n self.logger.debug('List cloudstack %s networks: %s...' % (self.id, str(networks)[0:200])) \n \n return networks", "title": "" }, { "docid": "863fef7fa51aa9e706c13b7416acc14f", "score": "0.62293535", "text": "def ls():\n def get_ccode(network):\n return network['location']['country']\n\n def get_country(code):\n try:\n return countries.get(code.lower()).name\n except KeyError:\n return code\n # Create iter of (country, network)\n networks = ((get_country(get_ccode(n)), n) for n in client.networks)\n # Sort networks by country\n s_networks = sorted(networks, key=lambda cn: cn[0])\n # Group networks by country\n g_networks = itertools.groupby(s_networks, key=lambda cn: cn[0])\n for country, networks in g_networks:\n networks = [n for _, n in networks]\n click.echo(country_name_template.format(country, len(networks)))\n for i, n in enumerate(networks):\n name = u'{0[location][city]} ({0[name]})'.format(n)\n click.echo((u'├' if i < len(networks)-1 else u'└') + ' ' + name)\n click.echo()", "title": "" }, { "docid": "6858893aa609da4eaef9fc7db7dbd7f8", "score": "0.6132363", "text": "def list_network(self):\n raise NotImplementedError('list_network not implemented.')", "title": "" }, { "docid": "ed0406e8909474e500291177468a8d14", "score": "0.6116822", "text": "def networks(self):\n import ns1.rest.ipam\n\n return ns1.rest.ipam.Networks(self.config)", "title": "" }, { "docid": "edbd200691e7d20c285c4657da39df08", "score": "0.61148036", "text": "def getNetlist(self):\n \n return self.netlist", "title": "" }, { "docid": "87a274e4cee458e0056cce1fece4c7d8", "score": "0.6105804", "text": "def get_network_list(cls, timeout=5):\n logger.debug(\"Get all network names from table\")\n network_list = []\n if ui_lib.wait_for_element(GeneralNetworksElements.ID_TABLE_NETWORKS, timeout):\n network_list = FusionUIBase.get_multi_elements_text(GeneralNetworksElements.ID_TABLE_NETWORKS, timeout, fail_if_false=True)\n return network_list", "title": "" }, { "docid": "4e1776b2270916bf855e6cca9ec1fe3a", "score": "0.606703", "text": "def getNetworkListByType(self, type_):\n\n session = DbManager().openSession()\n\n try:\n dbNetworks = self._networksDbHandler.\\\n getNetworkListByType(session, type_)\n\n return self.getTortugaObjectList(Network, dbNetworks)\n except TortugaException as ex:\n session.rollback()\n raise\n except Exception as ex:\n session.rollback()\n self.getLogger().exception('%s' % ex)\n raise\n finally:\n DbManager().closeSession()", "title": "" }, { "docid": "3f7d204caa1ecd820d600ca592b32b97", "score": "0.6038601", "text": "def networks(self) -> pulumi.Output[Sequence['outputs.NetworkConfigResponse']]:\n return pulumi.get(self, \"networks\")", "title": "" }, { "docid": "79bd94e41ce0cfa32c295fda28dd8307", "score": "0.600791", "text": "def networks(self):\n return self._networks", "title": "" }, { "docid": "289c6b2625c7112b7c1ff5859918b5ce", "score": "0.5966221", "text": "def get_ethernet_networks_from_test_data(self):\n return test_data.get().networks", "title": "" }, { "docid": "84b79077c5b44d2b581edaf1baf14457", "score": "0.59572285", "text": "def get_network_list(self, filter_dict={}):\n raise VimConnNotImplemented(\"Should have 
implemented this\")", "title": "" }, { "docid": "c6fd94069d6356dee6f205f069b73cf3", "score": "0.5938898", "text": "def subnet_get_all(context):\n session = None\n try:\n session = nova_session.get_session()\n subnets = session.query(Subnet).filter(\n or_(Subnet.deleted == False,\n Subnet.deleted == None)).\\\n options(joinedload_all('groupIdTypes.networkType')).\\\n options(joinedload('resourceTags')).\\\n options(joinedload_all('ipAddressRanges.startAddress')).\\\n options(joinedload_all('ipAddressRanges.endAddress')).\\\n options(joinedload('usedIpAddresses')).\\\n options(joinedload('parents')).\\\n options(joinedload('networkSrc')).\\\n options(joinedload('dnsServer')).\\\n options(joinedload('dnsSuffixes')).\\\n options(joinedload('defaultGateway')).\\\n options(joinedload('winsServer')).\\\n options(joinedload('ntpDateServer')).\\\n options(joinedload('deploymentService')).\\\n options(joinedload('childs')).\\\n options(joinedload('redundancyPeer')).all()\n return subnets\n except Exception:\n LOG.exception(_('error while obtaining Subnets'))\n raise\n finally:\n __cleanup_session(session)", "title": "" }, { "docid": "04837c5af277466409917378dff2ddd7", "score": "0.5867237", "text": "def get_orgs():\n\tglobal hdr\n\turl = \"https://api.meraki.com/api/v0/organizations\"\n\thdr = {'X-Cisco-Meraki-API-Key': MerakiKey, 'content-type' : 'application/json'}\n\tresp = requests.get(url, headers=hdr)\n\tprint(resp)\n\torg_list = resp.json()\n\tget_networks(org_list)", "title": "" }, { "docid": "29e9e8141bede173e02fb2b7d189915f", "score": "0.5859699", "text": "def get_networks_for_instance(context, instance):\n nw_info = instance.get_network_info()\n return get_networks_for_instance_from_nw_info(nw_info)", "title": "" }, { "docid": "19ca47f23ceacd1c9ae54c89b7cedd69", "score": "0.5858551", "text": "def _return_networks(self, networks):\n results = dict()\n with self._driver.session() as session:\n tax_dict = session.read_transaction(self._tax_dict)\n with self._driver.session() as session:\n tax_properties = session.read_transaction(self._tax_properties)\n for item in tax_properties:\n for taxon in tax_properties[item]:\n tax_properties[item][taxon] = str(tax_properties[item][taxon])\n if not networks:\n with self._driver.session() as session:\n networks = session.read_transaction(self._query,\n \"MATCH (n:Network) RETURN n\")\n networks.extend(session.read_transaction(self._query,\n \"MATCH (n:Set) RETURN n\"))\n networks = list(_get_unique(networks, key='n'))\n # create 1 network per database\n for network in networks:\n g = nx.MultiGraph()\n with self._driver.session() as session:\n edge_list = session.read_transaction(self._association_list, network)\n for edge in edge_list[0]:\n index_1 = edge[0]\n index_2 = edge[1]\n all_weights = []\n try:\n all_weights = re.findall(\"[-+]?[.]?[\\d]+(?:,\\d\\d\\d)*[\\.]?\\d*(?:[eE][-+]?\\d+)?\",\n edge_list[1][edge][0])\n except TypeError:\n if type(all_weights) == list:\n all_weights = edge_list[1][edge][0]\n else:\n all_weights = []\n all_weights = [float(x) for x in all_weights]\n weight = float(np.mean(all_weights))\n g.add_edge(index_1, index_2, source=str(edge_list[0][edge]),\n weight=weight, all_weights=str(all_weights))\n # necessary for networkx indexing\n for item in tax_dict:\n nx.set_node_attributes(g, tax_dict[item], item)\n for item in tax_properties:\n nx.set_node_attributes(g, tax_properties[item], item)\n g = g.to_undirected()\n results[network] = g\n return results", "title": "" }, { "docid": "5d8a6867e913217429c4c38329d5a4d8", "score": 
"0.5849827", "text": "def get_devices(net_list):\n\tfor net in net_list:\n\t\ttry:\n\t\t\tprint(\"Collecting Devices for the {} network which is part of the {} organization\".format(net['name'], org['name']))\n\t\t\turl = 'https://api.meraki.com/api/v0/organizations/{}/networks/{}/devices'.format(org['id'], net['id'])\n\t\t\tresp = requests.get(url, headers=hdr)\n\t\t\tdev_list = resp.json()\n\t\t\tprint_dev_info(dev_list)\n\t\texcept:\n\t\t\tprint('{} organization either has not networks or the networks have no devices'.format(org['name']))\n\t\t\tpass\n\t\tprint('\\n')", "title": "" }, { "docid": "f2ee5a2f3aad69570d1907e209a03e36", "score": "0.5778738", "text": "def _get_all_network_ids(self, context):\n network_dicts = self.nova_netapi.get_all(context)\n network_ids = []\n for network_dict in network_dicts:\n if network_dict.get('id'):\n network_ids.append(network_dict['id'])\n\n LOG.debug('Found network ids: %s' % network_ids)\n\n return network_ids", "title": "" }, { "docid": "85ec7ffe60f17e26d9058659dda6b16f", "score": "0.56979764", "text": "def get_request_network(value):\n network_data = []\n base_urls = gen_network_urls()\n \n for url in base_urls:\n network_url = '{}/{}'.format(url, value)\n result = common_bits.get_request(network_url, common_bits.headers, common_bits.payload)\n\n if result.status_code == 200:\n network = result.json()\n network_type = network['_type']\n\n data = {\n \"type\":network_type,\n \"data\":network\n }\n network_data.append(data)\n break\n\n result = parser(network_data)\n\n final_result = []\n common_bits.removeNestedLists(result, final_result)\n\n return final_result", "title": "" }, { "docid": "1edab338cf8290994be80a52e87786b3", "score": "0.5696007", "text": "def getNetworks(self):\n return self._network_memory", "title": "" }, { "docid": "223b75504e453c15cf37536832b56e1d", "score": "0.5666788", "text": "def list_network_interfaces(self, **kwargs):\n return self._request(\"GET\", \"network\", kwargs)", "title": "" }, { "docid": "3011fc43768849ec1b9bde26655dfd9a", "score": "0.56513494", "text": "def visible_networks(self):\n resp = self._request(req='get_rt_list', need_response=True)\n et = xmlfromstring(f'{resp.text}')\n visible = {}\n for nw in et.findall('w'):\n visible[nw.find('s').text.strip('\"')] = WirelessNetwork(*(i.text.strip('\"') for i in nw.getchildren()))\n return visible", "title": "" }, { "docid": "89afea4b55182042f53db50eb99fa2aa", "score": "0.56262434", "text": "def get_networks(iface, retry=10):\r\n while retry > 0:\r\n if \"OK\" in run_program(\"wpa_cli -i %s scan\" % iface):\r\n networks = []\r\n r = run_program(\"wpa_cli -i %s scan_result\" % iface).strip()\r\n if \"bssid\" in r and len(r.split(\"\\n\")) > 1:\r\n for line in r.split(\"\\n\")[1:]:\r\n b, fr, s, f = line.split()[:4]\r\n ss = \" \".join(line.split()[4:]) # Hmm, dirty\r\n networks.append({\"bssid\": b, \"freq\": fr, \"sig\": s, \"ssid\": ss, \"flag\": f})\r\n return networks\r\n retry -= 1\r\n logging.debug(\"Couldn't retrieve networks, retrying\")\r\n time.sleep(0.5)\r\n logging.error(\"Failed to list networks\")", "title": "" }, { "docid": "f8bba12eda2c600ce3a3f0bb92358fd1", "score": "0.56213653", "text": "def __get_network(self):\n # Http request\n try:\n raw_res = requests.get(LOCATIONS_URL, headers=self._headers, \n cookies=self._cookies, timeout=self._timeout)\n networks = raw_res.json()\n _LOGGER.debug(\"Number of networks found: %s\", len(networks))\n if self._network_name == None and self._network_name2 == None: # Use 1st network found and second if found\n 
self._gateway_id = networks[0][\"id\"]\n self._network_name = networks[0][\"name\"]\n if len(networks) > 1:\n self._gateway_id2 = networks[1][\"id\"]\n self._network_name2 = networks[1][\"name\"]\n \n else:\n for network in networks:\n if network[\"name\"] == self._network_name:\n self._gateway_id = network[\"id\"]\n _LOGGER.debug(\"Selecting %s network among: %s\",\n self._network_name, networks)\n continue\n elif (network[\"name\"] == self._network_name.capitalize()) or (network[\"name\"] == self._network_name[0].lower()+self._network_name[1:]):\n self._gateway_id = network[\"id\"]\n _LOGGER.debug(\"Please check first letter of your network name, In capital letter or not? Selecting %s network among: %s\",\n self._network_name, networks)\n continue\n else:\n _LOGGER.debug(\"Your network name %s do not correspond to discovered network %s, skipping this one...\",\n self._network_name, network[\"name\"])\n if self._network_name2 is not None:\n if network[\"name\"] == self._network_name2:\n self._gateway_id2 = network[\"id\"]\n _LOGGER.debug(\"Selecting %s network among: %s\",\n self._network_name2, networks)\n continue\n elif (network[\"name\"] == self._network_name2.capitalize()) or (network[\"name\"] == self._network_name2[0].lower()+self._network_name2[1:]):\n self._gateway_id = network[\"id\"]\n _LOGGER.debug(\"Please check first letter of your network2 name, In capital letter or not? Selecting %s network among: %s\",\n self._network_name2, networks)\n continue\n else:\n _LOGGER.debug(\"Your network name %s do not correspond to discovered network %s, skipping this one...\",\n self._network_name2, network[\"name\"])\n \n except OSError:\n raise PyNeviwebError(\"Cannot get networks...\")\n # Update cookies\n self._cookies.update(raw_res.cookies)\n # Prepare data\n self.gateway_data = raw_res.json()", "title": "" }, { "docid": "5c1aea56252e8e2dbb0174bfa2763da8", "score": "0.55802387", "text": "def tenant_connections(self, **query):\n return list(self._list(_tenant_connection.TenantConnection,\n paginated=False, **query))", "title": "" }, { "docid": "e716b9ec9ce4512317ff78eca486eba1", "score": "0.55705786", "text": "def networks_with_identifier(cloud_name, identifier):\n conn = openstack.connect(cloud=cloud_name)\n network_list = []\n for network in conn.network.networks():\n if network['name'].endswith(identifier):\n network_list.append(network['id'])\n return network_list", "title": "" }, { "docid": "99fbce31edc561a3d34fd5b0730e4764", "score": "0.55646694", "text": "def get_tenant_list(self, filter_dict={}):\n raise VimConnNotImplemented(\"Should have implemented this\")", "title": "" }, { "docid": "ce3fa8361888e7274eba4cd84df73f9b", "score": "0.5564467", "text": "def get(self, network):\n return self._get(\"/os-networks/%s\" % base.getid(network),\n \"network\")", "title": "" }, { "docid": "018fab3cf9d5b2a0cc745f8b8629bdad", "score": "0.5516693", "text": "def network_type_list(request, format=None):\n network_types = NetworkType.objects.all()\n network_types_serialized = NetworkTypeSerializer(network_types, many=True)\n return Response(network_types_serialized.data)", "title": "" }, { "docid": "c674a8b75c786d14ef835fd89f12c6ef", "score": "0.54933906", "text": "def networks(self):\n raise NotImplementedError", "title": "" }, { "docid": "7e68bdefab1fed84563079f82e058fcb", "score": "0.5456721", "text": "def get_configured_networks():\n # get attached, configured networks\n networks = []\n inet = Popen([\"/bin/ip\", \"addr\"], stdin=PIPE, stdout=PIPE).communicate()\n # get both ipv4 and ipv6 
networks\n m = re.finditer(\"inet6?\\s(?P<addr>\\S*?)\\s.*?(?P<mode>secondary)?\\s(?P<dev>\\S*)\\n\", inet[0])\n for net in [net.groupdict() for net in m]:\n if net['mode'] == 'secondary': continue\n networks.append(IPNetwork(net['addr']))\n return networks", "title": "" }, { "docid": "bd8df1465f16f457f96680756944311a", "score": "0.5451001", "text": "def get_current_networks(self, a_records='default'):\n if a_records == 'default':\n a_records = self.get_current_a_records()\n\n # Remove the last octet of address(es).\n networks = []\n [ networks.append(\n '.'.join(rec.split('.')[-4:-1])\n ) for rec in a_records ]\n logger.info(\n \"Derived networks {} from addresses {}.\".format(networks, a_records)\n )\n return networks", "title": "" }, { "docid": "d1895ecd783bed55dda576e0e35a2d29", "score": "0.54346454", "text": "def subnet_get_all_by_filters(context, filters, sort_key, sort_dir):\n session = None\n try:\n session = nova_session.get_session()\n filtered_query = _create_filtered_ordered_query(session,\n Subnet,\n filters=filters,\n sort_key=sort_key,\n sort_dir=sort_dir)\n subnets = filtered_query.\\\n options(joinedload_all('groupIdTypes.networkType')).\\\n options(joinedload('resourceTags')).\\\n options(joinedload_all('ipAddressRanges.startAddress')).\\\n options(joinedload_all('ipAddressRanges.endAddress')).\\\n options(joinedload('usedIpAddresses')).\\\n options(joinedload('parents')).\\\n options(joinedload('networkSrc')).\\\n options(joinedload('dnsServer')).\\\n options(joinedload('dnsSuffixes')).\\\n options(joinedload('defaultGateway')).\\\n options(joinedload('winsServer')).\\\n options(joinedload('ntpDateServer')).\\\n options(joinedload('deploymentService')).\\\n options(joinedload('childs')).\\\n options(joinedload('redundancyPeer')).all()\n return subnets\n except Exception:\n LOG.exception(_('Error while obtaining Subnets'))\n raise\n finally:\n __cleanup_session(session)", "title": "" }, { "docid": "6e3aca456755261c80f9486c62c3fbd3", "score": "0.54281473", "text": "def list(\n self,\n request: compute.ListNetworksRequest,\n *,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> compute.NetworkList:\n\n # TODO(yon-mg): need to handle grpc transcoding and parse url correctly\n # current impl assumes basic case of grpc transcoding\n url = \"https://{host}/compute/v1/projects/{project}/global/networks\".format(\n host=self._host, project=request.project,\n )\n\n # TODO(yon-mg): handle nested fields corerctly rather than using only top level fields\n # not required for GCE\n query_params = {}\n if compute.ListNetworksRequest.filter in request:\n query_params[\"filter\"] = request.filter\n if compute.ListNetworksRequest.max_results in request:\n query_params[\"maxResults\"] = request.max_results\n if compute.ListNetworksRequest.order_by in request:\n query_params[\"orderBy\"] = request.order_by\n if compute.ListNetworksRequest.page_token in request:\n query_params[\"pageToken\"] = request.page_token\n if compute.ListNetworksRequest.return_partial_success in request:\n query_params[\"returnPartialSuccess\"] = request.return_partial_success\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = self._session.get(url, headers=headers, params=query_params,)\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n return compute.NetworkList.from_json(\n 
response.content, ignore_unknown_fields=True\n )", "title": "" }, { "docid": "06947a4533e5de4d6b39b9c831fe0ea5", "score": "0.5417787", "text": "def launcher_networks(network: vegaNetwork) -> List[Dict[str, Any]]:\n networks = []\n for validator_id in range(network.validators_count()):\n host, public_port, private_port = network.api_address(validator_id)\n node_network = {\"host\": host, \"ssl\": False, \"public-api-port\": public_port, \"private-api-port\": private_port}\n networks.append(node_network)\n\n # Temporary workaround: supervisor works in simple mode and we need only one node.\n return networks[:1]", "title": "" }, { "docid": "5a5347d79e8dcd7ffa27c83e71a1851f", "score": "0.54148287", "text": "def networks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NetworkConfigArgs']]]]:\n return pulumi.get(self, \"networks\")", "title": "" }, { "docid": "8d94eca261cd07102ac40c2bf74c02a4", "score": "0.5409303", "text": "def list_networks(self, network_list_args=None):\n self._list_networks(network_list_args or {})", "title": "" }, { "docid": "af7c0e8e4d3474fe307424c8ce06b2e6", "score": "0.5384686", "text": "def request_network_tables():\n data = jetson_network_tables.get_all_values()\n emit('network_tables_update', data)", "title": "" }, { "docid": "df1dbfbb73e12189dae52626126a482f", "score": "0.5380751", "text": "def get_vm_networks(self, vm):\n nics = []\n self._logger.debug('Getting NIC list.')\n for dev in vm.config.hardware.device:\n if hasattr(dev, 'macAddress'):\n nics.append(dev)\n\n self._logger.debug('Got NICs: {nics}'.format(nics=nics))\n networks = []\n for nic in nics:\n self._logger.debug('Checking details for NIC {nic}'\n .format(nic=nic))\n distributed = hasattr(nic.backing, 'port') and isinstance(\n nic.backing.port,\n vim.dvs.PortConnection,\n )\n nsxt_switch = hasattr(nic.backing, 'opaqueNetworkId')\n\n network_name = None\n if nsxt_switch:\n network_name = nic.backing.opaqueNetworkId\n self._logger.debug(\n 'Found NIC was on port group {network}'.format(\n network=network_name,\n )\n )\n elif distributed:\n mapping_id = nic.backing.port.portgroupKey\n self._logger.debug(\n 'Found NIC was on distributed port group with port group '\n 'key {key}'.format(key=mapping_id)\n )\n for network in vm.network:\n if hasattr(network, 'key'):\n self._logger.debug(\n 'Checking for match on network with key: '\n '{key}'.format(key=network.key)\n )\n if mapping_id == network.key:\n network_name = network.name\n self._logger.debug(\n 'Found NIC was distributed and was on '\n 'network {network}'.format(\n network=network_name,\n )\n )\n else:\n # If not distributed, the port group name can be retrieved\n # directly\n network_name = nic.backing.deviceName\n self._logger.debug(\n 'Found NIC was on port group {network}'.format(\n network=network_name,\n )\n )\n\n if network_name is None:\n raise NonRecoverableError(\n 'Could not get network name for device with MAC address '\n '{mac} on VM {vm}'.format(mac=nic.macAddress, vm=vm.name)\n )\n\n networks.append({\n 'name': network_name,\n 'distributed': distributed,\n 'mac': nic.macAddress,\n 'nsxt_switch': nsxt_switch\n })\n\n return networks", "title": "" }, { "docid": "33bfd57dbbed5da95520ae0c866d534b", "score": "0.53647757", "text": "def list_networks(self):\n return self._get_names('SCLogicalNetwork')", "title": "" }, { "docid": "ea16a6ae7a12bf23454b58c2f241265d", "score": "0.53580654", "text": "def list_network_namespaces(**kwargs):\n if cfg.CONF.AGENT.use_helper_for_ns_read:\n return privileged.list_netns(**kwargs)\n else:\n return 
netns.listnetns(**kwargs)", "title": "" }, { "docid": "77ff28f62277b11528c52523456c3e04", "score": "0.53397864", "text": "def ui_command_nic_list(self):\n print()\n vm = rCache.get_vm(self.appId, self.vmId, aspect='design')\n if not vm.get('networkConnections'):\n print(c.red(\"This VM has no NICs!\\n\"))\n return\n nics = sorted(vm['networkConnections'], key=lambda nic: nic['device']['index'])\n print(ui.prettify_json(nics))\n print()", "title": "" }, { "docid": "ec329481ab589ce1c3b5c0c20ee802a7", "score": "0.53377515", "text": "def query_fixture_networks(ip_addr):\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n session = requests.Session()\n session.headers['Content-Type'] = 'application/json'\n session.headers['Accept'] = 'application/json'\n session.verify = False\n session.auth = (_FIXTURE_LOGIN, _FIXTURE_PASSWORD)\n\n data = {\n 'id': 'Zaza {} tests'.format(PLUGIN_APP_NAME),\n 'method': 'runCmds',\n 'jsonrpc': '2.0',\n 'params': {\n 'timestamps': False,\n 'format': 'json',\n 'version': 1,\n 'cmds': ['show openstack networks']\n }\n }\n\n response = session.post(\n 'https://{}/command-api/'.format(ip_addr),\n data=json.dumps(data),\n timeout=10 # seconds\n )\n\n result = []\n for region in response.json()['result'][0]['regions'].values():\n for tenant in region['tenants'].values():\n for network in tenant['tenantNetworks'].values():\n result.append(network['networkName'])\n return result", "title": "" }, { "docid": "ae1d1f181e6a5a2a0e973d30d65719e5", "score": "0.53195244", "text": "def launcher_networks(network: ExonumNetwork) -> List[Dict[str, Any]]:\n networks = []\n for validator_id in range(network.validators_count()):\n host, public_port, private_port = network.api_address(validator_id)\n node_network = {\n \"host\": host,\n \"ssl\": False,\n \"public-api-port\": public_port,\n \"private-api-port\": private_port,\n }\n networks.append(node_network)\n\n # Temporary workaround: supervisor works in simple mode and we need only one node.\n return networks[:1]", "title": "" }, { "docid": "0e16e26668a27a4ceee254668aed374c", "score": "0.531001", "text": "def get_accounts_ncr():\r\n url = \"http://ncrqe-qe.apigee.net/digitalbanking/db-accounts/v1/accounts\"\r\n access_token = get_OAuth_Token_ncr.get_oauth2_token_ncr()\r\n\r\n querystring = {\"hostUserId\": \"TESTUSER001\"}\r\n\r\n headers = {\r\n 'Authorization': \"Bearer \"+access_token,\r\n 'transactionId': \"53bb3e61-a06d-457b-97d7-1d704eacdf4c\",\r\n 'Accept': \"application/json\",\r\n 'User-Agent': \"PostmanRuntime/7.19.0\",\r\n 'Cache-Control': \"no-cache\",\r\n 'Postman-Token': \"7166b34c-b1f5-4717-a3f4-0d84cc716e47,c7c44af2-fa59-4d7f-8282-49ff1e72e20f\",\r\n 'Host': \"ncrqe-qe.apigee.net\",\r\n 'Accept-Encoding': \"gzip, deflate\",\r\n 'Connection': \"keep-alive\",\r\n 'cache-control': \"no-cache\"\r\n }\r\n\r\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\r\n json_response = response.json()\r\n list_accounts = []\r\n for data in json_response[\"accounts\"]:\r\n list_accounts.append(data)\r\n return list_accounts", "title": "" }, { "docid": "6b81dbcd73233aaa854aa07cc0444f11", "score": "0.52623993", "text": "def do_list_all(env):\n nodes_file = env.config['tripleo']['nodes_file']\n fname = os.path.expanduser(nodes_file)\n # If we're called from the proxy try to use cached information from the\n # nodes file. We take this approach for `node-list --all` and also for\n # `get-node-macs` below. Ironic will refresh the power states for each node\n # every minute or so. 
To find the name of a node, it will list all nodes,\n # and then list the mac addresses for each node until it finds the node.\n # This is very inefficient. Together these two calls account for about 75%\n # of all API calls made by Ironic. It also causes problems because the node\n # is locked by Ironic during these API calls and this sometimes causes\n # exernal API clients to reach their maximum retry.\n # The information for both API calls does not change unless someone first\n # adds a node and then dumps the info to the nodes file, and then imports\n # it in Ironic. So rather than contacting the API, we get the information\n # from the nodes file directly, if it exists.\n if env.args['--cached'] and util.try_stat(fname):\n with open(fname) as fin:\n nodes = json.loads(fin.read())['nodes']\n else:\n # This is computed on attribute access. So we are actually preventing\n # the API calls if we don't access it.\n nodes = env.nodes[1:]\n for node in nodes:\n sys.stdout.write('{}\\n'.format(node['name']))", "title": "" }, { "docid": "e385991c9a07d27ae18324678acf25d8", "score": "0.52588195", "text": "def get_net_generic(self, subtype):\n return self.get_results(\"network\", {}).get(subtype, [])", "title": "" }, { "docid": "06dc9173f7bd885a68f48cadc3d78e3f", "score": "0.5255714", "text": "def search_network(self, device_limit=None, time_limit=5):\n addrs = []\n\n if MDNS_ENABLED:\n addrs += self.search_network_mdns(device_limit=device_limit, time_limit=time_limit)\n if device_limit and len(addrs) >= device_limit:\n return addrs\n\n if SSDP_ENABLED or len(addrs) == 0:\n addrs += search_network_ssdp(device_limit=device_limit, time_limit=time_limit)\n\n return addrs", "title": "" }, { "docid": "352473d430fc651e094a5a251f8c0642", "score": "0.52406704", "text": "def get_network_by_extattrs(self, attributes):\n rest_url = 'https://' + self.iba_host + '/wapi/v' + \\\n self.iba_wapi_version + '/network?*' + \\\n \"&*\".join(attributes.split(\",\")) + '&network_view=' + \\\n self.iba_network_view\n networks = []\n try:\n r = self.session.get(url=rest_url)\n r_json = r.json()\n if r.status_code == 200:\n if len(r_json) > 0:\n for network in r_json:\n if 'network' in network:\n networks.append(network['network'])\n return networks\n else:\n raise InfobloxNotFoundException(\n \"No networks found for extensible attributes: \" + attributes)\n else:\n if 'text' in r_json:\n raise InfobloxGeneralException(r_json['text'])\n else:\n r.raise_for_status()\n except ValueError:\n raise InfobloxGeneralException(r)", "title": "" }, { "docid": "2f9f243f77efe4d8315bbff5fadecda8", "score": "0.5221181", "text": "def list_connections(\n request,\n ):\n\n # expects a wallet to be opened in the current session\n wallet = wallet_for_current_session(request)\n connections = AgentConnection.objects.filter(wallet=wallet).all()\n\n \n\n ret_data = []\n for conn in connections:\n ret_data.append({\n \"wallet\": conn.wallet.wallet_name, \n \"partner_name\": conn.partner_name, \n \"status\":conn.status,\n \"type\":conn.connection_type,\n \"data\":conn.connection_data,\n })\n return HttpResponse(json.dumps(ret_data))", "title": "" }, { "docid": "6c00d5aec428e47213ce1e858acee514", "score": "0.5206891", "text": "def network(self, request, **kwargs):\n obj = self.get_object()\n realm = self.get_realm(request)\n lang = request.LANGUAGE_CODE\n cache_key = make_cache_key(lang, realm, obj, kind='entity')\n\n network = cache.get(cache_key)\n if network is None:\n network = obj.get_network(level=2).to_dict(realm=realm)\n 
cache.set(cache_key, network, None)\n return Response(network)", "title": "" }, { "docid": "af917f1592ca1fae403ca12f16228038", "score": "0.5198265", "text": "def recover_networks(cls):\n\n if \"_networks\" in globals():\n return\n\n global _networks\n _networks = model.recover_networks()", "title": "" }, { "docid": "050cb3446d72faf029fc9392c54a15e6", "score": "0.5196721", "text": "def list(self, filter=None):\n filter_string = ''\n if filter:\n filter_string = '?{}'.format(filter)\n resp = self._http.get_cmd('{}{}'.format(self._apiBase,filter_string))\n if not resp:\n return list()\n wim_accounts = []\n for datacenter in resp:\n wim_accounts.append({\"name\": datacenter['name'], \"uuid\": datacenter['_id']\n if '_id' in datacenter else None})\n return wim_accounts", "title": "" }, { "docid": "5174abe28f92e0ae2a3490957298201a", "score": "0.5193062", "text": "def get_subnetworks(self, networks=None):\r\n if not networks:\r\n networks = Network.objects.filter(\r\n data_center=self.data_center,\r\n min_ip__gte=self.min_ip,\r\n max_ip__lte=self.max_ip,\r\n ).exclude(\r\n pk=self.id,\r\n ).order_by('-min_ip', 'max_ip')\r\n subnets = sorted(list(networks), key=lambda net: net.get_netmask())\r\n new_subnets = list(subnets)\r\n for net, netw in enumerate(subnets):\r\n net_address = ipaddr.IPNetwork(netw.address)\r\n for i, sub in enumerate(subnets):\r\n sub_addr = ipaddr.IPNetwork(sub.address)\r\n if sub_addr != net_address and sub_addr in net_address:\r\n if sub in new_subnets:\r\n new_subnets.remove(sub)\r\n new_subnets = sorted(new_subnets, key=lambda net: net.min_ip)\r\n return new_subnets", "title": "" }, { "docid": "f26878bb3df914306b5771aaa71c8ae5", "score": "0.517683", "text": "def get_isolated_subnets(cls, network):\n return dhcp.Dnsmasq.get_isolated_subnets(network)", "title": "" }, { "docid": "ad441c5ffb1265d2ed62eef72af3bc56", "score": "0.5173249", "text": "def list_bridges(adapter, host_uuid):\n resp = adapter.read(pvm_ms.System.schema_type, root_id=host_uuid,\n child_type=pvm_net.NetBridge.schema_type)\n net_bridges = pvm_net.NetBridge.wrap(resp)\n\n if len(net_bridges) == 0:\n LOG.warn(_LW('No NetworkBridges detected on the host.'))\n\n return net_bridges", "title": "" }, { "docid": "1889d7cadd9834057bc63036a148dfa6", "score": "0.51706135", "text": "def gen_network_urls():\n base_url = common_bits.base_url\n all_base_service_urls = []\n network_urls = [\n 'aaa', 'any', 'availability_group', 'dns_host',\n 'group', 'host', 'interface_address', 'interface_broadcast',\n 'interface_network', 'multicast', 'network', 'range'\n ]\n for item in network_urls:\n base_network_url = '{}/network/{}'.format(base_url,item)\n all_base_service_urls.append(base_network_url)\n\n return all_base_service_urls", "title": "" }, { "docid": "739e9c1df5320ab29a2b724d590e4148", "score": "0.5169467", "text": "def get_fc_networks_from_test_data(self):\n return test_data.get().fcnetworks", "title": "" }, { "docid": "484ec597fa145168b59aa6951aed02e5", "score": "0.5157912", "text": "def get_list(\n self,\n name: str | None = None,\n label_selector: str | None = None,\n page: int | None = None,\n per_page: int | None = None,\n ) -> NetworksPageResult:\n params: dict[str, Any] = {}\n if name is not None:\n params[\"name\"] = name\n if label_selector is not None:\n params[\"label_selector\"] = label_selector\n if page is not None:\n params[\"page\"] = page\n if per_page is not None:\n params[\"per_page\"] = per_page\n\n response = self._client.request(url=\"/networks\", method=\"GET\", params=params)\n\n networks = [\n 
BoundNetwork(self, network_data) for network_data in response[\"networks\"]\n ]\n return NetworksPageResult(networks, Meta.parse_meta(response))", "title": "" }, { "docid": "8f6b0a56ffa597a0743da05e94e19708", "score": "0.5149328", "text": "def list_all(network):\n if len(network.child) == 0:\n return [network.name]\n else:\n return gather_lists([list_all(x) for x in network.child]) \\\n + [network.name]", "title": "" }, { "docid": "b08aa7856f22c210b3e1d0ff0afe97c2", "score": "0.5149213", "text": "def network_group(ng):\n network_type = ng['type']\n members = ng['data']['members']\n network_urls = gen_network_urls()\n network_data = []\n for member in members:\n for url in network_urls:\n network_url = '{}/{}'.format(url,member)\n result = common_bits.get_request(network_url, common_bits.headers, common_bits.payload)\n if result.status_code == 200:\n network = result.json()\n network_type = network['_type']\n data = {\n \"type\":network_type,\n \"data\":network\n }\n network_data.append(data)\n result = parser(network_data)\n return result", "title": "" }, { "docid": "279babc5dde2ac8be0e55422c1ce62e2", "score": "0.51484895", "text": "def _get_nets_radb(self, response, is_http=False):\r\n\r\n nets = []\r\n\r\n if is_http:\r\n regex = r'route:[^\\S\\n]+(?P<val>.+?)<br>'\r\n else:\r\n regex = r'^route:[^\\S\\n]+(?P<val>.+|.+)$'\r\n\r\n # Iterate through all of the networks found, storing the CIDR value\r\n # and the start and end positions.\r\n for match in re.finditer(\r\n regex,\r\n response,\r\n re.MULTILINE\r\n ):\r\n\r\n try:\r\n\r\n net = copy.deepcopy(BASE_NET)\r\n net['cidr'] = match.group(1).strip()\r\n net['start'] = match.start()\r\n net['end'] = match.end()\r\n nets.append(net)\r\n\r\n except ValueError: # pragma: no cover\r\n\r\n pass\r\n\r\n return nets", "title": "" }, { "docid": "2922d52b10eaba5673268507a40375dd", "score": "0.5144073", "text": "def virtual_switch_get_all(context):\n\n session = None\n try:\n session = nova_session.get_session()\n virtualswitches = \\\n session.query(VirtualSwitch).filter(\n or_(VirtualSwitch.deleted == False,\n VirtualSwitch.deleted == None)).\\\n options(joinedload('cost')).\\\n options(joinedload_all('portGroups.cost')).\\\n options(joinedload('networks')).\\\n options(joinedload('subnets')).all()\n\n return virtualswitches\n except Exception:\n LOG.exception(_('error while obtaining VirtualSwitch'))\n raise\n finally:\n __cleanup_session(session)", "title": "" }, { "docid": "be16423f087b77eb8429861fa2aa1351", "score": "0.51418114", "text": "def get(self):\n tenants = Tenant.nodes\n list_of_tenants = list(tenants)\n return list_of_tenants", "title": "" }, { "docid": "c2b63fe3a2fcdd071f2ad4db7f758c87", "score": "0.5135231", "text": "def list_subnets(self, **kwargs):\n return self._request(\"GET\", \"subnet\", kwargs)", "title": "" }, { "docid": "f0e0f482749dad5b8e63bc0863827d1a", "score": "0.5132472", "text": "def getInterfaceConnections(cls, data_center: Union[DataCenter, str] = SITE, rack: Union[DataCenter, str, None] = None) -> List[\"InterfaceConnection\"]:\n query_parameters: Dict[str, str] = {}\n if data_center is not None:\n if type(data_center) is str:\n query_parameters[\"site\"] = data_center.lower()\n elif type(data_center) is DataCenter:\n query_parameters[\"site\"] = data_center.name.lower()\n if rack is not None:\n if type(rack) is str:\n query_parameters[\"q\"] = rack.lower()\n elif type(rack) is Rack:\n query_parameters[\"q\"] = rack.name.lower()\n r = NetboxRequest(NetboxQuery.INTERFACECONNECTIONS, query_parameters, 
json_callback = InterfaceConnection.jsonToObj)\n trace(r.__dict__)\n return r.results", "title": "" }, { "docid": "8b801cf04e006a2e30025f3376cb21c1", "score": "0.51292866", "text": "def GetModeNets(self):\n return _snap.TMMNet_GetModeNets(self)", "title": "" }, { "docid": "5360f1d2dc785c0ccf6771838e90c978", "score": "0.51086557", "text": "def get_network_names(self):\n return self.__locals.network_names", "title": "" }, { "docid": "88b8a13129d5044503c183b2fe215322", "score": "0.51075697", "text": "def existing_dhcp_networks(cls, conf):\n global _devices\n return _devices.keys()", "title": "" }, { "docid": "16d32d18daa703ef4817b04b6df48a0e", "score": "0.51033056", "text": "def get_network():\n if not network:\n set_network(Devnet)\n return network", "title": "" }, { "docid": "3f294b329ff159527aada7a14049573b", "score": "0.51008534", "text": "def discover_all(interactive=False, update_existing=False, outputs=None):\r\n sanity_check()\r\n if outputs:\r\n stdout, stdout_verbose, stderr = outputs\r\n else:\r\n stdout = output.get(interactive)\r\n nets = Network.objects.filter(\r\n environment__isnull=False,\r\n environment__queue__isnull=False,\r\n )\r\n for net in nets:\r\n if interactive:\r\n discover_network(\r\n net.network,\r\n interactive=True,\r\n update_existing=True,\r\n )\r\n else:\r\n queue = django_rq.get_queue()\r\n queue.enqueue(\r\n discover_network,\r\n net.network,\r\n update_existing=update_existing,\r\n )\r\n stdout()", "title": "" }, { "docid": "48ed59e5a246c5e126fdc5923edc090f", "score": "0.50885236", "text": "def net_connections():\n return []", "title": "" }, { "docid": "d07babd595b741017f0a416a19646a8d", "score": "0.50881505", "text": "def network(self, request, **kwargs):\n obj = self.get_object()\n realm = self.get_realm(request)\n lang = request.LANGUAGE_CODE\n cache_key = make_cache_key(lang, realm, obj, kind='entitytype')\n only_entitytype = realm.settings.get('entity-type-only', False)\n\n network = cache.get(cache_key)\n if network is None:\n qs = Entity.objects.filter(realm=realm, type=obj)\n entity_filter = self.make_entity_filter(obj, only_entitytype)\n network = make_network(qs, level=1, entity_filter=entity_filter)\n network = network.to_dict(realm=realm)\n cache.set(cache_key, network, None)\n return Response(network)", "title": "" }, { "docid": "15475567eddc625332c840950378b4d5", "score": "0.5082642", "text": "def get_network_tree(qs=None):\r\n if not qs:\r\n qs = Network.objects.all()\r\n tree = []\r\n all_networks = [\r\n (net.max_ip, net.min_ip, net)\r\n for net in qs.order_by(\"min_ip\", \"-max_ip\")\r\n ]\r\n\r\n def get_subnetworks_qs(network):\r\n for net in all_networks:\r\n if net[0] == network.max_ip and net[1] == network.min_ip:\r\n continue\r\n if net[0] <= network.max_ip and net[1] >= network.min_ip:\r\n yield net[2]\r\n\r\n def recursive_tree(network):\r\n subs = []\r\n sub_qs = get_subnetworks_qs(network)\r\n subnetworks = network.get_subnetworks(networks=sub_qs)\r\n subs = [\r\n {\r\n 'network': sub,\r\n 'subnetworks': recursive_tree(sub)\r\n } for sub in subnetworks\r\n ]\r\n for i, net in enumerate(all_networks):\r\n if net[0] == network.max_ip and net[1] == network.min_ip:\r\n all_networks.pop(i)\r\n break\r\n return subs\r\n\r\n while True:\r\n try:\r\n tree.append({\r\n 'network': all_networks[0][2],\r\n 'subnetworks': recursive_tree(all_networks[0][2])\r\n })\r\n except IndexError:\r\n # recursive tree uses pop, so at some point all_networks[0]\r\n # will rise IndexError, therefore algorithm is finished\r\n break\r\n return tree", 
"title": "" }, { "docid": "f054075388e31a2da2b1fb2e0f33f30b", "score": "0.5076233", "text": "def get_stations(self, contract=None):\n # Get stations from API\n if contract:\n stations = self.__send('/stations', {'contract': contract})\n else:\n stations = self.__send('/stations')\n\n # Check errors\n if stations is None:\n print('Not able to get stations from API.')\n return []\n\n return stations", "title": "" }, { "docid": "14c4c698fc4cf8d96a33ffa973e09f85", "score": "0.5052602", "text": "def get_network_adapter_collection(uuid):\n\n server_hardware = g.oneview_client.server_hardware.get(uuid)\n\n nic = NetworkAdapterCollection(server_hardware)\n\n return ResponseBuilder.success(nic)", "title": "" }, { "docid": "440c17dc69dac66264bec2e22c260d55", "score": "0.50467885", "text": "def exportSDDCCGWnetworks(self):\n myURL = (self.proxy_url + \"/policy/api/v1/infra/tier-1s/cgw/segments\")\n response = self.invokeVMCGET(myURL)\n if response is None or response.status_code != 200:\n return False\n\n json_response = response.json()\n cgw_networks = json_response['results']\n fname = self.export_path / self.network_export_filename\n with open(fname, 'w') as outfile:\n json.dump(cgw_networks, outfile,indent=4)\n return True", "title": "" }, { "docid": "e44669fce37b7c394f988bf1bcc932c7", "score": "0.50448", "text": "def get_dhcp_networks(args):\n neutron = get_neutron()\n agent_list = get_network_agents_on_host(socket.gethostname(), neutron,\n \"DHCP agent\")\n list_func = neutron.list_networks_on_dhcp_agent\n dhcp_network_list = get_resource_list_on_agents(agent_list,\n list_func,\n \"networks\")\n\n clean_data = clean_resource_list(dhcp_network_list,\n allowed_keys=[\"id\", \"status\", \"name\"])\n\n function_set({\"dhcp-networks\": format_status_output(clean_data)})", "title": "" }, { "docid": "4c312da294c90cd29eb82a5b3662ca22", "score": "0.5040807", "text": "def share_networks(self, details=True, **query):\n base_path = '/share-networks/detail' if details else None\n return self._list(\n _share_network.ShareNetwork, base_path=base_path, **query\n )", "title": "" }, { "docid": "8180b76b8dc85a095a7f4c153e7ed366", "score": "0.50348246", "text": "def network(self):\n for network in self.coordinator.data.networks:\n if network.id == self._network.id:\n return network", "title": "" }, { "docid": "503be163f55590ee7bc6de75e12bc1c3", "score": "0.503381", "text": "def get_vms_by_network(self, net):\r\n pass", "title": "" }, { "docid": "62e9dc9d96ead0dadce0fc04124cb212", "score": "0.5030788", "text": "def list_accounts(self):\n _filter = {\n 'hubNetworkStorage': {'vendorName': {'operation': 'Swift'}},\n }\n return self.client.call('Account', 'getHubNetworkStorage',\n mask=LIST_ACCOUNTS_MASK,\n filter=_filter)", "title": "" }, { "docid": "6f6606188102fcd657aed6f08ef481f1", "score": "0.5027881", "text": "def get_nets(self):\n distinct_nets = []\n for net in self.nets:\n if net is self.NC:\n # Exclude no-connect net.\n continue\n if not net.get_pins():\n # Exclude empty nets with no attached pins.\n continue\n for n in distinct_nets:\n # Exclude net if its already attached to a previously selected net.\n if net.is_attached(n):\n break\n else:\n # This net is not attached to any of the other distinct nets,\n # so it is also distinct.\n distinct_nets.append(net)\n return distinct_nets", "title": "" }, { "docid": "1eefa50a85ca8ba9b7b28d8fc03d2734", "score": "0.5021192", "text": "def test_multiple_networks_data(self):\n net = self.vf_utils.get_multiple_networks(self.networks)\n self.assertEqual(net, 
self.noaddr_nets)", "title": "" }, { "docid": "4214c43751eb9b3a21b4e889bb9e01ef", "score": "0.5013468", "text": "def check_networks(SELF, t_network, t_subnet=None, t_router=None):\n\n seen_nets = SELF.admin_manager.networks_client.list_networks()['networks']\n seen_names = [n['name'] for n in seen_nets]\n seen_ids = [n['id'] for n in seen_nets]\n SELF.assertIn(t_network['name'], seen_names)\n SELF.assertIn(t_network['id'], seen_ids)\n\n if t_subnet:\n seen_subnets = SELF.admin_manager.subnets_client.list_subnets()\n seen_net_ids = [n['network_id'] for n in seen_subnets['subnets']]\n seen_subnet_ids = [n['id'] for n in seen_subnets['subnets']]\n SELF.assertIn(t_network['id'], seen_net_ids)\n SELF.assertIn(t_subnet['id'], seen_subnet_ids)\n\n if t_router:\n seen_routers = SELF.admin_manager.routers_client.list_routers()\n seen_router_ids = [n['id'] for n in seen_routers['routers']]\n seen_router_names = [n['name'] for n in seen_routers['routers']]\n SELF.assertIn(t_router['name'],\n seen_router_names)\n SELF.assertIn(t_router['id'],\n seen_router_ids)", "title": "" }, { "docid": "2c2d357e69b8e946bf0ec57718c358e8", "score": "0.50074756", "text": "def list():\n token = get_token()\n list_accounts(token)", "title": "" }, { "docid": "cd1483e04c9e6c3b9be44f6d52421575", "score": "0.50021994", "text": "def networks(self):\n return [\n self.policy, self.qf1, self.qf2, self.target_qf1, self.target_qf2\n ]", "title": "" }, { "docid": "cf0ccf2243a3e0911a4f7c65b99e8722", "score": "0.49943322", "text": "def _get_all_networks_and_weights(self, address_class: Optional[str] = None) -> Tuple[List[IPv4Network], List[int]]:\n # If `address_class` has an unexpected value, use the whole IPv4 pool\n if address_class in _IPv4Constants._network_classes.keys():\n networks_attr = f\"_cached_all_class_{address_class}_networks\"\n all_networks = [_IPv4Constants._network_classes[address_class]] # type: ignore\n else:\n networks_attr = \"_cached_all_networks\"\n all_networks = [IPv4Network(\"0.0.0.0/0\")]\n\n # Return cached network and weight data if available\n weights_attr = f\"{networks_attr}_weights\"\n if hasattr(self, networks_attr) and hasattr(self, weights_attr):\n return getattr(self, networks_attr), getattr(self, weights_attr)\n\n # Otherwise, compute for list of networks (excluding special networks)\n all_networks = self._exclude_ipv4_networks(\n all_networks,\n _IPv4Constants._excluded_networks,\n )\n\n # Then compute for list of corresponding relative weights\n weights = [network.num_addresses for network in all_networks]\n\n # Then cache and return results\n setattr(self, networks_attr, all_networks)\n setattr(self, weights_attr, weights)\n return all_networks, weights", "title": "" }, { "docid": "df48adee45772fe535ee30ace667a4e5", "score": "0.49804032", "text": "def getAllMicrobots(self):", "title": "" }, { "docid": "5b1c54e44e0babbbc0a7eb5bbd32ac2a", "score": "0.49803558", "text": "def get_available_wifi_networks():\n try:\n cmd = [\"nmcli\",\n \"-terse\",\n \"-colors\", \"no\",\n \"-fields\", \"ssid,signal\",\n \"-escape\", \"no\",\n \"device\", \"wifi\"]\n\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = p.communicate()\n if (p.returncode != 0) or (err != \"\"):\n raise Exception(err)\n\n network_dict = dict()\n for line in out.splitlines():\n try:\n network, signal = line.split(\":\", 1)\n if (network not in network_dict) or (int(signal) > network_dict[network]):\n network_dict[network] = int(signal)\n except Exception as ex:\n pass\n\n return 
sorted(network_dict.items(), key=operator.itemgetter(1), reverse=True)\n\n except Exception as ex:\n print(f\"Failed to retrieve available WiFi networds: {ex}\")\n return list()", "title": "" }, { "docid": "227e53866a74d93c333eb984e2b9294f", "score": "0.49745402", "text": "def get_nets(self):\n ret = []\n for id in self.idcode2references:\n ret.append(self.get_xmr(id))\n return ret", "title": "" }, { "docid": "61e7a10df470181d6acd6e174c7117b0", "score": "0.49649617", "text": "def get_all_wifi():\n data = db_handler.get_all_wifi()\n json_data = DataHandler().to_json(data)\n print(json_data)\n return json_data", "title": "" }, { "docid": "e28365b9c0136c1314e6feb7509f3158", "score": "0.49636823", "text": "def discover_nets(self):\n\n # TODO: For now look for nets only assuming that all of them are\n # unrouted.\n sources = {}\n sinks = {}\n\n for node in self.graph.nodes.values():\n if node.type not in [NodeType.SOURCE, NodeType.SINK]:\n continue\n\n # No net\n if node.net is None:\n continue\n\n # Got a source\n if node.type == NodeType.SOURCE:\n if node.net not in sources:\n sources[node.net] = set()\n sources[node.net].add(node.id)\n\n # Got a sink\n elif node.type == NodeType.SINK:\n if node.net not in sinks:\n sinks[node.net] = set()\n sinks[node.net].add(node.id)\n\n # Make nets\n nets = set(sinks.keys()) | set(sources.keys())\n for net_name in nets:\n net = Net(net_name)\n\n # A net may or may not have a source node(s). If there are no\n # sources then one will be created during routing when a route\n # reaches a node of the top-level CLB.\n if net_name in sources:\n net.sources = sources[net_name]\n\n # A net may or may not have at leas one sink node. If there are\n # no sinks then no routing will be done.\n if net_name in sinks:\n net.sinks = sinks[net_name]\n\n self.nets[net_name] = net\n\n # DEBUG\n logging.debug(\" Nets:\")\n keys = sorted(list(self.nets.keys()))\n for key in keys:\n logging.debug(\" \" + str(self.nets[key]))", "title": "" } ]
b1e368a6bd73daaa5b5b06167e0ebb87
Return the character c as-is, unless it is a metacharacter, in which case return it preceded by a backslash
[ { "docid": "1bfd2d0c84b7972ab5a4ab6000d7accc", "score": "0.77703065", "text": "def escape(c):\n return RE.backslash + c if c in RE.metacharacters else c", "title": "" } ]
[ { "docid": "75fbacca7dae2d88620bdcaad170b4ea", "score": "0.6689678", "text": "def parse_escaped_string_char(c):\n if not c:\n fatal_error(\"found EOF while reading a string\")\n if c == \"\\\\\":\n result = \"\\\\\"\n elif c == '\"':\n result = '\"'\n elif c == \"b\":\n result = \"\\b\"\n elif c == \"f\":\n result = \"\\f\"\n elif c == \"n\":\n result = \"\\n\"\n elif c == \"r\":\n result = \"\\r\"\n elif c == \"t\":\n result = \"\\t\"\n elif c == \"v\":\n result = \"\\v\"\n else:\n fatal_error(\"unsupported escape sequence in string\")\n return result", "title": "" }, { "docid": "4856745bbc497ab0089f8a7bbb93356b", "score": "0.651779", "text": "def _lex_escape_sequence(self):\n\n c = self._getc()\n if c == '':\n raise ScanningError('trailing backslash')\n elif c in self._escape_sequence:\n return Token(Token.SYMBOL, self._escape_sequence[c])\n else:\n return Token(Token.SYMBOL, c)", "title": "" }, { "docid": "3276a69c7028d0b77782ec148184dfb2", "score": "0.621321", "text": "def _define_char(cls, prev, current):\n if current in cls.special_chars:\n if prev != '\\\\':\n return True\n return False", "title": "" }, { "docid": "043269fb2355948479e1d88369f2ed8d", "score": "0.619059", "text": "def convert_escape(c, line_number):\n if c == 'n':\n return '\\n'\n elif c == '0':\n return '\\0'\n elif c == 't':\n return '\\t'\n elif c == '\"':\n return '\"'\n elif c == '\\'':\n return '\\''\n elif c == '\\\\':\n return '\\\\'\n elif c == 'v':\n return '\\v'\n \n check(False, 'Invalid escape character.', line_number)", "title": "" }, { "docid": "557660451b58ce52f51f4e94ce6b084e", "score": "0.61510545", "text": "def is_control(char):\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat.startswith(\"C\"):\n return True\n return False", "title": "" }, { "docid": "d04903bd7371516665d0a33ec902fe9c", "score": "0.6098835", "text": "def next_char(input_iter):\r\n for ch in input_iter:\r\n if ch != '\\\\':\r\n yield ch, False\r\n continue\r\n ch = input_iter.next()\r\n representative = ESCAPE_MAPPINGS.get(ch, ch)\r\n if representative is None:\r\n continue\r\n yield representative, True", "title": "" }, { "docid": "9791903b41147a21582d977dc0c21d6a", "score": "0.60469836", "text": "def _is_control(char):\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat.startswith(\"C\"):\n return True\n return False", "title": "" }, { "docid": "90a6cd0af133d5a7da0aaf1c1eb1a5e8", "score": "0.5989551", "text": "def getSpecialChar(char_name):\n if char_name in beautyConsole.characters:\n return beautyConsole.characters[char_name]\n return \"\"", "title": "" }, { "docid": "12ddb0643a51276198a9030e5d158edb", "score": "0.5984104", "text": "def get_special_characters(my_string):", "title": "" }, { "docid": "a324f2ecb7b5b8d9cf5d1211201b3f21", "score": "0.59559125", "text": "def _escape_char(c, escape_char):\n buf = []\n for byte in c.encode('utf8'):\n buf.append(escape_char)\n buf.append('%X' % _ord(byte))\n return ''.join(buf)", "title": "" }, { "docid": "edf275b79917695947defc05e613c2b4", "score": "0.5899559", "text": "def charseq():\n\n def string_part():\n \"\"\"Parse normal string.\"\"\"\n return regex(r'[^\"\\\\]+')\n\n def string_esc():\n \"\"\"Parse escaped string.\"\"\"\n return string('\\\\') >> (\n 
string('\\\\')\n | string('/')\n | string('\"')\n | string('b').result('\\b')\n | string('f').result('\\f')\n | string('n').result('\\n')\n | string('r').result('\\r')\n | string('t').result('\\t')\n | regex(r'u[0-9a-fA-F]{4}').parsecmap(lambda s: chr(int(s[1:], 16)))\n )\n\n return string_part() | string_esc()", "title": "" }, { "docid": "066e882c5bf8399ed1503a07abb9c559", "score": "0.5892684", "text": "def unichr(char):\n try:\n return wstring.chr(char).utf8()\n except:\n return char", "title": "" }, { "docid": "886023d975ba81cd3bf3eedbb0a558a2", "score": "0.58913225", "text": "def _is_control(self, char):\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat.startswith(\"C\"):\n return True\n return False", "title": "" }, { "docid": "315bacba8d59d5c93eee490514f53e13", "score": "0.5866616", "text": "def escape_char(text):\n # NOTE: ORDER MATTERS!\n return (\n text.replace(r\"\\N\", \"\\n\")\n .replace(\"\\\\\", \"\\\\\\\\\")\n .replace(\";\", r\"\\;\")\n .replace(\",\", r\"\\,\")\n .replace(\"\\r\\n\", r\"\\n\")\n .replace(\"\\n\", r\"\\n\")\n )", "title": "" }, { "docid": "3d0bb225fd632148058136267402b439", "score": "0.5802666", "text": "def match_string(self):\n # catch index error\n start_index = self.index\n if self.atch not in '\"\\'':\n raise ValueError(self.output_err(\"String must start with a quote\"))\n quote = self.nextch()\n try:\n char = \"\"\n while True:\n ch = self.nextch()\n if ch == quote: # reached the end of the string\n return char\n elif ch == '\\\\':\n # find backslash\n ch = self.nextch()\n pos = self.index\n if ch == 'u':\n # find \\uxxxx\n nums = self.nextseq(0, 4)\n # check the four char is num\n for num in nums:\n if num not in HEX_NUMBER:\n raise ValueError(self.output_err(\"Invalid \\\\uXXXX\", pos, pos + 4))\n return chr(int(nums, 16))\n else:\n # control char\n try:\n char += BACKSLASH[ch]\n except KeyError:\n raise ValueError(self.output_err('Invalid \\\\escape: ' + repr(ch), pos))\n elif ch <= '\\x1f':\n raise ValueError(self.output_err('Invalid control character'))\n else: # find normal characters\n char += ch\n except IndexError:\n pass\n raise ValueError(self.output_err(\"Unterminated string\", start_index))", "title": "" }, { "docid": "f6d1b15da65eaa17db6c1708440832dd", "score": "0.5766933", "text": "def escape(self, byte):\r\n assert len(byte) == 1\r\n if byte == self.start_token or byte == self.escape_token:\r\n byte = self.escape_token + bytes([byte[0] ^ self.escape_mask[0]])\r\n return byte", "title": "" }, { "docid": "5d54a13341edc00735f288fa1c03fdc8", "score": "0.5762303", "text": "def _next(self):\n\t\tc = self._get()\n\t\tif c == '/' and self.theA != '\\\\':\n\t\t\tp = self._peek()\n\t\t\tif p == '/':\n\t\t\t\tc = self._get()\n\t\t\t\twhile c > '\\n':\n\t\t\t\t\tc = self._get()\n\t\t\t\treturn c\n\t\t\tif p == '*':\n\t\t\t\tc = self._get()\n\t\t\t\twhile 1:\n\t\t\t\t\tc = self._get()\n\t\t\t\t\tif c == '*':\n\t\t\t\t\t\tif self._peek() == '/':\n\t\t\t\t\t\t\tself._get()\n\t\t\t\t\t\t\treturn ' '\n\t\t\t\t\tif c == '\\000':\n\t\t\t\t\t\traise UnterminatedComment()\n\n\t\treturn c", "title": "" }, { "docid": "0235c07593c9754470f4775aedaa29fb", "score": "0.57327294", "text": "def next_char(s):", "title": "" }, { "docid": "d3660c720dafe8d56f50167d2fd9829c", "score": "0.56230426", "text": "def charClass(self, word_characters=u\":@-./_~\"):\n char = self.c\n if char.isspace():\n return ' '\n 
if char.isalnum() or char in word_characters:\n return 'a'\n # everything else is weird\n return 1", "title": "" }, { "docid": "90da801dce8124fc360a13dd36f86f07", "score": "0.5602957", "text": "def rmdiacritics(char: str) -> str:\n try:\n desc = unicodedata.name(char)\n cutoff = desc.find(\" WITH \")\n if cutoff != -1:\n desc = desc[:cutoff]\n char = unicodedata.lookup(desc)\n except (KeyError, ValueError):\n pass # removing \"WITH ...\" produced an invalid name\n return char", "title": "" }, { "docid": "40c62bc4d41f6ef353cd781c967e1537", "score": "0.55976665", "text": "def reflect(self, char: str) -> str:\r\n return char", "title": "" }, { "docid": "a86fa26ceef035851abe9ff90064c8d3", "score": "0.55800724", "text": "def is_initial(c):\n return c.isalpha() or c in '-+*/><=?!&'", "title": "" }, { "docid": "b8eac14c6e1dea464439d3d94b5eeafe", "score": "0.5567112", "text": "def parse_character(self):\r\n if self.get_next_if(u'['):\r\n return self.parse_character_class()\r\n \r\n elif self.get_next_if(u'('):\r\n child = self.parse_alternation()\r\n self.expect(u')')\r\n return child\r\n \r\n elif self.get_next_if(u'{'):\r\n return self.parse_variable()\r\n \r\n else:\r\n return self.parse_literal()", "title": "" }, { "docid": "63211af059402054589e4a360a1cdaea", "score": "0.5527433", "text": "def entregar(c):\r\n abc = ' abcdefghijklmnñopqrstuvwxyzABCDEFGHIJKLMNÑOPQRSTUVWXYZ,.+-\\*?¡¿!<>=)(/&%$#\"_;:1234567890[]{^}áéíóúÁÉÍÓÚäëïöüÄËÏÖÜ@|°'\r\n return abc[c]", "title": "" }, { "docid": "324d693952f76c3bfee81f660e4e1f45", "score": "0.547146", "text": "def convert_special(c):\n if c > '\\uffff':\n c = ':{}:'.format(ud.name(c).lower().replace(' ', '_')) \n return c", "title": "" }, { "docid": "e12d493f04349767c16204e7465b7a15", "score": "0.54652536", "text": "def r13(s):\n return 'c' in s", "title": "" }, { "docid": "1d5428e2972f3628aef647d0bd01a831", "score": "0.543645", "text": "def c(text):\n\n return 'C ' + text.replace('_', ' ')", "title": "" }, { "docid": "40cd1c64b4b9365057e6b28d2d9b1c47", "score": "0.5406502", "text": "def test_get_rarest_char():\r\n path = os.path.dirname(os.path.abspath(__file__))\r\n abs_path = os.path.join(path, \"data.txt\")\r\n with codecs.open(\r\n abs_path,\r\n \"r\",\r\n encoding=\"unicode-escape\",\r\n ) as fi:\r\n assert get_rarest_char(fi)[0] == 1", "title": "" }, { "docid": "facddffdedbf4fc5927298e7f8551920", "score": "0.5378896", "text": "def mod_ESCAPED_STRING(self, s: Token) -> str:\n decoded = bytes(s[1:-1], \"utf-8\").decode(\"unicode_escape\")\n return decoded", "title": "" }, { "docid": "72229b150f67b95ed2f52d81367e2fb4", "score": "0.5377576", "text": "def nextRealToken(self,tok):\n c = self.nextChar()\n if c == tokEOF or c == tokLN:\n return tok.set(c)\n\n if c == '/':\n c = self.peekChar()\n if c == '/': # C++ comment line\n self.skipChar()\n while 1:\n c = self.nextChar()\n if c == tokEOF or c == tokLN:\n break\n return tok.set(tokLN)\n if c == '*': # C comment start\n self.skipChar()\n value = \"/*\"\n prev_c = None\n while 1:\n c = self.nextChar()\n if c == tokEOF:\n #print \"## EOF after '%s'\" % value\n return tok.set(tokEOF,value)\n if c == '/' and prev_c == '*':\n break\n prev_c = c\n value += c\n\n value += \"/\"\n #print \"## COMMENT: '%s'\" % value\n return tok.set(tokSPACE,value)\n c = '/'\n\n if c.isspace():\n while 1:\n c2 = self.peekChar()\n if c2 == tokLN or not c2.isspace():\n break\n c += c2\n self.skipChar()\n return tok.set(tokSPACE,c)\n\n if c == '\\\\':\n if debugTokens:\n print \"nextRealToken: \\\\ found, next token is '%s'\" % 
repr(self.peekChar())\n if self.peekChar() == tokLN: # trailing \\\n # eat the tokLN\n self.skipChar()\n # we replace a trailing \\ by a tokSPACE whose value is\n # simply \"\\\\\". this allows us to detect them later when\n # needed.\n return tok.set(tokSPACE,\"\\\\\")\n else:\n # treat as a single token here ?\n c +=self.getEscape()\n return tok.set(c)\n\n if c == \"'\": # chars\n c2 = self.nextChar()\n c += c2\n if c2 == '\\\\':\n c += self.getEscape()\n\n while 1:\n c2 = self.nextChar()\n if c2 == tokEOF:\n break\n c += c2\n if c2 == \"'\":\n break\n\n return tok.set(tokSTRING, c)\n\n if c == '\"': # strings\n quote = 0\n while 1:\n c2 = self.nextChar()\n if c2 == tokEOF:\n return tok.set(tokSTRING,c)\n\n c += c2\n if not quote:\n if c2 == '\"':\n return tok.set(tokSTRING,c)\n if c2 == \"\\\\\":\n quote = 1\n else:\n quote = 0\n\n if c >= \"0\" and c <= \"9\": # integers ?\n while 1:\n c2 = self.peekChar()\n if c2 == tokLN or (not c2.isalnum() and c2 != \"_\"):\n break\n c += c2\n self.skipChar()\n return tok.set(tokNUMBER,c)\n\n if c.isalnum() or c == \"_\": # identifiers ?\n while 1:\n c2 = self.peekChar()\n if c2 == tokLN or (not c2.isalnum() and c2 != \"_\"):\n break\n c += c2\n self.skipChar()\n if c == tokDEFINED:\n return tok.set(tokDEFINED)\n else:\n return tok.set(tokIDENT,c)\n\n # check special symbols\n for sk in cppLongSymbols:\n if c == sk[0]:\n sklen = len(sk[1:])\n if self.pos + sklen <= self.len and \\\n self.text[self.pos:self.pos+sklen] == sk[1:]:\n self.pos += sklen\n return tok.set(sk)\n\n return tok.set(c)", "title": "" }, { "docid": "0edc5cb64bdd38dab55c690e80300de2", "score": "0.5285", "text": "def get_char(value):\n\n return uchr(value)", "title": "" }, { "docid": "594e2ad2d3a83037642a182f73153dd0", "score": "0.52627754", "text": "def ReplaceSpecChar(string):\n temp_string = string\n for i in \"/\":#\"!@#$%^&*()/[]\\{};:,.<>?|`~-=_+\":\n if i in temp_string:\n temp_string = temp_string.replace(i , '\\\\'+i)\n return temp_string", "title": "" }, { "docid": "0ae4aafdcd9971972184974954435007", "score": "0.52594155", "text": "def _get_singular_symbol(_char, _index):\n index_offset = 1 if _char > 5 else 0\n return _get_symbol(_index - index_offset)", "title": "" }, { "docid": "1b88e14c49d3a0d9a7a1777ca4c65bc4", "score": "0.5258909", "text": "def next_char(c):\n return chr(ord(c) + 1)", "title": "" }, { "docid": "7f66cac6ffd9c68a8e152e78a484c229", "score": "0.5254863", "text": "def _escape(cls, string):\n return cls.ESCAPE_RE.sub(r'\\\\\\g<char>', string)", "title": "" }, { "docid": "2b4e1687b90c2f01e26e6f427ec193a0", "score": "0.5240188", "text": "def __remove_character__(self,string: str,character: str) -> str:\n return string.replace(character,'')", "title": "" }, { "docid": "e654e44ad5d318c8c63b3fbf7ec91afc", "score": "0.523729", "text": "def clean_char(c: chr) -> chr:\n if ord(c) in range(ord('A'), ord('Z') + 1):\n return c\n elif ord(c) in range(ord('a'), ord('z') + 1):\n c = c.upper()\n else:\n c = ''\n return c", "title": "" }, { "docid": "092bedb86855906287020ef68fd7783e", "score": "0.52311534", "text": "def escape(s):\n return \"\".join(_latex_special_chars.get(c, c) for c in s)", "title": "" }, { "docid": "93ccee1a2692201fec97334e350b856e", "score": "0.522709", "text": "def backslashquote_escape(s):\n\t# type: (str, ) -> str\n\n\treturn _backslashquote_escape(s)", "title": "" }, { "docid": "232c05d345f4f3286b1b55d854cd7412", "score": "0.52213407", "text": "def safe_unichr(codepoint):\n if is_py2_narrow_build():\n return (\"\\\\U%08x\" % 
codepoint).decode(\"unicode-escape\")\n elif PY2:\n return unichr(codepoint)\n return chr(codepoint)", "title": "" }, { "docid": "35d0b2879820ee20258576ffdfe95968", "score": "0.5216481", "text": "def fm_escape(s):\n return smart_text(binascii.b2a_qp(smart_bytes(s)).replace(b\"=\\n\", b\"\"))", "title": "" }, { "docid": "1bc0b215a303dbed44e4a16db432b51f", "score": "0.5212001", "text": "def clean_text(c) -> str:\n matching_string, replace_string = make_trans()\n\n return translate(\n regexp_replace(c, \"\\p{M}\", \"\"),\n\n matching_string, replace_string\n ).alias(c)", "title": "" }, { "docid": "98deaa84bc7b1b8e173616c3252c9984", "score": "0.5208865", "text": "def raw(text):\n new_string=''\n for char in text:\n try: new_string+=escape_dict[char]\n except KeyError: new_string+=char\n return new_string", "title": "" }, { "docid": "98deaa84bc7b1b8e173616c3252c9984", "score": "0.5208865", "text": "def raw(text):\n new_string=''\n for char in text:\n try: new_string+=escape_dict[char]\n except KeyError: new_string+=char\n return new_string", "title": "" }, { "docid": "f6ec18b144bca882295d0e5bc5ec21d8", "score": "0.51987034", "text": "def _lex_char_class(self):\n\n characters = set()\n inverted = False\n\n c = self._getc()\n if c == '^':\n inverted = True\n c = self._getc()\n\n if c == ']':\n # If there is a ']' at the beginning of the character class, it is\n # literal.\n characters.add(']')\n c = self._getc()\n\n range_start = ''\n prev_c = ''\n\n while c and c != ']':\n if c == '-':\n if prev_c:\n range_start = prev_c\n else:\n characters.add('-')\n else:\n if range_start:\n assert prev_c == '-'\n start_i = ord(range_start)\n end_i = ord(c)\n if end_i < start_i:\n raise ScanningError('invalid range end')\n range_start = ''\n characters.update(chr(c) for c in range(start_i, end_i + 1))\n else:\n characters.add(c)\n prev_c, c = c, self._getc()\n\n if not c:\n raise ScanningError('unmatched [ or [^')\n\n if prev_c == '-':\n # Trailing hyphen, literal.\n if range_start:\n characters.add(range_start)\n characters.add('-')\n\n if inverted:\n return Token(Token.CHARCLASS, set(SIGMA) - characters)\n else:\n return Token(Token.CHARCLASS, characters)", "title": "" }, { "docid": "8353cdeb0f1c79928dfb5eb10e43030d", "score": "0.5195379", "text": "def wcfilter(ch):\n width = wcwidth.wcwidth(ch)\n if width == -1:\n return 'x'\n elif width == 0:\n return 'X'\n else:\n return ch", "title": "" }, { "docid": "507936ded0838ea64671a2dc91bbf53d", "score": "0.51936096", "text": "def match_to_character(self, chars='\\n'):\n content = ''\n while not self.eos:\n ch = self.nextch()\n if ch in chars:\n break\n else:\n content += ch\n return content", "title": "" }, { "docid": "0d70ec4e776d65b03c33b7a90ac154ba", "score": "0.5177045", "text": "def remove_escapes(word):\n\treturn re.sub(r'\\\\', '', word)", "title": "" }, { "docid": "4022fa631a0fb4f885034386c460dfd4", "score": "0.51553077", "text": "def is_delimiter(c):\n return c in (' ', '(', ')', '\\\"', ';', '\\n') or not c", "title": "" }, { "docid": "0938bd8eaf8f60948c629c3ff686c7aa", "score": "0.5153942", "text": "def pullEscapeSequence(self):\n start = self.index\n\n escaped = self.takeOne()\n if escaped == '\"':\n return \"\\\"\"\n if escaped == '\\\\':\n return \"\\\\\"\n if escaped == '/':\n return \"/\"\n if escaped == 'b':\n return \"\\b\"\n if escaped == 'f':\n return \"\\f\"\n if escaped == 'n':\n return \"\\n\"\n if escaped == 'r':\n return \"\\r\"\n if escaped == 't':\n return \"\\t\"\n if escaped == 'u':\n hexStr = \"0x\" + self.take(4)\n j = 
hex(hexStr)\n return chr(j)\n raise TokenizerException(str.format(\n \"Unknown escape code `{0}` {1}\", escaped, self.getHelpTextAtIndex(start)))", "title": "" }, { "docid": "495f93ca0031d72274d3a1fa2c29653a", "score": "0.51435095", "text": "def CEscape(text, as_utf8):\n\n\n Ord = ord if isinstance(text, basestring) else lambda x: x\n if as_utf8:\n return ''.join(_cescape_utf8_to_str[Ord(c)] for c in text)\n return ''.join(_cescape_byte_to_str[Ord(c)] for c in text)", "title": "" }, { "docid": "80af1d21f48d1bba74f46c69ef2c95f7", "score": "0.5139796", "text": "def replace_spaces_with_character(self, character):\n # contents = None\n # if contents:\n # self.contents = open(self.file_name, 'r')\n # text = self.contents.read()\n output = \"\"\n for string_character in self.contents:\n if string_character == \" \":\n output+=character\n else:\n output+=string_character\n #print(output)\n return output", "title": "" }, { "docid": "d4b6b62c2aad2b3701e4d09236fde62c", "score": "0.512966", "text": "def getChar(brightness):\r\n\r\n # Map of chars based off brightness\r\n chars = \"`^\\\",:;Il!i~+_-?][}{1)(|\\\\/tfjrxnuvczXYUJCLQ0OZmwqpdbkhao*#MW&8%B@$\"\r\n if gui.inverted:\r\n thisChar = chars[64 - int(brightness//3.95)]\r\n else:\r\n thisChar = chars[int(brightness//3.95)]\r\n return thisChar", "title": "" }, { "docid": "a850b57059d178d82e60b6b429e64366", "score": "0.51287544", "text": "def is_printable(c):\n return ord(c)>=32 or c in ['\\r','\\n', '\\t']", "title": "" }, { "docid": "c4d687a0293ccde5813e4e5ee6a38902", "score": "0.5122831", "text": "def _py_encode_basestring(self, s):\n if not s:\n return s, False\n isAnyCharReplaced = False\n def replace(match):\n nonlocal isAnyCharReplaced\n isAnyCharReplaced = True\n return self.ESCAPE_DICT[match.group(0)]\n return str(self.ESCAPE.sub(replace, s)), isAnyCharReplaced", "title": "" }, { "docid": "b634c32783c6f8be87f338e8092028a4", "score": "0.51214355", "text": "def _get(self):\n\t\tc = self.theLookahead\n\t\tself.theLookahead = None\n\t\tif c == None:\n\t\t\tc = self.instream.read(1)\n\t\tif c >= ' ' or c == '\\n':\n\t\t\treturn c\n\t\tif c == '': # EOF\n\t\t\treturn '\\000'\n\t\tif c == '\\r':\n\t\t\treturn '\\n'\n\t\treturn ' '", "title": "" }, { "docid": "872bbe3d8e815162a437acf7538cf0d7", "score": "0.50858754", "text": "def backslashquote_unescape(s):\n\t# type: (str, ) -> str\n\n\treturn _backslashquote_unescape(s)", "title": "" }, { "docid": "4031d597d95cdfc42ce8fd8404715963", "score": "0.50827545", "text": "def formatChr(chr):\r\n if not chr.startswith(\"chr\"):\r\n return \"chr\" + chr\r\n\r\n return chr", "title": "" }, { "docid": "2af5e80c8db28eb99b2ce8e5ccd04df6", "score": "0.5067812", "text": "def backslash_unescape(s):\n\t# type: (str, ) -> str\n\n\t# for some reason unicode_escape is based on latin-1\n\t# ascii also works, but the intermittent string would be longer\n\t# because more things will be backslash-escaped\n\treturn s.encode(\"latin-1\", \"backslashreplace\").decode(\"unicode_escape\")", "title": "" }, { "docid": "9a968e7dc713450c4297b2ce51b27861", "score": "0.5062518", "text": "def lower(c):\r\n\r\n if type(c) == str: # handle strings\r\n return ord(c)\r\n return c", "title": "" }, { "docid": "620612db2e5fff4a8ad933cb735f4495", "score": "0.50566673", "text": "def blankout(src, char):\r\n return dot_re.sub(char, src)", "title": "" }, { "docid": "620612db2e5fff4a8ad933cb735f4495", "score": "0.50566673", "text": "def blankout(src, char):\r\n return dot_re.sub(char, src)", "title": "" }, { "docid": 
"150bab73b43392dd3d122442a8c93e46", "score": "0.5050748", "text": "def raw(text):\n new_string = ''\n for char in text:\n try:\n new_string += escape_dict[char]\n except KeyError:\n new_string += char\n return new_string", "title": "" }, { "docid": "ed2ac3491e7cc4ae3f2c29ea0e234e3a", "score": "0.50262463", "text": "def _expect_symbol(self, char):\r\n value = '' + char\r\n\r\n while True:\r\n char = self._next_char(peek=True)\r\n\r\n if char is None:\r\n break\r\n elif value + str(char) == '//':\r\n return None, 'comment'\r\n elif value + str(char) not in self.symbols:\r\n break\r\n\r\n value += char\r\n self._char_pos += 1\r\n\r\n return value, 'symbol'", "title": "" }, { "docid": "a969362d4947694fed045eb5d359fd9f", "score": "0.5021166", "text": "def is_valid_filesystem_char(char: str):\n return unicodedata.category(char)[0] in {'L', 'N', 'Z'}", "title": "" }, { "docid": "67eafc7c83927b97ea1bb1363eff9195", "score": "0.5006495", "text": "def q(self, s):\n for c in \"\\*()\\x00\":\n s = s.replace(c, r\"\\%02x\" % ord(c))\n return s", "title": "" }, { "docid": "e8d9a65b5120866fe8c323eb0eb9efd0", "score": "0.50059503", "text": "def test_string_escape(self):\n code = \"let hey_there = '\\\\'Hello world\\\\''\"\n lexed = self.lexer.parse_literal(code)\n assert lexed == \"['{TokenType.KEYWORD let}', '{TokenType.IDENTIFIER hey_there}', '{TokenType.OPERATOR =}', \\\"{TokenType.STRING 'Hello world'}\\\"]\", \"Issue in escape character lexical analysis.\"", "title": "" }, { "docid": "eda02767ef13ee2afded8fe3376d54cc", "score": "0.49989545", "text": "def getCharValue(self, c):\n returnVal = None\n if c >= 0 and c <= 9:\n returnVal = chr(c + BaseConverter._ZERO)\n else:\n returnVal = chr(c + BaseConverter._ADJUSTED_A)\n return returnVal", "title": "" }, { "docid": "785da0d710deafdfed5ee00f026c9601", "score": "0.49912864", "text": "def backslash(self):\n self.add_token('backslash', '\\\\')\n self.add_token('line-end', '\\n')\n self.line_indent()\n self.backslash_seen = False", "title": "" }, { "docid": "ff877f08572f410b2022c2ec19bb8624", "score": "0.4988181", "text": "def get_character(self, position):\r\n line, index = self.get_position(position)\r\n return unicode(self.text(line)[index])", "title": "" }, { "docid": "2132e5e72504376691693d071397aa23", "score": "0.49862173", "text": "def escape(s):\n return _escape(s).replace(\"\\ \", \" \")", "title": "" }, { "docid": "7c9a1e17adc2b27dda1b2d9e367010aa", "score": "0.49844113", "text": "def escape(s):\n return s.replace('\\n', '\\\\n').replace('\\t', '\\\\t').replace('\\r', '\\\\r')", "title": "" }, { "docid": "4e06c1731aa823a1d1ed60f6863ef47f", "score": "0.49698153", "text": "def _replace_escaped(cls, text):\n for special_char in cls.special_chars:\n text = text.replace('\\\\' + special_char, special_char)\n return text", "title": "" }, { "docid": "b1a2061474282b90068f91b4146dbffa", "score": "0.4964582", "text": "def nextChar(self):\n result = self.peekChar()\n self.skipChar()\n return result", "title": "" }, { "docid": "1dc7a28090f9f8bc4f94353f63f79851", "score": "0.4959831", "text": "def escape_special_characters(s: str) -> str:\n matches = _ESC_REGEX.findall(s)\n if not matches:\n return s\n # Replace all special characters found in `s`. 
Performance should not be critical\n # so we do one pass per special character.\n all_special = set(\"\".join(matches))\n # '\\' is even more special: it needs to be replaced first, otherwise we will\n # mess up the other escaped characters.\n try:\n all_special.remove(\"\\\\\")\n except KeyError:\n pass # no '\\' in the string\n else:\n s = s.replace(\"\\\\\", \"\\\\\\\\\")\n for special_char in all_special:\n s = s.replace(special_char, f\"\\\\{special_char}\")\n return s", "title": "" }, { "docid": "6d7f58608a329bdb09d28da1a9768d99", "score": "0.49550745", "text": "def _check_special_char_position(cls, last_special, special):\n if special == cls.open_flag_char:\n # In this case the 'last_special' must always be\n # the 'end_format_char' or None if it's the first appearing\n if last_special not in [cls.end_format_char, None]:\n return cls.ESCAPE_CHAR\n else:\n return cls.START_FLAGS\n elif special == cls.close_flag_char:\n # In this case the 'last_special' must always be the 'open_flag_char'\n if last_special != cls.open_flag_char:\n return cls.ESCAPE_CHAR\n else:\n return cls.START_FORMAT\n elif special == cls.end_format_char:\n # In this case the 'last_special' must always be the 'close_flag_char'\n # Or None if the text does not include any other formatting character\n if last_special in [cls.open_flag_char, cls.end_format_char, None]:\n return cls.ESCAPE_CHAR\n else:\n return cls.END_FORMAT", "title": "" }, { "docid": "00cd9be4840c6691da1aa27c19b0b767", "score": "0.49536234", "text": "def is_special_identifier_char(c):\n return c in (\n ESCAPEMENT_SYM, OLD_COMMENT_SYM, FILE_INCLUSION_SYM, UNIT_START_SYM,\n UNIT_END_SYM, ALIAS_SYM, SLOT_SYM, INTENT_SYM,\n CHOICE_START, CHOICE_END, CHOICE_SEP, OLD_CHOICE_START, OLD_CHOICE_END,\n OLD_CHOICE_SEP, CASE_GEN_SYM, RAND_GEN_SYM, ARG_SYM, VARIATION_SYM\n )", "title": "" }, { "docid": "73b573ca847dc25040d42518498d8ce6", "score": "0.49476168", "text": "def encode_char(self, char):\n o = ord(char)\n if o == 32:\n return _SEGMENTS[36] # space\n if o == 42:\n return _SEGMENTS[38] # star/degrees\n if o == 45:\n return _SEGMENTS[37] # dash\n if o >= 65 and o <= 90:\n return _SEGMENTS[o-55] # uppercase A-Z\n if o >= 97 and o <= 122:\n return _SEGMENTS[o-87] # lowercase a-z\n if o >= 48 and o <= 57:\n return _SEGMENTS[o-48] # 0-9\n raise ValueError(\"Character out of range: {:d} '{:s}'\".format(o, chr(o)))", "title": "" }, { "docid": "67ea5869e996029953a0d23bb933c0b8", "score": "0.49270633", "text": "def getCharacter(self):\n return _libsedml.ASTNode_getCharacter(self)", "title": "" }, { "docid": "b802e31e96515fe48acdc82fcba49f99", "score": "0.49265972", "text": "def invalid_first_char(string: str, append_char: str=\"x\") -> str:\n return resub(\"(?P<match>^[0-9])\", f\"{append_char}\\g<match>\", string)", "title": "" }, { "docid": "4eb2499fb88865be087c9c3334d1404e", "score": "0.4921188", "text": "def delete_special_character(text: str) -> str:\r\n return re.sub(r\"[\\╔\\ˊ\\〉\\〈\\–\\η\\●\\®\\·\\•\\-\\~#/*&$|★▶><\\\\^@+[=\\]()(){%_}?\\…]+\", \"\", text)", "title": "" }, { "docid": "45524c0471fa0ef31b2a897ef9349eb1", "score": "0.49168992", "text": "def raw_text(text):\n new_string=''\n for char in text:\n try: new_string+=escape_dict[char]\n except KeyError: new_string+=char\n return new_string", "title": "" }, { "docid": "97312395f9db0e1f396c5532e5be222b", "score": "0.49159524", "text": "def quote(s):\r\n if not isinstance(s, basestring):\r\n return s\r\n res = list(s)\r\n for i in range(len(res)):\r\n c = res[i]\r\n if c in 
\"\"\":/_#?;@&=+$,\"<>%\\\\\"\"\":\r\n res[i] = '_%02X' % ord(c)\r\n return ''.join(res)", "title": "" }, { "docid": "9c5844d990652be063e006b200907382", "score": "0.4913926", "text": "def encode(char):\n if char.isalnum():\n return char\n elif char in substitution_list:\n return substitution_list[char]\n else:\n return \"_{}_\".format(ord(char))", "title": "" }, { "docid": "055794aeb6ad0a91ded3afcfb7cf8ddf", "score": "0.49131456", "text": "def isAlphanum(c):\n\treturn ((c >= 'a' and c <= 'z') or (c >= '0' and c <= '9') or\n\t\t\t(c >= 'A' and c <= 'Z') or c == '_' or c == '$' or c == '\\\\' or (c is not None and ord(c) > 126));", "title": "" }, { "docid": "aafb4e615aaf4478b1d142dc1dedd1eb", "score": "0.49125102", "text": "def cateify(input_text: str) -> str:\n result = []\n\n for char in input_text:\n if not __is_special_char(char):\n result.append(chr(ord(char) + monospace_base))\n else:\n result.append(__special_cast_char(char)) \n return ''.join(result)", "title": "" }, { "docid": "b6e14e8b3efbcd9ad6ae2a0b7be20604", "score": "0.49123266", "text": "def charcolor(self, state, ch):\n if state & SBIT_SINGLEQUOTE:\n return COL_SINGLEQUOTE\n if state & SBIT_DOUBLEQUOTE:\n return COL_DOUBLEQUOTE\n if state & SBIT_COMMENT:\n return COL_COMMENT\n if state & SBIT_STATEMENT:\n if ch == '[' or ch == ']':\n return COL_FUNCTIONDELIM\n if ch == '\\'':\n return COL_SINGLEQUOTE\n if ch == '\"':\n return COL_DOUBLEQUOTE\n return COL_CODE\n else:\n if ch in (',', ';', '*', '>'):\n return COL_DIRECTIVE\n if ch == '[' or ch == ']':\n return COL_FUNCTIONDELIM\n if ch == '\\'':\n return COL_SINGLEQUOTE\n if ch == '\"':\n return COL_DOUBLEQUOTE\n return COL_FOREGROUND", "title": "" }, { "docid": "a85e1599572d11b44b7124097e533033", "score": "0.48979676", "text": "def extract_charstr(text=''):\n text = text.strip()\n if text[0:1] != '\"':\n return text, None\n elif len(text) == 1:\n return text, None\n #\n esc = 0\n for cur in range(1, len(text)):\n # 1) end of text\n if cur == len(text) - 1:\n if text[cur:1+cur] != '\"':\n # no end-of-charstr found\n return text, None\n else:\n return '', re.subn('\\s{0,}\\n\\s{0,}', '', text[1:-1])[0]\n\n # 2) finding a double-quote\n if text[cur:1+cur] == '\"':\n if esc > 0:\n # 2.1) escape cursor already set\n if cur == esc:\n # current double-quote escaped, unsetting escape cursor\n esc = 0\n else:\n # current double-quote not escaped\n if text[1+cur:2+cur] == '\"':\n # escaping next char\n esc = 1+cur\n else:\n # end of charstr\n return text[1+cur:].strip(), \\\n re.subn('\\s{0,}\\n\\s{0,}', '', text[1:cur])[0]\n else:\n # 2.2) escape cursor not set\n if text[1+cur:2+cur] == '\"':\n # escaping next char\n esc = 1+cur\n else:\n # end of charstr\n return text[1+cur:].strip(), \\\n re.subn('\\s{0,}\\n\\s{0,}', '', text[1:cur])[0]", "title": "" }, { "docid": "3e068c9fe09c61f04c9db344c47c9551", "score": "0.48966676", "text": "def masking_character(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"masking_character\")", "title": "" }, { "docid": "9708df4ad5b7abe15ae8e66d5a474729", "score": "0.4889131", "text": "def _escape_token(token, alphabet):\n if not isinstance(token, six.text_type):\n raise ValueError(\"Expected string type for token, got %s\" % type(token))\n\n token = token.replace(u\"\\\\\", u\"\\\\\\\\\").replace(u\"_\", u\"\\\\u\")\n ret = [c if c in alphabet and c != u\"\\n\" else r\"\\%d;\" % ord(c) for c in token] # noqa\n return u\"\".join(ret) + \"_\"", "title": "" }, { "docid": "8586e77342af8be179efdbfb2c4d09a9", "score": "0.48853415", "text": 
"def _escape_chars(self, unescaped_string):\n # Character to replace symbols\n swap_char = '_'\n \n # If swap_char is in string, double it.\n unescaped_string = re.sub(swap_char, \"%s%s\" % (swap_char, swap_char),\n unescaped_string)\n \n # Substitute all invalid chars.\n return re.sub('[^a-zA-Z0-9\\.\\_\\-]', swap_char, unescaped_string)", "title": "" }, { "docid": "ff2a08f54f89f88d5c81803638ab2fb1", "score": "0.4880547", "text": "def complementBase(c):\n retChar = \"\"\n if c == \"A\":\n retChar = \"U\"\n elif c == \"U\":\n retChar = \"A\"\n elif c == \"C\":\n retChar = \"G\"\n elif c == \"G\":\n retChar = \"C\"\n return retChar", "title": "" }, { "docid": "6789373769703580881b0178eaea5cef", "score": "0.48769414", "text": "def it_detects_single_chars(self):\n assert da.lwc.search._get_regex_indicator_fcn(['a', 'b'], False)('a')\n assert da.lwc.search._get_regex_indicator_fcn(['a', 'b'], False)('b')\n assert not da.lwc.search._get_regex_indicator_fcn(\n ['a', 'b'], False)('c')", "title": "" }, { "docid": "fa7491222ddd666b6dd03f15c4e4300e", "score": "0.48740876", "text": "def escape_string(self):\n return self._escape_string", "title": "" }, { "docid": "9cf9a4649c83230f74938d226247a8d7", "score": "0.48608178", "text": "def re_uescape(pattern):\n mutable = list(pattern)\n for idx, char in enumerate(pattern):\n if char not in _alphanum:\n if char == u\"\\000\":\n mutable[idx] = u\"\\\\000\"\n else:\n mutable[idx] = u\"\\\\\" + char\n return u''.join(mutable)", "title": "" }, { "docid": "3eab1d5a2502436d99b948073e909ad4", "score": "0.48543215", "text": "def escape(s):\n for old, new in [(\"-\", \"--\"), (\"_\", \"__\"), (\"?\", \"~q\"),\n (\"%\", \"~p\"), (\"#\", \"~h\"), (\"/\", \"~s\"), (\"\\\"\", \"''\")]:\n s = s.replace(old, new)\n return s", "title": "" }, { "docid": "1b8cd627fdad96c802ba5fe4b4008d4e", "score": "0.48512307", "text": "def test_unicode_escape(self):\n self.assertEqual(\"\\u0000\", utils.unicode_escape(\"\\\\u0000\"))\n self.assertEqual(\"\\u0331\", utils.unicode_escape(\"\\\\u0331\"))\n self.assertEqual(\"\\u26F0\", utils.unicode_escape(\"\\\\u26F0\"))", "title": "" }, { "docid": "3122533472aa29511cb75bcc6576a5c1", "score": "0.48460704", "text": "def char_to_semigraphic(char):\n if char.upper() == 'R':\n return ROOK\n elif char.upper() == 'N':\n return KNIGHT\n elif char.upper() == 'B':\n return BISHOP\n elif char.upper() == 'Q':\n return QUEEN\n elif char.upper() == 'K':\n return KING\n elif char.upper() == 'P':\n return PAWN\n\n return ' '", "title": "" }, { "docid": "76cb0b7575edeab80026d23e867bcc4e", "score": "0.48387316", "text": "def isEnglishCharacter(c):\n return True\n if c.isalpha() or c in [' ', '\\t', '\\n', '.', ',', '?', '!', ';', ':',\n '\\'', '\"', '/', '\\\\', '(', ')', '[', ']', '#', '$', '%', '&', '-',\n '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:\n return True\n return False", "title": "" }, { "docid": "86bf6724b0cc486d68655a115a5e7715", "score": "0.4833739", "text": "def r10(s):\n return 'ch' in s", "title": "" } ]
59229feb1e25dc424b9d82333814ddb8
Select and operate a set of commands
[ { "docid": "ab9587793bb4325face4533f44adb4f5", "score": "0.6718301", "text": "def send_select_and_operate_command_set(self, command_set, callback=opendnp3.PrintingCommandResultCallback.Get(),\n config=opendnp3.TaskConfig.Default()):\n self.master.SelectAndOperate(command_set, callback, config)", "title": "" } ]
[ { "docid": "2df849345075724f9350c6a92d703750", "score": "0.6681098", "text": "def __execute_cmd(self, selection):\n \n if selection == 1: \n self.__cmd_display_vehicle_types()\n elif selection == 2:\n self.__cmd_display_vehicle_costs()\n elif selection == 3:\n self.___cmd_prompt_and_display_avail_vehicles()\n elif selection == 4:\n self.__cmd_display_specific_rental_cost()\n elif selection == 5:\n self.__cmd_make_reservation()\n elif selection == 6:\n self.__cmd_cancel_reservation()", "title": "" }, { "docid": "97cc8b3eead1ff98b239964a14fe5884", "score": "0.65771186", "text": "def test_send_SelectAndOperate_command_set(self, run_outstation):\n self.run_master(\"c2\")", "title": "" }, { "docid": "b63a3f9ba33bb5960d9e717256e21e00", "score": "0.6532774", "text": "def select_command(var_dir, commands):\n logger = logging.getLogger(DEFAULT_LOGGER_NAME)\n raise NotImplementedError(\"Finish the list selection first\")\n return None, 0", "title": "" }, { "docid": "e9f265fe9cc5b92cc428f207b7e70a92", "score": "0.6523318", "text": "def test_send_SelectAndOperate_single_command(self, run_outstation):\n self.run_master(\"c1\")", "title": "" }, { "docid": "3fa73304757c48a6f877b692a645125a", "score": "0.6498068", "text": "def execute( self, commands ):\n pass #TODO", "title": "" }, { "docid": "929664d77080f623584f59a7960639e0", "score": "0.6413385", "text": "def execute_commands(pkt):\n\n for cmd in command_flags.keys():\n if command_flags[cmd]:\n execute_one_command(pkt, cmd)\n print(\"--------------------------\")", "title": "" }, { "docid": "7a926b5e0b5f8977d06ae3c9014edd78", "score": "0.6393274", "text": "def send_select_and_operate_command(self, command, index, callback=opendnp3.PrintingCommandResultCallback.Get(),\n config=opendnp3.TaskConfig.Default()):\n _log.debug(\"Selecting and operating...\")\n self.master.SelectAndOperate[opendnp3.ControlRelayOutputBlock](command, index, callback, config)\n _log.debug(\"Completed selecting and operating...\")", "title": "" }, { "docid": "8f4ceff6ec2987a4de658493561bbe46", "score": "0.6344337", "text": "def execute_commands(commands_list):\n for command in commands_list:\n func = command[0]\n func(*command[1:])", "title": "" }, { "docid": "5d8e773d9463eb7537c4ef65de3a7ed0", "score": "0.6242798", "text": "def eval_cmds(commands):\n for c in commands:\n eval_list(parse(c))", "title": "" }, { "docid": "aa177507f6f8783c569d05831d7159a6", "score": "0.62273645", "text": "def helper_run_menu_cmd(apartments, commands, cmd, args):\r\n if cmd in ['info']:\r\n return []\r\n elif cmd == 'print':\r\n return [apartments]\r\n elif cmd in ['occupy', 'clear', 'sum', 'sort', 'max', 'filter']:\r\n try:\r\n return apartments, args[0]\r\n except IndexError:\r\n raise IndexError(\"The given command expects a parameter!\")\r\n elif cmd in commands.keys():\r\n return apartments, args", "title": "" }, { "docid": "df4645962c624ebd043f2cd698f059de", "score": "0.622706", "text": "def commands():\n pass", "title": "" }, { "docid": "df4645962c624ebd043f2cd698f059de", "score": "0.622706", "text": "def commands():\n pass", "title": "" }, { "docid": "df4645962c624ebd043f2cd698f059de", "score": "0.622706", "text": "def commands():\n pass", "title": "" }, { "docid": "5c8706049a61ff7e9ec5bd0fc52f939b", "score": "0.62073463", "text": "def execute_commands(self, cmds):\n for cmd in cmds:\n if cmd.startswith(\"#\") or cmd == \"\":\n # used as comment\n continue\n print(\"[*] \" + cmd) \n if self.onecmd(cmd): # Stop -> An error happened in one of the commands!\n self.__exit(1)", "title": "" }, { 
"docid": "a60076c5db30c2fcc008e4072dfa126d", "score": "0.61913353", "text": "def execute_commands(self) -> None:\n for command in self.parsed:\n pass # call cube's rotate/twist function here with modifier or resolve the modifier if necessary\n\n # remove executed commands\n self.parsed = []", "title": "" }, { "docid": "8318883cb26e21a1a01753076cfda7b5", "score": "0.6186563", "text": "def execute_command(parsed: List[str]) -> None:\n parsed = correct_key_names(parsed)\n # get the mouse's current position\n x, y = pag.position()\n # check for \"move to\" command\n if parsed[0] == \"move\" and parsed[1] == \"to\":\n COMMANDS[\"move to\"][0](int(parsed[2]), int(parsed[3]), duration=1)\n elif parsed[0] == \"move\":\n # must be a move up/down/left/right\n move_mouse_relative(parsed[1], int(parsed[2]))\n elif parsed[0] == \"double\" and parsed[1] == \"click\":\n # check for double click command\n COMMANDS[\"double click\"][0](x, y)\n elif parsed[1] == \"click\":\n # must be a left, middle, or right click\n COMMANDS[\"left click\"][0](x, y, button=parsed[0])\n elif parsed[0] == \"hold\" and parsed[1] in [\"left\", \"middle\", \"right\"]:\n # check for hold down mouse button\n COMMANDS[\"hold right\"][0](x, y, button=parsed[1])\n elif parsed[0] == \"release\" and parsed[1] in [\"left\", \"middle\", \"right\"]:\n # check for release mouse button\n COMMANDS[\"release right\"][0](x, y, button=parsed[1])\n elif parsed[0] == \"scroll\":\n COMMANDS[\"scroll up\"][0](parsed[1], int(parsed[2]))\n elif parsed[0] == \"type\" and parsed[1] == \"this\":\n COMMANDS[\"type this\"][0](\" \".join(parsed[2:]), interval=0.05)\n elif parsed[1] == \"key\":\n COMMANDS[parsed[0] + \" \" + parsed[1]][0](parsed[2])\n elif parsed[0] == \"use\" and parsed[1] == \"shortcut\":\n perform_hotkey(parsed[2:])\n elif parsed[0] == \"quit\" and parsed[1] == \"program\":\n print(\"You said: quit program. 
Now quitting...\")\n COMMANDS[\"quit program\"][0]()", "title": "" }, { "docid": "705f127a1138790b1fbe161cb24888f6", "score": "0.61663526", "text": "def commands():\n\n pass", "title": "" }, { "docid": "7e94d2b3a7f61817dcb4ec37a35d785c", "score": "0.6139123", "text": "def do_cmd(self,cmd):\n pass", "title": "" }, { "docid": "ce2e3cc34465b172b830748f5f9f66b9", "score": "0.612823", "text": "def commands(self):\n def func(lib, opts, args):\n write = ui.should_write(opts.write)\n force = opts.force\n\n if opts.album:\n for album in lib.albums(ui.decargs(args)):\n self.handle_album(album, write, force)\n\n else:\n for item in lib.items(ui.decargs(args)):\n self.handle_track(item, write, force)\n\n cmd = ui.Subcommand('replaygain', help=u'analyze for ReplayGain')\n cmd.parser.add_album_option()\n cmd.parser.add_option(\n \"-f\", \"--force\", dest=\"force\", action=\"store_true\", default=False,\n help=u\"analyze all files, including those that \"\n \"already have ReplayGain metadata\")\n cmd.parser.add_option(\n \"-w\", \"--write\", default=None, action=\"store_true\",\n help=u\"write new metadata to files' tags\")\n cmd.parser.add_option(\n \"-W\", \"--nowrite\", dest=\"write\", action=\"store_false\",\n help=u\"don't write metadata (opposite of -w)\")\n cmd.func = func\n return [cmd]", "title": "" }, { "docid": "c1854130aef88af47a753203f0fae3b7", "score": "0.61139053", "text": "def cli(self, commands):\n ret = self.connection.cli(commands)\n\n return ret", "title": "" }, { "docid": "afed83dc14d982564eea8341b107eabd", "score": "0.6052867", "text": "def perform_selection_action(verb: str):\n acts = select_verbs_map[verb]\n for act in acts:\n act()", "title": "" }, { "docid": "7aacb9effe3c5c5eb0ca269e83e6edb8", "score": "0.6036384", "text": "def cmd_commands(self, data):\n\n admin_cmds = []\n admin_or_moderator_cmds = []\n moderator_cmds = []\n no_priv_cmds = []\n\n for command, func in self.commands.items():\n if func.func_dict.get('admin_required'):\n admin_cmds.append(command)\n elif func.func_dict.get('admin_or_moderator_required'):\n admin_or_moderator_cmds.append(command)\n elif func.func_dict.get('moderator_required'):\n moderator_cmds.append(command)\n else:\n no_priv_cmds.append(command)\n reply = 'Available commands: '\n reply += ', '.join(sorted(no_priv_cmds))\n self.reply(reply, data)\n\n user_id = get_sender_id(data)\n if moderator_cmds and self.is_moderator(user_id):\n reply = 'Moderator commands: '\n reply += ', '.join(sorted(moderator_cmds))\n self.api.pm(reply, user_id)\n if admin_or_moderator_cmds and (self.is_moderator(user_id)\n or self.is_admin(user_id)):\n reply = 'Priviliged commands: '\n reply += ', '.join(sorted(admin_or_moderator_cmds))\n self.api.pm(reply, user_id)\n if admin_cmds and self.is_admin(user_id):\n reply = 'Admin commands: '\n reply += ', '.join(sorted(admin_cmds))\n self.api.pm(reply, user_id)", "title": "" }, { "docid": "9517702e55b561f2c64c626d33f6d437", "score": "0.6028927", "text": "def run_commands():\r\n args = setup_argparse()\r\n switch = {\r\n '2a': graph_country_views,\r\n '2b': graph_continent_views,\r\n '3a': graph_browser3a_uses,\r\n '3b': graph_browser3b_uses,\r\n '4': get_reader_profiles,\r\n '5d': also_likes_list,\r\n '6': also_likes_graph,\r\n '7': launch_gui\r\n }\r\n switch.get(args.task_id)(args)", "title": "" }, { "docid": "43ddcce3ab48331e7b7bf3b30461691f", "score": "0.5997897", "text": "def do_examine(self, cmd):\n return self.do_select(cmd, examine=True)", "title": "" }, { "docid": "ae1b74da08975f2e8f98c4a9f6e263bc", "score": 
"0.5988077", "text": "def run_batch_mode(self, cmds):\n cmds = cmds.split(',')\n for cmd in cmds:\n print \"Running command '%s'...\" % cmd\n getattr(self,cmd)(None)", "title": "" }, { "docid": "4484a782252c02c748f5fd9cbab3337d", "score": "0.59448344", "text": "def execute_many(self, ops):\n\n for op in ops:\n self.execute_one(op)", "title": "" }, { "docid": "b2e6afcab9521b10b5605d14013bdf54", "score": "0.59429866", "text": "def do_command_actions(self, command):\n actions = []\n \n for controlset in self.controls:\n csactions = []\n for control in controlset:\n done = False\n for action, params in self.get_control_actions(control):\n if action == 'replace':\n logging.debug('replace: %s', params)\n self.game.set_words(params)\n return self.do_command_actions(params)\n \n elif action == 'gameover':\n done = True\n self.game.end_game()\n break\n \n elif action == 'done':\n done = True\n break\n \n else:\n csactions.append((action, params))\n actions.append((action, params))\n logging.debug((action, params))\n \n if done:\n break\n \n for action, params in csactions:\n self.do_action(action, params)\n if self.game.gameover:\n break\n \n return actions", "title": "" }, { "docid": "c7eac6f4832ca2a9f4dac3ebbbed854c", "score": "0.5934245", "text": "def execute(self) -> None:\n for item in self.items:\n self.bin_sel_algo(item)", "title": "" }, { "docid": "ea1bee88f5127ef7b6ebfd464d1807a6", "score": "0.5918935", "text": "def selection_utility_command(remote, command_name):\n cmd = mmapi.StoredCommands()\n cmd.AppendSelectUtilityCommand( command_name )\n remote.runCommand(cmd)", "title": "" }, { "docid": "ffa415ea737bd13bba6bb721df4f45a6", "score": "0.59182036", "text": "def __executeSelection(self):\n txt = self.selectedText()\n e5App().getObject(\"Shell\").executeLines(txt)", "title": "" }, { "docid": "a58b63e135091e483ce2cde5d066964e", "score": "0.5917557", "text": "def run_command(command):\n command = int(command)\n if command == 1:\n print(list_dir())\n elif command == 2:\n move_up()\n elif command == 3:\n move_down()\n elif command == 4:\n print(count_files_input())\n elif command == 5:\n print(count_bytes_input())\n elif command == 6:\n print(find_files_input())\n else:\n print(ru.NOT_FOUND)", "title": "" }, { "docid": "4e71e5f8cd231484bfc57bed7777fd03", "score": "0.59156734", "text": "def run_batch(self):\n while not self.closed:\n print(\"Type a command:\")\n inp = input().split(' ')\n command = inp[0].upper()\n arguments = inp[1:]\n if command in self.commands:\n if arguments:\n if len(arguments) == 1:\n print(\n self.commands[command](arguments[0]))\n if len(arguments) == 2:\n print(self.commands[command](arguments[0], arguments[1]))\n else:\n print(self.commands[command]())\n else:\n print(\"UNKNOWN COMMAND\")", "title": "" }, { "docid": "729f0944e768879b92cb52c3fa47b0e6", "score": "0.5905136", "text": "def call_sys(cmds):\n for c in cmds:\n logger.info(c)\n try:\n os.system(c)\n except:\n logger.error(c)", "title": "" }, { "docid": "7418dbb3c41274c219e7ea9ab1b86d61", "score": "0.5904852", "text": "def command(options):\n case.execute(options.case[1])", "title": "" }, { "docid": "70481eee19366c502b6062f0f6182247", "score": "0.5904283", "text": "def process_client_commands():\n for user in gvar.LOBBY.values():\n if user.client.active and user.client.cmd_ready:\n user.cmd_driver()\n for user in gvar.PLAYERS.values():\n if user.client.active and user.client.cmd_ready:\n user.cmd_driver()", "title": "" }, { "docid": "a6576b6533c58bbf08adf787d29c9641", "score": "0.5889629", "text": "def 
GetCommands(self):", "title": "" }, { "docid": "94b277084d3a58d910052ce9ca90b9e0", "score": "0.5866565", "text": "def run(self):\n for displayTag in self.commandDict:\n # Commands for this particular display\n displayCommands = self.commandDict[displayTag]\n\n # Group commands by subcommand. Must preserve ordering to avoid interfering commands\n verbGroups = collections.OrderedDict([\n (\"help\", []),\n (\"mirror\", []),\n (\"rotate\", []),\n (\"res\", []),\n (\"underscan\", []),\n (\"brightness\", []),\n (\"show\", []),\n ])\n for command in displayCommands:\n verbGroups[command.verb].append(command)\n\n # Run commands by subcommand\n for verb in verbGroups:\n # Commands for this display, of this subcommand\n commands = verbGroups[verb]\n\n if len(commands) > 0:\n # Multiple commands of these types will undo each other.\n # As such, just run the most recently added command (the last in the list)\n if (\n verb == \"help\" or\n verb == \"rotate\" or\n verb == \"res\" or\n verb == \"brightness\" or\n verb == \"underscan\"\n ):\n try:\n commands[-1].run()\n except DisplayError as e:\n raise CommandExecutionError(e.message, commands[-1])\n\n # \"show\" commands don't interfere with each other, so run all of them\n elif verb == \"show\":\n for command in commands:\n try:\n command.run()\n except DisplayError as e:\n raise CommandExecutionError(e.message, command)\n\n # \"mirror\" commands are the most complicated to deal with\n elif verb == \"mirror\":\n command = commands[-1]\n\n if command.subcommand == \"enable\":\n display = getDisplayFromTag(displayTag)\n # The current Display that the above \"display\" is mirroring\n currentMirror = display.mirrorSource\n # Become a mirror of most recently requested display\n mirrorDisplay = command.source\n\n # If display is not a mirror of any other display\n if currentMirror is None:\n try:\n display.setMirrorSource(mirrorDisplay)\n except DisplayError as e:\n raise CommandExecutionError(e.message, command)\n\n # The user requested that this display mirror itself, or that it mirror a display\n # which it is already mirroring. 
In either case, nothing should be done\n elif display == currentMirror or currentMirror == mirrorDisplay:\n pass\n\n # display is already a mirror, but not of the requested display\n else:\n # First disable mirroring, then enable it for new mirror\n display.setMirrorSource(None)\n display.setMirrorSource(mirrorDisplay)\n try:\n display.setMirrorSource(None)\n display.setMirrorSource(mirrorDisplay)\n except DisplayError as e:\n raise CommandExecutionError(e.message, command)\n\n elif command.subcommand == \"disable\":\n try:\n command.run()\n except DisplayError as e:\n raise CommandExecutionError(e.message, command)", "title": "" }, { "docid": "22f217a4d7bcbf6682bd16d122b7633c", "score": "0.58644193", "text": "def _op_cmds():\n\n op_cmds = [\n \"HEADER\",\n \"TERMINATE\",\n \"NOP\",\n \"ACK\",\n ]\n for tick in range(4, 68, 2):\n for page in range(32, 64):\n op_cmds.append(\"TICK_%d_PAGE_%d\" % (tick, page))\n return op_cmds", "title": "" }, { "docid": "9ace9c3534c9cd173268b31f1700e5bf", "score": "0.58506787", "text": "def run_commands(cmd, *args, **kwargs):\n if isinstance(cmd, tuple):\n return [run(c, *args, **kwargs) for c in cmd]\n\n return run(cmd, *args, **kwargs)", "title": "" }, { "docid": "8d6310e70c7fb79289bcaa8e72ff2d04", "score": "0.58477354", "text": "def run(self,command):\n if command == \"exit\":\n sys.exit()\n elif command == \"reset metrics\" or command == \"resetmetrics\": #todo: regex this so it accepts any combination\n self.controller.reset_metrics()\n elif command == \"save\":\n self.controller.save_active_document()\n elif command == \"save as\":\n self.controller.save_active_document_as()\n elif command == \"save exit\":\n self.controller.save_active_document()\n sys.exit()\n elif command == \"open\":\n self.controller.open_file()\n elif command.startswith(\"exec\"):\n self.executeCLI(command)\n else:\n print(\"Command not found!\")", "title": "" }, { "docid": "b7799f43033d7f383cd447b13969df19", "score": "0.5840412", "text": "async def command_recognizer(self, msg, args):\r\n commands = []\r\n skip = 0\r\n for i, arg in enumerate(args):\r\n if skip:\r\n skip -= 1\r\n continue\r\n\r\n found, arg = Utils.find_subcommand(self, arg)\r\n\r\n if found:\r\n\r\n if arg == 'help':\r\n if len(args) > i + 1:\r\n commands.append((\r\n msg.channel, '', (await Utils(self.data).default_help(self, msg, arg=args[i + 1])), -1\r\n ))\r\n skip += 1\r\n else:\r\n commands.append((msg.channel, '', (await Utils(self.data).default_help(self, msg)), -1))\r\n else:\r\n commands.append((await eval('self.{}(msg)'.format(arg))))\r\n\r\n else: # Command not found or not available for use.\r\n commands.append(self.arg_not_found(msg, arg))\r\n return commands", "title": "" }, { "docid": "80db930e60011482d70f67faae7f2c0a", "score": "0.5838402", "text": "def ProcessCommand(self, id):", "title": "" }, { "docid": "0ed93f9aeef77e6837eac904fa6e85a2", "score": "0.58351487", "text": "def command_available(command, results):\n\n pass", "title": "" }, { "docid": "800a79e4b69e45badfde7a9b662f99bb", "score": "0.5832573", "text": "def select_all(remote):\n cmd = mmapi.StoredCommands()\n cmd.AppendCompleteToolCommand(\"cancel\")\n cmd.AppendBeginToolCommand(\"select\")\n cmd.AppendSelectCommand_All()\n remote.runCommand(cmd)", "title": "" }, { "docid": "800a79e4b69e45badfde7a9b662f99bb", "score": "0.5832573", "text": "def select_all(remote):\n cmd = mmapi.StoredCommands()\n cmd.AppendCompleteToolCommand(\"cancel\")\n cmd.AppendBeginToolCommand(\"select\")\n cmd.AppendSelectCommand_All()\n 
remote.runCommand(cmd)", "title": "" }, { "docid": "f34998181b82af2b99c93f35d0874664", "score": "0.5830444", "text": "def _run_commands(commands):\n for command in commands:\n syslog('Running command %s' % command)\n check_call(command, shell=True)", "title": "" }, { "docid": "effb5fcab33ceefe9be611c4ecd950a0", "score": "0.58253956", "text": "def commands(self, command):\n command.check_arg_count(0)\n self._send_lists((('command', cmd) for cmd in self.SUPPORTED_COMMANDS))", "title": "" }, { "docid": "232fb859cc08012ff7062a1ed155ccb5", "score": "0.582126", "text": "def _handle_command(cmd):\n ws = cmd.lower().split()\n\n for pattern, func, kwargs in adventurelib._available_commands():\n args = kwargs.copy()\n matches = pattern.match(ws)\n if matches is not None:\n globalvars.save_data.turn_counter += 1\n args.update(matches)\n func(**args)\n update_status()\n break\n else:\n no_command_matches(cmd)\n print()", "title": "" }, { "docid": "778926bf8be0317157bb7f3eed933862", "score": "0.58139765", "text": "def execute(command, register):\n op, A, B, C = command\n return OP_CODES[op](register, A, B, C)", "title": "" }, { "docid": "c3a08387a41ee3ce76038b734894f2e4", "score": "0.5797957", "text": "def execute(self, command: str, **kwargs):", "title": "" }, { "docid": "51d6f746f99ed346054ea205e0598de5", "score": "0.57946336", "text": "def do_use(self, command):\n\t\tif command == \"psscan\" or command == \"pslist\" or command == \"pstree\" or command == \"psxview\":\n\t\t\tpluginExec(command)\n\t\telif command == \"autoruns\":\n\t\t\tpluginExec(command)\n\t\telif command == \"consoles\" or command == \"cmdscan\" or command == \"connections\" or command == \"connscan\":\n\t\t\tpluginExec(command)\n\t\telif command == \"imageinfo\":\n\t\t\tpluginExec(command)\n\t\telif command == \"malfind\":\n\t\t\tpluginExec(command)\n\t\telif command == \"clamscan\":\n\t\t\tcommandStr = \"clamscan output/ --log=output/clamscan.txt --quiet\"\n\t\t\tprint \"Executing: \" + commandStr\n\t\t\tsubprocess.Popen([commandStr], shell=True)\n\t\telif command == \"sockets\" or command == \"sockscan\" or command == \"svcscan\":\n\t\t\tpluginExec(command)\n\t\telse:\n\t\t\tprint \"use autoruns - Searches the registry and memory space for applications running at system startup and maps them to running processes\"\n\t\t\tprint \"use clamscan - Executes a recursive scan on the output folder\"\n\t\t\tprint \"use cmdscan - Extract command history by scanning for _COMMAND_HISTORY\"\n\t\t\tprint \"use connections - Print list of open connections [Windows XP and 2003 Only]\"\n\t\t\tprint \"use connscan - Pool scanner for tcp connections\"\n\t\t\tprint \"use consoles - Extract command history by scanning for _CONSOLE_INFORMATION\"\n\t\t\tprint \"use imageinfo - Executes the imageinfo plugin\"\n\t\t\tprint \"use malfind - Find hidden and injected code\"\n\t\t\tprint \"use pslist - Print all running processes by following the EPROCESS lists\"\n\t\t\tprint \"use psscan - Executes the psscan plugin in the background\"\n\t\t\tprint \"use psxview - Find hidden processes with various process listings\"\n\t\t\tprint \"use pstree - Print process list as a tree\"\n\t\t\tprint \"use sockets - Print list of open sockets\"\n\t\t\tprint \"use sockscan - Pool scanner for tcp socket objects\"\n\t\t\tprint \"use svcscan - Scan for Windows services\"\n\t\t\t#print \"use pstotal - Combination of pslist,psscan & pstree\n\t\t\tprint\n\t\treturn", "title": "" }, { "docid": "29be6c72bbc602c8771dd58aa3d444a7", "score": "0.57840794", "text": "def command(cmd) 
:\n\trun(cmd)", "title": "" }, { "docid": "12299d86087f494609f72540e6ed829e", "score": "0.57805187", "text": "def ask_user_commands(self):\n\n self.command_list = ['show ip interface brief',\n 'show module']\n\n if not self.auto_run:\n while True:\n print(Fore.CYAN + \"\\nAbout to run following commands on all hops:\" + Fore.WHITE)\n for command in self.command_list:\n print(command)\n\n commands_action = input(\"\\nType new command to add command or existing command to remove.\\n\"\n \"[Enter] to continue, [q] to quit: \")\n if commands_action == '':\n return self.command_list\n elif commands_action.lower() == 'q':\n sys.exit(0)\n elif commands_action in self.command_list:\n self.command_list.remove(commands_action)\n else:\n self.command_list.append(commands_action)", "title": "" }, { "docid": "0beab1908af7cfa56f6fb4f632fb5fd3", "score": "0.5780312", "text": "def select_actions(self):\r\n raise NotImplementedError # TODO: implement\r", "title": "" }, { "docid": "28bef5d15c789ba6c0aa8d22e5acc791", "score": "0.577376", "text": "def select(*args):\n return _call_main(*args, main_func=_lib.selMain)", "title": "" }, { "docid": "7185b55ca5b97485ed1de938b46e2b99", "score": "0.57432073", "text": "def execute_command(self, command):\n try:\n\n if command[0].upper() == \"FIND_ALL\":\n if len(command) != 2:\n raise Exception(\"Please enter FIND_ALL followed by the path to the directory that you want to list the files from.\")\n\n self.command_executor.findAllTextFiles(command[1])\n\n\n elif command[0].upper() == \"CLOSE\":\n self.command_executor.close()\n\n elif command[0].upper() == \"DISPLAY\":\n self.command_executor.display()\n\n elif command[0].upper() == \"OPEN\":\n\n if len(command) != 2:\n raise Exception(\"Please enter OPEN followed by filepath to textfile.\")\n\n self.command_executor.open(command[1])\n\n elif command[0].upper() == \"EDIT_LINE\":\n\n if len(command) != 2:\n raise Exception(\"Please enter EDIT_LINE followed by the line number.\")\n\n self.command_executor.editLine(int(command[1]))\n\n elif command[0].upper() == \"MOVE\":\n\n if len(command) != 2:\n raise Exception(\"Please enter MOVE followed by the new file location.\")\n\n self.command_executor.move(command[1])\n\n elif command[0].upper() == \"HELP\":\n self._get_help()\n else:\n raise Exception(\"Please enter a valid command, type HELP for a list of available commands.\")\n\n except IndexError:\n print(\"Please enter a command, type HELP for a list of available commands.\")", "title": "" }, { "docid": "19eeba06a13e10c7a71f717e247fc94c", "score": "0.5740417", "text": "def execute_commands(self):\n\n for _input in self.commands:\n\n split = _input.strip().split(\" \")\n command = split[0]\n\n if command == CREATE:\n slots = int(split[1])\n self.parking_lot = ParkingLot(slots)\n out = OUT_CREATE.format(slots)\n\n elif command == PARK:\n reg_no = split[1]\n age = split[3]\n vehicle = Vehicle(reg_no, age)\n slot = self.parking_lot.park_vehicle(vehicle)\n if slot:\n out = OUT_PARK_SUCCESS.format(reg_no, slot)\n else:\n out = OUT_PARK_FAIL\n\n elif command == VACATE:\n slot = split[1]\n vehicle = self.parking_lot.vacate_slot(int(slot))\n if vehicle:\n out = OUT_LEAVE_SUCCESS.format(slot, vehicle.reg_no, vehicle.age)\n else:\n if int(slot) <= self.parking_lot.max_available_slots:\n out = OUT_LEAVE_FAIL1\n else:\n out = OUT_LEAVE_FAIL2\n\n elif command == SLOT_BY_CAR:\n reg_no = split[1]\n vehicle = self.parking_lot.get_vehicle(reg_no)\n if vehicle:\n out = vehicle.slot\n else:\n out = OUT_QUERY_NO_RESULT\n\n elif command == 
SLOTS_BY_DRIVER_AGE or command == REG_NO_BY_DRIVER_AGE:\n age = split[1]\n vehicles = self.parking_lot.get_vehicles_by_driver_age(age)\n out_list = []\n if vehicles:\n\n for vehicle in vehicles:\n if command == SLOTS_BY_DRIVER_AGE:\n out_list.append(str(vehicle.slot))\n else:\n out_list.append(vehicle.reg_no)\n\n if out_list:\n out = \",\".join(out_list)\n else:\n out = OUT_QUERY_NO_RESULT\n\n else:\n out = OUT_INVALID_COMMAND\n\n self.output.append(out)", "title": "" }, { "docid": "11ccb681e8e94ccf3648005187308081", "score": "0.57295924", "text": "def _run_cmds(self, raw):\n\n # If don't have a list of cmds make a single cmd list\n if isinstance(raw, list):\n cmds = raw\n else:\n cmds = [raw]\n\n # Run each command in turn\n for raw_cmd in cmds:\n # Make sure we have env_root when needed\n if raw_cmd.count('%(env_root)s') and len(self._env_dir) == 0:\n raise Exception('Package references environment root, '\n 'must be built in an environment')\n\n # Sub in our variables into the commands\n cmd = raw_cmd % {\n 'jobs' : str(util.cpu_count()),\n 'prefix' : self._target_dir,\n 'arch' : platform.machine(),\n 'env_root' : self._env_dir,\n }\n\n # Run our command\n self._shellcmd(cmd, self._output)", "title": "" }, { "docid": "d544aa021a9434532c7af79416ca357d", "score": "0.572384", "text": "def handle_command(self, cmd):", "title": "" }, { "docid": "1d1a482ee87424120a4453624cb5ef9a", "score": "0.5715571", "text": "def optionsMenuCommands(self, index):\r\n\t\tself.commands[index]()\r\n\t\tself.optionsMenu.deleteLater()\r\n\t\tself.optionsMenu = None", "title": "" }, { "docid": "fcd93f02867087743ea32adb76acb657", "score": "0.5713265", "text": "def readCommands():\n digitBase = {\"0\": 0, \"1\": 1, \"2\": 2, \"3\": 3, \"4\": 4, \"5\": 5, \"6\": 6, \"7\": 7, \"8\": 8, \"9\": 9, \"A\": 10, \"B\": 11,\n \"C\": 12, \"D\": 13, \"E\": 14, \"F\": 15}\n\n command_type = {\"1\": uiAddNumbers, \"3\": uiMultiplyNumbers, \"2\": uiSubstractNumbers, \"4\": uiDivisionNumbers, \"5\": uiConvertNumber}\n\n while True:\n printMenu()\n cmd = input(\"Input your command number: \")\n if cmd == \"6\":\n break\n if cmd not in command_type:\n print(\"Command not known\")\n else:\n command_type[cmd](digitBase)", "title": "" }, { "docid": "da90df1275539b99ece9fcda379984e3", "score": "0.5707594", "text": "async def pickems_commands(self, ctx: commands.Context) -> None:\n pass", "title": "" }, { "docid": "8b5fde3b79e37463dae10d2fa2576103", "score": "0.5707063", "text": "def get_commands():\n print('Format: start p1 p2 or exit \\ngame modes: user / easy / medium / hard')\n cmd = input('Input command: ')\n if cmd == 'exit':\n quit()\n if cmd.split(' ')[0] == 'start':\n cmd =cmd.split(' ')\n if len(cmd) != 3:\n print('Bad parameters!')\n else:\n players=[]\n for i in cmd:\n if i == 'easy' or i == 'user' or i == \"medium\" or i == \"hard\":\n players.append(i)\n if len(players) != 2:\n print('Bad parameters!\\n')\n return get_commands()\n return players\n else:\n return get_commands()", "title": "" }, { "docid": "fb30808e75a7e503c9847037d8d310b0", "score": "0.57062835", "text": "def execute_cmds(cmd_array, mekamon_uart, desc=config.default_cmd_desc):\n for index, cmd in enumerate(cmd_array):\n cmd_desc = \"%s message [%d/%d]\" % (desc, index, len(cmd_array))\n execute_cmd(cmd, mekamon_uart, desc=cmd_desc)", "title": "" }, { "docid": "cc8b69deb5bebd0cb412e7931abd030d", "score": "0.5705981", "text": "def main():\n file_data = get_file_data()\n interpret_commands(file_data)", "title": "" }, { "docid": 
"153d8b4424773ea652f7e7c0313d7c7b", "score": "0.5705817", "text": "def user_command(command):\n commands_lib = {\"move\": move, \"open\": use_key, \"take\": pick_up,\n \"drop\": drop, \"use\": use_key, \"examine\": examine,\n \"display\": display_inventory,\n \"where\": room_to_screen}\n no_args = [\"where\", \"display\"]\n one_target = [\"move\"]\n one_obj = [\"examine\", \"take\", \"drop\"]\n two_args = [\"open\", \"use\"]\n (verb, obj, target) = command\n if verb in two_args and not (obj == '' or target == ''):\n commands_lib[verb](target, obj)\n elif verb in one_target and target != '':\n commands_lib[verb](target)\n elif verb in one_obj and obj != '':\n commands_lib[verb](obj)\n elif verb in no_args:\n commands_lib[verb]()\n else:\n print('Invalid Command')\n print(\"Try 'help' for more options\")", "title": "" }, { "docid": "66e59f9abdd9a3e332a94c6e50eaa5c8", "score": "0.56925195", "text": "def process_command(self, command):\n if command == \"next\":\n self.connect_next()\n elif command == \"end\":\n self.disconnect_current()\n elif command == \"?\":\n self.show_commands()\n else:\n self.prompt_action()", "title": "" }, { "docid": "25a91f3ce603a543fef2c1fb7a9e5143", "score": "0.5689917", "text": "def process_commands(message):\n tokens = message.split(DELIMITER)\n command = tokens[0]\n\n if command == 'ROLL':\n roll_command()\n elif command == 'ATCK':\n atck_command(tokens)\n elif command == 'STOP':\n stop_command()\n else:\n return", "title": "" }, { "docid": "76cc209435d1ceab28f330ef858c70f7", "score": "0.56875515", "text": "def run(self, command: List[str]) -> None:\n pass", "title": "" }, { "docid": "a3698d767ad2ca74a80357b14df3608f", "score": "0.5684417", "text": "def walk_commands(self):\n ...", "title": "" }, { "docid": "b729e178efc8c90b1789b83a8f36c027", "score": "0.5684122", "text": "def systemCommands():\r\n if len(inp)<2:\r\n print \" provide the system command to be executed\\n\"\r\n return\r\n else:\r\n cmd = ' '.join(inp[1:])\r\n #print cmd\r\n os.system(cmd)\r\n print ''", "title": "" }, { "docid": "5161558fc59f8af00321d6f2fd0dfc12", "score": "0.56783134", "text": "def run_mult_commands(cdata, command_list):\n \n cmd_queue = CommandQueue(*command_list)\n\n out_string = run_command(cdata, cmd_queue.combine())\n\n return out_string", "title": "" }, { "docid": "a6a7685803ccb8f8ac845dae66784ba3", "score": "0.56733084", "text": "def jdsu_commands():", "title": "" }, { "docid": "1b12a776c9464593442a6eccdd2c122a", "score": "0.5668443", "text": "def run_cmd(self, A, cmd):\n exec \"target = A.%s\" % cmd\n if type(target) == dict:\n print(target)\n else:\n print(target())\n again = raw_input('\\n --> Run another command? 
')\n if again in dir(A):\n self.run_cmd(A, again)", "title": "" }, { "docid": "fa9bf18086cde60dc520fba1e4d15f1f", "score": "0.56650007", "text": "def processor(command):\n command()", "title": "" }, { "docid": "65d6b9f7be1fa0d793ee035dae554664", "score": "0.5650991", "text": "def run_menu_cmd():\r\n\r\n apartments = []\r\n commands = {'add': ui_add_expense, 'remove': ui_remove_expense, 'replace': ui_replace_expense,\r\n 'list': ui_list_expenses, 'info': ui_print_all_options, 'print': print_all_occupied_apartments,\r\n 'occupy': ui_occupy_apartment, 'clear': ui_clear_apartment, 'sum': ui_sum_expense_type,\r\n 'sort': ui_display_sorted, 'max': ui_max_apartment_expense, 'filter': ui_filter}\r\n\r\n fns.fill_random_apartments(apartments)\r\n\r\n # We need a list that will keep track of all of our states throughout the program's life, so we can undo.\r\n # Everytime we perform an operation that modifies data in our program (so commands like 'add', 'remove',\r\n # 'replace', or 'filter') we need to store the state of the data before the operation, so in case we want\r\n # to restore the previous state, we can do it.\r\n\r\n states = []\r\n commands_through_time = []\r\n current_state = deepcopy(apartments)\r\n\r\n while True:\r\n try:\r\n cmd_line = input(\"Please enter your command\\n\"\r\n \"(Enter 'info' if you want to see all available commands)\\n\")\r\n # First, parse the cmd_line into something like ('add', [15, 'water', 200])\r\n cmd, args = fns.parse_command_and_arguments(cmd_line)\r\n\r\n # print(cmd, args)\r\n\r\n # Then, get all the needed arguments for all possible cases into just one variable than we unpack\r\n # when calling functions. This is done in order to avoid a long if/elif/else block\r\n args = helper_run_menu_cmd(apartments, commands, cmd, args)\r\n\r\n if cmd == 'exit':\r\n return\r\n\r\n elif cmd == 'undo':\r\n apartments, current_state = ui_undo(apartments, states, current_state, commands_through_time)\r\n\r\n elif cmd in commands.keys():\r\n commands[cmd](*args)\r\n\r\n else:\r\n print(\"Invalid command! Try again.\")\r\n\r\n except (ValueError, TypeError, IndexError) as err:\r\n print(str(err))\r\n # KeyError automatically adds quotation marks, so we treat it separately, ignoring the first and last elements\r\n except KeyError as ke:\r\n print(str(ke)[1:-1])\r\n\r\n else:\r\n if cmd in ['add', 'remove', 'replace', 'occupy', 'clear', 'filter']:\r\n commands_through_time.append(cmd_line)\r\n current_state = fns.check_state_change(apartments, states, current_state)\r\n\r\n print()", "title": "" }, { "docid": "150390600c21530e97594ed49964ff5b", "score": "0.56502014", "text": "def accept_commands(self):\n command = self.get_command()\n try:\n if self.running:\n self.exec_command(command)\n except KeyboardInterrupt:\n print \"\\n *** KeyboardInterrupt: Command interrupted! 
***\"\n except Exception:\n print traceback.format_exc()", "title": "" }, { "docid": "4cac18c45c93f430461d93353597a844", "score": "0.5621042", "text": "def _handle_command(self, command: Command) -> Optional[List[Command]]:", "title": "" }, { "docid": "01cef601943c142d5b53b0f5096049a7", "score": "0.56205", "text": "def main(command):\r\n case = {\r\n 1 : insert,\r\n 2 : select,\r\n 3 : update,\r\n 4 : delete\r\n }\r\n db_func = case.get(command)\r\n\r\n while command != 5:\r\n db_func()\r\n print(\"1 = Insert, 2 = Select, 3 = Update, 4 = Delete, 5 = Quit\")\r\n command = int(input(\"Input new command: \"))\r\n db_func = case.get(command)\r\n else:\r\n print(\"End\")", "title": "" }, { "docid": "31d1c24160315216180d60d8c1c3265e", "score": "0.56190354", "text": "def select_ops(cls,ops, selected):\n if ops == ():\n return ()\n assert issubclass(type(ops[0]), tuple), 'RoadImage.select_ops: BUG ops format is invalid: '+repr(ops)\n ret = ()\n for op in ops:\n if op[0] in selected: ret += (op,)\n return ret", "title": "" }, { "docid": "1ee208f10bd4188d63f76b415328af9e", "score": "0.5609748", "text": "def add_commands(self, commands):\n for command in commands:\n self.add_command(command)", "title": "" }, { "docid": "fed60add64cdd583ba52ac537694110f", "score": "0.5589346", "text": "def _dispatch(self, cmd: List[str]) -> None:\n output = self._run_command(cmd)\n if output is None:\n return None\n self._parse(output)\n return None", "title": "" }, { "docid": "af4c014094383e5890b4569c2618d96e", "score": "0.55812687", "text": "def execute_command(command):\n\n if 0 == len(command):\n return\n\n if command[0] == \"go\": \n# Guard_in_the_room() #check's if guard in room\n valid = is_valid_exit(current_room[\"exits\"],command[1])\n if len(command) > 1 and valid is True:\n execute_go(command[1])\n else:\n print(\"Go where?\")\n\n elif command[0] == \"take\":\n if len(command) > 1:\n execute_take(command[1])\n else:\n print(\"Take what?\")\n\n elif command[0] == \"drop\":\n if len(command) > 1:\n execute_drop(command[1])\n else:\n print(\"Drop what?\")\n\n elif command[0] == \"trick\":\n if len(command) >1:\n execute_trick()\n else:\n print(\"Rrick with what?\")\n\n elif command[0] == \"kill\":\n if len(command)>1:\n execute_kill()\n else:\n print(\"Kill with what?\")\n\n elif command[0] == \"steal\":\n if len (command)>1:\n execute_steal()\n else:\n print (\"Steal what?\")\n else:\n print(\"This makes no sense.\")", "title": "" }, { "docid": "785c343b1e4a3378045d462121b03a53", "score": "0.55796015", "text": "def test_sub(self):\n for name, cmd in (('a', '-'), ('b', '.'), ('c', '/')):\n intp = ABCRInterpreter()\n intp.r.put(3)\n q = getattr(intp, name)\n q.append(10)\n q.append(14)\n intp.run(cmd)\n self.assertEqual(list(q), [14])\n self.assertEqual(intp.r.get(), -7)", "title": "" }, { "docid": "3c4c8c4aa74deae5af12954e16e23f03", "score": "0.557003", "text": "def get_commands():\n for command in content:\n yield command", "title": "" }, { "docid": "945d536741d133ce0a61df68b6ac7080", "score": "0.55614054", "text": "def process_commands( full_command,\n state = misc.read_yaml(STATE_PATH),\n curves = curves):\n\n state_old = state.copy()\n warnings = []\n\n # Processing the full_command, if command is NOT valid, will do nothing.\n command, arg, add, command_is_valid = read_full_command(full_command)\n if not command_is_valid:\n # Do nothing\n warnings.append('Command error')\n return (state, warnings)\n\n # We need to cal gain because we only manage 'level' inside the state file\n gain = misc.calc_gain( 
state['level'] )\n\n\n ###########################################################\n # 'change_xxxxx' are the actions to be done as parsed below\n ###########################################################\n\n def change_mono(mono):\n # this is a pseudo command just for backwards compatibility\n # here we translate mono:on|off to midside:on|off\n \n try:\n tmp = {\n 'on': 'mid',\n 'off': 'off',\n 'toggle': {'off':'mid', 'side':'off', 'mid':'off'\n }[ state['midside'] ]\n }[mono]\n return change_midside(tmp, state=state)\n\n except KeyError:\n warnings.append('Command \\'mono ' + arg + '\\' is incorrect')\n \n\n def change_midside(midside, state=state):\n\n if midside in ['mid', 'side', 'off']:\n state['midside'] = midside\n try:\n if state['midside']=='mid':\n bf_cli( 'cfia 0 0 m0.5 ; cfia 0 1 m0.5 ;'\n 'cfia 1 0 m0.5 ; cfia 1 1 m0.5 ')\n\n elif state['midside']=='side':\n bf_cli( 'cfia 0 0 m0.5 ; cfia 0 1 m-0.5 ;'\n 'cfia 1 0 m0.5 ; cfia 1 1 m-0.5 ')\n\n elif state['midside']=='off':\n bf_cli( 'cfia 0 0 m1 ; cfia 0 1 m0 ;'\n 'cfia 1 0 m0 ; cfia 1 1 m1 ')\n except:\n state['midside'] = state_old['midside']\n warnings.append('Something went wrong when changing '\n 'midside state')\n else:\n state['midside'] = state_old['midside']\n warnings.append('bad midside option: has to be \"mid\", \"side\"'\n ' or \"off\"')\n return state\n\n\n def change_solo(solo, state=state):\n # new function w/o bf_cli, it delegates to change_gain()\n\n if solo in ['off', 'l', 'r']:\n state['solo'] = solo\n try:\n state = change_gain(gain)\n except:\n state['solo'] = state_old['solo']\n warnings.append('Something went wrong '\n 'when changing solo state')\n else:\n state['solo'] = state_old['solo']\n warnings.append('bad solo option: has to be \"l\", \"r\" or \"off\"')\n\n return state\n\n\n def change_mute(mute, state=state):\n\n try:\n state['muted'] = {\n 'on': True,\n 'off': False,\n 'toggle': not state['muted']\n }[mute]\n except KeyError:\n state['muted'] = state_old['muted']\n warnings.append('Option ' + arg + ' incorrect')\n return state\n\n try:\n state = change_gain(gain)\n except:\n state['muted'] = state_old['muted']\n warnings.append('Something went wrong when changing mute state')\n\n return state\n\n\n def change_loudness_track(loudness_track, state=state):\n\n try:\n state['loudness_track'] = {\n 'on': True,\n 'off': False,\n 'toggle': not state['loudness_track']\n }[loudness_track]\n except KeyError:\n state['loudness_track'] = state_old['loudness_track']\n warnings.append('Option ' + arg + ' incorrect')\n return state\n try:\n state = change_gain(gain)\n except:\n state['loudness_track'] = state_old['loudness_track']\n warnings.append('Something went wrong when changing loudness_track state')\n return state\n\n\n def change_loudness_ref(loudness_ref, state=state, add=add):\n try:\n state['loudness_ref'] = (float(loudness_ref)\n + state['loudness_ref'] * add)\n state = change_gain(gain)\n except:\n state['loudness_ref'] = state_old['loudness_ref']\n warnings.append('Something went wrong when changing loudness_ref state')\n return state\n\n\n def change_treble(treble, state=state, add=add):\n\n try:\n state['treble'] = (float(treble)\n + state['treble'] * add)\n state = change_gain(gain)\n except:\n state['treble'] = state_old['treble']\n warnings.append('Something went wrong when changing treble state')\n return state\n\n\n def change_bass(bass, state=state, add=add):\n\n try:\n state['bass'] = (float(bass)\n + state['bass'] * add)\n state = change_gain(gain)\n except:\n state['bass'] = 
state_old['bass']\n warnings.append('Something went wrong when changing bass state')\n return state\n\n\n def change_level(level, state=state, add=add):\n try:\n state['level'] = ( float(level) + state['level'] * add )\n gain = misc.calc_gain( state['level'] )\n state = change_gain(gain)\n except:\n state['level'] = state_old['level']\n warnings.append('Something went wrong when changing %s state'\n % command)\n return state\n\n\n def change_gain(gain, state=state):\n \"\"\"change_gain, aka 'the volume machine' :-)\"\"\"\n\n # gain command send its str argument directly\n gain = float(gain)\n\n def change_eq():\n\n eq_str = ''\n l = len(curves['freq'])\n for i in range(l):\n eq_str = eq_str + str(curves['freq'][i]) + '/' + str(eq_mag[i])\n if i != l:\n eq_str += ', '\n bf_cli('lmc eq \"c.eq\" mag ' + eq_str)\n eq_str = ''\n for i in range(l):\n eq_str = eq_str + str(curves['freq'][i]) + '/' + str(eq_pha[i])\n if i != l:\n eq_str += ', '\n bf_cli('lmc eq \"c.eq\" phase ' + eq_str)\n\n\n def change_loudness():\n\n # Curves available:\n loud_i_min = 0\n loud_i_max = curves['loudness_mag_curves'].shape[1] - 1\n # and the flat one:\n loud_i_flat = CONFIG['loudness_index_flat']\n \n if state['loudness_track']:\n loud_i = loud_i_flat - state['level'] - state['loudness_ref']\n else:\n loud_i = loud_i_flat\n \n # clamp index and convert to integer\n loud_i = max( min(loud_i, loud_i_max), loud_i_min )\n loud_i = int(round(loud_i))\n\n eq_mag = curves['loudness_mag_curves'][:,loud_i]\n eq_pha = curves['loudness_pha_curves'][:,loud_i]\n return eq_mag, eq_pha\n\n\n def change_treble():\n\n treble_i = CONFIG['tone_variation'] - state['treble']\n if treble_i < 0:\n treble_i = 0\n if treble_i > 2 * CONFIG['tone_variation']:\n treble_i = 2 * CONFIG['tone_variation']\n # force integer\n treble_i = int(round(treble_i))\n eq_mag = curves['treble_mag'][:,treble_i]\n eq_pha = curves['treble_pha'][:,treble_i]\n state['treble'] = CONFIG['tone_variation'] - treble_i\n return eq_mag, eq_pha\n\n\n def change_bass():\n\n bass_i = CONFIG['tone_variation'] - state['bass']\n if bass_i < 0:\n bass_i = 0\n if bass_i > 2 * CONFIG['tone_variation']:\n bass_i = 2 * CONFIG['tone_variation']\n # force integer\n bass_i = int(round(bass_i))\n eq_mag = curves['bass_mag'][:,bass_i]\n eq_pha = curves['bass_pha'][:,bass_i]\n state['bass'] = CONFIG['tone_variation'] - bass_i\n return eq_mag, eq_pha\n\n\n def commit_gain():\n\t\n bf_atten_dB_L = gain\n bf_atten_dB_R = gain\n # add balance dB gains\n if abs(state['balance']) > CONFIG['balance_variation']:\n state['balance'] = m.copysign(\n CONFIG['balance_variation'] ,state['balance'])\n bf_atten_dB_L = bf_atten_dB_L - (state['balance'] / 2)\n bf_atten_dB_R = bf_atten_dB_R + (state['balance'] / 2)\n\n # From dB to a multiplier to implement easily\n # polarity and mute.\n # Then channel gains are the product of\n # gain, polarity, mute and solo\n\n m_mute = {True: 0, False: 1}[ state['muted'] ]\n\n #m_polarity_L = {'+' : 1, '-' : -1,\n # '+-': 1, '-+': -1 }[ state['polarity'] ]\n #m_polarity_R = {'+' : 1, '-' : -1,\n # '+-': -1, '-+': 1 }[ state['polarity'] ]\n\t\t\t#\n # 'simplepre' version:\n m_polarity_L = 1\n m_polarity_R = 1\n\n m_solo_L = {'off': 1, 'l': 1, 'r': 0}[ state['solo'] ]\n\n m_solo_R = {'off': 1, 'l': 0, 'r': 1}[ state['solo'] ]\n\n m_gain = lambda x: m.pow(10, x/20) * m_mute\n m_gain_L = ( m_gain( bf_atten_dB_L )\n * m_polarity_L * m_solo_L )\n m_gain_R = ( m_gain( bf_atten_dB_R )\n * m_polarity_R * m_solo_R )\n\n # commit final gain change will be applied to the\n # 
'from filters' input section on drc filters (cffa)\n bf_cli( 'cffa \"f.drc.L\" \"f.eq.L\" m' + str(m_gain_L)\n + ' ; cffa \"f.drc.R\" \"f.eq.R\" m' + str(m_gain_R))\n\n\n # backs up actual gain\n gain_old = gain\n # EQ curves: loudness + treble + bass\n l_mag, l_pha = change_loudness()\n t_mag, t_pha = change_treble()\n b_mag, b_pha = change_bass()\n # compose EQ curves with target\n eq_mag = curves['target_mag'] + l_mag + t_mag + b_mag\n eq_pha = curves['target_pha'] + l_pha + t_pha + b_pha\n # calculate headroom\n headroom = misc.calc_headroom(gain, abs(state['balance']/2), eq_mag)\n # if enough headroom commit changes\n if headroom >= 0:\n commit_gain()\n change_eq()\n state['level'] = misc.calc_level(gain)\n # if not enough headroom tries lowering gain\n else:\n change_gain(gain + headroom)\n print('headroom hitted, lowering gain...')\n return state\n\n ##########################################################\n ## parsing commands and selecting the corresponding action\n ##########################################################\n try:\n state = {\n 'solo': change_solo,\n 'mono': change_mono,\n 'midside': change_midside,\n 'mute': change_mute,\n 'loudness_track': change_loudness_track,\n 'loudness_ref': change_loudness_ref,\n 'treble': change_treble,\n 'bass': change_bass,\n 'level': change_level,\n 'gain': change_gain\n }[command](arg)\n \n except KeyError:\n warnings.append(f\"Unknown command '{command}'\")\n \n except:\n warnings.append(f\"Problems in command '{command}'\")\n\n # return a dictionary of predic state\n return (state, warnings)", "title": "" }, { "docid": "4c67e735f19eaa08c55a28775ed0de73", "score": "0.55580604", "text": "def select(self):\n self.__function()", "title": "" }, { "docid": "5b6cbdc6532f939f4b46cab3c23607fc", "score": "0.5557848", "text": "def Command():", "title": "" }, { "docid": "59b7892c4809bc04b64d9bbc3fb4a358", "score": "0.5556997", "text": "async def commands(self, command_payload: CommandPayload) -> None:\n raise NotImplementedError('commands must be implemented')", "title": "" }, { "docid": "ece2c5e750cea8642598fb979d73faac", "score": "0.55549514", "text": "def CmdKeyExecute(self, cmd):", "title": "" }, { "docid": "5c87a288c6ccb2f4b29e8f7f4ff6ce73", "score": "0.5553779", "text": "def _execute(self, cmd):\n self.command_ready.wait()\n self.command_ready.clear()\n logger.debug(\"mutex locked\")\n\n self.command_result[:] = []\n self.current_command = cmd\n logger.debug(\"LPCWR : %r\", cmd)\n\n if not isinstance(cmd, basestring):\n logger.error(\"invalid command : \\\"command must be a string\\\"\")\n raise TypeError(\"command must be a string\")\n\n if not cmd:\n logger.error(\"invalid command : \\\"zero-length command\\\"\")\n raise ValueError(\"zero-length command\")\n\n if self.is_running():\n self.__subprocess.stdin.write('%s\\n' % cmd)\n time.sleep(.2)\n self.__subprocess.stdin.write('ec show\\n')\n\n self.command_ready.wait()\n result = list(self.command_result)\n return result", "title": "" }, { "docid": "2c62f741a17a915b936c3ace7a5ee3c2", "score": "0.55519044", "text": "def add_subcommands(self, *cmds):\n for cmd in cmds:\n self.add_subcommand(cmd)", "title": "" }, { "docid": "d199ff10278d3f906a3d8af41fc1cd48", "score": "0.5542479", "text": "def main():\n import sys\n from knpackage.toolbox import get_run_directory_and_file\n from knpackage.toolbox import get_run_parameters\n \n run_directory, run_file = get_run_directory_and_file(sys.argv)\n run_parameters = get_run_parameters(run_directory, run_file)\n 
SELECT[run_parameters[\"method\"]](run_parameters)", "title": "" }, { "docid": "3c2b9bde62f74257577617762218cf74", "score": "0.5538757", "text": "def execute_commands_vm_app(self, commands):\n if self.client is None:\n self.client = self.remote_connect()\n for cmd in commands:\n stdin, stdout, stderr = self.client.exec_command(cmd)\n stdout.channel.recv_exit_status()\n response = stdout.readlines()\n for line in response:\n logger.info(f'INPUT: {cmd} | OUTPUT: {line}')\n return line", "title": "" }, { "docid": "f24b994d94b35f23fcaa3a50a4e20800", "score": "0.55346245", "text": "def run(addr, *commands, **opts):\n results = []\n handler = VarnishHandler(addr, **opts)\n for cmd in commands:\n if isinstance(cmd, tuple) and len(cmd)>1:\n results.extend([getattr(handler, c[0].replace('.','_'))(*c[1:]) for c in cmd])\n else:\n results.append(getattr(handler, cmd.replace('.','_'))(*commands[1:]))\n break\n handler.close()\n return results", "title": "" }, { "docid": "e6592a5aa9b09e18070c6122045a060d", "score": "0.5532251", "text": "def execute():", "title": "" }, { "docid": "55560b15eda3337e74752d898d4db090", "score": "0.5532039", "text": "def _cmd(self, command, wait_str=r'\\w:\\\\\\w*', timeout=120, confirm=True):\n\n all_info = ''\n if confirm:\n wait_str += '|y/n|yes/no'\n info, is_match, match_str = self.execute(command, wait_str, timeout)\n while match_str == 'y/n' and confirm:\n all_info += info\n info, is_match, match_str = self.execute('y', wait_str, timeout)\n while match_str == 'yes/no' and confirm:\n all_info += info\n info, is_match, match_str = self.execute('yes', wait_str, timeout)\n\n all_info += info\n\n return info, is_match, match_str", "title": "" }, { "docid": "fdb0d82c23a4c3c02ff9bf009b8ec53a", "score": "0.55316883", "text": "def commands(self,n):\n return ''", "title": "" }, { "docid": "aa53a118bd0c112bcdb5e91adf78a5c7", "score": "0.55312264", "text": "async def execute_many(self, command: str, args=None, *, timeout: float=None):", "title": "" }, { "docid": "7a103ce3bcc9a5cf6eefc46c3b584916", "score": "0.5525909", "text": "def _run_command(commands, options):\n cmd = ['/usr/bin/config']\n if isinstance(commands, list):\n cmd.extend(commands)\n else:\n cmd.append(commands)\n \n for x in options: \n cmd.append(str(x)) \n output =\"\"\n try:\n print(\"cmd\", cmd)\n rc = 0\n output= subprocess.check_output(cmd)\n print('Output -> ',output)\n\n except subprocess.CalledProcessError as err:\n print(\"Exception when calling get_sonic_error -> %s\\n\" %(err))\n rc = err.returncode\n output = err.output\n \n return rc,output", "title": "" } ]
f29e15f6b34b7bdcb12e1ab3ecd86024
Gets the last pressed state and resets it.
[ { "docid": "1e1f3947c6998210f9efe61fc0149b7b", "score": "0.73674", "text": "def getLastPressedState(self):\n theLastPressedState = self.lastPressedState\n self.lastPressedState = self.ButtonPressStates.NOTPRESSED\n return theLastPressedState", "title": "" } ]
[ { "docid": "e1d14403759ef02d19bb64bfa64507ee", "score": "0.69121516", "text": "def checkLastPressedState(self):\n return self.lastPressedState", "title": "" }, { "docid": "e4a783ab340dec881532a8b02984d7cc", "score": "0.6591248", "text": "def _changeInactive(self):\n keys = self.input.key_count \n if keys>0:\n self._state = (self._state + 1) % NUM_STATES", "title": "" }, { "docid": "b1317f3a0b677e409960ff5b584f3d08", "score": "0.65608597", "text": "def RkeyDown(self,event):\n\n self.RkeyStatus = \"pressed\"", "title": "" }, { "docid": "c0a2784ec013427af45d8f46fdbd2496", "score": "0.6559539", "text": "def key_released(self, event):\n self.lastkey = 0", "title": "" }, { "docid": "42e03219256554aee49cad9c885518d6", "score": "0.6509806", "text": "def is_pressed(self):\n return self._state", "title": "" }, { "docid": "1a5d59578746f45a84839f3f438782d2", "score": "0.65024817", "text": "def get_state(self):\n\n result = copy.deepcopy(self.was_pushed)\n self.reset_button()\n return result", "title": "" }, { "docid": "ae8edc622b430ad3cb0bd6a15ea9e7a0", "score": "0.6461682", "text": "def reset_button(self):\n\n self.was_pushed = False\n self.was_released = False", "title": "" }, { "docid": "794f631355641ff254a42c616ed5ae7a", "score": "0.6444824", "text": "def LkeyDown(self,event):\n self.LkeyStatus = \"pressed\"", "title": "" }, { "docid": "59ba4a4e72dc44ec951da3d78a84ac15", "score": "0.64165556", "text": "def reset_state(self, event=None):\n self._state = \"no press\"\n self.schedule_update_ha_state(False)", "title": "" }, { "docid": "42550344f869177d2852a49d1530d899", "score": "0.62671465", "text": "def reset(self):\n self.current_state = self.start", "title": "" }, { "docid": "6ef654029ceb8d98ee0b7b9bf553437d", "score": "0.6241968", "text": "def pressed(self):\n return self._pressed", "title": "" }, { "docid": "cefd8b3eb6cea7967befa4499d6bdda4", "score": "0.6234746", "text": "def RkeyUp(self,event):\n\n self.RkeyStatus = \"unpressed\"\n self.xCoor = None\n self.yCoor = None", "title": "" }, { "docid": "c8db6814a0cc90c42cd9412bfde65317", "score": "0.62215763", "text": "def undoChoice(self, inNewState = False):\n pass", "title": "" }, { "docid": "e721ccaacb7c193a5c7d15b8804cdcfa", "score": "0.62108713", "text": "def change_state_keys(self):\r\n\r\n self.prev_state = self.state\r\n\r\n # HIT animation.\r\n if self.hit:\r\n self.state = 6\r\n self.frame_speed = 25\r\n self.walking = self.running = False\r\n\r\n # ATTACK animation.\r\n elif self.attack:\r\n self.state = 3\r\n self.frame_speed = 75\r\n self.walking = self.running = False\r\n\r\n # RUN animation.\r\n elif self.key_pressed and self.shift:\r\n self.state = 2\r\n self.frame_speed = 100\r\n self.walking = False\r\n self.running = True\r\n\r\n # WALK animation.\r\n elif self.key_pressed:\r\n self.state = 1\r\n self.frame_speed = 125\r\n self.walking = True\r\n self.running = False\r\n\r\n # IDLE animation.\r\n else:\r\n self.state = 0\r\n self.frame_speed = 200\r\n self.walking = self.running = False\r\n\r\n # Reset frame_index if state change is detected.\r\n if self.prev_state != self.state:\r\n self.frame_index = 0", "title": "" }, { "docid": "983f08595949e469346afb512c7ed429", "score": "0.6177766", "text": "def LkeyUp(self,event):\n self.LkeyStatus = \"unpressed\"\n self.xCoor = None\n self.yCoor = None", "title": "" }, { "docid": "8eab6c647417e8b3bb77b92ca211ad2c", "score": "0.6139709", "text": "def was_pressed(self):\n return self._state and self._changed", "title": "" }, { "docid": "85cf675b1d413f5b590b392fa461e69c", "score": 
"0.61333156", "text": "def reset_state(self) -> None:", "title": "" }, { "docid": "a2f4c11cf8606425d3c7fca67442b6d3", "score": "0.6087481", "text": "def reset_state(self):\n self.curr_state = self.initial_state_id", "title": "" }, { "docid": "eb566cfc0a8b0d241d81e3828e487f74", "score": "0.60642964", "text": "def _changeRestart(self):\n keys = self.input.key_count \n if keys>0:\n print self._state\n self._state = STATE_NEWGAME", "title": "" }, { "docid": "c9de751550d6e24c2132b8acb575fa2e", "score": "0.60481447", "text": "def handle_touch(self):\n if self.state == self.parent.selected_state:\n new_state = States.DEACTIVATED\n else:\n new_state = self.parent.selected_state\n self.set_state(new_state)\n return new_state", "title": "" }, { "docid": "3682e5de1878fc34b7235820f09ae1e2", "score": "0.6023721", "text": "def reset (self):\n self.current_state = self.initial_state\n self.input_symbol = None", "title": "" }, { "docid": "06c4357892665a9f4e3d46224a1ae9ac", "score": "0.60002697", "text": "def _detect_s(self):\n curr_keys=self.input.is_key_down('spacebar')\n if curr_keys==True and self.last_keys == False:\n self._state=self._state+1\n _text=None\n self.last_keys==curr_keys", "title": "" }, { "docid": "d46eda85ab10baf6b9230aa4cec53326", "score": "0.5988716", "text": "def key_up(self):\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONUP:\n self.click = False", "title": "" }, { "docid": "f4be5de71bcb5c2f1cd39b9baeead6e3", "score": "0.598005", "text": "def reset_keys(self):\n self.up_key, self.down_key, self.right_key = False, False, False\n self.left_key, self.start_key, self.back_key = False, False, False\n self.quit_key, self.zero_key, self.one_key = False, False, False\n self.two_key, self.three_key, self.four_key = False, False, False", "title": "" }, { "docid": "9173b878976041d6bb0dd9d91228fcc5", "score": "0.597376", "text": "def __set_key_state(key: str, state: str):\n\n global pressed_keys\n\n if state == \"down\":\n pressed_keys[key] = True\n elif state == \"up\":\n pressed_keys[key] = False", "title": "" }, { "docid": "7aa3e621167748bfdefea0b1e79a843e", "score": "0.5968503", "text": "def reset(self):\n self.cur_states = (self.start_points[0][0], self.start_points[0][1], 0, 0)", "title": "" }, { "docid": "d884611ec87a06a62d4788428b0a26e1", "score": "0.59668523", "text": "def switch(self):\r\n self.state = not self.state", "title": "" }, { "docid": "ff2d916088188ca2d806bc99fb6911ca", "score": "0.5945858", "text": "def on_press(self):\n game_screen = self.parent.parent.parent\n\n if not game_screen.recv_input:\n self.state = \"normal\"\n else:\n self.flash()\n game_screen.button_press(self.index)", "title": "" }, { "docid": "0e24c3e80540cab4da94159de5a76a4f", "score": "0.5939392", "text": "def prev(self): \n key = int(self.tkvar.get()) - 1\n key = max(0, key)\n key = min(key, len(self.dataset)-1)\n self.tkvar.set(str(key))", "title": "" }, { "docid": "020c6fce8a0127f633ed88042a040895", "score": "0.5922679", "text": "def _activ_key(self, *args):\n self.keyboard = None", "title": "" }, { "docid": "c7fc6cc6b3d77cc7f0b037c402c13941", "score": "0.59021086", "text": "def reset_click_status(self):\r\n\t\tself.clicked = False", "title": "" }, { "docid": "0e7d34d9e79a2c947a2a71cabf63e874", "score": "0.59018266", "text": "def state_locker():\n\n import sys\n import tty\n import termios\n\n print('\\nEnter anything to exit')\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n\n try:\n tty.setraw(sys.stdin.fileno())\n ch = sys.stdin.read(1)\n\n finally:\n 
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)", "title": "" }, { "docid": "8ee23458258e0f07f54bdac6d2633a63", "score": "0.5898576", "text": "def exit_key_pressed():\n\tglobal going\n\tgoing = 0\n\tappuifw.app.exit_key_handler = None\n\tlock.signal()", "title": "" }, { "docid": "a55be08182926b0f13ffd8f624577a76", "score": "0.58929443", "text": "def unpress_all():\r\n start_button.Pressed = False\r\n rules_button.Pressed = False\r\n rules_button2.Pressed = False", "title": "" }, { "docid": "d08f0c3b10956bff3801b785520b13fd", "score": "0.58862054", "text": "def reset(self):\n\n self.nextstate = -1\n self.command = -1\n self.goal = False", "title": "" }, { "docid": "cae91bcacc6884d97de6a1f6da11e4cd", "score": "0.58760613", "text": "def reset_state(self):\n pass", "title": "" }, { "docid": "cae91bcacc6884d97de6a1f6da11e4cd", "score": "0.58760613", "text": "def reset_state(self):\n pass", "title": "" }, { "docid": "313e6932ffa40ad0115e0d95534a324d", "score": "0.58562094", "text": "def reset_states(self):\n pass", "title": "" }, { "docid": "0592410c80092fa51f520ce8b0b98dd1", "score": "0.5841121", "text": "def key(state):\n \n # TODO\n pass", "title": "" }, { "docid": "ca77b48e52cf66256d751c0388364ae2", "score": "0.5836385", "text": "def update_key_status(self,evt):\n if evt=='left-up':\n self.left_down = False\n elif evt=='left-down':\n self.left_down = True\n if evt=='right-up':\n self.right_down = False\n elif evt=='right-down':\n self.right_down = True", "title": "" }, { "docid": "90e66338b1ac71f7d532102be28ce7b0", "score": "0.5824556", "text": "def press(self, state_machine, switch_event):\n state_machine.change_state(ManySwitch())\n return [\n ProtoKeyEvent('down', switch_event, False),\n ProtoKeyEvent('reset', switch_event, True)]", "title": "" }, { "docid": "104a1d77c83e4af48d50abd1f06d912b", "score": "0.5813447", "text": "def key_pressed(self, event):\n if self.lastkey == event.key:\n if self.key_repeat_delay > 0:\n self.key_repeat_delay -= 1\n if self.key_repeat_delay == 0:\n self.key_repeat_delay = self.key_repeat\n for l in self.listeners:\n l.handle_key(self.lastkey)\n else:\n self.key_repeat_delay = self.key_repeat\n for l in self.listeners:\n l.handle_key(event.key)\n self.lastkey = event.key", "title": "" }, { "docid": "ea2915b2ce8a8c4eb8ae7aa3552397d9", "score": "0.58063227", "text": "def get_keys_down(self):\r\n return self.key_states\r\n\r\n #old key coding ideas\r\n depricated_key_codes = [\r\n ('a','A')\r\n ('b','B')\r\n ('c','C'),\r\n ('d','D'),\r\n ('e','E'),\r\n ('f','F'),\r\n ('g','G'),\r\n ('h','H'),\r\n ('i','I'),\r\n ('j','J'),\r\n ('k','K'),\r\n ('l','L'),\r\n ('m','M'),\r\n ('n','N'),\r\n ('o','O'),\r\n ('p','P'),\r\n ('q','Q'),\r\n ('r','R'),\r\n ('s','S'),\r\n ('t','T'),\r\n ('u','U'),\r\n ('v','V'),\r\n ('w','W'),\r\n ('x','X'),\r\n ('y','Y'),\r\n ('z','Z'),\r\n\r\n (' ',' '),\r\n\r\n (',','<'),\r\n ('.','>'),\r\n ('/','?'),\r\n (';',':'),\r\n ('\\'','\"'),\r\n ('[','{'),\r\n (']','}'),\r\n ('\\\\','|'),\r\n ('-','_'),\r\n ('=','+'),\r\n ('`','~'),\r\n\r\n ('1','!'),\r\n ('2','@'),\r\n ('3','#'),\r\n ('4','$'),\r\n ('5','%'),\r\n ('6','^'),\r\n ('7','&'),\r\n ('8','*'),\r\n ('9','('),\r\n ('0',')'),\r\n\r\n ('UP'),\r\n ('DOWN'),\r\n ('LEFT'),\r\n ('RIGHT'),\r\n ('ENTER'),\r\n ('SHIFT'),\r\n ('BACKSPACE')\r\n ]\r\n #condensed output : ascii keycode\r\n key_map = {\r\n 0:65, #A\r\n 1:66, #B\r\n 2:67, #C...\r\n 3:68,\r\n 4:69,\r\n 5:70,\r\n 6:71,\r\n 7:72,\r\n 8:73,\r\n 9:74,\r\n 10:75,\r\n 11:76,\r\n 12:77,\r\n 13:78,\r\n 14:79,\r\n 15:80,\r\n 16:81,\r\n 
17:82,\r\n 18:83,\r\n 19:84,\r\n 20:85,\r\n 21:86,\r\n 22:87,\r\n 23:88, #...X\r\n 24:89, #Y\r\n 25:90, #Z\r\n\r\n 26:32, #SPACE\r\n\r\n 27:48, #0\r\n 28:49, #1..\r\n 29:50,\r\n 30:51,\r\n 31:52,\r\n 32:53,\r\n 33:54,\r\n 34:55,\r\n 35:56, #..8\r\n 36:57, #9\r\n\r\n 37:188, #,\r\n 38:190, #.\r\n 39:191, #/\r\n 40:186, ##;\r\n 41:222, #'\r\n 42:219, #[\r\n 43:221, #]\r\n 44:220, #\\\r\n 45:189, ##-\r\n 46:187, ##=\r\n 47:192, #`\r\n\r\n 48:0,\r\n 49:0,\r\n 50:0,\r\n 51:0,\r\n 52:0,\r\n 53:0,\r\n 54:0,\r\n 55:0,\r\n\r\n 56:8, #BKSPC\r\n 57:9, #TAB\r\n 58:13, #ENTER\r\n 59:16, #SHIFT\r\n 60:17, #CTLR\r\n 61:18, #ALT\r\n 62:20, #CPSLOCK\r\n 63:27, #ESC\r\n 64:33, #PG UP\r\n 65:34, #PG DOWN\r\n 66:35, #END\r\n 67:36, #HOME\r\n 68:37, #AR LEFT\r\n 69:38, #AR UP\r\n 70:39, #AR RIGHT\r\n 71:40, #AR DOWN\r\n 72:45, #INSERT\r\n 73:46, #DELETE\r\n\r\n 74:112, #F1\r\n 75:113, #F2..\r\n 76:114,\r\n 77:115,\r\n 78:116,\r\n 79:117,\r\n 80:118,\r\n 81:119,\r\n 82:120,\r\n 83:121,\r\n 84:122, #..F11\r\n 85:123, #F12\r\n }\r\n #while I can use puppeteeter.lib.USKeyboardLayout to rev. lookup\r\n # a dictionary of K:V pairs of the form:\r\n # 'KeyA' : {'keyCode':65,'code':'KeyA','shiftKey':'A','key':'a'}\r\n #pyppeteer does not contain a lib dir\r\n #so this map should have everything I need \r\n examplekey={0: {'ascii': 65, 'pypp': 'KeyA'}}\r\n big_fat_key_map = {\r\n (1,66,'KeyB'),\r\n (2,67,'KeyC'),\r\n (3,68,'KeyD'),\r\n (4,69,'KeyE'),\r\n (5,70,'KeyF'),\r\n (6,71,'KeyG'),\r\n (7,72,'KeyH'),\r\n (8,73,'KeyI'),\r\n (9,74,'KeyJ'),\r\n (10,75,'KeyK'),\r\n (11,76,'KeyL'),\r\n (12,77,'KeyM'),\r\n (13,78,'KeyN'),\r\n (14,79,'KeyO'),\r\n (15,80,'KeyP'),\r\n (16,81,'KeyQ'),\r\n (17,82,'KeyR'),\r\n (18,83,'KeyS'),\r\n (19,84,'KeyT'),\r\n (20,85,'KeyU'),\r\n (21,86,'KeyV'),\r\n (22,87,'KeyW'),\r\n (23,88,'KeyX'),\r\n (24,89,'KeyY'),\r\n (25,90,'KeyZ'),\r\n\r\n (26,32,'Space'),\r\n\r\n (27,48,'0'),\r\n (28,49,'1'),\r\n (29,50,'2'),\r\n (30,51,'3'),\r\n (31,52,'4'),\r\n (32,53,'5'),\r\n (33,54,'6'),\r\n (34,55,'7'),\r\n (35,56,'8'),\r\n (36,57,'9'),\r\n 27:48, #0\r\n 28:49, #1..\r\n 29:50,\r\n 30:51,\r\n 31:52,\r\n 32:53,\r\n 33:54,\r\n 34:55,\r\n 35:56, #..8\r\n 36:57, #9\r\n\r\n 37:188, #,\r\n 38:190, #.\r\n 39:191, #/\r\n 40:186, ##;\r\n 41:222, #'\r\n 42:219, #[\r\n 43:221, #]\r\n 44:220, #\\\r\n 45:189, ##-\r\n 46:187, ##=\r\n 47:192, #`\r\n\r\n 48:0,\r\n 49:0,\r\n 50:0,\r\n 51:0,\r\n 52:0,\r\n 53:0,\r\n 54:0,\r\n 55:0,\r\n\r\n 56:8, #BKSPC\r\n 57:9, #TAB\r\n 58:13, #ENTER\r\n 59:16, #SHIFT\r\n 60:17, #CTLR\r\n 61:18, #ALT\r\n 62:20, #CPSLOCK\r\n 63:27, #ESC\r\n 64:33, #PG UP\r\n 65:34, #PG DOWN\r\n 66:35, #END\r\n 67:36, #HOME\r\n 68:37, #AR LEFT\r\n 69:38, #AR UP\r\n 70:39, #AR RIGHT\r\n 71:40, #AR DOWN\r\n 72:45, #INSERT\r\n 73:46, #DELETE\r\n\r\n 74:112, #F1\r\n 75:113, #F2..\r\n 76:114,\r\n 77:115,\r\n 78:116,\r\n 79:117,\r\n 80:118,\r\n 81:119,\r\n 82:120,\r\n 83:121,\r\n 84:122, #..F11\r\n 85:123, #F12\r\n }", "title": "" }, { "docid": "e04df81ec85a036f689395393c648a07", "score": "0.57927376", "text": "def _reset_state(self):\n self.state = self.start_state.copy()", "title": "" }, { "docid": "10e98fa1d8a2dc924ef888b6de800d43", "score": "0.57872003", "text": "def reset(self):\n return self.current", "title": "" }, { "docid": "4bfee518d8df8e967f667efae7dcbbdd", "score": "0.57728577", "text": "def key_handler(self, event):\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_UP:\n self.selected += 1\n elif event.key == pygame.K_DOWN:\n self.selected -= 1\n elif event.key == pygame.K_RETURN or 
event.key == pygame.K_SPACE:\n self.done = True\n\n self.selected %= len(self.options)", "title": "" }, { "docid": "98a70d2bb902a87a13881f890cd7f930", "score": "0.5766682", "text": "def get_state(self):\n return self._button_variable.get()", "title": "" }, { "docid": "fc5b2a74249f52bf1e61c05f0daee9c0", "score": "0.57624304", "text": "def __poll_keyboard():\n\n global pressed_keys\n\n for key in pressed_keys:\n if base.mouseWatcherNode.is_button_down(key):\n __set_key_state(key, \"down\")\n else:\n __set_key_state(key, \"up\")\n base.accept(\"escape\", sys.exit)", "title": "" }, { "docid": "40b1658454302dcc246ff23da4ab162a", "score": "0.57530725", "text": "def _on_press_button(self, touch=None):\n self.state = 'normal'\n # print('pressed on number label')", "title": "" }, { "docid": "efbba4dfe52cee974f3fba521012105d", "score": "0.5748755", "text": "def button_buffer_decrease(self):\n if self.upPressed > 0:\n self.upPressed -= 1 * 0.045\n else:\n self.upPressed = 0\n if self.leftPressed > 0:\n self.leftPressed -= 1 * 0.045\n else:\n self.leftPressed = 0\n if self.rightPressed > 0:\n self.rightPressed -= 1 * 0.045\n else:\n self.rightPressed = 0\n if self.downPressed > 0:\n self.downPressed -= 1 * 0.045\n else:\n self.downPressed = 0", "title": "" }, { "docid": "151440a16f6b82dfc825e93987fef8d6", "score": "0.5747262", "text": "def press_button(self, event):\n if self.cget('state') == 'normal':\n self.config(relief='sunken')\n self.move('all', 1, 1)\n self.pressed = True", "title": "" }, { "docid": "84a9010f0d2013f95716cbcdc8ded6c9", "score": "0.57448673", "text": "def GetButtonState(self, *args, **kw):", "title": "" }, { "docid": "92d1696839e99a5b07fc77ea664d30e2", "score": "0.5742893", "text": "def key_down(self, key):\n\t\tpass", "title": "" }, { "docid": "f5ab2e6d081b96f96bbbf6867354197a", "score": "0.5739214", "text": "def get_key():\r\n while True:\r\n event = pygame.event.poll()\r\n if event.type == pygame.KEYDOWN:\r\n return event.key\r\n else:\r\n pass", "title": "" }, { "docid": "348d1c2a6b78c28efdbb8b01eec1a66b", "score": "0.5732602", "text": "def _changeActive(self):\n if self._game.getVolume() == 1.0:\n if self.input.is_key_down('n'):\n self._game.setVolume(0.0)\n if self._game.getVolume() == 0.0:\n if self.input.is_key_down('y'):\n self._game.setVolume(1.0)\n self._score.text = \"Score: \" + str(self._game.getScore())\n if len(self._game.getBricks()) == 0:\n self._changeComplete()\n if self._game.loseBall():\n self._game.setTries(self._game.getTries() - 1)\n self._state = (self._state-1)%NUM_STATES", "title": "" }, { "docid": "fb12d71a6b71c14d7db0c5f7c922f3e3", "score": "0.5731163", "text": "def isdown(self):\n return self._penstate", "title": "" }, { "docid": "e2ec970dd59f007382b3201907bdf336", "score": "0.5726759", "text": "def is_pressed(self):\n return self.state() & win32defines.TBSTATE_PRESSED == win32defines.TBSTATE_PRESSED", "title": "" }, { "docid": "ed58fc8367a072addff863d11bd0c6fe", "score": "0.5711334", "text": "def _updateButtons( self, button, state ):\n if state == GLUT_UP:\n state = 0\n else:\n state = 1\n index = {\n GLUT_LEFT_BUTTON: 0,\n GLUT_RIGHT_BUTTON: 1,\n GLUT_MIDDLE_BUTTON: 2,\n }.get( button )\n if index is None:\n if button not in (3,4):\n # is a mouse-wheel button \n log.warn( \"Unrecognized button ID: %s\", button, )\n return -1,state\n else:\n self.CURRENTBUTTONSTATES[index] = state\n return index, state", "title": "" }, { "docid": "fe0b38f6eab50c67fb82c06ff77dd88f", "score": "0.56882286", "text": "def stop(self, pressed):\n self.end = time.time()\n 
rt = int((self.end-self.begin) * 1000)\n \n key = self.hist[-1][\"key\"]\n delay = self.hist[-1][\"delay\"]\n \n if rt < delay:\n WARNING(\"key {} was pressed {} ms to early\".format(pressed, delay - rt))\n elif pressed != key:\n WARNING(\"key {} was pressed instead of key {}\".format(pressed, key))\n \n self.hist[-1][\"time\"] = rt\n self.hist[-1][\"react\"] = rt - delay\n self.hist[-1][\"pressed\"] = pressed\n \n if self.hist[-1] in self.wrong_result:\n return \"wrong\"\n if self.hist[-1] in self.early_result:\n return \"early\"\n \n self.current_msm += 1\n return \"valid\"", "title": "" }, { "docid": "9a7a4a8c6f6c37d2a8af8fbacfc50c7b", "score": "0.56731933", "text": "def penup(self):\n self._penstate = False", "title": "" }, { "docid": "6674801b70cf2ab459dfa6ea7c6a665d", "score": "0.56683", "text": "def restoreState(self,state):\n pass", "title": "" }, { "docid": "42edd54f1a9031d8bf6017bef308775e", "score": "0.5666464", "text": "def key_down(cls, val: bool):\n cls._key_down_this_frame = val", "title": "" }, { "docid": "a07a204c0d30c40d3df854747219d132", "score": "0.5666258", "text": "def _reset_state(self):\n raise NotImplementedError", "title": "" }, { "docid": "85af4ee3d36361a060907bb3a51132d0", "score": "0.56399363", "text": "def reset_state(self):\n # Transaction is not necessary here, because reverting it\n # would set the value to the original one anyway.\n if hasattr(self, 'previous_state'):\n self.set_state(self.previous_state)", "title": "" }, { "docid": "d597f4c2ab32c11483f222500c6d0519", "score": "0.5618852", "text": "def button_released(self):\n\n self.was_released = True", "title": "" }, { "docid": "076aabe154b58c6bd194f6e2373d6dfe", "score": "0.56124777", "text": "def reset(self):\n self._state()", "title": "" }, { "docid": "1eb4dac1ff94e57a3eba214b1021e9a2", "score": "0.5602133", "text": "def on_key_release_repeat(self, *dummy):\n self.has_prev_key_release = self.after_idle(self.on_key_release, dummy)", "title": "" }, { "docid": "66a74cf5161dfb0c983aa0c5e2bf1d61", "score": "0.56009763", "text": "def home(self):\n pen_was = self.pen_down\n self.pen_down = False\n self.position = self.initial_position\n self.orientation = 180\n self.pen_down = pen_was", "title": "" }, { "docid": "5e02befdf998af984eb2c7558d2acf25", "score": "0.5598908", "text": "def reset(self):\n self.health = prepare.MAX_HEALTH\n self.direction = self.start_direction\n self.direction_stack = [] #Held keys in the order they were pressed.\n pos = (self.start_coord[0]*prepare.CELL_SIZE[0],\n self.start_coord[1]*prepare.CELL_SIZE[1])\n self.reset_position(pos)\n self.action_state = \"normal\"\n self.hit_state = False #When true hit_state is a tools.Timer instance.\n self.knock_state = False #(direction, tools.Timer()) tuple when true.\n self.death_anim.reset()\n self.shadow = shadow.Shadow((40,20), self.rect)\n self.redraw = True", "title": "" }, { "docid": "225da4190dc8cff661f410bd029734a1", "score": "0.5593716", "text": "def reset(self):\n\t\tself.on()\n\t\tself.set_mode(self.RESET)", "title": "" }, { "docid": "5a71ac5cf8d500af3b86c68f774af6db", "score": "0.55828243", "text": "def reset(self):\n self.curr_state = self.sample_init_state()\n self.done = 0", "title": "" }, { "docid": "935a788dfb347c15d843b6a7ce8b1504", "score": "0.55742115", "text": "def keypress(self, direction):\n if direction == UP and self.state.last_moved != DOWN :\n self.state.last_pressed = UP\n return True\n elif direction == DOWN and self.state.last_moved != UP:\n self.state.last_pressed = DOWN\n return True\n elif direction == RIGHT 
and self.state.last_moved != LEFT:\n self.state.last_pressed = RIGHT\n return True\n elif direction == LEFT and self.state.last_moved != RIGHT:\n self.state.last_pressed = LEFT\n return True\n else:\n # Indicator of a useless move - exact opposite of current direction.\n # In this case, we continue in the last good keypress direction anyway.\n self.state.last_pressed = self.state.last_moved\n return False", "title": "" }, { "docid": "9c1b156355ca6d2e89edbccf69ec9f04", "score": "0.5573532", "text": "def restore(self):\n for signalnum, handler in self._old_signal_states:\n signal.signal(signalnum, handler)\n self._old_signal_states = []", "title": "" }, { "docid": "7f9cc504ebdc9dd35044c6717ad0199a", "score": "0.55683696", "text": "def reset(self):\n if not self.given:\n if self.generated > 24: \n self.switch()\n else:\n self.generated += 1\n self.sim.reset()\n state = np.concatenate([[*self.sim.pose, self.sim.v[2]]] * self.action_repeat) \n return state", "title": "" }, { "docid": "ebd8971cd5055ef12d7928c5a3319639", "score": "0.5568189", "text": "def on_press(self, key):\n if self.pressed_key_count < 6:\n # set state of newly pressed key but consider limit of max. 6 keys\n # pressed at the same time\n try:\n # process printable characters\n k = self.keymap[key.char]\n\n # change keyboard state\n self.change_state(k)\n except AttributeError:\n # process special keys\n k = KEYMAP_SPECIAL_CHARS[key]\n\n # change keyboard state\n self.change_state(k)\n except KeyError:\n print(\"KeyError: {}\".format(key))", "title": "" }, { "docid": "688c037340301a06460dea3158171ee8", "score": "0.5565767", "text": "def press_clear():\n answer.set(\"\")", "title": "" }, { "docid": "7c1042519022600227eeec89907a75ce", "score": "0.55585444", "text": "def clear_state(self):\r\n self._state = _State()", "title": "" }, { "docid": "2de84ac520bceb7c0720029037dfc967", "score": "0.5546885", "text": "def changeState(self):\n if self.__state == 0:\n self.__state = 1\n else:\n self.__state = 0", "title": "" }, { "docid": "639827dfa9f29d0effdf74791680a17a", "score": "0.5536556", "text": "def curstate(self) :\n\t\ttry :\n\t\t\treturn self._curstate\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "639827dfa9f29d0effdf74791680a17a", "score": "0.5536556", "text": "def curstate(self) :\n\t\ttry :\n\t\t\treturn self._curstate\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "15f1dd6f664b00590f86daf534212ee1", "score": "0.55362177", "text": "def reset_click(self):\r\n\r\n self.click = False", "title": "" }, { "docid": "af0b07852a50828a7362634b9833a86c", "score": "0.5529721", "text": "def wait_until_pressed(self):\n # TODO.", "title": "" }, { "docid": "ef695ca649063a23ab0285f28e6a38fb", "score": "0.55284923", "text": "def back(self):\n\n if self.current_sub_menu == self.options_menu:\n self.current_sub_menu = self.main_pause_menu\n elif self.current_sub_menu == self.resolution_options:\n self.current_sub_menu = self.options_menu\n else:\n self.current_sub_menu = self.main_pause_menu\n\n self.selected_option = 0", "title": "" }, { "docid": "78b348bce699b939df260849b8748af0", "score": "0.55215764", "text": "def prev_overlay(self):\n self.current_overlay -= 1", "title": "" }, { "docid": "f98a294ae105b2570234320134c2aa12", "score": "0.55213153", "text": "def reset_state(self):\n self.on = []\n self.off = [c for c in self.all_combos]\n self.next_button = None\n self.flash_timer = 0\n for station in STATE.stations:\n station.buttons.set_all(on=False)\n\n sum_faders = sum(map(lambda station: 
station.fader_value, STATE.stations))\n self.backwards_progress = sum_faders > 51*6 # Faders cranked up - make it harder!\n self.pick_next()", "title": "" }, { "docid": "cf1f7c15b76d7f04ed6a8f8c9a5aac84", "score": "0.5516615", "text": "def on_action_previous(self, *event):\n self.starting_screen.screen_manager.switch_to_previous_screen()\n Window.release_all_keyboards()\n self.starting_screen.screen_manager.set_up_current_screen()", "title": "" }, { "docid": "a53f173778c3e4ba9d4c0ca5217f58bf", "score": "0.55152667", "text": "def game_state(self):\n return", "title": "" }, { "docid": "46c89bdf62d90cdf1673857604bbdea9", "score": "0.5514448", "text": "def resume(self, arg):\n #raise NotImplementedError(\"Should be implemented in AppState subclass\")\n self.hover_button = None\n self.next_state = None", "title": "" }, { "docid": "6fb3e869a3b7c76246e90c7c6c954100", "score": "0.5511082", "text": "def state(self):", "title": "" }, { "docid": "ca3695dac7268a44b9f74d834ec27e3f", "score": "0.55081004", "text": "def on_press(self, key):\n if key == getattr(Key, self.next_key):\n self.wait = False\n if key == getattr(Key, self.interactive_key):\n self.interactive_bash()", "title": "" }, { "docid": "f5bafb5f9af1be649b81f3766ef42b42", "score": "0.5508074", "text": "def state_INACTIVE(self,input):\n self._text=GLabel(text=\"Press 'S' to play\", font_name='RetroGame',\n x=400, y=350)\n if self.input.is_key_down('s'):\n self._state=STATE_NEWWAVE", "title": "" }, { "docid": "14727aec931bdf3d51eeabdd373d0146", "score": "0.55051637", "text": "def prev_scan(self):\n if self.scans_index > 0:\n if self.autosave:\n self.save_scan()\n self.scans_index -= 1\n self.redraw_scan()\n else:\n self.bell()", "title": "" }, { "docid": "abce839c817cc98a8f6e3448634837c0", "score": "0.5498548", "text": "def cmd_resetstate(self):\n self.calibrate_digital()\n self.reset_state()\n self.logger.debug('reset state!')", "title": "" }, { "docid": "b19869240d71add4b75306795f7493fd", "score": "0.5495207", "text": "def _advance_state(self):\n self._state = (\n self._state * self._multiplier + self._increment\n & self._state_mask\n )", "title": "" }, { "docid": "2d993aa1e604794b9a8dda40981c5e96", "score": "0.5494615", "text": "def unclickedMode(self):\n # makes the circles move again\n for circle in self.allcircles:\n circle.vel_x = 50\n\n self.pushnumber = 0", "title": "" }, { "docid": "710e166c4aed84db124d12584c5f8f4b", "score": "0.5492906", "text": "def act_move_back(self):\r\n\t\tself.instance.stop()\r\n\t\tself.state = 'idle'", "title": "" }, { "docid": "af20b585cd4703597680897568e5aa3b", "score": "0.5486601", "text": "def event_m10_27_x24():\r\n \"\"\"State 0,1: End state\"\"\"\r\n return 0", "title": "" }, { "docid": "4b888db20b3bbd532e2813bf5f8b28e2", "score": "0.5486415", "text": "def OnPseudoLeverPress(self,event):\n flag['pseudoPress'] = True", "title": "" }, { "docid": "461d1d051de11b9926e4939d738db085", "score": "0.5478854", "text": "def shortcut_reset(self, event):\n self.reset()", "title": "" }, { "docid": "bed28bc877d7ad786893cfb21022994d", "score": "0.5473056", "text": "def keyboard_event(self, key):", "title": "" } ]
c9fc8d8d5b393f521beefd7d79414612
Check if this LogicSigAccount has been delegated to another account with a signature.
[ { "docid": "a894643b41f72a73529af925ad7ad05b", "score": "0.6835205", "text": "def is_delegated(self) -> bool:\n return bool(self.lsig.sig or self.lsig.msig)", "title": "" } ]
[ { "docid": "e172a971baae36258682374bfad7d59d", "score": "0.575139", "text": "def is_delegated(self):\n return self.assignee is not None", "title": "" }, { "docid": "b3343719ba5f73dc3fc53a6dc830f486", "score": "0.5476073", "text": "def is_delegate(self, user):\n if not user.is_authenticated:\n return False\n role_as = self.get_role(user)\n if role_as and role_as.role.rank == ROLE_RANKING[PROJECT_ROLE_DELEGATE]:\n return True\n return False", "title": "" }, { "docid": "bfef8b061d455e30a91c7f831e452299", "score": "0.5425826", "text": "def __eq__(self, other):\n if not isinstance(other, AccountTransactionCounterparty):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "b9dd60ef397479c68a2744566e9d5cb1", "score": "0.54137963", "text": "def verify_transaction(self):\n if self.signature is None:\n print(\"No signature\")\n return False\n try:\n sender_key = encryption.decode_verify_key(self.sender)\n encryption.decode_verify_key(self.receiver)\n except (ValueError, TypeError) as e:\n logging.error(\"Error while verifying transaction: %s\", e)\n return False\n return encryption.verify_message(sender_key,\n bytes(repr(self), 'utf-8'),\n self.signature)", "title": "" }, { "docid": "cf5caaa0280106c8db3618bd464165a6", "score": "0.5376312", "text": "def validate_delegation_conditions(self):\n if not (self.domain and self.domain.delegated):\n return\n if self.domain.nameserver_set.filter(server=self.fqdn).exists():\n return\n else:\n # Confusing error messege?\n raise ValidationError(\n \"You can only create A records in a delegated domain that \"\n \"have an NS record pointing to them.\"\n )", "title": "" }, { "docid": "e138be37879a52d0b4a20c8b42da23ac", "score": "0.5352924", "text": "def verify_object_belongs_to_me(self, name):\n auth_name = self.get_authority(name)\n if not auth_name:\n auth_name = name \n if name == self.config.SFA_INTERFACE_HRN:\n return\n self.verify_auth_belongs_to_me(auth_name)", "title": "" }, { "docid": "f1d3afb8e5ec0939f09a902f1cce8f91", "score": "0.5346568", "text": "def is_person_delegate( personid ):\n if _get_groups_from_person_by_roletype(personid, 'Delegate'):\n return True\n return False", "title": "" }, { "docid": "41ee16da5fda8e1286aa45f4dbd5a7ca", "score": "0.5296058", "text": "def check(self, request, consumer, token, signature):\n built = self.sign(request, consumer, token)\n return built == signature", "title": "" }, { "docid": "bbd2ba4e3513560f4aabe6ba45ccfe9e", "score": "0.52905923", "text": "def is_successor_of(self, other):\n return (self.chain_serial == other.chain_serial and self.addr == other.addr and\n self.pixel_num == other.pixel_num + 1)", "title": "" }, { "docid": "3e6e77d40a8e5613bb9160ce194f3356", "score": "0.52447087", "text": "def verify(self) -> bool:\n if self.auth_addr:\n addr_to_verify = self.auth_addr\n else:\n addr_to_verify = self.transaction.sender\n\n public_key = encoding.decode_address(addr_to_verify)\n return self.lsig.verify(public_key)", "title": "" }, { "docid": "0d2798f5b610793b455b316363cdea86", "score": "0.5227702", "text": "def has_valid_signature(self):\n if not self.signature or not self.account:\n return False\n\n try:\n self.verify_signature()\n except InvalidSignature:\n return False\n\n return True", "title": "" }, { "docid": "5b4040dba0ef865d39a9cc2204f5a84f", "score": "0.51595545", "text": "def isValid(self) -> bool:\n # if the sender address is None or empty, it means\n # this is a reward transaction\n if self.fromAddress == None:\n return True\n\n if not self.signature or 
len(self.signature) == 0:\n raise Exception('No signature in this transaction')\n\n r, s = int(str(int(self.signature, 0))[:77]), int(str(int(self.signature, 0))[77:])\n pub_key = CurvePoint(int(str(int(self.fromAddress, 0))[:77]), int(str(int(self.fromAddress, 0))[77:]))\n return ecdsa.verify((r, s), self.calculateHash(), pub_key, curve=curve.secp256k1)", "title": "" }, { "docid": "b877a9866dac598b23211fcd5920934d", "score": "0.511347", "text": "def __eq__(self, other):\n if not isinstance(other, AccountResult):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "91ec0bd9c08ca309680ee2a3b887df16", "score": "0.5108233", "text": "def verify_signature(self) -> bool:\n if self.signature is None:\n return False\n data = self.get_id()\n return auth_validate(data, self.signature)", "title": "" }, { "docid": "90e3fe82a92a206874ac7fc9dfa6ff08", "score": "0.50780195", "text": "def _verify_signature(self, result):\n if len(result) != 1:\n return False\n\n # Check the signature is valid:\n signature = result[0]\n if (signature.summary & gpgme.SIGSUM_VALID) == 0:\n return False\n\n # Check the signature matches the right key:\n for subkey in self.key.subkeys:\n if subkey.fpr == signature.fpr:\n break\n else:\n return False\n\n return True", "title": "" }, { "docid": "35bd56aefc5aceac930ce15c8cc48227", "score": "0.5055138", "text": "def has_person_handle(self, handle):\n return handle in self.plist", "title": "" }, { "docid": "d7eebafa6152982a5f594269fc63b286", "score": "0.50404835", "text": "def _authorize_unconfirmed_identity(self, req, obj, referrers, roles):\n # Allow container sync.\n if (req.environ.get('swift_sync_key')\n and (req.environ['swift_sync_key'] ==\n req.headers.get('x-container-sync-key', None))\n and 'x-timestamp' in req.headers):\n log_msg = 'allowing proxy %s for container-sync'\n self.logger.debug(log_msg, req.remote_addr)\n return True\n\n # Check if referrer is allowed.\n if swift_acl.referrer_allowed(req.referer, referrers):\n if obj or '.rlistings' in roles:\n log_msg = 'authorizing %s via referer ACL'\n self.logger.debug(log_msg, req.referrer)\n return True\n return False", "title": "" }, { "docid": "d5681623dcb675b58f4269e9c54fb089", "score": "0.503847", "text": "def __eq__(self, other):\n if not isinstance(other, V1SSHPublicKeyAccessCredentialPropagationMethod):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "dcdccc7f8e57563370af4feac57b845b", "score": "0.50308204", "text": "def check_for_donor(self, name):\n for donor in self.return_donors():\n if name == donor.name:\n return True\n return False", "title": "" }, { "docid": "1e4a1f932c757b96223084aba045e244", "score": "0.50188786", "text": "def verify(self, work, seed, sig):\n goodsig = self.sign(work, seed)\n if goodsig != sig:\n log.debug('Signature mismatch: %s != %s', sig, goodsig)\n return False\n return True", "title": "" }, { "docid": "152ccb95ba785ba64afae0005283c5b4", "score": "0.5016531", "text": "def auth_counterparty(self, nick, btc_sig, auth_pub):\n if not btc.ecdsa_verify(self.crypto_boxes[nick][0], btc_sig, auth_pub):\n log.debug('signature didnt match pubkey and message')\n return False\n return True", "title": "" }, { "docid": "dac0985d1eceaec0c1347bf80fd259d8", "score": "0.5007785", "text": "def is_followed_by(self, other_user):\n\n found_user_list = [\n user for user in self.followers if user == other_user]\n return len(found_user_list) == 1", "title": "" }, { "docid": "6f5bc6cb81314d90e142af4a84935045", "score": "0.5003389", 
"text": "def verify() -> bool:\n serialized = get(AUTH_ADDRESSES)\n auth = cast(list[UInt160], deserialize(serialized))\n for addr in auth: \n if check_witness(addr):\n debug([\"Verification successful\", addr])\n return True\n\n debug([\"Verification failed\", addr])\n return False", "title": "" }, { "docid": "0efa14d0fb0a26ab45c32dc49b7585fe", "score": "0.49938422", "text": "def connect_person_account(self, connect_data):\n\n person_obj = self.get_person_by_id(connect_data[\"person\"])\n account_obj = self.get_account_by_id(int(connect_data[\"account\"]))\n temp_lista = []\n\n for holder in account_obj.holders:\n if holder[-1] == person_obj.id_:\n return False\n\n temp_lista.append(person_obj.name)\n temp_lista.append(person_obj.id_)\n account_obj.add_holder(temp_lista)\n return True", "title": "" }, { "docid": "6fe28303a9fff15fb6e0218ba6e01b11", "score": "0.49938327", "text": "def _check_foreign_user(self, dbus_name):\n uid = yield policykit1.get_uid_from_dbus_name(dbus_name)\n if self.uid != uid:\n raise errors.ForeignTransaction()", "title": "" }, { "docid": "8fbba50c25b51f4e707c58b33c9824bb", "score": "0.49723184", "text": "def __eq__(self, other):\n if not isinstance(other, CloudEndureExtendedAccountInfo):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "ad5f05b713ade8dcea0307464ef80001", "score": "0.49694544", "text": "def __eq__(self, other):\n if not isinstance(other, SubaccountAddRequest):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "ff52fb16e18c3844675efaf9260c3ddc", "score": "0.49655095", "text": "def is_owner_or_delegate(self, user):\n if not user.is_authenticated:\n return False\n role_as = self.get_role(user)\n if role_as and role_as.role.rank in [\n ROLE_RANKING[PROJECT_ROLE_OWNER],\n ROLE_RANKING[PROJECT_ROLE_DELEGATE],\n ]:\n return True\n return False", "title": "" }, { "docid": "66b25a7d5dbe4a6701f95554bacc2dcd", "score": "0.4957335", "text": "def is_following(self, other_user):\n\n found_user_list = [\n user for user in self.following if user == other_user]\n return len(found_user_list) == 1", "title": "" }, { "docid": "06610a5f32ea527835e9db51fd112aee", "score": "0.49550247", "text": "def __eq__(self, other):\n if not isinstance(other, IntegrationAuditLog):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "c47db8b69a2a79eb35c0e24b1779e0e7", "score": "0.4950734", "text": "def __eq__(self, other):\n if not isinstance(other, V1TrialsSnapshotResponseTrial):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "46a8d9dfe0b0afbe8961b2fd67ba4ed9", "score": "0.49434552", "text": "def check_for_delegation(self):\n if not self.domain.delegated:\n return\n if not self.pk: # We don't exist yet.\n raise ValidationError(\"No objects can be created in the {0}\"\n \"domain. 
It is delegated.\".\n format(self.domain.name))", "title": "" }, { "docid": "2118e40da800f0866d0cd51026f6466d", "score": "0.49338436", "text": "def __eq__(self, other):\n if not isinstance(other, RequestSenderIdInputObject):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "f905ef2bd1acdc12959735e848ddfa9f", "score": "0.4931469", "text": "def exists(self, recipient, sender):\n return recipient == sender or sender.pk in self.get_friend_ids(recipient)", "title": "" }, { "docid": "9a6636fb036237c1d357497667f7bda7", "score": "0.49209204", "text": "def check_self_delegation_validity(self_delegate: Coin):\n if self_delegate.get_amount() > 10_000_000_000:\n raise Exception('Self delegation amount exceeds 10.000 DSM')", "title": "" }, { "docid": "e6184ef0b8f47b45f8f42542e556dbab", "score": "0.49183667", "text": "def __ne__(self, other):\n if not isinstance(other, ActionsAuthRecord):\n return True\n\n return self.to_dict() != other.to_dict()", "title": "" }, { "docid": "06e23121f59a676420f47f5fca25b4a1", "score": "0.49122727", "text": "def is_turn_sign(self, sign=None):\n # opponent sign\n opp_sign = self.sign_play[1-self.sign_play.index(sign)]\n return (self.get_state().count(sign) <= self.get_state().count(opp_sign)) and (not self.is_done())", "title": "" }, { "docid": "2e3e425202bafdd265bd193d4a63ca4e", "score": "0.4907237", "text": "def __eq__(self, other):\n if not isinstance(other, AccountingGeneralSetNextDocumentNumberRequest):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "52b04457806f0f5e79815a5cd0e958f5", "score": "0.49050945", "text": "def __eq__(self, other):\n if not isinstance(other, AuthInfos):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "918b94f463927160d45ea65bc3c45f10", "score": "0.48893076", "text": "def __can_get_caller(self, credential):\n try:\n client = boto3.client('sts',\n aws_access_key_id=credential['AccessKeyId'],\n aws_secret_access_key=credential['SecretAccessKey'],\n aws_session_token=credential['SessionToken'],\n region_name='us-west-2'\n )\n client.get_caller_identity()\n\n return True\n except:\n return False", "title": "" }, { "docid": "9f5780786fab316461c3d877b158579d", "score": "0.48884442", "text": "def __eq__(self, other):\n if not isinstance(other, UpdateEvidencePaymentDisputeRequest):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "40a42658fcbd8f0da83d491efe103688", "score": "0.48861864", "text": "def can_lock_account(self):\n return False", "title": "" }, { "docid": "059e0a2054209ce7b185029a189707b8", "score": "0.48827693", "text": "def __eq__(self, other):\n if not isinstance(other, ActionsAuthRecord):\n return False\n\n return self.to_dict() == other.to_dict()", "title": "" }, { "docid": "251d1fc3823e366ab3d573df5e86de3a", "score": "0.48819348", "text": "def verify() -> bool:\n return check_witness(OWNER)", "title": "" }, { "docid": "a50b29ae4a9cd6fcfd8ed67e0320db89", "score": "0.48749626", "text": "def IsTriggeredBy(self, eventObject):\n if not eventObject.IsKindOf(acm.FSettlement):\n return False\n settlement = eventObject\n if not settlement.Trade():\n return False\n trade = settlement.Trade()\n instrument = trade.Instrument()\n if settlement.Status() != 'Authorised':\n return False\n\n if not instrument.AdditionalInfo().Demat_Instrument():\n return False\n\n if trade.Acquirer().Name() not in ALLOWED_ACQUIRERS:\n return False\n\n if settlement.Type() == 'Premium' and trade.Acquirer().Name() in 
FUNDING_ACQUIRERS:\n return False\n\n if check_for_stand_alone_payment(settlement):\n LOGGER.info(\n 'MT202 doc type found for {sett_id}'.format(sett_id=settlement.Oid())\n )\n return True\n elif check_demat_payment(settlement, trade, instrument):\n LOGGER.info(\n '{sett_id} is a valid demat payment'.format(sett_id=settlement.Oid())\n )\n return True\n\n return False", "title": "" }, { "docid": "baaebb8ec184e6d1a12bd9baab2f56ed", "score": "0.48732144", "text": "def checkAlarmFinished(self):\n if self._alarmFinished:\n self._alarmFinished = False\n self._ignoreNextAlarm = False\n return True\n return False", "title": "" }, { "docid": "f256d76c1038de55371a1f8a8bcc1c2a", "score": "0.4869716", "text": "def is_verified(self):\r\n if self._verified is None:\r\n signature = self._data.get('Signature')\r\n if not signature:\r\n self._verified = False\r\n return self._verified\r\n\r\n # Decode the signature from base64\r\n signature = base64.b64decode(signature)\r\n\r\n # Get the message to sign\r\n sign_bytes = self._get_bytes_to_sign()\r\n if not sign_bytes:\r\n self._verified = False\r\n return self._verified\r\n\r\n if not self.certificate:\r\n self._verified = False\r\n return self._verified\r\n\r\n # Extract the public key\r\n pkey = self.certificate.get_pubkey()\r\n\r\n # Use the public key to verify the signature.\r\n pkey.verify_init()\r\n pkey.verify_update(sign_bytes)\r\n verify_result = pkey.verify_final(signature)\r\n\r\n self._verified = verify_result == 1\r\n\r\n return self._verified", "title": "" }, { "docid": "e2a91dc60d0782b6158e6dfb8bba6911", "score": "0.48524785", "text": "def __eq__(self, other):\n if not isinstance(other, TransactionDataWithIdentifier):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "1c084d0a37e38cb9663da77e6be196c0", "score": "0.48322782", "text": "def not_exist_other_account(self):\r\n result = True\r\n query = \"SELECT idMemberATE FROM MemberATE WHERE idMemberATE <> %s \" \\\r\n \"AND ( email = %s OR username = %s)\"\r\n param = [self.id_member_ate,\r\n self.email,\r\n self.username]\r\n list_accounts = self.connect.select(query, param)\r\n if list_accounts:\r\n result = False\r\n return result", "title": "" }, { "docid": "d80aff5c251647a36d320c1a14b3a4b6", "score": "0.48307335", "text": "def __eq__(self, other):\n if not isinstance(other, GetUserRsp):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "7d835aea8627ca4d9214664ed67ac06c", "score": "0.48301074", "text": "def __eq__(self, other):\n if not isinstance(other, ContactPayment):\n return False\n\n return self.to_dict() == other.to_dict()", "title": "" }, { "docid": "c1d98e831e62d9104a8378f63d6c612d", "score": "0.48295462", "text": "def verify_signature(self) -> bool:\n if not CkptMsg.verify_signature(self):\n return False\n return self.state.chosen_validator == self.signature[0]", "title": "" }, { "docid": "58e41b11104645afc06bbe678e7725a9", "score": "0.48215953", "text": "def __eq__(self, other):\n if not isinstance(other, NatGatewayInstance):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "558e42ea064aeaf65e98dadf55054295", "score": "0.48197952", "text": "def __eq__(self, other):\n if not isinstance(other, CreditGuyVaultTokenizeResponse):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "36298fa231192417fa4e773c29c872b1", "score": "0.48197293", "text": "def __eq__(self, other):\n if not isinstance(other, 
CreditGuyGatewayGetReferenceNumbersResponse):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "cfcce82554a7fab52990e99226ae75ef", "score": "0.48192072", "text": "def verify_signature(self) -> bool:\n if not CkptMsg.verify_signature(self):\n return False\n return self.pub_key == self.signature[0]", "title": "" }, { "docid": "f4723d115cb73e64610fe9800f5486fd", "score": "0.48163256", "text": "def __eq__(self, other):\n if not isinstance(other, FraudRegistrationCard):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "1018f35223060f39f7e004504c028854", "score": "0.48143768", "text": "def __eq__(self, other):\n if not isinstance(other, ConfirmUserBundleResponse):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "6dc56637d30bcb2d684fb67869e7a734", "score": "0.48105684", "text": "def __eq__(self, other):\n if not isinstance(other, Domicilio):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "8d69e241c80854c0a8f2e08b33b736fb", "score": "0.48100463", "text": "def __eq__(self, other):\n if not isinstance(other, AccountSettingsInMeeting):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "726f1e93aa2df675bcc6f56627775fb6", "score": "0.4808876", "text": "def _checkSignatureMatch(self, signatureName, objectName, rule):\n checker = rule['checker'][0]\n checkerType = checker['type'][0].getValue()\n if checkerType == 'fixed-signer':\n signerInfo = checker['signer'][0]\n signerType = signerInfo['type'][0].getValue()\n if signerType == 'file':\n cert = self._lookupCertificate(signerInfo['file-name'][0].getValue(), True)\n elif signerType == 'base64':\n cert = self._lookupCertificate(signerInfo['base64-string'][0].getValue(), False)\n else:\n return False\n if cert is None:\n return False\n else:\n return cert.getName().equals(signatureName)\n elif checkerType == 'hierarchical':\n # this just means the data/interest name has the signing identity as a prefix\n # that means everything before 'ksk-?' 
in the key name\n identityRegex = '^([^<KEY>]*)<KEY>(<>*)<ksk-.+><ID-CERT>'\n identityMatch = NdnRegexMatcher.match(identityRegex, signatureName)\n if identityMatch is not None:\n identityPrefix = Name(identityMatch.group(1)).append(Name(identityMatch.group(2)))\n return self._matchesRelation(objectName, identityPrefix, 'is-prefix-of')\n else:\n return False\n elif checkerType == 'customized':\n keyLocatorInfo = checker['key-locator'][0]\n # not checking type - only name is supported\n\n # is this a simple relation?\n try:\n relationType = keyLocatorInfo['relation'][0].getValue()\n except KeyError:\n pass\n else:\n matchName = Name(keyLocatorInfo['name'][0].getValue())\n return self._matchesRelation(signatureName, matchName, relationType)\n\n # is this a simple regex?\n try:\n keyRegex = keyLocatorInfo['regex'][0].getValue()\n except KeyError:\n pass\n else:\n return NdnRegexMatcher.match(keyRegex, signatureName) is not None\n\n # is this a hyper-relation?\n try:\n hyperRelation = keyLocatorInfo['hyper-relation'][0]\n except KeyError:\n pass\n else:\n try:\n keyRegex = hyperRelation['k-regex'][0].getValue()\n keyMatch = NdnRegexMatcher.match(keyRegex, signatureName)\n keyExpansion = hyperRelation['k-expand'][0].getValue()\n keyMatchPrefix = keyMatch.expand(keyExpansion)\n\n nameRegex = hyperRelation['p-regex'][0].getValue()\n nameMatch = NdnRegexMatcher.match(nameRegex, objectName)\n nameExpansion = hyperRelation['p-expand'][0].getValue()\n nameMatchStr = nameMatch.expand(nameExpansion)\n\n relationType = hyperRelation['h-relation'][0].getValue()\n\n return self._matchesRelation(Name(nameMatchStr), Name(keyMatchPrefix), relationType)\n except:\n pass\n\n # unknown type\n return False", "title": "" }, { "docid": "65094cf1c1344c83daeef394cac04313", "score": "0.48053887", "text": "def __eq__(self, other):\n if not isinstance(other, MerchandiseReturnReversalPostpostPayload):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "534ee0555661225f325f591c99493050", "score": "0.48009875", "text": "def __eq__(self, other):\n if not isinstance(other, GenerateRedirectJwtRequest):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "9fa6ac3f173df3e1a6650cd546ad8590", "score": "0.4792391", "text": "def IsSignedIn(self):\n self.cd.Get(self.settings)\n if not self.cd.SwitchFrame('settings'):\n return False\n account = self.cd.FindID('sync-status-text')\n if not account:\n return False\n if 'Signed in' in account.text:\n return True\n\n return False", "title": "" }, { "docid": "e5b4d5f010052bb723b01b21ce712b28", "score": "0.4782555", "text": "def __eq__(self, o: object) -> bool:\n if isinstance(o, AuthenticationProofPurpose):\n return (\n super().__eq__(o)\n and self.challenge == o.challenge\n and self.domain == o.domain\n )\n\n return False", "title": "" }, { "docid": "6efcc5b4250f32d045c8ea1a3152feb6", "score": "0.47757608", "text": "def __ne__(self, other):\n if not isinstance(other, ContactPayment):\n return True\n\n return self.to_dict() != other.to_dict()", "title": "" }, { "docid": "00ca74458e77c8afd6db9c862195e03a", "score": "0.47741112", "text": "def __eq__(self, other):\n if not isinstance(other, SignUpSelfRegistrationParams):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "b5d54c69ed136c5c1035e0c1fd5a5018", "score": "0.47729918", "text": "def flag_other_user(self, sock):\n\t\tif self.matched_with:\n\t\t\tmatched_with_id = 
self.matched_with.id\n\t\t\tself.matched_with.flag_myself()\n\t\t\tself.send_both_back_into_matching()\n\t\t\tsock.sendall(json.dumps({'type': 'person_was_flagged', 'person_a': matched_with_id, 'id':'MASTER_PYTHON'}))\n\t\telse:\n\t\t\tprint 'Base client was not matched with anybody or one or the other was not connected'\n\t\treturn True", "title": "" }, { "docid": "699a4644e1399775ab36c2c9c54f7891", "score": "0.47685543", "text": "def __eq__(self, other):\n if not isinstance(other, BankResponse):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "d0f12e54a853acdc961e534b56d624d9", "score": "0.47670466", "text": "def __eq__(self, other):\n return self.base_fret == other.base_fret and self.finger_list == other.finger_list", "title": "" }, { "docid": "968184537d25fcb7b0e987498e1b1824", "score": "0.476512", "text": "def __eq__(self, other):\n if not isinstance(other, AssetParentConnectionSignatureAllOf):\n return False\n\n return self.to_dict() == other.to_dict()", "title": "" }, { "docid": "65b175a967f9803de50cea41813938e3", "score": "0.47650263", "text": "def __contains__(self, other: object) -> bool:\n if not isinstance(other, TemporaryFigureMention):\n return NotImplemented\n return self.__eq__(other)", "title": "" }, { "docid": "2580aa2699b949796dc22e5641d9bfe1", "score": "0.47574714", "text": "def __eq__(self, other):\n if not isinstance(other, PayWithAmazonEvent):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "00598d341ec249fdb490a3a4fd22ac2e", "score": "0.475745", "text": "def __eq__(self, other):\n if not isinstance(other, PickupMigrosCallbackResultResponse):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "330febbfb0e4f7ebc95308f6a6bbc70b", "score": "0.4757028", "text": "def is_reply(self):\n # Each individual message in an album all reply to the same message\n return self.messages[0].is_reply", "title": "" }, { "docid": "a8b3271cd0a65b09ca15bc7061cdcfca", "score": "0.47568378", "text": "def is_received(self, user):\n return getattr(user, 'pk', user) in self.get_received_ids()", "title": "" }, { "docid": "b09f8e7ed6c4200e8870b0e4bff780f8", "score": "0.47552663", "text": "def __eq__(self, other):\n if not isinstance(other, GetCorporationsCorporationIdMembertracking200Ok):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "8bc3b08603302a7203f86e760dd7bf0f", "score": "0.47533932", "text": "def isSignatureValid(expected, received):\n if expected:\n if not received or expected != received:\n return False\n else:\n if received:\n return False\n return True", "title": "" }, { "docid": "3ec0d1fe02a11fa6d6611c539a0eefc2", "score": "0.47515434", "text": "def __eq__(self, other):\n if not isinstance(other, CreateBankIDMobileRequest):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "be791b2fc1fb8145b7d01e2104595836", "score": "0.47493878", "text": "def __eq__(self, other):\n if not isinstance(other, FeedIndicatorsRequest):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "853542564b1c5952cd8f65c5e7868b9a", "score": "0.4749154", "text": "def hasAccount(author):\n return (str(author) in world.info[\"accounts\"].keys())", "title": "" }, { "docid": "53681a521473aabb2635f37553fd9b09", "score": "0.4740882", "text": "def __eq__(self, other):\n if not isinstance(other, PaymentMethod):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": 
"8b3afea6be8d0b90546386677a0fc27c", "score": "0.47406435", "text": "def account_exists(self):\r\n result = False\r\n query = \"SELECT idMemberATE FROM MemberATE WHERE idMemberATE = %s;\"\r\n param = [self.id_member_ate]\r\n response = self.connect.select(query, param)\r\n if response:\r\n result = True\r\n return result", "title": "" }, { "docid": "aa41f38768d99bca5f4aa7f87a8865b3", "score": "0.4734737", "text": "def is_related(self, other):\n return self._head_of_family is other._head_of_family", "title": "" }, { "docid": "acb8172c91fc0a7e3f69b6f54d9b3e5d", "score": "0.47309086", "text": "def withdraw(secret: str) -> bool:\n # Checking if PERSON_A and PERSON_B transferred to this smart contract\n funded_person_a = get(FUNDED_PREFIX + PERSON_A).to_int()\n funded_person_b = get(FUNDED_PREFIX + PERSON_B).to_int()\n if verify() and not refund() and hash160(secret) == get(SECRET_HASH) and funded_person_a != 0 and funded_person_b != 0:\n put(FUNDED_PREFIX + PERSON_A, 0)\n put(FUNDED_PREFIX + PERSON_B, 0)\n put(NOT_INITIALIZED, True)\n put(START_TIME, 0)\n call_contract(UInt160(get(TOKEN_PREFIX + PERSON_B)), 'transfer',\n [executing_script_hash, get(ADDRESS_PREFIX + PERSON_A), get(AMOUNT_PREFIX + PERSON_B), None])\n call_contract(UInt160(get(TOKEN_PREFIX + PERSON_A)), 'transfer',\n [executing_script_hash, get(ADDRESS_PREFIX + PERSON_B), get(AMOUNT_PREFIX + PERSON_A), None])\n return True\n\n return False", "title": "" }, { "docid": "591a958801ba4501f14d42252fd0a4b9", "score": "0.4725835", "text": "def __eq__(self, other):\n if not isinstance(other, CompanyPaymentRequestResponse):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "1e0e2c434fd80dfc26a70915c8b6e153", "score": "0.47246924", "text": "def verify_webhook(self, request_body, x_signature):\n signature = hmac.new(\n key=self.api_secret.encode(), msg=request_body, digestmod=hashlib.sha256\n ).hexdigest()\n return signature == x_signature", "title": "" }, { "docid": "74e9b65da1648b8fa28555560506a21b", "score": "0.47156376", "text": "def should_take_action_for_person_appeared(self, person_name):\n if person_name != self.last_person_name:\n self.logger.info(\"Appeared person has changed from %s to %s\" %\\\n (person_name, self.last_person_name))\n return True\n\n if int(time.time() - self.last_person_action_took_at) >= \\\n self.SAME_PERSON_THRESHOLD:\n self.logger.info(\"Taking action for person %s, even if it's \"\n \"the same person, because enough time has ellapsed\"\n % person_name)\n return True\n else:\n self.logger.info(\"Not taking action for person %s, because it's \"\n \"the same person and too little time has ellapsed\"\n % person_name)\n return False", "title": "" }, { "docid": "37706282d31d9b9a1b9e0b54d7851fe0", "score": "0.47151223", "text": "def is_followed_by(self, dfg, activity_a, activity_b):\n for i in range(0, len(dfg)):\n if (activity_a, activity_b) == dfg[i][0]:\n return True\n\n return False", "title": "" }, { "docid": "dd3c2078d2ea4243188b32cd279a9a0e", "score": "0.4713032", "text": "def allow_relation(self, obj1, obj2, **hints):\n if obj1._meta.app_label == 'auth' or \\\n obj2._meta.app_label == 'auth':\n return True\n return None", "title": "" }, { "docid": "b810135a3aacf16c567dd0f6a52413e6", "score": "0.47126547", "text": "def has_object_permission(self, request, view, obj):\n user = request.user\n return (user == obj or user.account_type == \"A\")", "title": "" }, { "docid": "9b095df2cb9ba2a288515cbb36daa51c", "score": "0.47125536", "text": "def __eq__(self, other):\n if not 
isinstance(other, WSImpactEvidence):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "63fbfdd98f812eb94463dfa44e692912", "score": "0.47123677", "text": "def verify(self, seq_id, data, signature):\n data = struct.pack(\">L\", seq_id) + data\n i, o = self.i_mac\n inner = self.hash(i + data).digest()\n outer = self.hash(o + inner).digest()\n return outer == signature", "title": "" }, { "docid": "94f8f668dcd7b4ef517d5a6829f56847", "score": "0.47109783", "text": "def verify_proof_of_work(self, previous_proof, proof):\n return hashlib.sha256(f'{previous_proof}{proof}'.encode()).hexdigest()[:4] == '0000'", "title": "" }, { "docid": "83a48522fbddcac8f2138d0a88cf197d", "score": "0.47101158", "text": "def __eq__(self, other):\n # type: (object) -> bool\n if not isinstance(other, SendSkillMessagingRequest):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "b775d4c7be6abbdb3ff1aba1f22a2685", "score": "0.47081134", "text": "def __eq__(self, other):\n if not isinstance(other, WithdrawalVO):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "6596a2a971f42ea1c1545e5d095b07ef", "score": "0.4704172", "text": "def is_account_owner(self) -> bool:\n return pulumi.get(self, \"is_account_owner\")", "title": "" }, { "docid": "23b8d0c4b9b0915803e003c6a72bd2f9", "score": "0.4697794", "text": "def has_person_handle(self, handle):\n raise NotImplementedError", "title": "" }, { "docid": "bf8aa0933715bd898eb70b54cb5d1866", "score": "0.46954906", "text": "def was_called(self, func) -> bool:\n return id(func) in self.called", "title": "" }, { "docid": "62f91da883032f88a26242f3852c6a47", "score": "0.46919185", "text": "def __bool__(self) -> bool:\n\t\treturn self.calcChecksum() == self.Checksum", "title": "" } ]
6f7ce6571a86f179bb4246d10c8acc4f
Returns a set of value names that pass the criterion.
[ { "docid": "e03dc704da494c380ed9af2336a1f730", "score": "0.0", "text": "def filter_value_to_type_binding(self, binding):\n named_field_list = [\n (field_value.name, field_value.field)\n for field_value in binding.value_list]\n return self._filter(named_field_list)", "title": "" } ]
[ { "docid": "0954cc11dbfe4d93f4f957957eafcd7f", "score": "0.64739496", "text": "def get_values(self, attr_name):\n ret = set(self._attr_value_cdist[attr_name].keys()\n + self._attr_value_counts[attr_name].keys()\n + self._branches.keys())\n return ret", "title": "" }, { "docid": "65c0fd3300693af980a1684abb114cbb", "score": "0.64213735", "text": "def get_event_names(self):\n sets_of_names = list(condition.get_event_names() for condition in self.get_conditions_list())\n flat_list = [item for sublist in sets_of_names for item in sublist]\n return set(flat_list)", "title": "" }, { "docid": "c8e8a197c6d72a9b435a294fd546cace", "score": "0.6308185", "text": "def all_values() -> Iterable[str]:\n return list(p.value for p in Feature)", "title": "" }, { "docid": "7261d2944d16a0742492bd007a596591", "score": "0.6257798", "text": "def get_all_names(cls, exclude_values: Iterator['CommonSide'] = None) -> Tuple[str]:\n name_list: Tuple[str] = tuple([value.name for value in cls.get_all(exclude_values=exclude_values)])\n return name_list", "title": "" }, { "docid": "217706b3a7e5c6a0b5cd6e6221d9977f", "score": "0.60785496", "text": "def get_names_from_keys(self):\n return set([\n frozenset([\n field.get_name() for field in fields\n ]) for fields in self.get_keys()\n ])", "title": "" }, { "docid": "df3015b44ce3d45099b576166755a515", "score": "0.6026387", "text": "def get_metric_names(self) -> Sequence[str]:\n\n metric_names = set()\n for slicing_metric in self.slicing_metrics:\n for output_name in slicing_metric[1]:\n for metrics in slicing_metric[1][output_name].values():\n metric_names.update(metrics)\n return list(metric_names)", "title": "" }, { "docid": "32088fefbdc10115a20fb9bebd6cc8c5", "score": "0.6006956", "text": "def names(self):\n\t\treturn [v['name'] for v in self.metrics.values()]", "title": "" }, { "docid": "f04d7bb52b98f3d7fb4daa483703806f", "score": "0.59708333", "text": "def outNames(self):\n\n return self.defNames | self.varNames", "title": "" }, { "docid": "38e6da2ced06e1c873f886f1f45396ee", "score": "0.5967721", "text": "def filter_values(self, model_admin):\n values = model_admin.model.objects.all().values_list(self.parameter_name, flat=True)\n # Unique values list (see f6 in http://www.peterbe.com/plog/uniqifiers-benchmark)\n return list(set(values))", "title": "" }, { "docid": "6b35eeae7a959b81e3b2f77e01236865", "score": "0.5924337", "text": "def values(self):\n return [self._values[key] for key in self.names]", "title": "" }, { "docid": "253546acfc23e797235b8955a69228f0", "score": "0.588697", "text": "def get_names(self):\n return [x.name for x in self.get_active_params()]", "title": "" }, { "docid": "37b8f7f503bc97c755b0afb7d5b28c5d", "score": "0.58663917", "text": "def get_keys(self):\n\treturn self.item + self.cond", "title": "" }, { "docid": "bfb40cd99b27da59ba52dae3c079248d", "score": "0.5837015", "text": "def getNames(self):\n return [v.name for v in self._parameters.values() if self.isFree(v)]", "title": "" }, { "docid": "ffce79eda0f669e3fbcf028c653a30d7", "score": "0.583453", "text": "def _get_feature_names(self):\n return self._filter_analyses(self.dataset.get_feature_names())", "title": "" }, { "docid": "b78611765ce11d11e83f369c6b69a0c0", "score": "0.57605225", "text": "def get_results (self):\n names = []\n for result in self.results:\n names.append (result.GetLabel ( ))\n return sorted (names)", "title": "" }, { "docid": "6da6819c9357082d6af8389476b39041", "score": "0.5748671", "text": "def get_keywords_by_value(value):\n return []", "title": "" }, { "docid": 
"8954313cec69a45d1e5f20e0deaf4a4d", "score": "0.5716351", "text": "def get_all_names(cls, exclude_values: Iterator['CommonObjectDeliveryMethod'] = None) -> Tuple[str]:\n name_list: Tuple[str] = tuple([value.name for value in cls.get_all(exclude_values=exclude_values)])\n return name_list", "title": "" }, { "docid": "511f32b3ce53354c005515df9f2ebb7d", "score": "0.57018995", "text": "def get_flagged_names():\n \n l = []\n for w in _widget_cache.values():\n if w.flagged:\n l.append(w.get_node().get_value())\n return l", "title": "" }, { "docid": "a2e3f66c6bf68a82dedd122c4df49315", "score": "0.5701293", "text": "def names(cls):\n return set(attr.name.lower() for attr in cls)", "title": "" }, { "docid": "d479822b43ddadae56d1c60e0a09e264", "score": "0.56987566", "text": "def get_probenames(self):\n proberecords = (rec for rec in self.results.group_probename.buckets)\n probenames = (self.probematch.match(proberecord.key)\n for proberecord in proberecords)\n probes = (probename.group(2).lower()\n for probename in probenames if probename)\n return set(probes)", "title": "" }, { "docid": "05f666325e13ecf205447345c7d40348", "score": "0.56969476", "text": "def get_flagged_names():\n\n l = []\n for w in _widget_cache.values():\n if w.flagged:\n l.append(w.get_node().get_value())\n return l", "title": "" }, { "docid": "5d2c5d40131588068ee71212567be930", "score": "0.56818753", "text": "def get_all_names() -> List[str]:\n names = []\n for law_branch in LawBranch:\n names.append(law_branch.value[1])\n return names", "title": "" }, { "docid": "e22f052e0a02100321d715a9c6b667ca", "score": "0.5681804", "text": "def get_names(self):\n return list(self._cached_names.keys())", "title": "" }, { "docid": "1eb5b383b038ebc77336df5a5f911e06", "score": "0.5680019", "text": "def names(self):\n self._names = []\n for key, info in self._residuals.items():\n if info[\"_func\"] != \"discretized_misfit\":\n self._names.append(key)\n else:\n n_f1_levels = np.size(info[\"field_1_percentile_edges\"]) - 1\n n_f2_levels = np.size(info[\"field_2_percentile_edges\"]) - 1\n label = info[\"name\"]\n\n for f1l in range(n_f1_levels):\n for f2l in range(n_f2_levels):\n n = label.format(field_1_level=f1l, field_2_level=f2l)\n self._names.append(n)\n\n return self._names", "title": "" }, { "docid": "fd92dd9b947232971d1f99d7a9800c02", "score": "0.5668322", "text": "def values(value):", "title": "" }, { "docid": "6778e8c88ebeaae6d76df60a1b9db3fa", "score": "0.566392", "text": "def get_gene_names(self):\n defined_names = ('id', 'x', 'y', 'z', 'Nx', 'Ny', 'Nz')\n names = set(name.split('_')[0] for name in self.column\n if not name.startswith(defined_names))\n return names", "title": "" }, { "docid": "4c22c01ca6c399ffa0ae28fe94f44aff", "score": "0.5656742", "text": "def get_values(self):\n return (self.values[vid] for vid in self._get_value_order())", "title": "" }, { "docid": "6f5db97aeafde97860d4ea4b13bee7f5", "score": "0.56560844", "text": "def get_names(self) ->List[str]:\n return [self.name]", "title": "" }, { "docid": "3183d8d075845fab17e969feb4f2b0ac", "score": "0.5642326", "text": "def getNames(self):\n return self.names", "title": "" }, { "docid": "b6356c7893397094de6fa13213b09f78", "score": "0.56045043", "text": "def names(self, mask=None):\n names = list()\n if mask is None:\n # return names in sorted order of bitnum\n bitnums = [x for x in self._bits.keys() if isinstance(x, int)]\n for bitnum in sorted(bitnums):\n names.append(self._bits[bitnum].name)\n else:\n mask = int(mask) # workaround numpy issue #2955 for uint64\n bitnum = 
0\n while 2**bitnum <= mask:\n if (2**bitnum & mask):\n if bitnum in self._bits.keys():\n names.append(self._bits[bitnum].name)\n else:\n names.append('UNKNOWN' + str(bitnum))\n bitnum += 1\n\n return names", "title": "" }, { "docid": "789585ef77450a176d80a3c00197f252", "score": "0.55981046", "text": "def get_keyword_names():\n return listify_selective(g_keywords, is_name)", "title": "" }, { "docid": "24a9d729f8308eb9a6f8cccc5d154bc3", "score": "0.55845577", "text": "def obsnames(self):\r\n return self.keys()", "title": "" }, { "docid": "20b3da0dfed7db27d4a2b0f727eb0750", "score": "0.5573106", "text": "def variable_exp_names(self):\n name_list = []\n for VE in self.variable_exps:\n name_list.append(VE.name)\n return name_list", "title": "" }, { "docid": "1f85406feec9c1b7188d110645ad6dc5", "score": "0.55685294", "text": "def __match_td_vals(self):\n pairs = self.__get_pairs()\n vals = []\n for pair in pairs:\n try:\n vals.append(self.TD_VALS[pair])\n except KeyError:\n pass\n return vals", "title": "" }, { "docid": "983b6f6ffb879aa3a92e2d8bcd356ebd", "score": "0.5566027", "text": "def getnames(self):\n return [member.name for member in self]", "title": "" }, { "docid": "c22c158356f922d6eb1c31eaa3f7414e", "score": "0.5560243", "text": "def get_names(self):\n \n names = []\n for param in self._paramdict:\n names.append(self._paramdict[param].name)\n return names", "title": "" }, { "docid": "432c08e5e8607e48a63c1e3f3d993946", "score": "0.55510694", "text": "def variables(model: Model) -> AbstractSet[str]:\n assert is_model(model)\n return model.keys()", "title": "" }, { "docid": "76d5cf7acff7b98820788a2adad5baae", "score": "0.5528502", "text": "def find_vars(expression):\n return set(filterfalse(TOKENS.__contains__, expression))", "title": "" }, { "docid": "40b50315bcf2880a055bfa77d0c318b1", "score": "0.5526196", "text": "def get_chr_names (self):\n l = set(self.peaks.keys())\n return l", "title": "" }, { "docid": "996508f369bc27399cd6c3bb3460cf45", "score": "0.5505747", "text": "def values(self) -> Sequence[str]:\n return pulumi.get(self, \"values\")", "title": "" }, { "docid": "996508f369bc27399cd6c3bb3460cf45", "score": "0.5505747", "text": "def values(self) -> Sequence[str]:\n return pulumi.get(self, \"values\")", "title": "" }, { "docid": "996508f369bc27399cd6c3bb3460cf45", "score": "0.5505747", "text": "def values(self) -> Sequence[str]:\n return pulumi.get(self, \"values\")", "title": "" }, { "docid": "f241340c3da99e7f7fe16c48935bc6a5", "score": "0.54972494", "text": "def constants(self) -> Set[str]:\r\n # Task 7.5.1\r\n lst_of_constant = list()\r\n if is_constant(self.root):\r\n lst_of_constant.append(self.root)\r\n try:\r\n for _term in self.arguments:\r\n lst_of_constant += _term.constants()\r\n except AttributeError:\r\n pass\r\n return set(lst_of_constant)", "title": "" }, { "docid": "e407b7ba351e8b25e0bd37be2b651ce1", "score": "0.5479916", "text": "def get_names(self):\n return self.dtype.names", "title": "" }, { "docid": "f4e789b05f1777fcafd647935074a026", "score": "0.54710895", "text": "def get_tag_names(self):\n tag_names = set([])\n for element in self.get_element():\n if len(element) > 0:\n for child in element:\n tag_names.add((child.get('k'), child.get('v')))\n return tag_names", "title": "" }, { "docid": "72a6fbab24b7172766423478b29438e6", "score": "0.5470264", "text": "def names(self):\n return self._names", "title": "" }, { "docid": "72a6fbab24b7172766423478b29438e6", "score": "0.5470264", "text": "def names(self):\n return self._names", "title": "" }, { "docid": 
"fd1c81167442e109609f6f71413de99d", "score": "0.5470158", "text": "def get_desired_reports_names(self) -> Set[str]:\n return self._desired_detector_names.copy()", "title": "" }, { "docid": "4a7359b8ecbc2a893d5d367ef25f17ff", "score": "0.54509515", "text": "def name(self) -> List[NameAndValue]:\n return self._name", "title": "" }, { "docid": "4a7359b8ecbc2a893d5d367ef25f17ff", "score": "0.54509515", "text": "def name(self) -> List[NameAndValue]:\n return self._name", "title": "" }, { "docid": "59b75bc729d7b37b6016fca5b6368327", "score": "0.5448941", "text": "def enum_vals(enum: Enum) -> List[str]:\n return [v.value for v in enum]", "title": "" }, { "docid": "d9679416c42b1deccee506b21c05aacf", "score": "0.5435517", "text": "def values(self):\r\n return self.cases", "title": "" }, { "docid": "9f4e8562303ca455dc0b2f75567035bf", "score": "0.5430424", "text": "def extract_stat_names(dict_of_stats):\n stat_names = []\n for key, val in dict_of_stats.items():\n stat_names += [key]\n return stat_names", "title": "" }, { "docid": "ed0f8ed90c259e8a96fe394c777360fe", "score": "0.54270583", "text": "def get_values():\n return []", "title": "" }, { "docid": "05ab8fe662efbf5e88505469ade4be55", "score": "0.5424947", "text": "def retrieveValues( self, valueSet ):\n\t\tif self.format in ('qmark','numeric','format'):\n\t\t\treturn [ valueSet[key] for key in self.sequentialNames ]\n\t\treturn valueSet", "title": "" }, { "docid": "d2c2fb0e6d815850e5fc942bc1d5974a", "score": "0.54159963", "text": "def node_variable_names(self):\r\n return frozenset(nv.name for nv in self._nodes)", "title": "" }, { "docid": "ce115ad04c309e5cc04d85eb3e9a6e63", "score": "0.5413887", "text": "def var_names_int(self):\n if self._vars is None:\n raise ValueError(\n f\"Problem '{self.name}': No variables added for optimization.\"\n )\n\n grps = self._vars.groupby(\"type\")\n if \"int\" not in grps.groups.keys():\n return []\n else:\n return grps.get_group(\"int\")[\"name\"].tolist()", "title": "" }, { "docid": "d7ceb976509428078dbeae13c0b2f60f", "score": "0.5409196", "text": "def get_applicable_values(self):\n return [v for v in self._values if v.is_active and not v.is_all_results]", "title": "" }, { "docid": "a790864e738a29fe35bf5e9ef6d2f493", "score": "0.54064035", "text": "def keys(self):\n return [var for var in self.dataset.variables]", "title": "" }, { "docid": "a790864e738a29fe35bf5e9ef6d2f493", "score": "0.54064035", "text": "def keys(self):\n return [var for var in self.dataset.variables]", "title": "" }, { "docid": "ac7bb294374cdd10a7ed090685d32d0d", "score": "0.540624", "text": "def getVarsNames( _vars, symboltable ) :\n return [ getVarName( var, symboltable ) for var in _vars ]", "title": "" }, { "docid": "0f6eae2dd96ac4de94c00cc6480a95a8", "score": "0.54036397", "text": "def variables(self) -> Set[str]:\r\n # Task 7.5.2\r\n lst_of_variable = list()\r\n if is_variable(self.root):\r\n lst_of_variable.append(self.root)\r\n try:\r\n for _term in self.arguments:\r\n lst_of_variable += _term.variables()\r\n except AttributeError:\r\n pass\r\n return set(lst_of_variable)", "title": "" }, { "docid": "76b7228fff39dc2220ded652099842ef", "score": "0.5401537", "text": "def get_cases(self):\n return [\n case for case in self.__dict__.keys() if case in self.__case_list.keys()\n ]", "title": "" }, { "docid": "2a052f325125d02bd02a06b1fef2120d", "score": "0.53974354", "text": "def get_an_values(self):\n return list(self.unique_vals[self.anomalous_indices])", "title": "" }, { "docid": "ddaad2be91a5288080e27af388a90055", "score": 
"0.5390422", "text": "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "title": "" }, { "docid": "ddaad2be91a5288080e27af388a90055", "score": "0.5390422", "text": "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "title": "" }, { "docid": "ddaad2be91a5288080e27af388a90055", "score": "0.5390422", "text": "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "title": "" }, { "docid": "ddaad2be91a5288080e27af388a90055", "score": "0.5390422", "text": "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "title": "" }, { "docid": "ddaad2be91a5288080e27af388a90055", "score": "0.5390422", "text": "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "title": "" }, { "docid": "ddaad2be91a5288080e27af388a90055", "score": "0.5390422", "text": "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "title": "" }, { "docid": "ddaad2be91a5288080e27af388a90055", "score": "0.5390422", "text": "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "title": "" }, { "docid": "ddaad2be91a5288080e27af388a90055", "score": "0.5390422", "text": "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "title": "" }, { "docid": "ddaad2be91a5288080e27af388a90055", "score": "0.5390422", "text": "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "title": "" }, { "docid": "ddaad2be91a5288080e27af388a90055", "score": "0.5390422", "text": "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "title": "" }, { "docid": "406ec9b709a1f7e05ac4f2570fe3e3a7", "score": "0.53789395", "text": "def values(self):\n return self.valueList", "title": "" }, { "docid": "0f1e8d788e89e0b97f97a92b44905bea", "score": "0.53742415", "text": "def get_sets(self):\r\n return [i[1] for i in self.group.items()]", "title": "" }, { "docid": "753aa01d96342d3df6684e216c9ea777", "score": "0.53705996", "text": "def getKeys(self):\r\n keys = []\r\n n = 0\r\n while n < self.size:\r\n if self.value_array[n] is not None:\r\n keys.append(self.value_array[n][0])\r\n n = n + 1\r\n return keys", "title": "" }, { "docid": "41c1a8efea1e5b8a94f626fa97e43153", "score": "0.5367779", "text": "def getVariablesAsSet(cnfFormula):\n variables = set()\n for clause in cnfFormula:\n variables |= set(clause.keys())\n return variables", "title": "" }, { "docid": "75d65eedee1d4e9e8c29617f1470a484", "score": "0.5362503", "text": "def names(self):\n return self.tests.names()", "title": "" }, { "docid": "b954df4e30660d6d052ac9e5d37e302e", "score": "0.53599346", "text": "def extract_unique_external_names_from_assign_value(ast_node: ast.AST) -> List[str]:\n ne = NameExtractor()\n ne.visit(ast_node)\n\n return list(set(ne.names))", "title": "" }, { "docid": "91691fdad82a6c67c02e71ae3d6a3932", "score": "0.5358442", "text": "def get_value_name_feature(self, key):\n ...", "title": "" }, { "docid": "3dd88cc4d6d88bc582e83d9fbd330c0c", "score": "0.5355226", "text": "def filter(self, names=None, ctx=None, txn=None):\n if names is not None:\n if ctx.checkreadadmin():\n return names\n items = self.gets(names, ctx=ctx, txn=txn)\n return set([i.name for i in items])\n return set(self.keys(txn=txn))", "title": "" }, { "docid": "761af31ba51fb51aa2db179bd7edc950", "score": "0.53501296", "text": "def values():", "title": "" }, { "docid": "761af31ba51fb51aa2db179bd7edc950", "score": "0.53501296", "text": "def values():", "title": "" }, { "docid": "f845affce6e62dd60b2bdfeb9970f078", "score": 
"0.5350111", "text": "def values(self):\n return [kvp.value for kvp in self.keyvaluepair_set.all()]", "title": "" }, { "docid": "30cc2ceeb739e1f575a9590330994584", "score": "0.53492194", "text": "def names(enumclass):\n return frozenset(enumclass._names)", "title": "" }, { "docid": "437e51dde4cba0ae41f47e7461d75a27", "score": "0.53365386", "text": "def get_field_names(self):\n return set(self.fields.keys())", "title": "" }, { "docid": "ff241aa2409718a5f97a560991dfa6de", "score": "0.53362274", "text": "def value_set(d):\n return set(d.itervalues())", "title": "" }, { "docid": "2013b1bf728e71779be2d292f4c48fc1", "score": "0.53278816", "text": "def ordered_values(self) -> List[str]:\n return list(self.value_iterator())", "title": "" }, { "docid": "f06440bbe4470c100b98a3fcab18f4b7", "score": "0.5316824", "text": "def keys(self):\n return [kvp.key for kvp in self.keyvaluepair_set.all()]", "title": "" }, { "docid": "862c069140e0a326ad451be4b0bbba68", "score": "0.53115755", "text": "def get_values(self) -> Set[str]:\r\n return {self.dr.fix_url(x.get_attribute('value'))\r\n for x in self.dr.quietly_find_elements('option:not([value=\"#\"])', self.element)}", "title": "" }, { "docid": "2c422f1a479d7948febf0f8603b84cd1", "score": "0.53110266", "text": "def get_selected_names(self) -> list:\n rows = self.get_selected_rows()\n names = []\n for r in rows:\n names.append(self.item(r, 1).text())\n return names", "title": "" }, { "docid": "48ff540dbbad2ab541ef63e6f80369c8", "score": "0.53099287", "text": "def _getAttributeNames(self):\n return sorted(self._i._field_map.keys())", "title": "" }, { "docid": "58c0730dbfdcf311f83cf17eab5f3e25", "score": "0.5306564", "text": "def values(self):\n return list(self.itervalues())", "title": "" }, { "docid": "87ea1f96354feb41071c95702f6874f6", "score": "0.53063214", "text": "def collection_variables():\n variable_names = flatten(collection_variable_order())\n ##variable_names = [name for name in variable_names if variable_nchoices(name) > 1]\n return variable_names", "title": "" }, { "docid": "df2adb8fb63b9c3e185648e1c3187d28", "score": "0.52997583", "text": "def attribute_names(self) -> typing.Optional[typing.List[str]]:\n return self._values.get('attribute_names')", "title": "" }, { "docid": "ac89a355f8ed46c590a158d3938d6fba", "score": "0.5292471", "text": "def values(self):\r\n return [getattr(self, field) for field in self.keys()]", "title": "" }, { "docid": "05504a3f06e86386a783317bdfc23501", "score": "0.52908814", "text": "def _dataprops_names_get(self, filter=None):\n return self._filter(filter=filter, llist=self._model.schema.propertynames)", "title": "" }, { "docid": "11aaeb7b9688e13cd55c8aa1eeb785ba", "score": "0.52834636", "text": "def get_feature_values(self, feature):\n return set(self.get_feature_value_counts(feature).index)", "title": "" }, { "docid": "a36260248a37fe126ee916df16d63449", "score": "0.5266791", "text": "def names(self) -> List[str]:\n return self._get(\"names\")", "title": "" }, { "docid": "b190c9a5da07a917b4f04852f9793dda", "score": "0.5264182", "text": "def values(self):\r\n return [x[1] for x in self.items()]", "title": "" } ]
9ad8828fd1d1d0cab49175a179fc50d1
Return a list of port names.
[ { "docid": "ab5f584700a931b565950f95db72a7b6", "score": "0.7265796", "text": "def keys(self):\n return [port.name for port in self]", "title": "" } ]
[ { "docid": "4477977bfbec33537b48c10c569677ff", "score": "0.83994097", "text": "def get_ports(self):\r\n return self.namelist", "title": "" }, { "docid": "b4ccc8436a78d6a7f02a88b434b05fda", "score": "0.80273235", "text": "def ports(self) -> Sequence[str]:\n return pulumi.get(self, \"ports\")", "title": "" }, { "docid": "6c3ad3bf86918157f463bcbcd327a8ff", "score": "0.7978655", "text": "def list_ports(self):\n return self.ironic_client.port.list()", "title": "" }, { "docid": "d02a6ad07122e62d11aa71216c670e9b", "score": "0.78895766", "text": "def listPorts(self):\n ports = glob.glob('/dev/tty[A-Za-z]*')\n print(ports)", "title": "" }, { "docid": "d86e46579f191c084bcb0da2493d7954", "score": "0.74389154", "text": "def ports(self):\n return self.__ports[:]", "title": "" }, { "docid": "988ac06642d85eac44ecef4bea044bbf", "score": "0.74244756", "text": "def get_ports(self):\r\n if self._ports:\r\n return self._ports\r\n return ''", "title": "" }, { "docid": "0c62e03d6bb2f3aa30e69f95bf141c45", "score": "0.7406416", "text": "def serial_port_names(self):\n if sys.platform.startswith('win'):\n ports = ['COM%s' % (i + 1) for i in range(256)]\n elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n # this excludes your current terminal \"/dev/tty\"\n ports = glob.glob('/dev/tty[A-Za-z]*')\n elif sys.platform.startswith('darwin'):\n ports = glob.glob('/dev/tty.*')\n else:\n raise EnvironmentError('Unsupported platform')\n\n return ports", "title": "" }, { "docid": "5ef126469d9602fc4fb9fec6699304de", "score": "0.7341906", "text": "def ports(self) -> pulumi.Output[Sequence['outputs.HostPortGroupPort']]:\n return pulumi.get(self, \"ports\")", "title": "" }, { "docid": "31f1e15206eb6de32f4f38d7cbbe6d08", "score": "0.7210494", "text": "def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"ports\")", "title": "" }, { "docid": "31f1e15206eb6de32f4f38d7cbbe6d08", "score": "0.7210494", "text": "def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"ports\")", "title": "" }, { "docid": "8130913976a2e1bafa00cafe3540d641", "score": "0.7191391", "text": "def ports(self) -> Optional[Sequence[int]]:\n return pulumi.get(self, \"ports\")", "title": "" }, { "docid": "8130913976a2e1bafa00cafe3540d641", "score": "0.7191391", "text": "def ports(self) -> Optional[Sequence[int]]:\n return pulumi.get(self, \"ports\")", "title": "" }, { "docid": "8130913976a2e1bafa00cafe3540d641", "score": "0.7191391", "text": "def ports(self) -> Optional[Sequence[int]]:\n return pulumi.get(self, \"ports\")", "title": "" }, { "docid": "d9c436b1508d0b0423a743a8bbd2aa3b", "score": "0.7087674", "text": "def serial_ports():\n if sys.platform.startswith('win'):\n ports = ['COM%s' % (i + 1) for i in range(256)]\n elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n # this excludes your current terminal \"/dev/tty\"\n ports = glob.glob('/dev/tty.usbmodem*')\n elif sys.platform.startswith('darwin'):\n ports = glob.glob('/dev/tty.usbmodem*')\n else:\n raise EnvironmentError('Unsupported platform')\n\n result = []\n\n # extract the port number \n for port in ports:\n port_no = re.findall(r'[0-9]+', port)[0]\n result.append(port_no)\n return result", "title": "" }, { "docid": "053fb6c463c53fdca27d95dd089ce076", "score": "0.70686424", "text": "def GetPorts(self):\n return _snap.TIntStrH_GetPorts(self)", "title": "" }, { "docid": "3346e5e1011613a509e1e5fc34f3bc5b", "score": "0.7026023", "text": "def list_serial_ports():\n 
# Windows\n if os.name == 'nt':\n # Scan for available ports.\n available = []\n for i in range(256):\n try:\n s = serial.Serial(i)\n available.append('COM'+str(i + 1))\n s.close()\n except serial.SerialException:\n pass\n return available\n else:\n # Mac / Linux\n return [port[0] for port in list_ports.comports()]", "title": "" }, { "docid": "235483b65cadcf098ead1430c9409b98", "score": "0.7009902", "text": "def get_SerialPortsList():\n if sys.platform.startswith('win'):\n ports = ['COM%s' % (i + 1) for i in range(256)]\n elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n # this excludes your current terminal \"/dev/tty\"\n ports = glob.glob('/dev/tty[A-Za-z]*')\n elif sys.platform.startswith('darwin'):\n ports = glob.glob('/dev/tty.*')\n else:\n raise EnvironmentError('Unsupported platform')\n\n result = []\n for port in ports:\n try:\n s = serial.Serial(port)\n s.close()\n result.append(port)\n except (OSError, serial.SerialException):\n pass\n return result", "title": "" }, { "docid": "e75489b07029f49d02241e9816e92e14", "score": "0.70092434", "text": "def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HostPortGroupPortArgs']]]]:\n return pulumi.get(self, \"ports\")", "title": "" }, { "docid": "708a714ee5a54f76f66e5bb6cb75aa13", "score": "0.7000759", "text": "def get_ports_list(self, port_type=\"optical\", prefix=None) -> List[Port]:\n return list(\n select_ports(self.ports, port_type=port_type, prefix=prefix).values()\n )", "title": "" }, { "docid": "59637fcf54f2cb61c323062a71f65485", "score": "0.69916975", "text": "def serial_port_list():\n ports = list(list_ports.comports())\n dev_names = ['ttyACM', 'ttyUSB', 'tty.', 'cu.']\n serial_ports = []\n for port_no, description, address in ports:\n for dev_name in dev_names:\n if(address != 'n/a' and dev_name in port_no\n or platform() == 'windows'):\n serial_ports.append([description, address, port_no])\n break\n\n return serial_ports", "title": "" }, { "docid": "a45a313afec0014e0c106d995d69106a", "score": "0.69638884", "text": "def port_path(self):\n return [self._device.getBusNumber()] + self._device.getPortNumberList()", "title": "" }, { "docid": "6660d8999189f5fed2b661d22ec141dc", "score": "0.6963725", "text": "def tagged_ports_list(self):\n if self.tagged_ports:\n return [x.strip() for x in self.tagged_ports.split(\",\")]\n else:\n return []", "title": "" }, { "docid": "273b00f776e8750841d8b8a779ea24c1", "score": "0.69538355", "text": "def get_available_ports():\n if system() == \"Darwin\":\n port_address = '/dev/tty.*'\n else:\n port_address = '/dev/tty[A-Za-z]*'\n return glob(port_address)", "title": "" }, { "docid": "23a9f459c7b2b8f7927a12054591796b", "score": "0.69387496", "text": "def ports(self) -> pulumi.Input[Sequence[pulumi.Input['JitNetworkAccessPortRuleArgs']]]:\n return pulumi.get(self, \"ports\")", "title": "" }, { "docid": "f79a1df804cbaf103eb0a15cf763ae81", "score": "0.6905247", "text": "def ports(self) -> pulumi.Input[Sequence[pulumi.Input['JitNetworkAccessRequestPortArgs']]]:\n return pulumi.get(self, \"ports\")", "title": "" }, { "docid": "5e210fba2c6f9822ad5bbbd9a795e9de", "score": "0.6901135", "text": "def __serial_ports__(self):\n if sys.platform.startswith('win'):\n ports = ['COM%s' % (i + 1) for i in range(256)]\n elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n # this excludes your current terminal \"/dev/tty\"\n ports = glob.glob('/dev/tty[A-Za-z]*')\n elif sys.platform.startswith('darwin'):\n ports = glob.glob('/dev/tty.*')\n else:\n raise 
EnvironmentError('Unsupported platform')\n result = []\n for port in ports:\n try:\n s = serial.Serial(port)\n s.close()\n result.append(port)\n except (OSError, serial.SerialException):\n pass\n return result", "title": "" }, { "docid": "a72e4b0e2032fa7b9a2a36a47825730f", "score": "0.6870884", "text": "def serial_ports(self):\n return serial.tools.list_ports.comports()", "title": "" }, { "docid": "740a86f36c1de81a9b261251166a51a0", "score": "0.6863827", "text": "def ports(self, **query):\n return self._list(_port.Port, **query)", "title": "" }, { "docid": "1226a1040d43b0ce2407ee9bc5d00fff", "score": "0.68628037", "text": "def get_port_list(self, filters=None):\n response = self.get_resource(\n self.array_id, SLOPROVISIONING, 'port', params=filters)\n port_key_list = response.get(\n 'symmetrixPortKey', []) if response else []\n return port_key_list", "title": "" }, { "docid": "1c1542e2e5478dcd7ae35805a1e911c7", "score": "0.68592894", "text": "def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ContainerPortArgs']]]]:\n return pulumi.get(self, \"ports\")", "title": "" }, { "docid": "419eeb29fcd7c33b4e8504970cfaffdc", "score": "0.68580234", "text": "def serial_ports():\n if sys.platform.startswith('win'):\n ports = ['COM%s' % (i + 1) for i in range(256)]\n elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n # this excludes your current terminal \"/dev/tty\"\n ports = glob.glob('/dev/rfcomm*')\n elif sys.platform.startswith('darwin'):\n ports = glob.glob('/dev/rfcomm*')\n else:\n raise EnvironmentError('Unsupported platform')\n\n result = []\n for port in ports:\n try:\n s = serial.Serial(port)\n s.close()\n result.append(port)\n except (OSError, serial.SerialException):\n pass\n return result", "title": "" }, { "docid": "74aa106f34dd7b52fcaf841851be2092", "score": "0.68501854", "text": "def _make_portlist(self, ports, sep=','):\n\n if self.target['ports']:\n self.ports = sep.join([p[0] for p in self.target['ports']])\n else:\n newports = sep.join([str(p) for p in ports])\n\n return newports", "title": "" }, { "docid": "2b8de078c45e2d842e65087a74684a2b", "score": "0.6833581", "text": "def serial_ports():\n if sys.platform.startswith('win'):\n ports = ['COM%s' % (i + 1) for i in range(256)]\n elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n # this excludes your current terminal \"/dev/tty\"\n ports = glob.glob('/dev/tty[A-Za-z]*')\n elif sys.platform.startswith('darwin'):\n ports = glob.glob('/dev/tty.*')\n else:\n raise EnvironmentError('Unsupported platform')\n\n result = []\n ports += [\"01A377A5\"]\n for port in ports:\n try:\n s = serial.Serial(port)\n s.close()\n result.append(port)\n except (OSError, serial.SerialException):\n pass\n return result", "title": "" }, { "docid": "7425449454c809c868059a060f899391", "score": "0.68259346", "text": "def ports(self) -> typing.List[\"EndpointPort\"]:\n return typing.cast(\n typing.List[\"EndpointPort\"],\n self._properties.get(\"ports\"),\n )", "title": "" }, { "docid": "025c36591489771236ca6285123d5fc2", "score": "0.6816269", "text": "def ports_lookup(self):\n\n roomba_ports = [\n p.device\n for p in list_ports.comports()\n ]\n\n if len(roomba_ports) == 0:\n raise\n\n return roomba_ports", "title": "" }, { "docid": "51ad8a29000867e1c6d639a7ec640990", "score": "0.68157876", "text": "def listSerialPorts():\n if sys.platform.startswith('win'):\n ports = ['COM%s' % (i + 1) for i in range(256)]\n elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n # this 
excludes your current terminal \"/dev/tty\"\n ports = glob.glob('/dev/tty[A-Za-z]*')\n elif sys.platform.startswith('darwin'):\n ports = glob.glob('/dev/tty.*')\n else:\n raise EnvironmentError('Unsupported platform')\n\n result = []\n for port in ports:\n try:\n s = serial.Serial(port)\n s.close()\n result.append(port)\n except (OSError, serial.SerialException):\n pass\n return result", "title": "" }, { "docid": "c2dbacd23ff910a39129a2389003c44f", "score": "0.6809311", "text": "def get_port_by_name(self, port_name=None):\n ports = []\n try:\n ports = self._call(\n self.context,\n self._make_msg('get_port_by_name',\n port_name=port_name),\n topic=self.topic\n )\n except messaging.MessageDeliveryFailure:\n LOG.error(\"agent->plugin RPC exception caught: \",\n \"get_port_by_name\")\n\n return ports", "title": "" }, { "docid": "9ad5d5647c830d01b49d14d9e995b075", "score": "0.68023443", "text": "def outputPorts(self):\n return [port for port in self.ports if port.isOutput()]", "title": "" }, { "docid": "100ced3c40e41238e503be15c2e840e4", "score": "0.67855394", "text": "def serial_ports(self):\r\n if os.name == 'nt':\r\n # windows\r\n for i in range(256):\r\n try:\r\n s = serial.Serial(i)\r\n s.close()\r\n yield 'COM' + str(i + 1)\r\n except serial.SerialException:\r\n pass\r\n else:\r\n # unix\r\n for port in list_ports.comports():\r\n yield port[0]", "title": "" }, { "docid": "436a59bb8f3f1b14425ccc9fcaf638ac", "score": "0.6779296", "text": "def serial_ports():\n if sys.platform.startswith('win'):\n ports = ['COM%s' % (i + 1) for i in range(256)]\n elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n # this excludes your current terminal \"/dev/tty\"\n ports = glob.glob('/dev/tty[A-Za-z]*')\n elif sys.platform.startswith('darwin'):\n ports = glob.glob('/dev/tty.*')\n else:\n raise EnvironmentError('Unsupported platform')\n\n result = []\n for port in ports:\n try:\n s = serial.Serial(port)\n s.close()\n result.append(port)\n except (OSError, serial.SerialException):\n pass\n return result", "title": "" }, { "docid": "436a59bb8f3f1b14425ccc9fcaf638ac", "score": "0.6779296", "text": "def serial_ports():\n if sys.platform.startswith('win'):\n ports = ['COM%s' % (i + 1) for i in range(256)]\n elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n # this excludes your current terminal \"/dev/tty\"\n ports = glob.glob('/dev/tty[A-Za-z]*')\n elif sys.platform.startswith('darwin'):\n ports = glob.glob('/dev/tty.*')\n else:\n raise EnvironmentError('Unsupported platform')\n\n result = []\n for port in ports:\n try:\n s = serial.Serial(port)\n s.close()\n result.append(port)\n except (OSError, serial.SerialException):\n pass\n return result", "title": "" }, { "docid": "436a59bb8f3f1b14425ccc9fcaf638ac", "score": "0.6779296", "text": "def serial_ports():\n if sys.platform.startswith('win'):\n ports = ['COM%s' % (i + 1) for i in range(256)]\n elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n # this excludes your current terminal \"/dev/tty\"\n ports = glob.glob('/dev/tty[A-Za-z]*')\n elif sys.platform.startswith('darwin'):\n ports = glob.glob('/dev/tty.*')\n else:\n raise EnvironmentError('Unsupported platform')\n\n result = []\n for port in ports:\n try:\n s = serial.Serial(port)\n s.close()\n result.append(port)\n except (OSError, serial.SerialException):\n pass\n return result", "title": "" }, { "docid": "436a59bb8f3f1b14425ccc9fcaf638ac", "score": "0.6779296", "text": "def serial_ports():\n if sys.platform.startswith('win'):\n 
ports = ['COM%s' % (i + 1) for i in range(256)]\n elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n # this excludes your current terminal \"/dev/tty\"\n ports = glob.glob('/dev/tty[A-Za-z]*')\n elif sys.platform.startswith('darwin'):\n ports = glob.glob('/dev/tty.*')\n else:\n raise EnvironmentError('Unsupported platform')\n\n result = []\n for port in ports:\n try:\n s = serial.Serial(port)\n s.close()\n result.append(port)\n except (OSError, serial.SerialException):\n pass\n return result", "title": "" }, { "docid": "436a59bb8f3f1b14425ccc9fcaf638ac", "score": "0.6779296", "text": "def serial_ports():\n if sys.platform.startswith('win'):\n ports = ['COM%s' % (i + 1) for i in range(256)]\n elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n # this excludes your current terminal \"/dev/tty\"\n ports = glob.glob('/dev/tty[A-Za-z]*')\n elif sys.platform.startswith('darwin'):\n ports = glob.glob('/dev/tty.*')\n else:\n raise EnvironmentError('Unsupported platform')\n\n result = []\n for port in ports:\n try:\n s = serial.Serial(port)\n s.close()\n result.append(port)\n except (OSError, serial.SerialException):\n pass\n return result", "title": "" }, { "docid": "436a59bb8f3f1b14425ccc9fcaf638ac", "score": "0.6779296", "text": "def serial_ports():\n if sys.platform.startswith('win'):\n ports = ['COM%s' % (i + 1) for i in range(256)]\n elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n # this excludes your current terminal \"/dev/tty\"\n ports = glob.glob('/dev/tty[A-Za-z]*')\n elif sys.platform.startswith('darwin'):\n ports = glob.glob('/dev/tty.*')\n else:\n raise EnvironmentError('Unsupported platform')\n\n result = []\n for port in ports:\n try:\n s = serial.Serial(port)\n s.close()\n result.append(port)\n except (OSError, serial.SerialException):\n pass\n return result", "title": "" }, { "docid": "436a59bb8f3f1b14425ccc9fcaf638ac", "score": "0.6779296", "text": "def serial_ports():\n if sys.platform.startswith('win'):\n ports = ['COM%s' % (i + 1) for i in range(256)]\n elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n # this excludes your current terminal \"/dev/tty\"\n ports = glob.glob('/dev/tty[A-Za-z]*')\n elif sys.platform.startswith('darwin'):\n ports = glob.glob('/dev/tty.*')\n else:\n raise EnvironmentError('Unsupported platform')\n\n result = []\n for port in ports:\n try:\n s = serial.Serial(port)\n s.close()\n result.append(port)\n except (OSError, serial.SerialException):\n pass\n return result", "title": "" }, { "docid": "040cd02ab8e3b89845b24aead557cd58", "score": "0.67772615", "text": "def get_com_ports():\n ports = list(list_ports.comports())\n ports = [str(i) for i in ports]\n ports = [tuple(i.split(\" - \"))[0] for i in ports]\n return tuple(ports)", "title": "" }, { "docid": "b8bb4feb76556a52999a8b5d45ca8ac2", "score": "0.6754367", "text": "def GetPorts(self):\n return _snap.TIntH_GetPorts(self)", "title": "" }, { "docid": "f39644ac47d7c2623f52e8665d08d992", "score": "0.673655", "text": "def get_ports(self):\n from hpswitch.port import Port\n base_ports = self.snmp_get_subtree((\"dot1dBasePort\",))\n return [Port(self, base_port=int(p[1])) for p in base_ports]", "title": "" }, { "docid": "928bbcc148396d851ad2e3859affb916", "score": "0.66931367", "text": "def serial_ports():\n if sys.platform.startswith('win'):\n ports = ['COM%s' % (i + 1) for i in range(256)]\n elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n # this excludes your 
current terminal \"/dev/tty\"\n ports = glob.glob('/dev/ttyUSB*')\n elif sys.platform.startswith('darwin'):\n ports = glob.glob('/dev/tty.usbserial*')\n else:\n raise EnvironmentError('Unsupported platform')\n\n result = []\n for port in ports:\n try:\n s = serial.Serial(port)\n s.close()\n result.append(port)\n except (OSError, serial.SerialException):\n pass\n\n return result", "title": "" }, { "docid": "7b0f6fa6b7b4a9485d26cb72db32ae15", "score": "0.66920495", "text": "def get_available_ports(self):\n if sys.platform.startswith('win'):\n ports = ['COM%s' % (i + 1) for i in range(256)]\n else:\n raise EnvironmentError('Unsupported platform')\n\n available_ports = []\n for port in ports:\n try:\n s = serial.Serial(port)\n s.close()\n available_ports.append(port)\n except (OSError, serial.SerialException):\n pass\n\n print('Available ports:')\n for pindex, p in enumerate(available_ports):\n print(' [{}] - {}'.format(pindex, p))\n sel = int(input('Please select port number'))\n self.port_name = available_ports[sel]", "title": "" }, { "docid": "0f6b370b539f0d82a131393249a080ba", "score": "0.66903204", "text": "def serial_ports():\n if sys.platform.startswith('win'):\n ports = ['COM%s' % (i + 1) for i in range(256)]\n elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n # this excludes your current terminal \"/dev/tty\"\n ports = glob.glob('/dev/tty[A-Za-z]*')\n elif sys.platform.startswith('darwin'):\n ports = glob.glob('/dev/tty.usb*')\n else:\n raise EnvironmentError('Unsupported platform')\n\n return ports\n\n # Checking the ports already sends a reset signal.\n result = []\n for port in ports:\n try:\n s = serial.Serial(port)\n s.close()\n result.append(port)\n except (OSError, serial.SerialException):\n pass\n return result", "title": "" }, { "docid": "8e2a9108d9c92ce326ab9b8c7c1b053e", "score": "0.6686776", "text": "def addresses(self):\n ports = self.list_ports()\n return [port.address for port in ports]", "title": "" }, { "docid": "365a0150a75108ed213002e24ef672ee", "score": "0.66332823", "text": "def serial_ports(self):\n if sys.platform.startswith('win'):\n ports = ['COM%s' % (i + 1) for i in range(20)]\n elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n # this excludes your current terminal \"/dev/tty\"\n ports = glob.glob('/dev/tty[A-Za-z]*')\n elif sys.platform.startswith('darwin'):\n ports = glob.glob('/dev/tty.*')\n else:\n raise EnvironmentError('Unsupported platform')\n \n result = []\n for port in ports:\n try:\n s = Serial(port,timeout=2,write_timeout=2)\n print(port,end=\" \")\n s.close()\n print(\"close\")\n result.append(port)\n except (OSError, SerialException):\n # traceback.print_exc()\n print(\"Except serial_ports scan\")\n print(\"--\")\n return result", "title": "" }, { "docid": "02f7243ae2df337cbda6d773f7e36fb3", "score": "0.6626135", "text": "def scan(self):\n\t\tports = []\n\n\t\tfor i, desc, hwid in comports():\n\t\t\ttry:\n\t\t\t\ts = serial.Serial(i)\n\t\t\t\tports.append(s.portstr)\n\t\t\t\ts.close()\n\t\t\texcept serial.SerialException:\n\t\t\t\tpass\n\t\treturn ports", "title": "" }, { "docid": "403f108420b8fc883e483e37fb76e676", "score": "0.66130394", "text": "def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AlertmanagerSpecContainersPortsArgs']]]]:\n return pulumi.get(self, \"ports\")", "title": "" }, { "docid": "3f376c454c429636269ca3824b80f773", "score": "0.66066504", "text": "def get_digital_input_ports(self, buf_size=None):\n if buf_size is None:\n buf_size = default_buf_size\n buf 
= ctypes.create_string_buffer('\\000' * buf_size)\n CALL ('GetDevDIPorts', self, ctypes.byref (buf), buf_size)\n names = [n.strip() for n in buf.value.split(',') if n.strip()]\n return names", "title": "" }, { "docid": "f5ef894deda87b538b4d4e343ba69cab", "score": "0.6585692", "text": "def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ThanosRulerSpecContainersPortsArgs']]]]:\n return pulumi.get(self, \"ports\")", "title": "" }, { "docid": "26fba33f3355d2eca46d45e9d0529df2", "score": "0.65784967", "text": "def GetPorts(self):\n return _snap.TIntFltH_GetPorts(self)", "title": "" }, { "docid": "cc485ebcfcf50157a9c3ea857a1b3e75", "score": "0.6565763", "text": "def ports(self) -> dict[str, int]:\n return self._data[ATTR_PORTS]", "title": "" }, { "docid": "33bd35f7b004a3f1a08d78758f5c751c", "score": "0.6546421", "text": "def inputPorts(self):\n return [port for port in self.ports if port.isInput()]", "title": "" }, { "docid": "71f6b27b7069d5f86f9f5ef3163f6931", "score": "0.6539837", "text": "def named_ports(self) -> Optional[Sequence['outputs.ElastigroupBackendServiceNamedPort']]:\n return pulumi.get(self, \"named_ports\")", "title": "" }, { "docid": "e13d86e6e5c09d9e04a6871a818218c3", "score": "0.65388584", "text": "def parameter_ports(self):\n return self._parameter_ports", "title": "" }, { "docid": "f6a4973bf81af8ac10965abcd5f9951e", "score": "0.653214", "text": "def used_ports() -> Set[int]:\n return {connection.laddr.port for connection in psutil.net_connections()}", "title": "" }, { "docid": "fdd0ab9c91b1824d654140e0e0ce2576", "score": "0.6527808", "text": "def get_ports(entity):\n port_set = set()\n if not hasattr(entity, 'processes'):\n return port_set\n for process in entity.processes:\n to_parse = process.properties.get(\"ListeningPorts\", None)\n if to_parse is None:\n continue\n for entry in to_parse.split(' '):\n port_set.add(to_parse)\n return port_set", "title": "" }, { "docid": "9a83efd2620e4b5ce82a9466c3c883d1", "score": "0.65250707", "text": "def _getNodePortList(ctx):\n return CmdShell().run(\n f'oc get service soos-{ctx.cf.refsys.nws4.sidL}-np'\n + ' -o template --template \"{{range .spec.ports}}{{.name}}:{{.nodePort}},{{end}}\"'\n ).out", "title": "" }, { "docid": "38e1c2baebaa132fedce16b5f57aac16", "score": "0.6505147", "text": "def plist():\n list_serial_ports()", "title": "" }, { "docid": "aa95b7dc027e4578e61165fc04937c85", "score": "0.650446", "text": "def list_com_ports():\n path = 'HARDWARE\\\\DEVICEMAP\\\\SERIALCOMM'\n try:\n key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, path)\n except WindowsError:\n raise IterationError\n\n for i in itertools.count():\n try:\n val = winreg.EnumValue(key, i)\n yield str(val[1])\n except EnvironmentError:\n break", "title": "" }, { "docid": "7157998d3054af1dc1ee091ae68fd63b", "score": "0.6499259", "text": "def port_group_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"port_group_names\")", "title": "" }, { "docid": "cb609aa0e6eeefaaa07123ba49b0e065", "score": "0.6497777", "text": "def available_serial_ports():\r\n ports = []\r\n if os.name == 'nt': # Windows\r\n for port_number in range(256):\r\n # Try connecting to each port and return array of those which were successful\r\n try:\r\n s = serial.Serial('COM%i' % port_number)\r\n ports.append('COM%i' % (port_number))\r\n s.close()\r\n except serial.SerialException:\r\n pass\r\n else: # Unix (CHECK: this code needs testing)\r\n ports = sorted(glob('dev/ttyUSB*'))\r\n ports.reverse() # Reverse list so the most recently 
attached USB serial port is first in line (i.e. use [0] index to reference it)\r\n\r\n if len(ports) == 0:\r\n ports.append('No COM port')\r\n return ports", "title": "" }, { "docid": "0afbcd7e5d1fe05434f79a71ab17f624", "score": "0.64895463", "text": "def device_ports(self):\n return self.broker.device_ports(**{\"DeviceID\": self.DeviceID})", "title": "" }, { "docid": "87de6ea65b1e57c44dac7a89bc1e8aed", "score": "0.64885354", "text": "def GetPorts(self):\n return _snap.TIntPrFltH_GetPorts(self)", "title": "" }, { "docid": "7ccfde06d5ac7bbbe683073ccf72cfa2", "score": "0.6488134", "text": "def ports(self, user):\n try:\n self.c.execute(sql['ports'], (user,))\n ports = self.c.fetchall()\n return [dict(zip(cols['port'], port)) for port in ports]\n except sqlite3.Error as e:\n flash(\"Can't get ports becuase \"+str(e))", "title": "" }, { "docid": "d8a3760960c8b0ec6053c25b2409ee8c", "score": "0.6481474", "text": "def incoming_connections_ports(self) -> Sequence[str]:\n return pulumi.get(self, \"incoming_connections_ports\")", "title": "" }, { "docid": "9fce7631032c15924e38348c68f5c504", "score": "0.6471621", "text": "def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PrometheusSpecContainersPortsArgs']]]]:\n return pulumi.get(self, \"ports\")", "title": "" }, { "docid": "471c21c04b0e6ef51578be3e4e522c03", "score": "0.64675707", "text": "def GetPorts(self):\n return _snap.TIntIntH_GetPorts(self)", "title": "" }, { "docid": "09104cf61340a2b38b21e70fbfef438c", "score": "0.64511716", "text": "def list_ports(retrieve_all=True, **_params):\n return IMPL.list_ports(retrieve_all, **_params)", "title": "" }, { "docid": "5fe54c3ff4c94a7ef7e62afee6939764", "score": "0.64445174", "text": "def port_list(ctx, filter, **ids):\n id_type, id = check_id(**ids)\n request = \"%ss/%s/ports\" %(id_type, id)\n if filter == None :\n result = ctx.obj['nc'].get(request)\n else :\n result = ctx.obj['nc'].get(request, filter=filter)\n table=PrettyTable([\"ID\", \"name\", \"physicalName\"])\n for line in result:\n table.add_row([line['ID'],\n line['name'],\n line['physicalName'] ])\n print table", "title": "" }, { "docid": "19bf6bc7fab9b12afef643c51826edc3", "score": "0.64293027", "text": "def GetPorts(self):\n return _snap.TIntSet_GetPorts(self)", "title": "" }, { "docid": "4336ffc68f1e2707fcfafdc687bde4e3", "score": "0.64258575", "text": "def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ThanosRulerSpecInitContainersPortsArgs']]]]:\n return pulumi.get(self, \"ports\")", "title": "" }, { "docid": "36752afe778315248626d364f71c47f2", "score": "0.6418597", "text": "def ports(self) -> typing.List[\"NetworkPolicyPort\"]:\n return typing.cast(\n typing.List[\"NetworkPolicyPort\"],\n self._properties.get(\"ports\"),\n )", "title": "" }, { "docid": "36752afe778315248626d364f71c47f2", "score": "0.6418597", "text": "def ports(self) -> typing.List[\"NetworkPolicyPort\"]:\n return typing.cast(\n typing.List[\"NetworkPolicyPort\"],\n self._properties.get(\"ports\"),\n )", "title": "" }, { "docid": "a662e516c96bbe88b44eb45bab1609ff", "score": "0.6407919", "text": "def get_digital_output_ports(self, buf_size=None):\n if buf_size is None:\n buf_size = default_buf_size\n buf = ctypes.create_string_buffer('\\000' * buf_size)\n CALL ('GetDevDOPorts', self, ctypes.byref (buf), buf_size)\n names = [n.strip() for n in buf.value.split(',') if n.strip()]\n return names", "title": "" }, { "docid": "65ce8504229048b8ebd2cabdeb113188", "score": "0.6405314", "text": "def GetAllMappedPorts(self):\n return 
self._port_mappings", "title": "" }, { "docid": "befd80623e545f149ea0a5ad95f17896", "score": "0.6401911", "text": "def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AlertmanagerSpecInitContainersPortsArgs']]]]:\n return pulumi.get(self, \"ports\")", "title": "" }, { "docid": "c5f108f15d988c9b89027f6d443d9202", "score": "0.63919574", "text": "def get_ports(node, interfaces, oxp_url):\n ports = list()\n for interface in interfaces.values():\n port_no = interface[\"port_number\"]\n if port_no != 4294967294:\n ports.append(get_port(node, interface, oxp_url))\n\n return ports", "title": "" }, { "docid": "91347e8051553aaccb8257ac15be8efb", "score": "0.6377773", "text": "def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PrometheusSpecInitContainersPortsArgs']]]]:\n return pulumi.get(self, \"ports\")", "title": "" }, { "docid": "1120674aab0b292f404fe5d1e5aac2a1", "score": "0.6374266", "text": "def serial_port():\n if sys.platform.startswith('win'):\n ports = ['COM%s' % (i + 1) for i in range(256)]\n elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n # this excludes your current terminal \"/dev/tty\"\n ports = glob.glob('/dev/tty[A-Za-z]*')\n elif sys.platform.startswith('darwin'):\n ports = glob.glob('/dev/tty.*')\n else:\n raise EnvironmentError('Unsupported platform')\n\n result = []\n for port in ports:\n try:\n s = serial.Serial(port)\n s.close()\n result.append(port)\n except (OSError, serial.SerialException):\n pass\n return result", "title": "" }, { "docid": "1f510973d0353e7090688034b47988b7", "score": "0.6343154", "text": "def get_ports(root, my_tag):\n ports = \"\"\n exist = False\n for child in root:\n if child.tag == my_tag and child.text != \"22\":\n ports += child.text + \" \"\n exist = True\n if not exist:\n return \"\"\n return ports[:-1]", "title": "" }, { "docid": "1dedf96bb4fe9fb08d8e25a2d38cb714", "score": "0.633607", "text": "def port_name(self) -> str:\n return pulumi.get(self, \"port_name\")", "title": "" }, { "docid": "b1f53a05b4214eca9965ef206ad01ec6", "score": "0.6335258", "text": "def main():\n print get_ports()", "title": "" }, { "docid": "9522a0d76b6ce554eb94893a3ff008ca", "score": "0.6334935", "text": "def get_ports(cli, n):\n used_ports = set()\n\n containers = cli.containers()\n for container in containers:\n for port in container.get('Ports', []):\n used_ports.add(port.get('PublicPort'))\n\n ports = []\n obtained = 0\n for i in range(5000, 10000):\n if i not in used_ports:\n ports.append(i)\n obtained += 1\n\n if obtained == n:\n break\n\n return ports", "title": "" }, { "docid": "d2de68ad26d52f54c803bf64a14328a7", "score": "0.6329106", "text": "def get_ports(self, context, filters=None, fields=None):\n\n return [self._make_port_dict(p, fields)\n for p in self._list_resource('port', context, filters, fields)]", "title": "" }, { "docid": "267dd4af20611c07f1f440f4919fd2e0", "score": "0.63279223", "text": "def get_host_name_list(self):\n return [host['name'] for host in self.to_dict()['host_list']]", "title": "" }, { "docid": "10cb57e3cb298d957a090b1beff803c5", "score": "0.6299968", "text": "def get_logical_ports(self):\n response = self.get(endpoint=\"/logical-ports\")\n return response.json()['results']", "title": "" }, { "docid": "742101a550100cb529b836eb35864b24", "score": "0.6298431", "text": "def get_ports(self):\n\n res = self.spp_ctl_cli.get('nfvs/%d' % self.sec_id)\n if res is not None:\n error_codes = self.spp_ctl_cli.rest_common_error_codes\n if res.status_code == 200:\n return res.json()['ports']\n elif 
res.status_code in error_codes:\n pass\n else:\n print('Error: unknown response.')", "title": "" }, { "docid": "c67e9ad66b838e504c174cf614d2ef1a", "score": "0.6290818", "text": "def read_port_file(ports_file):\n ports = []\n for i in open(ports_file, \"r\"):\n try:\n i = i[:i.index('#')]\n except ValueError:\n pass\n ports.extend(i.split())\n return ports", "title": "" }, { "docid": "82f5e0b0c33f7e051ba8246b5042b7bb", "score": "0.62680155", "text": "def get_fe_port_list(self):\n target_uri = \"/performance/FEPort/keys\"\n port_list = []\n dir_list = self.get_fe_director_list()\n for director in dir_list:\n port_payload = ({\n \"symmetrixId\": self.array_id,\n \"directorId\": director\n })\n port_details = {}\n port_response = self.rest_client.rest_request(\n target_uri, POST, request_object=port_payload)\n for port in port_response[0]['fePortInfo']:\n port_details[port['portId']] = director\n port_list.append(port_details)\n return port_list", "title": "" }, { "docid": "8587f7bad4549726eb3df8a4c731e003", "score": "0.6259885", "text": "def get_communications_port_id_list(self) -> list:\n if self.configuration_content['communications'][\"ports\"]:\n prt_dictionary = self.configuration_content['communications'][\"ports\"]\n # format { port_id0: {}, ... port_idn: {}}\n return list(prt_dictionary.keys())\n return None", "title": "" }, { "docid": "011b96ef911592a64e3267d98284b51b", "score": "0.6249983", "text": "def get_ports(self, filters=None):\n target_uri = \"/sloprovisioning/symmetrix/%s/port\" % self.array_id\n return self.rest_client.rest_request(target_uri, GET, params=filters)", "title": "" } ]
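The negative passages in the row above are variations on one pattern for discovering usable serial ports: build a platform-specific candidate list, then keep only the ports that pyserial can actually open. A minimal, self-contained sketch of that pattern (assuming the third-party pyserial package; the function name is illustrative, not taken from the dataset):

import sys
import glob
import serial  # third-party: pip install pyserial

def list_openable_serial_ports():
    """Return the serial ports that can actually be opened on this machine."""
    if sys.platform.startswith('win'):
        candidates = ['COM%s' % (i + 1) for i in range(256)]
    elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
        # Excludes the current terminal "/dev/tty" itself.
        candidates = glob.glob('/dev/tty[A-Za-z]*')
    elif sys.platform.startswith('darwin'):
        candidates = glob.glob('/dev/tty.*')
    else:
        raise EnvironmentError('Unsupported platform')

    available = []
    for port in candidates:
        try:
            s = serial.Serial(port)  # fails for ports that are absent or already in use
            s.close()
            available.append(port)
        except (OSError, serial.SerialException):
            pass
    return available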
2e3756a1e27c85a27925bd99fe1d47a3
Initialize a new network
[ { "docid": "da32c712e706dee654950e6a6ce22ddd", "score": "0.0", "text": "def __init__(self, feature_sizes, continuous_field_size, embedding_size=4,\n hidden_dims=[128, 128], use_fm1=False, num_classes=10, dropout=[0.5, 0.5],\n use_cuda=True, verbose=False):\n super().__init__()\n self.field_size = len(feature_sizes)\n self.continuous_field_size = continuous_field_size\n self.feature_sizes = feature_sizes\n self.embedding_size = embedding_size\n self.hidden_dims = hidden_dims\n self.use_fm1 = use_fm1\n self.num_classes = num_classes\n self.dtype = torch.float\n\n \"\"\"\n check if use cuda\n \"\"\"\n if use_cuda and torch.cuda.is_available():\n self.device = torch.device('cuda')\n else:\n self.device = torch.device('cpu')\n \"\"\"\n init fm part\n \"\"\"\n\n # self.fm_first_order_embeddings = nn.ModuleList(\n # [nn.Embedding(feature_size, 1) for feature_size in self.feature_sizes])\n fm_first_order_linears = nn.ModuleList(\n [nn.Linear(feature_size, self.embedding_size) for feature_size in\n self.feature_sizes[:self.continuous_field_size]])\n fm_first_order_embeddings = nn.ModuleList(\n [nn.Embedding(feature_size, self.embedding_size) for feature_size in\n self.feature_sizes[self.continuous_field_size:]])\n self.fm_first_order_models = fm_first_order_linears.extend(\n fm_first_order_embeddings)\n\n # self.fm_second_order_embeddings = nn.ModuleList(\n # [nn.Embedding(feature_size, self.embedding_size) for feature_size in self.feature_sizes])\n fm_second_order_linears = nn.ModuleList(\n [nn.Linear(feature_size, self.embedding_size) for feature_size in\n self.feature_sizes[:self.continuous_field_size]])\n fm_second_order_embeddings = nn.ModuleList(\n [nn.Embedding(feature_size, self.embedding_size) for feature_size in\n self.feature_sizes[self.continuous_field_size:]])\n self.fm_second_order_models = fm_second_order_linears.extend(\n fm_second_order_embeddings)\n\n \"\"\"\n init deep part\n \"\"\"\n all_dims = [self.field_size * self.embedding_size] + \\\n self.hidden_dims + [self.num_classes]\n for i in range(1, len(hidden_dims) + 1):\n setattr(self, 'linear_' + str(i),\n nn.Linear(all_dims[i - 1], all_dims[i]))\n # nn.init.kaiming_normal_(self.fc1.weight)\n setattr(self, 'batchNorm_' + str(i),\n nn.BatchNorm1d(all_dims[i]))\n setattr(self, 'dropout_' + str(i),\n nn.Dropout(dropout[i - 1]))", "title": "" } ]
[ { "docid": "86cca404ef0f66cb8aeddc2e1a86b18d", "score": "0.856658", "text": "def init_network(self):\n self.net = network.Network([784, 30, 10])\n return(\"made a network yo\")", "title": "" }, { "docid": "66a311e8cd13327d8498e7017d8bf118", "score": "0.8541491", "text": "def _init_network(self):\n pass", "title": "" }, { "docid": "b2e3b5ed8d2d8b188add46d949a0669a", "score": "0.8268909", "text": "def constructNetwork():", "title": "" }, { "docid": "de50b4f15fba95b7cc11280c7a18fd16", "score": "0.80178446", "text": "def __init__(self, network) -> None:\n self._network = network", "title": "" }, { "docid": "af0b477739b953e182b91084c395c9e0", "score": "0.7903537", "text": "def _init_network(self, *args, **kwargs):\n raise NotImplementedError()", "title": "" }, { "docid": "ecc32831ba395502b1d810dd50a3afeb", "score": "0.78985554", "text": "def create_network(self):\n pass", "title": "" }, { "docid": "3013e97241413b3fd68099208669e1dd", "score": "0.7737218", "text": "def __init__(self, network):\r\n self.network = network\r\n self.nodes = {}", "title": "" }, { "docid": "e3d80cd127ccfceae7427cff9624cfa0", "score": "0.7715755", "text": "def __init__(self, config=None, io=None):\n self._network = _Cnetwork()\n\n if config:\n self.config = config\n else:\n self.config = LiteConfig()\n\n if io:\n self.network_io = io\n else:\n self.network_io = LiteNetworkIO()\n\n c_network_io = self.network_io._create_network_io()\n self._api.LITE_make_network(byref(self._network), self.config, c_network_io)", "title": "" }, { "docid": "a2c46933d0d0a9ac4ab9c6505c7739b0", "score": "0.7679225", "text": "def create_network():\n G = Network()\n return G", "title": "" }, { "docid": "25be893e5a5e43904a623fdda7176106", "score": "0.7621141", "text": "def _init_network(self, engine=None):\n return net_factory(engine)", "title": "" }, { "docid": "a362ff578da9dcf5cce379a1aa8d3cb4", "score": "0.7505893", "text": "def __init__(self):\n self.__network = Network()\n self.__membership_network = None\n self.__node_networks = []\n self.__frames = []\n self.__frames_membership = []\n self.__offsets_membership = []\n self.__offsets_node_networks = []\n self.__frames_node_networks = []\n self.__max_num_new_paths = 1\n self.__nodes_membership = []\n self.__links_membership = []\n self.__nodes_node_networks = []\n self.__links_node_networks = []\n self.__link_broken_now = None", "title": "" }, { "docid": "04479eb21cbe825a7d10628aea64b324", "score": "0.74502087", "text": "def init_state(self, network):", "title": "" }, { "docid": "f132beb954327b84101a99973cc1bff0", "score": "0.73705584", "text": "def __init__(self, network_type, prefix, args=-1):\n self.network_type = network_type\n self.prefix = prefix\n self.args = args\n self.create_net()", "title": "" }, { "docid": "1e10573b3e3c0f59e524185bff5734b6", "score": "0.7317354", "text": "def init_network(self):\n if 'network' in self.fixed_params:\n self.params['network'] = self.fixed_params['network']\n elif 'network' not in self.fixed_params:\n if self.params['network_model'] == 'erdos_renyi':\n if 'linkProbability' not in self.fixed_params: # erdos-renyi link probability\n self.params['linkProbability'] = 2 * np.log(self.params['size']) / self.params[\n 'size'] # np.random.beta(1, 1, None)*20*np.log(self.params['size'])/self.params['size']\n self.params['network'] = NX.erdos_renyi_graph(self.params['size'], self.params['linkProbability'])\n if not NX.is_connected(self.params['network']):\n self.params['network'] = NX.erdos_renyi_graph(self.params['size'], self.params['linkProbability'])\n\n 
elif self.params['network_model'] == 'watts_strogatz':\n if 'nearest_neighbors' not in self.fixed_params:\n self.params['nearest_neighbors'] = 3\n if 'rewiring_probability' not in self.fixed_params:\n self.params['rewiring_probability'] = 0.000000005\n self.params['network'] = NX.connected_watts_strogatz_graph(self.params['size'],\n self.params['nearest_neighbors'],\n self.params['rewiring_probability'])\n elif self.params['network_model'] == 'grid':\n if 'number_grid_rows' not in self.fixed_params:\n if 'number_grid_columns' not in self.fixed_params:\n (self.params['number_grid_columns'],self.params['number_grid_rows']) = \\\n random_factor_pair(self.params['size'])\n else:\n self.params['number_grid_rows'] = self.params['size'] // self.params['number_grid_columns']\n self.params['number_grid_columns'] = self.params['size'] // self.params['number_grid_rows']\n elif 'number_grid_columns' in self.fixed_params:\n assert self.params['number_grid_columns']*self.params['number_grid_rows'] == self.params['size'], \\\n 'incompatible size and grid dimensions'\n else:\n self.params['number_grid_columns'] = self.params['size'] // self.params['number_grid_rows']\n self.params['number_grid_rows'] = self.params['size'] // self.params['number_grid_columns']\n self.params['network'] = NX.grid_2d_graph(self.params['number_grid_rows'],\n self.params['number_grid_columns'])\n elif self.params['network_model'] == 'random_regular':\n if 'degree' not in self.fixed_params:\n self.params['degree'] = np.random.randint(1, 6)\n self.params['network'] = NX.random_regular_graph(self.params['degree'], self.params['size'], seed=None)\n elif self.params['network_model'] == 'newman_watts_fixed_number':\n if 'fixed_number_edges_added' not in self.fixed_params:\n self.params['fixed_number_edges_added'] = 2\n if 'nearest_neighbors' not in self.fixed_params:\n self.params['nearest_neighbors'] = 2\n self.params['network'] = newman_watts_add_fixed_number_graph(self.params['size'],\n self.params['nearest_neighbors'],\n self.params['fixed_number_edges_added'])\n elif self.params['network_model'] == 'cycle_union_Erdos_Renyi':\n if 'c' not in self.fixed_params:\n self.params['c'] = 2\n if 'nearest_neighbors' not in self.fixed_params:\n self.params['nearest_neighbors'] = 2\n self.params['network'] = cycle_union_Erdos_Renyi(self.params['size'], self.params['nearest_neighbors'],\n self.params['c'])\n\n elif self.params['network_model'] == 'c_1_c_2_interpolation':\n if 'c' not in self.fixed_params:\n self.params['c'] = 2\n if 'nearest_neighbors' not in self.fixed_params:\n self.params['nearest_neighbors'] = 2\n if 'add_long_ties_exp' not in self.fixed_params:\n self.params['add_long_ties_exp'] = np.random.exponential(scale=self.params['size'] ** 2,\n size=int(1.0 * self.params['size']\n * (self.params['size'] - 1)) // 2)\n\n self.params['remove_cycle_edges_exp'] = np.random.exponential(scale=2 * self.params['size'],\n size=self.params['size'])\n\n self.params['network'] = c_1_c_2_interpolation(self.params['size'],self.params['eta'],\n self.params['add_long_ties_exp'],\n self.params['remove_cycle_edges_exp'])\n else:\n assert False, 'undefined network type'\n\n # when considering real network and interventions on them we may need to record the original network.\n # This is currently only used in SimpleOnlyAlongOriginalEdges(ContagionModel)\n\n if 'original_network' in self.fixed_params:\n self.params['original_network'] = self.fixed_params['original_network']\n else:\n self.params['original_network'] = None\n\n # additional 
modifications / structural interventions to the network topology which include rewiring\n # and edge additions\n\n if 'rewire' not in self.fixed_params:\n self.params['rewire'] = False\n print('warning: the network will not be rewired!')\n\n if self.params['rewire']:\n\n if 'rewiring_mode' not in self.fixed_params:\n self.params['rewiring_mode'] = 'maslov_sneppen'\n print('warning: the rewiring mode is set to maslov_sneppen')\n if self.params['rewiring_mode'] == 'maslov_sneppen':\n if 'num_steps_for_maslov_sneppen_rewiring' not in self.fixed_params:\n self.params['num_steps_for_maslov_sneppen_rewiring'] = \\\n 0.1 * self.params['network'].number_of_edges() # rewire 10% of edges\n print('Warning: num_steps_for_maslov_sneppen_rewiring is set to default 10%')\n rewired_network = \\\n self.maslov_sneppen_rewiring(\n num_steps=int(np.floor(self.params['num_steps_for_maslov_sneppen_rewiring'])))\n elif self.params['rewiring_mode'] == 'random_random':\n if 'num_edges_for_random_random_rewiring' not in self.fixed_params:\n self.params['num_edges_for_random_random_rewiring'] = \\\n 0.1 * self.params['network'].number_of_edges() # rewire 10% of edges\n print('warning: num_edges_for_random_random_rewiring is set to default 10%')\n\n rewired_network = \\\n self.random_random_rewiring(\n num_edges=int(np.floor(self.params['num_edges_for_random_random_rewiring'])))\n\n self.params['network'] = rewired_network\n\n if 'add_edges' not in self.fixed_params:\n self.params['add_edges'] = False\n\n if self.params['add_edges']:\n if 'edge_addition_mode' not in self.fixed_params:\n self.params['edge_addition_mode'] = 'triadic_closures'\n if 'number_of_edges_to_be_added' not in self.fixed_params:\n self.params['number_of_edges_to_be_added'] = \\\n int(np.floor(0.15 * self.params['network'].number_of_edges())) # add 15% more edges\n\n fattened_network = add_edges(self.params['network'],\n self.params['number_of_edges_to_be_added'],\n self.params['edge_addition_mode'])\n\n self.params['network'] = fattened_network\n\n self.node_list = list(self.params['network']) # used for indexing nodes in cases where\n # node attributes are available in a list. A typical application is as follows: self.node_list.index(i)\n # for i in self.params['network'].nodes():", "title": "" }, { "docid": "0a8ee7bfe3f7ef1777bce42afa404555", "score": "0.73057395", "text": "def initNet(self, netArgs):\n # Try to initialize the network - if initialization fails, a failure\n # signal is emitted. 
Otherwise the constructor emits a success\n # signal and continues work\n try:\n # If the network object has been initialized already, delete it\n if self.network is not None:\n del self.network\n # Initialize a network manager object\n self.network = networkGUI(netArgs)\n self.initSuccess.emit(True)\n except:\n print 'Could not initialize network: ', sys.exc_info()[1]\n self.initSuccess.emit(False)\n raise\n\n # Connect signals in the network object to appropriate slots in\n # the thread so the network can send data to the GUI and let the\n # thread know when it completes a task\n self.network.signalMetrics.connect(self.relayMetrics)\n self.network.taskComplete.connect(self.stopProcess)", "title": "" }, { "docid": "1c7fc1fb4a659698ad57c755a91996cd", "score": "0.72925967", "text": "def initial_network1(self):\n net = Network()\n\n for i in range(1,9):\n net.add_switch(i,4,'host')\n for i in range(9,13):\n net.add_switch(i,4,'edge')\n for i in range(13,17):\n net.add_switch(i,5,'agg')\n for i in range(17,19):\n net.add_switch(i,6,'core')\n\n # host:edge links\n h_e = [(1,9,1),(2,9,1),(3,10,1),(4,10,1),(5,11,1),(6,11,1),(7,12,1),(8,12,1)] \n # edge:agg links\n e_a = [(13,9,1),(14,9,1),(13,10,1),(14,10,1),(15,11,1),(16,11,1),(15,12,1),(16,12,1)]\n # agg:core links\n a_c = [(13,17,2),(14,17,1),(13,18,1),(14,18,2),(15,17,2),(16,17,1),(15,18,1),(16,18,2)] \n\n for link in h_e + e_a + a_c:\n print(link)\n net.add_link(*link)\n return net", "title": "" }, { "docid": "2bb913f4b3a1e1f33f87c010fad52f5a", "score": "0.7230826", "text": "def initialize_network(self):\n self.build_random_layers(self.initial_input_node)\n self.get_semantics_initial_nodes()\n self.semantics = self.output_node.semantics\n self.fitness = self._evaluate()\n self.mutation_level += 1", "title": "" }, { "docid": "de1ad188207dafa01c1da220e8ed9dd1", "score": "0.7152254", "text": "def init_network_connection(self):\n print(self.__str__())\n\n if self.ethernet is not None:\n os.environ[\"GLOO_SOCKET_IFNAME\"] = self.ethernet\n\n dist.init_process_group(\n backend=self.dist_backend,\n init_method=\"tcp://{}:{}\".format(self.address[0], self.address[1]),\n rank=self.rank,\n world_size=self.world_size,\n )", "title": "" }, { "docid": "1b65f2f11db7618d758efcb740641edb", "score": "0.7112272", "text": "def create_network(network, controller):", "title": "" }, { "docid": "32597539692aa6aec7aa5f2c0121570e", "score": "0.7094026", "text": "def __init__(self, net):\r\n\r\n self.net = net", "title": "" }, { "docid": "61759d5e2bb791ed48736351560f1a43", "score": "0.7061307", "text": "def initialise_network_attributes(self):\n\n if self.compass_resolver is None:\n if self.compass_resolver_factory is not None:\n self.compass_resolver = self.compass_resolver_factory()\n else:\n self.compass_resolver = CompassHTTPResolver(\n username=self.compass_username,\n password=self.compass_password,\n )\n\n if self.dns_resolver is None:\n if self.dns_resolver_factory is not None:\n self.dns_resolver = self.dns_resolver_factory()\n else:\n self.dns_resolver = CheckHostNet()", "title": "" }, { "docid": "f4f6414623003c3e6b229e8f925916e6", "score": "0.7054861", "text": "def setup_net(self):\n pass", "title": "" }, { "docid": "f4f6414623003c3e6b229e8f925916e6", "score": "0.7054861", "text": "def setup_net(self):\n pass", "title": "" }, { "docid": "4269ced30f1a376b79d14a82c764b83e", "score": "0.7034718", "text": "def createNet(self):\n self.setGids() #### set global ids (gids), used to connect cells\n self.createCells()\n self.connectCells() \n self.createStims()", 
"title": "" }, { "docid": "ab40ea2547b2605e026e181c907b49a6", "score": "0.7023096", "text": "def __init__(self, networks: list) -> None:\n self.networks = networks\n self.process_network()\n self.remove_invalid_nodes()\n self.node_list = self.get_node_list()", "title": "" }, { "docid": "94a8a4fe59798b94b18ed6ab7a1240a4", "score": "0.7022394", "text": "def initialize_network(self):\n if self.threeD:\n conv_op = nn.Conv3d\n dropout_op = nn.Dropout3d\n norm_op = nn.InstanceNorm3d\n\n else:\n conv_op = nn.Conv2d\n dropout_op = nn.Dropout2d\n norm_op = nn.InstanceNorm2d\n\n for s in range(len(self.net_conv_kernel_sizes)):\n for i in range(len(self.net_conv_kernel_sizes[s])):\n self.net_conv_kernel_sizes[s][i] = 3\n\n norm_op_kwargs = {'eps': 1e-5, 'affine': True}\n dropout_op_kwargs = {'p': 0, 'inplace': True}\n net_nonlin = nn.LeakyReLU\n net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}\n\n self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,\n len(self.net_num_pool_op_kernel_sizes),\n self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,\n net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),\n self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)\n if torch.cuda.is_available():\n self.network.cuda()\n self.network.inference_apply_nonlin = softmax_helper", "title": "" }, { "docid": "66e177f7a3ae67a34bb6b5707a948edf", "score": "0.7003043", "text": "def create_network(self):\n\n mafia_network = self.models.MafiaNetwork(\n max_size=self.num_participants + 1\n ) # add a Source\n mafia_network.daytime = 'False'\n mafia_network.winner = None\n mafia_network.last_victim_name = None\n mafia_network.num_victims = 0\n mafia_network.num_rand = 0\n return mafia_network", "title": "" }, { "docid": "77e1e0ea74202af3a3dafe214afc487f", "score": "0.6998633", "text": "def construct_ln_net():\n pass", "title": "" }, { "docid": "b0280e1c3b8ec6daa19973dae281c74f", "score": "0.69922197", "text": "def initialise(self):\n #*** Reinitialise the neurons:\n self.layer1 = NeuronLayer(self.logger, self.input_neurons, self.input_variables)\n self.layer2 = NeuronLayer(self.logger, 1, self.input_neurons)\n self.neural_network = NeuralNetwork(self.logger, self.layer1, self.layer2)", "title": "" }, { "docid": "cbc3e6b435c773deb2038f3705504440", "score": "0.69660664", "text": "def __init__(self, parent, address = None):\n super(Network, self).__init__(parent)\n self.address = address", "title": "" }, { "docid": "9eefbe7e519d335db7716ab27005fbc1", "score": "0.6941582", "text": "def construct_network(self):\n self.layers = []\n num_of_layers = 0\n ind = 0\n if self.layers_list:\n l = len(self.layers_list)\n else:\n l = 0\n if l > 0:\n num_of_layers = self.layers_list[0]\n if num_of_layers > 0:\n self.layers.append(self.initialize_input_layer(ind, num_of_layers))\n ind += 1\n for i in range(l):\n if l > i+1:\n self.layers.append(self.initialize_hidden_layer(ind, \n self.layers_list[i], \n self.layers_list[i+1], \n self.layers[ind-1]))\n else:\n self.layers.append(self.initialize_hidden_layer(ind, \n self.layers_list[i], \n len(self.classes), \n self.layers[ind-1]))\n ind += 1\n else:\n self.layers.append(self.initialize_input_layer(ind, \n len(self.classes)))\n ind += 1\n self.layers.append(self.initialize_output_layer(ind, \n self.layers[ind-1]))\n ind += 1", "title": "" }, { "docid": "4cd18b0d3ab68931a5058fa335233d5e", "score": "0.6931623", "text": "def create_network(self):\n\t\tn = 
IMNN.IMNN(parameters=self.parameters)\n\t\ttf.reset_default_graph()\n\t\tn.setup(η = eta)\n\t\t\n\t\treturn n", "title": "" }, { "docid": "795395c252df21874db777ff857b6ec6", "score": "0.6897404", "text": "def init_net(self):\n # initialize the generator network\n g_net = Net(self.architecture['generator'], 'gen', FLAGS.IMAGE_FORMAT, num_class=self.num_class)\n # define layer connections in generator\n # print('---------GENERATOR DEF')\n self.Gen = Routine(g_net)\n self.Gen.add_input_layers([64, self.code_size], [0])\n self.Gen.seq_links(list(range(g_net.num_layers)))\n self.Gen.add_output_layers([g_net.num_layers - 1])\n\n # initialize the discriminator network\n # print('---------DISCRIMINATOR DEF')\n d_net = Net(self.architecture['discriminator'], 'dis', FLAGS.IMAGE_FORMAT, num_class=self.num_class)\n\n if self.stop_snorm_grads:\n d_net.stop_all_snorm_grads()\n # define layer connections in discriminator\n self.Dis = Routine(d_net)\n self.Dis.add_input_layers([64] + list(self.architecture['input'][0]), [0])\n self.Dis.seq_links(list(range(d_net.num_layers)))\n self.Dis.add_output_layers([d_net.num_layers - 1])\n\n # print('-----------SNGAN INIT NET done')\n if self.mog_model is not None:\n self.mog_model.define_tfp_mog_vars(self.do_summary)", "title": "" }, { "docid": "9476eb6fef851629c6f3ac93534d62f9", "score": "0.68844956", "text": "def initNetwork(self):\n\n for server in ['server1', 'server2']:\n try:\n host, port = self.settings[server].split(':')\n magic = self.settings['magic']\n self.protocol = JurassicProtocol(host, int(port), int(magic, 16))\n break\n except:\n continue\n else:\n print \"[-] Cannot connect to any server :(\"", "title": "" }, { "docid": "c064d4f2650482172e3aebc0abbeb116", "score": "0.68638426", "text": "def _init_graph(self):\n # self.G = nx.DiGraph(name=self.name)\n self.G = nx.DiGraph()\n self.G.add_node(self.node_id, DATA)", "title": "" }, { "docid": "43ab0bf1c3895382c35346389305c159", "score": "0.6857377", "text": "def initialize_network(self):\r\n\r\n self.net = cv2.dnn_DetectionModel(self.args.cfg, self.args.weights)\r\n self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)\r\n self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)\r\n if not self.args.input_size % 32 == 0:\r\n print('Invalid input size! Make sure it is a multiple of 32. 
Exiting..')\r\n sys.exit(0)\r\n self.net.setInputSize(self.args.input_size, self.args.input_size)\r\n self.net.setInputScale(1.0 / 255)\r\n self.net.setInputSwapRB(True)\r\n with open(self.args.namesfile, 'rt') as f:\r\n self.names = f.read().rstrip('\\n').split('\\n')", "title": "" }, { "docid": "2f3f542b9c329e82578b903ab1f99de0", "score": "0.6852003", "text": "def start_network(self):\n self.network.start()", "title": "" }, { "docid": "bb58158f9c8eb09cc86c631e62c35a0e", "score": "0.68311656", "text": "def network_create_simple(network, project):\n api.network_create(network, project, project, \"\")", "title": "" }, { "docid": "9ff432aac483191a0cbc9a8cf89a478f", "score": "0.6828528", "text": "def __init__(self, network, max_iter):\n self.network = network # Reference to the NeuronNetwork object\n self.training_size = len(self.network.training_input[:, 1]) # size of training data (number of samples)\n self.training_output = [] # Predicted labels\n self.max_iter = max_iter # number of training iterations\n self._set_initial_training_parameters()\n\n self.is_online = True", "title": "" }, { "docid": "ed4afba3886f8fe0556c8496822733e5", "score": "0.6817048", "text": "def __init__(self, nis_network_name, account_descriptor_repository=None):\n self.network = NetworkLocator.find_by_name(Network.NETWORKS, nis_network_name)\n self.account_descriptor_repository = account_descriptor_repository\n self.transaction_factory = self._create_nis_transaction_factory()", "title": "" }, { "docid": "bc392d5974b6b86e7befa35b9a458f75", "score": "0.6789209", "text": "def __init__(self,network,name):\n self.network = network\n self.name = name", "title": "" }, { "docid": "6959f4f9b73023f09b8937c971d2d600", "score": "0.6770278", "text": "def _create_network(self, name):\n network = self.network(self.num_actions, self._num_atoms, self._support,\n name=name)\n return network", "title": "" }, { "docid": "e90db7843a3441d1beb0c87a19a3a096", "score": "0.67698616", "text": "def __init__(self, netArgs=None, parent=None):\n\n QtCore.QObject.__init__(self)\n\n # First initialize the network\n mlmvn.network.__init__(self, **netArgs)\n\n self.terminate = False\n\n self.taskComplete.connect(self.__reset__)", "title": "" }, { "docid": "4f3578e476dddbab7c16f2bbb63e7238", "score": "0.6769508", "text": "def __init__(self, manager, network_config, chain_id=None, lookup_only=False,\n suffix=None):\n self.manager = manager\n if chain_id is None:\n self.name = network_config.name\n else:\n # the name itself can be either a string or a list of names indexed by chain ID\n if isinstance(network_config.name, tuple):\n self.name = network_config.name[chain_id]\n else:\n # network_config.name is a prefix string\n self.name = network_config.name + str(chain_id)\n if suffix:\n self.name = self.name + suffix\n self.segmentation_id = self._get_item(network_config.segmentation_id,\n chain_id, auto_index=True)\n self.subnet_name = self._get_item(network_config.subnet, chain_id)\n self.physical_network = self._get_item(network_config.physical_network, chain_id)\n\n self.reuse = False\n self.network = None\n self.vlan = None\n self.router_name = None\n if manager.config.l3_router and hasattr(network_config, 'router_name'):\n self.router_name = network_config.router_name\n try:\n self._setup(network_config, lookup_only)\n except Exception:\n if lookup_only:\n LOG.error(\"Cannot find network %s\", self.name)\n else:\n LOG.error(\"Error creating network %s\", self.name)\n self.delete()\n raise", "title": "" }, { "docid": 
"13e672d8b861d921dab8158ba85fe91c", "score": "0.67681575", "text": "def __init__(self):\n self.network = None\n self.communities = None\n self.authors = 0\n\n self.read_dataset()\n self.get_author_clusters()\n self.get_authors()\n self.get_author_article_counts()\n self.build_network()", "title": "" }, { "docid": "394010c81ae2e41a4a16fb8683f4fca6", "score": "0.6757352", "text": "def enable_network(self):\r\n self.init_networking()", "title": "" }, { "docid": "5f27cf9601020c5a20cd244b2f723249", "score": "0.67500454", "text": "def initialize_nodes(self):\n self.network = {name: ConsensusNode(name=name,\n lr=self.lr,\n weights=self.weights[name],\n train_loader=cycle(iter(self.train_loaders[name])),\n use_cuda=self.use_cuda,\n verbose=self.verbose)\n for name in self.node_names}\n\n for node_name, node in self.network.items():\n if self.resume_path:\n path = self.resume_path + os.sep + node_name + '.t7'\n node.set_model(self.model, resume_path=path)\n elif self.model:\n node.set_model(self.model, *self.model_args, **self.model_kwargs)\n if self.optimizer:\n node.set_optimizer(self.optimizer, *self.opt_args, **self.opt_kwargs)\n if self.error:\n node.set_error(self.error, *self.error_args, **self.error_kwargs)\n node.set_neighbors({neighbor_name: self.network[neighbor_name]\n for neighbor_name in self.weights[node_name]\n if neighbor_name != node_name})\n return self", "title": "" }, { "docid": "454f1d6b8dacaae861ff11b0618a514d", "score": "0.6739117", "text": "def initialize_graph(self):\n\n G = nx.Graph()\n G.add_nodes_from(self.nodes)\n G.add_edges_from(self.edges)\n return G", "title": "" }, { "docid": "3c4cea317dc25fa386e6ecca0f12238f", "score": "0.6737547", "text": "def _create_networks(self):\n raise NotImplementedError", "title": "" }, { "docid": "dbf77daa3edabdaafcb3cdd04c36e3f4", "score": "0.6710386", "text": "def init_network() -> typing.Dict[str, list]:\n\n network = {}\n network['W1'] = np.array([\n [0.1, 0.3, 0.5],\n [0.2, 0.4, 0.6]\n ])\n network['b1'] = np.array([0.1, 0.2, 0.3])\n network['W2'] = np.array([\n [0.1, 0.4],\n [0.2, 0.5],\n [0.3, 0.6]\n ])\n network['b2'] = np.array([0.1, 0.2])\n network['W3'] = np.array([\n [0.1, 0.3],\n [0.2, 0.4]\n ])\n network['b3'] = np.array([0.1, 0.2])\n\n return network", "title": "" }, { "docid": "bc30295f0786e1df86011144a5fae8f7", "score": "0.6691613", "text": "def __init__(self, nodecls, topo):\n self.net, self.msgs, self.nodes, self.msgcnt = {}, {}, [], 0\n\n ids = random.sample(range(10*len(topo)), len(topo))\n for node_id, label, ports in zip(ids, topo.keys(), topo.values()):\n self.msgs[label] = Queue()\n n = nodecls(node_id, self)\n self.nodes.append(n)\n n._deg, n._label = len(ports), label\n for i, to in enumerate(ports):\n js = [ j for j, n in enumerate(topo[to]) if n == label ]\n assert len(js) == 1, \"Topology is invalid -- edges must be bidirectional.\"\n self.net[(label, i)] = (to, js[0])\n print('Network initialized ({} nodes, {} edges)'.format(len(self.nodes), len(self.net)//2))", "title": "" }, { "docid": "22549d205c658f379dd669965baf3ea9", "score": "0.66901106", "text": "def __init__(self, new_network=False):\n self._connection_rate = 1\n self._learning_rate = 0.01\n self._num_input = 36\n self._num_hidden = 6\n self._num_output = 1\n self._desired_error = 0.0001\n self._max_iterations = 500\n self._iterations_between_reports = 100\n self._new_network = new_network\n self._ann = libfann.neural_net()\n\n if self._new_network:\n self._create_train_data()\n self._train_and_save()", "title": "" }, { "docid": 
"b83990f780af1f4d9dbf6da666b9f515", "score": "0.66839886", "text": "def init_net(self, n_inputs: int) -> None:\n pass", "title": "" }, { "docid": "86784dff79e196f7287bae2a886a27a3", "score": "0.6674697", "text": "def create_network(self):\n self.cli.networks.create(self.NETNAME, driver=\"bridge\", internal=True)", "title": "" }, { "docid": "353a96c8b1feeaa60e565bdc7ebd8eb6", "score": "0.66559076", "text": "def init_network(self):\n tf_map_generator = self._hyperparams['network_model']\n #print('dO, dU = ', self._dO, self._dU)\n #print('self._hyperparams = ', self._hyperparams)\n tf_map, self.solver_op, self.summary_op, self.avg_tower_loss, self.act_4prob = tf_map_generator(dim_input=self._dO, dim_output=self._dU, batch_size=self.batch_size, network_config=self._hyperparams['network_params'])\n self.obs_tensor = tf_map.get_input_tensor()\n self.action_tensor = tf_map.get_target_output_tensor()\n self.precision_tensor = tf_map.get_precision_tensor()\n self.act_op = tf_map.get_output_op()\n self.loss_scalar = tf_map.get_loss_op()", "title": "" }, { "docid": "a93214b9df6120409b9ff7986fde63ae", "score": "0.6652342", "text": "def init():\n global port, my_ip, my_position, my_coordinates, node_connections, lock\n port = 2750\n my_ip = machine_info.get_my_ip()\n my_position = NodePosition.CENTER\n my_coordinates = None\n node_connections = NodeConnections()\n lock = threading.Lock()", "title": "" }, { "docid": "6e7bf71932579d1f602e05ec955652e2", "score": "0.664299", "text": "def __init__(self, network):\n self.recv_buf = []\n self.layer_cur_step = []\n self.layer_shape = []\n '''\n initialize space to receive model from parameter server\n '''\n # consider we don't want to update the param of `BatchNorm` layer right now\n # we temporirially deprecate the foregoing version and only update the model\n # parameters\n for param_idx, param in enumerate(network.parameters()):\n #self.recv_buf.append(np.zeros(param.size()))\n _shape = param.size()\n if len(_shape) == 1:\n self.recv_buf.append(bytearray(getsizeof(np.zeros((_shape[0]*2,)))))\n else:\n self.recv_buf.append(bytearray(getsizeof(np.zeros(_shape))))\n self.layer_cur_step.append(0)\n self.layer_shape.append(_shape)", "title": "" }, { "docid": "d53b3ca205c6f29cbc094d6c0d47a205", "score": "0.6642403", "text": "def make_network(self, name, seed=None):\n net = Network(name, seed)\n self.o[name] = net\n return net", "title": "" }, { "docid": "eb94c969de3f519600fa6f40586c8628", "score": "0.6640809", "text": "def __init__(self, net_type, popsize, locality, **kwargs):\n self.net_type = net_type\n self.popsize = popsize\n self.locality = locality\n\n self.last_id = 0\n self.population = []\n\n self.__dict__.update(**kwargs)\n\n if (net_type == \"no_com\"):\n self.population = [NoCommunity(**kwargs) for i in range(popsize)]\n\n elif (net_type == 'gaussian_com'):\n try:\n # Create weights and pass to network.\n self.mweights = make_master_weights(self.com_side * self.coms_per_side,\n self.locality)\n kwargs.update({'mweights':self.mweights, 'locality':self.locality})\n\n for i in range(self.popsize):\n kwargs.update({'ID':self.last_id})\n self.population.append(GaussianCommunity(**kwargs))\n self.last_id += 1\n\n except:\n print(\"Issue with initializing network. 
Kwargs:\")\n print(kwargs)\n\n elif (net_type == 'strict_com'):\n try:\n # Create weights and pass to network.\n self.mweights = make_master_weights(self.com_side * self.coms_per_side,\n self.locality)\n kwargs.update({'mweights':self.mweights, 'locality':self.locality})\n\n for i in range(self.popsize):\n kwargs.update({'ID':self.last_id})\n self.population.append(StrictCommunity(**kwargs))\n self.last_id += 1\n\n except:\n print(\"Issue with initializing network. Kwargs:\")\n print(kwargs)", "title": "" }, { "docid": "50df7b53b6cc041f1e5206c5e4aaf6b6", "score": "0.6630923", "text": "def __init__(self, numNetworks, netArgs):\n\n QtCore.QObject.__init__(self)\n\n # netList is a python dictionary with network indices as keys, and\n # a nested dictionary as values\n self.netList = {}", "title": "" }, { "docid": "ff6d21690f0a7f7137b05be119fe2d88", "score": "0.6628946", "text": "def __init__(self, addresses):\n for address in addresses:\n self.networks.append(ipaddress.ip_network(address))", "title": "" }, { "docid": "d2d4938aa4b4e129b0b14e0a8849dead", "score": "0.6627289", "text": "def __init__(self):\n\n self.nodes = []\n self.links = []\n self.initial_node = None\n self.utterance_id = None\n self.lm_scale = None\n self.wi_penalty = None", "title": "" }, { "docid": "52d2a78792de7757067cafce6b143e87", "score": "0.6590897", "text": "def create_network(self):\r\n\r\n framework = self.net_options['framework']\r\n net_type = self.net_options['type']\r\n net = None\r\n\r\n try:\r\n module_name = 'net.' + framework.lower() + '.' + net_type.lower() + '.' + net_type.lower() + '_network'\r\n module_import = importlib.import_module(module_name)\r\n Net = getattr(module_import, net_type + 'Network')\r\n net = Net(self.net_options)\r\n except:\r\n raise SystemExit('ERROR: Invalid network selected')\r\n\r\n return net", "title": "" }, { "docid": "8e396416a8dd2a4af04662ffde4ae167", "score": "0.65795374", "text": "def setUp(self):\n params = Parameters(stepSize=.1, regularization=1e-5, decay=0, RMSProp=False, momentum=False)\n layers = [Layer(3,2,True)]\n lossFunc = Softmax()\n self.network = Network(parameters=params, layers=layers, lossFunction=lossFunc)\n #now set weights and bias to predefined values\n self.network.layers[0].weights._weights = np.array([[.22,.24,.44], [.33,.42,.68]])\n self.network.layers[0].bias._bias = np.array([.2,.3])\n self.input = np.array([.1,.2,.3])\n self.label = 1", "title": "" }, { "docid": "0deb44bd377b83184bb99b77f65a0b07", "score": "0.6543839", "text": "def network(self, network) :\n\t\ttry :\n\t\t\tself._network = network\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "57b8276cc77948896e806af1b3500f1d", "score": "0.65288484", "text": "def init_nodestructure(self):\n self._topology = self.TOPOLOGY_CLS(self._odl_client)", "title": "" }, { "docid": "f45eb59893c5b404482b15f8f19b64ba", "score": "0.65142375", "text": "def initializeNetwork (self, machineguid, jobguid = \"\", executionparams = {}):\n params =dict()\n params['machineguid'] = machineguid\n return q.workflowengine.actionmanager.startActorAction('pmachine', 'initializeNetwork', params, jobguid=jobguid, executionparams=executionparams)", "title": "" }, { "docid": "5d7dd296329da3efe9a3b58d5ee17f17", "score": "0.6509434", "text": "def __init__(self):\n super(PlautNet, self).__init__()\n self.layer1 = nn.Linear(105, 100)\n self.layer2 = nn.Linear(100, 61)\n self.init_weights()", "title": "" }, { "docid": "6193ca29bc19dca257a434113abf31af", "score": "0.649474", "text": "def initialize(self, model: 
NNCFNetwork) -> None:", "title": "" }, { "docid": "1f18794f8c727bef1571a7ca43430071", "score": "0.64842725", "text": "def instantiate_and_create_net(model):\n workspace.RunNetOnce(model.param_init_net)\n workspace.CreateNet(model.net)", "title": "" }, { "docid": "14cee98066140a0b3f0b656270e27cd5", "score": "0.64806706", "text": "def __init__(self, network, n, name=None):\n state_spec = alf.nest.map_structure(\n lambda spec: alf.TensorSpec((n, ) + spec.shape, spec.dtype),\n network.state_spec)\n name = name if name else 'naive_parallel_%s' % network.name\n super().__init__(\n network.input_tensor_spec, state_spec=state_spec, name=name)\n self._networks = nn.ModuleList(\n [network.copy(name=self.name + '_%d' % i) for i in range(n)])\n self._n = n", "title": "" }, { "docid": "e0cb8dbbab4a4c1f6514fa3524b50c53", "score": "0.64787674", "text": "def initialize(self):\n if self.operation is None:\n raise Exception('You must build the graph before initializing it')\n for node in self.params:\n node.initialize()\n return self", "title": "" }, { "docid": "931060f8956a5414bdc63b7842ff33aa", "score": "0.64696634", "text": "def network_setup():\n credentials = load_credentials()\n password_hash = pylast.md5(credentials['password'])\n network = pylast.LastFMNetwork(api_key=credentials['api_key'], api_secret=credentials['api_secret'],\n username=credentials['username'], password_hash=password_hash)\n\n return network", "title": "" }, { "docid": "2e86d7db8923ad208fa7636b4b5c40b2", "score": "0.64694303", "text": "def __init__(self, path=None, string=None):\n if path:\n self.network = etree.ElementTree(file=path).getroot().find('NETWORK')\n elif string:\n self.network = etree.fromstring(string).find('NETWORK')\n else:\n raise ValueError(\"Must specify either path or string\")\n self.variables = None\n self.edge_list = None\n self.variable_states = None\n self.variable_parents = None\n self.variable_CPD = None\n self.variable_property = None", "title": "" }, { "docid": "3cbf1fdbbb57ded0447953fe0a59bee3", "score": "0.6463862", "text": "def initialize(self):\n self._main_board_length = len(self.player_names) * self.section_length\n self._graph = nx.DiGraph()\n self._create_main_graph()\n self._create_waiting_graphs()\n self._join_waiting_graphs_to_main()\n self._create_home_graphs()\n self._join_home_graphs_to_main()", "title": "" }, { "docid": "fb4eb1f52761443b4d76d6167db25759", "score": "0.6456629", "text": "def __init__(self) -> None:\n\n self.graph = igraph.Graph()", "title": "" }, { "docid": "4593618a137c24f15f384c62fae2a6c4", "score": "0.6454317", "text": "def __init__(self, training=True, weights_path=None):\n self.net = ValueNN()\n self.net.to(device)\n if training:\n self.criterion = nn.MSELoss().to(device)\n self.optimizer = torch.optim.SGD(self.net.parameters(), lr=conf.LEARNING_RATE)\n self.net.train()\n else: # loading weights for the network\n print(\"loading network from weights...\")\n self.net.load_state_dict(torch.load(weights_path))\n self.test_mode()", "title": "" }, { "docid": "6498a222472668431f5b4e9776f671b8", "score": "0.64474833", "text": "def __initiate_network(self, n_input=4):\n network_without_bias = [n_input, *self.layer_sizes]\n self.neurons_at_layers = [n+1 for n in network_without_bias] + [1] # Add bias and output\n for i in range(len(self.neurons_at_layers) - 1):\n shape = (self.neurons_at_layers[i+1], self.neurons_at_layers[i])\n self.weights.append(np.random.random(shape) - 0.5)\n self.delta_weights.append(np.zeros(shape))\n self.prev_error = 0\n self.convergent = False", 
"title": "" }, { "docid": "5b8ef7b6a8db7719da1dda40aa7ce34d", "score": "0.6447481", "text": "def __init__(__self__, *,\n network_config: Optional[pulumi.Input['NetworkConfigArgs']] = None,\n worker_config: Optional[pulumi.Input['WorkerConfigArgs']] = None):\n if network_config is not None:\n pulumi.set(__self__, \"network_config\", network_config)\n if worker_config is not None:\n pulumi.set(__self__, \"worker_config\", worker_config)", "title": "" }, { "docid": "ecee465bb0c5d7b196e7eab0e628a3e9", "score": "0.6447148", "text": "def build_network(self):\n self.build_encoder()\n self.build_decoder()\n self.build_model()", "title": "" }, { "docid": "c57f3f5e81a3794f8cccff8d41f6e356", "score": "0.6440761", "text": "def _initialize(self):\n common.soft_variables_update(\n self._q_network_1.variables,\n self._target_q_network_1.variables,\n tau=1.0)\n common.soft_variables_update(\n self._q_network_2.variables,\n self._target_q_network_2.variables,\n tau=1.0)\n common.soft_variables_update(\n self._actor_network.variables,\n self._target_actor_network.variables,\n tau=1.0)\n\n ##TODO: override _check_trajectory_dimensions", "title": "" }, { "docid": "bc3592ca56ec87e7da98dd966bd8e311", "score": "0.64382845", "text": "def __init__(self, network, config=None, save_timing_cache=None):\n self._network = network\n self._config = util.default(config, CreateConfig())\n self.timing_cache_path = save_timing_cache", "title": "" }, { "docid": "4d5d8607b4f88a828a84440e071f83f9", "score": "0.6435027", "text": "def create_network(self):\n class_ = getattr(\n dallinger.networks,\n self.network_factory\n )\n return class_(max_size=self.num_participants + 1)", "title": "" }, { "docid": "f4b17ff4245ae8573131543e26fc97b1", "score": "0.64320236", "text": "def __init__(self, *args):\n _snap.TNEANet_swiginit(self,_snap.new_TNEANet(*args))", "title": "" }, { "docid": "89980e901f65d3cc5f92d6748694f880", "score": "0.64215994", "text": "def initialize_network(self, _training_input, _training_output):\n self.training_input = _training_input\n self.training_output = _training_output\n if self.training_input is not None and self.training_output is not None:\n #: Connect the input layer's neurons with the input data\n for i_neuron in self.neuron_layers_list[0]:\n for input_data in self.training_input[0]:\n _i_synapse = Synapse.Synapse(_input=input_data, _out=i_neuron,\n _weight=1, _mode='data')\n i_neuron.input_synapses.append(_i_synapse)\n else:\n print(\"Missing Input or Output data\")", "title": "" }, { "docid": "19d853462f8db74e5ecd3b752bf94e78", "score": "0.6417615", "text": "def set_network(self):\n network = ConfigurationSet()\n network.configuration_set_type = 'NetworkConfiguration'\n network.input_endpoints.input_endpoints.append(\n ConfigurationSetInputEndpoint('ssh', 'tcp', '22', '22'))\n self.network = network", "title": "" }, { "docid": "0c7e2dc1b403cd557ed836f9f6644811", "score": "0.6413679", "text": "def build_network(self):\n\n # loop over all the layers in the model and build the network\n for (i, layer) in enumerate(self.layer_list):\n if layer.__class__.__name__ == \"Input\":\n self.nodes.append(Node(layer))\n else:\n # initialize a node with the current layer\n node = Node(layer)\n\n # set a reference from the current node to the previous node\n node.set_prev_layer(self.nodes[i - 1])\n\n # set a reference from the previous node to the current node\n self.nodes[i - 1].set_next_layer(node)\n\n # add the node to the nodes list\n self.nodes.append(node)", "title": "" }, { "docid": 
"70b8817995bba473dd27f280c6196542", "score": "0.6410563", "text": "def network(self, network):\n\n self._network = network", "title": "" }, { "docid": "70b8817995bba473dd27f280c6196542", "score": "0.6410563", "text": "def network(self, network):\n\n self._network = network", "title": "" }, { "docid": "0996d10548c0c5aca1acce7e153a9476", "score": "0.6410139", "text": "def __init__(self, networkShape):\n self.networkShape = networkShape\n self.parametersInfoList = None\n self.inputLayer = None\n self.graph = tf.Graph()\n with self.graph.as_default():\n self.yHat = self.buildNetwork()\n self.prediction = self.getPrediction(self.yHat)\n # Init variables:\n self.sess = tf.Session()\n self.sess.run(tf.global_variables_initializer())", "title": "" }, { "docid": "e85cf210e96f7775778e30c59ea131ce", "score": "0.64070696", "text": "def mininet_from_network(self, network):\n self.topo = Topo()\n\n hosts = self.network.get_type('host')\n for sid in hosts:\n self.topo.addHost('s%d' % sid, dpid=(\"%0.2X\" % sid))\n for sid in self.network.switches.keys():\n if sid not in hosts:\n self.topo.addSwitch('s%d' % sid, dpid=(\"%0.2X\" % sid), \n protocols='OpenFlow10')\n\n for sid1, sid2 in self.network.edges.keys():\n self.addLink('s%d' % sid1, 's%d' % sid2)\n\n self.mininet = Mininet(self.topo)\n self.mininet.pingAll(timeout=1)\n\n return self.mininet", "title": "" }, { "docid": "97773bd52ac8403128f51a1ce23fd939", "score": "0.64069533", "text": "def setNetwork( self, network ):\n\t\tself.network = network", "title": "" }, { "docid": "5115f92e2cef6b9ace567501eca3eade", "score": "0.64025146", "text": "def _initialize(self):\n common.soft_variables_update(\n self._critic_network_1.variables,\n self._target_critic_network_1.variables,\n tau=1.0,\n )\n common.soft_variables_update(\n self._critic_network_2.variables,\n self._target_critic_network_2.variables,\n tau=1.0,\n )\n common.soft_variables_update(\n self._actor_network.variables,\n self._target_actor_network.variables,\n tau=1.0,\n )", "title": "" }, { "docid": "47b3f118c699be685a0c6b0fd055ca68", "score": "0.6395535", "text": "def _build_networks(self, **kwargs):", "title": "" }, { "docid": "20fae5445386ac5e8fac2f7d91e2f20b", "score": "0.63891137", "text": "def _initialize_neural_network(self, topology):\n\n # Create shallow copy of topology.\n neural_network = copy(topology)\n # Create output neuron.\n neural_network.output_layer = self._initialize_output_layer(neural_network)\n \n # Create cnn layer.\n neural_network.cnn_layers = self._initialize_cnn_layers(neural_network)\n self._connect_cnn_nodes(neural_network.sensors, neural_network.cnn_layers[0], full=True)\n\n previous_neurons = neural_network.cnn_layers[0]\n for cnn_layer in neural_network.cnn_layers[1:]:\n \"\"\" TODO: To Konrad: temporarily changed to full=True \"\"\"\n # self._connect_cnn_nodes(previous_neurons, cnn_layer, full=False)\n self._connect_cnn_nodes(previous_neurons, cnn_layer, full=True)\n previous_neurons = cnn_layer\n \n i = 0\n for layer in neural_network.cnn_layers:\n if i == 0:\n for neuron in layer:\n neuron.calculate()\n else:\n for neuron in layer:\n neuron.calculate2()\n i += 1\n\n flat_data = self._initialize_flatten_layer(neural_network)\n neural_network.flatten_layer = self._initialize_flat_sensors(flat_data)\n\n # Create hidden layer.\n neural_network.hidden_layers = self._initialize_hidden_layers(neural_network)\n # connect cnn layers\n #\n # Establish connections\n self._connect_nodes(neural_network.flatten_layer, neural_network.hidden_layers[0], random=False)\n \n 
previous_neurons = neural_network.hidden_layers[0]\n for hidden_layer in neural_network.hidden_layers[1:]:\n self._connect_nodes(previous_neurons, hidden_layer, random=True)\n previous_neurons = hidden_layer\n # Calculate hidden neurons.\n for layer in neural_network.hidden_layers:\n for neuron in layer:\n neuron.calculate()\n # Connect last neuron to output neuron with learning step.\n self._connect_learning_step(neural_network)\n # Calculate output semantics.\n for neuron in neural_network.output_layer:\n neuron.calculate()\n # Return neural network.\n return neural_network", "title": "" }, { "docid": "d0e840514dbb35d2bb2041b31e65cb37", "score": "0.63837194", "text": "def __init__(self, main_subnet, local=False, gateway=None, name='main'):\n\n self.nodes = []\n self.subnets = []\n self.overlays = []\n self.local_networks = []\n self.BGP = None\n\n self.configured = False\n\n if type(main_subnet) is str:\n main_subnet = ip_network(main_subnet)\n\n self.main_net = main_subnet\n self.local = local\n self.name = name\n self.gateway = gateway", "title": "" }, { "docid": "947ad208ae647186c49b0975e725414d", "score": "0.63786334", "text": "def __init__(self, network, config=None):\n super(CustomizeTrainer, self).__init__(network, config)", "title": "" }, { "docid": "f28381e883f7c8ec107ec181b8c2f129", "score": "0.63708943", "text": "def __init__(self):\r\n\r\n self.args = None\r\n self.net = None\r\n self.names = None\r\n\r\n self.parse_arguments()\r\n self.initialize_network()\r\n self.run_inference()", "title": "" }, { "docid": "c6aa8768900b69309b3cd759c1755ce7", "score": "0.63644964", "text": "def __init__(self, **kwargs):\n if \"dimensions\" in kwargs and \"lr\" in kwargs:\n lr = kwargs.pop(\"lr\", False)\n dimensions = kwargs.pop(\"dimensions\", False)\n self.up_nn = DirectionNet(dimensions=dimensions, lr=lr)\n self.down_nn = DirectionNet(dimensions=dimensions, lr=lr)\n self.left_nn = DirectionNet(dimensions=dimensions, lr=lr)\n self.right_nn = DirectionNet(dimensions=dimensions, lr=lr)", "title": "" }, { "docid": "cee167cba6e024a22f0906532ce56320", "score": "0.63585186", "text": "def __init__(self):\n super(NetworkManager, self).__init__()\n self.user = None\n self.contactInfo = {}\n self._file = ''\n self._locked = None\n self._lockedFile = self._file + consts.LOCKED_NOTIFIER\n self._is_local = None\n self._is_locked = False\n self._has_access = None\n self._crewListWks = 'Crew_spreadsheet_dup_test'\n self._requiredContactInfo = ['MACHINE', 'IP', 'USERNAME',\n 'USERFULLNAME', 'EMAIL', 'PHONENUMBER',\n 'ROOM', 'TIME', 'DATE']", "title": "" }, { "docid": "936a46e80816be66a3597bb901095141", "score": "0.6351992", "text": "def add_network(self, network):\n self.add_node(network, **{VERTEX_TYPE: VERTEXTYPE.NETWORK})", "title": "" } ]
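The row above pairs the query "Initialize a new network" with a PyTorch module constructor as its positive passage, while its negative passages initialize everything from graphs to socket connections. A stripped-down sketch of the neural-network reading of the query (class name and layer sizes are hypothetical; assumes PyTorch is installed):

import torch.nn as nn

class SmallNet(nn.Module):
    """A minimal network: all layers are created in __init__."""
    def __init__(self, n_inputs: int, n_hidden: int = 32, n_outputs: int = 2):
        super().__init__()
        self.body = nn.Sequential(
            nn.Linear(n_inputs, n_hidden),
            nn.ReLU(),
            nn.Linear(n_hidden, n_outputs),
        )

    def forward(self, x):
        return self.body(x)

net = SmallNet(n_inputs=8)  # constructing the object is the "initialization" step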
27df6b814fe3dd43223050a9a8f7231b
Memoize an expensive computation as a property of an object
[ { "docid": "9336f3465d63239701943e1ee62370fc", "score": "0.65158", "text": "def memoized_property(name=None):\n def memoized_decorator(f):\n @property\n @wraps(f)\n def wrapper(self):\n cached_name = name\n if name is None:\n cached_name = \"_cached_%s\" % f.__name__\n\n if not hasattr(self, cached_name):\n val = f(self)\n setattr(self, cached_name, val)\n return getattr(self, cached_name)\n return wrapper\n return memoized_decorator", "title": "" } ]
[ { "docid": "c6d11056d507385cd7ed343e3a13b958", "score": "0.7218838", "text": "def memoize(obj):\n cache = obj._cache = {}\n\n @functools.wraps(obj)\n def memoizer(*args, **kwargs):\n key = str(args) + str(kwargs)\n if key not in cache:\n cache[key] = obj(*args, **kwargs)\n return cache[key]\n\n return memoizer", "title": "" }, { "docid": "fa68787944212966ebdc309f83effb63", "score": "0.70940363", "text": "def cached_property(f):\n @wraps(f)\n def func(self):\n prefix = '__racy_internal__cached_property'\n fullname = \"_\".join([prefix, f.__name__])\n if hasattr(self, fullname):\n return getattr(self, fullname)\n res = f(self)\n setattr(self, fullname, res)\n return res\n\n\n func.__name__ = f.__name__\n #p = property(memoize(f)) \n # not memoized anymore for property -> optimized\n p = property(func)\n return p", "title": "" }, { "docid": "2fe1539a66beedf845293d55ea22c04f", "score": "0.69818157", "text": "def memoize(obj):\n cache = {}\n\n def memoizer(*args, **kwargs):\n if args not in cache:\n cache[args] = obj(*args, **kwargs)\n return cache[args]\n return memoizer", "title": "" }, { "docid": "256aeaf373f574341836a6928ecbc965", "score": "0.6970686", "text": "def memoize(*args, **kwargs):\n ...", "title": "" }, { "docid": "4290d1177a2bd3697dc8145d3ae03df1", "score": "0.6914896", "text": "def _memoize(func):\n name = func.__name__\n\n def proxy(*args, **kwargs):\n \"\"\" Proxy \"\"\"\n self, key = args[0], kwargs.pop('key', None)\n cache = self._cache # pylint: disable = protected-access\n if cache is None or key is None:\n return func(*args, **kwargs)\n lock, ckey = getattr(cache, 'lock', None), (name, key)\n if lock is None:\n lock = _global_lock\n lock.acquire()\n try:\n if ckey in cache:\n return cache[ckey]\n finally:\n lock.release()\n res = func(*args, **kwargs)\n lock.acquire()\n try:\n if ckey in cache:\n return cache[ckey]\n else:\n cache[ckey] = res\n return res\n finally:\n lock.release()\n return _util.decorating(func, extra=dict(key=None))(proxy)", "title": "" }, { "docid": "aae1d9aeb2aa3a31857f3febd3c18823", "score": "0.6911139", "text": "def memoize(f):\n\n class Memodict(dict):\n def __getitem__(self, *key):\n return dict.__getitem__(self, key)\n\n def __missing__(self, key):\n ret = self[key] = f(*key)\n return ret\n\n return Memodict().__getitem__", "title": "" }, { "docid": "18f6d6d80d8ed695e830c087378d4aaa", "score": "0.6880547", "text": "def memoize(f):\n\tclass memodict(dict):\n\t\tdef __init__(self, f):\n\t\t\tsuper().__init__()\n\t\t\tself.f = f\n\t\tdef __call__(self, *args):\n\t\t\treturn self[args]\n\t\tdef __missing__(self, key):\n\t\t\tret = self[key] = self.f(*key)\n\t\t\treturn ret\n\treturn memodict(f)", "title": "" }, { "docid": "84f091e58744cf331e9db6ce96eceb70", "score": "0.6852269", "text": "def memoized_property(f):\n\n return decorate(f, _memoized_property)", "title": "" }, { "docid": "cd6b9052003a53b7113c7b238a3b63b3", "score": "0.6826556", "text": "def memoize(f):\n f.cache = {}\n return decorate(f, _memoize)", "title": "" }, { "docid": "5c65371240a560e6cfe6dd71dbd8ef5c", "score": "0.6814973", "text": "def __call__(self, *args):\n if not args in self.memo:\n self.memo[args] = self.func(*args)\n return self.memo[args]", "title": "" }, { "docid": "a80aada9bab18d7e51b7a6b04b57439f", "score": "0.68081623", "text": "def memoize(func):\n memo = None\n\n @wraps(func)\n def wrapper(self):\n if memo is not None:\n return memo\n\n return func(self)\n\n return wrapper", "title": "" }, { "docid": "f750aecf983f0bf142f0aee31d8df74f", "score": "0.67803204", 
"text": "def memoize(f):\n\tmemory = {}\n\tdef memoized(*args):\n\t\tif args not in memory:\n\t\t\tmemory[args] = f(*args)\n\t\treturn memory[args]\n\treturn memoized", "title": "" }, { "docid": "01928a3114d1d4bdf1dd2d2fec7e21a3", "score": "0.67781186", "text": "def memoize(f):\n class memodict(dict):\n def __getitem__(self, *key):\n return dict.__getitem__(self, key)\n\n def __missing__(self, key):\n res = f(*key)\n if res:\n self[key] = res\n\n return res\n\n return memodict().__getitem__", "title": "" }, { "docid": "7e6cf9bb8e8300a4a4eddf63c44f13a4", "score": "0.67560273", "text": "def _add_to_cache(obj, name, val, *args, kwargs_pkl):\n if not hasattr(obj, '_memoize_cache'):\n obj._memoize_cache = {}\n obj._memoize_cache[name, args, kwargs_pkl] = val\n return val", "title": "" }, { "docid": "d2b1019fd386f3ca7952dc4d9a247fd1", "score": "0.67279184", "text": "def memo(method):\n def memoized(self, *args):\n try:\n results = self._memo_results\n except:\n results = dict()\n self._memo_results = results\n args = tuple(args)\n if args not in results:\n results[args] = method(self, *args)\n return results[args]\n return memoized", "title": "" }, { "docid": "921ca47150f74cda58e04c15b38c633f", "score": "0.67001665", "text": "def memoize(f):\n class memodict(dict):\n def __init__(self, f):\n self.f = f\n\n def __call__(self, *args):\n return self[args]\n\n def __missing__(self, key):\n ret = self[key] = self.f(*key)\n return ret\n return memodict(f)", "title": "" }, { "docid": "d5ee09595bea9a9de74433707161a116", "score": "0.6698434", "text": "def memoize(f):\n class memodict(dict):\n def __missing__(self, key):\n ret = self[key] = f(key)\n return ret\n return memodict().__getitem__", "title": "" }, { "docid": "7addb25230abf0c86268ad9a75ced236", "score": "0.6658525", "text": "def memoize(key, value=None):\n return cache.memoize(key, value)", "title": "" }, { "docid": "3565c8beed7dd4d5f9d5255515f1e8c1", "score": "0.6611666", "text": "def cached_property(func):\n def wrapper(self):\n if self.mass == None:\n print(\"NONE\")\n func(self)\n else:\n print('here')\n return self.mass\n return wrapper", "title": "" }, { "docid": "44f5e613b163e0bb44b8a760b2d51b2b", "score": "0.6600071", "text": "def memoize(f):\n\n class memodict(dict):\n def __init__(self, f):\n self.f = f\n\n def __call__(self, *args):\n try:\n return self[args]\n except TypeError:\n return self.f(*args)\n\n def __missing__(self, key):\n ret = self[key] = self.f(*key)\n return ret\n\n return memodict(f)", "title": "" }, { "docid": "607a0994b99e6f0b485adae5827c7637", "score": "0.65904546", "text": "def memoize(f):\n cache = {}\n @functools.wraps(f)\n def g(*args):\n ret = cache.get(args, cache)\n if ret is cache:\n ret = cache[args] = f(*args)\n return ret\n return g", "title": "" }, { "docid": "5cdc5a53c00cb8123174e343b32232f9", "score": "0.657774", "text": "def memoized(fget):\n attr_name = '_{0}'.format(fget.__name__)\n\n @wraps(fget)\n def fget_memoized(self):\n if not hasattr(self, attr_name):\n setattr(self, attr_name, fget(self))\n return getattr(self, attr_name)\n\n def fdel(self):\n if hasattr(self, attr_name):\n delattr(self, attr_name)\n\n return property(fget_memoized, fdel=fdel)", "title": "" }, { "docid": "44c986a3a0f73fdb09f6dfc4c67d2e95", "score": "0.65674144", "text": "def sync_memoize(f):\n # TODO: Think about an f that is recursive\n memory = {}\n lock = Lock()\n\n @wraps(f)\n def new_f(*args):\n try:\n return memory[args]\n except KeyError:\n # on cache misses, retry with lock held\n with lock:\n try:\n return 
memory[args]\n except KeyError:\n r = f(*args)\n memory[args] = r\n return r\n return new_f", "title": "" }, { "docid": "138d1e5b0d60bcbb34193f307b3d3c58", "score": "0.6546849", "text": "def memoize(decorate_function: Callable):\n return lru_cache(maxsize=None)(decorate_function)", "title": "" }, { "docid": "5640f6a357c223ccc751be6301146ac3", "score": "0.6531133", "text": "def memoize(func):\n cache = func.cache = {}\n\n @functools.wraps(func)\n def memoized_func(*args, **kwargs):\n key = str(args) + str(kwargs)\n\n if key not in cache:\n cache[key] = func(*args, **kwargs)\n\n return cache[key]\n\n return memoized_func", "title": "" }, { "docid": "34293eb95ce57c070ce7a2081eca22e1", "score": "0.6480143", "text": "def memo(f):\n cache = {}\n def memoized(*args):\n if args not in cache:\n cache[args] = f(*args)\n return cache[args]\n return memoized", "title": "" }, { "docid": "2bd511ac03da38433cc586d7b38b02ee", "score": "0.6477747", "text": "def memoize(func):\n cache = {}\n def wrapper(*args):\n try:\n return cache[args]\n except KeyError:\n result = func(*args)\n cache[args] = result\n return result\n return wrapper", "title": "" }, { "docid": "bdde4560290e9f8cef19020975cad4a9", "score": "0.644858", "text": "def memoize(func):\n cache = func.cache = {}\n\n @wraps(func)\n def memoizer(*args, **kwargs):\n if args not in cache:\n cache[args] = func(*args, **kwargs)\n return cache[args]\n return memoizer", "title": "" }, { "docid": "863ff4fc675f9636ea7c722722c7c1dd", "score": "0.6437929", "text": "def memorized(f):\n\n\n cache = {}\n\n @wraps(f)\n def wrapped(*args):\n try:\n results = cache[args]\n except KeyError:\n results = cache[args] = f(*args)\n return results\n return wrapped", "title": "" }, { "docid": "6c67ceb807f9faecda7a30f2e9bb7aa2", "score": "0.6424694", "text": "def memoize(f):\n # TODO: Recommend that f's arguments be immutable\n memory = {}\n\n @wraps(f)\n def new_f(*args):\n try:\n return memory[args]\n except KeyError:\n r = f(*args)\n memory[args] = r\n return r\n\n return new_f", "title": "" }, { "docid": "430171ca418d2805cf372316f87a7891", "score": "0.6420544", "text": "def memo(f):\r\n\tcache = {}\r\n\tdef _f(*args):\r\n\t\ttry:\r\n\t\t\treturn cache[args]\r\n\t\texcept KeyError:\r\n\t\t\tresult = f(*args)\r\n\t\t\ttry:\r\n\t\t\t\tcache[args] = result\r\n\t\t\texcept TypeError: # args refuses to be a dict key\r\n\t\t\t\tpass\r\n\t\t\treturn result\r\n\t_f.cache = cache\r\n\treturn _f", "title": "" }, { "docid": "613ed68f32841f69e904d706a54987b9", "score": "0.6417699", "text": "def memoized_property(fget):\n attr_name = '_{0}'.format(fget.__name__)\n @functools.wraps(fget)\n def fget_memoized(self):\n if not hasattr(self, attr_name):\n setattr(self, attr_name, fget(self))\n return getattr(self, attr_name)\n return property(fget_memoized)", "title": "" }, { "docid": "fdfec2bff6827be0c5a1880f0d200027", "score": "0.6414022", "text": "def memo(f):\n cache = {}\n\n def memoized(n):\n if n not in cache:\n cache[n] = f(n)\n return cache[n]\n\n return memoized", "title": "" }, { "docid": "d188e5060835683d33e048422d85b979", "score": "0.6386564", "text": "def memo(f):\n cache = {}\n\n def f_hitra(*args):\n \"\"\"Ta dela, ker ima cache definiran zunaj.\"\"\"\n if args in cache:\n return cache[args]\n else:\n y = f(f_hitra, *args)\n cache[args] = y\n return y\n\n # Rezultat je pohitrena funkcija f\n return f_hitra", "title": "" }, { "docid": "98fa6927e156a24d2842d1af39f56998", "score": "0.63694507", "text": "def wrapper(arg):\n if arg not in memo:\n memo[arg] = func(arg)\n return 
memo[arg]", "title": "" }, { "docid": "8590e2711ba5f156606778ff77827802", "score": "0.63688105", "text": "def memoize(wrapped):\n cache_name = wrapped.__name__.rstrip('_') + '__cache'\n\n def decorator(inst, item):\n try:\n cache = getattr(inst, cache_name)\n except AttributeError:\n cache = {}\n setattr(inst, cache_name, cache)\n if item not in cache:\n cache[item] = wrapped(inst, item)\n return cache[item]\n return functools.update_wrapper(decorator, wrapped)", "title": "" }, { "docid": "6d00f38c6716f25ff4e7c98c59538dad", "score": "0.6356869", "text": "def memoize(f):\n\n cache = {}\n\n @functools.wraps(f)\n def memf(*args, **kwargs):\n fkwargs = frozenset(kwargs.items())\n if (args, fkwargs) not in cache:\n cache[args, fkwargs] = f(*args, **kwargs)\n return cache[args, fkwargs]\n return memf", "title": "" }, { "docid": "d96c1127990c51565883bc7c73cd41f5", "score": "0.63550866", "text": "def test_memoize_fast():\n time = timeit.timeit(\n setup=\"from tests.test_wkr import fib, wkr; \", stmt=\"fib(13)\", number=10000\n )\n mtime = timeit.timeit(\n setup=(\"from tests.test_wkr import fib, wkr; \" \"mfib = wkr.memoize(fib)\"),\n stmt=\"mfib(13)\",\n number=10000,\n )\n assert mtime < time", "title": "" }, { "docid": "036517bb9897de6473b30c353370b3aa", "score": "0.6350123", "text": "def memoize(func):\n memo = {}\n def helper(arg):\n # pylint: disable=locally-disabled, missing-docstring\n if arg not in memo:\n memo[arg] = func(arg)\n return memo[arg]\n return helper", "title": "" }, { "docid": "ede114bea1aec6d43ed78257419fed8e", "score": "0.6348796", "text": "def memoized(func):\n cache = {}\n\n @functools.wraps(func)\n def wrapper(*args):\n if not isinstance(args, abc.Hashable):\n # args is not cacheable. just call the function.\n return func(*args)\n if args in cache:\n return cache[args]\n else:\n value = func(*args)\n cache[args] = value\n return value\n return wrapper", "title": "" }, { "docid": "262ef59265170f0c1f465f203ed03937", "score": "0.6343583", "text": "def memoize(fn):\n cache = {}\n def newfn(*args, **kw):\n key = (tuple(args), tuple(sorted(kw.items())))\n if key in cache:\n return cache[key]\n else:\n cache[key] = val = fn(*args, **kw)\n return val\n newfn.__name__ = fn.__name__ + ' (MEMOIZED)'\n newfn.__module__ = fn.__module__\n return newfn", "title": "" }, { "docid": "aaf49ff668390180ac53b3ed3ceb9abc", "score": "0.6321407", "text": "def memoize_func(func):\n @functools.wraps(func)\n def verbose_func_call(*args, **kwargs):\n sentinel = object()\n key = get_hash(args, kwargs)\n result = cache.get(key, sentinel)\n if result is not sentinel:\n return result\n result = func(*args, **kwargs)\n with memoize_lock:\n if memoize_none or result is not None:\n cache[key] = result\n if dump_freq is not None and next(counter) % dump_freq == 0:\n save_pickle(cache, backend_fpath)\n return result\n\n return verbose_func_call", "title": "" }, { "docid": "34502181554f1bc5c55dadc3361686d3", "score": "0.6319846", "text": "def memoize(obj, bound=False):\n # this is declared not to be a bound method, so just attach new attr to obj\n if not bound:\n obj.cache = {}\n CACHE_REGISTRY.append(obj.cache)\n\n @functools.wraps(obj)\n def memoizer(*args, **kwargs):\n if not bound:\n key = (hashable(args), hashable(kwargs))\n cache = obj.cache\n else:\n # bound methods have self as first argument, remove it to compute key\n key = (hashable(args[1:]), hashable(kwargs))\n if not hasattr(args[0], \"_cache\"):\n setattr(args[0], \"_cache\", collections.defaultdict(dict))\n # do not add to cache registry\n 
cache = getattr(args[0], \"_cache\")[obj.__name__]\n if key not in cache:\n cache[key] = obj(*args, **kwargs)\n\n return cache[key]\n\n return memoizer", "title": "" }, { "docid": "b42b801dac41c2d6d2560ed7f2342419", "score": "0.63184536", "text": "def memoizer(fn):\n\tcache = {}\n\tdef inner(*args, **kwargs):\n\t\tkey = (*args, frozenset(kwargs.items()))\n\t\tif key not in cache:\n\t\t\tcache[key] = fn(*args, **kwargs)\n\t\treturn cache[key]\n\treturn inner", "title": "" }, { "docid": "6b5271d22b3e959d65e0642c8a82e36a", "score": "0.630527", "text": "def _cache(func):\n def inner(self):\n key = func.__name__\n if key in self._cache:\n return self._cache[key]\n value = func(self)\n self._cache[key] = value\n return value\n return inner", "title": "" }, { "docid": "17931f823465e3c7f537c89e62d34c77", "score": "0.63001806", "text": "def test_memoized_property_value():\n c = DummyClass()\n assert c.memoized_property == 5", "title": "" }, { "docid": "81fe18bc1b75ae788c72a57d7f83370a", "score": "0.6293932", "text": "def memo(f):\n cache = {}\n def _f(*args):\n # i.e. running this with goal = 40, the cache had over 61000 entries!\n try:\n return cache[args]\n except KeyError:\n cache[args] = result = f(*args)\n return result\n except TypeError:\n # some element of args can't be a dict key\n return f(args)\n return _f", "title": "" }, { "docid": "1fbf26669fc5f4968d6e6b462f2b9c0c", "score": "0.6287342", "text": "def memoize(f, cache={}):\n\n @wraps(f)\n def g(*args, **kwargs):\n key = ( f, repr(args), repr(kwargs.items()) )\n if key not in cache:\n cache[key] = f(*args, **kwargs)\n return cache[key]\n \n return g", "title": "" }, { "docid": "3037f923ae9e1e6225018a2c534ae37e", "score": "0.62840956", "text": "def memo(func):\n cache = {}\n\n def wrapped(*args):\n if args not in cache:\n cache[args] = func(*args)\n return cache[args]\n return wrapped", "title": "" }, { "docid": "48508d3affb01a97f9f98a483d4ce391", "score": "0.6280775", "text": "def memoization_decorator(func):\n @wraps(func)\n def wrapper_decorator(*args, **kwargs):\n number = args[0]\n if number in func.square_cache.keys():\n return func.square_cache[number]\n square = func(*args, **kwargs)\n func.square_cache[number] = square\n return square\n return wrapper_decorator", "title": "" }, { "docid": "044370ad50d3430eb4cc970a18e57cb0", "score": "0.62683415", "text": "def cached_property(f, name=None):\n if name is None:\n name = f.__name__ \n def _get(self):\n try:\n return self.__dict__[name]\n except KeyError:\n value = f(self)\n self.__dict__[name] = value \n return value\n update_wrapper(_get, f)\n def _set(self, value):\n self.__dict__[name] = value\n return property(_get, _set)", "title": "" }, { "docid": "986c64e4a3938062325330dba5574da5", "score": "0.6266044", "text": "def cached_property(func):\n\n @wraps(func)\n def fget(self):\n if not hasattr(self, '_cached'):\n self._cached = {}\n return self._cached.setdefault(cached_name, func(self))\n\n cached_name = func.__name__\n return property(fget=fget)", "title": "" }, { "docid": "78c9503b7b3b805fda808fbbca9eaa62", "score": "0.62650454", "text": "def memoize_result(self, optree, value, combinatorial=None):\n if combinatorial is None:\n combinatorial = value.combinatorial\n self.memoization_map[(optree, combinatorial)] = value\n return value", "title": "" }, { "docid": "a581c81d08001d48b16bab35cf6a0c36", "score": "0.62540996", "text": "def memo(f):\n m = {}\n def func(x):\n if x in m: return m[x]\n m[x] = f(x)\n return m[x]\n return func", "title": "" }, { "docid": 
"6e5ca2bd7bf9ef18eeafdd6b938c59d9", "score": "0.62513506", "text": "def fastcachedmethod(func):\n\n class memodict(dict):\n __slots__ = ()\n\n def __missing__(self, key):\n self[key] = ret = func(instance, key)\n return ret\n\n sentinel = object()\n memo = memodict()\n memoget = memo.__getitem__\n instance = sentinel\n\n @functools.wraps(func)\n def _fast_cached_method_wrapper(inst, arg):\n nonlocal instance\n instance = inst\n return memoget(arg)\n\n _fast_cached_method_wrapper.cache_clear = memo.clear\n _fast_cached_method_wrapper.cache_size = memo.__len__\n _fast_cached_method_wrapper.cache_view = lambda: MappingProxyType(memo)\n _fast_cached_method_wrapper._cache = memo\n\n return _fast_cached_method_wrapper", "title": "" }, { "docid": "6ab61f1afddff8cbb6eeb030f77abd19", "score": "0.62489897", "text": "def _add_to_cache_ignore_args(obj, name, val):\n if not hasattr(obj, '_memoize_cache'):\n obj._memoize_cache = {}\n obj._memoize_cache[name] = val\n return val", "title": "" }, { "docid": "34fc537430af48ad9ac3d50bdf0caa6f", "score": "0.6248298", "text": "def memoizeSingle(f):\n class memodict(dict):\n\n def __missing__(self, key):\n ret = self[key] = f(key)\n return ret\n return memodict().__getitem__", "title": "" }, { "docid": "4ca5cfeb7a14b26666b674093039ad28", "score": "0.6244705", "text": "def memoize(fn):\n cache = {}\n\n @wraps(fn)\n def wrap(*args, **kwargs):\n if args:\n key_args = frozenset(args)\n else:\n key_args = None\n if kwargs:\n key_kwargs = frozenset(kwargs.items())\n else:\n key_kwargs = None\n\n key = (key_args, key_kwargs)\n if key not in cache:\n cache[key] = fn(*args, **kwargs)\n return cache[key]\n\n return wrap", "title": "" }, { "docid": "f768865b0df37f1fd878c4d142e5095b", "score": "0.62413955", "text": "def memo(f):\n cache = {}\n\n def _f(*args):\n try :\n return cache[args]\n except KeyError:\n cache[args] = result = f(*args)\n return result\n except TypeError:\n return f(args)\n return _f", "title": "" }, { "docid": "2d20afa0b8b8ff8c4900ba1f644fdb75", "score": "0.62316614", "text": "def compute(self) -> any:\n if self.cache is not None:\n return self.cache\n else:\n args, kwargs = self.up_propagate()\n try:\n res = self.method(*args, *kwargs)\n except Exception as _:\n raise TaskRuntimeError(\n self.method.__name__, args, kwargs, traceback.format_exc()\n )\n self.cache = res\n return res", "title": "" }, { "docid": "af9db7d495a07b7ac437a1b798d5213a", "score": "0.62197274", "text": "def memoize(self, obj):\n\n # The Pickler memo is a dictionary mapping object ids to 2-tuples\n # that contain the Unpickler memo key and the object being memoized.\n # The memo key is written to the pickle and will become\n # the key in the Unpickler's memo. The object is stored in the\n # Pickler memo so that transient objects are kept alive during\n # pickling.\n\n # The use of the Unpickler memo length as the memo key is just a\n # convention. 
The only requirement is that the memo values be unique.\n # But there appears no advantage to any other scheme, and this\n # scheme allows the Unpickler memo to be implemented as a plain (but\n # growable) array, indexed by memo key.\n if self.fast:\n return\n assert id(obj) not in self.memo\n idx = len(self.memo)\n self.write(self.put(idx))\n self.memo[id(obj)] = idx, obj", "title": "" }, { "docid": "348615f01654f2379bbf90cd1b575bdc", "score": "0.62056226", "text": "def memoize(f):\n import pickle\n\n count = [0]\n cache = {}\n\n def g(*args, **kwargs):\n count[0] += 1\n try:\n try:\n if len(kwargs) != 0:\n raise ValueError\n hash(args)\n key = (args,)\n except:\n key = pickle.dumps((args, kwargs))\n if key not in cache:\n cache[key] = f(*args, **kwargs)\n return cache[key]\n finally:\n count[0] -= 1\n if count[0] == 0:\n cache.clear()\n\n return g", "title": "" }, { "docid": "08588f6ff636a8fab5d2cb5a80e98ee3", "score": "0.6203517", "text": "def memoize(f):\n memo = {}\n\n def helper(*args):\n if args not in memo:\n memo[args] = f(*args)\n return memo[args]\n return helper", "title": "" }, { "docid": "6ddd3d8f6144dd2cbe8814c44bac4d73", "score": "0.62009245", "text": "def memo(f):\n cache = {}\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n cache[args] = result = f(*args)\n return result\n except TypeError: # Some element in args cannot be hashed\n return f(args)\n return _f", "title": "" }, { "docid": "a5f19db087e36f201d545f216dbb79b3", "score": "0.61929035", "text": "def memoized_method(name=None):\n def memoized_decorator(f):\n @wraps(f)\n def wrapper(self, *args):\n cached_name = name\n if name is None:\n cached_name = \"_cached_%s\" % f.__name__\n\n if not hasattr(self, cached_name):\n setattr(self, cached_name, {})\n\n if args not in getattr(self, cached_name):\n val = f(self, *args)\n getattr(self, cached_name)[args] = val\n return getattr(self, cached_name)[args]\n return wrapper\n return memoized_decorator", "title": "" }, { "docid": "fc14985d1551844c654ca20d651f74fc", "score": "0.6188132", "text": "def flask_memoized(f):\n\t@wraps(f)\n\tdef dec(*args, **kwargs):\n\t\tif not hasattr(g,f.__qualname__):\n\t\t\tsetattr(g,f.__qualname__,{}) #create the memoization space; this is inside the decorator because it has to be redone on each request\n\t\t\n\t\tmemos = getattr(g,f.__qualname__)\n\t\t\n\t\tk = (args, tuple(sorted(kwargs.items())))\n\t\tif k not in memos:\n\t\t\tmemos[k] = f(*args, **kwargs)\n\t\treturn memos[k]\n\n\treturn dec", "title": "" }, { "docid": "cfacb5ddfd2cbf7d45d2923aae82ffc5", "score": "0.6184293", "text": "def threadedmemoize(fn):\n import threading\n cache = {}\n def newfn(*args):\n now = time.time()\n t = threading.currentThread().getName()\n key = (t, args)\n if key in cache:\n return cache[key]\n else:\n #logger.debug('Memoizing %s with key=%s (%d entries in cache)' % (fn.__name__, key, len(cache)))\n val = fn(*args)\n cache[key] = val\n return val\n newfn.__name__ = fn.__name__ + ' (THREAD MEMOIZED)'\n newfn.__module__ = fn.__module__\n return newfn", "title": "" }, { "docid": "929438ab1436dad565d797cd808ff740", "score": "0.61788654", "text": "def memoize(function):\n call_cache = {}\n\n def memoized(argument):\n try:\n return call_cache[argument]\n except KeyError:\n return call_cache.setdefault(\n argument, function(argument)\n )\n\n return memoized", "title": "" }, { "docid": "0d0cdd7f38c79ddb9d235938fb1dce55", "score": "0.6167253", "text": "def memoizer(op_tree, be, next_error=None):\n key = (op_tree.key(), be, next_error)\n if key not 
in cache:\n cache[key] = func(op_tree, be, next_error)\n # print 'created grad_tree cache'\n return cache[key]", "title": "" }, { "docid": "1f2def0dd48d7915395964a3db8e25d3", "score": "0.61651605", "text": "def _get_memoized(n, c, t, apply_rule, memoization_table):\n key = n.tobytes()\n if key in memoization_table:\n return memoization_table[key]\n else:\n result = apply_rule(n, c, t)\n memoization_table[key] = result\n return result", "title": "" }, { "docid": "7bfa45e3161e6e2a0de21bb14a8d3b8a", "score": "0.6161281", "text": "def with_memoize(self, memoize):\n self.memoize = memoize", "title": "" }, { "docid": "97e6c7729bd24eb1cafebda6c384178c", "score": "0.6138381", "text": "def memoized(f):\n d = {}\n def wrapper(w1, w2):\n if (w1, w2) not in d:\n d[w1, w2] = f(w1, w2)\n return d[w1, w2]\n return wrapper", "title": "" }, { "docid": "815deb439d7ea3bfca888c89b3b76771", "score": "0.6100081", "text": "def __init__(self, func):\n self.func = func\n self.memo = {}", "title": "" }, { "docid": "326e1249988b08656ae2254f02f3a3fb", "score": "0.60987014", "text": "def memorize(f):\r\n\r\n cache = {}\r\n def g(*args):\r\n if args not in cache:\r\n cache[args] = f(*args)\r\n return cache[args]\r\n return g", "title": "" }, { "docid": "a12a0d72ed7c74c1c91dfbd42934d99f", "score": "0.60844547", "text": "def memoize_autodiff(func):\n cache = {}\n\n @wraps(func)\n def memoizer(op_tree, be, next_error=None):\n \"\"\"\n If params in the caches, return results directly. Othewise, add to cache\n and return the results.\n \"\"\"\n key = (op_tree.key(), be, next_error)\n if key not in cache:\n cache[key] = func(op_tree, be, next_error)\n # print 'created grad_tree cache'\n return cache[key]\n return memoizer", "title": "" }, { "docid": "b2f04426f3467eed2b2d8be17a1b0f10", "score": "0.6066361", "text": "def memo(f):\n cache = {}\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n cache[args] = result = f(*args)\n return result\n except TypeError:\n # some element of args can't be a dict key\n return f(args)\n _f.cache = cache\n return _f", "title": "" }, { "docid": "3455742d5f16f09436fde0a97a06219a", "score": "0.6063351", "text": "def memoizei(meth):\n def wrapper(meth, self, *args, **kwargs):\n if hasattr(meth, '_memoize_keyfunc'):\n key = meth, fun._memoize_keyfunc(*args, **kwargs)\n else:\n key = meth, args, frozenset(kwargs.iteritems())\n\n if not hasattr(self, '__cache'):\n self.__cache = {}\n\n if not key in self.__cache:\n # Here, python __-rules help us out for once.\n try:\n\n self.__cache[key] = meth(self, *args, **kwargs)\n except:\n # Always invalidate cache on exception.\n self.__cache.pop(key, None)\n raise\n\n return self.__cache[key]\n\n return decorator(wrapper, meth)", "title": "" }, { "docid": "d998c0befa88f28514d3498f5b8ac1e9", "score": "0.6061806", "text": "def memoize(func):\n # type: (Callable) -> Callable\n cache = {} # type: Dict[Tuple[Any, ...], Any]\n\n @functools.wraps(func)\n def _inner(*key):\n # type: (*Any) -> Any\n \"\"\"Check the cache for the key.\n\n Args:\n key: The arguments to the function used to cache the results.\n\n Returns:\n The results of the function.\n \"\"\"\n if key not in cache:\n cache[key] = func(*key)\n return cache[key]\n\n return _inner", "title": "" }, { "docid": "d4e602e1bcc5f7ca5d0ddb7a6c761edc", "score": "0.60508245", "text": "def memoize(self, memoize):\n\n self._memoize = memoize", "title": "" }, { "docid": "a84dff877983acbe181b4e2c6dc0515a", "score": "0.60294646", "text": "def memoized(function):\n\n class Memoizer(object):\n\n 
__instance_memos = weakref.WeakKeyDictionary()\n\n def __call__(self, *args):\n try:\n return self.__memo[args]\n except KeyError:\n self.__memo[args] = result = self.__function(*args)\n return result\n\n def __get__(self, owner, owner_type=None):\n if owner is None:\n return self\n instance_memo = self.__get_instance_memo(owner)\n instance_method = functools.partial(self.__function, owner)\n return Memoizer(instance_method, instance_memo)\n\n def __init__(self, function, memo):\n self.__function = function\n self.__memo = memo\n\n def __get_instance_memo(self, owner):\n try:\n return self.__instance_memos[owner]\n except KeyError:\n self.__instance_memos[owner] = result = {}\n return result\n\n return six.wraps(function)(Memoizer(function, {}))", "title": "" }, { "docid": "72c0a866300c534705ab3c10859bad7b", "score": "0.6015462", "text": "def get_cached_value(self, obj):\n return obj.__dict__[self.prop.name]", "title": "" }, { "docid": "d22907283f83f55f40d7b67dd2d2e014", "score": "0.60052717", "text": "def evaluate_cached(self, verbose=None):\n if not hasattr(self, 'result') or self.result is None:\n self.result = self.evaluate()\n logger.debug(str.format(\"Evaluating: {0} -> {1}\", str(self), str(self.result)))\n return self.result", "title": "" }, { "docid": "6a44a95d63572b04bda467f8b9f2b646", "score": "0.59997576", "text": "def calc_or_read_from_cache(self, attr_name_with_args):\n attr_pieces = attr_name_with_args.split('__') \n attr_name = attr_pieces[0]\n args = attr_pieces[1:]\n\n if not attr_name in self._cache:\n fn = getattr(self, f\"calc_{attr_name}\")\n value = fn(*args)\n self._cache[attr_name_with_args] = value\n return self._cache[attr_name_with_args]", "title": "" }, { "docid": "d57e5b00f0b5b9a67eed4dc2cbf916a1", "score": "0.59810805", "text": "def cached(f):\n cached_arg = cached_res = 0\n\n def drake(arg):\n nonlocal cached_arg, cached_res\n\n if arg != cached_arg:\n cached_arg, cached_res = arg, f(arg)\n\n return cached_res\n\n return drake", "title": "" }, { "docid": "dbb3ea3ce27b9476338d7547ed90bb63", "score": "0.5943517", "text": "def _memoize(pulseFunc):\n # namespacce the cache so we can access (and reset) from elsewhere\n _memoize.cache = {}\n\n @wraps(pulseFunc)\n def cacheWrap(*args, **kwargs):\n if kwargs:\n return pulseFunc(*args, **kwargs)\n key = (pulseFunc, args)\n if key not in _memoize.cache:\n _memoize.cache[key] = pulseFunc(*args)\n return _memoize.cache[key]\n\n return cacheWrap", "title": "" }, { "docid": "d31504ff3423e8fef357bf01239211bf", "score": "0.59187615", "text": "def memo(f):\n f.cache = {}\n @wraps(f)\n def wrapper(*args):\n try:\n return f.cache[args]\n except KeyError:\n f.cache[args] = result = f(*args)\n return result\n except TypeError:\n # some element of args can't be a dict key\n return f(args)\n return wrapper", "title": "" }, { "docid": "e92724390211698c6a68fe5da5314dbf", "score": "0.59152794", "text": "def __call__(self):\n self.compute()\n return self.field", "title": "" }, { "docid": "9054584684523656a0caa5bdf542a4c7", "score": "0.5914639", "text": "def cacheProperty(getter, attr_name, fdel=None, doc=None):\n def fget(obj):\n val = None\n\n if hasattr(obj, attr_name):\n val = getattr(obj, attr_name)\n # print \"cacheProperty: retrieving cache: %s.%s = %s\" % (obj, attr_name, val)\n\n if val is None:\n # print \"cacheProperty: running getter: %s.%s\" % (obj, attr_name)\n val = getter(obj)\n # print \"cacheProperty: caching: %s.%s = %s\" % (obj, attr_name, val)\n setattr(obj, attr_name, val)\n return val\n\n def fset(obj, val):\n # 
print \"cacheProperty: setting attr %s.%s=%s\" % (obj, attr_name, val)\n setattr(obj, attr_name, val)\n\n return property(fget, fset, fdel, doc)", "title": "" }, { "docid": "44932a485cac390de82fc61fe532989b", "score": "0.58992124", "text": "def memo(f):\n\tcache = {}\n\tdef _f(*args):\n\t\tprint (\"[\", args, \"] ==> \", end=\"\")\n\t\tprint (cache)\n\t\ttry:\n\t\t\treturn cache[args]\n\t\texcept KeyError:\n\t\t\tcache[args] = result = f(*args)\n\t\t\treturn result\n\t\texcept TypeError:\n\t\t\t# some element of args can't be a dict key\n\t\t\treturn f(args)\n\n\treturn _f", "title": "" }, { "docid": "0febd12e8739fc02d735a65f2301bc0c", "score": "0.58986074", "text": "def memo(f):\n cache = {}\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n cache[args] = result = f(*args)\n return result\n except TypeError:\n # some element of args can't be a dict key\n return f(args)\n\n return _f", "title": "" }, { "docid": "b4e4e7116d3ed460b54f1767867eb501", "score": "0.5895773", "text": "def cache_result(wrapped):\n def wrapper(obj):\n rv = wrapped(obj)\n\n def replacement(*args, **kwargs):\n return rv\n\n obj.__dict__[wrapped.__name__] = replacement\n return rv\n\n return wrapper", "title": "" }, { "docid": "b74dbf2a5dc60950288cae4554ce83be", "score": "0.58904946", "text": "def memo(f):\n cache = {}\n\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n cache[args] = result = f(*args)\n return result\n except TypeError:\n # some element of args can't be a dict key\n return f(args)\n\n return _f", "title": "" }, { "docid": "b74dbf2a5dc60950288cae4554ce83be", "score": "0.58904946", "text": "def memo(f):\n cache = {}\n\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n cache[args] = result = f(*args)\n return result\n except TypeError:\n # some element of args can't be a dict key\n return f(args)\n\n return _f", "title": "" }, { "docid": "70af2a99cce5f496fbf614473d43e210", "score": "0.58837813", "text": "def test_memoized_property_run_count():\n c = DummyClass()\n _ = c.memoized_property\n _ = c.memoized_property\n _ = c.memoized_property\n assert c.memoized_property_run_count == 1", "title": "" }, { "docid": "2b03c3ef99212c194aae73787c8224ea", "score": "0.58831424", "text": "def memo(f):\n cache = {}\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n cache[args] = result = f(*args)\n return result\n except TypeError:\n # some element of args can't be a dict key\n return f(args)\n return _f", "title": "" }, { "docid": "2b03c3ef99212c194aae73787c8224ea", "score": "0.58831424", "text": "def memo(f):\n cache = {}\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n cache[args] = result = f(*args)\n return result\n except TypeError:\n # some element of args can't be a dict key\n return f(args)\n return _f", "title": "" }, { "docid": "bdf7a8b49d3e33ec5a0fff9c6f5a88bc", "score": "0.5871235", "text": "def __call__(self, func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n key = self.make_key(args, kwargs)\n\n # If result is already in cache, just retrieve it and update its timings\n with self.lock:\n result = self.cache.get(key, self.default)\n if result is not self.default:\n del self.cache[key]\n self.cache[key] = result\n self.stats['hit'] += 1\n return result\n\n # The result was not found in cache: evaluate function\n result = func(*args, **kwargs)\n\n # Add the result to cache\n with self.lock:\n self.stats['miss'] += 1\n if key in self.cache:\n pass\n elif self.is_full:\n self.cache.popitem(last=False)\n self.cache[key] = 
result\n else:\n self.cache[key] = result\n self.is_full = (len(self.cache) >= self.maxsize)\n return result\n\n wrapper.__name__ = func.__name__\n wrapper.cache = lambda: self.cache\n wrapper.stats = lambda: self.stats\n wrapper.reset = self.reset\n return wrapper", "title": "" }, { "docid": "43b3e354e8da52f7c642d086f2370142", "score": "0.5854616", "text": "def memoizeSeveral(f):\n class memodict(dict):\n\n def __init__(self, f):\n self.f = f\n\n def __call__(self, *args):\n return self[args]\n\n def __missing__(self, key):\n ret = self[key] = self.f(*key)\n return ret\n return memodict(f)", "title": "" }, { "docid": "24f4d0aff0f35eeb9c37495806ff16d0", "score": "0.5846088", "text": "def cached_property(**kwargs):\r\n def decorator(function):\r\n @wraps(function)\r\n def wrapper(self):\r\n key = 'fandjango.%(model)s.%(property)s_%(pk)s' % {\r\n 'model': self.__class__.__name__,\r\n 'pk': self.pk,\r\n 'property': function.__name__\r\n }\r\n\r\n cached_value = cache.get(key)\r\n\r\n delta = timedelta(**kwargs)\r\n\r\n if cached_value is None:\r\n value = function(self)\r\n cache.set(key, value, delta.days * 86400 + delta.seconds)\r\n else:\r\n value = cached_value\r\n\r\n return value\r\n return wrapper\r\n return decorator", "title": "" }, { "docid": "3778729718c63b3bed3ebe6bd258e871", "score": "0.58374053", "text": "def cache(f):\n cache_data = {}\n\n def inner(n):\n if n not in cache_data:\n cache_data[n] = f(n)\n return cache_data[n]\n return inner", "title": "" } ]
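For illustration, here is a minimal, runnable sketch of the memoized-property pattern that the positive passage of the query above defines. The decorator factory is reproduced from that passage; the Report class and its total computation are hypothetical stand-ins for an expensive computation and are not part of the corpus.

    from functools import wraps


    def memoized_property(name=None):
        # Decorator factory reproduced from the positive passage above.
        def memoized_decorator(f):
            @property
            @wraps(f)
            def wrapper(self):
                cached_name = name
                if name is None:
                    cached_name = "_cached_%s" % f.__name__
                if not hasattr(self, cached_name):
                    # Run the expensive computation once and stash the result
                    # on the instance under the derived attribute name.
                    setattr(self, cached_name, f(self))
                return getattr(self, cached_name)
            return wrapper
        return memoized_decorator


    class Report:
        # Hypothetical class used only to illustrate the decorator.
        @memoized_property()
        def total(self):
            print("computing...")            # printed only on the first access
            return sum(range(10 ** 6))       # stands in for an expensive computation


    r = Report()
    _ = r.total   # computes and caches the value as r._cached_total
    _ = r.total   # returns the cached value without recomputing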
934b19fce364275d066951c97d409937
Tests the default credential values, and also the AttributeError mechanism.
[ { "docid": "77e6af9a04b26d3bd156e277a5cd3c28", "score": "0.79476535", "text": "def test_credential_default_values():\n creds = Credentials()\n assert creds.url is None\n assert creds.token is None\n assert creds.org_key is None\n assert creds.ssl_verify\n assert creds.ssl_verify_hostname\n assert creds.ssl_cert_file is None\n assert not creds.ssl_force_tls_1_2\n assert creds.proxy is None\n assert not creds.ignore_system_proxy\n assert creds.integration is None\n with pytest.raises(AttributeError):\n assert creds.notexist is None", "title": "" } ]
[ { "docid": "922fa2958aad9658ef8a568eeae6abc2", "score": "0.7252466", "text": "def test_attribute_defaults(self):\n creds = NokiaCredentials()\n self.assertEqual(creds.access_token, None)\n self.assertEqual(creds.token_expiry, None)\n self.assertEqual(creds.token_type, None)\n self.assertEqual(creds.token_expiry, None)\n self.assertEqual(creds.user_id, None)\n self.assertEqual(creds.client_id, None)\n self.assertEqual(creds.consumer_secret, None)", "title": "" }, { "docid": "7b29ddfcea8784c605dcda879d43f259", "score": "0.70410264", "text": "def test_002_check_default_openstack_credential_usage(self):\n cluster_data = {\n 'password': self.config.identity.admin_password,\n 'username': self.config.identity.admin_username}\n\n default_data = {\n 'password': 'admin',\n 'username': 'admin'}\n\n self.verify_response_body_not_equal(\n exp_content=default_data,\n act_content=cluster_data,\n msg='Default credentials values are used. '\n 'We kindly recommend that you changed all defaults.',\n failed_step='1')", "title": "" }, { "docid": "08106e738d35ccb9ac2a8d54b2d9c359", "score": "0.66565436", "text": "def test_credential_boolean_parsing_failure():\n init_dict = {\"url\": \"http://example.com\", \"ssl_verify\": \"bogus\"}\n with pytest.raises(CredentialError):\n Credentials(init_dict)", "title": "" }, { "docid": "008fe64c82c3a689a9d0ded5c472b9be", "score": "0.6631332", "text": "def test_credential_partial_loads():\n init_dict = {\"url\": \"http://example.com\", \"ssl_verify\": 0}\n creds = Credentials(init_dict)\n assert creds.url == \"http://example.com\"\n assert creds.token is None\n assert creds.org_key is None\n assert not creds.ssl_verify\n assert creds.ssl_verify_hostname\n assert creds.ssl_cert_file is None\n assert not creds.ssl_force_tls_1_2\n assert creds.proxy is None\n assert not creds.ignore_system_proxy\n assert creds.integration is None", "title": "" }, { "docid": "0d9ff8d6d292815ba7a39e378775eb56", "score": "0.63536394", "text": "def test_mdb_get_unknown_credential(self):\n res = self.mdb.get_credential(1234567890)\n self.assertEqual(res, None)", "title": "" }, { "docid": "75c9710a5906c0bbf3f7937a024e5a9e", "score": "0.62575865", "text": "def test_init_wrong_scope(self):\n self.assertRaises(errors.Credentials,\n credentials.Credentials, 'priv.json', scope='fake')", "title": "" }, { "docid": "0bb266b9f0d65658efeb0705a86a7221", "score": "0.6251009", "text": "def test_validate_credentials(self):\n pass", "title": "" }, { "docid": "7452b8d702db9699521b69d829bb87ad", "score": "0.6237961", "text": "def test_creds_not_found():\n assert_equal(find_credentials({'foo': 'bar'}), (None, None))", "title": "" }, { "docid": "1750fe22f08febab0f3d5b956a08f646", "score": "0.6233923", "text": "def test_003_check_default_keystone_credential_usage(self):\n\n usr = self.config.master.keystone_user\n pwd = self.config.master.keystone_password\n url = 'http://{0}:5000/v2.0'.format(self.config.nailgun_host)\n\n try:\n keystone = keystoneclient(username=usr,\n password=pwd,\n auth_url=url)\n keystone.authenticate()\n except k_exceptions.Unauthorized:\n pass\n else:\n self.fail('Step 1 failed: Default credentials '\n 'for keystone on master node were not changed')", "title": "" }, { "docid": "0dfae483772538059a7c0edbb8a05d1f", "score": "0.6206313", "text": "def test_init(self):\n self.assertEqual(self.new_credential.app_name, \"MySpace\")\n self.assertEqual(self.new_credential.account_name, \"Ghostke99\")\n self.assertEqual(self.new_credential.account_password, \"daimaMkenya001\")", "title": "" }, { "docid": 
"e80d2ee74517c2e1cc4a474f7eadd860", "score": "0.61281824", "text": "def test_missing_credentials(self):\n twine = Twine(source=self.VALID_CREDENTIALS_TWINE)\n with self.assertRaises(exceptions.CredentialNotFound):\n twine.validate_credentials()", "title": "" }, { "docid": "113a90e0caf5889b23052a558166d0de", "score": "0.6118668", "text": "def test_attributes(self):\n creds = NokiaCredentials(access_token=1, token_expiry=2, token_type=3,\n refresh_token=4, user_id=5, client_id=6,\n consumer_secret=7)\n assert hasattr(creds, 'access_token')\n self.assertEqual(creds.access_token, 1)\n assert hasattr(creds, 'token_expiry')\n self.assertEqual(creds.token_expiry, 2)\n assert hasattr(creds, 'token_type')\n self.assertEqual(creds.token_type, 3)\n assert hasattr(creds, 'refresh_token')\n self.assertEqual(creds.refresh_token, 4)\n assert hasattr(creds, 'user_id')\n self.assertEqual(creds.user_id, 5)\n assert hasattr(creds, 'client_id')\n self.assertEqual(creds.client_id, 6)\n assert hasattr(creds, 'consumer_secret')\n self.assertEqual(creds.consumer_secret, 7)", "title": "" }, { "docid": "019b1eb0eddba4262b6c907c663d8a67", "score": "0.606647", "text": "def check_credentials(self, cli_credentials, default_prompt, enable_prompt, logger):\n raise NotImplementedError(\"Class {} must implement method 'check_credentials'\".format(type(self)))", "title": "" }, { "docid": "234bc426f9a35a1e909daf78189dfb2e", "score": "0.60594296", "text": "def test_init(self):\n\n self.assertEqual(self.new_credential.account_name,\"Facebook\")\n self.assertEqual(self.new_credential.user_name,\"Yvonnah Bonswuh\")\n self.assertEqual(self.new_credential.email_address,\"ivonnahbonswuh@gmail.com\")\n self.assertEqual(self.new_credential.password,\"ivy1996\")", "title": "" }, { "docid": "6119217b7716bfcba5f178207bdaf7ee", "score": "0.605713", "text": "def test_no_password_getter(self):\n self.user.password = '123456'\n with self.assertRaises(AttributeError):\n self.user.password", "title": "" }, { "docid": "ebdd6b12d196ee8a49f557ee8eb88c1a", "score": "0.60392857", "text": "def test_getcredentials_failed_netrc(netrc):\n netrc.side_effect = FileNotFoundError(\"\")\n server = KasServer()\n assert not server._username\n assert not server._password", "title": "" }, { "docid": "d914203a9b7f4cb34ff76ee23c407098", "score": "0.5999532", "text": "def test_database_properties_credential_username_none(self):\n\n instance = WpConnection(db_host=None, db_name='wp_default', credentials=WpCredentials.from_username_and_password(None, 'password'))\n\n self.assertIsInstance(instance, WpConnection)\n self.assertIsNone(instance.db_host)\n self.assertIsNone(instance.credentials)", "title": "" }, { "docid": "3450a5515c4898664b85507d3168b476", "score": "0.5982549", "text": "def credentials(self):\n return True", "title": "" }, { "docid": "1c6fd09c4fbdc0791aa38f504cc8eb80", "score": "0.59579635", "text": "def password(self):\n return AttributeError(\"password is not readeble attribute.\")", "title": "" }, { "docid": "dbcf8b8c87e1eb517cfee2a7a57f98a2", "score": "0.59405875", "text": "def test_credentials(self):\r\n data = self._deep_clean('zekebarge@gmail.com')\r\n error = data.get(ERROR_CODE, None)\r\n if error in (1,2):\r\n raise InvalidCredentialsError(\"Credentials are invalid for user '{}'\".format(self._username))\r\n return True", "title": "" }, { "docid": "1470c4ce2230689c3ee019f8574fc454", "score": "0.59301525", "text": "def test_display_credentials(self):\n\n self.assertEqual(Credentials.display_credentials(),Credentials.credential_list)", 
"title": "" }, { "docid": "5026d654517593bf7b421baec144f4e2", "score": "0.5917263", "text": "def test_aiven_creds_exist(self):\n assert os.environ[\"AIVEN_API_URL\"] is not None\n assert os.environ[\"AIVEN_TOKEN\"] is not None", "title": "" }, { "docid": "4c67e5bcdd1921f71bc5828d0ce4bb41", "score": "0.5886223", "text": "def test_is_authentication_disabled_negative_test():\n os.environ['DISABLE_AUTHENTICATION'] = ''\n assert not is_authentication_disabled()\n\n os.environ['DISABLE_AUTHENTICATION'] = '0'\n assert not is_authentication_disabled()\n\n os.environ['DISABLE_AUTHENTICATION'] = 'false'\n assert not is_authentication_disabled()\n\n os.environ['DISABLE_AUTHENTICATION'] = 'False'\n assert not is_authentication_disabled()\n\n os.environ['DISABLE_AUTHENTICATION'] = 'FALSE'\n assert not is_authentication_disabled()", "title": "" }, { "docid": "b2731063dbbe702b2e25effc5aafa720", "score": "0.58410805", "text": "def test__init__(self) :\n self.assertEqual(self.new_credential.accountName, \"snapchat\")\n self.assertEqual(self.new_credential.email, \"chat@gmail.com\")\n self.assertEqual(self.new_credential.password, \"chat001\")", "title": "" }, { "docid": "db48aeef4ffc672e2797f1105871e36c", "score": "0.5829114", "text": "def test_parameter_user_invalid(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Should throw exceptions\n mock_pwnam.side_effect = KeyError()\n self.configuration.hgst_space_user = ''\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)\n self.configuration.hgst_space_user = 'Fred!`'\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)", "title": "" }, { "docid": "9b8b906972286b951a9a8f2c384b5240", "score": "0.5819311", "text": "def test_init(self):\n self.assertEqual(self.new_cred.account_name, 'github')\n self.assertEqual(self.new_cred.username, 'Lugaga')\n self.assertEqual(self.new_cred.password, 'tangodown!')", "title": "" }, { "docid": "4ee739ed9f4476367abaacb912576de4", "score": "0.5817904", "text": "def set_credentials():", "title": "" }, { "docid": "bc9bb77a0a2dda6e0d3b5d732fd6f0b2", "score": "0.58153665", "text": "def test_credential_exists(self):\n self.new_credentials.save_attributes()\n test_credential = Credentials(\"Instagram\", \"@zephonmakale\", \"123456\" )\n test_credential.save_attributes()\n\n credential_exist = Credentials.credentials_exist(\"Instagram\")\n self.assertTrue(credential_exist)", "title": "" }, { "docid": "7287e257f674d2b36446ae71814b88f8", "score": "0.58142143", "text": "def _make_sure_credentials_are_set(self):\n if self.backend_options:\n if not os.environ.get('APCA_API_KEY_ID') and \\\n self.backend_options['key_id']:\n os.environ['APCA_API_KEY_ID'] = self.backend_options['key_id']\n if not os.environ.get('APCA_API_SECRET_KEY') and \\\n self.backend_options['secret']:\n os.environ['APCA_API_SECRET_KEY'] = self.backend_options[\n 'secret']\n if not os.environ.get('APCA_API_BASE_URL') and \\\n self.backend_options['base_url']:\n os.environ['APCA_API_BASE_URL'] = self.backend_options[\n 'base_url']", "title": "" }, { "docid": "baa4b78d9e929e556cc2237ab9658381", "score": "0.57792187", "text": "def test_simple_auth_error(self):\n client = LDAPClient(self.url)\n client.set_credentials(\"SIMPLE\", (\"cn=wrong\", \"wronger\"))\n self.assertRaises(bonsai.AuthenticationError, client.connect)", "title": "" }, { "docid": "4972f236038293083ae10e5736036860", "score": "0.5777719", "text": "def 
test_is_authentication_disabled_positive_test():\n os.environ['DISABLE_AUTHENTICATION'] = '1'\n assert is_authentication_disabled()\n\n os.environ['DISABLE_AUTHENTICATION'] = 'True'\n assert is_authentication_disabled()\n\n os.environ['DISABLE_AUTHENTICATION'] = 'true'\n assert is_authentication_disabled()\n\n os.environ['DISABLE_AUTHENTICATION'] = 'TRUE'\n assert is_authentication_disabled()", "title": "" }, { "docid": "fff8000ee8a18fd4e0f6f9ea2713b5da", "score": "0.5773105", "text": "def test_display_all_credentials(self):\n\n\n self.assertEqual(Credential.display_credentials(),Credential.credential_list)", "title": "" }, { "docid": "e3da4bfec545b784a431608c35b6c9a9", "score": "0.5772571", "text": "def test_default_auth_methods(mp_config_file):\n with custom_mp_config(mp_config_file):\n check.is_in(\"env\", default_auth_methods())\n check.is_in(\"msi\", default_auth_methods())\n check.is_in(\"cli\", default_auth_methods())\n check.is_in(\"interactive\", default_auth_methods())", "title": "" }, { "docid": "92d4a7a844da51549cd099bec49a121a", "score": "0.57647365", "text": "def test_init(self):\n self.assertEqual(self.new_credentials.account,\"Instagram\")\n self.assertEqual(self.new_credentials.username,\"bensongathu\")\n self.assertEqual(self.new_credentials.password,\"vcxz4321\")", "title": "" }, { "docid": "1f59102ffbb82c845159be8765c31b3c", "score": "0.57398874", "text": "def test_attributes(self):\n self.assertEqual(self.client.host, self.test_host)\n self.assertEqual(self.client.auth.host, self.test_host)", "title": "" }, { "docid": "a778da52f1bfb63a8273e2bac5b3c3e7", "score": "0.5736868", "text": "def test_credential_exist(self):\n self.new_credentials.save_creds()\n account_found = Credentials.search_by_account(\"Instagram\")\n\n self.assertTrue(account_found)", "title": "" }, { "docid": "df7f02c179bd63979b79ba1f4cff37c8", "score": "0.57193005", "text": "def test_init(self):\n self.assertEqual(self.new_credential.view_password,\"winnie\")\n self.assertEqual(self.new_credential.account,\"facebook\")\n self.assertEqual(self.new_credential.login,\"deinawinnie\")\n self.assertEqual(self.new_credential.password,\"winnie\")", "title": "" }, { "docid": "741b7ff1e3edfb5dbb2e9b74825c6a06", "score": "0.57016605", "text": "def _basic_auth_credentials(self) -> tuple[str, str] | None:\n return None", "title": "" }, { "docid": "2b02f4a963e3a34094c04e555f69facb", "score": "0.5700085", "text": "def test_scenarios_that_should_raise_errors(self, kwargs, auth):\n try:\n auth.load_creds(**kwargs)\n # raises ValueError (zero length field name in format) for python 2.6\n # OSError for the rest\n except (OSError, ValueError):\n pass\n except Exception as e:\n pytest.fail(\"Unexpected exception thrown: %s\" % e)\n else:\n pytest.fail(\"OSError exception not thrown.\")", "title": "" }, { "docid": "2d407f2b7a70920dff2e2c394f260984", "score": "0.56845844", "text": "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "title": "" }, { "docid": "2d407f2b7a70920dff2e2c394f260984", "score": "0.56845844", "text": "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "title": "" }, { "docid": "3cfdf86497b92daa7391a736512dab8b", "score": "0.567597", "text": "def test_base_props(self):\n\n self.assertTrue(hasattr(settings, \"PROJECT_PATH\"))\n self.assertTrue(hasattr(settings, \"DATABASE_PATH\"))\n self.assertTrue(hasattr(settings, \"EMAIL_HOST\"))\n self.assertTrue(hasattr(settings, \"EMAIL_FROM\"))\n self.assertTrue(hasattr(settings, 
\"DAYS_TO_ACTIVATE\"))\n self.assertTrue(hasattr(settings, \"MAX_PWD_TRIES\"))", "title": "" }, { "docid": "0166558ff34622f36b2a2ae8f8fd7fc8", "score": "0.56480384", "text": "def test_credentials(self):\n twine = Twine(source=self.VALID_CREDENTIALS_TWINE)\n with mock.patch.dict(\n os.environ,\n {\"SECRET_THE_FIRST\": \"a value\", \"SECRET_THE_SECOND\": \"another value\", \"SECRET_THE_THIRD\": \"value\"},\n ):\n twine.validate_credentials()\n self.assertEqual(os.environ[\"SECRET_THE_THIRD\"], \"value\")", "title": "" }, { "docid": "e0b44244653a012e0798ae0f16218edc", "score": "0.5647626", "text": "def test_credential_create(self):\n self.new_credential.credential_create()\n self.assertEqual(len(Credentials.credentials_list), 1)", "title": "" }, { "docid": "62e25eeffc46e6454e50e4175cabc298", "score": "0.56387085", "text": "def test_environment_credentials(main_container):\n # Check for credential variables.\n # These are not required for pre-built images.\n assert (\n \"FOUNDRY_USERNAME\" in os.environ\n ), \"FOUNDRY_USERNAME was not in the environment\"\n assert (\n \"FOUNDRY_PASSWORD\" in os.environ\n ), \"FOUNDRY_PASSWORD was not in the environment\"", "title": "" }, { "docid": "b20b944764cd08e65fc1b2889f7f9d88", "score": "0.56306344", "text": "def test_display_cred(self):\n self.assertEqual(Credentials.display_cred(), Credentials.cred_list)", "title": "" }, { "docid": "c919cbb65118db59149ddf1a676f8851", "score": "0.5609055", "text": "def testBadKeys(self):\n # Ignore access to protected members\n # pylint: disable=W0212\n self.assertRaises(DOLAPI._DOLAPIError,\n self.badauth.table,\n self.dataset,\n self.table)", "title": "" }, { "docid": "d75b3936a2191e68a0f6df90eec81049", "score": "0.56036836", "text": "def test_get_accessor_raises_exception_if_not_created_and_no_uname_password(\n RallyAccessor):\n assert_raises(Exception, get_accessor)\n assert_false(RallyAccessor.called)", "title": "" }, { "docid": "c3f896919a2684167b94176f9d2b9bec", "score": "0.55997646", "text": "def setUp(self):\n # instantiate an object by populating with dummy values.\n self.new_credential = Credentials(\"MySpace\", \"Ghostke99\", \"daimaMkenya001\")", "title": "" }, { "docid": "46e72b34355dc7fc86a48f9d9c0e9c98", "score": "0.55847275", "text": "def test_default_setting():\n assert get_default_password_validators() == []", "title": "" }, { "docid": "b73ceb15309bee70d3cc524f52fb2c8b", "score": "0.55753314", "text": "def test_get_credentials_from_keyring_if_not_in_keyring(\n self, mock_keyring):\n mock_keyring.get_keyring.return_value = True\n return_values = {'username': None}\n\n def side_effect(_, arg):\n if arg is None:\n raise TypeError('NoneType instead of str')\n return return_values[arg]\n\n mock_keyring.get_password.side_effect = side_effect\n credentials = get_credentials_from_keyring('TestPlatform')\n self.assertEqual(credentials, None)", "title": "" }, { "docid": "4dae21fa62db652a754c070325686128", "score": "0.5568543", "text": "def test_blank_user(self):\n with self.assertRaises(ConfigError) as cm:\n imageroller.main.read_authconfig(\n imageroller.test.get_config_parser(self._blank_user))\n self.assertEqual(str(cm.exception), \"AuthConfig must contain ApiUser\")", "title": "" }, { "docid": "e1c73e8246aa8025d312de112d353ba0", "score": "0.55682105", "text": "def test_user1_method2():\n assert u.email == USER_CREDENTIALS[\"email\"], \"Email was not assigned correctly\"\n assert u.password is not None, \"Password was not assigned correctly\"", "title": "" }, { "docid": "55e72122a26f008880de7b4c37f37bbf", 
"score": "0.5559024", "text": "def test_authenticate_random_credentials(self):\n \n self.assertRaises(\n TypeError, \n self.authenticator.authenticate,\n foo='bar'\n )", "title": "" }, { "docid": "7a189204660598885059b682381c1d22", "score": "0.5554075", "text": "def test_authenticate_no_credentials(self):\n \n self.assertRaises(\n ValueError, \n self.authenticator.authenticate\n )", "title": "" }, { "docid": "b6f3bba9b47315ad59083ed82c7f9318", "score": "0.555269", "text": "def test_parameter_redundancy_invalid(self, mock_ghn, mock_grnam,\n mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Should throw exceptions\n self.configuration.hgst_redundancy = ''\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)\n self.configuration.hgst_redundancy = 'Fred'\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)", "title": "" }, { "docid": "8e9ab115ae99966b860299822724cafb", "score": "0.5545691", "text": "def test_find_credentials(self):\n self.new_credentials.save_attributes()\n test_credential = Credentials(\"Instagram\", \"@zephonmakale\", \"123456\")\n test_credential.save_attributes()\n\n found_credential = Credentials.find_credentials(\"Instagram\")\n\n self.assertEqual(found_credential.account, test_credential.account)", "title": "" }, { "docid": "53fb7b7a18ce4dbd12c3f1799d6a6e9b", "score": "0.55332446", "text": "def test_invalid_secrets(self):\n s = SecretsChecker(stage='dev')\n # Override the email field obtained from terraform\n s.email = ['nonsense']\n with self.assertRaises(ValueError):\n s.run()", "title": "" }, { "docid": "9ec64c4075f5ba022f62b8c83d670d11", "score": "0.5506902", "text": "def test_discover_no_cli_creds(self):\n entry = mock.MagicMock(user=None, password=None, enable_password=None)\n vendor = mock.MagicMock()\n vendor_settings = mock.MagicMock()\n self.networking_handler._get_cli_credentials = mock.MagicMock(return_value=None)\n\n # act\n result = self.networking_handler.discover(entry=entry,\n vendor=vendor,\n vendor_settings=vendor_settings)\n\n # verify\n self.assertEqual(result, entry)\n self.assertEqual(entry.comment, \"Unable to discover device user/password/enable password\")\n self.assertIsNone(entry.user)\n self.assertIsNone(entry.password)\n self.assertIsNone(entry.enable_password)", "title": "" }, { "docid": "4f26816c1bddaa9eb0da016154dcbac2", "score": "0.5504723", "text": "def test_credential_dict_value_load(input_dict):\n creds = Credentials(input_dict)\n assert creds.url == \"http://example.com\"\n assert creds.token == \"ABCDEFGH\"\n assert creds.org_key == \"A1B2C3D4\"\n assert not creds.ssl_verify\n assert not creds.ssl_verify_hostname\n assert creds.ssl_cert_file == \"foo.certs\"\n assert creds.ssl_force_tls_1_2\n assert creds.proxy == \"proxy.example\"\n assert creds.ignore_system_proxy\n assert creds.integration == 'Bronski'\n assert creds.get_value(CredentialValue.URL) == \"http://example.com\"\n assert creds.get_value(CredentialValue.TOKEN) == \"ABCDEFGH\"\n assert creds.get_value(CredentialValue.ORG_KEY) == \"A1B2C3D4\"\n assert not creds.get_value(CredentialValue.SSL_VERIFY)\n assert not creds.get_value(CredentialValue.SSL_VERIFY_HOSTNAME)\n assert creds.get_value(CredentialValue.SSL_CERT_FILE) == \"foo.certs\"\n assert creds.get_value(CredentialValue.SSL_FORCE_TLS_1_2)\n assert creds.get_value(CredentialValue.PROXY) == \"proxy.example\"\n assert creds.get_value(CredentialValue.IGNORE_SYSTEM_PROXY)\n assert creds.get_value(CredentialValue.INTEGRATION) == 
'Bronski'", "title": "" }, { "docid": "7deffdd7506b025b852a7d4978006cd3", "score": "0.5493493", "text": "def test_init_defaults(self):\n self._set_args(log_path=None,\n state='present',\n username='myBindAcct',\n password='myBindPass',\n server='ldap://example.com:384',\n search_base='OU=Users,DC=example,DC=com',\n role_mappings={'.*': ['storage.monitor']},\n )\n\n ldap = Ldap()", "title": "" }, { "docid": "4cff12a3692c991acdbbb05c7fb6bbfe", "score": "0.5486943", "text": "def test_credentials_set_reset(self):\n empty_setting = {\n 'AccessKeyId': None,\n 'SecretAccessKey': None,\n 'SessionToken': None\n }\n nonempty_setting = {\n 'AccessKeyId': '1',\n 'SecretAccessKey': '2',\n 'SessionToken': '3'\n }\n self.assertEqual(_credentials, empty_setting)\n credentials_set(nonempty_setting)\n self.assertEqual(_credentials, nonempty_setting)\n credentials_reset()\n self.assertEqual(_credentials, empty_setting)", "title": "" }, { "docid": "4ed32a9bbce9c09a1beeb9d2149ed12d", "score": "0.5480969", "text": "def test_no_credentials(remove_api_key):\n with raises(\n RuntimeError,\n match=\"Failed to read API key. Did you forget to set GIPHY_API_KEY environment variable?\",\n ):\n api_credentials_provider.resolve_credentials()", "title": "" }, { "docid": "0f90248a80198e573d8cf11a31417de3", "score": "0.5475419", "text": "def test_ApiWillAuthenticate_InvalidCredentials_Unsuccessfully(self):\n api = Api(self.userId, \"\")\n self.assertFalse(api.connected())", "title": "" }, { "docid": "0f6e2f35179bed3444c9e79368a21af2", "score": "0.5472082", "text": "def test_blank_key(self):\n with self.assertRaises(ConfigError) as cm:\n imageroller.main.read_authconfig(\n imageroller.test.get_config_parser(self._blank_key))\n self.assertEqual(str(cm.exception), \"AuthConfig must contain ApiKey\")", "title": "" }, { "docid": "2d20919bba131f8a5d52ab8a367ce528", "score": "0.546877", "text": "def test_no_credentials(self):\n twine = Twine(source=VALID_SCHEMA_TWINE)\n twine.validate_credentials()", "title": "" }, { "docid": "27f1f802af29b4a36e301ed036b1f121", "score": "0.54509634", "text": "def test_defaults():\n config = Config(\n env_var='DO_NOT_USE',\n env_prefix='DO_NOT_USE',\n entry_point_name='DO_NOT_USE',\n )\n\n assert not config.keys()", "title": "" }, { "docid": "413602e4f21314172f0e33bb216179c2", "score": "0.54505706", "text": "def test_userinfo(self):\n self.assertEqual(self.gmail_case.userinfo, None)\n self.assertEqual(self.foo_case.userinfo, 'herp')", "title": "" }, { "docid": "0b057e5431f4d69390c3deec19d6c8e8", "score": "0.54455817", "text": "def test_find_credentials(self):\n self.new_credentials.save_credentials()\n new_account= Credentials(\"Twitter\",\"josephat_otieno\", \"joseotis45\")\n new_account.save_credentials()\n\n found_credential= Credentials.find_credentials(\"Twitter\")\n\n self.assertEqual(found_credential.account_name,new_account.account_name)", "title": "" }, { "docid": "ebfb28235b76eab974c9bf24c1911cba", "score": "0.54430926", "text": "def test_ApiConnectionWillAuthenticate_InvalidCredentials_Unsuccessfully(self):\n connection = ApiConnection(self.userId, \"\")\n self.assertFalse(connection.connected())", "title": "" }, { "docid": "5c9822ec885877539d2d6e59798f01da", "score": "0.54370695", "text": "def test_login_required_attr(self):\n\t\tdata = {'username' : 'testUser2'}\n\t\tresponse = self.login(data)\n\n\t\terror_text = \"This field is required.\"\n\n\t\tself.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\t\ttry:\n\t\t\tif error_text not in 
response.data[\"password\"]:\n\t\t\t\tself.fail(\"Error text must exist in 'password' : '\" + error_text + \"'\")\n\t\texcept AttributeError:\n\t\t\tself.fail(\"There must be at least one entry in 'password'\")\n\n\t\tdata = {'password' : 'pass12345'}\n\t\tresponse = self.login(data)\n\n\t\tself.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\t\ttry:\n\t\t\tif error_text not in response.data[\"username\"]:\n\t\t\t\tself.fail(\"Error text must exist in 'username' : '\" + error_text + \"'\")\n\t\texcept AttributeError:\n\t\t\tself.fail(\"There must be at least one entry in 'username'\")\n\n\t\tdata = {}\n\t\tresponse = self.login(data)\n\n\t\tself.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\t\ttry:\n\t\t\tif error_text not in response.data[\"username\"]:\n\t\t\t\tself.fail(\"Error text must exist in 'username' : '\" + error_text + \"'\")\n\t\t\tif error_text not in response.data[\"password\"]:\n\t\t\t\tself.fail(\"Error text must exist in 'password' : '\" + error_text + \"'\")\n\t\texcept AttributeError:\n\t\t\tself.fail(\"There must be at least one entry of '\" + \n\t\t\t\terror_text + \"' in either of 'username' or 'password'\")", "title": "" }, { "docid": "687c8784e46c745ceafefaa27251a58f", "score": "0.54325944", "text": "def test_expired_credentials():\n pass", "title": "" }, { "docid": "94c64478873eac5aeb04fff0bd62a028", "score": "0.54319423", "text": "def setUp(self):\n self.new_credential = Credential(\"winnie\", \"facebook\",\"deinawinnie\",\"winnie\")", "title": "" }, { "docid": "5cda685d0cf5bdc87bb33d89c16b0745", "score": "0.5423306", "text": "def set_credentials(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "741c623839a082f6ce9a5db4904f79fe", "score": "0.54214793", "text": "def test_missing_attribute(self):\n with self.assertRaises(ImproperlyConfigured):\n import_from_setting('TEST_SETTING')", "title": "" }, { "docid": "eeeb3a9cf1688066b31f41c4326beef2", "score": "0.5420578", "text": "def test_create_account_not_allowed(self):\n\n def _side_effect_for_get_value(value, default=None):\n \"\"\"\n returns a side_effect with given return value for a given value\n \"\"\"\n if value == 'ALLOW_PUBLIC_ACCOUNT_CREATION':\n return False\n else:\n return get_value(value, default)\n\n with mock.patch('openedx.core.djangoapps.site_configuration.helpers.get_value') as mock_get_value:\n mock_get_value.side_effect = _side_effect_for_get_value\n response = self.client.post(self.url, {\"email\": self.EMAIL, \"username\": self.USERNAME})\n assert response.status_code == 403", "title": "" }, { "docid": "1c3e5a030db48494e9f2bf2f6a77afdb", "score": "0.5412422", "text": "def test_credential_get_dict(input_dict):\n creds = Credentials(input_dict).to_dict()\n assert creds[\"url\"] == \"http://example.com\"\n assert creds[\"token\"] == \"ABCDEFGH\"\n assert creds[\"org_key\"] == \"A1B2C3D4\"\n assert not creds[\"ssl_verify\"]\n assert not creds[\"ssl_verify_hostname\"]\n assert creds[\"ssl_cert_file\"] == \"foo.certs\"\n assert creds[\"ssl_force_tls_1_2\"]\n assert creds[\"proxy\"] == \"proxy.example\"\n assert creds[\"ignore_system_proxy\"]", "title": "" }, { "docid": "90039455594fb8c66849066584e0cb23", "score": "0.5403495", "text": "def test_display_all_credentials(self):\n\n self.assertEqual(Credentials.display_credentials(), Credentials.credentials_list)", "title": "" }, { "docid": "925b4e6f2c2e2f7c4f42862c62baa2fd", "score": "0.5388713", "text": "def test_getcredentials_from_env(netrc):\n netrc.side_effect = FileNotFoundError(\"\")\n server = 
KasServer()\n assert server._username == USERNAME\n assert server._password == PASSWORD", "title": "" }, { "docid": "12e7bc02450dce01f5ad2014c13bdc3b", "score": "0.53742784", "text": "def test_password_attr(self):\n user = User()\n self.assertTrue(hasattr(user, \"password\"))\n self.assertEqual(user.password, \"\")", "title": "" }, { "docid": "c28f630b036842c807c23c7d290b5768", "score": "0.5369983", "text": "def validate_credentials(self, *args, dotenv_path=None, **kwargs):\n if not hasattr(self, \"credentials\"):\n return set()\n\n # Load any variables from the .env file into the environment.\n dotenv_path = dotenv_path or os.path.join(\".\", \".env\")\n load_dotenv(dotenv_path)\n\n for credential in self.credentials:\n if credential[\"name\"] not in os.environ:\n raise exceptions.CredentialNotFound(\n f\"Credential {credential['name']!r} missing from environment or .env file.\"\n )\n\n return self.credentials", "title": "" }, { "docid": "99c11f5f9b98aa8094e9056dfcefee61", "score": "0.5360967", "text": "def test_display_all_credential(self):\n self.assertEqual(Credential.display_credential(),Credential.credential_list)", "title": "" }, { "docid": "a7e2a550b32b5abdbc7f1a4fa123d05e", "score": "0.53603905", "text": "def setUp(self):\n self.new_credential = Credential(\"Facebook\",\"Yvonnah Bonswuh\",\"ivonnahbonswuh@gmail.com\",\"ivy1996\") # create credential object", "title": "" }, { "docid": "4885a4c5127e973f547b67e3a16d9034", "score": "0.53585184", "text": "def sufficient_options(self):\n has_token = self.opts.get('token')\n has_project_domain_or_tenant = (self.opts.get('project_id') or\n (self.opts.get('project_name') and\n (self.opts.get('user_domain_name') or\n self.opts.get('user_domain_id'))) or\n (self.opts.get('tenant_id') or\n self.opts.get('tenant_name')))\n has_credential = (self.opts.get('username')\n and has_project_domain_or_tenant\n and self.opts.get('password')\n and self.opts.get('auth_url'))\n missing = not (has_token or has_credential)\n if missing:\n missing_opts = []\n opts = ['token', 'endpoint', 'username', 'password', 'auth_url',\n 'tenant_id', 'tenant_name']\n for opt in opts:\n if not self.opts.get(opt):\n missing_opts.append(opt)\n raise exceptions.AuthPluginOptionsMissing(missing_opts)", "title": "" }, { "docid": "0ad2691a3c8882bf9615f50c10b488e5", "score": "0.5340708", "text": "def test_no_auth():\n no_auth = NoAuth()\n assert {} == no_auth.get_auth_header()\n no_auth = NoAuth()\n assert {} == no_auth.get_auth_header()", "title": "" }, { "docid": "08f90f709d8794edfc26ea123fb1ac82", "score": "0.5334988", "text": "def test_get_property_missing(self):\r\n try:\r\n value = self.config.option2\r\n assert value\r\n except Exception as e:\r\n self.assertIsInstance(e, OptionValueNotSetError)\r\n self.assertNotIn('option2', self.config.values)", "title": "" }, { "docid": "b4ae47c2219fa5b06462a35baf462a16", "score": "0.5332603", "text": "def has_credentials(self):\n return self.username and self.password and self.url and self.xml_rpc", "title": "" }, { "docid": "cce6f65e5bf31166f19ae56931c6dc9c", "score": "0.53324795", "text": "def test_authentication_settings_deprecated() -> None:\n\n r_client = DogClient(backend='requests')\n assert r_client.auth_() is None\n r_client._set_auth(r_HTTPBasicAuth('username', 'password'))\n assert r_client['auth'] == r_HTTPBasicAuth('username', 'password')\n\n assert r_client.auth_() == r_HTTPBasicAuth('username', 'password')\n\n assert r_client._auth() == r_HTTPBasicAuth('username', 'password')\n\n r_client_auth = 
DogClient(backend='requests',\n auth=r_HTTPBasicAuth('username', 'password'))\n assert r_client_auth.auth_() == r_HTTPBasicAuth('username', 'password')\n\n assert r_client_auth._auth() == r_HTTPBasicAuth('username', 'password')\n\n r_session_auth = r_client_auth._session()\n assert r_session_auth._auth == r_HTTPBasicAuth('username', 'password')\n\n x_client = DogClient(backend='httpx')\n assert x_client._auth() is None\n x_client._set_auth(x_HTTPBasicAuth('username', 'password'))\n assert x_client._auth()._auth_header == \\\n x_HTTPBasicAuth('username', 'password')._auth_header\n\n x_client_auth = DogClient(backend='httpx',\n auth=x_HTTPBasicAuth('username', 'password'))\n assert x_client_auth._auth()._auth_header == \\\n x_HTTPBasicAuth('username', 'password')._auth_header\n x_session_auth = x_client_auth._session()\n assert x_session_auth._auth._auth_header == \\\n x_HTTPBasicAuth('username', 'password')._auth_header", "title": "" }, { "docid": "320e1b702acf821a3e8b6b6cfc260e2e", "score": "0.53213173", "text": "def test_getcredentials_from_netrc(netrc):\n netrc.return_value.authenticators.return_value = (USERNAME, \"\", PASSWORD)\n server = KasServer()\n assert server._username == USERNAME\n assert server._password == PASSWORD", "title": "" }, { "docid": "9a0339c0d359f6ef9ff39ac1eff9dc2b", "score": "0.53173923", "text": "def authenticate_credentials(self, **credentials):\n return None", "title": "" }, { "docid": "ac2effd35796beda9fd6a6a176c656e5", "score": "0.5314239", "text": "def test_invalid_credentials_forbidden(self):\n response = self._mock_utility(get_kwargs=self._data(),\n error=fitbit_exceptions.HTTPForbidden)\n self._check_response(response, 103)\n self.assertEqual(UserFitbit.objects.count(), 0)", "title": "" }, { "docid": "615c8415ea6a89d27a2a3a8f477836f0", "score": "0.52955794", "text": "def _validate_credentials(self):\n\n # There should be a client_id and client secret\n return \"client_id\" in self.credentials.keys() and \"client_secret\" in self.credentials.keys() \\\n and self.credentials[\"client_id\"] and self.credentials[\"client_secret\"]", "title": "" }, { "docid": "214103b50c0ba14def28e6b0db7f534d", "score": "0.529518", "text": "def servicenow_sspm_csv_enforce_basic_auth_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str):\n iso8601Time = datetime.datetime.now(datetime.timezone.utc).isoformat()\n\n # Name of the property to evaluate against\n evalTarget = \"glide.basicauth.required.csv\"\n # Get cached props\n sysPropCache = get_servicenow_sys_properties(cache)\n\n # There should not ever be a duplicate system property, use next() and a list comprehension to check if the\n # property we're evaluating is in the list of properties we get from the cache. If it is NOT then set the\n # value as `False` and we can fill in fake values. 
Not having a property for security hardening is the same\n # as a failed finding with a lot less fan fair\n propFinder = next((sysprop for sysprop in sysPropCache if sysprop[\"name\"] == evalTarget), False)\n # If we cannot find the property set \"NOT_CONFIGURED\" which will fail whatever the value should be\n if propFinder == False:\n propertyValue = \"NOT_CONFIGURED\"\n propDescription = \"\"\n propId = \"\"\n propCreatedOn = \"\"\n propCreatedBy = \"\"\n propUpdatedOn = \"\"\n propUpdatedBy = \"\"\n propScope = \"\"\n assetB64 = None\n else:\n propertyValue = str(propFinder[\"value\"])\n propDescription = str(propFinder[\"description\"]).replace(\"\\n \", \"\")\n propId = str(propFinder[\"sys_id\"])\n propCreatedOn = str(propFinder[\"sys_created_on\"])\n propCreatedBy = str(propFinder[\"sys_created_by\"])\n propUpdatedOn = str(propFinder[\"sys_updated_on\"])\n propUpdatedBy = str(propFinder[\"sys_updated_by\"])\n propScope = str(propFinder[\"sys_scope\"][\"value\"])\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(propFinder,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson) \n # NOTE: This is where the check evaluation happens - in SNOW these may be Bools or Numbers but will come back as Strings\n # always evaluate a failing condition first which should be the OPPOSITE of the SNOW reccomendation as sometimes the values\n # are not a simple Boolean expression\n if propertyValue != \"true\":\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"MEDIUM\"},\n \"Confidence\": 99,\n \"Title\": \"[SSPM.Servicenow.AccessControl.6] Instance should enforce basic authentication for CSV requests\",\n \"Description\": f\"Servicenow instance {SNOW_INSTANCE_NAME} does not enforce basic authentication for CSV requests. Use the 'glide.basicauth.required.csv' property to designate if incoming CSV (Comma-Separated Values) requests should require basic authentication. Without appropriate authorization configured on the incoming CSV requests, an unauthorized user can get access to sensitive content/data on the target instance. 
Refer to the remediation instructions if this configuration is not intended.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information refer to the CSV request authorization (instance security hardening) section of the Servicenow Product Documentation.\",\n \"Url\": \"https://docs.servicenow.com/bundle/utah-platform-security/page/administer/security/reference/csv-request-authorization.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"ServiceNow\",\n \"ProviderType\": \"SaaS\",\n \"ProviderAccountId\": SNOW_INSTANCE_NAME,\n \"AssetRegion\": SNOW_INSTANCE_REGION,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Management & Governance\",\n \"AssetService\": \"System Properties\",\n \"AssetComponent\": \"System Property\"\n },\n \"Resources\": [\n {\n \"Type\": \"ServicenowInstance\",\n \"Id\": f\"{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}\",\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"ServicenowInstance\": SNOW_INSTANCE_NAME,\n \"SysId\": propId,\n \"PropertyName\": evalTarget,\n \"PropertyValue\": propertyValue,\n \"Description\": propDescription,\n \"CreatedBy\": propCreatedBy,\n \"CreatedOn\": propCreatedOn,\n \"UpdatedBy\": propUpdatedBy,\n \"UpdatedOn\": propUpdatedOn,\n \"Scope\": propScope\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.PT-3\",\n \"NIST SP 800-53 Rev. 4 AC-3\",\n \"NIST SP 800-53 Rev. 4 CM-7\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.6.2.2\", \n \"ISO 27001:2013 A.9.1.2\",\n \"ISO 27001:2013 A.9.4.1\",\n \"ISO 27001:2013 A.9.4.4\",\n \"ISO 27001:2013 A.9.4.5\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\",\n \"ISO 27001:2013 A.18.1.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[SSPM.Servicenow.AccessControl.6] Instance should enforce basic authentication for CSV requests\",\n \"Description\": f\"Servicenow instance {SNOW_INSTANCE_NAME} enforces basic authentication for CSV requests.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information refer to the CSV request authorization (instance security hardening) section of the Servicenow Product Documentation.\",\n \"Url\": \"https://docs.servicenow.com/bundle/utah-platform-security/page/administer/security/reference/csv-request-authorization.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"ServiceNow\",\n \"ProviderType\": \"SaaS\",\n \"ProviderAccountId\": SNOW_INSTANCE_NAME,\n \"AssetRegion\": SNOW_INSTANCE_REGION,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Management & Governance\",\n \"AssetService\": \"System Properties\",\n \"AssetComponent\": \"System Property\"\n },\n \"Resources\": [\n {\n \"Type\": \"ServicenowInstance\",\n \"Id\": 
f\"{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}\",\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"ServicenowInstance\": SNOW_INSTANCE_NAME,\n \"SysId\": propId,\n \"PropertyName\": evalTarget,\n \"PropertyValue\": propertyValue,\n \"Description\": propDescription,\n \"CreatedBy\": propCreatedBy,\n \"CreatedOn\": propCreatedOn,\n \"UpdatedBy\": propUpdatedBy,\n \"UpdatedOn\": propUpdatedOn,\n \"Scope\": propScope\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.PT-3\",\n \"NIST SP 800-53 Rev. 4 AC-3\",\n \"NIST SP 800-53 Rev. 4 CM-7\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.6.2.2\", \n \"ISO 27001:2013 A.9.1.2\",\n \"ISO 27001:2013 A.9.4.1\",\n \"ISO 27001:2013 A.9.4.4\",\n \"ISO 27001:2013 A.9.4.5\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\",\n \"ISO 27001:2013 A.18.1.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "title": "" }, { "docid": "18354173c56aa8ce4e8922b70cc7101f", "score": "0.5289004", "text": "def test_default_values_set(self):\n for attempt in self.attempts:\n retrieved = self.resource.get(attempt[_ATTEMPT.attempt_id])\n self.assertIn(_ATTEMPT.logs, retrieved)\n self.assertIn(_ATTEMPT.time_created, retrieved)\n self.assertIn(_ATTEMPT.status, retrieved)", "title": "" }, { "docid": "a8260f18b865d226186017f49edd4057", "score": "0.52877223", "text": "def _has_auth(creds: Dict[str, str]) -> bool:\n if creds.get(\"user\") in [None, \"\"] or creds.get(\"passwd\") in [None, \"\"]:\n warnings.warn(\"Credentials were not supplied. Public data access only.\", NoAuthWarning)\n return False\n return True", "title": "" }, { "docid": "8bc9946be54ee9b36992cf03a8708550", "score": "0.52840585", "text": "def credentials_work(self):\n\n good = True\n try:\n self.authenticate_client()\n except cloudpassage.CloudPassageAuthentication:\n good = False\n return good", "title": "" }, { "docid": "ebe3a0067c9c413f087627a53b509f15", "score": "0.5280547", "text": "def test_retrieve_non_modifiable(self):\n setting_name = 'user_hidden_setting'\n url = reverse('projectroles:api_user_setting_retrieve')\n get_data = {'app_name': EX_APP_NAME, 'setting_name': setting_name}\n response = self.request_knox(url, data=get_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n response_data = json.loads(response.content)\n expected = {\n 'app_name': EX_APP_NAME,\n 'project': None,\n 'user': self.get_serialized_user(self.user),\n 'name': setting_name,\n 'type': 'STRING',\n 'value': '',\n 'user_modifiable': False,\n }\n self.assertEqual(response_data, expected)", "title": "" }, { "docid": "7dad84fd83aec34a6f768b9b2f84c78b", "score": "0.5273274", "text": "def test_save_creds(self):\n self.new_credentials.save_creds()\n self.assertEqual(len(Credentials.credential_list),1)", "title": "" }, { "docid": "84aa8103465d13fb10f93df486c9fb8d", "score": "0.52693784", "text": "def test_import_allows_attributes_failure(self):\n # We can just use existing modules for this.\n name_map = {\n \"sys\": (\"executable\", \"path\"),\n \"builtins\": (\"list\", \"_qiskit_dummy_attribute_\"),\n }\n\n feature = LazyImportTester(name_map)\n self.assertFalse(feature)", "title": "" }, { "docid": "fb0c805a80c02e10f6c3f776857930f0", "score": "0.5267317", "text": "def test_parameter_mode_invalid(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Should 
throw exceptions\n self.configuration.hgst_space_mode = ''\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)\n self.configuration.hgst_space_mode = 'Fred'\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)", "title": "" } ]
ee1068b325a01087d8763130e600b894
Perform data extraction, geocoding, analysis, and rendering.
[ { "docid": "a0905a75696c2d5f05df9afb5332cecb", "score": "0.0", "text": "def aorta(filename, attribute, maxl, minl, v1, v2, v3, lb, sb):\r\n global start\r\n global stop\r\n lb.configure(text='Reading File')\r\n df = pd.read_csv(filename)\r\n df = df.drop(0)\r\n\r\n df['Date'] = pd.to_datetime(df['Date'])\r\n df['Date'] = df['Date'].dt.date\r\n lb.configure(text='Extracting')\r\n df.index = df['Date']\r\n start = df.index[0]\r\n stop = df.index[-1]\r\n data = pd.DataFrame(index=['sum', 'diffdate', 'maxmindiff', 'latitude', 'longitude'])\r\n\r\n t = attribute\r\n for x, y in df.iteritems():\r\n if t in x:\r\n x = x[len(t) + 1:]\r\n y = pd.to_numeric(y, errors='ignore')\r\n data[x] = pd.Series({'sum': sum(y), 'maxmindiff': max(y) - min(y), 'diffdate': y.iloc[-1] - y.iloc[0], 'latitude': None, 'longitude': None})\r\n\r\n data = data.transpose()\r\n print(data)\r\n\r\n data['place'] = data.index.copy()\r\n data.index = list(range(0, len(data)))\r\n\r\n lb.configure(text='Geocoding (acquiring coordinates)')\r\n\r\n print('\\nGeocoding')\r\n\r\n k = list(map(geocoding, data['place']))\r\n\r\n k = pd.DataFrame(k)\r\n data['latitude'], data['longitude'] = k[0], k[1]\r\n data = data[(data['latitude'] != pd.to_numeric(0.00)) & (data['longitude'] != pd.to_numeric(0.00))]\r\n\r\n data = data.fillna(0)\r\n\r\n print(data, end='\\n\\n')\r\n print('Basic description\\n')\r\n print(data.describe())\r\n\r\n lb.configure(text='Final render and saving')\r\n if v1 == 1:\r\n a = excalibur(data, filename, attribute, maxl, minl, 'sum')\r\n\r\n if v2 == 1:\r\n b = excalibur(data, filename, attribute, maxl, minl, 'diffdate')\r\n\r\n if v3 == 1:\r\n c = excalibur(data, filename, attribute, maxl, minl, 'maxmindiff')\r\n sb.config(state='normal')\r\n lb.configure(text='DONE!')", "title": "" } ]
[ { "docid": "284d7694dea5621aaa51c881c1394f8b", "score": "0.67977667", "text": "def run(self):\n self.prepare()\n self.extract()\n self.transform()\n self.load()\n self.finalize()\n pass", "title": "" }, { "docid": "c476f77c1cdbb522f954f429c6a30600", "score": "0.65497917", "text": "def perform_analysis(self):\n self.identify_hotspots()\n self.assess_druggability()\n self.write_hotspots()", "title": "" }, { "docid": "75cf610ee0f2d816e858ae1f0f63d0ea", "score": "0.62873155", "text": "def analyze(self):\n if self.b_make_images:\n self.images()\n if self.b_extract_xml_data:\n self.get_xml_data()\n if self.b_extract_fonts:\n self.extract_fonts()", "title": "" }, { "docid": "2ea23e1b6f6e2a70c77db8c987b585b6", "score": "0.6097645", "text": "def process(self):\n\n # Opening and preprocessing of the input file\n\n self.open_input()\n self.generate_metadata()\n self.generate_base_tiles()", "title": "" }, { "docid": "93ffb94a9e32f1ebdec56765f8cbc29c", "score": "0.6065653", "text": "def main(): \n # -- GET THE RESULTS FROM A PICKLE FILE -- #\n results,wgoogle,wamazon = get_data()\n\n # -- PLOTTING THE RESULTS OBTAINED -- #\n showing_data(results,wgoogle,wamazon)\n\n # -- WRITING THE RESULTS IN A CSV FILE -- #\n write_csv(results)", "title": "" }, { "docid": "5dbe07e0c5d751a4b1ed0064181e7437", "score": "0.60529447", "text": "def run(self):\n self.compute_student_total()\n self.compute_overall_position()\n self.parse_student_scores()\n self.parse_subject_scores()\n self.compute_subject_positions()\n self.parse_attendance_remark()", "title": "" }, { "docid": "1b5d3b644ce49c4466dffd8554d47203", "score": "0.6004323", "text": "def main() -> None:\n # First initialize the paths to look for the data sources\n pathDB = \"../../raw_data/geography.sqlite\"\n pathLogs = \"../../raw_data/transformed_data\"\n\n # path to the directory for the output files\n pathOut = \"../../out_data/\"\n\n # First, spark session is initialized.\n spark = SparkSession.builder. \\\n master(\"local[*]\"). \\\n config('spark.jars.packages', 'sqlite-jdbc-3.34.0.jar'). \\\n appName(\"ETL_pipeline\"). 
\\\n getOrCreate()\n spark.sparkContext.setLogLevel('WARN') # to suppress the info level logging\n # initialize logger\n logger = initializeLogging(spark)\n \n # Extract\n logger.warn(\"Extraction process\")\n try:\n rawData = Extract.extractDataJson(spark, path=pathLogs)\n geographyDB = Extract.extractDataDB(spark, dbtable=\"geography\", path=pathDB)\n except:\n logger.error(\"Extraction process Failed\")\n spark.stop()\n exit()\n \n # Transform\n logger.warn(\"Transform process\")\n denormalizedDB = Transform.transformDB(spark, geographyDB)\n cleanData = Transform.cleanRecords(rawData)\n transformedData = Transform.replaceValues(cleanData)\n enrichedData = Transform.enrichData(transformedData, denormalizedDB)\n\n # Load\n logger.warn(\"Load process\") \n status_clean = Load.loadCleaned(pathOut, transformedData)\n status_enriched = Load.loadEnriched(pathOut, enrichedData)\n if not (status_clean and status_enriched):\n logger.warn(\"Load process failed\")\n else:\n logger.warn(f\"Pipeline Completed data saved at {pathOut}\")\n\n spark.stop()", "title": "" }, { "docid": "91e055973c6dff2be7cf73809aa3a574", "score": "0.5949823", "text": "def main(self):\n input_df = self._load_df()\n processed_df = self._process_df(input_df)\n self._write_df(processed_df)", "title": "" }, { "docid": "bce67d4e6116b7ba2659047dab39c1b8", "score": "0.5919402", "text": "def main():\n\n temperature_input_data = '../../data2/GlobalLandTemperaturesByCity.csv'\n demographics_input_data = 'input-data/us-cities-demographics.csv'\n airport_input_data = 'input-data/airport-codes_csv.csv'\n immigration_input_data = '../../data/18-83510-I94-Data-2016/i94_apr16_sub.sas7bdat'\n label_descriptions = 'input-data/I94_SAS_Labels_Descriptions.SAS'\n output_data = 'output-data'\n \n \n \n spark = create_spark_session()\n \n mapping_dict = port_city_mapping(label_descriptions)\n \n process_demographics_data(spark, demographics_input_data, output_data)\n \n process_immigration_data(spark, immigration_input_data, mapping_dict, output_data)\n \n process_airport_data(spark, airport_input_data, mapping_dict, output_data)\n\n process_temperature_data(spark, temperature_input_data, output_data)", "title": "" }, { "docid": "3a74b8aba72f452a85b2e07cdba5b4cf", "score": "0.59008175", "text": "def main():\n\n logger = logging.getLogger(__name__)\n\n # Set environment variables.\n load_dotenv(find_dotenv())\n DatasetDownloader.URL = str(os.environ.get(\"URL\"))\n DatasetDownloader.USERNAME = str(os.environ.get(\"LOGINNAME\"))\n DatasetDownloader.PASSWORD = str(os.environ.get(\"LOGINPASSWORD\"))\n\n if FLAGS.download:\n # Download data.\n logger.info('start downloading data into raw:')\n DatasetDownloader.download_all()\n logger.info('downloading was successfull')\n\n if FLAGS.preprocess:\n logger.info('start preprocessing data:')\n # Preprocess data. 
Store it in /data/preprocessed/preprocessed_data.dat.\n tokens = [os.environ.get(alias) for alias in [\"KEY_RAPHAEL\", \"KEY_MORITZ\", \"KEY_LUKAS\"]]\n dfs = Preprocessor.preprocess(tokens,\n filename=FLAGS.file_name,\n distance_metric=FLAGS.distance_metric,\n use_individual_columns=FLAGS.use_individual_columns)\n\n logger.info('preprocessing was successful')", "title": "" }, { "docid": "0adfb9d1b774acbb7622becf0fca24d4", "score": "0.586948", "text": "def main():\n datasetPath=\"./Dataset/\"\n peoplePath=\"./People/\"\n processKnownPeopleImages(path=peoplePath)\n processDatasetImages(path=datasetPath)\n print(\"Completed\")", "title": "" }, { "docid": "f6c03eb6ecfaebdf8772e4d86dd10d36", "score": "0.58531415", "text": "def run_driver(self):\n \n # Extract .mbtiles and write image bytes to individual files\n ExtractMBTiles().write_to_file(self.mbtile_path)\n \n # Georeference the individually extracted image files\n GeoreferenceImage().assign_coordinates_to_image()\n \n # Stitch georerferenced images to create a raster mosaic\n StitchImage().stitch_tiles()", "title": "" }, { "docid": "ee3f00318638283ed320737927665e6e", "score": "0.5832285", "text": "def scrape(self):\n self.get_soup()\n self.get_entries()\n self.get_results()", "title": "" }, { "docid": "83ffc9e1add09501ce16a23da563760c", "score": "0.5786054", "text": "def exploratory_data_analysis():\n df, featurs_names = prepare_df(TRAIN_FILE, 3, False)\n dist_by_hour(df)\n dist_by_hour_for_each_year(df)\n dist_by_day(df)\n dist_by_month(df)\n dist_by_year(df)\n dist_1gram(df)\n dist_spl(df)\n dist_word_count(df)\n dist_characters_count(df)\n dist_characters_per_word(df)\n dist_count_number(df)", "title": "" }, { "docid": "f38a28a42f772963003237889c4962d4", "score": "0.5750481", "text": "def main():\n for city in CITIES:\n url = f\"{BASE_URL}/{PREFERRED_FORMAT}?address={format_city(city)}\"\n print(url)\n body = handle_request(url)\n print(f\"City: {city}; Lat/Long: {get_coordinates_from_city(body)}\")", "title": "" }, { "docid": "2d6dff1a0419a83e1a61bc2eca9a22ca", "score": "0.5749509", "text": "def main():\n # == Define parameters ==================================\n # Path to some grid files\n dataPath = 'testData/output_03_MassPlumeTrajectories_netCDF/'\n # =======================================================\n\n # == Find data files ====================================\n # First we extract all the files inside output dir\n files_all = os.listdir(dataPath)\n # Take only files beginning with 'grid'\n files = [f for f in files_all if f.startswith('traj') == True]\n # Choose the file\n filePath = dataPath + files[0]\n # Extract data\n df = extract_trajectories(filePath)\n # =======================================================\n\n # == Plot trajectories ==================================\n # Extract latitude and longitude of the centroids\n lon = df['xcenter'].values\n lat = df['ycenter'].values\n # Plot them\n plotMap_trajectories(lat, lon, extent=[-60, 60, -40, 40])", "title": "" }, { "docid": "40ace08ba73c2d19b59335e95db707ec", "score": "0.57451785", "text": "def run(self):\n\n print(\"Running calculate...\")\n start = time()\n ms = self.calculate()\n print(\"calculate completed in {:,.2f}s\".format(time()-start))\n\n print(\"Running plot...\")\n start = time()\n self.plot(ms, os.path.join(self.output_dir, self.file_name_plot))\n print(\"plot completed in {:,.2f}s\".format(time()-start))\n\n print(\"Running save_data...\")\n start = time()\n self.save_data(ms, os.path.join(self.output_dir, self.file_name_data))\n print(\"save_data 
completed in {:,.2f}s\".format(time()-start))", "title": "" }, { "docid": "e49f37645957b4c7cd88d40c22d1b308", "score": "0.57443154", "text": "def run():\r\n #get the stations data \r\n stations = build_station_list()\r\n \r\n #get the coordinate of the mid points of the most east and west stations\r\n sort_station_one = sorted(stations, key = lambda station: station.coord[0])\r\n mid_lat = (sort_station_one[0].coord[0] + sort_station_one[-1].coord[0]) / 2\r\n \r\n sort_station_two = sorted(stations, key = lambda station: station.coord[1])\r\n mid_lng = (sort_station_two[0].coord[1] + sort_station_two[-1].coord[1]) / 2\r\n \r\n #show the map\r\n map_options = GMapOptions(lat=mid_lat, lng=mid_lng, map_type=\"roadmap\", zoom=6)\r\n plot = GMapPlot(x_range=Range1d(), y_range=Range1d(), map_options=map_options)\r\n plot.title.text = \"Flood warning\"\r\n\r\n plot.api_key = \"AIzaSyDu680hgb23BsVmnej1GC5XnALbj0DbjTE\" \r\n \r\n #bulid a list of coordinates of the stations \r\n #probably need improvement in terms of speed\r\n \r\n list_of_lat = []\r\n list_of_lng = []\r\n for station in stations:\r\n list_of_lat.append(station.coord[0])\r\n list_of_lng.append(station.coord[1])\r\n \r\n #plot points on the map\r\n \r\n source = ColumnDataSource(\r\n data= dict(\r\n lat = list_of_lat, \r\n lng = list_of_lng,\r\n ))\r\n circle = Circle(x = \"lng\", y = \"lat\", size = 5, fill_color = \"blue\", fill_alpha = 0.8, line_color = None)\r\n plot.add_glyph(source, circle)\r\n \r\n \r\n plot.add_tools(PanTool(), WheelZoomTool())\r\n output_file(\"gmap_plot.html\")\r\n show(plot)", "title": "" }, { "docid": "c654fcfe762f192368baba7c1e3f82d4", "score": "0.57437503", "text": "def run(self):\n self.logger.info('Start to run ...')\n self.load_data()\n self.setup_train_test_split()\n self.setup_classifiers()\n self.report = {}\n for names, clf in zip(self.names, self.classifiers):\n self.logger.info(names + \" in process ....\")\n # train\n clf.fit(self.X_train, self.y_train)\n # if names == 'Decision Tree':\n # tree.export_graphviz(clf, out_file=self.result_path + 'tree.dot')\n # test\n\n self.prediction = clf.predict(self.X_test)\n self.text_class_report(names)\n\n self.predict_proba = clf.predict_proba(self.X_test)\n self.roc_curve_report(names)\n\n self.logger.info(names + \" testing completed!\")", "title": "" }, { "docid": "a3101a3fef0f4ffd907ffe25af9731d7", "score": "0.572947", "text": "def _run(output_dir_name):\n\n file_system_utils.mkdir_recursive_if_necessary(\n directory_name=output_dir_name\n )\n\n parent_dir_name = '/'.join(THIS_DIRECTORY_NAME.split('/')[:-1])\n trace_gas_file_name = '{0:s}/utils/trace_gases.nc'.format(parent_dir_name)\n trace_gas_table_xarray = xarray.open_dataset(trace_gas_file_name)\n\n tgt = trace_gas_table_xarray\n orig_temperatures_kelvins = (\n WHOLE_PROFILE_TEMP_OFFSET_KELVINS +\n tgt[TEMPERATURE_KEY].values[:, TROPICAL_STANDARD_ATMO_INDEX]\n )\n orig_heights_m_agl = KM_TO_METRES * tgt.coords[HEIGHT_DIM].values\n\n interp_object = interp1d(\n x=orig_heights_m_agl, y=orig_temperatures_kelvins, kind='cubic',\n bounds_error=True, assume_sorted=True\n )\n temperatures_kelvins = interp_object(HEIGHTS_M_AGL)\n water_contents_kg_m03 = ORIG_WATER_CONTENTS_KG_M03 + 0.\n\n predictor_matrix = numpy.transpose(numpy.vstack((\n water_contents_kg_m03, temperatures_kelvins\n )))\n predictor_matrix = numpy.expand_dims(predictor_matrix, axis=0)\n predictor_names = [\n example_utils.ICE_WATER_CONTENT_NAME,\n example_utils.TEMPERATURE_NAME\n ]\n example_dict = {\n example_utils.HEIGHTS_KEY: 
HEIGHTS_M_AGL,\n example_utils.VECTOR_PREDICTOR_VALS_KEY: predictor_matrix,\n example_utils.VECTOR_PREDICTOR_NAMES_KEY: predictor_names\n }\n\n handle_dict = profile_plotting.plot_predictors(\n example_dict=example_dict, example_index=0,\n predictor_names=predictor_names,\n predictor_colours=[WATER_CONTENT_COLOUR, TEMPERATURE_COLOUR],\n predictor_line_widths=numpy.full(shape=2, fill_value=4.),\n predictor_line_styles=['solid'] * 2,\n use_log_scale=True, include_units=True, handle_dict=None\n )\n\n figure_object = handle_dict[profile_plotting.FIGURE_HANDLE_KEY]\n axes_objects = handle_dict[profile_plotting.AXES_OBJECTS_KEY]\n\n axes_objects[0].set_title('Original profiles')\n gg_plotting_utils.label_axes(\n axes_object=axes_objects[0], label_string='(a)', font_size=30\n )\n\n panel_file_names = [\n '{0:s}/ice_cloud_part1.jpg'.format(output_dir_name)\n ]\n\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight'\n )\n pyplot.close(figure_object)\n\n tropopause_height_m_agl, _ = perturb_gfs._find_tropopause(\n temperatures_kelvins=temperatures_kelvins,\n sorted_heights_m_agl=HEIGHTS_M_AGL\n )\n if tropopause_height_m_agl is None:\n tropopause_height_m_agl = perturb_gfs.MIN_TROPOPAUSE_HEIGHT_EVER_M_AGL\n\n num_cloud_layers = 2\n height_indices_by_layer_no_constraints = [\n numpy.where(water_contents_kg_m03 > 0)[0],\n numpy.array([], dtype=int)\n ]\n height_indices_by_layer = [\n numpy.where(water_contents_kg_m03 > 0)[0],\n numpy.array([], dtype=int)\n ]\n\n for k in range(1, num_cloud_layers):\n this_top_height_m_agl = HEIGHTS_M_AGL[\n numpy.where(water_contents_kg_m03 > 0)[0][-1]\n ]\n this_bottom_height_m_agl = max([\n this_top_height_m_agl - MAX_CLOUD_THICKNESS_METRES,\n 0.\n ])\n\n height_indices_by_layer_no_constraints[k] = (\n perturb_gfs._heights_to_grid_indices(\n min_height_m_agl=this_bottom_height_m_agl,\n max_height_m_agl=this_top_height_m_agl,\n sorted_grid_heights_m_agl=HEIGHTS_M_AGL\n )\n )\n\n good_temperature_flags = (\n temperatures_kelvins[height_indices_by_layer_no_constraints[k]]\n < 273.15\n )\n height_indices_by_layer[k] = (\n height_indices_by_layer_no_constraints[k][good_temperature_flags]\n )\n\n test_indices = numpy.array([], dtype=int)\n\n for m in range(k):\n if len(height_indices_by_layer[m]) == 0:\n continue\n\n test_indices = numpy.concatenate((\n test_indices,\n height_indices_by_layer[m][[0]] - 1,\n height_indices_by_layer[m],\n height_indices_by_layer[m][[-1]] + 1\n ))\n\n height_indices_by_layer[k] = height_indices_by_layer[k][\n numpy.invert(numpy.isin(height_indices_by_layer[k], test_indices))\n ]\n\n if len(height_indices_by_layer[k]) < 2:\n height_indices_by_layer[k] = numpy.array([], dtype=int)\n continue\n\n if len(height_indices_by_layer[1]) == 0:\n return\n\n handle_dict = profile_plotting.plot_predictors(\n example_dict=example_dict, example_index=0,\n predictor_names=predictor_names,\n predictor_colours=[WATER_CONTENT_COLOUR, TEMPERATURE_COLOUR],\n predictor_line_widths=numpy.full(shape=2, fill_value=4.),\n predictor_line_styles=['solid'] * 2,\n use_log_scale=True, include_units=True, handle_dict=None\n )\n\n figure_object = handle_dict[profile_plotting.FIGURE_HANDLE_KEY]\n axes_objects = handle_dict[profile_plotting.AXES_OBJECTS_KEY]\n\n min_height_new_layer_km_agl = (\n METRES_TO_KM *\n HEIGHTS_M_AGL[height_indices_by_layer_no_constraints[1][0]]\n )\n max_height_new_layer_km_agl = (\n METRES_TO_KM *\n 
HEIGHTS_M_AGL[height_indices_by_layer_no_constraints[1][-1]]\n )\n polygon_y_coords = numpy.array([\n min_height_new_layer_km_agl, max_height_new_layer_km_agl,\n max_height_new_layer_km_agl, min_height_new_layer_km_agl,\n min_height_new_layer_km_agl\n ])\n\n x_min = axes_objects[0].get_xlim()[0]\n x_max = axes_objects[0].get_xlim()[1]\n polygon_x_coords = numpy.array([x_min, x_min, x_max, x_max, x_min])\n polygon_coord_matrix = numpy.transpose(numpy.vstack((\n polygon_x_coords, polygon_y_coords\n )))\n\n polygon_colour = matplotlib.colors.to_rgba(\n numpy.full(3, 0.), 0.5\n )\n patch_object = matplotlib.patches.Polygon(\n polygon_coord_matrix, lw=0, ec=polygon_colour, fc=polygon_colour\n )\n axes_objects[0].add_patch(patch_object)\n\n axes_objects[0].set_title('Original profiles + extent of new cloud')\n gg_plotting_utils.label_axes(\n axes_object=axes_objects[0], label_string='(b)', font_size=30\n )\n\n panel_file_names.append(\n '{0:s}/ice_cloud_part2.jpg'.format(output_dir_name)\n )\n\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight'\n )\n pyplot.close(figure_object)\n\n handle_dict = profile_plotting.plot_predictors(\n example_dict=example_dict, example_index=0,\n predictor_names=predictor_names,\n predictor_colours=[WATER_CONTENT_COLOUR, TEMPERATURE_COLOUR],\n predictor_line_widths=numpy.full(shape=2, fill_value=4.),\n predictor_line_styles=['solid'] * 2,\n use_log_scale=True, include_units=True, handle_dict=None\n )\n\n figure_object = handle_dict[profile_plotting.FIGURE_HANDLE_KEY]\n axes_objects = handle_dict[profile_plotting.AXES_OBJECTS_KEY]\n\n min_height_new_layer_km_agl = (\n METRES_TO_KM * HEIGHTS_M_AGL[height_indices_by_layer[1][0]]\n )\n max_height_new_layer_km_agl = (\n METRES_TO_KM * HEIGHTS_M_AGL[height_indices_by_layer[1][-1]]\n )\n polygon_y_coords = numpy.array([\n min_height_new_layer_km_agl, max_height_new_layer_km_agl,\n max_height_new_layer_km_agl, min_height_new_layer_km_agl,\n min_height_new_layer_km_agl\n ])\n\n x_min = axes_objects[0].get_xlim()[0]\n x_max = axes_objects[0].get_xlim()[1]\n polygon_x_coords = numpy.array([x_min, x_min, x_max, x_max, x_min])\n polygon_coord_matrix = numpy.transpose(numpy.vstack((\n polygon_x_coords, polygon_y_coords\n )))\n\n polygon_colour = matplotlib.colors.to_rgba(\n numpy.full(3, 0.), 0.5\n )\n patch_object = matplotlib.patches.Polygon(\n polygon_coord_matrix, lw=0, ec=polygon_colour, fc=polygon_colour\n )\n axes_objects[0].add_patch(patch_object)\n\n axes_objects[0].set_title(\n 'Original profiles + extent of new cloud\\n' +\n r'(temperature $<$ 0 $^{\\circ}$C and no overlap)'\n )\n gg_plotting_utils.label_axes(\n axes_object=axes_objects[0], label_string='(c)', font_size=30\n )\n\n panel_file_names.append(\n '{0:s}/ice_cloud_part3.jpg'.format(output_dir_name)\n )\n\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight'\n )\n pyplot.close(figure_object)\n\n for k in range(1, num_cloud_layers):\n enhanced_height_indices = numpy.concatenate((\n height_indices_by_layer[k][[0]] - 1,\n height_indices_by_layer[k],\n height_indices_by_layer[k][[-1]] + 1\n ))\n\n enhanced_layer_heights_m_agl = HEIGHTS_M_AGL[enhanced_height_indices]\n layer_heights_m_agl = HEIGHTS_M_AGL[height_indices_by_layer[k]]\n\n layer_center_m_agl = numpy.mean(layer_heights_m_agl)\n 
max_height_diff_metres = numpy.max(\n numpy.absolute(layer_center_m_agl - enhanced_layer_heights_m_agl)\n )\n\n layer_height_diffs_metres = numpy.absolute(\n layer_center_m_agl - layer_heights_m_agl\n )\n layer_height_diffs_relative = (\n layer_height_diffs_metres / max_height_diff_metres\n )\n\n layer_water_contents_kg_m03 = (\n (1. - layer_height_diffs_relative) * MAX_NEW_WATER_CONTENT_KG_M03\n )\n water_contents_kg_m03[height_indices_by_layer[k]] = (\n layer_water_contents_kg_m03\n )\n\n figure_object, axes_object = profile_plotting.plot_one_variable(\n values=KG_TO_GRAMS * water_contents_kg_m03,\n heights_m_agl=HEIGHTS_M_AGL,\n line_width=4, line_style='solid', use_log_scale=True,\n line_colour=WATER_CONTENT_COLOUR\n )\n\n axes_object.set_xlabel(r'Ice-water content (g m$^{-3}$)')\n axes_object.set_ylabel('Height (km AGL)')\n axes_object.set_title('IWC profile with new cloud')\n gg_plotting_utils.label_axes(axes_object=axes_object, label_string='(d)')\n\n panel_file_names.append(\n '{0:s}/ice_cloud_part4.jpg'.format(output_dir_name)\n )\n\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight'\n )\n pyplot.close(figure_object)\n\n for k in range(1, num_cloud_layers):\n while True:\n noise_values_kg_m03 = numpy.random.normal(\n loc=0., scale=WATER_CONTENT_NOISE_KG_M03,\n size=len(height_indices_by_layer[k])\n )\n new_water_contents_kg_m03 = (\n water_contents_kg_m03[height_indices_by_layer[k]] +\n noise_values_kg_m03\n )\n\n if not numpy.any(new_water_contents_kg_m03 < 0):\n continue\n if not numpy.any(\n new_water_contents_kg_m03 > MAX_NEW_WATER_CONTENT_KG_M03\n ):\n continue\n\n water_contents_kg_m03[height_indices_by_layer[k]] += (\n noise_values_kg_m03\n )\n break\n\n figure_object, axes_object = profile_plotting.plot_one_variable(\n values=KG_TO_GRAMS * water_contents_kg_m03,\n heights_m_agl=HEIGHTS_M_AGL,\n line_width=4, line_style='solid', use_log_scale=True,\n line_colour=WATER_CONTENT_COLOUR\n )\n\n axes_object.set_xlabel(r'Ice-water content (g m$^{-3}$)')\n axes_object.set_ylabel('Height (km AGL)')\n axes_object.set_title(\n 'IWC profile with new cloud\\n(including Gaussian noise)'\n )\n gg_plotting_utils.label_axes(axes_object=axes_object, label_string='(e)')\n\n panel_file_names.append(\n '{0:s}/ice_cloud_part5.jpg'.format(output_dir_name)\n )\n\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight'\n )\n pyplot.close(figure_object)\n\n water_contents_kg_m03 = numpy.maximum(water_contents_kg_m03, 0.)\n water_contents_kg_m03 = numpy.minimum(\n water_contents_kg_m03, MAX_NEW_WATER_CONTENT_KG_M03\n )\n\n figure_object, axes_object = profile_plotting.plot_one_variable(\n values=KG_TO_GRAMS * water_contents_kg_m03,\n heights_m_agl=HEIGHTS_M_AGL,\n line_width=4, line_style='solid', use_log_scale=True,\n line_colour=WATER_CONTENT_COLOUR\n )\n\n axes_object.set_xlabel(r'Ice-water content (g m$^{-3}$)')\n axes_object.set_ylabel('Height (km AGL)')\n\n title_string = (\n 'IWC profile with new cloud\\n(bounded to 0...{0:.0f}'\n ).format(KG_TO_GRAMS * MAX_NEW_WATER_CONTENT_KG_M03)\n\n title_string += r' g m$^{-3}$)'\n axes_object.set_title(title_string)\n gg_plotting_utils.label_axes(axes_object=axes_object, label_string='(f)')\n\n panel_file_names.append(\n '{0:s}/ice_cloud_part6.jpg'.format(output_dir_name)\n )\n\n print('Saving figure 
to: \"{0:s}\"...'.format(panel_file_names[-1]))\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight'\n )\n pyplot.close(figure_object)\n\n for this_file_name in panel_file_names:\n imagemagick_utils.resize_image(\n input_file_name=this_file_name, output_file_name=this_file_name,\n output_size_pixels=PANEL_SIZE_PX\n )\n\n concat_figure_file_name = '{0:s}/ice_cloud_schematic.jpg'.format(\n output_dir_name\n )\n print('Concatenating panels to: \"{0:s}\"...'.format(concat_figure_file_name))\n\n imagemagick_utils.concatenate_images(\n input_file_names=panel_file_names,\n output_file_name=concat_figure_file_name,\n num_panel_rows=2, num_panel_columns=3\n )\n imagemagick_utils.trim_whitespace(\n input_file_name=concat_figure_file_name,\n output_file_name=concat_figure_file_name,\n border_width_pixels=10\n )\n imagemagick_utils.resize_image(\n input_file_name=concat_figure_file_name,\n output_file_name=concat_figure_file_name,\n output_size_pixels=CONCAT_FIGURE_SIZE_PX\n )", "title": "" }, { "docid": "5b85d59bd4b531f5052b3a7123b787d2", "score": "0.5688408", "text": "def main(data_dir):\n logger = logging.getLogger(__name__)\n logger.info('obtaining data')\n\n # Get all data\n bandGaps, spaceGroups, icsdIDs = get_all_data(data_dir)\n logger.info('Obtaining data is done')", "title": "" }, { "docid": "8515702e28bbacfd321fcf1f01af4d7a", "score": "0.5686491", "text": "def process_results(args):\n\n # Metadata table\n metadata_fields = ['image_id', 'run_id']\n metadata_fields.extend(args.valid_meta.keys())\n metadata_fields.append(\"image\")\n # args.metadata_file.write('#' + '\\t'.join(map(str, metadata_fields)) + '\\n')\n\n # Feature data table\n feature_fields = ['area', 'hull-area', 'solidity', 'perimeter', 'width', 'height',\n 'longest_axis', 'center-of-mass-x', 'center-of-mass-y', 'hull_vertices',\n 'in_bounds', 'ellipse_center_x', 'ellipse_center_y', 'ellipse_major_axis',\n 'ellipse_minor_axis', 'ellipse_angle', 'ellipse_eccentricity']\n opt_feature_fields = ['y-position', 'height_above_bound', 'height_below_bound',\n 'above_bound_area', 'percent_above_bound_area', 'below_bound_area',\n 'percent_below_bound_area']\n marker_fields = ['marker_area','marker_major_axis_length','marker_minor_axis_length','marker_eccentricity']\n watershed_fields=['estimated_object_count']\n landmark_fields = ['tip_points', 'tip_points_r', 'centroid_r', 'baseline_r', 'tip_number', 'vert_ave_c',\n 'hori_ave_c', 'euc_ave_c', 'ang_ave_c', 'vert_ave_b', 'hori_ave_b', 'euc_ave_b', 'ang_ave_b',\n 'left_lmk', 'right_lmk', 'center_h_lmk', 'left_lmk_r', 'right_lmk_r', 'center_h_lmk_r',\n 'top_lmk', 'bottom_lmk', 'center_v_lmk', 'top_lmk_r', 'bottom_lmk_r', 'center_v_lmk_r']\n\n # args.features_file.write('#' + '\\t'.join(map(str, feature_fields + opt_feature_fields)) + '\\n')\n\n # Signal channel data table\n signal_fields = ['bin-number', 'channel_name', 'values', 'bin_values']\n\n # bin-number\tblue\tgreen\tred\tlightness\tgreen-magenta\tblue-yellow\thue\tsaturation\tvalue\n\n # Initialize the database with the schema template if create is true\n args.sq.execute(\n 'CREATE TABLE IF NOT EXISTS `runinfo` (`run_id` INTEGER PRIMARY KEY, `datetime` INTEGER NOT NULL, '\n '`command` TEXT NOT NULL);')\n args.sq.execute(\n 'CREATE TABLE IF NOT EXISTS `metadata` (`image_id` INTEGER PRIMARY KEY, `run_id` INTEGER NOT NULL, `' +\n '` TEXT NOT NULL, `'.join(map(str, metadata_fields[2:])) + '` TEXT NOT NULL);')\n args.sq.execute(\n 'CREATE TABLE IF NOT EXISTS `features` (`image_id` 
INTEGER PRIMARY KEY, `' + '` TEXT NOT NULL, `'.join(\n map(str, feature_fields + opt_feature_fields + marker_fields+ watershed_fields + landmark_fields)) + '` TEXT NOT NULL);')\n args.sq.execute(\n 'CREATE TABLE IF NOT EXISTS `analysis_images` (`image_id` INTEGER NOT NULL, `type` TEXT NOT NULL, '\n '`image_path` TEXT NOT NULL);')\n args.sq.execute(\n 'CREATE TABLE IF NOT EXISTS `signal` (`image_id` INTEGER NOT NULL, `' + '` TEXT NOT NULL, `'.join(\n map(str, signal_fields)) + '` TEXT NOT NULL);')\n\n # Walk through the image processing job directory and process data from each file\n for (dirpath, dirnames, filenames) in os.walk(args.jobdir):\n for filename in filenames:\n # Make sure file is a text file\n if 'text/plain' in mimetypes.guess_type(filename):\n meta = {}\n images = {}\n features = []\n feature_data = {}\n signal = []\n signal_data = {}\n boundary = []\n boundary_data = {}\n marker = []\n marker_data = {}\n watershed=[]\n watershed_data={}\n landmark = []\n landmark_data = {}\n # Open results file\n with open(dirpath + '/' + filename) as results:\n # For each line in the file\n for row in results:\n # Remove the newline character\n row = row.rstrip('\\n')\n # Split the line by tab characters\n cols = row.split('\\t')\n # If the data is of class meta, store in the metadata dictionary\n if cols[0] == 'META':\n meta[cols[1]] = cols[2]\n # If the data is of class image, store in the image dictionary\n elif cols[0] == 'IMAGE':\n images[cols[1]] = cols[2]\n # If the data is of class shapes, store in the shapes dictionary\n elif cols[0] == 'HEADER_SHAPES':\n features = cols\n elif cols[0] == 'SHAPES_DATA':\n for i, datum in enumerate(cols):\n if i > 0:\n feature_data[features[i]] = datum\n # If the data is of class histogram/signal, store in the signal dictionary\n elif cols[0] == 'HEADER_HISTOGRAM':\n signal = cols\n elif cols[0] == 'HISTOGRAM_DATA':\n for i, datum in enumerate(cols):\n if i > 0:\n signal_data[signal[i]] = datum\n # If the data is of class boundary (horizontal rule), store in the boundary dictionary\n elif 'HEADER_BOUNDARY' in cols[0]:\n boundary = cols\n # Temporary hack\n boundary_data['y-position'] = cols[0].replace('HEADER_BOUNDARY', '')\n elif cols[0] == 'BOUNDARY_DATA':\n for i, datum in enumerate(cols):\n if i > 0:\n boundary_data[boundary[i]] = datum\n elif 'HEADER_MARKER' in cols[0]:\n marker = cols\n # Temporary hack\n marker[1] = 'marker_area'\n elif 'MARKER_DATA' in cols[0]:\n for i, datum in enumerate(cols):\n if i > 0:\n marker_data[marker[i]] = datum\n elif 'HEADER_WATERSHED' in cols[0]:\n watershed=cols\n watershed[1]='estimated_object_count'\n elif 'WATERSHED_DATA' in cols[0]:\n for i, datum in enumerate(cols):\n if i>0:\n watershed_data[watershed[i]]=datum\n elif 'HEADER_LANDMARK' in cols[0]:\n landmark = cols\n elif 'LANDMARK_DATA' in cols[0]:\n for i, datum in enumerate(cols):\n if i > 0:\n landmark_data[landmark[i]] = datum\n\n # Check to see if the image failed, if not continue\n\n # Print the image metadata to the aggregate output file\n args.image_id += 1\n meta['image_id'] = args.image_id\n meta['run_id'] = args.run_id\n\n meta_table = []\n for field in metadata_fields:\n meta_table.append(meta[field])\n\n if len(feature_data) != 0:\n args.metadata_file.write('|'.join(map(str, meta_table)) + '\\n')\n\n # Print the image feature data to the aggregate output file\n feature_data['image_id'] = args.image_id\n\n # Boundary data is optional, if it's not there we need to add in placeholder data\n if len(boundary_data) == 0:\n for field in 
opt_feature_fields:\n boundary_data[field] = 0\n feature_data.update(boundary_data)\n\n # Marker data is optional, if it's not there we need to add in placeholder data\n if len(marker_data) == 0:\n for field in marker_fields:\n marker_data[field] = 0\n feature_data.update(marker_data)\n\n # Watershed data is optional, if it's not there we need to add in placeholder data\n if len(watershed_data) == 0:\n for field in watershed_fields:\n watershed_data[field] = 0\n feature_data.update(watershed_data)\n\n # Landmark data is optional, if it's not there we need to add in placeholder data\n if len(landmark_data) == 0:\n for field in landmark_fields:\n landmark_data[field] = 0\n feature_data.update(landmark_data)\n\n feature_table = [args.image_id]\n for field in feature_fields + opt_feature_fields + marker_fields + watershed_fields + landmark_fields:\n feature_table.append(feature_data[field])\n\n args.features_file.write('|'.join(map(str, feature_table)) + '\\n')\n\n # Print the analysis image data to the aggregate output file\n for img_type in images:\n args.analysis_images_file.write(\n '|'.join(map(str, (args.image_id, img_type, images[img_type]))) + '\\n')\n\n # Print the image signal data to the aggregate output file\n for key in signal_data.keys():\n if key != 'bin-number' and key != 'bin-values':\n signal_data[key] = signal_data[key].replace('[', '')\n signal_data[key] = signal_data[key].replace(']', '')\n signal_table = [args.image_id, signal_data['bin-number'],key, signal_data[key],signal_data['bin-values']]\n args.signal_file.write('|'.join(map(str, signal_table)) + '\\n')\n else:\n args.fail_log.write('|'.join(map(str, meta_table)) + '\\n')\n\n args.metadata_file.write('|'.join(map(str, meta_table)) + '\\n')\n\n feature_table = [args.image_id]\n\n for field in feature_fields + opt_feature_fields + marker_fields + watershed_fields+ landmark_fields:\n feature_table.append(0)\n\n args.features_file.write('|'.join(map(str, feature_table)) + '\\n')", "title": "" }, { "docid": "27a267eb8de9b72cadb69f0a7b01ba81", "score": "0.56792337", "text": "def run_analysis(self):\n from iota.components.iota_analysis import Analyzer\n analysis = Analyzer(init=self.init, all_objects=self.img_objects)\n analysis.print_results()\n analysis.unit_cell_analysis()\n analysis.print_summary()\n analysis.make_prime_input()", "title": "" }, { "docid": "b999d452db9a5a4ad25ea664c2cd1eac", "score": "0.56756896", "text": "def ScreeningAnalysis(self):\r\n self.Mean_intensity_in_contour_thres = self.Mean_intensity_in_contour_thres_box.value()\r\n self.Contour_soma_ratio_thres = self.Contour_soma_ratio_thres_box.value()\r\n \r\n self.normalOutputWritten('Start loading images...\\n')\r\n \r\n self.ProcessML = ProcessImageML()\r\n \r\n if len(self.Tag_round_infor) == 0 and len(self.Lib_round_infor) == 0:\r\n # ===== General image analysis in folder. =====\r\n if self.FilepathSwitchBox.currentText() == 'Cam Z-max':\r\n # If need to do z-max projection first and then analyse on them\r\n cell_data = self.ProcessML.analyze_images_in_folder(self.Analysis_saving_directory, generate_zmax = True)\r\n else:\r\n # Directly analyze images\r\n cell_data = self.ProcessML.analyze_images_in_folder(self.Analysis_saving_directory)\r\n \r\n elif len(self.Tag_round_infor) == 1 and len(self.Lib_round_infor) == 1:\r\n # ===== One GFP round, one Arch round. 
===== \r\n tag_folder = self.Tag_folder\r\n lib_folder = self.Lib_folder\r\n \r\n tag_round = 'Round{}'.format(self.Tag_round_infor[0])\r\n lib_round = 'Round{}'.format(self.Lib_round_infor[0])\r\n \r\n cell_Data_1 = self.ProcessML.FluorescenceAnalysis(tag_folder, tag_round)\r\n cell_Data_2 = self.ProcessML.FluorescenceAnalysis(lib_folder, lib_round)\r\n \r\n self.Cell_DataFrame_Merged = ProcessImage.MergeDataFrames(cell_Data_1, cell_Data_2, method = 'TagLib')\r\n \r\n DataFrames_filtered = ProcessImage.FilterDataFrames(self.Cell_DataFrame_Merged, self.Mean_intensity_in_contour_thres, self.Contour_soma_ratio_thres)\r\n \r\n self.DataFrame_sorted = ProcessImage.sort_on_axes(DataFrames_filtered, axis_1 = self.X_axisBox.currentText(), \\\r\n axis_2 = self.Y_axisBox.currentText(), \\\r\n axis_3 = self.Z_axisBox.currentText(), \\\r\n weight_1 = self.WeightBoxSelectionFactor_1.value(), \\\r\n weight_2 = self.WeightBoxSelectionFactor_2.value(), \\\r\n weight_3 = self.WeightBoxSelectionFactor_3.value())\r\n \r\n print(\"Save CellsDataframe to Excel...\")\r\n self.SaveCellsDataframetoExcel()\r\n \r\n self.UpdateSelectionScatter()\r\n \r\n elif len(self.Tag_round_infor) == 0 and len(self.Lib_round_infor) > 2:\r\n \r\n # ===== For multiple single round wavelength experiment. ===== \r\n lib_folder = self.Lib_folder\r\n \r\n for round_index in self.Lib_round_infor:\r\n lib_round = 'Round{}'.format(round_index)\r\n \r\n cell_Data = self.ProcessML.FluorescenceAnalysis(lib_folder, lib_round)\r\n \r\n elif len(self.Tag_round_infor) == 0 and len(self.Lib_round_infor) == 2:\r\n # ===== For KCL assay, two rounds of lib. ===== \r\n print('===== Kcl analysis based on absolute contour intensity =====')\r\n lib_folder = self.Lib_folder\r\n \r\n EC_round = 'Round{}'.format(self.Lib_round_infor[0])\r\n KC_round = 'Round{}'.format(self.Lib_round_infor[1])\r\n \r\n cell_Data_EC = self.ProcessML.FluorescenceAnalysis(lib_folder, EC_round)\r\n cell_Data_KC = self.ProcessML.FluorescenceAnalysis(lib_folder, KC_round)\r\n \r\n print('Start Cell_DataFrame_Merging.')\r\n self.Cell_DataFrame_Merged = ProcessImage.MergeDataFrames(cell_Data_EC, cell_Data_KC, method = 'Kcl')\r\n print('Cell_DataFrame_Merged.')\r\n \r\n DataFrames_filtered = ProcessImage.FilterDataFrames(self.Cell_DataFrame_Merged, self.Mean_intensity_in_contour_thres, self.Contour_soma_ratio_thres)\r\n \r\n self.DataFrame_sorted = ProcessImage.sort_on_axes(DataFrames_filtered, axis_1 = self.X_axisBox.currentText(), \\\r\n axis_2 = self.Y_axisBox.currentText(), \\\r\n axis_3 = self.Z_axisBox.currentText(), \\\r\n weight_1 = self.WeightBoxSelectionFactor_1.value(), \\\r\n weight_2 = self.WeightBoxSelectionFactor_2.value(), \\\r\n weight_3 = self.WeightBoxSelectionFactor_3.value()) \r\n print(\"Save CellsDataframe to Excel...\")\r\n self.DataFrame_sorted.to_excel(os.path.join(self.Tag_folder, datetime.now().strftime('%Y-%m-%d_%H-%M-%S')+'_Kcl_CellsProperties.xlsx'))\r\n \r\n self.UpdateSelectionScatter()\r\n \r\n elif len(self.Tag_round_infor) == 2 and len(self.Lib_round_infor) == 2:\r\n # ===== For KCL assay, two rounds of lib/tag. 
===== \r\n print('===== Kcl analysis based on lib/tag contour ratio =====')\r\n tag_folder = self.Tag_folder\r\n lib_folder = self.Lib_folder\r\n \r\n # First get the ratio data from the first EC round.\r\n tag_round_1 = 'Round{}'.format(self.Tag_round_infor[0])\r\n lib_round_1 = 'Round{}'.format(self.Lib_round_infor[0])\r\n \r\n cell_Data_tag_round_1 = self.ProcessML.FluorescenceAnalysis(tag_folder, tag_round_1)\r\n cell_Data_lib_round_1 = self.ProcessML.FluorescenceAnalysis(lib_folder, lib_round_1)\r\n \r\n Cell_DataFrame_Merged_1 = ProcessImage.MergeDataFrames(cell_Data_tag_round_1, cell_Data_lib_round_1, method = 'TagLib')\r\n #------------------------------------------------------------------\r\n \r\n # Get the ratio data from the second KC round.\r\n tag_round_2 = 'Round{}'.format(self.Tag_round_infor[1])\r\n lib_round_2 = 'Round{}'.format(self.Lib_round_infor[1])\r\n \r\n cell_Data_tag_round_2 = self.ProcessML.FluorescenceAnalysis(tag_folder, tag_round_2)\r\n cell_Data_lib_round_2 = self.ProcessML.FluorescenceAnalysis(lib_folder, lib_round_2)\r\n \r\n Cell_DataFrame_Merged_2 = ProcessImage.MergeDataFrames(cell_Data_tag_round_2, cell_Data_lib_round_2, method = 'TagLib')\r\n #------------------------------------------------------------------\r\n \r\n print('Start Cell_DataFrame_Merging.')\r\n self.Cell_DataFrame_Merged = ProcessImage.MergeDataFrames(Cell_DataFrame_Merged_1, Cell_DataFrame_Merged_2, method = 'Kcl')\r\n # self.Cell_DataFrame_Merged.to_excel(os.path.join(self.Tag_folder, datetime.now().strftime('%Y-%m-%d_%H-%M-%S')+'_merged_CellsProperties.xlsx'))\r\n # print(self.Cell_DataFrame_Merged.columns)\r\n print('Cell_DataFrame_Merged.')\r\n\r\n DataFrames_filtered = ProcessImage.FilterDataFrames(self.Cell_DataFrame_Merged, self.Mean_intensity_in_contour_thres, self.Contour_soma_ratio_thres)\r\n \r\n self.DataFrame_sorted = ProcessImage.sort_on_axes(DataFrames_filtered, axis_1 = self.X_axisBox.currentText(), \\\r\n axis_2 = self.Y_axisBox.currentText(), \\\r\n axis_3 = self.Z_axisBox.currentText(),\\\r\n weight_1 = self.WeightBoxSelectionFactor_1.value(), \\\r\n weight_2 = self.WeightBoxSelectionFactor_2.value(), \\\r\n weight_3 = self.WeightBoxSelectionFactor_3.value())\r\n print(\"Save CellsDataframe to Excel...\")\r\n self.DataFrame_sorted.to_excel(os.path.join(self.Tag_folder, datetime.now().strftime('%Y-%m-%d_%H-%M-%S')+'_Kcl_CellsProperties.xlsx'))\r\n \r\n self.UpdateSelectionScatter()", "title": "" }, { "docid": "d47940afc5e299c11c6468aac773288c", "score": "0.56714416", "text": "def main():\n do_plot = False\n print('Loading raw data.')\n raw_data = load_raw_data(PurePath.joinpath(PATH, 'raw'))\n print('Extracting keys and valid channel urls.')\n key_list = extract_key_list(raw_data)\n stable_channel_list = extract_stable_channel_urls(raw_data)\n print('Sorting the data into unified dictionaries.')\n data_dict = sort_data(raw_data, key_list, allowed_channels=stable_channel_list)\n df_list = list()\n print('Creating pandas data frames.')\n for key in data_dict.keys():\n if 'change' in key:\n data_frame = create_week_df(data_dict, key, drop=False)\n elif 'month' in key:\n data_frame = create_month_df(data_dict, key, True)\n else:\n data_frame = create_week_df(data_dict, key, drop=True)\n data_frame.name = str(key)\n df_list.append(data_frame)\n print('Cleaning data frames.')\n for _, data_frame in enumerate(df_list):\n clean_df(data_frame)\n if do_plot:\n print('Plotting data frames for inspection.')\n for data_frame in df_list:\n plot_df(data_frame, 
title=data_frame.name)\n print('Saving data frames into processed folder.')\n final_data_frame = concat_data_frames(df_list)\n final_data_frame.to_csv(PurePath.joinpath(PATH, 'processed', 'socialblade.csv').as_posix(),\n encoding='utf-8', index=False)\n print('Saving complete, script will exit.')", "title": "" }, { "docid": "e75ccc0b1c1425d629a3ad8a60154c40", "score": "0.566944", "text": "def run(self):\n\t\tif self.load_file == None:\n\t\t\tfile_contents = self.read_pdf_file(self.pdf_file)\n\t\telse:\n\t\t\tfile_contents = self.process_saved_file(self.load_file)\n\n\t\tif len(file_contents) == 0:\n\t\t\tprint(\"Couldn't read text from the pdf file!\")\n\t\t\treturn 1\n\n\t\t# Begin with some word processing\n\t\tfile_contents = [ self.remove_punctuation(word) for word in file_contents ]\n\t\tfile_contents = self.remove_no_alpha(file_contents)\n\t\tfile_contents = [ word.lower() for word in file_contents ]\n\n\t\tif len(file_contents) == 0:\n\t\t\tprint(\"Couldn't remove no alpha\")\n\t\t\tprint(\"file_contents:\", file_contents)\n\t\t\treturn 1\n\n\t\tif self.save_filename != None:\n\t\t\tflatten = lambda l: [item for sublist in l for item in sublist]\n\t\t\tfile_contents2 = flatten([i.split() for i in file_contents])\n\t\t\tfile_contents2 = self.remove_filters(file_contents2, self.filters)\n\t\t\tfilename = self.save_file(\" \".join(file_contents2))\n\n\t\tfrecuencies = self.frequency_analysis(file_contents, self.groups, self.filters)\n\n\t\tif len(frecuencies) == 0:\n\t\t\tprint(\"Couldn't get frecuencies\")\n\t\t\tprint(\"file_contents:\", file_contents)\n\t\t\treturn 1\n\n\t\tself.create_image(self.base_image, frecuencies, self.output_file, self.max_words)\n\t\tif self.csv_file != None:\n\t\t\tself.export_csv(frecuencies)\n\t\treturn 0", "title": "" }, { "docid": "5ffc89593e46b5e13d4198669ce6a1a7", "score": "0.56691206", "text": "def process():\n\tstatus = create_grps()\n\tif not status:\n\t\treturn()\n\tmove_geo()\n\tget_jnt_tops()", "title": "" }, { "docid": "a8763a5e272dc16ddcc04d42f6000055", "score": "0.56491196", "text": "def ProcessAnalysis(self):\n # Compile list of Be stars\n self.CompileBeLists()\n for ls in ('belist', 'rej_belist'):\n self.FindCorrespondingTargets(ls)\n\n # Output\n self.NightSummary()\n\n for ls in ('belist', 'rej_belist'):\n GetExcess(self, ls)\n\n self.SortBelist()\n for ls in ('belist', 'rej_belist', 'outlier_belist'):\n self.BeSummary(ls)\n\n self.ColorAnalysis()\n self.SpectralTypeDist()\n\n for date in ListDates(self.cluster):\n self.BeCandidatePlots(date)", "title": "" }, { "docid": "d89da6d283e0ac825c2107f2d34289d8", "score": "0.5627255", "text": "def _run_analysis(self, fragmentIndex):\n preprocessTask = self.dataSet.load_analysis_task(\n self.parameters['preprocess_task'])\n optimizeTask = self.dataSet.load_analysis_task(\n self.parameters['optimize_task'])\n decode3d = self.parameters['decode_3d']\n\n lowPassSigma = self.parameters['lowpass_sigma']\n\n codebook = self.get_codebook()\n decoder = decoding.PixelBasedDecoder(codebook)\n scaleFactors = optimizeTask.get_scale_factors()\n backgrounds = optimizeTask.get_backgrounds()\n chromaticCorrector = optimizeTask.get_chromatic_corrector()\n\n zPositionCount = len(self.dataSet.get_z_positions())\n bitCount = codebook.get_bit_count()\n imageShape = self.dataSet.get_image_dimensions()\n decodedImages = np.zeros((zPositionCount, *imageShape), dtype=np.int16)\n magnitudeImages = np.zeros((zPositionCount, *imageShape),\n dtype=np.float32)\n distances = np.zeros((zPositionCount, *imageShape), 
dtype=np.float32)\n\n if not decode3d:\n for zIndex in range(zPositionCount):\n di, pm, d = self._process_independent_z_slice(\n fragmentIndex, zIndex, chromaticCorrector, scaleFactors,\n backgrounds, preprocessTask, decoder\n )\n\n decodedImages[zIndex, :, :] = di\n magnitudeImages[zIndex, :, :] = pm\n distances[zIndex, :, :] = d\n\n else:\n with tempfile.TemporaryDirectory() as tempDirectory:\n if self.parameters['memory_map']:\n normalizedPixelTraces = np.memmap(\n os.path.join(tempDirectory, 'pixel_traces.dat'),\n mode='w+', dtype=np.float32,\n shape=(zPositionCount, bitCount, *imageShape))\n else:\n normalizedPixelTraces = np.zeros(\n (zPositionCount, bitCount, *imageShape),\n dtype=np.float32)\n\n for zIndex in range(zPositionCount):\n imageSet = preprocessTask.get_processed_image_set(\n fragmentIndex, zIndex, chromaticCorrector)\n imageSet = imageSet.reshape(\n (imageSet.shape[0], imageSet.shape[-2],\n imageSet.shape[-1]))\n\n di, pm, npt, d = decoder.decode_pixels(\n imageSet, scaleFactors, backgrounds,\n lowPassSigma=lowPassSigma,\n distanceThreshold=self.parameters['distance_threshold'])\n\n normalizedPixelTraces[zIndex, :, :, :] = npt\n decodedImages[zIndex, :, :] = di\n magnitudeImages[zIndex, :, :] = pm\n distances[zIndex, :, :] = d\n\n self._extract_and_save_barcodes(\n decoder, decodedImages, magnitudeImages,\n normalizedPixelTraces,\n distances, fragmentIndex)\n\n del normalizedPixelTraces\n\n if self.parameters['write_decoded_images']:\n self._save_decoded_images(\n fragmentIndex, zPositionCount, decodedImages, magnitudeImages,\n distances)\n\n if self.parameters['remove_z_duplicated_barcodes']:\n bcDB = self.get_barcode_database()\n bc = self._remove_z_duplicate_barcodes(\n bcDB.get_barcodes(fov=fragmentIndex))\n bcDB.empty_database(fragmentIndex)\n bcDB.write_barcodes(bc, fov=fragmentIndex)", "title": "" }, { "docid": "d5d7ba1497ab7e3fa4fb32c25970ea61", "score": "0.5620821", "text": "def run(self):\n\n # query packages based on self.params_list list:\n results = self.dataset_query(params=self.params_list, operator=self.operator)\n\n #handle output:\n datasets = self.parse_dataset_results(results)\n if len(datasets) > 0: self.write_dataset_results_to_csv(datasets)", "title": "" }, { "docid": "8d0ecaf6b3ad91578f1cde3846c412c0", "score": "0.5615584", "text": "def run(self):\n self._regressor.fit(self._list_sample_x, self._list_sample_y)\n unified_feature_importance = self.get_unified_feature_importance(self._regressor)\n result = zip(unified_feature_importance, self._labels, self._index)\n result = sorted(result, key=lambda x: -x[0])\n self._sorted_index = [i for coef, label, i in result]\n self._sorted_index_queue.put(self._sorted_index)\n prediction = self.get_ensemble_train_data()\n self._prediction_queue.put(prediction)", "title": "" }, { "docid": "b73784172a45005814126d9ff436c800", "score": "0.561352", "text": "def process(self, settings={}):\n self.updateSettings(settings)\n self.applyBlankImageAndMask()\n self.getHistograms()\n self.applyConvexhull()\n self.updateRotationAngle()\n self.fitModel()\n self.getBackgroundSubtractedHistograms()\n self.getPeakInfos()\n if 'no_cache' not in settings:\n self.cacheInfo()", "title": "" }, { "docid": "8366deb5d107f8857e504a5588e897a2", "score": "0.5613393", "text": "def main():\n # print(lat)\n # print(lng)\n # pprint(response_data)\n # print(name)\n # print(wheelchair_boarding)\n pass", "title": "" }, { "docid": "fd3aff4845cbbe33cedb0a7f249c0d6c", "score": "0.5608512", "text": "def main(results, boundaries, base, width, style, 
coords):\n def to_point(longlat_list):\n '''\n Adjusts points so that they can be mapped. The with-opens below make the actual map.\n '''\n return list(zip([float(x) for x in longlat_list[2::2]],[mercator(float(x)) for x in longlat_list[3::2]]))\n with open(results,'r') as fin:\n results_lst1=list(csv.reader(fin))\n with open(boundaries,'r') as fin1:\n boundaries_lst1= list(csv.reader(fin1))\n\n lstofboundaries = [to_point(x) for x in boundaries_lst1] #skip state,city first pair\n\n lstofregions= [Region(x,int(y[2]),int(y[3]),int(y[4])) for x,y in zip(lstofboundaries,results_lst1)]\n minlong = min([(x.min_long()) for x in lstofregions])\n maxlong = max([(x.max_long()) for x in lstofregions])\n minlat = min([(x.min_lat()) for x in lstofregions])\n maxlat = max([(x.max_lat()) for x in lstofregions])\n\n Make_Map = Plot(width, minlong, minlat, maxlong, maxlat)\n output=\"map{}.png\".format(base)\n for x in lstofregions:\n Make_Map.draw(x,style)\n Make_Map.save(output)\n tweet_coords=Make_Map.pointgen(coords)\n imgs=[]\n for index, tweet in enumerate(tweet_coords):\n if index % 1 == 0:\n Make_Map.drawtweet(tweet)\n Make_Map.save(\"map{}{:04d}.png\".format(base,index))\n imgs.append(\"map{}{}.png\".format(base,index))\n\n #OBSERVE: In line 51 of map_generator.py, there is an \"if\" parameter that will allow you to determine the numbers of tweets you want mapped. If it is set to 1, you will get every single picture. This could easily reach the hundreds. Adjust accordingly.", "title": "" }, { "docid": "b530782180e85b640359021ddb64a40b", "score": "0.560295", "text": "def run():\n POGenerator().process_countries()", "title": "" }, { "docid": "e80606ffdc1037b469f269a53eb410cf", "score": "0.5600639", "text": "def main():\n year = int(input(\"Please enter a year you would like to have a map for: \"))\n user_location_str = input(\n \"Please enter your location(format: lat, long): \")\n user_location = (float(user_location_str.split(',')[0][1:]),\n float(user_location_str.split(',')[1][:-1]))\n slow(\"Map is generating...\\n\")\n slow(\"Please wait...\\n\")\n film_list = flmc.read_file(\"small.list\")\n one_year = flmc.one_year_films(year, film_list)\n coord_list = flmc.address_to_coordinates(one_year)\n closest_films = flmc.determine_ten_closest(coord_list, user_location)\n flmc.map_builder(closest_films, user_location, year)\n slow(f\"Finished. 
Please have a look at the {year}_film_map.html\\n\")", "title": "" }, { "docid": "17b378cff5b314a51ac0fed855cfdb9e", "score": "0.5600275", "text": "def run(self):\n for k in sorted(self.corpus, key=sanity_check_order):\n run_checks(self, k)\n create_dirname(self.report.subreport_path(k))\n self.report.flush_subreport(k)\n\n copy_parses(self)\n generate_graphs(self)\n write_index(self)\n\n output_dir = self.output_dir\n if self.output_is_temp():\n print(\"See temp directory: %s\" % output_dir, file=sys.stderr)\n print(\"HINT: use --output if you want to specify \"\n \"an output directory\",\n file=sys.stderr)\n else:\n print(\"Fancy results saved in %s\" % output_dir, file=sys.stderr)", "title": "" }, { "docid": "fd60f74e4a4aa0a009a2518fc0c68f62", "score": "0.55872977", "text": "def run_full_analysis(self):\n self.calculate_file_sizes()\n self.calculate_total_sizes()\n self.calculate_total_build_times()\n self.calculate_translation_units()\n self.calculate_agg_build_time_dev()\n self.guess_project_names()", "title": "" }, { "docid": "3e3fded3a67c55f012901661e1869325", "score": "0.5547641", "text": "def run(self):\r\n\t\t\r\n\t\tref_base = self.mapping_base.ref_base\r\n\t\tcalib_dict = None\r\n\t\telm_dict = None\r\n\t\tdrug_dict = None\r\n\t\tinput_dict = {}\r\n\t\t\r\n\t\tif self.options.elm_flag:\r\n\t\t\telm_dict = AnnotUtils.ELMParser(DIRECTORY = self.options.elm_direc)\r\n\t\t\tinput_dict['ELM_DICT'] = elm_dict\r\n\t\tif self.options.drug_flag:\r\n\t\t\tdrug_dict = AnnotUtils.ResitenceParser(self.options.drug_motif_file)\r\n\t\t\tinput_dict['DRUG_DICT'] = drug_dict\r\n\t\tif self.options.mirna_flag:\r\n\t\t\twith open(self.options.hybrid_calib) as handle:\r\n\t\t\t\tcalib_dict = pickle.load(handle)\r\n\t\t\t\r\n\t\t\t# for this_calib in calib_dict.keys()[30:]:\r\n\t\t\t\t# junk = calib_dict.pop(this_calib)\r\n\t\t\tinput_dict['MIRNA_DICT'] = calib_dict\r\n\t\t\r\n\t\tlogging.warning('Beginning Annotation')\r\n\t\t\r\n\t\tif self.options.mirna_flag:\r\n\t\t\tref_base.ref_seqs[0].HumanMiRNAsite(calib_dict)\r\n\t\tif self.options.elm_flag:\r\n\t\t\tref_base.ref_seqs[0].FindELMs(elm_dict)\r\n\t\tif self.options.tf_flag:\r\n\t\t\tref_base.ref_seqs[0].FindTFSites()\r\n\t\t\tinput_dict['WANT_TF'] = True\r\n\t\tif self.options.hom_flag:\r\n\t\t\tref_base.ref_seqs[0].FindHomIslands(ref_base.ref_seqs[0])\r\n\t\t\tinput_dict['WANT_HOM'] = True\r\n\t\tif self.options.drug_flag:\r\n\t\t\tref_base.ref_seqs[0].FindResistSites(drug_dict)\r\n\t\t\r\n\t\tinput_dict['WANTED_REF'] = ref_base.ref_seqs[0].seq_name\r\n\t\tself.mapping_base.AnnotateBase(input_dict, \r\n\t\t\t\t\t\t\t\tWANTED_THREADS = self.options.num_threads)\r\n\t\t\r\n\t\t\r\n\t\tresp_name = self.options.out_direc + self.options.base_name + '_resp.txt'\r\n\t\tnonresp_name = self.options.out_direc + self.options.base_name + '_nonresp.txt'\r\n\t\tannot_name = self.options.out_direc + self.options.base_name + '_description.txt'\r\n\t\t\r\n\t\t\r\n\t\t#color_list = map(lambda x: x.DetermineResponder('WND'), self.pat_base.values())\r\n\t\t\r\n\t\tt_resp_count = 0\r\n\t\tt_non_resp = 0\r\n\t\tresp = []\r\n\t\tnon_resp = []\r\n\t\tfor this_val in self.pat_base.values():\r\n\t\t\tif this_val.DetermineResponder('WND'):\r\n\t\t\t\tt_resp_count += 1\r\n\t\t\t\tresp.append(this_val)\r\n\t\t\telse:\r\n\t\t\t\tt_non_resp += 1\r\n\t\t\t\tnon_resp.append(this_val)\r\n\t\tlogging.debug('Test Found %(r)d responders and %(nr)d non-resp' % \\\r\n\t\t\t\t\t\t{'r':t_resp_count, 'nr':t_non_resp})\r\n\t\t\r\n\t\t\r\n\t\twith open(resp_name, mode = 'w') as 
handle:\r\n\t\t\tfor this_pat in resp:\r\n\t\t\t\tthis_pat.WriteFeatures(ref_base.ref_seqs[0].feature_annot, handle)\r\n\t\t\r\n\t\twith open(nonresp_name, mode = 'w') as handle:\r\n\t\t\tfor this_pat in non_resp:\r\n\t\t\t\tthis_pat.WriteFeatures(ref_base.ref_seqs[0].feature_annot, handle)\r\n\t\t\r\n\t\twith open(annot_name, mode = 'w') as handle:\r\n\t\t\tfor this_feat in ref_base.ref_seqs[0].feature_annot:\r\n\t\t\t\thandle.write(str(this_feat) + '\\n')\r\n\t\t\r\n\t\t\r\n\t\t#filter_fun = lambda x: x.CheckRange('HumanMiRNA', None)\r\n\t\tfilter_fun = None\r\n\t\t\r\n\t\tresp_fun = lambda x:x.DetermineResponder('WND')\r\n\t\t\r\n\t\tgene_fig = self.mapping_base.MakeMultiDiagram(ref_base.ref_seqs[0].seq_name,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.options.wanted_subs,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tANCHOR_FILT = filter_fun,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tDISPLAY_GROUPING = resp_fun)\r\n\t\t\r\n\t\tif self.options.align_fig != None:\r\n\t\t\tgene_fig.draw(format = 'linear', fragments = 1)\r\n\t\t\tgene_fig.write(self.options.align_fig, 'PDF')\r\n\t\t\tself.outconfig.set('DEFAULT', 'AlignmentFigure', self.options.align_fig)", "title": "" }, { "docid": "3e6028e2cfa5c9a8dc7bc76b8343cbce", "score": "0.55304855", "text": "def run(self):\n try:\n os.mkdir(self.sub_dst) # set up output directory\n except OSError:\n pass\n if self.run_config <= 0:\n # split the data into 24 hour periods\n if self.verbose:\n print(\"Loading data...\")\n if \".bin\" in self.src:\n self.split_days_geneactiv_bin()\n elif \".csv\" in self.src:\n self.split_days_geneactiv_csv()\n if self.run_config <= 1:\n # extract the activity index feature\n if self.verbose:\n print(\"Extracting activity index...\")\n self.extract_activity_index()\n if self.run_config <= 2:\n # run wear/on-body detection\n if self.verbose:\n print(\"Running off-body detection...\")\n self.wear_detection()\n if self.run_config <= 3:\n # run major rest period detection\n if self.verbose:\n print(\"Detecting major rest period...\")\n self.major_rest_period()\n if self.run_config <= 4:\n # run sleep wake predictions on the major rest period\n if self.verbose:\n print(\"Running sleep/wake predictions...\")\n self.sleep_wake_predict()\n if self.run_config <= 5:\n # calculate endpoints based on the above predictions\n if self.verbose:\n print(\"Calculating endpoints...\")\n self.calculate_endpoints()\n if self.run_config <= 6:\n # generates visual reports\n if self.verbose:\n print(\"Generating visual reports...\")\n self.visualize_results()\n\n # aggregate results\n if self.verbose:\n print(\"Aggregating results...\")\n self.aggregate_results()\n\n # clear data\n if self.clear:\n if self.verbose:\n print(\"Clearing intermediate data...\")\n self.clear_data()", "title": "" }, { "docid": "6152c0217e9090bd8d7f8bf38d6b430a", "score": "0.55272126", "text": "def run(self):\n request = CollectingDataOFF()\n request.menu()\n final_json = request.connect_and_harvest()\n products = request.get_info_products(final_json)\n\n database = DataBaseCreator()\n database.menu()\n database.create_tables()\n database.insert_products_informations(products)\n\n affichage = View()\n affichage.menu()\n\n database.disconnect_sql()", "title": "" }, { "docid": "4587175c0d845928732a24a5645c8623", "score": "0.5485196", "text": "def main():\n cleaned_results_file_name = \"cleaned_pre_processing_\" + \\\n \"experiment_results.json\"\n eclipse_exp_1_results_plotter = \\\n EclipseExp1ResultsPlotter(cleaned_results_file_name)\n eclipse_exp_1_results_plotter.plot_results()", "title": "" }, { 
"docid": "3d1896be441727e020a64821128ad9b7", "score": "0.54792976", "text": "def main():\n year = input_data(int, \"1887 < a < 2027\", \"Input year: \")\n max_mark = input_data(int, \"0 < a < 101\",\n \"Input max number of markers(up to 100): \")\n countries = country_lst(read_file('locations.list'), year)\n random.shuffle(countries)\n locations = get_locations(countries, max_mark)\n map_creator(area_layer(), pop_layer(), films_layer(locations))", "title": "" }, { "docid": "71acc2a99ab9ed0bf907db167ad06d6c", "score": "0.5471819", "text": "def main():\n\n # Globals\n jk_data = 'maps'\n out_data = 'data_to_process_for_jk'\n data_files = glob(os.path.join(jk_data, '*.npz'))\n max_samples = 10\n\n # Teaser fig: gamma-nets versus seung models\n main_out_name = 'teaser_main_synth_experiment_data'\n main_model_batch = {\n 'gammanet_t8_per_pixel': 'scarlet',\n 'gammanet_t4_per_pixel': 'red orange',\n 'gammanet_t1_per_pixel': 'orange',\n 'seung_unet_per_pixel': 'dusty blue'} # ,\n # 'ffn_per_pixel': 'kelly green'}\n plot_wrapper(\n out_name=main_out_name,\n keep_models=main_model_batch,\n data_files=glob(os.path.join(jk_data, '*.npz')),\n out_data=out_data,\n max_samples=max_samples)\n\n # Invariance analysis: gamma-net versus seung models\n main_out_name = 'inv_main_synth_experiment_data'\n main_model_batch = {\n 'gammanet_t8_per_pixel': 'scarlet',\n 'seung_unet_per_pixel': 'dusty blue'} # ,\n # 'ffn_per_pixel': 'kelly green'}\n plot_wrapper(\n out_name=main_out_name,\n keep_models=main_model_batch,\n data_files=glob(os.path.join(jk_data, '*.npz')),\n out_data=out_data,\n max_samples=max_samples)\n\n # Main analysis: gamma-net versus seung models\n main_out_name = 'main_synth_experiment_data'\n main_model_batch = {\n 'gammanet_t8_per_pixel': 'scarlet',\n 'seung_unet_per_pixel': 'dusty blue',\n 'seung_unet_per_pixel_BSDS_init': 'deep sky blue'}\n # 'ffn_per_pixel': 'kelly green',\n # 'ffn_per_pixel_BSDS_init': 'darkgreen'}\n plot_wrapper(\n out_name=main_out_name,\n keep_models=main_model_batch,\n data_files=glob(os.path.join(jk_data, '*.npz')),\n out_data=out_data,\n max_samples=max_samples)\n\n # Main analysis: gamma-net versus seung models\n main_out_name = 'main_synth_ffn_experiment_data'\n main_model_batch = {\n 'gammanet_t8_per_pixel': 'scarlet',\n 'ffn_per_pixel': 'kelly green',\n 'ffn_per_pixel_BSDS_init': 'darkgreen'}\n plot_wrapper(\n out_name=main_out_name,\n keep_models=main_model_batch,\n data_files=glob(os.path.join(jk_data, '*.npz')),\n out_data=out_data,\n max_samples=max_samples)\n\n # Main analysis: gamma-net versus seung models part 2\n main_out_name = 'extra_main_synth_experiment_data_2'\n main_model_batch = {\n 'gammanet_t8_per_pixel': 'scarlet',\n 'seung_unet_per_pixel': 'dusty blue',\n 'seung_unet_per_pixel_BSDS_init': 'grey',\n # 'seung_unet_per_pixel_param_ctrl': 'aqua',\n 'seung_unet_per_pixel_adabn': 'tiffany blue',\n 'seung_unet_per_pixel_param_ctrl_rn_2': 'purple',\n 'seung_unet_per_pixel_wd': 'lightish blue'}\n plot_wrapper(\n out_name=main_out_name,\n keep_models=main_model_batch,\n data_files=glob(os.path.join(jk_data, '*.npz')),\n out_data=out_data,\n max_samples=max_samples)\n\n # Compare gammanet to RNNs\n rnn_out_name = 'rnn_synth_experiment_data'\n rnn_model_batch = {\n 'gammanet_t8_per_pixel': 'scarlet',\n # 'gru_v2_t8_per_pixel': 'light teal',\n # 'lstm_v2_t8_per_pixel': 'light sea green'} # ,\n 'hgru_bn_per_pixel': 'faded green',\n 'gru_t8_per_pixel': 'dark teal',\n 'lstm_t8_per_pixel': 'dark sea green'}\n plot_wrapper(\n out_name=rnn_out_name,\n 
keep_models=rnn_model_batch,\n data_files=glob(os.path.join(jk_data, '*.npz')),\n out_data=out_data,\n max_samples=max_samples)\n\n # Compare gammanet to lesioned gammanets\n lesion_gamma_out_name = 'lesion_gamma_synth_experiment_data'\n lesion_gamma_model_batch = {\n 'gammanet_t8_per_pixel': 'scarlet',\n 'gammanet_t4_per_pixel': 'red orange',\n 'gammanet_t1_per_pixel': 'orange'}\n plot_wrapper(\n out_name=lesion_gamma_out_name,\n keep_models=lesion_gamma_model_batch,\n data_files=glob(os.path.join(jk_data, '*.npz')),\n out_data=out_data,\n max_samples=max_samples)\n\n # Extra lesions\n lesion_gamma_out_name = 'extra_lesion_gamma_synth_experiment_data'\n lesion_gamma_model_batch = {\n 'gammanet_t8_per_pixel': 'scarlet',\n 'gammanet_t8_per_pixel_disinhibition': 'dark pink',\n 'hgru_bn_per_pixel': 'faded green',\n 'gammanet_t8_per_pixel_skips': 'salmon',\n 'gammanet_t8_per_pixel_lesion_mult': 'magenta',\n 'gammanet_t8_per_pixel_lesion_add': 'purplish pink'}\n plot_wrapper(\n out_name=lesion_gamma_out_name,\n keep_models=lesion_gamma_model_batch,\n data_files=glob(os.path.join(jk_data, '*.npz')),\n out_data=out_data,\n max_samples=max_samples)\n\n # Plot everything\n all_out_name = 'all_models'\n all_model_batch = {\n 'gammanet_t8_per_pixel': 'scarlet',\n 'gammanet_t8_per_pixel_skips': 'salmon',\n 'gammanet_t8_per_pixel_lesion_mult': 'magenta',\n 'gammanet_t8_per_pixel_lesion_add': 'purplish pink',\n 'gammanet_t4_per_pixel': 'red orange',\n 'gammanet_t1_per_pixel': 'orange',\n 'hgru_bn_per_pixel': 'faded green',\n 'gru_t8_per_pixel': 'lightblue',\n 'lstm_t8_per_pixel': 'french blue',\n 'gru_v2_t8_per_pixel': 'light seafoam',\n 'lstm_v2_t8_per_pixel': 'twilight', \n 'ffn_per_pixel': 'kelly green',\n 'gammanet_t8_per_pixel_disinhibition': 'dark pink',\n 'seung_unet_per_pixel_param_ctrl_rn_2': 'deep aqua',\n # 'seung_unet_per_pixel_param_ctrl': 'aqua',\n 'seung_unet_per_pixel_wd': 'slate',\n 'seung_unet_per_pixel_BSDS_init': 'deep sky blue'}\n plot_wrapper(\n out_name=all_out_name,\n keep_models=all_model_batch,\n data_files=glob(os.path.join(jk_data, '*.npz')),\n out_data=out_data,\n max_samples=max_samples)", "title": "" }, { "docid": "254f4cd49a2f2a6332a1205f6e877345", "score": "0.54717463", "text": "def scrape_data():\n \n has_been_run_once = False\n \n for country in countries:\n \n url = \"https://www.worldometers.info/coronavirus/country/\"+country+\"/\"\n content = page_contents(url)\n \n for stat in data_indexes:\n \n script_contents = script_tag_contents(content, stat)\n script_contents = clean(script_contents)\n \n if 'daily' in stat:\n \n data = retrieve_daily_stats(script_contents)\n \n else:\n \n data = retrieve_overall_stats(script_contents)\n \n if not has_been_run_once:\n \n date = retrieve_dates(script_contents)\n dataframe = build_dataframe(data, stat, date= date)\n has_been_run_once = True\n \n else:\n \n dataframe = build_dataframe(data, stat, dataframe= dataframe)\n \n dataframe = clean_date(dataframe, date_col='date')\n dataframe.to_csv('./Data/covid19_'+country+'_stats.csv',index=False)\n has_been_run_once = False\n print(\"Scraped successfully: \",country)\n \n return True", "title": "" }, { "docid": "e229bffb641e92f3a9225796c6599432", "score": "0.54636234", "text": "def run(self):\n # Log beginning of processing\n start = datetime.datetime.now()\n self.logger.info(\"Metadata extraction started at: %s\",\n start.isoformat())\n\n # Build list of file paths\n data_files = []\n for root, _, files in os.walk(self.datapath, followlinks=True):\n for each_file in files:\n 
data_files.append((root, each_file))\n\n if len(data_files) > 0:\n # Process files\n pool = []\n\n for f in data_files:\n path = os.path.join(*f)\n if \"raw\" not in path:\n p = multiprocessing.Process(target=self.process_file,\n args=(path,))\n pool.append(p)\n p.start()\n\n while len(pool) >= self.numcores:\n for p in pool:\n if p.exitcode is not None:\n pool.remove(p)\n\n for p in pool:\n p.join()\n\n # Log end of processing\n end = datetime.datetime.now()\n self.logger.info(\"Metadata extraction completed at: %s\",\n end.isoformat())\n self.logger.info(\"Start: %s, End: %s, Total: %s\",\n start.isoformat(), end.isoformat(), end - start)", "title": "" }, { "docid": "b42f279f7829ae07241ca1dd10a8531c", "score": "0.54616165", "text": "def run_analysis():\n csv_path = Path('data', 'rereading_data_2019-09-13.csv')\n student_data = load_data_csv(csv_path)\n mean_view_time_sentiment_comparison(student_data)\n reread_counts = compute_reread_counts(student_data, \"In one word\", \"ad\")\n print(\"Number of times students reread text based on question or context:\\n\")\n print(reread_counts)\n\n response_groups_freq_dicts = get_response_groups_frequencies(student_data)\n show_response_groups(response_groups_freq_dicts)\n run_time_analysis_functions(student_data)\n run_relevant_word_analysis(student_data)\n\n total_view_time = compute_total_view_time(student_data)\n print(f'The total view time of all students was {total_view_time}.')\n print(f'Mean number of revisits per unique question: ', compute_mean_revisits(student_data))\n print(f'Median number of revisits per unique question: ', compute_median_revisits(student_data))\n print(\n get_responses_for_question(student_data, \"In one word, how does this text make you feel?\"))\n print(most_common_response(\n student_data,\n \"In one word, how does this text make you feel?\",\n \"This is an ad.\"\n ))\n print(unique_word_pattern(student_data)[0])\n print(unique_word_pattern(student_data)[1])", "title": "" }, { "docid": "a168f079ed3bc53912403b920e05dace", "score": "0.5458471", "text": "def main():\n print('Retrieving zones from mongo')\n zones = retrieve_zones()\n\n print('Fetching logos from Wikimedia')\n fetch_logos(zones)\n print('Fetching done')\n\n print('Compressing retrieved logos')\n compress_logos()\n print('Compressing done')", "title": "" }, { "docid": "e6dc810ac33d7b20f87b3fbc2361281a", "score": "0.54491836", "text": "def run(self):\n\n if not self.check_for_scan():\n #TODO: possibly section scan?\n self.map_avg_altitude()\n self.create_cutplan()\n self.save_cutplan()\n #TODO: reset?\n return", "title": "" }, { "docid": "4ecffa95d8dd6da299349a927f718f4c", "score": "0.54236364", "text": "def run(self):\n # count before filtering\n self.cardinality = self.query.count()\n \n # the term entered in the datatable's search box\n self.filtering()\n\n # field chosen to sort on\n self.sorting()\n\n # pages have a 'start' and 'length' attributes\n self.paging()\n\n # fetch the result of the queries\n self.results = self.query.all()\n\n #return formatted results with correct filters applied\n formatted_results = []\n for i in range(len(self.results)):\n row = dict()\n for j in range(len(self.columns)):\n col = self.columns[j]\n tmp_row = get_attr(self.results[i], col.column_name)\n if col.filter:\n try:\n tmp_row = col.filter(tmp_row)\n except UnicodeEncodeError:\n tmp_row = col.filter(tmp_row.encode('utf8'))\n row[col.mData if col.mData else str(j)] = tmp_row\n formatted_results.append(row)\n\n self.results = formatted_results", "title": "" }, { 
"docid": "91003fea477b78ab2b7194a1ae7fc010", "score": "0.5404872", "text": "def launch_tiles(self):\n \n self.healpix_skymap()\n self.telescope_site()\n self.query_params()\n self.starting_time()\n \n self.set_fov_shape()\n self.check_fov_entries()\n self.set_fov_template()\n\n self.maximum_pixel()\n self.starting_sky_position()\n\n self.GWsky_basic()\n self.win_trasparency()\n self.make_GWsky_config()\n self.make_selected_pointing_file()\n self.make_grid()\n \n message= ' The highest probability pixel is located at RA =' + str('% .5f' % self._ra_max)+'°' + 'and Dec =' + str('% .5f' % self._dec_max)+'°.'\n \n messagebox.showinfo('User Values has been initialized', message)\n \n from . import coverage\n\n Utils.create_folders(folders=[\"Queries\", \"Coords\", \"FoV\"])\n Utils.load_user_fov(\"GWsky_fov.vot\")\n aladin.remove(\"*~*\")", "title": "" }, { "docid": "b52cf883ce9590b316b07733579d6651", "score": "0.53951734", "text": "def run(self):\n futures = {}\n for dataset_name in DATA:\n dataset = self._load_dataset(dataset_name)\n for featurizer in self.featurizers:\n logging.info(\"Featurizing {} with {}....\".format(dataset_name, featurizer.__class__.__name__))\n future = POOL.submit(featurizer.generate, dataset, dataset_name)\n futures[future] = (featurizer, dataset_name)\n\n for future in concurrent.futures.as_completed(futures):\n featurizer, dataset_name = futures[future]\n try:\n future.result()\n logging.info(\"Completed featurization of dataset `{dataset_name}` with featurizer `{featurizer}`.\".format(\n dataset_name=dataset_name,\n featurizer=featurizer.__class__.__name__\n ))\n except Exception as e:\n logging.exception(\"Failed featurization of dataset `{dataset_name}` with featurizer `{featurizer}`.\".format(\n dataset_name=dataset_name,\n featurizer=featurizer.__class__.__name__\n ))", "title": "" }, { "docid": "208e1534fa07edd5de0f7b2cdd85d986", "score": "0.53950983", "text": "def run_plots(self):\r\n # start simulation to generate the state files\r\n self.simulation.start_simulation()\r\n\r\n # read in the state files\r\n self.hospitals = self.read_in_csv()\r\n\r\n # make the plots\r\n self.plot_hospital_covid_patients()\r\n self.severity_scatter_plot()", "title": "" }, { "docid": "53a7aa0b95e39fb282ce7a45d29635b1", "score": "0.538071", "text": "def main():\n usage = \"usage: %prog [optional title] path/to/data/file\"\n parser = OptionParser(usage, version=\"%prog 0.1\")\n parser.add_option(\"-t\",\n \"--title\",\n dest=\"title\",\n default=None,\n help=\"Add a title to the plot. Default is None\")\n options, args = parser.parse_args()\n if len(args) != 1:\n parser.error(\"\"\"\n\nPlease specify a path to the data file\ne.g. 
gpsprocess.py /root/projects/detection/logs/scryturmcut\n\"\"\")\n filepath = args[0]\n settitle = options.title\n outputfile = makenewfilename(filepath)\n\n oo=open(outputfile,\"w\")\n\n y = readcsvfile(filepath)\n (lat, lon, intensity, intensitylow) = processgpsdata(y) # add/subtract more values\n\n \n for x, yz, z, zz in zip(lat, lon, intensity, intensitylow):\n # write to file comma seperated\n # for moment write\n string=\"-\"+str(yz)+ \",\"+str(x)+\",\"+str(z)+\",\"+str(zz)+\"\\n\" #csv\n # as vector layer?\n oo.write(string)\n\n oo.close", "title": "" }, { "docid": "320603fe228441eab442c60aca35358c", "score": "0.53766036", "text": "def main(data_dir, output_dir, city):\n # raster paths\n test_f = os.path.join(\n data_dir, 'intermediary', city, 'reference', 'reference.tif')\n train_f = os.path.join(\n data_dir, 'output', city, 'osm_b', 'training_dataset.tif')\n result_f = os.path.join(\n data_dir, 'output', city, 'osm_b', 'map.tif')\n\n # colormaps\n cmap_test = {1: RED, 2: ORANGE, 3: LIGHT_GREEN, 4: DARK_GREEN}\n cmap_train = {1: RED, 2: DARK_GREEN}\n cmap_result = {1: RED}\n\n paths = [test_f, train_f, result_f]\n cmaps = [cmap_test, cmap_train, cmap_result]\n labels = ['test', 'train', 'result']\n\n for path, cmap, label in zip(paths, cmaps, labels):\n\n with rasterio.open(path) as src:\n img = src.read(1)\n src_profile = src.profile\n src_epsg = src.crs['init'].split(':')[-1]\n\n # Colorize raster\n rgb = transform_colors(img, cmap)\n rgb_f = os.path.join(output_dir, '{}_rgb.tif'.format(label))\n write_rgb(rgb, src_profile, rgb_f)\n\n # Reproject to Mercator\n reproj_f = os.path.join(output_dir, '{}_reproj.tif'.format(label))\n reproject_raster(rgb_f, reproj_f, src_epsg)\n\n # Generate tiles\n dst_dir = os.path.join(output_dir, '{}_tiles'.format(label))\n generate_tiles(reproj_f, dst_dir)\n\n write_bounds(os.path.join(output_dir, 'result_reproj.tif'), output_dir)\n\n for f in os.listdir(output_dir):\n if f.endswith('.tif'):\n os.remove(os.path.join(output_dir, f))", "title": "" }, { "docid": "5138487bd7a0bcecf5db96288cefd347", "score": "0.53706706", "text": "def main():\n global doc_file\n if analysis == 1:\n # Using the output from the AWS Comprehend Custom Enitity Recognition analysis\n cer_content = read_cer_file() # This is the cer_file content (cer = Custom Entity Recognition)\n doc_file, masked_doc_file = get_doc_file_names(\n cer_content) # This is the file containing the document lines to scan\n doc_lines = read_doc_file(doc_file)\n k, j = parse_cer_result(cer_content, doc_lines, masked_doc_file)\n print(f\"Processed: {k} lines with entities masked. 
Excluding {j} incomplete lines.\")\n\n elif analysis == 2:\n # Do a comparison with entity recognition using my own NLP method.\n nlp = read_dictionary()\n keys = create_dict(doc_file) # This is the doc with the original lines.\n j = len(keys)\n print(\"Total Lines:\", j)\n nlp = build_entity_ruler(nlp)\n # star_entities(0, len(keys), keys, nlp, 0) # Run as serial job\n par_star_entities(keys, nlp) # Run it as a parallel job.", "title": "" }, { "docid": "3072fce7f3bf1d5ae6c1b60c869ed73d", "score": "0.5370099", "text": "def main():\n\n with temp_dir('wfp-foodprices') as folder:\n with Download() as downloader:\n config = Configuration.read()\n\n countries_url = config['countries_url']\n wfpfood_url = config['wfpfood_url']\n country_correspondence = config['country_correspondence']\n shortcuts = config['shortcuts']\n\n countriesdata = get_countriesdata(countries_url, downloader, country_correspondence)\n logger.info('Number of datasets to upload: %d' % len(countriesdata))\n\n for countrydata in countriesdata:\n dataset, showcase = generate_dataset_and_showcase(wfpfood_url, downloader, folder, countrydata, shortcuts)\n if dataset:\n dataset.update_from_yaml()\n dataset['notes'] = dataset['notes'] % 'Food Prices data for %s. Food prices data comes from the World Food Programme and covers' % countrydata['name']\n dataset.create_in_hdx()\n showcase.create_in_hdx()\n showcase.add_dataset(dataset)\n resource_view = generate_resource_view(dataset)\n resource_view.create_in_hdx()\n\n logger.info('Individual country datasets finished.')\n\n generate_joint_dataset_and_showcase(wfpfood_url, downloader, folder, countriesdata)\n\n logger.info('Done')", "title": "" }, { "docid": "1e6d88631d740d5a78e926aab36c2819", "score": "0.5367832", "text": "def main(df: pd.DataFrame, coordinates: Tuple[float, float], geocode: RateLimiter,\n num_places: int = 60, output_filename: str = 'films_map.html'):\n small_df = add_coordinates(df, coordinates, num_places, geocode)\n films_map = create_map(coordinates, small_df)\n\n films_map.save(output_filename)", "title": "" }, { "docid": "b4a2e90dc4f85184cca589cebb139ec9", "score": "0.536512", "text": "def main():\n\n hikes_path = load_data('wta-parks-data.json')\n \n hikes = pd.read_csv(hikes_path, sep='\\t', index_col=0)\n # get_hike_pages(list(hikes.index), list(hikes['url']), max_pages=0)\n fast_get_hike_pages(list(hikes.index), list(hikes['url']))\n hikes = merge_pages(hikes_path)\n get_drive_data(list(hikes.index), list(hikes['lat']), list(hikes['lon']))\n clean_drive_data(hikes_path)\n\n return", "title": "" }, { "docid": "a681b86b2b91173db57d9ba2f3dc6921", "score": "0.53641033", "text": "def run(self):\r\n (\r\n self.time_dict,\r\n self.entries_dict,\r\n self.entries_exit_dict,\r\n self.movement_dict,\r\n ) = ({}, {}, {}, {})\r\n for file_path in self.files_found:\r\n _, video_name, _ = get_fn_ext(file_path)\r\n (\r\n self.time_dict[video_name],\r\n self.entries_dict[video_name],\r\n self.entries_exit_dict[video_name],\r\n ) = ({}, {}, {})\r\n print(\"Analysing ROI data for video {}...\".format(video_name))\r\n self.video_recs = self.rectangles_df.loc[\r\n self.rectangles_df[\"Video\"] == video_name\r\n ]\r\n self.video_circs = self.circles_df.loc[\r\n self.circles_df[\"Video\"] == video_name\r\n ]\r\n self.video_polys = self.polygon_df.loc[\r\n self.polygon_df[\"Video\"] == video_name\r\n ]\r\n video_shapes = list(\r\n itertools.chain(\r\n self.video_recs[\"Name\"].unique(),\r\n self.video_circs[\"Name\"].unique(),\r\n self.video_polys[\"Name\"].unique(),\r\n )\r\n 
)\r\n\r\n if video_shapes == 0:\r\n NoDataFoundWarning(\r\n msg=f\"Skipping video {video_name}: No user-defined ROI data found for this video...\"\r\n )\r\n continue\r\n\r\n else:\r\n video_settings, pix_per_mm, self.fps = self.read_video_info(\r\n video_name=video_name\r\n )\r\n self.data_df = read_df(file_path, self.file_type).reset_index(drop=True)\r\n self.data_df.columns = self.bp_headers\r\n data_df_sliced = self.data_df[self.bp_names]\r\n self.video_length_s = data_df_sliced.shape[0] / self.fps\r\n for animal_name in self.bp_dict:\r\n animal_df = self.data_df[self.bp_dict[animal_name]]\r\n (\r\n self.time_dict[video_name][animal_name],\r\n self.entries_dict[video_name][animal_name],\r\n ) = ({}, {})\r\n self.entries_exit_dict[video_name][animal_name] = {}\r\n for _, row in self.video_recs.iterrows():\r\n top_left_x, top_left_y, shape_name = (\r\n row[\"topLeftX\"],\r\n row[\"topLeftY\"],\r\n row[\"Name\"],\r\n )\r\n self.entries_exit_dict[video_name][animal_name][shape_name] = {}\r\n bottom_right_x, bottom_right_y = (\r\n row[\"Bottom_right_X\"],\r\n row[\"Bottom_right_Y\"],\r\n )\r\n slice_x = animal_df[\r\n animal_df[self.bp_dict[animal_name][0]].between(\r\n top_left_x, bottom_right_x, inclusive=True\r\n )\r\n ]\r\n slice_y = slice_x[\r\n slice_x[self.bp_dict[animal_name][1]].between(\r\n top_left_y, bottom_right_y, inclusive=True\r\n )\r\n ]\r\n slice = (\r\n slice_y[\r\n slice_y[self.bp_dict[animal_name][2]]\r\n >= self.settings[\"threshold\"]\r\n ]\r\n .reset_index()\r\n .rename(columns={\"index\": \"frame_no\"})\r\n )\r\n bouts = [\r\n self.__get_bouts(g)\r\n for _, g in itertools.groupby(\r\n list(slice[\"frame_no\"]),\r\n key=lambda n, c=itertools.count(): n - next(c),\r\n )\r\n ]\r\n self.time_dict[video_name][animal_name][shape_name] = round(\r\n len(slice) / self.fps, 3\r\n )\r\n self.entries_dict[video_name][animal_name][shape_name] = len(\r\n bouts\r\n )\r\n self.entries_exit_dict[video_name][animal_name][shape_name][\r\n \"Entry_times\"\r\n ] = list(map(lambda x: x[0], bouts))\r\n self.entries_exit_dict[video_name][animal_name][shape_name][\r\n \"Exit_times\"\r\n ] = list(map(lambda x: x[1], bouts))\r\n\r\n for _, row in self.video_circs.iterrows():\r\n center_x, center_y, radius, shape_name = (\r\n row[\"centerX\"],\r\n row[\"centerY\"],\r\n row[\"radius\"],\r\n row[\"Name\"],\r\n )\r\n self.entries_exit_dict[video_name][animal_name][shape_name] = {}\r\n animal_df[\"distance\"] = np.sqrt(\r\n (animal_df[self.bp_dict[animal_name][0]] - center_x) ** 2\r\n + (animal_df[self.bp_dict[animal_name][1]] - center_y) ** 2\r\n )\r\n slice = (\r\n animal_df.loc[\r\n (animal_df[\"distance\"] <= radius)\r\n & (\r\n animal_df[self.bp_dict[animal_name][2]]\r\n >= self.settings[\"threshold\"]\r\n )\r\n ]\r\n .reset_index()\r\n .rename(columns={\"index\": \"frame_no\"})\r\n )\r\n bouts = [\r\n self.__get_bouts(g)\r\n for _, g in itertools.groupby(\r\n list(slice[\"frame_no\"]),\r\n key=lambda n, c=itertools.count(): n - next(c),\r\n )\r\n ]\r\n self.time_dict[video_name][animal_name][shape_name] = round(\r\n len(slice) / self.fps, 3\r\n )\r\n self.entries_dict[video_name][animal_name][shape_name] = len(\r\n bouts\r\n )\r\n self.entries_exit_dict[video_name][animal_name][shape_name][\r\n \"Entry_times\"\r\n ] = list(map(lambda x: x[0], bouts))\r\n self.entries_exit_dict[video_name][animal_name][shape_name][\r\n \"Exit_times\"\r\n ] = list(map(lambda x: x[1], bouts))\r\n\r\n for _, row in self.video_polys.iterrows():\r\n polygon_shape, shape_name = (\r\n Polygon(\r\n 
list(zip(row[\"vertices\"][:, 0], row[\"vertices\"][:, 1]))\r\n ),\r\n row[\"Name\"],\r\n )\r\n self.entries_exit_dict[video_name][animal_name][shape_name] = {}\r\n points_arr = animal_df[\r\n [self.bp_dict[animal_name][0], self.bp_dict[animal_name][1]]\r\n ].to_numpy()\r\n contains_func = np.vectorize(\r\n lambda p: polygon_shape.contains(Point(p)),\r\n signature=\"(n)->()\",\r\n )\r\n inside_frame_no = [\r\n j\r\n for sub in np.argwhere(contains_func(points_arr))\r\n for j in sub\r\n ]\r\n slice = (\r\n animal_df.loc[\r\n (animal_df.index.isin(inside_frame_no))\r\n & (\r\n animal_df[self.bp_dict[animal_name][2]]\r\n >= self.settings[\"threshold\"]\r\n )\r\n ]\r\n .reset_index()\r\n .rename(columns={\"index\": \"frame_no\"})\r\n )\r\n bouts = [\r\n self.__get_bouts(g)\r\n for _, g in itertools.groupby(\r\n list(slice[\"frame_no\"]),\r\n key=lambda n, c=itertools.count(): n - next(c),\r\n )\r\n ]\r\n self.time_dict[video_name][animal_name][shape_name] = round(\r\n len(slice) / self.fps, 3\r\n )\r\n self.entries_dict[video_name][animal_name][shape_name] = len(\r\n bouts\r\n )\r\n self.entries_exit_dict[video_name][animal_name][shape_name][\r\n \"Entry_times\"\r\n ] = list(map(lambda x: x[0], bouts))\r\n self.entries_exit_dict[video_name][animal_name][shape_name][\r\n \"Exit_times\"\r\n ] = list(map(lambda x: x[1], bouts))\r\n\r\n if self.calculate_distances:\r\n self.movement_dict[video_name] = {}\r\n for animal, shape_dicts in self.entries_exit_dict[\r\n video_name\r\n ].items():\r\n self.movement_dict[video_name][animal] = {}\r\n for shape_name, shape_data in shape_dicts.items():\r\n d = pd.DataFrame.from_dict(\r\n shape_data, orient=\"index\"\r\n ).T.values.tolist()\r\n for entry in d:\r\n df = self.data_df[self.bp_dict[animal][0:2]][\r\n self.data_df.index.isin(\r\n list(range(entry[0], entry[1] + 1))\r\n )\r\n ]\r\n df = self.create_shifted_df(df=df)\r\n df[\"Movement\"] = (\r\n np.sqrt(\r\n (df.iloc[:, 0] - df.iloc[:, 2]) ** 2\r\n + (df.iloc[:, 1] - df.iloc[:, 3]) ** 2\r\n )\r\n ) / pix_per_mm\r\n self.movement_dict[video_name][animal][shape_name] = (\r\n df[\"Movement\"].sum() / 10\r\n )\r\n self.__transpose_dicts_to_dfs()", "title": "" }, { "docid": "8dd47da5346772fc32d567c82d2abacd", "score": "0.53593725", "text": "def main():\r\n\tmodel_full = get_reconstructed_model(model_file,True)\r\n\tscene_full = get_raw_depth(depth_file)\r\n\r\n\t#run the ICP\r\n\tR,t = icp(model_full['positions'],scene_full)\r\n\r\n\t#apply the ICP results to the model\r\n\ttransformed_points = [se3.apply((R,t),p) for p in model_full['positions']]\r\n\tmodel_full['positions'] = transformed_points\r\n\r\n\t#visualize the results\r\n\topengl_plot = OpenGLPlot(model_full, scene_full)\r\n\topengl_plot.initialize_main_loop()\r\n\r\n\t# If matplot is available you can use it to visualize the points (but it not\r\n\t# required) as in the commented line\r\n\t# matplot_points(model_full, clpoints_out)\r", "title": "" }, { "docid": "5a597bfd63a1794ce5158f812743028f", "score": "0.5357731", "text": "def execute(self): \n print('')\n print('Downloading from Streetview static API:')\n \n for location in self.locations:\n for heading in self.headings:\n \n lat, lon = location \n NYTlocation = covid.topCounties[((self.numLocations)-1)].replace(\" \",\"\")\n \n self.filename = \"{0}_{1}_{2}_({3},{4},h{5}).jpg\".format(str(self.numLocations).zfill(3), covid._today, NYTlocation, lat, lon, heading) \n self.getStreetView(lat, lon, heading, self.filename, go.todayPath) \n \n print(f' Got {self.filename}') \n self.numImages += 1 
\n \n self.numLocations += 1", "title": "" }, { "docid": "d01d6a137f1400af4a9312ce8b94f906", "score": "0.53491926", "text": "def rawdata_viewer(city):", "title": "" }, { "docid": "3bb814a234cc2c7120810ad293dbd3b7", "score": "0.5342798", "text": "def main():\n # Display logged output from the PUDL package:\n\n args = parse_command_line(sys.argv)\n pudl.logging_helpers.configure_root_logger(\n logfile=args.logfile, loglevel=args.loglevel\n )\n\n pudl_settings = pudl.workspace.setup.get_defaults()\n pudl_engine = sa.create_engine(pudl_settings[\"pudl_db\"])\n\n # Load the US Census DP1 county data:\n county_gdf = pudl.etl.defs.load_asset_value(AssetKey(\"county_censusdp1\"))\n\n kwargs_dicts = [\n {\"entity_type\": \"util\", \"limit_by_state\": False},\n {\"entity_type\": \"util\", \"limit_by_state\": True},\n {\"entity_type\": \"ba\", \"limit_by_state\": True},\n {\"entity_type\": \"ba\", \"limit_by_state\": False},\n ]\n\n for kwargs in kwargs_dicts:\n _ = compile_geoms(\n balancing_authority_eia861=pd.read_sql(\n \"balancing_authority_eia861\", pudl_engine\n ),\n balancing_authority_assn_eia861=pd.read_sql(\n \"balancing_authority_assn_eia861\", pudl_engine\n ),\n denorm_utilities_eia=pd.read_sql(AssetKey(\"denorm_utilities_eia\")),\n service_territory_eia861=pd.read_sql(AssetKey(\"service_territory_eia861\")),\n utility_assn_eia861=pd.read_sql(\"utility_assn_eia861\", pudl_engine),\n census_counties=county_gdf,\n dissolve=args.dissolve,\n save_format=\"geoparquet\",\n **kwargs,\n )", "title": "" }, { "docid": "c5ed2a8e47d2413dad125372c5e3310d", "score": "0.53419805", "text": "def processResults(self):\r\n pass", "title": "" }, { "docid": "033b64c21e8a85c023ed8353643571d9", "score": "0.5340096", "text": "def main():\n\n results = dict()\n names_to_gender = load_names_cache()\n\n for config in PYCON_ES_SOURCES:\n\n results[config['year']] = dict()\n\n scraper = NameScraper(config)\n speakers = scraper.get_names()\n results[config['year']]['speakers'] = speakers\n\n for gender in KNOWN_GENDERS:\n results[config['year']][gender] = list()\n\n for speaker in speakers:\n name = speaker.split()[0]\n gender = ''\n if name in names_to_gender:\n gender = names_to_gender[name]\n if not gender:\n gender = get_gender(name)\n names_to_gender[name] = gender\n results[config['year']][gender].append(speaker)\n\n data = results[config[\"year\"]].get('female', [])\n _sp = results[config['year']]['speakers']\n if data:\n print(f'\\n--- {config[\"year\"]} ---\\n{config[\"url\"]}\\n({len(data)} / {len(_sp)}): {data}')\n\n save_names_cache(names_to_gender)\n save_results(results)\n viz_data()\n\n return 0", "title": "" }, { "docid": "6cb0559245d4cd85c321519a91997d3f", "score": "0.53349954", "text": "def main():\n\n # Load data\n city_data = load_data()\n\n # Explore the data\n explore_city_data(city_data)\n\n # Training/Test dataset split\n X_train, y_train, X_test, y_test = split_data(city_data)\n\n # Learning Curve Graphs\n max_depths = [1,2,3,4,5,6,7,8,9,10]\n for max_depth in max_depths:\n learning_curve(max_depth, X_train, y_train, X_test, y_test)\n\n # Model Complexity Graph\n model_complexity(X_train, y_train, X_test, y_test)\n\n # Tune and predict Model\n fit_predict_model(city_data)", "title": "" }, { "docid": "e12aa2fce5982d9822db47e0a8f004cc", "score": "0.53312767", "text": "def main():\r\n\r\n\t# Creates a main title and subheader on your page -\r\n\t# these are static across all pages\r\n\timport nltk\r\n\r\n\timport numpy as np\r\n\timport pandas as pd\r\n\r\n\timport matplotlib.pyplot as 
plt\r\n\t\r\n\timport seaborn as sns\r\n\r\n\timport re\r\n\tfrom string import punctuation\r\n\tfrom nltk.tokenize import TreebankWordTokenizer\r\n\tfrom nltk.stem import WordNetLemmatizer\r\n\tfrom nltk.corpus import stopwords\r\n\tfrom wordcloud import WordCloud\r\n\tfrom collections import Counter\r\n\tfrom sklearn import metrics\r\n\t\r\n\r\n\tfrom sklearn.model_selection import train_test_split\r\n\r\n\tfrom sklearn.linear_model import LogisticRegression\r\n\tfrom sklearn.svm import SVC, LinearSVC\r\n\tfrom sklearn.tree import DecisionTreeClassifier\r\n\tfrom sklearn.ensemble import RandomForestClassifier\r\n\r\n\t#To Look\r\n\tfrom sklearn.naive_bayes import MultinomialNB\r\n\tfrom sklearn.linear_model import SGDClassifier\r\n\tfrom sklearn.ensemble import AdaBoostClassifier\r\n\r\n\t# set plot style\r\n\tsns.set(style = 'whitegrid')\r\n\r\n\t\r\n\r\n\t# Creating sidebar with selection box -\r\n\t# you can create multiple pages this way\r\n\toptions = [\"Prediction\", \"Information\", \"Exploratory Data Analysis and Insights\",\"Model Explanation\", \"Aim of Our App\",\"Team Members\"]\r\n\tselection = st.sidebar.selectbox(\"Choose Option\", options)\r\n\r\n\t# Building out the \"Information\" page\r\n\tif selection == \"Information\":\r\n\t\tst.title(\"Information\")\r\n\t\tst.info(\"General Information\")\r\n\t\t# You can read a markdown file from supporting resources folder\r\n\t\tst.markdown(\"Some information here\")\r\n\r\n\t\tst.subheader(\"Raw Twitter data and label\")\r\n\t\tif st.checkbox('Show raw data'): # data is hidden if box is unchecked\r\n\t\t\tst.write(raw[['sentiment', 'message']]) # will write the df to the page\r\n\r\n\t# Building out the predication page\r\n\tif selection == \"Prediction\":\r\n\t\tst.info(\"Prediction with ML Models\")\r\n\t\t# Creating a text box for user input\r\n\t\ttweet_text = st.text_area(\"Enter Text\",\"Type Here\")\r\n\r\n\tif selection == \"Exploratory Data Analysis and Insights\":\r\n\t\tst.title(\"Luke Rocks Data Analysis and Insights\")\r\n\t\t\r\n\t\tst.write(\"\"\"\r\n\t\t\r\n\t\t\"\"\")\r\n\t\tst.info('This page contains various key data insights that guided our Exploration of our data, and the factors of data preprocessing and visualisations that we utilised. ')\r\n\t\tfrom PIL import Image\r\n\t\timage = Image.open('Images/format_of_data.png')\r\n\t\t\r\n\t\tst.image(image, caption='')\r\n\t\tst.write(\"This is the format our data is in. We have messages, its respective tweet ID as well as the message's sentiment with regards to climate change. There are 4 sentiment expressions, namely;\")\r\n\t\tst.write(\"* 1 Pro: The tweet supports the belief of our man made impact on climate change. \")\r\n\t\tst.write(\"* 2 News: the tweet links to factual news about climate change.\")\r\n\t\tst.write(\"* 0 Neutral: the tweet neither supports nor refutes the belief of man-made climate change.\")\r\n\t\tst.write(\"* 1 Anti: the tweet does not believe in man-made climate change.\")\r\n\t\tfrom PIL import Image\r\n\t\timage = Image.open('Images/counts_of_class.png')\r\n\t\tst.image(image, caption='')\r\n\r\n\t\tst.write(\"From the figure above, we observed that we have unbalanced classes. * The majority of tweets (53.9%) support the belief of man-made climate change. * 23% consist of factual news regarding climate change. 
* 14.9% are neutral about man-made climate change* 8.2% don't believe in man-made climate change\")\r\n\r\n\t\tst.write(\"\")\r\n\t\tst.write(\"\")\r\n\r\n\t\tst.write(\" Next, lets investigate into the number of unique words used in each class.\")\r\n\r\n\t\tfrom PIL import Image\r\n\t\timage = Image.open('Images/box_plot.png')\r\n\t\tst.image(image, caption='Number of words for corresponding sentiment class')\r\n\r\n\t\tst.write(\"Tweets representing news contain less words. People who believe in man-made climate change appear to used on average the same ammount of words\")\r\n\r\n\t\tst.write(\"Now let's us study the distribution of the length of the words.\")\r\n\t\tst.write(\"* First we obtained a list containing all the words. * Afterwards we obtained the lenth of each word and counted the number of times the word appears in our list. * Lastly we grouped frequencies by lenght and summed them up.\")\r\n\r\n\t\tfrom PIL import Image\r\n\t\timage = Image.open('Images/word_length.png')\r\n\t\tst.image(image, caption='')\r\n\r\n\t\tst.write(\"The lengths of the words ranged from 1-70, to obtain a better visualisation we limited the domain to words of lengths 1-20. The length of the words appears to be positively skewed. We can expect the data to contain outliers to the right of the distribution. Most words lengths (78.4%) lies between 3-8,with the peak being 7.\")\r\n\t\t\r\n\tif selection == \"Aim of Our App\":\r\n\t\tst.title('Title for the page')\r\n\t\tst.write(\"Here's our first attempt at using data to create a table:\")\r\n\t\tst.write(pd.DataFrame({\r\n \t'first column': [1, 2, 3, 4],\r\n \t'second column': [10, 20, 30, 40]\r\n\t\t}))\r\n\r\n\t\t\"\"\"\r\n\t\t# My first app\r\n\t\tHere's our first attempt at using data to create a table:\r\n\t\t\"\"\"\r\n\r\n\t\tdf = pd.DataFrame({\r\n \t\t'first column': [1, 2, 3, 4],\r\n \t\t'second column': [10, 20, 30, 40]\r\n\t\t})\r\n\r\n\t\tdf\r\n\r\n\r\n\t\tif st.button(\"Classify\"):\r\n\t\t\t# Transforming user input with vectorizer\r\n\t\t\tvect_text = tweet_cv.transform([tweet_text]).toarray()\r\n\t\t\t# Load your .pkl file with the model of your choice + make predictions\r\n\t\t\t# Try loading in multiple models to give the user a choice\r\n\t\t\tpredictor = joblib.load(open(os.path.join(\"resources/Logistic_regression.pkl\"),\"rb\"))\r\n\t\t\tprediction = predictor.predict(vect_text)\r\n\r\n\t\t\t# When model has successfully run, will print prediction\r\n\t\t\t# You can use a dictionary or similar structure to make this output\r\n\t\t\t# more human interpretable.\r\n\t\t\tst.success(\"Text Categorized as: {}\".format(prediction))", "title": "" }, { "docid": "97ab149f65481a149288bc9811b97384", "score": "0.53263676", "text": "def main():\n files = glob(path.join(project_dir, 'data/interim/*.csv'))\n\n for file in files:\n df = build_features_for_a_file(file, SEP)\n file_name = path.splitext(path.basename(file))[0]+'_with_features.csv'\n # save data to the output file\n df.to_csv(path.join(project_dir, 'data/processed/', file_name), sep=SEP, index=False)", "title": "" }, { "docid": "20723d685ace504c98ac9c62d97076f7", "score": "0.5323", "text": "def main():\n sales_data = load_data()\n monthly_df = monthly_sales(sales_data)\n stationary_df = get_diff(monthly_df)\n\n generate_supervised(stationary_df)\n generate_arima_data(stationary_df)", "title": "" }, { "docid": "fd90d52ddc2e0fd9a1e3a989c5e3d7a4", "score": "0.5318487", "text": "def excalibur(data, filename, attribute, maxl, minl, types):\r\n # Getting radius\r\n radius = 150000 / 
max(data[f'{types}'])\r\n\r\n # Setting the file path and creating the folder\r\n print(filename)\r\n filename = filename.split('/')\r\n print(filename)\r\n filetemp = filename.pop()\r\n filepath = '/'.join(filename)\r\n filename = filetemp\r\n datime = str(datetime.datetime.now())\r\n datime = datime.replace(':', '_')\r\n filepath = filepath + f'\\\\ Analysis {filename}' + datime[:20]\r\n os.makedirs(filepath)\r\n\r\n gmap = gmplot.GoogleMapPlotter(21.170240, 72.831062, 8)\r\n gmap.apikey = apikey\r\n data1 = data[data[f'{types}'] > maxl]\r\n print('\\n\\n Excellent performing Data\\n')\r\n print(data1)\r\n markers(data1, types, gmap)\r\n circle(data1, 'green', radius, gmap, types)\r\n gmap.heatmap(data1['latitude'], data1['longitude'], radius=40)\r\n gmap.draw(filepath + f'/Excellent_perfor {types} {filename}.html')\r\n\r\n gmap = gmplot.GoogleMapPlotter(21.170240, 72.831062, 8)\r\n gmap.apikey = apikey\r\n data2 = data[data[f'{types}'] < minl]\r\n print('\\n\\n Badly performing data\\n')\r\n print(data2)\r\n markers(data2, types, gmap)\r\n gmap.heatmap(data2['latitude'], data2['longitude'], radius=40)\r\n circle(data2, 'red', radius, gmap, types)\r\n gmap.draw(filepath + f'/Badly_perfor {types} {filename}.html')\r\n\r\n gmap = gmplot.GoogleMapPlotter(21.170240, 72.831062, 8)\r\n gmap.apikey = apikey\r\n data3 = data[(data[f'{types}'] > minl) & (data[f'{types}'] < maxl)]\r\n print('\\n\\n Medium performing data\\n')\r\n print(data3)\r\n markers(data3, types, gmap)\r\n gmap.heatmap(data3['latitude'], data3['longitude'], radius=40)\r\n circle(data3, 'yellow', radius, gmap, types)\r\n gmap.draw(filepath + f'/Medium_perfor {types} {filename}.html')\r\n\r\n return 1", "title": "" }, { "docid": "c0fbedcbd7e4da676ff3a3aec5e9a14b", "score": "0.531767", "text": "def main(dataset):\n do_plot = True \n #do_plot = False \n\n # Set the basic things about this run\n # TODO: accept these from the command line\n e_set = EvalSet()\n if dataset == \"WGDB\":\n e_set.dataset = \"WGDB\"\n e_set.dataset_full = \"Willow Garage Grasping Dataset\"\n else:\n e_set.dataset = \"PSB\"\n e_set.dataset_full = \"Princeton Shape Benchmark\"\n e_set.features = ['PFH','FPFH','SHOT','SPIN_IMAGE']\n\n # Load the common model names\n model_name_location = \"../results/%s/model_names.txt\"%e_set.dataset.lower()\n with open(model_name_location) as f:\n lines = f.readlines()\n e_set.model_names = [line.strip() for line in lines]\n e_set.num_models = len(e_set.model_names)\n\n # Set the paths to write out to, creating directories if needed\n e_set.log_location_template = \"../results/%s\"%e_set.dataset.lower()+\"/%s.out\"\n e_set.plot_location = \"../writeups/figures/%s/\"%e_set.dataset\n if not os.path.exists(e_set.plot_location):\n os.makedirs(e_set.plot_location)\n e_set.table_location = \"../writeups/results/\"\n if not os.path.exists(e_set.table_location):\n os.makedirs(e_set.table_location)\n # NOTE: We need the paths from these files to be the same as from the actual\n # writeup, so we can't add an additional folder. 
Hence the name of the\n # dataset as part of the filename.\n e_set.table_tex_filename_template = e_set.table_location+e_set.dataset+'_%s_table.tex'\n e_set.features_tex_filename_template = e_set.table_location+e_set.dataset+'_%s_features.tex'\n\n # Parse the output log data\n e_set.evals = {}\n for feature in e_set.features:\n e_set.evals[feature] = Evaluation(e_set, feature)\n e_set.print_info(feature)\n if do_plot:\n e_set.plot_confusion_matrix(feature)\n e_set.plot_pr([feature])\n e_set.plot_rank_histogram([feature])\n\n # Print feature comparison table and output PR plot\n if do_plot:\n e_set.plot_rank_histogram()\n e_set.plot_pr()\n e_set.plot_timing()\n e_set.plot_timing_panel()\n\n # Generate feature comparison table and latex figure\n e_set.print_comparison_table(pdf=False)\n e_set.generate_subfig(pdf=False)", "title": "" }, { "docid": "e6093e0490aeaec9ae5c724a8ed10fd8", "score": "0.53122616", "text": "def data_visualizer():", "title": "" }, { "docid": "fa1935bbaa8547b5dbbab446c4b884dc", "score": "0.53066397", "text": "def scrape(self):\n print(DEBUG, f\"Starting location data scrape: TOP_LINK={self.TOP_LINK}\")\n try:\n page = requests.get(self.TOP_LINK)\n except requests.exceptions.RequestException as e:\n print(ALERT, e)\n return None\n print(SUCCESS, f\"Retrieved location data from {self.TOP_LINK}\")\n\n # I don't know the ways the rest of this can fail, so no useful log messages here\n\n # Extract contents of .kmz file\n archive = ZipFile(BytesIO(page.content), 'r')\n\n try:\n kml = archive.open('doc.kml', 'r')\n except KeyError as e:\n print(ERR, f\"Item not found: {e}\")\n else:\n print(SUCCESS, \"Found doc.kml\")\n\n # .kmz files holds a .kml file that just contains styled XML\n try:\n parser = xml.sax.make_parser()\n except Exception as e:\n print(ERR, f\"Failed to create parser: {e}\")\n else:\n print(SUCCESS, \"Created parser\")\n try:\n handler = PlacemarkHandler()\n except Exception as e:\n print(ERR, f\"Failed to create handler: {e}\")\n else:\n print(SUCCESS, \"Created handler\")\n\n parser.setContentHandler(handler)\n\n try:\n parser.parse(kml)\n except Exception as e:\n print(ERR, f\"Failed to parse .kml file: {e}\")\n archive.close()\n\n output = self.build_table(handler.mapping)\n\n locations_request = json.dumps({\n 'locations': [self.transform_location_to_db(location)\n for location in output.split('\\n')[1:-1]]\n })\n requests.post(url=self.LOCATIONS_API,\n json=locations_request)\n\n return output", "title": "" }, { "docid": "ce2005bc6214ba16eefe6084cbc7cf52", "score": "0.530445", "text": "def preprocess():\n print(\"traversing the summaries to fill up deleted_indices\")\n traverse_summaries()\n print(\"extracting token, field type and position info from original data ... 
\\nAnd updating deleted_indices\")\n time_start = time.time()\n split_infobox()\n reverse_pos()\n duration = time.time() - time_start\n print(\"extract finished in %.3f seconds\" % float(duration))\n\n print(\"spliting test and valid summaries for ROUGE evaluation ...\")\n time_start = time.time()\n split_summary_for_rouge()\n duration = time.time() - time_start\n print(\"split finished in %.3f seconds\" % float(duration))\n\n print(\"turning words and field types to ids ...\")\n time_start = time.time()\n table2id()\n duration = time.time() - time_start\n print(\"idlization finished in %.3f seconds\" % float(duration))", "title": "" }, { "docid": "b5408bc99c4a60e3e31ed3baf7dd9387", "score": "0.5294356", "text": "def run(self):\n # Get the relevant alert ids\n alert_ids, labels = self._get_alert_ids()\n logging.info(\"Number of alert_ids \" + str(len(alert_ids)))\n\n # Get all the alerts\n alerts = self._get_all_alerts(alert_ids)\n logging.info(\"Number of alerts \" + str(len(alerts)))\n\n ##### Extract the features #######\n # Create an object that aggregates multiple extractors\n featureExtractors = AggregateFeatureExtraction()\n \n print \"self._includeRaw\", self._includeRaw\n \n # Add raw feature extractor to the aggregate feature extractor\n if self._includeRaw:\n rawFeatureExtractor = RawFeatureExtraction()\n featureExtractors.add_extractor(rawFeatureExtractor)\n\n # Add the entity extractor to the aggregate feature extractor\n if self._includeExtractedEntities:\n entityExtractor = ExtractedEntities(self._scot, \n self._config) \n featureExtractors.add_extractor(entityExtractor)\n\n # Add cached alert correlation\n # cached alert correlation requires a dictionary mapping alert ids to\n # labels. We create that in the next line.\n if self._includeCachedCorrelation:\n alert_label_dictionary = self._create_alert_label_dictionary(\n alert_ids,labels)\n \n cachedAlertCorrelationExtractor = CachedAlertCorrelation(self._scot,\n alert_label_dictionary) \n featureExtractors.add_extractor(cachedAlertCorrelationExtractor)\n \n # Add topic model feature extractor\n if self._includeLDA:\n # Needs a corpus object\n corpus = DatabaseCorpus(self._config,\n fields=['subject'],\n params={\"id\":alert_ids},\n collection=ALERT_COLLECTION,\n scot=self._scot)\n lda = TopicModelFeatureExtraction(corpus = corpus,\n config = self._config)\n featureExtractors.add_extractor(lda)\n\n \n # Builds the models for all feature extractors. \n t1 = time.time()\n featureExtractors.build_model()\n logging.info(\"METRICS: Time to build models: \" + str(time.time() - t1))\n \n # Apply feature extractors to all of the alerts. 
Returns a dictionary\n # mapping alert id to a dictionary of features.\n t1 = time.time()\n features = self._extract_all_features(featureExtractors, alerts)\n logging.info(\"METRICS: Time to extract features: \" + \n str(time.time() - t1))\n self._extractor = featureExtractors;\n\n #From the feature dictionary create a matrix\n t1 = time.time()\n X = self._create_feature_matrix(alert_ids, features)\n logging.info(\"METRICS: Time to create feature matrix: \" + \n str(time.time() - t1))\n if len(X) >= 10:\n logging.info(\"First entries of X \" + str(X[0:10]))\n logging.info(\"First entries of labels \" + str(labels[0:10]))\n\n t1 = time.time()\n rfc = RandomForestClassifier(n_estimators=100, n_jobs=1)\n rfc = rfc.fit(X, labels)\n logging.info(\"METRICS: Time to train classifier: \" + \n str(time.time() - t1))\n self._classifier = rfc\n\n logging.info(\"Score of model \" + str(rfc.score(X, labels)))\n\n # Write the classifier (and the other necessary objects) to disk\n with open(self._pickleFile, 'wb') as outfile:\n self._extractor.pickle(outfile)\n pickle.dump(self._vectorizer, outfile)\n pickle.dump(self._classifier, outfile)", "title": "" }, { "docid": "a242e02d04ab3e2706c9972fccd9bb3e", "score": "0.5290092", "text": "def run_all(self):\n #############\n # grid search\n #############\n settings = itertools.product(\n self.list_of_s_0,\n self.list_of_delta,\n self.criteria,\n )\n for s_0, delta, crit in settings:\n # Run the decoding\n hyps, actions, up_time = self.run(int(s_0), int(delta), crit)\n\n # Dumps two files one with segmentations preserved, another\n # with post-processing filters applied\n self.dump_results(hyps, suffix=f's{s_0}_d{delta}_{crit}')\n\n # Dump actions\n self.dump_lines(actions, suffix=f's{s_0}_d{delta}_{crit}.acts')", "title": "" }, { "docid": "d5105a4e06adc5b1a2ae31dafb2358e2", "score": "0.52896893", "text": "def main(): \n # Initialize our node\n rospy.init_node('blobFinder')\n\n # Initialize all the global variables we will need\n init_globals()\n\n # Call the function that begins our process\n handle_data()", "title": "" }, { "docid": "659362542ed0de7b55c975fd5f3f6a68", "score": "0.5286324", "text": "def main():\n t0 = time.time()\n polish_data()\n funcs = ['get_pixelated_mug', 'update_summary', 'get_charges_from_recent_text',\n 'retry_getting_mugshot', 'remove_weird_character', 'parse_charge_1',\n 'fix_charges_to_by_lines', 'get_all_intake_deets']\n wrap_it_up(t0, function=funcs)", "title": "" }, { "docid": "94c73ab19b7c7d1537524cf89a0b6f64", "score": "0.52798975", "text": "def processAlgorithm(self, parameters, context, feedback):\n sourceL = self.parameterAsSource(\n parameters,\n self.INPUT_L,\n context)\n \n sourceF = self.parameterAsMatrix(\n parameters,\n self.INPUT_F,\n context)\n \n filtro = self.parameterAsString(\n parameters,\n self.GROUP_BY,\n context)\n \n titolo = self.parameterAsString(\n parameters,\n self.INPUT_T,\n context)\n \n html = self.parameterAsFileOutput(\n parameters,\n self.OUTPUT_H,\n context)\n \n source_path = self.parameterAsString(\n parameters,\n self.INPUT_L,\n context)\n \n icona = self.parameterAsString(\n parameters,\n self.INPUT_I,\n context)\n \n fogliocss = self.parameterAsString(\n parameters,\n self.INPUT_S,\n context)\n \n rel_path = self.parameterAsBool(\n parameters,\n self.INPUT_ABS,\n context)\n \n pie_p = self.parameterAsBool(\n parameters,\n self.INPUT_P,\n context)\n \n def html_composer(sourceL, sourceF, filtro, titolo, html, source_path, icona, fogliocss, rel_path, partizione, values, valori):\n ''' 
COMPOSIZIONE PAGINA HTML ''' \n #FASE #01 - cerco la path del progetto\n if QgsProject.instance().homePath():\n path_proj = QgsProject.instance().homePath()\n #windowizzo la path quale che sia\n path_proj = str(Path(path_proj))\n #rimuovo geopakage: se presente\n path_proj = path_proj.replace('geopackage:','')\n else:\n feedback.reportError('WARNING NO PROJECT PATH: the html file may not work correctly\\n')\n path_proj = ''\n #tolgo %20 e metto spazio \n path_proj = path_proj.replace('%20',' ')\n \n #FASE #02 - cerco la path del file di input\n path_file = (self.parameterDefinition('INPUT_L').valueAsPythonString(parameters['INPUT_L'], context))\n path_file = path_file[1:path_file.rfind('/')+1]\n if 'memory' in path_file:\n file_mem = True\n path_file = ''\n else:\n file_mem = False\n #windowizzo la path quale che sia\n path_file = str(Path(path_file))\n\n #tolgo %20 e metto spazio \n path_file = path_file.replace('%20',' ')\n \n #FASE #03 - scelgo la path da usare tra le due: prioritaria quella di progetto\n if path_proj:\n path_dir = path_proj\n if path_proj not in path_file and path_file != '':\n feedback.reportError('WARNING PATH FILE ' + path_file)\n feedback.reportError('OUTSIDE PROJECT PATH ' + path_proj)\n feedback.reportError('MOST LIKELY IT WON''T WORK' + '\\n')\n elif path_file == '':\n feedback.reportError('WARNING TEMPORARY LAYER WITHOUT PATH\\n')\n else:\n path_dir = path_file\n if path_dir:\n feedback.reportError('WARNING use the path of the input file ' + path_dir + '\\n')\n else:\n feedback.reportError('WARNING TEMPORARY LAYER WITHOUT PATH\\n')\n \n #FASE #04 - controllo se si sta salvando file con percorsi relativi nella cartella di progetto\n if path_dir not in str(Path(html)) and 'processing' not in str(Path(html)):\n feedback.reportError('WARNING HTML WITH RELATIVE PATH SAVED OUTSIDE THE PROJECT PATH DOES NOT WORK PROPERLY\\n')\n if 'processing' in str(Path(html)):\n feedback.reportError('WARNING TEMPORARY HTML WORK PROPERLY ONLY WITH ABSOLUTE PATH\\n')\n \n #FASE #05 - controllo se icona e css sono entro la cartella progetto\n if fogliocss and (path_dir not in fogliocss):\n feedback.reportError('WARNING css PATH OUTSIDE PROJECT PATH: the html file may not work correctly\\n')\n if icona and path_dir not in icona:\n feedback.reportError('WARNING icon PATH OUTSIDE PROJECT PATH: the html file may not work correctly\\n')\n \n #FASE #06 - aggiungo terminatore di percorso se non è un file temporaneo\n if path_dir != '':\n path_dir = path_dir + '\\\\'\n \n #FASE #07 - modifica se csv in input\n if source_path.find(\".csv\"):\n source_path = 'file:///' + source_path[0:source_path.rfind('/')+1]\n \n #FASE #08 pulisco titolo e riordino a causa di un bug \n titolo = titolo.replace('\\\"','')\n \n intestazione = titolo.replace('\"','')\n intestazione = titolo.replace('\\'','')\n \n #riordino campi come da selezione per bug \n cleanlist = []\n [cleanlist.append(x) for x in sourceF if x not in cleanlist]\n sourceF = cleanlist\n \n #FASE #09 - inizializzo variabile per barra % esecuzione script\n # Compute the number of steps to display within the progress bar and\n # get features from source\n total = 100.0 / sourceL.featureCount() if sourceL.featureCount() else 0\n \n #FASE #10 - filtra dati se richiesto\n if len (filtro) > 0:\n request = QgsFeatureRequest(QgsExpression(filtro))\n features = sourceL.getFeatures(request)\n else:\n features = sourceL.getFeatures()\n \n #FASE #11 - produco il file in uscita\n with open(html, 'w') as output_file:\n # write header\n line = '<html>\\r'\n 
output_file.write(line)\n \n #FASE #11.01 - se richiesto inserisco foglio css\n if fogliocss:\n if not rel_path or 'processing' in html:\n fogliocss = 'file:///' + fogliocss\n else:\n fogliocss = str(Path(fogliocss))\n fogliocss = fogliocss.replace(path_dir,'')\n line = '<head>\\r<link rel=\"stylesheet\" href=\"'+ fogliocss + '\">\\r</head>'\n output_file.write(line)\n \n #FASE #11.02 - se richiesto inserisco icona e titolo\n if icona or titolo:\n line = '<div>'\n output_file.write(line)\n if icona:\n if not rel_path or 'processing' in html:\n icona = 'file:///' + icona\n else:\n icona = str(Path(icona))\n icona = icona.replace(path_dir,'')\n line = '<img class=\"zoom\" src=\"' + icona + '\">' #'\" style=\"width:' + wi + ';height:' + hi + ';\">'\n output_file.write(line)\n line = ''\n if titolo:\n if icona:\n line = line + '<b>' + '&nbsp&nbsp' + titolo + '</b>'\n else:\n line = line + '<b>' + titolo + '</b>'\n output_file.write(line)\n line = '</div>'\n output_file.write(line)\n line = None\n \n #FASE #11.03 - compongo tabella\n line = '<table class=\"Table\">'\n output_file.write(line)\n \n #FASE #11.04 - inserisco testata pagina se ho più di una pagina\n if values and pie_p:\n if rel_path and 'processing' not in html:\n html = str(Path(html))\n html = html.replace(path_dir,'')\n feedback.pushInfo('Done: ' + html + '\\n') \n line = '<caption>' + str(valori) + '</caption><thead id=\"h\"><tr><td colspan=\"' + str(sourceL.featureCount()) + '\"><div class=\"links\">'\n output_file.write(line)\n for i in range (0, len(values)):\n html = re.sub('_[0-9]{2,3}(.html)','_0' + str(i) + '.html', html)\n if i == 0:\n line = '<a href=\"' + html + '\">&laquo;</a>'\n output_file.write(line)\n try:\n valore_ins = values[i].toString('dd.MM.yyyy')\n except:\n valore_ins = str(values[i])\n line = '<a href=\"' + html + '\">' + partizione + ': ' + valore_ins + '</a>'\n output_file.write(line)\n line = '<a href=\"' + html + '\">&raquo;</a></div></td></tr></thead>'\n output_file.write(line)\n \n line = '<thead>\\r<tr>\\r'\n output_file.write(line)\n \n #titoli colonne\n line = ''.join(('<th style=\"width:auto\">'+ str(name)+ '</<th>\\r') for name in sourceF) + '</tr>\\r'\n output_file.write(line)\n \n line = '</thead>\\r<tbody>\\r'\n output_file.write(line)\n \n #righe tabella\n for current, f in enumerate(features):\n line = '<tr>\\r'\n output_file.write(line)\n \n for name in sourceF:\n #controllo se si tratta di una immagine\n try:\n img_type = f[name].split(\".\")\n img_type = img_type[len(img_type)-1]\n except:\n img_type = ''\n \n #se è un'immagine e/o ha un percorso\n if img_type in [\"JPEG\",\"jpeg\",\"JPG\",\"jpg\",\"PNG\",\"png\"]:\n #se non è un file temporaneo o non voglio riferimenti relativi\n if not rel_path or 'processing' in html:\n if file_mem:\n img_name = ''\n else:\n img_name = 'file:///'\n if path_dir not in str(Path(f[name])):\n img_name = img_name + path_dir\n img_name = img_name + f[name]\n else:\n #se voglio riferimenti relativi\n img_name = str(Path(f[name]))\n img_name = img_name.replace(path_dir,'')\n line = ''.join('<td><center><img class=\"zoom\" src ='+ \"'\" + img_name + \"'\" + 'alt='+ \"'\" + img_name + \"'\" + '\"></center></td>\\r') #+ 'width=\"' + wf + '\" height=\"' + hf +\n else:\n try:\n line = ''.join('<td>'+f[name].toString(\"dd.MM.yyyy\")+ '</td>\\r')\n except:\n line = ''.join('<td>'+ str(f[name]) + '</td>\\r')\n output_file.write(line)\n \n line = '</tr>\\r'\n output_file.write(line)\n\n # Update the progress bar\n feedback.setProgress(int(current * total))\n \n 
#FASE #11.05 - inserisco piè di pagina se ho più di una pagina\n if values and pie_p:\n if rel_path and 'processing' not in html:\n html = str(Path(html))\n html = html.replace(path_dir,'')\n feedback.pushInfo('Done: ' + html + '\\n') \n line = '<tfoot><tr><td colspan=\"' + str(sourceL.featureCount()) + '\"><div class=\"links\">'\n output_file.write(line)\n for i in range (0, len(values)):\n html = re.sub('_[0-9]{2,3}(.html)','_0' + str(i) + '.html', html)\n if i == 0:\n line = '<a href=\"' + html + '\">&laquo;</a>'\n output_file.write(line)\n try:\n valore_ins = values[i].toString('dd.MM.yyyy')\n except:\n valore_ins = str(values[i])\n line = '<a href=\"' + html + '\">' + partizione + ': ' + valore_ins + '</a>'\n output_file.write(line)\n line = '<a href=\"' + html + '\">&raquo;</a></div></td></tr></tfoot>'\n output_file.write(line)\n \n line = '</tbody>\\r</table>\\r</html>'\n output_file.write(line)\n \n output_file.close()\n return {self.OUTPUT_H: html}\n \n # INIZIO ELABORAZIONE\n if filtro and \"'\" in filtro or not filtro:\n partizione = 0\n values = ''\n valori = ''\n risultato = html_composer(sourceL, sourceF, filtro, titolo, html, source_path, icona, fogliocss, rel_path, partizione, values, valori)\n elif filtro:\n partizione = filtro[1:len(filtro)-1]\n idx = sourceL.fields().indexOf(partizione)\n values = sourceL.uniqueValues(idx)\n pagine = len(values)\n values = sorted(values)\n for current, valori in enumerate(values):\n try:\n valori = valori.toString(\"yyyy-MM-dd\")\n except:\n pass\n if valori:\n N_Filter =\"\\\"\" + partizione + \"\\\"\" + ' = ' + \"'\" + str(valori) + \"'\"\n else:\n N_Filter =\"\\\"\" + partizione + \"\\\"\" + \" is None\"\n filtro = N_Filter\n finale = html.replace(\".html\", \"_0\" + str(current)+\".html\")\n risultato = html_composer(sourceL, sourceF, filtro, titolo, finale, source_path, icona, fogliocss, rel_path, partizione, values, valori)\n filtro = \"\"\n \n return{self.OUTPUT_H: risultato['OUTPUT_H']}", "title": "" }, { "docid": "f4d6527e72297b6c4dc5a5b3c7c9d271", "score": "0.52795756", "text": "def run(self):\n self.generate_population()\n self.calculate_fitness()\n\n for generation in range(self.generations):\n self.process_generation(generation)\n\n self.post()", "title": "" }, { "docid": "6b57bb5066f5c785acd8733be310a6e1", "score": "0.5275769", "text": "def render(self):\n\n # Create a group to hold the Huff Model Main Outputs.\n root = QgsProject.instance().layerTreeRoot()\n HuffModelGroup = root.addGroup(\"Huff Model\")\n\n # Get Needed Info\n self.inPointVector = self.getinPointVector()\n filepath = self.inPointVector.dataProvider().dataSourceUri()\n filepath_list = filepath.split(\"\\\\\")\n layer_name = filepath_list[-1]\n filepath = filepath.replace(layer_name, \"\")\n lyrCentre = self.inPointVector\n fldCentreID_index = self.get_index(self.inPointVector, self.dlg.le_inPointVectorField.text())\n\n # Has The User Selected An Advanced Methodology?\n yes_button = self.dlg.rb_AdvancedChoice_M_Yes.isChecked()\n\n # What Is Rendered Depends On The Users Choice\n if yes_button == True:\n # Create a group to hold the Huff Model Main Outputs.\n HuffModel_Mapping_Group = root.addGroup(\"Trade Areas\")\n\n # Get Fields For Names\n tas = QgsVectorLayer(filepath + \"Huff__Model__TAD.shp\", \"\", \"ogr\")\n\n # Check Inputs For First Vector\n fields = tas.fields()\n field_names = []\n for field in fields:\n field_names.append(str(field.name()))\n\n # A Loop That Creates A Copy Of The TradeArea Shapefile For Each Centre, Name By IDs\n name_idx = 1\n 
for centreFeature in lyrCentre.getFeatures():\n\n # Get The ID of the Field\n currentCentreID = centreFeature[fldCentreID_index]\n current_ID = currentCentreID\n\n # Load Huff Trade Areas Layer To Interface.\n HuffModel_TradeAreas = QgsVectorLayer(filepath + \"Huff__Model__TAD.shp\", str(field_names[name_idx]), \"ogr\")\n QgsProject.instance().addMapLayer(HuffModel_TradeAreas, False)\n HuffModel_Mapping_Group.insertChildNode(1, QgsLayerTreeLayer(HuffModel_TradeAreas))\n\n # Create A Thematic Map\n name = current_ID\n layer = QgsProject.instance().mapLayersByName(name)[0]\n\n # get unique values for 'severity' field\n fni = self.get_index(layer, current_ID)\n unique_vals = layer.dataProvider().uniqueValues(fni)\n\n # define categories to use in symbology\n categories = []\n\n for val in unique_vals:\n int_val = int(val)\n\n # Define The Primary Trade Area Colours\n if int_val == 1:\n\n # initialise the default symbol for this geometry type\n symbol = QgsSymbol.defaultSymbol(layer.geometryType())\n\n # configure a symbol layer\n layer_style = {}\n layer_style['color'] = '%d, %d, %d' % (246, 4, 8)\n\n layer_style['outline'] = '#ffffff'\n symbol_layer = QgsSimpleFillSymbolLayer.create(layer_style)\n\n # replace default symbol layer with the configured one\n if symbol_layer is not None:\n symbol.changeSymbolLayer(0, symbol_layer)\n else:\n print\n \"success\"\n\n # create renderer object\n category = QgsRendererCategory(val, symbol, str(val))\n # entry for the list of category items\n categories.append(category)\n\n # Define The Secondary Trade Area Colours\n elif int_val == 2:\n\n # initialise the default symbol for this geometry type\n symbol = QgsSymbol.defaultSymbol(layer.geometryType())\n\n # configure a symbol layer\n layer_style = {}\n layer_style['color'] = '%d, %d, %d' % (255, 207, 102)\n\n layer_style['outline'] = '#ffffff'\n symbol_layer = QgsSimpleFillSymbolLayer.create(layer_style)\n\n # replace default symbol layer with the configured one\n if symbol_layer is not None:\n symbol.changeSymbolLayer(0, symbol_layer)\n else:\n pass\n\n # create renderer object\n category = QgsRendererCategory(val, symbol, str(val))\n # entry for the list of category items\n categories.append(category)\n\n # Everything else gets skipped, and isn't rendered\n else:\n pass\n\n # create renderer object\n renderer = QgsCategorizedSymbolRenderer(current_ID, categories)\n\n # assign the created renderer to the layer\n if renderer is not None:\n layer.setRenderer(renderer)\n\n layer.triggerRepaint()\n name_idx += 1\n\n # Load Huff Probability Layer To Interface.\n HuffModel_Probabilities = QgsVectorLayer(filepath + \"Huff__Model.shp\", \"Huff Probabilities\", \"ogr\")\n QgsProject.instance().addMapLayer(HuffModel_Probabilities, False)\n HuffModelGroup.insertChildNode(1, QgsLayerTreeLayer(HuffModel_Probabilities))\n\n else:\n # Load Huff Probability Layer To Interface.\n HuffModel_Probabilities = QgsVectorLayer(filepath + \"Huff__Model.shp\", \"Huff Probabilities\", \"ogr\")\n QgsProject.instance().addMapLayer(HuffModel_Probabilities, False)\n HuffModelGroup.insertChildNode(1, QgsLayerTreeLayer(HuffModel_Probabilities))", "title": "" }, { "docid": "ca81e0f6a41ceddc24a74fcdefd3924b", "score": "0.5274013", "text": "def _runPath(self):\n self._bookPdfData()\n self._bookMinimizer()\n self._preFitSteps()\n self._runFitSteps()\n self._postFitSteps()", "title": "" }, { "docid": "517f79c9b8bd9b0a5621bf01204d5dc2", "score": "0.5265684", "text": "def main(event, context):\n\n neighborhood_list = 
build_neighborhood_list()\n neighborhoods = build_url_dic(neighborhood_list)\n get_rental_data(neighborhoods)", "title": "" }, { "docid": "490168611b18b03bea8ef30e4cbaade3", "score": "0.52567065", "text": "def execute(self):\n super(DownloadCrawlRunData, self).execute()\n\n if self._list:\n self.do_list_extractors()\n if self._dump:\n self.do_dump()", "title": "" }, { "docid": "1478355c1fc283e01c15ea279ac9731e", "score": "0.525269", "text": "def main(in_txt_fld, in_img_fld, out_fld, log_level=\"info\", verbose=False):\n global conf, img_folder, txt_folder, out_folder, logger, verbose_log\n\n logger = uc5def.get_logger(__file__, log_level)\n verbose_log = verbose\n \n img_folder = in_img_fld # conf.in_folder(uc5keys.images)\n txt_folder = in_txt_fld # conf.in_folder(uc5keys.texts)\n out_folder = out_fld # conf.in_folder(uc5keys.csvs)\n \n logger.info(\"-\")\n logger.info(\"process_raw_dataset [v%.1f], %s\" % (version, description))\n logger.info(\"image folder %s\" % img_folder)\n logger.info(\"text folder %s\" % txt_folder)\n logger.info(\"output folder %s\" % out_folder)\n \n in_folders = [img_folder, txt_folder]\n out_folders = [] # [out_folder]\n\n ok = fu.check_folders_procedure(in_folders=in_folders, out_folders=out_folders,\n exist_ok=True, log_f=logger.warning)\n if not ok:\n logger.error(\"Exiting, error code 1\")\n logger.error(\"-\")\n exit(1)\n\n parsed = parse_reports(txt_folder)\n save_results(parsed, out_folder)\n\n logger.info(\"all went well\")", "title": "" }, { "docid": "510ecb0f4d2dbe090da09be9de8d3341", "score": "0.52509433", "text": "def build(self):\n self.download()\n self.parse()\n try:\n \tself.nlp()\n \texcept Exception:\n \t\tpass", "title": "" }, { "docid": "2f6c6a4dde8254a678e5ffdaa9482d45", "score": "0.5249399", "text": "def run_module(self):\n # set args and local parameters\n if not self.skip_parse_args:\n args = self.set_args()\n self.parse_args(args)\n\n # create the breakdown and final report directories if they don't already exist.\n if not os.path.exists(self.final_reports_sub_dir):\n os.makedirs(self.final_reports_sub_dir)\n\n # Recognition module will only use one graph throughout its lifecycle.\n # set those parameters here. Will update individual files as needed.\n recognition_args = self.setup_recognition_arguments()\n self.recognition_module.setup_module(recognition_args)\n\n if self.is_input_dir:\n # Photos must be in a specific layout, one directory down from input path to allow for classification and\n # general sorting. 
Ex: \"photo_backlog/test/image.jpg\"\n input_photos = glob.glob(os.path.join(self.input_filepath, \"**\", \"*.jpg\"), recursive=True)\n # Go over all photos that must be checked\n for photo_path in input_photos:\n # Run the breakdown (if required), recognition and breakdown report parsing (if required) for a single\n # photo in the total list\n self.single_photo_workflow(photo_path)\n else:\n # Run the breakdown (if required), recognition and breakdown report parsing (if required) for the single\n # file that the ENTIRE SYSTEM IS SPECIFIED TO RUN.\n self.single_photo_workflow(self.input_filepath)", "title": "" }, { "docid": "c5f8db6acd89f73a443887113e7b7e6d", "score": "0.52483135", "text": "def main():\n flowfig=None\n flowvecfig=None\n dispfig=None\n stress_strain_fig=None\n aqconcfig=None\n gas_volfrac_fig=None\n min_si_fig=None\n min_ab_fig=None\n if op_Flowdata==True:\n if info==True:\n print('Running flowdata.tec')\n flowfaces=flowdata_import()\n flowfig=pdf_png_fig_plotting(flowfaces,flowdata_params,cwd+\"/flow_data.pdf\")\n if op_Flowvector==True:\n if info==True:\n print('Running flowvector.tec')\n flowvecfaces=flowvector_import()\n flowvecfig=pdf_png_fig_plotting(flowvecfaces,flowvector_params,cwd+\"/flow_vector.pdf\")\n if op_Displacement==True:\n if info==True:\n print('Running displacement.tec')\n dispfaces=displace_import()\n dispfig=pdf_png_fig_plotting(dispfaces,displacement_params,cwd+\"/displacement.pdf\")\n if op_Stress_Strain==True:\n if info==True:\n print('Running stress_strain.tec')\n stressfaces=stress_strain_import()\n stress_strain_fig=pdf_png_fig_plotting(stressfaces,stress_strain_params,cwd+\"/stress_strain.pdf\")\n if op_aqconc==True:\n if info==True:\n print('Running aqconc.tec')\n aqconcfaces=aq_conc_import()\n aqconc_params=aqconc_params_selector()\n aqconcfig=pdf_png_fig_plotting(aqconcfaces,aqconc_params,cwd+\"/aq_conc.pdf\")\n if op_gas_volfrac==True:\n if info==True:\n print('Running gas_volfrac.tec')\n gas_volfrac_faces=gas_volfrac_import()\n gas_volfrac_params=gas_volfrac_params_selector()\n gas_volfrac_fig=pdf_png_fig_plotting(gas_volfrac_faces,gas_volfrac_params,cwd+\"/gas_volfrac.pdf\")\n if op_min_ab==True:\n if info==True:\n print('Running min_ab.tec')\n min_ab_faces=mineral_ab_import()\n min_ab_params=mineral_ab_params_selector()\n min_ab_fig=pdf_png_fig_plotting(min_ab_faces,min_ab_params,cwd+\"/min_ab.pdf\")\n if op_min_si==True:\n if info==True:\n print('Running min_si.tec')\n min_si_faces=mineral_si_import()\n min_si_params=mineral_si_params_selector()\n min_si_fig=pdf_png_fig_plotting(min_si_faces,min_si_params,cwd+\"/min_si.pdf\")\n\n return flowfig, flowvecfig, dispfig, stress_strain_fig, aqconcfig, gas_volfrac_fig, min_ab_fig, min_si_fig", "title": "" }, { "docid": "0d4b11a5f2be5696dc5e7b24b05452ee", "score": "0.5244725", "text": "def process(self):\n\n data_list = list()\n pb = tqdm(self._get_smiles()) if self.verbose else self._get_smiles()\n for sm, y in pb:\n try:\n x, edge_idx = self._graph_helper(sm)\n except ValueError:\n continue\n y = torch.tensor(y, dtype=torch.float)\n data_list.append(Data(x=x, edge_index=edge_idx, y=y))\n\n if self.pre_filter is not None:\n data_list = [data for data in data_list if self.pre_filter(data)]\n if self.pre_transform is not None:\n data_list = [self.pre_transform(data) for data in data_list]\n data, slices = self.collate(data_list)\n torch.save((data, slices), self.processed_paths[0])", "title": "" }, { "docid": "22b77029cfde7321c886be743c22a8a4", "score": "0.5239389", "text": "def main(query, 
locality, regio, form=\"json\"):\n ypc = YellowPageCrawler(query, locality, region)\n filename = query+\"-\"+locality+\"-\"+region\n ypc.crawl(filename, format_=form)\n print \"Crawling completed for: \",filename", "title": "" }, { "docid": "bdfd8a1257f70303b69db766dae134a0", "score": "0.52329326", "text": "def run(self):\n self.get_info_from_data_header()\n self.base_str = self.construct_base_string()\n self.write_flag_files()", "title": "" }, { "docid": "40955985bcdb7a2d0260ce42dc6541ca", "score": "0.5225404", "text": "def run(self):\n\t\t# initialize\n\t\tself.initialize()\n\t\t# train model\n\t\tself.train()", "title": "" }, { "docid": "564faaf59da5893872c115b00c935070", "score": "0.5224925", "text": "def run(self):\n ingests = self.get_ingests()\n # check every 60 secs but only execute code if there are files to ingest.\n if ingests:\n # Create coursemap table\n self.create_map_table()\n\n # Create conversions table\n self.create_conv_table()\n\n # Create campaigns table\n self.create_camp_table()\n\n for ingest in ingests:\n if ingest['type'] == 'file':\n # print \"ingesting \" + ingest['meta']\n self.start_ingest(ingest['id'])\n path = ingest['meta']\n\n if 'Campaign' in ingest['meta']:\n # Ingest the campaigns file\n self.ingest_csv_file(path, self.camp_table)\n elif 'Conversions' in ingest['meta']:\n # Ingest the conversions file\n self.ingest_csv_file(path, self.conv_table)\n else:\n utils.log(\"GoogleAnalytics - Campaign or Conversions not found in file path\")\n # update the ingest record\n self.finish_ingest(ingest['id'])\n\n # identify any new campaigns and add the key to the coursemap table\n print(\"GoogleAnalytics - updating map table\")\n self.update_map_table()\n # save_run to ingest api\n # self.save_run_ingest()\n utils.log(\"GoogleAnalytics completed\")\n pass", "title": "" }, { "docid": "54b696b6289ce125ca73fcab3e75c1e4", "score": "0.521721", "text": "def main():\n parser = argparse.ArgumentParser(prog=\"data reports generation\")\n parser.add_argument(\n \"--input\", type=str, help=\"path to dataset for reports\", required=True\n )\n parser.add_argument(\n \"--output\", type=str, help=\"reports folder output\", required=True\n )\n arguments = parser.parse_args()\n output_dir = REPORT_DIR / arguments.output\n output_dir.mkdir(parents=True, exist_ok=True)\n\n required_cols = list(CATEGORICAL_COLUMNS.keys())\n required_cols.extend(REAL_COLUMNS.keys())\n required_cols.append(LABEL_COL)\n\n df = pd.read_csv(DATA_DIR / arguments.input, usecols=required_cols)\n\n save_data_stats(df, output_dir)\n save_categorical_graphs(df, output_dir)\n save_real_graphs(df, output_dir)\n save_heatmap(df, output_dir)\n save_pairplot(df, output_dir)", "title": "" }, { "docid": "f0af5c00f4eeda0c501fce5d569fc81e", "score": "0.52165514", "text": "def geocode_crime_data(data_entries):\n\n print(tableprint.header(['City Name', 'Lat', 'Long'], width=30))\n for entry in data_entries:\n coords = geocode_city_name(entry['city_name'])\n entry['coords'] = coords\n\n # Add a delay for API limiting\n time.sleep(0.1)\n\n print(tableprint.bottom(3, width=30))\n\n return data_entries", "title": "" }, { "docid": "76994148c3dabd6d433e67517f623878", "score": "0.521293", "text": "def runImageProcessing():\n # initialize instance\n extract = writeFile()", "title": "" }, { "docid": "140961c467c0bf9cc6cdbaf4d6b356bd", "score": "0.5209017", "text": "def execute():\n camera = calibrate_camera()\n binarizer = Binarizer()\n detector = Detector(camera)\n undistort_sample_images(camera)\n detect_perspective(camera)\n 
top_down_sample_images(camera)\n binarize_sample_images(camera, binarizer)\n generate_histogram(detector, camera, binarizer)\n detect_lane_lines()", "title": "" }, { "docid": "4bde9dee7d60fa70a1230b766517c0c3", "score": "0.52005154", "text": "def single_analysis_routine(rundict_list, graph_type, x_coord, chem_env_done, bader_done):\n reload_all = False\n continue_loop = True\n try:\n if graph_type == 'Structure':\n bailar.plot_structure_graphs(\n rundict_list, chem_env_done)\n\n elif graph_type == 'XRD':\n print(\"Not implemented yet\")\n\n elif graph_type == 'Bader':\n if bader_done:\n bader.plot_charge_and_mag(rundict_list, coord=x_coord)\n else:\n print(\"Generate bader tags before plotting !!\")\n\n elif graph_type == 'COOP':\n sorting = \"oxidation\" if bader_done else \"OO_pairs\"\n rundict_list = lob.plot_COOP_OO(\n rundict_list, sorting=sorting)\n\n elif graph_type == \"O2 release\":\n rundict_list = O2.O2_computation(\n rundict_list, bader_done=bader_done)\n\n elif graph_type == \"DOS\":\n DOS.plot_DOS_graphs(rundict_list)\n\n elif graph_type == 'hull':\n if len([d for d in rundict_list if d.status >= 3]) >= 2:\n hull.plot_hull_graphs(rundict_list, coord=x_coord)\n else:\n print(\"not enough converged runs to perform further analysis\")\n\n elif graph_type == \"energy surface\":\n PES.plot_energy_surface_graphs(\n rundict_list, chem_env_done)\n\n elif graph_type == \"mag\":\n nupdown.plot_all_graphs(rundict_list)\n\n elif graph_type == \"debug_infos\":\n print(\"=== debug output ===\")\n print_debug(rundict_list)\n\n elif graph_type == \"reload\":\n print(\"normal reload\")\n reload_all = True\n\n elif graph_type is \"QUIT\":\n print(\"finished plotting \\n\")\n input(\"press any key to close all figures\")\n plt.close(\"all\")\n continue_loop = False\n\n except Exception as ex:\n print(traceback.format_exc())\n print(ex)\n reload_all = True\n\n if reload_all:\n plt.close(\"all\")\n os.chdir(SETTING_DIR)\n try:\n importlib.reload(lob)\n importlib.reload(PES)\n importlib.reload(DOS)\n importlib.reload(hull)\n importlib.reload(bailar)\n importlib.reload(bader)\n importlib.reload(O2)\n importlib.reload(nupdown)\n importlib.reload(generic_plot)\n importlib.reload(read)\n except Exception:\n print(traceback.format_exc())\n print(\"PLEASE correct the issue and reload once more\")\n else:\n plt.show(block=False)\n\n return continue_loop", "title": "" }, { "docid": "d576ef0a0b494ca46d5f316cad915175", "score": "0.5195741", "text": "def main():\n group_process('g1', g1_img_dir)\n group_process('g2', g2_img_dir)\n analyze.main()", "title": "" }, { "docid": "ac9b6c69de5c797bad929b1af76dbee6", "score": "0.5194612", "text": "def main():\n map = folium.Map()\n print('Type a year')\n year = input()\n main_dict = find_titles('locations.list', year)\n print(main_dict)\n draw_movies(year, main_dict, map)\n draw_population(map)\n map.add_child(folium.LayerControl())\n map.save('Map.html')", "title": "" } ]
d9cf02e19c112c1bbb11bbb69e3a7402
GET /reports/new/ must return status code 200
[ { "docid": "63920f5807fbde4b4725d59f0dcc6378", "score": "0.0", "text": "def test_get(self):\n self.assertEqual(200, self.response.status_code)", "title": "" } ]
[ { "docid": "b03f4adc79057fbdaa7d623898943a07", "score": "0.63913965", "text": "def test_create_invalid_report_admin(self):\n request = self.factory.post(\n '/api/inventories/', {})\n force_authenticate(request, user=self.test_admin)\n response = InventoryReportListCreateView.as_view()(request)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(InventoryReport.objects.all()), 0)", "title": "" }, { "docid": "ac9a6b209218c638c7cafe5ce062fc56", "score": "0.6260877", "text": "def test_no_report(self):\n response = self.client.get(reverse('detail', kwargs={'report_id': 100}))\n self.assertEqual(response.status_code, 404)", "title": "" }, { "docid": "bea6d206f9e33b440bedfe75a34830ba", "score": "0.6196714", "text": "def test_create_invalid_report_unauthorized(self):\n request = self.factory.post(\n '/api/inventories/', {})\n response = InventoryReportListCreateView.as_view()(request)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n self.assertEqual(len(InventoryReport.objects.all()), 0)", "title": "" }, { "docid": "7144a8f4a742fb3fb0b195eb34be45ac", "score": "0.6181287", "text": "def post_report_new(database: Database):\n report_uuid = uuid()\n delta_description = \"{user} created a new report.\"\n report = {\"report_uuid\": report_uuid, \"title\": \"New report\", \"subjects\": {}}\n result = insert_new_report(database, delta_description, [report_uuid], report)\n result[\"new_report_uuid\"] = report_uuid\n return result", "title": "" }, { "docid": "ceddd9d6b7611dbc83ea5317bb580aae", "score": "0.61714286", "text": "def test_get_report(self):\n headers = {\"Accept\": \"application/json\"}\n response = self.client.open(\n \"/api/report/{id}\".format(id=MOCK_ID), method=\"GET\", headers=headers\n )\n self.assert_200(response, \"Response body is : \" + response.data.decode(\"utf-8\"))", "title": "" }, { "docid": "7b7212d10d8478948e7ddc22a8606236", "score": "0.6166465", "text": "def test_create_report_unauthorized(self):\n request = self.factory.post(\n '/api/inventories/', {'name': self.report_name})\n response = InventoryReportListCreateView.as_view()(request)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n self.assertEqual(len(InventoryReport.objects.all()), 0)", "title": "" }, { "docid": "aaabfcccfc49a8ba3d308788399e7cee", "score": "0.61048985", "text": "def post_report_new(database: Database):\n report_uuid = uuid()\n delta_description = \"{user} created a new report.\"\n report = dict(report_uuid=report_uuid, title=\"New report\", subjects={})\n result = insert_new_report(database, delta_description, (report, [report_uuid]))\n result[\"new_report_uuid\"] = report_uuid\n return result", "title": "" }, { "docid": "108898c0ca3345d956ad4aaf99fd223c", "score": "0.60782003", "text": "def test_create_report_user(self):\n request = self.factory.post(\n '/api/inventories/', {'name': self.report_name})\n force_authenticate(request, user=self.test_user)\n response = InventoryReportListCreateView.as_view()(request)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(len(InventoryReport.objects.all()), 0)", "title": "" }, { "docid": "ad9b849f3369655ef812d1bd7230b38f", "score": "0.60753614", "text": "def test_create_report_admin(self):\n\n request = self.factory.post(\n '/api/inventories/', {'name': self.report_name})\n force_authenticate(request, user=self.test_admin)\n response = InventoryReportListCreateView.as_view()(request)\n\n self.assertEqual(response.status_code, 
status.HTTP_201_CREATED)\n try:\n report = InventoryReport.objects.get(name=self.report_name)\n self.assertEqual(report.name, self.report_name)\n self.assertTrue(len(report.inventory_supplies.all()) > 0)\n except InventoryReport.DoesNotExist:\n self.fail()", "title": "" }, { "docid": "c954f047214517e9185a4d76ffa64124", "score": "0.6049396", "text": "def test_create_invalid_report_user(self):\n request = self.factory.post(\n '/api/inventories/', {})\n force_authenticate(request, user=self.test_user)\n response = InventoryReportListCreateView.as_view()(request)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(len(InventoryReport.objects.all()), 0)", "title": "" }, { "docid": "bee00c9ef47082d21c174e9a28a52566", "score": "0.60291076", "text": "def test_add_report(self):\n body = {\"type\": \"csv\", \"source\": \"local\"}\n headers = {\"Accept\": \"application/json\", \"Content-Type\": \"application/json\"}\n with patch.dict(\"ibutsu_server.controllers.report_controller.REPORTS\", {\"csv\": MOCK_CSV}):\n response = self.client.open(\n \"/api/report\",\n method=\"POST\",\n headers=headers,\n data=json.dumps(body),\n content_type=\"application/json\",\n )\n self.assert_201(response, \"Response body is : \" + response.data.decode(\"utf-8\"))", "title": "" }, { "docid": "bff2cd645c6f4f569bde69d2e8109553", "score": "0.601132", "text": "def post(self, request, report_name):\n request_data = dict(request.data.items())\n\n request_data.update(request.query_params.items())\n\n serialized_data = GenerateReportViewSerializer(data=request_data)\n\n serialized_data.is_valid(raise_exception=True)\n\n report_backend, report_backend_settings = get_report_backend(report_name)\n\n if not report_backend:\n raise Http404\n\n backend_instance = report_backend(report_settings=report_backend_settings, **serialized_data.data)\n backend_response = backend_instance.process_request(\n request=request,\n extra_data={key: value for key, value in request_data.items() if key not in serialized_data.data},\n )\n\n return JsonResponse(\n backend_response,\n status=backend_response.get('status', status.HTTP_202_ACCEPTED),\n )", "title": "" }, { "docid": "d6e0ca630ba7144065dda37a29c3e0bd", "score": "0.5994288", "text": "def create(self):\n self.client.get(\"/create\")", "title": "" }, { "docid": "f4f90c11cf01dfabc6dd7a8cbc956fa0", "score": "0.59608865", "text": "def _get_existing_report(code):\n url = f\"{BASE_URL}/file/report\"\n payload = {'apikey': VIRUS_TOTAL_API_KEY, 'resource': code}\n resp = r.get(url, params=payload)\n\n # Account for timeouts - only 4 request may be made a minute\n # Wait until the upload has completed scanning if it is doing so\n resp = _wait_for_scan(resp, url, payload)\n\n # If any other error has occurred raise an exception\n if resp.status_code != r.codes.ok:\n raise r.exceptions.HTTPError(f\"Error getting report on {code} - {resp.reason}\")\n\n # Check the json response - return None if the item doesn't exist\n json = resp.json()\n if ResponseCode(json['response_code']) == ResponseCode.NOT_IN_DB:\n return None\n\n # Otherwise we're all good\n return resp.json()", "title": "" }, { "docid": "18e5eb5b1b39252c49b31c738687170c", "score": "0.5953265", "text": "def test_list_reports_admin(self):\n request = self.factory.get(\n '/api/inventories')\n force_authenticate(request, user=self.test_admin)\n response = InventoryReportListCreateView.as_view()(request)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue(len(response.data) > 0)", 
"title": "" }, { "docid": "eaecb2cf29fd0ed71fe7b15f5d13e878", "score": "0.58981794", "text": "def test_api_can_create_a_program(self):\n self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)", "title": "" }, { "docid": "16dafd8713635a60911f0f1581b60333", "score": "0.5823782", "text": "def handle_create(request, uri, headers):\n expected_post = {\n \"gradingSchemeType\": \"NUMERIC\",\n \"name\": \"New homework\",\n \"weight\": 1.0,\n \"dueDateString\": \"12-15-2013\",\n \"shortName\": \"Newrk\",\n \"graderVisible\": False,\n \"maxPointsTotal\": expected_max_points,\n \"gradebookId\": 1234\n }\n assert expected_post == json.loads(request.body)\n body = json.dumps({'data': {'assignmentId': new_assignment_id}})\n return 200, headers, body", "title": "" }, { "docid": "9591c6dfe1dda28143ecf0bd8fcb8b8e", "score": "0.58177584", "text": "def run(self, request, pk):\n\n # Check that the user has permission to run reports.\n if not request.user.has_perm('extras.add_reportresult'):\n raise PermissionDenied(\"This user does not have permission to run reports.\")\n\n # Retrieve and run the Report. This will create a new ReportResult.\n report = self._retrieve_report(pk)\n report.run()\n\n serializer = serializers.ReportDetailSerializer(report)\n\n return Response(serializer.data)", "title": "" }, { "docid": "38bccba656bc4f90b5fc0b4a16c9eb3f", "score": "0.5804834", "text": "def test_list_reports_unauthorized(self):\n request = self.factory.get(\n '/api/inventories')\n response = InventoryReportListCreateView.as_view()(request)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "title": "" }, { "docid": "f1e4e5ed840030851ff08c9f8a32309b", "score": "0.57849747", "text": "def handle_create(request, uri, headers):\n expected_post = {\n \"gradingSchemeType\": \"NUMERIC\",\n \"name\": \"New homework\",\n \"weight\": 1.0,\n \"dueDateString\": \"12-15-2013\",\n \"shortName\": \"Newrk\",\n \"graderVisible\": False,\n \"maxPointsTotal\": 1, # 1 is the default value\n \"gradebookId\": 1234\n }\n assert expected_post == json.loads(request.body)\n body = json.dumps({'data': {'assignmentId': new_assignment_id}})\n return 200, headers, body", "title": "" }, { "docid": "bb387303ab79bcf1e543f8c061738fb5", "score": "0.5778906", "text": "def post(self): # type: ignore[no-untyped-def]\n report_dict = self.payload.get('report')\n report_obj = (\n app_feedback_report_services.create_report_from_json(\n report_dict))\n app_feedback_report_services.save_feedback_report_to_storage(\n report_obj, new_incoming_report=True)\n app_feedback_report_services.store_incoming_report_stats(report_obj)\n\n return self.render_json({})", "title": "" }, { "docid": "7659d8b41b09ae911bd8b54add1e0d1e", "score": "0.5748023", "text": "def save_report(request):\n report = request.POST.get('report', None)\n user_pk = int(request.POST.get('user_pk', None))\n user = User.objects.get(pk=user_pk)\n form_instance = ReportForm({'user': user, 'date': datetime.datetime.now(), 'report': report})\n if form_instance.is_valid():\n form_instance.save()\n return JsonResponse(data={'status': 'ok', 'message': 'Report saved successfully'}, status=200)\n else:\n return JsonResponse(data={'status': 'error', 'message': \"Report doesn't saved successfully\"}, status=400)", "title": "" }, { "docid": "ff04f641d981295caff9416f630f5aab", "score": "0.57357085", "text": "def new(request):\n if request.method != 'POST':\n form = forms.NewForm()\n return respond(request, 'new.html', {'form': form})\n\n form = forms.NewForm(request.POST, 
request.FILES)\n issue, _ = _make_new(request, form)\n if issue is None:\n return respond(request, 'new.html', {'form': form})\n else:\n return HttpResponseRedirect(reverse(request, show, args=[issue.key().id()]))", "title": "" }, { "docid": "57c60740c35f7016249cda6bf8a6a647", "score": "0.5725382", "text": "def get(self, projid):\n lbl=request.args.get('lbl', 'new issues')\n days=request.args.get('weeks', '2')\n if days is not None:\n start_date = datetime.date.today() + datetime.timedelta(-(int(days) * 7))\n dateParam = start_date.strftime('%Y-%m-%d')\n lbl = lbl + ' last ' + days + ' weeks'\n url = 'http://'+os.environ['GITLAB_HOST']+':'+os.environ['GITLAB_PORT']+'/api/v4/projects/' + str(projid) + '/issues?scope=all&created_after='+dateParam+'&per_page=1'\n headers = {'Private-Token': os.environ['GITLAB_SECRET']}\n r = requests.get(url, headers=headers)\n if r.status_code != 200:\n v = r.status_code\n lbl = 'HTTP Error'\n thresh = {0: 'red'}\n else:\n v = r.headers['x-total']\n thresh = {0: 'green'}\n vfmt = \"%d\"\n badge = Badge(label=lbl, value=v, value_format=vfmt, thresholds=thresh)\n resp = make_response(badge.badge_svg_text, 200)\n resp.headers['Content-Type'] = 'image/svg+xml;charset=utf-8'\n resp.headers['Content-Disposition'] = 'attachment; filename=badge.svg'\n return resp\n else:\n v = 'error'\n vfmt = \"%s\"\n thresh = {0: 'red'}", "title": "" }, { "docid": "b5af55007d9b0eaf43a794a6d2b09d72", "score": "0.57208306", "text": "def report(self):\r\n data = {}\r\n\r\n # GET DATA\r\n token = request.headers.get('token')\r\n userid = request.headers.get('userid')\r\n vessel_id = request.args.get('vessel_id')\r\n\r\n # CHECK TOKEN\r\n token_validation = self.validate_token(token, userid)\r\n\r\n if not token_validation:\r\n data[\"alert\"] = \"Invalid Token\"\r\n data['status'] = 'Failed'\r\n\r\n # RETURN ALERT\r\n return self.return_data(data)\r\n\r\n # OPEN CONNECTION\r\n self.postgresql_query.connection()\r\n\r\n # GET REPORT TEMP\r\n report_data = self.get_report(vessel_id)\r\n\r\n # CLOSE CONNECTION\r\n self.postgresql_query.close_connection()\r\n\r\n datas = {}\r\n datas['report_data'] = report_data\r\n datas['status'] = 'ok'\r\n\r\n return self.return_data(datas)", "title": "" }, { "docid": "4c2ef5ac8ca02c53cb4bde1eb3d88117", "score": "0.5717658", "text": "def report(request, report_id, format=''):\n return render_report(request, report_id, format)", "title": "" }, { "docid": "5ac0066eba40c17994078aed612b7e8c", "score": "0.56884074", "text": "def Reports():\n return render_template('Reports.html')", "title": "" }, { "docid": "44519f9a32d437a87d18bb3022119a1d", "score": "0.5674699", "text": "def test_list_reports_user(self):\n request = self.factory.get(\n '/api/inventories')\n force_authenticate(request, user=self.test_user)\n response = InventoryReportListCreateView.as_view()(request)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue(len(response.data) > 0)", "title": "" }, { "docid": "85d21dba5e5052e2fbfe36fb83c49188", "score": "0.5671081", "text": "def cars_report():\n car = Car.query.filter_by(id=request.form['car_id']).first()\n if car:\n car_report = CarReport(car.id)\n db.session.add(car_report)\n db.session.commit()\n return 'success', 200\n return 'car not exist.', 404", "title": "" }, { "docid": "e999783678ab8cb0f68a8b0e71be1d8f", "score": "0.5648941", "text": "def reports(request):\n return render(request, 'reports.html')", "title": "" }, { "docid": "0cbf91c9696bea241b074befb306dd27", "score": "0.5640119", "text": "def 
test_api_new_graph_1(self):\n department = baker.make(\"Department\")\n program = baker.make(\"DegreeProgram\",department=department)\n r = baker.make(\"Report\", degreeProgram=program, year=2017)\n slo = baker.make(\"SLOInReport\",report=r)\n assessHere = baker.make(\"AssessmentVersion\",report=r,slo=slo)\n baker.make(\"AssessmentData\",assessmentVersion=assessHere,overallProficient=93)\n data = {\n 'report__degreeProgram__department': department.pk,\n 'report__degreeProgram': program.pk,\n 'report__year__gte': 2015,\n 'report__year__lte': 2018,\n 'decision': 1,\n 'sloIR': slo.pk,\n 'assess': assessHere.assessment.pk,\n 'sloWeights': \"{\\\"\"+str(slo.pk)+\"\\\": 1}\"\n }\n resp = self.client.post(reverse('makeReports:api-new-graph'),data)\n self.assertEquals(resp.status_code,200)", "title": "" }, { "docid": "a211f3dadf74bca5fa169d78e6072281", "score": "0.5599508", "text": "def new(self, verbose=False):\n\n response=api(url=self.__url+\"/new\", verbose=verbose)\n return response", "title": "" }, { "docid": "b50d313fa672ffdc383d87c8d492566d", "score": "0.5595814", "text": "def test_new(self):\n result = self.client.get('/playlists/new')\n self.assertEqual(result.status, '200 OK')", "title": "" }, { "docid": "5b2125693cf19cb094cc0cd2704fda20", "score": "0.5584116", "text": "def test_report(self):\n\n result = self.client.get(\"/report\")\n self.assertEqual(result.status_code, 200)\n self.assertIn('text/html', result.headers['Content-Type'])\n self.assertIn('<h2 id=\"report-header\" class=\"header-text\">Report Your Wait Time</h2>', result.data)", "title": "" }, { "docid": "e5c5e55ae2b486e9e184d7e40ee906f8", "score": "0.55167794", "text": "def test_invalid_report_details(self):\n invalid_test_report_id = 153\n url = reverse(\"inventories:report-details\", args=[invalid_test_report_id])\n request = self.factory.get(url)\n force_authenticate(request, user=self.test_user)\n response = InventoryReportDetailsView.as_view()(request, pk=invalid_test_report_id)\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "title": "" }, { "docid": "c71fb352ba0e726afefd5a6a6d54234c", "score": "0.55057526", "text": "def post(self) -> None:\n assert self.normalized_payload is not None\n report_dict = self.normalized_payload['report']\n report_obj = (\n app_feedback_report_domain.AppFeedbackReport.from_submitted_feedback_dict( # pylint: disable=line-too-long\n report_dict\n )\n )\n app_feedback_report_services.save_feedback_report_to_storage(\n report_obj, new_incoming_report=True)\n app_feedback_report_services.store_incoming_report_stats(report_obj)\n\n return self.render_json({})", "title": "" }, { "docid": "a07daad7761df86adcc2314ae8c2d599", "score": "0.5500255", "text": "def GET(self, *args, **kwargs):\n \n return BadRequest()", "title": "" }, { "docid": "0a24f729db51a0fc46801580716ba6f8", "score": "0.5499015", "text": "def test_bad_request(self, authenticated_client, simple_report_view):\n url = f\"{reverse('chart_data', args=(simple_report_view.pk,))}?prim_dim=foobar\"\n response = authenticated_client.get(url)\n assert response.status_code == 400\n error = response.json()['error']\n assert 'foobar' in error", "title": "" }, { "docid": "8a7f4e25ea374d5adb3172a113592b0e", "score": "0.5487561", "text": "def test_status_code(self):\n assert self.response.status_code == 201", "title": "" }, { "docid": "0c0a6193563ccd40044d1b1fbf2ddd8d", "score": "0.5484944", "text": "def test_get_report_list(self):\n query_string = [(\"page\", 56), (\"pageSize\", 56)]\n headers = {\"Accept\": 
\"application/json\"}\n response = self.client.open(\n \"/api/report\", method=\"GET\", headers=headers, query_string=query_string\n )\n self.assert_200(response, \"Response body is : \" + response.data.decode(\"utf-8\"))", "title": "" }, { "docid": "e70c059d759aa61455a008e00f8131f9", "score": "0.54729426", "text": "def get(self, request):\r\n return response.Response( # Renders to content type as requested by the client.\r\n status=status.HTTP_200_OK,\r\n data={\r\n 'detail': 'Hello World!',\r\n }\r\n )\r\n\r\n # Question and Answers:\r\n # QUESITON 1: Where do I lookup our status codes?\r\n # ANSWER 1: https://www.django-rest-framework.org/api-guide/status-codes/#status-codes\r", "title": "" }, { "docid": "bd4cf7539ab84dc0fbc8bddf30c85f10", "score": "0.54693955", "text": "def test_response_200(self):\n client = Client()\n client.login(username='hjansen', password='ikhebkaas42')\n order = Order.objects.create(\n full_name='Hans Jansen',\n order_total=9.99,\n email='hj@email.com',\n )\n response = client.get(\n f'/profile/order_history/{order.order_reference}/'\n )\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "776a781121bbed609027663bd73cb1a6", "score": "0.5460034", "text": "def _web_send_status(self, status, context):\n if status == 201:\n # redirect to created resource\n lattice_id = str(context.lattice._id)\n url = self.handler.reverse_url(\"lattice_details\", lattice_id)\n self.handler.redirect(url, status=303)\n else:\n self.handler.set_status(status)\n self.handler.render(self._TEMPLATE_NAME, **context)", "title": "" }, { "docid": "2a53a95fe9a4f1580565828cda35a7db", "score": "0.5459766", "text": "def test_new_catalog(self):\n response = self.client.get('/catalogs/new')\n self.assertTrue(response.status_code == 200)", "title": "" }, { "docid": "2d6ea22f57d45b50d31252f38fa61d52", "score": "0.5441457", "text": "def download_reports():\n\n def encode(s):\n return s.encode('utf-8') if s else ''\n\n current_date = str(datetime.date.today())\n csv_name = 'IncidentReports-' + current_date + '.csv'\n outfile = open(csv_name, 'w+')\n print('initial file contents:', outfile.read())\n\n wr = csv.writer(outfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)\n reports = db.session.query(IncidentReport).all()\n wr.writerow(['DATE', 'LOCATION', 'AGENCY ID', 'VEHICLE ID', 'DURATION',\n 'LICENSE PLATE', 'DESCRIPTION'])\n for r in reports:\n wr.writerow([r.date, r.location, r.agency.name,\n r.vehicle_id, r.duration,\n r.license_plate, encode(r.description)])\n\n endfile = open(csv_name, 'r+')\n data = endfile.read()\n return Response(\n data,\n mimetype=\"text/csv\",\n headers={\"Content-disposition\": \"attachment; filename=\" + csv_name})", "title": "" }, { "docid": "a0cd6b1b7ab32311820e57a344949874", "score": "0.5436053", "text": "def POST(self, *args, **kwargs):\n\n return BadRequest()", "title": "" }, { "docid": "1478bb2d6533fe20c9fd0807467bcded", "score": "0.5431713", "text": "def status():\n\n resp = {\n 'status': 'OK',\n 'date': str(datetime.now())\n }\n\n return Response(\n headers={'Content-Type': 'application/json'},\n body=resp,\n status_code=200\n )", "title": "" }, { "docid": "9a24e0cad63d22b6f6c65ea6b68145ee", "score": "0.54279953", "text": "def _created_response(resource):\n\n# if type(resource) == type(list()):\n# response = jsonify(resource)\n# response.status_code = 201\n# else:\n# response = jsonify(resource)\n# response.status_code = 201\n# return response\n# else:\n# l = []\n# for r in resource:\n# l.append(jsonify(r))\n# response = json.dumps( l )\n 
response = jsonify( resource )\n# response = add_link_headers(response, resource.links())\n response.status_code = 201\n return response", "title": "" }, { "docid": "0bde52c2dac938e0ab91fc975a02e9ca", "score": "0.54132396", "text": "def test_report(self):\n self.login()\n res = self.view_times()\n\n assert res.status_code == 200", "title": "" }, { "docid": "c0734e557ecd26c41b37a9a30fb90a60", "score": "0.54095525", "text": "def test_api_new_graph_3(self):\n department = baker.make(\"Department\")\n program = baker.make(\"DegreeProgram\",department=department)\n r = baker.make(\"Report\", degreeProgram=program, year=2016)\n slo = baker.make(\"SLOInReport\",report=r)\n assessHere = baker.make(\"AssessmentVersion\",report=r,slo=slo)\n baker.make(\"AssessmentData\",assessmentVersion=assessHere,overallProficient=93)\n data = {\n 'report__degreeProgram__department': department.pk,\n 'report__degreeProgram': program.pk,\n 'report__year__gte': 2015,\n 'report__year__lte': 2018,\n 'decision': 3,\n 'sloIR': slo.pk,\n 'assess': assessHere.assessment.pk,\n 'sloWeights': \"{\\\"\"+str(slo.pk)+\"\\\": 1}\"\n }\n resp = self.client.post(reverse('makeReports:api-new-graph'),data)\n self.assertEquals(resp.status_code,200)", "title": "" }, { "docid": "68ad4b870b13879aa09e599125fd530e", "score": "0.54055554", "text": "def test_view_dailyavailabilities_return_200(self):\n # Create an instance of a GET request.\n request = self.factory.get(reverse('policorp:dailyavailabilities', kwargs={'taskid': 1, 'date': '20200101'}))\n response = dailyavailabilities(request, 1, '20200101')\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "74d72e6a89fe3d98e6c9cdf02af4715e", "score": "0.5404768", "text": "def post(self, request, nnid):\n try:\n return_data = \"\"\n return Response(json.dumps(return_data))\n except Exception as e:\n return_data = {\"status\": \"404\", \"result\": str(e)}\n return Response(json.dumps(return_data))", "title": "" }, { "docid": "92f3af4bd2dfb02683d11543c65242c8", "score": "0.5395408", "text": "def test_new(self):\n result = self.client.get('/pantry/new')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'New item', result.data)", "title": "" }, { "docid": "fd457ded2d5b48b7d397712f87f9de08", "score": "0.53909415", "text": "def create_reponse():\n request_object = request.get_json(force=True)\n _id = Db.get_instance().responses.insert_one(request_object).inserted_id\n return \"/responses/\" + str(_id), 201", "title": "" }, { "docid": "b13f1da76e79198a1e859a75bc037440", "score": "0.5388361", "text": "def test_api_new_graph_2(self):\n department = baker.make(\"Department\")\n program = baker.make(\"DegreeProgram\",department=department)\n r = baker.make(\"Report\", degreeProgram=program, year=2018)\n slo = baker.make(\"SLOInReport\",report=r)\n assessHere = baker.make(\"AssessmentVersion\",report=r,slo=slo)\n baker.make(\"AssessmentData\",assessmentVersion=assessHere,overallProficient=93)\n data = {\n 'report__degreeProgram__department': department.pk,\n 'report__degreeProgram': program.pk,\n 'report__year__gte': 2015,\n 'report__year__lte': 2018,\n 'decision': 2,\n 'sloIR': slo.pk,\n 'assess': assessHere.assessment.pk,\n 'sloWeights': \"{\\\"\"+str(slo.pk)+\"\\\": 1}\"\n }\n resp = self.client.post(reverse('makeReports:api-new-graph'),data)\n self.assertEquals(resp.status_code,200)", "title": "" }, { "docid": "b297e79532e0874f0bff00db3b2bcf49", "score": "0.5378497", "text": "def test_send_reports_404(cbcloud_api_mock, state_manager, engine_results, input):\n 
engine_results._accept_report(ENGINE_NAME, input)\n cbcloud_api_mock.mock_request(\"PUT\",\n f\"/threathunter/feedmgr/v2/orgs/test/feeds/{'FAKE_FEED_ID'}/reports/.*\",\n ObjectNotFoundError)\n sent = engine_results.send_reports('FAKE_FEED_ID')\n assert not sent", "title": "" }, { "docid": "a353ed0cf8f4283a740d4b5f50a75ead", "score": "0.53687906", "text": "def create_new_campaign():\n\n # create campaign\n db_connection = db.connect_to_database()\n campaign = request.get_json()\n print(\"Received a POST request to create a new campaign.\")\n print(\"campaign JSON/Dict is: \")\n print(campaign)\n query = queries.create_campaign(session['user_id'], \\\n campaign['name'], \\\n campaign['num_players'], \\\n campaign['desired_history'], \\\n campaign['playstyle'], \\\n campaign['plays_on'] \\\n )\n db.execute_query(db_connection, query)\n\n # JS will automatically reload the page on a success response\n return \"1\"", "title": "" }, { "docid": "4a8109bb7d24cf25225142c067672f4e", "score": "0.534125", "text": "def reports_home(request):\r\n try:\r\n address = 'P.O Box %s' % ('.' * 30)\r\n params, location = {}, '.' * 20\r\n form = CaseLoad(request.user)\r\n if request.method == 'POST':\r\n doc_id = request.POST.get('id')\r\n doc_name = request.POST.get('name')\r\n org_unit_id = request.POST.get('org_unit')\r\n file_name = '%s' % (doc_id)\r\n # Organisation units details\r\n orgs = RegOrgUnit.objects.select_related().get(\r\n id=org_unit_id, is_void=False)\r\n org_contacts = get_contacts(org_unit_id)\r\n params['org_unit'] = orgs.org_unit_name\r\n if 'contact_CPOA' in org_contacts:\r\n address = org_contacts['contact_CPOA'].replace('\\r\\n', '<br/>')\r\n params['address'] = address\r\n # Physical location\r\n if 'contact_CPHA' in org_contacts:\r\n location = org_contacts['contact_CPHA'].replace(\r\n '\\r\\n', '<br/>')\r\n params['location'] = location\r\n # Get geo details\r\n geos = get_geo_locations(org_unit_id)\r\n sub_county = ', '.join(geos)\r\n params['sub_county'] = sub_county.upper()\r\n simple_document(document_name=doc_name, report_name=file_name,\r\n params=params)\r\n results = {'file_name': file_name}\r\n return JsonResponse(results, content_type='application/json',\r\n safe=False)\r\n return render(request, 'reports/reports_index.html',\r\n {'form': form, 'status': 200})\r\n except Exception, e:\r\n raise e", "title": "" }, { "docid": "13eda6bf3f3bd8b6aee74f0d88be17d5", "score": "0.5340878", "text": "def new_status(request):\n try:\n # create new UserStatus object and link to user object\n user_status_object = UserStatus(status=request.POST['status'])\n user_status_object.user = UserExtended.objects.get(user__id=request.POST['user_id'])\n user_status_object.save()\n\n # returned\n return HttpResponse(json.dumps(\"user %s successfully created new status\" % user_status_object.user.user.username),\n 'application/json')\n except Exception as error:\n return HttpResponseBadRequest(error)", "title": "" }, { "docid": "6f930d22999e3088bddc28e4f9b483b0", "score": "0.5340035", "text": "def test_api_can_create_a_plan(self):\n #Create a plan using API, also it can be created using plan model.\n response = self.client.post(\n reverse(\"create\"),\n self.plan_data,\n format=\"json\"\n ) \n #print(json.loads(response.content))\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, msg=\"Plan creation failed.\")\n self.assertContains(response, \"Monthly Plan 1999999\", status_code=201)", "title": "" }, { "docid": "4c8f4289390bb16831d2098056c27cb1", "score": "0.53388184", "text": "def 
retrieve(self, request, pk):\n\n # Retrieve the Report and ReportResult, if any.\n report = self._retrieve_report(pk)\n report.result = ReportResult.objects.filter(report=report.full_name).first()\n\n serializer = serializers.ReportDetailSerializer(report)\n\n return Response(serializer.data)", "title": "" }, { "docid": "5d26b5d018e3b6d0a5c6eafc4ab87167", "score": "0.5335365", "text": "def create_response(self):\r\n self.response = JsonResponse(401)", "title": "" }, { "docid": "6ac46de259b3fb10c2af6200faba0e19", "score": "0.5335004", "text": "def create(self, *args: Any, **kwargs: Any) -> Response:\n return super().create(*args, **kwargs)", "title": "" }, { "docid": "b6ea0cfd2e9fee4e767c7b31d2451019", "score": "0.53281087", "text": "def test_new(self):\n\n # Unauthorized user ('viewer') should return a 403 status code on the\n # new action, which requires a 'contributor' or an 'administrator'.\n extra_environ = {'test.authentication.role': 'viewer'}\n response = self.app.get(url('new_file'), extra_environ=extra_environ,\n status=403)\n resp = json.loads(response.body)\n assert resp['error'] == u'You are not authorized to access this resource.'\n assert response.content_type == 'application/json'\n\n # Add some test data to the database.\n application_settings = h.generate_default_application_settings()\n restricted_tag = h.generate_restricted_tag()\n speaker = h.generate_default_speaker()\n Session.add_all([application_settings, restricted_tag, speaker])\n Session.commit()\n\n # Get the data currently in the db (see websetup.py for the test data).\n data = {\n 'tags': h.get_mini_dicts_getter('Tag')(),\n 'speakers': h.get_mini_dicts_getter('Speaker')(),\n 'users': h.get_mini_dicts_getter('User')(),\n 'utterance_types': h.utterance_types,\n 'allowed_file_types': h.allowed_file_types\n }\n # JSON.stringify and then re-Python-ify the data. This is what the data\n # should look like in the response to a simulated GET request.\n data = json.loads(json.dumps(data, cls=h.JSONOLDEncoder))\n\n # GET /file/new without params. Without any GET params, /file/new\n # should return a JSON array for every store.\n response = self.app.get(url('new_file'),\n extra_environ=self.extra_environ_admin)\n resp = json.loads(response.body)\n assert resp['tags'] == data['tags']\n assert resp['speakers'] == data['speakers']\n assert resp['users'] == data['users']\n assert resp['utterance_types'] == data['utterance_types']\n assert resp['allowed_file_types'] == data['allowed_file_types']\n assert response.content_type == 'application/json'\n\n # GET /new_file with params. Param values are treated as strings, not\n # JSON. If any params are specified, the default is to return a JSON\n # array corresponding to store for the param. There are three cases\n # that will result in an empty JSON array being returned:\n # 1. the param is not specified\n # 2. the value of the specified param is an empty string\n # 3. 
the value of the specified param is an ISO 8601 UTC datetime\n # string that matches the most recent datetime_modified value of the\n # store in question.\n params = {\n # Value is any string: 'speakers' will be in response.\n 'speakers': 'anything can go here!',\n # Value is ISO 8601 UTC datetime string that does not match the most\n # recent User.datetime_modified value: 'users' *will* be in\n # response.\n 'users': datetime.datetime.utcnow().isoformat(),\n # Value is ISO 8601 UTC datetime string that does match the most\n # recent Tag.datetime_modified value: 'tags' will *not* be in response.\n 'tags': h.get_most_recent_modification_datetime('Tag').isoformat()\n }\n response = self.app.get(url('new_file'), params,\n extra_environ=self.extra_environ_admin)\n resp = json.loads(response.body)\n assert resp['tags'] == []\n assert resp['speakers'] == data['speakers']\n assert resp['users'] == data['users']\n assert resp['utterance_types'] == data['utterance_types']\n assert response.content_type == 'application/json'", "title": "" }, { "docid": "bbca4f8ef0a6f41648d703b72e5752d0", "score": "0.5314831", "text": "def get_report():\n\n # list of tupple(event_id and name)\n response = op.get_report()\n\n return jsonify({\n \"method\": \"POST\",\n \"headers\": {\n \"content-type\": \"application/json\"\n },\n \"body\": {\n \"response\": response\n }\n })", "title": "" }, { "docid": "5ef769d7b5e4ead0cf2320fa9e2faf71", "score": "0.5314323", "text": "def create_report(report_type_name, patient, reporter):\n\n\treport_type = ReportType.objects.get(name=report_type_name)\n\treport = Report(patient=patient, reporter=reporter, type=report_type,\n\t\t location=reporter.health_centre, village=reporter.village)\n\treturn report", "title": "" }, { "docid": "09de48a0908e323ea2175d4388fe3d3c", "score": "0.53133976", "text": "def status_of_report(request_ctx, account_id, report, id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/reports/{report}/{id}'\n url = request_ctx.base_api_url + path.format(account_id=account_id, report=report, id=id)\n response = client.get(request_ctx, url, **request_kwargs)\n\n return response", "title": "" }, { "docid": "0662a5291c44bfef171a00a859dcbc4a", "score": "0.5307528", "text": "def assertHttpCreated(self, response):\n assert response.status_code == 201", "title": "" }, { "docid": "a89ed219709bb8ea9a3cdbf2bfc0ff8e", "score": "0.52991116", "text": "def create_resource(request):\n\n print(\"STARTING\")\n\n return_obj = {\n \"success\": False,\n \"message\": None,\n \"results\": {}\n }\n\n # -------------------- #\n # VERIFIES REQUEST #\n # -------------------- #\n\n if not (request.is_ajax() and request.method == \"POST\"):\n return_obj[\"success\"] = False\n return_obj[\"message\"] = \"Unable to communicate with server.\"\n return_obj[\"results\"] = {}\n\n return JsonResponse(return_obj)\n\n # -------------------------- #\n # GETS DATA FROM REQUEST #\n # -------------------------- #\n\n session_id = request.POST.get(\"sessionId\")\n res_title = request.POST.get(\"resTitle\")\n res_abstract = request.POST.get(\"resAbstract\")\n res_keywords = request.POST.get(\"resKeywords\").split(\", \")\n res_filename = slugify(request.POST.get(\"resFilename\"))[0:40]\n create_ts = request.POST.get(\"createTs\")\n create_refts = request.POST.get(\"createRefts\")\n create_public = request.POST.get(\"createPublic\")\n\n refts_metadata = {\n \"res_title\": res_title,\n \"res_abstract\": res_abstract,\n \"res_keywords\": res_keywords,\n \"res_filename\": res_filename\n }\n\n workspace = 
get_app_workspace(request)\n try:\n shutil.rmtree(\"/\".join((workspace, session_id,)))\n except:\n pass\n os.mkdir(\"/\".join((workspace, session_id,)))\n\n # ---------------------- #\n # CREATES REFTS FILE #\n # ---------------------- #\n\n if create_refts:\n try:\n refts_path = create_refts_file(session_id, timeseries_ids, workspace, refts_metadata)\n except:\n refts_path = None\n pass\n else:\n refts_path = None\n\n # --------------------------------- #\n # LOADS DATA INTO ODM2 DATABASE #\n # --------------------------------- #\n '''\n if create_ts:\n if True:\n odm2_path = create_odm2_database(session_id, timeseries_ids, workspace, refts_metadata)\n else:\n odm2_path = None\n pass\n else:\n odm2_path = None\n\n # ------------------------------- #\n # CREATES HYDROSHARE RESOURCE #\n # ------------------------------- #\n\n hs_api = get_oauth_hs(request)\n\n if not refts_path and not odm2_path:\n pass\n \n resource_id = hs_api.createResource(\"CompositeResource\", res_title, abstract=res_abstract, keywords=res_keywords)\n\n if refts_path:\n hs_api.addResourceFile(resource_id, resource_file=refts_path)\n #with open(refts_path, \"rb\") as res_file:\n # hs_api.addResourceFile(resource_id, resource_file=res_file)\n #res_file.close()\n\n if odm2_path:\n hs_api.addResourceFile(resource_id, resource_file=odm2_path)\n '''\n return_obj[\"success\"] = True\n\n return JsonResponse(return_obj)", "title": "" }, { "docid": "4ebf9f47319cff2483b1196a38bdf355", "score": "0.5295457", "text": "def report(self, request):\n\n if 'username' not in request.data:\n _log.debug(\"Invalid call to PM worker: missing username for PM account: %s\" % str(request.data))\n return JsonResponse(\n {'status': 'error', 'message': 'Invalid call to PM worker: missing username for PM account'},\n status=status.HTTP_400_BAD_REQUEST\n )\n if 'password' not in request.data:\n _log.debug(\"Invalid call to PM worker: missing password for PM account: %s\" % str(request.data))\n return JsonResponse(\n {'status': 'error', 'message': 'Invalid call to PM worker: missing password for PM account'},\n status=status.HTTP_400_BAD_REQUEST\n )\n if 'template' not in request.data:\n _log.debug(\"Invalid call to PM worker: missing template for PM account: %s\" % str(request.data))\n return JsonResponse(\n {'status': 'error', 'message': 'Invalid call to PM worker: missing template for PM account'},\n status=status.HTTP_400_BAD_REQUEST\n )\n username = request.data['username']\n password = request.data['password']\n template = request.data['template']\n pm = PortfolioManagerImport(username, password)\n try:\n try:\n if 'z_seed_child_row' not in template:\n _log.debug(\"Invalid template formulation during portfolio manager data import: %s\" % str(template))\n return JsonResponse(\n {\n 'status': 'error',\n 'message': 'Invalid template formulation during portfolio manager data import'\n },\n status=status.HTTP_400_BAD_REQUEST\n )\n if template['z_seed_child_row']:\n content = pm.generate_and_download_child_data_request_report(template)\n else:\n content = pm.generate_and_download_template_report(template)\n except PMExcept as pme:\n _log.debug(\"%s: %s\" % (str(pme), str(template)))\n return JsonResponse({'status': 'error', 'message': str(pme)}, status=status.HTTP_400_BAD_REQUEST)\n try:\n content_object = xmltodict.parse(content, dict_constructor=dict)\n except Exception: # catch all because xmltodict doesn't specify a class of Exceptions\n _log.debug(\"Malformed XML from template download: %s\" % str(content))\n return JsonResponse(\n {'status': 
'error', 'message': 'Malformed XML from template download'},\n status=status.HTTP_400_BAD_REQUEST)\n try:\n if content_object.get('report', None) is not None:\n success, properties = pm._parse_properties_v1(content_object)\n else:\n # assume that v2 is the correct version now\n success, properties = pm._parse_properties_v2(content_object)\n if not success:\n return JsonResponse(\n {\n 'status': 'error',\n 'message': properties\n },\n status=status.HTTP_400_BAD_REQUEST\n )\n\n except (KeyError, TypeError):\n _log.debug(\"Processed template successfully, but missing keys -- is the report empty on Portfolio Manager?: %s\" % str(content_object))\n return JsonResponse(\n {\n 'status': 'error', 'message':\n 'Processed template successfully, but missing keys -- is the report empty on Portfolio Manager?'\n },\n status=status.HTTP_400_BAD_REQUEST\n )\n\n return JsonResponse({'status': 'success', 'properties': properties})\n except Exception as e:\n _log.debug(\"%s: %s\" % (e, str(request.data)))\n return JsonResponse({'status': 'error', 'message': e}, status=status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "c46371401b6b9c4f880dceac8bda1824", "score": "0.52948457", "text": "def create_status(request):\n logged_in_user = get_logged_in_user(request)\n\n # get parameters\n name = request.params.get('name')\n code = request.params.get('code')\n\n if name and code:\n new_status = Status(\n name=name,\n code=code,\n created_by=logged_in_user,\n )\n DBSession.add(new_status)\n\n return HTTPOk()", "title": "" }, { "docid": "91592b974a830e7bb5e5166e7a7249db", "score": "0.5291794", "text": "def HandleNewResource(self, request):\n\n return NewItemPage(self, \"resource\")(request)", "title": "" }, { "docid": "967a4612b3eabb551b96e3bb28e9f762", "score": "0.5291111", "text": "def test_api_can_create_a_book(self):\n self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)", "title": "" }, { "docid": "d3a1231da59c1ffb59350fc583ea84e0", "score": "0.52880365", "text": "def features_reports_get(self, **kwargs):\n\n all_params = []\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method features_reports_get\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n\n resource_path = '/Features/reports'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept([])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[InlineResponse200]',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "aa038f0123e0365f06564096a49ec413", "score": "0.5283004", "text": "def get_issue_report(self, status):\n self.issue_report.status = status\n self.issue_report.save()\n return self.issue_report", "title": "" }, { "docid": "b6e8634a583129cfbdbf90d98524dbab", "score": "0.5269663", "text": "def createClientSuccess(request):\n \n return 
render_to_response('createclientsuccess.html')", "title": "" }, { "docid": "9c906c53aa964d8f345f8d803aa47f20", "score": "0.52630407", "text": "def create(request):", "title": "" }, { "docid": "66cbd902d77accda1cd40e586c5927d6", "score": "0.52510715", "text": "def test_new(self):\n result = self.client.get('/item/new')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'New Item', result.data)", "title": "" }, { "docid": "c85561fafadca95c64d8e15cbad0e405", "score": "0.5241983", "text": "def report():\n report_type = request.args.get('type', None)\n\n if report_type == 'bar':\n status_code = self._report_bar()\n\n elif report_type == 'tick':\n status_code = self._report_tick()\n\n elif report_type == 'order':\n status_code = self._report_order()\n\n elif report_type == 'balance':\n status_code = self._report_balance()\n\n elif report_type == None:\n status_code = 200\n\n else:\n logging.error(\"Invalid report type '%s'\", report_type)\n status_code = 400\n\n return self._rpc_text(), status_code", "title": "" }, { "docid": "6ec0242eb9e1dba21ccc310e8c475912", "score": "0.52332664", "text": "def test_url_get_ok(self, report):\n self.check_version_is(report, \"HTTP/1.1\")\n self.check_status_is(report, 200)\n self.check_date_valid(report)", "title": "" }, { "docid": "3c441211e9ebc19c734eec237bfe50e4", "score": "0.52311736", "text": "def healthz(request):\n return JsonResponse({'status': 'ok'})", "title": "" }, { "docid": "0aee0a937d55b36084777027f32a501e", "score": "0.52276295", "text": "def HandleNewSuite(self, request):\n\n return NewSuitePage(self)(request)", "title": "" }, { "docid": "4dd44d83ab8f27e541a2b3e5e3a585e0", "score": "0.52247095", "text": "def insert_report(self, report: dict[str, Any]) -> Any:\n response = self.get_conn().reports().request(body=report).execute(num_retries=self.num_retries)\n return response", "title": "" }, { "docid": "a77a6da61c29c7060710dae3bd323951", "score": "0.5216074", "text": "def test_create_first_status(self):\n db = get_session(self.app)\n\n with self.app.app_context():\n u = user(username='r1cky', save=True)\n\n data = json.dumps({\n 'api_key': self.app.config.get('API_KEY'),\n 'user': u.username,\n 'project': 'sumodev',\n 'content': 'bug 123456'})\n response = self.client.post('/api/v1/status/', data=data,\n content_type='application/json')\n eq_(response.status_code, 200)\n assert 'bug 123456' in response.data\n\n # Verify the user was created.\n eq_(db.query(User).first().username, 'r1cky')\n # Verify the project was created.\n eq_(db.query(Project).first().slug, 'sumodev')\n # Verify the status was created.\n eq_(db.query(Status).first().content, 'bug 123456')", "title": "" }, { "docid": "303d5d8eb5f0adee1fb33fc38e640187", "score": "0.5207665", "text": "def test_get_reports_overview(self):\n self.assertEqual({\"_id\": \"id\", \"title\": \"Reports\", \"subtitle\": \"\"}, get_reports_overview(self.database))", "title": "" }, { "docid": "364c60855b0d2a1ecd8a728189dec833", "score": "0.5205938", "text": "def report_definition(connection, report_id):\n connection._validate_project_selected()\n return connection.get(url=f'{connection.base_url}/api/v2/reports/{report_id}')", "title": "" }, { "docid": "007c1d8c255575ea3a83589f029f6943", "score": "0.5202752", "text": "def test_create_status_no_user(self):\n data = json.dumps({\n 'api_key': self.app.config.get('API_KEY'),\n 'user': 'r1cky',\n 'project': 'sumodev',\n 'content': 'bug 123456'})\n response = self.client.post('/api/v1/status/', data=data,\n content_type='application/json')\n 
eq_(response.status_code, 400)", "title": "" }, { "docid": "ca73badc804bb049fb0e5c2bb78021d4", "score": "0.51998043", "text": "def get(self):\n project_form = forms.ProjectForm()\n self.response.out.write(\n template.render('shifteleven/views/project/new.html', {'project_form': project_form}))", "title": "" }, { "docid": "e4220eaf72c3a8185713746e6925e468", "score": "0.51992893", "text": "def list(self, request):\n report_list = []\n\n # Iterate through all available Reports.\n for module_name, reports in get_reports():\n for report in reports:\n\n # Attach the relevant ReportResult (if any) to each Report.\n report.result = ReportResult.objects.filter(report=report.full_name).defer('data').first()\n report_list.append(report)\n\n serializer = serializers.ReportSerializer(report_list, many=True, context={\n 'request': request,\n })\n\n return Response(serializer.data)", "title": "" }, { "docid": "21082c0713d36a31a096ac687c702b98", "score": "0.5199076", "text": "def _web_send_status(self, status_code, context):\n if status_code == 201:\n # redirect to created resource\n model_id = str(context.model._id)\n url = self.handler.reverse_url(\"model_details\", model_id)\n self.handler.redirect(url, status=303)\n else:\n self.handler.set_status(status_code)\n self.handler.render(self._TEMPLATE_NAME, **context)", "title": "" }, { "docid": "0ad4491f302e59baa02d154adc26d58e", "score": "0.51973313", "text": "def test_create_issue1(client):\n response = client.post(\n ISSUE_API_URL + '/create/1',\n data=dict(\n title=\"Example issue 1\",\n description=\"Wheel was punctured by nail.\"\n ),\n follow_redirects=True\n )\n # Checks if there issue is created.\n assert b'<td>1</td>' in response.data", "title": "" }, { "docid": "35f3cae6f8d1d39c5cca0cd6fb3c46d6", "score": "0.51946944", "text": "def CreateDashboard(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "title": "" }, { "docid": "5f3e9646124f899233469dc947a582fd", "score": "0.5191951", "text": "def test_access_analytics_endpoint(self):\n res = self.client.get(self.report_views_url, **self.headers, format='json')\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "title": "" }, { "docid": "065dfcab651548b3f9ebcae1319f43d6", "score": "0.5186885", "text": "def post(self, request, *args, **kwargs):\n self.create(request)\n return Response(status=201)", "title": "" }, { "docid": "9689d13a2ae363bb6a2a53b846d45ccd", "score": "0.51805574", "text": "def add_new_project():\n\n title = request.form[\"title\"]\n description = request.form[\"description\"]\n max_grade = request.form[\"max_grade\"]\n\n hackbright.make_new_project(title, description, max_grade)\n\n return redirect(\"/\")", "title": "" }, { "docid": "ae2a08f4ceb3df8319b93463d8704497", "score": "0.51799524", "text": "def get(self, projid):\n lbl = request.args.get('lbl', 'orphaned issues')\n days = request.args.get('weeks', '2')\n if days is not None:\n start_date = datetime.date.today() + datetime.timedelta(-(int(days) * 7))\n dateParam = start_date.strftime('%Y-%m-%d')\n lbl = lbl + ' last ' + days + ' weeks'\n url = 'http://'+os.environ['GITLAB_HOST']+':'+os.environ['GITLAB_PORT']+'/api/v4/projects/' + str(projid) + '/issues?state=opened&scope=all&updated_before='+dateParam+'&per_page=1'\n headers = {'Private-Token': os.environ['GITLAB_SECRET']}\n r = requests.get(url, headers=headers)\n if r.status_code != 200:\n v = r.status_code\n lbl = 'HTTP Error'\n 
thresh = {0: 'red'}\n else:\n v = r.headers['x-total']\n thresh = {2: 'green',\n 4: 'yellowgreen',\n 6: 'yellow',\n 8: 'orange',\n 10: 'red'}\n vfmt = \"%d\"\n badge = Badge(label=lbl, value=v, value_format=vfmt, thresholds=thresh)\n resp = make_response(badge.badge_svg_text, 200)\n resp.headers['Content-Type'] = 'image/svg+xml;charset=utf-8'\n resp.headers['Content-Disposition'] = 'attachment; filename=badge.svg'\n return resp\n else:\n v = 'error'\n vfmt = \"%s\"\n thresh = {0: 'red'}", "title": "" }, { "docid": "fde03411cffadc2d91485c9cadef80bc", "score": "0.5177817", "text": "def health_check_view(request):\n return JsonResponse({'detail': 'OK'}, status=200)", "title": "" }, { "docid": "10edca68d479acb625d02a15adae2e5a", "score": "0.51775455", "text": "def test_valid_report_with_bad_endpoint(cli_runner, simple_report):\n with cli_runner.isolated_filesystem():\n with open(\".report.json\", \"w\") as f:\n f.write(json.dumps(simple_report))\n\n with mock.patch.multiple(\"mig3_client\", requests=mock.DEFAULT, git=mock.DEFAULT) as patches:\n # Simulate 404 Not Found response\n patches[\"requests\"].post().status_code = 404\n patches[\"requests\"].post().content = b\"Page not found\"\n result = cli_runner.invoke(mig3, args=\" \".join(ALL_ARGUMENTS.values()))\n\n assert result.exception, result.output\n assert result.exit_code == RequestError.exit_code, result.exit_code\n assert fmt(\"Reading report...{ICON_SUCCESS}\") in result.output\n assert fmt(\"Converting test data...{ICON_SUCCESS}\") in result.output\n assert fmt(\"Building submission...{ICON_SUCCESS}\") in result.output\n assert fmt(\"Sending submission...{ICON_FAILURE}\") in result.output\n assert \"Page not found\" in result.output", "title": "" }, { "docid": "5f675c1f5f0ca80c7e96e20407290686", "score": "0.51691216", "text": "def test_crear_reporte_mermas_reporte_existente(self):\n # Creamos una barra nueva para el test\n barra_4 = models.Almacen.objects.create(nombre='BARRA 4', numero=4, sucursal=self.magno_brasserie)\n\n #-------------------------------------------------------------------------------\n # Creamos una Inspeccion para la BARRA 4\n with freeze_time(\"2019-06-03\"):\n \n inspeccion_barra4_1 = models.Inspeccion.objects.create(\n almacen=barra_4,\n sucursal=self.magno_brasserie,\n usuario_alta=self.usuario,\n usuario_cierre=self.usuario,\n estado='1' # CERRADA\n )\n\n # Creamos un reporte de mermas para la inspeccion\n reporte_mermas_barra4 = models.ReporteMermas.objects.create(\n inspeccion=inspeccion_barra4_1,\n almacen=barra_4,\n fecha_inicial=datetime.date(2019, 6, 2),\n fecha_final=inspeccion_barra4_1.fecha_alta\n )\n\n # Construimos el request\n payload = {\n 'inspeccion': inspeccion_barra4_1.id,\n }\n url = reverse('analytics:crear-reporte-mermas')\n response = self.client.post(url, payload)\n\n print('::: RESPONSE DATA :::')\n print(response.data)\n print(response.status_code)\n\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "b8cfdb27f70a78561cc8782efd2d22a9", "score": "0.51664937", "text": "def test_create(self):\n self.client.force_authenticate(user=self.user)\n\n data = {\n 'retreat': reverse(\n 'retreat:retreat-detail', args=[self.retreat.id]\n ),\n # The 'user' field is ignored when the calling user is not admin.\n # The field is REQUIRED nonetheless.\n 'user': reverse('user-detail', args=[self.admin.id]),\n }\n\n response = self.client.post(\n reverse('retreat:waitqueue-list'),\n data,\n format='json',\n )\n\n self.assertEqual(\n response.status_code,\n 
status.HTTP_201_CREATED,\n response.content,\n )\n\n content = {\n 'list_size': 2,\n 'notified': False,\n 'retreat': 'http://testserver/retreat/retreats/' +\n str(self.retreat.id),\n 'user': ''.join(['http://testserver/users/', str(self.user.id)]),\n 'created_at': json.loads(response.content)['created_at'],\n 'used': False,\n }\n\n response_data = json.loads(response.content)\n del response_data['id']\n del response_data['url']\n\n self.assertEqual(\n response_data,\n content\n )", "title": "" }, { "docid": "05d81c68917b931f73f71c9f985d7f18", "score": "0.51640606", "text": "def new_get():\n\n return render_template('new.html', form=empty_form())", "title": "" } ]
eb453cae7da383dd193905d275001909
View the reputation points leaderboard.
[ { "docid": "29930feaccab07b2952f3ab60de65a3e", "score": "0.7601583", "text": "async def leaderboard(self, ctx):\n leaderboard = await self.bot.database.get_top_reps()\n embed = discord.Embed(colour=EMBED_ACCENT_COLOUR)\n embed.set_author(\n name=\"Reputation Leaderboard\",\n icon_url=\"https://images.emojiterra.com/mozilla/512px/1f3c6.png\",\n )\n for member_id, points in leaderboard:\n member = ctx.guild.get_member(member_id)\n if member:\n embed.add_field(\n name=str(member.nick if member.nick is not None else member.name),\n value=points,\n inline=False,\n )\n await ctx.send(embed=embed)", "title": "" } ]
[ { "docid": "6375733d6284707ba6aaf4b11d8578d3", "score": "0.70809627", "text": "def show_leaderboard():\n leaderboard_data = get_sorted_scores()\n return render_template(\"leaderboard.html\", leaderboard_data=leaderboard_data)", "title": "" }, { "docid": "437d7c5565d4948aa3c19c5e239cc8fe", "score": "0.68095446", "text": "def leaderboard(self):\n click.echo(\"\\n----Leaderboard----\")\n players = OrderedDict(\n sorted(self.players.items(), key=lambda x: getitem(x[1], \"round\"))\n )\n columns = [\"Rank\", \"Name\", \"Round\", \"Score\"]\n row_format = \"{:>15}\" * len(columns)\n click.echo(row_format.format(*columns))\n rank = 1\n for k, v in players.items():\n click.echo(row_format.format(rank, v[\"name\"], v[\"round\"], v[\"score\"]))\n rank = rank + 1", "title": "" }, { "docid": "8b902558ddeb9651656b204c2bbf7032", "score": "0.63663286", "text": "def display_points(self) -> List[dict]:\n self.log.debug('Generating scores...')\n score_df = self.get_score(in_game=True) # type: pd.DataFrame\n self.log.debug(f'Retrieved {score_df.shape[0]} players\\' scores')\n if score_df.shape[0] == 0:\n return [\n BKitB.make_block_section('No one has scored yet. Check back later!')\n ]\n score_df.loc[:, 'rank_emoji'] = [':blank:' for _ in range(score_df.shape[0])]\n if score_df['current'].sum() != 0:\n # Determine the emojis for 1st, 2nd and 3rd place\n is_zero = (score_df.current == 0)\n for r in range(1, 6):\n score_df.loc[(score_df.current_rank == r) & (~is_zero), 'rank_emoji'] = f':cah-rank-{r}:'\n # Determine if the recent winner is on a streak\n score_df['streak'] = ''\n if self.current_game is not None:\n player_id, n_streak = self.determine_streak()\n if n_streak > 0:\n # Streak!\n score_df.loc[score_df.player_id == player_id, 'streak'] = ':steak:' * n_streak\n # Set order of the columns\n score_df = score_df[['rank_chg_emoji', 'rank_emoji', 'current_rank', 'display_name',\n 'current', 'overall', 'streak']]\n score_df = score_df.sort_values('current_rank', ascending=True)\n\n scores_list = []\n for i, r in score_df.iterrows():\n dname = f\"{r['display_name'][:14].title():_<15}\"\n emos = f\"{r['rank_chg_emoji'] + r['rank_emoji']}\"\n c_rank = f\"{r['current_rank']:>2.0f}\"\n scores = f\"*`{r['current']:>4.0f}`*`({r['overall']:>4.0f})`\"\n streak = f\"{r['streak']}\"\n line = f\"{emos}*`{c_rank}`*` {dname}`:diddlecoin:{scores}{streak}\"\n scores_list.append(line)\n\n return [\n BKitB.make_context_section([BKitB.markdown_section('*Current Scores*')]),\n BKitB.make_block_divider(),\n BKitB.make_block_section(scores_list)\n ]", "title": "" }, { "docid": "69bc8af69c922860aa714ad0f15c5b3a", "score": "0.6162498", "text": "def displayPoints(self):\n self.client.get(\"/displayPoints\")", "title": "" }, { "docid": "f4837939975e559645382e5d71401c45", "score": "0.6161864", "text": "def fullleaderboard (request):\n # Define views here\n score_submit = EventEntryModel.objects.exclude(winner__isnull=True).count()\n active_players = PlayerModel.objects.all()\n\n loaded_points = list(EventEntryModel.objects.aggregate(Sum('points')).values())[0]\n awarded_points = list(EventEntryModel.objects.exclude(winner__isnull=True).aggregate(Sum('points')).values())[0]\n\n context = {\n 'score_submit': score_submit,\n 'active_players': active_players,\n 'loaded_points': loaded_points,\n 'awarded_points': awarded_points,\n 'tour_name_top_line': tour_name_top_line,\n 'tour_name_highlight': tour_name_highlight,\n }\n return render(request, 'fullLeaderboard.html', context=context)", "title": "" }, { "docid": 
"b68db73095cfe00a7258b97ef6b10d3f", "score": "0.6103896", "text": "async def leaderboard(self, ctx):\r\n guild = ctx.guild.id\r\n users = await self.bot.db.fetch(\"SELECT * FROM levels WHERE guild_id=$1 ORDER BY xp DESC\", guild)\r\n\r\n ranks = {(y := x + 1): f\"#{y}\" if y > 3 else f\"{self.leaderboard_emojis[y]}\" for x in range(10)}\r\n fields = {\"member\": [], \"level\": [], \"rank\": []}\r\n\r\n for index, value in zip(range(10), users):\r\n user = self.bot.get_user(value[\"user_id\"])\r\n if user:\r\n if (rank := index + 1) == 1:\r\n top_user = f\"Top Member: 🏆 **{str(user)}**\"\r\n fields[\"rank\"].append(ranks[rank])\r\n fields[\"member\"].append(f\"**{user.name}**\")\r\n xp = round((4 * (value['lvl'] ** 3) / 5))\r\n fields[\"level\"].append(f\"Level {value['lvl']} ({value['xp']}/{xp})\")\r\n else:\r\n for value in fields.values():\r\n value.append(\"...\")\r\n\r\n leaderboard = discord.Embed(color=ctx.me.colour, title=f\"Top 10 in {ctx.guild.name}\", description=top_user,\r\n timestamp=ctx.message.created_at)\r\n leaderboard.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url)\r\n\r\n author = await self.bot.db.fetchrow(\"SELECT * FROM levels WHERE user_id=$1 AND guild_id=$2\",\r\n ctx.author.id, guild)\r\n\r\n if author:\r\n fields[\"rank\"].append(ranks[users.index(author) + 1])\r\n fields[\"member\"].append(f\"**{ctx.author.name}**\")\r\n xp = round((4 * (author['lvl'] ** 3) / 5))\r\n fields[\"level\"].append(f\"Level {author['lvl']} ({author['xp']}/{xp})\")\r\n\r\n leaderboard.add_field(name=\"Rank\", value=\"\\n\".join(fields[\"rank\"]), inline=True)\r\n leaderboard.add_field(name=\"Member\", value=\"\\n\".join(fields[\"member\"]), inline=True)\r\n leaderboard.add_field(name=\"Level\", value=\"\\n\".join(fields[\"level\"]), inline=True)\r\n\r\n await ctx.send(embed=leaderboard)", "title": "" }, { "docid": "cafa310f61b85a6ec569e99cfafaa7c9", "score": "0.60134494", "text": "async def leaderboard(self, ctx: commands.Context):\n\n\t\treturn await ctx.send(await TrophyLeaderboard(ctx).create())", "title": "" }, { "docid": "648e2b9738edcdc22360410c3f34ba3b", "score": "0.57833266", "text": "def showCurrentRankings(self):\n \n font = pygame.font.Font(self.fontSrc, self.fontSize - self.fontSize//6)\n sorted_players = sorted([p for p in self.players], key=lambda x: x.rank)\n headerRankTxt = font.render(\"Here are the rankings:\", True, (0,0,0))\n widthTxt, heightTxt = font.size(\"Here are the rankings:\")\n self.screen.blit(headerRankTxt, (self.width//2 - widthTxt//2, (self.height//2 - heightTxt*2) + self.height//6))\n\n for i in range(len(self.players)):\n sufixes = {1: \"st\", 2: \"nd\", 3: \"rd\", 4: \"th\"}\n p = sorted_players[i]\n playerTxt = font.render(str(p.rank) + sufixes[p.rank] + \": \" + p.name + \" (\" + str(p.points) + \"p)\", True, (0,0,0))\n widthTxt, heightTxt = font.size(str(p.rank) + \": \" + p.name + \" (\" + str(p.points) + \"p)\")\n\n self.screen.blit(playerTxt, (self.width//2 - widthTxt//2, (self.height//2 + heightTxt*i) + self.height//6))", "title": "" }, { "docid": "b70190aa8df97b0bc7195b975aee1d0b", "score": "0.5755101", "text": "def leaderboard():\n #allows the leaderboard to be viewed without updating/adding to it\n if request.method == 'POST':\n new_leaderboard_name = request.form[\"leaderboard_name\"] #store the users input name\n #add the new highscore to the database\n new_highscore= User(username=new_leaderboard_name, score=flask_session['user_score'])\n db.session.add(new_highscore)\n db.session.commit()\n\n leaderboard = 
User.query.order_by(desc(User.score)).limit(10).all()\n\n return render_template('leaderboard.html', leaderboard=leaderboard)", "title": "" }, { "docid": "4001c2dea5cd3f48475c83a57b166e60", "score": "0.5731824", "text": "def print_scores(self) -> None :\n \n for i in range(len(self._players)):\n print(self._players[i]._name ,\"=\", self._players[i].current_score())", "title": "" }, { "docid": "26a338181e6564a8cbc07b220894291f", "score": "0.56973094", "text": "def print_scoreboard(self, gender):\n print('Scoreboard for track %s in season %s' % (gender, self.name))\n rank = 1\n scoreboard = self.get_scoreboard(gender)\n for points, stats in scoreboard:\n print('#%d. %s at %.2f points' % (rank, stats.player.name, stats.points))\n rank += 1", "title": "" }, { "docid": "63e011e2bc5e082ac655bb0c83e02dd2", "score": "0.56954557", "text": "async def show(ctx, *args):\n if len(server.user_list) < 1:\n await ctx.send(\"The server dosen't have any data….\")\n elif (len(args) < 1 or args[0] == \"--name\"):\n user_name = ctx.author.name\n if len(args) > 1 and \"--name\" == args[0]:\n for m in ctx.guild.members:\n if m.name == args[1] or m.nick == args[1]:\n user_name = args[1]\n break\n else:\n await ctx.send(\"Invalid user name\")\n return\n user = server.get(user_name)\n ret = \"Win rate(when Impostor or Crewmate): {:.2%}\\n\".format(\n user.win_num / user.data['Games Finished'])\n ret += \"Win rate(when Impostor): {:.2%}\\n\".format(\n user.impostor_win_num / user.data['Times Impostor'])\n ret += \"Win rate(when Crewmate): {:.2%}\\n\".format(\n user.crewmate_win_num / user.data['Times Crewmate'])\n ret += \"Kills per Impostor: {:.2f}\\n\".format(\n user.data['Impostor Kills'] / user.data['Times Impostor'])\n ret += \"Tasks Completed rate: {:.2%}\\n\".format(\n user.data['All Tasks Completed'] / user.data['Times Crewmate'])\n ret += \"Sabotages Fixed / Games: {:.2f}\".format(\n user.data['Sabotages Fixed'] / user.data['Games Finished'])\n await ctx.send(ret)\n elif args[0] == \"--rank\":\n show_num = 3\n if len(args) > 1 and is_num(args[1]):\n show_num = int(args[1])\n ret = \"leaderboard! (Win Rate) \\n\"\n ret += \"\\n\".join(\"rank {2} {1} : {0:.2%} win\".format(tup[0], tup[1],\n rank)\n for rank, tup in enumerate(\n server.rank_win_rate()[:show_num], start=1))\n ret += \"\\n\\nleaderboard! (Impostor Win Rate) \\n\"\n ret += \"\\n\".join(\"rank {2} {1} : {0:.2%} win\".format(tup[0], tup[1],\n rank)\n for rank, tup in enumerate(\n server.rank_win_rate_when_impostor()[:show_num],\n start=1))\n ret += \"\\n\\nleaderboard! (Crewmate Win Rate) \\n\"\n ret += \"\\n\".join(\"rank {2} {1} : {0:.2%} win\".format(tup[0], tup[1],\n rank)\n for rank, tup in enumerate(\n server.rank_win_rate_when_crewmate()[:show_num],\n start=1))\n ret += \"\\n\\nleaderboard! (Kill / Times Impostor) \\n\"\n ret += \"\\n\".join(\"rank {2} {1} : {0:.2f} killed\".format(tup[0], tup[1],\n rank)\n for rank, tup in enumerate(\n server.rank_kill()[:show_num], start=1))\n ret += \"\\n\\nleaderboard! (All Tasks Completed / Times Crewmate) \\n\"\n ret += \"\\n\".join(\"rank {2} {1} : {0:.2%} completed\".format(\n tup[0], tup[1],\n rank)\n for rank, tup in enumerate(\n server.rank_alltask()[:show_num], start=1))\n ret += \"\\n\\nleaderboard! 
(Sabotages Fixed / Number of Games) \\n\"\n ret += \"\\n\".join(\"rank {2} {1} : {0:.2f} fixed\".format(tup[0], tup[1],\n rank)\n for rank, tup in enumerate(\n server.rank_sabotagefix()[:show_num], start=1))\n await ctx.send(ret)\n elif args[0] == \"--diff\":\n await ctx.send(\"I'll implement it one day!\")\n elif args[0] == \"--userlist\":\n await ctx.send(\", \".join([k for k in server.user_list.keys()]))\n else:\n await ctx.send(\"invalid command ><\")", "title": "" }, { "docid": "6c44c80cea96895f9c3990277d80b4dc", "score": "0.56856644", "text": "def rd1leaderboard(request):\n\n #Add views\n playing_players = Rd1SlotModel.objects.filter(player_name__isnull=False)\n\n endurance_leader = Rd1SlotModel.objects.aggregate(Max('endurance_score'))\n\n #Add context\n context = {\n 'playing_players': playing_players,\n 'endurance_leader': endurance_leader,\n }\n\n return render(request, 'rd1Leaderboard.html', context=context)", "title": "" }, { "docid": "7554035597c5a42f496da3707d110bbd", "score": "0.5664168", "text": "async def get_ranking(self, ctx):\n logger.debug(\"get_ranking: called by\".format(ctx.author))\n rankings = get_player_ranking() # returns dict with {'gamertag':'rank'}\n announcement = announce_rankings(rankings)\n await ctx.send(announcement[\"content\"], embed=announcement[\"embed\"])", "title": "" }, { "docid": "306b04bff59283d900b43f79246d656c", "score": "0.5656268", "text": "def show_scores(self) -> None:\n print(\"\\nScores\")\n for player in self.__players:\n print(f\"{player.name}: {player.score}\")\n print(f\"Ties: {self.tie_score}\")", "title": "" }, { "docid": "f494a072a544585cb57a2fab8d31f093", "score": "0.56248295", "text": "def make_leaderboard(self):\n leaderboard = [(player_id, rating.mu - 3*rating.sigma) for player_id, rating in self._ratings.items()]\n leaderboard = sorted(leaderboard, key=operator.itemgetter(1), reverse=True)\n return leaderboard", "title": "" }, { "docid": "ad4411c918d1b999731574c887d82bad", "score": "0.56239873", "text": "def reputation(answer):\n\n return answer[\"score\"] * 10 + answer[\"is_accepted\"] * 15", "title": "" }, { "docid": "2a75106633f67134bde9215efb83feff", "score": "0.559073", "text": "def display(self):\n\t\tprint(\"title = \" + self.title, \"URL = \" + self.url, \\\n\t\t\t\"Subreddit = \" + self.subreddit, \"Score = \" + str(self.score))", "title": "" }, { "docid": "4e9eacde1f1ec23048979852d73d99a7", "score": "0.55866706", "text": "async def leaderboard(self, ctx):\n if ctx.invoked_subcommand is None:\n await ctx.invoke(self._server_leaderboard)", "title": "" }, { "docid": "798b9d5c1c3c546cf1dc37ee60c9219d", "score": "0.5573686", "text": "def getleaderboard(request):\n note_session_activity(request)\n\n if not using_unique_session(request.user):\n return HttpResponseForbidden()\n\n owner_filter = request.GET['owner_filter']\n body_pk = int(request.GET['legislative_body'])\n leg_body = LegislativeBody.objects.get(pk=body_pk)\n\n display = getleaderboarddisplay(leg_body, owner_filter)\n if display is None:\n return HttpResponse(\n _('No display configured'), content_type='text/plain')\n\n plans = getvalidplans(leg_body, request.user\n if owner_filter == 'mine' else None)\n\n try:\n html = display.render(plans, request)\n return HttpResponse(html, content_type='text/html; charset=utf-8')\n except Exception, ex:\n logger.warn('Leaderboard could not be fetched.')\n logger.debug('Reason: %s', ex)\n return HttpResponse(str(ex), content_type='text/plain')", "title": "" }, { "docid": "6954894d95f9e458b36e65fcbf8211df", "score": 
"0.5555083", "text": "def get_leaderboard(db):\r\n user_scores = []\r\n prediction_users_ref = db.collection(\"prediction_users\")\r\n user_docs = prediction_users_ref.stream()\r\n for user_doc in user_docs:\r\n user_info = user_doc.to_dict()\r\n if user_info[\"can_display\"]:\r\n email = user_doc.id\r\n username = email.split(\"@\")[0]\r\n user_scores.append((round(user_info[\"current_score\"], 2), username))\r\n user_scores.sort(key=lambda x: (-x[0], x[1]))\r\n return user_scores[:10]", "title": "" }, { "docid": "d9f7a217ceeb2c3068b96078deb0bb06", "score": "0.5549718", "text": "def show_top_five_on_leaderboard():\n all_scores = []\n with open(leaderboard_file, 'r') as file:\n for line in file:\n words = line.split('=')\n leaderboard_score = int(words[1])\n player = words[0]\n all_scores.append((leaderboard_score, player))\n print('Here are the current top 5 players in the leaderboard:')\n sorted_scores = sorted(all_scores, reverse=True)[0:5]\n for score, name in sorted_scores:\n print('{} - {}'.format(name, score))", "title": "" }, { "docid": "8d8f46e2559b39b119d7febd6d19afad", "score": "0.5538746", "text": "def view_scores(self):\n scores = { \n **self.__score['upper'],\n 'total upper': self.__score['total upper'],\n **self.__score['lower'],\n 'total lower': self.__score['total lower'],\n 'grand total': self.__score['grand total'] \n }\n for score in scores:\n s = scores[score]\n if s == None:\n s = 'blank'\n print(f'\\t{score.title()}: {s}')", "title": "" }, { "docid": "3b85cda051d1b14514e22f0914e73c40", "score": "0.5518601", "text": "def show_leaderboard_screen(self):\n self.leaderboard.retrieve()\n self.clear()\n self.add_button(\"Back\", self.show_main_screen)\n\n if self.leaderboard.entries is None:\n self.add_button(\"Error Loading Leaderboard\", None)\n else:\n for i in range(0, 4 if len(self.leaderboard.entries) > 6 else len(self.leaderboard.entries)):\n entry = self.leaderboard.entries[i]\n self.add_button(entry.name + \" Lvl=\" + entry.level + \" Scr=\" + str(entry.score) + \" Wve=\" + str(entry.wave), None)", "title": "" }, { "docid": "b74ed25514064c5e10d0a2346470cd3a", "score": "0.5509204", "text": "def displayLeaderboard():\n\n global leaderboard\n\n Y_OFFSET = 50\n TOP_OFFSET = 150\n\n leaderboard_title = largeFont.render(\"Leader Board\", True, BLACK)\n leaderboard_titleRect = leaderboard_title.get_rect()\n leaderboard_titleRect.center = ((WIN_WIDTH / 2), 50)\n screen.blit(leaderboard_title, leaderboard_titleRect)\n\n for idx, person in enumerate(leaderboard):\n person_entry_txt = person[0] + '.'\n person_entry_txt += (' ') + person[1]\n\n leaderboard_entry = mediumFont.render(person_entry_txt, True, BLACK)\n leaderboard_entryRect = leaderboard_entry.get_rect()\n leaderboard_entryRect.bottomleft = ((WIN_WIDTH / 4), TOP_OFFSET + Y_OFFSET * idx)\n screen.blit(leaderboard_entry, leaderboard_entryRect)\n\n score_entry_txt = person[2]\n rightAlign = mediumFont.size(person[2])[0]\n\n leaderboard_entryScore = mediumFont.render(score_entry_txt, True, BLACK)\n leaderboard_entryScoreRect = leaderboard_entryScore.get_rect()\n leaderboard_entryScoreRect.bottomleft = ((3 * WIN_WIDTH / 4) - rightAlign, TOP_OFFSET + Y_OFFSET * idx)\n screen.blit(leaderboard_entryScore, leaderboard_entryScoreRect)", "title": "" }, { "docid": "6aa0d4c89c99b0ac45a704db7faf0f1f", "score": "0.54985845", "text": "async def leaderboard(self, ctx: commands.Context, list_all_invites: bool = False):\n if not ctx.me.permissions_in(ctx.channel).administrator:\n return await self._send_embed(ctx, PERM_MSG)\n\n if 
not list_all_invites:\n pinned_invites = await self.config.guild(ctx.guild).pinned_invites()\n if not pinned_invites:\n return await self._send_embed(ctx, \"No invites are pinned, or there are no invites to display.\")\n else:\n pinned_invites = await ctx.guild.invites()\n invite_info = \"\"\n for i, invite_code_or_object in enumerate(pinned_invites):\n if not list_all_invites:\n inv_object = await self._get_invite_from_code(ctx, invite_code_or_object)\n else:\n inv_object = invite_code_or_object\n max_uses = await self.get_invite_max_uses(ctx, inv_object)\n inv_details = f\"{i+1}. {inv_object.url} [ {inv_object.uses} uses / {max_uses} max ]\\n\"\n invite_info += inv_details\n\n pagified_stings = [x for x in cf.pagify(invite_info, delims=[\"\\n\"], shorten_by=16)]\n pages = MenuLeaderboardPages(ctx, pagified_stings, show_all=list_all_invites)\n await self._menu(ctx, pages)", "title": "" }, { "docid": "dae9a8857eb42415296cdc731bc2346e", "score": "0.54210013", "text": "async def leaderboard(self, ctx, *args):\r\n if ctx.channel.id == BANK_CHANNEL or ctx.channel.id in GENERAL_CHANNELS:\r\n if len(args) == 0:\r\n name = None\r\n key = 'total'\r\n elif len(args) == 1:\r\n if 'key=' in args[0]:\r\n name = None\r\n for userkey in users.DEFAULT_ACCOUNT.keys():\r\n if args[0][4:] in userkey:\r\n key = userkey\r\n break\r\n else:\r\n if args[0][4:] == 'total':\r\n key = 'total'\r\n else:\r\n await ctx.send(f'Key {args[0]} not found.')\r\n return\r\n\r\n else:\r\n name = args[0]\r\n key = 'total'\r\n else:\r\n key = args[0]\r\n if key not in users.DEFAULT_ACCOUNT.keys():\r\n if key != 'total':\r\n await ctx.send(f'Key {key} not found.')\r\n return\r\n name = ' '.join(args[1:])\r\n\r\n key_name = {\r\n users.ITEMS_KEY: 'gold',\r\n users.SLAYER_XP_KEY: 'slayer',\r\n users.COMBAT_XP_KEY: 'combat',\r\n users.GATHER_XP_KEY: 'gather',\r\n users.ARTISAN_XP_KEY: 'artisan',\r\n users.COOK_XP_KEY: 'cooking',\r\n users.QUESTS_KEY: 'quest points',\r\n 'total': 'total level'\r\n }\r\n if key not in key_name.keys():\r\n await ctx.send(f\"Can't make leaderboard with key {key}.\")\r\n return\r\n\r\n leaderboard = users.get_values_by_account(key=key)\r\n\r\n out = f':hammer_pick: __**{key.upper()} LEADERBOARD**__ :crossed_swords:\\n'\r\n if name is None:\r\n try:\r\n for i in range(10):\r\n user_id, amount = leaderboard[i]\r\n amount_formatted = '{:,}'.format(amount)\r\n member = ctx.message.guild.get_member(user_id)\r\n if member is not None:\r\n name = get_display_name(member)\r\n else:\r\n name = f'User {user_id}'\r\n out += f'**({1 + i}) {name}**: '\r\n if key == users.ITEMS_KEY:\r\n out += f'{amount_formatted} coins\\n'\r\n elif key == users.QUESTS_KEY:\r\n out += f'{amount_formatted} quests\\n'\r\n elif key == 'total':\r\n out += f'{amount_formatted} levels\\n'\r\n else:\r\n out += f'{users.xp_to_level(amount)} *({amount_formatted}xp)*\\n'\r\n except IndexError:\r\n pass\r\n await ctx.send(out)\r\n else:\r\n if name == 'bottom':\r\n try:\r\n for i in range(len(leaderboard) - 10, len(leaderboard)):\r\n user_id, amount = leaderboard[i]\r\n amount_formatted = '{:,}'.format(amount)\r\n member = ctx.message.guild.get_member(user_id)\r\n if member is not None:\r\n name = get_display_name(member)\r\n else:\r\n name = f'User {user_id}'\r\n out += f'**({1 + i}) {name}**: '\r\n if key == users.ITEMS_KEY:\r\n out += f'{amount_formatted} coins\\n'\r\n elif key == users.QUESTS_KEY:\r\n out += f'{amount_formatted} quests\\n'\r\n elif key == 'total':\r\n out += f'{amount_formatted} levels\\n'\r\n else:\r\n out += 
f'{users.xp_to_level(amount)} *({amount_formatted}xp)*\\n'\r\n except IndexError:\r\n pass\r\n await ctx.send(out)\r\n else:\r\n try:\r\n name_list = [x[0] for x in leaderboard]\r\n name_member = parse_name(ctx.message.guild, name)\r\n name_index = name_list.index(name_member.id)\r\n if name_index < 5:\r\n lower = 0\r\n upper = 10\r\n else:\r\n lower = name_index - 5\r\n upper = name_index + 5\r\n if name_index + 5 > len(leaderboard):\r\n upper = len(leaderboard)\r\n lower = len(leaderboard) - 10\r\n for i in range(lower, upper):\r\n user_id, amount = leaderboard[i]\r\n amount_formatted = '{:,}'.format(amount)\r\n member = ctx.message.guild.get_member(user_id)\r\n if member is not None:\r\n name = get_display_name(member)\r\n else:\r\n name = f'User {user_id}'\r\n out += f'**({1 + i}) {name}**: '\r\n if key == users.ITEMS_KEY:\r\n out += f'{amount_formatted} coins\\n'\r\n elif key == users.QUESTS_KEY:\r\n out += f'{amount_formatted} quests\\n'\r\n elif key == 'total':\r\n out += f'{amount_formatted} levels\\n'\r\n else:\r\n out += f'{users.xp_to_level(amount)} *({amount_formatted}xp)*\\n'\r\n except IndexError:\r\n pass\r\n except ValueError:\r\n await ctx.send(f'Name {name} not found in leaderboard.')\r\n await ctx.send(out)", "title": "" }, { "docid": "7f5f15309702a6e9f6ea403b25794594", "score": "0.5393029", "text": "def user_reward_points(username):\n\n if not isinstance(username, str): # Input checking\n raise Exception\n\n try:\n cursor = db.cursor()\n\n point_sql = \"SELECT reward_profile.points \" \\\n \"FROM user INNER JOIN reward_profile \" \\\n \"ON user.reward_profile_id=reward_profile.reward_id WHERE user.username=(%s)\" \\\n\n cursor.execute(point_sql, (username,))\n\n points = cursor.fetchone()[0]\n\n return points\n except:\n print(\"Unable to get user reward points\")\n raise Exception\n finally:\n db.commit()", "title": "" }, { "docid": "532e19c789245a46c763c362ec922bb5", "score": "0.53860444", "text": "def view_guesses(self):\n for x in range(len(self.guessed_list)):\n print(self.guessed_list[x])\n print(\"\\n\")", "title": "" }, { "docid": "4db4c860a5c6f82d2848fbbca292dcb5", "score": "0.5382469", "text": "def get_user_listens_leaderboard(self) -> List[Dict]:\n return self._client._internal_call(\"GET\", \"/metrics/user-listens-leaderboard\", None, {})", "title": "" }, { "docid": "8cb9564384eadac06d26cfad7e5051df", "score": "0.53818065", "text": "def modify_reputation(self, added_points):\n self.points = F('points') + added_points\n self.save()", "title": "" }, { "docid": "c007b19661cb0c85518a95133821d2ac", "score": "0.5379222", "text": "def print_board(self):\n print(f\"Score: {self.score}\")\n print(self.tiles.copy().reshape(self.width, self.height))", "title": "" }, { "docid": "a8c2a38b031166b121f5556d2c3f8602", "score": "0.53656805", "text": "def get_leaderboards():\n top_scores = leaderboards.get_latest()\n logger.debug('Current leaderboards top score: %s', top_scores)\n return top_scores", "title": "" }, { "docid": "b3e3abac51a8aa505f3774bdd963b8cb", "score": "0.53413874", "text": "def test_get_reputation_score(self):\n dummy_reputation_score = 0\n reputation_score = self.githubScore.get_reputation_score()\n self.assertEqual(reputation_score, dummy_reputation_score)", "title": "" }, { "docid": "98ddad587c5f6433602480b68cc25a36", "score": "0.53279865", "text": "async def repcount(self, ctx, *, member: discord.Member = None):\n if member is None:\n member = ctx.author\n rep_count = await self.bot.database.get_reps(member)\n await ctx.send(f\"{member.name} has 
`{rep_count}` reputation points.\")", "title": "" }, { "docid": "74ac9d9e21fe3d8009430143163d4ee6", "score": "0.53124636", "text": "def load_leaderboard(self, name: str, show: bool = False) -> list:\n leaderboard = []\n members = self.__find_models(name, leaderboard_prefix)\n if len(members) > 0:\n for member in members:\n self.cursor.execute(\n f\"SELECT * FROM {self.schema}.{PREPROCESSORS} WHERE MODEL = '{member[0]}' \"\n f\"AND VERSION = {member[1]}\"\n )\n columns = self.cursor.fetchall()[\n 0\n ] # MODEL, VERSION, JSON, TRAIN_ACC, VALID_ACC, ALGORITHM, METRIC\n prep = self.__setup_preprocessor(columns[2])\n if \"Regressor\" in columns[5]:\n algo = self.reg_dict[columns[5]]\n if \"Classifier\" in columns[5]:\n algo = self.cls_dict[columns[5]]\n algo.model = super().load_model(member[0], member[1])\n algo.title = columns[5]\n model_board_member = ModelBoard(algo, 0, prep)\n model_board_member.valid_score = columns[4]\n model_board_member.train_score = columns[3]\n leaderboard.append(model_board_member)\n if show:\n print(\"\\033[33m{}\".format(f\"Loaded leaderboard '{name}':\\n\"))\n place = 1\n for member in leaderboard:\n print(\n \"\\033[33m {}\".format(\n str(place)\n + \". \"\n + str(member.algorithm.model)\n + f\"\\n Train {columns[6]} score: \"\n + str(member.train_score)\n + f\"\\n Holdout {columns[6]} score: \"\n + str(member.valid_score)\n )\n )\n print(\"\\033[0m {}\".format(\"\"))\n place += 1\n else:\n raise StorageError(\"Leaderboard not found!\")\n return leaderboard", "title": "" }, { "docid": "2fe3cc33225358d9525373963b157990", "score": "0.5295132", "text": "def rd2leaderboard(request):\n\n #Add views\n playing_players = Rd2SlotModel.objects.filter(player_name__isnull=False)\n\n #Add context\n context = {\n 'playing_players': playing_players,\n }\n\n return render(request, 'rd2Leaderboard.html', context=context)", "title": "" }, { "docid": "7dd6028e15ece7f8f95d3f3b2a0caa2c", "score": "0.52866375", "text": "async def _server_leaderboard(self, ctx, top : int=10):\n server = ctx.message.server\n if top < 1:\n top = 10\n bank_sorted = sorted(self.bank.get_server_accounts(server),\n key=lambda x: x.balance, reverse=True)\n if len(bank_sorted) < top:\n top = len(bank_sorted)\n topten = bank_sorted[:top]\n highscore = \"\"\n place = 1\n for acc in topten:\n highscore += str(place).ljust(len(str(top))+1)\n highscore += (acc.name+\" \").ljust(23-len(str(acc.balance)))\n highscore += str(acc.balance) + \"\\n\"\n place += 1\n if highscore:\n if len(highscore) < 1985:\n await self.bot.say(\"```py\\n\"+highscore+\"```\")\n else:\n await self.bot.say(\"```css\\nThe leaderboard is too big to be displayed. 
Try with a lower <top> parameter.\\n```\")\n else:\n await self.bot.say(\"```css\\nThere are no accounts in the bank.\\n```\")", "title": "" }, { "docid": "3dd267e52c9cc7ba29307e94d20b632c", "score": "0.5285733", "text": "async def update_leaderboard(self):\n while True:\n # check if should update\n if self.should_update_leaderboard:\n self.should_update_leaderboard = False\n # perform message edit\n leaderboard_data = '\\n'.join(\n self.expand_template(\n 'LEADERBOARD_DATA'\n rank=rank,\n name=self.get_user(int(user.id)),\n level=user.lvl,\n current_exp=user.exp,\n needed_exp=exp.exp_next_lvl(user.lvl),\n exp_bar=exp.exp_bar(user)\n )\n for rank, user in enumerate(self.leaderboard, 1)\n )\n msg = self.expand_template('LEADERBOARD', data=leaderboard_data)\n await self.leaderboard_message.edit(content=msg)\n await asyncio.sleep(self.settings.edit_delay)", "title": "" }, { "docid": "5e04c5112c4339c0075facf6aa06ceb6", "score": "0.527246", "text": "def scoreboard():\r\n scores = {}\r\n for user in models.User.query.all():\r\n if user.is_admin:\r\n continue\r\n user.solved_questions = list(filter(lambda x: not x.question.hide, user.solved_questions))\r\n scores[user.get_id()] = { 'username' : user.username, 'score': user.total_score, 'last_question_date': user.solved_questions[len(user.solved_questions)-1].date if len(user.solved_questions) > 0 else datetime.datetime.min }\r\n\r\n scores = helpers.sortScoreDict(scores)\r\n \"\"\"\r\n enumerate generates indices for the orderedDict\r\n \"\"\"\r\n return render_template(\"scoreboard.html\",scores=enumerate(scores.items()))", "title": "" }, { "docid": "4994f62f6090a34dc6564a29f51023cb", "score": "0.5257238", "text": "def display_score(self):\n\n self.penup()\n self.color(\"green\")\n self.hideturtle()\n\n self.setpos(-300, -250)\n self.setheading(0)\n self.pensize(3)\n\n self.pendown()\n self.forward(590)\n self.penup()\n\n self.setpos(220, -300)\n self.write(f\"Score:{self.user_score}\", False, \"center\", (\"Courier\", 20, \"normal\"))\n\n self.setpos(-230, -300)\n self.write(f\"Lives:{self.lives}\", False, \"center\", (\"Courier\", 20, \"normal\"))", "title": "" }, { "docid": "2d7f8536eba329b94c1681f4a76ddabc", "score": "0.52403444", "text": "def rankings():\n return render_template('rankings.html',\n main_ranking_table=Markup(generate_top_ranking_table()), hashtag_li=Markup(meta_hashtags()))", "title": "" }, { "docid": "19ea26362a6244a03c0d51172f38bb2d", "score": "0.52400327", "text": "def show_points(self):\n for point in self.points:\n print(point)", "title": "" }, { "docid": "7140299e6b0377cfaebc37b83cbfd52e", "score": "0.5220556", "text": "def supply(request, page_name):\n _ = page_name\n\n # Get the team members.\n team = request.user.get_profile().team\n current_round = challenge_mgr.get_round_name()\n if team and current_round:\n members_with_points = ScoreboardEntry.objects.filter(\n round_name=current_round).select_related(\n 'profile').filter(profile__team=team)\n zero_point_members = team.profile_set.exclude(\n id__in=members_with_points.values_list(\n 'profile__id', flat=True))\n\n # calculate and sort by rank\n members_with_points = sorted(list(members_with_points),\n key=lambda member: member.profile.overall_rank())\n\n zero_point_members = sorted(list(zero_point_members),\n key=lambda member: member.overall_rank())\n\n else:\n members_with_points = None\n zero_point_members = None\n\n return {\n \"team_members\": members_with_points,\n \"zero_members\": zero_point_members,\n }", "title": "" }, { "docid": 
"bbabea782c7d022c2385f13b6aa721c6", "score": "0.5212293", "text": "def test_get_leaderboard(self):\n pass", "title": "" }, { "docid": "6d25344c35e84a3b8b9d248159286e44", "score": "0.52118003", "text": "def leaderboard(self, id):\n from .game import GameLeaderboard\n return GameLeaderboard.leaderboard(self.short_name, id)", "title": "" }, { "docid": "a14adb9c3b3a55e876dda5471bda81c1", "score": "0.5209672", "text": "def show_privileges(self):\n\t\tprint(\"The above user: \")\n\t\twhile self.Privalages:\n\t\t\tcurrent_Privalages = self.Privalages.pop()\n\t\t\tprint(\"\\t \" + current_Privalages + \".\")", "title": "" }, { "docid": "c65811e0ee90fe44e7802c6af4baf24a", "score": "0.5179153", "text": "def test_scoreboard(self):\n run.add_to_scoreboard(\"testSuite\", 12)\n leaderboard = run.get_scoreboard()\n \n self.assertIn({\"username\":\"testSuite\", \"score\": 12}, leaderboard)", "title": "" }, { "docid": "9f69be86b85fc12a9966b5bf10829b5c", "score": "0.5166052", "text": "def test_user_reputation_score(self):\n dummy_user_reputation_score = 0.18\n total_reputation = self.user_details['reputation']\n type_of_user = 5\n user_reputation_score = self.stackOverflowScore._user_reputation_score(total_reputation, type_of_user)\n self.assertEqual(user_reputation_score, dummy_user_reputation_score)", "title": "" }, { "docid": "dda1b561034c1f3cee9e7e65a12b0ebe", "score": "0.516465", "text": "def buildLeaderboard(self):\n\t\toverall_standings = []\n\t\toverall_fpl_points = {}\n\t\toverall_league_points = {}\n\t\toverall_fines = {}\n\t\tgameweeks = {}\n\t\tfor i in self.league.league_gameweeks:\n\t\t\tgameweeks[i.fpl_event] = []\n\t\tfor i in self.members:\n\t\t\toverall_fpl_points[i] = 0\n\t\t\toverall_league_points[i] = 0\n\t\t\toverall_fines[i] = 0\n\t\t\trunning_league_points = 0\n\t\t\tfor j in i.league_gameweeks:\n\t\t\t\t\tif j.league_id == self.league.league_id:\n\t\t\t\t\t\toverall_fpl_points[i] += j.fpl_points\n\t\t\t\t\t\toverall_league_points[i] += j.championship_points\n\t\t\t\t\t\toverall_fines[i] += j.fines\n\t\t\t\t\t\trunning_league_points += j.championship_points\n\t\t\t\t\t\tgameweeks[j.fpl_event].append({\n\t\t\t\t\t\t\t'player': j.user,\n\t\t\t\t\t\t\t'fpl_points': j.fpl_points,\n\t\t\t\t\t\t\t'league_points': j.championship_points,\n\t\t\t\t\t\t\t'running_league_points': running_league_points,\n\t\t\t\t\t\t\t'fines': j.fines,\n\t\t\t\t\t\t\t'notes': j.notes\n\t\t\t\t\t\t\t})\n\t\tfor i in gameweeks:\n\t\t\tnewlist = sorted(gameweeks[i], key=lambda k: k['fpl_points'], reverse=True)\n\t\t\tgameweeks[i] = newlist\n\t\tfor i in overall_fpl_points:\n\t\t\toverall_standings.append({'player': i.username, 'fpl_points': overall_fpl_points[i], 'P & L': '+£69'})\n\t\tfor i in overall_league_points:\n\t\t\tfor j in overall_standings:\n\t\t\t\tif j['player'] == i.username:\n\t\t\t\t\tj['league_points'] = overall_league_points[i]\n\t\tfor i in overall_fines:\n\t\t\tfor j in overall_standings:\n\t\t\t\tif j['player'] == i.username:\n\t\t\t\t\tj['fines'] = overall_fines[i]\n\t\tsorted_overall = sorted(overall_standings, key=lambda k: k['league_points'], reverse=True)\n\t\tself.leaderboard = sorted_overall\n\t\tself.gameweeks = gameweeks", "title": "" }, { "docid": "d24b350d0febc4ead71b4d2fb9a097c1", "score": "0.51332504", "text": "def test_get_reputation_score(self):\n dummy_reputation_score = 0\n reputation_score = self.stackOverflowScore.get_reputation_score()\n self.assertEqual(reputation_score, dummy_reputation_score)", "title": "" }, { "docid": "a709bf7308a9b3ccd5521627ce7f6ae6", "score": 
"0.50993466", "text": "def printStats():\n n = top_node()\n print \"Winner: %i\" %(n.winner)\n starv = pd.SWIG_FloatArray_frompointer( n.starv )\n for c in xrange(n.nb):\n print \"starv %i: %f\" % (c, starv[c])\n \n print \"\"", "title": "" }, { "docid": "3c60285b741f953b2c1410c34b1391df", "score": "0.50816464", "text": "def show_winner(self):\n if self.player.get_player_points() > 21:\n print(self.player.get_name() + ' has lost. ' + str(self.player.get_player_points()) + ' > 21')\n elif self.dealer.get_player_points() > 21:\n print(self.dealer.get_name() + ' has lost. ' + str(self.dealer.get_player_points()) + ' > 21')\n else:\n winner = self.player if self.player.get_player_points() > self.dealer.get_player_points() else self.dealer\n print(winner.get_name() + ' has won with ' + str(winner.get_player_points()) + ' points!')", "title": "" }, { "docid": "00274fccda49c8c5d1b7a5079b048031", "score": "0.5072612", "text": "def display_scores(base_path: Path):\n path = scores_file_path(base_path)\n df = load_dataframe(path)\n print(df)", "title": "" }, { "docid": "cd933ea08c51ced3ac7d8292aeb7a311", "score": "0.50628436", "text": "def show_game_score_history():\n worksheet = SHEET.worksheet('high_scores')\n score_data = worksheet.get_all_values()\n last_player = score_data[-1]\n delay_print(\"Game stats\", 0)\n delay_print(\"___________________________ \\n\", 2)\n delay_print(f\"The last player was {last_player[0]}.\", 1)\n delay_print(f\"They played with {last_player[1]} cards and scored \\\n{last_player[2]} points\", 1)\n delay_print(\"\", 1)\n\n number_of_cards_played_list = []\n \n for data in score_data:\n number_of_cards_played_list.append(data[1])\n \n num_only = number_of_cards_played_list[1:]\n convert_to_num = [int(i) for i in num_only]\n average = sum(convert_to_num) / len(convert_to_num)\n\n mode_card = statistics.mode(convert_to_num)\n\n delay_print(f\"The average number of cards played with is {int(average)} \"\n \"\\n\", 2)\n delay_print(f\"The most common number of cards played with is {mode_card} \"\n \"\\n\", 2)\n delay_print(\"___________________________ \\n\", 2)", "title": "" }, { "docid": "3f24d948a6740e908f6ee89fc9c8dcf9", "score": "0.5051716", "text": "def view_allmembers(self):\n print(\"\\n\")\n print(\" {: ^7} {: ^20} {: ^10} {: ^10} {: ^15} {: ^30} {: ^10} {: ^10}\".format(\"Sr.No\", \"Name\", \"Age\", \"Gender\",\"Number\", \"Email\", \"BMI\", \"Duration\"))\n print((\"-\") * 118)\n sr = 1\n for item in self.member:\n print(\" {: ^7} {: ^20} {: ^10} {: ^10} {: ^15} {: ^30} {: ^10} {: ^10}\".format(sr, self.member[item][\"Name\"],\n self.member[item][\"Age\"],\n self.member[item][\"Gender\"],\n item,\n self.member[item][\"Email\"],\n self.member[item][\"BMI\"],\n self.member[item][\"Membership Duration\"]))\n sr +=1", "title": "" }, { "docid": "3c1aebd0cee66df4a706d20caaa59fac", "score": "0.50388896", "text": "def display_scores(self) -> str:\n scores = self.spreadsheet.get_scores()\n response = ''\n for country, score in zip(self.country_names, scores):\n response += '{0}: {1}\\n'.format(country, score)\n return response", "title": "" }, { "docid": "ed54f78d2da0391deb652f18fd68e233", "score": "0.5030802", "text": "def LoadReputationImpact():\r\n return ApiClient.Request('GET', '/account/loadreputationimpact')", "title": "" }, { "docid": "606c336487c2f74308bf4d93269cc008", "score": "0.5024674", "text": "def print_modif_classement(self, player):\n\n print(\"total de points : \" + str(player.get(\"ranking\")) + \" , tapez oui pour modifier -> \")\n reponse = 
CleanText.clean_input(input())\n return reponse", "title": "" }, { "docid": "bcfee476821ff8576887fa7b53d6d373", "score": "0.5003179", "text": "def print_guesses_list(self):\n\n print(\n f\"{f'There is 1 guess' if self._guess_index == 1 else f'There are {self._guess_index} guesses'} \"\n f\"(so far) in this game:\"\n )\n for guess in self:\n print(guess)\n print()", "title": "" }, { "docid": "4d2fddc5777056c87be92d70a855f29e", "score": "0.4997211", "text": "def display_stats(self):\n print '\\n=====\\nStats\\n====='\n print 'Chips: ' + str(self.player_chips)\n print 'Hands played: ' + str(self.total_hands)\n print 'Hands won: ' + str(self.won_hands)\n print 'Hands lost: ' + str(self.lost_hands)\n print '\\n'", "title": "" }, { "docid": "1bd72381a5a444ce60bac75225786ede", "score": "0.4995199", "text": "def display(self):\n\n for k, v in self._records.iteritems():\n print k, v, '\\n'\n print len(v['votes']), '\\n'", "title": "" }, { "docid": "9674bbc8fbb9aa414e0df80fccf220de", "score": "0.49903974", "text": "async def _global_leaderboard(self, top : int=10):\n if top < 1:\n top = 10\n bank_sorted = sorted(self.bank.get_all_accounts(),\n key=lambda x: x.balance, reverse=True)\n unique_accounts = []\n for acc in bank_sorted:\n if not self.already_in_list(unique_accounts, acc):\n unique_accounts.append(acc)\n if len(unique_accounts) < top:\n top = len(unique_accounts)\n topten = unique_accounts[:top]\n highscore = \"\"\n place = 1\n for acc in topten:\n highscore += str(place).ljust(len(str(top))+1)\n highscore += (\"{} |{}| \".format(acc.name, acc.server.name)).ljust(23-len(str(acc.balance)))\n highscore += str(acc.balance) + \"\\n\"\n place += 1\n if highscore:\n if len(highscore) < 1985:\n await self.bot.say(\"```py\\n\"+highscore+\"```\")\n else:\n await self.bot.say(\"```css\\nThe leaderboard is too big to be displayed. 
Try with a lower <top> parameter.\\n```\")\n else:\n await self.bot.say(\"```css\\nThere are no accounts in the bank.\\n```\")", "title": "" }, { "docid": "82295137e8c69672c468eeb11df2a14e", "score": "0.49883723", "text": "def _get_highscore(self):\n self._player_list = player_list = self._highscore.get_entries(game='basic')\n \n self.high_scores=tk.Toplevel(root)\n self.high_scores.title('High Score List')\n\n self.high_score_title = tk.Label(self.high_scores, text= 'Top 10 Players\\n')\n self.high_score_title.pack()\n\n for i, player_list in enumerate(player_list): # List top scoring players from 1 to 10 format\n self.high_score_list = tk.Label(self.high_scores, text = \"[{}] ,{}\\n\".format(i+1,player_list)).pack()", "title": "" }, { "docid": "bc92161549f659df63e1e4f6940f31ab", "score": "0.49777597", "text": "def show_score(matches, guesses, match_list, match_list_int):\n print ''\n print 'Here\\'s the score:'\n print 'Matches: %s' % matches\n print 'Guesses: %s' % guesses\n print 'Matches found so far: %s' % match_list\n print ''", "title": "" }, { "docid": "2125b575837ad2128fbaf20acd5d1a46", "score": "0.49623066", "text": "def rating_dashboard(request, challenge_id):\n challenge = get_object_or_404(models.Challenge, pk=challenge_id)\n\n if not user_may_rate(challenge, request.user):\n return HttpResponseForbidden()\n\n final_entries = models.Entry.objects.filter(\n challenge=challenge_id,\n has_final=True,\n ).annotate(\n author_count=md.Count('users', distinct=True),\n ratings_count=md.Count('rating'),\n ratings_nw=md.Count('rating', nonworking=True),\n ).select_related('user').prefetch_related(\n md.Prefetch(\n 'entryaward_set',\n queryset=request.user.entryaward_set.all()\n )\n )\n user_ratings = models.Rating.objects.filter(\n user__username__exact=request.user.username,\n entry__challenge=challenge_id,\n )\n ratings_by_entry = {r.entry.pk: r for r in user_ratings}\n\n your_entries = set(models.Entry.objects.filter(\n challenge=challenge_id,\n users__username__exact=request.user.username,\n ).values_list('pk', flat=True))\n\n rated = []\n not_rated = []\n not_working = []\n yours = []\n for entry in final_entries:\n r = ratings_by_entry.get(entry.pk)\n if r:\n fun = r.fun\n prod = r.production\n inno = r.innovation\n nw = r.nonworking\n dq = r.disqualify\n else:\n fun = prod = inno = None\n dq = nw = False\n is_team = entry.author_count > 1\n info = {\n 'name': entry.name,\n 'game': entry.game,\n 'title': entry.title,\n 'owner': entry.title if is_team else entry.user.username,\n 'is_team': is_team,\n 'sortname': hash((request.user.username, entry.pk)),\n 'fun': fun,\n 'prod': prod,\n 'inno': inno,\n 'awards': entry.entryaward_set.filter(creator=request.user),\n 'dq': dq,\n 'nw_pct': (\n (entry.ratings_nw * 100.0 / entry.ratings_count)\n if entry.ratings_count != 0 else 0\n ),\n }\n if entry.pk in your_entries:\n yours.append(info)\n elif nw:\n not_working.append(info)\n elif r:\n rated.append(info)\n else:\n not_rated.append(info)\n\n for es in (rated, not_rated, yours):\n es.sort(key=lambda e: hash((request.user.username, e['name'])))\n\n return render(request, 'challenge/rating-dash.html', {\n 'challenge': challenge,\n 'rated': rated,\n 'not_rated': not_rated,\n 'not_working': not_working,\n 'yours': yours,\n }\n )", "title": "" }, { "docid": "4d006dcb3dd17fe8fa6dc50a0a24cf16", "score": "0.4943942", "text": "def leaderboard(request):\n try:\n earned_awards = Award.objects.filter(user=request.user)\n except ObjectDoesNotExist:\n earned_awards = {}\n leaders_br = 
Profile.objects.filter(\n category=\"beginnerrunner\").order_by('-distance')\n leaders_r = Profile.objects.filter(category=\"runner\").order_by('-distance')\n leaders_b = Profile.objects.filter(category=\"biker\").order_by('-distance')\n leaders_d = Profile.objects.filter(\n category=\"duathloner\").order_by('-distance')\n leaders_f = Profile.objects.filter(\n category=\"freestyler\").order_by('-distance')\n\n total_workouts = Workout.objects.all()\n total_kms = 0\n for workout in total_workouts:\n total_kms += workout.distance\n table_leaders_br = ProfileTable(leaders_br, prefix=\"leaders-br-\")\n table_leaders_r = ProfileTable(leaders_r, prefix=\"leaders-r-\")\n table_leaders_b = ProfileTable(leaders_b, prefix=\"leaders-b-\")\n table_leaders_d = ProfileTable(leaders_d, prefix=\"leaders-d-\")\n table_leaders_f = ProfileTable(leaders_f, prefix=\"leaders-f-\")\n RequestConfig(request, paginate={\n \"per_page\": 10\n }).configure(table_leaders_br)\n RequestConfig(request, paginate={\n \"per_page\": 10\n }).configure(table_leaders_r)\n RequestConfig(request, paginate={\n \"per_page\": 10\n }).configure(table_leaders_b)\n RequestConfig(request, paginate={\n \"per_page\": 10\n }).configure(table_leaders_d)\n RequestConfig(request, paginate={\n \"per_page\": 10\n }).configure(table_leaders_f)\n\n return render(\n request, 'ic_marathon_app/leaderboard.html', {\n 'leaders_br': table_leaders_br,\n 'leaders_br_c': len(leaders_br),\n 'leaders_r': table_leaders_r,\n 'leaders_r_c': len(leaders_r),\n 'leaders_b': table_leaders_b,\n 'leaders_b_c': len(leaders_b),\n 'leaders_d': table_leaders_d,\n 'leaders_d_c': len(leaders_d),\n 'leaders_f': table_leaders_f,\n 'leaders_f_c': len(leaders_f),\n 'earned_awards': earned_awards,\n 'total_kms': total_kms\n })", "title": "" }, { "docid": "ef0a60fc1f86a4540850ce4b41b8505d", "score": "0.49308687", "text": "def test_get_reputation_score(self):\n dummy_reputation_score = 0.2\n reputation_score = self.blog_score.get_reputation_score()\n self.assertEqual(dummy_reputation_score, reputation_score)", "title": "" }, { "docid": "3c0349d42b675583e84584871c72c424", "score": "0.49217483", "text": "def view(username):\r\n user = Account.query.filter_by(username=username).first_or_404()\r\n topics = Topic.query.filter_by(\r\n account_id=user.id).order_by(Topic.id.desc()).limit(16)\r\n topics = fill_with_nodes(topics)\r\n return render_template('user/view.html', user=user, topics=topics)", "title": "" }, { "docid": "752d02403c47af838429a85f865b45d0", "score": "0.49164164", "text": "def leaderboards(self):\n from .game import GameLeaderboard\n return GameLeaderboard.leaderboards(self.short_name)", "title": "" }, { "docid": "36994e135f13cacde504dc1b39c2b8ee", "score": "0.49101168", "text": "def print_pawns(self):\n for player in Player.array:\n for pawn in player.pawn:\n pos_on_screen = player._get_pawn_on_screen(self, pawn)\n pawn_image = self.get_image(pawn.image, self.PAWN_SIZE, self.PAWN_SIZE)\n self.WINDOW.blit(pawn_image, pos_on_screen)", "title": "" }, { "docid": "f119a8d1da96eafb5ebb3aff6fa8cc40", "score": "0.49066737", "text": "async def bet_board(self, ctx: commands.Context, top: int = 10):\n reverse = True\n if top == 0:\n top = 10\n elif top < 0:\n reverse = False\n top = -top\n members_sorted = sorted(\n await self._get_all_members(ctx.bot), key=lambda x: x.karma, reverse=reverse\n )\n if len(members_sorted) < top:\n top = len(members_sorted)\n topten = members_sorted[:top]\n highscore = \"\"\n place = 1\n for member in topten:\n highscore += 
str(place).ljust(len(str(top)) + 1)\n highscore += \"{0} | \".format(member.name).ljust(18 - len(str(member.karma)))\n highscore += str(member.karma) + \"\\n\"\n place += 1\n if highscore != \"\":\n embed = discord.Embed(color=0xf3f1f6)\n embed.title = \"Karma Returns\"\n embed.description = \"\"\"```xl\n{0}```\"\"\".format(highscore)\n await ctx.send(embed=embed)\n else:\n await ctx.send(\"No one has gained or lost any karma\")", "title": "" }, { "docid": "beb981b58ae7a0cf2ba16781b2f09256", "score": "0.49053532", "text": "def playerdetail (request,name):\n\n #Basic player details\n player_image = PlayerModel.objects.get(name=name).image\n player_HC = PlayerModel.objects.get(name=name).HC\n player_highfinish = PlayerModel.objects.get(name=name).highfinish\n player_tournum = PlayerModel.objects.get(name=name).tournum\n player_totalpoints = LeaderBoardModel.objects.get(player=name).overall_total\n player_totalrank = LeaderBoardModel.objects.filter(overall_total__gt=player_totalpoints).count() + 1\n\n\n target_holes = 10 #Change to 10 in production\n\n ##START ROUND 1 CALCULATIONS -->\n #Trigger to show score only when round finished\n try:\n rd1holes_played = Rd1SlotModel.objects.get(player_name__name=name).player_holesplayed\n if rd1holes_played is None:\n rd1holes_played = 0\n else:\n rd1holes_played = Rd1SlotModel.objects.get(player_name__name=name).player_holesplayed\n except:\n rd1holes_played = 0\n\n\n\n #Rd1 Player golf score & rank\n if rd1holes_played >= target_holes:\n rd1golf_score = Rd1SlotModel.objects.get(player_name__name=name).player_score\n rd1golf_scoreRank = Rd1SlotModel.objects.filter(player_score__lt=rd1golf_score).count() + 1\n rd1golf_stbl = Rd1SlotModel.objects.get(player_name__name=name).player_stbl\n rd1golf_stblRank = Rd1SlotModel.objects.filter(player_stbl__gt=rd1golf_stbl).count() + 1\n else:\n rd1golf_score = \"-\"\n rd1golf_scoreRank= \"n/a\"\n rd1golf_stbl = \"-\"\n rd1golf_stblRank= \"n/a\"\n\n #Rd1PlayerPoints\n try:\n rd1golf_points = LeaderBoardModel.objects.get(player=name).rd1_golf\n except:\n rd1golf_points = \"-\"\n try:\n rd1golf_rank = LeaderBoardModel.objects.filter(rd1_golf__gt=rd1golf_points).count() + 1\n except:\n rd1golf_rank = \"-\"\n try:\n rd1ctpld_points = LeaderBoardModel.objects.get(player=name).rd1_ctpld\n except:\n rd1ctpld_points = \"-\"\n try:\n rd1ctpld_rank = LeaderBoardModel.objects.filter(rd1_ctpld__gt=rd1ctpld_points).count() + 1\n except:\n rd1ctpld_rank = \"-\"\n try:\n rd1bonus_points = LeaderBoardModel.objects.get(player=name).rd1_bonus\n except:\n rd1bonus_points = \"-\"\n try:\n rd1bonus_rank = LeaderBoardModel.objects.filter(rd1_bonus__gt=rd1bonus_points).count() + 1\n except:\n rd1bonus_rank = \"-\"\n try:\n rd1total_points = rd1golf_points + rd1ctpld_points + rd1bonus_points\n except:\n rd1total_points = \"-\"\n try:\n rd1total_rank = LeaderBoardModel.objects.filter(rd1_total__gt=rd1total_points).count() + 1\n except:\n rd1total_rank = \"-\"\n\n try:\n round1overall_points = list(LeaderBoardModel.objects.aggregate(Sum('rd1_total')).values())[0]\n except:\n round1overall_points = 0\n\n\n ##START ROUND 2 CALCULATIONS -->\n #Trigger to show score only when round finished\n try:\n rd2holes_played = Rd2SlotModel.objects.get(player_name__name=name).player_holesplayed\n if rd2holes_played is None:\n rd2holes_played = 0\n else:\n rd2holes_played = Rd2SlotModel.objects.get(player_name__name=name).player_holesplayed\n except:\n rd2holes_played = 0\n\n #Rd2 Player golf score & rank\n if rd2holes_played >= target_holes:\n 
rd2golf_score = Rd2SlotModel.objects.get(player_name__name=name).player_score\n rd2golf_scoreRank = Rd2SlotModel.objects.filter(player_score__lt=rd2golf_score).count() + 1\n rd2golf_stbl = Rd2SlotModel.objects.get(player_name__name=name).player_stbl\n rd2golf_stblRank = Rd2SlotModel.objects.filter(player_stbl__gt=rd2golf_stbl).count() + 1\n else:\n rd2golf_score = \"-\"\n rd2golf_scoreRank= \"n/a\"\n rd2golf_stbl = \"-\"\n rd2golf_stblRank= \"n/a\"\n\n #Rd2PlayerPoints\n try:\n rd2golf_points = LeaderBoardModel.objects.get(player=name).rd2_golf\n except:\n rd2golf_points = \"-\"\n try:\n rd2golf_rank = LeaderBoardModel.objects.filter(rd2_golf__gt=rd2golf_points).count() + 1\n except:\n rd2golf_rank = \"-\"\n try:\n rd2ctpld_points = LeaderBoardModel.objects.get(player=name).rd2_ctpld\n except:\n rd2ctpld_points = \"-\"\n try:\n rd2ctpld_rank = LeaderBoardModel.objects.filter(rd2_ctpld__gt=rd2ctpld_points).count() + 1\n except:\n rd2ctpld_rank = \"-\"\n try:\n rd2bonus_points = LeaderBoardModel.objects.get(player=name).rd2_bonus\n except:\n rd2bonus_points = \"-\"\n try:\n rd2bonus_rank = LeaderBoardModel.objects.filter(rd2_bonus__gt=rd2bonus_points).count() + 1\n except:\n rd2bonus_rank = \"-\"\n try:\n rd2total_points = rd2golf_points + rd2ctpld_points + rd2bonus_points\n except:\n rd2total_points = \"-\"\n try:\n rd2total_rank = LeaderBoardModel.objects.filter(rd2_total__gt=rd2total_points).count() + 1\n except:\n rd2total_rank = \"-\"\n\n try:\n round2overall_points = list(LeaderBoardModel.objects.aggregate(Sum('rd2_total')).values())[0]\n except:\n round2overall_points = 0\n\n ##START ROUND 3 CALCULATIONS -->\n #Trigger to show score only when round finished\n try:\n rd3holes_played = Rd3SlotModel.objects.get(player_name__name=name).player_holesplayed\n if rd3holes_played is None:\n rd3holes_played = 0\n else:\n rd3holes_played = Rd3SlotModel.objects.get(player_name__name=name).player_holesplayed\n except:\n rd3holes_played = 0\n\n #Rd3 Player golf score & rank\n if rd3holes_played >= target_holes:\n rd3golf_score = Rd3SlotModel.objects.get(player_name__name=name).player_score\n rd3golf_scoreRank = Rd3SlotModel.objects.filter(player_score__lt=rd2golf_score).count() + 1\n rd3golf_stbl = Rd3SlotModel.objects.get(player_name__name=name).player_stbl\n rd3golf_stblRank = Rd3SlotModel.objects.filter(player_stbl__gt=rd2golf_stbl).count() + 1\n else:\n rd3golf_score = \"-\"\n rd3golf_scoreRank= \"n/a\"\n rd3golf_stbl = \"-\"\n rd3golf_stblRank= \"n/a\"\n\n #Rd2PlayerPoints\n try:\n rd3golf_points = LeaderBoardModel.objects.get(player=name).rd3_golf\n except:\n rd3golf_points = \"-\"\n try:\n rd3golf_rank = LeaderBoardModel.objects.filter(rd3_golf__gt=rd3golf_points).count() + 1\n except:\n rd3golf_rank = \"-\"\n try:\n rd3ctpld_points = LeaderBoardModel.objects.get(player=name).rd3_ctpld\n except:\n rd3ctpld_points = \"-\"\n try:\n rd3ctpld_rank = LeaderBoardModel.objects.filter(rd3_ctpld__gt=rd3ctpld_points).count() + 1\n except:\n rd3ctpld_rank = \"-\"\n try:\n rd3bonus_points = LeaderBoardModel.objects.get(player=name).rd3_bonus\n except:\n rd3bonus_points = \"-\"\n try:\n rd3bonus_rank = LeaderBoardModel.objects.filter(rd3_bonus__gt=rd3bonus_points).count() + 1\n except:\n rd3bonus_rank = \"-\"\n try:\n rd3total_points = rd3golf_points + rd3ctpld_points + rd3bonus_points\n except:\n rd3total_points = \"-\"\n try:\n rd3total_rank = LeaderBoardModel.objects.filter(rd3_total__gt=rd3total_points).count() + 1\n except:\n rd3total_rank = \"-\"\n\n try:\n round3overall_points = 
list(LeaderBoardModel.objects.aggregate(Sum('rd3_total')).values())[0]\n except:\n round3overall_points = 0\n\n ##START OTHER_SCORES CALCULATIONS -->\n\n #Other Player Points\n try:\n social_points = LeaderBoardModel.objects.get(player=name).social\n except:\n social_points = \"-\"\n try:\n social_rank = LeaderBoardModel.objects.filter(social__gt=social_points).count() + 1\n except:\n social_rank = \"-\"\n try:\n bestdressed_points = LeaderBoardModel.objects.get(player=name).best_dressed\n except:\n bestdressed_points = \"-\"\n try:\n bestdressed_rank = LeaderBoardModel.objects.filter(best_dressed__gt=bestdressed_points).count() + 1\n except:\n bestdressed_rank = \"-\"\n try:\n tipping_points = LeaderBoardModel.objects.get(player=name).tipping\n except:\n tipping_points = \"-\"\n try:\n tipping_rank = LeaderBoardModel.objects.filter(tipping__gt=tipping_points).count() + 1\n except:\n tipping_rank = \"-\"\n try:\n othertotal_points = social_points + bestdressed_points + tipping_points\n except:\n othertotal_points = \"-\"\n try:\n othertotal_rank = LeaderBoardModel.objects.filter(other_total__gt=othertotal_points).count() + 1\n except:\n othertotal_rank = \"-\"\n\n try:\n otheroverall_points = list(LeaderBoardModel.objects.aggregate(Sum('other_total')).values())[0]\n except:\n otheroverall_points = 0\n\n## == END SCORING CALCS ==\n\n context ={\n 'name': name,\n 'player_image': player_image,\n 'player_HC': player_HC,\n 'player_highfinish': player_highfinish,\n 'player_tournum': player_tournum,\n 'player_totalpoints': player_totalpoints,\n 'player_totalrank': player_totalrank,\n 'rd1golf_score': rd1golf_score,\n 'rd1golf_stbl': rd1golf_stbl,\n 'rd1golf_scoreRank': rd1golf_scoreRank,\n 'rd1golf_stblRank': rd1golf_stblRank,\n 'rd1golf_points': rd1golf_points,\n 'rd1golf_rank': rd1golf_rank,\n 'rd1ctpld_points': rd1ctpld_points,\n 'rd1ctpld_rank': rd1ctpld_rank,\n 'rd1bonus_points': rd1bonus_points,\n 'rd1bonus_rank': rd1bonus_rank,\n 'rd1total_points': rd1total_points,\n 'rd1total_rank': rd1total_rank,\n 'round1overall_points': round1overall_points,\n 'rd2golf_score': rd2golf_score,\n 'rd2golf_stbl': rd2golf_stbl,\n 'rd2golf_scoreRank': rd2golf_scoreRank,\n 'rd2golf_stblRank': rd2golf_stblRank,\n 'rd2golf_points': rd2golf_points,\n 'rd2golf_rank': rd2golf_rank,\n 'rd2ctpld_points': rd2ctpld_points,\n 'rd2ctpld_rank': rd2ctpld_rank,\n 'rd2bonus_points': rd2bonus_points,\n 'rd2bonus_rank': rd2bonus_rank,\n 'rd2total_points': rd2total_points,\n 'rd2total_rank': rd2total_rank,\n 'round2overall_points': round2overall_points,\n 'rd3golf_score': rd3golf_score,\n 'rd3golf_stbl': rd3golf_stbl,\n 'rd3golf_scoreRank': rd3golf_scoreRank,\n 'rd3golf_stblRank': rd3golf_stblRank,\n 'rd3golf_points': rd3golf_points,\n 'rd3golf_rank': rd3golf_rank,\n 'rd3ctpld_points': rd3ctpld_points,\n 'rd3ctpld_rank': rd3ctpld_rank,\n 'rd3bonus_points': rd3bonus_points,\n 'rd3bonus_rank': rd3bonus_rank,\n 'rd3total_points': rd3total_points,\n 'rd3total_rank': rd3total_rank,\n 'round3overall_points': round3overall_points,\n 'social_points': social_points,\n 'social_rank': social_rank,\n 'bestdressed_points': bestdressed_points,\n 'bestdressed_rank': bestdressed_rank,\n 'tipping_points': tipping_points,\n 'tipping_rank': tipping_rank,\n 'othertotal_points': othertotal_points,\n 'othertotal_rank': othertotal_rank,\n 'otheroverall_points': otheroverall_points,\n\n }\n\n return render(request, 'playerDetail.html', context=context)", "title": "" }, { "docid": "bd4209fed8d5cad44b982dbbec08dbdb", "score": "0.49044922", "text": "def 
show_score(self):\n self.screen.blit(self.score_image, self.score_rect)\n self.screen.blit(self.high_score_image, self.high_score_rect)\n self.screen.blit(self.level_image, self.level_rect)\n # draw rockets\n self.rockets.draw(self.screen)", "title": "" }, { "docid": "70cdd567b347e55aa13859d1023ca6a0", "score": "0.49012008", "text": "def show_score(self):\n self.screen.blit(self.score_image, self.score_rect)\n self.screen.blit(self.lives_image, self.lives_rect)", "title": "" }, { "docid": "35d1e816f3bb215a64141a82f151423b", "score": "0.48983058", "text": "def rd3leaderboard(request):\n\n #Add views\n playing_players = Rd3SlotModel.objects.filter(player_name__isnull=False)\n\n #Add context\n context = {\n 'playing_players': playing_players,\n }\n\n return render(request, 'rd3Leaderboard.html', context=context)", "title": "" }, { "docid": "8446419a117c070dc000a2f408c7aa23", "score": "0.48977476", "text": "async def show_points(self, ctx: Context, *, user: discord.User = None) -> None:\n user = user or ctx.author\n guild_id = ctx.guild.id if ctx.guild else None\n\n embed = discord.Embed(colour=discord.Colour.blue(), timestamp=datetime.utcnow())\n embed.set_author(name=user.display_name, icon_url=user.avatar_url)\n\n # remove point first for the special case of user == author\n author_points = await self.change_points(ctx.author.id, guild_id, -1)\n\n if user == ctx.author:\n points = author_points\n else:\n points = await self.get_points(user.id, guild_id)\n\n if points is None:\n embed.description = f\"{user.mention} hasn't received any points yet.\"\n else:\n role = self.get_role_for(points)\n if role:\n role_text = f\" and is part of the role **{role.name}**\"\n else:\n role_text = \"\"\n\n embed.description = f\"{user.mention} currently has **{points}** point(s) {role_text}\"\n\n embed.set_footer(\n text=f\"You paid a point to see the points of {user.name}. You now have {author_points} point(s).\")\n\n await self.bot.send_embed(ctx, embed)", "title": "" }, { "docid": "b5b1dbbd204cc51c22d76c6f1d451348", "score": "0.4896858", "text": "def show_score(self):\n self.screen.blit(self.score_image, self.score_rect)\n self.screen.blit(self.high_score_image, self.high_score_rect)\n self.screen.blit(self.level_image, self.level_rect)\n self.ships.draw(self.screen)", "title": "" }, { "docid": "aaaee8b570be170da59b89d0d8a22987", "score": "0.48963338", "text": "def get_player_interactive(self, allowDrafted=False):\n # Prompt the user to choose who has been drafted and on which team\n firstNameInit = raw_input('First Name Initial?\\t')\n lastNameInit = raw_input('Last Name Initial? \\t')\n\n print '\\n'\n\n matches = []\n for player in sorted(self.predictionData, key=lambda x: x['RNK']):\n isMatch = all([\n player['FIRST'].lower().startswith(firstNameInit.lower()),\n player['LAST'].lower().startswith(lastNameInit.lower()),\n ])\n\n if isMatch and (allowDrafted or player['F TEAM'] == 'FA'):\n matches.append(player)\n print '# {RNK:>4}: {FIRST:} {LAST:}, {TEAM:} {POS:}'.format(**player)\n\n validRanks = [x['RNK'] for x in matches]\n players = None\n while True:\n r = int(raw_input('\\nwhich rank is correct? 
(0 for none of the above)\\t'))\n if r == 0:\n break\n elif r in validRanks:\n players = [x for x in matches if x['RNK'] == r]\n break\n else:\n print \"\\tThis is not a valid rank!\"\n print \"\\tPlease choose from {}\".format(validRanks)\n\n return players", "title": "" }, { "docid": "51d9ed13de54eb5de30e0141b6a9a79e", "score": "0.48881084", "text": "def getPlayerRankings(self, request):\n players = Player.query().fetch()\n items = [p._copyPlayerToRankForm for p in players]\n sorted_items = sorted(\n items, key=lambda prf: prf.percentage, reverse=True)\n return PlayersRankForm(items=sorted_items)", "title": "" }, { "docid": "a6a8630d16f9aa4d34683745152a4c59", "score": "0.48763537", "text": "def new_pagerank_contrib(self):\n return self.start_page.rank * self.probability", "title": "" }, { "docid": "c5e939f8942488c06b435e1cc7eafc4f", "score": "0.4874214", "text": "async def preview_vote(self, ctx):\n vote = self.vote_manager.get_configuring_vote(ctx.author.id)\n msg = await ctx.send(embed=create_embed(vote))\n await add_reactions(vote, msg)", "title": "" }, { "docid": "85921cbbac5374fed20ad0677aba7700", "score": "0.4866367", "text": "def showTeam(self):\n for n in range(len(self.team)):\n self.team[n].showStats()", "title": "" }, { "docid": "c8bba816fa6880ae02d4f65c3eb004fa", "score": "0.4866328", "text": "async def rep(self, ctx, *, member: discord.Member):\n if member.bot:\n raise Exception(\"You can't rep a bot!\")\n elif member == ctx.author:\n raise Exception(\"You can't rep yourself!\")\n\n reps = await self.bot.database.add_rep(member)\n await ctx.send(f\"✅ **{member.mention}** now has `{reps}` reputation points!\")\n await self.log_rep(ctx.message, member)", "title": "" }, { "docid": "ae5f76d118fc8c5509db50549ec1fe17", "score": "0.48603877", "text": "def display(self):\n self.game_board.print_grid()", "title": "" }, { "docid": "f47f49bd84d8bdb6ae23a3a3df69e2e7", "score": "0.48548338", "text": "def display_risk_rank(self):\n\n self.cls.driver.implicitly_wait(10)\n self.cls.driver.get(self.cls.live_server_url + '/dashboard/')\n WebDriverWait(\n self.cls.driver, 20).until(\n lambda driver: driver.find_element_by_tag_name('body'))\n self.assertEqual(\n self.cls.driver.title,\n 'SPRA | {}\\'s profile'.format(self.cls.user_info.get('user_name')))\n risk = self.cls.driver.find_element_by_id('portfolio_risk')\n self.assertEqual(risk.text, 'N/A')\n rank = self.cls.driver.find_element_by_id('risk_rank')\n self.assertEqual(rank.text, 'N/A')", "title": "" }, { "docid": "d196f67ffbd74386e379b65d3ec335f8", "score": "0.48496094", "text": "def refresh_score(self):\n self._score_label = tk.Label(self._master, text=\"Score: {} - {}\".format(self._model.get_score()[0],self._model.get_score()[1]),font='Arial 15 bold')\n self._score_label.grid(row=3, column =0)", "title": "" }, { "docid": "5ed08c3ec25f5a83280167ac0d7baed2", "score": "0.48445347", "text": "def print_results(self):\n print(\"The estimation of probability winning by switching doors is: \", str(self.num_of_wins / float(self.N) * 100), \"%\")", "title": "" }, { "docid": "23e8a5a1d50872cc0c2387dbee4e7eba", "score": "0.4844284", "text": "def launchWebTopScore(self):\n\t\twebbrowser.open_new(\"http://127.0.0.1:5000/score\")", "title": "" }, { "docid": "65d438f1f3a2a1645ba3b79008fa4563", "score": "0.4843089", "text": "def get_game_statistics(self):\n options = [\"Show correct/incorrect answers by category\",\n \"Show correct/incorrect answers by difficulty\",\n f\"Show top {self.N_TOP_USERS} users\"]\n choice = 
self.ui.get_user_choice(options)\n if choice == options[0]:\n result = self.db.get_results_by('category')\n #result = result.sort_values('Category')\n elif choice == options[1]:\n result = self.db.get_results_by('difficulty')\n # the order is lexicographic but we want it ordered by difficulty.\n result['Difficulty'] = result['Difficulty'].map(lambda x: Difficulties[x])\n result = result.sort_values('Difficulty')\n elif choice == options[2]:\n result = self.db.get_results_by('user', limit=self.N_TOP_USERS, order_by='Correct', ascending=False)\n if len(result) > 0:\n self.ui.show_data(result, bar=choice != options[2])\n else: # no data\n self.ui.alert(\"There is currently no data to show.\")", "title": "" }, { "docid": "b3e9316c2143115762ac09a9dc204206", "score": "0.48420492", "text": "def print_for_challenge(score_list):\n print(', '.join(f'{player.name}:{player.score}' for player in score_list))", "title": "" }, { "docid": "dd1a1c467ae2e6fd16048d0d1e20b402", "score": "0.4841507", "text": "def test_get_reputation_score(self):\n dummy_reputation_score = 0.44000000000000006\n reputation_score = self.android_score.get_reputation_score()\n self.assertEqual(dummy_reputation_score, reputation_score)", "title": "" }, { "docid": "db2518cdb05e581be55bf9ff45638c03", "score": "0.48345786", "text": "def get_user_rankings(self, request):\n users = User.query().fetch()\n max_streaks = [Score.query(user.key == Score.user).\n order(-Score.streak).get() for user in users]\n return Rankings(items=[max_streak.ranking() for max_streak in max_streaks])", "title": "" }, { "docid": "5e6ad305ca98839162157590102b3f6f", "score": "0.48264495", "text": "def test_view_returns_expected_multiple_user_position(self):\n auth_token = 'Token {}'.format(self.second_user3.auth_token.key)\n received_data = self.client.get('/challenges/selfLeaderboardPosition', HTTP_AUTHORIZATION=auth_token).data\n\n self.assertEqual(received_data['position'], 2)\n self.assertEqual(received_data['leaderboard_count'], 5)", "title": "" }, { "docid": "dd808073990afeb033df476dcfe62583", "score": "0.4820132", "text": "def player_standings():\n refresh_views()\n query = \"\"\"\n SELECT players.id, players.name, v_player_wins.wins, v_matches.matches\n FROM players\n LEFT JOIN v_player_wins ON players.id = v_player_wins.player\n LEFT JOIN v_matches ON players.id = v_matches.player\n GROUP BY players.id, players.name, v_player_wins.wins, v_matches.matches\n ORDER BY v_player_wins.wins DESC;\n\t\"\"\"\n db, cursor = connect()\n cursor.execute(query)\n results = cursor.fetchall()\n db.close()\n return results", "title": "" }, { "docid": "1f3f27e1b992dc7773105f42e0cf6840", "score": "0.48155794", "text": "def show_score(self):\r\n self.screen.blit(self.score_image, self.score_rect)\r\n self.screen.blit(self.high_score_image, self.high_score_rect)\r\n self.screen.blit(self.level_image, self.level_rect)\r\n self.ships.draw(self.screen)", "title": "" }, { "docid": "e38b2d47f50c412076a2a673c7206a1b", "score": "0.4813069", "text": "def test_get_reputation_score(self):\n dummy_reputation_score = 0.42500000000000004\n reputation_score = self.itunes_score.get_reputation_score()\n self.assertEqual(dummy_reputation_score, reputation_score)", "title": "" }, { "docid": "f027010b11e07158fd0314b05c3a34ec", "score": "0.48122138", "text": "def show_score(self):\n self.screen.blit(self.score_image, self.score_rect)", "title": "" }, { "docid": "5047272e37aa42d3d29f17539bc0c3a3", "score": "0.48120806", "text": "def show_privileges(self):\r\n for privilege in 
self.privileges:\r\n print(\"- \" + privilege)", "title": "" }, { "docid": "5047272e37aa42d3d29f17539bc0c3a3", "score": "0.48120806", "text": "def show_privileges(self):\r\n for privilege in self.privileges:\r\n print(\"- \" + privilege)", "title": "" } ]
0f56ae144be87b4aecaf10b5786b8608
Transform ref_point using same transforms as those applied to data.
[ { "docid": "9289eeb4c4a6ca5e01b8cee83ee0a20b", "score": "0.74598885", "text": "def _transform_ref_point(\n self, ref_point: Dict[str, float], padding_obs_data: ObservationData\n ) -> Dict[str, float]:\n metric_names = list(self._metric_names or [])\n objective_metric_names = list(self._objective_metric_names or [])\n num_metrics = len(metric_names)\n # Create synthetic ObservationData representing the reference point.\n # Pad with non-objective outcomes from existing data.\n # Should always have existing data with BO.\n padding_obs_data\n padded_ref_dict: Dict[str, float] = dict(\n zip(padding_obs_data.metric_names, padding_obs_data.means)\n )\n padded_ref_dict.update(ref_point)\n ref_obs_data = [\n ObservationData(\n metric_names=list(padded_ref_dict.keys()),\n means=np.array(list(padded_ref_dict.values())),\n covariance=np.zeros((num_metrics, num_metrics)),\n )\n ]\n ref_obs_feats = []\n\n # Apply initialized transforms to reference point.\n for t in self.transforms.values():\n ref_obs_data = t.transform_observation_data(ref_obs_data, ref_obs_feats)\n transformed_ref_obsd = ref_obs_data.pop()\n transformed_ref_dict = dict(\n zip(transformed_ref_obsd.metric_names, transformed_ref_obsd.means)\n )\n transformed_ref_point = {\n objective_metric_name: transformed_ref_dict[objective_metric_name]\n for objective_metric_name in objective_metric_names\n }\n return transformed_ref_point", "title": "" } ]
[ { "docid": "aafeb46b7f285a3e23a27911fa1f244f", "score": "0.7291867", "text": "def TransformPoint(self):\r\n pass", "title": "" }, { "docid": "80496a8057adef353ffb0a6fb1d02cc9", "score": "0.7044938", "text": "def TransformPoints(self):\r\n pass", "title": "" }, { "docid": "e8c9399a9aa21ac7a01f92cbbaf6c6cf", "score": "0.69368607", "text": "def transform(self,point):\n return self.fit_transform(np.append(self.data,point),return_diff=True)[-1]", "title": "" }, { "docid": "b407b2244ee96c5842ac2b95c4840474", "score": "0.6594534", "text": "def _apply_transformation(point, transform):\n point.append(1) # Add 4th coordinate for homogeneous coordinates\n\n quat = [transform.rotation.x,\n transform.rotation.y,\n transform.rotation.z,\n transform.rotation.w]\n\n matrix = quaternion_matrix(quat)\n\n point_rotated = np.dot(matrix, point)\n point_translated = [point_rotated[0] + transform.translation.x,\n point_rotated[1] + transform.translation.y,\n point_rotated[2] + transform.translation.z]\n\n return point_translated", "title": "" }, { "docid": "e8f7f7c7742df75921d2766e685421ca", "score": "0.6526554", "text": "def transform(self, X):\n ...", "title": "" }, { "docid": "e8f7f7c7742df75921d2766e685421ca", "score": "0.6526554", "text": "def transform(self, X):\n ...", "title": "" }, { "docid": "e8f7f7c7742df75921d2766e685421ca", "score": "0.6526554", "text": "def transform(self, X):\n ...", "title": "" }, { "docid": "e8f7f7c7742df75921d2766e685421ca", "score": "0.6526554", "text": "def transform(self, X):\n ...", "title": "" }, { "docid": "5670a872a9641cb1bdcd57163d5ebc38", "score": "0.6509996", "text": "def TransformPoint(self, *args):\n return _gdal.Transformer_TransformPoint(self, *args)", "title": "" }, { "docid": "07264cb8ba6ddb3254c3ff1cf8d17627", "score": "0.6497457", "text": "def forward_transform(self, x):", "title": "" }, { "docid": "b2592468e4c53f21395c7e78e62aa251", "score": "0.649283", "text": "def apply_ants_transform_to_point(transform, point):\n return transform.apply_transform_to_point(point)", "title": "" }, { "docid": "456c66251f5fedbb37e721b67bfcf32d", "score": "0.6421542", "text": "def global_to_local(point: carla.Location, reference: Union[carla.Transform, carla.Location, carla.Rotation]):\n if isinstance(reference, carla.Transform):\n reference.transform(point)\n elif isinstance(reference, carla.Location):\n carla.Transform(reference, carla.Rotation()).transform(point)\n elif isinstance(reference, carla.Rotation):\n carla.Transform(carla.Location(), reference).transform(point)\n else:\n raise ValueError('Argument \"reference\" is none of carla.Transform or carla.Location or carla.Rotation!')", "title": "" }, { "docid": "ad4f0d0f91c4a3849fe679480200c095", "score": "0.6287689", "text": "def _transform_data(\n self,\n obs_feats: List[ObservationFeatures],\n obs_data: List[ObservationData],\n search_space: SearchSpace,\n transforms: Optional[List[Type[Transform]]],\n transform_configs: Optional[Dict[str, TConfig]],\n ) -> Tuple[List[ObservationFeatures], List[ObservationData], SearchSpace]:\n # Run superclass version to fit transforms to observations\n obs_feats, obs_data, search_space = super()._transform_data(\n obs_feats=obs_feats,\n obs_data=obs_data,\n search_space=search_space,\n transforms=transforms,\n transform_configs=transform_configs,\n )\n\n ref_point = self.ref_point\n if ref_point and obs_data:\n self._transformed_ref_point = self._transform_ref_point(\n ref_point=ref_point, padding_obs_data=obs_data[0]\n )\n return obs_feats, obs_data, search_space", "title": "" }, { 
"docid": "db6e7f47b6eb02eb43ff8f1988b07700", "score": "0.625271", "text": "def transform(self, *transformations):", "title": "" }, { "docid": "c9aecfa74db87fcd884f98a396b2af99", "score": "0.6229672", "text": "def transform(self, x, y):\n # return self.transform_2D(x, y)\n return self.transform_perspective(x, y)", "title": "" }, { "docid": "94b7bf23d8dd4023e29e33797bc8c4b0", "score": "0.61835456", "text": "def transform(self, point):\n # Get the current tranformation matrix\n matrix = self.model\n # transform our point by the matrix to model-view\n return matrix * self._get_Vector3(point)", "title": "" }, { "docid": "3a20d088f0e21e0db0102ae7c0c7c5f5", "score": "0.6161791", "text": "def apply_ants_transform_to_image(transform, image, reference, interpolation='linear'):\n return transform.apply_transform_to_image(image, reference, interpolation)", "title": "" }, { "docid": "25f043ca53d1dd9382ef4c67369cb921", "score": "0.6140992", "text": "def transform(self, orig_x, orig_y, ref_x, ref_y, ref_wavelength,\n reverse=False):\n\n if self._scales is None or self._parameters is None:\n raise OpticalModelException(\n 'Transformation not initialized!'\n )\n\n # First, rescale our references.\n scales_x, scales_y, scales_wavelength = self._scales\n scaled_ref_x = self._apply_scale(ref_x, *scales_x)\n scaled_ref_y = self._apply_scale(ref_y, *scales_y)\n scaled_ref_wavelength = self._apply_scale(ref_wavelength,\n *scales_wavelength)\n\n offset_x, offset_y = self._calculate_transformation(\n self._parameters, scaled_ref_x, scaled_ref_y, scaled_ref_wavelength\n )\n\n if reverse:\n scale = -1.\n else:\n scale = 1.\n\n trans_x = orig_x + scale * offset_x\n trans_y = orig_y + scale * offset_y\n\n return (trans_x, trans_y)", "title": "" }, { "docid": "38793f39193c60ca53b374e6ed2c3aeb", "score": "0.6140977", "text": "def transform(self):\n raise NotImplementedError", "title": "" }, { "docid": "38793f39193c60ca53b374e6ed2c3aeb", "score": "0.6140977", "text": "def transform(self):\n raise NotImplementedError", "title": "" }, { "docid": "4313db2c820fb8f0555d57cbad317800", "score": "0.61299664", "text": "def do_transformation(self):\r\n pass", "title": "" }, { "docid": "1208a7bc0469b56cfd87593f79227519", "score": "0.61278385", "text": "def transform(self, X):\n raise NotImplementedError()", "title": "" }, { "docid": "d5c68bb762328ac8e591efedc5ddbf92", "score": "0.61277664", "text": "def transform_point(target_frame, point):\n global tfBuffer\n if tfBuffer is None:\n init()\n try:\n transform = tfBuffer.lookup_transform(target_frame,\n point.header.frame_id, # source frame\n point.header.stamp,\n rospy.Duration(5.0))\n new_pose = do_transform_point(point, transform)\n return new_pose\n except ExtrapolationException as e:\n logging.logwarn(str(e))", "title": "" }, { "docid": "50b14fb7bc1f7abfa4f6d4ba44a534b4", "score": "0.6090512", "text": "def transform(self, points, offset):\n copy = np.array(points)\n if str(offset) in self.offset_to_rotation_axis:\n copy = rotate(\n copy,\n axis=self.offset_to_rotation_axis[str(offset)]\n )\n copy /= self.scale_factor,\n copy += offset * self.radius * self.radius_scale_factor\n return copy", "title": "" }, { "docid": "52be6e4e311237d1e7147bdf81e325ee", "score": "0.6075296", "text": "def transform(self, X):\n return super().transform(X)", "title": "" }, { "docid": "3ae35e9334c85ca87941dbd51eb8074a", "score": "0.60676354", "text": "def transform(self, t):\n self.b.transform(t)", "title": "" }, { "docid": "6e9ffdbeb16e50def2f9a17401d1de6a", "score": "0.597609", "text": "def 
TransformPoints(self, *args):\n return _gdal.Transformer_TransformPoints(self, *args)", "title": "" }, { "docid": "ad8c2b44527499aeec1c3305ecd640b6", "score": "0.59734076", "text": "def _apply_transformations(link_tree, node, transformations=None):\n\n if transformations is None:\n transformations = {}\n transformations[node] = np.eye(4)\n \n current_link = link_tree.nodes[node]\n current_link['pose'] = np.dot(current_link['pose'], transformations[node])\n \n for neighbor in link_tree.neighbors(node):\n edge_data = link_tree.get_edge_data(node, neighbor)\n origin_transform = edge_data.get('origin', np.eye(4))\n \n transform_matrix = np.dot(transformations[node], origin_transform)\n transformations[neighbor] = transform_matrix\n \n URDFParser._apply_transformations(link_tree, neighbor, transformations)", "title": "" }, { "docid": "7f5b17fec36b01fa6220ef77503ae3f2", "score": "0.5952999", "text": "def geo_transform(self):\n return self.__geo_transform", "title": "" }, { "docid": "e00e8487bf6074dbe4537009c7502ea5", "score": "0.59424835", "text": "def transform(self, data):\n return self.model.transform(data)", "title": "" }, { "docid": "8ece55c88347d3e706dd6d7471d98a52", "score": "0.59288645", "text": "def transform(self, obj):\n raise NotImplementedError(\n \"transform() is not implemented in \" + self.__class__.__name__)", "title": "" }, { "docid": "ac9ea8e18a55a078d8148c4613bf0a8f", "score": "0.5901793", "text": "def transform_point(point: np.ndarray, transf_matrix: np.ndarray) -> np.ndarray:\n point = np.expand_dims(point, 0)\n return transform_points(point, transf_matrix)[0]", "title": "" }, { "docid": "df7f17d6f907beb50b3a24e4eb383298", "score": "0.58972263", "text": "def transform(self, X):\n return self._transform(X)", "title": "" }, { "docid": "574484fff0e0ffb1a5f43b3a9b34203d", "score": "0.58932304", "text": "def transform(self, X, copy=False):\n X = self.scaler.transform(X, copy=copy)\n return X", "title": "" }, { "docid": "b60c7296bb86965a91eac4151a8f8e26", "score": "0.5880371", "text": "def PointTransform(self, *args):\n return _MaxPlus.Matrix3_PointTransform(self, *args)", "title": "" }, { "docid": "34a7cb1c83de9de293604f57e8e8d2d2", "score": "0.5876736", "text": "def make_transform(_transform):\n transforms = D(ax=ax.transAxes, data=ax.transData, fig=fig.transFigure)\n if isinstance(_transform, list) or isinstance(_transform, tuple):\n assert len(_transform) == 2\n return blended_transform_factory(transforms[_transform[0]], transforms[_transform[1]])\n else:\n return transforms[_transform]", "title": "" }, { "docid": "05002814a7ff97f20576baf30ab4d8fb", "score": "0.5863404", "text": "def transform(self, scale=1, angle=0, origin=(0,0)):\n\n r = np.array([[np.cos(angle), -np.sin(angle)],\n [np.sin(angle), np.cos(angle)]]) * scale\n\n o = np.array(origin)\n\n pt1 = np.dot(r, np.array((self.x1, self.y1))) + o\n pt2 = np.dot(r, np.array((self.x2, self.y2))) + o\n\n self.x1 = pt1[0]\n self.y1 = pt1[1]\n self.x2 = pt2[0]\n self.y2 = pt2[1]", "title": "" }, { "docid": "bfc8f0f5feb7ead4d47ad0a7883454df", "score": "0.58592254", "text": "def transform_point(self, point, from_frame, to_frame):\n ps = PointStamped()\n # ps.header.stamp = #self.tf_l.getLatestCommonTime(from_frame,\n # to_frame)\n ps.header.frame_id = from_frame\n ps.point = point\n transform_ok = False\n while not transform_ok and not rospy.is_shutdown():\n try:\n target_ps = self.tf_l.transformPoint(to_frame, ps)\n transform_ok = True\n except tf.ExtrapolationException as e:\n rospy.logwarn(\n \"Exception on transforming 
point... trying again \\n(\" +\n str(e) + \")\")\n rospy.sleep(0.2)\n ps.header.stamp = self.tf_l.getLatestCommonTime(\n from_frame, to_frame)\n except tf.LookupException as e:\n rospy.logwarn(\n \"Exception on transforming point... trying again \\n(\" +\n str(e) + \")\")\n rospy.sleep(0.2)\n\n return target_ps", "title": "" }, { "docid": "599fdde73b06b2a6935fe858f914da39", "score": "0.5847249", "text": "def transform_relative_pose_for_ik(self, manip, matrix4, ref_frame, targ_frame):\n if ref_frame == 'world':\n world_from_ref = np.eye(4)\n else:\n ref = self.robot.GetLink(ref_frame)\n world_from_ref = ref.GetTransform()\n \n if targ_frame == 'end_effector': \n targ_from_EE = np.eye(4)\n else:\n world_from_targ = self.robot.GetLink(targ_frame).GetTransform()\n world_from_EE = manip.GetEndEffectorTransform() \n targ_from_EE = np.dot(np.linalg.inv(world_from_targ), world_from_EE) \n \n ref_from_targ_new = matrix4\n world_from_EE_new = np.dot(np.dot(world_from_ref, ref_from_targ_new), targ_from_EE) \n \n return np.array(world_from_EE_new)", "title": "" }, { "docid": "f0dd7a08993478b1da7fd42b5c4cd89c", "score": "0.5842032", "text": "def transform(self, data):\n pass", "title": "" }, { "docid": "f39042ec53b678f015d199fbc1929c36", "score": "0.5840239", "text": "def apply_feature_transform(self, transform):\n self._input_transform = transform", "title": "" }, { "docid": "89eb1e2188ecea4f6a8bbc31315cf51c", "score": "0.58270335", "text": "def transform_points(points, transform_mat):\n transposed = np.transpose(points)\n if len(transposed) > 3:\n transposed = np.delete(transposed, 3, axis=0)\n to_conc = np.ones((1, get_shape(transposed)[1]))\n transposed = np.concatenate((transposed, to_conc), 0)\n mult = np.dot(transform_mat, transposed)\n return np.transpose(mult[0:-1])", "title": "" }, { "docid": "49edd4ac641a2279dea3b34a0f57b10e", "score": "0.5797796", "text": "def _transform(self, X, y=None):\n return self.transformer_.inverse_transform(X=X, y=y)", "title": "" }, { "docid": "6286531b29fb3c005fb23d85a940663d", "score": "0.5786718", "text": "def generate_transformation_matrix(self, reference_frame, point):\n rotation_matrix = self.reference_frame.dcm(reference_frame)\n self._transform = Identity(4).as_mutable()\n self._transform[0:3, 0:3] = rotation_matrix[0:3, 0:3]\n\n _point_vector = self.origin.pos_from(point).express(reference_frame)\n\n self._transform[3, 0] = _point_vector.dot(reference_frame.x)\n self._transform[3, 1] = _point_vector.dot(reference_frame.y)\n self._transform[3, 2] = _point_vector.dot(reference_frame.z)\n return self._transform", "title": "" }, { "docid": "8297749bc8039906a83655a7da464fa1", "score": "0.57853544", "text": "def _roundtrip_transform(x: np.ndarray) -> np.ndarray:\n # apply reverse terminal transform to turn array to ObservationFeatures\n observation_features = [\n ObservationFeatures(\n parameters={p: float(x[i]) for i, p in enumerate(param_names)}\n )\n ]\n # reverse loop through the transforms and do untransform\n for t in reversed(transforms.values()):\n observation_features = t.untransform_observation_features(\n observation_features\n )\n # forward loop through the transforms and do transform\n for t in transforms.values():\n observation_features = t.transform_observation_features(\n observation_features\n )\n # parameters are guaranteed to be float compatible here, but pyre doesn't know\n new_x: List[float] = [\n # pyre-fixme[6]: Expected `Union[_SupportsIndex, bytearray, bytes, str,\n # typing.SupportsFloat]` for 1st param but got `Union[None, bool, 
float,\n # int, str]`.\n float(observation_features[0].parameters[p])\n for p in param_names\n ]\n # turn it back into an array\n return np.array(new_x)", "title": "" }, { "docid": "52189d93f78e6dc3fada07fbca9b71cb", "score": "0.57630557", "text": "def create_ref_transform(\n name, side, index, buffer_grp=False, match_matrix=None, child=None\n):\n valid_sides = [\"L\", \"R\", \"M\"]\n if side not in valid_sides:\n raise AttributeError(\n 'Chosen side is not valid. Valid values are [\"L\", \"R\", \"M\"]'\n )\n name = \"{}_REF_{}_{}_GRP\".format(side, name, str(index))\n name = strings.string_checkup(name, logger_=_LOGGER)\n ref_trs = pmc.createNode(\"transform\", n=name)\n if match_matrix:\n ref_trs.setMatrix(match_matrix, worldSpace=True)\n if buffer_grp:\n create_buffer_grp(node=ref_trs)\n if child:\n ref_trs.addChild(child)\n return ref_trs", "title": "" }, { "docid": "c39a041e9b5abef2d8db8c8771fb07e9", "score": "0.5759768", "text": "def _transform_plane(self, plane_point, target_frame):\n try:\n common_time = self._tf_listener.getLatestCommonTime(target_frame, plane_point.header.frame_id)\n plane_point.header.stamp = common_time\n self._tf_listener.waitForTransform(target_frame, plane_point.header.frame_id,\n plane_point.header.stamp, rospy.Duration(1))\n\n plane_point = self._tf_listener.transformPoint(target_frame, plane_point)\n except(tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n rospy.logerr('Unable to transform %s -> %s' % (plane_point.header.frame_id, target_frame))\n\n return plane_point", "title": "" }, { "docid": "f3cdb641b2537dc8c830ecf705d4774b", "score": "0.5755168", "text": "def transform(self, spatial_reference: SpatialReference or int) -> 'Geometry':\n # Retrieve (or create) the dictionary of cached transforms.\n cached_transforms: Dict[int, Geometry] = None # We're just declaring it here.\n # If we've already created the cache...\n try:\n # ...we should use it.\n cached_transforms = self._caches['transforms']\n except KeyError:\n # Otherwise, create one...\n cached_transforms = {}\n # ...and add it to the caches.\n self._caches['transforms'] = cached_transforms\n # Figure out the target spatial reference.\n sr: SpatialReference = (\n spatial_reference if isinstance(spatial_reference, SpatialReference)\n else SpatialReference.from_srid(srid=spatial_reference)\n )\n # If this geometry is already in the target spatial reference...\n if self.spatial_reference.srid == sr.srid:\n # ...no transformation is necessary.\n return self\n # If we've already transformed for this spatial reference once...\n if sr.srid in cached_transforms:\n # ...just return the previous product.\n return cached_transforms[sr.srid]\n else:\n # We need the OGR geometry.\n ogr_geometry = self._get_ogr_geometry(from_cache=True)\n # Transform the OGR geometry to the new coordinate system...\n ogr_geometry.TransformTo(sr.ogr_sr)\n # ...and build the new djio geometry from it.\n transformed_geometry: Geometry = Geometry.from_ogr(ogr_geom=ogr_geometry)\n # Cache the shapely geometry in case somebody comes calling again.\n cached_transforms[sr.srid] = transformed_geometry\n # Now we can return it.\n return transformed_geometry", "title": "" }, { "docid": "1537c4fb3962b188d6a5229b2314e57d", "score": "0.57513875", "text": "def transform(self, T):\n if not isinstance(T, Rhino.Geometry.Transform):\n M = Rhino.Geometry.Transform(0.0)\n for i in range(4):\n for j in range(4):\n M[i, j] = T[i, j]\n else:\n M = T\n self.geometry.Transform(M)\n if self.object:\n 
self.object.CommitChanges()", "title": "" }, { "docid": "8c7dec87966d67c74964e5935187576e", "score": "0.573024", "text": "def transform(self, data):\n return self.transformer.transform(data)", "title": "" }, { "docid": "674f0573dfb225c0daa4f657c3359b6c", "score": "0.572706", "text": "def translate_points(kp_data_list):", "title": "" }, { "docid": "30f3566250bc7fcddf7e9e1c0bf9d670", "score": "0.5726896", "text": "def Transform(self, *args):\n return _MaxPlus.PolyShape_Transform(self, *args)", "title": "" }, { "docid": "e11a09c42d6b5ff403476b31a3712f03", "score": "0.5713609", "text": "def apply_transformation(self, x):\n xt = x\n for _, transform in self.steps[:-1]:\n xt = transform.fit_transform(xt)\n return xt", "title": "" }, { "docid": "160b60704c17acd6e349aae1ee9602be", "score": "0.57131416", "text": "def transform_points(\n self, pt_data: np.ndarray, px_idx=True, source_res=1, output_idx=True\n ) -> np.ndarray:\n tformed_pts = []\n for pt in pt_data:\n if px_idx is True:\n pt = pt * source_res\n for idx, t in enumerate(self.reg_transforms):\n if idx == 0:\n t_pt = t.inverse_transform.TransformPoint(pt)\n else:\n t_pt = t.inverse_transform.TransformPoint(t_pt)\n t_pt = np.array(t_pt)\n\n if output_idx is True:\n t_pt *= 1 / self._output_spacing[0]\n tformed_pts.append(t_pt)\n\n return np.stack(tformed_pts)", "title": "" }, { "docid": "b2a5e24dbfa4d8a471ff1850175527c3", "score": "0.5709261", "text": "def backward_transform(self, x):", "title": "" }, { "docid": "169a3aacbb3a1f59966e087ad76cce49", "score": "0.5688864", "text": "def getTransform(self, **kwargs):\n \n pass", "title": "" }, { "docid": "741f3910f75158f65a9d7bd134603c9d", "score": "0.5688422", "text": "def transform(self, x, y=None):\n if self.is_complex:\n raise AssertionError('Cannot apply complex transform operation to '\n 'single instance. 
This chain has a complex operation.')\n tx, ty = x, y\n for xf in self.transforms:\n tx, ty = xf.transform(tx, ty)\n return tx, ty", "title": "" }, { "docid": "51dc549c71a548ed68b93afe225e14ac", "score": "0.5676371", "text": "def points_transform(points, trans_matrix_T):\n points_xyz = points[:, :3]\n points_xyz1 = np.concatenate(\n [points_xyz, np.ones((points.shape[0], 1))], axis=1)\n trans_points_xyz1 = points_xyz1 @ trans_matrix_T\n trans_points = trans_points_xyz1[:, :3] / trans_points_xyz1[:, 3:4]\n\n if points.shape[1] > 3:\n trans_points = np.concatenate([trans_points, points[:, 3:]])\n return trans_points", "title": "" }, { "docid": "505c3580977a732ceb98517f88fb49ec", "score": "0.56736827", "text": "def transform():\n transform_ = transforms.Compose(\n [\n transforms.ToTensor()\n ]\n )\n return transform_", "title": "" }, { "docid": "29dedcc8538f994d31780c1ab39e3603", "score": "0.56407666", "text": "def ants_transform_from_displacement_field(field):\n raise ValueError('Displacement field transforms not currently supported')", "title": "" }, { "docid": "daf2fe3c8e3fbbd5ab3612bb51bb8283", "score": "0.56335235", "text": "def align_to_refscan(self):\n if self.refscan is None:\n return\n Tref_inv = self.transforms[self.refscan].inv()\n for t in range(self.nscans):\n self.transforms[t] = (self.transforms[t]).compose(Tref_inv)", "title": "" }, { "docid": "468a9dc3c92439409870b7a5f18c3ebf", "score": "0.56303877", "text": "def transform_points(self, ex_points, model):\n #Create array of old points:\n tpoints = []\n for point in ex_points:\n transformed = numpy.dot(numpy.array([[model[0], model[1]], [model[2], model[3]]]) , numpy.array([point[0][0], point[0][1]])) + numpy.array([model[4],model[5]])\n newPoint = (int(round(transformed.tolist()[0])), int(round(transformed.tolist()[1])))\n tpoints.append(newPoint)\n return tpoints", "title": "" }, { "docid": "757c8809369e6844530e1d0b1ea6fa63", "score": "0.5598569", "text": "def calculate_rigid_transform(refpoints, points):\n\n # rows = []\n # ys = []\n # for (rx, ry), (x, y) in zip(refpoints, points):\n # row = [x, -y, 1, 0]\n # rows.append(row)\n # row = [y, x, 0, 1]\n # rows.append(row)\n # ys.append([rx])\n # ys.append([ry])\n\n ys = [(a,) for args in refpoints for a in args]\n rows = [row for x, y in points for row in ((x, -y, 1, 0), (y, x, 0, 1))]\n\n A = array(rows)\n y = array(ys)\n # print A\n # print y\n soln = linalg.lstsq(A, y)\n # print soln\n a, b, tx, ty = soln[0]\n tx = float(tx[0])\n ty = float(ty[0])\n sum_residuals = soln[1, 0]\n\n # R = array([[a, -b], [b, a]])\n scale = float((a ** 2 + b ** 2) ** 0.5)\n theta = math.degrees(math.acos(a / scale))\n err = (sum_residuals / len(points)) ** 0.5 / scale\n # print err\n # print scale, float(scale)\n return scale, theta, (tx, ty), err", "title": "" }, { "docid": "3cadb7c49d1796fd298c35bb535b1734", "score": "0.5597538", "text": "def final_transform(self):\n return self.fitter.transform.from_vector(self.parameters[-1])", "title": "" }, { "docid": "2df9d799bf0fdf2e76b80723ed8255e0", "score": "0.55904615", "text": "def __call__(self, src_coords):\n return self.transform(src_coords)", "title": "" }, { "docid": "eda5a95cf8a4de67a77d34453cfa14c2", "score": "0.5582024", "text": "def on_transform_out(self, *args):", "title": "" }, { "docid": "5db64e686ec77d020e719b42c91b952c", "score": "0.5577165", "text": "def transform(self, view1_features, view2_features):\n raise NotImplementedError", "title": "" }, { "docid": "8cc78f273bec04ef1a48ecad9ac941b9", "score": "0.5551452", "text": "def 
fit_transform(self, X):\n return self.fit(X).transform(X)", "title": "" }, { "docid": "45b4f98aa9a5fa21b0920f1c2178000d", "score": "0.5550666", "text": "def Transform(self, *args):\n return _MaxPlus.BezierShape_Transform(self, *args)", "title": "" }, { "docid": "4ae94e395ac689108d4712294d275b80", "score": "0.55488974", "text": "def _transform(point, center, scale, resolution, invert=False):\n _pt = np.ones(3)\n _pt[0] = point[0]\n _pt[1] = point[1]\n\n h = scale # NOTE: originally, scale * 200\n t = np.eye(3)\n t[0, 0] = resolution / h\n t[1, 1] = resolution / h\n t[0, 2] = resolution * (-center[0] / h + 0.5)\n t[1, 2] = resolution * (-center[1] / h + 0.5)\n\n if invert:\n t = np.linalg.inv(t)\n new_point = (np.dot(t, _pt))[0:2]\n return new_point.astype(int)", "title": "" }, { "docid": "d26509d4804db380d00c45408c33cdcf", "score": "0.55407697", "text": "def transform_for_training(self):\n pass", "title": "" }, { "docid": "b851333458dae282c4eef8f5e16d4499", "score": "0.553636", "text": "def transform(self, X, y=None):\r\n return X[self.key]", "title": "" }, { "docid": "9631053f8b124c1ae11f85b725617793", "score": "0.5535955", "text": "def apply_points(self, points: np.ndarray) -> np.ndarray:", "title": "" }, { "docid": "9b6cbb0738102b5d68f8a1945762cbbf", "score": "0.55292124", "text": "def transform_points(Points,R,t):\n return [transform_point(p,R,t) for p in Points]", "title": "" }, { "docid": "6b072895e9af604a528cde0abb033e1b", "score": "0.5525682", "text": "def _reflected_to_reference(self, MDS):\n # compute signs of dot products of corresponding columns\n signs = []\n for a, b in zip(MDS.T, self.reference_MDS.T):\n signs.append(1 if np.dot(a, b) > 0 else -1)\n # return a copy of the MDS with the new signs\n return np.array(MDS) * signs", "title": "" }, { "docid": "6e49212f8c2f3638326ec7840d350c80", "score": "0.55135626", "text": "def apply(self, transformation):\n\t\treturn transformation(self)", "title": "" }, { "docid": "2bb514e25ffa7583b5c1448af4f457e8", "score": "0.5501681", "text": "def transform(self, observation):\n raise NotImplementedError", "title": "" }, { "docid": "d9491ad4a285829699fc90aeca4ddd03", "score": "0.55014485", "text": "def transform(self,transformation,inplace=False,**kwargs):\n\n\t\t#Apply the transformation\n\t\ttransformed_data = transformation(self.data,**kwargs)\n\n\t\t#Return the new Ensemble \n\t\tif inplace:\n\t\t\t\n\t\t\tself.data = transformed_data\n\t\t\tself.num_realizations = transformed_data.shape[0]\n\n\t\telse:\n\t\t\treturn self.__class__.fromdata(transformed_data)", "title": "" }, { "docid": "cf51fab21ae389b8660cb0dcfc2076fc", "score": "0.5493336", "text": "def transform(self, transformation):\n self.line.transform(transformation)", "title": "" }, { "docid": "226e2aa9d6236d5c01d6b3d21e451548", "score": "0.5482918", "text": "def apply_transform(x, transform, fill_mode='nearest', fill_value=0.):\n x = x.astype('float32')\n transform = transform_matrix_offset_center(transform, x.shape[1], x.shape[2])\n final_affine_matrix = transform[:2, :2]\n final_offset = transform[:2, 2]\n channel_images = [ndi.interpolation.affine_transform(x_channel, final_affine_matrix,\n final_offset, order=0, mode=fill_mode, cval=fill_value) for x_channel in x]\n x = np.stack(channel_images, axis=0)\n return x", "title": "" }, { "docid": "226e2aa9d6236d5c01d6b3d21e451548", "score": "0.5482918", "text": "def apply_transform(x, transform, fill_mode='nearest', fill_value=0.):\n x = x.astype('float32')\n transform = transform_matrix_offset_center(transform, x.shape[1], 
x.shape[2])\n final_affine_matrix = transform[:2, :2]\n final_offset = transform[:2, 2]\n channel_images = [ndi.interpolation.affine_transform(x_channel, final_affine_matrix,\n final_offset, order=0, mode=fill_mode, cval=fill_value) for x_channel in x]\n x = np.stack(channel_images, axis=0)\n return x", "title": "" }, { "docid": "6b022a214850da0579c8a6b20a032265", "score": "0.5481376", "text": "def transform(self, X, y=None):\n if X.pixeltype != 'float':\n raise ValueError('image.pixeltype must be float ... use TypeCast transform or clone to float')\n\n if len(self.translation) != X.dimension:\n raise ValueError('must give a translation value for each image dimension')\n\n if self.reference is None:\n reference = X\n else:\n reference = self.reference\n\n insuffix = X._libsuffix\n cast_fn = utils.get_lib_fn('translateAntsImage%s_%s' % (insuffix, self.interp))\n casted_ptr = cast_fn(X.pointer, reference.pointer, self.translation)\n return iio.ANTsImage(pixeltype=X.pixeltype, dimension=X.dimension,\n components=X.components, pointer=casted_ptr)", "title": "" }, { "docid": "712ddb4a1cc1d8504d01cf9a7fdde0e5", "score": "0.54786205", "text": "def transform(self, position):\n position = position - self.src_origin\n position = numpy.dot(self.R, position)\n position = position + self.dst_origin\n return position", "title": "" }, { "docid": "c7b5e4445b92f65a395a97c3b010424e", "score": "0.5466488", "text": "def transform(self, *transformations):\n return _transform(self, transformations)", "title": "" }, { "docid": "71a1ba332d9ceb3644cf84e7dee6be99", "score": "0.54660505", "text": "def _apply_transforms(self, record):\n if record:\n for t in self.transforms:\n record = t.transform(record)\n if not record:\n break\n return record", "title": "" }, { "docid": "694e06f76a5acc58b700a70f482577e8", "score": "0.54549605", "text": "def transform_data(self) -> None:\n pass", "title": "" }, { "docid": "bfd4537254a1e89a7185d3909c987756", "score": "0.5454018", "text": "def Transform(self, *args):\n return _MaxPlus.PolyLine_Transform(self, *args)", "title": "" }, { "docid": "f5507584e555166be85f4ee62d3d924b", "score": "0.5452842", "text": "def transform(self, x, y=None):\n return x, y", "title": "" }, { "docid": "981019d6767937c380882f1f3657a2b1", "score": "0.5451725", "text": "def callback(widget, event):\n t = tvtk.Transform()\n bw.get_transform(t)\n bw.prop3d.user_transform = t", "title": "" }, { "docid": "a84b1505576f31cdf6a61f42f234f12a", "score": "0.54485583", "text": "def transform(self, X):\n return (X - self.means) @ self.components", "title": "" }, { "docid": "d4aedfba8bffa0d33ae1494d608774db", "score": "0.5438352", "text": "def transform(src, m, dst=None): # real signature unknown; restored from __doc__\r\n pass", "title": "" }, { "docid": "89080f45b6f5d71220ac4e1f3704f166", "score": "0.5426515", "text": "def transform(data: Any, transformation: BaseTransformation, maintain_positioning: bool = False) -> Any:\n raise RuntimeError(f\"Cannot transform {type(data)}\")", "title": "" }, { "docid": "08b3f26ddb5cc50b0cf6614245664165", "score": "0.542598", "text": "def transform(self, X, y=None, reconstruct=True, deterministic=True):\n return self.session.run(\n self.y,\n feed_dict={\n self.x: self.vae.reconstruct(X, deterministic=deterministic) if reconstruct else X})", "title": "" }, { "docid": "1d9e69c70ca8c2ae79801c854e13a396", "score": "0.54239905", "text": "def fit_transform(self, X, y=None, sample_weight=None):\n # compute mean covariance\n self._check_reference_points(X)\n self.reference_ = 
mean_covariance(X, metric=self.metric,\n sample_weight=sample_weight)\n return tangent_space(X, self.reference_)", "title": "" }, { "docid": "de243aeaf61e29da6ba5fbbc3ba6db6c", "score": "0.5422481", "text": "def on_transform_in(self, *args):", "title": "" }, { "docid": "de5218124ee1f8aa37193cee9fada0d8", "score": "0.54168785", "text": "def _convert_transform(t: Union[Transform, Sequence[float]]) -> Transform:\n return t if isinstance(t, Transform) else Transform(*t)", "title": "" }, { "docid": "87932d6853d73c1620997a53796b3d21", "score": "0.54165316", "text": "def fit_transform(self, X, y=None):\n return self.fit(X, y).transform(X)", "title": "" }, { "docid": "551096926f37395f4a1fac8b1067c95e", "score": "0.5415106", "text": "def transform(self, X):\n return self(X=X)", "title": "" }, { "docid": "fdea8cd366cad5ced898ec999e8cbd18", "score": "0.5406907", "text": "def transform(self, p: np.array) -> np.array:\n R, t = self.getTransform()\n r_t = p[2]\n r_p = p[0:2]\n # Point in frenet frame\n p_f = np.matmul(R.T, r_p) - np.matmul(R.T, t)\n t_f = r_t - self.theta_r\n return np.array([p_f[0], p_f[1], t_f])", "title": "" }, { "docid": "fdea8cd366cad5ced898ec999e8cbd18", "score": "0.5406907", "text": "def transform(self, p: np.array) -> np.array:\n R, t = self.getTransform()\n r_t = p[2]\n r_p = p[0:2]\n # Point in frenet frame\n p_f = np.matmul(R.T, r_p) - np.matmul(R.T, t)\n t_f = r_t - self.theta_r\n return np.array([p_f[0], p_f[1], t_f])", "title": "" } ]
eda41d01a55a56d65e9d5b161b5a42cc
Put entry in dct. If already there, check it's the same.
[ { "docid": "aac336579641b8bb8a0b9fd44be61b14", "score": "0.8130835", "text": "def put(dct, entry):\r\n id = int(entry['id'])\r\n if id in dct:\r\n if entry == dct[id]:\r\n pass\r\n else:\r\n print entry\r\n print dct[id]\r\n assert False\r\n else:\r\n dct[id] = entry", "title": "" } ]
[ { "docid": "74c66a751f436991c28c48de00e0304b", "score": "0.60856813", "text": "def addEntry(self, key, dictVal):\n hashBucket = self.buckets[key%self.numBuckets]\n for i in range(len(hashBucket)):\n if hashBucket[i][0] == key:\n hashBucket[i] = (key, dictVal)\n return\n hashBucket.append((key, dictVal))", "title": "" }, { "docid": "2c0dbe71954b957de81a2919823ed55f", "score": "0.60387325", "text": "def put(self, key, item):\n if key and item:\n self.cache_data[key] = item\n self.access(key)\n if len(self.cache_data) > BaseCaching.MAX_ITEMS:\n key = min(list(self.dct2.keys()))\n to_remove = self.dct2.get(key)\n print(\"DISCARD: {}\".format(to_remove))\n del self.cache_data[to_remove]\n del self.dct1[to_remove]\n del self.dct2[key]", "title": "" }, { "docid": "deae4a5ac20dcff54f7bed8d76016d05", "score": "0.5887493", "text": "def test_add_same_key(dictcache_obj):\n\n retval = dictcache_obj.add(\"test_key\", 123)\n assert retval == 123\n dictcache_obj.add(\"test_key\", 456)\n assert dictcache_obj.get(\"test_key\") == 123\n assert dictcache_obj.size() == 1", "title": "" }, { "docid": "e2bb75465801ecb5a680845154a2783c", "score": "0.576357", "text": "def test_no_duplicates(self):\n # New Node of same name: new value should overwrite previous value\n d = nodict.NoDict()\n d.add(\"Zeppo\", 54)\n self.assertEqual(d.get(\"Zeppo\"), 54)\n d.add(\"Zeppo\", 56)\n self.assertEqual(d.get(\"Zeppo\"), 56)", "title": "" }, { "docid": "dee3d1242a320a0a8ddfc6b5a033df51", "score": "0.56850886", "text": "def make_dict_handleduplicates(keys, vals):\n assert(len(keys) == len(vals))\n\n return", "title": "" }, { "docid": "ce7a24102d8d2066a006858497b600a6", "score": "0.5682747", "text": "def put(self, key, item):\n if key is not None and item is not None:\n if len(self.cache_data) == self.MAX_ITEMS:\n lowest_count = min(self.ccd.values())\n if self.ccd.get(key) == lowest_count:\n self.cache_data[key] = item\n self.ccd[key] += 1\n self.tcd[key] = now.strftime(\"%m/%d/%Y %H:%M:%S:%f\")\n return\n self.lfk = {}\n for keys, vals in self.ccd.copy().items():\n if vals == lowest_count:\n self.lfk[keys] = self.tcd.get(keys)\n min_value = min(self.lfk.values())\n for k, v in self.lfk.items():\n if v == min_value:\n lfru_key = k\n break\n del self.cache_data[lfru_key]\n del self.tcd[lfru_key]\n del self.ccd[lfru_key]\n print(f\"DISCARD: {lfru_key}\")\n\n self.cache_data[key] = item\n if self.ccd.get(key) is None:\n self.ccd[key] = 1\n else:\n self.ccd[key] += 1\n self.tcd[key] = now.strftime(\"%m/%d/%Y %H:%M:%S:%f\")", "title": "" }, { "docid": "a8337aabb0c1248f8ca7c0c00f7a84de", "score": "0.5602996", "text": "def add_to_dictionary_of_sets(self,d,k,v):\n singleton = {v}\n d[k] = d[k].union(singleton) if k in d else singleton", "title": "" }, { "docid": "e6037a09f5401d183d1df75e4001b422", "score": "0.55942976", "text": "def __setitem__(self, k, v):\n j = self._hash_function(k)\n for idx, tup in enumerate(self._table[j]):\n if k in tup: # Key already exists in bucket\n self._table[j][idx] = (k, v) # Update value of key-value pair\n return\n self._table[j].append((k, v)) # Else add key-value pair\n self._n += 1", "title": "" }, { "docid": "a60c39c5e6b9f9e7136c381357567afd", "score": "0.55745643", "text": "def put(self, key, value):\n hash = self.sum_hash(key)\n key_value = [key, value]\n\n if self.bucket[hash] is None:\n self.bucket[hash] = list([key_value])\n return True\n else:\n for pair in self.bucket[hash]:\n if pair[0] == key:\n pair[1] == value\n return True\n self.buckets[hash].append(key_value)\n return True", "title": 
"" }, { "docid": "e992e72f082cb0de11b9c595ad6422f7", "score": "0.5535471", "text": "def mark_exact_duplicates(dict_list, key):\n hash_set = set()\n duplicate_dict = {}\n\n counter = 0 \n for i in range(len(dict_list)):\n temp_len = len(hash_set)\n value = dict_list[i][key]\n hash_set.add(value)\n # If the set size did not increase the value was a duplicate in the dict_list\n if len(hash_set) != temp_len +1:\n if value not in duplicate_dict:\n duplicate_dict[value] = 1\n else:\n duplicate_dict[value] += 1\n counter += 1 \n\n for i in range(len(dict_list)):\n value = dict_list[i][key]\n if value in duplicate_dict:\n dict_list[i][''] = True\n \n return dict_list, counter", "title": "" }, { "docid": "fc48e38395b38753e931ad67953f974d", "score": "0.5533714", "text": "def __eq__(self, other: 'DictionaryEntry') -> bool:\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "bfa5e16c3fd868714bc0485462c2d14e", "score": "0.54977995", "text": "def add_if(dict, key, val):\n if val:\n dict[key] = val", "title": "" }, { "docid": "4f8b0b444e70d0ea3b48884fd28a3ec5", "score": "0.5471022", "text": "def put(self, key, item):\n if key and item:\n self.cache_data[key] = item", "title": "" }, { "docid": "c34ec2bba3ee6b98bfea29f5e4b9ab5d", "score": "0.54492825", "text": "def add(self, elem):\n if not self.contains(elem): # O(1)\n self.table.set(elem, None) # *O(1)\n else:\n raise KeyError(\"Item is already in the set.\")", "title": "" }, { "docid": "81faae925a1b78c025dc07e77b8a6aa5", "score": "0.5427513", "text": "def __setitem__(self, key, value):\n if key in self._storage:\n # if key is in cache\n self._update_item(key, value)\n else:\n # if key is not in cache\n self._add_item(key, value)", "title": "" }, { "docid": "4104b630128a8210bb4a40218f694487", "score": "0.54264003", "text": "def set(self, key, value):\n hash_key = self.hash_func(key)\n key_exists = False\n slot = self.hash_map[hash_key]\n i = 0\n\n for key_value in slot:\n key_, value_ = key_value\n if key == key_:\n key_exists = True\n break\n\n if key_exists:\n slot[i] = (key, value)\n else:\n slot.append((key, value))", "title": "" }, { "docid": "e387001bb055d59bb0c21a334bc0e67f", "score": "0.5355502", "text": "def put(self, key, value):\r\n self.map_lock.acquire()\r\n self.hashmap[key] = value\r\n self.map_lock.release()\r\n return True", "title": "" }, { "docid": "9506ad4c97f675d73ff8010541c02e79", "score": "0.5328484", "text": "def sameitems(dictlist):\n checkloop=1\n dictlistlen = len(dictlist)\n final_dict = dictlist[0] #initial list is the \"whole thing\"\n# final_final_dict = {}\n while checkloop < dictlistlen:\n final_final_set = final_dict.items() & dictlist[checkloop].items()\n checkloop = checkloop + 1\n # out dictionary is now a set - convert it back so we can do\n # the next comparison\n final_dict.clear() #clear and repopulate final_dict\n for value in final_final_set: \n final_dict[value[0]] =value[1]\n return final_dict", "title": "" }, { "docid": "93824e9991a018d475ccaadcf1ecf5dd", "score": "0.5314652", "text": "def put(self, key, value):\n\n '''\n get the expected index\n if no collision\n store (key, value)\n else\n go to end of linked list and append it\n '''\n location = self.hash_index(key)\n if self.storage[location] is None:\n self.storage[location] = HashTableEntry(key, value)\n self.population += 1\n if self.find_load_factor() < 0.7:\n self.down_size()\n\n else:\n # put in a new head\n new_node = HashTableEntry(key, value)\n new_node.next = 
self.storage[location]\n self.storage[location] = new_node\n self.population += 1\n if self.find_load_factor() < 0.7:\n self.down_size()", "title": "" }, { "docid": "e2196bda69cd1aecfbee2cce4eca3dfe", "score": "0.53130674", "text": "def test_update_dict(self):\n d1 = {'1': {'1': '1'}}\n d2 = {'2': {'2': '2'}}\n d12 = {'1': {'1': '1'}, '2': {'2': '2'}}\n d3 = {'3': {'3': '4'}}\n d4 = {'3': {'3': '5'}}\n d34 = {'3': {'3': ['4', '5']}}\n\n utils.update_dict(d1, d2)\n self.assertEqual(d12, d1)\n utils.update_dict(d3, d4)\n self.assertEqual(d34, d3)\n\n # Check duplicates\n d22 = {'2': {'2': '2'}}\n d22copy = {'2': {'2': '2'}}\n utils.update_dict(d22, d22copy, allow_duplicates=False)\n self.assertEqual(d22copy, d22)\n utils.update_dict(d22, d22copy, allow_duplicates=True)\n self.assertEqual({'2': {'2': ['2', '2']}}, d22)", "title": "" }, { "docid": "7edb9e9260641aca8a5169f8ca0113fa", "score": "0.52822024", "text": "def _sameoccurence(self, mpath):\n for key, value in self.record.items():\n if key in mpath and mpath[key] != value:\n return False\n else: # all equal keys have same values, thus both are 'equal'.\n self.record.update(mpath) # add items to self.record that are new\n return True", "title": "" }, { "docid": "46727670dc71101431660748a308a84f", "score": "0.5279495", "text": "def insert(self,e):\n\t\tfor i in self.vals[self.hashE(e)]:\n\t\t\tif i == e:\n\t\t\t\treturn\n\t\tself.vals[self.hashE(e)].append(e)", "title": "" }, { "docid": "5f0d54b936c1ec39a45ba6ee620330e0", "score": "0.5271502", "text": "def put(self, key, item):\n if key is not None and item is not None:\n if len(self.cache_data) < BaseCaching.MAX_ITEMS:\n order_of_keys = self.cache_data.keys()\n order_of_keys_c = self.count.keys()\n if key in self.cache_data:\n self.cache_data[key] = item\n self.count[key] += 1\n else:\n self.cache_data[key] = item\n self.count[key] = 1\n\n myTup = [(key, self.cache_data[key]) for key in order_of_keys]\n self.cache_data = OrderedDict(myTup)\n\n CntTup = [(key, self.count[key]) for key in order_of_keys_c]\n self.count = OrderedDict(CntTup)\n else:\n if key not in self.cache_data:\n minKey = min(self.count, key=self.count.get)\n del self.cache_data[minKey]\n print(\"DISCARD: {}\".format(minKey))\n del self.count[minKey]\n self.cache_data[key] = item\n self.count[key] = 1\n else:\n self.cache_data[key] = item\n self.count[key] += 1", "title": "" }, { "docid": "e6f3c3153da6ff752bd3d8f09da76ab9", "score": "0.52628595", "text": "def put(k, v):\n # will overwrite which is good/norml for a dictionary\n index = get_index(k)\n hash_data[index] = v", "title": "" }, { "docid": "b4aef8a6a9eb6f2686eab6c1d831e1f1", "score": "0.5261745", "text": "def insert(self, word):\n if word not in self.hash_set:\n self.hash_set[word] = word", "title": "" }, { "docid": "8ee1f388c31b13c9936ed1ff5aeb4d3a", "score": "0.5256359", "text": "def insert(self, val):\n if val in self.dictionary:\n return False\n else:\n if len(self.store) == self.length:\n self.store.append(val)\n else:\n self.store[self.length] = val\n self.dictionary[val] = self.length\n self.length += 1\n return True", "title": "" }, { "docid": "4f488b75096c1ea6e540a3907376af5b", "score": "0.5253159", "text": "def access(self, key):\n curr_idx = self.dct1.pop(key) if key in self.dct1 else None\n if curr_idx is not None:\n del self.dct2[curr_idx]\n self.dct1[key] = self.idx\n self.dct2[self.idx] = key\n self.idx += 1", "title": "" }, { "docid": "f640d183fe08f766f255044d4ab53a66", "score": "0.52365226", "text": "def insert_one(self, matched_dict):\n\n lid = 
matched_dict.get('location_id')\n time_valid = str(matched_dict.get('time_valid'))\n forecast_source = matched_dict.get('forecast_source')\n\n if self.tree.get(lid) is None:\n self.tree[lid] = {}\n\n if self.tree[lid].get(time_valid) is None:\n self.tree[lid][time_valid] = {}\n\n if self.tree[lid][time_valid].get(forecast_source) is not None:\n print(\"Duplicate Forecast/Observation pair found at ({},{},{})\".format(lid, time_valid, forecast_source))\n\n self.tree[lid][time_valid][forecast_source] = matched_dict", "title": "" }, { "docid": "30a8088feff28b0b5d52366d2068a450", "score": "0.5230971", "text": "def append(dictionary, key, value):\n\ttry: dictionary[key].add(value)\n\texcept KeyError: dictionary[key] = set([value])", "title": "" }, { "docid": "e79d070e103de6984516552f87fd66e5", "score": "0.52239025", "text": "def put(self, key, item):\n if key is not None and item is not None:\n if key in self.cache_data:\n self.key_list.remove(key)\n self.cache_data[key] = item\n self.key_list.append(key)\n if len(self.cache_data) > BaseCaching.MAX_ITEMS:\n least_used_key = self.key_list.pop(0)\n print('DISCARD:', least_used_key)\n del self.cache_data[least_used_key]", "title": "" }, { "docid": "a72cc1b26743adadc8d4219ec3230806", "score": "0.522291", "text": "def set(self, key, value):\n hashed = self._hash(key)\n bucket = self.table_list[hashed]\n for item in bucket:\n if item[0] == key:\n bucket.remove(item)\n break\n bucket.append((key, value))", "title": "" }, { "docid": "1d5af69efd12413965afd88a760865df", "score": "0.5222628", "text": "def put(self, item):\n index = self.hashfunction(item)\n empty_index = False\n slots_checked = 0\n while not empty_index:\n if self.hash_list[index] == None:\n self.hash_list[index] = item\n empty_index = True\n elif index == self.hash_length - 1:\n index = 0\n slots_checked += 1\n elif slots_checked == self.hash_length:\n raise RuntimeError\n else:\n index += 1\n slots_checked += 1", "title": "" }, { "docid": "87a3b198b4efc0a298622353a3b65da7", "score": "0.5193309", "text": "def append(self, entry):\n # check_for_duplicates may not be defined (yet) when unpickling.\n # But if pickling, we never want to check for duplicates anyway.\n if getattr(self, 'check_for_duplicates', False) and entry in self:\n raise ValueError('Entry \"%s\" already exists' % entry.msgid)\n super(_BaseFile, self).append(entry)", "title": "" }, { "docid": "1965add9a9553a241679dd2b803cdabc", "score": "0.518167", "text": "def add(self, key, value):\n\t\tkey_hash = self._get_hash(key)\n\t\tkey_value = (key, value)\n\t\t\n\t\tif not self.map[key_hash]:\n\t\t\tself.map[key_hash] = list([key_value])\n\t\t\treturn True\n\t\telse:\n\t\t\tfor pair in self.map[key_hash]:\n\t\t\t\tif pair[0] == key:\n\t\t\t\t\tpair[1] = value\n\t\t\t\t\treturn True\n\t\t\tself.map[key_hash].append(key_value)\n\t\t\treturn True", "title": "" }, { "docid": "0e89f662af658303a61642b06cb4e01b", "score": "0.51792526", "text": "def put(self, key, item):\n if key and item is not None:\n self.cache_data[key] = item\n if key not in self.cached_list:\n self.cached_list.append(key)\n if len(self.cache_data) > self.MAX_ITEMS:\n pop_key = self.cached_list.pop(-2)\n print(\"DISCARD: {}\".format(pop_key))\n del self.cache_data[pop_key]", "title": "" }, { "docid": "0e59b42e34a8b8f2dc41f97f80d72552", "score": "0.51752937", "text": "def observation(self, observation: gym.core.ObsType) -> gym.core.ObsType:\n new_observation: Dict[str, Any] = observation.copy() # type: ignore\n new_observation[self.new_key] = 
new_observation[self.key_to_duplicate]\n return new_observation # type: ignore", "title": "" }, { "docid": "16e2e5d1a8da930d6f852f28a7c23b1c", "score": "0.51602286", "text": "def test_duplicate_add(self):\n entry = Entry.objects.get(title=\"Using Models in Tests\")\n tag = Tag.objects.get(pk=1)\n self.assertIn(tag, entry.tags.all())\n \n # Re-add the tag. Django's related manager will send an empty pk_set,\n # and cachetree shouldn't choke on it.\n entry.tags.add(tag)", "title": "" }, { "docid": "f06286eb0b155b799ff4b8a059058b55", "score": "0.5157823", "text": "def add_single_expiration_dict(dct, expiration):\n\n\tdct[expiration] = {}", "title": "" }, { "docid": "ea89c76866392805621669d1846535e8", "score": "0.5157568", "text": "def _duplicate_check(self, newID):\n if self._SeenIDs is None:\n self._SeenIDs = {}\n \n if newID in self._SeenIDs:\n self._SeenIDs[newID] += 1\n return True\n else:\n self._SeenIDs[newID] = 1\n return False", "title": "" }, { "docid": "2266eea3ccdece3687bebd1fd421b9b9", "score": "0.5151142", "text": "def _seen_nonce(access_key_id, nonce, _):\n\n cache_key = f\"hawk:{access_key_id}:{nonce}\"\n\n # cache.add only adds key if it isn't present\n seen_cache_key = not cache.add(cache_key, True, timeout=settings.HAWK_RECEIVER_NONCE_EXPIRY_SECONDS)\n\n if seen_cache_key:\n raise AlreadyProcessed(f\"Already seen nonce {nonce}\")\n\n return seen_cache_key", "title": "" }, { "docid": "3610fbd43d3af7aaae50c5e5871c005d", "score": "0.5138563", "text": "def has_duplicate_values(_dictionary):\r\n for key_a in _dictionary:\r\n for key_b in _dictionary:\r\n if key_a != key_b:\r\n if _dictionary[key_a] == _dictionary[key_b]:\r\n return False\r\n return True", "title": "" }, { "docid": "6bcabc655c27e96cb96d992b59db04db", "score": "0.5135105", "text": "def upsert_item(item, count, item_freq):\n if item in item_freq:\n item_freq[item] += count\n else:\n item_freq[item] = count\n\n return item_freq", "title": "" }, { "docid": "f25a48fda3a9b5b44a4a6fd99216c101", "score": "0.5132541", "text": "def set(aMap, key, value):\n ## to set a key value pair, I need to get the bucket, and append the new key, value pair so it can be found later. 
\n\tbucket = get_bucket(aMap, key)\n\ti, k, v = get_slot(aMap, key)\n\t\n\tif i >= 0:\n\t\t#the key exists, replace it\n\t\tbucket[i] = (key, value)\n\telse:\n\t\t#the key does not, append to create it\n\t\tbucket.append((key, value))", "title": "" }, { "docid": "6919c14320266fe2933014e712c6157d", "score": "0.5131579", "text": "def _store(self, key, value, dct):\n # We just entered the context successfully.\n if not self.finalized:\n if value is dct:\n dct = self.dicts[1]\n if isinstance(value, Namespace):\n value.push(key, self, dct)\n if isinstance(value, self.scope_proxy):\n value = self.proxies[value]\n value.validate_parent(dct)\n value.validate_assignment(key, self)\n return value, dct", "title": "" }, { "docid": "a150cf49d126e870e256cd650f3f700d", "score": "0.51189995", "text": "def put(self, key, item):\n if key is None or item is None:\n pass\n if key and item:\n self.cache_data[key] = item\n if len(self.cache_data) > BaseCaching.MAX_ITEMS:\n discard = sorted(self.cache_data.keys())\n self.cache_data.pop(discard[-2])\n print(\"DISCARD: {}\".format(discard[-2]))", "title": "" }, { "docid": "1ea5758c32dcb0e4773c0a3759cc635e", "score": "0.511629", "text": "def put(self, key, item):\n if key and item:\n if key in self.cache_data:\n self.cache_data[key] = item\n return\n if len(self.index) >= BaseCaching.MAX_ITEMS:\n delKey = self.index.pop(0)\n del self.cache_data[delKey]\n print(\"DISCARD:\", delKey)\n self.index.append(key)\n self.cache_data[key] = item", "title": "" }, { "docid": "c573eb08d76d54184d4fcad3590cef5e", "score": "0.51139927", "text": "def add_dict(self, new_dict, word):\n if new_dict.sequence in self.dictionary.keys():\n self.dictionary[new_dict.sequence].update_dict(word)\n else:\n self.dictionary[new_dict.sequence] = new_dict", "title": "" }, { "docid": "37a7813a8cc58b658b69e09337b200fb", "score": "0.51074517", "text": "def put(self, key, item):\n if key and item:\n self.cache_data[key] = item\n self.cache_data.move_to_end(key)\n if len(self.cache_data) > BaseCaching.MAX_ITEMS:\n discarded = self.cache_data.popitem(last=False)\n print('DISCARD: {}'.format(discarded[0]))", "title": "" }, { "docid": "4ae8d903f9850f95afada4e471576d33", "score": "0.5106507", "text": "def __setitem__(self, key, value):\n # Make sure that the item isn't in the cache already. Ideally I would\n # write something that does the getting and setting together, but I\n # didn't.\n test_value = self[key]\n if test_value is not None:\n raise SceneModelException(\n \"key %s is already in the cache. 
This shouldn't happen!\" % key\n )\n\n # Add the item\n self.cache.append((key, value))\n\n # Keep the cache within the maximum size.\n if len(self.cache) > self.max_size:\n self.cache.pop(0)", "title": "" }, { "docid": "4df2ed7d080301d5fa7fb37e6f6f7e4f", "score": "0.50980407", "text": "def put(self, key, item):\n if len(self.cache_data) >= BaseCaching.MAX_ITEMS and \\\n key not in self.__datakeys:\n discard = self.__datakeys.pop()\n del self.cache_data[discard]\n print('DISCARD: {}'.format(discard))\n\n if key and item:\n self.__datakeys.append(key)\n self.cache_data[key] = item", "title": "" }, { "docid": "6d34210b244da65ad47ebed070d46e56", "score": "0.5092138", "text": "def co_diff2dict(dct):\n while True:\n key, value = (yield)\n try:\n dct[key].append(value)\n except KeyError:\n dct[key] = list()\n dct[key].append(value)", "title": "" }, { "docid": "833d52c3bb6b80b2845941387c4389f8", "score": "0.50894594", "text": "def put(self, key, item):\n if key is not None and item is not None:\n if len(self.cache_data) >= BaseCaching.MAX_ITEMS:\n # print(self.MRU_data)\n sorted_index = [\n (k,\n self.MRU_data[k]) for k in sorted(\n self.MRU_data,\n key=self.MRU_data.get,\n reverse=True)][0][0]\n print(\"DISCARD:\", sorted_index)\n del self.cache_data[sorted_index]\n del self.MRU_data[sorted_index]\n self.cache_data[key] = item\n self.MRU_data[key] = self.index\n self.index += 1", "title": "" }, { "docid": "a228d6441d1a52be86a38510891cdff3", "score": "0.5081624", "text": "def put(key, value):\n\tindex = get_index(key)\n\ttable[index] = HashEntry(key, value)", "title": "" }, { "docid": "0f7d6d35b2ef887defcb13c965edc979", "score": "0.5080413", "text": "def test_replace_entry_duplicated(self):\n generic_0 = storepass.model.Generic(\"E1 name\", None, None, None, None,\n None, None)\n generic_1 = storepass.model.Generic(\"E2 name\", None, None, None, None,\n None, None)\n folder = storepass.model.Folder(\"E3 name\", None, None, None,\n [generic_0, generic_1])\n root = storepass.model.Root([folder])\n model = storepass.model.Model(root)\n\n # Try replacing the first generic account with an entry that has the\n # same name as the second one.\n generic_2 = storepass.model.Generic(\"E2 name\", None, None, None, None,\n None, None)\n with self.assertRaises(storepass.exc.ModelException) as cm:\n model.replace_entry(generic_0, generic_2)\n self.assertEqual(str(cm.exception),\n \"Entry 'E3 name/E2 name' already exists\")\n self.assertEqual(len(root.children), 1)\n self.assertEqual(root.children[0], folder)\n self.assertEqual(len(folder.children), 2)\n self.assertEqual(folder.children[0], generic_0)\n self.assertEqual(folder.children[1], generic_1)", "title": "" }, { "docid": "c5c975b2e8f598c486f3539054fccc68", "score": "0.5077197", "text": "def __setitem__(self, key, value):\n\n lkey = key.lower()\n if lkey not in self or self[lkey] != value:\n self.updates.add(lkey)\n if value and lkey in self.deletions:\n self.deletions.remove(lkey)\n dict.__setitem__(self, lkey, value)", "title": "" }, { "docid": "1398ec87464e02a880a0bdb62ff1bca2", "score": "0.50720876", "text": "def add_cache_entry(key, value, cache=dict()):\n cache[key] = value", "title": "" }, { "docid": "b1b8ac65fc1a37ad61fe20370161f40c", "score": "0.50697565", "text": "def _seen_entry(self, entry):\n return entry.link in self.entry_history", "title": "" }, { "docid": "9bd2c33a99cbdd2383ca53d7c67dd86e", "score": "0.50660527", "text": "def save_entry_to_hash(session_name, entry):\n\n find_hash = Hash.find_item_name(session_name)\n if find_hash is 
None:\n find_hash = Hash()\n find_hash.item_name = session_name\n find_hash.fob_method = settings.FOB_METHOD\n find_hash.desc = entry['session_name']\n find_hash.generated_date = entry['dataset_date']\n find_hash.hashcode = entry['dataset_hash']\n find_hash.size = entry['dataset_size']\n find_hash.save()\n\n else:\n find_hash.generated_date = entry['dataset_date']\n find_hash.hashcode = entry['dataset_hash']\n find_hash.size = entry['dataset_size']\n find_hash.save()\n\n return None", "title": "" }, { "docid": "33174e8b53207973ad54b1383376049e", "score": "0.5064448", "text": "def set(self, key, value):\n # Hash the given key to determine where to place the key/value pair\n hashed_key = self._hash_algorithm(key)\n # Create a list with the key & value held in it\n hashed_value = [key, value]\n\n # If the bucket is empty\n if self.table[hashed_key] is None:\n # Add a two value list to the table index of the hashed key\n self.table[hashed_key] = list([hashed_value])\n return True\n else: # If bucket is not empty\n # For each pair in the bucket found...\n for pair in self.table[hashed_key]:\n # If the key/value pair exists... return True\n if pair[0] == key:\n pair[1] = value\n return True\n # If the key/value pair does not exist, append it to the bucket\n self.table[hashed_key].append(hashed_value)\n return True", "title": "" }, { "docid": "eff90afd81f6e3d2671449ed174cf392", "score": "0.50643826", "text": "def match_dict(zero_dict, coord_dict):\n for akey in coord_dict.keys():\n if akey in zero_dict.keys():\n # print(akey)\n zero_dict[akey] = coord_dict[akey]\n return(zero_dict)", "title": "" }, { "docid": "eac585792430f385b37cf7bd9b811f70", "score": "0.5055082", "text": "def put(self, key, value):\n # hash the key\n hash = self.hash_index(key)\n # store value in slot\n # empty slot, directly store it \n if self.slots[hash] == []: \n self.slots[hash] = HashTableEntry(key, value)\n else:\n # slot not empty, go over linked list\n curr = self.slots[hash]\n # search for replicate key, overwrite if found, insert if not found\n while curr is not None:\n if curr.key == key:\n # overwrite\n curr.value = value\n return\n curr = curr.next\n\n # else no replicate key, insert key value to head of linked list\n temp = self.slots[hash]\n self.slots[hash] = HashTableEntry(key, value)\n self.slots[hash].next = temp", "title": "" }, { "docid": "e654d6197d7b2c062f09370d87cc50bf", "score": "0.5045185", "text": "def put(self, key, value):\n hash_idx = self.hash_index(key)\n new_entry = HashTableEntry(key, value)\n\n if self.storage[hash_idx]:\n add_quantity = self.storage[hash_idx].add_entry(new_entry)\n self.entries += add_quantity\n\n return\n\n self.storage[hash_idx] = LinkedList()\n add_quantity = self.storage[hash_idx].add_entry(new_entry)\n self.entries += add_quantity", "title": "" }, { "docid": "c56d4aa764f7f54f65fb6d3c4c2f3be5", "score": "0.5021662", "text": "def dictAddOrInit(d, k, v):\n\tif k in d:\n\t\td[k].add(v)\n\telse:\n\t\td[k] = eqSet([v])", "title": "" }, { "docid": "2c9be9675e79bd7f109967bf823dd7a7", "score": "0.50153875", "text": "def _add_to_dict(dictionary: KEGGmapping, key: str, values: set[str]) -> None:\n if key in dictionary.keys():\n dictionary[key].update(values)\n else:\n dictionary[key] = cp.deepcopy(values) # In case \"values\" is referenced elsewhere, we don't want to update a shallow copy", "title": "" }, { "docid": "2b0f4ad92cc961e8a1a5485dff964afb", "score": "0.50092053", "text": "def Map_set(aMap, key, value):\n\tbucket = Map_get_bucket(aMap, key)\n\ti, k, v = 
Map_get_slot(aMap, key)\n\t\n\tif v:\n\t\tbucket[i] = (key, value)\n\telse:\n\t\tbucket.append((key, value))", "title": "" }, { "docid": "fa282af50d2a6438ac57e3a794c643ac", "score": "0.5003511", "text": "def insert(self, index, entry):\n if self.check_for_duplicates and entry in self:\n raise ValueError('Entry \"%s\" already exists' % entry.msgid)\n super(_BaseFile, self).insert(index, entry)", "title": "" }, { "docid": "b1661f4a81317350b33d05afde26f539", "score": "0.49959275", "text": "def put(self, key, item):\n if key and item is not None:\n self.cache_data[key] = item\n self.cache_data.move_to_end(key)\n if len(self.cache_data) > self.MAX_ITEMS:\n pop_key = self.cache_data.popitem(last=False)\n print(\"DISCARD: {}\".format(str(pop_key[0])))", "title": "" }, { "docid": "d7dc13dc602cdbe42a785eda70e76d55", "score": "0.49873388", "text": "def optimized_sets2(dictionary, current):\n\tfor key in dictionary:\n\t\tfor keys, values in current.items():\n\t\t\tif (values[0] not in key) and (values[1] not in key) and (values[2] not in key):\n\t\t\t\tdictionary[key] += values\n\treturn dictionary", "title": "" }, { "docid": "2df12239f4f6047a13bb2d2b987281f7", "score": "0.49765858", "text": "def update_temp_key(self, key):\n if self.temp_key.find({}).count() == 0:\n self.temp_key.insert_one(key)\n else:\n self.temp_key.find_one_and_replace({}, key)", "title": "" }, { "docid": "991e0902e6dd613ff423d2b90c3cb446", "score": "0.49660966", "text": "def add(self, entry):\n if entry not in self.base_set:\n # Entry wasn't in the set before so store that it needs to be\n # added.\n self.added_entries.add(entry)\n else:\n # Add overrides any previous remove.\n self.removed_entries.discard(entry)", "title": "" }, { "docid": "d5e170218ded24afedcb0cfe53ec231a", "score": "0.49653953", "text": "def __setitem__(self, key: str, value: Any) -> int:\n if self.filled_entries == self.size:\n # no more room\n raise KeyError\n entry = Entry(key, value)\n index = entry.hash % self.actual_size\n existing_entry = self.entries[index]\n while existing_entry and not entry.compare_hash(existing_entry):\n # don't overlap existing values\n index = (index + 1) % self.actual_size\n existing_entry = self.entries[index]\n self.entries[index] = entry\n self.filled_entries += 1\n return index", "title": "" }, { "docid": "15555fa37ddf1139ec158bf2e030d7c4", "score": "0.4961998", "text": "def compare_dict(dict1, dict2):\n for k, v in dict2.items():\n if k not in dict1:\n dict1[k] = v", "title": "" }, { "docid": "c31b27be56764774aefcb14662bde7d7", "score": "0.49595296", "text": "def __eq__(self, other):\n return self[\"key\"] == other[\"key\"]", "title": "" }, { "docid": "e3c01651b701d73f3f651a302e7b5374", "score": "0.49560872", "text": "def add_to_artist(seen,data):\n seen.update(data) # add the new values to the set\n #print(seen)\n return seen", "title": "" }, { "docid": "569192321aa4840a62205a722996a294", "score": "0.4946772", "text": "def put(self, key: str, item: str):\n if (key is not None and item is not None):\n self.cache_time[key] = datetime.now()\n self.cache_data[key] = item\n if (len(self.cache_data) > BaseCaching.MAX_ITEMS):\n my_list = [\n k for k, v in sorted(\n self.cache_time.items(),\n key=lambda p: p[1],\n reverse=True\n )]\n del self.cache_data[my_list[1]]\n del self.cache_time[my_list[1]]\n print(\"DISCARD: \" + str(my_list[1]))", "title": "" }, { "docid": "abee32470915db13b0179572f7fc85fb", "score": "0.4943241", "text": "def update_new(d1, d2):\n for (key, value) in d2.items():\n if not d1.has_key(key):\n d1[key] = 
value", "title": "" }, { "docid": "18da0dbd0ff12aac3b23c36f45d6388c", "score": "0.49393976", "text": "def upsert(self, doc):\r\n\r\n self.doc_dict[doc[self.unique_key]] = doc", "title": "" }, { "docid": "f3384ca9b37dc5fedc7eede3c8e2a2de", "score": "0.49378988", "text": "def put(self, key: int, value: int) -> None:\n hash = key % 10000\n if self.arr[hash] == -1:\n self.arr[hash] = [[key, value]]\n else:\n flag = True\n for i, li in enumerate(self.arr[hash]):\n k, v = li\n if key == k:\n self.arr[hash][i] = [key, value]\n flag = False\n break\n if flag:\n self.arr[hash].append([key, value])", "title": "" }, { "docid": "fe0cf4b415c831c429a3f9b8494447f3", "score": "0.4932373", "text": "def eq_dict(d1, d2):\n for k in d2.keys(): # NOTE: d2\n # Just assign the same value (This is for defaultdict)\n d1[k] = d1[k]\n for k in d1.keys(): # NOTE: d1\n # Just assign the same value (This is for defaultdict)\n d2[k] = d2[k]\n\n return set(d1.items()) == set(d2.items()) # (from: https://stackoverflow.com/a/4527978/2885946)", "title": "" }, { "docid": "2acf23265bf7cbe7da8e65a00fc62c3a", "score": "0.49312526", "text": "def store(self,key,val):\n self.cache[key]=val", "title": "" }, { "docid": "2a6a6ffe95b8148663c44055b9f7119f", "score": "0.49297833", "text": "def __ne__(self, other: 'DictoinaryEntry') -> bool:\n return not self == other", "title": "" }, { "docid": "2e1854db193e26cbdb1be3b2b4b3637f", "score": "0.49297476", "text": "def __setitem__(self, key, item):\n if not item.index == key:\n raise ValueError('key does not match item index attribute')\n if key in self:\n self.remove(key)\n set.add(self, item)", "title": "" }, { "docid": "8a5c443050de8eb49214e264f0276c99", "score": "0.49240464", "text": "def duplicate(self):\n return xmlsecmod.keyDuplicate(self)", "title": "" }, { "docid": "4c2fb2ff860976dcd83be6fd254d44ce", "score": "0.49235028", "text": "def insert(self, val):\n ret = True\n if self.ds.has_key(val):\n ret = False\n else:\n self.count += 1\n self.linked_ds[self.count] = val\n self.ds[val] = self.count \n return ret", "title": "" }, { "docid": "4c85c6038cd11ae2ffc569f1d3740a48", "score": "0.4921756", "text": "def __setitem__(self, key: Any, item: Any):\n # select a random cache and add the item to the cache\n cache = random.choice(self.caches)\n cache.update({key: item})", "title": "" }, { "docid": "ed3ef448d2cf97dea775d43662b57dc6", "score": "0.49120262", "text": "def put(self, key, value):\n bucket, idx = self._index(key)\n array = self.buckets[bucket]\n if idx < 0 :\n array.append((key, value))\n else:\n array[idx] = (key, value)", "title": "" }, { "docid": "812b4b2f5a78ed7a93f4d017dcf5a8a1", "score": "0.49090993", "text": "def insert(term: str, prefix: str, identifier: str) -> None:\n terms = dict(load())\n existing = terms.get(term)\n reference_tuple = ReferenceTuple(prefix, identifier)\n if existing:\n if existing == reference_tuple:\n return None\n else:\n raise KeyError\n terms[term] = reference_tuple\n write(terms)\n load.cache_clear()", "title": "" }, { "docid": "4068353dd113a507ff638e7ac7903ee8", "score": "0.49073145", "text": "def test_setitem(self):\n d = nodict.NoDict()\n d[\"Harpo\"] = 52\n self.assertEqual(d.get(\"Harpo\"), 52)", "title": "" }, { "docid": "509f103dd89edd801e4d92e0280269c5", "score": "0.49064785", "text": "def insert(self, val):\n if val in self.set:\n return False\n self.set.add(val)\n return True", "title": "" }, { "docid": "c5183d56d8ad3c820f5addcfa60f6a2d", "score": "0.4889862", "text": "def insert(self, key, value):\n # Run key through hash function to 
get integer\n # Take the mod of hashed key to get bucket index\n index = self._hash_mod(key)\n # Instantiate new LinkedPair instance\n new_pair = LinkedPair(key, value)\n # If value at index is None, replace\n if self.storage[index] is None:\n self.storage[index] = new_pair\n else: # If there is already a value at index\n node = self.storage[index]\n while node: # Loop through LinkedList\n # Compare each node's key\n if node.key == key: # If key exists, overwrite value\n node.value = value\n elif node.next is None: # If node is tail\n # Add new pair to next of current tail / as new tail\n node.next = new_pair\n # Continue iteration into next node\n node = node.next", "title": "" }, { "docid": "4605a094f4d0add93d18771605edab75", "score": "0.48885524", "text": "def put(self, key, item):\n if key is None or item is None:\n return\n if(len(self.cache_data) >= self.MAX_ITEMS):\n p = max(self.mru.keys(), key=lambda k: self.mru[k])\n self.cache_data.pop(p)\n self.mru.pop(p)\n print(\"DISCARD:\", p)\n self.cache_data[key] = item\n self.mru[key] = self.count\n self.count += 1", "title": "" }, { "docid": "bea39049f6b63f2bc81ee0601dca773b", "score": "0.48884633", "text": "def add(key, value):", "title": "" }, { "docid": "ea29dd74ec6d42001de4e829fe4617be", "score": "0.48846066", "text": "def set_item(key, value):\n if not redis_db.hexists(CACHE_STORE, key): # Time complexity: O(1)\n make_space()\n redis_db.hset(CACHE_STORE, key, value) # setting up key-value pair O(1)\n #Time complexity: O(log(N)) for each item added, where N is the number of elements in the sorted set\n redis_db.zadd(CACHE_KEYS, 0, key) # Init a key with 0 rank", "title": "" }, { "docid": "1791d14fabb066f44c4d954e92c6d587", "score": "0.48822695", "text": "def sadd(self, key, value):\n if not key in self._items:\n self._items[key] = set([value])\n else:\n self._items[key].update([value])", "title": "" }, { "docid": "69034f5c797b53b9b17343e7de78b4c5", "score": "0.4881047", "text": "def insert(self, val):\n notContain = True\n if self.dataDict.has_key(val): notContain = False\n self.dataList.append(val)\n idx = len(self.dataList) - 1\n self.dataDict[val].add(idx)\n return notContain", "title": "" }, { "docid": "1867527458a3cbe604e0fcd3fb3efe93", "score": "0.48750034", "text": "def add_key(self, key, docname):\n cache = self.cache.get(docname, OrderedSet())\n cache.add(key)\n self.cache[docname] = cache", "title": "" }, { "docid": "a45ff7fb57fa312945cf842c6ed7076b", "score": "0.48722547", "text": "def __setitem__(self, k, v):\n for item in self._table:\n if k == item._key: # Found a match:\n item._value = v # reassign value\n return # and quit\n # did not find match for key\n self._table.add_last(self._Item(k, v))", "title": "" }, { "docid": "64c89808c042c1315626c910afce141d", "score": "0.4871876", "text": "def __add_item(self, key, value):\r\n\r\n # in case the key (item) exists\r\n # in the map\r\n if key in self._map:\r\n # removes the item from the internal\r\n # structures (using the key)\r\n self.__remove_item(key)\r\n\r\n # adds the tuple to the tuples list\r\n self.tuples_list.append((key, value))\r\n\r\n # sets the value in the map\r\n self._map[key] = value\r\n\r\n # adds the key to the keys list (only in case\r\n # the keys list is available and set and the key\r\n # is not present in the keys list)\r\n if not self._keys == None and not key in self._keys: self._keys.append(key)", "title": "" }, { "docid": "3d25fc7c8112339a3c9249f35166c573", "score": "0.48714304", "text": "def put(self, key: int, value: int) -> None:\n 
hash_result = self._hash_function(key)\n for item in self.table[hash_result]:\n if item.key == key:\n item.value = value\n return\n self.table[hash_result].append(Item(key, value))", "title": "" }, { "docid": "3d25fc7c8112339a3c9249f35166c573", "score": "0.48714304", "text": "def put(self, key: int, value: int) -> None:\n hash_result = self._hash_function(key)\n for item in self.table[hash_result]:\n if item.key == key:\n item.value = value\n return\n self.table[hash_result].append(Item(key, value))", "title": "" } ]
7637cc26122219aaeb441ef46942a6d2
Easy Train Test Split call.
[ { "docid": "07a292fa1648fecd88b751277960cde6", "score": "0.6491326", "text": "def TrainTestSplit(X, Y, R=0, test_size=0.2):\n return train_test_split(X, Y, test_size=test_size, random_state=R)", "title": "" } ]
[ { "docid": "338cdcf7bb94999d0d6357a807bab52c", "score": "0.7243466", "text": "def patched_train_test_split(*args, **kwargs):\n # pylint: disable=no-method-argument\n original = gorilla.get_original_attribute(model_selection, 'train_test_split')\n\n def execute_inspections(op_id, caller_filename, lineno, optional_code_reference, optional_source_code):\n \"\"\" Execute inspections, add DAG node \"\"\"\n # pylint: disable=too-many-locals\n function_info = FunctionInfo('sklearn.model_selection._split', 'train_test_split')\n input_info = get_input_info(args[0], caller_filename, lineno, function_info, optional_code_reference,\n optional_source_code)\n\n operator_context = OperatorContext(OperatorType.TRAIN_TEST_SPLIT, function_info)\n input_infos = SklearnBackend.before_call(operator_context, [input_info.annotated_dfobject])\n result = original(input_infos[0].result_data, *args[1:], **kwargs)\n backend_result = SklearnBackend.after_call(operator_context,\n input_infos,\n result) # We ignore the test set for now\n train_backend_result = BackendResult(backend_result.annotated_dfobject,\n backend_result.dag_node_annotation)\n test_backend_result = BackendResult(backend_result.optional_second_annotated_dfobject,\n backend_result.optional_second_dag_node_annotation)\n\n description = \"(Train Data)\"\n columns = list(result[0].columns)\n dag_node = DagNode(op_id,\n BasicCodeLocation(caller_filename, lineno),\n operator_context,\n DagNodeDetails(description, columns),\n get_optional_code_info_or_none(optional_code_reference, optional_source_code))\n add_dag_node(dag_node, [input_info.dag_node], train_backend_result)\n\n description = \"(Test Data)\"\n columns = list(result[1].columns)\n dag_node = DagNode(singleton.get_next_op_id(),\n BasicCodeLocation(caller_filename, lineno),\n operator_context,\n DagNodeDetails(description, columns),\n get_optional_code_info_or_none(optional_code_reference, optional_source_code))\n add_dag_node(dag_node, [input_info.dag_node], test_backend_result)\n\n new_return_value = (train_backend_result.annotated_dfobject.result_data,\n test_backend_result.annotated_dfobject.result_data)\n\n return new_return_value\n\n return execute_patched_func(original, execute_inspections, *args, **kwargs)", "title": "" }, { "docid": "338cdcf7bb94999d0d6357a807bab52c", "score": "0.7243466", "text": "def patched_train_test_split(*args, **kwargs):\n # pylint: disable=no-method-argument\n original = gorilla.get_original_attribute(model_selection, 'train_test_split')\n\n def execute_inspections(op_id, caller_filename, lineno, optional_code_reference, optional_source_code):\n \"\"\" Execute inspections, add DAG node \"\"\"\n # pylint: disable=too-many-locals\n function_info = FunctionInfo('sklearn.model_selection._split', 'train_test_split')\n input_info = get_input_info(args[0], caller_filename, lineno, function_info, optional_code_reference,\n optional_source_code)\n\n operator_context = OperatorContext(OperatorType.TRAIN_TEST_SPLIT, function_info)\n input_infos = SklearnBackend.before_call(operator_context, [input_info.annotated_dfobject])\n result = original(input_infos[0].result_data, *args[1:], **kwargs)\n backend_result = SklearnBackend.after_call(operator_context,\n input_infos,\n result) # We ignore the test set for now\n train_backend_result = BackendResult(backend_result.annotated_dfobject,\n backend_result.dag_node_annotation)\n test_backend_result = BackendResult(backend_result.optional_second_annotated_dfobject,\n backend_result.optional_second_dag_node_annotation)\n\n 
description = \"(Train Data)\"\n columns = list(result[0].columns)\n dag_node = DagNode(op_id,\n BasicCodeLocation(caller_filename, lineno),\n operator_context,\n DagNodeDetails(description, columns),\n get_optional_code_info_or_none(optional_code_reference, optional_source_code))\n add_dag_node(dag_node, [input_info.dag_node], train_backend_result)\n\n description = \"(Test Data)\"\n columns = list(result[1].columns)\n dag_node = DagNode(singleton.get_next_op_id(),\n BasicCodeLocation(caller_filename, lineno),\n operator_context,\n DagNodeDetails(description, columns),\n get_optional_code_info_or_none(optional_code_reference, optional_source_code))\n add_dag_node(dag_node, [input_info.dag_node], test_backend_result)\n\n new_return_value = (train_backend_result.annotated_dfobject.result_data,\n test_backend_result.annotated_dfobject.result_data)\n\n return new_return_value\n\n return execute_patched_func(original, execute_inspections, *args, **kwargs)", "title": "" }, { "docid": "4e18bc1b8893291f3e4430bcbcc3de24", "score": "0.7188682", "text": "def main() -> None:\n use_case_dataframe = read_use_case_dataframe()\n train_test_split(use_case_dataframe)", "title": "" }, { "docid": "99c6a645628407fbf9806f6d204889ef", "score": "0.7024909", "text": "def custom_train_test_splitter(method, random_state, test_size, row_ids_list):\n\n if method == 'Simple':\n train, test = train_test_split(row_ids_list, test_size=test_size, random_state=random_state)\n return train, test", "title": "" }, { "docid": "b9778c38c13faa7b6084a3d465d6a1e9", "score": "0.69551575", "text": "def train_test_split(self, *arrays, test_size=0.25, train_size=None, random_state=42, stratify=None):\n self.logger.info('***** In train_test_split started *****')\n try:\n x_train, x_test, y_train, y_test = train_test_split(*arrays, test_size=test_size, train_size=train_size,\n random_state=random_state, stratify=stratify)\n except Exception as e:\n self.logger.error(f'error in DataManagement train_test_split e: {e}')\n raise e\n\n self.logger.info('***** In train_test_split finished *****')\n\n return x_train, y_train, x_test, y_test", "title": "" }, { "docid": "6075848e3787e7f5d0a87a9a67ecbcb6", "score": "0.68815273", "text": "def traintestSplit(test_size=0.3, random_state=42):\n \n independent_variable = tfidfvec()\n target_variable = preprocessing_df()\n\n X_train, X_test, y_train, y_test = train_test_split(\n independent_variable,\n target_variable,\n test_size = test_size,\n random_state = random_state \n )\n\n return X_train, X_test, y_train, y_test", "title": "" }, { "docid": "26d7c69e8b232c86392647b7c2d1aee2", "score": "0.68713355", "text": "def train_test_split(dataset = [], test_precent = 20):\n\n train = []\n test = []\n\n try:\n test_amount = int(len(dataset)*test_precent/100)\n for i in range(5):\n random.shuffle(dataset)\n\n test = dataset[:test_amount]\n train = dataset[test_amount:]\n\n\n\n except Exception as e:\n print(e)\n\n return train, test", "title": "" }, { "docid": "e2dd27dae061f0ff750c148a85a4b4ef", "score": "0.6811058", "text": "def custom_split_protocol(self, algo, train, train_targets, valid,\n valid_targets, test, test_targets):\n ds = self.dataset\n ds.fetch(True)\n ds.build_meta()\n n_cv = ds.descr['n_train'] + ds.descr['n_valid']\n n_test = ds.descr['n_test']\n\n print ds.descr['n_train'], ds.descr['n_valid'], ds.descr['n_test']\n print \"Split assertion 1\", len(train), len(valid), n_cv\n assert(len(train) + len(valid) == n_cv)\n print \"Split assertion 2\", len(train_targets), len(valid_targets), 
n_cv\n assert(len(train_targets) + len(valid_targets) == n_cv)\n print \"Split assertion 3\", len(test), n_test\n assert(len(test) == n_test)\n print \"Split assertion 4\", len(test_targets), n_test\n assert(len(test_targets) == n_test)\n\n train_task = Task('vector_classification',\n name='train',\n x=train.reshape(train.shape[0], -1),\n y=train_targets,\n n_classes=ds.descr['n_classes'])\n\n valid_task = Task('vector_classification',\n name='valid',\n x=valid.reshape(valid.shape[0], -1),\n y=valid_targets,\n n_classes=ds.descr['n_classes'])\n\n test_task = Task('vector_classification',\n name='test',\n x=test.reshape(test.shape[0], -1),\n y=test_targets,\n n_classes=ds.descr['n_classes'])\n\n model = algo.best_model(train=train_task, valid=valid_task)\n algo.loss(model, train_task)\n algo.loss(model, valid_task)\n algo.loss(model, test_task)", "title": "" }, { "docid": "122cfecff9bc3f7c2fe056ef700ede44", "score": "0.6787178", "text": "def main():\n train_val_test()", "title": "" }, { "docid": "faab99ed40fba750c2ad247c0e488974", "score": "0.6654784", "text": "def trainSplit(self, trainDir):\n split = self.TrainSplit()\n posTrainFileNames = os.listdir('%s/pos/' % trainDir)\n negTrainFileNames = os.listdir('%s/neg/' % trainDir)\n for fileName in posTrainFileNames:\n example = self.Example()\n example.words = self.readFile('%s/pos/%s' % (trainDir, fileName))\n example.klass = 'pos'\n split.train.append(example)\n for fileName in negTrainFileNames:\n example = self.Example()\n example.words = self.readFile('%s/neg/%s' % (trainDir, fileName))\n example.klass = 'neg'\n split.train.append(example)\n return split", "title": "" }, { "docid": "d610fb182792a8feac9a421f2d38ee39", "score": "0.6638636", "text": "def train_and_test(self, split=.5, subsample=1, randomize=True, show=False):\n random.shuffle(self.data)\n s = int(len(self.data) * split)\n train_set = self.data[:s]\n self.data = self.data[s:] # Remove train set from further testing\n\n self.detector.set_max_features(40)\n self.train(train_set, show=show)\n self.color_info()\n\n self.detector.set_max_features(900)\n self.evaluate(show=show)\n self.feature_selection()", "title": "" }, { "docid": "8c935c331f3c8e6dd1784b68a199ff24", "score": "0.6619687", "text": "def test_model_split(model, train_split_xy):\n train_X, test_X, train_y, test_y = train_split_xy\n model = clone(model)\n model.fit(train_X, train_y)\n return test_model(model, test_X, test_y)", "title": "" }, { "docid": "b3d54a577542af793a3b0efa4adf95ac", "score": "0.6612836", "text": "def traintestsplit(self, traininds, testinds):\n self.dftrain = self.df.iloc[traininds]\n self.dftest = self.df.iloc[testinds]", "title": "" }, { "docid": "21ab7c9b8133038acdab28cb5fadb501", "score": "0.6605236", "text": "def train(train, test, datafraction=1.):\n pass", "title": "" }, { "docid": "0e17948e728ed73837e796993df9e562", "score": "0.66008615", "text": "def train_test_split(data: Dataset, test_size=0.1):\n # take a sample of text_size\n test = sample(data, int(len(data) * test_size))\n # separate a test sample from an original set and store it a trainset\n train = list(set(data) - set(test))\n\n return train, test", "title": "" }, { "docid": "389556e852a1a47a79a65ffc5ee1922f", "score": "0.6580241", "text": "def split_test(self,test_size=0.2,random=None):\n\n print('Splitting Test Data..')\n self.att_full_train, self.att_test, self.tar_full_train, self.tar_test = train_test_split(self.attributes, self.targets, test_size=test_size, random_state=random)\n print('Done')", "title": "" }, { "docid": 
"8ab5adb8175a2d58b84620fcf23cfcea", "score": "0.65653664", "text": "def train_test_split0(self):\n \n Xtrain, Xtest, ytrain, ytest = train_test_split(self.data,\n self.target, test_size = 0.2,\n random_state = 1512)\n \n return Xtrain, Xtest, ytrain, ytest", "title": "" }, { "docid": "75789feb88eb0641c1b2c68b47aaa87e", "score": "0.6540653", "text": "def training_test_split(api, dataset, rate=0.8, seed='seed',\n training='Training', test='Test'):\n training_set = api.create_dataset(dataset, {\n 'sample_rate': rate,\n 'seed': seed,\n 'name': training})\n test_set = api.create_dataset(dataset, {\n 'sample_rate': rate,\n 'seed': seed,\n 'out_of_bag': True,\n 'name': test})\n\n if api.ok(training_set) and api.ok(test_set):\n return training_set, test_set", "title": "" }, { "docid": "77d2e5cb87ade4d9cf4897fcac72c6ab", "score": "0.65181905", "text": "def train_test_split(self, filepath_for_saving=None, percentage_of_test_data=0.25):", "title": "" }, { "docid": "8d70a7fc0875c23d271d9eaa90e06f42", "score": "0.6504656", "text": "def get_train_test_(self, test_size=0.1):\n self.x_train, self.x_val, self.y_train, self.y_val = train_test_split(self.X,self.Y, test_size=test_size, random_state=42)", "title": "" }, { "docid": "8d70a7fc0875c23d271d9eaa90e06f42", "score": "0.6504656", "text": "def get_train_test_(self, test_size=0.1):\n self.x_train, self.x_val, self.y_train, self.y_val = train_test_split(self.X,self.Y, test_size=test_size, random_state=42)", "title": "" }, { "docid": "6f18f79edca7068de907a2c6a9a9a1e7", "score": "0.6485314", "text": "def test_TrainSegExperiment_Stage1(self):\n pass", "title": "" }, { "docid": "26e0b0df5243cbe8beeede1ad49ecd18", "score": "0.6476987", "text": "def data_train_test_split_simple(features, labels, test_size, is_print=False):\n data = zip(features, labels)\n train, test = data_split(data, 1 - test_size)\n f_train, l_train = zip(*train)\n f_test, l_test = zip(*test)\n if is_print:\n print(\"--- Train/test split results ---\")\n print(\"-- # features:\", len(features[0]))\n print(\"-- # items in train:\", len(l_train))\n audit_get_counts(l_train, is_print=True)\n print(\"-- # items in test:\", len(l_test))\n audit_get_counts(l_test, is_print=True)\n return f_train, f_test, l_train, l_test", "title": "" }, { "docid": "f016414de9a43da15173425289deeddc", "score": "0.646685", "text": "def splitTrainTest(self):\n numberOf1stTrainSamples = int((1 - self.trainTestSplitFactor) * len(self.firstClassAllFiles))\n numberOf2stTrainSamples = int((1 - self.trainTestSplitFactor) * len(self.secondClassAllFiles))\n\n # randomly put train files name into lists\n self.firstClassTrainFiles = random.sample(self.firstClassAllFiles, numberOf1stTrainSamples)\n self.secondClassTrainFiles = random.sample(self.secondClassAllFiles, numberOf2stTrainSamples)\n\n self.firstClassTestFiles = []\n self.secondClassTestFiles = []\n\n # put test files name into lists\n for fileName in self.firstClassAllFiles:\n if fileName not in self.firstClassTrainFiles:\n self.firstClassTestFiles.append(fileName)\n\n for fileName in self.secondClassAllFiles:\n if fileName not in self.secondClassTrainFiles:\n self.secondClassTestFiles.append(fileName)", "title": "" }, { "docid": "fd50a0b281d10e2a1f91495111ce07fd", "score": "0.64651114", "text": "def split_data(self, train, eva, test, rank, collaborator_count):\n if collaborator_count == 1:\n return train, eva, test\n\n fraction = [1.0 / float(collaborator_count)]\n fraction *= (collaborator_count - 1)\n\n # Expand the split list into individual parameters\n 
train_split = train.split(*fraction)\n eva_split = eva.split(*fraction)\n test_split = test.split(*fraction)\n\n train = [train]\n eva = [eva]\n test = [test]\n\n if type(train_split) is not list:\n train.append(train_split)\n eva.append(eva_split)\n test.append(test_split)\n else:\n # Combine all partitions into a single list\n train = [train] + train_split\n eva = [eva] + eva_split\n test = [test] + test_split\n\n # Extract the right shard\n train = train[rank - 1]\n eva = eva[rank - 1]\n test = test[rank - 1]\n\n return train, eva, test", "title": "" }, { "docid": "e8c59e06a684aa606fa680c8b66ae6a5", "score": "0.6430867", "text": "def train_test_splitter(df, test_ratio, key=None):\n\n # calculate maximum number of times test set can be generated\n num_iterations = int(1/test_ratio)\n print('\\nProducing {0} test sets...'.format(num_iterations))\n count = 1\n while count <= num_iterations:\n print('\\n' + '='*80)\n print('='*80)\n print('\\nTrain/Test split # {0}'.format(count))\n test, train, p = split_train_test(df,\n test_ratio,\n key)\n yield train, test, count\n count += 1", "title": "" }, { "docid": "572720dc4259213b5b793743f9b23825", "score": "0.64293706", "text": "def split_train_test(subj_list, indices, test_fold):\n\n train_inds = np.where(indices != test_fold)\n test_inds = np.where(indices == test_fold)\n\n train_subs = []\n for sub in subj_list[train_inds]:\n train_subs.append(sub)\n\n test_subs = []\n for sub in subj_list[test_inds]:\n test_subs.append(sub)\n\n return (train_subs, test_subs)", "title": "" }, { "docid": "18699d03418b34cb937ba0842bf22462", "score": "0.64091545", "text": "def traintest_split(X, y):\n # 80% of the input for training and 20% for training\n Xtrain, Xtest, ytrain, ytest = train_test_split(\n X, y, test_size=0.2, random_state=42, shuffle=True, stratify=y)\n print(Xtrain.shape, ytrain.shape)\n print(Xtest.shape, ytest.shape)\n return Xtrain, Xtest, ytrain, ytest", "title": "" }, { "docid": "1c4a2fae0137f83f03269205ebeafc7c", "score": "0.6407266", "text": "def test_TrainSegExperiment_from_checkpoint(self):\n pass", "title": "" }, { "docid": "aa25d5ae94b1886018a27c2f5308d5a8", "score": "0.640621", "text": "def train_test(self, dataset, idxs):\r\n # split indexes for train, validation, and test (80, 10, 10)\r\n idxs_train = idxs[:int(0.8*len(idxs))]\r\n idxs_test = idxs[int(0.8*len(idxs)):]\r\n\r\n trainloader = DataLoader(DatasetSplit(dataset, idxs_train),\r\n batch_size=self.args.local_batch_size, shuffle=True, drop_last = False)\r\n\r\n testloader = DataLoader(DatasetSplit(dataset, idxs_test),\r\n batch_size=self.args.local_batch_size, shuffle=False)\r\n return trainloader, testloader", "title": "" }, { "docid": "ffbcd4228f798c38e45584b617b3658a", "score": "0.63841444", "text": "def train_test_split(self, \n train_frac: Optional[float] = 0.8\n ) -> Tuple[object, object]:\n train_ids = []\n test_ids = []\n for i in range(len(self.raw_data)):\n if random.choices([True, False], [train_frac, 1-train_frac]):\n train_ids.append(i)\n else:\n test_ids.append(i)\n test = copy.copy(self)\n test.raw_data = test.raw_data.iloc[test_ids]\n self.raw_data = self.raw_data.iloc[train_ids]\n return self, test", "title": "" }, { "docid": "71a8cb107acf75d5b0efa485d7265771", "score": "0.637802", "text": "def test_cli_train(\n setup_train_test,\n epochs,\n validation_steps,\n steps_per_epoch,\n trace_width,\n cell_size,\n batch_size,\n quiet,\n dry_run,\n):\n tmp_path = setup_train_test\n _test_cli_train(\n tmp_path=tmp_path,\n epochs=epochs,\n 
validation_steps=validation_steps,\n steps_per_epoch=steps_per_epoch,\n trace_width=trace_width,\n cell_size=cell_size,\n batch_size=batch_size,\n quiet=quiet,\n dry_run=dry_run,\n )", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.63768655", "text": "def train(self):\n pass", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.63768655", "text": "def train(self):\n pass", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.63768655", "text": "def train(self):\n pass", "title": "" }, { "docid": "26bda20962f00c471f807306f006237e", "score": "0.6354604", "text": "def test_train(self):\n self._estimator.train(input_fn=_train_input_fn, steps=1)\n self._assert_checkpoint(expected_global_step=1)", "title": "" }, { "docid": "00a82f042fb559cb3dc1d945e9b42e8e", "score": "0.63492984", "text": "def train_test_split(\n self,\n train_size: int,\n scaler: Optional[BaseScaler] = None,\n use_tqdm: bool = True,\n ):\n\n if scaler is not None:\n self.scaler = scaler\n self.dataset = scaler(self.dataset)\n\n self.x_train, self.y_train, self.x_test, self.y_test = (\n TrainTestSplitter(\n train_size=train_size,\n dataset=self.dataset,\n use_tqdm=use_tqdm,\n max_pred_horizon=self.max_pred_horizon,\n max_train_horizon=self.max_train_horizon,\n )()\n )", "title": "" }, { "docid": "95610b7ab8577311c3138d781263afba", "score": "0.63454187", "text": "def train(self) -> None:\n self.mode = 'test'", "title": "" }, { "docid": "be3a7741a8f6bb4c05d1b73a70d7f3e8", "score": "0.6344746", "text": "def train_test_split(self, train_frac=0.8):\n self._train_indices, self._test_indices=train_test_split(self.data.index, train_size=train_frac)", "title": "" }, { "docid": "06040c163a344e42cb8579dec34619fa", "score": "0.6337919", "text": "def train_test_split(use_case_dataframe: pd.DataFrame):\n\n sc = MinMaxScaler()\n training_set = use_case_dataframe.iloc[:,1:2].values\n training_data = sc.fit_transform(training_set)\n\n seq_length = int(Config.SEQ_LENGTH)\n x_variable, y_variable = sliding_windows(training_data, seq_length)\n\n train_size = int(len(y_variable) * 0.67)\n test_size = len(y_variable) - train_size\n\n dataX = Variable(torch.Tensor(np.array(x_variable)))\n dataY = Variable(torch.Tensor(np.array(y_variable)))\n\n trainX = Variable(torch.Tensor(np.array(x_variable[0:train_size])))\n trainY = Variable(torch.Tensor(np.array(y_variable[0:train_size])))\n\n testX = Variable(torch.Tensor(np.array(x_variable[train_size:len(x_variable)])))\n testY = Variable(torch.Tensor(np.array(y_variable[train_size:len(y_variable)])))\n\n save_train_test_splits(dataX, dataY, trainX, trainY, testX, testY)\n logger.info('Train and test split is finsihed and saved.')", "title": "" }, { "docid": "3ec49fedf321f0c63540e7773200003d", "score": "0.6332543", "text": "def random_split(data, train_split):\n to_shuffle = data.copy\n random.shuffle(to_shuffle)\n train_end = int(len(to_shuffle) * train_split)\n train = to_shuffle[:train_end]\n test = to_shuffle[train_end:]\n return train, test", "title": "" }, { "docid": "b9d90cd25bdd2eb9420448648d8e867e", "score": "0.6332429", "text": "def train_test_split(self,\n train_fraction: float = None,\n train_study_count: int = None,\n seed: int = 42,\n ) -> Sequence[\"ExpressionDataset\"]:\n raise NotImplementedError", "title": "" }, { "docid": "5cd51137cc52a60628d6b9347ee97b83", "score": "0.6332356", "text": "def test(self):\n if self.importName != None:\n self.restore_pytorch()\n # self.trainModel.cuda()\n total = self.lib.getTestTotal()\n 
for epoch in range(total):\n self.lib.getHeadBatch(self.test_h_addr, self.test_t_addr, self.test_r_addr)\n res = self.trainModel.predict(self.test_h, self.test_t, self.test_r)\n self.lib.testHead(res.data.numpy().__array_interface__['data'][0])\n\n self.lib.getTailBatch(self.test_h_addr, self.test_t_addr, self.test_r_addr)\n res = self.trainModel.predict(self.test_h, self.test_t, self.test_r)\n self.lib.testTail(res.data.numpy().__array_interface__['data'][0])\n if self.log_on:\n print\n epoch\n self.lib.test()", "title": "" }, { "docid": "c87be69736dea06898f15b0c97dfa3ba", "score": "0.6329784", "text": "def __split_train_and_validation(self):\n\n self.train_orig, self.validate_orig, self.train_labels, self.validate_labels = train_test_split(\n self.train_orig,\n self.train_labels,\n test_size=self.split_size,\n random_state=0)\n\n self.__log(\n 'Training features and labels randomized and split with train_test_split (validation % of training set: {})'.format(\n self.split_size))", "title": "" }, { "docid": "fbd725aedee37b4f486b74ccda19f4a0", "score": "0.6329583", "text": "def __train_test_split(self):\n\n log.info('Using separate training and test sets...')\n\n # Load training data\n data_set = self.data_set\n data_dir = data_set.data_dir\n data_set_name = data_set.data_set_name\n use_one_hot_encoding = data_set.use_one_hot_encoding\n train_arff_file_name = get_file_name(data_set_name + '-train', SUFFIX_ARFF)\n train_arff_file = path.join(data_dir, train_arff_file_name)\n test_data_exists = True\n\n if not path.isfile(train_arff_file):\n train_arff_file_name = get_file_name(data_set_name, SUFFIX_ARFF)\n log.warning('File \\'' + train_arff_file + '\\' does not exist. Using \\'' +\n path.join(data_dir, train_arff_file_name) + '\\' instead!')\n test_data_exists = False\n\n train_x, train_y, meta_data = load_data_set_and_meta_data(data_dir, train_arff_file_name,\n get_file_name(data_set_name, SUFFIX_XML))\n\n if use_one_hot_encoding:\n train_x, encoder, meta_data = one_hot_encode(train_x, train_y, meta_data)\n else:\n encoder = None\n\n # Load test data\n if test_data_exists:\n test_x, test_y = load_data_set(data_dir, get_file_name(data_set_name + '-test', SUFFIX_ARFF), meta_data)\n\n if encoder is not None:\n test_x, _ = one_hot_encode(test_x, test_y, meta_data, encoder=encoder)\n else:\n log.warning('No test data set available. 
Model will be evaluated on the training data!')\n test_x = train_x\n test_y = train_y\n\n # Train and evaluate classifier\n self._train_and_evaluate(meta_data, None, train_x, train_y, None, test_x, test_y, first_fold=0,\n current_fold=0, last_fold=0, num_folds=1)", "title": "" }, { "docid": "ba8198efa5279366f4ec79f9443ae5ad", "score": "0.63231444", "text": "def test_choose_best_feature_to_split(self):\n my_data, labels = self.create_dataset()\n returned = trees.choose_best_feature_to_split(my_data)\n self.assertEqual(0, returned)", "title": "" }, { "docid": "878ca02b84116cd27646ab30af9f5662", "score": "0.6322496", "text": "def trainSplit(self, trainDir):\n split = self.TrainSplit()\n posTrainFileNames = os.listdir('%s/pos/' % trainDir)\n negTrainFileNames = os.listdir('%s/neg/' % trainDir)\n for fileName in posTrainFileNames:\n example = self.Example()\n example.words = self.readFile('%s/pos/%s' % (trainDir, fileName))\n example.sentiment = 'pos'\n split.train.append(example)\n for fileName in negTrainFileNames:\n example = self.Example()\n example.words = self.readFile('%s/neg/%s' % (trainDir, fileName))\n example.sentiment = 'neg'\n split.train.append(example)\n return split", "title": "" }, { "docid": "391948d09411bc4cd25f6ba2c814ac9a", "score": "0.6320088", "text": "def train_test_split(shp, savedir, config, client = None): \n #set seed.\n np.random.seed(1)\n most_species = 0\n if client:\n futures = [ ]\n for x in np.arange(config[\"iterations\"]):\n future = client.submit(sample_plots, shp=shp, min_samples=config[\"min_samples\"], test_fraction=config[\"test_fraction\"])\n futures.append(future)\n \n for x in as_completed(futures):\n train, test = x.result()\n if len(train.taxonID.unique()) > most_species:\n print(len(train.taxonID.unique()))\n saved_train = train\n saved_test = test\n most_species = len(train.taxonID.unique()) \n else:\n for x in np.arange(config[\"iterations\"]):\n train, test = sample_plots(shp, min_samples=config[\"min_samples\"], test_fraction=config[\"test_fraction\"])\n if len(train.taxonID.unique()) > most_species:\n print(len(train.taxonID.unique()))\n saved_train = train\n saved_test = test\n most_species = len(train.taxonID.unique())\n \n train = saved_train\n test = saved_test \n \n print(\"There are {} records for {} species for {} sites in filtered train\".format(\n train.shape[0],\n len(train.taxonID.unique()),\n len(train.siteID.unique())\n ))\n \n print(\"There are {} records for {} species for {} sites in test\".format(\n test.shape[0],\n len(test.taxonID.unique()),\n len(test.siteID.unique())\n ))\n \n #Give tests a unique index to match against\n test[\"point_id\"] = test.index.values\n train[\"point_id\"] = train.index.values\n \n return train, test", "title": "" }, { "docid": "371022b7b953424bb11b9fb9383ee3d2", "score": "0.6310743", "text": "def get_standard_train_test_splits(self):\n return None", "title": "" }, { "docid": "e3c2075175edae5c530d395794588c1b", "score": "0.6306234", "text": "def train(self, *_, **__):\n pass", "title": "" }, { "docid": "61dd61e18e73d84a46775da1c23ec96a", "score": "0.630554", "text": "def train(self):\n\n raise NotImplementedError", "title": "" }, { "docid": "d4d6cb6ef1a14075c99957fb0353b938", "score": "0.6300396", "text": "def traintestsimple(self, train_frac):\n inds = self.df.index.values\n np.random.shuffle(inds)\n traininds, testinds = np.split(inds, [int(train_frac*self.N)])\n self.traintestsplit(traininds, testinds)", "title": "" }, { "docid": "5c0adc0165f12d237eba83e4d378f220", "score": "0.62966037", 
"text": "def split_data(data, training_size):\n num_training = int(training_size * len(data))\n num_test = len(data) - num_training\n train = data.head(num_training)\n test = data.tail(num_test)\n return train,test", "title": "" }, { "docid": "b8feccd988c7dcd90898904dc5afbe8d", "score": "0.62864196", "text": "def test_train_split(self, data):\n # Get a random sample\n sample = np.random.choice(data.index, size=int(len(data)*0.75), replace=False)\n # train_data, test_data\n return data.iloc[sample], data.drop(sample)", "title": "" }, { "docid": "616dcff6eec4e549342268ce1cfb980b", "score": "0.6285299", "text": "def train(self):", "title": "" }, { "docid": "616dcff6eec4e549342268ce1cfb980b", "score": "0.6285299", "text": "def train(self):", "title": "" }, { "docid": "616dcff6eec4e549342268ce1cfb980b", "score": "0.6285299", "text": "def train(self):", "title": "" }, { "docid": "46964642d8122aa5de097c918f3d513c", "score": "0.62830156", "text": "def TrainValidationSplit(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "b9575282ed9bc27c4f79b9aaadd237f2", "score": "0.62750757", "text": "def test_cli_train_interface(setup_train_test, quiet):\n tmp_path = setup_train_test\n _test_cli_train(\n tmp_path=tmp_path,\n epochs=1,\n validation_steps=1,\n steps_per_epoch=1,\n trace_width=0.1,\n cell_size=512,\n batch_size=1,\n quiet=quiet,\n dry_run=True,\n )", "title": "" }, { "docid": "213ffe97e801dd0e06b41d138748273b", "score": "0.62739515", "text": "def split_data(basedir, data_split=0.80):\n manip = data_manipulator(basedir)\n manip.train_test_split(data_split=data_split)", "title": "" }, { "docid": "157045457fd60a46323113a5b78d5cd8", "score": "0.62661535", "text": "def baseclass(self):\n return RunnerSplitTrainTest", "title": "" }, { "docid": "e69e964a9f280e025b746b64a43968ff", "score": "0.62593514", "text": "def create_trainvaltest_split(traintest_split_file):\n partition = dict()\n partition['trainval'] = []\n partition['train'] = []\n partition['val'] = []\n partition['test'] = []\n partition['weight_trainval'] = []\n partition['weight_train'] = []\n with open(label_path) as f:\n labels = f.readlines()\n # load PETA.MAT\n # data = loadmat(open('./dataset/peta/PETA.mat', 'r'))\n items = [x for x in range(len(labels))]\n for idx in range(2):\n random.shuffle(items)\n s1 = len(items)*2//3\n s2 = len(items)*7//8\n train = items[:s1]\n val = items[s1:s2]\n test = items[s2:]\n print('train', len(train))\n print('val', len(val))\n print('test', len(test))\n trainval = train + val\n partition['train'].append(train)\n partition['val'].append(val)\n partition['trainval'].append(trainval)\n partition['test'].append(test)\n l_trainval = [labels[x] for x in trainval]\n l_train = [labels[x] for x in train]\n weight_trainval = eval_weights(l_trainval, 9)\n weight_train = eval_weights(l_train, 9)\n print(weight_trainval)\n partition['weight_trainval'].append(weight_trainval)\n partition['weight_train'].append(weight_train)\n with open(traintest_split_file, 'wb') as f:\n pickle.dump(partition, f)", "title": "" }, { "docid": "27cba51149897cda8fc90d8c42df25b1", "score": "0.6258096", "text": "def train(self):\n raise NotImplementedError", "title": "" }, { "docid": "27cba51149897cda8fc90d8c42df25b1", "score": "0.6258096", "text": "def train(self):\n raise NotImplementedError", "title": "" }, { "docid": "27cba51149897cda8fc90d8c42df25b1", "score": "0.6258096", 
"text": "def train(self):\n raise NotImplementedError", "title": "" }, { "docid": "ba1a8381ca832437714e1952abc1f0bd", "score": "0.6234811", "text": "def train(self, *args):\n pass", "title": "" }, { "docid": "1cdd36fd2bb69d7694ea3f73efaed915", "score": "0.62307984", "text": "def train(self, trainfile):", "title": "" }, { "docid": "47a856272e790ac8e131fb1f6974ebb7", "score": "0.62137294", "text": "def splitData(self, testSize): # testsize=0.3 --> 70% training and 30% test\n self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(\n self.__input,\n self.__target,\n test_size=testSize,\n random_state=0\n )", "title": "" }, { "docid": "6b87d50c47a51f3836a853685be19285", "score": "0.62126243", "text": "def testing_splits(self, t=None, flatten=False, corrected_R=True):\n\t\treturn self._get_splits('test', t=t, flatten=flatten, corrected_R=corrected_R)", "title": "" }, { "docid": "d5c9ac6184c450464bb3c2540e82e3cf", "score": "0.62034297", "text": "def train_test_split(self, test_size, random_state):\n train_ids, test_ids, y_train, y_test = model_selection.train_test_split(\n self.patients_ids,\n self.labels,\n test_size=test_size,\n random_state=random_state,\n stratify=self.labels\n )\n return train_ids, test_ids, y_train, y_test", "title": "" }, { "docid": "d5c9ac6184c450464bb3c2540e82e3cf", "score": "0.62034297", "text": "def train_test_split(self, test_size, random_state):\n train_ids, test_ids, y_train, y_test = model_selection.train_test_split(\n self.patients_ids,\n self.labels,\n test_size=test_size,\n random_state=random_state,\n stratify=self.labels\n )\n return train_ids, test_ids, y_train, y_test", "title": "" }, { "docid": "a34d625b6cf900713ec75e89a2f8f979", "score": "0.6194298", "text": "def train(_, config):\n deeplabchop.training.train(config)", "title": "" }, { "docid": "9622cb6f7fc3cea3db1b3829817a94f9", "score": "0.6189527", "text": "def train(self):\r\n print(\"Training method not implemented yet!\")\r\n pass", "title": "" }, { "docid": "2b44364fbf25536ca9717f6a9e5ea7cd", "score": "0.617805", "text": "def test(self, network, dataloader, training_flag):\n pass", "title": "" }, { "docid": "4bb98dd9f5b523c563c6addcff470e6d", "score": "0.6173376", "text": "def train_step(self):\n raise NotImplementedError", "title": "" }, { "docid": "cbd60d9127e31bd89c2b2c6f174f6752", "score": "0.6171362", "text": "def test_split_data():\n # load sample test data\n data = pd.read_csv(\"test/test_data.csv\")\n X_df = data.drop('Exited',axis=1)\n y_df = data['Exited']\n X_train, X_test, y_train, y_test = train_test_split(X_df, y_df, test_size=0.3, random_state=123)\n\n # split data using the function\n X, y = split_data(X_df, y_df, train_size=0.7, test_size=0.3, random_state=123)\n\n # raise AssertionError if keys do not match\n assert X_train.equals(X['train'])\n assert y_test.equals(y['test'])", "title": "" }, { "docid": "2e704b9a32a84ade1f790944d74b2ed6", "score": "0.6170002", "text": "def test_train_multitask_integration(seed):\n cmd = []\n cmd.extend([\"python\", \"src/train_multitask.py\"])\n cmd.extend([\"--flagfile\", f\"{str(PATH/'configs/multitask_flags.txt')}\"])\n cmd.extend([\"--source\", f\"{str(PATH/'data')}\"])\n cmd.extend([\"--save_path\", str(OUTPUT_PATH / seed)])\n cmd.extend([\"--seed\", seed])\n output = subprocess.run(cmd)\n assert output.returncode == 0", "title": "" }, { "docid": "386308ed77fcaaa47da5acb3a692ad5a", "score": "0.61698246", "text": "def train_test_split(self, data, cut_at=0.75):\n # Random sample of the train set is acquired\n train = 
data.sample(frac=cut_at, random_state=200)\n # Rest are put in the test set\n test = data.drop(train.index)\n return train, test", "title": "" }, { "docid": "f600ec0f8b369558e073eb0a6ea346c0", "score": "0.6165174", "text": "def train_test_split(df, test_size=0.1):\n ntrn = int(round(len(df) * (1 - test_size)))\n\n X_train, y_train = _load_data(df.iloc[0:ntrn])\n X_test, y_test = _load_data(df.iloc[ntrn:])\n\n return (X_train, y_train), (X_test, y_test)", "title": "" }, { "docid": "7e5fef79087473562b7a9fe2f9245bcf", "score": "0.61599493", "text": "def train_view2(namebases, basedirs, test=None, use_libsvm=False,\n trace_normalize=False, model_kwargs=None):\n pair_features = [[larray.cache_memmap(None,\n name=view2_filename(nb, snum),\n basedir=bdir) for snum in range(10)] \n for nb, bdir in zip(namebases, basedirs)]\n\n split_data = [verification_pairs('fold_%d' % split_num, test=test) for split_num in range(10)]\n \n train_errs = []\n test_errs = []\n if model_kwargs is None:\n model_kwargs = {}\n \n for ind in range(10):\n train_inds = [_ind for _ind in range(10) if _ind != ind]\n print ('Constructing stuff for split %d ...' % ind)\n test_X = [pf[ind][:] for pf in pair_features]\n\n test_y = split_data[ind][2]\n train_X = [np.vstack([pf[_ind][:] for _ind in train_inds])\n for pf in pair_features]\n train_y = np.concatenate([split_data[_ind][2] for _ind in train_inds])\n train_decisions = np.zeros(len(train_y))\n test_decisions = np.zeros(len(test_y))\n \n #train_Xyd_n, test_Xyd_n = toyproblem.normalize_Xcols(\n # (np.hstack(train_X), train_y, train_decisions,),\n # (np.hstack(test_X), test_y, test_decisions,))\n \n normalized = [dan_normalize((t0, t1),\n trace_normalize=trace_normalize,\n data=None) for t0, t1 in zip(train_X, test_X)]\n train_X = np.hstack([n[0] for n in normalized])\n test_X = np.hstack([n[1] for n in normalized])\n \n train_Xyd_n = (train_X, train_y, train_decisions)\n test_Xyd_n = (test_X, test_y, test_decisions)\n \n print ('Training split %d ...' % ind)\n if use_libsvm:\n if hasattr(use_libsvm, 'keys'):\n kernel = use_libsvm.get('kernel', 'linear')\n else:\n kernel = 'linear'\n if kernel == 'precomputed':\n (_Xtrain, _ytrain, _dtrain) = train_Xyd_n\n print ('Computing training kernel ...')\n Ktrain = np.dot(_Xtrain, _Xtrain.T)\n print ('... computed training kernel of shape', Ktrain.shape)\n train_Xyd_n = (Ktrain, _ytrain, _dtrain)\n train_data = (Ktrain, _ytrain, _dtrain)\n print ('Computing testtrain kernel ...')\n (_Xtest, _ytest, _dtest) = test_Xyd_n\n Ktest = np.dot(_Xtest, _Xtrain.T)\n print ('... 
computed testtrain kernel of shape', Ktest.shape)\n test_Xyd_n = (Ktest, _ytest, _dtest)\n\n model_kwargs['kernel'] = kernel\n svm, _ = train_scikits(train_Xyd_n,\n labelset=[-1, 1],\n model_type='svm.SVC',\n model_kwargs=model_kwargs,\n normalization=False\n )\n else:\n svm = toyproblem.train_svm(train_Xyd_n,\n l2_regularization=1e-3,\n max_observations=20000)\n\n #train_decisions = svm_decisions_lfw(svm, train_Xyd_n)\n #test_decisions = svm_decisions_lfw(svm, test_Xyd_n)\n \n #train_predictions = predictions_from_decisions(train_decisions)\n #test_predictions = predictions_from_decisions(test_decisions)\n\n train_predictions = svm.predict(train_Xyd_n[0])\n test_predictions = svm.predict(test_Xyd_n[0])\n train_err = (train_predictions != train_y).mean()\n test_err = (test_predictions != test_y).mean()\n\n print 'split %d train err %f' % (ind, train_err)\n print 'split %d test err %f' % (ind, test_err)\n \n train_errs.append(train_err)\n test_errs.append(test_err)\n \n train_err_mean = np.mean(train_errs)\n print 'train err mean', train_err_mean\n test_err_mean = np.mean(test_errs)\n print 'test err mean', test_err_mean\n \n return train_err_mean, test_err_mean", "title": "" }, { "docid": "908e3d9ef928f0b4e27d825cd2fc55ac", "score": "0.61579055", "text": "def name(self):\n return 'Split train and validation'", "title": "" }, { "docid": "c1323a3e9eddea783b507e7532bd05f5", "score": "0.61542195", "text": "def load_data(self, test_split_ind):\n \n test_indices = self.splits[test_split_ind]\n train_indices = [x for i,x in enumerate(self.splits) if i!=test_split_ind]\n train_indices = np.array(train_indices).flatten()\n\n train_sampler = SubsetRandomSampler(train_indices)\n test_sampler = SubsetRandomSampler(test_indices)\n\n train_dataloader = DataLoader(self.dataset, batch_size= self.batch_size,\n shuffle=False, num_workers=4, sampler=train_sampler)\n\n test_dataloader = DataLoader(self.dataset, batch_size= self.batch_size,\n shuffle=False, num_workers=4, sampler=test_sampler)\n\n return train_dataloader, test_dataloader", "title": "" }, { "docid": "e383cc81d2144d5e4a3d5d1b4f1891df", "score": "0.6152653", "text": "def train_test_split(sentiment_data):\n train_data = np.array(sentiment_data[:])[:, [2, 3]]\n test_data = np.array(sentiment_data[124848:])[:, [2]]\n return train_data, test_data", "title": "" }, { "docid": "f58d6ab461bbb950d1c2c0be915a8d97", "score": "0.61504793", "text": "def train_step(self, *args, **kwargs):\n raise NotImplementedError('Implement to run training')", "title": "" }, { "docid": "1bfa2407304d122c5beea17c13038a3e", "score": "0.61457014", "text": "def train_test_split(*arrays, test_size=0.25, shufffle=True, random_seed=1):\n # checks\n assert 0 < test_size < 1\n assert len(arrays) > 0\n length = len(arrays[0])\n for i in arrays:\n assert len(i) == length\n \n n_test = int(np.ceil(length*test_size))\n n_train = length - n_test\n\n if shufffle:\n perm = np.random.RandomState(random_seed).permutation(length)\n test_indices = perm[:n_test]\n train_indices = perm[n_test:]\n else:\n train_indices = np.arange(n_train)\n test_indices = np.arange(n_train, length)\n\n return list(chain.from_iterable((_indexing(x, train_indices), _indexing(x, test_indices)) for x in arrays))", "title": "" }, { "docid": "ecc25893b3774215bc03a5941137adf4", "score": "0.6144663", "text": "def train_test_split(xs, ys):\n split = int(len(xs) * .8)\n permutation = np.random.permutation(len(ys))\n train_xs, test_xs = xs[permutation][0:split], xs[permutation][split:]\n train_ys, test_ys = 
ys[permutation][0:split], ys[permutation][split:]\n return train_xs, train_ys, test_xs, test_ys", "title": "" }, { "docid": "f4116f5dfe234d6443798014916aab61", "score": "0.6132126", "text": "def train_test_split(X, y, test_size):\r\n num_instances = len(X)\r\n if isinstance(test_size, float):\r\n test_size = math.ceil(num_instances * test_size)\r\n split_index = num_instances - test_size\r\n\r\n return X[:split_index], X[split_index:], y[:split_index], y[split_index:]", "title": "" }, { "docid": "6a01415684ca307e8182f9aa885bba4a", "score": "0.61299944", "text": "def task4(self):\n\n return train_test_split(\n self.task3(), self.d['price'],\n test_size=0.3,\n random_state=1234\n )", "title": "" }, { "docid": "fa087dbb12faf18a401dd131ac05ef51", "score": "0.61287767", "text": "def main():\n matranse_model.train_test()", "title": "" }, { "docid": "0704e610cef515de9e679b64514607e6", "score": "0.61241704", "text": "def train(self):\n raise NotImplementedError(\"This method should be implemented\")", "title": "" }, { "docid": "5c4ac2f5e2264ce10ba29366bd7a7933", "score": "0.61236674", "text": "def train_test_dev_split(pmid, p_train = 0.7, p_test = 0.15, r1 = 42, r2 = 100):\n train_pmid, other = train_test_split(pmid, test_size = (1 - p_train), random_state = r1)\n test_pmid, dev_pmid = train_test_split(other, test_size = p_test/(1 - p_train), random_state = r2)\n return train_pmid, test_pmid, dev_pmid", "title": "" }, { "docid": "9aea8f9765e5f8f35d779daa605d0806", "score": "0.6117699", "text": "def train_test_split(df, test_size=0.1):\n ntrn = round(len(df) * (1 - test_size))\n\n X_train, y_train = _load_data(df.iloc[0:ntrn])\n X_test, y_test = _load_data(df.iloc[ntrn:])\n\n return (X_train, y_train), (X_test, y_test)", "title": "" }, { "docid": "adfe0d4b0a8f1615fae51d0978365bc7", "score": "0.6116243", "text": "def test_training(self):\n self.hpc.train(self.arr, epochs=2)\n self.assertEqual(self.hpc.tier(1).shape, [2, 1])", "title": "" }, { "docid": "9878df4153cca4b4244cfed9a575a7dd", "score": "0.6115038", "text": "def stash_split(fqdn, result, *argl, **argd):\n global _splits\n if fqdn == \"sklearn.cross_validation.train_test_split\":\n key = id(result[1])\n _splits[key] = result\n\n #We don't actually want to return anything for the analysis; we are using it\n #as a hook to save pointers to the dataset split so that we can easily\n #analyze performance later on.\n return None", "title": "" }, { "docid": "5500e0e608ff1e4e84398dd4c5f665a3", "score": "0.6114302", "text": "def randomly_split(p, train_size, validation_size, test_size):\n if train_size + validation_size + test_size != 1.0:\n raise ValueError(\n 'Train validation and test sizes don`t add up to 1.0.')\n\n class _SplitData(beam.DoFn):\n def process(self, element):\n r = random.random()\n if r < test_size:\n yield beam.pvalue.TaggedOutput('Test', element)\n elif r < 1 - train_size:\n yield beam.pvalue.TaggedOutput('Val', element)\n else:\n yield element\n\n split_data = (\n p | 'SplitData' >> beam.ParDo(_SplitData()).with_outputs(\n 'Test',\n 'Val',\n main='Train'))\n\n return split_data['Train'], split_data['Val'], split_data['Test']", "title": "" }, { "docid": "dd26c36a1797fd150000fc5e9ba00104", "score": "0.6109199", "text": "def train(self, train, dev, test): # <--- implemented PER class\n # IMPLEMENT TRAINING.\n # pass\n\n return self.NERModel.train(train,dev,test)", "title": "" }, { "docid": "033895109146996d7b94672bc1139b90", "score": "0.61020577", "text": "def test_split_dataset(self):\n my_data, labels = self.create_dataset()\n 
returned = trees.split_data_set(my_data, 0, 1)\n self.assertEqual([[1, 'yes'], [1, 'yes'], [0, 'no']], returned)", "title": "" }, { "docid": "c03651af4236dcd0f8b88292a9e4082b", "score": "0.6101615", "text": "def traintest(model, switch, params, X_train, y_train, X_test, y_test, testsize, order):\n\n # set model parameters\n if switch == 'SVC':\n model.set_params(C = params['C'],\n gamma = params['gamma'],\n kernel = params['kernel'])\n elif switch == 'KNN':\n model.set_params(n_neighbors = params['n_neighbors'],\n weights = params['weights'])\n elif switch == 'LR':\n model.set_params(C = params['C'],\n penalty = params['penalty'])\n else:\n print('No model type has been specified, parameters cannot be set and trainig \\\n will use default parameters.')\n \n # train and test the models\n model.fit(X_train, y_train)\n predictions = model.predict(X_test)\n \n true_posneg = 0\n fails = np.ndarray(shape=(testsize,1),dtype=bool)\n for i in range(0,testsize):\n if predictions[i] == y_test[i]:\n true_posneg = true_posneg + 1\n fails[i] = True\n else:\n fails[i] = False\n test_acc = true_posneg/testsize\n\n # display accuracy and parameters\n print(\"--------------------------\")\n print(\"Model: %r\" % switch)\n print(\"\\tTrain accuracy: %0.3f\" % model.score(X_train, y_train)) \n print(\"\\tTest accuracy: %0.3f\" % test_acc)\n print(\"--------------------------\")\n\n # Write the fails output file\n prt.fails(fails,order,switch)\n\n return None", "title": "" }, { "docid": "621b994ff80e027bcae697aaa8fb15a9", "score": "0.61008435", "text": "def _split_dataset(self, train_test_ratio):\n random.seed(self.random_seed)\n random.shuffle(self.control_ls)\n train_ls = list()\n test_ls = list()\n self._list_extend(train_ls, test_ls, self.control_ls, train_test_ratio)\n if self.subset is None:\n random.shuffle(self.heme_ls)\n random.shuffle(self.nucleotide_ls)\n random.shuffle(self.steroid_ls)\n self._list_extend(\n train_ls, test_ls, self.heme_ls, train_test_ratio)\n self._list_extend(\n train_ls, test_ls, self.nucleotide_ls, train_test_ratio)\n self._list_extend(\n train_ls, test_ls, self.steroid_ls, train_test_ratio)\n elif self.subset == \"nucleotide\":\n random.shuffle(self.nucleotide_ls)\n self._list_extend(\n train_ls, test_ls, self.nucleotide_ls, train_test_ratio)\n elif self.subset == \"heme\":\n random.shuffle(self.heme_ls)\n self._list_extend(\n train_ls, test_ls, self.heme_ls, train_test_ratio)\n \n random.shuffle(train_ls)\n random.shuffle(test_ls)\n return train_ls, test_ls", "title": "" }, { "docid": "a34dba24e11380e9af60d30154e7e620", "score": "0.60998684", "text": "def split_train_test(partition_labels, labels):\r\n\r\n # set seeds\r\n #seed(config.fixed_seed)\r\n #set_random_seed(config.fixed_seed)\r\n\r\n partition_train_test = {\"train\": [], \"test\": []}\r\n\r\n # get X (subjects) and corresponding y (labels)\r\n if config.task == \"AD\" or config.task == \"MCI\":\r\n X = np.concatenate((partition_labels[config.class0], partition_labels[config.class1]), axis=0)\r\n y = np.array([0] * len(partition_labels[config.class0]) + [1] * len(partition_labels[config.class1]))\r\n elif config.task == \"CN-MCI-AD\":\r\n X = np.concatenate((partition_labels[config.class0], partition_labels[config.class1], partition_labels[config.class2]), axis=0)\r\n y = np.array([0] * len(partition_labels[config.class0]) + [1] * len(partition_labels[config.class1]) + [2] * len(partition_labels[config.class2]))\r\n\r\n # create k training and test sets (for stratified k cross validations)\r\n if 
config.shuffle_split:\r\n # random distribution of subjects over train and test sets\r\n skf = StratifiedShuffleSplit(n_splits=config.k_cross_validation, test_size=config.test_size, random_state=config.fixed_seed)\r\n else:\r\n # k folds: every subject in test set once\r\n skf = StratifiedKFold(n_splits=config.k_cross_validation, shuffle=True, random_state=config.fixed_seed)\r\n\r\n # split based on X and y\r\n for train_index, test_index in skf.split(X, y):\r\n X_train, X_test = X[train_index], X[test_index]\r\n partition_train_test[\"train\"].append(X_train)\r\n partition_train_test[\"test\"].append(X_test)\r\n\r\n print(\"\\nTRAIN TEST\")\r\n count_sets(partition_train_test, labels)\r\n\r\n return partition_train_test, test_index", "title": "" } ]
1ef65124cda4457e6d1bae66faf6be05
Asserts that a user cannot see the list of comments of a private album
[ { "docid": "f930d48d8b63a1bc7c65f228b0c9fa7f", "score": "0.7511498", "text": "def test_given_existing_user_when_list_comment_of_private_album_then_fail(self):\n # Arrange\n user = UsuarioFixture().create()\n \n album_fixture = AlbumFixture()\n album_fixture.default_acceso = fixtures.ObjectFixture(Acceso.PRIVADO)\n album = album_fixture.create()\n \n # Act\n response = self.run_authenticated(user, 'get', f'/album/{album.id}/comentarios')\n\n # Assert\n self.assertEqual(400, response.status_code)", "title": "" } ]
[ { "docid": "1ceeb5fb1d2d91f655dfb3b4bfce0bb5", "score": "0.67601097", "text": "def test_given_existing_user_when_list_comments_of_public_album_then_see_comments_belong_given_album_success(self):\n # Arrange\n user = UsuarioFixture().create()\n \n album_fixture = AlbumFixture()\n album_fixture.default_acceso = fixtures.ObjectFixture(Acceso.PUBLICO)\n album = album_fixture.create()\n\n album_sin_comentarios = album_fixture.create()\n\n album_comentario_fixture = AlbumComentarioFixture()\n album_comentario_fixture.default_album = fixtures.ObjectFixture(album) # comentarios para solo para el primer album creado\n # comentario para el primer album\n album_comentario_fixture.create() \n \n # Act\n response = self.run_authenticated(user, 'get', f'/album/{album_sin_comentarios.id}/comentarios')\n comentarios = response.json\n \n # Assert\n self.assertEqual(200, response.status_code)\n self.assertEqual(len(comentarios), 0)", "title": "" }, { "docid": "2fa18e2145d4a67e414754afce27e65c", "score": "0.67441374", "text": "def test_given_existing_user_when_list_comments_of_public_album_then_success(self):\n # Arrange\n user = UsuarioFixture().create()\n \n album_fixture = AlbumFixture()\n album_fixture.default_acceso = fixtures.ObjectFixture(Acceso.PUBLICO)\n album = album_fixture.create()\n\n album_comentario_fixture = AlbumComentarioFixture()\n album_comentario_fixture.default_album = fixtures.ObjectFixture(album) # comentarios para solo para el album creado previamente\n\n primer_comentario = album_comentario_fixture.create()\n segundo_comentario = album_comentario_fixture.create()\n tercer_comentario = album_comentario_fixture.create()\n \n # Act\n response = self.run_authenticated(user, 'get', f'/album/{album.id}/comentarios')\n comentarios = response.json\n \n # Assert\n self.assertEqual(200, response.status_code)\n self.assertEqual(len(comentarios), 3)\n self.assertTrue(any(comentario['id'] == primer_comentario.id for comentario in comentarios))\n self.assertTrue(any(comentario['usuario'] == primer_comentario.usuario for comentario in comentarios))\n self.assertTrue(any(comentario['id'] == segundo_comentario.id for comentario in comentarios))\n self.assertTrue(any(comentario['usuario'] == segundo_comentario.usuario for comentario in comentarios))\n self.assertTrue(any(comentario['id'] == tercer_comentario.id for comentario in comentarios))\n self.assertTrue(any(comentario['usuario'] == tercer_comentario.usuario for comentario in comentarios))", "title": "" }, { "docid": "4326f6f9b8e7dd8fa87b0bbb2feee2d5", "score": "0.6663884", "text": "def test_given_existing_user_when_list_comments_of_public_album_then_see_comments_belong_given_albums_success(self):\n # Arrange\n user = UsuarioFixture().create()\n \n album_fixture = AlbumFixture()\n album_fixture.default_acceso = fixtures.ObjectFixture(Acceso.PUBLICO)\n primer_album = album_fixture.create()\n segundo_album = album_fixture.create()\n\n album_comentario_fixture = AlbumComentarioFixture()\n album_comentario_fixture.default_album = fixtures.ObjectFixture(primer_album) \n # dos comentarios para el primer album\n album_comentario_fixture.create()\n album_comentario_fixture.create()\n \n album_comentario_fixture.default_album = fixtures.ObjectFixture(segundo_album) \n # un comentario para el segundo album\n album_comentario_fixture.create()\n\n # Act\n response_comentarios_primer_album = self.run_authenticated(user, 'get', f'/album/{primer_album.id}/comentarios')\n comentarios_primer_album = response_comentarios_primer_album.json\n \n 
response_comentarios_segundo_album = self.run_authenticated(user, 'get', f'/album/{segundo_album.id}/comentarios')\n comentarios_segundo_album = response_comentarios_segundo_album.json\n \n # Assert\n self.assertEqual(200, response_comentarios_primer_album.status_code)\n self.assertEqual(200, response_comentarios_segundo_album.status_code)\n self.assertEqual(len(comentarios_primer_album), 2)\n self.assertEqual(len(comentarios_segundo_album), 1)", "title": "" }, { "docid": "ce71ca414aa6331e867f2880438ffbe5", "score": "0.64658964", "text": "def test_given_existing_user_when_add_comment_to_private_album_then_fail(self):\n # Arrange\n user = UsuarioFixture().create()\n \n album_fixture = AlbumFixture()\n album_fixture.default_acceso = fixtures.ObjectFixture(Acceso.PRIVADO)\n album = album_fixture.create()\n \n comentario_data = {\n \"texto\": \"Este es un comentario\"\n }\n\n # Act\n response = self.run_authenticated(user, 'post', f'/album/{album.id}/comentarios', data=json.dumps(comentario_data))\n\n # Assert\n self.assertEqual(400, response.status_code)", "title": "" }, { "docid": "d93f6cc8628e5226d0c16efbb3a32970", "score": "0.64475584", "text": "def test_5_quizz_access_user_not_log(self):\n response = self.client.get('/nadeshiko/quizz/3')\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"Désolé vous n'êtes pas authentifié\")", "title": "" }, { "docid": "f55a52a3daa9f2c32373ccba5748fd62", "score": "0.6444463", "text": "def test_channel_addowner_not_authorised():\n initial_data = initialise_data()\n user_data = initial_data[\"users\"]\n channel_id = initial_data[\"channel_id\"]\n\n with pytest.raises(AccessError):\n # User 2, a non-owner member, making user 3 an owner of the channel\n assert channel.channel_addowner(user_data[2][\"token\"], channel_id, user_data[3][\"u_id\"])", "title": "" }, { "docid": "8c0dc64c1e9b401efe03338d4f341a2e", "score": "0.63900256", "text": "def test_user_list(self):\n url = reverse('projectroles:api_user_list')\n good_users = [\n self.superuser,\n self.owner_as_cat.user,\n self.owner_as.user,\n self.delegate_as.user,\n self.contributor_as.user,\n self.guest_as.user,\n self.user_no_roles,\n ]\n self.assert_response_api(url, good_users, 200)\n self.assert_response_api(url, self.anonymous, 401)\n self.assert_response_api(url, good_users, 200, knox=True)", "title": "" }, { "docid": "8e2af1655e119104da26f2311f927d18", "score": "0.63739276", "text": "def test_list_no_perms(self):\n self.client.force_authenticate(self.denied_user)\n\n response = self.client.get(self.url)\n self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)", "title": "" }, { "docid": "30dc0366451368f7f8c3de4e6e672b9b", "score": "0.63461065", "text": "def test_incorrect_permissions(self):\n self.client.force_login(self.user)\n response = self.client.get(\"/accounts/\", follow=True)\n self.assertContains(response, \"You don&#39;t have the required permissions.\")", "title": "" }, { "docid": "d93c5527cd27ecb369413741967355c7", "score": "0.6317276", "text": "def test_get_user_list_unauthorized(self):\n url = reverse('user-list')\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "title": "" }, { "docid": "258ca84eec262bc3e8a55d56340b9972", "score": "0.63142365", "text": "def test_blocked_user_is_no_more_admin(self):\n self.c1 = Conversation.objects.get(pk=self.c1.pk)\n self.c1.admins.append(self.valid_user.profile.id)\n self.c1.save()\n self.login(self.user1)\n resp = self.do_request(self.c1.pk, 
self.valid_user)\n self.assert202(resp)\n self.assertNotIn(self.valid_user.profile.id,\n Conversation.objects.get(pk=self.c1.pk).admins)", "title": "" }, { "docid": "6bb05300a8c2c7e1b9bb16a4df5e0cd7", "score": "0.62915313", "text": "def test_user_cannot_access(self):\n self.user = User.objects.create_user(**self.credentials) # regular user\n self.user.save() \n response = self.client.post('/login/', self.credentials, follow=True)\n response = self.client.get(reverse('animal', kwargs={'id': self.animal.id}))\n self.assertEqual(self.user.has_perm('auth.add_group'), False)\n self.assertEqual(self.user.has_perm('auth.change_user'), False)\n self.assertEqual(self.user.has_perm('zoo.add_animal'), False)\n self.assertEqual(self.user.has_perm('zoo.add_enclosure'), False)\n self.assertEqual(self.user.has_perm('zoo.add_medicalreport'), False)\n self.assertNotContains(response, \"Medical Reports\")\n self.assertNotContains(response, \"Add a medical report:\")", "title": "" }, { "docid": "49ef533dfef228bed5ba6bc4c7f0c054", "score": "0.6279646", "text": "def test_channel_removeowner_not_authorised():\n initial_data = initialise_data()\n user_data = initial_data[\"users\"]\n channel_id = initial_data[\"channel_id\"]\n\n with pytest.raises(AccessError):\n # User 2 removing user 1 as owner\n assert channel.channel_removeowner(user_data[2][\"token\"], channel_id, user_data[1][\"u_id\"])", "title": "" }, { "docid": "3d05b101c1aec00e50fefdee8c5a2ab9", "score": "0.6257671", "text": "def test_project_list(self):\n url = reverse('projectroles:api_project_list')\n good_users = [\n self.superuser,\n self.owner_as_cat.user,\n self.owner_as.user,\n self.delegate_as.user,\n self.contributor_as.user,\n self.guest_as.user,\n self.user_no_roles,\n ]\n self.assert_response_api(url, good_users, 200)\n self.assert_response_api(url, self.anonymous, 401)\n self.assert_response_api(url, good_users, 200, knox=True)", "title": "" }, { "docid": "d5d5df714d76e0e1b51f1bd7228329f1", "score": "0.62165695", "text": "def test_account_visible__other_users(self):\n visible = Account.objects.visible(self.user_2)\n\n self.assertNotIn(self.account_1, visible)", "title": "" }, { "docid": "e7d77cff0805fb337e8e3a4599fce04d", "score": "0.62068737", "text": "def test_non_superuser_cannot_list_organizations(self):\n # get organizations\n response = self.client.get(reverse(\"organizations-list\"))\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "title": "" }, { "docid": "bbf3e9c5feac933b54e3e14af5af59b6", "score": "0.62021446", "text": "def test_user_list_if_regular_user(self):\n\n url = reverse('user-list')\n self.client.login(username='test', password='test54321')\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "title": "" }, { "docid": "336dfab2a6f815a9ef2c684701172602", "score": "0.61846226", "text": "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, project_id='froggy')", "title": "" }, { "docid": "14761b7cdc51eb7335178370db2d854c", "score": "0.6162335", "text": "def test_get_user_permissions_repositories(self):\n pass", "title": "" }, { "docid": "1d81752ef9a2a457a9638320dc4c903a", "score": "0.615695", "text": "def test_non_admin_user_retrieve_users_unsuccessful(self):\n client = APIClient()\n token = self.response_non_admin.data[\"token\"]\n client.credentials(HTTP_AUTHORIZATION=\"JWT \" + token)\n response = client.get(self.url_list, format=\"json\")\n self.assertEqual(response.status_code, HTTP_403_FORBIDDEN)", "title": "" }, 
{ "docid": "87f2e34e2eb358f542c4f058b03de8d8", "score": "0.6134036", "text": "def test_user1_can_see_only_his_items(self):\n self.client.force_login(self.user_1)\n\n response = self.client.get(self.todo_list_url)\n data = response.data\n self.assertIn(\"next\", data)\n self.assertIn(\"previous\", data)\n\n count = data[\"count\"]\n self.assertEqual(len(self.user_1_items), count)\n\n results = data[\"results\"]\n self.assertEqual(len(self.user_1_items), len(results))\n\n for todo_item in self.user_1_items:\n self.assertTrue(any(item[\"id\"] == str(todo_item.id) for item in results))\n self.assertTrue(\n any(item[\"name\"] == str(todo_item.name) for item in results)\n )\n self.assertTrue(\n all(\n str(item[\"owner\"][\"id\"]) == str(todo_item.owner.id)\n for item in results\n )\n )\n\n for todo_item in self.user_2_items:\n self.assertFalse(any(item[\"id\"] == str(todo_item.id) for item in results))\n self.assertFalse(\n any(\n str(item[\"owner\"][\"id\"]) == str(todo_item.owner.id)\n for item in results\n )\n )\n\n for todo_item in self.admin_items:\n self.assertFalse(any(item[\"id\"] == str(todo_item.id) for item in results))\n self.assertFalse(\n any(\n str(item[\"owner\"][\"id\"]) == str(todo_item.owner.id)\n for item in results\n )\n )\n\n self.assertEqual(200, response.status_code)\n self.client.logout()", "title": "" }, { "docid": "741b96652426bc1af6b319fc7098f970", "score": "0.6128363", "text": "def test_user_cant_create(self):\n self.client.login(username='user', password='temp')\n\n response = self.client.options(f'{IS_ADMIN_OR_READONLY_PATH}')\n self.assertNotIn('actions', response.json())", "title": "" }, { "docid": "cae20a837839de73bd70c27f9bcdf271", "score": "0.61197174", "text": "def test__new_user_is_not_shown_bucketlists__succeeds(client):\n response = register_and_login(client, \"arny\", \"passy\")\n assert b'Looks like you dont have any buckets' in response.data", "title": "" }, { "docid": "6cc964c95d46f480c8d56247b57544a9", "score": "0.6111777", "text": "def test_not_logged_in(self):\n response = self.client.get(self.todo_list_url)\n\n self.assertEqual(403, response.status_code)", "title": "" }, { "docid": "80feb904bc6321be3d501625e182488d", "score": "0.61039025", "text": "def test_channel_addowner_invalid_token():\n initial_data = initialise_data()\n user_data = initial_data[\"users\"]\n channel_id = initial_data[\"channel_id\"]\n\n # User 1 logging out\n auth.auth_logout(user_data[1][\"token\"])\n\n with pytest.raises(AccessError):\n # User 1, an owner who has logged out, adding user 2 as owner\n assert channel.channel_addowner(user_data[1][\"token\"], channel_id, user_data[2][\"u_id\"])", "title": "" }, { "docid": "6e73483469e986ef106f7b31996ef22b", "score": "0.6089169", "text": "def test_auth_private_unowned(self):\r\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "title": "" }, { "docid": "5c02b36ad17491c1ab4ddb667158ba39", "score": "0.607355", "text": "def test_6_my_account_user_not_log(self):\n response = self.client.get('/nadeshiko/my_account/7')\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"Désolé vous n'êtes pas authentifié\")", "title": "" }, { "docid": "7751d3987d2e31f9c31c11cbf3819f4c", "score": "0.6066758", "text": "def test_get_list_unauthorized(self):\n self.assertHttpUnauthorized(self.api_client.get('/api/v1/transaction/', format='json'))", "title": "" }, { "docid": "635828c1a16df14d4d78a1cc1c5023ed", "score": "0.6059473", "text": "def test_auth_private_owned(self):\n self.do_visible(True, 'pattieblack', 
False, project_id='pattieblack')", "title": "" }, { "docid": "879f058fc9aec65b1327591651118f74", "score": "0.60501784", "text": "def test_osarch_list_api_unauthorized(self):\n\n # get response\n response = self.client.get('/api/osarch/')\n # compare\n self.assertEqual(response.status_code, 401)", "title": "" }, { "docid": "ca4ee1c61ae0840a1fe1113c72061240", "score": "0.60391366", "text": "def test_user_list_if_anonymous(self):\n\n url = reverse('user-list')\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "title": "" }, { "docid": "4fa0a520ad8405dbd475ee1201a9de54", "score": "0.60237503", "text": "def test_not_listening_profile_is_forbidden(self):\n self.login(self.user1)\n resp = self.do_request(self.c1.pk, self.user4)\n self.assert400(resp)\n self.assertNotIn(self.user4, self.c1.users.all())", "title": "" }, { "docid": "3c39484b13110c52011ae9c0e81c1844", "score": "0.5993745", "text": "def test_systemuser_list_api_unauthorized(self):\n\n # get response\n response = self.client.get('/api/systemuser/')\n # compare\n self.assertEqual(response.status_code, 401)", "title": "" }, { "docid": "0af2c5286d86c89728199bee5d5a001c", "score": "0.597427", "text": "def test_user_cannot_access(self):\n response = self.client.post('/login/', self.credentials, follow=True)\n self.assertTrue(response.context['user'].is_active)\n response = self.client.get(reverse('animal', kwargs={'id': self.animal.id}))\n self.assertEqual(self.user.has_perm('zoo.change_animal'), False)\n self.assertNotContains(response, \"new enclosure\")", "title": "" }, { "docid": "6a55e85ffc2b756d501812c8b536f6cb", "score": "0.59702295", "text": "def test_tiene_no_permiso_de_proyecto(self, proyecto, usuario, rol_de_proyecto):\n fases = Fase.objects.all().filter(proyecto=proyecto)\n permisos = list(rol_de_proyecto.get_pp_por_fase())\n permisos_por_fase = {fases[0]: permisos}\n proyecto.asignar_rol_de_proyecto(usuario, rol_de_proyecto, permisos_por_fase)\n\n condicion = proyecto.tiene_permiso_de_proyecto(usuario, 'pp_permiso_no_existente')\n assert condicion is False, 'El metodo indica que el usuario tiene el permiso de proyecto pero el usuario no ' \\\n 'tiene asignado el permiso'", "title": "" }, { "docid": "f5c981a006348dfb0070d75a2084a960", "score": "0.5964657", "text": "def test_user_list():\n assert len(users) > 0, 'User list is empty.'", "title": "" }, { "docid": "055802c5d7df76453635357ea9ec565e", "score": "0.59629613", "text": "def test_non_authorized_user_blocked(self):\n\n self.assertEqual(self.user_get_response.status_code,\n status.HTTP_403_FORBIDDEN)\n self.assertEqual(self.user_put_response.status_code,\n status.HTTP_403_FORBIDDEN)", "title": "" }, { "docid": "b122644a5ea4ad8a641649375e342e2d", "score": "0.595908", "text": "def test_permission_required(self):\n company = Company.objects.create(company_name='PiedPiper')\n keywords = {'company_pk': company.id}\n COMPANYCHATBOT_URL = reverse('chatbot:companychatbot-list',\n kwargs=keywords)\n\n res = self.client.get(COMPANYCHATBOT_URL)\n\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "title": "" }, { "docid": "57a43f1a897ce1eba9f9ea98f75dc522", "score": "0.594538", "text": "def test_change_permissions_bad_params_2(self):\n client = Client()\n client.login(\n username='test_user',\n password='qwertyuiop')\n res = client.post(\n '/file_manager/change_file_permissions/', \n data=json.dumps({'user_list': []}),\n content_type=\"application/json\")\n self.assertEqual(res.status_code, 401)", "title": "" }, { 
"docid": "4cd656290dfed07c43ac6a4725e6eba3", "score": "0.59452987", "text": "def test_non_admin_is_forbidden(self):\n self.login(self.valid_user)\n resp = self.do_request(self.c1.pk, self.user1)\n self.assert403(resp)", "title": "" }, { "docid": "5610e633cf7821c9d2af4ace8c7843b9", "score": "0.59449327", "text": "def test_user_cant_edit(self):\n self.client.login(username='user', password='temp')\n\n response = self.client.options(f'{IS_ADMIN_OR_READONLY_PATH}{self.user.id}/')\n self.assertNotIn('actions', response.json())", "title": "" }, { "docid": "4957abb3facfe5e16e0af19806c75eda", "score": "0.59430826", "text": "def test_get_stocktakingcollection_as_user(self):\n res = self.get(url=\"/stocktakingcollections/2\", role=\"user\")\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.UnauthorizedAccess)", "title": "" }, { "docid": "7e29f814d593c22cbb27ad7ef5c2b56d", "score": "0.5940894", "text": "def test_non_admin_is_forbidden(self):\n self.login(self.user3)\n resp = self.do_request(self.c1.pk, self.user1)\n self.assert403(resp)", "title": "" }, { "docid": "4204339c78768920632068c6087140d3", "score": "0.59281415", "text": "def test_given_existing_user_when_add_comment_to_invalid_album_then_fail(self):\n # Arrange\n user = UsuarioFixture().create()\n \n album_fixture = AlbumFixture()\n album_fixture.default_acceso = fixtures.ObjectFixture(Acceso.PRIVADO)\n album = album_fixture.create()\n \n comentario_data = {\n \"texto\": \"Este es un comentario\"\n }\n\n # Act\n response = self.run_authenticated(user, 'post', f'/album/{album.id+1}/comentarios', data=json.dumps(comentario_data))\n\n # Assert\n self.assertEqual(404, response.status_code)", "title": "" }, { "docid": "b5f0b5fed9ff72184ab65aa40f51552e", "score": "0.59189796", "text": "def test_channel_addowner_general():\n initial_data = initialise_data()\n user_data = initial_data[\"users\"]\n channel_id = initial_data[\"channel_id\"]\n\n # User 1 making user 2 an owner of the channel\n channel.channel_addowner(user_data[1][\"token\"], channel_id, user_data[2][\"u_id\"])\n owners = channel.channel_details(user_data[0][\"token\"], channel_id)['owner_members']\n\n # Checking user 2 has been made an owner of channel 0\n is_owner = False\n for owner in owners:\n if owner[\"u_id\"] == 2:\n is_owner = True\n assert is_owner", "title": "" }, { "docid": "707ad84ec10b1f805e3a9de32504eba6", "score": "0.5905093", "text": "def test_user_cannot_access(self): # user has neither Staff status nor is member of group\n response = self.client.post('/login/', self.credentials, follow=True)\n self.assertTrue(response.context['user'].is_active)\n response = self.client.get(reverse('animal', kwargs={'id': self.animal.id}))\n self.assertEqual(self.user.has_perm('zoo.add_medicalreport'), False)\n self.assertNotContains(response, \"Add a medical report:\")\n self.assertNotContains(response, \"Medical Reports\")", "title": "" }, { "docid": "8bb0ca78e32e7008039d092a1228edef", "score": "0.5890699", "text": "def test_auth_private_owned(self):\r\n self.do_visible(True, 'pattieblack', False, tenant='pattieblack')", "title": "" }, { "docid": "d3be0443e911133de6f8d6dd77f8f2ed", "score": "0.58865005", "text": "def test_private_object_other_users_cant_see(self):\n self.client.login(username='other_user', password='temp')\n\n get_response = self.client.get(f'{CHARACTERS_PATH}{self.private_character.id}/', \\\n format='json')\n self.assertIn(get_response.status_code, [403, 404])", "title": "" }, { "docid": "b543e273ed684745cacc880aa430602e", "score": 
"0.586534", "text": "def test_incorrect_permissions(self):\n self.client.force_login(self.user)\n response = self.client.get(f\"/accounts/{self.user.uid}/\", follow=True)\n self.assertContains(response, self.user.get_full_name())\n response = self.client.get(f\"/accounts/{self.superuser.uid}/\", follow=True)\n self.assertContains(response, \"You don&#39;t have the required permissions.\")\n self.client.force_login(self.superuser)\n response = self.client.get(f\"/accounts/{self.superuser.uid}/\", follow=True)\n self.assertContains(response, self.superuser.get_full_name())\n response = self.client.get(f\"/accounts/{self.user.uid}/\", follow=True)\n self.assertContains(response, self.user.get_full_name())", "title": "" }, { "docid": "190297dd813f72a38630fdc1de99a9c9", "score": "0.58624434", "text": "def test_show_not_live_perm(self):\n user = User.objects.create_user(username=\"user\")\n course = CourseFactory.create()\n assert not AuthorizationHelpers.can_see_own_not_live(course, user)\n\n # Instructor group has edit_own_content permission\n user.groups.add(Group.objects.get(name=\"Instructor\"))\n # Need to do this to refresh user permissions\n user = User.objects.get(username=\"user\")\n # Instructor does not own course\n assert not AuthorizationHelpers.can_see_own_not_live(course, user)\n # Now that instructor is an owner this check should pass\n course.owners.add(user)\n user = User.objects.get(username=\"user\")\n assert AuthorizationHelpers.can_see_own_not_live(course, user)", "title": "" }, { "docid": "e4175c29f6264acd57ee3e5de92609c4", "score": "0.5860226", "text": "def test_post_users_unauthorized(self):\n url = reverse('user-list')\n username = 'captobv'\n first_name = 'Captain'\n last_name = 'Obvious'\n email = 'captain@obvious.com'\n response = self.client.post(url, {'username': username, 'first_name': first_name, 'last_name': last_name, 'email': email}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "title": "" }, { "docid": "8e482cdd7525485542b43b332dec0013", "score": "0.585883", "text": "def test_user_list_if_admin(self):\n\n url = reverse('user-list')\n self.client.login(username='admin', password='test12345')\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.json()['results']), 2)", "title": "" }, { "docid": "38b1e66bb279887be55a6733989e520d", "score": "0.58514494", "text": "def test_permissions_no_grant(self):\n\n form = UserRegistrationForm({\n 'email': 'test@nightmare.com',\n 'password': 'test123',\n 'confirm_password': 'test123',\n 'first_name': 'test',\n 'last_name': 'test',\n 'chosen_organisations': [self.organisation],\n })\n self.assertTrue(form.is_valid())\n user = form.save()\n self.assertFalse(user.approved_organisations.exists())", "title": "" }, { "docid": "fcd2bef79bd82d0498ced0514bf4a79b", "score": "0.5847434", "text": "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, project_id='froggy')", "title": "" }, { "docid": "b49aca067bdc306dcb02212b72b3d757", "score": "0.5843088", "text": "def test_4_quizz_access_not_log(self):\n response = self.client.get(reverse('nadeshiko:quizz'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"Désolé vous n'êtes pas authentifié\")", "title": "" }, { "docid": "d6234badba4f5a0f37f8a67153afcdcb", "score": "0.58379155", "text": "def test_user_perms_not_staff_cannot_access(self): # user is member of group and has NO Staff status\n 
self.user.groups.add(self.group)\n response = self.client.post('/login/', self.credentials, follow=True)\n self.assertTrue(response.context['user'].is_active)\n response = self.client.get(reverse('animal', kwargs={'id': self.animal.id}))\n self.assertEqual(self.user.has_perm('zoo.add_medicalreport'), True)\n self.assertNotContains(response, \"Add a medical report:\")\n self.assertNotContains(response, \"Medical Reports\")", "title": "" }, { "docid": "fbcfeb26d7e06eced1fc25b258ec3141", "score": "0.5831114", "text": "def test_auth_private(self):\n self.do_visible(True, None, False, project_id='froggy')", "title": "" }, { "docid": "a99e2de2ad6cf9277c0f5f92f7a4b293", "score": "0.5831058", "text": "def test_users_head_logged_in(self):\n url = reverse('user-list')\n self.client.force_authenticate(user=self.user)\n response = self.client.options(url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "title": "" }, { "docid": "a54dc32159a1c50f976fd0431b3d0532", "score": "0.58282346", "text": "def test_is_owner(self):\n user = User.objects.create_user(username=\"user\")\n course = CourseFactory.create()\n assert not AuthorizationHelpers.is_owner(course, user)\n course.owners.add(user)\n assert AuthorizationHelpers.is_owner(course, user)", "title": "" }, { "docid": "a34367ce38c8e8c93a46446c4852266a", "score": "0.582533", "text": "def test_allow_mozilla_collections(self):\n id_url = f\"{reverse('collections.list')}{settings.TASK_USER_ID}/\"\n username_url = f\"{reverse('collections.list')}mozilla/\"\n response = self.client.get('/robots.txt')\n assert response.status_code == 200\n content = response.content.decode('utf-8')\n assert f'Allow: {id_url}' in content\n assert f'Disallow: {id_url}$' in content\n assert f'Allow: {username_url}' in content\n assert f'Disallow: {username_url}$' in content", "title": "" }, { "docid": "d2cf6aefc364005a3b878ffe2d291845", "score": "0.5817972", "text": "def test_no_user(self):\n #Make API call to list documents\n response = self.client.get(\n '/api/v1/document_retrieval/doc_list'\n )\n\n #Process response\n self.assertEqual(response.status_code, HTTPStatus.UNAUTHORIZED.value)", "title": "" }, { "docid": "9a73e57d061e0b7b474f4aa442852c5f", "score": "0.581421", "text": "def test_list_bucketlists_unauthorized(self):\n self.client.credentials()\n response = self.client.get(\"/bucketlists/\")\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "title": "" }, { "docid": "ed4c7de5338f3a34ab2c6df99ce7c33e", "score": "0.58141834", "text": "def test_activity_list_user_not_login(self):\n\t\tresponse = self.client.get(self.list_url)\n\t\tself.assertEqual(response.status_code, 302)", "title": "" }, { "docid": "387c379d50f4d2b39d8830e6f1857108", "score": "0.5813421", "text": "def test_users_head_unauthorized(self):\n url = reverse('user-list')\n response = self.client.head(url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "title": "" }, { "docid": "df80e80397f7387d2d642fda299196c4", "score": "0.5812996", "text": "def test_empty_private_owned(self):\r\n self.do_visible(True, 'pattieblack', False, is_admin=True)", "title": "" }, { "docid": "3d5757bf92ccf79c4f6a02ec62238b4a", "score": "0.5804766", "text": "def test_func(self):\n if not self.request.user.is_authenticated:\n return False\n self.occasion = get_object_or_404(Occasion, slug=self.kwargs[\"oc_slug\"])\n return (\n not self.occasion.membres.exists()\n or self.request.user in self.occasion.membres.all()\n )", "title": "" }, { "docid": 
"f37d7d0418b9cbbfeb43ec6ac193e850", "score": "0.58042616", "text": "def test_block_non_contributor_is_forbidden(self):\n non_contributor = self.create_user(username='dorn')\n self.login(self.user1)\n resp = self.do_request(self.c1.pk, non_contributor)\n self.assert400(resp)\n self.assertNotIn(non_contributor.profile.id,\n Conversation.objects.get(pk=self.c1.pk).blocked)", "title": "" }, { "docid": "1d11122a9e93539d25602aff5b1a825a", "score": "0.58042043", "text": "def test_users_options_unauthorized(self):\n url = reverse('user-list')\n response = self.client.options(url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "title": "" }, { "docid": "5a8608af3ca08d3d2f0b42287dd91be4", "score": "0.57947713", "text": "def test_click_add_to_wish_list_as_not_logged_user(self):\n self.product_page.click_add_to_wish_list_button()\n info_message = 'You must login or create an account to save Apple Cinema 30\" to your wish list!'\n assert info_message in self.product_page.catch_info_message.get_success_message()", "title": "" }, { "docid": "56418b12fd9dc4a687a0023d930c7260", "score": "0.5793207", "text": "def test_unauthenticated_user_retrieve_users_unsuccessful(self):\n client = APIClient()\n response = client.get(self.url_list, format=\"json\")\n self.assertEqual(response.status_code, HTTP_401_UNAUTHORIZED)", "title": "" }, { "docid": "85bd6d3d24964a4b735a89e5ea54bae0", "score": "0.579254", "text": "def test_auth_public_unowned(self):\r\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "title": "" }, { "docid": "3b6e3647c8d30939b31d74f9bf234346", "score": "0.5789757", "text": "def test_get_user_list_as_regular_user(self):\n url = reverse('user-list')\n self.client.force_authenticate(user=self.user)\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "title": "" }, { "docid": "0de6cf1c8ba0d7c18c15ff0f1815c606", "score": "0.57843643", "text": "def test_get_no_permission(self):\n self.user.user_permissions.clear()\n response = self._get()\n self.assertRedirectsToLogin(response)\n self.assertEquals(Business.objects.count(), 1)", "title": "" }, { "docid": "1eca54333f244a8dd1e879cacc6456f7", "score": "0.57775795", "text": "def test_is_owner_anonymous(self):\n user = AnonymousUser()\n course = CourseFactory.create()\n assert not AuthorizationHelpers.is_owner(course, user)", "title": "" }, { "docid": "c5a7f4b29ee989fdb7a05eff06bb92d8", "score": "0.577747", "text": "def test_empty_private_owned(self):\n self.do_visible(True, 'pattieblack', False, is_admin=True)", "title": "" }, { "docid": "c4531b2058a948328c887df32b8b5655", "score": "0.57739586", "text": "def test_get_no_permission(self):\n self.user.user_permissions.clear()\n response = self._get()\n self.assertRedirectsToLogin(response)\n self.assertEquals(Business.objects.count(), 0)", "title": "" }, { "docid": "b52e728c01bc7ffe15dfdc090ae086da", "score": "0.5768109", "text": "def test_get_quotas_unauthorized(self):\n global auth_url\n token = \"buahfhsda\"\n try:\n client = AstakosClient(token, auth_url)\n client.get_quotas()\n except Unauthorized:\n pass\n except Exception as err:\n self.fail(\"Shouldn't raise Exception %s\" % err)\n else:\n self.fail(\"Should have raised Unauthorized Exception\")", "title": "" }, { "docid": "3ec07f86f3bb6ead809e391d72c44ba9", "score": "0.57649356", "text": "def test_versions_all_view_as_device_owner(self):\n # Assert that the permission is denied\n self._assert_get_as_device_owner_fails(\n self.versions_all_url, 
expected_status=status.HTTP_403_FORBIDDEN\n )", "title": "" }, { "docid": "a1fa21d298ba2c538e0e4f4fdbf691f5", "score": "0.57631636", "text": "def test_auth(self):\n self.api_client.logout()\n resp = self.api_client.get('/api/metadata/search/')\n self.assertEqual(resp.status_code, 403)", "title": "" }, { "docid": "24917450c652a98188db6460b995b372", "score": "0.5762538", "text": "def test_read_with_no_permissions(self):\n # Load objects\n study = Study.objects.get(name=\"Test Study 1\")\n user1 = User.objects.get(username=\"test1\")\n # Asserts\n self.assertFalse(study.user_can_read(user1))", "title": "" }, { "docid": "71a77dcd1e782f6aebcdda27516cb6e5", "score": "0.57587403", "text": "def test_get_refunds_as_admin(self):\n # Do 5 refunds\n self.insert_default_refunds()\n res = self.get(url='/refunds/2', role='admin')\n self.assertEqual(res.status_code, 200)\n refund = json.loads(res.data)\n self.assertEqual(refund['id'], 2)\n self.assertEqual(refund['user_id'], 2)\n self.assertEqual(refund['total_price'], 200)\n self.assertFalse(refund['revoked'])\n\n required = ['id', 'timestamp', 'user_id', 'total_price', 'comment',\n 'revoked', 'revokehistory']\n assert all(x in refund for x in required)", "title": "" }, { "docid": "6950b44d07e26b8f363c9040f1d9b5c9", "score": "0.5757649", "text": "def test_project_retrieve(self):\n url = reverse(\n 'projectroles:api_project_retrieve',\n kwargs={'project': self.project.sodar_uuid},\n )\n good_users = [\n self.superuser,\n self.owner_as_cat.user,\n self.owner_as.user,\n self.delegate_as.user,\n self.contributor_as.user,\n self.guest_as.user,\n ]\n bad_users = [self.user_no_roles]\n self.assert_response_api(url, good_users, 200)\n self.assert_response_api(url, bad_users, 403)\n self.assert_response_api(url, self.anonymous, 401)\n self.assert_response_api(url, good_users, 200, knox=True)\n self.assert_response_api(url, bad_users, 403, knox=True)", "title": "" }, { "docid": "d72abca41c1ddec7b168e5bf1a54fd7f", "score": "0.57559955", "text": "def test_authenticated_non_owner_editor_user_cant_edit(self):\n self.client.login(username='other_user', password='temp')\n\n response = self.client.options(f'{SKILLS_PATH}{self.skill.id}/')\n self.assertNotIn('actions', response.json())", "title": "" }, { "docid": "644a214683c208f7b9b3cf3b4078a517", "score": "0.57548666", "text": "def test_channel_removeowner_valid():\n initial_data = initialise_data()\n user_data = initial_data[\"users\"]\n channel_id = initial_data[\"channel_id\"]\n\n # User 0 making revoking user 1 as an owner of the channel\n channel.channel_removeowner(user_data[0][\"token\"], channel_id, user_data[1][\"u_id\"])\n owners = channel.channel_details(user_data[0][\"token\"], channel_id)[\"owner_members\"]\n\n # Checking if user 1 is no longer an owner of channel 0\n is_owner = False\n for owner in owners:\n if owner[\"u_id\"] == 1:\n is_owner = True\n assert not is_owner", "title": "" }, { "docid": "d1d5c02424541e612d1051e271e4c506", "score": "0.5747527", "text": "def test_bad_user(self):\n #Make API call to list documents\n response = self.client.get(\n '/api/v1/document_retrieval/doc_list',\n headers=self.get_headers('','')\n )\n\n #Process response\n self.assertEqual(response.status_code, HTTPStatus.UNAUTHORIZED.value)", "title": "" }, { "docid": "b4964852d3886be5ebf2b9fe9f613a1f", "score": "0.57467353", "text": "def test_auth_private(self):\r\n self.do_visible(True, None, False, tenant='froggy')", "title": "" }, { "docid": "0bb895b6a9ae68a53fce6141d0473f19", "score": "0.57453626", "text": "def 
test_list_users_correct( self ):\n found = False\n\n game_cursor = mongo.list_users()\n for gameid in game_cursor:\n if self.user[\"user_id\"] == gameid[\"user_id\"]:\n found = True\n\n self.assertEqual( found, True,\n msg=f'{BColors.FAIL}\\t[-]\\tAdded user was not in the listed games!{BColors.ENDC}'\n + where() )\n print(f\"{BColors.OKGREEN}\\t[+]\\tPass User-Profile database list.{BColors.ENDC}\")", "title": "" }, { "docid": "dd4d888bfcc32e775effa4a36a089635", "score": "0.5743937", "text": "def test_show_account_not_logged_in(self):\n\n result = self.client.get(\"/account\")\n\n self.assertNotIn('Your Account', result.data)\n self.assertIn('/login', result.data)", "title": "" }, { "docid": "cd096f67a53f4bb675c953f9864eaa8b", "score": "0.5743659", "text": "def test_user_list(self):\r\n\r\n result = self.client.get('/users')\r\n self.assertIn(b'rose@tardis.com', result.data)", "title": "" }, { "docid": "6f4cff4d79a35201c4f6a5663f442c49", "score": "0.57398736", "text": "def test_get_no_permission(self):\n self.user.user_permissions.clear()\n response = self._get()\n self.assertRedirectsToLogin(response)\n self.assertEquals(self.model.objects.count(), 2)", "title": "" }, { "docid": "d61e63f4f914c2d5fa4af675cc8d1686", "score": "0.5738668", "text": "def test_channel_removeowner_invalid_token():\n initial_data = initialise_data()\n user_data = initial_data[\"users\"]\n channel_id = initial_data[\"channel_id\"]\n\n # User 0 logging out\n auth.auth_logout(user_data[0][\"token\"])\n\n with pytest.raises(AccessError):\n # User 0, an owner who has logged out, removing user 1 as owner\n assert channel.channel_removeowner(user_data[0][\"token\"], channel_id, user_data[1][\"u_id\"])", "title": "" }, { "docid": "e1f55958299d05e0783155511a192a4f", "score": "0.57380736", "text": "def test_permissions_non_superuser(self):\n self.client.force_login(self.user)\n response = self.client.get(f\"/accounts/{self.user.uid}/delete/\", follow=True)\n self.assertContains(response, \"Delete\")\n response = self.client.get(\n f\"/accounts/{self.superuser.uid}/delete/\", follow=True\n )\n self.assertContains(response, \"You don&#39;t have the required permissions.\")\n self.client.force_login(self.superuser)\n response = self.client.get(\n f\"/accounts/{self.superuser.uid}/delete/\", follow=True\n )\n self.assertContains(response, \"Delete\")\n response = self.client.get(f\"/accounts/{self.user.uid}/delete/\", follow=True)\n self.assertContains(response, \"Delete\")", "title": "" }, { "docid": "70a5eacb5f2bd8b7133a3f0159a606ba", "score": "0.57301354", "text": "def test_not_logged_in(self):\n response = self.client.get('/funding/%d/' % self.def_app.pk)\n self.assertEqual(response.status_code, 403)", "title": "" }, { "docid": "44ccfac7607fac961988d47c2ea2d4ac", "score": "0.57300884", "text": "def test_retrieve_user_unauthorized(self):\n # this test should have a status of 401 = unauthorized request \n # cause we don't want unauthenticated user to access other user data\n\n res = self.client.get(ME_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "title": "" }, { "docid": "be72eca79f958c7d4565f998fe7b9405", "score": "0.5728444", "text": "def test_list_user_permissions(self):\n list_user_permissions = yield self.service.list_user_permissions()\n self.failUnless(list_user_permissions[\"command\"] == \"list_user_permissions\")\n result = list_user_permissions[\"result\"]\n self.failUnless(result['/'] == ['guest', '.*', '.*', '.*'])", "title": "" }, { "docid": "4b9034101d979b4bf81f487cc72dd15d", 
"score": "0.5717942", "text": "def test_forbidden_get(self):\n self.client.login(username=self.forbidden_user.username, password=self.password)\n\n resp = self.client.get(self.endpoint, content_type='application/json')\n self.assertEqual(resp.status_code, 403) # forbidden", "title": "" }, { "docid": "4b9034101d979b4bf81f487cc72dd15d", "score": "0.5717942", "text": "def test_forbidden_get(self):\n self.client.login(username=self.forbidden_user.username, password=self.password)\n\n resp = self.client.get(self.endpoint, content_type='application/json')\n self.assertEqual(resp.status_code, 403) # forbidden", "title": "" }, { "docid": "4b9034101d979b4bf81f487cc72dd15d", "score": "0.5717942", "text": "def test_forbidden_get(self):\n self.client.login(username=self.forbidden_user.username, password=self.password)\n\n resp = self.client.get(self.endpoint, content_type='application/json')\n self.assertEqual(resp.status_code, 403) # forbidden", "title": "" }, { "docid": "4b9034101d979b4bf81f487cc72dd15d", "score": "0.5717942", "text": "def test_forbidden_get(self):\n self.client.login(username=self.forbidden_user.username, password=self.password)\n\n resp = self.client.get(self.endpoint, content_type='application/json')\n self.assertEqual(resp.status_code, 403) # forbidden", "title": "" }, { "docid": "4b9034101d979b4bf81f487cc72dd15d", "score": "0.5717942", "text": "def test_forbidden_get(self):\n self.client.login(username=self.forbidden_user.username, password=self.password)\n\n resp = self.client.get(self.endpoint, content_type='application/json')\n self.assertEqual(resp.status_code, 403) # forbidden", "title": "" } ]
334f6cdf19b6c8625c88452985f78908
The Huffman compression algorithm which compresses the symbols "000" to "111" based on their frequencies.
[ { "docid": "0c789003af40ce578fb302bf36ff424f", "score": "0.66966075", "text": "def HuffmanCompression(D: Union[np.ndarray, Dict[str, int]]) -> Tuple[Dict[str, str], DefaultDict[str, int]]:\n compressedDB = defaultdict(int)\n chars = freq = None\n # If the database is a dictionary, process it appropriately\n if isinstance(D, dict):\n chars = list(D.keys())\n freq = list(D.values())\n # If the database is a numpy array, process it appropriately\n else:\n chars = [str(i) for i in np.arange(0, 8)]\n freq = D\n\n # list containing unused nodes\n nodes = list()\n\n # Converting characters and frequencies into huffman tree nodes\n for x in range(len(chars)):\n nodes.append(node(freq[x], chars[x]))\n\n # We continue processing the nodes as long as there are more than 1 node in the list.\n while len(nodes) > 1:\n # Sort all the nodes in ascending order based on their frequency\n nodes = sorted(nodes, key=lambda x: x.freq)\n\n # pick 2 smallest nodes\n left = nodes[0]\n right = nodes[1]\n\n # assign directional value to these nodes\n left.huff = 0\n right.huff = 1\n\n # Combine the 2 smallest nodes to create new node as their parent\n newNode = node(left.freq+right.freq, left.symbol +\n right.symbol, left, right)\n\n # remove the 2 nodes and add their\n # parent as new node among others\n nodes.remove(left)\n nodes.remove(right)\n nodes.append(newNode)\n\n # Huffman Tree is ready!\n symbolCodeMapping = dict()\n # Note that storeNodes traverses the binary tree in DFS manner. We stop at the leaf nodes and record the values in the dictionary.\n storeNodes(nodes[0], symbolCodeDict=symbolCodeMapping)\n\n # Return the new database\n if isinstance(D, dict):\n for symbol, code in symbolCodeMapping.items():\n compressedDB[code] += D[symbol]\n else:\n for symbol, code in symbolCodeMapping.items():\n compressedDB[code] += D[int(symbol)]\n\n return symbolCodeMapping, compressedDB", "title": "" } ]
[ { "docid": "a28069afcd22e3460f263258dc2433c7", "score": "0.7296862", "text": "def encode(symb2freq):\r\n huffCode = namedtuple('huffCode', ' symbol code')\r\n lista = []\r\n heap = [[wt, [sym, \"\"]] for sym, wt in symb2freq.items()]\r\n heapify(heap)\r\n while len(heap) > 1:\r\n lo = heappop(heap)\r\n hi = heappop(heap)\r\n for pair in lo[1:]:\r\n pair[1] = '0' + pair[1]\r\n for pair in hi[1:]:\r\n pair[1] = '1' + pair[1]\r\n heappush(heap, [lo[0] + hi[0]] + lo[1:] + hi[1:])\r\n for elem in heappop(heap)[1:]:\r\n lista.append(huffCode(elem[0], elem[1]))\r\n return lista", "title": "" }, { "docid": "fd4d610a22f188aeea6dabf90c3a1e10", "score": "0.71840584", "text": "def compress(input, bit_out):\n initfreqs = FlatFrequencyTable(257)\n freqs = SimpleFrequencyTable(initfreqs)\n enc = ArithmeticEncoder(32, bit_out)\n sym = input.read(1)\n\n while sym:\n sym = sym[0]\n enc.write(freqs, sym)\n freqs.increment(sym)\n sym = input.read(1)\n\n enc.write(freqs, 256)\n enc.finish()\n\n print_result(freqs, bit_out)", "title": "" }, { "docid": "7a6535735c56b3b5100c59034747c790", "score": "0.7095041", "text": "def Huffman_Encoding(data):\n\n symbol_with_freq = get_frequency(data)\n symbols = symbol_with_freq.keys()\n frequencies = symbol_with_freq.values()\n print(\"symbols: \", symbols)\n print(\"frequencies: \", frequencies)\n \n nodes = []\n \n # converting symbols and probabilities into huffman tree nodes\n for symbol in symbols:\n nodes.append(Node(symbol_with_freq.get(symbol), symbol))\n \n while len(nodes) > 1:\n # sort all the nodes in ascending order based on their probability\n nodes = sorted(nodes, key=lambda x: x.freq)\n \n right = nodes[0]\n left = nodes[1]\n \n left.code = 0\n right.code = 1\n \n # combine the 2 smallest nodes to create new node\n newNode = Node(left.freq+right.freq, left.symbol+right.symbol, left, right)\n \n nodes.remove(left)\n nodes.remove(right)\n nodes.append(newNode)\n \n huffman_encoding = get_codes(nodes[0])\n print(\"symbols with codes\", huffman_encoding)\n Total_Gain(data, huffman_encoding)\n encoded_output = encode_output(data,huffman_encoding)\n return encoded_output, nodes[0]", "title": "" }, { "docid": "0b367123c4898f88256e7084e165a031", "score": "0.69861346", "text": "def huffman_code(freq_map, endian='big'):\n if not isinstance(freq_map, dict):\n raise TypeError(\"dict expected\")\n if len(freq_map) == 0:\n raise ValueError(\"non-empty dict expected\")\n\n class Node(object):\n # a Node object will have either .symbol or .child set below,\n # .freq will always be set\n def __lt__(self, other):\n # heapq needs to be able to compare the nodes\n return self.freq < other.freq\n\n def huff_tree(freq_map):\n # given a dictionary mapping symbols to thier frequency,\n # construct a Huffman tree and return its root node\n\n minheap = []\n # create all the leaf nodes and push them onto the queue\n for sym, f in freq_map.items():\n nd = Node()\n nd.symbol = sym\n nd.freq = f\n heapq.heappush(minheap, nd)\n\n # repeat the process until only one node remains\n while len(minheap) > 1:\n # take the nodes with smallest frequencies from the queue\n child_0 = heapq.heappop(minheap)\n child_1 = heapq.heappop(minheap)\n # construct the new internal node and push it onto the queue\n parent = Node()\n parent.child = [child_0, child_1]\n parent.freq = child_0.freq + child_1.freq\n heapq.heappush(minheap, parent)\n\n # the single remaining node is the root of the Huffman tree\n return minheap[0]\n\n result = {}\n\n def traverse(nd, prefix=bitarray(endian=endian)):\n if 
hasattr(nd, 'symbol'): # leaf\n result[nd.symbol] = prefix\n else: # parent, so traverse each of the children\n traverse(nd.child[0], prefix + bitarray([0]))\n traverse(nd.child[1], prefix + bitarray([1]))\n\n traverse(huff_tree(freq_map))\n return result", "title": "" }, { "docid": "01dbdecc09b78e8fca3af781f75dcbd9", "score": "0.6897816", "text": "def encode_huffman(value):\n #briskoume th thesh tou value sto pinaka me ta ranges \n def get_HuffmanRange(table, target):\n for i, row in enumerate(table):\n for j, element in enumerate(row):\n if target == element:\n return (i, j)\n raise ValueError('error:de mporei na brei to range')\n '''\n gia na xehwrisoume an h timh pou pairnoume antistoixei se DC h AC\n prepei na doume an milame gia lista h akeraia timh.\n Xeroume oti to DC tha einai kapoio int enw to AC tha einai mia lista \n me to mhkos tou poses fores emfanistike enas arithmos\n '''\n #elegxoume an einai lista, dld tupou collection.iterable\n if not isinstance(value, collections.Iterable): # afou den einai ara einai DC\n if value <= -2048 or value >= 2048:\n raise ValueError(\n 'uparxei error giati to DC einai para polu megalo'\n )\n #briskoume th thesh tou value mesa sto pinaka\n size, code = get_HuffmanRange(HUFFMAN_CATEGORIES, value)\n\n #me bash th thesh gurname thn antistoixh kwdikopoihsh\n if size == 0:#an einai to prwto dld value=0\n return HUFFMAN_CATEGORY_CODEWORD[DC][size]\n\n return (HUFFMAN_CATEGORY_CODEWORD[DC][size]\n + '{:0{padding}b}'.format(code, padding=size))\n else: # afou einai lista einai AC \n value = tuple(value)\n if value == EOB or value == ZRL: #an to AC einai kapoio ap ta duo special sumbola gurname amesws th kwdikopoihsh \n return HUFFMAN_CATEGORY_CODEWORD[AC][value]\n\n length, key = value\n if key == 0 or key <= -1024 or key >= 1024:\n raise ValueError(\n 'error: to AC anhkei mono sto diasthma (-1024,0)u(0,1024)'\n )\n #antistoixa me DC\n size, code = get_HuffmanRange(HUFFMAN_CATEGORIES, key)\n return (HUFFMAN_CATEGORY_CODEWORD[AC][(length, size)]\n + '{:0{padding}b}'.format(code, padding=size))", "title": "" }, { "docid": "baad9cda6f346a53594b0defc3b87d8f", "score": "0.689176", "text": "def huffman_encoding(input_string):\n freq_dict = _get_freq(input_string)\n binary_tree = copy.deepcopy(freq_dict)\n result_string = ''\n\n if len(binary_tree) == 1:\n for char in input_string:\n result_string += '0'\n return result_string\n\n binary_tree, root = _construct_huffman_tree(binary_tree)\n\n for char in input_string:\n result_string += binary_tree_dfs(binary_tree, char, root, '')\n\n return _bit_string_to_byte_string(result_string), freq_dict", "title": "" }, { "docid": "38dcf5e7f8c9ce88ed2dfdc39e3f8793", "score": "0.67771584", "text": "def compress(in_file, out_file):\r\n with open(in_file, \"rb\") as f1:\r\n text = f1.read()\r\n freq = make_freq_dict(text)\r\n tree = huffman_tree(freq)\r\n codes = get_codes(tree)\r\n number_nodes(tree)\r\n print(\"Bits per symbol:\", avg_length(tree, freq))\r\n result = (num_nodes_to_bytes(tree) + tree_to_bytes(tree) +\r\n size_to_bytes(len(text)))\r\n result += generate_compressed(text, codes)\r\n with open(out_file, \"wb\") as f2:\r\n f2.write(result)", "title": "" }, { "docid": "292b4622ffd80140fb441e93cd41f94c", "score": "0.6762563", "text": "def compress(in_file, out_file):\n with open(in_file, \"rb\") as f1:\n text = f1.read()\n freq = make_freq_dict(text)\n tree = huffman_tree(freq)\n codes = get_codes(tree)\n number_nodes(tree)\n print(\"Bits per symbol:\", avg_length(tree, freq))\n result = 
(num_nodes_to_bytes(tree) + tree_to_bytes(tree) +\n size_to_bytes(len(text)))\n result += generate_compressed(text, codes)\n with open(out_file, \"wb\") as f2:\n f2.write(result)", "title": "" }, { "docid": "292b4622ffd80140fb441e93cd41f94c", "score": "0.6762563", "text": "def compress(in_file, out_file):\n with open(in_file, \"rb\") as f1:\n text = f1.read()\n freq = make_freq_dict(text)\n tree = huffman_tree(freq)\n codes = get_codes(tree)\n number_nodes(tree)\n print(\"Bits per symbol:\", avg_length(tree, freq))\n result = (num_nodes_to_bytes(tree) + tree_to_bytes(tree) +\n size_to_bytes(len(text)))\n result += generate_compressed(text, codes)\n with open(out_file, \"wb\") as f2:\n f2.write(result)", "title": "" }, { "docid": "2b263553fc9145d03b5bfdb206e8104a", "score": "0.660233", "text": "def make_encoding_table(huffman_tree):\n pass", "title": "" }, { "docid": "1eaa92b0dd36b09b720d15d7a6b7474e", "score": "0.6563524", "text": "def LZ77Huffman(seq,l):\n\n print(\"First encoding: LZ77...\")\n LZ77gen = Lempel_Ziv.LZ77(seq, l)\n\n print(\"Second encoding: Huffman...\")\n val, counts = np.unique(LZ77gen, return_counts=True, axis=0)\n tuples = []\n for i in val:\n tuples.append(tuple(i))\n\n prob = counts / sum(counts)\n code = Huffman.huffman_procedure(prob)\n\n print(\"Compressing...\")\n zip_iterator = zip(tuples, code)\n dictionary = dict(zip_iterator)\n\n Huffgen =''\n for element in LZ77gen:\n element = tuple(map(str, element))\n Huffgen += dictionary[element]\n\n \"\"\"\n alphab = tuples\n pos = np.arange(len(alphab))\n width = 0.5 # gives histogram aspect to the bar diagram\n\n ax = plt.axes()\n plt.xlabel(\"Tuples\")\n plt.ylabel(\"Proportion\")\n\n plt.bar(pos, prob, width, color='blue')\n plt.savefig(\"LZ77Huffman.pdf\")\n plt.show()\n \"\"\"\n \n return Huffgen", "title": "" }, { "docid": "c039ab51fe24a68e11549c8e42873a15", "score": "0.6527305", "text": "def huffman(name,usingWords = False):\n probabilityArray,percentage = buildProbabilityArray(name)\n if usingWords:\n n = 65536\n else:\n n = 256\n\n nonZeroProbabilityArray = {}\n for i in range(len(probabilityArray)):\n if probabilityArray[i] != 0:\n nonZeroProbabilityArray[str(i)] = probabilityArray[i]\n \n codeArray = ['']*n\n done = False\n sumHistory = []\n while not (done):\n smallestProbabilityKey = min(nonZeroProbabilityArray,key = nonZeroProbabilityArray.get)\n smallestProbabilityValue = nonZeroProbabilityArray[smallestProbabilityKey]\n del nonZeroProbabilityArray[smallestProbabilityKey]\n secondSmallestProbabilityKey = min(nonZeroProbabilityArray,key = nonZeroProbabilityArray.get)\n secondSmallestProbabilityValue = nonZeroProbabilityArray[secondSmallestProbabilityKey]\n del nonZeroProbabilityArray[secondSmallestProbabilityKey]\n newValue = smallestProbabilityValue + secondSmallestProbabilityValue\n nonZeroProbabilityArray[smallestProbabilityKey+' '+secondSmallestProbabilityKey] = newValue\n sumHistory.append((smallestProbabilityKey,secondSmallestProbabilityKey))\n if len(nonZeroProbabilityArray) == 1:\n done = True\n \n sumHistory.reverse()\n for historyTuple in sumHistory:\n first = historyTuple[0].split()\n for i in first:\n codeArray[int(i)] += '1'\n second = historyTuple[1].split()\n for i in second:\n codeArray[int(i)] += '0'\n s = 0\n for i in range(256):\n s += len(codeArray[i])*percentage[i]\n\n print(\"The average word length is: \" + str(s)) \n return codeArray", "title": "" }, { "docid": "ef289276db8f8b072f88d31fc47c03b9", "score": "0.65113676", "text": "def compress(name):\n try:\n file = 
open(name,\"rb\")\n except FileNotFoundError:\n try:\n name = os.path.expanduser('~/'+name)\n file = open(name,'rb')\n except:\n print(\"Can't find file! Check if it is either on the same folder as this script or your home folder\")\n sys.exit(1)\n \n d = bytearray(file.read())\n file.close()\n\n compress_name = name + '.dsg'\n\n dictionary = huffman(d,usingWords=False)\n writeBuffer = ''\n for i in dictionary:\n if i != '':\n size = format(len(i),'#010b')\n size = size[2:]\n key = format(dictionary.index(i),'#010b')\n key = key[2:]\n symbol = i\n writeBuffer += size + key + symbol\n\n writeBuffer += '0'*8\n\n for i in d:\n writeBuffer+=dictionary[i]\n \n if len(writeBuffer)%8 != 0:\n padsize = 8 - len(writeBuffer)%8\n writeBuffer += '0'*padsize\n padsize = format(padsize,'#010b')\n padsize = padsize[2:]\n writeBuffer += padsize\n \n file = open(compress_name,'wb')\n\n for i in range(int(len(writeBuffer)/8)):\n hexBuffer = writeBuffer[8*i:8*i+8]\n hexBuffer = int(hexBuffer,2)\n hexBuffer = format(hexBuffer,'#04x')\n hexBuffer = bytes.fromhex(hexBuffer[2:])\n file.write(hexBuffer)\n\n file.close()\n compressedLen = len(writeBuffer)\n beforeCompressionLen = len(d)\n compressionRate = beforeCompressionLen/compressedLen*8\n print(\"The compression rate was: \" + str(compressionRate))", "title": "" }, { "docid": "a276b94c083743f8d3428af743253eed", "score": "0.64922434", "text": "def huffman_encoding(data):\n if len(data) == 0:\n return \"\", None\n huffman_tree_root = build_huffman_tree(data)\n codes_dict = {}\n encoded_data = generate_encoded_data(data, huffman_tree_root, codes_dict)\n return encoded_data, huffman_tree_root", "title": "" }, { "docid": "0de9d0fca8c6259bb1346967f956a072", "score": "0.6430497", "text": "def huffman_coding(data):\n\tassert type(data) == dict\n\n\tif data == {}:\n\t\treturn {}\n\n\t# Initializing external nodes from data\n\th = [(data[key], Node(key, data[key])) for key in data]\n\theapq.heapify(h)\n\n\t# At the end of the loop there will be a single node present in the minheap\n\tfor i in range(len(data)-1):\n\t\t# Pop out two smallest nodes\n\t\tx = heapq.heappop(h)\n\t\ty = heapq.heappop(h)\n\n\t\t# Create a new node and add \"x\" as its left child and \"y\" as its right\n\t\tz = Node(\"i\", x[0] + y[0], x[1], y[1])\n\n\t\t# Push this new node into the heap\n\t\theapq.heappush(h, (x[0]+y[0], z))\n\n\t# Generate codes for characters by using preorder traversal\n\tt = heapq.heappop(h)\n\tcodes = {}\n\tprefix_traverse(t[1], codes)\n\treturn codes", "title": "" }, { "docid": "465ea05c098acf527a82e9efecaf3637", "score": "0.6347244", "text": "def main():\n import pprint\n frequencies = file_character_frequencies(sys.argv[1])\n pprint.pprint(frequencies)\n codes = huffman_codes_from_frequencies(frequencies)\n pprint.pprint(codes)", "title": "" }, { "docid": "b74f9305faf8acd7b64bc7bfac686600", "score": "0.6320148", "text": "def encode(self: object) -> None:\n tree = HuffmanTree(self.seq.read())\n tree.get_codings(tree.root)\n self.binary = tree.seq_to_binstr()\n self.unicode = HuffmanTree.binstr_to_unicode(self.binary)\n self.header = tree.codes_to_header()\n self.compressed = self.header + self.unicode\n Sequence(self.huff_output).write_bytes(self.compressed)", "title": "" }, { "docid": "e788ad9b8056a1b377cbc20ec03ba40a", "score": "0.6295107", "text": "def huffman_encode(ascii_chars, encoding):\n code_string = ''\n\n for char in ascii_chars:\n code_string += encoding[char]\n\n return code_string", "title": "" }, { "docid": "8231df5b4fbe6e01b862253c4187bab3", "score": 
"0.6239969", "text": "def compress(huff, args, filelen):\r\n try:\r\n with open(args.file, 'rb') as file:\r\n mmp = mmap.mmap(file.fileno(), length=0, flags=mmap.MAP_PRIVATE, prot=mmap.PROT_READ)\r\n # Datos del cabezal\r\n numeromagico = 'JA'\r\n sym_arraylen = len(huff)\r\n sym_arraysize = len(huff[-1])\r\n\r\n # Armamos el codificado total, los datos en si comprimidos\r\n codificadoTotal = ''\r\n while True:\r\n b = mmp.read(1)\r\n if not b:\r\n break\r\n for h in huff:\r\n if b == h.symbol:\r\n codificado = h.code #se hace un string con todos los codigos huff de las letras en vez de su codigo binario\r\n codificadoTotal += codificado\r\n # debemos agregar en cod total los 0 al final que falten para tener tamano multiplo de 8\r\n cantAAgregar = 8 - (len(codificadoTotal) % 8)\r\n for _ in range(cantAAgregar):\r\n codificadoTotal += '0'\r\n # El largo del archivo comprimido es el largo del symarray por el tamano de cada uno de sus elementos mas\r\n # el largo del bit stream (que es el codificado total)\r\n compressedfilelen = (len(codificadoTotal) / 8 + len(huff) * 6) + 8 # 8 bytes ocupa el cabezal, cada entrada en\r\n # huff ocupa 6 bytes y se suma los bytes del codificado\r\n if not args.force:\r\n if filelen < compressedfilelen:\r\n print(\"El archivo resultante comprimido es mas grande que el dado.\")\r\n file.close()\r\n return None\r\n newfile = open(args.file + \".huff\", 'wb')\r\n\r\n newfile.write(struct.pack('>ccBBI', numeromagico[0].encode(encoding='ascii'),\r\n numeromagico[1].encode(encoding='ascii'), sym_arraylen - 1, sym_arraysize,\r\n filelen))\r\n # Ahora se debe agregar un array de elementos de 6 bytes, cada uno de los cuales identifica un símbolo,\r\n # su tamano y su código Huffman. En nuestro caso estos datos estan en huff\r\n\r\n for elem in huff:\r\n symb = elem.symbol\r\n size = len(elem.code) \r\n code = elem.code \r\n newfile.write(struct.pack('>cBI', symb, size, int(code)))\r\n\r\n for x in range(0, len(codificadoTotal), 8):\r\n newfile.write(struct.pack('>B', int(codificadoTotal[x: x + 8], 2))) \r\n\r\n newfile.close()\r\n file.close()\r\n print(\"archivo compreso con exito\")\r\n return compressedfilelen\r\n except OSError as err:\r\n print(\"El error es \" + \"OS error: {0}\".format(err))\r\n return", "title": "" }, { "docid": "d7ad6c7a082bf998ebdd818313f919b7", "score": "0.61150146", "text": "def huffman_decoding(input_string, freq_dict):\n binary_tree, root = _construct_huffman_tree(freq_dict)\n result_string = ''\n i = 0\n input_string = _byte_string_to_bit_string(input_string)\n while i < len(input_string):\n node = root\n while binary_tree[node].get('left') or binary_tree[node].get('right'):\n node = binary_tree[node]['left'] if input_string[i] == '0' else binary_tree[node]['right']\n i += 1\n result_string += str(node)\n return result_string", "title": "" }, { "docid": "b41b1b7d324c7f4eaee806fa3c6cf411", "score": "0.60744846", "text": "def compression_size(freq: Dict, lst: List) -> int:\n total = 0\n for item in lst:\n # Get each frequency\n frequency = freq[item[0]]\n # Multiply the frequency by the size of the corresponding huffman encoding and add total\n total += frequency * len(item[1])\n # Divide by 8 because total is currently in bits and take the ceiling of this\n return math.ceil(total // 8)", "title": "" }, { "docid": "2fefa870b5a9f022a804ee812884ef4c", "score": "0.59565395", "text": "def compress(data: str):\n dictionary, res = {chr(i): chr(i) for i in range(256)}, []\n cur_char, code_gen = 0, counter(256)\n\n while cur_char < len(data):\n 
pref_in_dict = '' # find largest prefix in dict\n while cur_char < len(data) and (new_pref := pref_in_dict + data[cur_char]) in dictionary:\n pref_in_dict = new_pref\n cur_char += 1\n\n res += [dictionary[pref_in_dict]] # write it as code, add next pref to dict\n dictionary[new_pref] = next(code_gen)\n\n return res", "title": "" }, { "docid": "b7f915378873134742f900e080e29717", "score": "0.58810526", "text": "def huffman_encoding_recursion(node, code=''):\n if type(node) is str:\n return {node: code}\n\n encode_map = dict()\n\n left_node = node.get_left_child()\n right_node = node.get_right_child()\n\n encode_map.update(huffman_encoding_recursion(left_node, code + '0'))\n encode_map.update(huffman_encoding_recursion(right_node, code + '1'))\n return encode_map", "title": "" }, { "docid": "1f80085786849f9fd59b2a82ac390f5a", "score": "0.58447087", "text": "def huffman_tree(freq_dict):\r\n\r\n items = list(freq_dict.items())\r\n items.sort(key=lambda x: x[1])\r\n data = items\r\n\r\n node_list = []\r\n for i in data:\r\n node = HuffmanNode(i[0])\r\n node.number = i[1]\r\n node_list.append(node)\r\n\r\n while len(node_list) != 1:\r\n # Keeps track of order of tuples\r\n a, b = node_list.pop(0), node_list.pop(0)\r\n\r\n if a.number <= b.number:\r\n root = HuffmanNode(None, a, b)\r\n else:\r\n root = HuffmanNode(None, b, a)\r\n\r\n root.number = a.number + b.number\r\n node_list.append(root)\r\n node_list.sort(key=lambda x: x.number)\r\n\r\n return node_list[0]", "title": "" }, { "docid": "306c7a1e218d35164298071b64e0548b", "score": "0.5801021", "text": "def compress(uncompressed):\n count = 0\n # Build the dictionary.\n dict_size = 256\n dictionary = dict((chr(i), i) for i in range(dict_size))\n # in Python 3: dictionary = {chr(i): i for i in range(dict_size)}\n\n w = \"\"\n result = []\n for c in uncompressed:\n wc = w + c\n count += 1\n if wc in dictionary:\n w = wc\n else:\n result.append(dictionary[w])\n # Add wc to the dictionary.\n dictionary[wc] = dict_size\n dict_size += 1\n w = c\n\n # Output the code for w.\n if w:\n result.append(dictionary[w])\n return result, count", "title": "" }, { "docid": "bff38e04670d1159cb68abf581bcda42", "score": "0.57936007", "text": "def lz77_compress2(text, w=2**12-1, max_length=2**5-1):\n result = []\n out_string = \"\"\n n = len(text)\n p = 0\n while p<n:\n if ord(text[p]) >= 128: continue\n m,k = maxmatch(text, p, w, max_length)\n if k<4: # modified from k<2\n result.append(text[p]) # a single char\n p += 1 #even if k was 2 (why?)\n else:\n result.append([m,k]) # two or more chars in match\n p += k\n return result # produces a list composed of chars and pairs", "title": "" }, { "docid": "26bace224af216d51a3c8bd31c32c27b", "score": "0.57908577", "text": "def huffman_tree(freq_dict):\n huffmanNodes = sortDict(freq_dict)\n while len(huffmanNodes) > 1:\n small1, small2 = huffmanNodes.pop(), huffmanNodes.pop()\n newTup = (small1[0] + small2[0], HuffmanNode(None, small1[1], small2[1]))\n huffmanNodes.append(newTup)\n #huffmanNodes = sorted(huffmanNodes, key=lambda hf: hf[0], reverse=True)\n huffmanNodes.sort()\n huffmanNodes = huffmanNodes[::-1]\n if len(huffmanNodes) > 0:\n return huffmanNodes[0][1]\n else:\n return HuffmanNode()", "title": "" }, { "docid": "f2022ac5a1119d6541986fbedca61416", "score": "0.57861996", "text": "def compression(s):\n r = \"\"\n length = len(s)\n\n if length == 0:\n return \"\"\n\n if length == 1:\n return s + \"1\"\n\n last = s[0]\n cnt = 1\n i = 1\n\n while i < length:\n\n if s[i] == s[i - 1]:\n cnt += 1\n else:\n r = r + s[i - 
1] + str(cnt)\n cnt = 1\n\n i += 1\n\n r = r + s[i - 1] + str(cnt)\n\n return r", "title": "" }, { "docid": "f41c591a14bd1935be6eb7c37a5df705", "score": "0.57711864", "text": "def lz77_compress(text, w=2**12-1, max_length=2**5-1):\n result = []\n n = len(text)\n p = 0\n while p<n:\n if ord(text[p]) >= 128: continue\n m,k = maxmatch(text, p, w, max_length)\n if k<2:\n result.append(text[p]) # a single char\n p += 1\n else:\n result.append([m,k]) # two or more chars in match\n p += k\n return result # produces a list composed of chars and pairs", "title": "" }, { "docid": "28f2f50d9ee711456ef4562f84617c7b", "score": "0.57638574", "text": "def compress(filename):\n table = {chr(i): i for i in xrange(256)}\n next_i = 256\n string = None\n try:\n f = open(filename, 'r')\n out = open(filename + '.zl', 'wb')\n except Exception as e:\n print e\n return\n for line in f:\n for c in line:\n if string is None:\n string = c\n continue\n next_string = string + c\n if next_string not in table:\n code = table[string]\n table[next_string] = next_i\n next_i += 1\n out.write(struct.pack('h', code))\n sys.stdout.write(string)\n string = c\n else:\n string = next_string\n out.write(struct.pack('h', table[string]))\n out.close()\n f.close()", "title": "" }, { "docid": "f5b2062371736e831bff81d86a0a3e61", "score": "0.5741122", "text": "def huffman(path):\n # create the list with letters\n letterList = Letter.build_list(path)\n\n # build the huffman tree\n root = Node.build_tree(letterList)\n\n # get the bitstring for each letter\n letters = Node.get_bitstring(root, \"\")\n\n # open the file\n with open(path) as f:\n with open(\"huffman.txt\", \"w\") as h:\n while True:\n\n # read each character in the file\n c = f.read(1)\n\n # EOF\n if not c:\n break\n\n # get the object Letter correspondent\n letter = list(filter(lambda l: l.ch == c, letters))[0]\n\n h.write(letter.bitstring)", "title": "" }, { "docid": "2ee954a0df1314e04d70663aece28355", "score": "0.5704366", "text": "def huffman_tree(freq_dict):\n new_lst = []\n\n if len(freq_dict) == 0:\n return None\n else:\n new_tup = [(freq, key) for (key, freq) in freq_dict.items()]\n new_tup.sort()\n for (freq, value) in new_tup:\n new_lst.append((freq, HuffmanNode(value)))\n\n if len(new_lst) == 1:\n new_lst.append((0, HuffmanNode(None)))\n\n while len(new_lst) > 1:\n left, right = new_lst.pop(0), new_lst.pop(0)\n new = HuffmanNode(None, left[1], right[1])\n new_lst.append((left[0] + right[0], new))\n new_lst.sort()\n\n return new_lst[0][1]", "title": "" }, { "docid": "e308ddcf72c0bfb58b4eab2705bc7584", "score": "0.56976193", "text": "def huffman_letter_codes_from_file_contents(file_name):\n # Suggested strategy...\n #freqs = file_character_frequencies(file_name)\n #return huffman_codes_from_frequencies(freqs)\n return {}", "title": "" }, { "docid": "d1fa5c0f0437597ea4c8d556ec741a5d", "score": "0.56970656", "text": "def compress(gene: str) -> int:\n bit_string: int = 1\n\n mapping: Dict[str, int] = {\n \"A\": 0b00,\n \"C\": 0b01,\n \"G\": 0b10,\n \"T\": 0b11\n }\n\n for nucleotide in gene.upper():\n bit_string <<= 2\n\n if nucleotide not in mapping:\n raise ValueError(f\"Invalid nucleotide '{nucleotide}'.\")\n\n bit_string |= mapping[nucleotide]\n\n return bit_string", "title": "" }, { "docid": "ea5a6edd05d11b3779d7a698ccd8870e", "score": "0.5668884", "text": "def huffman_decoding(data,tree):\n decoded_data = \"\"\n node = tree\n if len(data)==0:\n return decoded_data\n elif tree.char:\n for i in range(len(data)):\n decoded_data += tree.char\n else:\n for i in 
range(len(data)):\n if data[i] == '0':\n node = node.left_child\n else:\n node = node.right_child\n if node.char:\n decoded_data += node.char\n node = tree\n return decoded_data", "title": "" }, { "docid": "0d582bba33754df6bf0b3ce292d323b0", "score": "0.56604034", "text": "def Huffman_Decoding(encoded_data, huffman_tree):\n\n tree_head = huffman_tree\n decoded_output = []\n for x in encoded_data:\n if x == '1':\n huffman_tree = huffman_tree.right \n elif x == '0':\n huffman_tree = huffman_tree.left\n try:\n if huffman_tree.left.symbol == None and huffman_tree.right.symbol == None:\n pass\n except:\n decoded_output.append(huffman_tree.symbol)\n huffman_tree = tree_head\n \n # decoded output is a list and we are trying to get a string \n string = ''.join([str(item) for item in decoded_output])\n return string", "title": "" }, { "docid": "a323c0a5f11c1d9f0f96a419e70af4b4", "score": "0.5656753", "text": "def encode(self,inFileName,outFileName):\r\n txtFile = open(inFileName,\"rb\")\r\n outFile = open(outFileName,\"w\")\r\n with txtFile as b:\r\n while 1:\r\n reading = b.read(1).decode(\"utf-8\")\r\n if not reading:\r\n break\r\n elif reading not in self.lookup:\r\n self.lookup[reading] = 1\r\n else:\r\n x = int(self.lookup.get(reading))\r\n x = int(x) + int(1)\r\n self.lookup[reading] = x\r\n \r\n txtFile.close()\r\n\r\n for t in self.lookup:\r\n w = Node(t,self.lookup.get(t))\r\n self.heap.append(w)\r\n \r\n heapq.heapify(self.heap)\r\n self.heap.sort()\r\n\r\n while len(self.heap) > 1:\r\n t = heapq.heappop(self.heap)\r\n u = heapq.heappop(self.heap)\r\n v = Node(\"\",int(t.weight) + int(u.weight))\r\n if t <= u:\r\n v.zero = t\r\n v.one = u\r\n else:\r\n v.zero = u\r\n v.one = t\r\n heapq.heappush(self.heap,v)\r\n root = heapq.heappop(self.heap)\r\n self.printNodes(root,\"\")\r\n\r\n for x in self.lookup:\r\n outFile.write(x + \"-\" + str(self.lookup.get(x)))\r\n outFile.write(\"\\n\")\r\n\r\n txtFile = open(inFileName,\"rb\")\r\n\r\n\r\n with txtFile as b:\r\n while 1:\r\n reading = b.read(1).decode(\"utf-8\")\r\n if not reading:\r\n break\r\n else:\r\n outFile.write(\"%s\" % self.lookup2.get(reading))\r\n txtFile.close()\r\n outFile.close()", "title": "" }, { "docid": "9b357190242872f6569a5a7f24fe9f48", "score": "0.5655801", "text": "def compress(uncompressed):\n # uncompressed is a string\n # Build the dictionary.\n dict_size = 256\n dictionary = {chr(i): i for i in range(dict_size)}\n w = \"\"\n result = []\n for c in uncompressed:\n wc = w + c\n if wc in dictionary:\n w = wc\n else:\n result.append(dictionary[w])\n # Add wc to the dictionary.\n dictionary[wc] = dict_size\n dict_size += 1\n w = c\n # Output the code for w.\n if w:\n result.append(dictionary[w])\n return result", "title": "" }, { "docid": "0d49d633bc470977b03f6ea36299d9a0", "score": "0.5630084", "text": "def generate_compressed(text, codes):\n finalList = []\n byte = \"\"\n start = 0\n end = 8\n\n for number in list(text):\n byte += codes[number]\n #print(byte)\n while start < len(byte):\n finalList.append(byte[start: end])\n start = end\n end += 8\n #print(finalList)\n\n return bytes([bits_to_byte(c) for c in finalList])", "title": "" }, { "docid": "cfeb50885d198af09abb0b9b183b04ac", "score": "0.55930924", "text": "def packword(word):\n packed, bits = 0, 28\n for letter in word:\n lettercode = code.index(letter)\n DebugPrint('lettercode for \"%s\" is 0x%x' % (letter, lettercode))\n length = 4 + (lettercode > 7) + (2 * (lettercode > 15)) # using True as 1\n lettercode += (8 * (length == 5)) + ((96 - 16) * (length == 7)) 
# True=1\n DebugPrint('length of huffman code is %d' % length)\n packed = (packed << length) + lettercode\n DebugPrint('packed is now: 0x%08x' % packed)\n bits -= length\n packed <<= bits + 4\n if word != unpack(packed):\n sys.stderr.write('packword: error: word \"%s\" packed as 0x%08x, \"%s\"\\n' % (\n word, packed, unpack(packed)))\n sys.exit(1)\n else:\n DebugPrint('packed: 0x%08x' % packed)\n return packed", "title": "" }, { "docid": "76079bb742df346ad8bc23d2e21fef14", "score": "0.5574277", "text": "def compress(input):\n \n #\"A valid input consists of one or more upper case english letters A-Z\"\n #Comment out the next two lines to accept invalid inputs. Manual testing showed they are handled smoothly, ex: @@@@ -> @@2\n if not input.isalpha() or not input.isupper():\n return \"Invalid input. A valid input consists of one or more upper case english letters A-Z\"\n #The first character of the input is always included in the compression\n output = input[0:1]\n count = 1\n #This flag is set when we have a run of 11 of the same character in a row\n too_long = False\n for i in range(1, len(input)):\n if input[i] != input[i-1] or too_long:\n #This is the start of a new run, either because the characters didn't match or because the run was too long\n #Append a digit, if needed, for the length of the previous run\n if count > 1:\n output += str(count - 2)\n too_long = False\n #Add the character that begins the next run.\n output += input[i]\n count = 1\n else:\n #The characters match, so we increase the length of this run\n count += 1\n #We display the second instance of a character\n if count == 2:\n output += input[i]\n #We can only compress so much, so if the length of a run is too high we break it up\n elif count > 10:\n #set the flag for it be broken up on the next pass through the loop\n too_long = True\n #After the last character, we may need a digit for replaced characters\n if i == len(input) - 1:\n if count > 1:\n output += str(count - 2)\n return output", "title": "" }, { "docid": "e02cdc60b938d37b9b739004399af36a", "score": "0.5547646", "text": "def compress(data, blocksize=DEFAULT_BLOCK_SIZE):\n comp = HuffmanCompressor(blocksize)\n return comp.compress(data) + comp.flush()", "title": "" }, { "docid": "5e95c8096320a0fb889b87c52b5b30a2", "score": "0.5541536", "text": "def compress(filename):\n with open(filename, 'rb') as f:\n uncompressed = array.array(\"B\", f.read())\n\n codewords = {}\n # initialize codewords for ASCII characters\n for i in range(256):\n codewords[struct.pack(\">H\", i)] = chr(i)\n\n outname = filename + \"test.zl\"\n outfile = open(outname, 'wb')\n # compress using LZW compression\n index = 256\n string = chr(uncompressed[0])\n for elem in uncompressed[1:]:\n symbol = chr(elem)\n if (string + symbol) in codewords.values():\n string = string + symbol\n else:\n codewords[index.to_bytes(2, 'big')] = string + symbol\n position = list(codewords.values()).index(string)\n outfile.write(struct.pack(\"<H\", position))\n index += 1\n string = symbol\n outfile.write(struct.pack(\"<H\", list(codewords.values()).index(string)))", "title": "" }, { "docid": "e703f800d4e6217f494fd4a69619b63d", "score": "0.5526157", "text": "def combine_nodes(self):\n node1 = self.node_list[0]\n node2 = self.node_list[1]\n # create a new node thats combines the two lowest frequency nodes\n combo_str = node1.key + node2.key\n new_node = HuffmanNode(combo_str)\n new_node.freq = node1.freq + node2.freq\n\n # ********* attempt at graphical stuff **************\n # add nodes\n 
self.pygraph.add_node(node1)\n self.pygraph.add_node(node2)\n self.pygraph.add_node(new_node)\n\n # set the new nodes children based off of lowest frequency\n if node1 < node2:\n new_node.leftchild = node1\n new_node.rightchild = node2\n\n # create edges between parent and children nodes\n self.pygraph.add_edge(new_node, node1, label='0', color='red')\n self.pygraph.add_edge(new_node, node2, label='1', color='blue')\n else:\n new_node.leftchild = node2\n new_node.rightchild = node1\n # create edges between parent and children nodes\n self.pygraph.add_edge(new_node, node2, label='0', color='red')\n self.pygraph.add_edge(new_node, node1, label='1', color='blue')\n\n\n # # show progression of huffman tree\n # self.pygraph.write('temp.dot')\n # self.pygraph.layout(prog='dot')\n # self.pygraph.draw('temptreePic.png')\n #\n # img = Image.open('temptreePic.png')\n # img.show()\n\n # set bit type. This corresponds to the encoding path\n new_node.leftchild.bit_type = '0'\n new_node.rightchild.bit_type = '1'\n\n # pop the two lowest frequency characters off of the list\n self.node_list.pop(0)\n self.node_list.pop(0)\n\n # put new node into the list and re-sort to give a min heap\n self.node_list.insert(0, new_node)\n self.node_list.sort()\n\n # briefly sleep and then close the open picture.\n # An attempt to make it look like the tree is building\n # time.sleep(2)", "title": "" }, { "docid": "75291928915f62ebc65aef26fd7e8573", "score": "0.55026746", "text": "def no_compression_size(freq: Dict) -> int:\n total = 0\n for k in freq:\n # Add the frequency of each character because each character is one byte\n total += freq[k]\n return total", "title": "" }, { "docid": "5459b5f90509579e56082cd188c99e76", "score": "0.55024654", "text": "def generateCodeword(heap):\n while heap.size > 1:\n n1 = removeMin(heap)\n n2 = removeMin(heap)\n newFreq = n1.aFreq + n2.aFreq\n for i in range(len(n1.symbols)):\n symbol = n1.symbols[i]\n symbol.codeword = '0' + symbol.codeword\n for i in range(len(n2.symbols)):\n symbol = n2.symbols[i]\n symbol.codeword = '1' + symbol.codeword\n newSymbols = n1.symbols + n2.symbols\n node = createNode(newFreq, newSymbols)\n add(heap, node)", "title": "" }, { "docid": "5477892df823996ab0f64a6b8f8263c6", "score": "0.550105", "text": "def get_huffman_tree(text, do_count=False):\n if do_count:\n text = count(text, sort=True)\n # construct a deque containing leaves out of the frequencies returned by\n # count. 
store the letter in an attr of each node\n initial_weights = deque([BinaryTree(l[1], leaf=True, letter=l[0])\n for l in text])\n combined_weights = deque()\n\n while len(initial_weights) + len(combined_weights) > 1:\n least_two_nodes = []\n # (for loop over range(2) = do this twice)\n for _ in range(2):\n if not initial_weights or not combined_weights:\n least_two_nodes.append(\n (initial_weights or combined_weights).popleft())\n elif initial_weights[0].root <= combined_weights[0].root:\n least_two_nodes.append(initial_weights.popleft())\n else:\n least_two_nodes.append(combined_weights.popleft())\n left, right = least_two_nodes\n node = BinaryTree(left.root + right.root)\n node.attach_left(left)\n node.attach_right(right)\n combined_weights.append(node)\n root = (initial_weights or combined_weights).popleft()\n return root", "title": "" }, { "docid": "ece8006fc6c35e41ed6e43d2a6844963", "score": "0.5451689", "text": "def main():\n parser = argparse.ArgumentParser(description='generate code strings given a collection of text using Huffman coding.')\n parser.add_argument('-i','--in_file', type=str, help='file to encode', required=True)\n parser.add_argument('-e','--encoding', type=str, help='encoding file', required=True)\n parser.add_argument('-o','--out_file', type=str, help='save encoded file', required=True)\n args = parser.parse_args()\n\n encoding = load_encoding(args.encoding)\n clear_text = load_file(args.in_file)\n ascii_chars = char_to_ascii(clear_text)\n encoded_text = huffman_encode(ascii_chars, encoding)\n save_file(encoded_text, args.out_file)", "title": "" }, { "docid": "40b5f93209b09e60be4aebe529d4e1ba", "score": "0.54115933", "text": "def encode_hamming(bits: list) -> list:\n new_bits = list.copy(bits)\n bits_amount = len(new_bits)\n index = 1\n # adding parity bits in correct places to the list\n while index <= bits_amount:\n new_bits.insert(index - 1, 0)\n bits_amount += 1\n index *= 2\n # algorithm step 5\n index = 1\n while index <= bits_amount:\n i = index\n summ = 0\n while i <= bits_amount:\n for j in range(index):\n # we need to check if we haven't exceeded data amount (in case the Hamming code is not full)\n if i <= bits_amount:\n summ += new_bits[i - 1]\n i += 1\n i += index\n new_bits[index - 1] = summ % 2\n index *= 2\n # after encoding the information with Hamming code we add an extra parity bit (SECDED)\n summ = 0\n for i in range(bits_amount):\n summ += new_bits[i]\n new_bits.append(summ % 2)\n return new_bits", "title": "" }, { "docid": "0727c5f9e241f0eeed67a79852d19187", "score": "0.5407204", "text": "def generate_compressed(text, codes):\n the_text = list(text)\n new_str = ''\n for item in the_text:\n new_str += codes[item]\n while len(new_str) % 8 != 0:\n new_str += \"0\"\n result = []\n for i in range(int(len(new_str) / 8)):\n new_byte = bits_to_byte(new_str[8 * i: 8 * (i + 1)]) # padding\n result.append(new_byte)\n return bytes(result)", "title": "" }, { "docid": "09943ea60a2927d1496879b978fe5366", "score": "0.54037935", "text": "def BuildCodeTree(self, freq_table, divisor):\n def MN(x):\n if isinstance(x, int):\n return x\n return ord(x)\n if len(freq_table) < 2:\n # that'd be stupid...\n raise StandardError()\n\n leaves = deque()\n internals = deque()\n for elem in freq_table:\n (sym, freq) = elem\n if freq == 0:\n freq = 1.0/512\n weight = freq * 1.0 / divisor\n leaves.append( (weight, MN(sym), 0, []) )\n\n # freq_table is (symbol, count)\n # code_tree is [freq, symbol, depth, children]\n leaves = deque(sorted(leaves))\n internals = deque()\n while 
len(leaves) + len(internals) > 1:\n children = []\n while len(children) < 2:\n if leaves and internals:\n if leaves[0][0] <= internals[0][0]:\n children.append(leaves.popleft())\n else:\n children.append(internals.popleft())\n elif leaves:\n children.append(leaves.popleft())\n else:\n children.append(internals.popleft())\n internals.append([(children[0][0] + children[1][0]), None,\n max(children[0][2], children[1][2]) + 1, children])\n if len(leaves):\n raise StandardError()\n self.code_tree = internals.pop()\n return self.code_tree[2]", "title": "" }, { "docid": "861ee545b51ef2026a7fec0be978f161", "score": "0.54018384", "text": "def get_codes(node, val=''):\n\n # huffman code for current node\n newVal = val + str(node.code)\n\n if(node.left):\n get_codes(node.left, newVal)\n if(node.right):\n get_codes(node.right, newVal)\n\n if(not node.left and not node.right):\n codes[node.symbol] = newVal\n \n return codes", "title": "" }, { "docid": "58147bd3cb756afbf4b33a0653556243", "score": "0.5367168", "text": "def compressionFunction(input1, input2, bitSize):\n alpha = 'abcd'\n subBitSize = bitSize / 4\n rounds = 64\n \n for x in range(rounds):\n blocks = {}\n newBlocks = {}\n \n for y in range(4):\n blocks[alpha[y]] = input2[y*subBitSize:y*subBitSize+subBitSize]\n \n shiftSize = subBitSize / 2 - 1\n a_j = j(blocks['a'], blocks['b'], blocks['c'])\n a_k = k(blocks['a'], a_j, blocks['d'])\n newBlocks['a'] = add(a_k, blocks['b'], subBitSize)\n newBlocks['b'] = blocks['a']\n newBlocks['c'] = leftShift(blocks['d'], shiftSize)\n newBlocks['d'] = add(blocks['b'], blocks['c'], subBitSize)\n \n for z in range(4):\n input2[z*subBitSize:z*subBitSize+subBitSize] = newBlocks[alpha[z]]\n\n output = input1 ^ input2\n \n return output", "title": "" }, { "docid": "bf50728f4565ed10a13d1f546b422e56", "score": "0.53586096", "text": "def generate_uncompressed(tree, text, size):\n\n # convert get_codes(tree) so that its values (codes) are mapped to keys (symbols)\n swapDict = {}\n codesDict = get_codes(tree)\n for symbol, bit in codesDict.items():\n swapDict[bit] = symbol\n\n i = 0\n myStr = \"\"\n finalList = []\n\n # 1. for loop on the range of size:\n for a in range(size):\n\n for b in swapDict:\n\n if len(b) > len(myStr) and i < len(text):\n #2. for every bit in text: use 'bytes to bits' on that index of text\n myStr += byte_to_bits(text[i])\n #3. 
add this conversion to an emptry string \"myStr\" and increment the index by 1\n i += 1\n #4 check if myStr[:len(b)] is the current key in swapDict\n if myStr[:len(b)] == b:\n finalList.append(swapDict[b])\n\n myStr = myStr[len(b):]\n\n break\n return bytes(finalList)", "title": "" }, { "docid": "bf0d68291c934a5c23ee1cd38daef678", "score": "0.5358498", "text": "def main(csv_file: str, enc_file: str) -> None:\n df = pd.read_csv(csv_file, parse_dates=['ts'])\n\n df['T'] = df['T'].apply(lambda x: round(x))\n\n unique_values = df['T'].unique()\n values = df['T']\n freq_dict = {val: (values == val).sum() for val in unique_values}\n\n codec = HuffmanCodec.from_frequencies(freq_dict)\n save_encoder(codec, enc_file)", "title": "" }, { "docid": "2727df0b695abf2477072f8f599f20e0", "score": "0.53329396", "text": "def encode(seq, channels):\r\n map = {'A':0, 'C':1, 'G': 2, 'T':3}\r\n out = [[[]] for _ in range(channels)]\r\n for c in seq:\r\n for i in range(channels):\r\n if i == map[c]: out[i][0].append(1)\r\n else: out[i][0].append(0)\r\n return out", "title": "" }, { "docid": "fb8bff6df2de4ecc91c52148d9ec754b", "score": "0.53049654", "text": "def build_one_hot_encode_map(field, values):\n # Find all the categorical values and assign them a number\n n = 0\n values_iter = iter(values)\n header = next(values_iter)\n for val in values_iter:\n if val not in _one_hot_encode_map[field]:\n _one_hot_encode_map[field][val] = n\n n += 1\n\n return ('{0}_{1}'.format(header, k) for k in _one_hot_encode_map[field].keys())", "title": "" }, { "docid": "8d1289ab399fd0bf0f552173ed2a8c75", "score": "0.52627945", "text": "def to_binary(self):\n bit_couplings = {}\n\n # Iterate over monomial terms.\n for index, value in self.couplings.items():\n\n # The monomial term will be represented by a linear combination of monomials of bits.\n # Each monomial is (by definition) the product of a certain number of bits; iterate\n # over this number.\n for num_bit_factors in range(len(index) + 1):\n\n # Iterate over the combinations of num_bit_factors variables to which the\n # num_bit_factors bits relate.\n for bit_factors_positions in combinations(range(len(index)), num_bit_factors):\n variables_at_positions = [index[position] for position in bit_factors_positions]\n\n # For each variable related to a bit, iterate over the bits representing\n # the variable.\n for bits_indices in product(*[\n range(len(self.variables[variable_index][\"bits\"]))\n for variable_index in variables_at_positions\n ]):\n\n bit_couplings_index = tuple(sorted(self.variables[variable_index][\"bits\"][bits_indices[i]] for i, variable_index in enumerate(variables_at_positions)))\n\n # If a bit is repeated in the bits monomial, the latter can be simplified (b^2 = b). 
This\n # simplification is performed by a conversion to a set.\n bit_couplings_index = tuple(sorted(list(set(bit_couplings_index))))\n\n a = [(self.variables[variable_index][\"max\"] - self.variables[variable_index][\"min\"]) / (2 ** len(self.variables[variable_index][\"bits\"]) - 1) * 2 ** bits_indices[i] for i, variable_index in enumerate(variables_at_positions)]\n\n b = [self.variables[variable_index][\"min\"] for i, variable_index in enumerate(index) if i not in bit_factors_positions]\n\n bit_couplings_value = reduce(lambda x, y: x * y, a + b, 1) * value\n\n if bit_couplings_index not in bit_couplings:\n bit_couplings[bit_couplings_index] = 0\n\n bit_couplings[bit_couplings_index] += bit_couplings_value\n\n return bit_couplings", "title": "" }, { "docid": "b19c91525d3b2f839cd672aa3291627d", "score": "0.52609473", "text": "def compress(S):\n \n def recurse(T, memo):\n if T == \"\":\n return \"\"\n elif T[0] == '\\n':\n return recurse(T[1:])\n elif memo == 0:\n zerocount = zero_count(T)\n zerocount = min(MAX_RUN_LENGTH, zerocount)\n return str(numToBinary(COMPRESSED_BLOCK_SIZE, zerocount)) + recurse(T[zerocount:],1) #GLOBAL COMPRESSED_BLOCK_SIZE\n elif memo == 1:\n onecount = one_count(T)\n onecount = min(MAX_RUN_LENGTH, onecount)\n return str(numToBinary(COMPRESSED_BLOCK_SIZE, onecount)) + recurse(T[onecount:],0) #GLOBAL \n else:\n pass\n\n return recurse(S, 0)\n\n \n \n\n # I use numToBinary() instead of the built in bin() to avoid the '0b' \n # that leads output from bin(), and also to format the string to \n # k bytes", "title": "" }, { "docid": "8db8da24bb0efd743f24a72117d5473e", "score": "0.52333206", "text": "def improved_hash(text, debug=False):\n CHR_COUNT_START = 64 # we convert to chars; char 65 is A\n t_text = stripped_string(text)\n t_hash = ''.join(sorted(t_text, key=lambda t: freqsort[t]))\n letset = set(t_hash)\n break_letter = t_hash[-1:]\n if break_letter not in ENGLISH_LETTER_LIST:\n break_letter = ENGLISH_LETTER_LIST[-1]\n compressed_hash = ''\n for letter in ENGLISH_LETTER_LIST:\n if letter in letset:\n count = len(re.findall(letter, t_hash))\n count = (count if count < 48 else 48)\n # this is a hacky way of sanity checking our values.\n # if this shows up as a match we'll ignore it\n compressed_hash += chr(count + CHR_COUNT_START)\n else:\n if freqsort[letter] > freqsort[break_letter]:\n if len(compressed_hash) % 2:\n # an uneven number of bytes will cause unicode errors?\n compressed_hash += chr(64)\n break\n compressed_hash += chr(64)\n\n if len(compressed_hash) == 0:\n print('hash length is zero?')\n return '@@'\n return compressed_hash", "title": "" }, { "docid": "8c3893863423377c97c0599baa80c48d", "score": "0.52298105", "text": "def decode(self,inFileName,outFileName):\r\n txtFile = open(inFileName,\"r\")\r\n outFile = open(outFileName,\"wb\")\r\n encoded_str = str()\r\n decoded = str()\r\n with txtFile as r:\r\n while 1:\r\n reading = r.readline()\r\n if not reading:\r\n break\r\n else:\r\n reading = str(reading).split('-')\r\n if len(reading) == 2:\r\n self.lookup[reading[0]] = reading[1]\r\n else:\r\n encoded_str = str(reading[0])\r\n \r\n for x in self.lookup:\r\n y = Node(x,int(self.lookup.get(x)))\r\n self.heap.append(y)\r\n \r\n heapq.heapify(self.heap)\r\n self.heap.sort()\r\n\r\n while len(self.heap) > 1:\r\n t = heapq.heappop(self.heap)\r\n u = heapq.heappop(self.heap)\r\n v = Node(\"\",int(t.weight) + int(u.weight))\r\n if t <= u:\r\n v.zero = t\r\n v.one = u\r\n else:\r\n v.zero = u\r\n v.one = t\r\n heapq.heappush(self.heap,v)\r\n \r\n root = 
heapq.heappop(self.heap)\r\n self.printNodes(root,\"\")\r\n\r\n length = 0\r\n lengthMin = 5000\r\n for x in self.lookup2:\r\n if len(str(self.lookup2.get(x))) > length:\r\n length = len(str(self.lookup2.get(x)))\r\n \r\n for x in self.lookup2:\r\n if len(str(self.lookup2.get(x))) < lengthMin:\r\n lengthMin = len(str(self.lookup2.get(x)))\r\n \r\n start = 0\r\n end = length\r\n while start < len(encoded_str):\r\n while int(end - start) >= lengthMin:\r\n temp = str(encoded_str[start:end])\r\n if temp in list(self.lookup2.values()):\r\n decoded = str(decoded) + str(list(self.lookup2.keys())[list(self.lookup2.values()).index(temp)])\r\n start = end\r\n end = start + length\r\n if end >= len(encoded_str):\r\n end = len(encoded_str)\r\n break\r\n else:\r\n end = end - 1\r\n if int(end - start) < lengthMin:\r\n start = end\r\n end = start + length\r\n if end > len(encoded_str):\r\n end = len(encoded_str) \r\n \r\n print(decoded)\r\n for x in decoded:\r\n outFile.write(x.encode(\"utf-8\"))", "title": "" }, { "docid": "3f688236e0bc7a3d8b7a34c32202aaa8", "score": "0.5226041", "text": "def hfm(files,comp):\n\tlog.log(f'hfm {\"-c\"*comp}{\"-d\"*(not comp)} {\" \".join(files)}')\n\tlog.log(\"Importing 'huffman' module...\")\n\ttry:\n\t\tfrom _includes.huffman import compress_file,decompress_file\n\texcept:\n\t\tlog.log(\"'huffman' module not found.\")\n\t\treturn\n\tfor file in files:\n\t\tif not access(file,F_OK):\n\t\t\tlog.log(f\"'{path(file)}' not found.\")\n\t\t\tprint(f\"'{file}' not found.\")\n\t\t\tcontinue\n\t\tif comp:\n\t\t\tcompress_file(path(file))\n\t\t\tprint(f\"'{file}' successfully compressed.\")\n\t\telse:\n\t\t\tdecompress_file(path(file))\n\t\t\tprint(f\"'{file}' successfully decompressed.\")", "title": "" }, { "docid": "e2ba68d5b9821dc8af00b8a4776fbac1", "score": "0.5217697", "text": "def generate_compressed(text, codes):\r\n byte_list = []\r\n codes_ = ''\r\n for item in text:\r\n codes_ += codes[item]\r\n remainder = len(codes_) - (len(codes_) // 8) * 8\r\n for i in range(len(codes_) // 8):\r\n byte_list.append(codes_[8 * i: 8 * i + 8])\r\n if len(codes_) % 8 != 0:\r\n remainder_code = codes_[len(codes_) // 8 * 8:]\r\n remainder_code += '0' * (8 - remainder)\r\n byte_list.append(remainder_code)\r\n list1 = []\r\n for item in byte_list:\r\n list1.append(bits_to_byte(item))\r\n if len(list1) > 0:\r\n ibyte = bytes([list1[0]])\r\n for i in range(1, len(list1)):\r\n ibyte += bytes([list1[i]])\r\n return ibyte\r\n else:\r\n return bytes([])", "title": "" }, { "docid": "76fb87cb22f0acb074bcbb733f2b25ab", "score": "0.52073383", "text": "def compress(inputString):\n compressedString = \"\"\n count = 1\n secondChar = \"\"\n if(inputString != None and inputString != \"\"):\n compressedString += inputString[:1]\n #print compressedString\n\n for i in range(len(inputString)-1):\n if(inputString[i] == inputString[i+1]):\n count += 1\n else:\n if(count > 1):\n compressedString += str(count)\n\n compressedString += inputString[i+1]\n count = 1\n\n if (count > 1 ):\n compressedString += str(count)\n\n elif(inputString == None):\n compressedString = None\n return compressedString\n elif(inputString == \"\"):\n compressedString = \"\"\n return compressedString\n\n if(len(compressedString) == len(inputString)):\n return inputString\n else:\n print compressedString\n return compressedString", "title": "" }, { "docid": "ea9010e475b25751bf80832e49a61ffa", "score": "0.5198662", "text": "def _compressKerningPhase1(kerning):\n # create a dict of form {(glyph1, value) : set(glyph2s)}\n compressed = {}\n 
for (glyph1, glyph2), value in kerning.items():\n k = (glyph1, value)\n if k not in compressed:\n compressed[k] = set()\n compressed[k].add(glyph2)\n return compressed", "title": "" }, { "docid": "a973059d8db35bc280ed2c5b28fde493", "score": "0.51813567", "text": "def encode(buf):\n out = [1] # save one free space for code\n code_idx = 0 # index to be replaced with code\n code = 1 # the next code to generate -- initialy 1\n for i in range(len(buf)):\n if buf[i] == 0:\n # zero will be replaced with a code\n # save previous code\n out[code_idx] = code\n code = 1\n code_idx = len(out)\n # insert place-hoder for next code\n out.append(code)\n else:\n out.append(buf[i])\n code += 1\n if code == 255:\n # save the max-run of 254 non-zero chars\n out[code_idx] = code\n code = 1\n code_idx = len(out)\n out.append(code)\n\n\n # save the last code\n out[code_idx] = code\n\n # terminate its\n out.append(0)\n\n return out", "title": "" }, { "docid": "e5b9151059788316846927cf5a453dda", "score": "0.51800495", "text": "def decode_hamming(bits: list) -> list:\n bits_amount = len(bits)\n index = 1\n wrong_bit_index = 0\n is_fixed = \"\"\n while index < bits_amount:\n i = index\n summ = 0\n while i < bits_amount:\n for j in range(index):\n # we need to check if we haven't exceeded data amount (in case the Hamming code is not full)\n if i < bits_amount:\n summ += bits[i - 1]\n i += 1\n i += index\n if summ % 2 != 0:\n wrong_bit_index += index\n index *= 2\n\n if 0 < wrong_bit_index <= bits_amount:\n wrong_bit_index -= 1\n if bits[wrong_bit_index]:\n bits[wrong_bit_index] = 0\n else:\n bits[wrong_bit_index] = 1\n summ = 0\n # checking the additional parity bit\n for i in range(bits_amount - 1):\n summ += bits[i]\n if summ % 2 == bits[-1]:\n is_fixed = \"F\"\n else:\n return [\"R\"]\n\n index = 1\n new_bits = []\n for i in range(bits_amount - 1):\n if i + 1 == index:\n index *= 2\n else:\n new_bits.append(bits[i])\n if is_fixed:\n new_bits.append(is_fixed)\n return new_bits", "title": "" }, { "docid": "2b2e9ab263cd241cba3ff295c310082d", "score": "0.51631564", "text": "def lz_encoding(t):\n\n n = len(t)\n i = 0\n c = list()\n\n while i < n:\n l, dist = find_the_longest_substring(t, i) # find the longest repeated substring starting at i\n if l == 0: # t[i] appears for the first time\n c.append((0, t[i]))\n i = i + 1\n else: # that is, it does exist a copy with length l > 0\n c.append((dist, l))\n i = i + l\n\n return c", "title": "" }, { "docid": "4be7cba7bfcb7ec56a417f3fe43c2444", "score": "0.5148446", "text": "def hash(self, word):\n res = 0\n for i in range(len(word)):\n res += ord(word[i]) * (RabinKarp.FP**i)\n return res", "title": "" }, { "docid": "e231b811660417a97df036af4a4f6927", "score": "0.5147589", "text": "def generate_uncompressed(tree, text, size):\n the_bits = ''\n uncompress_list = []\n current = tree\n\n for byte in text:\n the_bits += byte_to_bits(byte)\n for bit in the_bits:\n if bit == '0':\n current = current.left\n # keep going down the tree until you reach a leaf\n elif bit == '1':\n current = current.right\n if current.symbol is not None:\n uncompress_list.append(current.symbol)\n if len(uncompress_list) == size:\n # once it equals size, then we're done\n break\n current = tree\n\n return bytes(uncompress_list)", "title": "" }, { "docid": "5dd33050feef66534378c397f924fca6", "score": "0.51413554", "text": "def full_zip(self: object) -> None:\n self.bw_encoder = BWEncoder(self.path)\n self.bw_encoder.encode()\n \n self.huff_encoder = HuffEncoder(self.bw_encoder.bwt_output)\n 
self.huff_encoder.encode()", "title": "" }, { "docid": "67278a2933652ff942f32327a17863e3", "score": "0.5130303", "text": "def compress(self, inputstring):\n if len(inputstring) % 2 == 1:\n inputstring = \"0\"+inputstring\n outstring = \"\"\n y = 0\n for x in inputstring:\n if y == 0:\n z = x\n y = 1\n elif y == 1:\n z += x\n outstring += self.charlist[int(z)]\n y = 0\n return outstring", "title": "" }, { "docid": "3de202e2a5b22edf1579a13822b8174e", "score": "0.5127153", "text": "def charFreq(text):\n print \"Character Frequency\"\n print \"------ ---------\"\n textcount = sortFreqs(getFreqs(makeDict(text)[0], makeDict(text)[1])) \n makeHisto(textcount)", "title": "" }, { "docid": "13000ded614d71beeae027bffb181872", "score": "0.51259714", "text": "def _calc_frequencies(self):\r\n self.symbols = {}\r\n for symbol in self.list:\r\n self.symbols[symbol] = self.symbols.get(symbol, 0) + 1", "title": "" }, { "docid": "c2c1cac5722b9ff1b55d86283e6adc78", "score": "0.5121479", "text": "def one_hot_encoding(data):\n encoded_data = []\n value = list(set(data))\n value_cnt = len(value)\n for i in range(len(data)):\n data_tmp = np.zeros(value_cnt)\n for j in range(value_cnt):\n if data[i] == value[j]:\n data_tmp[j] = 1\n encoded_data.append(data_tmp)\n continue\n return encoded_data", "title": "" }, { "docid": "198fd4c7a1dd18f63571ffdc8dfb2e3a", "score": "0.51138365", "text": "def _compressbits(bitvector, wordsize=32):\n ans = []\n for start in range(0, len(bitvector), wordsize):\n compressed = 0\n for i in range(wordsize):\n if i + start < len(bitvector) and bitvector[i + start]:\n compressed += 2**i\n ans.append(compressed)\n\n return ans", "title": "" }, { "docid": "6d190ec964b6cfdc37891bfa241d18b1", "score": "0.5112729", "text": "def compress (inf, outf=None, scheme='Crack'):\n if outf == None: # read from list, return list\n if schemes[scheme]['header'] != \"\":\n outf = [(schemes[scheme]['header'])]\n else:\n outf = []\n prev = \"\"\n for word in inf:\n num = 0\n while num < len(prev) and \\\n num < len(word) and \\\n prev[num] == word[num] and \\\n num < len(schemes[scheme]['numarr']):\n num += 1\n outf.append(schemes[scheme]['numarr'][num] + word[num:])\n prev = word\n return outf\n else: # read from file, write to file\n if schemes[scheme]['header'] != \"\":\n print >>outf, schemes[scheme]['header']\n prev = \"\"\n for word in inf:\n num = 0\n while num < len(prev) and \\\n num < len(word) and \\\n prev[num] == word[num] and \\\n num < len(schemes[scheme]['numarr']):\n num += 1\n outf.write(schemes[scheme]['numarr'][num] + word[num:])\n prev = word\n return None", "title": "" }, { "docid": "8910bf793619a03680ad027835e07694", "score": "0.5100882", "text": "def generateCorrectionsTable():\n\tcorrections = [-1] * (1<<11)\n\t\n\tcorrections[0] = 0\n\tfor weight in range(1, 4): \n\t\tfor t in util.counterUtils.SubsetIterator(range(23), weight): \n\t\t\ttint = reduce(lambda x, y: x+y, map(lambda x: 1<<x, t))\n\t\t\tcorrections[getSyndrome(tint)] = tint\n\treturn corrections", "title": "" }, { "docid": "a702a9e63632c10959cb543fff0b55c4", "score": "0.5096264", "text": "def assign_huffman_in():\n huffmandatastream.runlength.next = rle_outputs.runlength\n huffmandatastream.vli_size.next = rle_outputs.size\n huffmandatastream.vli.next = rle_outputs.amplitude\n huffmandatastream.data_valid.next = rle_outputs.read_enable\n rle_fifo_empty.next = rle_outputs.fifo_empty\n rle_outputs.buffer_sel.next = huffmandatastream.buffer_sel", "title": "" }, { "docid": "c828e69dbebba2205599eda7f0a93bbc", "score": 
"0.5081681", "text": "def encoding(text, bit_length):\n bit_len = int((bit_length / 8) - 11)\n bit_text = bin(str2num(text)).replace('0b', '0')\n bit_text_list = text_split(bit_text, bit_len * 8)\n bit_text_list = [int(i, 2) for i in bit_text_list]\n return bit_text_list", "title": "" }, { "docid": "8464040943cb7ef4736c59bc3adcdba3", "score": "0.50793535", "text": "def encode(document):\n encoded = [(len(list(run)), symbol) for symbol, run in groupby(document)]\n return encoded", "title": "" }, { "docid": "adaf2bb9dd74317ce6dfb42ba3dafe7d", "score": "0.50326353", "text": "def encode(self, data):\n if type(self.root) is Leaf: # special case of degenerate tree\n return '0' * len(data)\n code = []\n for sym in data:\n code += self.__encode_symbol(sym)\n return ''.join(code)", "title": "" }, { "docid": "6f6f8530ee223866f19ebec56c8a05d1", "score": "0.5031904", "text": "def _compressKerningPhase2(kerning):\n # create a dict of form {(glyph2s, value) : set(glyph1s)}\n compressed = {}\n for (glyph1, value), glyph2List in kerning.items():\n k = (tuple(sorted(glyph2List)), value)\n if k not in compressed:\n compressed[k] = set()\n compressed[k].add(glyph1)\n return compressed", "title": "" }, { "docid": "d7112ab1c717858e0f3a61705518ab96", "score": "0.5005971", "text": "def gen_codewords(n, q):\n orig = string.ascii_uppercase[:n]\n perms = [''.join(p) for p in permutations(orig)]\n mainOut = []\n\n for perm in perms:\n subOut = ['0' * n]\n for support in q.split():\n newE = str(perm)\n for letter in support:\n newE = str.replace(newE, letter, '1')\n newE = ''.join('0' if i.isalpha() else i for i in newE)\n subOut.append(newE)\n #we cannot cast to a set later without making frozensets\n mainOut.append(frozenset(subOut))\n \n #this step removes any duplicates\n mainOut = set(mainOut)\n \n #and we return in an easier to manage output\n return [sorted(tuple(e)) for e in mainOut]", "title": "" }, { "docid": "1969eee889e558214e74d27b8edcb7e3", "score": "0.50021356", "text": "def counter_huff():\n\n if huffmancntrl.start:\n enable_huff_read.next = True\n rle_outputs.read_enable.next = True\n\n else:\n rle_outputs.read_enable.next = False\n\n if enable_huff_read:\n enable_huff_read_s.next = True\n enable_huff_read.next = False\n\n if enable_huff_read_s:\n if not rle_outputs.fifo_empty:\n counter.next = counter + 1\n else:\n counter.next = 0\n rle_outputs.read_enable.next = False\n\n if counter >= value:\n rle_outputs.read_enable.next = not rle_outputs.read_enable\n counter.next = 0\n if huffmancntrl.ready or rle_outputs.fifo_empty:\n rle_outputs.read_enable.next = False\n enable_huff_read_s.next = False\n\n if huffmancntrl.ready:\n rle_outputs.read_enable.next = False\n enable_huff_read_s.next = False\n counter.next = 0", "title": "" }, { "docid": "196076cdcc2d2d620199ebd0f890d177", "score": "0.49987584", "text": "def char_freq(filename):\n #Can once again use the .get function\n #This time .get(x,0) will look through the dictionary for x and return \n #0 if x is not found. \n #Then add 1 so it will put a 1 in our result dictionary if x is found\n \n ## Prof G - Interesting, this counts characters, white space, and CR's.\n ## Prof G - Did you intend to do that. 
If so, for non-printable \n ## Prof G - characters like CR, you might want to print the hex code\n ## Prof G - associated with the character\n\n file = open(filename) #opens the file\n\n text = file.read() #reads the file and splits it into lines \n file.close() \n \n result = {} #sets our result as an empty dictionary.\n #the function will fill\n \n for char in text: #iterate through each character in our text\n result[char] = result.get(char, 0) + 1 #\n keylist = list(result.keys()) #retrieved dictionary keys as a list and\n keylist.sort() #sorted it \n for k in keylist :\n print(k + ': ' + str(result[k]) + '\\n') #prints the results in \n #individual lines", "title": "" }, { "docid": "7440f06d1394e7b82b1ae32e4c04e5db", "score": "0.4998099", "text": "def encode(n, length=None, check_digit=False):\r\n n = abs(n) # positive integers only\r\n if length:\r\n length = int(length)\r\n if int(n) > 32**length:\r\n raise OverflowError('%d bigger than 32**%d' % (n, length))\r\n else:\r\n length = int(math.ceil(math.log(int(n), 32)))\r\n # Map binary string to base 32\r\n padded_bin = bin(n)[2:].rjust(5*length,'0')\r\n code = ''.join([ FW_MAP[padded_bin[i*5:i*5+5]] for i in range(0, length) ])\r\n return code + digit(code) if check_digit else code", "title": "" }, { "docid": "734e145ee7826ffce868f5fdd3ac753b", "score": "0.4994799", "text": "def one_hot_encode(x):\n\n unique_vals = set(x.ravel())\n n_cols = len(unique_vals) - 1 \n ohe_x = np.zeros((x.shape[0],n_cols))\n for (row,col) in enumerate(x):\n if col < n_cols:\n ohe_x[int(row),int(col)] = 1\n return ohe_x", "title": "" }, { "docid": "c849f81f4378e9868daf0401dd2a5368", "score": "0.49744153", "text": "def onehot_encoder(seqs, vocab_dict):\n \n onehot_seqs = [] \n\n for seq in seqs:\n onehot_seq = []\n \n integer_encoded = [vocab_dict[char] for char in seq]\n \n for value in integer_encoded:\n onehot_vec = [0 for _ in range(len(vocab_dict.keys()))]\n onehot_vec[value - 1] = 1\n onehot_seq.append(onehot_vec)\n \n onehot_seqs.append(onehot_seq)\n\n return onehot_seqs", "title": "" }, { "docid": "2ec77f7797490658b568b13c158e4cbe", "score": "0.4972765", "text": "def encode_onehot():\n return sklearn.preprocessing.OneHotEncoder(categories='auto', sparse=False, handle_unknown='ignore')", "title": "" }, { "docid": "410d0f1f672b8f3d1148af10f1953394", "score": "0.49677393", "text": "def encode(self, x):\n h1 = F.relu(self.fc1(x))\n h2 = F.relu(self.fc21(h1))\n return self.fc211(h2), self.fc221(h2)", "title": "" }, { "docid": "1b929dc8b47021998c0b2efa1c2aee01", "score": "0.4967482", "text": "def compress(self, tokens, rate=0.5, return_lemma=False):\n tree = collections.defaultdict(lambda: [])\n token_dict = {}\n CCs = []\n conjs = []\n for token in tokens:\n if token['rel'] == 'punct': # Remove punctuation\n continue\n if token['rel'] == 'cc': # Remember CCs\n CCs.append(token)\n if token['rel'] == 'conj': # Remember conjunctions\n conjs.append(token)\n tree[token['head']].append(token)\n token_dict[token['idx']] = token\n #\n for token in conjs:\n for sibling in tree[token['head']]:\n if sibling['rel'] == 'cc':\n # Add link from cc to conj\n tree[sibling['idx']].append(token)\n #\n paths = self.traverse(tree)\n #dict([(x['idx'], x) for x in functools.reduce(lambda a,b: a+b, tree.values())])\n \"\"\"ranking = sorted([\n ([node for node in tree[path[-2]]\n if node['idx'] == path[-1]\n ][0]['tfidf'],\n path)\n for path in paths\n ], reverse=True)\"\"\"\n try:\n ranking = sorted([(token_dict[path[-1]]['tfidf'], path) for path in paths], reverse=True)\n 
except KeyError:\n return \"\" # Return empty line for empty path set (failed compression)\n output = set()\n assert rate > 0 and rate < 1\n last_token = None\n for _, path in ranking:\n if len(output) > len(token_dict)*rate and last_idx != path[-1]:\n break\n last_idx = path[-1]\n for idx in path:\n if idx != 0:\n output.add(idx)\n #\n \"\"\"for cc in CCs:\n siblings = tree[cc['head']]\n i = [node['idx'] for node in siblings].index(cc['idx'])\n left, right = siblings[i-1], siblings[i+1]\n if left['idx'] in output and right['idx'] in output:\n output.add(cc['idx'])\"\"\"\n #\n #return str(round(float(len(output))/len(token_dict)*100))+'%: '+' '.join([token_dict[idx]['token'] for idx in sorted(output)])\n #return ' '.join([token_dict[idx]['token'] for idx in sorted(output)])\n if return_lemma:\n return [token_dict[idx]['lemma'] for idx in sorted(output)]\n else:\n return [token_dict[idx]['token'] for idx in sorted(output)]", "title": "" }, { "docid": "e97186c701d6edd492c7ef2124bd0b23", "score": "0.4966586", "text": "def part1():\r\n used = 0\r\n for i in range(128):\r\n hsh = knot_hash(\"hxtvlmkl-\" + str(i))\r\n row = str(bin(int(hsh, 16))[2:].zfill(128))\r\n row = list(\"0\"*(128-len(row)) + row)\r\n for space in row:\r\n if space == '1':\r\n used += 1\r\n return used", "title": "" }, { "docid": "a794ec910a04516c6ca3a084d90822d7", "score": "0.49572015", "text": "def frequencySort(self, s):\n\n # Create unique (char, frequency) tuples and add to list\n letter_freq = []\n for char in s:\n count = s.count(char)\n if not (char, count) in letter_freq:\n letter_freq.append((char, count))\n\n sorted_s = \"\"\n # Remove most-frequent chars from list, concat to string, until list empty\n while letter_freq:\n curr_max = max([freq for letter, freq in letter_freq])\n char = [letter for letter, freq in letter_freq if freq == curr_max][0]\n sorted_s += char * curr_max\n letter_freq.remove((char, curr_max))\n return sorted_s", "title": "" }, { "docid": "784a5dacc85cc56932a67b802bcfbd50", "score": "0.49552047", "text": "def _convert_freq2code(self):\n def get_CN0code(signal):\n signal = signal.split(\"_\")\n const = signal[0]\n try: \n freq = signal[1]\n except LookupError:\n return np.nan\n #\n if const == \"GPS\":\n if freq == \"L1CA\": return \"Sig1\"\n elif freq == \"L2C\": return \"Sig2\"\n elif freq == \"L5\": return \"Sig3\"\n else: return np.nan\n elif const == \"GAL\":\n if freq == \"L1BC\": return \"Sig1\"\n elif freq == \"E5a\": return \"Sig2\"\n elif freq == \"E5b\": return \"Sig3\"\n else: return np.nan\n #elif const == \"GLO\":\n # if freq == \"L1CA\": return \"Sig1\"\n # elif freq == \"L2C\": return \"Sig2\"\n # else: return np.nan\n #elif const == \"BDS\": \n # if freq == \"B1\": return \"Sig1\"\n # elif freq == \"B2\": return \"Sig2\"\n # elif freq == \"B3\": return \"Sig3\"\n # else: return np.nan\n elif const == \"GEO\": # SBAS\n if freq == \"L1\" or freq == \"L1CA\": return \"Sig1\"\n elif freq == \"L5\": return \"Sig2\"\n else: return np.nan\n #elif const == \"QZS\": # const name might change, verify!\n # if freq == \"L1CA\": return \"Sig1\"\n # elif freq == \"L2C\": return \"Sig2\"\n # elif freq == \"L5\": return \"Sig3\"\n # else: return np.nan\n #elif const == \"IRN\": # const name might change, verify!\n # if freq == \"B1\": return \"Sig1\"\n # else: return np.nan\n else: return np.nan\n \n self.df[\"Signal\"] = self.df[\"Signal\"].astype(str).apply(get_CN0code)\n # Drop nan values in \"Signal\" column\n self.df.dropna(subset=[\"Signal\"], inplace=True) \n\n return self.df", 
"title": "" }, { "docid": "c592c517fcb20f1ffb7674fdf77d4aff", "score": "0.49510717", "text": "def frequencySort(self, s):\n # Create a counter\n counter = Counter(s)\n heap = []\n new_string = ''\n\n # Create a min-heap\n for key, val in counter.items():\n heapq.heappush(heap, (val, key))\n\n # Pop an element from the heap, until it gets empty\n while heap:\n count, char = heapq.heappop(heap)\n\n # Repeat the char N times\n while count:\n new_string += char\n count -= 1\n\n # Flip the string\n return new_string[::-1]", "title": "" }, { "docid": "fb32f23fb268b749625c013c16b527ac", "score": "0.49270913", "text": "def _apply_compress_scheme(decompressed_input_bin) -> tuple:\n return apply_lz4_compression(decompressed_input_bin)", "title": "" }, { "docid": "d8f39097ec31fc9c15a1a35f25fc749c", "score": "0.4921862", "text": "def PREFIX_CB(self, bts):\r\n time = 8\r\n bins = bin(int(bts,16))[2:]\r\n bins = \"0\"*(8-len(bins)) + bins\r\n type_bits = bins[0:2]\r\n \r\n level_bits = bins[2:5]\r\n bit = int(level_bits, 2)\r\n \r\n reg_bits = bins[5:8]\r\n register = REG_TABLE[reg_bits]\r\n\r\n # CB Table\r\n\r\n if type_bits == \"00\": # Rotations\r\n pass\r\n\r\n if type_bits == \"01\": # Test bits\r\n if register == \"hl\":\r\n time = 16\r\n val = self.mem[self.reg.hl]\r\n else:\r\n val = self.reg.get_register(register)\r\n self.reg.test(val,bit)\r\n\r\n if type_bits == \"10\": # Resets\r\n if register == \"hl\":\r\n time = 16\r\n bits = bin(self.mem[self.reg.hl])[2:]\r\n bits = bits[:bit] + \"0\" + bits[bit:]\r\n self.mem[self.reg.hl] = int(bits,2)\r\n else:\r\n bits = bin(self.reg.get_register(register))[2:]\r\n bits = bits[:bit] + \"0\" + bits[bit:]\r\n self.reg.set_register(register, int(bits,2))\r\n\r\n if type_bits == \"11\": # Sets\r\n if register == \"hl\":\r\n time = 16\r\n bits = bin(self.mem[self.reg.hl])[2:]\r\n bits = bits[:bit] + \"1\" + bits[bit:]\r\n self.mem[self.reg.hl] = int(bits,2)\r\n else:\r\n bits = bin(self.reg.get_register(register))[2:]\r\n bits = bits[:bit] + \"1\" + bits[bit:]\r\n self.reg.set_register(register, int(bits,2))\r\n\r\n return time", "title": "" }, { "docid": "dfb284808730d11dbf9ef9c089c653ea", "score": "0.49214345", "text": "def build_tree(self):\n # continue combining nodes together until the list is of size 1\n while len(self.node_list) > 1:\n self.combine_nodes()\n\n # set root of huffman tree equal to the last 'mega' node we created\n # which contains all chars\n self.root = self.node_list[0]", "title": "" }, { "docid": "34d8ebb0b6188513085f26483b9dcbed", "score": "0.4908716", "text": "def bits_str(bits):\n return ' '.join(sorted(list(bits)))", "title": "" } ]
31d033cc82d32e22921df7903354c64b
Method that generates the dataset and performs augmentation in case enabled
[ { "docid": "70c0eb3495fba542eedac61fb1518c1d", "score": "0.0", "text": "def get_dataset(config):\n # dataset statistics: https://discuss.pytorch.org/t/normalization-in-the-mnist-example/457\n data_mean = 0.1307\n data_stddev = 0.3081\n\n transforms = [torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize([data_mean], [data_stddev])]\n \n # TRAIN transforms\n train_transforms = transforms + config['augmentation']*[torchvision.transforms.RandomCrop(14, padding=4), \n torchvision.transforms.RandomHorizontalFlip()]\n transform_train = torchvision.transforms.Compose(train_transforms)\n\n # TEST transforms\n transform_test = torchvision.transforms.Compose(transforms)\n\n training_set = config['dataset']('./data/mnist/', train=True, transform=transform_train, nb=1000)\n test_set = config['dataset']('./data/mnist/', train=False, transform=transform_test, nb=1000)\n\n if config['verbose']:\n print(training_set)\n print(test_set)\n\n # create training generator\n training_loader = torch.utils.data.DataLoader(\n training_set,\n batch_size=config['batch_size'],\n shuffle=True,\n num_workers=2\n )\n # create test generator\n test_loader = torch.utils.data.DataLoader(\n test_set,\n batch_size=config['batch_size'],\n shuffle=False,\n num_workers=2\n )\n\n return training_loader, test_loader", "title": "" } ]
[ { "docid": "07a991037dd21221b0577b5aaca8428f", "score": "0.6980123", "text": "def test_data_augmentation():\n data = bz2.BZ2File('src/tests/dataprocessing/affectNet_sample.pbz2', 'rb')\n df = cPickle.load(data)\n\n x, y = affect.preprocess_data(df)\n x, y = affect.clean_data_and_normalize(x, y)\n x_train = split_data(x, y)[0]\n datagen = data_augmentation(x_train)\n\n assert type(datagen) == ImageDataGenerator", "title": "" }, { "docid": "b322236ee772f0253d4cffefc02bcd27", "score": "0.69348776", "text": "def augmentate(dataset_basedir, augment_type):\n # specify type of file to be augmented\n ext = \".wav\"\n # set up training directory\n train_dir = dataset_basedir + \"train/\"\n # set up augmentation directory\n augment_type_dir = train_dir + \"augmented_data/\" + augment_type + \"/\"\n # ensure that directory for augmented data exists. If not, create the directory\n if not os.path.isdir(train_dir + \"augmented_data/\"):\n os.mkdir(train_dir + \"augmented_data/\")\n # if the augmented data has already been generated, exit\n if os.path.isdir(augment_type_dir):\n return\n # create directory where augmented data will be stored\n os.mkdir(augment_type_dir)\n # file_id will be used to create unique file names\n file_id = 0\n # loop through original training set\n for root, dirs, files in os.walk(train_dir):\n # do not augment data that is already augmented\n if \"augmented_data\" in root:\n continue\n # for each file in original training set\n for f in files:\n _, file_ext = os.path.splitext(f)\n file_ext.lower()\n # if file is a wav file\n if file_ext == ext:\n # load the wav file\n wav, sample_rate = librosa.load(os.path.join(root, f), sr=None)\n # do noise addition\n if augment_type == \"add_noise\":\n wav = add_noise(wav)\n # else do time stretching\n elif augment_type == \"stretch_time\":\n wav = stretch_time(wav)\n # or else do pitch shifting\n elif augment_type == \"shift_pitch\":\n wav = shift_pitch(wav, sample_rate)\n # output the new wav file generated\n new_f = f[0:2] + str(file_id) + ext\n sf.write(os.path.join(augment_type_dir, new_f), wav, sample_rate)\n file_id += 1\n return", "title": "" }, { "docid": "78cc4c37aef32c95e2fcf730b26236d1", "score": "0.66568416", "text": "def do_augmentation(data):\n\n print \"Augmenting data...\"\n n_tracks = len(set(data['track_id']))\n samples_per_track = data['track_id'].count(0)\n for track_id in range(n_tracks):\n # randomly choose three segments for each track\n for random_choice in np.random.choice(range(samples_per_track), 3):\n segment_index = random_choice + track_id * samples_per_track\n # Apply each time-stretch to the segment\n stretched_segments = [time_stretch(data['data'][segment_index], factor)\n for factor in [0.2, 0.5, 1.2, 1.5]]\n # Then apply pitch-shift to all of those\n augmentations = [pitch_shift(segment, factor)\n for segment in stretched_segments\n for factor in [-5, -2, 2, 5]]\n\n # Append the extra samples to the training set\n data['data'] += augmentations\n data['track_id'] += [data['track_id'][segment_index]] * len(augmentations)\n data['labels'] += [data['labels'][segment_index]] * len(augmentations)\n print \" done.\"", "title": "" }, { "docid": "b5f75e8dfde91fce7c949fc6d593e3ab", "score": "0.6601623", "text": "def augment(self, seqs):\n # reset data lists\n self.A_raw_data = []\n self.A_Lx = []\n self.A_Ly = []\n self.A_Rx = []\n self.A_Ry = []\n self.A_Bx = []\n self.A_By = []\n self.A_border_x = []\n self.A_border_y = []\n # append original non augmented data\n self.A_raw_data.append(self.raw_data)\n 
self.A_Lx.append(self.L['Lx'])\n self.A_Ly.append(self.L['Ly'])\n self.A_Rx.append(self.R['Rx'])\n self.A_Ry.append(self.R['Ry'])\n self.A_Bx.append(self.B['Bx'])\n self.A_By.append(self.B['By'])\n self.A_border_x.append([0, 0, len(self.raw_data[:, 0]), len(self.raw_data[:, 0])])\n self.A_border_y.append([0, len(self.raw_data[0, :]), len(self.raw_data[0, :]), 0])\n\n if self.perform_augment:\n # embed data in a larger numpy array filled with zeros\n (x_dim, y_dim) = np.shape(self.raw_data)\n\n embd_raw = np.zeros((x_dim*3, y_dim*3))\n embd_raw[x_dim:2*x_dim, y_dim:2*y_dim] = np.array(self.raw_data)\n # iterate through augmentation sequences\n seq_len = len(seqs)\n\n # prevent adding to the same list over and over again\n end_seqs = seqs.copy()\n if self.swap_axes:\n swap = iaa.Sequential(\n [\n iaa.Affine(rotate=(90, 90)),\n iaa.Flipud(1),\n ])\n # only swap and do nothing else\n end_seqs.append(swap)\n\n # add the swaped augmentations as well\n for k in range(seq_len):\n swap_seq = iaa.Sequential(\n [\n swap,\n seqs[k]\n ])\n end_seqs.append(swap_seq)\n\n for k, seq in enumerate(end_seqs):\n seq_det = seq.to_deterministic()\n A_Lx_i = []\n A_Ly_i = []\n A_Rx_i = []\n A_Ry_i = []\n A_Bx_i = []\n A_By_i = []\n\n self.A_raw_data.append(seq_det.augment_image(embd_raw))\n\n # augment Left transition\n for keypoints in self.Keypoints_L:\n aug_keypoints = seq_det.augment_keypoints(ia.KeypointsOnImage(keypoints, shape=np.shape(embd_raw)))\n coords_array = aug_keypoints.to_xy_array()\n A_Lx_i.append(coords_array[:, 0])\n A_Ly_i.append(coords_array[:, 1])\n\n # augment right transition\n for keypoints in self.Keypoints_R:\n aug_keypoints = seq_det.augment_keypoints(ia.KeypointsOnImage(keypoints, shape=np.shape(embd_raw)))\n coords_array = aug_keypoints.to_xy_array()\n A_Rx_i.append(coords_array[:, 0])\n A_Ry_i.append(coords_array[:, 1])\n\n # augment Left transition\n for keypoints in self.Keypoints_B:\n aug_keypoints = seq_det.augment_keypoints(ia.KeypointsOnImage(keypoints, shape=np.shape(embd_raw)))\n coords_array = aug_keypoints.to_xy_array()\n A_Bx_i.append(coords_array[:, 0])\n A_By_i.append(coords_array[:, 1])\n # transform borders as well\n border_key = []\n cords = [(x_dim, y_dim),\n (x_dim, 2*y_dim-1),\n (2*x_dim-1, 2*y_dim-1),\n (2*x_dim-1, y_dim)]\n\n for (x, y) in cords:\n border_key.append(ia.Keypoint(x=x, y=y))\n corner_aug = seq_det.augment_keypoints(\n ia.KeypointsOnImage(border_key, shape=np.shape(embd_raw))\n ).to_xy_array()\n\n # Write the augmented points into list\n\n self.A_Bx.append(A_Bx_i)\n self.A_By.append(A_By_i)\n\n # make sure that for swapped images the left & right transitions are changed as well\n if k < seq_len:\n self.A_border_x.append(corner_aug[:, 0])\n self.A_border_y.append(corner_aug[:, 1])\n self.A_Lx.append(A_Lx_i)\n self.A_Ly.append(A_Ly_i)\n self.A_Rx.append(A_Rx_i)\n self.A_Ry.append(A_Ry_i)\n\n # change left & right transitions\n else:\n # change order of points (somehow the obvious A_Rx_i[::-1]\n # does not work\n A_Rx_i_rev = []\n for el in A_Rx_i:\n A_Rx_i_rev.append(el[::-1])\n A_Ry_i_rev = []\n for el in A_Ry_i:\n A_Ry_i_rev.append(el[::-1])\n A_Ly_i_rev = []\n for el in A_Ly_i:\n A_Ly_i_rev.append(el[::-1])\n A_Lx_i_rev = []\n for el in A_Lx_i:\n A_Lx_i_rev.append(el[::-1])\n\n rolled_corner = np.roll(corner_aug, 1, axis=0)\n self.A_border_x.append(rolled_corner[::-1, 0])\n self.A_border_y.append(rolled_corner[::-1, 1])\n self.A_Lx.append(A_Rx_i_rev)\n self.A_Ly.append(A_Ry_i_rev)\n self.A_Rx.append(A_Lx_i_rev)\n self.A_Ry.append(A_Ly_i_rev)", 
"title": "" }, { "docid": "10abf34d7bf6cf465d4c9a9072ab77d0", "score": "0.65915203", "text": "def augment_data(self, images_path: str, train:float = 0.7, image_format:str = \".png\"):\r\n count = 0\r\n # dir = 'train_data'\r\n\r\n dir_image_train = 'train_data/images/train'\r\n dir_image_val = 'train_data/images/val'\r\n dir_label_train = 'train_data/labels/train'\r\n dir_label_val = 'train_data/labels/val'\r\n\r\n folder_err = self.create_train_data_folder()\r\n if folder_err:\r\n print('Please remove existing trin_data folder and try again.')\r\n return 1\r\n\r\n # image_format = \".png\"\r\n # train = 0.7\r\n test = 1 - train\r\n files_to_process = sorted(os.listdir(images_path))\r\n print('Num of files: ' + str(len(files_to_process)))\r\n # udelat sort souboru!!!\r\n\r\n for filename in files_to_process:\r\n print('Processing: ' + filename)\r\n\r\n if filename.endswith(image_format.lower()) or filename.endswith(image_format.upper()):\r\n title, ext = os.path.splitext(os.path.basename(filename))\r\n print(images_path)\r\n print(filename)\r\n image = self.read_image(images_path, filename)\r\n if filename.endswith(\".txt\"):\r\n xmlTitle, txtExt = os.path.splitext(os.path.basename(filename))\r\n if xmlTitle == title:\r\n # bboxes = getCoordinates(filename)\r\n bboxes = self.read_yolo(images_path + xmlTitle + '.txt')\r\n print(images_path + xmlTitle + '.txt')\r\n for aug in self.augment:\r\n img = copy.deepcopy(image)\r\n transform, name_tag = self.aug_functions[aug]()\r\n dice = random.uniform(0, 1)\r\n try:\r\n transformed = transform(image=img, bboxes=bboxes)\r\n transformed_image = transformed['image']\r\n transformed_bboxes = transformed['bboxes']\r\n name = title + '_' + str(count) + '_' + name_tag + '.jpg'\r\n # print(name)\r\n if dice <= train:\r\n p_name = '/content/' + dir_image_train + '/' + name\r\n else:\r\n p_name = '/content/' + dir_image_val + '/' + name\r\n\r\n cv2.imwrite(p_name, transformed_image)\r\n print('Writing ' + name)\r\n # print(transformed_bboxes)\r\n # writeVoc(transformed_bboxes, count, transformed_image)\r\n # pTitle='/content/'+dir+'/'+title\r\n if dice <= train:\r\n p_title = '/content/' + dir_label_train + '/' + title + '_' + str(count) + '_' + name_tag\r\n else:\r\n p_title = '/content/' + dir_label_val + '/' + title + '_' + str(count) + '_' + name_tag\r\n\r\n self.write_yolo(transformed_bboxes, p_title)\r\n count = count + 1\r\n except:\r\n print(\"Bounding Box exception!!!\")\r\n pass", "title": "" }, { "docid": "7b55f51265dc67791388d0a4384bfa94", "score": "0.6588373", "text": "def augment_transition_data(train=False, evaluate=False):\n print('Starting data processing ...')\n # define augmentation transformations\n rot = iaa.Sequential(\n [\n iaa.Affine(rotate=(-8, 8)),\n ])\n\n inc_capacities = iaa.Sequential(\n [\n iaa.Affine(rotate=(45, 45)),\n iaa.Affine(scale={\"x\": (0.6, 1.7), \"y\": (0.6, 1.7)}), # before (0.7, 1.5)\n iaa.Affine(rotate=(-45, -45))\n ])\n\n scale = iaa.Sequential(\n [\n iaa.Affine(scale={\"x\": (0.6, 1.7), \"y\": (0.6, 1.7)}), # before (0.65, 1.4)\n ]\n )\n\n # put the above transformations together and apply between 1 to all of them\n seq = iaa.SomeOf((1, None),\n [\n rot,\n inc_capacities,\n scale\n ]\n )\n\n # for swap_axes=True twice as many augmentation sequences will be performed\n # i.e. 
the same with swaped axes as well\n augmentation_sequence = [seq] * 8\n\n # Augment training data\n if train:\n # set paths, folders need to be created manually\n train_input_folder = '../data/fine/train/marked/'\n train_output_folder = '../data/fine/train/augmented/'\n\n A_train = augmenter.Augmenter(train_input_folder, train_output_folder,\n perform_augment=True, swap_axes=True)\n then = time.time()\n # perform augmentation\n while A_train.next_file():\n A_train.define_keypoints()\n A_train.augment(augmentation_sequence)\n A_train.save_maps()\n print('Augmenting training data took: {}s'.format(time.time() - then))\n\n # Augment evaluation data\n if evaluate:\n # set paths, folders need to be created manually\n evaluation_input_folder = '../data/fine/evaluation/marked/'\n evaluation_output_folder = '../data/fine/evaluation/augmented/'\n # do not actually augment the data, but still process them\n A_evaluation = augmenter.Augmenter(evaluation_input_folder, evaluation_output_folder,\n perform_augment=False, swap_axes=False)\n then = time.time()\n # perform augmentation\n while A_evaluation.next_file():\n A_evaluation.define_keypoints()\n A_evaluation.augment(augmentation_sequence)\n A_evaluation.save_maps()\n print('Augmenting evaluation data took: {}s'.format(time.time() - then))", "title": "" }, { "docid": "aeb219c122f913b374e51cc9d7c8c42d", "score": "0.64621013", "text": "def augment_data(paths, subsample = False, augment_x = 1, configuration = None,\n\t\t\t\t write_augmented = False,\n\t\t\t\t write_data = False, feature_extraction = 'mfcc',\n\t\t\t\t strategy = 'powers', exercise = 'regression'):\n\tconfig_path = None\n\tif not configuration:\n\t\tconfig_path = paths['primary_config']\n\t\tprint ('Using primary configuration to augment data')\n\telse:\n\t\tconfig_path = configuration\n\n\tconfig = yaml.load(open(config_path, 'r'))\n\n\taugmentation_config = config['DataAugmentation']\n\taugmentation_config['exercise'] = exercise\n\taugmentation_config['feature_kind'] = feature_extraction\n\n\tvalid, parsed_effects, error_message = validate_reduce_fx(augmentation_config)\n\tif not valid:\n\t\tprint (error_message)\n\t\tprint ('Please alter your configuration')\n\t\tsys.exit('Stopping..')\n\n\taugmentation_config['effects'] = parsed_effects\n\n\tprint ('----- ACTIVE EFFECTS -----')\n\tpprint (augmentation_config['active'])\n\t# augmentation pipline doesn't need this\n\taugmentation_config.pop('active')\n\n\tprint ('----- PERSISTING EFFECTS -----')\n\tpprint (augmentation_config['sustain'])\n\n\tmodels_trained, models_holdout = (config['pipeline_config'].get(x) \\\n\t\t\t\t\t\t\t\t\tfor x in ['train_models', 'test_models'])\n\tall_models = models_trained + [models_holdout]\n\n\tprint ('----- GUITAR MODELS FOR TRAINING -----')\n\tprint ('\\n'.join('{} {}'.format(*v) for v in enumerate(models_trained)))\n\tprint ('----- MODELS FOR HOLDOUT -----')\n\tprint ('\\n'.join('{} {}'.format(*v) for v in enumerate([models_holdout])))\n\n\tchord_paths = paths.get('chord_paths')\n\taugmented_paths = paths.get('augmentation_paths')\n\n\tdef path_by_strat(paths_):\n\t\treturn list(filter(lambda x: x.meta == strategy,paths_))[0]\n\n\tsoundfile_path = path_by_strat(chord_paths)\n\tsoundfile_path = soundfile_path.trace + soundfile_path.extension\n\tdestination = path_by_strat(paths_ = augmented_paths)\n\tuse_soundfiles = glob.glob(soundfile_path)\n\n\tif subsample not in [False, None]:\n\t\tprint ('Subsampling {} files from {} available'.format(subsample, len(use_soundfiles)))\n\t\tuse_soundfiles = 
random.choices(population = use_soundfiles, k = subsample)\n\n\tif write_augmented:\n\t\tprint ('Are you sure you want to augmented {} tracks to \"{}\"?'.format(\n\t\t\t\tlen(use_soundfiles) * augment_x, destination.trace))\n\n\t\tprint ('1 to proceed, any other key to terminate')\n\t\tif int(input()) == 1:\n\n\t\t\t# Build necessary files\n\t\t\tfor model in all_models:\n\t\t\t\tmodel_paths = os.path.join(destination.trace, model, '')\n\t\t\t\tos.makedirs(model_paths, exist_ok = True)\n\t\t\taugmentation_config['write'] = destination.trace\n\n\t\telse:\n\t\t\taugmentation_config['write'] = False\n\n\tmodel_pattern = re.compile('|'.join(all_models))\n\n\tstore_all = []\n\n\tprint ('\\nAUGMENTING DATA, please be patient..')\n\n\twith progressbar.ProgressBar(maxval=(len(use_soundfiles) * augment_x)) as bar:\n\t\ttotal = 0\n\t\tfor ix, sf in enumerate(use_soundfiles):\n\t\t\tfor i in range(augment_x):\n\t\t\t\tstore_all.append(augment_track(sf,\n\t\t\t\t\t\t\t\t\tmodel_selection = model_pattern,\n\t\t\t\t\t\t\t\t\tdestination = destination,\n\t\t\t\t\t\t\t\t\tn = i,\n\t\t\t\t\t\t\t\t\t**augmentation_config\n\t\t\t\t\t\t\t\t\t))\n\t\t\t\ttotal += 1\n\t\t\t\tbar.update(total)\n\n\tlabels, features = zip(*store_all)\n\tall_features = [np.expand_dims(x, axis = 1) for x in features]\n\n\tfeatures = pad(all_features)\n\tlabels = pd.DataFrame(list(labels))\n\n\tif write_data is True:\n\t\tresults_dir = paths.get('reports_path')\n\t\twrite_dir = os.path.join(results_dir(), '')\n\t\tnp.save(write_dir + 'training_X_' + strategy, arr=features)\n\t\tlabels.to_csv(open(write_dir + 'training_Y_' + strategy + '.csv', 'w'))\n\t\tprint ('Features and labels written to \"{}\"'.format(write_dir))\n\n\treturn features, labels", "title": "" }, { "docid": "45bdd8a9e6d8aa0158afa60d0e3f0a2c", "score": "0.6459652", "text": "def custom_dataset():", "title": "" }, { "docid": "17bad98494e372836189916d72f5b9a5", "score": "0.6436107", "text": "def augment_reference_data(train=False, evaluate=False):\n print('Starting data processing ...')\n\n # define augmentation transformations\n rot = iaa.Sequential(\n [\n iaa.Affine(rotate=(-8, 8)),\n ])\n\n inc_capacities = iaa.Sequential(\n [\n iaa.Affine(rotate=(45, 45)),\n iaa.Affine(scale={\"x\": (0.6, 1.7), \"y\": (0.6, 1.7)}), # before (0.7, 1.5)\n iaa.Affine(rotate=(-45, -45))\n ])\n\n scale = iaa.Sequential(\n [\n iaa.Affine(scale={\"x\": (0.6, 1.7), \"y\": (0.6, 1.7)}), # before (0.65, 1.4)\n ]\n )\n\n # put the above transformations together and apply between 1 to all of them\n seq = iaa.SomeOf((1, None),\n [\n rot,\n inc_capacities,\n scale\n ]\n )\n\n # for swap_axes=True twice as many augmentation sequences will be performed\n # i.e. 
the same with swaped axes as well\n augmentation_sequence = [seq] * 8\n\n # Augment training data\n if train:\n # set paths, folders need to be created manually\n train_input_folder = '../../data/coarse/train/marked/'\n train_output_folder = '../../data/coarse/train/augmented/'\n\n A_train = augmenter.Augmenter(train_input_folder, train_output_folder,\n perform_augment=True, swap_axes=True)\n then = time.time()\n # perform augmentation\n while A_train.next_file():\n A_train.define_keypoints()\n A_train.augment(augmentation_sequence)\n A_train.save_maps()\n print('Augmenting training data took: {}s'.format(time.time() - then))\n\n # Augment evaluation data\n if evaluate:\n # set paths, folders need to be created manually\n evaluation_input_folder = '../../data/coarse/evaluation/marked/'\n evaluation_output_folder = '../../data/coarse/evaluation/augmented/'\n # do not actually augment the data, but still process them\n A_evaluation = augmenter.Augmenter(evaluation_input_folder, evaluation_output_folder,\n perform_augment=False, swap_axes=False)\n then = time.time()\n # perform augmentation\n while A_evaluation.next_file():\n A_evaluation.define_keypoints()\n A_evaluation.augment(augmentation_sequence)\n A_evaluation.save_maps()\n print('Augmenting evaluation data took: {}s'.format(time.time() - then))", "title": "" }, { "docid": "0e78e57b17e7be1047d5fd4b31ef7a17", "score": "0.6390825", "text": "def deepaugment_image_generator(X, y, policy, batch_size=64, augment_chance=0.5):\n if type(policy) == str:\n\n if policy==\"random\":\n policy=[]\n for i in range(20):\n policy.append(\n {\n \"aug1_type\": augment_type_chooser(),\n \"aug1_magnitude\":np.random.rand(),\n \"aug2_type\": augment_type_chooser(),\n \"aug2_magnitude\": np.random.rand(),\n \"portion\":np.random.rand()\n }\n )\n else:\n policy_df = pd.read_csv(policy)\n policy_df = policy_df[\n [\"aug1_type\", \"aug1_magnitude\", \"aug2_type\", \"aug2_magnitude\"]\n ]\n policy = policy_df.to_dict(orient=\"records\")\n\n print(\"Policies are:\")\n print(policy)\n print()\n count = 0\n while True:\n ix = np.arange(len(X))\n np.random.shuffle(ix)\n for i in range(len(X) // batch_size):\n _ix = ix[i * batch_size : (i + 1) * batch_size]\n _X = X[_ix]\n _y = y[_ix]\n\n tiny_batch_size = 4\n aug_X = _X[0:tiny_batch_size]\n aug_y = _y[0:tiny_batch_size]\n for j in range(1, len(_X) // tiny_batch_size):\n tiny_X = _X[j * tiny_batch_size : (j + 1) * tiny_batch_size]\n tiny_y = _y[j * tiny_batch_size : (j + 1) * tiny_batch_size]\n if np.random.rand() <= augment_chance:\n aug_chain = np.random.choice(policy)\n aug_chain[\n \"portion\"\n ] = 1.0 # last element is portion, which we want to be 1\n hyperparams = list(aug_chain.values())\n\n aug_data = augment_by_policy(tiny_X, tiny_y, *hyperparams)\n\n aug_data[\"X_train\"] = apply_default_transformations(\n aug_data[\"X_train\"]\n )\n\n aug_X = np.concatenate([aug_X, aug_data[\"X_train\"]])\n aug_y = np.concatenate([aug_y, aug_data[\"y_train\"]])\n if count %10 == 0:\n plt.imshow(aug_X)\n plt.savefig('num'+count+'.png')\n else:\n aug_X = np.concatenate([aug_X, tiny_X])\n aug_y = np.concatenate([aug_y, tiny_y])\n yield aug_X, aug_y", "title": "" }, { "docid": "8100712d8649f5437cb26849ceadef07", "score": "0.6374343", "text": "def _get_augmented_data(self, type=''):\r\n with tf.name_scope('augment_data'):\r\n def true_fn():\r\n augmented_data = dict(zip(['target_image', 'target_label', 'target_weight'],\r\n random_affine_augment([self.data['target_image'], self.data['target_label'],\r\n 
self.data['target_weight']],\r\n interp_methods=['linear', 'nearest', 'linear'],\r\n **self.aug_kwargs)))\r\n # augmented_data.update(dict(zip(['atlases_image', 'atlases_label'], random_affine_augment([\r\n # self.data['atlases_image'], self.data['atlases_label']], interp_methods=['linear', 'nearest'],\r\n # **self.aug_kwargs))))\r\n augmented_data.update(dict(zip(['atlases_image', 'atlases_label', 'atlases_weight'],\r\n [self.data['atlases_image'], self.data['atlases_label'],\r\n self.data['atlases_weight']])))\r\n return augmented_data\r\n\r\n return tf.cond(self.train_phase, true_fn, lambda: self.data)", "title": "" }, { "docid": "e5258f70692fd4df33d104b1610563e2", "score": "0.636105", "text": "def augment(self, data, attr, seed=None):\n cfg = self.cfg\n\n if cfg is None:\n return data\n\n # Override RNG for reproducibility with parallel dataloader.\n if seed is not None:\n self.rng = np.random.default_rng(seed)\n\n if 'recenter' in cfg:\n if cfg['recenter']:\n data['point'] = self.recenter(data['point'], cfg['recenter'])\n\n if 'normalize' in cfg:\n data['point'], _ = self.normalize(data['point'], None,\n cfg['normalize'])\n\n if 'rotate' in cfg:\n data['point'] = self.rotate(data['point'], cfg['rotate'])\n\n if 'scale' in cfg:\n data['point'] = self.scale(data['point'], cfg['scale'])\n\n if 'noise' in cfg:\n data['point'] = self.noise(data['point'], cfg['noise'])\n\n if 'ObjectSample' in cfg:\n if not hasattr(self, 'db_boxes_dict'):\n data_path = attr['path']\n # remove tail of path to get root data path\n for _ in range(3):\n data_path = os.path.split(data_path)[0]\n pickle_path = os.path.join(data_path, 'bboxes.pkl')\n if 'pickle_path' not in cfg['ObjectSample']:\n cfg['ObjectSample']['pickle_path'] = pickle_path\n self.load_gt_database(**cfg['ObjectSample'])\n\n data = self.ObjectSample(\n data,\n db_boxes_dict=self.db_boxes_dict,\n sample_dict=cfg['ObjectSample']['sample_dict'])\n\n if cfg.get('ObjectRangeFilter', False):\n data = self.ObjectRangeFilter(\n data, cfg['ObjectRangeFilter']['point_cloud_range'])\n\n if cfg.get('PointShuffle', False):\n data = self.PointShuffle(data)\n\n return data", "title": "" }, { "docid": "a96308fe116986a1d9c4143ed21187df", "score": "0.63507265", "text": "def ds_augment(ds, transform):\n return AugDataset(ds, transform)", "title": "" }, { "docid": "b48d181faae80b01ec3b1703ae81804d", "score": "0.6282673", "text": "def generator(self):\n\n if self.set_split == 'train' and not self.predict:\n data_augmentation = self.params['data_augmentation']\n else:\n data_augmentation = False\n\n it = 0\n while 1:\n if self.set_split == 'train' and it % self.params['num_iterations'] == 0 and \\\n not self.predict and self.params['random_samples'] == -1 and self.params['shuffle']:\n silence = self.dataset.silence\n self.dataset.silence = True\n self.dataset.shuffleTraining()\n self.dataset.silence = silence\n if it % self.params['num_iterations'] == 0 and self.params['random_samples'] == -1:\n self.dataset.resetCounters(set_name=self.set_split)\n it += 1\n\n # Checks if we are finishing processing the data split\n init_sample = (it - 1) * self.params['batch_size']\n final_sample = it * self.params['batch_size']\n batch_size = self.params['batch_size']\n n_samples_split = eval(\"self.dataset.len_\" + self.set_split)\n if final_sample >= n_samples_split:\n final_sample = n_samples_split\n batch_size = final_sample - init_sample\n it = 0\n\n # Recovers a batch of data\n if self.params['random_samples'] > 0:\n num_retrieve = min(self.params['random_samples'], 
self.params['batch_size'])\n if self.temporally_linked:\n if self.first_idx == -1:\n self.first_idx = np.random.randint(0, n_samples_split - self.params['random_samples'], 1)[0]\n self.next_idx = self.first_idx\n indices = range(self.next_idx, self.next_idx + num_retrieve)\n self.next_idx += num_retrieve\n else:\n indices = np.random.randint(0, n_samples_split, num_retrieve)\n self.params['random_samples'] -= num_retrieve\n\n # At sampling from train/val, we always have Y\n if self.predict:\n X_batch = self.dataset.getX_FromIndices(self.set_split,\n indices,\n normalization=self.params['normalization'],\n meanSubstraction=self.params['mean_substraction'],\n dataAugmentation=data_augmentation)\n data = self.net.prepareData(X_batch, None)[0]\n\n else:\n X_batch, Y_batch = self.dataset.getXY_FromIndices(self.set_split,\n indices,\n normalization=self.params['normalization'],\n meanSubstraction=self.params['mean_substraction'],\n dataAugmentation=data_augmentation)\n data = self.net.prepareData(X_batch, Y_batch)\n\n elif self.init_sample > -1 and self.final_sample > -1:\n indices = range(self.init_sample, self.final_sample)\n if self.predict:\n X_batch = self.dataset.getX_FromIndices(self.set_split,\n indices,\n normalization=self.params['normalization'],\n meanSubstraction=self.params['mean_substraction'],\n dataAugmentation=data_augmentation)\n data = self.net.prepareData(X_batch, None)[0]\n\n else:\n X_batch, Y_batch = self.dataset.getXY_FromIndices(self.set_split,\n indices,\n normalization=self.params['normalization'],\n meanSubstraction=self.params['mean_substraction'],\n dataAugmentation=data_augmentation)\n data = self.net.prepareData(X_batch, Y_batch)\n\n else:\n if self.predict:\n X_batch = self.dataset.getX(self.set_split,\n init_sample,\n final_sample,\n normalization=self.params['normalization'],\n meanSubstraction=self.params['mean_substraction'],\n dataAugmentation=False)\n data = self.net.prepareData(X_batch, None)[0]\n else:\n X_batch, Y_batch = self.dataset.getXY(self.set_split,\n batch_size,\n normalization=self.params['normalization'],\n meanSubstraction=self.params['mean_substraction'],\n dataAugmentation=data_augmentation)\n data = self.net.prepareData(X_batch, Y_batch)\n yield (data)", "title": "" }, { "docid": "db2f3d2441466b0df0eb25c5019b692c", "score": "0.62696373", "text": "def _augment_data_with_id(_id, img_size=256, preprocess_fn=xception_input, data_dir=RSNA_TRAIN_DATA, augment_times=5):\n imgs = []\n boneages = []\n dataset_df = pd.read_csv(RSNA_TRAIN_CSV)\n\n boneage = dataset_df.loc[dataset_df[\"id\"]==_id].boneage.values[0] / SCALE\n img_file_name = data_dir + \"/{}.png\".format(_id)\n img = cv2.imread(img_file_name, cv2.IMREAD_GRAYSCALE)\n img = cv2.resize(img, (img_size, img_size))\n\n imgs.append(preprocess_fn(np.array(img, dtype=np.float32)))\n boneages.append(boneage)\n\n if augment_times > 0:\n flipped = cv2.flip(img, 1) # horzational flip\n imgs.append(preprocess_fn(np.array(flipped, dtype=np.float32)))\n boneages.append(boneage)\n for i in range(augment_times):\n angle = np.random.randint(0, 360)\n M = cv2.getRotationMatrix2D(center=(img.shape[0] / 2, img.shape[1] / 2), angle=angle, scale=1)\n dst_ori = cv2.warpAffine(img, M, (img.shape[0], img.shape[1]), borderMode=cv2.BORDER_CONSTANT, borderValue=0)\n dst_flip = cv2.warpAffine(flipped, M, (img.shape[0], img.shape[1]), borderMode=cv2.BORDER_CONSTANT, borderValue=0)\n imgs.append(preprocess_fn(np.array(dst_ori, dtype=np.float32)))\n imgs.append(preprocess_fn(np.array(dst_flip, dtype=np.float32)))\n\n 
boneages.append(boneage)\n boneages.append(boneage)\n\n return imgs, boneages", "title": "" }, { "docid": "42012660335fda9bb9dcc3840079d9bf", "score": "0.6251642", "text": "def data_augmentation_X8(data_json, label_type_arr=None):\n tt_transform = transforms.Compose([transforms.ToTensor()])\n rh_transform = transforms.Compose([transforms.RandomHorizontalFlip(p=1),\n transforms.ToTensor()])\n rv_transform = transforms.Compose([transforms.RandomVerticalFlip(p=1),\n transforms.ToTensor()])\n rr_transform = transforms.Compose([\n transforms.RandomRotation(90, expand=False, center=None,\n interpolation=InterpolationMode.BILINEAR),\n transforms.ToTensor()])\n ra_transform = transforms.Compose([\n transforms.RandomAffine(90, translate=(0.2, 0.2), scale=(0.9, 1.1)),\n transforms.ToTensor()])\n cj_transform = transforms.Compose([\n transforms.ColorJitter(brightness=0.5,\n contrast=0.5, saturation=0.5, hue=0.5),\n transforms.ToTensor()])\n rp_transform = transforms.Compose([\n transforms.RandomPerspective(distortion_scale=0.5, p=1, fill=0,\n interpolation=InterpolationMode.BILINEAR),\n transforms.ToTensor()])\n re_transform = transforms.Compose([\n transforms.ToTensor(), transforms.RandomErasing(p=1)])\n\n dataset_og = Cifar50Dataset(data_json, tt_transform, label_type_arr)\n dataset_rh = Cifar50Dataset(data_json, rh_transform, label_type_arr)\n dataset_rv = Cifar50Dataset(data_json, rv_transform, label_type_arr)\n dataset_ra = Cifar50Dataset(data_json, ra_transform, label_type_arr)\n dataset_rr = Cifar50Dataset(data_json, rr_transform, label_type_arr)\n dataset_cj = Cifar50Dataset(data_json, cj_transform, label_type_arr)\n dataset_rp = Cifar50Dataset(data_json, rp_transform, label_type_arr)\n dataset_re = Cifar50Dataset(data_json, re_transform, label_type_arr)\n\n dataset_arr = [dataset_og, dataset_rh, dataset_rv, dataset_ra, dataset_rr,\n dataset_cj, dataset_rp, dataset_re]\n aug_dataset = ConcatDataset(dataset_arr)\n\n return aug_dataset, dataset_arr", "title": "" }, { "docid": "1d89dfadcf054ed66e1d790e5de3c7c9", "score": "0.62270284", "text": "def augmentationImages(img_path,img_aug_path):\n\n #!rm -rf /content/databaseAug #useful when running the method again for makedirs\n\n img_list = os.listdir(img_path)\n\n for img_name in img_list:\n os.makedirs(img_aug_path + '/' + str(img_name[:-4]))\n\n\n print(\"Creating the augmented dataset...\")\n # List all files in a directory using scandir()\n basepath = img_path\n with os.scandir(basepath) as images:\n for im in images:\n\n imagePath = basepath + '/' + im.name\n print(imagePath)\n k = 0\n\n while (k < 20): \n image = preprocess_images(imagePath)\n \n augPath = img_aug_path+'/'+ im.name[:-4] +'/' + im.name[:-4] + str(k) + '.jpg'\n image = tf.keras.preprocessing.image.img_to_array(image)\n plt.imsave(augPath, image/255)\n k += 1", "title": "" }, { "docid": "103d49bba3f6213c53c867b83cd1d792", "score": "0.62204933", "text": "def augment(input_img, input_angle):\n \n output_images, output_angles = [],[]\n\n \"\"\"\n\n Fun fact: I had this condition around the data flipping code\n but these function calls somehow SIGNIFICANTLY slowed down\n the training process!!!\n\n if ((abs(input_angle) < 0.2 and np.random.random() < 0.10) or \n (abs(input_angle) > 5.0 and np.random.random() < 0.70)) :\n\n \"\"\"\n # Add flipped images and angles to data set\n image_flipped = np.fliplr(input_img)\n angle_flipped = -input_angle\n output_images.append(image_flipped)\n output_angles.append(angle_flipped)\n\n # Add blurred image\n blurred = cv2.GaussianBlur(input_img, 
(5, 5), 0)\n output_images.append(blurred)\n output_angles.append(input_angle)\n \n # Add noisy image\n noise = np.zeros_like(input_img)\n cv2.randn(noise,(0),(45))\n noisy_img = input_img+noise \n output_images.append(noisy_img)\n output_angles.append(input_angle)\n\n return output_images, output_angles", "title": "" }, { "docid": "19a9bc077bd676646e082d5fefbdeefc", "score": "0.6171274", "text": "def do_data_augmentation(images, # [batch_size, x, y, t, channels]\n data_aug_ratio, # between 0.0 and 1.0\n trans_min, # -10\n trans_max, # 10\n rot_min, # -10\n rot_max, # 10\n scale_min, # 0.9\n scale_max): # 1.1\n\n images_ = np.copy(images)\n \n # iterate over each slice in the batch\n for i in range(images.shape[0]):\n\n # ===========\n # translation\n # ===========\n if np.random.rand() < data_aug_ratio:\n \n random_shift_x = np.random.uniform(trans_min, trans_max)\n random_shift_y = np.random.uniform(trans_min, trans_max)\n \n # Apply for each time step and for each channel\n for t in range(images_.shape[3]):\n for channel in range(images_.shape[4]):\n images_[i,:,:,t,channel] = scipy.ndimage.interpolation.shift(images_[i,:,:,t,channel],\n shift = (random_shift_x, random_shift_y),\n order = 1)\n \n # ========\n # rotation\n # ========\n if np.random.rand() < data_aug_ratio:\n \n random_angle = np.random.uniform(rot_min, rot_max)\n\n # Apply for each time step and for each channel\n for t in range(images_.shape[3]):\n for channel in range(images_.shape[4]):\n images_[i,:,:,t,channel] = scipy.ndimage.interpolation.rotate(images_[i,:,:,t,channel],\n reshape = False,\n angle = random_angle,\n axes = (1, 0),\n order = 1)\n \n # ========\n # scaling\n # ========\n if np.random.rand() < data_aug_ratio:\n \n n_x, n_y = images_.shape[1], images_.shape[2]\n \n scale_val = np.round(np.random.uniform(scale_min, scale_max), 2)\n \n # Apply for each time step and for each channel\n for t in range(images_.shape[3]):\n for channel in range(images_.shape[4]):\n images_i_tmp = transform.rescale(images_[i,:,:,t,channel], \n scale_val,\n order = 1,\n preserve_range = True,\n mode = 'constant')\n \n images_[i,:,:,t,channel] = crop_or_pad_slice_to_size(images_i_tmp, n_x, n_y)\n \n return images_", "title": "" }, { "docid": "5f8c4e0e25b10b329ae33cfaeb535123", "score": "0.6162844", "text": "def increase_train_data(self,images,model_path,threshold,save_data=False,save_directory='', selected=[],merge_with_original=False):\n\n not_selected = list(set(np.arange(self._n_input)) - set(selected))\n\n self._class_wise_mean = np.array(self._dataset.groupby(['label']).mean())\n self._class_wise_mean[:,not_selected] = 0.\n\n self._outfile = open(save_directory+'augmented_dataset.csv','w')\n\n\n\n\n correct_array = [0 for i in range(self._n_classes)]\n\n print('-------------- Training Data Augmentation -----------------\\n\\n')\n\n for image in images:\n correct_array = self._train_augmentation( image,model_path,\n correct_array,\n threshold,not_selected,\n save_data=save_data,\n save_directory=save_directory)\n\n print('-----------------------------------------------------------\\n\\n')\n\n self._outfile.close()\n\n if merge_with_original == True:\n with open(save_directory+'augmented_dataset.csv', 'a') as f:\n self._dataset.to_csv(f, header=False,index=False)\n\n print('Dataset is saved at: ',save_directory+'augmented_dataset.csv\\n')\n print('Classified images are saved in {} ending with augmentation_mask.csv\\n'.format(save_directory))\n\n return correct_array", "title": "" }, { "docid": 
"e133f36ecf91955fb752183002c422c1", "score": "0.6158213", "text": "def setup_dataset(self):\r\n\r\n \"\"\"\r\n train_path = \"small_dataset/images/nir/\"\r\n val_path = \"small_dataset/images/nir/\"\r\n test_path = \"small_dataset/images/nir/\"\r\n\r\n train_labels_path = \"small_dataset/labels/\"\r\n val_labels_path = \"small_dataset/labels/\"\r\n test_labels_path = \"small_dataset/labels/\"\r\n \"\"\"\r\n\r\n train_path = f\"{self.cfg.data_dir}/images\"\r\n val_path = f\"{self.cfg.data_dir}/images\"\r\n test_path = f\"{self.cfg.data_dir}/images\"\r\n\r\n train_labels_path = f\"{self.cfg.data_dir}/labels/\"\r\n val_labels_path = f\"{self.cfg.data_dir}/labels/\"\r\n test_labels_path = f\"{self.cfg.data_dir}/labels/\"\r\n\r\n train = os.listdir(train_path)\r\n val = os.listdir(val_path)\r\n test = os.listdir(test_path)\r\n\r\n random.shuffle(train)\r\n random.shuffle(val)\r\n random.shuffle(test)\r\n\r\n train_img_names_index = train[:10000]\r\n val_img_names_index = val[:2000]\r\n test_img_names_index = test[:2000]\r\n\r\n labels_one_hot = {}\r\n k = self.cfg.classes # 8\r\n i=0\r\n for label in listdir_nohidden(train_labels_path):\r\n if label!=\"storm_damage\":\r\n labels_one_hot[label] = np.zeros((k,))\r\n labels_one_hot[label][i] = 1\r\n i+=1\r\n\r\n train_dataset = SegmentationDataset(\"train\", train_img_names_index, labels_one_hot, train_path, train_labels_path, use_cache=True)\r\n val_dataset = SegmentationDataset(\"validation\", val_img_names_index, labels_one_hot, val_path, val_labels_path, use_cache=True)\r\n # test_dataset = SegmentationDataset(\"test\", test_img_names_index, labels_one_hot, test_path, test_labels_path, use_cache=True)\r\n\r\n self.train_dataloader = DataLoader(train_dataset, batch_size=self.batch_size, shuffle=self.shuffle)\r\n self.val_dataloader = DataLoader(val_dataset, batch_size=self.batch_size, shuffle=self.shuffle)\r\n # self.test_dataloader = DataLoader(test_dataset, batch_size=self.batch_size)\r", "title": "" }, { "docid": "8bbea453f39eebe8596e5a74f04bde7d", "score": "0.61561024", "text": "def create_augmented_dataset(X, y):\n\n X_augmented = np.zeros(X.shape)\n\n datagen = ImageDataGenerator(\n rotation_range=90,\n width_shift_range=0.1,\n height_shift_range=0.1,\n horizontal_flip=True)\n\n data = np.ndarray(shape=(X.shape[1], 32, 32, 3))\n for datum in range(X.shape[1]):\n data[datum, :] = X[:, datum].reshape(3, 32, 32).transpose(1, 2, 0)\n for X_batch, _ in datagen.flow(data[datum:datum + 1], y[datum:datum + 1], batch_size=1):\n X_augmented[:, datum] = X_batch[0].transpose(2, 1, 0).reshape(3072)\n break\n\n return X_augmented", "title": "" }, { "docid": "650b910b7a0144eea6c819995586be46", "score": "0.6149345", "text": "def test_augmented_data_control():\n d = ToyDataset()\n X, yn = d.generate(n_samples=512, dataset_type='spirals', noise=.4)\n X_train = torch.Tensor(X).to(device)\n y_train = torch.LongTensor(yn.long()).to(device)\n train = data.TensorDataset(X_train, y_train)\n\n trainloader = data.DataLoader(train, batch_size=len(X), shuffle=False)\n\n f = nn.Sequential(DataControl(),\n nn.Linear(12, 64),\n nn.Tanh(),\n nn.Linear(64, 6))\n\n model = nn.Sequential(Augmenter(augment_idx=1, augment_func=nn.Linear(2, 4)),\n NeuralODE(f, solver='dopri5')\n ).to(device)\n learn = TestLearner(t_span, model, trainloader=trainloader)\n trainer = pl.Trainer(min_epochs=1, max_epochs=1)\n\n trainer.fit(learn)", "title": "" }, { "docid": "71340eb299b10c3e6132aa4b46c63134", "score": "0.6137266", "text": "def train_data_augmentation(imgs, masks, batch_size):\n # 
create two instances with the same arguments\n # create dictionary with the input augmentation values\n data_gen_args = dict(rotation_range = 10, \n width_shift_range = 0.1, \n height_shift_range = 0.1, \n zoom_range = 0.2)\n \n # use this method with both images and masks\n image_datagen = ImageDataGenerator(**data_gen_args)\n mask_datagen = ImageDataGenerator(**data_gen_args)\n \n # provide the same seed and keyword arguments to the fit and flow methods\n seed = 1\n \n # fit the augmentation model to the images and masks with the same seed\n image_datagen.fit(imgs, augment = True, seed = seed)\n mask_datagen.fit(masks, augment = True, seed = seed)\n \n # set the parameters for the data to come from images and masks\n image_generator = image_datagen.flow(imgs, \n batch_size = batch_size, \n shuffle = False, \n seed = seed)\n mask_generator = mask_datagen.flow(masks, \n batch_size = batch_size, \n shuffle = False, \n seed = seed)\n \n # combine generators into one which yields image and masks\n # train_generator = zip(image_generator, mask_generator)\n while True:\n yield(image_generator.next(), mask_generator.next())", "title": "" }, { "docid": "76298dd2fdef80d33bc73bb4239ad933", "score": "0.612494", "text": "def create_dataset():\n batch_size = 4\n\n noise_input = np.random.uniform(-1.0, 1.0, size=[batch_size, 100])\n\n front_images = glob.glob(\"../../processed_data/front_images/*\")\n profile_images = glob.glob(\"../../processed_data/profile_images/*\")\n facial_landmarks = \"../../processed_data/cropped_images\"\n in_prof_img, in_prof_leye, in_prof_reye, in_prof_nose, in_prof_mouth, in_prof_noise,in_front_img = [],[],[],[],[],[],[]\n y = np.ones([2 * batch_size, 1])\n y[batch_size:, :] = 0\n for prof_img in profile_images:\n image_name = prof_img.split(\"/\")[-1].split(\".\")[0]\n\n\n in_prof_img.append(cv2.resize(cv2.imread(prof_img), dsize=(128, 128), interpolation=cv2.INTER_LINEAR))\n\n leye = cv2.imread(os.path.join(facial_landmarks, \"profile_images\", image_name, \"left_eye.jpg\"))\n in_prof_leye.append(cv2.resize(leye, dsize=(40, 40), interpolation=cv2.INTER_LINEAR))\n\n reye = cv2.imread(os.path.join(facial_landmarks, \"profile_images\", image_name, \"right_eye.jpg\"))\n in_prof_reye.append(cv2.resize(reye, dsize=(40, 40), interpolation=cv2.INTER_LINEAR))\n\n nose = cv2.imread(os.path.join(facial_landmarks, \"profile_images\", image_name, \"nose.jpg\"))\n in_prof_nose.append(cv2.resize(nose, dsize=(32,40), interpolation=cv2.INTER_LINEAR))\n\n mouth = cv2.imread(os.path.join(facial_landmarks, \"profile_images\", image_name, \"mouth.jpg\"))\n in_prof_mouth.append(cv2.resize(mouth, dsize=(32,48), interpolation=cv2.INTER_LINEAR))\n\n for prof_img in front_images:\n\n in_front_img.append(cv2.resize(cv2.imread(prof_img), dsize=(128, 128), interpolation=cv2.INTER_LINEAR))\n\n x = in_front_img + in_prof_img\n\n return np.array(x),np.array(y),np.array(in_prof_img), np.array(in_prof_leye), np.array(in_prof_reye),\\\n np.array(in_prof_nose),np.array(in_prof_mouth),np.array(in_front_img), np.array(noise_input)", "title": "" }, { "docid": "0e94300430c5879e956671f4417c50c1", "score": "0.6122736", "text": "def augment(dataset, dataset_def):\n\n data = []\n\n def mirror(row):\n angle = float(row[\"angle\"])\n if angle != 0.0:\n filename = row[\"filename\"]\n img = cv2.imread(dataset + \"\\\\frames\\\\\" + filename)\n flip_img = cv2.flip(img, 1)\n cv2.imwrite(dataset + \"\\\\frames\\\\m-\" + filename, flip_img)\n data.append([\"m-\" + filename, -angle])\n\n dataset_def.apply(mirror, 
axis=1)\n\n data_frame = pd.DataFrame(data, columns=[\"filename\", \"angle\"])\n return pd.concat([dataset_def, data_frame])", "title": "" }, { "docid": "6de9eb210c81483b224c17c11f858962", "score": "0.6116996", "text": "def download(self, data_path):\n if os.path.exists(data_path):\n return\n \n print(\"[BBBC013] Loading images and metadata ...\")\n metadata_DF = pd.read_csv(csv_file)\n metadata_DF = metadata_DF[metadata_DF.column != 2]\n data = np.load(img_file)\n print(data.shape)#.transpose(0, 3, 1, 2)\n \n print(\"[BBBC013] Processing training metadata ...\")\n metadata_train_DF = metadata_DF[[r in ['A', 'B', 'C', 'E', 'F', 'G'] for r in metadata_DF.row]]\n concentration = metadata_train_DF.concentration.fillna(0).values.reshape(-1)\n group = metadata_train_DF.group.apply(lambda s: self.group_names.index(s)).values.reshape(-1)\n targets = np.array(list(zip(group, concentration)))\n\n selected_indexes = [i in metadata_train_DF.index for i in range(len(dataset))]\n selected_images = dataset[selected_indexes]\n\n print(\"[BBBC013] Augmenting training images ...\")\n images = torch.empty(self.n_augmentations*len(selected_images), 2, self.img_size, self.img_size)\n labels = torch.empty(self.n_augmentations*len(selected_images), 2)\n for i in range(self.n_augmentations):\n images[i*len(selected_images):(i+1)*len(selected_images)] = torch.cat([self.aug_transform(img).view(1, 2, self.img_size, self.img_size) for img in selected_images])\n labels[i*len(selected_images):(i+1)*len(selected_images)] = torch.from_numpy(targets)\n \n print(\"[BBBC013] Saving training data at '{}' ...\".format(data_path))\n with h5py.File(data_path, \"w\") as f:\n f.create_dataset(\"BBBC013/train/images\", data=images.numpy())\n f.create_dataset(\"BBBC013/train/concentrations\", data=labels.numpy()[:,1])\n f.create_dataset(\"BBBC013/train/groups\", data=labels.numpy()[:,0].astype(np.uint8))\n f.create_dataset(\"BBBC013/train/binary_index_table\", data=np.where([label[0] in [0, 1] for label in labels.numpy()])[0])\n \n print(\"[BBBC013] Processing test metadata ...\")\n metadata_test_DF = metadata_DF[[r not in ['A', 'B', 'C', 'E', 'F', 'G'] for r in metadata_DF.row]]\n concentration = metadata_test_DF.concentration.fillna(0).values.reshape(-1)\n group = metadata_test_DF.group.apply(lambda s: groups.index(s)).values.reshape(-1)\n targets = np.array(list(zip(group, concentration)))\n\n selected_indexes = [i in metadata_test_DF.index for i in range(len(dataset))]\n selected_images = dataset[selected_indexes]\n \n print(\"[BBBC013] Augmenting test images ...\")\n images = torch.empty(self.n_augmentations*len(selected_images), 2, self.img_size, self.img_size)\n labels = torch.empty(self.n_augmentations*len(selected_images), 2)\n for i in range(self.n_augmentations):\n images[i*len(selected_images):(i+1)*len(selected_images)] = torch.cat([self.aug_transform(img).view(1, 2, self.img_size, self.img_size) for img in selected_images])\n labels[i*len(selected_images):(i+1)*len(selected_images)] = torch.from_numpy(targets)\n \n print(\"[BBBC013] Saving test data at '{}' ...\".format(data_path))\n with h5py.File(data_path, \"w\") as f:\n f.create_dataset(\"BBBC013/test/images\", data=images.numpy())\n f.create_dataset(\"BBBC013/test/concentrations\", data=labels.numpy()[:,1])\n f.create_dataset(\"BBBC013/test/groups\", data=labels.numpy()[:,0].astype(np.uint8))\n f.create_dataset(\"BBBC013/test/binary_index_table\", data=np.where([label[0] in [0, 1] for label in labels.numpy()])[0])\n \n print(\"[BBBC013] Done.\")", "title": "" }, 
{ "docid": "e910f32f95eb4ac75d644d898659df08", "score": "0.6111333", "text": "def generate_samples(samples, augment=True, batch_size=128):\n num_samples = len(samples)\n #print('kind of indices', samples)\n while True:\n # Generate random batch of indices\n for batch in range(0, num_samples, batch_size):\n batch_samples = samples[batch:(batch + batch_size)]\n # create list array\n images = []\n steering_angles = []\n # Read in and preprocess a batch of images\n for batch_sample in batch_samples:\n # for i in range(0, 3):\n #choose a random camero for read in an image\n camera = np.random.randint(len(cameras)) if augment else 1\n source_path = batch_sample[camera]\n #source_path = source_path.split('/')[-1]\n filename = source_path.split('\\\\')[-1]\n #current_path = os.path.join('../data/IMG/',filename)\n current_path = '../data/IMG/' + filename\n image = cv2.imread(current_path)\n # randomize brightness\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n random_brightness = .25 + np.random.uniform()\n image[:, :, 2] = image[:, :, 2] * random_brightness\n image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB)\n\n images.append(image)\n # read steering angle and add an offset depending on\n # the camera possition\n steering_angle = float(batch_sample[STEERING_ANGLE])\n #print(\" Shape of steering angle ater read in: {}:\".format(steering_angle))\n steering_angle = float(steering_angle) + float(left_right_steering_correction[camera])\n #print(\" Shape of steering angle after add or sub wor out: {}:\".format(steering_angle))\n steering_angles.append(steering_angle)\n\n #print('this is the format of current path', current_path)\n #print('this is the format of filename', filename)\n #print('this is the format of source_path', source_path) \n # create a numpy array because keras expect a numpy array as input\n x = np.array(images)\n y = np.array(steering_angles)\n print(\" Shape of image: {}:\".format(x.shape))\n print(\" Shape of steering angle: {}:\".format(y.shape))\n\n # # Randomly flip half of images in the batch\n flip_indices = random.sample(range(x.shape[0]), int(x.shape[0] / 2))\n x[flip_indices] = np.fliplr(x[flip_indices])\n # #x[flip_indices] = cv2.flip(x[flip_indices],1)\n y[flip_indices] = -y[flip_indices]\n x_shape = x.shape\n print(\"Image data shape after flipping images vertical =\", x_shape)\n yield sklearn.utils.shuffle(x, y)", "title": "" }, { "docid": "81690c4fe2f906f5bfd2f681d766faa5", "score": "0.61102974", "text": "def data_augmentation(x_batch):\n\n pad = 2\n\n batch_size, h, w, c = x_batch.shape\n padded_shape = (batch_size, h + 2 * pad, w + 2 * pad, c)\n\n padding = np.zeros(padded_shape)\n padding[:, 2:-2, 2:-2, :] = x_batch\n\n delta_x = np.random.randint(3)\n delta_y = np.random.randint(3)\n\n x_batch = padding[:, delta_y:h+delta_y, delta_x:w+delta_x, :]\n\n # random left-right reflection\n flip_idx = np.random.binomial(n=1, p=0.5, size=batch_size)\n x_batch = np.array([np.fliplr(x_batch[i]) if flip_idx[i] else x_batch[i]\n for i in range(batch_size)])\n\n # random up-down reflection\n flip_idx = np.random.binomial(n=1, p=0.5, size=batch_size)\n x_batch = np.array([np.flipud(x_batch[i]) if flip_idx[i] else x_batch[i]\n for i in range(batch_size)])\n\n return x_batch", "title": "" }, { "docid": "eff9fd71364f9ae235d86b5b84dd8cb5", "score": "0.6107671", "text": "def prepare_data(batch_size=128, valid_frac=0.3):\n # Data augmentation\n n_holes = 1\n length = 16\n\n mean = [x / 255.0 for x in [125.3, 123.0, 113.9]]\n std = [x / 
255.0 for x in [63.0, 62.1, 66.7]]\n\n train_transform = transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.RandomAffine(degrees=0,translate=(0.125, 0.125)),\n transforms.ToTensor(),\n transforms.Normalize(mean, std),\n ])\n\n test_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean, std),\n ])\n \n train_transform.transforms.append(Cutout(n_holes=n_holes, length=length))\n \n # Define dataset objects\n train_dataset = torchvision.datasets.CIFAR10(\n root='./data', train=True, transform=train_transform, download=True)\n valid_dataset = torchvision.datasets.CIFAR10(\n root='./data', train=True, transform=train_transform, download=True)\n test_dataset = torchvision.datasets.CIFAR10(\n root='./data', train=False, transform=test_transform, download=True)\n\n num_train = len(train_dataset)\n indices = list(range(num_train))\n split = int(np.floor(valid_frac * num_train))\n manual_seed=10\n np.random.seed(manual_seed)\n np.random.shuffle(indices)\n\n train_idx, valid_idx = indices[split:], indices[:split]\n train_sampler = SubsetRandomSampler(train_idx)\n valid_sampler = SubsetRandomSampler(valid_idx)\n\n # Create datasets, iterable on batches\n train_loader = torch.utils.data.DataLoader(\n dataset=train_dataset,\n batch_size=batch_size,\n sampler=train_sampler,\n pin_memory=True,\n drop_last=True,\n )\n\n valid_loader = torch.utils.data.DataLoader(\n dataset=valid_dataset,\n batch_size=batch_size,\n sampler=valid_sampler,\n pin_memory=True,\n drop_last=False,\n )\n\n test_loader = torch.utils.data.DataLoader(\n dataset=test_dataset,\n batch_size=batch_size,\n shuffle=False,\n pin_memory=True,\n drop_last=False,\n )\n\n return train_loader, valid_loader, test_loader", "title": "" }, { "docid": "fda8c81e94c97a7c4fd06dca5e12acd8", "score": "0.6093871", "text": "def augment_classes(x_traindf, counted, number_of_samples=1):\n\n to_augment_label = []\n number_of_augmentations = []\n\n N = number_of_samples;\n\n for key in counted.keys():\n #Count the number of augmentations required so each class has 100 observations\n to_augment_label.append(key)\n number_of_augmentations.append(100-counted.get(key))\n\n\n augmented_images = []\n augmented_labels = []\n \n for i in range(0,len(to_augment_label)):\n char_class = x_traindf[x_traindf[\"Unicode\"] == to_augment_label[i]]\n for k in range(0,number_of_augmentations[i]):\n \n #Sample a random file from the character class \n sample = char_class['Files'].sample(N,replace=True)\n sample = str(sample.values)\n file = Path(sample[2:len(sample)-2])\n \n #Generate augmented data from the sampled image\n augmented_images.append(generate_data(file,n=N))\n\n #Make sure that the augmentations have labels associated with them\n augmented_labels.append(to_augment_label[i])\n \n return augmented_images, augmented_labels", "title": "" }, { "docid": "c4dda7751d7f059cb2bd673cca014052", "score": "0.60894746", "text": "def data_augment(X,y,rndom_noise=False,shift=False,pitch=False,ratio=0.4):\n X_ret = copy.deepcopy(X)\n y_ret = copy.deepcopy(y)\n if rndom_noise:\n X_noise,y_noise = subset(X,y,ratio)\n X_noise = add_noise(X_noise)\n X_ret = np.append(X_ret,X_noise,0)\n y_ret = np.append(y_ret,y_noise,0)\n if shift:\n X_shift,y_shift = subset(X,y,ratio)\n X_shift = add_shift(X_shift)\n X_ret = np.append(X_ret,X_shift,0)\n y_ret = np.append(y_ret,y_shift,0)\n if pitch:\n X_pitch,y_pitch = subset(X,y,ratio)\n X_pitch = add_pitch(X_pitch)\n X_ret = np.append(X_ret,X_pitch,0)\n y_ret = np.append(y_ret,y_pitch,0)\n return 
X_ret,y_ret", "title": "" }, { "docid": "1d5445430f5b44fd36c561a1a38870e3", "score": "0.6073562", "text": "def generate_1d_contrastive_data(num_training_points, observation_noise_variance):\n\n ### Background data\n index_points_bg = np.random.uniform(-2.0, 2.0, (num_training_points, 1))\n index_points_bg = index_points_bg.astype(np.float64)\n\n # y = f(x) + noise\n observations_bg = sinusoid(index_points_bg, scale=SCALE_SHARED) + np.random.normal(\n loc=0, scale=np.sqrt(observation_noise_variance), size=(num_training_points)\n )\n\n ### Foreground data\n index_points_fg1 = np.random.uniform(-2.0, 2.0, (num_training_points // 2, 1))\n index_points_fg1 = index_points_fg1.astype(np.float64)\n\n observations_fg1 = (\n sinusoid(index_points_fg1, scale=SCALE_SHARED, shift=SHIFT_SHARED)\n + sinusoid(index_points_fg1, scale=SCALE_FG, shift=SHIFT_FG)\n + np.random.normal(\n loc=0,\n scale=np.sqrt(observation_noise_variance),\n size=(num_training_points // 2),\n )\n )\n\n index_points_fg2 = np.random.uniform(-2.0, 2.0, (num_training_points // 2, 1))\n index_points_fg2 = index_points_fg2.astype(np.float64)\n\n observations_fg2 = (\n sinusoid(index_points_fg2, scale=SCALE_FG, shift=SHIFT_FG)\n + sinusoid(index_points_fg2, scale=SCALE_SHARED, shift=SHIFT_SHARED)\n + np.random.normal(\n loc=0,\n scale=np.sqrt(observation_noise_variance),\n size=(num_training_points // 2),\n )\n )\n\n index_points_fg = np.concatenate([index_points_fg1, index_points_fg2])\n observations_fg = np.concatenate([observations_fg1, observations_fg2])\n\n Y_bg = np.hstack([index_points_bg, np.expand_dims(observations_bg, 1)])\n Y_fg = np.hstack([index_points_fg, np.expand_dims(observations_fg, 1)])\n\n return Y_bg, Y_fg", "title": "" }, { "docid": "932f3f7e1458700d0af79dc29d9b7647", "score": "0.60670316", "text": "def prepare_data(self, is_train=True):\n raise NotImplementedError", "title": "" }, { "docid": "6a50160245c63c68ef44bf53a93063cd", "score": "0.60625726", "text": "def create_dataset_ori(config, is_train):\n ds.config.set_prefetch_size(config.dataset.prefetch_size)\n if config.dataset.type == \"IC15\":\n data_loader = IC15DataLoader(config, isTrain=is_train)\n elif config.dataset.type == \"TotalText\":\n data_loader = TotalTextDataLoader(config, isTrain=is_train)\n else:\n raise ValueError(f\"Not support dataset.type: {config.dataset.type}.\")\n change_swap_op = ds.vision.HWC2CHW()\n normalize_op = ds.vision.Normalize(mean=config.dataset.mean, std=config.dataset.std)\n color_adjust_op = ds.vision.RandomColorAdjust(brightness=32.0 / 255, saturation=0.5)\n if is_train:\n dataset = ds.GeneratorDataset(data_loader,\n ['img', 'gts', 'gt_masks', 'thresh_maps', 'thresh_masks'],\n num_parallel_workers=config.dataset.num_workers,\n num_shards=config.device_num, shard_id=config.rank_id,\n shuffle=True, max_rowsize=config.dataset.max_rowsize)\n dataset = dataset.map(operations=[color_adjust_op, normalize_op, change_swap_op], input_columns=[\"img\"])\n dataset = dataset.project(['img', 'gts', 'gt_masks', 'thresh_maps', 'thresh_masks'])\n else:\n dataset = ds.GeneratorDataset(data_loader, ['original', 'img', 'polys', 'dontcare'])\n dataset = dataset.map(operations=[normalize_op, change_swap_op], input_columns=[\"img\"])\n dataset = dataset.project(['original', 'img', 'polys', 'dontcare'])\n batch_size = config.train.batch_size if is_train else 1\n dataset = dataset.batch(batch_size, drop_remainder=is_train)\n steps_pre_epoch = dataset.get_dataset_size()\n return dataset, steps_pre_epoch", "title": "" }, { "docid": 
"bb5e5e11a24b5a11b2eda98c2668cf9e", "score": "0.60599273", "text": "def augment_data(image_f, annotation_f, channels=3):\n with tf.name_scope(\"data_augmentation\"):\n # ElasticDeformation are very slow..\n image_f, annotation_f = random_flip(image_f, annotation_f, 0.5) \n image_f, annotation_f = random_rotate(image_f, annotation_f, 0.2) \n # image_f, annotation_f = random_blur(image_f, annotation_f, 0.2, channels)\n # image_f, annotation_f = random_brightness(image_f, annotation_f, 0.2, channels)\n #image_f, annotation_f = random_elastic_deformation(image_f, annotation_f, \n # 0.5, 0.06, 0.12, 1.1)\n return image_f, annotation_f", "title": "" }, { "docid": "2c2301aa257d8437f9178b64361343a4", "score": "0.6053697", "text": "def addDataset(self, dataset):\n assert (dataset.getDimension('input') == self.indim)\n assert (dataset.getDimension('target') == 1)\n\n self.trainx = r_[self.trainx, dataset.getField('input')]\n self.trainy = r_[self.trainy, ravel(dataset.getField('target'))]\n self.noise = array([0.001] * len(self.trainx))\n self.calculated = False", "title": "" }, { "docid": "cf3ae0571e1be939a9a7127d3a1484cd", "score": "0.6043795", "text": "def make_augmented_training_set(n_augmented_per_image: int, size: int) -> None:\n assert n_augmented_per_image > 0\n train_data = PathologyDataset(\"trainsplit\", size)\n new_X = []\n new_y = []\n print(\n f\"Making augmented training set ({n_augmented_per_image} \"\n \"new versions per image)...\"\n )\n for i in tqdm(range(train_data.ninstances)):\n img = train_data.X[i]\n label = train_data.y[i]\n new_X.append(img)\n new_y.append(label)\n for _ in range(n_augmented_per_image):\n new_X.append(randomly_augment(img))\n new_y.append(label)\n\n augmented_train_data = copy.deepcopy(train_data)\n augmented_train_data.X = np.array(new_X)\n augmented_train_data.y = np.array(new_y)\n augmented_train_data.ninstances = len(augmented_train_data.X)\n augmented_train_data.name = f\"trainsplit-augmented-{n_augmented_per_image}\"\n augmented_train_data.to_intermediate()", "title": "" }, { "docid": "1761bd27e2980d688effc2886db5f587", "score": "0.60146", "text": "def _core_augment_algorithm(self):\n\n input_img_data = self._img_data.input_data\n new_img = copy.deepcopy(input_img_data)\n\n width = input_img_data.shape[1]\n height = input_img_data.shape[0]\n area = width * height\n\n for i in range(100):\n aspect_ratio = random.uniform(0.3, 1 / 0.3)\n target_area = random.uniform(0.05, 0.2) * area\n h = int(round(math.sqrt(target_area * aspect_ratio)))\n w = int(round(math.sqrt(target_area / aspect_ratio)))\n if h < height and w < width:\n x1 = random.randint(0, width - w)\n y1 = random.randint(0, height - h)\n new_img[y1:y1 + h, x1:x1 + w] = 0\n return new_img\n return new_img", "title": "" }, { "docid": "4081fdb1c11df25221768dfdaf97af79", "score": "0.59942573", "text": "def __init__(self, directory):\n\n self.logger.info(\"Loading DS from files...\")\n self.augmenter = SignalAugmenter(augmentation_start=0.1, augmentation_end=0.9)\n examples = self.load_examples(directory)\n examples.shuffle()\n\n train, test = examples.split(self.TRAIN_RATIO)\n\n augmented_train = self.augmenter.augment_examples(train, 400)\n print \"Augmented `train` with %d examples, %d originally\" % (\n augmented_train.num_examples - train.num_examples, train.num_examples)\n augmented_train.shuffle()\n augmented_train.scale_features(self.Feature_Range, self.Feature_Mean)\n\n augmented_test = self.augmenter.augment_examples(test, 400)\n print \"Augmented `test` with %d examples, %d originally\" 
% (\n augmented_test.num_examples - test.num_examples, test.num_examples)\n augmented_test.shuffle()\n augmented_test.scale_features(self.Feature_Range, self.Feature_Mean)\n\n self.human_labels = {v: k for k, v in self.label_mapping.items()}\n self.X_train = self.flatten2d(augmented_train.features)\n self.y_train = augmented_train.labels\n self.X_test = self.flatten2d(augmented_test.features)\n self.y_test = augmented_test.labels\n\n self.num_labels = len(self.human_labels)\n self.num_features = self.X_train.shape[1]\n self.num_train_examples = self.X_train.shape[0]\n self.num_test_examples = self.X_test.shape[0]", "title": "" }, { "docid": "22c4d92e974a31472ac388f21eb27448", "score": "0.5992269", "text": "def train_dataset(self):\n raise NotImplementedError", "title": "" }, { "docid": "dc7a691689912e07cd271df8e25d3f9d", "score": "0.597441", "text": "def train_set(self):\r\n self.dataAugmentation = True\r\n self.currIdx = 0\r\n random.shuffle(self.trainSamples)\r\n self.samples = self.trainSamples[:self.numTrainSamplesPerEpoch]", "title": "" }, { "docid": "e93066bd8c185a344d35c715e54cc25c", "score": "0.5967484", "text": "def generate_new_data(base_directory):\n augmentation.generate_new_data(base_directory, base_directory)", "title": "" }, { "docid": "d453e431459956884b54e03f06484afc", "score": "0.5951816", "text": "def process_train_example(self, example):\n image = example['inputs']\n\n example['inputs'] = self.augment_image(image, self.data_augmentations)\n example['inputs'] = self.maybe_resize(example['inputs'])\n example['inputs'] = self.convert_and_normalize(example['inputs'])\n\n if self.teacher_data_augmentations:\n example['teacher_inputs'] = self.augment_image(\n image, self.teacher_data_augmentations)\n example['teacher_inputs'] = self.maybe_resize(example['teacher_inputs'])\n example['teacher_inputs'] = self.convert_and_normalize(\n example['teacher_inputs'])\n return example", "title": "" }, { "docid": "0c8048b4050773d5b55fd7fb37dcc0e7", "score": "0.59506583", "text": "def prepare(self, ds, opt, cache_loc='datasets/cache.tfcache'):\n \n ds = ds.cache(cache_loc)\n \n # Shuffle the dataset\n ds = ds.shuffle(self.data_size)\n \n # Apply the augmentation to each patch\n ds = ds.map(lambda lr, hr: augment(lr,hr, opt), num_parallel_calls = AUTOTUNE) # augment each patch\n \n # Build the minibatches\n ds = ds.batch(self.batch_size, drop_remainder=True)\n \n # Prefetch the dataset for faster loading\n ds = ds.prefetch(buffer_size=AUTOTUNE)\n return ds", "title": "" }, { "docid": "301b7a2f38d2b974e7b0ce3c484a8043", "score": "0.59441876", "text": "def synthetic_dataset():\n cat_features = {\n 'sex': 2, 'cp': 4, 'fbs': 2, 'restecg': 3, \n 'exang':2, 'slope':3, 'ca': 5, 'thal': 4,\n }\n num_features = {\n 'age': (29, 77), 'trestbps': (94, 200), 'chol': (126, 564), \n 'thalach': (71, 202), 'oldpeak': (0, 6.2),\n }\n target = {'target': 2}\n\n nrows = 100\n data = dict()\n for fea, nvars in cat_features.items():\n data[fea] = np.random.randint(0, nvars, nrows)\n for fea, (minv, maxv) in num_features.items():\n if fea == 'oldpeak':\n data[fea] = np.random.randint(minv*10, (maxv + 1)*10, nrows) / 10\n else: \n data[fea] = np.random.randint(minv, maxv + 1, nrows)\n for fea, nvars in target.items():\n data[fea] = np.random.randint(0, nvars, nrows)\n\n data = pd.DataFrame(data)\n return data", "title": "" }, { "docid": "58673ba94ddd2299b1c59ca504529c5b", "score": "0.59436864", "text": "def generate_dataset(self, aug_file, knn, domain = ['unseen'], num_features = [200]):\n\n if (len(domain)>1) 
and (len(num_features)==1):\n num_features = [num_features[0]]*len(domain)\n\n new_dataset = { 'train':{'X': np.array([]),\n 'Y': np.array([]),\n 'A': {'continuous': np.array([])}},\n 'info': {'num_features' : str(num_features),\n 'domain' : str(domain)}}\n\n for _domain, _num in zip(domain, num_features):\n domain_in = {'unseen': zip(knn.zsl.data, knn.zsl.ids),\n 'seen': zip(knn.openval.data, knn.openval.ids),\n 'openset': zip(knn.openset.data, knn.openset.ids)}[_domain]\n\n print(\":: Generating features [{}:{}]\".format(_domain, _num))\n new_features = self._generate_features(domain_in, _num)\n new_dataset['train'] = merge_dict(new_dataset['train'], new_features)\n\n DataH5py().save_dict_to_hdf5(new_dataset, aug_file)", "title": "" }, { "docid": "9a634e479fc2e3a353302f84f6ba68ca", "score": "0.59158736", "text": "def __init__(self, train_list, test_list, args,\n data_aug=False):\n # Define types of data augmentation\n random.seed(1234)\n self.args = args\n self.data_aug = data_aug\n self.data_aug_flip = data_aug\n self.args.data_aug_flip = data_aug\n self.data_aug_scales = args.data_aug_scales\n self.use_original_mask = args.use_original_mask\n self.vg_random_rotate_angle = args.vg_random_rotate_angle\n self.vg_random_crop_ratio = args.vg_random_crop_ratio\n self.vg_color_aug = args.vg_color_aug\n self.vg_keep_aspect_ratio = args.vg_keep_aspect_ratio\n self.vg_pad_ratio = args.vg_pad_ratio\n self.sg_center_perturb_ratio = args.sg_center_perturb_ratio\n self.sg_std_perturb_ratio = args.sg_std_perturb_ratio\n self.bbox_sup = args.bbox_sup\n self.multiclass = hasattr(args, 'data_version') and args.data_version == 2017 \\\n or hasattr(args, 'multiclass') and args.multiclass \n self.train_list = train_list\n self.test_list = test_list\n self.train_ptr = 0\n self.test_ptr = 0\n self.train_size = len(train_list)\n print '#training samples', self.train_size\n self.test_size = len(test_list)\n self.train_idx = np.arange(self.train_size)\n self.test_idx = np.arange(self.test_size)\n self.crf_infer_steps = 5\n self.args.dilate_structure = get_dilate_structure(5)\n np.random.shuffle(self.train_idx)\n self.size = args.im_size\n self.mean_value = args.mean_value #np.array((104, 117, 123))\n self.scale_value = args.scale_value # 0.00787 for mobilenet \n self.args.guide_size = (224, 224)\n if args.num_loader > 1:\n self.pool = mp.Pool(processes=args.num_loader)", "title": "" }, { "docid": "83b4c82516dc148cefea66da4ece4911", "score": "0.59110874", "text": "def preprocess():\n dataset = data.load(\"../train\", im_size)\n x, y = zip(*dataset)\n r = data.onehot_label(y)\n y = list(map(lambda k: r[k], y))\n x, m, s = data.normalize(x)\n (x_train, y_train), (x_test, y_test) = data.train_val_test_split((x, y))\n training_generator = ImageDataGenerator(\n featurewise_center=False,\n samplewise_center=False,\n featurewise_std_normalization=False,\n samplewise_std_normalization=False,\n zca_whitening=False,\n rotation_range=80,\n width_shift_range=.3,\n height_shift_range=.3,\n horizontal_flip=True,\n vertical_flip=True,\n zoom_range=0.5,\n shear_range=0.5,\n fill_mode=\"reflect\"\n )\n test_generator = ImageDataGenerator(\n featurewise_center=False,\n samplewise_center=False,\n featurewise_std_normalization=False,\n samplewise_std_normalization=False,\n zca_whitening=False,\n rotation_range=0,\n width_shift_range=.0,\n height_shift_range=.0,\n horizontal_flip=True,\n vertical_flip=True,\n zoom_range=0.5,\n shear_range=0.5,\n fill_mode=\"reflect\"\n )\n return training_generator, (x_train, y_train), 
test_generator, (x_test, y_test), m, s", "title": "" }, { "docid": "e790551e3679c2ca6b6a744ff44821f3", "score": "0.59087664", "text": "def reproduce(self) -> Dataset:\n raise NotImplementedError", "title": "" }, { "docid": "cb1792f99a40b5163afbfc94a23c6630", "score": "0.58919907", "text": "def dataset():\n\n pass", "title": "" }, { "docid": "8d41395dc5108ba66866860e5b40e599", "score": "0.58914936", "text": "def preprocess(self):\n lines = [line.rstrip() for line in open(self.attr_path, 'r')]\n\n # get the filenames\n lines = lines[2:]\n random.seed(0)\n random.shuffle(lines)\n for i, line in enumerate(lines):\n split = line.split()\n filename = split[0]\n\n if (i+1) < 2000:\n self.test_dataset.append(filename)\n else:\n self.train_dataset.append(filename)\n\n print('Finished preprocessing the CelebA dataset...')", "title": "" }, { "docid": "8d1691453634f9efab5c44362c1c31f3", "score": "0.5881771", "text": "def augment(img_data, config, augment = True):\r\n assert 'filepath' in img_data\r\n assert 'bboxes'in img_data\r\n assert 'width' in img_data\r\n assert 'height' in img_data\r\n \r\n #深复制,和python机制有关\r\n img_data_aug = copy.deepcopy(img_data)\r\n img = cv2.imread(img_data_aug['filepath'])\r\n \r\n if augment:\r\n rows, cols = img.shape[:2]\r\n #第二个判断条件是随机生成的,也即随机对数据进行各种操作 以增强数据集\r\n #第二个判断条件生成1的时候不进行任何操作\r\n if config.use_horizontal_flips and np.random.randint(0,2) == 0:\r\n img = cv2.flip(img,1)\r\n #图片水平翻转了 box也得跟着转\r\n for bbox in img_data_aug['bboxes']:\r\n x1 = bbox['x1']\r\n x2 = bbox['x2']\r\n bbox['x2'] = cols - x1\r\n bbox['x1'] = cols - x2\r\n \r\n if config.use_vertical_flips and np.random.randint(0,2) == 1:\r\n img = cv2.flip(img,0)\r\n #图片水平翻转了 box也得跟着转\r\n for bbox in img_data_aug['bboxes']:\r\n y1 = bbox['y1']\r\n y2 = bbox['y2']\r\n bbox['y2'] = rows - y1\r\n bbox['y1'] = rows - y2 \r\n \r\n #顺时针旋转九十° \r\n if config.rot_90:\r\n #在这4个数里面选随机选择一个\r\n angle = np.random.choice([0,90,270,180],1)[0]\r\n if angle == 270:\r\n #转置矩阵,但是是镜像转置,需要翻转才能是270°\r\n img = np.transpose(img, (1,0,2))\r\n img = cv2.flip(img,0)\r\n elif angle == 90:\r\n img = np.transpose(img, (1,0,2))\r\n img = cv2.flip(img,1) \r\n elif angle == 180:\r\n img = cv2.flip(img, -1)\r\n \r\n for bbox in img_data_aug['bboxes']:\r\n x1 = bbox['x1']\r\n x2 = bbox['x2']\r\n y1 = bbox['y1']\r\n y2 = bbox['y2']\r\n if angle == 270:\r\n bbox['x1'] = y1\r\n bbox['x2'] = y2\r\n bbox['y1'] = cols - x2\r\n bbox['y2'] = cols - x1 \r\n elif angle == 90:\r\n bbox['x1'] = rows - y2\r\n bbox['x2'] = rows - y1\r\n bbox['y1'] = x1\r\n bbox['y2'] = x2 \r\n elif angle == 180:\r\n bbox['x1'] = cols - x2\r\n bbox['x2'] = cols - x1\r\n bbox['y1'] = rows - y2\r\n bbox['y2'] = rows - y1\r\n \r\n #宽和高颠倒是cv2的设置,有毛病\r\n img_data_aug['width'] = img.shape[1]\r\n img_data_aug['height'] = img.shape[0]\r\n \r\n #可能更新了图片的形状 所以要更新\r\n return img_data_aug, img", "title": "" }, { "docid": "539b0ae1a8a107d2a10aab3bf6d62eca", "score": "0.58726066", "text": "def generate_samples(data_folder, data, augment=True):\n \n while True:\n # Generate random batch of indices\n indices = np.random.permutation(data.shape[0])\n batch_size = 128\n for batch in range(0, len(indices), batch_size):\n batch_indices = indices[batch:(batch + batch_size)]\n # Output arrays\n x = np.empty([0, 160, 320, 3], dtype=np.float32)\n y = np.empty([0], dtype=np.float32)\n \n # Read in and preprocess a batch of images\n for i in batch_indices:\n # Randomly select camera and set the angle with corresponding offset\n camera = np.random.randint(len(cameras)) if augment else 1\n image = 
mpimg.imread(data_folder+data[cameras[camera]].values[i].split('/')[-1])\n angle = data.steering.values[i] + cameras_steering_correction[camera]\n if augment:\n # Add random shadow as a vertical slice of image\n h, w = image.shape[0], image.shape[1]\n [x1, x2] = np.random.choice(w, 2, replace=False)\n k = h / (x2 - x1)\n b = - k * x1\n for i in range(h):\n c = int((i - b) / k)\n image[i, :c, :] = (image[i, :c, :] * .5).astype(np.int32)\n \n x = np.append(x, [image], axis=0)\n y = np.append(y, [angle])\n # Randomly flip half of images in the batch\n flip_indices = random.sample(range(x.shape[0]), int(x.shape[0] / 2))\n x[flip_indices] = x[flip_indices, :, ::-1, :]\n y[flip_indices] = -y[flip_indices]\n yield (x, y)", "title": "" }, { "docid": "cb244fa1bc64a1c26bbb8d9570778be4", "score": "0.5855388", "text": "def augment_images(self, output_loc, vid_file):\n\n num_files = len(os.listdir(output_loc))\n\n datagen = ImageDataGenerator(\n rotation_range=45, #Random rotation between 0 and 45\n width_shift_range=0.2, \n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True,\n fill_mode='nearest') \n\n #Multiclass. Read dirctly from the folder structure using flow_from_directory\n i = 0\n for batch in datagen.flow_from_directory(directory=output_loc[:-7], \n batch_size= num_files, \n target_size=(256, 256),\n color_mode=\"rgb\",\n save_to_dir= output_loc, \n save_prefix='aug_'+vid_file[:-4], \n save_format='jpg'):\n i += 1\n if i > 10:\n break", "title": "" }, { "docid": "a5cac5fe6363f2c90e0094c0a20daa86", "score": "0.5848866", "text": "def __init__(self, data_df, data_path, augmentation_transform=[tf.Translate(low=-0.1, high=0.1), tf.Rotate(low=-10, high=10),\n tf.Scale(low=0.9, high=1.1), tf.HFlip(p=0.5)], window=None, output_size=256):\n super(public_SegICH_AttentionDataset2D, self).__init__()\n self.data_df = data_df\n self.data_path = data_path\n self.window = window\n\n self.transform = tf.Compose(*augmentation_transform,\n tf.Resize(H=output_size, W=output_size),\n tf.ToTorchTensor())", "title": "" }, { "docid": "4bee6b774da69d8ebe7758577cf007c9", "score": "0.58356047", "text": "def _create_examples(self, file_name, set_type, augmented): \n print(\"Creating Examples ...................\")\n stats = {}\n augmented_stats = {}\n labels = self.get_labels()\n for label in labels:\n stats[label] = 0\n augmented_stats[label] = 0\n \n examples = [] \n with open(file_name) as file:\n source = json.load(file)\n for (i, diag) in enumerate(source):\n for (j, item) in enumerate(diag):\n guid = \"{}-{}-{}\".format(set_type, i, j)\n item_examples = self._create_examples_of_item(guid, diag, j, set_type, augmented);\n examples.extend(item_examples)\n \n if len(item_examples) > 0 and item_examples[0].label:\n stats[item_examples[0].label] += 1\n augmented_stats[item_examples[0].label] += len(item_examples)\n \n if not hasattr(self, 'stats'):\n self.stats = {}\n self.augmented_stats = {}\n \n self.stats[set_type] = stats\n self.augmented_stats[set_type] = augmented_stats\n return examples", "title": "" }, { "docid": "6ad1921488294cef7a418ce8e7952675", "score": "0.58322513", "text": "def prepare_data(self, batch_size, epoch=0, shuffle=True):\n raise NotImplementedError", "title": "" }, { "docid": "0ffdd8838483349079abf8a37506094f", "score": "0.58322024", "text": "def test_augmentations(sample_num: int=12, aug_level: int=20):\n with timeit_context(\"load ds\"):\n ds = DatasetAugs(fold=0, is_training=True, debug=True, img_size=224, augmentation_level=aug_level)\n # print and plot 
sample\n print(ds[sample_num])\n plt.figure()\n plt.imshow(ds[sample_num][\"img\"], cmap=plt.cm.gist_gray)\n for annot in ds[sample_num][\"annot\"]:\n p0 = annot[0:2]\n p1 = annot[2:4]\n plt.gca().add_patch(plt.Rectangle(p0, width=(p1 - p0)[0], height=(p1 - p0)[1], fill=False, edgecolor=\"r\", linewidth=2))\n\n # plot augmented sample, 10 examples\n for i in range(10):\n plt.figure()\n sample = ds[sample_num]\n plt.imshow(sample[\"img\"], cmap=plt.cm.gist_gray)\n for annot in sample[\"annot\"]:\n p0 = annot[0:2]\n p1 = annot[2:4]\n plt.gca().add_patch(plt.Rectangle(p0, width=(p1 - p0)[0], height=(p1 - p0)[1], fill=False, edgecolor=\"r\", linewidth=2))\n plt.show()", "title": "" }, { "docid": "3cce3274d67f98307ce42fb77abda2c6", "score": "0.58071095", "text": "def data_augment_batch(self, batch_images):\n batch_images = tf.cond(self.augment_rotate, lambda: self.rotate_batch(batch_images), lambda: batch_images)\n return batch_images", "title": "" }, { "docid": "c7853f7281b009a2f257cabf7fb088ce", "score": "0.5804942", "text": "def build_dataset():\n train_set=Corrupted_dataset(args,root=data_dir,download=True,transform=None,target_transform=None,train=True,meta=False,do_meta=True)\n train_meta_set=Corrupted_dataset(args,root=data_dir,download=True,transform=None,target_transform=None,train=True,meta=True,do_meta=True)\n test_set=Corrupted_dataset(args,root=data_dir,download=True,transform=None,target_transform=None,train=False)\n train_loader,noised_sample=train_set.get_data_loader()\n train_meta_loader=train_meta_set.get_data_loader()\n test_loader=test_set.get_data_loader()\n \n\n return train_loader, train_meta_loader, test_loader, noised_sample", "title": "" }, { "docid": "e4903e00d13c6d1a5d4967e817001609", "score": "0.58030856", "text": "def dataset_setup(self):\n settings = self.settings\n if settings.crowd_dataset == 'World Expo':\n train_transform = torchvision.transforms.Compose([data.RandomlySelectPathWithNoPerspectiveRescale(),\n data.RandomHorizontalFlip(),\n data.NegativeOneToOneNormalizeImage(),\n data.NumpyArraysToTorchTensors()])\n validation_transform = torchvision.transforms.Compose([data.RandomlySelectPathWithNoPerspectiveRescale(),\n data.NegativeOneToOneNormalizeImage(),\n data.NumpyArraysToTorchTensors()])\n dataset_path = '../World Expo/'\n with open(os.path.join(dataset_path, 'viable_with_validation_and_random_test.json')) as json_file:\n cameras_dict = json.load(json_file)\n self.train_dataset = CrowdDataset(dataset_path, camera_names=cameras_dict['train'],\n number_of_cameras=settings.number_of_cameras,\n number_of_images_per_camera=settings.number_of_images_per_camera,\n transform=train_transform, seed=settings.labeled_dataset_seed)\n self.train_dataset_loader = DataLoader(self.train_dataset, batch_size=settings.batch_size, shuffle=True,\n pin_memory=True, num_workers=settings.number_of_data_workers)\n # self.unlabeled_dataset = CrowdDataset(dataset_path, camera_names=cameras_dict['validation'],\n # transform=train_transform, unlabeled=True,\n # seed=100)\n self.unlabeled_dataset = CrowdDataset(dataset_path, camera_names=cameras_dict['train'],\n number_of_cameras=settings.number_of_cameras,\n transform=train_transform, unlabeled=True,\n seed=settings.labeled_dataset_seed)\n self.unlabeled_dataset_loader = DataLoader(self.unlabeled_dataset, batch_size=settings.batch_size, shuffle=True,\n pin_memory=True, num_workers=settings.number_of_data_workers)\n self.validation_dataset = CrowdDataset(dataset_path, camera_names=cameras_dict['validation'],\n 
transform=validation_transform, seed=101)\n elif settings.crowd_dataset == 'ShanghaiTech':\n train_transform = torchvision.transforms.Compose([data.ExtractPatchForRandomPosition(),\n data.RandomHorizontalFlip(),\n data.NegativeOneToOneNormalizeImage(),\n data.NumpyArraysToTorchTensors()])\n validation_transform = torchvision.transforms.Compose([data.ExtractPatchForRandomPosition(),\n data.NegativeOneToOneNormalizeImage(),\n data.NumpyArraysToTorchTensors()])\n self.train_dataset = ShanghaiTechDataset(transform=train_transform, seed=settings.labeled_dataset_seed)\n self.train_dataset_loader = DataLoader(self.train_dataset, batch_size=settings.batch_size, shuffle=True,\n pin_memory=True, num_workers=settings.number_of_data_workers)\n self.unlabeled_dataset = ShanghaiTechDataset(transform=train_transform, seed=settings.labeled_dataset_seed,\n unlabeled=True)\n self.unlabeled_dataset_loader = DataLoader(self.unlabeled_dataset, batch_size=settings.batch_size, shuffle=True,\n pin_memory=True, num_workers=settings.number_of_data_workers)\n self.validation_dataset = ShanghaiTechDataset(dataset='test', transform=validation_transform, seed=101)\n else:\n raise ValueError('{} is not an understood crowd dataset.'.format(settings.crowd_dataset))", "title": "" }, { "docid": "86335e1faa14596674dbba1cea78abca", "score": "0.57958204", "text": "def augment_batch(X_batch, shift_max, noise_max = 1, add_shift = True, add_noise = True):\n target_length = X_batch[0].shape[0]\n \n X_batch_out = []\n \n for X_elem in X_batch:\n \n if add_shift:\n shift = random.randint(0,shift_max)\n #print(shift)\n else:\n shift = 0\n deletion = []\n end = 0\n start = 1\n \n # cut randomly in the front and back of the columns\n for f in range(shift):\n if random.uniform(0.1, 1.1) > 0.55:\n deletion.append( X_elem.shape[0] - (end+1) )\n \n #X_elem = np.delete(X_elem,(X_elem.shape[0] - (end+1),0)\n\n end +=1\n else:\n deletion.append(start)\n #X_elem = np.delete(X_elem,(start),0)\n\n start +=1\n\n X_elem = np.delete(X_elem,deletion,0)\n # randomly add the missing columns to the end or beginning of the dataset\n \n missing = target_length - X_elem.shape[0]\n if missing > 0 :\n if random.uniform(0, 1) > 0.5:\n # add to the front\n X_elem = np.vstack((X_elem,np.random.rand(missing,13)))\n \n else:\n # add to the back\n X_elem = np.vstack((np.random.rand(missing,13), X_elem))\n \n if add_noise:\n noise = np.random.normal(0,noise_max,(99*13)).reshape((99, 13))\n #X_batch_out.append(np.add(noise,X_elem))\n \n else:\n missing = X_elem.shape[0] -target_length \n delete= []\n for element in range(missing):\n delete.append(X_elem.shape[0] - (element) )\n #print(\"element too long,removing one \")\n X_elem = np.delete(X_elem,delete,0)\n \n if add_noise:\n noise = np.random.normal(0,noise_max,(99*13)).reshape((99, 13))\n \n try:\n X_batch_out.append(np.add(noise,X_elem))\n except:\n X_elem = np.delete(X_elem,X_elem.shape[0]-1,0)\n X_batch_out.append(np.add(noise,X_elem))\n \n return X_batch_out\n #\n #X_out = np.add(X_elem,noise)", "title": "" }, { "docid": "86335e1faa14596674dbba1cea78abca", "score": "0.57958204", "text": "def augment_batch(X_batch, shift_max, noise_max = 1, add_shift = True, add_noise = True):\n target_length = X_batch[0].shape[0]\n \n X_batch_out = []\n \n for X_elem in X_batch:\n \n if add_shift:\n shift = random.randint(0,shift_max)\n #print(shift)\n else:\n shift = 0\n deletion = []\n end = 0\n start = 1\n \n # cut randomly in the front and back of the columns\n for f in range(shift):\n if random.uniform(0.1, 1.1) > 
0.55:\n deletion.append( X_elem.shape[0] - (end+1) )\n \n #X_elem = np.delete(X_elem,(X_elem.shape[0] - (end+1),0)\n\n end +=1\n else:\n deletion.append(start)\n #X_elem = np.delete(X_elem,(start),0)\n\n start +=1\n\n X_elem = np.delete(X_elem,deletion,0)\n # randomly add the missing columns to the end or beginning of the dataset\n \n missing = target_length - X_elem.shape[0]\n if missing > 0 :\n if random.uniform(0, 1) > 0.5:\n # add to the front\n X_elem = np.vstack((X_elem,np.random.rand(missing,13)))\n \n else:\n # add to the back\n X_elem = np.vstack((np.random.rand(missing,13), X_elem))\n \n if add_noise:\n noise = np.random.normal(0,noise_max,(99*13)).reshape((99, 13))\n #X_batch_out.append(np.add(noise,X_elem))\n \n else:\n missing = X_elem.shape[0] -target_length \n delete= []\n for element in range(missing):\n delete.append(X_elem.shape[0] - (element) )\n #print(\"element too long,removing one \")\n X_elem = np.delete(X_elem,delete,0)\n \n if add_noise:\n noise = np.random.normal(0,noise_max,(99*13)).reshape((99, 13))\n \n try:\n X_batch_out.append(np.add(noise,X_elem))\n except:\n X_elem = np.delete(X_elem,X_elem.shape[0]-1,0)\n X_batch_out.append(np.add(noise,X_elem))\n \n return X_batch_out\n #\n #X_out = np.add(X_elem,noise)", "title": "" }, { "docid": "8c1a6f2d9f6aae8a5c6589d7bddf8fa6", "score": "0.57923645", "text": "def _generate_data(self):\n\n #pdb.set_trace()\n\n # Create random samples\n y = numpy.zeros((self.samples, 1))\n\n # Regression case\n if self.task == 'regression':\n samps = self.samples + 1\n\n deltaT = self.max_theta / float(self.samples)\n X = numpy.zeros((samps, 2))\n for i in range(1, X.shape[0]):\n X[i, 1] = X[i-1, 1] + deltaT\n X[i, 0] = self.turn_rate*X[i-1, 1] + self.distance\n\n y = X[1:, :] # First example is not a target\n X = X[:-1, :] # Last example is only a target\n\n # Classification case\n else:\n X = numpy.random.rand(self.samples, 2)\n X[:, 0] = self.bounds_radius*X[:, 0]\n X[:, 1] = self.max_theta*X[:, 1]\n\n def onSpiral(samp):\n r = (self.turn_rate*samp[1] + self.distance)\n return abs(samp[0] - r) < self.epsilon\n\n # Label positive points (assumed all negative initially)\n for i in range(X.shape[0]):\n if onSpiral(X[i, ...]):\n y[i, 0] = 1\n\n # Ensure we have the requested number of samples\n pos_samps = int(y.sum())\n needed_pos_samps = self.samples * self.positive_sample_rate\n needed_pos_samps = int(math.ceil(needed_pos_samps))\n\n def getRandIdxs(count, label):\n idxs = numpy.copy(numpy.where(y == label)[0])\n numpy.random.shuffle(idxs)\n return idxs\n\n # Case: too many examples\n # Probabilistically speaking this will not happen.\n if pos_samps > needed_pos_samps:\n pos_samps_diff = pos_samps - needed_pos_samps\n idxs = getRandIdxs(pos_samps_diff, 1)\n\n for i in range(pos_samps_diff):\n idx = idxs[i]\n y[idx, 0] = 0\n while onSpiral(X[idx, ...]):\n X[idx, 0] = self.bounds_radius*rng.Random.rand()\n\n # Case: too few examples\n elif pos_samps < needed_pos_samps:\n pos_samps_diff = needed_pos_samps - pos_samps\n idxs = getRandIdxs(pos_samps_diff, 0)\n\n for i in range(pos_samps_diff):\n idx = idxs[i]\n X[idx, 0] = self.turn_rate*X[idx, 1] + self.distance\n y[idx, 0] = 1\n\n # Convert to x,y space if appropriate\n if self.space == 'x,y':\n for i in range(X.shape[0]):\n x_temp = X[i, 0]*math.cos(X[i, 1])\n y_temp = X[i, 0]*math.sin(X[i, 1])\n X[i, 0] = x_temp\n X[i, 1] = y_temp\n\n return (X, y)", "title": "" }, { "docid": "42f481f135c3d0f446d7f247ba54b818", "score": "0.57920945", "text": "def _augment(self, data):\n return data", 
"title": "" }, { "docid": "ee895b0da9bb4bf799dad44e0718aec7", "score": "0.5791352", "text": "def generate_augmented_train_test(debug=False):\n num_rows = 100 if debug else None\n with timer(\"train & test\"):\n df = train_test(num_rows).reset_index()\n with timer(\"historical transactions\"):\n historical_transactions_df = historical_transactions(num_rows).reset_index()\n df = pd.merge(df, historical_transactions_df, on='card_id', how='outer')\n with timer(\"new merchants\"):\n new_merchants_transactions_df = new_merchant_transactions(num_rows).reset_index()\n df = pd.merge(df, new_merchants_transactions_df, on='card_id', how='outer')\n with timer(\"additional features\"):\n df = additional_features(df)\n with timer(\"split train & test\"):\n train_df = df[df['target'].notnull()]\n test_df = df[df['target'].isnull()]\n del test_df['target']\n del df\n gc.collect()\n with timer(\"Save train and test files\"):\n train_df.to_csv('elo/data/augmented_train.csv', index=False)\n test_df.to_csv('elo/data/augmented_test.csv', index=False)", "title": "" }, { "docid": "887bef62c9afff92072263b23de14fe9", "score": "0.57837564", "text": "def init_augmenter(img_mode=\"color\"):\n ia.seed(10)\n\n if img_mode == 'color':\n return iaa.Sequential([\n sometimes(iaa.Fliplr()),\n iaa.MultiplyBrightness((0.6, 1.4)),\n # TODO: try no ChangeColor or Brightness\n sometimes(iaa.ChangeColorTemperature((5000, 7000))),\n iaa.Crop(percent=(\n (0, 0.50),\n (0, 0.50),\n (0, 0.50),\n (0, 0.50)\n ))\n # sometimes(iaa.OneOf([\n # iaa.Cutout(nb_iterations=(1, 4), size=0.2,\n # squared=False, cval=(0, 255), fill_mode=\"constant\"),\n # iaa.Cutout(nb_iterations=(1, 4), size=0.2, squared=False, cval=(\n # 0, 255), fill_mode=\"gaussian\", fill_per_channel=True),\n # iaa.AdditiveGaussianNoise(scale=(0, 0.1*255))\n # ]))\n ])\n else:\n return iaa.Sequential([\n sometimes(iaa.Fliplr()),\n iaa.Crop(percent=(\n (0, 0.40),\n (0, 0.40),\n (0, 0.40),\n (0, 0.40)\n ))\n ])", "title": "" }, { "docid": "1fc599734b5370972e831a8395c0ca26", "score": "0.5783601", "text": "def prepare_data(self) -> None:\n self.train_dataset = self.dataset\n self.val_dataset = self.dataset\n self.test_dataset = self.dataset", "title": "" }, { "docid": "c39bc838fe64492c50e98365a188c9f1", "score": "0.57790786", "text": "def train_model(model):\n # Add your code here\n\n #Preprocessing \n # Data augmentation - creation of more images to train on\n train_datagen = ImageDataGenerator(\n rescale = 1./255,\n shear_range=0.2,\n zoom_range=0.2,\n width_shift_range=0.2,\n\t\theight_shift_range=0.2,\n rotation_range=20,\n horizontal_flip=True)\n\n test_datagen = ImageDataGenerator(rescale = 1./255)\n\n training_set = train_datagen.flow_from_directory( #how can I check if I am actually making many more instances of pictures.\n '/Users/harryrodger/Desktop/data',\n target_size=(64,64),\n batch_size=32,\n class_mode='categorical'\n )\n\n test_set = test_datagen.flow_from_directory( #how can I check if I am actually making many more instances of pictures.\n '/Users/harryrodger/Desktop/ProjectCOMP309/ProjectCode/data/test',\n target_size=(64,64),\n batch_size=32,\n class_mode='categorical'\n )\n\n print('Data augmentation complete')\n\n model.fit_generator( \n training_set,\n steps_per_epoch=8000, \n epochs = 50,\n validation_data=test_set,\n validation_steps = 15) \n\n return model", "title": "" }, { "docid": "2608b6cf6b300efa38a4a8543f4221ac", "score": "0.57776386", "text": "def 
make_dataloaders(height=128,width=64,batch_size=256,transform=True,intensity=True,weighted=False,seed=42,intensity_type=\"channel\",final=False):\n if final:\n train_set = dataset(train=True,transform=transform,intensity=intensity,height=height,width=width,seed=seed,intensity_type=intensity_type,data_path=\"../../../../../27/c/138037/eyefoss-project-blobs/\",final=final)\n else:\n train_set = dataset(train=True,transform=transform,intensity=intensity,height=height,width=width,seed=seed,intensity_type=intensity_type,final=final)\n test_set = dataset(train=False,transform=False,intensity=intensity,height=height,width=width,intensity_type=intensity_type,final=final)\n \n if weighted:\n weights = []\n\n train_paths = train_set.get_image_paths()\n oat_length = len(os.listdir('../data/train/Oat'))\n wheat_length = len(os.listdir('../data/train/Wheat'))\n rye_length = len(os.listdir('../data/train/Rye'))\n broken_length = len(os.listdir('../data/train/Broken'))\n barley_length = len(os.listdir('../data/train/Barley'))\n\n for file in train_paths:\n label = os.path.split(os.path.split(file)[0])[1]\n if label == 'Oat':\n weights.append(0.2/oat_length)\n elif label == \"Wheat\":\n weights.append(0.2/wheat_length)\n elif label == \"Rye\":\n weights.append(0.2/rye_length)\n elif label == \"Broken\":\n weights.append(0.2/broken_length)\n else:\n weights.append(0.2/barley_length)\n weights = torch.FloatTensor(weights)\n sampler = WeightedRandomSampler(weights=weights,num_samples=len(train_set),replacement=False)\n train_loader = DataLoader(train_set, batch_size=batch_size,sampler=sampler, worker_init_fn=np.random.seed(seed),num_workers=4, pin_memory=True)\n \n else:\n train_loader = DataLoader(train_set, batch_size=batch_size,shuffle=True, worker_init_fn=np.random.seed(seed),num_workers=4, pin_memory=True)\n \n if not final:\n test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False, worker_init_fn=np.random.seed(seed),num_workers=4, pin_memory=True) \n return train_loader,test_loader\n else:\n return train_loader", "title": "" }, { "docid": "c77af5b494a196f832e13ab1159d055f", "score": "0.5774632", "text": "def _train(self, observations, metadata):\n pass", "title": "" }, { "docid": "2e59d3fd1b3da7e05ace0d0ade59aec2", "score": "0.5768929", "text": "def general_dataset_eval(dataset_type,\n detpath,\n annopath,\n imagesetfile,\n classname,\n sample_mode,\n ovthresh=0.5,\n use_07_metric=False):\n # assumes detections are in detpath.format(classname)\n # assumes annotations are in annopath.format(imagename)\n # assumes imagesetfile is a text file with each line an image name\n\n with open(imagesetfile, 'r') as f:\n lines = f.readlines()\n imagenames = [x.strip() for x in lines]\n\n recs = {}\n for i, imagename in enumerate(imagenames):\n recs[imagename] = parse_rec(dataset_type, annopath.format(imagename))\n if i % 100 == 0:\n print 'Reading annotation for {:d}/{:d}'.format(i + 1, len(imagenames))\n\n # extract gt objects for this class\n class_recs = {}\n npos = 0\n for imagename in imagenames:\n R = [obj for obj in recs[imagename] if obj['name'] == classname]\n bbox = np.array([x['bbox'] for x in R])\n #difficult = np.array([x['difficult'] for x in R]).astype(np.bool)\n det = [False] * len(R)\n\n DontCareR = [obj for obj in recs[imagename] if obj['name'] == 'dontcare']\n DontCarebbox = np.array([x['bbox'] for x in DontCareR])\n DontCaredet = [False] * len(DontCareR)\n #EXTEND FUNCTIONALITY HERE\n #UPDATE ANNOTATION MODES HERE\n #Indicate how data should be filtered for testing here\n 
#Remeber to associate it to a new dataset_type\n if dataset_type.lower() == 'kitti':\n ValidInds = np.array([x['sample_mode']<=sample_mode for x in R]).astype(np.bool)\n else:\n ValidInds = np.array([x['sample_mode']==sample_mode for x in R]).astype(np.bool)\n \n #npos = npos + sum(ValidInds) - sum(ValidInds & difficult)\n npos = npos + sum(ValidInds)\n class_recs[imagename] = {'bbox': bbox,\n 'DontCarebbox': DontCarebbox,\n 'ValidInds': ValidInds,\n 'det': det,\n 'DontCaredet': DontCaredet}\n\n # read dets\n # detfile = detpath.format(classname)\n with open(detpath, 'r') as f:\n lines = f.readlines()\n\n splitlines = [x.strip().split(' ') for x in lines]\n image_ids = [x[0] for x in splitlines]\n confidence = np.array([float(x[1]) for x in splitlines])\n BB = np.array([[float(z) for z in x[2:]] for x in splitlines])\n \n # sort by confidence\n sorted_ind = np.argsort(-confidence)\n sorted_scores = np.sort(-confidence)\n BB = BB[sorted_ind, :]\n image_ids = [image_ids[x] for x in sorted_ind]\n\n # go down dets and mark TPs and FPs\n nd = len(image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n # list of indices to ignore in evaluation\n delInds = []\n #EXTEND FUNCTIONALITY HERE\n #UPDATE ANNOTATION SAMPLE MODES HERE\n #Filter Bounding Boxes based on Size (Height)\n #Remeber to associate it to a new dataset_type\n if dataset_type.lower() == 'kitti':\n if sample_mode == 1:\n MinHeight = 40\n else:\n MinHeight = 25\n else:\n MinHeight = 25\n\n for d in range(nd):\n R = class_recs[image_ids[d]]\n bb = BB[d, :].astype(float)\n\n if (bb[3] - bb[1]) < MinHeight:\n delInds.append(d)\n continue\n\n ovmax = -np.inf\n BBGT = R['bbox'].astype(float)\n\n if BBGT.size > 0:\n # compute overlaps\n # intersection\n ixmin = np.maximum(BBGT[:, 0], bb[0])\n iymin = np.maximum(BBGT[:, 1], bb[1])\n ixmax = np.minimum(BBGT[:, 2], bb[2])\n iymax = np.minimum(BBGT[:, 3], bb[3])\n iw = np.maximum(ixmax - ixmin + 1., 0.)\n ih = np.maximum(iymax - iymin + 1., 0.)\n inters = iw * ih\n\n # union\n uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +\n (BBGT[:, 2] - BBGT[:, 0] + 1.) *\n (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)\n\n overlaps = inters / uni\n ovmax = np.max(overlaps)\n jmax = np.argmax(overlaps)\n\n DontCare_ovmax = -np.inf\n DontCareBBGT = R['DontCarebbox'].astype(float) \n \n if DontCareBBGT.size > 0:\n # compute overlaps\n # intersection\n ixmin = np.maximum(DontCareBBGT[:, 0], bb[0])\n iymin = np.maximum(DontCareBBGT[:, 1], bb[1])\n ixmax = np.minimum(DontCareBBGT[:, 2], bb[2])\n iymax = np.minimum(DontCareBBGT[:, 3], bb[3])\n iw = np.maximum(ixmax - ixmin + 1., 0.)\n ih = np.maximum(iymax - iymin + 1., 0.)\n inters = iw * ih\n\n # union\n uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +\n (DontCareBBGT[:, 2] - DontCareBBGT[:, 0] + 1.) *\n (DontCareBBGT[:, 3] - DontCareBBGT[:, 1] + 1.) 
- inters)\n\n overlaps = inters / uni\n DontCare_ovmax = np.max(overlaps)\n DontCare_jmax = np.argmax(overlaps)\n\n\n if ovmax >= ovthresh:\n if R['ValidInds'][jmax]:\n if not R['det'][jmax]:\n tp[d] = 1.\n R['det'][jmax] = 1\n else:\n fp[d] = 1.\n else:\n if R['det'][jmax] == 0:\n R['det'][jmax] = 1\n else:\n fp[d] = 1.\n else:\n if (DontCare_ovmax >= ovthresh) and (R['DontCaredet'][DontCare_jmax] == 0):\n R['DontCaredet'][DontCare_jmax] = 1\n else:\n fp[d] = 1.\n\n if (fp[d] == 0) and (tp[d] == 0):\n delInds.append(d)\n\n #delete inds from fp and tp\n np.delete(fp, delInds)\n np.delete(tp, delInds)\n\n # compute precision recall\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(npos)\n # avoid divide by zero in case the first detection matches a difficult\n # ground truth\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n ap = general_dataset_ap(rec, prec, use_07_metric)\n\n return rec, prec, ap", "title": "" }, { "docid": "bc7fce5b711a69e5924a00bd8bc57dab", "score": "0.5762296", "text": "def __init__(self, data_df, data_path, augmentation_transform=[tf.Translate(low=-0.1, high=0.1), tf.Rotate(low=-10, high=10),\n tf.Scale(low=0.9, high=1.1), tf.HFlip(p=0.5)], window=None, output_size=256):\n super(brain_extract_Dataset2D, self).__init__()\n self.data_df = data_df\n self.data_path = data_path\n self.window = window\n\n self.transform = tf.Compose(*augmentation_transform,\n tf.Resize(H=output_size, W=output_size),\n tf.ToTorchTensor())", "title": "" }, { "docid": "341e08825500dc31561174f53b52943a", "score": "0.5760397", "text": "def learn_old(self, dataset, learning_rate, coding_costraint = 0.08):\n # The list of all data batches to currently process\n # input_data[recording][events, timestamp if 0 or xy coordinates if 1]\n # In the first layer the input data is directly dataset given by the user\n input_data=dataset\n \n for layer in range(self.layers):\n layer_activations = []\n new_data = []\n all_surfaces = []\n all_surfaces_plus_null=[]\n # Create the varational autoencoder for this layer\n #intermediate_dim = self.surfaces_dimensions[layer][0]*self.surfaces_dimensions[layer][0]\n intermediate_dim = 40\n# self.vaes.append(create_vae(self.surfaces_dimensions[layer][0]*self.surfaces_dimensions[layer][1]*self.polarities[layer],\n# self.latent_variables[layer], intermediate_dim, learning_rate[layer], coding_costraint))\n self.vaes.append(create_sparse(self.surfaces_dimensions[layer][0]*self.surfaces_dimensions[layer][1]*self.polarities[layer],\n self.latent_variables[layer], intermediate_dim, learning_rate[layer], coding_costraint))\n # The code is going to run on gpus, to improve performances rather than \n # a pure online algorithm I am going to minibatch \n batch_size = 500\n for recording in range(len(input_data)):\n n_batch = len(input_data[recording][0]) // batch_size\n \n \n # Cut the excess data in the first layer : \n if layer == 0 :\n input_data[recording][0]=input_data[recording][0][:n_batch*batch_size]\n input_data[recording][1]=input_data[recording][1][:n_batch*batch_size]\n input_data[recording][2]=input_data[recording][2][:n_batch*batch_size]\n event = [[input_data[recording][0][event_ind],\n input_data[recording][1][event_ind],\n input_data[recording][2][event_ind]]for event_ind in range(n_batch*batch_size)] \n else :\n event = [[input_data[recording][0][event_ind],\n input_data[recording][1][event_ind],\n input_data[recording][2][event_ind],\n input_data[recording][3][event_ind]]for event_ind in range(n_batch*batch_size)] \n # The multiple event 
polarities are all synchonized in the layers after the first.\n # As a single time surface is build on all polarities, there is no need to build a time \n # surface per each event with a different polarity and equal time stamp, thus only \n # a fraction of the events are extracted here\n if layer != 0 :\n recording_surfaces = Parallel(n_jobs=self.threads)(delayed(Time_Surface_event)(self.surfaces_dimensions[layer][0],\n self.surfaces_dimensions[layer][1], event[event_ind].copy(),\n self.taus[layer], input_data[recording].copy(), self.polarities[layer], minv=0.1) for event_ind in range(0,n_batch*batch_size,self.polarities[layer]))\n else:\n recording_surfaces = Parallel(n_jobs=self.threads)(delayed(Time_Surface_event)(self.surfaces_dimensions[layer][0],\n self.surfaces_dimensions[layer][1], event[event_ind].copy(),\n self.taus[layer], input_data[recording].copy(), self.polarities[layer], minv=0.1) for event_ind in range(n_batch*batch_size))\n null_surfaces = [np.zeros(self.surfaces_dimensions[layer][0]*self.surfaces_dimensions[layer][1]*self.polarities[layer]) for i in range(10000) ] \n# all_surfaces = all_surfaces + [(i==0)*recording_surfaces[ind]+(i==1)*null_surfaces[ind] for i in range(2) for ind in range(len(recording_surfaces))]\n all_surfaces_plus_null = all_surfaces + recording_surfaces + null_surfaces\n all_surfaces = all_surfaces + recording_surfaces\n all_surfaces=np.array(all_surfaces)\n all_surfaces_plus_null= np.array(all_surfaces_plus_null)\n # pre training \n print('Pre training')\n \n tsurf_size = self.surfaces_dimensions[layer][0]*self.surfaces_dimensions[layer][1]*self.polarities[layer]\n self.vaes[layer][1].fit(np.zeros([100000,tsurf_size]),\n np.zeros([100000,self.latent_variables[layer]]), shuffle=False,\n epochs=5, batch_size=batch_size)\n \n self.vaes[layer][2].fit(np.zeros([100000,self.latent_variables[layer]]),\n np.zeros([100000,tsurf_size]), shuffle=False,\n epochs=5, batch_size=batch_size)\n \n self.vaes[layer][0].fit(all_surfaces, shuffle=False,\n epochs=20, batch_size=batch_size,\n validation_data=(all_surfaces, None))\n \n self.vaes[layer][1].fit(np.zeros([100000,tsurf_size]),\n np.zeros([100000,self.latent_variables[layer]]), shuffle=False,\n epochs=10, batch_size=batch_size)\n \n self.vaes[layer][2].fit(np.zeros([100000,self.latent_variables[layer]]),\n np.zeros([100000,tsurf_size]), shuffle=False,\n epochs=10, batch_size=batch_size)\n \n self.vaes[layer][0].fit(all_surfaces, shuffle=False,\n epochs=1, batch_size=batch_size,\n validation_data=(all_surfaces, None))\n# all_surfaces=all_surfaces[::2]\n current_pos = 0\n for recording in range(len(input_data)): \n # Get network activations at steady state (after learning)\n if layer != 0 :\n recording_results = self.vaes[layer][1].predict(np.array(all_surfaces[current_pos:current_pos+len(input_data[recording][0])//self.polarities[layer]]), batch_size=batch_size)\n current_pos += len(input_data[recording][0])//self.polarities[layer]\n else:\n recording_results= self.vaes[layer][1].predict(np.array(all_surfaces[current_pos:current_pos+len(input_data[recording][0])]), batch_size=batch_size)\n current_pos += len(input_data[recording][0])\n layer_activations.append(recording_results)\n \n # Generate new events only if I am not at the last layer\n if layer != (self.layers-1):\n if layer != 0:\n new_data.append(events_from_activations(recording_results, [input_data[recording][0][range(0,len(input_data[recording][0]),self.polarities[layer])],\n 
input_data[recording][1][range(0,len(input_data[recording][0]),self.polarities[layer])]]))\n else:\n new_data.append(events_from_activations(recording_results, input_data[recording]))\n \n input_data=new_data\n self.last_layer_activations = layer_activations", "title": "" }, { "docid": "33d16ff3587dc31e28ebf5ec6e2b8841", "score": "0.57599354", "text": "def create_data_generators(cfg, target_shape, data_augmentation=\"none\"):\n\n # handle color mode\n if target_shape[2] == 1:\n color_mode = 'grayscale'\n else:\n color_mode = 'rgb'\n\n # raw data generator required for calculating image statistics\n # on 2000 images\n img_path = os.path.join(cfg_path['images'], 'train')\n logging.info(\"Initializing raw generator in %s\" % img_path)\n datagen_raw = ImageDataGenerator(rescale=1./255)\n raw_generator = datagen_raw.flow_from_directory(\n img_path,\n target_size=target_shape[0:2],\n color_mode=color_mode,\n batch_size=2000,\n class_mode='sparse',\n seed=cfg_model['random_seed'])\n\n # no data augmentation\n if data_augmentation == \"none\":\n # data augmentation / preprocessing for train data\n datagen_train = ImageDataGenerator(\n rescale=1./255)\n # augmentation / preprocessing for test / validation data\n datagen_test = ImageDataGenerator(rescale=1./255)\n\n # Snapshot Serengeti mode\n elif data_augmentation == \"ss\":\n # data augmentation / preprocessing for train data\n datagen_train = ImageDataGenerator(\n rescale=1./255,\n # samplewise_center=True,\n # samplewise_std_normalization=True,\n featurewise_center=True,\n featurewise_std_normalization=True,\n horizontal_flip=True,\n zoom_range=[0.9, 1])\n # augmentation / preprocessing for test / validation data\n datagen_test = ImageDataGenerator(\n rescale=1./255,\n featurewise_center=True,\n featurewise_std_normalization=True)\n\n # Not implemented data augmentation exception\n else:\n NotImplementedError(\"data_augmentation mode %s not implemented\"\n % data_augmentation)\n\n # create generators which serve images from directories for\n # test / train and validation data\n if cfg_model['image_iterator'] == 'double_iterator':\n batch_size = cfg['batch_size'] * 50\n else:\n batch_size = cfg['batch_size']\n\n img_path = os.path.join(cfg_path['images'], 'train')\n logging.info(\"Initializing train generator in %s\" % img_path)\n train_generator = datagen_train.flow_from_directory(\n img_path,\n target_size=target_shape[0:2],\n color_mode=color_mode,\n batch_size=batch_size,\n class_mode='sparse',\n seed=cfg_model['random_seed'])\n\n img_path = os.path.join(cfg_path['images'], 'test')\n logging.info(\"Initializing test generator in %s\" % img_path)\n test_generator = datagen_test.flow_from_directory(\n img_path,\n target_size=target_shape[0:2],\n color_mode=color_mode,\n batch_size=batch_size,\n class_mode='sparse',\n seed=cfg_model['random_seed'])\n\n img_path = os.path.join(cfg_path['images'], 'val')\n logging.info(\"Initializing val generator in %s\" % img_path)\n val_generator = datagen_test.flow_from_directory(\n img_path,\n target_size=target_shape[0:2],\n color_mode=color_mode,\n batch_size=batch_size,\n class_mode='sparse',\n seed=cfg_model['random_seed'])\n\n # fit data generators if required\n if any([datagen_train.featurewise_center,\n datagen_train.featurewise_std_normalization,\n datagen_train.zca_whitening]):\n\n # get random batch of raw training data\n x, y = raw_generator.next()\n\n # fit statistics from same batch of training data on the\n # data generators\n for gen in (datagen_train, datagen_test):\n gen.fit(x, 
seed=cfg_model['random_seed'])\n if any([gen.featurewise_center,\n gen.featurewise_std_normalization]):\n logging.info(\"Featurewise center, means: %s\" % gen.mean)\n logging.info(\"Featurewise center, std: %s\" % gen.std)\n\n if cfg_model['image_iterator'] == 'double_iterator':\n res = ()\n for gen in (train_generator, test_generator, val_generator):\n big = DoubleIterator(gen, batch_size=cfg['batch_size'],\n seed=cfg_model['random_seed'])\n res = res + (big, )\n logging.info(\"Initialized DoubleIterator\")\n return res\n\n # print all class mappings\n for gen, label in zip((train_generator, test_generator, val_generator),\n [\"train\", \"test\", \"val\"]):\n logging.info(\"Class mapping for set: %s\" % label)\n\n # empty class list\n classes_all = list()\n # mapping of index to class\n class_mapper = {v: k for k, v in gen.class_indices.items()}\n for i in range(0, len(class_mapper.keys())):\n classes_all.append(class_mapper[i])\n\n for k, v in gen.class_indices.items():\n logging.info(\"Class %s maps to index %s\" % (k, v))\n\n logging.info(\"Full ordered mapping: %s\" % classes_all)\n\n return train_generator, test_generator, val_generator", "title": "" }, { "docid": "1d0acdc00996de75c7226f44afe0bff7", "score": "0.57573146", "text": "def learn(self, dataset, learning_rate, coding_costraint = 0.08):\n # The list of all data batches to currently process\n # input_data[recording][events, timestamp if 0 or xy coordinates if 1]\n # In the first layer the input data is directly dataset given by the user\n input_data=dataset\n \n for layer in range(self.layers):\n layer_activations = []\n new_data = []\n all_surfaces = []\n all_surfaces_plus_null=[]\n # Create the varational autoencoder for this layer\n #intermediate_dim = self.surfaces_dimensions[layer][0]*self.surfaces_dimensions[layer][0]\n intermediate_dim = 40\n self.vaes.append(create_vae(self.surfaces_dimensions[layer][0]*self.surfaces_dimensions[layer][1]*self.polarities[layer],\n self.latent_variables[layer], intermediate_dim, learning_rate[layer], coding_costraint))\n\n # The code is going to run on gpus, to improve performances rather than \n # a pure online algorithm I am going to minibatch \n batch_size = 125\n for recording in range(len(input_data)):\n n_batch = len(input_data[recording][0]) // batch_size\n \n \n # Cut the excess data in the first layer : \n if layer == 0 :\n input_data[recording][0]=input_data[recording][0][:n_batch*batch_size]\n input_data[recording][1]=input_data[recording][1][:n_batch*batch_size]\n input_data[recording][2]=input_data[recording][2][:n_batch*batch_size]\n event = [[input_data[recording][0][event_ind],\n input_data[recording][1][event_ind],\n input_data[recording][2][event_ind]]for event_ind in range(n_batch*batch_size)] \n else :\n event = [[input_data[recording][0][event_ind],\n input_data[recording][1][event_ind],\n input_data[recording][2][event_ind],\n input_data[recording][3][event_ind]]for event_ind in range(n_batch*batch_size)] \n # The multiple event polarities are all synchonized in the layers after the first.\n # As a single time surface is build on all polarities, there is no need to build a time \n # surface per each event with a different polarity and equal time stamp, thus only \n # a fraction of the events are extracted here\n if layer != 0 :\n recording_surfaces = Parallel(n_jobs=self.threads)(delayed(Time_Surface_event)(self.surfaces_dimensions[layer][0],\n self.surfaces_dimensions[layer][1], event[event_ind].copy(),\n self.taus[layer], input_data[recording].copy(), 
self.polarities[layer], minv=0.1) for event_ind in range(0,n_batch*batch_size,self.polarities[layer]))\n else:\n recording_surfaces = Parallel(n_jobs=self.threads)(delayed(Time_Surface_event)(self.surfaces_dimensions[layer][0],\n self.surfaces_dimensions[layer][1], event[event_ind].copy(),\n self.taus[layer], input_data[recording].copy(), self.polarities[layer], minv=0.1) for event_ind in range(n_batch*batch_size))\n# null_surfaces = [np.zeros(self.surfaces_dimensions[layer][0]*self.surfaces_dimensions[layer][1]*self.polarities[layer]) for i in range(10000) ] \n# all_surfaces = all_surfaces + [(i==0)*recording_surfaces[ind]+(i==1)*null_surfaces[ind] for i in range(2) for ind in range(len(recording_surfaces))]\n# all_surfaces_plus_null = all_surfaces + recording_surfaces + null_surfaces\n all_surfaces = all_surfaces + recording_surfaces\n all_surfaces=np.array(all_surfaces)\n# all_surfaces_plus_null= np.array(all_surfaces_plus_null)\n # pre training \n print('Pre training')\n# tsurf_size = self.surfaces_dimensions[layer][0]*self.surfaces_dimensions[layer][1]*self.polarities[layer]\n# self.vaes[layer][1].fit(np.zeros([100000,tsurf_size]),\n# [np.zeros([100000,self.latent_variables[layer]]), \n# 0*np.ones([100000,self.latent_variables[layer]**2]),\n# 0*np.ones([100000,self.latent_variables[layer]])], shuffle=False,\n# epochs=5, batch_size=batch_size)\n# \n# self.vaes[layer][2].fit(np.zeros([100000,self.latent_variables[layer]]),\n# np.zeros([100000,tsurf_size]), shuffle=False,\n# epochs=5, batch_size=batch_size)\n# \n# \n# self.vaes[layer][0].fit(all_surfaces, shuffle=False,\n# epochs=10, batch_size=batch_size,\n# validation_data=(all_surfaces, None))\n# \n# self.vaes[layer][1].fit(np.zeros([100000,tsurf_size]),\n# [np.zeros([100000,self.latent_variables[layer]]), \n# 0*np.ones([100000,self.latent_variables[layer]**2]),\n# 0*np.ones([100000,self.latent_variables[layer]])], shuffle=False,\n# epochs=10, batch_size=batch_size)\n# \n# self.vaes[layer][2].fit(np.zeros([100000,self.latent_variables[layer]]),\n# np.zeros([100000,tsurf_size]), shuffle=False,\n# epochs=10, batch_size=batch_size)\n \n self.vaes[layer][0].fit(all_surfaces, shuffle=False,\n epochs=10, batch_size=batch_size,\n validation_data=(all_surfaces, None))\n# all_surfaces=all_surfaces[::2]\n current_pos = 0\n for recording in range(len(input_data)): \n # Get network activations at steady state (after learning)\n if layer != 0 :\n recording_results, _, _ = self.vaes[layer][1].predict(np.array(all_surfaces[current_pos:current_pos+len(input_data[recording][0])//self.polarities[layer]]), batch_size=batch_size)\n current_pos += len(input_data[recording][0])//self.polarities[layer]\n else:\n recording_results, _, _ = self.vaes[layer][1].predict(np.array(all_surfaces[current_pos:current_pos+len(input_data[recording][0])]), batch_size=batch_size)\n current_pos += len(input_data[recording][0])\n layer_activations.append(recording_results)\n \n # Generate new events only if I am not at the last layer\n if layer != (self.layers-1):\n if layer != 0:\n new_data.append(events_from_activations(recording_results, [input_data[recording][0][range(0,len(input_data[recording][0]),self.polarities[layer])],\n input_data[recording][1][range(0,len(input_data[recording][0]),self.polarities[layer])]]))\n else:\n new_data.append(events_from_activations(recording_results, input_data[recording]))\n \n input_data=new_data\n self.last_layer_activations = layer_activations", "title": "" }, { "docid": "c8ab4fdaf53fbfcf52709f5a3362d73b", "score": "0.57556194", 
"text": "def main():\n augment_train_ds = transforms.Compose([\n transforms.RandomCrop(64, padding=2),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.2, 0.2, 0.2)),\n ])\n \"\"\"Normalizing Test Dataset\"\"\"\n augment_test_ds = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.2, 0.2, 0.2)),\n ])\n\n \"\"\"Set seed.\"\"\"\n np.random.seed(0)\n random.seed(0)\n torch.manual_seed(0)\n torch.cuda.manual_seed(0)\n\n train_dir = '/u/training/tra287/scratch/tiny-imagenet-200/train'\n train_ds = datasets.ImageFolder(train_dir, transform=augment_train_ds)\n # print(train_ds.class_to_idx)\n train_ds_loader = data.DataLoader(train_ds, batch_size=batch_size_train, shuffle=True, num_workers=8)\n\n val_dir = '/u/training/tra287/scratch/tiny-imagenet-200/val/'\n\n # print(\"Now working on Val Dir\")\n if 'val_' in os.listdir(val_dir+'images/')[0]:\n # print(\"Calling create_val_dir() with val_dir: \", val_dir)\n create_val_folder(val_dir)\n val_dir = val_dir + 'images/'\n # print(\"changed val_dir to : \", val_dir)\n else:\n # print(\"Didnt call create_val_dir\")\n val_dir = val_dir + 'images/'\n #train_ds = torchvision.datasets.ImageNet(train_dir, split='train', download=False, transform=augment_train_ds)\n # train_ds_loader = data.DataLoader(train_ds, batch_size=batch_size_train, shuffle=True, num_workers=8)\n # test_ds = torchvision.datasets.ImageNet(val_dir, split='val', download=False, transform=augment_test_ds)\n # test_ds_loader = data.DataLoader(test_ds, batch_size=batch_size_test, shuffle=False, num_workers=8)\n\n test_ds = datasets.ImageFolder(val_dir, transform=augment_test_ds)\n #print(test_ds.class_to_idx)\n test_ds_loader = torch.utils.data.DataLoader(test_ds, batch_size=batch_size_test, shuffle=False, num_workers=8)\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n print(\"Initializing Model\")\n basic_block = BasicBlock\n res_net = ResNet(basic_block=basic_block, num_basic_blocks_list=[2, 4, 4, 2], num_classes=200,\n linear_layer_num_input=2304, max_pool_stride=2)\n res_net = res_net.to(device)\n start_epoch = 0\n\n if load_chkpt:\n print(\"Saved Model is being loaded\")\n chkpt = torch.load('./Checkpoint/model_state.pt')\n res_net.load_state_dict(chkpt['res_net_model'])\n start_epoch = chkpt['epoch']\n\n \"\"\"If multiple GPUs are available then use asynchronous training \"\"\"\n if device == 'cuda':\n res_net = torch.nn.DataParallel(res_net)\n cudnn.benchmark = True\n\n \"\"\"___________ Training ___________\"\"\"\n\n print(\"Starting Training\")\n\n \"\"\"Criterion Function: Softmax + Log-Likelihood\"\"\"\n loss_fn = nn.CrossEntropyLoss()\n \"\"\"Adam Optimizer (as it takes advantage of both RMSDrop and Momentum\"\"\"\n optimizer = optim.Adam(res_net.parameters(), lr=learning_rate)\n\n test_acc_list = []\n epochs_list = [x for x in range(epochs)]\n\n for epoch in range(start_epoch, epochs):\n\n cur_loss = 0.0\n total_correct = 0\n total_samples = 0\n\n \"\"\" Overflow error in the optimizer if the step size is not reset.\"\"\"\n if epoch > 8:\n for group in optimizer.param_groups:\n for p in group['params']:\n state = optimizer.state[p]\n if state['step'] >= 1024:\n state['step'] = 1000\n\n for i, (inputs, labels) in enumerate(train_ds_loader):\n\n \"\"\"Transfer inputs and labels to CUDA if available\"\"\"\n inputs = Variable(inputs).to(device)\n labels = Variable(labels).to(device)\n\n \"\"\"Loss function requires the inputs to be wrapped in variables\"\"\"\n 
# inputs = Variable(inputs)\n\n \"\"\"Torch tends to take cumulative gradients which is not required so setting it to zero after each batch\"\"\"\n optimizer.zero_grad()\n\n outputs = res_net(inputs)\n loss = loss_fn(outputs, labels)\n loss.backward()\n optimizer.step()\n\n cur_loss += loss.item()\n avg_loss = cur_loss / (i + 1)\n\n _, predicted_label = torch.max(outputs, 1)\n # print(predicted_label.shape, labels.shape)\n total_samples += labels.shape[0]\n # arr = (predicted_label == labels).numpy()\n # print(np.sum(arr))\n \"\"\"can not use numpy as the tensors are in CUDA\"\"\"\n total_correct += predicted_label.eq(labels.long()).float().sum().item()\n accuracy = total_correct / total_samples\n\n if i % 100 == 0:\n print('Training [epoch: %d, batch: %d] loss: %.3f, accuracy: %.5f' %\n (epoch + 1, i + 1, avg_loss, accuracy))\n\n test_acc_list.append(test(device, loss_fn, res_net, test_ds_loader))\n\n \"\"\"Saving model after every 5 epochs\"\"\"\n if (epoch + 1) % 5 == 0:\n print('==> Saving model ...')\n state = {\n 'res_net_model': res_net.state_dict(),\n 'epoch': epoch,\n }\n if not os.path.isdir('./Checkpoint'):\n os.mkdir('Checkpoint')\n torch.save(state, './Checkpoint/model_state.pt')\n\n print(\"Training Completed!\")\n\n \"\"\"___________ Testing ____________\"\"\"\n print(\"Testing Started\")\n \"\"\"Puts model in testing state\"\"\"\n res_net.eval()\n\n accuracy = test(device, loss_fn, res_net, test_ds_loader)\n\n print(\"Testing Completed with accuracy:\" + str(accuracy))\n\n with open('graph_resnet_tinyimagenet.csv', 'w') as result_file:\n wr = csv.writer(result_file, dialect='excel')\n wr.writerow(test_acc_list)\n\n print(\"Saved Test Accuracy list for graph\")", "title": "" }, { "docid": "764842370cc8c0075c3862f4e3c7a7d0", "score": "0.5754665", "text": "def augment_batch(batch):\n st = lambda aug: iaa.Sometimes(0.5, aug)\n\n seq = iaa.Sequential([\n iaa.Fliplr(0.5), # horizontally flip 50% of all images\n st(iaa.Crop(percent=(0, 0.1))), # crop images by 0-10% of their height/width\n st(iaa.GaussianBlur((0, 0.6))), # blur images with a sigma between 0 and 3.0\n st(iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.2), per_channel=0.5)), # add gaussian noise to images\n st(iaa.Dropout((0.0, 0.05), per_channel=0)), # randomly remove up to 10% of the pixels\n st(iaa.Add((-10, 10), per_channel=0.5)), # change brightness of images (by -10 to 10 of original value)\n st(iaa.Multiply((0.5, 1.5), per_channel=0.5)), # change brightness of images (50-150% of original value)\n st(iaa.ContrastNormalization((0.6, 2.0), per_channel=0)), # improve or worsen the contrast\n st(iaa.Affine(\n scale={\"x\": (0.8, 1.2), \"y\": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis\n translate_px={\"x\": (-16, 16), \"y\": (-16, 16)}, # translate by -16 to +16 pixels (per axis)\n rotate=(-10, 10), # rotate by -45 to +45 degrees\n shear=(-10, 10), # shear by -16 to +16 degrees\n # order=ia.ALL, # use any of scikit-image's interpolation methods\n # cval=(0, 1.0) # if mode is constant, use a cval between 0 and 1.0\n # mode=ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)\n )),\n st(iaa.ElasticTransformation(alpha=(0.1, 0.6), sigma=0.2)) # apply elastic transformations with random strengths\n ],\n random_order=True # do all of the above in random order\n )\n\n return seq.augment_images(batch)", "title": "" }, { "docid": "70f8381a6f26092b0ca8424aceb3045a", "score": "0.5750546", "text": "def pre_processing():\n\n # Download data (to 'DR_Data' 
folder)\n downloaded = os.path.exists('DR_Data')\n\n if not downloaded:\n\n os.mkdir('DR_Data')\n print('Downloading dataset...')\n url = 'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz'\n urllib.request.urlretrieve(url, 'DR_Data/train-images-idx3-ubyte.gz')\n url = 'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz' \n urllib.request.urlretrieve(url, 'DR_Data/train-labels-idx1-ubyte.gz')\n url = 'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz' \n urllib.request.urlretrieve(url, 'DR_Data/t10k-images-idx3-ubyte.gz')\n url = 'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz' \n urllib.request.urlretrieve(url, 'DR_Data/t10k-labels-idx1-ubyte.gz')\n\n # Check download\n if (os.path.isfile('DR_Data/train-images-idx3-ubyte.gz') \n and os.path.isfile('DR_Data/train-labels-idx1-ubyte.gz') \n and os.path.isfile('DR_Data/t10k-images-idx3-ubyte.gz') \n and os.path.isfile('DR_Data/t10k-labels-idx1-ubyte.gz')):\n print('Data downloaded successfully')\n else:\n print('Download failed')\n else:\n print('Data has been downloaded')\n \n\n # Load data and convert data to numpy arrays\n os.chdir('DR_Data')\n train_images = idx2numpy.convert_from_file(\n gzip.open('train-images-idx3-ubyte.gz','r'))\n train_labels = idx2numpy.convert_from_file(\n gzip.open('train-labels-idx1-ubyte.gz','r'))\n test_images = idx2numpy.convert_from_file(\n gzip.open('t10k-images-idx3-ubyte.gz','r'))\n test_labels = idx2numpy.convert_from_file(\n gzip.open('t10k-labels-idx1-ubyte.gz','r'))\n os.chdir('..')\n\n # Re-scale input values from intervals [0,255] to [0.01,1] \n # (necessary for optimal performance of NN)\n train_images = train_images * (0.99/255) + 0.01\n test_images = test_images * (0.99/255) + 0.01\n\n # Convert label data to one-hot representation with either 0.01 or 0.99 \n # (also necessary for optimal performance of NN \n # and to compute confusion matrix)\n train_labels = np.asfarray(train_labels)\n test_labels = np.asfarray(test_labels)\n train_labels = np.array(train_labels, ndmin=2).T\n test_labels = np.array(test_labels, ndmin=2).T\n train_labels = (np.arange(10) == train_labels).astype(np.float)\n test_labels = (np.arange(10) == test_labels).astype(np.float)\n train_labels[train_labels == 1] = 0.99\n test_labels[test_labels == 1] = 0.99\n train_labels[train_labels == 0] = 0.01\n test_labels[test_labels == 0] = 0.01\n\n # Function check\n if (train_images.shape == (60000, 28, 28) \n and train_labels.shape == (60000, 10)):\n print('Data preprocessed successfully')\n else:\n print('Preprocessing failed')\n \n return train_images, train_labels, test_images, test_labels", "title": "" }, { "docid": "ad35e4d95cce0d0797430690acd82792", "score": "0.57415843", "text": "def batch_generator(data, labels, augment_data=True, batch_size=32):\n\n batch_action = []\n batch_data = []\n batch_images = []\n sample_count = 0\n batch_data_1 = []\n batch_action_1 = []\n\n while True:\n # Shuffle indices to minimize overfitting.\n for i in np.random.permutation(len(data)):\n\n # Image (1) -> Center image and steering angle.\n center_path = data[i]\n #print(center_path)\n #angle = float(data[i][1])\n #throttle = float(data[i][2])\n action = labels[i]\n\n center_image = utils.load_image(center_path)\n #batch_images = np.expand_dims(center_image,axis=0)\n batch_data.append(center_image)\n #batch_data.append(angle)\n #batch_data.append(throttle)\n #batch_data_1.append(batch_data)\n batch_action.append(action)\n #batch_action_1.append(batch_action)\n\n #print(batch_data_1[0][1])\n\n 
sample_count += 1\n\n if(augment_data):\n if sample_count % 2 == 0:\n center_image, angle = utils.jitter_image(center_path,angle)\n #batch_images = np.expand_dims(center_image,axis=0)\n batch_data.append(center_image)\n #batch_data.append(angle)\n #batch_data.append(throttle)\n #batch_data_1.append(batch_data)\n batch_action.append(action)\n else:\n center_image= utils.tint_image(center_path)\n #batch_images = np.expand_dims(center_image,axis=0)\n batch_data.append(center_image)\n #batch_data.append(angle)\n #batch_data.append(throttle)\n #batch_data_1.append(batch_data)\n batch_action.append(action)\n\n sample_count += 1\n\n if ((sample_count % batch_size == 0) or (sample_count % len(data) == 0)):\n yield np.array(batch_data), np.array(batch_action)\n # Reset batch/\n batch_data = []\n batch_action = []", "title": "" }, { "docid": "49f75f047dcf67c85b0836473f497ec3", "score": "0.57389784", "text": "def sample(self, train_data):", "title": "" }, { "docid": "4e60ce9f98f46df8c68d84c041a6ddc1", "score": "0.5738414", "text": "def build_data(configuration):\n\n train_x = pd.read_csv(configuration.train_x)\n train_y = pd.read_csv(configuration.train_y)\n train_x[\"tag\"] = train_y[\"tag\"]\n df = train_x.drop('id', 1)\n \n df.to_csv(configuration.train_filename, header=None, index=None, sep=' ', mode='a')\n\n dev_x = pd.read_csv(configuration.dev_x)\n dev_y = pd.read_csv(configuration.dev_y)\n dev_x[\"tag\"] = dev_y[\"tag\"]\n df_dev = dev_x.drop('id', 1)\n df_dev.to_csv(configuration.dev_filename, header=None, index=None, sep=' ', mode='a')\n\n test_x = pd.read_csv(configuration.test_x)\n test = test_util(test_x)\n\n processing_word = get_processing_word(lowercase=configuration.lowercase)\n\n # Generators\n dev = MY_Dataset(configuration.dev_filename, processing_word)\n test = MY_Dataset(configuration.test_filename, processing_word)\n train = MY_Dataset(configuration.train_filename, processing_word)\n\n print \"Generators building done\"\n # Build Word and Tag vocab\n vocab_words, vocab_tags = get_vocabs([train, dev])\n vocab_glove = get_embed_vocab(configuration.glove_filename)\n\n vocab = vocab_words & vocab_glove\n vocab.add(UNK)\n vocab.add(NUM)\n\n\n # Save vocab\n write_vocab(vocab, configuration.words_filename)\n write_vocab(vocab_tags, configuration.tags_filename)\n\n # Embed vectors\n vocab = load_vocab(configuration.words_filename)\n export_embed_vectors(vocab, configuration.glove_filename, configuration.trimmed_filename, configuration.dim)", "title": "" }, { "docid": "13ac53ff48da1bb7ec2ed1361735ecbc", "score": "0.5730238", "text": "def _generate_train_batch(self):", "title": "" }, { "docid": "b798c3cf5f423d65e14ae0ab958c497f", "score": "0.57238996", "text": "def preprocess(img, img_size ,path, data_aug=False):\n \n \n # there are damaged files in IAM dataset - just use black image instead\n if img is None:\n print(\"img None ! \",path)\n img = np.zeros([img_size[1], img_size[0]])\n else:\n #随机上下左右add \n \n try:\n img = get_binary_graph(img)\n img = remove_and_add_side(img)\n #print(\"T \",path)\n except:\n print(\"F \",path)\n #print(img.shape)\n # increase dataset size by applying random stretches to the images\n if data_aug:\n stretch = (random.random() - 0.5) # -0.5 .. +0.5\n wStretched = max(int(img.shape[1] * (1 + stretch)),\n 1) # random width, but at least 1\n img = cv2.resize(\n img, (wStretched,\n img.shape[0])) # stretch horizontally by factor 0.5 .. 
1.5\n #cv2.imwrite(\"../data/see/train/\"+str(time.time())+\".png\",img)#path.split(\"/\")[-1]\n # create target image and copy sample image into it\n (wt, ht) = img_size\n (h, w) = img.shape\n fx = w / wt\n fy = h / ht\n f = max(fx, fy)\n new_size = (max(min(wt, int(w / f)), 1), max(\n min(ht, int(h / f)),\n 1)) # scale according to f (result at least 1 and at most wt or ht)\n img = cv2.resize(img, new_size)\n target = np.ones([ht, wt]) * 255\n target[0:new_size[1], 0:new_size[0]] = img\n\n # transpose for TF\n img = cv2.transpose(target)\n\n # normalize\n (m, s) = cv2.meanStdDev(img)\n m = m[0][0]\n s = s[0][0]\n img = img - m\n img = img / s if s > 0 else img\n return img", "title": "" }, { "docid": "b5bafeafc98d9c5543b774b14c18bdbc", "score": "0.5715927", "text": "def dataset_preprocessing(path_to_dataset, denoise=True):\n\n axial_pix_dim = 0.3\n model_image_size = 75\n interpolation = 'Cubic' # 'Sinc'\n original_path = os.path.abspath('.')\n for subject_dir in os.listdir(path_to_dataset):\n if os.path.isdir(path_to_dataset + '/' + subject_dir):\n os.chdir(path_to_dataset + '/' + subject_dir)\n # getting the subject images\n t2star = ''\n scseg = ''\n gmseg = ''\n for file_name in os.listdir('.'):\n if 'im' in file_name:\n t2star = file_name\n elif 'gm' in file_name:\n gmseg = file_name\n elif 'seg' in file_name and 'gm' not in file_name:\n scseg = file_name\n\n new_names = []\n for f_name in [t2star, scseg, gmseg]:\n status, output = sct.run('sct_orientation -i ' + f_name)\n if output[4:7] != 'RPI':\n status, output = sct.run('sct_orientation -i ' + f_name + ' -s RPI')\n new_names.append(output.split(':')[1][1:-1])\n else:\n new_names.append(f_name)\n\n t2star = new_names[0]\n scseg = new_names[1]\n gmseg = new_names[2]\n\n t2star = resample_image(t2star, npx=axial_pix_dim, npy=axial_pix_dim, interpolation=interpolation)\n scseg = resample_image(scseg, npx=axial_pix_dim, npy=axial_pix_dim, binary=True)\n gmseg = resample_image(gmseg, npx=axial_pix_dim, npy=axial_pix_dim, binary=True)\n\n if denoise:\n t2star_im = Image(t2star)\n t2star_im.denoise_ornlm()\n t2star_im.save()\n\n mask_box = crop_t2_star(t2star, scseg, box_size=model_image_size)\n sct.run('sct_crop_over_mask.py -i ' + gmseg + ' -mask ' + mask_box + ' -square 1 -o ' + sct.extract_fname(gmseg)[1] + '_croped')\n\n os.chdir(original_path)\n save_by_slice(path_to_dataset)", "title": "" }, { "docid": "e69e9805e21ee0351c9415eecb6f6422", "score": "0.5705777", "text": "def load_custom(self, dataset_dir, subset):\n # Add classes.\n \n train_set = []\n #Read the speciality yml file \n with open(os.path.join(ROOT_DIR,'speciality.yaml')) as f:\n train_set = yaml.load(f, Loader=yaml.FullLoader)['Flaws']['types']\n \n\n try:\n train_set.remove('BG')\n except:\n pass\n # Add classes.\n for i in range(len(train_set)):\n print(\"types\", i+1, train_set[i])\n self.add_class(\"types\", i+1, train_set[i])\n \n \n # Train or validation dataset?\n assert subset in [\"train\", \"val\"]\n dataset_dir = os.path.join(dataset_dir, subset)\n\n # We mostly care about the x and y coordinates of each region\n annotations = json.load(open(os.path.join(dataset_dir, \"via_region_data.json\")))\n #print(annotations)\n annotations = list(annotations.values()) # don't need the dict keys\n\n # The VIA tool saves images in the JSON even if they don't have any\n # annotations. 
Skip unannotated images.\n annotations = [a for a in annotations if a['regions']]\n\n # Add images\n for a in annotations:\n polygons_org = [r['shape_attributes'] for r in a['regions']]\n objects = [s['region_attributes'] for s in a['regions']]\n\n num_ids = []\n polygons = []\n\n # Component is what I defined the class in VGG Image Annotator\n for n,p in zip(objects, polygons_org):\n try:\n \n \n for i in range(len(train_set)):\n if n['Component'] == train_set[i]:\n num_ids.append(i+1)\n polygons.append(p)\n break\n \n #print(class_names.set_2_2_mm_case[i])\n \n except:\n pass\n\n \n \n # if no detection, skip\n if(len(polygons)==0):\n continue\n \n image_path = os.path.join(dataset_dir, a['filename'])\n image = skimage.io.imread(image_path)\n height, width = image.shape[:2]\n\n self.add_image(\n \"types\", # for a single class just add the name here\n image_id=a['filename'], # use file name as a unique image id\n path=image_path,\n width=width, height=height,\n polygons=polygons,\n num_ids=num_ids)", "title": "" }, { "docid": "08c397a8ead6296c6bc381a0285e2035", "score": "0.57053596", "text": "def data_prepare(self, action=None):\n\n if action is not None:\n utils.logger.debug(\"{} in data preparation\".format(action))\n\n try: # just recover from record file\n emb, data_train, test_data = self.read_emb_data_from_input()\n self.x_train, self.y_train, self.y_aux_train = zip(*data_train)\n self.x_train, self.y_train, self.y_aux_train = (\n np.array(self.x_train),\n np.array(self.y_train),\n np.array(self.y_aux_train),\n )\n utils.logger.debug(\"restored data from files for training\")\n self.BIN_FOLDER = defaults.Data_Folder\n\n return self.x_train, self.y_train, self.y_aux_train, test_data, emb\n except FileNotFoundError:\n utils.logger.debug(\n \"cannot restore emb, trainX from jigsaw kaggle file data\"\n )\n except TypeError: # if read out None for data_train\n utils.logger.debug(\n \"cannot restore emb, trainX from jigsaw kaggle file data\"\n )\n\n # if os.path.isfile(DATA_FILE_FLAG) and not self.do_emb_matrix_preparation: # in final stage, no need to check this...\n\n if not self.do_emb_matrix_preparation:\n # global embedding_matrix\n\n if action is not None and action == defaults.DATA_ACTION_NO_NEED_LOAD_EMB_M:\n self.embedding_matrix = None\n else:\n try:\n self.embedding_matrix = pickle.load(\n open(self.E_M_FILE, \"rb\"))\n except FileNotFoundError:\n self.BIN_FOLDER = \"/content/gdrivedata/My Drive/\"\n\n if not os.path.isdir(self.BIN_FOLDER):\n self.BIN_FOLDER = \"./\"\n\n if not utils.file_exist(self.E_M_FILE, fullpath=True):\n self.BIN_FOLDER = defaults.Data_Folder\n self.embedding_matrix = pickle.load(\n open(self.E_M_FILE, \"rb\"))\n utils.BIN_FOLDER = self.BIN_FOLDER # save file to the right place\n utils.logger.debug(self.E_M_FILE)\n\n if action is not None: # exist data, need to convert data\n utils.logger.debug(action)\n\n if (\n action == defaults.CONVERT_TRAIN_DATA\n or action == defaults.CONVERT_ADDITIONAL_NONTOXIC_DATA\n ):\n self.prepare_tfrecord_data(\n train_test_data=True, embedding=False, action=action\n ) # train data will rebuild, so we put it before read from pickle\n\n try:\n data_train = pickle.load(\n open(self.DATA_TRAIN_FILE, \"rb\")\n ) # (None, 2048)\n except FileNotFoundError:\n self.BIN_FOLDER = \"/content/gdrivedata/My Drive/\"\n\n if not os.path.isdir(self.BIN_FOLDER):\n self.BIN_FOLDER = \"./\"\n\n utils.BIN_FOLDER = self.BIN_FOLDER # save file to the right place\n data_train = pickle.load(\n open(self.DATA_TRAIN_FILE, \"rb\")\n ) # (None, 
2048)\n\n utils.logger.debug(self.DATA_TRAIN_FILE)\n self.x_test = pickle.load(\n open(self.DATA_TEST_FILE, \"rb\")\n ) # (None, 2048) 2048 features from xception net\n\n self.x_train, self.y_train, self.y_aux_train = zip(*data_train)\n self.x_train, self.y_train, self.y_aux_train = (\n np.array(self.x_train),\n np.array(self.y_train),\n np.array(self.y_aux_train),\n )\n\n # global test_df_id\n # test_df_id = pd.read_csv('../input/jigsaw-unintended-bias-in-toxicity-classification/test.csv').id\n\n try:\n self.test_df_id = pd.read_csv(\n self.INPUT_DATA_DIR + \"test.csv\"\n ).id # only id series is needed for generating submission csv file\n except FileNotFoundError:\n self.INPUT_DATA_DIR = \"../input/\"\n\n if not os.path.isdir(self.INPUT_DATA_DIR):\n self.INPUT_DATA_DIR = \"/home/pengyu/works/input/jigsaw-unintended-bias-in-toxicity-classification/\"\n\n if not os.path.isdir(self.INPUT_DATA_DIR):\n self.INPUT_DATA_DIR = (\n self.BIN_FOLDER\n ) # put same folder in google drive\n self.test_df_id = pd.read_csv(\n self.INPUT_DATA_DIR + \"test.csv\"\n ).id # only id series is needed for generating submission csv file\n\n if (\n action is not None\n ): # exist data, need to convert data, so put after read from pickle\n if action == defaults.CONVERT_DATA_Y_NOT_BINARY:\n self.prepare_tfrecord_data(\n train_test_data=False, embedding=False, action=action\n ) # train_test_data=False just not rebuild words, the y still need to change\n\n return (\n self.x_train,\n self.y_train,\n self.y_aux_train,\n self.x_test,\n self.embedding_matrix,\n )\n else:\n utils.logger.debug(self.DATA_TRAIN_FILE)\n # (x_train, y_train, y_aux_train), x_test = prepare_tfrecord_data()\n\n if action is not None and (\n action == defaults.CONVERT_TRAIN_DATA\n or action == defaults.CONVERT_ADDITIONAL_NONTOXIC_DATA\n ):\n self.embedding_matrix = pickle.load(open(self.E_M_FILE, \"rb\"))\n utils.logger.debug(\n \"Only build train test data, embedding loaded from pickle\"\n )\n\n return self.prepare_tfrecord_data(embedding=False, action=action)\n else:\n return self.prepare_tfrecord_data(embedding=True)", "title": "" }, { "docid": "f8c370ace8e3507056482f4063b516f1", "score": "0.57036906", "text": "def augmentData(self):\n with h5py.File(self.fname, 'r') as f1:\n with h5py.File(self.fname, 'w') as f2:\n g1 = f1.get('group1')\n f2.create_group('group1_lflip')", "title": "" }, { "docid": "d9d7e22c90369d59220d5e7ea08937f5", "score": "0.56988615", "text": "def __init_dataset(self):\r\n\r\n if self.__mode == 'test':\r\n self.__test_data = pd.read_csv(self.__label_root)\r\n self.__get_column_name(self.__test_data)\r\n self.__test_img_list = [os.path.join(self.__data_root, img_name) for img_name in\r\n self.__test_data[self.__img_column_name].values]\r\n else:\r\n self.__train_csv_data = pd.read_csv(self.__label_root)\r\n self.__get_column_name(self.__train_csv_data)\r\n # 全数据训练\r\n if config.train_proportion == 1.0:\r\n\r\n if self.__mode == 'train':\r\n self.__train_img_list = [os.path.join(self.__data_root, img_name) for img_name in\r\n self.__train_csv_data[self.__img_column_name].values]\r\n self.__train_label_list = self.__train_csv_data[self.__label_column_name].values\r\n if self.__mode == 'val':\r\n val_data = self.__train_csv_data[round(0.8*len(self.__train_csv_data)):]\r\n self.__val_img_list = [os.path.join(self.__data_root, img_name) for img_name in\r\n val_data[self.__img_column_name].values]\r\n self.__val_label_list = val_data[self.__label_column_name].values\r\n\r\n\r\n else:\r\n # random split\r\n train_img, train_label, 
val_img, val_label = train_test_split(\r\n self.__train_csv_data[self.__img_column_name].values,\r\n self.__train_csv_data[self.__label_column_name].values,\r\n train_size=config.train_proportion,\r\n test_size=1-config.train_proportion\r\n )\r\n # data for training\r\n if self.__mode == 'train':\r\n self.__train_img_list = train_img\r\n self.__train_label_list = train_label\r\n # data for validating\r\n if self.__mode == 'val':\r\n self.__val_img_list = val_img\r\n self.__val_label_list = val_label", "title": "" }, { "docid": "80a090d073d68ed81e57198a5d88c113", "score": "0.5686447", "text": "def fit(self):\n data_loader = CSVDataLoader(self.meta_file_name)\n self.dn = data_loader.load_data()\n\n self._check_unsupported_dataset_structure()\n\n self.dn.transform_data()\n self.modeler = Modeler(self.dn)\n self.modeler.model_database()\n self.sampler = Sampler(self.dn, self.modeler)", "title": "" }, { "docid": "814a56e3c3e3affac4f2af95b071d13b", "score": "0.5686442", "text": "def prepare_dataset_simple(filename_dataset):\n log.info(f\"Reading {IRF_FILE}\")\n\n irfs = load_cta_irfs(IRF_FILE)\n\n edisp_gauss = EnergyDispersion2D.from_gauss(\n e_true=ENERGY_AXIS_TRUE.edges,\n migra=MIGRA_AXIS.edges,\n sigma=0.1,\n bias=0,\n offset=[0, 2, 4, 6, 8] * u.deg,\n )\n\n irfs[\"edisp\"] = edisp_gauss\n # irfs[\"aeff\"].data.data = np.ones_like(irfs[\"aeff\"].data.data) * 1e6\n\n observation = Observation.create(\n obs_id=1001, pointing=POINTING, livetime=LIVETIME, irfs=irfs\n )\n\n empty = MapDataset.create(\n WCS_GEOM, energy_axis_true=ENERGY_AXIS_TRUE, migra_axis=MIGRA_AXIS\n )\n # maker = MapDatasetMaker(selection=[\"exposure\", \"edisp\"])\n # maker = MapDatasetMaker(selection=[\"exposure\", \"edisp\", \"background\"])\n maker = MapDatasetMaker(selection=[\"exposure\", \"edisp\", \"psf\", \"background\"])\n dataset = maker.run(empty, observation)\n\n filename_dataset.parent.mkdir(exist_ok=True, parents=True)\n log.info(f\"Writing {filename_dataset}\")\n dataset.write(filename_dataset, overwrite=True)", "title": "" }, { "docid": "5e780ea5d13d79f3332865144e7d5d59", "score": "0.5676346", "text": "def __init__(self, data_df, data_path, augmentation_transform=[tf.Translate(low=-0.1, high=0.1), tf.Rotate(low=-10, high=10),\n tf.Scale(low=0.9, high=1.1), tf.HFlip(p=0.5)], window=None, output_size=256,\n mode='standard', n_swap=10, swap_w=15, swap_h=15, swap_rot=False, contrastive_augmentation=None):\n super(RSNA_dataset, self).__init__()\n self.data_df = data_df.copy()\n self.n_sample = len(data_df)\n self.data_path = data_path\n self.window = window\n assert mode in ['standard', 'context_restoration', 'contrastive', 'binary_classification', 'multi_classification'], f\"Invalid mode. Must be one of 'standard', 'context_restoration', 'contrastive', 'binary_classification'. 
Given : {mode}\"\n self.mode = mode\n\n self.transform = tf.Compose(*augmentation_transform, tf.Resize(H=output_size, W=output_size))#,\n #tf.ToTorchTensor())\n self.toTensor = tf.ToTorchTensor()\n if mode == 'context_restoration':\n self.swap_tranform = tf.RandomPatchSwap(n=n_swap, w=swap_w, h=swap_h, rotate=swap_rot)\n elif mode == 'contrastive':\n self.contrastive_transform = tf.Compose(*contrastive_augmentation)\n elif mode == 'multi_classification':\n # add a columns 'no_Hemorrage'\n self.data_df['no_Hemorrhage'] = 1 - self.data_df.Hemorrhage\n # name of the classes\n self.class_name = ['no_Hemorrhage', 'Hemorrhage', 'epidural', 'intraparenchymal', 'intraventricular', 'subarachnoid', 'subdural']", "title": "" }, { "docid": "5f3dba3af1538f8dde86449fc01e48cf", "score": "0.56753206", "text": "def initialisedataset():\r\n import initialdefs\r\n import math\r\n\r\n task_file, alltasks = initialdefs.starup()\r\n\r\n X = np.array([[np.zeros([32, 32]), np.zeros([32, 32])]])\r\n Y = [0]\r\n\r\n # make prelim Y's - labels for which problems are patterns. Prelim because we'll make more samples from each problem\r\n # so we'll only use these to inform us what label we should use\r\n Yprelim = [0] * 400\r\n\r\n # from manually going through and seeing what tasks were filling in repeating patterns / mosaics\r\n for i in [16, 60, 73, 109, 241, 286, 304, 312, 350, 399]:\r\n Yprelim[i] = 1\r\n\r\n for taskno in range(len(alltasks)):\r\n print(taskno)\r\n task = alltasks[taskno]\r\n train = task['train']\r\n\r\n # check the input & output are the same size: if not, don't use (too different, would cause too many problems)\r\n check = train[0]\r\n checkinput = np.array(check['input'])\r\n checkoutput = np.array(check['output'])\r\n\r\n # if they are the same, we can use as sample for the model.\r\n if checkoutput.shape == checkinput.shape:\r\n for trainno in range(len(train)):\r\n # dim0: samples dim1: channels (2: input, out), dim3: x dim4: y\r\n imagepair = train[trainno]\r\n imageinput = imagepair['input']\r\n imageoutput = imagepair['output']\r\n sz0l = math.floor((32 - np.size(imageinput, 0))/2) # padding for the left of dimension 0\r\n sz0r = math.ceil((32 - np.size(imageinput, 0))/2) # padding for the right of dimension 0\r\n sz1l = math.floor((32 - np.size(imageinput, 1))/2) # padding for the left of dimension 1\r\n sz1r = math.ceil((32 - np.size(imageinput, 1))/2) # padding for the right of dimension 1\r\n ippad = np.pad(imageinput, ((sz0l, sz0r), (sz1l, sz1r)), constant_values=(0, 0))\r\n oppad = np.pad(imageoutput, ((sz0l, sz0r), (sz1l, sz1r)), constant_values=(0, 0))\r\n\r\n newsample = np.array([[ippad, oppad]])\r\n\r\n X = np.concatenate((X, newsample), axis=0)\r\n Y.append(Yprelim[taskno])\r\n\r\n # create more images from the rotated versions\r\n for i in range(3):\r\n ippad = np.rot90(ippad)\r\n oppad = np.rot90(oppad)\r\n\r\n newsample = np.array([[ippad, oppad]])\r\n\r\n X = np.concatenate((X, newsample), axis=0)\r\n Y.append(Yprelim[taskno])\r\n\r\n # create more images from the transposed & rotated versions\r\n ippad = ippad.T\r\n oppad = oppad.T\r\n\r\n newsample = np.array([[ippad, oppad]])\r\n\r\n X = np.concatenate((X, newsample), axis=0)\r\n Y.append(Yprelim[taskno])\r\n\r\n for i in range(3):\r\n ippad = np.rot90(ippad)\r\n oppad = np.rot90(oppad)\r\n\r\n newsample = np.array([[ippad, oppad]])\r\n\r\n X = np.concatenate((X, newsample), axis=0)\r\n Y.append(Yprelim[taskno])\r\n\r\n X = np.delete(X, 0, axis=0)\r\n Y.__delitem__(0)\r\n\r\n # make channel the last dim\r\n X = 
np.moveaxis(X, 1, -1)\r\n\r\n return X, Y", "title": "" }, { "docid": "5bcd4d002ce6ec5f6fd19cc9486589e1", "score": "0.56738806", "text": "def train_generator(X_train, Y_train, transform = None):\n\n# transforms = {'translate discrete': iaa.Affine(translate_px={\"x\": iap.Choice([9, 0, -9]), \"y\": iap.Choice([9, 0, -9])}),\n# 'translate random': iaa.Affine(translate_px={\"x\": (-12, 12), \"y\": (-12, 12)}), \n# 'rotate': iaa.Affine(rotate=(-45, 45)),\n# 'vertical flip': iaa.Flipud(0.5),\n# 'horizontal flip':iaa.Fliplr(0.5),\n# 'scale': iaa.Affine(scale=(0.95, 1.05)),\n# 'blur': iaa.GaussianBlur(sigma = 3.0),\n# 'combined': iaa.Sequential([ \n# iaa.Affine(translate_px={\"x\": iap.Choice([9, 0, -9]), \"y\": iap.Choice([9, 0, -9])}),\n# iaa.Affine(scale=(0.95, 1.05))\n# ])}\n \n if transform:\n if transform in ['original']:\n \"\"\"no further transformation\"\"\"\n pass\n# else:\n# seq = transforms[transform]\n# X_train = seq.augment_images(X_train)\n \n iter_ = image.ImageDataGenerator() \n batch = iter_.flow(X_train, Y_train, batch_size = 32, seed = 1337) \n while True:\n yield batch.next()", "title": "" }, { "docid": "4bbc157b835e92d856c52aa9810ab679", "score": "0.56734693", "text": "def generate_prediction_data(self, *args, **kwargs):\r\n raise NotImplementedError()", "title": "" }, { "docid": "d59730d5180b6fe548d3be313adc6a86", "score": "0.5673305", "text": "def test_load_data(self):\n\n\t\timages, labels = load_data(\"test_data\", 500 * 500, 3)\n\t\t# [rotate, brighten, crop, mirror, blur, sharpen, bright_blur, org, affine, darken]\n\n\n\t\tbrighten = cv2.imread(\"test_data/augmented/brighten.png\", 3)\n\t\tbrighten = np.asarray(brighten, dtype=\"float32\")\n\n\t\trotate = cv2.imread(\"test_data/augmented/rotate.png\", 3)\n\t\trotate = np.asarray(rotate, dtype=\"float32\")\n\n\t\tblur = cv2.imread(\"test_data/augmented/blur.png\", 3)\n\t\tblur = np.asarray(blur, dtype=\"float32\")\n\n\t\tcrop = cv2.imread(\"test_data/augmented/crop.png\", 3)\n\t\tcrop = np.asarray(crop, dtype=\"float32\")\n\n\t\tmirror = cv2.imread(\"test_data/augmented/mirror.png\", 3)\n\t\tmirror = np.asarray(mirror, dtype=\"float32\")\n\n\t\tsharpen = cv2.imread(\"test_data/augmented/sharpen.png\", 3)\n\t\tsharpen = np.asarray(sharpen, dtype=\"float32\")\n\n\t\tbright_blur = cv2.imread(\"test_data/augmented/bright_blur.png\", 3)\n\t\tbright_blur = np.asarray(bright_blur, dtype=\"float32\")\n\n\t\torg = cv2.imread(\"test_data/augmented/org.png\", 3)\n\t\torg = np.asarray(org, dtype=\"float32\")\n\n\t\taffine = cv2.imread(\"test_data/augmented/affine.png\", 3)\n\t\taffine = np.asarray(affine, dtype=\"float32\")\n\n\t\tdarken = cv2.imread(\"test_data/augmented/darken.png\", 3)\n\t\tdarken = np.asarray(darken, dtype=\"float32\")\n\n\t\t# [rotate, brighten, crop, mirror, blur, sharpen, bright_blur, org, affine, darken]\n\t\tarr = []\n\n\t\tarr.append(rotate)\n\t\tarr.append(brighten)\n\t\tarr.append(crop)\n\t\tarr.append(mirror)\n\t\tarr.append(blur)\n\t\tarr.append(sharpen)\n\t\tarr.append(bright_blur)\n\t\tarr.append(org)\n\t\tarr.append(affine)\n\t\tarr.append(darken)\n\n\t\tarr = np.asarray(arr)\n\t\tarr = arr.reshape(-1, 500 * 500, 3)\n\n\t\t# [rotate, brighten, crop, mirror, blur, sharpen, bright_blur, org, affine, darken]\n\n\t\ttest = np.array([1, 1, 1, 1, 1, 1, 0, 1, 1, 1])\n\t\tnp.testing.assert_array_equal(labels.shape, test.shape)\n\t\tnp.testing.assert_array_equal(arr.shape, images.shape)", "title": "" }, { "docid": "4304e2b74231e06c63ab8e4976adca71", "score": "0.5669696", "text": "def 
_generate_dataset(self):\n self._dataset = TorchRandomDataset(\n [self._args.sample_count, self._args.seq_len, self._args.input_size], self._world_size, dtype=torch.float32\n )\n if len(self._dataset) == 0:\n logger.error('Generate random dataset failed - model: {}'.format(self._name))\n return False\n\n return True", "title": "" }, { "docid": "c36fd789c99cbfdf7442b2c6da9f84ba", "score": "0.5666151", "text": "def create_dataset(shape='gmm', sampleSize=200, n_clusters=3):\n clusterStd = [0.5, 1, 1.3]*3\n clusterStd = clusterStd[:n_clusters]\n \n if shape=='gmm':\n X = datasets.make_blobs(n_samples=sampleSize, n_features=2, centers=n_clusters, cluster_std=clusterStd)[0]\n X[:,0] = 30*(X[:,0]-min(X[:,0]))/(max(X[:,0])-min(X[:,0])) - 15\n X[:,1] = 30*(X[:,1]-min(X[:,1]))/(max(X[:,1])-min(X[:,1])) - 15\n \n elif shape=='circle':\n X = datasets.make_circles(n_samples=sampleSize, factor=.5, noise=.05)[0]\n X[:,0] = 30*(X[:,0]-min(X[:,0]))/(max(X[:,0])-min(X[:,0])) - 15\n X[:,1] = 30*(X[:,1]-min(X[:,1]))/(max(X[:,1])-min(X[:,1])) - 15\n \n elif shape=='moon':\n X = datasets.make_moons(n_samples=sampleSize, noise=.1)[0]\n X[:,0] = 30*(X[:,0]-min(X[:,0]))/(max(X[:,0])-min(X[:,0])) - 15\n X[:,1] = 30*(X[:,1]-min(X[:,1]))/(max(X[:,1])-min(X[:,1])) - 15\n\n elif shape=='anisotropic':\n transformations = {0:[[0.6, -0.6], [-0.4, 0.8]], 1:[[-0.7, -0.6], [0.6, 0.8]], 2:[[0.8, -0.1], [0.8, 0.1]]}\n X, y = datasets.make_blobs(n_samples=sampleSize, n_features=2, centers=n_clusters, cluster_std=clusterStd)\n for i in range(n_clusters):\n X[y==i] = np.dot(X[y==i], transformations[i%3])\n X = 5*X\n X[:,0] = 30*(X[:,0]-min(X[:,0]))/(max(X[:,0])-min(X[:,0])) - 15\n X[:,1] = 30*(X[:,1]-min(X[:,1]))/(max(X[:,1])-min(X[:,1])) - 15\n else:\n X = 30*np.random.rand(sampleSize, 2)-15\n \n return X", "title": "" }, { "docid": "55afc52037d9c7962a5f288c98f22068", "score": "0.56657857", "text": "def process_batch_augmentation(input_filename_list, input_label_list, dim_input, batch_sample_num):\n new_path_list = []\n new_label_list = []\n for k in range(batch_sample_num):\n class_idxs = list(range(0, FLAGS.way_num))\n random.shuffle(class_idxs)\n for class_idx in class_idxs:\n true_idx = class_idx*batch_sample_num + k\n new_path_list.append(input_filename_list[true_idx])\n new_label_list.append(input_label_list[true_idx])\n\n img_list = []\n img_list_h = []\n for filepath in new_path_list:\n this_img = imread(filepath)\n this_img_h = cv2.flip(this_img, 1)\n this_img = np.reshape(this_img, [-1, dim_input])\n this_img = this_img / 255.0\n img_list.append(this_img)\n this_img_h = np.reshape(this_img_h, [-1, dim_input])\n this_img_h = this_img_h / 255.0\n img_list_h.append(this_img_h)\n\n img_list_all = img_list + img_list_h\n label_list_all = new_label_list + new_label_list\n\n img_array = np.array(img_list_all).reshape([FLAGS.way_num*batch_sample_num*2, dim_input])\n label_array = one_hot(np.array(label_list_all)).reshape([FLAGS.way_num*batch_sample_num*2, -1])\n return img_array, label_array", "title": "" }, { "docid": "662337094f4d24385c8d0dd01b9297b8", "score": "0.5665643", "text": "def create_yolo_dataset(mindrecord_dir, batch_size=32, repeat_num=10, device_num=1, rank=0,\n is_training=True, num_parallel_workers=8):\n ds = de.MindDataset(mindrecord_dir, columns_list=[\"image\", \"annotation\"], num_shards=device_num, shard_id=rank,\n num_parallel_workers=num_parallel_workers, shuffle=False)\n decode = C.Decode()\n ds = ds.map(operations=decode, input_columns=[\"image\"])\n compose_map_func = (lambda image, annotation: 
preprocess_fn(image, annotation, is_training))\n\n if is_training:\n hwc_to_chw = C.HWC2CHW()\n ds = ds.map(operations=compose_map_func, input_columns=[\"image\", \"annotation\"],\n output_columns=[\"image\", \"bbox_1\", \"bbox_2\", \"bbox_3\", \"gt_box1\", \"gt_box2\", \"gt_box3\"],\n num_parallel_workers=num_parallel_workers)\n ds = ds.project([\"image\", \"bbox_1\", \"bbox_2\", \"bbox_3\", \"gt_box1\", \"gt_box2\", \"gt_box3\"])\n ds = ds.map(operations=hwc_to_chw, input_columns=[\"image\"], num_parallel_workers=num_parallel_workers)\n ds = ds.batch(batch_size, drop_remainder=True)\n ds = ds.repeat(repeat_num)\n else:\n ds = ds.map(operations=compose_map_func, input_columns=[\"image\", \"annotation\"],\n output_columns=[\"image\", \"image_shape\", \"annotation\"],\n num_parallel_workers=num_parallel_workers)\n return ds", "title": "" } ]
66ac20af48f8b5e02bd54d9a86c45a5a
active_thread_priority(cfi_gen_sptr self) > int
[ { "docid": "a33e52697845d5ead872ccc46c11c439", "score": "0.85378665", "text": "def active_thread_priority(self):\n return _my_lte_swig.cfi_gen_sptr_active_thread_priority(self)", "title": "" } ]
[ { "docid": "b55d3d743ab2f15b11d8c6a1e248a910", "score": "0.83537924", "text": "def active_thread_priority(self):\n return _my_lte_swig.dci_gen_sptr_active_thread_priority(self)", "title": "" }, { "docid": "c417f577ec9e7dadad8ca80f5405b4de", "score": "0.8266334", "text": "def active_thread_priority(self):\n return _my_lte_swig.precoder_sptr_active_thread_priority(self)", "title": "" }, { "docid": "aef9390c5b7d9c27ec41facf554e74e1", "score": "0.81147915", "text": "def active_thread_priority(self):\n return _my_lte_swig.pdcch_interleaver_sptr_active_thread_priority(self)", "title": "" }, { "docid": "bd49c13f853f91662b412ec9e73c8137", "score": "0.8105901", "text": "def active_thread_priority(self):\n return _my_lte_swig.scrambler_sptr_active_thread_priority(self)", "title": "" }, { "docid": "4dde87316ecdcc2e1b1d94ce219d5bee", "score": "0.80623734", "text": "def active_thread_priority(self):\n return _atsc_swig.atsc_viterbi_decoder_sptr_active_thread_priority(self)", "title": "" }, { "docid": "11e4087a19faed3d2027b92e94be3418", "score": "0.80529463", "text": "def active_thread_priority(self):\n return _my_lte_swig.scrambler_phich_sptr_active_thread_priority(self)", "title": "" }, { "docid": "66b2b202acc3af27383e9fd38f6f9576", "score": "0.8041825", "text": "def active_thread_priority(self):\n return _ccsds_swig.output_counter_cc_sptr_active_thread_priority(self)", "title": "" }, { "docid": "34719888eb95bc0952347e6411501d31", "score": "0.80100834", "text": "def active_thread_priority(self):\n return _seniorproj_swig.tos_add_header_dec_sptr_active_thread_priority(self)", "title": "" }, { "docid": "4199db640218ea936b73006f8b131901", "score": "0.80090624", "text": "def active_thread_priority(self):\n return _my_lte_swig.cp_adder_sptr_active_thread_priority(self)", "title": "" }, { "docid": "a506265acdac8460353282afeff8d4dc", "score": "0.8008692", "text": "def active_thread_priority(self):\n return _my_lte_swig.lte_random_bit_gen_sptr_active_thread_priority(self)", "title": "" }, { "docid": "30c4258fae9c2cbe98adcc61c226811a", "score": "0.80032545", "text": "def active_thread_priority(self):\n return _atsc_swig.atsc_interleaver_sptr_active_thread_priority(self)", "title": "" }, { "docid": "6e97bb0c070cdfec81e758b4aab690bf", "score": "0.7995824", "text": "def active_thread_priority(self):\n return _ccsds_swig.dll_cc_sptr_active_thread_priority(self)", "title": "" }, { "docid": "7ac8421760a46d172cca892e5f482684", "score": "0.7989383", "text": "def active_thread_priority(self):\n return _my_lte_swig.conv_enc_sptr_active_thread_priority(self)", "title": "" }, { "docid": "d1f2c21aacc5c77a0dc5c08bb9387a18", "score": "0.79821974", "text": "def active_thread_priority(self):\n return _my_lte_swig.layer_mapper_sptr_active_thread_priority(self)", "title": "" }, { "docid": "c4db131ddbac02457f34da8922ec219e", "score": "0.7979503", "text": "def active_thread_priority(self):\n return _my_lte_swig.repeater_sptr_active_thread_priority(self)", "title": "" }, { "docid": "6fccbd552d07c614f453c4d2dbfa171a", "score": "0.79773945", "text": "def active_thread_priority(self):\n return _my_lte_swig.phich_grouping_sptr_active_thread_priority(self)", "title": "" }, { "docid": "1a58bda1b7614d4c2739c213a9d44e9b", "score": "0.797072", "text": "def active_thread_priority(self):\n return _my_lte_swig.resource_mapper_sptr_active_thread_priority(self)", "title": "" }, { "docid": "a34a758061025232171770a2df4d613e", "score": "0.79689956", "text": "def thread_priority(self):\n return _my_lte_swig.cfi_gen_sptr_thread_priority(self)", 
"title": "" }, { "docid": "24d47851e12709afd6aedc1648855467", "score": "0.7964896", "text": "def active_thread_priority(self):\n return _my_lte_swig.code_block_concatenation_sptr_active_thread_priority(self)", "title": "" }, { "docid": "e4472d4ed6ba25a0c386c28f542bcffe", "score": "0.796289", "text": "def active_thread_priority(self):\n return _my_lte_swig.code_block_segmentation_sptr_active_thread_priority(self)", "title": "" }, { "docid": "de27057e176a0e8999c8f2054eb9d2e9", "score": "0.79603475", "text": "def active_thread_priority(self):\n return _ccsds_swig.add_asm_sptr_active_thread_priority(self)", "title": "" }, { "docid": "af7f08fd057ca10d0d45c9ea0501a683", "score": "0.795257", "text": "def active_thread_priority(self):\n return _ccsds_swig.softbittobit_sptr_active_thread_priority(self)", "title": "" }, { "docid": "5534a9c225b0d568e7c403570860a525", "score": "0.7945813", "text": "def active_thread_priority(self):\n return _atsc_swig.atsc_fpll_sptr_active_thread_priority(self)", "title": "" }, { "docid": "0bb1328739ca025baa0b309e3fcb617c", "score": "0.79259944", "text": "def active_thread_priority(self):\n return _my_lte_swig.rate_match_sptr_active_thread_priority(self)", "title": "" }, { "docid": "5cd9c569f947cd7612b9a1687ef11da6", "score": "0.7913584", "text": "def active_thread_priority(self):\n return _my_lte_swig.modulation_mapper_sptr_active_thread_priority(self)", "title": "" }, { "docid": "1fa7cd8d5ed631abe7159599a33026c5", "score": "0.78954846", "text": "def active_thread_priority(self):\n return _ccsds_swig.trunk_tail_sptr_active_thread_priority(self)", "title": "" }, { "docid": "506a55815c6ee637e53a92da20b49f7c", "score": "0.78867155", "text": "def active_thread_priority(self):\n return _atsc_swig.atsc_deinterleaver_sptr_active_thread_priority(self)", "title": "" }, { "docid": "5df54b12df2cd80cbc545004e9f4cdad", "score": "0.7885996", "text": "def active_thread_priority(self):\n return _ccsds_swig.softbit_msg_source_f_sptr_active_thread_priority(self)", "title": "" }, { "docid": "0edde3410cb6d49ca0723bb2e050c0db", "score": "0.7883877", "text": "def active_thread_priority(self):\n return _ccsds_swig.send_nanolink_sptr_active_thread_priority(self)", "title": "" }, { "docid": "355d0e94aa6a9b95d7fcf7b73a8d7d8e", "score": "0.7882883", "text": "def active_thread_priority(self):\n return _ccsds_swig.simple_bpsk_SNR_qf_sptr_active_thread_priority(self)", "title": "" }, { "docid": "5bfc12c86f83f2ef1e5e5fc1bb3e5492", "score": "0.7879493", "text": "def active_thread_priority(self):\n return _ccsds_swig.blob_msg_source_b_sptr_active_thread_priority(self)", "title": "" }, { "docid": "13c27a8591cbe50d94c5d81fb1cd89dc", "score": "0.7868223", "text": "def active_thread_priority(self):\n return _my_lte_swig.pdcch_mux_sptr_active_thread_priority(self)", "title": "" }, { "docid": "3b43048ea19e39feb50bf45e026fa7fa", "score": "0.7860645", "text": "def active_thread_priority(self):\n return _ccsds_swig.rs_decode_sptr_active_thread_priority(self)", "title": "" }, { "docid": "611b8f7a15ca5eb06eed43a5a1ca7c80", "score": "0.78443265", "text": "def active_thread_priority(self):\n return _ccsds_swig.message_info_sptr_active_thread_priority(self)", "title": "" }, { "docid": "5f8f80c519e089d4c59951df39e9e4d8", "score": "0.7837761", "text": "def active_thread_priority(self):\n return _ieee80211ah_swig.ah_ofdm_mapper_sptr_active_thread_priority(self)", "title": "" }, { "docid": "84f7741fcdab738da982064e1a46ebad", "score": "0.7833237", "text": "def active_thread_priority(self):\n return 
_ccsds_swig.mpsk_detector_soft_cf_sptr_active_thread_priority(self)", "title": "" }, { "docid": "8ff4d49b09918f62c39f7186c2a537c9", "score": "0.7831438", "text": "def active_thread_priority(self):\n return _atsc_swig.atsc_equalizer_sptr_active_thread_priority(self)", "title": "" }, { "docid": "a4afcb0a3100b8f9b321e50d216ba01b", "score": "0.7822572", "text": "def active_thread_priority(self):\n return _ccsds_swig.reversebits_sptr_active_thread_priority(self)", "title": "" }, { "docid": "f893b6f8aba530e1b300339c9302fc68", "score": "0.78203344", "text": "def active_thread_priority(self):\n return _ccsds_swig.ldpc_decoder_sptr_active_thread_priority(self)", "title": "" }, { "docid": "aa160e7814811409903793b4ca10c969", "score": "0.7820056", "text": "def active_thread_priority(self):\n return _ccsds_swig.pll_cc_sptr_active_thread_priority(self)", "title": "" }, { "docid": "18d8dde01f122384dc16185d4979401f", "score": "0.78123456", "text": "def active_thread_priority(self):\n return _ccsds_swig.mpsk_preamble_cc_sptr_active_thread_priority(self)", "title": "" }, { "docid": "41a7a0f3b56d0380cf25324162ab37b5", "score": "0.78113836", "text": "def active_thread_priority(self):\n return _atsc_swig.atsc_depad_sptr_active_thread_priority(self)", "title": "" }, { "docid": "36b0330c03dda7273eca8eb680983297", "score": "0.7807858", "text": "def active_thread_priority(self):\n return _atsc_swig.atsc_fs_checker_sptr_active_thread_priority(self)", "title": "" }, { "docid": "aa9e257fad54b825c239af5f95b31251", "score": "0.78056014", "text": "def active_thread_priority(self):\n return _atsc_swig.atsc_pad_sptr_active_thread_priority(self)", "title": "" }, { "docid": "f826d040f55439d2477e462420bbc941", "score": "0.7804398", "text": "def active_thread_priority(self):\n return _my_lte_swig.rate_match_conv_sptr_active_thread_priority(self)", "title": "" }, { "docid": "56c8ec1af1e77003ab5be3a5ce0da556", "score": "0.7803554", "text": "def active_thread_priority(self):\n return _ccsds_swig.soft_to_hard_message_sptr_active_thread_priority(self)", "title": "" }, { "docid": "714c8b1de44302ebcdd596dab3279e04", "score": "0.77921414", "text": "def active_thread_priority(self):\n return _ccsds_swig.randomiser_sptr_active_thread_priority(self)", "title": "" }, { "docid": "031b339d92b602fda1fc4d4ef1c1f05e", "score": "0.7782636", "text": "def active_thread_priority(self):\n return _my_lte_swig.phich_orthogonalization_sptr_active_thread_priority(self)", "title": "" }, { "docid": "2b1ec80cf74366921537bb9735b37e87", "score": "0.7775431", "text": "def active_thread_priority(self):\n return _ccsds_swig.softbit_msg_sink_f_sptr_active_thread_priority(self)", "title": "" }, { "docid": "341aa54beb1c0ac69cbaaae8c8be8df4", "score": "0.776505", "text": "def active_thread_priority(self):\n return _my_lte_swig.crc_attachment_sptr_active_thread_priority(self)", "title": "" }, { "docid": "27df466c4c519083b9e40fa067d2e1a2", "score": "0.77646255", "text": "def active_thread_priority(self):\n return _my_lte_swig.turbo_encoder_sptr_active_thread_priority(self)", "title": "" }, { "docid": "47868e3c6b66355051c6c483655d2f39", "score": "0.77626735", "text": "def active_thread_priority(self):\n return _ccsds_swig.rs_encode_sptr_active_thread_priority(self)", "title": "" }, { "docid": "a04070f45f37771265ccf25d995f0c67", "score": "0.7760976", "text": "def active_thread_priority(self):\n return _ccsds_swig.blob_msg_sink_b_sptr_active_thread_priority(self)", "title": "" }, { "docid": "9cafba4f7232ab4d7d9b0a2b2dde95b0", "score": "0.7754212", "text": "def 
active_thread_priority(self):\n return _atsc_swig.atsc_randomizer_sptr_active_thread_priority(self)", "title": "" }, { "docid": "22b5be2584e5988f941c92ddcae8b526", "score": "0.77533925", "text": "def active_thread_priority(self):\n return _ccsds_swig.mpsk_mod_bc_sptr_active_thread_priority(self)", "title": "" }, { "docid": "d22915d49e8d1d21c8f430fa79921886", "score": "0.77518666", "text": "def active_thread_priority(self):\n return _atsc_swig.atsc_derandomizer_sptr_active_thread_priority(self)", "title": "" }, { "docid": "23b5dd30e529cfdebdb46bd215db1989", "score": "0.7751046", "text": "def active_thread_priority(self):\n return _ccsds_swig.conv_encode27_bb_sptr_active_thread_priority(self)", "title": "" }, { "docid": "7a06e92c1332c650c5ffa7e7bdb7e045", "score": "0.7748286", "text": "def thread_priority(self):\n return _my_lte_swig.dci_gen_sptr_thread_priority(self)", "title": "" }, { "docid": "cacff918e75e6622aaf95c7bb0936f8e", "score": "0.77473086", "text": "def active_thread_priority(self):\n return _ccsds_swig.ldpc_encoder_sptr_active_thread_priority(self)", "title": "" }, { "docid": "7ceb382f4ba65200dc675d14fde52669", "score": "0.774203", "text": "def active_thread_priority(self):\n return _ccsds_swig.conv_decode27_sptr_active_thread_priority(self)", "title": "" }, { "docid": "f82c47be68d63f9173d2808c973b9c97", "score": "0.77398676", "text": "def active_thread_priority(self):\n return _atsc_swig.atsc_rs_decoder_sptr_active_thread_priority(self)", "title": "" }, { "docid": "9c64e0a67d0a88184a94486b13b89ccd", "score": "0.7736771", "text": "def active_thread_priority(self):\n return _atsc_swig.atsc_bit_timing_loop_sptr_active_thread_priority(self)", "title": "" }, { "docid": "22f93246a00e31bd61e395baa0845b3a", "score": "0.7717515", "text": "def active_thread_priority(self):\n return _ieee80211ah_swig.ah_chunks_to_symbols_sptr_active_thread_priority(self)", "title": "" }, { "docid": "77ae45c60a2e27dcec49961a36be1067", "score": "0.77116966", "text": "def active_thread_priority(self):\n return _ccsds_swig.msg_null_src_sptr_active_thread_priority(self)", "title": "" }, { "docid": "537a623ab3407f287cba5a34c24ca860", "score": "0.7710147", "text": "def active_thread_priority(self) -> \"int\":\n return _m0wut_swig.tcp_client_source_sptr_active_thread_priority(self)", "title": "" }, { "docid": "c8125afbdc5244d5eff9ecd260f0c781", "score": "0.77072114", "text": "def active_thread_priority(self):\n return _ieee80211ah_swig.ah_ofdm_decode_mac_sptr_active_thread_priority(self)", "title": "" }, { "docid": "4dc977628f221e6d74fa33d53ddc979a", "score": "0.77070063", "text": "def thread_priority(self):\n return _my_lte_swig.pdcch_interleaver_sptr_thread_priority(self)", "title": "" }, { "docid": "cdfff09bc958a3e4abfe9edfaa90d05c", "score": "0.7699282", "text": "def active_thread_priority(self):\n return _ccsds_swig.udp_source_rate_idle_b_sptr_active_thread_priority(self)", "title": "" }, { "docid": "3d936bceedcb773ac68c740c2762b22c", "score": "0.768551", "text": "def active_thread_priority(self):\n return _ccsds_swig.randomiser_softbits_sptr_active_thread_priority(self)", "title": "" }, { "docid": "0b218117b655e586f9588a6c14a8fc6d", "score": "0.7680722", "text": "def active_thread_priority(self):\n return _ccsds_swig.ldpc_parallel_decoder_sptr_active_thread_priority(self)", "title": "" }, { "docid": "aed72d6571b062fa73d8696527b29685", "score": "0.767879", "text": "def thread_priority(self):\n return _my_lte_swig.precoder_sptr_thread_priority(self)", "title": "" }, { "docid": 
"dea93e0da8c0c46f2c0b187486c879b7", "score": "0.76698303", "text": "def active_thread_priority(self):\n return _my_lte_swig.phich_alignment_sptr_active_thread_priority(self)", "title": "" }, { "docid": "6b65c62701c5dd3e9df8f27992276240", "score": "0.7662747", "text": "def thread_priority(self):\n return _atsc_swig.atsc_interleaver_sptr_thread_priority(self)", "title": "" }, { "docid": "14a4df9a39faa6fff20cdc6eac6a6fd2", "score": "0.76014894", "text": "def active_thread_priority(self):\n return _atsc_swig.atsc_rs_encoder_sptr_active_thread_priority(self)", "title": "" }, { "docid": "8249a37bad7dff8480934c2cc57ac959", "score": "0.75453824", "text": "def active_thread_priority(self):\n return _atsc_swig.atsc_trellis_encoder_sptr_active_thread_priority(self)", "title": "" }, { "docid": "b5a143955939e5d4057bbc1d4470b935", "score": "0.74935246", "text": "def thread_priority(self):\n return _atsc_swig.atsc_viterbi_decoder_sptr_thread_priority(self)", "title": "" }, { "docid": "b2f80b8eea313864d9a8a3bc16ad7198", "score": "0.7472293", "text": "def active_thread_priority(self):\n return _wirlab_swig.CSI_to_File_sptr_active_thread_priority(self)", "title": "" }, { "docid": "a249989f2304eb972569986f4ac34f08", "score": "0.74681383", "text": "def thread_priority(self):\n return _ccsds_swig.output_counter_cc_sptr_thread_priority(self)", "title": "" }, { "docid": "0623c8df6f160a6d1d81a635e85a2a0d", "score": "0.7455768", "text": "def thread_priority(self):\n return _ccsds_swig.softbittobit_sptr_thread_priority(self)", "title": "" }, { "docid": "c2707b44d729dea82b52e876c9cc40a9", "score": "0.74519557", "text": "def thread_priority(self):\n return _ccsds_swig.dll_cc_sptr_thread_priority(self)", "title": "" }, { "docid": "bf1c922e86af4ca885b1e6a42749f6e2", "score": "0.7451169", "text": "def thread_priority(self):\n return _atsc_swig.atsc_deinterleaver_sptr_thread_priority(self)", "title": "" }, { "docid": "bbffaabe04e1d1de72bdf6db329b460a", "score": "0.7450483", "text": "def thread_priority(self):\n return _my_lte_swig.repeater_sptr_thread_priority(self)", "title": "" }, { "docid": "4b3d750035941cf3d190c487862f85dd", "score": "0.7450216", "text": "def thread_priority(self):\n return _my_lte_swig.scrambler_sptr_thread_priority(self)", "title": "" }, { "docid": "573d8f692f1a31230b1682a843981a98", "score": "0.74494207", "text": "def active_thread_priority(self):\n return _ccsds_swig.mpsk_ambiguity_resolver_f_sptr_active_thread_priority(self)", "title": "" }, { "docid": "4eb7eabeb97d03733050cb34c2bf901c", "score": "0.7445339", "text": "def thread_priority(self):\n return _atsc_swig.atsc_fpll_sptr_thread_priority(self)", "title": "" }, { "docid": "388d8d3442821c102d502b471acdd234", "score": "0.7443719", "text": "def thread_priority(self):\n return _ccsds_swig.udp_source_rate_idle_b_sptr_thread_priority(self)", "title": "" }, { "docid": "e4858a8a132270cb41d33254da9917d9", "score": "0.7416974", "text": "def thread_priority(self):\n return _my_lte_swig.cp_adder_sptr_thread_priority(self)", "title": "" }, { "docid": "28d7c8d87adccb8c595a5d8e8dbef773", "score": "0.7403868", "text": "def thread_priority(self):\n return _my_lte_swig.layer_mapper_sptr_thread_priority(self)", "title": "" }, { "docid": "81b1bf68560df90dd9bc1f1b3d2141a8", "score": "0.73830026", "text": "def thread_priority(self):\n return _my_lte_swig.code_block_segmentation_sptr_thread_priority(self)", "title": "" }, { "docid": "9a8cc7409d2c93f079a0ff023892956d", "score": "0.7367902", "text": "def thread_priority(self):\n return 
_my_lte_swig.scrambler_phich_sptr_thread_priority(self)", "title": "" }, { "docid": "996799640cbb5d8c5890db403482144d", "score": "0.73665106", "text": "def thread_priority(self):\n return _my_lte_swig.code_block_concatenation_sptr_thread_priority(self)", "title": "" }, { "docid": "e52d595e651f7204abda2f1662580649", "score": "0.73601925", "text": "def active_thread_priority(self):\n return _atsc_swig.atsc_field_sync_mux_sptr_active_thread_priority(self)", "title": "" }, { "docid": "d1049c53585c0a42249cfaa08ae5fb93", "score": "0.7360174", "text": "def thread_priority(self):\n return _my_lte_swig.phich_grouping_sptr_thread_priority(self)", "title": "" }, { "docid": "cc222929c00e060ca87908a345f3979d", "score": "0.73557234", "text": "def thread_priority(self):\n return _my_lte_swig.conv_enc_sptr_thread_priority(self)", "title": "" }, { "docid": "5df8b0d01ca476e7a00fe26f7d54e84d", "score": "0.7334901", "text": "def active_thread_priority(self):\n return _ccsds_swig.softbits_msg_to_bytes_b_sptr_active_thread_priority(self)", "title": "" }, { "docid": "3a2ad05eaa53bdca15e05cbf6db7aead", "score": "0.7332543", "text": "def thread_priority(self):\n return _atsc_swig.atsc_pad_sptr_thread_priority(self)", "title": "" }, { "docid": "5afa8dce02173f6cefea547c95c6fb7d", "score": "0.7326535", "text": "def thread_priority(self):\n return _my_lte_swig.rate_match_sptr_thread_priority(self)", "title": "" }, { "docid": "14346eeb58c6942b9119ebd97b36fe56", "score": "0.73244137", "text": "def thread_priority(self):\n return _atsc_swig.atsc_equalizer_sptr_thread_priority(self)", "title": "" }, { "docid": "30faef0383f23df095db81fb2e059279", "score": "0.73234946", "text": "def thread_priority(self):\n return _my_lte_swig.resource_mapper_sptr_thread_priority(self)", "title": "" }, { "docid": "d24045f461c3455365a7a6480d60efad", "score": "0.7320101", "text": "def thread_priority(self):\n return _my_lte_swig.lte_random_bit_gen_sptr_thread_priority(self)", "title": "" } ]
b1d2b42da53429a0ea3c80e7d9fac1a1
the sum of ratio must equal to 1
[ { "docid": "7d1557dc3a5a1bf229ca9f2e5bc8a903", "score": "0.0", "text": "def split_Train_Test_Data(data_dir, ratio):\r\n dataset = ImageFolder(data_dir) # data_dir精确到分类目录的上一级\r\n character = [[] for i in range(len(dataset.classes))]\r\n for x, y in dataset.samples: # 将数据按类标存放\r\n character[y].append(x)\r\n\r\n train_inputs, test_inputs= [], []\r\n train_labels,test_labels = [], []\r\n for i, data in enumerate(character):\r\n num_sample_train = int(len(data) * ratio[0])\r\n random.shuffle(data) # 打乱后抽取\r\n for x in data[:num_sample_train]:\r\n train_inputs.append(str(x))\r\n train_labels.append(i)\r\n for x in data[num_sample_train:]:\r\n test_inputs.append(str(x))\r\n test_labels.append(i)\r\n train_dataloader = DataLoader(MyDataset(train_inputs, train_labels, train_transformer_ImageNet),\r\n batch_size=80, shuffle=True)\r\n test_dataloader = DataLoader(MyDataset(test_inputs, test_labels, val_transformer_ImageNet),\r\n batch_size=80, shuffle=False)\r\n\r\n return train_dataloader, test_dataloader", "title": "" } ]
[ { "docid": "e540e26756b22115e2f811ea4aad52c7", "score": "0.7472728", "text": "def ratio(self):\n pass", "title": "" }, { "docid": "e540e26756b22115e2f811ea4aad52c7", "score": "0.7472728", "text": "def ratio(self):\n pass", "title": "" }, { "docid": "e540e26756b22115e2f811ea4aad52c7", "score": "0.7472728", "text": "def ratio(self):\n pass", "title": "" }, { "docid": "e540e26756b22115e2f811ea4aad52c7", "score": "0.7472728", "text": "def ratio(self):\n pass", "title": "" }, { "docid": "2e3490af1c721f90e045aa6a9d878089", "score": "0.71816206", "text": "def reward_ratio():", "title": "" }, { "docid": "5549addf5057f064de56d6c74dcf6db4", "score": "0.71478426", "text": "def _calc_ratio(self, vals):\n\n a, b = vals\n if not (a or b):\n return 0\n if abs(a) > abs(b):\n a, b = b, a\n return 1 - a / b", "title": "" }, { "docid": "349f67ec1c8fda3b0fd5dd4add3e7484", "score": "0.69492793", "text": "def ratio(arr):\n toms = count_tomatoes(arr)\n mush = count_mushrooms(arr)\n\n if(toms > mush):\n returnval = mush/toms\n elif(mush > toms):\n returnval = toms/mush\n else:\n returnval = 1.0\n\n return returnval", "title": "" }, { "docid": "22fe8ddc048bba5c852aa39f21fda978", "score": "0.6808471", "text": "def get_an_ratio(self):\n return round(\n sum(self.unique_vals_counts[self.anomalous_indices])\n / sum(self.unique_vals_counts),\n 2,\n )", "title": "" }, { "docid": "29d36f94c664d2e4ddc81fd4eda883f6", "score": "0.6730369", "text": "def pe_ratio(self, price: int) -> float:\n return 1 / self.dividend_yield(price)", "title": "" }, { "docid": "2499fe5624c82bd78c7b9d66b25b720b", "score": "0.6671865", "text": "def compute_ratio(self, state):\n if state == self.lower_state:\n return 1/(1/self.num_state) # the target policy always takes the solid action, so 1/(1/7)\n else:\n return 0 # the target policy always takes the solid action, so 0/(1/7)", "title": "" }, { "docid": "8d949f3d142e653af3f0dab127ee21e6", "score": "0.6647026", "text": "def ratio(a,b):\n return np.round(b/a,3)", "title": "" }, { "docid": "e630b0bfa0ef23adeba4e96df39b59e0", "score": "0.6559955", "text": "def ratio(x, y):\n return 0.0 if abs(value(x)) < 1e-9 and abs(value(y)) < 1e-9 else value(x / y)", "title": "" }, { "docid": "761b55980e63f9dad056df24ec61c337", "score": "0.6544462", "text": "def promedio(self,arr):\n return sum(arr)/len(arr)", "title": "" }, { "docid": "a49b9864bc99bcc1ebd6e7a1568ccad2", "score": "0.65438265", "text": "def calculate1Percent(value):\r\n numof1 = 0;\r\n for i in value:\r\n if(i==1):\r\n numof1 = numof1+1\r\n return numof1*1.0/value.shape[0]", "title": "" }, { "docid": "d35c6cfdb830df3c14f6171a45168943", "score": "0.6461315", "text": "def proportion_in_comparison( self ):\n return np.nan_to_num( self.sum(axis=1).astype(float) / self.sum() )", "title": "" }, { "docid": "f69a08a5b1660e8492e3a4ffcd849f04", "score": "0.64564145", "text": "def f1(predictions, gold):\n if len(gold) == 0:\n return 1 if len(predictions)==0 else 0\n nom = 2*len(set(predictions).intersection(set(gold)))\n denom = len(set(predictions))+len(set(gold))\n return nom/denom", "title": "" }, { "docid": "fad420b3bf0cfa285764b774ebe917cf", "score": "0.6444215", "text": "def ppation_ratio(self):\n a = self.vec[:self.dim*self.n]\n a = a/np.linalg.norm(a, axis=0)\n return np.sum(np.abs(a)**2, axis=0)**2/ \\\n np.sum(np.abs(a)**4, axis=0)", "title": "" }, { "docid": "550103f981da9b6c6f74eab3ef3926e2", "score": "0.64046246", "text": "def as_integer_ratio(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": 
"550103f981da9b6c6f74eab3ef3926e2", "score": "0.64046246", "text": "def as_integer_ratio(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "550103f981da9b6c6f74eab3ef3926e2", "score": "0.64046246", "text": "def as_integer_ratio(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "550103f981da9b6c6f74eab3ef3926e2", "score": "0.64046246", "text": "def as_integer_ratio(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "550103f981da9b6c6f74eab3ef3926e2", "score": "0.64046246", "text": "def as_integer_ratio(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "550103f981da9b6c6f74eab3ef3926e2", "score": "0.64046246", "text": "def as_integer_ratio(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "550103f981da9b6c6f74eab3ef3926e2", "score": "0.64046246", "text": "def as_integer_ratio(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "550103f981da9b6c6f74eab3ef3926e2", "score": "0.64046246", "text": "def as_integer_ratio(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "550103f981da9b6c6f74eab3ef3926e2", "score": "0.64046246", "text": "def as_integer_ratio(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "550103f981da9b6c6f74eab3ef3926e2", "score": "0.64046246", "text": "def as_integer_ratio(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "550103f981da9b6c6f74eab3ef3926e2", "score": "0.64046246", "text": "def as_integer_ratio(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "985a3347263432073ef2afb4daa49b19", "score": "0.62645483", "text": "def band_ratio(a,b):\n c = (a - b) / (a + b)\n return c", "title": "" }, { "docid": "abea8dd6a930ef3c46b1503b7f629d2a", "score": "0.6261388", "text": "def mass_fration_to_mass_ratio(x: float) -> float :\n return x / (1 - x)", "title": "" }, { "docid": "8cf7646d522d7fbebc52cc05e5c15bdc", "score": "0.6227649", "text": "def calmar_ratio(er, returns, rf):\n return (er - rf) / max_dd(returns)", "title": "" }, { "docid": "0729c6ce92e8f0b46f6920941700a763", "score": "0.62236196", "text": "def ratio(a, b):\n chol = B.cholesky(a)\n return B.sum(B.iqf_diag(b, chol), axis=-1)", "title": "" }, { "docid": "0dc4b5943e5459ec9b3bd6d476dd816c", "score": "0.62141955", "text": "def sparsity(self):\n\n unpruned = torch.sum(torch.tensor([torch.sum(v) for v in self.values()]))\n total = torch.sum(torch.tensor([torch.sum(torch.ones_like(v)) for v in self.values()]))\n return 1 - unpruned.float() / total.float()", "title": "" }, { "docid": "c85627aa2c04cacdb619654d72c486d2", "score": "0.6199893", "text": "def sum_5mc_ratio(d0):\n return (d0[10] / 100 * d0[9]).sum() / d0[9].sum()", "title": "" }, { "docid": "9a233ffee8817f74b2770ea3654470a5", "score": "0.6187528", "text": "def golden_ratio(fibo, n):\n algo = fibo1 if fibo == '1' else fibo2\n n = int(n)\n return algo(n) / algo(n-1)", "title": "" }, { "docid": "dfba654ee41a0464c35771aaac8444b2", "score": "0.6185817", "text": "def proportions( self ):\n return np.nan_to_num( self.astype(float) / self.sum() )", "title": "" }, { "docid": "2a3291f545a9a2fb58584d576c64eb4f", "score": "0.6164248", "text": "def task_0510():\n return sum([20, 10, 15, 75])/len([20, 10, 15, 75])", "title": "" }, { "docid": "35ae7704f31844c3ba7501fe55cbcf58", "score": 
"0.6157767", "text": "def reasonable(n):\n return n == 0 or 1/n != 0.0", "title": "" }, { "docid": "9b292d8dc97ca5963c3c643e2eb6064a", "score": "0.6157663", "text": "def p_e_ratio(self, price):\n try:\n return price/self.dividend_yield(price)\n except ZeroDivisionError:\n print('Undefined ratio for zero earnings')\n return 0", "title": "" }, { "docid": "a09f9d321c77b1202ab6d9c8593f1a75", "score": "0.6137656", "text": "def acceptance_ratio(self) -> float:\r\n return self.total_accepted_samples / self.total_samples", "title": "" }, { "docid": "6b08cbf5305f72f93dbd2c630a8fb7ae", "score": "0.607771", "text": "def _get_hit_ratio(self):\n\n hit = self.hit_count\n miss = self.miss_count\n if hit == 0 and miss == 0:\n return 0\n\n ratio = hit / (hit + miss)\n return round(ratio * 100, 1)", "title": "" }, { "docid": "c6953faa301efa2968d58e6f9bf67e50", "score": "0.60519433", "text": "def fineness_ratio(self) -> float:\n return np.sqrt(\n self.length() ** 3 / self.volume() * np.pi / 4\n )", "title": "" }, { "docid": "d993bb44cad481d12bf2f868c4b154cb", "score": "0.6039831", "text": "def _get_equivalence_ratio(\n p_fuel,\n p_oxidizer,\n f_a_st\n):\n return p_fuel / p_oxidizer / f_a_st", "title": "" }, { "docid": "9363e8379416d824b1ec08ba91af72ac", "score": "0.6031198", "text": "def f1(self):\n assert 0 <= self.P <= 1 and 0 <= self.R <= 1\n if self.P == 0 or self.R == 0:\n return 0\n return (2 * self.P * self.R) / (self.P + self.R)", "title": "" }, { "docid": "c6b64659a6d8773a8e34ab1714c3d086", "score": "0.6020088", "text": "def ratio(self):\n return self.height / self.width", "title": "" }, { "docid": "c6635ae0d8a0acbc005c4e952f27ea46", "score": "0.6007607", "text": "def img_rgb_ratio(pixel):\n total = sum(pixel)\n pixel_ratio = []\n if(total>0):\n Rr = pixel[0]/total\n Gr = pixel[1]/total\n Br = pixel[2]/total\n pixel_ratio.append(Rr)\n pixel_ratio.append(Gr)\n pixel_ratio.append(Br)\n else:\n pixel_ratio.append(0)\n pixel_ratio.append(0)\n pixel_ratio.append(0)\n return pixel_ratio", "title": "" }, { "docid": "529f09d1a99078c56a10b6c03e2d37d4", "score": "0.60073096", "text": "def recall_ratio(seg, gt):\r\n seg = seg.flatten()\r\n seg[seg > 0.5] = 1\r\n seg[seg <= 0.5] = 0\r\n\r\n gt = gt.flatten()\r\n gt[gt > 0.5] = 1\r\n gt[gt <= 0.5] = 0\r\n\r\n tp = (seg * gt).sum() #jiao\r\n fn = gt.sum()-tp\r\n\r\n\r\n recall = float(tp)/float(fn+0.000001)\r\n\r\n return recall", "title": "" }, { "docid": "784162eea3138467041df0976cabd8f8", "score": "0.5984997", "text": "def calc_poissons_ratio(mod_bulk, mod_shear):\n\n return (3 * mod_bulk - 2 * mod_shear) / (2 * (3 * mod_bulk + mod_shear))", "title": "" }, { "docid": "b4a92050b35b184519912935fcd261d7", "score": "0.59839803", "text": "def proportional_one(values, value):\n maxx = np.max(values)\n minn = np.min(values)\n return (value - minn)/(maxx-minn)", "title": "" }, { "docid": "c547cda9b5cc083d854d722b43b4534f", "score": "0.5973895", "text": "def invratio(a,b):\n return np.round(a/b,3)", "title": "" }, { "docid": "74b8b79da229eea50d16a1068b930322", "score": "0.59577703", "text": "def precision_ratio(seg, gt):\r\n seg = seg.flatten()\r\n seg[seg > 0.5] = 1\r\n seg[seg <= 0.5] = 0\r\n\r\n gt = gt.flatten()\r\n gt[gt > 0.5] = 1\r\n gt[gt <= 0.5] = 0\r\n\r\n tp = (seg * gt).sum() #jiao\r\n fp = seg.sum()-tp\r\n\r\n\r\n precision = float(tp)/float(fp+0.000001)\r\n\r\n return precision", "title": "" }, { "docid": "e2d637aea74623aae7f7c25531aa6b89", "score": "0.595064", "text": "def calc_p_ratio(hps, nhps, total_hps, total_nhps):\n\n hps_proportion = (hps+1) / 
(total_hps+1) # add-one smoothing\n nhps_proportion = (nhps+1) / (total_nhps+1) # add-one smoothing\n\n p_ratio = round(max(hps_proportion, nhps_proportion)/min(hps_proportion, nhps_proportion), 2)\n\n return p_ratio", "title": "" }, { "docid": "951236be85e7a33c8ba7f7e772e0eb68", "score": "0.59495294", "text": "def ppation_ratio2(self):\n vec = self.vec[:self.dim*self.n]\n vec = vec/np.linalg.norm(vec, axis=0)\n return 1./(self.val.shape[0]*np.sum((vec*vec.conj())**2, axis=0)).real", "title": "" }, { "docid": "14bb4febdfd51e39c4708f31f2e478e2", "score": "0.5944385", "text": "def div(self) -> float:\n return (per(self.has(), .9) - per(self.has(), .1)) / 2.58", "title": "" }, { "docid": "754c6749a85f18567ed789b393176c0c", "score": "0.59433454", "text": "def get_ratio(array, fn):\n cntr = Counter(array)\n\n selected = [(key, val) for key,val in cntr.items() if fn(key)]\n if not selected:\n return 0\n (_, counts) = zip(*selected)\n (_, all_counts) = zip(*cntr.items())\n\n return sum(counts)/float(sum(all_counts))", "title": "" }, { "docid": "89f092a2a9c12d894e729bf7783dd0c2", "score": "0.59371", "text": "def good_total_ratio(self) -> Optional[pulumi.Input['SloRequestBasedSliGoodTotalRatioArgs']]:\n return pulumi.get(self, \"good_total_ratio\")", "title": "" }, { "docid": "054d208b066ac89270c2f6f707fde146", "score": "0.59356093", "text": "def test_one(self):\n df = self.df.head(1).copy()\n ratio = \"CaO/Si\"\n r = get_ratio(df, ratio=ratio)", "title": "" }, { "docid": "2414d3087f5f6ae5d7d12bd5243c8ce9", "score": "0.59217286", "text": "def calculate_ratios(c1, c2):\n if c2 == 0.0:\n return 1.0\n else:\n return c1 / c2", "title": "" }, { "docid": "8b797a998bd2c365b7c28b2d1a7151cd", "score": "0.5921005", "text": "def get_image_ratio(self, image):\n x, y = self.get_image_size(image)\n return float(x) / y", "title": "" }, { "docid": "ff938d2bf3bda281a01c8547f88a022d", "score": "0.5910512", "text": "def ratio_disc(disc, x_real, x_fake):\n\n # Put samples together\n x = torch.cat([x_real, x_fake])\n\n # Compute p / (p + q)\n p_over_pplusq = disc.classify(x)\n\n # Compute q / (p + q)\n q_over_pplusq = 1 - p_over_pplusq\n\n # Compute p / q\n p_over_q = p_over_pplusq / q_over_pplusq\n\n return p_over_q", "title": "" }, { "docid": "e5506b1d0ab4a02111cee63b7dc26846", "score": "0.5901676", "text": "def terminalRatio(self):\n return self.terms_count / float(self.terms_count + self.prims_count)", "title": "" }, { "docid": "f20e82bed113b663a573b26a9b4847c9", "score": "0.5896481", "text": "def calculate_fraction(self, labels):\n return sum(labels) / len(labels)", "title": "" }, { "docid": "16b6a8f50eb6e967fcdea27a298ebbe3", "score": "0.58945316", "text": "def gender_ratio_calc(individuals):\n n_male = 0\n for i in individuals:\n if i.gender == 0:\n n_male += 1\n return n_male/len(individuals)", "title": "" }, { "docid": "fe94b31f0c6f1aede1c312f94269e864", "score": "0.589371", "text": "def proportion_in_reference( self ):\n return np.nan_to_num( self.sum(axis=0).astype(float) / self.sum() )", "title": "" }, { "docid": "4a5e7d2f85870fde0198cc28b7b30401", "score": "0.5887356", "text": "def trivial_fit(R, N):\n total_positive = numpy.sum(R)\n total_count = numpy.sum(N)\n return float(total_positive) / float(total_count)", "title": "" }, { "docid": "68124d4854212f620b4382fc9cccade4", "score": "0.5884088", "text": "def equivalence_ratio(self):\n return (self.fuel_flow / self.inflow.mass_flow) * self.afr_stoichiometric", "title": "" }, { "docid": "2d44937fd2a5b0287c19700f976014ec", "score": "0.58774424", "text": "def 
task_0512():\n return sum([sum([.25, .75, .1] + [-1,0] + [4,4,4,4])])", "title": "" }, { "docid": "a176f1d5bcf0aa6938f44cdac84e2e14", "score": "0.58582145", "text": "def trickle_ratio(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"trickle_ratio\")", "title": "" }, { "docid": "ecdd4048c980dd568866012f84227a56", "score": "0.58448684", "text": "def RR(v):\n return 1/np.min(v)", "title": "" }, { "docid": "db661a6bb31527789408a65bb58277cf", "score": "0.58427376", "text": "def assert_piece_probabilities_sum_to_one(piece):\n\n if not DEBUGGING:\n return\n\n prob_sum = sum(piece.probability(rank) for rank in piece.ranks())\n if not prob_sum == 1:\n log = logging.getLogger(\"probability\")\n log.error(\"Probability sum for %s is %s\" % (str(piece), str(prob_sum)))\n assert(False)", "title": "" }, { "docid": "cbb075d7d309b1a43d5086e3b0886008", "score": "0.5841456", "text": "def observed(data):\n total = float(np.sum(data))\n agreed = np.sum(data.diagonal())\n percent_agreement = agreed / total\n return percent_agreement", "title": "" }, { "docid": "30f534ad483a0255ec3ab791f7e0bc9d", "score": "0.5838872", "text": "def burke_ratio(er, returns, rf, periods):\n return (er - rf) / math.sqrt(average_dd_squared(returns, periods))", "title": "" }, { "docid": "0b9e09fd642402005e37293fb157b093", "score": "0.5834595", "text": "def universal_ansiotropy_ratio(self):\n Kv, Gv = self.voigt_modulus\n Kr, Gr = self.reuss_modulus\n return 5 * (Gv / Gr) + (Kv / Kr) - 6", "title": "" }, { "docid": "a406373d33749f5231103a2dcaa9b8f3", "score": "0.5833236", "text": "def get_ratio(self, resource_id):\r\n\t\tresult = self.production_ratio if self.resource_id == resource_id else 0\r\n\t\treturn result + sum(child.get_ratio(resource_id) for child in self.children)", "title": "" }, { "docid": "1584640eb2e2acdedf6c0f19857c072f", "score": "0.5831414", "text": "def evaluation(terms,keys):\n matches = set(terms).intersection(keys)\n ratio = len(matches) * 1.0 /len(keys)\n return ratio", "title": "" }, { "docid": "96b003790f4223beb62022fa6d6b0362", "score": "0.5830574", "text": "def conv_ratio(kind):\n commod, rxtr = data.sup_to_commod[kind], data.sup_to_rxtr[kind]\n enr = mean_enr(rxtr, commod)\n return data.converters[kind]['inv'](1.0, enr, commod) / \\\n data.converters[kind]['proc'](1.0, enr, commod)", "title": "" }, { "docid": "9e40b142a405f8e338b23e0704f3bd3d", "score": "0.5821991", "text": "def get_na_ratio(self):\n return round(\n sum(self.unique_vals_counts[self.missing_indices])\n / sum(self.unique_vals_counts),\n 2,\n )", "title": "" }, { "docid": "eef52b726cfb7550b68aa9c7669ddbb9", "score": "0.58172154", "text": "def compute_average_needs_ratio(self):\n ratio = self.compute_ratio_needs()\n self.average_needs_ratio = (ratio['food'] + ratio['clothes'] + ratio['lodging']) / 3", "title": "" }, { "docid": "cd329027210f0d63af7a19da90f518b5", "score": "0.5817186", "text": "def test_proper_ratio_initialization(self):\n valtest_divider = ValidationTestDivider(0.50, 0.40)\n\n self.assertEqual(valtest_divider.get_total_count(), 0)\n self.assertEqual(0.50, valtest_divider.get_validation_ratio())\n self.assertEqual(0.40, valtest_divider.get_test_ratio())", "title": "" }, { "docid": "db899b729841fd099747d6107f5a4c6a", "score": "0.58049285", "text": "def good_total_ratio(self) -> Optional[pulumi.Input['SloWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioArgs']]:\n return pulumi.get(self, \"good_total_ratio\")", "title": "" }, { "docid": "fd2af3cf8466c41a6450cb047bb3f6ae", "score": "0.57965785", 
"text": "def calculaterRD(_ranking_k,_pro_k,_user_N,_pro_N):\r\n input_ratio=_pro_N/(_user_N-_pro_N)\r\n unpro_k=_ranking_k-_pro_k\r\n \r\n if unpro_k==0: # manually set the case of denominator equals zero\r\n current_ratio=0\r\n else:\r\n current_ratio=_pro_k/unpro_k\r\n\r\n min_ratio=min(input_ratio,current_ratio)\r\n \r\n return abs(min_ratio-input_ratio)", "title": "" }, { "docid": "505af10e98dda1dae7b482e8bcefaefe", "score": "0.57955945", "text": "def ratio_from_mass(self, x):\n # this function is a manual approximation for the number of grid cells you can see\n # in the actual agario at various different masses\n # the data cen be seen inside sizes.txt\n y = 1400 * (x + 150) ** -1 + 7.2\n return y / 15.95", "title": "" }, { "docid": "0db54903dbfe2fdbd6154213b816b722", "score": "0.57935876", "text": "def predicted_FWHM_ratio(r0, L0):\n return np.sqrt(1 - 2.183*(r0/L0)**0.356)", "title": "" }, { "docid": "d11c0b66bc88aa8af7b4d0d34fe85333", "score": "0.5781137", "text": "def conditional_sharpe_ratio(er, returns, rf, alpha):\n return (er - rf) / cvar(returns, alpha)", "title": "" }, { "docid": "afdbc47ce32f3117ac0a5827befd7b5a", "score": "0.5775057", "text": "def triangle_ratio(self):\n a = self.vertices[self.edges[:,0]]\n b = self.vertices[self.edges[:,1]]\n distance = ((b-a)**2).sum(axis=1) ** 0.5\n max_distance = distance.max()\n min_distance = distance.min()\n return min_distance / max_distance", "title": "" }, { "docid": "bb03c6c7a0c196f2390ea843d23bb872", "score": "0.5772194", "text": "def solution1(numerator, denominator): \n if numerator < denominator:\n return 0\n\n return solution1((numerator - denominator), denominator) + 1", "title": "" }, { "docid": "a4112f8121e932426b149c803d37bd4e", "score": "0.5768263", "text": "def gain_ratio(features, results):\n f_entropy = feature_entropy(features, results)\n f_gain = gain(features, results)\n return f_gain - f_entropy", "title": "" }, { "docid": "7969b04d7bd4f24c5b5eaa1527bb0851", "score": "0.5766656", "text": "def compute_ratio_needs(self, level=2):\n ratio_needs_prod = GoodsVector(self.goods)\n cum_needs = self.compute_cum_needs(level)\n prod_capacity = self.compute_production_capacity()\n for good in self.goods:\n ratio_needs_prod[good] = prod_capacity[good] / cum_needs[good]\n return ratio_needs_prod", "title": "" }, { "docid": "65ffec488fc74079c2aae7613e1a9701", "score": "0.5762043", "text": "def wilson_ratio(self, use_binning=False):\n if (not self.is_xray_intensity_array()):\n a = self\n else:\n a = self.f_sq_as_f()\n if (use_binning):\n a.use_binner_of(self)\n second_moment = a.second_moment(use_binning=use_binning)\n if (second_moment is None): return None\n if (not use_binning): return 1/second_moment\n result = []\n for sm in second_moment.data:\n if (sm is None or sm == 0): result.append(None)\n else: result.append(1/sm)\n return binned_data(binner=a.binner(), data=result, data_fmt=\"%7.4f\")", "title": "" }, { "docid": "248c20ab25cec4eec1e01916d71756f9", "score": "0.57584244", "text": "def calculate_common_ratio(self):\n\n this_r = self.initial_selection_probability\n size = Evolver.POPULATION_SIZE\n\n while True:\n next_r = ((size - 2) + 2 * (this_r ** size)) / size\n\n if abs(next_r - this_r) < 0.000001:\n return this_r\n else:\n this_r = next_r", "title": "" }, { "docid": "da941f0cedd5e4daa30ae6f7a82e57e6", "score": "0.5757154", "text": "def desaturate(ratio):\n return lambda x: x + (x.max() - x) * ratio", "title": "" }, { "docid": "13a8ecf1463e214a0bae735ef8559393", "score": "0.57563263", "text": "def 
divide_by_one(a):\n return a / 1", "title": "" }, { "docid": "abab9cd63c7ace7deb46a77d08737e5b", "score": "0.57548445", "text": "def percent_correct(self) -> float:\n num_correct = 0\n for h in self.hypotheses:\n if h == self.ground_truth:\n num_correct += 1\n return num_correct / len(self.hypotheses)", "title": "" }, { "docid": "f16e8f6ca0815cac6a46390d33f7edaa", "score": "0.57537204", "text": "def solvedPercent( self ):\n\t\ttotalSize = len( self.lineBoard )\n\t\tsolvedCount = 0\n\t\tfor val in self.lineBoard:\n\t\t\tif( ( val >> 4 ) ^ ( val & 15 ) == 15 ):\n\t\t\t\tsolvedCount = solvedCount + 1\n\t\treturn( float( \"%.2f\" % ( solvedCount * 100.0 / totalSize, ) ) )", "title": "" }, { "docid": "10e75664dae30f1c73af84eb229294b4", "score": "0.5748528", "text": "def rational(x):\n return 1 / (x + 1)", "title": "" }, { "docid": "e5e37891252d8421ddd3df633a84f0a3", "score": "0.57362413", "text": "def ratio_driver(x, n, d, ab):\n nmax = np.max(n)\n\n r = np.zeros([x.size, nmax+1])\n xf = x.flatten()\n\n r[:, 0] = 1/ab[0, 1]\n if nmax > 0:\n r[:, 1] = 1/ab[1, 1] * (x - ab[1, 0])\n\n for j in range(2, nmax+1):\n r[:, j] = 1/ab[j, 1] * ((xf - ab[j, 0]) - ab[j-1, 1]/r[:, j-1])\n\n r = r[:, n.flatten()]\n\n if type(d) == int:\n if d == 0:\n return r\n else:\n raise NotImplementedError()\n else:\n raise NotImplementedError()", "title": "" }, { "docid": "45c0f0b28aaacdc47f6794567a510e0c", "score": "0.573349", "text": "def rmse1 (a, p) :\n s = len(a)\n i = 0\n v = 0.0\n while i != s :\n v += sqre_diff(a[i], p[i])\n i += 1\n return math.sqrt(v / s)", "title": "" }, { "docid": "4eb4ab13c45a98af543da4dc2d4c82c6", "score": "0.57272434", "text": "def calc_mixing_ratio(svp, pressure):\n return con.mw_ratio*svp/(pressure-svp)", "title": "" }, { "docid": "9bb9528931e7dfda6b2feefe6c83370b", "score": "0.5726713", "text": "def helpfulnessRatio(x):\n numerator, denominator = x.split('/')\n if numerator > denominator:\n return np.nan\n return np.nan if denominator == '0' else float(numerator) / float(denominator)", "title": "" }, { "docid": "f7af678ea5d044c0ea5e8694f04f98a3", "score": "0.57234097", "text": "def L1Dmiss_swpf_ratio(self, *tag):\n l1d_miss = self.d.L1D_miss(*tag)\n l1d_miss_swpf = self.d.L1D_miss_swpf(*tag)\n return l1d_miss_swpf / l1d_miss.astype(float) * 100", "title": "" }, { "docid": "53fa9d20a082f617aa3781dffec82e7a", "score": "0.5723099", "text": "def computeFraction( poi_messages, all_messages ):\n \n if all_messages == 'NaNNaN': # occurred when created additive features (all emails)\n all_messages = 'NaN'\n if poi_messages == 'NaNNaN':\n poi_messages = 'NaN'\n if all_messages == 'NaN':\n return 0\n if poi_messages == 'NaN':\n return 0\n if all_messages == 0:\n return 0\n return 1.*poi_messages/all_messages\n return fraction", "title": "" } ]
cb561c571a88b19c636e2904f461b665
Get value of key in metadata file dict.
[ { "docid": "1d06cbe8af841874ef8e3fe11e35fef5", "score": "0.76708704", "text": "def get_from_metadata_file(cls, dirpath, key):\n fullpath = os.path.join(dirpath, cls.metadata_filename)\n if os.path.exists(fullpath):\n with open(fullpath, 'rb') as ifh:\n d = pickle.load(ifh)\n return d[key]\n else:\n raise FileNotFoundError(\n \"No such file: {0}\".format(fullpath))", "title": "" } ]
[ { "docid": "fdcf88d1de45ebc3a51e78a7326b9fcf", "score": "0.7239693", "text": "def _get_metadata(self, key):\n try:\n val = self.pod['metadata'][key]\n except (KeyError, TypeError):\n _log.debug('No %s found in pod %s', key, self.pod)\n return None\n return val", "title": "" }, { "docid": "66446344856324b0786d4d142006489a", "score": "0.72259307", "text": "def get(self, key):\n if key not in self.meta:\n raise CrashPlanError(\"(get) Invalid meta data key - %s\" % key)\n \n return self.meta[key]", "title": "" }, { "docid": "eddf3ec88714fbb574b9d102d1d71fde", "score": "0.7178097", "text": "def _GetValue(self, value_name, key):\n value = key.GetValue(value_name)\n if value:\n return value.data", "title": "" }, { "docid": "d6c8a0b77ea07bddc58560b1fb8b3109", "score": "0.70655924", "text": "def get(self, key):\n return self.data[str(key)]", "title": "" }, { "docid": "1fa72f184bc2283d80fe656732d6b5df", "score": "0.6996301", "text": "def get(src, key):\n meta, md = parse_file(src)\n if key in meta:\n print meta[key]\n else:\n print \"Key '%s' is not defined\" % key", "title": "" }, { "docid": "52d63ad389c6f5a6a5fe52617df23009", "score": "0.67988354", "text": "def get_value(self, key):\n\n return self.content[key]", "title": "" }, { "docid": "03a190f9396336435671e2e8fe4c7099", "score": "0.6769885", "text": "def value_of(self, key):\n ext_attr = self.get(key)\n return ext_attr.value if ext_attr else None", "title": "" }, { "docid": "310336ad7f4397c00ab5cd43bae1a08e", "score": "0.67535496", "text": "def get(self, key):\n try:\n return self._data[key]\n except KeyError:\n pass", "title": "" }, { "docid": "c486c90a1861cb1b0e2eb7488acea7f7", "score": "0.67528296", "text": "def get_metadata_by_key(meta_key):\n mongodb = get_mongodb()\n meta = mongodb.metadata.find_one({'meta_key': meta_key})\n return meta", "title": "" }, { "docid": "38c6748439c448c1219f4ae4442bc5cb", "score": "0.6742487", "text": "def get(ctx: click.Context, key: Any) -> None:\n file = ctx.obj['FILE']\n\n with stream_file(file) as stream:\n values = dotenv_values(stream=stream)\n\n stored_value = values.get(key)\n if stored_value:\n click.echo(stored_value)\n else:\n exit(1)", "title": "" }, { "docid": "dc49ce20cfcfaa2c1bf414bd04e5de25", "score": "0.66968954", "text": "def __getitem__(self, key):\n return self.show_metadata[key]", "title": "" }, { "docid": "f7fb9fe64ad0cd7b8dcfd9be5f6f1c6f", "score": "0.6671867", "text": "def get_metadata(self, key=None, default=None):\n metadata = json_utils.loads(self.additional_metadata)\n if not key:\n return metadata\n\n try:\n return self.metadata[key]\n except KeyError:\n return default", "title": "" }, { "docid": "c9331d05b1c3272b5ea47079b575313a", "score": "0.65888417", "text": "def __getitem__(self, key):\n logger.debug(\"AttrFile.__getitem__(%s)\" % key)\n if key in self._attrmap:\n return self._attrmap[ key ]\n else:\n logger.error(\"AttrFile.__getitem__(%s): No such field in %s\" % (key, self._path))\n raise KeyError(\"AttrFile.__getitem__(%s): No such field in %s\" % (key, self._path))", "title": "" }, { "docid": "cd4aea82a30fb007a182e5a2344de251", "score": "0.657604", "text": "def __getitem__(self, key):\n return self._entries.get(str(key))", "title": "" }, { "docid": "c0c97e356e71c2f20569d604dccae79a", "score": "0.65701133", "text": "def get_value(self, key):\n val = dict.__getitem__(self, key)\n return val[-1] # assumes one value with key name.", "title": "" }, { "docid": "bf1d320bb4ddee2354f020bb50cabe65", "score": "0.65627253", "text": "def __getitem__(self, key):\n return 
self._datainfo[key]", "title": "" }, { "docid": "e0799c28a9012d7991cff4d19a287fdb", "score": "0.65609145", "text": "def get(self, key):\n return self[key]", "title": "" }, { "docid": "8938963023fb50661bb98a592e259679", "score": "0.6541692", "text": "def value(self, key):\n return self._record[key]", "title": "" }, { "docid": "8baf0700061089baf2d0ea78558cae80", "score": "0.6532773", "text": "def __getitem__(self, key):\n return self.config_file[key]", "title": "" }, { "docid": "8888636d5b10a003228ae0946cc1d1fc", "score": "0.6518481", "text": "def get(self, key):\n clean_key = clean_path(key)\n path = os.path.join(self.root_folder, clean_key)\n if not os.path.isfile(path):\n return None\n with open(path, \"r\") as handle:\n data = handle.read()\n return data", "title": "" }, { "docid": "b7774433c01d5cbdbb77653eb7cd13e4", "score": "0.6491299", "text": "def meta(self, meta_key):\n return self.meta[meta_key]", "title": "" }, { "docid": "761f2095f40ad269f9e72f579a39b957", "score": "0.64890975", "text": "def find(self, key: str):\n result, idx = _get_index(key)\n if result:\n return self.entries[idx].value\n else:\n raise KeyError", "title": "" }, { "docid": "71229db8f6513d5933a3870ceed78347", "score": "0.6484786", "text": "def get(self, key):\n entries = self.search(key)\n return entries[0] if entries else None", "title": "" }, { "docid": "fba053a6c0bff5bc6a24a42e44121591", "score": "0.64837193", "text": "def get(self, key):", "title": "" }, { "docid": "3b3a9567df30604704ecd363cc89914d", "score": "0.64802736", "text": "def value_from_specfile(getkey):\n spec = {}\n with open(\"opsapi.spec\", \"r\") as f:\n for line in f.readlines():\n try:\n value = line.split(\":\")[1].lstrip().strip()\n key = line.split(\":\")[0].lstrip().lower().rstrip()\n print(\"X\"+str(key))\n spec[key] = value\n except:\n pass\n return spec[getkey]", "title": "" }, { "docid": "e2f3677af0f13640d878bfda0af81e1a", "score": "0.6462309", "text": "def get(self, key):\n # TODO: Check if the given key exists and return its associated value\n bucket_index = self._bucket_index(key)\n maybe_value = self.buckets[bucket_index].find(lambda x: x[0] == key)\n print(key, maybe_value)\n if maybe_value is not None:\n return maybe_value[1]\n raise (KeyError, 'Key not found')", "title": "" }, { "docid": "afd8cdabe06061a9e32c5840f53fd8cd", "score": "0.64457065", "text": "def get_value(self, key):\n pair = self._map.get(key, None)\n if pair:\n return pair.get_value()\n return None", "title": "" }, { "docid": "f5c40049543f85348bc4e393df288b5f", "score": "0.6390037", "text": "def get_metadata(self, key, counter_id):\n with self._lock:\n metadata = self._metadata[key]\n try:\n return metadata[counter_id]\n except KeyError:\n raise MetadataNotFoundError(\"No metadata for counter id '{}' found in the cache.\".format(counter_id))", "title": "" }, { "docid": "c804cd403f12f0dd1d2f69c947f026ef", "score": "0.63824314", "text": "def get(self, key):\n return self.__dict__.get(key, None)", "title": "" }, { "docid": "d3127e5ac37d82888ec8d622ff2545d3", "score": "0.63534695", "text": "def get(self, key):\n if key in self.dic:\n return self.dic[key]\n else:\n return -1", "title": "" }, { "docid": "c268c82130a1e8c5761333524f09e526", "score": "0.6349868", "text": "def get(self, key):\n ret_val = None\n if self.__resp_file__ is None:\n self.__resp_file__ = self.send_sandesh_req(self,\n get_resp=True)\n try:\n ret_val = self.parse_xml_field(self.__resp_file__, key)\n except Exception as e:\n self.logger.error(\"Error %s\" % e)\n return ret_val", "title": "" }, { 
"docid": "171505e3039a619fa91fd6143522b7b2", "score": "0.6344824", "text": "def get_value(self, key):\n return self.struct[key]", "title": "" }, { "docid": "3cad920c4a627b3adc920f1f6f8e84e2", "score": "0.6341955", "text": "def get_information(self, key, **kwargs):\n return self.__info_hash[key]", "title": "" }, { "docid": "0e3b69dc6e0d222402ac872e0932bbc1", "score": "0.6338419", "text": "def __getitem__(self, key):\n props = self._data.get('properties', {})\n return props.get(key, None)", "title": "" }, { "docid": "ffa81925b8db78adc30b05147a562766", "score": "0.63354623", "text": "def get_key(filepath, key, default=None):\n with open(filepath, \"r\") as f:\n file_content = json.load(f)\n return file_content.get(key, default)", "title": "" }, { "docid": "ec566a5296c096729dcdb4558e458912", "score": "0.63351345", "text": "def get(self, key: str, *args) -> Any:\n if (key in self._values.keys()):\n return self._values[key]\n elif len(args) == 1:\n return args[0]\n else:\n raise KeyError(\"Key {} not in Qonfig keys\".format(key))", "title": "" }, { "docid": "53428f9009f447e74b451f437797e402", "score": "0.6310125", "text": "def value_of(self, key):\n idx = self._find_position(key)\n assert idx is not None, \"Invalid `key`\"\n return self._entry_list[idx].val", "title": "" }, { "docid": "dcdd749f02ac12e2243d2a0eb0de0fa1", "score": "0.62993413", "text": "def __getitem__(self, key):\n\t\tndx = self._findPosition(key)\n\t\tassert ndx is not None, \"Invalid map key.\"\n\t\treturn self._entryList[ndx].value", "title": "" }, { "docid": "0913aeff754f0abb18acfd1091ba0776", "score": "0.62992805", "text": "def getValue(self, key):\n\n if key is None or key == '':\n raise KeyError(\"You must provide the key parameter\")\n\n if self._conf is None:\n raise Exception(\"You must load the config file first\")\n\n if key in self._conf:\n return self._conf[key]\n else:\n return None", "title": "" }, { "docid": "62dda6943f2422491621d3af0de6d9c8", "score": "0.6289651", "text": "def findValue(self, key):\n return self._storedValue[key]", "title": "" }, { "docid": "745615c1571f9928ed6f1007b84ce47d", "score": "0.6289176", "text": "def __getitem__(self, key):\n value, quality, time = self.read(key)\n return value", "title": "" }, { "docid": "a3f45b67f214263925ba5cf8add96632", "score": "0.62699556", "text": "def get(dictionary, key):\n return dictionary[key]", "title": "" }, { "docid": "90ab2be37a29c00bac33f6e6e242e32c", "score": "0.6260647", "text": "def get(self, key):\n return self._conn.execute(b'get', key)", "title": "" }, { "docid": "5b0bbf1c7e7de8729a1b2db57fbde2b4", "score": "0.6258306", "text": "def read_key(self):\n return self.dict.storage.read_key(self.value)", "title": "" }, { "docid": "5b0bbf1c7e7de8729a1b2db57fbde2b4", "score": "0.6258306", "text": "def read_key(self):\n return self.dict.storage.read_key(self.value)", "title": "" }, { "docid": "cc5b20fbc0974ac44696b80186510046", "score": "0.6250479", "text": "def get_key(value, arg):\n return value.get(arg, None)", "title": "" }, { "docid": "69ccfcafa5bcdf4c7769613166aadfb6", "score": "0.62405634", "text": "def get_metadata(self, metadata_name):\n\n\t\treturn self.metadata[metadata_name]", "title": "" }, { "docid": "5949191a49225b7331548387f31420a5", "score": "0.62345636", "text": "def get(blob_key):\n return BlobInfo.get(blob_key)", "title": "" }, { "docid": "9207da364f4103afc75a948db5989c02", "score": "0.62300557", "text": "def get(josnData,key):\n return josnData[key] if exist(josnData,key) else \"\"", "title": "" }, { "docid": 
"b16b96be003bf642483b5ee002eaa051", "score": "0.6229425", "text": "def get_metadata(self, k):\n # type: (str) -> object\n return object.__getattribute__(self, k)", "title": "" }, { "docid": "422a22090d1d9b29004906830e34fa45", "score": "0.62286437", "text": "def get(self, key):\n pass", "title": "" }, { "docid": "422a22090d1d9b29004906830e34fa45", "score": "0.62286437", "text": "def get(self, key):\n pass", "title": "" }, { "docid": "b835fae2619854e1aca39f0d1f58b622", "score": "0.6227466", "text": "def value(key):\n return all[key][2]", "title": "" }, { "docid": "b3d81ae8b66df29cb08ab1b3b62891af", "score": "0.62247294", "text": "def getval(filepath, key, condition=True):\n if condition:\n header = get_conditioned_header(filepath, needed_keys=[key])\n else:\n header = get_unconditioned_header(filepath, needed_keys=[key])\n return header[key]", "title": "" }, { "docid": "8e52d175f279d5369216598bbd5ebd99", "score": "0.6223482", "text": "def value_from_dict(dictionary, key):\n pass", "title": "" }, { "docid": "99840129b713fa7170cfb445481281dd", "score": "0.6211947", "text": "def get_header_value_by_key(self, key):\n for i in self.header:\n if i.get(\"name\") == key:\n return i.get(\"value\")", "title": "" }, { "docid": "2b685225122030289f6c090d5bc5059c", "score": "0.6208316", "text": "def get(self, key, default=None):\n try:\n with open(self._pathname(key)) as file_obj:\n result = file_obj.read()\n if self.audit:\n self._doaudit(key, result)\n return result\n except OSError as oopsie:\n if oopsie.errno == errno.ENOENT: # Doesn't exist\n return default\n raise oopsie", "title": "" }, { "docid": "e6c51f74762d47d66d8b963127f6f328", "score": "0.6201602", "text": "def read(self):\n keyobj = self.dict.storage.read_key(self.value)\n return self.dict[keyobj]", "title": "" }, { "docid": "17f1c066b99be00f8caba0ea34fffe7e", "score": "0.6200435", "text": "def get(self, key):\n if key in self.pv_ref:\n return self.pv_ref[key]\n if key in self.mne_ref:\n return self.get(self.mne_ref[key])\n raise KeyError(f\"'{key}' not found.\")", "title": "" }, { "docid": "72c11ab0cf97320d50217d6ea61a8486", "score": "0.61998945", "text": "def _get(self, key):\n try:\n val = getattr(self, f\"_{key}\")\n if val is not None:\n return val\n else:\n self._load()\n return getattr(self, f\"_{key}\")\n except AttributeError:\n return None", "title": "" }, { "docid": "b61f335ecb8a966e4d7fe0cf2d331a13", "score": "0.61948866", "text": "def get_key(fp, k):\n\n print '[guil] Returned key: %s' % k\n\n return get_python_obj(fp)[k]", "title": "" }, { "docid": "978043d661aad234532e53bc9f420eda", "score": "0.61919755", "text": "def get(self, key):\n if isinstance(key, str):\n key = (key,)\n result = dict_digger.dig(self.data, *key)\n if result is None:\n if isinstance(key, (tuple, dict, list)):\n raise KeyError(\"Could not find requested key '%s' in configuration\" % '.'.join(key))\n else:\n raise KeyError(\"Could not find requested key '%s' in configuration\" % key)\n return result", "title": "" }, { "docid": "2c2f44b67bb92504aa9eab86fa25701b", "score": "0.6189424", "text": "def get_value_for_key(self, name, key):\n request = Request(method=\"get\", endpoint=\"/kv/{}/value/{}\".format(name, key))\n\n def response_handler(resp):\n if not resp.is_success:\n raise GetValueError(resp, request)\n else:\n return resp.body\n\n return self._execute(request, response_handler)", "title": "" }, { "docid": "bcac4f79b57af4176ad4284a339677c2", "score": "0.61854106", "text": "def value_for(self, key: str) -> Optional[str]:\n raise NotImplementedError", 
"title": "" }, { "docid": "e808130e3459334e11c7c0e212623dc8", "score": "0.61745274", "text": "def get(self, key):\n\n raise NotImplementedError", "title": "" }, { "docid": "0a29ab95ef17ccc8d1d42c7f004519b1", "score": "0.61732227", "text": "def __getitem__(self, key):\n return self.contents[key]", "title": "" }, { "docid": "525995bd17c4389bc4a4e69073137967", "score": "0.6164586", "text": "def read_value(self, key: str) -> Any:\n with self._mem_cache_lock:\n if key in self._mem_cache:\n entry = self._mem_cache[key]\n return entry\n\n else:\n raise CacheKeyNotFoundError()", "title": "" }, { "docid": "398a05f22cea58c7c3a681aef15ca39d", "score": "0.6159109", "text": "def getEntry(key, value, path):\n try:\n reader = csv.DictReader(open(path))\n for entry in reader:\n if entry[key] == value:\n return entry\n except:\n return None\n\n return None", "title": "" }, { "docid": "afec1f02ab34476423113f5da7f4da83", "score": "0.6151673", "text": "def get(self, key):\n\t\treturn self.__getitem__(key)", "title": "" }, { "docid": "0a4e32ae5ea7642bea0a4dc1cd45a44d", "score": "0.6135139", "text": "def __getitem__(self, key):\n\t\treturn self._dict[key][1]", "title": "" }, { "docid": "baf7abd3946567c75d65f6b0c4d68705", "score": "0.6130219", "text": "def get(self, key):\n\t\tbucket_index = self.get_bucket(key)\n\t\tentry = self.buckets[bucket_index]\n\t\tif not entry:\n\t\t\treturn None\n\t\treturn entry.value", "title": "" }, { "docid": "f3b06076994231adc79f52ddee0ecdae", "score": "0.6128302", "text": "def get_key(self, key):\n\t\treturn self.properties[key] if key in self.properties else \"\"", "title": "" }, { "docid": "f75e224b1141685b517cfc6e1544330e", "score": "0.6119225", "text": "def get(self, key):\n raise NotImplementedError", "title": "" }, { "docid": "f75e224b1141685b517cfc6e1544330e", "score": "0.6119225", "text": "def get(self, key):\n raise NotImplementedError", "title": "" }, { "docid": "6086ca5a1b8da45e46fa09c7396a5707", "score": "0.61112034", "text": "def search_obj_info_key(key):\n if not key:\n return None\n\n obj = search.search_object_attribute(key=\"key\", strvalue=key, category=settings.WORLD_DATA_INFO_CATEGORY)\n return obj", "title": "" }, { "docid": "292cce65aa74142ca61f890398f0216c", "score": "0.6107089", "text": "def __getitem__(self, key):\n try:\n value = self.config[key]\n except KeyError:\n self.logger and self.logger.error(\n \"Key not found in configuration file\")\n sys.exit(22)\n else:\n return value", "title": "" }, { "docid": "46e21d292ff457d309dff66726dd3e8b", "score": "0.6094251", "text": "def fetch_value_from_string_dictionary(key_name, dictionary):\n if (Tools.is_key_name_in_dictionary(key_name, dictionary)):\n return dictionary[key_name]\n else:\n return None", "title": "" }, { "docid": "05e865126a745192de01c15dba08c5a0", "score": "0.60894233", "text": "def get(self, key_path):\n config = self.data\n for k in key_path.split('/'):\n if config:\n config = config.get(k)\n if config or config == 0:\n return config\n return None", "title": "" }, { "docid": "16a1fea18bd2128bf084132b690e2510", "score": "0.608514", "text": "def get(key):\n return region.get(key)", "title": "" }, { "docid": "4a564de0e20de8ff90c978aff5107ddf", "score": "0.60835946", "text": "def _GetKey(self, log, key):\n value = log.get(key)\n if value is None:\n raise command_base.CommandError(\n 'The log file did not contain a %s key.' 
% repr(key))\n return value", "title": "" }, { "docid": "109217ffdf7b4b547d9beb4189fabde4", "score": "0.60752785", "text": "def __getitem__(self, key):\n return self.data_dict[key]", "title": "" }, { "docid": "100f350042fc481f09991f6d7b428261", "score": "0.607527", "text": "def read(self):\n return self.dict[self.key]", "title": "" }, { "docid": "40300daf4f6e34cbb31b00a37629e39b", "score": "0.60694855", "text": "def get(key):\n return region.get(key)", "title": "" }, { "docid": "d6befb0ff20fdee1321ce810ca024c3e", "score": "0.606616", "text": "def get_item(dictionary, key):\n return dictionary.get(key)", "title": "" }, { "docid": "d6befb0ff20fdee1321ce810ca024c3e", "score": "0.606616", "text": "def get_item(dictionary, key):\n return dictionary.get(key)", "title": "" }, { "docid": "d6befb0ff20fdee1321ce810ca024c3e", "score": "0.606616", "text": "def get_item(dictionary, key):\n return dictionary.get(key)", "title": "" }, { "docid": "35c1dbf7998e206f1b23f1bef21ed300", "score": "0.6063819", "text": "def get(self, key):\n return self._get(key, doex=False)", "title": "" }, { "docid": "95dbdbeb67a66ef97d90c8efa78ef589", "score": "0.6040562", "text": "def get_key(self, key, default=None):\n response = self.get_response()\n return response.get(key, default)", "title": "" }, { "docid": "53fb1e951b6ff92fb720ed7119ad9cd6", "score": "0.6039315", "text": "def get_os_release_value(self, key):\n os_release = self.parse_os_release()\n value = os_release.get(key)\n return value", "title": "" }, { "docid": "a250896668108ad032b5ef1f821a2f40", "score": "0.6037994", "text": "def get_value(key):\n img = get_image(key, 70, 50)\n return float(get_text(img))", "title": "" }, { "docid": "7ed7863ed5be9b9629bdfa269f7866e9", "score": "0.6037746", "text": "def get(self, key):\n node = self._find_node_for_key(key)\n # node may be None so perform safe attribute access.\n return getattr(node, 'value', None)", "title": "" }, { "docid": "875b6f88ad6d7a94ac50762dff0284e2", "score": "0.6032528", "text": "def get(self, key, default=None):\n return self.read().get(key, default)", "title": "" }, { "docid": "7215c80c389d3720ef03688f5a1f6252", "score": "0.60266376", "text": "def awsReadMetadataKey():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--key\",\n dest=\"keyname\",\n help=\"Which metadata key to read\")\n\n cli = parser.parse_args()\n\n print getMetadataKey(name=cli.keyname)", "title": "" }, { "docid": "2d303969423a9db7a43148fffc41e6c9", "score": "0.6016178", "text": "def get_cfg_item(self, key):\n value = None\n\n if key in self._cfg_file_contents:\n value = self._cfg_file_contents[key]\n else:\n if key in self._default_cfg_val_map:\n value = self._default_cfg_val_map[key]\n else:\n err_msg = \"Key [{}] not found\".format(key)\n raise MissingConfigError(err_msg)\n\n return value", "title": "" }, { "docid": "337941a5ed195d6b76df8f86f94330c0", "score": "0.60092133", "text": "def __getitem__(self, key):\n return self._value[key]", "title": "" }, { "docid": "4b16dcbb8ecf868c94c6b4fd536a5ee1", "score": "0.60091484", "text": "def get(self, key):\n return self.__conf[key]", "title": "" }, { "docid": "e09b9626b22b693811cdd95345e275d3", "score": "0.5999304", "text": "def knownValue(infos, key, value):\n for i in infos:\n if i[key] == value:\n return i\n return None", "title": "" }, { "docid": "506024f70361cbc8650c5b310384c2ce", "score": "0.59983367", "text": "def get(self, key):\n bucket = self._hash(key)\n for pair in self._table[bucket]:\n if pair[0] == key:\n return pair[1]", "title": "" }, { "docid": 
"e4f6c60d0017e4d64f6db9af80c43903", "score": "0.5994148", "text": "def __getitem__(self, key):\n\n for (k, v) in self.envs:\n if k and k == key:\n return v\n\n return None", "title": "" }, { "docid": "1695dfbf04496fb104b6b5c18d24c3fd", "score": "0.59933376", "text": "def __getitem__(self, key):\n translation = self._translations.get(key, key)\n if isinstance(translation, list):\n for k in translation:\n try:\n return self[k]\n except KeyError:\n pass\n raise KeyError(key)\n getter = getattr(self, '_get_' + translation.replace('-', '_'), None)\n if callable(getter):\n return getter()\n return self._metadata[translation]", "title": "" } ]
5c5ed9a97b3694924465816aef676480
Check if all files are available before going deeper
[ { "docid": "e6972172325e0f574f4e7a7133e53c0b", "score": "0.59060025", "text": "def check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError('\"{}\" is not available'.format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError('\"{}\" is not available'.format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError('\"{}\" is not available'.format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError('\"{}\" is not available'.format(self.gallery_dir))", "title": "" } ]
[ { "docid": "62ef37979051d2c0dbd9aa09ffc24a8c", "score": "0.72828716", "text": "def dir_ready(path): \n files = get_top_level_files(path)\n return len(files) > 0", "title": "" }, { "docid": "d9a328575ad702a323f7305f026f4507", "score": "0.6978389", "text": "def scan(self):\n for root, dirnames, filenames in os.walk(self.dir, followlinks=True):\n subdir = set()\n for filename in filenames:\n _filename = os.path.join(root, filename)\n if os.access(_filename, os.R_OK):\n subdir.update([_filename])\n self.good_files += 1\n else:\n if os.access(root, os.R_OK | os.X_OK):\n print(\"\\rCan't read file {}\".format(_filename))\n self.bad_files += 1\n self.files.update(subdir)\n for dirname in dirnames:\n _dirname = os.path.join(root, dirname)\n if not os.access(_dirname, os.R_OK | os.X_OK):\n print(\"\\rCan't open directory {}\".format(_dirname))\n self.bad_dirs += 1", "title": "" }, { "docid": "48fb8e450f16da20474b771ab6ea8691", "score": "0.6827803", "text": "def _check_files(self) -> None:\n\n # type check all files provided by user\n self.tagged = self._type_check_files(self.tagged, \"Tagged\")\n self.copy = self._type_check_files(self.copy, \"Copyable\")\n self.link = self._type_check_files(self.link, \"Symlink\")\n\n self.tagged_hierarchy = TaggedFilesHierarchy.from_list_paths(\n self.tagged, dir_contents_to_base=True\n )\n\n for i, value in enumerate(self.copy):\n self.copy[i] = self._check_path(value)\n\n for i, value in enumerate(self.link):\n self.link[i] = self._check_path(value)", "title": "" }, { "docid": "d0d3738a0547d32e2571f68bd2068d8e", "score": "0.66646254", "text": "def scan_all(self):\n try:\n for root, directories, files in os.walk(self.scan_dir):\n for file in files:\n work_file = os.path.join(root,file)\n self.scan(work_file)\n self.check_unpack(work_file)\n except Exception as e:\n logger.error(\"Scan Exception: {} for file {}\".format(e, work_file))", "title": "" }, { "docid": "a54891fb6d50686a31ac50b7bd13b832", "score": "0.66240114", "text": "def check_for_read_until_files(self):\n log.debug(\"Checking for Toml\")\n if self.toml is None or self.unblocked_file:\n for path, dirs, files in os.walk(self.run_folder):\n for file in files:\n if file.endswith(\"channels.toml\"):\n if self.toml is None:\n toml_dict = toml_manager.load(os.path.join(path, file))\n self.toml = _prepare_toml(toml_dict)\n if file == \"unblocked_read_ids.txt\":\n if self.unblocked_file is None:\n self.add_unblocked_reads_file(os.path.join(path, file))", "title": "" }, { "docid": "c89339a306e9d1178d9f4c2d98decca6", "score": "0.6578727", "text": "def all_files_exist(paths):\n all_here = True\n for path in paths:\n list_of_files_in_path = os.listdir(path)\n if 'POSCAR' not in list_of_files_in_path:\n #If the calculation is a NEB the 01 sub-folder is also checked\n if not os.path.isfile(os.path.join(path, '01', 'POSCAR')):\n print \"No POSCAR file present in:\\n%s\" % path\n all_here = False\n elif 'POTCAR' not in list_of_files_in_path:\n print \"No POTCAR file present in:\\n%s\" % path\n all_here = False\n elif 'INCAR' not in list_of_files_in_path:\n print \"No INCAR file present in:\\n%s\" % path\n all_here = False\n elif 'KPOINTS' not in list_of_files_in_path:\n print \"No KPOINTS file present in:\\n%s\" % path\n all_here = False\n return all_here", "title": "" }, { "docid": "b5ae3fb11219c8b0dfe07c3c0db570be", "score": "0.65164614", "text": "def _check_integrity(self):\n is_complete = os.path.isdir(self.root)\n is_complete &= os.path.isfile(os.path.join(self.root, self._cv_scheme))\n is_complete &= 
os.path.isfile(os.path.join(self.root, self._train_val_test_scheme))\n is_complete &= os.path.isfile(os.path.join(self.root, self._mapping_sites))\n\n dir_files = {\n \"cat12vbm\": [\"%s_t1mri_mwp1_participants.csv\", \"%s_t1mri_mwp1_gs-raw_data64.npy\"],\n \"quasi_raw\": [\"%s_t1mri_quasi_raw_participants.csv\", \"%s_t1mri_quasi_raw_data32_1.5mm_skimage.npy\"],\n \"fs\": []\n }\n\n for (dir, files) in dir_files.items():\n for file in files:\n for db in self._studies:\n is_complete &= os.path.isfile(os.path.join(self.root, dir, file%db))\n return is_complete", "title": "" }, { "docid": "cd2a94e892d4c254d68ceb0588c95259", "score": "0.6512319", "text": "def is_effectively_complete(self):\n all_files_exist = True\n for output in self.outputFiles + self.inputFiles:\n if not os.path.exists(output):\n all_files_exist = False\n break\n return all_files_exist", "title": "" }, { "docid": "a4bc8882d9cf2a1b0675d454da177075", "score": "0.64535457", "text": "def check_files(storage_dir, required_files):\n file_list = os.listdir(storage_dir)\n for f in required_files:\n if f not in file_list:\n return False\n return True", "title": "" }, { "docid": "3ff969ffeb5d3f74e93090ce23266ffb", "score": "0.6336105", "text": "def _has_only_files(self, local_folder):\n return not any(os.path.isdir(os.path.join(local_folder, entry))\n for entry in os.listdir(local_folder))", "title": "" }, { "docid": "79ea48f62ad6ad37e945ebbc416f112a", "score": "0.6314823", "text": "def _check_before_run(self):\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n '''\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))\n if not osp.exists(self.probe_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.probe_dir))\n '''", "title": "" }, { "docid": "6bf07bcabcb3a04cc1d2a8ea3fc93393", "score": "0.62760687", "text": "def check_files(directory):\n registry = []\n for file in utils.listdir(directory):\n try:\n if self.name in file or self.bundle_id.id in file or self.bundle_id.app in file or self.filename in file or self.display_name in file:\n registry.append(directory + \"/\" + file)\n elif utils.isdir(directory + \"/\" + file):\n registry = registry + check_files(directory + \"/\" + file)\n except:\n pass\n return registry", "title": "" }, { "docid": "7d5e3de61af7a837abff519c4105ef6b", "score": "0.62266964", "text": "def check_dirs(self):\n for dir in self.dirs:\n file_names = os.listdir(dir)\n for file in file_names:\n if os.path.getsize(dir.rstrip('/')+'/'+file) != 0:\n return \"need shuffle fail materials...\"\n return None", "title": "" }, { "docid": "7d5e3de61af7a837abff519c4105ef6b", "score": "0.62266964", "text": "def check_dirs(self):\n for dir in self.dirs:\n file_names = os.listdir(dir)\n for file in file_names:\n if os.path.getsize(dir.rstrip('/')+'/'+file) != 0:\n return \"need shuffle fail materials...\"\n return None", "title": "" }, { "docid": "332baec6d871a890d809d6b0c1f16b2f", "score": "0.6193012", "text": "def _NoTestResults(self, path):\n return not os.path.isdir(path) or not os.listdir(path)", "title": "" }, { "docid": "4f36adee93b5449907dd4f30466c0cfb", "score": "0.6187744", "text": "def any_missing_file(path_list):\n\tout = False\n\tfor p in path_list:\n\t\tif not os.path.isfile(p):\n\t\t\tout = True\n\treturn out", "title": "" }, { "docid": "9d9dd4f1124ad7dd569818864e1f2eba", "score": "0.61704373", "text": "async def check_files(hass):\n base = 
f\"{hass.config.path()}/custom_components/{DOMAIN}/\"\n missing = []\n for file in REQUIRED_FILES:\n fullpath = f\"{base}{file}\"\n if not os.path.exists(fullpath):\n missing.append(file)\n\n if missing:\n _LOGGER.critical(\"The following files are missing: %s\", str(missing))\n returnvalue = False\n else:\n returnvalue = True\n\n return returnvalue", "title": "" }, { "docid": "b83595db4e589108b8caca1703bc4d89", "score": "0.61610377", "text": "def check_setup():\n\n # check if the initial setup is already done\n html_base_files = os.listdir(html_base)\n index = 'index.html'\n if index not in html_base_files:\n shutil.copy(html_template_dir + os.sep + index, html_base + os.sep + index)\n\n initial_files = ['css', 'fonts', 'js']\n for i in initial_files:\n if i not in html_base_files:\n shutil.copytree(html_template_dir + os.sep + i, html_base + os.sep + i)\n pass", "title": "" }, { "docid": "505e848a1de2d2cdabb25e26020405b0", "score": "0.61569977", "text": "def PreProcess(self) -> None:\n for file_container in self.GetContainers(\n container_class=containers.FSPath):\n self.files.append(file_container.path)\n\n if not self.files:\n message = 'Would fetch 0 files - bailing out instead.'\n self.logger.critical(message)\n raise DFTimewolfError(message, critical=False)\n self.logger.info(\n f'Filefinder to collect {len(self.files):d} items on each host')", "title": "" }, { "docid": "e0cceb6b03e02ba08ae0762eb04f845d", "score": "0.61397815", "text": "def check_files(self, paths=None):\n if paths is None:\n paths = self.paths\n report = self.options.report\n runner = self.runner\n report.start()\n try:\n for path in paths:\n if os.path.isdir(path):\n self.input_dir(path)\n elif not self.excluded(path):\n runner(path)\n except KeyboardInterrupt:\n print('... stopped')\n report.stop()\n return report", "title": "" }, { "docid": "a689b4bc8a3e9dd34501f10c29fc5c81", "score": "0.612311", "text": "def check_files(hass):\n # Verify that the user downloaded all files.\n base = \"{}/custom_components/{}/\".format(hass.config.path(), DOMAIN)\n missing = []\n for file in REQUIRED_FILES:\n fullpath = \"{}{}\".format(base, file)\n if not os.path.exists(fullpath):\n missing.append(file)\n\n if missing:\n LOGGER.critical(\"The following files are missing: %s\", str(missing))\n returnvalue = False\n else:\n returnvalue = True\n\n return returnvalue", "title": "" }, { "docid": "abb618c195b4bd843c40ca2e0bc0cab6", "score": "0.612065", "text": "def check_tree(cls, root):\n # First, check for the case where a .py file and a directory\n # have the same name (without the extension). This can't be\n # handled, so just raise an exception\n found = set()\n for filename in os.listdir(root):\n path = os.path.join(root, filename)\n if os.path.isfile(path):\n filename, ext = os.path.splitext(filename)\n if ext == '.py':\n found.add(filename)\n\n for dirname in os.listdir(root):\n path = os.path.join(root, dirname)\n if os.path.isdir(path):\n if dirname in found:\n raise ValueError(\n \"Found a directory and python file with same name in \"\n \"benchmark tree: '{0}'\".format(path))\n cls.check_tree(path)", "title": "" }, { "docid": "a0594eee681d26d3e2b0298a91afbb30", "score": "0.6119173", "text": "def ensure_non_empty(self):\n self.ensure_present()\n\n if not os.listdir(self.root):\n raise CommandError(\n 'Directory %s is empty. Use --allowEmptySource to sync anyway.' 
% (self.root,)\n )", "title": "" }, { "docid": "8e93b2fb7fd6e14f6f7a196f46f014bb", "score": "0.6114551", "text": "def test_files_exist(self):\n\t\tfiles = session.query(sql.File).filter(sql.File.downloaded).all()\n\t\tfor f in files:\n\t\t\tpath = join(settings.get('output.base_dir'), f.path)\n\t\t\tself.assertTrue(isfile(path), \"File does not exist: %s\" % path)", "title": "" }, { "docid": "746d3442422a5ed464771f98fe259dd0", "score": "0.6076458", "text": "def _check_path(self, path):\n path_files = os.listdir(path)\n modules = list()\n for path_file in path_files:\n # exclude directories\n if os.path.isdir(path + path_file):\n if path_file not in self.exclude_dirs:\n self._check_path(path + path_file)\n # modules\n if path_file.endswith(\".py\"):\n if path_file not in self.exclude_files:\n modules.append(path_file)\n self.modules_dic[path_file] = path + path_file\n\n # store the available modules\n self.available_modules += modules", "title": "" }, { "docid": "84b70162fd3faf0ff4138b89e14b76c4", "score": "0.60592264", "text": "def hasFilesInSelection_0(self, files):\n if not files:\n return False\n for f in files:\n if os.path.isfile(f):\n return True\n if os.path.isdir(f) and self.hasFilesInSelection(f):\n return True\n return False", "title": "" }, { "docid": "4ad70c80a6d68199fc18f33577c89304", "score": "0.60216624", "text": "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "title": "" }, { "docid": "4ad70c80a6d68199fc18f33577c89304", "score": "0.60216624", "text": "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "title": "" }, { "docid": "4ad70c80a6d68199fc18f33577c89304", "score": "0.60216624", "text": "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "title": "" }, { "docid": "034482c9ca909c601ab32aeecb0a63f2", "score": "0.6019177", "text": "def _check_before_run(self):\n if not os.path.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not os.path.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not os.path.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "title": "" }, { "docid": "31512680e6e5c2901ff0eafc566c365c", "score": "0.6012012", "text": "def all_exist(filepaths):\n for fname in 
filepaths:\n if not os.path.exists(fname):\n return False\n return True", "title": "" }, { "docid": "d7dabfda949d367ea51afeefaa0956d6", "score": "0.59918725", "text": "def checkfile(VAMPRNs):\r\n for path in VAMPRNs:\r\n if not os.path.isfile(path):\r\n print(\"File not found!\")\r\n sys.exit()", "title": "" }, { "docid": "f7df3681adf5a277e1aa060901d9aa16", "score": "0.59821445", "text": "def _set_files(self):\n if not os.path.exists(self.resource_dir):\n raise IOError('Resource directory \"www\" does not exists. Exiting..')\n\n for root, dirs, files in os.walk(self.resource_dir):\n for fname in files:\n f = os.path.join(root, fname).split(\"/\", 1)[1]\n if f not in self.file_access_count.keys():\n self.file_access_count[f] = [threading.Lock(), 0]", "title": "" }, { "docid": "98bd7bb65e772096eab76e5cc50862d7", "score": "0.59816945", "text": "def check_xtc_dir(self, path):\n\n file_count = 0\n to_rm, md5_dirs = set(), set()\n\n for root, dirs, fl in os.walk(path):\n to_rm.add(root)\n if os.path.basename(root) == 'md5':\n md5_dirs.add(root)\n else:\n if len(fl) > 0:\n print(\"Found files\", root, len(fl))\n file_count += len(fl)\n\n return file_count, to_rm, md5_dirs", "title": "" }, { "docid": "fa1f12e8409d24fc334a188dbde97abf", "score": "0.59724534", "text": "def check_all():", "title": "" }, { "docid": "eb024de50146441a9c81fac18a3fe36b", "score": "0.5955684", "text": "def complete(self):\n if not os.path.isdir(self.act_out_dir_path):\n return False\n if not len(os.listdir(self.act_out_dir_path)):\n return False\n return True", "title": "" }, { "docid": "a41787423cbbcc4dcc5e03210623158e", "score": "0.59541976", "text": "def test_runfolders_ready(self, data_test_runfolders, rfm):\n for runfolder in rfm.find_runfolders(min_age=0):\n assert all([runfolder.dx_project, rfm.check_fastqs(runfolder), rfm.check_logfiles(runfolder)])", "title": "" }, { "docid": "3e381c951a88ff8abdbb99f5bd29342e", "score": "0.5952547", "text": "def check_file(self, file_name):\n return []", "title": "" }, { "docid": "b4d330341954be5f7e6f1880ac394b4d", "score": "0.5935386", "text": "def _is_ready(self, data_sets=DATA_SETS):\n\n return all([os.path.exists(os.path.join(self._data_directory, data_set)) \n for data_set in data_sets])", "title": "" }, { "docid": "1bd87a552f74aa35bce4542af6602910", "score": "0.5934", "text": "def scan_for_files(self):\n scancode.scan_for_files(self.project)", "title": "" }, { "docid": "d7521ba50b3c8809c2f9ff738a725299", "score": "0.5929955", "text": "def check_fooof(self):\n\n fooof_files = _clean_files(os.listdir(self.fooof_path))\n\n return fooof_files", "title": "" }, { "docid": "40669c30f20f483651e52330d9437f59", "score": "0.59293216", "text": "def is_effectively_complete(self):\n for output in self.outputFiles + self.inputFiles:\n if not os.path.exists(output):\n return False\n return True", "title": "" }, { "docid": "b6023ef2fd702aa12f88e7a873203e66", "score": "0.59290534", "text": "def missing_useful_files(result_dir):\n useful_files = [\n 'ordered_activity_codes.json', # activity codes\n 'product_dict.pickle', # lca.product_dict\n 'bio_dict.pickle', # lca.biosphere_dict\n 'activity_dict.pickle', # lca.activity_dict\n #'tech_params.pickle', # lca.tech_params\n #'bio_params.pickle', # lca.bio_params\n 'IO_Mapping.pickle', # mapping\n #'tech_row_indices.npy', # NO LONGER NEEDED\n #'tech_col_indices.npy', # NO LONGER NEEDED\n #'bio_row_indices.npy', # NO LONGER NEEDED\n #'bio_col_indices.npy' # NO LONGER NEEDED\n ]\n\n if not (Path(result_dir)/\"common_files\").is_dir():\n return 
useful_files\n\n return [\n file for file in useful_files\n if not (Path(result_dir)/\"common_files\"/file).is_file()\n ]", "title": "" }, { "docid": "3bfcde5c76f9fd81a3be171715233316", "score": "0.5916076", "text": "def test_no_exclusion(self):\n full_names, orphan_names = self.checkAllFilesIncluded()\n root = get_exhale_root(self)\n total = self.total(root)\n assert len(full_names) == total\n assert len(orphan_names) == 0", "title": "" }, { "docid": "d2872164058781525effc24bd9608c1a", "score": "0.5908963", "text": "def all_filepaths_exist(filepaths: List['Path']):\n return all([f.is_file() for f in filepaths])", "title": "" }, { "docid": "e6760dcf0909fcf134960ab32eb8360b", "score": "0.59080744", "text": "def _check_all_systems_ready(self):", "title": "" }, { "docid": "b0b65e1f54097f53f94d76c6997eb5d7", "score": "0.5904612", "text": "def wait_for_all_results(self):\n\n # Check all packages are built\n # NOTE(jhesketh): this could be optimised to check packages in\n # parallel. However, the worst case scenario at the moment is\n # \"time for longest package\" + \"time for num_of_package checks\" which\n # isn't too much more than the minimum\n # (\"time for longest package\" + \"time for one check\")\n for package in self.packages:\n result = self.wait_for_package(package)\n if not result:\n return False\n return True", "title": "" }, { "docid": "3cce03b6f2f2e1b99aa69ad45264da2a", "score": "0.58905435", "text": "def _check_sources(self):\n sources = self._run.all_sources\n for src in self._sources.values():\n if src not in sources:\n raise ValueError(\"Source not found: {}!\".format(src))", "title": "" }, { "docid": "aee952ef4ca54907cdfe785a13dd792a", "score": "0.58861494", "text": "def site_directory_contains_stale_files(site_directory):\n if os.path.exists(site_directory):\n if os.listdir(site_directory):\n return True\n return False", "title": "" }, { "docid": "47d66e72479f65747de85b9f3319e8c1", "score": "0.58815163", "text": "def _get_files_to_deploy(self):\n logger.info('Build list of files to deploy')\n for real_dir, dir_name, file_names in os.walk(self.source_dir, topdown=True):\n base_dir = real_dir.replace(os.getcwd() + '/', \"\")\n test = [self._filter_file(file_name, base_dir) for file_name in file_names]\n self.files_to_deploy += filter(lambda v: v is not None, test)", "title": "" }, { "docid": "81e676febef957f46843c8d94bc11d2f", "score": "0.58602566", "text": "def check_mainexp_location(expname):\n print 'checking', expname\n return len(glob.glob(expname))>0", "title": "" }, { "docid": "8f525862fd9412c2c14970e65534d23f", "score": "0.58496445", "text": "def validate(self) -> bool:\n uuids = self.all_uuids\n print(\"testing for \" + str(len(uuids)) + \" resources\")\n all_files = self.data.namelist()\n missing_count = 0\n for uuid in uuids:\n for file in all_files:\n if uuid in file: # only layer file\n try:\n self.data.read(file)\n except IOError:\n print('missing uuid: ' + uuid)\n missing_count += 1\n return missing_count == 0", "title": "" }, { "docid": "4d2e572c12c65a9d085b2245961bb2a5", "score": "0.5848824", "text": "def test_check_files():\n\n fname = os.path.join(data_dir, 'acs_test.fits')\n result = check_files.checkFiles([fname])\n assert result == ([os.path.join(data_dir, 'acs_test.fits')], [None])\n\n with fits.open(fname) as fobj:\n result = check_files.checkFiles([fobj])\n assert isinstance(result[0][0], fits.HDUList)\n assert result[0][0].filename() == os.path.join(data_dir, 'acs_test.fits')\n assert result[1] == [None]\n\n fname = os.path.join(data_dir, 
'o4sp040b0_raw.fits')\n assert check_files.checkFiles([fname]) == ([], [])", "title": "" }, { "docid": "1779918424bcc89758dfe4f3d8ee9305", "score": "0.5846198", "text": "def scan_dir(self, *, path):\n\n print(f\"*Scan dir '{path}'\")\n files = {p.resolve() for p in Path(path).glob(\"**/*\")}\n for file in files:\n if file.stat().st_size > 0 and not file.is_dir():\n self.upsert2(file)", "title": "" }, { "docid": "2452fe973a7842ca8703fd265610953e", "score": "0.58363223", "text": "def _collect_files(self):\n for root, dirs, files in os.walk(self.filepath_in):\n file_path_list = glob.glob(os.path.join(root,'*'))\n \n return file_path_list", "title": "" }, { "docid": "5deb5eb1023ab8cba3c8950c3cc1d14f", "score": "0.5834697", "text": "def TAFResAvailable(self,number, filebasename=\"\"):\n #if not os.path.isdir(self.outputdir+\"/\"+str(number)):\n # print(\"TAF not ran for run\", number)\n # return False\n print(\"search for \",self.inputDir+\"/\"+str(number)+\"/\"+filebasename+\"_run\"+str(number)+\".root\")\n if filebasename !=\"\" and os.path.isfile(self.inputDir+\"/\"+str(number)+\"/\"+filebasename+\"_run\"+str(number)+\".root\"):\n return True\n else:\n return False", "title": "" }, { "docid": "0f2e5ba0df0a7c264631e1e17d0530c3", "score": "0.58316815", "text": "def check_path_and_file(self, path: Path):\n if not path.exists():\n return False\n else:\n if path.isdir():\n if all([True for i in self.necessary_files if (path / i).exists()]):\n return True\n else:\n print(f\"Loss necessary files for {path}.\")\n return False\n elif path.isfile():\n warnings.warn(\"If path is one file, we think the necessary_files is just this file, \"\n \"the ``necessary_files`` would be empty,\"\n \"and with no follow-up necessary file inspection.\")\n self.necessary_files = []\n return True\n else:\n return False", "title": "" }, { "docid": "b0acfa81b131ea876b5232d0b8e50a26", "score": "0.5820601", "text": "def test_no_svn_files(self):\n for root, dirs, files in os.walk(self.target_dir):\n self.assertNotIn('.svn', dirs)", "title": "" }, { "docid": "7105f4dd5efcfd6baf5752e45c84ebe1", "score": "0.5819927", "text": "def test_no_files(self) -> None:\n # pass an empty list of paths; should error since no files found\n with self.assertRaises(SystemExit):\n with self.assertLogs(level=\"ERROR\"):\n for _ in self.load([]):\n pass", "title": "" }, { "docid": "65790b98d04eaf376abb195b09253a7e", "score": "0.58127993", "text": "def hasOpenItems(self):\n openDir = os.path.join(self.tempFolder, self.dmsId, \"open_\" + self.userId)\n if os.path.isdir(openDir):\n files = os.listdir(openDir)\n for i in files:\n if os.path.isdir(i):\n return True\n return False", "title": "" }, { "docid": "49b73c1124404d9762806f4c5b987f48", "score": "0.58028114", "text": "def is_empty(path):\n return os.listdir(path) == list()", "title": "" }, { "docid": "52b94bfe8ab0e58507c962cc6463b90b", "score": "0.5791032", "text": "def file_finder():\n while True:\n try:\n path = PATH_LIST.get()\n for root, _, files in os.walk(path):\n for fname in files:\n path = os.path.join(root, fname)\n if os.path.isfile(path):\n FILE_LIST.put(path)\n PATH_LIST.task_done()\n except KeyboardInterrupt:\n break", "title": "" }, { "docid": "2035e4d8fc0d44eb04f1e120960ac95c", "score": "0.57890314", "text": "def check_outfiles(outfiles):\n bypass=[]\n for file in outfiles:\n if os.path.isfile(file):\n if config.resume and os.path.getsize(file) > 0:\n bypass.append(True)\n else:\n bypass.append(False)\n else:\n bypass.append(False)\n\n if False in bypass or not bypass:\n # remove 
any existing files\n for file in outfiles:\n remove_file(file)\n return False\n else:\n return True", "title": "" }, { "docid": "106371095fed05fefe6b64aca60eec54", "score": "0.57823306", "text": "def _check_file_groups(self, groups, exclude):\n missing = set()\n\n for name, group in groups.items():\n if name in exclude:\n continue\n\n for path in group['source_filenames']:\n static_path = os.path.join('reviewboard', 'static', path)\n\n if not os.path.exists(static_path):\n missing.add(path)\n\n self.assertSetEqual(missing, set())", "title": "" }, { "docid": "79ff01e192bc1f2251425c8077cc2d8d", "score": "0.577545", "text": "def dir_empty(path):\n try:\n next(os.scandir(str(path)))\n except StopIteration:\n return True\n return False", "title": "" }, { "docid": "db92521a14a6b3bfd9c0ac4fe913abb3", "score": "0.5765888", "text": "def _check_integrity(self) -> list[str]:\n # Check if collections exist\n missing_collections = []\n for collection in self.collections:\n stacpath = os.path.join(self.root, collection, \"collection.json\")\n\n if not os.path.exists(stacpath):\n missing_collections.append(collection)\n\n if not missing_collections:\n return []\n\n to_be_downloaded = []\n for collection in missing_collections:\n archive_path = os.path.join(self.root, f\"{collection}.tar.gz\")\n if os.path.exists(archive_path):\n print(f\"Found {collection} archive\")\n if (\n self.checksum\n and check_integrity(\n archive_path, self.collection_md5_dict[collection]\n )\n or not self.checksum\n ):\n print(\"Extracting...\")\n extract_archive(archive_path)\n else:\n print(f\"Collection {collection} is corrupted\")\n to_be_downloaded.append(collection)\n else:\n print(f\"{collection} not found\")\n to_be_downloaded.append(collection)\n\n return to_be_downloaded", "title": "" }, { "docid": "bbe0fb9270bb7a07c82f7af6608693ce", "score": "0.57592857", "text": "def package_files(self):\n return None", "title": "" }, { "docid": "3c5cce305a5eee68901c8b3e260e8fd3", "score": "0.57532513", "text": "def check_outfiles(outfiles):\n bypass = []\n for file in outfiles:\n if os.path.isfile(file):\n if config.resume and os.path.getsize(file) > 0:\n bypass.append(True)\n else:\n bypass.append(False)\n else:\n bypass.append(False)\n\n if False in bypass or not bypass:\n # remove any existing files\n for file in outfiles:\n remove_file(file)\n return False\n else:\n return True", "title": "" }, { "docid": "f0532ac00c4d2f3c0f2f0008ae1cc295", "score": "0.5751989", "text": "def check_files_consistency(self):\n\n files = self.result_files()\n\n for my_result_file in files:\n\n if self.is_valid_result_file(my_result_file):\n self.valid_files.append(my_result_file)\n else:\n self.invalid_files.append(my_result_file)", "title": "" }, { "docid": "f9e4d6c070249d4870885d41e65bc75d", "score": "0.57512265", "text": "def _check_path(self, path):\n for part in self.iter_path(path):\n self._check_component(part, ensure_dir=(path != part))", "title": "" }, { "docid": "a20f3859d65cecb607c25945336d7736", "score": "0.574585", "text": "def check_if_data_sets_are_downloaded():\n\n # If the downloaded data path doesn't exist, raise an error\n if not os.path.exists(downloaded_data_path):\n raise FileNotFoundError(\"The downloaded data path you specified doesn't exist!\")\n\n # Go through each of the supported data sets\n for data_set_name in supported_data_sets:\n # If the data set doesn't exist in the folder, raise an error\n if not os.path.exists(downloaded_data_path + data_set_name + \".pickle\"):\n raise FileNotFoundError(f\"There is no 
{data_set_name}.pickle in the downloaded data folder!\")\n\n print(\"All the supported data sets have been found in your specified folder!\")", "title": "" }, { "docid": "a1e76a7be72055e5a1f98b1d8fd59450", "score": "0.57429254", "text": "def _is_loaded(self):\n with open(\"/proc/filesystems\", \"r\") as f:\n for l in f:\n if l.split()[-1] == self.name:\n return True\n return False", "title": "" }, { "docid": "9dd81329b1461d21592cf94ccf2aec83", "score": "0.57400995", "text": "def __remove_non_readable_files(self):\n for file_item in self.__dir_file_listing:\n if os.access(file_item,os.R_OK) == False: ##os.access will return False for any reason if the file cannot be open. No try/catch required.\n self.__dir_file_listing.remove(file_item)", "title": "" }, { "docid": "e462fb6d9f299b7153cab02204a281ee", "score": "0.5737967", "text": "def finding_eggs_as_local_directories():", "title": "" }, { "docid": "796ed4b0bee42aca791651f496a5c3b4", "score": "0.5734215", "text": "def check_all_installed(self):\n for package_name in self._packages.iterkeys():\n self.check_installed(package_name)", "title": "" }, { "docid": "cbcda340743ae922a7c7cff3bd3d5f62", "score": "0.57198054", "text": "def scan_dir(self, *, path):\n\n print(f\"*Scan dir '{path}'\")\n files = {p.resolve() for p in Path(path).glob(\"**/*\")}\n #exception when long file name in Windows (~260 char)\n for file in files:\n if len(str(file))< 250:\n if file.stat().st_size > 0 and not file.is_dir():\n self.upsert2(file)\n else:\n logging.debug(f\"Excluded because long path: {file}\")", "title": "" }, { "docid": "5919407486319772008f6ab8d5d90de0", "score": "0.5715629", "text": "def check_files_exist(file_list: Iterable[str]) -> list[str]:\n file_errors: list[str] = []\n cwd = Path(os.getcwd())\n for file_ in file_list:\n if cwd.joinpath(file_).is_file() is False:\n file_errors.append(file_)\n return sorted(file_errors)", "title": "" }, { "docid": "7c5ea70744d79bbb3fdc9661284b83b6", "score": "0.5714103", "text": "def check_dir(self):\n time_list = []\n for file in self.files:\n file_list = []\n file_path = self.path / Path(file)\n if not file.endswith(\"/\"):\n file_list = [file_path]\n else:\n for root, _, files in os.walk(file_path):\n root = Path(root)\n file_list = [root / f for f in files]\n\n time_list += [os.stat(f).st_mtime for f in file_list]\n\n new_sum = sum(time_list)\n result = new_sum != self.old_sum\n self.old_sum = new_sum\n return result", "title": "" }, { "docid": "c6c2ddec14ee9c9479900cff2b8a23cb", "score": "0.5704183", "text": "def check_empty(self,path,walk=False):\n\t\tabsolute = os.path.abspath\n\t\tif os.path.isdir(path) and os.path.exists(path):\n\t\t\tif not os.listdir(path):\n\t\t\t\tself.log(\"rm empty finder {} \".format(path))\n\t\t\t\tos.rmdir(path)\n\t\t\t\treturn True\n\t\tif walk:\n\t\t\tself.rm_counter[path] = 0\n\t\t\tfor sub_path ,dirs,files in os.walk(path):\n\n\t\t\t\tif files:\n\n\t\t\t\t\tself.rm_counter[absolute(sub_path)] = 0\n\t\t\t\t\n\t\t\t\t\tpass\n\n\t\t\t\tfor dir in dirs:\n\t\t\t\t\tpth = os.path.join(sub_path,dir)\n\n\t\t\t\t\tself.rm_counter[absolute(pth)] = 1\n\n\t\t\t\tif dirs:\n\t\t\t\t\tmap(self.check_empty, dirs)\n\t\t\t\telse:\n\t\t\t\t\tself.check_empty(sub_path)\n\n\t\t\n\t\t\tself.rm_dirs = [key for key in self.rm_counter if self.rm_counter[key]]\n\t\t\tself.rm_dirs.sort()\n\t\t\tself.rm_dirs.reverse()\n\t\t\tmap(self.check_empty, self.rm_dirs)", "title": "" }, { "docid": "79fd76a0af61fbbd0e07dbeb65cc5ab7", "score": "0.5698816", "text": "def _check_directories(self):\n mode = os.F_OK | 
os.R_OK | os.W_OK | os.X_OK\n for attr in ('data_dir', 'data_underlay_dir'):\n path = getattr(self, attr)\n \n # allow an empty underlay path or None\n if attr == 'data_underlay_dir' and not path:\n continue\n\n path_pages = os.path.join(path, \"pages\")\n if not (os.path.isdir(path_pages) and os.access(path_pages, mode)):\n msg = '''\n%(attr)s \"%(path)s\" does not exists, or has incorrect ownership or\npermissions.\n\nMake sure the directory and the subdirectory pages are owned by the web\nserver and are readable, writable and executable by the web server user\nand group.\n\nIt is recommended to use absolute paths and not relative paths. Check\nalso the spelling of the directory name.\n''' % {'attr': attr, 'path': path,}\n raise error.ConfigurationError(msg)", "title": "" }, { "docid": "ed9c52524dc27dd2fcf12df1e42b84a6", "score": "0.56977266", "text": "def test_find_files(self):\n self.assertListEqual(\n self.fill_dir_with_bogus_files(),\n sorted(find_files(path=self.test_results_data_dir, extension='.nc'))\n )", "title": "" }, { "docid": "121fa8bee6cf1728eda4c8051e4d0b30", "score": "0.5696671", "text": "def check_folder(self, filename):\n pass", "title": "" }, { "docid": "e82f7a163c36db17d0daf67fbcb219b3", "score": "0.56964666", "text": "def test_walk(self):\n # Walk the example directory and check all yielded files\n # are in the list of created files\n file_list = self.example_dir.filelist(include_links=True)\n print(str(file_list))\n for f in Md5Checker.walk(self.dirn):\n print(\"Check for %s\" % f)\n self.assertTrue(f in file_list,\"%s not in files or links?\" % f)\n file_list.remove(f)\n # Check that no files were missed\n self.assertTrue(len(file_list) == 0,\n \"Some files not yielded: %s\" % file_list)", "title": "" }, { "docid": "79d4b61653a9b248f5a87caa0c5296d3", "score": "0.5691032", "text": "def checkfiles(self):\n\n for f in self.configfiles.keys():\n try:\n if os.stat(f)[8] != self.configfiles[f]: # check mtime\n return 1\n except os.error:\n if sys.exc_value == 'Connection timed out':\n # can happen when files on NFS mounted filesystem\n log.log( \"<config>Config.checkfiles(): Timeout while trying to stat '%s' - skipping file checks.\"%(f), 5 )\n return 0\n\n return 0", "title": "" }, { "docid": "adc239de5a1dbc2d6569b9fcb785cbe3", "score": "0.5683738", "text": "def handle_directory_pre(self, dir):\n return False", "title": "" }, { "docid": "53eb00ff44cf6ed2a782be40f99b3efd", "score": "0.56826156", "text": "def apply_to_all(path):\n return True", "title": "" }, { "docid": "8efc35f2a24c6481871fbbaf1ea5af54", "score": "0.5670391", "text": "def validate_comp_dirs(self):\n\n for comp in self.required_comps():\n if comp not in self.compdirs:\n return False\n\n compdir = self.compdirs[comp]\n fullpath = os.path.join(self.mount_point, comp, compdir, 'gpdb')\n\n if not os.access(fullpath, os.R_OK | os.W_OK | os.X_OK):\n return False\n\n return True", "title": "" }, { "docid": "1aecf44938aa419df56bcd6bb1833bba", "score": "0.5669523", "text": "def data_is_available():\n return os.path.exists(DATA_DIR)", "title": "" }, { "docid": "c7af72cb8e6e774bd31b2e0daeb47439", "score": "0.5667838", "text": "def is_sys(file_list):\n for file in os.listdir(file_list):\n if file == SYS:\n return True\n return False", "title": "" }, { "docid": "736d4e5ecd08db238906bd465bd7782c", "score": "0.5665297", "text": "def _load_info(self):\n self.files = self.find_files()\n if len(self.files) == 0:\n self.files_available = False\n else:\n self.files_available = True\n self.metadata = self.get_metadata()", 
"title": "" }, { "docid": "b06cb0670c49e9827dd07402dae75168", "score": "0.5664793", "text": "def file_exists_check_func(**kwargs):\n files = []\n for f in kwargs[\"sftp\"].listdir(kwargs[\"dir\"]):\n if kwargs[\"pattern\"].fullmatch(f) is None:\n continue\n\n fpath = os.path.join(kwargs[\"dir\"], f)\n if _is_file(kwargs[\"sftp\"], fpath):\n if kwargs[\"ignore_empty_file\"] is True:\n if _get_file_size(kwargs[\"sftp\"], fpath) == 0:\n continue\n files.append(f)\n return files", "title": "" }, { "docid": "358a3577524991eca8c9579141c3fcc3", "score": "0.56626815", "text": "def get_files(self):\n directory_found = False\n while not directory_found:\n try:\n self.files = os.listdir(self.path)\n except WindowsError:\n continue_running = input('The path \"{input_path}\" was not found. Do you want to continue? (Y/N)'.\n format(input_path=self.path)).upper()\n\n if continue_running == 'N':\n return\n elif continue_running == 'Y':\n self.path = input(\"Please enter a new path:\")\n\n continue\n else:\n print(\"You did not input a correct value to answer if you wanted to continue.\")\n\n return\n else:\n directory_found = True", "title": "" }, { "docid": "5a976e4b1b1295fbda5c14c876f1c60c", "score": "0.56618124", "text": "def check_files_existence(files: list):\n exist = all(Path(f).exists() for f in files)\n return exist", "title": "" }, { "docid": "4c96554589f575aad64df0d63ee76968", "score": "0.566127", "text": "def reportCollectFiles(): \n print \"\\n\"\n print \"==============================================\"\n print \"Collect all files from defined folder \"\n print \"==============================================\"\n print \"\\n\"", "title": "" }, { "docid": "f54413208e01bd62b09f7aa3c10c3c06", "score": "0.5655476", "text": "def check_db_files():\r\n\r\n path = pathlib.Path().absolute()\r\n path_data = os.path.join(path, db_paths['data'])\r\n path_conf = os.path.join(path, db_paths['conf'])\r\n if (not os.path.exists(path_data) or not os.path.exists(path_conf) ):\r\n return False\r\n return True", "title": "" }, { "docid": "aab7fe763e0744cb3415575a9477aa2f", "score": "0.56490034", "text": "def check_references(self):\n\n #for ref in cmds.file(q=True, r=True):\n # if check_failed:\n # raise Exception(msg)\n pass", "title": "" }, { "docid": "a1f56e0c6c62940a8f802a2b6563e237", "score": "0.56477994", "text": "def check(self):\n self.error = []\n depList = self.swirl.getDependencies()\n returnValue = True\n PluginManager.addSystemPaths(self.extraPath)\n for dep in depList:\n if not PluginManager.isDepsatisfied(dep):\n self.error.append(dep.depname)\n returnValue = False\n return returnValue", "title": "" }, { "docid": "1c2b83bb5fcd0cab4c0e86cb7ca1db51", "score": "0.5645815", "text": "def hasFileDescriptors(self):\n extras = self.getExtras()\n if extras:\n return extras.hasFileDescriptors()", "title": "" }, { "docid": "6882bbc5afebf0f4796588f22e196529", "score": "0.563827", "text": "def is_valid(self) -> bool:\n are_files_exist = []\n for f in self.files:\n full_f_path = self.on + self.frm + f\n is_f_exists = os.path.exists(full_f_path)\n if not is_f_exists:\n click.echo(f'File {full_f_path} does not exist')\n are_files_exist.append(is_f_exists)\n return all(are_files_exist)", "title": "" }, { "docid": "4e1b5c33a050e24c5f83852708debac0", "score": "0.5637182", "text": "def __has_file_been_read__(self):\n if len(self.analyzedFiles) > 0:\n for file in self.analyzedFiles:\n if self.path == file:\n return True\n return False", "title": "" } ]
ca42f6eef5fe72059e7c74868c838763
Check for a known face. Return True and the name if a face is recognized within 10 seconds; otherwise return False. This method can also take images with regular webcams.
[ { "docid": "e19a957c878eb2968ea10a72cd00597c", "score": "0.7239366", "text": "def known_face(use_nao=True, timeout=True):\n # this stuff was in the main so I put it here\n # fileDir = os.path.dirname(os.path.realpath(__file__))\n fileDir = os.path.join(os.path.dirname(__file__), '')\n modelDir = os.path.join(fileDir, 'models')\n dlibModelDir = os.path.join(modelDir, 'dlib')\n openfaceModelDir = os.path.join(modelDir, 'openface')\n\n dlibFacePredictor = os.path.join(dlibModelDir,\n \"shape_predictor_68_face_landmarks.dat\")\n networkModel = os.path.join(openfaceModelDir, 'nn4.small2.v1.t7')\n cuda = False\n\n align = openface.AlignDlib(dlibFacePredictor)\n net = openface.TorchNeuralNet(networkModel, imgDim=IMG_DIM, cuda=cuda)\n\n if use_nao:\n pass # dont worry bout it yo\n else:\n video_capture = cv2.VideoCapture(0)\n video_capture.set(3, WIDTH)\n video_capture.set(4, HEIGHT)\n confidenceList = []\n person_list = []\n images_taken_count = 0\n while True:\n # if it takes longer than 10 images, stop and return False and \"\"\n if images_taken_count > 10 and timeout:\n if not use_nao:\n video_capture.release()\n cv2.destroyAllWindows()\n else:\n headmotions.stiffnessOff()\n print(\"found no known person\")\n return False, \"\"\n\n if use_nao:\n naoqi_frame = get_image()\n # naoqi_frame is an rgb image, I checked this.\n frame = np.array(naoqi_frame)\n else:\n # line for using with webcams\n ret, frame = video_capture.read()\n\n persons, confidences = infer(frame, align, net)\n for i, c in enumerate(confidences):\n if c <= THRESHOLD: # threshold for known faces.\n persons[i] = \"_unknown\"\n print(\"P: \" + str(persons) + \" C: \" + str(confidences))\n\n try:\n # append with two floating point precision\n confidenceList.append('%.2f' % confidences[0])\n person_list.append(persons[0])\n # uncomment if you want to run a few cycles before recognition\n\n # if len(person_list) <= 4:\n # continue\n\n # only check for equal persons in the last 4 entries.\n test_persons = person_list[-4:]\n # test_confidences = confidenceList[-4:]\n # sorry for terribly ugly if statement\n # if the last person is recognised more than once in list\n if test_persons.count(test_persons[-1]) >= REQUIRED_TRIALS and \\\n test_persons[-1] != \"_unknown\":\n # 0.8 threshold for known faces\n # the code previously written recognizes a face above 0.5\n # confidence score, I think thats a bit low so I added a\n # 0.8 minimal score here.\n # if all(i >= 0.65 for i in test_confidences):\n # print(test_confidences)\n print(\"Found \" + str(REQUIRED_TRIALS) + \" high confidences.\")\n if not use_nao:\n video_capture.release()\n cv2.destroyAllWindows()\n headmotions.stiffnessOff()\n return True, test_persons[-1]\n except:\n # If there is no face detected, confidences matrix will be empty.\n # We can simply ignore it.\n pass\n # Print the person name and conf value on the frame\n # cv2.putText(frame, \"P: {} C: {}\".format(persons, confidences),\n # (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\n # (255, 255, 255), 1)\n # cv2.imshow('', frame)\n # quit the program on the press of key 'q'\n # if cv2.waitKey(1) & 0xFF == ord('q'):\n # break\n images_taken_count += 1\n # When everything is done, release the capture\n # this only runs when someone breaks the loop by pressin gq, don't know\n # if we should keep that...\n # video_capture.release()\n # cv2.destroyAllWindows()", "title": "" } ]
[ { "docid": "9653a7cf0dd78e76556db5bbdf2a997d", "score": "0.7215693", "text": "def face_detection(self) -> bool:\n cascade = CascadeClassifier(data.haarcascades + \"haarcascade_frontalface_default.xml\")\n for _ in range(20):\n ignore, image = self.validation_video.read() # reads video from web cam\n scale = cvtColor(image, COLOR_BGR2GRAY) # convert the captured image to grayscale\n scale_factor = 1.1 # specify how much the image size is reduced at each image scale\n min_neighbors = 5 # specify how many neighbors each candidate rectangle should have to retain it\n faces = cascade.detectMultiScale(scale, scale_factor, min_neighbors) # faces are listed as tuple here\n # This is a hacky way to solve the problem. The problem when using \"if faces:\":\n # ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()\n # When used either of the suggestion:\n # AttributeError: 'tuple' object has no attribute 'any' / 'all'\n # But happens only when the value is true so ¯\\_(ツ)_/¯\n try:\n if faces:\n pass\n except ValueError:\n imwrite('cv2_open.jpg', image)\n self.validation_video.release()\n return True", "title": "" }, { "docid": "ea1c8870e0e65bd5eab0dd32f29db899", "score": "0.7056951", "text": "def detect_face(face_file, max_results=10):\n content = face_file.read()\n # [START get_vision_service]\n image = vision.Client(project='reseacrh-173507').image(content=content)\n # [END get_vision_service]\n return image.detect_faces()", "title": "" }, { "docid": "95246d5753f0d845573a816cd00c385d", "score": "0.7051931", "text": "def recogniseFace():\n foundUser = ''\n # construct the argument parser and parse the arguments\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-e\", \"--encodings\", default=\"encoding/encodings.pickle\",\n help=\"path to serialized db of facial encodings\")\n ap.add_argument(\"-r\", \"--resolution\", type=int, default=240,\n help=\"Resolution of the video feed\")\n ap.add_argument(\"-d\", \"--detection-method\", type=str, default=\"hog\",\n help=\"face detection model to use: either `hog` or `cnn`\")\n args = vars(ap.parse_args())\n\n # load the known faces and embeddings\n print(\"[INFO] loading encodings...\")\n data = pickle.loads(open(args[\"encodings\"], \"rb\").read())\n\n # initialize the video stream and then allow the camera sensor to warm up\n print(\"[INFO] starting video stream...\")\n print('\\nPlease look into the camera...')\n vs = VideoStream(src = 0).start()\n time.sleep(2.0)\n\n counter = 0\n\n # loop over frames from the video file stream\n while True:\n # grab the frame from the threaded video stream\n frame = vs.read()\n\n # convert the input frame from BGR to RGB then resize it to have\n # a width of 750px (to speedup processing)\n rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n rgb = imutils.resize(frame, width = args[\"resolution\"])\n\n # detect the (x, y)-coordinates of the bounding boxes\n # corresponding to each face in the input frame, then compute\n # the facial embeddings for each face\n boxes = face_recognition.face_locations(rgb, model = args[\"detection_method\"])\n encodings = face_recognition.face_encodings(rgb, boxes)\n name = ''\n \n counter += 1\n\n # loop over the facial embeddings\n for encoding in encodings:\n # attempt to match each face in the input image to our known\n # encodings\n matches = face_recognition.compare_faces(data[\"encodings\"], encoding)\n\n # check to see if we have found a match\n if True in matches:\n # find the indexes of all matched faces then initialize a\n # 
dictionary to count the total number of times each face\n # was matched\n matchedIdxs = [i for (i, b) in enumerate(matches) if b]\n counts = {}\n\n # loop over the matched indexes and maintain a count for\n # each recognized face face\n for i in matchedIdxs:\n name = data[\"names\"][i]\n counts[name] = counts.get(name, 0) + 1\n\n # determine the recognized face with the largest number\n # of votes (note: in the event of an unlikely tie Python\n # will select first entry in the dictionary)\n name = max(counts, key = counts.get)\n \n\n if counter == 30:\n founderUser = ''\n print('\\nFace Recognition Timeout: Exceeded time limit...')\n break\n if name != '':\n # print to console, identified person\n print(\"User id: {}\".format(name))\n foundUser = name \n break \n\n # do a bit of cleanup\n vs.stop()\n \n return foundUser", "title": "" }, { "docid": "1f68ff3734e671654cb403dbd35f5f29", "score": "0.6953561", "text": "def detect_face(endpoint, key, frame):\n img = cv2.imencode('.jpg', frame)[1].tobytes()\n attributes = ''\n detected_faces = utils.detect_face_stream(endpoint=endpoint, key=key, image=img, face_attributes=attributes,\n recognition_model='recognition_03')\n\n detected_faces, sleep_time = process_response(detected_faces)\n return detected_faces, sleep_time", "title": "" }, { "docid": "c8d3819446857af8c974cbb8a45a257e", "score": "0.68639994", "text": "def face_detection(self):\n if self.face_detection_is_on:\n\n gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)\n gray = cv2.equalizeHist(gray)\n\n faces = self.face_cascade.detectMultiScale(\n gray,\n scaleFactor=1.3,\n minNeighbors=5,\n minSize=(60, 60),\n flags=cv2.CASCADE_SCALE_IMAGE\n )\n\n if self.ret:\n for (x, y, w, h) in faces:\n cv2.rectangle(self.frame, (x, y), (x + w, y + h), (255, 255, 255), 2)", "title": "" }, { "docid": "e4ea72b93f1a2f060361e24d32e5118d", "score": "0.6764592", "text": "def detect_face(image):\n\ttry:\n\t\timg = cv2.imread(image)\n\t\timg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\t\tgray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n\texcept:\n\t\tprint(\"Erronous image: \", image, \" deleted.\")\n\t\treturn False\n\tface_cascade = cv2.CascadeClassifier('detector_architectures/haarcascade_frontalface_default.xml')\n\tfaces = face_cascade.detectMultiScale(gray, 2)\n\t#print('Number of faces detected:', len(faces))\n\t#show_face_detection(img, faces)\n\treturn len(faces) == 1", "title": "" }, { "docid": "75baab138ef0a9e2a2260cfc72e5f88c", "score": "0.6762586", "text": "def detect_faces():\n client = vision.ImageAnnotatorClient()\n\n path = \"1512912404High.jpg\"\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = types.Image(content=content)\n\n response = client.face_detection(image=image)\n faces = response.face_annotations\n nFaces = 0\n for face in faces:\n nFaces +=1\n\n if (nFaces > 1):\n return \"1\"\n else:\n return \"0\"", "title": "" }, { "docid": "2e594f18482e609a2d271332523eea87", "score": "0.67427146", "text": "def find_face(self, file_path, face_path):\r\n img = self.read_img(file_path)\r\n if img is None:\r\n return False\r\n # downscale to a reasonable size (long edge <= 1024)\r\n f = min(1024/img.shape[0], 1024/img.shape[1], 1)\r\n img = cv2.resize(img, None, fx=f, fy=f)\r\n faces = self.face_cascade.detectMultiScale(img)\r\n if len(faces) == 0:\r\n return False\r\n x, y, w, h = max(faces, key=lambda xywh: xywh[2] * xywh[3])\r\n face = img[y:y+h, x:x+w]\r\n cv2.imwrite(face_path, face)\r\n return True", "title": "" }, { "docid": "b4c0636cf85b43d6974ab35f96c21325", 
"score": "0.6723782", "text": "def recognise_user_face(self):\n # Load the known faces and encodings\n data = pickle.loads(open(self.encoding_file, \"rb\").read())\n # Initialize the video stream and warm up the camera sensor\n print(\"[INFO] Starting video stream up...\")\n vs = VideoStream(src = 0).start()\n time.sleep(2.0)\n # Set a counter to count the maximum record of a user's photos\n face_counter = 0\n # Loop over frames from the video file stream\n while face_counter < 10:\n # Get the frame from the threaded video stream\n frame = vs.read()\n # Convert the input frame from BGR to RGB then resize it to have\n # a width of 750px (to speed up processing)\n rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n rgb = imutils.resize(frame, width = self.resolution)\n\n # detect the (x, y)-coordinates of the bounding boxes\n # for each corresponding face in the input frame, then compute\n # the facial encodings for each face\n boxes = face_recognition.face_locations(rgb, model = self.detection_method)\n encodings = face_recognition.face_encodings(rgb, boxes)\n names = []\n\n # Loop over the facial encodings\n for encoding in encodings:\n # Attempt to match each face in the input image to the known encodings\n matches = face_recognition.compare_faces(data[\"encodings\"], encoding)\n name = None\n\n # Check if a match is found\n if True in matches:\n # Find the indices of all matched faces then initialize a\n # dictionary to count the total number of times each face was matched\n matchedIdxs = [i for (i, b) in enumerate(matches) if b]\n counts = {}\n\n # Loop over the matched indices and maintain a counter for\n # each recognized face\n for i in matchedIdxs:\n name = data[\"names\"][i]\n counts[name] = counts.get(name, 0) + 1\n\n # Determine the recognized face with the largest number\n # of votes (note: in the event of an unlikely tie Python\n # will select the first entry in the dictionary)\n name = max(counts, key = counts.get)\n\n # Update the list of names\n names.append(name)\n \n # Loop over the recognized faces\n for name in names:\n # Print to console, for an identified person\n if name is not None:\n vs.stop()\n return name\n # Sleep the cam\n time.sleep(3.0)\n\n face_counter += 1\n\n print(\"No matched user has been found!\")\n vs.stop()\n return name", "title": "" }, { "docid": "e00a5351101b33518d620672053a7c10", "score": "0.67164177", "text": "def face_verification():\n cam = VideoCamera()\n cam.get_frame()\n if cam.action:\n user.face_verified = True\n else:\n user.face_verified = False\n failed.add()\n\n return redirect(url_for('main_page'))", "title": "" }, { "docid": "f409e5e769b92b8f38b40b90094b97c6", "score": "0.6694719", "text": "def detect_faces(path):\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.Image(content=content)\n\n try:\n response = client.object_localization(image=image)\n response = response.localized_object_annotations\n # print(\"made call to google vision\")\n # print(response)\n for obj in response:\n if obj.name == 'Person':\n return 'Person detected'\n\n return 'Person not detected'\n except:\n return ''", "title": "" }, { "docid": "5c64b0576d9374280018baad663bee46", "score": "0.66707546", "text": "def Face_detection(img_bytes):\r\n detected_person = ''\r\n notification_type = ''\r\n # Detecting Faces in the Immage\r\n faceDetectionResponse = rekog_client.detect_faces(\r\n Image=\r\n {\r\n 'Bytes': img_bytes\r\n },\r\n Attributes=['ALL']\r\n )\r\n\r\n # Check Face detection in an image\r\n # If there is a 
registered face(s) into a collection\r\n if len(faceDetectionResponse['FaceDetails']) != 0:\r\n # Search the face into the collection\r\n rekog_face_response = rekog_client.search_faces_by_image(\r\n CollectionId = collectionId,\r\n Image={ \r\n 'Bytes': img_bytes \r\n }, \r\n FaceMatchThreshold= 70,\r\n MaxFaces=10\r\n )\r\n \r\n if rekog_face_response['FaceMatches']:\r\n print('Detected, ',rekog_face_response['FaceMatches'][0]['Face']['ExternalImageId'])\r\n detected_person += rekog_face_response['FaceMatches'][0]['Face']['ExternalImageId'] + ' '\r\n notification_type += 'known'\r\n \r\n else:\r\n notification_type += 'unknown'\r\n detected_person += 'unknown '\r\n print('No faces matched')\r\n \r\n return (detected_person, notification_type)", "title": "" }, { "docid": "d20b699f35145c3bdc8b336bb5e65261", "score": "0.6628699", "text": "def face_recognition(self):\n\n if self.face_recognition_is_on:\n\n gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)\n faces = self.face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)\n\n recognizer = cv2.face.LBPHFaceRecognizer_create(radius=1, neighbors=8, grid_x=8, grid_y=8)\n path = os.path.join(self.path_root, \"recognizers\")\n\n try:\n recognizer.read(f\"{path}\\\\face_trainer.yml\")\n except Exception as e:\n print(e)\n\n with open(f\"{self.path_root}\\\\recognizers\\\\face_labels.json\", 'r') as jsonFile:\n json_object = json.load(jsonFile)\n\n labels = {}\n for key, value in json_object.items():\n labels[value] = key\n\n for (x, y, w, h) in faces:\n roi_gray = gray[y:y + h, x:x + w]\n try:\n id_, conf = recognizer.predict(roi_gray)\n if 60 <= conf <= 99:\n name = labels[id_]\n cv2.putText(self.frame, name, (x, y), self.font, 1, (255, 255, 255), 2, cv2.LINE_AA)\n else:\n cv2.putText(self.frame, \"Unknown\", (x, y), self.font, 1, (255, 255, 255), 2, cv2.LINE_AA)\n\n cv2.rectangle(self.frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\n except Exception as e:\n print(e)", "title": "" }, { "docid": "5d4cf3ab0e31e3760753ff373f0fe650", "score": "0.6626995", "text": "def check_face_id(self, face_id=None, uploaded_face_id=None):\n try:\n confirmed_image = face_recognition.load_image_file(face_id)\n uploaded_image = face_recognition.load_image_file(uploaded_face_id)\n\n face_locations = face_recognition.face_locations(uploaded_image)\n if len(face_locations) == 0:\n return False\n\n confirmed_encoding = face_recognition.face_encodings(confirmed_image)[\n 0]\n unkown_encoding = face_recognition.face_encodings(uploaded_image)[0]\n\n results = face_recognition.compare_faces(\n [confirmed_encoding], unkown_encoding)\n\n if results[0]:\n return True\n\n return False\n except FileNotFoundError:\n print('File not found.')\n return", "title": "" }, { "docid": "0357525ae24af51cee4cd5d6dcf70054", "score": "0.6625902", "text": "def face_recognition(self) -> Union[None, str]:\n for _ in range(20):\n ret, img = self.validation_video.read() # reads video from web cam\n identifier = face_locations(img, model=self.model) # gets image from the video read above\n encoded_ = face_encodings(img, identifier) # creates an encoding for the image\n for face_encoding, face_location in zip(encoded_, identifier):\n # using learning_rate, the encoding is matched against the encoded matrix for images in named directory\n results = compare_faces(self.train_faces, face_encoding, self.learning_rate)\n if True in results: # if a match is found the directory name is rendered and returned as match value\n match = self.train_names[results.index(True)]\n return match", 
"title": "" }, { "docid": "6d8a40a1f7c80244ac03a001c28392a9", "score": "0.6599948", "text": "def webcam(self):\n display = Display_image()\n one_face_found = False\n image_size = 160\n\n # We use the function VideoStream from the imutils.video package to improve performance as it perform \n # parallelize computation\n stream = VideoStream(src=self.src).start()\n \n while not one_face_found:\n frame = stream.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n \n # We detect faces\n faces = self.faceCascade.detectMultiScale(gray,\n scaleFactor=1.3,\n minNeighbors=5) \n\n \n if len(faces) == 1: \n try:\n cv2.destroyAllWindows() \n except:\n pass \n margin = 5\n image_size = 160\n x, y, w, h = faces[0]\n cropped = frame[y-margin//2:y+h+margin//2,x-margin//2:x+w+margin//2,:]\n cv2.putText(frame,f\"Indice de flou: {round(cv2.Laplacian(frame, cv2.CV_64F).var())}\",(20,50), cv2.FONT_HERSHEY_PLAIN,2,(0, 0, 255))\n\n cv2.imshow(\"Photo taken\",frame) \n cv2.imshow(\"Photo cropped\",cropped)\n\n start = time.perf_counter()\n while time.perf_counter() - start < 3: \n if cv2.waitKey(1) & 0xFF == ord('q'):\n one_face_found = True\n break\n \n elif len(faces) > 1:\n cv2.putText(frame,f\"More than one face found ({len(faces)} faces)\",(20,50), cv2.FONT_HERSHEY_PLAIN,2,(0, 0, 255))\n else:\n cv2.putText(frame,\"No faces found\",(20,50), cv2.FONT_HERSHEY_PLAIN,2,(0, 0, 255))\n\n \n stream.stop()\n cv2.destroyAllWindows()\n\n if one_face_found == True:\n loop = False\n name = \"\"\n stream.stop()\n cv2.destroyAllWindows()\n while not loop:\n print(\"Enter the name: \")\n name = input().lower() \n if not self.database.check_name(name):\n loop = True\n else:\n print(\"Cette personne existe déjà dans la base de données\")\n\n # We preprocess image so it can be understand by the model\n image_processor = utils_model_img.image_processing(frame,self.faceCascade)\n\n # We save the cropped image\n cropped_image = image_processor.img_prep(faces[0])\n cv2.imwrite(f'{self.directory}/{name}.jpg', cropped_image)\n\n # We compute the face embeding for the image and we insert the new user to the database\n image_processor.prewhiten()\n emb = self.model_facenet.predict_emb(image_processor.img)\n self.database.update_database(name,emb)", "title": "" }, { "docid": "96814f17498850c5fb7c4ef90277cb28", "score": "0.6593698", "text": "def recognizeFace(img: np.array) -> None:\n # Initialize id counter\n _id: int = 0\n\n # Create recognizer\n recognizer = cv2.face.LBPHFaceRecognizer_create()\n # Load training data\n recognizer.read(\"training.yml\")\n\n faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades\n + HAARCASCADE_FRONTALFACE)\n\n # Convert to gray scale\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Define min window size to be recognized as a face\n minW = 0.1*gray.shape[0]\n minH = 0.1*gray.shape[1]\n\n # Detect faces\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.2,\n minNeighbors=5,\n minSize=(int(minW), int(minH)),\n )\n\n rectangles: list = []\n recognizedFaces: list = []\n confidences: list = []\n\n for (x, y, w, h) in faces:\n # Start (x, y), end (x, y)\n rectangles.append([(x, y), (x + w, y + h)])\n\n # Recognize face\n _id, confidence = recognizer.predict(gray[y:y + h, x:x + w])\n\n # Check if confidence is less than 100\n # 0 is perfect match\n if (confidence < 100):\n recognizedFaces.append(NAMES[_id])\n else:\n recognizedFaces.append(\"unknown\")\n\n confidences.append(round(100 - confidence, 2))\n\n return {'rectangles': rectangles, 'faces': recognizedFaces, 'confidences': 
confidences}", "title": "" }, { "docid": "2f7a4c89fb44cc259b7620fd8dedd1d9", "score": "0.6586652", "text": "def faceDetection(imgPath):\n\n # Read the image from the local path\n img = cv.imread(imgPath)\n\n # Convert the image to gscale\n grayImg = gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n\n # Apply the model to the image\n faces = face_cascade.detectMultiScale(grayImg)\n\n # We only care if there is an image in the picture.\n # If there is no image, the result would be 0\n if len(faces) > 0:\n return True\n else:\n return False", "title": "" }, { "docid": "308589f0a1314d65048b8e7577816b5d", "score": "0.6586483", "text": "def find_face():\n gray = cv2.cvtColor(inputs['cam_frame'], cv2.COLOR_BGR2GRAY)\n gray = cv2.equalizeHist(gray)\n rects = detect_faces(gray, cascade)\n\n area = 0\n pos_x = CENTER_X\n pos_y = CENTER_Y\n \n for x1, y1, x2, y2 in rects:\n a = (x2 - x1) * (y2 - y1)\n if a > area:\n area = a\n pos_x = (x1 + x2) / 2\n pos_y = (y1 + y2) / 2\n size = math.sqrt(area) / 2.0\n if debug:\n cv2.rectangle(inputs['cam_frame'], (int(pos_x - size), int(pos_y - size)), (int(pos_x + size), int(pos_y + size)), (255, 255, 0), 2)\n \n if area > 100: \t\t\t\n inputs['last_known_face_x'] = pos_x\n inputs['last_known_face_y'] = pos_y\n elif inputs['last_known_face_x'] is not None:\n if outputs['head'] > servo_if.HEAD_LEFT and outputs['head'] < servo_if.HEAD_RIGHT and outputs['neck'] > servo_if.NECK_DOWN and outputs['neck'] < servo_if.NECK_UP:\n pos_x = inputs['last_known_face_x']\n pos_y = CENTER_Y\n\n inputs['face_x'] = pos_x\n inputs['face_y'] = pos_y\n inputs['face_area'] = area", "title": "" }, { "docid": "4dd44299e8505f8f258c43ca3ff583ff", "score": "0.65800154", "text": "def face_recognition_start(id):\n gcs.download_trainer()\n\n # Get all users from MySQL Database\n users = UserDatabase()\n user_dict = users.get_all()\n print(user_dict)\n\n # Create Local Binary Patterns Histograms for face recognization\n recognizer = cv2.face.LBPHFaceRecognizer_create()\n\n # Load the trained mode\n recognizer.read('trainer.yml')\n\n # Load prebuilt model for Frontal Face\n cascadePath = \"haarcascade_frontalface_default.xml\"\n\n # Create classifier from prebuilt model\n faceCascade = cv2.CascadeClassifier(cascadePath)\n # Set the font style\n font = cv2.FONT_HERSHEY_SIMPLEX\n\n # Initialize and start the video frame capture\n cam = cv2.VideoCapture(get_usable_camera_id(), cv2.CAP_DSHOW)\n\n # Loop\n while True:\n # Read the video frame\n ret, im =cam.read()\n\n # Convert the captured frame into grayscale\n gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)\n\n # Get all face from the video frame\n faces = faceCascade.detectMultiScale(gray, 1.2,5)\n\n # For each face in faces\n for(x,y,w,h) in faces:\n\n # Create rectangle around the face\n cv2.rectangle(im, (x-20,y-20), (x+w+20,y+h+20), (0,255,0), 4)\n\n print(recognizer.predict(gray[y:y+h,x:x+w]))\n # Recognize the face belongs to which ID\n Id = recognizer.predict(gray[y:y+h,x:x+w])\n\n for user in user_dict:\n print(user['USER_ID'])\n print(Id)\n if(Id[0] == user['USER_ID']):\n Id = user['name']\n break\n else:\n Id = \"Unknown\"\n\n # Put text describe who is in the picture\n cv2.rectangle(im, (x-22,y-90), (x+w+22, y-22), (0,255,0), -1)\n cv2.putText(im, str(Id), (x,y-40), font, 2, (255,255,255), 3)\n\n # Display the video frame with the bounded rectangle\n cv2.imshow('im',im) \n\n # If 'q' is pressed, close program\n if cv2.waitKey(10) & 0xFF == ord('q'):\n break\n\n # Stop the camera\n cam.release()\n\n # Close all windows\n 
cv2.destroyAllWindows()\n\n return False", "title": "" }, { "docid": "5c30c96c0739573a6959915d498243bb", "score": "0.6564362", "text": "def authenticate(self):\n\t\tself.__cam_stop_flag = False\n\n\t\trecognizer = self.create_recognizer()\n\t\tface_cascade = cv2.CascadeClassifier(self.CASCADE_PATH)\n\t\tfont = cv2.FONT_HERSHEY_SIMPLEX\n\n\t\tnames = self.get_ids_and_names()\n\n\t\tcam = cv2.VideoCapture(0)\n\t\tcam.set(cv2.CAP_PROP_FPS, self.__FPS)\n\t\tcam.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\n\t\tcam.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\n\n\t\twhile True:\n\t\t\tret, img = cam.read()\n\t\t\tgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n\t\t\tfaces = face_cascade.detectMultiScale(\n\t\t\t\tgray,\n\t\t\t\tscaleFactor=1.2,\n\t\t\t\tminNeighbors=5,\n\t\t\t\tminSize=(10, 10),\n\t\t\t)\n\n\t\t\tfor (x, y, w, h) in faces:\n\n\t\t\t\tid_, confidence = recognizer.predict(gray[y:y + h, x:x + w])\n\n\t\t\t\tif confidence < self.__CONFIDENCE_THRESHOLD: # some user is recognized\n\t\t\t\t\tname = names[id_]\n\t\t\t\t\tconfidence = \"Confidence: {0}\".format(round(self.__CONFIDENCE_THRESHOLD - confidence))\n\t\t\t\t\tcv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2) # draw a green rectangle around the face\n\t\t\t\telse: # unknown face\n\t\t\t\t\tname = \"unknown\"\n\t\t\t\t\tconfidence = \"Confidence: {0}\".format(round(self.__CONFIDENCE_THRESHOLD - confidence))\n\t\t\t\t\tcv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2) # draw a red rectangle around the face\n\n\t\t\t\tcv2.putText(img, str(name), (x + 5, y - 5), font, 1, (255, 255, 255), 2) # print the name of the user\n\t\t\t\tcv2.putText(img, str(confidence), (x + 5, y + h - 5), font, 1, (255, 255, 0), 1) # print confidence\n\n\t\t\tcv2.imshow('camera', img)\n\n\t\t\tk = cv2.waitKey(10) & 0xff # 'ESC' to quit\n\t\t\tif k == 27 or self.__cam_stop_flag:\n\t\t\t\tbreak\n\n\t\t\ttime.sleep(self.__PAUSE)\n\n\t\tcam.release()\n\t\tcv2.destroyAllWindows()", "title": "" }, { "docid": "296ec9592bcb6cab8bbc3d305ac69b22", "score": "0.655878", "text": "def find_face_type():\n # get the lengths of the guidelines\n guideline_lengths = get_guideline_lengths()\n\n forehead_line_length = guideline_lengths[0]\n face_length = guideline_lengths[1]\n ear_line_length = guideline_lengths[2]\n jaw_line_length = guideline_lengths[3]\n\n # the largest of the three guidelines becoemes the face width\n face_width = max(forehead_line_length, ear_line_length, jaw_line_length)\n\n tolerance = 40\n # debugging\n print(f'____________________________\\n{image_name}\\n')\n\n print(f'Forehead line length: {forehead_line_length}\\nEar line length: ' + \n f'{ear_line_length}\\nJaw line length: {jaw_line_length}\\n' +\n f'Face length: {face_length}\\n')\n\n # primary trait conditions\n if forehead_line_length <= ear_line_length and\\\n ear_line_length > jaw_line_length:\n print('You have an oval face')\n elif abs(forehead_line_length - jaw_line_length) <= tolerance and\\\n abs(ear_line_length - jaw_line_length) <= tolerance:\n print('You have a square face')\n elif forehead_line_length - jaw_line_length >= tolerance and\\\n ear_line_length - jaw_line_length >= tolerance:\n print('You have a diamond face')\n \n # secondary trait conditions\n if face_width/face_length <= 0.8:\n print('You have an oblong face')\n elif face_width/face_length >= 0.9:\n print('You have a round face')\n \n\n #debugging\n guidelines = get_guideline_coordinates() \n\n image = cv2.imread(image_name)\n window_name = 'Image'\n color = (0, 255, 0)\n thickness = 4\n t1 = (int(guidelines[0][0]), 
int(guidelines[0][1]))\n p1 = (int(guidelines[1][0]), int(guidelines[1][1]))\n t2 = (int(guidelines[2][0]), int(guidelines[2][1]))\n p2 = (int(guidelines[3][0]), int(guidelines[3][1]))\n t3 = (int(guidelines[4][0]), int(guidelines[4][1]))\n p3 = (int(guidelines[5][0]), int(guidelines[5][1]))\n t4 = (int(guidelines[6][0]), int(guidelines[6][1]))\n p4 = (int(guidelines[7][0]), int(guidelines[7][1]))\n\n image = cv2.line(image, t1, p1, color, thickness)\n image = cv2.line(image, t2, p2, color, thickness)\n image = cv2.line(image, t3, p3, color, thickness)\n image = cv2.line(image, t4, p4, color, thickness)\n image = cv2.line(image, (10, 10), (50, 10), (255, 0, 0), thickness)\n cv2.imshow(window_name, image)\n cv2.waitKey()", "title": "" }, { "docid": "f5ebbf110d3f528067bbc4ac7d1622b7", "score": "0.6539052", "text": "def IsFace(self) -> bool:", "title": "" }, { "docid": "00c2e97e36db1ad1765b48412a2cd046", "score": "0.6518505", "text": "def detect_face(img_url, landmarks = False, mouth = False):\r\n global CF\r\n if landmarks == False :\r\n face = CF.face.detect(img_url, landmarks = False)\r\n if len(face) > 2:\r\n return False\r\n return face[0].get('faceId')\r\n if landmarks == True and mouth == True :\r\n face = CF.face.detect(img_url, landmarks=True, attributes = \"headPose,emotion\")\r\n if len(face) > 2:\r\n return False\r\n print(face)\r\n # mouth_coordinates = {}\r\n # mouth_coordinates['noseTop'] = face[0].get('faceLandmarks').get('noseRightAlarOutTip')\r\n # mouth_coordinates['leftBrow'] = face[0].get('faceLandmarks').get('eyebrowLeftOuter')\r\n # mouth_coordinates['rightBrow'] = face[0].get('faceLandmarks').get('eyebrowRightOuter')\r\n # mouth_coordinates['noseRoot'] = face[0].get('faceLandmarks').get('noseRootLeft')\r\n # mouth_coordinates['mouthLeft'] = face[0].get('faceLandmarks').get('mouthLeft')\r\n # mouth_coordinates['mouthRight'] = (face[0].get('faceLandmarks').get('mouthRight'))\r\n # mouth_coordinates['noseBottom'] = (face[0].get('faceLandmarks').get('noseLeftAlarOutTip'))\r\n # mouth_coordinates['lipTop'] = (face[0].get('faceLandmarks').get('upperLipTop'))\r\n return face#mouth_coordinates\r", "title": "" }, { "docid": "dfa9b01c86328c5d8868c58af321f0ef", "score": "0.6445684", "text": "def test_detect_single_face(img_url='http://lambdal.com/test2.jpg'):\n assert_with(path('/detect?urls=%s' % img_url), partial(face_detected,\n count=1))", "title": "" }, { "docid": "e363ff82042180618000584b2b09cbc7", "score": "0.64378405", "text": "def classify_face(im):\n faces = get_encoded_faces()\n faces_encoded = list(faces.values())\n known_face_names = list(faces.keys())\n\n img = cv2.imread(im, 1)\n #img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)\n #img = img[:,:,::-1]\n \n face_location = face_recognition.face_locations(img)\n unknown_face_encodings = face_recognition.face_encodings(img, face_location)\n\n face_names = []\n for face_encoding in unknown_face_encodings:\n # See if the face is a match for the known face(s)\n matches = face_recognition.compare_faces(faces_encoded, face_encoding)\n name = \"Unknown\"\n #true,false\n print(matches)\n\n # use the known face with the smallest distance to the new face\n face_distances = face_recognition.face_distance(faces_encoded, face_encoding)\n # array value\n print(face_distances)\n\n best_match_index = np.argmin(face_distances)\n # best_match_index = np.array(face_distances)\n # best_match_index=best_match_index.astype(int)\n\n # # best index image\n print(best_match_index)\n\n if matches[best_match_index]:\n name = 
known_face_names[best_match_index]\n\n face_names.append(name)\n return face_names", "title": "" }, { "docid": "d50ab9eb13b1e8130b88515f8309f3ce", "score": "0.6437357", "text": "def detect_face(face_file, max_results=4):\r\n image_content = face_file.read()\r\n batch_request = [{\r\n 'image': {\r\n 'content': base64.b64encode(image_content).decode('UTF-8')\r\n },\r\n 'features': [{\r\n 'type': 'FACE_DETECTION',\r\n 'maxResults': max_results,\r\n }]\r\n }]\r\n\r\n service = get_vision_service()\r\n request = service.images().annotate(body={\r\n 'requests': batch_request,\r\n })\r\n response = request.execute()\r\n\r\n# return response['responses'][0]['faceAnnotations']\r\n return response['responses'][0]", "title": "" }, { "docid": "3e57c2abd384c7918b26f8ad0a39b63a", "score": "0.64228386", "text": "def recognizeFacePlot() -> None:\n # Initialize id counter\n _id: int = 0\n\n # Create recognizer\n recognizer = cv2.face.LBPHFaceRecognizer_create()\n # Load training data\n recognizer.read(\"training.yml\")\n\n faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades\n + HAARCASCADE_FRONTALFACE)\n\n font = cv2.FONT_HERSHEY_SIMPLEX\n\n # Open camera\n cam: cv2.VideoCapture = cv2.VideoCapture(0)\n # Set video width and height\n cam.set(3, 640)\n cam.set(4, 480)\n\n # Define min window size to be recognized as a face\n minW = 0.1*cam.get(3)\n minH = 0.1*cam.get(4)\n\n while True:\n # Take a photo\n ret, img = cam.read()\n\n if ret:\n # Convert to gray scale\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Detect faces\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.2,\n minNeighbors=5,\n minSize=(int(minW), int(minH)),\n )\n\n # Draw faces\n for(x, y, w, h) in faces:\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\n # Recognize face\n _id, confidence = recognizer.predict(gray[y:y + h, x:x + w])\n\n # Check if confidence is less than 100\n # 0 is perfect match\n if (confidence < 100):\n _id = NAMES[_id]\n else:\n _id = \"unknown\"\n\n confidence = f\" {round(100 - confidence)}%\"\n\n cv2.putText(img, str(_id), (x + 5, y - 5), font, 1, (255, 255, 255), 2)\n cv2.putText(img, str(confidence), (x + 5, y + h - 5),\n font, 1, (255, 255, 0), 1)\n\n cv2.imshow(\"Camera\", img)\n\n # Press \"ESC\" for exiting video\n key = cv2.waitKey(10) & 0xff\n\n if key == 27:\n break\n\n cam.release()\n cv2.destroyAllWindows()", "title": "" }, { "docid": "d04cb75e14e917957dc12316bf514d7c", "score": "0.64131737", "text": "def face_detector(img_path):\n img = cv2.imread(img_path)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n path = pkg_resources.resource_filename(\n __name__, 'resources/haarcascades/haarcascade_frontalface_alt.xml')\n face_cascade = cv2.CascadeClassifier(path)\n faces = face_cascade.detectMultiScale(gray)\n return len(faces) > 0", "title": "" }, { "docid": "f257601aa98445f5dcf1df87896e3392", "score": "0.6402541", "text": "def generateFace(name):\n personName = name\n dataPath = './faces'#Ruta de imagenes\n personPath = dataPath + '/' + personName\n if not os.path.exists(personPath):\n print('Folder created: ',personPath)\n os.makedirs(personPath)\n cap = cv2.VideoCapture(0,cv2.CAP_DSHOW)\n # cap = cv2.VideoCapture('Video.mp4')\n faceClassif = cv2.CascadeClassifier(cv2.data.haarcascades+'haarcascade_frontalface_default.xml') #obtained directly from opencv\n count = 0\n while True:\n \n ret, frame = cap.read()\n if ret == False: break\n frame = imutils.resize(frame, width=640)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) #converts frame to gray color\n auxFrame = 
frame.copy()\n faces = faceClassif.detectMultiScale(gray,1.3,5)\n for (x,y,w,h) in faces: #Creates a frame to take the 100 photos \n cv2.rectangle(frame, (x,y),(x+w,y+h),(0,255,0),2)\n showFace = auxFrame[y:y+h,x:x+w]\n showFace = cv2.resize(showFace,(150,150),interpolation=cv2.INTER_CUBIC)\n cv2.imwrite(personPath + '/rotro_{}.jpg'.format(count),showFace)\n count = count + 1 #count of photos\n cv2.imshow('frame',frame)\n k = cv2.waitKey(1)\n if k == 27 or count >= 100: #Key: Esc\n break\n cap.release()\n cv2.destroyAllWindows()\n return True", "title": "" }, { "docid": "4ba4bcffccca67fbbee4f10bdb998f1e", "score": "0.6389985", "text": "def detect_one_face(image):\n faces = faceCascade.detectMultiScale(\n image,\n scaleFactor=1.1,\n minNeighbors=5,\n minSize=(image.shape[1] // 10, image.shape[0] // 10)\n )\n number_of_faces = len(faces)\n if number_of_faces == 0:\n raise UserWarning(\"No face was found on image\")\n elif number_of_faces == 1:\n # One face was found\n x, y, w, h = faces[0]\n else:\n raise UserWarning(\"Multiple faces were found on image\")\n\n face_image = image[y:y + h, x:x + w]\n return face_image", "title": "" }, { "docid": "3c7f84ce9c155e6ed756518095c1cdb4", "score": "0.63824767", "text": "def identify_from_video(endpoint, key, group_id, frame):\n start_measure = time.time()\n thickness = 2\n detected_faces, sleep_time = detect_face(endpoint, key, frame)\n print(detected_faces)\n if sleep_time == 10:\n time.sleep(sleep_time)\n\n if detected_faces is not None:\n faces_info = identify_and_process(detected_faces, endpoint, key, group_id)\n print('Detected: {} and Info {}'.format(detected_faces, faces_info))\n for face, info in zip(detected_faces, faces_info):\n if info['confidence'] > 0.5:\n color = (0, 255, 0)\n else:\n color = (0, 0, 255)\n frame = cv2.rectangle(frame, *utils.get_rectangle(face), color, thickness)\n\n print('Total time required:', time.time() - start_measure)\n return frame, faces_info\n\n print('Total time required:', time.time() - start_measure)\n return frame, None", "title": "" }, { "docid": "3fa1a6e7b7841d07ef78d7cba1e8be33", "score": "0.6354457", "text": "def recognize():\r\n cap = cv2.VideoCapture(0)\r\n time.sleep(2.0)\r\n emb = 'q^'\r\n comp = deque([])\r\n authenticated = False\r\n db_found_user = False\r\n # Loop till user is recognized in feed.\r\n while True:\r\n # Capture the stream and convert to gray scale. 
Try to detect a face.\r\n ret, img = cap.read()\r\n # Color image to gray scale\r\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n # use\r\n faces = detector(img_gray)\r\n # If face is detected\r\n w = 0\r\n h = 0\r\n if len(faces) >= 1:\r\n # Set face to first\r\n face = faces[0]\r\n # If more than one face is detected select largest in set.\r\n for i in range(len(faces)):\r\n (_x, _y, _w, _h) = face_utils.rect_to_bb(faces[i])\r\n if _w > w or _h > h:\r\n face = faces[i]\r\n # Get bounding box of the detected face.\r\n (x, y, w, h) = face_utils.rect_to_bb(face)\r\n # Align the detected face using face_aligner\r\n face_img = face_aligner.align(img, img_gray, face)\r\n encoding = encode_stream(face_img, model)\r\n comp.append(encoding)\r\n data_string = pickle.dumps(encoding)\r\n # If user is found their motion embeddings will be returned, Else returns 'q^'\r\n if emb == 'q^':\r\n emb = client(data_string) # todo test more.\r\n print(emb)\r\n\r\n while True:\r\n if len(comp) > 60:\r\n comp.popleft()\r\n else:\r\n break\r\n\r\n if emb != 'q^':\r\n if len(comp) == 60:\r\n sim = compare_matrices(comp, emb, metric=0)\r\n print('test')\r\n # Que 60 frames of user dropping oldest and storing newest each iteration\r\n\r\n # Uncomment for visual window\r\n # if min_dist < 0.08:\r\n # cv2.putText(img, \"Face : \" + str(name), (x, y - 50), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)\r\n # cv2.putText(img, \"Dist : \" + str(min_dist), (x, y - 20), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)\r\n # else:\r\n # cv2.putText(img, 'No matching faces', (x, y - 20), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 0, 255), 2)\r\n # Show Cam feed in window\r\n # cv2.imshow(\"Frame\", img)\r\n\r\n key = cv2.waitKey(1) & 0xFF\r\n # If the `q` key was pressed, break from the loop\r\n if key == ord(\"q\"):\r\n break\r\n\r\n # Clean up--destroy windows and stop stream\r\n cv2.destroyAllWindows()\r\n cap.release()", "title": "" }, { "docid": "a6b32e841a70b8969710a61d441cf3b6", "score": "0.6288752", "text": "def detect_single_face(self,img,print_time=True): \n bounding_box=[]\n landmark_box=[] \n detect_begin=time.time()\n \n if(img is None):\n return [],[] \n \n img=(img-127.5)/128\n \n if self.pnet_model:\n score_box,bounding_box,_=self.detect_Pnet(self.pnet_detector,img)\n \n if(len(bounding_box)==0):\n return [],[]\n\n if self.rnet_model: \n score_box,bounding_box,_=self.detect_Rnet(self.rnet_detector,img,bounding_box)\n \n if(len(bounding_box)==0):\n return [],[]\n \n if self.onet_model:\n score_box,bounding_box,landmark_box=self.detect_Onet(self.onet_detector,img,bounding_box)\n \n if(len(bounding_box)==0):\n return [],[]\n \n if print_time:\n print(\"detect-time: \",time.time()-detect_begin)\n \n return bounding_box,landmark_box", "title": "" }, { "docid": "94b42af6b2b6b0bc4655c2ecd100e330", "score": "0.62557644", "text": "def has_faces(self): # pragma: no cover", "title": "" }, { "docid": "76763a2f7beaebf7889fec42092b956b", "score": "0.62555593", "text": "def check_face(self, target):\n\t\tfaces = self.detector.detectMultiScale(self.face, 1.3,5)\n\t\tif not len(faces):\n\t\t\tself.face_match = 0\n\t\t\treturn self\n\t\t(x,y,w,h) = faces[0]\n\t\tmatch = target.predict(self.face[y:y+h,x:x+w])\n\t\tif match == 1:\n\t\t\tself.face_match = 1\n\t\telse:\n\t\t\tself.face_match = 0 # negative if found face but wasnt match\n\t\treturn self", "title": "" }, { "docid": "f8ad49b8e08ae47bee7f996093bcd05e", "score": "0.6255486", "text": "def face_compare_face():\n try:\n face_base64 = request.json\n result = 
FaceUtils.compare_face(known_face=face_base64['faceimage1'], unknown_face=face_base64['faceimage2'])\n return jsonify(result)\n except KeyError as e:\n logging.error(e)\n abort(500)", "title": "" }, { "docid": "ad338e6c0917970a0c587b106e057b89", "score": "0.62313974", "text": "def detect_faces(img):\n\n faces_list = []\n\n # Convert the test image to gray scale (opencv face detector expects gray images)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Load OpenCV face detector (LBP is faster)\n face_cascade = cv2.CascadeClassifier('FER/haarcascade_frontalface_default.xml')\n\n # Detect multiple faces (some images may be closer to camera than others)\n # result is a list of faces\n faces = face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5)\n\n # If not face detected, return empty list\n if len(faces) == 0:\n return faces_list\n\n for i in range(0, len(faces)):\n (x, y, w, h) = faces[i]\n face_dict = {'face': gray[y:y + w, x:x + h], 'rect': faces[i]}\n faces_list.append(face_dict)\n\n # Return the face image area and the face rectangle\n return faces_list", "title": "" }, { "docid": "e4ca8d6a18568a6db5a7bf9e5c5497ee", "score": "0.61941296", "text": "def test_detect_one_with_image_without_humans(self):\n imageWithoutFace = VLImage.load(filename=NO_FACES)\n\n detection = self.detector.detectOne(image=imageWithoutFace)\n assert detection is None, detection", "title": "" }, { "docid": "0a3e7ce374137272b1f2e3997e9b5b24", "score": "0.6156937", "text": "def faceset_capture(id, name):\n # list_of_users = read_user_dataset()\n\n # Start camera\n camera = cv2.VideoCapture(get_usable_camera_id(), cv2.CAP_DSHOW)\n\n # Use casecade identifier to detect frontal faces\n face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\n # Keep track of number of images captured\n count = 0\n\n while True:\n # Capture camera frame\n _,frame = camera.read()\n\n # Convert to grayscale\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n faces = face_detector.detectMultiScale(gray, 1.3, 5)\n for (x,y,w,h) in faces:\n cv2.rectangle(frame, (x,y), (x+w, y+h), (255,0 ,0) , 2)\n count += 1\n file_name = \"User.\"+str(id)+\".\"+str(name)+'.'+str(count)+\".jpg\"\n # cv2.imwrite(\"user_dataset/User.\"+str(id)+\".\"+str(name)+'.'+str(count)+\".jpg\", gray[y:y+h,x:x+w])\n cv2.imwrite(\"user_dataset/\" + file_name, gray[y:y+h,x:x+w])\n # upload images to cloud\n gcs.upload_from_filename(\"user_dataset/\" + file_name, file_name)\n\n # Display the frame, with bounded rectangle on the person's face\n cv2.imshow('frame', frame)\n\n # To stop, press 'q' for at least 100ms or 50 images are taken reach 50\n if (cv2.waitKey(100) & 0xFF == ord('q')) or count > 20:\n break\n\n \n\n # Ends camera\n camera.release()\n cv2.destroyAllWindows()", "title": "" }, { "docid": "870a5f3d08f73fcebc65112325f386b4", "score": "0.61341006", "text": "def detect_face(path):\n \n client = vision.ImageAnnotatorClient()\n\n with open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n\n response = client.face_detection(image=image)\n faces = response.face_annotations\n\n # Names of likelihood from google.cloud.vision.enums\n likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',\n 'LIKELY', 'VERY_LIKELY')\n print('Faces:')\n\n \"\"\"for face in faces:\n print('anger: {}'.format(likelihood_name[face.anger_likelihood]))\n print('joy: {}'.format(likelihood_name[face.joy_likelihood]))\n print('surprise: 
{}'.format(likelihood_name[face.surprise_likelihood]))\n print('sorrow: {}'.format(likelihood_name[face.sorrow_likelihood]))\n\n vertices = (['({},{})'.format(vertex.x, vertex.y)\n for vertex in face.bounding_poly.vertices])\n\n print('face bounds: {}'.format(','.join(vertices)))\"\"\"\n if len(faces) == 0:\n return ['UNKNOWN','UNKNOWN','UNKNOWN','UNKNOWN']\n face = faces[0]\n return [likelihood_name[face.anger_likelihood], likelihood_name[face.joy_likelihood], likelihood_name[face.surprise_likelihood], likelihood_name[face.sorrow_likelihood]]", "title": "" }, { "docid": "29bb8e05a72a2628f318ada4fbf135e3", "score": "0.61306244", "text": "def run_face_recognition(features, names):\n global VIDEO_CAPTURE, FLIP_IMAGE\n bFullScreen = 0 # 1: start in FullScreen mode\n \n # Open a handler for the camera\n # Start capturing the WebCam \n VIDEO_CAPTURE = cv2.VideoCapture(DEVICE_INDEX)\n\n VIDEO_CAPTURE.set(3,240);\n VIDEO_CAPTURE.set(4,240); \n\n # the face_recognitino library uses keys and values of your database separately\n known_face_encodings = list(features.values())\n known_face_names = list(names.values())\n \n hWnd = cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)\n cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN, bFullScreen)\n \n while VIDEO_CAPTURE.isOpened():\n \n # Grab a single frame of video (and check if it went ok)\n ok, frame = VIDEO_CAPTURE.read()\n if not ok:\n logging.error(\"Could not read frame from camera. Stopping video capture.\")\n break\n \n # run detection and embedding models\n if(FLIP_IMAGE):\n frame = cv2.flip(frame, +1);\n \n # run detection and embedding models\n face_locations, face_encodings = get_face_embeddings_from_image(frame, convert_to_rgb=True)\n # Loop through each face in this frame of video and see if there's a match\n for location, face_encoding in zip(face_locations, face_encodings):\n\n # get the distances from this encoding to those of all reference images\n distances = face_recognition.face_distance(known_face_encodings, face_encoding)\n\n # select the closest match (smallest distance) if it's below the threshold value\n if np.any(distances <= MAX_DISTANCE):\n best_match_idx = np.argmin(distances)\n name = known_face_names[best_match_idx]\n else:\n name = None\n\n # put recognition info on the image\n paint_detected_face_on_image(frame, location, name)\n\n # Display the resulting image\n cv2.imshow(WINDOW_NAME, frame)\n\n ch = cv2.waitKeyEx(1)\n # Test for fullscreen toggle (F11): Qt::Key_F11\n if ch == 0x7A0000: # 0x7A0000 = F11 on Windows\n bFullScreen = 1-bFullScreen\n cv2.setWindowProperty( WINDOW_NAME, cv2.WND_PROP_FULLSCREEN, bFullScreen)\n ch = ch & 0xFF\n if ch == 27:\n \t\t break\n if ch == ord(','):\n change_exposure(0)\n if ch == ord('.'):\n change_exposure(1)\n if ch == ord('-'):\n \t change_exposure(-1)\n if ch == ord('f'):\n \t\t FLIP_IMAGE = not FLIP_IMAGE\n if ch == ord('n'):\n \t\t change_cam(1)\n if ch == ord('p'):\n \t change_cam(-1)\n if ch == ord('q'): \n \t break \n\n # Release handle to the webcam\n VIDEO_CAPTURE.release()\n cv2.destroyAllWindows()", "title": "" }, { "docid": "75a7d7d3ae5bacba3376dad8d75fb910", "score": "0.6125993", "text": "def detect_faces(self, image):\r\n return self.face_detector(image, 1)", "title": "" }, { "docid": "455ff6367ef82b83b5061dcc60c2f6f0", "score": "0.61228395", "text": "def face_detection(frame, gray, faceCascade):\r\n\r\n faces = faceCascade.detectMultiScale(\r\n gray,\r\n scaleFactor=1.1,\r\n minNeighbors=5,\r\n minSize=(50, 50),\r\n flags=cv2.CASCADE_SCALE_IMAGE\r\n )\r\n 
for x, y, w, h in faces:\r\n return x, y, w, h", "title": "" }, { "docid": "4ef9e0ff7c70d5df94a1bc516f454cb5", "score": "0.6105469", "text": "def verify_faces(conf, learner, targets, faces, face_ids):\n #print(type(faces), len(faces))\n #print(type(faces[0]))\n #print(faces[0].size)\n START_TIME = time.time()\n results, score = learner.infer(conf, faces, targets, TTA)\n #frame = draw_box_name([0,0,0,0], '_{:.2f}'.format(score[0]), frame)\n '''\n for idx,bbox in enumerate(bboxes):\n if args.score:\n print(score[idx])\n frame = draw_box_name(bbox, '_{:.2f}'.format(score[idx]), frame)\n else:\n frame = draw_box_name(bbox, names[results[idx] + 1], frame)\n #'''\n #print(*list(zip(face_ids,results,score)), sep=\"\\n\")\n MIN_score = 999\n MIN_face_id = \"\"\n for face_id, result, score in zip(face_ids,results,score):\n if score < MIN_score:\n MIN_score = score \n MIN_face_id = face_id\n END_TIME = time.time()\n print(MIN_face_id, MIN_score, END_TIME - START_TIME)\n #cv2.imwrite(\"test_face_images/verify_result.png\", frame)\n\n \n min_face_image = faces[MIN_face_id]\n min_face_image = cv2.cvtColor(np.array(min_face_image), cv2.COLOR_RGB2BGR) \n #cv2.putText(min_face_image, str(MIN_score),(20,20), cv2.FONT_HERSHEY_SIMPLEX,0.5, (0,0,255), 1)\n '''\n cv2.imshow('min face verify', min_face_image)\n cv2.waitKey()\n cv2.destroyAllWindows() \n #'''\n\n return MIN_face_id, MIN_score", "title": "" }, { "docid": "6fac00d200a6b6dc7cd0883f586be98a", "score": "0.61035544", "text": "def faceMatch():\n\n # First thing is to import all the images, resize them, and compute their descriptors\n imagesWithFeatures = importCelebImages()\n\n # Then get our image, resize it and compute it's descriptor\n me_im = cv2.imread(myImagePath, 0)\n me_im = resizeImage(me_im)\n imSegments = breakImageApart(me_im)\n me_features = [computeLBPDescriptor(i) for i in tqdm(imSegments, desc=\"Finding features\")]\n\n # Then compare our fDescriptor with all the others! 
Will return index of image we want\n celebIndex = compareFeatures(me_features, imagesWithFeatures)\n images = os.listdir(path)\n celeb = images[celebIndex]\n celebrityImage = cv2.imread(path + \"/\" + celeb)\n\n # Finally display both the celebrity image and my image\n il.log(celebrityImage, \"Celebrity image\")\n il.log(cv2.imread(myImagePath), \"My Image\")", "title": "" }, { "docid": "936bdf467dadc0284f973e314ad9f659", "score": "0.60909516", "text": "def identify(self, faces):\n logger.log('Identifying...')\n candidates = []\n try:\n response = self.api.face.identify(faces, self.group.id)\n people = self.dictionarize(response)\n candidates = []\n for person in people:\n candidate = person['candidates'][0]['personId']\n candidates.append(candidate)\n except self.api.CognitiveFaceException as exception:\n logger.log(exception)\n finally:\n return candidates", "title": "" }, { "docid": "275b3032988a43c8c3b160db55cbfb61", "score": "0.6072692", "text": "def detect_faces(img, net):\n h, w, _ = img.shape # image dimensions\n blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0)) # normalized and resized blob object\n net.setInput(blob)\n detections = net.forward()\n face_locations = (detections[0, 0, detections[0, 0, :, 2] > 0.2][:, 3:7] * np.array([w, h, w, h])).astype(int) # face locations found\n return face_locations", "title": "" }, { "docid": "909fce1d8b6c90d632efff125e2ff3e7", "score": "0.60675424", "text": "def faceDetection(self, image, filename):\n # image, filename\n frame = image.copy()\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # Define the window size\n ws_r = 26\n ws_c = 26\n\n # Crop windows using a steps size\n x = []\n p1 = []\n p2 = []\n for r in range(0,gray.shape[0] - ws_r, 1):\n for c in range(0,gray.shape[1] - ws_c,1):\n p1.append([c, r])\n p2.append([c+ws_c, r+ws_r])\n window = gray[r:r+ws_r,c:c+ws_c]\n x.append(np.array(window))\n predictions = self.predict(x, thresh = 0.5)\n #predictions = VJ.predict(x)\n\n #Average positive predictions\n p1mean = (np.array(p1)[np.array(predictions) == 1]).mean(axis=0).astype(np.int)\n p2mean = (np.array(p2)[np.array(predictions) == 1]).mean(axis=0).astype(np.int)\n cv2.rectangle(frame, tuple(p1mean), tuple(p2mean), (0,255,255), 2)\n # Export\n cv2.imwrite(os.path.join(\"output\", filename + '.png'), frame)", "title": "" }, { "docid": "85b0a4cbbd68210673cb3d5816644405", "score": "0.6023636", "text": "def detect(self,frames, faceRect=False):\n\n\t\tfaceIMG = frames['worldBW'];\n\t\tfaceRects = self.classifyFace(faceIMG);\n\t\t\t\n\t\treturn faceRects;\n\n\t\t# # Data structure to hold frame info\n\t\t# rects = {\n\t\t# \t'face': np.array([],dtype=np.int32)\n\t\t# };\n\t\t\n\t\t# # Detect face if old faceRect not provided\n\t\t# if faceRect is False or len(faceRect) is 0:\n\t\t# \tfaceIMG = frames['worldBW'];\n\t\t# \tfaceRects = self.classifyFace(faceIMG);\n\t\t\t\n\t\t# \treturn faceRects;\n\n\t\t# \t# Ensure a single face found\n\t\t# \tif len(faceRects) is 1:\n\t\t# \t\tfaceRect = faceRects[0];\n\t\t# \telse:\n\t\t# \t\t# TODO throw error message\n\t\t# \t\tprint \"No Faces / Multiple Faces Found!\";\n\t\t# \t\treturn rects;\n\t\t\t\n\t\t# rects['face'] = faceRect;", "title": "" }, { "docid": "e99e90d56309c2cb1f06e593e19df4ed", "score": "0.60148275", "text": "def _event_detected(self):\n event_detected = str(self.mem.getData(\"FaceDetected\"))\n if event_detected == \"[]\":\n print \"False\"\n self._flag_event = 0\n else:\n print \"True\"\n self._flag_event = 1\n self._flag = 
True", "title": "" }, { "docid": "cf15b70eba200be1db57ee70acd20734", "score": "0.6009485", "text": "def detect_faces(path):\n from google.cloud import vision\n\n client = vision.ImageAnnotatorClient()\n\n # [START vision_python_migration_face_detection]\n # [START vision_python_migration_image_file]\n with open(path, \"rb\") as image_file:\n content = image_file.read()\n\n image = vision.Image(content=content)\n # [END vision_python_migration_image_file]\n\n response = client.face_detection(image=image)\n faces = response.face_annotations\n\n # Names of likelihood from google.cloud.vision.enums\n likelihood_name = (\n \"UNKNOWN\",\n \"VERY_UNLIKELY\",\n \"UNLIKELY\",\n \"POSSIBLE\",\n \"LIKELY\",\n \"VERY_LIKELY\",\n )\n print(\"Faces:\")\n\n for face in faces:\n print(f\"anger: {likelihood_name[face.anger_likelihood]}\")\n print(f\"joy: {likelihood_name[face.joy_likelihood]}\")\n print(f\"surprise: {likelihood_name[face.surprise_likelihood]}\")\n\n vertices = [\n f\"({vertex.x},{vertex.y})\" for vertex in face.bounding_poly.vertices\n ]\n\n print(\"face bounds: {}\".format(\",\".join(vertices)))\n\n if response.error.message:\n raise Exception(\n \"{}\\nFor more info on error messages, check: \"\n \"https://cloud.google.com/apis/design/errors\".format(response.error.message)\n )\n # [END vision_python_migration_face_detection]", "title": "" }, { "docid": "d86cba042b91f6417b2b2b593796a25b", "score": "0.5971045", "text": "def OpenCV_face_detector(self):\n face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')\n img = cv2.imread(self.img_path) #BGR image\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray)\n return faces, img", "title": "" }, { "docid": "3c34222acf5f0b778937844d733778ee", "score": "0.5959797", "text": "def detect_faces(path):\r\n client = vision.ImageAnnotatorClient()\r\n\r\n with io.open(path, 'rb') as image_file:\r\n content = image_file.read()\r\n\r\n image = types.Image(content=content)\r\n\r\n response = client.face_detection(image=image)\r\n faces = response.face_annotations\r\n\r\n # Names of likelihood from google.cloud.vision.enums\r\n likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',\r\n 'LIKELY', 'VERY_LIKELY')\r\n\r\n return faces", "title": "" }, { "docid": "c0b5bba52fa726b8965fb383dcfe7aba", "score": "0.59568375", "text": "def recognize_faces(img_path=None, img_url=None):\n\n try:\n assert (bool(img_path and img_url) is False)\n assert (bool(img_path or img_url) is True)\n except AssertionError:\n print(\"One and only one of the img_path or img_url should be specified\")\n\n if img_path:\n with open(img_path, 'rb') as img_f:\n analyze_images = service_v4.analyze(collection_ids=face_detection_CID,\n features=AnalyzeEnums.Features.OBJECTS.value,\n images_file=[FileWithMetadata(img_f)]\n ).get_result()\n else:\n analyze_images = service_v4.analyze(collection_ids=face_detection_CID,\n features=AnalyzeEnums.Features.OBJECTS.value,\n image_url=[img_url]\n ).get_result()\n # return analyze_images\n #print(analyze_images)\n return format_face_coords(analyze_images)", "title": "" }, { "docid": "a81ca38963fb8471e75dd7de7a27b2b1", "score": "0.59487414", "text": "def detect_human(img: np.ndarray) -> bool:\n _, mask1 = detect_face(img, osp.join(CURRENT_DIR, \"haarcascades/haarcascade_frontalface_default.xml\"))\n _, mask2 = detect_eye(img, osp.join(CURRENT_DIR, \"haarcascades/haarcascade_eye.xml\"))\n\n dst = cv2.bitwise_and(mask1, mask2)\n humanIsin = np.sum(dst) > 0\n return 
humanIsin", "title": "" }, { "docid": "08189b7b3b431cc28bbca689e5c869a2", "score": "0.59307903", "text": "def _detect_faces(self, image, all=False):\n resized_image = cv2.resize(image, (300, 300))\n image_blob = cv2.dnn.blobFromImage(resized_image, 1.0, (300, 300),\n mean_subtract_values, swapRB=False, crop=False)\n self.detector.setInput(image_blob)\n detections = self.detector.forward()\n num_detections = detections.shape[2]\n return num_detections, detections", "title": "" }, { "docid": "af1b0c947db321e740835fb34a215f63", "score": "0.5920692", "text": "def detect_and_track_largest_face():\n # Open the first webcame device\n capture = cv2.VideoCapture(0)\n name = ''\n\n # Create two opencv named windows\n cv2.namedWindow(\"base-image\", cv2.WINDOW_AUTOSIZE)\n cv2.namedWindow(\"result-image\", cv2.WINDOW_AUTOSIZE)\n\n # Position the windows next to eachother\n cv2.moveWindow(\"base-image\", 0, 200)\n cv2.moveWindow(\"result-image\", 500, 200)\n\n # Start the window thread for the two windows we are using\n cv2.startWindowThread()\n\n # Create the tracker we will use\n tracker = dlib.correlation_tracker()\n\n # The variable we use to keep track of the fact whether we are\n # currently using the dlib tracker\n tracking_face = 0\n frame_count = 0\n frame_interval_to_detect = 200\n\n # The color of the rectangle we draw around the face\n rectangle_color = (0, 0, 255)\n\n try:\n while True:\n # Retrieve the latest image from the webcam\n rc, full_size_base_image = capture.read()\n\n # Resize the image to 320x240\n #base_image = cv2.resize(full_size_base_image, (320, 240))\n\n # Check if a key was pressed and if it was Q, then destroy all\n # opencv windows and exit the application\n pressed_key = cv2.waitKey(2)\n if pressed_key == ord('Q'):\n cv2.destroyAllWindows()\n exit(0)\n\n frame_count += 1\n\n base_image = full_size_base_image.copy()\n result_image = base_image.copy()\n #print(frame_count)\n # If we are not tracking a face, then try to detect one\n if not tracking_face or frame_count % frame_interval_to_detect == 1:\n\n faces = detector(sharpening(base_image), 1) #image processing\n # In the console we can show that only now we are\n # using the detector for a face\n #print(\"Using the cascade detector to detect face\")\n\n # For now, we are only interested in the 'largest'\n # face, and we determine this based on the largest\n # area of the found rectangle. First initialize the\n # required variables to 0\n max_area = 0\n x = 0\n y = 0\n w = 0\n h = 0\n\n # Loop over all faces and check if the area for this\n # face is the largest so far\n # We need to convert it to int here because of the\n # requirement of the dlib tracker. 
If we omit the cast to\n # int here, you will get cast errors since the detector\n # returns numpy.int32 and the tracker requires an int\n\n for d in faces:\n if d.right()*d.bottom() > max_area:\n x = int(d.left())\n y = int(d.top())\n w = int(d.right())\n h = int(d.bottom())\n max_area = w*h\n shape = sp(base_image, d)\n # Calculate encodings of the face detected\n face_descriptor = list(facerec.compute_face_descriptor(base_image, shape))\n face_encoding = [np.array(face_descriptor)]\n name = L2_distance(face_encoding)\n\n # If one or more faces are found, initialize the tracker\n # on the largest face in the picture\n if max_area > 0:\n\n # Initialize the tracker\n tracker.start_track(base_image, dlib.rectangle(x, y, w, h))\n\n # Set the indicator variable such that we know the\n # tracker is tracking a region in the image\n tracking_face = 1\n\n # Check if the tracker is actively tracking a region in the image\n if tracking_face:\n\n # Update the tracker and request information about the\n # quality of the tracking update\n tracking_quality = tracker.update(base_image)\n\n # If the tracking quality is good enough, determine the\n # updated position of the tracked region and draw the\n # rectangle\n if tracking_quality >= 8.75:\n tracked_position = tracker.get_position()\n\n t_x = int(tracked_position.left())\n t_y = int(tracked_position.top())\n t_w = int(tracked_position.width())\n t_h = int(tracked_position.height())\n cv2.rectangle(result_image, (t_x, t_y), (t_x + t_w, t_y + t_h), rectangle_color,2)\n cv2.rectangle(result_image, (t_x, t_y-20), (t_x+t_h, t_y), rectangle_color, cv2.FILLED)\n cv2.putText(result_image, name, (t_x, t_y - 10), cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255), 1)\n else:\n # If the quality of the tracking update is not\n # sufficient (e.g. 
the tracked region moved out of the\n # screen) we stop the tracking of the face and in the\n # next loop we will find the largest face in the image\n # again\n tracking_face = 0\n\n # Since we want to show something larger on the screen than the\n # original 320x240, we resize the image again\n # Note that it would also be possible to keep the large version\n # of the baseimage and make the result image a copy of this large\n # base image and use the scaling factor to draw the rectangle\n # at the right coordinates.\n large_result = cv2.resize(result_image, (OUTPUT_SIZE_WIDTH, OUTPUT_SIZE_HEIGHT))\n\n # Finally, we want to show the images on the screen\n cv2.imshow(\"base-image\", base_image)\n cv2.imshow(\"result-image\", large_result)\n if cv2.waitKey(1) & 0xff == ord('q'):\n break\n except all:\n pass\n capture.release()\n cv2.destroyAllWindows()", "title": "" }, { "docid": "729c927ecfc20ceb4f1128e9595c99d6", "score": "0.5920203", "text": "def record_user_face(self,username):\n\n # Create a new folder for the specific user\n folder = \"./{}/{}\".format(self.root_folder,username)\n if not os.path.exists(folder):\n os.makedirs(folder)\n # Start the camera\n cam = cv2.VideoCapture(0)\n # Set video width\n cam.set(3, 640)\n # Set video height\n cam.set(4, 480)\n # Get the pre-built classifier that had been trained on 3 million faces\n face_detector = cv2.CascadeClassifier(self.classifier)\n img_counter = 0\n while img_counter < 10:\n try:\n key = input(\"Press 'q' to quit or 'c' to continue: \")\n if key == \"q\":\n break\n if key == \"c\":\n ret, frame = cam.read()\n if not ret:\n break\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = face_detector.detectMultiScale(gray, 1.3, 5)\n if(len(faces) == 0):\n print(\"No face detected, please try again\")\n continue\n for (x, y, w, h) in faces:\n cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\n img_name = \"{}/{:04}.jpg\".format(folder, img_counter)\n cv2.imwrite(img_name, frame[y : y + h, x : x + w])\n print(\"{} written!\".format(img_name))\n img_counter += 1\n else:\n raise InvalidOptionError()\n except InvalidOptionError as ioe:\n print(ioe)\n cam.release()", "title": "" }, { "docid": "072e51084d0859d8b6b4dbd0e4655af9", "score": "0.5912538", "text": "def detect(frame):\n\n # construct a blob from frame\n blob = cv.dnn.blobFromImage(frame, 1.0, (300, 300), (104.0, 177.0, 123.0))\n\n # Pass the blob through the network and obtain the face detections\n faceNet.setInput(blob)\n detections = faceNet.forward()\n\n # Initialize list of faces, their corresponding locations,\n # and the list of predictions from our face mask network.\n faces = []\n locs = []\n preds = []\n\n\n # Loop over the face detections\n for i in range(detections.shape[2]):\n\n # Extract the confidence (i.e., probability) associated with the detection\n confidence = detections[0, 0, i, 2]\n\n # Filter out weak detections\n if confidence > FACE_CONFIDENCE:\n\n # Compute the (x, y)-coordinates of the bounding box for the object\n box = detections[0, 0, i, 3:7] * np.array([FRAME_WIDTH, FRAME_HEIGHT, FRAME_WIDTH, FRAME_HEIGHT])\n (startX, startY, endX, endY) = box.astype(\"int\")\n\n # Ensure the bounding boxes fall within the dimensions of the frame\n (startX, startY) = (max(0, startX), max(0, startY))\n (endX, endY) = (min(FRAME_WIDTH - 1, endX), min(FRAME_HEIGHT - 1, endY))\n\n # Extract the face ROI and do some preprocessing\n face = frame[startY:endY, startX:endX]\n face = cv.cvtColor(face, cv.COLOR_BGR2RGB)\n face = cv.resize(face, (128,128), 
cv.INTER_AREA)\n face = image.img_to_array(face)\n face /= 255\n\n # Add the face and bounding boxes to their respective lists\n faces.append(face)\n locs.append((startX, startY, endX, endY))\n\n\n # Predictions can be done only when atleast one face is detected\n if len(faces) > 0:\n\n # For faster inference, prediction is performed on *all* faces\n # in batch mode rather than one-by-one predictions.\n faces = np.array(faces, dtype=\"float32\")\n preds = maskNet.predict(faces, batch_size=32)\n\n\n # Now draw rectangular bounding boxes and\n # print some relevant info in the frame\n\n nums = len(locs)\n count_violations = 0\n\n for i in range(nums):\n\n # Check if mask detected for corresponding face\n # and set color and text for bounding boxes accordingly\n\n # Get current prediction for face-mask\n pred = preds[i][0]\n\n if pred <= MASK_CONFIDENCE: # No mask detected\n\n color = (32,0,176) # Material Red\n\n text = \"no mask: {:.2f}%\".format((1-pred)*100)\n\n # Increase the count\n count_violations += 1\n\n else: # Mask detected\n\n color = (255,54,3) # Material Green\n\n text = \"mask: {:.2f}%\".format(pred*100)\n\n # Get the box coordinates\n (startX, startY, endX, endY) = locs[i]\n\n # Draw box around face\n cv.rectangle(frame, (startX, startY), (endX, endY), color, 2)\n\n # Write text above box\n cv.putText(frame, text, (startX, startY - 10),\n cv.FONT_HERSHEY_SIMPLEX, 0.45, color, 1, cv.LINE_AA)\n\n\n # At last, print total number of 'no mask' faces at the bottom\n cv.putText(frame,\n f\"Total violations: {count_violations}\",\n (10, FRAME_HEIGHT - 10),\n cv.FONT_HERSHEY_SIMPLEX,\n 1, (30,81,244), 2,\n cv.LINE_AA)", "title": "" }, { "docid": "6a369779e01ed62d3904c73962c7bdf5", "score": "0.5908499", "text": "def recognize_faces(ctx: rs.ContextWrapper):\n\n face_filter: FaceOracleFilter = ctx[prop_face_filter]\n faces: Faces = ctx[prop_subscribe_faces]\n\n # Push faces to face filter\n best_guess_changed = face_filter.push_message(faces)\n if best_guess_changed:\n current_best_guess: Person = face_filter.current_best_guess\n\n onto: Ontology = mem.get_ontology()\n sess: Session = mem.get_session()\n\n person_node = Node(metatype=onto.get_type(\"Person\"))\n\n best_guess_id = current_best_guess.id\n face_vector = current_best_guess.face_vector\n if current_best_guess.is_known:\n\n person_node_query = sess.retrieve(node_id=best_guess_id)\n if person_node_query:\n person_node = person_node_query[0]\n else:\n err_msg = \"Person with id %s is not found in memory.\" % best_guess_id\n logger.error(err_msg)\n return\n else:\n person_node.set_properties({\n 'face_vector': face_vector,\n 'name': interloc.ANON_INTERLOC_ID\n })\n\n push = False\n\n # Check if there is any interlocutor. 
If necessary and pop the current node and push person node\n # instead.\n if any(ctx.enum(interloc.prop_all)):\n interloc_node: Node = ctx[f'interloc:all:{interloc.ANON_INTERLOC_ID}']\n\n # If interloc and the person nodes are not same pop and push person node.\n if not (interloc_node.get_id() == person_node.get_id()) or interloc_node.get_id() < 0:\n # Remove the current interloc\n logger.info('Popping current interlocutor')\n popped_node = ctx.pop(f'interloc:all:{interloc.ANON_INTERLOC_ID}')\n assert popped_node == True\n push = True\n else:\n # Update the face vector of the person already familiar with\n save_face(ctx, interloc_node.get_id(), current_best_guess.face_vector)\n else:\n push = True\n if push:\n # Push the new interlocutor\n ctx.push(\n parent_property_or_path=interloc.prop_all,\n child=rs.Property(\n name=interloc.ANON_INTERLOC_ID,\n default_value=person_node))\n logger.info(f\"Pushed node with id {person_node.id} to interloc:all\")", "title": "" }, { "docid": "5b2519b56c414e49ac21e0432e3919af", "score": "0.5898897", "text": "def detect_faces_uri(uri):\n from google.cloud import vision\n client = vision.ImageAnnotatorClient()\n image = vision.types.Image()\n image.source.image_uri = uri\n \n response = client.face_detection(image=image)\n faces = response.face_annotations\n \n # Names of likelihood from google.cloud.vision.enums\n likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',\n 'LIKELY', 'VERY_LIKELY')\n print('Faces:')\n \n for face in faces:\n \n vertices = (['({},{})'.format(vertex.x, vertex.y)\n for vertex in face.bounding_poly.vertices])\n \n print('face bounds: {}'.format(','.join(vertices)))\n return vertices\n\n if response.error.message:\n raise Exception(\n '{}\\nFor more info on error messages, check: '\n 'https://cloud.google.com/apis/design/errors'.format(\n response.error.message))", "title": "" }, { "docid": "8303b235f706885419c6691c717015d7", "score": "0.58855623", "text": "def verify_faces(first_id, second_id):\r\n global CF\r\n result = CF.face.verify(first_id, second_id)\r\n if result.get('isIdentical') == True:\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "20cf8502e74b7c52452877fefef4da22", "score": "0.58717597", "text": "def recognize_face_base(face, face_models, params=None, show_results=False):\r\n\r\n fm = face_models\r\n if face_models is None:\r\n fm = FaceModels(params)\r\n\r\n start_time = cv2.getTickCount()\r\n\r\n [label, confidence] = fm.model.predict(np.asarray(face, dtype=np.uint8))\r\n\r\n tag = fm.get_tag(label)\r\n\r\n # TEST ONLY\r\n print \"Predicted tag = %s (confidence=%.2f)\" % (tag, confidence)\r\n\r\n rec_time_in_clocks = cv2.getTickCount() - start_time\r\n rec_time_in_seconds = rec_time_in_clocks / cv2.getTickFrequency()\r\n\r\n # Populate dictionary with label, confidence and elapsed CPU time\r\n result = {c.ELAPSED_CPU_TIME_KEY: rec_time_in_seconds, c.ERROR_KEY: '',\r\n c.ASSIGNED_LABEL_KEY: label, c.ASSIGNED_TAG_KEY: tag,\r\n c.CONFIDENCE_KEY: confidence}\r\n\r\n if show_results:\r\n cv2.imshow(str(label), face)\r\n cv2.waitKey(0) \r\n\r\n return result", "title": "" }, { "docid": "de56f6980994f2fdf73dbf8b4254bd68", "score": "0.58600426", "text": "def test_face_detection():\n face_detector = MTCNN(margin=14, keep_all=True, factor=0.5, device=device).eval()\n img = cv2.imread(\"../testdata/uaspniazcl_000.jpg\")\n img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n frame_draw = img.copy()\n draw = ImageDraw.Draw(frame_draw)\n boxes, scores = face_detector.detect(img)\n 
for box in boxes:\n draw.rectangle(box.tolist(), outline=(255, 0, 0), width=6)\n frame_draw.save(\"../testdata/uaspniazcl_000_face.jpg\")\n print(boxes)\n print(scores)", "title": "" }, { "docid": "b798a717dc70c7d4b67a179580b15cb2", "score": "0.58526117", "text": "def FaceDetect(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "296fc05d51c5ad50d113869acf6fecb4", "score": "0.5835606", "text": "def detect_faces_uri(uri):\n from google.cloud import vision\n\n client = vision.ImageAnnotatorClient()\n # [START vision_python_migration_image_uri]\n image = vision.Image()\n image.source.image_uri = uri\n # [END vision_python_migration_image_uri]\n\n response = client.face_detection(image=image)\n faces = response.face_annotations\n\n # Names of likelihood from google.cloud.vision.enums\n likelihood_name = (\n \"UNKNOWN\",\n \"VERY_UNLIKELY\",\n \"UNLIKELY\",\n \"POSSIBLE\",\n \"LIKELY\",\n \"VERY_LIKELY\",\n )\n print(\"Faces:\")\n\n for face in faces:\n print(f\"anger: {likelihood_name[face.anger_likelihood]}\")\n print(f\"joy: {likelihood_name[face.joy_likelihood]}\")\n print(f\"surprise: {likelihood_name[face.surprise_likelihood]}\")\n\n vertices = [\n f\"({vertex.x},{vertex.y})\" for vertex in face.bounding_poly.vertices\n ]\n\n print(\"face bounds: {}\".format(\",\".join(vertices)))\n\n if response.error.message:\n raise Exception(\n \"{}\\nFor more info on error messages, check: \"\n \"https://cloud.google.com/apis/design/errors\".format(response.error.message)\n )", "title": "" }, { "docid": "d8f8dff3d665209965f22a90167c407c", "score": "0.583531", "text": "def paint_detected_face_on_image(frame, location, name=None):\n # unpack the coordinates from the location tuple\n top, right, bottom, left = location\n print(location)\n\n if name is None:\n name = 'Unknown'\n color = (0, 0, 255) # red for unrecognized face\n else:\n color = (0, 128, 0) # dark green for recognized face\n\n # Draw a box around the face\n cv2.rectangle(frame, (left, top), (right, bottom), color, 2)\n\n # Draw a label with a name below the face\n cv2.rectangle(frame, (left, bottom - 35), (right, bottom), color, cv2.FILLED)\n cv2.putText(frame, name, (left + 6, bottom - 6), cv2.FONT_HERSHEY_DUPLEX, 1.0, (255, 255, 255), 1)", "title": "" }, { "docid": "7b7d6b11931f4e471f291110a3badf53", "score": "0.58195627", "text": "def paint_detected_face_on_image(frame, location, name=None):\n # unpack the coordinates from the location tuple\n top, right, bottom, left = location\n\n if name is None:\n name = 'Unknown'\n color = (0, 0, 255) # red for unrecognized face\n else:\n color = (0, 128, 0) # dark green for recognized face\n\n # Draw a box around the face\n cv2.rectangle(frame, (left, top), (right, bottom), color, 2)\n\n # Draw a label with a name below the face\n cv2.rectangle(frame, (left, bottom - 35), (right, bottom), color, cv2.FILLED)\n cv2.putText(frame, name, (left + 6, bottom - 6), cv2.FONT_HERSHEY_DUPLEX, 1.0, (255, 255, 255), 1)", "title": "" }, { "docid": "fa20aa85a0049155b8e6a0ba36c48b73", "score": "0.5818267", "text": "def find_face_locations(file_stream, out_dir=None):\n img_src = None\n face_locations = []\n cropped_images = []\n\n # Load image content.\n image = face_recognition.load_image_file(file_stream)\n\n # Detect face locations\n face_locations = face_recognition.face_locations(image)\n print(\"Found {} face(s) in this 
photograph.\".format(len(face_locations)))\n\n for face_location in face_locations:\n # Print the location of each face in this image\n top, right, bottom, left = face_location\n print(\"A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}\".format(top, left, bottom, right))\n\n #######################################################################\n # You can access the actual face itself like this:\n face_image = image[top:bottom, left:right]\n cropped_img = Image.fromarray(face_image)\n if out_dir is not None:\n cropped_id = uuid.uuid4().hex\n fname = 'cropped-{img_id}.jpg'.format(img_id=cropped_id)\n fpath = os.path.join(out_dir, fname)\n cropped_img.save(fpath)\n print('Saving cropped to: %s' % fpath)\n cropped_images.append(fname)\n\n #######################################################################\n # Show rectangles and legend\n for k, face_location in enumerate(face_locations):\n # Print the location of each face in this image\n top, right, bottom, left = face_location\n face_name = 'Face #%d' % k\n\n # Draw a box around the face\n cv2.rectangle(image, (left, top), (right, bottom), (0, 0, 255), 2)\n\n # Draw a label with a name below the face\n cv2.rectangle(image, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)\n font = cv2.FONT_HERSHEY_DUPLEX\n cv2.putText(image, face_name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)\n\n # Save final annotated image\n if out_dir is not None:\n img_id = uuid.uuid4().hex\n img_src = 'annot-{img_id}.jpg'.format(img_id=img_id)\n fpath = os.path.join(out_dir, img_src)\n Image.fromarray(image).save(fpath)\n print('Saving final image to: %s' % fpath)\n\n return {\n \"face_locations\": face_locations,\n \"cropped_imgs\": cropped_images,\n \"img_src\": img_src # modified image path\n }", "title": "" }, { "docid": "da86c82a48b55f2493864cb266573cbe", "score": "0.5805979", "text": "def recognise():\n\n #keep track of attempts. 
Too many attempts will trigger a return.\n attempts = 0\n # load the known faces and embeddings\n print(\"[INFO] loading encodings...\")\n data = pickle.loads(open(args[\"encodings\"], \"rb\").read())\n\n # initialize the video stream and pointer to output video file, then\n # allow the camera sensor to warm up\n #Using a for loop to loop through devices, as a precaution to \n #system related issues, whereby a resource might be allocated a different\n #device number for various reasons.\n print(\"[INFO] starting video stream...\")\n for x in range(0, 2):\n for x in range(0, 20):\n vs = VideoStream(src=x).start()\n if str(vs.read()) != \"None\":\n break\n if str(vs.read()) != \"None\":\n break\n writer = None\n\n #Faint green to indicate starting process\n SenseHat.set_pixels(loading)\n time.sleep(2.0)\n\n # loop over frames from the video file stream\n while True:\n SenseHat.set_pixels(working)\n if attempts >35:\n SenseHat.set_pixels(failed)\n time.sleep(1.5)\n SenseHat.clear()\n cv2.destroyAllWindows()\n vs.stop()\n return {'code' : 400}\n attempts += 1\n # grab the frame from the threaded video stream\n frame = vs.read()\n\n # convert the input frame from BGR to RGB then resize it to have\n # a width of 750px (to speedup processing)\n rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n rgb = imutils.resize(frame, width=args[\"resolution\"])\n r = frame.shape[1] / float(rgb.shape[1])\n\n # detect the (x, y)-coordinates of the bounding boxes\n # corresponding to each face in the input frame, then compute\n # the facial embeddings for each face\n boxes = face_recognition.face_locations(rgb,\n model=args[\"detection_method\"])\n encodings = face_recognition.face_encodings(rgb, boxes)\n names = []\n\n # loop over the facial embeddings\n for encoding in encodings:\n # attempt to match each face in the input image to our known\n # encodings\n matches = face_recognition.compare_faces(data[\"encodings\"],\n encoding)\n name = \"Unknown\"\n\n # check to see if we have found a match\n if True in matches:\n # find the indexes of all matched faces then initialize a\n # dictionary to count the total number of times each face\n # was matched\n matchedIdxs = [i for (i, b) in enumerate(matches) if b]\n counts = {}\n\n # loop over the matched indexes and maintain a count for\n # each recognized face face\n for i in matchedIdxs:\n\n # name = data[\"names\"][i].split(\"-\")[0].replace(\"_\", \" \")\n name = data[\"names\"][i]\n counts[name] = counts.get(name, 0) + 1\n\n # determine the recognized face with the largest number\n # of votes (note: in the event of an unlikely tie Python\n # will select first entry in the dictionary)\n name = max(counts, key=counts.get)\n\n # update the list of names\n names.append(name)\n\n # loop over the recognized faces\n for ((top, right, bottom, left), name) in zip(boxes, names):\n # rescale the face coordinates\n top = int(top * r)\n right = int(right * r)\n bottom = int(bottom * r)\n left = int(left * r)\n \n # print to console, identified person\n personFound = 'Person found: {}'.format(name)\n print(personFound) \n cv2.destroyAllWindows()\n vs.stop()\n\n # check to see if the video writer point needs to be released\n if writer is not None:\n writer.release()\n SenseHat.clear()\n cv2.destroyAllWindows()\n vs.stop()\n return {'code' : 200, 'identified' : name}\n\n # if the video writer is None *AND* we are supposed to write\n # the output video to disk initialize the writer\n if writer is None and args[\"output\"] is not None:\n fourcc = cv2.VideoWriter_fourcc(*\"MJPG\")\n 
writer = cv2.VideoWriter(args[\"output\"], fourcc, 20, (frame.shape[1], frame.shape[0]), True)\n\n # if the writer is not None, write the frame with recognized\n # faces to disk\n if writer is not None:\n writer.write(frame)\n\n # check to see if we are supposed to display the output frame to\n # the screen\n if args[\"display\"] > 0:\n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1) & 0xFF\n\n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n break\n\n # do a bit of cleanup\n cv2.destroyAllWindows()\n vs.stop()\n\n # check to see if the video writer point needs to be released\n if writer is not None:\n writer.release()", "title": "" }, { "docid": "386486d6cdf966cf7ad511c70be1ceff", "score": "0.5795668", "text": "def takeSamples() -> None:\n # Open camera\n cam: cv2.VideoCapture = cv2.VideoCapture(0, cv2.CAP_DSHOW)\n # Set video width and height\n cam.set(3, 640)\n cam.set(4, 480)\n\n # Create the haar cascade classifier\n faceDetector: cv2.CascadeClassifier = cv2.CascadeClassifier(cv2.data.haarcascades\n + HAARCASCADE_FRONTALFACE)\n\n username: str = input(\"User name: \")\n\n print(\"\\nInitializing face capture...\")\n\n # Initialize individual sampling face count\n count: int = 0\n\n while count < 30:\n # Take photo\n ret, img = cam.read()\n\n if ret:\n # Flip video image vertically\n img = cv2.flip(img, -1)\n # To gray scale\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # Detect faces\n faces = faceDetector.detectMultiScale(gray, 1.3, 5)\n\n for (x, y, w, h) in faces:\n cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)\n count += 1\n\n # Save the captured image into the datasets folder\n cv2.imwrite(f\"dataset/{username}_{count}.jpg\", gray[y:y+h, x:x+w])\n cv2.imshow(\"Image\", img)\n\n # Press \"ESC\" for exiting\n #key = cv2.waitKey(100)\n\n # if key == 27:\n # count = 30\n\n cam.release()\n cv2.destroyAllWindows()\n print(\"\\nFinished...\")", "title": "" }, { "docid": "69e11ffef726edc5e4f7ceaf0737db28", "score": "0.5791926", "text": "def checkCamera():\n global CAMERA\n try:\n if CAMERA:\n CAMERA.poll()\n if CAMERA.returncode is None: \n return True\n else:\n killCamera()\n LOGGER.error(\"pikrellcam was started in this process but is no longer running.\")\n elif CAMERA_PROCESS_NAME in sub.check_output(['ps', '-A'], shell=True):\n # LOGGER.info(\"pikrellcam is running in seperate process.\")\n return True\n return False\n except Exception as e:\n LOGGER.error(str(e))\n raise RuntimeError(\"failed to check camera.\");", "title": "" }, { "docid": "12655f76ce9dd0889f23895d3bcca44d", "score": "0.5790958", "text": "def detectFaces(image, cascadeFile=\"haarcascade_frontalface_default.xml\"):\r\n image = image.astype(\"uint8\")\r\n faceCascade = cv2.CascadeClassifier(cascadeFile)\r\n faces = faceCascade.detectMultiScale(image,\r\n scaleFactor=1.025,\r\n minNeighbors=5,\r\n minSize=(30, 30)\r\n )\r\n\r\n print(\"Found {} faces !\".format(len(faces)))\r\n return faces", "title": "" }, { "docid": "d61e33bd191ea8aaab70d6b76e54bdc1", "score": "0.5785798", "text": "def hog_detection(img, max_faces=100, up_sampling=1):\n global _dlib_detector\n\n if not max_faces:\n max_faces = 100\n if not up_sampling:\n up_sampling = 1\n\n if not _dlib_detector:\n _dlib_detector = dlib.get_frontal_face_detector()\n\n if not _dlib_detector:\n print('ERROR: Modules.FaceLocalisator: Failed to instantiate dlib detector.')\n sys.exit(0)\n\n faces = _dlib_detector(img, up_sampling)\n print(len(faces), 'faces detected')\n\n face_list = list()\n for face in faces:\n 
face_list.append(img[face.top():face.bottom(), face.left():face.right()])\n\n if len(faces) > max_faces:\n _select_n_faces(face_list, max_faces)\n\n return face_list", "title": "" }, { "docid": "a9d8e7c8605907c622507413d2e175d2", "score": "0.5780105", "text": "def test_redetect_one_with_detection_option(self):\n for detector in self.detectors:\n with self.subTest(detectorType=detector.detectorType):\n detection = detector.detectOne(image=VLIMAGE_ONE_FACE)\n redetect = detector.redetectOne(image=VLIMAGE_ONE_FACE, bBox=detection)\n self.assertFaceDetection(redetect, VLIMAGE_ONE_FACE)", "title": "" }, { "docid": "a560cadd62b88c52d598a1bf57a72d16", "score": "0.57306224", "text": "def are_enough_faces(self):\n existing_faces = 0\n for (_, dirs) in os.walk(self.face_dir):\n for _ in dirs:\n existing_faces += 1\n\n return existing_faces > 1", "title": "" }, { "docid": "144e28b122c9ee4690f8e9a493c46a7f", "score": "0.5718201", "text": "def test_batch_detect_with_image_without_humans(self):\n imageWithoutFace = VLImage.load(filename=NO_FACES)\n detection = self.detector.detect(images=[imageWithoutFace])\n assert 0 == len(detection[0])", "title": "" }, { "docid": "54cce0aaf7b29f2c1c792fdfa9afb202", "score": "0.57157654", "text": "def recognize_face(inputFace, k=100):\n path = os.path.realpath(__file__).split(\"/\")\n path = path[0:len(path) - 1]\n path = \"/\".join(path)\n path += \"/../faces\"\n vectors, covar, avg = cel.covariance(path)\n cel.vectorToImage(avg)\n transVec = vectors.T\n #pca, newEvecs = cel.eigenStuff(vectors, covar, k)\n\n currdir = os.listdir()\n curr_k = len(np.genfromtxt(\"eigen_matrix.csv\", delimiter=',').T)\n face_count = cel.count_faces()\n if \"eigen_matrix.csv\" not in currdir or face_count != len(\n transVec) or k != curr_k:\n print(\"Eigenmatrix is not correct, creating eigenmatrix\")\n cel.eigenStuff(vectors, covar, k)\n newEvecs = np.genfromtxt(\"eigen_matrix.csv\", delimiter=',')\n\n in_vec = cel.read_image(inputFace)\n in_weight = cel.find_weight(newEvecs, in_vec, avg)\n weights = np.array([cel.find_weight(newEvecs, x) for x in transVec])\n\n err = [np.linalg.norm(in_weight - row) for row in weights]\n index = np.argmin(err)\n min_err = err[index]\n print(\"Min Err:\", min_err)\n if min_err < 0.07:\n print(\"This face exists in our database\")\n elif min_err < 0.14:\n print(\n \"This image is most likely a face but does not exist in our database\"\n )\n else:\n print(\"Please make sure the input image is a face\")\n # cel.vectorToImage(in_vec)\n #cel.vectorToImage(cel.reconstruct(newEvecs, in_weight, avg))\n # cel.vectorToImage(transVec[index] + avg)\n [cel.vectorToImage(x) for x in newEvecs.T[:k]]", "title": "" }, { "docid": "6513c51d23e0c37df87d4b1ae14397f6", "score": "0.57103115", "text": "def detect_faces(self, img: np.ndarray) -> Optional[List[List[int]]]:\n img.flags.writeable = False\n faces = []\n\n with self.detector.FaceDetection(\n model_selection=1, min_detection_confidence=0.5\n ) as face_detection:\n results = face_detection.process(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n if not results.detections:\n return None\n for detection in results.detections:\n boundingBox = detection.location_data.relative_bounding_box\n x = int(boundingBox.xmin * img.shape[1]) - self.w_margin\n y = int(boundingBox.ymin * img.shape[0]) - self.h_margin\n x2 = x + int(boundingBox.width * img.shape[1]) + self.w_margin\n y2 = y + int(boundingBox.height * img.shape[0]) + self.h_margin\n faces.append([x, y, x2, y2])\n\n return faces", "title": "" }, { "docid": 
"3559aa10a717592970ba988166be083f", "score": "0.570923", "text": "def recognize_face(face, face_models, params=None, show_results=False):\r\n \r\n result = None\r\n \r\n use_one_file = ce.USE_ONE_FILE_FOR_FACE_MODELS\r\n \r\n if (params is not None) and (ce.USE_ONE_FILE_FOR_FACE_MODELS_KEY in params):\r\n use_one_file = params[ce.USE_ONE_FILE_FOR_FACE_MODELS_KEY]\r\n\r\n if use_one_file:\r\n result = recognize_face_from_models_file(\r\n face, face_models, params, show_results)\r\n # result = recognize_face_base(\r\n # face, face_models, params, show_results)\r\n\r\n else:\r\n result = recognize_face_from_model_files(\r\n face, face_models, params, show_results)\r\n\r\n return result", "title": "" }, { "docid": "c9e06cf74cdd73a148b9bb29de7ea8fe", "score": "0.5673698", "text": "def find_faces(frame):\n\n # Lower EAR than EAR_threshold means that eyes are closed\n ear_treshold = 0.22\n\n # check if inited, if not init\n if not ('detector' in globals()):\n global detector\n global predictor\n print(\"[STATE] Detector, predictor initialized while running\")\n detector = dlib.get_frontal_face_detector()\n predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')\n\n # resize for faster code\n # for webcam you can use full size resolution\n # frame = imutils.resize(frame, width=450)\n\n # transfer to grayscale\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # detect faces in the grayscale frame\n rects = detector(gray, 0)\n # grab the indexes of the facial landmarks for the left and\n # right eye, respectively\n (lStart, lEnd) = [42, 48]\n (rStart, rEnd) = [36, 42]\n nose_landmark = 30 # index of root of nose (center of face)\n\n # init output variables\n avg_ear = 1\n blink = False\n nose_pos = []\n # loop over the face detections\n for rect in rects:\n # determine the facial landmarks in the face\n shape = predictor(gray, rect)\n shape = face_utils.shape_to_np(shape) # convert to NumPy array\n\n # get coordinates for nose landmark\n nose_pos = shape[nose_landmark] \n # extract the left and right eye coordinates, then use the\n # coordinates to compute the eye aspect ratio for both eyes\n left_eye = shape[lStart:lEnd]\n right_eye = shape[rStart:rEnd]\n left_ear = eye_aspect_ratio(left_eye)\n right_ear = eye_aspect_ratio(right_eye)\n\n # average EAR for both eyes\n avg_ear = (left_ear + right_ear) / 2.0\n\n if avg_ear < ear_treshold:\n # Blink detected\n blink = True\n else:\n # Opened eyes\n blink = False\n return avg_ear, blink, nose_pos", "title": "" }, { "docid": "f1287f176561645cefd6394cdcc34904", "score": "0.5662649", "text": "def get_face(self, uid): # pragma: no cover", "title": "" }, { "docid": "40c4788292b00efcae90c92ee24225a0", "score": "0.5656661", "text": "def isFaceRecognition(text):\n keywords_greetings = {\"wave \", \"hello \", \"hi \", \"check \", \"attendance \", \"call me \", \"greetings \", \"what's up \" }\n for item in keywords_greetings:\n if item in text.lower():\n print(\"item returned: \", item)\n return True\n return False", "title": "" }, { "docid": "f5fa0052fe4c2c0c4a49cb1cc025dc60", "score": "0.56549925", "text": "def video_capture():\r\n\r\n HEAD_MID = [[], [], [], []]\r\n HEAD_SIDE = [[], [], [], [], [], [], [], []]\r\n TEMPLES = [[], [], [], [], [], [], [], []]\r\n HEARS = [[], [], [], [], [], [], [], []]\r\n HEAD_MOVEMENT = 0\r\n DISPLAY_MESSAGE = [\"\"]\r\n \r\n\r\n video = cv2.VideoCapture(0)\r\n faceCascade = cv2.CascadeClassifier(\"haar/haarcascade_frontalface_alt2.xml\")\r\n\r\n counter = 0\r\n \r\n while(True):\r\n \r\n ret, frame = 
video.read()\r\n frame = cv2.resize(frame, (800, 600))\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n\r\n #Inizialisation\r\n if len(HEAD_MID[0]) < 1:\r\n try:\r\n x, y, w, h = face_detection(frame, gray, faceCascade)\r\n initialization(HEAD_MID, HEAD_SIDE, HEARS, TEMPLES,\r\n x, y, w, h)\r\n except:\r\n pass\r\n #no face detection\r\n\r\n\r\n #Initialization Succed\r\n else:\r\n\r\n try:\r\n #Re initialization\r\n x, _, _, _ = face_detection(frame, gray, faceCascade)\r\n if HEAD_MOVEMENT > x + 5 or HEAD_MOVEMENT < x - 5:\r\n HEAD_MID = [[], [], [], []]\r\n HEAD_SIDE = [[], [], [], [], [], [], [], []]\r\n TEMPLES = [[], [], [], [], [], [], [], []]\r\n HEARS = [[], [], [], [], [], [], [], []]\r\n\r\n\r\n #No initialization\r\n else:\r\n area_detection(frame, gray,\r\n HEAD_MID, HEAD_SIDE, TEMPLES, HEARS,\r\n SUBSTRACTOR1, SUBSTRACTOR2, SUBSTRACTOR3,\r\n SUBSTRACTOR4, SUBSTRACTOR5, SUBSTRACTOR6,\r\n SUBSTRACTOR7, DISPLAY_MESSAGE)\r\n\r\n except TypeError:\r\n pass\r\n #no face detection\r\n \r\n\r\n try:\r\n HEAD_MOVEMENT = x\r\n except:\r\n pass\r\n\r\n #Traiting message to display\r\n raise_list = analysing_display_message(DISPLAY_MESSAGE)\r\n if raise_list is True:\r\n DISPLAY_MESSAGE = [\"\"]\r\n\r\n\r\n cv2.imshow('FACE', frame)\r\n\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\n \r\n\r\n video.release()\r\n cv2.destroyAllWindows()", "title": "" }, { "docid": "5ca24478fc44a21b74a7a741747330bb", "score": "0.5626305", "text": "def processStream(stream_path):\n cap = cv2.VideoCapture(stream_path)\n openCounter = 0\n spoofCounter = 0\n foundList = []\n prev_ret = 0\n while True:\n _, frame = cap.read()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n try:\n ret = processByFrame(frame)\n print(ret)\n except NameError:\n # spoofedFaces.update({spoofed_face, datetime.now()})\n if spoofCounter > 30:\n pil_image = Image.fromarray(frame)\n detected_face = detectFace(pil_image)\n spoofed_face = compareFaceWithAllEmbeddings(embeddings, detected_face, distance_type=\"euc\")\n saveSpoofedFrame(spoofed_face, frame)\n print(\"Spoofing Detected!\")\n spoofCounter = 0\n spoofCounter = spoofCounter + 1\n openCounter = 0\n prev_ret = 0\n continue\n\n spoofCounter = 0\n\n if ret == prev_ret:\n openCounter = openCounter + 1\n\n if openCounter > 24:\n saveFrame(ret, frame)\n # openDoor()\n openCounter = 0\n print(\"Door is now opened for \" + ret)\n\n # Wait till door is closed.\n\n else:\n openCounter = 0\n\n prev_ret = ret", "title": "" }, { "docid": "1f2a8b0f24691081ab5906c7ba2737eb", "score": "0.56050694", "text": "def detect_face(self,images): \n sign=False \n bounding_box=[]\n landmark_box=[]\n face_boxes=[]\n landmark_boxes=[]\n detect_begin=time.time()\n \n if(np.size(images.shape)==3):\n sign=True\n img=np.zeros((1,images.shape[0],images.shape[1],images.shape[2]))\n img[0,:,:,:]=images\n images=img \n \n for img in images:\n\n if(img is None):\n face_boxes.append([])\n landmark_boxes.append([]) \n continue\n \n img=(img-127.5)/128\n\n if self.pnet_model:\n pt=time.time()\n score_box,bounding_box,landmark_box=self.detect_Pnet(self.pnet_detector,img)\n \n print(\"pnet-time: \",time.time()-pt)\n if(len(bounding_box)==0):\n face_boxes.append([])\n landmark_boxes.append([]) \n continue\n\n if self.rnet_model:\n rt=time.time()\n score_box,bounding_box,landmark_box=self.detect_Rnet(self.rnet_detector,img,bounding_box)\n \n print(\"rnet-time: \",time.time()-rt)\n if(len(bounding_box)==0):\n face_boxes.append([])\n landmark_boxes.append([]) \n continue\n \n if self.onet_model:\n 
ot=time.time()\n score_box,bounding_box,landmark_box=self.detect_Onet(self.onet_detector,img,bounding_box)\n\n print(\"onet-time: \",time.time()-ot) \n if(len(bounding_box)==0):\n face_boxes.append([])\n landmark_boxes.append([]) \n continue\n\n face_boxes.append(bounding_box)\n landmark_boxes.append(landmark_box)\n \n print(\"detect-time: \",time.time()-detect_begin)\n if(sign):\n return face_boxes[0],landmark_boxes[0]\n else:\n return face_boxes,landmark_boxes", "title": "" }, { "docid": "57f4d58e0f16c22078ce9dd38381e616", "score": "0.557978", "text": "def test_batch_redetect_with_one_face(self):\n for detector in self.detectors:\n with self.subTest(detectorType=detector.detectorType):\n detection = detector.detectOne(image=VLIMAGE_ONE_FACE)\n redetect = detector.redetect(\n images=[ImageForRedetection(image=VLIMAGE_ONE_FACE, bBoxes=[detection.boundingBox.rect])]\n )[0]\n self.assertFaceDetection(redetect, VLIMAGE_ONE_FACE)", "title": "" }, { "docid": "8bbfc2ba957fbe836fddc81ae23e3f52", "score": "0.5576231", "text": "def test_detect_features(img_url=TEST_URLS[0], features=FEATURES):\n assert_with(path('/detect?urls=%s' % img_url),\n partial(face_detected_with_features, features=features))", "title": "" }, { "docid": "11d51b8f536a18799708e1883e6a5eaa", "score": "0.55724806", "text": "def verify_person(self, img: np.ndarray) -> bool:\n person_encoding = face_rec.calc_emb(img)\n if face_rec.get_distance_embeddings(self.person_encoding, person_encoding) < 0.687:\n return True\n return False", "title": "" }, { "docid": "6fc96e1a404bcafa7924b6ca531d79ab", "score": "0.5572184", "text": "def detectFaceframe(self, frame, classifier):\n grayFrame = self.preProcessing(frame)\n\n if self.classifiertype == self.classifier.CASCADE:\n return self.detectFaceCascadeClassifier(grayFrame, classifier)\n elif self.classifiertype == self.classifier.HOG:\n return self.detectFaceHoG(grayFrame, classifier)\n elif self.classifiertype == self.classifier.DlibCNNv1:\n return self.detectFaceCNN(grayFrame, classifier)", "title": "" }, { "docid": "f18a5387e9979f55998c70da513c08e7", "score": "0.55663455", "text": "def detect_face(img: np.ndarray, face_cascade_path: str) -> list:\n face_cascade = cv2.CascadeClassifier(face_cascade_path)\n gray_img = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY)\n\n faces = face_cascade.detectMultiScale(gray_img, )\n\n return faces", "title": "" }, { "docid": "f96f5c25ffa3892efc23e84f238987f5", "score": "0.55651724", "text": "def track_face(video):\n\n coordinates = []\n face_cascade = cv2.CascadeClassifier('cascades/frontal_face.xml')\n\n # take in the first frame for an estimate\n _, frame = video.read()\n\n faces = get_faces(frame, face_cascade)\n face = faces[0]\n width, height = face[2], face[3]\n\n coordinates.append(find_coordinates(face))\n\n # iterate through the remaining frames\n while True:\n _, frame = video.read()\n if frame is None:\n break\n\n faces = get_faces(frame, face_cascade, width, height)\n\n # no suitable matches, so append the last element again\n if len(faces) == 0:\n coordinates.append(coordinates[-1])\n else:\n best = get_best_face(faces, width, height)\n coordinates.append(find_coordinates(best))\n\n return coordinates", "title": "" }, { "docid": "47eefbfd7e3279c5ff6f89ea32c84d63", "score": "0.55457395", "text": "def single_dlib_face_detector(img, mode):\n print(img.name)\n error_list = []\n image = face_recognition.load_image_file(img)\n try:\n face_locations = face_recognition.face_locations(image, model=\"cnn\")\n print(face_locations)\n if not 
face_locations:\n error_list.append(img)\n pil_image = Image.fromarray(image)\n Path(PROCESS_PATH, img.parent.parent.name, img.parent.name, \"retry\").mkdir(exist_ok=True, parents=True)\n process_train_path = Path(PROCESS_PATH, img.parent.parent.name, img.parent.name, \"retry\")\n pil_image.save(Path(process_train_path, Path(img.name)))\n else:\n top, right, bottom, left = face_locations[0]\n # plt.imshow(image)\n # plt.show()\n test_original_image = image[top:bottom, left:right]\n Image.fromarray(test_original_image).save(Path(PROCESS_PATH, Path(\"test.jpg\")))\n # plt.imshow(test_original_image)\n # plt.show()\n\n # face_image = image[int(top/2):int((bottom+image.shape[0]) / 2), int(left/2):int((right+image.shape[1]) / 2)]\n loose_crop = image[top - int((top / 2)):bottom + int((top / 2)),\n left - int((top / 2)):right + int((top / 2))]\n\n # plt.imshow(loose_crop)\n # plt.show()\n # tighter_crop = image[top - int((top / 6)):bottom + int((top / 6)),\n # left - int((top / 6)):right + int((top / 6))]\n # plt.imshow(tighter_crop)\n # plt.show()\n\n try:\n try:\n loose_crop = image[top - int((top / 1.5)):bottom + int((top / 1.5)),\n left - int((top / 1.5)):right + int((top / 1.5))]\n loose_crop = Image.fromarray(loose_crop)\n loose_crop.save(\n Path(PROCESS_PATH, Path(img.parent.name), Path(\"very_loose_crop_crop\"),\n Path(img.name)))\n except (ValueError, SystemError):\n try:\n loose_crop = image[top - int((top / 2)):bottom + int((top / 2)),\n left - int((top / 2)):right + int((top / 2))]\n loose_crop = Image.fromarray(loose_crop)\n loose_crop.save(\n Path(PROCESS_PATH, Path(img.parent.name), Path(\"very_loose_crop_crop\"),\n Path(img.name)))\n except (ValueError, SystemError):\n loose_crop = image[top - int((top / 3)):bottom + int((top / 3)),\n left - int((top / 3)):right + int((top / 3))]\n loose_crop = Image.fromarray(loose_crop)\n loose_crop.save(\n Path(PROCESS_PATH, Path(img.parent.name), Path(\"very_loose_crop_crop\"),\n Path(img.name)))\n tighter_crop = image[top - int((top / 6)):bottom + int((top / 6)),\n left - int((top / 6)):right + int((top / 6))]\n tighter_crop = Image.fromarray(tighter_crop)\n\n Path(PROCESS_PATH, Path(img.parent.name), \"loose_crop\").mkdir(exist_ok=True, parents=True)\n Path(PROCESS_PATH, Path(img.parent.name), \"very_loose_crop_crop\").mkdir(exist_ok=True, parents=True)\n\n tighter_crop.save(\n Path(PROCESS_PATH, Path(img.parent.name), Path(\"loose_crop\"), Path(img.name)))\n except (ValueError, SystemError):\n error_list.append(img)\n pil_image = Image.fromarray(img)\n Path(PROCESS_PATH, Path(img.parent.name), \"retry\").mkdir(exist_ok=True, parents=True)\n pil_image.save(Path(PROCESS_PATH, Path(img.parent.name), Path(\"retry\"), Path(img.name)))\n\n except Exception as e:\n print(e)\n error_list.append(img)\n Path(img.parent, Path(\"exclude\")).mkdir(exist_ok=True)\n print(f\"error {str(e)}\")\n pil_image = Image.fromarray(face_recognition.load_image_file(img))\n print(\"ERR COPYING FILE\")\n if 'class' in mode:\n Path(PROCESS_PATH, img.parent.parent.name, img.parent.name).mkdir(exist_ok=True, parents=True)\n process_train_path = Path(PROCESS_PATH, img.parent.parent.name, img.parent.name)\n pil_image.save(Path(process_train_path, Path(img.name)))\n else:\n Path(PROCESS_PATH, img.parent.parent.name, img.parent.name, \"retry\").mkdir(exist_ok=True, parents=True)\n process_train_path = Path(PROCESS_PATH, img.parent.parent.name, img.parent.name, \"retry\")\n pil_image.save(Path(process_train_path, Path(img.name)))\n\n return error_list", "title": "" }, { 
"docid": "1417e79b5d3013cc7cbc2ef72bd12a8e", "score": "0.553681", "text": "async def _get_matched_face(self, img, index: int) -> Optional[Dict]:\n self.logger.debug(\"attempting to match face in image\")\n pcfg = ProgramConfig.get()\n result = None\n if not pcfg.dry_run:\n client = await self._get_rek_client()\n result = await client.match_face_from_image(img)\n\n if result and \"Face\" in result:\n async with DbConnectionPool.get().acquire_dict_cursor(db=pcfg.rek_db_name) as (cur,conn):\n sql = \"\"\"\n UPDATE processed_faces SET matched_to_face_id = '%s'\n WHERE piwigo_image_id = %s AND face_index = %s\n \"\"\"\n\n await cur.execute(sql % (\n result[\"Face\"][\"FaceId\"],\n self.image.id,\n index\n ))\n await conn.commit()\n\n return result[\"Face\"]\n return None", "title": "" }, { "docid": "2fd990b7a03aef4e31b0bd41ed8ab2d8", "score": "0.5530681", "text": "def FaceRecognize(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "a963c0270747184ef8715fed072ae644", "score": "0.5529612", "text": "def get_camera_face_mask_detections_live(cameras: str = \"\"):\n cameras = get_cameras(cameras)\n for camera in cameras:\n validate_camera_existence(camera)\n return FaceMaskUsageMetric.get_live_report(cameras)", "title": "" }, { "docid": "c25196003f7edd99f9b21b9588c816ca", "score": "0.5525917", "text": "def _detect_faces(self):\n def detect(gray, cascade):\n rects = cascade.detectMultiScale(\n gray, scaleFactor=1.1, minNeighbors=3,\n minSize=(10, 10), flags = cv.CV_HAAR_SCALE_IMAGE)\n return rects\n if not self._faces:\n cascade = cv2.CascadeClassifier(FACE_CASCADE_FILE)\n self._faces = detect(self.gray, cascade)", "title": "" }, { "docid": "2ee11170a05268079aedc3c390e7c0c3", "score": "0.5518989", "text": "def onFaceDetected(self, *_args):\n # Unsubscribe to the event when talking,\n # to avoid repetitions\n memory.unsubscribeToEvent(\"FaceDetected\",\n \"HumanGreeter\")\n\n self.tts.say(\"Hello, you\")\n \n\n # Subscribe again to the event\n memory.subscribeToEvent(\"FaceDetected\",\n \"HumanGreeter\",\n \"onFaceDetected\")", "title": "" }, { "docid": "556e709ef6a451666702bb685078022c", "score": "0.55151826", "text": "def processByFrame(frame):\n global missingCount, embeddings\n\n if len(embeddings) == 0:\n embeddings = loadEmbeddings()\n print(embeddings)\n\n pil_image = Image.fromarray(frame)\n detected_face = detectFace(pil_image)\n\n if detected_face is None:\n missingCount = missingCount + 1\n\n if missingCount >= 24:\n resetFrames()\n missingCount = 0\n\n return\n\n addFrame(frame)\n isSpoofingChecked = checkForSpoofing()\n\n if not isSpoofingChecked:\n raise NameError(\"Spoofing Detected!\")\n\n ret = compareFaceWithAllEmbeddings(embeddings, detected_face)\n\n return ret", "title": "" } ]
7105b31703cb96fb21a04f9bcca0e3b7
Add track to playlist
[ { "docid": "15a26a618a8e29fa4482209cf5c63ceb", "score": "0.7280744", "text": "def _add_track(self, token, playlist_name, playlist_id):\n category_id = db.session.query(Category).filter_by(name=playlist_name).first().id\n playlist_id_db = db.session.query(Playlist).filter_by(name=playlist_name).first().id\n\n query = db.session.query(CategoryTrack.track_id).filter_by(category_id=category_id)\n\n #Add tracks to DB\n list_track_id = list()\n\n for track_id in query:\n if db.session.query(TrackPlaylist).filter_by(playlist_id=playlist_id_db, track_id=track_id[0]).first() is None:\n category_track = TrackPlaylist(playlist_id=playlist_id_db, track_id=track_id)\n db.session.add(category_track)\n db.session.commit()\n \n list_track_id.append(track_id[0])\n\n #Add tracks to spotify playlist\n i = 0\n while i <= len(list_track_id): #have to do this because if URI is too long request return 414\n headers = {\n 'Authorization': token,\n }\n url = f\"https://api.spotify.com/v1/playlists/{playlist_id}/tracks\"\n params = {\n 'uris': \",\".join([f\"spotify:track:{element}\" for element in list_track_id[i:i+50:]])\n }\n\n result = requests.post(\n headers=headers,\n params=params,\n url=url\n )\n print(f\"RESULT:::{result.json()}\")\n\n i+=50\n return {'ok': 'super'}", "title": "" } ]
[ { "docid": "5a2b9c76e830e3fd25acca6023db3f2e", "score": "0.82097757", "text": "def addToPlaylist(self, track):\n #playlist must already exist - TODO: Write code to generate playlist\n playlist = self.data.find(\"/PLAYLISTS/NODE[@TYPE='FOLDER']/SUBNODES/NODE[@TYPE='PLAYLIST'][@NAME='Auto-Cued Tracks']/PLAYLIST\")\n # <ENTRY><PRIMARYKEY TYPE=\"TRACK\" KEY=\"Fry HD/:Music/:iTunes/:iTunes Music/:Jamiroquai/:Travelling Without Moving/:01 Virtual Insanity.mp3\"></PRIMARYKEY></ENTRY>\n entry = etree.Element(\"ENTRY\")\n primarykey = etree.Element(\"PRIMARYKEY\")\n primarykey.set(\"TYPE\", \"TRACK\")\n location = track.find(\"LOCATION\")\n localpath = location.get(\"VOLUME\") + location.get(\"DIR\").replace(\"/[^:]\", \"/:\") + location.get(\"FILE\")\n primarykey.set(\"KEY\", localpath)\n entry.append(primarykey)\n playlist.append(entry)\n playlist.set(\"ENTRIES\", str(int(playlist.get(\"ENTRIES\")) + 1))", "title": "" }, { "docid": "052f276c9cf8b3cb64aec65bd26780e8", "score": "0.78722167", "text": "def _playlist_add(self, url):\n \n url = urllib.quote(url)\n self._http_api_request('command=in_play&input=%s' % url)", "title": "" }, { "docid": "69c3f91d60759f4b73e872dd8ffda12c", "score": "0.78463686", "text": "def add_track(self, track):\n self.tracks.add(track)", "title": "" }, { "docid": "57fd5b6705eb36a3a248b47bc9348c17", "score": "0.7840859", "text": "def add_track_to_playlist(playlist_key, track_key):\n r = rdio_request_with_refresh('addToPlaylist', {\n 'playlist': playlist_key,\n 'tracks': track_key\n })\n\n r.raise_for_status()\n\n payload = r.json()\n return payload['result']", "title": "" }, { "docid": "4d8bf114c74ebdb7a82c1f2637bfaddd", "score": "0.78252214", "text": "def _add_to_playlist(self,playlist_name,track_length,track_artist,track_title,track_location):\n full_playlist_name = \"{}.m3u\".format(playlist_name)\n playlist_path = os.path.join(self._playlist_dir, full_playlist_name)\n\n playlist = open(playlist_path, 'a')\n #Create header for new playlists\n if full_playlist_name not in self._playlists:\n playlist.write(\"#EXTM3U\\n\")\n self._playlists.add(full_playlist_name)\n\n #Add track to playlist\n playlist.write(\"#EXTINF:{}, {} - {}\\n{}\\n\".format(\n track_length, track_artist, track_title, track_location))\n playlist.close()", "title": "" }, { "docid": "227bcc43f69db909cda308aa4d31dcb3", "score": "0.7567807", "text": "def add_to_playlist(self, playlist_name, video_id):\r\n print(\"add_to_playlist needs implementation\")", "title": "" }, { "docid": "bc1888152ba07570939ad4cb4a6491f0", "score": "0.7563945", "text": "def add_song(self, song, position=None):\n if position is None:\n self.tracks.append(song)\n else:\n self.tracks.insert(position,song)", "title": "" }, { "docid": "853b0dc17e8c9fc5d970c816a019a486", "score": "0.75623363", "text": "def playlist_add_track(playlist: Playlist, track: Track) -> PlaylistTrack:\n try:\n return PlaylistTrack.objects.get(playlist=playlist, track=track)\n except ObjectDoesNotExist:\n last = PlaylistTrack.objects.filter(playlist=playlist).last()\n index = 0 if last is None else last.index + 1\n return PlaylistTrack.objects.create(playlist=playlist, track=track, index=index)", "title": "" }, { "docid": "d605df8ef4a99e1e2dcf99e20ca86676", "score": "0.7520795", "text": "def add_song(self, song, position=None):\n if position is None:\n self.tracks.append(song)\n else:\n self.tracks.insert(position, song)", "title": "" }, { "docid": "99a40dda432d8c23a1fd8a4585c4ffad", "score": "0.7408106", "text": "def add_tracks(self, tracks):\n 
self.tracks.update(tracks)", "title": "" }, { "docid": "815eb7ace18d9699c4a340e2ac452d33", "score": "0.7387276", "text": "def add_playlist(self, name):\n self.playlists.add(name)", "title": "" }, { "docid": "4483179fd9ae2f764bc7a687254a9ef4", "score": "0.7338871", "text": "def append(self, track):\n self._tracks.append(track)\n self._num_tracks+=1", "title": "" }, { "docid": "16a48de34e8a12dcd303dc2d4dcb38ab", "score": "0.73103803", "text": "async def add_tracks(self, links):\n track_uris = []\n for link in links:\n match = re.search(r'(?<=track/).+?(?=\\?|$)', link['url'])\n if match:\n track_uris.append(f'spotify:track:{match.group()}')\n\n if not track_uris:\n return\n\n if datetime.datetime.utcnow() >= self.token_expires:\n await self._refresh_access_token()\n\n headers = {\n 'Authorization': f'Bearer {self.access_token}',\n 'Content-Type': 'application/json'\n }\n body = {\n 'uris': track_uris,\n 'position': 0\n }\n\n async with self.client_session.post(self._playlist_add_url, headers=headers, json=body) as response:\n print(json.dumps(response.__dict__, default=str, indent=2, sort_keys=True))", "title": "" }, { "docid": "365a92ee7b802e40199a44e5ecd70516", "score": "0.7307961", "text": "def add_song(self, song, position=None):\n song_found = find_object(song, self.track)\n # we are searching the song in the album to avoid duplication.\n # Due to presence of the song more then once in the file.\n if song_found is None:\n if position is None:\n self.track.append(song)\n else:\n self.track.insert(position, song)", "title": "" }, { "docid": "9f10cfb726e9264b119c5a9ffcf7a134", "score": "0.7302769", "text": "def add_track(self, track):\n\n self.__tracks.append(track)\n self.__sorted = False", "title": "" }, { "docid": "6ab0a23d3b35189368ac660a352344d3", "score": "0.72726095", "text": "def add_playlist(self, playlist_list):\n for song in playlist_list:\n self.add_song(song)", "title": "" }, { "docid": "e1c5bf4a92c5b50905b238bf3b31fbb7", "score": "0.72086656", "text": "def add_playlist(self, playlist):\n if isinstance(playlist, Playlist):\n if playlist not in self._playlists:\n self._playlists.append(playlist)\n else:\n print('Playlist already exists')\n else:\n raise ValueError('entered value is not a playlist')", "title": "" }, { "docid": "240efbeb2ab0c99e8e9e8cb2b739d949", "score": "0.7178491", "text": "def add_song(self, song):\n self.song_queue.append(song)", "title": "" }, { "docid": "2752913da869b0c3814e00b726bc982c", "score": "0.7163271", "text": "def add_track(self, event):\n # get the file\n userPath = '/home/pi/'\n wildcard = \"MP3 File (*.mp3)|*.mp3|\"\n \n dialog = wx.FileDialog(None, \"Choose file to Play :\", style=1, defaultDir=userPath, wildcard=wildcard, pos=(10,10))\n\n if dialog.ShowModal()==wx.ID_OK:\n self.path = dialog.GetPath()\n print \"def add_track, path : \"+self.path\n self.filename = dialog.GetFilename() #get the filename of the file\n print \"def add_track, filename : \"+self.filename\n self.dirname = dialog.GetDirectory() #get the directory of where file is located\n\n # split it to use leaf as the initial title \n self.file_pieces = self.filename.split(\"/\")\n \n # append it to the playlist\n self.playlist_box.Append(self.filename)\n self.playlist.append([self.path, self.file_pieces[-1],'',''])\n print \"def add_track : append it to the playlist \" +str(self.file_pieces)\n print \"def add_track : add title to playlist display as \" +str(self.filename)\n \n # and set it as the selected track\n self.playlist.select(self.playlist.length()-1)\n 
self.display_selected_track(self.playlist.selected_track_index())\n print \"def add_track : set it as the selected track\"\n print \"index play : \"+str(self.playlist.selected_track_index())\n\n return \n\n dialog.Destroy()", "title": "" }, { "docid": "cbebbcfdf08e837421eaaf1e03a7795a", "score": "0.71042174", "text": "def add_song(self, song, position=None):\n song_found = find_object(song, self.tracks)\n if song_found is None:\n song_found = Song(song, self.artist)\n if position is None: \n self.tracks.append(song_found)\n else:\n self.tracks.insert(position, song_found)", "title": "" }, { "docid": "6d55a775da1a1b2c2d679c639ec242df", "score": "0.7036881", "text": "def do_add(self, resource):\n self.player.event('remote', 'add: ' + str(resource))\n self.player.playlist.add(resource)", "title": "" }, { "docid": "fd16dffed0a78ee7574fbbf027dd4cd0", "score": "0.6962775", "text": "def test_add_songs_to_playlist(self):\n mock_agent = mock.MagicMock()\n mock_playlist_snapshot = mock.MagicMock()\n self.sp.agent = mock_agent\n self.sp.agent.user_playlist_add_tracks = mock_playlist_snapshot\n\n self.sp.add_songs_to_playlist([\"test_track_id_1\"], \"test_pl_id\")\n assert(mock_playlist_snapshot.call_count > 0)", "title": "" }, { "docid": "a6329115f73152c204dbf55a6a8ced45", "score": "0.69148207", "text": "def add_to_playlist(self, playlist_name, video_id):\n print(\"A playlist that is being added to: \\n\")\n \"\"\"Args:\n playlist_name: The playlist name.\n video_id: The video_id to be added.\"\"\"\n for video in library.add_to_playlist():\n print (f\"{video.title} ({video.video_id}) {list(video.tags)} \\n\")\n print(\"add_to_playlist needs implementation\")", "title": "" }, { "docid": "9df14116b6b37c76c235e73d224cd74b", "score": "0.69081163", "text": "def add(self, *args):\n return _gnsdk.GnPlaylistCollection_add(self, *args)", "title": "" }, { "docid": "5525f4f78ce9944ce46a3def33556807", "score": "0.68776774", "text": "async def add_song(self, channel):\n\t\t\n\t\tawait self.bot.send_message(channel, \"```What song do you want to add? 
(Please provide an url!)```\")\n\t\turl = await self.bot.wait_for_message(author=self.author)\n\t\turl = url.content.strip()\n\t\t\n\t\tawait self.bot.send_message(channel, \"```What playlist do you want to add this song to?```\")\n\t\tplaylist = await self.bot.wait_for_message(author=self.author)\n\t\tplaylist = playlist.content.strip()\n\t\tplaylist += \".txt\"\n\t\t\n\t\tmsg = await self.list_append(playlist, url)\t\t\n\t\tawait self.bot.send_message(channel, msg)", "title": "" }, { "docid": "c3ac8fb5f73763ff72141813aa0b370e", "score": "0.6844587", "text": "def add_self_to_own_service(self):\n self.service.user_playlist_add_tracks('strongohench', self.playlist_id, [self.id])", "title": "" }, { "docid": "a9e356e1aebaf81424cbf7764b6f9289", "score": "0.6813275", "text": "def add_tracks(auth_mgr: SpotifyOAuth, playlist_id: str, track_ids: list):\n auth_mgr.scope = \"playlist-modify-public\"\n sp = spotipy.Spotify(auth_manager=auth_mgr)\n \"\"\"\n Spotify lets you post up to 100 tracks to a playlist at once\n Use list comprehension to break track ID list into chunks\n \"\"\"\n post_limit = 100\n chunked_track_ids = [track_ids[i:i + post_limit] for i in range(0, len(track_ids), post_limit)]\n for chunk in chunked_track_ids:\n sp.playlist_add_items(playlist_id, chunk)\n return", "title": "" }, { "docid": "787f81687e5f99a4ffe579f3fef6a0a5", "score": "0.68063045", "text": "def add_self_to_own_service(self):\n\n add_action_body = \\\n {\n 'snippet':\n {\n 'playlistId': self.playlist_id,\n 'resourceId':\n {\n 'kind': 'youtube#video',\n 'videoId': self.id\n }\n }\n }\n\n self.service.playlistItems().insert(part='snippet', body=add_action_body).execute()", "title": "" }, { "docid": "b7bde295184553a3a2a05f071a65f44a", "score": "0.67385423", "text": "def add_track_to_saved_songs(self, auth_code: str, song_uri: str):\n return self._make_spotify_request(\n 'PUT',\n f'{self.API_URL}/me/tracks',\n params={'ids': song_uri},\n headers={'Authorization': f'Bearer {auth_code}'}\n )", "title": "" }, { "docid": "eebde77bf5190f944d6234ac1dad08b3", "score": "0.6721892", "text": "def addTracks(self, tracks):\n\t\tfor track in tracks:\n\t\t\tself.addTrack(**track)", "title": "" }, { "docid": "cd4cc474b3e90ebe909c8315492de01d", "score": "0.6721367", "text": "def add_song(self):\n\n # Populate dictionary with liked songs\n self.get_liked_vids()\n\n # Collect all URIs\n uris = [info[\"spotify_uri\"]\n for song, info in self.all_song_info.items()]\n\n # Create new playlists\n playlist_id = self.create_spotify_playlist()\n\n # Add all songs into new playlists\n request_data = json.dumps(uris)\n\n query = query = \"https://api.spotify.com/v1/playlists/{}/tracks\".format(playlist_id)\n\n response = requlests.post(query,\n data = request_data,\n headers = {\"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(spotify_token)})\n\n # Check for valid response status\n if response.status_code != 200:\n raise ResponseException(response.status_code)\n\n response_json = response.json()\n return response_json", "title": "" }, { "docid": "32f93c4469e1e40b981ea563229ca72d", "score": "0.6707351", "text": "def addTrack(self, track):\n if track not in self.__tracks:\n self.__tracks[track] = None", "title": "" }, { "docid": "8ab59154eca0629a86fd3fe36100b2de", "score": "0.6697318", "text": "def create_playlist(auth_header, user_id, playlist_tracks, mood, playlist_name):\n\n name = f'{playlist_name}'\n\n payload = { \n 'name' : name,\n 'description': 'Mood generated playlist'\n }\n\n playlist_request = 
f'{SPOTIFY_API_URL}/users/{user_id}/playlists'\n playlist_data = requests.post(playlist_request, data = json.dumps(payload), headers =auth_header).json()\n playlist_id = playlist_data['id']\n session['playlist'] = playlist_id\n\n playlist_exist = db.session.query(Playlist).filter(Playlist.id == playlist_id).all()\n\n if not playlist_exist:\n new_playlist = Playlist(id = playlist_id,\n user_id = user_id,\n mood = mood)\n db.session.add(new_playlist)\n\n\n for track in playlist_tracks:\n playlist_track_exist = db.session.query(PlaylistTrack).filter(PlaylistTrack.playlist_id == playlist_id, PlaylistTrack.track_uri == track).all()\n if not playlist_track_exist:\n new_playlist_track = PlaylistTrack(playlist_id = playlist_id, track_uri = track)\n db.session.add(new_playlist_track)\n\n db.session.commit()\n\n track_uris = '%2C'.join(playlist_tracks)\n add_tracks = f'{SPOTIFY_API_URL}/playlists/{playlist_id}/tracks?uris={track_uris}'\n tracks_added = requests.post(add_tracks, headers=auth_header).json()\n # tracks_added = post_spotify_data(add_tracks, auth_header)\n\n return playlist_data['external_urls']['spotify']", "title": "" }, { "docid": "c7b36fc96c45278eb3b4bc87b78f1e65", "score": "0.6672039", "text": "def create_playlist(token, tracks):\n spotify = spotipy.Spotify(auth=token)\n user = spotify.current_user()\n user_id = user[\"id\"]\n print(user_id)\n\n name = input('Enter a name for your new playlist!\\n')\n print()\n\n playlist = spotify.user_playlist_create(user_id, name)\n print(\"Playlist generated with name: \" + name)\n\n spotify.user_playlist_add_tracks(user_id, playlist, tracks)\n print(\"Added tracks to playlist!\")", "title": "" }, { "docid": "84c67e1309df66ac67c3dc6cf369a074", "score": "0.66504526", "text": "def append_song_to_playlist(song):\n global sleeper\n current, playlist = get_playlist()\n if not playlist and not current:\n path = get_path_from_song(song)\n player.loadfile(path)\n song.plays += 1\n current = song.id\n sleeper = start_sleeper(song.length)\n else:\n playlist = playlist + [song.id]\n cache.set('playlist', playlist)\n cache.set('current', current)\n return current, playlist", "title": "" }, { "docid": "b79f627d376192d618138b410cb73a98", "score": "0.66073465", "text": "def _build_playlist_with_track(self, media_track):\n self._copy_HLS_metadata(media_track)\n self._setup_track_info(media_track)\n self._add_segment_entries(media_track)\n self._finalize_entries(media_track)", "title": "" }, { "docid": "85150d572c0a18c976e65260f3812024", "score": "0.658229", "text": "async def insert(self, index, obj: Union[PlaylistTrack, Track]) -> None:\n if not isinstance(obj, (PlaylistTrack, Track)):\n raise TypeError(\n f\"Expected a PlaylistTrack or Track object instead got {obj!r}\"\n )\n\n async with MutableTracks(self) as tracks:\n tracks.insert(index, obj)", "title": "" }, { "docid": "482fe1df3dec25adaeb4ad20f2be5ac3", "score": "0.65603113", "text": "def add_music_to_playlist(self, playlist_id: str, song_ids: List[str]) -> None:\n playlist_info = self.spotify.user_playlist(self.username, playlist_id, \"tracks\")\n existing_songs = [x[\"track\"][\"id\"] for x in playlist_info[\"tracks\"][\"items\"]]\n new_song_ids = [song_id for song_id in song_ids if song_id not in existing_songs]\n if new_song_ids:\n self.spotify.user_playlist_add_tracks(self.username, playlist_id, new_song_ids)\n else:\n logger.info(\"No new music to add to the playlist\")", "title": "" }, { "docid": "533eab85e1d0bc0d1961d2ef4ad830cf", "score": "0.6557181", "text": "def create_playlist(self, tracks, 
playlist_name):\n playlist = SPOTIFY.user_playlist_create(self.username, playlist_name)\n playlist_id = playlist['uri']\n track_ids = [track.uri for track in tracks]\n SPOTIFY.user_playlist_add_tracks(self.username,\n playlist_id,\n track_ids)", "title": "" }, { "docid": "918ba03142b7a286792f28115d19b285", "score": "0.6554511", "text": "def insert_song(self, song_name):\r\n pass", "title": "" }, { "docid": "12fc3c40fd17857be10458da1472ea1f", "score": "0.65516937", "text": "def sp_add_tracks_to_playlist(tids, pl_id, sp_username, sp):\r\n # use spotify api to add songs 20 at a time to stay within rate limits\r\n for group in _chunker(tids, 20): # call chunker to split tids into groups before adding\r\n sp.user_playlist_add_tracks(sp_username, pl_id, tids, position=None)", "title": "" }, { "docid": "48ab6b404146172a8fd65cfc73f5f3df", "score": "0.65331423", "text": "def OnPlaylistClicked(self, event):\n\n if self.ViewChoice.GetSelection() == 1:\n songs = self.TreePanel.getSelectedSongs()\n else:\n songs = self.SearchPanel.getSelectedSongs()\n \n if not songs:\n wx.MessageBox(\"No songs selected.\")\n return\n\n for song in songs:\n self.KaraokeMgr.AddToPlaylist(song)", "title": "" }, { "docid": "308264cb8cf2076ada5afd9e10c38b54", "score": "0.65141684", "text": "async def play(self, ctx, *, query: str):\n player = self.bot.lavalink.player_manager.get(ctx.guild.id)\n\n query = query.strip('<>')\n\n if not url_rx.match(query):\n query = f'ytsearch:{query}'\n\n results = await player.node.get_tracks(query)\n\n if not results or not results['tracks']:\n embed = discord.Embed(\n color=self.bot.embed_color,\n title=\" Playing Error!\",\n description=\"• The query you searched for was not found.\"\n )\n return await ctx.send(embed=embed)\n\n # position = lavalink.utils.format_time(player.position)\n\n if results['loadType'] == 'PLAYLIST_LOADED':\n tracks = results['tracks']\n\n for track in tracks:\n player.add(requester=ctx.author.id, track=track)\n\n embed = discord.Embed(color=self.bot.embed_color,\n description=f'• **{results[\"playlistInfo\"][\"name\"][\"singer\"]}** - {len(tracks)} tracks',\n title=\" Playlist Added!\")\n embed.set_thumbnail(url=f'https://img.youtube.com/vi/{track[\"info\"][\"identifier\"]}/default.jpg')\n await ctx.send(embed=embed)\n else:\n track = results['tracks'][0]\n\n embed = discord.Embed(color=self.bot.embed_color,\n description=f'• [**{track[\"info\"][\"title\"]}**]({track[\"info\"][\"uri\"]})',\n title=\" Song Added To Queue!\")\n embed.set_thumbnail(url=f'https://img.youtube.com/vi/{track[\"info\"][\"identifier\"]}/default.jpg')\n\n track = lavalink.models.AudioTrack(track, ctx.author.id, recommended=True)\n player.add(requester=ctx.author.id, track=track)\n\n await ctx.send(embed=embed)\n\n if not player.is_playing:\n await player.play()", "title": "" }, { "docid": "194aa335dec2d86909df508f05b0c13b", "score": "0.64860356", "text": "async def update_spotify_playlist(\n tracks: Iterable, playlist_uri: str, sp: Spotter, insert_top: bool = False\n):\n\n index = 0\n for album_ids in grouper(100, tracks):\n album_ids = clean(album_ids)\n album_ids = [a[\"id\"] for a in album_ids]\n args = (sp.userdata[\"id\"], playlist_uri, album_ids)\n\n if insert_top:\n args = args + (index,) # type: ignore\n\n try:\n await sp.client.user_playlist_add_tracks(*args)\n index += len(album_ids) - 1\n await sleep(0.2)\n except Exception as exc:\n _LOGGER.exception(\n f\"{sp.userdata['id']} fail on POST playlist items {album_ids}\"\n )\n push_sentry_error(exc, sp.userdata[\"id\"], 
sp.userdata[\"display_name\"])", "title": "" }, { "docid": "861e208fea43174c350734e6c2b56e9d", "score": "0.64666", "text": "def insert_song(self, index, song):\n self.song_queue.insert(index, song)", "title": "" }, { "docid": "5b476e6b1467d7e01f2d6d4e442ee235", "score": "0.64495146", "text": "def addTrack(self, **kwargs):\n\t\t\n\t\tself.xml += u' <track>\\n'\n\t\t\n\t\tfor k, v in kwargs.items():\n\t\t\tif k == \"artist\":\n\t\t\t\tk = \"creator\"\n\t\t\tif k != \"location\":\n\t\t\t\tv = escape(v)\n\t\t\tself.xml += u' <{k}>{v}</{k}>\\n'.format(k=k, v=v)\n\t\t\t\n\t\tself.xml += u' </track>\\n'", "title": "" }, { "docid": "d9a20b94517beecad96c26d3e54bb3e2", "score": "0.6433166", "text": "def add_track_to_user_queue(self, auth_code: str, song_uri: str) -> None:\n self._make_spotify_request(\n 'POST',\n f'{self.API_URL}/me/player/queue',\n params={'uri': song_uri},\n headers={'Authorization': f'Bearer {auth_code}'}\n )", "title": "" }, { "docid": "9eb13468d4f66e8d1e0e10297976f971", "score": "0.64283264", "text": "def add_to_playlist(self, playlist_name, video_id):\r\n deny = \"Cannot add video to \"+playlist_name+\": \"\r\n if playlist_name.upper() in list(self.playlists):\r\n if video_id in self.playlists[playlist_name.upper()]:\r\n print(deny+\"Video already added\")\r\n else:\r\n video = self._video_library.get_video(video_id)\r\n if video:\r\n self.playlists[playlist_name.upper()].append(video_id)\r\n print(\"Added video to \"+playlist_name+\": \"+video.title)\r\n else:\r\n print(deny+\"Video does not exist\")\r\n else:\r\n print(deny+\"Playlist does not exist\")", "title": "" }, { "docid": "206a8f309f639812394a41f8429d8b1d", "score": "0.64214504", "text": "def __OnAddNewSong(self, event):\n \n # call back on a signal we catch ourselves\n viewmgr.signalAddSong()", "title": "" }, { "docid": "f4660366d520fc20fbbc42a130149620", "score": "0.6419865", "text": "def add_self_to_own_playlist(self):\n if self.id is None:\n if not self.search_own_service_for_track_title():\n raise TrackNotFoundException\n playlist = self.get_own_current_playlist()\n if self.id not in playlist:\n self.add_self_to_own_service()\n return True\n else:\n return False", "title": "" }, { "docid": "2d64a3c4ff7af90a1effbf856ac750d9", "score": "0.639478", "text": "def add_to_playlist(self, playlist_name, video_id):\n self.playlist_found = False\n for playlist in self.playlist_lst:\n if playlist_name.upper() == playlist.name:\n self.playlist_found = True\n requested_video = self._video_library.get_video(video_id)\n if requested_video is None:\n print(f\"Cannot add video to {playlist_name}: Video does not exist\")\n elif requested_video not in playlist.videos_included:\n playlist.videos_included.append(requested_video)\n print(f\"Added video to {playlist_name}: {requested_video.title} \")\n else:\n print(f\"Cannot add video to {playlist_name}: Video already added\")\n break\n if self.playlist_found is False:\n print(f\"Cannot add video to {playlist_name}: Playlist does not exist\")", "title": "" }, { "docid": "5f0fdd9a52b3f78aa637670cb1ed5d24", "score": "0.6390228", "text": "def create_spotify_playlist(self):\n pass\n\n request_body = json.dumps({\n \"name\": \"Liked YouTube Songs\",\n \"description\": \"All Liked Songs from YouTube\",\n \"public\": True })\n\n query = f\"https://api.spotify.com/v1/users/{self.user_id}/playlists\"\n response = requests.post(query,\n data = request_body,\n headers = {\n \"Content-Type\":\"application/json\",\n \"Authorization\":f\"Bearer {spotify_token}\"})\n response_token = 
response.json()\n # Playlist ID\n return response_json[\"id\"]", "title": "" }, { "docid": "6b6896afe45b8e27502e32e90007ea75", "score": "0.6372862", "text": "def add_to_playlist(self, playlist_name, video_id):\n\n global user_playlist\n global user_flaglist\n\n counter = 0\n\n video = self._video_library.get_video(video_id)\n\n if len(user_flaglist) > 0:\n for videos in user_flaglist:\n if video_id == videos[0].video_id:\n return print(f\"Cannot add video to {playlist_name}: Video is currently flagged (reason: {videos[1]})\")\n\n if len(user_playlist) < 1:\n print(f\"Cannot add video to {playlist_name}: Playlist does not exist\")\n return\n\n for x in user_playlist:\n if len(user_playlist) > 0:\n if x[0].lower() != playlist_name.lower():\n counter += 1\n if counter == len(user_playlist):\n print(f\"Cannot add video to {playlist_name}: Playlist does not exist\")\n return\n\n if video is None:\n print(f\"Cannot add video to {playlist_name}: Video does not exist\")\n return\n \n for x in user_playlist:\n if len(x) > 1:\n if x[1].title == video.title:\n print(f\"Cannot add video to {playlist_name}: Video already added\")\n return\n\n for x in user_playlist:\n if x[0].lower() == playlist_name.lower():\n x.append(video)\n\n print(f\"Added video to {playlist_name}: {video.title}\")", "title": "" }, { "docid": "3cdc74e08e584540bb0a56aeb424c101", "score": "0.63537824", "text": "async def extend(self, tracks: Union[\"Playlist\", Iterable[Union[Track, str]]]):\n bucket: Iterable[Union[Track, str]]\n\n if isinstance(tracks, Playlist):\n bucket = await tracks.get_all_tracks()\n\n elif not hasattr(tracks, \"__iter__\"):\n raise TypeError(\n f\"`tracks` was an invalid type, expected any of: Playlist, Iterable[Union[Track, str]], instead got {type(tracks)}\"\n )\n\n else:\n bucket = list(tracks)\n\n gen: Iterable[str] = (str(track) for track in bucket)\n\n while True:\n head: List[str] = list(islice(gen, 0, 100))\n\n if not head:\n break\n\n await self.__http.add_playlist_tracks(self.id, tracks=head)", "title": "" }, { "docid": "a39e267d4786b8bf1a494fd3b4ae600c", "score": "0.6339422", "text": "def add(self, song):\n if self._get_song_key(song) not in self._loadedSongs.keys():\n self._loadedSongs[self._get_song_key(song)] = song\n # Subscribe to song changes\n song.subscribe(song.DELETED, self._song_deleted)\n song.subscribe(song.UPDATED, self._song_updated)\n # Trigger own subscriptions\n self._trigger_subscriptions(self.ADDED, song=song)", "title": "" }, { "docid": "b422b6b9837d02f593c1dfc2f0cabac6", "score": "0.6330756", "text": "def add_to_playlist(self, playlist_name, video_id):\n if video_id == amazing_cats_video_id: \n print(\"Added video to {playlist_name}: Amazing cats\") \n \n elif video_id == another_cat_video_id: \n print(\"Added video to {playlist_name}: Another cat video\")\n\n elif video_id == funny_dogs_video_id: \n print(\"Added video to {playlist_name}: Funny dogs\")\n\n elif video_id == life_at_google_video_id: \n print(\"Added video to {playlist_name}: Life at Google\")\n\n elif video_id == nothing_video_id: \n print(\"Added video to {playlist_name}: Video about nothing\")\n\n elif playlist_list.count(playlist_name) == 0:\n print(\"Cannot add video to {playlist_name}: Playlist does not exist\")\n\n else:\n print(\"Cannot add video to {playlist_name}: Video does not exist\")", "title": "" }, { "docid": "471d6d0a1655b3d898b1f920040f8492", "score": "0.63197213", "text": "def add_songs_to_playlist(self, auth_code: str, playlist_id: str, songs: list) -> None:\n if len(songs) > 
self.PLAYLIST_BATCH_SIZE_LIMIT:\n raise ClientException(\n f'Invalid number of songs to add to playlist; \\\n Must be equal to or less than {self.PLAYLIST_BATCH_SIZE_LIMIT}'\n )\n\n url = '{api_url}/playlists/{playlist_id}/tracks'.format(\n api_url=self.API_URL,\n playlist_id=playlist_id\n )\n\n headers = {'Authorization': 'Bearer {}'.format(auth_code)}\n\n data = {'uris': songs}\n\n self._make_spotify_request('POST', url, headers=headers, json=data)", "title": "" }, { "docid": "f5e7443eb440f7315e7eaf439784e185", "score": "0.63144165", "text": "def add_to_playlist(self, playlist_name, video_id):\n\n\n if playlist_name.lower() not in [name.lower() for name in self.playlist_names]:\n print(f\"Cannot add video to {playlist_name}: Playlist does not exist\")\n elif self._video_library.get_video(video_id) == None:\n print(f\"Cannot add video to {playlist_name.lower()}: Video does not exist\")\n else:\n video = self._video_library.get_video(video_id)\n index = [name.lower() for name in self.playlist_names].index(playlist_name.lower())\n\n if video in self.playlists[index].videos:\n print(f\"Cannot add video to {playlist_name.lower()}: Video already added\")\n elif video.flagged:\n print(f\"Cannot add video to {playlist_name}: Video is currently flagged (reason: {video.flagged_reason})\")\n\n else:\n self.playlists[index].add_video(video)\n\n print(f\"Added video to {playlist_name}: {self._video_library.get_video(video_id).title}\")", "title": "" }, { "docid": "2750ea84a89e69dca18f54fdca030629", "score": "0.63107824", "text": "def create_playlist(self, playlist_name):\r\n print(\"create_playlist needs implementation\")", "title": "" }, { "docid": "6595716b43bd37f6a2a0038623a9864e", "score": "0.63097304", "text": "def add_to_list(self):\n music_directory = askdirectory()\n # appends song directory on disk to playlist in memory\n\n # adds songs into dataframe\n self.df_songs.load(music_directory)", "title": "" }, { "docid": "40e2088c0288f03461ab6070aefb35b3", "score": "0.6308108", "text": "def createPlaylist():\n #args\n playlistID = int(request.args(0))\n \n query = (db.track.id == db.playlist_track.track_id) & \\\n (db.playlist_track.playlist_id == playlistID)\n \n tracks = db(query).select(db.track.ALL,orderby=db.playlist_track.position)\n \n return tracks", "title": "" }, { "docid": "84a371d29a0e851b2c79912e48816d51", "score": "0.6307905", "text": "def add_playlist_to_queue(list_of_songsnames):\n mpdcontrol.clear_current_playlist()\n for song in list_of_songsnames:\n mpdcontrol.add_title_to_pl(song)\n print(mpdcontrol.get_current_song_playlist())\n mpdcontrol.play(0)", "title": "" }, { "docid": "a379b73de335cb198f6b0248ec07b268", "score": "0.62895066", "text": "def create_new_playlist(spotify: Spotify, name: str, user: str):\r\n spotify.user_playlist_create(user=user, name=name)", "title": "" }, { "docid": "cff3eada66d0a7daf92fa593b64f7b6a", "score": "0.6289421", "text": "def create_playlists_for_tracktags(self, client):\n #Keep track of existing playlists\n self._check_existing_playlists()\n\n #iterate through mp3 files under music directory\n for (root, dirs, files) in os.walk(self._music_dir):\n for file_name in files:\n if re.search('\\.[Mm][Pp]3$', file_name):\n\n #Get track information\n track_info = self._get_track_information(root, file_name)\n\n #Get tags for a track \n Track_tags = lastfm.Track_tags()\n tags= Track_tags.get_tags_for_track(client, track_info.artist, track_info.title)\n print(\"{} : {}\".format(track_info.artist, track_info.title))\n if not tags:\n print(\" No tags were 
found for {} by {}\".format(\n track_info.title, track_info.artist))\n #Iterate through tags\n for tag in tags:\n print(\" {}\".format(tag))\n\n #Create playlist for each unique tag\n self._add_to_playlist(tag, track_info.length,\n track_info.artist, track_info.title, track_info.location)", "title": "" }, { "docid": "76edbc73da496bde8b44de46d4c2f7ea", "score": "0.6288785", "text": "def sp_add_tracks(tids, sp):\r\n # use spotify api to add songs 20 at a time to stay within rate limits\r\n for group in _chunker(tids, 20): # call chunker to split tids into groups before adding\r\n results = sp.current_user_saved_tracks_add(tracks=group)", "title": "" }, { "docid": "360c671c38d1bff74760c6c62c2603b6", "score": "0.6283344", "text": "def add_track_playback(self, track_uri: str) -> str:\n # Creates the url for the request\n url = f\"{self.base}me/player/queue\"\n\n # Create the parameter for the request\n params = {\"uri\": track_uri}\n\n # Create the request to the site\n r = requests.post(url, params=params, headers=self.headers)\n\n # Check the status of the request and return the meanings\n if r.status_code == 403:\n return \"Error, not a premium user\"\n elif r.status_code == 404:\n return \"Error, device not found\"\n\n # If the request was successful return as such\n return \"Successful\"", "title": "" }, { "docid": "7c807e4605929f1837af88a62d477bf3", "score": "0.6275131", "text": "def add_songs(self, playlist_name: str, songs: Set[Music], add_date_prefix=True, public_playlist=True) -> None:\n playlist_id = self.create_playlist(playlist_name, add_date_prefix, public_playlist)\n song_ids = self.get_song_ids(songs)\n self.add_music_to_playlist(playlist_id, song_ids)\n\n message_base = \"All done!\"\n if self.music_not_found:\n not_found = \"\\n\\t\".join(self.music_not_found)\n logger.info(f\"{message_base}\\n\"\n f\"Couldn't find the following songs, you'll have to do this manually for now 😥\\n\\t\"\n f\"{not_found}\")\n else:\n logger.info(f\"{message_base} No songs need to be added manually 🥳\")", "title": "" }, { "docid": "c118386b3d821beb60127eba77e13030", "score": "0.6253961", "text": "def add_playlist(\n self,\n title: str,\n index: Optional[int] = None,\n return_playlist: Optional[bool] = True,\n ) -> Optional[PlaylistInfo]:\n params = {}\n if index is not None:\n params[\"index\"] = index\n if title is not None:\n params[\"title\"] = title\n self._request(ADD_PLAYLIST, params=params)\n\n playlist = None\n if return_playlist and index is None:\n playlist = self.get_playlists().find_playlist(\n title=title, find_last=True\n )\n elif return_playlist:\n playlist = self.get_playlists()[index]\n\n if playlist and playlist.title != title:\n raise AssertionError(\"Wrong playlist returned from request!\")\n return playlist", "title": "" }, { "docid": "03d2d22a6df3cadc1817367a1a36134e", "score": "0.6246704", "text": "def _add_song_to_history(self):\n self.song_history.append(self.song_index)", "title": "" }, { "docid": "006f6162be466ed4f4dbe9e99971cdd7", "score": "0.62450844", "text": "def create_playlist(self, playlist_name):\n\n playlist_list = []\n\n if playlist_name != playlist:\n print(\"Successfully created new playlist: {playlist_name}\")\n playlist = playlist_name\n playlist_list.append(playlist)\n\n else:\n print(\"Cannot create playlist: A playlist with the same name already exists\")", "title": "" }, { "docid": "afcf77e8d74f95336bd1276d73ff7ba3", "score": "0.6239709", "text": "def __add_song_helper(self, filename):\n if isinstance(filename, Song):\n if filename not in self._songs:\n 
self._songs.append(filename)", "title": "" }, { "docid": "a68fc9a1a11636c1613af8ff2c0cfa6f", "score": "0.6231628", "text": "def add_to_playlist(self, playlist_name, video_id):\n\n if playlist_name.lower() in self.createdPlaylists:\n if video_id not in self._video_library._videos:\n print(\"Cannot add video to \" +playlist_name+\": Video does not exist\")\n \n else:\n error_message = \"add video to \"+playlist_name\n title = self._video_library._videos[video_id].title\n tags = self._video_library._videos[video_id].tags\n\n selectedPlaylist = self.createdPlaylists[playlist_name.lower()]\n \n if video_id not in selectedPlaylist.videos_in_queue:\n \n if self.flag_procedure(video_id,error_message):\n return\n \n selectedPlaylist.addToQueue(title,video_id,tags)\n print(\"Added video to \" +playlist_name+\": \"+title)\n \n else:\n if self.flag_procedure(video_id,error_message):\n return\n \n print(\"Cannot add video to \" + playlist_name+\": Video already added\")\n\n else:\n print(\"Cannot add video to \" +playlist_name+\": Playlist does not exist\")", "title": "" }, { "docid": "ec1eaee3a654cce3aa8d4d8238ed27a1", "score": "0.6207694", "text": "def create_playlist(self):\r\n request_body = json.dumps({\r\n \"name\": \"Imported from Youtube\",\r\n \"description\": \"From Youtubify/ Automate to Spotify\",\r\n \"public\": True\r\n })\r\n\r\n query = \"https://api.spotify.com/v1/users/{}/playlists\".format(\r\n spotify_user_id)\r\n response = requests.post(\r\n query,\r\n data=request_body,\r\n headers={\r\n \"Content-Type\": \"application/json\",\r\n \"Authorization\": \"Bearer {}\".format(self.api_token)\r\n }\r\n )\r\n response_json = response.json()\r\n\r\n # playlist id\r\n return response_json[\"id\"]", "title": "" }, { "docid": "fb183ba36ecbf71802d106df6eec5df2", "score": "0.6207038", "text": "def EnqueueToPlaylist(filename):\n # ?command=in_enqueue&input=<uri>\n payload = {'command':'in_enqueue','input':filename}\n current_status = PlayerStatus(payload)", "title": "" }, { "docid": "27dfcfce70e88e7c1d386290521632a4", "score": "0.6193539", "text": "def add(store_track):\n\n\t\tstore_track['trackType'] = 8\n\n\t\treturn {'create': store_track}", "title": "" }, { "docid": "c31016e18cb496e50c604eed3b0f1975", "score": "0.6191396", "text": "def do_playlist(self, id):\n if not self.session:\n return\n if not self.database:\n return\n playlists = self.database.playlists()\n for p in playlists:\n if str(p.id) == str(id):\n self.database = p\n self._tracks = p.tracks()\n return", "title": "" }, { "docid": "1851aa94d1c3aeb30c19fbca8faa4403", "score": "0.6178544", "text": "async def list_append(self, file, song_link):\n\t\tfirst_time = False\n\t\ttry:\n\t\t\t#Decide if user's playlist is empty\n\t\t\tif os.path.getsize(self.file_path + file) is 0:\n\t\t\t\tfirst_time = True\n\t\n\t\t\tif song_link.find(\"list=\") is not -1:\n\t\t\t\t#if the user wants to add a list, extract the individual songs and add to playlist\n\t\t\t\tplaylist = Extract(song_link)\n\t\t\t\tlist = playlist.check()\n\t\t\t\t\n\t\t\t\twith open(self.file_path + file, 'a') as playlist:\n\t\t\t\t\tfor song in list:\n\t\t\t\t\t\tif first_time:\n\t\t\t\t\t\t\tplaylist.write(song)\n\t\t\t\t\t\t\tfirst_time = False\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tplaylist.write(\"\\n\")\n\t\t\t\t\t\t\tplaylist.write(song)\n\t\t\t\t\t\t\t\t\t\n\t\t\t\treturn \"```You have successfully appended your playlist to {}!```\".format(file)\n\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\telse:\n\t\t\t\t#otherwise add the single song to the playlist\t\n\t\t\t\twith 
open(self.file_path + file, 'a') as list:\n\t\t\t\t\tif first_time:\n\t\t\t\t\t\tlist.write(song_link)\n\t\t\t\t\t\tfirst_time = False\n\t\t\t\t\telse:\n\t\t\t\t\t\tlist.write('\\n')\n\t\t\t\t\t\tlist.write(song_link)\n\t\t\t\treturn \"```You have successfully appended song {0} to {1}!```\".format(song_link, file)\n\t\n\t\texcept FileNotFoundError:\n\t\t\treturn \"File {} not found.\".format(file)", "title": "" }, { "docid": "ea8dc5ae6a15a3a064f265e6ae9169eb", "score": "0.6168225", "text": "def add_to_playlist(self, playlist_name, video_id):\n playlist_id = playlist_name.lower()\n if not playlist_id in self.playlists.keys():\n print(\"Cannot add video to another_playlist: Playlist does not exist\")\n return\n\n if not self._video_library.get_video(video_id):\n print(f\"Cannot add video to {playlist_name}: Video does not exist\")\n return\n\n if video_id in self.playlists[playlist_id].videos:\n print(f\"Cannot add video to {playlist_name}: Video already added\")\n return\n\n video = self._video_library.get_video(video_id)\n self.playlists[playlist_id].videos.append(video_id)\n print(f\"Added video to {playlist_name}: {video.title}\")\n return", "title": "" }, { "docid": "36cf9d5420b82933c4f7aed9166fe5e9", "score": "0.61653817", "text": "def create(self, playlist):\n temp = playlist.lower()\n if not self._lists or not self._checkCase(temp):\n self._lists[playlist] = []\n print(f\"Successfully created new playlist: {playlist}\")\n\n elif self._checkCase(temp):\n print(\"Cannot create playlist: A playlist with the same name already exists\")", "title": "" }, { "docid": "73cfd1a7dffe8124dd5eb6d0ae288fb4", "score": "0.6163228", "text": "def create_playlist():\n with open(os.path.join(dist_folder, dist_name + '.m3u'),'w') as f:\n for song in new_playlist:\n f.write(song)\n print(\"Created new playlist that points to the new file locations, called 'Playlist'\")", "title": "" }, { "docid": "16230698bcde0ff0fe1562202c21be01", "score": "0.6133549", "text": "def append(queue, song):\n entry = create_entry(**song)\n\n with queue.lock:\n queue.append(entry)\n\n logger.debug(\"append: %s\", song)", "title": "" }, { "docid": "51c9e508895afd26b6e55c6bb7f8bf31", "score": "0.61268187", "text": "def add_song(self, name, year, title):\n album_found = find_object(name, self.albums)\n if album_found is None:\n print(name + \"Not found\")\n album_found = Album(name, year, self.name)\n else:\n print(\"Found album\" + name)\n\n album_found.add_song(title)", "title": "" }, { "docid": "be1273d2a272f82c8fd4760ccc20f2b4", "score": "0.61226547", "text": "def add(tags, query, fakeselect=False):\n songs = selectsongs('database', tags, query, fakeselect)\n for song in songs:\n if 'file' in song:\n client.add(song['file'])\n\n getattr(client, \"shuffle\", None)()", "title": "" }, { "docid": "c3e84fc2b28e3f541f9fd2f21a0cd26f", "score": "0.61202717", "text": "def add_items_playlist(self, playlist_id: str,\n uris: list, position: int = None) -> dict:\n # Creates the url for the request\n url = f\"{self.base}playlists/{playlist_id}/tracks\"\n\n # Add a new header with the content type identifier\n header = self.headers.copy()\n header.update({\"Content-Type\": \"application/json\"})\n\n # Create a body holding the arguments\n body = {\n \"uris\": uris\n }\n\n if position is not None:\n body.update({\"position\": position})\n\n body = json.dumps(body)\n\n # Create the request and grab the returned json file\n r = requests.post(url, headers=header, data=body).json()\n\n return r", "title": "" }, { "docid": 
"7353bd3f930eff9de65d6b33cb1c2652", "score": "0.6113952", "text": "def create_playlist(self, q, token):\n if self.user_id:\n user_id = self.user_id\n else:\n user_id = self._get_user_id(token)\n\n if q.isdigit() and db.session.query(Category).filter_by(id=q).first() is not None:\n playlist_name = db.session.query(Category).filter_by(id=q).first().name\n if db.session.query(Playlist).filter_by(name=playlist_name).first() is None:\n headers = {\n 'Authorization': token,\n }\n\n data = {\n \"name\": playlist_name,\n \"description\": f\"Your favorite {playlist_name} tracks\",\n \"public\": False\n }\n\n result = requests.post(\n url=f\"https://api.spotify.com/v1/users/{user_id}/playlists\",\n headers=headers,\n data=json.dumps(data)\n )\n\n if db.session.query(Playlist).filter_by(name=playlist_name).first() is None:\n playlist = Playlist(name=playlist_name, user_id=user_id, spotify_id=result.json().get('id'))\n db.session.add(playlist)\n db.session.commit()\n\n self._add_track(token, playlist_name, result.json().get('id'))\n #return result.json()\n return {\n \"message\": \"playlist is create\",\n \"playlist_name\": playlist_name,\n \"playlist_id\": result.json().get('id')\n }\n\n else:\n return {\n \"message\": \"playlist already exists\"\n }\n else:\n return {\n \"message\": \"You have to choose an existing category\"\n }", "title": "" }, { "docid": "648198ca7031ae4620d8d210637a8d2e", "score": "0.61114866", "text": "def add_tube(self,Tube):\n #Add it to the list\n self.Tubes.append(Tube)\n self.Tubes.update()", "title": "" }, { "docid": "541cc75c2a6ac428b795b1c8c7b0449a", "score": "0.60969865", "text": "def create_playlist_for_tag(self, client, playlist_name, pattern):\n #Keep track of existing playlists\n self._check_existing_playlists()\n if \"{}.m3u\".format(playlist_name) in self._playlists:\n print(\"There is already a playlist named {}\".format(playlist_name))\n return\n\n #Iterate through mp3 files under music director\n for (root, dirs, files) in os.walk(self._music_dir):\n for file_name in files:\n if re.search('\\.[Mm][Pp]3$', file_name):\n\n #Get track information\n track_info = self._get_track_information(root, file_name)\n\n #Get tags for a track\n Track_tags = lastfm.Track_tags()\n tags = Track_tags.get_tags_for_track(client, track_info.artist, track_info.title)\n #Iterate through tags\n for tag in tags:\n\n #Check if tag matches pattern\n if re.search(pattern, tag):\n self._add_to_playlist(playlist_name, track_info.length,\n track_info.artist, track_info.title, track_info.location)\n break\n\n #Check if new playlist was created\n if \"{}.m3u\".format(artist) not in self._playlists:\n print(\"Did not find any tags for {}\".format(pattern))", "title": "" }, { "docid": "7212f005c1313a4b2887a87ffb10f64a", "score": "0.60888356", "text": "def main(username, playlist, track_file):\n # Setup logger\n logging.basicConfig(level=logging.DEBUG)\n logging.debug(\"Starting...\")\n # Read the tracks\n tracks = track_file.read().splitlines()\n logging.debug(\"Found %d tracks\", len(tracks))\n add_tracks(username, playlist, tracks)\n return 0", "title": "" }, { "docid": "0469949be7d2ba3e6c2b6808f6642c3a", "score": "0.60842705", "text": "def add_to_playlist(self, playlist_name, video_id):\n try:\n playlist = self._playlists[playlist_name]\n video = self._videos[video_id]\n video.check_allowed()\n playlist.add_video(video)\n print(f\"Added video to {playlist_name}: {video.title}\")\n except (VideoPlaylistLibraryError, VideoPlayerError, VideoLibraryError, FlagError) as e:\n print(f\"Cannot add 
video to {playlist_name}: {e}\")", "title": "" }, { "docid": "ae8f4593cd519ac76f428798b17ce404", "score": "0.6078558", "text": "def AddToDownloaded(song):\n pass", "title": "" }, { "docid": "64ee1539f0f2392756200ec1b3a27875", "score": "0.60740525", "text": "def get_playlist_tracks(self):\n for playlist in self.config.parent.itunes.playlists:\n if playlist.name in self.playlists:\n for track in playlist.tracks:\n self.tracks.add(track)", "title": "" }, { "docid": "05723f055795445ee98019d93f1d156e", "score": "0.605712", "text": "def create_playlist(self, playlist_name):\r\n if playlist_name.upper() in list(self.playlists):\r\n print(\"Cannot create playlist: A playlist with the same name already exists\")\r\n else:\r\n self.playlists[playlist_name.upper()] = []\r\n self.lists_orig.append(playlist_name)\r\n self.lists_orig.sort(key = lambda x: x.lower())\r\n print(\"Successfully created new playlist: \"+playlist_name)", "title": "" }, { "docid": "4a6b19c2ef69ee61635075892fb5c842", "score": "0.6048197", "text": "async def play(self, ctx, *, query):\n\t\tif ctx is not None:\n\t\t\tawait ctx.message.delete()\n\t\t\n\t\tif query:\n\t\t\tinfo = music_tools.yt_extract_info(query)\n\t\t\tsong = music_tools.Song(info, ctx)\n\t\telse:\n\t\t\tsong = self.playlist.pop(0)\n\n\t\tif self.voice_client.is_playing():\n\t\t\tself.playlist.append(song)\n\t\telse:\n\t\t\t# Delete previous embed\n\t\t\tif self.last_song_embed is not None:\n\t\t\t\tawait self.last_song_embed.delete()\n\t\t\t# Create and send an Embed\n\t\t\tembed = discord.Embed(title=song.title, url=song.yt_url, color=0xf70006)\n\t\t\tembed.set_author(name=song.uploader, url=song.uploader_url)\n\t\t\tembed.set_thumbnail(url=song.thumbnail)\n\t\t\tembed.add_field(name=\"Vues\", value=song.views, inline=True)\n\t\t\tembed.add_field(name=\"Durée\", value=song.duration, inline=True)\n\t\t\tself.last_song_embed = await song.ctx.send(embed=embed)\n\n\t\t\tself.current_song = song\n\t\t\t\n\t\t\t# Play music\n\t\t\tafter_func = lambda ctx : self.play_next(ctx)\n\t\t\tsource = discord.FFmpegPCMAudio(song.url, **FFMPEG_OPTIONS)\n\t\t\tsource = discord.PCMVolumeTransformer(source, volume=1.0) # Transform source to enable volume control\n\t\t\tself.voice_client.play(source, after=after_func)", "title": "" }, { "docid": "d230607da34bb2b8b32704a3f14b8cac", "score": "0.6047819", "text": "def add_entry():\n\n try:\n track1 = Tracks(artist=request.form['artist'], album=request.form['album'], title=request.form['title'])\n db.session.add(track1)\n db.session.commit()\n logger.info(\"New song added: %s by %s\", request.form['title'], request.form['artist'])\n return redirect(url_for('index'))\n except:\n logger.warning(\"Not able to display tracks, error page returned\")\n return render_template('error.html')", "title": "" }, { "docid": "c6bce498fc999afc3884e2787c235517", "score": "0.6030855", "text": "def create_playlist(self, playlist_name):\n\n if playlist_name.lower() in self.playlist_names:\n print(\"Cannot create playlist: A playlist with the same name already exists\")\n else:\n self.playlists.append(Playlist(playlist_name.lower()))\n self.playlist_names.append(playlist_name)\n print(f\"Successfully created new playlist: {playlist_name}\")", "title": "" }, { "docid": "66ce78a29217fc12249e9fe0a224ade6", "score": "0.6012828", "text": "def addTrack(self, track):\n if track.kind == \"audio\":\n if self.__container.format.name in (\"wav\", \"alsa\"):\n codec_name = \"pcm_s16le\"\n elif self.__container.format.name == \"mp3\":\n codec_name = \"mp3\"\n else:\n 
codec_name = \"aac\"\n stream = self.__container.add_stream(codec_name)\n else:\n if self.__container.format.name == \"image2\":\n stream = self.__container.add_stream(\"png\", rate=30)\n stream.pix_fmt = \"rgb24\"\n else:\n stream = self.__container.add_stream(\"libx264\", rate=30)\n stream.pix_fmt = \"yuv420p\"\n stream.width = 360\n stream.height = 360\n self.__tracks[track] = MediaRecorderContext(stream)", "title": "" }, { "docid": "9ab40828c92b49ae1ac04437a8e4e343", "score": "0.6006444", "text": "def create_playlist(self, playlist_name):\n if playlist_name.upper() not in self.playlist_names_lst:\n self.playlist_lst.append(Playlist(playlist_name))\n self.playlist_names_lst.append(playlist_name.upper())\n print(f\"Successfully created new playlist: {playlist_name}\")\n else:\n print(\"Cannot create playlist: A playlist with the same name already exists\")", "title": "" }, { "docid": "c723747a8e233bbe7a9afaabdde2f1d4", "score": "0.5999757", "text": "def addSongFromDict(inDict):\n # If playlist, add all songs from inside\n if \"_type\" in inDict:\n if inDict[\"_type\"] == \"playlist\":\n return [addSongFromDict(entry) for entry in inDict[\"entries\"]]\n if inDict[\"_type\"] == \"url\":\n log.debug(\"Adding playlist song: \"+inDict[\"title\"])\n songDict = getSongOrInit(inDict[\"id\"])\n songDict[\"title\"] = inDict[\"title\"]\n return inDict[\"id\"]\n else: # Otherwise assume its a full song dict\n songDict = getSongOrInit(inDict[\"id\"])\n for myKey, theirKey in (\n (\"title\", \"title\"),\n (\"author\", \"uploader\"),\n (\"length\", \"duration\"),\n (\"songTitle\", \"alt_title\"),\n (\"songArtist\", \"artist\"),\n (\"songAlbum\", \"album\")\n ):\n songDict[myKey] = inDict[theirKey]\n for key in (\"formats\", \"requested_formats\"): # These are like horrendously long, and time-dependant so don't save it\n if key in inDict:\n del inDict[key]\n #songDict[\"ytinfo\"] = inDict # Sure it's a bit of redundant information, but it may be useful!\n return inDict[\"id\"]", "title": "" }, { "docid": "5710a014d1a362110371f1cfeffe0dea", "score": "0.5998568", "text": "def add_to_playlist(self, playlist_name, video_id):\n video = self._video_library.get_video(video_id)\n playlist = self._playlists.get(playlist_name.lower(), None)\n\n if not playlist:\n print(f\"Cannot add video to {playlist_name}: Playlist does not exist\")\n return None\n \n if not video:\n print(f\"Cannot add video to {playlist_name}: Video does not exist\")\n return None\n \n if video.is_flagged:\n print(f\"Cannot add video to {playlist_name}: Video is currently flagged {video.get_flag_reason()}\")\n return None\n\n if playlist.in_playlist(video.video_id):\n print(f\"Cannot add video to {playlist_name}: Video already added\")\n else:\n playlist.add_video(video)\n print(f\"Added video to {playlist_name}: {video.title}\")", "title": "" }, { "docid": "62e830501f5e8803486f68cf02615751", "score": "0.5997017", "text": "def addTrack(self, track):\n if track.kind == \"audio\":\n if self.__container.format.name in (\"wav\", \"alsa\"):\n codec_name = \"pcm_s16le\"\n elif self.__container.format.name == \"mp3\":\n codec_name = \"mp3\"\n else:\n codec_name = \"aac\"\n stream = self.__container.add_stream(codec_name)\n else:\n if self.__container.format.name == \"image2\":\n stream = self.__container.add_stream(\"png\", rate=30)\n stream.pix_fmt = \"rgb24\"\n else:\n stream = self.__container.add_stream(\"libx264\", rate=30)\n stream.pix_fmt = \"yuv420p\"\n self.__tracks[track] = MediaRecorderContext(stream)", "title": "" } ]
9fd26661050b991b6f943d1839838ff4
create spark session singleton
[ { "docid": "45cdbd6311a89744c2d5a8886058da2e", "score": "0.75396127", "text": "def create_spark_session():\r\n spark = SparkSession \\\r\n .builder \\\r\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\r\n .getOrCreate()\r\n return spark", "title": "" } ]
[ { "docid": "554b2f15e15e9ec5beee4734dc8b4df0", "score": "0.8047757", "text": "def get_session():\n return SparkSession.builder.config(conf=SparkConf()).getOrCreate()", "title": "" }, { "docid": "d6ae01809c2fbcd7f876bc0b068dd333", "score": "0.8015288", "text": "def create_spark_session():\n return SparkSession \\\n .builder \\\n .appName(\"Sales processor\") \\\n .master('local[*]') \\\n .getOrCreate()", "title": "" }, { "docid": "8f025066f2f407e4e4cf0e8573761ecf", "score": "0.79329634", "text": "def create_spark_session():\n\n return SparkSession.builder.getOrCreate()", "title": "" }, { "docid": "c9a655ff3ea19adcf601807b15bb6432", "score": "0.787245", "text": "def create_pyspark_session(cls):\n return (SparkSession.builder\n .master('local[4]')\n .appName('local-testing-pyspark')\n .getOrCreate())", "title": "" }, { "docid": "c50759fc2c54e5c60c657722bdbac718", "score": "0.7763101", "text": "def spark_session_create(self):\n tfmsa_logger(\"Spark Session Created\")\n\n conf = SparkConf()\n conf.setMaster('spark://{0}'.format(settings.SPARK_HOST))\n conf.setAppName('save_csv_to_df')\n conf.set('spark.driver.cores', settings.SPARK_CORE)\n conf.set('spark.driver.memory', settings.SPARK_MEMORY)\n conf.set('spark.executor.cores', settings.SPARK_WORKER_CORE)\n conf.set('spark.executor.memory', settings.SPARK_WORKER_MEMORY)\n\n self.sc = SparkContext(conf=conf)", "title": "" }, { "docid": "1ab9f8876454ec367383e45b5a44961a", "score": "0.7752179", "text": "def session(self):\n return SparkSession.builder.config(conf=self.conf)\\\n .config('spark.jars', \"/cs/unique/ls99-kf39-cs5052/sparknlp.jar\")\\\n .config(\"spark.driver.memory\", \"3g\")\\\n .getOrCreate()", "title": "" }, { "docid": "610ff97b64030b1a6dbdba61ca23a0e2", "score": "0.7729954", "text": "def create_spark_session():\n\n spark = (\n SparkSession.builder.master(\"spark://spark-master:7077\")\n .appName(\"cities_processor\")\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\")\n .getOrCreate()\n )\n return spark", "title": "" }, { "docid": "5cf3c53aef874f7fc0259d0bb934efff", "score": "0.76908123", "text": "def create_spark_session():\n spark = (\n SparkSession.builder.config(\n \"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\"\n )\n .appName(\"sparkstand\")\n .getOrCreate()\n )\n logging.info(\"Spark Session Created\")\n return spark", "title": "" }, { "docid": "00f1e255d2ae25200a391617c35728cf", "score": "0.7672434", "text": "def create_spark_session():\n \n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n \n # .config(\"spark.driver.memory\", \"15g\") \\\n \n return spark", "title": "" }, { "docid": "490d194f07d6d6a96c6f9f6a5b81e1ae", "score": "0.7650795", "text": "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .appName(\"Data Lake Sparkify\") \\\n .getOrCreate()\n return spark", "title": "" }, { "docid": "946ed52f10d67986352e0bb3e06bc28e", "score": "0.76412165", "text": "def spark_session_create(aws_spark_emr):\n spark = SparkSession.builder.config(aws_spark_emr).getOrCreate()\n return spark", "title": "" }, { "docid": "79e8ad14a135c4abe072b49a6c3dcafc", "score": "0.7624253", "text": "def create_spark_session():\n\n # \"time_tracker\" - timestamp to track running time\n total_execution_start = datetime.datetime.now()\n print(\"Started create_spark_session(): \" + str(total_execution_start))\n\n # create a new spark 
session\n spark = SparkSession \\\n .builder \\\n .appName(\"etl pipeline for project 4 - Data Lake\") \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n\n # set debug level to ERROR\n spark.sparkContext.setLogLevel('ERROR')\n\n # \"time_tracker\"\n total_execution_end = datetime.datetime.now()\n time_tracker(\"Data_Lake/etl.py/create_spark_session()\", total_execution_start, total_execution_end)\n\n return spark", "title": "" }, { "docid": "a93e413408a37ddde796a944eb273f09", "score": "0.7622908", "text": "def _load_spark_session(cls):\n cls.spark = SparkSession.builder \\\n .appName(\"ALS Recommendation Engine\") \\\n .master(\"local\") \\\n .getOrCreate()", "title": "" }, { "docid": "22ff31210f5d3dc2f7c8a5f81b809e93", "score": "0.76163346", "text": "def getSparkSessionInstance(sparkConf):\n if (\"sparkSessionSingletonInstance\" not in globals()):\n globals()[\"sparkSessionSingletonInstance\"] = SparkSession \\\n .builder \\\n .config(conf=sparkConf) \\\n .getOrCreate()\n return globals()[\"sparkSessionSingletonInstance\"]", "title": "" }, { "docid": "029ff3470668aa031d007e07e669ad59", "score": "0.7602559", "text": "def init_session(self):\n\n\t\tconfig = pyspark.SparkConf().setAll([(\"spark.dynamicAllocation.enabled\",\"True\"),\n\t\t\t\t\t\t\t\t\t(\"spark.executor.cores\",str(self.n_spark_workers))])\n\t\tself.sc = SparkContext(conf=config)\n\t\tself.ss = SparkSession(self.sc)", "title": "" }, { "docid": "70682f8000f8971900224232653c9b61", "score": "0.75862867", "text": "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate() \n print(\"Spark session created.\")\n return spark", "title": "" }, { "docid": "b5e36d2bfa90e4620b1ed65ae75c2b1c", "score": "0.75841004", "text": "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .appName('etl') \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "title": "" }, { "docid": "c910919c5cb989d0a2961a9c464b3bc4", "score": "0.75759256", "text": "def create_spark_session(): \n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "title": "" }, { "docid": "05db55521f1f443c2605a4a0a654667f", "score": "0.7575383", "text": "def spark_session():\n builder = SparkSession.builder \\\n .config('spark.sql.shuffle.partitions', 1) \\\n .config('spark.default.parallelism', 1) \\\n .config('spark.shuffle.compress', False) \\\n .config('spark.rdd.compress', False)\n\n # mleap pyspark wrappers have not been tested against scala 2.12. 
However,\n # they may still work as long as all jars are with the same scala version.\n classpath = os.environ['SCALA_CLASS_PATH']\n\n return builder \\\n .config('spark.driver.extraClassPath', classpath) \\\n .config('spark.executor.extraClassPath', classpath) \\\n .getOrCreate()", "title": "" }, { "docid": "a10ef18b43be61b70a08dd42b33cc551", "score": "0.7532003", "text": "def create_spark_session():\n\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "title": "" }, { "docid": "b44b65da1bf60e89b768d3582ac69594", "score": "0.7522131", "text": "def create_spark_session(): \n \n spark = SparkSession.builder.config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\").getOrCreate()\n return spark", "title": "" }, { "docid": "b44b65da1bf60e89b768d3582ac69594", "score": "0.7522131", "text": "def create_spark_session(): \n \n spark = SparkSession.builder.config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\").getOrCreate()\n return spark", "title": "" }, { "docid": "cde459fb71b82bc01d231cccacb2e414", "score": "0.75091684", "text": "def create_spark_session(mode): \n if mode == 'local':\n setMaster = \"local[2]\"\n elif mode == 'emr':\n setMaster = \"yarn\"\n\n conf = SparkConf() \\\n .setMaster(setMaster) \\\n .setAppName(\"ETL\") \\\n .set(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .set(\"spark.executor.memory\", \"5g\") \\\n .set(\"spark.driver.memory\", \"5g\") \\\n .set(\"spark.default.parallelism\", \"100\") \\\n .set(\"spark.executor.memoryOverhead\", \"5G\")\n\n spark = SparkSession \\\n .builder \\\n .config(conf=conf) \\\n .getOrCreate()\n\n return spark", "title": "" }, { "docid": "8644588cb5332cc1f4b591f578945c8f", "score": "0.74994695", "text": "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "title": "" }, { "docid": "8644588cb5332cc1f4b591f578945c8f", "score": "0.74994695", "text": "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "title": "" }, { "docid": "8644588cb5332cc1f4b591f578945c8f", "score": "0.74994695", "text": "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "title": "" }, { "docid": "8644588cb5332cc1f4b591f578945c8f", "score": "0.74994695", "text": "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "title": "" }, { "docid": "8644588cb5332cc1f4b591f578945c8f", "score": "0.74994695", "text": "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "title": "" }, { "docid": "8644588cb5332cc1f4b591f578945c8f", "score": "0.74994695", "text": "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "title": "" }, { "docid": "8644588cb5332cc1f4b591f578945c8f", "score": "0.74994695", "text": "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n 
.config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "title": "" }, { "docid": "c178d33125d023c4edf3ae8ee551416f", "score": "0.74979323", "text": "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.5\") \\\n .getOrCreate()\n return spark", "title": "" }, { "docid": "656c01a7857f9af6c3f286bbcfd4ca0e", "score": "0.7497679", "text": "def spark_session(app_name=None):\n if app_name is None:\n print(\"-Ophelia[INFO]: Initializing Spark Session [...]\")\n spark = SparkSession.builder.appName(\"set_app_name_next_time\").getOrCreate()\n print(\"-Ophelia[INFO]: Spark Session Initialized, Version:\", str(spark.version), \"[...]\")\n print(\"-Ophelia[WARN]: Please, Be Aware To Set App Name Next Time [...]\")\n print(\"====\"*18)\n return spark\n \n print(\"-Ophelia[INFO]: Initializing Spark Session [...]\")\n spark = SparkSession.builder.appName(app_name).getOrCreate()\n print(\"-Ophelia[INFO]: Spark Context Initialized Successfully [...]\")\n print(\"====\"*18) \n return spark", "title": "" }, { "docid": "dcf7be4ccbe56bddb01f881c30b1beaa", "score": "0.7486354", "text": "def create_spark_session():\n \n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", 
\"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark", "title": "" }, { "docid": "244666c056ef335d2e705b000958b9e4", "score": "0.7483679", "text": "def __init__(self, session: SparkSession, jsparkSession=None):\n seq_session = session._jvm.org.apache.spark.sql.SequilaSession(session._jsparkSession) # pylint: disable=W0212\n\n session._jvm.org.apache.spark.sql.SequilaSession.register(seq_session)\n session._jvm.org.biodatageeks.sequila.utils.UDFRegister.register(seq_session)\n session._jvm.SequilaSession.setDefaultSession(seq_session)\n sequila_session = SequilaSession._instantiatedSession\n\n self._sc = sequila_session._sc\n self._jsc = self._sc._jsc\n self._jvm = session._jvm\n\n if jsparkSession is None:\n if (\n self._jvm.SequilaSession.getDefaultSession().isDefined()\n and not self._jvm.SequilaSession.getDefaultSession().get().sparkContext().isStopped()\n ):\n jsparkSession = self._jvm.SequilaSession.getDefaultSession().get()\n else:\n jsparkSession = self._jvm.SequilaSession(self._jsc.sc())\n self._jsparkSession = jsparkSession\n self._jwrapped = self._jsparkSession.sqlContext()\n self._wrapped = SQLContext(self._sc, self, self._jwrapped)\n if SequilaSession._instantiatedSession is None or SequilaSession._instantiatedSession._sc._jsc is None:\n SequilaSession._instantiatedSession = self\n self._jvm.SparkSession.setDefaultSession(self._jsparkSession)", "title": "" }, { "docid": "38b8a3c5b51e3c76e30e5114a4a87e33", "score": "0.74624264", "text": "def create_spark_session():\n \n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\",\"saurfang:spark-sas7bdat:2.0.0-s_2.11\") \\\n .appName(\"i94_Immigration_Schema\") \\\n .enableHiveSupport() \\\n .getOrCreate()\n \n return spark", "title": "" }, { "docid": "6913c5dc40f396580aa3e52d2411d7a8", "score": "0.7456576", "text": "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .appName('DataLakes') \\\n .getOrCreate()\n spark.sparkContext._jsc.hadoopConfiguration().set(\"mapreduce.fileoutputcommitter.algorithm.version\", \"2\")\n return spark", "title": "" }, { "docid": "8cd4671f1042d5224deaac7c4d9922bc", "score": "0.74500674", "text": "def with_spark_session(func, conf={}):\n reset_spark_session_conf()\n _add_job_description(conf)\n _set_all_confs(conf)\n ret = func(_spark)\n _check_for_proper_return_values(ret)\n return ret", "title": "" }, { "docid": "8ea9f194f33417611ea9885394214b5c", "score": "0.7447759", "text": "def create_spark_session():\n \n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"saurfang:spark-sas7bdat:2.0.0-s_2.11,org.apache.hadoop:hadoop-aws:2.7.3\") \\\n .enableHiveSupport() \\\n .getOrCreate()\n return spark", "title": "" }, { "docid": "86e0aa95797d600f2e49370477d491b6", "score": "0.7439922", "text": "def createconnection():\n spark = SparkSession \\\n .builder \\\n .appName(\"StreamingJob\") \\\n .getOrCreate()\n return spark", "title": "" }, { "docid": "16fff7ac53e45cd31ef5d401d5a9e36f", "score": "0.7431194", "text": "def create_spark_session():\n spark = SparkSession.builder.config(\n \"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\"\n ).getOrCreate()\n return spark", "title": "" }, { "docid": "f36a37931d32595afd6827fd48835736", "score": "0.7368222", "text": "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n 
spark.conf.set(\"mapreduce.fileoutputcommitter.algorithm.version\", \"2\")\n return spark", "title": "" }, { "docid": "f36a37931d32595afd6827fd48835736", "score": "0.7368222", "text": "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n spark.conf.set(\"mapreduce.fileoutputcommitter.algorithm.version\", \"2\")\n return spark", "title": "" }, { "docid": "43fe97db0f747bcc6f8a368ef30d6f61", "score": "0.7365668", "text": "def create_spark_session():\n spark = SparkSession.builder.config(\"spark.jars.packages\",\"saurfang:spark-sas7bdat:2.0.0-s_2.11,org.apache.hadoop:hadoop-aws:2.7.0\")\\\n .enableHiveSupport().getOrCreate()\n return spark", "title": "" }, { "docid": "1c4a19a4e24dec1954f0bc24e8eda9a6", "score": "0.7356501", "text": "def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n\n spark.conf.set(\"mapreduce.fileoutputcommitter.algorithm.version\", \"2\")\n\n return spark", "title": "" }, { "docid": "51ad3259fa2ee41c83027a6cc032e27e", "score": "0.7348446", "text": "def create_spark_session():\n \n print(\"Preparing Spark session for the pipeline...\")\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"saurfang:spark-sas7bdat:2.0.0-s_2.11\") \\\n .enableHiveSupport().getOrCreate()\n \n print(\"Spark session preparation DONE.\")\n\n return spark", "title": "" }, { "docid": "84c11dc95a74908628695c48a24ea077", "score": "0.7313811", "text": "def create_spark_session():\n return SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()", "title": "" }, { "docid": "dfd153cde4d459c14b113c9bd78057e5", "score": "0.72654796", "text": "def create_spark_session(appName=\"myApp\"):\n\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .appName(appName) \\\n .getOrCreate()\n return spark", "title": "" }, { "docid": "62778855ed65ed4c89f53cf714b553e4", "score": "0.72413903", "text": "def get_spark_session():\n \n from pyspark.sql import SparkSession\n return SparkSession.builder.getOrCreate()", "title": "" }, { "docid": "440b4b6d0146305f9807c8872dbcc085", "score": "0.7207663", "text": "def spark_session(request):\n\n # Get the shaded JAR for testing purposes.\n shaded_jar = os.environ['SHADED_JAR_PATH']\n\n spark = SparkSession.builder \\\n .appName('Foresight-test') \\\n .master('local[2]') \\\n .config('spark.jars', shaded_jar) \\\n .config('hive.exec.dynamic.partition.mode', 'nonstrict') \\\n .config('spark.sql.warehouse.dir', mkdtemp()) \\\n .config('javax.jdo.option.ConnectionURL',\n 'jdbc:derby:memory:metastore_db;create=true') \\\n .enableHiveSupport() \\\n .getOrCreate()\n\n request.addfinalizer(lambda: spark.stop())\n\n return spark", "title": "" }, { "docid": "1b47b2a33feeb7274e0b8cfc0e8cfa14", "score": "0.71798325", "text": "def get_spark_session(conf: SparkConf):\n assert isinstance(conf, SparkConf)\n return SparkSession.builder.config(conf=conf).getOrCreate()", "title": "" }, { "docid": "510e33a27e69351f5d9d9116e35c90ea", "score": "0.7153494", "text": "def create_spark_session():\n \n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.4\") \\\n .getOrCreate()\n spark._jsc.hadoopConfiguration().set(\n \"spark.sql.parquet.output.committ‌​er.class\",\n 
\"org.apache.spark.sql.parquet.DirectParquetOutputCommitter\"\n )\n\n return spark", "title": "" }, { "docid": "edf9bc4f3c96cf7e593cddf64b1cc228", "score": "0.7109194", "text": "def bootstrap_test_spark_session(conf=None):\n conf = conf or _default_spark_configuration()\n spark = pyspark.sql.SparkSession.builder.config(conf=conf).getOrCreate()\n return spark", "title": "" }, { "docid": "446e73d1f759df53654d43b362bf6b9b", "score": "0.7098447", "text": "def spark_session(request):\n request.addfinalizer(lambda: spark.stop())\n\n return spark", "title": "" }, { "docid": "33633832d5bc8024789d37490a85af36", "score": "0.70557755", "text": "def spark():\n return SparkSession.builder \\\n .appName(\"data\") \\\n .master(\"spark://0.0.0.0:7077\") \\\n .getOrCreate()", "title": "" }, { "docid": "3ce0ebd909af21140b1a4edefb133078", "score": "0.70376873", "text": "def get_spark_session(yarn=True, verbose=False):\n sc = SparkContext(appName=\"cms-cpu-efficiency\")\n return SparkSession.builder.config(conf=sc._conf).getOrCreate()", "title": "" }, { "docid": "e49dd1b817de53275b4a3a7d501e676e", "score": "0.7027096", "text": "def get_spark():\n return SparkSession.builder \\\n .master(\"local\") \\\n .appName(\"cleanflow\") \\\n .config(\"spark.some.config.option\", \"config-value\") \\\n .getOrCreate()", "title": "" }, { "docid": "61b08ac16666b320abd544a9bc9e25a6", "score": "0.7008452", "text": "def create_spark_session(config_properties):\n sc_conf = SparkConf()\n for sc_conf_property in config_properties:\n sc_conf.set(sc_conf_property[0], sc_conf_property[1])\n sc = SparkContext(conf=sc_conf)\n spark = SparkSession(sc)\n\n return spark", "title": "" }, { "docid": "cfda8b86cabaf38b03fe2d7ec483da31", "score": "0.699072", "text": "def create_spark():\n conf = SparkConf()\\\n .setAppName(APP_NAME)\\\n .setMaster(\"local[4]\")\\\n .set(\"spark.executor.memory\", \"4g\")\\\n .set(\"spark.executor.cores\", \"4\")\\\n .set(\"spark.driver.cores\", \"2\")\\\n .set(\"spark.driver.memory\", \"2g\")\n sc = SparkContext(conf=conf)\n return sc", "title": "" }, { "docid": "d498a16c7dbf738dc8dc18c843a81c23", "score": "0.698328", "text": "def spark_session():\n spark = (\n SparkSession.builder.master('local[2]')\n .appName('SparkSession Transformations Tests')\n .getOrCreate()\n )\n spark.conf.set(\"spark.sql.execution.arrow.enabled\", True)\n spark.conf.set('spark.sql.session.timeZone', 'UTC')\n yield spark\n spark.stop()", "title": "" }, { "docid": "11911746b4803870a1d4e7470e3b8625", "score": "0.6969778", "text": "def create_spark_session(write_to_s3, config):\n \n if write_to_s3 is False:\n spark = SparkSession.builder \\\n .config(\"spark.jars.packages\",\"saurfang:spark-sas7bdat:2.1.0-s_2.11\") \\\n .enableHiveSupport() \\\n .getOrCreate()\n print(datetime.now(), ' STEP 1 - CREATE SPARK SESSION - DONE')\n else:\n aws_key = config.get('AWS', 'AWS_ACCESS_KEY_ID')\n aws_secret = config.get('AWS', 'AWS_SECRET_ACCESS_KEY')\n print('Reading credentials : {} {}'.format(aws_key, aws_secret))\n spark = SparkSession \\\n .builder \\\n .config('spark.jars.packages', 'saurfang:spark-sas7bdat:2.0.0-s_2.11,org.apache.hadoop:hadoop-aws:2.7.2') \\\n .enableHiveSupport() \\\n .getOrCreate()\n sc = spark.sparkContext\n sc._jsc.hadoopConfiguration().set(\"fs.s3a.access.key\", aws_key)\n sc._jsc.hadoopConfiguration().set(\"fs.s3a.secret.key\", aws_secret)\n sc._jsc.hadoopConfiguration().set(\"fs.s3a.endpoint\", \"s3.amazonaws.com\")\n sc._jsc.hadoopConfiguration().set(\"com.amazonaws.services.s3a.enableV4\", \"true\")\n 
sc._jsc.hadoopConfiguration().set(\"fs.s3a.impl\", \"org.apache.hadoop.fs.s3a.S3AFileSystem\")\n print(datetime.now(), ' STEP 1 - CREATE SPARK SESSION WITH S3 CONTEXT - DONE')\n return spark", "title": "" }, { "docid": "11e43af88be65cd35e28d5647d1caa3d", "score": "0.6955285", "text": "def init_sparksession(name: str=\"my-streaming-app\", shuffle_partitions: int=2,\n log_level: str=\"ERROR\"):\n # Grab the running Spark Session,\n # otherwise create it.\n spark = SparkSession \\\n .builder \\\n .appName(name) \\\n .getOrCreate()\n\n # Set logs to be quieter\n # Put WARN or INFO for debugging, but you will have to dive into\n # a sea of millions irrelevant messages for what you typically need...\n quiet_logs(spark.sparkContext, log_level=log_level)\n\n # keep the size of shuffles small\n spark.conf.set(\"spark.sql.shuffle.partitions\", shuffle_partitions)\n\n return spark", "title": "" }, { "docid": "07354d8956e9aa6458d64c8b4abc817d", "score": "0.69051903", "text": "def get(cls, app_name, params_func=None):\n\n if app_name not in cls.SESSIONS:\n session = ps.SparkSession.builder \\\n .appName(app_name)\\\n #.config(\"spark.ui.port\", \"6667\")\n if params_func:\n params_func(session)\n\n session = session.getOrCreate()\n context = session.sparkContext\n context.setLogLevel(\"ERROR\")\n\n cls.SESSIONS[app_name] = (session, context)\n return cls.SESSIONS[app_name]", "title": "" }, { "docid": "72de37569e149cdab77688cf3e3af331", "score": "0.68999594", "text": "def create_session(self):\n self.check_alive_sessions()\n if(self.max_sess_num < self.alive_sess_cnt):\n print(\"exceed max session number\")\n return False\n\n data = {'kind': 'pyspark',\n \"name\": \"tensormsa\",\n \"executorCores\": 1,\n \"executorMemory\": \"512m\",\n \"driverCores\": 1,\n \"driverMemory\": \"512m\"}\n r = requests.post(self.host + \"/sessions\", data=json.dumps(data), headers=self.headers)\n print(r.json())\n print(r.json()['id'])\n\n return r.json()['id']", "title": "" }, { "docid": "09be1bd737c5b579f4db9f290c1f8768", "score": "0.6694214", "text": "def start_spark_context(spark_session=None):\n print(\"-Ophelia[INFO]: Initializing Spark Context [...]\")\n if spark_session is None:\n print(\"-Ophelia[FAIL]: Please, set spark session argument [...]\")\n return None\n \n print(\"-Ophelia[INFO]: Spark Context Initialized Successfully [...]\")\n print(\"====\"*18)\n return spark_session.sparkContext", "title": "" }, { "docid": "fd1c4815fcc4b2a4389ddd9ffd9dabea", "score": "0.66651595", "text": "def spark_context(self):\n if not self._spark_context and 'sc' not in locals():\n findspark.init()\n conf = SparkConf()\n conf.setMaster(self._spark_master)\n conf.setAppName(self._spark_app_name)\n\n for name, value in self._spark_conf.items():\n conf.set(name, value)\n\n self._spark_context = SparkContext.getOrCreate(conf=conf)\n elif 'sc' in locals():\n self._spark_context = sc\n return self._spark_context", "title": "" }, { "docid": "968d2e6486fdcae5949375e4364a958a", "score": "0.6664732", "text": "def context(self):\n return SparkContext(conf=self.conf)", "title": "" }, { "docid": "4bfda33f6de09172349955530d62bd31", "score": "0.6660643", "text": "def getSparkContext():\n conf = (SparkConf()\n .setMaster(\"local\") # run on master/local\n .setAppName(\"Logistic Regression\") # Name of App\n .set(\"spark.executor.memory\", \"1g\")) # Set 1 gig of memory\n sc = SparkContext(conf = conf) \n return sc", "title": "" }, { "docid": "9f56652ebec4b6a55fca6dadf433dd0a", "score": "0.6638337", "text": "def get_spark_session(name: str, 
additional_conf: dict, get_optimal=False) -> SparkSession:\n\n environ = additional_conf.pop('environ')\n os.environ['SPARK_HOME'] = environ['spark_home']\n os.environ['PYSPARK_PYTHON'] = environ['pyspark_python']\n os.environ['PATH'] = f\"/bin:{os.environ['PATH']}\"\n\n if get_optimal:\n num_nodes = check_output(\"yarn node -list | grep 'Total Nodes:'\",\n shell=True) \\\n .decode('utf-8') \\\n .replace('\\n', '') \\\n .replace('Total Nodes:', '')\n\n additional_conf[\"spark.dynamicAllocation.maxExecutors\"] = str(int(num_nodes) * 2)\n additional_conf[\"spark.default.parallelism\"] = str(int(num_nodes) * 4)\n additional_conf[\"spark.sql.shuffle.partitions\"] = str(int(num_nodes) * 8)\n\n spark_session = SparkSession \\\n .builder \\\n .appName(name) \\\n .master('yarn')\n\n for key, value in additional_conf.items():\n spark_session.config(key=key, value=value)\n\n spark_session = spark_session \\\n .enableHiveSupport() \\\n .getOrCreate()\n\n return spark_session", "title": "" }, { "docid": "8a952700575b3b87d4d656a9c09b61c0", "score": "0.6618003", "text": "def create_session() -> _Session:\n return _Session()", "title": "" }, { "docid": "3461d62727d3e266320267822465288a", "score": "0.65996355", "text": "def local_spark(app_name='data', num_cpus='*') -> SparkSession:\n return SparkSession.builder \\\n .appName(app_name) \\\n .master(\"local[{}]\".format(num_cpus)) \\\n .getOrCreate()", "title": "" }, { "docid": "f850d897a4e0b1b62946624523164e92", "score": "0.6593611", "text": "def __init__(self, sparkContext):\n self.sc = sparkContext", "title": "" }, { "docid": "beb83dadba665f0c42ca6fcda28d2206", "score": "0.65833473", "text": "def get_session():\n\n return Session()", "title": "" }, { "docid": "05376cbca24919ce2960574dc2ea5f38", "score": "0.65683514", "text": "def getSparkContext():\n conf = (SparkConf()\n .setMaster(\"local\") # run on local\n .setAppName(\"Logistic Regression\") # Name of App\n .set(\"spark.executor.memory\", \"1g\")) # Set 1 gig of memory\n sc = SparkContext(conf = conf)\n return sc", "title": "" }, { "docid": "caf5b705d24c8caa13461a71328310b1", "score": "0.6560145", "text": "def _get_exist_spark_context(self, jconf):\n return self._jvm.JavaSparkContext(\n self._jvm.org.apache.spark.SparkContext.getOrCreate(jconf)\n )", "title": "" }, { "docid": "a194039d117b91be777e996e71ce9a7c", "score": "0.6526357", "text": "def build_spark() -> ps.sql.SparkSession:\n spark = (ps.sql.SparkSession.builder\n .master('local[8]')\n .appName('sparkSQL')\n .getOrCreate())\n spark.conf.set(\"spark.debug.maxToStringFields\", \"9999\")\n spark.conf.set(\"spark.debug.maxPlanStringLength\", \"9999\")\n return spark", "title": "" }, { "docid": "952b2e85c3836d5733319da39bfd2637", "score": "0.64703304", "text": "def make_session(self):\n return self.sessionmaker()", "title": "" }, { "docid": "7c857464fca7c258aea134f85318efe6", "score": "0.64682335", "text": "def with_cpu_session(func, conf={}):\n copy = dict(conf)\n copy['spark.rapids.sql.enabled'] = 'false'\n return with_spark_session(func, conf=copy)", "title": "" }, { "docid": "a67a0088e545e7e1578c9ab17bb6b2d0", "score": "0.6333464", "text": "def new_session(self) -> Session:\n return self.__session_maker()", "title": "" }, { "docid": "72d8ab79753f6c60c57b78eefde0c87b", "score": "0.62752503", "text": "def init_session():\n credentials = get_config().credentials\n if credentials:\n return boto3.Session(**credentials)\n else:\n profile = get_config().profile\n if not profile:\n raise TaggercoreConfigError(\n \"No profile and no credentials 
found. Please set the configuration before using tagging classes\"\n )\n return boto3.Session(profile_name=profile)", "title": "" }, { "docid": "d6e4671921a88c2ac0629b7a6b2ac734", "score": "0.6263158", "text": "def _session() -> object:\n _session = Config(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)\n return _session.create_client()", "title": "" }, { "docid": "50a6e170be3d85e273ea1292ad68b44a", "score": "0.6259879", "text": "def spark_context(request):\n conf = (SparkConf().setMaster(\"local[2]\").setAppName(\"SparkTest\"))\n sc = SparkContext(conf=conf)\n sc.setCheckpointDir('checkpoint') # Stackoverflow error\n request.addfinalizer(lambda: sc.stop())\n\n quiet_py4j()\n return sc", "title": "" }, { "docid": "1b3d265a04ce7070862127873f10d6ed", "score": "0.6257857", "text": "def initialize_spark():\n conf = (SparkConf().setAppName('FeatureConstruction').\n # Allow spark to overwrite output files\n set(\"spark.hadoop.validateOutputSpecs\", \"false\"))\n return SparkContext(conf=conf)", "title": "" }, { "docid": "3416e90b433ce8a61582eae2147d2d15", "score": "0.6253373", "text": "def single_threaded_session(self):\n return self.init_session(num_cpu=1)", "title": "" }, { "docid": "213a85568329054c9bef8c564d815068", "score": "0.6251322", "text": "def single_threaded_session():\n return make_session(1)", "title": "" }, { "docid": "6ad5759b0e0dd4b571a874b5e4efe740", "score": "0.62435234", "text": "def create_session(self): # pylint: disable=E0202\n return tf.Session()", "title": "" }, { "docid": "ab44344015412b4e1ee82ca74bd7cc28", "score": "0.6238381", "text": "def init(self):\n super(SparkWrapper, self).init('spark')", "title": "" }, { "docid": "4e896552a2f85ed3d86318227b2ff9eb", "score": "0.6235708", "text": "def get_session() -> Session:\n return session_cls()", "title": "" }, { "docid": "025a8c0fd5674075ac3894827d39debb", "score": "0.6232708", "text": "def __init__(self):\n self._sc = SparkContext.getOrCreate()\n\n self._logger = util.get_logger(\n self._sc, self.__class__.__name__)", "title": "" }, { "docid": "a832f25323278a1179901e57debd93ea", "score": "0.6231593", "text": "def __call__(self, store):\n data_set = store.model_class.resolve()\n\n # support context access\n try:\n session = data_set.session\n except AttributeError:\n session = None\n\n if session is not None:\n return session\n\n # support thread local access\n try:\n thread_local = data_set.local\n except AttributeError:\n thread_local = data_set.local = local()\n\n try:\n session = thread_local.session\n except AttributeError:\n pass\n\n if session is None:\n session = data_set.new_session(\n graph=self.graph,\n expire_on_commit=self.expire_on_commit,\n )\n thread_local.session = session\n\n return session", "title": "" }, { "docid": "80127b0f8f4f64949b534dd3675a76e4", "score": "0.623058", "text": "def define_session():\n global STS\n global PROFILE\n\n session = boto3.Session(profile_name=PROFILE)\n STS = session.client('sts')", "title": "" }, { "docid": "9ad10d2c98aea0e2a92dad365db2de47", "score": "0.61911964", "text": "def get_session(self):\n\n # create a session\n create_session = sessionmaker(class_=ShardedSession)\n\n # configure the session with the shard lookup\n create_session.configure(shards={\n 0:self.dbs[0],\n 1:self.dbs[1],\n 2:self.dbs[2]\n })\n\n create_session.configure(\n shard_chooser=self.shard_chooser,\n id_chooser=self.id_chooser,\n query_chooser=self.query_chooser\n )\n return create_session()", "title": "" }, { "docid": "f33736416f1eb223206afa1abdd24209", "score": "0.6183467", "text": "def 
new_session(self):\n if not self.session:\n Session = sessionmaker(bind=self.engine)\n self.session = Session()", "title": "" } ]
dbe20b962bcf7dac313794a3b2d4e57f
A utility function that tests whether the value of the next token label equals a given token label. This method consumes a token from the lexer if and only if there is a match. Either way, a boolean is returned indicating the match status. If `consume` is false then no tokens will ever be consumed. Otherwise, and by default, a token will be consumed if and only if it matches. The parameter `peeklevel` is passed to the peek function for how far ahead to look; the default is one. If `raise_on_fail` is set true then a `LexerException` will be raised by default if the match fails. The default can be changed by setting the lexer instance attribute `default_helper_exception`. Similarly, `raise_on_success` raises an exception when a match is found. Either one can be set to a subclass of `Exception` instead of a boolean, and then that exception will be called. The parameter `err_msg_tokens` can be set to change how many tokens' worth of text the error messages report back (as debugging information) when an exception is raised. (The count does not include whitespace, but it is printed, too.)
[ { "docid": "51d2ad81eb6f6f9334d17ef83afa5c9e", "score": "0.75924224", "text": "def match_next(self, token_label_to_match, peeklevel=1, consume=True,\n raise_on_fail=False, raise_on_success=False,\n err_msg_tokens=3):\n # TODO: Consider a way for users to define custom error strings for\n # better error-reporting.\n retval = False\n if token_label_to_match == self.peek(peeklevel).token_label:\n retval = True\n if consume and retval:\n self.next() # Eat the token that was matched.\n\n if retval and raise_on_success:\n exception = return_first_exception(raise_on_success,\n self.default_helper_exception)\n raise exception(\n \"Function match_next (with peeklevel={0}) found unexpected \"\n \"token {1}. The text of the {2} tokens up to \"\n \"the error is: {3}\" # TODO fix below, fails with parser\n .format(peeklevel, str(self.peek(peeklevel)), err_msg_tokens,\n self.last_n_tokens_original_text(err_msg_tokens)))\n if not retval and raise_on_fail:\n exception = return_first_exception(raise_on_fail,\n self.default_helper_exception)\n raise exception(\n \"Function match_next (with peeklevel={0}) expected token \"\n \"with label '{1}' but found token {2}. The text parsed \"\n \"from the tokens up to the error is: {3}\" # TODO fix below, fails\n .format(peeklevel, token_label_to_match,\n str(self.peek(peeklevel)),\n self.last_n_tokens_original_text(err_msg_tokens)))\n return retval", "title": "" } ]
[ { "docid": "9605df346bee22f91cce408c8e5a75e9", "score": "0.501809", "text": "def __consume(self, expected: TokenType, err_msg: str):\n if self.__match(expected):\n token = self.__peek()\n self.__advance()\n return token\n else:\n error_handler.parse_error(self.__peek(), err_msg)\n raise ParseError(err_msg)", "title": "" }, { "docid": "b10bb254d3c9ca7cc8010794c201ea95", "score": "0.49298465", "text": "def _scanNextToken( self ):\n assert isinstance( self._lexList, list )\n assert isinstance( self._lexDict, dict )\n assert isinstance( self._tok, int ) or ( self._tok is None )\n assert isinstance( self._buffer, ScannerBuffer )\n assert isinstance( self._tokPos, int )\n assert isinstance( self._tokLineNum, int )\n assert isinstance( self._tokColNum, int )\n assert isinstance( self._tokLineText, str )\n\n self._buffer.consumePast( self.WHITE_SPACE ) # skip whitespace\n theNextChar = self._buffer.peek()\n if theNextChar == None:\n self._buffer.setMark( )\n return Token.EOF\n elif theNextChar in ',': # comma\n self._buffer.setMark( )\n self._buffer.consume( )\n return Token.COMMA\n elif theNextChar in self.PUNCT: # punctuation\n self._buffer.setMark( )\n self._buffer.consume( )\n return Token.PUNCT\n elif theNextChar in '([{': # open\n self._buffer.setMark( )\n self._buffer.consume( )\n return Token.OPEN\n elif theNextChar in ')]}': # close\n self._buffer.setMark( )\n self._buffer.consume( )\n return self._lookupSymbolLex( Token.CLOSE, self.peekLex() )\n elif theNextChar in (self.ALPHA_CAP + self.DIGIT): # Name Symbol\n self._buffer.setMark( )\n self._buffer.consumePast(self.ALPHA_CAP + self.DIGIT)\n self._buffer.consumeIf( ':' )\n return self._lookupSymbolLex( Token.SYMBOL, self.peekLex() )\n elif theNextChar in '-~': # Negation or Op Symbol\n self._buffer.setMark( )\n self._buffer.consume( )\n if self._buffer.peek( ) in '-~':\n return self._lookupSymbolLex( Token.SYMBOL, self.peekLex() )\n else:\n self._buffer.consumePast( self.SYMBOL + self.SIGN )\n return self._lookupSymbolLex( Token.SYMBOL, self.peekLex() )\n elif theNextChar in self.SYMBOL: # Op Symbol\n self._buffer.setMark( )\n self._buffer.consumePast( self.SYMBOL + self.SIGN )\n return self._lookupSymbolLex( Token.SYMBOL, self.peekLex() )\n elif theNextChar in self.OBJECT: # Object Symbol\n self._buffer.setMark( )\n self._buffer.consume( )\n return self._lookupSymbolLex( Token.OBJECT, self.peekLex() )\n elif theNextChar in self.VARIABLE: # Variable Symbol\n self._buffer.setMark( )\n self._buffer.consume( )\n return self._lookupSymbolLex( Token.VARIABLE, self.peekLex() )\n else: # Unknown\n self._buffer.consume( )\n return Token.UNKNOWN", "title": "" }, { "docid": "d3e2b306666363ac26aeeae17a89769d", "score": "0.46770447", "text": "def has_next(peekable: more_itertools.peekable) -> bool:\n try:\n peekable.peek()\n return True\n except StopIteration:\n return False", "title": "" }, { "docid": "91626340a1c41e88a172c412a5e751aa", "score": "0.45198596", "text": "def _peek(self, tokens):\n token = self._tokenizer.token()\n if type(tokens) is list:\n # Expecting one of a number of tokens in a list.\n if token in tokens:\n return token\n else:\n return None\n else:\n # Expecting a specific token.\n if token == tokens:\n return token\n else:\n return None", "title": "" }, { "docid": "ebe7d6a6b5c5a1ba9f29866a26c75dfe", "score": "0.45112723", "text": "def in_ignored_tokens(self, token_label_to_match,\n raise_on_fail=False, raise_on_success=False):\n retval = False\n ignored_token_labels = [t.token_label for t in self.peek().ignored_before]\n if 
token_label_to_match in ignored_token_labels:\n retval = True\n\n if retval and raise_on_success:\n exception = return_first_exception(raise_on_success,\n self.default_helper_exception)\n raise exception(\n \"Function in_ignored_tokens found unexpected token with \"\n \"label '{0}' before the current token {1}.\"\n .format(token_label_to_match, str(self.token)))\n if not retval and raise_on_fail:\n exception = return_first_exception(raise_on_fail,\n self.default_helper_exception)\n raise exception(\n \"Function in_ignored_tokens expected token with label \"\n \"'{0}' before the current token {1}, but it was not found.\"\n .format(token_label_to_match, str(self.token)))\n return retval", "title": "" }, { "docid": "b7357fedf7a5b6abf26c5eb0b3878fbd", "score": "0.44945878", "text": "def pos_ok(token_pos, tokid, pos_limit):\n if pos_limit:\n return token_pos[tokid] in pos_limit.split()\n else:\n return True", "title": "" }, { "docid": "9d0ff928b6ff633100973d3c2b30fb05", "score": "0.44715393", "text": "def eat(self,tk):\n if tk == self.next():\n return self.advance()\n else:\n where = self.report()\n err1 = \"Unexpected token at \"+where+\". \"\n err2 = \"Saw: '\"+self.next()+\"'. \"\n err3 = \"Expected: '\"+tk+\"'. \"\n raise SyntaxError(err1 + err2 + err3)", "title": "" }, { "docid": "0d9f1a6d372e68b585ff05110bf265b0", "score": "0.44133952", "text": "def _consume(self, rules, subject):\n if not rules:\n # An empty ruleset matches everything.\n return True, rules, subject\n\n if not subject and rules != [anything]:\n # Apart from [anything], no non-empty ruleset matches an\n # empty subject.\n return False, rules, subject\n\n # Figure out which rule we'll be applying. We won't need it\n # again, so we can remove it from the ruleset.\n rule_token = rules.pop(0)\n if rule_token == anything:\n # This is the complicated one.\n\n if not rules:\n # If the final rule is 'anything', then that's redundant,\n # but we can declare success and stop.\n return True, rules, subject\n\n # At this point we know that 'anything' is followed by some\n # other rule token.\n next_rule = rules.pop(0)\n\n # We can consume as many subject tokens as necessary, but\n # eventually a subject token must match this subsequent\n # rule token.\n while subject:\n subject_token = subject.pop(0)\n submatch, ignore1, ignore2 = self._consume(\n [next_rule], [subject_token]\n )\n if submatch:\n # We had to remove some number of subject tokens,\n # but we found one that matches the next rule.\n return True, rules, subject\n else:\n # That token didn't match, but maybe the next one will.\n pass\n\n # We went through the entire remaining subject and didn't\n # find a match for the rule token that follows 'anything'.\n return False, rules, subject\n\n # We're comparing two individual tokens.\n subject_token = subject.pop(0)\n if isinstance(rule_token, CustomMatchToken):\n match = rule_token.matches(subject_token)\n elif rule_token == nonfiction:\n # This is too complex to be a CustomMatchToken because\n # we may be modifying the subject token list.\n match = subject_token not in (\n 'juvenile fiction', 'young adult fiction', 'fiction'\n )\n if match and subject_token not in (\n 'juvenile nonfiction', 'young adult nonfiction'\n ):\n # The implicit top-level lane is 'nonfiction',\n # which means we popped a token like 'History' that\n # needs to go back on the stack.\n subject.insert(0, subject_token)\n else:\n # The strings must match exactly.\n match = rule_token == subject_token\n return match, rules, subject", "title": "" }, { 
"docid": "ef2b2727b3f30b218d0f3afa05c6c8cd", "score": "0.4336766", "text": "def match(self, x):\n if self.LA(1) == x:\n self.consume()\n else:\n raise Exception(\"exception %s; %s\" % (self.input.get_token_name(x), self.lookahead))", "title": "" }, { "docid": "94fc6b366b2160f52f81191f8a985862", "score": "0.43331122", "text": "def check_token(cls, expected, actual):\n if expected & actual == 0:\n raise ParseException('Unexpected Token at position %d' % cls.tokens.get_cursor_position())", "title": "" }, { "docid": "06125b565ed77cedf01840fca8497971", "score": "0.43279356", "text": "def nextToken(self):\n if self._token == 'eof':\n return False\n while not self._tokens:\n if not self._lines:\n self._token = 'eof'\n return False\n self._line += 1\n self._tokens, rest = self._scanner.scan(self._lines.pop(0))\n if rest:\n self._reporter.error(self._line, \"invalid token encountered at \" + rest.strip())\n self._token = self._tokens.pop(0)\n return True", "title": "" }, { "docid": "108d30672df01132c2ff87269c2e7f52", "score": "0.4269153", "text": "def consume(self, token_type: TokenType, error_message: str):\n if self.check(token_type):\n return self.advance()\n\n raise self.error(self.peek(), error_message)", "title": "" }, { "docid": "edf5637616410703b4e021e3ae0090e3", "score": "0.4267682", "text": "def single_label_accuracies(gold, silver,\n test_tokens, known_tokens,\n print_scores=True):\n\n kno_corr, unk_corr = 0.0, 0.0\n nb_kno, nb_unk = 0.0, 0.0\n\n for gold_pred, silver_pred, tok in zip(gold, silver, test_tokens):\n\n if tok in known_tokens:\n nb_kno += 1\n if gold_pred == silver_pred:\n kno_corr += 1\n else:\n nb_unk += 1\n if gold_pred == silver_pred:\n unk_corr += 1\n\n all_acc = (kno_corr + unk_corr) / (nb_kno + nb_unk)\n kno_acc = kno_corr / nb_kno\n\n # account for situation with no unknowns:\n unk_acc = 0.0\n if nb_unk > 0:\n unk_acc = unk_corr / nb_unk\n\n if print_scores:\n print('+\\tall acc:', all_acc)\n print('+\\tkno acc:', kno_acc)\n print('+\\tunk acc:', unk_acc)\n\n return all_acc, kno_acc, unk_acc", "title": "" }, { "docid": "6521d81bcea3c2aaec039852bf6b6451", "score": "0.42458734", "text": "def consume(self, expected_type):\n curr_token = self.current_token()\n if curr_token.type == expected_type:\n self.advance()\n return curr_token\n else:\n expected_token = Token.tok_name(expected_type)\n self.raise_parser_error(\n 'Token mismatch at function consume. 
'\n 'Expected type \"%s\" but got token \"%s\"\\n\\n'\n 'Tokens: %s\\n' % (expected_token, curr_token, self._tokens)\n )", "title": "" }, { "docid": "48dfccd084d8a29f4c30e6ce4d540dd0", "score": "0.4240582", "text": "def has_consumed(self, typ):\n if self.peek == typ:\n self.consume()\n return True\n return False", "title": "" }, { "docid": "9e31ddc2bd0f5d52c528268da9adad03", "score": "0.42324087", "text": "def recall(gold, parse, ignore_labels=True):\n\n parsebrackets = list_brackets(parse)\n goldbrackets = list_brackets(gold)\n\n parsebrackets_u = list_brackets(parse, ignore_labels=True)\n goldbrackets_u = list_brackets(gold, ignore_labels=True)\n\n if ignore_labels:\n candidate = parsebrackets_u\n gold = goldbrackets_u\n else:\n candidate = parsebrackets\n gold = goldbrackets\n\n total = len(gold)\n successes = 0\n for bracket in gold:\n if bracket in candidate:\n successes += 1\n return float(successes) / float(total)", "title": "" }, { "docid": "4df140930e44f1c88f0b139a2a2e7e4d", "score": "0.4224258", "text": "def consume(self, token):\n if self.inspect(token):\n self.terms.pop()\n return True\n return False", "title": "" }, { "docid": "a8dd24640de4b57597891a015f8c0b80", "score": "0.41969737", "text": "def _good_token(self, token, offset, start=None):\r\n if start is None:\r\n start = self.offset\r\n try:\r\n comment_index = self.source.rindex('#', start, offset)\r\n except ValueError:\r\n return True\r\n try:\r\n new_line_index = self.source.rindex('\\n', start, offset)\r\n except ValueError:\r\n return False\r\n return comment_index < new_line_index", "title": "" }, { "docid": "a8dd24640de4b57597891a015f8c0b80", "score": "0.41969737", "text": "def _good_token(self, token, offset, start=None):\r\n if start is None:\r\n start = self.offset\r\n try:\r\n comment_index = self.source.rindex('#', start, offset)\r\n except ValueError:\r\n return True\r\n try:\r\n new_line_index = self.source.rindex('\\n', start, offset)\r\n except ValueError:\r\n return False\r\n return comment_index < new_line_index", "title": "" }, { "docid": "edc8a7ff1c95fcd9dc07782bc1224c0d", "score": "0.41943836", "text": "def eat(self, match):\n ch = self.peek()\n ok = match(ch) if callable(match) else match == ch\n if ok:\n self.pos += 1\n\n return ok", "title": "" }, { "docid": "7640370f9adb1c4d34fc43944493d337", "score": "0.41594383", "text": "def multilabel_accuracies(gold, silver,\n test_tokens, known_tokens,\n print_scores=True):\n kno_corr, unk_corr = 0.0, 0.0\n nb_kno, nb_unk = 0.0, 0.0\n\n for gold_pred, silver_pred, tok in zip(gold, silver, test_tokens):\n gold_pred = set(gold_pred.split('|'))\n silver_pred = set(silver_pred.split('|'))\n if tok in known_tokens:\n nb_kno += 1\n if gold_pred == silver_pred:\n kno_corr += 1\n else:\n nb_unk += 1\n if gold_pred == silver_pred:\n unk_corr += 1\n\n all_acc = (kno_corr + unk_corr) / (nb_kno + nb_unk)\n kno_acc = kno_corr / nb_kno\n\n # account for situation with no unknowns:\n unk_acc = 0.0\n if nb_unk > 0:\n unk_acc = unk_corr / nb_unk\n\n if print_scores:\n print('+\\tall acc:', all_acc)\n print('+\\tkno acc:', kno_acc)\n print('+\\tunk acc:', unk_acc)\n\n return all_acc, kno_acc, unk_acc", "title": "" }, { "docid": "613c94257e8387ebfed359e888d7e037", "score": "0.41590932", "text": "def check(self, token_type: TokenType) -> bool:\n if self.isAtEnd():\n return False # Todo : Is this nessary?\n\n return self.peek().type == token_type", "title": "" }, { "docid": "b5d54febed0a38960ce8eeeba1efd4f1", "score": "0.4155827", "text": "def _parse(self, input, stack, 
state):\n read_next = True\n count = 0\n while state != self._halting_state:\n if read_next:\n try:\n lookahead = input.next()\n except StopIteration:\n return (False,count,state,None)\n read_next = False\n token = lookahead[0]\n\n if (state,token) in self._shift:\n stack.append((state,lookahead))\n state = self._shift[(state,token)]\n read_next = True\n count += 1\n elif (state,token) in self._reduce:\n X,n = self._reduce[(state,token)]\n if n > 0:\n state = stack[-n][0]\n tree = (X,) + tuple(s[1] for s in stack[-n:])\n del stack[-n:]\n else:\n tree = (X,)\n stack.append((state,tree))\n state = self._goto[(state,X)]\n else:\n return (False,count,state,lookahead)\n return (True,count,state,None)", "title": "" }, { "docid": "0a85ca6f62571e01ddaaa5b27afe2f02", "score": "0.4141006", "text": "def consume(self, typ=None):\n if typ is None:\n typ = self.peek\n\n expected_types = typ if isinstance(typ, (list, tuple, set)) else [typ]\n\n if self.peek in expected_types:\n return self.next_token()\n else:\n expected = make_comma_or(expected_types)\n self.error('Expected {0}, got \"{1}\"'.format(expected, self.peek))", "title": "" }, { "docid": "e6b70222d5f2da28fd99a3fe2885d2e4", "score": "0.41010812", "text": "def peekNextToken(self) -> Optional[Token]:\n if self.isNotFinished():\n return self.__tokens[self.__nextIx]\n return None", "title": "" }, { "docid": "996550b7d0ac5357f72115b8cbd4c3f4", "score": "0.40990472", "text": "def go_back(self, num_toks=1, num_is_raw=False):\n\n if not self.text_is_set:\n raise LexerException(\n \"Attempt to call lexer's go_back method when no text is set.\")\n if self.token_generator_state == GenTokenState.uninitialized:\n raise LexerException(\"The token generator has not been initialized \"\n \"or has reached `StopIteration` by reading past the end-token.\")\n\n token_buffer = self.token_buffer # Shorter alias.\n\n # For negative values just pop the required number off the end of token_buffer.\n if num_toks < 0:\n peekahead_num = abs(num_toks)\n print(\"the peekahead num is:\", peekahead_num)\n self._pop_tokens(token_buffer.num_tokens_after_current() - peekahead_num)\n print(\"returning token with value:\", self.token.value)\n return self.token\n\n # We will re-scan at least one token, so reset `already_returned_end_token`.\n self.already_returned_end_token = False\n\n num_buffered_after_current = token_buffer.num_tokens_after_current()\n num_to_pop = num_toks + num_buffered_after_current + 1 # new curr is rescanned\n\n if num_is_raw:\n # Works with lex.all_token_count in production_rules, but why +2?\n # Setting max_peek_tokens doesn't affect it. 
Clean up code.\n num_to_pop = num_toks + 2 # The added number doesn't matter except when it does...\n\n popped_to_begin_token, current_token_is_first = self._pop_tokens(num_to_pop)\n\n # Re-scan to get the new current token.\n if not popped_to_begin_token:\n self.next()\n\n # Reset some state variables.\n if popped_to_begin_token:\n self.peek().is_first = True\n self._returned_first_token = False\n self._curr_token_is_first = False\n elif current_token_is_first:\n self.token.is_first = True\n self._returned_first_token = True\n self._curr_token_is_first = True\n self.already_returned_end_token = self.token.is_end_token()\n\n return self.token", "title": "" }, { "docid": "30df7f1a6f3c0b3bf43d8af26fe5c27a", "score": "0.40903148", "text": "def peek_next(self):\n\n if len(self.tokens) > 1:\n return self.tokens[1][2]\n else:\n return \"\"", "title": "" }, { "docid": "6c1276403a23c8cb045ae9dd25a9cb72", "score": "0.40850008", "text": "def inspect_token(self):\n if self.has_more_tokens():\n self.cur_tkn = self.cur_cmd[0]\n else:\n self.cur_tkn = (ERROR, 0)\n return self.cur_tkn", "title": "" }, { "docid": "6c73c90d735f8303386de34e3f8c8f75", "score": "0.40618825", "text": "def peek(self, num_toks=1):\n if not self.text_is_set:\n raise LexerException(\n \"Attempt to call lexer's peek method when no text is set.\")\n try:\n retval = self.token_buffer[num_toks]\n except IndexError: # Shouldn't happen.\n raise BufferIndexError\n return retval", "title": "" }, { "docid": "a3cf864f1e221b3747d48776c882da6a", "score": "0.40504596", "text": "def _match(token):\n # type: (Token) -> List[Union[Callable, BaseGrammar]]\n tt_lookup = {\n TokenType.RETURNS: [\n ReturnsGrammar,\n long_description_parse,\n ],\n TokenType.ARGUMENTS: [\n ArgumentsGrammar,\n long_description_parse,\n ],\n TokenType.YIELDS: [\n YieldsGrammar,\n long_description_parse,\n ],\n TokenType.RAISES: [\n RaisesGrammar,\n long_description_parse,\n ],\n TokenType.WARNS: [\n WarnsGrammar,\n long_description_parse,\n ],\n TokenType.RECEIVES: [\n ReceivesGrammar,\n long_description_parse,\n ],\n TokenType.OTHER: [\n OtherArgumentsGrammar,\n long_description_parse,\n ],\n # Discard these two sections -- there's nothing\n # to check against the function description.\n TokenType.SEE: [\n long_description_parse,\n ],\n TokenType.NOTES: [\n long_description_parse,\n ],\n TokenType.EXAMPLES: [\n long_description_parse,\n ],\n } # type: Dict[TokenType, List[Union[BaseGrammar, Callable]]] # noqa: E501\n return tt_lookup.get(token.token_type, [long_description_parse])", "title": "" }, { "docid": "825a73fdf1472739ca17a5e8d2c74eed", "score": "0.40101236", "text": "def inspect(self, token):\n # If there are still tokens left\n if self.terms:\n return self.terms[-1] == token\n return False", "title": "" }, { "docid": "08398ea24818db46b6c3a89e4f29398e", "score": "0.39866233", "text": "def detect_token(token):\n return token_re.fullmatch(token) is not None", "title": "" }, { "docid": "16f076651ce7e5a838670f7eef0175ba", "score": "0.39790156", "text": "def level_matches(self, level, consumer_level):\r\n if isinstance(level, slice):\r\n start, stop = level.start, level.stop\r\n if start is not None and start > consumer_level:\r\n return False\r\n if stop is not None or stop <= consumer_level:\r\n return False\r\n return True\r\n else:\r\n return level >= consumer_level", "title": "" }, { "docid": "47ec477d36e80ad380e760651d52c683", "score": "0.39777708", "text": "def level_matches(self, level, consumer_level):\r\n if isinstance(level, slice):\r\n start, stop = 
level.start, level.stop\r\n if start is not None and start > consumer_level:\r\n return False\r\n if stop is not None and stop <= consumer_level:\r\n return False\r\n return True\r\n else:\r\n return level >= consumer_level", "title": "" }, { "docid": "47ae1561dfe26c2919ba83f6fdb343e3", "score": "0.39648035", "text": "def test_labels(self):\n alias = parse(self.test_labels.__doc__)[0]\n matchex = alias.matchex\n \n matched = matchex.match(alias.calc, alias.accounts, alias.aliases, 'foo-party1999+m3047.net+2')\n self.assertEqual(len(matched),1)\n return", "title": "" }, { "docid": "b951b49777d7431a0d1f2f863f6f03f8", "score": "0.396455", "text": "def next_if(self, expr):\r\n if self.current.test(expr):\r\n return self.next()", "title": "" }, { "docid": "287a8ddd245861ca93634fad1e58eed3", "score": "0.3957767", "text": "def Parse(token):\n\n if(token[0] in one_word_commands and len(token) == 1): # one word check\n return True\n\n elif(token[0] in two_word_commands and len(token) == 2 and checkNum(token[1])):\n return True\n return False # check failed", "title": "" }, { "docid": "c065529c39b266da299b925186eb53e1", "score": "0.39307138", "text": "def pillar(tgt, delimiter=DEFAULT_TARGET_DELIM):\n if \"matchers\" not in __context__:\n _load_matchers()\n try:\n return __context__[\"matchers\"][\"pillar_match.match\"](\n tgt, delimiter=delimiter, opts=__opts__\n )\n except Exception as exc: # pylint: disable=broad-except\n log.exception(exc)\n return False", "title": "" }, { "docid": "32fcd6172688d41d59ec7c8f2b8215bd", "score": "0.39289674", "text": "def recall_score(gold, pred, pos_label=1, ignore_in_gold=[], ignore_in_pred=[]):\n gold, pred = _preprocess(gold, pred, ignore_in_gold, ignore_in_pred)\n\n positives = np.where(pred == pos_label, 1, 0).astype(bool)\n trues = np.where(gold == pos_label, 1, 0).astype(bool)\n TP = np.sum(positives * trues)\n FN = np.sum(np.logical_not(positives) * trues)\n\n if TP or FN:\n rec = TP / (TP + FN)\n else:\n rec = 0\n\n return rec", "title": "" }, { "docid": "e6b3ba7b256450ead0eb6923a18f4cb8", "score": "0.3923274", "text": "def match_token(self, token_type):\n if self.current_token.token == token_type:\n TOKEN_MATCHING_LIST.append('Matched: %s, %s ' % (token_type, self.current_token.value))\n try:\n # Get the next token list if available\n self.current_token = self.token_list.next()\n except StopIteration:\n return None\n else:\n raise Error('Token not found:\\t Received: %s \\t Got: %s' % (\n token_type,\n self.current_token.token\n ))", "title": "" }, { "docid": "4e40f39b7195314cdac6fa4476be4a7f", "score": "0.3914542", "text": "def __match_then_advance(self, *types) -> bool:\n if self.__match(*types):\n self.__advance()\n return True\n return False", "title": "" }, { "docid": "2cb9be9461bba8c33499b9ac2885619a", "score": "0.3902194", "text": "def checkEOF(self):\n if self.next() != 'eof':\n raise ParseError(\"Parsing failed to consume tokens \"+str(self.tokens[:-1])+\".\")", "title": "" }, { "docid": "ce188683dea806de1a3c6082933aa133", "score": "0.38948062", "text": "def test_stack_peek_correct_value(short_stack):\n expected = 3\n node = short_stack.peek()\n print(node)\n assert node.val == expected", "title": "" }, { "docid": "f54d2ba750ad285f7b70908317260802", "score": "0.38895896", "text": "def showNextToken(self):\r\n if self.hasMoreTokens():\r\n return self.tokenized_text[self.index]\r\n return True", "title": "" }, { "docid": "40b30607e33f7d5f187b16908cfb2c8d", "score": "0.38866135", "text": "def could_be_end_of_sentence(next_token, test_set=TOK.TEXT, 
multiplier=False):\n return next_token.kind in TOK.END or (\n # Check whether the next token is an uppercase word, except if\n # it is a month name (frequently misspelled in uppercase) or\n # roman numeral, or a currency abbreviation if preceded by a\n # multiplier (for example þ. USD for thousands of USD)\n next_token.kind in test_set\n and next_token.txt[0].isupper()\n and next_token.txt.lower() not in MONTHS\n and not RE_ROMAN_NUMERAL.match(next_token.txt)\n and not (next_token.txt in CURRENCY_ABBREV and multiplier)\n )", "title": "" }, { "docid": "30bd617870364d144c3cecef449ba31e", "score": "0.38732278", "text": "def eat(self, token_type, token_values=[]):\n if self.current_token.type == token_type:\n # just check for type\n # optionally, you can check for a range of values\n if not token_values or (self.current_token.value in token_values):\n self.current_token = self.get_next_token()\n else:\n self.error()", "title": "" }, { "docid": "c2f3cd1318b9c4bfeafcbe8dd9f7bb28", "score": "0.3860282", "text": "def check_label(arg) -> \"Optional[str]\":\n if arg.type == Token.SYMBOL:\n return None\n else:\n return \"expected label\"", "title": "" }, { "docid": "62d3cd9400459dc582558c780413c381", "score": "0.38482854", "text": "def has_labels(self, labels, reducer=any):\n return reducer(l in self.labels for l in labels)", "title": "" }, { "docid": "8d0084f0e2188419eef87f8edffb7c18", "score": "0.3844904", "text": "def test_has_next_true(self):\n self.analyzer.file_handler.tell.return_value = 0\n self.analyzer.file_handler.size = 10\n res = self.analyzer._has_next()\n self.assertEqual(res, True)", "title": "" }, { "docid": "2d555527cc9e66478f9ec5e0d9d66da6", "score": "0.38433146", "text": "def is_op(self, token):\n return self.match(self.op_re, token)", "title": "" }, { "docid": "162bcb8711e0d4a44a9cec3c980dd074", "score": "0.38350123", "text": "def advanceNextToken(self) -> Optional[Token]:\n token = self.peekNextToken()\n if token is not None:\n self.__nextIx += 1\n return token", "title": "" }, { "docid": "eae18e9e4f8a09cb7db390fd0f28e386", "score": "0.38143036", "text": "def gold_has_long_answer(gold_label_list):\n\n gold_has_answer = gold_label_list and (sum([\n not label.long_answer_span.is_null_span() # long answer not null\n for label in gold_label_list # for each annotator\n ]) >= 2)\n\n return gold_has_answer", "title": "" }, { "docid": "63711a0a5eb6551376799c62014148f2", "score": "0.38135365", "text": "def recursive_match(token, tree, deprel=None, form=None, lemma=None,\n upos=None, xpos=None, feats=None,\n head_search=None, child_search=None,\n regex=False, ignorecase=False,\n n_times='+'):\n\n if deprel != None:\n try:\n deprel_match = re.match(deprel, token.deprel)\n except TypeError: # This will be a token without a deprel, like a bridge\n return False # or an enhanced dependency\n if not deprel_match:\n return False\n\n if upos != None:\n if token.upos != upos:\n return False\n\n if xpos != None:\n if token.xpos != xpos:\n return False\n\n if form != None:\n\n form_x = form.lower() if ignorecase else form\n form_y = token.form.lower() if ignorecase else token.form\n\n if regex:\n form_match = re.fullmatch(form_x, form_y)\n if not form_match:\n return False\n elif form_x != form_y:\n return False\n\n if lemma != None:\n\n lemma_x = lemma.lower() if ignorecase else lemma\n lemma_y = token.lemma.lower() if ignorecase else token.lemma\n\n if regex:\n lemma_match = re.fullmatch(lemma_x, lemma_y)\n if not lemma_match:\n return False\n elif lemma_x != lemma_y:\n return False\n\n if feats 
!= None:\n pass\n\n if head_search != None:\n head_search['regex'] = regex\n head_search['ignorecase'] = ignorecase\n if token.head == '0':\n return False # Exclude root node; this is the predicate\n elif token.head == None:\n return False # Will be a bridge or an ED.\n head_tok = tree.map[token.head]\n if not recursive_match(head_tok, tree, **head_search):\n return False\n\n if child_search != None:\n #TODO: Major refactoring\n for search_item in child_search:\n search_item['regex'] = regex # Inherits regex and ignorecase\n search_item['ignorecase'] = ignorecase\n\n # Iterates through the token's children to find context matches\n n_true = 0\n for i, cid in enumerate(token.children):\n child_token = tree.map[cid]\n n_true += recursive_match(child_token, tree, **search_item)\n\n # Note that this is n_times kwarg of the item of the child_search list, not the current query\n occur_req = search_item['n_times']\n\n if occur_req == '+':\n if n_true < 1:\n return False\n\n elif re.match(\"\\d+:\\d+\", occur_req):\n lo, hi = re.search(r'(\\d+):(\\d+)',occur_req).groups()\n if lo > hi:\n raise ValueError(\"lo greater than hi\")\n lo, hi = map(int, [lo,hi])\n if not lo <= n_true <= hi:\n return False\n\n elif re.fullmatch(\"\\d+:\\d+\", occur_req):\n lo, hi = re.search(r'(\\d+):(\\d+)',occur_req).groups()\n if lo > hi:\n raise ValueError(\"lo greater than hi\")\n lo, hi = map(int, [lo,hi])\n if not lo <= n_true <= hi:\n return False\n\n elif re.fullmatch(\"\\d+\", occur_req):\n if n_true != int(occur_req):\n return False\n\n # Returns true if it has passed all specified conditions.\n # If no conditions are specified, just returns true.\n return True", "title": "" }, { "docid": "81b3356e3c77d5889cb003f499f8655b", "score": "0.38078603", "text": "def ifcontain(parser, token):\r\n return do_ifcontain(parser, token, False)", "title": "" }, { "docid": "5e81204d43d4d6ffe78c04b4e5b3fda6", "score": "0.38037205", "text": "def peek(self):\n next_token = self._next_lexeme()\n if not next_token:\n return None\n return int(next_token) if next_token.isdigit() else next_token", "title": "" }, { "docid": "af713921fd0bbe20fcde23bcf4c12f2d", "score": "0.38022035", "text": "def match_consumer(self, consumer):\n if not self.filter_consumer:\n return True\n if not consumer:\n return False\n return bool(match(self.filter_consumer, consumer))", "title": "" }, { "docid": "5865fb5aa783d7ad027065c01ce948a7", "score": "0.3793202", "text": "def test_stack_peek_empty_stack():\n expected = None\n assert Stack().peek() is expected", "title": "" }, { "docid": "0268c7e394a53247458869b09791f473", "score": "0.37864086", "text": "def next_token(self):\n if self.has_more_tokens():\n self.cur_tkn = self.cur_cmd.pop(0)\n else:\n self.cur_tkn = (ERROR, 0)\n return self.cur_tkn", "title": "" }, { "docid": "b9d6a08c46e04eeab311ef2b3ac75328", "score": "0.37775317", "text": "def match_examples(exc, parse_fn, examples, text):\n\n word_re = re.compile(r'\\w+')\n ws_re = re.compile(r'\\s+')\n\n if not hasattr(exc, 'state'):\n return None\n\n err_pos = exc.pos_in_stream + 1\n\n # Try to find the smallest subset of text that produces the error.\n err_string = text\n while err_pos <= len(text):\n try:\n parse_fn(text[:err_pos])\n except (_lark.UnexpectedCharacters, _lark.UnexpectedToken) as err:\n if err.state == exc.state:\n err_string = text[exc.pos_in_stream:err_pos]\n err_pos += 1\n\n closest_dist = 2.0\n closest_example = NO_MATCH_EXAMPLE\n closest_err = None\n closest_ex = None\n\n # Find an example error that fails at the same parser 
state.\n for example in examples:\n for ex_text in example.examples:\n try:\n parse_fn(ex_text)\n except _lark.UnexpectedCharacters as err:\n if not isinstance(exc, _lark.UnexpectedCharacters):\n continue\n\n # Both the example and the original error got unexpected\n # characters in the stream. If the unexpected characters\n # are roughly the same, call it an exact match.\n if (ex_text[err.pos_in_stream] == text[exc.pos_in_stream] or\n # Call it exact if they're both alpha-numeric\n re_compare(ex_text[err.pos_in_stream:],\n text[exc.pos_in_stream], word_re) or\n # Or both whitespace\n re_compare(ex_text[err.pos_in_stream:],\n text[exc.pos_in_stream], ws_re)):\n return example.message\n\n except _lark.UnexpectedToken as err:\n if not isinstance(exc, _lark.UnexpectedToken):\n continue\n\n # For token errors, check that the state and next token match\n # If just the state matches, we'll call it a partial match\n # and look for something better.\n\n # We annotate the errors from the expression parser, so we don't\n # get confused by similar state stacks that happen to have been labeled\n # in a similar way.\n if not hasattr(err, 'expr_error') == hasattr(exc, 'expr_error'):\n continue\n\n if err.state == exc.state and err.token == exc.token:\n # Try exact match first\n return example.message\n\n else:\n stack1 = list(err.state.state_stack)\n stack2 = list(exc.state.state_stack)\n dist = state_stack_dist(stack1, stack2) + 1\n if exc.token.type == err.token.type:\n dist -= 1\n if dist <= closest_dist:\n closest_err = err\n closest_dist = dist\n closest_example = example\n closest_ex = ex_text\n\n except ParserValueError:\n # Examples should only raise Token or UnexpectedChar errors.\n # ParserValue errors already come with a useful message.\n raise RuntimeError(\n \"Invalid failure example in string_parsers: '{}'\"\n .format(ex_text))\n\n return closest_example.message", "title": "" }, { "docid": "cc7f98bb05d0c6fed065222433597f40", "score": "0.3772153", "text": "def if_member_node(n, label, next_label):\n members = \"|\".join(\n [\"BIT(%s_type::%s)\" % (n[\"feature\"], member) for member in n[\"set\"]]\n )\n return \"%s: if (E.get%s() & (%s)) goto %s;\" % (\n label,\n n[\"feature\"],\n members,\n next_label,\n )", "title": "" }, { "docid": "417a5f04104f159d31e7c932fa10709a", "score": "0.37694868", "text": "def test_parse_will_build_an_if_AST_that_contain_expression(self):\n lexer = LexerStateMachine(' if ( 2 == 3 ) { 5 * 6 ; } ', self.context)\n parser = Parser(lexer, self.manager)\n self.manager.setParser(parser)\n\n token = parser.parse(0)\n\n self.assertEqual('if', token.id)\n self.assertEqual('(', token.data[0].id)\n self.assertEqual('==', token.data[0].data[0].id)\n self.assertEqual(2, token.data[0].data[0].data[0].data[0])\n self.assertEqual(3, token.data[0].data[0].data[1].data[0])\n self.assertEqual('{', token.data[1][0].id)\n self.assertEqual('*', token.data[1][0].data[0].id)\n self.assertEqual(5, token.data[1][0].data[0].data[0].data[0])\n self.assertEqual(6, token.data[1][0].data[0].data[1].data[0])", "title": "" }, { "docid": "403cfb174cb477163dec396289b05149", "score": "0.3768796", "text": "def run(self, context):\n i = 0\n i = context.skip_ws(i, nl=False)\n # if context.check_token(i, [\"RBRACE\", \"LBRACE\"]) is False and context.scope.type != \"GlobalScope\":\n # context.new_error(\"BRACE_EMPTY_LINE\")\n if context.check_token(i, [\"RBRACE\", \"LBRACE\"]) is False:\n context.new_error(\"EXPECTED_BRACE\", context.peek_token(i))\n return False, 0\n i += 1\n i = context.skip_ws(i, 
nl=False)\n if context.check_token(i, \"NEWLINE\") is False or context.check_token(i, \"NEWLINE\") is None:\n if context.scope.name == \"UserDefinedType\" or context.scope.name == \"UserDefinedEnum\":\n i = context.skip_ws(i, nl=False)\n if context.check_token(i, \"SEMI_COLON\") is True:\n return False, 0\n if context.check_token(i, \"IDENTIFIER\") is False:\n context.new_error(\"BRACE_SHOULD_EOL\", context.peek_token(i - 1))\n else:\n i += 1\n if context.check_token(i, \"SEMI_COLON\") is False:\n context.new_error(\"BRACE_SHOULD_EOL\", context.peek_token(i - 1))\n else:\n context.new_error(\"BRACE_SHOULD_EOL\", context.peek_token(i - 1))\n if context.scope.name == \"Function\" and context.scope.lines > 26:\n context.new_error(\"TOO_MANY_LINES\", context.peek_token(0))\n return False, 0", "title": "" }, { "docid": "777705f0afdf3681f07680bc86dc39a2", "score": "0.3761686", "text": "def consume_till(self, delim):\r\n try:\r\n while True:\r\n t = self.next()\r\n if t.value == delim:\r\n break\r\n\r\n # if end of line is found, it is an exception.\r\n # Since there is no easy way to report the line number,\r\n # leave the error reporting to the python parser later \r\n #@@ This should be fixed.\r\n if t.value == '\\n':\r\n break\r\n except:\r\n #raise ParseError, \"Expected %s, found end of line.\" % repr(delim)\r\n\r\n # raising ParseError doesn't show the line number. \r\n # if this error is ignored, then it will be caught when compiling the python code.\r\n return", "title": "" }, { "docid": "4d5640fd844e5a506cab326b24a1eb66", "score": "0.37579858", "text": "def _lexeme(self, test):\n text = self._text\n index, limit = self._index, self._limit\n lexeme = ''\n while index < limit and test(text[index]):\n lexeme += text[index]\n index += 1\n return lexeme", "title": "" }, { "docid": "b5bdfa5ab11d8b6cd0212f0782b1df37", "score": "0.3744497", "text": "def __call__(self, *args, **opts):\r\n # Start at the beginning of this text.\r\n text, pos = ''.join(args), 0\r\n while pos < len(text):\r\n # Match the text, looking for the next token.\r\n m = self.regex.match(text, pos)\r\n if m is None:\r\n # No token was found, raise an error.\r\n raise UnrecognizedCharacter(text, pos, pos + 1)\r\n elif m.lastgroup in self.ignores:\r\n # An ignored token was found, continue without yielding.\r\n pos = m.end()\r\n continue\r\n else:\r\n # Found a token; yield its name and the text it matched.\r\n yield Token(m)\r\n pos = m.end()", "title": "" }, { "docid": "e20725b907e059cbbafc922fc10cb33e", "score": "0.37433302", "text": "def test_peek_on_non_empty_stack(self):\n self.assertEqual(self.non_empty_stack.peek(), 4)", "title": "" }, { "docid": "1ae2b44a6969380497bfd2551ddfb441", "score": "0.3740692", "text": "def check(self, token):\n if self.i < self.length and self.items[self.i] == token:\n self.i += 1\n return True\n return False", "title": "" }, { "docid": "1e43cbc0f0f5278bc668eb962119c6e6", "score": "0.37284026", "text": "def test_has_next_false(self):\n self.analyzer.file_handler.tell.return_value = 10\n self.analyzer.file_handler.size = 10\n res = self.analyzer._has_next()\n self.assertEqual(res, False)", "title": "" }, { "docid": "28e075de4a24907028057cb3738b4ae1", "score": "0.37242344", "text": "def hasNext(self):\r\n return self.visit is not None or len(self.stack) > 0", "title": "" }, { "docid": "ce1fda276001c4edd79555fcb19d1c52", "score": "0.3723385", "text": "def peek_tok(self) -> Optional[str]:\n if self._token_index >= len(self._tokens):\n raise EndOfStream\n return self._tokens[self._token_index]", 
"title": "" }, { "docid": "e3078e30a44104f8b41794c25a85ba8b", "score": "0.37217605", "text": "def match(tree, pos, pro):\n if number_match(tree, pos, pro) and gender_match(tree, pos, pro):\n return True\n return False", "title": "" }, { "docid": "d886e8993e2decfd5e0ebf470c67c59d", "score": "0.37209433", "text": "def get_next_token_label_and_value(self, program, prog_unprocessed_indices,\n ERROR_MSG_TEXT_SNIPPET_SIZE):\n return self.pattern_matcher.get_next_token_label_and_value(\n program, prog_unprocessed_indices,\n ERROR_MSG_TEXT_SNIPPET_SIZE)", "title": "" }, { "docid": "7de294cdbd81ad8f3ef35b1ecb2d6e4a", "score": "0.3717633", "text": "def _check_is_max_context(doc_spans, cur_span_index, position):\n\n # Because of the sliding window approach taken to scoring documents, a single\n # token can appear in multiple documents. E.g.\n # Doc: the man went to the store and bought a gallon of milk\n # Span A: the man went to the\n # Span B: to the store and bought\n # Span C: and bought a gallon of\n # ...\n #\n # Now the word 'bought' will have two scores from spans B and C. We only\n # want to consider the score with \"maximum context\", which we define as\n # the *minimum* of its left and right context (the *sum* of left and\n # right context will always be the same, of course).\n #\n # In the example the maximum context for 'bought' would be span C since\n # it has 1 left context and 3 right context, while span B has 4 left context\n # and 0 right context.\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index", "title": "" }, { "docid": "dc59803301d0ecda3434a9a90ce1cf61", "score": "0.37110943", "text": "def parse_pseudocode(self):\n\n\t\tpeak = 0\n\t\tdepth = 0\n\n\t\t# Helper\n\t\tconfuse = self.confusions.add\n\t\t\n\t\tret_with_stack_ok = False\n\n\t\tfor op, v in self.pcode:\n\t\t\tif op == \"stack\":\n\t\t\t\tdepth += v\n\t\t\t\tif (depth > peak):\n\t\t\t\t\tpeak = depth\n\n\t\t\telif op == \"ret\":\n\t\t\t\t# We should never return with anything still\n\t\t\t\t# on the stack!\n\t\t\t\tif depth:\n\t\t\t\t\tconfuse(\"ret-with-stack\")\n\n\t\t\t\t# We only understand conditional returns if\n\t\t\t\t# there was never anything on the stack.\n\t\t\t\tif peak and (not v):\n\t\t\t\t\tconfuse(\"conditional-ret\")\n\n\t\t\t\t# Reset to our peak depth.\n\t\t\t\tdepth = peak\n\n\t\t\telif op == \"tablejump\":\n\t\t\t\t# We can be pretty sure that a table jump when\n\t\t\t\t# we have an active stack frame is local-only.\n\t\t\t\tif not depth:\n\t\t\t\t\tconfuse(\"tablejump-with-stack\")\n\n\t\t\telif op == \"indirectjump\":\n\t\t\t\tconfuse(\"indirect-jump\")\n\n\t\t\telif op == \"call\":\n\t\t\t\tself.calls.add(v)\n\n\t\t\telif op == \"tailcall\":\n\t\t\t\tself.tail_calls.add(v)\n\t\t\t\n\t\t\telif op == \"restore_fp\":\n\t\t\t\tret_with_stack_ok = True\n\n\t\t\telse:\n\t\t\t\tconfuse(\"unknown-op\")\n\n\t\t# If we use the frame pointer, it's okay to ret-with-stack.\n\t\tif ret_with_stack_ok and \"ret-with-stack\" in self.confusions:\n\t\t\tself.confusions.remove(\"ret-with-stack\")\n\t\t\n\t\t# If we might tail-call or regular-call, 
count it as regular.\n\t\tself.tail_calls -= self.calls\n\t\tself.stack = peak", "title": "" }, { "docid": "b5c1b0f8fe71895179d35592e1fef14b", "score": "0.37042397", "text": "def run(self, context):\n for t in context.tokens[: context.tkn_scope]:\n if t.type == \"NEWLINE\" or t.type == \"ESCAPED_NEWLINE\":\n context.scope.lines += 1\n\n if type(context.scope) is GlobalScope:\n\n if context.get_parent_rule() == \"CheckFuncDeclarations\" and context.scope.lines > 25:\n context.new_error(\"TOO_MANY_LINES\", context.tokens[context.tkn_scope])\n return False, 0\n\n if context.get_parent_rule() == \"CheckBrace\":\n if \"LBRACE\" in [t.type for t in context.tokens[: context.tkn_scope + 1]]:\n if type(context.scope) is GlobalScope:\n return False, 0\n else:\n if context.scope.lvl == 0:\n return False, 0\n\n return False, 0", "title": "" }, { "docid": "eefd3d78f13be7addbf9a74acc4bb20d", "score": "0.3699269", "text": "def peek(self,rgx):\n\t\treturn re.match(rgx,self.text)", "title": "" }, { "docid": "7c288fad7258ff183cf07762d57f4b0e", "score": "0.3693138", "text": "def evaluate(root):\n tokens = []\n stack = []\n last = None\n while stack or root:\n if root:\n stack.append(root)\n root = root.left\n else:\n peek = stack[-1]\n if peek.right and peek.right != last:\n root = peek.right\n else:\n tokens.append(peek.val)\n last = stack.pop()\n for t in tokens:\n if t in [0, 1]:\n stack.append(t)\n else:\n op1 = stack.pop()\n op2 = stack.pop()\n if t == 2:\n stack.append(op1 or op2)\n elif t == 3:\n stack.append(op1 and op2)\n return bool(stack[-1])", "title": "" }, { "docid": "d727f7c0b9b5c5af63bb36bc96f8251c", "score": "0.36896503", "text": "def has_more_tokens(self):\n return self.cur_cmd != []", "title": "" }, { "docid": "355b276b8ee9054f378173a4bbb183d2", "score": "0.3688637", "text": "def test_peek_val_empty(small_stack, empty_stack):\n assert small_stack.peek() == small_stack.top\n assert len(small_stack) == 0", "title": "" }, { "docid": "3f70eaf0f357771d82f0f8c9749bde11", "score": "0.36860186", "text": "def _check_is_max_context(doc_spans, cur_span_index, position):\n\n # Because of the sliding window approach taken to scoring documents, a single\n # token can appear in multiple documents. E.g.\n # Doc: the man went to the store and bought a gallon of milk\n # Span A: the man went to the\n # Span B: to the store and bought\n # Span C: and bought a gallon of\n # ...\n #\n # Now the word 'bought' will have two scores from spans B and C. 
We only\n # want to consider the score with \"maximum context\", which we define as\n # the *minimum* of its left and right context (the *sum* of left and\n # right context will always be the same, of course).\n #\n # In the example the maximum context for 'bought' would be span C since\n # it has 1 left context and 3 right context, while span B has 4 left context\n # and 0 right context.\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index", "title": "" }, { "docid": "4be743f9e2282398341d248444c76f53", "score": "0.36833343", "text": "def check_indicator(self, expected, text, title=None):\n self.stack.add(self.depth, Node(text, title=title))\n self.assertEqual(self.finder.has_def_indicator(), expected)\n self.depth += 1", "title": "" }, { "docid": "090ac2e44cd8e01debf168c081982b0d", "score": "0.3682169", "text": "def check_is_max_context(doc_spans, cur_span_index, position):\n\n # Because of the sliding window approach taken to scoring documents, a single\n # token can appear in multiple documents. E.g.\n # Doc: the man went to the store and bought a gallon of milk\n # Span A: the man went to the\n # Span B: to the store and bought\n # Span C: and bought a gallon of\n # ...\n #\n # Now the word 'bought' will have two scores from spans B and C. We only\n # want to consider the score with \"maximum context\", which we define as\n # the *minimum* of its left and right context (the *sum* of left and\n # right context will always be the same, of course).\n #\n # In the example the maximum context for 'bought' would be span C since\n # it has 1 left context and 3 right context, while span B has 4 left context\n # and 0 right context.\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index", "title": "" }, { "docid": "340eaab0f07ea3ee385eef872376e9e7", "score": "0.3679568", "text": "def test_null_consume(self):\n r = (x for x in range(10))\n mi.consume(r, 0)\n self.assertEqual(0, next(r))", "title": "" }, { "docid": "c8e17f60178b1fcf4b2bc1b03c466175", "score": "0.36788845", "text": "def process_lexeme(self, token):\n lexeme, line_number = token\n\n # recognize keywords and operators\n if lexeme in KEYWORDS.keys():\n return Token(lexeme, KEYWORDS[lexeme], line_number)\n\n # recognize identifiers\n elif re.match(IDENT, lexeme):\n return Token(lexeme, Terminal.IDENT, line_number)\n \n # recognize integer literals\n elif re.match(INTCONST, lexeme):\n if not (abs(int(lexeme, 10)) >> 31):\n return Token(lexeme, Terminal.INTCONST, line_number)\n else:\n raise MiplInvalidConst(f\"**** invalid integer constant: {lexeme}\")\n \n # recognize character literals\n elif re.match(CHARCONST, 
lexeme):\n return Token(lexeme, Terminal.CHARCONST, line_number)\n \n # raise for invalid characters\n elif re.match(CHARCONST_INVALID, lexeme):\n raise MiplInvalidConst(f\"**** invalid character constant: {lexeme}\")\n \n # otherwise, return unknown\n else:\n return Token(lexeme, Terminal.UNKNOWN, line_number)", "title": "" }, { "docid": "e561cbaf1f9c8826f0a6556f34366a1c", "score": "0.3669398", "text": "def get_next_token(self) -> Token:\n regex_map: Dict[str, Callable[[], Token]] = OrderedDict((\n (r'(\"|\\')', self.string),\n (r'[_a-zA-Z]', self.id),\n (r'\\d', self.number),\n ))\n\n tokens_map: Dict[str, Token] = OrderedDict((\n ('==', Token(tok.EQ, '==')),\n ('!=', Token(tok.NE, '!=')),\n ('<=', Token(tok.LTE, '<=')),\n ('>=', Token(tok.GTE, '>=')),\n ('>', Token(tok.GT, '>')),\n ('<', Token(tok.LT, '<')),\n ('(', Token(tok.LPAREN, '(')),\n (')', Token(tok.RPAREN, ')')),\n ))\n\n while self.current_char is not None:\n if self.current_char.isspace():\n self.skip_whitespace()\n continue\n\n for regex, func in regex_map.items():\n if re.match(regex, self.current_char):\n return func()\n\n for expected_val, token in tokens_map.items():\n if self.is_token_equal(expected_val):\n self.skip_n_chars(len(expected_val))\n return token\n\n self.error()\n\n return Token(tok.EOF, None)", "title": "" }, { "docid": "a56445b88020dc7432937a09c9c7f199", "score": "0.36657214", "text": "def test_parse_2_plus_3_multiply_4_plus_5(self):\n lexer = LexerStateMachine('2 + 3 * 4 + 5', self.context)\n parser = Parser(lexer, self.manager)\n self.manager.setParser(parser)\n\n token = parser.parse(0)\n\n self.assertEqual('+', token.id)\n self.assertEqual('+', token.data[0].id)\n self.assertEqual('*', token.data[0].data[1].id)\n self.assertEqual('(literal)', token.data[0].data[0].id)\n self.assertEqual('(literal)', token.data[0].data[1].data[0].id)\n self.assertEqual('(literal)', token.data[0].data[1].data[1].id)\n self.assertEqual('(literal)', token.data[1].id)\n self.assertEqual(2, token.data[0].data[0].data[0])\n self.assertEqual(3, token.data[0].data[1].data[0].data[0])\n self.assertEqual(4, token.data[0].data[1].data[1].data[0])\n self.assertEqual(5, token.data[1].data[0])", "title": "" }, { "docid": "8feb0e86c44b7af105829b5dae828f23", "score": "0.366085", "text": "def _check_is_max_context(doc_spans, cur_span_index, position):\r\n\r\n # Because of the sliding window approach taken to scoring documents, a single\r\n # token can appear in multiple documents. E.g.\r\n # Doc: the man went to the store and bought a gallon of milk\r\n # Span A: the man went to the\r\n # Span B: to the store and bought\r\n # Span C: and bought a gallon of\r\n # ...\r\n #\r\n # Now the word 'bought' will have two scores from spans B and C. 
We only\r\n # want to consider the score with \"maximum context\", which we define as\r\n # the *minimum* of its left and right context (the *sum* of left and\r\n # right context will always be the same, of course).\r\n #\r\n # In the example the maximum context for 'bought' would be span C since\r\n # it has 1 left context and 3 right context, while span B has 4 left context\r\n # and 0 right context.\r\n best_score = None\r\n best_span_index = None\r\n for (span_index, doc_span) in enumerate(doc_spans):\r\n end = doc_span.start + doc_span.length - 1\r\n if position < doc_span.start:\r\n continue\r\n if position > end:\r\n continue\r\n num_left_context = position - doc_span.start\r\n num_right_context = end - position\r\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\r\n if best_score is None or score > best_score:\r\n best_score = score\r\n best_span_index = span_index\r\n\r\n return cur_span_index == best_span_index", "title": "" }, { "docid": "b70b51b44c041b44019ce5899124703f", "score": "0.365273", "text": "def get_next_token(self) -> Token:\n while self.current_char is not None:\n if self.current_char is '{':\n self.advance()\n self.skip_comment()\n continue\n\n if self.current_char.isspace():\n self.skip_whitespace()\n continue\n\n if self.current_char.isdigit():\n return self.number()\n\n if self.current_char is '/' and self.peek() is '/':\n self.advance()\n self.advance()\n return Token(\n type=TokenType.INTEGER_DIV,\n value='//',\n lineno=self.lineno,\n column=self.column\n )\n\n if self.current_char.isalpha() or self.current_char is '_':\n return self.identify()\n\n if self.current_char is ':' and self.peek() is '=':\n self.advance()\n self.advance()\n return Token(\n type=TokenType.ASSIGN,\n value=':=',\n lineno=self.lineno,\n column=self.column\n )\n\n if self.current_char is '<' and self.peek() is '>':\n self.advance()\n self.advance()\n return Token(\n type=TokenType.NOT_EQUALS,\n value='<>',\n lineno=self.lineno,\n column=self.column\n )\n\n if self.current_char is '<' and self.peek() is '=':\n self.advance()\n self.advance()\n return Token(\n type=TokenType.LESS_EQUALS,\n value='<=',\n lineno=self.lineno,\n column=self.column\n )\n\n if self.current_char is '>' and self.peek() is '=':\n self.advance()\n self.advance()\n return Token(\n type=TokenType.GREATER_EQUALS,\n value='>=',\n lineno=self.lineno,\n column=self.column\n )\n\n # single-character token\n try:\n # get enum member by value, e.g.\n # TokenType(';') --> TokenType.SEMI\n token_type = TokenType(self.current_char)\n except ValueError:\n # no enum member with value equal to self.current_char\n self.error()\n else:\n # create a token with a single-character lexeme as its value\n token = Token(\n type=token_type,\n value=token_type.value, # e.g. 
';', '.', etc\n lineno=self.lineno,\n column=self.column,\n )\n self.advance()\n return token\n\n return Token(TokenType.EOF, None)", "title": "" }, { "docid": "dbabbf151d8e5e02a58f9eecd71f390f", "score": "0.36492807", "text": "def search_for_label(doc, label_lemmas, label_root):\n # whether a label has been mentioned\n label_mentioned = False\n label_indices = []\n if label_lemmas:\n for t_id, token in enumerate(doc):\n id_next = 0\n for label_lemma in label_lemmas:\n if doc[t_id+id_next].lemma_ != label_lemma:\n break\n else:\n id_next = id_next + 1\n if id_next != 0 and id_next == len(label_lemmas):\n label_mentioned = True\n if label_root:\n label_indices.append(t_id + label_root)\n else:\n label_indices.append(t_id)\n return label_mentioned, label_indices", "title": "" }, { "docid": "9f0af93ae92c9e777a0370e3976957ab", "score": "0.3644853", "text": "def test_fetch_invalid_offset(self):\n topic = self.get_topic().name\n messages = ['hello world', 'foobar']\n\n # publish `messages` to topic\n self.kafka.produce(topic, 0, messages)\n\n t = Topic(self.kafka_cluster, topic)\n\n # get the consumer and set the offset to -1\n consumer = t.subscribe('group2')\n list(consumer.partitions)[0]._next_offset = -1\n\n def test():\n \"\"\"Test that `consumer` can see `messages`.\n\n catches exceptions so we can retry while we wait for kafka to\n coallesce.\n\n \"\"\"\n logger.debug('Running `test`...')\n try:\n self.assertEquals(\n list(islice(consumer, 0, len(messages))),\n messages\n )\n return True\n except AssertionError as e:\n logger.exception('Caught exception: %s', e)\n return False\n\n # wait for one second for :func:`test` to return true or raise an error\n polling_timeout(test, 1)\n consumer.stop_partitions()", "title": "" }, { "docid": "551dc6d1bbb59eac6cdec297683741a1", "score": "0.36444277", "text": "def match(self, *subject):\n # Create parallel lists of the subject and the things it has to\n # match.\n must_match = list(self.ruleset)\n remaining_subject = list(subject)\n\n # Consume tokens from both lists until we've confirmed no\n # match or there is nothing left to match.\n match_so_far = True\n while match_so_far and must_match:\n match_so_far, must_match, remaining_subject = self._consume(\n must_match, remaining_subject\n )\n\n if match_so_far:\n # Everything that had to match, did.\n self.caught.append(subject)\n return self.result\n\n # Something that had to match, didn't.\n return None", "title": "" }, { "docid": "e7163e38c5dcea9fbbc745e06811e718", "score": "0.36414012", "text": "def __contains__(self, token_label):\n return token_label in self.token_subclass_dict", "title": "" }, { "docid": "9361733f791b3c750b83d74e3fc5152d", "score": "0.36400026", "text": "def check_all(self, expected=None, line_offset=0):\r\n self.report.init_file(self.filename, self.lines, expected, line_offset)\r\n self.total_lines = len(self.lines)\r\n if self._ast_checks:\r\n self.check_ast()\r\n self.line_number = 0\r\n self.indent_char = None\r\n self.indent_level = self.previous_indent_level = 0\r\n self.previous_logical = ''\r\n self.tokens = []\r\n self.blank_lines = self.blank_before = 0\r\n parens = 0\r\n for token in self.generate_tokens():\r\n self.tokens.append(token)\r\n token_type, text = token[0:2]\r\n if self.verbose >= 3:\r\n if token[2][0] == token[3][0]:\r\n pos = '[%s:%s]' % (token[2][1] or '', token[3][1])\r\n else:\r\n pos = 'l.%s' % token[3][0]\r\n print('l.%s\\t%s\\t%s\\t%r' %\r\n (token[2][0], pos, tokenize.tok_name[token[0]], text))\r\n if token_type == tokenize.OP:\r\n if text in 
'([{':\r\n parens += 1\r\n elif text in '}])':\r\n parens -= 1\r\n elif not parens:\r\n if token_type in NEWLINE:\r\n if token_type == tokenize.NEWLINE:\r\n self.check_logical()\r\n self.blank_before = 0\r\n elif len(self.tokens) == 1:\r\n # The physical line contains only this token.\r\n self.blank_lines += 1\r\n del self.tokens[0]\r\n else:\r\n self.check_logical()\r\n elif COMMENT_WITH_NL and token_type == tokenize.COMMENT:\r\n if len(self.tokens) == 1:\r\n # The comment also ends a physical line\r\n token = list(token)\r\n token[1] = text.rstrip('\\r\\n')\r\n token[3] = (token[2][0], token[2][1] + len(token[1]))\r\n self.tokens = [tuple(token)]\r\n self.check_logical()\r\n if len(self.tokens) > 1 and (token_type == tokenize.ENDMARKER and\r\n self.tokens[-2][0] not in SKIP_TOKENS):\r\n self.tokens.pop()\r\n self.check_physical(self.tokens[-1][4])\r\n self.check_logical()\r\n return self.report.get_file_results()", "title": "" }, { "docid": "0cb786b4fa5d57d44b90b37b60953a30", "score": "0.36390397", "text": "def check_token(user, token):\n return token_generator.check_token(user, token)", "title": "" }, { "docid": "340c03b884424430117076266104e125", "score": "0.36386183", "text": "def has_more_tokens(self):\r\n if self.__cur_token_index < len(self.__tokens):\r\n return True\r\n return False", "title": "" }, { "docid": "21352e04e108de58eb6d652af51f0107", "score": "0.36383176", "text": "def _match_next(self):\n self.l.debug(\"Next Match on \" + str(self.pptr) + \\\n \" -> \" + str(self.pattern[self.pptr].Type))\n if self.pattern[self.pptr].Type == PPType.BEGIN:\n self._check_begin() \n return True \n if self.pattern[self.pptr].Type == PPType.END: \n self._check_end() # forced but not allowed\n return True\n if self.pattern[self.pptr].Type == PPType.OR:\n self._check_or()\n return True\n if self.pattern[self.pptr].Type == PPType.ASM: \n return self._match_asm()\n raise RuntimeError( \"UPP Detected: Unidentified Pattern Piece: \" \\\n + str(self.pattern[self.pptr].Type) )", "title": "" }, { "docid": "1002baca79b735663c740dbae27728d6", "score": "0.36342382", "text": "def no_ignored_after(self, raise_on_fail=False, raise_on_success=False):\n retval = True\n if self.peek().ignored_before:\n retval = False\n\n if retval and raise_on_success:\n exception = return_first_exception(raise_on_success,\n self.default_helper_exception)\n raise exception(\n \"Function no_ignored_after expected tokens between the current \"\n \"token {0} and the following token {1}, but there were none.\"\n .format(str(self.token), str(self.peek())))\n if not retval and raise_on_fail:\n exception = return_first_exception(raise_on_fail,\n self.default_helper_exception)\n raise exception(\n \"Function no_ignored_after expected nothing between the \"\n \"current token {0} and the following token {1}, but there \"\n \"were ignored tokens.\"\n .format(str(self.token), str(self.peek())))\n else:\n return False\n return retval", "title": "" }, { "docid": "1a4e64191317d8c61e2ddc1892bdbb9d", "score": "0.36315495", "text": "def expect(self, *tokens):\n token = self.pop()\n if token.type in tokens:\n return token\n if 'LABEL' in tokens:\n if token.type in ['X', 'Y', 'Z', 'SP']:\n token.value = token.type.lower()\n token.type = 'LABEL'\n return token\n elif token.type == 'OPCODE':\n token.type = 'LABEL'\n return token\n Err.log('Expected: \"' + '\", \"'.join(tokens) + '\"')\n return token", "title": "" }, { "docid": "af534b3a1829270b93c697268754a6cd", "score": "0.3621276", "text": "def test_peek_on_empty_stack(self):\n 
self.assertIsNone(self.empty_stack.peek())", "title": "" }, { "docid": "d4a3c60ba8b9f32b0831c2c86b101ae7", "score": "0.3618692", "text": "def check_is_max_context(doc_spans, cur_span_index, position):\r\n\r\n # Because of the sliding window approach taken to scoring documents, a single\r\n # token can appear in multiple documents. E.g.\r\n # Doc: the man went to the store and bought a gallon of milk\r\n # Span A: the man went to the\r\n # Span B: to the store and bought\r\n # Span C: and bought a gallon of\r\n # ...\r\n #\r\n # Now the word 'bought' will have two scores from spans B and C. We only\r\n # want to consider the score with \"maximum context\", which we define as\r\n # the *minimum* of its left and right context (the *sum* of left and\r\n # right context will always be the same, of course).\r\n #\r\n # In the example the maximum context for 'bought' would be span C since\r\n # it has 1 left context and 3 right context, while span B has 4 left context\r\n # and 0 right context.\r\n best_score = None\r\n best_span_index = None\r\n for (span_index, doc_span) in enumerate(doc_spans):\r\n end = doc_span.start + doc_span.length - 1\r\n if position < doc_span.start:\r\n continue\r\n if position > end:\r\n continue\r\n num_left_context = position - doc_span.start\r\n num_right_context = end - position\r\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\r\n if best_score is None or score > best_score:\r\n best_score = score\r\n best_span_index = span_index\r\n\r\n return cur_span_index == best_span_index", "title": "" } ]
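A side note on the occurrence requirements that appear in the recursive-matcher passage earlier in this record ('+' for at least one match, a 'lo:hi' range, or a bare integer for an exact count of n_times): they can be validated by a small standalone helper. The sketch below is my own illustration and not one of the dataset passages; the helper name check_occurrences is hypothetical, and unlike the snippet in the passage it converts the range bounds to integers before comparing them.

import re

def check_occurrences(n_true, occur_req):
    # '+' means "at least one match".
    if occur_req == '+':
        return n_true >= 1
    # 'lo:hi' means an inclusive range; convert to int before comparing.
    m = re.fullmatch(r'(\d+):(\d+)', occur_req)
    if m:
        lo, hi = map(int, m.groups())
        if lo > hi:
            raise ValueError("lo greater than hi")
        return lo <= n_true <= hi
    # A bare integer means an exact count.
    if re.fullmatch(r'\d+', occur_req):
        return n_true == int(occur_req)
    raise ValueError("unrecognized occurrence requirement: %r" % occur_req)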
06f5b2af157552051cd0dfb4628f5923
Method to run subprocesses. Calling this will capture the `stderr` and `stdout`; please call `subprocess.run` manually if you would like them not to be captured.
[ { "docid": "eb107510d71332148c82e42f766e07cc", "score": "0.5504604", "text": "def run_subprocess(\n command: Union[str, List[str]],\n folder: Optional[Union[str, Path]] = None,\n check=True,\n **kwargs,\n) -> subprocess.CompletedProcess:\n if isinstance(command, str):\n command = command.split()\n\n if isinstance(folder, Path):\n folder = str(folder)\n\n return subprocess.run(\n command,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n check=check,\n encoding=\"utf-8\",\n errors=\"replace\", # if not utf-8, replace char by �\n cwd=folder or os.getcwd(),\n **kwargs,\n )", "title": "" } ]
[ { "docid": "f5688d99c31b258c8534ce6ac571aa0c", "score": "0.7337682", "text": "def captured_run(*args, **kwargs):\n proc = subprocess.run(\n *args, **kwargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n return proc.returncode, proc.stdout, proc.stderr", "title": "" }, { "docid": "af3c2482366f1c1567ca47011b02d31b", "score": "0.7197005", "text": "def runSubproc(*args, **kwargs):\n\n proc = subprocess.Popen(list(args), stdout = subprocess.PIPE, stderr = subprocess.PIPE, stdin = subprocess.PIPE)\n\n try:\n stdin = kwargs['stdin']\n except KeyError:\n stdin = None\n\n try:\n stdout = kwargs['stdout']\n except KeyError:\n stdout = sys.stdout\n\n try:\n stderr = kwargs['stderr']\n except KeyError:\n stderr = sys.stderr\n \n out, err = proc.communicate(stdin)\n\n if stdout is not None and out.strip() != '':\n if type(stdout) is list:\n if out.endswith('\\n'):\n out = out[:-1]\n for line in out.split('\\n'):\n stdout.append(line)\n else:\n stdout.write(out)\n stdout.flush()\n\n if stderr is not None and err.strip() != '':\n if type(stderr) is list:\n if err.endswith('\\n'):\n err = err[:-1]\n for line in err.split('\\n'):\n stderr.append(line)\n else:\n stderr.write(err)\n stderr.flush()\n\n return proc.returncode", "title": "" }, { "docid": "025e533159eaf6db3d4a130fe41fae7a", "score": "0.6771622", "text": "def subprocess_run(*args, silent=None, dry_run=False, **kwargs):\n # 这一段代码行为上肯定是多余的, 但是 run 内部不允许 capture_output 和\n # stdout / stderr 俩参数混用, 因此在这里进行适配\n capture_output = kwargs.pop('capture_output', None)\n if capture_output:\n kwargs['stdout'] = subprocess.PIPE\n kwargs['stderr'] = subprocess.PIPE\n\n capture_error = kwargs.pop('capture_error', None)\n if capture_error:\n kwargs['stderr'] = subprocess.PIPE\n\n check = kwargs.pop('check', None)\n if check:\n kwargs['stderr'] = subprocess.PIPE\n\n if not silent:\n ctx = context(silent=True)\n silent = ctx and ctx.obj.get('silent')\n\n abort_on_fail = kwargs.pop('abort_on_fail', None)\n excall(*args, silent=silent)\n if dry_run:\n return\n try:\n res = subprocess.run(*args, **kwargs)\n except subprocess.TimeoutExpired:\n timeout = kwargs['timeout']\n stderr = (\n f'this command reached its {timeout}s timeout:\\n '\n + subprocess.list2cmdline(args[0])\n )\n if not silent:\n error(stderr)\n\n res = subprocess.CompletedProcess(args[0], 1, stdout=stderr, stderr=stderr)\n\n code = rc(res)\n if code:\n if check:\n stdout = res.stdout\n stderr = res.stderr\n if stdout or stderr:\n echo(res.stdout)\n error(res.stderr, exit=code)\n else:\n error(\n f'command did not end well, and has empty output: {args}', exit=code\n )\n elif abort_on_fail:\n context().exit(code)\n\n return res", "title": "" }, { "docid": "60163bed484387de7fcac6bb01109ed3", "score": "0.6767978", "text": "def _run(cmd, *args, **kwargs):\n kwargs.setdefault('stdout', subprocess.PIPE)\n kwargs.setdefault('stderr', subprocess.STDOUT)\n popen = subprocess.Popen(cmd, *args, **kwargs)\n stdout, stderr = popen.communicate()\n if popen.wait():\n description = ' '.join(cmd) if isinstance(cmd, list) else cmd\n output = stderr or stdout or '<No output>'\n raise Error('Subcommand failed: %s: %s' % (description, output))\n if stdout:\n return [l for l in stdout.split('\\n') if l]", "title": "" }, { "docid": "2c95f8ff89ab8345702bebac447bd93f", "score": "0.67432797", "text": "def run_subprocess(\n args: List[str],\n log_output_live: bool,\n cwd: Optional[Union[bytes, str]] = None,\n env: Optional[Dict[str, str]] = None,\n) -> CompletedProcess:\n # It is hard to log output of both stdout and 
stderr live unless we\n # combine them.\n # See http://stackoverflow.com/a/18423003.\n if log_output_live:\n process_stderr = STDOUT\n else:\n process_stderr = PIPE\n\n with Popen(\n args=args,\n cwd=cwd,\n stdout=PIPE,\n stderr=process_stderr,\n env=env,\n ) as process:\n try:\n if log_output_live:\n stdout = b''\n stderr = b''\n for line in process.stdout:\n LOGGER.debug(line)\n stdout += line\n # Without this, `.poll()` will return None on some\n # systems.\n # See https://stackoverflow.com/a/33563376.\n process.communicate()\n else:\n stdout, stderr = process.communicate()\n except: # noqa: B001 pragma: no cover\n # We clean up if there is an error while getting the output.\n # This may not happen while running tests so we ignore coverage.\n process.kill()\n process.wait()\n raise\n retcode = process.poll()\n if retcode > 0:\n LOGGER.info(str(stderr))\n raise CalledProcessError(\n retcode, args, output=stdout, stderr=stderr\n )\n return CompletedProcess(args, retcode, stdout, stderr)", "title": "" }, { "docid": "87bb06c61997ef1cddb86c22ce136f2c", "score": "0.6739268", "text": "def run_proc(cmd: list[str]) -> subprocess.CompletedProcess:\n # print(cmd)\n return subprocess.run(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True, timeout=30.0\n )", "title": "" }, { "docid": "d084d8aba0432dc489d9d3e77968b53b", "score": "0.6548102", "text": "def run(args, return_code=0, cwd=None):\n log = logging.getLogger('run')\n proc = subprocess.Popen(args,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)\n\n stdout, stderr = proc.communicate()\n if proc.returncode != return_code:\n msg = 'Subprocess %r return code was %d, not %d as expected' %\\\n (args, proc.returncode, return_code)\n msg += '\\nStdout: ' + stdout\n msg += '\\nStderr: ' + stderr\n raise Exception(msg)\n\n return stdout, stderr", "title": "" }, { "docid": "258258548aedccd5b5eb1050450c3dd9", "score": "0.65345263", "text": "def run(*args, **kwargs):\n if is_python2_running():\n check = False\n if 'check' in kwargs:\n check = kwargs.pop('check')\n process = subprocess.Popen(*args, **kwargs)\n process._stdout, process._stderr = process.communicate()\n if process.returncode and check:\n raise Subprocess.CalledProcessError\n return process\n else:\n return subprocess.run(*args, **kwargs)", "title": "" }, { "docid": "4f1ff1225972ad931801fb6d3b237033", "score": "0.65112096", "text": "def run_command(cmd):\n proc = subprocess.run(cmd) \n #, stdout=subprocess.PIPE, stderr=subprocess.PIPE)", "title": "" }, { "docid": "a478e4889d52dd1901b654710c9c48d2", "score": "0.6486054", "text": "def execute(args):\n subprocess.run(args)", "title": "" }, { "docid": "e06f7ce13914373afa1d9c7df6dc2b95", "score": "0.6430217", "text": "def run(self, args=''):\n # print('args is: {}'.format(args))\n # Clear output variables from any previous execution\n self.stdout, self.stderr, self.returncode = None, None, None\n\n # Run executable from shell with arguments as string\n process = subprocess.Popen(\n '{} {}'.format(self.command, args), shell=True, \n stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n \n # Store stdout and stderr\n self.stdout, self.stderr = process.communicate()\n self.returncode = process.returncode\n \n # Return stdout\n return self.stdout", "title": "" }, { "docid": "f8f32749df2667a602cc966bf2692a8d", "score": "0.6396884", "text": "def run_process(args, cwd, _progress=None):\n if _progress is None:\n def _progress(msg, *args, **kwargs):\n print(msg)\n\n p = subprocess.Popen(args,\n 
stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n cwd=os.path.abspath(cwd))\n\n p.wait()\n if p.returncode == 0:\n return p.stdout, p.stderr\n\n _progress(\"Process failed with return code: %i\" % p.returncode, warn=True)\n if p.stdout is not None:\n _progress(\"stdout:\", warn=True)\n print(p.stdout.read())\n if p.stderr is not None:\n _progress(\"stderr:\", warn=True)\n print(p.stderr.read())\n\n raise ValueError(\"Subprocess failed with exit code %i.\" % p.returncode)", "title": "" }, { "docid": "cdb12566b11ad4888c5f848c17c75525", "score": "0.63784456", "text": "def subproc(*args, **kwargs):\n return subprocess.Popen(*args, \n shell=False, close_fds=True,\n stdout=PIPE, stdin=None, stderr=PIPE,\n **kwargs)", "title": "" }, { "docid": "9dfd6a5bb14ff6642052c982e709d54e", "score": "0.63316363", "text": "def run(*args):\n subprocess.run(' '.join(args), shell=True, check=True)", "title": "" }, { "docid": "7976eb8c38d0a77c57bd7f7e72c5349b", "score": "0.62988317", "text": "def run(command, *args, envvars=None):\n command_list = list(args)\n command_list.insert(0, command)\n # Run the subprocess\n env = dict(os.environ)\n if envvars:\n env.update(envvars)\n process = subprocess.Popen(command_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)\n stdout, stderr = process.communicate()\n try:\n stdout = stdout.decode(\"ascii\")\n except Exception:\n try:\n stdout = stdout.decode(\"utf-8\")\n except Exception:\n pass\n try:\n stderr = stderr.decode(\"ascii\")\n except Exception:\n try:\n stderr = stderr.decode(\"utf-8\")\n except Exception:\n pass\n # Return the code, STDOUT and STDERR\n return process.returncode, stdout, stderr", "title": "" }, { "docid": "9c43a8d12599b455902bbad2a724a697", "score": "0.627233", "text": "def run(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=False,\n timeout=None,\n check=True,\n universal_newlines=True,\n **kwargs,\n):\n log_subproc.debug(f\"run: {cmd}\")\n args = cmd if shell else shlex.split(cmd)\n result = subprocess.run(\n args,\n stdout=stdout,\n stderr=stderr,\n shell=shell,\n timeout=timeout,\n check=check,\n universal_newlines=universal_newlines,\n **kwargs,\n )\n return result", "title": "" }, { "docid": "75b26b150b4e7c883149f04acf71046b", "score": "0.6262746", "text": "def run(self, args: List[str],\n capture_output: bool = True,\n accept_nonzero_returncode: bool = False) -> subprocess.CompletedProcess:\n process_args = [self.__proc_name]\n process_args.extend(args)\n\n self.__command = f'{self.__proc_name} {\" \".join(args)}'\n\n additional_args = {'encoding': 'utf-8'}\n if capture_output:\n additional_args['stdout'] = subprocess.PIPE\n additional_args['stderr'] = subprocess.PIPE\n\n for count in range(self.__retries):\n logging.debug(f'[{count + 1}/{self.__retries}] Running: `{self.__command}`')\n\n process = subprocess.run(process_args, **additional_args)\n\n if accept_nonzero_returncode:\n return process\n\n if process.returncode == 0:\n return process\n\n if process.stderr:\n logging.warning(process.stderr)\n\n raise RetriesExceeded(f'Retries count reached maximum, command: `{self.__command}`')", "title": "" }, { "docid": "87f57f5254d42274c0a14ac3ebda6cad", "score": "0.6259913", "text": "def run(*args, env=None, timeout=None, stdin=None, expect_stdout=True,\n expect_stderr=True, outencoding='utf-8', errencoding='utf-8'):\n\n # drop stdout & stderr and spare some memory, if not needed\n stdout = subprocess.PIPE\n if not expect_stdout:\n stdout = None\n\n stderr = subprocess.PIPE\n if not expect_stderr:\n stderr = 
None\n\n # run command\n p = subprocess.Popen(args,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n env=env)\n if stdin:\n p.stdin.write(stdin)\n stdout, stderr = p.communicate(timeout=timeout)\n\n # decode output\n if expect_stdout and stdout:\n stdoutput = stdout.decode(outencoding)\n else:\n stdoutput = ''\n if expect_stderr and stderr:\n stderror = stderr.decode(errencoding)\n else:\n stderror = ''\n\n return ProcessResult(p.returncode, stdoutput, stderror)", "title": "" }, { "docid": "450680b8189fbab7054c54a101383283", "score": "0.6251427", "text": "def run_subprocess(program):\n print(\"\\tRunning subprocess: %s\" % (\" \".join(program)))\n return_code = None\n while not return_code == 0:\n p = subprocess.Popen(program, stdout=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True, bufsize=1,\n close_fds=True)\n for stdout_line in iter(p.stdout.readline, \"\"):\n print(stdout_line, end='')\n p.stdout.close()\n return_code = p.wait()\n if return_code != 0: print(\"\\t\\t\\t\\t Error n: \", return_code, \" resetting simulation...\")", "title": "" }, { "docid": "a1cb66828ae17353fac81f3e266f21c9", "score": "0.62407917", "text": "def call(cmd_to_run, logger, log_id=None, scan_for_errors=None, **kwargs):\n stdout_log_level = INFO\n stderr_log_level = INFO\n if scan_for_errors is None:\n scan_for_errors = []\n\n child_process = subprocess.Popen(\n cmd_to_run, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)\n\n log_level = {child_process.stdout: stdout_log_level,\n child_process.stderr: stderr_log_level}\n\n def fetch_child_output():\n \"\"\"\n fetch child process output\n \"\"\"\n child_output_streams = select.select(\n [child_process.stdout, child_process.stderr], [], [], 1000)[0]\n for child_output_stream in child_output_streams:\n line = child_output_stream.readline()\n msg = line[:-1]\n msg = msg.decode('utf-8')\n if log_id is not None:\n msg_with_id = '%s %s' % (log_id, msg)\n logger.log(log_level[child_output_stream], msg_with_id)\n for pattern in scan_for_errors:\n if re.match(pattern, msg):\n raise Exception(msg_with_id)\n\n while child_process.poll() is None:\n fetch_child_output()\n\n fetch_child_output()\n\n return child_process.wait()", "title": "" }, { "docid": "3c53a527abb2302afbeb9e212da50093", "score": "0.62195706", "text": "def _run_one(*args, **kwargs) -> subprocess.CompletedProcess:\n if not args or not issubclass(type(args[0]), Sequence):\n raise ValueError(\n \"Arguments must not be empty and first element must be a Sequence\")\n _utils_logger.debug(f\"running: {' '.join(args[0])}\")\n return subprocess.run(*args, **kwargs)", "title": "" }, { "docid": "9b31c8c32ce29b4e438cb0f9c4d6de7d", "score": "0.62186396", "text": "def run_subprocess(command, working_dir='.', env=None, expected_returncode=0,\n print_output=True):\n if env is not None:\n env_ = os.environ.copy()\n env_.update(env)\n env = env_\n\n # Note we need to capture stdout/stderr from the subprocess,\n # then print it, which the unittest will then capture and\n # buffer appropriately\n print(working_dir + \" > \" + \" \".join(command))\n result = subprocess.run(\n command,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n cwd=working_dir,\n env=env,\n )\n if print_output:\n print(result.stdout.decode('utf-8'))\n if expected_returncode is not None:\n assert result.returncode == expected_returncode, \\\n \"Got unexpected return code {}\".format(result.returncode)\n else:\n return (result.returncode, result.stdout.decode('utf-8'))\n return 
result.stdout.decode('utf-8')", "title": "" }, { "docid": "d9a3b960e763d1a55d6140a8ca6c7d3e", "score": "0.62018585", "text": "async def exec_subprocess(argv, capture=True):\n future = Future()\n flags = ((Gio.SubprocessFlags.STDOUT_PIPE if capture else\n Gio.SubprocessFlags.NONE) |\n Gio.SubprocessFlags.STDIN_INHERIT)\n process = Gio.Subprocess.new(argv, flags)\n stdin_buf = None\n cancellable = None\n process.communicate_async(\n stdin_buf, cancellable, gio_callback, future)\n result = await future\n success, stdout, stderr = process.communicate_finish(result)\n stdout = stdout.get_data() if capture else None # GLib.Bytes -> bytes\n if not success:\n raise RuntimeError(\"Subprocess did not exit normally!\")\n exit_code = process.get_exit_status()\n if exit_code != 0:\n raise CalledProcessError(\n \"Subprocess returned a non-zero exit-status!\",\n exit_code,\n stdout)\n return stdout", "title": "" }, { "docid": "e3958690e11fddbf4399dbc189348e42", "score": "0.6161879", "text": "def run_helper(*args, **kwargs):\n return subprocess.run(args, stdout=sys.stderr)", "title": "" }, { "docid": "209990fad31f55d7dcca85af37b46100", "score": "0.613211", "text": "def _run(command):\n proc = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = proc.communicate()\n exit_code = proc.poll()\n return exit_code, out, err", "title": "" }, { "docid": "456063928df99654e506cd819993b791", "score": "0.61281276", "text": "def _run(args, stdin=None):\n process = subprocess.Popen(args,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n output, err = process.communicate(stdin)\n retcode = process.poll()\n return (output, err, retcode)", "title": "" }, { "docid": "136b1ac64efa05dd01c487716431075a", "score": "0.60793394", "text": "def run(\n *cmd: str,\n capture: bool = False,\n raise_on_err: bool = True,\n check_code: t.Callable[[int], bool] = lambda c: c == 0,\n **popen_kwargs: t.Any,\n) -> RunReturn:\n stdout = subprocess.PIPE if capture else None\n stderr = subprocess.PIPE if capture else None\n proc = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, **popen_kwargs)\n out, err = proc.communicate()\n result = RunReturn(\n proc.returncode,\n \"\" if out is None else out.decode(),\n \"\" if err is None else err.decode(),\n )\n if raise_on_err and not check_code(result.returncode):\n raise RuntimeError(result)\n return result", "title": "" }, { "docid": "71e486e297a96a2b34db052a2fc2f3b6", "score": "0.6077282", "text": "def __call__(self, directory=None, shell=None, **kwargs):\n self.logger.debug('Running subprocess')\n\n if directory is None:\n directory = self.directory or '.'\n\n if shell is None:\n shell = self.shell\n\n with ChangeDir(directory):\n scripts = list(self.scripts)\n while scripts:\n script = scripts.pop()\n p = subprocess.Popen(script, shell=shell)\n p.wait()\n self.processes[\"{}\".format(script)] = p", "title": "" }, { "docid": "64355f43222e2f5eefbb310e7a7a8ffa", "score": "0.6076498", "text": "async def run_command(*args):\n # Create subprocess\n process = await asyncio.create_subprocess_exec(\n *args,\n # stdout must a pipe to be accessible as process.stdout\n stdout=asyncio.subprocess.PIPE)\n\n # Status\n # print('Started:', args, '(pid = ' + str(process.pid) + ')')\n\n # Wait for the subprocess to finish\n stdout, stderr = await process.communicate()\n\n # Progress\n if process.returncode == 0:\n return stdout.decode().strip()\n else:\n raise ChildProcessError()", "title": "" }, { "docid": 
"15e65a9fc03ecd5e61a40d9406a8d7cb", "score": "0.60728663", "text": "def run_subprocess(cmd, shell=None):\n if DRYRUN:\n telluser(cmd)\n else:\n log.info(cmd)\n subprocess.run(cmd, shell=True)", "title": "" }, { "docid": "303b33a588968dbf4c09832781a9aa36", "score": "0.6046275", "text": "def _run_cmd(self, cmd: List[str], capture_stderr: bool = False) -> str:\n if self.verbose:\n print(' '.join(cmd))\n\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = proc.communicate()\n\n err = stderr.decode().strip()\n if err:\n if capture_stderr:\n result = err\n\n else:\n raise Exception(err)\n\n else:\n result = stdout.decode().strip()\n\n if self.verbose:\n print(result)\n\n return result", "title": "" }, { "docid": "2689b3ad95f996a2c7397e4836ea4731", "score": "0.6042991", "text": "def _run_process(self, command):\n\n if isinstance(command, str):\n command = shlex.split(command)\n\n proc = Popen(command, shell=True, stdin=PIPE,\n stdout=PIPE, stderr=PIPE, encoding='utf8')\n\n out, err = proc.communicate()\n\n if int(proc.returncode) != 0:\n if err.strip() == \"\":\n err = out\n mesg = \"Error [%d]: %s\" % (proc.returncode, command)\n mesg += \"\\nDetail: %s\" % err\n raise Exception(mesg)\n\n return proc.returncode, out, err", "title": "" }, { "docid": "7c4524cbef7f1562e4c3b55663834c8a", "score": "0.6042534", "text": "def run_cmd(argv):\n proc = subprocess.run(argv,\n capture_output=True,\n cwd=root,\n check=True,\n text=True,\n )\n return proc.stdout", "title": "" }, { "docid": "4d1f0b699f0346f2be588d9178e6736e", "score": "0.6030971", "text": "def _run(self, script, args=[]):\n LOG.info(\"running script '%s' with args %s\" % (script, args))\n now = time.time()\n full_args = [script] + args\n try:\n proc = subprocess.Popen(full_args, close_fds=True, stdout=sys.stdout,\n stderr=sys.stderr)\n proc.wait()\n code = proc.returncode\n except OSError, e:\n if e.errno == errno.ENOEXEC:\n raise InternalError('could not determine subcommand runtime')\n else: raise\n LOG.info(\"command finished in %3f seconds with return code %d\" % ((time.time() - now), code))\n return code", "title": "" }, { "docid": "3e12885d38962eab6854e1a255b1ab8e", "score": "0.6020576", "text": "def run_cmd(cmd, raseit=False, cwd=None):\n try:\n process = subprocess.run(\n cmd,\n cwd=cwd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=True)\n process.check_returncode()\n except Exception as e:\n if raseit:\n print(\n str(e)\n + \"\\nCMD_SHELL : \"+cmd\n + \"\\nSTDOUT : \"+process.stdout.decode()\n + \"\\nSTDERR : \"+process.stderr.decode(),\n exc_info=True)\n else:\n six.reraise(*sys.exc_info())\n\n return process.stdout.decode()", "title": "" }, { "docid": "9cab3a884c2d07dceb7c3e1d956a6be2", "score": "0.60040134", "text": "def _run_command(args, check_returncode=True, cwd=None, env=None):\n p = subprocess.Popen(args,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n cwd=cwd,\n env=env,\n universal_newlines=True)\n pout, _ = p.communicate()\n if check_returncode and p.returncode != 0:\n _raise_command_exception(args, p.returncode, pout)\n return pout", "title": "" }, { "docid": "f2520f493dd3a0c31e0e0417a6f3fa8f", "score": "0.59841704", "text": "def exec(\n self, args: Union[str, List[str]], retry: bool = False\n ) -> CompletedProcess:\n for _ in range(self.max_retries + 1 if retry else 1):\n result = subprocess.run(\n args,\n universal_newlines=True,\n check=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n if result.returncode == 0:\n break\n 
_logger.warning(\n \"Retrying execution failure %s of: %s\",\n result.returncode,\n \" \".join(args),\n )\n return result", "title": "" }, { "docid": "a4147de20e951fc9b7cdd9830e134ee8", "score": "0.5962584", "text": "def run(command: List[str], **kwargs) -> subprocess.CompletedProcess:\n new_command, new_kwargs = _modify_for_windows(command, kwargs)\n return subprocess.run(new_command,\n **new_kwargs)", "title": "" }, { "docid": "1fd3009b92a259c05aeda76c89bb4fe9", "score": "0.59464854", "text": "def run(self):\n self.__prg = subprocess.Popen(self.cmd, stderr=self.errfile) # Starts the execution of the python script in a subprocess\n self.prg.communicate() # Waits for child process to terminate", "title": "" }, { "docid": "26695c2d1c9312daded827925d3784ba", "score": "0.5931324", "text": "def run_cmd(self, command: List[str]) -> ProcessResult:\n self.log.debug(\"run command: %s\", \" \".join(command))\n with subprocess.Popen(command,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE) as proc:\n result = ProcessResult.process_communicate(proc)\n self.log.debug(\"command exit code: %i\", result.code)\n\n return result", "title": "" }, { "docid": "609234e4f29feca5a3c024bb15fab6b5", "score": "0.5883633", "text": "def run(*args):\n try:\n return subprocess.check_output(args,\n shell=False,\n stderr=subprocess.STDOUT).strip()\n except subprocess.CalledProcessError as e:\n fail(\"Command failed: \", args, \"Output:\", e.output)", "title": "" }, { "docid": "c2a71f4249d3e23e4c2012ac24e7e9c7", "score": "0.5819065", "text": "def run(*cmd):\n return subprocess.call(cmd, stdout=open(\"/dev/null\", \"w\"))", "title": "" }, { "docid": "f6c9ff65f278a6f4328b28cbcc5ea63a", "score": "0.5817694", "text": "def run_agents_remotely(self, run_script):\n print(\"\\nInside run_agents_remotely!\")\n\n # Build cmd to run script, i.e. 
\"python3 run_virtual_agent.py\"\n cmd = \"python3 \" + str(run_script)\n\n # Store subprocess for each agent\n ssh_subproc_list = []\n output_obj_list = []\n\n for i in range(self.local_system.group_size):\n fout = open('file_out_' + str(i) + '.txt','w')\n ferr = open('file_err_' + str(i) + '.txt','w')\n output_obj_list.append((fout, ferr))\n\n # Begin each agent subprocess and allow them to run simultaneously\n for (agent, id) in zip(self.local_system.agents_list, range(self.local_system.group_size)):\n print(\"\\nOn agent id: \", agent.group_id)\n\n # Start each subprocess\n (fout, ferr) = output_obj_list[id]\n ssh_subproc = self.begin_ssh_subprocess(agent.ipaddr, cmd, (fout,ferr))\n\n # Store subprocess for later\n ssh_subproc_list.append(ssh_subproc)\n\n # Buffer each subproc by 1 second to allow for setup\n time.sleep(2)\n\n # Print poll codes right when subprocs released\n print(\"\\nPoll code 0: complete with no error.\")\n print(\"Poll code 1: complete with error.\")\n print(\"Poll code None: incomplete, still running.\")\n\n print(\"\\nCurrent poll codes: \", [p.poll() for p in ssh_subproc_list])\n \n # Wait until all simultaneous subprocesses have finished\n counter = 0\n while(1):\n all_subproc_finished = True\n\n for p in ssh_subproc_list:\n\n # poll() == 0 means: complete with no errors\n # poll() == 1 means: complete with errors\n # poll() == None means: still running\n\n if p.poll() == None: \n all_subproc_finished = False\n\n if all_subproc_finished == True:\n break\n\n counter += 1\n\n print(\"\\nAll subprocesses finished: \", [p.poll() for p in ssh_subproc_list])\n\n # Close subprocess files\n for file_tuple in output_obj_list:\n (fout,ferr) = file_tuple\n fout.close()\n ferr.close()\n\n # Print subprocess return\n '''num_subproc = len(ssh_subproc_list)\n for (subproc, index) in zip(ssh_subproc_list, range(num_subproc)):\n\n print(\"\\n\\n\\n\\n------------------------subproc stdout: %d\" % (index))\n for line in subproc.stdout.readlines():\n print(line)\n\n print(\"\\nsubproc stderr:\")\n for line in subproc.stderr.readlines():\n print(line)'''\n\n # Return (fout,ferr) files list\n return output_obj_list", "title": "" }, { "docid": "1f3655be46cb25d5f1368cc53c4a3b37", "score": "0.581698", "text": "def _Exec(self, cmd, raise_on_error=True, return_output=False,\r\n mute_stderr=False):\r\n self.logger.debug('Running command: %s', cmd)\r\n stderr = subprocess.PIPE if mute_stderr else None\r\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=stderr)\r\n (stdoutdata, _) = p.communicate()\r\n if raise_on_error and p.returncode:\r\n raise CommandException(\"Received non-zero return code (%d) from \"\r\n \"subprocess '%s'.\" % (p.returncode, ' '.join(cmd)))\r\n return stdoutdata if return_output else p.returncode", "title": "" }, { "docid": "7ce7404d75cc38f8261d8aed15ade13b", "score": "0.58151835", "text": "def run(command: Union[str, List[str]], **kwargs) -> subprocess.CompletedProcess:\n if isinstance(command, str) and not kwargs.get('shell'):\n command = command.split()\n return subprocess.run(command, **kwargs)", "title": "" }, { "docid": "ff88c87372b6cdf69ff5c89ab7370c5b", "score": "0.5808998", "text": "async def run_command():\n # Create subprocess\n process = await asyncio.create_subprocess_exec(\n 123,\n # stdout must a pipe to be accessible as process.stdout\n stdout=asyncio.subprocess.PIPE)\n\n # Status\n print('Started:', 123, '(pid = ' + str(process.pid) + ')')\n\n\n # Wait for the subprocess to finish\n stdout, stderr = await process.communicate()\n\n # 
Progress\n if process.returncode == 0:\n print('Done:', 123, '(pid = ' + str(process.pid) + ')')\n else:\n print('Failed:', 123, '(pid = ' + str(process.pid) + ')')\n\n # Result\n result = stdout.decode().strip()\n\n # Return stdout\n return result", "title": "" }, { "docid": "5e3566b425df0ee0e10f50f530fdc354", "score": "0.5804455", "text": "def containerexec_run(args):\n if args is None:\n args = sys.args\n\n logging.info('Received arguments: %s', args)\n\n target = Path(args.target[0])\n unpacked_info = metadata_read(target, TYPE_)\n cmdline = args.cmdline\n\n # Loads config\n config = load_config_file(target / 'config.yml', True)\n runs = config.runs\n\n selected_runs = get_runs(runs, args.run, cmdline)\n\n root_dir = target / b\"root\"\n root_dir = str(root_dir.resolve())\n\n if args.x11 and not any('DISPLAY' in s for s in args.pass_env):\n args.pass_env.append('DISPLAY')\n\n signals.pre_run(target=target)\n\n # Each run is executed in its own executor process.\n for run_number in selected_runs:\n run = runs[run_number]\n\n working_dir = shell_escape(run['workingdir'])\n env = fixup_environment(run['environ'], args)\n\n uid = run['uid']\n gid = run['gid']\n\n # FIXME : Use exec -a or something if binary != argv[0]\n if cmdline is None:\n argv = [run['binary']] + run['argv'][1:]\n else:\n argv = cmdline\n\n executor = containerexecutor.ContainerExecutor(uid=uid, gid=gid,\n network_access=True)\n\n # ensure that process gets killed on interrupt/kill signal\n def signal_handler_kill(signum, frame):\n executor.stop()\n signal.signal(signal.SIGTERM, signal_handler_kill)\n signal.signal(signal.SIGINT, signal_handler_kill)\n\n # actual run execution\n try:\n result = executor.execute_run(argv, workingDir=working_dir,\n rootDir=root_dir, environ=env)\n except (BenchExecException, OSError) as e:\n sys.exit(\"Cannot execute process: {0}.\".format(e))\n\n stderr.write(\"\\n*** Command finished, status: %d\\n\" % result.value or result.signal)\n signals.post_run(target=target, retcode=result.value)\n\n # Update input file status\n metadata_update_run(config, unpacked_info, selected_runs)\n metadata_write(target, unpacked_info, TYPE_)", "title": "" }, { "docid": "d679223efcdaacdf7c8a7aa9baf71f99", "score": "0.5799042", "text": "def _run_command(command):\n p = subprocess.Popen(\n command, shell=True,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n output = '\\n'.join(p.stdout.readlines())\n retval = p.wait()\n return retval, output", "title": "" }, { "docid": "5e46e176cf5e7eba3ae9d08727e70e99", "score": "0.57982284", "text": "def run_commands(cmd):\n\n p = subprocess.Popen(\n cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n out, err = p.communicate()\n if out:\n print(out)\n if err:\n print(err)", "title": "" }, { "docid": "293cfa1552dc903c6d00fda3ffe53f9c", "score": "0.5797872", "text": "def run(command, shell=None):\n out_stream = subprocess.PIPE\n err_stream = subprocess.PIPE\n\n if shell is not None:\n p = subprocess.Popen(command, shell=True, stdout=out_stream,\n stderr=err_stream, executable=shell)\n else:\n p = subprocess.Popen(command, shell=True, stdout=out_stream,\n stderr=err_stream)\n (stdout, stderr) = p.communicate()\n\n return stdout, stderr", "title": "" }, { "docid": "d9226dbdc76a1135cbe75feb65dcd8b8", "score": "0.5792703", "text": "def _execute(self, cmd, capture=False, executable=None):\n if capture:\n p_stdout = subprocess.PIPE\n p_stderr = subprocess.PIPE\n else:\n p_stdout = None\n p_stderr = None\n pop = subprocess.Popen(cmd, shell=True, 
executable=executable,\n stdout=p_stdout,\n stderr=p_stderr)\n __stdout, __stderr = pop.communicate()\n _stdout = six.text_type(__stdout, 'utf-8')\n _stderr = six.text_type(__stderr, 'utf-8')\n out = CommandOut(_stdout, _stderr, cmd, pop.returncode)\n return out", "title": "" }, { "docid": "d9979b9515d327579fa3e7a287921cd8", "score": "0.578042", "text": "def call_subprocess(cmd_tokens: List[str],\n cwd: str,\n check: bool = True,\n shell: bool = False,\n env: Optional[Dict[str, str]] = None,\n verbose: bool = False) -> Tuple[int, Any, Any]:\n try:\n r = subprocess.run(cmd_tokens, cwd=cwd, stderr=subprocess.PIPE, stdout=subprocess.PIPE,\n check=check, shell=shell, env=env)\n return r.returncode, r.stdout, r.stderr\n except subprocess.CalledProcessError as err:\n # raise ValueError(f\"An error occurred in a subprocess call:\\ncmd: {' '.join(cmd_tokens)}\\n\"\n # f\"code: {err.returncode}\\n\"\n # f\"output: {err.stdout} \\nerror: {err.stderr}\")\n if verbose:\n print(f\"An error occurred in a subprocess call:\\ncmd: {' '.join(cmd_tokens)}\\n\"\n f\"code: {err.returncode}\\n\"\n f\"output: {err.stdout} \\nerror: {err.stderr}\")\n return err.returncode, err.stdout, err.stderr", "title": "" }, { "docid": "7be121c2690ea6ab00cb40c55ee99279", "score": "0.57646435", "text": "def call(self, args, **kwargs):\n p = subprocess.Popen(args, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT, **kwargs)\n output, _ = p.communicate()\n retcode = p.wait()\n if output:\n self.progress_item.error_info(self.decode(output))\n self.progress_item.error_info(\n '{command} exited with {rc}'.format(\n command=self.pretty_command(args), rc=retcode))\n return retcode", "title": "" }, { "docid": "c70b839618907ca9888125dd88840fec", "score": "0.5764611", "text": "def SimpleSubprocess(*cmd, **kwargs):\n try:\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdoutdata, stderrdata = process.communicate()\n except OSError, e:\n raise RuntimeError(str(e))\n\n returncode = process.returncode\n\n if returncode not in kwargs.get(\"exitcodes\", [0]):\n raise RuntimeError(\" \".join([str(returncode), stderrdata]))\n\n return stdoutdata, stderrdata", "title": "" }, { "docid": "e281d9ac5465b699ab57fb136cd3f337", "score": "0.5742499", "text": "def run(command, *args, check=True, shell=True, silent=False, **kwargs):\n\n if not silent:\n eprint(f\"+ {command} \")\n if silent:\n kwargs.setdefault(\"stdout\", subprocess.DEVNULL)\n return subprocess.run(command, *args, check=check, shell=shell, **kwargs)", "title": "" }, { "docid": "154e7be934909a66b65808e8fcc2395a", "score": "0.5726085", "text": "def run_side_effect(responses: list[RunMock]):\n\n def _subprocess_run_mock(args, **kwargs) -> subprocess.CompletedProcess:\n for response in responses:\n if response.args == args:\n response.call_count += 1\n return subprocess.CompletedProcess(\n args, response.returncode, response.stdout, response.stderr\n )\n extra_args = \"\".join([f\", {k}={v!r}\" for k, v in kwargs.items()])\n raise NotImplementedError(f\"No response specified for subprocess.run({args=}{extra_args})\")\n\n return _subprocess_run_mock", "title": "" }, { "docid": "6854902e12a84abdf49e13705730bd35", "score": "0.57101405", "text": "def _run_commands(commands):\n Pool(cpu_count() - 1).map(partial(subprocess.run, shell=True), commands)", "title": "" }, { "docid": "f758bd5c887476da717f1ab287b2d625", "score": "0.5697833", "text": "def capture_with_result(args, include_stderr=False):\n stderr = subprocess.PIPE\n if include_stderr:\n stderr = 
subprocess.STDOUT\n try:\n p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=stderr)\n except OSError as e:\n if e.errno == errno.ENOENT:\n sys.exit('no such file or directory: %r when running %s.' % (\n args[0], ' '.join(args)))\n raise\n out, _ = p.communicate()\n return out, p.wait()", "title": "" }, { "docid": "8796dd7cba0f03209fa833b9a10f3a4b", "score": "0.568843", "text": "def run_process(*args, input=None):\n shell = os.name == 'nt'\n start_time = _time()\n cp = subprocess.run(\n args, input=input, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n shell=shell, bufsize=1, universal_newlines=True\n )\n end_time = _time()\n return CompletedProcessWrapper(cp, start_time, end_time)", "title": "" }, { "docid": "e4faf79627520a93174c1e0f39aedcb1", "score": "0.56881654", "text": "def subcall(*args, **kwargs):\n return subprocess.call(*args, \n shell=False, close_fds=True,\n stdout=NULL, stdin=None, stderr=NULL,\n **kwargs)", "title": "" }, { "docid": "853ab03cee54d69b09ff83b2c1501933", "score": "0.56744295", "text": "def runcommand(args):\n stdout, stderr = Popen(args, stdout=PIPE, stderr=PIPE).communicate()\n stdout = stdout.decode('utf-8')\n stderr = stderr.decode('utf-8')\n\n if stderr:\n print(\"ERROR! Could not run command %s:\\n%s\" % (args, stderr))\n sys.exit(1)\n\n warning = checkWarning(stdout)\n if warning:\n print(warning)\n sys.exit(1)\n\n return stdout", "title": "" }, { "docid": "fd7c66245d554b22a965e9d7d5e73db7", "score": "0.5668603", "text": "def _run(self, full_params):\r\n print(\"Executing: %s\" % (' '.join(full_params),))\r\n process = Popen(full_params, stdout=PIPE)\r\n return process.communicate()", "title": "" }, { "docid": "5e24d84692f4d57c2c4afeb16d328ad0", "score": "0.56570226", "text": "def run_proc(cmd, stdin=\"/dev/null\", stdout=None, stderr=None):\n pl = pipeline.Procline(cmd, stdin=stdin, stdout=stdout, stderr=stderr)\n pl.wait()", "title": "" }, { "docid": "900915bc130311e8f14fb7e4c971c1e2", "score": "0.565544", "text": "def exec_cmd(cmd_args, shell=False):\n proc = Popen(cmd_args, stdout=PIPE, stderr=PIPE, shell=shell)\n (stdout, stderr) = proc.communicate()\n proc.wait()\n return (stdout, stderr, proc.returncode)", "title": "" }, { "docid": "56eff2f49e67e63ff9be7ed57eb07b1d", "score": "0.5646196", "text": "def Run(args, cwd=None, env=None, shell=False):\n print 'Running: ' + ' '.join(args)\n sys.stdout.flush()\n sys.stderr.flush()\n subprocess.check_call(args, cwd=cwd, env=env, shell=shell)\n sys.stdout.flush()\n sys.stderr.flush()", "title": "" }, { "docid": "7e40f226f7abbbf9f8d6beef965d0104", "score": "0.5634612", "text": "def run(cmd):\n print \"Executing: \" + cmd\n ret = os.system(cmd)\n if ret != 0:\n print \"Command did not exit successfully: \" + str(ret)\n raise Exception(\"Error running subcommand.\")", "title": "" }, { "docid": "5589401df640c95b30ed7d683c84f6c4", "score": "0.5633876", "text": "def run(cmd, input=None,\n capture_output: bool = False,\n check: bool = False,\n encoding: typing.Optional[str] = None,\n quiet: bool = False,\n **kwargs) -> typing.Tuple:\n log.debug('run %r', cmd)\n\n if input is not None:\n kwargs['stdin'] = subprocess.PIPE\n if encoding is not None:\n input = input.encode(encoding)\n\n if capture_output: # Python 3.6 compat\n kwargs['stdout'] = kwargs['stderr'] = subprocess.PIPE\n\n try:\n proc = subprocess.Popen(cmd, startupinfo=get_startupinfo(), **kwargs)\n except OSError as e:\n if e.errno == errno.ENOENT:\n raise ExecutableNotFound(cmd) from e\n else:\n raise\n\n out, err = proc.communicate(input)\n\n if 
not quiet and err:\n err_encoding = sys.stderr.encoding or sys.getdefaultencoding()\n sys.stderr.write(err.decode(err_encoding))\n sys.stderr.flush()\n\n if encoding is not None:\n if out is not None:\n out = out.decode(encoding)\n if err is not None:\n err = err.decode(encoding)\n\n if check and proc.returncode:\n raise CalledProcessError(proc.returncode, cmd,\n output=out, stderr=err)\n\n return out, err", "title": "" }, { "docid": "f1ef7edba3cd4ee0012558eef4b15fd5", "score": "0.5627938", "text": "def run_cmd(cmd):\n print(\"\\nRunning the following command:\")\n print(\" \".join(cmd))\n subprocess.run(cmd, check=True)", "title": "" }, { "docid": "c1a99002860ebe1549888a979e54e8df", "score": "0.56240106", "text": "def run(cmd, cwd, env, justprint):\n print(cmd)\n if justprint:\n return\n\n p = subprocess.Popen(cmd, cwd=cwd, env=env)\n (_, _) = p.communicate()\n if p.returncode != 0:\n raise RuntimeError('Failed to run %s in %s' % (cmd, cwd))", "title": "" }, { "docid": "c4fbd91aa012db11e1dc9d77f9ca0fda", "score": "0.5615696", "text": "def cmd_run(command):\n\n # Escape special chars\n command = shlex.quote(command)\n command = shlex.split(command)\n # maybe try catch here...\n process = subprocess.Popen(\n command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True\n )\n stdout, stderr = process.communicate()\n stdout = stdout.decode().strip()\n stderr = stderr.decode().strip()\n status = process.returncode\n\n return status, stdout, stderr", "title": "" }, { "docid": "4dc025751d1ed8fac63d4c72fe8e4bac", "score": "0.5611747", "text": "def run_on_commandline(*args):\n return subprocess.run(args, text=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE).stdout", "title": "" }, { "docid": "11479b277dd5cc4c89bddf32929ce27d", "score": "0.56092805", "text": "def Exec(args):\n process = subprocess.Popen(args,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n\n def Terminate():\n if process.poll() is None:\n process.terminate()\n process.wait()\n atexit.register(Terminate)\n\n return process", "title": "" }, { "docid": "aa8f310a83e0330217dc5c36ff9e9e2e", "score": "0.5605882", "text": "def capture_output(commands):\n result = subprocess.run(commands, stdout=subprocess.PIPE)\n print(f\"return code: {result.returncode}\")\n command = \" \".join(commands)\n print(f\"output from the {command} executed are {result.stdout.decode('utf-8')}\")", "title": "" }, { "docid": "cc5fe50dff2ba6bd0d7f6d8435aef2dc", "score": "0.55886096", "text": "def run_as_shell(command: str, **kwargs) -> subprocess.CompletedProcess:\n return subprocess.run(command,\n shell=True,\n **kwargs)", "title": "" }, { "docid": "f95ce684fb978c8dce1212f0b47550ba", "score": "0.55783695", "text": "def __exec_sys_command(self, command, args):\n raw_data = sub.run([command, args], stdout=sub.PIPE)\n raw_data.check_returncode()\n return raw_data", "title": "" }, { "docid": "f999c6f385c7c0d0b1147a029a812354", "score": "0.5577511", "text": "def run(command, cwd=None):\n\tc = shlex.split(command)\n\tp = subprocess.Popen(c, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\tstdout, stderr = p.communicate()\n\treturn (p.returncode, stdout, stderr)", "title": "" }, { "docid": "997fe3a9bcefcd9a573f8c4240c8c2ed", "score": "0.5575681", "text": "def run_interactive_subprocess(\n command: Union[str, List[str]],\n folder: Optional[Union[str, Path]] = None,\n **kwargs,\n) -> Generator[Tuple[IO[str], IO[str]], None, None]:\n if isinstance(command, str):\n command = command.split()\n\n with subprocess.Popen(\n command,\n 
stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n encoding=\"utf-8\",\n errors=\"replace\", # if not utf-8, replace char by �\n cwd=folder or os.getcwd(),\n **kwargs,\n ) as process:\n assert process.stdin is not None, \"subprocess is opened as subprocess.PIPE\"\n assert process.stdout is not None, \"subprocess is opened as subprocess.PIPE\"\n yield process.stdin, process.stdout", "title": "" }, { "docid": "12e320572fd865e4ccd9fd5a9e9e260c", "score": "0.557447", "text": "def run_shell_command(args):\n full_command = \" \".join(args)\n print(\"Running command:\", full_command)\n\n try:\n output = subprocess.run(\n full_command,\n capture_output=True,\n shell=True,\n check=True,\n )\n return output.stdout\n except subprocess.CalledProcessError as e:\n print(f\"Command failed: {e.stderr}\")\n raise e", "title": "" }, { "docid": "d611f46592b6a754d9016a14a363f4df", "score": "0.5536819", "text": "def run_and_get_output(arguments: list) -> tuple:\n proc = subprocess.Popen(\n arguments, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = proc.communicate()\n return out.decode('utf-8'), err.decode('utf-8'), proc.returncode", "title": "" }, { "docid": "59d1772c273a4203d4a197cc64c3ccfa", "score": "0.55264896", "text": "def run_command(command, args):\r\n try:\r\n process = subprocess.Popen(\r\n command,\r\n shell=True,\r\n stdout=subprocess.PIPE,\r\n stderr=subprocess.PIPE)\r\n\r\n except subprocess.CalledProcessError as err:\r\n print(\"Status : FAIL\", err.returncode, err.output)\r\n\r\n else:\r\n proc = process.stdout\r\n\r\n while process.poll() is None:\r\n args[0].insert(\"end\", proc.readline())\r\n args[0].insert(\"end\", \"\\n\")\r\n args[0].see(\"end\")\r\n sys.stdout.flush()\r\n\r\n if process.returncode != 0:\r\n args[0].insert(\"end\", \"Error: \" + process.communicate()[1].decode(\"utf-8\") +\" \\n\")\r\n args[0].see(\"end\")\r\n raise Exception(\"The subprocess did not terminate correctly.\")\r\n\r\n # wait for one process to finish\r\n # before starting another\r\n process.wait()", "title": "" }, { "docid": "2d09e548f79d7496b6d49cc4440e65b3", "score": "0.5524058", "text": "def call(cmd, **kwargs): # pragma: no cover\n print(\" \".join(shlex.quote(c) for c in cmd))\n res = subprocess.run(cmd, **kwargs).returncode\n if res:\n sys.exit(res)", "title": "" }, { "docid": "af6faf93923830e44b9d2647b4994a47", "score": "0.55218154", "text": "def run(cmd, dryrun=False, echo=True, supress_stdout = False):\n\n global DRYRUN\n dryrun = DRYRUN\n\n if type(cmd) is list:\n thiscmd = ' '.join(cmd)\n else: thiscmd = cmd\n if echo:\n logger.info(\"Running: {}\".format(thiscmd))\n if dryrun:\n logger.info('Doing a dryrun')\n return 0\n else:\n p = subprocess.Popen(thiscmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = p.communicate()\n if p.returncode:\n logger.error('cmd: {} \\n Failed with returncode {}'.format(thiscmd, p.returncode))\n if supress_stdout:\n logger.debug(out)\n else:\n logger.info(out)\n if len(err) > 0 : logger.warning(err)\n return p.returncode", "title": "" }, { "docid": "d427a4ef5472687a141da3cd8550bcd5", "score": "0.5513089", "text": "def run(args, input=None, redirect=None, redirect_stdout=True, redirect_stderr=True, return_tuple=False, return_code=False, tee=False):\n\n if redirect is not None:\n redirect_stderr = redirect\n redirect_stdout = redirect\n\n if redirect_stderr:\n redirect_stderr=subprocess.PIPE\n else:\n redirect_stderr=sys.stderr\n\n if redirect_stdout:\n redirect_stdout=subprocess.PIPE\n else:\n 
redirect_stdout=sys.stdout\n\n in_redirect=\"\"\n in_fd=None\n if input is not None:\n in_fd = subprocess.PIPE\n in_redirect=\" (<< '%s')\" % input\n\n if verbose: print \"+ %s%s\" % (args, in_redirect)\n p = subprocess.Popen(args, stdin=in_fd, stdout=redirect_stdout, stderr=redirect_stderr)\n \n (out,err) = p.communicate(input=input)\n\n if tee:\n if tee == True: tee=sys.stdout\n if out: print >>tee, \" \"+ out\n if err: print >>tee, \" STDERROR: \" + err\n\n if return_code: return p.returncode\n if return_tuple: return (out,err,p.returncode)\n if err and out: return out + \"\\nSTDERROR: \" + err\n if err: return \"STDERROR: \" + err\n return out", "title": "" }, { "docid": "7c93fb3648ac47cd411865107292daa9", "score": "0.5511636", "text": "def _run_docker(self, run_suffix, processes=1,\n prerun_cmds=None, startGRU=None, countGRU=None, iHRU=None,\n freq_restart=None, progress=None):\n run_cmd = self._gen_summa_cmd(run_suffix, processes, prerun_cmds,\n startGRU, countGRU, iHRU,\n freq_restart, progress)\n run_cmd = run_cmd.replace(self.executable, '/code/bin/summa.exe')\n\n fman_dir = os.path.dirname(self.manager_path)\n settings_path = self.manager['settingsPath'].value\n input_path = self.manager['forcingPath'].value\n output_path = self.manager['outputPath'].value\n cmd = ''.join(['docker run -v {}:{}'.format(fman_dir, fman_dir),\n ' -v {}:{}'.format(input_path, input_path),\n ' -v {}:{} '.format(output_path, output_path),\n \" --entrypoint '/bin/bash' \",\n self.executable,\n ' -c \"',\n run_cmd, '\"'])\n self.process = subprocess.Popen(cmd, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, shell=True)\n self.status = 'Running'", "title": "" }, { "docid": "64a9db8af39da426382a7916f03d337e", "score": "0.5509359", "text": "def log_run(args: typing.List[str], log: typing.IO[str]) -> subprocess.CompletedProcess:\n return subprocess.run(\n args, stdout=log, stderr=log, universal_newlines=True, check=True\n )", "title": "" }, { "docid": "e2ff58a84ed49bad76e60013d56f752c", "score": "0.5505317", "text": "def call(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT):\n print(cmd)\n p = subprocess.run(cmd, stdout=stdout, stderr=stderr, shell=True, check=True, universal_newlines=True)\n return p.stdout", "title": "" }, { "docid": "1c3db29fb42bc2661e182a965ea91213", "score": "0.54950744", "text": "def start_subprocess(command, **kwargs):\n return Popen(command.split(), **kwargs)", "title": "" }, { "docid": "3a635c49a25aa749df517f32ca2d75d2", "score": "0.549118", "text": "def run(cmd, silent=False):\n proc_stdout = ''\n if not silent:\n logging.debug(\"Executing:\\t\\t\" + cmd)\n proc = subprocess.Popen(cmd,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n )\n proc_stdout = proc.communicate()[0]\n if not silent:\n logging.debug(proc_stdout)\n print \"\"\n return proc_stdout.strip()", "title": "" }, { "docid": "6a96f1c9769c0d32e2108c4701db69e1", "score": "0.5490334", "text": "def run_command_and_get_stderr(command: str) -> List[str]:\n print(\"\\nRunning tests, please wait for log output.\")\n proc: subprocess.CompletedProcess = subprocess.run(\n command.split(), capture_output=True\n )\n stderr = proc.stderr.decode(\"utf-8\").splitlines()\n\n print(*stderr, sep=\"\\n\")\n return stderr", "title": "" }, { "docid": "478eda8a482660f0a654203b96e1a1da", "score": "0.5487862", "text": "def run_cmd(args_list, shell=False):\n # info\n args_string = format(' '.join(args_list))\n print('Running system command: '+args_string)\n # transform into string for shell\n if shell:\n args_list = 
args_string\n # execute\n proc = subprocess.Popen(args_list, shell=shell, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n s_output, s_err = proc.communicate()\n s_return = proc.returncode\n return s_return, s_output, s_err", "title": "" }, { "docid": "8b278ff95d5a09bd923e6b10986f51d9", "score": "0.54722047", "text": "def run_shell(shell_command, envvars=None):\n # Run the subprocess\n env = dict(os.environ)\n if envvars:\n env.update(envvars)\n process = subprocess.Popen(shell_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)\n stdout, stderr = process.communicate()\n try:\n stdout = stdout.decode(\"ascii\")\n except Exception:\n try:\n stdout = stdout.decode(\"utf-8\")\n except Exception:\n pass\n try:\n stderr = stderr.decode(\"ascii\")\n except Exception:\n try:\n stderr = stderr.decode(\"utf-8\")\n except Exception:\n pass\n # Return the code, STDOUT and STDERR\n return process.returncode, stdout, stderr", "title": "" }, { "docid": "fbfa3f927b7619c04c464b08a943aad3", "score": "0.54561144", "text": "def run(self, cmd, timeout=None, catch=None, shell=None):\n\n # Use object defaults if params not supplied\n if timeout is None:\n timeout = self.timeout\n if catch is None:\n catch = self.catch\n if shell is None:\n shell = self.shell\n\n # stderr = None is taken to imply catching stderr, done with PIPE\n stderr = self.stderr or subprocess.PIPE\n\n self.outlines = []\n self.exitstatus = None\n\n try:\n self.process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=stderr, shell=shell)\n\n prev_alarm_handler = signal.signal(signal.SIGALRM,\n Spawn._timeout_handler(self.process.pid, timeout))\n signal.alarm(self.timeout)\n for line in self.process.stdout:\n self._write(line)\n self.exitstatus = self.process.wait()\n signal.alarm(0)\n\n signal.signal(signal.SIGALRM, prev_alarm_handler)\n\n except RunTimeoutError as e:\n if catch:\n self._write('Warning - RunTimeoutError: %s\\n' % e)\n else:\n raise\n\n except OSError as e:\n if catch:\n self._write('Warning - OSError: %s\\n' % e)\n else:\n raise\n\n return self.exitstatus", "title": "" }, { "docid": "9c2979b566f410a0204adddcbd02982d", "score": "0.54551834", "text": "def execute(*cmd, **kwargs):\n\n process_input = kwargs.pop('process_input', None)\n check_exit_code = kwargs.pop('check_exit_code', 0)\n delay_on_retry = kwargs.pop('delay_on_retry', True)\n attempts = kwargs.pop('attempts', 1)\n run_as_root = kwargs.pop('run_as_root', False)\n if len(kwargs):\n raise exception.Error(_('Got unknown keyword args '\n 'to utils.execute: %r') % kwargs)\n\n if run_as_root:\n cmd = shlex.split(FLAGS.root_helper) + list(cmd)\n cmd = map(str, cmd)\n\n while attempts > 0:\n attempts -= 1\n try:\n LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))\n _PIPE = -1 # (subprocess.PIPE) # pylint: disable=E1101\n obj = subprocess.Popen(cmd,\n stdin=_PIPE,\n stdout=_PIPE,\n stderr=_PIPE,\n close_fds=True)\n result = None\n if process_input is not None:\n result = obj.communicate(process_input)\n else:\n result = obj.communicate()\n obj.stdin.close() # pylint: disable=E1101\n _returncode = obj.returncode # pylint: disable=E1101\n if _returncode:\n LOG.debug(_('Result was %s') % _returncode)\n if type(check_exit_code) == types.IntType \\\n and _returncode != check_exit_code:\n (stdout, stderr) = result\n raise exception.ProcessExecutionError(\n exit_code=_returncode,\n stdout=stdout,\n stderr=stderr,\n cmd=' '.join(cmd))\n return result\n except exception.ProcessExecutionError:\n if not attempts:\n raise\n else:\n 
LOG.debug(_('%r failed. Retrying.'), cmd)\n if delay_on_retry:\n greenthread.sleep(random.randint(20, 200) / 100.0)\n finally:\n # NOTE(termie): this appears to be necessary to let the subprocess\n # call clean something up in between calls, without\n # it two execute calls in a row hangs the second one\n greenthread.sleep(0)", "title": "" }, { "docid": "997b83c7aa45efcd1191f63fceefe6f9", "score": "0.5454902", "text": "def run(self, command, cwd=None):\n command = self.format(command)\n print command\n # subprocess.call automatically prints the output to stdout, returns the exit code\n # call() also waits for the process to be done\n procExitCode = subprocess.call(command, cwd=self.format(cwd), shell=True)\n if procExitCode != 0:\n raise FailedProcess(message=\"Command Failed: \"+command, exitCode=procExitCode)", "title": "" }, { "docid": "e8246a5d5d27121a77237d66db27f59c", "score": "0.54546165", "text": "def run(command, **kwargs):\n env = None\n if 'env' in kwargs:\n if kwargs.get('env_empty'):\n env = {}\n else:\n env = copy.deepcopy(os.environ)\n env.update(kwargs['env'])\n stdin = kwargs.get('stdin')\n stdout = kwargs.get('stdout', False)\n stderr = kwargs.get('stderr', False)\n combine = kwargs.get('combine', False)\n if stdout is True:\n stdout = sys.stdout.write\n if stderr is True:\n stderr = sys.stderr.write\n if kwargs:\n args = {}\n q = pipes.quote\n for name, value in kwargs.items():\n if isinstance(value, basestring_type):\n args[name] = q(value)\n elif isinstance(value, (list, tuple)):\n args[name] = u' '.join([q(unicode_type(v)) for v in value])\n elif isinstance(value, dict):\n args[name] = u' '.join([u'%s %s' % (q(n), q(v)) for n, v in value.items()])\n else:\n args[name] = pipes.quote(unicode_type(value))\n command = string.Template(command).safe_substitute(args)\n log.debug('run: %s' % command)\n ref = subprocess.Popen(\n command,\n stdin=None if stdin is None else subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT if combine is True else subprocess.PIPE,\n shell=kwargs.get('shell', True),\n close_fds=kwargs.get('close_fds', True),\n env=env,\n cwd=kwargs.get('cwd', tempfile.gettempdir()),\n )\n if stdin is not None:\n if not isinstance(stdin, basestring_type):\n stdin = unicode_type(stdin)\n if not isinstance(stdin, bytes_type):\n stdin = stdin.encode('utf-8')\n ref.stdin.write(stdin)\n ref.stdin.flush()\n ref.stdin.close()\n fds = [ref.stdout]\n if combine is not True:\n fds.append(ref.stderr)\n stdout_result = b''\n stderr_result = b''\n while fds:\n for fd in select.select(fds, tuple(), tuple())[0]:\n line = fd.readline()\n if line:\n if fd == ref.stdout:\n if stdout:\n stdout(line)\n stdout_result += line\n elif fd == ref.stderr:\n if stderr:\n stderr(line)\n stderr_result += line\n else:\n fds.remove(fd)\n ref.wait()\n return obj({\n 'code': ref.returncode,\n 'command': command,\n 'stdout': stdout_result,\n 'stderr': stderr_result,\n }, bool=ref.returncode == 0, grow=False)", "title": "" }, { "docid": "010bc49fdab1fee1b2f34dcdfb06273d", "score": "0.54536873", "text": "def _run_local(self, run_suffix, processes=1, prerun_cmds=None,\n startGRU=None, countGRU=None, iHRU=None, freq_restart=None,\n progress=None):\n run_cmd = self._gen_summa_cmd(run_suffix, processes, prerun_cmds,\n startGRU, countGRU, iHRU, freq_restart,\n progress)\n self.process = subprocess.Popen(run_cmd, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, shell=True)\n self.status = 'Running'", "title": "" }, { "docid": "c9af006ba90e5f221d36906987dbc95e", "score": "0.544714", 
"text": "def job_run(self, str_cmd):\n d_ret : dict = {\n 'stdout': \"\",\n 'stderr': \"\",\n 'cmd': \"\",\n 'cwd': \"\",\n 'returncode': 0\n }\n str_stdoutLine : str = \"\"\n str_stdout : str = \"\"\n\n p = subprocess.Popen(\n str_cmd.split(),\n stdout = subprocess.PIPE,\n stderr = subprocess.PIPE,\n )\n\n # Realtime output on stdout\n str_stdoutLine = \"\"\n str_stdout = \"\"\n while True:\n stdout = p.stdout.readline()\n if p.poll() is not None:\n break\n if stdout:\n str_stdoutLine = stdout.decode()\n if int(self.args['verbosity']):\n print(str_stdoutLine, end = '')\n str_stdout += str_stdoutLine\n d_ret['cmd'] = str_cmd\n d_ret['cwd'] = os.getcwd()\n d_ret['stdout'] = str_stdout\n d_ret['stderr'] = p.stderr.read().decode()\n d_ret['returncode'] = p.returncode\n if int(self.args['verbosity']) and len(d_ret['stderr']):\n print('\\nstderr its here: \\n%s' % d_ret['stderr'])\n return d_ret", "title": "" }, { "docid": "99b57d7991f52ac7bcd29317f114ba89", "score": "0.5446443", "text": "def run_multiproc(num_children, N):\n # TODO: fork num_children subprocesses to compute the results\n # Note: use the create_python_subprocess function above, which returns a POpen\n # object.\n # See https://docs.python.org/3/library/subprocess.html#popen-objects\n # for documentation on POpen objects\n # Note: the return code of the child processes will the value returned by\n # run_child (see __main__ below). You can use this to pass results\n # from the child back to the parent. This is an abuse of the exit code\n # system, which is intended to indicate whether a program failed or not,\n # but since we're only trying to communicate a single integer from the\n # child process to the parent, it suits our purposes.\n # Note: be sure that your implementation is concurrent!\n\n proc_list = []\n for i in range(num_children):\n current_proc = create_python_subprocess([\"child\", str(i), str(num_children), str(N)])\n proc_list.append(current_proc)\n\n result = 0\n for proc in proc_list:\n result += proc.wait()\n return result", "title": "" }, { "docid": "11acf30240adc53695f99442e4a4252f", "score": "0.54438627", "text": "def run_subprocess(executable_command,\n command_arguments = [],\n timeout=None,\n print_process_output=True,\n stdout_file=None,\n stderr_file=None,\n poll_seconds=.100,\n buffer_size=-1,\n daemon=False,\n return_std=False):\n # validate arguments\n # list\n assert_variable_type(command_arguments, list)\n # strings\n assert_variable_type(executable_command, str)\n _string_vars = [stdout_file,\n stderr_file]\n [assert_variable_type(x, [str, NoneType, unicode]) for x in _string_vars + command_arguments]\n # bools\n assert_variable_type(print_process_output, bool)\n assert_variable_type(return_std, bool)\n # floats\n _float_vars = [timeout,\n poll_seconds]\n [assert_variable_type(x, [int, float, NoneType]) for x in _float_vars]\n global process, _nbsr_stdout, _nbsr_stderr\n process = None\n _nbsr_stdout = None\n _nbsr_stderr = None\n def _exec_subprocess():\n # create the subprocess to run the external program\n global process, _nbsr_stdout, _nbsr_stderr\n process = subprocess.Popen([executable_command] + command_arguments, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=buffer_size, preexec_fn=os.setsid)\n # wrap p.stdout with a NonBlockingStreamReader object:\n _nbsr_stdout = NBSRW(process.stdout, print_process_output, stdout_file)\n _nbsr_stderr = NBSRW(process.stderr, print_process_output, stderr_file)\n # if the process is a dameon break\n # execution time returned is start time\n if 
daemon:\n return\n # set deadline if timeout was set\n _deadline = None\n if timeout is not None:\n _deadline = timeit.default_timer() + timeout\n # poll process while it runs\n while process.poll() is None:\n # throw TimeoutError if timeout was specified and deadline has passed\n if _deadline is not None and timeit.default_timer() > _deadline and process.poll() is None:\n os.killpg(process.pid, signal.SIGTERM)\n raise TimeoutError(\"Sub-process did not complete before %.4f seconds elapsed\" %(timeout))\n # sleep to yield for other processes\n time.sleep(poll_seconds)\n execution_time = timeit.timeit(_exec_subprocess, number=1)\n # return process to allow application to communicate with it\n # and extract whatever info like stdout, stderr, returncode\n # also return execution_time to allow\n if return_std:\n return process, execution_time, _nbsr_stdout, _nbsr_stderr\n return process, execution_time", "title": "" }, { "docid": "2a29196d1bce5963a9fe73a83ef1aa27", "score": "0.5441018", "text": "def _run(\n cmd,\n cwd=None,\n stdin=None,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n output_encoding=None,\n output_loglevel=\"debug\",\n log_callback=None,\n runas=None,\n group=None,\n shell=DEFAULT_SHELL,\n python_shell=False,\n env=None,\n clean_env=False,\n prepend_path=None,\n rstrip=True,\n template=None,\n umask=None,\n timeout=None,\n with_communicate=True,\n reset_system_locale=True,\n ignore_retcode=False,\n saltenv=None,\n pillarenv=None,\n pillar_override=None,\n use_vt=False,\n password=None,\n bg=False,\n encoded_cmd=False,\n success_retcodes=None,\n success_stdout=None,\n success_stderr=None,\n windows_codepage=65001,\n **kwargs\n):\n if \"pillar\" in kwargs and not pillar_override:\n pillar_override = kwargs[\"pillar\"]\n if output_loglevel != \"quiet\" and _is_valid_shell(shell) is False:\n log.warning(\n \"Attempt to run a shell command with what may be an invalid shell! \"\n \"Check to ensure that the shell <%s> is valid for this user.\",\n shell,\n )\n\n output_loglevel = _check_loglevel(output_loglevel)\n log_callback = _check_cb(log_callback)\n use_sudo = False\n\n if runas is None and \"__context__\" in globals():\n runas = __context__.get(\"runas\")\n\n if password is None and \"__context__\" in globals():\n password = __context__.get(\"runas_password\")\n\n # Set the default working directory to the home directory of the user\n # salt-minion is running as. Defaults to home directory of user under which\n # the minion is running.\n if not cwd:\n cwd = os.path.expanduser(\"~{}\".format(\"\" if not runas else runas))\n\n # make sure we can access the cwd\n # when run from sudo or another environment where the euid is\n # changed ~ will expand to the home of the original uid and\n # the euid might not have access to it. 
See issue #1844\n if not os.access(cwd, os.R_OK):\n cwd = \"/\"\n if salt.utils.platform.is_windows():\n cwd = os.path.abspath(os.sep)\n else:\n # Handle edge cases where numeric/other input is entered, and would be\n # yaml-ified into non-string types\n cwd = str(cwd)\n\n if bg:\n ignore_retcode = True\n use_vt = False\n\n change_windows_codepage = False\n if not salt.utils.platform.is_windows():\n if not os.path.isfile(shell) or not os.access(shell, os.X_OK):\n msg = \"The shell {} is not available\".format(shell)\n raise CommandExecutionError(msg)\n elif use_vt: # Memozation so not much overhead\n raise CommandExecutionError(\"VT not available on windows\")\n else:\n if windows_codepage:\n if not isinstance(windows_codepage, int):\n windows_codepage = int(windows_codepage)\n previous_windows_codepage = salt.utils.win_chcp.get_codepage_id()\n if windows_codepage != previous_windows_codepage:\n change_windows_codepage = True\n\n # The powershell binary is \"powershell\"\n # The powershell core binary is \"pwsh\"\n # you can also pass a path here as long as the binary name is one of the two\n if any(word in shell.lower().strip() for word in [\"powershell\", \"pwsh\"]):\n # Strip whitespace\n if isinstance(cmd, str):\n cmd = cmd.strip()\n elif isinstance(cmd, list):\n cmd = \" \".join(cmd).strip()\n cmd = cmd.replace('\"', '\\\\\"')\n\n # If we were called by script(), then fakeout the Windows\n # shell to run a Powershell script.\n # Else just run a Powershell command.\n stack = traceback.extract_stack(limit=2)\n\n cmd = _prep_powershell_cmd(shell, cmd, stack, encoded_cmd)\n\n # munge the cmd and cwd through the template\n (cmd, cwd) = _render_cmd(cmd, cwd, template, saltenv, pillarenv, pillar_override)\n ret = {}\n\n # If the pub jid is here then this is a remote ex or salt call command and needs to be\n # checked if blacklisted\n if \"__pub_jid\" in kwargs:\n if not _check_avail(cmd):\n raise CommandExecutionError(\n 'The shell command \"{}\" is not permitted'.format(cmd)\n )\n\n env = _parse_env(env)\n\n for bad_env_key in (x for x, y in env.items() if y is None):\n log.error(\n \"Environment variable '%s' passed without a value. \"\n \"Setting value to an empty string\",\n bad_env_key,\n )\n env[bad_env_key] = \"\"\n\n if output_loglevel is not None:\n # Always log the shell commands at INFO unless quiet logging is\n # requested. The command output is what will be controlled by the\n # 'loglevel' parameter.\n msg = \"Executing command {}{}{} {}{}in directory '{}'{}\".format(\n \"'\" if not isinstance(cmd, list) else \"\",\n _log_cmd(cmd),\n \"'\" if not isinstance(cmd, list) else \"\",\n \"as user '{}' \".format(runas) if runas else \"\",\n \"in group '{}' \".format(group) if group else \"\",\n cwd,\n \". 
Executing command in the background, no output will be logged.\"\n if bg\n else \"\",\n )\n log.info(log_callback(msg))\n\n if runas and salt.utils.platform.is_windows():\n if not HAS_WIN_RUNAS:\n msg = \"missing salt/utils/win_runas.py\"\n raise CommandExecutionError(msg)\n\n if isinstance(cmd, (list, tuple)):\n cmd = \" \".join(cmd)\n\n return win_runas(cmd, runas, password, cwd)\n\n if runas and salt.utils.platform.is_darwin():\n # We need to insert the user simulation into the command itself and not\n # just run it from the environment on macOS as that method doesn't work\n # properly when run as root for certain commands.\n if isinstance(cmd, (list, tuple)):\n cmd = \" \".join(map(_cmd_quote, cmd))\n\n # Ensure directory is correct before running command\n cmd = \"cd -- {dir} && {{ {cmd}\\n }}\".format(dir=_cmd_quote(cwd), cmd=cmd)\n\n # Ensure environment is correct for a newly logged-in user by running\n # the command under bash as a login shell\n try:\n # Do not rely on populated __salt__ dict (ie avoid __salt__['user.info'])\n user_shell = [x for x in pwd.getpwall() if x.pw_name == runas][0].pw_shell\n if re.search(\"bash$\", user_shell):\n cmd = \"{shell} -l -c {cmd}\".format(\n shell=user_shell, cmd=_cmd_quote(cmd)\n )\n except (AttributeError, IndexError):\n pass\n\n # Ensure the login is simulated correctly (note: su runs sh, not bash,\n # which causes the environment to be initialised incorrectly, which is\n # fixed by the previous line of code)\n cmd = \"su -l {} -c {}\".format(_cmd_quote(runas), _cmd_quote(cmd))\n\n # Set runas to None, because if you try to run `su -l` after changing\n # user, su will prompt for the password of the user and cause salt to\n # hang.\n runas = None\n\n if runas:\n # Save the original command before munging it\n try:\n pwd.getpwnam(runas)\n except KeyError:\n raise CommandExecutionError(\"User '{}' is not available\".format(runas))\n\n if group:\n if salt.utils.platform.is_windows():\n msg = \"group is not currently available on Windows\"\n raise SaltInvocationError(msg)\n if not which_bin([\"sudo\"]):\n msg = \"group argument requires sudo but not found\"\n raise CommandExecutionError(msg)\n try:\n grp.getgrnam(group)\n except KeyError:\n raise CommandExecutionError(\"Group '{}' is not available\".format(runas))\n else:\n use_sudo = True\n\n if runas or group:\n try:\n # Getting the environment for the runas user\n # Use markers to thwart any stdout noise\n # There must be a better way to do this.\n import uuid\n\n marker = \"<<<\" + str(uuid.uuid4()) + \">>>\"\n marker_b = marker.encode(__salt_system_encoding__)\n py_code = (\n \"import sys, os, itertools; sys.stdout.write('{0}'); \"\n \"sys.stdout.write('\\\\0'.join(itertools.chain(*os.environ.items()))); \"\n \"sys.stdout.write('{0}');\".format(marker)\n )\n\n if use_sudo:\n env_cmd = [\"sudo\"]\n # runas is optional if use_sudo is set.\n if runas:\n env_cmd.extend([\"-u\", runas])\n if group:\n env_cmd.extend([\"-g\", group])\n if shell != DEFAULT_SHELL:\n env_cmd.extend([\"-s\", \"--\", shell, \"-c\"])\n else:\n env_cmd.extend([\"-i\", \"--\"])\n elif __grains__[\"os\"] in [\"FreeBSD\"]:\n env_cmd = [\n \"su\",\n \"-\",\n runas,\n \"-c\",\n ]\n elif __grains__[\"os_family\"] in [\"Solaris\"]:\n env_cmd = [\"su\", \"-\", runas, \"-c\"]\n elif __grains__[\"os_family\"] in [\"AIX\"]:\n env_cmd = [\"su\", \"-\", runas, \"-c\"]\n else:\n env_cmd = [\"su\", \"-s\", shell, \"-\", runas, \"-c\"]\n\n if not salt.utils.pkg.check_bundled():\n if __grains__[\"os\"] in [\"FreeBSD\"]:\n 
env_cmd.extend([\"{} -c {}\".format(shell, sys.executable)])\n else:\n env_cmd.extend([sys.executable])\n else:\n with tempfile.NamedTemporaryFile(\"w\", delete=False) as fp:\n if __grains__[\"os\"] in [\"FreeBSD\"]:\n env_cmd.extend(\n [\n \"{} -c {} python {}\".format(\n shell, sys.executable, fp.name\n )\n ]\n )\n else:\n env_cmd.extend([\"{} python {}\".format(sys.executable, fp.name)])\n fp.write(py_code)\n shutil.chown(fp.name, runas)\n\n msg = \"env command: {}\".format(env_cmd)\n log.debug(log_callback(msg))\n env_bytes, env_encoded_err = subprocess.Popen(\n env_cmd,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n ).communicate(salt.utils.stringutils.to_bytes(py_code))\n if salt.utils.pkg.check_bundled():\n os.remove(fp.name)\n marker_count = env_bytes.count(marker_b)\n if marker_count == 0:\n # Possibly PAM prevented the login\n log.error(\n \"Environment could not be retrieved for user '%s': \"\n \"stderr=%r stdout=%r\",\n runas,\n env_encoded_err,\n env_bytes,\n )\n # Ensure that we get an empty env_runas dict below since we\n # were not able to get the environment.\n env_bytes = b\"\"\n elif marker_count != 2:\n raise CommandExecutionError(\n \"Environment could not be retrieved for user '{}'\",\n info={\"stderr\": repr(env_encoded_err), \"stdout\": repr(env_bytes)},\n )\n else:\n # Strip the marker\n env_bytes = env_bytes.split(marker_b)[1]\n\n env_runas = dict(list(zip(*[iter(env_bytes.split(b\"\\0\"))] * 2)))\n\n env_runas = {\n salt.utils.stringutils.to_str(k): salt.utils.stringutils.to_str(v)\n for k, v in env_runas.items()\n }\n env_runas.update(env)\n\n # Fix platforms like Solaris that don't set a USER env var in the\n # user's default environment as obtained above.\n if env_runas.get(\"USER\") != runas:\n env_runas[\"USER\"] = runas\n\n # Fix some corner cases where shelling out to get the user's\n # environment returns the wrong home directory.\n runas_home = os.path.expanduser(\"~{}\".format(runas))\n if env_runas.get(\"HOME\") != runas_home:\n env_runas[\"HOME\"] = runas_home\n\n env = env_runas\n except ValueError as exc:\n log.exception(\"Error raised retrieving environment for user %s\", runas)\n raise CommandExecutionError(\n \"Environment could not be retrieved for user '{}': {}\".format(\n runas, exc\n )\n )\n\n if reset_system_locale is True:\n if not salt.utils.platform.is_windows():\n # Default to C!\n # Salt only knows how to parse English words\n # Don't override if the user has passed LC_ALL\n env.setdefault(\"LC_CTYPE\", \"C\")\n env.setdefault(\"LC_NUMERIC\", \"C\")\n env.setdefault(\"LC_TIME\", \"C\")\n env.setdefault(\"LC_COLLATE\", \"C\")\n env.setdefault(\"LC_MONETARY\", \"C\")\n env.setdefault(\"LC_MESSAGES\", \"C\")\n env.setdefault(\"LC_PAPER\", \"C\")\n env.setdefault(\"LC_NAME\", \"C\")\n env.setdefault(\"LC_ADDRESS\", \"C\")\n env.setdefault(\"LC_TELEPHONE\", \"C\")\n env.setdefault(\"LC_MEASUREMENT\", \"C\")\n env.setdefault(\"LC_IDENTIFICATION\", \"C\")\n env.setdefault(\"LANGUAGE\", \"C\")\n\n if clean_env:\n run_env = env\n\n else:\n if salt.utils.platform.is_windows():\n import nt\n\n run_env = nt.environ.copy()\n else:\n run_env = os.environ.copy()\n run_env.update(env)\n\n if prepend_path:\n run_env[\"PATH\"] = \":\".join((prepend_path, run_env[\"PATH\"]))\n\n if \"NOTIFY_SOCKET\" not in env:\n run_env.pop(\"NOTIFY_SOCKET\", None)\n\n if python_shell is None:\n python_shell = False\n\n new_kwargs = {\n \"cwd\": cwd,\n \"shell\": python_shell,\n \"env\": run_env,\n \"stdin\": str(stdin) if stdin is not 
None else stdin,\n \"stdout\": stdout,\n \"stderr\": stderr,\n \"with_communicate\": with_communicate,\n \"timeout\": timeout,\n \"bg\": bg,\n }\n\n if \"stdin_raw_newlines\" in kwargs:\n new_kwargs[\"stdin_raw_newlines\"] = kwargs[\"stdin_raw_newlines\"]\n\n if umask is not None:\n _umask = str(umask).lstrip(\"0\")\n\n if _umask == \"\":\n msg = \"Zero umask is not allowed.\"\n raise CommandExecutionError(msg)\n\n try:\n _umask = int(_umask, 8)\n except ValueError:\n raise CommandExecutionError(\"Invalid umask: '{}'\".format(umask))\n else:\n _umask = None\n\n if runas or group or umask:\n new_kwargs[\"preexec_fn\"] = functools.partial(\n salt.utils.user.chugid_and_umask, runas, _umask, group\n )\n\n if not salt.utils.platform.is_windows():\n # close_fds is not supported on Windows platforms if you redirect\n # stdin/stdout/stderr\n if new_kwargs[\"shell\"] is True:\n new_kwargs[\"executable\"] = shell\n if salt.utils.platform.is_freebsd() and sys.version_info < (3, 9):\n # https://bugs.python.org/issue38061\n new_kwargs[\"close_fds\"] = False\n else:\n new_kwargs[\"close_fds\"] = True\n\n if not os.path.isabs(cwd) or not os.path.isdir(cwd):\n raise CommandExecutionError(\n \"Specified cwd '{}' either not absolute or does not exist\".format(cwd)\n )\n\n if (\n python_shell is not True\n and not salt.utils.platform.is_windows()\n and not isinstance(cmd, list)\n ):\n cmd = salt.utils.args.shlex_split(cmd)\n\n if success_retcodes is None:\n success_retcodes = [0]\n else:\n try:\n success_retcodes = [\n int(i) for i in salt.utils.args.split_input(success_retcodes)\n ]\n except ValueError:\n raise SaltInvocationError(\"success_retcodes must be a list of integers\")\n\n if success_stdout is None:\n success_stdout = []\n else:\n success_stdout = salt.utils.args.split_input(success_stdout)\n\n if success_stderr is None:\n success_stderr = []\n else:\n success_stderr = salt.utils.args.split_input(success_stderr)\n\n if not use_vt:\n # This is where the magic happens\n try:\n if change_windows_codepage:\n salt.utils.win_chcp.set_codepage_id(windows_codepage)\n try:\n proc = salt.utils.timed_subprocess.TimedProc(cmd, **new_kwargs)\n except OSError as exc:\n msg = \"Unable to run command '{}' with the context '{}', reason: {}\".format(\n cmd if output_loglevel is not None else \"REDACTED\",\n new_kwargs,\n exc,\n )\n raise CommandExecutionError(msg)\n\n try:\n proc.run()\n except TimedProcTimeoutError as exc:\n ret[\"stdout\"] = str(exc)\n ret[\"stderr\"] = \"\"\n ret[\"retcode\"] = None\n ret[\"pid\"] = proc.process.pid\n # ok return code for timeouts?\n ret[\"retcode\"] = 1\n return ret\n finally:\n if change_windows_codepage:\n salt.utils.win_chcp.set_codepage_id(previous_windows_codepage)\n\n if output_loglevel != \"quiet\" and output_encoding is not None:\n log.debug(\n \"Decoding output from command %s using %s encoding\",\n cmd,\n output_encoding,\n )\n\n try:\n out = salt.utils.stringutils.to_unicode(\n proc.stdout, encoding=output_encoding\n )\n except TypeError:\n # stdout is None\n out = \"\"\n except UnicodeDecodeError:\n out = salt.utils.stringutils.to_unicode(\n proc.stdout, encoding=output_encoding, errors=\"replace\"\n )\n if output_loglevel != \"quiet\":\n log.error(\n \"Failed to decode stdout from command %s, non-decodable \"\n \"characters have been replaced\",\n _log_cmd(cmd),\n )\n\n try:\n err = salt.utils.stringutils.to_unicode(\n proc.stderr, encoding=output_encoding\n )\n except TypeError:\n # stderr is None\n err = \"\"\n except UnicodeDecodeError:\n err = 
salt.utils.stringutils.to_unicode(\n proc.stderr, encoding=output_encoding, errors=\"replace\"\n )\n if output_loglevel != \"quiet\":\n log.error(\n \"Failed to decode stderr from command %s, non-decodable \"\n \"characters have been replaced\",\n _log_cmd(cmd),\n )\n\n if rstrip:\n if out is not None:\n out = out.rstrip()\n if err is not None:\n err = err.rstrip()\n ret[\"pid\"] = proc.process.pid\n ret[\"retcode\"] = proc.process.returncode\n if ret[\"retcode\"] in success_retcodes:\n ret[\"retcode\"] = 0\n ret[\"stdout\"] = out\n ret[\"stderr\"] = err\n if any(\n [stdo in ret[\"stdout\"] for stdo in success_stdout]\n + [stde in ret[\"stderr\"] for stde in success_stderr]\n ):\n ret[\"retcode\"] = 0\n else:\n formatted_timeout = \"\"\n if timeout:\n formatted_timeout = \" (timeout: {}s)\".format(timeout)\n if output_loglevel is not None:\n msg = \"Running {} in VT{}\".format(cmd, formatted_timeout)\n log.debug(log_callback(msg))\n stdout, stderr = \"\", \"\"\n now = time.time()\n if timeout:\n will_timeout = now + timeout\n else:\n will_timeout = -1\n try:\n proc = salt.utils.vt.Terminal(\n cmd,\n shell=True,\n log_stdout=True,\n log_stderr=True,\n cwd=cwd,\n preexec_fn=new_kwargs.get(\"preexec_fn\", None),\n env=run_env,\n log_stdin_level=output_loglevel,\n log_stdout_level=output_loglevel,\n log_stderr_level=output_loglevel,\n stream_stdout=True,\n stream_stderr=True,\n )\n ret[\"pid\"] = proc.pid\n stdout = \"\"\n stderr = \"\"\n while proc.has_unread_data:\n try:\n try:\n time.sleep(0.5)\n try:\n cstdout, cstderr = proc.recv()\n except OSError:\n cstdout, cstderr = \"\", \"\"\n if cstdout:\n stdout += cstdout\n if cstderr:\n stderr += cstderr\n if timeout and (time.time() > will_timeout):\n ret[\"stderr\"] = \"SALT: Timeout after {}s\\n{}\".format(\n timeout, stderr\n )\n ret[\"retcode\"] = None\n break\n except KeyboardInterrupt:\n ret[\"stderr\"] = \"SALT: User break\\n{}\".format(stderr)\n ret[\"retcode\"] = 1\n break\n except salt.utils.vt.TerminalException as exc:\n log.error(\"VT: %s\", exc, exc_info_on_loglevel=logging.DEBUG)\n ret = {\"retcode\": 1, \"pid\": \"2\"}\n break\n # only set stdout on success as we already mangled in other\n # cases\n ret[\"stdout\"] = stdout\n if not proc.isalive():\n # Process terminated, i.e., not canceled by the user or by\n # the timeout\n ret[\"stderr\"] = stderr\n ret[\"retcode\"] = proc.exitstatus\n if ret[\"retcode\"] in success_retcodes:\n ret[\"retcode\"] = 0\n if any(\n [stdo in ret[\"stdout\"] for stdo in success_stdout]\n + [stde in ret[\"stderr\"] for stde in success_stderr]\n ):\n ret[\"retcode\"] = 0\n ret[\"pid\"] = proc.pid\n finally:\n proc.close(terminate=True, kill=True)\n try:\n if ignore_retcode:\n __context__[\"retcode\"] = 0\n else:\n __context__[\"retcode\"] = ret[\"retcode\"]\n except NameError:\n # Ignore the context error during grain generation\n pass\n\n # Log the output\n if output_loglevel is not None:\n if not ignore_retcode and ret[\"retcode\"] != 0:\n if output_loglevel < LOG_LEVELS[\"error\"]:\n output_loglevel = LOG_LEVELS[\"error\"]\n msg = \"Command '{}' failed with return code: {}\".format(\n _log_cmd(cmd), ret[\"retcode\"]\n )\n log.error(log_callback(msg))\n if ret[\"stdout\"]:\n log.log(output_loglevel, \"stdout: %s\", log_callback(ret[\"stdout\"]))\n if ret[\"stderr\"]:\n log.log(output_loglevel, \"stderr: %s\", log_callback(ret[\"stderr\"]))\n if ret[\"retcode\"]:\n log.log(output_loglevel, \"retcode: %s\", ret[\"retcode\"])\n\n return ret", "title": "" } ]
1f4d4179cd968853da4e5fb2a6f4fb8d
Returns a logger object so that a given file can log its activity. If two loggers are created with the same name, they will write duplicate entries to the same file.
[ { "docid": "004bc6477ad75f3b157d32bd866799bb", "score": "0.6979247", "text": "def get(name, path='activity.log', is_debug_log=False):\n # SOURCE: http://stackoverflow.com/questions/7621897/python-logging-module-globally\n\n # formatter = IndentFormatter(\"%(asctime)s [%(levelname)8s] %(module)30s:%(indent)s%(message)s\")\n formatter = MattsCustomFormatter(name, \"{asctime} [{levelname:8}] {module} :{indent} {message}\", is_debug_log=is_debug_log)\n handler = RotatingFileHandler(path, maxBytes=1024 * 100, backupCount=3)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name) # will return same logger if same name given\n\n logger.setLevel(logging.DEBUG)\n logger.addHandler(handler)\n\n return logger", "title": "" } ]
[ { "docid": "e56dcc006cd3c4b8ef1ffba27df2b49e", "score": "0.8057711", "text": "def get_logger(file_name:str):\n logger = logging.getLogger(file_name)\n formatter = logging.Formatter(\n \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n )\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.INFO)\n return logger", "title": "" }, { "docid": "c4edc56699a9961793d7a07ea5243015", "score": "0.7980008", "text": "def get_logger(name, file_name):\n\n # create logger\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n # create handler\n fileh = logging.FileHandler(file_name)\n fileh.setLevel(logging.DEBUG)\n fileh.terminator = \"\\r\\n\"\n\n # create formatter\n formatter = logging.Formatter(\n # fmt=\"%(asctime)s %(levelname)-5.5s [%(name)s] %(message)s\",\n fmt=\"%(message)s\",\n datefmt=\"%s\")\n\n # add formatter to handler\n fileh.setFormatter(formatter)\n\n # add handler to logger\n logger.addHandler(fileh)\n\n # return logger to caller\n return logger", "title": "" }, { "docid": "24111cc65d259b2bc0881241af9ff71f", "score": "0.7602428", "text": "def __create_logger(cls, log_file_name: str) -> logging.Logger:\n logger = logging.getLogger(log_file_name)\n if logger.handlers:\n # If we already set the handlers for the logger, just return the initialized logger.\n return logger\n logger.setLevel(logging.INFO)\n # logger.addHandler(logging.StreamHandler()) # TODO - un-comment if running only modules.\n # Make sure logs directory exists\n Path(cls.LOG_DIRECTORY).mkdir(parents=True, exist_ok=True)\n # Generate log file\n log_file_path = os.path.join(cls.LOG_DIRECTORY, log_file_name + \".\" + cls._LOG_SUFFIX)\n log_file_handler = logging.FileHandler(log_file_path, mode=\"w\")\n log_format = \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n log_file_handler.setFormatter(logging.Formatter(log_format))\n logger.addHandler(log_file_handler)\n return logger", "title": "" }, { "docid": "70852550709c1638d47dc315536b709f", "score": "0.7589762", "text": "def get_logger(filename):\n logger = logging.getLogger('logger')\n logger.setLevel(logging.DEBUG)\n logging.basicConfig(format='%(message)s', level=logging.DEBUG)\n handler = logging.FileHandler(filename)\n handler.setLevel(logging.DEBUG)\n handler.setFormatter(logging.Formatter(\n '%(asctime)s:%(levelname)s: %(message)s'))\n logging.getLogger().addHandler(handler)\n\n return logger", "title": "" }, { "docid": "a75d184bbbfe3010e8cd17f93a643991", "score": "0.7575303", "text": "def get_logger(logger_name, logging_format, file_name, level=logging.INFO):\n path, prepared = '', True\n for cat in file_name.split('/')[1:-1]:\n path += '/%s' % cat\n if not os.path.exists(path):\n try:\n os.mkdir(path)\n except PermissionError:\n prepared = False\n break\n if not prepared:\n file_name = '/tmp/%s' % file_name.split('/')[-1]\n logging.basicConfig(level=level, format=logging_format)\n log = logging.getLogger(logger_name)\n handler = logging.FileHandler(file_name, encoding='utf8')\n handler.setFormatter(logging.Formatter(logging_format))\n log.addHandler(handler)\n log.setLevel(level=level)\n return log", "title": "" }, { "docid": "b25caaebbac0fa92944356c6a487dd7d", "score": "0.75457287", "text": "def create_logger(log_file_name: str) -> logging.Logger:\n logger = LoggerFactory.__create_logger(log_file_name)\n return logger", "title": "" }, { "docid": "3d2c2da6f4454286b7c026f940e116d7", "score": "0.7455807", "text": "def 
get_logger(cls, filename=__file__):\n options = cls.get_options()\n dirname, basename = os.path.split(os.path.abspath(filename))\n basename = os.path.splitext(basename)[0]\n force_build = False\n force_remove = False\n level = logging.WARN\n if options.debug:\n level = logging.DEBUG\n elif options.info:\n level = logging.INFO\n if options.force:\n force_build = True\n if options.remove:\n force_remove = True\n force_build = True\n mylogger = logging.getLogger(basename)\n logger.setLevel(level)\n mylogger.setLevel(level)\n mylogger.debug(\"tests loaded from file: %s\" % basename)\n if force_build:\n UtilsTest.forceBuild(force_remove)\n return mylogger", "title": "" }, { "docid": "ab52536918b9211963907938d70ce559", "score": "0.7436647", "text": "def get_logger(log_file: str) -> logging.Logger:\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n\n file_handler = logging.FileHandler(log_file)\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n\n return logger", "title": "" }, { "docid": "ab52536918b9211963907938d70ce559", "score": "0.7436647", "text": "def get_logger(log_file: str) -> logging.Logger:\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n\n file_handler = logging.FileHandler(log_file)\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n\n return logger", "title": "" }, { "docid": "77ed19e4650444f7157d652d3eeac919", "score": "0.7434699", "text": "def get_logger(filename): #这个就是日志文件\n logger = logging.getLogger(\"logger\")\n logger.setLevel(logging.DEBUG)\n logging.basicConfig(format='%(message)s', level=logging.DEBUG)\n handler = logging.FileHandler(filename)\n handler.setLevel(logging.DEBUG)\n handler.setFormatter(logging.Formatter(\n '%(asctime)s:%(levelname)s: %(message)s'))\n logging.getLogger().addHandler(handler)\n return logger", "title": "" }, { "docid": "950025712e01b87c18662a1b59875ca0", "score": "0.7419733", "text": "def get_logger(name, level=C.LOGGING_LEVEL, log_file=None):\n logger = logging.getLogger(name)\n logger.addHandler(logging.StreamHandler())\n if log_file:\n logger.addHandler(logging.FileHandler(log_file, encoding='utf-8'))\n logger.setLevel(level)\n return logger", "title": "" }, { "docid": "08062ac862d1070fc1a327e3ec5477c1", "score": "0.7389247", "text": "def get_logger(filepath):\n\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n file_handler = logging.FileHandler(filepath)\n log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n formatter = logging.Formatter(log_format)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n return logger", "title": "" }, { "docid": "6b2aaec3b97c7620678409a1931f8533", "score": "0.7288874", "text": "def get_logger(name):\n\n logger = logging.getLogger(name)\n\n # NOTE: Multiple calls to getLogger() with the same name will return a\n # reference to the same logger object. However, there can be any number of\n # handlers (!) 
If a logger already as one or more handlers, none will be added\n if len(logger.handlers) > 0:\n return logger\n\n logger.setLevel(logging.DEBUG)\n\n # Write logfile\n file_formatter = logging.Formatter('%(asctime)s - %(name)20s \\\n - %(levelname)s - %(message)s')\n\n # Workaround for ReadTheDocs: do not raise an error if we cannot create a log file\n try:\n file_handler = logging.FileHandler(filename=name+'.log', mode='w')\n file_handler.setLevel(logging.DEBUG) # Level for the logfile\n file_handler.setFormatter(file_formatter)\n logger.addHandler(file_handler)\n except PermissionError:\n pass\n\n # Write log messages on the console\n console_formatter = logging.Formatter('%(levelname)-8s - %(message)s')\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.DEBUG) # Level for the console log\n console_handler.setFormatter(console_formatter)\n logger.addHandler(console_handler)\n\n return logger", "title": "" }, { "docid": "d29c261ab96d046a9f600ff31bbe8c56", "score": "0.7267315", "text": "def get_logger(\n name,\n format_str=\"%(asctime)s [%(pathname)s:%(lineno)s - %(levelname)s ] %(message)s\",\n date_format=\"%Y-%m-%d %H:%M:%S\",\n file=False):\n logger = logging.getLogger(name)\n logger.setLevel(logging.INFO)\n # file or console\n handler = logging.StreamHandler() if not file else logging.FileHandler(\n name)\n handler.setLevel(logging.INFO)\n formatter = logging.Formatter(fmt=format_str, datefmt=date_format)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger", "title": "" }, { "docid": "de217b56549995c85fc6a6756f4bfe5a", "score": "0.71906865", "text": "def logger(self,fpath=None,log_level=None):\n log = logging.getLogger(fpath)\n pass", "title": "" }, { "docid": "ce50a06860d2a6c6e853fc3eeceef8d0", "score": "0.7169888", "text": "def get_logger(\n name: str, verbose: bool = True, log_file: str = None, level: str = \"info\"\n) -> logging.Logger:\n\n global loggers\n if loggers.get(name):\n return loggers.get(name)\n else:\n logger = logging.getLogger(name)\n\n level_dict = {\n \"info\": logging.INFO,\n \"debug\": logging.DEBUG,\n \"warning\": logging.WARNING,\n \"error\": logging.ERROR,\n \"critical\": logging.CRITICAL,\n }\n\n logger.setLevel(logging.INFO if verbose else logging.DEBUG)\n logger.setLevel(level_dict[level.lower()])\n if log_file is None:\n handler = logging.StreamHandler()\n else:\n handler = logging.RotatingFileHandler(log_file)\n formatter = logging.Formatter(\n \"[%(asctime)s] p%(process)s {%(filename)s:%(lineno)d} %(levelname)5s - %(message)s\",\n \"%m-%d %H:%M:%S\",\n )\n\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n loggers[name] = logger\n return logger", "title": "" }, { "docid": "26701f6205192fce3e1a659203bd67d8", "score": "0.7152482", "text": "def get_named_logger(name__, file__, auto_config_when_main=True):\n if name__ == '__main__': # pragma: no cover\n name__ = os.path.basename(file__)\n if auto_config_when_main:\n logging.basicConfig()\n\n return logging.getLogger(name__)", "title": "" }, { "docid": "fc1ac2a05da8a5dbd0a9a86d2f986e78", "score": "0.70876354", "text": "def setup_logger(out_dir, file_name='log.log'):\r\n if not os.path.exists(out_dir):\r\n raise NotADirectoryError(\"Could not open {}\".format(out_dir))\r\n import logging\r\n logger = logging.getLogger(os.path.splitext(os.path.basename(file_name))[0])\r\n handler = logging.FileHandler(os.path.join(out_dir, file_name))\r\n handler.setFormatter(logging.Formatter('%(message)s'))\r\n logger.addHandler(handler)\r\n 
logger.setLevel(logging.INFO)\r\n return logger", "title": "" }, { "docid": "38f2e96b2ab49e325e4e3a7c9cf95a71", "score": "0.70174956", "text": "def get_logger(name):\n logger = logging.getLogger(name)\n if not logger.handlers:\n logger.setLevel(logging.INFO)\n\n log_formatter = logging.Formatter(\"%(asctime)s -- %(message)s\")\n\n sys_handler = logging.FileHandler(f\"{config.LOG_DIR}/ac-control.log\")\n sys_handler.setFormatter(log_formatter)\n logger.addHandler(sys_handler)\n\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(log_formatter)\n logger.addHandler(console_handler)\n\n return logger", "title": "" }, { "docid": "b3d1c7538266bc3c384ce36cb1031a74", "score": "0.7012724", "text": "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file, mode='w') \n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "title": "" }, { "docid": "7eb9feb6db9baf22187b255e4fea1e1d", "score": "0.70087415", "text": "def getLogger(name=''):\n\n progname = path.basename(argv[0]).replace('.py', '')\n if name:\n progname += '.' + name\n\n log = logging.getLogger(progname)\n log.setLevel(logging.DEBUG)\n\n if len(log.handlers) == 0:\n ch = logging.StreamHandler(stdout)\n ch.setLevel(logging.DEBUG)\n log.addHandler(ch)\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n )\n ch.setFormatter(formatter)\n\n return log", "title": "" }, { "docid": "65832309773cad556ea6d17559c0ebe0", "score": "0.699998", "text": "def setup_logger(name, log_file=None, level=logging.DEBUG):\n if log_file:\n handler = logging.FileHandler(log_file)\n handler.setFormatter(formatter)\n else:\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "title": "" }, { "docid": "abfe432d92f9edc57f931b37b6c1c992", "score": "0.69960773", "text": "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "title": "" }, { "docid": "892acc32168dfb5283abf749a59fd888", "score": "0.6994684", "text": "def get_logger(*, logger_name):\n\n logger = logging.getLogger(logger_name)\n\n logger.setLevel(logging.INFO)\n\n logger.addHandler(get_console_handler())\n logger.addHandler(get_file_handler())\n logger.propagate = False\n\n return logger", "title": "" }, { "docid": "1bc27b2208b6428999da3f0ae7bcbca2", "score": "0.6991203", "text": "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file) \n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "title": "" }, { "docid": "1bc27b2208b6428999da3f0ae7bcbca2", "score": "0.6991203", "text": "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file) \n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "title": "" }, { "docid": "1bc27b2208b6428999da3f0ae7bcbca2", "score": "0.6991203", "text": "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file) \n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n 
logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "title": "" }, { "docid": "1bc27b2208b6428999da3f0ae7bcbca2", "score": "0.6991203", "text": "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file) \n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "title": "" }, { "docid": "1bc27b2208b6428999da3f0ae7bcbca2", "score": "0.6991203", "text": "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file) \n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "title": "" }, { "docid": "587b63f78a6dd604d114b5c7484b3cdf", "score": "0.6990134", "text": "def setup_logger(name, log_file, level=logging.INFO):\n handler = logging.FileHandler(log_file)\n handler.setFormatter(formatter)\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n return logger", "title": "" }, { "docid": "77dd32ff4d4846558c737d7a34efe917", "score": "0.6986329", "text": "def setup_logger(name, log_file, level=logging.DEBUG):\n\n handler = logging.FileHandler(log_file, mode='w')\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "title": "" }, { "docid": "dd82c3e3cdc27dbe4a63ff7cea2f1f8b", "score": "0.69780666", "text": "def get_logger(name, log_dir, config_dir):\n\tconfig_dict = json.load(open( config_dir + 'log_config.json'))\n\tconfig_dict['handlers']['file_handler']['filename'] = log_dir + name.replace('/', '-')\n\tlogging.config.dictConfig(config_dict)\n\tlogger = logging.getLogger(name)\n\n\tstd_out_format = '%(asctime)s - [%(levelname)s] - %(message)s'\n\tconsoleHandler = logging.StreamHandler(sys.stdout)\n\tconsoleHandler.setFormatter(logging.Formatter(std_out_format))\n\tlogger.addHandler(consoleHandler)\n\n\treturn logger", "title": "" }, { "docid": "acfa292a3ad174817b7e50f6b7e44d4b", "score": "0.69537467", "text": "def create_logger(name):\n logger = getLogger(name)\n log_filename = path.join(path.dirname(path.realpath(__file__)), LOG_FILE_NAME)\n log_formatter = Formatter(LOG_FORMAT)\n\n file_handler = RotatingFileHandler(log_filename, mode='a', maxBytes=512000, backupCount=3)\n file_handler.setLevel(WARNING)\n file_handler.setFormatter(log_formatter)\n\n del logger.handlers[:]\n logger.addHandler(file_handler)\n logger.setLevel(WARNING)\n\n return logger", "title": "" }, { "docid": "5e0f8dbd0470152dc3b784063a0ad717", "score": "0.6941345", "text": "def create_logger(fname, level=logging.INFO):\n logger = logging.getLogger()\n logger.setLevel(level)\n\n # Create outputs.\n if not logger.handlers:\n # Log to file.\n file_handler = logging.FileHandler(fname)\n file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))\n logger.addHandler(file_handler)\n\n # Log to screen.\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(logging.Formatter('%(message)s'))\n logger.addHandler(stream_handler)\n\n return logger", "title": "" }, { "docid": "219e7cdacb19c972d414d859ea5dfa49", "score": "0.69398046", "text": "def getlogger(name, level=\"INFO\",\n filename=None, when=None, interval=1, backupCount=10):\n logger = logging.getLogger(name)\n if filename:\n if when:\n handler = logging.handlers.TimedRotatingFileHandler(filename,\n when=when,\n interval=interval,\n 
backupCount=backupCount)\n else:\n handler = logging.FileHandler(filename)\n else:\n handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(format_string, datefmt=datefmt_string)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(LEVEL_DICT.get(level))\n return logger", "title": "" }, { "docid": "9aa79e2c301aa02a754411ef0f6b70bb", "score": "0.6930704", "text": "def get_logger(name=None, level=\"DEBUG\", print_level=\"DEBUG\", file_level=None):\n logger = logging.getLogger(name)\n logger.setLevel(level)\n\n formatter_full = logging.Formatter(\n '[%(levelname)s] %(asctime)s %(message)s (%(filename)s:%(lineno)s)')\n\n formatter_brief = logging.Formatter(\n '[%(levelname)s] %(asctime)s %(message)s')\n\n if print_level.upper() == 'INFO':\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(print_level)\n stream_handler.setFormatter(formatter_brief)\n logger.addHandler(stream_handler)\n else:\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(print_level)\n stream_handler.setFormatter(formatter_full)\n logger.addHandler(stream_handler)\n\n\n if not os.path.exists('./logs'):\n os.makedirs('./logs', exist_ok=True)\n if file_level.upper() == 'INFO':\n file_handler = logging.handlers.TimedRotatingFileHandler(\n f'./logs/mmnp_{str(name)}.log', when='D', encoding='utf-8')\n file_handler.setLevel(file_level)\n file_handler.setFormatter(formatter_brief)\n logger.addHandler(file_handler)\n elif file_level.upper() == 'DEBUG':\n file_handler = logging.handlers.TimedRotatingFileHandler(\n f'./logs/mmnp_{str(name)}.log', when='D', encoding='utf-8')\n file_handler.setLevel(file_level)\n file_handler.setFormatter(formatter_full)\n logger.addHandler(file_handler)\n\n return logger", "title": "" }, { "docid": "9429d535506145ee881adfa76e4901f9", "score": "0.6926516", "text": "def logger(name, path):\n\t# Create a logger\n\tlogger = logging.getLogger(path)\n\tlogger.setLevel(logging.INFO)\n\t# Create a file handler\n\t# delay = True does not create the logging file if nothing is written\n\tfh = logging.FileHandler(path, mode = 'w', delay = True)\n\t# Define the format of the messages to be logged\n\tformatter = logging.Formatter('%(message)s')\n\tfh.setFormatter(formatter)\n\t# Add the file handler to the logger\n\tlogger.addHandler(fh)\n\treturn logger", "title": "" }, { "docid": "5213cc75e071abb479933009c33193be", "score": "0.6919946", "text": "def _setup_logger(name\n , file\n , dir=os.path.join(file_path, \"logs/\")\n , level=logging.DEBUG):\n\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n \n if not os.path.exists(dir):\n os.makedirs(dir)\n\n handler = logging.FileHandler(os.path.join(dir, file))\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "title": "" }, { "docid": "beaf03122fe9276e6be8a48305851e83", "score": "0.6917317", "text": "def setup_logger(name, log_file, level=logging.DEBUG):\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n\n handler = logging.FileHandler(log_file)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "title": "" }, { "docid": "1bf2fa6a94a50357e6b1da47915e8a93", "score": "0.6917282", "text": "def setup_logger(name, log_file, level=logging.INFO):\n\n\thandler = logging.FileHandler(log_file) \n\thandler.setFormatter(formatter)\n\n\tlogger = 
logging.getLogger(name)\n\tlogger.setLevel(level)\n\tlogger.addHandler(handler)\n\n\treturn logger", "title": "" }, { "docid": "1bf2fa6a94a50357e6b1da47915e8a93", "score": "0.6917282", "text": "def setup_logger(name, log_file, level=logging.INFO):\n\n\thandler = logging.FileHandler(log_file) \n\thandler.setFormatter(formatter)\n\n\tlogger = logging.getLogger(name)\n\tlogger.setLevel(level)\n\tlogger.addHandler(handler)\n\n\treturn logger", "title": "" }, { "docid": "ed4f1b66ee5fd1ad9e4bf706d124c506", "score": "0.69157034", "text": "def createlogger(name):\r\n config = GetConfigs(\"common\")\r\n lev_key = config.getstr(\"LOG_FITER\", \"Default\", \"common\").upper()\r\n lev_dict = {\"DEBUG\": logging.DEBUG, \"INFO\": logging.INFO,\r\n \"WARNING\": logging.WARNING, \"ERROR\": logging.ERROR,\r\n \"CRITICAL\": logging.CRITICAL}\r\n logger = logging.getLogger(name)\r\n logger.setLevel(lev_dict[lev_key])\r\n ch = logging.StreamHandler()\r\n current_time = time.strftime('%Y_%m_%d %H_%M_%S', time.localtime(time.time()))\r\n fh = logging.FileHandler(current_time + \"_CTS_Verifier\" + \".log\")\r\n\r\n formatter = logging.Formatter(\r\n '%(asctime)s.%(msecs)03d: [%(levelname)s] [%(name)s] [%(funcName)s][%(lineno)d] %(message)s',\r\n '%y%m%d %H:%M:%S')\r\n ch.setFormatter(formatter)\r\n logger.addHandler(ch)\r\n logger.addHandler(fh)\r\n fh.setFormatter(formatter)\r\n return logger", "title": "" }, { "docid": "1206a04037174943687f8621aa468156", "score": "0.6911622", "text": "def get_logger(name, log_dir, config_dir):\n config_dict = json.load(open(config_dir + 'log_config.json'))\n config_dict['handlers']['file_handler']['filename'] = log_dir + name.replace('/', '-')\n logging.config.dictConfig(config_dict)\n logger = logging.getLogger(name)\n\n std_out_format = '%(asctime)s - [%(levelname)s] - %(message)s'\n consoleHandler = logging.StreamHandler(sys.stdout)\n consoleHandler.setFormatter(logging.Formatter(std_out_format))\n logger.addHandler(consoleHandler)\n\n return logger", "title": "" }, { "docid": "066a018d8894d2e658b783de7989a057", "score": "0.691053", "text": "def init_logger(log_file: str):\n logger = logging.getLogger(DEFAULT_LOGGER_ID)\n\n logger.setLevel(logging.INFO)\n\n file_handler = logging.FileHandler(log_file)\n file_handler.setLevel(logging.INFO)\n\n format_string = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n formatter = logging.Formatter(format_string)\n\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n return logger", "title": "" }, { "docid": "76231aa2b09b1e29084b5abdf355a2f6", "score": "0.6904924", "text": "def get_logger() -> logging.Logger:\n level = logging.INFO if args.debug else logging.DEBUG\n logging.basicConfig(filename='main.log',\n filemode='a',\n format='%(asctime)s.%(msecs)03d %(name)s %(levelname)s %(message)s',\n datefmt='%H:%M:%S',\n level=level)\n\n logger = logging.getLogger('main')\n logger.addHandler(logging.StreamHandler())\n return logger", "title": "" }, { "docid": "0f1c1333092e1de0ebbb58a441e6eeb7", "score": "0.6896513", "text": "def get_logger():\n\n logging.basicConfig(filename=os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir,\n \"functiondefextractor\",\n \"extractor.log\")),\n format='%(asctime)s %(message)s', filemode='a') # pragma: no mutate\n # Creating log Object\n __logger = logging.getLogger()\n # Setting the threshold of logger to DEBUG\n __logger.setLevel(logging.DEBUG)\n return __logger", "title": "" }, { "docid": "bf66d3ed284f75e751d6ccafabd6834b", "score": "0.6896079", "text": "def 
setup_logger(name, log_file, level=LOG_LEVEL) -> logging.Logger:\n\n if not os.path.exists(log_file):\n dr = os.path.dirname(log_file)\n if not os.path.isdir(dr):\n os.makedirs(dr)\n with open(log_file, 'w'):\n pass\n file_handler = logging.FileHandler(log_file)\n file_handler.setFormatter(LOG_FORMATTER)\n stream_handler = logging.StreamHandler(sys.stdout)\n stream_handler.setFormatter(LOG_FORMATTER)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(file_handler)\n logger.addHandler(stream_handler)\n\n return logger", "title": "" }, { "docid": "b4d816b02726270a2956dc2a459d260b", "score": "0.68728113", "text": "def setup_logger(name, log_file, level=logging.INFO, format=formatter):\n\n handler = logging.FileHandler(log_file)\n handler.setFormatter(format)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "title": "" }, { "docid": "e1a897e46dec5bce73a94e9a50ad4e06", "score": "0.68723553", "text": "def get_logger(lname, logfile):\n logger = logging.getLogger(lname)\n logging.config.dictConfig({\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'standard': {\n 'format': '%(levelname)s:\\t%(message)s'\n },\n 'verbose': {\n 'format': '%(levelname)s:\\t%(message)s\\tFROM: %(name)s'\n }\n },\n 'handlers': {\n 'stdout': {\n 'level': 'INFO',\n 'formatter': 'verbose',\n 'class': 'logging.StreamHandler'\n },\n 'logfile': {\n 'level': 'INFO',\n 'formatter': 'standard',\n 'class': 'logging.FileHandler',\n 'filename': logfile\n }\n },\n 'loggers': {\n '': {\n 'handlers': ['stdout', 'logfile'],\n 'level': 'INFO',\n 'propagate': True\n }\n }\n })\n return logger", "title": "" }, { "docid": "720438826ef7b2b821b3622b9bbd107c", "score": "0.68673486", "text": "def get_logger(name=None, level='INFO', stream='stderr', filename=None, log_dir='./logs/'):\n os.makedirs(log_dir, exist_ok=True)\n stream = sys.stderr if stream == 'stderr' else sys.stdout\n log_level = {'DEBUG': logging.DEBUG,\n 'INFO': logging.INFO,\n 'WARNING': logging.WARNING,\n 'ERROR': logging.ERROR,\n 'CRITICAL': logging.CRITICAL}.get(level.upper(), 'INFO')\n handlers = []\n if filename:\n handlers.append(logging.FileHandler(os.path.join(log_dir, filename + '.log')))\n if stream:\n handlers.append(logging.StreamHandler(stream))\n\n logging.basicConfig(\n level=log_level,\n format=\"%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s\",\n handlers=handlers)\n return logging.getLogger(name)", "title": "" }, { "docid": "529facc3c3ac7f9803c8c42e894aa3cd", "score": "0.6860295", "text": "def get_logger(path):\n\n os.makedirs(os.path.dirname(path), exist_ok=True)\n\n logger = logging.getLogger(\"simulation\")\n file_handler = logging.FileHandler(path, mode=\"w\")\n\n logger.setLevel(logging.INFO)\n logger.addHandler(file_handler)\n\n return logger", "title": "" }, { "docid": "832c3a604158937fb9685491d47ca127", "score": "0.68588614", "text": "def get_logger(module_name):\n logger = logging.getLogger('%s_logger' % (module_name))\n file_handler = logging.FileHandler('newscrapy.log')\n console_handler = logging.StreamHandler()\n file_formatter = logging.Formatter('%(asctime)s - %(name)s - '\n '%(levelname)s - %(message)s')\n console_formatter = logging.Formatter('%(message)s')\n logger.setLevel(logging.DEBUG)\n file_handler.setLevel(logging.DEBUG)\n console_handler.setLevel(logging.INFO)\n file_handler.setFormatter(file_formatter)\n console_handler.setFormatter(console_formatter)\n logger.addHandler(file_handler)\n 
logger.addHandler(console_handler)\n return logger", "title": "" }, { "docid": "6630dd2fce770c2002ab0e6c9278f36e", "score": "0.684771", "text": "def initialize_logger(file_dir):\n logger = logging.getLogger()\n fhandler = logging.FileHandler(filename=file_dir, mode='a')\n formatter = logging.Formatter('%(asctime)s - %(message)s',\"%Y-%m-%d %H:%M:%S\")\n fhandler.setFormatter(formatter)\n logger.addHandler(fhandler)\n logger.setLevel(logging.INFO)\n return logger", "title": "" }, { "docid": "2289764a63b9bf76db484b2ce3b9d62f", "score": "0.6838617", "text": "def get_logger(logger_name: str) -> Logger:\n\n # logging module seems to use snake case\n # pylint: disable=invalid-name\n logger = logging.getLogger(logger_name)\n\n fileHandler = logging.FileHandler(\"logs/{}.log\".format(logger_name))\n streamHandler = logging.StreamHandler()\n\n logger.addHandler(fileHandler)\n logger.addHandler(streamHandler)\n\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n fileHandler.setFormatter(formatter)\n streamHandler.setFormatter(formatter)\n\n # Default logging level INFO\n logger.setLevel(logging.INFO)\n\n return logger", "title": "" }, { "docid": "61ca326a63da19f93c21285bf48c3d87", "score": "0.68242145", "text": "def get_logger(name=None):\n global loggers\n\n if not name: name = __name__\n\n if loggers.get(name):\n return loggers.get(name)\n\n logger = logging.getLogger(name)\n logger.setLevel(LOG_LEVEL)\n\n # 输出到控制台\n if LOG_ENABLED and LOG_TO_CONSOLE:\n stream_handler = logging.StreamHandler(sys.stdout)\n stream_handler.setLevel(level=LOG_LEVEL)\n formatter = logging.Formatter(LOG_FORMAT)\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n\n # 输出到文件\n if LOG_ENABLED and LOG_TO_FILE:\n # 如果路径不存在,创建日志文件文件夹\n log_dir = dirname(LOG_PATH)\n if not exists(log_dir): makedirs(log_dir)\n # 添加 FileHandler\n file_handler = logging.FileHandler(LOG_PATH, encoding='utf-8')\n file_handler.setLevel(level=LOG_LEVEL)\n formatter = logging.Formatter(LOG_FORMAT)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n\n # 保存到全局 loggers\n loggers[name] = logger\n return logger", "title": "" }, { "docid": "1e24e3dd10d773188f1fdf88a31c50e5", "score": "0.6817867", "text": "def get_skll_logger(name, filepath=None, log_level=logging.INFO):\n\n # first get the logger instance associated with the\n # given name if one already exists\n logger = logging.getLogger(name)\n logger.setLevel(log_level)\n\n # if we are given a file path and this existing logger doesn't already\n # have a file handler for this file, then add one.\n if filepath:\n def is_file_handler(handler):\n return isinstance(handler, logging.FileHandler) and handler.stream.name == filepath\n need_file_handler = not any([is_file_handler(handler) for handler in logger.handlers])\n if need_file_handler:\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - '\n '%(message)s')\n file_handler = logging.FileHandler(filepath, mode='w')\n file_handler.setFormatter(formatter)\n file_handler.setLevel(log_level)\n logger.addHandler(file_handler)\n\n warnings.showwarning = partial(send_sklearn_warnings_to_logger, logger)\n\n # return the logger instance\n return logger", "title": "" }, { "docid": "914dd4dd6642c579f4d5a2961a5961b9", "score": "0.68069", "text": "def get_logger():\n\n logging.basicConfig(filename=os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir,\n \"similarity_processor\",\n \"text_similarity.log\")),\n format='%(asctime)s %(message)s', filemode='a')\n 
# Creating log Object\n __logger = logging.getLogger()\n # Setting the threshold of logger to DEBUG\n __logger.setLevel(logging.DEBUG)\n return __logger", "title": "" }, { "docid": "b060e4d37eb0adfc5ab2a7be69bc506d", "score": "0.6787787", "text": "def default_logger(\n self, name=__name__, enable_stream=True, enable_file=False\n ):\n\n log = logging.getLogger(name)\n self.name = name\n\n if enable_file:\n self.enable_file = enable_file\n file_handler = handlers.RotatingFileHandler(\n filename=self.return_logfile(filename=\"%s.log\" % name),\n maxBytes=self.max_size,\n backupCount=self.max_backup,\n )\n self.set_handler(log, handler=file_handler)\n\n if enable_stream:\n self.enable_stream = enable_stream\n stream_handler = logging.StreamHandler()\n self.set_handler(log, handler=stream_handler)\n\n return log", "title": "" }, { "docid": "ff6fd2f5aa1a5ee8e508923569d2ed6b", "score": "0.67446846", "text": "def setup_logger(name, log_file, level=logging.INFO, log_loc='log'):\n\n BASE_DIR = os.path.abspath(os.path.dirname(__name__))\n log_dir = os.path.join(BASE_DIR, log_loc)\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n filename = log_dir + '/' + log_file + \".txt\"\n handler = TimedRotatingFileHandler(filename, when='D', interval=1, backupCount=5, encoding=None)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "title": "" }, { "docid": "bdcb7bed0c17544fd1a1ed72e897ff45", "score": "0.6742584", "text": "def setup_logger(name, log_file, level=logging.INFO):\n\n formatter = logging.Formatter('%(asctime)s - %(message)s')\n\n handler = logging.FileHandler(log_file, mode='a') \n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n \n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n console.setFormatter(formatter)\n logger.addHandler(console)\n\n return logger", "title": "" }, { "docid": "35995657a3e6ee166ccf96db0e6211bb", "score": "0.6738479", "text": "def setup_logger(name, log_file, level=logging.INFO):\n\n formatter = logging.Formatter('%(asctime)s.%(msecs)03d : %(message)s')\n handler = logging.FileHandler(log_file)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "title": "" }, { "docid": "183ebb39f13ffd88a3b29257972bef51", "score": "0.67303795", "text": "def get_logger(name='recibrew'):\n import logging\n logger = logging.getLogger(name)\n c_handler = logging.StreamHandler()\n c_handler.setLevel(logging.INFO)\n c_format = logging.Formatter('[%(name)s] - [%(levelname)s] || %(message)s')\n c_handler.setFormatter(c_format)\n logger.addHandler(c_handler)\n logger.setLevel(logging.INFO)\n return logger", "title": "" }, { "docid": "b6cfbdf61a4d5925e295ff07318cea86", "score": "0.6726224", "text": "def __init__(self, file_path, name=\"logger\"):\n self.logger = logging.getLogger(name)\n handler = logging.FileHandler(filename=file_path)\n self.logger.setLevel(logging.INFO)\n handler.setLevel(logging.INFO)\n formatter = logging.Formatter('%(message)s')\n handler.setFormatter(formatter)\n self.logger.addHandler(handler)", "title": "" }, { "docid": "ba95c2a401b9af83768f74ab5b7e44e5", "score": "0.6713972", "text": "def get_logger(name='osmnx', level=lg.INFO):\n logger = lg.getLogger(name)\n \n # if a logger with this name is not already set up\n if not getattr(logger, 'handler_set', None):\n \n # get today's date and 
construct a log filename\n todays_date = dt.datetime.today().strftime('%Y_%m_%d')\n log_filename = '{}/{}_{}.log'.format(_logs_folder, name, todays_date)\n \n # if the logs folder does not already exist, create it\n if not os.path.exists(_logs_folder):\n os.makedirs(_logs_folder)\n \n # create file handler and log formatter and set them up\n handler = lg.FileHandler(log_filename, encoding='utf-8')\n formatter = lg.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(level)\n logger.handler_set = True\n \n return logger", "title": "" }, { "docid": "1169ec6fff68784bd1907af1834cbf91", "score": "0.6712116", "text": "def get_logger():\n return logger", "title": "" }, { "docid": "4e8f3118a69ce669fbcf0f6628d6591a", "score": "0.67117894", "text": "def _create_logger(save_folder, logger_name='SVM_logger', logger_file='svm.log'):\n\n # Set up logger\n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.INFO)\n log_path = os.path.join(save_folder, logger_file)\n\n # Create handlers\n s_handler = logging.StreamHandler()\n f_handler = logging.FileHandler(log_path)\n s_handler.setLevel(logging.INFO)\n f_handler.setLevel(logging.INFO)\n\n # Create formatters and add it to handlers\n s_format = logging.Formatter('%(name)s - %(levelname)s - %(message)s')\n f_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n s_handler.setFormatter(s_format)\n f_handler.setFormatter(f_format)\n\n # Add handlers to the logger\n logger.addHandler(s_handler)\n logger.addHandler(f_handler)\n\n return logger", "title": "" }, { "docid": "8716261b8f928cfabae3be28d257429a", "score": "0.67083234", "text": "def get_logger(logger_name):\n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.DEBUG)\n # We do not want to print in the console\n # logger.addHandler(get_console_handler())\n logger.addHandler(get_file_handler())\n logger.propagate = False\n return logger", "title": "" }, { "docid": "b8c6f8e29588322de5b89048789f715e", "score": "0.67043406", "text": "def createLoggers(file_name):\n logger = logging.getLogger('logger_master')\n logger.setLevel(logging.DEBUG)\n\n if is_logging:\n fh = logging.FileHandler(log_location+file_name, 'w')\n fh.setLevel(logging.DEBUG)\n logger.addHandler(fh)\n # create console handler with a higher log level\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n logger.addHandler(ch)", "title": "" }, { "docid": "e606d42571f0ab3a57bb040c08324c14", "score": "0.67004263", "text": "def make_new_logger(name, log_dir=None):\n\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n if log_dir:\n log_file = join(log_dir, name+'.log')\n handler = logging.FileHandler(log_file)\n handler.setFormatter(formatter)\n handler.setLevel(logging.DEBUG)\n logger.addHandler(handler)\n\n stream = logging.StreamHandler(sys.stdout)\n stream.setLevel(logging.INFO)\n logger.addHandler(stream)\n\n return logger", "title": "" }, { "docid": "afb1a55ce7ebcc57351139b062f1f941", "score": "0.66951686", "text": "def get_logger():\n logger = get_logger\n logger = logging.getLogger(__name__)\n logger.addHandler(logging.StreamHandler())\n logger.setLevel(logging.INFO)\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(logging.DEBUG)\n FORMAT = \"[%(filename)s:%(lineno)s - %(funcName)5s() ] %(message)s\"\n formatter = logging.Formatter(FORMAT)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger", "title": "" }, { "docid": 
"07ae0c60a907ad2406d03029a8fab56b", "score": "0.668413", "text": "def create_logger():\n logging.basicConfig(filename=\"Log.log\", level=logging.WARNING,\n format='[%(funcName)s]:%(message)s')\n return logging", "title": "" }, { "docid": "878b4b7f13b45ffc18807baa91ddd497", "score": "0.66814494", "text": "def create_logger(filepath, rank):\r\n # create log formatter\r\n log_formatter = LogFormatter()\r\n\r\n # create file handler and set level to debug\r\n if filepath is not None:\r\n if rank > 0:\r\n filepath = '%s-%i' % (filepath, rank)\r\n file_handler = logging.FileHandler(filepath, \"a\")\r\n file_handler.setLevel(logging.DEBUG)\r\n file_handler.setFormatter(log_formatter)\r\n\r\n # create console handler and set level to info\r\n console_handler = logging.StreamHandler()\r\n console_handler.setLevel(logging.INFO)\r\n console_handler.setFormatter(log_formatter)\r\n\r\n # create logger and set level to debug\r\n logger = logging.getLogger()\r\n logger.handlers = []\r\n logger.setLevel(logging.DEBUG)\r\n logger.propagate = False\r\n if filepath is not None:\r\n logger.addHandler(file_handler)\r\n logger.addHandler(console_handler)\r\n\r\n # reset logger elapsed time\r\n def reset_time():\r\n log_formatter.start_time = time.time()\r\n logger.reset_time = reset_time\r\n\r\n return logger", "title": "" }, { "docid": "f7525206fecf0848141f5b2972989f9b", "score": "0.6676363", "text": "def log(file: str, level: str = 'debug') -> logging.Logger:\n logger = logging.getLogger(file)\n logger.setLevel(f'{level.upper()}')\n name = f'{Path(file.lower()).stem}.log'\n name = Path(os.path.join(_logs, name))\n custom_format = ('%(asctime)s %(levelname)-8s %(threadName)-8s '\n '%(filename)s:%(lineno)-15s %(message)s')\n formatter = TimeFormatter(custom_format, '%Y-%m-%d %H:%M:%S.%F %Z')\n # Create log file.\n file_handler = logging.FileHandler(os.path.join(_logs, name))\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n # Print log statement.\n stream_handler = logging.StreamHandler(sys.stdout)\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n # Return logger object.\n return logger", "title": "" }, { "docid": "8dce0b3e45a3a996fd16a99169d90fe6", "score": "0.6673964", "text": "def setup_logger(name, log_file, level=logging.INFO):\n\n # Create the Logger\n logger = logging.getLogger(name)\n logger.addFilter(MemoryTracer())\n\n logger.setLevel(logging.DEBUG)\n\n if not any(isinstance(hdl, logging.FileHandler) for hdl in logger.handlers):\n if logger.hasHandlers():\n logger.handlers.clear()\n logger.propagate = False\n\n # Create a Formatter for formatting the log messages\n formatter = logging.Formatter(\n '{asctime} (Mem:{mem}) {name:^15} {levelname}: {message}',\n '%H:%M:%S',\n style=\"{\"\n )\n\n # Create the Handler for logging data to a file\n Path(log_file.parent).mkdir(exist_ok=True)\n logger_handler = logging.FileHandler(str(log_file))\n logger_handler.setLevel(logging.DEBUG)\n logger_handler.setFormatter(formatter)\n logger.addHandler(logger_handler)\n\n # Create the Handler for logging data to console.\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.getLevelName(level))\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n\n return logger", "title": "" }, { "docid": "7e1196be78a45e2f7307511e22e9d955", "score": "0.6663203", "text": "def create_logger(filepath='gbackend/config/logging.yml'):\n logging.config.dictConfig(yaml.load(open(filepath)))\n logger = logging.getLogger()\n return 
logger", "title": "" }, { "docid": "dad4f5e1927c7028dad498554aebe166", "score": "0.6660014", "text": "def get_logger(name):\n\n logger = logging.getLogger(name)\n\n return logger", "title": "" }, { "docid": "05697bbbe893c9f652a8aecca4b98f73", "score": "0.66529167", "text": "def get_logger(name):\n return logging.getLogger(name=name)", "title": "" }, { "docid": "4332ad525804ba7d583593845fc8f3d7", "score": "0.66517586", "text": "def setup_file_logger(filename, formatting, log_level):\n logger = logging.getLogger()\n # If a stream handler has been attached, remove it.\n if logger.handlers:\n logger.removeHandler(logger.handlers[0])\n handler = logging.FileHandler(filename)\n logger.addHandler(handler)\n formatter = logging.Formatter(*formatting)\n handler.setFormatter(formatter)\n logger.setLevel(log_level)\n handler.setLevel(log_level)\n return logger", "title": "" }, { "docid": "3162b2d25bd3b7e2cc3157716c24ced3", "score": "0.6637398", "text": "def getLogger(name):\n log = logging.getLogger(name=name)\n for handler in log.handlers:\n if name == handler.name:\n return log\n else:\n return LogSetup().default_logger(name=name.split('.')[0])", "title": "" }, { "docid": "e866ae482e0d8249fcba04c05a998d6f", "score": "0.6636008", "text": "def get_logger(name):\n log = logging.getLogger(name)\n log.addHandler(logging.NullHandler())\n return log", "title": "" }, { "docid": "6311a0fa81efee48590efdfa16e3b2e6", "score": "0.6633553", "text": "def get_logger() -> logging.Logger:\n return logging.getLogger(logger_name())", "title": "" }, { "docid": "e31163ca4a5f0493440c753f4d2378bd", "score": "0.6615768", "text": "def get_logger(logger_name=DEFAULT_LOGGER_NAME, log_file_name=None, log_level=None):\n\n # a level of logging can be set by enviroment variable.\n # Check it if this one wasn't be obtained by parameter\n if log_level is None:\n if ENV_LOG_LEVEL in os.environ:\n log_level = os.environ.get(ENV_LOG_LEVEL)\n else:\n log_level = DEFAULT_LOG_LEVEL\n\n logger = logging.getLogger(logger_name)\n logger.setLevel(log_level)\n log_handler = None\n\n if log_file_name is None:\n if ENV_LOG_FNAME in os.environ:\n log_file_name = os.environ.get(ENV_LOG_FNAME)\n\n # the whole path to the log file. 
Can be None\n whole_log_file = None\n\n if log_file_name is not None:\n whole_log_file = _get_whole_log_file(log_file_name)\n\n if whole_log_file is None:\n log_handler = logging.StreamHandler()\n else:\n log_handler = logging.FileHandler(whole_log_file)\n\n formatter = logging.Formatter(LOG_FORMAT)\n log_handler.setFormatter(formatter)\n\n logger.addHandler(log_handler)\n\n return logger", "title": "" }, { "docid": "90a973e284b8e8234a050cb348fc4158", "score": "0.6609217", "text": "def createlogger(name): \n logger = logging.getLogger(name)\n logger.setLevel(\"DEBUG\")\n fh = logging.FileHandler(log_path+\"\\\\flash_test.txt\")\n #ch = logging.StreamHandler()\n formatter = logging.Formatter(\n '%(asctime)s.%(msecs)03d: [%(levelname)s] [%(name)s] [%(funcName)s] %(message)s',\n '%y%m%d %H:%M:%S')\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n return logger", "title": "" }, { "docid": "72367079163345ebbb139e710d271f0c", "score": "0.66047484", "text": "def create_logger(logger_name, logger_level=20, log_file_path=None):\n logger = logging.getLogger(logger_name)\n logger.setLevel(logger_level)\n\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n # Console output\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n\n if log_file_path:\n # File output\n file_handler = logging.FileHandler(log_file_path)\n file_handler.setFormatter(formatter)\n file_handler.setLevel(logging.ERROR)\n logger.addHandler(file_handler)\n\n return logger", "title": "" }, { "docid": "a13a4781328730bb12e9b1bdd601a296", "score": "0.65970457", "text": "def default_logger(self, name=__name__, enable_stream=False,\n enable_file=True):\n if self.format is None:\n self.format = logging.Formatter(\n '%(asctime)s - %(module)s:%(levelname)s => %(message)s'\n )\n\n log = logging.getLogger(name)\n self.name = name\n\n if enable_file is True:\n file_handler = handlers.RotatingFileHandler(\n filename=self.return_logfile(filename='%s.log' % name),\n maxBytes=self.max_size,\n backupCount=self.max_backup\n )\n self.set_handler(log, handler=file_handler)\n\n if enable_stream is True or self.debug_logging is True:\n stream_handler = logging.StreamHandler()\n self.set_handler(log, handler=stream_handler)\n\n log.info('Logger [ %s ] loaded', name)\n return log", "title": "" }, { "docid": "220c6b06fcd7493f806730ba05162a3c", "score": "0.6596836", "text": "def get_logger(name):\n logger = logging.getLogger(name)\n logger.setLevel(logging.INFO)\n stderr_handler = logging.StreamHandler()\n formatter = logging.Formatter('[%(asctime)s] %(levelname)s | %(name)s | %(message)s')\n stderr_handler.setFormatter(formatter)\n logger.addHandler(stderr_handler)\n return logger", "title": "" }, { "docid": "079e27f9bbb6e43b5cefd453eaf7fafa", "score": "0.6593608", "text": "def create_logger(name, format='%(levelname)s: %(message)s', datefmt=None,\n stream=None, level=logging.INFO, filename=None, filemode='w',\n filelevel=None, propagate=True):\n\n # Get a logger for the specified name\n logger = logging.getLogger(name)\n logger.setLevel(level)\n fmt = logging.Formatter(format, datefmt)\n logger.propagate = propagate\n\n # Remove existing handlers, otherwise multiple handlers can accrue\n for hdlr in logger.handlers:\n logger.removeHandler(hdlr)\n\n # Add handlers. 
Add NullHandler if no file or stream output so that\n # modules don't emit a warning about no handler.\n if not (filename or stream):\n logger.addHandler(logging.NullHandler())\n\n if filename:\n hdlr = logging.FileHandler(filename, filemode)\n if filelevel is None:\n filelevel = level\n hdlr.setLevel(filelevel)\n hdlr.setFormatter(fmt)\n logger.addHandler(hdlr)\n\n if stream:\n hdlr = logging.StreamHandler(stream)\n hdlr.setLevel(level)\n hdlr.setFormatter(fmt)\n logger.addHandler(hdlr)\n\n return logger", "title": "" }, { "docid": "78e07148db19e750e99095022001c32b", "score": "0.65847135", "text": "def get_logger(name):\n log = logging.getLogger(name)\n set_log_level(log)\n return log", "title": "" }, { "docid": "0d5c3e2b6547b9191986f40547d7cd6e", "score": "0.6575159", "text": "def set_logger(log_file,\n min_level=logging.DEBUG,\n file_log_level=logging.INFO,\n screen_log_level=logging.WARNING, delete_previous_log=False):\n if delete_previous_log:\n file_delete(log_file)\n\n the_logger = logging.getLogger()\n the_logger.setLevel(min_level)\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n fh = logging.FileHandler(log_file)\n fh.setLevel(file_log_level)\n fh.setFormatter(formatter)\n the_logger.addHandler(fh)\n ch = logging.StreamHandler()\n ch.setLevel(screen_log_level)\n ch.setFormatter(formatter)\n the_logger.addHandler(ch)\n return the_logger", "title": "" }, { "docid": "954cb9c15576490ce5ac5cdc75bceb94", "score": "0.65723926", "text": "def create_logger():\n from . import config\n\n logger = getLogger(\"oar\")\n del logger.handlers[:]\n\n logger.setLevel(LEVELS[config['LOG_LEVEL']])\n\n log_file = config.get('LOG_FILE', None)\n if log_file is not None:\n if log_file == \":stdout:\": # pragma: no cover\n handler = get_global_stream_handler(\"stdout\")\n if log_file == \":stderr:\":\n handler = get_global_stream_handler(\"stderr\")\n else: # pragma: no cover\n touch(log_file)\n handler = FileHandler(log_file)\n\n if handler not in logger.handlers:\n logger.addHandler(handler)\n else: # pragma: no cover\n logger.addHandler(NullHandler())\n\n logger.propagate = False\n\n return logger", "title": "" }, { "docid": "a8d47b773ec38441bc22e68e5bc03f80", "score": "0.6572043", "text": "def create_logger(level: int) -> logging.Logger:\n global logger_count\n logger_count += 1\n # add logger count to differentiate between different fides\n # optimization instances and avoid deadlocks\n logger = logging.getLogger(f'fides_{logger_count}')\n ch = logging.StreamHandler()\n formatter = logging.Formatter(\n fmt='%(asctime)s fides(%(levelname)s) %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n )\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n logger.setLevel(level)\n return logger", "title": "" }, { "docid": "ed44916242821259bfc0d273231a660b", "score": "0.65687263", "text": "def getLogger(out_path, filetag, **kwargs):\n name = kwargs.get('name', '')\n logFile = os.path.join(out_path, filetag + \"_{}.log\".format(name))\n\n # logging.basicConfig(filename=logFile, level=logging.INFO)\n # get the myLogger\n pnp_logger = logging.getLogger('simlog')\n pnp_logger.setLevel(logging.DEBUG)\n # create file handler which logs even debug messages\n fh = logging.FileHandler(logFile)\n fh.setLevel(logging.DEBUG)\n # create console handler and set level to debug\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n # create formatter and add it to the handlers\n # formatter \t= logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n # ch.setFormatter(formatter)\n # 
fh.setFormatter(formatter)\n\n # add the handlers to the logger\n pnp_logger.addHandler(fh)\n pnp_logger.addHandler(ch)\n\n return pnp_logger", "title": "" }, { "docid": "33c43103a3a22cc998b8207330f0a381", "score": "0.6565355", "text": "def create_logger(name: str):\n log_format = '%(levelname)s %(asctime)s %(name)s %(message)s'\n logging.basicConfig(filename='/tmp/{}.log'.format(name), filemode='a', format=log_format)\n logger = logging.getLogger(name)\n logger.setLevel(logging.INFO)\n handler = logging.StreamHandler()\n formatter = logging.Formatter(log_format)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger #logger", "title": "" }, { "docid": "9ff9adc8c31c8b3219e17057493a2e66", "score": "0.6564403", "text": "def getLogger(logname = '',\n logfile = '',\n logformat = '%(asctime)s %(name)s [%(levelname)s] <%(filename)s:%(lineno)d> %(message)s'):\n logger = logging.getLogger(logname)\n logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter(logformat)\n ## append to file\n if logfile:\n fh = logging.FileHandler(logfile)\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n ## append to sys.stderr\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger", "title": "" }, { "docid": "a971efa754443c3bbdab89c24e898775", "score": "0.6560767", "text": "def get_logger(\n name: str,\n level=logging.DEBUG,\n log_format=\"%(asctime)s %(levelname)s %(name)s %(message)s\",\n stream=sys.stdout,\n):\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n\n formatter = logging.Formatter(log_format)\n\n sh = logging.StreamHandler(stream=stream)\n sh.setFormatter(formatter)\n\n logger.addHandler(sh)\n\n return logger", "title": "" }, { "docid": "b973a2292c661a3cf729497dc5ff21ec", "score": "0.65554243", "text": "def go_logger(name, **kwargs):\r\n logger = logging.getLogger(name)\r\n if '--help' in sys.argv:\r\n # Skip log file creation if the user is just getting help on the CLI\r\n return logger\r\n if not options.log_file_prefix or options.logging.upper() == 'NONE':\r\n # Logging is disabled but we still have to return the adapter so that\r\n # passing metadata to the logger won't throw exceptions\r\n return JSONAdapter(logger, kwargs)\r\n if name == None:\r\n # root logger; leave it alone\r\n LOGS.add(options.log_file_prefix)\r\n return logger\r\n # Remove any existing handlers on the logger\r\n logger.handlers = []\r\n logger.setLevel(getattr(logging, options.logging.upper()))\r\n if options.log_file_prefix:\r\n if name:\r\n basepath = os.path.split(options.log_file_prefix)[0]\r\n filename = name.replace('.', '-') + '.log'\r\n path = os.path.join(basepath, filename)\r\n else:\r\n path = options.log_file_prefix\r\n basepath = os.path.split(options.log_file_prefix)[0]\r\n if not os.path.isdir(basepath):\r\n mkdir_p(basepath)\r\n LOGS.add(path)\r\n channel = logging.handlers.RotatingFileHandler(\r\n filename=path,\r\n maxBytes=options.log_file_max_size,\r\n backupCount=options.log_file_num_backups)\r\n channel.setFormatter(LogFormatter(color=False))\r\n logger.addHandler(channel)\r\n logger = JSONAdapter(logger, kwargs)\r\n return logger", "title": "" }, { "docid": "31611e4058cfe1f3e7f3906a815f3ec2", "score": "0.6540639", "text": "def setup_logger(logger_name, log_level=\"INFO\", log_file=None,\n log_format=FORMAT):\n logger = logging.getLogger(logger_name)\n current_handlers = [str(h) for h in logger.handlers]\n\n # Set root logger to debug, handlers will 
control levels above debug\n logger.setLevel(LOG_LEVEL[\"DEBUG\"])\n\n handlers = []\n if isinstance(log_file, list):\n for h in log_file:\n handlers.append(get_handler(log_level=log_level, log_file=h,\n log_format=log_format))\n else:\n handlers.append(get_handler(log_level=log_level, log_file=log_file,\n log_format=log_format))\n for handler in handlers:\n if str(handler) not in current_handlers:\n logger.addHandler(handler)\n return logger", "title": "" }, { "docid": "7fc16121c0cf83224f6cf550deaaad2f", "score": "0.6539337", "text": "def get_file_and_stream_logger(logdir: Optional[str], logger_name: str,\n log_file_name: Optional[str],\n stream_log_level: Optional[str] = \"info\",\n file_log_level: Optional[str] = \"debug\",\n logger_level: Optional[str] = \"debug\",\n new_file: Optional[bool] = True,\n datefmt: Optional[str] = None,\n fmt: Optional[str] = None,\n no_file_logger: bool = False,\n no_stream_logger: bool = False) -> Tuple[str, logging.Logger]:\n if not datefmt:\n datefmt = '%Y/%m/%d %I:%M:%S %p'\n if not fmt:\n '%(asctime)s %(message)s'\n logger = logging.getLogger(logger_name)\n formatter = logging.Formatter(datefmt=datefmt, fmt=fmt)\n if not no_file_logger and logdir and log_file_name:\n if not os.path.exists(logdir):\n os.mkdir(logdir)\n if not log_file_name.endswith('.log'):\n log_file_name += '.log'\n log_file = os.path.abspath(os.path.join(logdir, log_file_name))\n if new_file and os.path.exists(log_file):\n backup_num = get_backup_num(logdir, log_file_name)\n os.rename(log_file, log_file + '.' + str(backup_num))\n file_handler = logging.FileHandler(log_file)\n else:\n log_file = \"\"\n if not no_stream_logger:\n stream_handler = logging.StreamHandler(sys.stdout)\n if stream_log_level is not None and hasattr(logging, stream_log_level.upper()):\n stream_handler.setLevel(getattr(logging, stream_log_level.upper()))\n else:\n stream_handler.setLevel(logging.INFO)\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n if not no_file_logger:\n if file_log_level is not None and hasattr(logging, file_log_level.upper()):\n file_handler.setLevel(getattr(logging, file_log_level.upper()))\n else:\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n if logger_level is not None and hasattr(logging, logger_level.upper()):\n logger.setLevel(getattr(logging, logger_level.upper()))\n else:\n logger.setLevel(logging.DEBUG)\n return log_file, logger", "title": "" }, { "docid": "53549a639f2809b502dadfc58ba4b73f", "score": "0.6537406", "text": "def get_logger(self, name):\n logger = logging.getLogger(name)\n logger.setLevel(self.get_level(name))\n return logger", "title": "" }, { "docid": "dff1796a5c168eeaa333af9940041f2b", "score": "0.6536021", "text": "def with_logger(name, log_file=None):\n\n def _create_log_handler():\n # Create a FileHandler if the log_file is provided\n if log_file:\n # Recursively create log subdirectories if needed\n log_dir = os.path.dirname(log_file)\n if log_dir:\n os.makedirs(log_dir, exist_ok=True)\n handler = logging.FileHandler(log_file)\n # Create a console StreamHandler if the log_file is not provided\n else:\n handler = logging.StreamHandler()\n # Set a JSON formatter\n formatter = JsonFormatter()\n handler.setFormatter(formatter)\n return handler\n\n def decorator(original):\n @wraps(original)\n def decorated(*args, **kwargs):\n logger = logging.getLogger(name)\n # Add the configured log handler if it is not already added\n if not logger.handlers:\n handler = 
_create_log_handler()\n logger.addHandler(handler)\n # Set a log level from the LOG_LEVEL environment variable\n log_level = os.getenv(\"LOG_LEVEL\", \"INFO\")\n logger.setLevel(log_level)\n # Provide the configured logger to the original function\n return original(logger, *args, **kwargs)\n\n return decorated\n\n return decorator", "title": "" } ]
c472743f52030deaf5de990780590f7a
Imports a string as if it were a CSV file. This is used in unit tests
[ { "docid": "0b8aa7ad592fff084c01e1e19b36f1ec", "score": "0.6923848", "text": "def import_csv_string(self, csv_string):\n csv.register_dialect('spreadsheet', delimiter=',', quoting=csv.QUOTE_NONE)\n csv_reader = csv.reader(StringIO.StringIO(csv_string), 'spreadsheet')\n for row_key, row in enumerate(csv_reader):\n for field_key, csv_field in enumerate(row):\n self.update_cell(csv_field.strip(), row_key + 1, field_key) # TODO: this should create use cell object", "title": "" } ]
[ { "docid": "d6070672033524f9ce53baf99a3704c0", "score": "0.662369", "text": "def __importCsv(self, event):\n self.__importFile(\",\", \"csv files (*.csv)|*.csv\", \"Import CSV file\")", "title": "" }, { "docid": "33e993137b4edcc3fbd1690710fd7b72", "score": "0.6558638", "text": "def test_read_csv(self):\n \"\"\"\n file_path: str,\n delim: str,\n header_rows: int,\n footer_rows: int,\n encod: str,\n \"\"\"\n raise NotImplementedError", "title": "" }, { "docid": "c5fe81bfc76a7636db4d661dcce27490", "score": "0.63470364", "text": "def import_from_csv(self):\n\n self.lokalita = []\n self.cena = []\n self.velikost = []\n self.odkaz = []\n self.identifier = []\n self.date_created = []\n self.offer_type = []\n\n # try pro pripad, ze soubor neexistuje\n try:\n with open(self._path, newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=';', quotechar='|')\n for row in reader:\n self.lokalita.append(row[0])\n self.cena.append(row[1])\n self.velikost.append(row[2])\n self.odkaz.append(row[3])\n self.identifier.append(row[4])\n self.date_created.append(row[5])\n self.offer_type.append(row[6])\n except FileNotFoundError:\n pass", "title": "" }, { "docid": "b732b95d269d1ed8495e3e5fcc4c72a9", "score": "0.63433224", "text": "def csv_import(name, sep, header):\n csv_file = pd.read_csv(name, sep = sep, header = header) ##loading data using read_csv from pandas\n return csv_file #returning the data structure", "title": "" }, { "docid": "570cf273ec27f8c892c5cf7395afec20", "score": "0.62232953", "text": "def parse(self, csv_string):\n return self._reader.read_record(csv_string)", "title": "" }, { "docid": "764502781fd02290f8b77af4395e739c", "score": "0.6199115", "text": "def import_csv(self, budget_id, account_id, csv_filename):\n raise Exception(\"NOT YET IMPLEMENTED\")", "title": "" }, { "docid": "3506ad53523b7af1addf583e0a3e6c9c", "score": "0.6182247", "text": "def test_import_csv_basic_chars(self):\n input_dict = {'abc': '123', 'cde': '456', 'efg': '789'}\n if PY2:\n mock_csv = BytesIO(b\"abc,cde,efg\\n123,456,789\")\n else:\n mock_csv = StringIO(\"abc,cde,efg\\n123,456,789\")\n features = []\n import_from_csv(features=features, fields=[], file_obj=mock_csv)\n for k, v in input_dict.items():\n self.assertEquals(v, features[0]['properties'][k])", "title": "" }, { "docid": "0a307fef3e4d333b800dabf74a9c7ae0", "score": "0.60628295", "text": "def process_csv(self, file_name: str):", "title": "" }, { "docid": "3b67ddb0a40e9844dfce4c5795d4674d", "score": "0.59732985", "text": "def importCSV(self, filename, namefield='name'):\n import csv\n csv.register_dialect('custom', skipinitialspace=True)\n dr = csv.DictReader(open(filename,'r'),dialect='custom')\n data = []\n for r in dr: \n data.append(r)\n self.importDict(data, namefield)\n return", "title": "" }, { "docid": "c5e06a5d7a5db40010747dd765dff935", "score": "0.59311044", "text": "def csv(string):\n return string.split(',')", "title": "" }, { "docid": "029485435c73194cf35f601c020d638c", "score": "0.59251064", "text": "def load_csv(cls, path):\n with pyglet.resource.file(path, mode='r') as csv_file:\n csv_data = list(csv.reader(csv_file))\n\n return csv_data", "title": "" }, { "docid": "95de5d5d5bd05f9031896b76b8aa4e2d", "score": "0.58774656", "text": "def import_csv(self):\n load_path = str(\n QFileDialog.getOpenFileName(\n self, self.tr(\"Load CSV file\"), \"\", \"CSV (*.csv)\"\n )[0]\n )\n if isfile(load_path):\n df = read_csv(\n load_path,\n header=None,\n )\n if type(df.values[0, 0]) is str:\n # Load remove first line\n df = read_csv(\n 
load_path,\n )\n val = df.values\n # Check Shape\n error = \"\"\n shape = val.shape\n if self.shape_min:\n if self.shape_min[0] and shape[0] < self.shape_min[0]:\n error = \"Minimum number of line should be \" + str(self.shape_min[0])\n if self.shape_min[1] and shape[1] < self.shape_min[1]:\n error = \"Minimum number of column should be \" + str(\n self.shape_min[1]\n )\n if self.shape_max:\n if self.shape_max[0] and shape[0] > self.shape_max[0]:\n error = \"Maximum number of line should be \" + str(self.shape_max[0])\n if self.shape_max[1] and shape[1] > self.shape_max[1]:\n error = \"Maximum number of column should be \" + str(\n self.shape_max[1]\n )\n if error:\n QMessageBox().critical(\n self,\n self.tr(\"Error\"),\n self.tr(\"Error while loading csv file:\\n\" + error),\n )\n return\n # Try to fill the table\n try:\n old_data = self.data\n self.data = val\n self.update()\n except Exception as e:\n QMessageBox().critical(\n self,\n self.tr(\"Error\"),\n self.tr(\"Error while loading csv file:\\n\" + str(e)),\n )", "title": "" }, { "docid": "b4ba8e5df909971e74e991b9bff38c1f", "score": "0.58602303", "text": "def __CSVFile(modelType: str) -> Union[str, TextIO]:\n\n if modelType.lower() == 'driver':\n CSVPath = 'data/csv/drivers.csv'\n elif modelType.lower() == 'team':\n CSVPath = 'data/csv/teams.csv'\n elif modelType.lower() == 'testdriver':\n CSVPath = '../data/tests/csv/drivers.csv'\n elif modelType.lower() == 'testteam':\n CSVPath = '../data/tests/csv/teams.csv'\n else:\n raise Exception('Incorrect model type!')\n\n try:\n # Must define encoding='utf-8-sig' to function seamlessly with Excel sheets and exports.\n CSVFile = open(CSVPath, 'r', encoding='utf-8-sig')\n\n if os.stat(CSVPath).st_size == 0:\n os.remove(CSVPath)\n raise IOError\n except IOError:\n raise\n\n return CSVFile", "title": "" }, { "docid": "34ad27dfddf3cf770d0c453dd46656d9", "score": "0.58479047", "text": "def __init__(self, raw_input_data_csv: str):\n self.raw_input_data_csv = raw_input_data_csv", "title": "" }, { "docid": "18bb5fdab074a307ba251fe2299c217b", "score": "0.5829573", "text": "def leerCSV():\r\n arch = open('D:/Facultad/Seminario Python/ACTIVIDAD 1 TEORIA/twitchdata-update.csv', 'r', encoding='utf-8-sig')\r\n csvreader = csv.reader(arch, delimiter=',')\r\n return csvreader", "title": "" }, { "docid": "57d25e005cb81dc23fcfb3fbea1159a7", "score": "0.5820537", "text": "def parseCSVFile(inputFile):\n\tcsvFile = inputFile\n\tdialect = csv.Sniffer().sniff(codecs.EncodedFile(csvFile, \"utf-8\").read(1024))\n\tcsvFile.open()\n\t# reader = csv.reader(codecs.EncodedFile(csvFile, \"utf-8\"), delimiter=',', dialect=dialect)\n\treader = csv.reader(codecs.EncodedFile(csvFile, \"utf-8\"), delimiter=',', dialect='excel')\n\trowResults = [row for row in reader]\n\treturn rowResults", "title": "" }, { "docid": "942ec552634dc28d75b6409e45d93a38", "score": "0.5787089", "text": "def test_import_csv_non_ascii_chars(self):\n input_dict = {u'à': u'¡', u'£': u'Ç'}\n if PY2:\n mock_csv = BytesIO(\"à,£\\n¡,Ç\")\n else:\n mock_csv = StringIO(\"à,£\\n¡,Ç\")\n features = []\n import_from_csv(features=features, fields=[], file_obj=mock_csv)\n for k, v in input_dict.items():\n self.assertEquals(v, features[0]['properties'][k])", "title": "" }, { "docid": "d89100d59ce4a4c96c72010f1614a835", "score": "0.5780772", "text": "def read_record(self, csv_string):\n if six.PY2:\n # TODO(caveness): Test performance impact of removing nested dots\n # throughout file and possibly update decoder based on results.\n line = 
tf.compat.as_bytes(csv_string)\n else:\n line = tf.compat.as_text(csv_string)\n self._line_generator.push_line(line)\n output = next(self._reader)\n return [tf.compat.as_bytes(x) for x in output]", "title": "" }, { "docid": "34387aa2d41c902837828e41f1318aa4", "score": "0.5776792", "text": "def test_csvdata(db, specialization, slu1, slu2, student, grade_slu1, grade_slu2):\n\n specialization.unit_count = 2\n spc_list = [specialization]\n unit_list = [slu1, slu2]\n object_list = [\n {\n \"user\": student,\n \"grades\": [grade_slu1, grade_slu2],\n \"submission_date\": datetime(year=2021, month=8, day=15),\n \"total_score\": 38,\n }\n ]\n text = csvdata(spc_list, unit_list, object_list)\n assert (\n text == \"username,slack_id,submission_date,total_score,S01-SLU01,S01-SLU02\\r\\n\"\n \"test_student,U12J14XV12Z,2021-08-15 00:00:00,38,18,20\\r\\n\"\n )", "title": "" }, { "docid": "79670c67f44eddd43f8acaf53f7d4d3e", "score": "0.5751834", "text": "def _testing_csv_parsing(self, file_path):\n mock_file = _load_convert_file(file_path)\n mock_request = mock.MagicMock()\n mock_request.files = {'zipcounty_file': mock_file}\n output = _handle_csv_request(\n self.app,\n mock_request\n )\n output = standardize_request(output)\n\n return output", "title": "" }, { "docid": "f33d7c01b7c95c71eeee6794d889ac75", "score": "0.573986", "text": "def test_read_csv_file_with_automatic_dialect_specification(self):\n\n with self.assertRaises(csv.Error):\n prepare_data.read_csv_file(TESTDATA_FILENAME)[0]", "title": "" }, { "docid": "31c1d6ea71e707b333d5e09d36062688", "score": "0.5739288", "text": "def from_csv(self, csv_path, has_headers = True, delim = ',', spec_format = False):\n\n self.infilepath = os.path.abspath(csv_path)\n\n if not self.discretized:\n self.row_data, self.headers = read_csv_rows(csv_path, has_headers, delim, spec_format)\n self.build_col_data()\n\n else:\n raise Warning(\"You may not import new data into \\\n an already discretized time series\")\n return", "title": "" }, { "docid": "92c5860a5459f097ec860e796164ec4b", "score": "0.5729209", "text": "def fields_container_csv():\n return resolve_test_file(\"fields_container.csv\", \"csvToField\")", "title": "" }, { "docid": "0d1dbbe64e169c7b1199092dab2e2969", "score": "0.5721805", "text": "def test_csv_roundtrip(entities):\n mimetype = \"text/csv\"\n dummy_file = ReadWriteDummy()\n save_entities(\n entities=entities,\n file_=dummy_file,\n mimetype=mimetype,\n attributes=DEFAULT_ATTRIBUTES,\n )\n read_entities = list(load_entities(file_=dummy_file, mimetype=mimetype))\n assert_sequence_partial_equals(\n expected=entities, actual=read_entities, attributes_to_test=[\"ID\", \"href\"]\n )\n\n # needs second round trip test as csv converts everything to strings!\n dummy_file_2 = ReadWriteDummy()\n save_entities(\n entities=entities,\n file_=dummy_file_2,\n mimetype=mimetype,\n attributes=DEFAULT_ATTRIBUTES,\n )\n assert dummy_file.data == dummy_file_2.data\n read_entities_2 = list(load_entities(file_=dummy_file_2, mimetype=mimetype))\n assert_sequence_equals(expected=read_entities, actual=read_entities_2)", "title": "" }, { "docid": "2ce8e6de561af4decc807b84f7f1b442", "score": "0.5717098", "text": "def import_csv(self, csvfile, verbose = True):\r\n\r\n with open(csvfile, 'r') as f:\r\n reader = csv.reader(f, delimiter = '\\t')\r\n rows = [row for row in reader]\r\n\r\n # get rid of the comment rows\r\n\r\n rows = [row for row in rows if row[0][0] != '#'][2:]\r\n\r\n # transpose the rows and get the dates/flows\r\n\r\n org, gagename, gagedates, gageflows, 
gageflags = zip(*rows)\r\n\r\n # convert dates to datetime and flows to integers\r\n\r\n self.gagedates = [datetime.datetime.strptime(d, '%Y-%m-%d') \r\n for d in gagedates]\r\n self.gageflows = [float(g) if g else None for g in gageflows]", "title": "" }, { "docid": "8e3b91f77d1d4de60df7126cf17e1dbc", "score": "0.57129526", "text": "def assert_csv(self, path: Path) -> bool:\r\n return str.lower(path.suffix) == '.csv'", "title": "" }, { "docid": "7c7146c9e781f94e604dced0995fe212", "score": "0.571014", "text": "def test_read_csv_file_with_manual_dialect_specification(self):\n\n self.assertEqual(\n prepare_data.read_csv_file(TESTDATA_FILENAME, \n delimiter = \",\", quotechar = \"|\", \n has_header = True)[0], \n [\"Row0\",\"val0\",\"0\"])", "title": "" }, { "docid": "6ec7cb94a621d27b35f15ba02e11d734", "score": "0.5708421", "text": "def _open_for_csv(path):\n if sys.version_info[0] < 3:\n return open(path, 'rb')\n else:\n return open(path, 'r', newline='')", "title": "" }, { "docid": "17bbcb2534737d60c0492a69e51127d2", "score": "0.5708419", "text": "def load_text(csvfile, sep=\",\", num_headerlines=0, header_return=None):\n loaded_csv = open(csvfile, 'r')\n out = []\n for row in loaded_csv:\n if sep is not None:\n out.append(row.strip().split(sep))\n else:\n out.append(row.strip().split())\n if header_return is not None:\n header = out[header_return]\n else:\n header = False\n out = out[num_headerlines:] # strip header\n return header, out", "title": "" }, { "docid": "75f7adb336abe96f7e359029976cd285", "score": "0.5695346", "text": "def test_csv_parser():\n colnames, data_rows = ctt.read_data_from_csv_file(\n pjoin(DATA_PATH, \"csv_test_data.csv\"))\n npt.assert_equal(colnames[0], \"col1\")\n npt.assert_equal(colnames[-1], \"col4\")\n npt.assert_equal(len(data_rows[0]), 4)\n npt.assert_equal(len(data_rows[-1]), 4)\n npt.assert_equal(data_rows[0][-1], \"4\")", "title": "" }, { "docid": "937ae49561634adbb7f9a06f4826b3ca", "score": "0.56863654", "text": "def import_csv(request):\n with open('./sample_data.csv') as f:\n data = csv.reader(f, delimiter=',')\n for row in data:\n if row[0] != 'date':\n record = Record.objects.create()\n record.date = datetime.strptime(row[0], '%d.%m.%Y')\n record.channel = row[1]\n record.country = row[2]\n record.os = row[3]\n record.impressions = int(row[4])\n record.clicks = int(row[5])\n record.installs = int(row[6])\n record.spend = float(row[7])\n record.revenue = float(row[8])\n record.cpi = record.spend / record.installs\n record.save()\n return HttpResponse('Sample data imported.<br>Please do not use this link again :)')", "title": "" }, { "docid": "413bc4ea68eeb318b35ff2c57f1c7c34", "score": "0.568457", "text": "def _open_for_csv(self, path):\n if sys.version_info[0] < 3:\n return open(path, 'rb')\n else:\n return open(path, 'r', newline='')", "title": "" }, { "docid": "a303a84459f008e120d375645eb455ce", "score": "0.5678701", "text": "def test_import_string(self):\n # Make sure an invalid module path returns None\n self.assertIsNone(import_string('nope.nope'))\n\n # Make sure an invalid module name returns None\n self.assertIsNone(import_string('dynamic_initial_data.nope'))\n\n # For test coverage, import a null value\n self.assertIsNone(import_string('dynamic_initial_data.tests.mocks.mock_null_value'))\n\n # For test coverage, import a real class\n self.assertIsNotNone(import_string('dynamic_initial_data.tests.mocks.MockClass'))", "title": "" }, { "docid": "586b9fc1b3dd0d5fff97d301b369f0ab", "score": "0.5676111", "text": "def __init__(self, csv_path: 
Union[str, Path]) -> None:\n self.csv_path = Path(csv_path)", "title": "" }, { "docid": "2b5d0fb53a3baf93f6ff6fa4a0915343", "score": "0.5667406", "text": "def test_csvInput(self):\n module = Config.load_plugin('test/csv_test.py')\n doc = Doc.from_url(\"csv_test\")\n result = doc.query( { 'hash' : '41e25e514d90e9c8bc570484dbaff62b' } )\n self.assertEqual( result, {'name':'cmd.exe',\n 'hash':'41e25e514d90e9c8bc570484dbaff62b',\n 'from_csv_input': True,\n 'date.created': datetime(2018,2,20,11,23,0),\n 'nonce' : 'ca79d9cbb8c73cbe610dfa05030a2183'} )", "title": "" }, { "docid": "e5260e872f3b3ef76574d763c4f9f184", "score": "0.56593513", "text": "def import_csv(file_path):\n #data_frame = csv_import_adapter.import_dataframe_from_path(\n data_frame=pd.read_csv(\n os.path.join(file_path), sep=\";\")\n data_frame[\"time:timestamp\"] = data_frame[\"time:timestamp\"].apply(lambda x:\n datetime.strptime(x, '%d-%m-%Y:%H.%M'))\n if 'time:complete' in data_frame.columns:\n data_frame[\"time:complete\"] = pd.to_datetime(data_frame[\"time:complete\"], format='%d-%m-%Y:%H.%M')\n data_frame[\"Activity\"] = data_frame[\"concept:name\"]\n parameters = {constants.PARAMETER_CONSTANT_CASEID_KEY: \"concept:name\",\n constants.PARAMETER_CONSTANT_ACTIVITY_KEY: \"activity\",\n constants.PARAMETER_CONSTANT_TIMESTAMP_KEY: \"time:timestamp\"}\n csv_log = conversion_factory.apply(data_frame, parameters=parameters)\n print(\"Import of csv successful,with {0} traces in total\".format(len(csv_log)))\n return csv_log", "title": "" }, { "docid": "df50c1d9d53807067dc57878adf3db8f", "score": "0.5651957", "text": "def write(self, csv_string):\n self.csv_string = csv_string", "title": "" }, { "docid": "5ac65d2641dbfec512f5a506d5ab747f", "score": "0.5642873", "text": "def load_csv_file(csv_file_path):\n\n csv_list = []\n if (os.path.isfile(csv_file_path)):\n # f_csv = csv.reader(open(csv_file_path, 'r'))\n f_csv = csv.reader(io.FileIO(csv_file_path, 'r'), encoding='utf-8')\n csv_headers = f_csv.next()\n csv_list.append(csv_headers)\n named_tuple_type = collections.namedtuple('row_tuple_type', csv_headers)\n csv_list += map(named_tuple_type._make, f_csv)\n return csv_list\n\n\n # csv_list = []\n # if (os.path.isfile(csv_file_path)):\n # with open(csv_file_path, 'r') as f:\n # f_csv = csv.reader(f)\n # csv_list = list(f_csv)\n # return csv_list", "title": "" }, { "docid": "d2922b15023975c887154f7eec837aa6", "score": "0.5638414", "text": "def import_data(path):\n try:\n model_data = pd.read_csv(path)\n logging.info('Data loaded from %s : SUCCESS', path)\n except FileNotFoundError:\n logging.error('Data load from %s failed. Please check file at path', path)\n return \"Cannot find file. 
Please check the file path.\"\n return model_data", "title": "" }, { "docid": "1266887174b04e35c0ae4f891eb500b2", "score": "0.5630805", "text": "def open_csv(self, csv_file_path):\n delimiter = ','\n if 'delimiter' in self.config:\n delimiter = self.config['delimiter']\n\n quotechar = '\"'\n if 'quotechar' in self.config:\n quotechar = self.config['quotechar']\n\n self.csv_file = open(csv_file_path, 'rbU')\n try:\n self.csv_reader = csv.DictReader(self.csv_file, fieldnames=self.config['fieldnames'], delimiter=delimiter,\n quotechar=quotechar)\n except Exception as e:\n self.csv_file.close()\n raise e", "title": "" }, { "docid": "73b649f317aa5c98c406c5597370b056", "score": "0.5628779", "text": "def read_csv(cls, path, *, encoding=\"utf-8\", sep=\",\", header=True, columns=[], strings_as_object=inf, dtypes={}):\n import pandas as pd\n data = pd.read_csv(path,\n sep=sep,\n header=0 if header else None,\n usecols=columns or None,\n dtype=dtypes,\n parse_dates=False,\n encoding=encoding,\n low_memory=False)\n\n if not header:\n data.columns = util.generate_colnames(len(data.columns))\n return cls.from_pandas(data, strings_as_object=strings_as_object, dtypes=dtypes)", "title": "" }, { "docid": "801779781bb212eb383306803d1c2483", "score": "0.56269896", "text": "def import_data_from(filename):\n print 'Importing data from \"{0}\"...'.format(filename)\n\n csv = open(filename)\n imported_csv = csv.read()\n csv.close()\n\n return imported_csv", "title": "" }, { "docid": "26d153f1364c37d28423e75542611465", "score": "0.5621772", "text": "def test_csv(self):\n # \"issueKey, Epic Link, summary, created, started, completed\"\n\n raw_dict = {\n \"key\" : \"PROJECT-1\",\n \"id\" : \"00001\",\n \"fields\" : {\n \"created\" : \"2022-01-01T00:00:00.000+00:00\",\n \"summary\" : \"Issue Summary\",\n \"customfield_10008\" : \"EPIC-1\",\n \"issuetype\" : {\n \"name\" : \"Issue\"\n }}, # fields\n \"changelog\" : {\n \"histories\" : [{\n \"created\" : \"2022-01-02T00:00:00.000+00:00\",\n \"items\" : [\n { \"toString\" : \"In Progress\",\n \"field\" : \"status\" }]\n },{\n \"created\" : \"2022-01-03T00:00:00.000+00:00\",\n \"items\" : [\n { \"toString\" : \"Done\",\n \"field\" : \"status\" }]\n }] # histories\n } # changelog\n } # fromDict\n\n test_issue = Issue(options=None, session=None, raw=raw_dict)\n formatted_issue = CSV.format_issue(test_issue)\n test_line = \"PROJECT-1,\" # issueKey\n test_line += \"EPIC-1,\" # Epic Link\n test_line += \"\\\"Issue Summary\\\",\" # Issue Summary\n test_line += \"\\\"01/01/2022 00:00\\\",\" # Created\n test_line += \"\\\"02/01/2022 00:00\\\",\" # started (moved to 'In Progress')\n test_line += \"\\\"03/01/2022 00:00\\\"\" # completed (moved to 'Done')\n self.assertEqual(test_line, formatted_issue)", "title": "" }, { "docid": "78072f5175075df6338eb95d93a29231", "score": "0.5618183", "text": "def importCSV(self,file):\n\n\n print('WARNING: import_file_csv() needs some improvement...')\n\n try:\n f = open(file, 'r')\n\n if f:\n metacounter=0\n for l in f:\n metacounter+=1\n line = l.split('\\t')\n if 'Material' in line: self.material = line[1]\n elif 'Pump Power' in line: self.pumpPw = float(line[1])\n elif 'Pump Spot' in line: self.pumpSp = float(line[1])\n elif 'Probe Power' in line: self.probePw = float(line[1])\n elif 'Probe Spot' in line: self.probeSp = float(line[1])\n elif 'date' in line: self.date = line[1]\n elif 'Destruction Power' in line: self.destrPw = float(line[1])\n elif 'R0' in line: self.R0 = float(line[1])\n elif 'Temperature' in line: self.temperature = 
float(line[1])\n elif 'RawData' in line :\n break\n f.close()\n skipline = True\n n=0\n data = []\n\n while skipline: # skip metadata section then import array of data\n try:\n data = np.loadtxt(file, delimiter=',', skiprows=n)\n n+=1\n skipline = False\n except ValueError:\n n+=1\n\n for i in range(len(data)):\n self.time.append(data[i][0])\n self.rawtrace.append(data[i][1])\n self.trace.append(data[i][2])\n except FileNotFoundError:\n print('ERROR 404: file not found')", "title": "" }, { "docid": "916511916e4ca00beeec18f5691b3504", "score": "0.5610115", "text": "def openCSV(p):\n try:\n csv1 = open(p)\n except IOError:\n print \"Please enter valid file path!\"\n quit(0)\n else:\n headers = [i.lower() for i in csv1.readline().replace('\\n', '').split(',')]\n content = []\n for line in csv1.readlines():\n format_line = []\n line = line.replace('\\n', '').split(',')\n temp = \"\"\n while len(line):\n if line[0].count('\"') % 2 == 1:\n while True:\n if temp is \"\":\n temp += line.pop(0)\n else:\n temp += \",\" + line.pop(0) # give value back it's , that we removed previously\n if len(line) == 0:\n break\n if line[0].count('\"') % 2 == 1:\n temp += \",\" + line.pop(0) # give value back it's , that we removed previously\n break\n temp = temp[1:-1].replace('\"\"', '\"') # replace \"\" with \" if exists, excel's way of delimiting \"\n format_line.append(temp) # strip \" from combined value\n temp = \"\"\n elif line[0].count('\"\"') >= 1:\n format_line.append(line.pop(0)[1:-1].replace('\"\"', '\"'))\n else:\n format_line.append(line.pop(0))\n content.append(format_line)\n csv1.close()\n return [headers, content]", "title": "" }, { "docid": "161be9751edcb9a2a44689967bb99fdd", "score": "0.5606836", "text": "def csv_string(string):\n if type(string) == unicode:\n string = unicodedata.normalize('NFKD', string).encode('ascii','ignore')\n string = re.sub(' ', '_', string)\n string = re.sub(r'\\W+', '', string)\n\n return string", "title": "" }, { "docid": "3b840ddeb5a8d02027f4b187357ae645", "score": "0.5590417", "text": "def import_raw_file(self):\n table_name = self.next_tmp_table(\"raw\")\n sql.import_csv(self.filename, table_name, self.file_field_seperator,\n self.file_has_header, csv_quoting=self.file_quoting,\n sql_conn=self.sql_conn)", "title": "" }, { "docid": "691d5274afe598f266c316be39942531", "score": "0.55904156", "text": "def from_csv(cls, csv_input, strip_spaces=True, skip_blank_lines=True,\n encoding=\"utf-8\", delimiter=\",\", force_unique_col_names=False):\n def _force_unique(col_headers):\n seen_names = set()\n unique_col_headers = list()\n for i, col_name in enumerate(col_headers):\n if col_name in seen_names:\n col_name += \"_%s\" % i\n seen_names.add(col_name)\n unique_col_headers.append(col_name)\n return unique_col_headers\n\n def _pad_row(row):\n if len(row) < num_cols:\n for i in range(num_cols - len(row)):\n row.append('')\n return row\n\n def _process_row(row):\n if strip_spaces:\n return _pad_row( [value.strip() for value in row] )\n else:\n return _pad_row( row )\n\n if isinstance(csv_input, basestring):\n csv_stream = StringIO(csv_input)\n else:\n csv_stream = csv_input\n \n csv_reader = csv.reader(csv_stream, delimiter=delimiter)\n \n column_headers = [header.strip() for header in csv_reader.next()]\n if force_unique_col_names:\n column_headers = _force_unique(column_headers)\n num_cols = len(column_headers)\n\n # Make a list to gather entries for each column in the data file...\n raw_text_cols = [list() for i in range(num_cols)]\n for row in csv_reader:\n processed_row = 
_process_row(row)\n # Add this new row if we either allow blank lines or if any field\n # in the line is not blank. We do this to the processed row,\n # because spaces may or may not be significant, depending on\n # whether strip_spaces is True.\n if (not skip_blank_lines) or any(processed_row):\n for i in range(num_cols):\n raw_text_cols[i].append(unicode(processed_row[i], encoding))\n\n # Now take the raw data and put it into our DataColumn...\n cols = [ DataColumn(raw_col) for raw_col in raw_text_cols ]\n\n # column_headers ex: ['FirstName', 'LastName']\n # cols ex: \n # [ DataColumn([\"David\", \"John\"]), DataColumn([\"Ormsbee\", \"Doe\"]) ]\n return ColumnGroup(zip(column_headers, cols))", "title": "" }, { "docid": "94b2c9dc5efbca1e86ca592be1c00500", "score": "0.5581796", "text": "def loadcsv(filename):\n asyncio.run(load_csv(filename))", "title": "" }, { "docid": "d22f1fa8ac0287c8c7f39083fdd0d0f8", "score": "0.555023", "text": "def csv(csv_file, dialect='excel', **fmt_params):\n if isinstance(csv_file, str):\n input_file = ReusableFile(csv_file, mode='r')\n elif hasattr(csv_file, 'next') or hasattr(csv_file, '__next__'):\n input_file = csv_file\n else:\n raise ValueError('csv_file must be a file path or implement the iterator interface')\n\n csv_input = csvapi.reader(input_file, dialect=dialect, **fmt_params)\n return seq(csv_input).cache(delete_lineage=True)", "title": "" }, { "docid": "ce472c1537e750c4b728ea6a18eb1704", "score": "0.554804", "text": "def __init__(self, name: str, destination_path: str):\n self.destination_path = check.str_param(destination_path, \"destination_path\")\n super().__init__(\"Csv\", name)", "title": "" }, { "docid": "92da0a05a9142b7153007babbc4fdcfb", "score": "0.5543044", "text": "def read_csv_file(self):\n\n self.data = []\n with open(self.filepath, 'r', newline='') as f:\n delimiter_ = self.ui.lineEdit_delimiter.text()\n if delimiter_ == '':\n msg = _(\"A column delimiter has not been set.\")\n Message(self.app, _(\"Warning\"), msg, \"warning\").exec()\n return False\n if delimiter_ in ('ta', 'tab'):\n delimiter_ = \"\\t\"\n # The English text is in the GUI - do not translate with qt linguist\n quoting_ = csv.QUOTE_MINIMAL\n quote_type = self.ui.comboBox_quote.currentText()\n if quote_type == \"NONE\":\n quoting_ = csv.QUOTE_NONE\n if quote_type == \"ALL\":\n quoting_ = csv.QUOTE_ALL\n reader = csv.reader(f, delimiter=delimiter_, quoting=quoting_)\n try:\n for row in reader:\n self.data.append(row)\n except csv.Error as err:\n logger.error(('file %s, line %d: %s' % (self.filepath, reader.line_num, err)))\n self.parent_textEdit.append(_(\"Row error: \") + str(reader.line_num) + \" \" + str(err))\n return False\n # Get field names and replace blacks with a placeholder\n self.fields = []\n for i, f in enumerate(self.data[0]):\n if f != '':\n self.fields.append(str(f))\n else:\n self.fields.append(\"Field_\" + str(i))\n self.data = self.data[1:]\n return True", "title": "" }, { "docid": "942cf3463282bc186457b41f6fa524dc", "score": "0.5539255", "text": "def make_csv_api_friendly(csvtext):\n new_csv = csvtext.replace(',\"\"', ',') # replace ,\"\",\"\" with ,,\n new_csv = new_csv.replace('\"FALSE\"', '\"F\"') # Shorten bool\n new_csv = new_csv.replace('\"TRUE\"', '\"T\"') # Shorten bool\n\n # use regex to find dates and remove quotes\n # format is \"01/20/2017 19:16\"\n datestr_found = re.findall('\\\"[01][0-9]\\/[0123][0-9]\\/20.{2} .{2}:.{2}\\\"', new_csv)\n for d in datestr_found:\n noquotes = d.replace('\"', '')\n new_csv = new_csv.replace(d, 
noquotes)\n return new_csv", "title": "" }, { "docid": "1ad3194c3d2bbc63c939525d62ca97cd", "score": "0.5529867", "text": "def test_csv(tmpdir):\n input_files = ['file1.exe', 'file2.exe']\n results = [\n {\n 'other': {'field1': 'value1', 'field2': ['value2', 'value3']},\n 'outputfile': [['out_name', 'out_desc', 'out_md5'], ['out_name2', 'out_desc2', 'out_md52']],\n 'address': ['https://google.com', 'ftp://amazon.com']\n },\n {\n 'a': ['b', 'c'],\n }\n ]\n csv_file = tmpdir / 'test.csv'\n\n cli._write_csv(input_files, results, str(csv_file))\n\n expected = (\n 'scan_date,inputfilename,outputfile.name,outputfile.description,outputfile.md5,a,address,other.field1,other.field2\\n'\n '[TIMESTAMP],file1.exe,\"out_name\\nout_name2\",\"out_desc\\nout_desc2\",\"out_md5\\nout_md52\",,\"https://google.com\\nftp://amazon.com\",value1,\"value2\\nvalue3\"\\n'\n '[TIMESTAMP],file2.exe,,,,\"b\\nc\",,,\\n'\n )\n with csv_file.open() as fo:\n # Replace timestamp.\n results = re.sub('\\n[^\"]*?,', '\\n[TIMESTAMP],', fo.read())\n assert results == expected", "title": "" }, { "docid": "247ccaf45c775489ba3747dc7b8a0796", "score": "0.55293435", "text": "def __parse_legacy_csv(self, contents):\n subformatters = [\n \"transaction_reference\",\n \"transaction_transaction_date\",\n \"transaction_amount\",\n \"account_current_balance\"\n ]\n if self.options['filetype'] in ['csv', 'empty']:\n opts = {\n 'delimiter' : str(';'),\n 'quoting' : csv.QUOTE_NONE\n }\n rows = CsvParser().parse(contents, opts=opts)\n return (rows, subformatters)\n else:\n return (contents, subformatters)", "title": "" }, { "docid": "5c3e739f6ef434f114a040571f64953d", "score": "0.5526034", "text": "def import_csv(file, delimiter=\",\"):\n csv_data = pd.read_csv(file, delimiter=delimiter)\n x = csv_data[0]\n sx = csv_data[1]\n y = csv_data[2]\n sy = csv_data[3]\n\n instance_data = Data(x, y, sx, sy)\n instance = InstanceOptions(\"\", instance_data)\n return instance", "title": "" }, { "docid": "e5fa1100ca09fe5ef84832497aeb75a1", "score": "0.5518729", "text": "def import_csv(self, request):\n if request.method == \"POST\":\n form = forms.BulkImportForm(request.POST, request.FILES)\n if not form.is_valid():\n self.message_user(request, \"Error: Invalid form\", level=messages.ERROR)\n return self.render_bulk_import_form(request, form)\n\n try:\n csv_file = TextIOWrapper(form.cleaned_data['csv_file'], encoding=request.encoding)\n dialect = csv.Sniffer().sniff(csv_file.read())\n csv_file.seek(0)\n reader = csv.DictReader(csv_file, dialect=dialect)\n except Exception as err:\n self.message_user(request, \"Error: {}\".format(err), level=messages.ERROR)\n return self.render_bulk_import_form(request, form)\n\n try:\n send_email = form.cleaned_data['send_email']\n ignore_existing = form.cleaned_data['ignore_existing']\n\n user_type = self.get_user_type(request)\n staff = self.is_user_staff()\n\n created_users = self.create_users(user_type, reader, staff, send_email, skip_existing=ignore_existing)\n except Exception as err:\n self.message_user(request, f\"Error on row number {reader.line_num}: {err}\", level=messages.ERROR)\n return self.render_bulk_import_form(request, form)\n else:\n created_users = [escape(x) for x in created_users]\n names = '<br/>'.join(created_users)\n self.message_user(request, mark_safe(\"{} users have been created:<br/>{}\".format(len(created_users), names)))\n return redirect(\"..\")\n\n else:\n return self.render_bulk_import_form(request, forms.BulkImportForm())", "title": "" }, { "docid": "a965749b50489f71973d08e62ab57327", 
"score": "0.5513935", "text": "def csv_to_list(csv_str, dem = ','):\n lines = csv_str.strip('\\n').split('\\n')\n csv_data = []\n for l in lines:\n csv_data.append(l.strip('\\r').split(dem))\n return csv_data", "title": "" }, { "docid": "956dea6be90fc47e947fdf34ef6f4335", "score": "0.5513228", "text": "def load_csv(path, cursor):\r\n logger.debug(\"Appel de la fonction load_csv()\")\r\n logger.debug(\"Tentative d'ouverture du fichier %s\", path)\r\n with open(path, \"r\") as f:\r\n # ignorer la première ligne qui correspond au nom des colonnes\r\n f.readline() # On lit la première ligne\r\n line = f.readline()\r\n logger.debug(\"Ouverture et lecture de la première ligne du fichier réussi\")\r\n logger.debug(\"Appel de la fonction insert_csv_row() pour chaque ligne du fichier csv\")\r\n # boucle la lecture tant qu'il y a des lignes dans le fichier csv\r\n while line:\r\n insert_csv_row(line, cursor)\r\n line = f.readline()", "title": "" }, { "docid": "b2d1ea74f1b33b452ef14a283fcbe51c", "score": "0.5513008", "text": "def csv(csvfile, dialect='excel', **fmtparams):\n if isinstance(csvfile, str):\n input_file = LazyFile(csvfile, mode='r')\n elif hasattr(csvfile, 'next') or hasattr(csvfile, '__next__'):\n input_file = csvfile\n else:\n raise ValueError('csvfile must be a filepath or implement the iterator interface')\n\n csv_input = csvapi.reader(input_file, dialect=dialect, **fmtparams)\n return seq(csv_input)", "title": "" }, { "docid": "3c86ed276275615cfae5017d209d0e12", "score": "0.55126864", "text": "def read_data_file(sp_context, delimiter, file_path):\n\n raw_data = sp_context.textFile(file_path).cache()\n csv_data = raw_data.map(lambda line: line.split(delimiter)).cache()\n header = csv_data.first() # extract header\n raw_data = csv_data.filter(lambda x: x != header)\n '''\n TODO:\n '''\n row_data = raw_data.map(lambda p: Row(\n # sku=p[0],\n # product_price=p[1],\n # sales_rank=p[2],\n # reviews=p[3],\n # image_size=p[4],\n # returns=p[5],\n # cancels=p[6],\n # stock_status=p[7],\n ))\n\n\n return row_data", "title": "" }, { "docid": "b77accbbedad03a27977ff6c8c3b1842", "score": "0.55085003", "text": "def parseLineCSV(self, input_string):\n\n result = None\n\n temp_data = input_string.split(',')\n if len(temp_data) == 4:\n date_string = self.parseDate(temp_data[0])\n \n if date_string is not None:\n result = {\n 'date': date_string,\n 'job_title': temp_data[1],\n 'company_name': temp_data[2],\n 'salary': int(temp_data[3])\n }\n\n return result", "title": "" }, { "docid": "fa1d38635306fcb247386ea99d4ed41f", "score": "0.5503829", "text": "def __init__(self, path_to_csv, model_url=MODEL_FOLDER_PATH):\n self.path_to_csv = path_to_csv\n self.df = pd.read_csv(path_to_csv)\n self.model_url = model_url", "title": "" }, { "docid": "9d58c4729a2e7211f39a0b28223ae0f2", "score": "0.5502433", "text": "def import_csv(self):\n if not self.csv_path:\n raise MissingImportPathException()\n\n try:\n sis_import = sis_import_by_path(self.csv_path,\n self.override_sis_stickiness)\n self.post_status = 200\n self.canvas_id = sis_import.import_id\n self.canvas_state = sis_import.workflow_state\n except DataFailureException as ex:\n self.post_status = ex.status\n self.canvas_errors = ex\n\n self.save()", "title": "" }, { "docid": "159476e6301cd4e183bc804f7bd8acb1", "score": "0.54991055", "text": "def recfromcsv():\n \n \n return None", "title": "" }, { "docid": "70c5af4a74110f96d239c9c20e7c7253", "score": "0.54855573", "text": "def test_load_data(self):\n tabfile = TabFile('test',self.fp,delimiter=',')\n 
self.assertEqual(len(tabfile),3,\"Input has 3 lines of data\")\n self.assertEqual(tabfile.header(),[],\"Header should be empty\")\n self.assertEqual(str(tabfile[0]),\"chr1,1,234,4.6\",\"Incorrect string representation\")\n self.assertEqual(tabfile[2][0],'chr2',\"Incorrect data\")\n self.assertEqual(tabfile.nColumns(),4)\n self.assertEqual(tabfile.filename(),'test')", "title": "" }, { "docid": "bad41858aa2b99b5b2ac67a1d75dba4e", "score": "0.54849666", "text": "def parse_csv_line(line):\n ret = []\n tmp = \"\"\n in_quotes = False\n i = 0\n while i < len(line):\n c = line[i]\n if c == '\"':\n if in_quotes:\n in_quotes = False\n ret.append(tmp)\n tmp = \"\"\n i += 1\n else:\n in_quotes = True\n elif c != \",\":\n tmp += c\n else:\n if not in_quotes:\n ret.append(tmp)\n tmp = \"\"\n else:\n tmp += c\n i += 1\n\n if tmp != \"\":\n ret.append(tmp)\n return ret", "title": "" }, { "docid": "cc7bad0bdc69c6652bda0567196f4eed", "score": "0.54843175", "text": "def save_uploaded_data_csv(file_reader, module):\n return save_uploaded_data(file_reader, module)", "title": "" }, { "docid": "a399b2fd395fe64b13ec390d53729685", "score": "0.54801506", "text": "def create_student_from_csv(csv_path, cohort_id, user_type):\n\n # Read in CSV file of students\n with open(csv_path, 'rb') as f:\n\n # Split at the comma\n reader = csv.reader(f, delimiter=',')\n\n # Skip header row\n headers = reader.next()\n\n # Go through rows and add each student\n for row in reader:\n if row:\n\n # Check if student exists\n student = model.User.query.filter_by(first_name=row[0], last_name=row[1]).first()\n\n # If student doesn't exist, add student and add to student cohorts table\n if student == None:\n user = model.User(user_type=user_type, first_name=row[0], last_name=row[1])\n model.session.add(user)\n model.session.commit()\n\n # Get student's ID and add to studentcohorts table\n student_id = (model.User.query.filter_by(first_name=row[0], last_name=row[1]).first()).id\n studentcohort = model.StudentCohort(student_id=student_id, cohort_id=cohort_id)\n model.session.add(studentcohort)\n model.session.commit()", "title": "" }, { "docid": "bacf3d78368f3841beed62008cb69c1f", "score": "0.5475773", "text": "def test_csv_tuples(entities):\n mimetype = \"text/csv\"\n dummy_file = ReadWriteDummy()\n save_entities(\n entities=entities,\n file_=dummy_file,\n mimetype=mimetype,\n attributes=DEFAULT_ATTRIBUTES,\n )\n read_entities = list(ensure_dict(load_entities(file_=dummy_file, mimetype=mimetype)))\n assert_sequence_partial_equals(\n expected=entities, actual=read_entities, attributes_to_test=[\"ID\", \"href\"]\n )\n\n # needs second round trip test as csv converts everything to strings!\n dummy_file_2 = ReadWriteDummy()\n save_entities(\n entities=entities,\n file_=dummy_file_2,\n mimetype=mimetype,\n attributes=DEFAULT_ATTRIBUTES,\n )\n assert dummy_file.data == dummy_file_2.data\n read_entities_2 = list(\n ensure_dict(load_entities(file_=dummy_file_2, mimetype=mimetype))\n )\n 
assert_sequence_equals(expected=read_entities, actual=read_entities_2)", "title": "" }, { "docid": "ec943bbd5813a680f338efee7763c5bb", "score": "0.54753804", "text": "def csv(fname: str, fun):\n path = os.path.dirname(os.path.abspath(__file__))\n f_path = os.path.join(path, \"../data\", fname)\n with open(f_path, \"r+\") as input_file:\n for line in input_file:\n newLine = line.replace(\"\\n\", \"\").rstrip().split(\",\")\n t = []\n for i in newLine:\n t.append(coerce(i))\n fun(t.copy())", "title": "" }, { "docid": "135085555c16e939d22d13feb40d9a5b", "score": "0.5465842", "text": "def copy_from_csv(csv_path, tables):\n # Run \"import_career_data.py\"\n s1 = time.time()\n if settings.DEBUG:\n python_ver = \"python\" # My LocalHost\n else:\n python_ver = \"python3.6\"\n\n # python manage.py runscript process_career_file --script-args 14 18\n command = '{} manage.py runscript import_career_data --script-args \"{}\" \"{}\"'.format(\n python_ver, csv_path, ','.join(tables)\n )\n\n args = shlex.split(command)\n p = subprocess.Popen(args, close_fds=True)\n p.wait()\n logging.info('Tables imported in {}s.'.format(\n round(time.time() - s1, 3)\n ))", "title": "" }, { "docid": "0a08b34ba15a133514c6d96b17993c61", "score": "0.545642", "text": "def read_csv(self, csv_file_path):\n with open(csv_file_path, mode=\"r\") as input_file:\n # return <_csv.reader object at 0x000001D2463D5820>\n read_csv = csv.reader(input_file, delimiter=self.delimeter)\n list_read_csv = []\n for row in read_csv:\n list_read_csv.append(row)\n self.csv_file = list_read_csv", "title": "" }, { "docid": "23c9a5ac283cbaa3d26f6a551dc3ea4c", "score": "0.54507256", "text": "def load(self, dataset_file_path, delimiter=','):\n raise NotImplementedError", "title": "" }, { "docid": "4f4da6c66cc4626a8b0434a897a7abc8", "score": "0.5447038", "text": "def open_csv(self, filename: str):\n data = []\n delimiter = \",\"\n quotechar = None\n with open(file=filename, newline='') as csvfile:\n csv_reader = csv.reader(\n csvfile, delimiter=delimiter, quotechar=quotechar)\n data = [row for row in csv_reader]\n self.select_data(np.array(data))\n #print(\"open csv\")", "title": "" }, { "docid": "eb29cde4bd703eb385d7429e498e3b29", "score": "0.544013", "text": "def test_importer_command_with_semicolon(self):\n warehouse_csv = os.path.join(\n self.base_dir,\n 'tests/samples/warehouses_semicolon.csv'\n )\n management.call_command(\n 'importer',\n warehouse_csv,\n type='file',\n model_name='Warehouse',\n delimiter=';',\n )\n self.assertTrue(Warehouse.objects.filter(\n name=\"Berlin\"\n ).exists())", "title": "" }, { "docid": "f0bfacf7d539a16f800e24d797e3f6a4", "score": "0.5440052", "text": "def load(input):\n #return the file as a pandas data frame\n return pandas.read_csv(input, sep=\"\\s+\", header=None, names=INPUT_COLUMNS)", "title": "" }, { "docid": "47314be37e507029b5d1816b38aed67c", "score": "0.54383683", "text": "def read_csv(file_path, sep=\",\", quotechar=None, encoding='utf-8', nrows=10000000, timest_format=None):\n from pm4py.objects.log.util import dataframe_utils\n import pandas as pd\n if quotechar is not None:\n df = pd.read_csv(file_path, sep=sep, quotechar=quotechar, encoding=encoding, nrows=nrows)\n else:\n df = pd.read_csv(file_path, sep=sep, encoding=encoding, nrows=nrows)\n df = dataframe_utils.convert_timestamp_columns_in_df(df, timest_format=timest_format)\n if len(df.columns) < 2:\n logging.error(\n \"Less than three columns were imported from the CSV file. 
Please check the specification of the separation and the quote character!\")\n else:\n # logging.warning(\n # \"Please specify the format of the dataframe: df = pm4py.format_dataframe(df, case_id='<name of the case ID column>', activity_key='<name of the activity column>', timestamp_key='<name of the timestamp column>')\")\n pass\n\n return df", "title": "" }, { "docid": "56341c5743b8f11c5da69752860435a0", "score": "0.54359436", "text": "def test_load_sample_data_from_csv(self):\n data = load_data('Hemant-3.txt')\n self.assertEqual(type(data), list)\n\n if len(data) > 0:\n self.assertEqual(type(data[0]), list)", "title": "" }, { "docid": "e535454052cdb69f682a8d50d1353240", "score": "0.54346704", "text": "def import_data(filename):\n with Path(filename).open() as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n next(csv_reader) # skip header\n buffer, count = [], 0\n for row in csv_reader:\n count += 1\n survived, pclass, name, sex, age, siblings, parents, fare = row\n buffer.append(models.Person(\n survived=bool(survived), passenger_class=int(pclass),\n name=name, sex=sex, age=int(float(age)),\n siblings_or_spouses_aboard=int(float(siblings)),\n parents_or_children_aboard=int(parents),\n fare=float(fare)))\n if len(buffer) % 10000 == 0:\n print(f'Imported: {count} items')\n db.session.bulk_save_objects(buffer)\n db.session.commit()\n buffer = []\n if buffer:\n print(f'Imported: {count} items')\n db.session.bulk_save_objects(buffer)\n db.session.commit()", "title": "" }, { "docid": "e51088b3b0499ca1e3d30770bce02c9e", "score": "0.54280627", "text": "def import_csv_OLD (file_name, directory):\n stanza_list = []\n if file_name.lower().endswith('csv'):\n file_name = file_name[:-4]\n csv_file = directory + file_name + '.csv'\n with open(csv_file, 'r', encoding='utf8') as file:\n reader = csv.reader(file, quotechar='\"', delimiter=',', quoting=csv.QUOTE_ALL)\n for row in reader:\n stanza_list.append(row)\n return stanza_list", "title": "" }, { "docid": "00fe1d14d08fcc8aba400b0435c21b6c", "score": "0.54268855", "text": "def test_remote_csv_init_autodetect(self, csv_path):\n assert isinstance(Project(cfg=csv_path), Project)", "title": "" }, { "docid": "31439e669eff687dc31eeb7e42c3b31e", "score": "0.5419872", "text": "def import_csv (file_name, directory):\n stanza_list = []\n if file_name.lower().endswith('csv'):\n file_name = file_name[:-4]\n csv_file = directory + file_name + '.csv'\n with open(csv_file, 'r', encoding='utf8') as file:\n reader = csv.reader(file, quotechar='\"', delimiter=',', quoting=csv.QUOTE_ALL)\n for row in reader:\n stanza_list.append(row)\n return stanza_list", "title": "" }, { "docid": "640c8523e745246bca76bc54eace27c5", "score": "0.5418849", "text": "def read_csv_file(path, seperateHeader, removeEmptyLines):\n #Check if the given path is available\n if os.path.exists(path) == False:\n raise ValueError('The path specified does not exist')\n \n #Check if the file is a file (and not a directory)\n if os.path.isfile(path) == False:\n raise ValueError('The path specified is not a file') \n \n #Open the CSV file \n #Ignore encoding since python uses unicode as default charset. \n with open(path, newline='') as csvfile:\n\n try:\n # sniff the first line of the file to check its dialect\n dialect = csv.Sniffer().sniff( csvfile.readline() )\n except Exception as e:\n # catching all Exceptions is a bad thing. But the user\n # needs to be properly informed.\n raise ValueError('The path specified is not a valid CSV file. 
Error: ' + str(e))\n\n #Check if the csv.Sniffer detected a header line \n has_header = csv.Sniffer().has_header\n\n # Don't forget to reset the read position back to the start of\n # the file before reading any entries\n csvfile.seek(0)\n\n # read csv file according to dialect\n reader = csv.reader( csvfile, dialect )\n\n # read header, if there is one\n if has_header:\n header = next(reader)\n \n # read data into memory\n if removeEmptyLines:\n #but exclude all empty lines\n data = [row for row in reader if row != []]\n else:\n #read the lines exactly as they are\n data = [row for row in reader]\n\n # close input file\n csvfile.close()\n\n if seperateHeader:\n #The user would like to receive a dictonary\n #to be able to process the header of the file differently\n return {\n \"Header\": header,\n \"Body\": data\n }\n\n if removeEmptyLines and header == []:\n #It is possible that there is a header row with actualy no values\n #in this case, do not add the header line\n return data\n \n #add back the header, because the user does not want to separate.\n data.insert(0, header)\n return data", "title": "" }, { "docid": "28b440bd5970da5c02d6e1bf01e8b7ac", "score": "0.5412908", "text": "def exportCSV(self):\r\n self.updateDialect()\r\n \r\n try: \r\n self.filePath = asksaveasfilename(\r\n defaultextension = \".csv\",\r\n filetypes = [(\"CSV file\", \"*.csv\")],\r\n initialfile = \"output.csv\")\r\n self.exporter.exportAsCSV(self.filePath, \r\n self.data,\r\n self.sepCharBuffer, \r\n self.quoteCharBuffer, \r\n self.encodingBuffer)\r\n showinfo(\"success!\",\r\n \"Your CSV file is sucessfully saved!\")\r\n except TypeError:\r\n showerror(\"ERROR!\",\r\n \"Delimiter and quotechar must be a 1-character string.\")", "title": "" }, { "docid": "299e12f7cf7b4a1bc24c80b323900c1d", "score": "0.5410478", "text": "def test_csv_cli(tmpdir, script_runner):\n test_file = tmpdir / 'test.txt'\n test_file.write_binary(b'This is some test data!')\n test_file = test_file.basename\n cwd = str(tmpdir)\n\n ret = script_runner.run('mwcp', 'parse', '--no-output-files', '--format', 'csv', 'foo', test_file, cwd=cwd)\n print(ret.stdout)\n print(ret.stderr, file=sys.stderr)\n\n assert ret.success\n\n expected = (\n 'scan_date,inputfilename,outputfile.name,outputfile.description,outputfile.md5,address,debug,url\\n'\n f'[TIMESTAMP],{test_file},fooconfigtest.txt,example output file,5eb63bbbe01eeed093cb22bb8f5acdc3,127.0.0.1,'\n '\"[+] File test.txt identified as Foo.\\n'\n '[+] size of inputfile is 23 bytes\\n'\n '[+] test.txt dispatched residual file: fooconfigtest.txt\\n'\n '[+] File fooconfigtest.txt described as example output file\\n'\n f'[+] operating on inputfile {test_file}'\n '\",http://127.0.0.1\\n'\n )\n results = ret.stdout\n # Replace timestamp.\n results = re.sub('\\n[^\"]*?,', '\\n[TIMESTAMP],', results)\n assert results == expected", "title": "" }, { "docid": "5b9766d365c8dbd001941d191a90fe32", "score": "0.54054517", "text": "def upload_csv(self,csv_string: str) -> None:\n duplicates = [ ]\n new_labels = [ ]\n # Split CSV string assuming no header and first column is label\n lines = csv_string.splitlines()\n for line in lines:\n cells = line.split(\"\\t\")\n new_label = cells[0]\n if new_label in self.labels:\n duplicates.append(new_label)\n else:\n new_labels.append(new_label)\n # For all new labels, load to database as current items\n if new_labels:\n l = self.message.get()\n v = Version.objects.create(label=l)\n for new_label in new_labels:\n item = self.model.objects.create(label=new_label, 
version_first=v)\n item.save()\n self.set_list_context()\n d = {'new_labels': new_labels, 'duplicates': duplicates, 'model_name': self.name }\n self.context['item_list_heading'] = self.message.get(\"item_upload_heading\",d)\n self.context['item_list_message'] = self.message.get(\"item_upload_report\",d)\n return self.context", "title": "" }, { "docid": "60143a508d93a72dcdee1d9c5729f99d", "score": "0.5405044", "text": "def import_data_generator(filename):\n with open(filename) as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n for row in reader:\n yield row", "title": "" }, { "docid": "0eaf0c9f45054cb0fded8598c76465c1", "score": "0.5404698", "text": "def _read_csv(self, csv, json_in_note, time_columns):\n df = pd.read_csv(csv, sep=';', index_col='Date', parse_dates=[0], dayfirst=True,\n decimal=',', escapechar='\\\\')\n df.sort_index(axis=0, ascending=True, inplace=True) # Reverse by rows\n if json_in_note:\n df = self.__convert_json_columns(df, 'Note')\n if time_columns:\n df = self.__convert_time_columns(df, time_columns)\n return df", "title": "" }, { "docid": "b4201f944775244cac2d1686d05ec774", "score": "0.5401778", "text": "def import_career_data(\n user_id,\n fifa_edition,\n csv_path,\n):\n\n to_import = []\n\n for csv in SUPPORTED_TABLES:\n # example: media\\<user>\\data\\csv\\career_calendar.csv\n full_csv_path = os.path.join(csv_path, csv) + \".csv\"\n\n if csv == \"players\":\n if fifa_edition == '18':\n csv = \"players\"\n else:\n csv = \"players{}\".format(fifa_edition)\n\n model_name = \"datausers{}\".format(csv.replace(\"_\", \"\"))\n\n ct = ContentType.objects.get(model=model_name)\n model = ct.model_class()\n delete_from_model(model=model, user_id=user_id)\n\n if os.path.exists(full_csv_path):\n to_import.append(csv)\n\n copy_from_csv(csv_path=csv_path, tables=to_import)", "title": "" }, { "docid": "e2c1f7013f37dcc596a728c03a36c27f", "score": "0.53916377", "text": "def __parse_csv(self, contents):\n subformatters = [\n None, # Radnummer\n \"account_account_code\",\n \"account_account_number\",\n None, # Produkt\n \"currency_code\",\n \"transaction_accounting_date\",\n \"transaction_transaction_date\",\n None, # Valutadag\n \"transaction_reference\",\n None, # Beskrivning\n \"transaction_amount\",\n \"account_current_balance\"\n ]\n print((\"Filetype:\", self.options['filetype']))\n if self.options['filetype'] in ['csv', 'empty']:\n opts = {\n 'delimiter' : str(','),\n 'quoting' : csv.QUOTE_NONE\n }\n rows = CsvParser().parse(contents, opts=opts)\n return (rows, subformatters)\n else:\n return (contents, subformatters)", "title": "" }, { "docid": "3507a09fc37cec6bae2d4571dd36fed6", "score": "0.5385086", "text": "def LoadCSVFileIntoTable(DBEntry, FileEntry):\r\n try:\r\n # connect to database instance\r\n SqlConn = MySQLdb.connect(**DBEntry.ConnectionString)\r\n SqlCur = SqlConn.cursor()\r\n # execute bulk insert command\r\n SqlCur.execute((\"LOAD DATA INFILE %%s INTO TABLE %s \"\r\n \"FIELDS TERMINATED BY ','\") % \\\r\n FileEntry.TargetTable, \r\n (FileEntry.FilePath,))\r\n SqlCur.close()\r\n SqlConn.close()\r\n except Exception, e:\r\n # print e\r\n # bulk insert failed\r\n SqlCur.close()\r\n SqlConn.close()\r\n return False\r\n # bulk insert success\r\n return True", "title": "" }, { "docid": "655cca2fa441d9982b0577e455caf072", "score": "0.53826994", "text": "def load_csv(csv):\n portfolio_data_df = pd.read_csv(csv)\n return portfolio_data_df", "title": "" }, { "docid": "53cfb096472b9c7c3cb99308b33c7e21", "score": "0.5380278", "text": "def 
test_unicodecsv(self):\n from toolbox import unicodecsv\n from cStringIO import StringIO\n d = StringIO(\"\"\"Name,Type,County\nLa Cañada Flintridge,Neighborhood,L.A.County\nDowntown,Neighborhood,L.A.County\n\"\"\")\n reader = unicodecsv.UnicodeDictReader(d)\n reader.next()\n reader.__iter__()\n self.assertEqual(type(list(reader)), type([]))", "title": "" }, { "docid": "900a4edf76ff5e25edfad59649367a0c", "score": "0.5371871", "text": "def read_csv(input_file):\n\n file = open(input_file, 'r')\n reader = csv.reader(file)\n return reader", "title": "" }, { "docid": "0a011edc00847c5dc7752186a5b55bc6", "score": "0.5369007", "text": "def import_csv(self):\r\n path = askopenfilename(title=\"Philip Deck - Open\", filetypes=[('CSV files', '*.csv')])\r\n # #ADD ERROR CHECKING\r\n if path is not None and path != \"\":\r\n self.app.set_file(path)\r\n self.app.load_data_from_file()\r\n self.populate_listbox(self.app.data)\r\n self.set_infobox_msg(\"Imported \" + str(self.listbox.size()) + \" rows from \" + path)", "title": "" }, { "docid": "968542a7f349ab5cfdb21b0b19b77f88", "score": "0.5365539", "text": "def validate_csv(self, reader):\n pass", "title": "" }, { "docid": "388c3e170f48b821a3982f1eed3fa43f", "score": "0.5354577", "text": "def structure_from_csv(self):\n\n csvfile = self.REQUEST.form['csv_structure']\n if not csvfile:\n return 'Please provide a csv structure file !'\n else:\n import transaction\n import os\n from zope.interface import alsoProvides, noLongerProvides\n from Products.csvreplicata.interfaces import ICSVReplicable, \\\n Icsvreplicata\n from pgolf.tma.config import getImportOptionsFromIni\n import_settings, default_transition = getImportOptionsFromIni()\n import foo\n\n # are we in zopetestcase ?\n # if not we have to install temorarly csvreplicata just to import\n # structure else it's installed via base_setup\n zopetestcase = (('ZOPE_TESTCASE' in os.environ)\n or ('ZOPETESTCASE' in os.environ))\n\n portal_quickinstaller = self.portal_quickinstaller\n has_csvreplicata = portal_quickinstaller.\\\n isProductInstalled('csvreplicata')\n\n if not zopetestcase and not has_csvreplicata:\n #install temporarly csvreplicata if not used by policy.\n portal_quickinstaller.installProduct('csvreplicata')\n transaction.savepoint()\n\n #provide ICSVReplicable to plonesite\n alsoProvides(self, ICSVReplicable)\n\n csvreplicatatool = self.portal_csvreplicatatool\n # save existents user settings\n if has_csvreplicata:\n old_replicabletypes_settings = csvreplicatatool.replicabletypes\n\n # register Structure replicabletypes\n csvreplicatatool.replicabletypes = import_settings\n # now import\n replicator = Icsvreplicata(self)\n replicator.csvimport(csvfile, datetimeformat='%d/%m/%Y',\n wf_transition=default_transition)\n\n # restore replicabletypes user settings\n if has_csvreplicata :\n csvreplicatatool.replicabletypes = old_replicabletypes_settings\n\n #remove ICSVReplicable interface for self\n noLongerProvides(self, ICSVReplicable)\n\n #uninistall csvreplicata if not used by policy\n if not zopetestcase and not has_csvreplicata:\n portal_quickinstaller.uninstallProducts(['csvreplicata'])\n\n status_message = _(u'structure succesfully imported',\n default=u'structure succesfully imported')\n addStatusMessage(self.REQUEST, status_message)\n # and redirect him to the fine location\n next_url = self.portal_url()\n self.REQUEST.RESPONSE.redirect(next_url)\n return self.REQUEST.RESPONSE", "title": "" }, { "docid": "6c78de5959ed34e150db84474c50c55a", "score": "0.535013", "text": "def 
load_from_csv(self, data: List):\n self.date = datetime.strptime(data[0], \"%m/%d/%Y\")\n self.vendor = data[1]\n self.amount = int(float(data[3]))\n self.debit = True if data[4] == 'debit' else False\n self.category = data[5]\n self.label = data[7]", "title": "" } ]
5ad97563f6b71b5c046bd4bf03b2a5ee
Tests whether the parser is able to remove accents in a sentence.
[ { "docid": "e454df3d7ba40f6d5044264afe1d6097", "score": "0.74372214", "text": "def test_parser_can_remove_accents(self):\n parser = Parser(\n \"bienvenue à lès brignoles\",\n STOPWORDS,\n ACCENTS,\n QUESTIONS\n )\n result = parser.remove_accents()\n assert result == \"bienvenue a les brignoles\"", "title": "" } ]
[ { "docid": "89fa645e87c860f8c72a5b0778d44014", "score": "0.7088505", "text": "def test_remove_all_accent_string_with_accent(self):\r\n assert Parser().remove_all_accent(\"clélio favoccià\") \\\r\n == \"clelio favoccia\"", "title": "" }, { "docid": "dd1fa717b226a27361066e24002f1706", "score": "0.6986062", "text": "def test_accents(self):\n self.assertEqual(\"eeeaui\", tools.normalize_string(u\"éèêàüï\"))", "title": "" }, { "docid": "b0e53d995cabb7279878f8920b9a2794", "score": "0.69053733", "text": "def test_remove_all_accent_string_without_accent(self):\r\n assert Parser().remove_all_accent(\"clelio favoccia\") \\\r\n == \"clelio favoccia\"", "title": "" }, { "docid": "d360da970af4383d63e6b23b7379dadb", "score": "0.65611434", "text": "def test_sentence_input(self, sentence):\n if len(sentence.strip()) == 0: return False\n # reject_pat = re.compile(r\"(^')|('$)|\\s'|'\\s|[\\\"(\\(\\)\\[\\])]\")\n reject_pat = re.compile(r\"(?!)\")\n # Decode unicode, mainly to normalize fancy quotation marks\n if sentence.__class__.__name__ == \"str\": # pragma: no cover\n decoded = sentence\n else: # pragma: no cover\n decoded = unidecode(sentence)\n # Sentence shouldn't contain problematic characters\n if re.search(reject_pat, decoded): return False\n return True", "title": "" }, { "docid": "b336e06593eb18854a0491692dab8b61", "score": "0.64085853", "text": "def remove_accents(self, paragraph):\r\n pass", "title": "" }, { "docid": "55ca733df9c432b6bbea607d6ce588ff", "score": "0.6061409", "text": "def strip_accents(s):\r\n return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn')", "title": "" }, { "docid": "5bd47f4ae79a5a8948e4725b51b5b4cb", "score": "0.6029013", "text": "def _run_strip_accents(self, text):\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)", "title": "" }, { "docid": "500aeeec44739e7f946195f49995de0b", "score": "0.5990186", "text": "def tweet_with_car_accident(text):\n if \"תאונת דרכים\" in text or \"ת.ד\" in text:\n return True\n return False", "title": "" }, { "docid": "1c9236a6f2d54c4b41c115ad0c661b65", "score": "0.5981236", "text": "def _run_strip_accents(self, text):\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)", "title": "" }, { "docid": "1c9236a6f2d54c4b41c115ad0c661b65", "score": "0.5981236", "text": "def _run_strip_accents(self, text):\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)", "title": "" }, { "docid": "1c9236a6f2d54c4b41c115ad0c661b65", "score": "0.5981236", "text": "def _run_strip_accents(self, text):\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)", "title": "" }, { "docid": "3803369be6ce104f4a125471e5bc9196", "score": "0.5960543", "text": "def strip_accents_unicode(s):\n normalized = unicodedata.normalize('NFKD', s)\n if normalized == s:\n return s\n else:\n return ''.join([c for c in normalized if not unicodedata.combining(c)])", "title": "" }, { "docid": "e46f7a66449722e47396b5eae0dfbc21", "score": "0.5956367", "text": "def strip_accents_unicode(s):\n 
return ''.join([c for c in unicodedata.normalize('NFKD', s)\n if not unicodedata.combining(c)])", "title": "" }, { "docid": "31f95a9a7395b7c9a4d807ddab9b0a24", "score": "0.59493446", "text": "def strip_accents(s):\n try:\n s = unicode(s)\n except:\n s = s.decode(\"utf-8\")\n return ''.join(c for c in unicodedata.normalize('NFD', s)\n if unicodedata.category(c) != 'Mn')", "title": "" }, { "docid": "8f3f3d37bd1283df139e0afdce56a8a4", "score": "0.59460425", "text": "def filter_accents(text):\r\n return ''.join(letter for letter in text.lower() if 'WITH' in name(letter))", "title": "" }, { "docid": "4710a949e3a566d4397997812e8f6da7", "score": "0.59433866", "text": "def removeAccents(str):\n nkfd_form = unicodedata.normalize('NFKD', str)\n return \"\".join([c for c in nkfd_form if not unicodedata.combining(c)])", "title": "" }, { "docid": "05b67022a71181329193a35933f5abdc", "score": "0.59154075", "text": "def filter_accents(text):\n return [\n char.lower() for char in text if unicodedata.normalize(\"NFKD\", char) != char\n ]", "title": "" }, { "docid": "cb677d6712e9cb5fe843869381e08520", "score": "0.58429533", "text": "def strip_accents(s):\n return ''.join(c\n for c in normalize('NFD', s)\n if not combining(c))", "title": "" }, { "docid": "4c2c2158a5ffbb61579a4c473009bbfd", "score": "0.5828674", "text": "def test_clean_function(self):\n from chatterbot.apis import clean\n\n text = u\"Klüft skräms inför på fédéral électoral große\"\n clean_text = clean(text)\n normal_text = \"Kluft skrams infor pa federal electoral groe\"\n\n self.assertEqual(clean_text, normal_text)", "title": "" }, { "docid": "73253653578a82143a2841de132e6c09", "score": "0.58210325", "text": "def remove_accents(input_str):\n nfkd_form = unicodedata.normalize(\"NFKD\", input_str)\n return \"\".join([c for c in nfkd_form if not unicodedata.combining(c)])", "title": "" }, { "docid": "eaa59a0a29aff95b90173554c73f0f20", "score": "0.58013266", "text": "def strip_accents(self,text):\n import unicodedata\n try:\n text = unicode(text, 'utf-8')\n except NameError: # unicode is a default on python 3 \n pass\n except TypeError: # if text is already unicode\n pass\n text = unicodedata.normalize('NFD', text)\n text = text.encode('ascii', 'ignore')\n text = text.decode(\"utf-8\")\n return str(text)", "title": "" }, { "docid": "4184a28e2c2e3e9962576ecf61f34920", "score": "0.5794036", "text": "def StripAccents(text):\n try:\n text = unicode(text, 'utf-8')\n except NameError: # unicode is a default on python 3 \n pass\n text = unicodedata.normalize('NFD', text)\n text = text.encode('ascii', 'ignore')\n text = text.decode(\"utf-8\")\n return str(text)", "title": "" }, { "docid": "4184a28e2c2e3e9962576ecf61f34920", "score": "0.5794036", "text": "def StripAccents(text):\n try:\n text = unicode(text, 'utf-8')\n except NameError: # unicode is a default on python 3 \n pass\n text = unicodedata.normalize('NFD', text)\n text = text.encode('ascii', 'ignore')\n text = text.decode(\"utf-8\")\n return str(text)", "title": "" }, { "docid": "6f8015c4da2cd43a9cfb68a4bbf3c01f", "score": "0.5791557", "text": "def test_spaces_it(self):\n self.assertEqual(normalize('questo è un test ',\n lang='it'), 'questo è 1 test')\n self.assertEqual(normalize('un altro test ',\n lang='it'), '1 altro test')\n self.assertEqual(normalize('questa è un\\' altra amica ', lang='it',\n remove_articles=False),\n 'questa è 1 altra amica')\n self.assertEqual(normalize('questo è un test ', lang='it',\n remove_articles=False), 'questo è 1 test')", "title": "" }, { "docid": 
"89cf96c1cf1c0c2d00f123c3ae800fa5", "score": "0.5780324", "text": "def is_valid_unicode(unicode_entity):\n return unicode_entity in emojis.keys()", "title": "" }, { "docid": "a436238b00f4bb53e2052e273e61eca0", "score": "0.5775102", "text": "def remove_accents(self, data):\r\n return ''.join(x for x in unicodedata.normalize('NFKD', html.unescape(data)) if x in string.printable)", "title": "" }, { "docid": "99d6632bbd536fd62d90db376e250ce8", "score": "0.57742864", "text": "def is_egzotic_entity(token):\n if any(t in token for t in [\".\", \"!\", \"#\", \"&\"]):\n return True\n #Camel case words \n if not token.isupper() and not token.islower():\n return True\n return False", "title": "" }, { "docid": "23be2fa55b14c5643eff547d789e8315", "score": "0.57036155", "text": "def test_acronyms_unicode(self, _, case, value, expected):\n case_converter = getattr(case_conversion, case)\n result = case_converter(value, acronyms=ACRONYMS_UNICODE)\n self.assertEqual(result, expected)", "title": "" }, { "docid": "04f502521dd649322f0ee99df46444d1", "score": "0.56898874", "text": "def test_latin_parse_diacritics(self):\n inputs = [\"a\", \"ū\", \"ï\"]\n transcriber = lat.Transcriber(\"Classical\", \"Allen\")\n outputs = [transcriber._parse_diacritics(char) for char in inputs]\n target = [unicodedata.normalize('NFC', c) for c in \n [\"a///\", \"u/\" + lat.chars.LONG + \"//\", \n \"i//\" + lat.chars.DIAERESIS + \"/\"]]\n self.assertEqual(outputs, target)", "title": "" }, { "docid": "70fcef3bd48804488ff8fe624702f3f6", "score": "0.5678072", "text": "def test_acronyms_not_an_acronym_different_symbols(self):\n text = \"I.B-M\"\n acronyms_list = acronyms(text)\n self.assertEqual(len(acronyms_list), 0)", "title": "" }, { "docid": "fa5f9c62d061aedc45427a3e7e8a093b", "score": "0.5648356", "text": "def remove_accent(string_with_diacritics):\n return unicodedata.normalize('NFKD', string_with_diacritics).encode('ASCII', 'ignore').decode('ASCII')", "title": "" }, { "docid": "58e40857075ead26a541ea332b2d079b", "score": "0.564304", "text": "def filter_term_en(term):\n if term.strip() == '' \\\n or not term[0].isalnum() \\\n or term.find('---') > -1 \\\n or term.find('___') > -1 \\\n or len([c for c in term if c.isalpha()]) * 2 < len(term) \\\n or len(term) > 75:\n return True\n return False", "title": "" }, { "docid": "041a03276f9f5bbc2d722f2957388e2b", "score": "0.5635907", "text": "def _check_text(text):\n for char in text:\n if char not in \"/()”*^\\\\\\\"<>[]\\'~\":\n if not (0 <= ord(char) <= 127):\n return True\n return classify(text)", "title": "" }, { "docid": "60cec6ec93b821f8956e0618b319e192", "score": "0.56347173", "text": "def is_english(word_):\n return word_.lower().strip('.,!()') in WORD_LIST", "title": "" }, { "docid": "9ed08ff321622a08e723b4c1b53665d4", "score": "0.56319976", "text": "def is_english(entity):\n return entity[:6] == b\"/c/en/\"", "title": "" }, { "docid": "43172c203dc0847855dc870001a5d06e", "score": "0.56305623", "text": "def strip_accents_ascii(s):\n nkfd_form = unicodedata.normalize('NFKD', s)\n return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')", "title": "" }, { "docid": "43172c203dc0847855dc870001a5d06e", "score": "0.56305623", "text": "def strip_accents_ascii(s):\n nkfd_form = unicodedata.normalize('NFKD', s)\n return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')", "title": "" }, { "docid": "be2192fbd07be2854542c0d346fc6b92", "score": "0.5614366", "text": "def remove_accents(raw_text):\n\n raw_text = re.sub(u\"[àáâãäå]\", 'a', raw_text)\n raw_text = re.sub(u\"[èéêë]\", 'e', 
raw_text)\n raw_text = re.sub(u\"[ìíîï]\", 'i', raw_text)\n raw_text = re.sub(u\"[òóôõö]\", 'o', raw_text)\n raw_text = re.sub(u\"[ùúûü]\", 'u', raw_text)\n raw_text = re.sub(u\"[ýÿ]\", 'y', raw_text)\n raw_text = re.sub(u\"[ß]\", 'ss', raw_text)\n raw_text = re.sub(u\"[ñ]\", 'n', raw_text)\n return raw_text", "title": "" }, { "docid": "a15817955ec0ae98fe933dcc1e815b3d", "score": "0.5608889", "text": "def deaccent_string(self, string):\n temp = ''\n for s in string:\n if s.lower() in AZBUKA_STR:\n temp += s\n elif s in ' {}1234567890dд':\n temp += s\n return temp", "title": "" }, { "docid": "ce1132f9aa463cbe31071795d4b3c760", "score": "0.5588464", "text": "def strip_accents(self,text):\n try:\n text = unicode(text, 'utf-8')\n except (TypeError, NameError): # unicode is a default on python 3\n pass\n #text = re.sub('[ ]+', '_', text)\n #text.replace(' ', '')\n text = unicodedata.normalize('NFD', text)\n text = text.encode('ascii', 'ignore')\n text = text.decode(\"utf-8\")\n return str(text)", "title": "" }, { "docid": "27a641d30da6d82b4da8d6b6c90e3d1b", "score": "0.55866206", "text": "def test_tokenization_of_not_acronyms(self):\n text = \"This-is-not-an-acronym\"\n tokens = tokenize_content(text)\n self.assertCountEqual(\n tokens,\n [\"This\", \"is\", \"not\", \"an\", \"acronym\", \"This-is-not-an-acronym\", \"Thisisnotanacronym\"]\n )", "title": "" }, { "docid": "b26f297e29bb14123b73c9294b4a01b9", "score": "0.5571181", "text": "def test_sent_invalid(self):\n sentence = 'Papa the caviar with a spoon'\n result = self.parser.parse_sentence(sentence)\n self.assertEqual(result, 'NONE')", "title": "" }, { "docid": "1e4cd650245c709cae0798b29d4c253b", "score": "0.5556966", "text": "def strip_accents(text):\n try:\n text = unicode(text, 'utf-8')\n except (TypeError, NameError): # unicode is a default on python 3 \n pass\n text = unicodedata.normalize('NFD', text)\n text = text.encode('ascii', 'ignore')\n text = text.decode(\"utf-8\")\n return str(text)", "title": "" }, { "docid": "51b51e3426528bcb3687a8732eb5050d", "score": "0.5521159", "text": "def test_acronyms_not_an_acronym(self):\n text = \"This-is-not-an-acronym\"\n acronyms_list = acronyms(text)\n self.assertEqual(len(acronyms_list), 0)", "title": "" }, { "docid": "fc56516502fbcf65844da7276e518639", "score": "0.5516247", "text": "def filter_sentence(sentence):\n sent = \"\"\n for i in sentence.lower():\n if i in eng_alphabets:\n sent = sent + i\n return sent", "title": "" }, { "docid": "9769aec873306e18cd8a68bdd2e70b77", "score": "0.550561", "text": "def remove_accents(raw_text:str) -> str :\n raw_text = re.sub(u\"[àáâãäå]\", 'a', raw_text)\n raw_text = re.sub(u\"[èéêë]\", 'e', raw_text)\n raw_text = re.sub(u\"[ìíîï]\", 'i', raw_text)\n raw_text = re.sub(u\"[òóôõö]\", 'o', raw_text)\n raw_text = re.sub(u\"[ùúûü]\", 'u', raw_text)\n raw_text = re.sub(u\"[ýÿ]\", 'y', raw_text)\n raw_text = re.sub(u\"[ß]\", 'ss', raw_text)\n raw_text = re.sub(u\"[ñ]\", 'n', raw_text)\n return raw_text", "title": "" }, { "docid": "a17b740bb1e81d19d5f8551ca948c494", "score": "0.550504", "text": "def is_ascii(word):\n def onlyascii(char):\n if ord(char) > 127:\n return ''\n else:\n return char\n for c in word:\n if not onlyascii(c):\n return False\n return True", "title": "" }, { "docid": "a22b5e3415fd6b02484c931959780d4c", "score": "0.5504136", "text": "def strip_accent(text, match_case=False, throw_error=False):\n new_word = \"\"\n indx = 0\n length = len(text)\n found = False\n while indx < length:\n x = text[indx]\n ordinal = ord(x)\n if ordinal > 
ASCII_BOUNDRY:\n if ordinal in ORDINAL:\n x = ORDINAL[ordinal]\n if match_case and (indx == 0 or (not isletter(text[indx - 1]))):\n x = x.upper()\n found = True\n elif throw_error:\n raise Exception(\"Ordinal not in Accent dict: \" + str(ordinal) + \" - char: \" + x)\n\n new_word += x\n indx += 1\n\n if found and match_case:\n new_word = upperit(new_word)\n\n return new_word", "title": "" }, { "docid": "f2782b401d40613af56f7dbdc5e46621", "score": "0.550059", "text": "def is_uc(self):\n c = self.get_feature('lemma')[0]\n \n return unicodedata.category(c) == 'Lu'", "title": "" }, { "docid": "f4feaabda0a841afcec894bb769fd28e", "score": "0.5488978", "text": "def clean_caesar(text):\n return text.upper()", "title": "" }, { "docid": "5b51f68f75d7d654f36cb1a17ce90d7b", "score": "0.54783815", "text": "def test_unicode_n_verb_encourage(self):\n self.assertEqual(\"\\U0000263A\", \"☺\")\n sentence1 = self.lex.define(self.comment, \"enc☺urage\")\n sentence2 = self.lex[sentence1.idn]\n self.assertEqual(\"enc☺urage\", sentence2.txt)\n self.assertTrue(self.lex['enc☺urage'].is_a_verb())\n self.assertTrue(self.lex['enc☺urage'].exists())\n self.assertTrue(self.lex['enc☺urage'].is_a_verb())", "title": "" }, { "docid": "bdc3dede256f11f477b37cb44e2be4ae", "score": "0.5472161", "text": "def com_google_fonts_check_name_unwanted_chars(ttFont):\n failed = False\n replacement_map = [(\"\\u00a9\", '(c)'),\n (\"\\u00ae\", '(r)'),\n (\"\\u2122\", '(tm)')]\n for name in ttFont['name'].names:\n string = str(name.string, encoding=name.getEncoding())\n for mark, ascii_repl in replacement_map:\n new_string = string.replace(mark, ascii_repl)\n if string != new_string:\n yield FAIL,\\\n Message(\"unwanted-chars\",\n f\"NAMEID #{name.nameID} contains symbols that\"\n f\" should be replaced by '{ascii_repl}'.\")\n failed = True\n if not failed:\n yield PASS, (\"No need to substitute copyright, registered and\"\n \" trademark symbols in name table entries of this font.\")", "title": "" }, { "docid": "884dea320a7f45ab0d962fd9c356a258", "score": "0.54688823", "text": "def com_google_fonts_check_name_ascii_only_entries(ttFont):\n bad_entries = []\n for name in ttFont[\"name\"].names:\n if name.nameID == NameID.COPYRIGHT_NOTICE or \\\n name.nameID == NameID.POSTSCRIPT_NAME:\n string = name.string.decode(name.getEncoding())\n try:\n string.encode('ascii')\n except:\n bad_entries.append(name)\n badstring = string.encode(\"ascii\",\n errors='xmlcharrefreplace')\n yield FAIL,\\\n Message(\"bad-string\",\n (f\"Bad string at\"\n f\" [nameID {name.nameID}, '{name.getEncoding()}']:\"\n f\" '{badstring}'\"))\n if len(bad_entries) > 0:\n yield FAIL,\\\n Message(\"non-ascii-strings\",\n (f\"There are {len(bad_entries)} strings containing\"\n \" non-ASCII characters in the ASCII-only\"\n \" NAME table entries.\"))\n else:\n yield PASS, (\"None of the ASCII-only NAME table entries\"\n \" contain non-ASCII characteres.\")", "title": "" }, { "docid": "2545d05bf653e492c433d6f99a4794b8", "score": "0.5459224", "text": "def isEnglish(s):\r\n try:\r\n s.encode(encoding='utf-8').decode('ascii')\r\n except UnicodeDecodeError:\r\n return False\r\n else:\r\n return True", "title": "" }, { "docid": "02bd484af9335b56795a9db2f492b79d", "score": "0.5458182", "text": "def test_greek_s_voice_assimilation(self):\n condition = grc.Word(\"ẹːrgɑsménon\", grc.GREEK[\"Attic\"][\"Probert\"])\n condition._refresh()\n condition._s_voice_assimilation()\n output = ''.join([p.ipa for p in condition.phones])\n target = unicodedata.normalize('NFC', \"ẹːrgɑzménon\")\n 
self.assertEqual(output, target)", "title": "" }, { "docid": "c23184493c99696ad28ad83916d91299", "score": "0.54565585", "text": "def test_articles_it(self):\n self.assertEqual(normalize('questo è il test',\n lang='it', remove_articles=True),\n 'questo è test')\n self.assertEqual(normalize('questa è la frase',\n lang='it', remove_articles=True),\n 'questa è frase')\n self.assertEqual(normalize('questo è lo scopo', lang='it',\n remove_articles=True),\n 'questo è scopo')\n self.assertEqual(normalize('questo è il test extra',\n lang='it', remove_articles=False),\n 'questo è il test extra')", "title": "" }, { "docid": "9d0084eb94234436078d883351e3f4d6", "score": "0.5430357", "text": "def test_acronyms_preserve_case_unicode(self, _, case, value, expected):\n case_converter = getattr(case_conversion, case)\n result = case_converter(value, acronyms=ACRONYMS_UNICODE)\n self.assertEqual(result, expected)", "title": "" }, { "docid": "de8ee1d8fb2e8180b3ead03a0516b378", "score": "0.5426595", "text": "def test_capitalization(self):\n assert not chk(\n \"In the stone age\", self.l_caps, self.err, self.msg,\n ignore_case=False)\n assert chk(\n \"In the Stone Age\", self.l_caps, self.err, self.msg,\n ignore_case=False) == []", "title": "" }, { "docid": "44e1038c5cdc35f5a80dc56862abe500", "score": "0.5405425", "text": "def remove_accent_marks(request):\n keys = ['text']\n values = get_data(request, keys)\n\n if not values[0]:\n abort(400, 'missing text parameter')\n\n return _call('remove_accent_marks', keys, values)", "title": "" }, { "docid": "fae46f32b842e6660775bf86e1ca14a3", "score": "0.54042774", "text": "def remove_accents(string):\n\n try:\n string = unicode(string, 'utf-8')\n # unicode is a default on python 3\n except (TypeError, NameError):\n pass\n\n nfkd_form = unicodedata.normalize('NFKD', string)\n return u\"\".join([c for c in nfkd_form if not unicodedata.combining(c)])", "title": "" }, { "docid": "9eda154261add799566226c67cc4f301", "score": "0.5392797", "text": "def stripNull(self):\n for i in range(len(self.doc._Document__sentence)):\n self.doc._Document__sentence[i].sentence = ''.join([char for char in self.doc._Document__sentence[i].sentence if 97 <= ord(char) <= 122 or 65 <= ord(char) <= 90 or 48 <= ord(char) <= 57 or ord(char) == 32])", "title": "" }, { "docid": "d66162e83ec1b48a58c16f9ea4125c5b", "score": "0.5385774", "text": "def remove_accents(string: str) -> str:\n\n return \"\".join(\n c for c in unicodedata.normalize(\"NFKD\", string) if not unicodedata.combining(c)\n )", "title": "" }, { "docid": "29c28394abe68483e92b5f804dfad249", "score": "0.5381181", "text": "def nonascii_character_check(line):\n return __regex_nonascii_characters.search(line) is not None", "title": "" }, { "docid": "2e09c65fc97ddd306a88bddb5c496ec1", "score": "0.53644603", "text": "def isascii(text):\n return all(ord(c) < 128 for c in text)", "title": "" }, { "docid": "778b8b4a926a6dd2327f5a9578bed5a8", "score": "0.53600067", "text": "def test_normalise(self):\n\t\tself.assertEqual(normalise('nɪçt'), 'nɪçt') # ç in normal form C\n\t\tself.assertEqual(normalise('nɪçt'), 'nɪçt') # ç in normal form D", "title": "" }, { "docid": "97571150937ec0111c6ba8321a3e9c1c", "score": "0.53321946", "text": "def is_ascii(self, name):\n return all(ord(c) < 128 for c in name)", "title": "" }, { "docid": "477c2ffeda7e01656b8365c3c233d4c7", "score": "0.5331093", "text": "def normalize(s):\r\n return strip_accents(strip_non_alphanumerics(s)).lower()", "title": "" }, { "docid": "2733b4334dbf940cf90f4f15448c5549", "score": 
"0.5305471", "text": "def is_ascii(s):\n return all(97 <= ord(c) <= 122 for c in s)", "title": "" }, { "docid": "a6bd34186acee6efff184613d349fb32", "score": "0.53048724", "text": "def basic_cleanup(corpus_blob, lang):\n corpus_blob = corpus_blob.split('\\n')\n corpus_blob = [s for s in corpus_blob if not (s.startswith(\"=\") and s.endswith(\"=\"))]\n corpus_blob = \" \".join(corpus_blob)\n sents = nltk.sent_tokenize(corpus_blob, language=lang) # sentence-tokenize\n sents = [re.sub(\"(&quot\\s*;|&amp\\s;)\", \"\", sent) for sent in sents] # remove html quirks\n sents = [unicodedata.normalize('NFKC', sent) for sent in sents]\n return sents", "title": "" }, { "docid": "f202e625487601f9a2e826de69bdedc9", "score": "0.5304463", "text": "def _is_clean(self, word):\n\n word_pos = pos_tagger.tag([word])[0][1]\n\n # si c'est un mot clé\n if not word.lower() in self._keywords:\n return False\n\n # normal ascii, no punctuation\n for char in word:\n if not char in string.printable:\n return False\n if char in string.punctuation:\n return False\n\n CURRENT = [\"PROPN\", \"ADJ\", \"NUM\", \"ADV\", \"NOUN\", \"X\"]\n # adj and noun only\n if not word_pos in CURRENT:\n return False\n\n # removes words like \"use\" (NN), but allows abbreviations\n if ((word_pos == \"NOUN\" or word_pos == \"ADJ\") and len(word) <= 4) or (word_pos == \"PROPN\" and len(word) <= 5):\n if word.islower() or word[:1].isupper() and word[1:].islower():\n return False\n\n # removes small unimportant words\n if word in stopwords.words(\"french\"):\n return False\n\n return True", "title": "" }, { "docid": "ce0f295a8b9b1065516a5ec4739d7023", "score": "0.5303659", "text": "def _remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "title": "" }, { "docid": "c4036b4ce759e6519f15dfb5f433ebbb", "score": "0.5298334", "text": "def deaccent(word):\n return word.lower().translate(str.maketrans(\"áéíóöőúüű\", \"aeiooouuu\"))", "title": "" }, { "docid": "669c341b6c411bbfc399a8e0779e591f", "score": "0.52831894", "text": "def supprime_accent(ligne):\n accent = ['é', 'è', 'ê', 'à', 'ù', 'û', 'ç', 'ô', 'î', 'ï', 'â']\n sans_accent = ['e', 'e', 'e', 'a', 'u', 'u', 'c', 'o', 'i', 'i', 'a']\n i = 0\n while i < len(accent):\n ligne = ligne.replace(accent[i], sans_accent[i])\n i += 1\n return ligne", "title": "" }, { "docid": "765023f251f249f62666f8cf713a78a2", "score": "0.5267472", "text": "def RemoveAccentedChars(data):\n assert isinstance(data, str)\n\n data = unicodedata.normalize('NFKD', data).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n return data", "title": "" }, { "docid": "37b473381463f31629919e7a547779d6", "score": "0.52660984", "text": "def test_nonalphabetic(self):\n content = \"\"\"\nLEXICON Cc\n+CC:0 # ;\n\"\"\"\n expected_result = \"\"\"\nLEXICON Cc\n +CC:0 # ;\n\"\"\"\n aligner = LexcAligner()\n aligner.parse_lines(content.split(\"\\n\"))\n self.assertEqual(expected_result, \"\\n\".join(aligner.adjust_lines()))", "title": "" }, { "docid": "65fb2064da116ab39551df806eb17fdc", "score": "0.5249962", "text": "def test_is_letter_non_ipa(self):\n\t\tfor char in ['ʣ', 'ɫ', 'g', 'Γ', 'F', 'Lj']:\n\t\t\tself.assertFalse(is_letter(char, strict=True))\n\t\t\tself.assertTrue(is_letter(char, strict=False))", "title": "" }, { "docid": "9bda64fdba8613ec4ffb4b2a529e4061", "score": "0.5246925", "text": "def is_letter(c):\n if unicodedata.category(c) in ['Ll', 'Lu']:\n return 
unicodedata.name(c).find('LATIN') >= 0\n else:\n return False", "title": "" }, { "docid": "df35ee16750b9c1c68c28edaf988af52", "score": "0.52449775", "text": "def test_answers_are_lowercase(self, riddle_cls):\n for a in riddle_cls.CORRECT_ANSWERS:\n for char in a:\n self.assertFalse(char.isupper())", "title": "" }, { "docid": "769dae6ed5b4884c34dc4461964596c4", "score": "0.5244727", "text": "def test_parser_can_return_clean_of_all_output(self):\n parser = Parser(\n \"Bonjour Grandpy, peux-tu me dire l'adresse de la tour Eiffèl?\",\n STOPWORDS,\n ACCENTS,\n QUESTIONS\n )\n result = parser.clean()\n assert result == \"tour eiffel\"", "title": "" }, { "docid": "8532c024d433b1bdb3486bb97ef32b92", "score": "0.5241758", "text": "def parse_sent(sentence):\n # remove whitespace at the beginning\n sline = sentence.strip()\n tokenized_line = jieba.cut(filterpunt(sline))\n # parse digits, remove signs\n is_alpha_word_line = [\n word for word in tokenized_line\n if not wordnet.synsets(word) and not word.isdigit()\n if word not in stopwords\n ]\n\n return is_alpha_word_line", "title": "" }, { "docid": "bcc3d49b1eb6d9198dd6d6f4a1b5b4fa", "score": "0.52405924", "text": "def remove_non_english_words(sentence):\n return \" \".join(str(w) for w in sentence.split() if w in english_words)", "title": "" }, { "docid": "f3d3e962bcdadde6bcce6f4177c02d19", "score": "0.5230504", "text": "def _remove_non_ascii(words):\r\n new_words = []\r\n for word in words:\r\n word=word.replace('xa0','')\r\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\r\n new_words.append(new_word)\r\n return new_words", "title": "" }, { "docid": "5f9a1c3bc58a40e61b41ed39bf471325", "score": "0.5216079", "text": "def _validate(self):\n # TODO: create phrase validation\n # 1. removes all substitutions\n # 2. 
checks pronouncable symbol sets\n pass", "title": "" }, { "docid": "36e1ce9c5158a34f93458ae3af3815c5", "score": "0.5215159", "text": "def normalize_answer(s):\n def remove_articles(text):\n regex = re.compile(r'\\b(a|an|the)\\b', re.UNICODE)\n return re.sub(regex, ' ', text)\n def white_space_fix(text):\n return ' '.join(text.split())\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n def lower(text):\n return text.lower()\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "title": "" }, { "docid": "36e1ce9c5158a34f93458ae3af3815c5", "score": "0.5215159", "text": "def normalize_answer(s):\n def remove_articles(text):\n regex = re.compile(r'\\b(a|an|the)\\b', re.UNICODE)\n return re.sub(regex, ' ', text)\n def white_space_fix(text):\n return ' '.join(text.split())\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n def lower(text):\n return text.lower()\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "title": "" }, { "docid": "d5adc416fa49981519f5cebe50bdcb2c", "score": "0.5209722", "text": "def test_latin_n_place_assimilation(self):\n condition = lat.Word(\"lɪn̪gʷa\", lat.LATIN[\"Classical\"][\"Allen\"])\n condition._refresh()\n condition._n_place_assimilation()\n output = ''.join([p.ipa for p in condition.phones])\n target = unicodedata.normalize('NFC', \"lɪŋgʷa\")\n self.assertEqual(output, target)", "title": "" }, { "docid": "3ecdc5c1f954d896b3c453eb7b113e43", "score": "0.5207674", "text": "def valid_sent(doc):\n if detect_en(doc) and len(doc) >=4:\n words = set([token.lower for token in doc if token.is_alpha & (len(token)>2)])\n if len(words) >= 3:\n return True\n return False", "title": "" }, { "docid": "db2c58d941a9c94ab0c43f33b7dd47af", "score": "0.5199704", "text": "def is_english(s):\n try:\n s.encode(encoding=\"utf-8\").decode(\"ascii\")\n except UnicodeDecodeError:\n return False\n else:\n return True", "title": "" }, { "docid": "70ddc7e3b9da035162ecc0601f0e87a4", "score": "0.5194205", "text": "def is_capitalized(line):\n return not line or line.strip() == capitalize(line)", "title": "" }, { "docid": "638e6322c65a9b1d3400394ff8912935", "score": "0.51844895", "text": "def sanitize(word):\n word = word.lower()\n clean_word = ''\n for letter in word:\n if letter in string.ascii_lowercase:\n clean_word += letter\n return clean_word", "title": "" }, { "docid": "f9430aa80d9e03c150e7859b25a41505", "score": "0.5179621", "text": "def normalize_answer(s):\n\n def remove_articles(text):\n regex = re.compile(r'\\b(a|an|the)\\b', re.UNICODE)\n return re.sub(regex, ' ', text)\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "title": "" }, { "docid": "4bff6e3f7b6b01b46023144d60e3c005", "score": "0.5177177", "text": "def tweet_with_accident_vehicle_and_person(text):\n if (\"הולך רגל\" in text or \"הולכת רגל\" in text or \"נהג\" in text or \"אדם\" in text) and (\n \"רכב\" in text\n or \"מכונית\" in text\n or \"אופנוע\" in text\n or \"ג'יפ\" in text\n or \"טרקטור\" in text\n or \"משאית\" in text\n or \"אופניים\" in text\n or \"קורקינט\" in text\n ):\n return True\n return False", "title": "" }, { "docid": "bc7ada5970af9c679a5cbf123ad2c13f", "score": "0.5169849", "text": "def 
normalize_answer(s):\n\tdef remove_articles(text):\n\t\treturn re.sub(r'\\b(a|an|the)\\b', ' ', text)\n\n\tdef white_space_fix(text):\n\t\treturn ' '.join(text.split())\n\n\tdef remove_punc(text):\n\t\texclude = set(string.punctuation)\n\t\treturn ''.join(ch for ch in text if ch not in exclude)\n\n\tdef lower(text):\n\t\treturn text.lower()\n\n\treturn white_space_fix(remove_articles(remove_punc(lower(s))))", "title": "" }, { "docid": "37321b47de193d2c76b310f4af4af39f", "score": "0.5161219", "text": "def __is_valid_ascii(ascii):\n return True", "title": "" }, { "docid": "9648e2b4e3ae52083c62f43b2aaf2b97", "score": "0.5155163", "text": "def transliteration_cleaners(text):\n text = convert_to_ascii(text)\n text = lowercase(text)\n text = collapse_whitespace(text)\n return text", "title": "" }, { "docid": "51e5be38d2aa6539f1ac0aa8ed61c28a", "score": "0.5151134", "text": "def remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "title": "" }, { "docid": "51e5be38d2aa6539f1ac0aa8ed61c28a", "score": "0.5151134", "text": "def remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "title": "" }, { "docid": "51e5be38d2aa6539f1ac0aa8ed61c28a", "score": "0.5151134", "text": "def remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "title": "" }, { "docid": "51e5be38d2aa6539f1ac0aa8ed61c28a", "score": "0.5151134", "text": "def remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "title": "" }, { "docid": "af9f61a6b10492244f3c6f5341e6e80c", "score": "0.51432914", "text": "def cleanse(s):\n return ''.join([c for c in s if c.isalpha()])", "title": "" } ]
acc7a63206020ff5d6bb00ecbe4b662b
Returns a static stroke object for testing
[ { "docid": "dbd8f386dcf0a1a9e6e9c93e5f877671", "score": "0.67312616", "text": "def get_test_stroke():\n from freestyle.types import Stroke, Interface0DIterator, StrokeVertexIterator, SVertex, Id, StrokeVertex\n # points for our fake stroke\n points = (Vector((1.0, 5.0, 3.0)), Vector((1.0, 2.0, 9.0)),\n Vector((6.0, 2.0, 3.0)), Vector((7.0, 2.0, 3.0)), \n Vector((2.0, 6.0, 3.0)), Vector((2.0, 8.0, 3.0)))\n ids = (Id(0, 0), Id(1, 1), Id(2, 2), Id(3, 3), Id(4, 4), Id(5, 5))\n\n stroke = Stroke()\n it = iter(stroke)\n\n for svert in map(SVertex, points, ids):\n stroke.insert_vertex(StrokeVertex(svert), it)\n it = iter(stroke)\n\n stroke.update_length()\n return stroke", "title": "" } ]
[ { "docid": "26e01ae5b10c2afe2c53fe297785f838", "score": "0.6220379", "text": "def _get_stroke(self):\n return self._stroke", "title": "" }, { "docid": "09db3748c30cbedf546cd383e9fa9b06", "score": "0.6145492", "text": "def getDrawing1():\n\n D = Drawing(400, 200)\n D.add(Rect(50, 50, 300, 100, fillColor=colors.yellow)) #round corners\n D.add(String(180,100, 'Hello World', fillColor=colors.red))\n\n\n return D", "title": "" }, { "docid": "2e8f0e247cd049cde295063d88706360", "score": "0.6033628", "text": "def getDrawing2():\n D = Drawing(400, 200) #, fillColor=colors.purple)\n\n D.add(Line(10,10,390,190))\n D.add(Circle(100,100,20, fillColor=colors.purple))\n D.add(Circle(200,100,20, fillColor=colors.purple))\n D.add(Circle(300,100,20, fillColor=colors.purple))\n\n D.add(Wedge(330,100,40, -10,40, fillColor=colors.purple))\n\n D.add(PolyLine([120,10,130,20,140,10,150,20,160,10,\n 170,20,180,10,190,20,200,10]))\n\n D.add(Polygon([300,20,350,20,390,80,300,75, 330, 40]))\n\n D.add(Ellipse(50, 150, 40, 20))\n\n D.add(Rect(120, 150, 60, 30,\n strokeWidth=10,\n strokeColor=colors.red,\n fillColor=colors.yellow)) #square corners\n\n D.add(Rect(220, 150, 60, 30, 10, 10)) #round corners\n\n D.add(String(10,50, 'Basic Shapes', fillColor=colors.black))\n\n return D", "title": "" }, { "docid": "eb80784940dff22bdd3ac6463a553791", "score": "0.56943166", "text": "def stroke(*args, **kwargs):\n pass", "title": "" }, { "docid": "546fbc32a39df225675e318bc0f33ace", "score": "0.5623495", "text": "def __init__(self):\n self.red_line = Line(\"red\")\n self.green_line = Line(\"green\")\n self.blue_line = Line(\"blue\")", "title": "" }, { "docid": "d476e938a15f0c6fbfec9b64f87a6396", "score": "0.55365694", "text": "def GetPen(self):", "title": "" }, { "docid": "baca23cfeb3fd7604c6b5e690aad4a4d", "score": "0.55054855", "text": "def copy(self):\n\t\tc = Stroke()\n\t\tc.copy_from(self)\n\t\treturn c", "title": "" }, { "docid": "a19f3b5fb48658efeb3bff30249a6b02", "score": "0.54741794", "text": "def get_pen(self):\n # No stroke?\n if (self.stroke_color is None or self.stroke_width <= 0. 
or\n self.stroke_alpha <= 0.):\n return None\n return aggdraw.Pen(self.stroke_color.hex_l, width=self.stroke_width,\n opacity=int(255 * self.stroke_alpha))", "title": "" }, { "docid": "4537ea1fa50e017caa969eb64e4abd0e", "score": "0.54679406", "text": "def _getShape(self):\n # TODO: Move inside FlatBezierpath.\n #if self._fill is noColor and self._stroke is noColor:\n # self._fill = whiteColor\n shape = self.b.shape()\n\n if self._fill and self._fill != noColor:\n shape.fill(self.getFlatRGB(self._fill))\n else:\n shape.nofill()\n\n if self._stroke and self._stroke != noColor:\n shape.stroke(self.getFlatRGB(self._stroke)).width(self._strokeWidth)\n else:\n shape.nostroke()\n\n return shape", "title": "" }, { "docid": "0423839504e9f2fde4b0e7b193309794", "score": "0.5389282", "text": "def dump_stroke(stroke):\n print \"Stroke Ext: %s\" % stroke.stroke_ext_lookup\n\n if len(stroke.controlpoints) and 'timestamp' in stroke.cp_ext_lookup:\n cp = stroke.controlpoints[0]\n timestamp = stroke.cp_ext_lookup['timestamp']\n start_ts = ' t:%6.1f' % (cp.extension[timestamp] * .001)\n else:\n start_ts = ''\n\n try:\n scale = stroke.extension[stroke.stroke_ext_lookup['scale']]\n except KeyError:\n scale = 1\n\n if 'group' in stroke.stroke_ext_lookup:\n group = stroke.extension[stroke.stroke_ext_lookup['group']]\n else: group = '--'\n\n if 'seed' in stroke.stroke_ext_lookup:\n seed = '%08x' % stroke.extension[stroke.stroke_ext_lookup['seed']]\n else: seed = '-none-'\n\n print \"B:%2d S:%.3f C:#%02X%02X%02X g:%2s s:%8s %s [%4d]\" % (\n stroke.brush_idx, stroke.brush_size * scale,\n int(stroke.brush_color[0] * 255),\n int(stroke.brush_color[1] * 255),\n int(stroke.brush_color[2] * 255),\n #stroke.brush_color[3],\n group, seed,\n start_ts,\n len(stroke.controlpoints))\n\n print \"\\nStroke Control Points: \\n---------------------\"\n for ctrlPt in stroke.controlpoints:\n print \"Position.....: [x: %f, y: %f, z: %f]\" % (ctrlPt.position[0], ctrlPt.position[1], ctrlPt.position[2]) \n print \"Rotation (q).: \", ctrlPt.orientation\n print \"Extension....: %s\" % ctrlPt.extension\n print \"--------------------- \\n\"", "title": "" }, { "docid": "c13ab67d67d920f947fcb9ee5e8693f2", "score": "0.53754455", "text": "def _get_viewFrameStroke(self):\n return self.style.get('viewFrameStroke') # Not inherited", "title": "" }, { "docid": "16b2615de1e3d25e5ddeb9816adf39a2", "score": "0.5364043", "text": "def CreatePen(self, pen):", "title": "" }, { "docid": "16b2615de1e3d25e5ddeb9816adf39a2", "score": "0.536343", "text": "def CreatePen(self, pen):", "title": "" }, { "docid": "b99b8c8b80c2cc606f70a20392fa279b", "score": "0.53600293", "text": "def get_shape():\n global SHAPES, SHAPE_COLORS\n\n return Piece(5, 0, random.choice(SHAPES))", "title": "" }, { "docid": "edba1c8f1b687f8a6f0a605382c26e2a", "score": "0.53588074", "text": "def _create_canvas():\n c = BaseCanvas()\n return c", "title": "" }, { "docid": "fb9c963fb823065c0fd4d3209396fda9", "score": "0.53528285", "text": "def _create_phantom_outline_object(self) -> Tuple[Union[Rectangle, Circle], dict]:\n outline_type = list(self.phantom_outline_object)[0]\n outline_settings = list(self.phantom_outline_object.values())[0]\n settings = {}\n if outline_type == 'Rectangle':\n side_a = self.phantom_radius*outline_settings['width ratio']\n side_b = self.phantom_radius*outline_settings['height ratio']\n half_hyp = np.sqrt(side_a**2 + side_b**2)/2\n internal_angle = ia = np.rad2deg(np.arctan(side_b/side_a))\n new_x = self.phantom_center.x + 
half_hyp*(geometry.cos(ia)-geometry.cos(ia+self.phantom_angle))\n new_y = self.phantom_center.y + half_hyp*(geometry.sin(ia)-geometry.sin(ia+self.phantom_angle))\n obj = Rectangle(width=self.phantom_radius*outline_settings['width ratio'],\n height=self.phantom_radius*outline_settings['height ratio'],\n center=Point(new_x, new_y))\n settings['angle'] = self.phantom_angle\n elif outline_type == 'Circle':\n obj = Circle(center_point=self.phantom_center,\n radius=self.phantom_radius*outline_settings['radius ratio'])\n else:\n raise ValueError(\"An outline object was passed but was not a Circle or Rectangle.\")\n return obj, settings", "title": "" }, { "docid": "b13cf529ec65a533a623c0decf12ed32", "score": "0.5324104", "text": "def _getOutline(self):\n shape = None\n\n outline_dict = self._module_dict.get('outline')\n if outline_dict != None:\n shape_dict = outline_dict.get('shape')\n if shape_dict != None:\n shape = Shape(shape_dict)\n style = Style(shape_dict, 'outline')\n shape.setStyle(style)\n\n return shape", "title": "" }, { "docid": "40a26e5e50a4986cd3a4d4d3f70008c9", "score": "0.53005433", "text": "def test_initial_solid_construction(self):\n\n test_shape = RotateSplineShape(\n points=[(0, 0), (0, 20), (20, 20), (20, 0)],\n rotation_angle=360\n )\n\n assert test_shape.hash_value is None\n assert test_shape.solid is not None\n assert type(test_shape.solid).__name__ == \"Workplane\"\n assert test_shape.hash_value is not None", "title": "" }, { "docid": "bf9638efcfa1184c98dfc72b146a04eb", "score": "0.52640176", "text": "def test_partial_constructor(self):\n\n dot = Dot()\n\n assert dot.colour == [255, 0, 0]", "title": "" }, { "docid": "1d0c26ac89ef0f73e1c9d8d8a2319358", "score": "0.52630407", "text": "def main():\n shapeMaker = ShapeMaker()\n shapeMaker.drawRectangle()\n shapeMaker.drawSquare()\n shapeMaker.drawCircle()", "title": "" }, { "docid": "65ce8f9f753ca3ad4f5a0451b7bed41f", "score": "0.52326465", "text": "def CreatePath(self):\n return dcGraphicsPath(parent=self)", "title": "" }, { "docid": "4e745fe8d61f07b729d901e780fe0e8b", "score": "0.51721543", "text": "def test_solid_return(self):\n\n test_shape = RotateSplineShape(\n points=[(0, 0), (0, 20), (20, 20), (20, 0)],\n rotation_angle=360\n )\n\n assert test_shape.solid is not None\n assert test_shape.hash_value is not None\n initial_hash_value = test_shape.hash_value\n\n assert test_shape.solid is not None\n assert test_shape.hash_value is not None\n assert initial_hash_value == test_shape.hash_value", "title": "" }, { "docid": "e98d206062a3c722d7695e5a3e437e6b", "score": "0.51159257", "text": "def test_constructor(self):\n\n data = {\"colour\": [255, 127, 0]}\n dot = Dot(**data)\n\n assert dot.colour == [255, 127, 0]", "title": "" }, { "docid": "9b37a2be3fce661e28a728ba93e8399a", "score": "0.5108039", "text": "def create_turtle():\n stamper = turtle.Turtle()\n stamper.shape(\"circle\")\n stamper.color(\"green\")\n stamper.penup() # raise the pen so that we do not have a trail\n return stamper # returns a turtle to the line of code where this function was called", "title": "" }, { "docid": "612d8449d6eaa641c41d999f27a12b42", "score": "0.509658", "text": "def __init__ (self):\n self.lineWidth = 1.0\n self.lineCapStyle = wx.CAP_BUTT\n self.lineJoinStyle = wx.JOIN_MITER\n self.lineDashArray = []\n self.lineDashPhase = 0\n self.miterLimit = None\n self.strokeRGB = wx.Colour(0, 0, 0)\n self.fillRGB = wx.Colour(0, 0, 0) # used for both shapes & text\n self.fillMode = None\n\n self.textMatrix = [1, 0, 0, 1, 0, 0]\n self.textLineMatrix = 
[1, 0, 0, 1, 0, 0]\n self.charSpacing = 0\n self.wordSpacing = 0\n self.horizontalScaling = None\n self.leading = None\n self.font = None\n self.fontSize = None\n self.textRenderMode = None\n self.textRise = 0", "title": "" }, { "docid": "93350c0f70f79d68b66c0c5f6c5a45cb", "score": "0.5089275", "text": "def __init__(self):\n\n self.solid_angle_tolerance = 0.1", "title": "" }, { "docid": "576571ac1aa6c7ce7df6ec1e423b3c74", "score": "0.5052309", "text": "def __init__(self):\n self.preciousDeg = 5\n #self.radius = 0.001265\n #self.radiusFactor = 5\n self.beamwidth = 35\n self.transparent = 6\n\n self.rules = dict()\n \n self.supportOptions = ((\"EARFCN\", \"PCI\", \"Type\"), (\"Shape\", \"Fill Color\", \"Line Color\"))\n #self.supportOptions = {\"Shape\":(\"Circle\",\"Sector)\")}", "title": "" }, { "docid": "93c4f4834a5828776ead80b4b3a7cf32", "score": "0.5040514", "text": "def beampipe(self):\n r = Rectangle(w=self.width, h=self.height,\n lineColor=ColorChoice.Black,\n lineWidth=2)\n return r", "title": "" }, { "docid": "33e68291254c6b084118bb0765278055", "score": "0.50213116", "text": "def __init__(self, radius=None, diameter=None):\r\n pg.GraphicsObject.__init__(self)\r\n \r\n self.r = radius\r\n self.d = diameter\r\n self.mkPath()", "title": "" }, { "docid": "cc534dc3d9225c905e88f59eb19482a1", "score": "0.49907133", "text": "def draw_test(self):\n\n margin = 10.\n line_length = 10.\n spacing = 10.\n\n if self.test == 'holes':\n width = max(test_holes) + margin * 2\n height = sum(test_holes) + (len(test_holes) - 2) * \\\n spacing + margin * 2\n print(\"These are the settings of the test holes (hole diameter):\\n%s\" % (\n test_holes))\n elif self.test == 'lines':\n width = line_length + 2 * margin\n height = len(test_lines) * spacing + margin * 2\n print(\"These are the settings of the test lines (amount of lines, spacing):\\n%s\" % (\n test_lines))\n else:\n raise Exception(\"Unknown argument for test. 
Please use 'holes' or 'lines'.\")\n\n drawing = svgwrite.Drawing(size=(\n \"%f%s\" % (width, self.unit),\n \"%f%s\" % (height, self.unit)),\n viewBox=('0 0 %d %d' % (width, height)))\n\n self.drawing = drawing\n\n # create an initial rectangle around the drawing\n border = drawing.rect(size=(width, height),\n stroke=self.colors['cut_stroke'],\n stroke_width=self.linewidth,\n fill=self.colors['background'],\n rx=self.rounded_corners, ry=self.rounded_corners)\n drawing.add(border)\n\n dir_line = 0\n dir_count = 1\n\n if self.test == \"holes\":\n start = (width / 2, margin)\n\n for hole_d in test_holes:\n\n hole = self.drawing.circle(start, r=hole_d / 2, fill='none',\n stroke=self.colors['mark_stroke'], stroke_width=self.linewidth)\n self.drawing.add(hole)\n\n start = (start[0], start[1] + hole_d + spacing)\n elif self.test == \"lines\":\n start = (margin, margin)\n\n for i in range(len(test_lines)):\n end = list(start)\n end[dir_line] += line_length\n\n self.multlines = test_lines[i][0]\n self.multlines_spacing = test_lines[i][1]\n\n self.draw_multlines(\n start, end, dir_count)\n\n start = (start[0], start[1] + spacing)", "title": "" }, { "docid": "7da4184d2d555e441060312fe484f710", "score": "0.498416", "text": "def mouseShape(self):\r\n shape = self.shape()\r\n ds = self.mapToDevice(shape)\r\n stroker = QtGui.QPainterPathStroker()\r\n stroker.setWidh(2)\r\n ds2 = stroker.createStroke(ds).united(ds)\r\n return self.mapFromDevice(ds2)", "title": "" }, { "docid": "dad83e30fea20e058ae68bcde9deeb44", "score": "0.49832934", "text": "def test_style_default_polygon(self):\n style = Style()\n viz = style.compute_viz('polygon')\n\n self.assertIn('color: hex(\"#826DBA\")', viz)\n self.assertIn('strokeWidth: ramp(linear(zoom(),2,18),[0.5,1])', viz)\n self.assertIn('strokeColor: opacity(#2c2c2c,ramp(linear(zoom(),2,18),[0.2,0.6]))', viz)", "title": "" }, { "docid": "87d30d19752e725f3746b60a9d179820", "score": "0.49638152", "text": "def StrokePath(self, path):", "title": "" }, { "docid": "456e5ed4f005ee8662bcc58dce9696ec", "score": "0.49626166", "text": "def stroke(self, r, g=None, b=None, a=None):\n if g and b is None:\n Py5.log_print('If function is called with more than 1 arg, it must be called with all 3', 'stroke', 'Py5')\n return\n if g is not None and b is not None and a is not None:\n self.stroke_color = pygame.Color(r, g, b, a)\n # if self.COLOR_MODE == 'rgb':\n # self.stroke_color = pygame.Color(r, g, b, a)\n # else:\n # self.stroke_color = pygame.Color.hsla(r, g, b)\n elif g is not None and b is not None:\n self.stroke_color = pygame.Color(r, g, b, 255)\n # if self.COLOR_MODE == 'rgb':\n # self.stroke_color = pygame.Color(r, g, b, 255)\n # else:\n # self.stroke_color = pygame.Color.hsla(r, g, b)\n else:\n if isinstance(r, tuple):\n self.stroke_color = pygame.Color(r[0], r[1], r[2])\n # if self.COLOR_MODE == 'rgb':\n # self.stroke_color = pygame.Color(r[0], r[1], r[2])\n # else:\n # self.stroke_color = pygame.Color.hsla(r[0], r[1], r[2])\n else:\n self.stroke_color = pygame.Color(r, r, r, 255)\n # if self.COLOR_MODE == 'rgb':\n # self.stroke_color = pygame.Color(r, r, r, 255)\n # else:\n # self.stroke_color = pygame.Color.hsla(r, r, r)\n if self.stroke_size < 0:\n self.stroke_size = 0\n self._stroke = False\n self._stroke = True", "title": "" }, { "docid": "8ff5984390443018b33f697a106d1582", "score": "0.49581483", "text": "def sewShape(self):\n ...", "title": "" }, { "docid": "6e4ac4a86750e094e9a1fae9117dc1bd", "score": "0.49512506", "text": "def shape(self):\n return 
TekAWG2000.Shape[self._tek.query(\"FG:{}:SHAP?\".format(\n self._name)).strip().split(',')[0]]", "title": "" }, { "docid": "07d01975a90888be8f3e345295775f01", "score": "0.49511957", "text": "def classFactory(iface): # pylint: disable=invalid-name\n #\n from .create_bathymetry import BathCreator\n return BathCreator(iface)", "title": "" }, { "docid": "91107577004b762e71574561760de9c1", "score": "0.4922384", "text": "def __init__(self, dc, pen):", "title": "" }, { "docid": "0984c21cace36e10293cebf93e3db2c2", "score": "0.48979187", "text": "def ambient(self):\n return WeightRing(self)", "title": "" }, { "docid": "a68a70b586d3b9750e8007d873d7281c", "score": "0.48977816", "text": "def test_init() -> None:\n m = sm.SepalMap()\n draw_control = sm.DrawControl(m)\n assert isinstance(draw_control, sm.DrawControl)\n\n return", "title": "" }, { "docid": "c3e01a86486a7c99cc02a7f67ccf1030", "score": "0.4886971", "text": "def skew(self):\n return XYZ_Knob()", "title": "" }, { "docid": "5e5692b69e5f1bfcad3c1240a7a67b8b", "score": "0.4882261", "text": "def get_colour(self):", "title": "" }, { "docid": "78fad657a3a4269fbc047edc18e5a7f1", "score": "0.4881066", "text": "def test_creation(self):\n\n assert self.test_shape.solid is not None", "title": "" }, { "docid": "0ba97fbb1e08ad547774e0883092fe3b", "score": "0.4865275", "text": "def test_style_default_point(self):\n style = Style()\n viz = style.compute_viz('point')\n\n self.assertIn('color: hex(\"#EE4D5A\")', viz)\n self.assertIn('width: ramp(linear(zoom(),0,18),[2,10])', viz)\n self.assertIn('strokeWidth: ramp(linear(zoom(),0,18),[0,1])', viz)\n self.assertIn('strokeColor: opacity(#222,ramp(linear(zoom(),0,18),[0,1]))', viz)", "title": "" }, { "docid": "8a5c819daa4ef4f53ec1e07c58edee83", "score": "0.48636803", "text": "def hatchling_spawner(name):\n class TurtleStandIn:\n \"\"\"Pretend like there's many nice methods to override in here.\"\"\"\n\n def import_test():\n print(\"I imported correctly.\")\n\n TurtleStandIn.__name__ = name\n return TurtleStandIn", "title": "" }, { "docid": "672d38d8338a0c902beca3d927e980ae", "score": "0.48603755", "text": "def test_start_point1(self):\n\n self.filled_canvas.clear_canvas()\n self.filled_canvas.add_shape(self.shape)\n self.filled_canvas.add_shape(self.wide)\n self.new_tall = model.Shape(-1, 1, -1, 20, '@')\n self.filled_canvas.add_shape(self.new_tall)\n expected_result = (\n \"@++++\\n\"\n \"@.***\\n\"\n \"@....\\n\"\n \"@....\\n\"\n )\n self.assertEqual(self.filled_canvas._as_string(), expected_result)", "title": "" }, { "docid": "04f10b5ef4756f1083fad8fdb766b871", "score": "0.48569232", "text": "def draw_path(self):\n raise('draw_path() method must be implemented for classes that inherit from the Pattern class')\n return", "title": "" }, { "docid": "9b9a6561be933a659b2fc7988a8ac1d4", "score": "0.4850931", "text": "def testInstantiation(self):\n self.assert_(TexturedRaytracer)", "title": "" }, { "docid": "6286997f3134102fd5490a713b8b2230", "score": "0.4845072", "text": "def surface():\n return MockSurface()", "title": "" }, { "docid": "1259ea335bcfb9c406c5dc3491dd516e", "score": "0.48376742", "text": "def test_create_new_shape(self):\n shp = Shape()\n assert shp.type == None", "title": "" }, { "docid": "b1e89f14708308f9bef90826c6d2023f", "score": "0.48358792", "text": "def _make_line(self):\n return GPath(points = [0,DEFENSE_LINE,800,DEFENSE_LINE],\n linewidth = 2, linecolor = 'white')", "title": "" }, { "docid": "2ae69b59092d39e828d86971ab0f9b90", "score": "0.4834523", "text": "def create_pykemon(self):\n 
health = random.randint(70, 100)\n speed = random.randint(1, 10)\n\n element = self.pykemon_elements[random.randint(\n 0, len(self.pykemon_elements) - 1)]\n name = self.pykemon_names[random.randint(\n 0, len(self.pykemon_names) - 1)]\n\n if element == 'FIRE':\n pykemon = Fire(name, element, health, speed)\n elif element == 'WATER':\n pykemon = Water(name, element, health, speed)\n else:\n pykemon = Grass(name, element, health, speed)\n return pykemon", "title": "" }, { "docid": "1cdff7d1342d9f59e8460ac5723de8d7", "score": "0.48296002", "text": "def test_external_builder_prototype_color(self):\n\n\t\tcube_red = self.small_red_cube.get_color()\n\t\tself.assertEqual(cube_red.get_red(), self.test_color_data[\"red\"][\"red\"])\n\t\tself.assertEqual(cube_red.get_green(), self.test_color_data[\"red\"][\"green\"])\n\t\tself.assertEqual(cube_red.get_blue(), self.test_color_data[\"red\"][\"blue\"])\n\n\t\tsphere_blue = self.large_blue_sphere.get_color()\n\t\tself.assertEqual(sphere_blue.get_red(), self.test_color_data[\"blue\"][\"red\"])\n\t\tself.assertEqual(sphere_blue.get_green(), self.test_color_data[\"blue\"][\"green\"])\n\t\tself.assertEqual(sphere_blue.get_blue(), self.test_color_data[\"blue\"][\"blue\"])", "title": "" }, { "docid": "ff33db81749004c86c0f0881395e100f", "score": "0.48284167", "text": "def get_shape(cls, type_: str) -> Shape:\n if type_ not in cls._cached_shapes:\n return None\n prototype = cls._cached_shapes[type_]\n return copy.deepcopy(prototype)", "title": "" }, { "docid": "5a1d297ef43744b93b442fd65dec42c6", "score": "0.48273876", "text": "def cairo_drawing_test(self): \n # Reset background\n self.cairo_context.set_source_rgba(1, 1, 1, 1)\n self.cairo_context.paint()\n\n self.cairo_context.set_line_width(100)\n self.cairo_context.arc(320, 240, 200, 0, 1.9 * math.pi)\n \n self.cairo_context.set_source_rgba(1, 0, 0, random.random())\n self.cairo_context.fill_preserve()\n \n self.cairo_context.set_source_rgba(0, 1, 0, 0.5)\n self.cairo_context.stroke()", "title": "" }, { "docid": "474e6f4908eee6f34e6bae66156c0dc6", "score": "0.48175007", "text": "def test_start_point2(self):\n\n self.filled_canvas.clear_canvas()\n self.new_tall = model.Shape(2, 1, 2, 20, '@')\n self.filled_canvas.add_shape(self.shape)\n self.filled_canvas.add_shape(self.wide)\n self.filled_canvas.add_shape(self.new_tall)\n expected_result = (\n \"@++++\\n\"\n \"@.***\\n\"\n \"@....\\n\"\n \"@....\\n\"\n )\n self.assertFalse(self.filled_canvas._as_string() == expected_result)", "title": "" }, { "docid": "5ffd35d32b543fb6177e8bdabb86736a", "score": "0.4805434", "text": "def test_bicycle_color():\n\n bike = Bicycle(\"Razor\", \"green\")\n\n assert bike.color == \"green\"", "title": "" }, { "docid": "4ef9c9413c5b45c1329f3e7cab715bfa", "score": "0.4796841", "text": "def getSketch(self):\n\t\tpass", "title": "" }, { "docid": "66ccc6eb98e728c7c90222290a376d60", "score": "0.47914097", "text": "def __init__(self):\r\n super().__init__()\r\n self.shape(\"turtle\")\r\n self.penup()\r\n self.shapesize(stretch_wid=0.6, stretch_len=0.6)\r\n self.color(\"yellow\")\r\n self.new_food()", "title": "" }, { "docid": "b952c805da75f94271b2f69c1235b269", "score": "0.4785803", "text": "def __init__(self, colour, shape):\r\n self.colour = colour\r\n self.shape = shape\r\n self.x = None\r\n self.y = None", "title": "" }, { "docid": "81bdb108eed180e4995f571343ae3668", "score": "0.47851261", "text": "def get_shape():\n return Shape(5, 0, random.choice(shapes))", "title": "" }, { "docid": "e5fba88b8ac759e6f01265e5f3f7a48d", "score": 
"0.4782466", "text": "def testColorObjConstructorNoArgs(self):\n c = mapscript.colorObj()\n assert (c.red, c.green, c.blue, c.pen) == (0, 0, 0, -4)", "title": "" }, { "docid": "1889cb5d21e5e011a6ce584dc31f3cb9", "score": "0.47815615", "text": "def internal_shape(self):\n return ScaledShape(shape_in=self.solid, reference_point=self.solid.cog, factor=1,\n transparency=0.7, hidden=True)", "title": "" }, { "docid": "4f7122f72b3c149e664822e2e1f60316", "score": "0.47786316", "text": "def __init__(self):\n Color.__init__(self, 'EMPTY')", "title": "" }, { "docid": "4ba95da021ff38d35aa08aaf9a4cf368", "score": "0.477801", "text": "def GetStaticBox(self):", "title": "" }, { "docid": "0ad9d92620e963c9c53a238bc3f01bd5", "score": "0.4774712", "text": "def __init__(self, mod=0):\n self.WHITE = (255, 255, 255)\n self.BLACK = (0, 0, 0)\n self.BLUE = (0, 128, 255)\n self.GREY = (128, 128, 128)\n self.RED = (255, 0, 0)\n self.GREEN = (0, 255, 0)\n self.PURPLE = (191, 87, 231)\n self.PINK = (255, 20, 147)\n self.YELLOW = (252, 237, 135)\n self.WIN = self.GREEN\n self.VERBOSE = self.GREY\n\n terse_colors = [(225, 17, 188), (199, 17, 188), (177, 17, 188), (158, 17, 188), (143, 17, 188)]\n self.TERSE = terse_colors[mod % 5]", "title": "" }, { "docid": "47abaa2279daaa6131f415cf571af190", "score": "0.47740278", "text": "def __init__(self, myRadius=20, myColor='#FFFFFF'):\n # Implement with a human so that: \n self.radius = myRadius\n self.color = myColor", "title": "" }, { "docid": "4a67d1f5f0f8245e8fbf7996caf86aa3", "score": "0.47690198", "text": "def draw(self):\n color = self.settings[\"colors\"][self.obj_type]\n return svg.Circle(self.position + Point2(10, 10), self.radius, color=color)", "title": "" }, { "docid": "6b67b99d4a76f5303c4da108f1f09909", "score": "0.47677562", "text": "def __init__(self):\n self.name = \"Earth\"\n self.mass = 5.972 * 1e24 * um\n self.radius = 6051.8 * 1e3 * ud\n self.distance = 149.6 * 1e9 * ud\n self.speed = 107243 * 1e3 / 3600 * ud\n self.motion = self.computeMotion()\n self.color = mycolors.GREEN", "title": "" }, { "docid": "07b716b3338ffe91f606880e5fb0e2af", "score": "0.4764633", "text": "def main_color(self):", "title": "" }, { "docid": "07b716b3338ffe91f606880e5fb0e2af", "score": "0.4764633", "text": "def main_color(self):", "title": "" }, { "docid": "b663a68997600b9062cca60a34d43b84", "score": "0.47636265", "text": "def __init__(self, simpleSmoothing=None):\n if simpleSmoothing is None:\n self.__simpleSmoothing = GoodTuringSmoothing()\n else:\n self.__simpleSmoothing = simpleSmoothing", "title": "" }, { "docid": "837330365c4157c3b0160d8ebfd1f747", "score": "0.47585487", "text": "def __init__(self, x=0, y=-370, color=\"White\"):\r\n super().__init__()\r\n self.hideturtle()\r\n self.penup()\r\n self.shape(\"square\")\r\n self.color(color)\r\n self.shapesize(stretch_wid=.5, stretch_len=4)\r\n self.goto(x, y)", "title": "" }, { "docid": "4eeafa2be13ce8ddce676fefc87e321f", "score": "0.4758253", "text": "def __init__(self, utility, my_color, no_more_time, calmness_factor):\n self.utility = utility\n self.my_color = my_color\n self.no_more_time = no_more_time\n self.calmness_factor = calmness_factor", "title": "" }, { "docid": "ef058eecd0dc14f9376811d2f6838b9f", "score": "0.4749389", "text": "def GetColour(self):", "title": "" }, { "docid": "ef058eecd0dc14f9376811d2f6838b9f", "score": "0.4749389", "text": "def GetColour(self):", "title": "" }, { "docid": "ef058eecd0dc14f9376811d2f6838b9f", "score": "0.4749389", "text": "def GetColour(self):", "title": "" }, { "docid": 
"ef058eecd0dc14f9376811d2f6838b9f", "score": "0.47490883", "text": "def GetColour(self):", "title": "" }, { "docid": "ef058eecd0dc14f9376811d2f6838b9f", "score": "0.47490883", "text": "def GetColour(self):", "title": "" }, { "docid": "ef058eecd0dc14f9376811d2f6838b9f", "score": "0.47488922", "text": "def GetColour(self):", "title": "" }, { "docid": "ef058eecd0dc14f9376811d2f6838b9f", "score": "0.4748258", "text": "def GetColour(self):", "title": "" }, { "docid": "ef058eecd0dc14f9376811d2f6838b9f", "score": "0.47477603", "text": "def GetColour(self):", "title": "" }, { "docid": "4b0f5b477d6251fffa047778fd318fa7", "score": "0.47471276", "text": "def makeThickness(self):\n ...", "title": "" }, { "docid": "3208bba6dc0dec91edc2c6027c2f4546", "score": "0.47467378", "text": "def draw(self):\n print(' BorderType')\n return 'BorderType'", "title": "" }, { "docid": "0e27f7e848d0baec79369831f40f0ae0", "score": "0.47431365", "text": "def FindOrCreatePen(self, colour, width=1, style=PENSTYLE_SOLID):", "title": "" }, { "docid": "fb2d8cbf628566c604b3e4f6e4e75606", "score": "0.4741599", "text": "def make_polygon(cls, color=None):\n product_name = random.choice(cls.products())\n this_module = __import__(__name__)\n polygon_class = getattr(this_module, product_name)\n polygon = polygon_class(factory_name=cls.__name__)\n if color is not None:\n polygon.color = color\n return polygon", "title": "" }, { "docid": "0d977895a7ba9b277bfca05cdbc99b40", "score": "0.47363675", "text": "def __init__(\n self, color: str = \"\", fill: Boolean = ON, style: SymbolicConstant = SOLID\n ):\n pass", "title": "" }, { "docid": "a04cd468833c996ca6260ec4e327c4c8", "score": "0.47339603", "text": "def test_issue_178(self):\n\n q = io.StringIO(u'''<svg xmlns=\"http://www.w3.org/2000/svg\" viewBox=\"0 0 80 80\">\n <defs>\n <style>\n //this is a comment.\n .cls-1,.cls-2{fill:none;stroke-miterlimit:10;}.cls-1{stroke:blue;}.cls-2{stroke:red;}\n //.cls-2{stroke:pink;}\n /* Testing this should be functional */\n </style>\n </defs>\n <g id=\"Layer_2\" data-name=\"Layer 2\">\n <g id=\"Layer_1-2\" data-name=\"Layer 1\">\n <polygon points=\"56.59 67.4 39.86 57.28 23.01 67.22 26.34 45.99 12.83 30.88 31.62 27.88 40.12 8.6 48.41 27.97 67.17 31.17 53.5 46.14 56.59 67.4\"/>\n <circle class=\"cls-1\" cx=\"40\" cy=\"40\" r=\"35\"/>\n <circle class=\"cls-2\" cx=\"40\" cy=\"40\" r=\"39.5\"/>\n </g>\n </g>\n </svg>''')\n m = SVG.parse(q)\n poly = m[0][0][0]\n circ1 = m[0][0][1]\n circ2 = m[0][0][2]\n\n self.assertEqual(poly.fill, \"black\")\n self.assertEqual(poly.stroke, \"none\")\n\n self.assertEqual(circ1.fill, \"none\")\n self.assertEqual(circ1.stroke, \"blue\")\n\n self.assertEqual(circ2.fill, \"none\")\n self.assertEqual(circ2.stroke, \"red\")", "title": "" }, { "docid": "bbcdc25a8918b1754c6ec713a8ce5c7b", "score": "0.47272366", "text": "def createShapeCtrl(type, name='C_test_ctrl', scale=1, color='yellow'):\n controller = \"\"\n if(type == \"locator\"):\n controller=str(pm.curve(p=[(0, 2, 0), (0, -2, 0), (0, 0, 0), (0, 0, 2), (0, 0, -2), (0, 0, 0), (2, 0, 0), (-2, 0, 0)],k=[0, 1, 2, 3, 4, 5, 6, 7],d=1,name=name))\n elif(type == \"centricArrows\"):\n controller=str(pm.curve(p=[(0, 0, 0), (32, 0, -5.333333), (32, 0, 5.333333), (0, 0, 0), (-32, 0, -5.333333), (-32, 0, 5.333333), (0, 0, 0), (-5.333333, 32, 0), (5.333333, 32, 0), (0, 0, 0), (-5.333333, -32, 0), (5.333333, -32, 0), (0, 0, 0), (0, 32, 5.333333), (0, 32, -5.333333), (0, 0, 0), (0, -32, 5.333333), (0, -32, -5.333333), (0, 0, 0), (32, 5.333333, 0), (32, -5.333333, 0), (0, 0, 0), (-32, 
5.333333, 0), (-32, -5.333333, 0), (0, 0, 0), (-5.333333, 0, -32), (5.333333, 0, -32), (0, 0, 0), (-5.333333, 0, 32), (5.333333, 0, 32), (0, 0, 0), (0, 5.333333, -32), (0, -5.333333, -32), (0, 0, 0), (0, 5.333333, 32), (0, -5.333333, 32), (0, 0, 0)],k=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36],d=1,name=name)) \n elif(type == \"box\") or (type == \"cube\"):\n pointPosList = [(0.5, 0.5, 0.5), (0.5, 0.5, -0.5), (-0.5, 0.5, -0.5), (-0.5, -0.5, -0.5), (0.5, -0.5, -0.5), (0.5, 0.5, -0.5), (-0.5, 0.5, -0.5), (-0.5, 0.5, 0.5), (0.5, 0.5, 0.5), (0.5, -0.5, 0.5), (0.5, -0.5, -0.5), (-0.5, -0.5, -0.5), (-0.5, -0.5, 0.5), (0.5, -0.5, 0.5), (-0.5, -0.5, 0.5), (-0.5, 0.5, 0.5)]\n controller=pm.curve(p=pointPosList, d=1,name=name, k=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])\n elif(type == \"jack\"):\n pointPosList = [(0, 0, 0), (0.75, 0, 0), (1, 0.25, 0), (1.25, 0, 0), (1, -0.25, 0), (0.75, 0, 0), (1, 0, 0.25), (1.25, 0, 0), (1, 0, -0.25), (1, 0.25, 0), (1, 0, 0.25), (1, -0.25, 0), (1, 0, -0.25), (0.75, 0, 0), (0, 0, 0), (-0.75, 0, 0), (-1, 0.25, 0), (-1.25, 0, 0), (-1, -0.25, 0), (-0.75, 0, 0), (-1, 0, 0.25), (-1.25, 0, 0), (-1, 0, -0.25), (-1, 0.25, 0), (-1, 0, 0.25), (-1, -0.25, 0), (-1, 0, -0.25), (-0.75, 0, 0), (0, 0, 0), (0, 0.75, 0), (0, 1, -0.25), (0, 1.25, 0), (0, 1, 0.25), (0, 0.75, 0), (-0.25, 1, 0), (0, 1.25, 0), (0.25, 1, 0), (0, 1, 0.25), (-0.25, 1, 0), (0, 1, -0.25), (0.25, 1, 0), (0, 0.75, 0), (0, 0, 0), (0, -0.75, 0), (0, -1, -0.25), (0, -1.25, 0), (0, -1, 0.25), (0, -0.75, 0), (-0.25, -1, 0), (0, -1.25, 0), (0.25, -1, 0), (0, -1, -0.25), (-0.25, -1, 0), (0, -1, 0.25), (0.25, -1, 0), (0, -0.75, 0), (0, 0, 0), (0, 0, -0.75), (0, 0.25, -1), (0, 0, -1.25), (0, -0.25, -1), (0, 0, -0.75), (-0.25, 0, -1), (0, 0, -1.25), (0.25, 0, -1), (0, 0.25, -1), (-0.25, 0, -1), (0, -0.25, -1), (0.25, 0, -1), (0, 0, -0.75), (0, 0, 0), (0, 0, 0.75), (0, 0.25, 1), (0, 0, 1.25), (0, -0.25, 1), (0, 0, 0.75), (-0.25, 0, 1), (0, 0, 1.25), (0.25, 0, 1), (0, 0.25, 1), (-0.25, 0, 1), (0, -0.25, 1), (0.25, 0, 1), (0, 0, 0.75)]\n controller=pm.curve(p=pointPosList, d=1,n=name, k=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83])\n elif(type == \"sphere\"):\n pointPosList = [(0, 0, 1), (0, 0.5, 0.866025), (0, 0.866025, 0.5), (0, 1, 0), (0, 0.866025, -0.5), (0, 0.5, -0.866025), (0, 0, -1), (0, -0.5, -0.866025), (0, -0.866025, -0.5), (0, -1, 0), (0, -0.866025, 0.5), (0, -0.5, 0.866025), (0, 0, 1), (0.707107, 0, 0.707107), (1, 0, 0), (0.707107, 0, -0.707107), (0, 0, -1), (-0.707107, 0, -0.707107), (-1, 0, 0), (-0.866025, 0.5, 0), (-0.5, 0.866025, 0), (0, 1, 0), (0.5, 0.866025, 0), (0.866025, 0.5, 0), (1, 0, 0), (0.866025, -0.5, 0), (0.5, -0.866025, 0), (0, -1, 0), (-0.5, -0.866025, 0), (-0.866025, -0.5, 0), (-1, 0, 0), (-0.707107, 0, 0.707107), (0, 0, 1)]\n controller=str(pm.curve(p=pointPosList, d=1,n=name, k=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]))\n elif(type == \"circle\"):\n controller=pm.circle(r=scale, n=name, nrx=1, nry=0, nrz=0)[0]\n pm.delete(controller, ch=1)\n elif(type == \"crossAxis\"):\n controller=Shapes().poleCross(name)\n\n elif(type == 
'crossPaddle'):\n pointPosList = [(0, 0, 0), (0, 0, 0), (0, 1.26, 0), (0, 1.45, 0.45), (0, 1.89, 0.62), (0, 2.35, 0.45), (0, 2.53, -0.013), (0, 2.35, -0.45), (0, 1.90, -0.64), (0, 1.45, -0.45), (0, 1.26, 0), (0, 1.45, 0.45), (0, 2.35, -0.45), (0, 1.90, -0.64), (0, 1.45, -0.45), (0, 2.35, 0.45)]\n controller= pm.curve(p=pointPosList, n=name, k=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], d=1)\n elif(type == \"arrow\"):\n controller=Shapes().arrow(name)\n elif(type == \"PLACER\"):\n controller=Shapes().placer()\n elif(type == \"MOVER\"):\n pointPosList =[pm.dt.Point([-3.360645, 0.0, -3.360645]), pm.dt.Point([-3.37686, 0.0, -3.34443]), pm.dt.Point([-4.247985, 0.0, -2.45985]), pm.dt.Point([-5.257455, 0.0, 0.023115]), pm.dt.Point([-3.724275, 0.0, 3.724275]), pm.dt.Point([0.0, 0.0, 5.267115]), pm.dt.Point([3.724275, 0.0, 3.724275]), pm.dt.Point([5.258145, 0.0, 0.02139]), pm.dt.Point([4.247295, 0.0, -2.461575]), pm.dt.Point([3.375825, 0.0, -3.345465]), pm.dt.Point([3.360645, 0.0, -3.360645]), pm.dt.Point([3.349605, 0.0, -3.349605]), pm.dt.Point([3.186765, 0.0, -3.18711]), pm.dt.Point([3.014265, 0.0, -3.0153]), pm.dt.Point([2.85315, 0.0, -2.85453]), pm.dt.Point([2.84349, 0.0, -2.84487]), pm.dt.Point([2.85315, 0.0, -2.834865]), pm.dt.Point([3.59076, 0.0, -2.08656]), pm.dt.Point([4.45188, 0.0, 0.014145]), pm.dt.Point([3.150885, 0.0, 3.150885]), pm.dt.Point([0.0, 0.0, 4.45671]), pm.dt.Point([-3.150885, 0.0, 3.150885]), pm.dt.Point([-4.448775, 0.0, 0.021735]), pm.dt.Point([-3.593865, 0.0, -2.07897]), pm.dt.Point([-2.85867, 0.0, -2.829345]), pm.dt.Point([-2.84349, 0.0, -2.84487]), pm.dt.Point([-2.853495, 0.0, -2.854875]), pm.dt.Point([-3.013575, 0.0, -3.014265]), pm.dt.Point([-3.18573, 0.0, -3.18642]), pm.dt.Point([-3.348225, 0.0, -3.348225])]\n controller = pm.curve(p=pointPosList, name=name, d=3)\n elif(type == \"DIRECTION\"):\n pointPosList = [pm.dt.Point([-2.4, 0.0, -2.4]), pm.dt.Point([2.4, 0.0, -2.4]), pm.dt.Point([2.4, 0.0, 2.4]), pm.dt.Point([0.0, 0.0, 3.6]), pm.dt.Point([-2.4, 0.0, 2.4])]\n controller = pm.curve(p=pointPosList, name=name, d=1)\n pm.closeCurve(name, ps=1, ch=1, bb=0.5, bki=0, p=0.1, rpo=1)\n elif(type == \"squareAxis45\"):\n pointPosList = [pm.dt.Point([-0.0183221732195, 0.0220372618933, -0.0490237893162]),\n pm.dt.Point([6.98167782678, 7.02203726189, -0.0490237893162]),\n pm.dt.Point([6.98167782678, 10.0220372619, -0.0490237893162]),\n pm.dt.Point([9.98167782678, 10.0220372619, -0.0490237893162]),\n pm.dt.Point([9.98167782678, 7.02203726189, -0.0490237893162]),\n pm.dt.Point([6.98167782678, 7.02203726189, -0.0490237893162])]\n controller = pm.curve(p=pointPosList, name=name, d=1)\n else:\n pm.warning('No type %s found. 
Doing nothing.'%type)\n return None\n\n # scale and shade\n crvshapes = pm.listRelatives(controller, children=1, type='nurbsCurve')\n for crvshape in crvshapes:\n pm.xform('%s.cv[0:]'%crvshape, s=(scale, scale, scale), r=1)\n\n colorOverride(color, [controller])\n \n return pm.PyNode(controller)", "title": "" }, { "docid": "1ca0d7faee33f96222a869a3689260f1", "score": "0.4720926", "text": "def GetStipple(self):", "title": "" }, { "docid": "7341a46220f3c0b0f240b8e611f6912f", "score": "0.47204852", "text": "def as_outline(self):\n # do the import in function to keep soft dependency\n from .path.creation import box_outline\n # return outline with same size as primitive\n return box_outline(\n extents=self.primitive.extents,\n transform=self.primitive.transform)", "title": "" }, { "docid": "1ca0d7faee33f96222a869a3689260f1", "score": "0.47189635", "text": "def GetStipple(self):", "title": "" }, { "docid": "3f1a89dcbfc574557dde4246734137dd", "score": "0.47166023", "text": "def __init__(self, w: object = 1, h: object = 1, t: object = \"rectangle\") -> object:\r\n Shape.__init__(self, [w, h], t)", "title": "" }, { "docid": "94cb2931f633691f15016993abd2eb7f", "score": "0.4716294", "text": "def ring(self):\r\n pass", "title": "" }, { "docid": "dad74ce8c2d9a1e154e135319a919030", "score": "0.47160915", "text": "def test_solid_01(self):\n pass", "title": "" }, { "docid": "01ededd516d7a4efab8ae1ae0d927f2c", "score": "0.4714911", "text": "def GetRenderer(self):", "title": "" }, { "docid": "8828320b2f4323701b16db24843f15fd", "score": "0.47126523", "text": "def makeArrow(width, gap):\n \n # Length of bar perpendicular to the arrow's shaft\n base_length = 1.8\n \n # Height of arrow's head\n arrow_height = 2.5\n \n # Width of arrow's head\n arrow_base = 1.2\n \n # Create path\n path = \"m %s,%s %s,%s m %s,%s %s,%s m %s,%s %s,%s m %s,%s %s,%s m %s,%s m %s,%s %s,%s m %s,%s %s,%s m %s,%s %s,%s m %s,%s %s,%s\" % (-gap/2,0, -width/2+gap/2,0, 0,base_length/2, 0,-base_length, arrow_height,(base_length-arrow_base)/2, -arrow_height,arrow_base/2, arrow_height,arrow_base/2, -arrow_height,-arrow_base/2, width/2,0, gap/2,0, width/2-gap/2,0, 0,base_length/2, 0,-base_length, -arrow_height,(base_length-arrow_base)/2, arrow_height,arrow_base/2, -arrow_height,arrow_base/2, arrow_height,-arrow_base/2,) \n \n return path", "title": "" }, { "docid": "bb09b1884b5a89b0d8b147aa64c4373e", "score": "0.47125855", "text": "def draw_outline(o, lw):\n \n o.set_path_effects([patheffects.Stroke(\n linewidth=lw, foreground='black'), patheffects.Normal()])", "title": "" }, { "docid": "a7b9d610805c46b818ae1211e8cec359", "score": "0.471117", "text": "def __init__(self):\n self.name = \"Sun\"\n self.mass = 1.989 * 1e30 * um\n self.radius = 695510 * 1e3 * ud\n self.motion = Motion()\n self.color = mycolors.YELLOW", "title": "" }, { "docid": "683ef8e5ef99de85f1db7ce2c8ebf7be", "score": "0.47091144", "text": "def kml(self):\n kml = simplekml.Kml()\n geom = self.geom\n if geom.geom_type == 'Point':\n geom = geom.buffer(self.species.radius or settings.SENSITIVITY_DEFAULT_RADIUS, 4)\n if self.species.radius:\n geometry = ()\n for coords in geom.coords[0]:\n coords += (self.species.radius, )\n geometry += (coords, )\n geom = GEOSGeometry(Polygon(geometry), srid=settings.SRID)\n geom = geom.transform(4326, clone=True) # KML uses WGS84\n line = kml.newpolygon(name=self.species.name,\n description=plain_text(self.description),\n altitudemode=simplekml.AltitudeMode.relativetoground,\n outerboundaryis=simplify_coords(geom.coords[0]))\n 
line.style.linestyle.color = simplekml.Color.red # Red\n line.style.linestyle.width = 4 # pixels\n return kml.kml()", "title": "" } ]
02dd63739c339d90408253e7a3b72ff9
test user can't register twice
[ { "docid": "671b34155f4e7e9e3f48771f946fc9e0", "score": "0.77129304", "text": "def test_user_only_registers_once(self):\n\n\t\tuser_data = {'username': 'test', 'password': 'test1023'}\n\t\tresult = self.app.post('/api/v1/auth/register', data = json.dumps(user_data), content_type = 'application/json')\n\t\tself.assertIn('success', str(result.data))\n\t\tres = self.app.post('/api/v1/auth/register', data = json.dumps(user_data), content_type = 'application/json')\n\t\tself.assertEqual(res.status_code, 202)", "title": "" } ]
[ { "docid": "6cc28e7b43510fbd583a6120a419a875", "score": "0.79305637", "text": "def test_already_registered_user(self):\n\n res = self.client.post('/auth/register', data=self.user_data)\n self.assertEqual(res.status_code, 201)\n second_res = self.client.post('/auth/register', data=self.user_data)", "title": "" }, { "docid": "dd3b610504bc693d65a61463790b1b40", "score": "0.79051304", "text": "def test_user_already_registered(self):\n\n\t\tres = self.register_user()\n\t\tself.assertEqual(res.status_code, 201)\n\t\trev = self.register_user()\n\t\tself.assertEqual(rev.status_code, 202)", "title": "" }, { "docid": "cd4c9f1606f701ee117c2b461f5990c2", "score": "0.7891906", "text": "def test_cannot_signup_more_than_once(self):\n signup(self.username, self.password,\n self.conf_pass, self.user_category)\n results = signup(self.username, self.password,\n self.conf_pass, self.user_category)\n self.assertEqual(results, 'user already exists')", "title": "" }, { "docid": "de8e4a6e12118295dcd7cfb7513644a4", "score": "0.76455534", "text": "def test_already_registered_user(self):\n self.test_client.post(self.register_url, data=json.dumps(self.user_data),\n content_type='application/json')\n second_res = self.test_client.post(self.register_url, data=json.dumps(self.user_data))\n self.assertEqual(second_res.status_code, 400)", "title": "" }, { "docid": "541e0dd2dda665b3292a9593baf2d27e", "score": "0.7613534", "text": "def test_duplicate_users(self):\n self.success_user.register_user(self.user_list)\n response = self.success_user.register_user(self.user_list)\n self.assertEqual(response['message'], 'User already exist!')", "title": "" }, { "docid": "e46a7d880ff7837129e31a9bcc55e126", "score": "0.7578377", "text": "def test_already_registered_user(self):\n res = self.client().post('/api/v1/auth/register', data=self.user_data)\n self.assertEqual(res.status_code, 201)\n second_res = self.client().post('/api/v1/auth/register', data=self.user_data)\n self.assertEqual(second_res.status_code, 409)\n # get the results returned in json format\n result = json.loads(second_res.data.decode())\n self.assertEqual(\n result['message'], \"User already exists. 
Please login.\")", "title": "" }, { "docid": "d5e0772d643af2aa74025a03172224b7", "score": "0.74988645", "text": "def test_already_registered_user(self):\n res = self.client().post('/api/v1/auth/register', data=self.user_data_2)\n self.assertEqual(res.status_code, 201)\n second_res = self.client().post('/api/v1/auth/register', data=self.user_data_2)\n self.assertEqual(second_res.status_code, 202)\n # get the results returned in json format\n result = json.loads(second_res.data.decode())\n self.assertEqual(\n result['message'], \"User with this username already exists kindly try another one!.\")", "title": "" }, { "docid": "bf0217b111026f94cd6d9f466970f286", "score": "0.74761057", "text": "def test_duplicate_username(self):\n\n test_resonse = self.user.register_user('cosmas28', 'mark123444', 'mark123444')\n self.assertEqual(test_resonse, 'The username already exist')", "title": "" }, { "docid": "3dc8e3bfa6f959fd0652d0ce233ea282", "score": "0.747578", "text": "def test_registration_for_exististing_user(self):\n req = self.client().post('api/v1/auth/register', data=self.user)\n self.assertEqual(req.status_code, 201)\n\n same_user = self.user\n req = self.client().post('api/v1/auth/register', data=same_user)\n self.assertNotEqual(req.status_code, 201)\n\n self.assertIn('Username taken!!', str(req.data))", "title": "" }, { "docid": "3349ad56b7816d3110772b6c67f6f0fc", "score": "0.7473192", "text": "def test_register_user(self):\n # Register a new user\n response = self.client.post(\n '/api/v1/auth/register', data=json.dumps(self.user),\n headers={'content-type': 'application/json'})\n self.assertEqual(response.status_code, 200,\n msg=\"Register new user successfully\")\n # Register a user twice\n response_two = self.client.post(\n '/api/v1/auth/register', data=json.dumps(self.user),\n headers={'content-type': 'application/json'})\n self.assertIn('User already registered', str(response_two.data),\n msg=\"Cant register one user twice\")", "title": "" }, { "docid": "afa167393fdda7ddcff2c89eeeed5caa", "score": "0.7431967", "text": "def test_auth_register_duplicate_allowable(self):\n user = add_user()\n with self.client:\n response = self.client.post(\n '/v1/auth/register',\n data=json.dumps(dict(\n username=self.data_generator.username(),\n email=self.data_generator.email(),\n password=user.password,\n name=user.name\n )),\n content_type='application/json',\n headers=[('Accept', 'application/json')]\n )\n data = json.loads(response.data.decode())\n self.assertEqual(data['message'], 'Successfully registered.')\n self.assertTrue(data['auth_token'])\n self.assertEqual(response.content_type, 'application/json')\n self.assertEqual(response.status_code, 201)", "title": "" }, { "docid": "ccf0fd9b5ff2f881c5e8d319f871d53d", "score": "0.734076", "text": "def test_register_account_exist_already(self):\n c = Client()\n self.assertEqual(len(User.objects.all()), 1)\n c.post('/login', {\"username\": \"pierre\", \"password\": \"top_secret\",\n \"type_authentification\": \"username\"})\n response = c.post(\"/management/user/create\", {\"username\": \"pierre\",\n \"password\": \"password123456\"})\n self.assertEqual(len(User.objects.all()), 1)\n self.assertEqual(response.context[\"error\"], \"user exist\")", "title": "" }, { "docid": "6b8a181bb1fe68613dcbf70b70fe6094", "score": "0.73085237", "text": "def test_register_existing_user(self):\n pass", "title": "" }, { "docid": "a944e063bd1847b87858d67c65a2a390", "score": "0.73055834", "text": "def test_cant_register_twice_the_same_email(self):\n user = 
UserFactory.create()\n\n response = self.client.post(self.register_url, {\n 'email': user.email.upper(),\n 'password': 'potato'\n })\n\n self.assertEqual(response.status_code, 400)\n self.assertIn('email', response.data)", "title": "" }, { "docid": "f088fa2da76440cadc64b84ba13dcf41", "score": "0.7172474", "text": "def test_duplicate_user(self):\n # a. create user 'bob'\n response = self.client.post(self.url, self.payload)\n self.assertEqual(CustomUser.objects.count(), 1)\n\n # b. try creating same user again\n self.client.logout()\n with self.assertNumQueries(2): # (1-2) unique checks\n response = self.client.post(self.url, self.payload)\n self.assert_validation_failed(response, data={\n \"username\": [\"A user with that username already exists.\"],\n \"email\": [\"user with this email address already exists.\"],\n })\n self.assertEqual(CustomUser.objects.count(), 1)", "title": "" }, { "docid": "6b78300a8a5455a57fce2ff8d5fe8cdf", "score": "0.7111049", "text": "def test_register_new_user(self):\n self.assertTrue(User.objects.count() == 0)\n self.register_new_user()\n self.assertTrue(User.objects.count() == 1)", "title": "" }, { "docid": "3a64ed200788d4b22409a5757309a0c6", "score": "0.7049239", "text": "def test_non_registered_user_login(self):\n self.request.user.is_registered = False\n nt.assert_equal(self.view.test_func(), False)", "title": "" }, { "docid": "3a64ed200788d4b22409a5757309a0c6", "score": "0.7049239", "text": "def test_non_registered_user_login(self):\n self.request.user.is_registered = False\n nt.assert_equal(self.view.test_func(), False)", "title": "" }, { "docid": "3a64ed200788d4b22409a5757309a0c6", "score": "0.7049239", "text": "def test_non_registered_user_login(self):\n self.request.user.is_registered = False\n nt.assert_equal(self.view.test_func(), False)", "title": "" }, { "docid": "e539098101f6fe66172eb9e004379eb8", "score": "0.7049219", "text": "def test_repeated_signup(self):", "title": "" }, { "docid": "6a670c14e91a2fa245b3c1fd511b8032", "score": "0.69894433", "text": "def test_user_isRegistered_ifNew(self):\n response = self.client.post('/user/signup/',{'name': 'bar',\n 'email': 'bar@gmail.com',\n 'password': 'password'}) #trying to create a user that already exists\n self.assertEqual(response.status_code, 200) #200 if user is created", "title": "" }, { "docid": "47f6f7f169930994fdf9dd6f12010c4a", "score": "0.697263", "text": "def test_signup_new_user_noname(self):\n pass", "title": "" }, { "docid": "e9591686e51edc72a63a6e7f189418af", "score": "0.69572246", "text": "def test_signup_repeatedly(client):\n response = client.post(\n '/api/v2/auth/signup', data=json.dumps(mock_reg[4]),\n content_type='application/json')\n assert response.get_json() == {'message': error_messages[0]['email_conflict']}\n assert response.status_code == 409", "title": "" }, { "docid": "ce771660c4635cceee09f952b0f1ff0e", "score": "0.6951846", "text": "def test_already_exists_user(self):\n self.client.post(\n '/user/create_new/',\n self.user\n )\n response = self.client.post(\n '/user/create_new/',\n self.user\n )\n self.assertEqual(response.status_code, 200)\n self.assertJSONEqual(\n str(response.content, encoding='utf8'),\n {\n 'ok': False,\n 'email': False\n }\n )", "title": "" }, { "docid": "111f4ce199c10a2df3eb1be235eae8c1", "score": "0.6948717", "text": "def test_register_new_account_with_username_exist_already(self):\n self.assertEqual(1, len(User.objects.filter(username='pierre')))\n self.assertEqual(1, len(User.objects.filter(username='pierre')))\n 
BackendAuthentification.create_user(username='pierre', password='top_secret')\n self.assertEqual(1, len(User.objects.filter(username='pierre')))\n self.assertEqual(1, len(User.objects.filter(username='pierre')))", "title": "" }, { "docid": "44dd3e3a94e7eb872e8b3f7513817f56", "score": "0.6947589", "text": "def test_duplicated_registration_post(self):\n img = open(TEST_DIR + '/cindy.jpg', 'rb')\n create_user_profile()\n post_data = {'username': 'testuser', 'password': 'testpassword', 'email': 'test@email.com', 'picture': img}\n request = self.client.post(reverse('explore_scotland_app:register'), post_data)\n content = request.content.decode('utf-8')\n # print(content)\n self.assertTrue('<a href=\"/login/\"' in content,\n f\"{FAILURE_HEADER}After repeated registering, we couldn't find the expected link back to the log in page.{FAILURE_FOOTER}\")", "title": "" }, { "docid": "5be95367394dd29daf2b083475db4d14", "score": "0.69418114", "text": "def test_registered_user_is_not_active(self):\n self.register_new_user()\n the_user = User.objects.first()\n self.assertFalse(the_user.is_active)", "title": "" }, { "docid": "b208f7fba182d0937163df07ea181613", "score": "0.6938008", "text": "def test_POST_register_dup_attr(self):\n res = register(self, self.user_data)\n res_in_json = jsonify(res.data)\n self.assertEqual(res.status_code, 201)\n self.assertIn('New user created', str(res_in_json['message']))\n res2 = register(self, self.driver_data)\n res_in_json2 = jsonify(res2.data)\n self.assertIn('Duplicate', str(res_in_json2['err']))", "title": "" }, { "docid": "8be0a2872272bef8d727327a49fc0e39", "score": "0.692011", "text": "def test_password_and_confirm(self):\n user1 = user.register('testuserone','testuser@gmail.com','testpass','testpass1')\n self.assertEqual(user1,'password does not match')", "title": "" }, { "docid": "1971cb58dadb5b9ffaf3b2466db27d7d", "score": "0.69084966", "text": "def test_user_already_logged_in(self):\n response = self.client.post(\"/api/v1/register\",\n data=json.dumps(dict(first_name=\"eveM\",\n last_name=\"maina\",\n username=\"evetM\",\n email=\"evetj@gmail.com\",\n password=\"evvt\")),\n content_type=\"application/json\")\n response = self.client.post(\"/api/v1/login\",\n data=json.dumps(dict(\n username_or_email=\"evetM\",\n password=\"evvt\")),\n content_type=\"application/json\")\n response = self.client.post(\"/api/v1/login\",\n data=json.dumps(dict(\n username_or_email=\"evetM\",\n password=\"evvt\")),\n content_type=\"application/json\")\n self.assertEqual(response.status_code, 409)\n response_msg = json.loads(response.data.decode(\"UTF-8\"))\n self.assertEqual(\"User Already Logged in\", response_msg[\"message\"])", "title": "" }, { "docid": "e13285e93414f8b0f99158dffce4835c", "score": "0.6884058", "text": "def test_503_raised_if_user_already_exists(self):\n response = self.client.post('/user/signup/',{'name': self.user.first_name,\n 'email': self.user.email,\n 'password': 'password'}) #trying to create a user that already exists\n self.assertEqual(response.status_code, 503) #503 is raised when user already existing", "title": "" }, { "docid": "f9bc614dd59203ad0552b5dfafce6eb9", "score": "0.68765694", "text": "def test_duplicate_save( self ):\n created_user = mongo.save_user( self.user )\n created_user = mongo.save_user( self.user )\n\n self.assertEqual( created_user, False,\n msg=f'{BColors.FAIL}\\t[-]\\tGame was not created in the Database!{BColors.ENDC}' + where() )\n print(f\"{BColors.OKGREEN}\\t[+]\\tPass User-Profile database save user duplicate.\\\n 
{BColors.ENDC}\")", "title": "" }, { "docid": "7e50fd064e92e2732468c9f729ce72f6", "score": "0.68379456", "text": "def test_signup_new_user_bad_password(self):\n pass", "title": "" }, { "docid": "25d4c5a75ecc5ba4620dfbc13d4042b3", "score": "0.682939", "text": "def test_user_registration_behaves_correctly(self):\n with self.client:\n self.app.config['IN_FAKE_TEST'] = True\n\n response = self.client.post(\n '/account/register',\n data=dict(username=\"tester\", email=\"tester@test.com\", password=\"testing\", repassword=\"testing\"),\n follow_redirects=True\n )\n self.assertIn(u'账号激活邮件', response.data.decode('utf8'))\n\n self.app.config['IN_FAKE_TEST'] = False", "title": "" }, { "docid": "3a96ef4edb804ec485aa2c91238d97e7", "score": "0.677534", "text": "def test_passwords_match(self):\n response = self.password_dont_match_user.register_user(self.user_list)\n self.assertTrue(response['message'], 'Passwords dont match!')", "title": "" }, { "docid": "38fd681a5c5dac6c6d86591a56573c76", "score": "0.6756307", "text": "def test_register(self):\n user2 =user.register('testusertwo','testemailtwo@gmail.com','testpass1','testpass1')\n self.assertEqual(user2,'registration succesfull')", "title": "" }, { "docid": "54e8238e10f40e9ac3e2b971760ea1d4", "score": "0.6734344", "text": "def test_cant_register_twice_with_same_email(self):\n UserFactory.create(email='foo@bar.com')\n\n with self.assertRaises(IntegrityError):\n UserFactory.create(email='FOO@BAR.COM')", "title": "" }, { "docid": "c467b35d3bc30eb993d8a89650d15970", "score": "0.6712506", "text": "def test_duplicate_username(self):\n # Test with a User who is not a vendor.\n response = self.try_signup('pate', 'salasana')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'app_vendor_signup.html')\n self.assertNotIn('_auth_user_id', self.client.session)\n self.assert_login('pate', 'salasana', False)\n\n # Test with a User who is a vendor.\n response = self.try_signup('pelle', '1234')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'app_vendor_signup.html')\n self.assertNotIn('_auth_user_id', self.client.session)", "title": "" }, { "docid": "d29fef07f00f377e5ca8deda4bae45ec", "score": "0.6710478", "text": "def test_user_create_duplicate(self):\n C.user.create('bill', 'pass1234', 'regular')\n with pytest.raises(FailedAPICallException):\n C.user.create('bill', 'pass1234', 'regular')", "title": "" }, { "docid": "76e55c075f25fa33b1ff5de03c16695e", "score": "0.6708265", "text": "def test_register_without_confirm(self):\r\n settings.REGISTRATION_REQUIRES_ACTIVATION = False\r\n settings.RECAPTCHA_ENABLE = False\r\n self.submit_form('/register', {\r\n 'username': 'A_USER',\r\n 'password': 'default',\r\n 'password_repeat': 'default',\r\n 'email': 'a.user@example.com'\r\n })\r\n self.login('A_USER', 'default')\r\n response = self.client.get('/en/')\r\n self.assert_('A_USER' in response.data)\r\n user = models.User.query.filter_by(username='A_USER').first()\r\n self.assertEqual(user.email, 'a.user@example.com')\r\n self.assertEqual(user.is_active, True)", "title": "" }, { "docid": "b33f4f0dafee6a342b82c9a999aa67c2", "score": "0.670466", "text": "def test_register_duplicate_username_valid_password_valid(client):\n test_user_data = client.application.config['test_data']\n request = client.post(\n \"/api/users\",\n data=json.dumps(\n {\n \"username\": test_user_data['username'],\n \"password\": test_user_data['password'],\n \"email_address\": test_user_data['mail'],\n }\n ),\n 
content_type=\"application/json\",\n )\n response = json.loads(request.data)\n assert (\n f\"Registration succesfull {test_user_data['username']}\"\n == response[\"message\"]\n )\n request = client.post(\n \"/api/users\",\n data=json.dumps(\n {\n \"username\": test_user_data['username'],\n \"password\": test_user_data['password'],\n \"email_address\": test_user_data['mail'],\n }\n ),\n content_type=\"application/json\",\n )\n response = json.loads(request.data)\n assert \"User with this username already exist\" == response[\"message\"]", "title": "" }, { "docid": "24a4cade8bd2b94c34901c7c9110aca4", "score": "0.66975677", "text": "def test_register_user(self):\n self.assertEqual(User.objects.count(), 2)\n response = self.client.post(\n \"/auth/register\",\n {\"username\": \"samuel\", \"password\": \"pass123\"},\n format=\"json\"\n )\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(User.objects.count(), 3)", "title": "" }, { "docid": "f5c17269a220e91cdf708c46c5a3f55a", "score": "0.667338", "text": "def test_failed_user_registration(self):\n response = self.client.post(\n reverse('movies:register_user'),\n data={'username': 'test', 'email': 'test@mail.com',\n 'first_name': 'test_ft_name', 'last_name': 'test_lt_name'}\n )\n response_data = response.data\n self.assertEqual(response.status_code, 200)\n self.assertFalse(response_data[\"status\"])\n self.assertEqual(response_data[\"password\"][0],\n \"This field is required.\")", "title": "" }, { "docid": "5b8b6c984ab6b35d9e8839f41e86d3a9", "score": "0.6669268", "text": "def test_can_register_user(self):\n\n with c:\n test_username = \"Reg User Test\"\n username_exists = check_if_username_exists(test_username)\n self.assertFalse(username_exists)\n\n form_data = {\"username\": test_username, \"password\": \"Password\"}\n c.post(\"/register\", data=form_data)\n username_exists = check_if_username_exists(test_username)\n self.assertTrue(username_exists)\n\n # delete test user from database\n connection = open_connection_if_not_already_open()\n with connection.cursor() as cursor:\n cursor.execute('DELETE FROM Users WHERE Users.Username = \"{}\" ; '.format(test_username))\n connection.commit()", "title": "" }, { "docid": "2d17eb40ff200236a5909f677cce4cac", "score": "0.6633316", "text": "def test_login_unregistered_user(self):\n response = self.client.post(\n \"/auth/register\",\n {\"username\": \"monreal\", \"password\": \"pass123\"},\n format=\"json\"\n )\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "title": "" }, { "docid": "c9b1c706cd17c0261cb00fc1ece7cb00", "score": "0.6632867", "text": "def check_user_in_db_reg(form, field):\n message = \"User already exist\"\n if services.users.get_user_by_name(field.data):\n flash(message)\n raise ValidationError(message)", "title": "" }, { "docid": "703accc229258f34c82512fe3613613d", "score": "0.6631244", "text": "def test_add_user_already_exists(self):\n engine = temp_db.get_temp_db()\n t_db = TransactionDB(engine)\n user_id = t_db.add_user('Pere', 'pwd')\n self.assertTrue(user_id >= 0)\n t_db.add_user('Pere', 'pwd')", "title": "" }, { "docid": "f9f261f3105fa79e46ee1493b2f81a2c", "score": "0.66264784", "text": "def test_create_password_twice(self):\n pwd = SpokePwd(self.org_name, self.user_id)\n self.assertRaises(error.AlreadyExists, pwd.create, self.user_def_pwd)", "title": "" }, { "docid": "01a1ac63afbbaf0125b6793c4f697ea3", "score": "0.6618141", "text": "def test_auth_register_duplicate_email(self):\n user = add_user()\n with self.client:\n 
response = self.client.post(\n '/v1/auth/register',\n data=json.dumps(dict(\n username=self.data_generator.username(),\n email=user.email,\n password=self.data_generator.password(),\n name=self.data_generator.full_name()\n )),\n content_type='application/json',\n headers=[('Accept', 'application/json')]\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 400)\n self.assertEqual(data['message'], 'Validation Error')\n self.assertEqual(data['errors'][0]['field'], 'email')\n self.assertEqual(data['errors'][0]['message'], 'email already exists')", "title": "" }, { "docid": "45c5fff55901424717a230dff4290af2", "score": "0.66061777", "text": "def test_email_not_logged_in_duplicate(self):\n user2 = UserFactory(email=self.user.email)\n self.assertEqual(user2.email, self.user.email)\n\n post_data = {\n 'username': self.user.email,\n 'password': 'password',\n 'next': '/index.php',\n }\n response = self.client.post(settings.LOGIN_URL, post_data)\n self.assertEqual(response.status_code, 200)\n self.assertIsNone(self.client.session.get('_auth_user_id'))", "title": "" }, { "docid": "6498a3bac90a818f0e92d60fcdd28d0d", "score": "0.6590019", "text": "def test_user_registration(self):\n response = self.register_user(self.test_user_data_1)\n self.assertEqual(response.status_code, 201)", "title": "" }, { "docid": "4cc0aa3c85ebd5a27e8381beff8e39d6", "score": "0.6587234", "text": "def test_auth_register_success(self):\n response = self.client.post(\n reverse(\"auth_register\"), data=self.form_data\n )\n # Check for redirect.\n self.assertRedirects(response, reverse(\"auth_register_complete\"))\n # Check for new user.\n self.assertEqual(\n get_user_model().objects.filter(username=\"alice\").count(), 1\n )", "title": "" }, { "docid": "43b7489c06d5add0dbf5b5cea1858e93", "score": "0.65839845", "text": "def register(self):\n username = self.username_verify.get()\n password = self.password_verify.get()\n flag = 0\n for(k,v) in self.authentication.items():\n if k is not username:\n\n flag = 1\n\n else:\n continue\n if flag == 0:\n messagebox.showerror('Error', 'user is already exits please check again')\n\n elif flag == 1:\n self.authentication[username] = password\n messagebox.showinfo('Thanks', 'Thanks for register')", "title": "" }, { "docid": "f17ce3ce439def6827d13845dd938bb6", "score": "0.6583745", "text": "def test_auth_register_duplicate_username(self):\n user = add_user()\n with self.client:\n response = self.client.post(\n '/v1/auth/register',\n data=json.dumps(dict(\n username=user.username,\n email=self.data_generator.email(),\n password=self.data_generator.password(),\n name=self.data_generator.full_name()\n )),\n content_type='application/json',\n headers=[('Accept', 'application/json')]\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 400)\n self.assertEqual(data['message'], 'Validation Error')\n self.assertEqual(data['errors'][0]['field'], 'username')\n self.assertEqual(data['errors'][0]['message'], 'username already exists')", "title": "" }, { "docid": "d618cc3305bbfa3253994fed794d37e2", "score": "0.6580318", "text": "def test_user_only_registers_with_requires_data(self):\n\n\t\tuser_data = {'username': '', 'password': ''}\n\t\tresult = self.app.post('/api/v1/auth/register', data = json.dumps(user_data), content_type = 'application/json')\n\t\tself.assertEqual(result.status_code, 403)", "title": "" }, { "docid": "158a602f06284d96575efc3cad36fcfa", "score": "0.65729845", "text": "def test_register_new_account(self):\n\n 
user_login = self.client.login(\n username = self.email,\n password = self.pwd\n )\n self.assertEqual(user_login, False)\n\n self.client.post(\n '/user/create_new/',\n self.user\n )\n user_login = self.client.login(\n username = self.email,\n password = self.pwd\n )\n self.assertEqual(user_login, True)", "title": "" }, { "docid": "686f0f3d4466378d04c89746bd69c8f8", "score": "0.656718", "text": "def test_one_user_input_missing(self):\n\n self.assertEqual(self.user.register_user('yoyo2018', 'TIA2018', ''), 'Both username and password is required!')\n self.assertEqual(self.user.register_user('', 'TIA2018', 'TIA2018'), 'Both username and password is required!')", "title": "" }, { "docid": "199e6f1db2eafc789e6b02bc0e421e36", "score": "0.6560575", "text": "def test_create_users_who_already_exists(self):\n resource = self.client().post('api/v2/auth/signup', data=json.dumps(dict(email='sh@gmail.com', first_name='sasha', last_name='doe', password='12345678', confirm_password='12345678'\n )), content_type='application/json')\n data = json.loads(resource.data.decode())\n self.assertEqual(resource.status_code, 201)\n self.assertEqual(resource.content_type, 'application/json')\n self.assertEqual(data['response'], 'user created successfully')\n\n resource = self.client().post('api/v2/auth/signup', data=json.dumps(dict(email='sh@gmail.com', first_name='sasha', last_name='doe', password='12345678', confirm_password='12345678'\n )), content_type='application/json')\n data = json.loads(resource.data.decode())\n self.assertEqual(resource.status_code, 409)\n self.assertEqual(resource.content_type, 'application/json')\n self.assertEqual(data['response'], 'user already exists')", "title": "" }, { "docid": "a099dc679877ce15236db3650c2fb0bc", "score": "0.65581733", "text": "def test_model_user_passwords_are_random(self):\n password = self.data_generator.password()\n user_one = add_user(password=password)\n user_two = add_user(password=password)\n self.assertNotEqual(user_one.password, user_two.password)", "title": "" }, { "docid": "b84f32ef0adb0019f5c0c49532ef3c8c", "score": "0.65516835", "text": "def test_register_page(self):\n # test: get request loads page correctly\n res = self.client.get(url_for(\"user.register\"),\n follow_redirects=True)\n self.assertEqual(res.status_code, 200)\n # test: post request, creates new user with the following data\n data = dict(username=\"myusername\", password=\"mypassword\",\n confirm_password=\"mypassword\")\n res = self.client.post(url_for(\"user.register\"), data=data,\n follow_redirects=True)\n self.assertIn(b'Welcome myusername.', res.data)\n # test: try to register same username\n data = dict(username=\"myusername\", password=\"mypassword\",\n confirm_password=\"mypassword\")\n res = self.client.post(url_for(\"user.register\"), data=data,\n follow_redirects=True)\n self.assertIn(b'Username &#34;myusername&#34; already exists.',\n res.data)", "title": "" }, { "docid": "b2cc946c88c0ef15c79b0cea84427b78", "score": "0.65472263", "text": "def test_token_generation_on_register(self):\n response = self.register_user(self.user)\n self.assertIn('token', response.data['user_info'])\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "title": "" }, { "docid": "16bfa9d22ba97faa5ad151af0b68f694", "score": "0.65345246", "text": "def test_create_duplicate_uid( self ):\n created_user = mongo.save_user( self.user )\n created_user = mongo.create_user( self.user[\"user_id\"], self.user2[\"password_hash\"],\n self.user2[\"email\"], self.user2[\"auth_token\"] )\n\n 
mongo.remove_user(self.user[\"user_id\"])\n self.assertEqual( created_user, False, msg=f'{BColors.FAIL}\\t[-]\\\n \\tDuplicate user was created in the Database!{BColors.ENDC}' + where() )\n print(f\"{BColors.OKGREEN}\\t[+]\\tPass User-Profile database create user duplicate id.\\\n {BColors.ENDC}\")", "title": "" }, { "docid": "b88c189bc974c710ed2b39bd8b438d2c", "score": "0.6531688", "text": "def test_valid_user_registration(self):\n response = self.register('testUser', 'testUserDN',\n 'testCRY', 'TestPass')\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'testUser', response.data)", "title": "" }, { "docid": "2459f99b8fe00ef1c11e6b052b17b586", "score": "0.65285194", "text": "def test_non_registered_user_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = False\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "title": "" }, { "docid": "2459f99b8fe00ef1c11e6b052b17b586", "score": "0.65285194", "text": "def test_non_registered_user_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = False\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "title": "" }, { "docid": "2459f99b8fe00ef1c11e6b052b17b586", "score": "0.65285194", "text": "def test_non_registered_user_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = False\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "title": "" }, { "docid": "2459f99b8fe00ef1c11e6b052b17b586", "score": "0.65285194", "text": "def test_non_registered_user_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = False\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "title": "" }, { "docid": "2a42b23ca0c52c8998eb428b57d85e31", "score": "0.65157664", "text": "def test_valid_email(self):\n user1 = user.register('testuserone','testusergmail.com','testpass1','testpass1')\n self.assertTrue(user1==\"Invalid email\")", "title": "" }, { "docid": "81589fe3eefa1d1d549ba764496ff8a6", "score": "0.65096974", "text": "def test_create_duplicate_email( self ):\n created_user = mongo.save_user( self.user )\n created_user = mongo.create_user( self.user2[\"user_id\"],\n self.user2[\"password_hash\"], self.user[\"email\"], self.user2[\"auth_token\"] )\n mongo.remove_user(self.user[\"user_id\"])\n\n self.assertEqual( created_user, False, msg=f'{BColors.FAIL}\\\n \\t[-]\\tDuplicate user was created in the Database!{BColors.ENDC}' + where() )\n print(f\"{BColors.OKGREEN}\\t[+]\\tPass User-Profile database create duplicate user email.\\\n {BColors.ENDC}\")", "title": "" }, { "docid": "05408a878f825b43ba18da3dd1132622", "score": "0.6474229", "text": "def test_duplicate_user_email(self):\n self.app.post(\"/api/v2/auth/signup\", headers={'Content-Type': 'application/json'},\n data=json.dumps(self.data))\n response = self.app.post(\n \"/api/v2/auth/signup\", headers={'Content-Type': 'application/json'}, data=json.dumps(self.data))\n result = json.loads(response.data)\n self.assertEqual(result['status'], 400)\n self.assertEqual(result['error'], \"email already exists\")", "title": "" }, { "docid": "e6533517aecadf4a1c78eac4211e0451", "score": "0.6452532", "text": "def test_add_user_dup_email(self):\n with self.client:\n self.client.post(\n 
'/users',\n data=json.dumps({\n 'username': 'rick1',\n 'email': 'rsanchez@randm.com'\n }),\n content_type='application/json',\n )\n res = self.client.post(\n '/users',\n data=json.dumps({\n 'username': 'rick2',\n 'email': 'rsanchez@randm.com'\n }),\n content_type='application/json',\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 400)\n self.assertIn('Sorry, that email address is already in use.', data['message'])\n self.assertIn('fail', data['status'])", "title": "" }, { "docid": "f35f9bfe98cf818899252d5b136e112c", "score": "0.64496326", "text": "def test_user_registration(self):\n\n req = self.client().post('api/v1/auth/register')\n\n self.assertEqual(req.status_code, 400)\n\n req = self.client().post('api/v1/auth/register', data=self.user)\n\n self.assertEqual(req.status_code, 201)\n\n self.assertIn('Registration successful.', str(req.data))\n\n req = self.client().post('api/v1/auth/register', data={})\n self.assertIn('Provide your name and password!!', str(req.data))", "title": "" }, { "docid": "0d2dab806699e4c78e1c97bf65cb07cd", "score": "0.64404446", "text": "def test_register_existing_user(self):\n username = 'test_user'\n password = 'password'\n tokens = 10\n # create a user in the db\n user = self.db.Users.insert_one({\n \"Username\": username,\n \"Password\": password,\n \"Admin\": 0,\n \"Tokens\":10\n })\n # call register with same data\n data = json.dumps({\n 'username': username,\n 'password': password\n }\n )\n response = self.client.post(\n \"/register\",\n headers={\"Content-Type\": \"application/json\"},\n data=data,\n follow_redirects=True\n )\n data = response.get_json()\n assert data['status'] == 301", "title": "" }, { "docid": "316f1406e1fa5d573d93404bf6ccdceb", "score": "0.6438793", "text": "def test_registration(self):\n with self.client:\n logging_out(self)\n response = self.client.post(\n '/register',\n data = dict(\n first_name=\"testfirst\",\n last_name=\"testsur\",\n email=\"test@test.com\",\n password=\"test123\",\n ),\n follow_redirects=True\n )\n self.assertIn(b'testfirst', response.data)\n self.assertIn(b'testsur', response.data)\n self.assertIn(b'test@test.com', response.data)", "title": "" }, { "docid": "d869531389656eaf360fd23429de052e", "score": "0.6437885", "text": "def test_register_user(self):\n self.success_user.register_user(self.user_list)\n response = User.get_users(self.user_list)\n self.assertTrue({'Username':'ptah', 'Email':'pndungu54@gmail.com', 'Password':'pass123'} in response)", "title": "" }, { "docid": "c84aac37235a0827aad0c212a48d5ca9", "score": "0.6432272", "text": "def test_register_invisible_if_logged_in(self):\n url = '/register/'\n self.client.login(username=self.user.username, password='password')\n self.assertIn('_auth_user_id', self.client.session)\n\n response = self.client.get(url)\n self.assertRedirects(response, '/', target_status_code=302)", "title": "" }, { "docid": "afa40dc20202ce17a1ca17d97cf47ab3", "score": "0.6428046", "text": "def test_register_form_user_exists(self):\n self.driver.get(\"http://localhost:5000/register\")\n # Interact with the Register form\n username = self.driver.find_element_by_id(\"username\")\n username.send_keys(\"NewUser\")\n password = self.driver.find_element_by_id(\"password\")\n password.send_keys(\"123\")\n password = self.driver.find_element_by_id(\"repeat-password\")\n password.send_keys(\"123\")\n # button field\n self.driver.find_element_by_tag_name('button').click()\n WebDriverWait(self.driver, 3).until(\n 
expected_conditions.text_to_be_present_in_element(\n (By.TAG_NAME, \"h4\"), \"username already in use\")\n )", "title": "" }, { "docid": "ff0422908f07948fe9438d859a369af7", "score": "0.6399364", "text": "def test_register(self):\n TEST_EMAIL = 'the.test.user@gmail.com'\n\n # Make sure the user doesn't already exist.\n with self.assertRaises(User.DoesNotExist):\n User.objects.get(username=TEST_EMAIL)\n\n # Make sure the page has the necessaries.\n get_resp = self.client.get('/')\n self.assertEqual(200, get_resp.status_code)\n self.assertContains(get_resp, \"email\")\n self.assertContains(get_resp, \"password\")\n self.assertContains(get_resp, \"phone\")\n self.assertContains(get_resp, \"<form\")\n\n # Make sure we can register...\n post_resp = self.client.post('/',\n {'email': TEST_EMAIL,\n 'password': 'yay!',\n 'phone_number': '9195555555'})\n self.assertEqual(302, post_resp.status_code,\n msg=\"Didn't get the status code we expected. \"\n \"Response:\\n\"+post_resp.content)\n # ... and that the DB reflects it.\n u = User.objects.get(username=TEST_EMAIL)\n self.assertGreater(u.profile.pk, 0)\n self.assertGreater(u.profile.contact.pk, 0)\n\n u.delete()", "title": "" }, { "docid": "98d21a340ca843813201a93ab9ebc2f1", "score": "0.63963217", "text": "def test_signup_duplicate_user_with_valid_data(self):\n self.superuser = User.objects.create_superuser(\n \"user1\", \"bisonlou@gmail.com\", \"Pa$$word123\"\n )\n data = {\"username\": \"user1\",\n \"password\": \"Pa$$word123\",\n \"email\": \"bisonlou@gmail.com\"\n }\n url = reverse(\"signup\")\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, 400)", "title": "" }, { "docid": "e09ae76cf63d368f24d72e81869531d9", "score": "0.6395266", "text": "def test_user_should_not_register_with_a_taken_email(self, mock_email):\n mock_email.return_value = True\n self.client.post(\n self.registration_url, self.new_user, format=\"json\")\n response = self.client.post(\n self.registration_url, self.new_user, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(str(response.data[\"errors\"][\"email\"][0]),\n \"user with this email already exists.\")", "title": "" }, { "docid": "9a222e1968305b2d93804205c3bbe717", "score": "0.63945115", "text": "def test_add_user(self):\n new_user = User.objects.create(\n username='test2',\n email='test2@gmail.com',\n password='test208971'\n )\n users_count_after = User.objects.count()\n self.assertFalse(new_user.is_staff)\n self.assertEqual(self.users_count_before + 1, users_count_after)", "title": "" }, { "docid": "034da880ddeb6c444bf939eaf87ef338", "score": "0.6390786", "text": "def test_signup_correct_data(client):\n old_num_users = registered()\n response = client.post(\n '/api/v2/auth/signup', data=json.dumps(mock_reg[4]),\n content_type='application/json')\n new_num_users = registered()\n assert old_num_users + 1 == new_num_users\n assert response.get_json() == success_messages[0]['account_created']\n assert response.status_code == 201", "title": "" }, { "docid": "4e6a9f16c754f450b3b6b00a78a129d4", "score": "0.6388306", "text": "def test_user_registration(self):\n\n\t\treg = self.register_user()\n\t\tself.assertEqual(reg.status_code, 201)", "title": "" }, { "docid": "f3f48554d8c8497bed33017dab5a97b0", "score": "0.63749135", "text": "def test_bad_registrations(self):\n for em, pa, ph in (#('d@gmail.com', 'shortphone', '5555550'),\n ('@invalid.com', 'password', '9195555551'),\n ('not_an_email', 'password', '9195555552'),\n 
('spacez bad@gmail.com', 'password', '9195555553'),\n ('', '<-- blank email == bad', '9195555554'),\n ('empty_pass@gmail.com', '', '9195555555')):\n resp = self.client.post('/',\n {'email': em,\n 'password': pa,\n 'phone_number': ph})\n self.assertEqual(200, resp.status_code)\n self.assertEqual(0, User.objects.count())", "title": "" }, { "docid": "bd67aed7e7c9ce73dedc6eeec0a34108", "score": "0.6374292", "text": "def test_register_with_existing_user_conflict(\n self, monkeypatch, client, mock_user: User, mock_user_dict: Dict\n ):\n # Given: Form data where username conflicts with existing mock_user\n # Given: mock login function and some user is registered\n monkeypatch.setattr(\"main.views.login\", mock_login := Mock())\n monkeypatch.setattr(\n NewUserForm, \"save\", mock_save_form := Mock(return_value=mock_user)\n )\n # When: POST user form data of a user that already exists\n response = client.post(reverse(\"register\"), data=mock_user_dict)\n # Then: User is not saved through the form, and login function not called\n mock_save_form.assert_not_called()\n mock_login.assert_not_called()\n assert TemplateNames.REGISTER.value in [t.name for t in response.templates]\n assert response.status_code == HTTPStatus.OK.value", "title": "" }, { "docid": "ae2542610fa249ce7643375beccfb409", "score": "0.63695323", "text": "def test_not_register(self):\n email = \"bonjour@oui.com\"\n resp = self.client.post(reverse(\"verify_token\"), data={\n \"user_email\": email,\n \"token\": \"256\"\n })\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(resp.data, [\"DO_NOT_EXIST\"])", "title": "" }, { "docid": "ca8e16394494a97a60b45cc378aba05e", "score": "0.6365737", "text": "def test_second_user(self):\n self.test_register_view()\n data = {\n \"email\": \"jeremy.trips@tamere.com\",\n \"password\": \"pdcdezgf4545freff\",\n \"password2\": \"pdcdezgf4545freff\",\n \"home_address\": \"Zaventem\",\n \"studies\": \"Ingé de ouf\",\n \"first_name\": \"jeremy\",\n \"last_name\": \"Trips\",\n \"noma\": \"14122\",\n \"student_card\": File(open(os.path.join(\"static\", \"no_img.png\"), \"rb\"))\n }\n data_second_user = data\n resp = self.client.post(reverse(\"create\"), data=data_second_user)\n self.assertEqual(resp.data[\"email\"][0], \"custom user with this email already exists.\") \n self.assertEqual(resp.data[\"email\"][0].code, \"unique\")\n self.assertEqual(resp.status_code, 206)", "title": "" }, { "docid": "c9ecd1b6fa921937e0671f60be812452", "score": "0.6365226", "text": "def test_add_new_user_False(self):\n new_user = add_new_user(\"test\", \"test\")\n self.assertFalse(new_user[0]) #because it ever exists", "title": "" }, { "docid": "0bc0376a74bfe353ad6a5a175e7201b4", "score": "0.6348678", "text": "def test_register(self):\n c = Client()\n\n responce = c.get('/register')\n self.assertEqual(responce.status_code, 200)\n\n usersCount = User.objects.count()\n responce = c.post('/register', {\n 'username': 'newUser',\n 'email': 'user@email.com',\n 'firstname': 'new',\n 'lastname': 'user',\n 'password': 'password',\n 'confirmation': 'password',\n })\n self.assertEqual(responce.status_code, 302)\n\n # user count should increase by 1\n newUsersCount = User.objects.count()\n self.assertEqual(usersCount + 1, newUsersCount)", "title": "" }, { "docid": "21c9225241629e25a04aca59f982667b", "score": "0.63452655", "text": "def test_duplicate_user_account_not_created(self):\n self.dataservice.create_account(\n 'john@doe.com', 'secret', 'John', 500)\n\n actual = len(self.dataservice.USERS)\n expected = 
1\n self.assertEqual(actual, expected)", "title": "" }, { "docid": "6eb56af06f6cd870d755780b84a9a34e", "score": "0.63443875", "text": "def test_register_user():\n username = f'upvest_test_{uuid.uuid4()}'\n user = tenancy.users.create(username, fresh.password())\n assert user.username == username\n assert user.recovery_kit is not None", "title": "" }, { "docid": "06243b9eb0956df64bad343c6ad75778", "score": "0.6342137", "text": "def test_post_new_org_user_duplicate():\n res = requests.post(\n f'{env.AWG_BASE_URL}{ORG_URL}/mitre/user',\n headers=utils.BASE_HEADERS,\n json={'username': env.AWG_USER_NAME}\n )\n assert res.status_code == 400\n response_contains_json(\n res, 'message',\n f\"The user \\'{env.AWG_USER_NAME}\\' already exists.\")", "title": "" }, { "docid": "350cbc01b4df75f361be236a98dfa259", "score": "0.6338955", "text": "def test_add_user_duplicate_email(self):\n with self.client:\n self.client.post(\n '/users',\n data=json.dumps({\n 'password': 'mypass',\n 'email': 'generic_user@guser.org'\n }),\n content_type='application/json',\n )\n response = self.client.post(\n '/users',\n data=json.dumps({\n 'password': 'mypass',\n 'email': 'generic_user@guser.org'\n }),\n content_type='application/json',\n )\n data = json.loads(response.data.decode())\n self.assertIn(\n 'Sorry. That user already exists.', data['message'])\n self.assertIn('fail', data['status'])", "title": "" }, { "docid": "113564ecb2098b21f1631f1a3d2306af", "score": "0.6338445", "text": "def testregister():\n do_post(\"register\", 400) # No post data, fails\n\n # Check for legal username length\n username = ''\n password = \"legalButBad\"\n for i in range(22):\n usernameLen = len(username)\n if usernameLen < 3 or usernameLen > 20:\n do_register(username, password, 400)\n else:\n do_register(username, password, 204)\n username = username + \"a\"\n\n # Check character set. 
Note, can't start with 'aa', already used above\n ba = bytearray(\"abc\")\n for i in range(256):\n ba[2] = i\n username = str(ba)\n if username.isalnum():\n do_register(username, password, 204)\n else:\n do_register(username, password, 400)\n\n # Check for legal password length\n username = \"user3\"\n password = \"\"\n for i in range(9):\n if len(password) < 8:\n do_register(username, password, 400)\n else:\n do_register(username, password, 204)\n password = password + \"a\"\n\n # Check for success, followed by duplicate entry\n username = \"user4\"\n password = \"12345678\"\n do_register(username, password, 204)\n\n password = \"87654321\" # same user, different password, fail\n do_register(username, password, 400)", "title": "" }, { "docid": "78bf709d6624375e887c5f3225b98ccd", "score": "0.6335602", "text": "def test_user_authenticated_registration(self):\n self.test_user_registration()\n self.client.login(username = \"test\", email=\"new@test.com\",password = \"sdsdsd65465\")\n response = self.client.get(self.url)\n self.assertEqual(403, response.status_code)", "title": "" }, { "docid": "38774d7823f697b06edc530f10f93bd0", "score": "0.63355315", "text": "def test_user_already_exists(self):\n payload = {\n 'email': 'test1@test.com',\n 'password': 'testPass',\n 'name': 'Jimmychoo'\n }\n create_user(**payload)\n resp = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "132d015e56bda08ebd3b9368770d7293", "score": "0.6333072", "text": "def test_register_unequal_passwords(self):\n\n user_data = json.dumps({'email': 'test2@andela.com',\n 'username': 'testuser', 'first_name': 'first',\n 'last_name': 'last', 'password': 'anDela2014',\n 'confirm_password': 'anDela2018'})\n response = self.run_app.post('/api/v2/auth/register',\n data=user_data, headers=self.headers)\n json_res = json.loads(response.data.decode())\n self.assertEqual(json_res['message'],\n 'Password does not match the confirmation password!')", "title": "" }, { "docid": "18c39cd21c1b66e2648b96a1090ee372", "score": "0.6328516", "text": "def test_503_raised_if_user_doesnot_exists(self):\n response = self.client.post('/user/signin/',{'email': 'stanger@gmail.com',\n 'password': 'password'})\n self.assertEqual(response.status_code, 503) #503 is raised when user already existing", "title": "" }, { "docid": "fa1483bfab575c8c14345dd1f01a0548", "score": "0.6317088", "text": "def test_register_registered_name(self):\n\n first_user = json.dumps({\n 'email': 'test@andela.com', 'username': 'testuser',\n 'first_name': 'first', 'last_name': 'last',\n 'password': 'anDela2018', 'confirm_password': 'anDela2018'})\n self.run_app.post('/api/v2/auth/register',\n data=first_user, headers=self.headers)\n\n user_data = json.dumps({'email': 'test2@andela.com',\n 'username': 'testuser', 'first_name': 'first',\n 'last_name': 'last', 'password': 'anDela2018',\n 'confirm_password': 'anDela2018'})\n response = self.run_app.post('/api/v2/auth/register',\n data=user_data, headers=self.headers)\n\n json_res = json.loads(response.data.decode())\n self.assertEqual(json_res['message'], 'User already exists. 
Sign in!')", "title": "" }, { "docid": "33e848a98b27d714e6285a5fd07ecdac", "score": "0.63103926", "text": "def user_registration(firstname, lastname, email, password, repeated_password, rol):\n if password == repeated_password:\n name = firstname + ' ' + lastname\n input_password = hashlib.sha256(password.encode(\"utf8\"))\n hex_dig = input_password.hexdigest()\n row = [name, hex_dig, email, rol]\n query = conn.do_query('SELECT username FROM user WHERE email=\\\"' + email + '\\\";')\n if len(query) != 0:\n return False\n else:\n regist = conn.do_query(\n 'INSERT INTO user(username, password, email, rol) VALUES (\\\"' + '\\\",\\\"'.join(row) + '\\\");')\n conn.connection.commit()\n query = conn.do_query('SELECT username FROM user WHERE email=\\\"' + email + '\\\";')\n if len(query) == 0: # User not added\n return False\n else:\n return True\n else:\n return False", "title": "" } ]
3fb46fe5c6fa1ad110ecaa85dafd6cef
Returns the DulmageMendelsohn partition of the incidence graph of the provided variables and constraints.
[ { "docid": "b73a53526604bd10459f4662f4c284fc", "score": "0.7901976", "text": "def dulmage_mendelsohn(self, variables=None, constraints=None):\n variables, constraints = self._validate_input(variables, constraints)\n matrix = self._extract_submatrix(variables, constraints)\n\n row_partition, col_partition = dulmage_mendelsohn(matrix.tocoo())\n con_partition = RowPartition(\n *[[constraints[i] for i in subset] for subset in row_partition]\n )\n var_partition = ColPartition(\n *[[variables[i] for i in subset] for subset in col_partition]\n )\n # Switch the order of the maps here to match the method call.\n # Hopefully this does not get too confusing...\n return var_partition, con_partition", "title": "" } ]
[ { "docid": "657d43145eaa2baf31afc3fd98ae445c", "score": "0.57001877", "text": "def undiGraphToMarkovNetwork(graph,variables=dict(),domainSize=2):\r\n nodes=graph.nodes()\r\n cliques=getAllMaximalCliquesUndiGraph(graph)\r\n definedVariables=variables.keys()\r\n mn=gum.MarkovNet()\r\n for node in nodes:\r\n if node in definedVariables:\r\n mn.add(variables[node])\r\n else:\r\n mn.add(gum.LabelizedVariable(str(node),\"\",domainSize))\r\n for clique in cliques:\r\n mn.addFactor(clique)\r\n return mn", "title": "" }, { "docid": "b8f4807796c55cb26da5225a9e4b82b8", "score": "0.51007825", "text": "def partition(x):\n ## I had no need of using partition function since my id3 implementation is handling it with help of dataframes and mutual_information function", "title": "" }, { "docid": "2bac5af266fc81934a1b87b9b211d531", "score": "0.5070637", "text": "def get_incidence_graph(variables, constraints, include_fixed=True):\n _check_unindexed(variables+constraints)\n N, M = len(variables), len(constraints)\n graph = nx.Graph()\n graph.add_nodes_from(range(M), bipartite=0)\n graph.add_nodes_from(range(M, M+N), bipartite=1)\n var_node_map = ComponentMap((v, M+i) for i, v in enumerate(variables))\n for i, con in enumerate(constraints):\n for var in identify_variables(con.expr, include_fixed=include_fixed):\n if var in var_node_map:\n graph.add_edge(i, var_node_map[var])\n return graph", "title": "" }, { "docid": "96e7bcf05fb1b57c7f9fcf3f9987a180", "score": "0.50161827", "text": "def dictToMarkovNetwork(graph,variables=dict(),domainSize=2):\r\n for var1 in graph.keys():\r\n for var2 in graph[var1]:\r\n if var1 not in graph[var2]:\r\n raise ValueError('Can only convert an adjacency list into a markov network : as \"'\r\n +str(var2)+'\" is a neighbor of \"'+str(var1)+'\", then \"'+str(var1)+'\" must be a neighbor of \"'+str(var2)+'\".')\r\n nodes=graph.keys()\r\n cliques=getAllMaximalCliquesDict(graph)\r\n definedVariables=variables.keys()\r\n mn=gum.MarkovNet()\r\n for node in nodes:\r\n if node in definedVariables:\r\n mn.add(variables[node])\r\n else:\r\n mn.add(gum.LabelizedVariable(str(node),\"\",domainSize))\r\n for clique in cliques:\r\n mn.addFactor(clique)\r\n return mn", "title": "" }, { "docid": "5a79bdc8ea95c2943fef9b553e685c36", "score": "0.48071054", "text": "def test_partition(ishan: Entity, highest: int, dims: int) -> None:\n data = np.random.randint(\n low=-highest, high=highest, size=(dims, dims), dtype=np.int32\n )\n sept = SEPT(\n child=data,\n entity=ishan,\n min_vals=np.ones_like(data) * -100,\n max_vals=np.ones_like(data) * 100,\n )\n reference_tensor = REPT(rows=sept)\n\n reference_tensor.partition(kth=1)\n sept.partition(kth=1)\n assert reference_tensor.child == sept, \"Partition did not work as expected\"", "title": "" }, { "docid": "524d92a55e19eab96bf595a249ca2aa4", "score": "0.4778358", "text": "def _handle_partition_and_hanning(self, dy, dXn, dindices, Xindices):\n \n bd, ed = dindices\n bXn, eXn = Xindices\n \n #if any([x < 0 for x in [bd, ed, bXn, eXn]]):\n # ipdb.set_trace()\n \n hanning_for_y = self.hwy[bXn:eXn,:]\n \n pdy = dy[bd:ed] * hanning_for_y\n\n hanning_for_dXn = self.hwts[bXn:eXn,:]\n \n pdXn = dXn[bd:ed,:] * hanning_for_dXn\n \n return pdXn, pdy", "title": "" }, { "docid": "842de6cbb5d073312930673af3ceb22f", "score": "0.4742497", "text": "def inf_mat_partition(self, infmat):\n infmat_sum = self.inf_mat_colsum(infmat)\n inf_con = {}\n step_size = len(self.dC.ob_dict.keys())\n for i in enumerate(self.dC.ob_dict.keys()):\n inf_con[i[1]] = 
infmat_sum[i[0]::step_size]\n return inf_con", "title": "" }, { "docid": "23c81f3cd6a714542c281492edfd355a", "score": "0.47314295", "text": "def best_modularity_partition(graph, partition=None):\n dendo = generate_dendrogram(graph, partition)\n return partition_at_level(dendo, len(dendo) - 1)", "title": "" }, { "docid": "1f01a6f5da01d8fe36bbd7fa0ad1a6a1", "score": "0.47275567", "text": "def _kdeparts(self, input_obs, input_ins):\r\n [n, d] = input_obs.shape\r\n\r\n # Compute Kernel Bandwidth Matrix based on Silverman's Rule of Thumb\r\n silverman_factor = np.power(n * (d + 2.0) / 4.0, -1. / (d + 4))\r\n input_centered = input_obs - F.mean(input_obs, axis=0, keepdims=True)\r\n data_covariance = F.matmul(F.transpose(input_centered), input_centered) / n\r\n kernel_bw = F.diagonal(data_covariance) * (silverman_factor ** 2) * np.eye(d, d)\r\n const = 1 / (n * ((2 * np.pi) ** (d/2)) * F.sqrt(F.det(kernel_bw)))\r\n\r\n # Compute energy expressions in the exponent for every observation point\r\n diff = input_obs - input_ins\r\n energy = -0.5 * F.diagonal(F.matmul(F.matmul(diff, F.inv(kernel_bw)), F.transpose(diff)))\r\n\r\n return const, energy", "title": "" }, { "docid": "5450e01fd31ddeca0b319aa6b2629294", "score": "0.47153172", "text": "def partition_girvan_newman(graph, max_depth):\n graph_copy=graph.copy()\n temp=approximate_betweenness(graph_copy,max_depth)\n between=sorted(temp.items(),key=lambda x:(-x[1],x[0]))\n graphs=list()\n i=0\n while len(graphs)<=1:\n \n graph_copy.remove_edge(between[i][0][1],between[i][0][0])\n component=nx.connected_component_subgraphs(graph_copy)\n graphs=list(component)\n \n i=1+i\n return graphs\n ###TODO\n pass", "title": "" }, { "docid": "64415b44898d9c73b842286f9e6af5a9", "score": "0.47109407", "text": "def partition(seq, k):\n # Figure out chart for k = 1\n # For each index and each k, get the min maximum we can do\n # from that division, assuming we are starting from index i\n # DP table for values, DP table for dividers, prefix sums array\n\n # First number will have no previous numbers to add to it\n prefix_sums = [0]\n # For each index, the sum of that number plus all the previous numbers\n for i in xrange(len(seq)):\n prefix_sums.append(prefix_sums[-1] + seq[i])\n print \"Prefix: %r\" % prefix_sums\n\n # The min_max for each index and value of k \n min_maxes = [[None] * k for _ in xrange(len(seq))]\n print \"Min Maxes: %r\" % min_maxes\n # Divider positions (index of first item in partition) for each partition\n dividers = [[None] * k for _ in xrange(len(seq))]\n\n # Fill in column where k = 1, so min_maxes are all items in that subarray\n for i in xrange(len(seq)):\n min_maxes[i][0] = prefix_sums[i+1]\n print \"Min Maxes: %r\" % min_maxes\n # Fill in row 1 for each value of k when there is only one value in seq\n for j in xrange(k):\n min_maxes[0][j] = seq[0]\n print \"Min Maxes: %r\" % min_maxes\n\n for i in xrange(1, len(seq)):\n for j in xrange(1, k):\n min_maxes[i][j] = float(\"inf\")\n for x in xrange(i):\n cost = max(min_maxes[x][j-1], prefix_sums[i+1] - prefix_sums[x+1])\n if cost < min_maxes[i][j]:\n min_maxes[i][j] = cost\n dividers[i][j] = x\n\n # TODO: Reconstruct partition\n print \"Seq:\"\n print seq\n print \"Result:\"\n print min_maxes\n return min_maxes[-1][-1]", "title": "" }, { "docid": "6036783d88b77ea5ebc99c021565879d", "score": "0.46195918", "text": "def assign_design_dimension_projection(n_variables, vary_sol_density):\n if n_variables <= 2:\n print(\"fNo need to assign dimension projections as number of variables is already 
{n_variables}\")\n return None, None\n mask = np.random.permutation(n_variables-1) # Test againt matlab\n if vary_sol_density:\n diff = np.random.randint(n_variables)\n mask = mask[:diff] # Take the diff first elements\n else: \n half = int(np.ceil(n_variables))\n mask = mask[:half] # Take half first elements\n pi1 = np.zeros(n_variables)\n pi1[mask] = True\n pi2 = pi1\n pi2 = np.ones(n_variables)\n pi2[mask] = False\n return pi1, pi2", "title": "" }, { "docid": "a70c4942e6fb60ea2de6bc7103167dfd", "score": "0.46151423", "text": "def metis(adj_t: SparseTensor, num_parts: int, recursive: bool = False,\n log: bool = True) -> Tuple[Tensor, Tensor]:\n\n if log:\n t = time.perf_counter()\n print(f'Computing METIS partitioning with {num_parts} parts...',\n end=' ', flush=True)\n\n num_nodes = adj_t.size(0)\n\n if num_parts <= 1:\n perm, ptr = torch.arange(num_nodes), torch.tensor([0, num_nodes])\n else:\n rowptr, col, _ = adj_t.csr()\n cluster = partition_fn(rowptr, col, None, num_parts, recursive)\n cluster, perm = cluster.sort()\n ptr = torch.ops.torch_sparse.ind2ptr(cluster, num_parts)\n\n if log:\n print(f'Done! [{time.perf_counter() - t:.2f}s]')\n\n return perm, ptr", "title": "" }, { "docid": "9ac50b046e4eec1b513651cf13ddc3fa", "score": "0.46030438", "text": "def independent_set_ip(graph): \n\n G = graph\n\n #return nx.maximal_independent_set(G) \n \n varsDict = {}\n\n for i in G.nodes_iter(data=True):\n #create a vriable\n varName = i[1]['id']\n\n # print varName\n\n varsDict[varName] = pulp.LpVariable(varName, 0, 1, cat='Integer')\n \n prob = pulp.LpProblem(\"problem\", pulp.LpMaximize)\n\n\n #print pulp.varsDict\n\n for i in G.edges_iter(data=True): \n # print i\n sta = i[0]\n end = i[1] \n x = varsDict[sta]\n y = varsDict[end]\n\n prob += x + y <= 1\n\n\n # every variable x(v) is a node\n # x(v) is in (0,1)\n \n\n prob += sum(varsDict.values()) \n\n prob.solve()\n\n # w(1)*x(1) + w(2)*x(2) + ... / maximize sum over v (x(v))\n # y(v) is in (0,1)\n # a(1)*x(1) + a(2)*x(2) + ... <= b\n # for every edge (u,v), x(u) + x(v) <= 1\n # print 'TEST'\n # x(1), x(2), x(3)\n # expression x(1) + x(2) + x(3)\n #\n # 1-----2\n # |\n # 3\n # x(1) + x(2) <= 1\n # x(1) + x(3) <= 1\n # x1 = LpVariable(\"x1\", 0, 1, cat='Integer')\n # x2 = LpVariable(\"x2\", 0, 1, cat='Integer')\n # x3 = LpVariable(\"x3\", 0, 1, cat='Integer')\n # for k,v in varsDict.iteritems():\n # aTuple = (k,pulp.value(v))\n # listOfTuples.append(aTuple)\n listOfTuples = [(k, pulp.value(v)) for k,v in varsDict.iteritems()]\n\n # print len(nx.maximal_independent_set(G)), pulp.value(sum(varsDict.values()))\n\n return listOfTuples#list of tuples of nodes and the vlaue is going o tbe zero or 1", "title": "" }, { "docid": "519b8c7895b8cde2fc404f7581de9a71", "score": "0.46021736", "text": "def independent_set_lp(graph): \n # TODO: implement function\n G = graph\n\n #return nx.maximal_independent_set(G) \n \n varsDict = {}\n\n for i in G.nodes_iter(data=True):\n #create a vriable\n varName = i[1]['id']\n\n # print varName\n\n varsDict[varName] = pulp.LpVariable(varName, 0, 1)\n \n prob = pulp.LpProblem(\"problem\", pulp.LpMaximize)\n\n\n #print pulp.varsDict\n\n for i in G.edges_iter(data=True): \n # print i\n sta = i[0]\n end = i[1] \n x = varsDict[sta]\n y = varsDict[end]\n\n prob += x + y <= 1\n\n\n # every variable x(v) is a node\n # x(v) is in (0,1)\n \n\n prob += sum(varsDict.values()) \n\n prob.solve()\n\n # w(1)*x(1) + w(2)*x(2) + ... / maximize sum over v (x(v))\n # y(v) is in (0,1)\n # a(1)*x(1) + a(2)*x(2) + ... 
<= b\n # for every edge (u,v), x(u) + x(v) <= 1\n # print 'TEST'\n # x(1), x(2), x(3)\n # expression x(1) + x(2) + x(3)\n #\n # 1-----2\n # |\n # 3\n # x(1) + x(2) <= 1\n # x(1) + x(3) <= 1\n # x1 = LpVariable(\"x1\", 0, 1, cat='Integer')\n # x2 = LpVariable(\"x2\", 0, 1, cat='Integer')\n # x3 = LpVariable(\"x3\", 0, 1, cat='Integer')\n # for k,v in varsDict.iteritems():\n # aTuple = (k,pulp.value(v))\n # listOfTuples.append(aTuple)\n listOfTuples = [(k, pulp.value(v)) for k,v in varsDict.iteritems()]\n\n # print len(nx.maximal_independent_set(G)), pulp.value(sum(varsDict.values()))\n\n return listOfTuples#list of tuples of nodes and the vlaue is going o tbe zero or 1", "title": "" }, { "docid": "2c9c7a1906741af291a09a36360bdaa6", "score": "0.46008492", "text": "def partition(nn_state, visible_space):\n free_energies = -nn_state.rbm_am.effective_energy(visible_space)\n max_free_energy = free_energies.max()\n\n f_reduced = free_energies - max_free_energy\n logZ = max_free_energy + f_reduced.exp().sum().log()\n return logZ.exp()", "title": "" }, { "docid": "89e5dac6da0053541e4f0bb33b56c042", "score": "0.4598066", "text": "def _get_vars_to_partition(self):\n vars_to_partition = {}\n unpartitioned_vars = {}\n for node in self.node_config:\n partitioner = getattr(node, 'partitioner')\n if partitioner:\n reduction_destinations = []\n for part in node.part_config:\n synchronizer = getattr(part, part.WhichOneof('synchronizer'))\n if hasattr(synchronizer, 'reduction_destination'):\n reduction_destinations.append(synchronizer.reduction_destination)\n else:\n reduction_destinations.append('')\n vars_to_partition[node.var_name] = (partitioner, reduction_destinations)\n logging.info(\"Partitioning variable {} with configuration {}\".format(node.var_name, partitioner))\n else:\n grad, _, _ = self.graph_item.var_op_name_to_grad_info[get_op_name(node.var_name)]\n unpartitioned_vars[node.var_name] = grad\n return vars_to_partition, unpartitioned_vars", "title": "" }, { "docid": "9e7583f68b81cb20451f858dd831c65e", "score": "0.45363924", "text": "def hidden_matrix_partition(data_points: np.ndarray, clusters: np.ndarray, beta: float):\n hidden_matrix = np.exp(-(np.sum((data_points - clusters) ** 2, axis=2) ** 0.5 * beta))\n hidden_matrix = hidden_matrix / hidden_matrix.sum(axis=0)\n\n return hidden_matrix", "title": "" }, { "docid": "58bc3538924a1fe3e791441185ca1283", "score": "0.45310557", "text": "def min_var_split(partition, k, next_label):\n moments = partition.aggregate(\n np.zeros((3, k)),\n lambda x, row: x + np.array([np.ones(k), row[1], row[1] ** 2]),\n lambda x, y: x + y\n )\n means = moments[1] / moments[0]\n variances = moments[2] / moments[0] - means ** 2\n axis = np.argmax(variances)\n return mean_var_split(partition, axis, next_label, means[axis], variances[axis]), axis", "title": "" }, { "docid": "814aaf635049835e2519264ed1973d22", "score": "0.4524478", "text": "def make_L_no_diag(d_nodes_in_graph):\n d2 = d_nodes_in_graph ** 2\n n_vars = d_nodes_in_graph * (d_nodes_in_graph - 1)\n L = np.zeros(shape=(d2, n_vars))\n L_col = 0\n for L_row, (i, j) in enumerate(\n itertools.product(range(d_nodes_in_graph), repeat=2)\n ):\n if i == j:\n pass\n else:\n L[L_row, L_col] = 1\n L_col += 1\n return L", "title": "" }, { "docid": "4f52850d3bd361c9e2b3ca633f934c00", "score": "0.44976318", "text": "def domainsFromAssignment(assignment: Dict[Variable, Value], variables: Set[Variable]) -> Dict[Variable, Set[Value]]:\n domains = {v: v.startDomain for v in variables}\n for var, val in assignment.items():\n 
domains[var] = {val}\n return domains", "title": "" }, { "docid": "1188eb0a88ea2a07bc77259c007d2ee2", "score": "0.44882283", "text": "def partition(self,water_density) :\n from scipy.interpolate import interp1d\n f = interp1d(water_density[:,0],water_density[:,1],kind=\"cubic\")\n rho = f(self.z)\n rho = rho/rho.max()\n expav = np.exp(-self.av/self.kT) - rho\n expstd = np.abs(expav*(-self.std/self.kT))\n bndint,bndstd = self._trapz(self.z,expav,expstd)\n if bndint < 0 :\n expstd[expstd<0] = 0\n expav[expav<0] = 0\n bndint,bndstd = self._trapz(self.z,expav,expstd)\n# f = plt.figure(999)\n #f.gca().plot(water_density[:,0],water_density[:,1])\n# f.gca().plot(self.z,self.av)\n# f.gca().plot(self.z,rho,'--')\n# f.gca().plot(self.z,f2(self.z),'-*')\n# f.savefig(\"partition.png\",format=\"png\")\n return bndint,bndstd", "title": "" }, { "docid": "61acf219752ed0424a30f34a13230d7b", "score": "0.44871846", "text": "def computePartitionFunction(mn):\r\n P=getUnnormalizedProbabilityDistribution(mn)\r\n return P.sum()", "title": "" }, { "docid": "d5bc7468e6523103d2229faadf6f7c87", "score": "0.44846064", "text": "def get_numeric_incidence_matrix(variables, constraints):\n # NOTE: There are several ways to get a numeric incidence matrix\n # from a Pyomo model. Here we get the numeric incidence matrix by\n # creating a temporary block and using the PyNumero ASL interface.\n comps = list(variables) + list(constraints)\n _check_unindexed(comps)\n block = create_subsystem_block(constraints, variables)\n block._obj = Objective(expr=0)\n nlp = PyomoNLP(block)\n return nlp.extract_submatrix_jacobian(variables, constraints)", "title": "" }, { "docid": "d67ba1ff25d4810fa1a7b46682f63293", "score": "0.448442", "text": "def _partition_D(self):\n\n R1_indices = []\n R2_indices = []\n for i in range(12):\n if self.Releases[i] == False:\n R1_indices.append(i)\n else:\n R2_indices.append(i)\n \n return R1_indices, R2_indices", "title": "" }, { "docid": "bc5e8f220752d410c79c957232cf7ddf", "score": "0.44652802", "text": "def local_set_fixed(y, X, K, d, p2i, i2p):\n iX = list(map(p2i.__getitem__, X))\n iy = p2i[y]\n \n i_trunc = np.argsort(K[np.ix_([iy], iX)].flatten())[::-1][:d].tolist()\n iX_trunc = np.array(iX)[i_trunc]\n X_trunc = list(map(i2p.__getitem__, iX_trunc))\n #print('Length of local covariance set : d = ', len(X_trunc), \", largest = \", K[np.ix_([iy], [np.array(iX)[i_trunc[0]]])],\" smallest =\", K[np.ix_([iy], [np.array(iX)[i_trunc[-1]]])]) \n #pdb.set_trace()\n\n\n return X_trunc", "title": "" }, { "docid": "21955b5cba391879cd30e946bbb4beff", "score": "0.44581193", "text": "def shrink_point_metis(dataset='cora', n_part=2):\r\n # adj_matrix = np.load('processed/{}-processed.npz'.format(dataset))\r\n # print(adj_matrix)\r\n adjacency_list = trans_dataset(dataset)\r\n # n_cuts切割的边数, membership是一个数组, 长度为点总数, 数值为[0,n_part-1]\r\n n_cuts, membership = pymetis.part_graph(n_part, adjacency=adjacency_list)\r\n print(n_cuts)\r\n\r\n nodes_parts = []\r\n for i in range(n_part):\r\n nodes_parts.append(np.argwhere(np.array(membership) == i).ravel())\r\n print(nodes_parts, nodes_parts[0].shape, nodes_parts[1].shape)\r\n\r\n return node_parts", "title": "" }, { "docid": "95615eee25b0bdbdb1e525c17f90582e", "score": "0.44503754", "text": "def gn_graph_partition(g):\n ### Start with initial graph\n c = connected_components(g)\n q = compute_q(g, c)\n partitions = [(q, c)]\n\n ### Copy graph so we can partition it without destroying original\n newg = copy_graph(g)\n\n ### Iterate until there are no remaining edges in the 
graph\n while True:\n ### Compute betweenness on the current graph\n btwn = shortest_path_edge_betweenness(newg)\n if not btwn:\n ### No information was computed, we're done\n break\n\n ### Find all the edges with maximum betweenness and remove them\n maxbtwn = max(btwn.values())\n maxedges = [edge for edge, b in btwn.iteritems() if b == maxbtwn]\n remove_edges(newg, maxedges)\n\n ### Compute the new list of connected components\n c = connected_components(newg)\n if len(c) > len(partitions[-1][1]):\n ### This is a new partitioning, compute Q and add it to\n ### the list of partitions.\n q = compute_q(g, c)\n partitions.append((q, c))\n\n return partitions", "title": "" }, { "docid": "04acc155b6dea6d27a4b45057eed1f86", "score": "0.4446456", "text": "def _classify_variables(constraints, variables='x', nvars=None):\n if \">\" in constraints or \"<\" in constraints:\n raise NotImplementedError, \"cannot classify inequalities\" \n\n from mystic.symbolic import replace_variables, get_variables\n #XXX: use solve? or first if not in form xi = ... ?\n if list_or_tuple_or_ndarray(variables):\n if nvars is not None: variables = variables[:nvars]\n constraints = replace_variables(constraints, variables)\n varname = '$'\n ndim = len(variables)\n else:\n varname = variables # varname used below instead of variables\n myvar = get_variables(constraints, variables)\n if myvar: ndim = max([int(v.strip(varname)) for v in myvar]) + 1\n else: ndim = 0\n if nvars is not None: ndim = nvars\n\n eqns = constraints.splitlines()\n indices = range(ndim)\n dep = []\n indep = []\n for eqn in eqns: # find which variables are used\n if eqn:\n for var in range(ndim):\n if indices.count(var) != 0:\n if eqn.find(varname + str(var)) != -1:\n indep.append(var)\n indices.remove(var)\n indep.sort()\n _dep = []\n for eqn in eqns: # find which variables are on the LHS\n if eqn:\n split = eqn.split('=')\n for var in indep:\n if split[0].find(varname + str(var)) != -1:\n _dep.append(var)\n indep.remove(var)\n break\n _dep.sort()\n indep = _dep + indep # prefer variables found on LHS\n for eqn in eqns: # find one dependent variable per equation\n _dep = []\n _indep = indep[:]\n if eqn:\n for var in _indep:\n if eqn.find(varname + str(var)) != -1:\n _dep.append(var)\n _indep.remove(var)\n if _dep:\n dep.append(_dep[0])\n indep.remove(_dep[0])\n #FIXME: 'equivalent' equations not ignored (e.g. x2=x2; or x2=1, 2*x2=2)\n \"\"\"These are good:\n >>> constraints = '''\n ... x0 = x4**2\n ... x2 - x4 - x3 = 0.'''\n >>> _classify_variables(constraints, nvars=5)\n {'dependent': ['x0','x2'], 'independent': ['x3','x4'], 'unconstrained': ['x1']}\n >>> constraints = '''\n ... x0 + x2 = 0.\n ... x0 + 2*x2 = 0.'''\n >>> _classify_variables(constraints, nvars=5)\n {'dependent': ['x0','x2'], 'independent': [], 'unconstrained': ['x1','x3','x4']}\n\n This is a bug:\n >>> constraints = '''\n ... x0 + x2 = 0.\n ... 
2*x0 + 2*x2 = 0.'''\n >>> _classify_variables(constraints, nvars=5)\n {'dependent': ['x0','x2'], 'independent': [], 'unconstrained': ['x1','x3','x4']}\n \"\"\" #XXX: should simplify first?\n dep.sort()\n indep.sort()\n # return the actual variable names (not the indices)\n if varname == variables: # then was single variable\n variables = [varname+str(i) for i in range(ndim)]\n dep = [variables[i] for i in dep]\n indep = [variables[i] for i in indep]\n indices = [variables[i] for i in indices]\n d = {'dependent':dep, 'independent':indep, 'unconstrained':indices}\n return d", "title": "" }, { "docid": "831568e1fda56d5f621f65868513ab3c", "score": "0.44395167", "text": "def hpdi(marginal, marginal_labels, alpha):\n # Calculate credible mass\n cred_mass = 1 - alpha\n \n # Sort marginal distribution\n # Sort marginal and extract indices\n sort_idx = np.argsort(marginal)\n # Sort marginal vector itself\n sort_marg = np.sort(marginal)[::-1]\n # Label Idxs as binary \n binary_idx = np.where(np.cumsum(sort_marg) <= cred_mass)\n\n # Label points that are part of highest posterior interval\n hpdi = marginal_labels[sort_idx[binary_idx]]\n \n # Calculate cutoff probability\n # i.e., above this probability, all points are part of hpdi\n hpdi_px = sort_marg[binary_idx].min()\n\n return hpdi_px, hpdi", "title": "" }, { "docid": "ebe2673b587b73f957c2ef0028810cb1", "score": "0.44350034", "text": "def Partitioner(q,Inv_Beta,Posterior,x):\n # TODO Make the posterior and x a single object that is used for distribution evaluations\n # TODO and has pairs of points and evaluations at those points, can add or remove points\n # TODO with methods and map them according to affines\n \n m = Inv_Beta.n #get the number of maps being used \n Q = np.zeros([m,x.num]) #initialise the partition functions\n \n for j in range(m):\n #backmap the points from the posterior to the intermediate\n y = x.map(Inv_Beta,j)\n #determine the current mixture using a change of variables\n Q[j,:] = q[j] * multivariate_normal.pdf(y.all,mean=np.zeros(x.d),cov=np.eye(x.d)) * np.linalg.det(Inv_Beta.A[j,:,:])\n \n #now we have the total mixture\n mix = np.sum(Q,axis=0)\n P = np.zeros([m,x.num])\n\n for j in range(m):\n #the partitioner can be found from these\n Q[j,:] /= mix\n #apply the partitioner to the posterior evaluations to get the partitioned components\n P[j,:] = Posterior * Q[j,:]\n \n return Q, P, mix", "title": "" }, { "docid": "3abd61a4702c20ba28ba0d0d9acbd2de", "score": "0.4429201", "text": "def partition(x):\n\n # INSERT YOUR CODE HERE\n xsubset = {}\n for i in range(len(x)):\n if xsubset.get(x[i]) != None:\n xsubset[x[i]].append(i)\n else:\n xsubset[x[i]] = [i]\n return xsubset", "title": "" }, { "docid": "032d50851d7f0905ae476b48c5ae3878", "score": "0.44232932", "text": "def get_diagonal_blocks(self, variables=None, constraints=None):\n variables, constraints = self._validate_input(variables, constraints)\n matrix = self._extract_submatrix(variables, constraints)\n\n if self.row_block_map is None or self.col_block_map is None:\n block_rows, block_cols = get_diagonal_blocks(matrix)\n else:\n block_rows, block_cols = get_blocks_from_maps(\n self.row_block_map, self.col_block_map\n )\n block_cons = [[constraints[i] for i in block] for block in block_rows]\n block_vars = [[variables[i] for i in block] for block in block_cols]\n return block_vars, block_cons", "title": "" }, { "docid": "45e979f6d0fa385508c832cb73b8a810", "score": "0.43934685", "text": "def arrayToDic(partitionArray):\n dic={}\n global inv\n \n #print \"inv\"\n 
#pprint(inv)\n \n for i in inv:\n \n #print str(i) \n #print \"inv \"+inv[i]\n #print \" -- \"+str( partitionArray[i])\n \n dic[inv[i]]=partitionArray[i]\n #pprint(dic)\n \n return dic", "title": "" }, { "docid": "f34f5842d9d668f91be39e2ecc2a71cd", "score": "0.4376909", "text": "def eliminates(self,var):\n if var.name() in self.var_names:\n q=UtilityTable()\n for i in range(self.nbrDim()):\n if self.variable(i)!=var:\n q.add(self.variable(i))\n if q.nbrDim()>0:\n q.marginalize(self)\n else:\n q=[self.sum()]\n return q\n else:\n return self", "title": "" }, { "docid": "7d9fd9dec33f73d77f3683d92e58535b", "score": "0.43691096", "text": "def getPartition(self,Ps=[1,1,1],delta=None):\n\n if delta == None:\n # Casting to list if it is a constant\n Ps = [Ps] if not isinstance(Ps,list) else Ps\n\n assert isinstance(Ps,list)\n assert 0 < len(Ps) <= 3\n\n if len(Ps) == 1:\n # If the length is one then replicate\n Ps = 3*Ps \n else: \n raise ValueError('Either assign one for all or all of them at once')\n\n # Unpack\n Px,Py,Pz = Ps\n else:\n # Casting to a list\n delta = [delta] if not isinstance(delta,list) else delta\n \n assert isinstance(delta,list)\n assert 0 < len(delta) <= 3\n\n if len(delta) == 1:\n # If the length is one then replicate\n delta = 3*delta \n else: \n raise ValueError('Either assign one for all or all of them at once')\n\n # Unpack\n delta_x,delta_y,delta_z = delta\n\n assert 0 < delta_x <= self.L and 0 < delta_y <= self.W and 0 < delta_z <= self.H\n\n Px = int(self.L/delta_x)\n Py = int(self.W/delta_y)\n Pz = int(self.H/delta_z)\n\n partitionedPlanes = []\n\n Ps = [[Px,Py],[Px,Py],[Py,Pz],[Py,Pz],[Px,Pz],[Px,Pz]]\n\n\n # The bottom face is the fourth face\n for idx in [0,1,2,4,5]: # iteration over each face\n partitionedPlanes.append(self.listPlanes[idx].getPartition(Ps=Ps[idx]))\n\n return list(itertools.chain.from_iterable(partitionedPlanes))", "title": "" }, { "docid": "742b4c6400d7e007c177a0639b6c3be8", "score": "0.435265", "text": "def _create_inter_partition_adjacency_groups(mesh, part_per_element,\n part_mesh_groups, all_neighbor_parts, nonlocal_adj_data, bdry_data,\n boundary_tag_bit):\n global_elem_to_neighbor_elem = _compute_global_elem_to_part_elem(\n part_per_element, all_neighbor_parts, mesh.element_id_dtype)\n\n inter_partition_adj_groups = []\n\n for i_part_grp in range(len(part_mesh_groups)):\n nl = nonlocal_adj_data[i_part_grp]\n bdry = bdry_data[i_part_grp]\n if nl is None and bdry is None:\n # Neither non-local adjacency nor boundary\n elements = np.array([], dtype=mesh.element_id_dtype)\n element_faces = np.array([], dtype=mesh.face_id_dtype)\n neighbor_parts = np.array([], dtype=np.int32)\n neighbors = np.array([], dtype=mesh.element_id_dtype)\n neighbor_elements = np.array([], dtype=mesh.element_id_dtype)\n neighbor_faces = np.array([], dtype=mesh.face_id_dtype)\n\n elif bdry is None:\n # Non-local adjacency only\n elements = nl.elements\n element_faces = nl.element_faces\n neighbor_parts = nl.neighbor_parts\n neighbors = np.empty_like(elements)\n for inonlocal in range(len(neighbors)):\n i_neighbor_part = neighbor_parts[inonlocal]\n from meshmode.mesh import BTAG_REALLY_ALL, BTAG_PARTITION\n neighbors[inonlocal] = -(\n boundary_tag_bit(BTAG_REALLY_ALL)\n | boundary_tag_bit(BTAG_PARTITION(i_neighbor_part)))\n neighbor_elements = global_elem_to_neighbor_elem[nl.global_neighbors]\n neighbor_faces = nl.neighbor_faces\n\n elif nl is None:\n # Boundary only\n nelems = len(bdry.elements)\n elements = bdry.elements\n element_faces = bdry.element_faces\n 
neighbor_parts = np.empty(nelems, dtype=np.int32)\n neighbor_parts.fill(-1)\n neighbors = bdry.neighbors\n neighbor_elements = np.empty(nelems, dtype=mesh.element_id_dtype)\n neighbor_elements.fill(-1)\n neighbor_faces = np.empty(nelems, dtype=mesh.face_id_dtype)\n neighbor_faces.fill(-1)\n\n else:\n # Both; need to merge together\n nnonlocal = len(nl.elements)\n nbdry = len(bdry.elements)\n nelems = nnonlocal + nbdry\n elements = np.empty(nelems, dtype=mesh.element_id_dtype)\n element_faces = np.empty(nelems, dtype=mesh.face_id_dtype)\n neighbor_parts = np.empty(nelems, dtype=np.int32)\n neighbors = np.empty(nelems, dtype=mesh.element_id_dtype)\n neighbor_elements = np.empty(nelems, dtype=mesh.element_id_dtype)\n neighbor_faces = np.empty(nelems, dtype=mesh.face_id_dtype)\n\n # Combine lists of elements/faces and sort to assist in merging\n combined_elements = np.concatenate((nl.elements, bdry.elements))\n combined_element_faces = np.concatenate((nl.element_faces,\n bdry.element_faces))\n perm = np.lexsort([combined_element_faces, combined_elements])\n\n # Merge non-local part\n nonlocal_indices = np.where(perm < nnonlocal)[0]\n elements[nonlocal_indices] = nl.elements\n element_faces[nonlocal_indices] = nl.element_faces\n neighbor_parts[nonlocal_indices] = nl.neighbor_parts\n for imerged in nonlocal_indices:\n i_neighbor_part = neighbor_parts[imerged]\n from meshmode.mesh import BTAG_REALLY_ALL, BTAG_PARTITION\n neighbors[imerged] = -(\n boundary_tag_bit(BTAG_REALLY_ALL)\n | boundary_tag_bit(BTAG_PARTITION(i_neighbor_part)))\n neighbor_elements[nonlocal_indices] = global_elem_to_neighbor_elem[\n nl.global_neighbors]\n neighbor_faces[nonlocal_indices] = nl.neighbor_faces\n\n # Merge boundary part\n bdry_indices = np.where(perm >= nnonlocal)[0]\n elements[bdry_indices] = bdry.elements\n element_faces[bdry_indices] = bdry.element_faces\n neighbors[bdry_indices] = bdry.neighbors\n neighbor_parts[bdry_indices] = -1\n neighbor_elements[bdry_indices] = -1\n neighbor_faces[bdry_indices] = -1\n\n from meshmode.mesh import InterPartitionAdjacencyGroup\n inter_partition_adj_groups.append(InterPartitionAdjacencyGroup(\n igroup=i_part_grp, ineighbor_group=None, elements=elements,\n element_faces=element_faces, neighbors=neighbors,\n neighbor_partitions=neighbor_parts,\n partition_neighbors=neighbor_elements,\n neighbor_faces=neighbor_faces))\n\n return inter_partition_adj_groups", "title": "" }, { "docid": "27f23d0877341a64a47f06eadeacaf51", "score": "0.43348268", "text": "def log_partitions(self) -> Tensor:\n assert self.mask is not None\n\n if self.proj:\n lengths = self.mask.long().sum(dim=1)\n crf = DependencyCRF(unconvert(self.scores), lengths - 1, multiroot=self.multiroot)\n return crf.partition\n\n return compute_log_partitions(self.scores, self.mask, self.multiroot)", "title": "" }, { "docid": "05cba6acc3f1a1831b517b11b0fe6f06", "score": "0.43167648", "text": "def GetDecomposition (fluid, Terms = 10, MinPanelWidth = 1):\n\t\n\tDecompPath = GetDecompPath()\n\t\n\timport DecompositionGroup as G\n\t\n\t# Try loading the decomposition\n\ttry:\n\t\tDecompGroup = G.DecompositionGroup(DecompPath, fluid, Terms, MinPanelWidth)\n\texcept Exception, (msg):\n\t\tfolder = G.DecompFolderRoot(DecompPath, fluid)\n\t\tprint \"Decomposition group at\\n\t\", folder, \"\\ncouldn't be loaded.\\n\"\n\t\tprint msg\n\n\t\tresponse = raw_input(\"Would you like to create it? 
(y/n)\")\n\t\n\t\tif response[0] == 'n' or response[0] == 'N':\n\t\t\traise Exception(\"Implicit simulation requires a decomposition group\")\n\n\t\tprint\n\t\tCreateDecomposition(fluid, Terms, MinPanelWidth)\n\t\tprint \"Done.\"\n\n\t\tDecompGroup = G.DecompositionGroup(DecompPath, fluid, Terms, MinPanelWidth)\n\n\treturn DecompGroup", "title": "" }, { "docid": "bced0eae15df2017a2379435326cbab9", "score": "0.430087", "text": "def prepare_to_add_variables(dataset):\n\n\tnum_samples = num_centres = len(dataset)\n\t_id = 0\n\n\t# P and C array contain the id of the corresponding variables\n\tP = np.zeros([num_samples, num_centres]).astype(int)\n\tC = np.zeros([num_samples, num_samples, num_centres]).astype(int)\n\tY = np.zeros([num_samples]).astype(int)\n\n\tprobability_variables = []\n\tfor _point in range(num_samples):\n\t\tfor _centre in range(num_centres):\n\n\t\t\tprobability_variables.append(\"P_{point}_{centre}\".format(\n\t\t\t\t\tpoint = _point,\n\t\t\t\t\tcentre = _centre\n\t\t\t\t))\n\t\t\t# Keep track of P_i_j's position in the lp variable vector \n\t\t\tP[_point][_centre] = _id\n\t\t\t_id += 1\n\n\tabs_constraint_variables = []\n\tfor _point1 in range(num_samples):\n\t\tfor _point2 in range(_point1 + 1, num_samples):\n\t\t\tfor _centre in range(num_centres):\n\n\t\t\t\tabs_constraint_variables.append(\"C_{point1}_{point2}_{centre}\".format(\n\t\t\t\t\t\tpoint1 = _point1,\n\t\t\t\t\t\tpoint2 = _point2,\n\t\t\t\t\t\tcentre = _centre\n\t\t\t\t\t))\n\t\t\t\t# Keep track of C_i_j_k's position in lp variable vector\n\t\t\t\tC[_point1][_point2][_centre] = C[_point2][_point1][_centre] = _id\n\t\t\t\t_id += 1\n\n\tcluster_frac_variables = []\n\tfor centre in range(num_centres):\n\n\t\tcluster_frac_variables.append(\"Y_{centre}\".format(\n\t\t\t\tcentre = centre,\n\t\t\t))\n\n\t\t# Keep track of Y_k's position in lp variable vector\n\t\tY[centre] = _id\n\t\t_id += 1\n\n\t# Concatenating the names of both the types of variables\n\tvariable_names = probability_variables + abs_constraint_variables + cluster_frac_variables\n\n\t# Setting lower bound = 0 and upper bound = 1 for all the variables\n\tnum_variables = len(variable_names)\n\tlower_bound = [0 for i in range(num_variables)]\n\tupper_bound = [1 for i in range(num_variables)]\n\n\t# Computing the coefficients for objective function\n\tobjective = cost_function(dataset, num_variables)\n\n\treturn objective, lower_bound, upper_bound, variable_names, P, C, Y", "title": "" }, { "docid": "015958019b4c71e7dbf89f110f81cbfb", "score": "0.43006065", "text": "def part_graph(graph, k, df=None):\n edgecuts, parts = metis.part_graph(graph, 2, objtype=\"cut\", ufactor=250, seed=42)\n # print(edgecuts)\n for i, p in enumerate(graph.nodes()):\n graph.node[p][\"cluster\"] = parts[i]\n if df is not None:\n df[\"cluster\"] = nx.get_node_attributes(graph, \"cluster\").values()\n return graph", "title": "" }, { "docid": "57bcec1b51c84cc0d041a5cb00eb78ce", "score": "0.42888075", "text": "def build_partition(self, clust):\n d = {i:set() for i in range(self.K)}\n for i in range(len(clust)):\n d[clust[i]].add(i)\n return (d)", "title": "" }, { "docid": "8ddd7ae7282c1a5db21026212ae9a8ee", "score": "0.42848808", "text": "def _get_dim_slice(self, i0, start, end, pre, mcdf=1):\n first = int(i0-pre)\n last = first + end-start-1\n dim, dm0, dm1 = self.get_dim_set(first, last)\n return slice(start, end), start, dim, dm0, dm1", "title": "" }, { "docid": "bd9f77f474f632f64b310e1cf6de5980", "score": "0.4274906", "text": "def _get_dim_slice(self, i0, start, end, pre, 
mcdf=1):\n first = int(i0-pre)\n dt = self.get_dt()\n trg = self._setting_trigger\n\n def dmx(i): return int(i*dt+trg)\n\n def dim(first, last):\n for i in range(first, last+1):\n yield dmx(i)\n return slice(start, end), first, dim, dmx", "title": "" }, { "docid": "0735704e9f255c55d174a893a98f7ffa", "score": "0.4272621", "text": "def compute_varphragmen(profile, committeesize):\n return rule_approval_ilp.compute_optphragmen_ilp(profile, committeesize, \"varphrag\")", "title": "" }, { "docid": "4ae49aa18c392044a615550e9a4d222d", "score": "0.4269403", "text": "def _build_partition_grid(dask_divisions, p):\n distance_grid = _build_distance_grid(p)\n search_divisions = np.array(\n list(dask_divisions[1:-1]))\n\n side_length = 2 ** p\n partition_grid = np.zeros((side_length, side_length), dtype=np.int64)\n for i in range(side_length):\n for j in range(side_length):\n partition_grid[i, j] = np.searchsorted(\n search_divisions,\n distance_grid[i, j],\n side='right')\n return partition_grid", "title": "" }, { "docid": "b8ba435c505ea08d6053d97ed8889f52", "score": "0.42617068", "text": "def modularity_finetune_und_sign(graph, partition=None, qtype='sta'):\n if (isinstance(graph, nx.Graph)):\n W = nx.to_numpy_matrix(graph)\n if (isinstance(graph, np.ndarray)):\n W = graph\n n = np.shape(W)[0]\n if partition is not None and isinstance(partition, dict):\n M = partition.values()\n else:\n M = np.asarray(range(0, n))\n\n W0 = np.multiply(W, (W > 0))\n W1 = -np.multiply(W, (W < 0))\n s0 = np.sum(W0[:])\n s1 = np.sum(W1[:])\n Knm0 = np.asmatrix(np.zeros([n, n]))\n Knm1 = np.asmatrix(np.zeros([n, n]))\n\n for m in range(0, np.max(M) + 1):\n Knm0[:, m] = np.sum(W0[:, M == m], 1)\n Knm1[:, m] = np.sum(W1[:, M == m], 1)\n\n Kn0 = np.asarray(Knm0.sum(1))\n Kn1 = np.asarray(Knm1.sum(1))\n # per qualche oscuro motivo di indici di np...\n Km0 = np.asarray(Knm0.sum(0))[0]\n # per qualche oscuro motivo di indici di np...\n Km1 = np.asarray(Knm1.sum(0))[0]\n\n d0, d1 = None, None\n if qtype is 'smp':\n d0, d1 = 1 / s0, 1 / s1\n elif qtype is 'gja':\n d0, d1 = 1 / (s0 + s1), 1 / (s0 + s1)\n elif qtype is 'sta':\n d0, d1 = 1 / s0, 1 / (s0 + s1)\n elif qtype is 'pos':\n d0, d1 = 1 / s0, 0\n elif qtype is 'neg':\n d0, d1 = 0, 1 / s1\n else:\n raise Exception('qtype unknown')\n\n if not s0: # adjust for absent positive weights\n s0, d0 = 1, 0\n if not s1: # adjust for absent negative weights\n s1, d1 = 1, 0\n\n f = 1\n while f:\n f = 0\n for u in np.random.permutation(n):\n ma = M[u]\n dQ0 = (Knm0[u, :] + W0[u, u] - Knm0[u, ma]) - \\\n np.multiply(Kn0[u], (Km0 + Kn0[u] - Km0[ma])) / s0\n dQ1 = (Knm1[u, :] + W1[u, u] - Knm1[u, ma]) - \\\n np.multiply(Kn1[u], (Km1 + Kn1[u] - Km1[ma])) / s1\n dQ = (d0 * dQ0 - d1 * dQ1).flat\n dQ[ma] = 0\n max_dQ, mb = np.max(dQ), np.argmax(dQ)\n if max_dQ > 1E-10:\n f = 1\n M[u] = mb\n Knm0[:, mb] += W0[:, u]\n Knm1[:, mb] += W1[:, u]\n Knm0[:, ma] -= W0[:, u]\n Knm1[:, ma] -= W1[:, u]\n Km0[mb] += Kn0[u]\n Km1[mb] += Kn1[u]\n Km0[ma] -= Kn0[u]\n Km1[ma] -= Kn1[u]\n\n communities = np.asarray(linear_remap_list(M))\n communities_dict = {}\n\n for i, n in enumerate(graph.nodes()):\n communities_dict[n] = communities[i]\n\n return communities_dict", "title": "" }, { "docid": "1412fe1b94558add49f9c096060c7e8f", "score": "0.42596105", "text": "def induced_graph(partition, graph):\n ret = nx.Graph()\n ret.add_nodes_from(partition.values())\n\n for node1, node2, datas in graph.edges_iter(data=True):\n weight = datas.get(\"weight\", 1)\n com1 = partition[node1]\n com2 = partition[node2]\n w_prec = 
ret.get_edge_data(com1, com2, {\"weight\": 0}).get(\"weight\", 1)\n ret.add_edge(com1, com2, weight=w_prec + weight)\n return ret", "title": "" }, { "docid": "5f478b034118dde9ab5f8914752c5bfa", "score": "0.42587575", "text": "def partitions(n, m = None):\n if m is None or m >= n: yield [n]\n for f in range(n-1 if (m is None or m >= n) else m, 0, -1):\n for p in partitions(n-f, f): yield [f] + p", "title": "" }, { "docid": "716b1b5e9062b6c65220231922a141e3", "score": "0.42557806", "text": "def _delsarte_LP_building(n, d, d_star, q, isinteger, solver, maxc = 0):\n from sage.numerical.mip import MixedIntegerLinearProgram\n\n p = MixedIntegerLinearProgram(maximization=True, solver=solver)\n A = p.new_variable(integer=isinteger, nonnegative=True)\n p.set_objective(sum([A[r] for r in range(n+1)]))\n p.add_constraint(A[0]==1)\n for i in range(1,d):\n p.add_constraint(A[i]==0)\n for j in range(1,n+1):\n rhs = sum([Krawtchouk(n,q,j,r,check=False)*A[r] for r in range(n+1)])\n p.add_constraint(0*A[0] <= rhs)\n if j >= d_star:\n p.add_constraint(0*A[0] <= rhs)\n else: # rhs is proportional to j-th weight of the dual code\n p.add_constraint(0*A[0] == rhs)\n\n if maxc > 0:\n p.add_constraint(sum([A[r] for r in range(n+1)]), max=maxc)\n return A, p", "title": "" }, { "docid": "dd697140d7b0eca09b31f66a06fab19e", "score": "0.4252098", "text": "def SoftRelationPartition(edges, n, has_importance=False, threshold=0.05):\n if has_importance:\n heads, rels, tails, e_impts = edges\n else:\n heads, rels, tails = edges\n print('relation partition {} edges into {} parts'.format(len(heads), n))\n uniq, cnts = np.unique(rels, return_counts=True)\n idx = np.flip(np.argsort(cnts))\n cnts = cnts[idx]\n uniq = uniq[idx]\n assert cnts[0] > cnts[-1]\n edge_cnts = np.zeros(shape=(n,), dtype=np.int64)\n rel_cnts = np.zeros(shape=(n,), dtype=np.int64)\n rel_dict = {}\n rel_parts = []\n cross_rel_part = []\n for _ in range(n):\n rel_parts.append([])\n\n large_threshold = int(len(rels) * threshold)\n capacity_per_partition = int(len(rels) / n)\n # ensure any relation larger than the partition capacity will be split\n large_threshold = capacity_per_partition if capacity_per_partition < large_threshold \\\n else large_threshold\n num_cross_part = 0\n for i in range(len(cnts)):\n cnt = cnts[i]\n r = uniq[i]\n r_parts = []\n if cnt > large_threshold:\n avg_part_cnt = (cnt // n) + 1\n num_cross_part += 1\n for j in range(n):\n part_cnt = avg_part_cnt if cnt > avg_part_cnt else cnt\n r_parts.append([j, part_cnt])\n rel_parts[j].append(r)\n edge_cnts[j] += part_cnt\n rel_cnts[j] += 1\n cnt -= part_cnt\n cross_rel_part.append(r)\n else:\n idx = np.argmin(edge_cnts)\n r_parts.append([idx, cnt])\n rel_parts[idx].append(r)\n edge_cnts[idx] += cnt\n rel_cnts[idx] += 1\n rel_dict[r] = r_parts\n\n for i, edge_cnt in enumerate(edge_cnts):\n print('part {} has {} edges and {} relations'.format(i, edge_cnt, rel_cnts[i]))\n print('{}/{} duplicated relation across partitions'.format(num_cross_part, len(cnts)))\n\n parts = []\n for i in range(n):\n parts.append([])\n rel_parts[i] = np.array(rel_parts[i])\n\n for i, r in enumerate(rels):\n r_part = rel_dict[r][0]\n part_idx = r_part[0]\n cnt = r_part[1]\n parts[part_idx].append(i)\n cnt -= 1\n if cnt == 0:\n rel_dict[r].pop(0)\n else:\n rel_dict[r][0][1] = cnt\n\n for i, part in enumerate(parts):\n parts[i] = np.array(part, dtype=np.int64)\n shuffle_idx = np.concatenate(parts)\n heads[:] = heads[shuffle_idx]\n rels[:] = rels[shuffle_idx]\n tails[:] = tails[shuffle_idx]\n if has_importance:\n 
e_impts[:] = e_impts[shuffle_idx]\n\n off = 0\n for i, part in enumerate(parts):\n parts[i] = np.arange(off, off + len(part))\n off += len(part)\n cross_rel_part = np.array(cross_rel_part)\n\n return parts, rel_parts, num_cross_part > 0, cross_rel_part", "title": "" }, { "docid": "e290aa710d9429fb83ebb366c6ddd122", "score": "0.42476836", "text": "def get_partition_covers(mu, nu):\n if not mu:\n return [{}]\n mu_row_one = mu[0]\n reduced_mu = mu.delete_first_row()\n row_partitions = [row_partition for row_partition in generate_partitions(mu_row_one) if nu.contains(row_partition)]\n return [dictionary for dictionary_list in \n [list_join({1: row_partition}, get_partition_covers(reduced_mu, nu.subtract(row_partition))) for \n row_partition in row_partitions] for dictionary in dictionary_list]", "title": "" }, { "docid": "c3582c8328b31275e064e24270280ef5", "score": "0.42406622", "text": "def create_effect_partitions(mask_partitions):\n effect_partitions = [] # result of partitioning by s'\n\n for mask_partition in mask_partitions: # cluster\n X = [s[2] for s in mask_partitions[mask_partition]] # reduce X from (s, r, s') to s'\n #TODO determine if the following line can be removed\n # mask = [True, True] + list(mask_partition[2:]) # force x && y to be factors when clustering\n mask = list(mask_partition)\n X = [list(compress(record, mask)) for record in X] # remove non-masked indices from s'\n\n\n n_clusters, labels = cluster(X)\n for i in range(n_clusters):\n X = np.array(mask_partitions[mask_partition]) # re-instantiate X as [(s,r,s'), ...]\n effect_partitions.append(X[labels == i])\n\n return effect_partitions", "title": "" }, { "docid": "9ef3ae4ab72a8957e6cd4431e0a682fb", "score": "0.4236583", "text": "def matr_subregions_division_2(var, n, partition, k, Renew,img_size):\n A = var.copy()\n # print('Initial Partition', partition)\n if partition is None:\n partition = []\n nn_up = np.ceil(img_size/n)\n for i in range(n):\n partition.append(i*nn_up)\n partition.append(img_size)\n\n elif Renew and ((n<290 and img_size==299) or img_size<200):\n partition_ = [0]\n for i in range(len(partition)-1):\n partition_.append(np.ceil((partition[i+1]+partition[i])/2))\n partition_.append(partition[i+1])\n partition = partition_\n\n if n>=img_size:\n partition = []\n nn_up = 1\n for i in range(n):\n partition.append(i*nn_up)\n partition.append(img_size)\n # check that the number of intervals is n\n if len(partition)!=n+1:\n print('----- WARNING: the partition is not exact')\n\n association = 0\n for k in range(3):\n for i in range(n):\n xi = partition[i]\n di = partition[i+1]-partition[i]\n for j in range(n):\n xj = partition[j]\n dj = partition[j+1]-partition[j]\n A = associate_block(A, xi, xj, k, di, dj, association)\n association += 1\n print(partition)\n return np.array([A]), partition", "title": "" }, { "docid": "31cbf3612e8932a5217a46d9989eaff6", "score": "0.42124474", "text": "def hasseDiagram(self):\n\n # Internal edges\n graph=[(i,cf.index) for i in range(len(self.cells)) \n for cf in self.cells[i].cofaces]\n\n # Attach external inputs to minimal elements\n graph += [(None,i) for i in range(len(self.cells)) \n if not self.faces(i)]\n\n # Attach external outputs to maximal elements\n graph += [(i,None) for i in range(len(self.cells)) \n if not self.cells[i].cofaces]\n\n return DirectedGraph(graph)", "title": "" }, { "docid": "4971e891535d187ae6b912afbd659617", "score": "0.42103374", "text": "def _partition(self, unp_matrix):\n\n # Create auxiliary lists of released/unreleased DOFs\n R1_indices, 
R2_indices = self._partition_D()\n\n # Partition the matrix by slicing\n if unp_matrix.shape[1] == 1:\n m1 = unp_matrix[R1_indices, :]\n m2 = unp_matrix[R2_indices, :]\n return m1, m2\n else:\n m11 = unp_matrix[R1_indices, :][:, R1_indices]\n m12 = unp_matrix[R1_indices, :][:, R2_indices]\n m21 = unp_matrix[R2_indices, :][:, R1_indices]\n m22 = unp_matrix[R2_indices, :][:, R2_indices]\n return m11, m12, m21, m22", "title": "" }, { "docid": "ff922e301976de11066b6e81f35f5e55", "score": "0.42102358", "text": "def get_HNF_diagonals(n):\n \n diags = []\n for i in range(1,n+1):\n if not n%i == 0:\n continue\n else:\n q = n//i\n for j in range(1,q+1):\n if not q%j == 0:\n continue\n else:\n diags.append([i,j,q//j])\n \n return diags", "title": "" }, { "docid": "27ae188db7cc7a26c52a9f3d273e5535", "score": "0.4204021", "text": "def write_partition_vtk(self):\n partition_body = np.zeros(self.Nx*self.Ny*self.Nz);\n pp = np.where(self.parts == self.rank)\n partition_body[pp] = 100 # set interior nodes for each partition\n # also signify boundary nodes and halo nodes for each rank\n partition_body[self.halo_nodes_g] = 200 # halo nodes have max value for partition\n partition_body[self.boundary_nodes_g] = 150 # boundary nodes --- intermediate to interior and halo\n vtk_filename = 'partition_map' + str(self.rank) + '.vtk'\n dims = [int(self.Nx), int(self.Ny), int(self.Nz)]\n origin = [0., 0., 0.]; \n spacing = [1., 1., 1.];\n writeVTKpt(partition_body,'partition',vtk_filename,dims,origin,spacing)", "title": "" }, { "docid": "6122ccfe1c9c756ceb8a3c0630b59d67", "score": "0.4203377", "text": "def _position_nodes(g, partition, **kwargs):\r\n\r\n communities = dict()\r\n for node, community in partition.items():\r\n try:\r\n communities[community] += [node]\r\n except KeyError:\r\n communities[community] = [node]\r\n\r\n pos = dict()\r\n for ci, nodes in communities.items():\r\n subgraph = g.subgraph(nodes)\r\n pos_subgraph = nx.spring_layout(subgraph, **kwargs)\r\n pos.update(pos_subgraph)\r\n\r\n return pos", "title": "" }, { "docid": "20ceafbab5224b3df6635acad9f50722", "score": "0.42030975", "text": "def partition(xgraph: XGraph, targets: List[str], last_layer: str=None) -> XGraph:\n\n target_registry.check_targets(targets)\n target_registry.annotate_ops(xgraph)\n\n p_xgraph = xgraph_partitioner.partition(xgraph, targets, last_layer)\n return p_xgraph", "title": "" }, { "docid": "efbfb307058f9c853998c770d7f0ca45", "score": "0.42002094", "text": "def project_graph(G, partition, min_weight, identity, edgenames, nodenames, graphname=None):\n \n nodeset = [n for n, d in G.nodes(data=True) if d[\"bipartite\"] == partition]\n print(\"Confirming partition type:\", type(nodeset[0]))\n S = bipartite.projected_graph(G, nodeset, multigraph=True)\n \n citation_dict = dict(S.degree(S.nodes()))\n \n weight_dict = Counter(S.edges())\n edge_weights = [ (u, v, {'weight': value}) \n for ((u, v), value) in weight_dict.items()]\n W = nx.Graph()\n W.add_nodes_from(S.nodes)\n W.add_edges_from(edge_weights)\n \n degree_dict = dict(W.degree(W.nodes()))\n nx.set_node_attributes(W, degree_dict, 'degree')\n nx.set_node_attributes(W, citation_dict, 'citations')\n \n to_remove = [(a,b) for a,b,attrs in W.edges(data=True) if attrs['weight'] < min_weight]\n remove_size = len(to_remove)\n \n print(f\"Removing {remove_size} edges of weight less than {min_weight}\")\n W.remove_edges_from(to_remove)\n \n isolates_list = list(nx.isolates(W))\n isolates_size = len(list(nx.isolates(W)))\n print(f\"Removing {isolates_size} isolated nodes\")\n 
W.remove_nodes_from(isolates_list)\n\n edge_df = nx.to_pandas_edgelist(W)\n save_edge_path = OUTPUTS_PATH + edgenames +'.csv'\n edge_df.to_csv(save_edge_path,index=False)\n \n nodelist = list(W.nodes(data=True))\n \n if G.name == \"S\":\n if identity == \"study\":\n node_df = pd.DataFrame(nodelist, columns=['STUDY', 'degree'])\n else:\n node_df = pd.DataFrame(nodelist, columns=['REF_DATA', 'degree'])\n save_node_path = OUTPUTS_PATH + nodenames +'.csv'\n node_df.to_csv(save_node_path,index=False)\n \n elif G.name == \"A\":\n node_df = pd.DataFrame(nodelist, columns=['AUTHOR_ID', 'degree'])\n save_node_path = OUTPUTS_PATH + nodenames +'.csv'\n node_df.to_csv(save_node_path,index=False)\n \n else:\n print(\"Node names not found\")\n \n W.name = graphname\n print(nx.info(W))\n return W", "title": "" }, { "docid": "4ff6c124fa4fd878695afafecc13ae21", "score": "0.41961607", "text": "def n_hierarchical_parameters(self, n_ids):\n n_ids = int(n_ids)\n\n return (0, n_ids * self._n_dim)", "title": "" }, { "docid": "ac92cc243ddcf521105e78f8eba8ebf9", "score": "0.41931883", "text": "def reduce_dimension():\n return ReduceDimension()", "title": "" }, { "docid": "588d465b18358509f79e3482d156db2e", "score": "0.4189588", "text": "def make_partitions(nitems, nparts):\n base, rem = divmod(nitems, nparts)\n sizes = base * nx.ones(nparts, nx.int32)\n sizes[:rem] += 1\n starts = nx.zeros(nparts, nx.int32)\n starts[1:] = nx.cumsum(sizes[:-1])\n return starts, sizes", "title": "" }, { "docid": "fbce0a62aa1ae9cf2ecf01d5e4b8fff4", "score": "0.41863427", "text": "def decode(self, hiddens):\n\n\n if self.act_dec is None:\n act_dec = lambda x: x\n else:\n act_dec = self.act_dec\n if isinstance(hiddens, tensor.Variable):\n X = act_dec(self.visbias + tensor.dot(hiddens, self.w_prime))\n else:\n X= [self.decode(v) for v in hiddens]\n\n g = T.dvector()\n for i in range(self.N):\n start = i * 21\n end = start + 20\n indexis = numpy.arange(start,end+1)\n if i==0:\n g = rescaled_softmax(theano.tensor.subtensor.take(X,indexis,axis=1,mode='raise'),min_val=1e-5)\n else:\n g =T.concatenate([g,rescaled_softmax(theano.tensor.subtensor.take(X,indexis,axis=1,mode='raise'),min_val=1e-5)],axis=1)\n\n return g", "title": "" }, { "docid": "1140d7605cb03e460275ddde7e551493", "score": "0.4180387", "text": "def select_unassigned_variable(self, assignment):\n # get list of unassigned variables\n variables = [i for i in self.domains if i not in assignment]\n # dict of heuristics for each variable\n h = {}\n for var in variables:\n domain = len(self.domains[var])\n degree = len(self.crossword.neighbors(var))\n h[var] = (domain, degree)\n # sort the variables according to the minimum remaining values in domain\n remaining = sorted(h, key=lambda x: h[x][0])\n # check for a tie between top two variables\n if len(remaining) > 1 and h[remaining[0]][0] == h[remaining[1]][0]:\n # if there is a tie return the var with the highest degree\n lowest = {remaining[0]: h[remaining[0]], remaining[1]: h[remaining[1]]}\n return sorted(lowest, key=lambda x: lowest[x][1])[-1]\n # return variable with smallest remaining values in domain\n return remaining[0]", "title": "" }, { "docid": "42899653c40c891c576df3b4232aed05", "score": "0.41772", "text": "def getDD():\n# DD fusion cross section\n DDCross=scipy.io.loadmat('/g/g19/holod1/solid_target/DDCross.mat')\n dat=DDCross.get('DDCrossSection')\n csE=dat[0][0][0][:,0];\n csS=dat[0][0][1][:,0];\n csE=np.append(0,csE);\n csS=np.append(0,csS);\n csE=csE.reshape((len(csE),1));\n csS=csS.reshape((len(csS),1));\n 
return(csE,csS)", "title": "" }, { "docid": "5e54e8e4013b4398b43674905ca0a89c", "score": "0.4173859", "text": "def _position_nodes(g, partition, **kwargs):\n\n    communities = dict()\n    for node, community in partition.items():\n        try:\n            communities[community] += [node]\n        except KeyError:\n            communities[community] = [node]\n\n    pos = dict()\n    for ci, nodes in communities.items():\n        subgraph = g.subgraph(nodes)\n        pos_subgraph = nx.spring_layout(subgraph, **kwargs)\n        pos.update(pos_subgraph)\n\n    return pos", "title": "" }, { "docid": "d339d6c40fc9b66aa07f9eedb3d8b838", "score": "0.41731587", "text": "def getDivDiag(n):\n    a = np.ones(n)\n    b = np.zeros(n)\n    c = -np.ones(n)\n    a[1] = 2\n    b[0] = -2\n    b[-1] = 2\n    c[-2] = -2\n    m = np.array([a / 2, b / 2, c / 2])\n    return sparse.dia_matrix((m, (1, 0, -1)), shape=(n, n), dtype=\"float64\")", "title": "" }, { "docid": "be1ea7f271dd6431764cc4b551ad38aa", "score": "0.41691643", "text": "def dims_trivial(me):\n  dims = me.dims\n  bins = {x:set() for x in dims}\n  for rows,vals in me._tabs.values():\n    for row in rows:\n      for x in dims:\n        if x in row:\n          bins[x].add(row[x])\n  return {x:bins[x].pop() for x in bins if 1 == len(bins[x])}", "title": "" }, { "docid": "1184664ced3eac07729fb92038a45244", "score": "0.41668814", "text": "def make_prior_mvndiag(latent_size, dtype=tf.float32, name=None):\n  with tf.name_scope(name, \"make_prior_mvndiag\", values=[]):\n    sigma = tf.fill([latent_size],\n                    np.array(1., dtype=dtype.as_numpy_dtype))\n    prior = tfd.MultivariateNormalDiag(scale_diag=sigma,\n                                       name=\"prior_distribution\")\n    return prior", "title": "" }, { "docid": "4ad88df4ff937ca929ecc28772118a5e", "score": "0.41387552", "text": "def _partition_residual(self):\n        r_int = self.interior_residual\n        r_facet = self.trace_residual\n\n        par_loop(self._transfer_kernel.partition,\n                 ufl.dx,\n                 {\"x_int\": (r_int, WRITE),\n                  \"x_facet\": (r_facet, WRITE),\n                  \"x\": (self.h1_residual, READ)})", "title": "" }, { "docid": "15c433cbf06151418e32015cb38aecbc", "score": "0.41334024", "text": "def get_structural_incidence_matrix(variables, constraints, include_fixed=True):\n    _check_unindexed(variables+constraints)\n    N, M = len(variables), len(constraints)\n    var_idx_map = ComponentMap((v, i) for i, v in enumerate(variables))\n    rows = []\n    cols = []\n    for i, con in enumerate(constraints):\n        cols.extend(var_idx_map[v] for v in\n                identify_variables(con.expr, include_fixed=include_fixed)\n                if v in var_idx_map)\n        rows.extend([i]*(len(cols) - len(rows)))\n    assert len(rows) == len(cols)\n    data = [1.0]*len(rows)\n    matrix = sp.sparse.coo_matrix( (data, (rows, cols)), shape=(M, N) )\n    return matrix", "title": "" }, { "docid": "e8b952f2331c7fcdb3538ceda7f9af11", "score": "0.41262743", "text": "def _compute_neighborhood_graph(self, X):\n        if self.truncation_size is not None:\n            k = self.truncation_size\n        else:\n            k = X.shape[0]\n\n        aff, ids = self._knn_search(X, k)\n        return aff, ids", "title": "" }, { "docid": "840ef74c5e32ef9e314f27be237b1085", "score": "0.41250855", "text": "def partition(self):\n\n        if hoomd.version.mpi_enabled:\n            
return self.cpp_mpi_conf.getPartition()\n else:\n return 0;", "title": "" }, { "docid": "9aab1cf63e86fcc13e4b6f43dbdc20c1", "score": "0.41245276", "text": "def constraints(self, decision_variables):\r\n pass", "title": "" }, { "docid": "d75f401beecf97690e10f59e721a1160", "score": "0.41096964", "text": "def make_prior_mvndiag(true_getter, base_var, name=None): # pylint: disable=unused-argument\n with tf.name_scope(name, \"make_prior_mvndiag\", [base_var]):\n sigma = tf.ones_like(base_var)\n return tfd.Independent(tfd.Normal(\n loc=np.float32(0.),\n scale=sigma,\n name=\"{}_prior\".format(name)),\n reinterpreted_batch_ndims=tf.rank(base_var))", "title": "" }, { "docid": "be991c97955d2a1e23564402e10b9088", "score": "0.41017497", "text": "def getVariableList(dataset):\n variables = [v for v in dataset.variables.keys() if v not in dataset.dimensions.keys()]\n for d in dataset.dimensions.keys():\n try:\n variables.pop(variables.index(dataset.variables[d].getncattr(\"bounds\")))\n except:\n pass\n return variables", "title": "" }, { "docid": "9a1e02df756cda2e130fe273c48612bc", "score": "0.4101072", "text": "def vandermonde_neighborhood(dx, dy, N):\n x = np.linspace(-N*dx, N*dx, 2*N + 1)\n y = np.linspace(-N*dy, N*dy, 2*N + 1)\n\n xx, yy = np.meshgrid(x,y)\n\n return vandermonde(vandermonde_bilinear_row, xx.flat, yy.flat)", "title": "" }, { "docid": "6232c5d586181f6e538b84775de5ee78", "score": "0.40928665", "text": "def PVdiag(self):\n\n pos,vel=posvel(self,pa=self.p['pa'],dyncen=self.p['dyncen'])\n return pos*self.scale()/1000", "title": "" }, { "docid": "7a0eecf0b61e9e4feec4584835684bd1", "score": "0.40925005", "text": "def MeshPartition(name, comm, mesh):\n\n size = comm.Get_size()\n rank = comm.Get_rank()\n \n if size == 1:\n # Assign global info to local directly.\n mesh.lclNCommNodes = 0\n \n mesh.lclNNodes = mesh.nNodes\n mesh.lclNodeIds = np.empty(mesh.lclNNodes, dtype=int)\n mesh.lclNSpecialHead = mesh.lclNBoundary = len(mesh.boundary)\n mesh.lclNodeIds[:mesh.lclNBoundary] = mesh.boundary\n lclNodeIds = np.arange(mesh.lclNNodes, dtype=int)\n mesh.lclNodeIds[mesh.lclNBoundary:] = lclNodeIds[~np.in1d(lclNodeIds, mesh.boundary)]\n\n sorter = np.argsort(mesh.lclNodeIds)\n mesh.lclElmNodeIds = sorter[np.searchsorted(mesh.lclNodeIds, mesh.elementNodeIds, sorter=sorter)]\n\n mesh.lclBoundary = np.where(np.in1d(mesh.lclNodeIds, mesh.boundary))[0]\n return 0\n\n\n if os.path.exists('{}.npz'.format(name)):\n data = np.load('{}.npz'.format(name), allow_pickle=True)\n\n # assignment\n mesh.nElements = data['nElms']\n mesh.elements = data['elms']\n mesh.elementsIds = data['elmIds']\n mesh.elementNodeIds = data['elmNodeIds']\n mesh.totalCommNodeIds = data['totalCommNodeIds']\n mesh.commNodeIds = data['commNodeIds']\n mesh.partition = data['partition']\n\n CalcLocalInfo(size, mesh)\n\n return 0\n\n nElms = mesh.nElements\n elms = mesh.elements\n\n # Prepare the parameters used to call partMesh.\n # elmdist: contains the elements distributed btw procs initially.\n nAvg = floor(nElms / size)\n nHProcs = nElms - nAvg * size # Number of procs contains one more elm.\n elmdist = np.append((nAvg+1) * np.arange(nHProcs+1),\n ((nAvg+1)*nHProcs) + nAvg * np.arange(1, size-nHProcs+1))\n elmdist = elmdist.astype(np.int64)\n # eptr: contains the head and end pointer to eind of each element.\n # eind: contains the initial element's node ids in each proc.\n ihead = elmdist[rank]\n itail = elmdist[rank+1]\n eptr = np.zeros(itail-ihead+1, dtype=np.int64)\n eind = np.empty(0, dtype=np.int64)\n for index, element in 
enumerate(elms[ihead:itail]):\n eptr[index+1] = eptr[index] + element.nNodes\n # For effeciency, this need to be changed to allocate space first, then assign in another loop\n eind = np.append(eind, element.nodes)\n\n # Prepare other parameters.\n tpwgts = np.ones(size) * (1.0/size)\n ubvec = np.array([1.05])\n options = np.array([0, 0, 0])\n\n # print('rank {} elmdist {} tpwgts {}'.format(self.rank, elmdist, tpwgts))\n\n # Call ParMETIS parttition mesh.\n (res, edgecut, part) = PyParMETIS_V3_PartMeshKway(\n elmdist, eptr, eind,\n ubvec=ubvec, tpwgts=tpwgts, options=options,\n wgtflag=0, numflag=0,\n ncon=1, nparts=size, ncommonnodes=2, # TODO:: Decide according to specific geometry!\n comm=comm)\n\n if res != 1: # TODO:: Connect with METIS_OK constant.\n print('Calling ParMETIS_PartMeshKway failed!')\n return -1\n\n # DEBUG:\n # print('rank {} has part result {}\\n'.format(self.rank, part))\n\n # Processor send the partition result to root,\n # then receive the elements belong to it from root.\n # Root proc receives all elements from each processor\n # and redistribute them according to the partitioning result.\n partids = np.arange(ihead, itail, dtype=np.int64)\n # Elements current processor owns.\n myElmsSize = int(nElms / size * 1.2)\n myElms = np.empty(myElmsSize, dtype=np.int64)\n\n if rank == 0:\n # Remember the whole partition result.\n mesh.partition = np.empty(nElms, dtype=np.int64)\n\n # Allocate the memory to store the unprocessed elms.\n recvElmsBuf = np.empty(nElms, dtype=np.int64)\n recvElmIdsBuf = np.empty(nElms, dtype=np.int64)\n recvElmsCounter = 0\n\n # Copy the root's partition result into mem first.\n partLength = len(part)\n recvElmsBuf[:partLength] = part\n recvElmIdsBuf[:partLength] = partids\n recvElmsCounter += partLength\n\n # Receive all 'other' elements from each processor.\n recvInfo = MPI.Status()\n for i in range(1, size):\n comm.Recv(recvElmsBuf[recvElmsCounter:], i, TAG_ELM, recvInfo) # MPI.ANY_SOURCE\n recvLen = recvInfo.Get_count(MPI.INT64_T)\n # recvSource = recvInfo.Get_source()\n\n comm.Recv(recvElmIdsBuf[recvElmsCounter:], i, TAG_ELMID, recvInfo) # recvSource\n recvElmsCounter += recvLen\n\n # print('root node collect {} elms, percentage {}.\\n'.format(recvElmsCounter, float(recvElmsCounter)/mesh.nElements))\n\n # Root starts to process the collected data and split it to corresponding process.\n # For root node, pick up directly.\n elmsFlag = (recvElmsBuf == 0)\n elmsCounter = np.sum(elmsFlag)\n\n if elmsCounter > myElmsSize:\n addonSize = elmsCounter - myElmsSize\n myElms = np.append(myElms, np.empty(addonSize, dtype=np.int64))\n print('rank {} myElms has been extended.\\n'.format(rank))\n\n myElms[:elmsCounter] = recvElmIdsBuf[elmsFlag] # This is what will be used finilly!\n # Remember the partition result.\n mesh.partition[recvElmIdsBuf[elmsFlag]] = 0\n\n for i in range(1, size):\n # Find the corresponding range of elms.\n pelmsFlag = (recvElmsBuf == i)\n\n # Start to send the elms to corresponding process.\n comm.Send(recvElmIdsBuf[pelmsFlag], dest=i, tag=TAG_ELMID)\n # Remeber the partition result.\n mesh.partition[recvElmIdsBuf[pelmsFlag]] = i\n else:\n # Other procs send the 'other' elements to root\n # and receive the ones belonging to itself from the root.\n comm.Send(part, dest=0, tag=TAG_ELM)\n comm.Send(partids, dest=0, tag=TAG_ELMID)\n\n # Receive the second part the elms that belong to the processor.\n recvInfo = MPI.Status()\n comm.Recv(myElms, 0, TAG_ELMID, recvInfo)\n elmsCounter = recvInfo.Get_count(MPI.INT64_T)\n\n 
comm.Barrier()\n\n myElms = myElms[:elmsCounter]\n\n # Update the mesh into sub-mesh in each processor,\n # notice that the [mesh] var acctually points to mesh in self.meshes.\n mesh.nElements = elmsCounter\n mesh.elements = mesh.elements[myElms]\n mesh.elementsIds = myElms\n mesh.elementNodeIds = np.array([elm.nodes for elm in mesh.elements])\n\n # !!! After the distribution/partitioning no processor has all elements in the whole mesh again.\n\n\n # Collect the common nodes between processors,\n # root collect nodes numbers from each processor and count, the node which counts more\n # than one will be set to the common and broadcast to all processors.\n # And each processor will recognize the common nodes it has according to the info it received.\n # Collect local nodes' numbers.\n\n myNodes = np.sort(np.unique(mesh.elementNodeIds.ravel()))\n # Start to send and recv to filter the common nodes.\n if rank == 0:\n # Prepare the counter vector.\n nodesCounter = np.zeros(mesh.nNodes, dtype=int)\n # Start to count.\n nodesCounter[myNodes] += 1\n # Receive and count.\n nodesBuffer = np.empty(mesh.nNodes, dtype=np.int64)\n nodesInfo = MPI.Status()\n for i in range(1, size):\n comm.Recv(nodesBuffer, MPI.ANY_SOURCE, TAG_NODE_INFO, nodesInfo)\n nodesCounter[nodesBuffer[:nodesInfo.Get_count(MPI.INT64_T)]] += 1\n # Filter out the common nodes.\n commonNodes = np.where(nodesCounter > 1)[0]\n nCommon = len(commonNodes)\n else:\n comm.Send(myNodes, 0, TAG_NODE_INFO)\n nCommon = None\n\n # Broadcast the common nodes to everyone.\n nCommon = comm.bcast(nCommon, root=0)\n if rank != 0:\n commonNodes = np.empty(nCommon, dtype=np.int64)\n comm.Bcast(commonNodes, root=0)\n\n # Recognize the common nodes I contain.\n # mesh.commNodeIds = np.array(list(set(commonNodes).intersection(myNodes)))\n mesh.totalCommNodeIds = commonNodes\n mesh.commNodeIds = np.intersect1d(commonNodes, myNodes)\n\n CalcLocalInfo(size, mesh)\n\n # Save the partition results into local files and read from files if existing.\n np.savez(name, nElms=mesh.nElements, elms=mesh.elements,\n elmIds=mesh.elementsIds, elmNodeIds=mesh.elementNodeIds,\n totalCommNodeIds=mesh.totalCommNodeIds, commNodeIds=mesh.commNodeIds,\n partition=mesh.partition)\n\n return 0", "title": "" }, { "docid": "4ed5c68fe3f47e467ba271e172515b60", "score": "0.40825534", "text": "def split_lgm_graph(dgraph):\n \n edge_type = nx.get_edge_attributes(dgraph, 'edge_type')\n labour_edges = [key for key in edge_type if edge_type[key]==0] # labour links\n goods_edges = [key for key in edge_type if edge_type[key]==1] # goods links\n \n dgraph_labour = dgraph.copy()\n dgraph_labour.remove_edges_from(dgraph.edges())\n dgraph_labour.add_edges_from(labour_edges, edge_type=0) \n\n dgraph_goods = dgraph.copy()\n dgraph_goods.remove_edges_from(dgraph.edges())\n dgraph_goods.add_edges_from(goods_edges, edge_type=1) \n\n return dgraph_labour, dgraph_goods", "title": "" }, { "docid": "30c697dfd3a895d3abd5742f2e0a2657", "score": "0.40821412", "text": "def get_on_diagonal_densities(block_ms, block_ns):\n ps = []\n for l in range(len(block_ns)):\n n = block_ns[l]\n m = block_ms[l,l]\n if n == 0 or n == 1:\n ps.append(0)\n else:\n ps.append(2 * m / (n * (n - 1)))\n return ps", "title": "" }, { "docid": "0afd96f23ca2838e537551ce71b4e742", "score": "0.40768495", "text": "def compute_diagram(x, homo_dim=1):\n rips_tree = gudhi.RipsComplex(x).create_simplex_tree(max_dimension=homo_dim)\n rips_diag = rips_tree.persistence()\n return [rips_tree.persistence_intervals_in_dimension(w) for w in 
range(homo_dim)]", "title": "" }, { "docid": "9bb9f24d01a7ca2cd6e6850d6fddb399", "score": "0.4074237", "text": "def density_mask(self, dens_grid, mindens=1e3):\n        mask = dens_grid >= mindens * sc.m_p * 2.\n        mask = mask.ravel()\n        return [i for i in range(len(mask)) if not mask[i]]", "title": "" }, { "docid": "4af91c162e230237c401ec534139675b", "score": "0.40613353", "text": "def community_layout(g, partition):\n\n    pos_communities = _position_communities(g, partition, scale=3.)\n\n    pos_nodes = _position_nodes(g, partition, scale=1.)\n\n    # combine positions\n    pos = dict()\n    for node in g.nodes():\n        pos[node] = pos_communities[node] + pos_nodes[node]\n\n    return pos", "title": "" }, { "docid": "995d749af9b5ead5421bc097814dde32", "score": "0.4054922", "text": "def ddx2(self):\r\n\r\n\t\tif self.num_compart <=1:\r\n\t\t\treturn [0]\r\n \r\n\t\tG = np.zeros((self.num_compart, self.num_compart))\r\n\t\tfor i in range(self.num_compart):\r\n\t\t\tif i > 0:\r\n\t\t\t\tG[i][i-1] = 1\r\n\t\t\tG[i][i] = -2\r\n\t\t\tif i < self.num_compart - 1:\r\n\t\t\t\tG[i][i+1] = 1\r\n\r\n\t\tcs = [0, *(map(int, np.cumsum(self.tree.es[\"leng\"])/self.dx))]\r\n\r\n\t\tdef side(e, vid):\r\n\t\t\t\"\"\"Given a vertex e, assigns a compartment to split at according to convention\r\n\t\t\t\r\n\t\t\tArgs:\r\n\t\t\t    e (iPython vertex objext): Vertex where split is happening\r\n\t\t\t    vid (int): Vertex ID\r\n\t\t\t\r\n\t\t\tReturns:\r\n\t\t\t    int: Compartment to split at\r\n\t\t\t\"\"\"\r\n\t\t\tif vid == e.tuple[0]:\r\n\t\t\t\treturn cs[e.index]\r\n\t\t\telif vid == e.tuple[1]:\r\n\t\t\t\treturn cs[e.index] + int(self.tree.es[\"leng\"][e.index]/self.dx) - 1\r\n\r\n\t\tdef connect(a, b):\r\n\t\t\t\"\"\"Creates a connection between two compartments\r\n\t\t\t\r\n\t\t\tArgs:\r\n\t\t\t    a (int): First compartment\r\n\t\t\t    b (int): Second compartment\r\n\t\t\t\"\"\"\r\n\t\t\tG[a][b] = 1\r\n\t\t\tG[b][a] = 1\r\n\r\n\t\tdef remove(i, mode):\r\n\t\t\t\"\"\"Removes all connections into or out of a given compartment\r\n\t\t\t\r\n\t\t\tArgs:\r\n\t\t\t    i (int): Compartment index\r\n\t\t\t    mode (str): Specify whether inward or outward connections are removed\r\n\t\t\t\r\n\t\t\t\"\"\"\r\n\t\t\tif i < 0 or i > len(G) - 1:\r\n\t\t\t\treturn\r\n\r\n\t\t\tif mode == \"out\":\r\n\t\t\t\tif i + 1 < len(G):\r\n\t\t\t\t\tG[i][i+1] = 0\r\n\t\t\telif mode == \"in\":\r\n\t\t\t\tif i - 1 >= 0:\r\n\t\t\t\t\tG[i][i-1] = 0\r\n\r\n\t\tfor x in cs:\r\n\t\t\tremove(x - 1, \"out\")\r\n\t\t\tremove(x, \"in\")\r\n\r\n\t\tfor v in self.tree.vs:\r\n\t\t\tel = v.all_edges()\r\n\r\n\t\t\tfirst = side(el[0], v.index)\r\n\t\t\tG[first][first] = -v.degree()\r\n\r\n\t\t\tif len(el) > 1:\r\n\t\t\t\tfor e in el[1:]:\r\n\t\t\t\t\tconnect(first, side(e, v.index))\r\n\r\n\t\tself.G = G", "title": "" }, { "docid": "e3532a46f3518eccaa209ea672504ab2", "score": "0.4050201", "text": "def IntegrateHessian(self, variables):\n        \n        #NOTE: a simpler way to do this would be to just invert the matrix, remove the relevant rows/columns,\n        #then invert again. 
I tested this, and it gives the same answer.\n if self.jtj_log == None:\n self.j_log, self.jtj_log = self.model.GetJandJtJInLogParameters(np.log(self.params))\n n_deleted = 0\n variables_idx = []\n jtj2_log = self.jtj_log.copy()\n means_log = []\n var_dict = {}\n new_idx = 0\n \n for item in variables:\n variables_idx.append(self.model.params.index_by_key(item))\n\n for var0 in range(np.size(self.jtj_log,0)):\n if var0 not in variables_idx:\n var = var0 - n_deleted\n a_log = jtj2_log[var,var]\n b_log = jtj2_log[:,var]\n b2_log = np.meshgrid(b_log,b_log)\n b2_log = b2_log[0]*b2_log[0].T\n jtj2_log = jtj2_log - b2_log/a_log\n jtj2_log = np.delete(jtj2_log, var, axis = 0)\n jtj2_log = np.delete(jtj2_log, var, axis = 1)\n \n n_deleted += 1\n else:\n name, value = self.model.params.items()[var0]\n means_log.append(np.log(value))\n var_dict[name] = new_idx\n new_idx += 1\n \n return jtj2_log, means_log, var_dict", "title": "" }, { "docid": "e2cb0be32e523a5e20678079da979948", "score": "0.4050201", "text": "def Lesions2SegMNI152(\n necrosis_mni_path, edema_mni_path, enhancing_tumor_path, subject_id\n):\n # import pdb; pdb.set_trace()\n # assert (\n # SubjectID(necrosis_mni_path)\n # == SubjectID(edema_mni_path)\n # == SubjectID(enhancing_tumor_path)\n # == subject_id\n # ), \"Subject Mismatch!!!\"\n mni152_path = os.path.join(\n necrosis_mni_path[: FindOneElement(necrosis_mni_path, \"/\")[-2]], \"MNI152\"\n )\n necrosis_mni_img = sitk.ReadImage(necrosis_mni_path)\n necrosis_mask_nda = sitk.GetArrayFromImage(necrosis_mni_img)\n edema_mask_nda = ReadImage(edema_mni_path)\n enhancing_tumor_mask_nda = ReadImage(enhancing_tumor_path)\n\n # seg in MNI 152 space\n seg_mni = np.zeros(\n (\n 5,\n necrosis_mask_nda.shape[0],\n necrosis_mask_nda.shape[1],\n necrosis_mask_nda.shape[2],\n ),\n dtype=necrosis_mask_nda.dtype,\n )\n seg_mni[1, :] = necrosis_mask_nda\n seg_mni[2, :] = edema_mask_nda\n seg_mni[4, :] = enhancing_tumor_mask_nda\n seg_mask_mni = np.argmax(seg_mni, axis=0).astype(np.int16)\n seg_name = os.path.join(mni152_path, subject_id + \"_seg_MNI152_1mm.nii.gz\")\n print(\"Working on %s\" % seg_name)\n seg_mask_mni_img = sitk.GetImageFromArray(seg_mask_mni)\n seg_mask_mni_img.CopyInformation(necrosis_mni_img)\n sitk.WriteImage(seg_mask_mni_img, seg_name)\n\n necrosis_mni_nda = np.zeros(seg_mask_mni.shape, seg_mask_mni.dtype)\n edema_mni_nda = np.zeros(seg_mask_mni.shape, seg_mask_mni.dtype)\n enhancing_mni_nda = np.zeros(seg_mask_mni.shape, seg_mask_mni.dtype)\n necrosis_mni_nda[seg_mask_mni == 1] = 1\n edema_mni_nda[seg_mask_mni == 2] = 1\n enhancing_mni_nda[seg_mask_mni == 4] = 1\n\n # whole tumor binary mask\n whole_tumor_mask_mni = necrosis_mni_nda + edema_mni_nda + enhancing_mni_nda\n whole_tumor_mask_mni_nda = whole_tumor_mask_mni.astype(np.int16)\n whole_tumor_mask_mni_name = os.path.join(\n mni152_path, subject_id + \"_whole_tumor_MNI152_1mm.nii.gz\"\n )\n whole_tumor_mask_mni_img = sitk.GetImageFromArray(whole_tumor_mask_mni_nda)\n whole_tumor_mask_mni_img.CopyInformation(necrosis_mni_img)\n assert (\n np.amax(whole_tumor_mask_mni_nda) <= 1\n ), \"Maximum of whole tumor mask not equal to 1\"\n sitk.WriteImage(whole_tumor_mask_mni_img, whole_tumor_mask_mni_name)\n\n # tumor core binary mask\n tumor_core_mask_mni = necrosis_mni_nda + enhancing_mni_nda\n tumor_core_mask_mni_nda = tumor_core_mask_mni.astype(np.int16)\n tumor_core_mask_mni_name = os.path.join(\n mni152_path, subject_id + \"_tumor_core_MNI152_1mm.nii.gz\"\n )\n tumor_core_mask_mni_img = 
sitk.GetImageFromArray(tumor_core_mask_mni_nda)\n tumor_core_mask_mni_img.CopyInformation(necrosis_mni_img)\n assert (\n np.amax(tumor_core_mask_mni_nda) <= 1\n ), \"Maximum of tumor core mask not equal to 1\"\n sitk.WriteImage(tumor_core_mask_mni_img, tumor_core_mask_mni_name)\n\n # enhancing tumor binary mask\n enhancing_tumor_mask_mni = enhancing_mni_nda\n enhancing_tumor_mask_mni_nda = enhancing_tumor_mask_mni.astype(np.int16)\n enhancing_tumor_mask_mni_name = os.path.join(\n mni152_path, subject_id + \"_enhancing_tumor_MNI152_1mm.nii.gz\"\n )\n enhancing_tumor_mask_mni_img = sitk.GetImageFromArray(enhancing_tumor_mask_mni_nda)\n enhancing_tumor_mask_mni_img.CopyInformation(necrosis_mni_img)\n assert (\n np.amax(enhancing_tumor_mask_mni_nda) <= 1\n ), \"Maximum of enhancing tumor mask not equal to 1\"\n print(enhancing_tumor_mask_mni_name)\n sitk.WriteImage(enhancing_tumor_mask_mni_img, enhancing_tumor_mask_mni_name)", "title": "" }, { "docid": "02ba1e6543f35f972e501efb5833b51e", "score": "0.40473327", "text": "def build_skeleton(nodes, independencies):\n\n nodes = list(nodes)\n\n if isinstance(independencies, Independencies):\n def is_independent(X, Y, Zs):\n return IndependenceAssertion(X, Y, Zs) in independencies\n elif callable(independencies):\n is_independent = independencies\n else:\n raise ValueError(\"'independencies' must be either Independencies-instance \" +\n \"or a ternary function that decides independencies.\")\n\n graph = UndirectedGraph(combinations(nodes, 2))\n lim_neighbors = 0\n separating_sets = dict()\n while not all([len(graph.neighbors(node)) < lim_neighbors for node in nodes]):\n for node in nodes:\n for neighbor in graph.neighbors(node):\n # search if there is a set of neighbors (of size lim_neighbors)\n # that makes X and Y independent:\n for separating_set in combinations(set(graph.neighbors(node)) - set([neighbor]), lim_neighbors):\n if is_independent(node, neighbor, separating_set):\n separating_sets[frozenset((node, neighbor))] = separating_set\n graph.remove_edge(node, neighbor)\n break\n lim_neighbors += 1\n\n return graph, separating_sets", "title": "" }, { "docid": "a4d4dfb7cec64cf62425151f0581dd8f", "score": "0.40449205", "text": "def selectNodes(self, domain='interior', ij=[]):\r\n U, V = self.block.getDivUV()\r\n nodes = list()\r\n\r\n # select all nodes except boundary nodes\r\n if domain == 'interior':\r\n istart = 1\r\n iend = U\r\n jstart = 1\r\n jend = V\r\n\r\n if domain == 'ij':\r\n istart = ij[0]\r\n iend = ij[1]\r\n jstart = ij[2]\r\n jend = ij[3]\r\n\r\n for i in range(istart, iend):\r\n for j in range(jstart, jend):\r\n nodes.append((i, j))\r\n\r\n return nodes", "title": "" }, { "docid": "31059f5f9520f4bd640f75df22696eae", "score": "0.40430892", "text": "def dense_layout():\n ch = {'ind': np.arange(NC),\n 'col': np.tile(np.array([2, 0, 3, 1]), int(NC / 4)),\n 'row': np.floor(np.arange(NC) / 2)}\n ch.update(rc2xy(ch['row'], ch['col']))\n return ch", "title": "" }, { "docid": "9ff835dc35ff1caff667350876078477", "score": "0.40428075", "text": "def partition(x, l, h):\n global qasgn\n qasgn += 1\n p = l\n for j in range(l + 1, h + 1):\n if x[j] < x[l]:\n qasgn += 3\n p += 1\n x[j], x[p] = x[p], x[j]\n qasgn += 2\n x[p], x[l] = x[l], x[p]\n return p", "title": "" }, { "docid": "743cc7accdcc8ddccb201b4df68a6b86", "score": "0.40406975", "text": "def window_partition(x: paddle.Tensor,\n window_size: int) -> Tuple[paddle.Tensor, Tuple[int, int]]:\n B, H, W, C = x.shape\n\n pad_h = (window_size - H % window_size) % window_size\n pad_w = 
(window_size - W % window_size) % window_size\n if pad_h > 0 or pad_w > 0:\n x = paddle.nn.functional.pad(x=x, pad=(0, 0, 0, pad_w, 0, pad_h, 0,\n 0)) # 每个维度分两位数进行pad\n Hp, Wp = H + pad_h, W + pad_w\n\n x = x.reshape(\n [B, Hp // window_size, window_size, Wp // window_size, window_size, C])\n windows = x.transpose([0, 1, 3, 2, 4, 5]).reshape(\n [-1, window_size, window_size, C])\n return windows, (Hp, Wp)", "title": "" }, { "docid": "40213dd4baf45861dd8a4697556411c7", "score": "0.40341967", "text": "def export_domain(msh, dim, directory, prefix):\n # Set cell type\n if dim == 2:\n cell_type = \"triangle\"\n elif dim == 3:\n cell_type = \"tetra\"\n # Generate the cell block for the domain cells\n data_array = [arr for (t, arr) in msh.cells if t == cell_type]\n if len(data_array) == 0:\n print(\"WARNING: No domain physical group found.\")\n return\n else:\n data = np.concatenate(data_array)\n cells = [\n meshio.CellBlock(\n type=cell_type,\n data=data,\n )\n ]\n # Generate the domain cells data (for the subdomains)\n try:\n cell_data = {\n \"subdomains\": [\n np.concatenate(\n [\n msh.cell_data[\"gmsh:physical\"][i]\n for i, cellBlock in enumerate(msh.cells)\n if cellBlock.type == cell_type\n ]\n )\n ]\n }\n except KeyError:\n raise ValueError(\n \"\"\"\n No physical group found for the domain.\n Define the domain physical group.\n - if dim=2, the domain is a surface\n - if dim=3, the domain is a volume\n \"\"\"\n )\n\n # Generate a meshio Mesh for the domain\n domain = meshio.Mesh(\n points=msh.points[:, :dim],\n cells=cells,\n cell_data=cell_data,\n )\n # Export the XDMF mesh of the domain\n meshio.write(\n \"{}/{}_{}\".format(directory, prefix, \"domain.xdmf\"),\n domain,\n file_format=\"xdmf\"\n )", "title": "" }, { "docid": "ea8b8f36bce0fc77de24eb2eacb4953c", "score": "0.4032719", "text": "def _get_dimensionwise_marg_var(var, marginal):\n if marginal in (M.OVER_ALL, M.OVER_PIXELS):\n return var\n elif marginal == M.NO:\n var = np.moveaxis(np.diagonal(var, axis1=0, axis2=1), -1, 0)\n\n # [..., X, X, Y, Y, Z, Z, ...] -> [..., X, Y, Z, ..., X, Y, Z, ...]\n var = _zip_axes(var, 1, unzip=True)\n spatial_shape = var.shape[1 + var.ndim // 2:]\n spatial_shape_prod = functools.reduce(op.mul, spatial_shape, 1)\n\n sqnorms = np.diagonal(\n var.reshape((-1, spatial_shape_prod, spatial_shape_prod)),\n axis1=-2,\n axis2=-1)\n sqnorms = sqnorms.reshape((-1,) + spatial_shape)\n return sqnorms", "title": "" }, { "docid": "623d4e38de34c0e99c5f2161d1978cc5", "score": "0.4028507", "text": "def dimension_modularforms(self,k):\n kk=Integer(k)\n if is_odd(kk):\n raise ValueError(\"Use only for even weight k! 
not k={0}\".format(kk))\n if k==0 :\n dim=1 # the constant functionz \n elif k<2:\n dim=0 \n else:\n dim=self.dimension_cuspforms(k)+self._ncusps\n #(kk-1.0)*(self._genus-_sage_const_1 )+self._nu2()*int(floor(kk/_sage_const_4 ))+self._nu3*int(floor(kk/_sage_const_3 ))+kk/_sage_const_2 *self._ncusps()\n return dim", "title": "" }, { "docid": "c5e90963d8d2838b420f0a0236e20520", "score": "0.40265596", "text": "def part_graph(nparts, adjacency=None, xadj=None, adjncy=None,\n vweights=None, eweights=None, recursive=None):\n xadj, adjncy = _prepare_graph(adjacency, xadj, adjncy)\n\n if recursive is None:\n if nparts > 8:\n recursive = False\n else:\n recursive = True\n\n from pymetis._internal import part_graph\n\n if nparts == 1:\n # metis has a bug in this case--it disregards the index base\n return 0, [0] * (len(xadj)-1)\n\n return part_graph(nparts, xadj, adjncy, vweights, eweights, recursive)", "title": "" } ]
8b6239c4aff78f55d89d8985bbe1f02d
Canonicalize the element to a uniform string
[ { "docid": "ccd7c06bcf8e2c94ec2333f1f448446e", "score": "0.0", "text": "def canon(self, e):\n s = \"\"\n # return strings based on what type the object is\n if isinstance(e, dict):\n return self._wrap_dict(self._process_dict(s, e))\n elif isinstance(e, list):\n return self._wrap_list(self._process_list(s, e))\n elif isinstance(e, str):\n return self._wrap_string(e)\n elif isinstance(e, bool):\n return \"true\" if e is True else \"false\"\n elif isinstance(e, int):\n return str(e)\n elif isinstance(e, float):\n return str(e)\n elif isinstance(e, type(None)):\n return \"null\"\n # If its not a json type raise an error\n raise ValueError(\"Type %s cannot be serialized\" % str(type(e)))", "title": "" } ]
[ { "docid": "13a46b4160cfe2ea93dbb236f8134e0b", "score": "0.6361481", "text": "def canonical(self) -> str:\n return self._canonical", "title": "" }, { "docid": "19df0ba24defec0f26bc8ea0963ac6e5", "score": "0.6322695", "text": "def stringify_affiliation_rec(node):\n parts = _recur_children(node)\n parts_flatten = list(_flatten(parts))\n return \" \".join(parts_flatten).strip()", "title": "" }, { "docid": "28d3080124c9c92ce3132cb5ff1dd3dc", "score": "0.62857914", "text": "def stringify_affiliation(node):\n parts = (\n [node.text]\n + list(\n chain(\n *(\n [c.text if (c.tag != \"label\" and c.tag != \"sup\") else \"\", c.tail]\n for c in node.getchildren()\n )\n )\n )\n + [node.tail]\n )\n return \" \".join(filter(None, parts))", "title": "" }, { "docid": "691e65a9b8c5e679c2e4f8c5c3ae7601", "score": "0.6206247", "text": "def canonical(self) -> str:\n raise TypeError(\"Need to implement this in the subclass\")", "title": "" }, { "docid": "c98d6c39703804fb763c858bfca1ed30", "score": "0.61923754", "text": "def canonical_form(self):\n return self.canonicalize()", "title": "" }, { "docid": "abb0c8840f13d939efd36ad213643243", "score": "0.58122784", "text": "def element_as_string(self,A):\n i=0\n z,t0,cf=factor_matrix_in_sl2z(A)\n if z==-1:\n s='-'\n else:\n s=''\n if t0 != 0:\n s=s+\"T^%\" %t0\n for n in range(len(cf)):\n s=s+\"ST^{%}\" % cf[n]\n return s", "title": "" }, { "docid": "9f73cb6e1c91584c06613cb634cfce7c", "score": "0.58111215", "text": "def _normalize_element(element):\n from unidecode import unidecode\n\n if isinstance(element, str):\n # We want it lowercase, without accents.\n return unidecode(element).lower()\n else:\n return None", "title": "" }, { "docid": "e2b6206adb8af93e6df73e54bb4903f0", "score": "0.57048523", "text": "def _xml_element_2_str(elem):\n rough_string = ElementTree.tostring(elem, method='xml')\n reparsed = minidom.parseString(rough_string)\n return reparsed.toprettyxml(indent=\" \")", "title": "" }, { "docid": "676a6615cd9f073ae20fa0c8a5c267df", "score": "0.5506631", "text": "def tostring(element):\r\n rv = []\r\n finalText = None\r\n filter = ihatexml.InfosetFilter()\r\n def serializeElement(element):\r\n if type(element) == type(ElementTree.ElementTree):\r\n element = element.getroot()\r\n \r\n if element.tag == \"<!DOCTYPE>\":\r\n if element.get(\"publicId\") or element.get(\"systemId\"):\r\n publicId = element.get(\"publicId\") or \"\"\r\n systemId = element.get(\"systemId\") or \"\"\r\n rv.append( \"\"\"<!DOCTYPE %s PUBLIC \"%s\" \"%s\">\"\"\"%(\r\n element.text, publicId, systemId))\r\n else: \r\n rv.append(\"<!DOCTYPE %s>\"%(element.text,))\r\n elif element.tag == \"<DOCUMENT_ROOT>\":\r\n if element.text:\r\n rv.append(element.text)\r\n if element.tail:\r\n finalText = element.tail\r\n \r\n for child in element.getchildren():\r\n serializeElement(child)\r\n \r\n elif type(element.tag) == type(ElementTree.Comment):\r\n rv.append(\"<!--%s-->\"%(element.text,))\r\n else:\r\n #This is assumed to be an ordinary element\r\n if not element.attrib:\r\n rv.append(\"<%s>\"%(filter.fromXmlName(element.tag),))\r\n else:\r\n attr = \" \".join([\"%s=\\\"%s\\\"\"%(\r\n filter.fromXmlName(name), value) \r\n for name, value in element.attrib.iteritems()])\r\n rv.append(\"<%s %s>\"%(element.tag, attr))\r\n if element.text:\r\n rv.append(element.text)\r\n \r\n for child in element.getchildren():\r\n serializeElement(child)\r\n \r\n rv.append(\"</%s>\"%(element.tag,))\r\n \r\n if element.tail:\r\n rv.append(element.tail)\r\n \r\n serializeElement(element)\r\n \r\n if finalText 
is not None:\r\n rv.append(\"%s\\\"\"%(' '*2, finalText))\r\n \r\n return \"\".join(rv)", "title": "" }, { "docid": "d4b1c2b55e15fb75bb6dc87109f4b472", "score": "0.5443869", "text": "def to_string(self):\r\n return ElementTree.tostring(self.to_didl_element())", "title": "" }, { "docid": "d4b1c2b55e15fb75bb6dc87109f4b472", "score": "0.5443869", "text": "def to_string(self):\r\n return ElementTree.tostring(self.to_didl_element())", "title": "" }, { "docid": "d4b1c2b55e15fb75bb6dc87109f4b472", "score": "0.5443869", "text": "def to_string(self):\r\n return ElementTree.tostring(self.to_didl_element())", "title": "" }, { "docid": "d4b1c2b55e15fb75bb6dc87109f4b472", "score": "0.5443869", "text": "def to_string(self):\r\n return ElementTree.tostring(self.to_didl_element())", "title": "" }, { "docid": "dfb0cc95e588cfc7d291f6cd7dd00bd3", "score": "0.5404248", "text": "def canonicalize(self):\r\n\r\n return Name([x.lower() for x in self.labels])", "title": "" }, { "docid": "ed9e958d8aea26819afa4ba16b225825", "score": "0.5334874", "text": "def __repr__(self):\n if self.element and self.element[-1] == '\\n':\n return \"\".join(self.element[:-1])\n else:\n return \"_\" + \"\".join(self.element) + \"_\"", "title": "" }, { "docid": "a85f473bc32dcc1db989aaab90d1ac2a", "score": "0.5326926", "text": "def encoded(normalized: list) -> str:\n str_encoded: list = list()\n for col in range(len(normalized[0])):\n str_row: str = ''\n for row in range(len(normalized)):\n str_row += normalized[row][col]\n\n str_encoded.append(str_row)\n\n return ' '.join(str_encoded)", "title": "" }, { "docid": "44c762c926e5ab9ebaa134d65dd146d1", "score": "0.53174204", "text": "def tostring(element):\r\n rv = []\r\n finalText = None\r\n def serializeElement(element):\r\n if not hasattr(element, \"tag\"):\r\n if element.docinfo.internalDTD:\r\n if element.docinfo.doctype:\r\n dtd_str = element.docinfo.doctype\r\n else:\r\n dtd_str = \"<!DOCTYPE %s>\"%element.docinfo.root_name\r\n rv.append(dtd_str)\r\n serializeElement(element.getroot())\r\n \r\n elif type(element.tag) == type(etree.Comment):\r\n rv.append(\"<!--%s-->\"%(element.text,))\r\n \r\n else:\r\n #This is assumed to be an ordinary element\r\n if not element.attrib:\r\n rv.append(\"<%s>\"%(element.tag,))\r\n else:\r\n attr = \" \".join([\"%s=\\\"%s\\\"\"%(name, value) \r\n for name, value in element.attrib.iteritems()])\r\n rv.append(\"<%s %s>\"%(element.tag, attr))\r\n if element.text:\r\n rv.append(element.text)\r\n\r\n for child in element.getchildren():\r\n serializeElement(child)\r\n\r\n rv.append(\"</%s>\"%(element.tag,))\r\n\r\n if hasattr(element, \"tail\") and element.tail:\r\n rv.append(element.tail)\r\n\r\n serializeElement(element)\r\n\r\n if finalText is not None:\r\n rv.append(\"%s\\\"\"%(' '*2, finalText))\r\n\r\n return \"\".join(rv)", "title": "" }, { "docid": "77dd9d156e3e1998a6c6606c367490f5", "score": "0.5314133", "text": "def component_name(elements: Iterable[str]) -> str:\n return \"-\".join((e for e in elements if e))", "title": "" }, { "docid": "192c8f29f490d235e31d73295861cc55", "score": "0.5292529", "text": "def standard(cls, element):\n\n # We remove all special characters and return the formatted string.\n return (\n PyFunceble.helpers.Regex(cls.regex_replace)\n .replace_match(element.strip(), \"@funilrys\")\n .replace(\"@funilrys\", \"\")\n )", "title": "" }, { "docid": "3b26beef436b64983ca24c262dcf15d0", "score": "0.52881145", "text": "def prettify(self, elem):\n rough_string = ElementTree.tostring(elem, 'utf8')\n root = 
etree.fromstring(rough_string)\n return etree.tostring(root, pretty_print=True)", "title": "" }, { "docid": "3b26beef436b64983ca24c262dcf15d0", "score": "0.52881145", "text": "def prettify(self, elem):\n rough_string = ElementTree.tostring(elem, 'utf8')\n root = etree.fromstring(rough_string)\n return etree.tostring(root, pretty_print=True)", "title": "" }, { "docid": "1b9d3a2e49e13cfb0cad7c5c83e5cd42", "score": "0.5231681", "text": "def _print_poly(cls, element: Self) -> str:\n poly = integer_to_poly(int(element), cls.characteristic)\n poly_var = \"α\" if cls.primitive_element == cls.characteristic else \"x\"\n s = poly_to_str(poly, poly_var=poly_var)\n\n if cls._element_fixed_width:\n s = s.rjust(cls._element_fixed_width)\n else:\n cls._element_fixed_width_counter = max(len(s), cls._element_fixed_width_counter)\n\n return s", "title": "" }, { "docid": "14051d3034bdcf8994bf68d7d09b3a12", "score": "0.51784533", "text": "def normalize(x: list[str]) -> str:\n return ' '.join(x)", "title": "" }, { "docid": "78415442bec9d9177b323881e14c63c0", "score": "0.5167201", "text": "def canonical_name(self) -> str:\n name: str = self.value\n name = name.replace('before_', '')\n name = name.replace('after_', '')\n name = name.replace('_start', '')\n return name.replace('_end', '')", "title": "" }, { "docid": "89d613716437788a678dfb7a99cdc6cc", "score": "0.514525", "text": "def getAndClean(element, path):\n x = element.xpath(path).extract()\n return \" \".join(x).encode('ascii','ignore').strip()", "title": "" }, { "docid": "7444bb6967262461727d01a31dd68b5b", "score": "0.51447254", "text": "def canonicalize(self) -> \"BEL\":\n\n # TODO Need to order position independent args\n\n if self.ast:\n self.ast.canonicalize()\n\n return self", "title": "" }, { "docid": "f50d7cce038358d2bf78ab92f779933d", "score": "0.5134202", "text": "def transform_xml(self, data) -> str:\n ldata = list(map(self.transform_helper, data))\n return \"\".join(ldata)", "title": "" }, { "docid": "fc8228260ee3d17131ff347680c61ef0", "score": "0.51297885", "text": "def prettify(elem):\n import xml.dom.minidom\n rough_string = et.tostring(elem, 'utf-8')\n reparsed = xml.dom.minidom.parseString(rough_string)\n return reparsed.toprettyxml()", "title": "" }, { "docid": "bd21a483ace1f0dd0f6917dd1c5f9334", "score": "0.512755", "text": "def list_as_str(self) -> str:\n return \"\\n\".join(str(node) for node in sorted(self.__nodes.values()))", "title": "" }, { "docid": "de4f42f6fa03313dfacaeb8c1c6f95be", "score": "0.5126007", "text": "def printable_to_attribute(name):\n return name.lower().replace(' ', '_').replace('[', '').replace(']', '')", "title": "" }, { "docid": "8f64a1d1cc45c5eeeb0c76654f5ddbe4", "score": "0.5119455", "text": "def encoded_string(self):\n if not self.scheme:\n raise ValueError(\"Attribute scheme must be set\")\n\n result = \"scheme:%s\" % base64.b64encode(self.scheme)\n if self.attrid != None:\n result = \"%s,attrid:%d\" % (result, self.attrid)\n if self.value != None:\n result = \"%s,value:%s\" % (result, base64.b64encode(self.value))\n if self.binaryData != None:\n result = \"%s,binaryData:%s\" %\\\n (result, base64.b64encode(self.binaryData))\n if self.comment != None:\n result = \"%s,comment:%s\" %\\\n (result, base64.b64encode(self.comment))\n if self.instid != None:\n result = \"%s,instid:%s\" %\\\n (result, base64.b64encode(self.instid))\n if self.visibility != None:\n result = \"%s,visibility:%s\" %\\\n (result, base64.b64encode(self.visibility))\n if self.effectiveFrom != None:\n result = \"%s,effectiveFrom:%02d %s 
%d\" %\\\n (result,\n self.effectiveFrom.day,\n _MONTHS[self.effectiveFrom.month-1],\n self.effectiveFrom.year)\n if self.effectiveTo != None:\n result = \"%s,effectiveTo:%02d %s %d\" %\\\n (result,\n self.effectiveTo.day,\n _MONTHS[self.effectiveTo.month-1],\n self.effectiveTo.year)\n if self.owningGroupid != None:\n result = \"%s,owningGroupid:%s\" %\\\n (result, base64.b64encode(self.owningGroupid))\n return result", "title": "" }, { "docid": "825b5855f06d40d0d0cf2275e7ba5731", "score": "0.5118606", "text": "def canonicalize_host(host):\n temp = [label[:63] for label in host.split(\".\")] # Truncate labels that cannont be encoded (more than 63 bytes..)\n temp = filter(lambda x: len(x) > 0, temp) # Remove zero-length labels due to extraneous dots in the original domain\n temp = map(lambda x: chr(len(x)) + x, temp)\n temp = ''.join(temp)\n if temp[-1] != \"\\x00\":\n temp += \"\\x00\"\n\n debug(3, \"Canonicalized hostname: %s\" % repr(temp))\n return temp", "title": "" }, { "docid": "112046b9d5660e09dfed83b50397f847", "score": "0.5117661", "text": "def __str__(self):\n return \"\".join(str(elem) for elem in self.text)", "title": "" }, { "docid": "e572a8e438c16036a4e8f555bf7b268b", "score": "0.51107025", "text": "def OMstring( x ):\n return ET.tostring( OMobject( x ) )", "title": "" }, { "docid": "6616d32c4054534a8bade767eece004c", "score": "0.51097673", "text": "def u(s):\n return s", "title": "" }, { "docid": "7afb54403b41cea95c21f37f8c7bfc6e", "score": "0.51020896", "text": "def __prettify(cls, elem):\n rough_string = ElementTree.tostring(elem, encoding=cls.encoding)\n if cls.pretty:\n reparsed = minidom.parseString(rough_string)\n pretty_string = reparsed.toprettyxml(indent=cls.indent)\n return pretty_string\n else:\n return rough_string", "title": "" }, { "docid": "c35e9dc92a3cf49cb43f773a5ab0bed6", "score": "0.50995564", "text": "def slugify(uid: UUID) -> str:\n byte_string = uid.bytes.rstrip(b'\\x00')\n byte_string = encodebytes(byte_string)\n # The encoder will add a newline on the end. We need to strip that. After said stripping, to ensure compatibility\n # with URL values, we want to swap out + and / for - and _, respectively. These are 'standard' characters that\n # can be expected from most base64 implementations, including Python's. 
Consult RFC4648 for full details.\n # Additionally, since the = is padding, we can remove it and restore it later when reversing the process.\n byte_string = byte_string.replace(b'+', b'-').replace(b'/', b'_').strip().rstrip(b'=')\n return byte_string.strip().decode('utf-8')", "title": "" }, { "docid": "9021c1cfcd4501d92113e72049aa836b", "score": "0.509254", "text": "def prettify(elem):\n from xml.dom import minidom\n ugly_xml = ET.tostring(elem, encoding='unicode')\n dom = minidom.parseString(ugly_xml)\n return dom.toprettyxml(indent=\" \")", "title": "" }, { "docid": "1c687722ff8312bd9ccbef47e8661f07", "score": "0.5086158", "text": "def asString(self):", "title": "" }, { "docid": "363acadfd5b9dd22ad4337be47471e59", "score": "0.50787055", "text": "def __str__(self) -> str:\n return \"\".join([\"{\", \", \".join(map(str, self.__elements)), \"}\"])", "title": "" }, { "docid": "b389ea00f08ef012113bd667646b14d1", "score": "0.50603807", "text": "def serialize(self, root: 'Node') -> str:\n def helper(node):\n if not node: return ''\n if not node.children: return str(node.val)\n res = str(node.val)+'['\n for i in range(len(node.children)-1):\n res += helper(node.children[i])\n res += ' '\n res += helper(node.children[-1])\n res += ']'\n return res\n string = helper(root)\n #print(string)\n return string", "title": "" }, { "docid": "816e4907e38ed57d7eda96c331a12e3d", "score": "0.5059625", "text": "def dumps(element, sort_lists=False, excluded_keys=[], ignore_keyerror=False):\n # Excluded keys only work with dictionaries\n if type(element) is not dict and len(excluded_keys):\n raise ValueError(\"Cannot exclude keys for a non-dict type\")\n # remove all the keys\n for key in excluded_keys:\n if not isinstance(key, str):\n raise ValueError(\"Excluded keys must be strings\")\n try:\n del element[key]\n except KeyError:\n if not ignore_keyerror:\n raise ValueError(\"Excluded key does not exist in element\")\n # canonicallize it!\n c = _Canonicallizer(sort_lists)\n return c.canon(element)", "title": "" }, { "docid": "73e8178f56750eae1aa8237beebbbb46", "score": "0.5039466", "text": "def address_string(self) -> str:\n return '0' * (4 - len(str(self.unsigned()))) + str(self.unsigned())", "title": "" }, { "docid": "489b77907699a913d102a6e2cddea6dc", "score": "0.5038601", "text": "def __str__(self):\n return \"<\" + str(self._coords)[1:-1] + \">\" # remove the '[' ']' in the str(list)", "title": "" }, { "docid": "50b066e123663a2d6d0b8c0bbd6902f7", "score": "0.5037772", "text": "def normalize():\n pass", "title": "" }, { "docid": "1b438775c807bf4b0021a5ccb2bc8c14", "score": "0.50360703", "text": "def node_to_str(node):\n if len(node) == 2 and isinstance(node, tuple):\n key, val = node\n else:\n key, val = node, ''\n\n key, val = trim(key), trim(val)\n return ': '.join([key, val])", "title": "" }, { "docid": "b531be9ab7708fee02b572cea4747223", "score": "0.5034251", "text": "def to_text(element):\n result = []\n\n def _to_text(element):\n result.append(element.text or u'')\n for child in element.iterchildren():\n _to_text(child)\n result.append(element.tail or u'')\n\n _to_text(element)\n return u''.join(result)", "title": "" }, { "docid": "a8be08bd656f49896dfd6e0d37260d35", "score": "0.5034145", "text": "def doTransform(self, value):\n return str(value)", "title": "" }, { "docid": "6b9a1a6ec4759303869b0e5075f18bb6", "score": "0.5032068", "text": "def __str__(self):\n\t\treturn \"<{}>\".format(self.data.__str__()[1:-1])", "title": "" }, { "docid": "0a73e9d00d85f1503add0bfffe8b4fad", "score": "0.50311726", 
"text": "def asString():", "title": "" }, { "docid": "628643c8f856aff953aa2e9b65587a5e", "score": "0.5029192", "text": "def getUid2String(self):", "title": "" }, { "docid": "c4ace4fb740bcf499f572d029051d274", "score": "0.5025612", "text": "def _text_of(self, elem):\n if isinstance(elem, Tag):\n text = [ ]\n for sub_elem in elem:\n text.append(self._text_of(sub_elem))\n\n return \" \".join(text)\n else:\n return elem.string", "title": "" }, { "docid": "2f1df06b9d0680f9ef5226ac4d3586cf", "score": "0.50226367", "text": "def prettify(elem):\n rough_string = et.tostring(elem, encoding='utf-8')\n reparsed = minidom.parseString(rough_string)\n return reparsed.toprettyxml(indent=\" \")", "title": "" }, { "docid": "e0fc769bb19ab229c23ab70fde97f7a0", "score": "0.5021895", "text": "def asciiconverter(self, a):\n pass", "title": "" }, { "docid": "c088c6873116eb59c75487e61d242ef9", "score": "0.50148076", "text": "def _canonical_form(labels: Collection) -> Tuple:\n if len(labels) == 0:\n return 'label',\n else:\n return tuple(sorted(labels))", "title": "" }, { "docid": "f48b777645b694f2b04f36c7eeb82b62", "score": "0.5007526", "text": "def __str__(self):\n class_name = self.__class__.__name__\n initial_index = first_nonzero_index(self.normal_vector)\n\n def to_coeff(value, index: int):\n value = round(value, self.precision)\n if not value % 1:\n value = int(value)\n\n if not value:\n return ''\n\n result = f'{value:+}x_{index}'\n return result\n\n if initial_index is None:\n output = '0'\n\n else:\n output = ''.join(to_coeff(v, i) for i, v in\n enumerate(self.normal_vector))\n if output.startswith('+'):\n output = output[1:]\n\n constant = round(self.constant_term)\n if constant % 1 == 0:\n constant = int(constant)\n\n output += f'={constant}'\n\n return f'{class_name}({output})'", "title": "" }, { "docid": "b13ffa196268e085f0a2b41dc1526fcc", "score": "0.49951085", "text": "def serialize(self, root):\n def encode(root):\n val = []\n if not root:\n val.append('None')\n else:\n val.append(str(root.val))\n val.extend(encode(root.left))\n val.extend(encode(root.right))\n return val\n \n return ' '.join(encode(root))", "title": "" }, { "docid": "c40c77d19b68ac8aa1435d088b7cb913", "score": "0.49860355", "text": "def __str__(self) -> str:\n joined_items = ', '.join([\n f'{attr.name}={attr.as_repr(value=self.get_attribute_value(attribute=attr))}' # noqa: WPS221\n for attr, value in self._values.items()\n ])\n return f'{self.__class__.__name__}({joined_items})'", "title": "" }, { "docid": "5e34a455db9631d6ba355dc5cd1818a0", "score": "0.49835327", "text": "def to_nice(self) -> str:\n return \" \".join(s.capitalize() for s in str(self.value).split(\"-\"))", "title": "" }, { "docid": "d9f109c1a6d5c7fa147412931700ce8e", "score": "0.49822938", "text": "def decanonicalize(self) -> \"BEL\":\n\n if self.ast:\n self.ast.decanonicalize()\n\n return self", "title": "" }, { "docid": "2d2d2ee307664d08c61f7e37a239a6bf", "score": "0.49818042", "text": "def element_to_author_simple(el):\n name = \"\"\n first = el.xpath('.//tei:persName/tei:forename[@type=\"first\"]',\n namespaces=NS)\n if first and len(first) == 1:\n name+=first[0].text+\" \"\n\n middle = el.xpath('.//tei:persName/tei:forename[@type=\"middle\"]',\n namespaces=NS)\n if middle and len(middle) == 1:\n name+=middle[0].text+\" \"\n\n surname = el.xpath('.//tei:persName/tei:surname', namespaces=NS)\n if surname and len(surname) == 1:\n name += surname[0].text\n return name", "title": "" }, { "docid": "98864302d77b08775381484674cf8a49", "score": "0.49762014", "text": 
"def __str__(self):\n info = \"{\" + \", \".join([d.name for d in self.domains]) + \"}\"\n return \"ComplementaryStrand {0}: {1}\".format(self.name, info)", "title": "" }, { "docid": "df7727bc97eee73db3791a3a34d2a3bf", "score": "0.4972493", "text": "def get_compare_value(compare_element: etree.Element) -> str:\n\n normalize_space.parse_xml_content(compare_element) # Apply normalize-space so whitespace is not considered.\n\n if compare_element.text is not None:\n compare_string = compare_element.text\n else:\n compare_string = \"\"\n\n for embedded_element in compare_element:\n if embedded_element.text is not None:\n compare_string += embedded_element.text\n\n for embedded_lb_element in embedded_element:\n if embedded_lb_element.text is not None:\n compare_string += embedded_lb_element.text\n if embedded_lb_element.tail is not None:\n if not compare_string.endswith(\" \"):\n compare_string += \" \"\n compare_string += embedded_lb_element.tail\n\n if embedded_element.tail is not None:\n compare_string += embedded_element.tail\n\n compare_string = \" \".join(compare_string.split())\n return compare_string", "title": "" }, { "docid": "811805d1d4a740a4dc83547dc311018c", "score": "0.49723154", "text": "def tree2conllstr(t):\n ...", "title": "" }, { "docid": "039eabac6dc405b3c3e17197874dbd66", "score": "0.49702558", "text": "def lower_repr(self):\n return ''.join(x for x in self.__str__().lower() if x != \" \")", "title": "" }, { "docid": "2c1988e3d5538cfdd5885137ac638184", "score": "0.49687293", "text": "def __str__(self):\n return str([y for y in self._elements[:self._count]])", "title": "" }, { "docid": "8f807c16ef8280defbf36d044f10c785", "score": "0.49672222", "text": "def xmlify_element(name, value):\n element = ElementTree.Element(name)\n element.text = value if value is not None else ''\n return ElementTree.tostring(element, encoding='unicode')", "title": "" }, { "docid": "4a22bd9620062af0eb5f173394d9bcfa", "score": "0.49637803", "text": "def canonical(v):\n if abs(vectorops.normSquared(v) - 1.0) > 1e-4:\n raise RuntimeError(\"Nonunit vector supplied to canonical()\")\n assert(len(v)==3)\n if abs(v[0]-1.0) < 1e-5:\n return identity()\n elif abs(v[0]+1.0) < 1e-5:\n #flip of basis\n R = identity()\n R[0] = -1.0\n R[4] = -1.0\n return R\n R = v + [0.]*6\n (x,y,z) = tuple(v)\n scale = (1.0-x)/(1.0-x*x);\n R[3]= -y;\n R[4]= x + scale*z*z;\n R[5]= -scale*y*z;\n R[6]= -z;\n R[7]= -scale*y*z;\n R[8]= x + scale*y*y;\n return R", "title": "" }, { "docid": "fa775032388298ca5cc9321d4826b23d", "score": "0.49613273", "text": "def nx_canonicalize_nodes(g):\n mapping = {v: i for i,v in enumerate(g.nodes)}\n g_clean = nx.relabel_nodes(g, mapping)\n return g_clean", "title": "" }, { "docid": "7990e203421c761938b0ec6e7246a6d6", "score": "0.4958267", "text": "def to_xml_element(self):\n element = super().to_xml_element()\n element[0].text = ' '.join(str(x) for x in self.values)\n return element", "title": "" }, { "docid": "a503e88a87fe34bae7e4ab20d904f288", "score": "0.49523056", "text": "def canonicalize(self):\n self.substrings = canonical_ranges(self.substrings)", "title": "" }, { "docid": "93fe511e47f182d7698b9f75a34c7db3", "score": "0.49449104", "text": "def __str__(self):\r\n\r\n return '<' + str(self._coordinates)[1: -1] + '>'", "title": "" }, { "docid": "69bdf5dcad49d4bf11823c43c43ed303", "score": "0.49444094", "text": "def pretty_str(elem):\n return yaml.dump(elem, default_flow_style=False)", "title": "" }, { "docid": "b3d1797c7d40499382bd2fdf3f4faf33", "score": "0.49374747", "text": "def 
__str__(self):\n return '<'+str(self._coords)[1:-1]+'>'", "title": "" }, { "docid": "1eb7cc8a025416fb5aa9911a5a00508c", "score": "0.49215057", "text": "def test_build_simple_element(self):\n element = safe_dom.Element('p')\n self.assertEqual('<p></p>', element.__str__())", "title": "" }, { "docid": "056e6348a35d7c41a1e25ffef4d06609", "score": "0.4914198", "text": "def elementText(elem):\n\ttext = ''.join(elem.itertext()).strip()\n\ttext = re.sub('\\n', ' ', text)\n\ttext = re.sub('\\s{2,}', ' ', text)\n\treturn text", "title": "" }, { "docid": "7e4e67e85b03121de00dec0682de8270", "score": "0.49137482", "text": "def canonical_ipv4_address(ip_addr):\n return socket.inet_ntoa(socket.inet_aton(ip_addr))", "title": "" }, { "docid": "c6b04218f2b8b245087edbd45202f274", "score": "0.49110544", "text": "def path_raw(element: Element):\n if element is None:\n return ''\n p = parent(element)\n if p is not None:\n return path_raw(p) + '/' + element.tag\n return element.tag", "title": "" }, { "docid": "37bd7c8e8a3971ed633fceed91f040f1", "score": "0.49106473", "text": "def serialize(self, root):\n if root is None:\n return \"\"\n q = []\n\n def preorder(root):\n if root is None:\n return None\n else:\n q.append(root.val)\n for child in root, children:\n preorder(child)\n q.append(\"#\")\n\n preorder(root)\n return \",\".join(map(str, q))", "title": "" }, { "docid": "b4032fbc12119fbc6b59ed37ec4ac340", "score": "0.49049783", "text": "def unicodify(s, encoding='utf-8', norm=None):\n if not isinstance(s, str):\n s = str(s, encoding)\n\n if norm:\n from unicodedata import normalize\n s = normalize(norm, s)\n\n return s", "title": "" }, { "docid": "8fa2d25933ccc4b587ed7ea4f3c9f53b", "score": "0.48959333", "text": "def serialize(self, root):\n vals = []\n def preorder(node):\n if node:\n vals.append(str(node.val))\n for child in node.children:\n preorder(child)\n vals.append('#')\n preorder(root)\n return ' '.join(vals)", "title": "" }, { "docid": "514267cd01d16245caf3b6ae92d2090f", "score": "0.48806378", "text": "def string(self) -> str:\n if self is NodeApproach.SINGLE:\n return \"single\"\n elif self is NodeApproach.INDIRECT:\n return \"indirect\"\n elif self is NodeApproach.DIRECT:\n return \"direct\"\n elif self is NodeApproach.TOPOLOGY:\n return \"topology\"\n elif self is NodeApproach.GRAD_CHOICE:\n return \"grad-choice\"\n elif self is NodeApproach.AGREE:\n return \"agree\"\n elif self is NodeApproach.INJECTION:\n return \"injection\"\n elif self is NodeApproach.ZERO_FEATURES:\n return \"zero-features\"\n elif self is NodeApproach.MULTIPLE_ATTACKERS:\n return \"multiple-attackers\"", "title": "" }, { "docid": "b3af242094736daca9cb06d82214c7b3", "score": "0.4877691", "text": "def to_unicode(self, omit_final_dot = False):\r\n\r\n if len(self.labels) == 0:\r\n return u'@'\r\n if len(self.labels) == 1 and self.labels[0] == '':\r\n return u'.'\r\n if omit_final_dot and self.is_absolute():\r\n l = self.labels[:-1]\r\n else:\r\n l = self.labels\r\n s = u'.'.join([encodings.idna.ToUnicode(_escapify(x)) for x in l])\r\n return s", "title": "" }, { "docid": "92cf1f6380da59ef5c99bc0eea5267ef", "score": "0.48758608", "text": "def to_string(self) -> str:\n type, namespace, name, version, qualifiers, subpath = normalize( # NOQA\n self.type,\n self.namespace,\n self.name,\n self.version,\n self.qualifiers,\n self.subpath,\n encode=True,\n )\n\n purl = [\"pkg:\", type, \"/\"]\n\n if namespace:\n purl.append(namespace)\n purl.append(\"/\")\n\n purl.append(name)\n\n if version:\n purl.append(\"@\")\n 
purl.append(version)\n\n if qualifiers:\n purl.append(\"?\")\n purl.append(qualifiers)\n\n if subpath:\n purl.append(\"#\")\n purl.append(subpath)\n\n return \"\".join(purl)", "title": "" }, { "docid": "d9de9a725fc7247ee11d986d20759a79", "score": "0.48735735", "text": "def toSimple(self) -> str:\n\t\tif self.value == self.JSON:\treturn 'json'\n\t\tif self.value == self.CBOR:\treturn 'cbor'\n\t\tif self.value == self.XML:\treturn 'xml'\n\t\treturn None", "title": "" }, { "docid": "59acc97c38c19e22f37562f22ee9c1bd", "score": "0.48712888", "text": "def tostring(element, *args, **kwargs):\r\n global modules\r\n\r\n _bootstrap()\r\n t = _get_type(element)\r\n\r\n etree = modules.get(t, None)\r\n\r\n if not etree:\r\n raise RuntimeError('Unable to find the etree implementation related '\r\n 'to %r (type %r)' % (element, t))\r\n\r\n return etree.tostring(element, *args, **kwargs)", "title": "" }, { "docid": "07b8379d0dba07b926564459247cd7a4", "score": "0.48674852", "text": "def prettify(elem):\n rough_string = ElementTree.tostring(elem, 'utf-8')\n reparsed = minidom.parseString(rough_string)\n pretty = reparsed.toprettyxml(indent=\"\\t\")\n prettyStr = \"\\n\".join([line for line in pretty.split('\\n') if line.strip() != ''])\n fix = re.compile(r'((?<=>)(\\n[\\t]*)(?=[^<\\t]))|(?<=[^>\\t])(\\n[\\t]*)(?=<)')\n fixedPrettyStr = re.sub(fix, '', prettyStr)\n return fixedPrettyStr", "title": "" }, { "docid": "dc5e55ce2ebcbf6c2729682d990a4f96", "score": "0.48671067", "text": "def prettify(self, elem):\n rough_string = ElementTree.tostring(elem, 'utf8')\n root = etree.fromstring(rough_string)\n return etree.tostring(root, pretty_print=True, encoding=ENCODE_METHOD).replace(\" \".encode(), \"\\t\".encode())\n # minidom does not support UTF-8\n '''reparsed = minidom.parseString(rough_string)\n return reparsed.toprettyxml(indent=\"\\t\", encoding=ENCODE_METHOD)'''", "title": "" }, { "docid": "119f60c15ad14c22942385482933d3b1", "score": "0.4865885", "text": "def to_xml_element(self):\n element = super().to_xml_element()\n element[0].text = str(self.mesh.id)\n if self.translation is not None:\n element.set('translation', ' '.join(map(str, self.translation)))\n return element", "title": "" }, { "docid": "e35f09cb03f525cd191b35567ca19e15", "score": "0.486148", "text": "def normalizeUUID(value):\n try:\n return str(UUID(value)).upper()\n except (ValueError, TypeError):\n return value", "title": "" }, { "docid": "54c9362a703168cc6f58cfeba1175c38", "score": "0.48604465", "text": "def __str__( self ):\n if self.is_composite:\n info = \"[\" + \",\".join([d.name for d in self.subdomains]) + \"]\"\n else:\n info = self.sequence\n return \"ComplementaryDomain {0}: {1}\".format(self.name, info)", "title": "" }, { "docid": "380faef176246d1296bcce81b42b6f3b", "score": "0.48580432", "text": "def __str__(self):\n string = \"<\"\n while self.rest is not Link.empty:\n string += str(self.value) + \", \"\n self = self.rest\n return string + str(self.value) + \">\"", "title": "" }, { "docid": "1d8bdcbe79c5de62244595b84491efea", "score": "0.4857296", "text": "def xmlattrstr(attrs):\n #XXX Should this be using \n from xml.sax.saxutils import quoteattr\n s = ''\n names = attrs.keys()\n names.sort() # dump attrs sorted by key, not necessary but is more stable\n for name in names:\n s += ' %s=%s' % (name, quoteattr(str(attrs[name])))\n return s", "title": "" }, { "docid": "bb2ea8d3daee9dde7c60d925f2967723", "score": "0.48545521", "text": "def serialize(self, root):\n if not root:\n return \"\"\n preorder = []\n def helper(node):\n if 
not node:\n preorder.append('#')\n return\n preorder.append(str(node.val))\n helper(node.left)\n helper(node.right)\n helper(root)\n return '|'.join(preorder)", "title": "" }, { "docid": "90701733022d84ffad64b36d2b9549c0", "score": "0.4853868", "text": "def generate_canonical_uri(self): # NOQA\n return NotImplemented", "title": "" }, { "docid": "eddc4035d8f5bb8652bdf28b24847d9a", "score": "0.48535", "text": "def _serialize(self, value: typing.Any, attr: str, obj: typing.Any, **kwargs):\n return str(value)", "title": "" }, { "docid": "f30ca93b78d9a3e6ee99ccd7e4c8f839", "score": "0.48523372", "text": "def _print_int(cls, element: Self) -> str:\n s = f\"{int(element)}\"\n\n if cls._element_fixed_width:\n s = s.rjust(cls._element_fixed_width)\n else:\n cls._element_fixed_width_counter = max(len(s), cls._element_fixed_width_counter)\n\n return s", "title": "" }, { "docid": "2d652a12d88acc7ca91aa140e4a9e985", "score": "0.48513046", "text": "def to_xml_str(component: Component, remove_hs=True, conf_type=ConformerType.Ideal):\n root = to_xml_xml(component, remove_hs, conf_type)\n\n xml = ET.tostring(root, encoding=\"utf-8\", method=\"xml\")\n xmls_tring = minidom.parseString(xml)\n\n return xmls_tring.toprettyxml(indent=\" \")", "title": "" }, { "docid": "ed36b738d4f0bb1b4ca24a35bbf9a03f", "score": "0.48512578", "text": "def get_param_canonical(param):\n return tuple(map(_param_aliases.get_canonical, param))", "title": "" }, { "docid": "9bd37f491d7ab54e5fdbf68401c85f5e", "score": "0.4848201", "text": "def __str__(self):\n return \" \".join(reversed([str(item) for item in self]))", "title": "" }, { "docid": "8ce9d1f261a4211feac8571a0eb42a77", "score": "0.48458597", "text": "def _get_value_as_string(xml, nsmap):\n # Strip first item from iterables.\n if isinstance(xml, types.ListType):\n if len(xml) > 0:\n xml = xml[0]\n else:\n xml = None\n\n if xml is None:\n return None\n elif isinstance(xml, types.StringTypes):\n return xml.encode('utf-8', 'ignore')\n else:\n return et.tostring(xml)", "title": "" } ]
573e059c748a01c6b64a8b842d0f319b
compute the connection time based on speed and distance
[ { "docid": "74dec35c36ad2ebb90f4a0ec67e53438", "score": "0.7062803", "text": "def _compute_rsu_connection_time(self):\n\n self.contact_time = []\n for s in self.car_speed:\n speed_in_ms = s/3.6\n distance_in_m = 1000*self.rsu_distance\n self.contact_time.append(distance_in_m/speed_in_ms * 1e9)", "title": "" } ]
[ { "docid": "6481d0852a6287332ce706d3aaafdd06", "score": "0.68263894", "text": "def _get_distance_time(self):\n try:\n\n ds = my_dist_class.return_distance(self.ozone, self.dzone)\n # ds = np.ceil(\n # # self.DIST_MAT.query(\n # # \"PULocationID == {origin} & DOLocationID == {destination} \".format(\n # # origin=self.ozone, destination=self.dzone\n # # )\n # # )[\"trip_distance_meter\"].values[0]\n # # DIST_MAT[(DIST_MAT[\"PULocationID\"] == self.ozone) & (DIST_MAT[\"DOLocationID\"] == self.dzone)][\n # # \"trip_distance_meter\"].values[0]\n # DIST_MAT.loc[self.ozone, self.dzone][\"trip_distance_meter\"].values\n # )\n except:\n ds = 10 * 1609 # meter\n print(\"didn't find the distance\")\n # Ts = np.int(ds / CONSTANT_SPEED)\n return ds, np.int(ds / CONSTANT_SPEED)", "title": "" }, { "docid": "c32816d41bf582da0ab7770c5e21930a", "score": "0.66361797", "text": "def duration(self):\n return time.time() - self.connect_time", "title": "" }, { "docid": "9777b3382c48a55f294af9a5fa9cf001", "score": "0.6627373", "text": "def travel_time(data, from_node, to_node):\n if from_node == to_node:\n travel_time = 0\n else:\n # travel_time = manhattan_distance(data['locations'][from_node], data[\n # 'locations'][to_node]) / data['vehicle_speed']\n travel_time = data['distance_matrix'][from_node][to_node]/data['vehicle_speed']\n return travel_time", "title": "" }, { "docid": "28db32d8de60989114f387abf8fd5b83", "score": "0.6440792", "text": "def driving_time(miles,speed):\n t=miles/speed\n return t", "title": "" }, { "docid": "7595d14fc097e00ef5c12ef3a5a38419", "score": "0.641473", "text": "def distance(self):\n # set Trigger to HIGH\n GPIO.output(self.GPIO_TRIGGER, True)\n\n # set Trigger after 0.01ms to LOW\n time.sleep(0.00001)\n GPIO.output(self.GPIO_TRIGGER, False)\n\n start_time = time.time()\n stop_time = time.time()\n\n # save StartTime\n while GPIO.input(self.GPIO_ECHO) == 0:\n start_time = time.time()\n\n # save time of arrival\n while GPIO.input(self.GPIO_ECHO) == 1:\n stop_time = time.time()\n\n # time difference between start and arrival\n time_elapsed = stop_time - start_time\n # multiply with the sonic speed (343.00 m/s)\n # and divide by 2, because there and back\n distance = (time_elapsed * 343.00) / 2\n\n return distance", "title": "" }, { "docid": "df742237e323bc156f3781c095558344", "score": "0.63035", "text": "def speedtime():\n return SPEEDUP*original_time.time()", "title": "" }, { "docid": "af3e369c19c04a4a5a79cf996f2519b0", "score": "0.62936723", "text": "def launch_distance(speed, duration): \n if speed <= 0 or duration <= 0:\n return 0\n time = pow(speed/25000, -0.95)\n diff = time - duration\n return 90 - int(diff/time*90)", "title": "" }, { "docid": "c4f17a5374ae5106dc45443fd94115b1", "score": "0.62887764", "text": "def speed(self):\n return", "title": "" }, { "docid": "c4f17a5374ae5106dc45443fd94115b1", "score": "0.62887764", "text": "def speed(self):\n return", "title": "" }, { "docid": "d3156c47791f9383354e3d3773896b66", "score": "0.62824804", "text": "def trafficTime(self, source, dest, action):\n # find the state for the source location\n newSource, _ = self.findRoad(source)\n\n # if the calculation has already been performed before,\n # return the time by querying the dictionary\n key = tuple(sorted((newSource, dest)))\n if key in self.trafficQuery:\n return self.trafficQuery[key]\n\n times = self.dijkstra(newSource, dest)\n self.trafficQuery[key] = times\n\n return times", "title": "" }, { "docid": "28c3677761958c7e04502d9486addfaf", "score": "0.6280937", "text": "def 
route_time(self, route, truck_speed):\n\n distance_miles = self.get_route_distance(route)\n distance_time = (distance_miles / truck_speed) * 60 * 60\n\n return distance_time", "title": "" }, { "docid": "26c3eba7fab9bd9fb748654ddf0bf072", "score": "0.62694114", "text": "def get_incremental_distance():\n return current_speed/float(FPS)", "title": "" }, { "docid": "7d9deba05e3686d587d1c8958b6d733c", "score": "0.6268242", "text": "def _getSpeed(self):\n pass", "title": "" }, { "docid": "2fdfe6e80203eb452fbaa5ae2172cf71", "score": "0.62490636", "text": "def get_current_speed(self) -> int:", "title": "" }, { "docid": "1dc3b8444d1307a6cd01938edb45d640", "score": "0.622039", "text": "def tws_connection_time(self):\n return self.conn_time", "title": "" }, { "docid": "8560bc63810e88189096e0ac8d534aa8", "score": "0.6194849", "text": "def server_time(self):", "title": "" }, { "docid": "fa799cb451c0338968d35790338f4e5c", "score": "0.61609924", "text": "def hubble_distance(self):\n return speed_of_light_cgs / self.hubble_constant", "title": "" }, { "docid": "2081dec0ca7e63e4eb1204a5bead4e8f", "score": "0.6127481", "text": "def connection_delay(self, from_node_id: int, to_node_id: int) -> float:\n distance = self.get_distance(from_node_id, to_node_id)\n avg_connection_speed = (\n self.nodes_map[from_node_id]['connection_speed']\n + self.nodes_map[to_node_id]['connection_speed']\n ) / 2.0\n return distance * avg_connection_speed * self.delay_multiplier", "title": "" }, { "docid": "9ff44f43c7bef80cb7c592165e108720", "score": "0.61059767", "text": "def travel_time(self, x2, y2):\n rate = uniform(0.09, 0.15)\n x1, y1 = pag.position()\n distance = math.sqrt(math.pow(x2-x1, 2)+math.pow(y2-y1, 2))\n\n return max(uniform(.08, .12), rate * (distance/randint(250, 270)))", "title": "" }, { "docid": "f077a660db5f0bf91d407f81686368f5", "score": "0.6086411", "text": "def connection_time(self):\n\n return self._connection_time", "title": "" }, { "docid": "a103671a42450297bc1ccac11424f8ca", "score": "0.60625064", "text": "def get_distance(self): \n return self.from_station.distance_to(self.to_station)", "title": "" }, { "docid": "de0df563deb70794732503de872e6f85", "score": "0.60556614", "text": "def get_travel_time(self, destination):\n # TODO\n distance = manhattan_distance(self.location, destination)\n return round(distance/self.speed)", "title": "" }, { "docid": "6ee1e8b3ade7eceb858cca1dd07a6d9b", "score": "0.6050709", "text": "def destinationTime(self) -> float:\n raise NotImplementedError()", "title": "" }, { "docid": "45da88eb11d9c4903ef85c16a8b810ca", "score": "0.60403216", "text": "def get_proxy_delay_time():\n return (proxy_delay_center - proxy_delay_radius) + (2 * proxy_delay_radius * random.random())", "title": "" }, { "docid": "66e16fc88be5f055d23046529ac15b79", "score": "0.6036347", "text": "def _measure_latency(self):\n\n # Generates a random message to transfer\n message = (''.join(\n random.choice(string.ascii_letters + string.digits) for _ in range(6)))\n start_time = time.time()\n write_read_successful = bt_test_utils.write_read_verify_data_sl4a(\n self.phone, self.derived_bt_device, message, False)\n end_time = time.time()\n asserts.assert_true(write_read_successful, 'Failed to send/receive message')\n return (end_time - start_time) * 1000", "title": "" }, { "docid": "750178e41b74aa26912bdbbd1e594097", "score": "0.60271347", "text": "def migration_time_estimate(self, client, Gpon_speed, backhaul_distance): #(GPON 2.5GB/s) #(Km) \n\n Vm_size = int(client.vm['VM_SIZE (MB)'].values[0])\n Vm_ptr = 
int(client.vm['Page transfer rate (MB/s)'].values[0])\n Vm_pdr = int(client.vm['Page Dirty Rate (4KB pages per second)'].values[0])\n #n = ?\n\n tp_td = (Vm_size *pow(10,6)) / (Vm_ptr*pow(10,6) - Vm_pdr *4*pow(10,3))\n\n ########\n\n lte_st_coor = [client.lte_st['lat'].values[0], client.lte_st['lon'].values[0]]\n server_coor = [client.dfmigrations['Latitude'].values[1], client.dfmigrations['Longitude'].values[1]]\n\n backhaul_time = (client.vm['Total Transferred Data (KB)'] * pow(10,3)) / (Gpon_speed * pow(10,9)) #seconds \n travel_time = (( hs.haversine(lte_st_coor, server_coor, unit=Unit.METERS) + 2*backhaul_distance*1000 ) / (1.5*3*pow(10,8) )) #seconds\n\n\n return tp_td + backhaul_time + travel_time #seconds", "title": "" }, { "docid": "f2242dd2e496afbf45b56b4653deb936", "score": "0.6020684", "text": "def time_for_distance(dist_mm, velocity):\n return abs(float(dist_mm) / float(velocity))", "title": "" }, { "docid": "b0eb0e4063412959004640b40692ec99", "score": "0.6011506", "text": "def _get_port_speed(self):\n return self.__port_speed", "title": "" }, { "docid": "b0eb0e4063412959004640b40692ec99", "score": "0.6011506", "text": "def _get_port_speed(self):\n return self.__port_speed", "title": "" }, { "docid": "6c1eed6e379f3f2f91b308e341aead2b", "score": "0.59983915", "text": "def ultrasonic_distance(self, distance):", "title": "" }, { "docid": "99f8d4baeb3c23ebf7e4cbccc612f41b", "score": "0.5974473", "text": "def __get_speed(self):\n return self.__speed", "title": "" }, { "docid": "7cab7b433ab769e0b553ccaac7371e0a", "score": "0.5968914", "text": "def flight_time(v1, v2):\r\n return ceil(dist(v1, v2))", "title": "" }, { "docid": "3e147d255938594fdec9db3d3ae394c6", "score": "0.5966473", "text": "def getDistance(self):\n GPIO.setup(self.channel, GPIO.OUT)\n # Send 10us pulse to trigger\n GPIO.output(self.channel, True)\n time.sleep(0.00001)\n GPIO.output(self.channel, False)\n start = time.time()\n count=time.time()\n GPIO.setup(self.channel,GPIO.IN)\n while GPIO.input(self.channel)==0 and time.time()-count<0.1:\n start = time.time()\n count=time.time()\n stop=count\n while GPIO.input(self.channel)==1 and time.time()-count<0.1:\n stop = time.time()\n # Calculate pulse length\n elapsed = stop-start\n # Distance pulse travelled in that time is time\n # multiplied by the speed of sound 34000(cm/s) divided by 2\n distance = elapsed * 17000\n return distance", "title": "" }, { "docid": "23aa1632aceee5d9104b3349b97f7c01", "score": "0.5959447", "text": "def get_duration(first, second, speed):\n distance = \\\n geopy.distance.distance(\n (first.latitude, first.longitude),\n (second.latitude, second.longitude))\n return distance.m / kmh_to_ms(speed)", "title": "" }, { "docid": "fc0d5319700aaa26ae1f8fea4e293e04", "score": "0.5952262", "text": "def speed(self):\n return 1", "title": "" }, { "docid": "947c3b3a684076172075ee4c9fbe105b", "score": "0.59391284", "text": "def travel_time_to_target(self) -> timedelta:\n destination = self.tot_waypoint\n total = timedelta()\n for previous_waypoint, waypoint in self.edges():\n if waypoint == self.tot_waypoint:\n # For anything strike-like the TOT waypoint is the *flight's*\n # mission target, but to synchronize with the rest of the\n # package we need to use the travel time to the same position as\n # the others.\n total += self.travel_time_between_waypoints(\n previous_waypoint, self.target_area_waypoint)\n break\n total += self.travel_time_between_waypoints(previous_waypoint,\n waypoint)\n else:\n raise PlanningError(\n f\"Did not find destination waypoint 
{destination} in \"\n f\"waypoints for {self.flight}\")\n return total", "title": "" }, { "docid": "670ea9771946e5f6c8790a4dffe17dc2", "score": "0.59217656", "text": "def get_speed(self):\n\n raise NotImplementedError", "title": "" }, { "docid": "625cdd998566ffc3c32e18ecfe202783", "score": "0.59136945", "text": "def __init__ (self):\n self.start_timer() # Starts calling handle_timer() at correct rate\n self.neighbour = {} # port:ping to neighbour\n self.table = {} # dst:[port, total cost, time]", "title": "" }, { "docid": "d2bfdbf28bd145b45296d513c40db49c", "score": "0.58999324", "text": "def _get_distance(self):\n try:\n GPIO.output(self._pin_trigger, GPIO.LOW)\n GPIO.output(self._pin_trigger, GPIO.HIGH)\n \n GPIO.wait_for_edge(self._pin_echo, GPIO.RISING)\n pulse_start_time = time.time()\n\n GPIO.wait_for_edge(self._pin_echo, GPIO.FALLING)\n pulse_end_time = time.time()\n\n pulse_duration = pulse_end_time - pulse_start_time\n\n #speed of sound is 343m/s\n distance = round(pulse_duration * 343, 2)\n return distance\n\n except Exception as e:\n print(\"Failed to get distance: \" + str(e))\n self.print_config()\n raise", "title": "" }, { "docid": "e21f9224732ef443e4d3ab7e450fc6ea", "score": "0.589663", "text": "def get_ping_speed(self):\n return self.__ping", "title": "" }, { "docid": "04b7794a708ecb400cc01ca1c54b970d", "score": "0.5895992", "text": "def handle_timer(self):\n for port in self.neighbors.keys():\n for host in self.distanceVector.keys():\n if self.distanceVector[host][1] != port:\n packet = basics.RoutePacket(host, self.distanceVector[host][0])\n self.send(packet, port)", "title": "" }, { "docid": "601c54c042001d7da5181b152874b382", "score": "0.5880622", "text": "def get_speed(time):\n return 3500.0 * 36 / time # 3500 = ball size (3.5 cm)", "title": "" }, { "docid": "d0113e21a07ddeda912f0281c35d8893", "score": "0.5866535", "text": "def trafficTimeForRoad(self, u, v):\n subU = self.searchSubRegion(u)\n subV = self.searchSubRegion(v)\n count = 0\n speed = 0\n if subU:\n speed += subU.getSpeed()\n # print \"found sub-region U with speed: \", subU.getSpeed()\n count += 1\n if subV:\n speed += subV.getSpeed()\n # print \"found sub-region V with speed: \", subV.getSpeed()\n count += 1\n\n if count > 0:\n speed = speed / float(count)\n else:\n speed = 50\n\n return 1.0 / speed", "title": "" }, { "docid": "d59f87e8e46a4a6d52af098f44192031", "score": "0.5856914", "text": "def speed_count(self) -> int:\n return 20", "title": "" }, { "docid": "91736149f550a83fa09e5276f100756a", "score": "0.58351326", "text": "def get_distance():\n \n GPIO.setmode(GPIO.BCM)\n GPIO.setup(Constants.DISTANCE_SENSOR_TRIGGER, GPIO.OUT)\n GPIO.setup(Constants.DISTANCE_SENSOR_ECHO, GPIO.IN)\n\n # set the trigger to HIGH for a very short time\n GPIO.output(Constants.DISTANCE_SENSOR_TRIGGER, True)\n time.sleep(0.00001)\n GPIO.output(Constants.DISTANCE_SENSOR_TRIGGER, False)\n\n start_time = time.time()\n stop_time = time.time()\n\n while GPIO.input(Constants.DISTANCE_SENSOR_ECHO) == 0:\n start_time = time.time()\n\n while GPIO.input(Constants.DISTANCE_SENSOR_ECHO) == 1:\n stop_time = time.time()\n\n # Calculate the time between the stop and start time\n time_elapsed = stop_time - start_time\n # Multiply the elapsed time by 34300cm/s and divide it by 2 because the signal went to the object AND BACk\n distance = (time_elapsed * 34300) / 2\n\n return distance", "title": "" }, { "docid": "1fb7d6c78f3e4399ea0b604fb9f4c90a", "score": "0.58337045", "text": "def get_speed(self):\n return self.__speed", "title": "" }, { 
"docid": "1fb7d6c78f3e4399ea0b604fb9f4c90a", "score": "0.58337045", "text": "def get_speed(self):\n return self.__speed", "title": "" }, { "docid": "3b964424a92d7e6ad3bf54d8a03a05b2", "score": "0.58318925", "text": "def latency(self):\n return self.__pb.latency() / 1e6", "title": "" }, { "docid": "7d9cb7ad86aeea13dfd049d9525d3cb7", "score": "0.5823417", "text": "def speed(self, dist, departure_t):\n # Anzahl an Zeitintervallen für welche Geschwindigkeitsfunktion parametriert wurde\n speed_intervals = len(self.ev_speed_params)\n # Faktor zum Zuordnen des 15-minütigen Intervalls zu entsprechender Geschwindigkeitsfunktion\n interval_divisor = speed_intervals / 96\n func_no = int(departure_t * interval_divisor)\n speed = ElectricVehicle.ev_speed_params[func_no][0] + ElectricVehicle.ev_speed_params[func_no][\n 1] * np.log(dist)\n lower_bound = ElectricVehicle.ev_speed_params[func_no][2]\n return max(lower_bound, speed)", "title": "" }, { "docid": "c9f1e3fd55d54cb211cb93d611dfb650", "score": "0.5814356", "text": "def __init__(self):\n self.start = time.time()\n self.end = time.time()\n self.totalTime = self.totalTime = self.end-self.start\n self.turnStart = time.time()", "title": "" }, { "docid": "d81b3e21418fb108bc3cb3b7236ab17a", "score": "0.58115053", "text": "def estimateRTT(self, row, col):\r\n\t\tdist = DISTANCES[row][col]\r\n\t\tdist_delay = dist/(3*1e5)\r\n\t\t\r\n\t\treturn dist_delay", "title": "" }, { "docid": "e69d281a2f7af9ef295ca688930efe3c", "score": "0.57834685", "text": "def calc_distance(time_driven):\n return 51.5*time_driven-3.185", "title": "" }, { "docid": "7866db9ae5bc30ea8daf1b46abfaa9dd", "score": "0.57733136", "text": "def time(self):\n self.switch_to_GPS()\n return", "title": "" }, { "docid": "1f1ee1af58525119d1e7745902b5e8c4", "score": "0.5766321", "text": "def calculate_time_step(self):\n fastest_particle = max(self.particles, key=lambda x: x.speed())\n max_velocity = fastest_particle.speed()\n max_distance = self.particle_r / 8\n return max_distance / max_velocity if max_velocity else sqrt(\n self.particle_r / (4 * self.g))", "title": "" }, { "docid": "40f56f7340f8e7040ac0a2b1f0930d70", "score": "0.57609695", "text": "def travel_time_to_rendezvous(self) -> timedelta:\n return self._travel_time_to_waypoint(self.join)", "title": "" }, { "docid": "f318d232daebe6b54df9e6514d713867", "score": "0.57572687", "text": "def calc_distance_walktime(rows):\n\n route_length = 0\n walk_time = 0\n\n for row in rows:\n\n route_length += row[3]\n # calculate walk time\n if row[5] == 3 or row[5] == 4: # stairs\n walk_speed = 1.2 # meters per second m/s\n elif row[5] == 5 or row[5] == 6: # elevator\n walk_speed = 1.1 # m/s\n else:\n walk_speed = 1.39 # m/s\n\n walk_time += (row[3] / walk_speed)\n\n length_format = \"%.2f\" % route_length\n real_time = format_walk_time(walk_time)\n print {\"route_length\": length_format, \"walk_time\": real_time}", "title": "" }, { "docid": "a87cab6ac935ba209bbb313f6f1dc828", "score": "0.571509", "text": "def calculate_speed_diff(self):\n\t\tcumulative_speed_diff = 0\n\t\tfor car in self.currentVehList:\n\t\t\tspeed_diff = traci.vehicle.getAllowedSpeed(car) - traci.vehicle.getSpeed(car)\n\t\t\tcumulative_speed_diff += speed_diff\n\t\treturn -cumulative_speed_diff", "title": "" }, { "docid": "462feb4ac578b0cf3dc877ecccbeeb97", "score": "0.5702536", "text": "def get_speed_status(self) -> SpeedStatus:", "title": "" }, { "docid": "fbebac2ec1abeb51fc9f7fa1b76d98d3", "score": "0.56966174", "text": "def __init__(self):\n\t\tself.speed=1\n\t\t# Travel speed: 
100km/h to convert in m/min =1600.67 meters in 1 minute\n\t\t#self.speed = round(100 * 60 / 3.6, 2)", "title": "" }, { "docid": "241f7dad9a4fc16e2c4a83b71a486dcb", "score": "0.5691064", "text": "def speed(self):\n return self._fan.speed", "title": "" }, { "docid": "af51fc1d82616f1a16bfa93157ecb702", "score": "0.5687072", "text": "def speed(self) -> float:\n return self._speed", "title": "" }, { "docid": "8b68e083a7b823cd37bc38c0979a74e1", "score": "0.5661652", "text": "def compute_travel_time(path, travel_type='driving', start_time=None):\n if (start_time is None):\n start_time = datetime.now() + timedelta(minutes=5)\n\n gmaps = googlemaps.Client(key='AIzaSyBuGbc491h07Hp-ao-6o-dkLmUUX9OG_ho')\n\n total_time = 0\n\n for i in range(1, len(path)):\n dist_matrix = gmaps.distance_matrix((path[i - 1]['latLng']['lat'], path[i - 1]['latLng']['lng']), # origin\n (path[i]['latLng']['lat'], path[i]['latLng']['lng']), # destination\n travel_type, # travel type\n 'English', # language\n None, # things to avoid\n 'imperial', # units\n start_time, # departure time\n None, # arrival time\n None, # public transit\n None, # transit preferences\n 'best_guess') # Traffic Model\n\n total_time += dist_matrix['rows'][0]['elements'][0]['duration']['value']\n\n return total_time", "title": "" }, { "docid": "5c7e774f346934c079508992fe7c244d", "score": "0.56520855", "text": "def durations_from_router(self, router, **kwargs):\n d = []\n counter = 0\n for e in self.tri.edges:\n route = router.directions(locations=self.points[e,:], **kwargs)\n self.snapped_points[e[0]] = np.array(route.raw['waypoints'][0]['location'])\n self.snapped_points[e[1]] = np.array(route.raw['waypoints'][-1]['location'])\n d.append(route.duration/60)\n counter += 1\n print(\"{} of {} edges calculated.\".format(counter, self.tri.edges.shape[0]))\n\n self.fill_distances(np.array(d))", "title": "" }, { "docid": "194cac3a37bb6df366daeb2123294458", "score": "0.56344914", "text": "def cpuTotalTime():", "title": "" }, { "docid": "ab6b1bf8f6661d7cc7940348c53e5e39", "score": "0.56342477", "text": "def time(self) -> float:\n\t\treturn self.BaseTime + self.Loop.time()", "title": "" }, { "docid": "fb2467db9baa5f1f23fc1038626410db", "score": "0.56322324", "text": "def speed(self):\n vx, vy = self.velocity\n return np.sqrt(vx**2 + vy**2)", "title": "" }, { "docid": "20adae8b7ab79ae5ba584a173456d5f9", "score": "0.56288415", "text": "def calculate_delay(routes, sensor, time):\n global G, route_subgraphs, gateways_per_route, dsp_memo # inputs\n\n global error\n\n import sys\n waiting_time = None\n shortest_distance, shortest_path = sys.float_info.max, None # to any gateway\n\n gateway_exists = False\n paths = 0\n\n for r in routes:\n for gateway in gateways_per_route[r]:\n \n gateway_exists = True\n\n g = route_subgraphs[r].copy()\n\n wait_time = None\n\n try:\n distance, path = dsp_memo.getDsp(g, r, sensor.name, namify_stop(G.name, gateway))\n #distance, path = nx.single_source_dijkstra(g, sensor.name, namify_stop(G.name, gateway), weight='length')\n except Exception as e:\n continue\n \n\n while len(path) > 1:\n '''\n make sure then you limit duration to 24 hours. later if time is greater than 24\n message is not delivered\n '''\n # TODO:: error rate too high.. 
fix it.\n paths += 1\n #print(path)\n departure_list = g[sensor.name][path[1]][0]['departure_time'].get(r, None)\n\n #print(departure_list)\n if departure_list == None:\n # print(\"no departure time found\")\n break\n #g.remove_node(path[1])\n #continue\n\n else:\n wait_time = get_time_to_next_departure(current_time=time, departure_list=departure_list)\n break\n\n\n if wait_time != None:\n\n if distance + wait_time < shortest_distance:\n shortest_distance, shortest_path = distance + wait_time, path\n waiting_time = wait_time\n #break\n \n \n\n if waiting_time == None:\n shortest_distance = None\n \n #If a gateway does exist, but no delivery due to no cycle on the route\n # path==0 leads to indegree better performance than\n if gateway_exists== True and paths==0:\n error +=1\n return\n\n \n sensor.gen_times.append(time) # in sec\n sensor.msg_latencies.append(shortest_distance) # in sec\n sensor.waiting_time.append(waiting_time)\n sensor.hops.append(shortest_path)\n \n return waiting_time", "title": "" }, { "docid": "01143212845a2d7f7edd46d88a09fe6f", "score": "0.56280637", "text": "def _get_negotiated_port_speed(self):\n return self.__negotiated_port_speed", "title": "" }, { "docid": "8db8d120904a305616d933cbed2b8755", "score": "0.5627684", "text": "def __init__(self):\n self.routingTable = {} # maps ports to latency and hosts\n self.distanceVector = {} # maps hosts to latency and first hop\n self.neighbors = {} # maps ports to latency\n self.directHosts = {}\n self.start_timer() # Starts calling handle_timer() at correct rate", "title": "" }, { "docid": "8c7d95e6adf62a6cd54fdbc7ed9f5532", "score": "0.56266624", "text": "def update(self):\n lat_a = self.lat\n lon_a = self.lon\n new_time = datetime.now()\n dur = (new_time - self.datetime).total_seconds()\n lat_b = lat_a + (self.speed * (dur/3600.0)\n * math.cos(math.radians(self.course))\n / 60.0)\n lat_m = (lat_a + lat_b)/2\n lon_b = lon_a + (self.speed * (dur/3600.0)\n * math.sin(math.radians(self.course))\n / math.cos(math.radians(lat_m)) / 60.0)\n self.lat = lat_b\n self.lon = lon_b\n self.datetime = new_time", "title": "" }, { "docid": "98d3f1c8d2745354925aeba209c04832", "score": "0.5625444", "text": "def update_timestep_simple(self):\n Connector = self._Connector\n volumeG1 = self._volumeG1\n volumeG2 = self._volumeG2\n\n tMin = 1e200\n # Treat G1:\n for c in Connector.connections:\n dt = volumeG1[c.i1] / c.exchangeFactor\n if dt < tMin:\n tMin = dt\n # Treat G2:\n for vertex in Connector.vertexG2ToVerticesG1.keys():\n exchangeFactor = 0.0\n for neighbor in Connector.vertexG2ToVerticesG1[vertex]:\n exchangeFactor = exchangeFactor + \\\n Connector.connections[neighbor].exchangeFactor \n dt = volumeG2[vertex] / exchangeFactor\n if dt < tMin:\n tMin = dt\n \n self.dt = tMin", "title": "" }, { "docid": "e16dcfbc7db994d67170c7ea28862d90", "score": "0.5621079", "text": "def metro_time(i, j, metro, **kwargs):\n # change speed based on type of transit\n dist = metro.loc[i,'geometry'].distance(metro.loc[j,'geometry'])\n\n if \"speed\" in kwargs.keys():\n speed = kwargs[\"speed\"]\n return dist * speed[metro.loc[i,\"Type\"]]\n else:\n return dist", "title": "" }, { "docid": "edb409d8b70f7caa26dd95cbbe66627d", "score": "0.5620898", "text": "def calc_timegap(distx, speed):\n if speed > 1.0:\n timegap = distx / speed\n else:\n timegap = 10000\n return timegap", "title": "" }, { "docid": "c52683f5adbf86c2d9b30f57e5c8b5f9", "score": "0.5620636", "text": "def distanceTraveled(self):\n totaltime = 0\n totaldist = 0\n\n lastp = 
TimedPosition(self.timeBegin, time.time(), self.lastRegisteredSpeed)\n self.log.append(lastp) #The non complete TimedPostion that could miss in the distance assertion is added to the array\n\n for tp in self.log:\n timeElpased = tp.timeEnd - tp.timeBegin\n totaltime += timeElpased\n totaldist += (tp.speed * 0.0277) * timeElpased\n \n self.log.pop() #The non complete TimedPosition is deleted from the array\n\n return [totaldist, totaltime]", "title": "" }, { "docid": "0ffe146a2d52b1e1de4b838eb5e2439b", "score": "0.5619895", "text": "def HCost(p1, p2, speed = 5):\n x1 = p1[0]\n y1 = p1[1]\n z1 = interface.elevations[y1][x1]\n \n x2 = p2[0]\n y2 = p2[1]\n z2 = interface.elevations[y2][x2]\n \n distance = math.sqrt(((x2 - x1)*10.29)**2 + ((y2 - y1)*7.55)**2 + (z2 - z1)**2)\n \n return distance / speed", "title": "" }, { "docid": "bf790f343700e33c599b8e5cbe1d7488", "score": "0.56081283", "text": "def process(self, time_passed):\n \n \n self.brain.think()\n\n if self.speed > 0 and self.location != self.destination:\n\n vec_to_destination = self.destination - self.location\n distance_to_destination = vec_to_destination.get_magnitude()\n self.heading = vec_to_destination\n self.heading.normalise()\n travel_speed = min(distance_to_destination, time_passed * self.speed)\n position = self.heading * travel_speed \n self.location += position", "title": "" }, { "docid": "a645ea52405aac2fd9ca91c45af34a07", "score": "0.56074995", "text": "def ntptime(self):\n addr = socket.getaddrinfo(self.host, 123)[0][-1]\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n try:\n sock.settimeout(0.5)\n query = bytearray(48)\n query[0] = 0x1b\n start = self.nowfloat()\n sock.sendto(query, addr)\n msg = sock.recv(48)\n end = self.nowfloat()\n except OSError as ex:\n print(\"Failed to update time: \", ex)\n return None, None\n finally:\n sock.close()\n (sec, msec) = struct.unpack(\"!II\", msg[40:48])\n\n if sec == 0:\n # if the seconds returned are 0, then we will not\n # accept such a packet\n print(\"BADNTP\")\n return None, None\n\n rtt = (end-start)/2\n msec = msec * NTP_FRACTION + rtt\n if msec > 1:\n sec += 1\n msec -= 1\n\n return (sec - NTP_DELTA, int(msec * 1000))", "title": "" }, { "docid": "89e4245f216f5c7315b2a36d607575fc", "score": "0.55924314", "text": "def speed(self):\n\n return self._speed", "title": "" }, { "docid": "6b5d070225a098cf6a29dade98b8cddf", "score": "0.55923015", "text": "def distance(reindeer, seconds):\n cycle = (reindeer.fly + reindeer.rest)\n full_cycles = seconds // cycle\n remainder = seconds % cycle\n remainder_flying_time = min(remainder, reindeer.fly)\n full_cycle_distance = full_cycles * reindeer.fly * reindeer.speed\n remainder_distance = remainder_flying_time * reindeer.speed\n return full_cycle_distance + remainder_distance", "title": "" }, { "docid": "2272ae96ddf6758441d6d746dec02b9e", "score": "0.55810404", "text": "def drive(self, _distance):\n if connected:\n distance = _distance #cm INPUT\n speed = 160 #constant\n unitSpeed = 11.11 #Units/second CONSTANT\n cmPrUnit = 1.1483 #Constant\n\n unitDist = int(round(distance/cmPrUnit)) #Be careful with int and doubles\n sleepTime = unitDist/unitSpeed * 1.3\n\n set_speed(speed) \n enc_tgt(1,1,unitDist)\n if(_distance > 0):\n fwd()\n else:\n bwd()\n time.sleep(sleepTime)\n return 0", "title": "" }, { "docid": "b186f301ab3b345c17f41ff320856b08", "score": "0.5561445", "text": "def bandwidth():", "title": "" }, { "docid": "1081acef6c7ab63db5305fdf7dba1c53", "score": "0.5550859", "text": "def latency(self) -> float:\n return 
self._keep_alive.latency", "title": "" }, { "docid": "e46bb260f0b81126ac6fb8829fb7926d", "score": "0.5545357", "text": "def air_speed(self):\n pass", "title": "" }, { "docid": "48d0c690a4f4e0c79b9f28a658047536", "score": "0.55442244", "text": "def distance_cm(self):\n GPIO.output(self.GPIO_TRIGGER, True)\n time.sleep(0.00001)\n GPIO.output(self.GPIO_TRIGGER, False)\n start = time.time()\n stop = time.time()\n\n while GPIO.input(self.GPIO_ECHO) == 0:\n start = time.time()\n\n while GPIO.input(self.GPIO_ECHO) == 1:\n stop = time.time()\n\n # Convert to inches:\n return ((stop - start) * 34300)/2", "title": "" }, { "docid": "e8d4ba46ddb97af440fa0b3401026ee3", "score": "0.5540337", "text": "def time(self) -> float:\n return self.state.game_loop / 22.4 # / (1/1.4) * (1/16)", "title": "" }, { "docid": "a4a9f39f5383cd4c2b6d3dc6b58e7363", "score": "0.55350065", "text": "def getCogSpeed(self):\n result = 6.0\n safezone = self.getSafezoneId()\n result = CTGG.calculateCogSpeed(self.numPlayers, safezone)\n return result", "title": "" }, { "docid": "7fa0408f41d142c3b1df4c33c05739cf", "score": "0.5529188", "text": "def GetSpeed(self):\n return self.speed", "title": "" }, { "docid": "17e5b507e62f48bad00c0feffbb3fd71", "score": "0.55216587", "text": "def calculate_time(self):\n return self.end_time - self.start_time", "title": "" }, { "docid": "df1d8d7923ea9a173775216f585ad21f", "score": "0.5510387", "text": "def reach_times(self) -> List[float]:\n return self._reach_times", "title": "" }, { "docid": "1a88077a6e91c15de4f0ac286cdb1755", "score": "0.5508527", "text": "def average_speed(self):\n # Distance in kms\n distance_kms = self.distance/1000.0\n # Duration in hours\n duration_hrs = self.duration/3600.0\n # Returns average speed in kmph\n return distance_kms/duration_hrs", "title": "" }, { "docid": "286e9dd3d2183c5663a53ef04a91353a", "score": "0.5506505", "text": "def get_total_speed(self):\n base_speed = super().get_total_speed()\n return base_speed / 2", "title": "" }, { "docid": "f2d1ffb31c294396f8fd01184169de9f", "score": "0.5504359", "text": "def get_target_speed(self) -> Optional[int]:", "title": "" }, { "docid": "f01ea28f87724f88d86386754a934ab2", "score": "0.5501712", "text": "def speed_difference(speed_1, speed_2):\n return abs(speed_1 - speed_2)", "title": "" }, { "docid": "69cef329b5655d7718d11c3c6a90f56e", "score": "0.54980093", "text": "def getSpeed(self):\n return self._speed", "title": "" }, { "docid": "80e8f6e776320d05c00fc0a32fe61766", "score": "0.5495716", "text": "def speed(self):\n return self.raw_line.split()[-2+self.index_offset]", "title": "" }, { "docid": "d87fea38619d1c96d21a290cd4a7666a", "score": "0.54914665", "text": "def eval_turnaround_time(self, start_time):\n for event in self.clevents:\n event.wait()\n end_time = start_time\n for key in self.events['gpu'].keys():\n if self.events['gpu'][key][-1].read_end > end_time:\n end_time = self.events['gpu'][key][-1].read_end\n for key in self.events['cpu'].keys():\n if self.events['cpu'][key][-1].read > end_time:\n end_time = self.events['cpu'][key][-1].read_end\n # print end_time, start_time\n return (end_time - start_time).total_seconds()", "title": "" }, { "docid": "b763ecc9102e8314b60f27f51b1c92f6", "score": "0.5483007", "text": "def get_host_latency (host_url) :\n\n try :\n\n # FIXME see comments to #62bebc9 -- this breaks for some cases, or is at\n # least annoying. 
Thus we disable latency checking for the time being,\n # and return a constant assumed latency of 250ms (which approximately \n # represents a random WAN link).\n return 0.25\n\n\n global _latencies\n\n if host_url in _latencies :\n return _latencies[host_url]\n\n u = saga.Url (host_url)\n\n if u.host : host = u.host\n else : host = 'localhost'\n if u.port : port = u.port\n else : port = 22 # FIXME: we should guess by protocol \n\n import socket\n import time\n\n # ensure host is valid\n ip = socket.gethostbyname (host)\n\n start = time.time ()\n\n s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)\n s.connect ((host, port))\n s.shutdown (socket.SHUT_RDWR)\n\n stop = time.time ()\n\n latency = stop - start\n\n _latencies[host_url] = latency\n\n return latency\n\n except :\n\n raise", "title": "" }, { "docid": "b357752fe1431f6bf34bbe38a3cfbaaf", "score": "0.54800695", "text": "def at_server_connection(self):\n self.connection_time = time.time()", "title": "" }, { "docid": "ed6a17f9136832750fdc35d4f7725bff", "score": "0.54797536", "text": "def __init__(self):\n self.start_timer() # Starts calling handle_timer() at correct rate\n self.dv_table = {} # {dst :[cost,port,last_updated_time]}\n self.port_table = {} #{port: latency}", "title": "" }, { "docid": "fc8d66f383962f33540ad7ca7b0a6f63", "score": "0.54786444", "text": "def latencyTimeCalc(self):\n output = []\n index = 0\n for chunk in self.chunks:\n prevTime = 0\n for line in chunk:\n data = [x.strip() for x in line.split(',')]\n if data[1].isnumeric():\n userTimestamp = int(data[1])\n else:\n continue\n if prevTime == 0:\n prevTime = userTimestamp\n continue\n density = userTimestamp - prevTime\n prevTime = userTimestamp\n output.append(density)\n index += 1\n return output", "title": "" } ]
ca961428184200e1478e260d1fffe2fb
Partial pipeline for preprocessing raw doc 1. tokenize using wordbreak 2. remove punctuation, whitespace 3. lowercase
[ { "docid": "af00f352315e097ec49060a2ad363845", "score": "0.75026083", "text": "def _doc_pre_pipeline(self, doc):\n\n for token in words(doc):\n token = token.translate(self._pw_remove_table)\n token = token.lower()\n if not token:\n continue\n yield token", "title": "" } ]
[ { "docid": "acb41e359bfd7490763ec6f40b44b52b", "score": "0.72116965", "text": "def preprocess(self, text):\n # Split by white space\n text = text.lower()\n #words = text.split()\n\n # Do preprocessing by removing replacing tokens with empty strings\n # or changing tokens.\n # The original spans will be maintained\n\n #text = ' '.join(words)\n return text", "title": "" }, { "docid": "9f579b858cc6763d0f97d4260c1f27ec", "score": "0.693714", "text": "def tokenize(doc):\n return (tok.lower() for tok in re.findall(r\"\\w+\", doc))", "title": "" }, { "docid": "028f7f626d9dd01193377159aa4e6a6b", "score": "0.6933351", "text": "def pre_processing(data):\n sw = stopwords.words(\"english\")\n # lowercase text\n data = data.apply(lambda x: \" \".join(i.lower() for i in str(x).split())) \n data = data.apply(lambda x: \" \".join(i for i in x.split() if i not in sw))\n data = data.apply(lambda x: re.sub(\"⇓\",\"\",x))\n \n return data", "title": "" }, { "docid": "41e56881863ca2d68a5cd50cadb0d9e4", "score": "0.6838783", "text": "def preprocessing(data):\n tokenizer = RegexpTokenizer(r'\\w+')\n stop_words = set(stopwords.words('english'))\n txt = data.str.lower().str.cat(sep=' ') #1\n words = tokenizer.tokenize(txt) #2\n words = [w for w in words if not w in stop_words] #3\n words = [st.stem(w) for w in words] #4\n return words", "title": "" }, { "docid": "8d0bc7d7e617903544e6c6bd8d674f02", "score": "0.6828423", "text": "def _preprocess(doc, accent_function=None, lower=False):\n if lower:\n doc = doc.lower()\n if accent_function is not None:\n doc = accent_function(doc)\n return doc", "title": "" }, { "docid": "a10f602bc9ddbb931978d5498089ca48", "score": "0.67959034", "text": "def _tokenizer(self):\n pattern = re.compile('(?u)\\\\b\\\\w\\\\w+\\\\b')\n return lambda doc: pattern.findall(doc)", "title": "" }, { "docid": "0e3e8a0327d2bfb3282fddcac2fb0064", "score": "0.6648407", "text": "def _preprocess(self, text):\n text = self.__html_parser(text)\n text = self.__remove_punctuation(text)\n text = self.__lower(text)\n\n tokenizer = RegexpTokenizer(r\"\\w+\")\n seq = self.__tokenize(\n text, tokenizer\n ) # convert text to seq to remove stopwords and stem\n\n seq = self.__remove_stop_words(seq, self.stopwords)\n stemmer = PorterStemmer()\n text = self.__stem(seq, stemmer) # here text is a string\n return text", "title": "" }, { "docid": "c47b0944af694831a83dc00d9e6d4e99", "score": "0.6574876", "text": "def preprocess(self):\n self.clean()\n self.words = nltk.word_tokenize(self.sample)\n self.normalize()\n return ' '.join(self.words)", "title": "" }, { "docid": "29833c1e732926c05b2b07f780a9f3d4", "score": "0.65543836", "text": "def tokenize(self, presc_text):\n return", "title": "" }, { "docid": "04b502803da3ac2bc752df2be643bdb2", "score": "0.6543075", "text": "def tokenize(document):\n return re.findall('\\w+', document.lower())", "title": "" }, { "docid": "47eb58de73ece2ff3ab91642b5a212bf", "score": "0.65169823", "text": "def normalize_one_doc(text):\n try:\n tokens = [word for word in word_tokenize(text) if word.isalpha()]\n tokens = list(filter(lambda t: t not in punctuations, tokens))\n tokens = list(filter(lambda t: t.lower() not in stop_words, tokens))\n filtered_tokens = []\n for token in tokens:\n if re.search(\"[a-zA-Z]\", token):\n filtered_tokens.append(token)\n filtered_tokens = list(map(lambda token: wnl.lemmatize(token.lower()), filtered_tokens))\n filtered_tokens = list(filter(lambda t: t not in punctuations, filtered_tokens))\n return filtered_tokens\n except Exception as e:\n raise e", 
"title": "" }, { "docid": "08bd9be37d3a140dcf30cd0ee8cf3554", "score": "0.6507603", "text": "def preprocessing(self, text):\n # Lower case\n text = text.lower()\n\n # Regular expression for finding contractions\n contractions_re=re.compile('(%s)' % '|'.join(self.contractions_dict.keys()))\n\n #Expand contractions\n text = self.expand_contractions(text,contractions_re)\n text = self.clean_text(text)\n\n #Remove added spaces\n text = re.sub(\" +\",\" \",text)\n text = text.strip()\n\n #Stop words and Lemmatizing\n text = ' '.join([token.lemma_ for token in list(self.nlp(text)) if (token.is_stop==False)])\n\n return text", "title": "" }, { "docid": "ef8f32bfeb5741f559a6c1f4a8b6bd6c", "score": "0.6506654", "text": "def preprocess(self, x):\n if (six.PY2 and isinstance(x, six.string_types) and not\n isinstance(x, six.text_type)):\n x = Pipeline(lambda s: six.text_type(s, encoding='utf-8'))(x)\n # will strip and then split here!\n if self.sequential and isinstance(x, six.text_type):\n x = self.tokenize(x.rstrip('\\n'))\n if self.lower:\n x = Pipeline(six.text_type.lower)(x)\n if self.preprocessing is not None:\n return self.preprocessing(x)\n else:\n return x", "title": "" }, { "docid": "1c94236c80e23f8e0dd4fc8801290de7", "score": "0.6486636", "text": "def text_preproc(input_text):\n sentences = input_text.lower().split('.')\n sentences_splitted = list(map(lambda x: x.split(), sentences))\n\n def strip_words(list_input):\n return list(map(lambda x: x.strip('<>():,.;!?'), list_input))\n\n sentences_stripped = list(map(lambda x: strip_words(x),\n sentences_splitted))\n return sentences_stripped", "title": "" }, { "docid": "342ddcfda855adce501c40e8113fb9b0", "score": "0.6476344", "text": "def LowercaseFilter(tokens):\r\n \r\n for t in tokens:\r\n t.text = t.text.lower()\r\n yield t", "title": "" }, { "docid": "1dad19393ee12f35ad9e39e2c41c6d38", "score": "0.6458969", "text": "def preprocess(field):\n # remove weird code segment and website link\n cleaned_field = re.sub('//<!\\\\[CDATA\\\\[.*?//\\\\]\\\\]>', '', field, flags=re.DOTALL)\n cleaned_field = re.sub('<.*?>', '', cleaned_field, flags=re.DOTALL)\n\n # remove all escape characters\n escape_chars = ['\\xa0', '\\n', '\\t', '\\r', '\\'']\n for escape_char in escape_chars:\n cleaned_field = cleaned_field.replace(escape_char, ' ')\n\n # apply case_folding, sent_tokenize and word_tokenize\n words_by_sent = [word_tokenize(t) for t in sent_tokenize(cleaned_field.lower())]\n\n # for each word, remove prefix number and suffix punctuation and then apply stemmer\n cleaned_field = []\n for words in words_by_sent:\n for word in words:\n if word not in string.punctuation:\n word = remove_prefix_num(word)\n word = remove_attached_punctuation(word)\n word = stemmer.stem(word)\n if word not in string.punctuation:\n cleaned_field.append(word)\n\n return cleaned_field", "title": "" }, { "docid": "d35ef6a2126522693fa3c944316c2444", "score": "0.64134294", "text": "def preprocessing(text, nlp, gpu_enable=False) -> str:\n if gpu_enable:\n spacy.require_gpu()\n text = str(text)\n text = text.replace('\\\\n', ' ')\n tokens = nlp(text)\n tokens = [token.lemma_ for token in tokens if (\n token.is_stop == False and \\\n token.is_punct == False and \\\n token.lemma_.strip()!= '')]\n cleaned = ' '.join([word.lower() for word in tokens if word.isalpha() and len(word)>2])\n return cleaned", "title": "" }, { "docid": "d420390a04c67cc8fe896a4280d738a8", "score": "0.64049053", "text": "def simple_tokenize(doc):\n return [\n re.sub(r\"[^a-z0-9]\", \"\", t.lemma_.lower()).strip() 
for t in nlp(doc)\n if (t.text.strip() and\n not t.is_stop and\n not t.is_punct and\n not t._.is_profane)\n ]", "title": "" }, { "docid": "8520771a3d3dc8d761abcabd26d1a0b3", "score": "0.6387224", "text": "def preprocess(self):\n processed_tokens = [[item.lower() for item in each_token] for\n each_token in self.tokens]\n processed_tokens = [[''.join(c for c in s if c not in\n string.punctuation) for s in y] for y in\n processed_tokens]\n processed_tokens = [[s for s in x if s] for x in processed_tokens]\n return processed_tokens", "title": "" }, { "docid": "bdfdd5663ab0b3cca6a14fea6aa04ba2", "score": "0.6380246", "text": "def tokenize(document,lower=False):\n words = [word for word in document.split()]\n if lower:\n words = [lower(word) for word in words]\n re_punc = re.compile(f'[{string.punctuation}]')\n words = [re_punc.sub(\"\",word) for word in words]\n stopwords_set = set(stopwords.words(\"english\"))\n words = [word for word in words if not word in stopwords_set]\n words = [word for word in words if len(word) > 1]\n return words", "title": "" }, { "docid": "a3b58cef207ffb33433ba4a7533f23c8", "score": "0.63562125", "text": "def sentence_preprocess(self, words: List[str]) -> List[str]:\n raise Exception(\"Don't call me, call my subclasses\")", "title": "" }, { "docid": "6c9544fa957d5abe93b95b4e0be9131f", "score": "0.63226354", "text": "def preprocess(self, text, tag=True):\r\n tokenizer = RegexpTokenizer(\"[a-zA-Z'`]+\")\r\n words = tokenizer.tokenize(text)\r\n\r\n for sw in stop_words.intersection(words):\r\n while sw in words:\r\n words.remove(sw)\r\n if tag:\r\n words = pos_tag(words)\r\n return words", "title": "" }, { "docid": "218156dd41d9f5346db6eb309239a126", "score": "0.6319877", "text": "def _preprocess(sentences, preprocess_pipeline, word_tokenize=None):\n if preprocess_pipeline is not None:\n for function in preprocess_pipeline:\n sentences = function(sentences)\n\n if word_tokenize is None:\n return sentences\n else:\n return sentences, [word_tokenize(sentence) for sentence in sentences]", "title": "" }, { "docid": "6ec4602166457f730d730714b0bbc63c", "score": "0.6309391", "text": "def tokenize(self, document):\n return [t.lower() for t in re.findall(r\"\\w+(?:[-']\\w+)*\", document)]", "title": "" }, { "docid": "c885508137e12119f4cf30f5f605fc73", "score": "0.6237851", "text": "def fasttext_preprocess(texts: List[str]) -> List[str]:\n nlp = English()\n tokenizer = nlp.Defaults.create_tokenizer(nlp)\n\n processed_texts = []\n for doc in tokenizer.pipe(texts, batch_size=500):\n processed_texts.append(\" \".join(tok.lower_ for tok in doc if tok.is_alpha))\n return processed_texts", "title": "" }, { "docid": "cdd9fbde164ff39d2e44b47599687849", "score": "0.6232197", "text": "def __tokenize(self,text):\n text = text.lower()\n text = re.sub('/\\W/g',' ',text)\n text = re.sub('/\\s+/g',' ',text)\n text = text.strip()\n text = text.split(' ')\n text = self.__unique(text)\n return text", "title": "" }, { "docid": "a7f893cf77dd089b880cb7172be5be6d", "score": "0.62293476", "text": "def preprocess(tokens):\n tokens = [t for t in tokens if t not in PUNCTUATION and t != '@card@']\n tokens = [LEMMATIZER.lemmatize(t) for t in tokens]\n tokens = list(filter(lambda x: x not in STOP_WORDS, tokens))\n tokens = [t.lower() for t in tokens]\n\n return(tokens)", "title": "" }, { "docid": "f0385d2e8c53f1e575b687766c5cee76", "score": "0.6222712", "text": "def rough_pre_words(self, sent, result):\n sent_bk = deepcopy(sent)\n\n '''rm (xx)'''\n sent = re.sub(r'\\(.*?\\)|#', ' ', sent.lower())\n\n 
'''expand abbreviations'''\n for abbr in self.abbrs:\n if re.search(abbr, sent):\n sent = re.sub(abbr, self.abbrs[abbr], sent)\n # to check if needed\n abbrs = {\" n't\": \" not\", \"'s\": \"\", \"'d\": \"\"}\n for abbr in abbrs:\n sent = re.sub(abbr, abbrs[abbr], sent)\n\n '''time'''\n sent = re.sub(r'\\d+:\\d+:\\d+', 'time', sent)\n sent = re.sub(r'(\\w{2,}\\.)+\\w{2,}', '', sent) # Java, except e.g. etc.\n sent = re.sub(r' \\-?\\d+[\\-\\.\\d]* ', ' # ', sent) # number\n sent = re.sub(r'^\\-?\\d+[\\-\\.\\d]* ', '# ', sent)\n sent = re.sub(r' \\-?\\d+[\\-\\.\\d]*$', ' #', sent)\n sent = sent.replace('-', '_')\n sent = re.sub(r'(\\w+[\\\\/])+\\w*|\\.\\w+', '', sent)\n sent = re.sub(r'[^,\\.:;_#\\w ]', ' ', sent)\n\n '''remove stop words'''\n stop_words = ['the', 'being', 'been', 'to', 'and', 'of', 'use', 'in', 'for', 'it', 'will', 'a', 'an', 'some',\n 'I', 'they', 'ourselves', 'hers', \\\n 'this', 'that', 'with', 'not', 'on', 'from', 'there', 'their', 'very', 'he', 'own', 'its',\n 'itself', 'me', \\\n 'any', 'may', 'all', 'do', 'theirs', 'themselves', 'his', 'himself', 'herself', 'him', \\\n 'new', 'these', 'those', 'at', 'same', 'also']\n tokens = nltk.word_tokenize(sent)\n tmp = list()\n for word in tokens:\n if word not in stop_words:\n if word in ['id', 'ID']:\n tmp.append('identifier')\n else:\n tmp.append(word)\n\n '''lemmatization'''\n nlps = spacy.load('en_core_web_sm')\n n_sent = nlps(sent)\n sent = \" \".join([token.lemma_ for token in n_sent])\n sent = re.sub(r'^_ | _ | _$', ' ', sent)\n sent = re.sub(r'^[^\\w#]+|[^\\w#]+$', '', sent)\n\n ''' skip too short sentence '''\n puncs = string.punctuation\n pure_sent = list()\n for word in sent.split():\n if word not in puncs:\n pure_sent.append(word)\n if len(pure_sent) < 2:\n return\n\n result.append([sent_bk, sent])", "title": "" }, { "docid": "3f6f278d8469395f8afcdc8f7cff00df", "score": "0.621555", "text": "def TxtPreprocess(txt):\n\n # Lowercase, remove \\t and new line.\n txt = re.sub(r'[\\t\\n]', ' ', txt.lower())\n\n # Remove punctuation before space.\n txt = re.sub(r'[,.\\?!]+ ', ' ', txt)\n\n # Remove punctuation before end.\n txt = re.sub(r'[,.\\?!]+$', ' ', txt)\n\n # Remove punctuation after space.\n txt = re.sub(r' [,.\\?!]+', ' ', txt)\n\n # Remove quotes, [, ], ( and ).\n txt = re.sub(r'[\"\\(\\)\\[\\]]', '', txt)\n\n # Remove extra space.\n txt = re.sub(' +', ' ', txt.strip())\n\n return txt", "title": "" }, { "docid": "cfd237f32f3fba3c9a0e8f0388632363", "score": "0.6196064", "text": "def pre_process(raw_text):\n # remove the space and\n word_lists = re.split(r'\\s+', raw_text.strip())\n\n sent_index = word_lists[0]\n word_lists = word_lists[1:]\n sent_content = ''.join(word_lists)\n return sent_index, sent_content", "title": "" }, { "docid": "dfefd01892b5b271f2e179b9dddb8065", "score": "0.6185027", "text": "def text_preprocessing(sentences):\n\n stop_words = set(stopwords.words('english'))\n\n clean_words = None\n for sent in sentences:\n words = word_tokenize(sent)\n #words = [PorterStemmer.stem(word.lower()) for word in words if word.isalnum()]\n words = [ps.stem(word.lower()) for word in words if word.isalnum()]\n clean_words = [word for word in words if word not in stop_words]\n\n return clean_words", "title": "" }, { "docid": "4847125afab7e09bc309b654dd781c42", "score": "0.6182697", "text": "def tokenize(document):\n # Tokenization\n words = nltk.word_tokenize(document)\n \n # STEP 1: Convert incoming words to lowercase\n # STEP 2: Exclude the stopwords\n # STEP 3: Remove all punctuations, except for words 
CONTAINING punctuation\n return [word.lower() for word in words if not all(char in string.punctuation for char in word) and word not in nltk.corpus.stopwords.words('english')]", "title": "" }, { "docid": "1e88fc64d37246aa27eb83d632aa5e90", "score": "0.61687154", "text": "def preprocess(document):\n string = '''!?。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘'‛“”„‟…‧﹏.'''\n text = re.sub(string, \"\", document)\n document = re.sub(r'@[\\w_-]+', '', document)\n document = re.sub(r'-', ' ', document)\n document = re.sub('https?://[^ ]+', '', document)\n document = ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t]) | (\\w +:\\ / \\ / \\S +)\", \" \", document).split())\n document = re.sub(r'<[^>]*>', '', document)\n document = re.sub(r'\\[(.+)\\][(].+[)]', r'\\1', document)\n document = document.lower()\n return document", "title": "" }, { "docid": "e0802b80935f39ff592f49172483f936", "score": "0.6160165", "text": "def __init__(self,pre_process=\"\",post_process=\"\",reverse=False):\n self.pre_processor = self.pre_processors[pre_process]()\n self.post_processor = self.post_processors[post_process]()\n self.reverse = reverse\n self.re_word_split = re.compile(r\"(-?\\d+(?:[.,]\\d+)*|(?<=\\w)['’]\\w+|\\w+(?:['’]t)?)\")", "title": "" }, { "docid": "59ad59d3cd385cd71c404b33ec89164d", "score": "0.6147588", "text": "def tokenize(document):\n\n # Get set of stop words\n stop_words = set(stopwords.words('english'))\n\n # Convert document to lower case and tokenize into words\n document = document.lower()\n words = nltk.word_tokenize(document)\n\n # Keep track of processed words\n processed_words = []\n\n # Loop through words, and remove all punctuation\n for word in words:\n if word not in stop_words:\n flag = 0\n for char in word:\n if char not in string.punctuation:\n flag = 1\n if flag == 1:\n processed_words.append(word)\n\n # Return processed words\n return processed_words", "title": "" }, { "docid": "1333a5a3a4b51a988bb1d6c6eb1a480f", "score": "0.61379856", "text": "def tokenize(self, document):\n\t\t# Break the document into sentences\n\t\tfor sent in sent_tokenize(document):\n\t\t\t# Break the sentence into part of speech tagged tokens\n\t\t\tfor token, tag in pos_tag(wordpunct_tokenize(sent)):\n\t\t\t\t# Apply preprocessing to the token\n\t\t\t\ttoken = token.lower() if self.lower else token\n\t\t\t\ttoken = token.strip() if self.strip else token\n\t\t\t\ttoken = token.strip('_') if self.strip else token\n\t\t\t\ttoken = token.strip('*') if self.strip else token\n\n\t\t\t\t# If punctuation or stopword, ignore token and continue\n\t\t\t\tif token in self.stopwords or all(char in self.punct for char in token):\n\t\t\t\t\tcontinue\n\n\t\t\t\t# Lemmatize the token and yield\n\t\t\t\tlemma = self.lemmatize(token, tag)\n\t\t\t\t# print(lemma)\n\t\t\t\tyield lemma", "title": "" }, { "docid": "3025307d867b69589e8c75ad5439b5f2", "score": "0.6124529", "text": "def __tokenize(self, doc):\n global STEMMER, STOPWORDS\n no_punc = self.__remov_punc(doc)\n tokens = word_tokenize(no_punc.lower())\n lemmas_no_stop = [STEMMER.stem(word) for word in tokens\n if word not in STOPWORDS]\n return lemmas_no_stop", "title": "" }, { "docid": "9965097147725b5b92f828f42062d9a4", "score": "0.61183876", "text": "def tokenize(document):\n words = word_tokenize(document) # print(len(words))\n stop_words = set(stopwords.words('english'))\n\n filtered = []\n\n for word in words:\n word = word.lower()\n if word not in string.punctuation:\n if word not in stop_words:\n filtered.append(word)\n\n return filtered", 
"title": "" }, { "docid": "bc3b47305f1e45d3dd01a46c321586b3", "score": "0.6115384", "text": "def tokenizer(txt):\n tokens = nlp(txt)\n filters = []\n \n for word in tokens:\n if not word.is_stop:\n lemma = word.lemma_.lower().strip()\n if re.search('^[a-zA-Z]+$',lemma):\n filters.append(lemma)\n \n return \" \".join(filters)", "title": "" }, { "docid": "7c4f4112914697a34a804d7ed49b6e46", "score": "0.61148095", "text": "def basic_tokenizer(line, normalize_digits=True):\n line = re.sub('<u>', '', line)\n line = re.sub('</u>', '', line)\n line = re.sub('\\[', '', line)\n line = re.sub('\\]', '', line)\n words = []\n _WORD_SPLIT = re.compile(r\"([.,!?\\\"'-<>:;)(])\")\n _DIGIT_RE = re.compile(r\"\\d\")\n \n for ltr in l3:\n line = line.replace(ltr,' ')\n \n for tbr,r in zip(l1,l2):\n line = line.replace(tbr,r)\n \n for fragment in line.strip().lower().split():\n for token in re.split(_WORD_SPLIT, fragment):\n if not token:\n continue\n if normalize_digits:\n token = re.sub(_DIGIT_RE, '#', token)\n if token in l4:\n token = tag_name\n words.append(token)\n return words", "title": "" }, { "docid": "3c16af6faca4359e5485266a5a355c84", "score": "0.61116636", "text": "def preprocess_sentence(text):\n\n text = text.lower()\n text = text.strip()\n return text", "title": "" }, { "docid": "843df12455a802275def99b56bf7eaaf", "score": "0.6109499", "text": "def tokenize(in_file):\n if not isfile(in_file):\n raise RuntimeError(f\"Can't open {in_file}\")\n\n with open(in_file) as fp:\n for line in fp:\n for word in re.sub(r'[^a-z0-9_]', ' ', line).split():\n yield word.lower()", "title": "" }, { "docid": "745f5ad5cc7929742312d3153736a506", "score": "0.6108461", "text": "def prepare_corpus(corpus):\n # 1. Tokenize\n tokenized = nltk.word_tokenize(corpus)\n # 2. Remove Punctuation\n no_punctuation = [token for token in tokenized if token.isalnum()]\n # 3. Remove Stopwords\n stop_words = list(sw)\n no_stop = [word for word in no_punctuation if word not in stop_words]\n # 4. 
Make Lower Case\n lower_case = []\n for letter in no_stop:\n lower_case.append(letter.lower())\n return lower_case", "title": "" }, { "docid": "3992b19c29b7897d398a71ed341f84df", "score": "0.61066794", "text": "def preprocess(self):\r\n if self.remove_patterns is not None:\r\n combined_pattern = r''\r\n for pattern in self.remove_patterns:\r\n combined_pattern = combined_pattern + f'{pattern}|'\r\n self.raw_text = [re.sub(combined_pattern,'',string) for string in self.raw_text] #remove all specified regex patterns\r\n\r\n self.raw_text = [re.sub(r' {2,}',' ',string) for string in self.raw_text] #replace spaces 2 or bigger with a single space \r\n self.get_acronyms() #save acronyms\r\n self.raw_text = [re.sub(r'\\(.*?\\)','',string,flags=re.DOTALL) for string in self.raw_text] #remove all text within parentheses\r", "title": "" }, { "docid": "df6988d049d0b986af61971daf444efc", "score": "0.6102858", "text": "def __review_sentence_parse(self, doc):\n global SPLIT_SENTENCES\n if self.phrases is None:\n for sentence in SPLIT_SENTENCES.split(doc):\n yield self.__tokenize(sentence)\n else:\n for sentence in SPLIT_SENTENCES.split(doc):\n yield self.phrases[self.__tokenize(sentence)]", "title": "" }, { "docid": "30fd3f885fa8b75c056d4599bb8045b4", "score": "0.60794896", "text": "def preprocessText(self, sent):\n sent = nltk.word_tokenize(sent)\n sent = nltk.pos_tag(sent)\n return sent", "title": "" }, { "docid": "119d95fea19fb24a5240f1429ebca52c", "score": "0.60782015", "text": "def tokenize(document):\n token = nltk.word_tokenize(document)\n filtered = []\n for word in token:\n if word.lower() in nltk.corpus.stopwords.words('english'):\n continue\n elif word[0] in string.punctuation:\n continue\n else:\n filtered.append(word.lower())\n return filtered", "title": "" }, { "docid": "63692e84de3306f5aa63156f2d3d3715", "score": "0.6045194", "text": "def normalizeText(text):\n# s = s.lower()\n# s = re.sub('\\s\\W',' ',s) # hyphens, apostrophes\n# s = re.sub('\\W\\s',' ',s)\n# s = re.sub('\\s+',' ',s) # double spaces\n tokens = nltk.word_tokenize(str(text).lower())\n return ' '.join(tokens)", "title": "" }, { "docid": "8614f249a725710a0d34a878ee15f952", "score": "0.6042588", "text": "def _build_tokenizer(self):\n token_pattern = re.compile(self.token_pattern)\n if self.lowercase:\n return lambda doc: token_pattern.findall(doc.lower())\n else:\n return lambda doc: token_pattern.findall(doc)", "title": "" }, { "docid": "cb8fa25a177974be99938c8d98924af4", "score": "0.6042021", "text": "def preprocess(data):\n from nltk.stem import WordNetLemmatizer\n import re\n from unidecode import unidecode\n lemmatizer = WordNetLemmatizer()\n\n data = ' '.join(data)\n data = data.lower() # Lower - Casing\n data = data.replace('-', ' ') # Removing Hyphen\n words = []\n for word in data.split():\n word = re.sub(\"[0-9]\", \" \", word) # removing numbers,punctuations and special characters\n word = re.sub((r'\\b(oz|ounc|ounce|pound|lb|inch|inches|kg|to)\\b'), ' ', word) # Removing Units\n if len(word) <= 2: continue # Removing words with less than two characters\n word = unidecode(word) # Removing accents\n word = lemmatizer.lemmatize(word) # Lemmatize\n if len(word) > 0: words.append(word)\n return ' '.join(words)", "title": "" }, { "docid": "5d92eb0ecab1e8d3c4290f6f189cb78f", "score": "0.6041289", "text": "def tokenizer(sentence):\n return [w.text.lower() for w in nlp(cleaner(sentence))]", "title": "" }, { "docid": "0fb6701e0dfed159d9b5a75a5350029c", "score": "0.6039358", "text": "def preprocess(sentReview):\n\n\t# Simple 
tokens, de-accent and lowercase processor\n\ttokens = []\n\tfor i in range(len(sentReview)):\n\t\ttokens.append(gensim.utils.simple_preprocess(sentReview[i], deacc=True, min_len=3))\n\n\tfiltered = []\n\n\t# POS Tagging and filtering sentences\n\tfor i in range(len(sentReview)):\n\t\tdoc = nlp(force_unicode(sentReview[i]))\n\t\tb = []\n\t\tfor tok in doc:\n\t\t\tif tok.is_stop != True and tok.pos_ != 'SYM' and tok.tag_ != 'PRP' and tok.tag_ != 'PRP$' and tok.pos_ != 'NUM' and tok.dep_ != 'aux' and tok.dep_ != 'prep' and tok.dep_ != 'det' and tok.dep_ != 'cc' and len(tok) != 1:\n\t\t\t\tb.append(tok.lemma_)\n\t\tfiltered.append(b)\n\n\treturn tokens, filtered", "title": "" }, { "docid": "29f940fc08c6b730b03bddee9604ce43", "score": "0.6030594", "text": "def tokenize(doc, *, stopwords=None):\n text = doc.read()\n\n tokens = utils.simple_preprocess(text, deacc=True)\n if stopwords is not None:\n tokens = [t for t in tokens if t not in stopwords]\n\n return tokens", "title": "" }, { "docid": "eb204282fc79cd5d5ca9ef6226a658d0", "score": "0.60164773", "text": "def tokenize(self, text):\n pass", "title": "" }, { "docid": "ba9665f7cf3ca67d12d679050b760812", "score": "0.6012135", "text": "def clean_and_tokenize_docs(docs):\n return [clean_and_tokenize(doc) for doc in docs]", "title": "" }, { "docid": "0497ff9b8135e57c4fc9a219a6bf3f39", "score": "0.6008622", "text": "def sent_tokenizer(document):\n return sent_tokenize(document)", "title": "" }, { "docid": "05a772f685a8a1fdd228a6f57fa38888", "score": "0.6000438", "text": "def clean_doc(doc):\n # replace '--' with a space ' '\n doc = doc.replace('.', ' <EOS>')\n doc = doc.replace('?', ' <QUES>')\n # split into tokens by white space\n tokens = doc.split()\n # remove punctuation from each token\n table = str.maketrans('', '', string.punctuation)\n tokens = [w.translate(table) for w in tokens]\n # remove remaining tokens that are not alphabetic\n tokens = [word for word in tokens if word.isalpha()]\n return tokens", "title": "" }, { "docid": "c44069f9ce9e7cceef58239e2100c617", "score": "0.5998301", "text": "def tokenize(document):\n import string\n\n # tokenize using nltk tokenizer\n tokens = nltk.word_tokenize(document)\n\n # convert all letters to lower cases\n tokens = [w.lower() for w in tokens]\n\n # create a mapping table using make.trans()\n # the third argument in mapped to None\n table = str.maketrans('', '', string.punctuation)\n\n # translate tokens using mapping table\n translated = [word.translate(table) for word in tokens]\n\n # Filter out non-alphabetic words\n all_alphabetics = []\n\n for word in translated:\n\n all_alpha = True\n\n for letter in word:\n\n if not letter.isalpha():\n\n all_alpha = False\n\n break\n \n if all_alpha:\n\n all_alphabetics.append(word)\n\n # Finally, remove stopwords\n stop_words = nltk.corpus.stopwords.words(\"english\")\n filtered = [word for word in all_alphabetics if word not in stop_words]\n\n return filtered", "title": "" }, { "docid": "fca9b70feb90cbe921bf46309f62c873", "score": "0.5979051", "text": "def preprocess_text(text: pd.Series,\n lowercase: bool = True,\n regex: str = '(?![A-Za-z0-9]).',\n replace_dict: dict = {},\n nan_handling: str = 'remove',\n lemma: bool = False,\n stem: bool = False,\n token_list: bool = False,\n eng_lang: bool = True,\n stop_words: bool = False):\n nlp = get_spacy_nlp()\n\n if stem and lemma:\n raise Exception('stem and lemma cannot both be true')\n\n if nan_handling == 'remove':\n text = text.dropna()\n else:\n text = text.fillna(nan_handling)\n\n if lowercase:\n text = 
text.str.lower()\n\n text = text.apply(lambda s: re.sub(regex, ' ', s))\n\n text = text.str.strip()\n text = text.str.replace(r'\\s+', ' ')\n\n if eng_lang:\n text = text[text.apply(lambda t: langid.classify(t)[0]) == 'en']\n\n if stop_words:\n text = text.apply(lambda doc: ' '.join(\n [item for item in doc.split(' ') if item not in stop_set]))\n\n # Full pipeline\n if lemma or stem:\n text = pd.Series(nlp.pipe(text), index=text.index)\n for ind, val in text.iteritems():\n end_str = []\n for item in val:\n if lemma:\n end_str.append(item.lemma_)\n elif stem:\n end_str.append(item._.stem)\n\n text[ind] = end_str\n\n if not token_list:\n text = text.apply(lambda desc: ' '.join([item for item in desc]))\n else:\n if token_list:\n # Only tokenization?\n text = pd.Series([nlp.make_doc(t) for t in text], index=text.index)\n text = text.apply(lambda doc: [tok.text for tok in doc])\n\n dict_replace = {'\\xa0': ' '}\n dict_replace.update(replace_dict)\n text = text.replace(dict_replace, regex=True)\n\n return text", "title": "" }, { "docid": "ea636541b8f0b3997a710ea66177828b", "score": "0.59789664", "text": "def preprocess (self, words, termino):\n\t\t\n\t\t# Tokenization\n\t\ttokens = []\n\t\t\n\t\tfor text in self.content:\n\t\t\ttokens += PunktWordTokenizer().tokenize(text)\n\t\t\t\n\t\tself.content = tokens\n\t\t\n\t\t# Collocations\n\t\tif termino:\n\t\t\tself.collocations = []\n\t\telse:\n\t\t\tself.collocations = bootstrap(self.content)\n\t\t\n\t\t# Corpus statistics\n\t\tself.words = collections.defaultdict(int)\n\t\tstop = ['\\'', '-', '`', '!', ':', ';', '.', ',']\n\t\t\n\t\tfor word in self.content:\n\t\t\tif len(word) < self.config.size_min:\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tif word[-1] in stop:\n\t\t\t\tword = word[:-1]\n\t\t\tif word[0] in stop:\n\t\t\t\tword = word[1:]\n\t\t\t\t\n\t\t\tif len(word) == 0:\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tself.words[word] += 1 # Document counter\n\t\t\twords[word] += 1 # Corpus counter\n\t\t\t\n\t\tself.size = sum(self.words.values())", "title": "" }, { "docid": "d0bb8c674c3a50def90dc099ed190d57", "score": "0.5978034", "text": "def preprocess(text):\r\n #############################################################################\r\n # TODO: Preprocess the text into a desired format. #\r\n # NOTE: This method is completely OPTIONAL. If it is not helpful to your #\r\n # implementation to do any generic preprocessing, feel free to leave this #\r\n # method unmodified. 
#\r\n #############################################################################\r\n\r\n #############################################################################\r\n # END OF YOUR CODE #\r\n #############################################################################\r\n\r\n return text", "title": "" }, { "docid": "5236118e4955a29ddc2581981e25dd91", "score": "0.5969034", "text": "def word_tokenization(self, sentences):\n\n # this step is performed in the MElt workflow\n return sentences", "title": "" }, { "docid": "3d09b08da8febb54802b4979baa29cb0", "score": "0.59673584", "text": "def pre_process_sentences(self):\n\n for i in range(self.length):\n \n # Normalise extra white spaces\n self.sentence[i] = re.sub(' +', ' ', self.sentence[i])\n self.sentence[i] = self.sentence[i].strip()\n \n # Tokenize the current sentence in word/POS\n sentence = self.sentence[i].split(' ')\n sentence = self.pre_rm_stopwords(sentence)\n # Creating an empty container for the cleaned up sentence\n container = [(self.start, self.start)]\n\n # Looping over the words\n for w in sentence:\n \n # Splitting word, POS\n pos_separator_re = re.escape(self.pos_separator)\n m = re.match(\"^(.+)\" +pos_separator_re +\"(.+)$\", w)\n \n # Extract the word information\n token, POS = m.group(1), m.group(2)\n\n # Add the token/POS to the sentence container\n container.append((token.lower(), POS))\n \n # Add the stop token at the end of the container\n container.append((self.stop, self.stop))\n\n # Recopy the container into the current sentence\n self.sentence[i] = container", "title": "" }, { "docid": "db69eeaf20e2f467ea2989310306bcca", "score": "0.5965883", "text": "def preprocess(review):\n review = review.lower().replace(\"<br />\", \" \")\n review=review.split()\n processed_review=[]\n for word in review:\n if word in emotions:\n processed_review.append(word)\n else:\n new_word_list = []\n for char in word:\n if char not in punctuations:\n new_word_list.append(char)\n new_word=''.join(new_word_list)\n if new_word not in stop_words:\n processed_review.append(new_word)\n return processed_review", "title": "" }, { "docid": "aededec52a43e0174e3254e0de5b570f", "score": "0.5960232", "text": "def clean(doc):\n # doc = \" \".join(doc)\n # stop_free = \" \".join([i for i in doc.lower().split() if i not in stop])\n # punc_free = ''.join(ch for ch in stop_free if ch not in exclude)\n # normalized = \" \".join(lemma.lemmatize(word) for word in punc_free.split())\n # return normalized.split()\n\n cleaned_doc = []\n for word in doc:\n word = word.lower()\n if word not in stop and 25 > len(word) > 1:\n s = re.sub(r'[^\\w\\s]', ' ', word.strip())\n t = re.sub(r'[0-9]', ' ', s.strip())\n cand = t.strip().split()\n if cand:\n for c in cand:\n if c not in stop and 25 > len(c) > 1:\n cleaned_doc.append(c)\n\n cleaned = list(map(lemma.lemmatize, cleaned_doc))\n return cleaned", "title": "" }, { "docid": "ce58f4b0c58fd01d0efff9417442609c", "score": "0.5941776", "text": "def prepare_text_for_lda(text):\n\n text = text.strip()\n text = text.lower()\n text = re.sub('[^A-Za-z .-]+', ' ', text)\n text = text.replace('-', '')\n text = text.replace('.', '')\n tokens = tokenize(text)\n\n if tokens:\n tokens = [token for token in tokens if len(token) > 4]\n tokens = [token for token in tokens if token not in en_stop]\n tokens = [get_lemma(token) for token in tokens]\n return tokens\n else:\n return", "title": "" }, { "docid": "4154388127d6770091234d63a7854151", "score": "0.59294134", "text": "def preprocess(filename):\n list_of_sentences = []\n 
with open(filename) as f:\n for line in f:\n list_of_sentences.append(re.sub(\"[^a-zA-Z | ^.]+\", \"\", line).lower() + ' <STOP>')\n return list_of_sentences", "title": "" }, { "docid": "6ad6821916875c43fb406b7b27d233c4", "score": "0.59209836", "text": "def preprocess(text):\n text = remove_space(text)\n text = clean_special_punctuations(text)\n text = handle_emojis(text)\n text = clean_number(text)\n text = spacing_punctuation(text)\n text = clean_repeat_words(text)\n text = stop(text)\n return text", "title": "" }, { "docid": "5c899a273cd9228236171db3b03dcccb", "score": "0.59156996", "text": "def my_tokenize(string):\n# return [porter.stem(w.lower()) \n return [w.lower() \n for s in nltk.sent_tokenize(string) \n for w in nltk.word_tokenize(s)\n if w.lower() not in stopwords.words('english') and\n w not in [',','.',';','(',')','\"',\"'\",'=',':','%','[',']']]", "title": "" }, { "docid": "bfd3cce2b967841f78a980e55c7d8ba8", "score": "0.59138733", "text": "def clean_doc(doc):\n # Split into \"words\"\n tokens = doc.split()\n # Remove punctuation\n re_punc = re.compile(f\"[{re.escape(string.punctuation)}]\")\n tokens = [re_punc.sub('', word) for word in tokens]\n # Remove non-alphabetic tokens\n tokens = [word for word in tokens if word.isalpha()]\n # Remove short tokens\n tokens = [word for word in tokens if len(word) > 4]\n # Make tokens lowercase\n tokens = [word.lower() for word in tokens]\n # Remove stop words\n stop_words = set(stopwords.words('english'))\n tokens = [word for word in tokens if word not in stop_words]\n # Lemmatization to account for things like plurals\n lem = WordNetLemmatizer()\n tokens = [lem.lemmatize(token) for token in tokens]\n return tokens", "title": "" }, { "docid": "34c04796f42ebbcd6d08b488bdca4957", "score": "0.59127325", "text": "def preprocess(text):\n #############################################################################\n # TODO: Preprocess the text into a desired format. #\n # NOTE: This method is completely OPTIONAL. If it is not helpful to your #\n # implementation to do any generic preprocessing, feel free to leave this #\n # method unmodified. 
#\n #############################################################################\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return text", "title": "" }, { "docid": "0cbb5d7c43f26d1dde96e47615294529", "score": "0.5911985", "text": "def preProcess(self, docPath):\n\t\tdoc = \"\"\n\t\tencoding=\"UTF-8\"\n\t\tif os.path.isfile(docPath):\n\t\t\tencoding=self.guessFileEncoding(docPath)\n\t\t\tdocFile=codecs.open(docPath,encoding=encoding).readlines()\n\t\t\tlines = [line.strip().lower() for line in docFile if os.path.isfile(docPath)]\n\n\t\tfor line in lines:\n\t\t\tif not self.emailHeader(line) and not self.matchRegex(line) and not self.inPhraseTable(line):\n\t\t\t\tif encoding==\"UTF-8\":\n\t\t\t\t\tfor word in line.split(\" \"):\n\t\t\t\t\t\tif len(word) >= int(self.minwordlength):\n\t\t\t\t\t\t\tword_nospecials = self.removeSpecials(word.lower())\n\t\t\t\t\t\t\tif len(word_nospecials) >= int(self.minwordlength):\n\t\t\t\t\t\t\t\tdoc += word_nospecials + \" \"\n\t\t\t\telse:\n\t\t\t\t\tfor word in line.split(\" \"):\n\t\t\t\t\t\tword_nospecials=self.removeSpecials(str(word),True)\n\t\t\t\t\t\tif len(word_nospecials)>=0:\n\t\t\t\t\t\t\tdoc+=word_nospecials + \" \"\n\t\treturn doc,encoding", "title": "" }, { "docid": "f0df9244e36f03fc9df2303627142526", "score": "0.5910977", "text": "def preprocess_data(data):\n all_text = ''.join([c for c in data if c not in punctuation])\n reviews = all_text.split('\\n')\n all_text = ' '.join(reviews)\n words = all_text.split()\n return reviews, words", "title": "" }, { "docid": "38fe5ed2bb8e3af1f0dc423c5f56cb04", "score": "0.5898871", "text": "def normalize(self):\n self.remove_non_ascii()\n self.to_lowercase()\n self.remove_punctuation()\n self.delete_numbers()\n self.remove_stopwords()\n self.remove_short_words()\n self.lemmatize_verbs() # self.stem_words()", "title": "" }, { "docid": "3d4a70c667222bf1f78cc37e2735a44a", "score": "0.5895979", "text": "def do_tokenize(self, text):\n text = text.lower() if self.lc else text\n if self.collapse_hashtags:\n text = re.sub('#\\S+', 'THIS_IS_A_HASHTAG', text)\n else:\n text = re.sub('#(\\S+)', r'HASHTAG_\\1', text)\n if self.collapse_mentions:\n text = re.sub('@\\S+', 'THIS_IS_A_MENTION', text)\n if self.collapse_urls:\n text = re.sub('http\\S+', 'THIS_IS_A_URL', text)\n if self.limit_repeats:\n text = re.sub(r'(.)\\1\\1\\1+', r'\\1', text)\n if self.collapse_digits:\n text = re.sub(r'[0-9]+', '9', text)\n toks = []\n for tok in text.split():\n tok = re.sub(r'^(' + punc_re + '+)', r'\\1 ', tok)\n tok = re.sub(r'(' + punc_re + '+)$', r' \\1', tok)\n for subtok in tok.split():\n if self.retain_punc_toks or re.search('\\w', subtok):\n toks.append(subtok)\n if self.rt_prefix:\n rt_text = 'rt' if self.lc else 'RT'\n if rt_text in toks:\n toks.remove(rt_text)\n toks = ['RT_' + t for t in toks]\n return toks", "title": "" }, { "docid": "d38824b86047ef76849caff3be60bd15", "score": "0.5894464", "text": "def preprocess(review):\r\n #print(\"aaaa\")\r\n review = decontracted(review.lower())\r\n\r\n processed_review = [word for word in review.lower().translate(\r\n str.maketrans('', '',\r\n string.punctuation)).split() if word not in stop_words]\r\n if len(processed_review) < MAX_WORDS_IN_REVIEW:\r\n replace = [' ']*(MAX_WORDS_IN_REVIEW - len(processed_review))\r\n processed_review.extend(replace)\r\n return processed_review", "title": "" }, { "docid": 
"cf72493a48e7719bf025f57735446733", "score": "0.5893349", "text": "def preprocess(self, text, use_stem=False, use_lemma=False):\n\n assert type(text) == list, \"input must be a list\"\n text = lowercase(text)\n text = remove_punctuation(text)\n text = remove_stopwords(text)\n if use_stem:\n text = stemming(text)\n if use_lemma:\n text = lemmatization(text)\n\n return text", "title": "" }, { "docid": "0437607810397f385c6b1c5eff926e18", "score": "0.5888597", "text": "def tokenize_docs(docs, tokenizer):\n for i, doc in enumerate(docs):\n words = []\n for sent in get_sents_from_doc(doc):\n words += [w for w in tokenizer.cut(sent)]\n docs[i] = ' '.join(words)\n return docs", "title": "" }, { "docid": "b03ea88d17867b1ad1215b15bb51bed6", "score": "0.58869743", "text": "def pre_process_text(file_path):\n\n #read the train file \n with open(file_path, \"r\", encoding=\"utf-8\") as train_file:\n sent = []\n words = []\n for line in train_file:\n words.append(re.sub(r\"\\d+|\" r\"[\\.\\,\\\"\\'\\(\\)\\?\\%\\!\\?\\&\\@\\#\\€\\$\\∞\\§\\|\\[\\]\\©\\:\\;\\\\\\/]|\" r\"[\\t\\n]\", \"\", line).lower())\n sent.append([re.sub(r\"\\d+|\" r\"[\\.\\,\\\"\\'\\(\\)\\?\\%\\!\\?\\&\\@\\#\\€\\$\\∞\\§\\|\\[\\]\\©\\:\\;\\\\\\/]|\" r\"[\\t\\n]\", \"\", line).lower()])\n \n word = \"\".join(words)\n tokenized_sent = []\n for sentence in sent:\n for token in sentence:\n tokenized_sent.append(word_tokenize(token))\n return word_tokenize(word), tokenized_sent #function returns whole tokenized text (for training) and sentence tokenised (for testing)", "title": "" }, { "docid": "90cf3161b09b59a21cad9cd67e59108e", "score": "0.58819324", "text": "def preprocess_text(self, text) -> str:\n tokens = []\n stop_words = stopwords.words('english')\n for token in gensim.utils.simple_preprocess(text):\n if token not in stop_words and len(token) > 2:\n tokens.append(token)\n processed_text = self.lematize_stem(tokens)\n return processed_text", "title": "" }, { "docid": "49c764209b701acdf7b7af2d3d163b79", "score": "0.5870463", "text": "def tokenize(text):\n # YOUR CODE HERE\n x = re.findall(r'\\b([a-zA-Z]+)\\b',text)\n for y in range(len(x)):\n char = x[y].lower()\n x[y] = char\n return x", "title": "" }, { "docid": "827e787b55da2d83e2f46a4fca454c43", "score": "0.58632606", "text": "def _tokenize_mixed_document(text, entities, min_length=1, stopwords=None):\n result = []\n for np in TextBlob(text).noun_phrases:\n if ' ' in np and np not in entities:\n # break apart the noun phrase; it does not occur often enough in the collection of text to be considered.\n result.extend(_simple_document(np, min_length=min_length, stopwords=stopwords))\n else:\n # filter out stop words\n tmp = \"_\".join(_simple_document(np, min_length=min_length, stopwords=stopwords))\n # if we end up with nothing, don't append an empty string\n if tmp:\n result.append(tmp)\n return result", "title": "" }, { "docid": "449fdb6152d665c29b85cc6deee8a6f6", "score": "0.5860531", "text": "def tokenize_normalize(self):\n # Clean\n self.main_df[self.docs_index] = self.main_df[self.docs_index].apply(lambda row: clean_raw_text([row])[0])\n # Tokenize\n self.main_df['tokenized_sentences'] = self.main_df[self.docs_index].apply(lambda x:\n [word_tokenize(s, model=self.nlp)\n for s in sent_tokenize(x)])\n # Normalize\n self.main_df['normalized_sentences'] = self.main_df['tokenized_sentences'].apply(lambda x:\n [normalize_tokens(s,\n model=self.nlp) for s in x])", "title": "" }, { "docid": "5d878dcd265a5c3b3e7d2fed12342a8d", "score": "0.58522457", "text": "def preprocess(raw_parts):\n 
parts = []\n for raw_part in raw_parts:\n parts.append(raw_part.lower().replace('\\n', ''))\n return parts", "title": "" }, { "docid": "1740bf42f432273868ba0fbace9435aa", "score": "0.58496267", "text": "def preprocess(text):\n words = re.split('\\s+', text)\n punctuation = re.compile(r'[-.?!,\":;()|0-9]')\n words = [punctuation.sub(\"\", word) for word in words]\n words = filter(None, words)\n words = [word.lower() for word in words]\n words = [word for word in words if not word in stopwords.words('english')]\n return words", "title": "" }, { "docid": "b8808d41e7cd78eb6ef8a03267c7c262", "score": "0.5846631", "text": "def normalize_context(s):\n def remove_articles(text):\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "title": "" }, { "docid": "23756e3f364f151fd48fc1393264ee33", "score": "0.58444524", "text": "def tokenize(self, text):\n text = self._clean_text(text)\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n text = self._tokenize_chinese_chars(text)\n\n # remove URL\n text = self.url_match.sub(\"\", text)\n\n # do printable in here\n text = ''.join(t for t in text if t in string.printable)\n\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case and token not in self.never_split:\n token = token.lower()\n token = self._run_strip_accents(token)\n\n split_tokens.extend(self._run_split_on_punc(token))\n\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens", "title": "" }, { "docid": "03f4b8fe537e824ec776563ea0372751", "score": "0.5843736", "text": "def _tokenize(self, bookLines) -> List[str]:\n # convert list of lines to list of list of words\n wordLines = map(\n lambda line: nltk.word_tokenize(line.rstrip('\\n')),\n bookLines)\n return [word.lower() for line in wordLines\n for word in line if word.isalpha()]", "title": "" }, { "docid": "1b1e5dda9c72b14b841fde4d5fa715b1", "score": "0.583266", "text": "def preprocess_text(text: str) -> str:\n\n text = text.lower()\n\n text = remove_urls(text)\n text = remove_emails(text)\n text = remove_times_and_dates(text)\n\n text = customize_wf_anonymization(text)\n\n text = remove_punctuation(text)\n text = remove_multi_whitespaces(text)\n\n text = remove_repetition(text)\n\n # Remove special characters except /_%-\n # text = re.sub('[-]','', text)\n text = re.sub('[^a-zA-Z0-9?/_%\\n-]', ' ', text)\n\n text = '' if text == 'none' else text.strip()\n\n return text", "title": "" }, { "docid": "a34f9b17c14c690c4dc5e967c3e5dc13", "score": "0.5823778", "text": "def naive(self, text):\n\n\t\ttokenizedText = None\n\t\ttokenizedText = []\n \n\t\tfor sentence in text:\n\t\t\ttokenizedText.append(sentence.split())\n\t\t\n\t\tfor index,sentence in enumerate(tokenizedText):\n\t\t\tfor idx,words in enumerate(sentence):\n\t\t\t\tif(words[len(words)-1] == ','):\n\t\t\t\t\twords = words[:-1]\n\t\t\t\t\tsentence[idx] = 
words.lower()\n\t\t\t\telse:\n\t\t\t\t\tsentence[idx] = words.lower()\n\t\t\ttokenizedText[index] = sentence\n\n\t\t\n \n\t\treturn tokenizedText", "title": "" }, { "docid": "08308cb1821ae08dbb3c67fc27d05d22", "score": "0.582334", "text": "def normal(token):\n return token.string.lower()", "title": "" }, { "docid": "d8ed1f9101a50d317ac5b5d4f6d5837a", "score": "0.5821149", "text": "def clean(doc):\n stop = set(stopwords.words('english'))\n exclude = set(string.punctuation)\n lemma = WordNetLemmatizer()\n\n lower = [token.lower() for token in doc]\n\n stop_free = \" \".join([token for token in lower if token not in stop])\n punc_free = ''.join(char for char in stop_free if char not in exclude)\n normalized = \" \".join(lemma.lemmatize(word) for word in punc_free.split())\n return normalized", "title": "" }, { "docid": "bcf2f9358efa1cf62fee49c8e26e70c7", "score": "0.58187824", "text": "def tokenize(s):\n pass", "title": "" }, { "docid": "78954eb2dd0eddce4929fb0a19c1c14c", "score": "0.5816228", "text": "def preprocess(text):\n \n text = text.translate(str.maketrans('', '', string.punctuation)).lower()\n words = nltk.word_tokenize(text)\n words = [nltk.LancasterStemmer().stem(word) for word in words]\n words = [word for word in words if word not in stopwords.words('english')]\n \n return words", "title": "" }, { "docid": "2e7005fbe5bfbfa1a28e410f2b0b26f7", "score": "0.5814814", "text": "def preprocess(text, irrelevant_pos = ['SPACE'],\n avoid_entities = ['ORG']):\n result = []\n\n for sent in text:\n sent = str(sent).lower()\n\n result_sent = []\n doc = nlp(sent)\n entities = [str(ent) for ent in doc.ents if ent.label_ in avoid_entities]\n # This helps to detect names organization\n\n for token in doc: \n if (token.like_email or\n token.like_url or\n token.pos_ in irrelevant_pos or\n str(token) in entities\n ):\n continue\n else:\n if str(token) in string.punctuation:\n try:\n result_sent[-1] = str(result_sent[-1]) + str(token)\n except:\n result_sent.append(str(token))\n else:\n result_sent.append(str(token))\n result.append(\" \".join(result_sent))\n return result", "title": "" }, { "docid": "163f2eb47456baa8402a7de98a63f0b5", "score": "0.58078206", "text": "def text_preprocess(data, tag='n', output='list'):\n # replace \" to '\n data = data.apply(lambda x: x.replace(\"\\\"\", \"'\")) \n # split\n data = data.apply(lambda x: re.split(r\"', '\", x)) \n # tokenize\n data = data.apply(lambda x: [tokenizer.tokenize(i) for i in x])\n # lemmatize\n data = data.apply(lambda x: [[lemmatizer.lemmatize(word, pos=tag) for word in i] for i in x])\n # stopword\n data = data.apply(lambda x: [[word for word in i if word not in stop_words] for i in x])\n # numbers\n data = data.apply(lambda x: [[word for word in i if not word.isnumeric()] for i in x])\n # remove empty lists\n data = data.apply(lambda x: [i for i in x if i])\n \n # formatting\n if output=='list':\n data = data.apply(lambda x: [j for i in x for j in i])\n elif output=='string':\n data = data.apply(lambda x: [' '.join(i) for i in x]).apply(lambda x: ' '.join(x))\n else:\n pass\n \n return data", "title": "" }, { "docid": "735cb9ac2aa216bcfbca82cad4c596d0", "score": "0.58000654", "text": "def preprocess(self, tokens):\n new_tokens = []\n for token in tokens:\n word, word_normalized, following = token\n\n # 1. Known word forms should be returned to the token stream unchanged.\n if exists_form(word, \"lat\") or exists_form(word_normalized, \"lat\"):\n new_tokens.append(token)\n continue\n\n # 2. 
Automatically split off enclitics if the word that precedes the clitic is known.\n found_enclitic = False\n for enclitic in LATIN_ENCLITICS:\n if len(word) > len(enclitic) and word.endswith(enclitic):\n preceding_word = word[:-len(enclitic)]\n preceding_word_normalized = word_normalized[:-len(enclitic)]\n if exists_form(preceding_word, \"lat\") or exists_form(preceding_word_normalized, \"lat\"):\n new_tokens.append((preceding_word, preceding_word_normalized, \"\"))\n new_tokens.append((enclitic, enclitic, \" \"))\n found_enclitic = True\n break\n\n # 3. The token is unknown and does not have an enclitic so return to the token stream unchanged.\n if not found_enclitic:\n new_tokens.append(token)\n return new_tokens", "title": "" }, { "docid": "1f79dba30f25197365faec728b1add34", "score": "0.57996625", "text": "def text_preprocessing(text):\n\n text = re.sub(r'http(\\S)+', r'', text)\n text = re.sub(r'http ...', r'', text)\n text = re.sub(r'http', r'', text)\n\n text = re.sub(r'(RT|rt)[ ]*@[ ]*[\\S]+',r'', text)\n text = re.sub(r'@[\\S]+',r'', text)\n\n text = ''.join([i if ord(i) < 128 else '' for i in text])\n text = re.sub(r'_[\\S]?',r'', text)\n\n # Remove '@name'\n text = re.sub(r'(@.*?)[\\s]', ' ', text)\n\n # Replace '&amp;' with '&'\n text = re.sub(r'&amp;', '&', text)\n text = re.sub(r'&lt;',r'<', text)\n text = re.sub(r'&gt;',r'>', text)\n\n text = re.sub(r'[ ]{2, }',r' ', text)\n text = re.sub(r'([\\w\\d]+)([^\\w\\d ]+)', r'\\1 \\2', text)\n text = re.sub(r'([^\\w\\d ]+)([\\w\\d]+)', r'\\1 \\2', text)\n\n # Remove trailing whitespace\n text = re.sub(r'\\s+', ' ', text).strip()\n\n return text", "title": "" }, { "docid": "c112217b9f25b27ca476645d6227adc4", "score": "0.579921", "text": "def preprocess(tokens):\n identifier = '_!'\n within_text = False\n for (idx, tok) in enumerate(tokens):\n if identifier in tok:\n for _ in range(tok.count(identifier)):\n within_text = not within_text\n if ('(' in tok) and (within_text):\n tok = tok.replace('(', '-LB-')\n if (')' in tok) and (within_text):\n tok = tok.replace(')', '-RB-')\n tokens[idx] = tok\n return tokens", "title": "" }, { "docid": "92f6a7a9855e70612484ebef438413f8", "score": "0.5792574", "text": "def word_tokenizer(sentence):\n words = re.split(WORD_SEP, sentence)\n words = filter(lambda x : x != u'', words)\n\n if not words:\n return []\n\n # Eliminate starting capital letter words\n idx = 0\n for w in words:\n if not w[0].isupper():\n break\n idx += 1\n\n words = words[idx:]\n\n proper_words = []\n pword = u''\n\n for i in range(len(words) - 1):\n if words[i] and words[i][0].isupper() and words[i + 1] and words[i + 1][0].isupper() \\\n and is_space_between(sentence, words[i], words[i + 1]):\n pword += words[i] + u' '\n elif words[i] and words[i][0].isupper():\n pword += words[i] + u' '\n proper_words.append(pword.strip())\n else:\n pword = u''\n\n proper_words = filter(word_filter, proper_words)\n\n words = filter(lambda x : x and not x[0].isupper(), words)\n words = filter(word_filter, words)\n\n # Correct misspelled â/î\n words = map(spellchecker, words)\n\n # Concatenate with proper words\n words += proper_words\n\n # Count the number of appearances of each word in the sentence\n words = list(set([(w, words.count(w)) for w in words]))\n\n word_list = []\n # Create the word list\n for word, nr_app in words:\n is_proper = False\n if word[0].isupper():\n is_proper = True\n new_word = Word(word, nr_app, is_proper)\n for suffix in SUFFIXES:\n if word.endswith(suffix) and len(word[:len(suffix)]) > SUFFIX_LEN \\\n and not 
is_proper:\n new_word.set_suffix(suffix)\n break\n for prefix in PREFIXES:\n if word.startswith(prefix) and len(word[len(prefix):]) > PREFIX_LEN \\\n and not is_proper:\n new_word.set_prefix(prefix)\n for suffix in ACRONYM_SUFFIXES:\n pass\n else:\n if u'-' in word:\n new_word.set_hyphenized()\n\n for ch in LOAN_CHARS:\n if ch in word:\n new_word.set_loan()\n\n for ch in ROU_CHARS:\n if ch in word:\n new_word.set_rou_chars()\n\n word_list.append(new_word)\n\n word_list = filter(post_word_filter, word_list)\n\n return word_list", "title": "" } ]
62c4d12bee7a750060e0852cacd22721
Creates a convolutional layer for the network
[ { "docid": "12b31323bf75ffe525543ae1f187d4a8", "score": "0.6913519", "text": "def create_conv2d(self, x, w, b, stride = 1, name = None):\n x = tf.nn.conv2d(x, w, strides=[1, stride, stride, 1], padding='VALID', name = name)\n x = tf.nn.bias_add(x, b)\n return tf.nn.relu(x)", "title": "" } ]
[ { "docid": "9b18c57393e3ffa0fdd332093dad271f", "score": "0.79682297", "text": "def createConvolutionLayer(inputLayer, kernelHeight, kernelWidth, channelSize, kernelCount, strideX, strideY):\r\n \r\n \r\n weights = tf.Variable(tf.truncated_normal([kernelHeight, kernelWidth, channelSize, kernelCount], stddev=0.03))\r\n bias = tf.Variable(tf.constant(0.05, shape=[kernelCount]))\r\n \r\n \"\"\"Stride is also 4 dimensional tensor\r\n The first and last values should be 1 as they represent the image index and \r\n chanel size padding. Second and Third index represent the X and Y strides\"\"\"\r\n layer = tf.nn.conv2d(input = inputLayer, filter = weights, padding='SAME',\r\n strides = [1, strideX, strideY, 1]) + bias\r\n return layer", "title": "" }, { "docid": "17964557b4a088bca1b134e41294e692", "score": "0.77399904", "text": "def create_conv_net(self):\r\n convolutional_net = Sequential()\r\n\r\n convolutional_net.add(Conv2D(filters=32, kernel_size=(10, 10),\r\n activation='relu',\r\n input_shape=self.input_shape,\r\n kernel_regularizer=l2(\r\n self.l2_penalization['Conv1']),\r\n name='Conv1'))\r\n\r\n convolutional_net.add(MaxPool2D())\r\n\r\n convolutional_net.add(Conv2D(filters=64, kernel_size=(7, 7),\r\n activation='relu',\r\n kernel_regularizer=l2(\r\n self.l2_penalization['Conv2']),\r\n name='Conv2'))\r\n convolutional_net.add(MaxPool2D())\r\n\r\n convolutional_net.add(Flatten())\r\n\r\n convolutional_net.add(\r\n Dense(units=100, activation='sigmoid', kernel_regularizer=l2(self.l2_penalization['Dense1']), name='Dense1'))\r\n return convolutional_net", "title": "" }, { "docid": "00dcbbca7d2361b58f872d667bc70aaf", "score": "0.7649891", "text": "def conv_layer(n, w):\n return Conv2D(filters=n*w, kernel_size=(3,3), padding='same', activation='relu')", "title": "" }, { "docid": "9d8b69abb739a52d52e578a15aff17b6", "score": "0.7570173", "text": "def build_convolution(self, layer):\n\n pass", "title": "" }, { "docid": "bd7eb0d8028b208ceb3e57c74160b56f", "score": "0.7439626", "text": "def setup_convolutional_layer(x, filter_size, input_size, num_filters, s=1, k=2,\n use_pooling=True):\n\n # Shape of layer\n shape = [filter_size, filter_size, input_size, num_filters]\n\n # Set up layer weights and biases\n weights = setup_weights(shape)\n biases = setup_biases(num_filters)\n\n # Construct the layer and add biases\n layer = tf.nn.conv2d(input=x, filter=weights, strides=[1, s, s, 1], padding='SAME')\n layer += biases\n\n # Activate pooling if wanted. 
If not, apply the ReLU function\n if use_pooling:\n layer = tf.nn.max_pool(\n value=layer, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME'\n )\n else:\n layer = tf.nn.relu(layer)\n\n return layer", "title": "" }, { "docid": "394bea629a0ab67691ff1cf1cb02b440", "score": "0.73662406", "text": "def _construct_conv_layer(self, input, n_filters, name, kernel=[3, 3], stride=1, normalization=True, activation=None):\n with tf.name_scope(name):\n conv = tf.layers.conv2d(\n inputs=input,\n filters=n_filters,\n kernel_size=kernel,\n strides=[stride, stride],\n padding=\"same\",\n activation=activation,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(L2_LOSS_WEIGHT))\n if normalization:\n return conv\n\n return tf.layers.batch_normalization(conv, training=self.training)", "title": "" }, { "docid": "befe70992813f5fdabe8d11353ac296d", "score": "0.7354771", "text": "def convolutional_layer(input_x, shape):\n W = init_weights(shape)\n b = init_bias([shape[3]])\n return tf.nn.relu(conv2d(input_x, W) + b)", "title": "" }, { "docid": "4c84b7d86036189784ea72d92206aaba", "score": "0.71843016", "text": "def convolutional_layer(self, signal, layer_label, layer_number, kernel_size=None, n_kernels=None):\n\n op_name = LAYER_CONVOLUTIONAL + layer_label + str(layer_number)\n if kernel_size is None:\n kernel_size = self._topology.kernel_size\n\n if n_kernels is None:\n n_kernels = self._topology.n_kernels\n\n if self._flags.do_kernel_regularisation:\n regulariser = tf.contrib.layers.l2_regularizer(scale=0.1)\n else:\n regulariser = None\n\n try:\n signal = tf.layers.conv2d(\n inputs=signal,\n filters=n_kernels,\n kernel_size=kernel_size,\n padding=DEFAULT_PADDING,\n activation=None,\n data_format=self.data_format,\n dilation_rate=self._topology.dilation_rates,\n strides=self._topology.strides,\n name=op_name,\n kernel_regularizer=regulariser,\n use_bias=False,\n reuse=True)\n except:\n signal = tf.layers.conv2d(\n inputs=signal,\n filters=n_kernels,\n kernel_size=kernel_size,\n padding=DEFAULT_PADDING,\n activation=None,\n data_format=self.data_format,\n dilation_rate=self._topology.dilation_rates,\n strides=self._topology.strides,\n name=op_name,\n kernel_regularizer=regulariser,\n use_bias=False,\n reuse=False)\n\n return signal", "title": "" }, { "docid": "6d0bec71d4507bb658d31c1b40e6bf0e", "score": "0.71693516", "text": "def build_convnet():\n model = Sequential()\n model.add(Conv2D(32,\n kernel_size=(3, 3),\n activation='relu',\n input_shape=(28, 28, 1)))\n model.add(Conv2D(64, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n model.add(Flatten())\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(10, activation='softmax'))\n\n model.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])\n return model", "title": "" }, { "docid": "4b285610f7954fc240393eb74509d83a", "score": "0.7168921", "text": "def convolutional_neural_network():\n # Initialize key variables\n conv1_filter_count = 32\n conv2_filter_count = 64\n fc_units = 1024\n image_height = 28\n image_width = 28\n filter_size = 2\n pooling_kernel_size = 2\n keep_probability = 0.8\n fully_connected_units = 10\n\n # Create the convolutional network stuff\n convnet = input_data(\n shape=[None, image_width, image_height, 1], name='input')\n\n convnet = conv_2d(\n convnet, conv1_filter_count, filter_size, activation='relu')\n convnet = max_pool_2d(convnet, pooling_kernel_size)\n\n convnet = conv_2d(\n 
convnet, conv2_filter_count, filter_size, activation='relu')\n convnet = max_pool_2d(convnet, pooling_kernel_size)\n\n convnet = fully_connected(convnet, fc_units, activation='relu')\n convnet = dropout(convnet, keep_probability)\n\n convnet = fully_connected(\n convnet, fully_connected_units, activation='softmax')\n convnet = regression(\n convnet,\n optimizer='adam',\n learning_rate=0.01,\n loss='categorical_crossentropy',\n name='targets')\n\n return convnet", "title": "" }, { "docid": "d8a8accb0b5b1210ec8677c433ff6f30", "score": "0.70672244", "text": "def convert_convolution(node, **kwargs):\n from onnx.helper import make_node\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n kernel = convert_string_to_list(attrs.get('kernel', '()'))\n stride = convert_string_to_list(attrs.get('stride', '()'))\n dilate = convert_string_to_list(attrs.get('dilate', '()'))\n pad = convert_string_to_list(attrs.get('pad', '()'))\n num_group = int(attrs.get('num_group', 1))\n no_bias = attrs.get('no_bias', 'False')\n layout = attrs.get('layout', 'NCHW')\n\n if layout not in ['NCHW', 'NCDHW']:\n raise NotImplementedError('Convolution currently does not support layout not in '\n '[\\'NCHW\\', \\'NCDHW\\']')\n\n if no_bias in ['True', '1']:\n assert len(input_nodes) == 2, 'Convolution takes 2 input if no_bias==True'\n else:\n assert len(input_nodes) == 3, 'Convolution takes 3 input if no_bias==False'\n\n kwargs_ = {}\n if kernel:\n kwargs_['kernel_shape'] = tuple(kernel)\n if pad:\n kwargs_['pads'] = tuple(pad) + tuple(pad)\n if stride:\n kwargs_['strides'] = stride\n if dilate:\n kwargs_['dilations'] = dilate\n\n nodes = [\n make_node('Conv', input_nodes, [name], group=num_group, **kwargs_)\n ]\n\n return nodes", "title": "" }, { "docid": "b58ab45a2783a15bd7a304bc64f174e5", "score": "0.70570815", "text": "def conv2d_layer(self, inputs, num_filters=None, kernel_size=None, activation=None):\r\n kernel_size = self.kernel_size if (kernel_size == None) else kernel_size\r\n num_filters = self.initial_num_filters if (num_filters == None) else num_filters\r\n activation = self.activation if (activation == None) else activation\r\n \r\n return keras.layers.Conv2D(num_filters, kernel_size, activation=activation, padding=self.padding, kernel_initializer=self.kernel_initializer)(inputs)", "title": "" }, { "docid": "303f34412194a71b346e9aaa9657f7e8", "score": "0.6988752", "text": "def convolution(image, kernel):\n\n return image", "title": "" }, { "docid": "51626271763d8f9b1158e36c05dfb98e", "score": "0.6987144", "text": "def _conv2d(prev_layer, layer, layer_name):\n\t\tW, b = _weights(layer, layer_name)\n\t\tW = tf.constant(W)\n\t\tb = tf.constant(np.reshape(b, (b.size)))\n\t\treturn tf.nn.conv2d(prev_layer, filter=W, strides=[1, 1, 1, 1], padding='SAME') + b", "title": "" }, { "docid": "f0c6288a9ff1c53d39a3bfac9bf1c2de", "score": "0.69769835", "text": "def _conv_layer(self, layer_name, inputs, filters, size, stride, padding='SAME',\n freeze = False, xavier = False, relu = True, stddev = 0.001, bias_init_val=0.0):\n mc = self.mc\n use_pretrained_param = False\n if mc.LOAD_PRETRAINED_MODEL:\n cw = self.caffemodel_weight ##? 
TODO\n if layer_name in cw:\n kernel_val = np.transpose(cw[layer_name][0],[2,3,1,0]) ##TODO\n bias_val = cw[layer_name][1]\n #check the shape\n #TODO\n pass\n\n if mc.DEBUG_MODE:\n print('Input tensor shape to {} :{}'.format(layer_name, inputs.get_shape()))\n\n with tf.variable_scope(layer_name) as scope:\n channels = inputs.get_shape()[3] ## inputs.get_shape =[mc.BATCH_SIZE, mc.ZENITH_LEVEL, mc.AZIMUTH_LEVEL, 5],\n # input.get_shape()[3]=5\n #re-order the caffe kernel with shape [out, in , h, w ] ->tf kernel with\n #shape [h, w, in, out]\n if use_pretrained_param:\n pass# TODO\n elif xavier:\n kernel_init = tf.contrib.layers.xavier_initializer_conv2d()\n bias_init = tf.constant_initializer(bias_init_val)\n\n else:\n kernel_init = tf.truncated_normal_initializer(\n stddev =stddev, dtype=tf.float32)\n bias_init = tf.constant_initializer(bias_init_val)\n\n kernel = _variable_with_weight_decay(\n 'kernels', shape=[size, size, int(channels), filters],\n wd =mc.WEIGHT_DECAY, initializer = kernel_init, trainable=(not freeze)\n )\n biases = _variable_on_device('biases',[filters],bias_init,trainable=(not freeze))\n\n self.model_params += [kernel,biases]\n conv = tf.nn.conv2d(\n inputs, kernel, [1, 1, stride, 1], padding=padding, name ='convolution')\n\n conv_bias = tf.nn.bias_add(conv, biases, name = 'bias_add')\n\n if relu :\n out = tf.nn.relu(conv_bias,'relu')\n else :\n out = conv_bias\n\n self.model_size_counter.append((layer_name,(1+size*size*int(channels))*filters))\n\n out_shape = out.get_shape().as_list()\n num_flops = (1+filters*out_shape[1]*out_shape[2]) ##TODO # how to calculate?\n if relu:\n num_flops += 2*filters*out_shape[1]*out_shape[2]\n self.flop_counter.append((layer_name, num_flops))\n\n self.activation_counter.append(\n (layer_name,out_shape[1]*out_shape[2]*out_shape[3])\n )\n return out", "title": "" }, { "docid": "408615ff0917fd4bcf7aa04eb9c2a73d", "score": "0.6970869", "text": "def _conv2d(prev_layer, layer, layer_name):\n W, b = _weights(layer, layer_name)\n W = tf.constant(W)\n b = tf.constant(np.reshape(b, (b.size)))\n return tf.nn.conv2d(\n prev_layer, filter=W, strides=[1, 1, 1, 1], padding='SAME') + b", "title": "" }, { "docid": "60961706df2a83ac9deda61b48d9eba3", "score": "0.69187635", "text": "def new_conv_layer(bottom, filter_shape, activation=tf.identity, padding='SAME', stride=1, bias=True, name=None):\n with tf.variable_scope(name):\n w = tf.get_variable(\n \"W\",\n shape=filter_shape,\n initializer=tf.truncated_normal_initializer(0., 0.005))\n conv = tf.nn.conv2d( bottom, w, [1,stride,stride,1], padding=padding)\n\n if bias == True:\n b = tf.get_variable(\n \"b\",\n shape=filter_shape[-1],\n initializer=tf.constant_initializer(0.))\n output = activation(tf.nn.bias_add(conv, b))\n else:\n output = activation(conv)\n\n return output", "title": "" }, { "docid": "8abfd2a5a2b56fbd3026e55778f9b2f9", "score": "0.6913316", "text": "def _conv_block(inputs, filters, kernel, strides=1, padding='same', use_activation=False):\n channel_axis = 1 if K.image_data_format() == 'channels_first' else -1\n\n x = Conv2D(filters, kernel, padding=padding, strides=strides,\n use_bias=False)(inputs)\n x = BatchNormalization(axis=channel_axis)(x)\n\n if use_activation:\n x = Activation('relu')(x)\n\n return x", "title": "" }, { "docid": "5bb991d6376a040ffb7c2e59ae6ba05e", "score": "0.686754", "text": "def add_convolution(self, num_filters, filter_shape, pool_shape,\n pool_stride=None, activation=None, batch_normalize=True):\n\n activation = self.activation if activation is None else 
activation\n pool_stride = pool_shape if pool_stride is None else pool_stride\n self.add_conv_pool(\n num_filters=num_filters,\n filter_shape=filter_shape,\n weight_init=activation,\n pool_shape=pool_shape,\n pool_stride=pool_stride,\n use_bias=not batch_normalize)\n if batch_normalize:\n self.add_batch_normalization()\n self.add_nonlinearity(activation)", "title": "" }, { "docid": "9bb7942f5de5cd5603e194e08f5a75e5", "score": "0.6837571", "text": "def create_CNN():\n model = Sequential()\n input_shape=(128, 431, 1)\n\n model.add(Conv2D(24, (5, 5), strides=(1, 1), input_shape=input_shape))\n model.add(AveragePooling2D((2, 2), strides=(2,2)))\n model.add(Activation('relu'))\n\n model.add(Conv2D(48, (5, 5), padding=\"same\"))\n model.add(AveragePooling2D((2, 2), strides=(2,2)))\n model.add(Activation('relu'))\n\n model.add(Conv2D(48, (5, 5), padding=\"same\"))\n model.add(AveragePooling2D((2, 2), strides=(2,2)))\n model.add(Activation('relu'))\n\n model.add(Flatten())\n model.add(Dropout(rate=0.5))\n\n model.add(Dense(64))\n model.add(Activation('relu'))\n model.add(Dropout(rate=0.5))\n\n model.add(Dense(2))\n model.add(Activation('softmax'))\n\n return model", "title": "" }, { "docid": "a51fb3ab8ab37277b0bfc83de02607e3", "score": "0.68342793", "text": "def __build_convolutional_model(self):\n print('\\n\\033[92mBuilding convolutional model...\\033[0m')\n embedding_models = self.__get_embeddigs()\n self.model = Sequential()\n self.model.add(Merge(embedding_models, mode='concat', concat_axis=1))\n\n self.model.add(Conv1D(filters=64, kernel_size=3, padding='same', activation=Config.conv_activation, strides=1))\n self.model.add(Conv1D(filters=64, kernel_size=3, padding='same', activation=Config.conv_activation, strides=1))\n self.model.add(Conv1D(filters=64, kernel_size=3, padding='same', activation=Config.conv_activation, strides=1))\n self.model.add(Conv1D(filters=64, kernel_size=3, padding='same', activation=Config.conv_activation, strides=1))\n self.model.add(Conv1D(filters=64, kernel_size=3, padding='same', activation=Config.conv_activation, strides=1))\n\n self.model.add(Flatten())\n self.model.add(Dense(32, activation=Config.conv_activation))\n self.model.add(Dense(2, activation='softmax'))", "title": "" }, { "docid": "1ad0975542cca8def4de1f2443d87098", "score": "0.6825829", "text": "def conv_layer(prev_layer, layer_depth):\n strides = 2 if layer_depth % 3 == 0 else 1\n conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', activation=tf.nn.relu)\n return conv_layer", "title": "" }, { "docid": "08b8a651c3b749c1c10f0b9eefd95777", "score": "0.6822605", "text": "def cnn_init(cnn_config):\n\n num_images = cnn_config.num_images\n image_dim = cnn_config.image_dim\n num_filters = cnn_config.num_filters\n filter_dim = cnn_config.filter_dim\n pool_dim = cnn_config.pool_dim\n num_classes = cnn_config.num_classes\n\n assert (filter_dim < image_dim),'filterDim must be less that imageDim'\n\n # out_dim should be a multiple of pool_dim\n out_dim = (image_dim - filter_dim + 1)\n assert (out_dim % pool_dim) == 0, 'pool_dim must divide image_dim - filter_dim + 1'\n out_dim = out_dim/pool_dim\n hidden_size = out_dim*out_dim*num_filters\n\n Wc_size = filter_dim * filter_dim * num_filters\n Wd_size = num_classes * hidden_size\n bc_size = num_filters\n bd_size = num_classes\n \n params_array = zeros(Wc_size + Wd_size + bc_size + bd_size)\n (Wc, Wd, bc, bd) = array_to_stack(params_array, cnn_config)\n\n # Initialize parameter randomly from the normal distribution for the convolve\n # 
layer\n filter_init_randn(Wc);\n \n # we'll choose weights uniformly from the interval [-r, r]\n r = np.sqrt(6) / np.sqrt(num_classes+hidden_size+1)\n filter_init_rand(Wd, (-r, r));\n\n theta_tuple = (Wc, Wd, bc, bd)\n return (theta_tuple, params_array)", "title": "" }, { "docid": "d68a9f9b638aec196acfb0aa6c891e60", "score": "0.68218356", "text": "def ConvLayer(in_dim, out_dim, kernel_size=3, stride=2, padding=1, batch_norm=True):\n layers = [nn.Conv2d(in_dim, out_dim, kernel_size, stride, padding, bias=not batch_norm)]\n if batch_norm:\n layers.append(nn.BatchNorm2d(out_dim))\n layers.append(nn.ReLU(True))\n return layers", "title": "" }, { "docid": "ce92a3319148de5085bbcf429b329a58", "score": "0.6817578", "text": "def cnn_layer(self, input_shape, ziropad, no_filter, conv_filter_size, conv_stride, conv_activ_func,\r\n pool_filter_size):\r\n ###Returns:\r\n # model -- a Model() instance in Keras\r\n \"\"\"\r\n \r\n ### START CODE HERE ###\r\n # Feel free to use the suggested outline in the text above to get started, and run through the whole\r\n # exercise (including the later portions of this notebook) once. The come back also try out other\r\n # network architectures as well. \r\n \"\"\"\r\n self.X_input = Input(input_shape)\r\n self.X = ZeroPadding2D((ziropad, ziropad))(self.X_input)\r\n # CONV -> BN -> RELU Block applied to X\r\n self.X = Conv2D(no_filter, (conv_filter_size, conv_filter_size),\r\n strides=(conv_stride, conv_stride), name='conv0')(self.X)\r\n self.X = BatchNormalization(axis=3, name='bn0')(self.X)\r\n self.X = Activation(conv_activ_func)(self.X)\r\n # MAXPOOL\r\n self.X = MaxPooling2D((pool_filter_size, pool_filter_size), name='max_pool')(self.X)\r\n return self.X,self.X_input", "title": "" }, { "docid": "0a1b19abf9a0c41e7b03ae24ce1e58de", "score": "0.6808007", "text": "def conv_layer(self, inputs, out_channels, filters_length, name, stride=1):\n\n with tf.variable_scope(name):\n inputs_shape = inputs.get_shape().as_list()\n\n self.encode_filt, conv_biases = self.get_conv_var(inputs_shape[1],\n filters_length,\n inputs_shape[-1],\n out_channels,\n name)\n\n conv = tf.nn.conv2d(inputs, self.encode_filt,\n [1, stride, stride, 1], padding='VALID')\n bias = tf.nn.bias_add(conv, conv_biases)\n\n return tf.nn.relu(bias)", "title": "" }, { "docid": "921ed191e963069b4a516bd2d5fa5c4f", "score": "0.6800748", "text": "def first_c_layer(x):\n W_conv1 = weight_variable([5, 5, 1, 32])\n b_conv1 = bias_variable([32])\n x_image = tf.reshape(x, [-1, 28, 28, 1])\n h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n h_pool1 = max_pool_2x2(h_conv1)\n return h_pool1", "title": "" }, { "docid": "763fc6a4797fa66046a06ef5ce747b08", "score": "0.6795396", "text": "def conv_nxn_with_init(in_channels, out_channels, kernel_size, stride, padding, bias):\n layer_ = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,\n kernel_size=kernel_size, stride=stride, padding=padding,\n bias=bias);\n nn.init.xavier_normal(layer_.weight, gain=1.0);\n if bias :\n nn.init.constant(layer_.bias, 0.0);\n return layer_;", "title": "" }, { "docid": "8032b4845e7d6288ec513a30caee3e88", "score": "0.6766807", "text": "def conv_layer(self, dtype,\n N, C, K,\n D=1, H=1, W=1,\n T=1, R=1, S=1,\n pad_d=0, pad_h=0, pad_w=0,\n str_d=1, str_h=1, str_w=1,\n relu=False, bsum=False):\n raise NotImplementedError()", "title": "" }, { "docid": "c4f2154808e1fc9760a021bfabeca6b8", "score": "0.6749863", "text": "def get_basic_conv(kernel_size: int, n_channels: int, out_channels: int = None):\n assert kernel_size % 2 == 1, 
f\"kernel size should be odd. got {kernel_size}\"\n padding = kernel_size // 2\n if out_channels is None:\n out_channels = n_channels\n return nn.Conv1d(in_channels=n_channels, out_channels=out_channels, kernel_size=kernel_size,\n padding=padding, bias=False)", "title": "" }, { "docid": "527b767526b3c7a51c72dd704c5543f0", "score": "0.6735715", "text": "def conv2d(x, shape, name, strides=(1,1)):\n #weight = weight_variable(shape, \"{}_W\".format(name))\n #bias = bias_variable([shape[-1]], \"{}_b\".format(name))\n #return tf.nn.conv2d(input=x, filters=weight, strides=strides, padding='SAME', name=name) + bias\n return tf.keras.layers.Conv2D(filters=shape[-1], kernel_size=(shape[0],shape[0]),strides=strides, padding=\"same\",name=name,\n use_bias=True,kernel_initializer=tf.keras.initializers.TruncatedNormal(mean=0.0,stddev=0.1),bias_initializer=tf.constant_initializer(value=0.1))(x)", "title": "" }, { "docid": "4c46a25a78cc154e7b226059df803cf8", "score": "0.6723687", "text": "def conv(x,filters=12):\n return Conv2D(filters=filters,\n kernel_size=(5,5),\n strides=(1,1),\n activation='relu',\n padding='same',\n kernel_initializer='glorot_normal',\n #bias_initializer='zeros',\n kernel_regularizer=l2(0.05),\n bias_regularizer=l2(0.05)\n )(x)", "title": "" }, { "docid": "085da908bcb507b221bcdf11f1a2ea4a", "score": "0.6718359", "text": "def create_SimpleCNN(self):\n layers = []\n layers += [self.create_gatedconv_unit('down',\n self.in_ch, self.n_channels,\n self.freezing_method,\n conv_params={\n 'kernel_size': 3,\n 'padding': 1\n })]\n layers += [self.create_gatedconv_unit('down',\n self.n_channels, self.n_channels,\n self.freezing_method,\n conv_params={\n 'kernel_size': 3,\n 'padding': 1\n })]\n layers += [self.create_gatedconv_unit('same',\n self.n_channels, self.n_channels,\n self.freezing_method,\n conv_params={\n 'kernel_size': 3,\n 'padding': 1\n })]\n return nn.Sequential(*layers)", "title": "" }, { "docid": "847c52560f24fdbb53fa0e6c7505437e", "score": "0.67111295", "text": "def convolutional_layer(input_x, shape, name=\"unspecified\"):\n with tf.name_scope(name):\n with tf.name_scope(\"weights\"):\n W = init_weights(shape)\n variable_summaries(W)\n with tf.name_scope(\"biases\"):\n b = init_bias([shape[3]])\n with tf.name_scope(\"Wx_plus_b\"):\n preactive = conv2d(input_x, W) + b\n tf.summary.histogram(\"pre_activations\", preactive)\n activations = tf.nn.relu(preactive, name=\"activation\")\n tf.summary.histogram(\"activations\", activations)\n return activations", "title": "" }, { "docid": "7077ea497aec206825c3395d9c7171b6", "score": "0.6689048", "text": "def layer_create(inputs, filters=64, kernel_size=5, batch_norm=False, \n drop=True, drop_fraction=0.2, pool=True):\n ki=glorot_normal(seed=np.random.randint(10000)) # force random weights\n conv = Conv1D(filters=filters, kernel_size=int(kernel_size), \n activation='relu',kernel_initializer=ki)(inputs)\n if batch_norm:\n conv = BatchNormalization(momentum=0.9)(conv)\n layer = MaxPool1D(pool_size=2)(conv) if pool else conv\n if drop:\n return Dropout(drop_fraction)(layer)\n else:\n return layer", "title": "" }, { "docid": "01c0a8c55bc140c9249c96ca8e210f83", "score": "0.66872674", "text": "def conv(c_in, c_out, k_size, stride=4, pad=1,bn=True):\n layers = []\n layers.append(nn.Conv2d(c_in, c_out, k_size,stride,pad))\n if bn:\n layers.append(nn.BatchNorm2d(c_out))\n return nn.Sequential(*layers)", "title": "" }, { "docid": "87d06a90f6b390557bb7360515a6e616", "score": "0.6672342", "text": "def conv2D_module(inputs, filters, kernel_size=3, 
padding=\"valid\", pool_size=2):\n \n x = Conv2D(filters=filters, kernel_size=kernel_size, padding=padding,\n kernel_initializer='he_normal')(inputs)\n x = Activation(\"relu\")(x)\n x = Conv2D(filters=filters, kernel_size=kernel_size, padding=padding,\n kernel_initializer='he_normal')(inputs)\n x = Activation(\"relu\")(x)", "title": "" }, { "docid": "be86b2cabf4fe3f0b640d92d4decb5de", "score": "0.66476005", "text": "def _conv_block(input_net, filters, kernel, strides):\n\n\tnet = conv_2d(input_net, filters, kernel, strides, activation='relu6', weights_init='xavier')\n\tnet = batch_normalization(net)\n\treturn net", "title": "" }, { "docid": "2607e0511678136bebc9c37263c48e51", "score": "0.66467106", "text": "def _conv_block(inputs, filters, kernel, strides):\n\n channel_axis = 1 if K.image_data_format() == 'channels_first' else -1\n\n x = Conv2D(filters, kernel, padding='same', strides=strides)(inputs)\n x = BatchNormalization(axis=channel_axis)(x)\n return Activation(relu6)(x)", "title": "" }, { "docid": "d1b76b290124afa7db5ef632c512fec3", "score": "0.6637125", "text": "def conv(c_in, c_out, k_size, stride=2, pad=1, bn=True):\r\n layers = []\r\n layers.append(nn.Conv2d(c_in, c_out, k_size, stride, pad, bias=True))#bias=False\r\n if bn:\r\n layers.append(nn.BatchNorm2d(c_out))\r\n return nn.Sequential(*layers)", "title": "" }, { "docid": "ae464599851f91424edb749ba9e898fe", "score": "0.66223043", "text": "def conv_layer(self, inputs, filters, downsample=False, dropout=True):\n\n strides = 2 if downsample else 1\n #if self.framewise and downsample:\n # strides = (1, 2)\n\n kernel_size = 3\n #if self.framewise and not downsample:\n # kernel_size = (1, 3)\n\n # Apply convolution\n layer = tf.layers.conv2d(\n inputs = inputs,\n filters = filters,\n kernel_size = kernel_size,\n strides = strides,\n padding = 'same',\n data_format = 'channels_first',\n activation = self.activation if not downsample else None,\n kernel_regularizer = tf.contrib.layers.l2_regularizer(0.0001),\n )\n\n # Dropout\n if dropout:\n shape = tf.shape(layer)\n layer = tf.layers.dropout(\n inputs = layer,\n rate = self.dropout,\n training = self.training,\n noise_shape = [shape[0], filters, 1, 1],\n )\n\n return layer", "title": "" }, { "docid": "efaca28179e770c6a037d8cd43ec08d4", "score": "0.6619304", "text": "def featnet4(data, conv_weight, conv_bias, name_prefix): \r\n net=mx.sym.Convolution(data=data, kernel=(7,7), stride=(1,1), num_filter=32, \r\n weight=conv_weight[0], bias=conv_bias[0], name=name_prefix+\"conv0\")\r\n net=mx.sym.Activation(data=net, act_type=\"tanh\", name=name_prefix+\"tanh0\")\r\n net = mx.sym.Pooling(data=net, kernel=(2,2),pool_type=\"max\", \r\n stride=(2,2), name=name_prefix+\"maxpool0\") \r\n net=mx.sym.Convolution(data=net, kernel=(6,6), stride=(1,1), num_filter=64, \r\n weight=conv_weight[1], bias=conv_bias[1], name=name_prefix+\"conv1\") \r\n net=mx.sym.Activation(data=net, act_type=\"tanh\", name=name_prefix+\"tanh1\")\r\n return net", "title": "" }, { "docid": "3191e109e54f61388f3f85d4d55bf86b", "score": "0.66142136", "text": "def _add_conv_block(num_filters, inputs, is_training, name_prefix, kernel_regularizer=None):\n conv = layers.Conv2D(num_filters,\n (3, 3),\n activation=None,\n padding=\"same\",\n kernel_regularizer=kernel_regularizer,\n name=name_prefix)(inputs)\n batchnorm = layers.BatchNormalization(name=\"%s-BN\" % name_prefix)(conv, training=is_training)\n relu = layers.ReLU(name=\"%s-Relu\" % name_prefix)(batchnorm)\n return relu", "title": "" }, { "docid": 
"72fa9f79b9117891ed79b9dea1646b39", "score": "0.66117764", "text": "def create_basic_conv_block(num_layers, num_in_channels, num_out_channels):\n sequence = []\n for i in xrange(num_layers):\n if i == 0:\n sequence.append(torch.nn.Conv2d(in_channels=num_in_channels, out_channels=num_out_channels, kernel_size=3,\n stride=1, padding=1))\n else:\n sequence.append(torch.nn.Conv2d(in_channels=num_out_channels, out_channels=num_out_channels, kernel_size=3,\n stride=1, padding=1))\n sequence.append(torch.nn.ReLU(inplace=False))\n\n return torch.nn.Sequential(*sequence)", "title": "" }, { "docid": "43ea1f95d5890524879c017ebd2d50aa", "score": "0.66063684", "text": "def convolutional(params, input_shape, output_shape):\n output_activation = params['output_activation']\n output_filter_size = params['output_filter_size']\n nb_channels = np.prod(output_shape)\n inp = Input(input_shape)\n x = inp\n x = _convolutional_stack(x, params)\n x = conv2d_layers(\n x,\n nb_filters=[nb_channels],\n filter_sizes=[output_filter_size],\n activations=[output_activation],\n border_mode='valid',\n stride=1,\n conv_layer=Convolution2D)\n out = x\n if len(output_shape) == 1:\n out = GlobalAveragePooling2D()(out)\n model = Model(inputs=inp, outputs=out)\n check_model_shape_or_exception(model, output_shape)\n return model", "title": "" }, { "docid": "389a69d82f95410913cdf8aa6ed9219f", "score": "0.66062754", "text": "def layer_conv2d(x, nfilters, size, strides, padding, name, stdev,\n droprate, is_train, activation=None):\n\n z = tf.layers.conv2d(x, nfilters, size, strides=strides,\n padding=padding, kernel_initializer=get_init(stdev),\n name=name, activation=activation)\n z = leaky_relu(z)\n z = dropout(z, is_train, droprate)\n return z", "title": "" }, { "docid": "dc012abf0bd15b49b20a7d0cc500fa4a", "score": "0.6574481", "text": "def __init__(self,\n nChannelsPrevious,\n nChannels,\n kernelSize,\n padding=0,\n bias=True,\n **kwargs):\n\n ConstrainedLayer.__init__(self,\n nn.Conv2d(nChannelsPrevious, nChannels,\n kernelSize, padding=padding,\n bias=bias),\n **kwargs)", "title": "" }, { "docid": "94228d071abac7601fa561abe09e6f29", "score": "0.657439", "text": "def Simple_CNN():\n \n # create model\n model = Sequential()\n # add concolutional layers\n model.add(Conv2D(64, kernel_size=3, activation='relu', input_shape=(32,32,1)))\n model.add(Conv2D(32, kernel_size=3, activation='relu'))\n # fully connected layer\n model.add(Flatten())\n model.add(Dropout(rate=0.35))\n model.add(Dense(3, activation='softmax'))\n\n \n return model", "title": "" }, { "docid": "648fc7e1ab9f8219abe36d1e306fdb50", "score": "0.65725976", "text": "def gen_conv(x, cnum, ksize, stride=1, rate=1, name='conv',\n padding='SAME', activation=tf.nn.elu, training=True,\n kernel_initializer=None):\n x = tf.layers.conv2d(x,cnum, ksize, stride, dilation_rate=rate,\n activation=None, padding=padding, name=name,\n kernel_initializer=kernel_initializer)\n # We empirically found BN to help if not trained (works as regularizer)\n x = tf.layers.batch_normalization(x)\n x = activation(x)\n\n return x", "title": "" }, { "docid": "bbb5499fc2b7d5a52588bb6ee1d7f73c", "score": "0.65537643", "text": "def conv_layer( self, bottom, name, stride = 1): \n \n with tf.variable_scope(name) as scope:\n # The weights are retrieved according to how they are stored in arrays\n w = self.get_conv_weight(name+'_W')\n b = self.get_conv_bias(name+'_b')\n conv_weights = tf.get_variable(\n \"W\",\n shape=w.shape,\n initializer=tf.constant_initializer(w)\n )\n conv_biases = tf.get_variable(\n 
\"b\",\n shape=b.shape,\n initializer=tf.constant_initializer(b)\n )\n\n conv = tf.nn.conv2d( bottom, conv_weights, [1,stride,stride,1], padding='SAME')\n bias = tf.nn.bias_add( conv, conv_biases)\n \n with self.g.gradient_override_map({'Relu': 'GuidedRelu'}):\n relu = tf.nn.relu(bias, name=name)\n return relu", "title": "" }, { "docid": "9abdec9b3ae0a570f123c554e414404c", "score": "0.65522957", "text": "def _conv_block(self, inputs, filters, kernel, strides):\n\n channel_axis = 1 if K.image_data_format() == 'channels_first' else -1\n\n x = Conv2D(filters, kernel, padding='same', strides=strides)(inputs)\n x = BatchNormalization(axis=channel_axis)(x)\n return Activation(self.relu6)(x)", "title": "" }, { "docid": "b7e51f9c236d9c88edf590036588b72e", "score": "0.6544085", "text": "def _conv_block(inputs, filters, kernel, strides):\n\n channel_axis = 1 if K.image_data_format() == 'channels_first' else -1\n\n x = Conv2D(filters, kernel, padding='same', strides=strides)(inputs)\n x = BatchNormalization(axis=channel_axis)(x)\n x = PReLU(cval)(x)\n# x = Activation(relu)(x)\n return x", "title": "" }, { "docid": "a501a8b3dfbc30e4b85bbacdbeb59868", "score": "0.6540521", "text": "def build_conv_block(self, dim_in, dim_out, norm_layer, use_bias=False):\r\n conv_block = []\r\n\r\n padding = 1\r\n\r\n conv_block += [nn.Conv3d(dim_in, dim_out, kernel_size=3, stride=1, padding=padding, bias=use_bias), norm_layer(dim), nn.ReLU(True)]\r\n conv_block += [nn.Conv3d(dim_in, dim_out, kernel_size=3, stride=1, padding=padding, bias=use_bias), norm_layer(dim)]\r\n\r\n return nn.Sequential(*conv_block)", "title": "" }, { "docid": "58b5e878258a2df51d83b1c9700c6d5e", "score": "0.6524475", "text": "def _conv2d(self, \n\t\t\tnet,\n\t\t\tnum_o,\n\t\t\tkernel_size, \n\t\t\tstride=1,\n\t\t\trate=1,\n\t\t\tpadding='SAME',\t\t\t\n\t\t\tweight_decay=0.0001,\n\t\t\tactivation_fn=tf.nn.relu,\n\t\t\tuse_batch_norm=True,\n fine_tune_batch_norm = False,\n\t\t\tname = None):\n batch_norm_params = {\n 'decay': 0.997,\n 'epsilon': 1e-5,\n 'scale': True,\n 'is_training': self.phase and fine_tune_batch_norm,\n 'fused': True, # Use fused batch norm if possible.\n }\n \n net = tf.contrib.layers.conv2d(net,\n num_o,\n kernel_size,\n stride,\n padding = padding,\n rate = rate,\n activation_fn = activation_fn,\n normalizer_fn = tf.contrib.layers.batch_norm if use_batch_norm else None,\n normalizer_params = batch_norm_params,\n weights_initializer = initializers.variance_scaling_initializer(),\n weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay),\n scope = name)\n return net", "title": "" }, { "docid": "19c6d35f4728505d1f637661b231cded", "score": "0.65208095", "text": "def conv(dims, inplanes, outplanes, kernel_size, stride, dilation, bias):\n padding = math.floor((dilation*(kernel_size-1)+2-stride)/2)\n if dims ==2:\n return nn.Conv2d(inplanes, outplanes, kernel_size, stride,\n padding, dilation, bias=bias)\n elif dims == 3:\n return nn.Conv3d(inplanes, outplanes, kernel_size, stride,\n padding, dilation, bias=bias)\n else:\n raise ValueError('dimension of conv must be 2 or 3')", "title": "" }, { "docid": "0e4c5ecf25d81199080c2f1bf66e8acf", "score": "0.650988", "text": "def conv2d(x_, filter_size, filter_num, stride=1):\n # get number of channels in input\n channels = x_.get_shape()[3].value\n\n # create weights tensor\n weights = tf.Variable(tf.random_normal([filter_size, filter_size, channels, filter_num]))\n\n # add weights tensor to collection\n tf.add_to_collection('conv_weights', weights)\n\n # create bias tensor\n 
bias = tf.Variable(tf.random_normal([filter_num]))\n\n # apply weights and biases\n preactivations = tf.nn.conv2d(x_, weights, strides=[1, stride, stride, 1], padding='SAME')\n preactivations = tf.nn.bias_add(preactivations, bias)\n\n # apply activation function, this is layer output\n activations = tf.nn.relu(preactivations)\n\n # add output to collection\n tf.add_to_collection('conv_output', activations)\n\n return activations", "title": "" }, { "docid": "084de7025c405b9289d8909f3a196b23", "score": "0.6493921", "text": "def featnet1(data, conv_weight, conv_bias, name_prefix):\r\n\r\n net=mx.sym.Convolution(data=data, kernel=(7,7), stride=(3,3),\r\n pad=(3,3),num_filter=96, weight=conv_weight[0],\r\n bias=conv_bias[0], name=name_prefix+\"conv0\")\r\n net=mx.sym.Activation(data=net, act_type=\"relu\", name=name_prefix+\"relu0\")\r\n net=mx.sym.Pooling(data=net, kernel=(2,2), pool_type=\"max\",\r\n stride=(2,2),name=name_prefix+\"maxpool0\")\r\n net=mx.sym.Convolution(data=net, kernel=(5,5),stride=(1,1),\r\n pad=(2,2), num_filter=192, weight=conv_weight[1],\r\n bias=conv_bias[1], name=name_prefix+\"conv1\")\r\n net=mx.sym.Activation(data=net, act_type=\"relu\", name=name_prefix+\"relu1\")\r\n net=mx.sym.Pooling(data=net, kernel=(2,2), pool_type=\"max\",\r\n stride=(2,2),name=name_prefix+\"maxpool1\")\r\n net=mx.sym.Convolution(data=net, kernel=(3,3),stride=(1,1),\r\n pad=(1,1), num_filter=256, weight=conv_weight[2],\r\n bias=conv_bias[2], name=name_prefix+\"conv2\") \r\n net=mx.sym.Activation(data=net, act_type=\"relu\", name=name_prefix+\"relu2\") \r\n return net", "title": "" }, { "docid": "a028e6e1143a68af9508aed740b325f0", "score": "0.6491282", "text": "def conv(self, input, k_h, k_w, c_o, s_h, s_w, name, bw=cfg.WORD_WIDTH, fl=10, rs=0, biased=True,relu=True, padding=DEFAULT_PADDING):\n self.validate_padding(padding)\n c_i = input.get_shape()[-1]\n if self.isHardware: \n convolve = lambda i, k: self.lp_conv(i, k, s_h, s_w, bw, fl, rs, padding) # DLA\n else:\n convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding) # Tensorflow\n\n with tf.variable_scope(name) as scope:\n init_weights = tf.contrib.layers.variance_scaling_initializer(factor=0.01, mode='FAN_AVG', uniform=False)\n init_biases = tf.constant_initializer(0.0)\n #print(\"TF DEBUG, scope: %s\\n\" %(name))\n kernel = self.make_var('weights', [k_h, k_w, c_i, c_o], init_weights, self.trainable, \\\n regularizer=self.l2_regularizer(0.0005))\n if cfg.ENABLE_TENSORBOARD:\n self.variable_summaries(kernel)\n if biased:\n biases = self.make_var('biases', [c_o], init_biases, self.trainable)\n if cfg.ENABLE_TENSORBOARD:\n self.variable_summaries(biases)\n conv = convolve(input, kernel)\n if relu:\n bias = tf.nn.bias_add(conv, biases)\n if self.isHardware:\n bias_s = self.saturate(bias, cfg.WORD_WIDTH) # New addition for saturation\n return tf.nn.relu(bias_s)\n return tf.nn.relu(bias)\n bias_add = tf.nn.bias_add(conv, biases)\n if self.isHardware:\n return self.saturate(bias_add, cfg.WORD_WIDTH) # New addition for saturation\n return bias_add\n else:\n conv = convolve(input, kernel)\n if relu:\n return tf.nn.relu(conv)\n return conv", "title": "" }, { "docid": "d095cbf0ff2d543a20e81b2fc13a0291", "score": "0.6491024", "text": "def conv(in_channels, out_channels, kernel_size, stride=2, padding=1, batch_norm=True):\n layers = []\n conv_layer = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, \n kernel_size=kernel_size, stride=stride, padding=padding, bias=False)\n \n layers.append(conv_layer)\n\n if 
batch_norm:\n layers.append(nn.BatchNorm2d(out_channels))\n return nn.Sequential(*layers)", "title": "" }, { "docid": "b8f575362841769b58626acc087a2add", "score": "0.647499", "text": "def conv2d(x, W):\r\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')", "title": "" }, { "docid": "b8f575362841769b58626acc087a2add", "score": "0.647499", "text": "def conv2d(x, W):\r\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')", "title": "" }, { "docid": "2afbfdfccc7f249f8955ee8f874243bf", "score": "0.64698464", "text": "def build_cnn(input_var=None):\n\n network = lasagne.layers.InputLayer(shape=(None, 1, 28, 28),\n input_var=input_var)\n\n network = lasagne.layers.Conv2DLayer(\n network, num_filters=32,\n filter_size=(5, 5), nonlinearity=lasagne.nonlinearities.rectify)\n\n network = lasagne.layers.MaxPool2DLayer(\n network, pool_size=(2, 2))\n\n network = lasagne.layers.Conv2DLayer(\n network, num_filters=32,\n filter_size=(5, 5), nonlinearity=lasagne.nonlinearities.rectify)\n\n network = lasagne.layers.MaxPool2DLayer(\n network, pool_size=(2, 2))\n\n network = lasagne.layers.DropoutLayer(network, p=0.5)\n\n network = lasagne.layers.DenseLayer(\n network, num_units=500,\n nonlinearity=lasagne.nonlinearities.rectify)\n\n network = lasagne.layers.DropoutLayer(network, p=0.5)\n\n network = lasagne.layers.DenseLayer(\n network, num_units=10,\n nonlinearity=lasagne.nonlinearities.softmax)\n\n return network", "title": "" }, { "docid": "bfad83f13a9573dd7c8f6b2c7e6cb512", "score": "0.64650875", "text": "def conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')", "title": "" }, { "docid": "bfad83f13a9573dd7c8f6b2c7e6cb512", "score": "0.64650875", "text": "def conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')", "title": "" }, { "docid": "bfad83f13a9573dd7c8f6b2c7e6cb512", "score": "0.64650875", "text": "def conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')", "title": "" }, { "docid": "343ea579067816a838779a1479066a67", "score": "0.6463095", "text": "def conv_net(x, layers):\n layer_constructors = {\n \"conv\": conv2d\n , \"avgpool\": avgpool2d\n , \"maxpool\": maxpool2d\n , \"fc\": fully_connected\n , \"relu\": relu\n , \"flatten\": flatten\n , \"dropout\": dropout\n }\n for name, param in layers:\n param.update({\"x\": x})\n x = layer_constructors[name](**param)\n return x", "title": "" }, { "docid": "6c98afa372f816296c54441407a5e2ec", "score": "0.6461653", "text": "def conv_bn(in_layer, num_filters, filter_size, nonlinearity=rectify, pad='same'):\n in_layer = Conv2DLayer(in_layer, num_filters=num_filters, filter_size=filter_size,\n nonlinearity=nonlinearity, pad=pad, name='conv')\n in_layer = batch_norm(in_layer)\n return in_layer", "title": "" }, { "docid": "ac292cb3b89b088149ac2c9c7ebfcb85", "score": "0.6449387", "text": "def conv_conv_pool(input_, n_filters, training, name, pool=True, activation=tf.nn.relu,pool_square_size = 2, no_act=False):\n net = input_\n\n with tf.variable_scope(\"layer{}\".format(name)):\n for i, F in enumerate(n_filters):\n net = tf.layers.conv2d(net, F, (3, 3), activation=None, padding='same', name=\"conv_{}\".format(i + 1))\n net = tf.layers.batch_normalization(net, training=training, name=\"bn_{}\".format(i + 1))\n if (no_act==False):\n net = activation(net, name=\"relu{}_{}\".format(name, i + 1))\n\n if pool is False:\n return net\n psz = pool_square_size \n pool = tf.layers.max_pooling2d(net, (psz, psz), strides=(psz, psz), name=\"pool_{}\".format(name))\n\n 
return net, pool", "title": "" }, { "docid": "61ac2eb1a61cb041641af905d3f94b6f", "score": "0.6449301", "text": "def conv(input_tensor, filter_shape, name=None, act=tf.nn.relu):\n with tf.name_scope(name):\n initial_weights = tf.truncated_normal(filter_shape, stddev=0.1)\n weights = tf.Variable(initial_weights, name='weights')\n\n conv = tf.nn.conv2d(input=input_tensor,\n filter=weights,\n strides=[1, 1, 1, 1],\n padding='SAME',\n name='convolution')\n\n initial_biases = tf.constant(1.0,\n shape=filter_shape[3],\n dtype=tf.float32)\n biases = tf.Variable(initial_biases, name='biases')\n\n preactivations = tf.nn.bias_add(conv, biases, name='bias_addition')\n if act is None:\n return preactivations\n else:\n return act(preactivations, name='activation')", "title": "" }, { "docid": "41023e07eae7452087466796ac9bd0ba", "score": "0.64459014", "text": "def build_simple_network():\n X = klayers.Input(input_shape)\n\n network = klayers.Conv2D(\n 32,\n activation=None,\n kernel_size=(3, 3),\n padding=\"same\",\n kernel_initializer=\"he_normal\",\n kernel_regularizer=regularizers.l2(1e-3),\n )(X)\n network = klayers.BatchNormalization()(network)\n network = klayers.Activation(\"relu\")(network)\n network = klayers.Dropout(0.4)(network)\n\n network = klayers.Conv2D(\n 32,\n activation=None,\n kernel_size=(3, 3),\n padding=\"same\",\n kernel_initializer=\"he_normal\",\n kernel_regularizer=regularizers.l2(1e-3),\n )(network)\n network = klayers.BatchNormalization()(network)\n network = klayers.Activation(\"relu\")(network)\n network = klayers.Dropout(0.4)(network)\n\n network = klayers.Conv2D(\n 32,\n activation=None,\n kernel_size=(3, 3),\n padding=\"same\",\n kernel_initializer=\"he_normal\",\n kernel_regularizer=regularizers.l2(1e-3),\n )(network)\n network = klayers.BatchNormalization()(network)\n network = klayers.Activation(\"relu\")(network)\n network = klayers.Dropout(0.4)(network)\n\n network = klayers.AveragePooling2D()(network)\n network = klayers.Flatten()(network)\n network = klayers.Dense(len(labels), activation=\"softmax\")(network)\n\n cifar10_network = Model(inputs=X, outputs=network)\n return cifar10_network", "title": "" }, { "docid": "59adae0112704f9ce7be9455601c434a", "score": "0.64417404", "text": "def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n conv_block = []\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]\n if use_dropout:\n conv_block += [nn.Dropout(0.5)]\n\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]\n\n return nn.Sequential(*conv_block)", "title": "" }, { "docid": "03dee7a0e39bf354619f84b27b154f7b", "score": "0.6441145", "text": "def conv_layer(input_tensor, kernel_size_x, kernel_size_y,\n input_feat_maps, output_feat_maps, stride, layer_name, act=tf.nn.relu,is_training=True,use_batch_norm=True):\n # Adding a name scope ensures logical grouping of the layers in the graph.\n 
with tf.name_scope(layer_name):\n # This Variable will hold the state of the weights for the layer\n with tf.name_scope('weights'):\n weights = weight_variable([kernel_size_x,kernel_size_y,input_feat_maps,output_feat_maps])\n variable_summaries(weights, layer_name + '/weights')\n with tf.name_scope('biases'):\n biases = bias_variable([output_feat_maps])\n variable_summaries(biases, layer_name + '/biases')\n with tf.name_scope('Wx_plus_b'):\n preactivate = tf.nn.conv2d(input_tensor,weights,\n strides=[1,stride,stride,1],padding='SAME') + biases\n tf.summary.histogram(layer_name + '/pre_activations', preactivate)\n if use_batch_norm:\n with tf.name_scope('batch_norm'):\n batch_norm = batch_norm_conv(preactivate, output_feat_maps, phase_train=is_training,scope=layer_name+'_batch_norm')\n tf.summary.histogram(layer_name + '/batch_norm', batch_norm)\n else:\n batch_norm = preactivate\n if act:\n activations = act(batch_norm, name='activation')\n else:\n activations = batch_norm\n tf.summary.histogram(layer_name + '/activations', activations)\n return activations", "title": "" }, { "docid": "ea8124c4beace719ecabc4e289d25f15", "score": "0.6439945", "text": "def setup_convolutional_network(input_size, output_size, args):\n\n logging.debug('Setting up convolutional network')\n\n # Placeholders for input and output variables\n x = tf.placeholder(tf.float32, [None, input_size], name='x')\n y = tf.placeholder(tf.float32, [None, output_size], name='y')\n\n # Reshape image\n x_image = tf.reshape(x, [-1, args.image_size, args.image_size, args.number_of_channels])\n\n # Network parameters\n filter_size1 = 8\n num_filters1 = 32\n filter_size2 = 16\n num_filters2 = 128\n fc_size1 = 256\n fc_size2 = 1024\n\n # Set up the convolutional layers\n conv_layer_1_1 = setup_convolutional_layer(\n x=x_image, filter_size=filter_size1, input_size=args.number_of_channels,\n num_filters=num_filters1, use_pooling=False\n )\n conv_layer_1_2 = setup_convolutional_layer(\n x=conv_layer_1_1, filter_size=filter_size1, input_size=num_filters1,\n num_filters=num_filters1, use_pooling=True\n )\n conv_layer_2_1 = setup_convolutional_layer(\n x=conv_layer_1_2, filter_size=filter_size2, input_size=num_filters1,\n num_filters=num_filters2, use_pooling=False\n )\n conv_layer_2_2 = setup_convolutional_layer(\n x=conv_layer_2_1, filter_size=filter_size2, input_size=num_filters2,\n num_filters=num_filters2, use_pooling=True\n )\n\n layer_flat, num_features = flatten_layer(conv_layer_2_2)\n\n # Set up the fully connected layers\n full_layer_1 = setup_fully_connected_layer(\n x=layer_flat, input_size=num_features, output_size=fc_size1\n )\n full_layer_2 = setup_fully_connected_layer(\n x=full_layer_1, input_size=fc_size1, output_size=fc_size2\n )\n full_layer_3 = setup_fully_connected_layer(\n x=full_layer_2, input_size=fc_size2, output_size=output_size, use_relu=False\n )\n\n # Calculate softmax on the output layer\n output_layer = tf.nn.softmax(full_layer_3)\n\n # Calculate cross entropy, reduce its mean, and optimize\n cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(output_layer, y)\n cost = tf.reduce_mean(cross_entropy)\n optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)\n\n return x, y, output_layer, cost, optimizer", "title": "" }, { "docid": "780b4e3a93f9b1bdfbb0d779fafe3fcb", "score": "0.64396703", "text": "def conv(self, name, x, shape, stride=1, padding='SAME'):\n with tf.name_scope(name):\n W = tf.Variable(tf.truncated_normal(shape, stddev=0.1))\n b = tf.Variable(tf.constant(0.1, shape=shape[-1:]))\n 
h_conv = tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding=padding)\n h_conv = tf.nn.relu(h_conv + b)\n self.print_tensor(h_conv)\n return h_conv, W, b", "title": "" }, { "docid": "0c3aa0b3c3844dca98338892eaeccbf5", "score": "0.64382726", "text": "def build_conv_block(self, dim_in, dim_out, padding_type, norm_layer, use_dropout, use_bias):\n conv_block = []\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n\n conv_block += [nn.Conv2d(dim_in, dim_out, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim_out), nn.ReLU(True)]\n if use_dropout:\n conv_block += [nn.Dropout(0.5)]\n\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n conv_block += [nn.Conv2d(dim_out, dim_out, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim_out)]\n\n return nn.Sequential(*conv_block)", "title": "" }, { "docid": "cd7008989bf84cfbc7cc62f729eb0037", "score": "0.64371717", "text": "def conv2d(x, W):\n return tf.nn.conv2d(x, W, strides = [1,2,8,1], padding = 'SAME')", "title": "" }, { "docid": "c9e240af3eed7d4e7a1085e8ce754e17", "score": "0.64286095", "text": "def ConvBlock(self, layers, filters):\n model = self.model\n for i in range(layers):\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(filters, (3, 3), activation='relu'))\n model.add(MaxPooling2D((2, 2), strides=(2, 2)))", "title": "" }, { "docid": "621b1cc708d37f124ece697cc2d8c395", "score": "0.6423732", "text": "def initial_conv(input_layer, k_size=3):\n channel_axis = 1 if K.image_data_format() == \"channels_first\" else -1\n\n x = Convolution3D(16, (k_size, k_size, k_size), padding='same', kernel_initializer='he_uniform')(input_layer) # keras default uses glorot_uniform\n x = BatchNormalization(axis=channel_axis)(x)\n x = Activation('relu')(x)\n return x", "title": "" }, { "docid": "3694958913ec4e923d8f7c49ce2fa0f8", "score": "0.6423429", "text": "def dilated_conv(i):\n if dilation > 0:\n dilation_rate = int(dilation ** i)\n else:\n # If dilation is negative, decrease dilation with depth instead of\n # increasing.\n dilation_rate = int((-dilation) ** (layers_per_stack - i - 1))\n layer = tf.keras.Sequential(name='dilated_conv')\n layer.add(tfkl.Activation(tf.nn.relu))\n layer.add(conv(ch, kernel_size, 1, dilation_rate))\n return layer", "title": "" }, { "docid": "850f50fe56d780285c5a448dbf61e2fe", "score": "0.640426", "text": "def _ConvLay(self, layers, filters):\n for i in range(layers):\n self.model.add(ZeroPadding2D((1,1)))\n self.model.add(Convolution2D(filters, 3, 3, activation='relu'))\n\n self.model.add(MaxPooling2D( (2, 2), strides=(2, 2) ))", "title": "" }, { "docid": "850f50fe56d780285c5a448dbf61e2fe", "score": "0.640426", "text": "def _ConvLay(self, layers, filters):\n for i in range(layers):\n self.model.add(ZeroPadding2D((1,1)))\n self.model.add(Convolution2D(filters, 3, 3, activation='relu'))\n\n self.model.add(MaxPooling2D( (2, 2), strides=(2, 2) ))", "title": "" }, { "docid": "f949e46d8696ce977756e7734f848bef", "score": "0.64030534", "text": "def conv_layer(prev_layer, layer_depth, is_training):\n strides = 2 if layer_depth % 3 == 0 else 1\n conv_layer = 
tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', use_bias=False, activation=None)\n conv_layer = tf.layers.batch_normalization(conv_layer, training=is_training)\n conv_layer = tf.nn.relu(conv_layer)\n\n return conv_layer", "title": "" }, { "docid": "63c8e134320a74996bc066b871b9bb3f", "score": "0.6401746", "text": "def _tensor_convolution(self, input_matrix, param_name):\n\n filters = self._params[self._param_path(param_name) + '/W']\n return tensor.nnet.conv2d(input_matrix,\n filters,\n input_shape=(None, 1, None, None),\n filter_shape=self._filter_shape,\n border_mode='half',\n filter_flip=False)", "title": "" }, { "docid": "f060afa4b1e4b2e943650fde49e84439", "score": "0.6400618", "text": "def conv_block(self, dim_in, dim_out, kernel_size=3, stride=1, padding=0, bias=True):\n\n return nn.Sequential(\n nn.Conv2d(dim_in, dim_out, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias),\n #nn.ReLU(inplace=True),\n nn.LeakyReLU(0.01, inplace=True),\n nn.Conv2d(dim_out, dim_out, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias),\n nn.BatchNorm2d(dim_out),\n #nn.ReLU(inplace=True)\n nn.LeakyReLU(0.01, inplace=True)\n )", "title": "" }, { "docid": "588495eda4a33a7a9a3bb8b3cd98b0c4", "score": "0.6398694", "text": "def CNN3(inputs):\n net = Conv2D(32, (3, 3), activation = 'relu')(inputs)\n net = Conv2D(64, (2, 2), activation = 'relu')(net)\n net = Conv2D(64, (1, 1), activation = 'relu')(net)\n net = Flatten()(net)\n\n return net", "title": "" }, { "docid": "dd4f1e497dee61a3125a443fb5fbdf88", "score": "0.6397961", "text": "def build_layer(self, conv_layer, pool_list, fc_nodes, input_dim):\n\n # ------------------------------------------------------------------------------------------\n # CONVOLUTIONAL PART\n # ------------------------------------------------------------------------------------------\n # First convolutional layer\n conv_list = [torch.nn.Conv2d(input_dim[2], conv_layer[0, 0], conv_layer[0, 1],\n padding=self.pad_size(conv_layer[0, 1], conv_layer[0, 2])),\n self.get_activation_function()]\n # Pooling\n if pool_list[0, 0] != 0:\n conv_list.extend([self.build_pooling_layer(pool_list[0])])\n\n # We need to compute the input size of the fully connected layer\n size = self.conv_out_size(input_dim[0:2], conv_layer[0, 1], conv_layer[0, 2], pool_list[0])\n\n # All others convolutional layers\n for it in range(1, len(conv_layer)):\n # Convolution\n conv_list.extend([torch.nn.Conv2d(conv_layer[it - 1, 0], conv_layer[it, 0], conv_layer[it, 1],\n padding=self.pad_size(conv_layer[it, 1], conv_layer[it, 2])),\n self.get_activation_function()])\n # Pooling\n if pool_list[it, 0] != 0:\n conv_list.extend([self.build_pooling_layer(pool_list[it])])\n\n # Update the output size\n size = self.conv_out_size(size, conv_layer[it, 1], conv_layer[it, 2], pool_list[it])\n\n # We create the sequential of the convolutional network part\n self.conv = torch.nn.Sequential(*conv_list)\n\n # ------------------------------------------------------------------------------------------\n # FULLY CONNECTED PART\n # ------------------------------------------------------------------------------------------\n # Compute the fully connected input layer size\n self.num_flat_features = size[0] * size[1] * conv_layer[-1, 0]\n\n # First fully connected layer\n fc_list = [torch.nn.Linear(self.num_flat_features, fc_nodes[0]), self.get_activation_function(), self.drop]\n\n # All other fully connected layer\n for it in range(1, len(fc_nodes)):\n fc_list.extend([torch.nn.Linear(fc_nodes[it - 
1], fc_nodes[it]), self.get_activation_function(), self.drop])\n\n # Output layer\n fc_list.extend([torch.nn.Linear(fc_nodes[-1], self.classes), self.soft])\n\n self.fc = torch.nn.Sequential(*fc_list)", "title": "" }, { "docid": "e6c4c9653421d10997ba08c8ef2553a8", "score": "0.6396162", "text": "def expand_conv(init, n_filters, k=1, k_size=3, strides=(1, 1, 1)):\n channel_axis = 1 if K.image_data_format() == \"channels_first\" else -1\n\n x = Convolution3D(n_filters * k, (k_size, k_size, k_size), padding='same', strides=strides, kernel_initializer='he_uniform')(init)\n\n x = BatchNormalization(axis=channel_axis)(x)\n x = Activation('relu')(x)\n\n x = Convolution3D(n_filters * k, (k_size, k_size, k_size), padding='same')(x)\n\n skip = Convolution3D(n_filters * k, (k_size, k_size, k_size), padding='same', strides=strides, kernel_initializer='he_uniform')(init)\n\n m = Add()([x, skip])\n\n return m", "title": "" }, { "docid": "8ae19220a531123f4130d002ca65c271", "score": "0.6395697", "text": "def conv_module(x, k, kernel_size, strides=(1, 1), activation=\"relu\", kernel_initializer=\"he_uniform\", padding=\"same\"):\r\n x = Conv2D(k, kernel_size, strides=strides, kernel_initializer=kernel_initializer, padding=padding)(x)\r\n x = BatchNormalization()(x)\r\n x = Activation(activation)(x)\r\n x = Dropout(0.1)(x)\r\n return x", "title": "" }, { "docid": "884178824b6bab8c052628ecc43a7980", "score": "0.63896185", "text": "def __init__(self, filter_size, num_layers):\n super(Block,self).__init__()\n self.layers = []\n for i in range(num_layers):\n self.layers.append(tf.keras.layers.Conv2D(filter_size,3,padding=\"same\",activation=\"relu\"))", "title": "" }, { "docid": "681736b6f22d5ae5bc43ff8e28a619c7", "score": "0.6389257", "text": "def _build_layer_components(self):\n self.conv_layer = Conv2D(filters=self.num_filters,\n kernel_size=4,\n strides=2,\n activation=tf.nn.relu)\n\n self.activation_layer = ReLU()\n self._layers = [self.conv_layer, self.activation_layer]", "title": "" }, { "docid": "c58e7466a33384ef35c792fe2f191eef", "score": "0.6387459", "text": "def ConvBlock(inputs, n_filters, kernel_size=[3, 3]):\n net = slim.conv2d(inputs, n_filters, kernel_size, activation_fn=None, normalizer_fn=None)\n net = tf.nn.relu(slim.batch_norm(net, fused=True))\n return net", "title": "" }, { "docid": "0aabd30a4d88e551efc663c3a0a5b070", "score": "0.63874364", "text": "def conv_block(inputs, number_filt, kernel_size, strides = (1,1), padding = 'SAME', activation = tf.nn.relu, max_pool = True, batch_norm = True):\n \n conv_features = layer = tf.layers.conv2d(inputs = inputs, \n filters = number_filt, \n kernel_size = kernel_size, \n strides = strides, \n padding = padding, \n activation = activation)\n \n if max_pool:\n layer = tf.layers.max_pooling2d(layer, \n pool_size = (2,2), \n strides = (2,2), \n padding = 'SAME')\n \n if batch_norm:\n layer = tf.layers.batch_normalization(layer)\n \n return layer, conv_features", "title": "" }, { "docid": "f96ae5a616adf7ba6619d115ecc9b37c", "score": "0.6385791", "text": "def cnnConvolve(filterDim, numFilters, images, W, b):\n imageDim, _, numImages = images.shape\n convDim = imageDim - filterDim + 1\n convolvedFeatures = np.zeros(shape=(convDim, convDim, numFilters, numImages))\n\n # Loop through each filter for each image\n for imageNum in range(numImages):\n for filterNum in range(numFilters):\n convolvedImage = np.zeros(shape=(convDim, convDim))\n filt = np.squeeze(a=W[:, :, filterNum])\n filt = np.rot90(np.squeeze(filt), 2)\n im = np.squeeze(images[:, :, imageNum])\n 
conI = conv2(im, filt, mode='valid')\n conI = conI + b[filterNum]\n convolvedImage = sigmoid(conI)\n convolvedFeatures[:, :, filterNum, imageNum] = convolvedImage\n return convolvedFeatures", "title": "" }, { "docid": "b7faffc740d6763135e5209e6472885c", "score": "0.6379595", "text": "def conv(x,\n name,\n filters,\n kernel_size,\n strides,\n padding,\n relu=True,\n biased=True,\n bn=True,\n decay=0.9997,\n is_training=True,\n use_global_status=True):\n c_i = x.get_shape().as_list()[-1] # input channels\n c_o = filters # output channels\n\n # Define helper function.\n convolve = lambda i,k: tf.nn.conv2d(\n i,\n k,\n [1, strides, strides, 1],\n padding=padding)\n\n with tf.variable_scope(name) as scope:\n kernel = tf.get_variable(\n name='weights',\n shape=[kernel_size, kernel_size, c_i, c_o],\n trainable=is_training,initializer=tf.random_normal_initializer(stddev=0.02))\n\n if strides > 1:\n pad = kernel_size - 1\n pad_beg = pad // 2\n pad_end = pad - pad_beg\n pad_h = [pad_beg, pad_end]\n pad_w = [pad_beg, pad_end]\n x = tf.pad(x, [[0,0], pad_h, pad_w, [0,0]])\n\n output = convolve(x, kernel)\n\n # Add the biases.\n if biased:\n biases = tf.get_variable('biases', [c_o], trainable=is_training)\n output = tf.nn.bias_add(output, biases)\n\n # Apply batch normalization.\n if bn:\n is_bn_training = not use_global_status\n output = batch_norm(output,\n 'BatchNorm',\n is_training=is_bn_training,\n decay=decay,\n activation_fn=None)\n\n # Apply ReLU as activation function.\n if relu:\n output = tf.nn.relu(output)\n\n return output", "title": "" }, { "docid": "3e6b1754f10075c6b398329396f12ba1", "score": "0.6378885", "text": "def conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')", "title": "" }, { "docid": "3e6b1754f10075c6b398329396f12ba1", "score": "0.6378885", "text": "def conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')", "title": "" }, { "docid": "3e6b1754f10075c6b398329396f12ba1", "score": "0.6378885", "text": "def conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')", "title": "" }, { "docid": "3e6b1754f10075c6b398329396f12ba1", "score": "0.6378885", "text": "def conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')", "title": "" }, { "docid": "3e6b1754f10075c6b398329396f12ba1", "score": "0.6378885", "text": "def conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')", "title": "" } ]
acf117360a21a1bbc9a3c05a5cf6051f
Add a new remote server IP address as an additional location; this can be used for running a DHCP server. During clean up, all files from all added locations will be downloaded to a specific local location.
[ { "docid": "fbc6e2a3d0c8f6fd90bec7e2fc231b98", "score": "0.5117198", "text": "def check_remote_address(remote_address):\n if remote_address not in world.f_cfg.multiple_tested_servers:\n world.f_cfg.multiple_tested_servers.append(remote_address)", "title": "" } ]
[ { "docid": "8e447d7652e79472a573a51834cef2af", "score": "0.6541148", "text": "def config_add_path():\n # Name of the remote\n # [positional]\n name: Option\n\n # Source path\n # [positional]\n source: Option\n\n # Destination path\n # [positional]\n dest: Option\n\n cfg = get_config(\"remotes.json\")\n remote = _check_remote(cfg, name, create=True)\n\n paths = remote[\"paths\"]\n paths[source] = dest\n _list_paths(remote)\n write_config(\"remotes.json\", cfg)", "title": "" }, { "docid": "f4619cea71e2dd584ed4f5e795d515b1", "score": "0.6191511", "text": "def remote_add(connection: str):\n\n config = load_cwd_workspace_config()\n _add_remote_host(config, connection)", "title": "" }, { "docid": "75a7da3c7deaae1fba35199e57cb4083", "score": "0.6085926", "text": "def addserver(env, app, version, server):\n\n with hide('running','warnings'), settings(warn_only=True):\n #local(\"curl -XGET -H 'Accept: application/json; indent=4' -u admin:Testing00 'http://10.79.245.31:8000/api/v2/host/\" + env + \"/\" + app + \"/\" + version + \"/\" + server + \"/'\" ) \n local(\"curl -XPUT -H 'Accept: application/json; indent=4' -u admin:Testing00 'http://10.79.245.31:8000/api/v2/host/\" + env + \"/\" + app + \"/\" + version + \"/\" + server + \"'\" )", "title": "" }, { "docid": "027f7603f4f9a43fb3fab74609a5cdc1", "score": "0.6055778", "text": "def add_remote(ctx, name, address):\n P = os.getcwd()\n repo = Repository(path=P)\n click.echo(repo.remote.add(name=name, address=address))", "title": "" }, { "docid": "bc4c03c2544a552eb80bb02bd7f8aaf5", "score": "0.5887049", "text": "def config_add():\n # Name of the remote\n # [positional]\n name: Option\n\n # URL of the remote\n # [positional]\n url: Option\n\n if \"://\" not in url:\n q(\n \"URL should be formatted as type://url -- currently accepted are:\"\n \"\\n* SSH: ssh://user@host\"\n \"\\n* Local directory: file:///path/from/root\"\n )\n\n typ, url = url.split(\"://\", 1)\n if typ not in (\"ssh\", \"file\"):\n q(f\"Unknown protocol: '{typ}'. 
Accepted protocols are 'ssh' and 'file'.\")\n\n if typ == \"file\":\n url = _realpath(url)\n\n cfg = get_config(\"remotes.json\")\n if name in cfg:\n entry = cfg[name]\n entry[\"type\"] = typ\n entry[\"url\"] = url\n else:\n entry = {\"type\": typ, \"url\": url, \"paths\": {os.getenv(\"HOME\"): \"\"}}\n cfg[name] = entry\n\n print(json.dumps(entry, indent=4))\n\n write_config(\"remotes.json\", cfg)", "title": "" }, { "docid": "47ac4d479ebef0d84cac8857068adba5", "score": "0.56861824", "text": "def add_entry_to_host_file(self, vm_hostname, vm_ipaddress, vm_fqdn, minion_id):\n logger.debug(\"{} add_entry_to_host_file: parameters - {}, {}, {}, {}\"\n .format(BACKUP_LOG_ID, vm_hostname, vm_ipaddress, vm_fqdn, minion_id))\n installer_agent_script_path = get_config(KEY_NETWORKER_SERVER, \"ADD_HOSTNAME_SCRIPT_PATH\")\n salt_api = SaltNetAPI()\n add_response = {}\n try:\n pillar_details = {\n \"pillar\": {'minion_ip': vm_ipaddress, 'minion_hostname': vm_hostname, 'minion_fqdn': vm_fqdn}}\n net_api_response = salt_api.execute_command(minion_id, args=installer_agent_script_path,\n pillar_details=pillar_details)\n\n if not net_api_response:\n raise TASException(\"BACKUP014_DNS_ENTRY_FAILURE\",\n BACKUP_ERRORS[\"BACKUP014_DNS_ENTRY_FAILURE\"], None)\n\n if 'status' not in net_api_response or 'comment' not in net_api_response:\n raise TASException(\"BACKUP015_SALT_EXECUTION_ERROR\",\n BACKUP_ERRORS[\"BACKUP015_SALT_EXECUTION_ERROR\"], None)\n\n if not net_api_response['status']:\n raise TASException(\"SALT ERROR\", net_api_response['comment'], None)\n\n logger.info(\"{} Response received after executing \"\n \"the script to add Client VM DNS entry\".format(BACKUP_LOG_ID))\n logger.debug(\"{} Response for adding Client VM DNS entry {}\"\n .format(BACKUP_LOG_ID, str(net_api_response['comment'])))\n add_response = self.response_parser\\\n .parse_add_host_entry_script_response(net_api_response['comment'])\n return add_response\n\n except TASException as e:\n add_response['status'] = False\n add_response['err_code'] = e.err_code\n add_response['comment'] = e.err_message\n add_response['err_trace'] = e.err_trace\n logger.error('{} {} '.format(BACKUP_LOG_ID, add_response))\n return add_response\n\n except Exception as e:\n message = \"Unknown exception - {}\".format(e)\n logger.debug('{}{}'.format(BACKUP_LOG_ID, message))\n raise Exception(message)", "title": "" }, { "docid": "81320932b02063d8471afe79a4666015", "score": "0.5598838", "text": "def modify_remote(self, label: str, url: str) -> None:\n self.check_is_repo()\n if (\n \"registries\" not in self._local_config\n or label not in self._local_config[\"registries\"]\n ):\n raise fdp_exc.CLIConfigurationError(\n f\"No such entry '{label}' in available remotes\"\n )\n self._local_config[\"registries\"][label][\"uri\"] = url", "title": "" }, { "docid": "5596eacc477251ab9daea7dd330307c5", "score": "0.55722004", "text": "def _add_remote_host(config: WorkspaceConfig, connection: str):\n parts = connection.split(\":\")\n remote_host = parts[0]\n config_medium = get_configuration_medium(config)\n remote_dir = config_medium.generate_remote_directory(config) if len(parts) == 1 else Path(parts[1])\n\n added, index = config.add_remote_host(remote_host, remote_dir)\n if not added:\n click.echo(f\"{connection} already exists in config\")\n sys.exit(0)\n\n # Check if we can connect to the remote host and create a directory there\n workspace = SyncedWorkspace.from_config(config, config.root, index)\n try:\n workspace.create_remote()\n except RemoteError:\n 
click.secho(f\"Failed to create {workspace.remote.directory} on remote host {remote_host}\", fg=\"yellow\")\n click.secho(\"Please check if host is accessible via SSH\", fg=\"yellow\")\n sys.exit(1)\n\n click.echo(f\"Created remote directory at {workspace.remote.host}:{workspace.remote.directory}\")\n click.echo(\"Remote is configured and ready to use\")\n\n # No errors when executing the above code means we can save the config\n config_medium.save_config(config)", "title": "" }, { "docid": "09e1b966fc2fa90bb79ff8f7472903bc", "score": "0.55588585", "text": "def resolvconf(tortpdir):\n try:\n copy2(\"/etc/resolv.conf\",tortpdir)\n except IOError as e:\n print e\n resolv = open('/etc/resolv.conf', 'w')\n resolv.write('nameserver 127.0.0.1\\n')\n resolv.close()", "title": "" }, { "docid": "1627a10d692b803d48aa02cfea024918", "score": "0.5493226", "text": "def add_ip(self, objectitem):\n\n self.layers['ipaddr'][str(objectitem)] = objectitem", "title": "" }, { "docid": "dc5a90a8dc4b8320f5d39b135a98c676", "score": "0.5485714", "text": "def addservervar(env, app, version, server, key,value):\n\n with hide('running','warnings'), settings(warn_only=True):\n #local(\"curl -XGET -H 'Accept: application/json; indent=4' -u admin:Testing00 'http://10.79.245.31:8000/api/v2/host/\" + env + \"/\" + app + \"/\" + version + \"/\" + server + \"/'\" ) \n local(\"curl -XPUT -H 'Accept: application/json; indent=4' -u admin:Testing00 'http://10.79.245.31:8000/api/v2/hostvar/\" + env + \"/\" + app + \"/\" + version + \"/\" + server + \"/\" + key + \"/\" + value + \"'\" )", "title": "" }, { "docid": "d5e7a9e5871e7dea4593d53862a16586", "score": "0.5453379", "text": "def server(ctx, server):\n serverPath = click.prompt(\n \"Please enter your server path\"\n )\n serverCommand = click.prompt(\n \"Please enter your server run command\"\n )\n configFile = ctx.obj['configFile']\n\n helper.addServerToConfigFile(server, serverPath, serverCommand, ctx.obj['configDirPath'])", "title": "" }, { "docid": "cfd57d524da046729fb74e9ab7f47534", "score": "0.5442468", "text": "def nfs_server_ip(self, value):\n self._nfs_server_ip = value", "title": "" }, { "docid": "73975de92605cdb07d34e82e2478d47d", "score": "0.5431573", "text": "def add_remote(\n self, remote_url: str, token_file: str, label: str = \"origin\"\n ) -> None:\n self.check_is_repo()\n if \"registries\" not in self._local_config:\n self._local_config[\"registries\"] = {}\n if label in self._local_config[\"registries\"]:\n raise fdp_exc.CLIConfigurationError(\n f\"Registry remote '{label}' already exists.\"\n )\n self._local_config[\"registries\"][label] = {\n \"uri\": remote_url,\n \"token\": token_file,\n }", "title": "" }, { "docid": "a96db7c035a861802f95f98a09e8e9f4", "score": "0.5352538", "text": "def ip_add(**kwargs):\n client = Client()\n\n item = client.IpAddresses()\n for key, value in kwargs.items():\n setattr(item, key, value)\n item.save()\n click.echo('Item added successfully.')", "title": "" }, { "docid": "f6c3dec7c8db260d0d921de732cd9b15", "score": "0.53382313", "text": "def _copy_to_remote(\n self, local_path: Union[str, pathlib.Path], remote_path: Union[str, pathlib.Path]\n ):\n ...", "title": "" }, { "docid": "453c4f8e9c04d21179ce7f177f8ede22", "score": "0.5336294", "text": "def save_new_ip(self,ip,debug):\n\t\t\n\t\tif(debug):\n\t\t\tprint(\"\\n -> log IP :\"+ip)\n\t\t\n\t\ttext_file = open(self.DEF_LAST_IP, \"w\")\n\t\ttext_file.write(ip)\n\t\ttext_file.close()", "title": "" }, { "docid": "6c13183a422d0208e0be0b4f57895e6a", "score": "0.5308287", "text": 
"def modify(stackName):\n\n print(\"Fetching IP Addresses\")\n\n database_server_ip = get_database_ip(stackName)\n webserver1_ip = get_ws1_ip(stackName)\n webserver2_ip = get_ws2_ip(stackName)\n\n print(\"Database Server IP: {}\".format(database_server_ip))\n print(\"Web Server 1 IP: {}\".format(webserver1_ip))\n print(\"Web Server 2 IP: {}\".format(webserver2_ip))\n\n # Now to create a custom connect.inc.php in local server\n for line in fileinput.input(\"../synergy/includes/connect.inc.php\", inplace=True):\n if '$servername =' in line:\n print('$servername = \"{}\";'.format(database_server_ip))\n else:\n print(line, end=\"\")\n\n print(\"New Connect.inc.php file created\")\n\n print(\"Copying File to Web Server 1\")\n\n \n copy_file_to_webserver(webserver1_ip)\n\n print(\"File Copied to Web Server 1\")\n\n print(\"Copying File to Web Server 2\")\n\n copy_file_to_webserver(webserver2_ip)\n\n print(\"File Copied to Web Server 2\")\n\n print(\"Fetching DNS name....\")\n dns_name = get_elb_dns_name(stackName)\n print(\"Your Application is live at {}\".format(dns_name))", "title": "" }, { "docid": "12b0c139ac71dee92570ed72348ff391", "score": "0.5295005", "text": "def add(db, server_ip_address, source, port, vrf):\n ctx = click.get_current_context()\n\n server_validator(ctx, db.cfgdb, server_ip_address, False)\n\n table = str(SYSLOG_TABLE_CDB)\n key = str(server_ip_address)\n data = {}\n\n if source is not None:\n source_validator(ctx, server_ip_address, source)\n data[SYSLOG_SOURCE] = source\n if port is not None:\n data[SYSLOG_PORT] = port\n if vrf is not None:\n vrf_validator(ctx, db.cfgdb, vrf)\n data[SYSLOG_VRF] = vrf\n\n source_to_vrf_validator(ctx, source, vrf)\n\n try:\n add_entry(db.cfgdb, table, key, data)\n clicommon.run_command(\"systemctl reset-failed rsyslog-config rsyslog\", display_cmd=True)\n clicommon.run_command(\"systemctl restart rsyslog-config\", display_cmd=True)\n log.log_notice(\"Added remote syslog logging: server={},source={},port={},vrf={}\".format(\n server_ip_address,\n data.get(SYSLOG_SOURCE, \"N/A\"),\n data.get(SYSLOG_PORT, \"N/A\"),\n data.get(SYSLOG_VRF, \"N/A\")\n ))\n except Exception as e:\n log.log_error(\"Failed to add remote syslog logging: {}\".format(str(e)))\n ctx.fail(str(e))", "title": "" }, { "docid": "d233317c36206e7cb71655c2dd4a3aff", "score": "0.5263611", "text": "def ifadd(self, iff, addr):\n addr, plen = (addr.split(\"/\")+[\"128\"])[:2]\n addr = in6_ptop(addr)\n plen = int(plen)\n naddr = inet_pton(socket.AF_INET6, addr)\n nmask = in6_cidr2mask(plen)\n prefix = inet_ntop(socket.AF_INET6, in6_and(nmask,naddr))\n self.invalidate_cache()\n self.routes.append((prefix,plen,'::',iff,[addr]))", "title": "" }, { "docid": "3f68f06fbd7fd82458291d700331adff", "score": "0.5262751", "text": "def remoteMoveIP(self, line):\n self.__sendJsonCommand(\"RequestMoveIP\", {\n \"newLine\": line,\n })", "title": "" }, { "docid": "dac756b5d24d412a9d4fe135b4a35207", "score": "0.52523595", "text": "def _configure_location(config, geoip):\n config.add_request_method(\n lambda r: geoip2.database.Reader(str(geoip)), \"geoip\", reify=True)\n config.add_route(\"service-location\", \"/services/location\")\n config.add_view(_location, route_name=\"service-location\", renderer=\"json\")", "title": "" }, { "docid": "8e3fe271673a8d0c360a9c75a40a2921", "score": "0.52422327", "text": "def addServerIP(self, ips, netmask):\n return self.__cQuery('addips', {\n 'ips': ips,\n 'netmask': netmask\n })", "title": "" }, { "docid": "31cbb6a3253d9650349d4898c1d03c83", "score": 
"0.5239499", "text": "def add_new_data(file):\n path = os.getcwd()\n complete_file_with_path = path + '/' + file[0]\n df = pd.read_csv(complete_file_with_path, header=0)\n correct_file_name = not_alphabetic(complete_file_with_path)\n ips = df['Source IP'].tolist()\n for x in ips:\n li['Source IP'].append(x)\n li['Environment'].append(correct_file_name)\n data_completer(li)", "title": "" }, { "docid": "7c1f754551fe964374a9887125828c47", "score": "0.52280414", "text": "def setRemoteIp(self, ip):\n self.remoteIp = ip", "title": "" }, { "docid": "d2d28b47be590b6b1b1ff48d4ac668c8", "score": "0.52263314", "text": "def _add_handler(self, args):\n ip = args.ip\n if not netutils.validate_ip_address(ip):\n IO.error('invalid ip address.')\n return\n\n if args.mac:\n mac = args.mac\n if not netutils.validate_mac_address(mac):\n IO.error('invalid mac address.')\n return\n else:\n mac = netutils.get_mac_by_ip(self.interface, ip)\n if mac is None:\n IO.error('unable to resolve mac address. specify manually (--mac).')\n return\n\n name = None\n try:\n host_info = socket.gethostbyaddr(ip)\n name = None if host_info is None else host_info[0]\n except socket.herror:\n pass\n\n host = Host(ip, mac, name)\n\n with self.hosts_lock:\n if host in self.hosts:\n IO.error('host does already exist.')\n return\n\n self.hosts.append(host) \n\n IO.ok('host added.')\n IO.discord(\n \"\"\"\n```\nIP: {}\nMAC: {}\nNAME: {}\nBerhasil ditambahkan\n```\n \"\"\".format(host.ip, host.mac, host.name)\n )", "title": "" }, { "docid": "be8b280b73466db0a0cbb5b09da8b581", "score": "0.5224834", "text": "def newaddr(self, moniker=None):\n if not moniker:\n print \"newaddr command expects: moniker\"\n return\n asset = self.get_asset_definition(moniker)\n addr = self.controller.get_new_address(asset)\n print addr.get_address()", "title": "" }, { "docid": "5a11a8d5c3e6813368233ea751a2b786", "score": "0.52248025", "text": "def copyToDFS(address, fname, path):\n#We create a function that connects to the metadata server and gives back it;s response\n\n\n\n\t# Create a connection to the data server\n\n\t# Fill code\n\t\n\t# We create the connection to the data server\n\t\n\tcreated_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\n\t# We use the try to attempt to connect to the server\n\n\ttry:\n\t created_socket.connect((address[0],address[1]))\n\texcept socket.error, e:\n\t\tprint \"Connection to the data server failed. 
\\n %s\" % e\n\t\t#If connection fails exit\n\t\tsys.exit(1)\n\t#if no error occured then it connected!\n\t\n\tprint \"Connection to the data server was done succesfully!\"\n\n\t# Read file\n\t# We open the file, take the file data and file size in the variables so we can use these.\n\t\n\topen_file = open( path, 'r')\n\t\n\tfile_data = open_file.read()\n\t\n\tfile_size = len(file_data)\n\n\t# Fill code\n\n\t# Create a Put packet with the fname and the length of the data,\n\t# and sends it to the metadata server \n\t# Fill code\n\t\n\t# We use this try to determine if the file already exits/error occurs or if we proceed and decode the reply and get the data nodes.\n\n\ttry:\n\t\treply = \"DUP\"\n\t\t\n\t\tcreate_packet = Packet()\n\t\t\n\t\twhile reply == \"DUP\":\n\t\t\n\t\t\tcreate_packet.BuildPutPacket( fname, file_size)\n\t\t\t\n\t\t\tcreated_socket.sendall(create_packet.getEncodedPacket())\n\t\t\t\n\t\t\treply = created_socket.recv(1024)\n\t\t\t\n\t\t\tif reply != \"DUP\":\n\t\t\t\n\t\t\t create_packet.DecodePacket(reply)\n\t\t\t \n\t\t\t server = create_packet.getDataNodes()\n\t\t\tif reply == \"DUP\":\n\t\t\t\n\t\t\t\tprint \"File already exists or an error occured.\"\n\t\t\t\tsys.exit(1)\n\t\t\t\t\n\t\n\t\t\t\n\tfinally:\n\t\tcreated_socket.close()\t\t # afterwards we close the socket we used when the try ends\n\n\t# If no error or file exists\n\t# Get the list of data nodes.\n\t# Divide the file in blocks\n\t# Send the blocks to the data servers\n\n\t# Fill code\t\n\t\n\t#We calculate the blocksizes we will be dealing with\n\t\n\tfile_blocks = file_size / len (server)\n\t\n\tleftover_blocks = file_size % len(server) # we use the module in order to get any extra data that isn't perfectly divisible\n\t\n\tblock_list = [] # We create the block list\n\t\n\topen_file.seek(0)\n\n\t# for loop in which we will be used to send the block information through the socket.\n\n\t \n\tfor x, node in enumerate(server):\n\t\t\n\t\tblock_info = open_file.read(file_blocks)\n\t\tif x == len(server) -1:\n\t\t\n\t\t\tblock_info += open_file.read(leftover_blocks)\n\t\t\tfile_blocks += leftover_blocks\n\t\t\n\t\tcreate_packet.BuildPutPacket(fname, file_blocks)\n\t\t\n\t\treceive_data, the_socket = connection(create_packet.getEncodedPacket(),node[0], node[1])\n\t\t\n\t\tif receive_data:\n\t\t\n\t\t\tthe_socket.sendall(block_info)\n\t\t\t\n\t\telse:\n\t\t\tprint \"node error\"\n\t\t\n\t\tblock_list.append((node[0], str(node[1]), receive_data))\n\t\tthe_socket.close()\n\t\t\n\t# Notify the metadata server where the blocks are saved.\n\n\t# Fill code\n\t\n\t\n\tcreate_packet.BuildDataBlockPacket(fname, block_list)\n\t\n\tworked, the_socket = connection( create_packet.getEncodedPacket(), address[0], address[1])\n\t\n\t#if it occured then close\n\t\n\tthe_socket.close()\n\t\n\tworked_2 = int(worked)\n\t\n\tif worked_2:\n\t\n\t\tpass\n\t\n\telse:\n\t\tprint(\"Error occured\")\n\t\tsys.exit(1)\n\t\t\n\topen_file.close()", "title": "" }, { "docid": "e98394c5e3bccae2ee95902cc3125678", "score": "0.52128345", "text": "def deploy_hosts(): \n put('./dist/hosts', '/etc/hosts')", "title": "" }, { "docid": "1c2363cde6f15e1b86a2b3d0985f8150", "score": "0.5208328", "text": "def newaddr(moniker):\r\n asset = get_asset_definition(moniker)\r\n addr = controller.get_new_address(asset)\r\n return addr.get_address()", "title": "" }, { "docid": "7fd2739a8a804eb9e9ec06d5ce567068", "score": "0.52080804", "text": "def location_add(self, part_path=None, config_path=None):\n path_err = \"Path to location part (part_path) must be of 
string type\"\n config_err = \"Path to NGINX router file (config_path) must be of string type\"\n\n self.str_check(part_path, path_err)\n self.str_check(config_path, config_err)\n # skip if it's not present (not an error b/c database and cache are always not present)\n if os.path.isfile(part_path) is False:\n return None\n # read the part data and the nginx config default data\n with open(part_path, 'r') as part_file:\n part_data = part_file.readlines()\n with open(config_path, 'r') as file:\n cur_config = file.readlines()\n # delete the bracket at the end of the nginx config\n del cur_config[-1]\n # add the new content to the NGINX config at the end\n # and readd the bracket\n cur_config.extend(part_data)\n cur_config.append('}')\n # write the data to the new NGINX config file\n with open(config_path, 'w') as new_config:\n new_config.writelines(cur_config)", "title": "" }, { "docid": "22bb40acd4965f9e6425ce57e319431c", "score": "0.520681", "text": "def create_server_context_ip_addrs(self):\n self.cloud_obj.create_server_context_ip_addrs(self)", "title": "" }, { "docid": "010c33dba636e07524cfe0739763bd64", "score": "0.51809055", "text": "def run(self, node, client, usePrivateIps):\n etcHosts = EtcHosts.fromString(client.read(\"/etc/hosts\"))\n for hostname in self.hostnames:\n etcHosts.add(hostname, self.ip, replace=True)\n\n entry = etcHosts.toString()\n client.put(\"/etc/hosts\", contents=entry)\n\n return node", "title": "" }, { "docid": "1f8eace0ff8d00d0b2e56abc5834b40d", "score": "0.5176542", "text": "def add(host, user):\n prepare_server(host, user)\n store_host(dict(host=host, user=user))", "title": "" }, { "docid": "5dacf7f2692787b2022281411de5d268", "score": "0.51713103", "text": "def copy_file_to_webserver(ip):\n ssh = SSHClient()\n username = getpass.getuser()\n\n ssh.set_missing_host_key_policy(AutoAddPolicy()) \n ssh.load_system_host_keys()\n\n key_path= \"/home/\" + username + \"/.ssh/inframindwebserver.pem\"\n\n ssh.connect(ip, username='ubuntu', key_filename=key_path)\n\n scp = SCPClient(ssh.get_transport())\n scp.put('../synergy/includes/connect.inc.php',\n '/var/www/html/inframind/synergy/includes')\n\n ssh.close()", "title": "" }, { "docid": "5c957430c6e488aab34b91d4c89874d9", "score": "0.5169415", "text": "def addRemote(self, x, y, z, direction, id, name, avaNum) :\n\t\tif id in self.remoteMap:\n\t\t\tself.updateRemote(x,y,z,direction,id, name); return\n\t\tif len(self.freeRemotes) == 0 :\n\t\t\tsys.stderr.write(\"PandaWorld.addRemote: no unused \" + \\\n\t\t\t\t\t \"remotes\\n\")\n\t\t\tsys.exit(1)\n\t\tremote = self.freeRemotes.pop()\n\t\tself.remoteMap[id] = [remote, True ,\n\t\t\t\tOnscreenImage(image = 'models/dot1.png', \\\n\t\t\t\t\t\tpos = (0,0,0), scale = 0)\n\t\t\t\t]\n\t\t\"\"\"\n\t\t\t\tOnscreenImage(image = 'models/dot1.png', \\\n\t\t\t\t\t\tpos = (0,0,0), scale = 0),\\\n\t\t\t\t\t\tNone ### what's this for?\n\t\t\t\t]\n\t\t\"\"\"\n\n\t\t# set position and direction of remote and make it visible\n\t\tremote.reparentTo(render)\n\t\tremote.setPos(x, y, z) ### \n\t\tremote.setHpr(direction, 0, 0) \n\n\t\tremote.loop(\"walk\")", "title": "" }, { "docid": "f275800c6984dccdc6edb16b18794810", "score": "0.5133536", "text": "def add_backup_location(self, cli_args):\n backup = self.get_backup(cli_args['name'])\n backup.add_backup_location(cli_args['path'])\n backup.save()\n print(\"backup location added.\")", "title": "" }, { "docid": "0b5e3ba74c08ee371409e355b63e5590", "score": "0.5124109", "text": "def set_dynhost_ip(ip):\n # Get the conf\n path, subdomain = 
get_conf()\n\n if not path or not subdomain:\n logger.error(\"No path or subdomain!\")\n return False\n\n params = {\"ip\": ip, \"subDomain\": subdomain}\n\n client = ovh.Client()\n\n try:\n client.put(path['update'], **params)\n client.post(path['refresh'])\n except ovh.exceptions.NotGrantedCall, error:\n logger.error(\"OVH Not Granted Call: %s\", error)\n return False\n return True", "title": "" }, { "docid": "cc49de539bef1d23b1400c6c7dd72bd2", "score": "0.512068", "text": "def InsertIP(self, proxy):\n sql = \"\"\"replace into ip values ('%s','%s' ,'%s') \"\"\" \\\n % (time.strftime('%Y-%m-%d %X', time.localtime(time.time())), proxy.keys()[0], proxy.values()[0])\n self.cursor.execute(sql)\n self.con.commit()", "title": "" }, { "docid": "2539a64e3987b58d075a7fdc02c5abc9", "score": "0.51179814", "text": "def add_remote(self, remote_url:str, endpoint_url:str):\n\n if \"remote\" in self.config:\n print(\"ERROR: Remote storage backend in `lazydata.yml` already exists. Aborting...\")\n else:\n # Setting the remote config automatically sets the endpoint parameter, even if it is None\n self.config[\"remote\"] = remote_url\n self.config[\"endpoint\"] = endpoint_url\n self.save_config()", "title": "" }, { "docid": "1b7e529963f41599dcb477327fd88d88", "score": "0.5094204", "text": "def assign_floating_ip(server_name):\n\n server = False\n for s in nc.servers.list():\n if s.name == server_name:\n server = s\n\n if server:\n ip = get_floating_ip()\n if ip:\n server.add_floating_ip(ip)\n return ip\n\n return False", "title": "" }, { "docid": "997127cb6a6f20989c883d015e71bfcd", "score": "0.5092524", "text": "def assign_ip_petitboot(self):\n self.console.run_command(\"stty cols 300\")\n self.console.run_command(\"stty rows 30\")\n # Lets reduce timeout in petitboot\n self.console.run_command(\"nvram --update-config petitboot,timeout=10\")\n cmd = \"ip addr|grep -B1 -i %s|grep BROADCAST|awk -F':' '{print $2}'\" % self.conf.args.host_mac\n iface = self.console.run_command(cmd)[0].strip()\n cmd = \"ifconfig %s %s netmask %s\" % (iface, self.host.ip, self.conf.args.host_submask)\n self.console.run_command(cmd)\n cmd = \"route add default gateway %s\" % self.conf.args.host_gateway\n self.console.run_command_ignore_fail(cmd)\n cmd = \"echo 'nameserver %s' > /etc/resolv.conf\" % self.conf.args.host_dns\n self.console.run_command(cmd)", "title": "" }, { "docid": "05b17601e869567a1c5394e83088965b", "score": "0.5074616", "text": "def myput(self,*args):\n print(\"source , remote file\")\n self.sftp.put(args[1],args[2])", "title": "" }, { "docid": "2f0f6ebdbb401e95e70116eeeb92fcc5", "score": "0.50651664", "text": "def add_host_entry_on_minion(self, vm_minion_id):\n logger.debug(\"{} add_host_entry_on_minion: parameters : {} \".format(BACKUP_LOG_ID, vm_minion_id))\n host_entry_response = {}\n host_entry_script_path = get_config(KEY_NETWORKER_CLIENT, \"HOST_ENTRY_SCRIPT_PATH\")\n salt_api = SaltNetAPI()\n networker_list = get_config(KEY_NETWORKER_SERVER, \"NETWORKER_SERVERS\")\n datadomain_list = get_config(KEY_NETWORKER_SERVER, \"DATADOMAIN_SERVERS\")\n domain_name = get_config(KEY_NETWORKER_SERVER, \"DOMAIN_NAME\")\n pillar_nw_dd_host_entry = \"nw_dd_host_entry\"\n pillar_details = {\"pillar\": {\"nw_dd_fqdn_entry\": domain_name,\n pillar_nw_dd_host_entry: {}}}\n\n # Adding pillar details in 'ip' as a key and 'host' as a value.\n for networker in networker_list:\n url_obj = urlparse(networker[\"url\"])\n pillar_details[\"pillar\"][pillar_nw_dd_host_entry][url_obj.hostname] = networker[\"hostname\"]\n for datadomain 
in datadomain_list:\n pillar_details[\"pillar\"][pillar_nw_dd_host_entry][datadomain[\"ip\"]] = datadomain[\"hostname\"]\n\n try:\n host_entry_api_response = salt_api.execute_command(vm_minion_id,\n args=host_entry_script_path,\n pillar_details=pillar_details)\n host_entry_response['status'] = False\n if host_entry_api_response is None:\n host_entry_response['comment'] = 'Unable to add host entry on VM'\n return host_entry_response\n\n if 'status' not in host_entry_api_response or \\\n 'comment' not in host_entry_api_response:\n host_entry_response['comment'] = 'Response received after executing the salt ' \\\n 'add host entry api command is not proper'\n return host_entry_response\n if not host_entry_api_response['status']:\n host_entry_response['comment'] = host_entry_api_response['comment']\n return host_entry_response\n logger.info(\"{} Response received after executing \"\n \"adding host entry on minion\".format(BACKUP_LOG_ID))\n logger.debug(\"{} Response for Adding host entry on minion {}\"\n .format(BACKUP_LOG_ID, str(host_entry_api_response['comment'])))\n host_entry_response = self.response_parser.parse_add_host_entry_script_response(\n host_entry_api_response['comment'])\n return host_entry_response\n\n except Exception as e:\n message = \"Unknown exception - {}\".format(e)\n logger.debug(\"{} Exception :{}\".format(BACKUP_LOG_ID, message))\n raise Exception(message)", "title": "" }, { "docid": "6fff6abdd238808a85beb2be301bb551", "score": "0.5051164", "text": "def attach_floating_ip(self, instance_id, floating_ip):\n\n try:\n self.nova_client.servers.add_floating_ip(server=instance_id, address=floating_ip)\n\n except Exception as error:\n # TODO: Change when logging is added\n if APP.config['DEBUG']:\n print('Failed with associating: ' + str(error))", "title": "" }, { "docid": "0cfee355372328e23d1288c91aa98dd9", "score": "0.50429964", "text": "def set_hostfile():\n global host_file\n if local_os.lower() == 'linux':\n host_file = LINUX_HOSTPATH\n elif local_os.lower() == 'darwin':\n host_file = OSX_HOSTPATH\n elif local_os.lower() == 'windows':\n host_file = os.environ['WINDIR'] + WIN_HOSTPATH\n else:\n print (u\"Unrecognized host OS\")\n exit()", "title": "" }, { "docid": "a9d7b4477e4cbc3411bceae79d2e1241", "score": "0.5040474", "text": "def add_ip_to_known_ips(self, ip):\n if not self.is_ip_known(ip):\n self.known_ips.append(ip)", "title": "" }, { "docid": "698fff995abd9efcab43da2de4e14725", "score": "0.5033448", "text": "def remote_nfs_jail(rpath, jpath):\n jails = load_jail_config(jailconf)[0]\n jpath = ospath(jpath)\n brpath = \"None\"\n dt = str(datetime.now())\n\n# create temp nfs mount folder\n tmp_nfs = '/tmp/jadm_nfs_%s' % rpath.replace(':', '')\n nfshost = rpath.split(':')\n\n try:\n log(0, \"create temp folder '%s'\" % tmp_nfs)\n subprocess.call(['mkdir', '-p','%s' % tmp_nfs])\n except:\n log(1, \"'%s' already exist\" % tmp_nfs)\n pass\n# try to mount remote path\n try:\n log(0, \"try to mount '%s' in '%s'\" % (rpath, tmp_nfs))\n subprocess.call(['mount_nfs', rpath, tmp_nfs])\n except:\n log(2, \"'%s' cannot be mounted in '%s'\" % (rpath, tmp_nfs))\n log(1, \"remove '%s'\" % (tmp_nfs))\n return False\n\n# try to find local jail.conf\n i_jconf = '%s/etc/jail.conf' % tmp_nfs\n if os.path.isdir('%s/rw' % tmp_nfs) == True:\n i_jconf = '%s/rw/etc/jail.conf' % tmp_nfs\n\n if os.path.isfile(i_jconf) == False:\n log(2, \"'%s/etc/jail.conf' cannot be found!\" % tmp_nfs)\n return False\n\n# CREATE REMOTE JAIL\n i_jail = load_jail_config(i_jconf)[0][0]\n i_jail['install'] = 
['nfs']\n i_jail['path'] = \"%s/%s/%s\" % (jpath, nfshost[0], i_jail['name'])\n\n# check if remote jail already exist on localhost\n # for x in jails:\n # for y in ['name', 'jid', '$jip', 'path']:\n # if x[y] == i_jail[y]:\n # log(2, \"jail with %s '%s' already exist (jail: %s) !\" % (y, i_jail[y], x['name']))\n # return False\n\n if 'vnet' not in i_jail.keys():\n i_jail['vnet'] = 0\n\n print i_jail\n\n# create folders\n try:\n subprocess.call(['mkdir', '-p', \"%s/mnt\" % i_jail['path']])\n subprocess.call(['mkdir', '-p', \"%s/%s/%s/rw/etc/\" % (jpath, nfshost[0], i_jail['name'])])\n log(0, \"jadm create %s/%s/%s/{rw/etc/, mnt/} folders\" % (jpath, nfshost[0], i_jail['name']))\n except:\n log(2, \"jadm cannot create %s/%s/%s/{rw/etc/, mnt/} folders\" % (jpath, nfshost[0], i_jail['name']))\n return False\n\n# create remote jail fstab file\n os.system('echo \\\"%s %s/mnt %s\\\" > %s/%s/%s/rw/etc/fstab' % (rpath, i_jail['path'], remnfs, jpath, nfshost[0], i_jail['name']))\n log(0, \"%s/%s/%s/rw/etc/fstab was created\" % (jpath, nfshost[0], i_jail['name']))\n# create remote info file\n os.system('echo \\\"nfs location: %s \\\" > %s/%s/%s/remote.info.jadm' % (rpath, jpath, nfshost[0], i_jail['name']))\n os.system('echo \\\"nfs BASE: %s \\\" >> %s/%s/%s/remote.info.jadm' % (brpath, jpath, nfshost[0], i_jail['name']))\n os.system('echo \\\"date added: %s \\\" >> %s/%s/%s/remote.info.jadm' % (dt, jpath, nfshost[0], i_jail['name']))\n\n # os.system('echo \\\"%s/rw %s/mnt/SROOT nullfs rw 0 0\\\" >> %s/etc/fstab' % (rpath, jpath, i_jail['path']))\n\n update_jail_conf(['new_jail', [i_jail], 'create'])\n log(0, \"jail '%s' was successful imported from '%s'!\" % (i_jail['name'], rpath), 1)\n\n# unmount and remove tempoary folder\n try:\n log(0, \"unmount '%s' and remove '%s'\" % (rpath, tmp_nfs))\n subprocess.call(['umount', tmp_nfs])\n return True\n except:\n log(2, \"cannot unmount '%s' and remove '%s'\" % (rpath, tmp_nfs))\n return False", "title": "" }, { "docid": "211e2cd05c9140f591ecd8452e6e39c0", "score": "0.50332314", "text": "def add_remote(self, remote_url):\n\n remote_name = self.utility_var\n self.utility_var = None\n out, err = pyautogit.commands.git_add_remote(remote_name, remote_url)\n self.show_command_result(out, err, show_on_success=False, command_name=\"Add Remote\", error_message=\"Failed to add remote\")\n self.remotes_menu.clear()\n self.refresh_status()", "title": "" }, { "docid": "8d6f248c335278f438fe2a6473162a44", "score": "0.5030024", "text": "def copyFromDFS(address, fname, path):\n\n\tcreate_socket = socket.socket(socket.AF_INET , socket.SOCK_STREAM) # We create the socket\n\t\n #try for connection\n\n\ttry:\n\t\tcreate_socket.connect((address[0] , address[1]))\n\texcept socket.error , e:\n\t\n\t\tprint ( \"Connection to the server failed. Error: \" + str(e) )\n\t\tsys.exit(1)\n\tprint ( \"Connection to the data server was completed with no errors!\" )\n \t# Contact the metadata server to ask for information of fname\n\t\n\t# Fill code\n\n\t# try will create a packet and a socket. EncodedPacket is sent throught the socket. 
We get a reply.\n\t# Decode the packet \n\t# Get the data nodes from the packet\n\t\n\ttry: \n\t\tcreated_packet = Packet()\n\t\t\n\t\tcreated_packet.BuildGetPacket(fname)\n\t\t\n\t\tcreate_socket.sendall(created_packet.getEncodedPacket())\n\t\t\n\t\treply = create_socket.recv(1024)\n\t\t\n\t\tprint(\"Reply is:\")\n\t\tprint(reply)\n\n\t\tcreated_packet.DecodePacket(reply)\n\t\tprint(\"after decoding:\")\n\t\tprint(reply)\n\t\t\n\t\tdata_nodes = created_packet.getDataNodes()\n\t\t\n\t\tif reply == \"NFOUND\":\n\t\t\tprint(\" File was not found in the server. System will now exit.\")\n\t\t\tsys.exit(1)\n\tfinally:\n\t\n\t\tcreate_socket.close() # used socket in the try was used\n\n\n\n\t# If there is no error response Retreive the data blocks\n\n\t# Fill code\n\t\n\t\n\topen_file = open(path , 'w') #open the file\n\n\t# for loop we create variables that will hold the node's location, the port being used by the node and the block id of it.\n\t# using the packet that was created we get the datablock packet by using the id.\n\n\t# we use the function connection which we created earlier in order to get a reply and a socket to use for communication\n\n\t# we get the size and the block data from the reply\n\t\n\tfor x in data_nodes:\n\t\n\t\tnode_location = x[0]\n\t\t\n\t\tnode_port_used = x[1]\n\t\t\n\t\tretrieved_block_id =x[2]\n\t\t\n\t\tcreated_packet.BuildGetDataBlockPacket(retrieved_block_id)\n\t\t\n\t\treply , connection_socket = connection(created_packet.getEncodedPacket(), node_location, node_port_used)\n\t\t\n\t\tsize, block_data = reply.split('|', 1)\n\t\t\n\t\t\n\t\twhile len(block_data) < int(size):\n\t\t\t\n\t\t\tblock_data += connection_socket.recv(1024)\n\t\t\t\n \t# Save the file\n\t\n\t\topen_file.write(block_data)\n\t\t\n\t\t\n\topen_file.close()\n\t\n\tconnection_socket.close()\n\t\n\t\n\t# Fill code", "title": "" }, { "docid": "d84badfa124cc4575bc96e2d62abc35a", "score": "0.5015055", "text": "def create_virtual_ip(self, virtualserver, serverfarm):\r\n if not bool(virtualserver['id']):\r\n LOG.error('Virtualserver name is empty')\r\n return\r\n frontend = HaproxyFronted(virtualserver)\r\n backend = HaproxyBackend(serverfarm['id'])\r\n LOG.debug('Create VIP %s' % backend.name)\r\n self.remote_interface.add_ip(frontend)\r\n self.config_manager.add_frontend(frontend,\r\n backend)", "title": "" }, { "docid": "f65e0cf49b0b1ffe4a64605c7e58f3d6", "score": "0.50144494", "text": "def addHost(self,myaddress=\"00:00:00:00:00:00\",remoteaddress=\"00:00:00:00:00:00\",key=\"00000000000000000000\",chained=False,chainIndex=0,name=\"commonName\"):\n SAId = self._addSA(address=myaddress,key=key,chained=chained,chainIndex=chainIndex,name=name)\n newHost = host(address=remoteaddress,name=name,SAId=SAId)\n self.hostList.append(newHost)\n return newHost", "title": "" }, { "docid": "eddc3ae5910ea5a40c3222a1249f1fe2", "score": "0.50124335", "text": "def get_ip_add():\n r = requests.get(r'http://jsonip.com')\n ip= r.json()['ip']\n print 'Your IP is', ip\n return ip", "title": "" }, { "docid": "6442adb66ff605cb04c7086582227a71", "score": "0.5009091", "text": "def setup_address(self):\n address = os.environ.get('ADDRESS')\n\n if address:\n self.address = address\n else:\n self.address = str(self.ip) + ':' + str(self.port)", "title": "" }, { "docid": "488e8a71a8f4da0882f6217574215351", "score": "0.5005289", "text": "def host_add(token, address, port, ihost_mtce, timeout):\n\n # api_cmd = \"http://localhost:2112\"\n api_cmd = \"http://%s:%s\" % (address, port)\n api_cmd += \"/v1/hosts/\"\n\n 
api_cmd_headers = dict()\n api_cmd_headers['Content-type'] = \"application/json\"\n api_cmd_headers['User-Agent'] = \"sysinv/1.0\"\n\n api_cmd_payload = dict()\n api_cmd_payload = ihost_mtce\n\n LOG.info(\"host_add for %s cmd=%s hdr=%s payload=%s\" %\n (ihost_mtce['hostname'],\n api_cmd, api_cmd_headers, api_cmd_payload))\n\n response = rest_api_request(token, \"POST\", api_cmd, api_cmd_headers,\n json.dumps(api_cmd_payload), timeout)\n\n return response", "title": "" }, { "docid": "18a9c57fd63182ee1337a07f9e038b7f", "score": "0.49976736", "text": "def add_nat(client=\"\", server=None, ip=None, port=None, dport=None, dir=\"in\"):\n\tloginfo = Util.get_callee(3)\n\tif not server or not ip:\n\t\traise PyromanException(\"Server not specified for NAT (server: %s, ip: %s) at %s\" % (server, ip, loginfo))\n\t# special case: \"out\" NAT type\n\tif dir==\"out\":\n\t\t(client, server) = (server, client)\n\tFirewall.nats.append(nat.Nat(client, server, ip, port, dport, dir, loginfo))", "title": "" }, { "docid": "6b82017b20fc75ac8351711ec3dc6425", "score": "0.4991669", "text": "def _copy_to_remote(\n self, local_path: Union[str, pathlib.Path], remote_path: Union[str, pathlib.Path]\n ):\n\n _check_call_verbose(self._adb_device_sub_cmd + [\"push\", str(local_path), str(remote_path)])", "title": "" }, { "docid": "90af686f008b0df18a6ec43e7c05a288", "score": "0.49881327", "text": "def _copy_to_remote(\n self, local_path: Union[str, pathlib.Path], remote_path: Union[str, pathlib.Path]\n ):\n _check_call_verbose([\"cp\", str(local_path), str(remote_path)])", "title": "" }, { "docid": "214173802f70a5b7cb80472cc75519e9", "score": "0.49783742", "text": "def remote_init(connection: str):\n\n try:\n load_cwd_workspace_config()\n click.secho(\"A configured workspace already exists in the current directory.\", fg=\"yellow\")\n click.secho(\"If you want to add a new host to it, please use remote-add.\", fg=\"yellow\")\n sys.exit(1)\n except RemoteError:\n # we expect it to fail. 
It means we don't overwrite an existing workspace\n pass\n\n config = WorkspaceConfig.empty(Path.cwd())\n _add_remote_host(config, connection)\n\n # help out with .gitignore if we are in a git repository\n if not (config.root / \".git\").exists():\n return\n\n # make sure we don't keep adding to .gitignore\n gitignore = config.root / \".gitignore\"\n if gitignore.exists():\n for line in gitignore.read_text().splitlines():\n if line.startswith(\".remote\"):\n return\n\n with gitignore.open(\"a\") as f:\n f.write(\"\\n\")\n f.write(\".remote*\")\n f.write(\"\\n\")\n\n click.echo(\"Added '.remote*' to .gitignore\")", "title": "" }, { "docid": "10324f54b7d46342cdabb7a9276b3984", "score": "0.49771866", "text": "def configure_remote(self, remote):\n for previous_remote in self.remotes:\n if previous_remote.name == remote.name:\n self.update_remote(previous_remote, remote)\n return\n self.remotes.append(remote)", "title": "" }, { "docid": "7ec9707b194b585d544214082596e820", "score": "0.49633932", "text": "def _add_vagrantfiles(host, archive):\n archive.write_text(\"Vagrantfile\", _expand_vagrantfile_template(host))", "title": "" }, { "docid": "e4ec23b65a6b98561a937b0421de2b6a", "score": "0.49611908", "text": "def command_newaddr(self, **kwargs):\r\n asset = self.get_asset_definition(kwargs['moniker'])\r\n addr = self.controller.get_new_address(asset)\r\n print (addr.get_color_address())", "title": "" }, { "docid": "25dcb1556951485763664ff3e5ed3d3c", "score": "0.4953124", "text": "def assign_loopback_ip(module, loopback_address):\n global CHANGED_FLAG\n output = ''\n address = loopback_address.split('.')\n static_part = str(address[0]) + '.' + str(address[1]) + '.'\n static_part += str(address[2]) + '.'\n\n cli = pn_cli(module)\n clicopy = cli\n switch_list = list(module.params['pn_spine_list'])\n switch_list += module.params['pn_leaf_list']\n\n vrouter_count = 1\n for switch in switch_list:\n vrouter = switch + '-vrouter'\n ip = static_part + str(vrouter_count)\n\n cli = clicopy\n cli += ' vrouter-loopback-interface-show ip ' + ip\n cli += ' format switch no-show-headers '\n existing_vrouter = run_cli(module, cli).split()\n\n if vrouter not in existing_vrouter:\n cli = clicopy\n cli += ' vrouter-loopback-interface-add vrouter-name '\n cli += vrouter\n cli += ' ip ' + ip\n run_cli(module, cli)\n output += ' %s: Added loopback ip %s to %s \\n' % (\n switch, ip, vrouter\n )\n CHANGED_FLAG.append(True)\n else:\n output += ' %s: Loopback ip %s for %s already exists \\n' % (\n switch, ip, vrouter\n )\n\n vrouter_count += 1\n\n return output", "title": "" }, { "docid": "feb1646434b09c66ac7a95969ae916e4", "score": "0.4950681", "text": "def jadm_postinstall(new_jail_path):\n# simple echo to new jail /etc/ config\n os.system(\"echo '# Added by JADM' >> %s\" % ospath(new_jail_path+\"/etc/rc.conf\"))\n os.system(\"echo 'sendmail_enable=\\\"NONE\\\"' >> %s\" % ospath(new_jail_path+\"/etc/rc.conf\"))\n os.system(\"echo 'firewall_enable=\\\"YES\\\"' >> %s\" % ospath(new_jail_path+\"/etc/rc.conf\"))\n os.system(\"echo 'firewall_script=\\\"/etc/rc.firewall\\\"' >> %s\" % ospath(new_jail_path+\"/etc/rc.conf\"))\n os.system(\"echo 'firewall_type=\\\"OPEN\\\"' >> %s\" % ospath(new_jail_path+\"/etc/rc.conf\"))\n os.system(\"touch %s\" % ospath(new_jail_path+\"/etc/resolv.conf\"))\n os.system(\"echo 'nameserver 8.8.8.8' >> %s\" % ospath(new_jail_path+\"/etc/resolv.conf\"))\n log(0, \"postinstall was completed!\")", "title": "" }, { "docid": "260cfe7db2d53ce521798c7be48dc8f2", "score": "0.49431068", "text": "def 
auto_configure_link_ips(module):\n spine_list = module.params['pn_spine_list']\n leaf_list = module.params['pn_leaf_list']\n fabric_loopback = module.params['pn_assign_loopback']\n supernet = module.params['pn_supernet']\n output = ''\n\n cli = pn_cli(module)\n clicopy = cli\n cli += ' fabric-node-show format name no-show-headers '\n switch_names = run_cli(module, cli).split()\n switch_names = list(set(switch_names))\n\n # Disable auto trunk on all switches.\n for switch in switch_names:\n modify_auto_trunk_setting(module, switch, 'disable')\n\n # Get the list of available link ips to assign.\n if supernet == '127':\n available_ips = calculate_link_ip_addresses(module.params['pn_net_address'],\n module.params['pn_cidr'],\n supernet, 2)\n else:\n available_ips = calculate_link_ip_addresses(module.params['pn_net_address'],\n module.params['pn_cidr'],\n supernet, 3)\n\n # Get the fabric name and create vnet name required for vrouter creation.\n cli = clicopy\n cli += ' fabric-node-show format fab-name no-show-headers '\n fabric_name = list(set(run_cli(module, cli).split()))[0]\n vnet_name = str(fabric_name) + '-global'\n\n # Create vrouter on all switches.\n for switch in switch_names:\n output += create_vrouter(module, switch, vnet_name)\n\n for spine in spine_list:\n for leaf in leaf_list:\n cli = clicopy\n cli += ' switch %s port-show hostname %s ' % (leaf, spine)\n cli += ' format port no-show-headers '\n leaf_port = run_cli(module, cli).split()\n leaf_port = list(set(leaf_port))\n\n if 'Success' in leaf_port:\n continue\n\n while len(leaf_port) > 0:\n try:\n ip_list = available_ips.next()\n except:\n msg = 'Error: ip range exhausted'\n results = {\n 'switch': '',\n 'output': msg\n }\n module.exit_json(\n unreachable=False,\n failed=True,\n exception=msg,\n summary=results,\n task='L3 ZTP',\n msg='L3 ZTP failed',\n changed=False\n )\n\n lport = leaf_port[0]\n ip = (ip_list[0] if supernet == '127' else ip_list[1])\n delete_trunk(module, leaf, lport, spine)\n output += create_interface(module, leaf, ip, lport)\n\n leaf_port.remove(lport)\n\n ip = (ip_list[1] if supernet == '127' else ip_list[2])\n cli = clicopy\n cli += ' switch %s port-show port %s ' % (leaf, lport)\n cli += ' format rport no-show-headers '\n rport = run_cli(module, cli).split()\n rport = list(set(rport))\n rport = rport[0]\n\n delete_trunk(module, spine, rport, leaf)\n output += create_interface(module, spine, ip, rport)\n\n if fabric_loopback:\n # Assign loopback ip to vrouters.\n output += assign_loopback_ip(module, module.params['pn_loopback_ip'])\n\n for switch in switch_names:\n # Enable auto trunk.\n modify_auto_trunk_setting(module, switch, 'enable')\n\n return output", "title": "" }, { "docid": "3960e4a994402275ee083c56b1aadfe4", "score": "0.4938014", "text": "def backup_locally():\n local('rsync -r -a -v -e \"ssh -l %s\" --delete %s:\"%s\" \"%s\"' % (env.user, env.host, remote_dir_to_backup, local_backup_dir))", "title": "" }, { "docid": "5d647635b56f76d5afcc8c82b9646eb5", "score": "0.49373567", "text": "def upload_hosts():\n \n # The informations in /etc/hosts when we deploy a VM deploying a VM\n # are not in the expect order. 
This generates troubles with the load-balancer \n # in apache-tomcat-connector\n #\n # Change (@IP, fqdn, hostname) by (@IP, hostname, fqdn)\n file_host = '/etc/hosts'\n hostname = env.host\n \n # create dict to render the template\n d = dict(\n hostname = hostname,\n ip = socket.gethostbyname(hostname),\n fqdn = '.'.join([hostname, 'priv.atos.fr']),\n )\n\n template = os.path.join(TEMPLATE_DIR, 'system', 'etc_hosts')\n dest = '/etc/hosts'\n\n upload_template(template, dest, context=d, use_sudo=True, mode=0644)", "title": "" }, { "docid": "43d15401c326ae2a26f0a8d9518543e8", "score": "0.49213108", "text": "def addHostCfg( main ):\n import json\n hostCfg = { }\n with open( main.dependencyPath + \"/json/extra.json\" ) as template:\n hostCfg = json.load( template )\n main.pingChart[ 'ip' ][ 'hosts' ] += [ 'in1' ]\n main.step( \"Pushing new configuration\" )\n mac, cfg = hostCfg[ 'hosts' ].popitem( )\n main.RESTs[ main.active ].setNetCfg( cfg[ 'basic' ],\n subjectClass=\"hosts\",\n subjectKey=urllib.quote( mac,\n safe='' ),\n configKey=\"basic\" )\n main.pingChart[ 'ip' ][ 'hosts' ] += [ 'out1' ]\n main.step( \"Pushing new configuration\" )\n mac, cfg = hostCfg[ 'hosts' ].popitem( )\n main.RESTs[ main.active ].setNetCfg( cfg[ 'basic' ],\n subjectClass=\"hosts\",\n subjectKey=urllib.quote( mac,\n safe='' ),\n configKey=\"basic\" )\n main.pingChart.update( { 'vlan1': { \"expect\": \"True\",\n \"hosts\": [ \"olt1\", \"vsg1\" ] } } )\n main.pingChart[ 'vlan5' ][ 'expect' ] = 0\n main.pingChart[ 'vlan10' ][ 'expect' ] = 0\n ports = \"[%s,%s]\" % (5, 6)\n cfg = '{\"of:0000000000000001\":[{\"vlan\":1,\"ports\":%s,\"name\":\"OLT 1\"}]}' % ports\n main.RESTs[ main.active ].setNetCfg( json.loads( cfg ),\n subjectClass=\"apps\",\n subjectKey=\"org.onosproject.segmentrouting\",\n configKey=\"xconnect\" )", "title": "" }, { "docid": "b1e2e620325ad0acf965da95f613e9b4", "score": "0.49208212", "text": "def Update(self):\n\n\t\treturn(clc.v2.Requests(clc.v2.API.Call('PUT','servers/%s/%s/publicIPAddresses/%s' % (self.parent.server.alias,self.parent.server.id,self.id),\n\t\t\t\t\t\t json.dumps({'ports': [o.ToDict() for o in self.ports],\n\t\t\t\t\t\t\t\t\t\t\t 'sourceRestrictions': [o.ToDict() for o in self.source_restrictions] }),\n\t\t\t\t\t\t\t\t\t\t\t session=self.session),\n\t\t\t\t\t\t\t alias=self.parent.server.alias,\n\t\t\t\t\t\t\t session=self.session))", "title": "" }, { "docid": "9c339cff1eb6e2a1d6d89127ec593fde", "score": "0.49180064", "text": "def interface_add_ips(self, interface: netobjects.Interface):\n\n for item_ip in self.get_ips():\n if interface.name == item_ip['interface']:\n ipaddr = netobjects.Ip()\n ipaddr.ipaddr = item_ip['ip']\n ipaddr.mask = item_ip['mask']\n ipaddr.brd = item_ip['broadcast']\n ipaddr.parent = interface\n interface.add_ip(ipaddr)", "title": "" }, { "docid": "60694a27ff025abdf3ac39356b368d3b", "score": "0.4882105", "text": "def run_command(step, command):\n command = test_define_value(command)[0]\n dhcp.add_line_in_global(command)\n\n # new configuration system: TODO figure out what is going on with this one", "title": "" }, { "docid": "8f5dcb7a557498cfd098d18cf510e160", "score": "0.48781297", "text": "def on_serverbtn_clicked(self, widget, data=None):\n\n errmsg = ''\n ret, info = self.checkargs()\n if ret:\n self.save_server_config()\n self.ServerIP = self.entbuf_url.get_text().strip()\n self.btn_upgrade_save.emit(\"clicked\")\n else:\n errmsg += info\n if errmsg:\n #errmsg += ' (配置错误! 
将恢复默认)'\n msgdia_error = settingerrordialog.Error(errmsg)\n msgdia_error.run()", "title": "" }, { "docid": "f6b7a1e0bb68de6a800b4ef39f25ba1c", "score": "0.48680562", "text": "def AddInAddr(self, addr, port):\r\n self._in_connects.append('%s:%i'%(addr, port))", "title": "" }, { "docid": "a4c093ae987d475bda6b0afca638d450", "score": "0.4866534", "text": "def add_access_config(self, ip_address=''):\n self.logger.info('Add external IP to instance {0}'.format(self.name))\n\n body = {'kind': 'compute#accessConfig',\n constants.NAME: self.ACCESS_CONFIG,\n 'type': self.ACCESS_CONFIG_TYPE}\n if ip_address:\n body['natIP'] = ip_address\n\n return self.discovery.instances().addAccessConfig(\n project=self.project,\n instance=self.name,\n zone=basename(self.zone),\n networkInterface=self.NETWORK_INTERFACE,\n body=body).execute()", "title": "" }, { "docid": "25e38937b221bf4ae960c671776a60f3", "score": "0.48602736", "text": "def dns():\n sudo('rm /etc/resolv.conf')\n sudo('echo \"nameserver 4.2.2.2\" >> /etc/resolv.conf')", "title": "" }, { "docid": "c1260b03ec148e366ff7a0ed56f9ae43", "score": "0.4859044", "text": "def TransferRemoteFiles(self) -> bool:", "title": "" }, { "docid": "9429b6939611849eca9c662b7969cff7", "score": "0.48454458", "text": "def add_ip(self, sds_id, sds_ip):\n\n action = 'addSdsIp'\n\n r, response = self.send_post_request(self.base_action_url,\n action=action,\n entity=self.entity,\n entity_id=sds_id,\n params=sds_ip)\n if r.status_code != requests.codes.ok:\n msg = ('Failed to add IP for PowerFlex {entity} '\n 'with id {_id}. Error: {response}'\n .format(entity=self.entity, _id=sds_id, response=response))\n LOG.error(msg)\n raise exceptions.PowerFlexClientException(msg)\n\n return self.get(entity_id=sds_id)", "title": "" }, { "docid": "4c41a35dc702e2b2d7b37062856d953f", "score": "0.48453373", "text": "def configure_vm_ip(self, vm_ref):\n if self.session.xenapi.VM.get_is_control_domain(vm_ref):\n log.debug(\"Client VM is Dom0... 
setup IP on bridge\")\n args = {'device': self.get_device_name(vm_ref)}\n\n if self.static_manager:\n args['mode'] = 'static'\n ip = self.static_manager.get_ip()\n args['ip_addr'] = ip.addr\n args['ip_netmask'] = ip.netmask\n else:\n args['mode'] = 'dhcp'\n \n host_ref = self.session.xenapi.VM.get_resident_on(vm_ref)\n call_ack_plugin(self.session,\n 'configure_local_device',\n args,\n host=host_ref)\n else:\n log.debug(\"Client VM is a droid VM, no need to configure an IP\")", "title": "" }, { "docid": "494495450678d09e1d7d9fd48eaf2aa2", "score": "0.48404902", "text": "def add_host(data, host):\n for i in range(len(data)):\n data[i]['image'] = f'http://{host}/{data[i][\"image\"]}'\n return data", "title": "" }, { "docid": "39eaf969c44c9ac3e9a5de2130046c93", "score": "0.48396143", "text": "def add_host(name, ip, iface, hostname=None):\n\tloginfo = Util.get_callee(3)\n\tif not hostname:\n\t\thostname = name\n\thost.Host(name, ip, iface, hostname, loginfo)", "title": "" }, { "docid": "57ec138979b92416f5ad58b54adab451", "score": "0.48373777", "text": "def write_gateway_loopback_ips(self):\n\n curl_gw_setup = self.args + self.gw_endpoint\n api_call = subprocess.Popen(curl_gw_setup, stdout=subprocess.PIPE)\n ip_list = self.__create_ip_list(api_call)\n fname = Path('gateway_loopback_ips.txt')\n if fname.is_file():\n fname.unlink()\n result = self.__write_to_file(fname, ip_list)\n return result", "title": "" }, { "docid": "693f2d96ce0593a7926cb899ae53a78b", "score": "0.4834525", "text": "def addServer(self, server, password):\n self.serversKnown[server] = password", "title": "" }, { "docid": "4d441b38dfc55bc2bffc7053e63fb040", "score": "0.48339483", "text": "def test_create_ip(monkeypatch):\n self.sonar(\"repo activate test_repo\")\n monkeypatch.setenv(\"SONAR_REPO\", str(repo_path))\n os.chdir(repo_path)\n exit_code = self.sonar(\"create ip ip_0\")\n ip_path = repo_path.joinpath(\"ip_0\")\n missing_files, extra_files = helper.check_filesystem(\n str(ip_path),\n [\n \"build\",\n \"build/bin\",\n \"cad\",\n \"hls\",\n \"src\",\n \"include\",\n \"testbench\",\n \"testbench/build\",\n \"testbench/build/bin\",\n ],\n )\n assert not missing_files\n assert not extra_files\n assert exit_code == ReturnValue.SONAR_OK", "title": "" }, { "docid": "55d01207043493531c61b26c9d97f808", "score": "0.48318204", "text": "def load_local_server(self):\n try:\n\n s_local = self.get_local()\n logger.info('>>> Local server found: ' + str(s_local))\n hostname, ip_address = misc.get_fqdn_ip()\n\n if s_local.ip_address != ip_address:\n\n logger.info('>>> Updating ip_address to: ' + str(ip_address))\n s_local.ip_address = ip_address\n s_local.save()\n\n except Server.DoesNotExist:\n\n logger.warning('>>> Local server NOT found, creating instance')\n Server.objects.create(is_me=True)", "title": "" }, { "docid": "e448d08767ee0f3ec185b4a04ad35c83", "score": "0.48311263", "text": "def _update_servers(self):\n ...", "title": "" }, { "docid": "31f148ce7dde951fa7689c8c42aa6bfd", "score": "0.48285976", "text": "def test_patch_ip(self):\n pass", "title": "" }, { "docid": "1c8d3848d765e98c3caf2c401f9e1600", "score": "0.482739", "text": "def ask_new_remote_url(self, remote_name):\n\n self.utility_var = remote_name\n self.manager.root.show_text_box_popup('Please enter the new remote url.', self.add_remote)", "title": "" }, { "docid": "9f0407194185e73611df3021a8f29aea", "score": "0.48252273", "text": "def remote_ipaddress():\n\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.connect(('8.8.8.8', 80))\n ip = 
sock.getsockname()[0]\n sock.close()\n return ip\n except:\n AgentLog.get_logger().info(agent_format_exc())\n return ''", "title": "" }, { "docid": "43eed27945cef1825d5222faf2b3900c", "score": "0.48195153", "text": "def ls_ec2():\n with open('temp/hosts','w') as fh:\n for i,instance in enumerate(SpotInstance.get_spot_instances(REGION,EC2_Tag)):\n print instance.status()\n if instance.public_dns_name:\n fh.write(instance.public_dns_name+'\\n')\n print instance.public_dns_name\n with open(\"temp/connect_\"+str(i)+'.sh','w') as fssh:\n fssh.write(\"#!/usr/bin/env sh\\nssh -i \"+env.key_filename+\" \"+env.user+'@'+instance.public_dns_name)\n with lcd('temp'):\n local('chmod a+x *.sh')\n print \"Information about current spot instance has been added to temp/hosts\"", "title": "" }, { "docid": "a69879f98935271da8447dfe5379538c", "score": "0.4817617", "text": "def put_file_to_remote(src, dest):\n dest = str(dest)\n if g_ssh_host:\n if dest.startswith(\"~/\"):\n dest = dest[2:]\n command = f\"scp -P {g_ssh_port} -r {src} {g_ssh_host}:{dest}\"\n else:\n command = f\"cp -r {src} {dest}\"\n return command", "title": "" }, { "docid": "3207ac4bc1234080db31e1ce0d6c7938", "score": "0.4813804", "text": "def updateIP(entry, **kwargs):\n # Get the URI of the IP address\n try: uri = getIPNodeURI(entry)\n except Exception as err: \n raise\n \n # Update the IP\n for d in kwargs.items(): \n try:\n swis.update(uri, **dict([d]))\n print('Updated \"{}\" to \"{}\"'.format(d[0], d[1]))\n except Exception as err:\n print('Error updating \"{}\" to \"{}\": {}'.format(d[0], d[1], err))\n \n return True", "title": "" }, { "docid": "0a33d61d6922607e9dfd0126de8c9ce5", "score": "0.48106", "text": "def address_append(address: Union[str, Path]) -> str:\n zipped = search(r\"(\\.zip)\", str(address))\n tarred = search(r\"(\\.tar)\", str(address))\n\n try:\n if zipped:\n return \"zip://{}\".format(address)\n elif tarred:\n return \"tar://{}\".format(address)\n else:\n LOGGER.info(\"No changes made to address.\")\n return address\n except Exception as e:\n msg = \"Failed to prefix or parse URL {}: {}\".format(address, e)\n LOGGER.error(msg)", "title": "" }, { "docid": "879e36480111ca67d89b4c1e09e0329e", "score": "0.48057452", "text": "def addDownloadFileFromSocle(self, src_file, dst_file, workdir = '/',\n on_socle = False, as_root = False):\n dst_file_rel = dst_file\n workdir_rel = workdir\n\n if os.path.isabs(dst_file):\n dst_file_rel = os.path.relpath(dst_file,'/')\n if os.path.isabs(workdir):\n workdir_rel = os.path.relpath(workdir,'/')\n \n dst = os.path.join('/',workdir_rel, dst_file_rel)\n file_in_vm = os.path.join('upload', dst_file_rel)\n tmp_file = os.path.join(self.basedir, file_in_vm)\n file_in_vm_on_share = os.path.join('/vagrant',file_in_vm)\n\n if on_socle:\n self.addCpFile(src_file, dst_file, as_root)\n return dst_file\n\n self.addCpFile(src_file, tmp_file, False) # tmp file\n cmd = ['bash', '-c','mkdir', '-p', os.path.dirname(dst), '&&',\n 'cp', '-vf', file_in_vm_on_share, dst]\n if as_root:\n cmd = [ 'sudo' ] + cmd\n\n self.addShellCmdInVm(command = cmd,\n description = 'Uploading file.',\n descriptionDone = 'File uploaded')\n return dst_file", "title": "" }, { "docid": "8ea640e90d99fff365b02705cb5311f9", "score": "0.48053938", "text": "def add_fixed_ip_to_instance(self, context, instance, network_id):\n raise NotImplementedError()", "title": "" }, { "docid": "dde5d295738c9968ed221e08278cdc71", "score": "0.48029846", "text": "def test_07_migrate_vm_live_attach_disk_on_remote(self):\n \n global vm2\n global 
data_disk_2\n data_disk_2 = Volume.create(\n self.apiclient,\n {\"diskname\":\"StorPoolDisk-5\" },\n zoneid=self.zone.id,\n diskofferingid=self.disk_offering.id,\n )\n\n self.debug(\"Created volume with ID: %s\" % data_disk_2.id)\n\n self.virtual_machine_on_remote.attach_volume(\n self.apiclient,\n data_disk_2\n )\n\n volumes = list_volumes(self.apiclient, id = data_disk_2.id)\n vol = sptypes.VolumeUpdateDesc(rename = volumes[0].id)\n name = volumes[0].path.split(\"/\")[3]\n\n rename = self.spapi.volumeUpdate(volumeName = \"~\" + name, json = vol)\n\n destinationHost, vol_list = self.get_destination_pools_hosts(vm2)\n vm2 = self.migrateVm(self.virtual_machine_on_remote, destinationHost)\n\n\n self.virtual_machine_on_remote.attach_volume(\n self.apiclient,\n self.volume_on_remote\n )\n\n destinationHost, vol_list = self.get_destination_pools_hosts(vm2)\n vm2 = self.migrateVm(self.virtual_machine_on_remote, destinationHost)", "title": "" }, { "docid": "98fe880f5eec49ec974684954f52b75f", "score": "0.4796819", "text": "def remote_repository(self):\n if self.remote_location.exists():\n with open(self.remote_location, \"r\") as f:\n return f.readline()\n else:\n with open(self.remote_location, \"w+\") as f:\n f.write(\"http://localhost:3000\")\n return \"http://localhost:3000\"", "title": "" }, { "docid": "35b3fc597e6f1def82947a5cd7e49f70", "score": "0.47968066", "text": "def update_peers_file():\n read_peers_from_file()\n write_peers_to_file()", "title": "" }, { "docid": "929353f0d4d7d24b5d201b123c284019", "score": "0.4791891", "text": "def connect_addrs(self):", "title": "" }, { "docid": "8b4980c18422f6456d4c4b59bdbeccd0", "score": "0.4784786", "text": "def remoteFileStrCallback(self, *args):\n self.writeConfig(self.config, 'gui', 'remoteFile', self.remoteFileStr.get())", "title": "" }, { "docid": "c42b999d631cdc4b31770b8eda03a9b3", "score": "0.47843418", "text": "def set_hostlist_from_file(self, file_path: str) -> None:\n self.run_args[\"hostfile\"] = file_path", "title": "" } ]
11c676fc12f1e4891f99a7e1e073cb55
Calculates needed space for widget content.
[ { "docid": "de4f06c415bdf6dee842b9a400786d81", "score": "0.55907434", "text": "def sizeHint(self):\n #if not self._autosizeFlag:\n # return QSize(self.WIDTH, self.HEIGHT)\n \n self._scaleWidth = 1 # for getDistance()\n self._scaleHeight = 1\n \n neededWidth = self.getDistance('leftMargin', 1) + self.getDistance('rightMargin', 1)\n neededHeight = self.getDistance('topMargin', 1) + self.getDistance('bottomMargin', 1)\n \n titleFieldWidth = 0\n titleFieldHeight = 0\n titleIsSet = self.titleIsSet()\n if titleIsSet:\n titleFieldWidth = self.getDistance('titleFieldWidth', 1)\n titleFieldHeight += self.getDistance('titleFieldHeight', 1)\n \n textFieldWidth = 0\n textFieldHeight = 0\n if self.textFieldIsSet():\n textFieldWidth = self._textField.getWidth()\n textFieldHeight += self._textField.getHeight()\n bodyWidgetWidth = 0\n bodyWidgetHeight = 0\n if self._bodyWidget:\n if self._bodyWidget.parent() != self:\n self._bodyWidget = None\n else:\n sh = self._bodyWidget.sizeHint()\n bodyWidgetWidth = sh.width()\n bodyWidgetHeight = sh.height()\n \n imageSizeF = self.imageSizeF()\n bodyWidth = max(textFieldWidth, bodyWidgetWidth, imageSizeF.width())\n bodyHeight = max(textFieldHeight, bodyWidgetHeight, imageSizeF.height())\n \n if titleIsSet and bodyHeight != 0:\n # gap between title and text\n neededHeight += self.getDistance('bottomMargin', 1)\n \n neededWidth += max(bodyWidth, titleFieldWidth)\n neededHeight += titleFieldHeight + bodyHeight\n \n # evaluate maximum size\n maxWidth = self.maximumSize().width()\n maxHeight = self.maximumSize().height()\n \n maxScaleWidth = min(1.0, 1.0 * maxWidth/neededWidth)\n maxScaleHeight = min(1.0, 1.0 * maxHeight/neededHeight)\n if maxScaleWidth != 1.0 or maxScaleHeight != 1.0:\n # this is not limited by keepAspectRationFlag\n # as it is about absolute sizes here\n # ratio is evaluated in autosize()\n scale = min(maxScaleWidth, maxScaleHeight)\n neededWidth *= scale\n neededHeight *= scale\n \n return QSize(max(self.minimumSize().width(), neededWidth), max(self.minimumSize().height(), neededHeight))", "title": "" } ]
[ { "docid": "1155a86a800520373ac0f383fe60deba", "score": "0.6282941", "text": "def compute_size(self):\n # Get current monitor number\n screen = self.window.get_screen()\n monitor_n = screen.get_monitor_at_window(self.window.get_window())\n # and its width\n scr_width = screen.get_monitor_geometry(monitor_n).width\n # Get gutter width\n view = self.window.get_active_view()\n gutter_win = view.get_window(Gtk.TextWindowType.LEFT)\n gutter_width = gutter_win.get_width() if gutter_win else 0\n # Get scrollbar width\n scrollbar = view.get_parent().get_vscrollbar()\n scrollbar_width = scrollbar.get_allocated_width()\n # Calculate text width (use right_margin_position for column width)\n char_width = self.get_char_width()\n text_width = char_width * view.get_right_margin_position() + 4\n # Get sidepanel width\n sidepanel = self.window.get_side_panel()\n sidepanel_visible = sidepanel.get_visible()\n sidepanel_width = sidepanel.get_allocated_width() if sidepanel_visible else 0\n # Calculate margins\n margins = scr_width - text_width - gutter_width - scrollbar_width - sidepanel_width\n return int(margins / 2)", "title": "" }, { "docid": "4aae876068ccefd89b425ca366fcde00", "score": "0.61855596", "text": "def RecalcSizes(self):\r\n widget = self._widget\r\n if widget:\r\n widget.SetSize(self.GetSize())", "title": "" }, { "docid": "b9b1288d8ff7ea1f2387f9897614ddb9", "score": "0.6118148", "text": "def __updateSize(self):\n if qt.BINDING == \"PyQt5\":\n screenSize = qt.QApplication.desktop().availableGeometry(qt.QCursor.pos()).size()\n else: # Qt6\n screenSize = qt.QApplication.instance().primaryScreen().availableGeometry().size()\n hardLimit = min(screenSize.width() - 480, 1000)\n if screenSize.width() <= 1024:\n hardLimit = screenSize.width()\n softLimit = min(screenSize.width() // 2, 420)\n\n layoutMinimumSize = self.layout().totalMinimumSize()\n width = layoutMinimumSize.width()\n if width > softLimit:\n width = softLimit\n if width > hardLimit:\n width = hardLimit\n\n height = layoutMinimumSize.height()\n self.setFixedSize(width, height)", "title": "" }, { "docid": "3a9c7b24c348585860326bce8b7ceb7a", "score": "0.60917", "text": "def refresh_sizes(self):\r\n widget = self.widget()\r\n widget.setSizeHint(self.compute_best_size())\r\n widget.setMinimumSize(self.compute_min_size())\r\n widget.setMaximumSize(self.compute_max_size())", "title": "" }, { "docid": "5bdced91e10de865449b519cf3f525c7", "score": "0.60702336", "text": "def _compute_size(self):\n self._width, self._height = self.render().get_size()", "title": "" }, { "docid": "e0f694612497ec1f057c6d7fb8096712", "score": "0.60004777", "text": "def width(self):\n if self._desired_width is None:\n # The button adds two braces\n # so we have to add 2 to the width of the content itself\n return len(self.content) + 2\n else:\n return self._desired_width", "title": "" }, { "docid": "a0c9ecc08ab01926ebc97716dfcfa749", "score": "0.5995286", "text": "def _get_control_size(self):", "title": "" }, { "docid": "5ac0e314a25f93674404b0998f421f53", "score": "0.5978485", "text": "def refresh_sizes(self):\r\n widget = self.widget()\r\n widget.SetBestSize(self.compute_best_size())\r\n widget.SetMinSize(self.compute_min_size())\r\n widget.SetMaxSize(self.compute_max_size())", "title": "" }, { "docid": "4524a7dccc9a0511ea50c7c54916e638", "score": "0.5904919", "text": "def get_recommended_part_size(self):", "title": "" }, { "docid": "5ee1ced2df7ecb8b60f08a04aec81f7e", "score": "0.590323", "text": "def compute_min_size(self):\r\n if self._owns_layout and 
self._layout_manager is not None:\r\n primitive = self.layout_box.primitive\r\n width = primitive('width')\r\n height = primitive('height')\r\n w, h = self._layout_manager.get_min_size(width, height)\r\n res = wx.Size(w, h)\r\n else:\r\n res = wx.Size(-1, -1)\r\n return res", "title": "" }, { "docid": "3a1e210bffcbeca631ee70033b4fce07", "score": "0.58878005", "text": "def __calculate_text_dimensions(self):\n # TODO: make dynamic and not static set\n self.__width = self.guifeedback_dimensions.x\n self.__height = self.guifeedback_dimensions.y", "title": "" }, { "docid": "fa250964545a589a5c381026b733a194", "score": "0.5856634", "text": "def width(self):\n #if self.parent() and self.parent().layout():\n # return QWidget.width(self)\n return self.getDistance('width')", "title": "" }, { "docid": "63c1891981b45a78c54242665e9c5810", "score": "0.5844054", "text": "def get_absolute_minimum_part_size(self):", "title": "" }, { "docid": "12154bad22229f9f44a25b5130e4cdab", "score": "0.57892656", "text": "def minimumSize(self):\r\n item = self._item\r\n if item is not None:\r\n s = item.minimumSize()\r\n left, top, right, bottom = self.getContentsMargins()\r\n s.setHeight(s.height() + top + bottom)\r\n s.setWidth(s.width() + left + right)\r\n return s\r\n return super(QSingleWidgetLayout, self).minimumSize()", "title": "" }, { "docid": "6ec5babf65b26c29b062747e3e5ac72d", "score": "0.5788182", "text": "def calculateDimensions(self):\n #self._width = self._defaultWidth\n #self._height = self._defaultHeight\n \n if self._fontSizeHasChanged and (not self._autosizeFontFlag or self._autoscaleFlag):\n self._font.setPointSize(self.getFontSize())\n if self._defaultHeight == 0:\n self.setDefaultHeight(self.getFontHeight())\n self._fontSizeHasChanged = False\n \n if self._autoscaleFlag:\n self.autoscale()\n \n elif self._autosizeFontFlag:\n self.autosizeFont()\n \n if self._autotruncateTextFlag:\n self.truncate()", "title": "" }, { "docid": "1877214c22cee958ac1517dc7e2939b7", "score": "0.5780617", "text": "def maximumSize(self):\r\n item = self._item\r\n if item is not None:\r\n s = item.maximumSize()\r\n left, top, right, bottom = self.getContentsMargins()\r\n s.setHeight(s.height() + top + bottom)\r\n s.setWidth(s.width() + left + right)\r\n return s\r\n return super(QSingleWidgetLayout, self).maximumSize()", "title": "" }, { "docid": "bd74558a53d82d6669c2b3c550b0fd07", "score": "0.5778925", "text": "def compute_best_size(self):\r\n if self._owns_layout and self._layout_manager is not None:\r\n primitive = self.layout_box.primitive\r\n width = primitive('width')\r\n height = primitive('height')\r\n w, h = self._layout_manager.get_min_size(width, height, weak)\r\n res = wx.Size(w, h)\r\n else:\r\n res = wx.Size(-1, -1)\r\n return res", "title": "" }, { "docid": "e867bddb9c74cbc69ad41e8bf16fb830", "score": "0.5741008", "text": "def size(self):\n return self.widget.size", "title": "" }, { "docid": "1192cc65adc55b4a24d55b4afabd66d3", "score": "0.5730848", "text": "def _get__size_hint(self):\n # XXX we can probably do better than the min size. Maybe have \n # the layout manager compute a preferred size, or something\n # similar to a preferred size. 
But I don't know at the moment\n # what it would actually mean to have a preferred size from \n # a set of constraints.\n return self.compute_min_size()", "title": "" }, { "docid": "6736ea898bcfb92b461f78601f98904f", "score": "0.5730668", "text": "def _compute_size(self):\n self._height = 0\n self._width = 0\n for column in self.columns:\n if column.height > self._height:\n self._height = column.height\n self._width += column.width", "title": "" }, { "docid": "12c1e39258762ee9779eca5f8831c57f", "score": "0.57298994", "text": "def fit_content(self):\n base_width = self.padding_left + self.padding_right + 2 * self.border_width\n base_height = self.padding_top + self.padding_bottom + 2 * self.border_width\n\n self.rect = self.rect.resize(\n self.layout.content_width + base_width,\n self.layout.content_height + base_height,\n )", "title": "" }, { "docid": "59155346435d080b29d8858afd14795e", "score": "0.5665498", "text": "def spaceRequired(self):\n if self._min_size:\n return Size(self._min_size)\n else:\n return Size(1024*1024*1024)", "title": "" }, { "docid": "6101f769cac15f919fe148fd1b60847e", "score": "0.5660209", "text": "def compute_best_size(self):\r\n if self._owns_layout and self._layout_manager is not None:\r\n primitive = self.layout_box.primitive\r\n width = primitive('width')\r\n height = primitive('height')\r\n w, h = self._layout_manager.get_min_size(width, height, weak)\r\n return QSize(w, h)\r\n return QSize()", "title": "" }, { "docid": "6dad6859568b5d8e42cc986abaaeeecc", "score": "0.5654612", "text": "def get_dimensions(self):\n size = GUI_UNIT * MAX_FISH\n w = (self.col * 4 + 1) * size\n h = (self.row + 1) * size\n return (w, h)", "title": "" }, { "docid": "c282c28957e843bdf7876d5778fc15a1", "score": "0.5650006", "text": "def compute_min_size(self):\r\n resist_width, resist_height = self._resist\r\n shrink = ('ignore', 'weak')\r\n if resist_width in shrink and resist_height in shrink:\r\n return QSize(0, 0)\r\n if self._owns_layout and self._layout_manager is not None:\r\n primitive = self.layout_box.primitive\r\n width = primitive('width')\r\n height = primitive('height')\r\n w, h = self._layout_manager.get_min_size(width, height)\r\n if resist_width in shrink:\r\n w = 0\r\n if resist_height in shrink:\r\n h = 0\r\n return QSize(w, h)\r\n return QSize()", "title": "" }, { "docid": "daa44ec501a1ef89172fbb49664fa24f", "score": "0.5636108", "text": "def fit_content(self):\n self.rect = self.x, self.y, self.layout.content_width, self.layout.content_height", "title": "" }, { "docid": "deedae2b0b1f424be1763554d8e9dd96", "score": "0.56320333", "text": "def compute_max_size(self):\r\n hug_width, hug_height = self._hug\r\n expanding = ('ignore', 'weak')\r\n if hug_width in expanding and hug_height in expanding:\r\n return QSize(16777215, 16777215)\r\n if self._owns_layout and self._layout_manager is not None:\r\n primitive = self.layout_box.primitive\r\n width = primitive('width')\r\n height = primitive('height')\r\n w, h = self._layout_manager.get_max_size(width, height)\r\n if w < 0 or hug_width in expanding:\r\n w = 16777215\r\n if h < 0 or hug_height in expanding:\r\n h = 16777215\r\n return QSize(w, h)\r\n return QSize(16777215, 16777215)", "title": "" }, { "docid": "94a6fa8f4bd71a06db67c0d86c0fd0c9", "score": "0.5612976", "text": "def width(self):\n return self.widget.width", "title": "" }, { "docid": "ea03c6c8340a6c48f871c187a00fd201", "score": "0.55890846", "text": "def DoGetBestSize(self):\n\n w, h, usemin = self._GetLabelSize()\n defsize = wx.Button.GetDefaultSize()\n 
width = 12 + w\n\n if usemin and width < defsize.width:\n width = defsize.width\n\n height = 11 + h\n\n if usemin and height < defsize.height:\n height = defsize.height\n\n return (width, height)", "title": "" }, { "docid": "6a6863ce7cafaa088b877f7420249f12", "score": "0.55698586", "text": "def compute_ideal_length(self):\n width = os.get_terminal_size()[0] #total width\n number = len(str(self.total))*2 + 6 #width used by the progress display (XXX/XXX) \n title_width = 15 #width of the title\n end_width = 36 #width used by the XXX% and time elapsed/ETA display and the blank after the |\n remaining_size = width - (number+title_width+end_width)\n return remaining_size", "title": "" }, { "docid": "dd90535317f62a96a3201e88051e124f", "score": "0.55686593", "text": "def _OnSize(self, event):\r\n # Pre-fetch some commonly used objects\r\n get_min = self._GetWindowMin\r\n windows = self._windows\r\n sashes = self._sashes\r\n\r\n # Compute the total space available for the sashes\r\n sash_widths = self._GetSashSize() * (len(windows) - 1)\r\n offset = sash_widths + 2 * self._GetBorderSize()\r\n if self._orient == wx.HORIZONTAL:\r\n free_space = self.GetClientSize().GetWidth() - offset\r\n else:\r\n free_space = self.GetClientSize().GetHeight() - offset\r\n\r\n # Compute the effective stretch factors for each window. The\r\n # effective stretch factor is the greater of the current or\r\n # minimum width of the window, multiplied by the window's\r\n # stretch factor.\r\n parts = []\r\n total_stretch = 0\r\n for idx, (sash, window) in enumerate(zip(sashes, windows)):\r\n minw = get_min(window)\r\n if sash < minw:\r\n sash = sashes[idx] = minw\r\n stretch = window.GetStretch() * sash\r\n parts.append((stretch, idx, minw, window))\r\n total_stretch += stretch\r\n\r\n # Add (or remove) the extra space by fairly allocating it to\r\n # each window based on their effective stretch factor.\r\n diff_space = free_space - sum(sashes)\r\n for stretch, idx, minw, window in parts:\r\n if stretch > 0:\r\n d = diff_space * stretch / total_stretch\r\n new = max(sashes[idx] + d, minw)\r\n sashes[idx] = new\r\n\r\n # Since the windows are clipped to their minimum width, it's\r\n # possible that the current space occupied by the windows will\r\n # be too large. 
In that case, the overage is distributed to the\r\n # windows fairly, based on their relative capacity for shrink.\r\n curr_space = sum(sashes)\r\n if curr_space > free_space:\r\n diffs = []\r\n total_diff = 0\r\n for stretch, idx, minw, window in parts:\r\n diff = sashes[idx] - minw\r\n if diff > 0:\r\n diffs.append((diff, window, idx, minw))\r\n total_diff += diff\r\n remaining = curr_space - free_space\r\n diffs.sort()\r\n for diff, window, idx, minw in reversed(diffs):\r\n delta = remaining * diff / total_diff\r\n old = sashes[idx]\r\n new = max(old - delta, minw)\r\n actual_diff = old - new\r\n remaining -= actual_diff\r\n total_diff -= actual_diff\r\n sashes[idx] = new\r\n\r\n # The superclass handler which will actually perform the layout.\r\n super(wxSplitter, self)._OnSize(event)", "title": "" }, { "docid": "a3cedffcb837a01b38b6adfb111fbb0d", "score": "0.55555016", "text": "def contents_margins(self):\r\n return self.widget().GetContentsMargins()", "title": "" }, { "docid": "34f92a8a0e2614ada3f0eb2b9c3708c0", "score": "0.5533257", "text": "def GetBestSize(self):\r\n size = super(wxMultilineField, self).GetBestSize()\r\n return wx.Size(size.GetWidth() + 246, size.GetHeight() + 176)", "title": "" }, { "docid": "e8d5542538efdf2e1060756fa40f369c", "score": "0.5533203", "text": "def report_used(self):\n return self.initial_space - self.min_space", "title": "" }, { "docid": "d525601e9d46ca0fdfa316d8b961aa9c", "score": "0.55297506", "text": "def CalcMin(self):\r\n widget = self._widget\r\n if not widget:\r\n return self._default_size\r\n return widget.GetEffectiveMinSize()", "title": "" }, { "docid": "6c6a30542e82c038f4ee1abec0534364", "score": "0.5525796", "text": "def setPieceSizes(self):\r\n size = self.parentWidget.size()\r\n height = size.height()-100\r\n smallWidth = size.width()/4\r\n largeWidth = size.width() - smallWidth\r\n self.__right_widget__.setMaximumSize(smallWidth, height)\r\n self.__left_widget__.setMaximumSize(largeWidth, height)", "title": "" }, { "docid": "1322f41eb304a06a222777a1b64e4fdd", "score": "0.55098003", "text": "def space_size(self) -> Int:\r\n return self['space_size']", "title": "" }, { "docid": "9c1b7d02936a475bb93d06e58955d061", "score": "0.5500958", "text": "def DoGetBestSize(self):\n\n label = self.GetLabel()\n #if not label:\n # return wx.Size(112, 48)\n\n dc = wx.ClientDC(self)\n dc.SetFont(self.GetFont())\n if not label:\n retWidth = 0 \n retHeight = 0\n else:\n retWidth, retHeight = dc.GetTextExtent(label)\n \n bmpWidth = bmpHeight = 0\n constant = 24\n if self._bitmap:\n '''\n bmpWidth, bmpHeight = self._bitmap.GetWidth()+10, self._bitmap.GetHeight()\n retWidth += bmpWidth\n retHeight = max(bmpHeight, retHeight)\n '''\n bmpWidth, bmpHeight = self._bitmap.GetWidth()+10, self._bitmap.GetHeight()\n retWidth = max(bmpWidth, retWidth)\n retHeight += bmpHeight\n #'''\n constant = 24\n\n return wx.Size(retWidth+constant, retHeight+constant)", "title": "" }, { "docid": "59d4d51af1be52d7e5a16b0fc21b6c46", "score": "0.5485287", "text": "def calculate_sizes(self):\n sizes = []\n h, w = self.stdscr.getmaxyx()\n w -= 1\n h -= 2\n # Figure out appropriate layout\n min_height = 10\n min_width = 18\n\n partitions = 1\n\n columns = 1\n rows = 1\n\n while partitions < self.num_gpus:\n cut_height = floor(h / rows)\n cut_width = floor(w / columns)\n\n diff_height_width = abs(cut_width - cut_height)\n max_edge = max(cut_width, cut_height)\n\n # If the difference between the two are less than 20%, prefer the\n # partitioning that produces fewer empty spaces. 
If there isn't\n # one, then use the usual method.\n if diff_height_width < max_edge / 5:\n partitions_if_add_col = rows * (columns + 1)\n partitions_if_add_row = (rows + 1) * columns\n remain_col = partitions_if_add_col - self.num_gpus\n remain_row = partitions_if_add_row - self.num_gpus\n\n if remain_row <= 0:\n rows += 1\n elif remain_col <= 0:\n columns += 1\n elif remain_row < remain_col:\n rows += 1\n elif remain_col < remain_row:\n columns += 1\n else:\n if cut_width > cut_height:\n # Then we cut vertically\n columns += 1\n else:\n # Then we cut horizontally\n rows += 1\n partitions = rows * columns\n\n width = floor(w / columns) - 1\n height = floor(h / rows) - 1\n\n if width < min_width or height < min_height:\n self.sizes = -1\n return\n\n for i in range(self.num_gpus):\n col = i % columns\n row = floor(i / columns)\n\n x_pad = 1 if col > 0 else 0\n y_pad = 1 if row > 0 else 0\n\n x = (col * width) + x_pad\n y = (row * height) + y_pad\n\n sizes.append({'nlines': height, 'ncols': width, 'begin_y': y,\n 'begin_x': x})\n\n self.window_width = sizes[-1]['begin_x'] + width + 1\n\n self.sizes = sizes", "title": "" }, { "docid": "ac13243de62a0d5189f194e75c5596f6", "score": "0.54730016", "text": "def alloc_qty(self):\n return None", "title": "" }, { "docid": "f565d744f40bb80af2ef37c0644d12b2", "score": "0.54655993", "text": "def GetSize(self, x, y, theButton):\n\n bottomY = self._y + theButton.GetLineHeight(self)\n\n if y < bottomY:\n y = bottomY\n\n width = self._x + self._width\n\n if x < width:\n x = width\n\n if self.IsExpanded():\n for child in self._children:\n x, y = child.GetSize(x, y, theButton)\n\n return x, y", "title": "" }, { "docid": "cf3cec83577ad04c8b82e825d342a7f0", "score": "0.5433193", "text": "def _layout_beam_size(self):\n ## Beam size [Vector] [mm]\n beam_size = None\n beam_size_unit = 'mm'\n\n beam_size_txt = wx.StaticText(self, -1, 'Beam size:')\n x_beam_size_txt = wx.StaticText(self, -1, 'x = ')\n self.x_beam_size_tcl = wx.TextCtrl(self, -1,\n size=(_BOX_WIDTH, 20), style=0)\n y_beam_size_txt = wx.StaticText(self, -1, 'y = ')\n self.y_beam_size_tcl = wx.TextCtrl(self, -1,\n size=(_BOX_WIDTH, 20), style=0)\n z_beam_size_txt = wx.StaticText(self, -1, 'z = ')\n self.z_beam_size_tcl = wx.TextCtrl(self, -1,\n size=(_BOX_WIDTH, 20), style=0)\n beam_size_unit_txt = wx.StaticText(self, -1, 'Unit: ')\n self.beam_size_unit_tcl = wx.TextCtrl(self, -1,\n size=(_BOX_WIDTH, 20), style=0)\n self.beam_size_sizer.AddMany([(beam_size_txt, 0, wx.LEFT | wx.RIGHT, 10),\n (x_beam_size_txt, 0, wx.LEFT, 17),\n (self.x_beam_size_tcl, 0, wx.RIGHT, 10),\n (y_beam_size_txt, 0, wx.EXPAND),\n (self.y_beam_size_tcl, 0, wx.RIGHT, 10),\n (z_beam_size_txt, 0, wx.EXPAND),\n (self.z_beam_size_tcl, 0, wx.RIGHT, 10),\n (beam_size_unit_txt, 0, wx.EXPAND),\n (self.beam_size_unit_tcl, 0, wx.RIGHT, 10)])", "title": "" }, { "docid": "205899bda25e8db31adc52b4e76ea7b9", "score": "0.5409018", "text": "def w_width(self):", "title": "" }, { "docid": "a0b985b8781234d082a7327c42e9dc97", "score": "0.53959095", "text": "def sizeHint(self):\r\n if self._cached_hint is None:\r\n size = QSize(0, 0)\r\n for item in self._items:\r\n size = size.expandedTo(item.sizeHint())\r\n left, top, right, bottom = self.getContentsMargins()\r\n size.setWidth(size.width() + left + right)\r\n size.setHeight(size.height() + top + bottom)\r\n self._cached_hint = size\r\n return self._cached_hint", "title": "" }, { "docid": "03816dc8a1a549a9cafb9f41126385a2", "score": "0.5391845", "text": "def compute_max_size(self):\r\n if 
self._owns_layout and self._layout_manager is not None:\r\n primitive = self.layout_box.primitive\r\n width = primitive('width')\r\n height = primitive('height')\r\n w, h = self._layout_manager.get_max_size(width, height)\r\n res = wx.Size(w, h)\r\n else:\r\n res = wx.Size(-1, -1)\r\n return res", "title": "" }, { "docid": "bd9dc60f11c9398d65241bee200c4116", "score": "0.5376398", "text": "def contents_margins(self):\n return self.widget().GetContentsMargins()", "title": "" }, { "docid": "6cc95876e9679c3d9df0b72f746f7ac5", "score": "0.5372111", "text": "def diff_width(self):\r\n return self.hint_width - self.min_width", "title": "" }, { "docid": "50d0b10d78638998516eec25f5d4fc79", "score": "0.536842", "text": "def height(self):\n return self.widget.height", "title": "" }, { "docid": "bf9739ca82e60f54cce271825a07756b", "score": "0.53577447", "text": "def minimumSize(self):\r\n parent = self.parentWidget()\r\n if parent is not None:\r\n size = parent.explicitMinimumSize()\r\n if size.isValid():\r\n return size\r\n return super(QWindowLayout, self).minimumSize()", "title": "" }, { "docid": "ff31f7d0cca1bdef755e4dbbbc3e721a", "score": "0.53523344", "text": "def compute_min_size(self):\n if self.owns_layout and self.layout_manager.initialized:\n width = self.width\n height = self.height\n w, h = self.layout_manager.get_min_size(width, height)\n res = Size(int(round(w)), int(round(h)))\n else:\n res = Size(-1, -1)\n return res", "title": "" }, { "docid": "456bf44bbd13636a524863ba21139bf8", "score": "0.5346027", "text": "def height(self):\n # TODO: implement this more flexible regarding different QSizePolicies (also width())\n #if self.parent() and self.parent().layout():\n # return QWidget.height(self)\n return self.getDistance('height')", "title": "" }, { "docid": "259a69ca0850a39ef519eb312d3e2645", "score": "0.5345057", "text": "def GetToolSize(self, dc, wnd, item):\r\n \r\n if not item.GetBitmap().IsOk() and not self._agwFlags & AUI_TB_TEXT:\r\n return wx.Size(16, 16)\r\n\r\n width = item.GetBitmap().GetWidth()\r\n height = item.GetBitmap().GetHeight()\r\n\r\n if self._agwFlags & AUI_TB_TEXT:\r\n \r\n dc.SetFont(self._font)\r\n label_size = GetLabelSize(dc, item.GetLabel(), self.GetOrientation() != AUI_TBTOOL_HORIZONTAL)\r\n padding = 6\r\n \r\n if self._text_orientation == AUI_TBTOOL_TEXT_BOTTOM:\r\n \r\n if self.GetOrientation() != AUI_TBTOOL_HORIZONTAL:\r\n height += 3 # space between top border and bitmap\r\n height += 3 # space between bitmap and text\r\n padding = 0\r\n\r\n height += label_size.GetHeight()\r\n \r\n if item.GetLabel() != \"\":\r\n width = max(width, label_size.GetWidth()+padding)\r\n \r\n elif self._text_orientation == AUI_TBTOOL_TEXT_RIGHT and item.GetLabel() != \"\":\r\n \r\n if self.GetOrientation() == AUI_TBTOOL_HORIZONTAL:\r\n \r\n width += 3 # space between left border and bitmap\r\n width += 3 # space between bitmap and text\r\n padding = 0\r\n\r\n width += label_size.GetWidth()\r\n height = max(height, label_size.GetHeight()+padding)\r\n \r\n # if the tool has a dropdown button, add it to the width\r\n if item.HasDropDown():\r\n if item.GetOrientation() == AUI_TBTOOL_HORIZONTAL:\r\n width += BUTTON_DROPDOWN_WIDTH+4\r\n else:\r\n height += BUTTON_DROPDOWN_WIDTH+4\r\n\r\n return wx.Size(width, height)", "title": "" }, { "docid": "b36a5abb51a864110cd592851c0f8a7a", "score": "0.53442013", "text": "def minimumSize(self):\r\n if self._cached_min is None:\r\n size = QSize(0, 0)\r\n for item in self._items:\r\n size = size.expandedTo(item.minimumSize())\r\n left, top, 
right, bottom = self.getContentsMargins()\r\n size.setWidth(size.width() + left + right)\r\n size.setHeight(size.height() + top + bottom)\r\n self._cached_min = size\r\n # XXX hack! We really need hasWidthForHeight! This doesn't quite\r\n # work because a QScrollArea internally caches the min size.\r\n d = self._options.direction\r\n if d == self.TopToBottom or d == self.BottomToTop:\r\n m = QSize(self._cached_min)\r\n if m.width() < self._cached_wfh:\r\n m.setWidth(self._cached_wfh)\r\n return m\r\n return self._cached_min", "title": "" }, { "docid": "18fad872bd9497cd59281785993069b1", "score": "0.5315094", "text": "def GetBestSize(self):\r\n size = super(wxLineEdit, self).GetBestSize()\r\n return wx.Size(size.GetWidth() + 44, size.GetHeight())", "title": "" }, { "docid": "e512cc0b002e1aee6486c31efb06d284", "score": "0.5310757", "text": "def contents_margins(self):\r\n m = self.widget().contentsMargins()\r\n return (m.top(), m.right(), m.bottom(), m.left())", "title": "" }, { "docid": "bc86cc1ebdfdd072c0914ae53b5cd60d", "score": "0.5310103", "text": "def total_space(self):\r\n return self.dims.h * self.dims.w", "title": "" }, { "docid": "f49f56f45a5db983c2fa33eb2c3a514d", "score": "0.5306734", "text": "def calcSize( self ):\n\tself.size = [ len(self.value)*self.scale[0], self.scale[1] ]\n\tself.halfSize = [self.size[0]/2, self.size[1]/2 ]\n\treturn self.size", "title": "" }, { "docid": "80a663a577db6b0de563abf9ca824bee", "score": "0.5305226", "text": "def _GetSizeParams(self, chart):\n return {'size': '%sx%s' % (int(self._width), int(self._height))}", "title": "" }, { "docid": "3c6955f59bd99858c0dbf685979ff34c", "score": "0.52977186", "text": "def calculate_width(self):\n\n # 计算每一层的商品宽度self.levelid_to_goods_width\n for child_area in self.child_area_list:\n goods_width = child_area.get_goods_width()\n self.total_width += goods_width\n if child_area.level_id in self.levelid_to_goods_width:\n self.levelid_to_goods_width[child_area.level_id] += goods_width\n else:\n self.levelid_to_goods_width[child_area.level_id] = goods_width\n\n # 计算每一层的可用剩余self.levelid_to_remain_width\n for level_id in self.levelid_to_goods_width.keys():\n goods_width = self.levelid_to_goods_width[level_id]\n # FIXME 这里有个问题,无法知晓该层是否被该区域完全占据,如果是整层占据应该不需要分享\n # 分享货架剩余空间\n self.levelid_to_remain_width[level_id] = int(\n self.area_manager.levelid_to_remain_width[level_id] * goods_width / self.area_manager.shelf.width)\n self.total_width += self.levelid_to_remain_width[level_id]\n self.width_tolerance = int(self.total_width / 20)\n if self.width_tolerance > self.max_width_tolerance:\n self.width_tolerance = self.max_width_tolerance", "title": "" }, { "docid": "48d85d2c58badbb33fdec9ef4d61b4e0", "score": "0.5292929", "text": "def getSize():", "title": "" }, { "docid": "6704666f92fe5703298185f3aea76db6", "score": "0.5289886", "text": "def autolayoutAlgorithm(self):\n widgetParent = self.parent()\n min_x = round(self.contentStartX())\n min_y = round(self.contentStartY())\n widgetBefore=None\n leftMargin = VispaWidget.LEFT_MARGIN\n topMargin = VispaWidget.TOP_MARGIN\n for widget in self.children():\n if isinstance(widget, VispaWidget) and hasattr(widget,\"object\"):\n x = min_x\n y = min_y\n if self.arrangeUsingRelations():\n for mother in self.dataAccessor().motherRelations(widget.object):\n w = self.widgetByObject(mother)\n if w:\n # place daughter box on the right of the mother box\n if x < w.x() + w.width():\n x = w.x() + w.width() + leftMargin\n # place right next to mother if its the first daughter\n if w==widgetBefore:\n y 
= w.y()\n widget.move(x, y)\n widgetBefore=widget\n # remember the position below all other objects as min_y\n min_y = y + widget.height() + widget.getDistance(\"topMargin\")\n self.autosizeScrollArea()\n self.updateConnections()\n return True", "title": "" }, { "docid": "e80001ff973a82e0020801342d3c556f", "score": "0.52842957", "text": "def fit_content(self):\n self.rect = Rect(\n self.x,\n self.y,\n self.layout.content_width,\n self.layout.content_height,\n )", "title": "" }, { "docid": "64104076968ecc0500251c9463a19832", "score": "0.5279544", "text": "def GetSize(self):\n sz = wx.Panel.GetSize(self)\n return wx.Size(sz[0] + 35, sz[1])", "title": "" }, { "docid": "b3187ca18b932f959df1d96aaece125c", "score": "0.52769005", "text": "def size(self):\n return super(AVLBST, self).size()", "title": "" }, { "docid": "1d4291a74a84119d8d8ab8dd566a3470", "score": "0.5267653", "text": "def CalculateNewSplitSize(self):\r\n\r\n # count number of tab controls\r\n tab_ctrl_count = 0\r\n all_panes = self._mgr.GetAllPanes()\r\n\r\n for pane in all_panes:\r\n if pane.name == \"dummy\":\r\n continue\r\n\r\n tab_ctrl_count += 1\r\n\r\n # if there is only one tab control, the first split\r\n # should happen around the middle\r\n if tab_ctrl_count < 2:\r\n new_split_size = self.GetClientSize()\r\n new_split_size.x /= 2\r\n new_split_size.y /= 2\r\n\r\n else:\r\n\r\n # this is in place of a more complicated calculation\r\n # that needs to be implemented\r\n new_split_size = wx.Size(180, 180)\r\n\r\n return new_split_size", "title": "" }, { "docid": "c8f99f646ec38638410715caabe3abe1", "score": "0.52631074", "text": "def CalcMin(self):\r\n # The effective min size computation is correct, but the wx\r\n # scrolled window interprets it with an error of 2px. That\r\n # is we need to make wx think that the min size is 2px smaller\r\n # than it actually is so that scroll bars should and hide at\r\n # the appropriate sizes.\r\n res = super(wxScrollAreaSizer, self).CalcMin()\r\n if res.IsFullySpecified():\r\n res.width -= 2\r\n res.height -= 2\r\n return res", "title": "" }, { "docid": "55a62f69efa50210cd4d2ced50dbdeae", "score": "0.52466387", "text": "def minimumSize(self):\r\n if not self._cached_min.isValid():\r\n self._cached_min = super(QFlowWidgetItem, self).minimumSize()\r\n return self._cached_min", "title": "" }, { "docid": "033e181eabbe5f8c4eba2abf20ceaf08", "score": "0.52450943", "text": "def calc_size(self):\r\n lines = self.text.split(\"\\n\")\r\n width = 0\r\n height = 0\r\n for line in lines:\r\n lineSurface = self.font.render(line, 1, (255, 255, 255)) #returns surface\r\n height += lineSurface.get_height()\r\n if lineSurface.get_width() > width:\r\n width = lineSurface.get_width()\r\n return (width, height)", "title": "" }, { "docid": "454c3d1e89d14e8bb063bb40bf0076bf", "score": "0.5232859", "text": "def size(self, width=4096, height=4096, st=0, at=0):\r\n\r\n if self.dirty or self.displayables is None:\r\n self.update()\r\n\r\n renders = { }\r\n\r\n for i in self.displayables:\r\n renders[i] = renpy.display.render.render(i, width, self.style.size, st, at)\r\n\r\n layout = Layout(self, width, height, renders, size_only=True)\r\n\r\n return layout.size", "title": "" }, { "docid": "deadf17c71bb987224e02de3fa9c5085", "score": "0.522584", "text": "def _calculate_size(self):\n width = 1\n height = 1\n for output in self.get_outputs():\n if not output._mode: continue\n mode = self.get_mode_by_xid(output._mode)\n x = output._x\n y = output._y\n w = get_mode_width(mode, output._rotation)\n h = 
get_mode_height(mode, output._rotation)\n if x + w > width: width = x + w\n if y + h > height: height = y + h\n if width > self._width_max or height > self._height_max:\n raise RRError(\"The required size is not supported\",\n (width, height), (self._width_max, self._width_min))\n else:\n if height < self._height_min: \n self._height = self._height_min\n else:\n self._height = height\n if width < self._width_min: \n self._width = self._width_min\n else:\n self._width = width\n #FIXME: Physical size is missing", "title": "" }, { "docid": "3680f343bae1c8da7b8e612cad3f89ce", "score": "0.522211", "text": "def space_dimensions(self):\n return self.grid.dimensions", "title": "" }, { "docid": "d235623414db32c5a9f5482f06044b43", "score": "0.52186847", "text": "def __createWidgets ( self ):\n #-- 1 --\n # [ self := self with a new Label added and gridded,\n # with control variable self.__topLabelVar\n # self.topLabel := that new Label ]\n self.__topLabel = Label ( self, font=BUTTON_FONT,\n textvariable=self.__topLabelVar )\n rowx = 0\n self.__topLabel.grid ( row=rowx, column=0 )\n #-- 2 --\n # [ self := self with a new Button widget added and gridded\n # that adds 1 to self.__scaleVar but no higher than\n # MAX_BYTE\n # self.__plusButton := that widget ]\n self.__plusButton = Button ( self, font=BUTTON_FONT,\n text=\"+\", command=self.__plusHandler )\n rowx += 1\n self.__plusButton.grid ( row=rowx, column=0 )\n #-- 3 --\n # [ self := self with a new Scale widget added and gridded,\n # with control variable self.__scaleVar, and a length\n # of (MAX_BYTE+1) pixels\n # self.__scale := that Scale widget ]\n self.__scale = Scale ( self, orient=VERTICAL,\n command=self.__scaleHandler,\n length=(MAX_BYTE+1), from_=MAX_BYTE, to=0,\n variable=self.__scaleVar )\n rowx += 1\n self.__scale.grid ( row=rowx, column=0 )\n #-- 4 --\n # [ self := self with a new Button widget added and gridded\n # that subtracts 1 to self.__scaleVar but no lower than 0\n # self.__minusButton := that widget ]\n self.__minusButton = Button ( self, font=BUTTON_FONT,\n text=\"-\", command=self.__minusHandler )\n rowx += 1\n self.__minusButton.grid ( row=rowx, column=0 )", "title": "" }, { "docid": "5463dbd5f833f968a9e501794e3099c5", "score": "0.5215792", "text": "def UpdateLayout(self): \n self.main_sizer.SetMinSize(self.GetSize()[0],-1)\n self.main_sizer.SetSizeHints(self)\n self.dodatki_sizer.Layout() \n self.Parent.FitInside()", "title": "" }, { "docid": "1e332d32cad085d0a12a2b11ec5e4bee", "score": "0.5203094", "text": "def stretch(self):\r\n return self._layout_data.stretch", "title": "" }, { "docid": "68558bcf9d25b1b82f8cf6fb62073b3a", "score": "0.520129", "text": "def DoGetBestSize(self):\r\n size = self._best_size\r\n if not size.IsFullySpecified():\r\n size = super(wxContainer, self).DoGetBestSize()\r\n return size", "title": "" }, { "docid": "842df1396b6b9cc4032970f9549a3690", "score": "0.5201209", "text": "def size_handler(self):\n\n # --------------------------------------------------------------------------------------------------------------\n # POPULATE ATTRIBUTES PANEL (LEFT PANEL OF GUI)\n self.attributes_box = wx.GridBagSizer(hgap=2, vgap=3)\n r = 1 # CURRENT ROW\n c = 0 # CURRENT COLUMN\n\n # LINE SEP\n line = wx.StaticLine(self.fold_panel_item1)\n self.attributes_box.Add(line, pos=(r, c), span=(1, 5),flag=wx.ALIGN_LEFT | wx.EXPAND | wx.ALL, border=1)\n\n # DENSITY\n r += 1\n self.attributes_box.Add(self.density_text, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT | wx.EXPAND | wx.ALL, border=1)\n c += 1\n 
self.attributes_box.Add(self.density_input, pos=(r, c), span=(1, 5), flag=wx.ALIGN_LEFT | wx.EXPAND | wx.ALL, border=1)\n\n # LINE SEP\n r += 1\n c += 0\n line = wx.StaticLine(self.fold_panel_item1)\n self.attributes_box.Add(line, pos=(r, c), span=(1, 5), flag=wx.ALIGN_LEFT | wx.EXPAND | wx.ALL, border=1)\n\n # REFERENCE DENSITY\n r += 1\n c = 0\n self.attributes_box.Add(self.ref_density_text, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT | wx.EXPAND | wx.ALL, border=1)\n c += 1\n self.attributes_box.Add(self.ref_density_input, pos=(r, c), span=(1, 5), flag=wx.ALIGN_LEFT | wx.EXPAND | wx.ALL, border=1)\n\n # LINE SEP\n r += 1\n c = 0\n line = wx.StaticLine(self.fold_panel_item1)\n self.attributes_box.Add(line, pos=(r, c), span=(1, 5), flag=wx.ALIGN_LEFT | wx.EXPAND | wx.ALL, border=1)\n\n # SUSCEPTIBILITY\n r += 1\n c = 0\n self.attributes_box.Add(self.susceptibility_text, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT | wx.EXPAND | wx.ALL,\n border=1)\n c += 1\n self.attributes_box.Add(self.susceptibility_input, pos=(r, c), span=(1, 5), flag=wx.ALIGN_LEFT | wx.EXPAND | wx.ALL,\n border=1)\n\n # ANGLE A\n r += 1\n c = 0\n self.attributes_box.Add(self.angle_a_text, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT | wx.EXPAND | wx.ALL,\n border=1)\n c += 1\n self.attributes_box.Add(self.angle_a_input, pos=(r, c), span=(1, 5), flag=wx.ALIGN_LEFT | wx.EXPAND | wx.ALL,\n border=1)\n\n # ANGLE B\n r += 1\n c = 0\n self.attributes_box.Add(self.angle_b_text, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT | wx.EXPAND | wx.ALL,\n border=1)\n c += 1\n self.attributes_box.Add(self.angle_b_input, pos=(r, c), span=(1, 5), flag=wx.ALIGN_LEFT | wx.EXPAND | wx.ALL,\n border=1)\n\n # Angle C\n r += 1\n c = 0\n self.attributes_box.Add(self.angle_c_text, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT | wx.EXPAND | wx.ALL,\n border=1)\n c += 1\n self.attributes_box.Add(self.angle_c_input, pos=(r, c), span=(1, 5), flag=wx.ALIGN_LEFT | wx.EXPAND | wx.ALL,\n border=1)\n\n # Earth Field\n r += 1\n c = 0\n self.attributes_box.Add(self.earth_field_text, pos=(r, c), span=(1, 1),flag=wx.ALIGN_LEFT | wx.EXPAND | wx.ALL,\n border=1)\n c += 1\n self.attributes_box.Add(self.earth_field_input, pos=(r, c), span=(1, 5), flag=wx.ALIGN_LEFT | wx.EXPAND | wx.ALL,\n border=1)\n\n # LINE SEP\n r += 1\n c = 0\n line = wx.StaticLine(self.fold_panel_item1)\n self.attributes_box.Add(line, pos=(r, c), span=(1, 5), flag=wx.ALIGN_LEFT | wx.EXPAND | wx.ALL, border=1)\n\n # XY NODES\n r += 1\n c = 0\n self.attributes_box.Add(self.node_text, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT | wx.EXPAND | wx.ALL,\n border=1)\n\n # LINE SEP\n r += 1\n c = 0\n line = wx.StaticLine(self.fold_panel_item1)\n self.attributes_box.Add(line, pos=(r, c), span=(1, 5), flag=wx.ALIGN_LEFT | wx.EXPAND | wx.ALL, border=1)\n\n # X NODE\n r += 1\n c = 0\n self.attributes_box.Add(self.x_text, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT | wx.EXPAND | wx.ALL,\n border=1)\n c = + 1\n self.attributes_box.Add(self.x_input, pos=(r, c), span=(1, 5), flag=wx.EXPAND | wx.ALL,\n border=1)\n\n # Y NODE\n r += 1\n c = 0\n self.attributes_box.Add(self.y_text, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT | wx.EXPAND | wx.ALL,\n border=1)\n c = + 1\n self.attributes_box.Add(self.y_input, pos=(r, c), span=(1, 5), flag=wx.EXPAND | wx.ALL,\n border=1)\n\n # LINE SEP\n r += 1\n c = 0\n line = wx.StaticLine(self.fold_panel_item1)\n self.attributes_box.Add(line, pos=(r, c), span=(1, 5), flag=wx.ALIGN_LEFT | wx.EXPAND | wx.ALL, border=1)\n\n # SET BUTTON\n r += 1\n c = 0\n 
self.attributes_box.Add(self.node_set_button, pos=(r, c), span=(1, 5), flag=wx.ALIGN_LEFT | \n wx.EXPAND | wx.ALL, border=1)\n\n # LINE SEP\n r += 1\n c = 0\n line = wx.StaticLine(self.fold_panel_item1)\n self.attributes_box.Add(line, pos=(r, c), span=(1, 5), flag=wx.ALIGN_LEFT | wx.EXPAND |\n wx.ALL, border=1)\n\n # LABEL TEXT SIZE\n r += 1\n c = 0\n self.attributes_box.Add(self.text_size_text, pos=(r, c), span=(1, 5), flag=wx.ALIGN_CENTER |\n wx.EXPAND | wx.ALL, border=1)\n r += 1\n c = 0\n self.attributes_box.Add(self.text_size_input, pos=(r, c), span=(1, 5), flag=wx.ALIGN_CENTER |\n wx.EXPAND | wx.ALL, border=1)\n\n # LINE SEP\n r += 1\n c = 0\n line = wx.StaticLine(self.fold_panel_item1)\n self.attributes_box.Add(line, pos=(r, c), span=(1, 5), flag=wx.ALIGN_LEFT | wx.EXPAND | wx.ALL, border=1)\n\n # DENSITY SCALE BAR\n # self.attr_box.Add(self.cb1, 0, wx.ALL | wx.LEFT | wx.EXPAND, 5)\n # --------------------------------------------------------------------------------------------------------------\n\n # --------------------------------------------------------------------------------------------------------------\n # CREATE LAYER TREE BOX\n self.tree_box = wx.BoxSizer(wx.VERTICAL)\n self.tree_box.Add(self.tree, 1, wx.EXPAND|wx.ALL|wx.ALIGN_LEFT, border=20)\n # --------------------------------------------------------------------------------------------------------------\n\n # --------------------------------------------------------------------------------------------------------------\n # CREATE FAULT TREE BOX\n self.fault_tree_box = wx.BoxSizer(wx.VERTICAL)\n self.fault_tree_box.Add(self.fault_tree, 1, wx.EXPAND | wx.ALL | wx.ALIGN_LEFT, border=20)\n # --------------------------------------------------------------------------------------------------------------\n\n # --------------------------------------------------------------------------------------------------------------\n # CREATE A BOX SIZER FOR THE MAIN MODELLING FRAME\n self.canvas_box = wx.BoxSizer(wx.HORIZONTAL)\n # ADD THE MAIN MODELLING FRAME TO IT'S A BOX SIZER\n self.canvas_box.Add(self.canvas, 1, wx.EXPAND, border=20)\n # --------------------------------------------------------------------------------------------------------------\n\n # --------------------------------------------------------------------------------------------------------------\n # PLACE BOX SIZERS IN CORRECT PANELS\n # SETUP LEFT PANEL \n self.fold_panel_item3.SetSizerAndFit(self.fault_tree_box)\n self.fold_panel_item2.SetSizerAndFit(self.tree_box)\n self.fold_panel_item1.SetSizerAndFit(self.attributes_box)\n self.left_panel.SetSizerAndFit(self.left_panel_sizer)\n \n # COLLAPSE/EXPAND ENSURES SIZERS INITALISE CORRECTLY\n self.controls_fold_panel.Expand(self.fold_panel_item1)\n self.controls_fold_panel.Expand(self.fold_panel_item2)\n self.controls_fold_panel.Expand(self.fold_panel_item3)\n\n # SETUP RIGHT PANEL\n self.rightPanel.SetSizerAndFit(self.canvas_box)\n self.rightPanel.SetSize(self.GetSize())\n # --------------------------------------------------------------------------------------------------------------", "title": "" }, { "docid": "6bbe69eaebd1844c74f3082ef3d36079", "score": "0.5190533", "text": "def create_widgets(self):\n\t\ttk.Label(self._master, text=\"How many?\").grid(row=0)\n\t\tself.to_set = tk.Label(self._master, text=self.result_text)\n\t\tself.to_set.grid(row=7, columnspan=2)\n\t\tself.rolled = tk.Label(self._master, text=\"\")\n\t\tself.rolled.grid(row=8, columnspan=2)\n\t\tself.history = tk.Label(self._master, 
text=\"\")\n\t\tself.history.grid(row=9, columnspan=2)\n\t\tself.text1 = tk.Entry(self._master)\n\t\tself.text1.grid(row=0, column=1)\n\t\tself.button = tk.Button(text='d4', command=self.result_d4, width=15).grid(row=2, column=0)\n\t\tself.button = tk.Button(text='d6', command=self.result_d6, width=15).grid(row=2, column=1)\n\t\tself.button = tk.Button(text='d8', command=self.result_d8, width=15).grid(row=3, column=0)\n\t\tself.button = tk.Button(text='d10', command=self.result_d10, width=15).grid(row=3, column=1)\n\t\tself.button = tk.Button(text='d12', command=self.result_d12, width=15).grid(row=4, column=0)\n\t\tself.button = tk.Button(text='d20', command=self.result_d20, width=15).grid(row=4, column=1)", "title": "" }, { "docid": "16ecee589b1b8c1c715c729b71013407", "score": "0.51864564", "text": "def create_widgets(self):\n self.lbl_x_step = Label(self.master,\n text='x_step:',\n anchor='e')\n self.lbl_x_step.grid(row=0, column=0, padx=5, pady=5, sticky=E)\n self.entry_x_step = Entry(self.master)\n self.entry_x_step.grid(row=0, column=1, padx=5, pady=5, sticky=W)\n self.entry_x_step.insert(END, self.pp_settings['x_step'])\n\n self.lbl_x_max = Label(self.master,\n text='x_max:',\n anchor=E)\n self.lbl_x_max.grid(row=1, column=0, padx=5, pady=5, sticky=E)\n self.entry_x_max = Entry(self.master)\n self.entry_x_max.grid(row=1, column=1, padx=5, pady=5, sticky=W)\n self.entry_x_max.insert(END, self.pp_settings['x_max'])\n\n self.lbl_dir_x_right = Label(self.master,\n text='dir_x_right:',\n anchor=E)\n self.lbl_dir_x_right.grid(row=2, column=0, padx=5, pady=5, sticky=E)\n self.entry_dir_x_right = Entry(self.master)\n self.entry_dir_x_right.grid(row=2, column=1, padx=5, pady=5, sticky=W)\n self.entry_dir_x_right.insert(END, self.pp_settings['dir_x_right'])\n\n self.lbl_dir_x_left = Label(self.master,\n text='dir_x_left:',\n anchor=E)\n self.lbl_dir_x_left.grid(row=3, column=0, padx=5, pady=5, sticky=E)\n self.entry_dir_x_left = Entry(self.master)\n self.entry_dir_x_left.grid(row=3, column=1, padx=5, pady=5, sticky=W)\n self.entry_dir_x_left.insert(END, self.pp_settings['dir_x_left'])\n\n self.lbl_y_step = Label(self.master,\n text='y_step:',\n anchor=E)\n self.lbl_y_step.grid(row=4, column=0, padx=5, pady=5, sticky=E)\n self.entry_y_step = Entry(self.master)\n self.entry_y_step.grid(row=4, column=1, padx=5, pady=5, sticky=W)\n self.entry_y_step.insert(END, self.pp_settings['y_step'])\n\n self.lbl_dir_y_right = Label(self.master,\n text='dir_y_right:',\n anchor=E)\n self.lbl_dir_y_right.grid(row=5, column=0, padx=5, pady=5, sticky=E)\n self.entry_dir_y_right = Entry(self.master)\n self.entry_dir_y_right.grid(row=5, column=1, padx=5, pady=5, sticky=W)\n self.entry_dir_y_right.insert(END, self.pp_settings['dir_y_right'])\n\n self.lbl_dir_y_left = Label(self.master,\n text='dir_y_left:',\n anchor=E)\n self.lbl_dir_y_left.grid(row=6, column=0, padx=5, pady=5, sticky=E)\n self.entry_dir_y_left = Entry(self.master)\n self.entry_dir_y_left.grid(row=6, column=1, padx=5, pady=5, sticky=W)\n self.entry_dir_y_left.insert(END, self.pp_settings['dir_y_left'])\n\n self.lbl_step_pen = Label(self.master,\n text='step_pen:',\n anchor=E)\n self.lbl_step_pen.grid(row=7, column=0, padx=5, pady=5, sticky=E)\n self.entry_step_pen = Entry(self.master)\n self.entry_step_pen.grid(row=7, column=1, padx=5, pady=5, sticky=W)\n self.entry_step_pen.insert(END, self.pp_settings['step_pen'])\n\n self.btn_save = Button(self.master,\n text='Save',\n width=15,\n height=2,\n command=self.on_btn_save_click)\n 
self.btn_save.grid(row=8, column=0, padx=10, pady=10, sticky=E)\n\n self.btn_cancel = Button(self.master,\n text='Cancel',\n width=15,\n height=2,\n command=self.on_btn_cancel_click)\n self.btn_cancel.grid(row=8, column=1, padx=10, pady=10, sticky=E)", "title": "" }, { "docid": "b6292aba5543854b9a7c5cd3363aeb02", "score": "0.5170169", "text": "def _apply_widget_settings(self):\n self.viewport_label.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n self.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed))\n self.viewport_label.setAlignment(Qt.AlignCenter)", "title": "" }, { "docid": "982ecee1c7e4f85bbd461cccc0c7ae62", "score": "0.5169469", "text": "def size(self):\n return (self.width)", "title": "" }, { "docid": "546adcd7ed0e922b77293d01f815b047", "score": "0.5168031", "text": "def DoSetSize(self, x, y, width, height, sizeFlags=wx.SIZE_AUTO):\r\n \r\n parent_size = self.GetParent().GetClientSize()\r\n if x + width > parent_size.x:\r\n width = max(0, parent_size.x - x)\r\n if y + height > parent_size.y:\r\n height = max(0, parent_size.y - y)\r\n\r\n wx.PyControl.DoSetSize(self, x, y, width, height, sizeFlags)", "title": "" }, { "docid": "e27ab42eea146b7eae4e6c1ee6c379c5", "score": "0.51607263", "text": "def get_width(self):\n return 200", "title": "" }, { "docid": "b5dae19a2b91ab8cc24ce879650be2b3", "score": "0.5155258", "text": "def dynamic_size(self):\n return self._implementation._dynamic_size", "title": "" }, { "docid": "dc984832910661fbf39bccd81bb80680", "score": "0.5153004", "text": "def update_geometry(self):\n self.update()\n s_width = self.winfo_screenwidth()\n c_width = self.winfo_reqwidth()\n self.geometry(\"+{0}+0\".format(int(s_width / 2) - int(c_width / 2)))\n self.update()\n s_height = self.winfo_screenheight()\n for key in self.displays:\n top = self.displays[key].main_canvas.winfo_rooty()\n self.displays[key].main_canvas.config(\n height=s_height - 2 * top - 6)\n # Useful dimension info below.\n # print(\"screen_width:\", self.winfo_screenwidth())\n # print(\"screen_height:\", self.winfo_screenheight())\n # print(\"main window size:\", self.winfo_geometry())\n # print(\"canvas size:\", self.main_canvas.winfo_geometry())\n # print(\"top decoration:\", self.main_canvas.winfo_rooty())\n # print(\"left edge:\", self.winfo_rootx())\n # print(\"top decoration canvas:\", self.main_canvas.winfo_rooty())\n # print(\"left edge canvas:\", self.main_canvas.winfo_rootx())\n # print(\"main window required width:\", self.winfo_reqwidth())\n # print(\"main window required height:\", self.winfo_reqheight())", "title": "" }, { "docid": "ace767e3c911f95e3cf1c40aa2df3e22", "score": "0.5150996", "text": "def fit_content(self):\n self.rect = self.x, self.y, self.label.content_width, self.label.content_height", "title": "" }, { "docid": "c84a6b6757bb96461fdb462566603519", "score": "0.5148539", "text": "def GetContentsMargins(self):\r\n label = self._label\r\n height = label.GetCharHeight()\r\n if not label.IsShown():\r\n height /= 2\r\n return (height, 1, 1, 1)", "title": "" }, { "docid": "6584d93933b7846a08866f7962e54f2d", "score": "0.5145659", "text": "def size_hint(self):\n # Since this may be called very often by user code, especially \n # if the toolkit widget is using it as a replacement for its\n # internal size hint computation, we must cache the value or\n # it will be too expensive to use under heavy resize loads.\n # This returns the value from the cached property which is \n # updated whenver the size_hint_updated event is fired.\n return self._size_hint", 
"title": "" }, { "docid": "1ede6dcde5eb2bf4937bd25a863612be", "score": "0.5142375", "text": "def size(self):\n return self.width", "title": "" }, { "docid": "1ede6dcde5eb2bf4937bd25a863612be", "score": "0.5142375", "text": "def size(self):\n return self.width", "title": "" }, { "docid": "1ede6dcde5eb2bf4937bd25a863612be", "score": "0.5142375", "text": "def size(self):\n return self.width", "title": "" }, { "docid": "1ede6dcde5eb2bf4937bd25a863612be", "score": "0.5142375", "text": "def size(self):\n return self.width", "title": "" }, { "docid": "1ede6dcde5eb2bf4937bd25a863612be", "score": "0.5142375", "text": "def size(self):\n return self.width", "title": "" }, { "docid": "1ede6dcde5eb2bf4937bd25a863612be", "score": "0.5142375", "text": "def size(self):\n return self.width", "title": "" }, { "docid": "1ede6dcde5eb2bf4937bd25a863612be", "score": "0.5142375", "text": "def size(self):\n return self.width", "title": "" }, { "docid": "1ede6dcde5eb2bf4937bd25a863612be", "score": "0.5142375", "text": "def size(self):\n return self.width", "title": "" }, { "docid": "1ede6dcde5eb2bf4937bd25a863612be", "score": "0.5142375", "text": "def size(self):\n return self.width", "title": "" }, { "docid": "1ede6dcde5eb2bf4937bd25a863612be", "score": "0.5142375", "text": "def size(self):\n return self.width", "title": "" }, { "docid": "e4d8f2f021cc23b39ac80806c60d08b4", "score": "0.5140556", "text": "def _get_size(self):\n return self.w, self.h", "title": "" }, { "docid": "f3caff7e3c47a2d14ff22a58079a295b", "score": "0.51402056", "text": "def maximumSize(self):\r\n parent = self.parentWidget()\r\n if parent is not None:\r\n size = parent.explicitMaximumSize()\r\n if size.isValid():\r\n return size\r\n return super(QWindowLayout, self).maximumSize()", "title": "" }, { "docid": "7b575d7924deeeace3ef4067a9227452", "score": "0.5135271", "text": "def get_available_space(self):\n total = 0\n for server in self._available_servers():\n total += request_space_available(server)\n return total // 2", "title": "" } ]
0c6921aceb94749aa6b6642aaa0ea885
x = load_data()
y = load_data(filename='train2014_targets.npy')
[ { "docid": "1072289ac577dec90c9674646ada4184", "score": "0.7426716", "text": "def load_data(path=data_path,filename='train2014_inputs.npy'):\n return np.load(open(path+filename,'r'))", "title": "" } ]
[ { "docid": "cfa423a7262af569aa74eecca0e47e49", "score": "0.7542153", "text": "def load_train_data():\n X = np.load('../input/X_train.npy')\n y = np.load('../input/y_train.npy')\n\n X = X.astype(np.float32)\n X /= 255\n\n np.random.seed(seed)\n np.random.shuffle(X)\n np.random.seed(seed)\n np.random.shuffle(y)\n\n return X, y", "title": "" }, { "docid": "e7011612d546171d9f70565170f70c9f", "score": "0.72409594", "text": "def load_data(data_dir):\n\n ### YOUR CODE HERE\n y_train = []\n x_train = []\n for batch in range(1, 2):\n with open(data_dir + '/data_batch_' + str(batch), 'rb') as fo:\n dict = pickle.load(fo, encoding='bytes')\n y_train.extend(dict[b'labels'])\n for x in dict[b'data']:\n x_train.append(x)\n\n with open(data_dir + '/test_batch', 'rb') as fo:\n dict = pickle.load(fo, encoding='bytes')\n y_test = np.array(dict[b'labels'])\n y_test = y_test.astype('int32')\n x_test = dict[b'data']\n x_test = x_test.astype('float32')\n\n x_train = np.array(x_train)\n x_train = x_train.astype('float32')\n y_train = np.array(y_train)\n y_train = y_train.astype('int32')\n ### END CODE HERE\n\n return x_train, y_train, x_test, y_test", "title": "" }, { "docid": "e53057b90f7482a1e73dd066a42345c9", "score": "0.7179642", "text": "def load_data(path_X,path_Y,n_data):\n with np.load(path_X, allow_pickle=True) as f:\n X = f['X']\n print (\"input X shape\",X.shape)\n X = X#.transpose(1,0,2,3)\n st1_x = 0 \n ed1_x = 54000 \n st2_x = 54000 \n ed2_x = 60000 \n x_train = np.concatenate([X[st1_x:ed1_x],X[60000:114000]],axis=0)\n x_test = np.concatenate([X[st2_x:ed2_x],X[114000:120000]],axis=0) \n\n with np.load(path_Y, allow_pickle=True) as f:\n Y,l_p = f['label'],f['Y']\n Y = Y#.transpose(1,0)\n l_p = l_p.transpose(1,0)\n st1_x = 0 \n ed1_x = 54000 \n st2_x = 54000 \n ed2_x = 60000 \n y_train = np.concatenate([Y[st1_x:ed1_x],Y[60000:114000]],axis=0)\n y_test = np.concatenate([Y[st2_x:ed2_x],Y[114000:120000]],axis=0)\n\n l_p_train = np.concatenate([l_p[st1_x:ed1_x],l_p[60000:114000]],axis=0)\n l_p_test = np.concatenate([l_p[st2_x:ed2_x],l_p[114000:120000]],axis=0) \n\n\n return (x_train, y_train,l_p_train), (x_test, y_test,l_p_test)", "title": "" }, { "docid": "bdc15dc081969442758a2e077a016ea1", "score": "0.7120633", "text": "def load_data_and_labels():\n # Load data from files\n pre_x_file = list(open(\"data/train_1.txt\", \"r\", encoding='utf8').readlines())\n pre_x_file = [k.strip() for k in pre_x_file]\n x_file = np.array(pre_x_file)\n x_file = sequence.pad_sequences(x_file, maxlen=100, dtype='float')\n\n # Generate labels\n labels = list(open(\"./data/label.txt\", \"r\", encoding='utf8').readlines())\n labels = [s.strip() for s in labels]\n labels = [s.split(',') for s in labels]\n labels = [to_categorical(s) for s in labels]\n\n return [x_file, labels]", "title": "" }, { "docid": "b3473b6cc16ec181503b299317401982", "score": "0.7108675", "text": "def load_dataset(data_dir=''):\n x_train = np.load(\"data/x_train.npy\")\n y_train = np.load(\"data/y_train.npy\")\n x_test = np.load(\"data/x_test.npy\")\n y_test = np.load(\"data/y_test.npy\")\n\n return x_train, y_train, x_test, y_test", "title": "" }, { "docid": "b9a5486e47cf9ae52d216796769616d8", "score": "0.709463", "text": "def load_data():\n Y = np.genfromtxt(training_data, delimiter=',', dtype=None, skip_header=1, usecols=[1], converters={1: lambda x: 0 if b'b'==x else 1}) \n data = np.genfromtxt(training_data, delimiter=',', skip_header=1)\n X = data[:, 2:] \n return X, Y", "title": "" }, { "docid": "c5723b15a17e8e5eed64b2bdd3e54411", "score": 
"0.708089", "text": "def loadXY():\n X = np.load(\"X.npy\")\n Y = np.load(\"Y.npy\")\n\n return X, Y", "title": "" }, { "docid": "526d82f2f7a41393376614a37f92827c", "score": "0.7047407", "text": "def load_data():\n train_file= \"./data/train.p\"\n test_file= \"./data/test.p\"\n \n with open(train_file, \"rb\") as f:\n train_dict= pickle.load(f)\n \n with open(test_file, \"rb\") as f:\n test_dict= pickle.load(f)\n \n X_train= train_dict[\"features\"]\n Y_train= np.expand_dims(train_dict[\"labels\"], axis= -1)\n X_test= test_dict[\"features\"]\n Y_test= np.expand_dims(test_dict[\"labels\"], axis= -1)\n return X_train, Y_train, X_test, Y_test", "title": "" }, { "docid": "e207ffdf1b957081466b8e41509517b0", "score": "0.7028176", "text": "def loadDataset(path):\n X_train = np.load(os.path.join(path, \"X_train.npy\"))\n Y_train = np.load(os.path.join(path, \"Y_train.npy\")).reshape(1, -1)\n X_test = np.load(os.path.join(path, \"X_test.npy\"))\n Y_test = np.load(os.path.join(path, \"Y_test.npy\")).reshape(1, -1)\n\n return X_train, Y_train, X_test, Y_test", "title": "" }, { "docid": "d1014ad20dd169088b74c2f5ad276ce0", "score": "0.6968713", "text": "def load_data(X_fname, y_fname):\n X_input_data = open(os.path.join(Config.data.base_path, Config.data.processed_path, X_fname), 'r')\n y_input_data = open(os.path.join(Config.data.base_path, Config.data.processed_path, y_fname), 'r')\n\n X_data, y_data = [], []\n for X_line, y_line in zip(X_input_data.readlines(), y_input_data.readlines()):\n X_ids = [int(id_) for id_ in X_line.split()]\n y_id = int(y_line)\n\n if len(X_ids) == 0 or y_id >= Config.data.num_classes:\n continue\n\n if len(X_ids) <= Config.data.max_seq_length:\n X_data.append(_pad_input(X_ids, Config.data.max_seq_length))\n\n y_one_hot = np.zeros(Config.data.num_classes)\n y_one_hot[int(y_line)] = 1\n y_data.append(y_one_hot)\n\n print(f\"load data from {X_fname}, {y_fname}...\")\n return np.array(X_data, dtype=np.int32), np.array(y_data, dtype=np.int32)", "title": "" }, { "docid": "8842771471609c94d1752c40e253bcc0", "score": "0.6934164", "text": "def load_train_data():\n X = np.load('/data/preprocessed/X_train.npy')\n y = np.load('/data/preprocessed/y_train.npy')\n metadata = np.load('/data/preprocessed/metadata_train.npy')\n\n X = X[:, :30*15, :, :]\n X = X.astype(np.float32)\n X /= 255\n\n seed = 12345\n np.random.seed(seed)\n np.random.shuffle(X)\n np.random.seed(seed)\n np.random.shuffle(y)\n np.random.seed(seed)\n np.random.shuffle(metadata)\n\n return X, y, metadata", "title": "" }, { "docid": "b463830f8478380b00fdffb8cc4468d8", "score": "0.69187105", "text": "def load_dataset(train=True):\n\n typ = \"training\" if train else \"test\"\n filename_x = f\"./data_np/{typ}_data.npy\"\n filename_y = f\"./data_np/{typ}_labels.npy\"\n x_data = np.load(filename_x)\n y_data = np.load(filename_y)\n return x_data, y_data", "title": "" }, { "docid": "9b1bd30407baaf5af38cdac2ded282f8", "score": "0.682801", "text": "def load_dataset(path):\n data = load_files(path)\n dog_files = np.array(data['filenames'])\n dog_targets = np_utils.to_categorical(np.array(data['target']), 133)\n return dog_files, dog_targets", "title": "" }, { "docid": "9100e11bd55fd9e6966372f76f5b635c", "score": "0.68241954", "text": "def load_data():\n X_train, X_test = load_signals('train'), load_signals('test')\n y_train, y_test = load_y('train'), load_y('test')\n\n return X_train, X_test, y_train, y_test", "title": "" }, { "docid": "9100e11bd55fd9e6966372f76f5b635c", "score": "0.68241954", "text": "def load_data():\n X_train, 
X_test = load_signals('train'), load_signals('test')\n y_train, y_test = load_y('train'), load_y('test')\n\n return X_train, X_test, y_train, y_test", "title": "" }, { "docid": "6ccffc176da7797814d349fe4c466a7a", "score": "0.681071", "text": "def load_data():\n train_images = idx2numpy.convert_from_file(\n 'data/train-images-idx3-ubyte').astype('float64')\n\n # We know that from the IDX file documentation the max value we will be getting\n # is 255, and we wha the values of our input to be [0, 1] so\n train_images = train_images/255\n\n # # Lets look at one of the images\n # import matplotlib.pyplot as plt\n # plt.imsave(\"test\", train_images[1, :, :])\n\n # Well that was fun\n\n # We also reshape the images so they match the first layer of the nn\n train_images = train_images.reshape(train_images.shape[0], 784, 1)\n\n # Lables\n # Along with the images we need the\n train_lables_ff = idx2numpy.convert_from_file(\n 'data/train-labels-idx1-ubyte')\n\n # These are also in the wrong format, we need them to be activations of nodes\n train_lables = numpy.zeros([train_lables_ff.shape[0], 10, 1])\n\n # This cannot be the most efficient solution by it works\n for i in range(0, train_lables_ff.shape[0]):\n train_lables[i, train_lables_ff[i]] = 1\n\n return(train_images, train_lables)", "title": "" }, { "docid": "5a548a44d07d90f1a3cbaf3bd4e9dec5", "score": "0.6806618", "text": "def load_training_data(base_dir):\n print(os.path.join(base_dir, 'train_X.npy'))\n train_X = np.load(os.path.join(base_dir, 'train_X.npy'))\n train_y = np.load(os.path.join(base_dir, 'train_y.npy'))\n return train_X, train_y", "title": "" }, { "docid": "cc8bb03d887f86696bce6feb45d7a1e7", "score": "0.6793414", "text": "def load_train(self):\n X_mel, y = self.mel_loader.load_train()\n X_ess, _ = self.ess_loader.load_train()\n X = np.array(list(zip(X_mel, X_ess)))\n return X, y", "title": "" }, { "docid": "53ac46f2974603860ca9f7161d71724b", "score": "0.67812866", "text": "def load_data():\n with open(os.path.join(config.path_data, '{}.data.pkl'.format(FN0)), 'rb') as fp:\n X, Y = pickle.load(fp)\n print('number of examples', len(X), len(Y))\n return X, Y", "title": "" }, { "docid": "cc2715cb0feb167e147bd8662bc4fb1d", "score": "0.67779744", "text": "def load_data():\n test_x = np.asarray(LoadImage(\"./MNIST_data/t10k-images.idx3-ubyte\"), dtype=\"float32\")\n test_y = np.asarray(LoadLabel(\"./MNIST_data/t10k-labels.idx1-ubyte\"), dtype=\"int64\")\n training_x = np.asarray(LoadImage(\"./MNIST_data/train-images.idx3-ubyte\"), dtype=\"float32\")\n training_y = np.asarray(LoadLabel(\"./MNIST_data/train-labels.idx1-ubyte\"), dtype=\"int64\")\n training_data = (training_x[:50000], training_y[:50000])\n validation_data = (training_x[50000:], training_y[50000:])\n test_data = (test_x, test_y)\n return (training_data, validation_data, test_data)", "title": "" }, { "docid": "5c752d39981966348481825979ff1971", "score": "0.6775103", "text": "def load_data():\n # Load and preprocess data\n x_vec, labels = load_data_and_labels()\n\n return [x_vec, labels]", "title": "" }, { "docid": "121dfeffbc6ea10f4203259273feadf6", "score": "0.6750879", "text": "def _load_training_data(base_dir):\n x_train = np.load(os.path.join(base_dir, 'train_data.npy'))\n y_train = np.load(os.path.join(base_dir, 'train_labels.npy'))\n return x_train, y_train", "title": "" }, { "docid": "073e20eb4b1a90f1e145a340bcc49501", "score": "0.67454094", "text": "def load_data(n, data_path, exp_name):\n ftrain = data_path / f\"{exp_name}_train_{n}.npy\"\n ftest = data_path / 
f\"{exp_name}_test.npy\"\n\n dftrain = np.load(ftrain)\n dftest = np.load(ftest)\n\n X_train = dftrain[:, :-1]\n y_train = dftrain[:, -1]\n\n X_test = dftest[:, :-1]\n y_test = dftest[:, -1]\n\n return X_train, y_train, X_test, y_test", "title": "" }, { "docid": "09e3842c8afe7e854ab72dda3512b4f1", "score": "0.67353976", "text": "def readnpy(dataset):\n\n np_array = dataset\n imgs = []\n label = []\n for index in range(len(np_array)):\n imgs.append(np_array[index][0])\n label.append(np_array[index][1])\n dataset_data = torch.from_numpy(np.array(imgs))\n dataset_target = torch.from_numpy(np.array(label))\n\n return dataset_data, dataset_target", "title": "" }, { "docid": "bb60c4bde2daf9dda010d888dc1e3087", "score": "0.671674", "text": "def load_data():\n df = pd.read_csv(\"../../Data/breast_cancer_data/data.csv\")\n\n cols = df.columns\n X = df[cols[2:-1]].to_numpy()\n y = df[cols[1]].to_numpy()\n y = (y=='M').astype(np.int) * 2 - 1\n\n train_X = X[:-150]\n train_y = y[:-150]\n\n test_X = X[-150:]\n test_y = y[-150:]\n\n return train_X, train_y, test_X, test_y", "title": "" }, { "docid": "2629214022397f9f65a98f3eb75406df", "score": "0.67155796", "text": "def load_toy_dataset(filepath, feature_names, label_name=\"y\"):\n df = pd.read_csv(filepath)\n df_train = df[df[\"split\"] == \"train\"]\n X_train, y_train = df_train[feature_names].values, df_train[label_name].values\n df_test = df[df[\"split\"] == \"test\"]\n X_test, y_test = df_test[feature_names].values, df_test[label_name].values\n return X_train, y_train, X_test, y_test", "title": "" }, { "docid": "657c1734c2c1a74aed925cd0b8bdbecc", "score": "0.6692401", "text": "def load_data(trainfile, testfile):\n train = pd.read_csv(trainfile, header=None)\n test = pd.read_csv(testfile, header=None)\n train_label = train.iloc[:, 0]\n test_label = test.iloc[:, 0]\n train = train.drop(labels=train.columns[0], axis=1)\n test = test.drop(labels=test.columns[0], axis=1)\n print('data is loaded')\n return np.array(train), np.array(train_label), np.array(test), np.array(test_label)", "title": "" }, { "docid": "ec31eb5ce9a8ee05f664cb1a14e974f1", "score": "0.6691683", "text": "def load_data():\n mnist = tf.keras.datasets.mnist\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n train_images = np.logical_or((y_train == 0), (y_train == 1))\n test_images = np.logical_or((y_test == 0), (y_test == 1))\n x_train, y_train = x_train[train_images], y_train[train_images]\n x_test, y_test = x_test[test_images], y_test[test_images]\n return x_train, y_train, x_test, y_test", "title": "" }, { "docid": "df2954ef03c2df80a5f387ec4d7c49cc", "score": "0.66873795", "text": "def get_data(x_train_path, y_train_path, x_test_path, verbosity=1):\n if verbosity >= 1:\n start = time.time()\n\n # load data\n x_train = np.load(x_train_path)['arr_0']\n y_train = np.load(y_train_path)['arr_0']\n x_test = np.load(x_test_path)['arr_0']\n\n if verbosity >= 1:\n print('Spending {} seconds to load data.'.format(time.time() - start))\n\n if verbosity >= 2:\n print('x_train:', x_train.shape)\n print(x_train)\n print('y_train:', y_train.shape)\n print(y_train)\n print('x_test:', x_test.shape)\n print(x_test)\n\n return x_train, y_train, x_test", "title": "" }, { "docid": "4ef547d4ea5750a602cecd45f2bb1dc7", "score": "0.66784835", "text": "def load_data_and_labels():\n # Load train data from files\n samples, labels = [],[]\n with open('../../Data/germeval.ensemble.train.txt','r', encoding='utf-8') as fi:\n for line in fi:\n data = line.strip().split('\\t')\n # get sample\n 
samples.append(data[0])\n # get label\n if data[1] == 'OFFENSE':\n labels.append([0,1]) # label of positive sample\n elif data[1] == 'OTHER':\n labels.append([1,0]) # label of negative sample\n else:\n raise ValueError('Unknown label!')\n\n # Adding Espresso data\n samples, labels, idx_espresso = add_espresso_data(samples, labels)\n\n # Clean and split samples\n Xtrain = [clean_str(sample) for sample in samples]\n Xtrain = [s.split(\" \") for s in Xtrain] # each sample as list of words/strings\n Ytrain = np.array(labels)\n len_train = len(Xtrain)\n # We need to remember the len of train, we will put train + test together to build the vocab. Then we will recognise the first len_train items as coming from the train set\n\n # Load test data,\n Xtest, Ytest = [], []\n with open('../../Data/germeval.ensemble.test.txt','r', encoding='utf-8') as fi:\n for line in fi:\n data = line.strip().split('\\t')\n # get sample\n Xtest.append(data[0])\n # get label\n if data[1] == 'OFFENSE':\n Ytest.append([0,1]) # label of positive sample\n elif data[1] == 'OTHER':\n Ytest.append([1,0]) # label of negative sample\n else:\n raise ValueError('Unknown label!')\n\n Xtest = [clean_str(sample) for sample in Xtest]\n Xtest = [s.split(\" \") for s in Xtest] # each sample as list of words/strings\n Ytest = np.array(Ytest)\n\n return [Xtrain, Ytrain, Xtest, Ytest, len_train, idx_espresso]", "title": "" }, { "docid": "37ea1f7a58b65186291cf5aab2a04904", "score": "0.66630167", "text": "def get_training_data(path: Path):\n X = np.load(path / 'train_X.npy')\n y = np.load(path / 'train_y.npy')\n return X, y", "title": "" }, { "docid": "756f6d15ff9122b656c0328ff477e03d", "score": "0.66385907", "text": "def load(self):\n\n x = [] # input documents (n_docs, max_seq_len)\n y = [] # targets we are predicting for each input\n\n for train_file in os.listdir(self.train_dir):\n\n targ_vec = numpy.zeros(len(self.targ2int))\n enc = train_file.split('.')[0]\n\n if enc not in self.enc2targs:\n continue\n\n no_labels_for_this_file = True\n for targ in self.enc2targs[enc]:\n if targ in self.targ2int:\n targ_vec[self.targ2int[targ]] = 1\n no_labels_for_this_file = False\n\n if no_labels_for_this_file:\n continue # all rare codes\n\n y.append(targ_vec)\n\n tokens = read_tokens(os.path.join(self.train_dir, train_file))\n x.append(' '.join(set(tokens)))\n\n self.tokenizer.fit_on_texts(x)\n pickle_file = open('Model/tokenizer.p', 'wb')\n pickle.dump(self.tokenizer, pickle_file)\n print('input vocabulary size:', len(self.tokenizer.word_index))\n\n x = self.tokenizer.texts_to_sequences(x)\n max_seq_len = max(len(seq) for seq in x)\n x = pad_sequences(x, maxlen=max_seq_len)\n\n return x, numpy.array(y)", "title": "" }, { "docid": "90f5622c70156c743bc997b5075a0757", "score": "0.6616679", "text": "def load_data_and_labels(positive_data_file, negative_data_file):\n # Load data from files and one hot encode\n x_text = pd.read_csv(\"./data/keras_reuters_x_train.csv\")\n x_text = x_text[\"text\"].tolist()\n y = pd.read_csv(\"./data/keras_reuters_y_train.csv\")\n y = y[\"labels\"].tolist()\n y = np.array(y).reshape(-1, 1)\n encoder = OneHotEncoder(n_values=max(y)+1)\n y = encoder.fit_transform(y).toarray()\n return [x_text, y]", "title": "" }, { "docid": "4ee4450eab1324042faf43a807b11740", "score": "0.6611732", "text": "def _load_testing_data(base_dir):\n x_test = np.load(os.path.join(base_dir, 'eval_data.npy'))\n y_test = np.load(os.path.join(base_dir, 'eval_labels.npy'))\n return x_test, y_test", "title": "" }, { "docid": 
"7b547112a5e27735eb439cb664f0ce55", "score": "0.65969276", "text": "def load_data():\n\n if 'data' not in os.listdir('.'):\n os.mkdir('data') \n \n if 'id_to_word.pkl' not in os.listdir('data'):\n print('Loading data...')\n\n # save np.load\n np_load_old = np.load\n\n # modify the default parameters of np.load\n np.load = lambda *a, **k: np_load_old(*a, allow_pickle=True, **k)\n\n (x_train, y_train), (x_val, y_val) = imdb.load_data(num_words=max_features, index_from=3)\n\n # restore np.load for future normal usage\n np.load = np_load_old\n\n word_to_id = imdb.get_word_index()\n word_to_id ={k:(v+3) for k,v in word_to_id.items()}\n word_to_id[\"<PAD>\"] = 0\n word_to_id[\"<START>\"] = 1\n word_to_id[\"<UNK>\"] = 2\n id_to_word = {value:key for key,value in word_to_id.items()}\n\n print(len(x_train), 'train sequences')\n print(len(x_val), 'test sequences')\n\n print('Pad sequences (samples x time)')\n x_train = sequence.pad_sequences(x_train, maxlen=maxlen)\n x_val = sequence.pad_sequences(x_val, maxlen=maxlen)\n y_train = np.eye(2)[y_train]\n y_val = np.eye(2)[y_val] \n\n np.save('./data/x_train.npy', x_train)\n np.save('./data/y_train.npy', y_train)\n np.save('./data/x_val.npy', x_val)\n np.save('./data/y_val.npy', y_val)\n with open('data/id_to_word.pkl','wb') as f:\n pickle.dump(id_to_word, f) \n\n else:\n x_train, y_train, x_val, y_val = np.load('data/x_train.npy'),np.load('data/y_train.npy'),np.load('data/x_val.npy'),np.load('data/y_val.npy')\n with open('data/id_to_word.pkl','rb') as f:\n id_to_word = pickle.load(f)\n\n return x_train, y_train, x_val, y_val, id_to_word", "title": "" }, { "docid": "86f5396adf3cd8dbe1e734a9b30f9ff1", "score": "0.659211", "text": "def load_data_files(label, data_files):\n\n data_x = np.empty((0, NUM_FEATURES))\n data_y = np.empty((0))\n\n for filename in data_files:\n try:\n # data = np.loadtxt(BytesIO(zipped_dataset.read(filename)))\n data = np.loadtxt(filename)\n print('... 
file {0}'.format(filename))\n x, y = process_dataset_file(data, label)\n data_x = np.vstack((data_x, x))\n data_y = np.concatenate([data_y, y])\n except KeyError:\n print('ERROR: Did not find {0} in zip file'.format(filename))\n\n return data_x, data_y", "title": "" }, { "docid": "30b3075878e454d6ea90645adc7c8f77", "score": "0.65882444", "text": "def load_dataset(indir: Path):\n X_train = indir / 'Xtrain.npz/'\n y_train = indir / 'y_train.npy/'\n Xtrain = sparse.load_npz(str(X_train))\n y_train = np.load(str(y_train))\n\n return Xtrain, y_train", "title": "" }, { "docid": "8da99c6adebc7ec2abed1387c38f40de", "score": "0.6580916", "text": "def load_target_data():\n\n target_data = []\n with open('/Users/samimac2/Desktop/PythonProject/testResults.csv') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for row in reader:\n target_data.append([float(row[0])])\n target_data = np.array(target_data)\n\n return target_data", "title": "" }, { "docid": "01d7b0d912f5a8c24753b40805d71af2", "score": "0.6573299", "text": "def load_data(train_files, test_files, val_files, commands, data_dir):\n\n train_data = []\n train_label = []\n val_data = []\n val_label = []\n test_data = []\n test_label = []\n\n for fp in tqdm(train_files):\n\n data, label = create_example(fp, commands, data_dir,trainable=True)\n train_data.append(data)\n train_label.append(label)\n\n train_data = np.array(train_data)\n train_label = np.array(train_label)\n print(f\"Train data shape: {train_data.shape}, train label shape: {train_label.shape}\")\n\n for fp in tqdm(val_files):\n\n data, label = create_example(fp, commands, data_dir,trainable=False)\n val_data.append(data)\n val_label.append(label)\n\n val_data = np.array(val_data)\n val_label = np.array(val_label)\n print(f\"Validation data shape: {val_data.shape}, validation label shape: {val_label.shape}\")\n\n for fp in tqdm(test_files):\n\n data, label = create_example(fp, commands, data_dir,trainable=False)\n test_data.append(data)\n test_label.append(label)\n\n test_data = np.array(test_data)\n test_label = np.array(test_label)\n\n print(f\"Test data shape: {test_data.shape}, test label shape: {test_label.shape}\")\n return train_data, train_label, test_data, test_label, val_data, val_label", "title": "" }, { "docid": "50d1377a187c0778f270397dda16014a", "score": "0.6566267", "text": "def load_data():\n # Load and preprocess data\n Xtrain, Ytrain, Xtest, Ytest, len_train, idx_espresso = load_data_and_labels()\n # sentences, labels, idx_espresso = load_data_and_labels()\n # Vocab needs to be build on the basis of the whole dataset, so we put train and test together! 
TRAIN, then TEST\n X = Xtrain + Xtest # X is list while Y is np.array\n Y = np.concatenate((Ytrain, Ytest), axis=0)\n\n sentences_padded = pad_sentences(X)\n vocabulary, vocabulary_inv = build_vocab(sentences_padded)\n X, Y = build_input_data(sentences_padded, Y, vocabulary)\n return [X, Y, vocabulary, vocabulary_inv, len_train, idx_espresso]", "title": "" }, { "docid": "541d0f8373334d4b48f085d45e60582f", "score": "0.65646", "text": "def npy_loader(dataset, num_classes):\n\n data = np.load('./data/{}/train_images.npy'.format(dataset))\n labels = np.load('./data/{}/train_labels.npy'.format(dataset))\n labels = to_categorical(labels, num_classes=num_classes)\n\n return data, labels", "title": "" }, { "docid": "cde6be393e2eb9a5dc1811cbdb93724d", "score": "0.6561799", "text": "def load(self):\n self.dataset = read_dataset(self.data_config[\"training_path\"])\n self.dataset = clean_dataset(self.dataset, **self.data_config)\n drop_columns = self.data_config.get(\"drop_columns\", [])\n self.feature_columns = self.data_config.get(\n \"feature_columns\",\n list(self.dataset.drop(columns=[self.target] + drop_columns).columns)\n )\n self.X = self.dataset[self.feature_columns]\n self.y = self.dataset[self.target]", "title": "" }, { "docid": "f69272993f6dbc70a10a957485a94ccf", "score": "0.6556588", "text": "def load_fer_2013(filename, expect_labels=True):\n \n X_path = filename[:-4] + '.X.npy'\n Y_path = filename[:-4] + '.Y.npy'\n if os.path.exists(X_path):\n X = np.load(X_path)\n if expect_labels:\n y = np.load(Y_path)\n else:\n y = None\n return X, y\n \n csv_file = open(filename, 'r');\n reader = csv.reader(csv_file);\n \n row = reader.next()\n \n y_list = [];\n X_list = [];\n X_list_outer= [];\n for row in reader:\n \n if expect_labels:\n \n y_str=row[0];\n X_row_str = row[1];\n #img = cv2.imread('X_row_str');\n\n\t if y_str==\"neutral\":\n \t y_str=1\n\t elif y_str==\"joy\":\n \t\t y_str=2\n\t elif y_str==\"disgust\":\n \t y_str=3\n\t elif y_str==\"surprise\":\n \t\t y_str=4\n\t elif y_str==\"fear\":\n \t y_str=5\n\t elif y_str==\"sadness\":\n \t\t y_str=6\n\t elif y_str==\"anger\":\n \ty_str=7\n y = int(y_str)\n y_list.append(y)\n else:\n X_row_str ,= row\n X_row_strs = X_row_str.split('.')\n\t#X_row_strs[0].append('-lo.jpg')\n name = ''\n name += '../scripts/test/'\n name += X_row_strs[0]\n name += '-lo.jpgminimized.jpg'\n\tprint \"The files that are being processed are\" + name\n print \"test test finito\"\n print name\n \n\tX_name = cv2.imread(name);\n\t#print X_name[0].size;\n #X_name1 = X_name.split(',')\n\tX_list.append(np.transpose(X_name,(2,0,1)))\n# X_list.append(X_name)\n\n#\tX_list_outer.append(X_list)\n # print X_list_outer[0].shape; \n\t#print X_list[0].size.Y;\n \n\n # print \"X_list_outer dim \" + X_list_outer.shape\n X_list_new = []\n X = np.asarray(X_list).astype('float')\n for x in range(0, len(X)):\n X_list_new.append(X[x])\n \n\n shape = X_list_new[0]\n X_list_new[0] = len(X)\n X_list_new[1] = shape\n\n \t#x_list_outer = X_list[x][0]\n #X_list_outer.append(X[x])\n \n \n #X_list_new.append(X)\n #X_list_new[0]= X\n print X_list[0]\n print 'next'\n print X_list_new[0];\n print len(X)\n print len(X_list)\n print \"X_list_new dim \";\n print X_list_new[1];\n #X=X.reshape(160, 3, 32, 32).transpose(0,3,2,1).astype(\"float32\");\n #X = X_list\n if expect_labels:\n y = np.asarray(y_list)\n else:\n y = None\n\n np.save(X_path, X)\n if y is not None:\n np.save(Y_path, y)\n \n return X, y;", "title": "" }, { "docid": "eb769cb4c87bb2bf4e615a6e10a2fb67", "score": "0.6552408", "text": "def 
get_data(data_path):\n # load data\n raw_data = np.genfromtxt(data_path)\n data = np.ones((raw_data.shape[0], raw_data.shape[1] + 1)) # add x_0 = 1\n data[:, :-2] = raw_data[:, :-1]\n data[:, -1] = raw_data[:, -1] == 1\n\n # split x/y & split train/test\n train = data[:TRAIN_NUM]\n test = data[TRAIN_NUM:]\n x_train = train[:, :-1]\n y_train = train[:, -1]\n x_test = test[:, :-1]\n y_test = test[:, -1]\n return x_train, y_train, x_test, y_test", "title": "" }, { "docid": "72e82f6483533684040bb3cae2465b04", "score": "0.65523016", "text": "def load_data(labels=None):\n # Load the data.\n train_x = numpy.array(vigra.readHDF5(\"data/mnist/train.h5\", \"data\").transpose())\n train_y = vigra.readHDF5(\"data/mnist/train.h5\", \"labels\")\n test_x = numpy.array(vigra.readHDF5(\"data/mnist/test.h5\", \"data\").transpose())\n test_y = vigra.readHDF5(\"data/mnist/test.h5\", \"labels\")\n\n # Reduce the data to the given labels.\n if labels is not None:\n train_indices = numpy.array([i for i, t in enumerate(train_y) if t in labels])\n train_x = train_x[train_indices]\n train_y = train_y[train_indices]\n test_indices = numpy.array([i for i, t in enumerate(test_y) if t in labels])\n test_x = test_x[test_indices]\n test_y = test_y[test_indices]\n\n return train_x, train_y, test_x, test_y", "title": "" }, { "docid": "da8425a4e77930307486c6bd94df3253", "score": "0.6551326", "text": "def load(dataloader):\n\n for data in dataloader:\n x, y = data\n x = x.view(x.shape[0], -1)\n return x, y", "title": "" }, { "docid": "f59866cb2aedba3c7bd8a5f4af6a1174", "score": "0.6545301", "text": "def load_data():\n train_x_data, train_y_data, test_x_data, test_y_data = mnist.load_data(one_hot=True)\n return train_x_data, train_y_data, test_x_data, test_y_data", "title": "" }, { "docid": "316fd89a9f618fb4a5b802994863afff", "score": "0.65452594", "text": "def load_file(filename):\n X, y, df = _load_file(filename)\n\n loader = DatasetIO()\n loader.pickle_files([(X, 'load_X.pkl'), (y, 'load_y.pkl'), (df, 'df.pkl')],\n Status.TEMP_DIR)\n\n # Update appropriate status flags.\n Status.DATASET_LOADED = True\n Status.FILENAME = os.path.basename(filename)\n Status.EXTENSION = filename[filename.rfind('.')]\n\n print 'Feature Array:\\n %s' % X\n print 'Target classifications:\\n %s' % y", "title": "" }, { "docid": "2f17ec59f8128ccfae7b27002d5c8b3e", "score": "0.65342736", "text": "def load_data(self, data_dir):\n temp_data = np.loadtxt(data_dir, delimiter=',')\n self.x_data = temp_data[:,:8]\n self.y_data = Y_data = (np.asmatrix(temp_data[:,-1])).transpose()", "title": "" }, { "docid": "c8227b4303ece53abfe7a8e84f257a36", "score": "0.65337956", "text": "def data_loader(data_path, is_test=False):\n if is_test:\n folder_for_x = 'x_test'\n folder_for_y = 'y_test'\n else:\n folder_for_x = 'train_x'\n folder_for_y = 'train_y'\n\n images_names = os.listdir(os.path.join(data_path, folder_for_x))\n X = []\n y = []\n for i in range(len(images_names)):\n X.append(cv.imread(os.path.join(data_path, folder_for_x, images_names[i])))\n y.append(cv.imread(os.path.join(data_path, folder_for_y, images_names[i])))\n return np.asarray(X), np.asarray(y)", "title": "" }, { "docid": "75a98126d192b0d716fe85753a219a4f", "score": "0.65330046", "text": "def LoadBatch(filename):\n with open(filename, 'rb') as f:\n dataDict = pickle.load(f, encoding='bytes')\n\n X = (dataDict[b\"data\"]).T\n y = dataDict[b\"labels\"]\n Y = (np.eye(10)[y]).T\n\n meanX = np.mean(X)\n stdX = np.std(X)\n X = (X - meanX) / stdX\n\n return X, Y, y", "title": "" }, { "docid": 
"5254fc5340bec2dc7a248183073317ed", "score": "0.6529924", "text": "def load_data(path):\r\n all_data = h5py.File(path, \"r+\")\r\n x_train = np.array(all_data[\"x_train\"])\r\n x_test = np.array(all_data[\"x_test\"])\r\n y_train = np.array(all_data[\"y_train\"])\r\n y_test = np.array(all_data[\"y_test\"])\r\n return x_train, x_test, y_train, y_test", "title": "" }, { "docid": "21596341d23b35fc0581b14a1bb5b776", "score": "0.6525453", "text": "def load_data(path, verbose=False, num_train=40, num_test=10, num_class=15):\n\n X_train =[]\n y_train = []\n X_test = []\n y_test = []\n all_classes = get_all_files(path)\n\n all_classes = all_classes[:num_class]\n\n for i, _class in enumerate(all_classes):\n class_path = os.path.join(path, _class)\n all_images_name = get_all_files(class_path)[:num_train]\n training_samples = [os.path.join(class_path, file_name) for file_name in all_images_name ]\n X_train.extend(training_samples)\n y_train += [i]*len(training_samples)\n\n for i, _class in enumerate(all_classes):\n class_path = os.path.join(path, _class)\n all_images_name = get_all_files(class_path)[-1*num_test:]\n testing_samples = [os.path.join(class_path, file_name) for file_name in all_images_name ]\n X_test.extend(testing_samples)\n y_test += [i]*len(testing_samples)\n\n if verbose:\n print \"X_train\" + str(X_train)\n print \"y_train\" + str(y_train)\n\n if verbose:\n print \"X_test\" + str(X_test)\n print \"y_test\" + str(y_test)\n\n return X_train, np.array(y_train), X_test, np.array(y_test)", "title": "" }, { "docid": "cbd411564b1b63a1059867d5edd98910", "score": "0.6521625", "text": "def read_data(datasets_dir=\"./data\", frac = 0.25):\n print(\"... read data\")\n data_file = os.path.join(datasets_dir, 'data.pkl.gzip')\n \n f = gzip.open(data_file,'rb')\n data = pickle.load(f)\n\n # get images as features and actions as targets\n X = np.array(data[\"state\"]).astype('float32')\n y = np.array(data[\"action\"]).astype('float32')\n\n\n #print(\"y.shape:\", y.shape)\n\n j = 0\n check = False\n\n for i in range(y.shape[0] - 1):\n if all(y[i] == [0., 0. , np.float32(0.2)]):\n y[i] = [0., 0., 1.]\n check = True\n\n #print(\"Breaks changed: \", check)\n\n \"\"\"\n for i in range(60000):\n if all(y[i] == [0., 0. , 0.]):\n if i > 10000:\n X[j] = X[i]\n\t\ty[j] = random_action()\n\t\tj += 1\n\t\tcontinue\n X[j] = X[i]\n y[j] = [0., 1., 0.]\n \n if i % 7 == 0 :\n y[j] = [0., 0., 1.] \n \n j += 1\n else:\n X[j] = X[i]\n y[j] = y[i]\n j += 1\n \"\"\"\n\n \n '''\n X = np.append(X, X[:20000,:,:], axis = 0)\n y = np.append(y, y[:20000,:], axis = 0)\n '''\n\n for i in range(67000):\n if all(y[i] == [0., 0. 
, 0.]):\n X[j] = X[i]\n y[j] = random_action()\n print(\"y[j]: \", y[j])\n else:\n X[j] = X[i]\n y[j] = y[i]\n j += 1\n \n\n \n\n #print(\"y.shape:\", y.shape)\n\n \n\n # split data into training and validation set\n n_samples = len(data[\"state\"])\n X_train, y_train = X[:int((1-frac) * n_samples)], y[:int((1-frac) * n_samples)]\n X_valid, y_valid = X[int((1-frac) * n_samples):], y[int((1-frac) * n_samples):]\n\n\n ##for i in range(10000):\n ## print(\"\\n\\nIn Read y_train[\",i,\"]:\", y_train[i])\n ## i += 103\n\n \n return X_train, y_train, X_valid, y_valid", "title": "" }, { "docid": "310b764dc1331e6ca3a8b6efb59db4ba", "score": "0.65215963", "text": "def load_data(p,y_name='gname'):\n #train_path, test_path = maybe_download()\n train,test = split_traintest(p)\n #train = pd.read_csv(train_path, names=CSV_COLUMN_NAMES,sep=\"\\t\", header=0)\n #print (list(train))\n train_x, train_y = train, train.pop(y_name)\n\n #test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES,sep=\"\\t\", header=0)\n test_x, test_y = test, test.pop(y_name)\n\n return (train_x, train_y), (test_x, test_y)", "title": "" }, { "docid": "5af413235e2339b06faafbd83902db3e", "score": "0.651573", "text": "def load_data(self):\r\n temp_train = load_files(self.train_path, encoding='latin1')\r\n self.classes = temp_train['target_names']\r\n self.train = temp_train['data']\r\n self.data['x_train'] = self.vectorizer.fit_transform(self.train).toarray()\r\n self.data['y_train'] = temp_train['target']\r\n if len(self.classes) == 2:\r\n self.data['y_train'] = self.data['y_train'].ravel()\r\n if self.test_path is not None:\r\n temp_test = load_files(self.test_path)\r\n self.test = temp_test['data']\r\n self.data['x_test'] = self.vectorizer.transform(self.test).toarray()\r\n self.data['y_test'] = temp_test['target']", "title": "" }, { "docid": "320670cc850769cc19a41c0d6ed45842", "score": "0.6512426", "text": "def import_data():\n train_set = input_data.Data((FLAGS.input_size, FLAGS.input_size), (FLAGS.label_size, FLAGS.label_size))\n eval_set = input_data.Data((FLAGS.input_size, FLAGS.input_size), (FLAGS.label_size, FLAGS.label_size))\n\n train_set.add_examples(\"../data/train/8300_positives.png\", 8300, 100, None)\n train_set.add_labels(\"../data/train/8300_labels.png\", 8300, 100)\n train_set.add_examples(\"../data/train/20038_negatives.png\", 20038, 100, numpy.zeros([FLAGS.label_size * FLAGS.label_size]))\n \n eval_set.add_examples(\"../data/train/eval_1510_positives.png\", 1510, 100, None)\n eval_set.add_labels(\"../data/train/eval_1510_labels.png\", 1510, 100)\n eval_set.add_examples(\"../data/train/eval_3710_negatives.png\", 3710, 100, numpy.zeros([FLAGS.label_size * FLAGS.label_size]))\n \n train_set.finalize()\n eval_set.finalize()\n \n utils.print_to_file(FLAGS.output_file, 'training: ' + str(train_set.count))\n utils.print_to_file(FLAGS.output_file, 'evaluation: ' + str(eval_set.count))\n\n return train_set, eval_set", "title": "" }, { "docid": "e0600bb9854233ceb37071ea06163551", "score": "0.65072966", "text": "def load_data_multi():\n data = pd.read_csv('data/ex1data2.txt', header=None, names=['Size', 'Bedrooms', 'Price'])\n data.insert(0, 'Ones', 1)\n\n # Set X (training data) and y (target values)\n X = data[['Ones', 'Size', 'Bedrooms']]\n y = data['Price']\n\n # Convert to numpy array\n X = np.array(X.values, dtype=np.float64)\n y = np.array(y.values, dtype=np.float64).reshape((-1, 1))\n\n return X, y", "title": "" }, { "docid": "73228f5eb0a2bcf71b6a97a21650783f", "score": "0.64965814", "text": "def load_data():\n with 
open('dataset/data_batch_1', 'rb') as f:\n data = pickle.load(f, encoding='latin-1')\n slicer = int(DATASET_NUM*0.8)\n train_images = np.array(data['data'][:slicer]) / 255\n train_labels = np.array(data['labels'][:slicer])\n test_images = np.array(data['data'][slicer:]) / 255\n test_labels = np.array(data['labels'][slicer:])\n reshaped_train_images = np.array([x.reshape([32, 32, 3]) for x in train_images])\n reshaped_train_labels = np.array([gen_onehot_list(i) for i in train_labels])\n reshaped_test_images = np.array([x.reshape([32, 32, 3]) for x in test_images])\n reshaped_test_labels = np.array([gen_onehot_list(i) for i in test_labels])\n\n\n return reshaped_train_images, reshaped_train_labels, reshaped_test_images, reshaped_test_labels", "title": "" }, { "docid": "76cf157249f636d50d5ee1284dfbf047", "score": "0.6492665", "text": "def load_data():\n\n train_data = pd.read_csv('train.csv')\n test_data = pd.read_csv('test.csv')\n\n x_train = train_data.iloc[:, 0: -1]\n y_train = train_data.iloc[:, -1]\n x_test = test_data.iloc[:, 0:]\n\n return x_train, y_train, x_test", "title": "" }, { "docid": "d3da9e1e2d6f095456f08d38351144c4", "score": "0.6486889", "text": "def load_data_and_labels(filepath):\n # Load data from files\n train_datas = []\n\n with open(filepath, 'r', encoding='utf-8',errors='ignore') as f:\n train_datas = f.readlines()\n # with open(entitypath, 'r', encoding='utf-8',errors='ignore') as f:\n # entity_datas = f.readlines()\n\n\n one_hot_labels = []\n x_datas = []\n entity = []\n for line in train_datas:\n print(line)\n parts = line.strip().split(\" \")\n label = parts[0]\n data = parts[1:]\n if(len(data) == 0):\n continue\n data = \" \".join(data)\n x_datas.append(data)\n if label.startswith('-1') :\n one_hot_labels.append([0,0,1])\n elif label.startswith('1') :\n one_hot_labels.append([1,0,0])\n else:\n one_hot_labels.append([0, 1, 0])\n # for line in entity_datas:\n # parts = line.strip().split()\n # parts_pre = []\n # for i in range(len(parts)):\n # js = int(parts[i])\n # parts_pre.append(js)\n # entity.append(parts_pre)\n\n print (' data size = ' ,len(train_datas))\n # print(entity)\n # print(len(entity))\n\n # Split by words\n # x_text = [clean_str(sent) for sent in x_text]\n\n # return [x_datas, np.array(one_hot_labels),np.array(entity)]\n return [x_datas, np.array(one_hot_labels)]", "title": "" }, { "docid": "350ac3a3adca7b81aab6797881dcfa98", "score": "0.64807427", "text": "def load_data(y_name=\"Species\"):\n train_path = tf.keras.utils.get_file(TRAIN_URL.split(\"/\")[-1], TRAIN_URL)\n test_path = tf.keras.utils.get_file(TEST_URL.split(\"/\")[-1], TEST_URL)\n\n train = pd.read_csv(train_path, names=CSV_COLUMN_NAMES, header=0)\n train_x, train_y = train, train.pop(y_name)\n\n test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)\n test_x, test_y = test, test.pop(y_name)\n\n return (train_x, train_y), (test_x, test_y)", "title": "" }, { "docid": "f491d091369d1f8b837ba3eeec1cf627", "score": "0.64620626", "text": "def load_data_and_labels(data_files=[],\n labels=[]):\n data_lengths = []\n x_text = []\n for file in data_files:\n x_temp = list(open(file, \"r\", encoding='utf-8').readlines())\n x_temp = [s.strip() for s in x_temp]\n data_lengths = data_lengths + [len(x_temp)]\n x_text = x_text + x_temp\n \n x_text = [clean_str(sent) for sent in x_text]\n \n # Generate labels\n labels_temp = []\n for i in range(0, len(labels)):\n labels_temp = labels_temp + [[labels[i] for _ in range(0, data_lengths[i])]]\n y = np.concatenate(labels_temp, 0)\n return [x_text, 
y]", "title": "" }, { "docid": "49a98a5344c71e139ac3a82f2ab8778e", "score": "0.6443002", "text": "def load_data(dtype=np.float32, order='F'):\n print(\"Loading dataset...\")\n data = fetch_mldata('MNIST original')\n X = data['data']\n y = data['target']\n X = X / 255\n X, y = unison_shuffled_copies(X, y)\n\n print(\"Creating train-test split...\")\n X_train = X[0:42000]\n y_train = y[0:42000]\n X_test = X[42000:60000]\n y_test = y[42000:60000]\n return X_train, X_test, y_train, y_test", "title": "" }, { "docid": "37cd1b90c8d2c5112488f0f232e66dfa", "score": "0.64367485", "text": "def get_data(data_file, labels_file, mmap=False):\n mmap_mode = None\n if mmap == True:\n mmap_mode = \"r\"\n X_data = np.load(data_file, mmap_mode=mmap_mode)\n y_data = np.load(labels_file, mmap_mode=mmap_mode)\n return X_data, y_data", "title": "" }, { "docid": "69d87166a19acbc8fb255d42ab1bf2e9", "score": "0.64304787", "text": "def learn(self, Xtrain, ytrain):", "title": "" }, { "docid": "69d87166a19acbc8fb255d42ab1bf2e9", "score": "0.64304787", "text": "def learn(self, Xtrain, ytrain):", "title": "" }, { "docid": "69d87166a19acbc8fb255d42ab1bf2e9", "score": "0.64304787", "text": "def learn(self, Xtrain, ytrain):", "title": "" }, { "docid": "69d87166a19acbc8fb255d42ab1bf2e9", "score": "0.64304787", "text": "def learn(self, Xtrain, ytrain):", "title": "" }, { "docid": "fb1e2d15ab38c96ba97ebc8f852feb09", "score": "0.64282364", "text": "def load_data():\n\n filename = 'test_data_{}px.pickle'.format(settings.size[0])\n if settings.reload_data:\n print('Start loading data.')\n x_test = ssd_dataloader.load_ssd(settings.size, settings.test_dir)\n print('SSDs loaded. Start loading resolutions.')\n y_test = ssd_dataloader.load_resos(settings.test_dir)\n pickle.dump([x_test, y_test], open(filename, \"wb\"))\n print('Data saved to disk.')\n else:\n try:\n x_test, y_test = pickle.load(open(filename, \"rb\"))\n print('Data loaded from Pickle')\n except FileNotFoundError:\n print('Start loading data.')\n x_test = ssd_dataloader.load_ssd(settings.size, settings.test_dir)\n print('SSDs loaded. 
Start loading resolutions.')\n y_test = ssd_dataloader.load_resos(settings.test_dir)\n pickle.dump([x_test, y_test], open(filename, \"wb\"))\n print('Data saved to disk.')\n\n y_test = np.clip(y_test, -settings.max_reso, settings.max_reso)\n reso_resolution = 2 * settings.max_reso / settings.num_classes\n y_test = np.round((y_test + settings.max_reso) / reso_resolution, 0)\n y_test = keras.utils.to_categorical(y_test, settings.num_classes + 1) # creates (samples, num_categories) array\n\n # settings.num_samples = 500\n # x_test = x_test[0:settings.num_samples, :, :, :]\n # y_test = y_test[0:settings.num_samples, :]\n\n return x_test, y_test", "title": "" }, { "docid": "589efdc4d8c8b0b34d88596d20173090", "score": "0.642406", "text": "def get_test_data(path: Path):\n X = np.load(path / 'test_X.npy')\n y = np.load(path / 'test_y.npy')\n return X, y", "title": "" }, { "docid": "7b111424a881ffe345b830cb735ddac5", "score": "0.64238846", "text": "def load_data_and_labels():\n # Load data from files\n positive_examples = list(open('../data/rt-polarity.pos', \"r\", encoding='latin-1').readlines())\n positive_examples = [s.strip() for s in positive_examples]\n negative_examples = list(open('../data/rt-polarity.neg', \"r\", encoding='latin-1').readlines())\n negative_examples = [s.strip() for s in negative_examples]\n # Split by words\n x_text = positive_examples + negative_examples\n x_text = [clean_str(sent) for sent in x_text]\n x_text = [s.split(\" \") for s in x_text]\n # Generate labels\n positive_labels = [[0, 1] for _ in positive_examples]\n print(\"Số câu tích cực là positive_labels = \",positive_examples[len(positive_examples)-1], len(positive_examples))\n print(\"Số nhãn tích cực là positive_labels = \",positive_labels[len(positive_labels)-1], len(positive_labels))\n negative_labels = [[1, 0] for _ in negative_examples]\n print(\"Số câu tiêu cực là negative_labels = \",negative_examples[len(negative_examples)-1], len(negative_examples))\n print(\"Số nhãn tiêu cực là negative_labels = \",negative_labels[len(negative_labels)-1], len(negative_labels))\n y = np.concatenate([positive_labels, negative_labels], 0)\n '''\n Số câu tích cực là positive_labels = ...provides a porthole into that noble , trembling incoherence that defines us all . 5331\n Số nhãn tích cực là positive_labels = ...[0, 1] 5331\n Số câu tiêu cực là negative_labels = ...enigma is well-made , but it's just too dry and too placid . 5331\n Số nhãn tiêu cực là negative_labels = ...[1, 0] 5331\n \n x_text = [\n ... 
10659 ['as', 'it', 'stands', ',', 'crocodile', 'hunter', 'has', 'the', 'hurried', ',', 'badly', 'cobbled', 'look', 'of', 'the', '1959', 'godzilla', ',', 'which', 'combined', 'scenes', 'of', 'a', 'japanese', 'monster', 'flick', 'with', 'canned', 'shots', 'of', 'raymond', 'burr', 'commenting', 'on', 'the', 'monster', \"'s\", 'path', 'of', 'destruction']\n 10660 ['the', 'thing', 'looks', 'like', 'a', 'made', 'for', 'home', 'video', 'quickie']\n 10661 ['enigma', 'is', 'well', 'made', ',', 'but', 'it', \"'s\", 'just', 'too', 'dry', 'and', 'too', 'placid']\n ]\n len(x) = 10662\n '''\n return [x_text, y]", "title": "" }, { "docid": "2fd1191b2b222a5325c4c953746893da", "score": "0.64160776", "text": "def get_data(dataset):\n y_test = pd.read_csv(os.path.join(reference_dir, dataset + '_reference_test.csv'))\n y_test = np.array(y_test)\n y_pred = np.genfromtxt(os.path.join(prediction_dir, dataset + '.predict'))\n return y_test, y_pred", "title": "" }, { "docid": "d1cc1b685454dbade412c11ab3906acf", "score": "0.6414204", "text": "def load_data(home_dir, test_path, test_label_path, train_path, \n train_label_path):\n # read in data\n test_X = np.array(pd.read_csv(home_dir+test_path, quotechar='\"', delimiter=\"\\t\", skipinitialspace=True, header=None))\n test_y = np.genfromtxt(home_dir+test_label_path)[:,0]\n train_X = np.array(pd.read_csv(home_dir+train_path, quotechar='\"', delimiter=\"\\t\", skipinitialspace=True, header=None))\n train_y = np.genfromtxt(home_dir+train_label_path)[:,0]\n \n # return numpy arrays of words\n train_data = letter_to_words(train_X, 4)\n test_data = letter_to_words(test_X, 4)\n \n # create tf-idf matrix for train and test\n vectorizer = TfidfVectorizer(lowercase=False)\n train_tfidf = vectorizer.fit_transform(train_data)\n test_tfidf = vectorizer.transform(test_data)\n\n return test_tfidf, train_tfidf, test_X, test_y, train_y", "title": "" }, { "docid": "8a6a248c7ef36d6a034959d83decf063", "score": "0.6413236", "text": "def load_data():\n (X_train, y_train), (X_test, y_test) = cifar10.load_data()\n\n Y_train = np_utils.to_categorical(y_train, 10)\n Y_test = np_utils.to_categorical(y_test, 10)\n\n X_train = X_train.astype('float32')\n X_test = X_test.astype('float32')\n\n # Normalize\n X_train /= 255\n X_test /= 255\n\n return (X_train, Y_train), (X_test, Y_test)", "title": "" }, { "docid": "3e4db53c191290b0ea29c744116ec723", "score": "0.6409692", "text": "def load_y(file_name):\r\n print('Started loading file', file_name)\r\n data = pd.read_csv(file_name)\r\n print('Finished loading the file.')\r\n data = np.array(data)\r\n return np.eye(2)[data[:, 1]]", "title": "" }, { "docid": "69628d98ca3b819126f7c905b19d00ac", "score": "0.64060724", "text": "def load_data(train_path, test_path):\n # train_path, test_path = maybe_download()\n # here the test is really no lable we need to do CV in train part\n train_X = pickle.load(open(train_path, \"rb\")) # (None, 2048)\n # (None, 2048) 2048 features from xception net\n to_predict_X = pickle.load(open(test_path, \"rb\"))\n\n try:\n labels = pd.read_csv(os.path.join(DATASET_DIR, \"labels.csv\"))\n except FileNotFoundError:\n labels = pd.read_csv(os.path.join(DATASET_DIR2, \"labels.csv\"))\n\n labels = labels[\"breed\"].values.tolist() # for all training data\n global SPECIES\n SPECIES = sorted(list(set(labels)))\n _label_id_map = dict((name, index)\n for index, name in enumerate(SPECIES))\n train_y = [_label_id_map[label] for label in labels]\n\n return (train_X, train_y), to_predict_X", "title": "" }, { "docid": 
"a4e9d856d52d2cfc44229296f620daab", "score": "0.6404446", "text": "def __load_train(self):\n # Loading from all csv files; * is HMB\n file_paths = glob.glob(os.path.join(self.data_dir_train,'*/center.csv'))\n Xs = np.zeros((0, self.seq_len, 3)) # (N,T,3)\n y = np.zeros((0, 2)) # (N,2)\n\n for file_path in sorted(file_paths):\n path_prefix = os.path.dirname(file_path)\n # load label\n data = pd.read_csv(file_path)\n img_names = data['filename'].values # relative path\n point_names = data['point_filename'].values\n assert len(img_names) == len(point_names)\n \n # combine sequential image path and point path\n xs = [] # (Ni,T,2)\n for i in range(len(img_names)):\n if i < (self.seq_len-1):\n continue\n xt = [] # (T,2)\n for t in reversed(range(self.seq_len)):\n xt.append([os.path.join(path_prefix, img_names[i-t]), \n os.path.join(path_prefix, 'points_bin', point_names[i-t][:-3]+'bin'),\n 0.0]) # CAM_FLAG=0 \n xs.append(xt)\n \n # scale label\n angle = data['angle'].values[self.seq_len-1:] # n-(self.seq_len-1)\n speed = data['speed'].values[self.seq_len-1:]\n angle_s = self.scale_label(angle, y_min=-2.0, y_max=2.0, a=-1.0, b=1.0)\n speed_s = self.scale_label(speed, y_min=0.0, y_max=30.0, a=-1.0, b=1.0)\n ys = np.stack([angle_s, speed_s], axis=1)\n \n # concatenate all data\n assert len(xs) == len(ys) == (len(img_names)-self.seq_len+1)\n Xs = np.concatenate((Xs,xs), axis=0)\n y = np.concatenate((y, ys), axis=0)\n print(\"Loading data from {}: {}\".format(file_path, len(xs)))\n\n if self.use_side_cam:\n file_paths_left = glob.glob(os.path.join(self.data_dir_train,'*/left.csv'))\n \n for file_path_left in sorted(file_paths_left):\n path_prefix_left = os.path.dirname(file_path_left)\n # load label\n data_left = pd.read_csv(file_path_left)\n img_names_left = data_left['filename'].values # relative path\n point_names_left = data_left['point_filename'].values\n assert len(img_names_left) == len(point_names_left)\n \n # combine sequential image path and point path\n xs_left = [] # (Ni,T,3)\n for i in range(len(img_names_left)):\n if i < (self.seq_len-1):\n continue\n xt_left = [] # (T,3)\n for t in reversed(range(self.seq_len)):\n xt_left.append([os.path.join(path_prefix_left, img_names_left[i-t]), \n os.path.join(path_prefix_left, 'points_bin', point_names_left[i-t][:-3]+'bin'),\n -1.0]) \n xs_left.append(xt_left)\n \n # scale label\n angle_left = data_left['angle'].values[self.seq_len-1:] # n-(self.seq_len-1)\n speed_left = data_left['speed'].values[self.seq_len-1:]\n angle_left_adj = self.__camera_adjust(angle_left, speed_left, camera='left')\n\n angle_left_s = self.scale_label(angle_left_adj, y_min=-2.0, y_max=2.0, a=-1.0, b=1.0)\n speed_left_s = self.scale_label(speed_left, y_min=0.0, y_max=30.0, a=-1.0, b=1.0)\n ys_left = np.stack([angle_left_s, speed_left_s], axis=1)\n \n # concatenate all data\n assert len(xs_left) == len(ys_left) == (len(img_names_left)-self.seq_len+1)\n Xs = np.concatenate((Xs,xs_left), axis=0)\n y = np.concatenate((y, ys_left), axis=0)\n print(\"Loading data from {}: {}\".format(file_path_left, len(xs_left)))\n \n ## Load right camera data\n file_paths_right = glob.glob(os.path.join(self.data_dir_train,'*/right.csv'))\n \n for file_path_right in sorted(file_paths_right):\n path_prefix_right = os.path.dirname(file_path_right)\n # load label\n data_right = pd.read_csv(file_path_right)\n img_names_right = data_right['filename'].values # relative path\n point_names_right = data_right['point_filename'].values\n assert len(img_names_right) == len(point_names_right)\n \n # combine 
sequential image path and point path\n xs_right = [] # (Ni,T,2)\n for i in range(len(img_names_right)):\n if i < (self.seq_len-1):\n continue\n xt_right = [] # (T,2)\n for t in reversed(range(self.seq_len)):\n xt_right.append([os.path.join(path_prefix_right, img_names_right[i-t]), \n os.path.join(path_prefix_right, 'points_bin', point_names_right[i-t][:-3]+'bin'),\n 1.0]) \n xs_right.append(xt_right)\n \n # scale label\n angle_right = data_right['angle'].values[self.seq_len-1:]\n speed_right = data_right['speed'].values[self.seq_len-1:]\n angle_right_adj = self.__camera_adjust(angle_right, speed_right, camera='right')\n\n angle_right_s = self.scale_label(angle_right_adj, y_min=-2.0, y_max=2.0, a=-1.0, b=1.0)\n speed_right_s = self.scale_label(speed_right, y_min=0.0, y_max=30.0, a=-1.0, b=1.0)\n ys_right = np.stack([angle_right_s, speed_right_s], axis=1)\n \n # concatenate all data\n assert len(xs_right) == len(ys_right) == (len(img_names_right)-self.seq_len+1)\n Xs = np.concatenate((Xs,xs_right), axis=0)\n y = np.concatenate((y, ys_right), axis=0)\n print(\"Loading data from {}: {}\".format(file_path_right, len(xs_right)))\n\n if self.balance_angle or self.balance_speed:\n Xs, y = self.balance_data(Xs, y, \n self.balance_angle, \n self.balance_speed,\n bin_count=20,\n fix_times=1)\n # visualize label distribution\n #self.label_distribution(y)\n\n # split data\n self.Xs_train, self.Xs_val, self.y_train, self.y_val = train_test_split(Xs, y, test_size=self.val_ratio, random_state=10, shuffle=True)\n\n self.num_train = len(self.Xs_train)\n self.num_val = len(self.y_val)\n print(\"Train set: {}; Val set: {}\".format(self.num_train, self.num_val))", "title": "" }, { "docid": "2e0edf41c6be50060476d31ad032b4b1", "score": "0.6399697", "text": "def load_datas():\n print('\\nLoading data sets')\n data = load_dataset('train.csv')\n data_modified = data[1:, :]\n train_set_x_orig = data_modified[1:39999, 1:]\n train_set_y_orig = data_modified[1:39999, 0]\n test_set_x_orig = data_modified[40000:, 1:]\n test_set_y_orig = data_modified[40000:, 0]\n\n # turning Xs to transpose for convention\n train_set_x_orig = train_set_x_orig.T\n test_set_x_orig = test_set_x_orig.T\n\n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\n print('\\nFinished loading data sets')\n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig", "title": "" }, { "docid": "0db38f04e597a4d827dd6cbb31430ad9", "score": "0.6398212", "text": "def load_data(y_name='Species'):\n train_path, test_path = maybe_download()\n\n train = pd.read_csv(train_path, names=CSV_COLUMN_NAMES, header=0)\n train_x, train_y = train, train.pop(y_name)\n\n test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)\n test_x, test_y = test, test.pop(y_name)\n\n return (train_x, train_y), (test_x, test_y)", "title": "" }, { "docid": "0db38f04e597a4d827dd6cbb31430ad9", "score": "0.6398212", "text": "def load_data(y_name='Species'):\n train_path, test_path = maybe_download()\n\n train = pd.read_csv(train_path, names=CSV_COLUMN_NAMES, header=0)\n train_x, train_y = train, train.pop(y_name)\n\n test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)\n test_x, test_y = test, test.pop(y_name)\n\n return (train_x, train_y), (test_x, test_y)", "title": "" }, { "docid": "49d852b5aff5de11a8afba274f435c11", "score": "0.6384664", "text": "def load_data():\r\n\r\n train_data = pd.read_csv(\"train.csv\")\r\n test_data = 
pd.read_csv(\"test.csv\")\r\n train_x = train_data.iloc[:, 0:595]\r\n train_y = train_data.iloc[:, -1]\r\n\r\n return train_x, train_y, test_data", "title": "" }, { "docid": "be59eec4e831b5e65245ed0a8d8d394d", "score": "0.63831085", "text": "def load_and_prepare_data():\n ecoding_dir = cfg.encoding_dir\n label_dictionary = cfg.label_dictionary\n classes = {}\n train_data = []\n label_data = []\n for index, files in enumerate(os.listdir(ecoding_dir)):\n class_name = os.path.splitext(files)[0]\n class_data = np.load(os.path.join(ecoding_dir, files))\n if class_data.shape[0] != 0:\n classes[class_name] = index\n print class_name, class_data.shape\n no_of_rows = class_data.shape[0]\n label_list = [classes[class_name]] * no_of_rows\n assert no_of_rows == len(label_list)\n label_list = np.array(label_list)[:, np.newaxis]\n train_data.append(class_data)\n label_data.append(label_list)\n assert len(train_data) == len(label_data)\n train_data_npy = np.vstack(train_data)\n label_data_npy = np.vstack(label_data)\n pickle.dump(classes,open(label_dictionary, 'wb'))\n print \"total no of training data %d and label_data %d \" % (train_data_npy.shape[0], label_data_npy.shape[0])\n return train_data_npy, label_data_npy , classes", "title": "" }, { "docid": "526fe26fe6228899f5e2352ab0323fba", "score": "0.6381028", "text": "def load_data(filename, train=True):\n X = []\n y = []\n with open(filename) as f:\n for line in f:\n if (train):\n # remove \\n, split on space, separate into label and weights\n X.append(line.strip().split(' ')[1:])\n y.append(line.strip().split(' ')[0])\n else:\n X.append(line.strip().split(' '))\n \n # convert to np, cast to int, and remove the headers\n X = np.asarray(X[1:]).astype(int)\n if (train):\n y = np.asarray(y[1:]).astype(int)\n \n return X, y", "title": "" }, { "docid": "0a244932f40d931f0714ede19359b5e0", "score": "0.6379256", "text": "def load_data(train_set_path='data/wine_train.csv', \n train_labels_path='data/wine_train_labels.csv', \n test_set_path='data/wine_test.csv',\n test_labels_path='data/wine_test_labels.csv'):\n \n train_set = np.loadtxt(train_set_path, delimiter=',')\n train_labels = np.loadtxt(train_labels_path, delimiter=',', dtype=np.int)\n test_set = np.loadtxt(test_set_path, delimiter=',')\n test_labels = np.loadtxt(test_labels_path, delimiter=',', dtype=np.int)\n \n return train_set, train_labels, test_set, test_labels", "title": "" }, { "docid": "d1c817fd611fafc616d33db12e74604c", "score": "0.6377518", "text": "def load_data(y_name='Species'):\n text_file_names = glob.glob(train_path)\n texts = \"\"\n for text_file_name in text_file_names:\n text_file = open(text_file_name)\n texts+= text_file.read()\n text_file.close()\n\n #train_data = tf.data.TextLineDataset(text_files)\n\n #add .split() to switch from char to word model\n return texts", "title": "" }, { "docid": "51bd569d5d4303311d8021a8eb319ad4", "score": "0.6373919", "text": "def load_data():\n\t# Load the dataset\n\tmnist = tf.keras.datasets.mnist\n\t(x_train, y_train),(x_test, y_test) = mnist.load_data()\n\n\t# Flatten: reshape images into a 1D array\n\tx_train = x_train.reshape((x_train.shape[0], -1))\n\tx_test = x_test.reshape((x_test.shape[0], -1))\n\n\t# Normalization: change pixel values from [0,255] to [0,1]\n\tx_train, x_test = x_train / 255.0, x_test / 255.0\n\n\treturn x_train, y_train, x_test, y_test", "title": "" }, { "docid": "6a5f6dd182955b8dfffa1563db6ae294", "score": "0.6364192", "text": "def load_regression_data(bias=False):\n data = np.load('ames.npz')\n train_X = 
data[\"Xtrain\"]\n test_X = data[\"Xtest\"]\n train_y = data[\"ytrain\"].reshape(-1)\n test_y = data[\"ytest\"].reshape(-1)\n if bias:\n train_X = np.hstack((train_X, np.ones((train_X.shape[0], 1))))\n test_X = np.hstack((test_X, np.ones((test_X.shape[0], 1))))\n return {\"train\": (train_X, train_y),\n \"test\": (test_X, test_y)}", "title": "" }, { "docid": "ae1b5bac5fd5a190db74b13c099d2deb", "score": "0.6363316", "text": "def load_data(path, negatives=False):\n\n num_train_samples = 50000\n\n x_train_local = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')\n y_train_local = np.empty((num_train_samples,), dtype='uint8')\n\n for i in range(1, 6):\n fpath = os.path.join(path, 'data_batch_' + str(i))\n (x_train_local[(i - 1) * 10000: i * 10000, :, :, :],\n y_train_local[(i - 1) * 10000: i * 10000]) = load_batch(fpath)\n\n fpath = os.path.join(path, 'test_batch')\n x_test_local, y_test_local = load_batch(fpath)\n\n y_train_local = np.reshape(y_train_local, (len(y_train_local), 1))\n y_test_local = np.reshape(y_test_local, (len(y_test_local), 1))\n\n if negatives:\n x_train_local = x_train_local.transpose(0, 2, 3, 1).astype(np.float32)\n x_test_local = x_test_local.transpose(0, 2, 3, 1).astype(np.float32)\n else:\n x_train_local = np.rollaxis(x_train_local, 1, 4)\n x_test_local = np.rollaxis(x_test_local, 1, 4)\n\n return (x_train_local, y_train_local), (x_test_local, y_test_local)", "title": "" }, { "docid": "2dbcd784aa130e05b2766b1cd9bee482", "score": "0.6361858", "text": "def load_dataset(filename):\n\tif not os.path.exists(filename):\n\t\tsys.exit('\\n.... Oops ERROR: There is NO data file !?!?!? \\n')\n\twith open(path + '/' + filename, 'r', encoding=\"ISO-8859-1\") as f:\n\t\tfile = f.readlines()\n\t\tf.close()\n\n\ty1 = []\n\ty2 = []\n\t# y = [[],[]]\n\tX = []\n\tfor line in file:\n\t\tline = line.replace(':', ' ')\n\t\tline = line.strip().split()\n\n\t\t# y1.append(line[0])\n\t\t# y2.append(line[1])\n\t\ty1.append(line[0])\n\t\ty2.append(line[1])\n\t\tX.append(' '.join(line[2:]))\n\treturn X, y1, y2", "title": "" }, { "docid": "fa6a36168bb5134cd1bd6f5c0dfef9d5", "score": "0.6361615", "text": "def load_data(data_dir):\n\t\n\ttrain_batches_data=[]\n\ttrain_batches_labels=[]\n\tfor batch in os.listdir(data_dir):\n\t\tpath=os.path.join(data_dir,batch)\n\t\tif(len(batch.split(\".\"))==1):\n\t\t\tbatch_f=open(path,\"rb\")\n\t\t\tbatch=pickle.load(batch_f,encoding='bytes')\n\t\t\ttrain_batches_data.append(batch[b'data'])\n\t\t\ttrain_batches_labels.append(batch[b'labels'])\n\tx_test=train_batches_data.pop(-1)\n\ty_test=train_batches_labels.pop(-1)\n\t\n\tx_train= np.reshape(train_batches_data,(50000,3072))\n\ty_train= np.reshape(train_batches_labels,(50000,))\n\n\ty_train_new=np.zeros((50000,10))\n\ty_test_new= np.zeros((10000,10))\n\n\t#converting labels to one-hot encoding for mixup training\n\n\tfor i in range(len(y_train)):\n\t\ttemp=tf.keras.utils.to_categorical(y_train[i], num_classes=10)\n\t\ty_train_new[i]=temp\n\n\tfor i in range(len(y_test)):\n\t\ttemp=tf.keras.utils.to_categorical(y_test[i], num_classes=10)\n\t\ty_test_new[i]=temp\n\n\treturn x_train, y_train_new, x_test, y_test_new", "title": "" }, { "docid": "d86b8e4ed03d2c2efafdb195dee458f4", "score": "0.6352345", "text": "def load_data():\n df = pd.read_csv('spam.csv', sep=' ',header=None)\n data = df[df.columns[0:57]]\n data_scaled = preprocessing.scale(data)\n data_scaled = pd.DataFrame(data_scaled)\n target = df[df.columns[57]]\n target = target.apply(convert_label)\n x_train, x_test, y_train, y_test = 
train_test_split(data_scaled, target, test_size=0.2, random_state=1)\n y_train = np.asarray(y_train)\n x_train = np.asarray(x_train)\n return x_train, x_test, y_train, y_test", "title": "" }, { "docid": "10b25ad21822deebfce973d96743f3ea", "score": "0.63518924", "text": "def load_data_and_labels(pos_data_file, neg_data_file):\n # Load data from files.\n pos_data = np.load(pos_data_file)\n neg_data = np.load(neg_data_file)\n x_data = np.concatenate([pos_data, neg_data], 0)\n # Generate labels.\n pos_labels = [[0, 1] for _ in pos_data]\n neg_labels = [[1, 0] for _ in neg_data]\n y = np.concatenate([pos_labels, neg_labels], 0)\n return [x_data, y]", "title": "" }, { "docid": "7cb32c4afe67dd11633864056b3cacc0", "score": "0.63501465", "text": "def load_data(self):\n train_set_list, test_set_list = [], []\n\n with open(os.path.join(self.data_dir, 'train.txt')) as train_file:\n train_set = []\n last_num = 0\n for idx, line in enumerate(train_file.readlines()):\n num, val = line.strip().split(',')\n num, val = int(num), float(val)\n if idx > 0 and num != last_num + 1 and len(train_set) > 0:\n train_set_list.append(train_set)\n train_set = []\n train_set.append((num, val))\n last_num = num\n train_set_list.append(train_set)\n\n with open(os.path.join(self.data_dir, 'test.txt')) as test_file:\n test_set = []\n last_num = 0\n for idx, line in enumerate(test_file.readlines()):\n num, val = line.strip().split(',')\n num, val = int(num), float(val)\n if idx > 0 and num != last_num + 1 and len(test_set) > 0:\n test_set_list.append(test_set)\n test_set = []\n test_set.append((num, val))\n last_num = num\n test_set_list.append(test_set)\n\n self.train_set_list = np.array(train_set_list)\n self.test_set_list = np.array(test_set_list)", "title": "" }, { "docid": "18734756e07630aacd23461166f576d8", "score": "0.6347016", "text": "def load_data_and_labels(positive_data_file, negative_data_file, good_data_file):\n # Load data from files\n positive_examples = list(open(positive_data_file, \"r\").readlines())\n positive_examples = [s.strip() for s in positive_examples]\n\n negative_examples = list(open(negative_data_file, \"r\").readlines())\n negative_examples = [s.strip() for s in negative_examples]\n\n good_examples = list(open(good_data_file, \"r\").readlines())\n good_examples = [s.strip() for s in good_examples]\n\n\n # Split by words\n x_text = positive_examples + negative_examples + good_examples\n x_text = [clean_str(sent) for sent in x_text]\n\n\n # Generate labels\n positive_labels = [[0, 1, 0] for _ in positive_examples]\n negative_labels = [[1, 0, 0] for _ in negative_examples]\n good_labels = [[0, 0, 1] for _ in good_examples]\n\n y = np.concatenate([positive_labels, negative_labels, good_labels], 0)\n return [x_text, y]", "title": "" }, { "docid": "3f513f46a6fb20e0c0c2ff44ca653315", "score": "0.6345546", "text": "def train(self, X, y):", "title": "" }, { "docid": "14263e8fba76561ff3b3e01e0bf1a309", "score": "0.63455456", "text": "def load_data(self):\n if self.mtype == 'myDNN':\n data = pd.read_csv(path.join(config[\"data_directory\"],\n config[\"master_data\"])).dropna()\n data = data.replace({'Speech': 0, 'Music': 1})\n self.features = data.drop(['Filename', 'Label'], axis=1).values\n self.labels = data.loc[:, ['Label']].values\n\n elif self.mtype == 'myCNN':\n self.features = np.load(path.join(config[\"data_directory\"],\n config[\"cnn_X_data\"]))\n self.labels = np.load(path.join(config[\"data_directory\"],\n config[\"cnn_y_data\"]))", "title": "" }, { "docid": "ee8eac3bd41cc992f6bc560395c3e62e", 
"score": "0.6342397", "text": "def load_data_single():\n data = pd.read_csv('data/ex1data1.txt', header=None, names=['Population', 'Profit'])\n data.insert(0, 'Ones', 1)\n\n # Set X (training data) and y (target values)\n X = data[['Ones', 'Population']]\n y = data['Profit']\n\n # Convert to numpy array\n X = np.array(X.values)\n y = np.array(y.values).reshape((-1, 1))\n\n return X, y", "title": "" }, { "docid": "ffc9b55468566498980bbd591e73ea69", "score": "0.63412887", "text": "def loadData(fileName):\r\n inFile = open(fileName, 'r') #read-only way\r\n \r\n outFile = open(\"testOutFile\", 'w')\r\n \r\n times = []\r\n loss = []\r\n entropy = []\r\n # references for easier and more explicit usage of variables\r\n x = times\r\n y1 = loss\r\n y2 = entropy\r\n \"\"\"\r\n xmax = 0\r\n y1max = 0\r\n y2max = 0\r\n \"\"\"\r\n count = 0\r\n for line in inFile:\r\n if line is '\\n':\r\n continue\r\n \r\n count += 1\r\n trainingSet = line.split(',')\r\n if count == 1:\r\n continue\r\n xtemp = int(trainingSet[0])\r\n x.append(xtemp)\r\n y1temp = float(trainingSet[1])\r\n y1.append(y1temp)\r\n y2temp = float(trainingSet[2])\r\n y2.append(y2temp)\r\n# xmax = xtemp\r\n \r\n # test output\r\n \"\"\"\r\n outFile.write(str(xtemp))\r\n outFile.write(' ')\r\n outFile.write(str(y1temp))\r\n outFile.write(' ')\r\n outFile.write(str(y2temp))\r\n outFile.write('\\n')\r\n \"\"\"\r\n# print(x[len(x) - 1], y1[len(y1) - 1], y2[len(y2) - 1])\r\n \"\"\"\r\n y1num = [float(y1[i]) for i in range(len(y1))]\r\n y1max = max(y1num)\r\n y1maxIndex = argmax(y1num)\r\n y2num = [float(y2[i]) for i in range(len(y2))]\r\n y2max = max(y2num)\r\n y2maxIndex = argmax(y2num)\r\n \"\"\"\r\n \"\"\"\r\n print(y1max)\r\n print(y2max)\r\n \"\"\"\r\n inFile.close()\r\n outFile.close()\r\n return (x, y1, y2)", "title": "" }, { "docid": "cae17c89044194935c8989033474cad5", "score": "0.6332535", "text": "def load_data(path, negatives=False):\r\n\r\n num_train_samples = 50000\r\n\r\n x_train_local = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')\r\n y_train_local = np.empty((num_train_samples,), dtype='uint8')\r\n\r\n for i in range(1, 6):\r\n fpath = os.path.join(path, 'data_batch_' + str(i))\r\n (x_train_local[(i - 1) * 10000: i * 10000, :, :, :],\r\n y_train_local[(i - 1) * 10000: i * 10000]) = load_batch(fpath)\r\n\r\n fpath = os.path.join(path, 'test_batch')\r\n x_test_local, y_test_local = load_batch(fpath)\r\n\r\n y_train_local = np.reshape(y_train_local, (len(y_train_local), 1))\r\n y_test_local = np.reshape(y_test_local, (len(y_test_local), 1))\r\n\r\n if negatives:\r\n x_train_local = x_train_local.transpose(0, 2, 3, 1).astype(np.float32)\r\n x_test_local = x_test_local.transpose(0, 2, 3, 1).astype(np.float32)\r\n else:\r\n x_train_local = np.rollaxis(x_train_local, 1, 4)\r\n x_test_local = np.rollaxis(x_test_local, 1, 4)\r\n\r\n return (x_train_local, y_train_local), (x_test_local, y_test_local)", "title": "" } ]
c9546f4c5d727301dbd5d933b1aa8c44
Return True if the coordinate zoom is within the textual range. Range might look like "1-10" or just "5".
[ { "docid": "5dae3fbc0dbe04dd9f431545d1a8e6dd", "score": "0.70948666", "text": "def in_zoom(coord, range):\r\n zooms = search(\"^(\\d+)-(\\d+)$|^(\\d+)$\", range)\r\n \r\n if not zooms:\r\n raise Core.KnownUnknown(\"Bad zoom range in a Sandwich Layer: %s\" % repr(range))\r\n \r\n min_zoom, max_zoom, at_zoom = zooms.groups()\r\n \r\n if min_zoom is not None and max_zoom is not None:\r\n min_zoom, max_zoom = int(min_zoom), int(max_zoom)\r\n\r\n elif at_zoom is not None:\r\n min_zoom, max_zoom = int(at_zoom), int(at_zoom)\r\n\r\n else:\r\n min_zoom, max_zoom = 0, float('inf')\r\n \r\n return min_zoom <= coord.zoom and coord.zoom <= max_zoom", "title": "" } ]
[ { "docid": "ee8bba99b5ecbbc0604f016688a089fc", "score": "0.6414671", "text": "def is_latlongzoom(instr):\n return (re.match(r'^\\s*[0-9.+-]+\\s*,\\s*[0-9.+-]+\\s*,\\s*[0-9.+-]+\\s*$', instr) != None)", "title": "" }, { "docid": "22f45d12935bef870772e66811690144", "score": "0.63187474", "text": "def in_zoom(self, zoom):\r\n return self.min_zoom <= zoom and zoom <= self.max_zoom", "title": "" }, { "docid": "2eae20f77877a8880ab9ecfb5c3dd67d", "score": "0.61820006", "text": "def like_ons_geography(xycell):\n if isinstance(xycell.value, str) and xycell.value[0].isalpha() and xycell.value[0].isupper() and xycell.value[1:].isnumeric() and len(str(xycell.value)) == 9:\n return True\n else:\n return False", "title": "" }, { "docid": "f898588af9c438eec3886da01ac96d12", "score": "0.599818", "text": "def in_ranges(char, ranges):\n return any(start <= char and char <= stop for start, stop in ranges)", "title": "" }, { "docid": "4a0d4e05fb5d008156b65d274c500e23", "score": "0.5820936", "text": "def _isCompleteIndiceString(self, indice, range):\r\n if indice == '*':\r\n return True\r\n if indice.count(':') != 1:\r\n return False\r\n start, stop = indice.split(':')\r\n return float(start) == range[0] and float(stop) == range[1]", "title": "" }, { "docid": "36f1e2a8568aa61a0a7909a700ac04e2", "score": "0.5817565", "text": "def can_reach_square(start_elevation: str, end_elevation: str) -> bool:\n if start_elevation == \"S\":\n start_elevation = \"a\"\n if end_elevation == \"E\":\n end_elevation = \"z\"\n if ord(end_elevation) - ord(start_elevation) <= 1:\n return True\n return ord(start_elevation) > ord(end_elevation)", "title": "" }, { "docid": "5e730bcec34914804bfb264488452961", "score": "0.57916176", "text": "def covered_by(self, span):\n if isinstance(span, TextSpan):\n return span.start <= self.start and span.stop >= self.stop\n elif isinstance(span, tuple):\n start, stop = span\n return start <= self.start and stop >= self.stop\n else:\n raise NotImplementedError(\"Unknown span type: %s\" % repr(span))", "title": "" }, { "docid": "41a8001a75429af176fbc704ccab24b9", "score": "0.5758876", "text": "def _is_fit(self, text: str, width: int) -> bool:\n return self.font.getsize_multiline(text)[0] <= width", "title": "" }, { "docid": "8653b8b221698f7264b39733af485d11", "score": "0.56321204", "text": "def is_elliptical_range(string):\n return re.match('^\\d{4}-\\d{2}(-\\d{2})?' \\\n '( \\(\\D{2,}\\))? 
bis \\d{2}(-\\d{2})?', string)", "title": "" }, { "docid": "a7c011e462b082ddfcc572616072ff87", "score": "0.55992955", "text": "def isArabicrange(text):\n if re.search(u\"([^\\u0600-\\u06ff\\ufb50-\\ufdff\\ufe70-\\ufeff\\u0750-\\u077f])\",text):\n return False;\n return True;", "title": "" }, { "docid": "a2b11bc22bea54d80bbba99e38917464", "score": "0.55850697", "text": "def is_in_bounds(value, min, max):\n if (min <= value and value <= max):\n return True\n return False", "title": "" }, { "docid": "a45b7e707febdae85a3e47159d76e44e", "score": "0.5581029", "text": "def validate_hgt(x):\n if x[-2:] == 'cm' and int(x[:-2]) >= 150 and int(x[:-2]) <= 193:\n return True\n if x[-2:] == 'in' and int(x[:-2]) >= 59 and int(x[:-2]) <= 76:\n return True\n return False", "title": "" }, { "docid": "d36d8f78948fa77e3b4f069100ede53d", "score": "0.55653226", "text": "def check_hgt(hgt_entry):\r\n if hgt_entry[-2:] == 'cm':\r\n if 150 <= int(hgt_entry[:-2]) <= 193:\r\n return True\r\n if hgt_entry[-2:] == 'in':\r\n if 59 <= int(hgt_entry[:-2]) <= 76:\r\n return True", "title": "" }, { "docid": "97f63ae9f89e9f4399b5470ccbb7d270", "score": "0.5549433", "text": "def has_range(self):\n return self.min_value and self.max_value", "title": "" }, { "docid": "736428b7e79cbdd31661ff45367ee56a", "score": "0.5549011", "text": "def within_bounds(self, point):\n return all([l <= x < u for x, (l, u) in zip(point, self.ranges)])", "title": "" }, { "docid": "bf5c0596784c3f56eac6af70e772d39b", "score": "0.552655", "text": "def outOfRange(chr, start, end, region):\n if region['chr'] != chr:\n return True\n elif (start < region['start']) or (start > region['end']):\n return True\n elif (end <= region['start']) or (end > region['end']):\n return True\n return False", "title": "" }, { "docid": "aa7e8ed6c81bd510aa973617365c3743", "score": "0.5520401", "text": "def _test_range(self, value, row, column):\n result = True\n try:\n if self.minValue is not None:\n if float(value) < float(self.minValue):\n result = self._report_error(\"Value is less than \" + str(self.minValue), value, row, column)\n if self.maxValue is not None:\n if float(value) > float(self.maxValue):\n result = self._report_error(\"Value is great than \" + str(self.maxValue), value, row, column)\n except ValueError:\n result = False\n return result", "title": "" }, { "docid": "045d36ad1e492599fed2fda475c077bd", "score": "0.5491448", "text": "def test_range_for_string_in_utterance(self):\r\n\r\n word = \"an\"\r\n utterance =\"This is an another example, and an annotation\"\r\n\r\n start_at_pos = 20\r\n\r\n range = self.annotationtree._range_for_string_in_utterance(word, utterance, start_at_pos)\r\n\r\n assert(range == (32,34))", "title": "" }, { "docid": "b4fc0febe844ca8a1f149e554984d39a", "score": "0.5481067", "text": "def is_latlong(instr):\n return (re.match(r'^\\s*[0-9.+-]+\\s*,\\s*[0-9.+-]+\\s*$', instr) != None)", "title": "" }, { "docid": "0ca23b8e984bc9ce7abdb2b18b97efa0", "score": "0.54791814", "text": "def has_cjk(self):\n cjk_codepoint_ranges = [\n (4352, 4607), (11904, 42191), (43072, 43135), (44032, 55215),\n (63744, 64255), (65072, 65103), (65381, 65500), (131072, 196607)]\n for char in self.word:\n if any([start <= ord(char) <= end\n for start, end in cjk_codepoint_ranges]):\n return True\n return False", "title": "" }, { "docid": "cec9e451416d4355568ba43c5423c275", "score": "0.54748535", "text": "def in_bounds(self, coord):\n (x,y) = coord\n return 0 <= x < self.width and 0 <= y < self.height", "title": "" }, { "docid": 
"54e2df6eb6f9f7aeb769358dc2b010e5", "score": "0.5458721", "text": "def _is_bounds(self):\n key = self.data.name\n coords = self.data.coords\n sentinel = object()\n return any(key == da.attrs.get('bounds', sentinel) for da in coords.values())", "title": "" }, { "docid": "0637e2d0066837720dc6d1bae3eaa778", "score": "0.54300165", "text": "def inside_bounding_box(bb_minlat, bb_minlon, bb_maxlat, bb_maxlon, start_lat, start_lon, end_lat, end_lon):\n return (bb_minlat <= start_lat <= bb_maxlat and bb_minlon <= start_lon <= bb_maxlon) and \\\n (bb_minlat <= end_lat <= bb_maxlat and bb_minlon <= end_lon <= bb_maxlon)", "title": "" }, { "docid": "0d5e5b29a4d49a9d914e5c3a5223def8", "score": "0.5409327", "text": "def is_simple_range(string):\n return re.match('(\\d{4}-\\d{2}-\\d{2}|\\d{4}-\\d{2}|\\d{4})' \\\n '( \\(.{2,}\\))?' \\\n ' ?- ?' \\\n '(\\d{4}-\\d{2}-\\d{2}|\\d{4}-\\d{2}|\\d{4})', string)", "title": "" }, { "docid": "e01c599533c4c3a5628c8b5f02b5999a", "score": "0.54060817", "text": "def within_range(num, min, max):\r\n return num >= min and num <= max", "title": "" }, { "docid": "e01c599533c4c3a5628c8b5f02b5999a", "score": "0.54060817", "text": "def within_range(num, min, max):\r\n return num >= min and num <= max", "title": "" }, { "docid": "b39d0ae3b34133b75e830b05dae2ddd8", "score": "0.54036444", "text": "def coordinates_in_bounds(self, coordinates):\n\n (row, col) = coordinates\n return ((row >= 0 and row < self.height) and\n (col >= 0 and col < self.width))", "title": "" }, { "docid": "7370d714eb3e7333438e52a9126f7e83", "score": "0.5401218", "text": "def check_bounds(self, x, y):\n return 0 <= x < self.map_x and 0 <= y < self.map_y", "title": "" }, { "docid": "1e31eb83e6b77e77d3ee54adc7aed287", "score": "0.53987", "text": "def test_coordinated_range(self):\n syllabifier = indian_syllabifier.Syllabifier('hindi')\n current = syllabifier.get_offset('न', 'hi')\n current1 = syllabifier.in_coordinated_range_offset(current)\n self.assertTrue(current1)", "title": "" }, { "docid": "9160c431e26beedb46f50b9897c0d8e5", "score": "0.53883976", "text": "def in_bounds(self, x: int, y: int) -> bool:\n return 0 <= x < self.width and 0 <= y < self.height", "title": "" }, { "docid": "78d77273b7ba3d2d9c5194ce36647962", "score": "0.5380503", "text": "def is_within_bounds(self, point):\n return (0 <= point[0] < self.width) and (0 <= point[1] < self.height)", "title": "" }, { "docid": "2823b18c2c4186d1fbeb99fb17fc2b9d", "score": "0.53566855", "text": "def cotain_chinese(text):\n for char in text:\n if '\\u4e00' <= char <= '\\u9fa5':\n return True\n return False", "title": "" }, { "docid": "693f69edcb31f25b9d330d029a51e6fd", "score": "0.5346687", "text": "def in_region(pos, b, e):\n return b <= pos and pos <= e", "title": "" }, { "docid": "6038a84e77b837b7f976795bdfec1335", "score": "0.5341024", "text": "def _is_in_region(self, box):\n (o_xmin, o_ymin, o_xmax, o_ymax) = (box[0], box[1], box[2], box[3])\n (r_xmin, r_ymin, r_xmax, r_ymax) = (self._region[0], self._region[1],\n self._region[2], self._region[3])\n overlap_region = max(0.0, min(o_xmax, r_xmax) - max(o_xmin, r_xmin)) \\\n * max(0.0, min(o_ymax, r_ymax) - max(o_ymin, r_ymin))\n return overlap_region > 0.0", "title": "" }, { "docid": "9f5d61dd4f850ad919f84c351c663a1b", "score": "0.53352785", "text": "def canTransform(self, start: str, end: str) -> bool:\n if len(start) != len(end):\n return False\n start_list = [(i, s) for i, s in enumerate(start) if s == 'L' or s == 'R']\n end_list = [(j, e) for j, e in enumerate(end) if e == 'L' or e == 'R']\n if 
len(start_list) != len(end_list):\n return False\n \n for start_ele, end_ele in zip(start_list, end_list):\n i, s = start_ele\n j, e = end_ele\n if s != e:\n return False\n if s == 'L':\n if i < j:\n return False\n if s == 'R':\n if i > j:\n return False\n return True", "title": "" }, { "docid": "e7dc756a57b1544b013861ab443c6a7e", "score": "0.5333804", "text": "def ComputeStringBounds(self, string, p_float=..., p_float=..., p_float=..., p_float=...):\n ...", "title": "" }, { "docid": "8cda903e5dca0bb175cd8b3821506ac3", "score": "0.5324436", "text": "def in_bounds(self, row, col):\n # first check row\n if row < 0 or row >= self.NUM_ROWS:\n return False\n # next check col\n if col < 0 or col >= self.NUM_COLS:\n return False\n return True # Is in bounds ", "title": "" }, { "docid": "c63460c49b616334187aa73314bb14c9", "score": "0.53196305", "text": "def contains(self, extent, res=None):\n # test raw bounds\n if (not extent.srs.IsSame(self.srs) or\n extent.xMin < self.xMin or extent.yMin < self.yMin or\n extent.xMax > self.xMax or extent.yMax > self.yMax):\n return False\n\n if (res):\n # unpack resolution\n try:\n dx, dy = res\n except:\n dx, dy = res, res\n\n # Test for factor of resolutions\n thresh = dx / 1000\n if ((extent.xMin - self.xMin) % dx > thresh or\n (extent.yMin - self.yMin) % dy > thresh or\n (self.xMax - extent.xMax) % dx > thresh or\n (self.yMax - extent.yMax) % dy > thresh):\n return False\n return True", "title": "" }, { "docid": "10af8ed7d1b17356dff01355ae00e9f5", "score": "0.5318626", "text": "def is_in_map(self, pos, offset=None):\n if offset:\n x, y = pos[0] + offset[0], pos[1] + offset[1]\n else:\n x, y = pos\n return x in xrange(0, self.height) and y in xrange(0, self.width)", "title": "" }, { "docid": "4697ced035ebdd993535b7f8efe94d3e", "score": "0.5310327", "text": "def has_bounds(self):\n return False", "title": "" }, { "docid": "ba62c29c02a9101c2ddef17fd479614e", "score": "0.52915573", "text": "def in_bounds(self, x: int, y: int) -> bool: # This is broken\n return self.buffer_x <= x < self.buffer_x + self.map_width and self.buffer_y <= y < self.buffer_y + \\\n self.map_height", "title": "" }, { "docid": "369c63d1aac99523d0b7d0281d3d95c4", "score": "0.52889496", "text": "def in_range(self, other):\n # TODO: this could be better optimized.\n d = math.sqrt( math.pow(self.get('x') - other.get('x'), 2) + math.pow(self.get('y') - other.get('y'), 2) )\n return d <= self.get('scanning_range')", "title": "" }, { "docid": "eafbdb12037c74e6a260ab118f540c46", "score": "0.5280717", "text": "def inRectangle(p):\n (x, y, z) = p\n return (x >= X_RCT_MIN and x <= X_RCT_MAX and\n y >= Y_RCT_MIN and y <= Y_RCT_MAX)", "title": "" }, { "docid": "540a81293907db883201e62651dc3fb2", "score": "0.52619135", "text": "def is_in_boundaries(self, x, y):\n if (x >= 0) and (y >= 0) and (x < self.width) and (y < self.height):\n return True\n return False", "title": "" }, { "docid": "26c6258218aa7b94e2832d86d10125f5", "score": "0.5258319", "text": "def is_within_bounds(self, x: int, y: int) -> bool:\r\n\r\n return (0 <= x < len(self.grid)) and (0 <= y < len(self.grid[0]))", "title": "" }, { "docid": "72a4b77ce7b7d3e157238ab72151219c", "score": "0.5253907", "text": "def in_range(self, user):\n dlat = self.lat - user.lat\n dlng = self.lng - user.lng\n\n a = math.sin(dlat/2)**2 + \\\n math.cos(self.lat)*math.cos(user.lat)*math.sin(dlng/2)**2\n c = 2 * math.asin(math.sqrt(a))\n\n m = 6367000 * c\n rad = min(self.rad, user.rad)\n\n return m <= rad", "title": "" }, { "docid": 
"e62c2a2201d583e70b4e2ec1b9d236e7", "score": "0.5249093", "text": "def contains(self, x):\n return (x.shape == self.lower.shape\n and (x >= self.lower).all()\n and (x <= self.upper).all())", "title": "" }, { "docid": "6062bb851fc6ac399b10cb3a16171e2a", "score": "0.52453655", "text": "def in_SCAT_box(x, y, low_bound, high_bound, x_max, y_max):\n passing = True\n upper_limit = high_bound(x)\n lower_limit = low_bound(x)\n if x > x_max or y > y_max:\n passing = False\n if x < 0 or y < 0:\n passing = False\n if y > upper_limit:\n passing = False\n if y < lower_limit:\n passing = False\n return passing", "title": "" }, { "docid": "a61d2d474863f10c3e08e40e83295e2f", "score": "0.52450275", "text": "def inrange(key1, key2):\n if '-' in key2:\n minimum = int(key2.split('-')[0])\n maximum = int(key2.split('-')[1])\n else:\n minimum = int(key2)\n maximum = minimum\n if int(key1) >= minimum and int(key1) <= maximum:\n return True\n else:\n return False", "title": "" }, { "docid": "b7ab761098d07768ccb4c903982869cf", "score": "0.5244723", "text": "def isLocationWithinBounds(data, dx0, dy0):\n\n if dx0 > 0 and dy0 > 0:\n if dx0 < (2 * data.width) and dy0 < (2 * data.height):\n return True\n\n return False", "title": "" }, { "docid": "ef3fe3c0abb57993d341c1c31dec950b", "score": "0.5243866", "text": "def in_range(num, min, max):\r\n return num == int(num) and within_range(num, min, max)", "title": "" }, { "docid": "ef3fe3c0abb57993d341c1c31dec950b", "score": "0.5243866", "text": "def in_range(num, min, max):\r\n return num == int(num) and within_range(num, min, max)", "title": "" }, { "docid": "246c138e427fb170cbfbfe5f62240a28", "score": "0.52321225", "text": "def in_target_region(x):\n if x[0] > 0.5 and x[1] > 0.5:\n return True\n else:\n return False", "title": "" }, { "docid": "d51f9dbfd11fef1fc69e34abea04a898", "score": "0.52312547", "text": "def in_bounds(self, point):\n row,col = point\n return ((row>0 and row<self.height) and\n (col>0 and col<self.width))", "title": "" }, { "docid": "993444a05f4699677159310fd19394d8", "score": "0.5230643", "text": "def limit_range_for_scale(self, vmin, vmax, minpos):\n return (vmin <= -1.0 and minpos or vmin,\n vmax <= -1.0 and minpos or vmax)", "title": "" }, { "docid": "dfc63346e03b7bb9e3351df9a0e4ad6c", "score": "0.5218196", "text": "def contains(self, val):\n return (self.lower_cut < val and\n self.upper_cut > val)", "title": "" }, { "docid": "8e2440b7651eef3623f51f26af47cbbb", "score": "0.5215579", "text": "def isValidCell(x, y, mapWidth, mapHeight):\n return 0 <= x < mapWidth and 0 <= y < mapHeight", "title": "" }, { "docid": "b3599b8f36567fc4d38bd06116ea491b", "score": "0.5214336", "text": "def in_bounds(self, id):\n (x, y) = id\n return 0 <= x < self.width and 0 <= y < self.height", "title": "" }, { "docid": "c5a42df6b31261978959078682a6a59b", "score": "0.5212816", "text": "def in_bounds(self, node):\n return 0 <= node.x < self.width and 0 <= node.y < self.height", "title": "" }, { "docid": "6d98e91bd40a7f397f6dfd870b309593", "score": "0.52060866", "text": "def region_test(row):\n\n return row[-2] != \"0\" and float(row[-1]) < 60 and float(row[-1]) > 0", "title": "" }, { "docid": "fbbbcaaaba69b866c8917fe7961cd49a", "score": "0.52001405", "text": "def is_in_bounds(tile, world_width, world_height) -> bool:\n return (0 <= tile[0] < world_width) and (0 <= tile[1] < world_height)", "title": "" }, { "docid": "3e88cdc57f086125b7cb099e8fa62944", "score": "0.51996547", "text": "def check_for_scene(string: str) -> bool:\n\n if re.search(\"^[0-9]\", string.lower()) 
and\\\n re.search(\"[a-z]$\", string.lower()) and\\\n len(string) <= 5 and len(string) > 2:\n return True\n if string.isdigit() and int(string) >= 30:\n return True\n return False", "title": "" }, { "docid": "1230c2d8f11bde3e50f38b90cfb2560a", "score": "0.5186131", "text": "def __isInBounds(self, c: int, r: int) -> bool:\n if c < self.col and c >= 0 and \\\n r < self.row and r >= 0:\n return True\n return False", "title": "" }, { "docid": "d68b469d8627e3cc5a33f0b81a7f9408", "score": "0.5184509", "text": "def zoom_is_valid(zoom):\n if zoom not in ['minute', 'hour', '6hours', 'day',\n 'week', 'month', 'year']:\n raise InvalidParameterError(\"S3Base:_validate_zoom:%s\" % zoom)\n return True", "title": "" }, { "docid": "f2c7105b0a69bbcaac6923121cb827dd", "score": "0.5184504", "text": "def is_inside_region(self, region):\n return region.point_inside(self.lat, self.lng)", "title": "" }, { "docid": "d9a4f8d54043a4a7d3b365a8b9ff977e", "score": "0.5182251", "text": "def verifyRanges(loRange,hiRange):\n # FUNCION REQ 1, REQ 3, REQ4\n correct = False\n if (loRange <= hiRange) and loRange>=0 and hiRange>=0:\n correct = True\n return correct", "title": "" }, { "docid": "e6f8ff763ab72f1ee10d40f8ee103348", "score": "0.5181961", "text": "def coordinates_within_limits(self, _x_pos: int, _y_pos: int) -> bool:\n True", "title": "" }, { "docid": "7cd0d0564d62b4be67b6ff8f6b705277", "score": "0.5180542", "text": "def inBounds(self, point):\n if 2 * len(point) != len(self):\n raise ValueError('Invalid number of dimensions: {}'.format(point))\n\n for ax, coord in enumerate(point):\n if coord < self.getLo(ax) or coord > self.getHi(ax):\n return False\n\n return True", "title": "" }, { "docid": "abe97b9f401b343fc27d9f8f669b297e", "score": "0.5179511", "text": "def in_bounds(self, location):\n return self.__in_bounds_impl(location)", "title": "" }, { "docid": "56b68fba9ff57270f60db7563db2d071", "score": "0.51792383", "text": "def is_cell(self, cell):\n row, col = eval(cell)\n return 0 <= row < self.nbr_rows and 0 <= col < self.nbr_columns", "title": "" }, { "docid": "bbb89538d2c457d087d23fe81188a683", "score": "0.517915", "text": "def in_threshold_range(mix: Mix, min: numpy.array, max: numpy.array) -> bool:\r\n min_result = numpy.subtract(mix.threshold, min)\r\n # check all elements, if < 0 then return false\r\n assert isinstance(min_result, numpy.array)\r\n if numpy.any(min_result[:, 0] < 0):\r\n return False\r\n max_result = numpy.subtract(mix.threshold, max) \r\n assert isinstance(max_result, numpy.array)\r\n if numpy.any(max_result[:, 0] > 0):\r\n return False\r\n # check all elements, if > 0 then return false\r\n return True", "title": "" }, { "docid": "903d9b4f49927f5aa25281b4acc88310", "score": "0.5174591", "text": "def outside_bounding_box(self, long: float, lat: float) -> bool:\n if long < self.minlong or long > self.maxlong or lat < self.minlat or lat > self.maxlat:\n return True\n else:\n return False", "title": "" }, { "docid": "00ecc144bbc6a5b496df2ea104b0a585", "score": "0.5170589", "text": "def is_within_bounds(tile, ra_min, ra_max, dec_min, dec_max, \n compute_bounds_forcoords=True):\n if compute_bounds_forcoords:\n ra_min, ra_max, dec_min, dec_max = compute_bounds(ra_min, ra_max,\n dec_min, dec_max)\n\n within_ra = (tile.ra >= ra_min) and (tile.ra <= ra_max)\n # Special case for ra_min < 0\n if ra_min < 0.:\n within_ra = within_ra or (tile.ra - 360. 
>= ra_min)\n\n within_dec = (tile.dec >= dec_min) and (tile.dec <= dec_max)\n\n # print 'Individual RA, dec: %s, %s' % (str(within_ra), str(within_dec))\n within_bounds = within_ra and within_dec\n return within_bounds", "title": "" }, { "docid": "405176b52f26304df6575d1986dc5754", "score": "0.51607454", "text": "def is_inbounds(indices):\n col, row = indices\n return 0 <= col <= 7 and 0 <= row <= 7", "title": "" }, { "docid": "4cd4a641563ef0d11b4f4948eaa0b725", "score": "0.5159621", "text": "def is_valid_range(self) -> bool:\n\n return self.is_valid_v4_range() or self.is_valid_v6_range()", "title": "" }, { "docid": "b908740f12d61b8b98d3268c08409d36", "score": "0.5144613", "text": "def in_range(self, bot):\n # TODO: this could be better optimized.\n d = math.sqrt( math.pow(self.x - bot.get('x'), 2) + math.pow(self.y - bot.get('y'), 2) )\n return d <= (bot.get('scanning_range') + self.r)", "title": "" }, { "docid": "ca6d44b971aa1a54e7a400e2c3e4e008", "score": "0.5143345", "text": "def encloses(self, other):\n if not isinstance(other, Range):\n raise ValueError(\"Range required\")\n return ((self.lower_cut <= other.lower_cut) and\n (self.upper_cut >= other.upper_cut))", "title": "" }, { "docid": "b65cf0aef4e9f16ce0c6a249028caf00", "score": "0.51421875", "text": "def ROIIsRange(*args):\n return _DigitalMicrograph.ROIIsRange(*args)", "title": "" }, { "docid": "a1161c7db9888c3be0a95ed18d0877ac", "score": "0.5139778", "text": "def check_range(self, low, high):\n last_value = self.get_last_value()\n if not (low <= last_value <= high):\n raise KeyplusParseError(\n \"In {}, expected '{}' to be in the range [{},{}], but got '{}'.\"\n .format(\n self.get_current_path(),\n self.last_field,\n low, high,\n last_value\n )\n )", "title": "" }, { "docid": "7e25233f445ad4d25867ebc0fd20c693", "score": "0.51247764", "text": "def box_in_range(self,x,y,z,d, x_range, y_range, z_range, d_range):\n return np.logical_and.reduce((\n x > x_range[0], x < x_range[1],\n y > y_range[0], y < y_range[1],\n z > z_range[0], z < z_range[1],\n d > d_range[0], d < d_range[1]))", "title": "" }, { "docid": "9dcebb6facd51998c031a291da5a35f6", "score": "0.5123197", "text": "def _is_range(r):\n # TODO: prange check goes here\n return (\n isinstance(r, Tuple)\n and (len(r) == 3)\n and (not isinstance(r.args[1], str)) and r.args[1].is_number\n and (not isinstance(r.args[2], str)) and r.args[2].is_number\n )", "title": "" }, { "docid": "4c136c90afa3d135c72e953a0d4f4835", "score": "0.51216143", "text": "def bounds_str(self):\n l, b, r, t = self.extent\n sw = [b, l]\n ne = [t, r]\n return str([sw, ne])", "title": "" }, { "docid": "4529bb8e666da417a9eb799d13c27c9f", "score": "0.5106072", "text": "def vcheck(self, name, vmin, vmax):\n v = float(self[name][0])\n return (v >= vmin and v <= vmax)", "title": "" }, { "docid": "b9240866eb0e8f30fe78fb9534866017", "score": "0.51016176", "text": "def valid_lat(lat):\n if isinstance(lat, np.ndarray):\n if np.any(lat > 90) or np.any(lat < -90):\n return False\n return True\n else:\n return -90 <= lat and lat <= 90", "title": "" }, { "docid": "098a69880ac3374aa57b5944352fc892", "score": "0.51000047", "text": "def intersected_by(self, span):\n if isinstance(span, TextSpan):\n return span.stop > self.start and span.start < self.stop\n elif isinstance(span, tuple):\n start, stop = span\n return stop > self.start and start < self.stop\n else:\n raise NotImplementedError(\"Unknown span type: %s\" % repr(span))", "title": "" }, { "docid": "8d1c5d332d762178fc98128c3bd472cd", "score": "0.5077919", "text": 
"def _within_bounds(array: np.ndarray, low_bound) -> bool:\n return (array[0] >= low_bound and array[-1] <= abs(low_bound))", "title": "" }, { "docid": "362bff8231692640be2d66848fa58593", "score": "0.50746554", "text": "def is_geo(self):\n return self._dfs.Projection.WKTString == \"LONG/LAT\"", "title": "" }, { "docid": "ae5eb2e84f99631a7b924244fa8bcaf8", "score": "0.5071731", "text": "def within_bounds(p):\n r, c = p\n h, w = semseg.shape[:2]\n return all([r >= 0, r < h, c >= 0, c < w])", "title": "" }, { "docid": "4fbcc0198a9824c9631bd34085251145", "score": "0.50659174", "text": "def _in_map(self, position: Tuple[int]):\n\n # Makes sure that it's less than height/width and more than 0\n valid_x = (position[0] < self.img.width and position[0] >= 0)\n valid_y = (position[1] < self.img.height and position[1] >= 0)\n\n return valid_x and valid_y", "title": "" }, { "docid": "5a9e9c3dc51e17d322dd8c36694f83e8", "score": "0.50633335", "text": "def __contains__(self, address):\n return self.BaseAddress <= address < (self.BaseAddress + self.RegionSize)", "title": "" }, { "docid": "63de2992f55255567e8a925e38ab15e7", "score": "0.5059907", "text": "def check_region(binary, zoom=1.0):\n if np.prod(binary.shape)==0: return \"image dimensions are zero\"\n if len(binary.shape)==3: return \"image is not monochrome %s\"%(binary.shape,)\n if np.amax(binary)==np.amin(binary): return \"image is blank\"\n if np.mean(binary)<np.median(binary): return \"image may be inverted\"\n h,w = binary.shape\n if h<45/zoom: return \"image not tall enough for a region image %s\"%(binary.shape,)\n if h>5000/zoom: return \"image too tall for a region image %s\"%(binary.shape,)\n if w<100/zoom: return \"image too narrow for a region image %s\"%(binary.shape,)\n if w>5000/zoom: return \"image too wide for a region image %s\"%(binary.shape,)\n # zoom factor (DPI relative) and 4 (against fragmentation from binarization)\n slots = int(w*h*1.0/(30*30)*zoom*zoom) * 4\n _,ncomps = measurements.label(binary)\n if ncomps<5: return \"too few connected components for a region image (got %d)\"%(ncomps,)\n if ncomps>slots and ncomps>10: return \"too many connected components for a region image (%d > %d)\"%(ncomps,slots)\n return None", "title": "" }, { "docid": "69d82075054172d57138c27e13272706", "score": "0.5050263", "text": "def matches_range(self, response: str) -> bool:\n try:\n return GameAimMenu.MIN_ANGLE <= int(response) <= GameAimMenu.MAX_ANGLE\n except Exception:\n return False", "title": "" }, { "docid": "ef953f55af9f541d16031b9725314564", "score": "0.5050174", "text": "def at_span(self, sx0, sy0, sx1):\n for name, x0, y0, x1, y1 in self.tiles:\n if y0 <= sy0 < y1:\n if not (x1 <= sx0 or sx1 <= x0):\n return name\n return None", "title": "" }, { "docid": "2217b7f2e8e57105e72a3f44a682602d", "score": "0.50486785", "text": "def _is_time_in_range(start, end, x):\n if start <= end:\n return start <= x <= end\n else:\n return start <= x or x <= end", "title": "" }, { "docid": "7ab06b2e6394878fba0a299ad62c8785", "score": "0.50222707", "text": "def check_extent(user_location, extent, extent_5k):\n extend = False\n if extent.contains(user_location):\n pass\n else:\n print('You are not within the islands extent\\n\\nexiting application...')\n exit() # stop application if user is outside box extent\n if not extent_5k.contains(user_location):\n # We must extend the region\n extend = True\n on_land(user_location)\n return extend", "title": "" }, { "docid": "81bd019eee74317981bd64979321ecdf", "score": "0.5021724", "text": "def is_inside(g):\n 
if g is None:\n return False\n b = line_r.begin()\n # the region, which should contain the selection\n reg = g.regs[0]\n return reg[0] <= sel.begin() - b and sel.end() - b <= reg[1]", "title": "" }, { "docid": "f7644b141549a4d9c4a6b85f2b175b7d", "score": "0.50179046", "text": "def is_chinese(c):\n return c >= u'\\u4e00' and c <= u'\\u9fa5'", "title": "" }, { "docid": "a6d48a4cfcb016b65f61e5e6229cc9ba", "score": "0.50174814", "text": "def in_boundaries(lat, lon):\n east_boundary = float(lon) < -125\n west_boundary = float(lon) > 10\n south_boundary = float(lat) < 20\n return east_boundary or west_boundary or south_boundary", "title": "" }, { "docid": "2b4f709497378931d93a65d8d2587c52", "score": "0.5007033", "text": "def test_check_lat_extents(self):\n\n # create dataset using MockDataset, give it lat/lon dimensions\n ds = MockTimeSeries()\n ds.variables[\"lat\"][:] = np.linspace(\n -135.0,\n -130.0,\n num=500,\n ) # arbitrary, but matches time dim size\n\n # test no values, expect failure\n result = self.acdd.check_lat_extents(ds)\n self.assert_result_is_bad(result)\n\n # give integer geospatial_lat_max/min, test\n ds.setncattr(\"geospatial_lat_min\", -135)\n ds.setncattr(\"geospatial_lat_max\", -130)\n\n result = self.acdd.check_lat_extents(ds)\n self.assert_result_is_good(result)\n\n # give float geospatial_lat_min/max, test\n ds.setncattr(\"geospatial_lat_min\", -135.0)\n ds.setncattr(\"geospatial_lat_max\", -130.0)\n\n result = self.acdd.check_lat_extents(ds)\n self.assert_result_is_good(result)\n\n # give string (in number-form), test\n ds.setncattr(\"geospatial_lat_min\", \"-135.\")\n ds.setncattr(\"geospatial_lat_max\", \"-130.\")\n\n result = self.acdd.check_lat_extents(ds)\n self.assert_result_is_good(result)\n\n # give garbage string -- expect failure\n ds.setncattr(\"geospatial_lat_min\", \"bad\")\n ds.setncattr(\"geospatial_lat_max\", \"val\")\n\n result = self.acdd.check_lat_extents(ds)\n self.assert_result_is_bad(result)", "title": "" }, { "docid": "97245748eaa9461c9e8946cff911012a", "score": "0.50052357", "text": "def contains(self, p):\n return p.x() >= self.xmin and p.x() <= self.xmax and \\\n p.y() >= self.ymin and p.y() <= self.ymax", "title": "" } ]
5c183dd391f4ef6a5586df5028e5cc42
Annotate the plot with spectra line identifications.
[ { "docid": "1eb1271c99282ddc0be6fe229f840681", "score": "0.623306", "text": "def annotate_lines(self, lines):\n # lines is list of tuple (obswlen, name)\n (xlow, xhigh) = self.axplot.get_xlim()\n (_, yhigh) = self.axplot.get_ylim()\n ypos = yhigh * 0.8\n ydelta = yhigh * 0.05\n i = 0\n for line in lines:\n if xlow < line[0] < xhigh:\n self.axplot.text(line[0], ypos-(i*ydelta)-ydelta*1.25, '|',\n horizontalalignment='center',\n verticalalignment='center', fontsize=10)\n self.axplot.text(line[0], ypos-(i*ydelta), line[1],\n horizontalalignment='center',\n verticalalignment='center', fontsize=10)\n i += 1\n self.fig.canvas.draw()\n\n return", "title": "" } ]
[ { "docid": "517a5732970aff8a8cc25ea11fa467ec", "score": "0.64772844", "text": "def plt_spec_lines():\n\n for i in range(0, Molecule.species_count):\n mid_line = (Molecule.right_endpt[i] + Molecule.left_endpt[i]) / 2\n shift1 = Molecule.energy[i] - PlotParameter.energy_vshift\n shift2 = Molecule.energy[i] + PlotParameter.name_vshift\n\n en = '{0:5.2f}'.format(Molecule.energy[i])\n\n plt.plot([Molecule.left_endpt[i], Molecule.right_endpt[i]], [Molecule.energy[i], Molecule.energy[i]],\n color=PlotParameter.species_line_color, lw=PlotParameter.species_line_width, linestyle='-')\n plt.text(mid_line, shift1, en, weight='bold', horizontalalignment='center',\n fontsize=PlotParameter.energy_font_size, color='black')\n plt.text(mid_line, shift2, Molecule.name[i], weight='bold', horizontalalignment='center',\n fontsize=PlotParameter.name_font_size, color='black')", "title": "" }, { "docid": "163a87cc52ab3347ff2a77d216621729", "score": "0.61922896", "text": "def _timeseries_scatter_plot_lines(axes):\n axes.axvline(\n x=0,\n ymin=-1000,\n ymax=1000,\n color=\"grey\",\n linestyle=\"dotted\",\n alpha=0.6,\n )\n axes.axhline(\n y=0,\n xmin=-1000,\n xmax=1000,\n color=\"grey\",\n linestyle=\"dotted\",\n alpha=0.6,\n )", "title": "" }, { "docid": "78d0db685663cdb603c6ee568c75152e", "score": "0.61453223", "text": "def add_line_SFR_obs(line,L_line,ax,plot_fit=True,**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n # --- Observations compiled in Observations.ipynb ---\n\n L_obs = np.array([])\n SFR_obs = np.array([])\n\n if p.plot: print('\\nObserved galaxies with %s:' % line)\n\n c = 'dimgrey'\n a = 0.8\n mew = 1\n\n # Kamenetzky et al. 2016\n df = pd.read_pickle('data/observations/AHIMSA_sample_lit')\n df = df[(df.sizes < 47) & (df.SFR > 1e-4) & (df[line+ '_Lsun'].values > 0)] \n try:\n if p.plot: ax.plot(np.log10(df.SFR[(df.sizes < 47) & (df.SFR > 1e-4)]),\\\n np.log10(df[line + '_Lsun'][(df.sizes < 47) & (df.SFR > 1e-4)]),'>',ms=6,fillstyle='none',mew=mew,\\\n color=c,alpha=a,label='Mixed type galaxies [Kamenetzky+16]')\n lo_err = np.array(np.log10(df[line+ '_Lsun'].values)-np.log10(df[line+ '_Lsun'].values-df['e_'+line+ '_Lsun'].values))\n up_err = np.array(np.log10(df[line+ '_Lsun'].values+df['e_'+line+ '_Lsun'].values)-np.log10(df[line+ '_Lsun'].values))\n lo_err[df[line+ '_Lsun'].values == 0] = 0\n up_err[df[line+ '_Lsun'].values == 0] = 0\n # ax.errorbar(np.log10(df.SFR),\\\n # np.log10(df[line+ '_Lsun']),\\\n # yerr=np.column_stack([lo_err,up_err]).T,\\\n # elinewidth=1,marker='>',ms=6,mew=1,fillstyle='none',\\\n # color='grey',alpha=0.8,lw=0,label='Mixed z~0 sample [Kamenetzky+16]')\n L_obs = np.append(L_obs,df[line + '_Lsun'].values)\n SFR_obs = np.append(SFR_obs,df.SFR.values)\n if p.plot: print('%i galaxies from Kamenetzky+16 ' % (len(L_obs)))\n except:\n pass\n\n # print('min SFR: ',np.min(df.SFR.values[df.sizes < 47]))\n\n # Brauher et al. 
2008\n try:\n df = pd.read_pickle('data/observations/Brauher_2008')\n if p.plot: \n # lo_err = np.array(np.log10(df['L_'+line].values)-np.log10(df['L_'+line].values-df['e_'+line].values))\n # up_err = np.array(np.log10(df['L_'+line].values+df['e_'+line].values)-np.log10(df['L_'+line].values))\n # print(lo_err)\n # print(df['e_'+line].values/df['L_'+line])\n # ax.errorbar(np.log10(df.SFR),np.log10(df['L_'+line]),\\\n # yerr=np.column_stack([lo_err,up_err]).T,\\\n # elinewidth=1,marker='o',ms=7,mew=1,fillstyle='none',\\\n # color='grey',alpha=0.8,lw=0,label='MS/SB galaxies [Brauher+08]')\n ax.plot(np.log10(df.SFR),np.log10(df['L_%s' % line]),'o',fillstyle='none',ms=4,mew=mew,color=c,\\\n alpha=a,label='MS/SB galaxies [Brauher+08]')\n L_ul = np.log10(df['L_%s' % line][df['f_'+line] == -1])\n if len(L_ul) > 0:\n # ax.plot(df.SFR[df['f_'+line] == -1],L_ul,'o',zorder=0,ms=7,mew=1,color='grey',alpha=0.8)\n ax.errorbar(np.log10(df.SFR[df['f_'+line] == -1]),L_ul,capsize=3,color=c,alpha=a,elinewidth=1,\\\n uplims=np.ones(len(L_ul)),\\\n yerr=0.3,lw=0)\n if p.plot: print('%i galaxies from Brauher+08 ' % (len(df)))\n L =df['L_%s' % line].values\n SFR =df['SFR'].values\n L = L[SFR > 0]\n SFR = SFR[SFR > 0]\n L_obs = np.append(L_obs,L)\n SFR_obs = np.append(SFR_obs,SFR)\n # print('min SFR: ',np.min(df.SFR))\n except:\n pass\n\n if p.select != '_MS':\n # Cormier et al. 2015\n try:\n df = pd.read_pickle('data/observations/DGS_Cormier_2015')\n if p.plot: \n # try: \n # lo_err = np.array(np.log10(df['L_'+line].values)-np.log10(df['L_'+line].values-df['e_'+line].values))\n # up_err = np.array(np.log10(df['L_'+line].values+df['e_'+line].values)-np.log10(df['L_'+line].values))\n # ax.errorbar(df.SFR,np.log10(df['L_'+line]),\\\n # yerr=np.column_stack([lo_err,up_err]).T,\\\n # elinewidth=1,marker='x',ms=7,mew=mew,\\\n # color=c,alpha=a,lw=0,label='Dwarf galaxies [Cormier+15]')\n # except:\n ax.plot(df.SFR,np.log10(df['L_%s' % line]),'x',zorder=0,ms=7,mew=mew,color=c,alpha=a,\\\n label='Dwarf galaxies [Cormier+15]')\n L_ul = np.log10(-1.*df['L_'+line][df['L_'+line] < 0])\n if len(L_ul) > 0:\n ax.plot(df.SFR[df['L_'+line] < 0],L_ul,'x',zorder=0,ms=7,mew=mew,color=c,alpha=a)\n ax.errorbar(df.SFR[df['L_'+line] < 0],L_ul,capsize=3,color=c,alpha=a,elinewidth=1,\\\n uplims=np.ones(len(L_ul)),\\\n yerr=0.3,lw=0)\n # np.log10(-1.*L_ul - 10.**(np.log10(-1.*L_ul)-0.3))\n if p.plot: print('%i galaxies from Cormier+15 ' % (len(df)))\n L_obs = np.append(L_obs,df['L_%s' % line].values)\n SFR_obs = np.append(SFR_obs,10.**df.SFR.values)\n except:\n pass\n\n # Schruba et al. 
2012\n #try:\n if (line == 'CO(1-0)') | (line == 'CO(2-1)'):\n df = pd.read_pickle('data/observations/Schruba_2012')\n if p.plot: \n if line == 'CO(1-0)': label = 'Mixed type galaxies [Schruba+12]'\n if line == 'CO(2-1)': label = 'Dwarf galaxies [Schruba+12]'\n f_ul = df['f_%s' % line].values\n L = df['L_%s' % line].values\n SFR = df['SFR'].values\n L_obs = np.append(L_obs,L[L > 0])\n SFR_obs = np.append(SFR_obs,SFR[L > 0])\n Z = df['Z'].values\n if line == 'CO(2-1)': \n SFR = SFR[L>0]\n f_ul = f_ul[L>0]\n Z = Z[L>0]\n L = L[L>0]\n print('Schruba min max Z: ',Z.min(),Z.max())\n M_H2 = 1.8e9 * SFR # from S12 paper\n area = np.array([1.33,1.79,1.75,7.74,11.47,12.37,26.69,83.85,12.23,39.40,19.21,7.78,14.75,59.54,31.19,39.19]) # kpc2\n Sigma_M_H2 = M_H2 / (area*1000*1000)\n if p.select == 'Zsfr': \n ax.scatter(np.log10(SFR[L > 0]),np.log10(L[L > 0]),marker='*',zorder=0,facecolors='none',s=30,\\\n linewidth=mew,c=np.log10(Z),alpha=a,label=label,vmin=p.vmin,vmax=p.vmax)\n else:\n ax.scatter(np.log10(SFR[L > 0]),np.log10(L[L > 0]),marker='*',zorder=0,facecolors='none',s=30,\\\n linewidth=mew,c=np.log10(Sigma_M_H2),alpha=a,label=label,vmin=p.vmin,vmax=p.vmax)\n if line == 'CO(1-0)': \n ax.plot(np.log10(SFR[L > 0]),np.log10(L[L > 0]),'*',zorder=0,fillstyle='none',ms=7,mew=mew,color=c,alpha=a,\\\n label=label)\n if len(f_ul) > 0:\n # ax.plot(np.log10(SFR[f_ul == 1]),np.log10(L[f_ul == 1]),'*',zorder=0,fillstyle='none',ms=7,mew=mew,color=c,alpha=a)\n ax.errorbar(np.log10(SFR[f_ul == 1]),np.log10(L[f_ul == 1]),capsize=3,fillstyle='none',color=c,alpha=a,elinewidth=1,\\\n uplims=np.ones(len(L[f_ul == 1])),\\\n yerr=0.3,lw=0)\n if p.plot: print('%i galaxies from Schruba+12 ' % (len(df)))\n #except:\n # pass\n\n # Accurso et al. 2017\n try:\n df = pd.read_pickle('data/observations/xCOLD_GASS_Accurso_2017')\n df = df.loc[np.argwhere(df['L_CO(1-0)'].values > 0).flatten()]\n if p.plot: ax.plot(np.log10(df['SFR']),df['L_%s' % line], 'd', zorder=0,ms=7,fillstyle='none',mew=mew,color=c,alpha=a,label='COLD GASS [Accurso+17]') #c=np.log10(A17['Z']), \n L_obs = np.append(L_obs,10.**df['L_%s' % line].values)\n if p.plot: print('%i galaxies from Accurso+17 ' % (len(df)))\n SFR_obs = np.append(SFR_obs,df.SFR.values)\n except:\n pass\n\n # Vanzi et al. 2009\n if line == 'CO(3-2)':\n df = pd.read_pickle('data/observations/Vanzi_2009')\n df = df.loc[np.argwhere(df['L_CO(3-2)'].values > 0).flatten()]\n if p.plot: ax.plot(np.log10(df['SFR']),np.log10(df['L_%s' % line]), 'D', zorder=0,ms=7,fillstyle='none',mew=mew,\\\n color=c,alpha=a,label='Dwarf galaxies [Vanzi+09]') #c=np.log10(A17['Z']), \n L_obs = np.append(L_obs,df['L_%s' % line].values)\n if p.plot: print('%i galaxies from Vanzi+09 ' % (len(df)))\n SFR_obs = np.append(SFR_obs,df.SFR.values)\n # except:\n # pass\n\n\n # Diaz-Santos et al. 2013\n try:\n df = pd.read_pickle('data/observations/Diaz-Santos_2013')\n if p.plot: ax.plot(np.log10(df.SFR),np.log10(df['L_%s' % line]),'^',ms=6,zorder=0,fillstyle='none',mew=mew,color=c,alpha=a,label='LIRGs [Diaz-Santos+13]')\n if p.plot: print('%i galaxies from Diaz-Santos+17 ' % (len(df)))\n L_obs = np.append(L_obs,df['L_%s' % line].values)\n SFR_obs = np.append(SFR_obs,df.SFR.values)\n # print('min SFR: ',np.min(df.SFR))\n except:\n pass\n # Farrah et al. 
2013\n # try:\n # df = pd.read_pickle('data/observations/Farrah_2013')\n # if p.plot: ax.plot(df.SFR,df['L_%s' % line],'<',fillstyle='none',mew=1,color='grey',alpha=0.8,label='Farrah+13 (ULIRGs)')\n # if p.plot: print('%i galaxies from Farrah+13 ' % (len(df)))\n # L_obs = np.append(L_obs,df['L_%s' % line].values)\n # SFR_obs = np.append(SFR_obs,df.SFR.values)\n # except:\n # pass\n # Zhao et al. 2016\n try:\n df = pd.read_pickle('data/observations/Zhao_2016')\n if p.plot: ax.plot(np.log10(df.SFR),np.log10(df['L_%s' % line]),'<',ms=6,fillstyle='none',mew=mew,color=c,alpha=a,label='GOALS (U)LIRGs [Zhao+16]')\n if p.plot: print('%i galaxies from Zhao+16 ' % (len(df)))\n L_obs = np.append(L_obs,df['L_%s' % line].values)\n SFR_obs = np.append(SFR_obs,df.SFR.values)\n # print('min SFR: ',np.min(df.SFR))\n except:\n pass\n\n if line in ['[CII]158','[OI]63','[OIII]88']:\n # De Looze 2014 relation\n if np.min(L_line) == 0 : L_line[L_line == 0] = 1e-30\n if p.plot: print(np.min(np.log10(L_line)),np.max(np.log10(L_line)))\n logL_delooze = np.arange(np.min(np.log10(L_line)) - 3,np.max(np.log10(L_line)) + 3)\n\n if line == '[CII]158':\n logSFR_delooze_DGS = -5.73 + 0.8 * logL_delooze\n logSFR_delooze_SBG = -7.06 + 1.0 * logL_delooze\n\n if line == '[OI]63':\n logSFR_delooze_DGS = -6.23 + 0.91 * logL_delooze\n logSFR_delooze_SBG = -6.05 + 0.89 * logL_delooze\n\n if line == '[OIII]88':\n logSFR_delooze_DGS = -6.71 + 0.92 * logL_delooze\n logSFR_delooze_SBG = -3.89 + 0.69 * logL_delooze\n\n if p.plot: ax.plot(logSFR_delooze_DGS,logL_delooze,'--',color='grey',alpha=0.7,\\\n label='Local dwarf galaxies [de Looze+ 2014]')\n if p.plot: ax.plot(logSFR_delooze_SBG,logL_delooze,':',color='grey',alpha=0.7,\\\n label='Local SB galaxies [de Looze+ 2014]')\n # print(SFR_obs)\n logSFR = np.arange(np.min(np.log10(SFR_obs[SFR_obs > 0])) - 3,np.max(np.log10(SFR_obs[SFR_obs > 0])) + 3)\n # fit = np.polyfit(np.log10(SFR_obs[(L_obs > 0) & (SFR_obs > 0)]),\\\n # np.log10(L_obs[(L_obs > 0) & (SFR_obs > 0)]),1)\n # pfit = np.poly1d(fit)\n # L_fit = 10.**pfit(logSFR)\n\n # Make log-linear fit to SFR-binned luminosities\n SFRs = SFR_obs[(L_obs > 0) & (SFR_obs > 0)]\n Ls = L_obs[(L_obs > 0) & (SFR_obs > 0)]\n SFR_axis = np.linspace(np.log10(SFRs.min()),np.log10(SFRs.max()),20)\n SFR_bins = SFR_axis[0:-1] + (SFR_axis[1]-SFR_axis[0])/2.\n Ls_binned = np.zeros(len(SFR_axis)-1)\n for i in range(len(Ls_binned)):\n Ls1 = Ls[(SFRs >= 10.**SFR_axis[i]) & (SFRs <= 10.**SFR_axis[i+1])]\n Ls_binned[i] = np.mean(np.log10(Ls1))\n SFR_bins = SFR_bins[Ls_binned > 0]\n Ls_binned = Ls_binned[Ls_binned > 0]\n # ax.plot(10.**SFR_bins,10.**Ls_binned,'x',color='orange',mew=3)\n fit = LinearRegression().fit(SFR_bins.reshape(-1, 1),\\\n Ls_binned.reshape(-1, 1))\n L_fit = 10.**fit.predict(logSFR.reshape(-1, 1))\n if p.plot & plot_fit: ax.plot(logSFR,np.log10(L_fit),'--k',lw=1.5,zorder=0)\n\n # print(line)\n # print(np.log10(L_obs[(L_obs > 0) & (SFR_obs > 0)]))\n # print(fit.predict(SFR_obs[(L_obs > 0) & (SFR_obs > 0)].reshape(-1, 1)).flatten())\n\n std = np.std(np.log10(L_obs[(L_obs > 0) & (SFR_obs > 0)]) - \\\n fit.predict(np.log10(SFR_obs[(L_obs > 0) & (SFR_obs > 0)]).reshape(-1, 1)).flatten())\n\n\n # Read literature data from AHIMSA project\n # obsdf = pd.read_pickle(p.d_data+'observations/sample_lit')\n # print(obsdf.keys())\n # print(L_obs)\n # print(SFR_obs)\n\n if not p.plot: \n return(L_obs.flatten(),SFR_obs.flatten(),fit,std)", "title": "" }, { "docid": "7e4797941caab33a6b4fdc9e9cae1c52", "score": "0.61185837", "text": "def 
draw_lines(asr,ax):\n r = asr.value\n y = 475.\n x = (r**2-y**2)**(.5)\n xs = np.linspace(-x,x,10)\n yt = np.zeros(xs.size)+y\n yb = np.zeros(xs.size)-y\n ax.plot(xs,yt,'-.',color='red',alpha=1.,linewidth=2,zorder=5000)\n ax.plot(xs,yb,'-.',color='red',alpha=1.,linewidth=2,zorder=5000)\n return ax", "title": "" }, { "docid": "c6be67b42cb233a70d67baec4ed056f6", "score": "0.6094596", "text": "def plot(self):\n\t\tself.plotOfSpect()", "title": "" }, { "docid": "91d1b979db66b5dd31a34e11aa7de662", "score": "0.6028495", "text": "def plot(self):\n\t\tself.plotOfSpect().plot()", "title": "" }, { "docid": "04cae9d82459f500b2eb1d6e349eb77c", "score": "0.59704", "text": "def linePlot(self):\n clf()\n plot(self.x,self.averages)\n xlabel('X Label (units)')\n ylabel('Y Label (units)')\n savefig('line.png')", "title": "" }, { "docid": "c88db06e20a4608fc2df85f6d69eac46", "score": "0.5939987", "text": "def __draw_annotations(self):\n final = self.__resolve_annotation_conflicts(self.annotations)\n\n shrinkB = self.settings.rcParams[\"lines.markersize\"]+self.settings.rcParams[\"lines.markeredgewidth\"]\n for a in final:\n if a.put_circle_around_point:\n self.ax.plot(a.event_point.x, a.event_point.y, marker='o', markeredgecolor=a.color,\n ms=self.settings.rcParams[\"lines.markersize\"]*2.0)\n\n if a.marker is not None:\n self.ax.plot(\n a.marker.x, a.marker.y, markeredgecolor=a.marker.color, marker=a.marker.marker)\n\n self.ax.annotate(a.text, xy=(a.event_point.x, a.event_point.y), xytext=(a.x, a.y),\n weight='bold', color=a.color, va='center', ha='left',\n arrowprops=dict(arrowstyle='-',\n connectionstyle='arc3',\n color=a.color,\n shrinkA=self.settings.otherParams[\"annotation.shrinkA\"],\n shrinkB=shrinkB,\n # search for 'relpos' on https://matplotlib.org/tutorials/text/annotations.html\n relpos=a.relpos,\n linewidth=self.settings.otherParams[\"annotation.line.width\"]))", "title": "" }, { "docid": "94bd3817403c7552af887b32e66b8e4f", "score": "0.5920662", "text": "def create_plot(self, current_segment):\n\n annotations = {}\n visible = []\n data = []\n line_mask_idx = {}\n cont_mask_idx = {}\n\n for seg in range(self.nsegments):\n\n k = len(visible)\n line_mask_idx[seg] = k\n cont_mask_idx[seg] = k + 1\n\n # The order of the plots is chosen by the z order, from low to high\n # Masks should be below the spectra (so they don't hide half of the line)\n # Synthetic on top of observation, because synthetic varies less than observation\n # Annoying I know, but plotly doesn't seem to have good controls for the z order\n # Or Legend order for that matter\n\n if (\n self.mask is not None\n and self.spec is not None\n and self.wave is not None\n ):\n # Line mask\n x, y = self.create_mask_points(\n self.wave[seg], self.spec[seg], self.mask[seg], 1\n )\n\n data += [\n dict(\n x=x,\n y=y,\n fillcolor=fmt[\"LineMask\"][\"facecolor\"],\n fill=\"tozeroy\",\n mode=\"none\",\n name=self.labels.get(\"linemask\", \"Line Mask\"),\n hoverinfo=\"none\",\n legendgroup=2,\n visible=current_segment == seg,\n )\n ]\n visible += [seg]\n\n # Cont mask\n x, y = self.create_mask_points(\n self.wave[seg], self.spec[seg], self.mask[seg], 2\n )\n\n data += [\n dict(\n x=x,\n y=y,\n fillcolor=fmt[\"ContMask\"][\"facecolor\"],\n fill=\"tozeroy\",\n mode=\"none\",\n name=self.labels.get(\"contmask\", \"Continuum Mask\"),\n hoverinfo=\"none\",\n legendgroup=2,\n visible=current_segment == seg,\n )\n ]\n visible += [seg]\n\n if self.spec is not None:\n # Observation\n data += [\n dict(\n x=self.wave[seg],\n y=self.spec[seg],\n line={\"color\": 
fmt[\"Obs\"][\"color\"]},\n name=self.labels.get(\"obs\", \"Observation\"),\n legendgroup=0,\n visible=current_segment == seg,\n )\n ]\n visible += [seg]\n\n # Synthetic, if available\n if self.smod is not None:\n data += [\n dict(\n x=self.wave[seg],\n y=self.smod[seg],\n name=self.labels.get(\"synth\", \"Synthetic\"),\n line={\"color\": fmt[\"Syn\"][\"color\"]},\n legendgroup=1,\n visible=current_segment == seg,\n )\n ]\n visible += [seg]\n if self.orig is not None:\n data += [\n dict(\n x=self.wave[seg],\n y=self.orig[seg],\n name=self.labels.get(\"orig\", \"Original\"),\n line={\"color\": fmt[\"Orig\"][\"color\"], \"dash\": \"dash\"},\n legendgroup=1,\n visible=current_segment == seg,\n )\n ]\n visible += [seg]\n\n # mark important lines\n if self.lines is not None and len(self.lines) != 0:\n seg_annotations = []\n xlimits = self.wave[seg][[0, -1]]\n xlimits *= 1 - self.vrad[seg] / clight\n lines = (self.lines.wlcent > xlimits[0]) & (\n self.lines.wlcent < xlimits[1]\n )\n lines = self.lines[lines]\n\n # Filter out closely packaged lines of the same species\n # Threshold for the distance between lines\n wlcent, labels = [], []\n threshold = np.diff(xlimits)[0] / 100\n species = np.unique(lines[\"species\"])\n for sp in species:\n sp_lines = lines[\"wlcent\"][lines[\"species\"] == sp]\n diff = np.diff(sp_lines)\n sp_mask = diff < threshold\n if np.any(sp_mask):\n idx = np.where(np.diff(sp_mask))[0]\n idx = [0, *idx, len(sp_mask) + 1]\n idx = np.unique(idx)\n for i, j in zip(idx[:-1], idx[1:]):\n sp_wmid = np.mean(sp_lines[i:j])\n sp_label = f\"{sp} +{j-i-1}\"\n wlcent += [sp_wmid]\n labels += [sp_label]\n else:\n for sp_wmid in sp_lines:\n wlcent += [sp_wmid]\n labels += [sp]\n\n wlcent = np.array(wlcent)\n labels = np.array(labels)\n\n # Keep only the 100 stongest lines for performance\n # if \"depth\" in lines.columns:\n # lines.sort(\"depth\", ascending=False)\n # lines = lines[:20]\n # else:\n # idx = np.random.choice(len(lines), 20, replace=False)\n # lines = lines[idx]\n\n x = wlcent * (1 + self.vrad[seg] / clight)\n if self.smod is not None and len(self.smod[seg]) != 0:\n y = np.interp(x, self.wave[seg], self.smod[seg])\n else:\n y = np.interp(x, self.wave[seg], self.spec[seg])\n\n if self.smod is not None and len(self.smod[seg]) > 0:\n ytop = np.max(self.smod[seg])\n elif self.spec is not None and len(self.spec[seg]) > 0:\n ytop = np.max(self.spec[seg])\n else:\n ytop = 1\n\n for i, line in enumerate(labels):\n seg_annotations += [\n {\n \"x\": x[i],\n \"y\": y[i],\n \"xref\": \"x\",\n \"yref\": \"y\",\n \"text\": f\"{labels[i]}\",\n \"hovertext\": f\"{wlcent[i]}\",\n \"textangle\": 90,\n \"opacity\": 1,\n \"ax\": 0,\n \"ay\": 1.2 * ytop,\n \"ayref\": \"y\",\n \"showarrow\": True,\n \"arrowhead\": 7,\n \"xanchor\": \"left\",\n }\n ]\n annotations[seg] = seg_annotations\n\n self.visible = visible\n self.line_mask_idx = line_mask_idx\n self.cont_mask_idx = cont_mask_idx\n\n return data, annotations", "title": "" }, { "docid": "009369d42c5c1a42548ad8407231c701", "score": "0.5901074", "text": "def _create_line_plot(experiment_param, nus, norms, ax, subtitle):\n for name in sorted(norms):\n errors = [experiment_param[nu][name] for nu in nus]\n ax.plot(nus, errors, label=name)\n\n ax.legend()\n ax.set_xticks(nus[1::2])\n ax.set_xticklabels(nus[1::2])\n ax.set_ylabel('Average error (%)', fontsize=15)\n ax.set_ylim([0,5])\n ax.set_title('Estimating {}\\n'.format(subtitle), fontsize=15)", "title": "" }, { "docid": "e6798199dc58609333e5a13306cda4b1", "score": "0.58649415", "text": "def 
plotArc(self):\n\n # plot the spectra\n self.spcurve,=self.axes.plot(self.xarr,self.farr,linewidth=0.5,linestyle='-',marker='None',color='b')", "title": "" }, { "docid": "93bf6038b3390cc196597f7d3c005588", "score": "0.5845611", "text": "def plot_Na(self, show=True, **kwargs):\n import matplotlib.pyplot as plt\n import matplotlib.gridspec as gridspec\n import matplotlib as mpl\n try: # Nicer view, especially in notebook\n import seaborn as sns; sns.set(context=\"notebook\",font_scale=2)\n except ImportError:\n pass\n mpl.rcParams['font.family'] = 'stixgeneral'\n mpl.rcParams['font.size'] = 15.\n # Check for spec\n gdiline = []\n for iline in self._abslines:\n if isinstance(iline.analy['spec'],Spectrum1D):\n gdiline.append(iline)\n nplt = len(gdiline)\n if nplt == 0:\n print(\"Load spectra into the absline.analy['spec']\")\n return\n atom_cst = (const.m_e.cgs*const.c.cgs / (np.pi * (const.e.esu**2).cgs)).to(u.AA*u.s/(u.km*u.cm**2))\n # Setup plot\n plt.clf()\n ax = plt.gca()\n\n fw_sv = 0.*u.AA\n ymax = 0.\n for qq,iline in enumerate(gdiline):\n # Calculate\n velo = iline.analy['spec'].relative_vel((1+iline.attrib['z'])*iline.wrest)\n cst = atom_cst/(iline.data['f']*iline.wrest) #/ (u.km/u.s) / u.cm * (u.AA/u.cm)\n Na = np.log(1./np.maximum(iline.analy['spec'].flux,\n iline.analy['spec'].sig))*cst\n # Figure out ymnx\n pixmnx = (velo > self.vlim[0]) & (velo < self.vlim[1])\n if iline.data['f']*iline.wrest > fw_sv:\n ymax = max(np.max(Na[pixmnx].value),ymax)\n fw_sv = iline.data['f']*iline.wrest\n # Plot\n ax.plot(velo, Na, '-', linestyle='steps-mid', label=iline.data['name'])\n #ax.plot(velo, iline.analy['spec'].sig, 'r:')\n # Axes\n ax.set_xlim(self.vlim.value)\n ax.set_ylim(-0.2*ymax, 5*ymax)\n #ax.set_ylim(ymnx)\n ax.minorticks_on()\n ax.set_xlabel('Relative Velocity (km/s)')\n ax.set_ylabel(r'Apparent Column (cm$^{-2}$ per km/s)')\n # Legend\n legend = ax.legend(loc='upper left', scatterpoints=1, borderpad=0.3,\n handletextpad=0.3, fontsize='large')\n\n plt.tight_layout(pad=0.2,h_pad=0.,w_pad=0.1)\n if show:\n plt.show()\n plt.close()", "title": "" }, { "docid": "b1ec28c64ef3e2da18a8e5e6cd450e0b", "score": "0.58294", "text": "def add_line_FIR_obs(line,ax,**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n # --- Observations compiled in Observations.ipynb ---\n\n print('\\nObserved galaxies with %s:' % line)\n # Cormier et al. 2015 (line luminosity compiled in Zanella+08)\n try:\n df = pd.read_pickle('data/observations/Zanella2018_Cormier_2015')\n ax.plot(df.L_IR/1.4,df['L_%s' % line],'o',zorder=0,fillstyle='none',mew=1,color='grey',alpha=0.8,label='Cormier+15 (dwarfs)')\n print('%i galaxies from Cormier+15 ' % (len(df)))\n except:\n pass\n # Diaz-Santos et al. 2013\n try:\n df = pd.read_pickle('data/observations/Diaz-Santos_2013')\n ax.plot(df.L_FIR,df['L_%s' % line],'^',zorder=0,fillstyle='none',mew=1,color='grey',alpha=0.8,label='Diaz-Santos+13 (LIRGs)')\n print('%i galaxies from Diaz-Santos+17 ' % (len(df)))\n except:\n pass\n # Brauher et al. 2008\n try:\n df = pd.read_pickle('data/observations/Brauher_2008')\n ax.plot(df.L_FIR,df['L_%s' % line],'s',fillstyle='none',mew=1,color='grey',alpha=0.8,label='Brauher+08 (MS/SB)')\n print('%i galaxies from Brauher+08 ' % (len(df)))\n L =df['L_%s' % line].values\n F =df['L_FIR'].values\n print(np.min(L[L > 0]))\n print(np.min(F[F > 0]))\n except:\n pass\n # Farrah et al. 
2013\n try:\n df = pd.read_pickle('data/observations/Farrah_2013')\n ax.plot(df.L_IR,df['L_%s' % line],'<',fillstyle='none',mew=1,color='grey',alpha=0.8,label='Farrah+13 (ULIRGs)')\n print('%i galaxies from Farrah+13 ' % (len(df)))\n except:\n pass\n # Kamenetzky et al. 2016\n try:\n df = pd.read_pickle('data/observations/AHIMSA_sample_lit')\n print('# of K16 galaxies with major axis < 47 arcsec: ',len(df.log_L_FIR[df.sizes < 47]))\n ax.plot(10.**df.log_L_FIR[df.sizes < 47],df[line + '_Lsun'][df.sizes < 47],'>',fillstyle='none',mew=1,color='grey',alpha=0.8,label='Kamenetzky+16 mixed')\n print('%i galaxies from Kamenetzky+16 ' % (len(df)))\n except:\n pass", "title": "" }, { "docid": "407b3dd6c7ad1413be6e940b586bf81e", "score": "0.5801727", "text": "def stack_plot(self, nrow=6, show=True):\n import matplotlib.pyplot as plt\n import matplotlib.gridspec as gridspec\n import matplotlib as mpl\n mpl.rcParams['font.family'] = 'stixgeneral'\n mpl.rcParams['font.size'] = 15.\n # Check for spec\n gdiline = []\n for iline in self._abslines:\n if isinstance(iline.analy['spec'],Spectrum1D):\n gdiline.append(iline)\n nplt = len(gdiline)\n if nplt == 0:\n print(\"Load spectra into the absline.analy['spec']\")\n return\n # Setup plot\n nrow = min(nplt,nrow)\n ncol = nplt // nrow + (nplt % nrow > 0)\n plt.clf()\n gs = gridspec.GridSpec(nrow, ncol)\n ymnx = (-0.1,1.1)\n\n for qq,iline in enumerate(gdiline):\n ax = plt.subplot(gs[qq%nrow, qq//nrow])\n # Plot\n velo = iline.analy['spec'].relative_vel((1+iline.attrib['z'])*iline.wrest)\n ax.plot(velo, iline.analy['spec'].flux, 'k-', linestyle='steps-mid')\n ax.plot(velo, iline.analy['spec'].sig, 'r:')\n # Lines\n ax.plot([0]*2, ymnx, 'g--')\n # Axes\n ax.set_xlim(self.vlim.value)\n ax.set_ylim(ymnx)\n ax.minorticks_on()\n if ((qq+1) % nrow == 0) or ((qq+1) == nplt):\n ax.set_xlabel('Relative Velocity (km/s)')\n else:\n ax.get_xaxis().set_ticks([])\n # Label\n ax.text(0.1, 0.1, iline.data['name'], transform=ax.transAxes, ha='left', va='center', fontsize='x-large')#, bbox={'facecolor':'white'})\n\n plt.tight_layout(pad=0.2,h_pad=0.,w_pad=0.1)\n if show:\n plt.show()\n plt.close()", "title": "" }, { "docid": "182c2b8509fb34c4a761230ad508b566", "score": "0.5790348", "text": "def plot_lines(self):\n self.plot(3)", "title": "" }, { "docid": "00194c5bca5bee05693ae41e11ec7a94", "score": "0.5679626", "text": "def add_line_sSFR_obs(line,L_line,ax,**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n # --- Observations compiled in Observations.ipynb ---\n\n L_obs = np.array([])\n sSFR_obs = np.array([])\n\n if p.plot: print('\\nObserved galaxies with %s:' % line)\n\n # Cormier et al. 2015 and Madden et al. 
2013\n df = pd.read_pickle('data/observations/DGS_Cormier_2015') \n try:\n df = pd.read_pickle('data/observations/DGS_Cormier_2015')\n if p.plot: \n ax.errorbar(10.**df.sSFR,df['L_'+line],yerr=df['e_'+line], elinewidth=1,marker='s',ms=5,mew=0,\\\n color='grey',alpha=0.8,lw=0)\n # ax.plot(10.**df.sSFR,df['L_'+line],'s',ms=5,mew=0,color='grey',alpha=0.8,label='Cormier+15 (dwarfs)')\n L_ul = df['L_'+line][df['L_'+line] < 0]\n if len(L_ul) > 0:\n ax.plot(10.**df.sSFR[df['L_'+line] < 0],L_ul,'s',ms=5,mew=0,color='grey',alpha=0.8)\n ax.errorbar(10.**df.sSFR[df['L_'+line] < 0],L_ul, elinewidth=1,\\\n uplims=np.ones(len(L_ul)),yerr=0.3,color='grey',alpha=0.8,lw=0)\n #-1.*L_ul - 10.**(np.log10(-1.*L_ul)-0.3)\n L_obs = np.append(L_obs,df['L_'+line].values)\n # print(df['L_'+line].values)\n sSFR_obs = np.append(sSFR_obs,df.sSFR.values)\n if p.plot: print('%i galaxies from Cormier+15 with positiv flux' % (len(df['L_'+line].values[df['L_'+line].values > 0])))\n # print('min SFR: ',np.min(df.SFR.values[df.sizes < 47]))\n except:\n pass", "title": "" }, { "docid": "537623470562e558c3bda22eb4f05a66", "score": "0.56345016", "text": "def _draw_line(plot, hori, vert, color, text):\n plot.plot(hori, vert, '-o'+color)\n plot.text(hori[-1]-3, vert[-1]+2, text, color=color)", "title": "" }, { "docid": "40ecb69ee5548aef01bf56525f53f885", "score": "0.56100017", "text": "def plot_trace(self):\n az.plot_trace(self.ifd_)", "title": "" }, { "docid": "219256197eae3b5476dac0f0fcdb74cf", "score": "0.56037825", "text": "def view_annulus(self, ann_ident: int, model: str, figsize: Tuple = (12, 8)):\n # Grabs the relevant spectra using the annular ident\n rel_spec = self.get_spectra(ann_ident)\n # Sets up a matplotlib figure\n plt.figure(figsize=figsize)\n\n # Set the plot up to look nice and professional.\n ax = plt.gca()\n ax.minorticks_on()\n ax.tick_params(axis='both', direction='in', which='both', top=True, right=True)\n\n # Set the title with all relevant information about the spectrum object in it\n plt.title(\"{n} - Annulus {num}\".format(n=self.src_name, num=ann_ident))\n # Boolean flag to check if any spectra have plot data, for the end of this method\n anything_plotted = False\n\n # Set up lists to store the model line and data plot handlers, so legends for fit and data can be put on\n # the same line\n mod_handlers = []\n plot_handlers = []\n # This stores the legend labels\n labels = []\n # Iterate through all matching spectra\n for spec in rel_spec:\n # This grabs the plot data if available\n try:\n all_plot_data = spec.get_plot_data(model)\n anything_plotted = True\n except ModelNotAssociatedError:\n continue\n\n # Gets x data and model data\n plot_x = all_plot_data[\"x\"]\n plot_mod = all_plot_data[\"model\"]\n # These are used as plot limits on the x axis\n lo_en = plot_x.min()\n hi_en = plot_x.max()\n\n # Grabs y data + errors\n plot_y = all_plot_data[\"y\"]\n plot_xerr = all_plot_data[\"x_err\"]\n plot_yerr = all_plot_data[\"y_err\"]\n # Plots the actual data, with errorbars\n cur_plot = plt.errorbar(plot_x, plot_y, xerr=plot_xerr, yerr=plot_yerr, fmt=\"+\",\n label=\"{o}-{i}\".format(o=spec.obs_id, i=spec.instrument), zorder=1)\n # The model line is put on\n cur_mod = plt.plot(plot_x, plot_mod, label=model, linewidth=2, color=cur_plot[0].get_color())[0]\n mod_handlers.append(cur_mod)\n plot_handlers.append(cur_plot)\n labels.append(\"{o}-{i}\".format(o=spec.obs_id, i=spec.instrument))\n\n # Sets up the legend so that matching data point and models are on the same line in the legend\n 
ax.legend(handles=zip(plot_handlers, mod_handlers), labels=labels,\n handler_map={tuple: legend_handler.HandlerTuple(None)}, loc='best')\n\n # Ensure axis is limited to the chosen energy range\n plt.xlim(lo_en, hi_en)\n\n # Just sets how the figure looks with axis labels\n plt.xlabel(\"Energy [keV]\")\n plt.ylabel(\"Normalised Counts s$^{-1}$ keV$^{-1}$\")\n ax.set_xscale(\"log\")\n ax.xaxis.set_major_formatter(ScalarFormatter())\n ax.xaxis.set_minor_formatter(FuncFormatter(lambda inp, _: '{:g}'.format(inp)))\n ax.xaxis.set_major_formatter(FuncFormatter(lambda inp, _: '{:g}'.format(inp)))\n\n plt.tight_layout()\n # Display the spectrum\n\n if anything_plotted:\n plt.show()\n else:\n warnings.warn(\"There are no {m} XSPEC fits associated with this AnnularSpectra, so you can't view \"\n \"it\".format(m=model))\n\n # Wipe the figure\n plt.close(\"all\")", "title": "" }, { "docid": "232630898f055a6a0325cf97e338b403", "score": "0.55994046", "text": "def plot_sine_evaluation(real_samples, fake_samples, idx, identifier):\n ### frequency\n seq_length = len(real_samples[0]) # assumes samples are all the same length\n frate = seq_length\n freqs_hz = np.fft.rfftfreq(seq_length)*frate # this is for labelling the plot\n # TODO, just taking axis 0 for now...\n w_real = np.mean(np.abs(np.fft.rfft(real_samples[:, :, 0])), axis=0)\n w_fake = np.mean(np.abs(np.fft.rfft(fake_samples[:, :, 0])), axis=0)\n ### amplitude\n A_real = np.max(np.abs(real_samples[:, :, 0]), axis=1)\n A_fake = np.max(np.abs(fake_samples[:, :, 0]), axis=1)\n ### now plot\n nrow = 2\n ncol = 2\n fig, axarr = plt.subplots(nrow, ncol, sharex='col', figsize=(6, 6))\n # freq\n axarr[0, 0].vlines(freqs_hz, ymin=np.minimum(np.zeros_like(w_real), w_real), ymax=np.maximum(np.zeros_like(w_real), w_real), color='#30ba50')\n axarr[0, 0].set_title(\"frequency\", fontsize=16)\n axarr[0, 0].set_ylabel(\"real\", fontsize=16)\n axarr[1, 0].vlines(freqs_hz, ymin=np.minimum(np.zeros_like(w_fake), w_fake), ymax=np.maximum(np.zeros_like(w_fake), w_fake), color='#ba4730')\n axarr[1, 0].set_ylabel(\"generated\", fontsize=16)\n # amplitude\n axarr[0, 1].hist(A_real, normed=True, color='#30ba50', bins=30)\n axarr[0, 1].set_title(\"amplitude\", fontsize=16)\n axarr[1, 1].hist(A_fake, normed=True, color='#ba4730', bins=30)\n\n fig.savefig('./experiments/plots/' + identifier + '_eval' + str(idx).zfill(4) +'.png')\n plt.clf()\n plt.close()\n return True", "title": "" }, { "docid": "961eb586dd24817a4c70d35dc48e9e0b", "score": "0.5588844", "text": "def TwoImmedSpectraPlot(Xax, Ydat1, Ydat2, Label1, Label2, xmin, xmax, ymin, ymax,\n XLab, YLab, SupTitle, Title, FileName, currentDate, currentTime, Software_version):\n plt.figure()\n rc('font', size=8, weight='normal')\n plt.plot(Xax, Ydat1, color='r', linestyle='-', linewidth='1.00', label=Label1)\n plt.plot(Xax, Ydat2, color='b', linestyle='-', linewidth='1.00', label=Label2)\n plt.axis([xmin, xmax, ymin, ymax])\n plt.xlabel(XLab)\n plt.ylabel(YLab)\n plt.suptitle(SupTitle, fontsize=9, fontweight='bold')\n plt.title(Title, fontsize=7, x=0.46, y=1.005)\n plt.grid(visible=True, which='both', color='0.00', linestyle='--')\n plt.legend(loc='upper right', fontsize=8)\n plt.text(0.7, 0.03, 'Processed ' + currentDate + ' at ' + currentTime,\n fontsize=5, transform=plt.gcf().transFigure)\n plt.text(0.03, 0.03, 'Software version: ' + Software_version + ' yerin.serge@gmail.com, IRA NASU',\n fontsize=5, transform=plt.gcf().transFigure)\n pylab.savefig(FileName, bbox_inches='tight', dpi=160)\n plt.close('all')\n return 0", 
"title": "" }, { "docid": "89866177ca17138401d71dcc919154c1", "score": "0.55823153", "text": "def plot_line(ax, line, wavelength, flux, bandwidth, ray):\n\n ray_filename = str(ray)\n abs_filename = 'abs_' + ray_filename[4:-3] + '.txt'\n\n if not os.path.exists(absorbers_directory + abs_filename):\n\n lambda_0, N, T, absorber_position = get_absorber_chars(ray, line,\n line_list)\n\n else:\n\n lambda_0, N, T, absorber_position = get_absorber_chars_from_file(\n absorbers_directory + abs_filename,\n line)\n\n absorber_region = absorber_region_2Mpc_LG(absorber_position)\n\n velocity, flux = get_line(lambda_0, wavelength=wavelength, flux=flux,\n wavelength_interval=bandwidth)\n\n ax.plot(velocity, flux, label = 'N = {:.2e}\\nT = {:.2e}\\n{}'.format(N, T,\n absorber_region))\n\n #ax.set_xlabel('Velocity [km/s]', fontsize = 15)\n #ax.set_ylabel('Relative Flux', fontsize = 15)\n ax.set_title('{}'.format(line), fontsize = 15)\n ax.set_xlim(-430,430)\n ax.set_ylim(0,1.1)\n ax.legend(loc='best')\n ax.grid(True)", "title": "" }, { "docid": "9312648225eee8561b4062b38263c9ed", "score": "0.55637205", "text": "def plot_autocorrs(self, axis=0, n_rows=4, n_cols=8):\n self.current_plot = 'multi'\n self.ax_zoomed = False\n \n bls = self.uv.d_uv_data['BASELINE']\n\n # Extract the relevant baselines using a truth array\n # bls = bls.tolist()\n bl_ids = set([256*i + i for i in range(1, n_rows * n_cols + 1)])\n bl_truths = np.array([(b in bl_ids) for b in bls])\n \n #print self.uv.d_uv_data['DATA'].shape\n #x_data = self.d_uv_data['DATA'][bl_truths,0,0,:,0,axis] # Baselines, freq and stokes\n #x_cplx = x_data[:,:,0] + 1j * x_data[:,:,1]\n\n x_cplx = self.stokes[axis][bl_truths]\n\n\n \n # Plot the figure\n #print self.uv.n_ant\n fig = self.sp_fig\n figtitle = '%s %s: %s -- %s'%(self.uv.telescope, self.uv.instrument, self.uv.source, self.uv.date_obs)\n for i in range(n_rows):\n for j in range(n_cols):\n ax = fig.add_subplot(n_rows, n_cols, i*n_cols + j +1)\n ax.set_title(self.uv.d_array_geometry['ANNAME'][i*n_cols + j], fontsize=10)\n #ax.set_title(\"%s %s\"%(i, j))\n \n x = x_cplx[i*n_cols+j::self.uv.n_ant]\n \n if self.scale_select.currentIndex() == 0 or self.scale_select.currentIndex() == 1:\n if x.shape[0] == self.uv.n_ant:\n self.plot_spectrum(ax, x, label_axes=False)\n else:\n self.plot_spectrum(ax, x, stat='max', label_axes=False)\n self.plot_spectrum(ax, x, stat='med', label_axes=False)\n self.plot_spectrum(ax, x, stat='min', label_axes=False)\n else:\n self.plot_spectrum(ax, x, label_axes=False)\n self.updateFreqAxis(ax)\n \n if i == n_rows-1:\n ax.set_xlabel('Freq')\n if j == 0:\n ax.set_ylabel('Amplitude')\n \n plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n plt.tick_params(axis='both', which='major', labelsize=10)\n plt.tick_params(axis='both', which='minor', labelsize=8)\n plt.xticks(rotation=30)\n \n plt.subplots_adjust(left=0.05, right=0.98, top=0.95, bottom=0.1, wspace=0.3, hspace=0.45)\n return fig, ax", "title": "" }, { "docid": "0cdad3fda5d3a2bf360d60778501c7db", "score": "0.55621266", "text": "def plotLine(self):\n minc = 0\n maxc = 500\n num = 500\n levels = np.linspace(minc,maxc,num+1)\n title = textwrap.dedent(\"\"\"\\\n Orography difference between LGM and Modern ICE-5G data\n using {0} meter contour interval\"\"\").format((maxc-minc)/num)\n plt.figure()\n plt.contour(self.difference_in_ice_5g_orography,levels=levels)\n plt.title(title)\n pts.set_ticks_to_zero()\n #if self.save:\n #plt.savefig('something')\n print(\"Line contour plot created\")", "title": "" }, { "docid": 
"1616600cb72854411da2becba40dd14b", "score": "0.55573034", "text": "def plot(frame, clipped, auto, lag, threshold, freq, save):\n fig, axes = plt.subplots(4, constrained_layout=True)\n fig.set_size_inches(8.0, 8.0)\n fig.canvas.set_window_title('Excercise 4')\n\n ax_frame, ax_clipped, ax_auto, ax_freq = axes\n\n time = np.linspace(0, frame.size / SAMPLE_RATE, num=frame.size)\n for ax in axes:\n ax.set_xlabel('time [s]')\n ax.set_ylabel('y')\n\n\n ax_frame.plot(time, frame)\n ax_clipped.plot(time, clipped)\n\n ax_auto.plot(auto)\n ax_auto.axvline(threshold, color='black', label='Threshold')\n ax_auto.stem([lag[0]], [lag[1]], linefmt='r-', basefmt=None, label='Lag')\n\n ax_freq.plot(freq[0], 'g-', label='mask-on')\n ax_freq.plot(freq[1], 'r-', label='mask-off')\n\n ax_auto.legend(loc=1)\n ax_freq.legend(loc=0)\n\n ax_frame.set_title('Maskon frame')\n ax_clipped.set_title('Central clipping with 70%')\n ax_auto.set_title('Autocorrelation')\n ax_freq.set_title('Primary frequencies of frames')\n\n ax_auto.set_xlabel('frames')\n ax_freq.set_xlabel('frames')\n\n ax_freq.set_ylabel('f0')\n\n if save:\n save_figure(fig, 'ex4')\n else:\n plt.show()", "title": "" }, { "docid": "cb3f10d5d8e2b03538c384065737060e", "score": "0.55428", "text": "def show(self):\n lines, = pylab.plot(self.wavelengths, self.intensities)\n return lines", "title": "" }, { "docid": "f4a12c876681e0acb9c232c46f704c79", "score": "0.5531809", "text": "def animate(i): \n ax1.clear()\n font_dict = {'family':'sans-serif',\n 'color':'darkred',\n 'size':8}\n for i in range(len(xt)):\n ax1.text(xt[i], yt[i], tt[i], fontdict=font_dict)\n ax1.plot(xs, ys)\n #ax1.scatter(xt, yt, 'yo')\n\n # This is for plotting the coordinates and the class of the detected object\n animated_plot = plt.plot(xt, yt, 'yo')[0]\n animated_plot.set_xdata(xt)\n animated_plot.set_ydata(yt)\n plt.draw()", "title": "" }, { "docid": "b3087b755f242faa1ca635309b06037c", "score": "0.55303067", "text": "def add_stat_annot(fig, ax, x_start_list, x_end_list,\n y_start_list=None, y_end_list=None,\n line_height=2, stat_list=['*'],\n text_y_offset=0.2, text_x_offset=-0.01):\n\n if type(x_start_list) is not list:\n x_start_list = [x_start_list]\n\n for x_start, x_end, y_start, y_end, stat in zip(x_start_list, x_end_list,\n y_start_list, y_end_list, stat_list):\n\n if y_start is None:\n y_start = get_axes_object_max(ax, x_loc=x_start, object_type='line') + line_height\n if y_end is None:\n max_at_x_end = get_axes_object_max(ax, x_loc=x_end, object_type='line')\n print(max_at_x_end)\n y_end = max_at_x_end + line_height\n\n y_start_end_max = np.max([y_start, y_end])\n\n sig_line = ax.plot([x_start, x_start, x_end, x_end],\n [y_start, y_start_end_max + line_height, y_start_end_max + line_height, y_end],\n linewidth=1, color='k')\n\n ax.text(x=(x_start + x_end) / 2 + text_x_offset,\n y=y_start_end_max + line_height + text_y_offset,\n s=stat, horizontalalignment='center')\n\n return fig, ax", "title": "" }, { "docid": "d7fb8629d04aa72eace8754d204aba78", "score": "0.5528738", "text": "def plot(self, **kwargs):\n base.plot_homline(self.line, **kwargs)", "title": "" }, { "docid": "6b402ea541c8ff426611d2c5f03d260b", "score": "0.55201197", "text": "def plotSpectrum(self, spectrum=None):\n if spectrum is None:\n spectrum = self.spectrometer.getSpectrum()\n\n if len(self.axes.lines) == 0:\n self.axes.plot(self.spectrometer.wavelength, spectrum, 'k')\n self.axes.set_xlabel(\"Wavelength [nm]\")\n self.axes.set_ylabel(\"Intensity [arb.u]\")\n else:\n self.axes.lines[0].set_data( 
self.spectrometer.wavelength, spectrum) # set plot data\n self.axes.relim()", "title": "" }, { "docid": "52f98988b144482b1de461e35c280b0c", "score": "0.5514532", "text": "def replot(self,ax):\n for i,line in enumerate(self.lines):\n line.set_ydata(self.data[i].f)\n line.set_xdata(self.data[i].x)\n for line in self.lines: \n ax.draw_artist(line)", "title": "" }, { "docid": "1073d73a1de7c17d9f917c5d7b9c811b", "score": "0.5507481", "text": "def line_plot(self, x, y, labels, ax=None):\n if ax is None:\n fig, ax = plt.subplots()\n else:\n fig = None\n ax.plot(x, y, '--o', label=labels[0])\n ax.set_xlabel(labels[1])\n ax.set_ylabel(labels[2])\n ax.set_title(labels[3])\n return fig, ax", "title": "" }, { "docid": "1fe8aaf039f9680792d6a190090b7682", "score": "0.5506677", "text": "def seas_line_plot(df, fwd=None, **kwargs):\n\n fig = go.Figure()\n traces = cptr.seas_plot_traces(df, fwd, **kwargs)\n if \"shaded_range\" in traces and traces[\"shaded_range\"]:\n for trace in traces[\"shaded_range\"]:\n fig.add_trace(trace)\n\n if \"average_line\" in traces:\n fig.add_trace(traces[\"average_line\"])\n\n if \"hist\" in traces:\n for trace in traces[\"hist\"]:\n fig.add_trace(trace)\n\n if \"fwd\" in traces:\n for trace in traces[\"fwd\"]:\n fig.add_trace(trace)\n\n fig.layout.xaxis.tickvals = pd.date_range(\n start=str(dates.curyear), periods=12, freq=\"MS\"\n )\n\n title = cpu.gen_title(df, **kwargs)\n legend = go.layout.Legend(font=dict(size=10), traceorder=\"reversed\")\n yaxis_title = kwargs.get(\"yaxis_title\", None)\n hovermode = kwargs.get(\"hovermode\", \"x\")\n fig.update_layout(\n title=title,\n title_x=0.01,\n xaxis_tickformat=\"%b\",\n yaxis_title=yaxis_title,\n legend=legend,\n hovermode=hovermode,\n margin=preset_margins,\n )\n\n return fig", "title": "" }, { "docid": "9cfb13461b4c504c12398355e409fc9d", "score": "0.55004066", "text": "def line_SFR_array(lines,**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n fig, axs = plt.subplots(len(lines), sharex='col',\\\n figsize=(6,15),facecolor='w',\\\n gridspec_kw={'hspace': 0, 'wspace': 0})\n\n for i,ax in enumerate(axs):\n\n #line_SFR(line=lines[i],ax=ax,select=p.select,sim_run=p.sim_runs[0],nGal=p.nGals[0],add_obs=p.add_obs,MS=p.MS,add=True,cb=True)\n line_SFR(line=lines[i],ax=ax,select=p.select,sim_run=p.sim_runs[1],nGal=p.nGals[1],add_obs=p.add_obs,MS=p.MS,add=True,cb=True)\n #line_SFR(line=lines[i],ax=ax,select=p.select,sim_run=p.sim_runs[0],nGal=p.nGals[0],add_obs=False,add=True,cb=False)\n\n # Only 1 galaxy\n #line_SFR(line=lines[i],ax=ax,select=p.select,sim_run=p.sim_runs[0],nGal=p.nGals[0],add_obs=True,add=True,cb=False)\n\n\n plt.tight_layout()\n\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'luminosity/'): os.mkdir(p.d_plot + 'luminosity/') \n plt.savefig(p.d_plot + 'luminosity/lines_SFR_array_%s%s%s_%s%s_%s.png' % (p.ext,p.grid_ext,p.table_ext,p.sim_name,p.sim_run,p.select), format='png', dpi=300)", "title": "" }, { "docid": "60212bf527b05184d6d8bd8da1543d2b", "score": "0.5496056", "text": "def update_overlaid_plot(self, key, _):\n if key == self.controls.Arrays.WAVEFORMS:\n\n trigger = self.pv_monitor.arrays[self.controls.Arrays.WAVEFORMS][0]\n trace = self.pv_monitor.arrays[self.controls.Arrays.WAVEFORMS][1]\n waveforms = [trigger, trace]\n\n first_peak, second_peak = self.get_windowed_data(waveforms[0], waveforms[1])\n self.overlaid_lines[0].set_ydata(first_peak)\n self.overlaid_lines[0].set_xdata(range(len(first_peak)))\n self.overlaid_lines[1].set_ydata(second_peak)\n 
self.overlaid_lines[1].set_xdata(range(len(second_peak)))\n\n areas = [integ.simps(first_peak), integ.simps(second_peak)]\n labels = ['%.1f' % areas[0], '%.1f' % areas[1]]\n\n# for area in areas:\n# if area < 0.1:\n# raise RangeError # calculation warning error for example\n self.ax2.legend([self.overlaid_lines[0], self.overlaid_lines[1]],\n labels)\n\n self.draw()", "title": "" }, { "docid": "41d65313251a81d60a2b5f8b748c826f", "score": "0.5488134", "text": "def line_sSFR_array(lines,**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n fig, axs = plt.subplots(len(lines), sharex='col',\\\n figsize=(6,15),\\\n gridspec_kw={'hspace': 0, 'wspace': 0})\n\n for i,ax in enumerate(axs):\n\n line_sSFR(line=lines[i],ax=ax,select=p.select,add=True)\n\n plt.tight_layout()\n\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'luminosity/'): os.mkdir(p.d_plot + 'luminosity/') \n plt.savefig(p.d_plot + 'luminosity/lines_sSFR_array_%s%s%s_%s%s_%s.png' % (p.ext,p.grid_ext,p.table_ext,p.sim_name,p.sim_run,p.select), format='png', dpi=300)", "title": "" }, { "docid": "62081325732426af5315189f8bad6d0a", "score": "0.54734915", "text": "def update_plot(self,ax):\n for i,line in enumerate(self.lines):\n line.set_ydata(self.data[i].f)\n for line in self.lines: \n ax.draw_artist(line)", "title": "" }, { "docid": "a50810294fe6d4d05215c99f5efd51ae", "score": "0.5456929", "text": "def CALSPECAbsLineIdentificationinPDF(spectra,pointing,all_titles,object_name,dir_top_images,all_filt,date,figname,tagname,NBIMGPERROW=2):\n \n \n NBSPEC=len(spectra)\n \n MAXIMGROW=max(2,int(m.ceil(float(NBSPEC)/float(NBIMGPERROW))))\n \n \n # fig file specif\n NBIMGROWPERPAGE=5 # number of rows per pages\n PageNum=0 # page counter\n \n figfilename=os.path.join(dir_top_images,figname)\n \n pp = PdfPages(figfilename) # create a pdf file\n \n \n titlepage='WL calibrated 1D Spectra 1D for obj : {} date :{}'.format(object_name,date)\n \n \n all_wl= [] # containers for wavelength\n \n \n for index in np.arange(0,NBSPEC):\n \n \n # new pdf page \n if index%(NBIMGPERROW*NBIMGROWPERPAGE) == 0:\n f, axarr = plt.subplots(NBIMGROWPERPAGE,NBIMGPERROW,figsize=(25,30))\n f.suptitle(titlepage,size=20)\n \n # index of image in the pdf page \n indexcut=index-PageNum*(NBIMGROWPERPAGE*NBIMGPERROW) \n ix=indexcut%NBIMGPERROW\n iy=indexcut/NBIMGPERROW\n \n \n spec = spectra[index]\n \n # calibrate\n grating_name=get_disperser_filtname(all_filt[index])\n X_Size_Pixels=np.arange(spec.shape[0])\n lambdas = Pixel_To_Lambdas(grating_name,X_Size_Pixels,pointing[index],False)\n \n \n all_wl.append(lambdas)\n \n #plot\n axarr[iy,ix].plot(lambdas,spec,'r-',lw=2,label=tagname)\n \n thetitle=\"{} : {} : {} \".format(index,all_titles[index],all_filt[index])\n axarr[iy,ix].set_title(thetitle,color='blue',fontweight='bold',fontsize=16)\n \n \n #axarr[iy,ix].text(600.,spec.max()*1.1, all_filt[index],verticalalignment='top', horizontalalignment='left',color='blue',fontweight='bold', fontsize=20)\n axarr[iy,ix].legend(loc='best',fontsize=16)\n axarr[iy,ix].set_xlabel('Wavelength [nm]', fontsize=16)\n axarr[iy,ix].grid(True)\n \n YMIN=0.\n YMAX=spec.max()*1.2\n \n for line in LINES:\n if line == O2 or line == HALPHA or line == HBETA or line == HGAMMA or line == HDELTA or line ==O2B or line == O2Y or line == O2Z:\n axarr[iy,ix].plot([line['lambda'],line['lambda']],[YMIN,YMAX],'-',color='red',lw=0.5)\n axarr[iy,ix].text(line['lambda'],0.9*(YMAX-YMIN),line['label'],verticalalignment='bottom', horizontalalignment='center',color='red', 
fontweight='bold',fontsize=16)\n \n \n axarr[iy,ix].set_ylim(YMIN,YMAX)\n axarr[iy,ix].set_xlim(np.min(lambdas),np.max(lambdas))\n axarr[iy,ix].set_xlim(0,1200.)\n \n if (index+1)%(NBIMGPERROW*NBIMGROWPERPAGE) == 0:\n PageNum+=1 # increase page Number\n f.savefig(pp, format='pdf')\n f.show()\n \n \n f.savefig(pp, format='pdf') \n f.show()\n pp.close() \n \n return all_wl", "title": "" }, { "docid": "141e3e21fad9428ef782ee6b58c0cedd", "score": "0.5456708", "text": "def plotFeatures(self):\n fl=np.array(self.xp)*0.0+0.25*self.farr.max()\n self.splines=self.axes.plot(self.xp, fl , ls='', marker='|', ms=20, color='#00FF00')\n #set up the text position\n tsize=0.83\n self.ymin, self.ymax = self.axes.get_ylim()\n ppp=(self.ymax-self.ymin)/(self.arcfigure.figure.get_figheight()*self.arcfigure.figure.get_dpi())\n f=self.ymax-10*tsize*ppp\n for x,w in zip(self.xp, self.wp):\n w='%6.2f' % float(w)\n self.axes.text(x, f, w, size='small', rotation='vertical', color='#00FF00')", "title": "" }, { "docid": "86376948a016dff684710f5301a204cb", "score": "0.5452959", "text": "def OneImmedSpecterPlot(Xax, Ydat, Label, xmin, xmax, ymin, ymax, XLab, YLab,\n SupTitle, Title, FileName, currentDate, currentTime, Software_version):\n\n plt.figure()\n rc('font', size=8, weight='normal')\n plt.plot(Xax, Ydat, color='b', linestyle='-', linewidth='1.00', label=Label)\n plt.axis([xmin, xmax, ymin, ymax])\n plt.xlabel(XLab)\n plt.ylabel(YLab)\n plt.suptitle(SupTitle, fontsize=9, fontweight='bold')\n plt.title(Title, fontsize=7, x=0.46, y=1.005)\n plt.grid(visible=True, which='both', color='0.00', linestyle='--')\n plt.legend(loc='upper right', fontsize=8)\n plt.text(0.7, 0.03, 'Processed ' + currentDate + ' at ' + currentTime,\n fontsize=5, transform=plt.gcf().transFigure)\n plt.text(0.03, 0.03, 'Software version: ' + Software_version + ' yerin.serge@gmail.com, IRA NASU',\n fontsize=5, transform=plt.gcf().transFigure)\n pylab.savefig(FileName, bbox_inches='tight', dpi=160)\n plt.close('all')\n return", "title": "" }, { "docid": "ccc1d74026f246628ef7afe483958ecf", "score": "0.54407465", "text": "def replot(self,ax):\n self.XP_Plotter.replot(ax)\n # theoretical lines\n self.lines_theory[0].set_xdata(self.xx)\n self.lines_theory[1].set_xdata(self.xx)\n self.lines_theory[2].set_xdata(self.xx_itpl)\n for line in self.lines_theory: \n ax.draw_artist(line)", "title": "" }, { "docid": "6769b3c68a6551f320ae817e9a71e9d5", "score": "0.54364705", "text": "def plot_spectra(path):\r\n plt.figure(figsize=(20, 10))\r\n x, y= np.loadtxt(fname=path, delimiter='\\t',dtype=int,\r\n usecols = (1,2), skiprows=100, unpack = True)\r\n plt.plot(x, y)\r\n return plt.show()", "title": "" }, { "docid": "1ab4780fc8cd175a4e47f4e52f0225ad", "score": "0.54272777", "text": "def plot_spectrum(filelist, figname=None, IDlines=None):\n if not isinstance(filelist, list):\n filelist = [filelist]\n\n # Matplotlib plots\n fig, ax = plt.subplots()\n ax.minorticks_on()\n\n ax.set_xlabel(r\"Wavelength ($\\AA$)\")\n ax.set_ylabel(r\"$F_\\nu$ (normalized)\")\n\n for i, inputfile in enumerate(filelist):\n\n # Read the spectrum\n # wav, flux, (imodel, inu) = load_spectre(inputfile)\n spectrum = load_spectrum(inputfile)\n wav, flux = spectrum.data\n imodel = spectrum.imodel\n inu = spectrum.inu\n\n # Units detection\n if np.mean(wav) > 1e10:\n # wav in frequency\n wav = sc.c_ang / wav\n\n if not imodel and np.mean(flux) < 1e-20:\n inu = True\n\n # Conversion from f_lambda to f_nu\n if not inu:\n flux *= wav * wav / sc.c_ang\n\n teff = (-np.trapz(flux, x=sc.c_ang / wav) 
/ sc.sigma * 4.0 * np.pi) ** 0.25\n\n flux = normalize(wav, flux)\n ax.plot(wav, flux, label=basename(inputfile))\n\n ax.legend()\n\n # If elements have been provided\n if IDlines is not None:\n\n # setup for the figure\n xlim = ax.get_xlim()\n tick_ymax = 0.05\n\n # Read the line list\n linedict = load_lines()\n\n # If only one element, convert to list\n if isinstance(IDlines, str):\n IDlines = [IDlines]\n\n # Select only the elements we want\n matches = []\n for ion in IDlines:\n matches += [k for k in linedict if re.match(ion + r\"[IV]+\", k)]\n\n # If all lines are requested\n if \"All\" in IDlines:\n matches = linedict.keys()\n\n # For each ion\n for k in matches:\n\n # for each line of the ion\n for l in linedict[k]:\n\n # Plot vertical line\n ax.axvline(l, ymin=0, ymax=tick_ymax, c=\"k\")\n\n # And label the element above it\n trans = ax.get_xaxis_transform()\n ax.text(\n x=l,\n y=tick_ymax + 0.01,\n s=k,\n ha=\"center\",\n transform=trans,\n clip_on=True,\n )\n\n # Set previous limits\n ax.set_xlim(xlim)\n\n if figname is None:\n plt.show()\n\n else:\n fig.savefig(figname)\n\n plt.close()", "title": "" }, { "docid": "7f201ab3cfbe090808aa1d28d6b1eb7d", "score": "0.54191285", "text": "def plotSpectrum(inp,xrng=[],yrng=[],xlabel='',ylabel='',xlog=False,ylog=False,grid=False,\n legend=[],legend_location='upper right',fontscale=1,legend_fontscale=1,title='',\n color='k',colormap=None,linestyle='-',linewidth=1.5,alpha=1.,\n show_noise=True,color_noise='k',linestyle_noise='-',linewidth_noise=1.5,alpha_noise=0.5,\n comparison=None,color_comparison='grey',linestyle_comparison='-',linewidth_comparison=1.5,alpha_comparison=1,\n residual=False,color_residual='m',linestyle_residual='-',linewidth_residual=1.5,alpha_residual=0.5,\n telluric=False,color_telluric='grey',linestyle_telluric='-',linewidth_telluric=1.5,alpha_telluric=0.2,\n features=[],mdwarf=False,ldwarf=False,tdwarf=False,young=False,binary=False,nsamples=100,\n band=[],band_color='k',band_alpha=0.2,band_label='',band_label_position='bottom',band_width=0.1,\n show_zero=True,stack=0.,zeropoint=0.,color_zero='k',linestyle_zero=':',linewidth_zero=1.5,alpha_zero=0.3,\n inset=False,inset_xrange=[],inset_yrange=[],inset_position=[0.65,0.60,0.20,0.20],inset_features=False,\n output='',multiplot=False,multipage=False,layout=[1,1],figsize=[],tight=True,\n interactive=False,**kwargs):\n\n# keyword parameters (for backward compatability)\n for k in ['showZero','showzero']: show_zero=kwargs.get(k,show_zero)\n for k in ['showNoise','noise','uncertainty','shownoise','showuncertainty','show_uncertainty']: show_noise=kwargs.get(k,show_noise)\n\n for k in ['line_style','lineStyle','ls','linestyles','line_styles']: linestyle=kwargs.get(k,linestyle)\n for k in ['line_width','lineWidth','width','lw','linewidths','line_widths']: linewidth=kwargs.get(k,linewidth)\n for k in ['colors','colour','colours']: color=kwargs.get(k,color)\n for k in ['colorScheme','color_scheme','colorscheme','colorMap','color_map']: colormap=kwargs.get(k,colormap)\n\n for k in ['colornoise','colorNoise','colorUnc','coloruncertainty','color_uncertainty','colorUncertainty']: color_noise=kwargs.get(k,color_noise)\n for k in ['linestylenoise','line_style_noise','linestyleNoise']: linestyle_noise=kwargs.get(k,linestyle_noise)\n for k in ['linewidthnoise','linewidthNoise','line_width_noise']: linewidth_noise=kwargs.get(k,linewidth_noise)\n for k in ['alphanoise','alphaNoise']: alpha_noise=kwargs.get(k,alpha_noise)\n\n for k in ['colorzero','colorZero']: 
color_zero=kwargs.get(k,color_zero)\n for k in ['linestylezero','line_style_zero','linestyleZero']: linestyle_zero=kwargs.get(k,linestyle_zero)\n for k in ['linewidthzero','linewidthZero','line_width_zero']: linewidth_zero=kwargs.get(k,linewidth_zero)\n for k in ['alphazero','alphaZero']: alpha_zero=kwargs.get(k,alpha_zero)\n\n for k in ['colorcomparison','colorComparison']: color_comparison=kwargs.get(k,color_comparison)\n for k in ['linestyleComparison','line_style_comparison','linestylecomparison']: linestyle_comparison=kwargs.get(k,linestyle_comparison)\n for k in ['linewidthcomparison','linewidthComparison','line_width_comparison']: linewidth_comparison=kwargs.get(k,linewidth_comparison)\n for k in ['alphacomparison','alphaComparison']: alpha_comparison=kwargs.get(k,alpha_comparison)\n\n for k in ['colorresidual','colorResidual']: color_residual=kwargs.get(k,color_residual)\n for k in ['linestyleresidual','line_style_residual','linestyleResidual']: linestyle_residual=kwargs.get(k,linestyle_residual)\n for k in ['linewidthresidual','linewidthResidual','line_width_residual']: linewidth_residual=kwargs.get(k,linewidth_residual)\n for k in ['alpharesidual','alphaResidual']: alpha_residual=kwargs.get(k,alpha_residual)\n\n for k in ['bands']: band=kwargs.get(k,band)\n if len(band) == 2 and isinstance(band[0],list) == False: band = [band]\n for k in ['bandcolors','bandcolor','band_colors']: band_color=kwargs.get(k,band_color)\n for k in ['bandalphas','band_alphas','bandalpha']: band_alpha=kwargs.get(k,band_alpha)\n for k in ['band_labels','bandlabel','bandlabels']: band_label=kwargs.get(k,band_label)\n for k in ['band_label_positions','bandlabelposition','bandlabelpositions']: band_label_position=kwargs.get(k,band_label_position)\n for k in ['bandwidth','bandwidths','band_widths']: band_width=kwargs.get(k,band_width)\n for par in [band_color,band_alpha,band_label,band_label_position,band_width]:\n if not isinstance(par,list): par = [par]*len(band)\n if len(par) < len(band): par.extend([par[-1] for x in range(len(band)-len(par))])\n\n for k in ['legends','label','labels']: legend=kwargs.get(k,legend)\n if not isinstance(legend,list): legend = [legend]\n for k in ['legendfontscale','legendFontscale']: legend_fontscale=kwargs.get(k,legend_fontscale)\n legend_fontscale=legend_fontscale*fontscale\n for k in ['legendLocation','legendlocation','labelLocation','labellocation','label_location']: legend_location=kwargs.get(k,legend_location)\n\n for k in ['xrange','x_range','wave_range','wrange','wrng']: xrng=kwargs.get(k,xrng)\n if not isinstance(xrng,list): xrng = [xrng]\n for k in ['yrange','y_range','flux_range','frange','frng']: yrng=kwargs.get(k,yrng)\n if not isinstance(yrng,list): yrng = [yrng]\n\n for k in ['multilayout','multiLayout','multi_layout']: layout=kwargs.get(k,layout)\n for k in ['file','filename']: output=kwargs.get(k,output)\n if not isinstance(output,str): output=''\n filetype = '.pdf'\n if output!='': filetype=output.split('.')[-1]\n\n if comparison != None and isinstance(comparison,splat.Spectrum) == False and isinstance(comparison,list) == False: \n print('plotSpectrum() Warning: comparison spectrum should be a splat Spectrum object, you passed {}'.format(comparison))\n comparison = None\n\n# some plotting constants\n xlabel_default = 'Wavelength'\n ylabel_deafult = 'Flux'\n\n# telluric bands in micron\n telluric_bands = [[1.1,1.2]*u.micron,[1.3,1.5]*u.micron,[1.75,2.0]*u.micron]\n\n# assign features by group\n if not isinstance(features,list): features = [features]\n if 
ldwarf==True or mdwarf==True: features.extend(['k','na','feh','tio','co','h2o','h2'])\n if tdwarf==True: features.extend(['k','ch4','h2o','h2'])\n if young==True: features.extend(['vo'])\n if binary==True: features.extend(['sb'])\n\n# clean repeats in features while maintaining order - set does not do this\n if len(features)>0:\n fea = []\n for i in features:\n if i not in fea: fea.append(i)\n features = fea\n\n\n# if a list is passed, use this list\n splist = copy.deepcopy(inp)\n if isinstance(splist,list) == False: splist = [splist]\n \n# set up for multiplot\n if len(splist) == 1: multiplot = False\n \n# array of lists => force multiplot\n elif len(splist) > 1 and isinstance(splist[0],list) == True: multiplot = True\n else: pass\n\n# reformat array of spectra of multiplot is used (i.e., user forgot to set)\n if multiplot == True and isinstance(splist[0],splat.Spectrum):\n splist = [[s] for s in splist]\n\n elif multiplot == False and isinstance(splist[0],splat.Spectrum):\n splist = [splist]\n \n# flatten array if multiplot is not set\n elif multiplot == False and isinstance(splist[0],list) and len(splist) > 1:\n splist = [[item for sublist in splist for item in sublist]] # flatten\n else: pass\n\n# total number of spectra - use to assign default legends\n allsps = [item for sublist in splist for item in sublist] # Total number of spectra\n if len(legend) == 0: legend=[sp.name for sp in allsps]\n if len(legend) < len(allsps):\n legend.extend([allsps[i].name for i in range(len(legend),len(allsps)-len(legend))])\n \n\n# now run a loop through the input subarrays\n plt.close('all')\n\n# set up here for multiple file output\n nplot = 1\n if multipage == True or multiplot == True:\n nplot = layout[0]*layout[1]\n numpages = int(len(splist) / nplot) + 1\n if (len(splist) % nplot == 0):\n numpages -= 1\n fig = []\n \n if multipage == True and filetype.lower() == 'pdf':\n pdf_pages = PdfPages(output)\n \n if multipage == False:\n if len(splist) > 1:\n filebase = output.replace('.{}'.format(filetype),'')\n files = [filebase+'{}.'.format(i+1)+filetype for i in numpy.arange(len(splist))]\n else:\n files = [output]\n\n pg_n = 0 # page counter\n plt_n = 0 # plot per page counter\n lg_n = 0 # legend per plot counter\n\n for plts,sp in enumerate(splist):\n# set specific plot parameters\n if not isinstance(sp[0],splat.Spectrum):\n raise ValueError('\\nInput to plotSpectrum has wrong format:\\n\\n{}\\n\\n'.format(sp[0]))\n\n# set up plotting defaults for the list of spectra - REPLACE THIS\n if not isinstance(zeropoint,list): zeropoint = [zeropoint]*len(sp)\n if len(zeropoint) < len(sp): zeropoint.extend([zeropoint[-1] for x in range(len(sp)-len(zeropoint))])\n if not isinstance(color,list): color = [color]*len(sp)\n if len(color) < len(sp): color.extend([color[-1] for x in range(len(sp)-len(color))])\n if not isinstance(linestyle,list): linestyle = [linestyle]*len(sp)\n if len(linestyle) < len(sp): linestyle.extend([linestyle[-1] for x in range(len(sp)-len(linestyle))])\n if not isinstance(linewidth,list): linewidth = [linewidth]*len(sp)\n if len(linewidth) < len(sp): linewidth.extend([linewidth[-1] for x in range(len(sp)-len(linewidth))])\n if not isinstance(alpha,list): alpha = [alpha]*len(sp)\n if len(alpha) < len(sp): alpha.extend([alpha[-1] for x in range(len(sp)-len(alpha))])\n if not isinstance(color_noise,list): color_noise = [color_noise]*len(sp)\n if len(color_noise) < len(sp): color_noise.extend([color_noise[-1] for x in range(len(sp)-len(color_noise))])\n if not 
isinstance(linestyle_noise,list): linestyle_noise = [linestyle_noise]*len(sp)\n if len(linestyle_noise) < len(sp): linestyle_noise.extend([linestyle_noise[-1] for x in range(len(sp)-len(linestyle_noise))])\n if not isinstance(linewidth_noise,list): linewidth_noise = [linewidth_noise]*len(sp)\n if len(linewidth_noise) < len(sp): linewidth_noise.extend([linewidth_noise[-1] for x in range(len(sp)-len(linewidth_noise))])\n if not isinstance(alpha_noise,list): alpha_noise = [alpha_noise]*len(sp)\n if len(alpha_noise) < len(sp): alpha_noise.extend([alpha_noise[-1] for x in range(len(sp)-len(color_noise))])\n if not isinstance(color_comparison,list): color_comparison = [color_comparison]*len(sp)\n if len(color_comparison) < len(sp): color_comparison.extend([color_comparison[-1] for x in range(len(sp)-len(color_comparison))])\n if not isinstance(linestyle_comparison,list): linestyle_comparison = [linestyle_comparison]*len(sp)\n if len(linestyle_comparison) < len(sp): linestyle_comparison.extend([linestyle_comparison[-1] for x in range(len(sp)-len(linestyle_comparison))])\n if not isinstance(linewidth_comparison,list): linewidth_comparison = [linewidth_comparison]*len(sp)\n if len(linewidth_comparison) < len(sp): linewidth_comparison.extend([linewidth_comparison[-1] for x in range(len(sp)-len(linewidth_comparison))])\n if not isinstance(alpha_comparison,list): alpha_comparison = [alpha_comparison]*len(sp)\n if len(alpha_comparison) < len(sp): alpha_comparison.extend([alpha_comparison[-1] for x in range(len(sp)-len(alpha_comparison))])\n\n# settings that work if the spectrum was read in as legitmate Spectrum object\n try:\n xlabel = kwargs.get('xlabel','{} ({})'.format(sp[0].wave_label,sp[0].wave.unit))\n ylabel = kwargs.get('ylabel','{} ({})'.format(sp[0].flux_label,sp[0].flux.unit))\n except:\n xlabel = kwargs.get('xlabel',xlabel_default)\n ylabel = kwargs.get('ylabel',ylabel_default)\n# initial plot range\n bound = [numpy.nanmin(sp[0].wave.value),numpy.nanmax(sp[0].wave.value)]\n ymax = [numpy.nanquantile(s.flux.value,0.98) for s in sp]\n bound.extend(numpy.array([-0.02,1.3])*numpy.nanmax(ymax)+\\\n numpy.array([numpy.nanmin(zeropoint),numpy.nanmax(zeropoint)+stack*(len(sp)-1)]))\n\n# set colormap if provided\n if colormap != None:\n values = numpy.arange(len(sp))\n color_map = plt.get_cmap(colormap)\n norm = colmap.Normalize(vmin=0, vmax=1.0*values[-1])\n scalarMap = cm.ScalarMappable(norm=norm, cmap=color_map)\n for i in range(len(sp)): color[i] = scalarMap.to_rgba(values[i])\n\n# GENERATE PLOTS\n if multiplot == True or multipage == True:\n plt_n = plts % nplot\n if (plt_n == 0):\n fig.append(plt.figure())\n pg_n += 1\n ax = fig[pg_n-1].add_subplot(layout[0], layout[1], plt_n+1)\n \n# plotting a single plot with all spectra\n else:\n plt.close('all')\n plt_n = 0\n fig = []\n if len(figsize)>0: fig.append(plt.figure(figsize=figsize))\n else: fig.append(plt.figure())\n ax = fig[0].add_subplot(111)\n \n for ii, a in enumerate(sp):\n# zeropoint and stack\n flx = [i+zeropoint[ii] for i in a.flux.value]\n if stack > 0: flx = [f + (len(sp)-ii-1)*stack for f in flx]\n ax.plot(a.wave.value,flx,color=color[ii],linestyle=linestyle[ii], lw=linewidth[ii], alpha=alpha[ii], zorder = 10, label = legend[lg_n]) \n\n# add comparison\n if comparison != None:\n# zeropoint and stack\n cflx = [i+zeropoint[ii] for i in comparison.flux.value]\n if stack > 0: cflx = [f + (len(sp)-ii-1)*stack for f in cflx]\n ax.plot(comparison.wave.value,cflx,color=color_comparison[ii],linestyle=linestyle_comparison[ii], 
lw=linewidth_comparison[ii], alpha=alpha_comparison[ii], zorder = 10)\n \n# add residual\n if residual == True and len(sp) == 2:\n # Save flux values from first spectrum\n if ii == 0:\n flx0 = [f - (len(sp)-ii-1)*stack for f in flx]\n \n # Subtract fluxes and plot\n elif ii == 1:\n res = [flx0[f_n] - f for f_n, f in enumerate(flx)]\n ax.plot(a.wave.value, res, alpha = alpha_residual[ii], color = color_residual[ii], linsetyle=linestyle_residual[ii], lw=linewidth_residual[ii])\n \n # Fix bound[2] if residual goes below 0\n if numpy.nanmin(res) < bound[2]:\n b0 = numpy.argmin(a.wave.value[a.wave.value > bound[0]])\n b1 = numpy.argmax(a.wave.value[a.wave.value < bound[1]])\n bound[2] = numpy.nanmin(res[b0:b1])\n\n# noise\n if show_noise == True:\n ns = [i+zeropoint[ii] for i in a.noise.value]\n ax.plot(a.wave.value,ns,color=color_noise[ii],linestyle=linestyle_noise[ii],alpha=alpha_noise[ii], lw=linewidth_noise[ii], zorder = 10)\n\n# zeropoint\n if show_zero == True:\n ze = numpy.ones(len(a.flux))*zeropoint[ii]\n ax.plot(a.wave.value,ze,color=color[ii],linestyle=linestyle_zero,alpha=alpha_zero,lw=linewidth_zero, zorder = 10)\n\n# save maximum flux among all spectra for plotting\n# THIS IS VERY SLOW AND IT WOULD BE BETTER TO FIND AN ALTERNATE APPROACH\n if len(features)>0:\n f = interp1d(a.wave,flx,bounds_error=False,fill_value=0.)\n if ii == 0: \n wvmax = numpy.linspace(bound[0],bound[1],nsamples)\n flxmax = numpy.array(f(wvmax))\n else: flxmax = numpy.maximum(flxmax,numpy.array(f(wvmax)))\n\n# legend counter\n lg_n = lg_n + 1 # Increment legend\n\n\n# label features\n# THIS NEEDS TO BE FIXED WITH GRETEL'S STUFF\n if len(features) > 0:\n yoff = 0.02*(bound[3]-bound[2]) # label offset\n fontsize = int((10-numpy.nanmin([(layout[0]*layout[1]-1),6]))*fontscale)\n for ftr in features:\n ftr = ftr.lower()\n if ftr in FEATURE_LABELS:\n ftrc = checkDict(ftr,FEATURE_LABELS)\n if ftrc != False:\n for ii,waveRng in enumerate(FEATURE_LABELS[ftrc]['wavelengths']):\n wRng = waveRng.to(sp[0].wave.unit).value\n# features must be contained in plot range (may change this)\n if numpy.nanmin(wRng) > bound[0] and numpy.nanmax(wRng) < bound[1]:\n wfeature = numpy.where(numpy.logical_and(wvmax >= numpy.nanmin(wRng),wvmax <= numpy.nanmax(wRng)))\n if len(wvmax[wfeature]) == 0: wfeature = numpy.argmax(numpy.absolute(wvmax-numpy.nanmedian(wRng)))\n y = numpy.nanmax(flxmax[wfeature])+yoff\n flxmax[wfeature] = flxmax[wfeature]+3.*yoff\n\n if FEATURE_LABELS[ftrc]['type'] == 'band':\n ax.plot(wRng,[y+yoff]*2,color='k',linestyle='-')\n ax.plot([wRng[0]]*2,[y,y+yoff],color='k',linestyle='-')\n ax.text(numpy.mean(wRng),y+1.5*yoff,FEATURE_LABELS[ftrc]['label'],horizontalalignment='center',fontsize=fontsize)\n else:\n for w in wRng: ax.plot([w]*2,[y,y+yoff],color='k',linestyle='-')\n ax.text(numpy.mean(wRng),y+1.5*yoff,FEATURE_LABELS[ftrc]['label'],horizontalalignment='center',fontsize=fontsize)\n bound[3] = numpy.nanmax([numpy.nanmax(flxmax)+2.*yoff,bound[3]])\n\n# add grid\n if grid == True: ax.grid() \n\n# axis labels \n fontsize = (numpy.round(numpy.max([13./((layout[0]*layout[1])**0.33),5]))) * fontscale\n legend_fontsize = (13-numpy.min([(layout[0]*layout[1]-1),8])) * legend_fontscale\n ax.set_xlabel(xlabel, fontsize = fontsize)\n ax.set_ylabel(ylabel, fontsize = fontsize)\n ax.tick_params(axis='x', labelsize=fontsize)\n ax.tick_params(axis='y', labelsize=fontsize)\n\n# add title\n if title!='': ax.set_title(title)\n\n# log scale?\n if kwargs.get('xlog',False): ax.set_xscale('log',nonposx='clip')\n if 
kwargs.get('ylog',False): ax.set_yscale('log',nonposy='clip')\n\n# place legend\n if len(legend) > 0:\n if legend_location == 'outside':\n box = ax.get_position()\n ax.set_position([box.x0, box.y0 + box.height * 0.15, box.width * 0.7, box.height * 0.7])\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), prop={'size':legend_fontsize})\n else:\n ax.legend(loc=legend_location, prop={'size':legend_fontsize})\n bound[3] = bound[3]+0.1*(bound[3]-bound[2]) # extend axis for in-plot legends\n\n# overplot telluric absorption\n if telluric == True:\n yoff = 0.02*(bound[3]-bound[2]) # label offset\n for waveRng in telluric_bands:\n wR = waveRng.to(sp[0].wave.unit).value\n rect = patches.Rectangle((wR[0],bound[2]),wR[1]-wR[0],bound[3]-bound[2],facecolor=color_telluric,alpha=alpha_telluric,color=color_telluric)\n ax.add_patch(rect)\n ax.text(numpy.mean(wR),bound[2]+3*yoff,r'$\\oplus$',horizontalalignment='center',fontsize=fontsize)\n\n# overplot color swaths for pre-specified bands\n if len(band) > 0:\n for i,b in enumerate(band):\n if not isinstance(b,list): \n try: b = [float(b)-0.5*band_width,float(b)+0.5*band_width]\n except:\n print('\\nWarning: plotSpectrum bands variables should be array of 2-element arrays; you passed {}'.format(band))\n b = [0.,0.]\n rect = patches.Rectangle((b[0],bound[2]),b[1]-b[0],bound[3]-bound[2],facecolor=band_color[i],color=band_color[i],alpha=band_alpha[i])\n ax.add_patch(rect)\n if band_label_position[i].lower() == 'top':\n ax.text(numpy.mean(b),bound[3]-3*yoff,band_label[i],horizontalalignment='center',fontsize=fontsize)\n elif band_label_position[i].lower() == 'middle':\n ax.text(numpy.mean(b),0.5*(bound[2]+bound[3]),band_label[i],horizontalalignment='center',fontsize=fontsize)\n else:\n ax.text(numpy.mean(b),bound[2]+3*yoff,band_label[i],horizontalalignment='center',fontsize=fontsize)\n\n# place inset - RIGHT NOW ONLY SETTING LIMITS WITH FIRST SPECTRUM IN LIST\n if inset == True and len(inset_xrange) == 2:\n ax_inset = fig[pg_n-1].add_axes(inset_position) #, axisbg='white')\n bound2 = inset_xrange\n if len(inset_yrange) == 0:\n b0 = numpy.argmax(sp[0].wave.value > bound2[0])\n b1 = numpy.argmin(sp[0].wave.value < bound2[1])\n inset_yrange = [numpy.nanmin(sp[0].flux.value[b0:b1]),numpy.nanmax(sp[0].flux.value[b0:b1])]\n bound2.extend(inset_yrange)\n db = (bound2[3]-bound2[2])\n bound2[2] = bound2[2]-0.05*db\n bound2[3] = bound2[3]+0.05*db\n ax_inset.axis(bound2)\n inset_fontsize = fontsize*0.7\n\n for ii,a in enumerate(sp):\n flx = [i+zeropoint[ii] for i in a.flux.value]\n ax_inset.plot(a.wave.value,flx,color=colors[ii],linestyle=linestyle[ii],linewidth=linewidth[ii],alpha=alpha[ii]) \n ax_inset.set_xlabel('')\n ax_inset.set_ylabel('')\n ax_inset.tick_params(axis='x', labelsize=inset_fontsize)\n ax_inset.tick_params(axis='y', labelsize=inset_fontsize)\n# ax_inset.legend()\n\n# inset feature labels\n if len(inset_features) > 0:\n yoff = 0.05*(bound2[3]-bound2[2])\n for ftr in inset_features:\n ftrc = checkDict(ftr,FEATURE_LABELS)\n if ftrc != False:\n for ii,waveRng in enumerate(FEATURE_LABELS[ftrc]['wavelengths']):\n wRng = waveRng.to(sp[0].wave.unit).value\n if (numpy.min(wRng) > bound2[0] and numpy.max(wRng) < bound2[1]):\n wfeature = numpy.where(numpy.logical_and(wvmax >= numpy.nanmin(wRng),wvmax <= numpy.nanmax(wRng)))\n if len(wvmax[wfeature]) == 0: wfeature = numpy.argmax(numpy.absolute(wvmax-numpy.nanmedian(wRng)))\n y = numpy.nanmax(flxmax[wfeature])+yoff\n flxmax[wfeature] = flxmax[wfeature]+3.*yoff\n \n if FEATURE_LABELS[ftrc]['type'] == 'band':\n 
ax_inset.plot(wR,[y+yoff]*2,color='k',linestyle='-')\n ax_inset.plot([wR[0]]*2,[y,y+yoff],color='k',linestyle='-')\n ax_inset.text(numpy.mean(wR),y+2*yoff,FEATURE_LABELS[ftrc]['label'],horizontalalignment='center',fontsize=inset_fontsize)\n else:\n for w in waveRng:\n ax_inset.plot([w]*2,[y,y+yoff],color='k',linestyle='-')\n ax_inset.text(numpy.mean(wR),y+2*yoff,FEATURE_LABELS[ftrc]['label'],horizontalalignment='center',fontsize=inset_fontsize)\n waveRng = [wR[0]-0.02,wR[1]+0.02] # for overlap\n \n# update offset\n if len(inset_features) > 0: bound2[3] = numpy.nanmax([bound2[3],numpy.nanmax(flxmax)+5.*yoff])\n ax_inset.axis(bound2)\n\n# finalize bounding\n if len(xrng) > 0: bound[0:2] = xrng\n if len(yrng) > 0: bound[2:4] = yrng\n if isUnit(bound[0]): bound = [x.value for x in bound]\n ax.axis(bound)\n \n# save to file or display\n# ERROR HERE - CHECK WHAT FILES\n if multipage == False:\n if files[plts] != '' and (plts % nplot == 3 or plts == len(splist)-1):\n if kwargs.get('tight',True) == True: \n plt.savefig(files[plts], bbox_inches='tight')\n else:\n plt.savefig(files[plts])\n if output == '' and not kwargs.get('web',False):\n plt.show()\n if (kwargs.get('interactive',False) != False): plt.ion()\n else: plt.ioff()\n\n\n# save figures in multipage format and write off pdf file\n if multipage == True: \n for pg_n in numpy.arange(numpages):\n# fig[pg_n].text(0.5, 0.04, xlabel, ha = 'center', va = 'center')\n# fig[pg_n].text(0.06, 0.5, ylabel, ha = 'center', va = 'center', rotation = 'vertical')\n fig[pg_n].tight_layout\n fig[pg_n].suptitle(title, fontsize = int(14*fontsize), fontweight = 'bold')\n pdf_pages.savefig(fig[pg_n])\n if filetype.lower() == 'pdf':\n pdf_pages.close()\n\n plt.clf()\n return fig", "title": "" }, { "docid": "cc213173ebf82239f745ff9a11ad85fc", "score": "0.54133964", "text": "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='red')", "title": "" }, { "docid": "c38be8ff9fed90a0ddd4662a3ad9e61a", "score": "0.54123366", "text": "def _plot_robot(self):\n try:\n x = 200\n y = 200\n self.ax1.plot(x, y, marker='o', markersize=10, linestyle='None')\n except Exception as err:\n rospy.loginfo(err)", "title": "" }, { "docid": "b90e89287c3956d1bf3c8f2fed2cb4cb", "score": "0.54119986", "text": "def plot(self, *args, **kwargs):\r\n lines = super(RadarAxes, self).plot(*args, **kwargs)\r\n for line in lines:\r\n self._close_line(line)", "title": "" }, { "docid": "f710178da08e23c36509f488f2e956b7", "score": "0.5408171", "text": "def _plot(self):\n # Read results\n path = self.openmc_dir / f'statepoint.{self._batches}.h5'\n x1, y1, _ = read_results('openmc', path)\n if self.code == 'serpent':\n path = self.other_dir / 'input_det0.m'\n else:\n path = self.other_dir / 'outp'\n x2, y2, sd = read_results(self.code, path)\n\n # Convert energies to eV\n x1 *= 1e6\n x2 *= 1e6\n\n # Normalize the spectra\n y1 /= np.diff(np.insert(x1, 0, self._min_energy))*sum(y1)\n y2 /= np.diff(np.insert(x2, 0, self._min_energy))*sum(y2)\n\n # Compute the relative error\n err = np.zeros_like(y2)\n idx = np.where(y2 > 0)\n err[idx] = (y1[idx] - y2[idx])/y2[idx]\n \n # Set up the figure\n fig = plt.figure(1, facecolor='w', figsize=(8,8))\n ax1 = fig.add_subplot(111)\n \n # Create a second y-axis that shares the same x-axis, keeping the first\n # axis in front\n ax2 = ax1.twinx()\n ax1.set_zorder(ax2.get_zorder() + 1)\n ax1.patch.set_visible(False)\n \n # Plot the spectra\n label = 'Serpent' if self.code == 'serpent' else 'MCNP'\n ax1.loglog(x2, y2, 
'r', linewidth=1, label=label)\n ax1.loglog(x1, y1, 'b', linewidth=1, label='OpenMC', linestyle='--')\n \n # Plot the relative error and uncertainties\n ax2.semilogx(x2, err, color=(0.2, 0.8, 0.0), linewidth=1)\n ax2.semilogx(x2, 2*sd, color='k', linestyle='--', linewidth=1)\n ax2.semilogx(x2, -2*sd, color='k', linestyle='--', linewidth=1)\n \n # Set grid and tick marks\n ax1.tick_params(axis='both', which='both', direction='in', length=10)\n ax1.grid(b=False, axis='both', which='both')\n ax2.tick_params(axis='y', which='both', right=False)\n ax2.grid(b=True, which='both', axis='both', alpha=0.5, linestyle='--')\n \n # Set axes labels and limits\n ax1.set_xlim([self._min_energy, self.energy])\n ax1.set_xlabel('Energy (eV)', size=12)\n ax1.set_ylabel('Spectrum', size=12)\n ax1.legend()\n ax2.set_ylabel(\"Relative error\", size=12)\n title = f'{self.nuclide}'\n if self.thermal is not None:\n name, suffix = self.thermal.split('.')\n thermal_name = openmc.data.thermal.get_thermal_name(name)\n title += f' + {thermal_name}'\n title += f', {self.energy:.1e} eV Source'\n plt.title(title)\n \n # Save plot\n os.makedirs('plots', exist_ok=True)\n if self.name is not None:\n name = self.name\n else:\n name = f'{self.nuclide}'\n if self.thermal is not None:\n name += f'-{thermal_name}'\n name += f'-{self.energy:.1e}eV'\n if self._temperature is not None:\n name += f'-{self._temperature:.1f}K'\n plt.savefig(Path('plots') / f'{name}.png', bbox_inches='tight')\n plt.close()", "title": "" }, { "docid": "7fb0ff8e5c72ccc4b250658942df0318", "score": "0.54081684", "text": "def _set_axes(self):\n self += helper.line(stroke=\"black\", x1=self.__dict__['x'], x2=self.__dict__['x'], y1=0, y2=self.__dict__['y']*2)\n self += helper.line(stroke=\"black\", x1=0, x2=self.__dict__['x']*2, y1=self.__dict__['y'], y2=self.__dict__['y'])", "title": "" }, { "docid": "fd9e6fad67d974914f23fd0590abf3e6", "score": "0.5404693", "text": "def plot(self, *args, **kwargs):\n lines = super(RadarAxes, self).plot(*args, **kwargs)\n for line in lines:\n self._close_line(line)", "title": "" }, { "docid": "fd9e6fad67d974914f23fd0590abf3e6", "score": "0.5404693", "text": "def plot(self, *args, **kwargs):\n lines = super(RadarAxes, self).plot(*args, **kwargs)\n for line in lines:\n self._close_line(line)", "title": "" }, { "docid": "c839d18c2740068d5a75dc63206a392a", "score": "0.5404637", "text": "def addLineStyle(dist, focus, axis, pupil):\n r = 0 #focus / 2\n g = 0 #np.log10(dist) / (25 / 3)\n b = 0 #axis / 20\n a = 0.4\n rgb = [r, g, b, a]\n line = {'style': '-', 'color': rgb}\n return line", "title": "" }, { "docid": "ff92cf467aa5b1e566f1e864d0256cb5", "score": "0.5381914", "text": "def annotate(axis, text, x, y):\n text_annotation = Annotation(text, xy=(x, y), xycoords='data')\n axis.add_artist(text_annotation)", "title": "" }, { "docid": "6dc70a09d352b7ebd922e3bd288fefe9", "score": "0.53802514", "text": "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='green')", "title": "" }, { "docid": "3ff66d6737e1b5d1441af7afda7d1407", "score": "0.53763545", "text": "def update_lines(self):\n try:\n for l in self.artists['lines']['lines']:\n l.remove()\n except ValueError:\n pass\n for art in self.artists['lines']['labels']:\n art.remove()\n for art in self.artists['lines']['atmos']:\n art.remove()\n self.artists['lines'] = barak.spec.plotlines(\n self.zp1 - 1, self.ax, labels=1, fontsize=10,\n lcolor='0.3', lines=self.lines, offsets=False)\n\n if self.DLA is not None:\n 
self.artists['model'].remove()\n s = self.spec[self.i]\n t,_ = calc_DLA_trans(s.wa, self.zp1-1, 2*self.dvpix,\n logN=self.DLA, bHI=15) \n self.artists['model'] = self.ax.plot(\n s.wa, t*s.co, 'k')[0]\n #if not self.showlabels:\n # for t in self.artists['lines']['labels']:\n # t.set_visible(False)\n\n \n # for key in self.artists['zlines']:\n # for l in self.artists['zlines'][key]:\n # try:\n # l.remove()\n # except ValueError:\n # # plot has been removed\n # pass\n # self.artists['zlines'] = barak.spec.plotlines(\n # zp1 - 1, plt.gca(), lines=self.opt.linelist, labels=True)\n # plt.draw()\n print('z =', self.zp1-1)", "title": "" }, { "docid": "6137683cb7af075fbe4f439403158c71", "score": "0.5376237", "text": "def simple_line():\n\n # Make two datasets\n dataset_a = DataSet(sine)\n dataset_b = DataSet(cosine)\n\n # Make plot and add data\n plot = Plot()\n plot.set_text()\n plot.add_dataset(dataset_a)\n plot.add_dataset(dataset_b)\n\n # Plot graph and display\n plot.plot()\n plot.save(name='./figures/2d_simple_line',fmt='png')\n plot.display()", "title": "" }, { "docid": "fb9af76304beb83d5268cbdc523f89e7", "score": "0.53689307", "text": "def test_line_plot(self):\n clf()\n filename = 'lines_plot.png'\n N = 10\n lines = GeoSeries([LineString([(0, i), (9, i)]) for i in xrange(N)])\n ax = lines.plot()\n self._compare_images(ax=ax, filename=filename)", "title": "" }, { "docid": "c8f1fe374b7f2b3c68f007a516061151", "score": "0.53461874", "text": "def setup_annotation(self):\n annotation = self.ax_fig.annotate(\n '', xy=(0, 0), ha='left',\n xytext=(-20, 20), textcoords='offset points', va='bottom',\n bbox=dict(\n boxstyle='round,pad=0.5', fc='yellow', alpha=0.2),\n arrowprops=dict(\n arrowstyle='->', connectionstyle='arc3,rad=0'))\n return annotation", "title": "" }, { "docid": "099bf2b93a7734a9d24ba63d8cbea93e", "score": "0.53453434", "text": "def plotSeismogram(d, rho, v, wavf, wavA=1., noise = 0., usingT=True, wavtyp='RICKER'):\n\n tseis, seis, twav, wav, tref, rseriesconv = syntheticSeismogram(d, rho, v, wavf, wavA, usingT,wavtyp)\n\n noise = noise*np.max(np.abs(seis))*np.random.randn(seis.size)\n filt = np.arange(1.,15.)\n filtr = filt[::-1]\n filt = np.append(filt,filtr[1:])*1./15.\n noise = np.convolve(noise,filt)\n noise = noise[0:seis.size]\n\n seis = seis + noise\n\n plt.figure(num=0, figsize = (8, 5))\n\n plt.subplot(131)\n plt.plot(wav,twav,linewidth=1,color='black')\n plt.title('Wavelet')\n plt.xlim((-2.,2.))\n plt.grid()\n plt.ylim((tseis.min()-tseis.mean(),tseis.max()-tseis.mean()))\n plt.gca().invert_yaxis()\n plt.setp(plt.xticks()[1],rotation='90',fontsize=9)\n plt.setp(plt.yticks()[1],fontsize=9)\n plt.gca().set_xlabel('Amplitude',fontsize=9)\n plt.gca().set_ylabel('Time (s)',fontsize=9)\n\n plt.subplot(132)\n plt.plot(np.zeros(tref.size),(tseis.max(),tseis.min()),linewidth=2,color='black')\n plt.hlines(tref,np.zeros(len(rseriesconv)),rseriesconv,linewidth=2) #,'marker','none'\n plt.title('Reflectivity')\n plt.grid()\n plt.ylim((0,tseis.max()))\n plt.gca().invert_yaxis()\n plt.xlim((-2.,2.))\n plt.setp(plt.xticks()[1],rotation='90',fontsize=9)\n plt.setp(plt.yticks()[1],fontsize=9)\n plt.gca().set_xlabel('Amplitude',fontsize=9)\n plt.gca().set_ylabel('Time (s)',fontsize=9)\n\n plt.subplot(133)\n plt.plot(seis,tseis,color='black',linewidth=1)\n plt.title('Seismogram')\n plt.grid()\n plt.ylim((tseis.min(),tseis.max()))\n plt.gca().invert_yaxis()\n plt.xlim((-0.95,0.95))\n plt.setp(plt.xticks()[1],rotation='90',fontsize=9)\n plt.setp(plt.yticks()[1],fontsize=9)\n 
plt.gca().set_xlabel('Amplitude',fontsize=9)\n plt.gca().set_ylabel('Time (s)',fontsize=9)\n\n plt.tight_layout()\n plt.show()", "title": "" }, { "docid": "42ba138f994526046049d88b5089a09e", "score": "0.5344901", "text": "def plot_regression_line():\r\n axes = plt.gca()\r\n x_vals = np.array(axes.get_xlim())\r\n y_vals = y_intercept() + slope() * x_vals\r\n plt.plot(x_vals, y_vals)", "title": "" }, { "docid": "83efaecbdb9611304241cf41f160120c", "score": "0.5342107", "text": "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='blue')", "title": "" }, { "docid": "225dafa3a4772448ae59ae7ebbc12c6f", "score": "0.5340947", "text": "def raster(self, event_times_list, color='k'):\n ax = plt.gca()\n for ith, trial in enumerate(event_times_list):\n plt.vlines(trial, ith + .5, ith + 1.5, color=color)\n plt.ylim(.5, len(event_times_list) + .5)\n return ax", "title": "" }, { "docid": "0ee9b0f107b65db1148145501b5c6efa", "score": "0.5340181", "text": "def updateAnnot( xdata, ydata, pixels, annot, rawdata, **kwargs):\n\ty, x = pol2cart( ydata/180, xdata, pixels )\n\tannot.xy = ( xdata, ydata )\n\t# Inconsistent wrapping; plot the right variable.\n\tif xdata < 0:\n\t\txdata += 2 * np.pi\n\ttext = 'Az=' + str( round( xdata * 180 / np.pi, 1 ) )+ ', El=' + str( round( np.arccos( ydata/180 ) * 180/np.pi, 1) ) + u'\\xb0' + '\\nInt.=' + '{:.3E}'.format((rawdata[int(y),int(x)]))\n\tannot.set_text( text )\n\tannot.get_bbox_patch().set_alpha( 0.66 )\n\tannot.set_color('black')", "title": "" }, { "docid": "bfee3bdcb3ab05ef1098e117c91d4b21", "score": "0.5339049", "text": "def plot_agent_track(track_id, seq_agents_df, colour, line_width, alpha):\n\n agent_track = seq_agents_df[seq_agents_df[\"TRACK_ID\"] == track_id].values\n\n agent_xy = agent_track[:, [RAW_DATA_FORMAT[\"X\"], RAW_DATA_FORMAT[\"Y\"]\n ]].astype(\"float\")\n\n plt.plot(\n agent_xy[:, 0],\n agent_xy[:, 1],\n \"-\",\n color=colour,\n alpha=alpha,\n linewidth=line_width,\n zorder=15,\n )\n\n return agent_xy", "title": "" }, { "docid": "9b8ca4aaa6051fd2794f148c3bfbd9ea", "score": "0.5336847", "text": "def plotSpect(spec, sr):\r\n fig, ax = plt.subplots()\r\n img = librosa.display.specshow(spec, x_axis='time', y_axis='mel', sr=sr, fmax=8000, ax=ax) \r\n fig.colorbar(img, ax=ax, format='%+2.0f dB') \r\n ax.set(title='Mel-frequency spectrogram')", "title": "" }, { "docid": "eaa6187ab9d26be4324c99ff6bea7c45", "score": "0.5336276", "text": "def label_line(line, label_text, ax, near_i=None, near_x=None, near_y=None, rotation_offset=0, offset=(0,0)):\n def put_label(i):\n \"\"\"put label at given index\"\"\"\n i = min(i, len(x)-2)\n dx = sx[i+1] - sx[i]\n dy = sy[i+1] - sy[i]\n rotation = np.rad2deg(math.atan2(dy, dx)) + rotation_offset\n pos = [(x[i] + x[i+1])/2. 
+ offset[0], (y[i] + y[i+1])/2 + offset[1]]\n plt.text(pos[0], pos[1], label_text, size=9, rotation=rotation, color = line.get_color(),\n ha=\"center\", va=\"center\", bbox = dict(ec='1',fc='1',alpha=0.8))\n\n x = line.get_xdata()\n y = line.get_ydata()\n # ax = line.get_axes()\n if ax.get_xscale() == 'log':\n sx = np.log10(x) # screen space\n else:\n sx = x\n if ax.get_yscale() == 'log':\n sy = np.log10(y)\n else:\n sy = y\n\n # find index\n if near_i is not None:\n i = near_i\n if i < 0: # sanitize negative i\n i = len(x) + i\n put_label(i)\n elif near_x is not None:\n for i in range(len(x)-2):\n if (x[i] < near_x and x[i+1] >= near_x) or (x[i+1] < near_x and x[i] >= near_x):\n put_label(i)\n elif near_y is not None:\n for i in range(len(y)-2):\n if (y[i] < near_y and y[i+1] >= near_y) or (y[i+1] < near_y and y[i] >= near_y):\n put_label(i)\n else:\n raise ValueError(\"Need one of near_i, near_x, near_y\")", "title": "" }, { "docid": "a77fe67868ae83d2aa1fde7693150ea6", "score": "0.5335516", "text": "def lineplot(self, name: str, headers: [str], data: [[int]], img_title: str):\n total_lines = len(data)\n x = np.arange(0, len(data[0]))\n\n for line in range(total_lines):\n ax = sns.lineplot(x=x, y=data[line], color=C.IRT_COLORS[line], label=headers[line])\n\n ax.legend(loc='best')\n ax.set(xlabel='Steps', ylabel='IRT', title=img_title)\n self.save_plot(name)\n plt.show()", "title": "" }, { "docid": "9df17f908f5f18fd28af99a48e31873d", "score": "0.53304356", "text": "def plot_noise_distribution(self, ax, mask=None, **kwargs):\r\n trs = self.rec_smooth_pos\r\n ivalid = np.where(np.isfinite(trs.noise))[0]\r\n sigma = np.std(trs.noise[ivalid])\r\n mu = np.average(trs.noise[ivalid])\r\n\r\n kwargs['linestyle']='-'\r\n kwargs['linewidth']=1.0\r\n # fit lognormal\r\n if len(ivalid) > 10:\r\n try:\r\n x = np.linspace(-4.*sigma+mu, 4*sigma+mu, 200)\r\n params = stats.lognorm.fit(trs.noise[ivalid], loc=0.2)\r\n pdf_fit = stats.lognorm.pdf(x, params[0], loc=params[1], \r\n scale=params[2])\r\n ax.plot(x, pdf_fit, color='k', **kwargs)\r\n except:\r\n print(\"lognormal fit failed for \",self.ID)\r\n # fit normal\r\n ax.plot(x, mlab.normpdf(x, mu, sigma), color='b', **kwargs)\r\n\r\n plt.hist(trs.noise[ivalid], bins=50, normed=True, facecolor='g')\r\n ax.set_xlabel('position error (m)')\r\n ax.set_xlim([0, 10])\r\n ax.set_ylim([0, 1.0])\r\n ax.set_ylabel('frequency')\r\n\r\n return", "title": "" }, { "docid": "84d43d04131b4f3e6ffee2a07d69508f", "score": "0.5328412", "text": "def plot_line(self,x_0,y_0,x_1,y_1,col=\"black\",line_width=1,line_type=\"solid\"):\n self._fig.add_shape(\n go.layout.Shape(\n type=\"line\",\n x0=x_0,\n y0=y_0,\n x1=x_1,\n y1=y_1,\n line=dict(\n color=col,\n width=line_width,\n dash=line_type\n )\n )\n )", "title": "" }, { "docid": "6c849b1aa0dfc16fe1bb808b4483299e", "score": "0.53209865", "text": "def generate_labeled_led_arrays_plot(line_indices, search_areas):\n for i in range(len(line_indices)):\n plt.scatter(search_areas[line_indices[i], 2],\n search_areas[line_indices[i], 1],\n s=0.1, label='led strip {}'.format(i))\n\n plt.legend()\n plt.savefig('plots{}led_arrays.pdf'.format(sep))", "title": "" }, { "docid": "beae4bc77e7c5a2ac0ff6193f671b276", "score": "0.53149724", "text": "def _spines_on(ax, *args):\n for s in args:\n ax.spines[s].set_visible(True)", "title": "" }, { "docid": "e1d6e1ef74c104b0825349e8c31bfd2e", "score": "0.53127944", "text": "def _hr_mean_plot_subphase_annotations(phase_dict: Dict[str, Sequence[str]], xlims: Sequence[float], **kwargs):\n ax: plt.Axes = 
kwargs.get(\"ax\")\n\n num_phases = len(phase_dict)\n num_subphases = [len(arr) for arr in phase_dict.values()]\n\n bg_colors = kwargs.get(\"background_color\", _hr_ensemble_plot_params.get(\"background_color\"))\n if bg_colors is None:\n bg_color_base = kwargs.get(\"background_base_color\", _hr_ensemble_plot_params.get(\"background_base_color\"))\n bg_colors = list(sns.dark_palette(bg_color_base, n_colors=num_phases, reverse=True))\n bg_alphas = kwargs.get(\"background_alpha\", _hr_ensemble_plot_params.get(\"background_alpha\"))\n bg_alphas = [bg_alphas] * num_phases\n\n phase_text = kwargs.get(\"phase_text\", _hr_mean_plot_params.get(\"phase_text\"))\n\n x_spans = _hr_mean_get_x_spans(num_phases, num_subphases)\n\n for (i, phase) in enumerate(phase_dict):\n left, right = x_spans[i]\n bg_color = bg_colors[i]\n bg_alpha = bg_alphas[i]\n ax.axvspan(xlims[left], xlims[right], color=bg_color, alpha=bg_alpha, zorder=0, lw=0)\n name = phase_text.format(phase)\n ax.text(\n x=xlims[left] + 0.5 * (xlims[right] - xlims[left]),\n y=0.95,\n s=name,\n transform=ax.get_xaxis_transform(),\n horizontalalignment=\"center\",\n verticalalignment=\"center\",\n zorder=3,\n )\n\n p = mpatch.Rectangle(\n xy=(0, 0.9),\n width=1,\n height=0.1,\n transform=ax.transAxes,\n color=\"white\",\n alpha=0.4,\n zorder=1,\n lw=0,\n )\n ax.add_patch(p)", "title": "" }, { "docid": "07cf41619976a48b551d6b659c99020d", "score": "0.5309429", "text": "def plot_aroon(self, fig: OpenBBFigure, df_ta: pd.DataFrame, subplot_row: int):\n aroon_up_col = columns_regex(df_ta, \"AROONU\")[0]\n aroon_down_col = columns_regex(df_ta, \"AROOND\")[0]\n aroon_osc_col = columns_regex(df_ta, \"AROONOSC\")[0]\n fig.add_scatter(\n name=\"Aroon Up\",\n mode=\"lines\",\n line=dict(width=1.5, color=theme.up_color),\n x=df_ta.index,\n y=df_ta[aroon_up_col].values,\n opacity=0.9,\n row=subplot_row,\n col=1,\n secondary_y=False,\n )\n fig.add_scatter(\n name=\"Aroon Down\",\n mode=\"lines\",\n line=dict(width=1.5, color=theme.down_color),\n x=df_ta.index,\n y=df_ta[aroon_down_col].values,\n opacity=0.9,\n row=subplot_row,\n col=1,\n secondary_y=False,\n )\n\n fig.add_annotation(\n xref=f\"x{subplot_row} domain\",\n yref=f\"y{subplot_row + 1} domain\",\n text=\"<b>Aroon</b>\",\n x=0,\n xanchor=\"right\",\n xshift=-6,\n y=1,\n font_size=14,\n font_color=\"#e0b700\",\n )\n fig.add_annotation(\n xref=f\"x{subplot_row} domain\",\n yref=f\"y{subplot_row + 1} domain\",\n text=(\n f\"<span style='color: {theme.up_color}'>↑</span><br>\"\n f\"<span style='color: {theme.down_color}'>↓</span>\"\n ),\n x=0,\n xanchor=\"right\",\n xshift=-14,\n y=0.75,\n font_size=14,\n font_color=theme.down_color,\n )\n fig.add_hline(\n y=50,\n fillcolor=\"white\",\n opacity=1,\n layer=\"below\",\n line_width=1.5,\n line=dict(color=\"white\", dash=\"dash\"),\n row=subplot_row,\n col=1,\n secondary_y=False,\n )\n\n subplot_row += 1\n\n fig.add_scatter(\n name=\"Aroon Oscillator\",\n mode=\"lines\",\n line=dict(width=1.5, color=\"#e0b700\"),\n x=df_ta.index,\n y=df_ta[aroon_osc_col].values,\n connectgaps=True,\n opacity=0.9,\n row=subplot_row,\n col=1,\n secondary_y=False,\n )\n\n fig.add_annotation(\n xref=f\"x{subplot_row} domain\",\n yref=f\"y{subplot_row + 1} domain\",\n text=\"<b>Aroon<br>OSC</b>\",\n x=0,\n xanchor=\"right\",\n xshift=-6,\n y=0.98,\n font_size=14,\n font_color=\"#e0b700\",\n )\n fig[\"layout\"][f\"yaxis{subplot_row + 1}\"].update(\n tickvals=[-100, 0, 100],\n ticktext=[\"-100\", \"0\", \"100\"],\n nticks=5,\n autorange=True,\n )\n\n return fig, subplot_row + 1", 
"title": "" }, { "docid": "42c57852c4e08a0cfd0f7d0584fb9fe4", "score": "0.52914405", "text": "def show_line(dict, xlabel=\"x\", ylabel=\"y\", title=\"title\"):\n plt.clf()\n plt.cla()\n plt.plot(list(dict.keys()), list(dict.values()), alpha=0.4, color = 'g')\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n plt.show()", "title": "" }, { "docid": "219ed0d8ec26cc7793aa87b3633a7bab", "score": "0.52805567", "text": "def __draw_era_spans(self):\n for era in self.era_spans:\n radius = .5\n circle1 = plt.Circle((era.start.x, era.start.y), radius,\n color=era.color, fill=False, lw=self.settings.otherParams[\"annotation.edge.width\"])\n circle2 = plt.Circle((era.end.x, era.end.y), radius,\n color=era.color, fill=False, lw=self.settings.otherParams[\"annotation.edge.width\"])\n self.ax.add_artist(circle1)\n self.ax.add_artist(circle2)\n\n # to draw the line between the two circles, we need to find the point on the\n # each circle that is closest to the other circle\n # get the angle from one circle to the other and find the point on the circle\n # that lies at that angle\n x1 = era.end.x - era.start.x\n y1 = era.end.y - era.start.y\n\n x2 = era.start.x - era.end.x\n y2 = era.start.y - era.end.y\n\n # quadrant senstive arctan\n angle1 = np.arctan2(y1, x1)\n angle2 = np.arctan2(y2, x2)\n\n x1 = era.start.x + np.cos(angle1) * radius\n y1 = era.start.y + np.sin(angle1) * radius\n\n x2 = era.end.x + np.cos(angle2) * radius\n y2 = era.end.y + np.sin(angle2) * radius\n\n if era.start_marker is not None:\n self.ax.plot(era.start_marker.x, era.start_marker.y, color=era.start_marker.color, marker=era.start_marker.marker,\n fillstyle=era.start_marker.fillstyle)\n\n if era.end_marker is not None:\n self.ax.plot(era.end_marker.x, era.end_marker.y, color=era.end_marker.color, marker=era.end_marker.marker,\n fillstyle=era.end_marker.fillstyle)\n\n l = mlines.Line2D([x1, x2], [y1, y2], color=era.color, linestyle=self.settings.otherParams[\"era.span.linestyle\"],\n markersize=self.settings.otherParams[\"era.span.markersize\"], linewidth=self.settings.otherParams[\"annotation.line.width\"])\n self.ax.add_line(l)", "title": "" }, { "docid": "f59ed5814d4b6a74c6b8d9679b2e598d", "score": "0.5275274", "text": "def plotSpectrum(self,wavelengths,intensities = 1.0):\n\n fieldAngle,spectralOutput = self.getIntensitySpectum(wavelengths,intensities)\n\n # Do the actual plot\n plot(np.degrees(fieldAngle),spectralOutput)\n grid()\n title(\"Spectral plot\")\n xlabel(\"Angle in degrees\")\n ylabel(\"Intensty\")", "title": "" }, { "docid": "319b0db1f94f40a6870914488e7ed7c7", "score": "0.52703464", "text": "def add_lines(sample, lines=None, alternate=True):\n if lines is None:\n lines = config[\"dates\"]\n ax = plt.gca()\n # date, text = \"2010-05-10\", \"ipython on github\"\n for i, (date, text) in enumerate(lines.items()):\n\n if i % 2 == 0 or not alternate:\n xy = (-64, 100)\n align = \"right\"\n else:\n xy = (64, -80)\n align = \"left\"\n\n ax.annotate(\n text,\n xy=(date, interpolate_one(sample, date)),\n xycoords=\"data\",\n xytext=xy,\n textcoords=\"offset points\",\n arrowprops=dict(facecolor=\"black\", shrink=0.05),\n horizontalalignment=align,\n )", "title": "" }, { "docid": "88c83dd6cf9bd7b8345e7ff5d8cc138b", "score": "0.52648985", "text": "def lens_model_plot(ax, lensModel, kwargs_lens, numPix=500, deltaPix=0.01, sourcePos_x=0, sourcePos_y=0, point_source=False, with_caustics=False):\n from lenstronomy.SimulationAPI.simulations import Simulation\n simAPI = Simulation()\n kwargs_data = 
simAPI.data_configure(numPix, deltaPix)\n data = Data(kwargs_data)\n _frame_size = numPix * deltaPix\n _coords = data._coords\n x_grid, y_grid = data.coordinates\n lensModelExt = LensModelExtensions(lensModel)\n\n #ra_crit_list, dec_crit_list, ra_caustic_list, dec_caustic_list = lensModelExt.critical_curve_caustics(\n # kwargs_lens, compute_window=_frame_size, grid_scale=deltaPix/2.)\n x_grid1d = util.image2array(x_grid)\n y_grid1d = util.image2array(y_grid)\n kappa_result = lensModel.kappa(x_grid1d, y_grid1d, kwargs_lens)\n kappa_result = util.array2image(kappa_result)\n im = ax.matshow(np.log10(kappa_result), origin='lower',\n extent=[0, _frame_size, 0, _frame_size], cmap='Greys', vmin=-1, vmax=1) #, cmap=self._cmap, vmin=v_min, vmax=v_max)\n if with_caustics is True:\n ra_crit_list, dec_crit_list = lensModelExt.critical_curve_tiling(kwargs_lens, compute_window=_frame_size,\n start_scale=deltaPix, max_order=10)\n ra_caustic_list, dec_caustic_list = lensModel.ray_shooting(ra_crit_list, dec_crit_list, kwargs_lens)\n plot_line_set(ax, _coords, ra_caustic_list, dec_caustic_list, color='g')\n plot_line_set(ax, _coords, ra_crit_list, dec_crit_list, color='r')\n if point_source:\n from lenstronomy.LensModel.Solver.lens_equation_solver import LensEquationSolver\n solver = LensEquationSolver(lensModel)\n theta_x, theta_y = solver.image_position_from_source(sourcePos_x, sourcePos_y, kwargs_lens)\n mag_images = lensModel.magnification(theta_x, theta_y, kwargs_lens)\n x_image, y_image = _coords.map_coord2pix(theta_x, theta_y)\n abc_list = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K']\n for i in range(len(x_image)):\n x_ = (x_image[i] + 0.5) * deltaPix\n y_ = (y_image[i] + 0.5) * deltaPix\n ax.plot(x_, y_, 'dk', markersize=4*(1 + np.log(np.abs(mag_images[i]))), alpha=0.5)\n ax.text(x_, y_, abc_list[i], fontsize=20, color='k')\n x_source, y_source = _coords.map_coord2pix(sourcePos_x, sourcePos_y)\n ax.plot((x_source + 0.5) * deltaPix, (y_source + 0.5) * deltaPix, '*k', markersize=10)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax.autoscale(False)\n #image_position_plot(ax, _coords, self._kwargs_else)\n #source_position_plot(ax, self._coords, self._kwargs_source)\n return ax", "title": "" }, { "docid": "4e4430154509e1c8fd3a6d80302143cd", "score": "0.5263701", "text": "def line_plot(array_index_start,array_index_finish):\n cv2.line(image, (int(pointstore[array_index_start, 0]), int(pointstore[array_index_start, 1])), (int(pointstore[array_index_finish, 0]), int(pointstore[array_index_finish, 1])),\n (0, 0, 0), 1)", "title": "" }, { "docid": "bfd2fc834e0893aaf5a8986f84a74770", "score": "0.5257321", "text": "def graphplot(self):\n if self.binned:\n self.line.set_ydata(self.fft_bins_y)\n else:\n self.line.set_ydata(self.spec_y)\n self.line2.set_ydata(self.wave_y)\n self.ax1.draw_artist(self.ax1.patch)\n self.ax2.draw_artist(self.ax2.patch)\n self.ax1.draw_artist(self.line)\n self.ax2.draw_artist(self.line2)\n self.fig.canvas.update()\n self.fig.canvas.flush_events()", "title": "" }, { "docid": "0c425c7d4c386231d65def54833db2c0", "score": "0.52561116", "text": "def plot_all_overlaid(x_data, Z, xlabel, ylabel, sen_list, figsize=(12, 14),\n multi_trace_plot_labels=True,\n id=None, plot_sensor=None, yscale='linear', xrange=None,\n yrange=None, legend=None, color_dict={}, transparency=1.):\n all_data_overlaid = (plot_sensor is not None) and (legend is not None) and (\n id is not None)\n if not(all_data_overlaid):\n pyplot.figure(figsize=figsize)\n legend = []\n\n for 
sen_i, sen in enumerate(sen_list):\n if all_data_overlaid:\n if sen == plot_sensor:\n if id in color_dict:\n clrdict = {'color': color_dict[id]}\n else:\n clrdict = {}\n lg, = pyplot.plot(x_data, Z[:, sen_i],\n '-', label=id, alpha=transparency, **clrdict)\n if id not in color_dict:\n legend.append(lg)\n color_dict[id] = lg.get_color()\n else:\n continue\n else:\n nsamp, nsen, cmpr_high_variability, median_trace, dev = \\\n aggregate_behavior(Z)\n lg, = pyplot.plot(x_data, Z[:, sen_i],\n (':' if (cmpr_high_variability and\n cmpr_high_variability[sen_i]) else '-'),\n label=sen, alpha=transparency)\n legend.append(lg)\n if multi_trace_plot_labels and median_trace is not None:\n lg, = pyplot.plot(x_data, median_trace, '--',\n label='median', linewidth=5)\n\n pyplot.xlabel(xlabel, fontsize=14)\n pyplot.ylabel(ylabel, fontsize=15)\n\n pyplot.legend(handles=legend, fontsize=7)\n pyplot.title('Sensor traces', fontsize=15)\n\n if yscale:\n pyplot.yscale(yscale)\n if xrange:\n pyplot.xlim(xrange)\n if yrange:\n pyplot.ylim(yrange)\n if not(all_data_overlaid):\n pyplot.show()", "title": "" }, { "docid": "51714e64b67bed785347e1a16d4a16b5", "score": "0.5253872", "text": "def plot(self,ax,**kwargs):\n self.XP_Plotter.plot(ax,**kwargs)\n self.lines_theory[0], = ax.plot(self.xx, self.pp_non_rel,'--g',**kwargs)\n self.lines_theory[1], = ax.plot(self.xx, self.pp_rel,'--m',**kwargs)\n self.lines_theory[2], = ax.plot(self.xx_itpl, self.pp_itpl,'-r',**kwargs)", "title": "" }, { "docid": "7563102ed98dcafa9fead4ffa4aa2fb1", "score": "0.52532315", "text": "def annotate(self, ax):\n annotation = ax.annotate(self.template, xy=(0, 0), ha='right',\n xytext=self.offsets, textcoords='offset points', va='bottom',\n bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),\n arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0')\n )\n annotation.set_visible(False)\n return annotation", "title": "" }, { "docid": "7563102ed98dcafa9fead4ffa4aa2fb1", "score": "0.52532315", "text": "def annotate(self, ax):\n annotation = ax.annotate(self.template, xy=(0, 0), ha='right',\n xytext=self.offsets, textcoords='offset points', va='bottom',\n bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),\n arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0')\n )\n annotation.set_visible(False)\n return annotation", "title": "" }, { "docid": "2a597188005bc3fe04bab989c889c994", "score": "0.5245954", "text": "def abline(slope, intercept, a, b):\n # axes = plt.gca()\n print(slope)\n print(intercept)\n x_vals = np.array(list_xs[ a: b])\n y_vals = intercept + slope * (x_vals-a)\n plt.plot(x_vals, y_vals, '--')\n # print(x_vals)", "title": "" }, { "docid": "e5539bf63f623d8df34068f8ccd0caab", "score": "0.52443016", "text": "def plot_model_spectra(a, Q, d, ax, index, max_x=2.5, padding=0.0, true_freqs=None, ylim=None, scalar=1.0):\n # colour palettes\n greys = ['#393939', '#575757', '#707070', '#898989', '#a4a4a4', '#bfbfbf']\n blues = ['#367bac', '#3787c0', '#4892c6', '#69a6d0', '#8abbdb', '#95c1de']\n greens = ['#265e52', '#3c7e69', '#599d7e', '#79b895', '#9ed0ae', '#c6e5cc']\n\n sns.set(style='ticks')\n cm = a.continuous_model.model\n dcm = a.discontinuous_model.control_model\n dim = a.discontinuous_model.intervention_model\n\n # fig = plt.figure(constrained_layout=True,figsize=(12,15))\n\n # # Continuous spectral GMM\n # gs = GridSpec(3, 1, figure=fig)\n # ax1 = fig.add_subplot(gs[0,index])\n\n continuous_pdfs = plot_kernel_spectrum(Q, cm.kernel, max_x, ax=ax[0], colours=greys, true_freqs=None, scalar=scalar)\n \n # 
Discontinuous-control spectral GMM\n # ax2 = fig.add_subplot(gs[1,index])\n if true_freqs is not None:\n control_pdfs = plot_kernel_spectrum(Q, dcm.kernel, max_x, ax=ax[1], colours=blues, true_freqs=true_freqs[0], scalar=scalar)\n # Discontinuous-intervention spectral GMM\n intervention_pdfs = plot_kernel_spectrum(Q, dim.kernel, max_x, ax=ax[2], colours=greens, true_freqs=true_freqs[1], scalar=scalar)\n \n else:\n plot_kernel_spectrum(Q, dcm.kernel, max_x, ax=ax[1], colours=blues, true_freqs=None, scalar=scalar)\n # Discontinuous-intervention spectral GMM\n plot_kernel_spectrum(Q, dim.kernel, max_x, ax=ax[2], colours=greens, true_freqs=None, scalar=scalar)\n # return fig, gs\n # fig.suptitle(f\"$d$ = {d}\",size=30)", "title": "" }, { "docid": "c8130f2947721ee27b875523ac40f949", "score": "0.52420056", "text": "def visualize_signal(self):\n plt.figure()\n plt.title('Accelerometer Signal')\n plt.plot(range(len(self.data)), self.data[1])", "title": "" }, { "docid": "555870d38ddead8a5d4e50ba49505de0", "score": "0.5240869", "text": "def add_to_plot(self, line_name, points):\n points = [x * 100 for x in points]\n plt.plot(points, label=line_name)", "title": "" }, { "docid": "8a4760cc4d9b0a1b548c1d78385d8d31", "score": "0.5230814", "text": "def onscreen_pres(mpl, screenwidth=1200):\n mpl.rcParams['lines.linewidth'] = 2\n fontsize = round(14 / (800.0 / screenwidth))\n mpl.rcParams['font.size'] = fontsize", "title": "" }, { "docid": "292c45b3569f45bc6b9dd43633e4fbd5", "score": "0.5230249", "text": "def annotate_led_flasher(self):\n if self._mapping is not None:\n pix_size = self._mapping.metadata['size']\n axl = self._mapping.metadata['fOTUpX_l']\n ayl = self._mapping.metadata['fOTUpY_l'] + 2 * pix_size\n\n dxl = [1, -1, 1, -1]\n dyl = [1, 1, -1, -1]\n for i, (dx, dy) in enumerate(zip(dxl, dyl)):\n x = axl * dx\n y = ayl * dy\n self.ax.add_patch(Circle((x, y), radius=0.01, color='red'))\n self.ax.text(x, y, f\"{i}\", fontsize=7, color='white',\n ha='center', va='center')\n else:\n print(\"Cannot annotate, no mapping attached to class\")", "title": "" }, { "docid": "3fa2d9ed91473acdb85d2b3c914852fb", "score": "0.52238405", "text": "def plotTrace(trace):\n for t in trace:\n plt.plot(range(len(t)),t,alpha=0.5)\n plt.ylabel(\"Trace\")\n plt.xlabel(\"Step\")\n\n return", "title": "" }, { "docid": "ac289304ffbaa2ab41c2becaf716c64c", "score": "0.5218643", "text": "def plot_individual(xdict, ydict, xprop, yprop, documents, spline):\n figure_array = {}\n for item in documents:\n xlabel = \"\\\\textbf{\" + label_dict[xprop] + \"}\"\n ylabel = \"\\\\textbf{\" + label_dict[yprop] + \"}\"\n x = xdict[item[\"path_id\"]]\n y = ydict[item[\"path_id\"]]\n # fig_title = item[\"path_id\"] + \"(\" + item[\"pretty_formula\"] + \")\" # Individual traces\n # fig_title = yprop + item[\"cation_type\"] # Plot by cation\n fig_title = yprop # All together\n figure_array[item[\"path_id\"]] = plt.figure(fig_title, figsize=(6,6), dpi=plotting_dpi)\n ax = figure_array[item[\"path_id\"]].add_subplot(111) \n ax.scatter(x,y, s=70, zorder=2, color=color_dict[item[\"cation_type\"]], linewidths=2.5, edgecolors='black')\n if spline:\n tck = interpolate.splrep(x, y, s=0)\n xnew = np.arange(0, 100, 0.1)\n splfit = interpolate.splev(xnew, tck, der=0)\n x = xnew\n y = splfit\n if item[\"path_id\"][-3:] == \"002\":\n ax.plot(x,y, linewidth=2.5, zorder=1, color=color_dict[item[\"cation_type\"]], linestyle='dashed')\n elif item[\"path_id\"][-3:] == \"003\":\n ax.plot(x,y, linewidth=2.5, zorder=1, color=color_dict[item[\"cation_type\"]], 
linestyle='dotted')\n else:\n ax.plot(x,y, linewidth=2.5, zorder=1, color=color_dict[item[\"cation_type\"]])\n ax.set_xlabel(xlabel, fontsize=24)\n # ax.set_ylim([0,1200])\n # ax.set_xlim([0,100])\n ax.set_ylabel(ylabel, fontsize=24)\n ax.tick_params(axis='x', labelsize=22)\n ax.tick_params(axis='y', labelsize=22)\n border_width = 2\n [i.set_linewidth(border_width) for i in ax.spines.itervalues()]\n plt.tight_layout()\n plt.legend(loc='best', prop={'size': 14})\n plt.rc('text', usetex=True)\n plt.rc('font', family='sans-serif')\n plt.tight_layout()\n plt.show()", "title": "" }, { "docid": "0ba0ed1c9622b08f8b1ab33c2136c3ce", "score": "0.5218519", "text": "def plot_detects(self, ax, color_by=None, add_cbar=False, \r\n plot_smoothed=False, plot_outliers=False, \r\n plot_changepts=False, plot_changepts_fill=False,\r\n mask=None, max_del_vel=1.0, **kwargs):\r\n\r\n lines = []\r\n if mask is None:\r\n rec_tr = self.rec_track\r\n else:\r\n rec_tr = self.rec_track[mask]\r\n ndetects = len(rec_tr)\r\n for nd in range(ndetects-1):\r\n tr = rec_tr[nd]\r\n xy1 = [tr.X,tr.Y]\r\n tr = rec_tr[nd+1]\r\n xy2 = [tr.X,tr.Y]\r\n lines.append([xy1, xy2])\r\n time_from_entry = rec_tr.Sec - self.t_entry + 1\r\n log_age = np.log10(time_from_entry)\r\n if color_by == 'age':\r\n kwargs['array'] = np.asarray(log_age)\r\n kwargs['cmap'] = cm_age\r\n kwargs['linewidths'] = (0.8)\r\n tr_lines = LineCollection(lines, **kwargs)\r\n if color_by not in ['daynight','routesel']:\r\n ax.add_collection(tr_lines)\r\n elif color_by == 'routesel':\r\n self.identify_route_selection(mask)\r\n # add raw position dots to vertices of lines\r\n\r\n clims = [ticks[0],ticks[-1]]\r\n tr_lines.set_clim(clims)\r\n if add_cbar:\r\n label = 'Time from Entry (seconds)'\r\n c1 = plt.gcf().colorbar(tr_lines)\r\n c1.set_label(label)\r\n c1.set_ticks(ticks)\r\n c1.set_ticklabels(tick_labels)\r\n # plot flagged positions \r\n if plot_outliers:\r\n kwargs['linewidths'] = (0.2)\r\n kwargs['linestyle'] = ('--')\r\n rec_tr_all = self.rec_track\r\n if mask is not None: # plot thin lines to outliers\r\n all_lines = []\r\n for nd in range(self.ndetects-1):\r\n tr = rec_tr_all[nd]\r\n xy1 = [tr.X,tr.Y]\r\n tr = rec_tr_all[nd+1]\r\n xy2 = [tr.X,tr.Y]\r\n all_lines.append([xy1, xy2])\r\n if color_by == 'age':\r\n time_from_entry = rec_tr_all.Sec - self.t_entry + 1\r\n log_age_all = np.log10(time_from_entry)\r\n kwargs['array'] = np.asarray(log_age_all)\r\n tr_lines_all = LineCollection(all_lines, **kwargs)\r\n if color_by in ['daynight','routesel']:\r\n ax.add_collection(tr_lines_all)\r\n tr_lines_all.set_clim(clims)\r\n tr_lines_all.set_zorder(1)\r\n # plot flagged outliers\r\n for nm, method in enumerate(self.outlier_methods):\r\n omarker = outlier_marker[method]\r\n ocolor = outlier_color[method]\r\n if method == 'Dry':\r\n color = ocolor\r\n else:\r\n color = \"None\"\r\n flagged = np.where(rec_tr_all[method] == 1)[0]\r\n ax.scatter(rec_tr_all.X[flagged], rec_tr_all.Y[flagged], \r\n marker=omarker, edgecolor=ocolor, \r\n c=color, s=10.0, zorder=8)\r\n if color_by == 'age':\r\n pos = ax.scatter(rec_tr.X, rec_tr.Y, marker='.', s=2.6, \r\n cmap=cm_age, vmin=ticks[0], vmax=ticks[1])\r\n elif color_by == 'daynight':\r\n i = self.mnames.index('daytime_entry')\r\n day = self.metrics[i]\r\n if day:\r\n colr = 'r'\r\n else:\r\n colr = 'k'\r\n pos = ax.scatter(rec_tr.X, rec_tr.Y, marker='.', s=2.6, c=colr)\r\n elif color_by == 'routesel':\r\n if self.route == 'Old':\r\n colr = 'r'\r\n elif self.route == 'SJ':\r\n colr = 'g'\r\n else:\r\n colr = 'gold'\r\n pos = 
ax.scatter(rec_tr.X, rec_tr.Y, marker='.', s=2.6, c=colr)\r\n\r\n if plot_smoothed: # plot smoothed positions on top\r\n #trs = self.rec_smooth_pos\r\n trs = self.rec_smooth_fill\r\n ax.scatter(trs.X, trs.Y, marker='o', color='darkviolet', s=0.8,\r\n zorder=9)\r\n\r\n if plot_changepts: # assumes smoothed position record is available\r\n trs = self.rec_smooth_pos\r\n #turn_angle = self.turn_angle(rec_track = trs)\r\n #turn_angle = trs.turn_angle\r\n mask = trs.change_pt_flag1\r\n ax.scatter(trs.X[mask], trs.Y[mask], marker='^', \r\n c='None',edgecolor='pink', s=8.0, zorder=9)\r\n# c=cm_red(turn_angle[mask1]), s=8.0, zorder=9)\r\n mask = trs.change_pt_flag2\r\n ax.scatter(trs.X[mask], trs.Y[mask], marker='^', \r\n c='None',edgecolor='salmon', s=16.0, zorder=9)\r\n# c=cm_red(turn_angle[mask2]), s=4.0, zorder=9)\r\n mask = trs.change_pt_flag3\r\n ax.scatter(trs.X[mask], trs.Y[mask], marker='^', \r\n c='None',edgecolor='r', s=32.0, zorder=9)\r\n if plot_changepts_fill: # assumes smoothed position record is available\r\n trs = self.rec_smooth_fill\r\n mask = trs.change_pt\r\n ax.scatter(trs.X[mask], trs.Y[mask], marker='^', \r\n c='None',edgecolor='r', s=32.0, zorder=9)\r\n # overwrite smoothed points using p_stat colorbar\r\n ps = ax.scatter(trs.X, trs.Y, marker='.', c=trs.p_stat, \r\n vmin=0, vmax=0.2, cmap=cm_red_r, s=1.0, zorder=9)\r\n cbar_ps = plt.gcf().colorbar(ps)\r\n cbar_ps.set_label('p statistic')\r\n cbar_ps.set_ticks([0,0.2])\r\n c1.set_ticklabels(tick_labels)", "title": "" }, { "docid": "7e34941413c5f4f39a9a9e6766e51085", "score": "0.52067816", "text": "def plottrace_paper(moviedict, figw, figh, figdpi, fontsz, border, xlabel, ylabel, yaxisticks, \n xaxisticks, labels, lw, fs):\n \n for movie, val in moviedict.iteritems():\n os.chdir(movie)\n condition, xlim, color, inum = val\n \n fontv = matplotlib.font_manager.FontProperties(fname='/usr/share/matplotlib/mpl-data/fonts/ttf/arial.ttf')\n fontv.set_size(fontsz)\n \n print(movie)\n td = dil.load_params()\n x, roi_cols = dil.load_results(RESULTS_FILE)\n start = int(td['startshort'])\n end = int(td['endshort'])\n \n \n fig1 = plt.figure(figsize=(figw*xlim/0.6, figh), dpi=figdpi, facecolor='w', edgecolor='k')\n \n xlen = len(x[roi_cols['Mean1']][start:end])\n #print(xlen)\n xvals = np.arange(0, float(xlen)/fs, 1/float(fs))\n #print(xvals)\n \n \n ycib = x[roi_cols['Mean1']][start:end]\n ycib = [v - np.mean(ycib) for v in ycib]\n #print(ycib)\n \n ylab = x[roi_cols['Mean2']][start:end]\n ylab = [v - np.mean(ylab) for v in ylab]\n ylab = [v + 70 for v in ylab]\n \n # Plots the traces\n \n plt.plot(xvals, ylab, label='proboscis tip', linewidth=lw, color='k')\n plt.plot(xvals, ycib, label='cibarium', linewidth=lw, color='b')\n \n \n \n \n \n \n \n if labels == 'yes':\n plt.title(td['condition'], fontproperties=fontv, horizontalalignment='left')\n \n #Plots legend and removes the border around it.\n legend=plt.legend()\n #legend = plt.legend(bbox_to_anchor = (1.5, 1.6))\n legend.draw_frame(False)\n ltext = legend.get_texts() \n plt.setp(ltext, fontproperties=fontv) \n \n ax = plt.gca()\n \n #Uncomment lines below to display without top and right borders.\n \n if border == 'no':\n for loc, spine in ax.spines.iteritems():\n if loc in ['left','bottom']:\n pass\n elif loc in ['right','top']:\n spine.set_color('none') # don't draw spine\n else:\n raise ValueError('unknown spine location: %s'%loc)\n \n \n #Uncomment lines below to display ticks only where there are borders.\n ax.xaxis.set_ticks_position('bottom')\n 
ax.yaxis.set_ticks_position('left')\n \n # Specifies the number of tickmarks/labels on the yaxis.\n #ax.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(yaxisticks)) \n ## Removes tick labels and ticks from xaxis.\n ax.axes.yaxis.set_major_locator(matplotlib.ticker.NullLocator())\n \n if labels == 'yes':\n plt.ylabel(ylabel, fontsize=fontsz, labelpad=12)\n fig1.figsize = (6, 3)\n \n # Adjusts the space between the plot and the edges of the figure; (0,0) is the lower \n #lefthand corner of the figure.\n fig1.subplots_adjust(bottom=0.3)\n fig1.subplots_adjust(left=0.05)\n fig1.subplots_adjust(right=0.95)\n fig1.subplots_adjust(top=0.95)\n \n #ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(XAXISTICKS)) \n \n #Specifies axis labels and axis tick label sizes.\n plt.xlabel(xlabel, fontproperties=fontv)\n plt.ylabel(ylabel, fontproperties=fontv)\n plt.xticks([0, 0.2, 0.4, 0.6], fontproperties=fontv)\n plt.xlim( (0, xlim+0.05) )\n #plt.yticks(fontproperties=fontv)\n \n \n \n # Saves the figures in plots/plots.\n if labels == 'no':\n plotfolder = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath('.'))),\n 'plots')\n makenewdir(plotfolder)\n figname = os.path.join(plotfolder, movie + '_trace_nolab')\n plt.savefig(figname+'.svg', dpi=FIGDPI, format='svg')\n plt.savefig(figname+'.png', dpi=FIGDPI, format='png')\n os.chdir('../')\n\n if labels == 'yes':\n plotfolder = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath('.'))),\n 'plots')\n makenewdir(plotfolder)\n figname = os.path.join(plotfolder, movie + '_trace')\n plt.savefig(figname+'.svg', dpi=FIGDPI, format='svg')\n plt.savefig(figname+'.png', dpi=FIGDPI, format='png')\n os.chdir('../')", "title": "" }, { "docid": "8dab387da5552c85399500b7fcd1ddcb", "score": "0.5204494", "text": "def plot_individual_tm(xdict, ydict, xprop, yprop, documents, spline):\n figure_array = {}\n for item in documents:\n xlabel = \"\\\\textbf{\" + label_dict[xprop] + \"}\"\n ylabel = \"\\\\textbf{\" + label_dict[yprop] + \"}\"\n print str(item[\"path_id\"])\n x = xdict[item[\"path_id\"]]\n y = ydict[item[\"path_id\"]]\n # fig_title = item[\"path_id\"] + \"(\" + item[\"pretty_formula\"] + \")\" # Individual traces\n fig_title = yprop + item[\"cation_type\"] # Plot by cation\n figure_array[item[\"path_id\"]] = plt.figure(fig_title, figsize=(6,6), dpi=plotting_dpi)\n ax = figure_array[item[\"path_id\"]].add_subplot(111)\n ax.scatter(x,y, s=70, zorder=2, color=tm_color_dict[item[\"tm_type\"][0]], linewidths=2.5, edgecolors='black',\n label=item[\"tm_type\"][0])\n if spline:\n tck = interpolate.splrep(x, y, s=0)\n xnew = np.arange(0, 100, 0.1)\n splfit = interpolate.splev(xnew, tck, der=0)\n x = xnew\n y = splfit\n if item[\"path_id\"][-3:] == \"002\":\n ax.plot(x, y, linewidth=2.5, zorder=1, color=tm_color_dict[item[\"tm_type\"][0]], linestyle='dashed')\n else:\n ax.plot(x, y, linewidth=2.5, zorder=1, color=tm_color_dict[item[\"tm_type\"][0]])\n ax.set_xlabel(xlabel, fontsize=24)\n # ax.set_ylim([0,1200])\n # ax.set_xlim([7,22])\n ax.set_ylabel(ylabel, fontsize=24)\n ax.tick_params(axis='x', labelsize=22)\n ax.tick_params(axis='y', labelsize=22)\n border_width = 2\n [i.set_linewidth(border_width) for i in ax.spines.itervalues()]\n plt.tight_layout()\n plt.legend(loc='best', prop={'size': 14})\n plt.rc('text', usetex=True)\n plt.rc('font', family='sans-serif')\n plt.tight_layout()\n plt.show()", "title": "" }, { "docid": "2765da8e17f115609551602cc547d7a6", "score": "0.5202412", "text": "def add_annotation_to_2d_hist(self, annotations):\n 
import matplotlib.pyplot as plt\n plt.rcParams['text.usetex'] = True\n try:\n import matplotlib.patheffects as PathEffects\n except:\n raise ImportError()\n\n for i in range(0, len(annotations)):\n for j in range(0, len(annotations[0])):\n plt.text(i, j, '%.2f'%annotations[i][j],\n fontsize='7',\n verticalalignment='center',\n horizontalalignment='center',\n color='w',\n path_effects=[PathEffects.withStroke(\n linewidth=2.5,\n foreground='k'\n )])", "title": "" }, { "docid": "7ed52a0165b93b1369298ded64bc0e86", "score": "0.5196912", "text": "def _plot(\n data: ResonatorSpectroscopyAttenuationData,\n fit: ResonatorSpectroscopyAttenuationResults,\n qubit,\n):\n return spectroscopy_plot(data, fit, qubit)", "title": "" }, { "docid": "cf2e2c600b88d7b614f77efa3f5f8903", "score": "0.5196624", "text": "def plot(self, x, y, ax=None, size=None):\n if ax is None:\n ax = plt.gca()\n if size is None:\n size = 1\n if self.final_image is not None:\n imagebox = OffsetImage(self.final_image, zoom=size)\n ab = AnnotationBbox(\n imagebox, (x, y), frameon=False,\n pad=0)\n ax.add_artist(ab)\n zorder = ab.zorder\n else:\n zorder = 0\n if self.marker:\n if self.final_image is not None:\n markersize = max(self.final_image.size)\n else:\n markersize = 50\n markersize = markersize * size\n if self.marker_front:\n plt.plot(x, y, marker=self.marker, markeredgecolor=self.col,\n markerfacecolor=(0, 0, 0, 0), markersize=markersize,\n zorder=zorder + 0.1,\n markeredgewidth=self.markeredgewidth)\n else:\n plt.plot(x, y, marker=self.marker, markeredgecolor=self.col,\n markerfacecolor=self.col, markersize=markersize,\n zorder=zorder - 0.1,\n markeredgewidth=self.markeredgewidth)\n if self.string is not None:\n ax.annotate(self.string, (x, y),\n horizontalalignment='center',\n verticalalignment='center',\n zorder=zorder + 0.2,\n fontsize=self.fontsize, fontname=self.fontname,\n color=self.fontcolor)", "title": "" }, { "docid": "4487e6cd26f639ec48b989a782b7b126", "score": "0.5193149", "text": "def create_line_plot(data):\n data['month'] = pd.DatetimeIndex(data['date']).month\n months_lookup = month_num_name_map()\n data['month'] = data.apply(lambda row: months_lookup[row.month], axis=1)\n line_plt = (alt.Chart(data, title=\"COVID-19 Response Ratio - Canada vs USA\").mark_line().encode(\n alt.X(\"month\", sort=list(months_lookup.values()), title=\"Month(2020)\"),\n alt.Y(\"mean(response_ratio)\", title=\"Mean of Response Ratio\"),\n color=alt.Color(\"iso_code\", legend=alt.Legend(title=\"Country\"))\n )).properties(height=350, width=650)\n\n return line_plt", "title": "" }, { "docid": "a6a2443ee7e92c6089111f27d8d4e390", "score": "0.5192414", "text": "def drawAnnotation(self,i=0):\n #print \"DRAW %s\" % i\n self._annotations[i] = [ self.annotations[i][0](n) for n in self.names ]", "title": "" } ]
3299d307690f88859f4f8c980929956f
Helper that makes ul's of survivors.
[ { "docid": "1221daa2f16253cbc0088a3ef29be390", "score": "0.52719736", "text": "def generation_html(children_list, recursion=False):\n output = '<ul>\\n'\n if children_list == []:\n return \"\"\n for child in children_list:\n grand_children = [Survivor(survivor_id=c[\"_id\"], session_object=self.Session) for c in child.get_children()]\n if not recursion:\n gc_html = \"\"\n else:\n gc_html = generation_html(grand_children, recursion=recursion)\n output += '\\n\\t<a><li>%s</li></a>\\n' % survivor_to_span(child)\n output += '</ul>\\n'\n return output", "title": "" } ]
[ { "docid": "f12da939bd31f73d3f45c0a8db6ef9c1", "score": "0.5558531", "text": "def survivor(names, step):\n\n\tx = step - 1\n\tnext = step - 1\n\n\twhile len(names) > 1:\n\t\tnames.pop(next)\n\t\tnext = (next + x) % len(names)\n\treturn names[0]", "title": "" }, { "docid": "a00d9d1b2b4a136f1c54f5544f2ffe31", "score": "0.5220565", "text": "def __init__(self):\n self.container = SortedList()", "title": "" }, { "docid": "16cbc8766bd5fc88f96532c3967ebf72", "score": "0.5165299", "text": "def __init__(self):\n self.sl = SortedList([])", "title": "" }, { "docid": "60ef5ac9f00074f9e24b47f3bf79f959", "score": "0.5110373", "text": "def _make_pourbaix_diagram(self):\n stable_entries = set()\n self._qhull_data = self._create_conv_hull_data()\n dim = len(self._qhull_data[0])\n if len(self._qhull_data) < dim:\n # TODO: might want to lift this restriction and\n # supply a warning instead, should work even if it's slow.\n raise NotImplementedError(\"Can only do elements with at-least \"\n \"3 entries for now\")\n if len(self._qhull_data) == dim:\n self._facets = [list(range(dim))]\n else:\n facets_hull = np.array(ConvexHull(self._qhull_data).simplices)\n self._facets = np.sort(np.array(facets_hull))\n logger.debug(\"Final facets are\\n{}\".format(self._facets))\n\n logger.debug(\"Removing vertical facets...\")\n vert_facets_removed = list()\n for facet in self._facets:\n facetmatrix = np.zeros((len(facet), len(facet)))\n count = 0\n for vertex in facet:\n facetmatrix[count] = np.array(self._qhull_data[vertex])\n facetmatrix[count, dim - 1] = 1\n count += 1\n if abs(np.linalg.det(facetmatrix)) > 1e-8:\n vert_facets_removed.append(facet)\n else:\n logger.debug(\"Removing vertical facet : {}\".format(facet))\n\n logger.debug(\"Removing UCH facets by eliminating normal.z >0 ...\")\n\n # Find center of hull\n vertices = set()\n for facet in vert_facets_removed:\n for vertex in facet:\n vertices.add(vertex)\n c = [0.0, 0.0, 0.0]\n c[0] = np.average([self._qhull_data[vertex][0]\n for vertex in vertices])\n c[1] = np.average([self._qhull_data[vertex][1]\n for vertex in vertices])\n c[2] = np.average([self._qhull_data[vertex][2]\n for vertex in vertices])\n\n # Shift origin to c\n new_qhull_data = np.array(self._qhull_data)\n for vertex in vertices:\n new_qhull_data[vertex] -= c\n\n # For each facet, find normal n, find dot product with P, and\n # check if this is -ve\n final_facets = list()\n for facet in vert_facets_removed:\n a = new_qhull_data[facet[1]] - new_qhull_data[facet[0]]\n b = new_qhull_data[facet[2]] - new_qhull_data[facet[0]]\n n = np.cross(a, b)\n val = np.dot(n, new_qhull_data[facet[0]])\n if val < 0:\n n = -n\n if n[2] <= 0:\n final_facets.append(facet)\n else:\n logger.debug(\"Removing UCH facet : {}\".format(facet))\n final_facets = np.array(final_facets)\n self._facets = final_facets\n\n stable_vertices = set()\n for facet in self._facets:\n for vertex in facet:\n stable_vertices.add(vertex)\n stable_entries.add(self._qhull_entries[vertex])\n self._stable_entries = stable_entries\n self._vertices = stable_vertices", "title": "" }, { "docid": "065eb7371d72945f88ab304abc140495", "score": "0.5029898", "text": "def ul(cls, lst):\n result = ['<ul class=\"%s\">' % (cls)]\n result += ['%s%s' % (' '*4, l) for l in li(lst)]\n result += ['</ul>']\n return result", "title": "" }, { "docid": "79954f0c681a20fab903edb0639d19c2", "score": "0.49322832", "text": "def __init__(self, vec2d):\n self.l = []\n for i in vec2d:\n while len(i):\n self.l.append(i.pop(0))", "title": "" }, { "docid": 
"3275bd5db6e1cf7ddb4cb9d094a1ab22", "score": "0.4901795", "text": "def to_ul(self, insert_ul=True):\n children = self.__class__.objects.filter(parent=self).order_by('order')\n childish_html = \"\" if not children else \"<ul>\" + u\"\".join([kid.to_ul(False) for kid in children]) + \"</ul>\"\n output = self._html_tag() + childish_html + \"</li>\"\n if insert_ul:\n output = \"<ul>\" + output + \"</ul>\"\n return output", "title": "" }, { "docid": "541d3cbfa87a6813913138b864929260", "score": "0.48746848", "text": "def __init__(self):\n # Does this seem reasonable?\n self.h = self.ListNode(None) # Left guardian (head)\n self.z = self.ListNode(None) # Right guardian (tail)\n self.h.addAfter(self.z)", "title": "" }, { "docid": "6d85d1e75cc2184c7b9cb47dba961cfb", "score": "0.47599024", "text": "def get_survivors(self, return_type=False):\n\n survivors = list(mdb.survivors.find({\"$or\": [\n {\"email\": self.user[\"login\"]},\n {\"created_by\": self.user[\"_id\"]},\n ], \"removed\": {\"$exists\": False}}\n ).sort(\"name\"))\n\n # user version\n\n if return_type == \"asset_links\":\n output = \"\"\n for s in survivors:\n S = Survivor(survivor_id=s[\"_id\"], session_object=self.Session)\n output += S.asset_link()\n return output\n\n return survivors", "title": "" }, { "docid": "e44b48d989ccdfd00ac48c94b2392159", "score": "0.47498485", "text": "def uniques(alist):\n # Create a copy with no duplicates\n pass # Implement me", "title": "" }, { "docid": "77f836719f9de388b73ecd8e2ed3549d", "score": "0.47410232", "text": "def prepopulate_children_list(self):\n return []", "title": "" }, { "docid": "509e21287dbcd41d8993cd2baa8d4f9f", "score": "0.47366303", "text": "def createList():\n return SlList( None, 0 )", "title": "" }, { "docid": "e327314f916427891f9736cd7a32e477", "score": "0.4732055", "text": "def list(el): \n return cons(el, [])", "title": "" }, { "docid": "ad5ebed483a2b600221a94385eea3268", "score": "0.47228625", "text": "def heapify_up(self) -> None:\n index = self.size - 1\n while self.has_parent(index) and self.parent(index) > self.items[index]:\n self.swap(self.parent_index(index), index)\n index = self.parent_index(index)", "title": "" }, { "docid": "e337a5dfec42deb8491f2656b0295e65", "score": "0.46847558", "text": "def _heapify(self):\n start = self._parent(len(self) - 1)\n for i in range(start, -1, -1):\n self._down_heap(i)", "title": "" }, { "docid": "e1d49fe98e5441b3d5d905590594c83e", "score": "0.46761906", "text": "def get_survivors(self, return_type=False, user_id=None, exclude=[], exclude_dead=False):\n\n query = {\"removed\": {\"$exists\": False}, \"settlement\": self.settlement[\"_id\"], \"_id\": {\"$nin\": exclude}}\n\n if exclude_dead:\n query[\"dead\"] = {\"$exists\": False}\n\n survivors = mdb.survivors.find(query).sort(\"name\")\n\n if self.User is not None:\n user_login = self.User.user[\"login\"]\n elif self.User is None and user_id is not None:\n self.User = User(user_id=user_id)\n user_login = self.User.user[\"login\"]\n else:\n self.User = None\n user_login = None\n\n current_user_is_settlement_creator = False\n if self.User is not None and self.User.user[\"_id\"] == self.settlement[\"created_by\"]:\n current_user_is_settlement_creator = True\n elif self.User is not None and \"admins\" in self.settlement.keys() and self.User.user[\"login\"] in self.settlement[\"admins\"]:\n current_user_is_settlement_creator = True\n\n if return_type == \"hunting_party\":\n hunting_party = []\n for survivor in survivors:\n if \"in_hunting_party\" in survivor.keys():\n 
hunting_party.append(survivor)\n return hunting_party\n\n if return_type == \"html_buttons\":\n output = \"\"\n for survivor in survivors:\n S = Survivor(survivor_id=survivor[\"_id\"])\n output += S.asset_link()\n return output\n\n if return_type == \"sex_count\":\n male = 0\n female = 0\n for s in survivors:\n if s[\"sex\"] == \"M\":\n male += 1\n elif s[\"sex\"] == \"F\":\n female += 1\n return \"%sM/%sF\" % (male,female)\n\n if return_type == \"html_campaign_summary\":\n # this is our big boy, full-featured controls for survivor management\n if survivors.count() == 0:\n return html.survivor.no_survivors_error\n\n groups = {\n 1: {\"name\": \"Departing\", \"survivors\": [], },\n 2: {\"name\": \"Favorite\", \"survivors\": [], },\n 3: {\"name\": \"Available\", \"survivors\": [], },\n 4: {\"name\": \"Skipping Next Hunt\", \"survivors\": [], },\n 5: {\"name\": \"Retired\", \"survivors\": [], },\n 6: {\"name\": \"The Dead\", \"survivors\": [], },\n }\n\n anonymous = []\n available = []\n for survivor in survivors:\n\n S = Survivor(survivor_id=survivor[\"_id\"], session_object=self.Session)\n annotation = \"\"\n user_owns_survivor = False\n disabled = \"disabled\"\n\n # stylize the survivor name\n savior_dict = {\n \"Lucernae\": \"Dream of the Lantern\",\n \"Caratosis\": \"Dream of the Beast\",\n \"Dormenatus\": \"Dream of the Crown\",\n }\n\n savior_square = \"\"\n for epithet in S.get_epithets():\n if epithet in [\"Lucernae\", \"Caratosis\", \"Dormenatus\"]:\n savior_square = '&ensp; <font id=\"%s\">&#x02588; <i>%s</i></font> <br/>' % (epithet, savior_dict[epithet])\n\n if survivor[\"email\"] == user_login or current_user_is_settlement_creator or \"public\" in survivor.keys():\n disabled = \"\"\n user_owns_survivor = True\n\n button_class = \"\"\n if user_owns_survivor:\n button_class = \"survivor\"\n\n if \"skip_next_hunt\" in S.survivor.keys():\n annotation = \"&ensp; <i>Skipping next hunt</i><br/>\"\n button_class = \"tan\"\n\n for t in [(\"retired\", \"retired_in\", \"tan\"),(\"dead\", \"died_in\", \"silver\")]:\n attrib, event, color = t\n if attrib in S.survivor.keys():\n if event in S.survivor.keys():\n annotation = \"&ensp; <i>%s LY %s</i><br/>\" % (event.replace(\"_\",\" \").capitalize(), S.survivor[event])\n else:\n annotation = \"&ensp; <i>%s</i><br/>\" % attrib.title()\n button_class = color\n\n\n s_id = S.survivor[\"_id\"]\n if not user_owns_survivor:\n s_id = None\n\n\n can_hunt = \"\"\n if \"dead\" in S.survivor.keys() or \"retired\" in S.survivor.keys() or \"skip_next_hunt\" in S.survivor.keys():\n can_hunt = \"disabled\"\n\n in_hunting_party = \"checked\"\n if \"in_hunting_party\" in S.survivor.keys():\n in_hunting_party = None\n can_hunt = \"\"\n\n is_favorite = \"hidden\"\n if \"favorite\" in S.survivor.keys():\n is_favorite = \"favorite\"\n\n avatar_img = \"\"\n if \"avatar\" in S.survivor.keys():\n avatar_img = S.get_avatar(\"html_campaign_summary\")\n\n survivor_html = html.survivor.campaign_asset.safe_substitute(\n avatar = avatar_img,\n survivor_id = s_id,\n settlement_id = self.settlement[\"_id\"],\n hunting_party_checked = in_hunting_party,\n settlement_name = self.settlement[\"name\"],\n b_class = button_class,\n able_to_hunt = can_hunt,\n returning = S.get_returning_survivor_status(\"html_badge\"),\n special_annotation = annotation,\n disabled = disabled,\n name = S.survivor[\"name\"],\n sex = S.get_sex(\"html\"),\n favorite = is_favorite,\n hunt_xp = S.survivor[\"hunt_xp\"],\n survival = S.survivor[\"survival\"],\n insanity = S.survivor[\"Insanity\"],\n courage = 
S.survivor[\"Courage\"],\n understanding = S.survivor[\"Understanding\"],\n savior = savior_square,\n )\n\n # finally, file our newly minted survivor in a group:\n if \"in_hunting_party\" in S.survivor.keys():\n groups[1][\"survivors\"].append(survivor_html)\n elif \"dead\" in S.survivor.keys():\n groups[6][\"survivors\"].append(survivor_html)\n elif \"retired\" in S.survivor.keys():\n groups[5][\"survivors\"].append(survivor_html)\n elif \"skip_next_hunt\" in S.survivor.keys():\n groups[4][\"survivors\"].append(survivor_html)\n elif \"favorite\" in S.survivor.keys():\n groups[2][\"survivors\"].append(survivor_html)\n else:\n if S.survivor[\"name\"] == \"Anonymous\":\n anonymous.append(survivor_html)\n else:\n available.append(survivor_html)\n\n # build the \"available\" group\n groups[3][\"survivors\"].extend(available)\n groups[3][\"survivors\"].extend(anonymous)\n\n #\n # Start assembling HTML here\n #\n output = html.settlement.campaign_summary_survivors_top\n\n for g in sorted(groups.keys()):\n group = groups[g]\n\n\n if group[\"name\"] in [\"The Dead\", \"Retired\"]:\n color = None\n if group[\"name\"] == \"The Dead\":\n color = \"grey\"\n elif group[\"name\"] == \"Retired\":\n color = \"tan\"\n the_dead = \"\\n\".join(group[\"survivors\"])\n g = group[\"name\"].replace(\" \",\"\").lower() + \"BlockGroup\"\n output += html.survivor.campaign_summary_hide_show.safe_substitute(color=color, group_id=g, heading=group[\"name\"], death_count = len(group[\"survivors\"]), dead_survivors=the_dead)\n else:\n output += \"<h4>%s (%s)</h4>\\n\" % (group[\"name\"], len(group[\"survivors\"]))\n\n\n for s in group[\"survivors\"]:\n output += \" %s\\n\" % s\n\n if group[\"name\"] == \"Departing\" and group[\"survivors\"] != []:\n bonuses = self.get_bonuses(\"departure_buff\")\n if bonuses != {}:\n output += '<hr class=\"invisible\"><span class=\"tiny_break\"></span>'\n for b in sorted(bonuses.keys()):\n output += \"<p><b>%s:</b> %s</p>\" % (b, bonuses[b])\n if bonuses != {}:\n output += '<span class=\"tiny_break\"/></span>'\n\n\n if group[\"name\"] == \"Departing\" and group[\"survivors\"] == []:\n output += \"<p>Use [::] to add survivors to the Departing group.</p>\"\n elif group[\"name\"] == \"Departing\" and group[\"survivors\"] != [] and current_user_is_settlement_creator:\n # settlement admin_controls; only show these if we've got\n # survivors and the current user is the admin\n\n output += html.settlement.hunting_party_macros.safe_substitute(settlement_id=self.settlement[\"_id\"])\n\n # current quarry controls\n quarry_options = []\n for q in self.get_game_asset(\"defeated_monsters\", return_type=\"options\"):\n if \"current_quarry\" in self.settlement.keys() and self.settlement[\"current_quarry\"] == q:\n quarry_options.append(\"<option selected>%s</option>\" % q)\n else:\n quarry_options.append(\"<option>%s</option>\" % q)\n output += html.settlement.current_quarry_select.safe_substitute(options=quarry_options, settlement_id=self.settlement[\"_id\"])\n\n # finally, controls to return the hunting party\n if self.User.get_preference(\"confirm_on_return\"):\n output += html.settlement.return_hunting_party_with_confirmation.safe_substitute(settlement_id=self.settlement[\"_id\"])\n else:\n output += html.settlement.return_hunting_party.safe_substitute(settlement_id=self.settlement[\"_id\"])\n\n output += html.settlement.hunting_party_macros_footer\n\n return output + html.settlement.campaign_summary_survivors_bot\n\n if return_type == \"chronological_order\":\n return 
mdb.survivors.find(query).sort(\"created_on\")\n\n return survivors", "title": "" }, { "docid": "ae1abae6c065d4b36a960ec9a3df2543", "score": "0.46730578", "text": "def choose_dragonborn_ancestry(self):", "title": "" }, { "docid": "8293616d05a1ea5f135d3cb837b4336b", "score": "0.46707425", "text": "def nest(l):\r\n\tif len(l) == 1:\r\n\t\treturn l\r\n\tstacks = []\r\n\tfor n in l:\r\n\t\tg = nest([x for x in l if x != n])\r\n\t\tif len(g) == 1:\r\n\t\t\tg.insert(0,n)\r\n\t\t\tstacks.append(g)\r\n\t\telse:\r\n\t\t\tfor v in g:\r\n\t\t\t\tv.insert(0,n)\r\n\t\t\tstacks.extend(g)\r\n\treturn stacks", "title": "" }, { "docid": "f44d2455ca9a71215e2980bed9a9ca60", "score": "0.467026", "text": "def prepopulate_virtual_children_list(self):\n return []", "title": "" }, { "docid": "e6f0a61af8c1bda5195f905174bbdf62", "score": "0.46694744", "text": "def urchRemover(self,urchlist):\n for urch in urchlist:\n if urch.x < 0-urch.width:\n urchlist.pop(urchlist.index(urch))\n return urchlist", "title": "" }, { "docid": "b4fd2da4dfcaf11f9adc46a405037f16", "score": "0.46666467", "text": "def generate_re_trees(plist):\n for p in plist:\n rt = p.re_tree", "title": "" }, { "docid": "fb25aa7d75b842c266f0e3813cb819c0", "score": "0.46584854", "text": "def hoover(self):\n if len(self.children) == 1:\n dbg('Paned::hoover: We only have one child, die')\n parent = self.get_parent()\n child = self.children[0]\n self.remove(child)\n parent.replace(self, child)\n del(self)", "title": "" }, { "docid": "52695d424a70b3f0322a52b0ff2360dc", "score": "0.46543986", "text": "def get_children(self, return_type=None):\n children = set()\n children_raw = []\n survivors = self.Settlement.get_survivors()\n for s in survivors:\n survivor_parents = []\n for p in [\"father\",\"mother\"]:\n if p in s.keys():\n survivor_parents.append(s[p])\n for p in [\"father\",\"mother\"]:\n if p in s.keys() and s[p] == self.survivor[\"_id\"]:\n other_parent = None\n survivor_parents.remove(s[p])\n if survivor_parents != []:\n other_parent = survivor_parents[0]\n if other_parent is not None:\n try:\n O = Survivor(survivor_id=other_parent, session_object=self.Session)\n children.add(\"%s (with %s)\" % (s[\"name\"], O.survivor[\"name\"]))\n children_raw.append(s)\n except:\n pass\n else:\n children.add(s[\"name\"])\n children_raw.append(s)\n\n if return_type == \"html\":\n if children == set():\n return \"\"\n else:\n return \"<p>%s<p>\" % (\", \".join(list(children)))\n\n return list(children_raw)", "title": "" }, { "docid": "d1b1d0c6292125b65dc00599be5d4186", "score": "0.4641613", "text": "def merge_survivors(game, tile):\n adjacent_hotels = hotels_adjacent_to_tile(game, tile)\n if len(adjacent_hotels) < 2:\n return None\n elif adjacent_hotels:\n largest = max(map(lambda h: len(h['tiles']), adjacent_hotels))\n return [h for h in adjacent_hotels if len(h['tiles']) == largest]\n else:\n return []", "title": "" }, { "docid": "496b35bbd79e20d968dab4fc5b43ce16", "score": "0.46303403", "text": "def _percolate_up(self):\n index = self.size\n while index // 2 > 0: # stop if we get to the root\n parent = self._store[index // 2]\n if self._store[index] < parent:\n self._store[index // 2] = self._store[index]\n self._store[index] = parent\n index = index // 2\n else:\n index = 0 # Stop iterating", "title": "" }, { "docid": "94988fab62ed56acc064108a7635ddc9", "score": "0.46258965", "text": "def test_self_reference(self):\n lst = []\n lst.append(lst)\n self.assertListEqual(lst, unique(lst))", "title": "" }, { "docid": "8311d02236ea13f321fbc56ce4122728", "score": 
"0.46236616", "text": "def problem1_space(ll):\n seenvals = set()\n n = ll\n while n:\n if n.value in seenvals:\n prev.next = n.next\n n = prev\n else:\n seenvals.add(n.value)\n prev = n\n n = n.next\n\n return ll", "title": "" }, { "docid": "f891efb9ec1ed77fa5a384a5a94605c7", "score": "0.46204585", "text": "def _get_double_tagged_outer_list(self):\n return self.__double_tagged_outer_list", "title": "" }, { "docid": "b6f58dd36c8f2822f0278781ff22453f", "score": "0.46154115", "text": "def rehash(self):\n if self.sizeOver():\n self._list = self._list + [None] * self._capacity\n temp_front = self._front.getLink()\n orig_back = self._back\n while not temp_front.getPrevious() is orig_back:\n self.add(self.remove(str(temp_front.getPrevious())))\n temp_front = temp_front.getLink()\n self.add(self.remove(str(temp_front.getPrevious())))\n self._capacity *= 2\n elif self.sizeOff() and self._size > 0:\n temp_list = []\n new_capacity = self._capacity // 2\n while not self._front is None:\n temp_list.append(self._remove())\n for _ in range(new_capacity, self._capacity):\n self._list.pop()\n for item in temp_list:\n self.add(item)\n self._capacity = new_capacity", "title": "" }, { "docid": "7a8bd8ef4cad750cd411729555f44909", "score": "0.46072054", "text": "def __init__(self):\n self.l = []", "title": "" }, { "docid": "d6b57c873ee730efb1e2acea4b778464", "score": "0.45957702", "text": "def clean_hierarchy(self):\n ...", "title": "" }, { "docid": "3b83c859b71926be5ec6c93a8bcc5eee", "score": "0.4593707", "text": "def _update_children_lst(self, first: Block, second: Block, third: Block,\n fourth: Block) -> None:\n copy1 = first.create_copy()\n copy2 = second.create_copy()\n copy3 = third.create_copy()\n copy4 = fourth.create_copy()\n self.children[0] = copy1\n self.children[1] = copy2\n self.children[2] = copy3\n self.children[3] = copy4", "title": "" }, { "docid": "34409c751e616635b98150c9e556f970", "score": "0.45927206", "text": "def BUILD_LIST(self, count):\n elts = self.vm.popn(count)\n self.vm.push(elts)", "title": "" }, { "docid": "6a017176a3d24b0c9b30dfed15924865", "score": "0.45880547", "text": "def heapify(items):\n # start at parent node\n index = (len(items) - 2) >> 1\n # Move elements into the appropiate index\n while index >= 0:\n sift_down(items, index)\n index -= 1\n return items", "title": "" }, { "docid": "52f7e58618460fec80a06bd3a043b98c", "score": "0.45750952", "text": "def makelist(self, unordered):\n\t\t\torderedlist = []\n\t\t\ti = 0\n\t\t\tfor t in unordered:\n\t\t\t\torderedlist.append((i, t[1]))\n\t\t\treturn orderedlist", "title": "" }, { "docid": "c6587b09854bddf7e2fe9b4ec5db5aec", "score": "0.45700434", "text": "def make_tree(list, category):\n\t#takes in a list separated by + and - and puts it into a very left-heavy tree\n\ttree = list[0]\n\tlist.pop(0)\n\tfor i in range(0, len(list)):\n\t\tif list[i] in category:\n\t\t\tsubtree = list[i+1]\n\t\t\tif subtree.count('*') or subtree.count('/'):\n\t\t\t\tsubtree = split_ops(subtree, ['*', '/'])\n\t\t\t\tsubtree = make_tree(subtree, ['*', '/'])\n\t\t\ttree = [list[i], tree, subtree] \n\treturn tree", "title": "" }, { "docid": "63f97a29b9e41d1b6eb5ee4e974e95b7", "score": "0.45677245", "text": "def __init__(self):\n # FILL THIS IN\n self.l=[]", "title": "" }, { "docid": "f28815754d247ccbca5c9e9980d1b596", "score": "0.45652473", "text": "def __init__(self):\n self.elements = []\n self.tags = {}\n self.last_index = -1", "title": "" }, { "docid": "8547d4ea43d9df0f4074e93de8d53832", "score": "0.45622692", "text": "def initialiserObjets():\n 
vaccins.append(vaccin.Vaccin())\n\n for age in range(nb_categories//2):\n objets_categories.append(Individu.Individu(age, None))\n\n for age in range(nb_categories//2, nb_categories):\n objets_categories.append(Individu.Individu(age, vaccins[0]))", "title": "" }, { "docid": "b8409953aed1193715e62c7047071c5c", "score": "0.45603994", "text": "def add_up(lst):\r\n\r\n lst = add_right(rotate_table(lst))\r\n for i in range(3):\r\n lst = rotate_table(lst)\r\n return lst", "title": "" }, { "docid": "cc18a25fd77fefc41d9eca1d0d8a814d", "score": "0.45531574", "text": "def __init__(self):\n self.l = []\n # self.large = []\n # self.small = []", "title": "" }, { "docid": "f3643947a9ee9dd517abc986391ea34b", "score": "0.45493817", "text": "def empty_purgatory(self) -> None:\n\n storage = deque([])\n storage_size = 0\n for _ in range(self.board.purgatory.size):\n card_purgatory = self.board.purgatory.draw()\n # One should apply here the effect on the card\n if card_purgatory.owner == self.owner:\n self.effect_destruction(card_purgatory)\n self.grave.add(card_purgatory)\n else:\n storage_size += 1\n storage.append(card_purgatory)\n for _ in range(storage_size):\n self.board.purgatory.add(storage.pop())", "title": "" }, { "docid": "0a69a09874ed308c7f899e55ee67428d", "score": "0.4544865", "text": "def test_view_z_sorting():\n v = View(z=5)\n v2 = View(z=2)\n v3 = View(z=-1)\n v4 = View(z=3)\n \n vparent = View()\n \n vparent.addchild(v)\n vparent.addchild(v2)\n vparent.addchild(v3)\n vparent.addchild(v4)\n \n assert vparent.childviews[0] == v3, \"Sorting is correct.\"\n assert vparent.childviews[1] == v2, \"Sorting is correct.\"\n assert vparent.childviews[2] == v4, \"Sorting is correct.\"\n assert vparent.childviews[3] == v, \"Sorting is correct.\"\n\n vparent.removechild(v)\n \n assert len(vparent.childviews) == 3, \"Childview correctly removed.\"\n \n v4.removeself()\n assert len(vparent.childviews) == 2, \"Childview correctly removed.\"", "title": "" }, { "docid": "16a51ec595a80516aae17efab13f3dbe", "score": "0.4538591", "text": "def __init__(self):\n self.low = []\n heapq.heapify(self.low)\n self.high = []\n heapq.heapify(self.high)", "title": "" }, { "docid": "ed1bf31aa97fb323450a3ad1eecc1e54", "score": "0.45342547", "text": "def get_surviving_population(population, survivor_num, population_update_pool_size):\n\n # if the number of survivors is equal to or higher than the current population size, return the whole population\n if survivor_num >= len(population):\n return population\n\n surviving_population = list()\n\n # otherwise, select as many survivors as needed\n for _ in range(survivor_num):\n\n # a tournament winner can survive\n survivor = get_tournament_winner(population, population_update_pool_size)\n\n # add the survivor to the new population\n surviving_population.append(survivor)\n\n # remove the survivor from the old population, to avoid duplicity\n population.remove(survivor)\n\n return surviving_population", "title": "" }, { "docid": "9f478cb60901fdc669567dc8b83d99d9", "score": "0.453131", "text": "def __init__(self):\n self.li = []\n self.size = 0", "title": "" }, { "docid": "b3f81c9f78b2ae5660367fb020877061", "score": "0.45126146", "text": "def create_colliders(self):\n size = self.scaled_size\n colliders = []\n for i in range(0, len(self.tiled_map.layers) - 2):\n for x, y, image in self.tiled_map.layers[i].tiles():\n p = self.tiled_map.get_tile_properties(x, y, i)\n if p['walkable'] == 'false':\n rect = pg.Rect(x * size, y * size, self.scaled_size, self.scaled_size)\n 
colliders.append(rect)\n return colliders", "title": "" }, { "docid": "ac84dca080a3c3c0a900274168688f8e", "score": "0.45012492", "text": "def __init__(self):\n self.l=[]", "title": "" }, { "docid": "4b18ac5aebf78e6e03a320454b3dc3d9", "score": "0.44792622", "text": "def get_sisters(self):\n if self.up is not None:\n return [ch for ch in self.up.children if ch != self]\n else:\n return []", "title": "" }, { "docid": "80ea7387bba28f7b4fb0dd11deed378f", "score": "0.44743478", "text": "def unique(l):\n lu = []\n for l1 in l:\n if l1 not in lu:\n lu.append(l1)\n return lu", "title": "" }, { "docid": "3e5b465ccd507b3de377df3d2a7d2c97", "score": "0.44687083", "text": "def Turn(L):\r\n a=L[0]\r\n del L[0]\r\n L.append(a)", "title": "" }, { "docid": "ec65a68102952d3b551eef6268289c4b", "score": "0.44638366", "text": "def descendants_from_list(t, list_, arity):\n q = Queue()\n q.add(t)\n list_ = list_.copy()\n while not q.is_empty(): # unlikely to happen\n new_t = q.remove()\n for i in range(0, arity):\n if len(list_) == 0:\n return t # our work here is done\n else:\n new_t_child = Tree(list_.pop(0))\n new_t.children.append(new_t_child)\n q.add(new_t_child)\n return t", "title": "" }, { "docid": "ec65a68102952d3b551eef6268289c4b", "score": "0.44638366", "text": "def descendants_from_list(t, list_, arity):\n q = Queue()\n q.add(t)\n list_ = list_.copy()\n while not q.is_empty(): # unlikely to happen\n new_t = q.remove()\n for i in range(0, arity):\n if len(list_) == 0:\n return t # our work here is done\n else:\n new_t_child = Tree(list_.pop(0))\n new_t.children.append(new_t_child)\n q.add(new_t_child)\n return t", "title": "" }, { "docid": "bfa60190f528398867f445b5d555b6a1", "score": "0.44543543", "text": "def restructureHeap(self):\r\n\r\n self.i = 1\r\n # Storing the elements that already exist in a temporary list\r\n tempList = []\r\n for heapElement in self.heap:\r\n if heapElement != \"NaN\" :\r\n tempList.append( heapElement )\r\n\r\n # Initializing new heap\r\n self.heap = [\"NaN\"] * self.noOfElements\r\n\r\n # Storing all the elements in the temporary list in a continuous fashion in the new heap\r\n for element in tempList:\r\n self.insertElement(element, self.i)", "title": "" }, { "docid": "19ca1c46c12b8b598645d43483c783bc", "score": "0.44426703", "text": "def update_returning_survivor_years(self, add_year=None):\n\n r = \"returning_survivor\"\n\n if not r in self.survivor.keys():\n self.survivor[r] = []\n\n if add_year is not None and not \"dead\" in self.survivor.keys():\n add_year = int(add_year)\n self.survivor[r].append(add_year)\n\n self.survivor[r] = list(set(self.survivor[r]))", "title": "" }, { "docid": "0f7ce1de585e38202ca009463e01fbcc", "score": "0.44419265", "text": "def finalize(self):\n #Clean our lists...\n self.l_created = lists.returnListNoDuplicates(self.l_created)\n self.l_return = lists.returnListNoDuplicates(self.l_return)\n \n if self._createMode in ['curve','jointChain','group','follicle'] and self.l_return:\n if self._createMode == 'group':\n bufferList = []\n for i,o in enumerate(self.l_created):\n buffer = rigging.groupMeObject(o,False)\n bufferList.append(buffer) \n try:mc.delete(o)\n except:pass\n self.l_created = bufferList\n \n elif self._createMode =='follicle':\n if self.mode == 'midPoint':\n log.warning(\"Mid point mode doesn't work with follicles\")\n return\n bufferList = []\n for o in self.l_created:\n mesh = attributes.doGetAttr(o,'cgmHitTarget')\n if mc.objExists(mesh):\n uv = distance.returnClosestUVToPos(mesh,distance.returnWorldSpacePosition(o))\n 
follicle = nodes.createFollicleOnMesh(mesh)\n attributes.doSetAttr(follicle[0],'parameterU',uv[0])\n attributes.doSetAttr(follicle[0],'parameterV',uv[1])\n try:mc.delete(o)\n except:pass \n else:\n for o in self.l_created:\n try:mc.delete(o)\n except:pass\n if self._createMode == 'curve' and len(self.l_return)>1:\n if len(self.l_return) > 1:\n self.l_created = [curves.curveFromPosList(self.l_return)]\n else:\n log.warning(\"Need at least 2 points for a curve\") \n elif self._createMode == 'jointChain':\n self.l_created = []\n mc.select(cl=True)\n for pos in self.l_return: \n self.l_created.append( mc.joint (p = (pos[0], pos[1], pos[2]),radius = 1) ) \n log.debug( self.l_created)\n\tif self.d_tagAndName:\n\t for o in self.l_created:\n\t\ttry:\n\t\t i_o = cgmMeta.cgmNode(o)\n\t\t for tag in self.d_tagAndName.keys():\n\t\t\ti_o.doStore(tag,self.d_tagAndName[tag])\n\t\t i_o.doName()\n\t\texcept StandardError,error:\n\t\t log.error(\">>> clickMesh >> Failed to tag and name: %s | error: %s\"%(i_o.p_nameShort,error)) \t \t\t\n \n self.reset()", "title": "" }, { "docid": "9e0f6037744ae42b09309ab7bf420990", "score": "0.44365597", "text": "def __init__(self):\n self.li = []", "title": "" }, { "docid": "8e430e7a8909a5bffa7276a991243e2e", "score": "0.4435612", "text": "def gen_unordered(self):\n ...", "title": "" }, { "docid": "a93a84c20c1ad1a8c89c48642950c074", "score": "0.4435456", "text": "def orient_colliders(skel, sep_set):\r\n \r\n # only give definite orientation if collider can be uniquely oriented\r\n for edge in skel.edges():\r\n skel[edge[0]][edge[1]]['arrHead'] = False\r\n triples = [(x,y,z) for y in skel for x in skel.predecessors(y)\r\n for z in skel.successors(y) if x<z]\r\n for (x,y,z) in triples:\r\n if (x,z) in sep_set.keys():\r\n this_sepset = sep_set[(x, z)]\r\n else:\r\n this_sepset = set([])\r\n if y not in this_sepset and skel.is_undir_edge((x, y)) and skel.is_undir_edge((y, z)):\r\n skel[x][y]['arrHead'] = True\r\n skel[z][y]['arrHead'] = True\r\n for edge in skel.edges():\r\n x,y = edge\r\n if skel.is_undir_edge(edge) and skel[x][y]['arrHead'] and not skel[y][x]['arrHead']:\r\n skel.remove_edge(*edge[::-1])\r\n return skel", "title": "" }, { "docid": "e9915ce018f09d16a61df32662c83438", "score": "0.4434346", "text": "def create_test_collections(instance):\n # - Ensemble1\n # - Collection1\n # - Collection2\n # - Ensemble2\n # - Collection3\n # - Collection4\n\n instance.ensemble1 = CollectionsEnsemble.objects.create(\n name=\"ensemble1\")\n instance.ensemble2 = CollectionsEnsemble.objects.create(\n name=\"ensemble2\",\n parent=instance.ensemble1)\n instance.collection1 = Collection.objects.create(\n name=\"collection1\",\n ensemble=instance.ensemble1)\n instance.collection2 = Collection.objects.create(\n name=\"collection2\",\n ensemble=instance.ensemble1)\n instance.collection3 = Collection.objects.create(\n name=\"collection2\",\n ensemble=instance.ensemble2)\n instance.collection4 = Collection.objects.create(\n name=\"collection2\")", "title": "" }, { "docid": "01592458891e1627cea2eb93579d3d9f", "score": "0.44330165", "text": "def preevolve(self):", "title": "" }, { "docid": "485912faece59419f0f2e03394c3e86c", "score": "0.4420233", "text": "def make_unique_obj_list(somelist, attr):\n tmp = {}\n for item in somelist:\n tmp[attr(item)] = item\n return tmp.values()", "title": "" }, { "docid": "932db88ab0cd32ef07b079c53396ff57", "score": "0.44152203", "text": "def problem1_no_space(ll):\n n = ll\n while n:\n m = n.next\n prev = n\n while m:\n if n.value == m.value: # Delete m\n 
prev.next = m.next\n m = prev\n prev = m\n m = m.next\n n = n.next\n return ll", "title": "" }, { "docid": "2d3d9436fdcecb1c4191e2cc0b7c3243", "score": "0.44031587", "text": "def cleanup(fulllist, newlist):\n\t\n#\tindexcounter = -1\n\tfor elm in fulllist:\n#\t\tindexcounter +=1\n\t\tfor item in newlist:\n\t\t\tif elm == item:\n\t\t\t\tnewlist.remove(item)\n\treturn newlist", "title": "" }, { "docid": "4a156c9ece77462ab096ab5141811e1b", "score": "0.4402639", "text": "def _tidy(self):\n if self.no_overlap:\n self.remove_overlap(self.no_contiguous) # will sort\n else:\n self.sort()", "title": "" }, { "docid": "729469ef97143d79f8ce00bcf0ed45ac", "score": "0.44005084", "text": "def unilist(l):\r\n return(list(set(l)))", "title": "" }, { "docid": "7f923d2669823811bd4a55a1008ed39c", "score": "0.4392543", "text": "def __init__(self):\r\n self.elements = set()", "title": "" }, { "docid": "1d2e247123151aee434e265ecd410780", "score": "0.4385557", "text": "def buildFromList(cls, l):\n T = Trie()\n for item in l:\n T.insert(item)\n return T", "title": "" }, { "docid": "6872098bcead6909315f70b094ec51fa", "score": "0.438431", "text": "def __init__(self):\n self.heap = [0] # set first position to zero placeholder", "title": "" }, { "docid": "d334f0eff4de5f680c377fd30a85d4c8", "score": "0.43833053", "text": "def build_heap(self, input_list):\n self.item_list = [0] + input_list\n self.item_list[0] = len(input_list)\n for index in range(self.item_list[0] // 2, 0, -1):\n self.downward_adjust(index)", "title": "" }, { "docid": "b2eee4e8e307a64c363fb6cb3aa2bcc8", "score": "0.43787292", "text": "def new_intervals(l: list) -> list:\n for i, s in enumerate(l):\n for t in l:\n if s == t:\n continue\n if s[0] > t[0] and s[1] < t[1]:\n l.pop(i)\n\n return l", "title": "" }, { "docid": "5c9356a3892fa4ad527692549f4156e9", "score": "0.43728223", "text": "def unique(lst):\n # ...", "title": "" }, { "docid": "b8ea216a898ec03e31f89d3a8082f000", "score": "0.43725646", "text": "def __init__(self):\n self.loheap=[]\n self.hiheap=[]\n self.size=0", "title": "" }, { "docid": "f092968127976bfe665cfa65a1395ad7", "score": "0.4370771", "text": "def __init__(self):\n self.l1 = []\n self.l2 = []", "title": "" }, { "docid": "04c120c78312d7fee9492b95cd232180", "score": "0.43696597", "text": "def strip2list(texel):\n if texel.is_group:\n return texel.childs\n return [texel]", "title": "" }, { "docid": "451e188848662199e322b01a1935b484", "score": "0.4369093", "text": "def removeduplicatenode(self):\n index = np.zeros((1, 2), np.int32)\n for k in range(self.p.shape[0], 1, -1):\n pk = self.p[k - 1, :]\n dis = np.sqrt(\n (self.p[: k - 1, 0] - pk[0]) ** 2 + (self.p[: k - 1, 1] - pk[1]) ** 2\n )\n local = np.where(dis < np.finfo(float).eps * 1e5)[0]\n if len(local) != 0:\n index = np.append(\n index, np.array(([k - 1, local[0]])).reshape(1, 2), axis=0\n )\n index = np.delete(index, 0, axis=0)\n if len(index) > 0:\n self.p = np.delete(self.p, index[:, 0], axis=0)\n for ni in range(index.shape[0]):\n id1, id2 = np.where(self.t == index[ni, 0])\n for mi in range(len(id1)):\n self.t[id1[mi], id2[mi]] = index[ni, 1]\n tca = np.unique(self.t)\n tcb = np.unique(self.t)\n while max(tca) > len(tca) - 1:\n t1 = tca[1::]\n t2 = tca[:-1]\n t0 = t1 - t2\n t0 = np.insert(t0, 0, 0)\n index = np.where(t0 > 1)[0]\n tca[index] = tca[index] - 1\n for ni in range(len(tca)):\n id1, id2 = np.where(self.t == tcb[ni])\n for mi in range(len(id1)):\n self.t[id1[mi], id2[mi]] = tca[ni]", "title": "" }, { "docid": "92f55936e9b45bb7db978f15856fac67", "score": 
"0.43673435", "text": "def owned_objects(self):\n return (\n [\n self,\n self.__dict__,\n self._head,\n self._tail,\n self._out_edges,\n self._out_edges._keys,\n self._out_edges._values,\n self._in_edges,\n self._in_edges._keys,\n self._in_edges._values,\n self._vertices,\n self._vertices._elements,\n self._edges,\n ] +\n list(six.itervalues(self._out_edges)) +\n list(six.itervalues(self._in_edges))\n )", "title": "" }, { "docid": "000f9ba46c379f57082e03eacc29a2f5", "score": "0.436727", "text": "def urchCreator(self,urchlist): \n r = randrange(1,8)\n #1/8 chance a purple urchin is created\n if r==1:\n #appears randomly from y coordinates 0 to 590\n urchlist.append(urchin(1000,randrange(0,590),100,100,\"purple\"))\n #1/4 chance a brown urchin is created\n elif r==2 or r==3:\n #appears randomly from x coordinates 0 to 700\n urchlist.append(urchin(randrange(0,700),750,100,100,\"brown\"))\n #1/8 chance a green urchin is created\n elif r==4:\n #appears randomly from x coordinates 200 to 700\n urchlist.append(urchin(randrange(200,700),0,100,100,\"green\"))\n #1/2 chance orange urchin is created\n else:\n #appears randomly from y coordinates 0 to 590\n urchlist.append(urchin(1000,randrange(0,590),100,100,\"orange\"))\n #urchinitializer is set back to 0 and returned to main\n self.urchinitializer=0", "title": "" }, { "docid": "18206e61abd6c9600a1273b8ef28da26", "score": "0.43632042", "text": "def copy_skel(self):\n nodes = {\n node.index: node\n for node in [node.skel() for node in self.node_index.values()]\n }\n for node in nodes.values():\n [node.add(nodes[v.index]) for v in self[node.index].children]\n tree = Tree(nodes=nodes, name=self.name)\n [tree.register_modified(node) for node in self.node_index.values()]\n return tree", "title": "" }, { "docid": "9aafa4c9bb02d70f9382c0bdea0119ef", "score": "0.43581724", "text": "def dagger(self, inplace = True):\n if inplace:\n A = self\n else:\n A = self.copy()\n\n for site in range(A.L):\n for i in range(A.Ws[site].shape[0]):\n for j in range(A.Ws[site].shape[1]):\n A.Ws[site][i,j,:,:] = A.Ws[site][i,j,:,:].T.conj()\n\n return A", "title": "" }, { "docid": "8fac173c9eb7772a7dcba4af5f14c53d", "score": "0.43551165", "text": "def heapify(self) -> None:\n i = self.size\n\n while i <=0:\n self.restore_down(i)\n i -=1", "title": "" }, { "docid": "ded4575750e61ac0ebab82ef7950efe9", "score": "0.43506262", "text": "def grow(self, instances):", "title": "" }, { "docid": "44de96a65746ce50d30b77fb59783dee", "score": "0.43463886", "text": "def __heapify_up (self,i):\n\t\tif i > 0:\n\t\t\tp = self.__parent(i)\n\t\t\tif self.__h[p]>self.__h[i]:\n\t\t\t\tself.__h[p], self.__h[i] == self.__h[i], self.__h[p]\n\t\t\t\tself.__heapify_up(p)", "title": "" }, { "docid": "4c54d60b30ea5f63a2e21e9f7d084206", "score": "0.43403128", "text": "def list_to_tree(alist, none_and_holes=False, base_path=[0]):\n def process_one_item(alist, tree, track):\n path = Path(Array[int](track))\n if len(alist) == 0 and none_and_holes: \n tree.EnsurePath(path)\n return\n for i,item in enumerate(alist):\n if hasattr(item, '__iter__'): #if list or tuple\n track.append(i)\n process_one_item(item, tree, track)\n track.pop()\n else:\n if none_and_holes: \n tree.Insert(item, path, i)\n elif item is not None: \n tree.Add(item, path)\n \n tree = Tree[object]()\n if alist is not None: \n process_one_item(alist, tree, base_path[:])\n return tree", "title": "" }, { "docid": "819e449ebbec1c2ef3c1e1981c456203", "score": "0.43359423", "text": "def __init__(self, noOfElements, limitOfRestructuring):\r\n\r\n 
self.heap = [ 'NaN' ] * noOfElements\r\n self.noOfElements = noOfElements\r\n self.i = 1\r\n self.noOfRemovedElements = 0\r\n self.limitOfRestructuring = limitOfRestructuring", "title": "" }, { "docid": "ece9e86240cdcef840b7055c2de1d202", "score": "0.43336755", "text": "def reconstructQueue(self, people: List[List[int]]) -> List[List[int]]:\n people.sort(key=lambda x: (-x[0], x[1]))\n res = []\n for cur in people:\n res.insert(cur[1], cur)\n return res", "title": "" }, { "docid": "71c79b9649bc09b12e7ecc3ff1ea5a05", "score": "0.43331078", "text": "def to_unique(simple_list):", "title": "" }, { "docid": "88de85d248465af53599df868edc9fcf", "score": "0.43289107", "text": "def build_heap(self, seq):\n self.size = len(seq)\n self.heaplist = [0] + seq[:] # O(n) space\n i = len(seq) // 2\n while i > 0:\n self.sift_down(i)\n i -= 1", "title": "" }, { "docid": "8a447fec76b393ec98faf22acdcf3b8b", "score": "0.43252972", "text": "def unpair(self):\n if self.IsPaired:\n curr_idx = self.Index\n first = StructureNode(Data=Stem(self.Start))\n last = StructureNode(Data=Stem(self.End))\n \n if self.Length > 1: #not melting the whole helix\n self.Start += 1\n self.End -= 1\n self.Length -= 1\n result = [first, self, last]\n else: #melting the whole helix\n result = [first] + self + [last]\n #replace current record in parent with the result\n #note use of a slice assignment instead of an index! This is to\n #replace with the elements, not with a list of the elements.\n self.Parent[curr_idx:curr_idx+1] = result\n return True\n else:\n return False", "title": "" }, { "docid": "b600f879a14c5235a6b77f87e7a71911", "score": "0.43247727", "text": "def li(lst):\n return ['<li>%s</li>' % element for element in lst]", "title": "" }, { "docid": "e9715083eb9e94483cc64167de15a438", "score": "0.43215057", "text": "def __init__(self):\n self.container = []", "title": "" }, { "docid": "e9715083eb9e94483cc64167de15a438", "score": "0.43215057", "text": "def __init__(self):\n self.container = []", "title": "" }, { "docid": "62977dd1fa1cbe017d2962b69bb92283", "score": "0.432043", "text": "def Slidify(self):\n pass", "title": "" }, { "docid": "9fd5acb70982a51f257e22a38247e885", "score": "0.43195918", "text": "def __init__(self):\n super().__init__()\n self._l = [[]]", "title": "" }, { "docid": "d5fd5d2bcb7b695ac2927f6c6bb575b7", "score": "0.43181065", "text": "def homogenize(stuff):\n l = []\n ll = [l]\n stuff = list(stuff[:])\n d = None\n while len(stuff):\n element = stuff[0]\n delement = depth(element)\n if delement == d or not l:\n l.append(element)\n else:\n l = [element]\n ll.append(l)\n d = delement\n stuff = stuff[1:]\n return join(*ll)", "title": "" }, { "docid": "584a20c3b26e9099d763990fda196b42", "score": "0.43168846", "text": "def __init__(self,l):\n if isinstance(l,list):\n if isinstance(l[0],list):\n l = self._list_from_cycle(l)\n self._list = l \n else:\n # try to turn object into list\n self._list = list(l)\n self.__init__(l)\n\n self._hash = None", "title": "" }, { "docid": "106bed7fe626e0862bdc9245ff7498d8", "score": "0.43151355", "text": "def triangulate(self):\n # To be replaced soon to remove scipy dependency\n from scipy.spatial import Delaunay\n if self._vertices is None:\n return\n pos2 = np.delete(self._vertices, 2, 1)\n tri = Delaunay(pos2)\n self._faces = tri.simplices\n self._convex_hull = tri.convex_hull", "title": "" }, { "docid": "4aa4ef2eae9fd6f387556557414f8148", "score": "0.43150973", "text": "def create_tour():\n return permutation([i for i in range(1, NUM_CITIES + 1)]).tolist()", "title": "" }, 
{ "docid": "56f5fded42f321790e98a7cc5a321d55", "score": "0.43139422", "text": "def __init__(self):\r\n self.elements = []", "title": "" }, { "docid": "f491b211e18dba956eb592d5dd020db2", "score": "0.43132955", "text": "def __init__(self):\n self.heap = []", "title": "" } ]
7df19a930475e0746a2207f71f3a444d
check that detect zero parameters works
[ { "docid": "5f0d8fb03363c103de54e05bdf38ccf2", "score": "0.0", "text": "def test_detect_special_parameters(self): \n \n expected = set(['I3x32', 'etaWS', 'conjg__CKM3x2', 'CKM1x2', 'WT', 'I1x32', 'I1x33', 'I1x31', 'I2x32', 'CKM3x1', 'I2x13', 'I2x12', 'I3x23', 'I3x22', 'I3x21', 'conjg__CKM2x1', 'lamWS', 'conjg__CKM2x3', 'I2x23', 'AWS', 'CKM1x3', 'conjg__CKM3x1', 'I4x23', 'ymc', 'ymb', 'yme', 'CKM3x2', 'CKM2x3', 'CKM2x1', 'ymm', 'conjg__CKM1x3', 'Me', 'ym', 'I2x22', 'WTau', 'lamWS__exp__2', 'lamWS__exp__3', 'yc', 'yb', 'ye', 'MC', 'MB', 'MM', 'conjg__CKM1x2', 'I3x31', 'rhoWS', 'I4x33', 'I4x13'])\n zero, one = self.model.detect_special_parameters()\n result = set(zero)\n self.assertEqual(len(result), len(expected))\n\n self.assertEqual(expected, result)\n \n expected = set(['conjg__CKM3x3', 'conjg__CKM2x2', 'CKM1x1', 'CKM2x2', 'CKM3x3', 'conjg__CKM1x1'])\n result = set(one)\n self.assertEqual(expected, result)", "title": "" } ]
[ { "docid": "17b43d7805ad3ca46fd86dec2647d914", "score": "0.75550795", "text": "def _getzero_check(self):\n raise NotImplementedError", "title": "" }, { "docid": "5eb4d97ff5ea5e534d14ab8d79542583", "score": "0.68417084", "text": "def is_zero(self):\n raise NotImplementedError()", "title": "" }, { "docid": "e80e0404e2a5885056ec94ec92f4ad3c", "score": "0.68210536", "text": "def _getzero_correct(self):\n raise NotImplementedError", "title": "" }, { "docid": "36ef178bfeecfec3c4ec97ab588065de", "score": "0.6778943", "text": "def __nonzero__():", "title": "" }, { "docid": "c460c66042ec62cb9d5e8ea8eb365fca", "score": "0.67253494", "text": "def is_zero(self):\n if self._ovn == 'ZERO' or \\\n (self.narg() == 0 and isinstance(self._ovn,float) and self._ovn == 0.0):\n return True\n else:\n return False", "title": "" }, { "docid": "dcc5e2935432834bb057dc18bb779a41", "score": "0.67061937", "text": "def test__empty_params_routines(self):\n oq_parser = OqasmParser()\n oq_parser.build()\n reverse_dic = {v: k for k, v in oq_parser.standard_gates.items()}\n for op in GATE_DATA:\n success = False\n data = \"\"\n if op[3] > 0:\n if op[0] == \"U\":\n data += reverse_dic[op[0]] + \"(0) q[0];\\n\"\n else:\n data += reverse_dic[op[0]] + \" q[0];\\n\"\n print(data)\n oq_parser = OqasmParser()\n oq_parser.build()\n try:\n oq_parser.parse(HEADER + data)\n except (InvalidParameterNumber, WrongParams) as e:\n success = True\n self.assertTrue(success, \"The parser failed to raise \" +\n \"Invalid Parameter number for \"\n + reverse_dic[op[0]])", "title": "" }, { "docid": "2edd6a6a135fb066beb2eb26cb739ecc", "score": "0.66844743", "text": "def _argcheck(self, t):\n return t >= 0", "title": "" }, { "docid": "df7b09ca9afda4c5bb984ff691faba31", "score": "0.6682295", "text": "def test_check_param(self):\n param_p1 = parameters.Parameter(\"p1\", 1.0, 0.1, 10.0)\n p1_result = parameters.check_parameter(param_p1, 3.0,\n self.arguments) \n self.assertTrue(p1_result == None)\n\n param_p2 = parameters.Parameter(\"p2\", 1.0, 0.0, 10.0)\n p2_result = parameters.check_parameter(param_p2, 0.00001,\n self.arguments)\n self.assertFalse(p2_result == None)\n self.assertTrue(p2_result.too_low)", "title": "" }, { "docid": "d3caa9241221a347f593f216609e91a8", "score": "0.66347766", "text": "def test_if_input_is_zero(self):\n self.assertEquals(primenum(0), \"Numbers less than or equal to zero are not allowed!\")", "title": "" }, { "docid": "31b2a7f0f000791a40ddd44db51d7652", "score": "0.657551", "text": "def checkParameters(self):\n self.DEBUG(\"EDPluginBioSaxsISPyBv1_0.checkParameters\")\n self.checkMandatoryParameters(self.dataInput, \"Data Input is None\")\n self.checkMandatoryParameters(self.dataInput.sample, \"Sample is None\")", "title": "" }, { "docid": "b220e2d2a64c48cee912a47a08f2b668", "score": "0.6510332", "text": "def _no_observation(self, *_):\n return False, 0.0", "title": "" }, { "docid": "253f37790d74156129c7d7acc8deb2d1", "score": "0.65055823", "text": "def test_zeros(self):\n\t\t\tself.assertRaises(ValueError, matrix, 0)", "title": "" }, { "docid": "a6c60bdb788df244e1e518bf4ca045f3", "score": "0.64707655", "text": "def test_ZeroArg(self):\n asf = autoself(_test0)\n self.assertEqual(42,asf(42))", "title": "" }, { "docid": "6815a23e229a23658b35d56e176c39d0", "score": "0.6470355", "text": "def test_none_condition():", "title": "" }, { "docid": "9514478cbf0d08a73669e692a32f4cab", "score": "0.64468265", "text": "def __nonzero__(self):", "title": "" }, { "docid": "f84d68f2660ee63cefaa8c7d4a0a17b5", "score": "0.643971", "text": 
"def __nonzero__(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "65e270881749e3c7cfc9266c893c9434", "score": "0.6422018", "text": "def check(self): # real signature unknown; restored from __doc__\n return 0", "title": "" }, { "docid": "080f773b4a559e9d9a1b5dcb9206bb06", "score": "0.64211464", "text": "def __nonzero__(self):\r\n\r\n return self.isValid()", "title": "" }, { "docid": "e36a492658c420d0e7ade55d965a2197", "score": "0.6406167", "text": "def gevent_zero_check(struct_uv_check_s, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__\n pass", "title": "" }, { "docid": "35f19921e9e75251bbb81f3302a7236e", "score": "0.63937426", "text": "def test_check_conserved_bits_none(self):\n self.assertTrue(bitstring.check_conserved_bits(0, 0))", "title": "" }, { "docid": "da5354fbe5b0ef244ce6992cd5ed7212", "score": "0.6391714", "text": "def test1(self):\n ret = DataLayer.tst_test_none(0)\n self.assertEqual(0, ret)", "title": "" }, { "docid": "d0d7e7da3c06786c31c3b03ee9765879", "score": "0.6387313", "text": "def validate_params(self):", "title": "" }, { "docid": "51c3de588e3c23b3c97e7235fb968144", "score": "0.63830584", "text": "def is_valid_0_1(param):\n return param == '0' or param == '1' or param == 0 or param == 1", "title": "" }, { "docid": "6daf34977d2b0bbd718fcfeab4db0d86", "score": "0.63829416", "text": "def test_zero_input(self):\r\n\t\tself.assertEqual(GeneratePrime(0), \"Retry with a positive integer greater than 1\")", "title": "" }, { "docid": "dc2af5676a1b6f68ee67af90ad604b18", "score": "0.6374557", "text": "def test_null_input1(self):\n input_validation = self.algo.validate_input()\n self.assertEqual(input_validation, {'error': 'inputs are not entered'})", "title": "" }, { "docid": "6df3245810e5f0dee2fd0e77252fa6aa", "score": "0.63456774", "text": "def test_for_zero(self):\r\n \tself.assertFalse(isPrime(0))", "title": "" }, { "docid": "76fb7e46d2b1316602df628afeab63f6", "score": "0.634103", "text": "def _setzero_check(self, newval):\n raise NotImplementedError", "title": "" }, { "docid": "4de1d274c730dd9521c13207d815fc48", "score": "0.6335995", "text": "def isZero(self):\n return (self.x == 0) and (self.y == 0) and (self.z == 0)", "title": "" }, { "docid": "09df8edba7f6662b7faf55c10afec34a", "score": "0.6293586", "text": "def isZero(self):\n for scalar in self.mData:\n if scalar != 0:\n return False\n return True", "title": "" }, { "docid": "43bb4edf3391e742e8970107dae5deed", "score": "0.6274113", "text": "def check_preconditions(self):", "title": "" }, { "docid": "2a67f2d761a0f10179d43a678b64234c", "score": "0.6262317", "text": "def test_add_zero_checker(self):\n\n # Normal cases\n assert not self.add_zero_checker(\"5/10\", \"-5/10\")\n assert not self.add_zero_checker(\"-5/10\", \"5/10\")\n assert not self.add_zero_checker(\"5/-10\", \"5/10\")\n assert self.add_zero_checker(\"5/10\", \"5/10\")\n assert self.add_zero_checker(\"521/10\", \"-5/10\")\n assert self.add_zero_checker(\"5/10\", \"32/-10\")\n assert self.add_zero_checker(\"5/-10\", \"5/-10\")\n assert self.add_zero_checker(\"-5/10\", \"-5/10\")\n\n # Invalid arguments\n assert self.add_zero_checker(\"this is\", \"an error\") is None\n assert self.add_zero_checker(\"Error\", \"123/456\") is None\n assert self.add_zero_checker(\"123/456\", \"Error\") is None\n assert self.add_zero_checker(240, 234.5) is None", "title": "" }, { "docid": "ab320f7ec700b90fc3eefc4641c56221", "score": "0.62381965", "text": "def test_zero_length(self):\n 
self.assertRaises(ValueError, fvec, 0)", "title": "" }, { "docid": "64b053449b61e4a835bf6f688aaddeee", "score": "0.6237972", "text": "def checkParameters(self):\n EDVerbose.DEBUG(\"*** EDPluginControlDiffractionCTv1_0.checkParameters\")\n self.checkMandatoryParameters(self.getDataInput(), \"Data Input is None\")\n self.checkMandatoryParameters(self.getDataInput().getImage(), \"No path to input image\")\n self.checkMandatoryParameters(self.getDataInput().getDestinationDirectory(), \"No path to destination directory\")\n self.checkMandatoryParameters(self.getDataInput().getSinogramFileNamePrefix(), \"No sinogram prefix given\")\n self.checkMandatoryParameters(self.getDataInput().getPowderDiffractionSubdirectory(), \"No subdirectory prefix for powder diffraction patterns\")", "title": "" }, { "docid": "5ee30567851d62605943b1237059daff", "score": "0.62347", "text": "def is_zero(x):\n return -epsilon < x < epsilon", "title": "" }, { "docid": "b98fbae20cd312722727f5ab42624ff9", "score": "0.62072676", "text": "def _is_zero(x):\n return np.fabs(x).min() < _ZERO_THRESHOLD", "title": "" }, { "docid": "cb4f1a92d9fbc1dae873249b04b46892", "score": "0.6204366", "text": "def isZero(self):\n return self.x == 0 and self.y == 0", "title": "" }, { "docid": "7e007332681e92f60c2cd3483a477af0", "score": "0.6203695", "text": "def is_zero(self):\n return self.coefficients[0] == 0", "title": "" }, { "docid": "f924d1a6318c1e9aff7d91cca17ac080", "score": "0.6203347", "text": "def isZero(self) -> bool:\n return (self.x == 0) and (self.y == 0) and (self.z == 0)", "title": "" }, { "docid": "428e3500fa0bcfdffb4703835f0133cd", "score": "0.62025976", "text": "def selfcheck():", "title": "" }, { "docid": "42924d3510ccd19bf4cd4ae36cdfa120", "score": "0.61949545", "text": "def test_init_bad_zero_length(self):\n self.assertRaises(ValueError, Rule, 0, 0, 1, 1, 0)", "title": "" }, { "docid": "8e7ab4918b2b78d416e826b272d7e25e", "score": "0.6178418", "text": "def __nonzero__(self):\r\n return True", "title": "" }, { "docid": "934b5ad62d4227168ff669854d7d2288", "score": "0.6172007", "text": "def get_zeros(self):\n return None", "title": "" }, { "docid": "9b3c08d19b00d9971bd8d06a6467ad58", "score": "0.6165287", "text": "def test_isempty(self):\r\n lc = sunpy.lightcurve.NOAAPredictIndicesLightCurve.create()\r\n assert lc.data.empty == False", "title": "" }, { "docid": "33ed054dffe024736202b82d3051a5ce", "score": "0.61619925", "text": "def _empty(x):\n return 0", "title": "" }, { "docid": "782336eba38f0c48bcddd86289b780ef", "score": "0.61585075", "text": "def test_null_input2(self):\n input_validation = self.algo.validate_input()\n self.assertEqual(input_validation, {'error': 'inputs are not entered'})", "title": "" }, { "docid": "02522292b5bd35b919a00bc17b0d5207", "score": "0.6152712", "text": "def verify_args(args):\r\n\trun_args = {\r\n\t\t\t\t'brick length 1': args.brick_1,\r\n\t\t\t\t'brick length 2': args.brick_2,\r\n\t\t\t\t'length': args.length,\r\n\t\t\t\t'height': args.height\r\n\t\t\t\t}\r\n\t\r\n\tfor arg in run_args:\r\n\t\tif run_args[arg] <= 0:\r\n\t\t\tprint \"Error: Negative or zero value entered for %s\" % arg\r\n\t\t\tsys.exit(1)\r\n\t\t\t\r\n\treturn run_args", "title": "" }, { "docid": "4e2fdae66771de1897cbeecffa6d6f99", "score": "0.61371666", "text": "def test_00_00_zeros(self):\n result = feature.canny(\n cp.zeros((20, 20)), 4, 0, 0, cp.ones((20, 20), bool)\n )\n self.assertFalse(cp.any(result))", "title": "" }, { "docid": "aad19b9eb8c81366ac51adb8b02437c2", "score": "0.6136356", "text": "def zero(v):\r\n 
return (v == numpy.zeros(v.shape, v.dtype)).all()", "title": "" }, { "docid": "e3c957360ec14e44d7492cfa419d329c", "score": "0.6125071", "text": "def __nonzero__(self):\n return self is not S.Zero", "title": "" }, { "docid": "4df755a702e9893b5fe819fdc751dc3f", "score": "0.6120678", "text": "def test_int_inputs(self):\n n1 = 0 \n n2 = 1.5 \n a_deg = 0\n a_deg, Rs, Rp = EQ.reflectance(n1, n2, a_deg = a_deg)\n# print(numpy.count_nonzero(numpy.isnan(Rs)))\n self.assertTrue(numpy.shape(a_deg) == (1,))\n self.assertTrue(numpy.shape(Rs) == (1,1))\n self.assertTrue(numpy.shape(Rp) == (1,1))", "title": "" }, { "docid": "0b49bacd199a98d8015a69efd2b23308", "score": "0.6113941", "text": "def test_sub_zero_checker(self):\n\n # Normal cases\n assert self.sub_zero_checker(\"5/10\", \"-5/10\")\n assert self.sub_zero_checker(\"-5/10\", \"5/10\")\n assert self.sub_zero_checker(\"5/-10\", \"5/10\")\n assert self.sub_zero_checker(\"521/10\", \"-5/10\")\n assert self.sub_zero_checker(\"5/10\", \"32/-10\")\n assert not self.sub_zero_checker(\"5/10\", \"5/10\")\n assert not self.sub_zero_checker(\"5/-10\", \"5/-10\")\n assert not self.sub_zero_checker(\"-5/10\", \"-5/10\")\n\n # Invalid arguments\n assert self.sub_zero_checker(\"this is\", \"an error\") is None\n assert self.sub_zero_checker(\"Error\", \"123/456\") is None\n assert self.sub_zero_checker(\"123/456\", \"Error\") is None\n assert self.sub_zero_checker(240, 234.5) is None", "title": "" }, { "docid": "3f07feb4f45e7f786825562b4b42eb53", "score": "0.6111859", "text": "def verify_policy(self, num_zero=1):\n for i in range(num_zero):\n if (self.smell[i] != \"0\"):\n return False\n return True", "title": "" }, { "docid": "462f7955725972db659d4853292e81d5", "score": "0.6105704", "text": "def test_null_point_find9():\n nullpoint9_args = {\n \"x_range\": [-0.1, 0.1],\n \"y_range\": [-0.1, 0.1],\n \"z_range\": [-0.1, 0.1],\n \"precision\": [0.03, 0.03, 0.03],\n \"func\": lambda x, y, z: [x, y, z],\n }\n with pytest.raises(NonZeroDivergence):\n uniform_null_point_find(**nullpoint9_args)", "title": "" }, { "docid": "8950a18d927f593b0bd0fd1163aaee06", "score": "0.6104337", "text": "def test_Gfit_nonpositive_expecteds(self):\n self.assertRaises(ZeroExpectedError, G_fit, [1, 2, 3], [0, 1, 2])\n self.assertRaises(ZeroExpectedError, G_fit, [1, 2, 3], [-1, 1, 2])", "title": "" }, { "docid": "e4f1f05640b75a7477ba990abe58229f", "score": "0.6102555", "text": "def testZeroOps(t, env):\n c = env.c1\n res = c.compound([])\n check(res)", "title": "" }, { "docid": "d6be96f012119de87139e52849f76405", "score": "0.6099838", "text": "def __nonzero__(self):\n\n return 1", "title": "" }, { "docid": "fac7548ede07ec0fcc47724ca607c6a9", "score": "0.60947776", "text": "def test_Sobol_G_raises_error_if_values_lt_zero():\n evaluate(np.array([0, -1, -.02, 1, 1, -0.1, -0, -12]))", "title": "" }, { "docid": "8b33f910f7aadc93403066f7cbafe334", "score": "0.60928196", "text": "def test_exp_no_params_e1(self):\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.exp()", "title": "" }, { "docid": "8b33f910f7aadc93403066f7cbafe334", "score": "0.60928196", "text": "def test_exp_no_params_e1(self):\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.exp()", "title": "" }, { "docid": "bbdc733a9b2b85a56adceb165cf8f9ff", "score": "0.6091585", "text": "def _all_zero(self, A):\n for x in A:\n if x != 0:\n return False\n return True", "title": "" }, { "docid": "f5a1f1b28889967eefb3b50d6025e5c2", "score": "0.60805", "text": "def _empty(X): #TODO: ad test for _empty\n\n if X is None:\n return 
True\n else:\n return np.any(np.isclose(X.shape, 0))", "title": "" }, { "docid": "07eb1084a8ac87d37353159f302a6591", "score": "0.6074433", "text": "def validate_num_is_gt_zero(num: Union[int, float, Int, Number]) -> None:\r\n if num > 0:\r\n return\r\n raise ValueError(f'Specified values is less than or equal to zero: {num}')", "title": "" }, { "docid": "26cf638354de2899e4c283a7ef064bff", "score": "0.6072645", "text": "def __nonzero__(self):\r\n return self.is_valid(self.value)", "title": "" }, { "docid": "e138887b65b919f7ea62a2ca6031128c", "score": "0.6069873", "text": "def assert_is_positive(*args: Any) -> Union[bool, NoReturn]:\n for arr in args:\n assert isinstance(arr, np.ndarray), \"All inputs must be of type numpy.ndarray\"\n assert all(arr > 0.0)\n\n return True", "title": "" }, { "docid": "5459bdf7071b34386555c16c8dca12dd", "score": "0.6069811", "text": "def hessian_is_zero(self, module: Linear) -> bool:\n return True", "title": "" }, { "docid": "c56d69239c93019064e402f909886733", "score": "0.60626936", "text": "def test_all_zero_numbers(self):\n result = plus_minus(7, [0, 0, 0, 0, 0, 0, 0])\n self.assertEquals(result, ['0.000000', '0.000000', '1.000000'])", "title": "" }, { "docid": "3133c6c8af37f492c3492a821dcee635", "score": "0.6062518", "text": "def test_default_case_2(self):\n actual = ch1.matrix_zero([2,3,1,0])\n expected = ([0, 0, 0, 0])\n self.assertTrue((actual==expected).all())", "title": "" }, { "docid": "21a8d7eac89d9c25f60ccdc1697fb591", "score": "0.60432124", "text": "def _checkValue( self ):", "title": "" }, { "docid": "0f0759fb894dc36082b765fbcd4aefc8", "score": "0.60321903", "text": "def test_invalid0(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('..')\n \n # Fetch the used genomes\n cfg = get_config()\n genome = get_invalid0(cfg)\n \n # Test the required nodes\n used_inp, used_hid, used_out, used_conn = required_for_output(\n inputs={a for (a, _) in genome.connections if a < 0},\n outputs={i for i in range(cfg.genome.num_outputs)},\n connections=genome.connections,\n )\n \n # Number of nodes are only the two outputs\n self.assertEqual(len(used_inp), 0)\n self.assertEqual(len(used_hid), 0)\n self.assertEqual(len(used_out), 2)\n # No connections\n self.assertEqual(len(used_conn), 0)", "title": "" }, { "docid": "21eeca004e350a5873d9f1bb37fd2e85", "score": "0.60223913", "text": "def __nonzero__(self):\n if self.value:\n return True\n if self.value is None:\n raise ValueError(\"UncParam value is undefined\")\n return False", "title": "" }, { "docid": "f8ba06333e1e875fb695e345e21a7403", "score": "0.6014596", "text": "def checkHelp(params):\n if len(params) == 0:\n return True\n return False", "title": "" }, { "docid": "9ea07ac3dc88aa780e27b5ac22dcf304", "score": "0.60095036", "text": "def _check_param(self):\n\n d = self.options[\"n_comp\"] * self.nx\n\n if self.options[\"corr\"] != \"act_exp\":\n raise ValueError(\"MGP must be used with act_exp correlation function\")\n if self.options[\"hyper_opt\"] != \"TNC\":\n raise ValueError(\"MGP must be used with TNC hyperparameters optimizer\")\n\n if len(self.options[\"theta0\"]) != d:\n if len(self.options[\"theta0\"]) == 1:\n self.options[\"theta0\"] *= np.ones(d)\n else:\n raise ValueError(\n \"the number of dim %s should be equal to the length of theta0 %s.\"\n % (d, len(self.options[\"theta0\"]))\n )", "title": "" }, { "docid": "472eb5b36e25f724962e66700672f10d", "score": "0.60020196", "text": "def _check_params(self):\n 
utiles.check_positive(n_features=self.K, eps=self.eps, batchsize=self.batchSize, alpha=self.alpha, steps=self.steps)\n utiles.check_int(n_features=self.K, steps=self.steps)\n utiles.check_between(v_min=0, v_max=min(self.genes, self.cells), n_features=self.K)\n utiles.check_bool(normalize=self.normalize)\n utiles.check_bool(iteration=self.calculateIntialNoiseFactor)", "title": "" }, { "docid": "f027def10abbeaf0571665e0f4d2bf96", "score": "0.59989953", "text": "def test_zero(epsilon):\n # zero equals itself... (duh)\n assert nearly_equal(0.0, 0.0, epsilon=epsilon)\n assert nearly_equal(0.0, -0.0, epsilon=epsilon)\n assert nearly_equal(-0.0, -0.0, epsilon=epsilon)", "title": "" }, { "docid": "d817b9cb56ae44bc9879ecde19250383", "score": "0.599536", "text": "def has_no_empty_params(rule):\n \n defaults = (rule.defaults if rule.defaults is not None else ())\n arguments = (rule.arguments if rule.arguments is not None else ())\n return len(defaults) >= len(arguments)", "title": "" }, { "docid": "70be99d87822b1e21fd803cab901f7fc", "score": "0.59944606", "text": "def test_prepare_propfind_data_empty():\n return prepare_propfind_request_data() is None", "title": "" }, { "docid": "a10012a5617dd5ca949cfa752f2a9ad8", "score": "0.59886473", "text": "def validate_num_is_gte_zero(num: Union[int, float, Int, Number]) -> None:\r\n if num >= 0:\r\n return\r\n raise ValueError(f'Specified values is less than zero: {num}')", "title": "" }, { "docid": "02f6d44096c0946910946cb3fb35a58d", "score": "0.5985556", "text": "def validate_inputs(inputs, _):\n if inputs[\"x\"] + inputs[\"y\"] == 0:\n return \"The sum of `x` and `y` can not be zero.\"", "title": "" }, { "docid": "333e2257ead7717dc67108fd43e6c341", "score": "0.5980513", "text": "def __nonzero__(self):\n return True", "title": "" }, { "docid": "333e2257ead7717dc67108fd43e6c341", "score": "0.5980513", "text": "def __nonzero__(self):\n return True", "title": "" }, { "docid": "457f237a661b7843ad613fc505ccc157", "score": "0.5964236", "text": "def _is_parameters_ok(self):\n if self.url is None:\n raise MissingParameterException(\"Home assistant needs an URL\")\n if self.token is None:\n raise MissingParameterException(\"Home assistant needs a token\")\n if self.action is None:\n raise MissingParameterException(\"Home assistant needs an action\") \n return True", "title": "" }, { "docid": "3bcbb1d152d236aca20d2000c4ab054e", "score": "0.59638405", "text": "def any(*args):\n return 0.0 if 0.0 in args else sum(args)", "title": "" }, { "docid": "740104f80ca0cb30cec2aac02d2aee59", "score": "0.59615827", "text": "def __call__(self, *anything):\n return 0", "title": "" }, { "docid": "fb80c8ea0b04f67c5e1a5bfb7c15861b", "score": "0.5959414", "text": "def isposzero(x):\n # TODO: If copysign or signbit are added to the spec, use those instead.\n shape = x.shape\n dtype = x.dtype\n if is_integer_dtype(dtype):\n return true(shape)\n return equal(divide(one(shape, dtype), x), infinity(shape, dtype))", "title": "" }, { "docid": "430d135709efec11651c34f55b60ea10", "score": "0.59537935", "text": "def is_zero(self):\n return self == Sign.ZERO", "title": "" }, { "docid": "3c6fbdabd6c20a657ac6a6b2ed60336c", "score": "0.5949861", "text": "def test_init_bad_negative_params(self):\n self.assertRaises(ValueError, Rule, -1, 0, 1, 0, 1)\n self.assertRaises(ValueError, Rule, 0, -1, 1, 1, 1)\n self.assertRaises(ValueError, Rule, 0, 0, -1, 0, 5)\n self.assertRaises(ValueError, Rule, 0, 0, 0, -1, 1)\n self.assertRaises(ValueError, Rule, 0, 0, 1, 1, -1)", "title": "" }, { "docid": 
"8bac4216fff89aaa3dd06ecc7bbf7ae2", "score": "0.59447783", "text": "def test_00_empty(self): \n param = {}\n actual = club_functions.get_average_club_count(param)\n expected = 0.0\n msg = \"Expected {}, but returned {}\".format(expected, actual)\n self.assertAlmostEqual(actual, expected, msg=msg)", "title": "" }, { "docid": "a6ca7b1eb7ff5f0f4de1c6bf463cf72e", "score": "0.5936084", "text": "def IsEmptyMS(self) -> int:", "title": "" }, { "docid": "08a4e1cd89e6a136f5ceaa998ecbae51", "score": "0.59360325", "text": "def isEmpty(self, *args):\n pass", "title": "" }, { "docid": "55bf29cb3d2d0f3307e6f47799fde1ac", "score": "0.5926537", "text": "def test_empty_array(self):\n result = solution([], 9)\n self.assertFalse(result)", "title": "" }, { "docid": "7b0d499c5f83384616c7eaf1de652412", "score": "0.5925938", "text": "def validate_params(self):\n logging.debug(\"Not yet implemented\")\n pass", "title": "" }, { "docid": "1bb5547cc6d1b2c127559116fecd27fb", "score": "0.5925258", "text": "def test_zero_vector(self):\n\n dataset = DenseDesignMatrix(X = as_floatX(np.zeros(())))\n\n #the settings of subtract_mean and use_norm are not relevant to\n #the test\n #std_bias = 0.0 is the only value for which there should be a risk\n #of failure occurring\n preprocessor = GlobalContrastNormalization( subtract_mean = True,\n std_bias = 0.0,\n use_norm = False)\n\n dataset.apply(preprocessor)\n\n result = dataset.get_design_matrix()\n\n assert not np.any(np.isnan(result))\n assert not np.any(np.isinf(result))", "title": "" }, { "docid": "8b58e50522a3dd52ea252622c72a146c", "score": "0.5920778", "text": "def is_zero(self):\n return self.data[\"secs\"] == 0 and self.data[\"nsecs\"] == 0", "title": "" }, { "docid": "9ed9a4b167583a48f66819f0cd2f87fe", "score": "0.5920762", "text": "def __definedParameters__(self):\n\t\tif(self.theta == None and self.phi == None):\n\t\t\treturn(False)\n\t\telse:\n\t\t\treturn(True)", "title": "" }, { "docid": "ec5c01dfb3431fc1c4b96271b5bb69ca", "score": "0.591811", "text": "def test_is_zero_not_prime(self):\n self.assertFalse(is_prime(0))", "title": "" }, { "docid": "def43d244601946bfa249c4a8a47b5c2", "score": "0.59163415", "text": "def checkExit(params):\n \n if len(params) == 0:\n return True\n return False", "title": "" }, { "docid": "84cc5e16da7a022f9a8cfbc13a358776", "score": "0.59148645", "text": "def nonzero(self):\n return self.xsp != 0", "title": "" }, { "docid": "fcbbddf0b8a4555c2e9e61aa8b9feb9f", "score": "0.5914665", "text": "def isZero(self):\n for i in self.__mData:\n if i!=0:\n return False\n\n return True", "title": "" }, { "docid": "077b072117412fc973b2b694cabaeedc", "score": "0.5910287", "text": "def _argcheck(self, *args):\n cond = 1\n for arg in args:\n cond = logical_and(cond, (asarray(arg) > 0))\n return cond", "title": "" }, { "docid": "3ddcaf0f61816f5ab44aa4ea8676a233", "score": "0.59082896", "text": "def _checkparams_(self):\n # check vlan id\n if self.features == 'vlan':\n if not self.vlan_id:\n self.module.fail_json(msg='Error: missing required arguments: vlan_id.')\n\n if self.vlan_id:\n if self.vlan_id <= 0 or self.vlan_id > 4094:\n self.module.fail_json(\n msg='Error: Vlan id is not in the range from 1 to 4094.')\n # check version\n if self.version:\n if self.version <= 0 or self.version > 3:\n self.module.fail_json(\n msg='Error: Version id is not in the range from 1 to 3.')", "title": "" }, { "docid": "08ecdc0d1de6623081e3e7bf7e199f2f", "score": "0.5901938", "text": "def is_zero (expr):\n if isinstance(expr, Tensor):\n return expr.name.startswith('0')\n 
if isinstance(expr, Transpose):\n return is_zero(expr.args[0])\n if expr == S(0):\n return True", "title": "" } ]
7758ec47526a6a5d91f6c907195b382e
Decorator to register config source. Configuration source is a callable with one required argument configuration object to populate. It may have other required and optional arguments.
[ { "docid": "61adbc54e1fc95858d9a24d3a7aabc04", "score": "0.72159165", "text": "def config_source(source, config_type='dict', force=False):\n def wrapper(f):\n group = _config_sources[config_type]\n if source in group and not force:\n raise AssertionError('Already registered: %s' % source)\n group[source] = f\n return f\n return wrapper", "title": "" } ]
[ { "docid": "4a7b79b255267a1c1647426b5c034cae", "score": "0.6829033", "text": "def with_config_source(self, config_source):\n self.add_config_source(config_source)\n return self", "title": "" }, { "docid": "7b897da9424b7612ad6f0a34ce951dc2", "score": "0.62327915", "text": "def load_to(config, from_source, config_type, *args, **kwargs):\n group = _config_sources.get(config_type)\n if group is None:\n raise ConfigSourceError('Unknown config type: %s' % config_type)\n\n loader = group.get(from_source)\n if loader is None:\n raise ConfigSourceError('Unknown source: %s (config type: %s)'\n % (from_source, config_type))\n\n return loader(config, *args, **kwargs)", "title": "" }, { "docid": "4d93e2cdb41a121c14f7a9a56ac1d2ca", "score": "0.59062517", "text": "def source_parse(configuration, in_source=False):\n # print(\"Parsing conf: \", configuration)\n if not in_source:\n if type(configuration) == list:\n return [source_parse(i, in_source) for i in configuration]\n\n if type(configuration) != dict:\n return configuration\n\n return {k: source_parse(v, k == 'source') for k, v in configuration.items()} # for each key it recurses but checks\n # if the key is source and flags the\n # call if that's the case\n\n if type(configuration) != dict:\n def simple_res(*args, **kwargs):\n return configuration\n return simple_res\n\n if 'source' in configuration:\n return source_parse(configuration['source'], in_source)\n\n \"\"\"\n Here the configuration is parsed based on the type of source\n + fun: means the source of the data will be a function call\n + att: the source is an attribute of the instance\n + kwarg: the source is a keyword argument provided to the call\n \n `parsed` is a callable object which will return the data\n \"\"\"\n t = configuration['type']\n if t == 'fun':\n parsed = function_arg_parser(source_parse, **configuration)\n elif t == 'att':\n parsed = attribute_arg_parser(source_parse, **configuration)\n elif t == 'kwarg':\n parsed = kwarg_arg_parser(source_parse, **configuration)\n else:\n raise AttributeError('Neither function nor attribute')\n\n \"\"\"\n The data is going to be in the format of a pandas dataframe,\n here we provide utilities to modify the columns to either rename\n them or select only ones we're interested in\n \n rename is a dictionary accepted by Pandas' rename method\n \"\"\"\n columns = configuration.get('columns', [])\n columns, rename = src.utils.parse_columns(columns)\n rename.update(configuration.get('rename', {}))\n\n \"\"\"\n How the retrieved data has to be stored\n Possible options:\n + None: the result is simply returned\n + `#var`: the result is stored as the attribute `var`\n of the instance\n + `$var.key`: the result is stored with key `key` in the\n dictionary `var` passed through local variable dictionary\n + `var`: the result is stored in the local variable `var`\n through the local variable dictionary\n \"\"\"\n store_action = configuration.get('store', None)\n\n \"\"\"\n Possible options:\n + NoForward: doesn't forward positional and keyword arguments\n to the lower steps\n \"\"\"\n options = configuration.get('options', [])\n\n def function_returned(local, *args, **kwargs):\n \"\"\"\n @param local: the local environment, allowing the definition\n of variables\n @param args: positional arguments\n @param kwargs: keyword arguments\n @return:\n\n \"\"\"\n if 'NoForward' in options:\n args = []\n kwargs = {}\n r = parsed(local, *args, **kwargs)\n\n if len(columns) > 0:\n r = r[columns]\n\n if len(rename.keys()) > 0:\n r = r.rename(columns=rename)\n\n 
if store_action is None:\n pass\n elif store_action[0] == '#':\n setattr(local['self'], store_action[1:], r)\n elif store_action[0] == '$':\n d, k = tuple(store_action[1:].split('.')[:2])\n local[d][k] = r\n else:\n local[store_action] = r\n return r\n\n return function_returned", "title": "" }, { "docid": "9ce006a736c5d644fba3d1cff9e1317f", "score": "0.55622447", "text": "def add_config_source(self, config_source, position=None):\n rank = position if position is not None else len(self._config_sources)\n self._config_sources.insert(rank, config_source)", "title": "" }, { "docid": "f60aaa0744172e16fca0b93666de03d2", "score": "0.5469754", "text": "def requires_config_and_backend(func):\n @requires_config\n def wrapper(config, cmdargs, *args, **kwargs):\n\n backend = from_name(config.backend[\"name\"])\n\n return func(config, backend, cmdargs, *args, **kwargs)\n return wrapper", "title": "" }, { "docid": "0a01e57ee412c6ce4950b4c75cdfc46a", "score": "0.5413375", "text": "def __call__(cls, *args, **kwargs):\n\n config_values = cls.get_custom_config()\n\n # create new config layer, so when we are out of this process -> config is back to the previous value\n with config(\n config_values=config_values,\n source=cls.task_definition.task_passport.format_source_name(\n \".get_custom_config\"\n ),\n ):\n return super(AdvancedConfigTaskMetaclass, cls).__call__(*args, **kwargs)", "title": "" }, { "docid": "e528dd96b1bab9c2a9476f6a4c026a88", "score": "0.540497", "text": "def _set_dict(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n self, value = args\n if not isinstance(value, dict):\n raise TypeError(\"The from_dict() argument must be type dict\")\n if not value:\n raise ValueError(\"The from_dict() argument must not be empty.\")\n\n self.__dict__.__setitem__(\"source\", value)\n result = func(*args, **kwargs)\n return result\n\n return wrapper", "title": "" }, { "docid": "29163dadfc488b6065366b4d1f14e556", "score": "0.5341242", "text": "def _inject_config_source(self, source_filename, files_to_inject):\n # src_path = os.path.join(self.directory.root_dir, source_filename)\n # src_exec = \"[ -r %s ] && . %s\" % (src_path, src_path)\n src_exec = \"[ -r %s/%s ] && . 
%s/%s\" % (self.directory.root_dir, source_filename,\n self.directory.root_dir, source_filename)\n # The ridiculous construction above is necessary to avoid failing tests(!)\n\n for config_file in files_to_inject:\n config_path = os.path.expanduser(os.path.join(\"~\", config_file))\n if os.path.exists(config_path):\n self.injections.inject(config_path, src_exec)\n break\n else:\n config_file = files_to_inject[0]\n config_path = os.path.expanduser(os.path.join(\"~\", config_file))\n self.logger.info(\"No config files found to source %s, creating ~/%s!\" % (source_filename, config_file))\n self.injections.inject(config_path, src_exec)\n\n return (config_file, config_path)", "title": "" }, { "docid": "46b2245609cceabc1c71b079b56b060b", "score": "0.53199935", "text": "def config(self, **kwd):", "title": "" }, { "docid": "3d2981b2c9455cdb8cfa5435c0ea754a", "score": "0.53032583", "text": "def of(cls, value):\n if isinstance(value, ConfigSource):\n return value\n elif isinstance(value, dict):\n return ConfigSource(value)\n else:\n raise TypeError(u'source value must be a dict')", "title": "" }, { "docid": "6cb161b2b3c871e9fbe405dbb1b4f5b3", "score": "0.52693045", "text": "def import_config(f):\n def wrapper(*args, **kwargs):\n context = args[1]\n env = get_env(context)\n logger.info('environment: {}'.format(env))\n\n config = __import__(env + '-config')\n logger.info('imported config: {}'.format(config))\n\n args += (config,)\n return f(*args, **kwargs)\n return wrapper", "title": "" }, { "docid": "18623e85271e6b9a532ec0068680aaa5", "score": "0.5246965", "text": "def _add_user_source(self):\n filename = self.user_config_path()\n self.add(YamlSource(filename, loader=self.loader, optional=True))", "title": "" }, { "docid": "69d8e43ba9c86d416df0ed742b9da33f", "score": "0.5236078", "text": "def apply_config_property(self, name, value):\n if name == 'source':\n source = pathlib.Path(value)\n self.source = source = source.expanduser().absolute()\n self.source_viz = str(source)\n else:\n setattr(self, name, value)", "title": "" }, { "docid": "b2eda4398533abee180dce4a3cf479ad", "score": "0.5225035", "text": "def SubmitConfigSource(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "title": "" }, { "docid": "72b33840183c27f662a8708033803ef8", "score": "0.5203787", "text": "def from_config(cls, **config_params):\n return cls(cls.DATA_SOURCE_CONFIGURATION_CLS(**config_params))", "title": "" }, { "docid": "614cc07ec596b262808ff2fb4cf6b8e4", "score": "0.5192724", "text": "def register_source(cls, source_cls: Union[Any, Any]) -> Union[Any, Any]:\n cls.sources[source_cls.source] = source_cls\n return source_cls", "title": "" }, { "docid": "83111ca91165ee4889b7ca87a85e84ed", "score": "0.51920205", "text": "def _with_data_source(self, data_source):\n return self.__class__(data_source, self.labels)", "title": "" }, { "docid": "d5536ea8163e6cb87272fb6986496f14", "score": "0.51241827", "text": "def load_from(self, source, *args, **kwargs):\n kwargs = merge_kwargs(kwargs, self._defaults.get(source))\n return load_to(self, source, 'dict', *args, **kwargs)", "title": "" }, { "docid": "53d81984a1fa8fe4c2c40ca5f4d61d68", "score": "0.5117512", "text": "def _with_data_source(self, data_source):\n return self.__class__(data_source, self.labels, self._calibration)", "title": "" }, { "docid": "2cdee6fde90eeb01e6885c95f7ac075d", "score": "0.5107039", "text": "def get_config(*args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": 
"1e16224beb586a3dcb4bcacfeab24150", "score": "0.5082835", "text": "def register_data_source(name, get_data_func=None, add_coord_func=None):\n global flap_storage\n try:\n flap_storage.data_sources().index(name)\n return\n except ValueError:\n flap_storage.add_data_source(name,\n get_data_func=get_data_func,\n add_coord_func=add_coord_func)\n return ''", "title": "" }, { "docid": "8ee05579feb2a3ed65106ad256d63598", "score": "0.5047004", "text": "def from_config(cls, **config):\n raise NotImplementedError", "title": "" }, { "docid": "cc415ef2c329402af8fb7625279ad7b6", "score": "0.50460035", "text": "def decorator(f):\n def executor(*argv, **kwargs):\n \"\"\"Actual wrapper, that adds some new parameters to the decorated function 'f'\"\"\"\n if databaseConfig :\n if databaseConfig.has_connection_pool(pool_name) :\n # calculate the argument name to be assigned\n n = argument_name if argument_name is not None else pool_name\n p = databaseConfig.get_connection_pool( pool_name )\n if p :\n kwargs[ n ] = p\n else :\n _logger.error( 'Pool named %s is not initialized', pool_name )\n else :\n _logger.error( 'Pool named %s is not defined!', pool_name )\n else :\n _logger.error( 'Database configuration should be initialized with fileConfig(fileName) call.')\n f(*argv, **kwargs) #IGNORE:W0142\n return executor", "title": "" }, { "docid": "f93c4722b08b25cadae67784ca3a3fe4", "score": "0.50441486", "text": "def set_source(source):", "title": "" }, { "docid": "c0c2ae6cd1221c522c45251db32507b8", "score": "0.5035985", "text": "def _build_datasource_from_config(\n self, raw_config: DatasourceConfig, substituted_config: DatasourceConfig\n ) -> Datasource:\n # We convert from the type back to a dictionary for purposes of instantiation\n serializer = DictConfigSerializer(schema=datasourceConfigSchema)\n substituted_config_dict: dict = serializer.serialize(substituted_config)\n\n # While the new Datasource classes accept \"data_context_root_directory\", the Legacy Datasource classes do not.\n if substituted_config_dict[\"class_name\"] in [\n \"BaseDatasource\",\n \"Datasource\",\n ]:\n substituted_config_dict.update(\n {\"data_context_root_directory\": self.root_directory}\n )\n module_name: str = \"great_expectations.datasource\"\n datasource: Datasource = instantiate_class_from_config(\n config=substituted_config_dict,\n runtime_environment={\"data_context\": self, \"concurrency\": self.concurrency},\n config_defaults={\"module_name\": module_name},\n )\n if not datasource:\n raise gx_exceptions.ClassInstantiationError(\n module_name=module_name,\n package_name=None,\n class_name=substituted_config_dict[\"class_name\"],\n )\n\n # Chetan - 20221103 - Directly accessing private attr in order to patch security vulnerabiliy around credential leakage.\n # This is to be removed once substitution logic is migrated from the context to the individual object level.\n raw_config_dict: dict = serializer.serialize(raw_config)\n datasource._raw_config = raw_config_dict\n\n return datasource", "title": "" }, { "docid": "de2bf2c3e446c628ebd37f54607e0002", "score": "0.4989644", "text": "def config_stack(*sources):\n # Hack to prevent Confuse from accidentally finding a config file on the\n # system when were just trying to create an in-memory config object\n n = 'UNLIKELYAPPNAME-' + str(uuid4())\n conf = confuse.Configuration(n, read=False)\n for s in sources:\n if s:\n if isinstance(s, Mapping):\n conf.set(confuse.ConfigSource(s))\n else:\n err = 'Config sources should return Mapping-type objects'\n raise ValueError(err)\n\n # 
Hack to remove the ordereddicts, they're just ugly to look at and when\n # working with primitive data types coming from json/yaml files they're\n # generally useless.\n return json.loads(json.dumps(conf.flatten()))", "title": "" }, { "docid": "58acb3eb6b926f6b028cef8fc9d376a7", "score": "0.4963166", "text": "def ConfigureFoo(Inputs, _config):\n return Inputs", "title": "" }, { "docid": "85aece3910a04750850c88d19bc48a95", "score": "0.49631393", "text": "def configure(self, *args, **kwargs):\n raise NotImplementedError()", "title": "" }, { "docid": "ea080ed895d7cdc6668e4b2dfac9de67", "score": "0.49532774", "text": "def _add_config_dep(config_src, opdef):\n opdef.dependencies.append(\n guildfile.OpDependencyDef(_op_dep_data(config_src), opdef)\n )", "title": "" }, { "docid": "18f0ef73e4d6cfd617c317c27b79e4a3", "score": "0.49413863", "text": "def register_arguments(logan_config):\n logan_config.add_argument(\"-S\", \"--dsrc-path\", metavar=\"SRCPATH\", type=str,\n dest=\"dsrc_paths\", default=[\"rawdata\"], nargs=\"+\",\n help=\"Paths to source data, passed to datasource; multiple paths will only be used if supported by datasource used. [Default:rawdata]\")\n logan_config.add_argument(\"-g\", \"--dsrc-gen-data\",\n action=\"count\", dest=\"dsrc_gen_data\", default=0,\n help=\"Generate the source data first; specify multiple times to generate multiple times.\")\n logan_config.add_argument(\"-G\", \"--dsrc-gen-data-only\",\n action=\"count\", dest=\"dsrc_gen_data_only\", default=0,\n help=\"Exit after generation of source data (implies --dsrc-gen-data).\")\n logan_config.add_argument(\"-c\", \"--dsrc-cache-load\",\n action=\"store_true\", dest=\"dsrc_cache_load\", default=False,\n help=\"Load processed data from cache, if available and supported by datasource.\")\n logan_config.add_argument(\"-C\", \"--dsrc-cache-save\",\n action=\"store_true\", dest=\"dsrc_cache_save\", default=False,\n help=\"Save processed data to cache, if supported by datasource.\")\n logan_config.add_argument(\"-R\", \"--dsrc-raw-save\",\n action=\"store_true\", dest=\"dsrc_raw_save\", default=False,\n help=\"Save intermediate raw data, if supported by datasource.\")\n logan_config.add_argument(\"--dsrc-compress\", metavar=\"COMPRESS\", type=str,\n dest=\"dsrc_compress\", default=None,\n help=\"Specify compression scheme to compress uncompressed source data files, if supported by datasource.\")\n logan_config.add_argument(\"--dsrc-min-results\", metavar=\"COUNT\", type=int,\n dest=\"dsrc_min_results\", default=3,\n help=\"Require a minimum of COUNT results, if supported by datasource. 
[Default: 3]\")\n logan_config.add_argument(\"--dsrc-max-results\", metavar=\"COUNT\", type=int,\n dest=\"dsrc_max_results\", default=0,\n help=\"Require a maximum of COUNT results, if supported by datasource.\")\n logan_config.add_argument(\"--dsrc-ranges\",\n action=\"store_true\", dest=\"dsrc_ranges\", default=False,\n help=\"Use data error/ranges, and pass to dataoutput.\")", "title": "" }, { "docid": "98c824754cb746ebc912ddbae99633fd", "score": "0.49346998", "text": "def __init__(self, value, filename=None, default=False,\n base_for_paths=False):\n super(ConfigSource, self).__init__(value)\n if filename is not None and not isinstance(filename, str):\n raise TypeError(u'filename must be a string or None')\n self.filename = filename\n self.default = default\n self.base_for_paths = base_for_paths if filename is not None else False", "title": "" }, { "docid": "749a53eb4b69fbfac0877dba01c620cb", "score": "0.49229094", "text": "def with_secrets(self, kind, source, prefix=\"\"):\n self._secrets.add_source(kind, source, prefix)\n return self", "title": "" }, { "docid": "2e6ea8df6642e6f888652a5204bc04e0", "score": "0.49167517", "text": "def init(cls, config, src):\n cls.inputs['username']['label'] = src.str(\n 'username_label', cls.inputs['username']['label']\n )\n cls.inputs['username']['description'] = src.str(\n 'username_description', cls.inputs['username']['description']\n )\n cls.inputs['password']['label'] = src.str(\n 'password_label', cls.inputs['password']['label']\n )\n cls.inputs['password']['description'] = src.str(\n 'password_description', cls.inputs['password']['description']\n )\n cls.config = config", "title": "" }, { "docid": "3efd702a04b11bc476b0f91cdd166473", "score": "0.49064854", "text": "def fill_in_arguments(config, modules, args):\n def work_in(config, module, name):\n rkeys = getattr(module, 'runtime_keys', {})\n for (attr, cname) in iteritems(rkeys):\n v = args.get(attr, None)\n if v is not None:\n config[cname] = v\n if not isinstance(args, collections.Mapping):\n args = vars(args)\n return _walk_config(config, modules, work_in)", "title": "" }, { "docid": "fcc1a7ea5f4955d11dff9222fcf795c7", "score": "0.48821256", "text": "def do_config(self, *args):\n pass", "title": "" }, { "docid": "5b33170905fa96dbf51d0824a227906c", "score": "0.48787138", "text": "def source_flow_config(self) -> pulumi.Input['FlowSourceFlowConfigArgs']:\n return pulumi.get(self, \"source_flow_config\")", "title": "" }, { "docid": "afe708bb294682f7f82a321aaa450a5f", "score": "0.48566994", "text": "def pydistill_configure(config):", "title": "" }, { "docid": "e8c50b8f08ba3529e790837f8a2e32a7", "score": "0.48523024", "text": "def with_config_defaults(func):\n\n @functools.wraps(func)\n def decorated(*args, **kwargs):\n\n config = load()\n if config.has_option('velvet', 'config'):\n env.setdefault('config', config.get('velvet', 'config'))\n else:\n env.setdefault('config', find_config_file())\n\n return func(*args, **kwargs)\n\n return decorated", "title": "" }, { "docid": "f953fd648fbbcd7bdff2dd8efc1af573", "score": "0.48427752", "text": "def configurable(name):\n def _configurable(func):\n # If there is a . assume that the name is fully qualified.\n if '.' 
in name:\n conf_name = conf_section(*name.split('.', 1))\n else:\n conf_name = conf_section(func.__module__, name)\n conf = func_config(conf_name)\n args_list = util.func_args(func)\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n filtered_defaults = dict((a, conf.get(a))\n for a in args_list.args if a in conf)\n arguments = dict(zip(reversed(args_list.args),\n reversed(args_list.defaults or [])))\n arguments.update(kwargs)\n arguments.update(filtered_defaults)\n arguments.update(dict((k, v) for k, v in\n zip(args_list.args, args) if v))\n missing_args = [arg for arg in args_list.args\n if arg not in arguments]\n if missing_args:\n raise Exception(\n 'Configuration section %s is missing values for %s' %\n (conf_name, missing_args))\n\n return func(**arguments)\n return wrapper\n return _configurable", "title": "" }, { "docid": "61d3628ed3164db0d230b850ab515bed", "score": "0.48385224", "text": "def SubmitConfigSource(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "aa4c5f1e4931280a0dddb45e6aad006e", "score": "0.4834468", "text": "def test_collect_device_config_source_config(self):\n pass", "title": "" }, { "docid": "e2b0e59a4c26967e26491d85ef83fcbf", "score": "0.48331013", "text": "def add_datasource(self, datasource):\n config = ConfigObj(infile=self.datasources_filepath, create_empty=True)\n if datasource.name in config:\n raise DuplicateEntry()\n\n config[datasource.name] = datasource.dict()\n config.write()", "title": "" }, { "docid": "02eda5857e25dcf46604880e7e61f910", "score": "0.4823924", "text": "def config_add(context, values):\n return _config_update(context, values, None)", "title": "" }, { "docid": "9bb469e5bed360f473c2b6166aaf9dc0", "score": "0.4821074", "text": "def from_config():\n raise NotImplementedError", "title": "" }, { "docid": "d3fffeba1c482e7f946437de4710dfe9", "score": "0.4809321", "text": "def get_source(cls, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "53c04f8a55918ba043c0dab33df03efe", "score": "0.48072264", "text": "def setup(args):\n cfg = get_cfg()\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n default_setup(cfg, args)\n return cfg", "title": "" }, { "docid": "53c04f8a55918ba043c0dab33df03efe", "score": "0.48072264", "text": "def setup(args):\n cfg = get_cfg()\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n default_setup(cfg, args)\n return cfg", "title": "" }, { "docid": "97d3939cd23d1bc5c01219c0cd4ec6a7", "score": "0.47904637", "text": "def __init__(self, source=None, **kwargs):\n\n if not source:\n dictionary = {}\n elif isinstance(source, dict):\n dictionary = source\n else:\n dictionary = {}\n\n for attr in dir(source):\n # Ignore private attributes\n if not attr.startswith(\"_\"):\n value = getattr(source, attr)\n # Ignore functions\n if not callable(value):\n dictionary[attr] = value\n\n super().__init__({**dictionary, **kwargs})", "title": "" }, { "docid": "c9d12cd3029f072f323bbbee3e7e1128", "score": "0.47892445", "text": "def __init__(self, name, sources, **kwargs):\n self._kwarg_keys = list(kwargs.keys())\n self.library_name = kwargs.pop('library_name', None)\n self.strategy = kwargs.pop('strategy', 'lib')\n self.templates = kwargs.pop('templates', [])\n self.f2x_options = kwargs.pop('f2x_options', [])\n self.autosplit = kwargs.pop('autosplit', False)\n self.inline_sources = 
kwargs.pop('inline_sources', True)\n\n super(Extension, self).__init__(name, sources, **kwargs)\n\n if self.strategy is None or isinstance(self.strategy, str):\n from F2x.distutils.strategy import get_strategy\n self.strategy = get_strategy(self.strategy or 'lib')\n\n if self.templates:\n self.templates = self.strategy.load_templates(self.templates)\n\n else:\n self.templates = self.strategy.templates\n\n self.ext_modules = []", "title": "" }, { "docid": "600ffa36db4f65a52ed49a515a79899b", "score": "0.47890723", "text": "def _internal_source_config(self, request):\n # Reasoning for method imports are in this file's docstring.\n from generated.definitions import RepositoryDefinition\n from generated.definitions import SourceConfigDefinition\n\n def to_protobuf(source_config):\n parameters = common_pb2.PluginDefinedObject()\n parameters.json = json.dumps(source_config.to_dict())\n source_config_protobuf = common_pb2.SourceConfig()\n source_config_protobuf.parameters.CopyFrom(parameters)\n return source_config_protobuf\n\n if not self.source_config_impl:\n raise OperationNotDefinedError(Op.DISCOVERY_SOURCE_CONFIG)\n\n repository_definition = RepositoryDefinition.from_dict(\n json.loads(request.repository.parameters.json))\n\n source_configs = self.source_config_impl(\n source_connection=RemoteConnection.from_proto(\n request.source_connection),\n repository=repository_definition)\n\n # Validate that this is a list of SourceConfigDefinition objects\n if not isinstance(source_configs, list):\n raise IncorrectReturnTypeError(Op.DISCOVERY_SOURCE_CONFIG,\n type(source_configs),\n [SourceConfigDefinition])\n\n if not all(\n isinstance(config, SourceConfigDefinition)\n for config in source_configs):\n raise IncorrectReturnTypeError(\n Op.DISCOVERY_SOURCE_CONFIG,\n [type(config)\n for config in source_configs], [SourceConfigDefinition])\n\n source_config_discovery_response = (\n platform_pb2.SourceConfigDiscoveryResponse())\n source_config_protobuf_list = [\n to_protobuf(config) for config in source_configs\n ]\n source_config_discovery_response.return_value.source_configs.extend(\n source_config_protobuf_list)\n return source_config_discovery_response", "title": "" }, { "docid": "3ecd10ae0ce19aba230411a8a0dd87e6", "score": "0.47820434", "text": "def use_configuration(*scopes_or_paths):\n global config\n\n # Normalize input and construct a Configuration object\n configuration = _config_from(scopes_or_paths)\n config.clear_caches(), configuration.clear_caches()\n\n # Save and clear the current compiler cache\n saved_compiler_cache = spack.compilers._cache_config_file\n spack.compilers._cache_config_file = []\n\n saved_config, config = config, configuration\n\n try:\n yield configuration\n finally:\n # Restore previous config files\n spack.compilers._cache_config_file = saved_compiler_cache\n config = saved_config", "title": "" }, { "docid": "a2c2b715c92a88a26b769b5bfeb03c4c", "score": "0.4775998", "text": "def initialize(self, json_source: Callable[[], dict]):\n self.json_source = json_source", "title": "" }, { "docid": "0d976b621ccf4ef96b2c924a42931b4f", "score": "0.47729608", "text": "def load_from_pyfile(config, source, silent=False):\n is_obj = hasattr(source, 'read')\n\n d = ModuleType('config')\n\n if is_obj:\n d.__file__ = 'config'\n exec(compile(source.read(), 'config', 'exec'), d.__dict__)\n else:\n d.__file__ = source\n\n source = strip_type_prefix(source, 'pyfile')\n if not op.exists(source):\n if not silent:\n raise IOError('File is not found: %s' % source)\n return False\n\n with 
open(source, mode='rb') as config_file:\n exec(compile(config_file.read(), source, 'exec'), d.__dict__)\n\n return load_to(config, 'object', 'dict', d)", "title": "" }, { "docid": "96fdebd4383940b4c75744dbe8915ae5", "score": "0.4769613", "text": "def synchronize(registration_source,\n registration_class,\n registration_arg=_DEFAULT_REGISTRATION_KWARGS,\n write=False):\n try:\n source_dict = json.loads(registration_source)\n except (TypeError, ValueError):\n raise commandr.CommandrUsageError(\n 'Invalid JSON configuration specified via --registration_source',\n )\n\n source = load_klass_plugin(source_dict,\n klass_field_name='source_class')\n extra_kwargs = {'source': source}\n registerer = load_cli_plugin(registration_class,\n registration_arg,\n extra_kwargs=extra_kwargs)\n registerer.synchronize(write)", "title": "" }, { "docid": "34287216814d89b1d311185595bc2c8a", "score": "0.47633788", "text": "def connectionPoolAware(pool_name, argument_name=None):\n def decorator(f):\n \"\"\"Function wrapper, that returns a callable that wraps decorated function 'f' (passed as a parameter)\"\"\"\n def executor(*argv, **kwargs):\n \"\"\"Actual wrapper, that adds some new parameters to the decorated function 'f'\"\"\"\n if databaseConfig :\n if databaseConfig.has_connection_pool(pool_name) :\n # calculate the argument name to be assigned\n n = argument_name if argument_name is not None else pool_name\n p = databaseConfig.get_connection_pool( pool_name )\n if p :\n kwargs[ n ] = p\n else :\n _logger.error( 'Pool named %s is not initialized', pool_name )\n else :\n _logger.error( 'Pool named %s is not defined!', pool_name )\n else :\n _logger.error( 'Database configuration should be initialized with fileConfig(fileName) call.')\n f(*argv, **kwargs) #IGNORE:W0142\n return executor\n return decorator", "title": "" }, { "docid": "f30847d42fc3a871963877d340411343", "score": "0.47438785", "text": "def basic_logging_configuration_setup(name=None):\n\n def decorate(func):\n logname = name if name else func.__module__ + \".log\"\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n datestring = datetime.datetime.now().strftime('.%Y%m%d.%f')\n if '.' in logname:\n i = logname.rindex('.')\n prefix = logname[0:i]\n suffix = logname[i:]\n prefix += datestring\n customlogname = prefix + suffix\n else:\n customlogname = logname + datestring\n\n config = configparser.ConfigParser()\n path = os.path.dirname(__file__)\n config.read(os.path.join(path, r'appsettings.cfg'))\n logdir = get_system_config_value('Default', 'logDirectory')\n\n if not os.path.exists(logdir):\n os.makedirs(logdir)\n\n # 1. Setup basic logging configuration. This will log to both the console and the specified file\n logging.basicConfig(filename=os.path.join(logdir, customlogname),\n filemode='w',\n level=logging.DEBUG,\n format='%(asctime)s:%(levelname)s: %(message)s',\n datefmt='%Y/%m/%d %I:%M:%S %p')\n\n console = logging.StreamHandler()\n console.setLevel(logging.DEBUG)\n logging.getLogger('').addHandler(console)\n\n # 2. Setup specialized w_print logger. 
This will only log to a file in order to allow\n # \"pretty-printing\" of the output to the console - we don't want this \"pretty printing\"\n # in the log\n w_print_logger = logging.getLogger('w_print_logger')\n w_print_logger.setLevel(logging.DEBUG)\n w_print_fh = logging.FileHandler(os.path.join(logdir, customlogname))\n w_print_fh.setLevel(logging.DEBUG)\n w_print_formatter = logging.Formatter(fmt='%(asctime)s:%(levelname)s: %(message)s', datefmt='%Y/%m/%d %I:%M:%S %p')\n w_print_fh.setFormatter(w_print_formatter)\n w_print_logger.addHandler(w_print_fh)\n\n return func(*args, **kwargs)\n return wrapper\n return decorate", "title": "" }, { "docid": "5df475ef1f8f70b75901e8c8bb8e186e", "score": "0.47382614", "text": "def __init__(self, source, **kwargs):\n self.source = source", "title": "" }, { "docid": "f4b3a18341a508da2c05248d191b5df7", "score": "0.4735887", "text": "def setup(args):\n cfg = get_cfg()\n cfg.merge_from_file(args.config_file)\n if args.opts:\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n set_global_cfg(cfg)\n default_setup(cfg, args)\n return cfg", "title": "" }, { "docid": "5936dab429790746a75c64124b9516ff", "score": "0.4730099", "text": "def add_source(self, source, name=None):\n\t\tif self.randomize:\n\t\t\tif not source.can_shuffle():\n\t\t\t\traise ValueError('Cannot add a non-shuffleable source to an '\n\t\t\t\t\t'already shuffled provider.')\n\n\t\tsuper().add_source(source, name=name)\n\n\t\tif self.randomize is True:\n\t\t\tself._shuffle_len = self.entries", "title": "" }, { "docid": "d8b0d4843b1420d560e1457a854c2afc", "score": "0.47295845", "text": "def _ensure_config_dep_attrs(source):\n if source.always_resolve is None:\n source.always_resolve = True\n if source.replace_existing is None:\n source.replace_existing = True", "title": "" }, { "docid": "088993fe1f92b3e26eb2d93fe1c99e8a", "score": "0.47250885", "text": "def setup_cfg(args):\n cfg = get_cfg()\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n default_setup(cfg, args)\n return cfg", "title": "" }, { "docid": "60d53c13e8f79727aee069538b53468f", "score": "0.47216758", "text": "def __init__(self, config=None, **kwargs):\n # Select the appropriate source for configuration\n if config is None:\n use_config = kwargs\n else:\n use_config = config\n\n # Apply any registered defaults before overriding with the given configuration\n if isinstance(use_config, dict):\n use_config = self.apply_registered_defaults(use_config)\n\n # Coerce all strings in use_config to be unicode (both keys and values)\n use_config = ObjectWithSchema.coerce2unicode(use_config)\n\n # Validate the configuration\n self.config = self.validate_config(use_config)", "title": "" }, { "docid": "698b29491b763f1de0c04a543c3e9604", "score": "0.47209436", "text": "def add(config, name, spec, **extra):", "title": "" }, { "docid": "c9513aaf49ffdd1e4ef1283903b5013d", "score": "0.47201395", "text": "def configure(key: str, value) -> None:\n assert key in utils.infer_arguments\n utils.infer_arguments[key] = value", "title": "" }, { "docid": "9c35ef1121d8501fe9864da5eb062e8a", "score": "0.47184935", "text": "def _instantiate_datasource_from_config(\n self,\n raw_config: DatasourceConfig,\n substituted_config: DatasourceConfig,\n ) -> Datasource:\n try:\n datasource: Datasource = self._build_datasource_from_config(\n raw_config=raw_config, substituted_config=substituted_config\n )\n except Exception as e:\n name = getattr(substituted_config, \"name\", None) or \"\"\n raise 
gx_exceptions.DatasourceInitializationError(\n datasource_name=name, message=str(e)\n )\n return datasource", "title": "" }, { "docid": "8e7390199b53d17b71b822f55ee50915", "score": "0.4711301", "text": "def variable_config(func):\n\n def decorate():\n variables = VariableManager()\n func(variables)\n return decorate", "title": "" }, { "docid": "42fb6931105629176da6f314da92f3e5", "score": "0.47085658", "text": "def source_flow_config(self) -> Optional[pulumi.Input['FlowSourceFlowConfigArgs']]:\n return pulumi.get(self, \"source_flow_config\")", "title": "" }, { "docid": "2b34c9a93d8521369ab947dfc3fcdd2f", "score": "0.4703668", "text": "def setup_config(self, args=None):\n self.config_parse(args=args)", "title": "" }, { "docid": "9a134ae4e7be6962a83ce1278b3ed0cd", "score": "0.4693445", "text": "def configure(self, config):\n pass", "title": "" }, { "docid": "9a134ae4e7be6962a83ce1278b3ed0cd", "score": "0.4693445", "text": "def configure(self, config):\n pass", "title": "" }, { "docid": "c1eb0c08c06f211211e8f6a445aecde3", "score": "0.46915293", "text": "def config():\n pass", "title": "" }, { "docid": "c1eb0c08c06f211211e8f6a445aecde3", "score": "0.46915293", "text": "def config():\n pass", "title": "" }, { "docid": "0f92784f82eefb1be3d6d40ced42b279", "score": "0.46846497", "text": "def __call__(self, func):\n # Cant return arguments because cfgstr wont take them into account\n # def _wrapper(*args, **kwargs):\n # data = self.ensure(func, *args, **kwargs)\n # return data\n def _wrapper():\n data = self.ensure(func)\n return data\n _wrapper.cacher = self\n return _wrapper", "title": "" }, { "docid": "995dda6e8967285f9f2a15b02169cbd8", "score": "0.46759984", "text": "def __store_config(self, args, kwargs):\n signature = ('schema', 'transparent_schema_rules',\n 'ignore_none_values', 'allow_unknown', 'purge_unknown')\n for i, p in enumerate(signature[:len(args)]):\n if p in kwargs:\n raise TypeError(\"__init__ got multiple values for argument \"\n \"'%s'\" % p)\n else:\n kwargs[p] = args[i]\n self._config = kwargs", "title": "" }, { "docid": "a944bc611399bb841b918f72dbfe6887", "score": "0.46752405", "text": "def __init__(self, source, default_stream_key='_stream'):\n warnings.warn(\n \"The QcConfig object is deprecated, please use Config directly\",\n DeprecationWarning\n )\n self._default_stream_key = default_stream_key\n super().__init__(source, default_stream_key=default_stream_key)", "title": "" }, { "docid": "4dbc905e78cb4a337b0b5b5e269b9126", "score": "0.4667018", "text": "def __init__(self, src_dir, config_source):\n\n self.src_dir = src_dir\n\n # initialize the defaults\n self.defaults = {}\n self.init_defaults()\n\n try:\n self.data = config_source\n self.subparams = self.get_subparams_class()(self.data)\n self.subparams.validate(self.defaults, self.get_top_element())\n\n # make a copy of the loaded data, so that I can always tell what was derived and what was not\n self.org_data = copy.deepcopy(self.data)\n\n self.subparams.use_defaults(self.defaults)\n\n # create derived values\n self.derive()\n except RuntimeError as e:\n raise RuntimeError(\"Unexpected error while loading the config file: %s\" % e)", "title": "" }, { "docid": "d916690ee7038d7e6ed3b44cc33cd294", "score": "0.46651766", "text": "def prep_context(\n context: 'Context', mode: 'Mode', source: 'Source', settings: 'Settings'\n):\n # Read config\n obj = read_config_file(os.path.join(source.repo_dir, source.context_file))\n\n # Add the Python object to the context dictionary\n if not context.context_key:\n file_name = 
os.path.split(source.context_file)[1]\n file_stem = file_name.split('.')[0]\n context.input_dict[file_stem] = obj\n else:\n context.input_dict[context.context_key] = obj\n\n # Overwrite context variable defaults with the default context from the\n # user's global config, if available\n if settings.default_context:\n apply_overwrites_to_inputs(obj, settings.default_context)\n\n # Apply the overwrites/rides\n # Strings are interpretted as pointers to files\n if isinstance(context.overwrite_inputs, str):\n context.overwrite_inputs = read_config_file(context.overwrite_inputs)\n if context.overwrite_inputs:\n apply_overwrites_to_inputs(obj, context.overwrite_inputs)\n else:\n context.overwrite_inputs = {}\n\n # TODO: FIx the override logic in how it is interpreted by hooks\n if not context.override_inputs:\n context.override_inputs = {}\n\n # include template dir or url in the context dict\n context.input_dict[context.context_key]['_template'] = source.repo_dir\n\n logger.debug('Context generated is %s', context.input_dict)\n\n if not context.existing_context:\n context.output_dict = OrderedDict([])\n else:\n context.output_dict = OrderedDict(context.existing_context)\n\n # Entrypoint into providers.py\n get_providers(context, source, settings, mode)\n\n with work_in(context.input_dict[context.context_key]['_template']):\n return parse_context(context, mode, source)", "title": "" }, { "docid": "191e75cbdb4f931f8d31ba2ce89e8e62", "score": "0.46630445", "text": "def process_config(cls, config, **kwargs):\n del kwargs\n return config", "title": "" }, { "docid": "33c601230ecb1d68c1c7f5252206017c", "score": "0.46612802", "text": "def _patched_configure(config: Optional[inject.BinderCallable] = None, bind_in_runtime: bool = True) -> inject.Injector:\n\n if getattr(_LOCAL, \"injector\", None):\n raise inject.InjectorException(\"Injector is already configured\")\n\n _LOCAL.injector = inject.Injector(config, bind_in_runtime=bind_in_runtime)\n\n return _LOCAL.injector", "title": "" }, { "docid": "6a9fa5fa5f004f06875768f6c16faa25", "score": "0.4655759", "text": "def config(self, _config):\n _config = self.schema()(_config)\n return setattr(self, \"_config\", _config)", "title": "" }, { "docid": "51d1d17d6785026f45d0b10cab7e1210", "score": "0.46469542", "text": "def SubmitConfigSource(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "title": "" }, { "docid": "8382849c4ca3fd04aa78bae006167cef", "score": "0.46434993", "text": "def add_datasource(\n self,\n name: None = ...,\n initialize: bool = ...,\n save_changes: bool | None = ...,\n datasource: BaseDatasource | FluentDatasource | LegacyDatasource = ...,\n **kwargs,\n ) -> BaseDatasource | FluentDatasource | LegacyDatasource | None:\n ...", "title": "" }, { "docid": "85467ebe8d17233775dfe30defbb6662", "score": "0.4636636", "text": "def edit_source_config(self, **kwargs: Any) -> None:\n self.edit_source_config_dialog(kwargs.get('data', {}), selected=kwargs['selected'])", "title": "" }, { "docid": "e651322b6fd315bd28331b5a5cca3ea7", "score": "0.46270573", "text": "def add_datasource(\n self,\n name: str = ...,\n initialize: bool = ...,\n save_changes: bool | None = ...,\n datasource: None = ...,\n **kwargs,\n ) -> BaseDatasource | FluentDatasource | LegacyDatasource | None:\n ...", "title": "" }, { "docid": "be0befd238ad305b57001be0e5706108", "score": "0.4622428", "text": "def make_source(self, source_klass=None):\n pass", "title": "" }, { "docid": "1fc288953c73655390391af55ec4746c", "score": "0.46156582", "text": "def 
set_config(self, config):", "title": "" }, { "docid": "6ec3f67062823543b7c6903f79f76f82", "score": "0.4613567", "text": "def _set_config(c):\r\n raise NotImplementedError", "title": "" }, { "docid": "c2f63c4f9921ed0f225f4cbdd1d3e3fc", "score": "0.46088755", "text": "def __init__(self, cfg):\n self.sources = []\n if not ('sources' in cfg):\n msg = \"Sources are required in %s\" % cfg\n LOG.error(msg)\n raise PipelineException(msg)\n unique_names = set()\n for s in cfg.get('sources', []):\n name = s.get('name')\n if name in unique_names:\n raise PipelineException(\"Duplicated source names: %s\" %\n name, self)\n else:\n unique_names.add(name)\n self.sources.append(PollerSource(s))\n unique_names.clear()", "title": "" }, { "docid": "1a7d33457ea8b9ed2c7e05c50581fb43", "score": "0.46044767", "text": "def _ensure_config_dep(config_src, opdef):\n existing = _find_config_res_source(opdef, config_src)\n if existing:\n _ensure_config_dep_attrs(existing)\n else:\n _add_config_dep(config_src, opdef)", "title": "" }, { "docid": "023ae85489b2d3d8cb29a66b2759edd1", "score": "0.46038577", "text": "def __init__(self, storage_source=None, repo_source=None, artifact_storage_source=None, source_context=None, additional_source_contexts=None, file_hashes=None):\n self.swagger_types = {\n 'storage_source': 'StorageSource',\n 'repo_source': 'RepoSource',\n 'artifact_storage_source': 'StorageSource',\n 'source_context': 'ExtendedSourceContext',\n 'additional_source_contexts': 'list[ExtendedSourceContext]',\n 'file_hashes': 'dict(str, FileHashes)'\n }\n\n self.attribute_map = {\n 'storage_source': 'storageSource',\n 'repo_source': 'repoSource',\n 'artifact_storage_source': 'artifactStorageSource',\n 'source_context': 'sourceContext',\n 'additional_source_contexts': 'additionalSourceContexts',\n 'file_hashes': 'fileHashes'\n }\n\n self._storage_source = storage_source\n self._repo_source = repo_source\n self._artifact_storage_source = artifact_storage_source\n self._source_context = source_context\n self._additional_source_contexts = additional_source_contexts\n self._file_hashes = file_hashes", "title": "" }, { "docid": "da06cdf732f1f1c96ffcdde982826951", "score": "0.46033162", "text": "def set_config(self, input_config, args):\n for key in self.keys():\n val = None\n if key in input_config:\n val = input_config[key]\n if args.get(key) is not None:\n val = args[key]\n if val is None:\n attr = self.get(key)\n if attr.required:\n raise ValueError(f\"Missing configuration option {key}\")\n val = attr.default\n self.__setattr__(key, val)\n\n for key, val in input_config.items():\n if key in self:\n continue\n self[key] = val\n\n for key, val in args.items():\n if key in self:\n continue\n self[key] = val", "title": "" }, { "docid": "6083795701cd1565eaa79a04173cce6b", "score": "0.45996904", "text": "def add_config_argument(parser):\n parser.add_argument(\n \"--config\",\n type=str,\n help=\"Path of the configuration file.\"\n \" The file is created when generating the training dataset.\"\n \" If not set, will look for it in the default location: at\"\n \" {work_dir}/{dataset_name}/config.json\",\n )", "title": "" }, { "docid": "248aea621d494a1da6d38d02059dec0e", "score": "0.45890027", "text": "def fill_in_from_config(\n arg_name: str, arg_value: Any, config_keys: Sequence[Any], allow_none: bool = False\n) -> Any:\n\n # Avoid accessing config if argument value is provided\n if arg_value is not None:\n return arg_value\n\n # raise Exception if both, configuration value not present\n # in config file and CLI argument value 
is missing\n try:\n config_value = get_from_config(CONFIG.DATA, config_keys)\n except MissingConfigValueError:\n if allow_none:\n return None\n raise MissingConfigAndArgumentValueError(arg_name, config_keys)\n\n # Make sure argument value and\n config_keys_str = \" > \".join(config_keys)\n\n logger.info(\n f\"The '--{arg_name}' argument is being taken from configuration \"\n f\"file ({config_keys_str}), i.e., '{config_value}'.\"\n )\n\n return config_value", "title": "" }, { "docid": "ac38ca8cccd5de980ab3ece417200405", "score": "0.4584984", "text": "def declare_config(self, config):\n pass", "title": "" }, { "docid": "f334c26d79c39f6716c607f610469e82", "score": "0.4580419", "text": "def add_source(self, source: str, adapter: DataAdapter) -> None:\n if not isinstance(adapter, DataAdapter):\n raise ValueError(\"Value must be DataAdapter\")\n\n if hasattr(adapter, \"version\") and adapter.version is not None:\n version = adapter.version\n else:\n version = \"_UNSPECIFIED_\" # make sure this comes first in sorted list\n\n if hasattr(adapter, \"provider\") and adapter.provider is not None:\n provider = adapter.provider\n else:\n provider = adapter.catalog_name\n\n if source not in self._sources:\n self._sources[source] = {}\n else: # check if data type is the same as adapter with same name\n adapter0 = next(iter(next(iter(self._sources[source].values())).values()))\n if adapter0.data_type != adapter.data_type:\n raise ValueError(\n f\"Data source '{source}' already exists with data type \"\n f\"'{adapter0.data_type}' but new data source has data type \"\n f\"'{adapter.data_type}'.\"\n )\n\n if provider not in self._sources[source]:\n versions = {version: adapter}\n else:\n versions = self._sources[source][provider]\n if provider in self._sources[source] and version in versions:\n warnings.warn(\n f\"overwriting data source '{source}' with \"\n f\"provider {provider} and version {version}.\",\n UserWarning,\n )\n # update and sort dictionary -> make sure newest version is last\n versions.update({version: adapter})\n versions = {k: versions[k] for k in sorted(list(versions.keys()))}\n\n self._sources[source][provider] = versions", "title": "" }, { "docid": "f6b807367563e2ca63b49bd112fbb9e0", "score": "0.4577754", "text": "def process_config(cls, args, args_from_config):\n filename = args.pop('config', None)\n if filename is not None:\n # Read configuration\n config = cls.read_config(filename)\n\n # Read common keys\n args.update(\n cls.keys_from_config(config, args_from_config, filename)\n )\n\n args['sentence_filter'] = c.SENTENCE_FILTERS[\n args['sentence_filter']\n ]", "title": "" }, { "docid": "3d1e674512dc695efd37553ae43e4565", "score": "0.45720068", "text": "def __init__(__self__, *,\n registry_name: pulumi.Input[str],\n resource_group_name: pulumi.Input[str],\n source: pulumi.Input['ImportPipelineSourcePropertiesArgs'],\n identity: Optional[pulumi.Input['IdentityPropertiesArgs']] = None,\n import_pipeline_name: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n options: Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'PipelineOptions']]]]] = None,\n trigger: Optional[pulumi.Input['PipelineTriggerPropertiesArgs']] = None):\n pulumi.set(__self__, \"registry_name\", registry_name)\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n pulumi.set(__self__, \"source\", source)\n if identity is not None:\n pulumi.set(__self__, \"identity\", identity)\n if import_pipeline_name is not None:\n pulumi.set(__self__, \"import_pipeline_name\", 
import_pipeline_name)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if options is not None:\n pulumi.set(__self__, \"options\", options)\n if trigger is not None:\n pulumi.set(__self__, \"trigger\", trigger)", "title": "" }, { "docid": "a770661d7dda0c36db65d12b75652cb3", "score": "0.45672056", "text": "def calculated_property(**settings):\n\n def decorate(wrapped):\n def callback(scanner, factory_name, factory):\n if settings.get('context') is None:\n settings['context'] = factory\n if settings.get('name') is None:\n settings['name'] = factory_name\n scanner.config.add_calculated_property(wrapped, **settings)\n\n info = venusian.attach(wrapped, callback, category='object')\n\n if info.scope == 'class':\n # if the decorator was attached to a method in a class, or\n # otherwise executed at class scope, we need to set an\n # 'attr' into the settings if one isn't already in there\n if settings.get('attr') is None:\n settings['attr'] = wrapped.__name__\n if settings.get('name') is None:\n settings['name'] = wrapped.__name__\n\n elif settings.get('context') is None:\n raise TypeError('must supply context type for function')\n\n return wrapped\n\n return decorate", "title": "" }, { "docid": "21683b6de4ce3add5b738fcc0505d24b", "score": "0.4554913", "text": "def initialize(self, name, module, *args, **kwargs):\n module_name = self[name]['type']\n module_args = dict(self[name]['args'])\n\n msg_overwrite_cfg = ('Overwriting kwargs given in config file '\n 'is not allowed.')\n assert all([k not in module_args for k in kwargs]), msg_overwrite_cfg\n\n module_args.update(kwargs)\n\n return getattr(module, module_name)(*args, **module_args)", "title": "" }, { "docid": "abcdc67640327af3d8633cf36469708c", "score": "0.45505315", "text": "def install(self):\n self.configure_source()\n super().install()", "title": "" } ]
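Read together, the positive `config_source` decorator for this query and the `load_to` helper in the first negative passage form a small plugin registry: sources register themselves under a config type, and `load_to` looks them up and calls them with the configuration object. Below is a minimal runnable sketch of how the two fit together. The `_config_sources` registry, the decorator body, and the `load_to` lookup follow the passages above; the `'defaults'` source name, the `load_defaults` loader, and the sample values are illustrative assumptions, not part of the dataset.

```python
from collections import defaultdict

# Registry keyed by config type (e.g. 'dict'), then by source name,
# mirroring the _config_sources structure used in the passages above.
_config_sources = defaultdict(dict)

def config_source(source, config_type='dict', force=False):
    """Register a callable that populates a configuration object."""
    def wrapper(f):
        group = _config_sources[config_type]
        if source in group and not force:
            raise AssertionError('Already registered: %s' % source)
        group[source] = f
        return f
    return wrapper

@config_source('defaults')          # 'defaults' is an illustrative source name
def load_defaults(config, values):
    # First required argument is the configuration object to populate;
    # `values` is an extra, source-specific required argument.
    config.update(values)
    return True

def load_to(config, from_source, config_type, *args, **kwargs):
    # Look up the registered loader and delegate to it, as in the passage.
    loader = _config_sources[config_type].get(from_source)
    if loader is None:
        raise LookupError('Unknown source: %s' % from_source)
    return loader(config, *args, **kwargs)

config = {}
load_to(config, 'defaults', 'dict', {'debug': True, 'port': 8080})
print(config)   # -> {'debug': True, 'port': 8080}
```

Any callable registered this way must accept the configuration object as its first argument; everything after that is source-specific, which is what the query docstring means by "other required and optional arguments".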
2826709e18c7c69a2f13705c2b42a249
Check whether given key exists in the storage.
[ { "docid": "fa29db7f0ddd9ddbcc8d10b0643c2c42", "score": "0.8047286", "text": "def exists(self, key):\n raise NotImplementedError", "title": "" } ]
[ { "docid": "d342848cde6652d44924809003edcf8c", "score": "0.8223379", "text": "def exists(self, key):\n return key in self.store", "title": "" }, { "docid": "f6e7f2c042f51cced36d1e93683ef87a", "score": "0.80804545", "text": "def key_exists(self, key):\n return self.get_conn().exists(key)", "title": "" }, { "docid": "7d4447edf6df7161082ed79fbf8a819d", "score": "0.79401916", "text": "def exists(self, key):\n r = self.mc.get(key)\n if r not in (None, _empty_slot):\n return True\n else:\n return self.db.exists(key)", "title": "" }, { "docid": "d9fb99282d4d03ae9b47752742ad4a7d", "score": "0.7929692", "text": "def exists(self, key):\n return self.__conn.hexists(self.name, key)", "title": "" }, { "docid": "ddb2b5df0e376284d9c5e141be1f6bff", "score": "0.78462577", "text": "def key_exists(self, path_key: str) -> bool:\n self._assert_path_in_domain(path_key)\n return os.path.exists(path_key)", "title": "" }, { "docid": "9d769ce0835da6292cd56c35452f322c", "score": "0.78340065", "text": "def exists(self, key: str):\n return key in self._data", "title": "" }, { "docid": "6ac0d63704315353e4a1699d44552aca", "score": "0.774976", "text": "def has_key(key):", "title": "" }, { "docid": "e90373868e40c4321f8fa8df912c8f66", "score": "0.77135503", "text": "def exists(self, key):\n response = self._request('HEAD', key)\n return response.status_code == 200", "title": "" }, { "docid": "74f08ea6613f150d8e65ad443abe1a5b", "score": "0.76705456", "text": "def __contains__(self, key):\n return os.path.exists(os.path.join(self._path, str(key)))", "title": "" }, { "docid": "bb0761459b9a3a53033309beff93b7b3", "score": "0.7548233", "text": "def containsKey(self, key: str) -> bool:\n path = self._path + key\n return self._api.getEntryValue(path) is not None", "title": "" }, { "docid": "6a3db7cc64f0dfcc617ebe9412ea71ca", "score": "0.7511806", "text": "def exists(self, key):\n return bool(self.cache.exists(key))", "title": "" }, { "docid": "393b35830df5dcb6ac5cbf4267f03c25", "score": "0.7470086", "text": "def ContainsKey(self, key):\n return self.Get(key) is not None", "title": "" }, { "docid": "e18acd75cea974073a5355c06e703097", "score": "0.7456669", "text": "def __contains__(self, key):\n data = self._conn.exec_one(self._sql.has_key, key)\n if data is None:\n return False\n return True", "title": "" }, { "docid": "582a4319ae3aef4952fa9e2110dada4c", "score": "0.7429126", "text": "def has_key(self, key):\r\n return key in self", "title": "" }, { "docid": "9ba08d3723e7ffef5df39cceceefafa7", "score": "0.7413175", "text": "def key_exists(self, key):\n try:\n self.client.head_object(Bucket=self.bucket, Key=key)\n except botocore.exceptions.ClientError as e:\n if e.response[\"Error\"][\"Code\"] == \"404\":\n return False\n else:\n raise e\n\n return True", "title": "" }, { "docid": "9ba08d3723e7ffef5df39cceceefafa7", "score": "0.7413175", "text": "def key_exists(self, key):\n try:\n self.client.head_object(Bucket=self.bucket, Key=key)\n except botocore.exceptions.ClientError as e:\n if e.response[\"Error\"][\"Code\"] == \"404\":\n return False\n else:\n raise e\n\n return True", "title": "" }, { "docid": "ec2ce702ba654559795de2c44ae15456", "score": "0.7413059", "text": "def has(self, key):\n data = self.__collect_data()\n if data and key in data:\n return True\n return False", "title": "" }, { "docid": "1ef1629a9dcdec08104c1e0e5cc9c2a8", "score": "0.73997223", "text": "def has_key(self, key):\n return key in self", "title": "" }, { "docid": "1ef1629a9dcdec08104c1e0e5cc9c2a8", "score": "0.73997223", "text": "def has_key(self, 
key):\n return key in self", "title": "" }, { "docid": "7e8c0384368aa6d05a44dfb487010564", "score": "0.7386559", "text": "def __contains__(self, key):\n return self.redis.exists(key)", "title": "" }, { "docid": "3563d162e7f8048a60e19c8db722f9ff", "score": "0.7384023", "text": "def has_key(self, key):\n return self.dict.has_key(key)", "title": "" }, { "docid": "c2d64e23626b4bc90a62b6eb2903109c", "score": "0.7336986", "text": "def has_key(self, key):\n base, key = self._split_base_from_key(key)\n if base is None:\n return False\n\n return key in base", "title": "" }, { "docid": "bb7284bee6a81729247fe3f002584d3f", "score": "0.7290216", "text": "def _key_exists(self, key):\n try:\n self.s3_client.head_object(\n Bucket=self.bucket,\n Key=self._full_s3_key(key),\n **self.head_extra_args\n )\n except Exception:\n # head_object throws an exception when object doesn't exist\n return False\n else:\n return True", "title": "" }, { "docid": "273748785caf6a4382ae3a17f7cc991f", "score": "0.7234661", "text": "def key_exists(cls, session_key):\n key = cls.get_key(session_key)\n conn = get_redis_connection()\n return conn.exists(key)", "title": "" }, { "docid": "0a3b6fc49fd7bd89574a738ee5915f94", "score": "0.7231511", "text": "def check_key(db, key):\n rst = None\n # Don't allow write, and use buffers to avoid memory copy\n with db.begin(write=False, buffers=True) as txn:\n val = txn.get(key)\n if val is None:\n rst = False\n else:\n rst = True\n return rst", "title": "" }, { "docid": "105e8a266c97a69dd3c163ad2399aab2", "score": "0.7229972", "text": "def key_exists(dic, key):\n if key in dic:\n return True\n else:\n return False", "title": "" }, { "docid": "cd0b113a822049d141ff9006dc47d630", "score": "0.7200808", "text": "def has_key(self, key):\r\n key_type = str(type(key))\r\n if key_type in self.__dict__:\r\n if key in self.__dict__[key_type]:\r\n return True\r\n return False", "title": "" }, { "docid": "e7f0569712a07f02fcf7b17ed377f3f0", "score": "0.7182616", "text": "def has_key(self, key):\n return self._instances.has_key(KeyAsAttr(key))", "title": "" }, { "docid": "ac73698503f91d8ad76b59fcf99cc9ad", "score": "0.7176982", "text": "def has_key(name):", "title": "" }, { "docid": "0e3c0863351b01f24dec44ba012e229c", "score": "0.7147792", "text": "def __contains__(self, key):\n try:\n self[key]\n return True\n except KeyError:\n return False", "title": "" }, { "docid": "2db24e7b5aeb31e8c7dd8c5df9ee1865", "score": "0.71352607", "text": "def has_key(self, k):\n return k in self._dictionary", "title": "" }, { "docid": "500c8437b8cb3c62ed50ca98015f30a8", "score": "0.7124062", "text": "def contains(self, key: long) -> bool:\n ...", "title": "" }, { "docid": "d471155c67f09342828d00169c1fc50f", "score": "0.70953304", "text": "def has_key(key):\n return hasattr(SQLBatisContainer.__local__, key)", "title": "" }, { "docid": "b6dcbcc38a480d179f9626099fb1a376", "score": "0.70945615", "text": "def key_exists(dictionary: dict, key: str) -> bool:\n return key in dictionary.keys()", "title": "" }, { "docid": "fad9517f533624e0001bb833dceba9ec", "score": "0.7090564", "text": "def contains_key(self, key):\r\n # if a link with the given key exist in the hash table, return True\r\n if self.get(key) is not None:\r\n return True\r\n\r\n return False # else return False\r", "title": "" }, { "docid": "3ef7499072137f3077aba594e3f7b5a5", "score": "0.7085787", "text": "def has_key(self, filename):\n return filename in self.keys", "title": "" }, { "docid": "ba652ebe0bec77b272f51f939998fcf3", "score": "0.7073836", "text": "def 
__contains__(self, path):\n try:\n self.storage.get(self.converted_path(path))\n return True\n except KeyError:\n return False", "title": "" }, { "docid": "3c23c9d87f00f36b39c9b169ba32681d", "score": "0.7063862", "text": "def have(self, key):\n if key in self:\n return True\n return False", "title": "" }, { "docid": "3b653b03e04a3078be52d1238e54784a", "score": "0.7053854", "text": "def __contains__(self, key):\n self._check_exception()\n with self.lock:\n return key in self.data", "title": "" }, { "docid": "4b4a39425ff13ff5b6dbf4e927374445", "score": "0.7044912", "text": "def contains(self, key: int) -> bool:\n return self.buckets[self._hash(key)].exists(key)", "title": "" }, { "docid": "a4a16fc3767a22152d6927e889ebdebe", "score": "0.7042784", "text": "def __contains__(self, key):\n value = self.retrieve(key)\n return value is not None", "title": "" }, { "docid": "a4a16fc3767a22152d6927e889ebdebe", "score": "0.7042784", "text": "def __contains__(self, key):\n value = self.retrieve(key)\n return value is not None", "title": "" }, { "docid": "32734fc2ed99c8ce415d843e1761074e", "score": "0.7035795", "text": "def has(self, key): # pragma: no cover\n\t\traise NotImplementedError", "title": "" }, { "docid": "698c1b27d3d1654fb7f6d496f1320407", "score": "0.70323634", "text": "def __contains__(self, key):\n try:\n self.get_value(key)\n return True\n except KeyError:\n return False", "title": "" }, { "docid": "fc5bb5c9ea1db550dbdcf5c790764d59", "score": "0.7003904", "text": "def __contains__(self, key):\r\n return self.client.hexists(self.name, key)", "title": "" }, { "docid": "08065eff16f799a80fc6296eb0e12fce", "score": "0.69692737", "text": "def check_exists(key, dictionary):\n if key not in dictionary:\n raise YAMLParserError(f\"Key {key} not found!\")\n return True", "title": "" }, { "docid": "4d6081feb6293608296aac7480897cd4", "score": "0.6961871", "text": "def contains(self, key):\n return key in self.cache", "title": "" }, { "docid": "cf57f11030082967029a8b59f166aadc", "score": "0.6952961", "text": "def keyExists(myDictionary, key):\n if key in myDictionary.keys():\n return True", "title": "" }, { "docid": "c5dbd5453fcf0ff318f3cfb1eb4395f4", "score": "0.69080293", "text": "def exists(self, session_key):\n if self.table.get_item(session_key = session_key, consistent = getattr(settings, 'DYNAMODB_SESSIONS_ALWAYS_CONSISTENT', False)).items():\n logger.info(\"Item exists\")\n return True\n else:\n logger.info(\"Item doesn't exist\")\n return False", "title": "" }, { "docid": "03ac6a46f07395aa814cf9a07bd12c64", "score": "0.6898406", "text": "def key_exists(key, data, filename):\n\n if key not in data.keys():\n msg.error(\"'%(key)s' key is missing in %(file)s.\"\n % {\"key\": key, \"file\": filename})\n sys.exit(1)", "title": "" }, { "docid": "502a1eb520f8f86e0b8a46c461782b67", "score": "0.68977696", "text": "def has_key(dictionary, key) -> bool:\n return key in dictionary.keys()", "title": "" }, { "docid": "8ee77417937ff97eb4628ae571ca299e", "score": "0.689637", "text": "def has_key(self, name):\n return self.get(name) is not None", "title": "" }, { "docid": "906c0a3fc360e50ee4e2798b6b879d1b", "score": "0.6892095", "text": "def __contains__(self, key):\r\n hashvalue = self._find(key)\r\n if self.table[hashvalue].key == key:\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "e1d186c10eadc4f440c44bab4460b7ec", "score": "0.68910664", "text": "def contains(self, key: int) -> bool:", "title": "" }, { "docid": "8a5c8e895b08c00576ad12a5924d9a49", "score": "0.68897486", "text": "def 
__contains__(self, key):\n cursor = self._db.cursor()\n query = 'SELECT %s FROM %s WHERE %s = ?' % \\\n (self._queryBy, DBConstants._DICT_TABLE, self._queryBy)\n if cursor.execute(query, (key,)).fetchone() is None:\n return False\n return True", "title": "" }, { "docid": "ddb1e16cfe244dc3f310c2fadf56d41a", "score": "0.6884238", "text": "def contains(self, key):\n index = self._bucket_index(key)\n for _ in range(len(self.buckets)):\n if index >= len(self.buckets):\n bucket = self.buckets[index % len(self.buckets)]\n else:\n bucket = self.buckets[index]\n if not bucket.is_empty():\n if bucket.data[0] == key:\n return True\n index += 1\n return False", "title": "" }, { "docid": "3d0aefe3e2aaf480cff61eb00c8f0a91", "score": "0.6883421", "text": "def _converted_key_exists(self, key: str) -> bool:\n return key in self._converted", "title": "" }, { "docid": "98b27c551991b3e49f53ce64daf139b4", "score": "0.6878857", "text": "def __contains__(self, key):\n index = self._find_key(key)\n return index is not None", "title": "" }, { "docid": "66732c6325e05306e185d9f86a997264", "score": "0.68603206", "text": "def doesKeyExist(self, keyName, keyClass):\n raise RuntimeError(\"doesKeyExist is not implemented\")", "title": "" }, { "docid": "9f33edad54c1d6e00fe4befd948d2196", "score": "0.68481517", "text": "def contains(self, key: int) -> bool:\n hash_key = self.hash(key)\n if not self.set[hash_key]:\n return False\n for k in self.set[hash_key]:\n if k == key:\n return True\n return False", "title": "" }, { "docid": "584619021014b38de62aea77003f452b", "score": "0.6846364", "text": "def lookup(self, key):\n return self[key] is not None", "title": "" }, { "docid": "ba285352ad42ae141af1879495c2a3d6", "score": "0.6846291", "text": "def hasKey(self, key):\n for t in self.getTags():\n if t.getKey().getName() == key:\n return True\n return False", "title": "" }, { "docid": "8b97660f6a29e2dfa1f866935a7d8d14", "score": "0.6843835", "text": "def hasattr(self, key: str) -> bool:\n return key in self.__dict__", "title": "" }, { "docid": "ce9ac11f41cfdf17508e7be8f71a0066", "score": "0.68391687", "text": "def contains(self,key):\n index=self.hash(key)\n if self._buckets[index]:\n return self._buckets[index].includes(key)\n else:\n return False", "title": "" }, { "docid": "4e3eec88df8ff75a2aa3a4d510eefbaf", "score": "0.6837011", "text": "def exists(self, key_name: str) -> bool:\n\n return (\n self.session.query(PrivateKey).filter(PrivateKey.name == key_name).first()\n is not None\n )", "title": "" }, { "docid": "7eb907dbdba0684b15e92f9ff5259623", "score": "0.68306774", "text": "def check_if_exists(bucket, key):\n s3_client = boto3.client('s3')\n try:\n s3_client.head_object(Bucket=bucket, Key=key)\n return True\n except ClientError:\n # Key not found\n return False", "title": "" }, { "docid": "a97982a010c1de9be5e51bda27f67424", "score": "0.6823822", "text": "def contains(self, key):\n key_hash = self.get_hash(key)\n if self.map[key_hash] is not None:\n if key in self.map[key_hash]:\n return True\n \n return False", "title": "" }, { "docid": "886d68024900425df38ae916eb8134bc", "score": "0.6814029", "text": "def exist(self, key):\n if is_binary_str(key):\n return bytes(key) in self.children\n else:\n return key[:2] in self.matches", "title": "" }, { "docid": "a1943946eefba6b4681c0e8e0265e6cf", "score": "0.6813983", "text": "def _test_key_(self,key, softexit=False):\n if key not in self.keys:\n if softexit:\n return False\n raise ValueError(\"'%s' is not a known key, these are: \"%key+\", \".join(self.keys.tolist()))\n return 
True", "title": "" }, { "docid": "6d475463303eb786ff163e46de4c447d", "score": "0.680974", "text": "def has_key(self, name):\n return self._entries.has_key(name)", "title": "" }, { "docid": "5f660739b012828b386666c6022dee8b", "score": "0.680778", "text": "def HasKey(self, key):\n return self.FixKey(key) in self._GetKeySet()", "title": "" }, { "docid": "4571196654c0104e1f054e1aac91d280", "score": "0.68054587", "text": "def __contains__(self, key):\n try: self._item(key)\n except KeyValuePair.DoesNotExist: \n if self._parent != None: return self.parent.__contains__(key)\n else: return False\n return True", "title": "" }, { "docid": "de9cf0d060426baf6abef2627f82f749", "score": "0.6802955", "text": "def contains(self,key):\n index = self.hash(key)\n if not self._buckets[index]:\n return False\n for value in self._buckets[index] :\n if value[0] == key:\n return True", "title": "" }, { "docid": "071c2d203a9e8ff2320cc8def758cb63", "score": "0.679038", "text": "def __contains__(self, key):\n slot = self._findSlot(key, False)\n return slot is not None", "title": "" }, { "docid": "f32cc972a87f7c69f021d87b1dc02282", "score": "0.67902356", "text": "def does_key_exist(s3_bucket, key, region=DEFAULT_REGION):\n try:\n response = get_resource(\"s3\", region=region).Object(s3_bucket, key).content_length\n LOGGER.info(\"Checking if key %s in bucket %s length: %s\" % (key, s3_bucket, response))\n return response\n except ClientError as exp:\n LOGGER.info(\"Error finding S3 key s3://%s/%s: %s\" % (s3_bucket, key, exp))\n if exp.response['Error']['Code'] == \"404\":\n return False\n raise exp", "title": "" }, { "docid": "9ff50f27854c4cad49770554b3bf441c", "score": "0.6787088", "text": "def key_exists(array, key):\n if key in array:\n return True\n else: \n return False", "title": "" }, { "docid": "b3dabc008e94c5f26dbb84f99558b89f", "score": "0.67748874", "text": "def _isfile(self, bucket: str, key: str) -> bool:\n try:\n self.conn.head_object(Bucket=bucket, Key=key)\n return True\n except Exception:\n return False", "title": "" }, { "docid": "57913e0eac25300f7e45398756bd717c", "score": "0.67703503", "text": "def exists(self, key):\n return key in self.rules", "title": "" }, { "docid": "123e4d5d994777e1037d2ac9e7984ab0", "score": "0.67627615", "text": "def contains(self, key):\n # TODO: Find bucket where given key belongs\n # TODO: Check if key-value entry exists in bucket\n bucket_id =hash(key) % len(self.buckets)\n bucket_ll = self.buckets[bucket_id]\n\n node = bucket_ll.head\n\n while node is not None:\n if key == node.data[0]:\n return True\n node = node.next\n return False", "title": "" }, { "docid": "4f21bff578fa3bdb7ae79960aafd3e32", "score": "0.6751025", "text": "def __contains__(self, key):\n self.lock.acquire()\n x = key in self.dict\n self.lock.release()\n return x", "title": "" }, { "docid": "d0c4429f8d654f09d9bab3472d00e282", "score": "0.6748315", "text": "def has_cached(self, key):\n fname = self.fname(key)\n return os.path.exists(fname)", "title": "" }, { "docid": "22417f7d4d795307e7c5361976511bf0", "score": "0.6720086", "text": "def contains(self, key):\n pass", "title": "" }, { "docid": "6c0410c0c30a9c9546baa9e4a6f01885", "score": "0.67200655", "text": "def exists(self):\n try:\n self.driver.conn.storageVolLookupByKey(self.uuid)\n return True\n except libvirt.libvirtError as e:\n if e.get_error_code() == libvirt.VIR_ERR_NO_STORAGE_VOL:\n return False\n else:\n raise", "title": "" }, { "docid": "4b6f5bcb1ae042c49fa0c95fa9ad5dc6", "score": "0.6687123", "text": "def contains(self, key: int) -> 
bool:\n h_val = self._hash(key)\n if self.set[h_val] is None: #if the hashed index is empty, return Flase\n return False\n else:\n cur = self.set[h_val]\n while cur: #Traverse through the linkedlist and if find the key, return True.\n if cur.key == key:\n return True\n cur = cur.next\n return False #If while loop ends, we didn't find the key and return False.", "title": "" }, { "docid": "5b25565af36d740c3ffc19185df79c57", "score": "0.6686168", "text": "def contains(self, key):\n return key in self.hs", "title": "" }, { "docid": "406977d03ca70ba7f874139c01635a89", "score": "0.6675165", "text": "def __contains__(self, key):\n raise S3NotImplemented()", "title": "" }, { "docid": "ecbca3cb1a01e542c1a961b0e38a6490", "score": "0.666057", "text": "def file_exists(self, key):\n try:\n self.client.head_object(Bucket=self.get_bucket_for_file_key(key), Key=key)\n return True\n except ClientError as e:\n if e.response[\"Error\"][\"Code\"] == \"404\":\n return False\n else:\n LOGGER.warning(exc=e, key=key)\n raise", "title": "" }, { "docid": "a475caa4d4db8836ca5f809257b4b2fc", "score": "0.6648082", "text": "def exists(bucket, key):\n s3 = boto3.resource('s3')\n # print(f\"check if {bucket}/{key} exists in s3\")\n\n try:\n s3.Object(bucket, key).load()\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"404\":\n return False\n else:\n print(\"Unknown error.\")\n raise\n \n return True", "title": "" }, { "docid": "b7911a44e8a518c08daee0e5f976bf89", "score": "0.6642231", "text": "def get(self, key):\n try:\n return self.__store[key]\n except KeyError:\n return False", "title": "" }, { "docid": "97cf8d0ff9dcf2c4d349560ff099c4b3", "score": "0.6640306", "text": "def available_key(cls, key : int) -> bool:\r\n\r\n return key in cls.__description_structure.keys()", "title": "" }, { "docid": "9cad43e16f78d3854a0be3deef597a67", "score": "0.6635585", "text": "def __contains__(self, key):\n return key in self._keys", "title": "" }, { "docid": "a306c985d90372ffc2c6c4478b41becd", "score": "0.6626459", "text": "def check_key(shelf_arg: shelve.open, url_arg: str) -> bool:\n keys = shelf_arg.keys\n if url_arg in keys:\n return True\n return False", "title": "" }, { "docid": "542460dff5bcbb8d14a8366d7f39f25c", "score": "0.6620088", "text": "def check_for_key(checkdict, key):\n if key in checkdict.keys():\n raise Exception('Key already used in this dictionary')", "title": "" }, { "docid": "b00756b0c1e63b59cfe16a85955612cc", "score": "0.6606464", "text": "def __contains__(self, key):\n return key in self.cache", "title": "" }, { "docid": "b00756b0c1e63b59cfe16a85955612cc", "score": "0.6606464", "text": "def __contains__(self, key):\n return key in self.cache", "title": "" }, { "docid": "c210078c6ca8174473f2def28611108c", "score": "0.6599837", "text": "def exists(uuid: str, store: StoreInput) -> bool:\n store = ensure_store(store)\n key = naming.metadata_key_from_uuid(uuid)\n\n if key in store:\n return True\n\n key = naming.metadata_key_from_uuid(uuid, format=\"msgpack\")\n return key in store", "title": "" }, { "docid": "9e8044852124febc5f61af2036eed7ee", "score": "0.6584761", "text": "def __contains__(self, key: int) -> bool:\n return self.data[self.search(key)].key == key", "title": "" }, { "docid": "691069a1c4cc781315e209794d1960c6", "score": "0.6580232", "text": "def search( self, key ):\n\t\ti = 0\n\t\twhile i < self._table_size:\n\t\t\tbucket = self.double_hashing( key, i )\n\t\t\tif self._storage[bucket] == key:\n\t\t\t\treturn bucket\n\t\t\telif self._storage[bucket] is 
None:\n\t\t\t\treturn False\n\t\t\ti += 1\n\t\treturn False", "title": "" }, { "docid": "a3268b420da152f70653b4e2e6d664cd", "score": "0.65801764", "text": "def contains(self, key, *args, **kwargs):\n return self.find(key, *args, **kwargs) is not None", "title": "" } ]
5cf519e7b3d733d205b310c40cab2f3b
Builds the ServerSet based on the string passed to the function
[ { "docid": "56892db44aaf227b717a6ba7dee89fd5", "score": "0.5113373", "text": "def from_slurm_list(cls, string:str):\n servers = []\n partition = []\n regex = r\"([a-z]+)(\\[(\\d+,?|\\d+[-]\\d*)+\\]|\\d)\"\n for name, whole, _ in re.findall(regex, string):\n partition.append(name)\n inner_regex = r\"((\\d+)-?(\\d+)?)+\"\n for _, start, stop in re.findall(inner_regex, whole):\n\n try:\n start = int(start)\n stop = int(stop)\n except:\n start = int(start)\n stop = int(start)\n for i in range(start, stop + 1):\n servers.append(f\"{name}{i}\")\n return cls(servers, partition)", "title": "" } ]
[ { "docid": "c698ba992fc665300714d3a85cffef5b", "score": "0.53885657", "text": "def _make_set(var):\n if var is None:\n return set()\n if not isinstance(var, list):\n if isinstance(var, str):\n var = var.split()\n else:\n var = list(var)\n return set(var)", "title": "" }, { "docid": "7ef16c27be4a43498f5e1063d9beea9e", "score": "0.5203876", "text": "def from_string(cls, string):\n mdict = OrderedDict(json_loads(string))\n builds = []\n for build in mdict:\n builds.append(Build.build_from_dict(build, mdict[build]))\n return BuildSet(builds=builds)", "title": "" }, { "docid": "cfdf7376dc7f3d46cd1e03f14e6bfe8b", "score": "0.51809394", "text": "def search(self, filterstring):\n # TODO(sissel): add keyword arguments for things to search by.\n self.ensure_authenticated()\n params = { \"filter\": filterstring }\n response, content = self.request(\"servers.xml\", parameters=params)\n #print response\n #print content\n servers = Servers(content, self)\n return servers", "title": "" }, { "docid": "3f1a52edc3cf6578ad436ca89f830e22", "score": "0.5121126", "text": "def from_str(str, **kwargs):\n return UrlSet(StringIO(str), 'string', **kwargs)", "title": "" }, { "docid": "7e9a2b6ae45e5c6e061709aeda80ed52", "score": "0.51174617", "text": "def addServer(server):", "title": "" }, { "docid": "e9a46dce5eb8950b0bd3e3b0ea446c2b", "score": "0.5023995", "text": "def from_str(cls, s: str) -> 'ServerAddr':\n # host might be IPv6 address, hence do rsplit:\n host, port, protocol = str(s).rsplit(':', 2)\n return ServerAddr(host=host, port=port, protocol=protocol)", "title": "" }, { "docid": "4ddaee0a3c088e59c087360d62a60ce0", "score": "0.50008374", "text": "def add_set_server_parser(sub_parsers_set: argparse._SubParsersAction) -> None:\n subparser = sub_parsers_set.add_parser('server', help='Set the resto server to use',\n description='Set the resto server to use in subsequent'\n ' commands.')\n subparser.add_argument(SERVER_ARGNAME, help='name of the resto server')\n subparser.set_defaults(func=cli_set_server_parameter)", "title": "" }, { "docid": "b822a9f043f337191eece20d99b01df0", "score": "0.4918371", "text": "def set_servers(self, servers):\r\n pass", "title": "" }, { "docid": "b822a9f043f337191eece20d99b01df0", "score": "0.4918371", "text": "def set_servers(self, servers):\r\n pass", "title": "" }, { "docid": "64fe0d4dc36f7f9b44ebe46ea4f1d8c2", "score": "0.49068037", "text": "def create_servers_from_dictionary(team):\n servers = []\n try:\n for server in team.get('servers'):\n if server.get('vendor').casefold() == 'GitLab'.casefold():\n servers.append(base.Gitlab(server.get('host'), server.get('namespaces'), server.get('users'),\n server.get('repositories')))\n elif server.get('vendor').casefold() == 'GitHub'.casefold():\n servers.append(base.GitHub(server.get('host'), server.get('namespaces'), server.get('repositories')))\n else:\n servers.append(base.Gerrit(server.get('host'), server.get('bot_users'), server.get('repositories')))\n except (ValueError, TypeError) as e:\n logging.warning(f\"No valid servers found: {e}.\")\n return servers", "title": "" }, { "docid": "af52bb1bde5f2b77ef6edcc7ee73abd5", "score": "0.49050543", "text": "async def make_servers(self, sockets):", "title": "" }, { "docid": "216ea162a6f000f284c75c9f891b7910", "score": "0.48618802", "text": "def define_servers(sim_type, warm_type, letters, overall_period, capacity, week, warm):\n if warm_type == 'Itterative':\n total_arrival_days = int(warm*overall_period)\n else:\n total_arrival_days = int(1.5*overall_period)\n all_server_schedules = 
build_server_schedules(sim_type, capacity, total_arrival_days, week)\n\n if warm_type == 'Warm Start':\n if sim_type == 'Raw Pathways':\n warm = [0] + warm\n all_server_schedules_WS = copy.deepcopy(all_server_schedules)\n for i, wait in enumerate(warm):\n for w in range(wait):\n all_server_schedules_WS[i][w][0] = 0\n Schedules = all_server_schedules_WS\n else:\n Schedules = all_server_schedules\n\n return Schedules", "title": "" }, { "docid": "de9bd51b45c4bd45661c10f8cf7b9268", "score": "0.477701", "text": "def _create_server_dict(self) -> Dict[int, Set]:\n servers = {}\n for request in self.load_balancer.all_requests:\n for server_port in request.port_mapping.keys():\n service: set = servers.setdefault(server_port, set())\n service.update(itertools.product(request.backends, request.port_mapping.values()))\n return servers", "title": "" }, { "docid": "ae0f6940632d0b660fff9d1d7bb38622", "score": "0.4764163", "text": "def set_servers(self, servers):\n pass", "title": "" }, { "docid": "73eeed1ab457da8f95907cc0b351b196", "score": "0.47237366", "text": "def from_str_with_inference(cls, s: str) -> Optional['ServerAddr']:\n if not s:\n return None\n host = \"\"\n if s[0] == \"[\" and \"]\" in s: # IPv6 address\n host_end = s.index(\"]\")\n host = s[1:host_end]\n s = s[host_end+1:]\n items = str(s).rsplit(':', 2)\n if len(items) < 2:\n return None # although maybe we could guess the port too?\n host = host or items[0]\n port = items[1]\n if len(items) >= 3:\n protocol = items[2]\n else:\n protocol = PREFERRED_NETWORK_PROTOCOL\n try:\n return ServerAddr(host=host, port=port, protocol=protocol)\n except ValueError:\n return None", "title": "" }, { "docid": "a5215599ac2c3510b94ba86f1cc7030c", "score": "0.4702024", "text": "def string_set(string_file):\n string_lst = []\n with open(string_file, 'r') as infile:\n for line in infile:\n clean_line = line.lstrip().rstrip()\n parts = clean_line.split()\n string_lst.append(parts[0])\n\n string_set_return = set(string_lst)\n return string_set_return", "title": "" }, { "docid": "14c57dc66f568077d28695ec5099d71a", "score": "0.47011852", "text": "def render_srv(srv_set):\n BUILD_STR = ''\n template = Template(\"{name:$name_just} {ttl} {rclass:$class_just} {rtype:$type_just} {prio:$prio_just} {weight:$extra_just} {port:$extra_just} {target:$extra_just}.\\n\")\n template = template.substitute(name_just=name_just, class_just=class_just,\n type_just=type_just, prio_just=prio_just, extra_just=extra_just)\n for srv in srv_set:\n if srv.ttl == 3600 or srv.ttl is None:\n ttl = ''\n else:\n ttl = str(srv.ttl)\n name = srv.fqdn + '.'\n if srv.target == \".\":\n target = \"\" # There is already a trailing '.' 
in the template\n else:\n target = srv.target\n BUILD_STR += template.format(\n name=name, ttl=ttl, rclass='IN', rtype='SRV',\n prio=str(srv.priority), weight=str(srv.weight),\n port=str(srv.port), target=target)\n return BUILD_STR", "title": "" }, { "docid": "1758f780faf15b866b67c472dddb4533", "score": "0.4692607", "text": "def create_server(config_file):\n\n try:\n config = ConfigParser.SafeConfigParser()\n config.read(config_file)\n except Exception as e:\n raise Exception(\"Unable to load configuration: %s\" % e)\n\n return Slicer(config)", "title": "" }, { "docid": "821a1e1fa4fa4aa7aa148b808e442dab", "score": "0.46768466", "text": "def _config_win32_nameservers(self, nameservers):\n # we call str() on nameservers to convert it from unicode to ascii\n nameservers = str(nameservers)\n split_char = self._determine_split_char(nameservers)\n ns_list = nameservers.split(split_char)\n for ns in ns_list:\n if ns not in self.nameservers:\n self.nameservers.append(ns)", "title": "" }, { "docid": "346229c6cf3d88c153b625663b58c554", "score": "0.46763653", "text": "def fromString(string):\n etcHosts = EtcHosts()\n for line in string.splitlines():\n # strip comments\n lineWithoutComments = line.split(\"#\")[0]\n if lineWithoutComments.strip():\n entries = lineWithoutComments.split()\n ip = entries.pop(0).strip()\n etcHosts[ip] = set(entries)\n return etcHosts", "title": "" }, { "docid": "35bf7432e090d44bf1f80be3c6445997", "score": "0.46277094", "text": "def load_servers(self):\n if not self.serverlist:\n try:\n self.serverlist.clear()\n except:\n self.serverlist = set()\n\n path = self.db_path\n raw_list = []\n\n if path.exists():\n # loads the content as a raw server\n try:\n with open(path) as file:\n raw_list = json.loads(file.read())\n except:\n pass\n\n # loops through all raw server\n # and then parses all of them into a valid Server object\n for raw in raw_list:\n try:\n guild_id = int(raw['guild'])\n\n # prevent server duplication\n if self.find_server(guild_id):\n continue\n\n guild: Guild = self.bot.get_guild(guild_id)\n channel = guild.get_channel(int(raw['bot_channel'])) if raw['bot_channel'] else None\n prefix = str(raw['prefix']) if raw['prefix'] else None\n\n server = Server(guild=guild, def_prefix=self.def_prefix, prefix=prefix, bot_channel=channel)\n self.serverlist.add(server)\n except:\n print(f'Cannot parse data {raw}!')", "title": "" }, { "docid": "5057429f9b4a9907c3403c966c3c9fdf", "score": "0.46154135", "text": "def __init__(self, servers):\n BaseServerSelection.__init__(self, servers)", "title": "" }, { "docid": "5057429f9b4a9907c3403c966c3c9fdf", "score": "0.46154135", "text": "def __init__(self, servers):\n BaseServerSelection.__init__(self, servers)", "title": "" }, { "docid": "5057429f9b4a9907c3403c966c3c9fdf", "score": "0.46154135", "text": "def __init__(self, servers):\n BaseServerSelection.__init__(self, servers)", "title": "" }, { "docid": "2a767797b16af940040624b56608b32f", "score": "0.4592387", "text": "def test_set_nameservers_through_init(self) -> None:\n\n given = [\"example.org\"]\n\n resolver_provider = Resolver(nameservers=given)\n\n expected = [\n \"192.168.1.1\",\n \"10.47.91.9\",\n \"fe80::6b01:9045:a42a:fb5f\",\n \"fe80::6b01:9049:a42a:fb5f\",\n ]\n actual = resolver_provider.get_nameservers()\n\n self.assertEqual(expected, actual)\n\n expected = {\n \"192.168.1.1\": 53,\n \"10.47.91.9\": 53,\n \"fe80::6b01:9045:a42a:fb5f\": 53,\n \"fe80::6b01:9049:a42a:fb5f\": 53,\n }\n actual = resolver_provider.get_nameserver_ports()\n\n self.assertEqual(expected, 
actual)", "title": "" }, { "docid": "ee7c0c964a499136137ee8572ac851f9", "score": "0.45848668", "text": "def known_servers(self):\n ...", "title": "" }, { "docid": "e80b52d4b02951f1c6e923122a45682e", "score": "0.45734283", "text": "def string_to_config(s):\n from .machines import Configuration\n s = lexer(s)\n x = parse_multiple(s, parse_store)\n parse_end(s)\n return Configuration(x)", "title": "" }, { "docid": "ef7f8872e3aa8913a737fe4279a55505", "score": "0.45712277", "text": "def _parse_miners_string(self, miners_string):\n miners = []\n rigs = miners_string.split()\n for rig in rigs:\n parts = rig.split(':')\n host = parts[0]\n port = int(parts[1]) if len(parts) > 1 else DEFAULT_PORT\n miners.append({'host': host, 'port': port})\n return miners", "title": "" }, { "docid": "ffd417949a31e2d12aad4486ea5a6ca9", "score": "0.4569488", "text": "def create_sets(spec, body, namespace, name, logger):\n sets = \"\"\n sqlsettings = spec.get('sqlsettings')\n if not sqlsettings:\n message = \"pipeline name not determined in\"\\\n f\" {namespace}/{name}, using default\"\n logger.debug(message)\n sets = f\"SET pipeline.name = '{namespace}/{name}';\\n\"\n elif all(x for x in sqlsettings if x.get('pipeline.name') is None):\n sets = f\"SET pipeline.name = '{namespace}/{name}';\\n\"\n for setting in sqlsettings:\n key = list(setting.keys())[0]\n value = setting.get(key)\n sets += f\"SET '{key}' = '{value}';\\n\"\n # add savepoint if location is set\n try:\n savepoint_location = body['status'].get(SAVEPOINT_LOCATION)\n if savepoint_location is not None:\n sets += f\"SET execution.savepoint.path = '{savepoint_location}';\\n\"\n logger.debug(f\"Savepoint location {savepoint_location} used for sets.\")\n except KeyError:\n pass\n\n return sets", "title": "" }, { "docid": "6182aeae88d36e271433e3639df18489", "score": "0.45537505", "text": "def _create_server(self, name, tenant, security_groups=None):\r\n self._set_compute_context(tenant)\r\n if security_groups is None:\r\n security_groups = [tenant.security_groups['default'].name]\r\n create_kwargs = {\r\n 'nics': [\r\n {'net-id': tenant.network.id},\r\n ],\r\n 'key_name': tenant.keypair.name,\r\n 'security_groups': security_groups,\r\n 'tenant_id': tenant.creds.tenant_id\r\n }\r\n server = self.create_server(name=name, create_kwargs=create_kwargs)\r\n self.addCleanup(self.cleanup_wrapper, server)\r\n return server", "title": "" }, { "docid": "50f94326bcf8783a0d7920e6b85b9d3a", "score": "0.45519865", "text": "def build_set(context, builder, set_type, items):\n nitems = len(items)\n inst = SetInstance.allocate(context, builder, set_type, nitems)\n\n if nitems > 0:\n\n # Populate set. Inlining the insertion code for each item would be very\n # costly, instead we create a LLVM array and iterate over it.\n array = cgutils.pack_array(builder, items)\n array_ptr = cgutils.alloca_once_value(builder, array)\n\n count = context.get_constant(types.intp, nitems)\n with cgutils.for_range(builder, count) as loop:\n item = builder.load(cgutils.gep(builder, array_ptr, 0, loop.index))\n inst.add(item)\n\n return impl_ret_new_ref(context, builder, set_type, inst.value)", "title": "" }, { "docid": "ab9d73171a9cd4efaceb3e221e913353", "score": "0.45465904", "text": "def buildServer(self, command, tokens, index):\n if not self.currentHouse:\n msg = \"ParseError: Building verb '%s'. No current house\" % (command)\n raise excepting.ParseError(msg, tokens, index)\n\n if not self.currentStore:\n msg = \"ParseError: Building verb '%s'. 
No current store\" % (command)\n raise excepting.ParseError(msg, tokens, index)\n\n try:\n parms = {}\n init = {}\n name = ''\n connective = None\n period = 0.0\n prefix = './'\n schedule = ACTIVE #globaling.py\n order = MID #globaling.py\n rxa = ''\n txa = ''\n sha = ('', 54321) #empty host means any interface on local host\n dha = ('localhost', 54321)\n\n name = tokens[index]\n index +=1\n\n while index < len(tokens): #options\n connective = tokens[index]\n index += 1\n\n if connective == 'at':\n period = abs(Convert2Num(tokens[index]))\n index +=1\n\n elif connective == 'to':\n prefix = tokens[index]\n index +=1\n\n elif connective == 'be':\n option = tokens[index]\n index +=1\n\n if option not in ['active', 'inactive', 'slave']:\n msg = \"ParseError: Building verb '%s'. Bad server scheduled option got %s\" % \\\n (command, option)\n raise excepting.ParseError(msg, tokens, index)\n\n schedule = ScheduleValues[option] #replace text with value\n\n elif connective == 'in':\n order = tokens[index]\n index +=1\n if order not in OrderValues:\n msg = \"ParseError: Building verb '%s'. Bad order option got %s\" % \\\n (command, order)\n raise excepting.ParseError(msg, tokens, index)\n\n order = OrderValues[order] #convert to order value\n\n elif connective == 'rx':\n rxa = tokens[index]\n index += 1\n\n elif connective == 'tx':\n txa = tokens[index]\n index += 1\n\n elif connective == 'per':\n data, index = self.parseDirect(tokens, index)\n init.update(data)\n\n elif connective == 'for':\n srcFields, index = self.parseFields(tokens, index)\n srcPath, index = self.parsePath(tokens, index)\n if self.currentStore.fetchShare(srcPath) is None:\n console.terse(\" Warning: Init 'with' non-existent share {0}\"\n \" ... creating anyway\".format(srcPath))\n src = self.currentStore.create(srcPath)\n #assumes src share inited before this line parsed\n for field in srcFields:\n init[field] = src[field]\n\n else:\n msg = \"ParseError: Building verb '%s'. Bad connective got %s\" % \\\n (command, connective)\n raise excepting.ParseError(msg, tokens, index)\n\n except IndexError:\n msg = \"ParseError: Building verb '%s'. Not enough tokens.\" % (command, )\n raise excepting.ParseError(msg, tokens, index)\n\n if index != len(tokens):\n msg = \"ParseError: Building verb '%s'. 
Unused tokens.\" % (command,)\n raise excepting.ParseError(msg, tokens, index)\n\n prefix += '/' + self.currentHouse.name #extra slashes are ignored\n\n if rxa:\n if ':' in rxa:\n host, port = rxa.split(':')\n sha = (host, int(port))\n else:\n sha = (rxa, sha[1])\n\n if txa:\n if ':' in txa:\n host, port = txa.split(':')\n dha = (host, int(port))\n else:\n dha = (txa, dha[1])\n\n server = serving.Server(name=name, store = self.currentStore,)\n kw = dict(period=period, schedule=schedule, sha=sha, dha=dha, prefix=prefix,)\n kw.update(init)\n server.reinit(**kw)\n\n self.currentHouse.taskers.append(server)\n if schedule == SLAVE:\n self.currentHouse.slaves.append(server)\n else: #taskable active or inactive\n if order == FRONT:\n self.currentHouse.fronts.append(server)\n elif order == BACK:\n self.currentHouse.backs.append(server)\n else:\n self.currentHouse.mids.append(server)\n\n msg = \" Created server named {0} at period {2:0.4f} be {3}\\n\".format(\n server.name, name, server.period, ScheduleNames[server.schedule])\n console.profuse(msg)\n\n return True", "title": "" }, { "docid": "3a13d6c7b2c36504e404d0c61ef9021a", "score": "0.45404032", "text": "def fromstring(cls, string):\n\t\treturn cls(checkvar(x) for x in parselits(string))", "title": "" }, { "docid": "2588e5ee238a0ea552237f8d6b6ea206", "score": "0.45365214", "text": "def MakeSet(value):\n if (isinstance(value, collections.abc.Iterable) and\n not isinstance(value, str)):\n return set(value)\n return set([value])", "title": "" }, { "docid": "f96984b2d2e5ff6c587883975f8fd9af", "score": "0.45256498", "text": "def map_server_type(server_type):\n if not server_type:\n return None\n if isinstance(server_type, ServerType):\n return server_type\n elif isinstance(server_type, str):\n for this_type in ServerType:\n if this_type.value == server_type:\n return this_type\n raise ClusterTemplateConfigError(\n 'Invalid server type - ' + server_type)", "title": "" }, { "docid": "454613c35142e34c2c24c067ca412636", "score": "0.45138225", "text": "def get_servers(**kwargs):\n logger.debug(kwargs)\n bays = kwargs['bays']\n servers = []\n for bay in bays:\n servers.append({\n 'bay': str(bay).zfill(2),\n 'firmware version': get_current_firmware(),\n 'servername': 'srv{}.company.com'.format(get_hex()[0:6]),\n })\n sleep(5)\n kwargs['derived_key'] = 'derived_value'\n return (servers, kwargs)", "title": "" }, { "docid": "69dcf906001bb20a228ff2c7d1fcad89", "score": "0.45093885", "text": "def from_server(cls, server, slug):\n\n challenge = server.get(\n 'challenge',\n replacements={'slug': slug})\n return cls(\n **challenge)", "title": "" }, { "docid": "5f4e5e0f5ce438e8908e4a62afdad51c", "score": "0.4506277", "text": "def ConstructRelaySet(sID, neighborSet, relayNodes):\n rSet = []\n if neighborSet:\n for neighborID in neighborSet:\n if relayNodes[neighborID].current_action == sID:\n rSet.append(neighborID)\n return rSet \n# if rSet:\n# return rSet\n# else:\n# rSet.append(neighborRelayID_with_maxReward(neighborSet,\n# relayNodes))\n# return rSet\n else:\n return rSet", "title": "" }, { "docid": "db534fd3e7e05ecdddd0fac1f184cc78", "score": "0.4499999", "text": "def new_set(\n *,\n stype: s_types.Type,\n ctx: context.ContextLevel,\n ircls: Type[irast.Set] = irast.Set,\n **kwargs: Any,\n) -> irast.Set:\n\n skip_subtypes: bool = kwargs.get('skip_subtypes', False)\n ignore_rewrites: bool = kwargs.get('ignore_rewrites', False)\n rw_key = (stype, skip_subtypes)\n\n if not ignore_rewrites and ctx.suppress_rewrites:\n from . 
import policies\n ignore_rewrites = kwargs['ignore_rewrites'] = (\n policies.should_ignore_rewrite(stype, ctx=ctx))\n\n if (\n not ignore_rewrites\n and rw_key not in ctx.env.type_rewrites\n and isinstance(stype, s_objtypes.ObjectType)\n and ctx.env.options.apply_query_rewrites\n ):\n from . import policies\n policies.try_type_rewrite(stype, skip_subtypes=skip_subtypes, ctx=ctx)\n\n typeref = typegen.type_to_typeref(stype, env=ctx.env)\n ir_set = ircls(typeref=typeref, **kwargs)\n ctx.env.set_types[ir_set] = stype\n return ir_set", "title": "" }, { "docid": "2e41b91ca8e7abd238532914cba2b707", "score": "0.4497398", "text": "def svn_stringbuf_set(*args):\n return apply(_core.svn_stringbuf_set, args)", "title": "" }, { "docid": "dcea6137a13b77c9effd22e5fbfc7343", "score": "0.4493515", "text": "def _load_test_definitions(netstr: str, svcstr: Optional[str] = None) -> naming.Naming:\n defs = naming.Naming()\n if netstr:\n defs._ParseFile(StringIO(netstr), \"networks\")\n if svcstr:\n defs._ParseFile(StringIO(svcstr), \"services\")\n return defs", "title": "" }, { "docid": "2d4e16b79a895cd1fe5efac536de79a2", "score": "0.44881755", "text": "def __init__(self, servers):\n self._servers = servers", "title": "" }, { "docid": "563d2a6799064e8b283367d414bb5140", "score": "0.44875792", "text": "def get_servers2(**kwargs):\n bays = range(1, 17)\n servers = []\n for bay in bays:\n servers.append({\n 'bay': str(bay).zfill(2),\n 'firmware version': get_current_firmware(),\n 'servername': 'srv{}.company.com'.format(get_hex()[0:6]),\n })\n sleep(5)\n return (servers, kwargs)", "title": "" }, { "docid": "9f344c3fc62b9440b31ebbca87ca2400", "score": "0.44794732", "text": "def create_server(self, name, image_ref, flavor_ref, **kwargs):\r\n server = xml_utils.Element(\"server\",\r\n xmlns=xml_utils.XMLNS_11,\r\n imageRef=image_ref,\r\n flavorRef=flavor_ref,\r\n name=name)\r\n\r\n for attr in [\"adminPass\", \"accessIPv4\", \"accessIPv6\", \"key_name\",\r\n \"user_data\", \"availability_zone\", \"min_count\",\r\n \"max_count\", \"return_reservation_id\"]:\r\n if attr in kwargs:\r\n server.add_attr(attr, kwargs[attr])\r\n\r\n if 'disk_config' in kwargs:\r\n server.add_attr('xmlns:OS-DCF', \"http://docs.openstack.org/\"\r\n \"compute/ext/disk_config/api/v1.1\")\r\n server.add_attr('OS-DCF:diskConfig', kwargs['disk_config'])\r\n\r\n if 'security_groups' in kwargs:\r\n secgroups = xml_utils.Element(\"security_groups\")\r\n server.append(secgroups)\r\n for secgroup in kwargs['security_groups']:\r\n s = xml_utils.Element(\"security_group\", name=secgroup['name'])\r\n secgroups.append(s)\r\n\r\n if 'networks' in kwargs:\r\n networks = xml_utils.Element(\"networks\")\r\n server.append(networks)\r\n for network in kwargs['networks']:\r\n s = xml_utils.Element(\"network\", uuid=network['uuid'],\r\n fixed_ip=network['fixed_ip'])\r\n networks.append(s)\r\n\r\n if 'meta' in kwargs:\r\n metadata = xml_utils.Element(\"metadata\")\r\n server.append(metadata)\r\n for k, v in kwargs['meta'].items():\r\n meta = xml_utils.Element(\"meta\", key=k)\r\n meta.append(xml_utils.Text(v))\r\n metadata.append(meta)\r\n\r\n if 'personality' in kwargs:\r\n personality = xml_utils.Element('personality')\r\n server.append(personality)\r\n for k in kwargs['personality']:\r\n temp = xml_utils.Element('file', path=k['path'])\r\n temp.append(xml_utils.Text(k['contents']))\r\n personality.append(temp)\r\n\r\n if 'sched_hints' in kwargs:\r\n sched_hints = kwargs.get('sched_hints')\r\n hints = xml_utils.Element(\"os:scheduler_hints\")\r\n 
hints.add_attr('xmlns:os', xml_utils.XMLNS_11)\r\n for attr in sched_hints:\r\n p1 = xml_utils.Element(attr)\r\n p1.append(sched_hints[attr])\r\n hints.append(p1)\r\n server.append(hints)\r\n resp, body = self.post('servers', str(xml_utils.Document(server)))\r\n server = self._parse_server(etree.fromstring(body))\r\n return resp, server", "title": "" }, { "docid": "f8fbeb140fa45ea59b2ef19c59759dfd", "score": "0.4479268", "text": "def add_set_collection_parser(sub_parsers_set: argparse._SubParsersAction) -> None:\n subparser = sub_parsers_set.add_parser('collection', help='Set the collection to use',\n description='Set the collection to use in subsequent '\n 'commands.',\n epilog='If the collection does not exist in the current '\n 'server, an error is issued and the previously stored '\n 'collection is kept unmodified.')\n subparser.add_argument(COLLECTION_ARGNAME, help='name of the collection to use')\n subparser.set_defaults(func=cli_set_server_parameter)", "title": "" }, { "docid": "98ad5fecd3940dfd0a575b55c28325e9", "score": "0.44732285", "text": "def serverset(self, name, value):\n self.server.rset(name, value)\n self.ssets.append(name)", "title": "" }, { "docid": "1fc021bfdfefe1b7fec549dc3cf8ae17", "score": "0.4472697", "text": "def test_load_config(self):\n ruleset_str = '[\"vhost\", {\"hostname\": \"photoserver\", \"maxConnections\": 50, \"maxConnectionsPerUser\": 5, \"maxConnectionsPerHost\": 20, \"allowUnknownUser\": true, \"aliases\": \"antialias\",'\n ruleset_str += '\"groups\": {'\n ruleset_str += '\"anonymous\": { \"users\": \"anonymous\", \"remoteHosts\": \"*\", \"maxFrameSize\": 111111, \"maxMessageSize\": 111111, \"maxSessionWindow\": 111111, \"maxSessions\": 1, \"maxSenders\": 11, \"maxReceivers\": 11, \"allowDynamicSource\": false, \"allowAnonymousSender\": false, \"sources\": \"public\", \"targets\": \"\" },'\n ruleset_str += '\"users\": { \"users\": \"u1, u2\", \"remoteHosts\": \"*\", \"maxFrameSize\": 222222, \"maxMessageSize\": 222222, \"maxSessionWindow\": 222222, \"maxSessions\": 2, \"maxSenders\": 22, \"maxReceivers\": 22, \"allowDynamicSource\": false, \"allowAnonymousSender\": false, \"sources\": \"public, private\", \"targets\": \"public\" },'\n ruleset_str += '\"paidsubscribers\": { \"users\": \"p1, p2\", \"remoteHosts\": \"*\", \"maxFrameSize\": 333333, \"maxMessageSize\": 333333, \"maxSessionWindow\": 333333, \"maxSessions\": 3, \"maxSenders\": 33, \"maxReceivers\": 33, \"allowDynamicSource\": true, \"allowAnonymousSender\": false, \"sources\": \"public, private\", \"targets\": \"public, private\" },'\n ruleset_str += '\"test\": { \"users\": \"zeke, ynot\", \"remoteHosts\": \"10.48.0.0-10.48.255.255, 192.168.100.0-192.168.100.255\", \"maxFrameSize\": 444444, \"maxMessageSize\": 444444, \"maxSessionWindow\": 444444, \"maxSessions\": 4, \"maxSenders\": 44, \"maxReceivers\": 44, \"allowDynamicSource\": true, \"allowAnonymousSender\": true, \"sources\": \"private\", \"targets\": \"private\" },'\n\n if is_ipv6_enabled():\n ruleset_str += '\"admin\": { \"users\": \"alice, bob\", \"remoteHosts\": \"10.48.0.0-10.48.255.255, 192.168.100.0-192.168.100.255, 10.18.0.0-10.18.255.255, 127.0.0.1, ::1\", \"maxFrameSize\": 555555, \"maxMessageSize\": 555555, \"maxSessionWindow\": 555555, \"maxSessions\": 5, \"maxSenders\": 55, \"maxReceivers\": 55, \"allowDynamicSource\": true, \"allowAnonymousSender\": true, \"sources\": \"public, private, management\", \"targets\": \"public, private, management\" },'\n ruleset_str += '\"superuser\": { \"users\": \"ellen\", \"remoteHosts\": 
\"72.135.2.9, 127.0.0.1, ::1\", \"maxFrameSize\": 666666, \"maxMessageSize\": 666666, \"maxSessionWindow\": 666666, \"maxSessions\": 6, \"maxSenders\": 66, \"maxReceivers\": 66, \"allowDynamicSource\": false, \"allowAnonymousSender\": false, \"sources\": \"public, private, management, root\", \"targets\": \"public, private, management, root\" },'\n else:\n ruleset_str += '\"admin\": { \"users\": \"alice, bob\", \"remoteHosts\": \"10.48.0.0-10.48.255.255, 192.168.100.0-192.168.100.255, 10.18.0.0-10.18.255.255, 127.0.0.1\", \"maxFrameSize\": 555555, \"maxMessageSize\": 555555, \"maxSessionWindow\": 555555, \"maxSessions\": 5, \"maxSenders\": 55, \"maxReceivers\": 55, \"allowDynamicSource\": true, \"allowAnonymousSender\": true, \"sources\": \"public, private, management\", \"targets\": \"public, private, management\" },'\n ruleset_str += '\"superuser\": { \"users\": \"ellen\", \"remoteHosts\": \"72.135.2.9, 127.0.0.1\", \"maxFrameSize\": 666666, \"maxMessageSize\": 666666, \"maxSessionWindow\": 666666, \"maxSessions\": 6, \"maxSenders\": 66, \"maxReceivers\": 66, \"allowDynamicSource\": false, \"allowAnonymousSender\": false, \"sources\": \"public, private, management, root\", \"targets\": \"public, private, management, root\" },'\n\n ruleset_str += '\"$default\": { \"remoteHosts\": \"*\", \"maxFrameSize\": 222222, \"maxMessageSize\": 222222, \"maxSessionWindow\": 222222, \"maxSessions\": 2, \"maxSenders\": 22, \"maxReceivers\": 22, \"allowDynamicSource\": false, \"allowAnonymousSender\": false, \"sources\": \"public, private\", \"targets\": \"public\" }'\n ruleset_str += '}}]'\n\n ruleset = json.loads(ruleset_str)\n\n self.create_ruleset(ruleset[1])", "title": "" }, { "docid": "8a7ff47348ac4889641f92993efb7a61", "score": "0.44682318", "text": "def getServers():\n\n return serverList", "title": "" }, { "docid": "3c898622118391297ea18b56d88a3eb9", "score": "0.4460437", "text": "def _get_paths_set_from_string(path_string):\n logger.debug(\"Retrieving paths from {}\".format(path_string))\n\n if os.path.isdir(path_string):\n # Create a set with all the headers in the directory\n paths_set = {\n os.path.join(rootpath, filename)\n for rootpath, dirs, filenames\n in os.walk(path_string)\n for filename\n in filenames\n }\n logger.debug(\"\\nInitial path: {} \\nPaths retrieved: {}\".format(\n path_string,\n paths_set\n ))\n return paths_set\n elif os.path.isfile(path_string):\n # Convert to set if this is a string\n logger.debug(\"File path retrieved: {}\".format(path_string))\n return {path_string}", "title": "" }, { "docid": "9a6123cc2068040ead29f2181e12490c", "score": "0.44578585", "text": "def get_server(args, handler):\n if args['socket']:\n return server_generator('AF_UNIX', args['socket'], handler)\n elif args['port']:\n return server_generator('AF_INET', ('', args['port']), handler)\n else:\n raise ValueError('Either socket or port should be specified.')", "title": "" }, { "docid": "0c7001baa50173da23caa2302fc8e5be", "score": "0.44545066", "text": "def registerServicesFor(self, string: str) -> None:\n ...", "title": "" }, { "docid": "2f204e4f6ce5d6fce5172726707df6b0", "score": "0.4446613", "text": "def test_set_nameservers(self) -> None:\n\n given = [\"example.org\"]\n\n self.resolver_provider.set_nameservers(given)\n\n expected = [\n \"192.168.1.1\",\n \"10.47.91.9\",\n \"fe80::6b01:9045:a42a:fb5f\",\n \"fe80::6b01:9049:a42a:fb5f\",\n ]\n actual = self.resolver_provider.get_nameservers()\n\n self.assertEqual(expected, actual)\n\n expected = {\n \"192.168.1.1\": 53,\n \"10.47.91.9\": 53,\n 
\"fe80::6b01:9045:a42a:fb5f\": 53,\n \"fe80::6b01:9049:a42a:fb5f\": 53,\n }\n actual = self.resolver_provider.get_nameserver_ports()\n\n self.assertEqual(expected, actual)", "title": "" }, { "docid": "b5232733d35bc347b77e7f894e80f35c", "score": "0.44428256", "text": "def _parse_build_set(bs_string):\n assert isinstance(bs_string, basestring)\n p = bs_string.split('/')\n if '' in p:\n return None\n\n n = len(p)\n\n if n == 5 and p[0] == 'patch' and p[1] == 'gerrit':\n return common_pb2.GerritChange(\n host=p[2],\n change=int(p[3]),\n patchset=int(p[4])\n )\n\n if n >= 5 and p[0] == 'commit' and p[1] == 'gitiles':\n if p[n-2] != '+' or not is_sha1_hex(p[n-1]):\n return None\n return common_pb2.GitilesCommit(\n host=p[2],\n project='/'.join(p[3:n-2]), # exclude plus\n id=p[n-1],\n )\n\n return None", "title": "" }, { "docid": "ad287feac251cfde504edb0588a9bbb7", "score": "0.44427234", "text": "def gen_set(val_in: str) -> Set:\n\n return {c.upper() for c in val_in if c == c.lower()}", "title": "" }, { "docid": "acc2c5bdb5d1037aacc9f004eae44e6f", "score": "0.44416255", "text": "def _process_set_names(sets: Union[AnyStr, Tuple, List[AnyStr], Set[AnyStr]]) -> Union[AnyStr, Set[AnyStr]]:\n\n if isinstance(sets, (tuple, list)):\n return set(sets)\n\n return sets", "title": "" }, { "docid": "8038ffd1a8f6e63f0f8466c38b6af517", "score": "0.4428524", "text": "def parseSet(bStream):\n desc = '{0:08b}'.format(readUSHORT(bStream))\n role = ComponentRole[desc[:3]]\n if role == SET:\n mySet = Set()\n elif role == RDSET:\n mySet = RedundantSet()\n elif role == RSET:\n mySet = ReplacementSet\n else:\n raise Exception(\"Only set is allowed\")\n assert(int(desc[3])) # set must have type\n mySet._type = readIDENT(bStream)\n if int(desc[4]):\n mySet._name = readIDENT(bStream)\n\n mySet.template = parseTemplate(bStream)\n mySet.objects = parseObjects(bStream, mySet.template)\n\n return mySet", "title": "" }, { "docid": "575b9590cda36314c1d7ab53f932c947", "score": "0.4422081", "text": "def from_string(self, string):\n node = TokenSet()\n root = node\n\n # Iterates throough all characters in the passed string appending\n # a node for each character.\n # When a wildcard character is found then a self referencing edge\n # is introduced to continually match any number of characters\n for i, char in enumerate(string):\n final = i == len(string) - 1\n if char == \"*\":\n node.edges[char] = node\n node.final = final\n else:\n next_ = TokenSet()\n next_.final = final\n node.edges[char] = next_\n node = next_\n\n return root", "title": "" }, { "docid": "fc20d208713afedf5112a82b72ee8ba5", "score": "0.44131258", "text": "def __init__(self, server_description, topology, pool, topology_settings):\n ...", "title": "" }, { "docid": "e5f96408e84ac7ea83aea6ab81eb2008", "score": "0.4412684", "text": "def _rangeset_from_string(rangeset_str: str) -> ranges.RangeSet[int]:\n range_strs = re.findall(r\"[\\[\\(][^\\]\\)]*[^\\[\\(][\\]\\)]\", rangeset_str[1:-1])\n return ranges.RangeSet([_range_from_string(range_str) for range_str in range_strs])", "title": "" }, { "docid": "d4471b582c3bd2d67936130d4c1dc15d", "score": "0.4411961", "text": "def __init__(self, name, address):\n self.servers = dict()\n self.servers[name] = address", "title": "" }, { "docid": "3cc4b9d8369252a02ca5438b8f1662d6", "score": "0.4410202", "text": "def get_server_dict(name, volumes_l, nics_l):\n serv1 = {\n 'name': name,\n 'ram': 2048,\n 'cores': 1,\n 'create_volumes': volumes_l,\n 'nics': nics_l\n }\n return serv1", "title": "" }, { "docid": 
"2cc2c361d4748552ed308a248977d48a", "score": "0.44047317", "text": "def build_servient(parsed_args, clients_config=None):\n\n logger = logging.getLogger()\n\n logger.info(\"Creating servient with TD catalogue on: {}\".format(parsed_args.port_catalogue))\n\n servient = Servient(\n catalogue_port=parsed_args.port_catalogue,\n hostname=parsed_args.hostname,\n clients_config=clients_config)\n\n if parsed_args.port_ws > 0:\n logger.info(\"Creating WebSocket server on: {}\".format(parsed_args.port_ws))\n servient.add_server(WebsocketServer(port=parsed_args.port_ws))\n\n if parsed_args.port_http > 0:\n logger.info(\"Creating HTTP server on: {}\".format(parsed_args.port_http))\n servient.add_server(HTTPServer(port=parsed_args.port_http))\n\n if parsed_args.mqtt_broker:\n try:\n from wotpy.protocols.mqtt.server import MQTTServer\n logger.info(\"Creating MQTT server on broker: {}\".format(parsed_args.mqtt_broker))\n mqtt_server = MQTTServer(parsed_args.mqtt_broker, servient_id=servient.hostname)\n servient.add_server(mqtt_server)\n logger.info(\"MQTT server created with ID: {}\".format(mqtt_server.servient_id))\n except NotImplementedError as ex:\n logger.warning(ex)\n\n if parsed_args.port_coap > 0:\n try:\n from wotpy.protocols.coap.server import CoAPServer\n logger.info(\"Creating CoAP server on: {}\".format(parsed_args.port_coap))\n servient.add_server(CoAPServer(port=parsed_args.port_coap))\n except NotImplementedError as ex:\n logger.warning(ex)\n\n return servient", "title": "" }, { "docid": "1ff6709379ca5160f2d57ad2f4a71566", "score": "0.4401872", "text": "def _get_server_groups(self, _groups):\r\n\r\n try:\r\n # NOTE(Gueyoung): novaclient v2.18.0 does not have 'all_projects=True' param.\r\n server_group_list = self.nova.server_groups.list()\r\n\r\n for g in server_group_list:\r\n server_group = Group(g.name)\r\n\r\n server_group.uuid = g.id\r\n\r\n # TODO: Check len(g.policies) == 1\r\n # policy is either 'affinity', 'anti-affinity', 'soft-affinity',\r\n # or 'soft-anti-affinity'\r\n if g.policies[0] == \"anti-affinity\":\r\n server_group.group_type = \"diversity\"\r\n else:\r\n server_group.group_type = g.policies[0]\r\n server_group.factory = \"server-group\"\r\n server_group.level = \"host\"\r\n\r\n # Members attribute is a list of server uuids\r\n for s_uuid in g.members:\r\n s_info = {}\r\n s_info[\"stack_id\"] = \"none\"\r\n s_info[\"stack_name\"] = \"none\"\r\n s_info[\"uuid\"] = s_uuid\r\n s_info[\"orch_id\"] = \"none\"\r\n s_info[\"name\"] = \"none\"\r\n s_info[\"flavor_id\"] = \"none\"\r\n s_info[\"vcpus\"] = -1\r\n s_info[\"mem\"] = -1\r\n s_info[\"disk\"] = -1\r\n s_info[\"numa\"] = \"none\"\r\n s_info[\"image_id\"] = \"none\"\r\n s_info[\"tenant_id\"] = \"none\"\r\n s_info[\"state\"] = \"created\"\r\n s_info[\"status\"] = \"valid\"\r\n\r\n server_group.server_list.append(s_info)\r\n\r\n # TODO: Check duplicated name as group identifier\r\n _groups[server_group.name] = server_group\r\n\r\n except Exception:\r\n self.logger.error(traceback.format_exc())\r\n return \"error while setting server-groups from Nova\"\r\n\r\n return \"ok\"", "title": "" }, { "docid": "0f6aa7f939f29b377fdd8ae9a6c96163", "score": "0.43980744", "text": "def _get_serv(ret):\n\n _options = _get_options(ret)\n host = _options.get(\"host\")\n port = _options.get(\"port\")\n\n log.debug(\"memcache server: %s:%s\", host, port)\n if not host or not port:\n log.error(\"Host or port not defined in salt config\")\n return\n\n # Combine host and port to conform syntax of python memcache client\n memcacheoptions = 
(host, port)\n\n return memcache.Client([\"{}:{}\".format(*memcacheoptions)], debug=0)\n # # TODO: make memcacheoptions cluster aware\n # Servers can be passed in two forms:\n # 1. Strings of the form C{\"host:port\"}, which implies a default weight of 1\n # 2. Tuples of the form C{(\"host:port\", weight)}, where C{weight} is\n # an integer weight value.", "title": "" }, { "docid": "305289f606121945a3ef6ff575b0fcdb", "score": "0.4397652", "text": "def create_ms(self, service, master, master_port, slave, slave_port):\n def get_host_name(host_ip):\n try:\n conf = configparser.ConfigParser()\n conf.read('config.ini')\n ip_list = map(lambda x: conf.get('hostmap', x), conf.options('hostmap'))\n index = list(ip_list).index(host_ip)\n hostname = conf.options('hostmap')[index]\n except:\n print('There is no %s in cluster.' % (host_ip))\n raise ValueError\n return hostname\n # get master's host name\n master_host = get_host_name(master)\n # get slave's host name\n slave_host = get_host_name(slave)\n def append_container_list(hostname, container):\n container_msg = self.zk.get('rds_demo/%s/containers' %(hostname))\n container_str = container_msg[0].decode('utf-8')\n container_list = container_str.split(',')\n container_list.append(container)\n if '' in container_list:\n container_list.remove('')\n container_str = ','.join(container_list)\n container_msg = bytes(container_str, encoding='utf-8')\n self.zk.set('rds_demo/%s/containers' %(hostname), container_msg)\n # master container name\n master_container_name = service +'_m'\n # slave container name\n slave_container_name = service + '_s'\n # append container name\n append_container_list(master_host, master_container_name)\n append_container_list(slave_host, slave_container_name)\n # zookeeper service information\n self.zk.create('/rds_demo/service/%s' %(service))\n self.zk.create('/rds_demo/service/%s/isMS' %(service), b'True')\n self.zk.create('/rds_demo/service/%s/master' %(service))\n self.zk.create('/rds_demo/service/%s/slave' %(service))\n self.zk.create('/rds_demo/service/%s/master/host' %(service),\n bytes(master_host, encoding='utf-8'))\n self.zk.create('/rds_demo/service/%s/master/port' %(service),\n bytes(master_port, encoding='utf-8'))\n self.zk.create('rds_demo/service/%s/master/container' %(service),\n bytes(master_container_name, encoding='utf-8'))\n self.zk.create('/rds_demo/service/%s/slave/host' % (service),\n bytes(slave_host, encoding='utf-8'))\n self.zk.create('/rds_demo/service/%s/slave/port' % (service),\n bytes(slave_port, encoding='utf-8'))\n self.zk.create('rds_demo/service/%s/slave/container' % (service),\n bytes(slave_container_name, encoding='utf-8'))", "title": "" }, { "docid": "a6dcb7191438e9cab7e24ecccb7e0612", "score": "0.439608", "text": "def parse_servers(servers):\n result = {\n 'data': []\n }\n if isinstance(servers, list):\n for server_dict in servers:\n current_server = parse_server(server_dict)\n result['data'].append(current_server)\n else:\n current_server = parse_server(servers)\n result['data'].append(current_server)\n return result", "title": "" }, { "docid": "434c79bd3423f14b377d988d453cffe3", "score": "0.43936583", "text": "def setupServers():\n\n serverList.clear()\n with open(os.path.join(\n os.getcwd(),\n os.path.dirname(__file__),\n \"servers.json\")\n ) as serversFile:\n servers = json.load(serversFile)[\"servers\"]\n for server in servers:\n serverList.append(OPCUAServer(\n name=server.get(\"name\"),\n endPointAddress=server.get(\"endPointAddress\"),\n nameSpaceUri=server.get(\"nameSpaceUri\"),\n 
browseRootNodeIdentifier=server.get(\"browseRootNodeIdentifier\")\n ))", "title": "" }, { "docid": "a6dc64a55e7e89248437813476d78a4c", "score": "0.4388952", "text": "def _server_namespace_handler(k, v):\r\n atoms = k.split(\".\", 1)\r\n if len(atoms) > 1:\r\n # Special-case config keys of the form 'server.servername.socket_port'\r\n # to configure additional HTTP servers.\r\n if not hasattr(cherrypy, \"servers\"):\r\n cherrypy.servers = {}\r\n \r\n servername, k = atoms\r\n if servername not in cherrypy.servers:\r\n from cherrypy import _cpserver\r\n cherrypy.servers[servername] = _cpserver.Server()\r\n # On by default, but 'on = False' can unsubscribe it (see below).\r\n cherrypy.servers[servername].subscribe()\r\n \r\n if k == 'on':\r\n if v:\r\n cherrypy.servers[servername].subscribe()\r\n else:\r\n cherrypy.servers[servername].unsubscribe()\r\n else:\r\n setattr(cherrypy.servers[servername], k, v)\r\n else:\r\n setattr(cherrypy.server, k, v)", "title": "" }, { "docid": "d17bf46aaae143646aee9af5d842c546", "score": "0.4385352", "text": "def getServers(SID, start, max):\n return call(\"getServers\", SID, start, max)", "title": "" }, { "docid": "172e1671f137bce882a3c2a9528cd9a6", "score": "0.43821278", "text": "def set_servers(self, servers):\r\n self.servers = [_Host(s, self.debuglog) for s in servers]\r\n self._init_buckets()", "title": "" }, { "docid": "fd83c6ff8feb9ae78eb8407d7196a2fd", "score": "0.43812028", "text": "def cfg_srv_init(cfg: configparser.SectionProxy) -> None:\n store.srv_uri = f'http://{cfg[\"server_host\"]}:{cfg[\"server_port\"]}'", "title": "" }, { "docid": "629f2f6e20a44c97f639d7e15e85276d", "score": "0.43805447", "text": "def from_config_dict(config):\n local_address = (\n config['NETWORK']['local_address'],\n int(config['NETWORK']['local_port'])\n )\n print(local_address)\n hydra_server = hydra.HydraServer(local_address)\n return hydra_server", "title": "" }, { "docid": "46b83e058cee99adeea8a86000b0cd30", "score": "0.43801537", "text": "def process_set(substr: str) -> Tuple[int, dict]:\n increment = 0\n while substr[increment] != ']': increment += 1\n if substr[1] == '^':\n d = {'value': substr[0:increment + 1], 'type': 1, 'kind': Kind.NEG_SET,\n Kind.NEG_SET: charset_parser(substr[1:increment])}\n else:\n d = {'value': substr[0:increment + 1], 'type': 1, 'kind': Kind.SET,\n Kind.SET: charset_parser(substr[1:increment])}\n return increment, d", "title": "" }, { "docid": "6867aebc791ff2ccd3245bb24a678a6b", "score": "0.4378145", "text": "def from_str(cls, s: str) -> \"SelectorGroup\":\n i = 0\n selectors = []\n while i < len(s):\n selector, i = Selector.from_str(s, i)\n selectors.append(selector)\n if not selectors:\n raise SelectorParserException(s, i, \"selector group is empty\")\n return cls(selectors)", "title": "" }, { "docid": "bca01a652b3c142debf9a24070cd7d5a", "score": "0.43713254", "text": "def gen_set(c, s_expr):\n if len(s_expr) != 3:\n raise SyntaxError('Invalid set! 
expression.')\n gen_code(c, s_expr[2])\n c.STORE_NAME(s_expr[1])", "title": "" }, { "docid": "42340ae04c89f5e7993acddaa7dafa72", "score": "0.4371042", "text": "def get_server(metric_id):\n if is_valid_metric(metric_id) and (metric_id == \"vs7\" or metric_id == \"http7\"):\n return server7cm.Server7cm()\n if is_valid_metric(metric_id) and (metric_id == \"cm7\"):\n return cm7.ServerCM7()\n if is_valid_metric(metric_id) and metric_id == \"tcp4\":\n return ServerTCP4()\n if is_valid_metric(metric_id) and (metric_id == \"tcps4\" or metric_id == \"mm7\"):\n return tcps4.ServerTCPS4()\n if is_valid_metric(metric_id) and metric_id == \"syn4\":\n return syn4.ServerSyn4()\n if is_valid_metric(metric_id) and metric_id == \"udp4\":\n return udp4.ServerUDP4()\n if is_valid_metric(metric_id) and (metric_id == \"stls7\" or metric_id == \"smtp7\"):\n return stls7.ServerSTLS7()\n if is_valid_metric(metric_id) and metric_id == \"pop37\":\n return pop37.ServerPOP37()\n if is_valid_metric(metric_id) and metric_id == \"tls4\":\n return tls4.ServerTLS4()\n if is_valid_metric(metric_id) and metric_id == \"voip7\":\n return voip7.ServerVOIP7()\n if is_valid_metric(metric_id) and \\\n (metric_id == \"ndns7\" or metric_id == \"bdns7\" or metric_id == \"trac3\" or metric_id == \"ooni7\"):\n return None\n else:\n raise NotImplemented", "title": "" }, { "docid": "8e71c63eeb1cb8a5c5656cc28b62ab0c", "score": "0.4370755", "text": "def start_server(ServerId=None):\n pass", "title": "" }, { "docid": "31fc2cbdbc04a4cbc2c1c7bd99f59842", "score": "0.43704963", "text": "def _make_switch_set(self, argv):\n\n switchset = set()\n for switch_candidate in argv:\n if \"-\" in switch_candidate[0] and \"=\" not in switch_candidate:\n # ignore everything after the double dash idiom, no longer considered switch context\n if switch_candidate == \"--\":\n break\n else:\n switch_candidate = switch_candidate.lstrip(\"-\")\n switchset.add(switch_candidate)\n\n return switchset", "title": "" }, { "docid": "87b3135e367b7bf4efb31bf1415825ea", "score": "0.4368575", "text": "def parse_irc_server_arg(value):\n fragments = value.split(':', 2)\n if len(fragments) > 1:\n fragments[1] = int(fragments[1])\n return ServerSpec(*fragments)", "title": "" }, { "docid": "9d562880a6696bade182ac901ceed54a", "score": "0.43660402", "text": "def set_server(self, server):\n\n country_id = server['country_id']\n del server['country_id']\n if country_id in self.data:\n self.data[country_id].append(server)\n else:\n self.data[country_id] = []\n self.data[country_id].append(server)", "title": "" }, { "docid": "a910aaab44da8334b1c32e214d82d0b4", "score": "0.43592802", "text": "def getServer(t):\n return t[0].split()[0]", "title": "" }, { "docid": "28b8f0d8368cd2f4408d3462a04ca61c", "score": "0.43417963", "text": "def split_data_into_individual_servers(services):\n server1 = []\n server2 = []\n server3 = []\n \n for i in services: \n if ('s1' in i or 'e1' in i): server1.append(i) # appends to appropriate list, any service start/stop activity\n elif ('s2' in i or 'e2' in i): server2.append(i)\n elif ('s3' in i or 'e3' in i): server3.append(i)\n return (server1, server2, server3)", "title": "" }, { "docid": "a7480fa7fbbaf36b533509815358951e", "score": "0.43411896", "text": "def gen_NS(self):\n cursor.execute(\"SELECT * \"\n \"FROM nameserver \"\n \"WHERE domain='%s';\" % self.domain_id)\n for _, name, _, _ in cursor.fetchall():\n name = name.lower()\n try:\n ns, _ = Nameserver.objects.get_or_create(domain=self.domain,\n server=name)\n ns.views.add(public)\n except 
ValidationError, e:\n print \"Error generating NS. %s\" % e\n exit(1)", "title": "" }, { "docid": "4ee8613d4037f228c40a46ff0d5132aa", "score": "0.4340538", "text": "def create_topoSet(channel, template_name): # this will create a topoSet file string\n\n # Defining some important variables\n xfluid = channel.channelWidth / 2\n xmax = (channel.channelWidth + channel.wallWidth) / 2\n yfluid = channel.baseThick\n ywall = yfluid + channel.channelHeight\n ymax = ywall + channel.topThick\n zmax = channel.coolerLength\n\n # There is a template prepared. This section cuts it, prepares the block of text with the geometry, and\n # returns block_mesh, a string with the entire text in the topoSetDict\n text_topo_set = read_mesh_file(template_name)\n text_topo_set = text_topo_set.split('// *split here*')\n geometry_block = ('xfluid {:.3e}; \\nxmax {:.3e}; \\nyfluid {:.3e}; \\nywall {:.3e}; \\\n \\nymax {}; \\nzmax {:.3e}; \\n'.format(xfluid, xmax, yfluid, ywall, ymax, zmax))\n topo_set = text_topo_set[0] + geometry_block + text_topo_set[1]\n return topo_set", "title": "" }, { "docid": "ef1d098811830401da9b6a552d8eafaf", "score": "0.43378112", "text": "def from_string(cls, string):\n basis_sets = [GthPotential.from_string(c) for c in chunk(string)]\n return cls(objects=basis_sets)", "title": "" }, { "docid": "876f81cb73f62164fd9df546ab0abd95", "score": "0.43374225", "text": "def from_string(cls, string):\n basis_sets = [GaussianTypeOrbitalBasisSet.from_string(c) for c in chunk(string)]\n return cls(objects=basis_sets)", "title": "" }, { "docid": "f48a2bff0ae2ca863ae3e46d0e4d43f0", "score": "0.43361485", "text": "def __init__(self, topology_type, server_descriptions, replica_set_name, max_set_version, max_election_id, topology_settings):\n ...", "title": "" }, { "docid": "807589213595c9b5356fea5c99dda34d", "score": "0.43305695", "text": "def from_server_data(cls, data):\n case_converted = from_api(data, do_recursive=False)\n return cls.from_data(case_converted)", "title": "" }, { "docid": "82c63c0019a9fc9778cbda65a95f0569", "score": "0.43285573", "text": "def _make_mops_set(self, argv):\n\n mopsset = set()\n for mops_candidate in argv:\n if \"-\" in mops_candidate[0] and \"=\" not in mops_candidate:\n if (\n len(mops_candidate) > 2\n ): # the argument includes '-' and more than one character following dash\n if (\n mops_candidate[1] != \"-\"\n ): # it is not long option syntax (e.g. 
--long)\n mops_candidate = mops_candidate.replace(\"-\", \"\")\n for switch in mops_candidate:\n mopsset.add(switch)\n return mopsset", "title": "" }, { "docid": "6fce846c23600f957ca5973192b11d30", "score": "0.43262774", "text": "def create_sites(config):\n from django.contrib.sites.models import Site\n\n sites = {}\n for code in config.get_microsite_codes():\n context = config.get_context(code)\n site = {\n 'domain': context['discovery_domain'],\n 'name': context['discovery_domain']\n }\n logger.info('Creating site for {} - {}'.format(code, site))\n site, _ = Site.objects.get_or_create(domain=site['domain'], defaults=site)\n sites[code] = site\n return sites", "title": "" }, { "docid": "a3044ce40b9e7e6d8b78967e8cbfb23c", "score": "0.43257973", "text": "def cb(serverstring):\n servers = serverstring.splitlines()\n # Call the callback function\n callbackfunc([svr for svr in servers if not svr.startswith('__')])", "title": "" }, { "docid": "eab77192071f80f26a554e946ecea702", "score": "0.43138257", "text": "def set_server(oid, dic):\n server = db.server\n update(oid, server, dic)", "title": "" }, { "docid": "77729f2c8fea7f1116f2b5d018bd5aea", "score": "0.43058953", "text": "def add(nitro, server):\n __server = NSServer()\n __server.set_name(server.get_name())\n __server.set_ipaddress(server.get_ipaddress())\n __server.set_domain(server.get_domain())\n __server.set_translationip(server.get_translationip())\n __server.set_translationmask(server.get_translationmask())\n __server.set_domainresolveretry(server.get_domainresolveretry())\n __server.set_state(server.get_state())\n __server.set_ipv6address(server.get_ipv6address())\n __server.set_comment(server.get_comment())\n return __server.add_resource(nitro)", "title": "" }, { "docid": "4d13e46c1a35dfbfab25cc8a82b68571", "score": "0.43051812", "text": "def build_list_of_servers():\r\n # From servers.txt file, create a variable list that will contain\r\n # servers to choose from\r\n list_of_machines = []\r\n with open('servers.txt', 'r') as westmoreland_servers:\r\n # Build list removing /n and making all letters uppercase\r\n for server in westmoreland_servers:\r\n list_of_machines.append(server.replace('\\n', '').upper())\r\n # return servers in alphabetical order\r\n return sorted(list_of_machines)", "title": "" }, { "docid": "f0a39528ddc97ccc44869a07f14212be", "score": "0.43048736", "text": "def get_machineSet(data):\n\n s = VagrantMachineSet()\n\n s.name = getx(data, \"metadata.name\", validator=kubernetes_utils.validate_rfc123_label)\n s.replicas = getx(data, \"spec.replicas\", default=1, validator=utils.validate_integer)\n s.box = getx(data, \"spec.template.spec.providerConfig.value.box\", default=\"ubuntu/xenial64\") # TODO:validator\n s.cpus = getx(data, \"spec.template.spec.providerConfig.value.cpus\", default=2, validator=utils.validate_integer)\n s.memory = getx(data, \"spec.template.spec.providerConfig.value.memory\", default=2048, validator=utils.validate_integer)\n s.roles = getx(data, \"spec.template.spec.roles\", validator=validate_roles)\n \n return s", "title": "" }, { "docid": "ad15ddd8d80c0e7cd487ffaecb57954b", "score": "0.43009984", "text": "def build_set(self, builder, set_type, items):\n from numba.cpython import setobj\n return setobj.build_set(self, builder, set_type, items)", "title": "" }, { "docid": "e7254a9e8fe77e4f8d1c56283719ccac", "score": "0.4298876", "text": "def create_schema_set(self, schema_set_name: str, schema_set: bytes) -> SchemaSet:\n self.send_message(\n \"POST\",\n \"Create schema set\",\n 
f\"{self._url}/cps/api/v1/dataspaces/{self.name}/schema-sets/\",\n files={\"file\": schema_set},\n data={\"schema-set-name\": schema_set_name},\n headers={}, # Leave headers empty to fill it correctly by `requests` library\n auth=self.auth\n )\n return self.get_schema_set(schema_set_name)", "title": "" }, { "docid": "394bfdd5833c3bdb67f6886cf96cda7a", "score": "0.42986676", "text": "def _create_vpsa_server(self, initiator):\r\n vpsa_srv = self._get_server_name(initiator)\r\n if not vpsa_srv:\r\n xml_tree = self.vpsa.send_cmd('create_server', initiator=initiator)\r\n vpsa_srv = xml_tree.findtext('server-name')\r\n return vpsa_srv", "title": "" }, { "docid": "f40fdc5761f0a1e5e4470d1b8b775195", "score": "0.429724", "text": "def __init__(self, server1_option = None, server2_option= None):\n self.server1 = server1_option if server1_option else server1", "title": "" }, { "docid": "61dec1747b7bca534ac8fc3bdc47ec94", "score": "0.4296161", "text": "def test_set_valid_guild_name_new(cli: ShoulderbirdCLI, message: Mock) -> None:\n guilds = [Guild(10, \"test\"), Guild(9876543210, \"testings\")]\n message.clean_content = \"sb!set testings = (search|find)\"\n message.author.id = 111\n with patch.object(cli, \"client\") as mock_discord:\n mock_discord.guilds = guilds\n result = cli.parse_command(message)\n assert result\n assert \"Search set\" in result\n\n member = cli.config.load_member(\"9876543210\", \"111\")\n assert member.regex == \"(search|find)\"", "title": "" } ]
30ac5fa669a8ecd7bc7812f2e763041d
convert angular distance, position angle, and instrument rotator angle to position on the cold plate
[ { "docid": "9091d304b7eb567d5fa388cb8a5d44b2", "score": "0.0", "text": "def _addpad2xy(ang_dist_d, p_ang_d, inr_d):\n t = 90.0-(p_ang_d-inr_d)\n x = np.cos(np.deg2rad(t))\n y = np.sin(np.deg2rad(t))\n return x, y", "title": "" } ]
[ { "docid": "5a178f5d0c1537269618fad5b084c085", "score": "0.6049731", "text": "def translate_and_rotate(detection_coverage,center,index,angle):\n lobe = detection_coverage[index].lobe\n return translate(rotate(lobe,angle),center)", "title": "" }, { "docid": "3a8815282d60d9031b5edccbf093bdcf", "score": "0.5898739", "text": "def project(self, pos, angle, distance):\n\t\treturn (pos[0] + (cos(angle) * distance), pos[1] - (sin(angle) * distance))", "title": "" }, { "docid": "bc33f3235544ab7eff66fe59549c4c44", "score": "0.58817333", "text": "def _mount_angle_to_servo_angle(self, angle):\n\n\t\tif self.clockwise_servo:\n\t\t\treturn (self.servo_center + angle) % 360\n\t\telse:\n\t\t\treturn (self.servo_center - angle) % 360", "title": "" }, { "docid": "bf43493d892396711c97e8fd34009517", "score": "0.570649", "text": "def global_to_drone_coordinates(mat, angle):\n return [cos(angle) * mat[0] + sin(angle) * mat[1],\n cos(angle) * mat[1] - sin(angle) * mat[0], mat[2], mat[3]]", "title": "" }, { "docid": "781be3a37656fcbf60cb11ec91394609", "score": "0.5702615", "text": "def Conversions():\n #Polar to Cartesian:\n offsetx = distance * math.cos(theta)\n offsety = -distance * math.sin(theta)\n #Cartesian to Polar:\n distance = (offsetx**2+offsety**2) ** 0.5 #Pythagorean Theorum\n theta = math.atan2(-offsety,offsetx)", "title": "" }, { "docid": "e6271837887560ba5191243ee0788160", "score": "0.5653493", "text": "def accel_mag_orient(accel, mag=None):\n\n # accel = accel * (156.9/(2**15))\n\n accel = accel / np.linalg.norm(accel)\n \n accel_x = accel[0]\n accel_y = accel[1]\n accel_z = accel[2]\n\n # compute roll and pitch\n if accel_z == 0.0:\n roll = 0.0\n else:\n # roll = np.rad2deg(math.atan2(accel_y, accel_z))\n roll = np.rad2deg(math.atan2(accel_y, np.sqrt(accel_x**2 + accel_z**2) ) )\n\n if accel_y == 0 and accel_z == 0:\n pitch = 0.0\n else:\n pitch = np.rad2deg(math.atan2(accel_x, np.sqrt(accel_y**2 + accel_z**2) ) )\n\n if mag is not None:\n # mag = mag / np.linalg.norm(mag)\n # print(mag)\n p = np.deg2rad(pitch)\n r = np.deg2rad(roll)\n\n mag_x = mag[0]*np.cos(p) + mag[1]*np.sin(r)*np.sin(p) + mag[2]*np.cos(r)*np.sin(p)\n mag_y = mag[1] * np.cos(r) - mag[2] * np.sin(r)\n\n # mag_x = mag[0]*(1-accel_x**2) - mag[1]*(accel_x*accel_y) - mag[2]*accel_x*(np.sqrt(1 - accel_x**2 - accel_y**2))\n # mag_y = mag[1]*(np.sqrt(1 - accel_x**2 - accel_y**2)) - mag[2]*accel_y\n\n yaw = np.rad2deg(math.atan2(mag_x, mag_y))\n\n # mag_x = mag[0]\n # mag_y = mag[1]\n\n # yaw = np.rad2deg(math.atan2(-mag_y, mag_x))\n\n # print(mag_x)\n # print(mag_y)\n\n # mag_x = 22.720337\n # mag_y = -22.923279\n\n # if mag_x < 0:\n # yaw = 180 - (math.atan2(mag_y,mag_x))*(180/np.pi)\n # if mag_x > 0 and mag_y < 0:\n # yaw = - (math.atan2(mag_y,mag_x))*(180/np.pi)\n # if mag_x > 0 and mag_y > 0:\n # yaw = 360 - (math.atan2(mag_y,mag_x))*(180/np.pi)\n # if mag_x == 0 and mag_y < 0:\n # yaw = 90\n # if mag_x == 0 and mag_y > 0:\n # yaw = 270 \n\n # if mag_y > 0:\n # yaw = (math.atan2(mag_x,mag_y))*(180/np.pi)\n # if mag_y < 0:\n # yaw = 270 - (math.atan2(mag_x,mag_y))*(180/np.pi)\n # if mag_y == 0 and mag_x < 0:\n # yaw = 180\n # if mag_y == 0 and mag_x > 0:\n # yaw = 0\n else:\n yaw = 0\n\n return np.array([roll, pitch, yaw])", "title": "" }, { "docid": "2d4626022350d6f2c30a1186ce854ddd", "score": "0.564048", "text": "def transform(lon, lat, tilt):\r\n\tlon = math.radians(lon)\r\n\tlat = math.radians(lat)\r\n\ttilt = math.radians(tilt)\r\n\r\n\tsinalt = math.sin(lat)\r\n\tcosalt = math.cos(lat)\r\n\tsinazi = 
math.sin(lon)\r\n\tsintilt = math.sin(tilt)\r\n\tcostilt = math.cos(tilt)\r\n\r\n\tx = cosalt * sinazi * costilt\r\n\ty = sinalt * sintilt\r\n\tx -= y\r\n\ta1 = cosalt\r\n\ty = cosalt * math.cos(lon)\r\n\tl1 = angle(y, x)\r\n\ta1 = a1 * sinazi * sintilt + sinalt * costilt\r\n\ta1 = math.asin(a1)\r\n\r\n\treturn (math.degrees(l1), math.degrees(a1))", "title": "" }, { "docid": "1df1dbce08d593d4171d7ceae4d810d5", "score": "0.5624902", "text": "def plate_angle(self, radius):\n angle_guess = 0.0 * radius\n dist_guess = self.plate_dist(angle_guess) - radius\n while np.any(np.fabs(dist_guess) > 1E-8):\n derivative = (self.plate_dist(angle_guess+1E-5) -\n self.plate_dist(angle_guess))/1E-5\n delta_guess = - dist_guess/derivative\n angle_guess = angle_guess + delta_guess\n dist_guess = self.plate_dist(angle_guess) - radius\n return angle_guess", "title": "" }, { "docid": "410b463e53f3bf7edf049603686509ff", "score": "0.5623462", "text": "def angular_calibration(servo, motor, ang_z, lin_x):\n \n return 0.1*servo", "title": "" }, { "docid": "80e8bd43c9d2fb259f3557b49df9d8cc", "score": "0.55738574", "text": "def aug_translation(elf, img, angle, range_x = 50, range_y = 20, deg_per_pixel = 0.004):\n\t\ttx = np.random.uniform(-range_x, range_x)\n\t\tty = np.random.uniform(-range_y, range_y)\n\n\t\t#adjust steering angle according the horizontal shift\n\t\tangle += tx * deg_per_pixel\n\t\t\n\t\t# apply translation matrix\n\t\tM = np.float32([[1,0,tx],[0,1,ty]])\n\t\trows, cols = img.shape[:2]\n\t\ttranslated = cv2.warpAffine(img, M , (cols, rows))\n\t\treturn translated, angle", "title": "" }, { "docid": "c382681a004fb424f0d23b4bec3da706", "score": "0.5563793", "text": "def get_compass(x, y):\n return get_compass_bearing(x[::-1], y[::-1])", "title": "" }, { "docid": "c4e9805479475c2e88c7c61b97eec3dd", "score": "0.5524283", "text": "def Cartesian(self, BAT):\n # Arrange BAT coordinates in convenient arrays\n offset = 6 if len(BAT)==(3*self.molecule.numberOfAtoms()) else 0\n bonds = BAT[offset+3:self.ntorsions+offset+3]\n angles = BAT[self.ntorsions+offset+3:2*self.ntorsions+offset+3]\n phase_torsions = BAT[2*self.ntorsions+offset+3:]\n torsions = [(phase_torsions[n] + phase_torsions[self._firstTorsionInd[n]]) \\\n if self._firstTorsionInd[n]!=n else phase_torsions[n] \\\n for n in range(self.ntorsions)]\n \n # Determine the positions of the first three atoms\n p1 = Vector(0,0,0) # First atom at origin\n p2 = Vector(0,0,BAT[offset]) # Second atom along z-axis\n # Third atom in xz-plane\n sphere = Sphere(p2, BAT[offset+1])\n cone = Cone(p2, -p2, BAT[offset+2])\n plane = Plane(p1, Vector(0,1,0))\n p3 = sphere.intersectWith(cone).intersectWith(plane)[0]\n\n # If appropriate, rotate and translate the first three atoms\n if offset==6:\n p1 = np.array(p1)\n p2 = np.array(p2)\n p3 = np.array(p3)\n # Rotate the third atom by the appropriate value\n (phi,theta,omega) = BAT[3:6]\n co = np.cos(omega)\n so = np.sin(omega)\n Romega = np.array([[co, -so, 0],[so, co, 0], [0, 0, 1]])\n p3 = Romega.dot(p3)\n # Rotate the second two atoms to point in the right direction\n cp = np.cos(phi)\n sp = np.sin(phi)\n ct = np.cos(theta)\n st = np.sin(theta)\n Re = np.array([[cp*ct,-sp,cp*st],[ct*sp,cp,sp*st],[-st,0,ct]])\n p2 = Re.dot(p2)\n p3 = Re.dot(p3)\n # Translate the first three atoms by the origin\n origin = np.array(BAT[:3])\n p1 += origin\n p2 += origin\n p3 += origin\n \n self.root[0].setPosition(Vector(p1))\n self.root[1].setPosition(Vector(p2))\n self.root[2].setPosition(Vector(p3))\n\n # Add fourth and remaining atoms\n 
for ((a1,a2,a3,a4), bond, angle, torsion) in zip(self._torsionL,bonds,angles,torsions):\n sphere = Sphere(a2.position(), bond)\n cone = Cone(a2.position(), a3.position()-a2.position(), angle)\n plane123 = Plane(a4.position(), a3.position(), a2.position())\n points = sphere.intersectWith(cone).intersectWith(plane123)\n for p in points:\n if Plane(a3.position(), a2.position(), p).normal * plane123.normal > 0:\n break\n # The line points in the opposite direction to the ZMatrix constructor from\n # MMTK, but it seems to be correct\n p = rotatePoint(p, Line(a2.position(), a2.position()-a3.position()), torsion)\n a1.setPosition(p)\n \n return self.universe.configuration().array", "title": "" }, { "docid": "87065dcd92dbadc9e49dec708af2e5c8", "score": "0.5522124", "text": "def draw_car(x,y,deg):\n p1 = np.array([[int(-car_width/2)],[int(-car_length/2)]])\n p2 = np.array([[int(-car_width/2)],[int(car_length/2)]])\n p3 = np.array([[int(car_width/2)],[ int(car_length/2)]])\n p4 = np.array([[int(car_width/2)],[int(-car_length/2)]])\n\n R = np.array([[math.cos(rad(deg)),-math.sin(rad(deg))],\n [math.sin(rad(deg)),math.cos(rad(deg))]])\n p1_=R.dot(p1)\n p2_=R.dot(p2)\n p3_=R.dot(p3)\n p4_=R.dot(p4)\n\n #print('pts=',pts)\n #pts = pts.reshape((-1, 1, 2))\n #cv2.polylines(img, [pts], True, (255, 255, 0), 4)\n #print(p1_[0])\n new = np.array([[int(p1_[0])+x,int(p1_[1])+y],\n [int(p2_[0])+x,int(p2_[1])+y],\n [int(p3_[0])+x,int(p3_[1])+y],\n [int(p4_[0])+x,int(p4_[1])+y]])\n new = new.reshape((-1, 1, 2))\n cv2.polylines(img, [new], True, (255, 255, 255), 4)\n cv2.line(img, (int(p1_[0])+x,int(p1_[1])+y), (int(p4_[0])+x,int(p4_[1])+y), (200, V2, 255), 10)\n cv2.line(img, (int(p2_[0])+x,int(p2_[1])+y), (int(p3_[0])+x,int(p3_[1])+y), (200, V1, 255), 10)\n cv2.putText(img, 'wheel_1_speed=', (10, 40), cv2.FONT_HERSHEY_PLAIN,\n 1, (0, 255, 255), 1, cv2.LINE_AA)\n cv2.putText(img, str(V2), (190, 40), cv2.FONT_HERSHEY_PLAIN,\n 1, (0, 255, 255), 1, cv2.LINE_AA)\n cv2.putText(img, 'wheel_2_speed=', (10, 100), cv2.FONT_HERSHEY_PLAIN,\n 1, (0, 255, 255), 1, cv2.LINE_AA)\n cv2.putText(img, str(V1), (190, 100), cv2.FONT_HERSHEY_PLAIN,\n 1, (0, 255, 255), 1, cv2.LINE_AA)", "title": "" }, { "docid": "4eb3923648df357b6ac87183b4394a50", "score": "0.55207074", "text": "def __call__(self, theta, phi, f):\r\n # theta: horizontal angle between travelling direction of vehicle and reciever (0 degrees means receiver in front of vehicle)\r\n # phi: vertical angle between travelling direction of vehicle and receiver (0 degrees means receiver in front of vehicle)\r\n # NOTE: for readability reasons, other symbols are used than in the Imagine document\r\n # restrict the angles to the correct range\r\n while theta < 0.0:\r\n theta += 360.0\r\n while theta > 360.0:\r\n theta -= 360.0\r\n while phi < -180.0:\r\n phi += 360.0\r\n while phi > 180.0:\r\n phi -= 360.0\r\n # enforce horizontal and vertical symmetry\r\n if theta > 180.0:\r\n theta = 360.0 - theta\r\n if phi < 0.0:\r\n phi = -phi\r\n if phi > 90.0:\r\n phi = 180.0 - phi\r\n # calculate shorthands\r\n thetaRad = (numpy.pi/180.0)*theta\r\n phiRad = (numpy.pi/180.0)*phi\r\n pi2theta = numpy.pi/2.0 - thetaRad\r\n sinpi2theta = numpy.sin(pi2theta)\r\n sqrtcosphi = numpy.sqrt(numpy.cos(phiRad))\r\n # calculate horizontal directivity (formulas from Harmonoise model, depending on source height, independent of vehicle category)\r\n if self.h == 0.01:\r\n # horn effect, only for certain frequency range\r\n if (1600.0 <= f) and (f <= 6300.0):\r\n horizontal = (-1.5 + 2.5*abs(sinpi2theta)) * 
sqrtcosphi\r\n else:\r\n horizontal = 0.0\r\n elif self.h == 0.30:\r\n horizontal = 0.0\r\n elif self.h == 0.75:\r\n # screening by the body of the heavy vehicle\r\n horizontal = (1.546*(pi2theta**3) - 1.425*(pi2theta**2) + 0.22*pi2theta + 0.6) * sqrtcosphi\r\n else:\r\n raise Exception('no directivity defined for sources at height %.2f' % self.h)\r\n # calculate vertical directivity (approximations from Imagine model, depending on vehicle category, independent of source height)\r\n if self.cat == 1:\r\n vertical = -abs(phi/20.0) # phi in degrees\r\n elif self.cat in (2, 3):\r\n vertical = -abs(phi/30.0)\r\n else:\r\n vertical = 0.0 # no corrections for categories 4 and 5\r\n return horizontal + vertical", "title": "" }, { "docid": "2c671bffcf7a69e03acf8072e1666404", "score": "0.5514592", "text": "def get_outer_angle(angle):\n return 180 - angle", "title": "" }, { "docid": "239ddd9276fbb8d5b6140d321f76efdb", "score": "0.55105376", "text": "def map_angle(a):\n if a < -pi:\n return a + tau\n if a > pi:\n return a - tau\n\n return a", "title": "" }, { "docid": "6e98d1777d0ff97b562c3b353173fa56", "score": "0.547807", "text": "def compass(angle):\r\n c = np.array(['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE',\r\n 'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW', 'N'])\r\n a = np.arange(11.25, 360., 22.5)\r\n if isinstance(angle, (float, int, list, np.ndarray)):\r\n angle = np.atleast_1d(angle)\r\n comp_dir = c[np.digitize(angle, a)]\r\n if len(comp_dir) == 1:\r\n comp_dir[0]\r\n return comp_dir", "title": "" }, { "docid": "9ff8217e7e81a93d5d1af59db3142cc6", "score": "0.5469304", "text": "def output2cart(input):\r\n pan, tilt, output = input.T\r\n # These values were found with MATLAB's cf tool to translate output to actual\r\n # distance. They fit the exponential below.\r\n a = 1736 \r\n b = -0.8824\r\n distance = (output/a)**(1/b)\r\n # Spherical polar coordinates to cartesian coordinates\r\n x = distance * np.cos(np.radians(pan)) * np.sin(np.radians(180 - tilt))\r\n y = distance * np.sin(np.radians(pan)) * np.sin(np.radians(180 - tilt))\r\n z = distance * np.cos(np.radians(180 - tilt))\r\n return x,y,z", "title": "" }, { "docid": "a74cad04f958b3ae423a188ee21e98be", "score": "0.545639", "text": "def computeAngularOffset(desired_heading, current_angle):\n error = desired_heading - current_angle\n # Handle discontinuity at pi\n if error > math.pi:\n error = error - 2*math.pi\n elif error < -math.pi:\n error = error + 2*math.pi\n return error", "title": "" }, { "docid": "a6543459339d3eae75c60e355237c7ad", "score": "0.54555", "text": "def _azimuth2math(azimuth, elevation):\n theta = np.radians((90 - azimuth) % 360)\n phi = np.radians(90 - elevation)\n return theta, phi", "title": "" }, { "docid": "5183d06f6fb38903bc351efe4865aeda", "score": "0.5451052", "text": "def _get_desired_pose(self, idx):\r\n x, y = self._pos[idx]\r\n _, direction = decompose(self._vel[idx])\r\n theta = wrap_to_pi(np.arctan2(direction[1], direction[0]))\r\n return x, y, theta", "title": "" }, { "docid": "d9441cf48ea3e09fb5c04925b5810c30", "score": "0.5446732", "text": "def angsep(ra1,dec1,ra2,dec2):\n ra1*=d2r\n dec1*=d2r\n ra2*=d2r\n dec2*=d2r\n diffCosine=cos(dec1)*cos(dec2)*cos(ra1-ra2)+sin(dec1)*sin(dec2)\n dC='%.10f'%diffCosine#when the source is right at the center of the roi python sometimes adds extraneous digits at the end of the value i.e. 
instead of 1.0\n #it returns 1.0000000000000024, which throws an error with the acos function\n return acos(float(dC))/d2r #returns values between 0 and 180 degrees", "title": "" }, { "docid": "13d54a3ff904a602ae4116dd48bfa57f", "score": "0.5441058", "text": "def get_angled_point(angle, longueur, cx, cy):\n return (math.cos(angle)*longueur)+cx, (math.sin(angle)*longueur)+cy", "title": "" }, { "docid": "b3230f69cae252abe10db746246d30fc", "score": "0.54103196", "text": "def radec2xy(self, ra, dec):\n self._check_radec(ra, dec)\n\n object_theta = (90.0 - dec)*np.pi/180.0\n object_phi = ra*np.pi/180.0\n o_hat0 = np.sin(object_theta)*np.cos(object_phi)\n o_hat1 = np.sin(object_theta)*np.sin(object_phi)\n o_hat2 = np.cos(object_theta)\n\n tile_theta = (90.0 - self.dec)* np.pi/180.0\n tile_phi = self.ra * np.pi/180.0\n t_hat0 = np.sin(tile_theta)*np.cos(tile_phi)\n t_hat1 = np.sin(tile_theta)*np.sin(tile_phi)\n t_hat2 = np.cos(tile_theta)\n\n # We make a rotation on o_hat, so that t_hat ends up aligned with\n # the unit vector along z. This is composed by a first rotation around\n # z of an angle pi/2 - phi and a second rotation around x by an angle\n # theta, where theta and phi are the angles describin t_hat.\n\n costheta = t_hat2\n sintheta = np.sqrt(1.0-costheta*costheta) + 1E-12\n cosphi = t_hat0/sintheta\n sinphi = t_hat1/sintheta\n # First rotation, taking into account that cos(pi/2 -phi) =\n # sin(phi) and sin(pi/2-phi)=cos(phi)\n n_hat0 = sinphi*o_hat0 - cosphi*o_hat1\n n_hat1 = cosphi*o_hat0 + sinphi*o_hat1\n n_hat2 = o_hat2\n # Second rotation\n nn_hat0 = n_hat0\n nn_hat1 = costheta*n_hat1 - sintheta*n_hat2\n nn_hat2 = sintheta*n_hat1 + costheta*n_hat2\n # Now find the radius on the plate\n theta = np.sqrt(nn_hat0*nn_hat0 + nn_hat1*nn_hat1)\n radius = self.plate_dist(theta)\n x = radius * nn_hat0/theta\n y = radius * nn_hat1/theta\n return (x, y)", "title": "" }, { "docid": "b38305f04d9af852e9b8a62d6b6c4036", "score": "0.5397596", "text": "def optimal_angle_and_tilt(sensors_metadata_clean, latitude, solar_properties, max_rad_Whperm2yr, panel_properties,\n roof_coverage):\n # calculate panel tilt angle (B) for flat roofs (tilt < 5 degrees), slope roofs and walls.\n optimal_angle_flat_rad = calc_optimal_angle(180, latitude,\n solar_properties.trr_mean) # assume surface azimuth = 180 (N,E), south facing\n sensors_metadata_clean['tilt_deg'] = np.vectorize(acos)(sensors_metadata_clean['Zdir']) # surface tilt angle in rad\n sensors_metadata_clean['tilt_deg'] = np.vectorize(degrees)(\n sensors_metadata_clean['tilt_deg']) # surface tilt angle in degrees\n sensors_metadata_clean['B_deg'] = np.where(sensors_metadata_clean['tilt_deg'] >= 5,\n sensors_metadata_clean['tilt_deg'],\n degrees(optimal_angle_flat_rad)) # panel tilt angle in degrees\n\n # calculate spacing and surface azimuth of the panels for flat roofs\n module_length_m = panel_properties['module_length_m']\n optimal_spacing_flat_m = calc_optimal_spacing(solar_properties, optimal_angle_flat_rad, module_length_m)\n sensors_metadata_clean['array_spacing_m'] = np.where(sensors_metadata_clean['tilt_deg'] >= 5, 0,\n optimal_spacing_flat_m)\n sensors_metadata_clean['surface_azimuth_deg'] = np.vectorize(calc_surface_azimuth)(sensors_metadata_clean['Xdir'],\n sensors_metadata_clean['Ydir'],\n sensors_metadata_clean[\n 'B_deg']) # degrees\n\n # calculate the surface area required to install one pv panel on flat roofs with defined tilt angle and array spacing\n if panel_properties['type'] == 'PV':\n module_width_m = module_length_m # for PV\n 
else:\n module_width_m = panel_properties['module_area_m2'] / module_length_m # for FP, ET\n module_flat_surface_area_m2 = module_width_m * (sensors_metadata_clean.array_spacing_m / 2 +\n module_length_m * cos(optimal_angle_flat_rad))\n area_per_module_m2 = module_width_m * module_length_m\n\n # calculate the pv/solar collector module area within the area of each sensor point\n sensors_metadata_clean['area_installed_module_m2'] = np.where(sensors_metadata_clean['tilt_deg'] >= 5,\n sensors_metadata_clean.AREA_m2,\n roof_coverage * area_per_module_m2 *\n (sensors_metadata_clean.AREA_m2 /\n module_flat_surface_area_m2))\n\n # categorize the sensors by surface_azimuth, B, GB\n result = np.vectorize(calc_categoriesroof)(sensors_metadata_clean.surface_azimuth_deg, sensors_metadata_clean.B_deg,\n sensors_metadata_clean.total_rad_Whm2, max_rad_Whperm2yr)\n sensors_metadata_clean['CATteta_z'] = result[0]\n sensors_metadata_clean['CATB'] = result[1]\n sensors_metadata_clean['CATGB'] = result[2]\n\n return sensors_metadata_clean", "title": "" }, { "docid": "bb793f5213dbc643fd33395167713826", "score": "0.5389069", "text": "def pose_ahead(x, y, theta, meters_ahead):\n\n ahead_x = meters_ahead * np.cos(theta) + x\n ahead_y = meters_ahead * np.sin(theta) + y\n return ahead_x, ahead_y", "title": "" }, { "docid": "bdc71abc2f950845092950a3e0a6ce92", "score": "0.53877604", "text": "def cb_scan(self, data):\n self.target_pose = PoseWithCovarianceStamped()\n # Set max/min angle and increment\n scan_min = data.angle_min\n scan_max = data.angle_max\n scan_inc = data.angle_increment\n #now = rospy.get_rostime()\n scan_time = data.header.stamp.secs\n\n # Build angle array\n if self.physical_robot:\n y = np.arange(scan_min,scan_max,scan_inc)\n else:\n y = np.arange(scan_min,scan_max,scan_inc)\n #y = np.arange(scan_min,scan_max+0.01*scan_inc,scan_inc)\n\n # Pre-compute trig functions of angles\n ysin = np.sin(y)\n ycos = np.cos(y)\n\n # Apply a median filter to the range scans\n x = sg.medfilt(data.ranges,1)\n\n # Calculate the difference between consecutive range values\n x_diff1 = np.power(np.diff(x),2)\n\n # Convert range and bearing measurement to cartesian coordinates\n y_coord = x*ysin\n x_coord = x*ycos\n\n # Compute difference between consecutive values in cartesian coordinates\n y_diff = np.power(np.diff(y_coord),2)\n x_diff = np.power(np.diff(x_coord),2)\n\n # Compute physical distance between measurements\n dist = np.power(x_diff+y_diff,0.5)\n\n # Segment the LIDAR scan based on physical distance between measurements\n x2 = np.split(x,np.argwhere(dist>self.scan_dist_thresh))\n y2 = np.split(y,np.argwhere(dist>self.scan_dist_thresh))\n dist2 = np.split(dist,np.argwhere(dist>self.scan_dist_thresh))\n\n x_coord2 = np.split(x_coord,np.argwhere(dist>self.scan_dist_thresh))\n y_coord2 = np.split(y_coord,np.argwhere(dist>self.scan_dist_thresh))\n ran2 = np.split(data.ranges,np.argwhere(dist>self.scan_dist_thresh))\n\n bearing = np.array([0,0,0,0], dtype=np.float32)\n self.loc = 0\n for i in range(len(y2)):\n # Check if there are at least 4 points in an object (reduces noise)\n ylen = len(y2[i])-0\n dist2_sum = np.sum(dist2[i][1:-2])\n if ylen > self.ylen_lim and dist2_sum > self.dist_min and dist2_sum < self.dist_max and np.median(ran2[i]) <= self.max_range:\n x_pt = np.median(x_coord2[i])\n y_pt = np.median(y_coord2[i])\n if True:\n ang = np.median(y2[i])\n dis = np.median(x2[i])\n mn = min(x2[i][1:ylen])\n mx = max(x2[i][1:ylen])\n dis = ((x_pt**2+y_pt**2))**0.5\n\n if ang > self.ang_min and ang < 
self.ang_max:\n self.loc = 0.8*np.exp(-dis/20)\n c_mat = [[1,0],[0,1]]\n obs_cov = [[1/(1+10.0*self.loc),0],[0,1/(1+10.0*self.loc)]]\n obs_state = np.array([x_pt,y_pt])\n cov_temp = np.dot(np.dot(c_mat,self. old_cov),np.transpose(c_mat))+obs_cov\n cov_mult = np.dot(self.old_cov,np.transpose(c_mat))\n try:\n gain = np.dot(cov_mult,np.linalg.inv(cov_temp))\n except:\n gain = np.zeros_like(np.dot(cov_mult,cov_temp))\n new_cov = np.dot([[1,0],[0,1]]-np.dot(gain,c_mat),self.old_cov)\n new_state = self.old_state+np.dot(gain,obs_state-self.old_state)\n print(\"Obsx\\tObsy\\tOldx\\tOldy\\tNewx\\tNewy\")\n print(\"%.4f\\t%.4f\\t%.4f\\t%.4f\\t%.4f\\t%.4f\"%(x_pt,y_pt,self.old_state[0],self.old_state[1],new_state[0],new_state[1]))\n self.old_state = new_state\n sl = ylen\n p = PointStamped()\n p.point.x = x_pt\n p.point.y = y_pt\n p.point.z = 0.0\n #new_p = do_transform_point(p, self.R)\n new_p = p\n new_p.point.x = new_state[0] - 0.6\n new_p.point.y = new_state[1]\n new_p.point.z = 0.0\n u = (dist-self.xmin)/(self.xmax-self.xmin)*(self.umax-self.umin)+self.umin\n self.uvar = np.nanstd(u)**2\n #self.loc = (1-(dis-self.xmin)/(self.xmax-self.xmin))*(self.locmax-self.locmin)+self.locmin\n self.target_pose.pose.pose.position.x = new_p.point.x\n self.target_pose.pose.pose.position.y = new_p.point.y\n self.target_pose.pose.pose.position.z = self.loc\n print(ang,dis,x_pt,y_pt,new_p.point.x,new_p.point.y)\n cov = self.target_pose.pose.covariance\n if self.loc > 0.1:\n self.target_pose.pose.covariance[0] = 0.01*0.01/(1+10.0*self.loc) #self.uvar\n self.target_pose.pose.covariance[7] = 0.01*1.0/(1+10.0*self.loc) #self.uvar # Tomo asked me to change this value from (0.1*d) to be (1*d) [Tamer]\n else:\n self.target_pose.pose.covariance[0] = 10**0\n self.target_pose.pose.covariance[7] = 10**0\n print(dis,self.loc)\n else:\n pass\n #print('fail1')\n else:\n pass\n #print('fail2')\n else:\n pass\n #print('fail3',ylen,dist2_sum)\n h = std_msgs.msg.Header()\n h.stamp = rospy.Time.now()\n h.frame_id = 'base_link'\n self.target_pose.header = h\n if self.loc <= 0.1:\n self.target_pose.pose.covariance[0] = 10**0\n self.target_pose.pose.covariance[7] = 10**0\n\n # Publish bearing to ROS on topic /detection\n self.target_pub.publish(self.target_pose)\n\n pass", "title": "" }, { "docid": "faeadeb7b87cfcc92321f791343732c9", "score": "0.53868455", "text": "def comp_angle_opening(self):\n\n alpha0 = self.comp_angle_opening_magnet()\n alpha3 = self.W3\n\n Nmag = len(self.magnet)\n if Nmag > 0:\n return alpha0 * Nmag + alpha3 * (Nmag - 1)\n else:\n return 0", "title": "" }, { "docid": "fa081ae54a85149ac34481de036f9936", "score": "0.5383125", "text": "def calculate_delta_odometry(deltaMetersTraveled,currentAngle,current_RPM):\n global steering_state\n xtranslationComponent=0.0#meters\n ytranslationComponent=0.0#meters\n VelocityVector=0.0#radians\n avgDeltaMetersTraveled=0.0#meters\n if steering_state[\"point_turn\"] == True:\n print \"undefined_odom_state\"\n elif steering_state[\"translation\"] == True:#this also includes driving straight since we're translating straight\n avgDeltaMetersTraveled=(deltaMetersTraveled[0]+deltaMetersTraveled[1]+deltaMetersTraveled[2]+deltaMetersTraveled[3])/4\n VelocityVector=(currentAngle[0]+currentAngle[1]+currentAngle[2]+currentAngle[3])/4\n VelocityVector=np.deg2rad(VelocityVector)\n xtranslationComponent=math.sin(VelocityVector)*avgDeltaMetersTraveled\n ytranslationComponent=math.cos(VelocityVector)*avgDeltaMetersTraveled\n print \"translation\"\n elif steering_state[\"regular_turn\"] == 
True:\n #Idea here is to assume that translation is along the 0 degree line. This imaginary line is going to be tangent\n #to the circle traced by our turn radius. Knowing that, we can draw a triangle that represents the start+end point\n #of deltaMetersTraveled with the base being along the imaginary line and the angle between the base, center origin\n #point of Bender, and the hypotenuse which will be the actual distance traveled\n #average the angle of the first and second \"axles\" just find the mirror reflection after translation is subtracted out\n avgAngleFirstAxle=(currentAngle[0]+currentAngle[3])/2\n avgAngleSecondAxle=(currentAngle[1]+currentAngle[2])/2\n #again assume we are driving straight so just average them and ignore error\n avgTangentAngle=(avgAngleFirstAxle+avgAngleSecondAxle)/2\n avgTangentAngle=np.deg2rad(avgTangentAngle)\n xtranslationComponent = math.sin(VelocityVector) * avgDeltaMetersTraveled\n ytranslationComponent = math.cos(VelocityVector) * avgDeltaMetersTraveled\n print \"regular_turn\"\n elif steering_state[\"translation_and_rotation\"] == True:\n print \"undefined_odom_state\"\n else:#not moving\n print \"not_moving\"\n return xtranslationComponent,ytranslationComponent", "title": "" }, { "docid": "07c013c362e26c96d76c15c7effb04c4", "score": "0.5380968", "text": "def normalized_compass(starting_compass, turn_angle):\n starting_angle = (- starting_compass + 90.0)\n ending_angle = starting_angle + turn_angle\n if starting_angle > 360.0 or ending_angle > 360.0:\n starting_angle -= 360.0\n ending_angle -= 360.0\n elif starting_angle < -360.0 or ending_angle < -360.0:\n starting_angle += 360.0\n ending_angle += 360.0\n\n return starting_angle, ending_angle", "title": "" }, { "docid": "b4697f8630dabdaa63e2cba6de71a028", "score": "0.53758353", "text": "def rotation(self) -> int:", "title": "" }, { "docid": "aa778753fa149b17bec7918cb0bacb7f", "score": "0.53640544", "text": "def snap(self, angle, counter_weight):\r\n # Load data and compute center of mass\r\n points = COM.load_points(\"data.csv\")\r\n com = COM.compute_com(points)\r\n\r\n # compute torque\r\n torque = COM.compute_torque(com, angle) / 1000.0\r\n com_tilt = COM.compute_new_com(com, angle)\r\n \r\n # Add CounterMass\r\n counter_mass = COM.Point(-480, 0, counter_weight)\r\n torque_cw = COM.compute_torque(counter_mass, angle) / 1000.0\r\n torque += torque_cw\r\n\r\n # compute strut force\r\n struts = Struts.Struts([260,-430], [30,-300])\r\n struts.tilt(angle)\r\n attatch_point = struts.get_att()\r\n extend_length = struts.distance_extend()\r\n struts_force = struts.t2f(torque)\r\n\r\n return com_tilt, torque, attatch_point, extend_length, struts_force", "title": "" }, { "docid": "ad25b34be7b8d061ebf670c545a9b8f1", "score": "0.53613263", "text": "def aileron(self, angle):\n t = geo.rotationAroundAxis(angle, (0, 0, 1))\n self.heli.actOri = self.heli.actOri * t", "title": "" }, { "docid": "42621deb02a06d66fa0e222003ece812", "score": "0.53492206", "text": "def tilt_converter(x,y):\n return (np.sqrt(x**2+y**2), np.arctan2(-x,y))*u.deg", "title": "" }, { "docid": "cad452798ad4538a06d290a127338d2e", "score": "0.5345272", "text": "def calc_spacing_custom_angle(sensors_metadata_clean, solar_properties, max_rad_Whperm2yr, panel_properties,\n panel_tilt_angle, roof_coverage):\n\n # calculate panel tilt angle (B) for flat roofs (tilt < 5 degrees), slope roofs and walls.\n input_angle_rad = radians(panel_tilt_angle)\n sensors_metadata_clean['tilt_deg'] = np.vectorize(acos)(sensors_metadata_clean['Zdir']) # surface 
tilt angle in rad\n sensors_metadata_clean['tilt_deg'] = np.vectorize(degrees)(sensors_metadata_clean['tilt_deg']) # surface tilt angle in degrees\n sensors_metadata_clean['B_deg'] = np.where(sensors_metadata_clean['tilt_deg'] >= 5,\n sensors_metadata_clean['tilt_deg'],\n degrees(input_angle_rad)) # panel tilt angle in degrees\n\n # calculate spacing and surface azimuth of the panels for flat roofs\n module_length_m = panel_properties['module_length_m']\n optimal_spacing_flat_m = calc_optimal_spacing(solar_properties, input_angle_rad, module_length_m)\n sensors_metadata_clean['array_spacing_m'] = np.where(sensors_metadata_clean['tilt_deg'] >= 5, 0,\n optimal_spacing_flat_m)\n sensors_metadata_clean['surface_azimuth_deg'] = np.vectorize(calc_surface_azimuth)(sensors_metadata_clean['Xdir'],\n sensors_metadata_clean['Ydir'],\n sensors_metadata_clean[\n 'B_deg']) # degrees\n\n # calculate the surface area required to install one pv panel on flat roofs with defined tilt angle and array spacing\n if panel_properties['type'] == 'PV':\n module_width_m = module_length_m # for PV\n else:\n module_width_m = panel_properties['module_area_m2'] / module_length_m # for FP, ET\n module_flat_surface_area_m2 = module_width_m * (sensors_metadata_clean.array_spacing_m / 2 +\n module_length_m * cos(input_angle_rad))\n area_per_module_m2 = module_width_m * module_length_m\n\n # calculate the pv/solar collector module area within the area of each sensor point\n sensors_metadata_clean['area_installed_module_m2'] = np.where(\n sensors_metadata_clean['tilt_deg'] >= 5, sensors_metadata_clean.AREA_m2,\n area_per_module_m2 * (roof_coverage * sensors_metadata_clean.AREA_m2 / module_flat_surface_area_m2))\n\n # categorize the sensors by surface_azimuth, B, GB\n result = np.vectorize(calc_categoriesroof)(sensors_metadata_clean.surface_azimuth_deg, sensors_metadata_clean.B_deg,\n sensors_metadata_clean.total_rad_Whm2, max_rad_Whperm2yr)\n sensors_metadata_clean['CATteta_z'] = result[0]\n sensors_metadata_clean['CATB'] = result[1]\n sensors_metadata_clean['CATGB'] = result[2]\n\n return sensors_metadata_clean", "title": "" }, { "docid": "42b25664a30ac414399a86990a0f03d9", "score": "0.5333477", "text": "def __init__(self, position, orientation):\n\n self.position = np.array(position)\n self.orientation = np.array(orientation)\n\n if self.orientation.size == 2: # interpreted as a east, north component:\n self.orientation = self.orientation / np.linalg.norm(self.orientation)\n else: # interpreted as scalar\n assert self.orientation.size == 1, 'orientation must be either scalar or have 2 elements'\n self.orientation = np.array([np.cos(orientation * d2r), np.sin(orientation * d2r)])\n v = np.array([self.orientation[0], self.orientation[1], 0]).reshape((1, 3))\n\n self.lon0, self.lat0 = position\n\n # the z axis of local coordinat system described in geocentric coords:\n self.z = np.array([np.cos(self.lat0 * d2r) * np.cos(self.lon0 * d2r), \n np.cos(self.lat0 * d2r) * np.sin(self.lon0 * d2r),\n np.sin(self.lat0 * d2r)])\n\n # the x axis is the orientation described in ECEF coords:\n self.y = spherical.enu_to_ecef(v, np.array(self.lon0), np.array(self.lat0)).flatten()\n \n # the y axis completes the system:\n self.x = np.cross(self.y, self.z)\n \n # define rotation matrices for rotations between local and geocentric:\n self.R_geo2local = np.vstack((self.x, self.y, self.z)) # rotation matrix from GEO to rotated coords (ECEF)\n self.R_local2geo = self.R_geo2local.T # inverse", "title": "" }, { "docid": 
"0bf738fcbae1ca299aa28f708f6a1565", "score": "0.5330575", "text": "def Orient(orientation_system, component, to_marker, from_marker):", "title": "" }, { "docid": "f7d8e46ebd00a72548d084b173ccc87a", "score": "0.5325488", "text": "def setup_gas_injection_hardware_description_east(ods, pulse):\n\n i = 0\n\n def port_angle(port):\n \"\"\"\n Converts a port letter into a toroidal angle\n EAST has 16 segments. If A is segment 0, P is 15. Assumes port centers are equally spaced, which they appear to\n be, or at least nearly so, based on a drawing from the EAST handbook.\n :return: float\n Angle in radians\n \"\"\"\n # I am guessing a toroidal angle coordinate system. I could be wrong by an offset and a direction.\n offset = 0 # radians\n direction = 1 # +/- 1\n import string\n\n return string.ascii_lowercase.find(port.lower()) / 16.0 * 2 * np.pi * direction + offset\n return (ord(port.lower()) - ord('a')) / 16.0 * 2 * np.pi * direction + offset\n\n # OUPEV2\n # I think it's between probes 8 & 9. I am guessing. This gives R, Z\n # I think it's in port O\n pipe = ods['gas_injection']['pipe'][i]\n phi = port_angle('o')\n pipe['name'] = 'OUPEV2_{:03d}'.format(int(round(phi * 180 / np.pi)))\n pipe['exit_position']['r'] = 1.73 # m\n pipe['exit_position']['z'] = 1.057 # m\n pipe['exit_position']['phi'] = phi\n pipe['valve'][0]['identifier'] = 'OUPEV2'\n pipe['second_point']['phi'] = phi\n pipe['second_point']['r'] = 1.729\n pipe['second_point']['z'] = 1.05675\n i += 1\n\n # ODPEV2\n # It's in the lower divertor. I'll have to eyeball from a drawing. Also, I am guessing which tip it is.\n # I think it's in port O\n pipe = ods['gas_injection']['pipe'][i]\n phi = port_angle('o')\n pipe['name'] = 'ODPEV2_{:03d}'.format(int(round(phi * 180 / np.pi)))\n pipe['exit_position']['r'] = 1.811 # m\n pipe['exit_position']['z'] = -0.972 # m\n pipe['exit_position']['phi'] = phi\n pipe['valve'][0]['identifier'] = 'ODPEV2'\n pipe['second_point']['phi'] = phi\n pipe['second_point']['r'] = 1.806\n pipe['second_point']['z'] = -0.9715\n i += 1", "title": "" }, { "docid": "267d32ae882a90a61059b74cd6f966fa", "score": "0.532372", "text": "def plate_dist(self, theta):\n p = np.array([8.297E5, -1750.0, 1.394E4, 0.0])\n radius = 0.0\n for i in range(4):\n radius = theta*radius + p[i]\n return radius", "title": "" }, { "docid": "af9db404b71d3cc59ffb4dc48f84266d", "score": "0.53215325", "text": "def computeCoordinate(start, length, angle):\n pass", "title": "" }, { "docid": "e244d179eb1a5928e3baed015a871bb0", "score": "0.53187877", "text": "def locate(xn_pd, vel, xgsm, ygsm, zgsm):\n#\n#--- pd is the solar wind dynamic pressure (nano-pascals)\n#\n if vel < 0.0:\n pd = xn_pd\n else:\n pd = 1.94e-6 * xn_pd * vel**2\n#\n#--- ratio of pd to the average pressure, assumed as 2 nPa\n#\n rat = pd / 2.0\n#\n#--- the power in the scaling factor is the best-fit value obtained \n#--- from data in the t96_01 version of the model\n#\n rat16 = rat**0.14\n#\n#--- values of the magnetopuase parameters for pd = 2 nPa\n#\n a0 = 70.0\n s00 = 1.08\n x00 = 5.48 \n#\n#--- values f the magnetopause parameters, scaled to the actual pressure\n#\n a = a0 / rat16\n s0 = s00\n x0 = x00 / rat16\n#\n#--- the x-coordinate of the 'seam' between the ellipsoid and the cylinder\n#---- (ref: N.A.TSYGANENKO, SOLUTION OF CHAPMAN-FERRARO PROBLEM FOR AN \n#---- ELLIPSOIDAL MAGNETOPAUSE, PLANET.SPACE SCI., V.37, P.1037, 1989)\n#\n xm = x0 - a\n\n if (ygsm != 0.0) or (zgsm != 0.0):\n phi = math.atan2(ygsm, zgsm)\n else:\n phi = 0.0\n\n rho = math.sqrt(ygsm**2 + zgsm**2)\n\n 
if xgsm < xm: \n xmgnp = xgsm\n rhomgnp = a * math.sqrt(s0**2 - 1)\n ymgnp = rhomgnp * math.sin(phi)\n zmgnp = rhomgnp * math.cos(phi)\n dist = math.sqrt((xgsm - xmgnp)**2 + (ygsm - ymgnp)**2 + (zgsm - zmgnp)**2)\n\n if rhomgnp >= rho:\n xid = 1\n if rhomgnp < rho:\n xid = -1\n else:\n xksi = (xgsm - x0) / a + 1\n xdzt = rho / a\n sq1 = math.sqrt((1.0 + xksi)**2 + xdzt**2)\n sq2 = math.sqrt((1.0 - xksi)**2 + xdzt**2)\n sigma = 0.5 * (sq1 + sq2)\n tau = 0.5 * (sq1 - sq2)\n#\n#--- calcurate(x, y, z) for the closest point at the magnetpause\n#\n xmgnp = x0 - a * (1.0 - s0 * tau)\n rhomgnp = a * math.sqrt((s0**2 - 1) * (1.0 - tau**2))\n ymgnp = rhomgnp * math.sin(phi)\n zmgnp = rhomgnp * math.cos(phi)\n#\n#--- calculate the shortest distance between the point, xgsm, xgsm, zgsm\n#--- and the magnetpause\n#\n dist = math.sqrt((xgsm - xmgnp)**2 + (ygsm - ymgnp)**2 + (zgsm - zmgnp)**2)\n if sigma > s0:\n xid = -1\n if sigma <= s0:\n xid = 1\n\n return [xmgnp, ymgnp, zmgnp, dist, xid]", "title": "" }, { "docid": "4ced8847b954ecf59f601a7e52ea73ec", "score": "0.53166354", "text": "def coord(self, crds):\r\n i = self.iat[0] - 1\r\n j = self.iat[1] - 1\r\n k = self.iat[2] - 1\r\n return coordinates.Angle(crds,i,j,k)", "title": "" }, { "docid": "6db696a63cee5ebfd7c6888cd0f799f7", "score": "0.5306003", "text": "def get_perifocal():\n \n # 4.37 position array\n r_ar = np.zeros((len(system[\"Mass\"]),3))\n r_ar[:,0] = np.cos(a_long)\n r_ar[:,1] = np.sin(a_long)\n \n #4.38 velocity array\n v_ar = np.zeros((len(system[\"Mass\"]),3))\n v_ar[:,0] = -np.sin(a_long)\n v_ar[:,1] = system[\"Eccentricity\"] + np.cos(a_long)\n \n # equations 4.37 and 4.38 in OMES p. 173\n rp = np.zeros((len(system[\"Mass\"]),3))\n vp = np.zeros((len(system[\"Mass\"]),3))\n\n rp[:,0] = (h**2/mu) * (1/(1 + system[\"Eccentricity\"]*np.cos(a_long))) * r_ar[:,0]\n rp[:,1] = (h**2/mu) * (1/(1 + system[\"Eccentricity\"]*np.cos(a_long))) * r_ar[:,1]\n vp[1:,0] = (mu/h[1:]) * v_ar[1:,0]\n vp[1:,1] = (mu/h[1:]) * v_ar[1:,1]\n\n return rp, vp", "title": "" }, { "docid": "42189b0ca9d85c601271100017fb9601", "score": "0.5305806", "text": "def acceleration():\r\n divider = 1\r\n\r\n #x, y, z = struct.unpack('<hhh', self._read_register(REG_OUT_X_L | 0x80, 6))\r\n\r\n x = read_reg(0x28)\r\n y = read_reg(0x2A)\r\n z = read_reg(0x2C)\r\n\r\n # convert from Gs to m / s ^ 2 and adjust for the range\r\n #x = (x / divider) * 9.81\r\n #y = (y / divider) * 9.81\r\n #z = (z / divider) * 9.81\r\n\r\n return x, y, z", "title": "" }, { "docid": "7adf9450a727452a0874390656a52f03", "score": "0.5296217", "text": "def AR(self):\n X,Y = self.XY\n return atan(Y/(self.cog_distance - X))", "title": "" }, { "docid": "4addef26d618fffba1fb6c4f963967a4", "score": "0.5291016", "text": "def _calculate_bearing_offset(self, angle, bearingangleoffset):\n\n newbearing = angle\n newbearing -= bearingangleoffset\n if newbearing > 180:\n newbearing -= 360\n if newbearing < -180:\n newbearing += 360\n\n return newbearing", "title": "" }, { "docid": "8288bc5bceb247f829a95cdda024fea0", "score": "0.5286887", "text": "def init():\n angle = 90\n set_angle(angle)\n fs.distance \n bs.distance", "title": "" }, { "docid": "6e798140d34a3e0d7fe8fbc4709f4a44", "score": "0.52766854", "text": "def compass():\r\n state_changed = 0\r\n #sense.set_imu_config(True, False, False)\r\n while True:\r\n #orientation = sense.get_orientation_degrees()\r\n #degrees = orientation['yaw']\r\n degrees = sense.get_compass()\r\n print(degrees)\r\n if (degrees <= 15 or degrees >= 345) and state_changed != 1:\r\n 
sense.clear()\r\n sense.show_letter(\"N\")\r\n state_changed = 1\r\n elif degrees >= 15 and degrees < 30 and state_changed != 2:\r\n sense.clear()\r\n sense.show_letter(\"a\")\r\n state_changed = 2\r\n elif degrees >= 30 and degrees < 45 and state_changed != 3:\r\n sense.clear()\r\n sense.show_letter(\"b\")\r\n state_changed = 3\r\n elif degrees >= 45 and degrees < 60 and state_changed != 4:\r\n sense.clear()\r\n sense.show_letter(\"c\")\r\n state_changed = 4\r\n elif degrees >= 60 and degrees < 75 and state_changed != 5:\r\n sense.clear()\r\n sense.show_letter(\"d\")\r\n state_changed = 5\r\n elif degrees >= 75 and degrees < 105 and state_changed != 6:\r\n sense.clear()\r\n sense.show_letter(\"E\")\r\n state_changed = 6\r\n elif degrees >= 105 and degrees < 120 and state_changed != 7:\r\n sense.clear()\r\n sense.show_letter(\"f\")\r\n state_changed = 7\r\n elif degrees >= 135 and degrees < 150 and state_changed != 8:\r\n sense.clear()\r\n sense.show_letter(\"g\")\r\n state_changed = 8\r\n elif degrees >= 150 and degrees < 165 and state_changed != 9:\r\n sense.clear()\r\n sense.show_letter(\"i\")\r\n state_changed = 9\r\n elif degrees >= 165 and degrees < 195 and state_changed != 10:\r\n sense.clear()\r\n sense.show_letter(\"S\")\r\n state_changed = 10\r\n elif degrees >= 195 and degrees < 210 and state_changed != 11:\r\n sense.clear()\r\n sense.show_letter(\"k\")\r\n state_changed = 11\r\n elif degrees >= 225 and degrees < 240 and state_changed != 12:\r\n sense.clear()\r\n sense.show_letter(\"l\")\r\n state_changed = 12\r\n elif degrees >= 240 and degrees < 255 and state_changed != 13:\r\n sense.clear()\r\n sense.show_letter(\"m\")\r\n state_changed = 13\r\n elif degrees >= 255 and degrees < 285 and state_changed != 14:\r\n sense.clear()\r\n sense.show_letter(\"W\")\r\n state_changed = 14\r\n elif degrees >= 285 and degrees < 300 and state_changed != 15:\r\n sense.clear()\r\n sense.show_letter(\"o\")\r\n state_changed = 15\r\n elif degrees >= 300 and degrees < 315 and state_changed != 16:\r\n sense.clear()\r\n sense.show_letter(\"p\")\r\n state_changed = 16\r\n elif degrees >= 315 and degrees < 330 and state_changed != 17:\r\n sense.clear()\r\n sense.show_letter(\"q\")\r\n state_changed = 17\r\n elif degrees >= 330 and degrees < 345 and state_changed != 18:\r\n sense.clear()\r\n sense.show_letter(\"r\")\r\n state_changed = 18", "title": "" }, { "docid": "62a1bc38fb9e7cccc74d286667a40dc7", "score": "0.5273423", "text": "def copper():\n return Orientation.from_euler((90., 35.264, 45.))", "title": "" }, { "docid": "f6c9455c5ed89aaefb1bf9280e61d568", "score": "0.52713907", "text": "def _calculate_pitch(self, lat_sat, long_sat, alt_sat, lat_drone, long_drone, alt_drone):\n\n R = 6371000\n lat_sat = math.radians(lat_sat)\n lat_drone = math.radians(lat_drone)\n long_sat = math.radians(long_sat)\n long_drone = math.radians(long_drone)\n\n delta_long = long_drone - long_sat\n delta_lat = lat_drone - lat_sat\n\n delta_alt = alt_drone - alt_sat\n a = math.pow(math.sin(delta_lat / 2), 2) + math.cos(lat_sat) * \\\n math.cos(lat_drone) * math.pow(math.sin(delta_long / 2), 2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = R * c\n pitch_angle = math.atan2(delta_alt, d)\n\n pitch_angle = math.degrees(pitch_angle)\n\n return pitch_angle", "title": "" }, { "docid": "cca19661ef79d2cbee4ce04648c20524", "score": "0.5257532", "text": "def angle_value(x_value, x_size, unview, in_view):\r\n\tx_ang = (x_value/x_size)*in_view + unview\r\n\r\n\treturn x_ang", "title": "" }, { "docid": 
"d3564fcd554d3014635b286715c0c9b3", "score": "0.5251465", "text": "def cylindric_to_spheric(height, angle):\n longitude = angle - np.pi\n latitude = np.arcsin(2 * height - 1)\n\n return latitude, longitude", "title": "" }, { "docid": "bff8503bc4441cebb7da9146179cc744", "score": "0.52494633", "text": "def conv_ang(self):\n return arctan2(self.thin.loc[self.bound] - self.loc,self.thick.sep)", "title": "" }, { "docid": "68ac88e52c5a6af0bad5646b80a180b3", "score": "0.5246937", "text": "def update_angle(self) -> None:\n # Calculate angular velocity based on current heat\n heat_range = game_vars.current_heat / MAX_HEAT\n velocity_range = self.max_angle_vel - self.min_angle_vel\n angle_velocity = heat_range * velocity_range + self.min_angle_vel\n # You spin me right round\n self.angle += angle_velocity\n self.angle %= 360", "title": "" }, { "docid": "7b7181223541b0a3f70c399c4f1443ae", "score": "0.5246452", "text": "def sun_angle(self):\n angles = spa_python(self.date, self.loc[0], self.loc[1])\n self.altitude = angles['elevation'][0]\n azimuth = angles['azimuth'][0]\n # map astronomical to navigational az\n self.azimuth = azimuth + (azimuth < 0)*360-180\n \"\"\"\n old pysolar implementation\n #difference between astronomical azimuth and navigational azimuth\n azimuth = pysolar.solar.get_azimuth(*self.loc[:2],self.date)-180\n altitude = max(-1,pysolar.solar.get_altitude(*self.loc[:2],self.date))\n\n \"\"\"", "title": "" }, { "docid": "86ceb76a72748b2df275f7c799953c0f", "score": "0.5243984", "text": "def d_plate(self, samplesx, samplesy): \n dpx = (self.plate_centerx - samplesx)*self.ratio\n dpy = (self.plate_centery - samplesy)*self.ratio\n return dpx, dpy", "title": "" }, { "docid": "c2eef52bc91aff27167276b8cdfd4664", "score": "0.5241887", "text": "def xy2radec(self, x, y):\n self._check_radec(self.ra, self.dec)\n # The implementation of this function takes the same conventions\n # as radec2xy to make rotations, only that it happens in the reverse\n # order.\n #\n # This is the final position of the tile vector, which starts\n # parallel to z_hat.\n tile_theta = (90.0 - self.dec)*np.pi/180.0\n tile_phi = self.ra*np.pi/180.0\n t_hat0 = np.sin(tile_theta)*np.cos(tile_phi)\n t_hat1 = np.sin(tile_theta)*np.sin(tile_phi)\n t_hat2 = np.cos(tile_theta)\n # Define sin and cos of the angles for the final tile vector.\n costheta = t_hat2\n sintheta = np.sqrt(1.0-costheta*costheta) + 1E-10\n cosphi = t_hat0/sintheta\n sinphi = t_hat1/sintheta\n # Find the initial position of the object vector when the tile\n # starts parallel to z_hat.\n radius = np.sqrt(x*x + y*y)\n object_theta = self.plate_angle(radius)\n object_phi = np.arctan2(y, x)\n o_hat0 = np.sin(object_theta)*np.cos(object_phi)\n o_hat1 = np.sin(object_theta)*np.sin(object_phi)\n o_hat2 = np.cos(object_theta)\n # First rotation, around x by an angle -theta.\n n_hat0 = o_hat0\n n_hat1 = costheta*o_hat1 + sintheta*o_hat2\n n_hat2 = -sintheta*o_hat1 + costheta*o_hat2\n # Second rotation around z_hat by -(pi/2-phi), taking into\n # account that cos(pi/2 -phi) = sin(phi) and\n # sin(pi/2-phi)=cos(phi).\n nn_hat0 = sinphi*n_hat0 + cosphi*n_hat1\n nn_hat1 = -cosphi*n_hat0 + sinphi*n_hat1\n nn_hat2 = n_hat2\n # Convert from unit vectors to ra, dec.\n object_theta = np.arccos(nn_hat2)\n object_phi = np.arctan2(nn_hat1, nn_hat0)\n object_dec = 90.0 - (180.0/np.pi)*object_theta\n # Due to rounding imprecisions the remainder has to be taken\n # two times (!).\n object_ra = np.remainder((object_phi*180.0/np.pi), 360.0)\n object_ra = np.remainder(object_ra, 
360.0)\n self._check_radec(object_ra, object_dec)\n return (object_ra, object_dec)", "title": "" }, { "docid": "4b70472b558743cfe8e79480273b707c", "score": "0.524164", "text": "def gen_csl(theta, orientation_axis, boundary_plane, target_dir='./', gbid='0000000000',\n alat=2.83, chem_symbol='Fe', gb_type=\"tilt\"):\n grain = crystal(chem_symbol, [(0,0,0)], spacegroup=229,\n cellpar=[alat, alat, alat, 90, 90, 90], size=[10,10,10])\n grain_a = grain.copy()\n grain_b = grain.copy()\n for i in range(3):\n grain_a.positions[:,i] -= 10*alat/2.\n grain_b.positions[:,i] -= 10*alat/2.\n\n rotm = quat.rotation_matrix(theta, orientation_axis)\n rotquat = quat.quaternion_from_matrix(rotm).round(8)\n angle_str = str(round((theta*180./np.pi),2)).replace('.', '')\n\n#Truncate the angle string if it is greater than 4 characters\n if len(angle_str) > 4:\n angle_str = angle_str[:-1]\n elif len(angle_str) < 4:\n angle_str = angle_str + '0'\n\n print '\\t Grain Boundary ID: ', gbid\n print '\\t Rotation Axis: ', orientation_axis, 'Rotation Angle: ', (round(theta,6))*(180./np.pi), round(theta,4)\n print '\\t Rotation quaternion: ', rotquat\n print '\\t Integral Rotation quaternion: ', (1./(np.array([a for a in rotquat if a !=0]).min())*rotquat).round(5)\n print '\\t Boundary plane grain A coordinate system: ', boundary_plane.round(3)\n\n rotm = quat.rotation_matrix(theta, orientation_axis)\n rotmquat = (1./(np.array([a for a in rotquat if a !=0.]).min())*rotquat).round(5)\n\n if np.allclose(orientation_axis, [1,1,0]):\n planequat_1 = np.array([0,0,0,1])\n planequat_2 = np.array([0,1,-1,0])\n elif np.allclose(orientation_axis, [0,0,1]):\n planequat_1 = np.array([0,0,1,0])\n planequat_2 = np.array([0,1,0,0])\n elif np.allclose(orientation_axis, [1,1,1]):\n planequat_1 = np.array([0,1,-1,0])\n planequat_2 = np.array([0,1,1,-2])\n\n print '\\t These vectors satisfy the conditions for a Symmetric Boundary Plane: '\n n1 = quat.quaternion_multiply(planequat_1, rotmquat)\n n2 = quat.quaternion_multiply(planequat_2, rotmquat)\n comm_denom = []\n for a in n1:\n if (a%1).round(1) != 0:\n comm_denom.append(1./(np.abs(a)%1))\n comm_denom = np.array(comm_denom)\n if len(comm_denom)!=0:\n print '\\t {}'.format((comm_denom.max()*n1).round(2))\n print '\\t {}'.format((comm_denom.max()*n2).round(2))\n print '\\n'\n print '\\n'\n else:\n print '\\t', n1\n print '\\t', n2\n print '\\n'\n print '\\n'\n\n\n if theta != 0:\n m2 = 2*(1.+np.cos(theta))/(1.-np.cos(theta))\n else:\n m2 = 0\n\n if theta !=0:\n m = np.sqrt(m2).round(0)\n n = 1\n else:\n m = 0\n n = 0\n\n if gb_type==\"tilt\":\n csl_tilt_factory(orientation_axis, boundary_plane, m, n, gbid, grain_a, grain_b,\n theta=theta, target_dir=target_dir, gb_type=gb_type)\n elif gb_type==\"twist\":\n csl_twist_factory(orientation_axis, boundary_plane, gbid, target_dir, gb_type=gb_type)", "title": "" }, { "docid": "b327ae0ac28e93f9926b6f3809c6fad7", "score": "0.5240541", "text": "def ExtractPD (inUV, ant, err, PDVer=1, inc=1):\n ################################################################\n # Checks\n if not UV.PIsA(inUV):\n print \"Actually \",inUV.__class__\n raise TypeError,\"inUV MUST be a Python Obit UV\"\n \n e1 = []; e2 = []; o1 = []; o2 = []\n # Get table\n PDTab = inUV.NewTable(Table.READONLY,\"AIPS PD\",PDVer,err)\n # Open table, find antenna\n PDTab.Open(Table.READONLY, err)\n nrow = PDTab.Desc.Dict['nrow']\n for irow in range (1,nrow+1):\n row = PDTab.ReadRow(irow, err)\n if row['ANTENNA'][0]==ant:\n break\n for i in range(0,len(row['REAL 1']),inc):\n e = row['REAL 
1'][i]\n # Bad soln?\n if e==fblank:\n e1.append(fblank); e2.append(fblank); \n o1.append(fblank); o2.append(fblank); \n else:\n # Fold elipticities to range [-45, 45]\n e = math.degrees(e)\n o = math.degrees(row['IMAG 1'][i])\n if e>45.0:\n e = 90.0 - e\n o = -o\n if o>180.0:\n o -= 360.\n if o<-180.0:\n o += 360\n # Default values probably bad\n if e>=44.999 and o==0.0:\n e = fblank; o = fblank\n e1.append(e)\n o1.append(o)\n e = math.degrees(row['REAL 2'][i])\n o = math.degrees(row['IMAG 2'][i])\n if e<-45.0:\n e = -(90.0 + e)\n o = -o\n if o>180.0:\n o -= 360.\n if o<-180.0:\n o += 360\n if e<=-44.999 and o==0.0:\n e = fblank; o = fblank\n e2.append(e)\n o2.append(o)\n refant = row['REFANT'][0]\n ant = row['ANTENNA'][0]\n del row\n PDTab.Close(err)\n # Get frequencies\n f = GetFreqArr(inUV, err)\n # select by inc in GHz\n frqs = []\n for i in range(0,len(f),inc):\n frqs.append(f[i]*1.0e-9)\n return {'Elip1':e1, 'Ori1':o1, 'Elip2':e2, 'Ori2':o2, 'refant':refant, 'ant':ant, 'Freqs':frqs}", "title": "" }, { "docid": "019e7ea528ab6ffa8e744c601c2c25aa", "score": "0.523962", "text": "def odom_callback(self, odom):\n\tself.odom_time = odom.header.stamp.to_sec()\n X = ar([[odom.pose.pose.position.x], [odom.pose.pose.position.y], [odom.pose.pose.position.z]])\n q = Quaternion(odom.pose.pose.orientation.w, odom.pose.pose.orientation.x,\\\n odom.pose.pose.orientation.y, odom.pose.pose.orientation.z)\n R = q.rotation_matrix\n # ensure that the linear velocity is in inertial frame, ETHZ code multiply linear vel to rotation matrix\n V = ar([[odom.twist.twist.linear.x], [odom.twist.twist.linear.y], [odom.twist.twist.linear.z]])\n\n \n # CROSSCHECK AND MAKE SURE THAT THE ANGULAR VELOCITY IS IN INERTIAL FRAME...HOW?\n Omega = ar([[odom.twist.twist.angular.x], [odom.twist.twist.angular.y], [odom.twist.twist.angular.z]])\n #Omega = np.dot(R.T, Omega) # this is needed because \"vicon\" package from Upenn publishes spatial angular velocities\n\n \n \n t = float(self.odom_time - self.traj_start_time)\n\n if t <= self.traj_end_time[self.number_of_segments]: \n index = bisect.bisect(self.traj_end_time, t)-1\n else: \n index = bisect.bisect(self.traj_end_time, t)-2\n\n\n if index == -1: \n pass\n else: \n\n xx = np.poly1d(self.p1[index])\n vx = np.polyder(xx, 1); aax = np.polyder(xx, 2)\n jx = np.polyder(xx, 3); sx = np.polyder(xx, 4)\n\n yy = np.poly1d(self.p2[index])\n vy = np.polyder(yy, 1); aay = np.polyder(yy, 2)\n jy = np.polyder(yy, 3); sy = np.polyder(yy, 4)\n \n zz = np.poly1d(self.p3[index])\n vz = np.polyder(zz, 1); aaz = np.polyder(zz, 2)\n jz = np.polyder(zz, 3); sz = np.polyder(zz, 4)\n\n if t <= self.traj_end_time[self.number_of_segments]:\n if self.mode == 'hover' or self.mode == 'land': \n self.pdes = ar([[xx(t)], [yy(t)], [zz(t)]])\n else: \n self.pdes = ar([[xx(t)], [yy(t)], [zz(t)]])\n # needed to transform the trajectory back to cog\n #self.pdes = (np.dot(R.T, self.Rcg_vibase[0:3, 3:4]) + np.dot(self.Rcg_vibase[0:3, 0:3], self.pdes))\n self.vdes = ar([[vx(t)], [vy(t)], [vz(t)]])\n self.ades = ar([[aax(t)], [aay(t)], [aaz(t)]])\n self.jdes = ar([[jx(t)], [jy(t)], [jz(t)]])\n\n self.ddes = self.ddir#self.vdes/np.linalg.norm(self.vdes)#self.ddir\n else: \n if self.mode == 'hover' or self.mode == 'land':\n self.pdes = ar([[xx(self.traj_end_time[-1])], [yy(self.traj_end_time[-1])], [zz(self.traj_end_time[-1])]])\n else: \n self.pdes = ar([[xx(self.traj_end_time[-1])], [yy(self.traj_end_time[-1])], [zz(self.traj_end_time[-1])]])\n # needed to transform the trajectory back to cog\n #self.pdes = 
(np.dot(R.T,self.Rcg_vibase[0:3, 3:4]) + np.dot(self.Rcg_vibase[0:3, 0:3], self.pdes))\n \n self.vdes = ar([[vx(self.traj_end_time[-1])], [vy(self.traj_end_time[-1])], [vz(self.traj_end_time[-1])]])\n self.ades = ar([[aax(self.traj_end_time[-1])], [aay(self.traj_end_time[-1])], [aaz(self.traj_end_time[-1])]])\n self.jdes = ar([[jx(self.traj_end_time[-1])], [jy(self.traj_end_time[-1])], [jz(self.traj_end_time[-1])]])\n self.ddes = self.ddir#self.vdes/np.linalg.norm(self.vdes)#self.ddir\n\n if self.mode == 'hover':\n self.ddes = self.ddir\n\n current_time = time.time()-self.start_time\n if self.counter == 0: \n self.initial_odom_time = current_time\n\n\n Xd = self.pdes; Vd = self.vdes; ad = self.ades; b1d = self.ddes\n b1d_dot = ar([[0],[0],[0]]); ad_dot = self.jdes\n if self.controller == 1: # velocity controller\n ex = ar([[0],[0],[0]])\n else: \n ex = X-Xd\n \n ev = V-Vd\n \n #_b3c = -(mult(self.kx, ex) + mult(self.kv, ev)+ mult(self.ki, ei))/self.m + self.g*self.e[:,2][np.newaxis].T + ad # desired direction \n _b3c = -(mult(self.kx, ex) + mult(self.kv, ev))/self.m + self.g*self.e[:,2][np.newaxis].T + ad # desired direction\n b3c = ar(_b3c/np.linalg.norm(_b3c)) # get a normalized vector\n b2c = tp(np.cross(b3c.T,b1d.T)/np.linalg.norm(np.cross(b3c.T, b1d.T))) # vector b2d \n b1c = tp(np.cross(b2c.T, b3c.T))\n Rc = np.column_stack((b1c, b2c, b3c)) # desired rotation matrix`\n #qc = self.rotmat2quat(Rc)\n #qc = rowan.to_matrix(Rc)\n qc = self.rotmat2quat(Rc)\n \n #print 'qc', qc\n \n omega_des = q.inverse*qc\n \n \n self.f = self.m*np.dot(_b3c.T, tp(R[:,2][np.newaxis]))/self.norm_thrust_constant # normalized thrust \n \n #self.M = -(mult(self.kR, eR) + mult(self.kOmega, eOmega)) + tp(np.cross(Omega.T, tp(np.dot(self.J,Omega))))\n msg = AttitudeTarget()\n msg.header.stamp = odom.header.stamp\n msg.type_mask = 128 \n \n msg.body_rate.x = self.factor*np.sign(omega_des[0])*omega_des[1]\n msg.body_rate.y = self.factor*np.sign(omega_des[0])*omega_des[2]\n msg.body_rate.z = self.factor*np.sign(omega_des[0])*omega_des[3]\n \n msg.thrust = min(1.0, self.f[0][0])\n print 'thrust:', self.f*self.norm_thrust_constant, 'thrust_factor:', min(1.0, self.f[0][0])\n print 'omega_des',msg.body_rate.x, msg.body_rate.y, msg.body_rate.z\n print 'zheight', Xd[2][0]\n\n f1 = open(self.logpath +'trajectory_subscribed.txt', 'a')\n f1.write(\"%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s\\n\" % (index, self.traj_end_time[-1], t, Xd[0][0], Xd[1][0], Xd[2][0], Vd[0][0], Vd[1][0], Vd[2][0], ad[0][0], ad[1][0], ad[2][0]))\n f2 = open(self.logpath +'trajectory_followed.txt', 'a')\n f2.write(\"%s, %s, %s, %s, %s, %s\\n\" % (X[0][0], X[1][0], X[2][0], V[0][0], V[1][0], V[2][0]))\n\t f3 = open(self.logpath + 'commands.txt', 'a')\n\t f3.write(\"%s, %s, %s, %s\\n\" % (msg.thrust, msg.body_rate.x, msg.body_rate.y, msg.body_rate.z)) \n\n \n self.counter = self.counter+1\n self.pub.publish(msg)", "title": "" }, { "docid": "7357a42354af40f53903b3fca48d1b9b", "score": "0.5238408", "text": "def snap_to_45(angle):\n return int(round(angle / 45) * 45)", "title": "" }, { "docid": "e6e2583515f6e64fb6af432436c9a4d4", "score": "0.5237847", "text": "def update_position(self,delta_time):\r\n vel,angle_change=self.get_relative_change(delta_time)\r\n\r\n self.inner_angle+=angle_change\r\n self.inner_angle=self.inner_angle%360\r\n\r\n self.x,self.y=mt.cartesian(vel,self.drift_angle,(self.x,self.y))\r\n self.coordinates=self.generate_coordinates(self.radius,self.side_num)\r\n 
self.inner_coordinates=self.generate_coordinates(self.radius*(1-self.border),self.side_num)", "title": "" }, { "docid": "81637326b5ba4f50452e853c23653b59", "score": "0.5236754", "text": "def transform_to_azimuth(self,\r\n sensor_polar_phase,\r\n input_image,\r\n image_origin_x_pixels,\r\n image_origin_y_pixels,\r\n image_width_pixels,\r\n image_height_pixels,\r\n input_image_bit_depth,\r\n output_max_value):\r\n # type: (POLAR_PHASE, np.array, int, int, int, int, int, int) -> np.array\r\n try:\r\n output_buffer = np.zeros(shape=(image_width_pixels * image_height_pixels,), dtype=c_ushort)\r\n output_buffer_pointer = output_buffer.ctypes.data_as(POINTER(c_ushort))\r\n input_buffer_pointer = input_image.ctypes.data_as(POINTER(c_ushort))\r\n c_image_width = c_int(image_width_pixels)\r\n c_image_height = c_int(image_height_pixels)\r\n c_image_origin_x_pixels = c_int(image_origin_x_pixels)\r\n c_image_origin_y_pixels = c_int(image_origin_y_pixels)\r\n c_input_image_bit_depth = c_int(input_image_bit_depth)\r\n c_output_max_value = c_ushort(output_max_value)\r\n c_sensor_polar_phase = c_int(sensor_polar_phase)\r\n error_code = self._sdk.tl_polarization_processor_transform(self._polarization_processor_handle,\r\n c_sensor_polar_phase,\r\n input_buffer_pointer,\r\n c_image_origin_x_pixels,\r\n c_image_origin_y_pixels,\r\n c_image_width,\r\n c_image_height,\r\n c_input_image_bit_depth,\r\n c_output_max_value,\r\n None, # normalized_stokes_vector_coefficients_x2\r\n None, # total_optical_power\r\n None, # horizontal_vertical_linear_polarization\r\n None, # diagonal_linear_polarization\r\n output_buffer_pointer, # azimuth\r\n None) # DoLP\r\n if error_code != 0:\r\n raise PolarizationError(_create_c_failure_message(self._sdk, \"tl_polarization_processor_transform\",\r\n error_code))\r\n return output_buffer\r\n except Exception as exception:\r\n _logger.error(\"Could not transform to azimuth; \" + str(exception))\r\n raise exception", "title": "" }, { "docid": "f96dcfce2d516a99df086ee9e752c3bf", "score": "0.5236734", "text": "def tomo_angular_step(frame_width):\n return np.arctan(2.0 / frame_width.magnitude) * q.rad", "title": "" }, { "docid": "4d1b1bec3e829be14643f2e3e72e8ab6", "score": "0.5232493", "text": "def normalize_angles(self) -> None:\n self.tensor[:, 4] = (self.tensor[:, 4] + 180.0) % 360.0 - 180.0", "title": "" }, { "docid": "2252502a912fdc3f8cc4575136c654fe", "score": "0.52291954", "text": "def __computeLookUp__(self, resolution):\n bin_length = (self.max_range - self.min_range)/(self.num_bins + 0.0)\n beamwidth = (self.fov)/(self.num_beams+0.0)\n \n y0 = self.max_range*np.sin(self.fov/2.0) \n width = np.around(2*y0/resolution)\n yres = 2*y0/width\n self.width = int(width)\n \n x0 = self.min_range*np.cos(self.fov/2.0)\n x1 = self.max_range\n height = np.around((x1-x0)/resolution)\n xres = (x1-x0)/height\n self.height = int(height)\n \n print 'Resolution'\n print 'Desired:',resolution,', x:',xres,', y:',yres\n\n row_cart = np.arange(0,height)\n row_cart.shape = (height,1)\n row_cart = np.tile(row_cart, (1,width))\n x = x0 + xres*row_cart\n \n col_cart = np.arange(0,width)\n col_cart.shape = (1,width)\n col_cart = np.tile(col_cart, (height,1))\n y = -y0 + yres*col_cart \n \n (mag,angle) = cv2.cartToPolar(x.flatten(),y.flatten())\n\n angle[angle>np.pi]-=2*np.pi # convert from cv's [0,2pi] range to [-pi,pi]\n\n mag.shape = (height, width)\n angle.shape = (height, width)\n \n row_polar = mag # bin\n col_polar = angle # beam\n\n # distortion removal formula\n\n row_polar-=self.min_range\n 
row_polar = np.around(row_polar/bin_length)\n\n #col_polar+=self.fov/2.0\n \n # default conversion\n # col_polar = np.around(col_polar/beamwidth)\n # didson-specific converstion (compensates for angular distortion)\n a = np.array([0.0030, -0.0055, 2.6829, 48.04]) # angular distortion model coefficients\n col_polar = np.rad2deg(col_polar)\n col_polar = np.round(a[0]*np.power(col_polar,3) + a[1]*np.power(col_polar,2) + a[2]*col_polar+(1+a[3])+np.ones_like(col_polar)); \n \n col_polar[row_polar<0]=0\n row_polar[col_polar<0]=0\n col_polar[col_polar<0]=0\n row_polar[row_polar<0]=0\n\n col_polar[row_polar>=self.num_bins] = 0\n row_polar[col_polar>=self.num_beams] = 0\n row_polar[row_polar>=self.num_bins] = 0\n col_polar[col_polar>=self.num_beams] = 0\n \n self.row_polar = row_polar.astype(int)\n self.col_polar = col_polar.astype(int)\n self.row_cart = row_cart.astype(int)\n self.col_cart = col_cart.astype(int)", "title": "" }, { "docid": "70ac5690665951327c1d03b71db01831", "score": "0.52277315", "text": "def getTargetAngle(self, bs_patch, image, bbox, prev_angle):\n bbox = np.array(bbox).astype(int)\n max_loc = bbox[2:] // 2\n\n def dist(pt1, pt2):\n return np.linalg.norm(np.array(pt1) - np.array(pt2))\n\n def getMeanValueFromArea(patch):\n tmp_patch = patch.astype(float)\n tmp_patch = tmp_patch[..., 2] / np.sum(tmp_patch, axis=2)\n s_patch = (tmp_patch * 255).astype(np.uint8)\n ret2, s_patch = cv2.threshold(s_patch, 0, 255, cv2.THRESH_TOZERO+cv2.THRESH_OTSU)\n if s_patch is None: # TODO: check why is None\n return 0\n s_area = s_patch[s_patch > 0]\n if s_area.size:\n return np.mean(s_area)\n else:\n return 0\n\n def findColorLoc(patch, bbox):\n bbox = np.array([np.min(bbox[:, 0]), np.min(bbox[:, 1]), np.max(bbox[:, 0]), np.max(bbox[:, 1])])\n bbox[2:] = bbox[2:] - bbox[:2]\n bbox[0] = max(0, bbox[0])\n bbox[1] = max(0, bbox[1])\n bbox = bbox.astype(int)\n if bbox[2] < bbox[3]:\n area1 = patch[bbox[1]:bbox[1]+bbox[3], bbox[0]:max_loc[0]] # left: 180\n area1 = getMeanValueFromArea(area1)\n area2 = patch[bbox[1]:bbox[1]+bbox[3], max_loc[0]:bbox[0]+bbox[2]] # right: 0\n area2 = getMeanValueFromArea(area2)\n angles = [180, 0]\n else:\n area1 = patch[bbox[1]:max_loc[1], bbox[0]:bbox[0]+bbox[2]] # upper: 270\n area1 = getMeanValueFromArea(area1)\n area2 = patch[max_loc[1]:bbox[1]+bbox[3], bbox[0]:bbox[0]+bbox[2]] # bottom: 90\n area2 = getMeanValueFromArea(area2)\n angles = [270, 90]\n return angles[np.argmax([area1, area2])]\n\n def refineAngle(patch, bbox, bbox_rect, max_loc):\n max_loc = np.array(max_loc)\n patch_h, patch_w = patch.shape[:2]\n\n _, (w, h), angle = bbox_rect\n box = cv2.boxPoints(bbox_rect)\n box = np.int0(box)\n patch_draw = patch.copy()\n cv2.drawContours(patch_draw, [box], 0, (0,0,255), 2)\n\n old_c = np.array([np.mean(bbox[:, 0]), np.mean(bbox[:, 1])])\n tmp = np.ones([4, 3], float)\n bbox = bbox + (max_loc - old_c)\n tmp[:, :2] = bbox\n \n M = cv2.getRotationMatrix2D(tuple(max_loc), angle0, 1)\n rotated_patch = cv2.warpAffine(patch, M, (patch_w, patch_h))\n bbox = M.dot(tmp.T).T\n selected_angle = findColorLoc(rotated_patch, bbox)\n\n return self.clip_angle(selected_angle + angle0)\n\n def final_process(angle0, angle1):\n if self.angles_distance(angle0, angle1) > THRESH_ANGLE_DISTANCE:\n if prev_angle is not None: \n if self.angles_distance(angle0, prev_angle) > \\\n self.angles_distance(angle1, prev_angle):\n return angle1\n else:\n return angle0\n else:\n return angle1\n elif prev_angle is None:\n return angle1\n else:\n return self.clip_angle(prev_angle + 
self.angles_difference(angle1, prev_angle) * GAIN)\n\n def pred_angle(patch):\n def preprocess(image):\n image = image / 255\n image -= image.mean()\n return image\n prob = ANGLE_MODEL.predict(np.array([preprocess(patch)]))[0]\n return MAJOR_ANGLES[np.argmax(prob)]\n\n patch_original = cropImage(image, bbox)\n if patch_original is None:\n return None, None\n\n tight_bbox, tight_rect = self.findTightBboxFromBS(bs_patch)\n if tight_bbox is None:\n return None\n\n\n self.cnn_pred = pred_angle(patch_original.copy())\n\n d1 = dist(tight_bbox[0], tight_bbox[1])\n d2 = dist(tight_bbox[1], tight_bbox[2])\n if d1 > d2:\n if abs(tight_bbox[1][0] - tight_bbox[2][0]) < 1e-10:\n slide = np.inf if tight_bbox[1][1] - tight_bbox[2][1] > 0 else -np.inf\n origin_angle_radian = np.arctan(slide)\n else:\n origin_angle_radian = np.arctan(float(tight_bbox[1][1] - tight_bbox[2][1]) / (tight_bbox[1][0] - tight_bbox[2][0]))\n origin_angle = origin_angle_radian / np.pi * 180\n else:\n if abs(tight_bbox[0][0] - tight_bbox[1][0]) < 1e-10:\n slide = np.inf if tight_bbox[0][1] - tight_bbox[1][1] > 0 else -np.inf\n origin_angle_radian = np.arctan(slide)\n else:\n origin_angle_radian = np.arctan(float(tight_bbox[0][1] - tight_bbox[1][1]) / (tight_bbox[0][0] - tight_bbox[1][0]))\n origin_angle = origin_angle_radian / np.pi * 180\n origin_angle = self.clip_angle(origin_angle)\n if self.angles_distance(self.cnn_pred, origin_angle) > THRESH_ANGLE_DISTANCE:\n angle0 = self.clip_angle(origin_angle + 180)\n else:\n angle0 = origin_angle\n\n angle1 = self.clip_angle(refineAngle(patch_original.copy(), tight_bbox, tight_rect, max_loc))\n return final_process(angle0, angle1)", "title": "" }, { "docid": "55574641c5bda88b8934c52cb6089bab", "score": "0.5227564", "text": "def position_angle(self):\n a, b = self._sky_paxes()\n a = list(a)\n a.pop(self.vaxis)\n return np.degrees(np.arctan2(a[0], a[1])) * u.degree", "title": "" }, { "docid": "252c28255548d695807eafd8ff0e5413", "score": "0.5222523", "text": "def _line_dir_(orig, dest):\r\n orig = np.atleast_2d(orig)\r\n dest = np.atleast_2d(dest)\r\n dxy = dest - orig\r\n ang = np.degrees(np.arctan2(dxy[:, 1], dxy[:, 0]))\r\n return ang", "title": "" }, { "docid": "8f143011a3c19fd0a3e75673ef3395ec", "score": "0.5219152", "text": "def _reduce_single_angle(self, scale=1):\n n_spectra = self.reflected_beam.n_spectra\n n_tpixels = np.size(self.reflected_beam.m_topandtail, 1)\n n_ypixels = np.size(self.reflected_beam.m_topandtail, 2)\n\n # calculate omega and two_theta depending on the mode.\n mode = self.reflected_beam.mode\n\n # we'll need the wavelengths to calculate gravity effects.\n wavelengths = self.reflected_beam.m_lambda\n m_twotheta = np.zeros((n_spectra, n_tpixels, n_ypixels))\n\n detector_z_difference = (\n self.reflected_beam.detector_z - self.direct_beam.detector_z\n )\n\n beampos_z_difference = (\n self.reflected_beam.m_beampos - self.direct_beam.m_beampos\n )\n\n Y_PIXEL_SPACING = self.reflected_beam.cat.qz_pixel_size[0]\n\n total_z_deflection = (\n detector_z_difference + beampos_z_difference * Y_PIXEL_SPACING\n )\n\n if mode in [\"FOC\", \"POL\", \"POLANAL\", \"MT\"]:\n # omega_nom.shape = (N, )\n omega_nom = np.degrees(\n np.arctan(total_z_deflection / self.reflected_beam.detector_y)\n / 2.0\n )\n\n \"\"\"\n Wavelength specific angle of incidence correction\n This involves:\n 1) working out the trajectory of the neutrons through the\n collimation system.\n 2) where those neutrons intersect the sample.\n 3) working out the elevation of the neutrons when they hit the\n 
sample.\n 4) correcting the angle of incidence.\n \"\"\"\n speeds = general.wavelength_velocity(wavelengths)\n collimation_distance = self.reflected_beam.cat.collimation_distance\n s2_sample_distance = (\n self.reflected_beam.cat.sample_distance\n - self.reflected_beam.cat.slit2_distance\n )\n\n # work out the trajectories of the neutrons for them to pass\n # through the collimation system.\n trajectories = find_trajectory(\n collimation_distance / 1000.0, 0, speeds\n )\n\n # work out where the beam hits the sample\n res = parabola_line_intersection_point(\n s2_sample_distance / 1000,\n 0,\n trajectories,\n speeds,\n omega_nom[:, np.newaxis],\n )\n intersect_x, intersect_y, x_prime, elevation = res\n\n # correct the angle of incidence with a wavelength dependent\n # elevation.\n omega_corrected = omega_nom[:, np.newaxis] - elevation\n\n m_twotheta += np.arange(n_ypixels * 1.0)[np.newaxis, np.newaxis, :]\n m_twotheta -= self.direct_beam.m_beampos[:, np.newaxis, np.newaxis]\n m_twotheta *= Y_PIXEL_SPACING\n m_twotheta += detector_z_difference\n m_twotheta /= self.reflected_beam.detector_y[\n :, np.newaxis, np.newaxis\n ]\n m_twotheta = np.arctan(m_twotheta)\n m_twotheta = np.degrees(m_twotheta)\n\n # you may be reflecting upside down, reverse the sign.\n upside_down = np.sign(omega_corrected[:, 0])\n m_twotheta *= upside_down[:, np.newaxis, np.newaxis]\n omega_corrected *= upside_down[:, np.newaxis]\n\n elif mode in [\"SB\", \"DB\"]:\n # the angle of incidence is half the two theta of the reflected\n # beam\n omega = (\n np.arctan(total_z_deflection / self.reflected_beam.detector_y)\n / 2.0\n )\n\n # work out two theta for each of the detector pixels\n m_twotheta += np.arange(n_ypixels * 1.0)[np.newaxis, np.newaxis, :]\n m_twotheta -= self.direct_beam.m_beampos[:, np.newaxis, np.newaxis]\n m_twotheta += detector_z_difference\n m_twotheta -= self.reflected_beam.detector_y[\n :, np.newaxis, np.newaxis\n ] * np.tan(omega[:, np.newaxis, np.newaxis])\n\n m_twotheta /= self.reflected_beam.detector_y[\n :, np.newaxis, np.newaxis\n ]\n m_twotheta = np.arctan(m_twotheta)\n m_twotheta += omega[:, np.newaxis, np.newaxis]\n\n # still in radians at this point\n # add an extra dimension, because omega_corrected needs to be the\n # angle of incidence for each wavelength. I.e. 
should be\n # broadcastable to (N, T)\n omega_corrected = np.degrees(omega)[:, np.newaxis]\n m_twotheta = np.degrees(m_twotheta)\n\n self.omega_corrected = omega_corrected\n self.m_twotheta = m_twotheta\n self.n_spectra = n_spectra", "title": "" }, { "docid": "a4aef479242ff937c8d0b6b3b7b27e5e", "score": "0.5218962", "text": "def ang2point(a,b):\n\n\txa=np.cos(rad*a[0])*np.cos(rad*a[1]) \n\tya=np.sin(rad*a[0])*np.cos(rad*a[1]) \n\tza=np.sin(rad*a[1]) \n\ta=[xa,ya,za]\n\t\n\txb=np.cos(rad*b[0])*np.cos(rad*b[1]) \n\tyb=np.sin(rad*b[0])*np.cos(rad*b[1]) \n\tzb=np.sin(rad*b[1])\n\tb=[xb,yb,zb]\n\tc=deg*np.arccos(np.dot(a,b))\n\tc=round(c,2)\n\treturn c", "title": "" }, { "docid": "32cd25c096e7dfed73cbb7957de53012", "score": "0.52189445", "text": "def from_my_angle(angle):\n return deg_to_rad(-angle + 180)", "title": "" }, { "docid": "9d2ba9e64bea3a90edd23e8645a5b02c", "score": "0.52087736", "text": "def update_position_odometry_based(self):\n\n # Shape feedback of sensors (distance measure should not be linear)\n # Closer to wall = exponentially higher sensor value\n # Far away from wall = 0 + 1 (we add 1 to prevent 0 multiplication when there is no wall in sight)\n def shape(x):\n exp = 2\n return ((Sensor.MAX_SENSOR_VALUE - x) ** exp) + 1\n\n sensor_values = [shape(sensor.value) for sensor in self.sensors]\n\n # Propagate ANN\n if self.use_nn:\n outputs = self.nn.propagate(sensor_values) * 5\n self.kinematical_parameters = outputs\n\n # Update position\n self.check_if_rotates()\n if self.is_rotating:\n omega = self.calculate_rate_of_rotation()\n ICC_x, ICC_y = self.get_ICC_coordinates()\n\n # Calculation of the position and angle with the forward kinematics\n rotation_matrix = np.array([[math.cos(omega), -math.sin(omega), 0],\n [math.sin(omega), math.cos(omega), 0],\n [0, 0, 1]])\n coordinate_vector = np.array([self.x - ICC_x, self.y - ICC_y, 0])\n rotation_origin_vector = np.array([ICC_x, ICC_y, omega])\n\n result = np.dot(rotation_matrix, coordinate_vector) + rotation_origin_vector\n self.x = result[0]\n self.y = result[1]\n self.theta -= result[2]\n # print(R)\n self.theta = check_periodicity(self.theta)\n else:\n self.x += self.kinematical_parameters[0] * math.cos(self.theta)\n self.y += self.kinematical_parameters[0] * math.sin(self.theta)", "title": "" }, { "docid": "6adfe41d673240f9319901114db71f6d", "score": "0.5204588", "text": "def get_pos(length, x, y, angle): \n degrees = angle\n to_rad = radians(degrees)\n x_pos = round(cos(to_rad)*length+x)\n y_pos = round(sin(to_rad)*length+y)\n\n return (x_pos,y_pos)", "title": "" }, { "docid": "ea756f1df95ad77ab1be085dc9dc8f59", "score": "0.5201505", "text": "def progress_angle(start_position_lla, center_position_lla, current_position_lla, is_clockwise=True):\n # First we create two vectors. 
The first vector points from the center of\n # the circle to the drone's start position called p_Center2Start_N.\n origin = NedFrame(center_position_lla)\n p_Center2Start_N = origin.find_ned(start_position_lla)\n\n # And the second vector points from the center of the circle to the drone's\n # current position, call it p_Center2Drone_N.\n p_Center2Drone_N = origin.find_ned(current_position_lla)\n\n # To get the angle between these vectors we can rearrange this formula that\n # uses the dot product:\n # theta = acos ( (x dot y) / (|x| * |y|) )\n numerator = np.dot(p_Center2Start_N, p_Center2Drone_N)\n denominator = magnitude3d(p_Center2Start_N) * magnitude3d(p_Center2Drone_N)\n theta = math.acos(numerator / denominator)\n\n # theta is an angle from 0 to pi radians\n # we want an angle from 0 to 2*pi\n # we need to find out if the drone is more than 90 degrees from the first\n # quarter of the circle.\n # We need to find the waypoint that's 25% around the circle\n # this point can be found using the cross_product(down, start) for \n # clockwise and cross_product(up, start) for counter-clockwise\n # let v = down if clockwise or up if not\n v = [0.0, 0.0, 1.0]\n if not is_clockwise:\n v = v * -1.0\n \n waypoint_25_percent = np.cross(v, p_Center2Start_N)\n # normalize\n waypoint_25_percent = waypoint_25_percent * (1.0 / magnitude3d(waypoint_25_percent))\n # scale to match the radius\n waypoint_25_percent_N = waypoint_25_percent * magnitude3d(p_Center2Start_N)\n\n # now we find the angle between our current position and the waypoint at 25%\n numerator = np.dot(p_Center2Drone_N, waypoint_25_percent_N)\n denominator = magnitude3d(p_Center2Drone_N) * magnitude3d(waypoint_25_percent_N)\n a = math.acos(numerator / denominator)\n\n # if a is bigger than pi / 2 then we are more than 50% around the circle\n if a > math.pi / 2.0:\n theta = 2*math.pi - theta\n return math.degrees(theta)", "title": "" }, { "docid": "11775ab30e276e00db59145b07d3729a", "score": "0.5196597", "text": "def position_angle(self):\n a, b = self._sky_paxes()\n return np.degrees(np.arctan2(a[0], a[1])) * u.degree", "title": "" }, { "docid": "1644b666f0b6c61f545ce1ec0351ef51", "score": "0.51920354", "text": "def cal_angle_data(self):\n angle_space = setting.environment_key['BORE_DISTORTION_ANGLE']\n angle_list = []\n angle = 0\n while True:\n if angle > 360:\n break\n angle_list.append(angle * math.pi / 180)\n angle += angle_space\n self.angle_list = angle_list\n for i in range(len(self.center)):\n temp = []\n for angle in angle_list:\n sum_delta_r = 0\n for j in range(self.fourier_order + 1):\n sum_delta_r += self.fourier_result[i][j][0] * math.cos(j * angle - self.fourier_result[i][j][1])\n temp.append(sum_delta_r / 2000)\n self.angle_data.append(temp)", "title": "" }, { "docid": "a4952bf2c1c72fe280ecf58741f3d3ce", "score": "0.5190048", "text": "def accelerometer(orientation):\n acceleration = SO3.multiply(SO3.inverse(orientation), Gravity)\n return acceleration", "title": "" }, { "docid": "6ccaffc5b576fd5b1c26bfe32ea2c28b", "score": "0.51859736", "text": "def _reduce_single_angle(self, scale=1):\n n_spectra = self.reflected_beam.n_spectra\n n_tpixels = np.size(self.reflected_beam.m_topandtail, 1)\n n_xpixels = np.size(self.reflected_beam.m_topandtail, 2)\n\n m_twotheta = np.zeros((n_spectra, n_tpixels, n_xpixels))\n\n detrot_difference = (\n self.reflected_beam.detector_z - self.direct_beam.detector_z\n )\n\n # difference in pixels between reflected position and direct beam\n # at the two different detrots.\n QZ_PIXEL_SPACING = 
self.reflected_beam.cat.qz_pixel_size[0]\n dy = self.reflected_beam.detector_y\n\n # convert that pixel difference to angle (in small angle approximation)\n # higher `som` leads to lower m_beampos. i.e. higher two theta\n # is at lower pixel values\n beampos_2theta_diff = -(\n self.reflected_beam.m_beampos - self.direct_beam.m_beampos\n )\n beampos_2theta_diff *= QZ_PIXEL_SPACING / dy[0]\n beampos_2theta_diff = np.degrees(beampos_2theta_diff)\n\n total_2theta_deflection = detrot_difference + beampos_2theta_diff\n\n # omega_nom.shape = (N, )\n omega_nom = total_2theta_deflection / 2.0\n omega_corrected = omega_nom[:, np.newaxis]\n\n m_twotheta += np.arange(n_xpixels * 1.0)[np.newaxis, np.newaxis, :]\n m_twotheta -= self.direct_beam.m_beampos[:, np.newaxis, np.newaxis]\n # minus sign in following line because higher two theta is at lower\n # pixel values\n m_twotheta *= -QZ_PIXEL_SPACING / dy[:, np.newaxis, np.newaxis]\n m_twotheta = np.degrees(m_twotheta)\n m_twotheta += detrot_difference\n\n # you may be reflecting upside down, reverse the sign.\n upside_down = np.sign(omega_corrected[:, 0])\n m_twotheta *= upside_down[:, np.newaxis, np.newaxis]\n omega_corrected *= upside_down[:, np.newaxis]\n\n self.omega_corrected = omega_corrected\n self.m_twotheta = m_twotheta\n self.n_spectra = n_spectra", "title": "" }, { "docid": "be0c8bf9188174160bacc41bea000eae", "score": "0.5185061", "text": "def get_polarization_psi(beam):\n# return 0.5 * np.arctan2(2.*beam.Jsp.real, beam.Jss-beam.Jpp) * 180 / np.pi\n return 0.5 * np.arctan2(2.*beam.Jsp.real, beam.Jss-beam.Jpp)", "title": "" }, { "docid": "6479ef903cfff8dd3b2b63e13fc33f2c", "score": "0.5182899", "text": "def bian_calibration_o32(metal_det):\n return (-1 / 0.59) * (metal_det - 8.54)", "title": "" }, { "docid": "fc8a335564a31b4f8bc61a9ec95e9bbf", "score": "0.51813895", "text": "def eckartRotate(self,pos,justO=False,cart=False,hydro=False,yz=False): # pos coordinates = walkerCoords numwalkersxnumAtomsx3\n nMolecules=pos.shape[0]\n # allEckVecs = np.zeros((nMolecules,3, 3))\n if self.name in ProtonatedWaterTrimer:\n self.refPos = self.pullTrimerRefPos(yz)\n else:\n if self.isotope == 'notDeuterated':\n self.refPos = self.pullTetramerRefPos(yz)\n else:\n self.refPos = self.pullTetramerRefPos(True)\n if len(pos.shape)<3:\n pos=np.array([pos])\n #Center of Mass\n print 'getting mass'\n mass=self.get_mass()\n print 'got mass, here it is', mass\n #com=np.dot(mass,pos)/np.sum(mass)\n\n if self.name in ProtonatedWaterTetramer:\n if justO: #the OOO plane\n self.refPos=self.refPos[:3]\n refCOM = np.dot(mass[:3], self.refPos) / np.sum(mass[:3]) # same as overal COM\n self.refPos-=refCOM\n com = np.dot(mass[:3],pos[:,:3])/np.sum(mass[:3])\n mass = mass[:3]\n pos = pos[:,:3,:]\n elif cart: #include central oxygen\n\n self.refPos = self.refPos[:4]\n com = np.dot(mass[:4], pos[:, :4]) / np.sum(mass[:4])\n refCOM = np.dot(mass[:4], self.refPos) / np.sum(mass[:4]) #same as overal COM\n self.refPos-=refCOM\n mass = mass[:4]\n pos = pos[:, :4, :]\n elif hydro:\n self.refPos = self.refPos[[4-1,13-1,11-1,12-1]]\n #rotate reference so that Z axis is along OOOO Plane\n\n com = np.dot(mass[[4-1,13-1,11-1,12-1]], pos[:, [4-1,13-1,11-1,12-1]]) / np.sum(mass[[4-1,13-1,11-1,12-1]])\n refCOM = np.dot(mass[[4-1,13-1,11-1,12-1]], self.refPos) / np.sum(mass[[4-1,13-1,11-1,12-1]]) # same as overal COM\n self.refPos -= refCOM\n mass = mass[[4-1,13-1,11-1,12-1]]\n pos = pos[:, [4-1,13-1,11-1,12-1],:]\n else:\n com = np.dot(mass, pos) / np.sum(mass)\n elif self.name in 
ProtonatedWaterTrimer:\n if justO or cart: # the OOO plane\n self.refPos = self.refPos[:3]\n refCOM = np.dot(mass[:3], self.refPos) / np.sum(mass[:3]) # same as overal COM\n self.refPos -= refCOM\n com = np.dot(mass[:3], pos[:, :3]) / np.sum(mass[:3])\n mass = mass[:3]\n pos = pos[:, :3, :]\n elif hydro:\n self.refPos = self.refPos[[3 - 1, 9-1,8-1,10-1]]\n # rotate reference so that Z axis is along OOOO Plane\n com = np.dot(mass[[3-1, 9-1,8-1,10-1]], pos[:, [3-1, 9-1,8-1,10-1]]) / np.sum(\n mass[[3-1, 9-1,8-1,10-1]])\n refCOM = np.dot(mass[[3-1, 9-1,8-1,10-1]], self.refPos) / np.sum(\n mass[[3-1, 9-1,8-1,10-1]]) # same as overal COM\n self.refPos -= refCOM\n mass = mass[[3-1, 9-1,8-1,10-1]]\n pos = pos[:, [3-1, 9-1,8-1,10-1], :]\n else:\n com = np.dot(mass, pos) / np.sum(mass)\n #First Translate:\n print 'shifting molecules'\n ShiftedMolecules=pos-com[:,np.newaxis,:]\n\n #Equation 3.1 in Eckart vectors, Eckart frames, and polyatomic molecules - James D. Louck and Harold W. Galbraith\n start = time.time()\n print 'starting mathy math'\n # myFF = np.zeros((len(ShiftedMolecules),3,3))\n # myF = np.zeros((len(ShiftedMolecules),3,3))\n #st=time.time()\n asdf = np.sum(ShiftedMolecules[:,:,:,np.newaxis]*self.refPos[np.newaxis,:,np.newaxis,:]*mass[np.newaxis,:,np.newaxis,np.newaxis],axis=1)\n\n myF = np.transpose(asdf,(0,2,1))\n myFF = np.matmul(myF,asdf)\n #If just planar, then we need to do this\n if justO or cart or hydro:\n if not yz:\n #myFF[:,-1,-1]=1.0\n myFF[:,-1]=np.cross(myFF[:,0],myFF[:,1])\n if yz:\n #myFF[:, 0, 0] = 1.0\n myFF[:,0]=np.cross(myFF[:,1],myFF[:,2])\n # test = np.copy(myFF)/1000.\n bigEvals,bigEvecs=la.eigh(myFF)\n # evatest, evetest= la.eigh(test)\n bigEvecsT=np.transpose(bigEvecs,(0,2,1))\n if np.all(np.around(bigEvals[:,0])==0.0) or np.all(np.around(bigEvals[:,1])==0.0) or np.all(np.around(bigEvals[:,2])==0.0):\n print 'DANGER: 0 EIGENVALUE, KILLING'\n stop\n # if np.sum(np.around(bigEvals[:,0])==0.0)+np.sum(np.around(bigEvals[:,1])==0.0)+np.sum(np.around(bigEvals[:,2])==0.0) != 0 :\n # print 'DANGER: 0 EIGENVALUE ONLY SOMEWHERE, KILLING'\n # stop\n invRootDiagF2 = 1.0 / np.sqrt(bigEvals)\n axaxxa=np.where(np.isnan(invRootDiagF2))\n if len(axaxxa[0]) > 0:\n kilkilkil\n # invRootDiagF2Test = 1.0 / np.sqrt(evatest)\n invRootF2=np.matmul(invRootDiagF2[:,np.newaxis,:]*-bigEvecs,-bigEvecsT,) #-bigEvecs\n #print myF\n eckVecs2 = np.matmul(np.transpose(myF,(0,2,1)),invRootF2)\n if not yz and (cart or hydro or justO):\n eckVecs2[:,:,-1]=np.cross(eckVecs2[:,:,0],eckVecs2[:,:,1])\n elif cart or hydro or justO:\n # tec=eckVecs2[:, 1]\n # test=np.cross(eckVecs2[:, :,1], eckVecs2[:, :,2])\n eckVecs2[:,:,0] = np.cross(eckVecs2[:,:,1], eckVecs2[:,:,2])\n print 'done'\n # plus=0\n # minus=0\n # mas = np.where(np.around(la.det(eckVecs2),10)==-1.0)\n # print 'wlks neg for mine'\n # print mas\n # if len(mas[0])!=0:\n # killList2=mas\n # #eckVecs2[mas] = np.negative(eckVecs2[mas])\n # minus = len(mas[0])\n #\n # else:\n # killList2=mas[0]\n #\n # plus=len(ShiftedMolecules)-minus\n # print 'Plus rotation: ',plus\n # print 'Inverted Rotation: ',minus\n killList2=0\n return com, eckVecs2 , killList2", "title": "" }, { "docid": "adfca883db3f47250b343fc09eeb853f", "score": "0.5178376", "text": "def rotate(self, rotation):\n\n # if mouse button pressed then check if stop angle is reached\n if self.mousePressed:\n self.rpm += 1\n\n # move needle\n self.angle = rotation\n\n self.mousePressed = False\n\n # rotate our image \n image = pygame.transform.rotozoom(self.base, np.degrees(self.angle), 
IMAGE_SCALE)\n\n # reset the center\n rect = image.get_rect()\n rect.center = (0, 0)\n \n return image, rect", "title": "" }, { "docid": "462fa76d2e694b91786a0c7eabf3cce8", "score": "0.51781607", "text": "def get_theta_gal(ra, dec, polang, aG=122.93200023, dG=27.12843):\n torad = np.pi/180.\n dG, aG = dG*torad, aG*torad\n dec, ra = dec*torad, ra*torad\n print(aG)\n print(dG)\n\n X = np.sin(aG - ra)\n Y = np.tan(dG)*np.cos(dec) - np.sin(dec)*np.cos(aG - ra)\n\n diff = np.arctan2(np.sin(aG - ra),\\\n np.tan(dG)*np.cos(dec) - np.sin(dec)*np.cos(aG - ra))\n\n theta_eq = polang*np.pi/180.\n #print(np.mean(polang), np.min(polang), np.max(polang))\n theta_gal = theta_eq + diff # + or - ??\n #print(theta_gal*180/np.pi)\n theta_gal[theta_gal<0.] += np.pi\n theta_gal[theta_gal>=np.pi] += -np.pi\n\n print(np.mean(diff*180/np.pi), np.min(diff)*180/np.pi, np.max(diff)*180/np.pi)\n print(np.mean(theta_gal)*180/np.pi, np.min(theta_gal)*180/np.pi, np.max(theta_gal)*180/np.pi)\n return(theta_gal, diff)#+45*np.pi/180)", "title": "" }, { "docid": "b64c29861b1c05557d5568f9b3a04e33", "score": "0.5177801", "text": "def radec2xy(telra, teldec, ra, dec):\n import numpy as np\n # Inclination is 90 degrees minus the declination in degrees\n dec = np.asarray(dec)\n inc = 90 - dec\n ra = np.asarray(ra)\n #inc = 90 - dec\n x0 = np.sin(np.radians(inc)) * np.cos(np.radians(ra))\n y0 = np.sin(np.radians(inc)) * np.sin(np.radians(ra))\n z0 = np.cos(np.radians(inc))\n coord = [x0, y0, z0]\n \n # Clockwise rotation around y axis by declination of the tile center\n decrotate = np.zeros(shape=(3,3))\n teldec_rad = np.radians(teldec)\n decrotate[0] = [np.cos(teldec_rad), 0, np.sin(teldec_rad)]\n decrotate[1] = [0, 1, 0]\n decrotate[2] = [-np.sin(teldec_rad), 0, np.cos(teldec_rad)]\n \n # Clockwise rotation around the z-axis by the right ascension of the tile center\n rarotate = np.zeros(shape=(3,3))\n telra_rad = np.radians(telra)\n rarotate[0] = [np.cos(telra_rad), np.sin(telra_rad), 0]\n rarotate[1] = [-np.sin(telra_rad), np.cos(telra_rad), 0]\n rarotate[2] = [0, 0, 1]\n \n coord1 = np.matmul(rarotate, coord)\n coord2 = np.matmul(decrotate, coord1)\n x = coord2[0]\n y = coord2[1]\n z = coord2[2]\n \n newteldec = 0\n newtelra = 0\n ra_rad = np.arctan2(y, x)\n dec_rad = (np.pi / 2) - np.arccos(z / np.sqrt((x**2) + (y**2) + (z**2)))\n radius_rad = 2 * np.arcsin(np.sqrt((np.sin((dec_rad - newteldec) / 2)**2) + ((np.cos(newteldec)) * np.cos(dec_rad) * (np.sin((ra_rad - newtelra) / 2)**2))))\n radius_deg = np.degrees(radius_rad)\n \n q_rad = np.arctan2(-z, -y)\n \n radius_mm = get_radius_mm(radius_deg)\n x_focalplane = radius_mm * np.cos(q_rad)\n y_focalplane = radius_mm * np.sin(q_rad)\n \n return x_focalplane, y_focalplane", "title": "" }, { "docid": "2002e0a7161207c050a142b8b0069fb1", "score": "0.5171642", "text": "def _atm_ref(al_d):\n if al_d > 20.0:\n zd_r = np.deg2rad(90.0 - al_d)\n else:\n zd_r = np.deg2rad(70.0)\n r0 = _air_idx()-1.0\n sh = air_sh\n R0 = (1.0 - sh)*r0 - sh*r0**2/2.0 + sh**2*r0*2.0\n R1 = r0**2/2.0 + r0**3/6.0 - sh*r0 - sh*r0**2*11.0/4.0 + sh**2*r0*5.0\n R2 = r0**3 - sh*r0**2*9.0/4.0 + sh**2*r0*3.0\n R = R0*np.tan(zd_r) + R1*(np.tan(zd_r))**3 + R2*(np.tan(zd_r))**5\n return np.rad2deg(R)", "title": "" }, { "docid": "8a998e9361e9e79c6456b3d1fcaf33a4", "score": "0.51650923", "text": "def port_angle(port):\n # I am guessing a toroidal angle coordinate system. 
I could be wrong by an offset and a direction.\n offset = 0 # radians\n direction = 1 # +/- 1\n import string\n\n return string.ascii_lowercase.find(port.lower()) / 16.0 * 2 * np.pi * direction + offset\n return (ord(port.lower()) - ord('a')) / 16.0 * 2 * np.pi * direction + offset", "title": "" }, { "docid": "6681ec40eb8580a9ed700381a14d6b2c", "score": "0.51578367", "text": "def _get_angle(self):\n x, y, sx, sy = self.coordinates\n angle = self.angle_of_line(np.array([x, y]),\n np.array([sx, sy]))\n # Correct for marker offset\n return (angle + self.offset_angle + 90) % 360", "title": "" }, { "docid": "67ea6e2c93a8e07a3ee092716839f48e", "score": "0.5150131", "text": "def voy(lr_angle):\n return -np.sin(np.radians(lr_angle))*9+np.cos(np.radians(lr_angle))*(12.+220.)", "title": "" }, { "docid": "e4fd257d55132d31c823ab1955fa1b95", "score": "0.51453745", "text": "def to_mercator(df, lon=\"lon\", lat=\"lat\"):\n k = 6378137\n df[\"x\"] = df[lon] * (k * np.pi/180.0)\n df[\"y\"] = np.log(np.tan((90 + df[lat]) * np.pi/360.0)) * k\n return df", "title": "" }, { "docid": "bc48d205cfff1a847c207142d9af810a", "score": "0.5142525", "text": "def gier(self, angle):\n t = geo.rotationAroundAxis(angle, (0, 1, 0))\n self.heli.actOri = self.heli.actOri * t", "title": "" }, { "docid": "4f52b3193b6bceeb39acff742fa8dd67", "score": "0.5142522", "text": "def xy2radec(telra, teldec, x, y):\n from math import atan2, acos, radians, degrees\n \n # radial distance on the focal plane in degrees\n r_deg = get_radius_deg(x, y)\n # q signifies the angle the position makes with the +x-axis of focal plane\n q = np.degrees(np.arctan2(y, x))\n \n coord = np.zeros(shape=(3,1))\n coord[0] = 1\n \n # Clockwise rotation around the z-axis by the radial distance to a point on the focal plane in radians\n zrotate = np.zeros(shape=(3,3))\n r_rad = radians(r_deg)\n zrotate[0] = [np.cos(r_rad), np.sin(r_rad), 0]\n zrotate[1] = [-np.sin(r_rad), np.cos(r_rad), 0]\n zrotate[2] = [0, 0, 1]\n \n # Counter-clockwise rotation around the x-axis\n xrotate = np.zeros(shape=(3,3))\n q_rad = radians(q)\n xrotate[0] = [1, 0, 0]\n xrotate[1] = [0, np.cos(q_rad), -np.sin(q_rad)]\n xrotate[2] = [0, np.sin(q_rad), np.cos(q_rad)]\n \n # Counter-clockwise rotation around y axis by declination of the tile center\n decrotate = np.zeros(shape=(3,3))\n teldec_rad = radians(teldec)\n decrotate[0] = [np.cos(teldec_rad), 0, -np.sin(teldec_rad)]\n decrotate[1] = [0, 1, 0]\n decrotate[2] = [np.sin(teldec_rad), 0, np.cos(teldec_rad)]\n \n # Counter-clockwise rotation around the z-axis by the right ascension of the tile center\n rarotate = np.zeros(shape=(3,3))\n telra_rad = radians(telra)\n rarotate[0] = [np.cos(telra_rad), -np.sin(telra_rad), 0]\n rarotate[1] = [np.sin(telra_rad), np.cos(telra_rad), 0]\n rarotate[2] = [0, 0, 1]\n \n coord1 = np.matmul(zrotate, coord)\n coord2 = np.matmul(xrotate, coord1)\n coord3 = np.matmul(decrotate, coord2)\n coord4 = np.matmul(rarotate, coord3)\n \n ra_rad = atan2(coord4[1], coord4[0])\n dec_rad = (np.pi / 2) - acos(coord4[2] / np.sqrt((coord4[0]**2) + (coord4[1]**2) + (coord4[2]**2)))\n ra_deg = degrees(ra_rad)\n dec_deg = degrees(dec_rad)\n # Value can be 360, which should be 0\n ra = ra_deg % 360\n return ra, dec_deg", "title": "" }, { "docid": "2923a5af768c9c87751b236bc1edf676", "score": "0.51423436", "text": "def calculate_compass_angle(pt1, pt2):\n delta_lon = pt2.lon - pt1.lon\n y = math.sin(math.radians(delta_lon)) * math.cos(math.radians(pt2.lat))\n x = math.cos(math.radians(pt1.lat)) * 
math.sin(math.radians(pt2.lat)) - math.sin(\n math.radians(pt1.lat)) * math.cos(math.radians(pt2.lat)) * math.cos(math.radians(delta_lon))\n angle = math.atan2(y, x)\n angle_deg = math.degrees(angle)\n compass_angle = (angle_deg + 360) % 360\n return compass_angle", "title": "" }, { "docid": "5d739757716ffbc00eefa0681229c6c8", "score": "0.5141926", "text": "def rotatStr(vibFreq,normalModes,polarTens,axialTens):\n \n nAtoms = int(len(axialTens))\n nModes = int(len(vibFreq))\n \n rotStren = np.zeros((2,nModes))\n dipStren = np.zeros((2,nModes))\n \n rotStren[0,:] = vibFreq\n dipStren[0,:] = vibFreq\n \n for nFreq in range(nModes):\n elTensor = np.zeros(3)\n mgTensor = np.zeros(3)\n \n for AAtom in range(nAtoms):\n SnA = normalModes[nFreq,AAtom*3:(AAtom+1)*3]\n \n for i in range(3):\n duidRA = polarTens[AAtom*3:(AAtom+1)*3,i] \n \n elTensor[i] = elTensor[i] + np.dot(duidRA, SnA)\n mgTensor[i] = mgTensor[i] + np.dot(axialTens[AAtom,i,:],SnA)\n \n rotStren[1,nFreq] = np.dot(elTensor[:], mgTensor[:])\n \n if vibFreq[nFreq] > 0.0:\n dipStren[1,nFreq] = np.dot(elTensor[:], elTensor[:]) \\\n / vibFreq[nFreq]\n else :\n dipStren[1,nFreq] = 0.0\n \n # Unit conversion for 10^{-44} esu^2 cm^2\n rotStren[1,:] *= 2586.254135396221624\n # Unit conversion for 10^{-40} esu^2 cm^2\n dipStren[1,:] *= 3892569.714886886399 \n return rotStren, dipStren", "title": "" }, { "docid": "71ae8462c5510bf961ef7b450a23e3db", "score": "0.514021", "text": "def VeraCiro13_acc(x, y, z, v_halo, phi, q1, q2, q3, qz, d, r_a):\n\n x = x * units.kpc\n y = y * units.kpc\n z = z * units.kpc\n v_halo = va_halo * units.km/units.s\n d = d * units.kpc\n\n\n C1, C2, C3 = constants_VC(x, y, z, phi, q1, q2)\n r_A = (x**2 + y**2 +z**2/qz**2)**0.5\n r_T = (C1*x**2 + C2*y**2 + C3*x*y + z**2/q3**2)**0.5\n r_til = r_A*(r_a + r_T)/(r_a + r_A)\n\n dr_dx = (((r_a*x/r_A + r_A*(2*C1*x + C3*y)/(2*r_T)) +\\\n r_T*x/r_A)*(r_a + r_A) - x*(r_a+r_T)/r_A)/\\\n (r_a + r_A)**2.0\n\n dr_dy = (((r_a*y/r_A + r_A*(2*C2*y + C3*x)/(2*r_T)) +\\\n r_T*y/r_A)*(r_a + r_A)- y*(r_a+r_T)/r_A) /\\\n (r_a + r_A)**2.0\n\n dr_dz = (((r_a*z/(q_z**2*r_A) + r_A*z/(r_T*q_3**2)) +\\\n r_T*z/(r_A*q_z**2))*(r_a + r_A)\\\n - z*(r_a+r_T)/(r_A*q_z**2)) / (r_a + r_A)**2.0\n\n ax = -2*v_halo**2 * r_til / (r_til**2 + d**2) * dr_dx\n ay = -2*v_halo**2 * r_til / (r_til**2 + d**2) * dr_dy\n az = -2*v_halo**2 * r_til / (r_til**2 + d**2) * dr_dz\n\n ax = ax.to(units.kpc/units.Gyr**2)\n ay = ay.to(units.kpc/units.Gyr**2)\n az = az.to(units.kpc/units.Gyr**2)\n\n return ax.value, ay.value, az.value", "title": "" }, { "docid": "1b09744a7dd7687c07b19ee00339f5dd", "score": "0.51395094", "text": "def local_viewing_angle(theta_i, phi_i, theta_v, phi_v, slope, aspect):\n # Local incident zenith angle\n mu_i = np.cos(theta_i) * np.cos(slope) + np.sin(theta_i) * \\\n np.sin(slope) * np.cos(phi_i - aspect)\n if mu_i < 0.000001: # Grazing rasante, instable\n mu_i = np.nan\n # Local viewing zenith angle\n mu_v = np.cos(theta_v) * np.cos(slope) + np.sin(theta_v) * \\\n np.sin(slope) * np.cos(phi_v - aspect)\n\n theta_i_eff = np.arccos(mu_i)\n theta_v_eff = np.arccos(mu_v)\n # Remove part of the polar representation that correspond to an observer behind the slope\n theta_v_eff = np.where(theta_v_eff > np.radians(90), np.nan, theta_v_eff)\n # Local relative azimuth angle (dumont et al.2011)\n mu_az_numerator = (np.cos(theta_v) * np.cos(theta_i) +\n np.sin(theta_v) * np.sin(theta_i) * np.cos(phi_v-phi_i)\n - mu_i * mu_v)\n mu_az_denominator = np.sin(theta_i_eff) * np.sin(theta_v_eff)\n # When illumination or 
observator is at nadir (in the new referential), set RAA to zero\n mu_az = np.where(mu_az_denominator != 0, np.divide(\n mu_az_numerator, mu_az_denominator), 0)\n\n np.clip(mu_az, -1, 1, out=mu_az) # Prevent from numerical instabilities around -1 and 1\n raa_eff = np.arccos(mu_az)\n return theta_i_eff, theta_v_eff, raa_eff", "title": "" }, { "docid": "8caa5d1b92b3e20d4d84713aa5d73669", "score": "0.5139508", "text": "def rotate(x, y, ang):\n x2 = x * np.cos(ang) - y * np.sin(ang)\n y2 = y * np.cos(ang) + x * np.sin(ang)\n return x2, y2", "title": "" }, { "docid": "709a11eeb1c944fde2a97050b2d6bb51", "score": "0.51352674", "text": "def refraction_corrected_elevation(elevation_angle):\n \n alpha_0d = math.degrees(elevation_angle)\n if alpha_0d>-0.56:\n r = 3.51561*(0.1594+0.0196*alpha_0d +0.00002*alpha_0d**2)/(1+0.505*alpha_0d +0.0845*alpha_0d**2)\n elif alpha_0d<=-0.56:\n r = 0.56\n if (alpha_0d+r)>90:\n return(math.pi/2)\n elif (alpha_0d+r<=90):\n return(math.radians(alpha_0d+r))", "title": "" }, { "docid": "d4a2b03952689cad4ecf66d355ec45cf", "score": "0.5135042", "text": "def to_cursor(self):\n cursor_loc = bpy.context.scene.cursor_location\n heading = self.cam.location - cursor_loc\n self.cam.location -= heading/2.0\n rot_quat = heading.to_track_quat('Z', 'X')\n self.cam.rotation_euler = rot_quat.to_euler()", "title": "" }, { "docid": "9c3aba963e5f6cbe206ec7f5010b5315", "score": "0.5131053", "text": "def get_detector_angles():\r\n with open('geom_xb.txt') as f:\r\n lines = f.readlines()\r\n \r\n theta, phi = np.zeros((162,)), np.zeros((162,))\r\n lines = [line.strip() for line in lines]\r\n for i in range(162):\r\n s = lines[i].split(',')\r\n theta[i] = float(s[2])\r\n phi[i] = float(s[3])\r\n return theta*np.pi/180, (phi+180)*np.pi/180", "title": "" }, { "docid": "85a60659c30e565cfc85f0a7af17da82", "score": "0.5125442", "text": "def update():\n \n # right_dist = rc_utils.get_lidar_closest_point(scan, (80,100))[1]\n # left_dist = rc_utils.get_lidar_closest_point(scan, (260,280))[1]\n angle = 0\n speed = 0\n # TODO: Follow the wall to the right of the car without hitting anything.\n\n scan = rc.lidar.get_samples()\n sectors = 10\n window_length = 180\n window_start = -90\n windows = [[window_start + w *window_length//sectors, window_start + (w+1) *window_length//sectors-1] for w in range(sectors)]\n \n sector_index = 0\n sector_distance = 0\n \n for i in range(len(windows)):\n temp = rc_utils.get_lidar_closest_point(scan, windows[i])[1]\n if temp > sector_distance:\n sector_index = i\n sector_distance = temp\n \n\n angle = (windows[sector_index][0] + windows[sector_index][1]) / 2\n angle = rc_utils.remap_range(angle, window_start, window_start+window_length, -1, 1, True) *2\n angle = rc_utils.clamp(angle,-1,1)\n \n speed = 0.85\n \n rc.drive.set_speed_angle(speed, angle)\n\n if rc.controller.is_down(rc.controller.Button.B):\n # print(\"Front:\", front_dist, \"Back:\", back_dist, \"Left:\", left_dist, \"Right\", right_dist)\n print(\"Angle:\", angle,)\n if rc.controller.is_down(rc.controller.Button.A):\n print(\"Windows:\", windows)", "title": "" } ]
0b92ad0a1411b245bfc5ee1adb466918
Sets the orph_acode of this AllClinicalEntityInner.
[ { "docid": "95cd10116ad403c0000388a633f69d17", "score": "0.8471786", "text": "def orph_acode(self, orph_acode: int):\n\n self._orph_acode = orph_acode", "title": "" } ]
[ { "docid": "070880d3e8e73536ab931ec7c6ecbd7b", "score": "0.68790334", "text": "def orph_acode(self) -> int:\n return self._orph_acode", "title": "" }, { "docid": "1e1c422fd06122e03b7e28f3701dd8b8", "score": "0.58737314", "text": "def alcohol_nambca_code(self, alcohol_nambca_code):\n self._alcohol_nambca_code = alcohol_nambca_code", "title": "" }, { "docid": "06435df472b8bf863a3fc3d6f2c91554", "score": "0.5687017", "text": "def alcohol_scc_code(self, alcohol_scc_code):\n self._alcohol_scc_code = alcohol_scc_code", "title": "" }, { "docid": "21d8a1200098a95a81c696e205e230db", "score": "0.5566365", "text": "def alcohol_upc_code(self, alcohol_upc_code):\n self._alcohol_upc_code = alcohol_upc_code", "title": "" }, { "docid": "f8e6bfb9049b99c84bbba39bae9f3cb7", "score": "0.5218967", "text": "def alcohol_unimerc_code(self, alcohol_unimerc_code):\n self._alcohol_unimerc_code = alcohol_unimerc_code", "title": "" }, { "docid": "f317aaf3da7d90732d6e83f6bbf664df", "score": "0.5136539", "text": "def code(self, code: \"str\"):\n if code is None:\n raise ValueError(\"Invalid value for `code`, must not be `None`\")\n self._attrs[\"code\"] = code", "title": "" }, { "docid": "f317aaf3da7d90732d6e83f6bbf664df", "score": "0.5136539", "text": "def code(self, code: \"str\"):\n if code is None:\n raise ValueError(\"Invalid value for `code`, must not be `None`\")\n self._attrs[\"code\"] = code", "title": "" }, { "docid": "cb8b388332d3e7c963cfbef4e07920c1", "score": "0.5116883", "text": "def epc_code(self, epc_code):\n\n self._epc_code = epc_code", "title": "" }, { "docid": "7d6caffecd7261dcacca94199d53b325", "score": "0.50558287", "text": "def code(self, code):\n if code is None:\n raise ValueError(\"Invalid value for `code`, must not be `None`\")\n\n self._code = code", "title": "" }, { "docid": "19023f187b9104c8b033848bf974157a", "score": "0.49451524", "text": "def setCode(self, code):\n if code < 0:\n return\n self._code = int(code)", "title": "" }, { "docid": "55697e995edf2684edcf32f69c699537", "score": "0.49120903", "text": "def code(self, code):\n self._code = code", "title": "" }, { "docid": "01a2ba6549dd7f426e233c6a137e164c", "score": "0.49069995", "text": "def code(self, code):\n\n self._code = code", "title": "" }, { "docid": "01a2ba6549dd7f426e233c6a137e164c", "score": "0.49069995", "text": "def code(self, code):\n\n self._code = code", "title": "" }, { "docid": "01a2ba6549dd7f426e233c6a137e164c", "score": "0.49069995", "text": "def code(self, code):\n\n self._code = code", "title": "" }, { "docid": "01a2ba6549dd7f426e233c6a137e164c", "score": "0.49069995", "text": "def code(self, code):\n\n self._code = code", "title": "" }, { "docid": "01a2ba6549dd7f426e233c6a137e164c", "score": "0.49069995", "text": "def code(self, code):\n\n self._code = code", "title": "" }, { "docid": "01a2ba6549dd7f426e233c6a137e164c", "score": "0.49069995", "text": "def code(self, code):\n\n self._code = code", "title": "" }, { "docid": "01a2ba6549dd7f426e233c6a137e164c", "score": "0.49069995", "text": "def code(self, code):\n\n self._code = code", "title": "" }, { "docid": "01a2ba6549dd7f426e233c6a137e164c", "score": "0.49069995", "text": "def code(self, code):\n\n self._code = code", "title": "" }, { "docid": "778c9e3271207d963f120da33653d604", "score": "0.48122263", "text": "def alcohol_country(self, alcohol_country):\n self._alcohol_country = alcohol_country", "title": "" }, { "docid": "283acf93992eee4bdcc5d9c89565877e", "score": "0.48021886", "text": "def __init__(self, _date: datetime=None, orph_acode: int=None, status: 
str=None, preferred_term: str=None, definition: str=None): # noqa: E501\n self.swagger_types = {\n '_date': datetime,\n 'orph_acode': int,\n 'status': str,\n 'preferred_term': str,\n 'definition': str\n }\n\n self.attribute_map = {\n '_date': 'Date',\n 'orph_acode': 'ORPHAcode',\n 'status': 'Status',\n 'preferred_term': 'Preferred term',\n 'definition': 'Definition'\n }\n self.__date = _date\n self._orph_acode = orph_acode\n self._status = status\n self._preferred_term = preferred_term\n self._definition = definition", "title": "" }, { "docid": "9bd0cae569b15661bce21176ee69afef", "score": "0.4791313", "text": "def iso6391_code(self, iso6391_code):\n\n self._iso6391_code = iso6391_code", "title": "" }, { "docid": "5f86cdda270307b7a016e73d76e70b73", "score": "0.47603232", "text": "def account_code(self, account_code):\n\n self._account_code = account_code", "title": "" }, { "docid": "87745d0599ccace28d225b869c38c53c", "score": "0.47354802", "text": "def setCode(self, code=''):\n self.code = code\n if self.rightChild != None: self.rightChild.setCode(code + '0')\n if self.leftChild != None: self.leftChild.setCode(code + '1')\n if self.isLeaf() and self.code == '': self.code = '0'", "title": "" }, { "docid": "1c55bcad940635793c6c8f2a04f1fa9a", "score": "0.46944872", "text": "def aluno_codigo_id(self):\n\t\treturn 'al-' + str(self.aluno_id)", "title": "" }, { "docid": "555d5982e912934b3cd436e600c5f690", "score": "0.46860704", "text": "def comp_code(self, comp_code):\n self._comp_code = comp_code", "title": "" }, { "docid": "ffb229a73e2f92b54ae3b0aac7906f44", "score": "0.46324164", "text": "def set_code(self, code):\n self.code = code\n if code >= 400:\n self.ranges = []\n elif code == 206 and len(self.ranges) == 0:\n self.code = 200", "title": "" }, { "docid": "ce945cd3485fc4fe22f5a1c8e4500db3", "score": "0.46277836", "text": "def code(self, code):\n if self.local_vars_configuration.client_side_validation and code is None: # noqa: E501\n raise ValueError(\"Invalid value for `code`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n code is not None and len(code) < 1):\n raise ValueError(\"Invalid value for `code`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._code = code", "title": "" }, { "docid": "52c033a345c84a5b1dd6093a9c4a084b", "score": "0.4618801", "text": "async def set_labs_auth_code(self, auth_code):\n pass", "title": "" }, { "docid": "b0a22b4a8232f98998abb275cd136b04", "score": "0.46065283", "text": "def iso6393_code(self, iso6393_code):\n\n self._iso6393_code = iso6393_code", "title": "" }, { "docid": "c8c2cc3bda28993b4d873bb67b17533f", "score": "0.45945907", "text": "def orcid(self, orcid):\n\n self._orcid = orcid", "title": "" }, { "docid": "2cd480f0e5b77b98260f1207398ba917", "score": "0.45885116", "text": "def country_code(self, country_code):\n\n self._country_code = country_code", "title": "" }, { "docid": "2cd480f0e5b77b98260f1207398ba917", "score": "0.45885116", "text": "def country_code(self, country_code):\n\n self._country_code = country_code", "title": "" }, { "docid": "a69c1cdf3a3aa6ce4317e642fdf887a8", "score": "0.4582707", "text": "def update_code(self, new_code):\n\n self.code = new_code", "title": "" }, { "docid": "a69c1cdf3a3aa6ce4317e642fdf887a8", "score": "0.4582707", "text": "def update_code(self, new_code):\n\n self.code = new_code", "title": "" }, { "docid": "a69c1cdf3a3aa6ce4317e642fdf887a8", "score": "0.4582707", "text": "def update_code(self, new_code):\n\n self.code = new_code", 
"title": "" }, { "docid": "dec149ddea5e45308746f73ac96d602a", "score": "0.45810592", "text": "def alcohol_nambca_code(self):\n return self._alcohol_nambca_code", "title": "" }, { "docid": "7ddfa925bf0ecd9ed873e95e8efd3b6d", "score": "0.45723182", "text": "def update_code(self, new_code):\n\n self.code = new_code\n # Fill in the rest", "title": "" }, { "docid": "d7036033ce28bf48dcfd3fc08a0918bb", "score": "0.45604983", "text": "def code(self, value):\n self._code = value", "title": "" }, { "docid": "8bbb90bc5351cd55df80744848ac15a3", "score": "0.45068902", "text": "def _aoLoadAthenaData(self, athenaData):\n # version only written on output, not on load\n # set at startup\n self.ao.audioChannels = copy.deepcopy(athenaData['audioChannels']) \n self.ao.audioRate = copy.deepcopy(athenaData['audioRate']) \n\n if athenaData['author'] != '': \n # set at startup\n self.ao.author = copy.deepcopy(athenaData['author']) \n self.ao.tniMode = copy.deepcopy(athenaData['tniMode']) \n self.ao.setEventMode(athenaData['activeEventMode'])\n # will set local orchestra as well", "title": "" }, { "docid": "de187ebb08682f38fe5039c063c0c7c9", "score": "0.44859058", "text": "def _init_code(self, code):\n\n try:\n code = code.upper()\n except AttributeError:\n raise TypeError('UNEXPECTED AIRPORT CODE: %s' % code)\n\n \"\"\"\n flight_codes = [flight['iata'] for flight in self._flight_json]\n\n if code not in flight_codes:\n \n # pdb.set_trace()\n raise AirportNotExistError('FLIGHT CODE NOT FOUND: %s' % code)\n \"\"\"\n\n return code", "title": "" }, { "docid": "a8f5553623d716259716c25e29518c5d", "score": "0.44462824", "text": "def auto_order_code(self, auto_order_code):\n\n self._auto_order_code = auto_order_code", "title": "" }, { "docid": "2d91f3527faca86b33e885a22a963dbd", "score": "0.44400245", "text": "def set_integrator_code(self, code):\n self._project.integrators_code = code", "title": "" }, { "docid": "708a3d91aeac1a0219408e72f8187852", "score": "0.44058383", "text": "def icmp(self, icmp_type_code: IPV4AclICMPTypeCodeEnum.IPV4AclICMPTypeCode = None, operation: OperationEnum.Operation = None):\n icmp_ = SubElement(self, 'ipv4-acl-cfg:icmp')\n\n if is_not_none(icmp_type_code):\n icmp_type_code_ = SubElement(icmp_, 'ipv4-acl-cfg:icmp-type-code')\n icmp_type_code_.text = icmp_type_code.value\n \n if is_not_none(operation):\n icmp_.set('xc:operation', operation.value)", "title": "" }, { "docid": "3f1847a582fef3eb66258744c3ff2c97", "score": "0.4346952", "text": "def result_code(self, result_code):\n\n self._result_code = result_code", "title": "" }, { "docid": "76416346cfe3966717eb17879aa8f487", "score": "0.43038067", "text": "def put_code(self, put_code):\n\n self._put_code = put_code", "title": "" }, { "docid": "1debae342644946493f38b6ac4ac15c6", "score": "0.42945632", "text": "def find_countries_by_code_or_income(self, area_code_or_income):\n area_code_or_income_upper = area_code_or_income.upper()\n area = self._db['areas'].find_one({\"$or\": [\n {\"iso3\": area_code_or_income},\n {\"iso3\": area_code_or_income_upper},\n {\"iso2\": area_code_or_income},\n {\"iso2\": area_code_or_income_upper},\n {\"name\": area_code_or_income}]})\n\n if area is None:\n # Find if code is an income code\n # TODO: This is not working, order by is needed on method call\n countries = self.find_countries_by_continent_or_income_or_type(area_code_or_income_upper)\n if countries is None:\n raise AreaRepositoryError(\"No countries for code \" + area_code_or_income)\n else:\n return countries\n\n self.set_continent_countries(area)\n 
self.area_uri(area)\n area[\"short_name\"] = area[\"name\"]\n\n return AreaDocumentAdapter().transform_to_area(area)", "title": "" }, { "docid": "08a6fd73fcf2851bb8f15e17db5ef2b1", "score": "0.42705694", "text": "def coupon_code(self, coupon_code):\n\n self._coupon_code = coupon_code", "title": "" }, { "docid": "5ae0bf75c892170f89966707d499ab87", "score": "0.42672956", "text": "def journal_code(self, journal_code):\n\n self._journal_code = journal_code", "title": "" }, { "docid": "1409216c16d35ce6db49bf5c456e4edd", "score": "0.42478308", "text": "def cod_amount(self, cod_amount):\n self._cod_amount = cod_amount", "title": "" }, { "docid": "f135e70fd341d7284890e49ddfb04388", "score": "0.42443255", "text": "def set_accounting_code(self, code):\n is_set = None\n try:\n self.logger.info('Start: set accounting code')\n if code != \"\":\n self._carrier_page.set_accounting_code(code)\n is_set = True\n except WebDriverException as exp:\n is_set = False\n self.logger.error(exp.msg)\n raise\n finally:\n self.logger.info('End: set accounting code')\n return is_set", "title": "" }, { "docid": "77a8a48d37cf0b0a9bee4ae344850a4d", "score": "0.4237173", "text": "def activate_account(self, code: str):\n\n self.account.get_account_activate_code(code)", "title": "" }, { "docid": "83d01fbaf5b15d56fa98039188eb26a4", "score": "0.42335576", "text": "def alcohol_upc_code(self):\n return self._alcohol_upc_code", "title": "" }, { "docid": "ec48474a4b3d460640479ebec1abdc37", "score": "0.4227719", "text": "def _make_mode_ao(self, code: 'Enum_Option', opt: 'Optional[Data_Authentication]' = None, *,\n key_id: 'int' = 0,\n next_key_id: 'int' = 0,\n mac: 'bytes' = b'',\n **kwargs: 'Any') -> 'Schema_Authentication':\n if opt is not None:\n key_id = opt.key_id\n next_key_id = opt.next_key_id\n mac = opt.mac\n\n return Schema_Authentication(\n kind=code,\n length=4 + len(mac),\n key_id=key_id,\n next_key_id=next_key_id,\n mac=mac,\n )", "title": "" }, { "docid": "9cfa93d64780149edcd37965e4043145", "score": "0.42242163", "text": "def __ALMA__(self):\n if 'AIPS' in self.header.tostring():\n self.instr = 'ALMA_AIPS'\n self.__AIPS__()\n else:\n self.instr = 'ALMA_CASA'", "title": "" }, { "docid": "bc12585ca96cc6a2448b3b963bb0fb4a", "score": "0.4205105", "text": "def list_icd10(lang, orphacode): # noqa: E501\n es = config.elastic_server\n\n index = \"rdcode_orpha_icd10_mapping\"\n index = \"{}_{}\".format(index, lang.lower())\n\n query = \"{\\\"query\\\": {\\\"match\\\": {\\\"ORPHAcode\\\": \" + str(orphacode) + \"}},\" \\\n \"\\\"_source\\\":[\\\"Date\\\", \\\"ORPHAcode\\\",\\\"Preferred term\\\", \\\"Code ICD\\\"]}\"\n\n response = single_res(es, index, query)\n # Test to return error\n if isinstance(response, str) or isinstance(response, tuple):\n return response\n else:\n references = response.pop(\"Code ICD\")\n references.sort(key=operator.itemgetter(\"Code ICD10\"))\n response[\"References\"] = references\n\n # return yaml if needed\n response = if_yaml(connexion.request.accept_mimetypes.best, response)\n return response", "title": "" }, { "docid": "89cfe5111044d0a144ca35fbd1127b17", "score": "0.420379", "text": "def distribution_code(self, distribution_code):\n self._distribution_code = distribution_code", "title": "" }, { "docid": "6e3e7a78cbfa8c5d415ad3d77932cc14", "score": "0.42034954", "text": "def evaluation_code(self, evaluation_code):\n\n self._evaluation_code = evaluation_code", "title": "" }, { "docid": "8f0f70299854f7b4e09d0c791fb14648", "score": "0.42025292", "text": "def add_event_code(self, event_code_id, 
attribute_name):\n\n db_key = \"\".join([\"event_code:\", str(event_code_id)])\n self._r.set(db_key, attribute_name)", "title": "" }, { "docid": "20ee30b56c67a3d085d03b8879404d8f", "score": "0.42006305", "text": "def home_championship(self, home_championship):\n\n self._home_championship = home_championship", "title": "" }, { "docid": "e33f8cd2c1e23fd6298426bbff9cf36f", "score": "0.41997313", "text": "def response_code(self, response_code):\n\n self._response_code = response_code", "title": "" }, { "docid": "970ebc5b60fe7d476d5bbbc0daf19254", "score": "0.41935822", "text": "def alcohol_region(self, alcohol_region):\n self._alcohol_region = alcohol_region", "title": "" }, { "docid": "d19ac6e4b961ca1c8512b3e7ab24f65d", "score": "0.4176731", "text": "def authority(self, authority):\n self._authority = authority\n self._parse_authority()", "title": "" }, { "docid": "255ba44ea8307de3fd18d45bfd2c58e4", "score": "0.41553384", "text": "def country_code(self, country_code):\n if country_code is not None and len(country_code) > 2:\n raise ValueError(\"Invalid value for `country_code`, length must be less than or equal to `2`\") # noqa: E501\n\n self._country_code = country_code", "title": "" }, { "docid": "a151103649004ef284b8477c126bc70b", "score": "0.4153282", "text": "def set_code(self, authorization_code):\n params = {\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'grant_type': 'authorization_code',\n 'code': authorization_code,\n 'redirect_uri': self.redirect_uri\n }\n res = requests.post(self.token_url, data=params)\n token = json.loads(res.text)\n self._assert_error(token)\n\n self.uid = token['uid']\n self.access_token = token['access_token']\n self.expires_at = token['expires_at']\n\n token[u'expires_at'] = int(time.time()) + int(token.pop(u'expires_in'))\n self.session.params = {'access_token': self.access_token}", "title": "" }, { "docid": "40d49a274eab3fc3a1e6816111135e42", "score": "0.41484442", "text": "def encode_amino_acids(orf):\n from helpers import amino_acid\n # Create a list with all the codons which are sequences of\n # three nucleotides.\n codons = []\n for i in range(0, len(orf), 3):\n codons.append(orf[i:i + 3])\n # Create a string of amino acids sequences from the codons if the codons\n # are 3 characters\n amino_acid_sequence = \"\"\n for codon in codons:\n if len(codon) == 3:\n amino_acid_sequence += amino_acid(codon)\n return amino_acid_sequence", "title": "" }, { "docid": "97b819141f4bc50fe6acfd9b12fe85f8", "score": "0.41322866", "text": "def update_amphora_agent_config(self, amphora_id):\n LOG.info(\"Start amphora agent configuration update, amphora's id \"\n \"is: %s\", amphora_id)\n session = db_apis.get_session()\n with session.begin():\n amp = self._amphora_repo.get(session, id=amphora_id)\n lb = self._amphora_repo.get_lb_for_amphora(session,\n amphora_id)\n flavor = {}\n if lb.flavor_id:\n flavor = self._flavor_repo.get_flavor_metadata_dict(\n session, lb.flavor_id)\n\n store = {constants.AMPHORA: amp.to_dict(),\n constants.FLAVOR: flavor}\n\n self.run_flow(\n flow_utils.update_amphora_config_flow,\n store=store)\n LOG.info(\"Finished amphora agent configuration update, amphora's id \"\n \"was: %s\", amphora_id)", "title": "" }, { "docid": "49f9c84bbc5fac3c00bf0837dc671cfb", "score": "0.41292945", "text": "def activations_industrial_core(self, activations_industrial_core):\n\n self._activations_industrial_core = activations_industrial_core", "title": "" }, { "docid": "53f7bb6393b8ec4d40f7771264ff002d", "score": "0.4118232", "text": "def 
alcohol_scc_code(self):\n return self._alcohol_scc_code", "title": "" }, { "docid": "bfcd9815b2c2ad39d75a90299519bfc4", "score": "0.4117008", "text": "def alcohol_unimerc_code(self):\n return self._alcohol_unimerc_code", "title": "" }, { "docid": "dfd99f05f5a4f63cc035908d540423b6", "score": "0.41100878", "text": "def time_code(self, time_code):\n\n self._time_code = time_code", "title": "" }, { "docid": "976a9f2ad9003aaf439202e78df129ff", "score": "0.41058257", "text": "def nodecode(self, nodecode):\n self._nodecode = bool(nodecode)\n return self", "title": "" }, { "docid": "32413c73e1768cc48e2213b5157de467", "score": "0.40930507", "text": "def error_code(self, error_code):\n if error_code is None:\n raise ValueError(\"Invalid value for `error_code`, must not be `None`\") # noqa: E501\n\n self._error_code = error_code", "title": "" }, { "docid": "be104b8900ff934c31b6d7833ea69830", "score": "0.40877327", "text": "def mean_orf_counter(self):\n orf_lens = np.array([len(orf) for orf in self.orfs])\n self.mean_orf_cov = np.mean(orf_lens) / self.length", "title": "" }, { "docid": "c45c530efdaa72b841a884ad3ad22ac1", "score": "0.40839577", "text": "def code(self):\n return self.iso_3166_1_a2", "title": "" }, { "docid": "12cb9b9ed5384390b54ec70f749a9df5", "score": "0.4079742", "text": "def set_alumno_controller(self, alumno_controller):\n self.__alumnoController = alumno_controller", "title": "" }, { "docid": "41203da47c80d7afe700c3883c93ec8e", "score": "0.40778106", "text": "def iso6392_code(self, iso6392_code):\n\n self._iso6392_code = iso6392_code", "title": "" }, { "docid": "ec5fca5e9bb86ad17bfb1e741d6223c8", "score": "0.4072353", "text": "def account_code_id(self, account_code_id):\n self._account_code_id = account_code_id", "title": "" }, { "docid": "3ed06f666d273139941044d133c08c1b", "score": "0.40696678", "text": "def enable(self, event_code, data=None):\n if isinstance(event_code, InputProperty):\n self._libevdev.enable_property(event_code.value)\n return\n\n try:\n if event_code.type == libevdev.EV_ABS:\n if data is None or not isinstance(data, InputAbsInfo):\n raise InvalidArgumentException('enabling EV_ABS codes requires an InputAbsInfo')\n\n data = {\"minimum\": data.minimum or 0,\n \"maximum\": data.maximum or 0,\n \"fuzz\": data.fuzz or 0,\n \"flat\": data.flat or 0,\n \"resolution\": data.resolution or 0}\n elif event_code.type == libevdev.EV_REP:\n if data is None:\n raise InvalidArgumentException('enabling EV_REP codes requires an integer')\n\n self._libevdev.enable(event_code.type.value, event_code.value, data)\n except AttributeError:\n self._libevdev.enable(event_code.value)", "title": "" }, { "docid": "531fdc3618f57df7cce3820b30fbfecf", "score": "0.4068581", "text": "def set_code(self, label, code):\n _LOGGER.debug(\"setting code at {} with label {}\".format(self._value.index, label))\n self.codelabel = label\n self._value.data = code\n # Setting data will cause a value change and subsequent state update call", "title": "" }, { "docid": "6727e6691ca398d8419cdbda49ea66a0", "score": "0.4066056", "text": "def list_orpha_by_icd10(lang, icd10): # noqa: E501\n es = config.elastic_server\n\n index = \"rdcode_orpha_icd10_mapping\"\n index = \"{}_{}\".format(index, lang.lower())\n\n # Find every occurrences of the queried ICD code and return the associated Date, ORPHAcode, Preferred term, Refs ICD\n query = \"{\\\"query\\\": {\\\"match\\\": {\\\"Code ICD.Code ICD10\\\": \\\"\" + str(icd10) + \"\\\"}},\" \\\n \"\\\"_source\\\":[\\\"Date\\\", \\\"ORPHAcode\\\", \\\"Preferred term\\\", 
\\\"Code ICD\\\"]}\"\n\n response_icd_to_orpha = multiple_res(es, index, query, 1000)\n\n # Test to return error\n if isinstance(response_icd_to_orpha, str) or isinstance(response_icd_to_orpha, tuple):\n return response_icd_to_orpha\n else:\n response = {}\n references = []\n # Source data are organized from the perspective of ORPHA concept\n # 1 ORPHAcode => X ICD\n # response_icd_to_orpha is a list of object containing \"Code ICD\"\n # \"Code ICD\" is also a list of object that need to be filtrated by ICD\n for ref in response_icd_to_orpha:\n reference = {\"ORPHAcode\": int(ref[\"ORPHAcode\"]),\n \"Preferred term\": ref[\"Preferred term\"],\n \"DisorderMappingRelation\": \"\",\n \"DisorderMappingValidationStatus\": \"\"}\n for CodeICD in ref[\"Code ICD\"]:\n if CodeICD[\"Code ICD10\"] == icd10:\n reference[\"DisorderMappingRelation\"] = CodeICD[\"DisorderMappingRelation\"]\n reference[\"DisorderMappingICDRelation\"] = CodeICD[\"DisorderMappingICDRelation\"]\n reference[\"DisorderMappingValidationStatus\"] = CodeICD[\"DisorderMappingValidationStatus\"]\n references.append(reference)\n # Sort references by Orphacode\n references.sort(key=operator.itemgetter(\"ORPHAcode\"))\n # Compose the final response\n response[\"Date\"] = response_icd_to_orpha[0][\"Date\"]\n response[\"Code ICD10\"] = icd10\n response[\"References\"] = references\n\n # return yaml if needed\n response = if_yaml(connexion.request.accept_mimetypes.best, response)\n return response", "title": "" }, { "docid": "76951695e3eca537b60ea82a302db0cb", "score": "0.40535444", "text": "def wrap_code(self, wrap_code):\n self._wrap_code = wrap_code", "title": "" }, { "docid": "522ff2935fa21286c53e05809347f13f", "score": "0.4049357", "text": "def account_code_id(self):\n return self._account_code_id", "title": "" }, { "docid": "ee69fc346045190fce612783b2988e54", "score": "0.40429142", "text": "def authority(self, reference):\n p = FHIRSearchElement(subject=\"authority\")\n p.reference = reference\n p.supported_profiles = [\n \"Order\"\n ]\n p.previous = self\n return p", "title": "" }, { "docid": "6800b2b6d98ac6569c443e790c823026", "score": "0.4040451", "text": "def test_code(self, test_code):\n\n self._test_code = test_code", "title": "" }, { "docid": "b367d7635a912fe868c8086263c45436", "score": "0.40357766", "text": "def alarm_arm_home(self, code=None):\n if code == str(self._code) or self.code_format is None:\n mqtt.publish(self.hass, self._command_topic,\n self._payload_arm_home, self._qos)\n else:\n _LOGGER.warning(\"Wrong code entered while arming home!\")", "title": "" }, { "docid": "88695465048fbcf95486cced821630e3", "score": "0.40355134", "text": "def get_admin_areas(self, country_code):\n res = self.api_call(\n \"search/geo\",\n params={\"entity\": \"AdminArea\", \"countrycode\": country_code},\n method=\"GET\",\n )[\"data\"]\n return res", "title": "" }, { "docid": "03a658ffa05cd550df5545eeeee9c8bf", "score": "0.4026991", "text": "def numero_aula(self, numero_aula: int):\n\n self._numero_aula = numero_aula", "title": "" }, { "docid": "12deb90441013bd6e7278d96bb3430f3", "score": "0.40222248", "text": "def alcohol_brand(self, alcohol_brand):\n self._alcohol_brand = alcohol_brand", "title": "" }, { "docid": "6fadd201c247df791df732c58f50eed5", "score": "0.40206423", "text": "def country_code(self, country_code):\n if country_code is None:\n raise ValueError(\"Invalid value for `country_code`, must not be `None`\") # noqa: E501\n\n self._country_code = country_code", "title": "" }, { "docid": "8bca4292318420893ff2555374e46783", 
"score": "0.401395", "text": "def update_session_with_code(self, response):\n if response.status_code == 302 and response._headers.get('location', None):\n re_code = re.search('code=(\\w+)&*?', response._headers.get('location', '')[1])\n\n if re_code is not None:\n code = re_code.groups()[0]\n session = Session.objects.get(session_key=self.request.session.session_key)\n session.oidc_code = code\n session.save()", "title": "" }, { "docid": "4574c34c7edce6a64ac63517ac421695", "score": "0.40094614", "text": "def alcohol_state(self, alcohol_state):\n self._alcohol_state = alcohol_state", "title": "" }, { "docid": "1e0088a23bbbb7992822328372400f1b", "score": "0.39950296", "text": "def __EVLA__(self):\n if 'AIPS' in self.header.tostring():\n self.instr = 'EVLA_AIPS'\n self.__AIPS__()\n else:\n self.instr = 'EVLA_CASA'", "title": "" }, { "docid": "224362a72090033f379b11a9a81b7b80", "score": "0.39813063", "text": "def icon_code(self, icon_code):\n\n self._icon_code = icon_code", "title": "" }, { "docid": "e8899d645e39fe6713ad2b08f1059520", "score": "0.39775348", "text": "def list_fax_in_areacodes(self):\n data = {\n 'country_code': self.country_code,\n 'state': self.state,\n 'apioutputformat': PAMFAX_APIOUTPUTFORMAT,\n 'apikey': PAMFAX_APIKEY,\n 'usertoken': pam_fax.common.usertoken\n }\n\n response = pam_fax.common.api_call(method='Shopping/ListFaxInAreacodes', data=data)\n self.areacode_id = response['AreaCodes']['content'][0]['id']", "title": "" }, { "docid": "800f4c663e421b062c5f65808544c262", "score": "0.3976638", "text": "def laplacian_alpha_enthalpy(self, laplacian_alpha_enthalpy):\n\n self._laplacian_alpha_enthalpy = laplacian_alpha_enthalpy", "title": "" }, { "docid": "01f5c457178c13feb9f42e80a10c2783", "score": "0.39663067", "text": "def update_amphora_agent_config(self, amphora_id):\n LOG.info(\"Start amphora agent configuration update, amphora's id \"\n \"is: %s\", amphora_id)\n amp = self._amphora_repo.get(db_apis.get_session(), id=amphora_id)\n lb = self._amphora_repo.get_lb_for_amphora(db_apis.get_session(),\n amphora_id)\n flavor = {}\n if lb.flavor_id:\n flavor = self._flavor_repo.get_flavor_metadata_dict(\n db_apis.get_session(), lb.flavor_id)\n\n store = {constants.AMPHORA: amp.to_dict(),\n constants.FLAVOR: flavor}\n\n self.services_controller.run_poster(\n flow_utils.update_amphora_config_flow,\n store=store)", "title": "" }, { "docid": "ac527b30a306de9c3ed05aa2e11a2d7e", "score": "0.3964306", "text": "def alcohol_content(self, alcohol_content):\n self._alcohol_content = alcohol_content", "title": "" }, { "docid": "6ec0239eee90808eb78b7f30e8559711", "score": "0.39639145", "text": "def write_orca(filename, atoms):\n# Since orca prints the geometry directly into the input file, we'll just\n# the write_input method from the orca calculator, and just use the\n# default settings\n calc = orca()\n calc.initialize(atoms)\n calc.write_input(filename, atoms)", "title": "" }, { "docid": "3ef8159807b38658f5ab308d2f83506e", "score": "0.3955519", "text": "def error_code(self, error_code):\n\n self._error_code = error_code", "title": "" } ]
270edec675db2162eb577ddf0a3627a6
Processes a PHEME tweet topic
[ { "docid": "efd8bf64eae990a5ff2be51184961626", "score": "0.0", "text": "def processCategory(path):\n return [processTweetFolder(path + '\\\\' + tweetFolder, tweetFolder)\n for tweetFolder in os.listdir(path)]", "title": "" } ]
[ { "docid": "ec2d35081a91dc4331f1f408c47b8ac5", "score": "0.6758134", "text": "def process_tweet(tweet):\n data = {'cmd': None,\n 'player': None,\n 'id': None,\n 'flag': None}\n text = tweet.full_text.lower()\n parts = text.split()\n\n # if the tweet has more than 3 words then do not process\n if len(parts) <= 3 and parts[1] in COMMANDS:\n data['cmd'] = parts[1]\n data['player'] = \"@\" + tweet.user.screen_name\n data['id'] = tweet.id\n data['flag'] = None\n if len(parts) == 3:\n try:\n data['flag'] = int(parts[2])\n except ValueError:\n data['flag'] = parts[2]\n # updateLastSeen(LAST_SEEN, data['id'])\n return data\n return False", "title": "" }, { "docid": "12e45012e15d29070ebc8ee6c1dd87fd", "score": "0.64803624", "text": "def tweet_process(self,tweet):\n id = str(tweet['id'])\n try: \n #some tweets have longer text. Attempt to extract it \n content = tweet['extended_tweet']['full_text']\n except KeyError: \n content = tweet['text']\n output =sentiments[self.classy(content)]\n return (id,content,output)", "title": "" }, { "docid": "9cce52ad12c7a4f1b48c76beda822530", "score": "0.64020056", "text": "def process_message(tweets_queue):\n\n m = tweets_queue.read() \n if m is not None:\n message = ast.literal_eval(m.get_body())\n print message\n\n #Call to the sentiment API\n data = {\n 'text_list': [message['text']]\n }\n response = requests.post(\n \"https://api.monkeylearn.com/v2/classifiers/cl_qkjxv9Ly/classify/?\",\n data=json.dumps(data),\n headers={'Authorization': \"token \" + MONKEYLEARN_TOKEN,\n 'Content-Type': 'application/json'})\n \n result = dict(json.loads(response.text))['result'][0][0]\n result['tweet_id'] = message['tweet_id']\n return result", "title": "" }, { "docid": "ea57d92e0491a1d5598532b3971d6170", "score": "0.6260986", "text": "async def on_tweet(self, tweet):\n pass", "title": "" }, { "docid": "eb98f498da4c8a33ab0f680e60cc9315", "score": "0.6184183", "text": "def tweet_text(tweet):\n return tweet['text']", "title": "" }, { "docid": "07239129fa860e156a7bf52afc1c7803", "score": "0.6155938", "text": "def on_data(self, data):\n data_json = json.loads(data)\n\n # handle tweets arrived without 'text' property\n if data_json.get('text') is None:\n return True\n str_tweet = data_json['text'] \n logging.info(f\"------- -- {str_tweet}\") # DEBUG\n try:\n self.producer.send(KAFKA_TOPIC, value=str_tweet)\n except Exception as e:\n print(e)\n return False\n return True", "title": "" }, { "docid": "933497c9866cce6891c9b5a41f6de8be", "score": "0.60303056", "text": "def _tweet(pTweetMessage):\n try:\n lAuth = tweepy.OAuthHandler(settings.TWEEPY_CONSUMER_TOKEN, settings.TWEEPY_CONSUMER_SECRET)\n lAuth.set_access_token(settings.TWEEPY_ACCESS_TOKEN_KEY, settings.TWEEPY_ACCESS_TOKEN_SECRET)\n lApi = tweepy.API(lAuth)\n lTweet = ''.join(pTweetMessage.splitlines()) # Remove newlines\n \n # shorten urls if there are any\n if lTweet.find('http://') > -1:\n lUrlStart = lTweet.find('http://')\n lUrlEnd = lTweet[lUrlStart:].find(' ') + lUrlStart\n lUrl = lTweet[lUrlStart:lUrlEnd]\n lNewUrl = shorten_url(lUrl)\n lTweet = lTweet[:lUrlStart] + lNewUrl + lTweet[lUrlEnd:]\n \n # make sure under 140 chars\n if len(lTweet) > 140: \n lTweet = \"%s...\" % lTweet[0:136]\n lApi.update_status(lTweet)\n except Exception as inst:\n if str(inst) != \"[{u'message': u'Status is a duplicate.', u'code': 187}]\":\n lErrorMessage = \"Problems Tweeting, %s %s\\n\\n%s\\n\\n%s\" % (type(inst), inst.reason, str(inst), lTweet)\n send_mail('Error Tweeting', lErrorMessage, 'twitter@brassbandresults.co.uk', 
['errors@brassbandresults.co.uk'], fail_silently=True)", "title": "" }, { "docid": "cac534da33a955fab7d9865c662f2d1d", "score": "0.60123116", "text": "def tweet_preprocessing(tweet_data):\n\n if tweet_data[\"lang\"] == \"en\":\n if tweet_data[\"text\"].startswith(\"RT\"):\n if tweet_data[\"retweeted_status\"].get(\"extended_tweet\"):\n tweet_text = tweet_data[\"retweeted_status\"][\"extended_tweet\"]['full_text']\n tweet_hashtags = [item[\"text\"] for item in\n tweet_data[\"retweeted_status\"][\"extended_tweet\"][\"entities\"]['hashtags']]\n tweet_usermentions = [item[\"screen_name\"] for item in\n tweet_data[\"retweeted_status\"][\"extended_tweet\"][\"entities\"]['user_mentions']]\n else:\n tweet_text = tweet_data[\"retweeted_status\"][\"text\"]\n tweet_hashtags = [item[\"text\"] for item in tweet_data[\"retweeted_status\"][\"entities\"]['hashtags']]\n tweet_usermentions = [item[\"screen_name\"] for item in tweet_data[\"retweeted_status\"][\"entities\"]['user_mentions']]\n\n elif tweet_data[\"is_quote_status\"]:\n if tweet_data[\"quoted_status\"].get(\"extended_tweet\"):\n tweet_text = tweet_data[\"quoted_status\"][\"extended_tweet\"]['full_text']\n tweet_hashtags = [item[\"text\"] for item in\n tweet_data[\"quoted_status\"][\"extended_tweet\"][\"entities\"]['hashtags']]\n tweet_usermentions = [item[\"screen_name\"] for item in\n tweet_data[\"quoted_status\"][\"extended_tweet\"][\"entities\"]['user_mentions']]\n else:\n tweet_text = tweet_data[\"quoted_status\"][\"text\"]\n tweet_hashtags = [item[\"text\"] for item in tweet_data[\"quoted_status\"][\"entities\"]['hashtags']]\n tweet_usermentions = [item[\"screen_name\"] for item in tweet_data[\"quoted_status\"][\"entities\"]['user_mentions']]\n else:\n if tweet_data.get(\"extended_tweet\"):\n tweet_text = tweet_data[\"extended_tweet\"]['full_text']\n tweet_hashtags = [item[\"text\"] for item in tweet_data[\"extended_tweet\"][\"entities\"]['hashtags']]\n tweet_usermentions = [item[\"screen_name\"] for item in tweet_data[\"extended_tweet\"][\"entities\"]['user_mentions']]\n else:\n tweet_text = tweet_data[\"text\"]\n tweet_hashtags = [item[\"text\"] for item in tweet_data[\"entities\"]['hashtags']]\n tweet_usermentions = [item[\"screen_name\"] for item in tweet_data[\"entities\"]['user_mentions']]\n\n tweet_id = tweet_data[\"id\"]\n tweet_user_id = tweet_data[\"user\"]['id']\n tweet_user_name = tweet_data[\"user\"]['name']\n tweet_timestamp = tweet_data[\"timestamp_ms\"] # convert in utc if required for cassandra/hbase\n\n tweet_data_list = replicate_tweet_data_for_hashtags_and_user_mentions({\n 'tweet_text': tweet_text,\n 'tweet_id': tweet_id,\n 'tweet_user_id': tweet_user_id,\n 'tweet_user_name': tweet_user_name,\n 'tweet_hashtags': tweet_hashtags,\n 'tweet_usermentions': tweet_usermentions,\n 'tweet_timestamp': tweet_timestamp\n })\n\n for tweet_data in tweet_data_list:\n send_tweet_data_to_kafka.delay(tweet_data)", "title": "" }, { "docid": "389cd2b5bb7b2cd63d9476a0230ac313", "score": "0.59746605", "text": "def process_tweet(tweet):\n output = remove_non_ascii(tweet)\n output = output.lower()\n output = output.strip()\n output = punctuation_stripper(output)\n return output", "title": "" }, { "docid": "1c864d66d84911612ba69d05cc17f0bb", "score": "0.59568715", "text": "def process_tweet(text, target, do_short):\n # print(len(text) > 140, len(text))\n\n if not text:\n return None\n\n if do_short and len(text) > 141:\n return None\n elif not do_short and len(text) <= 140:\n return None\n\n if (text.startswith(\"RT \") or\n \"@\" in text or\n 
\"#\" in text or\n \"http\" in text):\n return None\n\n text_lower = text.lower()\n if target.lower() not in text_lower:\n return None\n\n exclude = []\n if any(substr in text_lower for substr in exclude):\n return None\n # return split_from(target, text)\n\n return text", "title": "" }, { "docid": "4a20a078beae83a786ae43ff15012a31", "score": "0.59559965", "text": "def reply(tweet):\n message = tweet['text']\n user = tweet['user']['screen_name']", "title": "" }, { "docid": "cba35e61ac18fad3ceac9600445d213a", "score": "0.5955168", "text": "def _process_tweet(tweet):\n\n text = tweet.full_text.replace('\\n', ' ')\n # Replaced with the created_at_in_seconds attribute\n # timestamp = _parse_date(tweet.created_at)\n is_retweet = True if text.startswith('RT') else False\n try:\n city = tweet.place['name']\n country = tweet.place['country']\n except TypeError:\n city = None\n country = None\n\n tweet_instance = Tweet(tweet.created_at, tweet.created_at_in_seconds, tweet.id,\n tweet.full_text, tweet.user.screen_name,\n tweet.user.id,\n tweet.user.followers_count,\n tweet.favorite_count, tweet.retweet_count,\n is_retweet, city, country\n )\n\n return tweet_instance", "title": "" }, { "docid": "1b5baf90be8990e306e9590ae2063ef6", "score": "0.5831636", "text": "def post_to_twitter(tweets, count=200):\n evaluator = Evaluator()\n tw = TwitterWrap()\n tweets = [json.loads(tweet) for tweet in tweets]\n print \"%i tweets processed\" % (len(tweets))\n\n if count == -1:\n count = len(tweets)\n\n for tweet in tweets:\n print tweet\n haiku, breaks, word_val_list = evaluator.evaluate_string(tweet['text'])\n # print tweet['entities']\n\n if haiku and tweet['lang'] == 'en':\n words = [_x[0] for _x in word_val_list]\n\n p1 = \" \".join(words[:breaks[0]+1])\n p2 = \" \".join(words[breaks[0]+1:breaks[1]+1])\n p3 = \" \".join(words[breaks[1]+1:breaks[2]+1])\n\n # print evaluator.check_user_mentions(tweet, p1+\" \"+p2+\" \"+p3)\n if evaluator.check_user_mentions(tweet, p1+\" \"+p2+\" \"+p3):\n to_tweet = tw.tweet_length_check(tweet['user']['screen_name'], tweet['id'], (p1,p2,p3))\n tw.debug_tweet(tweet, to_tweet, word_val_list)\n tw.tweet(to_tweet)\n if count == 0:\n break\n count -= 1", "title": "" }, { "docid": "7c680de3619e6bfd684e09ae567d9418", "score": "0.5819602", "text": "def comot_tweet( self, data ):\n\t\ttweet = os.path.splitext( data )[ 0 ]\n\t\tself.tweet = '{0}\\n{1}'.format( tweet, self.hashtags )\n\t\treturn self", "title": "" }, { "docid": "8df85fd2046987a5e40a1361089e01c5", "score": "0.57378054", "text": "def __run_tweet(self):\n count = 0\n for message in self.__consumer:\n count = count + 1\n self.__data_manager.add_tweet(message)\n '''if (count % 25000) == 0:\n printpers(print_topic, \"TWEET :: Pause\")\n time.sleep(100)\n printpers(print_topic, \"TWEET :: Start\")'''", "title": "" }, { "docid": "4af0ae2fb1c901b01e4f54faabb89d29", "score": "0.57230955", "text": "def getTopic(message):\n return message.split('\\t')[1]", "title": "" }, { "docid": "5c419555f847a0240b273850b797f2aa", "score": "0.56766945", "text": "async def tweet(self):\n\n tag_string = ''\n try:\n for hashtag in self.hashtags:\n tag_string += ' #{}'.format(hashtag)\n except TypeError:\n pass\n\n try:\n tweet_text = (\n 'A {d} {n} appeared! It will be {p} until {e}. {t}').format(\n d=self.description, n=self.name, p=self.place,\n e=self.expire_time, t=tag_string)\n except AttributeError:\n tweet_text = (\n 'A {d} {n} appeared {p}! It will expire sometime between '\n '{e1} and {e2}. 
{t}').format(\n d=self.description, n=self.name, p=self.place,\n e1=self.min_expire_time, e2=self.max_expire_time,\n t=tag_string)\n\n if len(tweet_text) > 116:\n tweet_text = self.shorten_tweet(tweet_text)\n\n tweet_text += ' ' + self.map_link\n\n media_id = None\n client = self.get_twitter_client()\n if conf.TWEET_IMAGES:\n try:\n image = PokeImage(self.pokemon, self.move1, self.move2, self.time_of_day).create()\n except Exception:\n self.log.exception('Failed to create a Tweet image.')\n else:\n try:\n media = await client.upload_media(image,\n media_type='image/png',\n media_category='tweet_image',\n chunked=True)\n media_id = media['media_id']\n except Exception:\n self.log.exception('Failed to upload Tweet image.')\n try:\n await client.api.statuses.update.post(\n status=tweet_text,\n media_ids=media_id,\n lat=str(self.coordinates[0]),\n long=str(self.coordinates[1]),\n display_coordinates=True)\n except Exception:\n self.log.exception('Failed to tweet about {}.', self.name)\n return False\n else:\n self.log.info('Sent a tweet about {}.', self.name)\n return True\n finally:\n try:\n image.close()\n except AttributeError:\n pass", "title": "" }, { "docid": "5c74090c412ee1758bb956d9b486cd62", "score": "0.5628301", "text": "def tweet_preprocessor(tweets):\n\n # set the global options for the library. These settings define which\n # elements of the tweet to pay attention to\n tp.set_options(tp.OPT.URL, tp.OPT.MENTION, tp.OPT.NUMBER, tp.OPT.HASHTAG)\n\n # create a list to store the results\n clean_tweets = []\n\n # iterate over all tweets in the list\n for tweet in tweets:\n\n # remove emojis\n for e in emoji.emoji_lis(tweet):\n tweet = tweet.replace(e['emoji'], '')\n\n # append the cleaned lowered-cassed tweet\n clean_tweets.append(tp.clean(tweet).lower())\n\n return(clean_tweets)", "title": "" }, { "docid": "7f6f432219e65e4ba2da45766487a53f", "score": "0.5625561", "text": "def on_data(self, data):\n # global i\n # data_dct =[]\n try:\n producer.send('twitter_stream', data.encode('utf-8'))\n print(1)\n except Exception as e:\n print(e)\n return False\n return True # Don't kill the stream", "title": "" }, { "docid": "040e6d74194460f50c4d4011881ccc7a", "score": "0.5623302", "text": "def basic_preprocess(tweet: str) -> str:\n tweet = re.sub('<user>', '', tweet) # remove user tags\n tweet = re.sub('<url>', '', tweet) # remove url tags\n tweet = re.sub('[0-9]', '', tweet) # remove numbers\n tweet = re.sub('#', '', tweet) # remove hashtag symbols (ADDED)\n #tweet = re.sub('#\\w*', '', tweet) # remove hashtags and the words linked to it (REMOVED)\n tweet = re.sub('\\s+', ' ', tweet) # remove excess whitespace\n tweet = re.sub('^\\s', '', tweet) # remove excess whitespace\n tweet = re.sub('\\s$', '', tweet) # remove excess whitespace\n tweet = tweet.strip() # remove star/end whitespaces\n tweet = tweet.lower() # lower case\n return tweet", "title": "" }, { "docid": "f3f842b6cd731dd07b52f3d4fece750e", "score": "0.5611703", "text": "def post_to_twitter(tweet):\n\n # Twitter credentials\n consumer_key = os.environ['TWITTER_CONSUMER_KEY']\n consumer_secret = os.environ['TWITTER_CONSUMER_SECRET']\n access_token = os.environ['TWITTER_ACCESS_TOKEN']\n access_token_secret = os.environ['TWITTER_ACCESS_TOKEN_SECRET']\n\n # OAuth process\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n\n api = tweepy.API(auth)\n\n # TWEET!!!\n return api.update_status(tweet)", "title": "" }, { "docid": "752488b694b58497bda52a9a784720b3", "score": 
"0.56040454", "text": "def handle(self, msg, **kwargs):\n auth = tweepy.OAuthHandler(settings.CODE4LIB_TWITTER_OAUTH_CONSUMER_KEY, settings.CODE4LIB_TWITTER_OAUTH_CONSUMER_SECRET)\n auth.set_access_token(settings.CODE4LIB_TWITTER_OAUTH_ACCESS_TOKEN_KEY, settings.CODE4LIB_TWITTER_OAUTH_ACCESS_TOKEN_SECRET)\n twitter = tweepy.API(auth)\n twitter.update_status(msg)", "title": "" }, { "docid": "6c241c6738c0aa9318be419276d43c31", "score": "0.5597758", "text": "def analyse(tweet):\n # Needs to be passed in as a list \n tweet = [tweet]\n # loading the Tokenizer\n tokenizer = None\n with open('models/topic_tokenizer.pickle', 'rb') as handle:\n tokenizer = pickle.load(handle)\n #loading label_encoder\n label_encoder = None\n with open('models/topic_label_encoder.pickle', 'rb') as handle:\n label_encoder = pickle.load(handle)\n\n vocab_size = len(tokenizer.word_index) + 1\n encoded_tweet = tokenizer.texts_to_sequences(tweet)\n length = 250\n padded_tweet = pad_sequences(encoded_tweet, maxlen=length, padding='post')\n\n multichannel_cnn = define_multichannel_cnn_model(length, vocab_size)\n multichannel_cnn.load_weights('models/topic_analysis_model.h5')\n # evaluate model on training dataset\n predictions = multichannel_cnn.predict([padded_tweet, padded_tweet, padded_tweet], verbose=1)\n\n# prediction = np.argmax(predictions, axis=1)\n# prediction = label_encoder.inverse_transform(predictions)\n\n predictions_dict = {}\n index = 0\n for prediction in predictions[0]:\n label = label_encoder.inverse_transform(index)\n predictions_dict[label] = prediction \n index = index + 1\n \n return predictions_dict.keys(), predictions_dict.values()", "title": "" }, { "docid": "eaee1f9b499e6c6c9a9afbcb87b3ed8d", "score": "0.55709004", "text": "def process_message(self, message):\n #\n #\n # TODO: Process incoming weather messages. Set the temperature and status. 
(DONE)\n #\n #\n if message.topic() != \"org.chicago.cta.weather.v1\":\n return\n weather_data = message.value()\n self.temperature = weather_data[\"temperature\"]\n self.status = weather_data[\"status\"]", "title": "" }, { "docid": "53d107aca02c1f6a488feed28584bcd0", "score": "0.5567994", "text": "def tell_tweets(self, tweets, channel):\n if not len(tweets):\n self.msg(channel, \"Could not acquire any Tweets for #UberRTS.\")\n return\n\n self.msg(channel, u\"\\x02{0}\\x02 latest Tweets \"\n u\"for #UberRTS:\".format(len(tweets)))\n\n for tweet in tweets:\n self.msg(channel, u\"\\x02{0}\\x02: » {1} « [{2}]\".format(\n tweet[\"name\"], tweet[\"text\"],\n tweet[\"date\"].isoformat(\" \")))", "title": "" }, { "docid": "76033981f742a11b87222bde33e2a91b", "score": "0.5564769", "text": "def handle_message(self, message):\n text = message[self.text_key]\n\n # For now only a single emmm command\n self.eeny_meeny_miny_moe(message)", "title": "" }, { "docid": "ce9740bad8a7a3cd1495ccb72737b173", "score": "0.55486155", "text": "def process_tweet(tweet):\n global start_date\n global end_date\n global geo_enabled_tweets\n global retweets\n\n\n # Check for filters before processing any further\n #print tweet.text\n\n if args.filter and tweet.source:\n if not args.filter.lower() in tweet.source.lower():\n return\n\n tw_date = tweet.created_at\n\n # Updating most recent tweet\n end_date = end_date or tw_date\n start_date = tw_date\n\n # Handling retweets\n try:\n # We use id to get unique accounts (screen_name can be changed)\n rt_id_user = tweet.retweeted_status.user.id_str\n retweeted_users[rt_id_user] += 1\n\n if tweet.retweeted_status.user.screen_name not in id_screen_names:\n id_screen_names[rt_id_user] = \"@%s\" % tweet.retweeted_status.user.screen_name\n\n retweets += 1\n except:\n pass\n\n # Adding timezone from profile offset to set to local hours\n if tweet.user.utc_offset and not args.no_timezone:\n tw_date = (tweet.created_at + datetime.timedelta(seconds=tweet.user.utc_offset))\n\n if args.utc_offset:\n tw_date = (tweet.created_at + datetime.timedelta(seconds=args.utc_offset))\n\n # Updating our activity datasets (distribution maps)\n activity_hourly[\"%s:00\" % str(tw_date.hour).zfill(2)] += 1\n activity_weekly[str(tw_date.weekday())] += 1\n\n # Updating langs\n detected_langs[tweet.lang] += 1\n\n # Updating sources\n detected_sources[tweet.source] += 1\n\n # Detecting geolocation\n if tweet.place:\n geo_enabled_tweets += 1\n tweet.place.name = tweet.place.name\n detected_places[tweet.place.name] += 1\n\n # Updating hashtags list\n if tweet.entities['hashtags']:\n for ht in tweet.entities['hashtags']:\n ht['text'] = \"#%s\" % ht['text']\n detected_hashtags[ht['text']] += 1\n\n # Updating domains list\n if tweet.entities['urls']:\n for url in tweet.entities['urls']:\n domain = urlparse(url['expanded_url']).netloc\n if domain != \"twitter.com\": # removing twitter.com from domains (not very relevant)\n detected_domains[domain] += 1\n\n # Updating mentioned users list\n if tweet.entities['user_mentions']:\n for ht in tweet.entities['user_mentions']:\n mentioned_users[ht['id_str']] += 1\n if not ht['screen_name'] in id_screen_names:\n id_screen_names[ht['id_str']] = \"@%s\" % ht['screen_name']", "title": "" }, { "docid": "738ed29f39fe99a5725e0344cfaf2c79", "score": "0.552728", "text": "async def process_message(message, metadata):\n reply = None\n\n for test, action in ACTION_MAP.items():\n if message.startswith(test):\n reply = await action(message.lstrip(test), metadata)\n break\n\n if reply:\n 
post_to_slack(reply, metadata)", "title": "" }, { "docid": "7d09f312ea1be18435792eb44f491564", "score": "0.55163366", "text": "def tweet_pre_processing(self):\n \n \n #Condition to check if pre-processing is enabled, if false method retrieves processed data from csv file location\n if self.config[\"isPreProc\"] == True:\n \n #Run troll tweet process\n self.tweedle_collection = self.run_troll_tweets()\n self.troll_tweet_df = self.tweedle_collection[0]\n self.distinct_hashtags = self.tweedle_collection[1]\n \n #Copy twitter data for pre-processing\n self.troll_tweet_df = self.troll_tweet_df[self.troll_tweet_df['language'] == 'English']\n\n #Filter null tweet content\n self.processed_tweets = pd.Series(self.troll_tweet_df['content'],index=self.troll_tweet_df.index, name='processed_content')\n \n #Removing hyperlinks from twitter data, regular expression used to identify URLS, placed before hash tag and punctuation removal to effectively do so\n self.msg_handle(\"msg_preproc_links\")\n self.processed_tweets = self.processed_tweets.str.replace(r'\\w+:\\/{2}[\\d\\w-]+(\\.[\\d\\w-]+)*(?:(?:\\/[^\\s/]*))*','')\n \n #Removing hash tags from twitter data, regular expression used to replace tweets with nothing\n self.msg_handle(\"msg_preproc_tags\")\n self.processed_tweets = self.processed_tweets.str.replace('#(\\w+)','')\n self.processed_tweets = self.processed_tweets[self.processed_tweets.isna()==False]\n \n #Removing stopwords from twitter data\n self.msg_handle(\"msg_preproc_stop\")\n stop = stopwords.words('english')\n self.processed_tweets = self.processed_tweets.progress_apply(lambda x: \" \".join(x for x in x.split() if x not in stop))\n \n #Removing punctuation from twitter data, regular expression used to replace punctuation with nothing\n self.msg_handle(\"msg_preproc_punct\")\n self.processed_tweets = self.processed_tweets.str.replace('[^\\w\\s]','')\n \n #Removing null tweets, after applying preprocessing for other pre-processing methods\n self.processed_tweets = self.processed_tweets[self.processed_tweets.isna() == False]\n \n #Making twitter data lower case\n self.msg_handle(\"msg_preproc_lower\")\n self.processed_tweets= self.processed_tweets.progress_apply(lambda x: \" \".join(x.lower() for x in x.split()))\n \n \n #Lemmatization\n self.msg_handle(\"msg_preproc_lemma\")\n self.processed_tweets = self.processed_tweets.progress_apply(lambda x: \" \".join([Word(word).lemmatize() for word in x.split()]))\n \n self.processed_tweets = pd.DataFrame(self.processed_tweets, index = self.processed_tweets.index)\n \n #Get Tweet Sentiment - polarity, subjectivity and textblob assessments\n self.msg_handle(\"msg_preproc_sentiment\")\n self.processed_tweets['sentiment'] = self.processed_tweets['processed_content'].progress_apply(self.get_tweet_sentiment)\n \n #Split tweet sentiment containing list into muliple columns\n self.msg_handle(\"msg_preproc_splitcols\")\n self.processed_tweets[['sent_polarity', 'sent_subjectivity', 'sent_assessments']] = pd.DataFrame(self.processed_tweets.sentiment.values.tolist(), index=self.processed_tweets.index)\n \n #Join processed tweets to dataframe\n self.troll_tweet_df = self.troll_tweet_df.join(self.processed_tweets, how='left')\n \n #Classify tweets based on polarity\n self.msg_handle(\"msg_preproc_class\")\n \n conditions = [\n (self.troll_tweet_df['sent_polarity'] == 0.00),\n (self.troll_tweet_df['sent_polarity'] > 0.00),\n (self.troll_tweet_df['sent_polarity'] < 0.00)\n ]\n choices = ['neutral', 'positive', 'negative']\n self.troll_tweet_df['class_sentiment'] 
= np.select(conditions, choices, default= None)\n #Store processed tweets to CSV for later use\n if self.config[\"isSavePreproc\"] == True:\n self.msg_handle(\"msg_preproc_save\")\n self.troll_tweet_df.to_csv(self.my_path + \"\\\\data\\\\\" + \"Processed\\\\processed_tweets.csv\")\n \n else:\n self.msg_handle(\"msg_import_preproc\")\n self.troll_tweet_df = pd.read_csv(self.my_path + \"\\\\data\\\\\" + \"Processed\\\\processed_tweets.csv\")\n \n #Literal evaluation of hashtag column for it is read as a string value instead of a list\n self.msg_handle(\"msg_import_preproc_convert\")\n self.troll_tweet_df['hash_tags'] = self.troll_tweet_df['hash_tags'].progress_apply(lambda x: literal_eval(x))\n self.distinct_hashtags = pd.read_csv(self.my_path + \"\\\\data\\\\\" + \"Processed\\\\distinct_hashtags.csv\")\n \n self.hashtag_set_list = self.get_cat_hash_tags()\n \n self.msg_handle(\"msg_preproc_complete\")\n return (self.troll_tweet_df, self.distinct_hashtags, self.hashtag_set_list)", "title": "" }, { "docid": "df681f561317157cb730e63912278da9", "score": "0.5511987", "text": "def on_data(self, raw_data):\n data = json.loads(raw_data)\n\n if not check_tweepy_data_json(data):\n # print('useless tweet: %s', data['id'])\n return True\n \n elif 'retweeted_status' in data.keys():\n data = data['retweeted_status']\n # print(\"this was a RT... \")\n if not check_tweepy_data_json(data):\n # print('useless tweet: %s', data['id'])\n return True\n \n # else: # only text tweets get here alive\n text = sub(\"\\n\", \" \", data['text'])\n if __name__ == '__main__':\n print(text)\n\n ts = int(time() / (60 * 5))\n print(str(ts))\n with open(os.path.join(datadir, '%s.txt' % ts), 'a') as out:\n out.write(text + '\\n')\n\n return True", "title": "" }, { "docid": "50debc4e1ee82f55efad6821fa367089", "score": "0.55114824", "text": "def on_response(self, response):\r\n\r\n # get tweet's username, text and location\r\n tweet_data = get_tweet_content(response) \r\n \r\n # ignore retweets and tweets that do not contain the topic\r\n if (tweet_data['text'].startswith('RT') or\r\n self.topic.lower() not in tweet_data['text'].lower()):\r\n return\r\n\r\n self.counts_dict['total_tweets'] += 1 # it's an original tweet\r\n\r\n # ignore tweets with no location\r\n if not tweet_data.get('location'): \r\n return\r\n\r\n self.counts_dict['locations'] += 1 # user account has location\r\n self.tweets_list.append(tweet_data) # store the tweet\r\n print(f\"{tweet_data['username']}: {tweet_data['text']}\\n\")\r\n\r\n # if TWEET_LIMIT is reached, terminate streaming\r\n if self.counts_dict['locations'] == self.TWEET_LIMIT:\r\n self.disconnect()", "title": "" }, { "docid": "9c8db7bacc95b5db564fd9f2b5c6e237", "score": "0.55105436", "text": "def process(self, element):\n tweet = element.properties.get('Actualtweet', None)\n tweet_text = ''\n if tweet:\n tweet_text = tweet.string_value\n orgName = element.properties.get('userName', None)\n orgName = \"\" if orgName is None else orgName.string_value\n # We fill figure it out while doing entity extraction\n if not tweet_text:\n self.empty_tweet_counter.inc()\n jobs = []\n urls = \"\"\n else:\n jobs = re.findall(job_Regex, tweet_text, re.IGNORECASE)\n if len(jobs) == 0:\n self.no_job_counter.inc()\n urls = re.findall(url_Regex, tweet_text, re.IGNORECASE)\n if len(urls) == 0:\n self.no_url_counter.inc()\n location = re.findall(location_Regex, tweet_text, re.IGNORECASE )\n location = location[0] if len(location) > 0 else default_location\n print {\n \"Tweet\":tweet_text,\n \"Job List\": jobs,\n 
\"Company Name\": orgName,\n \"Location\": location,\n \"Job Url\": urls[0] if len(urls) > 0 else \"\"\n }\n return {\n \"Tweet\":tweet_text,\n \"Job List\": jobs,\n \"Company Name\": orgName,\n \"Location\": location,\n \"Job Url\": urls[0] if len(urls) > 0 else \"\"\n }", "title": "" }, { "docid": "7b280861a322e1067154ff4d74efea47", "score": "0.55098236", "text": "def parse(text_msg):\r\n words = set ( text_msg.lower().split())\r\n action = list(verbs & words)\r\n application = list(words & software_app)\r\n social_media_action = list(words & social_media ) \r\n result = [elem for elem in software_app if elem in application]\r\n \r\n\r\n if (\"what is\" in text_msg):\r\n result = wikipedia.summary(text_msg, sentences = 2) \r\n print(result) \r\n tts.speak(result)\r\n elif(\"exit\" in text_msg or \"quite\" in text_msg):\r\n sys.exit()\r\n elif(\"do not\" in text_msg or \"don't\" in text_msg):\r\n pass \r\n elif action and result:\r\n mystr=\"\"\r\n for x in result:\r\n mystr += x + \" && \" \r\n os.system((mystr[:-3]).lower())\r\n tts.speak(\"Running.. stay there\")\r\n elif social_media_action :\r\n for app in social_media_action:\r\n if(app.lower() == \"facebook\"):\r\n os.system(\"chrome www.facebook.com\")\r\n tts.speak(\"opening facebook.com for you.\")\r\n elif(app.lower() == \"linkedin\"):\r\n os.system(\"chrome www.linkedin.com\")\r\n tts.speak(\"opening linkedin for you\")\r\n elif(app.lower()== \"whatsapp\"):\r\n if \"shubham\" in words:\r\n os.system(\"chrome http://wa.me/\"+whatsapp_contacts['shubham'])\r\n tts.speak(\"sending whatsapp to shubham\")\r\n if \"papa\" in words:\r\n os.system(\"chrome http://wa.me/\"+whatsapp_contacts['papa'])\r\n tts.speak(\"sending whatsapp to papa\")", "title": "" }, { "docid": "7210912d918d9243f8dcbce7dfe39e52", "score": "0.5502125", "text": "def process(self, message, **kwargs):\n\n #from mtranslate import translate\n texts=translate(message.text).lower()\n\n intents, probabilities, ranking= tarun_nlp(txt=texts)\n intent, intent_ranking=self.convert_to_rasa(intents, probabilities, ranking, texts)\n\n #message.set(\"intent\", [intent], add_to_output=True)\n #message.set(\"intent_ranking\", [intent_ranking], add_to_output=True)\n\n message.set(\"intent\", intent, add_to_output=True)\n message.set(\"intent_ranking\", intent_ranking, add_to_output=True)", "title": "" }, { "docid": "7ba52c3f7d1384cb39f770c5477386ee", "score": "0.54862225", "text": "def on_data(self, data):\n tweet = json.loads(data)\n #if 'retweeted_status' not in tweet and 'quoted_status' not in tweet:\n self.latest_tweet = tweet\n #self.q.put(tweet)\n return True", "title": "" }, { "docid": "e8ff7a4fe818c34e7c03e8b16a21d424", "score": "0.548127", "text": "def preprocess_tweet_text(tweet):\n\n # Lowers text\n tweet = tweet.lower()\n\n # Removes urls\n tweet = re.sub(r\"http\\S+|www\\S+|https\\S+\", '', tweet, flags=re.MULTILINE)\n\n # Removes user @ references and '#' from tweet\n tweet = re.sub(r'\\@\\w+|\\#', '', tweet)\n\n # Removes punctuations\n tweet = tweet.translate(str.maketrans('', '', string.punctuation))\n\n # Removes extra 'twitter-specific' words not caught from other normalization\n stop_words.add('rt')\n stop_words.add('im')\n stop_words.add('u')\n\n # Removes stopwords, tokenizes\n tweet_tokens = word_tokenize(tweet)\n filtered_words = [w for w in tweet_tokens if not w in stop_words]\n\n # Lemmatizes tokenized text\n lemmatizer = WordNetLemmatizer()\n lemma_words = [lemmatizer.lemmatize(w, pos='a') for w in filtered_words]\n\n # Returns the joined result from the 
preprocessing\n return \" \".join(lemma_words)", "title": "" }, { "docid": "cf8b22a9674e3496f38a2697579f6519", "score": "0.54709035", "text": "def processTweet(tweet, removeFreqWords=False):\n\n # tweet tokeniser\n tweetTokeniser = nltk.tokenize.TweetTokenizer()\n # tweet lemmatizer\n tweetLemmatizer = nltk.stem.WordNetLemmatizer()\n\n # list of stopwords\n Stopwords = nltk.corpus.stopwords.words('english') # english stopwords\n Stopwords = Stopwords + list(string.punctuation)\n Stopwords = Stopwords + [\"u\", \"w\", \"b\", \"n\", \"—\", \"la\", \"ur\"]\n Stopwords = Stopwords + ['rt', 'via', '...', '…', '’', '“', '”', '', '..', \"️\", \"‍\"]\n Stopwords = Stopwords + [\".\", \"’\", \"…\", \"!\", \"?\", \"...\", \":\", \"“\", \"”\", \"$\", '\"', \"/\", \"-\", \"..\"]\n Stopwords = Stopwords + [\"(\", \")\", \"*\", \"&\"]\n\n # some patterns that are not desired\n # pattern to remove all strings of digits or fractions, e.g., 6.15\n regexDigit = re.compile(\"^\\d+\\s|\\s\\d+\\s|\\s\\d+$\")\n # regex pattern for http\n regexHttp = re.compile(\"http\")\n\n # if removeFreqWords==True then we will include frequent words also as stopwords\n if removeFreqWords == True:\n Stopwords = Stopwords + [\"uber\", \"driver\", \"get\"]\n\n # start text pre-processing\n # covert all to lower case\n tweet = tweet.lower()\n # tokenise tweets\n tweetTokens = tweetTokeniser.tokenize(tweet)\n # remove white spaces\n tweetTokens = [word.strip() for word in tweetTokens]\n\n # irrelevant word removal\n tweetTokens = [word for word in tweetTokens if word not in Stopwords and not word.isdigit() and regexHttp.match(word) == None and regexDigit.match(word) == None]\n\n # obtain part of speech tag for each word in tweetTokens\n tweetTokens_pos = nltk.pos_tag(tweetTokens)\n # lemmatise the words\n lemmedTokens = set([tweetLemmatizer.lemmatize(word[0], getWordPOS(word[1])) for word in tweetTokens_pos])\n\n return lemmedTokens", "title": "" }, { "docid": "506622172cfbf31888ca8b114801f7ac", "score": "0.54703516", "text": "def _main(app_key, app_secret, token_key, token_secret, follow, track,\n locations, languages, kafka, topic, hdfs, path):\n follow = follow.split(',') if follow else None\n track = track.split(',') if track else None\n locations = [float(l) for l in locations.split(',')] if locations else None\n languages = languages.split(',') if languages else None\n\n if not any([follow, track, locations]):\n click.echo(\"Error: At least one of follow, track or locations should\"\n \" be specified\")\n sys.exit(-1)\n\n if follow: click.echo(\"Follow: %s\" % follow)\n if track: click.echo(\"Track: %s\" % track)\n if locations: click.echo(\"Locations: %s\" % locations)\n if languages: click.echo(\"Languages: %s\" % languages)\n\n # Auth in Twitter and check if it's succeded\n auth = tweepy.OAuthHandler(app_key, app_secret)\n auth.set_access_token(token_key, token_secret)\n api = tweepy.API(auth)\n click.echo(\"Twitter authenticated user: %s\" % api.me().screen_name)\n\n\n if kafka:\n click.echo(\"Kafka bootstrap servers: %s\" % kafka)\n producer = kafka_client.KafkaProducer(bootstrap_servers=kafka,\n value_serializer=str.encode)\n stream_listener = TwitterStreamListener(producer, topic)\n else:\n click.echo(\"HDFS server: %s\" % hdfs)\n from pyhdfs import HdfsClient\n client = HdfsClient(hosts=hdfs, user_name='root')\n stream_listener = HDFSStreamListener(client, path)\n\n stream = tweepy.Stream(auth=api.auth, listener=stream_listener)\n stream.filter(follow=follow, track=track, locations=locations, languages=languages)\n 
producer.flush()", "title": "" }, { "docid": "71b1fb838a43ba05ca6ba822e1425ee7", "score": "0.54694664", "text": "def post(self):\n topic_title = request.json['topic_title']\n tags = request.json['tags']\n maxAmount = request.json['maxAmount']\n since = _getDate(request.json['since'])\n until = _getDate(request.json['until'])\n language = request.json['language'] if request.json['language'] else \"en\"\n\n try:\n tweets_dict = ts.getTweetsFromAPI(topic_title=topic_title, search_tags=tags, maxAmount=maxAmount,\n since=since, until=until, language=language) \n return tweets_dict\n except TweetAnalyzerException:\n return 'El tema de los tweets ya existe para otro idioma.', 400", "title": "" }, { "docid": "a5940c3b0bbf4f5b9c92723e7d0baca7", "score": "0.5465912", "text": "def preprocess_tweet_general(self, tweet):\n tweet = tweet.lower()\n tweet = re.sub(self.re_web, \" \", tweet)\n mnts = re.findall(self.re_atx, tweet)\n mnts = [x.replace(\"@\", \"\").strip() for x in mnts]\n tweet = re.sub(self.re_atx, \" \", tweet)\n tweet = re.sub(self.re_num, \" \", tweet)\n tags = re.findall(self.re_hashtag, tweet)\n tags = [x.replace(\"#\", \"\").strip() for x in tags]\n tweet = re.sub(self.re_hashtag, \" \", tweet)\n tweet = re.sub(self.re_nonan, \" \", tweet)\n tweet = re.sub(self.re_pun, '', tweet)\n tweet = re.sub(self.re_rep_let, r'\\1', tweet)\n tweet = re.sub(self.re_wspace, \" \", tweet).strip()\n\n return (tweet, mnts, tags)", "title": "" }, { "docid": "d627f0781053a4b7ed280a00169a05df", "score": "0.54651755", "text": "def on_message(topic, msg):\n global DEBUG, MQTT_SUB_TOPIC_CMD, MQTT_PUB_TOPIC_STATUS, MQTT_SUB_TOPIC_CONF, MQTT_SUB_TOPIC_HASS_PREFIX\n m = msg.decode(\"utf-8\")\n t = topic.decode(\"utf-8\")\n if DEBUG: print('MQTT received: %s from %s' % (m, t))\n if (t == MQTT_SUB_TOPIC_CMD):\n process_command(m)\n if (t == MQTT_PUB_TOPIC_STATUS):\n process_status(m)\n if (t == MQTT_SUB_TOPIC_CONF):\n process_config(m)\n if (t.startswith(MQTT_SUB_TOPIC_HASS_PREFIX)):\n process_hass(t, m)", "title": "" }, { "docid": "fdd1f548cea62385b739272e7febbf9d", "score": "0.5449696", "text": "def process_msg(self, msg: str):\n line: list[str] = msg.split()\n\n if len(line) > 1:\n code = line[1]\n\n if line[0] == \"PING\":\n self.push_msg(\"PONG {0}\".format(code))\n else:\n print(msg)\n\n if code == \"396\":\n self.join_channels()\n\n self.manager.parse(self, code, line)", "title": "" }, { "docid": "54c671bedabe6cfca2338c29cb51a957", "score": "0.5447688", "text": "def topic(app='tweettalk', content_id=None):\n try:\n q = form_query(content_keywords(request.content),\n content_entities(request.content)) \n credentials = get_twitter_credentials(app)\n params = {'q': q, 'count': 100, 'result_type': 'mixed'}\n result = search(params, section=app, credentials=credentials) \n tweets = filter_tweets_by_stoplist(result.statuses, 'media')\n return render( {'tweets': tweets }, template='topic.jinja2')\n except TwitterAuthError:\n # This redirect is for the HTML UI. JSON clients should execute the\n # auth-check / auth-verify cycle before making API calls\n return redirect(url_for('auth_check', app=app))\n except Exception, e:\n traceback.print_exc()\n return jsonify({'error': str(e)})", "title": "" }, { "docid": "8df40bb4caf34ea175dae2ba742f0254", "score": "0.54196286", "text": "def get_text_features(tweet_body):\n result = \" |text \"\n # If retweet, don't return text body (not checking Staff vs. 
Trump style)\n if (tweet_body.startswith(\"\\\"@\")):\n return(result)\n else:\n # Make all characters lowercase\n tweet_body = tweet_body.lower()\n\n # Handle links that lack a leading space by adding one\n tweet_body = re.sub('[^\\s]https', ' https', tweet_body)\n\n # Add dummys, 1st pass\n tweet_body = add_dummys(tweet_body)\n\n # Replace punctuation with empty string\n punctuations = '''!()-[]{};:'\"\\,<>./?@#$%^&*_|`~'''\n for x in tweet_body: \n if x in punctuations: \n tweet_body = tweet_body.replace(x, \"\") \n\n # Add dummys, 2nd pass\n tweet_body = add_dummys(tweet_body)\n\n # Remove extra spaces\n tweet_body = tweet_body.replace(\" \", \" \") \n tweet_body = tweet_body.replace(\" \", \" \")\n\n # Handle leading space\n if (tweet_body[0] == \" \"):\n tweet_body = tweet_body[1:]\n\n # Remove trailing newlines\n tweet_body = tweet_body.rstrip(\"\\n\\r\")\n\n result += tweet_body\n return(result)", "title": "" }, { "docid": "35e4179c4671c1d27fa07b49ecfdf218", "score": "0.5409887", "text": "def topic_message(client, msg):\n LOGGER.info(msg.topic+\" \"+msg.payload.decode('utf-8'))\n topic = msg.payload.decode('utf-8') + \"/motion\"\n TOPIC.set(topic)", "title": "" }, { "docid": "550a6291cdb935843d8dfa2addc210e5", "score": "0.54093546", "text": "def preprocess_twitter(infile, outfile):\n for line in infile:\n if \"\\t\" in line:\n line = line.split(\"\\t\", 1)[1]\n text = line.rstrip()\n text = TWITTER_HANDLE_RE.sub(\"\", text)\n text = TCO_RE.sub(\"\", text)\n text = fix_surrogates(unescape_html(text)).replace(\"\\n\", \" \")\n lang, _confidence = detect_language_checked(text)\n if lang != 'und':\n print(f\"{lang}\\t{text}\", file=outfile)", "title": "" }, { "docid": "4b38f013078c9b754576afd323a872d1", "score": "0.5407868", "text": "def define_topic(body):\r\n # Split the body and create a dictionary\r\n wordCounts = defaultdict(int)\r\n words = body.split(' ')\r\n for word in words:\r\n wordCounts[word] += 1\r\n\r\n Trump_count = wordCounts['Trump'] + wordCounts['president'] + wordCounts['President'] + wordCounts['Pence'] + \\\r\n wordCounts['Mike'] + wordCounts['GOP'] + wordCounts['Republican'] + wordCounts['Republicans'] + \\\r\n wordCounts['Donald'] + wordCounts['Mitch'] + wordCounts['McConnell'] + wordCounts['right'] + \\\r\n wordCounts['Barrett']\r\n Biden_count = wordCounts['Biden'] + wordCounts['Kamala'] + wordCounts['Vice'] + wordCounts['Joe'] + \\\r\n wordCounts['Harris'] + wordCounts['DNC'] + wordCounts['Democrat'] + wordCounts['Democrats'] + \\\r\n wordCounts['Barack'] + wordCounts['Obama'] + wordCounts['Nancy'] + wordCounts['Pelosi'] + wordCounts[\r\n 'left'] + wordCounts['Schumer'] + wordCounts['Pete'] + wordCounts['Buttigieg']\r\n msft_count = wordCounts['Microsoft'] + wordCounts['MSFT'] + wordCounts['$MSFT']\r\n aapl_count = wordCounts['Apple'] + wordCounts['AAPL'] + wordCounts['$AAPL']\r\n amzn_count = wordCounts['Amazon'] + wordCounts['AMZN'] + wordCounts['$AMZN']\r\n goog_count = wordCounts['Google'] + wordCounts['Alphabet'] + wordCounts['GOOG'] + wordCounts['$GOOG']\r\n googl_count = wordCounts['Google'] + wordCounts['Alphabet'] + wordCounts['GOOGL'] + wordCounts['$GOOGL']\r\n fb_count = wordCounts['Facebook'] + wordCounts['FB'] + wordCounts['$FB']\r\n brk_count = wordCounts['Berkshire'] + wordCounts['BRK.B'] + wordCounts['$BRK.B']\r\n jnj_count = wordCounts['Johnson'] + wordCounts['JNJ'] + wordCounts['$JNJ']\r\n v_count = wordCounts['Visa'] + wordCounts['V'] + wordCounts['$V']\r\n pg_count = wordCounts['Proctor'] + wordCounts['Gamble'] + wordCounts['PG'] + 
wordCounts['$PG']\r\n jpm_count = wordCounts['JPMorgan'] + wordCounts['JPM'] + wordCounts['$JPM']\r\n unh_count = wordCounts['UnitedHealth'] + wordCounts['UNH'] + wordCounts['$UNH']\r\n ma_count = wordCounts['Mastercard'] + wordCounts['MA'] + wordCounts['$MA']\r\n intc_count = wordCounts['Intel'] + wordCounts['INTC'] + wordCounts['$INTC']\r\n vz_count = wordCounts['Verizon'] + wordCounts['VZ'] + wordCounts['$VZ']\r\n hd_count = wordCounts['Home'] + wordCounts['Depot'] + wordCounts['HD'] + wordCounts['$HD']\r\n t_count = wordCounts['AT&T'] + wordCounts['T'] + wordCounts['$T']\r\n dow_count = wordCounts['Dow'] + wordCounts['DJI'] + wordCounts['$DJI']\r\n nasdaq_count = wordCounts['NASDAQ'] + wordCounts['IXIC'] + wordCounts['$IXIC']\r\n sp_count = wordCounts['S&P500'] + wordCounts['INX'] + wordCounts['$INX']\r\n tesla_count = wordCounts['TSLA'] + wordCounts['Tesla'] + wordCounts['$TSLA']\r\n\r\n topic_count = {'Trump': Trump_count, 'Biden': Biden_count, 'MSFT': msft_count, \\\r\n 'AAPL': aapl_count, 'AMZN': amzn_count, 'GOOG': goog_count, \\\r\n 'GOOGL': googl_count, 'FB': fb_count, 'BRK': brk_count, \\\r\n 'JNJ': jnj_count, 'V': v_count, 'PG': pg_count, \\\r\n 'JPM': jpm_count, 'UNH': unh_count, 'MA': ma_count, \\\r\n 'INTC': intc_count, 'VZ': vz_count, 'HD': hd_count, \\\r\n 'T': t_count, 'DOW': dow_count, 'NASDAQ': nasdaq_count, \\\r\n 'SP': sp_count, 'TSLA' : tesla_count}\r\n\r\n topic_values = topic_count.values()\r\n\r\n # looks for the max value in topic_count and makes that the keyword\r\n # if all of the values are 0 the topic will be undefined and the\r\n # keyword is N/A\r\n keyword = max(topic_count, key=topic_count.get)\r\n\r\n if max(topic_values) == 0:\r\n topic = 'Undefinded'\r\n keyword = 'N/A'\r\n elif keyword == 'Trump' or keyword == 'Biden':\r\n topic = 'Election'\r\n else:\r\n topic = 'Stocks'\r\n return [topic, keyword]", "title": "" }, { "docid": "fa4372fd939a2b90527ab6cfed97b41e", "score": "0.5398213", "text": "def preprocess_tweet(datapath):\n with open(datapath, 'rt', encoding=\"utf8\") as f:\n data = f.read()\n\n # Split the data into separate tweet\n data = data.split('\\n')\n # Remove artifects that are too short\n data = [entry for entry in data if len(entry) > 20]\n # Define pattern for different field\n date_pattern = r'\"created_at\":\".+?\",\"'\n tweet_pattern = r',\"full_text\":\".+?\"id_str\":\".+?\",\"'\n id_pattern = r'\"id_str\":\".+?\",\"'\n\n created_date = []\n tweet = []\n tweet_id = []\n\n #Extract date, id, and tweet text\n for i,t in enumerate(data):\n temp_date = re.findall(date_pattern, t)[0][14:-3]\n temp_tweet = re.findall(tweet_pattern, t)[0]\n temp_id = re.findall(id_pattern, temp_tweet)[0][10:-3]\n temp_tweet = temp_tweet.split(',\"')[1][14:-1]\n\n created_date.append(temp_date)\n tweet.append(temp_tweet)\n tweet_id.append(temp_id)\n\n # Pack extracted data into datafram\n df = pd.DataFrame(data={'created_date': created_date,\n 'tweet': tweet,\n 'tweet_id': tweet_id})\n\n return df", "title": "" }, { "docid": "bc283415bcfccac3d48ea8b35eb31f1e", "score": "0.53905416", "text": "def preprocess(self, tweet):\n if self.twitterPreprocesser:\n tweet = self.twitterPreprocesser.preprocess(tweet)\n\n tokens = self.tokenizer.tokenize(tweet)\n\n if self.stemmer:\n tokens = self.stemmer.stem(tokens)\n\n return tokens", "title": "" }, { "docid": "6e2929f846572875783e6552273698e5", "score": "0.5378535", "text": "def post_tweet(tweet):\n # authorize twitter, initialize tweepy\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n 
auth.set_access_token(access_key, access_secret)\n api = tweepy.API(auth)\n api.update_status(tweet)", "title": "" }, { "docid": "6276138d73d8b20421eb62ad110414f1", "score": "0.5373081", "text": "def parse(wmsg):", "title": "" }, { "docid": "90d9ffd7c916ce460c4e378b65261d52", "score": "0.53725016", "text": "def tweet(markov_chain_text):\n\n api = twitter.Api(\n consumer_key=os.environ['TWITTER_CONSUMER_KEY'],\n consumer_secret=os.environ['TWITTER_CONSUMER_SECRET'],\n access_token_key=os.environ['TWITTER_ACCESS_TOKEN_KEY'],\n access_token_secret=os.environ['TWITTER_ACCESS_TOKEN_SECRET'])\n\n # This will print info about credentials to make sure \n # they're correct\n print api.VerifyCredentials()\n\n # Send a tweet\n status = api.PostUpdate(markov_chain_text)\n print status.text", "title": "" }, { "docid": "62fdfefc6410cd7ebac3438054a0e075", "score": "0.53681177", "text": "def send_tweets(self, tweets):\n if self.twitter_api is None:\n self.log(\"No Twitter Consumer Key set; not tweeting\")\n return\n\n for tweet in tweets:\n previous_status_id = None\n\n if tweet[\"in_reply_to_time\"] is not None:\n # This tweet is a reply, so check that it's a reply to the\n # immediately previous tweet.\n # It *should* be, but if something went wrong, maybe not.\n previous_status_time = self.redis.get(\"previous_tweet_time\")\n\n if tweet[\"in_reply_to_time\"] == previous_status_time:\n previous_status_id = self.redis.get(\"previous_tweet_id\")\n\n self.log(\n \"Tweeting: {} [{} characters]\".format(tweet[\"text\"], len(tweet[\"text\"]))\n )\n\n try:\n status = self.twitter_api.PostUpdate(\n tweet[\"text\"], in_reply_to_status_id=previous_status_id\n )\n except twitter.TwitterError as e:\n self.error(e)\n else:\n # Set these so that we can see if the next tweet is a reply\n # to this one, and then one ID this one was.\n self.redis.set(\"previous_tweet_time\", tweet[\"time\"])\n self.redis.set(\"previous_tweet_id\", status.id)\n\n time.sleep(2)", "title": "" }, { "docid": "12feec71ca94c840659644b5d9d953db", "score": "0.5359838", "text": "def on_message(mqtt, userdata, msg):\n payload = msg.payload.decode('utf-8')\n topic = msg.topic.split('/', 1)[-1]\n\n if topic and topic != 'discord':\n discord_msg(f'{topic}: {payload}')\n else:\n discord_msg(f':interrobang: {payload}')", "title": "" }, { "docid": "4aadbe401560567a2f60c4088711a902", "score": "0.5341229", "text": "def tokenize_random_tweet(self):\n try:\n import twitter\n except ImportError:\n print \"Apologies. The random tweet functionality requires the Python twitter library: http://code.google.com/p/python-twitter/\"\n from random import shuffle\n api = twitter.Api()\n tweets = api.GetPublicTimeline()\n if tweets:\n for tweet in tweets:\n if tweet.user.lang == 'en': \n return self.tokenize(tweet.text)\n else:\n raise Exception(\"Apologies. I couldn't get Twitter to give me a public English-language tweet. 
Perhaps try again\")", "title": "" }, { "docid": "989e66341e2516ca3dab227b76d9b5f2", "score": "0.5332599", "text": "def handleTweet(userName):\n users[userName] = getOverAllMood()\n trackMood(userName)", "title": "" }, { "docid": "19e4f474994e742183f4f3fad2f356a9", "score": "0.53303355", "text": "def test_create_text_detect_mention_ignores_casing_of_username(self):\n user = make_user()\n\n headers = make_authentication_headers_for_user(user=user)\n\n mentioned_user = make_user(username='Joel')\n\n post_text = 'Hello @joel'\n\n data = {\n 'text': post_text,\n }\n\n url = self._get_url()\n response = self.client.put(url, data, **headers, format='multipart')\n\n get_worker('high', worker_class=SimpleWorker).work(burst=True)\n\n post = Post.objects.get(text=post_text, creator_id=user.pk)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertTrue(PostUserMention.objects.filter(post_id=post.pk, user_id=mentioned_user.pk).exists())", "title": "" }, { "docid": "bd44bf6c9f164d1b814a3aac9da2e30d", "score": "0.53299403", "text": "def on_data(self, data):\n try:\n\n global conn\n\n # load the tweet JSON, get pure text\n full_tweet = json.loads(data)\n tweet_text = full_tweet['text']\n\n # print the tweet plus a separator\n print (\"------------------------------------------\")\n print(tweet_text + '\\n')\n\n # send it to spark\n conn.send(str.encode(tweet_text + '\\n'))\n except:\n\n # handle errors\n e = sys.exc_info()[0]\n print(\"Error: %s\" % e)\n\n\n return True", "title": "" }, { "docid": "e5cc37740804f5fee995420eb2cfd24a", "score": "0.53296363", "text": "def process_phonetize_hts(self, txt_fpath, style=''):\n self.process_phonetize(txt_fpath)\n self.utt_total_mix = self.phonetizer_obj.utt_total_mix\n\n self.pfs_xtr_hts_obj = PfsExtractorHts(self.param_module, self.utt_total_mix)\n\n self.pfs_xtr_hts_obj.process_pfeats_extr(style)\n\n self.pfs_xtr_hts_obj.gen_hts_lab_fts()\n self.hts_lab_gen_prn = self.pfs_xtr_hts_obj.hts_lab_gen_prn", "title": "" }, { "docid": "b42415c0a49c79bfead9901f23dfd384", "score": "0.5303099", "text": "def clean_text(self, tweet):\n tweet_list = tweet.split(\" \")\n\n # Removing twitter handle\n tweet_list = [w for w in tweet_list if not w.startswith(\"@\")]\n\n tweet = \" \".join(tweet_list)\n\n # removing hashtags from tweet\n tweet = re.sub('#', '', tweet)\n\n #removing numbers from tweet as they are not present in the embedding dictionary\n tweet = re.sub('[0-9]+', '', tweet)\n\n # finding all words from the tweet to also get rid of punctuations\n words = re.findall(r'\\w+', tweet)\n\n # Removing stopwords\n for word in words:\n if word in self.stopwords or word.startswith('@'):\n words.remove(word)\n\n cleaned_tweet = \" \".join(words)\n\n return cleaned_tweet", "title": "" }, { "docid": "58d3c4343d866d7dcc964f6ede690c86", "score": "0.5273932", "text": "def post(self, msg):\n self.twitter.statuses.update(status=msg)", "title": "" }, { "docid": "e1ed40f17c9067b13994faca45cfd076", "score": "0.5268021", "text": "def __process_tweet(self, word_counts, tweet):\n original_words = self.tweet_cleaner.process_string(tweet).split() # cleans original tweet & splits\n # standard dict count algorithm\n for word in original_words:\n if word not in word_counts:\n word_counts[word] = 0\n word_counts[word] += 1", "title": "" }, { "docid": "eb398c19a5570431b0a4d9f3ad5df8b7", "score": "0.5264655", "text": "def on_data(self, data):\n tweet = json.loads(data)\n\n tweet_dict = {\n 'id': tweet['id_str'],\n 'text': tweet['text'],\n 'user': 
tweet['user']['screen_name'],\n 'followers_count': tweet['user']['followers_count'],\n 'date': tweet['created_at'],\n 'location': tweet['user']['location'],\n # 'quote_count': tweet['quote_count'],\n # 'reply_count': tweet['reply_count'],\n # 'retweet_count': tweet['retweet_count'],\n # 'favorite_count': tweet['favorite_count'],\n 'entities': tweet['entities'],\n 'sentimented':0\n }\n\n # write_tweet(tweet_dict)\n load_into_mongo(tweet_dict)", "title": "" }, { "docid": "d020197df6486012e87b2c17073561e8", "score": "0.5264378", "text": "def text_handler(update: Update, context: CallbackContext) -> None:\n\n logger.info(f\"Parsing: {update.message.text}\")\n if handle_message_text(update.message.text):\n update.message.reply_text(\n 'The video will be submitted for downloading promptly.'\n )\n else:\n update.message.reply_text(\n f'\\\"{update.message.text}\\\" is not a YouTube link.'\n )", "title": "" }, { "docid": "e839818046ccdf59ab85426fc40034b6", "score": "0.5259511", "text": "def handle_publish(mqttClient, userdata, mid):\n pass", "title": "" }, { "docid": "ae3d4cf774f98bff531fa09ae01753d8", "score": "0.52542436", "text": "def run(*args):\n import argparse\n parser = argparse.ArgumentParser(description=\"Typing Test\")\n parser.add_argument('topic', help=\"Topic word\", nargs='*')\n parser.add_argument('-t', help=\"Run typing test\", action='store_true')\n\n args = parser.parse_args()\n if args.t:\n run_typing_test(args.topic)", "title": "" }, { "docid": "9d563c9b4427bd31b7ba19295219da08", "score": "0.52520823", "text": "def test_tweet_hashtags_content(self):\n tg = TweetGenerator(text_path=TweetGeneratorTest.data_path,\n config=TweetGeneratorTest.config,\n train=True,\n debug=True)\n hastags = [\"#AI\", \"#tensorflow\"]\n tweet_list = tg.generate_tweet_list(50, \"i am\", hashtag_list=hastags)\n result = True\n debug = \"NoProblemo\"\n for tweet in tweet_list:\n condition1 = tweet.find(\"#AI\") != -1\n condition2 = tweet.find(\"#tensorflow\") != -1\n if not (condition1 and condition2):\n debug = tweet\n result = False\n break\n self.assertTrue(result, msg=\"\\nProblematic tweet = {}\".format(debug))", "title": "" }, { "docid": "6ced10b2268ce41218e1562adbd99edd", "score": "0.52503216", "text": "def process(self, session, message):", "title": "" }, { "docid": "314f09286259476cc600157933052eeb", "score": "0.5245813", "text": "def process_msg(self):\r\n pass", "title": "" }, { "docid": "7f0626b23054df3be27ef37137a3cfa3", "score": "0.52325547", "text": "def retweet_follow():", "title": "" }, { "docid": "f787f4e5f65cb85bcb52f09b58a490c7", "score": "0.52312434", "text": "def countPOS(procTxt, hr, featureVals = {}, FKEY = 'countPOS'): #function for extracting features from a tweet and returning feature list\n if haskey(featureVals, FKEY): return featureVals\n tokens = procTxt.tokens #procTxt[PTKEY_TOKENS]\n tags = procTxt.tags #procTxt[PTKEY_TAGS]\n pols = procTxt.pols #[ pol for sentence in procTxt[PTKEY_PRECHUNK] for pol in sentence.pols]\n# print tokens\n# print pols \n tweet = [(tok,tag, pol) for tok, tag, pol in zip(tokens, tags, pols)]\n# if any(pols):\n# print tweet\n \n# posWords = hr.resources[RESKEY_POLAR_NGRAMS].getDicts(1, KEY_POLARITY_POSITIVE) #hr.posWords\n# negWords = hr.resources[RESKEY_POLAR_NGRAMS].getDicts(1, KEY_POLARITY_NEGATIVE) #hr.negWords\n negation = hr.resources[RESKEY_NEGATORS].getDicts(1, KEY_NEGATION) #hr.negation\n hap_verbs = set(hr.resources[RESKEY_HAPPENINGVERBS])\n soft_verbs = set(hr.resources[RESKEY_SOFTVERBS])\n #openClauseComp = 
set(hr.resources[RESKEY_OPENCLAUSALCOMPLIMENT])\n probNouns = set(hr.resources[RESKEY_PROBNOUNS])\n #noParticle = set(hr.resources[RESKEY_NO_PARTICLE])\n nhap_verbs = set(hr.resources[RESKEY_NOTHAPPENINGVERBS])\n #tweet = tokTag \n# adj=[] #list containing all the adjectives\n# adv=[]\n \n retpos = {'adjective': 0, 'adverb':0, 'verb': 0,'noun':0,'interjection':0}\n retneg = {'adjective': 0, 'adverb':0, 'verb': 0,'noun':0,'interjection':0}\n retneu = {'adjective': 0, 'adverb':0, 'verb': 0,'noun':0,'interjection':0}\n retval = {KEY_POLARITY_POSITIVE:retpos, KEY_POLARITY_NEGATIVE:retneg, KEY_POLARITY_NEUTRAL:retneu}\n #noun_keys=[\"N\",\"O\",\"S\"]\n #check if txt contains relevant tags.\n #adj = [tw[0] for tw in tweet if tw[1] == POSKEY_ADJ]\n containsRelTags = [tw[0] for tw in tweet if tw[1] in [POSKEY_ADJ,POSKEY_ADV, POSKEY_VRB,POSKEY_NOUN,POSKEY_INTJ]]\n tweet = [(tw[0].replace('#',\"\"), tw[1], tw[2]) for tw in tweet]\n \n if containsRelTags:\n #POS counters\n posj, negj, neuj, posadv, negadv, neuadv, posver, negver, neuver, posnou, negnou, neunou, posintj, negintj, neuintj= [0]*15 # #posj, negj, neuj = [0]*3 #posj=0; negj=0; neuj=0; posadv=0; negadv=0; neuadv=0; posver=0; negver=0; neuver=0\n #prev tok position vars\n prev_adj, prev_verb, prev_adver, prev_noun = [-1]*4 #prev_adj=-1; prev_verb=-1; prev_adver=-1 \n \n #flag for storing whether polarity of previous POS was reversed or not, if 1 then it was reversed else 0\n prev_act, prev_act_verb, prev_act_adver, prev_act_noun = [0]*4 #prev_act=0; prev_act_verb=0; prev_act_adver=0 \n\n for i, tw in enumerate(tweet): #in range(len(tweet)): #for loop number :1\n #print tweet[i]\n pos, neg = [0]*2 #flag for telling whether the current word is positive/negative or not\n adjec, ver, adver, nou, intj = [0]*5 #flag for denoting whether current polar word is an adverb or not\n\n #if(tweet[i][1]!=POSKEY_ADJ and tweet[i][1]!=POSKEY_ADV and tweet[i][1]!=POSKEY_VRB):\n if not tweet[i][1] in [POSKEY_ADJ, POSKEY_VRB, POSKEY_ADV, POSKEY_NOUN, POSKEY_INTJ]: \n continue\n elif(tweet[i][1]==POSKEY_ADJ):#if word is adjective\n if tweet[i][0] in negation:# if word is a negation then got to the next iteration of the loop :1\n continue\n elif tweet[i][2] > 0: #tweet[i][0] in posWords :#if word is positive \n posj=posj+1 #increase positive counter\n pos=1\t #flag for polarity of the word\n elif tweet[i][2] < 0: #tweet[i][0] in negWords: #if word is negative\n negj=negj+1 \n neg=1\t \n else:\n neuj=neuj+1 #if none of the above cases are true then word is neutral, hence incrementing neutral counter\n continue #since there is no polarity to be assigned hence moving to next iteration of loop :1\n adjec=1\n\n elif(tweet[i][1]==POSKEY_ADV):#if word is adverb\n if tweet[i][0] in negation:# if word is a negation then got to the next iteration of the loop :1\n continue\n elif tweet[i][2] > 0: #tweet[i][0] in posWords :#if word is positive \n posadv=posadv+1#increase positive counter\n pos=1 #flag for polarity of the word\n elif tweet[i][2] < 0: #tweet[i][0] in negWords:#if word is negative\n negadv=negadv+1 #increase negative counter\n neg=1 #flag for polarity of the word\n else:\n neuadv=neuadv+1 #if none of the above cases are true then word is neutral hence incrementing neutral counter\n continue #since there is no polarity to be assigned hence moving to next iteration of the loop :1\n adver=1\n\t\t\t\t\n elif(tweet[i][1]==POSKEY_VRB): #if the word is a verb\n if tweet[i][0] in negation:# if word is a negation then got to the next iteration of the loop\n continue\n 
elif tweet[i][2] > 0: #tweet[i][0] in posWords : #if word is positive\n posver=posver+1 #increase positive counter\n pos=1 #flag for polarity of the word\n elif ((tweet[i][2] < 0)): # or (tweet[i][0] in hap_verbs) or (tweet[i][0] in soft_verbs)) : #tweet[i][0] in negWords: #if word is negative\n negver=negver+1 #increase negative counter\n neg=1 #flag for polarity of the word\n else:\n neuver=neuver+1 #if none of the above cases are true then word is neutral hence incrementing neutral counter\n continue #since there is no polarity to be assigned hence moving to next iteration of the loop :1\n ver=1\n if(i<(len(tweet)-1)):#if there ia an adjective or adverb next to a verb , don`t change the verb`s polarity and move to next iteration of loop :1\n if(tweet[i+1][1] == POSKEY_ADJ or tweet[i+1][1]==POSKEY_ADV):\n prev_verb=i\n prev_act_verb=0\n continue\n elif(tweet[i][1]==POSKEY_NOUN): #if the word is a verb\n if tweet[i][0] in negation:# if word is a negation then got to the next iteration of the loop\n continue\n elif tweet[i][2] > 0: #in posWords : #if word is positive\n posnou=posnou+1 #increase positive counter\n pos=1 #flag for polarity of the word\n elif ((tweet[i][2] < 0)): # or (tweet[i][0] in probNouns)): #if word is negative\n negnou=negnou+1 #increase negative counter\n neg=1 #flag for polarity of the word\n else:\n neunou=neunou+1 #if none of the above cases are true then word is neutral hence incrementing neutral counter\n continue #since there is no polarity to be assigned hence moving to next iteration of the loop :1\n nou=1\n \t\t\t\n \n elif(tweet[i][1]==POSKEY_INTJ): #if the word is a verb\n if tweet[i][0] in negation:# if word is a negation then got to the next iteration of the loop\n continue\n elif tweet[i][2] > 0 : #if word is positive\n posintj=posintj+1 #increase positive counter\n pos=1 #flag for polarity of the word\n elif tweet[i][2] < 0 : #if word is negative\n negintj=negintj+1 #increase negative counter\n neg=1 #flag for polarity of the word\n else:\n neuintj=neuintj+1 #if none of the above cases are true then word is neutral hence incrementing neutral counter\n intj=1\n continue\n \n\n\n\t\t\t\t\n for k in reversed(range(i)):#running a reversed loop from start of the current word to its left\n if tweet[k][0] in negation:#checking for negation tagging\n counter=0 #counter for telling number of permitted POS between negation and word\n m=k+1\n while(m<i):#checking the pos between word and negation\n if(adjec==1):#for adjectives\n if (tweet[m][1] == POSKEY_DET or tweet[m][1]==POSKEY_PREP or tweet[m][1]==POSKEY_CC or tweet[m][0]==POSKEY_PUNC or tweet[m][1]==POSKEY_VRB):\n counter=counter+1 #increment the counter if the word belongs to above POS\n elif(tweet[m][1] == POSKEY_ADJ or tweet[i][1]==POSKEY_ADV):\n# if(tweet[m][0] not in posWords):\n# if(tweet[m][0] not in negWords):\n# counter=counter+1#increment the counter if word is a neutral adjective or adverb\n if tweet[m][2] == 0:\n counter += 1 \n elif(adver==1):# for adverbs (same as adjectives)\n if (tweet[m][1] == POSKEY_DET or tweet[m][1]==POSKEY_PREP or tweet[m][1]==POSKEY_CC or tweet[m][0]==POSKEY_PUNC or tweet[m][1]==POSKEY_VRB ):\n counter=counter+1\n elif(tweet[m][1] == POSKEY_ADJ or tweet[i][1]==POSKEY_ADV ):\n# if(tweet[m][0] not in posWords):\n# if(tweet[m][0] not in negWords):\n# counter=counter+1\n if tweet[m][2] == 0:\n counter += 1\n \n elif(ver==1):#for verbs\t\n if(tweet[m][1]==POSKEY_VRB):#increment the counter only when the word between negation and verb is a verb\n counter=counter+1\n \t\n 
elif(nou==1):#for verbs\t\n if(tweet[m][1]==POSKEY_VRB or tweet[m][1] == POSKEY_DET or tweet[m][1]==POSKEY_PREP or tweet[m][1]==POSKEY_CC or tweet[m][0]==POSKEY_PUNC or tweet[m][1] == POSKEY_ADJ or tweet[i][1]==POSKEY_ADV or tweet[i][1]=='Z' ):#increment the counter only when the word between negation and verb is a verb\n counter=counter+1\n m=m+1\n\t\t\t\t\t\n word_len=i-(k+1) #this counter tells the actual number of words between negation and current polar POS\n if (counter==word_len): #if all the words are of permitted POS type then change the poarity\n if(adjec==1):\t\t\t\t\t\t\t\t\n prev_act=1 #flag stores whether polarity was changed or not in case of adjectives\n elif(ver==1):\n prev_act_verb=1 #flag stores whether polarity was changed or not in case of verb\n elif(adver==1):\n prev_act_adver=1 #flag stores whether polarity was changed or not in case ofadverb\n if(pos == 1): #check the polarity for being positive and change correspoding flag accordingly\t\t\t\t\t\t\t\n if(adjec==1):\n posj=posj-1\n negj=negj+1\n elif(ver==1):\n posver=posver-1\n negver=negver+1\n elif(adver==1):\n posadv=posadv-1\n negadv=negadv+1\n elif(nou==1):\n posnou=posnou-1\n negnou=negnou+1\n\t\t\t\t\t\t\t\t\n elif(neg==1):\n if(adjec==1):\n posj=posj+1\n negj=negj-1\n elif(ver==1):\n posver=posver+1\n negver=negver-1\n elif(adver==1):\n posadv=posadv+1\n negadv=negadv-1\n elif(nou==1):\n posnou=posnou+1\n negnou=negnou-1\n \n else: #now we have to check whether there are any previous polar adverbs ,a djectives or verbs between negation and current polar word\n if(adjec==1): # check for adjectives\n if(prev_adj !=-1): #if this is not the first polar adjective\n count=0\n position=prev_adj+1\n while(position<i): #check from current polar word till prev polar adjective\n if ((tweet[position][1] == POSKEY_DET or tweet[position][1]==POSKEY_PREP or tweet[position][1]==POSKEY_CC or tweet[position][0]==POSKEY_PUNC) and tweet[position][0] != 'but'): # if word is mentioned POS tag except but then increment the counter\n count=count+1\n elif(tweet[position][1] == POSKEY_ADJ or tweet[position][1] == POSKEY_ADV):# if word is a non polar adjective or adverb then increment the counter\n# if(tweet[position][0] not in posWords):\n# if(tweet[position][0] not in negWords):\n# count=count+1\n if tweet[position][2] == 0:\n count += 1\n \n position=position+1\n word_bet=i-position# this counter provides the number of words between current word and previous polar word\n if (count==word_bet): # if number of words belong to permitted class of POS tags then change the polarity\n if(prev_act==1):\n if(pos == 1):\n posj=posj-1\n negj=negj+1\n elif(neg==1):\n negj=negj-1\n posj=posj+1\n elif(prev_act==0):\n prev_act=0\n else:\n prev_act=0\n elif(adver==1): #check for adjverbs , same as adjectives\n if(prev_adver !=-1):\n count=0\n position=prev_adj+1\n while(position<i):\n if ((tweet[position][1] == POSKEY_DET or tweet[position][1]==POSKEY_PREP or tweet[position][1]==POSKEY_CC or tweet[position][0]==POSKEY_PUNC) and tweet[position][0] != 'but'):\n count=count+1\n elif(tweet[position][1] == POSKEY_ADJ or tweet[position][1] == POSKEY_ADV):\n# if(tweet[position][0] not in posWords):\n# if(tweet[position][0] not in negWords):\n# count=count+1\n if tweet[position][2] == 0:\n count += 1\n position=position+1\n word_bet=i-position\n if (count==word_bet):\n if(prev_act_adver==1):\n if(pos == 1):\n posadv=posadv-1\n negadv=negadv+1\n elif(neg==1):\n negadv=negadv-1\n posadv=posadv+1\n elif(prev_act_adver==0):\n prev_act_adver=0\n else:\n 
prev_act_adver=0\n elif(ver==1): #if current polar word is a verb\n if(prev_verb !=-1 ):# if this is not the first polar verb\n count=0\n position=prev_verb+1\n while(position<i):#check from current polar word till prev polar verb\n if (tweet[position][0] == POSKEY_PUNC or tweet[position][0]=='and' or tweet[position][0]=='or'): #increment the counter only when word belongs to these POS\n count=count+1\n position=position+1\n word_bet=i-position #this counter tells the actual number of words between the current verb and previous polar verb\n if (count==word_bet):\n if(prev_act_verb==1):\n if(pos == 1):\n posver=posver-1\n negver=negver+1\t\n elif(neg==1):\n negver=negver-1\n posver=posver+1\n prev_act_verb=1\n elif(prev_act_verb==0):\n prev_act_verb=0\n else:\n prev_act_verb=0\n break\n else:\n continue\n if(adjec==1):#storing position of current polar adjective as previous position for next polar adjective\n prev_adj=i\n elif(ver==1):#storing position of current polar verb as previous position for next polar verb\n prev_verb=i\n elif(adver==1):#storing position of current polar adverb as previous position for next polar adverb\n prev_adver=i\n \n retval[KEY_POLARITY_POSITIVE]['adjective'] = posj; retval[KEY_POLARITY_NEGATIVE]['adjective'] = negj; retval[KEY_POLARITY_NEUTRAL]['adjective'] = neuj;\n retval[KEY_POLARITY_POSITIVE]['adverb'] = posadv; retval[KEY_POLARITY_NEGATIVE]['adverb'] = negadv; retval[KEY_POLARITY_NEUTRAL]['adverb'] = neuadv;\n retval[KEY_POLARITY_POSITIVE]['verb'] = posver; retval[KEY_POLARITY_NEGATIVE]['verb'] = negver; retval[KEY_POLARITY_NEUTRAL]['verb'] = neuver;\n retval[KEY_POLARITY_POSITIVE]['noun'] = posnou; retval[KEY_POLARITY_NEGATIVE]['noun'] = negnou; retval[KEY_POLARITY_NEUTRAL]['noun'] = neunou;\n retval[KEY_POLARITY_POSITIVE]['interjection'] = posintj; retval[KEY_POLARITY_NEGATIVE]['interjection'] = negintj; retval[KEY_POLARITY_NEUTRAL]['interjection'] = neuintj;\n #featureVals[FKEY] = retval\n #print(retval)\n return retval\n ####normal features\n\n #return features", "title": "" }, { "docid": "a92a8c7ebd47bee5df3648928b19fe70", "score": "0.52284473", "text": "def preprocess_tweets(infile: str, outfile: str) -> None:\n logger = logging.getLogger(\"preprocessor\")\n\n # Number of Tweets read\n counter: int = 0\n\n # List of all Tweets\n tweets: List[Tweet] = []\n\n # Begin reading\n with open(infile, \"r\") as csv_file:\n\n # CSV reader\n csv_reader = csv.reader(csv_file, delimiter=\",\")\n logger.info(\"Attached CSV reader\")\n\n # Number of Tweets deleted due to URL\n url_blocked = 0\n\n # Iterate\n for tweet in csv_reader:\n\n # Messaging checkpoints\n if not counter % DIVISION:\n logger.info(\"Processed %s Tweets\", counter)\n\n # Break at limit\n if counter == MAX_TWEETS:\n break\n\n # Only add Tweet if it doesn't contain a URL.\n # As per Ejieh's master's thesis, the vast majority\n # of posts with URLs lack any subjectivity.\n ptn = r\"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+#]|[!*(),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+\"\n if not bool(re.search(ptn, tweet[0])):\n tweets.append(Tweet(tweet))\n else:\n url_blocked += 1\n counter += 1\n\n logger.info(\"Read %s Tweets in total\", counter)\n\n # Finishing message\n logger.info(\"Only %s Tweets were kept\", len(tweets))\n with open(outfile, \"w\", encoding=\"utf-8\") as output_file:\n tweet_writer = csv.writer(output_file)\n i = 1\n\n for tweet in tweets: # type: ignore\n tweet_writer.writerow(\n [\n tweet.full_text, # type: ignore\n tweet.created_at, # type: ignore\n tweet.source, # type: ignore\n 
tweet.tweet_id, # type: ignore\n tweet.retweet_count, # type: ignore\n tweet.favorite_count, # type: ignore\n tweet.user_name, # type: ignore\n tweet.user_id_str, # type: ignore\n tweet.user_handle, # type: ignore\n tweet.user_location, # type: ignore\n tweet.user_desc, # type: ignore\n tweet.user_protected, # type: ignore\n tweet.user_followers, # type: ignore\n tweet.user_created, # type: ignore\n tweet.user_verified, # type: ignore\n tweet.user_tweet_count, # type: ignore\n tweet.cleaned_text, # type: ignore\n json.dumps(tweet.cleaned_tokens), # type: ignore\n ]\n )\n\n if not i % DIVISION:\n logger.info(\"Wrote Tweet #%s\", i)\n i += 1\n logger.info(\"Wrote %s Tweets in total\", len(tweets))", "title": "" }, { "docid": "739a08ecf09b42d5c43f5cf0288093ef", "score": "0.5210536", "text": "def get_tones(tweets: list) -> list:\n tweet_str = tweets_to_string(tweets) \n ret = get_watson_api().tone(\n tone_input=tweet_str,\n content_type='text/plain',\n sentences=True).get_result()\n if ret:\n return ret\n else:\n print('Error in watson.get_tones(). No sentiment data was retrieved by watson.')\n return None", "title": "" }, { "docid": "76985e99c74f3c0f6dcbd5888b6edaf8", "score": "0.5202895", "text": "def process_message(self, message):\n pass", "title": "" }, { "docid": "33917181c603c631350fb3a84332bbf6", "score": "0.5195346", "text": "def publish(self, topic, *args, **kwargs):", "title": "" }, { "docid": "8baa6822a5675fbef39af79b0c848b1e", "score": "0.5182338", "text": "def handle_message(self, msg):", "title": "" }, { "docid": "e451363a8205c3f0217e3d7fc39b9060", "score": "0.5173826", "text": "def converse(self, tweet):\n # if tweet['user']['screen_name'] != 'j_t_allen':\n # print \"Don't want to talk to\", tweet['user']['screen_name']\n # return\n pattern_me = r'@[Tt][Hh][Ee][Bb][Rr][Ee][Xx][Ii][Tt][Bb][Oo][Tt]'\n pattern_count = pattern_me + r' [Cc]ount (?P<group_name>.+?) (?P<group_value>.+?)\\b'\n match_count = re.search(pattern_count, tweet['text'])\n if match_count:\n group_name = match_count.group('group_name').lower()\n group_value = match_count.group('group_value')\n self.summarise('president', group_name, group_value,\n in_reply_to=tweet)\n pattern_result = pattern_me + r' [Rr]esult (?P<location>.+?) (?P<sentiment>.+?) (?P<result>\\d+(\\.\\d+)?)\\b'\n match_result = re.search(pattern_result, tweet['text'])\n if match_result and tweet['user']['screen_name'] == 'j_t_allen':\n location = match_result.group('location').upper()\n sentiment = match_result.group('sentiment').lower()\n result = float(match_result.group('result'))\n success = self.add_result(location, sentiment, result)\n if success:\n self.post_tweet('Confirm: {} {} {}'.format(location, sentiment, result), in_reply_to=tweet)\n else:\n self.post_tweet('?! 
Try something like: result NY clinton 62.1', in_reply_to=tweet)", "title": "" }, { "docid": "d5ce1ac5503730cdcc775bc362baf29e", "score": "0.5168999", "text": "def on_data(self, data):\n try:\n dict_data = json.loads(data)\n tweet = TextBlob(dict_data['text'])\n username = str(dict_data['user']['name'])\n\n # Take data stream and assign it either Negitive, Netural, Positive sentiment\n if tweet.sentiment.polarity < 0:\n sentment = \"Negitive\"\n elif tweet.sentiment.polarity == 0:\n sentment = \"Netural\"\n else:\n sentment = \"Positive\"\n\n # Send data stream into a dict then write to csv file\n try:\n C = { \n 'Username': [dict_data['user']['name']],\n 'Text': [dict_data['text']],\n 'Set': [sentment],\n } \n df = DataFrame(C, columns= ['Username', 'Text', 'Set'])\n if \"RT\" not in tweet:\n export_csv = df.to_csv ('SetData.csv', index = False, header=False, mode='a')\n else:\n pass\n\n # Console can't show emojis so just pass\n except UnicodeTranslateError as e:\n print(e)\n pass\n return True\n\n except KeyError as e: # Pass KeyError when I reach twitter stream limit and let it catch up\n print(e)\n pass", "title": "" }, { "docid": "c32ef2efcc8c7b430de9211f9c088217", "score": "0.5163567", "text": "def handle_message(m):\n if m.sentence_type == FIRST_MSG:\n ticks += 1", "title": "" }, { "docid": "1c2a5e461c7d0c1691d36dfd7d6d7e84", "score": "0.5158969", "text": "def retweet(api):\n for tweet in tweepy.Cursor(api.home_timeline, count=25, include_entities=True,\n tweet_mode='extended').items(25):\n try:\n for keyword in main_keywords:\n if valid_tweet(api, tweet, keyword):\n post_retweet(tweet)\n return\n for keyword in tweet_keywords:\n if valid_tweet(api, tweet, keyword):\n article = Article(get_url(tweet), language=lang)\n article.download()\n article.parse()\n if (author in article.authors):\n post_retweet(tweet)\n return\n except StopIteration:\n break", "title": "" }, { "docid": "59a905e0cb765bc0d832e92275ff66d7", "score": "0.5141745", "text": "def handle_command_twitter(self, channel, nick, args):\n n = int(args) if args and args in (\"3\", \"5\", \"10\") else 1\n self.tweets.latest(n).addCallback(self.tell_tweets, channel)", "title": "" }, { "docid": "191756dfc952609e53c87dbe79780e63", "score": "0.51296955", "text": "def handle(self, msg):\n content_type, chat_type, chat_id = telepot.glance(msg)\n\n if content_type == 'text':\n logging.info(\"Got text message.\")\n self.parse_message(msg)\n else:\n logging.info(\"Got non-text message.\")", "title": "" }, { "docid": "cc0d9db2ca33cd1b0e4ced76f0c4029a", "score": "0.512609", "text": "def process_text(name, input):\n speak(name + ', you said: \"' + input + '\".')\n return", "title": "" }, { "docid": "741a0b4ff6a458682103f129f8798316", "score": "0.51238805", "text": "def processTweetFolder(path, tweetid):\n thread = processTweetJSON(path + '\\\\source-tweets\\\\' +\n tweetid + '.json', False)\n with open(path + '\\\\annotation.json') as f:\n thread.thread_annotation = json.load(f)\n thread.thread_annotation['tweetid'] = tweetid\n with open(path + '\\\\structure.json') as f:\n thread.thread_structure = json.load(f)\n root = Node(str(tweetid), tweet=thread)\n processTree(thread.thread_structure[tweetid], root, path + '\\\\reactions\\\\')\n thread.reply_chain = root\n # print(RenderTree(root, style=AsciiStyle))\n thread.thread_id = tweetid\n return thread", "title": "" }, { "docid": "0c8c1e174afdb6d8c9e32f3f70a8a270", "score": "0.5122982", "text": "def on_response(self, response):\r\n \r\n try:\r\n # get username of user who sent the 
tweet\r\n username = response.includes['users'][0].username\r\n print(f'Screen name: {username}')\r\n print(f' Language: {response.data.lang}')\r\n print(f' Tweet text: {response.data.text}')\r\n\r\n if response.data.lang != 'en' and response.data.lang != 'und':\r\n english = self.translator.translate(response.data.text)\r\n print(f' Translated: {english}')\r\n\r\n print()\r\n self.tweet_count += 1 \r\n except Exception as e:\r\n print(f'Exception occured: {e}')\r\n self.disconnect()\r\n \r\n # if TWEET_LIMIT is reached, terminate streaming\r\n if self.tweet_count == self.TWEET_LIMIT:\r\n self.disconnect()", "title": "" }, { "docid": "e9b4d4d0c511fc3bd614e5652cde0fb1", "score": "0.5122919", "text": "def __tweet_at_stream_thread(self, message, nth_tweet=None, users=None, terms=None, limit=None):\n\n stream = self.stream(users=users, terms=terms)\n\n stream_results = []\n reply_results = []\n\n for item in stream:\n if len(stream_results) < limit:\n print(item)\n stream_results.append(item)\n else:\n break\n\n if nth_tweet == -1:\n for status in stream_results:\n if self.human_like_delays:\n time.sleep(random.uniform(self.min_response_time, self.max_response_time))\n reply_results.append(self.reply(status['id'], message))\n elif nth_tweet != -1:\n status = stream_results[nth_tweet]\n if self.human_like_delays:\n time.sleep(random.uniform(self.min_response_time, self.max_response_time))\n reply_results.append(self.reply(status['id'], message))\n\n self.__log({\n 'action': 'Tweeted at stream',\n 'message': message,\n 'nth_tweet': str(nth_tweet),\n 'users': str(users),\n 'terms': str(terms),\n 'limit': str(limit),\n 'response': str(reply_results)\n })\n\n return reply_results", "title": "" }, { "docid": "8b0faf2eca5a127bb3678098892050e8", "score": "0.51219416", "text": "def stream_tweets(self, keyword):\n stream_listener = StreamListener(keyword)\n auth = OAuthHandler(API_KEY, API_KEY_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n stream = Stream(auth, stream_listener)\n stream.filter(track=[keyword], languages=[\"pt\"], is_async=True)\n time.sleep(10)\n stream.disconnect()", "title": "" }, { "docid": "12a3bcadb29de7dc3419f67caa9d304b", "score": "0.51206636", "text": "def topic(update, context):\n try:\n topic_name = ' '.join(list(context.args))\n with sqlite3.connect('data.db') as conn:\n descript = select_topic(conn, topic_name)\n rows = select_new_doc_from_topic(conn, topic_name, 5)\n texts = 'Заголовки 5 самых свежих новостей в этой теме:\\n'\n for index, value in enumerate(rows, start=1):\n texts += '{}. 
{}\\n'.format(index, value)\n update.message.reply_text(descript + '\\n' + texts)\n\n except (IndexError, ValueError):\n update.message.reply_text('Input Error!')", "title": "" }, { "docid": "999bf65c2d3f709b942531089fb95989", "score": "0.51194954", "text": "def run(self, tweets):\n self.connect_twitter_api()\n\n with io.open(self.cfg_tw_api['outfile_ent'] % self.inpath, 'w+',\n newline='', encoding='utf-8') as csvfile,\\\n io.open(self.cfg_tw_api['outfile_info'] % self.inpath, 'w+',\n newline='', encoding='utf-8') as csvfile2,\\\n io.open(self.cfg_tw_api['outfile_text'] % self.inpath, 'w+',\n newline='', encoding='utf-8') as csvfile3:\n\n # Outfile Entities Check\n _writer = csv.writer(csvfile, quoting=csv.QUOTE_ALL)\n # Outfile Summary\n _writer2 = csv.writer(csvfile2, quoting=csv.QUOTE_ALL)\n # Write Headers\n _writer.writerow(['TWEET_ID', 'ENT', 'I', 'E', 'IOB_TAG', 'TYPE'])\n _writer2.writerow(['TWEET_ID', 'DATE', 'TEXT', 'ENT'])\n\n for tweet in tqdm(tweets):\n # Retrieve tweet info when possible, else skip\n tweet_status = self.hydrate_tweet(tweet['tweet_id'])\n if not tweet_status:\n continue\n\n # Replace line breaks in tweet text\n tweet_text = tweet_status.full_text.replace('\\n', ' ')\n # Remove URLs from text\n tweet_text = \" \".join(filter(\n lambda x: x[0:4] != 'http',\n tweet_text.split()))\n\n # Summary file row\n row = [tweet['tweet_id'], tweet_status.created_at, tweet_text\n ] + tweet['entities']\n\n try:\n _writer2.writerow(row)\n except Exception, ex:\n logging.error(ex)\n\n # Write out tweet text tokenized\n try:\n csvfile3.write(' '.join(\n [x for x in twk.tokenize(tweet_text)])+\"\\n\")\n except Exception, ex:\n logging.error(ex)\n\n # Check Entity annotations\n if tweet['entities']:\n for entity in tweet['entities']:\n try:\n i, e, t = entity.split(',')\n except ValueError, er:\n print(\"Problem with tweet ID\"\n \"%d\" % tweet['tweet_id'])\n print(er)\n sys.exit()\n\n i, e = int(i), int(e)\n\n # Check Entity types\n if t not in ['Contributor', 'Work']:\n logging.error('Entity not allowed: %s' % t)\n continue\n entity = tweet_status.full_text[i:e]\n\n # Add IOB tags\n start = True\n for token in entity.split():\n if start:\n iob_tag = 'B'\n start = False\n else:\n iob_tag = 'I'\n ent_token = tweet_status.full_text[i:i+len(token)]\n\n # Write out Entity annotated\n row = [unicode(tweet['tweet_id']),\n ent_token, i, i + len(token),\n iob_tag, t]\n try:\n _writer.writerow(row)\n except Exception, ex:\n logging.error(ex)\n\n # Update index\n i = i + len(token) + 1", "title": "" }, { "docid": "5501e777452a54b423fb656dd10b0e66", "score": "0.5116459", "text": "def tweet(self, post):\r\n hashTags = self.get_hashtags(self.setting.hashTags)\r\n trend = self.twit.get_trend()\r\n if trend is not None:\r\n hashTags += \" %s\" %trend\r\n\r\n if post.mediaType == imgur.MediaType.IMAGE.value:\r\n media = self.download_image(self.log, post.media)\r\n if media is None:\r\n self.log.log(logger.LogLevel.WARNING, 'post.mediaType: %s. Unable to download_image' % post.media)\r\n return False\r\n return self.twit.tweet_image('%s %s' % (post.title, hashTags), media)\r\n else:\r\n media = self.download_video(self.log, post.media)\r\n if media is None:\r\n self.log.log(logger.LogLevel.WARNING, 'post.mediaType: %s. 
Unable to download_video' % post.media)\r\n return False\r\n return self.twit.tweet_video('%s %s' % (post.title, hashTags), media)\r\n return False", "title": "" }, { "docid": "f1cd73ce66d53aea522c2186cf82ed74", "score": "0.51123595", "text": "def process(self, data):\n try:\n status = json.loads(data)\n except:\n return None\n \n for status_type in self.commands:\n # see if it is of typs 'text' or 'friends' or something else\n if status.has_key(status_type):\n # if it is, find out the modules associated with it\n commands = self.commands[status_type]\n # for each module that handles say 'text', \n for command in commands:\n # for triggers that should send data to process\n # in that module,\n triggers = command['triggers']\n for t in triggers:\n # compiled regex match:\n if t.match(data):\n # currently, we're just printing the output\n # later there will be facility to reply\n # or better - send a tweepy api object to the\n # processing module so it can take actions\n # independently.\n print self.plugins[command['plugin']].process(status)", "title": "" }, { "docid": "1ed1bf9fdf187c848e0f050f4ba1eb33", "score": "0.5110873", "text": "def process_synthesize_hts(self, txt_fpath, style=''):\n self.process_phonetize(txt_fpath)\n\n # self.pfs_xtr_hts_obj = PfsExtractorHts(self.param_module, self.utt_total_mix)\n\n # self.pfs_xtr_hts_obj.process_pfeats_extr(style)\n\n utt_pfs_obj = TextFeatures(self.param_module, self.phonetizer_obj.utt_ph)\n\n utt_pfs_obj.process_pfeats(style)\n\n utt_pfs_obj.process_lab_prn_gen()\n\n self.hts_lab_gen_prn = utt_pfs_obj.hts_lab_gen_prn\n\n # self.pfs_xtr_hts_obj.gen_hts_lab_fts()\n # self.hts_lab_gen_prn = self.pfs_xtr_hts_obj.hts_lab_gen_prn", "title": "" }, { "docid": "9d4d803cf2d73c80d0d0e5b1e0a5f5c6", "score": "0.51053894", "text": "def handle_message_text(text: str) -> bool:\n\n text = text.strip()\n if text.find('https://youtu.be/') != -1 or text.find('https://www.youtube.com/watch?v=') != -1:\n url_start_index = text.find('https://youtu.be/')\n if url_start_index != -1:\n url_end_index = url_start_index + 17 + 11\n else:\n url_start_index = text.find('https://www.youtube.com/watch?v=')\n url_end_index = url_start_index + 32 + 11\n redis_conn = Redis(host=redis_host, port=redis_pwd, db=redis_db)\n redis_conn.publish('yt-urls', text[url_start_index:url_end_index])\n redis_conn.close()\n return True\n else:\n return False", "title": "" }, { "docid": "8dbde02753b210bcae9eef8156337991", "score": "0.51049244", "text": "def postprocess(self, scribe: Scribe) -> None:", "title": "" }, { "docid": "791e5cbc08ec70950af9beb3c3106815", "score": "0.5104352", "text": "def preprocess(data):\n\n def remove_url(text): return re.sub(r'http\\S+', '', text)\n\n def remove_sym(text): return re.sub(r'\\&#\\d*;{1}', '', text)\n\n def remove_amp(text): return re.sub(r'\\&[a-zA-Z0-9]+;', '', text)\n\n def remove_rt(text): return re.sub(r'RT', '', text)\n\n def remove_redundant(text): return re.sub(\n r'[!\\.\\?\\:\\'\\\"]{2,}', lambda x: ' '+x.group(0)[0]+' ', text)\n\n def remove_spaces(text): return re.sub(r'[\\s]{2,}', ' ', text)\n\n def handle(text): return text.group(\n 0)[1:-1] if text.group(0)[-1] == ':' else text.group(0)[1:]\n\n def proc_handles(text): return re.sub(\n r'@([A-Za-z0-9_]+)[\\:]*', handle, text)\n\n def remove_slash(text): return re.sub(r\"[a-zA-Z]+\\\\\\'[a-zA-Z]+\", \"'\", text)\n\n def segmnt(text): return \" \".join(ws.segment(text.group(0)))\n\n def remove_hashtag(text): return re.sub(r'\\#([a-zA-Z0-9_]+)', segmnt, text)\n\n data = [remove_hashtag(sent) 
for sent in data] #not working\n data = [remove_url(sent) for sent in data]\n data = [remove_sym(sent) for sent in data]\n data = [remove_amp(sent) for sent in data]\n data = [remove_rt(sent) for sent in data]\n data = [remove_redundant(sent) for sent in data]\n data = [remove_spaces(sent) for sent in data]\n data = [remove_slash(sent) for sent in data]\n data = [proc_handles(sent) for sent in data] # not working. FIX\n\n return data", "title": "" }, { "docid": "1470a68a6191defbaa333076e10657bd", "score": "0.5102612", "text": "def post_to_twitter(msg):\n try:\n api = twitter.Api(consumer_key=settings.CONSUMER_KEY,\n consumer_secret=settings.CONSUMER_SECRET,\n access_token_key=settings.ACCESS_TOKEN_KEY,\n access_token_secret=settings.ACCESS_TOKEN_SECRET)\n api.PostDirectMessage(text=msg,\n screen_name=settings.TWITTER_SCREEN_NAME)\n except twitter.TwitterError as e:\n logger.error(\"Twitter error: {0}\".format(e))", "title": "" }, { "docid": "ee451a87727f7e2a60c58f76ccbd9609", "score": "0.5099237", "text": "def tag_tweets(db_raw, db_pro, multipol):\n results = db_raw.view('raw_tweets/unprocessed')\n for res in results:\n\n # Get tweet id.\n id = res['id']\n tweet = db_raw[id]\n\n # Look for exact coordinates in tweet.\n if tweet['coordinates']:\n raw = tweet['coordinates']\n coords = tuple(raw['coordinates'])\n # Get the midpoint of place.\n elif tweet['place']:\n # Don't take midpoint of city, set own coords.\n if (tweet['place']['name'] == 'Melbourne'):\n coords = MELBOURNE_COORDS\n else:\n coords = average_bounding_box(\n tweet['place']['bounding_box']['coordinates']\n )\n\n # Attempt to process if location exists.\n if coords:\n point = Point(coords)\n code = None\n for multi in multipol:\n if point.within(shape(multi['geometry'])):\n code = multi['properties']['SA2_Code_2011']\n\n sentiment = TweetAnalyzer(tweet).analyzeSentiment()\n stored_tweet = {\n '_id': id, 'code': code,\n 'text': tweet['text'], 'sentiment': sentiment,\n 'created_at': tweet['created_at'],\n 'lang': tweet['lang']\n }\n db_pro.save(stored_tweet)\n break\n else:\n logging.info(\"No coordinates found.\")\n\n # Tag tweet as processed.\n doc = db_raw.get(id)\n doc['processed'] = True\n db_raw.save(doc)", "title": "" }, { "docid": "3825f612a574a20ffff3d2eddb97e937", "score": "0.5099218", "text": "def twitter_punctuation_count_feature_():\n d_tw = load_twitter_data()\n tr_x_texts = branchify_twitter_extract_feature_loop(d_tw['train'], 'text')\n dv_x_texts = branchify_twitter_extract_feature_loop(d_tw['dev'], 'text')\n\n for i in range(len(tr_x_texts)):\n punctuations_counter = Counter(re.findall(\"[^\\w\\s]+\", tr_x_texts[i]))\n punctuations = punctuations_counter[key]\n tr_x_texts[i] = punctuations\n\n for i in range(len(dv_x_texts)):\n punctuations_counter = Counter(re.findall(\"[^\\w\\s]+\", dv_x_texts[i]))\n punctuations = punctuations_counter[key]\n dv_x_texts[i] = punctuations\n\n tr_x_texts, dv_x_texts = scale(tr_x_texts, dv_x_texts)\n\n return tr_x_texts, None, dv_x_texts", "title": "" }, { "docid": "1d470de0209762ff23594048867a3abb", "score": "0.50975126", "text": "def analyze(topic):\n\n # load tweets\n with open('outputs/' + topic + '.txt', 'r') as file:\n file_content = [unwrap_line_to_dictionary(line) for line in file]\n # load word blacklist\n with open('assets/exclude.txt', 'r') as file:\n blacklist = [line.strip() for line in file]\n # get the tweets words thet are not in the blacklist\n tweets_words = [extract_tweet_words(tweet, blacklist) for tweet in file_content]\n\n\n # count the words occurences\n 
words = {}\n for tweet in tweets_words:\n for word in tweet:\n if word in words.keys():\n words[word] += 1\n else:\n words[word] = 1\n\n # put the words into a sorted list, skip the topic (we know its everywhere)\n word_list = [[word, words[word]] for word in words if word != topic]\n word_list = sorted(word_list, key=lambda row: row[1], reverse=True)\n\n # select top50 most popular and put them into hierarchy\n # top50 = word_list[:50]\n # top50words = [word[0] for word in top50]\n top = [word for word in word_list if word[1] >= 5]\n topWords = [word[0] for word in top]\n\n hierarchy = {'words' : [ {'word':topic+'.'+word[0], 'appearances': word[1], 'connections':[]} for word in top ]}\n\n # find connections\n # for every word in hierarchy\n for node in hierarchy['words']:\n anchor = node['word'][len(topic)+1:]\n # checks all tweets containing it\n for tweet in tweets_words:\n if anchor in tweet:\n # gets all co-apprearing words without repeats\n for word in tweet:\n if not word == anchor and word in topWords and topic+'.'+word not in node['connections']:\n node['connections'].append(topic+'.'+word)\n\n save_the_analysis(topic, hierarchy['words'])\n # return hierarchy['words']", "title": "" }, { "docid": "5dc7242c2468cba6e149ea456eddd923", "score": "0.50754756", "text": "def on_message(client, userdata, msg):\n print(msg.topic+\" \"+str(msg.payload))\n send_message_to_kafka(msg.payload)", "title": "" }, { "docid": "3eb7cf98e5f0a345d66bc3bd0c71203e", "score": "0.50744104", "text": "def help_read_tweet(dic, key, content):\n \n for s in content:\n if len(s.split(',')) == HEADER_LENGTH and s.split(',')[0].isnumeric():\n info = s.split(',')\n else:\n txt = s + ' '\n text = txt[:len(txt)]\n value = (key, text, int(info[1]), info[3], int(info[4]), int(info[5][:-1]))\n dic[key].append(value)", "title": "" } ]
687306ade1c3e9045e854107329a2b13
Solves the linear equation system ax = b for x. Inputs
[ { "docid": "3e2a107766d9ac334518820327d3047d", "score": "0.58857983", "text": "def linearsolver(a, b):\n n, k = a.shape \n augmented = np.c_[a, b] # augment coeff matrix with constants\n # ranks\n rank_a = np.linalg.matrix_rank(a)\n rank_augmented = np.linalg.matrix_rank(augmented)\n # no solution\n if rank_augmented > rank_a:\n print(\"There is no solution.\")\n return [np.NaN for _ in range(k)]\n # exactly one solution with n=k\n elif n == k and n == rank_a:\n print(\"There is exactly one solution (n = k case).\")\n augmented_rref = gauss_eliminate(augmented) # do gauss elimination\n # rref coeff matrix and constants\n a_rref = augmented_rref[:, :-1]\n #print(\"a_rref = \\n\", a_rref)\n b_rref = augmented_rref[:, -1]\n x = linearsolver_uppertriangle(a_rref, b_rref)\n return x\n # exactly one solution with n > k\n elif n > k:\n print(\"There is exactly one solution (n > k case).\")\n # get rid off superflous rows/equations\n augmented_rref = gauss_eliminate(augmented) # do gauss elimination\n # rref coeff matrix and constansts\n a_rref = augmented_rref[:, :-1]\n b_rref = augmented_rref[:, -1]\n # the part of the rref matrix where the rows are not all-zeros (cut down all-zero rows at the end)\n nonzero_rows = get_nonzerorows(a_rref)\n a_rref_nonzero = a_rref[nonzero_rows, :]\n b_rref_nonzero = b_rref[nonzero_rows]\n # solve the system with the derived square rref matrix\n x = linearsolver_uppertriangle(a_rref_nonzero, b_rref_nonzero)\n return x\n # infinitely many solutions\n else:\n print(\"There are infinitely many solutions.\")\n augmented_rref = gauss_eliminate(augmented) # do gauss elimination\n # rref coeff matrix and constansts\n a_rref = augmented_rref[:, :-1]\n b_rref = augmented_rref[:, -1]\n # the part of the rref matrix where the rows are not all-zeros (cut down all-zero rows at the end)\n nonzero_rows = get_nonzerorows(a_rref)\n a_rref_nonzero = a_rref[nonzero_rows, :]\n b_rref_nonzero = b_rref[nonzero_rows]\n number_nonzerorows = sum(nonzero_rows)\n pivots = getpivot(a_rref_nonzero)\n # initialise symbolic variables\n x = np.array(sympy.symbols('x0:%d' % k))\n # go up row-by-row and figure out free variables\n for row in range(number_nonzerorows - 1, -1, -1):\n for col in range(k - 1, -1, -1):\n if col == pivots[row]: # x_col is a pivot, compute its value, else x_col is a free variable, leave it as is\n try:\n s = a_rref_nonzero[row, col + 1:].dot(x[col + 1:])\n except IndexError: # need this for the last row/column\n s = 0\n x[col] = b_rref_nonzero[col] - s # no division as pivots nomalised to one\n return x", "title": "" } ]
[ { "docid": "2d9985fbf8b50443a132f1b5e0cef9bb", "score": "0.8001566", "text": "def LinearEquationSolver(A, b):\n return solve(A,b)", "title": "" }, { "docid": "6aa2401580e0cacb84c3e48056587f51", "score": "0.78103673", "text": "def linear_equation_solver(A, b):\n A = np.array(A)\n b = np.array(b)\n return list(np.linalg.solve(A, b))", "title": "" }, { "docid": "75f0545d0a45aa00702a70caed7eabaf", "score": "0.7181677", "text": "def linear(x, a, b):\n return b + a*x", "title": "" }, { "docid": "ad37629ac6094e3d6d23d7460261e706", "score": "0.7060513", "text": "def linear(x, a, b):\n\treturn a * x + b", "title": "" }, { "docid": "2a30040e0f997af3349d4688683d9532", "score": "0.70399624", "text": "def linear_expression(A, b, x, tol=1.e-9):\n\n # linear term (explicitly state that it is a LinExpr since it can be that A[i] = 0)\n\n return A @ x + b[:,0]", "title": "" }, { "docid": "ea552b76c7778bec26e9211afae3f643", "score": "0.688501", "text": "def _linear_solve(self, A: np.ndarray, b: np.ndarray) -> np.ndarray:\n M = np.linalg.cholesky(A.T @ A)\n v = solve_triangular(M, A.T @ b, lower=True)\n return solve_triangular(M.T, v)", "title": "" }, { "docid": "b995edc31e9f91c9a286cea49a023181", "score": "0.6741406", "text": "def solve(LU, b):\n # list since it will be appended and no numpy attributes required from y.\n y = []\n # Forward pass through matrix to solve Ly = b for y.\n for i in range(len(b)):\n # Dot product used instead of sum\n y.append(b[i] - LU[i,:i].dot(y[:i]))\n # Backward pass through matrix to solve Ux = y for x.\n for j in reversed(range(len(b))):\n # Dot product used instead of sum\n b[j] = 1/LU[j,j] * (y[j] - LU[j,j+1:].dot(b[j+1:]))\n return b", "title": "" }, { "docid": "654d4d80736e115893b2796bb8de8433", "score": "0.6626394", "text": "def linear(self, W: np.ndarray, X: np.ndarray, b: np.ndarray) -> np.ndarray:\n\n return np.dot(X,W)+ b", "title": "" }, { "docid": "be957dd7dac73303f4a96a272e12607f", "score": "0.66104645", "text": "def solveb(a, b):\r\n return la.solve(b.T, a.T).T", "title": "" }, { "docid": "e37181c38b6c1a68df86e48b40b7b381", "score": "0.6548849", "text": "def solve(A, b):\n return binaryMatrixFunction(A, b, 'solve')", "title": "" }, { "docid": "eb6aa08c1460798b49e5b3e7f8f0d131", "score": "0.65299815", "text": "def linear_function(W, X, b):\n return np.dot(W.T, X) + b", "title": "" }, { "docid": "f90357bfca15471ba856d5fa049f6d4b", "score": "0.6386702", "text": "def linear(x, y):\n return np.dot(x, y)", "title": "" }, { "docid": "3b3464e9e25b51f37e441c7fef6deb21", "score": "0.6383369", "text": "def solve_linear(Q,A):\n n = len(Q) \n M = create_matrix(Q, n)\n v = create_column(Q, n, A)\n return np.linalg.solve(M, v)", "title": "" }, { "docid": "d025e8d1403980ae367732429ff93590", "score": "0.6322593", "text": "def model(x, a, b):\n return a + b*x", "title": "" }, { "docid": "4f5d0d8e880d963174b43dc0971882ef", "score": "0.62444085", "text": "def linear_forward(x_input, W, b):\n output = np.dot(x_input,W) + b\n \n return output", "title": "" }, { "docid": "341bbae3be99edc0bf6921720050497c", "score": "0.62338847", "text": "def test_linear_solution():\n def exact_solution(t):\n return c*t + I\n\n def a(t):\n return t**0.5 # can be arbitrary\n\n def b(t):\n return c + a(t)*exact_solution(t)\n I = 0.1; dt = 0.1; c = -0.5\n T = 4\n N = int(round(T/dt))\n u,t = solver(I=I, a=a, b=b, T=N*dt, dt=dt)\n u_e = exact_solution(t)\n difference = abs(u_e - u).max()\n print difference", "title": "" }, { "docid": "11abe098cd212eba595606bff260ecf1", "score": "0.6167898", "text": "def 
solve_system(am,bm,*args):\n # make the a's and b's symbolic\n p = 3\n y = \" \".join(am(*args))\n x = \" \".join(bm(*args))\n am = symbols(y)\n bm = symbols(x)\n sys = []\n for k in range(1, p+1):\n Am = [am[i]*(i**k) for i in range(len(am)) ]\n Bm = [bm[i]*(i**(k-1)) for i in range(len(bm)) ]\n sys.append(sum(Am)-k*sum(Bm))\n \n #variables = am+bm\n equations = [Eq(equation) for equation in sys]\n solutions = solve(equations)\n return solutions", "title": "" }, { "docid": "5133c376b9a4f2264e4962e27dceff41", "score": "0.6161489", "text": "def solve(a, b):\n return Solve().forward(a, b)", "title": "" }, { "docid": "052e487884ecc963d6a21787cd6a4bcf", "score": "0.60888934", "text": "def least_squares(y, tx):\n a = tx.T.dot(tx)\n b = tx.T.dot(y)\n return np.linalg.solve(a, b)", "title": "" }, { "docid": "ae551f3686c2131d7f2eac3a61faaf06", "score": "0.6070113", "text": "def solve(a, b):\n pass", "title": "" }, { "docid": "5344b7afdb64ed2af329964a5ebf815b", "score": "0.6030955", "text": "def main():\n a, b = map(int, input().split())\n solve(a, b)", "title": "" }, { "docid": "ad14661b770a5fd476d983f91f2bb99a", "score": "0.6010189", "text": "def linear(x):\n\n return x", "title": "" }, { "docid": "52afce04a63335011911a8a9d26302b9", "score": "0.59962344", "text": "def _numba_linalg_solve_ol(a, b):\n numba_xgesv = _LAPACK().numba_xgesv(a.dtype)\n kind = ord(_blas_kinds[a.dtype])\n\n def _numba_linalg_solve_impl(a, b): # pragma: no cover\n n = a.shape[-1]\n if b.ndim == 1:\n nrhs = 1\n else: # b.ndim == 2\n nrhs = b.shape[-1]\n F_INT_nptype = np.int32\n ipiv = np.empty(n, dtype=F_INT_nptype)\n\n r = numba_xgesv(\n kind, # kind\n n, # n\n nrhs, # nhrs\n a.ctypes, # a\n n, # lda\n ipiv.ctypes, # ipiv\n b.ctypes, # b\n n # ldb\n )\n return r\n\n return _numba_linalg_solve_impl", "title": "" }, { "docid": "51c7ec608b07216ece1443ce64e7c207", "score": "0.59102327", "text": "def ridge_regression(y, tx, lamb):\n transpose = tx.T\n lambdaIden = lamb/(2*y.shape[0])*np.eye(tx.shape[1])\n LHS = np.dot(transpose,tx)+lambdaIden\n RHS = np.dot(transpose,y)\n beta = np.linalg.solve(LHS,RHS)\n return beta", "title": "" }, { "docid": "e8d689eed98c75eb9d73905d450b9f69", "score": "0.5902497", "text": "def l1nls(A, b):\n [m, n] = A.shape\n prob = pulp.LpProblem(\"Linear System\", pulp.LpMinimize)\n pos_vars = pulp.LpVariable.dicts(\"Positive\", range(n), 0)\n neg_vars = pulp.LpVariable.dicts(\"Negative\", range(n), 0)\n prob += pulp.lpSum(pos_vars[i] for i in range(n)) + pulp.lpSum(neg_vars[i] for i in range(n))\n for eq in range(m):\n prob += pulp.lpSum((pos_vars[i] - neg_vars[i]) * A[eq][i] for i in range(n)) == b[eq]\n\n prob.solve()\n x = [pulp.value(pos_vars[i]) - pulp.value(neg_vars[i]) for i in range(n)]\n val = pulp.value(prob.objective)\n return (x, val)", "title": "" }, { "docid": "cfd07e823dec5ce0b61dcd4447f62e17", "score": "0.5902124", "text": "def solve_equation(a, b, c):\n print(f\"{a} * x^2 + {b} * x + {c} = 0\")\n if (a == 0):\n if (b == 0):\n if (c == 0):\n print(\"x - any number\")\n else:\n print(\"no solution\")\n else:\n x = (-1) * c / b\n print(\"x =\", x)\n else:\n discriminant = b**2 - 4 * a * c\n if (discriminant >= 0):\n x1 = round(((-1) * b - math.sqrt(discriminant)) / (2 * a), 2)\n x2 = round(((-1) * b + math.sqrt(discriminant)) / (2 * a), 2)\n print(f\"x1 = {x1}\\nx2 = {x2}\")\n else:\n real = round(((-1) * b) / (2 * a), 2)\n image = round(math.sqrt(abs(discriminant)) / (2 * a), 2)\n print(f\"x1 = {complex(real, image)}\")\n print(f\"x1 = {complex(real, image).conjugate()}\")\n print()", 
"title": "" }, { "docid": "d1c0f973058a730fbda36f502c737812", "score": "0.589902", "text": "def _model(X, b):\n return np.dot(X, b)", "title": "" }, { "docid": "28ffa892d898357bd9c7c1682141e1dc", "score": "0.5888368", "text": "def approxSolve(self, b):\n return (self.t()*self).inv()*self.t()*b", "title": "" }, { "docid": "7e3d5cd735e506e9b15a00a187d5068b", "score": "0.58794475", "text": "def stationary_solve(r, b):\n\n db = r[0:1]\n\n dim = b.ndim\n if b.ndim == 1:\n b = b[:, None]\n x = b[0:1, :]\n\n for j in range(1, len(b)):\n rf = r[0:j][::-1]\n a = (b[j, :] - np.dot(rf, x)) / (1 - np.dot(rf, db[::-1]))\n z = x - np.outer(db[::-1], a)\n x = np.concatenate((z, a[None, :]), axis=0)\n\n if j == len(b) - 1:\n break\n\n rn = r[j]\n a = (rn - np.dot(rf, db)) / (1 - np.dot(rf, db[::-1]))\n z = db - a*db[::-1]\n db = np.concatenate((z, np.r_[a]))\n\n if dim == 1:\n x = x[:, 0]\n\n return x", "title": "" }, { "docid": "4481e12545f783b64307038fd0ad379b", "score": "0.5876238", "text": "def dualL(alpha, y, x):\n return np.sum(alpha, axis = 0) - .5*(sum(sum(alpha*y*x.transpose(), sum=0), sum=1)", "title": "" }, { "docid": "6d4554dd7aae3732fe8e1e6beccab946", "score": "0.5872043", "text": "def constrained_least_squares(A, b):\n x = optimize.lsq_linear(A, b)['x']\n x = x.reshape((3, 4))\n x = np.concatenate((x, np.array([[0, 0, 0, 1]])), axis=0)\n return x", "title": "" }, { "docid": "464d863a6920e4d8a91ec6da17bbe625", "score": "0.58704436", "text": "def linear_eq(x):\n if x[1][0] != x[0][0]:\n slope = (x[1][1] - x[0][1]) / (x[1][0] - x[0][0])\n else:\n return [0, 1, x[0][0]]\n y_intercept = x[0][1] - slope * x[0][0]\n return [1, slope * (-1), y_intercept]", "title": "" }, { "docid": "a6ea8a387e80302d68f859ce98dd0243", "score": "0.58462733", "text": "def linearsolver_uppertriangle(a, b):\n n = a.shape[0]\n x = np.zeros(n) # preallocation\n for i in range(n-1, -1, -1):\n if i == n - 1:\n x[i] = b[i] / a[i, i]\n else:\n # sum of the coeffs times already-solved-for-x's\n s = a[i, i+1:].dot(x[i+1:])\n x[i] = (b[i] - s) / a[i, i]\n return x", "title": "" }, { "docid": "45ff29d73e574f40700cae0f6573d7a8", "score": "0.5843107", "text": "def least_squares(y, tx):\n a = tx.T.dot(tx)\n b = tx.T.dot(y)\n w_star = np.linalg.solve(a, b)\n return w_star", "title": "" }, { "docid": "54ed7c0fa059dc846fea64b2295a9d4e", "score": "0.5836561", "text": "def linear_func(W,X):\r\n return W[0]+W[1]*X[0]+W[2]*X[1]", "title": "" }, { "docid": "fe59e757c49b8f6028ed7c56860aa73a", "score": "0.5828867", "text": "def __call__(self, X,y):\n beta = np.linalg.pinv(X)@y\n return beta", "title": "" }, { "docid": "6dacda1e96dc3b98dfcb2e028abef7fb", "score": "0.58129454", "text": "def linearKernel(x, y, *args):\n return np.dot(x, y)", "title": "" }, { "docid": "8ba434c4cf0c8e5e5c93b559516a04f8", "score": "0.57973784", "text": "def solve(L, b):\n def forward(L, b):\n # This avoids in-place modifications of pytorch variables and allows\n # the computation of a gradient.\n x0 = b[:,:,0] / L[:,:,0]\n x1 = (b[:,:,1] - L[:,:,1] * x0)/L[:,:,2]\n return torch.stack((x0, x1), dim=-1)\n\n def backward(L_t, b):\n x1 = b[:,:,1] / L_t[:,:,2]\n x0 = (b[:,:,0] - L_t[:,:,1] * x1)/L_t[:,:,0]\n return torch.stack((x0, x1), dim=-1)\n\n # Use forward-backward substitution: efficient and numerically stable.\n return backward(L, forward(L, b))", "title": "" }, { "docid": "1ee0b83054c194cfa38df5316be0d4da", "score": "0.5796", "text": "def examples():\n mtx_a = Matrix([1, 1, 1], [0, 2, 5], [2, 5, -1])\n mtx_a_inverse = mtx_a.inverse()\n vtr_b = Vector(6, -4, 
27)\n vtr_x = mtx_a.solve_for_x(vtr_b)\n\n print(\"\\nMatrix A:\")\n print(mtx_a)\n\n # single matrix operations\n print(\"\\n> Single matrix operations\")\n print(\"-\" * 40)\n print(\"\\nMatrix transpose A^T:\")\n print(mtx_a.transpose())\n print(\"\\nMatrix inverse A^(-1):\")\n print(mtx_a_inverse)\n print(\"\\nMatrix multiplication A^(-1)*A = I\")\n print(mtx_a_inverse * mtx_a)\n print(\"\\nReduced row echelon form H ~ A:\")\n print(mtx_a.reduced_row_echelon_form())\n\n # solving for vector x\n print(\"\\n> Ax = b, solving for x\")\n print(\"-\" * 40)\n print(\"\\nVector b:\")\n print(vtr_b)\n print(\"\\nResult vector x:\")\n print(vtr_x)\n\n print(\n # end of examples\n )", "title": "" }, { "docid": "d2e562ad47ef3dd0ce55401ce2523134", "score": "0.57733715", "text": "def linear_function(a):\n\n return a", "title": "" }, { "docid": "cebcb4f34dd4bf9cc5d029fea140d855", "score": "0.57571036", "text": "def ridge_regression(y, tx, lambda_):\n \n aI = 2 * tx.shape[0] * lambda_ * np.identity(tx.shape[1])\n a = tx.T.dot(tx) + aI\n b = tx.T.dot(y)\n \n w = np.linalg.solve(a, b)\n loss = compute_loss(y,tx,w)\n \n return w, loss", "title": "" }, { "docid": "d0827b7210c507c45e92dd8af01c0702", "score": "0.5748912", "text": "def solve(self):\n A = assemble(lhs(self.dI))\n l = assemble(rhs(self.dI))\n\n print \"::: solving adjoint BP velocity :::\"\n solve(A, self.model.Lam.vector(), l)", "title": "" }, { "docid": "06545e9f7f1152e210ac2b7f7f155ab7", "score": "0.5740423", "text": "def __applyQuadraticFormula(self, a, b, n):\n\t\treturn n * n + a * n + b", "title": "" }, { "docid": "472e39df8146c560f7039b2b79752c7f", "score": "0.57401097", "text": "def linearEquation(k, d):\n return (lambda x: k * x + d)", "title": "" }, { "docid": "351d25f38e9f86af24f9a41e307b87a9", "score": "0.57361853", "text": "def OLS3(X,y):\n beta = np.linalg.pinv(X)@y\n return beta", "title": "" }, { "docid": "da3ddfc5a94912782d2f97f88d858204", "score": "0.5712596", "text": "def linear_regression(x, y): \n n = np.size(x) # number of values\n \n # mean of x and y vector \n m_x, m_y = np.mean(x), np.mean(y) # means of both\n \n # calculating cross-deviation and deviation about x \n SS_xy = np.sum(y*x) - n*m_y*m_x # \n SS_xx = np.sum(x*x) - n*m_x*m_x \n \n # calculating regression coefficients \n b_1 = SS_xy / SS_xx # there is an analytic answer so no need for any minimisation functions\n b_0 = m_y - b_1*m_x # with 15 mins effort you can rederive this\n \n return(b_0, b_1) # tuple bc doesn't change", "title": "" }, { "docid": "25592eb6c7b76f62975958fa919159a6", "score": "0.5704694", "text": "def fit_parabola(x, y_meas, errors, start_params):\n X = np.array([np.ones_like(x), x, x**2]).T\n V = np.diag(1/errors**2)\n return np.linalg.inv(X.T @ V @ X) @ X.T @ V @ y_meas", "title": "" }, { "docid": "bec7af2aaf5203a57ffa1a1bb1d85c93", "score": "0.5691757", "text": "def solve_lu(A, b):\n L, U = lu(A)\n b1 = solve_lower_triang(L, b)\n return solve_upper_triang(U, b1)", "title": "" }, { "docid": "9a773337c5ceb2813c46cf20ac310d51", "score": "0.5684381", "text": "def linear_forward(A,W,b):\n \n Z = np.dot(W,A) + b\n return Z", "title": "" }, { "docid": "cf069fa1cdb08b7f5cf0eff9fcc0123b", "score": "0.5672301", "text": "def solve_lower_triang(A, b):\n raise NotImplementedError\n return x", "title": "" }, { "docid": "6c2067273e998fc722fbee4696027495", "score": "0.56704223", "text": "def linear_function(coefs, args):\n sigma = 3\n dataType = str(type(args))[8:-2]\n # if args is a number, use the formula: y=ax+b\n if dataType==\"int\" or 
dataType==\"float\":\n noise = math.floor(100 * rand.gauss(0, sigma)) / 100\n return coefs[0] * args + coefs[1] + noise\n # if args is a tuple/list, use the formula: y=a1x+a2x+...+anx+b\n val = 0\n for i in range(len(args)):\n val += coefs[i] * args[i]\n val += coefs[len(coefs)-1] \n # add noise\n noise = math.floor(100 * rand.gauss(0,sigma)) / 100\n val += noise\n return val", "title": "" }, { "docid": "5022cd27b74933312f78b79a3601faec", "score": "0.56655335", "text": "def linear(x,output_dim):\n w=tf.get_variable(\"w\", [x.get_shape()[1], output_dim])\n b=tf.get_variable(\"b\", [output_dim], initializer=tf.constant_initializer(0.0))\n return tf.matmul(x,w)+b", "title": "" }, { "docid": "124181e5d96c8acd180aa8707af23a90", "score": "0.5662148", "text": "def lsq(X, y):\n\n # add column of ones for the intercept\n ones = np.ones((len(X), 1))\n X = np.concatenate((ones, X), axis=1)\n\n # calculate the coefficients\n beta = np.dot(np.linalg.inv(np.dot(X.T, X)), np.dot(X.T, y))\n\n return beta", "title": "" }, { "docid": "73940669946b3234b9f662b30fab5793", "score": "0.5660426", "text": "def linear_kernel(x, y, b=1):\n \n return x @ y.T + b # Note the @ operator for matrix multiplication", "title": "" }, { "docid": "77fd26b4fe842d63ef6765dd3a1267d4", "score": "0.5658089", "text": "def linear_forward(X, W, b):\n\n N = X.shape[0]\n D = np.prod(X.shape[1:])\n return np.dot(X.reshape(N, D), W) + b", "title": "" }, { "docid": "96791c3209f02cfa1aadeaf7d363762b", "score": "0.5656311", "text": "def linear(X, y, work=None):\n if work == None:\n work = linear_workspace(X.shape[0], X.shape[1])\n return _callback.gsl_multifit_linear(X, y, work._ptr)", "title": "" }, { "docid": "8ef2c9402d6a1f79a263a2d2ce5cabd0", "score": "0.5655731", "text": "def ridge_regression(y, tx, lambda_):\n n, d = tx.shape\n lambda_ = 2 * n * lambda_\n\n a = tx.T.dot(tx) + lambda_ * np.eye(d)\n b = tx.T.dot(y)\n\n return np.linalg.solve(a, b)", "title": "" }, { "docid": "95b5a240e8e1ca25cf81392355a9338c", "score": "0.5647954", "text": "def lsqr( m, n, aprod, b, damp, atol, btol, conlim, itnlim, show, wantvar = False, callback = lambda x: None):\n\n # Initialize.\n\n msg=['The exact solution is x = 0 ',\n 'Ax - b is small enough, given atol, btol ',\n 'The least-squares solution is good enough, given atol ',\n 'The estimate of cond(Abar) has exceeded conlim ',\n 'Ax - b is small enough for this machine ',\n 'The least-squares solution is good enough for this machine',\n 'Cond(Abar) seems to be too large for this machine ',\n 'The iteration limit has been reached ']\n\n if wantvar:\n var = zeros(n,1)\n else:\n var = None\n \n# if show:\n# print ' '\n# print 'LSQR Least-squares solution of Ax = b'\n# str1 = 'The matrix A has %8g rows and %8g cols' % (m, n)\n# str2 = 'damp = %20.14e wantvar = %-5s' % (damp, repr(wantvar))\n# str3 = 'atol = %8.2e conlim = %8.2e' % (atol, conlim)\n# str4 = 'btol = %8.2e itnlim = %8g' % (btol, itnlim)\n# print str1; print str2; print str3; print str4;\n \n itn = 0; istop = 0; nstop = 0\n ctol = 0.0\n if conlim > 0.0: ctol = 1.0/conlim\n anorm = 0.;\tacond = 0.\n dampsq = damp**2;\tddnorm = 0.;\t\tres2 = 0.\n xnorm = 0.;\txxnorm = 0.;\t\tz = 0.\n cs2 = -1.;\tsn2 = 0.\n \n # Set up the first vectors u and v for the bidiagonalization.\n # These satisfy beta*u = b, alfa*v = A'u.\n \n u = b[:m];\tx = zeros(n)\n alfa = 0.;\tbeta = norm( u )\n if beta > 0:\n u = (1.0/beta) * u;\tv = aprod(2, m, n, u)\n alfa = norm( v );\n\n if alfa > 0:\n v = (1.0/alfa) * v; w = v.copy();\n \n arnorm = alfa * beta;\n if arnorm == 
0:\n# print(msg[0])\n return (x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var)\n\n rhobar = alfa;\t\tphibar = beta;\t\tbnorm = beta;\n rnorm = beta\n r1norm = rnorm\n r2norm = rnorm\n head1 = ' Itn x(1) r1norm r2norm '\n head2 = ' Compatible LS Norm A Cond A'\n \n if show:\n# print ' '\n# print head1+head2\n test1 = 1.0;\t\ttest2 = alfa / beta\n str1 = '%6g %12.5e' % (itn, x[0])\n str2 = ' %10.3e %10.3e' % (r1norm, r2norm)\n str3 = ' %8.1e %8.1e' % (test1, test2)\n# print str1+str2+str3\n \n # ------------------------------------------------------------------\n # Main iteration loop.\n # ------------------------------------------------------------------\n while itn < itnlim:\n itn = itn + 1\n # Perform the next step of the bidiagonalization to obtain the\n # next beta, u, alfa, v. These satisfy the relations\n # beta*u = a*v - alfa*u,\n # alfa*v = A'*u - beta*v.\n\n u = aprod(1, m, n, v) - alfa*u\n beta = norm( u );\n if beta > 0:\n u = (1.0/beta) * u\n anorm = normof4(anorm, alfa, beta, damp)\n v = aprod(2, m, n, u) - beta*v\n alfa = norm( v )\n if alfa > 0: v = (1.0/alfa) * v\n \n # Use a plane rotation to eliminate the damping parameter.\n # This alters the diagonal (rhobar) of the lower-bidiagonal matrix.\n\n rhobar1 = normof2(rhobar, damp)\n cs1 = rhobar / rhobar1\n sn1 = damp / rhobar1\n psi = sn1 * phibar\n phibar = cs1 * phibar\n \n # Use a plane rotation to eliminate the subdiagonal element (beta)\n # of the lower-bidiagonal matrix, giving an upper-bidiagonal matrix.\n \n rho = normof2(rhobar1, beta)\n cs = rhobar1/ rho\n sn = beta / rho\n theta = sn * alfa\n rhobar = - cs * alfa\n phi = cs * phibar\n phibar = sn * phibar\n tau = sn * phi\n \n # Update x and w.\n \n t1 = phi /rho;\n t2 = - theta/rho;\n dk = (1.0/rho)*w;\n \n x = x + t1*w\n w = v + t2*w\n ddnorm = ddnorm + norm(dk)**2\n if wantvar: var = var + dk*dk\n \n # Use a plane rotation on the right to eliminate the\n # super-diagonal element (theta) of the upper-bidiagonal matrix.\n # Then use the result to estimate norm(x).\n \n delta = sn2 * rho\n gambar = - cs2 * rho\n rhs = phi - delta * z\n zbar = rhs / gambar\n xnorm = sqrt(xxnorm + zbar**2)\n gamma = normof2(gambar, theta)\n cs2 = gambar / gamma\n sn2 = theta / gamma\n z = rhs / gamma\n xxnorm = xxnorm + z**2\n \n # Test for convergence.\n # First, estimate the condition of the matrix Abar,\n # and the norms of rbar and Abar'rbar.\n \n acond = anorm * sqrt( ddnorm )\n res1 = phibar**2\n res2 = res2 + psi**2\n rnorm = sqrt( res1 + res2 )\n arnorm = alfa * abs( tau )\n \n # 07 Aug 2002:\n # Distinguish between\n # r1norm = ||b - Ax|| and\n # r2norm = rnorm in current code\n # = sqrt(r1norm^2 + damp^2*||x||^2).\n # Estimate r1norm from\n # r1norm = sqrt(r2norm^2 - damp^2*||x||^2).\n # Although there is cancellation, it might be accurate enough.\n \n r1sq = rnorm**2 - dampsq * xxnorm\n r1norm = sqrt( abs(r1sq) )\n if r1sq < 0: r1norm = - r1norm\n r2norm = rnorm\n \n # Now use these norms to estimate certain other quantities,\n # some of which will be small near a solution.\n \n test1 = rnorm / bnorm\n test2 = arnorm/( anorm * rnorm )\n test3 = 1.0 / acond\n t1 = test1 / (1 + anorm * xnorm / bnorm)\n rtol = btol + atol * anorm * xnorm / bnorm\n \n # The following tests guard against extremely small values of\n # atol, btol or ctol. 
(The user may have set any or all of\n # the parameters atol, btol, conlim to 0.)\n # The effect is equivalent to the normal tests using\n # atol = eps, btol = eps, conlim = 1/eps.\n \n if itn >= itnlim: istop = 7\n if 1 + test3 <= 1: istop = 6\n if 1 + test2 <= 1: istop = 5\n if 1 + t1 <= 1: istop = 4\n \n # Allow for tolerances set by the user.\n \n if test3 <= ctol: istop = 3\n if test2 <= atol: istop = 2\n if test1 <= rtol: istop = 1\n \n # See if it is time to print something.\n \n prnt = False;\n if n <= 40 : prnt = True\n if itn <= 10 : prnt = True\n if itn >= itnlim-10: prnt = True\n if itn % 10 == 0 : prnt = True\n if test3 <= 2*ctol : prnt = True\n if test2 <= 10*atol : prnt = True\n if test1 <= 10*rtol : prnt = True\n if istop != 0 : prnt = True\n \n if prnt and show:\n str1 = '%6g %12.5e' %( itn, x[0] )\n str2 = ' %10.3e %10.3e' %(r1norm, r2norm )\n str3 = ' %8.1e %8.1e' %( test1, test2 )\n str4 = ' %8.1e %8.1e' %( anorm, acond )\n# print str1+str2+str3+str4\n \n if istop > 0: break\n callback(x) # added for OpenOpt kernel\n\n # End of iteration loop.\n # Print the stopping condition.\n \n# if show:\n# print ' '\n# print 'LSQR finished'\n# print msg[istop]\n# print ' '\n# str1 = 'istop =%8g r1norm =%8.1e' %(istop, r1norm )\n# str2 = 'anorm =%8.1e arnorm =%8.1e' %(anorm, arnorm )\n# str3 = 'itn =%8g r2norm =%8.1e' %( itn, r2norm )\n# str4 = 'acond =%8.1e xnorm =%8.1e' %(acond, xnorm )\n# str5 = ' bnorm =%8.1e' % bnorm\n# print str1 + ' ' + str2\n# print str3 + ' ' + str4\n# print str5\n# print ' '\n \n return ( x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var )", "title": "" }, { "docid": "d3ee7adc1e8f9eb6cc98e7cda50e3542", "score": "0.5623435", "text": "def SolveQuadratic(a, b, c):\n x2 = (-b + math.sqrt(b ** 2 -4*a*c)) / (2*a)\n\n x1 = (-b - math.sqrt(b ** 2 -4*a*c)) / (2*a)\n\n \n return x1,x2", "title": "" }, { "docid": "b04f86c9cdcf438744f065fdc0351a83", "score": "0.56232136", "text": "def solve_ls(a_mat: np.ndarray, b: np.ndarray, offset: bool = False,\n non_neg: bool = False,\n ret_fit: bool = False):\n\n # Choose least squares solver\n def ls_fun(a_mat_temp, b_temp):\n if non_neg:\n return scipy.optimize.nnls(a_mat_temp, b_temp)[0]\n else:\n return np.linalg.lstsq(a_mat_temp, b_temp, rcond=None)[0]\n\n n, m = a_mat.shape\n if offset:\n # Add a bias regression term\n a_mat_off = np.empty((n, m + 1), dtype=a_mat.dtype)\n a_mat_off[:, 0] = 1.0\n a_mat_off[:, 1:] = a_mat\n a_mat = a_mat_off\n\n ret_val = ls_fun(a_mat, b)\n if ret_fit:\n # Add fitted values to return value\n fit_values = np.matmul(a_mat, ret_val)\n ret_val = (ret_val, fit_values)\n\n return ret_val", "title": "" }, { "docid": "492ecb5ed2cd531dc4de1c4ec294ee94", "score": "0.56168", "text": "def perform_linprog(A,b,c,maxit=-1,tol=1e-10):\n \n c = np.ravel(c, order = \"F\")\n b = np.ravel(b, order = \"F\")\n m,n = np.shape(A)\n if maxit == -1:\n maxit = 10*m\n it=0\n \n D=np.sign(np.sign(b)+.5)\n D = np.diag(D) # initial (inverse) basis matrix\n A = np.hstack((A,D)) # incorporate slack/artificial variables\n B = np.arange(n,n+m) # initial basis\n N = np.arange(0,n) # non-basis\n \n phase = 1\n xb = abs(b)\n s = np.hstack((np.zeros(n), np.ones(m))) # supercost\n\n while phase < 3:\n df = -1\n t = float(\"inf\")\n yb= np.dot(np.transpose(D),s[B]) # multipliers for Ax=b\n while (it < maxit):\n if len(N) == 0:\n break \n # no freedom for minimization\n r = s[N] - np.dot(np.transpose(A[:,N]),yb) # reduced costs\n rmin = np.min(r) # determine new basic variable\n q = np.argmin(r)\n if rmin >= 
-tol*(linalg.norm(s[N],float(\"inf\")) + 1):\n break # optimal!\n it = it+1\n if df >= 0: # apply Bland's rule to avoid cycling\n J = np.where(r<0)[0]\n Nq = np.min(N[J])\n q = np.where(N==Nq)[0]\n d = np.ravel(np.dot(D,A)[:,N[q]])\n I = np.where(d > tol)[0]\n \n if len(I) == 0:\n print(\"Solution is unbounded\")\n it = -it\n break\n xbd=xb[I]/d[I]\n r = np.min(xbd)\n p = np.argmin(xbd)\n p = I[p]\n if df >= 0: # apply Bland's rule to avoid cycling\n J = np.where(xbd == r)[0]\n Bp = np.min(B[I[J]])\n p = np.where(B == Bp)[0] \n xb= xb - np.dot(r,d) # CAREFUL \n xb[p] = r # update x\n df=r*rmin; # change in f \n v = (D[p,:]/d[p]) # row vector\n yb= yb + np.dot(np.transpose(v),s[N[q]] - np.dot(np.transpose(d),s[B]))\n d[p] = d[p] - 1\n v = np.ravel(v)\n D = D - np.dot(np.reshape(d,(np.shape(d)[0],1)),np.reshape(v,(1,np.shape(v)[0])) ) # update inverse basis matrix\n t = B[p]\n B[p] = N[q]\n\n q = int(q); # modified by GP: bug.\n if t >= n:\n N = np.hstack((N[:q],N[(q+1):]))\n else:\n N[q] = t\n xb = xb + np.dot(D,b-np.dot(A[:,B],xb)) # iterative refinement\n I = np.where(xb < 0)[0] # must be due to rounding error\n if len(I) > 0:\n xb[I]=xb[I]-xb[I] # so correct\n if phase == 2 or it < 0:\n break # B, xb,n,m,res=A(:,B)*xb-b\n\n if np.dot(np.transpose(xb),s[B]) > tol:\n it=-it\n print(\"No feasible solution\")\n break\n phase=phase+1 # re-initialise for Phase 2\n s=1e6*linalg.norm(c,float(\"inf\"))*s\n s[:n]=c\n \n x = np.zeros(2*n)\n x[B]=xb\n x=x[:n]\n f=np.dot(np.transpose(c),x)\n if it >= maxit:\n print(\"Too many iterations\")\n it=-it\n return x", "title": "" }, { "docid": "371ddd656c7e6d7bdad9245095d96b8e", "score": "0.5611177", "text": "def linear_fitting(self):\n\n n = self._N\n sxy = self._U\n sx = self._P\n sy = self._T\n sx2 = self._Q\n d = n * sx2 - sx * sx\n\n if abs(d) < TOL:\n raise ZeroDivisionError(\"Input data leads to a division by zero\")\n\n a = (n * sxy - sx * sy) / d\n b = (sy * sx2 - sx * sxy) / d\n return (a, b)", "title": "" }, { "docid": "4280af1432b5e85e16628edd1cfd66b0", "score": "0.5604967", "text": "def solve(self, c, A, b, epsilon=0.0001):\n\n\t\t# ensure dimensions are okay\n\t\tassert A.shape[0] == b.shape[0], 'first dims of A and b must match, check input!'\n\t\tassert A.shape[1] == c.shape[0], 'second dim of A must match first dim of c, check input!'\n\n\t\t# ensure A is full rank, drop redundant rows if not\n\t\tif matrix_rank(A) < min(A.shape[0], A.shape[1]):\n\t\t\tprint('A is not full rank, dropping redundant rows')\n\t\t\t_, pivots = sympy.Matrix(A).T.rref()\n\t\t\tA = A[list(pivots)]\n\t\t\tprint('Shape of A after dropping redundant rows is {}'.format(A.shape))\n\n\t\tm = A.shape[0]\n\t\tn = A.shape[1]\n\n\t\t# initial solution (x_0, lambda_0, s_0) > 0 [lambda is variable l in code]\n\t\t# note that this is not a feasible solution in general\n\t\t# but it should tend towards feasibility by itself with iterations\n\t\t# therefore initially duality gap might show negative\n\t\t# since this is the infeasible-interior-point algorithm\n\t\tx = np.ones(shape=(n, ))\n\t\tl = np.ones(shape=(m, ))\n\t\ts = np.ones(shape=(n, ))\n\n\t\t# set iteration counter to 0 and mu_0\n\t\tk = 0\n\n\t\t# main loop body\n\t\twhile abs(np.dot(x, s)) > epsilon:\n\n\t\t\t# print iteration number and progress\n\t\t\tk += 1\n\t\t\tprimal_obj = np.dot(c, x)\n\t\t\tdual_obj = np.dot(b, l)\n\t\t\tprint('iteration #{}; primal_obj = {:.5f}, dual_obj = {:.5f}; duality_gap = {:.5f}'.format(k, primal_obj, dual_obj, primal_obj - dual_obj))\n\n\t\t\t# choose sigma_k and calculate 
mu_k\n\t\t\tsigma_k = 0.4\n\t\t\tmu_k = np.dot(x, s) / n\n\n\t\t\t# create linear system A_ * delta = b_\n\t\t\tA_ = np.zeros(shape=(m + n + n, n + m + n))\n\t\t\tA_[0:m, 0:n] = np.copy(A)\n\t\t\tA_[m:m + n, n:n + m] = np.copy(A.T)\n\t\t\tA_[m:m + n, n + m:n + m + n] = np.eye(n)\n\t\t\tA_[m + n:m + n + n, 0:n] = np.copy(np.diag(s))\n\t\t\tA_[m + n:m + n + n, n + m:n + m + n] = np.copy(np.diag(x))\n\n\t\t\tb_ = np.zeros(shape=(n + m + n, ))\n\t\t\tb_[0:m] = np.copy(b - np.dot(A, x))\n\t\t\tb_[m:m + n] = np.copy(c - np.dot(A.T, l) - s)\n\t\t\tb_[m + n:m + n + n] = np.copy( sigma_k * mu_k * np.ones(shape=(n, )) - np.dot(np.dot(np.diag(x), np.diag(s)), np.ones(shape=(n, ))) )\n\n\t\t\t# solve for delta\n\t\t\tdelta = np.linalg.solve(A_, b_)\n\t\t\tdelta_x = delta[0:n]\n\t\t\tdelta_l = delta[n:n + m]\n\t\t\tdelta_s = delta[n + m:n + m + n]\n\n\t\t\t# find step-length alpha_k\n\t\t\talpha_max = 1.0\n\t\t\tfor i in range(n):\n\t\t\t\tif delta_x[i] < 0:\n\t\t\t\t\talpha_max = min(alpha_max, -x[i]/delta_x[i])\n\t\t\t\tif delta_s[i] < 0:\n\t\t\t\t\talpha_max = min(alpha_max, -s[i]/delta_s[i])\n\t\t\teta_k = 0.99\n\t\t\talpha_k = min(1.0, eta_k * alpha_max)\n\n\t\t\t# create new iterate\n\t\t\tx = x + alpha_k * delta_x\n\t\t\tl = l + alpha_k * delta_l\n\t\t\ts = s + alpha_k * delta_s\n\n\t\t# print difference between Ax and b\n\t\tdiff = np.dot(A, x) - b\n\t\tprint('Ax - b = {}; ideally it should have been zero vector'.format(diff))\n\t\tprint('norm of Ax - b is = {}; ideally it should have been zero'.format(np.linalg.norm(diff)))\n\n\t\treturn x", "title": "" }, { "docid": "759b839c418bb13a02b1075a215cb7a6", "score": "0.56008464", "text": "def least_squares(y,tx):\n \"\"\"calculate the least squares solution.\"\"\"\n transpose = tx.T\n w = np.linalg.solve(np.dot(transpose,tx),np.dot(transpose,y))\n return w", "title": "" }, { "docid": "acd481c5fe5ddebb8594f4ec88841580", "score": "0.5596981", "text": "def fit_linear_bf_1d(x: np.ndarray, y: np.ndarray, b_fun: Callable, offset: bool = False) -> np.ndarray:\n\n if offset:\n raise NotImplementedError(\"Not implemented with offset.\")\n\n # Get shapes\n # dummy = b_fun(0.0)\n # d = dummy.shape[0]\n # n = x.shape[0]\n\n # Fill matrix\n # ls_mat = np.empty((n, d), dtype=np.float32)\n # for ct, x_el in enumerate(x):\n # ls_mat[ct, :] = b_fun(x_el)\n ls_mat = b_fun(x)\n\n # Solve and return\n coeffs = np.linalg.lstsq(ls_mat, y, rcond=None)[0]\n return coeffs", "title": "" }, { "docid": "f542da8bac77b721ce6d7ee8de71d6f4", "score": "0.55920243", "text": "def solve(A, l, b):\n n = np.shape(A)[0]\n x = np.zeros(n)\n\n # Process the b vector with the stored values below the diagonal of A. 
This will change b to \n # what it would be, had it been directly edited by the forward elimination phase.\n for k in range(n-1):\n for i in range(k+1, n):\n b[l[i]] -= A[l[i], k]*b[l[k]]\n\n # Start creating x\n for i in range(n-1, -1, -1):\n x[i] = b[l[i]]\n for j in range(i+1, n):\n x[i] -= A[l[i], j]*x[j]\n x[i] /= A[l[i], i]\n\n return x", "title": "" }, { "docid": "bf9bbfbfdced98e75458a29410975445", "score": "0.5591784", "text": "def linear_svm(W, b, x, y):\n\n # TODO: implement the function\n\n return loss", "title": "" }, { "docid": "8c54830eaf9535e5dbd84c9f84f8a810", "score": "0.5590682", "text": "def LS_regression(x, y):\n x_t = np.transpose(x)\n theta = np.linalg.inv(x @ x_t) @ x @ y\n return theta", "title": "" }, { "docid": "902fce2903ba5c672b36dee0b02a640b", "score": "0.55901164", "text": "def linear(x,output_dim):\n w=tf.get_variable(\"w\", [x.get_shape()[1], output_dim]) \n b=tf.get_variable(\"b\", [output_dim], initializer=tf.constant_initializer(0.0))\n return tf.matmul(x,w)+b", "title": "" }, { "docid": "acf8fe7b335b210458c6903356726f4d", "score": "0.55866325", "text": "def solve(self, degree, x, t):\n\t\tdef res(w, x, t):\n\t\t\treturn self.model(w, x) - t\n\t\treturn optimize.least_squares(res, x0=np.ones(degree + 1), args=(x, t))", "title": "" }, { "docid": "4a383cecec268c598d1c0c0d40b8a5dd", "score": "0.5585927", "text": "def compute_Ax_vector(x, a, b):\n xax = np.einsum(\"ck,ck -> c\", x @ a, x)\n bx = np.einsum(\"ik,ck -> ic\", b, x)\n Ax = -0.5*xax + bx\n return Ax", "title": "" }, { "docid": "3b44cf65e2a982c0c79d85f74d1e7d56", "score": "0.5569447", "text": "def solve_constrained_lsq(a, t_or_p, b=None):\n \n # Get N - number of columns\n n = np.shape(a)[1]\n\n # Define matrix of ones\n p = np.ones((n + 1, n + 1))\n\n # Get A^TA\n a1 = np.dot(a.T, a)\n\n # Plug this into appropriate place in P \n p[0:n, 0:n] = a1\n\n # Set last element in N,N position to 0 \n p[n, n] = 0\n\n if t_or_p == 0:\n # Define Q\n q = np.zeros((n+1, 1))\n\n # Set last element to N (number of edges) \n # Effectively says average value is 1\n q[n, 0] = n\n\n if t_or_p == 1:\n # Define Q\n q = np.zeros((n+1, 1))\n c = np.dot(a.T, b)\n c = c.reshape((len(c), ))\n\n if len(c) >= len(q[0:n,0]):\n q[0:n, 0] = c[0:n]\n else:\n q[0:len(c), 0] = c[0:len(c)]\n # Effectively says average is 0 \n q[n, 0] = 0\n\n # Solve PX = Q\n try:\n # By QR decomposition\n r1, r2 = linalg.qr(p) # QR decomposition with qr function\n y = np.dot(r1.T, q) # Let y=R1'.Q using matrix multiplication\n x = linalg.solve(r2, y) # Solve Rx=y\n\n # By least squares - gives same result\n # x = linalg.lstsq(R2, y)\n\n # By LU decomposition - Both give same results \n # L, U = scipy.linalg.lu_factor(P)\n # x = scipy.linalg.lu_solve((L, U), Q)\n\n except np.linalg.LinAlgError as err:\n if 'Matrix is singular' in str(err):\n return None, p\n else:\n print('Couldnt invert matrix')\n raise\n\n return x[0:n][:, 0], p # use this if solved using linalg.solve\n # return x[0][0:N], P # use this if solved using linalg.lstsq", "title": "" }, { "docid": "23405fbde09a3ddae5653c6f08c41063", "score": "0.5566966", "text": "def linear_fit (x,y) :\n A = np.vstack([x, np.ones(x.size)]).T\n return np.linalg.lstsq(A,y)[0]", "title": "" }, { "docid": "ff2c3cbc14e41e0edb5fe5b81c990614", "score": "0.5561392", "text": "def solve_lp_scipy(C, A_ub, b_ub, A_eq, b_eq, bounds=None):\n result = scipy.optimize.linprog(C, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=bounds, method='interior-point')\n\n return result", "title": "" }, { "docid": 
"06326dc611032c4c62f83ba669e43a06", "score": "0.5559194", "text": "def eval(self, x, y):\n a, b, c, d, e, f = self.form()\n return a*x*x + b*x*y + c*y*y + d*x + e*y + f", "title": "" }, { "docid": "ed68b4d8a04e3ebb3210076e7c0e4cb6", "score": "0.5547415", "text": "def least_squares(y, tx):\n ttx = np.transpose(tx)\n A = np.dot(ttx,tx)\n b = np.dot(ttx,y)\n w = np.linalg.solve(A,b)\n loss = compute_loss(y, tx, w)\n return w, loss", "title": "" }, { "docid": "6366b0888d340fb9339353585ca1bd91", "score": "0.5546364", "text": "def linear_model(args):\n def f(x):\n return args[0]*x + args[1]\n return f", "title": "" }, { "docid": "1ed8d57f1c14a84682d973dc7fd24a97", "score": "0.5545759", "text": "def compute(self, x, a, b):\n return np.exp(-np.power((x-a), 2) / (2.0 * np.power(b, 2)))", "title": "" }, { "docid": "53cf9ff8f04dcb0f4354b8086dd78558", "score": "0.5545433", "text": "def linear(x0: int, z0: int, x1: int, z1: int, x: float):\n return z0 + (z1 - z0) / (x1 - x0) * (x - x0)", "title": "" }, { "docid": "4de1ac29ee07b05ab88e91516690b8c4", "score": "0.5541177", "text": "def lstsqb(a, b):\r\n return la.lstsq(b.T, a.T, rcond=None)[0].T", "title": "" }, { "docid": "f91e07e10d2d8af393a5d904874b98a8", "score": "0.5539694", "text": "def quadratic_formula(a,b,c):\n \n d = b**2-4*a*c # discriminant\n\n if d < 0:\n raise ValueError(\"This equation has no real solution\")\n elif d == 0:\n x = (-b+np.sqrt(b**2-4*a*c))/2*a\n sol = [x]\n return sol\n else:\n x1 = (-b+np.sqrt(b**2-4*a*c))/2*a\n x2 = (-b-np.sqrt(b**2-4*a*c))/2*a\n sol = [x1, x2]\n return sol", "title": "" }, { "docid": "fd81e1dcce4f8983be708b3c0b82e3c3", "score": "0.5524462", "text": "def fit_linear_1d(x: np.ndarray, y: np.ndarray, x_new: np.ndarray = None):\n n = x.shape[0]\n ls_mat = np.empty((n, 2), dtype=np.float32)\n ls_mat[:, 0] = 1\n ls_mat[:, 1] = x\n m, c = np.linalg.lstsq(ls_mat, y, rcond=None)[0]\n if x_new is None:\n return m, c\n else:\n return c * x_new + m", "title": "" }, { "docid": "4fa904b2a3a8fcf5ebdbef912c7aa4f9", "score": "0.5521552", "text": "def least_squares(self, b) -> np.ndarray:\n assert self.transforms_into(b)\n x = sla.lsqr(self._scipy, np.ravel(b))[0]\n return np.reshape(x, self.Vshape)", "title": "" }, { "docid": "5b5ba0f874674eb397c64153d3444576", "score": "0.5520389", "text": "def equation_eval(self, x):\n a, b, c = self.equation()\n return x.scalar(GeometryPoint(a, b)) + c", "title": "" }, { "docid": "a5ff89e398e8658ece1364ff664c1c7f", "score": "0.5515833", "text": "def least_squares(y, tx):\n # We want to solve the linear system Aw = b...\n # ...with A being the Gram Matrix...\n A = tx.T.dot(tx)\n # ... 
and b being the transpose of tx times y\n b = tx.T.dot(y)\n # solve linear system using the QR decomposition\n w=np.linalg.solve(A, b)\n loss = compute_loss(y,tx,w)# compute the loss using the entire sets\n return w,loss", "title": "" }, { "docid": "abc5adf3c381c11a01e9902a47a80590", "score": "0.5511447", "text": "def axpy(a, x, y):\n y_ = blas.saxpy(x.ravel(), y.ravel(), a=a).reshape(y.shape)\n if y is not y_:\n y[:] = y_\n return y", "title": "" }, { "docid": "5ed29909ec02fe98f1fff5746b6dcc64", "score": "0.55109346", "text": "def solve(self, alpha=0.):\n \n # Fill argument call dictionary\n arg = {\n 'x0' : self.var.vars_in,\n 'lbx' : self.var.vars_lb,\n 'ubx' : self.var.vars_ub,\n\n 'lbg' : np.concatenate(self.constraints_lb),\n 'ubg' : np.concatenate(self.constraints_ub),\n\n 'p' : alpha,\n }\n\n # Call the solver\n self._result = self._solver(arg)\n\n # Process the optimal vector\n self.var.vars_op = self._result['x']\n\n try: self._plot_setup()\n except AttributeError: pass\n\n return float(self._result['f'])", "title": "" }, { "docid": "172109c8666a97e8975e5676bc4ea18a", "score": "0.55058724", "text": "def lstsq(A, b):\n q, r = qr(A)\n n = r.shape[1]\n x = solveUpperTriangular(getSubmatrix(r, 0, 0, n - 1, n - 1), \n np.dot(q.T, b))\n return x.ravel()", "title": "" }, { "docid": "89d91cbe0d75bc5ac1490a89b04ad6a6", "score": "0.5505418", "text": "def eval(self, x, y):\n return self.a*x + self.b*y + self.c", "title": "" }, { "docid": "7a3afda4e7a0a2df44a95f0fabcbf147", "score": "0.5503551", "text": "def func(x, a, b):\n y = np.dot(a, x) + b\n return -np.exp(-np.sum(np.square(y))/200)", "title": "" }, { "docid": "021f0d26bba58492693eda45051b5cd6", "score": "0.55031127", "text": "def model(x, beta):\n \n return polyval(x, beta)", "title": "" }, { "docid": "b7df9b7b47dd1b50ecc2bf7be888641b", "score": "0.5499983", "text": "def tridisolve(d, e, b, overwrite_b=True):\n n = len(b)\n # work vectors\n dw = d.copy()\n ew = e.copy()\n if overwrite_b:\n x = b\n else:\n x = b.copy()\n for k in range(1, n):\n # e^(k-1) = e(k-1) / d(k-1)\n # d(k) = d(k) - e^(k-1)e(k-1) / d(k-1)\n t = ew[k - 1]\n ew[k - 1] = t / dw[k - 1]\n dw[k] = dw[k] - t * ew[k - 1]\n for k in range(1, n):\n x[k] = x[k] - ew[k - 1] * x[k - 1]\n x[n - 1] = x[n - 1] / dw[n - 1]\n for k in range(n - 2, -1, -1):\n x[k] = x[k] / dw[k] - ew[k] * x[k + 1]\n\n if not overwrite_b:\n return x", "title": "" }, { "docid": "91a28753b06f75493a2dfd3ec9c4e91f", "score": "0.54997075", "text": "def solve(\n geom_xx: geometry.Geometry,\n geom_yy: geometry.Geometry,\n geom_xy: Optional[geometry.Geometry] = None,\n fused_penalty: float = 1.0,\n scale_cost: Optional[Union[bool, float, str]] = False,\n a: Optional[jnp.ndarray] = None,\n b: Optional[jnp.ndarray] = None,\n loss: Union[Literal[\"sqeucl\", \"kl\"], quadratic_costs.GWLoss] = \"sqeucl\",\n tau_a: Optional[float] = 1.0,\n tau_b: Optional[float] = 1.0,\n gw_unbalanced_correction: bool = True,\n ranks: Union[int, Tuple[int, ...]] = -1,\n tolerances: Union[float, Tuple[float, ...]] = 1e-2,\n **kwargs: Any,\n) -> GWOutput:\n prob = quadratic_problem.QuadraticProblem(\n geom_xx,\n geom_yy,\n geom_xy=geom_xy,\n fused_penalty=fused_penalty,\n scale_cost=scale_cost,\n a=a,\n b=b,\n loss=loss,\n tau_a=tau_a,\n tau_b=tau_b,\n gw_unbalanced_correction=gw_unbalanced_correction,\n ranks=ranks,\n tolerances=tolerances\n )\n solver = GromovWasserstein(**kwargs)\n return solver(prob)", "title": "" }, { "docid": "9f0e0e7cd8162ff27a25a78873d9c7f8", "score": "0.54964954", "text": "def forward_linear(self, x):\n x 
= self.linear(x)\n\n if self.norm_before:\n x = self.bn1(x)\n\n return x", "title": "" }, { "docid": "91232ab916407fdba77acba8ef5bb2e8", "score": "0.54928946", "text": "def eval_b(theta):\n pass;", "title": "" }, { "docid": "f11cd4052ad5cbcb667360b589b8a787", "score": "0.54832274", "text": "def solveRegression(list_x, list_y):\n \n from numpy.linalg import lstsq\n \n X = np.matrix(list_x) \n X = np.row_stack((np.ones(X.shape[1]), X)).T # Add column of 1s\n y = np.matrix(list_y).T\n \n #p = (X.T * X).I * X.T * y\n (p, residuals, rank, s) = lstsq(X, y)\n \n print(\"y = %.2fx %s %.2f\" % (p[1,0], getSign(p[0,0]), abs(p[0,0])))", "title": "" }, { "docid": "e8f5737d0aea5ba859a5b091f5d756cf", "score": "0.5472299", "text": "def test_linprog(self):\n\n try:\n from scipy.optimize import linprog\n except ImportError:\n raise SkipTest('SciPy version >= 0.15.0 is required for linprog support!!')\n\n print '\\n------------------------------ SciPy linprog ------------------------------'\n results = linprog(self.f,\n A_eq=self.Aeq, b_eq=self.beq,\n A_ub=self.A, b_ub=self.b,\n bounds=zip(self.lb, self.ub),\n options={ 'maxiter': 100, 'disp': True })\n print results\n print 'x:', np.round(results.x, decimals=2)", "title": "" }, { "docid": "e366b5523e1af113b6e817a1cf0da9dc", "score": "0.54684454", "text": "def differential_equations(sum_of_squares):\n a, b = sy.symbols('a b')\n differential_a = sy.diff(sum_of_squares, a)\n differential_b = sy.diff(sum_of_squares, b)\n solution = list(sy.linsolve([differential_a, differential_b], (a, b)))\n return solution[0]", "title": "" }, { "docid": "4825b4c9f7b5a0103162b2b17272a240", "score": "0.5466415", "text": "def parabola(y, a, b, c):\n return (a * y**2) + (b * y) + c", "title": "" }, { "docid": "80c28dfd9875f9cae5471afb07f552cf", "score": "0.5457913", "text": "def solve(self,rhs,x=None,level=0):\n status = 0\n if x == None:\n x = scipy.zeros(len(rhs))\n for iter in range(self.param['nb_iter']):\n x = self.point.system.integrate(x,self.point.system.lambd)\n return x,status", "title": "" } ]
4c4319526216d75cad864ec588286e07
Initialize and archive a directory.
[ { "docid": "32f305018df589536570c77793b3707f", "score": "0.7136352", "text": "def _zip_directory():\n # compress directory\n shutil.make_archive(path, ZIP, path)\n\n # delete uncompressed directory\n if remove:\n shutil.rmtree(path)", "title": "" } ]
[ { "docid": "1962984ac69b2fae8fc56421507535c0", "score": "0.66676486", "text": "def _unzip_directory():\n if not os.path.exists(archive_path):\n os.mkdir(archive_path + \"/\")\n shutil.unpack_archive(\"%s.%s\" % (archive_path, ZIP), extract_dir=archive_path)", "title": "" }, { "docid": "83cb5ff6d1b5ebeb718bb899612172b1", "score": "0.66670364", "text": "def archive_directory(dir_path):\n archive_path = dir_path + \".tar.xz\"\n\n # Remove the previous archive if it exists\n if os.path.exists(archive_path):\n os.remove(archive_path)\n\n subprocess.check_call([\"tar\", \"-caf\", archive_path, \"-C\", dir_path, \".\"])\n\n return archive_path", "title": "" }, { "docid": "84f849f6296716cbb8695dc2d424a549", "score": "0.6579168", "text": "def create_archive(self, path):\n path = Path(path)\n print \"Building archive of %s\" % path.root()\n\n archive = Archive()\n meta_path, local_path = self.get_archive_paths(archive)\n\n # Build the archive\n zip_file = zipfile.ZipFile(\n local_path,\n 'w',\n zipfile.ZIP_DEFLATED,\n allowZip64=True)\n\n # Walk through the directory we want to create an archive for.\n for root, _, filenames in os.walk(path.root()):\n for name in filenames:\n # The absolute path of the file on this filesystem\n real_path = os.path.join(root, name)\n # The destination path to the file inside the zip\n relative_path = real_path[len(path.root()):]\n zip_file.write(real_path, relative_path)\n print 'Wrote %s' % relative_path\n\n # Extract a file list from the archive\n file_list = []\n for item in zip_file.infolist():\n file_list.append(item.filename)\n\n # Get the text of the description if it exists\n description = ''\n description_path = path.relative('description.md')\n if os.path.exists(description_path):\n description = open(description_path, 'r').read()\n print 'Using archive description %s' % description_path\n\n # Store information about the archive\n archive.create_time = datetime.datetime.utcnow()\n archive.description = description\n archive.file_list = file_list\n # Note that archive.size is the size of the archive\n # in bytes BEFORE adding the meta file\n archive.size = os.path.getsize(local_path)\n\n # Write archive metadata to both the repo and the\n # archive itself\n archive.to_file(meta_path)\n zip_file.write(meta_path, 'info.json')\n zip_file.close()\n\n self.manager.add(archive)\n print 'Archive %s of %s created.' 
% (archive.uid[:8], path.root())\n\n return archive.uid", "title": "" }, { "docid": "c32a7d44eb670bee46cfafbd67721709", "score": "0.6481662", "text": "def init_dir(*args):\n directory = os.path.join(*args)\n if not os.path.isdir(directory):\n os.makedirs(directory)\n return directory", "title": "" }, { "docid": "bb4781e06fec046a7a014d7dfd5476c9", "score": "0.64800376", "text": "def _init_dir(_dir_name):\n\n if not os.path.exists(_dir_name):\n os.makedirs(_dir_name)", "title": "" }, { "docid": "c6cf67e47c50753eb4e912d18ca56904", "score": "0.6432895", "text": "def _create_archive_directory(files, archive_name):\n log.info('_create_archive_directory with name %s' % archive_name)\n archive_file_path = os.path.join(_archive_dir, archive_name)\n\n try:\n if not os.path.exists(archive_file_path):\n os.makedirs(archive_file_path)\n except OSError:\n log.error('Could not create the initial archive directory')\n return None\n\n for f in files:\n archive_file_dir = os.path.join(_archive_dir, archive_name, files[f]['archive_path'])\n archive_file = os.path.abspath(os.path.join(archive_file_dir, f))\n\n try:\n if not os.path.exists(archive_file_dir):\n os.makedirs(archive_file_dir)\n except OSError:\n log.error('Could not create archive subdirectory')\n return None\n try:\n with open(os.path.abspath(archive_file), 'w') as tmp_file:\n contents = cache_utils.get(files[f]['key'])\n tmp_file.write(contents)\n except OSError:\n log.error('Could not write archive file into directory')\n return None\n\n return archive_file_path", "title": "" }, { "docid": "6324767e0825c01e61f73ab382e73310", "score": "0.640831", "text": "def zip_directory(self, directory, skip_if_empty = False):\n self.dbg_out(\"creating archive for directory {0}\".format(directory))\n try:\n if skip_if_empty:\n if not os.path.exists(directory) or not os.listdir(directory):\n self.dbg_out(\"Empty directory. 
Skipping archive creation for {0}\".format(directory))\n return\n\n zip_file_path = os.path.join(directory, self.ARCHIVE_NAME)\n zf = zipfile.ZipFile(zip_file_path, \"w\")\n abs_src = os.path.abspath(directory)\n for root, dirs, files in os.walk(directory):\n for filename in files:\n # Avoid zipping previous archive and hash file and binary pyc files\n if not self.is_ignored(filename):\n absname = os.path.abspath(os.path.join(root, filename))\n arcname = absname[len(abs_src) + 1:]\n self.dbg_out('zipping %s as %s' % (os.path.join(root, filename),\n arcname))\n zf.write(absname, arcname)\n zf.close()\n os.chmod(zip_file_path, 0o755)\n except Exception, err:\n raise KeeperException(\"Can not create zip archive of \"\n \"directory {0} : {1}\".format(directory, str(err)))", "title": "" }, { "docid": "765fe78dac82b344f851f121d1215415", "score": "0.6226251", "text": "def archive(self):\n if not self.tarball:\n return\n os.chdir(os.path.join(self.directory, '..'))\n os.system('tar cfz {0} {1} 2>/dev/null'.format(self.tarball, self.directory))", "title": "" }, { "docid": "e018cf6c0e7567a43cf4d02d59637370", "score": "0.62252855", "text": "def __init__(self, path):\n self.path = path\n _make_dir(path)", "title": "" }, { "docid": "d5241a976f2438c61611e286b9a15f64", "score": "0.62179196", "text": "def update_directory_archive(self, directory):\n skip_empty_directory = True\n\n cur_hash = self.count_hash_sum(directory)\n saved_hash = self.read_hash_sum(directory)\n\n directory_archive_name = os.path.join(directory, self.ARCHIVE_NAME)\n\n if cur_hash != saved_hash:\n if not self.nozip:\n self.zip_directory(directory, skip_empty_directory)\n # Skip generation of .hash file is directory is empty\n if (skip_empty_directory and (not os.path.exists(directory) or not os.listdir(directory))):\n self.dbg_out(\"Empty directory. 
Skipping generation of hash file for {0}\".format(directory))\n else:\n self.write_hash_sum(directory, cur_hash)\n pass\n elif not os.path.isfile(directory_archive_name):\n self.zip_directory(directory, skip_empty_directory)", "title": "" }, { "docid": "cbce48a6658430839e7fbac221ea88e4", "score": "0.6217032", "text": "def _init_dir(self):\n for dir in [self.outdir_poe, self.outdir_res]:\n if not os.path.isdir(dir):\n os.makedirs(dir)", "title": "" }, { "docid": "8bc602c628e6f084d92f205e928795b7", "score": "0.6192985", "text": "def as_directory(self):\n return create_directory(self.as_text())", "title": "" }, { "docid": "b60d7d9700fbb827af8e9a19dd465df6", "score": "0.6152353", "text": "def init_dir(dir_name):\r\n if dir_name is not None:\r\n if not os.path.exists(dir_name):\r\n os.makedirs(dir_name)", "title": "" }, { "docid": "7e6389d31db743c615e7c8de9a4173a7", "score": "0.6146547", "text": "def _store_dir(self, directory_path, storage_fname=None, storage_subdir='', suffix=None, arcname=None):\n ext = 'tar.gz' if not suffix else suffix\n filename = storage_fname if storage_fname else self._get_unique_filename(ext)\n object_name = os.path.join(storage_subdir, filename)\n object_args = {\n 'ContentType': 'application/x-gzip',\n 'ContentEncoding': 'gzip'\n }\n\n with tempfile.TemporaryDirectory() as tmpdir:\n archive_path = os.path.join(tmpdir, filename)\n self.compress(archive_path, directory_path, arcname)\n self.upload(object_name, archive_path, ExtraArgs=object_args)\n\n if self.cache_root:\n os.makedirs(self.cache_root, exist_ok=True)\n cached_fp = os.path.join(self.cache_root, filename)\n shutil.copy(archive_path, cached_fp)\n\n self.logger.info('Stored S3: {} -> {}'.format(directory_path, object_name))\n if self.shared_bucket:\n # Return Object Key\n return os.path.join(self.location, object_name)\n else:\n # Return URL\n return self.url(object_name)", "title": "" }, { "docid": "0d29d26a5b52661e0ee9831937d7d55a", "score": "0.60682195", "text": "def __init__(self, archive_path):\n self.path = Path(archive_path)", "title": "" }, { "docid": "e4f0d54bcc713792c31098352749e5f1", "score": "0.605277", "text": "def pack(self):\n\n dirname = self._get_export_root_directory()\n logging.debug(\"pack(): dirname: %s\" % dirname)\n\n # --- Add files to export, if necessary\n self.add_files_to_export(dirname)\n\n zipname = self._get_archive_name()\n logging.debug(\"pack(): zipname: %s\" % zipname)\n\n try:\n zipdir(dirname, zipname)\n except IOError:\n logging.error(u\"pack(): Cannot create archive '%s' in directory '%s'\" \\\n % (zipname, dirname))\n raise\n except Exception, e:\n message = u\"pack(): Exception during archive creation. 
\"\n message += u'Exception ``%s`` raised: %s ' \\\n % (e.__class__.__name__, e.message)\n logging.error(message, exc_info=True)\n raise\n\n self._archive_name = os.path.basename(zipname)\n logging.debug(\"pack(): archive_name: %s\" % self._archive_name)\n\n self._local_checksum = TreeHash(dirname).hash()\n logging.debug(\"pack(): local_checksum: %s\" % self._local_checksum)\n\n return [zipname]", "title": "" }, { "docid": "f93fae4ddfe2b1d67e84fc483e9feb16", "score": "0.604868", "text": "def create_archive(input_dir, zip_file):\n zf = zipfile.ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED)\n def addDir(dir):\n for f in os.listdir(dir):\n full_path = os.path.join(dir, f)\n if os.path.isdir(full_path):\n addDir(full_path)\n else:\n zf.write(full_path, os.path.relpath(full_path, \\\n os.path.join(input_dir, os.pardir)))\n addDir(input_dir)\n zf.close()", "title": "" }, { "docid": "dd09cfcb1625f27957c2886d5239fee5", "score": "0.60439956", "text": "def _archive_dir(self, identifier: str) -> str:\n return os.path.join(self.basedir, identifier)", "title": "" }, { "docid": "be0e71a1029cbbdd658f6a206e5f3580", "score": "0.6031243", "text": "def update_directory_archives(self):\n # archive stacks\n self.dbg_out(\"Updating archives for stack dirs at {0}...\".format(self.stacks_root))\n valid_stacks = self.list_stacks(self.stacks_root)\n self.dbg_out(\"Stacks: {0}\".format(pprint.pformat(valid_stacks)))\n # Iterate over stack directories\n self._iter_update_directory_archive(valid_stacks)\n\n # archive common services\n common_services_root = os.path.join(self.resources_dir, self.COMMON_SERVICES_DIR)\n self.dbg_out(\"Updating archives for common services dirs at {0}...\".format(common_services_root))\n valid_common_services = self.list_common_services(common_services_root)\n self.dbg_out(\"Common Services: {0}\".format(pprint.pformat(valid_common_services)))\n # Iterate over common services directories\n self._iter_update_directory_archive(valid_common_services)\n\n # archive extensions\n extensions_root = os.path.join(self.resources_dir, self.EXTENSIONS_DIR)\n self.dbg_out(\"Updating archives for extensions dirs at {0}...\".format(extensions_root))\n valid_extensions = self.list_extensions(extensions_root)\n self.dbg_out(\"Extensions: {0}\".format(pprint.pformat(valid_extensions)))\n # Iterate over extension directories\n self._iter_update_directory_archive(valid_extensions)\n\n # stack hooks\n self._update_resources_subdir_archive(self.STACK_HOOKS_DIR)\n\n # custom actions\n self._update_resources_subdir_archive(self.CUSTOM_ACTIONS_DIR)\n\n # agent host scripts\n self._update_resources_subdir_archive(self.HOST_SCRIPTS_DIR)\n\n # custom service dashboards\n self._update_resources_subdir_archive(self.DASHBOARDS_DIR)", "title": "" }, { "docid": "d0615c0538ff50ad91087aa8d22ce979", "score": "0.6030138", "text": "def zip_directory(zf, prefix, dn):\n\n for dirname, dirs, files in os.walk(dn):\n for fn in files:\n fn = os.path.join(dirname, fn)\n archive_fn = os.path.join(prefix, os.path.relpath(fn, dn))\n zf.write(fn, archive_fn)", "title": "" }, { "docid": "5be682cc53883e9761a0806d6ab73ee2", "score": "0.601903", "text": "def init_directory(directory_path):\n if directory_path == \"\":\n directory_path = \".\"\n if not os.path.isdir(directory_path):\n os.makedirs(directory_path)\n\n return directory_path", "title": "" }, { "docid": "2a372c4bfdf9cfbc345d17516c167ede", "score": "0.6010535", "text": "def __init__(self, directory):\r\n\t\tself.directory = directory", "title": "" }, { "docid": 
"7dbdee3ffceb6af2c37ffddbe04b9d20", "score": "0.59845054", "text": "def create_downloadable_archive(self):\n _, zip_filename = os.path.split(self.folder)\n zip_filename = zip_filename + \".zip\"\n return os.path.join(\n self.folder,\n zip_dir(self.folder, os.path.join(self.folder, zip_filename)),\n )", "title": "" }, { "docid": "47a0b1860996902b698f9fcddccca453", "score": "0.5970733", "text": "def _create_directories(self, epub_dir: str):\n self.EPUB_DIR = epub_dir or tempfile.mkdtemp()\n self.OEBPS_DIR = os.path.join(self.EPUB_DIR, 'OEBPS')\n self.META_INF_DIR = os.path.join(self.EPUB_DIR, 'META-INF')\n self.IMAGE_DIR = os.path.join(self.OEBPS_DIR, 'images')\n self.STYLE_DIR = os.path.join(self.OEBPS_DIR, 'styles')", "title": "" }, { "docid": "65c52882c598244fdb65c71212e33962", "score": "0.592226", "text": "def _write_archive(self, path, directory_list, exclude):\n # Algorithm is to walk the directory tree for each directory\n # passed in by the user, and add its files to the archive. Since we\n # may be run from any directory, the path calculations all need to be\n # relative to the project base directory absolute path; however, inside the\n # zipfile, we want to name the files relative to the name of the project\n # directory so that the archive is all contained under a root named for\n # the project base dir. In this code, absolute paths are prefaced with\n # 'abs' to make that clear. The final filename in the archive is resolved\n # in the call to os.path.relpath.\n with zipfile.ZipFile(path, 'w') as arczip:\n for d in directory_list:\n absd = os.path.join(self.project_directory, d)\n if self.verbose:\n print 'Archiving directory %s' % d\n for root, dirs, files in os.walk(absd):\n if exclude:\n for ex in exclude:\n if ex in dirs:\n dirs.remove(ex)\n for f in files:\n absf = os.path.join(root, f)\n absr = os.path.relpath(absf,\n os.path.dirname(self.project_directory))\n if self.verbose:\n print '--> Archiving \"%s\" as \"%s\"' % (absf, absr)\n arczip.write(absf, absr)", "title": "" }, { "docid": "b5ca096fd9387a056ffc6222b744a374", "score": "0.5919111", "text": "def CompressDirectory(self, irrelevant, dir_path, dir_contents):\n \n # construct the queue of files to be added that this method will use\n # it seems that dir_contents is given in reverse alphabetical order,\n # so put them in alphabetical order by inserting to front of the list\n dir_contents.sort()\n zip_queue = []\n if dir_path[len(dir_path) - 1:] == os.sep:\n for filename in dir_contents:\n zip_queue.append(''.join([dir_path, filename]))\n else:\n for filename in dir_contents:\n zip_queue.append(''.join([dir_path, os.sep, filename]))\n compress_bit = zipfile.ZIP_DEFLATED\n if not self.compress:\n compress_bit = zipfile.ZIP_STORED\n\n # zip all files in this directory, adding to existing archives and creating\n # as necessary\n while len(zip_queue) > 0:\n target_file = zip_queue[0]\n if os.path.isfile(target_file):\n self.AddFileToArchive(target_file, compress_bit)\n \n # see if adding the new file made our archive too large\n if not self.ArchiveIsValid():\n \n # IF fixing fails, the last added file was to large, skip it\n # ELSE the current archive filled normally, make a new one and try\n # adding the file again\n if not self.FixArchive('SIZE'):\n zip_queue.pop(0)\n else:\n self.current_archive = '%i.zip' % (\n int(self.current_archive[\n 0:self.current_archive.rfind('.zip')]) + 1)\n else:\n\n # if this the first file in the archive, write an index record\n self.WriteIndexRecord()\n zip_queue.pop(0)\n else:\n 
zip_queue.pop(0)", "title": "" }, { "docid": "daf1bda5fa26918a4afbc8d6a5b3e168", "score": "0.5837809", "text": "def __init__(self, datadir):\n self.datadir = remkdir(datadir)", "title": "" }, { "docid": "d303fc2f709841e581bf13afc5357a24", "score": "0.583613", "text": "def __init__(self, download_dir: str):\n self.dir = os.path.join(download_dir, STORAGE_DIR, \"\")\n Path(self.dir).mkdir(parents=True, exist_ok=True)\n\n download_dir_full_path = os.path.join(self.dir, DOWNLOAD_DIR_FILENAME)\n print(download_dir_full_path)\n if os.path.exists(download_dir_full_path):\n with open(download_dir_full_path, \"r\") as f:\n self.download_dir = json.load(f)\n else:\n self.download_dir = []", "title": "" }, { "docid": "3089e2bd9ca2f810e61f5b6027df2b2f", "score": "0.5817338", "text": "def initFileSystem():\n\n return newDir('/', None)", "title": "" }, { "docid": "a3b864f4f4b434d8459c5c1bbcb74475", "score": "0.5815459", "text": "def addArchive(self, path):\n\t\tpass", "title": "" }, { "docid": "26703ae37eb6317dda2e5cd2f490d8e9", "score": "0.58023113", "text": "def _zipdir(path: str, zf: zipfile.ZipFile) -> None:\n lastdir = os.getcwd()\n os.chdir(path)\n for root, dirs, files in os.walk('.'):\n for file in files:\n zf.write(os.path.join(root, file))\n os.chdir(lastdir)", "title": "" }, { "docid": "835430c3cd556b631a0b7d8fd3e29cce", "score": "0.5800534", "text": "def create_archive_folder(archive_folder):\n if not archive_folder.is_dir():\n archive_folder.mkdir(parents=True)", "title": "" }, { "docid": "5807c8681ee22abd1e92e05af3d33f30", "score": "0.5785993", "text": "def folder_make():\n\n current_directory = os.getcwd()\n # print(current_directory)\n folder1 = os.path.join(current_directory, r'archive')\n folder2 = os.path.join(current_directory, r'store')\n if not os.path.exists(folder1):\n print(\"path doesn't exist. trying to make\")\n os.makedirs(folder1)\n if not os.path.exists(folder2):\n print(\"path doesn't exist. 
trying to make\")\n os.makedirs(folder2)", "title": "" }, { "docid": "3683926dc0e89cfc06cb23e530115376", "score": "0.57697004", "text": "def OnDirectory(self,event):\n debug(\"channel_archiver.directory = %r\" % str(self.Directory.Value))\n channel_archiver.directory = str(self.Directory.Value)", "title": "" }, { "docid": "d1b73767411901ba41f9e18ae7bea76c", "score": "0.57630974", "text": "def _create_archive(self):\n filename = getattr(settings, 'surveyArchive', '%Y-%m-%d--%H-%M-%S')\n fmt = getattr(settings, '.zip', 'bz2')\n absolute_path = path.join(\n getattr(settings, 'SurveyRepo', ''),\n '%s.tar.%s' % (datetime.today().strftime(filename), fmt)\n )\n return TarFile.open(absolute_path, 'w:%s' % fmt)", "title": "" }, { "docid": "2c3824fe6b4c87fec3a66c4e473e148a", "score": "0.5754039", "text": "def makedirs(self):\n\t\traise NotImplementedError", "title": "" }, { "docid": "336404bc61c263f523f9f48c64a68449", "score": "0.57531875", "text": "def __init__(self, directory=None):\n dict.__init__(self)\n self.setdefault('directory', directory)", "title": "" }, { "docid": "b34adca7baa3e62bf9cc290e933c3d72", "score": "0.57411927", "text": "def __init__(self, output_dir):\n self.output_dir = output_dir\n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)", "title": "" }, { "docid": "89b60840c4dd5c268a3cec70ae9bb670", "score": "0.5736653", "text": "def __init__(self, input_filename, dir):\n\t\tself.input_filename = input_filename\n\t\tself.dir = dir\n\t\tself.zipped_files = []", "title": "" }, { "docid": "0883e7ebf476db7ce566195f2a883160", "score": "0.5704039", "text": "def archive_dir(self) -> str:\n return full_archive_path(self.configuration,\n self.cli_config, self.lineagename)", "title": "" }, { "docid": "5c63e40c76caf2a4ef07098bea374843", "score": "0.5690244", "text": "def subdir(self, subdir: str, root: Optional[str] = \"/\"):\n root = os.path.abspath(root)\n\n class _Context:\n @staticmethod\n def add(path: str, arcname: Optional[str] = None):\n path = os.path.abspath(path)\n arcname = arcname or os.path.join(subdir, os.path.relpath(path, root))\n\n self._lock.acquire()\n self.tar.add(path, arcname=arcname)\n self._lock.release()\n\n yield _Context()", "title": "" }, { "docid": "b19f42b39712bf1fb651b4a84d153567", "score": "0.5680039", "text": "def make_archive(self, dirlist, archive_path, copyto=None, exclude=None):\n arcabs = os.path.join(self.output_directory, archive_path)\n if os.path.exists(arcabs):\n os.remove(arcabs)\n\n if self.verbose:\n print 'Creating archive at: %s' % arcabs\n\n now = datetime.datetime.now()\n now_string = now.strftime('%Y_%m_%d_%H%M.%S.%f')\n tmp = arcabs + str(os.getpid()) + now_string\n\n self._write_archive(tmp, dirlist, exclude)\n\n os.rename(tmp, arcabs)\n if self.verbose:\n print 'Archive complete at: %s' % arcabs\n\n if copyto:\n if self.verbose:\n print 'Copying archive to: %s' % copyto\n shutil.copy2(arcabs, copyto)", "title": "" }, { "docid": "a823e80f5749bfada423ef160cb5d3c1", "score": "0.5679212", "text": "def _create_archive(self,a_archive_filename,a_dir_data):\n \n arch_type = self._conf.get('AutomaticEmailingInformation','archiveType','tar')\n \n arch_name = None\n \n self._log.info(\"*************************************************************\")\n \n if arch_type == 'zip':\n self._log.info(\"Create a zip archive file\") \n arch_name = \"%s.zip\"%(a_archive_filename)\n z = zipfile.ZipFile(arch_name,\"w\",zipfile.ZIP_DEFLATED)\n for f_name in ctbto.common.utils.dirwalk(a_dir_data):\n z.write(f_name, 
arcname=os.path.basename(f_name))\n z.close()\n elif arch_type == 'tar' or arch_type == 'tar.gz':\n self._log.info(\"Create a tar.gz archive file\")\n arch_name = \"%s.tar.gz\"%(a_archive_filename)\n t = tarfile.open(name = arch_name, mode = 'w:gz')\n t.add(a_dir_data,arcname=os.path.basename(a_dir_data))\n t.close()\n else:\n self._log.info(\"Unknown archive type %s. Create a tar.gz archive file.\"%(arch_type))\n arch_name = \"%s.tar.gz\"%(a_archive_filename)\n t = tarfile.open(name = arch_name, mode = 'w:gz')\n t.add(a_dir_data,arcname=os.path.basename(a_dir_data))\n t.close()\n \n self._log.info(\"*************************************************************\\n\")\n \n return arch_name", "title": "" }, { "docid": "9930b4dd66c46706e74251963c65e000", "score": "0.5651278", "text": "def archive_dir(dirname):\n fname_archive = dirname.with_suffix('.tgz')\n system = platform.system()\n if system == 'Linux':\n # (c)reate archive as a (f)ile, use (z)ip compression\n cmd = ['tar', 'cfz', str(fname_archive), str(dirname)]\n run_cmd(cmd, expected_returncode=0)\n elif system == 'Windows':\n # Using 7z to mimic 'tar cfz' as per this post:\n # https://superuser.com/questions/244703/how-can-i-run-the-tar-czf-command-in-windows\n cmd = f'7z -ttar a dummy {dirname}\\* -so | 7z -si -tgzip a {fname_archive}'\n run_cmd(cmd, expected_returncode=0, shell=True)\n else:\n raise BackupError('Do not recognize system: %s' % system)\n return fname_archive", "title": "" }, { "docid": "23e27fa4c6d6cfe8c52fb03d64e83ac1", "score": "0.56442946", "text": "def ZipDirectory(dirz, zipf):\n with zipfile.ZipFile(zipf, 'w',zipfile.ZIP_DEFLATED) as myzip:\n for dirpath,_,filenames in os.walk(dirz):\n for f in filenames:\n myzip.write(os.path.join(dirpath,f))", "title": "" }, { "docid": "06a7107455bb43555fc0f6a537d0ae9c", "score": "0.56260383", "text": "def mk_dir(directory):\n # Beware race-condition???\n if not os.path.exists(directory):\n os.makedirs(directory)", "title": "" }, { "docid": "08cd88f815614615ab2bf2bb45f5b669", "score": "0.56214446", "text": "def __init__(self, directory=None):\n if directory == None:\n self._directory = os.getcwd()\n else:\n self._directory = directory", "title": "" }, { "docid": "518a1db186de39907f2f17fb378b75d7", "score": "0.5616188", "text": "def init_path(self):\n logger.debug(f\"Creating directory: {self._path}\")\n try:\n os.makedirs(self._path, mode=0o700, exist_ok=True)\n except OSError as e:\n # todo: log e.errno - 13 means bad permissions\n # 17: file exists\n msg = f\"Cannot create directory {self._path}. 
Please check directory permissions.\"\n raise AE5ConfigError(msg)", "title": "" }, { "docid": "54ce0b1511159077783798f2d3407f73", "score": "0.5614489", "text": "def zip_dir(self, dir_path):\n out = io.BytesIO()\n zip = zipfile.ZipFile(out, \"w\", compression=zipfile.ZIP_DEFLATED)\n\n # files to skip\n skip_re = re.compile(\"\\.pyc$\") # no compiled python files pls\n for root, dirs, files in os.walk(dir_path):\n # add dir itself (needed for empty dirs\n zip.write(os.path.join(root, \".\"))\n # add files\n for file in files:\n file_path = os.path.join(root, file)\n in_zip_path = file_path.replace(dir_path, \"\", 1).lstrip(\"\\\\/\")\n print(\"Adding file to lambda zip archive: '{}'\".format(in_zip_path))\n if skip_re.search(in_zip_path): # skip this file?\n continue\n zip.write(file_path, in_zip_path)\n zip.close()\n if False:\n # debug\n zip.printdir()\n return out", "title": "" }, { "docid": "6f5bb97b3129942892602b1ad42c6cbe", "score": "0.56113756", "text": "def __init__(self, directory_name: str, create: bool = False) -> None:\n self.dir = Path(directory_name)\n self.old = None\n if create and not self.dir.exists():\n self.dir.mkdir(parents=True)", "title": "" }, { "docid": "9910723c8a005b6d2f694579e87dd0d1", "score": "0.56086147", "text": "def compress_harvest_dir(self):\n shutil.rmtree(\n os.path.join(self.data_dir, str(self.source.id)),\n ignore_errors=True\n )\n utils.ZipHelper.compress_dir(self.harvest_dir, self.data_dir, str(self.source.id))\n shutil.rmtree(self.harvest_dir, ignore_errors=True)", "title": "" }, { "docid": "246fdf774e224611c870d60d3180d782", "score": "0.56074816", "text": "def test_createCombineArchiveFromDirectory():\n omexPath = tempfile.NamedTemporaryFile(suffix=\"omex\")\n directory = os.path.join(TESTDATA_DIR, \"utils\", \"omex_from_zip\")\n omex.combineArchiveFromDirectory(omexPath=omexPath.name, directory=directory)\n assert omexPath is not None\n # TODO: additional checks via extracting information from the archive again", "title": "" }, { "docid": "4e20d6d36e4695ea300b6c5c4f89be56", "score": "0.5596828", "text": "def setUp(self):\n directory = str(uuid4())\n os.mkdir(directory)\n self.directory = os.path.abspath(directory)", "title": "" }, { "docid": "d1ef6809c4e29e9c165d9189d3ff0d05", "score": "0.55940986", "text": "def create_directory(self):\n #print \"DIRECTORY: %s\" % (self.directory,)\n try:\n os.mkdir(self.directory)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise", "title": "" }, { "docid": "d4f7148a820675edd8c4a32b339c1ccc", "score": "0.5585524", "text": "def do_zip(self):\n tmpdir = tempfile.mkdtemp()\n inner_zip_file_name = self.package.zipname\n try:\n inner_zip_name = os.path.join(tmpdir, inner_zip_file_name)\n inner_zip = ZipFile(inner_zip_name, \"w\")\n self.add_dir_to_zip(inner_zip, Path(self.output_dir))\n inner_zip.close()\n\n outer_zip = ZipFile(self.file_obj, \"w\")\n self.add_dir_to_zip(outer_zip, Path(self.output_dir))\n self.add_dir_to_zip(outer_zip, Path(tmpdir))\n outer_zip.close()\n\n finally:\n shutil.rmtree(tmpdir)", "title": "" }, { "docid": "5449ca5604e18100d9fcaecf2a2f4b32", "score": "0.55843186", "text": "def __init__(self, dir):\n self.dir = \"../\"+dir", "title": "" }, { "docid": "72a5bdb7bdd34e3178807359d5730ccd", "score": "0.5568852", "text": "def __init__(self, directory):\n # Make absolute\n directory = os.path.normpath(os.path.abspath(directory))\n\n self.nonce_dir = os.path.join(directory, 'nonces')\n\n self.association_dir = os.path.join(directory, 'associations')\n\n # Temp dir must be 
on the same filesystem as the assciations\n # directory\n self.temp_dir = os.path.join(directory, 'temp')\n\n self.max_nonce_age = 6 * 60 * 60 # Six hours, in seconds\n\n self._setup()", "title": "" }, { "docid": "645e6dba15df3bd5fe1bb643d78715c1", "score": "0.5557982", "text": "def init(dir_):\n\n try:\n os.makedirs(TODO_DIR)\n except OSError as exc:\n if os.path.isdir(path):\n pass\n else:\n raise\n\n open(TODO_FILE, 'w')\n print('Initialized file and directory for todo...')\n\n os.system('git init')", "title": "" }, { "docid": "138974c043f3e79a476b5da81ab5f74e", "score": "0.55555063", "text": "def compress_harvest_dir(self):\n shutil.rmtree(\n os.path.join(get_current_data_dir(), str(self.source.id)),\n ignore_errors=True\n )\n utils.ZipHelper.compress_dir(\n self.harvest_dir, get_current_data_dir(),\n str(self.source.id)\n )\n shutil.rmtree(self.harvest_dir, ignore_errors=True)", "title": "" }, { "docid": "98d512f985387e21fadcc5781768cc64", "score": "0.5539604", "text": "def _makedir(self):\n try:\n os.mkdir(f'../{self.folder}')\n except FileExistsError:\n pass", "title": "" }, { "docid": "b6e27258879803eaa631b2623b348b91", "score": "0.55344605", "text": "def createDir(self):\n if not os.path.exists(self.path):\n os.mkdir(self.path)\n return self", "title": "" }, { "docid": "38966cf2ae356a2ccd397b3df9aa6b14", "score": "0.5532382", "text": "def init_dirs(self):\n utils.make_dir(self.exons_dir)\n utils.make_dir(self.const_exons_dir)\n utils.make_dir(self.introns_dir)\n utils.make_dir(self.utrs_dir)", "title": "" }, { "docid": "954af1c96ec95ec3e343c3567e7b238f", "score": "0.55320555", "text": "def create_analysis_archive(config: SettingConfig) -> None:\n zip_path = os.path.join(config.archives_path, f\"{config.identifier}_{time.time()}.zip\")\n with ZipFile(file=zip_path, mode='w') as zipObj:\n # Iterate over all the files in directory\n for folderName, subfolders, filenames in os.walk(config.merged_reports_root):\n for filename in filenames:\n # create complete filepath of file in directory\n filepath = os.path.join(folderName, filename)\n # Add file to zip\n zipObj.write(filepath)\n zipObj.close()", "title": "" }, { "docid": "7c87dc067d93404cf4e1717a1143cf81", "score": "0.5526102", "text": "def make_archive(file_list, archive, root):\n a = zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED)\n for f in file_list:\n print \"archiving file %s\" % f\n a.write(f, os.path.relpath(f, root))\n a.close()", "title": "" }, { "docid": "aa408f39406fe0d69281a7c8adba6497", "score": "0.5518229", "text": "def perform_housekeeping(self):\n self.update_directory_archives()\n # probably, later we will need some additional operations", "title": "" }, { "docid": "ddc7413cecee50ea68eb85e128aa62b7", "score": "0.55181164", "text": "def _create_dir(self, directory):\n magic.log.info(_(u\"create remote directory %s...\") % directory)\n res = self.mkdir_RECURSIVE(directory, \"0700\")\n magic.log.info(_(u\"returns %s\"), res)\n return res", "title": "" }, { "docid": "98539a1b9a304da1002b61168ef68322", "score": "0.5512162", "text": "def compress_harvest_dir(self):\n shutil.rmtree(\n os.path.join(self.data_dir, str(self.id)),\n ignore_errors=True\n )\n utils.ZipHelper.compress_dir(self.harvest_dir, self.data_dir, str(self.id))\n shutil.rmtree(self.harvest_dir, ignore_errors=True)\n return os.path.join(self.data_dir, str(self.id))", "title": "" }, { "docid": "64738c86dc4a2b769bb6ce64fa70dc68", "score": "0.5506567", "text": "def _init_file_system(self):\n\n self.out_dir = easy_directory(self.params.output.out_dir)\n fm 
= self.initialise_file_manager(rootdir=self.params.output.out_dir)\n # Top level folders\n for d in ['logs','model','structures','results','analysis']:\n fm.add_dir(dir_name=d, dir_tag=d)\n # Analysis sub-folders\n for d in ['table_ones']:\n fm.add_dir(dir_name=d, dir_tag=d, top_dir_tag='results')", "title": "" }, { "docid": "584f387c02f069e71b5f0b75c6aafb1e", "score": "0.549923", "text": "def make_dir(dir_) -> None:\n if not Path(dir_).exists():\n Path(dir_).mkdir()\n return None", "title": "" }, { "docid": "4a812e089cbfde30f2547b5ed8a77d3c", "score": "0.549726", "text": "def make_dir(self, sub_path):\n path = self.resolve_path(sub_path)\n makedirs(path, exist_ok=True)\n return path", "title": "" }, { "docid": "1f21816430cb5e093a0f6e65a49bf295", "score": "0.5495417", "text": "def make_dir(dir):\n if not os.path.exists(dir):\n os.makedirs(dir)", "title": "" }, { "docid": "e470f3dc5206932d6c26b7b11f774f14", "score": "0.549307", "text": "def __init__(self, destDir):\n\n self.destDir = destDir", "title": "" }, { "docid": "393a153782ce1055597ee6664453487d", "score": "0.54909146", "text": "def _write_dir(self, dir_path, zipfile):\n\n real_path = self._real_path(dir_path)\n\n for dirpath, _, filenames in os.walk(real_path):\n for filename in filenames:\n relpath = self._relative_path(os.path.join(dirpath, filename))\n self._write_file(relpath, zipfile)", "title": "" }, { "docid": "a27fab3286448984f2a6751aad8bfe74", "score": "0.5488341", "text": "def directory(self, value):\n self._directory = value", "title": "" }, { "docid": "e3b13cd418a53f520b77172eafa75bd6", "score": "0.54839647", "text": "def initialize(self):\n # create folder for site\n # create_directory(DIR_FOR_CRAWLER_FILES)\n\n # create folder for storing crawled web pages\n # create_directory(DIR_FOR_DATA_FILES)", "title": "" }, { "docid": "fd3b281a75922b7ba284908276e37a40", "score": "0.5480702", "text": "def SetDirectory(self, dir):\n self._directory = dir", "title": "" }, { "docid": "33a96ea41669cc815eae2048a00bcc1f", "score": "0.5476934", "text": "def make_archive(self, path):\n zf = zipfile.ZipFile(path, 'w', zipfile.ZIP_DEFLATED)\n\n for dirpath, dirnames, filenames in os.walk(self.path):\n relative_path = dirpath[len(self.path) + 1:]\n if relative_path and not self._ignore(relative_path):\n zf.write(dirpath, relative_path)\n for name in filenames:\n archive_name = os.path.join(relative_path, name)\n if not self._ignore(archive_name):\n real_path = os.path.join(dirpath, name)\n self._check_type(real_path)\n if os.path.islink(real_path):\n self._check_link(real_path)\n self._write_symlink(\n zf, os.readlink(real_path), archive_name)\n else:\n zf.write(real_path, archive_name)\n zf.close()", "title": "" }, { "docid": "95ad1891b85505b2e2237ec95f00ef91", "score": "0.54616344", "text": "async def archiver_main(searchdir: Union[os.PathLike, str], inddir: Union[os.PathLike, str] = '.ind', archdir: Union[os.PathLike, str] = '.compressed'):\n archive_path = None\n async with Stopwatch('Archiving index...'):\n with MultiProcessArchiver(inddir, Path(searchdir).stem, archdir) as archiver:\n archive_path = archiver.archive()\n print()\n return archive_path", "title": "" }, { "docid": "cf77f06e0dc9a60d0a282d2cead20e75", "score": "0.54571444", "text": "def Mkdir(d):\n try:\n os.mkdir(d)\n except:\n pass", "title": "" }, { "docid": "b2dabadb3fa8e2b300ef2b71c0f418bd", "score": "0.5455798", "text": "def ensureDir(dir):\n\tif not exists(dir):\n\t\tensureParent(dir)\n\t\tmakeDir(dir)", "title": "" }, { "docid": "f4a37eb23e6c908a23f7c78850af31e7", 
"score": "0.54509735", "text": "def zipdir(dirpath: str):\r\n\r\n if not isinstance(dirpath, str):\r\n raise ValueError(f'expected dirpath is str, got {dirpath}')\r\n if not os.path.exists(dirpath):\r\n raise FileNotFoundError(f'cannot zip {dirpath} (doesnt exist)')\r\n if os.path.exists(dirpath + '.zip'):\r\n raise FileExistsError(f'cannot zip {dirpath} (zip already exists)')\r\n\r\n cwd = os.getcwd()\r\n shutil.make_archive(dirpath, 'zip', dirpath)\r\n os.chdir(cwd)\r\n shutil.rmtree(dirpath)\r\n os.chdir(cwd)", "title": "" }, { "docid": "67054b7c8e423787497117a014ff45f7", "score": "0.5450478", "text": "def __init__(self, save_folder):\n # Make folder if it does not exist.\n if not gfile.IsDirectory(save_folder):\n gfile.MkDir(save_folder)", "title": "" }, { "docid": "460ea9143aa111e7b96c700d5f8d7292", "score": "0.5447905", "text": "def archive(self, root=None, compression='gz', fast=True, keep_originals=False) -> Optional[str]:\n root = Path(root) if root else self.inddir\n if keep_originals:\n self.tmpdir.mkdir(parents=True, exist_ok=True)\n shadowroot = self.tmpdir / root.name\n copy2(root, shadowroot)\n root = shadowroot\n\n if fast:\n self._default_pool().map(partial(PatriciaArchiver._archive, compression=compression),\n (item for item in Path(root).iterdir() if item.is_dir()))\n if archname := self._archive(root):\n archive_path = Path(self.archdir) / (\n self.name + \"\".join(Path(archname).suffixes) if self.name else archname)\n Path(self.archdir).mkdir(parents=True, exist_ok=True)\n os.renames(archname, archive_path)\n return archive_path", "title": "" }, { "docid": "338f3a466869eab2215c4058388e5317", "score": "0.54399985", "text": "def create_dirs(self):\n for dir_ in (\n self.raw_dir,\n self.input_dir,\n self.output_dir,\n self.logs_dir,\n ):\n if dir_:\n storage.mkdir(dir_)", "title": "" }, { "docid": "c49b5372eaee97347d125640adebd9b6", "score": "0.5426769", "text": "def make_directory(self,filename):\n from os.path import exists,dirname; from os import makedirs\n if filename == None or filename == \"\": return\n directory = dirname(filename)\n if directory == \"\" or directory == \".\": return\n if exists(directory): return\n try: makedirs(directory)\n except Exception,message:\n self.log_error(\"Failed to create directory %r for file %r: %r\" %\n (directory,filename,message))", "title": "" }, { "docid": "02ece0bc17dcf9bf5146211361a54748", "score": "0.5426734", "text": "def __init__(self, filename):\n\t\tself.filename = args[0]\n\t\tself.zip = zipfile.ZipFile(self.filename, 'a', zipfile.ZIP_DEFLATED, True)\n\t\tself.tmp = tempfile.mkdtemp(prefix=prefix)", "title": "" }, { "docid": "215d57da4bbb1a2614b63b5fd684d647", "score": "0.54217625", "text": "async def get_directory(self, source_dir, dest, as_root=False):\n # Create all file names\n tar_file_name = source_dir.lstrip(self.path.sep).replace(self.path.sep, '.')\n # Host location of dir\n outdir = os.path.join(dest, tar_file_name)\n # Host location of archive\n tar_file_name = '{}.tar'.format(tar_file_name)\n tmpfile = os.path.join(dest, tar_file_name)\n\n # If root is required, use tmp location for tar creation.\n tar_file_cm = self._xfer_cache_path if as_root else nullcontext\n\n # Does the folder exist?\n await self.execute.asyn('ls -la {}'.format(quote(source_dir)), as_root=as_root)\n\n async with tar_file_cm(tar_file_name) as tar_file_name:\n # Try compressing the folder\n try:\n await self.execute.asyn('{} tar -cvf {} {}'.format(\n quote(self.busybox), quote(tar_file_name), quote(source_dir)\n ), as_root=as_root)\n 
except TargetStableError:\n self.logger.debug('Failed to run tar command on target! ' \\\n 'Not pulling directory {}'.format(source_dir))\n # Pull the file\n if not os.path.exists(dest):\n os.mkdir(dest)\n await self.pull.asyn(tar_file_name, tmpfile)\n # Decompress\n with tarfile.open(tmpfile, 'r') as f:\n safe_extract(f, outdir)\n os.remove(tmpfile)", "title": "" }, { "docid": "342435b999622b3cd5cdeb069b9df358", "score": "0.54211706", "text": "def toZip(directory, zipFile):\n import zipfile\n\n zipFile = path(zipFile)\n if zipFile.exists():\n zipFile.remove()\n\n z = zipfile.ZipFile(\n zipFile, 'w', compression=zipfile.ZIP_DEFLATED\n )\n if not directory.endswith(os.sep):\n directory += os.sep\n\n directory = path(directory)\n\n for subdir in directory.dirs('[a-z]*') + [directory]:\n print(\"adding \", subdir)\n for fname in subdir.files('[a-z]*'):\n archiveName = fname.replace(directory, '')\n z.write(fname, archiveName, zipfile.ZIP_DEFLATED)\n z.close()\n return zipFile", "title": "" }, { "docid": "fc0e0979347c3c5f51642ffc7e480d94", "score": "0.5408498", "text": "def __init__(self, work_directory):\n if not os.path.exists(work_directory):\n os.makedirs(work_directory)\n self._new_dir = True\n elif not os.path.isdir(work_directory):\n raise Error(work_directory + ' exists but is not a directory')\n self._new_dir = False\n\n self.StorageLocation = work_directory\n self.IndexFile = anydbm.open(os.path.join(self.StorageLocation,\n self.INDEX_FILE_NAME), 'c')\n self.Index = self._BuildIndex()", "title": "" }, { "docid": "53714e990d3f1cfd8d08499d476bcb61", "score": "0.5405292", "text": "def build_archive(name, archived_location, archiving_dir):\n\n archive_name = name + str(today)\n #print(archive_name)\n print('Starting archiving Jenkins home located at:' + \" \" +\n archiving_dir)\n archived = shutil.make_archive(archived_location + archive_name,\n 'gztar', archiving_dir)\n print(\"Archive has been built in:\" + \" \" + archive_name)\n return archived", "title": "" }, { "docid": "7eeae91e43dce967b2a0617b47734a62", "score": "0.54027003", "text": "def archive(env):\n savedPath = os.getcwd()\n os.chdir(env['root'])\n tar = shlex.split((\"tar -cjvf %(filename)s makefile *.apk *.sh *.reference *.ref *.master *.toolreference \" + \n \"*.address *.record obj-%(target_arch)s \") % env)\n to_tar = flatten([glob.glob(arg) if '*' in arg else [arg] for arg in tar])\n info((to_tar))\n subprocess.check_call(to_tar)\n os.chdir(savedPath)", "title": "" }, { "docid": "3a138e9cc890b1c597e55c1914ee3fc7", "score": "0.54002064", "text": "def createDir(self):\n try:\n os.makedirs(os.path.join(self.path))\n except OSError:\n # Fail silently on any OS errors\n pass", "title": "" }, { "docid": "a419b914ec08c0429597524a543eaf63", "score": "0.5396808", "text": "def create_dir(dirpath):\n os.makedirs(dirpath)", "title": "" }, { "docid": "3ac978c00ac3380a4b3578187b4cf896", "score": "0.5393857", "text": "def __init__(self, directory: str = None):\n directory = directory if directory else bcore._get_temp_path()\n self.directory: str = bcore._mkdir(directory)\n self.saved_agents: List[Tuple[int, float, str]] = []", "title": "" }, { "docid": "10887cf37883744ef426bc5b33d1b511", "score": "0.53928405", "text": "def makedirs(self, dirname):\n self.log.info('makedirs {}'.format(dirname))\n os.makedirs(os.path.join(self.root, os.path.relpath(dirname, '/')), exist_ok=True)", "title": "" }, { "docid": "18dcede701e9f0c96c1645eae63a7b8a", "score": "0.53913236", "text": "def mkd(self, dirname):\n if dirname:\n 
self._conn.mkd(dirname)", "title": "" }, { "docid": "8bb72d669b576626858ecff355ca4101", "score": "0.5383616", "text": "def zip_to_metadata_dir(zip_url, data_dir_path):\n # Returns a zipped directory\n zipfile = read.zip_from_url(zip_url)\n\n # Check if data dir exists\n if not os.path.exists(data_dir_path):\n os.mkdir(data_dir_path)\n else:\n # Delete metadata folder s-- should always be the top level directory\n shutil.rmtree(os.path.join(data_dir_path, zipfile.namelist()[0]))\n\n # TODO: (?) Add .gitignore to the data dir immediatly\n\n\n # Write zip data to dir for preparing metadat files\n zipfile.extractall(data_dir_path)\n\n logging.debug(zipfile.printdir())\n\n return", "title": "" }, { "docid": "26e43c947966faf8facc239c68f097db", "score": "0.53799665", "text": "def create_dir(dir):\n if not os.path.exists(dir):\n os.makedirs(dir)", "title": "" }, { "docid": "2fb18cfe2750f71c440781a794bcb9b5", "score": "0.5375303", "text": "def create_dir(self, dir_path, perm=0o777):\n if not dir_path:\n return\n existing_path = dir_path\n components = []\n while existing_path and not self.os.path.exists(existing_path):\n existing_path, component = self.os.path.split(existing_path)\n if not component and existing_path:\n # existing path is a drive or UNC root\n if not self.os.path.exists(existing_path):\n self.filesystem.add_mount_point(existing_path)\n break\n components.insert(0, component)\n for component in components:\n existing_path = self.os.path.join(existing_path, component)\n self.os.mkdir(existing_path)\n self.os.chmod(existing_path, 0o777)\n self.os.chmod(dir_path, perm)", "title": "" }, { "docid": "55835ce5e264c386b1ae8470621f3f40", "score": "0.5375223", "text": "def downloadarchive(self, id, bool, ext):\n nfolder = os.path.join(\n self.targetfolder, Project.OUTPUT_FOLDER, _zipdir\n )\n targetname = os.path.join(\n self.targetfolder, Project.OUTPUT_FOLDER, str(_clamID)\n )\n if not os.path.exists(nfolder):\n os.mkdir(nfolder)\n shutil.make_archive(targetname, \"zip\", nfolder)", "title": "" } ]
20caadb329100a99690e943e8f6c8a1c
r"""Creates a new ConsumerGroup in a given project and location.
[ { "docid": "83b634c97e7afe7f1c7783396b29643e", "score": "0.0", "text": "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" } ]
[ { "docid": "26ed96200cd06ef37e551ffd0f266087", "score": "0.66833925", "text": "def create_group(self, name, description=None):", "title": "" }, { "docid": "4dcac98d45f9863597f62c26f4108fc2", "score": "0.632166", "text": "def create_rg(self):\n azure_cli_run(\"group create -l {0} -n {1}\".format(self.loc, self.rg))", "title": "" }, { "docid": "6560b75a0863903223e44ec39bd22de5", "score": "0.625286", "text": "def create_group(Path=None, GroupName=None):\n pass", "title": "" }, { "docid": "96022ca4934a493c711af56aacd46590", "score": "0.6236449", "text": "def create_group(OrganizationId=None, Name=None):\n pass", "title": "" }, { "docid": "96022ca4934a493c711af56aacd46590", "score": "0.6236449", "text": "def create_group(OrganizationId=None, Name=None):\n pass", "title": "" }, { "docid": "2ff9ee8c1c102f38e7ff6686c8f0ec60", "score": "0.6178752", "text": "def create_group(self, context, group):\n return self.common.create_group(context, group)", "title": "" }, { "docid": "9cb046769aa3bcaf99563d81899854da", "score": "0.6156796", "text": "def create_group(ctx: click.core.Context, group_name: str):\n logger.debug(f\"Create group {group_name}\")\n foss = ctx.obj[\"FOSS\"]\n try:\n foss.create_group(group_name)\n logger.debug(f\" group {group_name} created\")\n except FossologyApiError as e:\n if f\"Group {group_name} already exists\" in e.message:\n logger.debug(f\"Group {group_name} already exists.\")\n else:\n logger.fatal(f\"Error adding group {group_name} \", exc_info=True)\n raise e", "title": "" }, { "docid": "a4960862bdad3f83894f16ea6c6929c0", "score": "0.60986507", "text": "def create_security_group(\n name,\n region,\n description,\n authorized=(),\n aws_key=None,\n aws_secret=None,\n ecconn=None):\n if ecconn is None:\n ecconn = __salt__['aws_util.ecconn'](region, aws_key, aws_secret)\n\n ecconn.create_cache_security_group(name, description)\n\n modify_security_group(name, region, authorized, False, aws_key, aws_secret,\n ecconn)", "title": "" }, { "docid": "c805666acb7464517006683e7f7198bf", "score": "0.59876615", "text": "def create_group(group_name, creator_id):\n\n new_group = Group.create(group_name, share=True)\n new_bot = Bot.create(\"JobPostBot\", new_group)\n db_group = models.Group(group_name=new_group.name,\n group_id=new_group.group_id,\n bot_name=new_bot.name,\n bot_id=new_bot.bot_id,\n active=True,\n creator_id=creator_id)\n db.session.add(db_group)\n db.session.commit()", "title": "" }, { "docid": "a03c8bd060113676f3a9f33b5bc8bf95", "score": "0.598386", "text": "def create_resource_group(self):\n try:\n result = self.client.resource_groups.create_or_update(\n self.resource_group,\n {\n 'location': self.location\n }\n )\n self.resource_group_output = result\n print('Created or updated resource group {}'.format(self.resource_group_output.name))\n except azure_exceptions.CloudError as e:\n print(e)", "title": "" }, { "docid": "e7f405089dadad3c5f2873c42c205a5b", "score": "0.58975476", "text": "def Create_Group(name: str,tag='/',creds=''):\n\n\t\t\tiam = CloudManager.IAM.service_router[CloudManager.IAM.service](creds)\n\t\t\tresponse = AWS.IAM.Group.Create(iam,name,tag=tag)\n\n\t\t\treturn response", "title": "" }, { "docid": "0d8cfb2e3ff3da1d0d6b873196dc05d7", "score": "0.5887107", "text": "def create_group(name='nombre'):\n group = Group.objects.create(name=name)\n return group", "title": "" }, { "docid": "e989b442e17e5ef3b77789f859bfcaaa", "score": "0.58582544", "text": "def create_resource_group(credentials, subscription_id, **kwargs):\n resource_client = 
ResourceManagementClient(credentials, subscription_id)\n resource_client.resource_groups.list()\n for i in range(3):\n try:\n resource_group = resource_client.resource_groups.create_or_update(\n resource_group_name=kwargs.get(\"resource_group\", DefaultSettings.resource_group),\n parameters={\n 'location': kwargs.get(\"region\", DefaultSettings.region),\n }\n )\n except CloudError as e:\n if i == 2:\n raise AccountSetupError(\n \"Unable to create resource group in region {}\".format(kwargs.get(\"region\", DefaultSettings.region)))\n print(e.message)\n print(\"Please try again.\")\n kwargs[\"resource_group\"] = prompt_with_default(\"Azure Region\", DefaultSettings.region)\n return resource_group.id", "title": "" }, { "docid": "6eef0c868a0951f6dbac6627c8867108", "score": "0.5854395", "text": "def create_new_group(self, group_key, group_description):\n path = self._compose_path(\"groups\")\n payload = {'name':group_key, 'description':group_description}\n return self._post(path, payload)", "title": "" }, { "docid": "594355278491b18876757cc9e66722d9", "score": "0.5841612", "text": "def create(cls, **kwargs):\n kwargs[\"name\"] = kwargs[\"name\"].strip()\n if \"username\" in kwargs:\n username = kwargs[\"username\"]\n del kwargs[\"username\"]\n else:\n username = radon.cfg.sys_lib_user\n # Make sure name id not in use.\n existing = cls.objects.filter(name=kwargs[\"name\"]).first()\n if existing:\n raise GroupConflictError(kwargs[\"name\"])\n grp = super(Group, cls).create(**kwargs)\n state = grp.mqtt_get_state()\n payload = grp.mqtt_payload({}, state)\n Notification.create_group(username, grp.name, payload)\n return grp", "title": "" }, { "docid": "4a5850ab58d96cd4fdf62842e4ec10a7", "score": "0.58369225", "text": "def _create_hostgroup(self, location, id_num):\n node_name = '%s-%s' % (self.name, id_num)\n desc = '%s %s subcluster nodes' % (location, self.name)\n data = {'description': desc}\n folder = 'hostgroups'\n processed_params = self._process_params(node_name, folder)\n data.update(processed_params)\n self.lg.debug('%s data updated with %s', node_name, processed_params)\n path = os.path.join(\n self.path_repo, folder, node_name)\n self.created.append(entities.FreeIPAHostGroup(node_name, data, path))\n self.lg.debug('%s created successfully', node_name)", "title": "" }, { "docid": "99f953a2f7512e3e94117feec3d144ee", "score": "0.582855", "text": "def _create_group(fake, users):\n name = fake.company()\n g = Group(name=name)\n g.save()\n g.members = users\n g.save()\n return g", "title": "" }, { "docid": "e490f531be98c97dada20a6d5ce48926", "score": "0.5817178", "text": "def create_empty_security_group(self, client=None, project_id=None,\n namestart='secgroup-smoke'):\n\n if client is None:\n client = self.security_groups_client\n if not project_id:\n project_id = client.project_id\n sg_name = data_utils.rand_name(namestart)\n sg_desc = sg_name + \" description\"\n sg_dict = dict(name=sg_name,\n description=sg_desc)\n sg_dict['project_id'] = project_id\n result = client.create_security_group(**sg_dict)\n\n secgroup = result['security_group']\n self.assertEqual(secgroup['name'], sg_name)\n self.assertEqual(project_id, secgroup['project_id'])\n self.assertEqual(secgroup['description'], sg_desc)\n\n self.addCleanup(test_utils.call_and_ignore_notfound_exc,\n client.delete_security_group, secgroup['id'])\n return secgroup", "title": "" }, { "docid": "c908e1a3cdea6896987218fa09ca432c", "score": "0.5789492", "text": "def create(self, name, description, tenant_id):\n\n try:\n body = {'security_group': 
{'name': name, 'description': description, 'tenant_id':\n tenant_id}}\n return self.neutron_client.create_security_group(body=body)\n\n except Exception as error:\n print(error)", "title": "" }, { "docid": "25e56edfc2de6ec6c45458eb37a2db15", "score": "0.57826155", "text": "def share_group_create(context, values):\n return IMPL.share_group_create(context, values)", "title": "" }, { "docid": "8c856da7f96ca2ab82a0d237983cf443", "score": "0.57506716", "text": "def create_group(self, group):\n new = self.copy()\n return new.open(group, history=False)", "title": "" }, { "docid": "c6e8f705b01e4335ea64d8962da8f69d", "score": "0.5726684", "text": "def create_security_group(self, name, description, cloud_provider, is_default=True):\n\n data = {\n \"name\": name,\n \"description\": description,\n \"cloud_provider\": cloud_provider,\n \"is_default\": is_default\n }\n return self._post(endpoint, data=json.dumps(data), jsonify=True)", "title": "" }, { "docid": "6b271375ab7d1b530ac9966927dcd270", "score": "0.57210267", "text": "def CreateMembership(project,\n membership_id,\n description,\n gke_cluster_self_link=None,\n external_id=None,\n release_track=None,\n issuer_url=None):\n client = gkehub_api_util.GetApiClientForTrack(release_track)\n messages = client.MESSAGES_MODULE\n parent_ref = ParentRef(project, 'global')\n request = messages.GkehubProjectsLocationsMembershipsCreateRequest(\n membership=messages.Membership(description=description),\n parent=parent_ref,\n membershipId=membership_id,\n )\n if gke_cluster_self_link:\n endpoint = messages.MembershipEndpoint(\n gkeCluster=messages.GkeCluster(resourceLink=gke_cluster_self_link))\n request.membership.endpoint = endpoint\n if external_id:\n request.membership.externalId = external_id\n if issuer_url:\n request.membership.authority = messages.Authority(issuer=issuer_url)\n op = client.projects_locations_memberships.Create(request)\n op_resource = resources.REGISTRY.ParseRelativeName(\n op.name, collection='gkehub.projects.locations.operations')\n return waiter.WaitFor(\n waiter.CloudOperationPoller(client.projects_locations_memberships,\n client.projects_locations_operations),\n op_resource, 'Waiting for membership to be created')", "title": "" }, { "docid": "10e6361112895e0b33f5662fc2695cef", "score": "0.57081914", "text": "def launch_group(test_obj):\n client = marathon.create_client()\n client.create_group(group(test_obj.count, test_obj.instance))", "title": "" }, { "docid": "86c96f1703cf78e752128dce62ff8fe7", "score": "0.5676739", "text": "def partner_create(self):\n group = Group.objects.get(name='Partner')\n self.groups.add(group)\n self.save()", "title": "" }, { "docid": "432421e51c5c878f1c3e50adfc35ca27", "score": "0.56457233", "text": "def create_parameter_group(\n name,\n region,\n family,\n description,\n parameters,\n aws_key=None,\n aws_secret=None,\n ecconn=None):\n if ecconn is None:\n ecconn = __salt__['aws_util.ecconn'](region, aws_key, aws_secret)\n\n ecconn.create_cache_parameter_group(name, family, description)\n\n modify_parameter_group(name, region, parameters, False, aws_key,\n aws_secret, ecconn)", "title": "" }, { "docid": "f36fa04003c8f636b057398d9bac577a", "score": "0.56319326", "text": "def createGroup(self, parent, name):\n self.__id_count += 1\n task_group = Group(parent=parent, id=self.__id_count, name=name)\n parent.appendGroup(task_group)", "title": "" }, { "docid": "fbc71962d4da4e3c7c12f112b07f33b5", "score": "0.5585208", "text": "def create_group(group_name):\n if not _group_regex_compiled.match(group_name):\n 
raise GroupCreationError(\"Group name must be only letters and numbers.\")\n g = YoGroup(name=group_name)\n query = {'new_account_username': g.name,\n 'new_account_passcode': g.passcode,\n 'api_token': settings.YOPARTY_API_TOKEN,\n 'needs_location': \"false\",\n 'description': \"Group %s\" % g.name,\n 'callback': settings.BASE_URL + reverse('group_callback', kwargs={'cb_code': g.cb_code})}\n # Returns true if there is not an error, false if there was an error\n resp = requests.post('https://api.justyo.co/accounts/', data=query).json()\n if 'error' in resp:\n if resp['error'] == 'User already exists.':\n raise GroupCreationError(\"Yo user already exists.\")\n raise GroupCreationError(resp['error'])\n g.api_token = resp['api_token']\n g.save()", "title": "" }, { "docid": "34388310a6dd63df7d07d0557833c721", "score": "0.5584904", "text": "def create_group(sender, instance, **kwargs):\n if not instance.pk:\n group = Group.objects.create(name=instance.name)\n instance.group = group", "title": "" }, { "docid": "26fe86363d85c6ad2f07e14e9cef3545", "score": "0.5577378", "text": "def create_security_group(self, context, group_name, **group):\n raise NotImplementedError('ironic driver does not support '\n 'security group operations.')", "title": "" }, { "docid": "938de7cbb1cd8f5481b9e2864e1d9d59", "score": "0.55640537", "text": "def create_consistencygroup(ctxt,\n host='test_host',\n name='test_cg',\n description='this is a test cg',\n status='available',\n availability_zone='fake_az',\n volume_type_id=None,\n cgsnapshot_id=None,\n **kwargs):\n cg = {}\n cg['host'] = host\n cg['user_id'] = ctxt.user_id\n cg['project_id'] = ctxt.project_id\n cg['status'] = status\n cg['name'] = name\n cg['description'] = description\n cg['availability_zone'] = availability_zone\n if volume_type_id:\n cg['volume_type_id'] = volume_type_id\n for key in kwargs:\n cg[key] = kwargs[key]\n return db.consistencygroup_create(ctxt, cg)", "title": "" }, { "docid": "22cf56d8956357988ea89dd147698941", "score": "0.5561619", "text": "def create_group(self,group):\n self.groups[group] = Group(group)\n self.__add_section(\"%s%s\" % (self.__GROUP_PREFIX,group))\n self.__add_section_value(\"%s%s\" % (self.__GROUP_PREFIX,group),self.__MEMBER_AREA,'',)", "title": "" }, { "docid": "7aa9ee287c6cc36e1adff5820866212a", "score": "0.5531841", "text": "def _create_ec2_security_group(group):\n try:\n for authorizations in env.vm_ec2_available_security_groups[group]:\n app = _ec2().create_security_group(\n group,\n group,#security_group_desc,\n )\n for protocol, inport, outport, ip_range in authorizations:\n print 'Authorizing:',protocol, inport, outport, ip_range\n if ip_range:\n app.authorize(protocol, inport, outport, ip_range)\n else:\n app.authorize(protocol, inport, outport, None, app)\n print \"Created security group %s.\" % (security_group_name,)\n \n except boto.exception.EC2ResponseError:\n print \"Security group called %s already exists, continuing.\" % group\n return False\n return True", "title": "" }, { "docid": "fa6cb30a3286becdc5c397245032d700", "score": "0.55291545", "text": "def group_create(name, gid=None):\n options = []\n if gid: options.append(\"-g '%s'\" % (gid))\n sudo(\"groupadd %s '%s'\" % (\" \".join(options), name))", "title": "" }, { "docid": "f33756c78e4af66812ad9ba63404e141", "score": "0.5525518", "text": "def createInstanceGroup(self, **kwargs):\n if not name in kwargs:\n raise RuntimeError(\"Missing required argument name\")\n return self.call(\"createInstanceGroup\", args)", "title": "" }, { "docid": 
"0632b87cd21e62665a933c863c218686", "score": "0.55134743", "text": "def share_group_type_create(context, values, projects=None):\n return IMPL.share_group_type_create(context, values, projects)", "title": "" }, { "docid": "f4e647745be7eb0fb59ee6c42d1574ae", "score": "0.55115134", "text": "def groups_create(deployment_group_name, inputs, default_blueprint,\n description, client, logger):\n client.deployment_groups.put(\n deployment_group_name,\n default_inputs=inputs,\n blueprint_id=default_blueprint,\n description=description\n )\n logger.info('Group %s created', deployment_group_name)", "title": "" }, { "docid": "6265650fe3026738d35a65e01ca8a6b2", "score": "0.55023766", "text": "def _create_consumer_id(self, consumer_group_id):\n hostname = platform.node()\n ms_since_epoch = int(time.time() * 1000)\n uuid_top_hex = uuid.uuid4().hex[:8]\n consumer_uuid = \"{0}-{1}-{2}\".format(hostname, ms_since_epoch, uuid_top_hex)\n\n return \"{0}_{1}\".format(consumer_group_id, consumer_uuid)", "title": "" }, { "docid": "cd88557d78c458f0843d921ce33dd666", "score": "0.5501176", "text": "def selenium_create_group(self, group_name):\n # click create group button\n self.selenium.find_element_by_name(\"create_group_button\").click()\n # filling the form and creating a group of the name \"Example group\"\n group_name_input = self.selenium.find_element_by_name(\"group_name\")\n group_name_input.send_keys(group_name)\n self.selenium.find_element_by_name(\"save_button\").click()", "title": "" }, { "docid": "bd0224551d75142e88953bdc6d5cf10d", "score": "0.54984987", "text": "def create_group(self, name):\n header = {'X-API-Key': self.write_key} \n parameters = {'name': name}\n \n response = requests.post(self.__api_endpoint[\"groups\"], headers=header, params=parameters)\n return response.status_code, response.json()", "title": "" }, { "docid": "b960cf2d82c4006e34f40d37343da623", "score": "0.54959124", "text": "def create_test_nodegroup(**kw):\n nodegroup = get_test_nodegroup(**kw)\n # Let DB generate ID if it isn't specified explicitly\n if 'id' in nodegroup:\n del nodegroup['id']\n dbapi = db_api.get_instance()\n return dbapi.create_nodegroup(nodegroup)", "title": "" }, { "docid": "d3ef0644853064090f32d22b2e8ae422", "score": "0.54899997", "text": "def createGroupTask(creds, groupId):\n\turl = \"https://habitica.com/api/v3/tasks/group/\" + groupId\n\tpayload = {\"groupId\": groupId}\n\treturn(postUrl(url, creds, payload))", "title": "" }, { "docid": "b1c1372148112e5bf8439edd2cb0fa9f", "score": "0.5488678", "text": "def create_group(self, group_data: dict) -> dict:\n return self.API.create_request(data=group_data, expected_response=[200], url=self.URL)", "title": "" }, { "docid": "4c3719a60e7138f2c1d4e13595ba4108", "score": "0.54694515", "text": "def create_project(name=None, id=None, description=None, clientRequestToken=None, sourceCode=None, toolchain=None, tags=None):\n pass", "title": "" }, { "docid": "dd8d7bc7c1fbdded1be7d0f2b4a7a145", "score": "0.54658026", "text": "def create_project(create_project_request: CreateProject):\n try:\n logging.info(f\"Create Project Router: {create_project_request}\")\n project_arn = ProjectController().create_project_controller(\n project_name=create_project_request.project_name\n )\n return CreateProjectResponse(**{\"project_arn\": project_arn})\n except Exception as error:\n logging.error(f\"{error=}\")\n raise error", "title": "" }, { "docid": "9e3189ef5570fc189dbd9f1450fbf447", "score": "0.54603034", "text": "def create_security_group(self, name=None, project_id=_NONE,\n 
description=_NONE, stateful=_NONE):\n body = _clean_dict(\n name=name or self.generate_random_name(),\n tenant_id=project_id,\n description=description,\n stateful=stateful\n )\n resp = self.client.create_security_group({\"security_group\": body})\n return resp[\"security_group\"]", "title": "" }, { "docid": "464c08aead18d9c1f62cc4a424a27da6", "score": "0.5441703", "text": "def create_new_group(self, group_name, owner_id, invite_list):\n query = { \"group_counter\": {\"$exists\": \"true\"} }\n if self.collection.count_documents(query, limit = 1) > 0:\n q = self.collection.find_one(query)\n q[\"group_counter\"] = q[\"group_counter\"] + 1\n self.collection.update_one(query, {\"$set\": { \"group_counter\": q[\"group_counter\"]}})\n else:\n return False\n\n if self.check_if_group_exists(q[\"group_counter\"], group_name) == False:\n new_group_data = {\n \"group_id\": q[\"group_counter\"],\n \"group_name\": group_name,\n \"owner_id\": owner_id,\n \"member_list\": [owner_id],\n \"invite_list\": invite_list,\n \"playlists\": []\n }\n self.collection.insert_one(new_group_data)\n else:\n return False\n\n return True", "title": "" }, { "docid": "64b6f961a8bc952d1f956bdb3747e6d5", "score": "0.5438229", "text": "def create(project_type, creator):\n raise NotImplementedError()", "title": "" }, { "docid": "bcb9c2dc2157416b10eb886e53a635b3", "score": "0.54067487", "text": "def _create_groups(self, location, id_num):\n for prefix in ('foreman', 'primitive'):\n for perm in ('view', 'full'):\n if perm == 'full' or self.data['separate_foreman_view']:\n node_name = '%s-%s-%s-%s-%s' % (\n prefix, self.name, location, id_num, perm)\n if prefix == 'primitive':\n node_name = '%s-access' % node_name\n if perm == 'view':\n continue\n desc = '%s-%s-%s-%s' % (\n self.name, id_num, location, perm)\n folder = 'groups'\n data = {'description': desc}\n processed_params = self._process_params(node_name, folder, prefix)\n data.update(processed_params)\n self.lg.debug('%s data updated with %s', node_name, processed_params)\n path = os.path.join(\n self.path_repo, folder, node_name)\n self.created.append(entities.FreeIPAUserGroup(node_name, data, path))\n self.lg.debug('%s created successfully', node_name)", "title": "" }, { "docid": "7ab0625870bd7619b3df251492b6ec41", "score": "0.5399392", "text": "def create_group(self, kwargs):\n if kwargs['GroupName'] in self.groups:\n raise client_error('CreateGroup',\n 'EntityAlreadyExists',\n 'Group with name %s already exists.'\n % kwargs['GroupName'])\n\n group = Group(kwargs['GroupName'])\n self.groups[group.name] = group\n return responses.group_response(group)", "title": "" }, { "docid": "805a864232884b7800b4267131465fd3", "score": "0.53842455", "text": "def create_group():\n # IF form is submitted an valid:\n # Create new Group object + save to database,\n # flash sucess message,\n # redirect user to group page\n \n form = GroupForm()\n\n if form.validate_on_submit():\n new_group = Group(\n created_by = flask_login.current_user,\n name = form.name.data,\n max_atendees = form.max_atendees.data,\n location = form.location.data,\n code = form.code.data,\n )\n current_user.groups.append(new_group)\n db.session.add(new_group)\n db.session.commit()\n flash('Group was created successfully')\n return redirect(url_for('main.profile', group=new_group))\n return render_template('create_group.html', form=form)", "title": "" }, { "docid": "e4e436b38484650f08dbc18aacc87855", "score": "0.5381839", "text": "def create_project(credentials, options):\n raise GFedv1NotImplementedError(\"Method not 
implemented\")", "title": "" }, { "docid": "d49e9892f76cc892eee168a026b3b5a8", "score": "0.536725", "text": "def _Create(self):\n create_cmd = util.AWS_PREFIX + [\n '--region', self.region,\n 'elbv2', 'create-target-group',\n '--target-type', 'ip',\n '--name', self.name,\n '--protocol', self.protocol,\n '--port', str(self.port),\n '--vpc-id', self.vpc_id\n ]\n stdout, _, _ = vm_util.IssueCommand(create_cmd)\n response = json.loads(stdout)\n self.arn = response['TargetGroups'][0]['TargetGroupArn']", "title": "" }, { "docid": "dd4058153d3593c40b593ccc91b9d3e4", "score": "0.53645545", "text": "def init_sec_group(self, category, extras):\n # do not recreate default openstack security groups\n if category.scheme == \\\n 'http://schemas.openstack.org/infrastructure/security/group#':\n return\n\n context = extras['nova_ctx']\n\n group_name = category.term.strip()\n group_description = (category.title.strip()\n if category.title else group_name)\n\n security.create_group(group_name, group_description, context)", "title": "" }, { "docid": "041d851dd7c8013df8cd9d6a98adeefa", "score": "0.5362179", "text": "def create_group(self, name, type_):\n util.check_entity_name_and_type(name, type_)\n groups = self._h5group.open_group(\"groups\")\n if name in groups:\n raise exceptions.DuplicateName(\"open_group\")\n grp = Group._create_new(self, groups, name, type_)\n return grp", "title": "" }, { "docid": "64835cc3a3988be4f34126301615e7b5", "score": "0.53583175", "text": "def create_group(self, group, thumbnail=None):\n\n postdata = self._postdata()\n postdata.update(unicode_to_ascii(group))\n\n # Build the files list (tuples)\n files = []\n if thumbnail:\n if _is_http_url(thumbnail):\n thumbnail = urllib.urlretrieve(thumbnail)[0]\n file_ext = os.path.splitext(thumbnail)[1]\n if not file_ext:\n file_ext = imghdr.what(thumbnail)\n if file_ext in ('gif', 'png', 'jpeg'):\n new_thumbnail = thumbnail + '.' 
+ file_ext\n os.rename(thumbnail, new_thumbnail)\n thumbnail = new_thumbnail\n files.append(('thumbnail', thumbnail, os.path.basename(thumbnail)))\n\n # Send the POST request, and return the id from the response\n resp = self.con.post('community/createGroup', postdata, files)\n if resp and resp.get('success'):\n return resp['group']['id']", "title": "" }, { "docid": "b012824e183e8a6763b85b3d74085218", "score": "0.5356554", "text": "def test_create_group(self):\n\n url = reverse('grouplist')\n response = self.client.post(url, self.new_group, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "title": "" }, { "docid": "b8c0afa0b79ce2d522f506ec9fb23b90", "score": "0.5348531", "text": "def create_s_group(self, s_group_name, s_group_description):\n self.tester.send_keys_by_id(self._s_group_name_field_id, s_group_name)\n self.tester.send_keys_by_id(self._s_group_description_field, s_group_description)\n self.tester.click_element_by_id(self._create_s_group_button_id)", "title": "" }, { "docid": "94dc982b2ea87ae928fd903fbeb7f662", "score": "0.5330315", "text": "def test_new_group_creation(self):\n url = u\"/composers/translate/edit?appid=%s&srclang=all_ALL&editSelectedSourceButton=&targetlang=all_ALL&srcgroup=ALL&targetgroup=TEST\" % (\n self.firstApp.unique_id)\n print \"URL: \" + url\n rv = self.flask_app.get(url)\n assert rv.status_code == 200\n\n data = rv.data.decode(\"utf8\") # This bypasses an apparent utf8 FlaskClient bug.\n\n # Ensure that the identifiers appear in the response.\n # (that is, that it appears to be translate-able)\n assert u\"hello_world\" in data\n assert u\"black\" in data\n assert u\"Hello World.\" in data", "title": "" }, { "docid": "82cf3f6af535220708b4780dc95c4bb6", "score": "0.52750903", "text": "def create_procurement_group(self):\n proc_group_obj = self.env['procurement.group']\n return proc_group_obj.create({'removal_order_id': self.id})", "title": "" }, { "docid": "5aaac3141834d5baef81a33b13a7b3a1", "score": "0.52636063", "text": "def create_project(self, name: str, id: str, description: str = None, clientRequestToken: str = None, sourceCode: List = None, toolchain: Dict = None, tags: Dict = None) -> Dict:\n pass", "title": "" }, { "docid": "4e4857501c1806f9a4b35a31bc571e5d", "score": "0.5250789", "text": "def __init__(self, name, config):\n path = os.tempnam(settings.groups_dir, \".new.\")\n os.mkdir(path)\n Group.__init__(self, name, path, config)\n self.save_config()", "title": "" }, { "docid": "06c7fbe168eedf64aec7bdcb28b1ab99", "score": "0.5240678", "text": "def obj_create(self, bundle, **kwargs):\n logger.info(\"Creating a new group...\")\n bundle = super(GroupResource, self).obj_create(bundle, **kwargs)\n logger.info(\"Group '{0}' created.\".format(bundle.obj.name))\n \n #Add permission to the group\n if \"permissions\" in bundle.data:\n for p in bundle.data['permissions']:\n try:\n perm = Permission.objects.get(pk=p['id'])\n bundle.obj.permissions.add(perm)\n logger.info(\"Permission '{0}' granted to '{1}' group.\".format(perm.name,\n bundle.obj.name))\n except Permission.DoesNotExist:\n pass\n \n return bundle", "title": "" }, { "docid": "435331b98039b4c7d5d7ef6602b98716", "score": "0.52353966", "text": "def create():\n project_name = name_gen.generate()\n project = Project(id=project_name, name=project_name)\n return project.put().get()", "title": "" }, { "docid": "15b45b97c5c822ac69426bb380b3fb48", "score": "0.5232568", "text": "def create_security_group(self, ec2_session, vpc_id, security_group_name):\n return 
ec2_session.create_security_group(GroupName=security_group_name,\n Description=SecurityGroupService.CLOUDSHELL_SECURITY_GROUP_DESCRIPTION,\n VpcId=vpc_id)", "title": "" }, { "docid": "0f10f508b6c3c00228f2adcb51c3fa17", "score": "0.52321595", "text": "def create_auth_group():\n teacher_group = Group.objects.create(name='Teachers')\n for permission_name in PERMISSION_LIST:\n permission = Permission.objects.get(codename=permission_name)\n teacher_group.permissions.add(permission)\n return teacher_group", "title": "" }, { "docid": "a4ce1a0dc40131bfdd210a2b0402e400", "score": "0.5227897", "text": "def create(\n name,\n description,\n vpc_id=None,\n vpc_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n if not vpc_id and vpc_name:\n try:\n vpc_id = _vpc_name_to_id(\n vpc_id=vpc_id,\n vpc_name=vpc_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n except boto.exception.BotoServerError as e:\n log.debug(e)\n return False\n\n created = conn.create_security_group(name, description, vpc_id)\n if created:\n log.info(\"Created security group %s.\", name)\n return True\n else:\n msg = \"Failed to create security group {}.\".format(name)\n log.error(msg)\n return False", "title": "" }, { "docid": "7bbfa1212513abb6bddc541a6c4f8d33", "score": "0.5226622", "text": "def createGroup(self, name, checkPathConflicts = True, scripts = None,\n imageGroup = False):\n return self._action(name, checkPathConflicts = checkPathConflicts,\n ActionClass = CreateNewGroupAction,\n imageGroup = imageGroup,\n scripts = scripts)", "title": "" }, { "docid": "395cdfdc3d0d329fc77a41defe62f6bd", "score": "0.5179948", "text": "def create_security_groups(env=ENV):\n security_groups = []\n admin = ec2.SecurityGroup('admin',\n [ec2.SecurityGroupInbound('tcp',\n 873, 873, ['web',\n 'web-proxy',\n 'celery']),\n ec2.SecurityGroupInbound('tcp',\n 8140, 8140, ['base'])])\n\n base = ec2.SecurityGroup('base',\n [ec2.SecurityGroupInbound('tcp',\n 22, 22, ['admin'])])\n\n rabbit_elb = ec2.SecurityGroup('rabbitmq-elb',\n [ec2.SecurityGroupInbound('tcp',\n 5672, 5672,\n ['web',\n 'admin',\n 'celery'])])\n\n syslog = ec2.SecurityGroup('syslog',\n [ec2.SecurityGroupInbound('udp',\n 514, 514, ['base'])])\n\n security_groups.append(admin)\n security_groups.append(base)\n security_groups.append(rabbit_elb)\n security_groups.append(syslog)\n\n security_groups += [ec2.SecurityGroup('celery'),\n ec2.SecurityGroup('graphite'),\n ec2.SecurityGroup('graphite-elb'),\n ec2.SecurityGroup('rabbitmq'),\n ec2.SecurityGroup('sentry'),\n ec2.SecurityGroup('sentry-elb'),\n ec2.SecurityGroup('web-proxy'),\n ec2.SecurityGroup('web'),\n ec2.SecurityGroup('web-elb')]\n\n ec2.create_security_groups(security_groups, 'solitude', env)", "title": "" }, { "docid": "2e5107c44f3d22a58c237e526ac00406", "score": "0.51797426", "text": "def groupCreate(request):\n message = \"\"\n if request.method == 'POST': # Process the form\n form = forms.GroupCreate(request.POST)\n if form.is_valid():\n group = form.save()\n # temporarily it is not nessessary to specify a server, so we take\n # the first one we get.\n server = PadServer.objects.all()[0]\n pad_group = PadGroup(group=group, server=server)\n pad_group.save()\n request.user.groups.add(group)\n return HttpResponseRedirect('/py_etherpadlite/accounts/profile/')\n else:\n message = _(\"This Groupname is allready in use or invalid.\")\n else: # No form to process so create a fresh one\n form = forms.GroupCreate()\n 
con = {\n 'form': form,\n 'title': _('Create a new Group'),\n 'message': message,\n }\n con.update(csrf(request))\n return render(\n\t\trequest,\n 'etherpad-lite/groupCreate.html',\n con\n )", "title": "" }, { "docid": "7a3e15811b77226df1b7e8336c882729", "score": "0.51746684", "text": "def new(name: str, username: str, password: str, **kwargs):\n os.mkdir(name)\n path = Path(name).resolve()\n name = path.name\n init_project(\"Created\", name, path, username, password, **kwargs)", "title": "" }, { "docid": "02e3012e83f604858eac157513760994", "score": "0.5174491", "text": "def create_group(self, name: str):\n try:\n group = Group()\n self.add_subsystem(name, group, promotes=['*'])\n yield group\n finally:\n group.setup()\n pass", "title": "" }, { "docid": "7abf80188c6c2f9ff9b0fae761bb6eb8", "score": "0.5174172", "text": "def create_group(self, group_config):\n endpoint = {'endpoint': self.endpoint}\n params = keyname_formatd(group_config)\n params.update(endpoint)\n cmd = \"CreateAutoScalingGroup\"\n log.info(\"create the auto-scaling group\")\n log.info(\">> %s\" % group_config['AutoScalingGroupName'])\n try:\n self.handle_response(operate(self.service, cmd, params))\n except Exception, e:\n raise IcsASException(e)\n log.info(\"OK\")", "title": "" }, { "docid": "518d438a88ff5fc69b85a67dc142ae2b", "score": "0.5172497", "text": "def _create_instance(self):\n self._init_security_group()", "title": "" }, { "docid": "9664dfce00ad5266203254898ebdb453", "score": "0.51715", "text": "def create_consistency_group(self, name=None, description=None,\n share_type_ids=(), share_network_id=None,\n source_cgsnapshot_id=None,\n version=LATEST_MICROVERSION):\n uri = 'consistency-groups'\n post_body = {}\n if name:\n post_body['name'] = name\n if description:\n post_body['description'] = description\n if share_type_ids:\n post_body['share_types'] = share_type_ids\n if source_cgsnapshot_id:\n post_body['source_cgsnapshot_id'] = source_cgsnapshot_id\n if share_network_id:\n post_body['share_network_id'] = share_network_id\n body = json.dumps({'consistency_group': post_body})\n resp, body = self.post(uri, body, headers=EXPERIMENTAL,\n extra_headers=True, version=version)\n self.expected_success(202, resp.status)\n return self._parse_resp(body)", "title": "" }, { "docid": "23258fd8b38b5189d1849d5f6b527616", "score": "0.51714593", "text": "def create_project(context, env='localhost', flight_num='1'):\n invoke_cmd = f'python run_cmd.py create_project {env} {flight_num}'\n context.run(invoke_cmd)", "title": "" }, { "docid": "227da6146462a8c6cdc01c172ff0aa2b", "score": "0.5169308", "text": "def create_replication_group(\n name,\n region,\n primary,\n description,\n test=False,\n aws_key=None,\n aws_secret=None):\n ecconn = __salt__['aws_util.ecconn'](region, aws_key, aws_secret)\n\n group = get_replication_group(name, region, aws_key, aws_secret, ecconn)\n\n if group is None:\n if not test:\n\n ecconn.create_replication_group(name, primary, description)\n return {'action': 'create'}\n return {'action': 'noop'}", "title": "" }, { "docid": "8c6ce3dd4308e2a9e2c06c765fc2f6de", "score": "0.51676774", "text": "def createSecurityGroup(self, **kwargs):\n if not name in kwargs:\n raise RuntimeError(\"Missing required argument name\")\n return self.call(\"createSecurityGroup\", args)", "title": "" }, { "docid": "5cc1ac0fdd79d299e67ca9342064a9f6", "score": "0.51672864", "text": "def create_or_update_resource_group(rg_name, location=DEFAULT_LOCATION):\n return rcli.resource_groups.create_or_update(\n 
resource_group_name=rg_name,\n parameters={'location': location},\n )", "title": "" }, { "docid": "f34a7cbcb0fe36b415ed4c0ead65ba7d", "score": "0.51645476", "text": "def create_project(name: str) -> Response:\n\n pass", "title": "" }, { "docid": "ff8fd1dfccdb652c76dbf15d4f4279b5", "score": "0.51643586", "text": "def create_group():\n request = flask.request.get_json()\n\n try:\n group = _lookup_group(request[\"name\"])\n except KeyError:\n flask.abort(422)\n\n if group:\n flask.abort(409)\n\n # NOTE(jk0): The spec says the body should only contain a `name` parameter,\n # but `flask.jsonify` purposely prevents us from doing that:\n #\n # http://bit.ly/1S1tDes\n GROUPS[request[\"name\"]] = {\"users\": []}\n\n return flask.jsonify(GROUPS[request[\"name\"]]), 201", "title": "" }, { "docid": "dcfc266e894e8ff1c62bf5d5ed47d708", "score": "0.5163974", "text": "def create_group(self, group_name, display_name, users=[]):\n group = Group()\n group.name = group_name\n group.display_name = display_name\n for user in users:\n user.groups.append(group)\n DBSession.add(group)\n return group", "title": "" }, { "docid": "7bb94463e5b5173d6f6603aa080c9312", "score": "0.5139741", "text": "def CreateSharedGroup(request, callback, customData = None, extraHeaders = None):\n if not PlayFabSettings.DeveloperSecretKey:\n raise PlayFabErrors.PlayFabException(\"Must have DeveloperSecretKey set to call this method\")\n\n def wrappedCallback(playFabResult, error):\n if callback:\n callback(playFabResult, error)\n\n PlayFabHTTP.DoPost(\"/Server/CreateSharedGroup\", request, \"X-SecretKey\", PlayFabSettings.DeveloperSecretKey, wrappedCallback, customData, extraHeaders)", "title": "" }, { "docid": "a4f189fda227e5f23b885b7981f23e05", "score": "0.5122294", "text": "def create(cls):\n \n # special checks for group put in override fields \n data = get_user_data([\"group\"])\n group = force_attribute_type(\"group\", str, data[\"group\"])\n if group == \"incr\":\n abort(400, \"'incr' is no a valid group name\")\n\n # create new group \n Rest.create.__func__(cls, current_app.mongo.db.groups,\n rule_dn = \"/groups/\",\n required_attr = [],\n override_attr = {\"group\": group}\n )\n\n # add group to list of users if 'members' provided\n if \"members\" in data and len(data[\"members\"])>0:\n Users.add_groups(data[\"members\"], group)\n\n # create returns dict, not json, allowing calling function to\n # add/remove attributes as required by api\n return {\"success\": True, \"group\":group}", "title": "" }, { "docid": "2f7932111a0f5027b2fd686c89fc8b31", "score": "0.5108959", "text": "def __call__(\n self,\n request: migrationcenter.CreateGroupRequest,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Optional[float] = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> operations_pb2.Operation:\n\n http_options: List[Dict[str, str]] = [\n {\n \"method\": \"post\",\n \"uri\": \"/v1/{parent=projects/*/locations/*}/groups\",\n \"body\": \"group\",\n },\n ]\n request, metadata = self._interceptor.pre_create_group(request, metadata)\n pb_request = migrationcenter.CreateGroupRequest.pb(request)\n transcoded_request = path_template.transcode(http_options, pb_request)\n\n # Jsonify the request body\n\n body = json_format.MessageToJson(\n transcoded_request[\"body\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n uri = transcoded_request[\"uri\"]\n method = transcoded_request[\"method\"]\n\n # Jsonify the query params\n query_params = json.loads(\n json_format.MessageToJson(\n 
transcoded_request[\"query_params\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n )\n query_params.update(self._get_unset_required_fields(query_params))\n\n query_params[\"$alt\"] = \"json;enum-encoding=int\"\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = getattr(self._session, method)(\n \"{host}{uri}\".format(host=self._host, uri=uri),\n timeout=timeout,\n headers=headers,\n params=rest_helpers.flatten_query_params(query_params, strict=True),\n data=body,\n )\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n resp = operations_pb2.Operation()\n json_format.Parse(response.content, resp, ignore_unknown_fields=True)\n resp = self._interceptor.post_create_group(resp)\n return resp", "title": "" }, { "docid": "2d988f23e8c0782917b81db3f55db50a", "score": "0.5105741", "text": "def make_group(self):\n group = Group(self.data['groupname'])\n self._set_common_attributes(group)\n self.group = group\n return group", "title": "" }, { "docid": "e15ba2e38cc5fa5bdff693be912e3523", "score": "0.50988156", "text": "def share_group_snapshot_member_create(context, values):\n return IMPL.share_group_snapshot_member_create(context, values)", "title": "" }, { "docid": "9eabdd84a3b4afae7f53977aa738809a", "score": "0.5083791", "text": "def create_project(self, project_id, project):\n raise exception.NotImplemented() # pragma: no cover", "title": "" }, { "docid": "637e9db1bc8db065e02dd01e0fbcef98", "score": "0.5069073", "text": "def _create_group_files(self) -> typing.NoReturn:\n LOGGER.info('Creating group files')\n for role in [\n r for r in self._roles.values() if r['type'] == constants.GROUP\n ]:\n data = {\n 'name': role['role'],\n 'grants': {\n '{}s'.format(k.lower()): v\n for k, v in role['grant'].items() if v\n },\n 'revocations': {\n '{}s'.format(k.lower()): v\n for k, v in role['revoke'].items() if v\n },\n 'options': role.get('options'),\n 'settings': role.get('settings')\n }\n file_path = constants.PATHS[constants.GROUP] / '{}.{}'.format(\n role['role'], YAML_EXTENSION)\n if str(file_path) in self.ignore:\n LOGGER.debug('Skipping %s', file_path)\n continue\n self.files_created.append(\n storage.save(self.project_path, file_path,\n constants.GROUP, role['role'], data))", "title": "" }, { "docid": "7d1ae4824c912d2ee76eeac086c99858", "score": "0.5068228", "text": "def groups_add(name, description, **kwargs):\n new_group = {\"profile\": {\"name\": name, \"description\": description}}\n return okta_manager.call_okta(f\"/groups\", REST.post, body_obj=new_group)", "title": "" }, { "docid": "7fa514b3c30800b9bd8969eb097ff408", "score": "0.50651836", "text": "def add_subparser_create(self):\n self.add_subparser(\n 'create',\n help='create the cluster',\n )", "title": "" }, { "docid": "31d2454766815c260f11e9f80ca6d72e", "score": "0.5040054", "text": "def create_group(self, argu):\n\n va_name = self.get_va_name(argu)", "title": "" }, { "docid": "607df7f34acb23899fe87e0ddf8cc0b1", "score": "0.50374806", "text": "def get_or_create_security_group(env, vpc, security_groups, group_name, description=\"\"):\n dryRun = True\n if env != \"test\":\n \tdryRun = False\n print \"Group name: \" + group_name\n groups = [g for g in security_groups if g.name == group_name]\n group = groups[0] if groups else None\n if not group:\n print \"Creating group 
'%s'...\"%(group_name,)\n try:\n \tgroup = vpc.create_security_group(group_name, description, vpc_id = my_vpc['id'], dry_run = dryRun)\n except Exception,e:\n \tprint e\n\n if env == \"test\":\n \tprint \"Since Environment mode is Test, will assign group 'test' just for script to continue..\"\n \tgroup = [g for g in security_groups if g.name == \"test\"][0]\n else:\n \tprint \"Group \" + group.name + \" already exists.\"\n return group", "title": "" }, { "docid": "4585e16884f2383b30a7dd0019faa659", "score": "0.50320476", "text": "def create_project(self, target, **kwargs):\n raise NotImplementedError()", "title": "" }, { "docid": "a219befa527c5d6496e34244e71d9378", "score": "0.5029609", "text": "def create_bad_group(self):\n self.test_runner.run_create_bad_group()", "title": "" }, { "docid": "5e1517159d8cca536e2404524f93210b", "score": "0.5011492", "text": "def create_group_from_create_group_response(response):\n group_dict = json.loads(response.text)\n return Group.from_dict(group_dict)", "title": "" }, { "docid": "845eab55aca341288f0af94c6a495203", "score": "0.50091344", "text": "def create_lock_group(l_uuid):\n creds = initialize_creds()\n customer = 'my_customer'\n\n directory = discovery.build('admin', 'directory_v1', credentials=creds)\n groups = directory.groups()\n \n name = '.'.join(('lock', uuid.UUID(l_uuid).hex[:32-7]))\n assert(len(name) <= 30) # Google restriction\n desc = 'This group is autogenerated by lock with uuid ' + l_uuid + '. '\\\n 'You are needed to approve the lock. ' \\\n 'Do not forget change group name and, optionally, description. ' \\\n 'In order to add access to lock for user just add the user to this group ' \\\n 'and wait for another day.'\n new_email = name + '@miem.hse.ru'\n print('name: ', name)\n print('email: ', new_email)\n return groups.insert(\n body={\n 'email': new_email,\n 'description': desc,\n 'name': name\n }\n ).execute()", "title": "" }, { "docid": "1c235e23a2aa9434ad69324adfd019bd", "score": "0.50047404", "text": "def test_create_existing_group(self):\n\n url = reverse('grouplist')\n response = self.client.post(url, self.good_group, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "d5a2e05fb71151870161f14b02141132", "score": "0.50043756", "text": "def share_group_snapshot_create(context, values):\n return IMPL.share_group_snapshot_create(context, values)", "title": "" }, { "docid": "7f534dacadd087413ff83863d12aafcf", "score": "0.49968848", "text": "def new(name: str, books: T.Sequence[int] = ()):\n with models.db:\n try:\n group = models.Group.create(name=name)\n except peewee.IntegrityError as e:\n if str(e).startswith('UNIQUE'):\n raise BuchSchlossError('Group_exists', 'Group_{}_exists', name)\n else:\n raise\n else:\n group.books = books", "title": "" }, { "docid": "77fcf9e73d28598e2446d8c7a7b7141d", "score": "0.49942842", "text": "def create_group(\n name: typing.Union[str, typing.Dict[str, str]],\n description: typing.Union[str, typing.Dict[str, str]],\n initial_user_id: int\n) -> Group:\n if isinstance(name, str):\n name = {\n 'en': name\n }\n if isinstance(description, str):\n description = {\n 'en': description\n }\n\n try:\n for language_code, name_text in list(name.items()):\n # check language code\n language = get_language_by_lang_code(language_code)\n # Check name\n if not 1 <= len(name_text) <= MAX_GROUP_NAME_LENGTH:\n if language.id != Language.ENGLISH and not name_text:\n del name[language_code]\n else:\n raise errors.InvalidGroupNameError()\n\n existing_group = 
groups.Group.query.filter(groups.Group.name[language_code].astext.cast(db.Unicode) == name_text).first()\n if existing_group is not None:\n raise errors.GroupAlreadyExistsError()\n except errors.LanguageDoesNotExistError:\n raise errors.LanguageDoesNotExistError(\"There is no language for the given lang code\")\n except errors.GroupAlreadyExistsError:\n raise errors.GroupAlreadyExistsError()\n except errors.InvalidGroupNameError:\n raise errors.InvalidGroupNameError()\n if 'en' not in name:\n raise errors.MissingEnglishTranslationError()\n\n for item in list(description.items()):\n # delete empty descriptions\n if item[1] == '':\n del description[item[0]]\n\n user = get_mutable_user(initial_user_id)\n group = groups.Group(name=name, description=description)\n db.session.add(group)\n group.members.append(user)\n db.session.add(group)\n db.session.commit()\n return Group.from_database(group)", "title": "" } ]
9d4222829eb58cdfc1cf654e4354aac9
Calculate Brightness Temperature Again, you'll have to access appropriate metadata variables by their index number.
[ { "docid": "5846bf56d5212a0821deb9e3554955c7", "score": "0.0", "text": "def bt_calc(rad, var_list):\n bt = (var_list[3] / np.log(var_list[2]/rad) + 1) - 273.15\n return bt\n #plt.imshow(bt, cmap='RdYlGn')\n #plt.colorbar()", "title": "" } ]
[ { "docid": "b6e43d70f458cfb622bd4efcd671dbb0", "score": "0.6685459", "text": "def temperature(self):\n # perform one measurement in high res, forced mode\n self._write_register_byte(_BMP280_REGISTER_CONTROL, 0xFE)\n\n # Wait for conversion to complete\n while (self._read_byte(_BMP280_REGISTER_STATUS) & 0x08):\n time.sleep(0.002)\n # lowest 4 bits get dropped\n UT = self._read24(_BMP280_REGISTER_TEMPDATA) / 16\n #print(\"raw temp: \", UT)\n\n var1 = (UT / 16384.0 - self.dig_T1 / 1024.0) * self.dig_T2\n var2 = ((UT / 131072.0 - self.dig_T1 / 8192.0) * (\n UT / 131072.0 - self.dig_T1 / 8192.0)) * self.dig_T3\n self.t_fine = int(var1 + var2)\n #print(\"t_fine: \", self.t_fine)\n\n temp = (var1 + var2) / 5120.0\n return temp", "title": "" }, { "docid": "c06410e4379dda827c9a6c9769579c7c", "score": "0.6524922", "text": "def color_temp(self) -> int:\n if not self.is_variable_color_temp:\n raise SmartDeviceException(\"Bulb does not support colortemp.\")\n\n light_state = self.get_light_state()\n if not self.is_on:\n return int(light_state[\"dft_on_state\"][\"color_temp\"])\n else:\n return int(light_state[\"color_temp\"])", "title": "" }, { "docid": "4190481e8209b9bd0668e6a3f337c6ec", "score": "0.65228736", "text": "def read_temperature(self):\n raw_temp = self.read_raw(BMP280_TEMPDATA)\n compensated_temp = self._compensate_temp(raw_temp)\n temp = float(((compensated_temp * 5 + 128) >> 8)) // 100\n\n self._logger.debug('Calibrated temperature {0}'.format(temp))\n return temp", "title": "" }, { "docid": "64e0bbac58aed6320d04e2a164fd7a13", "score": "0.6450315", "text": "def get_temperature(self):\n if self.height <= TROPOPAUSE:\n self.temp = T0 + self.delta_t + self.lapse_rate * self.height\n elif self.height > TROPOPAUSE: # if above tropopause - temp == temp at tropopause\n self.temp = self.trop_temp", "title": "" }, { "docid": "55989a4bb38430c6174e38f2a4e7560f", "score": "0.638132", "text": "def blynk_read_temperature():\n blynk.virtual_write(blynk.VPIN_TEMP, filter.result())", "title": "" }, { "docid": "ca47ea0f83c2e03ebfc21bbd06c8223a", "score": "0.63769025", "text": "def getTemperature(self):\n return self.__bmp085Temp.getTemperature()", "title": "" }, { "docid": "d7d5acfea431b79ad4fef69317011980", "score": "0.6341307", "text": "def temperature(self):\n temperature = struct.unpack(\">H\", self._read_register(_AM2320_REG_TEMP_H, 2))[0]\n if temperature >= 32768:\n temperature = 32768 - temperature\n return temperature/10.0", "title": "" }, { "docid": "5741bc7d6d8c7083d698691ae7d64e9d", "score": "0.6337031", "text": "def get_temperature(self):\n temp = self.i2c.regrd(self.addr, self.MCP_AMBIENT_TEMP_REG, \">H\")\n temp_msb = (temp & 0xFF00) >> 8\n temp_lsb = temp & 0xFF\n\n temp_msb = temp_msb & 0x1F\n if temp_msb & 0x10 == 0x10:\n temp_msb = temp_msb & 0x0F\n temp = 256 - (temp_msb * 16 + temp_lsb / 16)\n else:\n temp = temp_msb * 16 + temp_lsb / 16\n\n return temp", "title": "" }, { "docid": "b5fdc223a6b001e88b9d7dea93f5c4ed", "score": "0.63169307", "text": "def raw_temperature() -> int:\n ...", "title": "" }, { "docid": "4637a6a2b56e391a381d40a505f9f169", "score": "0.6248118", "text": "def color_temp(self):\n if (\n self.light_device_type == \"tuneablelight\"\n or self.light_device_type == \"colourtuneablelight\"\n ):\n return self.session.light.get_color_temp(self.node_id)", "title": "" }, { "docid": "7632239f72c7dc222f42c5bf1042fd11", "score": "0.6225518", "text": "def Temperature(self):\n dev = self.pio.i2c_open(self.piBus, self.address)\n self.pio.i2c_write_byte(dev, 
self.SI7021_CMD_MEAS_TEMP_NO_HOLD_MASTER)\n time.sleep(0.2) # 20ms. Minimum of 11ms\n (count, data) = self.pio.i2c_read_device(dev, 2)\n self.pio.i2c_close(dev)\n lsb = data[1] & 0xFC\n msb = data[0]\n measurement = int(msb << 8 | lsb)\n temperature = (175.72 * measurement / 65536) - 46.85\n return temperature", "title": "" }, { "docid": "e894b60532539b9da3d747b2ec182672", "score": "0.6213249", "text": "def brightness(self):\n if \"target\" in self.values:\n return round((self.values.target.value / 99) * 255)\n return round((self.values.primary.value / 99) * 255)", "title": "" }, { "docid": "fe8f9671c11d52e61222c95fec31ee01", "score": "0.6178376", "text": "def get_temperature(self):\n while True:\n string = self.dev.read(8)\n if string.startswith('\\x03'):\n frac, = struct.unpack('H', string[1:])\n return -200 + frac * 0.1", "title": "" }, { "docid": "04ef6d2fd18c8c0e16ba01a90b50dd72", "score": "0.6176709", "text": "def doRuuviTag_temperature( data):\n\tif data[1:2] == 0x7FFF:\n\t\treturn 0\n\n\ttemperature = twos_complement((data[1] << 8) + data[2], 16) / 200\n\treturn round(temperature, 1)", "title": "" }, { "docid": "cb38ac0231c4b9677132620d3869a326", "score": "0.61659455", "text": "def color_temp(self) -> Optional[float]:\n if self.control_result:\n color_temp = self.control_result[\"temp\"]\n else:\n color_temp = self.block.colorTemp\n\n # If you set DUO to max mireds in Shelly app, 2700K,\n # It reports 0 temp\n if color_temp == 0:\n return self.max_mireds\n\n return int(color_temperature_kelvin_to_mired(color_temp))", "title": "" }, { "docid": "64ad2f26f14805d46195c3d08c928e9b", "score": "0.61213166", "text": "def temperature(self):\n return 25 + int.from_bytes([self.device.readReg(0x26)],\n byteorder='little', signed=True)", "title": "" }, { "docid": "881467731ad7b80e9188a441801f2613", "score": "0.61168057", "text": "def measure_temperature(self):\n self.mode = 'temperature'", "title": "" }, { "docid": "aec1c0ee554f46a3aad39627a70a89df", "score": "0.6096235", "text": "def getTempF(self):\r\n tempC = self.getTemp()\r\n return tempC * 9./5 + 32", "title": "" }, { "docid": "3f7836ce7b5b01f38efd5aa4055d8435", "score": "0.6063763", "text": "def temperature(bot, update) -> None:\n sensor = Adafruit_DHT.DHT22\n pin = 4\n humidity_data, temperature_data = Adafruit_DHT.read_retry(sensor, pin)\n update.message.reply_text('Temperatur: {0:.2f}'.format(round(temperature_data, 2)))", "title": "" }, { "docid": "48e21493af6fb1ee2ae1a823d97c5413", "score": "0.6042905", "text": "def _set_brightness(self):\n self.Tb_f = self.Tb0 * (self.r_sky_f / self.r0)**(-self.Tbq) * \\\n np.exp(-(self.r_sky_f / self.r_l)**self.Tbeps)\n self.Tb_f = self.Tb0 * (self.r_sky_f / self.r0)**(-self.Tbq)\n Rtrans = 90.\n Tb_trans = self.Tb0 * (Rtrans / self.r0)**(-self.Tbq)\n outer = (self.r_sky_f >= Rtrans)\n self.Tb_f[outer] = Tb_trans * (self.r_sky_f[outer] / Rtrans)**(-5.)\n gap1 = (self.r_sky_f >= 15.) & (self.r_sky_f <= 18.)\n gap2 = (self.r_sky_f >= 70.) 
& (self.r_sky_f <= 82.)\n self.Tb_f[gap1] *= 0.01\n self.Tb_f[gap2] *= 0.05\n self.Tb_f = np.clip(self.Tb_f, 0.0, self.Tbmax)\n self.Tb_f = np.where(self._in_disk_f, self.Tb_f, 0.0)\n if self._flat_disk:\n self.Tb_b = None\n else:\n self.Tb_b = self.Tb0 * (self.r_sky_f / self.r0)**(-self.Tbq) * \\\n np.exp(-(self.r_sky_f / self.r_l)**self.Tbeps)\n self.Tb_b = np.clip(self.Tb_b, 0.0, self.Tbmax_b)\n self.Tb_b = np.where(self._in_disk_b, self.Tb_b, 0.0)", "title": "" }, { "docid": "1640d8ed56eba9c3c9509973e4ecf09a", "score": "0.6012328", "text": "def get_temperature(self):\r\n if self.temperature > 0.0:\r\n return self.temperature\r\n else:\r\n raise Exception(\"SpectralDensity was not assigned temperature\")", "title": "" }, { "docid": "3e676863b1e08086638a4d03228d6956", "score": "0.601219", "text": "def temperature_c(self) :\n return round(((self.temperature*500/10000.0) - 32 ) * 5 / 9, 1)", "title": "" }, { "docid": "7f6060a619ba6e888be0bb4b241f6ed3", "score": "0.6007617", "text": "def temperature(self):\n return (self.temp * self.thermal_mass).sum() / self.thermal_mass.sum()", "title": "" }, { "docid": "0b730e85bf1c910ec633b728bbe0bd59", "score": "0.60016745", "text": "def get_color_temperature(self, channel=0, *args, **kwargs) -> Optional[int]:\n self.check_full_update_done()\n info = self._channel_light_status.get(channel)\n if info is None:\n return None\n return info.temperature", "title": "" }, { "docid": "04a83ae832ef5d059f603944bb96e823", "score": "0.5977383", "text": "def get_temperature():\n temperature_sensor.read()\n temperature = temperature_sensor.temperature()\n return temperature", "title": "" }, { "docid": "6a89cb87a33e0539670c8387166f65fa", "score": "0.59579533", "text": "def color_temp(self):\n return color_util.color_temperature_kelvin_to_mired(self._ct)", "title": "" }, { "docid": "9a6be8856bc81ae8da049458d4b80445", "score": "0.595037", "text": "def get_actual_brightness():\n return int(_get_value(\"actual_brightness\"))", "title": "" }, { "docid": "e6bf2ab993ce1f6238c7a75cd40a92ed", "score": "0.5936341", "text": "def calcTemperature(self):\n return eq.estimateStellarTemperature(self.M)", "title": "" }, { "docid": "4d09fb76bf1356147d62e7b05b685641", "score": "0.59237343", "text": "def temperature(self):\n return self.sensor['Enviro/air.temperature']", "title": "" }, { "docid": "ca1fa15477910958234f4ff41c9c566b", "score": "0.5899875", "text": "def blackbody_temperature(bmag, vmag):\n t_bb = 4600 * (\n (1 / (0.92 * (bmag - vmag) + 1.7))\n + (1 / (0.92 * (bmag - vmag) + 0.62))\n )\n return t_bb", "title": "" }, { "docid": "b421f0bec508969676a36f9e439109ed", "score": "0.58994883", "text": "def IntToTemp(reading):\n CheckReadingType(reading)\n CheckReadingBounds(reading)\n\n voltage = (reading * VOLTAGE) / 4096.0\n tempC = (voltage - 0.5) * 100.0\n tempF = (tempC * 9.0 / 5.0) + 32.0\n\n return tempF", "title": "" }, { "docid": "6db392a8ab6ee525395c15b746e53621", "score": "0.5887536", "text": "def brightnessconversions(self):\n # Brightnes unit of file.\n bunit = fits.getval(self.filename, 'bunit', 0).lower()\n bunit = bunit.replace('/', '').replace('per', '').replace(' ', '')\n if bunit not in ['jypixel', 'k']:\n raise ValueError('Cannot read brightness unit.')\n # Conversion dictionary.\n tounit = {}\n if bunit == 'jypixel':\n tounit['jypixel'] = 1.\n tounit['mjypixel'] = 1e3\n tounit['k'] = self.JanskytoKelvin()\n else:\n tounit['k'] = 1.\n tounit['jypixel'] = 1. 
/ self.JanskytoKelvin()\n tounit['mjypixel'] = 1e-3 / self.JanskytoKelvin()\n return bunit, tounit", "title": "" }, { "docid": "c280c1f38ed63eeb7e46b6bb4da68f78", "score": "0.5882336", "text": "def temperature(self):\n reg_value = self.device.readBytesReg(0x7c, 0xa2)\n return self._calc_temp(reg_value[0], reg_value[1])", "title": "" }, { "docid": "a60546817ade93efd0d924fe405175c3", "score": "0.58749783", "text": "def get_brightness(p):\n return (299.0 * p[0] + 587.0 * p[1] + 114.0 * p[2]) / 1000.0", "title": "" }, { "docid": "a19d2adc06c5d3fb0161adc0ee79ee68", "score": "0.58733046", "text": "def get_brightness(self):\n icon = Icons.Icons() # Icon\n\n # Opening file containing current brightness\n try:\n brightcur_file = open(BRIGHT_C)\n brightcur = brightcur_file.read()\n brightcur_file.close()\n except:\n sys.stdout.write(\"^fg(%s)!E! BrightCur^fg()\" % Colors.CL_BASE08)\n\n # Opening file containing maximal brightness\n try:\n brightmax_file = open(BRIGHT_M)\n brightmax = brightmax_file.read()\n brightmax_file.close()\n except:\n sys.stdout.write(\"^fg(%s)!E! BrightMax^fg()\" % Colors.CL_BASE08)\n\n # Casting into integers\n brightcur_level = int(brightcur)\n brightmax_level = int(brightmax)\n\n # Calculating brightness percentage and printing information\n brightness = (100 * brightcur_level) / brightmax_level\n icon.show_icon(\"brightness\") \n sys.stdout.write(\"%d%%\" % brightness)", "title": "" }, { "docid": "042b95b7b0a1e17ecc74ffd5424d0d0a", "score": "0.5873213", "text": "def temperature(self):\r\n return self._reg_temperature.value", "title": "" }, { "docid": "1aaee8a325ccba8c74713a0150c79ee9", "score": "0.5868254", "text": "def color_temperature_kelvin(self):\n return self._last_reading.get('color_temperature')", "title": "" }, { "docid": "0ed8aec600dc9b3142c2261c015fcd64", "score": "0.58679515", "text": "def get_color_value(self):\n color_value = self.color_sensor.reflected_light_intensity\n self.mqtt_client.send_message('get_power_up', [color_value])", "title": "" }, { "docid": "16f41fc024cdfa5de41ff9cd941ea3e6", "score": "0.5849557", "text": "def get_temperature(self):\n temp=lib.PCO_GetTemperature(self.handle)\n return (temp.ccd/10.,temp.cam,temp.pow)", "title": "" }, { "docid": "2da17857f03a4ab455bb0a680f11f12d", "score": "0.58258367", "text": "def temperature_c(self):\n return round((self.temperature - 32) / 9.0 * 5.0, 1)", "title": "" }, { "docid": "8fce2ef537b11b68edb3a227774fe59f", "score": "0.5813892", "text": "def specific_heat(self):\n return np.var(self.energy_hist) / self.temp ** 2", "title": "" }, { "docid": "94af0da8d5b79841fb152baaf566cfe2", "score": "0.58064777", "text": "def get_brightness(image_path):\n\n try:\n im = Image.open(image_path)\n stat = ImageStat.Stat(im)\n r,g,b = stat.mean\n return int(math.sqrt(0.241*(r**2) + 0.691*(g**2) + 0.068*(b**2)))\n except Exception as e:\n print \"Cannot get brightness: %s\" % e\n return 0", "title": "" }, { "docid": "2163fb08a56edf546306e101f71a8b77", "score": "0.57801753", "text": "def brightness(self):\n return self._bright", "title": "" }, { "docid": "e2cdf4762b0e7a1f6150aca0f59cbf9b", "score": "0.5777002", "text": "def getOnboardTemp(self):\n\n\t\ttry:\n\t\t\ttemp = float(self.inst.query('SENS1:TEMP:DATA?'))\n\t\texcept:\n\t\t\twarnings.warn('Could not read onboard probe temperature')\n\t\t\ttemp = 999.9\n\t\treturn temp", "title": "" }, { "docid": "445ab0eb3744fe93ca3dd697788dd517", "score": "0.57750857", "text": "def temperature(self):\n # Get the actual temperature using the formule given in the\n # MPU-6050 
Register Map and Descriptions revision 4.2, page 30\n return (self._read_temp_raw() / 340.0) + 36.53", "title": "" }, { "docid": "c307c2dac8d8c0ce8dc066069d488b4c", "score": "0.5773482", "text": "def brightness(self) -> int | None:\n value = self.executor.select_state(OverkizState.CORE_LIGHT_INTENSITY)\n if value is not None:\n return round(cast(int, value) * 255 / 100)\n\n return None", "title": "" }, { "docid": "54fdd3b6619c2a4bdac43080e25c24fa", "score": "0.5769774", "text": "def calculate_thermal_diffusivity(self):\n diffusivity_array = np.zeros((len(self.temperature_profile), 2))\n g10_thermal_conductivity = self.calculate_thermal_conductivity()\n g10_cv = self.calculate_volumetric_heat_capacity()\n diffusivity_array[:, 0] = self.temperature_profile\n diffusivity_array[:, 1] = g10_thermal_conductivity[:, 1] / g10_cv[:, 1]\n return diffusivity_array", "title": "" }, { "docid": "8be77961a476fd0e56b582c62bbc5be4", "score": "0.57695013", "text": "def temperature(self):\n return sum(self.readings)/len(self.readings)", "title": "" }, { "docid": "ebf78cfb39654e26a8c5143b673d85d6", "score": "0.5769122", "text": "def __temperature (self, step, aTemp = 0):\n if (aTemp == 0):\n aTemp = self.mMaxTemperature\n alpha = 0.999\n return aTemp * pow(alpha, step)", "title": "" }, { "docid": "977792750adca1ebb525601e8a7a8276", "score": "0.57673043", "text": "def temperature(self):\n self._convert_temp()\n return self._read_temp()", "title": "" }, { "docid": "2ab4c4277be0dbc27a662ef61909cc10", "score": "0.5759865", "text": "def target_temperature(self):\n return 21.0", "title": "" }, { "docid": "44ebd96e01710ab6f24704c79cc6a1bc", "score": "0.57517606", "text": "def BD_Temperature(self):\n\t\ttemperature = database.databaseThermocoupleRegulation.getLastMeasureByName(\"Pool Temperature Sensor\")[0]\n\t\t#print(\"Temperature = {} \".format(temperature))\n\t\treturn temperature", "title": "" }, { "docid": "9365ff783eec6b49998778c8b37a094d", "score": "0.57401156", "text": "def temperature(self) -> int:\n bytes = i2clib.byteRead(self.i2c,ADDR,TEMP_H,len_array=2)\n raw = struct.unpack(\">H\",bytes)[0] # 2 bytes, big-endian, unsigned\n\n return 510 * (raw / 65535) # conversion according to datasheet", "title": "" }, { "docid": "01fec665140cbff41abe85e4208381db", "score": "0.5738794", "text": "def brightness(self) -> int:\n if not self.is_dimmable: # pragma: no cover\n raise SmartDeviceException(\"Bulb is not dimmable.\")\n\n light_state = self.get_light_state()\n if not self.is_on:\n return int(light_state[\"dft_on_state\"][\"brightness\"])\n else:\n return int(light_state[\"brightness\"])", "title": "" }, { "docid": "98e6e7ac83df5e190b9b81a6af476c32", "score": "0.5734448", "text": "def augment_brightness(image):\n image = image.astype(np.uint8)\n image1 = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n random_bright = .25 + np.random.uniform()\n # print(random_bright)\n image1[:, :, 2] = image1[:, :, 2] * random_bright\n image1 = cv2.cvtColor(image1, cv2.COLOR_HSV2RGB)\n return image1", "title": "" }, { "docid": "d7a95c120c4bcfcc611ae84af822de7c", "score": "0.5730748", "text": "def temperature_c(self):\n try:\n return round((self.temperature - 32) / 9.0 * 5.0, 1)\n except TypeError:\n return None", "title": "" }, { "docid": "8dfdf83eb0d37bb581b9c6d8a91c2b16", "score": "0.5729529", "text": "def gettemperature(result,base,interp):\n \n\n temperaturechain=interp['temperature'](result['pdfs'])\n hist,edge=np.histogram(temperaturechain,bins=base)\n hist=hist/(1.*len(temperaturechain))\n\n return 
(hist,np.median(temperaturechain))", "title": "" }, { "docid": "18b135195c42b6298201ed411816f9b0", "score": "0.5727044", "text": "def thermal_sensor(image):\n h, w, _ = image.shape\n res = cv2.applyColorMap(image, cv2.COLORMAP_JET)\n return res", "title": "" }, { "docid": "269e52da2eee54665d04e018896ed255", "score": "0.57219124", "text": "def temperature(self):\n v = self.entry['temperature.gpu']\n return int(v) if v is not None else None", "title": "" }, { "docid": "7ae30ff6c7cd435bbbf59de3d75ba58c", "score": "0.5719221", "text": "def skin_temperature(upward_longwave_irradiance_W_m2, emissivity=1.0):\n sigma = 5.673067e-8\n return (upward_longwave_irradiance_W_m2 / (emissivity * sigma)) ** 0.25", "title": "" }, { "docid": "78d5b8a52e6aad528f5258e090873d63", "score": "0.57136106", "text": "def brightness_255(self):\n return self.raw_data[\"params\"]['channel0']", "title": "" }, { "docid": "be917119f3c21b7c005efe314cc0085a", "score": "0.57038975", "text": "def brightness(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"brightness\")", "title": "" }, { "docid": "7ee19851ce0e0e15f1513552faa7be7a", "score": "0.57026446", "text": "def temperature(self):\n Ti = self.initial_temperature\n Tm = self.min_temperature\n decay = log(2)/self.temperature_half_life\n return Tm + (Ti - Tm) * exp( -decay * self.total_steps )", "title": "" }, { "docid": "b0c88fd51e55c7c2df05015283f8e54b", "score": "0.5696974", "text": "def read_temperature(self):\n temp_celsius = self._bno.read_temp()\n return {'c': float(temp_celsius), 'f': (float(temp_celsius) * 9.0/5.0) + 32.0}", "title": "" }, { "docid": "512c20a8103de41d05998e6939247feb", "score": "0.5691923", "text": "def current_temperature(self):\n return self.get(ATTR_TEMPERATURE)", "title": "" }, { "docid": "a311a79dc75b684cfe3dfb238506fc3e", "score": "0.5687041", "text": "def readtemperature(self):\r\n\t\tdata0 = bus.read_byte(SI7013_A20_DEFAULT_ADDRESS)\r\n\t\tdata1 = bus.read_byte(SI7013_A20_DEFAULT_ADDRESS)\r\n\t\t\r\n\t\t# Convert the data\r\n\t\tcTemp = ((data0 * 256 + data1) * 175.72 / 65536.0) - 46.85\r\n\t\tfTemp = cTemp * 1.8 + 32\r\n\t\t\r\n\t\treturn {'c' : cTemp, 'f' : fTemp}", "title": "" }, { "docid": "cb18c612e6776ce7c359da4c541ec02e", "score": "0.5682802", "text": "def temperature(self):\n\n return int(self.temperature_sensor)", "title": "" }, { "docid": "556f7255f76ee31bb6c2f265996b216e", "score": "0.5679193", "text": "def get_temperature_and_humidity(self):\n out = {}\n while len(out) < 2:\n string = self.dev.read(8)\n if string.startswith('\\x03'):\n frac, = struct.unpack('H', string[1:])\n out['temperature'] = -200 + frac * 0.1\n elif string.startswith('\\x02'):\n frac, = struct.unpack('B', string[1:])\n out['humidity'] = frac * 0.5\n return out", "title": "" }, { "docid": "db5f78e23788116c7bbed086380b9894", "score": "0.5675316", "text": "def ideal_resting_temperature(self, water, flour, starter, scale='C'):\n ideal_temp = 72\n water = 72 - flour - starter\n if scale == 'C':\n return 72", "title": "" }, { "docid": "4d58620fe46be27bfd4ca94c427d36d7", "score": "0.567377", "text": "def read_temperature(self):\n self.set_mode(OpMode.Standby)\n reg = Temperature1()\n reg.start = True\n self.write_register(reg)\n wait_for(lambda: not self.read_register(Temperature1).running)\n\n return 168 - self.spi_read(Register.TEMP2)", "title": "" }, { "docid": "6a0c158e4d29cbb8c1266d4db566e7af", "score": "0.5671043", "text": "def intensity_to_temperature(tif_filename, start_frame_temps, end_frame_temps, x_crop_left_temps, 
x_crop_right_temps,\n y_crop_bottom_temps, y_crop_top_temps):\n\n intensity_cropped = cropping_for_intensity(tif_filename, start_frame_temps, end_frame_temps, x_crop_left_temps, x_crop_right_temps,\n y_crop_bottom_temps, y_crop_top_temps)\n\n # print(np.max(intensity_cropped))\n # print(np.min(intensity_cropped))\n\n threshold = 16290\n\n for i in range(len(intensity_cropped)):\n intensity_cropped[i][intensity_cropped[i] > threshold] = 16294 # if temp is above smolder threshold set to 0\n\n\n # below is a black body calibration curve used to convert photon counts to temperatures\n # this was found experimentally using the exact setup to ensure temps are accurate\n # the following black body test was performed with an emissivity around 0.98\n emissivity = 0.85\n # bb_temp = np.arange(50, 600, 50) #[50,100,150,200,250,300,350,400,450,500,550,600]\n bb_temp = [50*(f+1) for f in range(12)]\n print(bb_temp)\n\n bb_intensity = [828, 926, 1185, 1648, 2404, 3510, 5166, 7270, 10029, 13695, 16273, 16294]\n bb_temp_ary1 = np.asarray(bb_temp) # Converts BB_temps into an array\n bb_temp_ary1_K = bb_temp_ary1 + 273.15 # converts from Celsius to Kelvin\n bb_intensity_ary = np.asarray(bb_intensity)\n bb_temp_ary = (bb_temp_ary1_K * 0.98) / emissivity\n bb_temp_ary = bb_temp_ary - 273.15 # converts back from K to C\n\n #print('linear interpolation failed')\n\n linear_function = interpolate.interp1d(bb_intensity, bb_temp_ary) # interpolates BB profile to find temperatures\n # linear_function = InterpolatedUnivariateSpline(bb_intensity, bb_temp_ary, k=3) # interpolates BB profile to find temperatures\n #print('temperatures failed')\n temperatures = linear_function(intensity_cropped)\n\n\n\n return temperatures", "title": "" }, { "docid": "ab4c60b9fa204366cf5932360b9105a2", "score": "0.5665432", "text": "def getTemperature(self, servo):\n\n temp = self.ax12.readTemperature(servo)\n return temp", "title": "" }, { "docid": "95315e762fab6b49436965f2d2968f3e", "score": "0.5665216", "text": "def get_temperature(self):\n\n temperature = sum([(self._pin.read()/1024) * 3300 * 0.1 for i in range(500)])/500\n\n return json.dumps({\"code\": 0, \"temperature\": temperature})", "title": "" }, { "docid": "5ac5a324c454e0bce21da25e349de90b", "score": "0.56644315", "text": "def thermal_conductivity(self):\n return 1.0 * units.watt / (units.meter * units.kelvin)", "title": "" }, { "docid": "8cc38a7694b505905b122f17cbe0b247", "score": "0.5662459", "text": "def get_temperature(self, root):\n point_log_id = self.get_point_log_id(root, 'temperature')\n measurement = self.get_measurement_from_point_log(root, point_log_id)\n\n return float(measurement)", "title": "" }, { "docid": "912d59b5a8cdcccb1652da20d07e14dd", "score": "0.5658729", "text": "def ReadTemperature(self):\n\t\t# return sensor.MPL3115A2_ReadTemperature()\n\t\tt_MSB = self._device.readU8(OUT_T_MSB)\n\t\tt_LSB = self._device.readU8(OUT_T_LSB)\n\t\ttemplsb = float((t_LSB >>4) / 16.0) \t\t\t#temp, fraction of a degree\n\t\ttemperature = float(t_MSB + templsb)\t\t\t#\n\t\treturn temperature", "title": "" }, { "docid": "78ea61003d9c56035c2ccc4670329b9b", "score": "0.5657752", "text": "def brightness_img(image):\n\tbr_img = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n\tcoin = np.random.randint(2)\n\tif coin == 0:\n\t random_bright = 0.2 + np.random.uniform(0.2, 0.6)\n\t br_img[:, :, 2] = br_img[:, :, 2] * random_bright\n\tbr_img = cv2.cvtColor(br_img, cv2.COLOR_HSV2RGB)\n\treturn br_img", "title": "" }, { "docid": "add4ed8568f74f32a021183b765d5526", "score": "0.5641897", 
"text": "def getTemperatureInFahrenheit(self):\n return self.__bmp085Temp.getTemperatureInFahrenheit()", "title": "" }, { "docid": "6bcc28f1afa81ece909417d6c4a0484f", "score": "0.56233186", "text": "def readtemperature(self):\n\t\tdata = bus.read_i2c_block_data(TMP007_DEFAULT_ADDRESS, TMP007_REG_TOBJ, 2)\n\t\t\n\t\t# Convert the data to 14-bits\n\t\tcTemp = ((data[0] * 256 + (data[1] & 0xFC)) / 4)\n\t\tif cTemp > 8191 :\n\t\t\tcTemp -= 16384\n\t\tcTemp = cTemp * 0.03125\n\t\tfTemp = cTemp * 1.8 + 32\n\t\t\n\t\treturn {'c' : cTemp, 'f' : fTemp}", "title": "" }, { "docid": "9c22b58ec96c04752fdf9336cb9f0376", "score": "0.5620017", "text": "def radiated_heat_loss(\n ambient_temperature,\n conductor,\n conductor_temperature,\n):\n return (\n 17.8\n * conductor.diameter\n * conductor.emmisivity\n * (\n ((conductor_temperature + 273) / 100) ** 4\n - ((ambient_temperature + 273) / 100) ** 4\n )\n )", "title": "" }, { "docid": "02307af20bb361a8076b6dfdc3fa8c35", "score": "0.56173396", "text": "async def get_sensor_info(self):\n resp = await api.request_camera_sensors(\n self.sync.blink, self.network_id, self.camera_id\n )\n try:\n self.temperature_calibrated = resp[\"temp\"]\n except (TypeError, KeyError):\n self.temperature_calibrated = self.temperature\n _LOGGER.warning(\"Could not retrieve calibrated temperature.\")", "title": "" }, { "docid": "5773053c7376b4de69770bebf64bd36d", "score": "0.5606421", "text": "def read_temp(self):\r\n\t\tdata0 = bus.read_byte_data(0x39, 0x00)\r\n\t\tdata1 = bus.read_byte_data(0x39, 0x02)\r\n\t\t\r\n\t\t# Convert the data to 12-bits\r\n\t\ttemp = ((data0 * 256) + (data1 & 0xF0)) / 16\r\n\t\tif temp > 2047 :\r\n\t\t\ttemp -= 4096\r\n\t\tcTemp = temp * 0.0625;\r\n\t\tfTemp = cTemp * 1.8 + 32\r\n\t\t\r\n\t\treturn {'c' : cTemp, 'f' : fTemp}", "title": "" }, { "docid": "5350c8f8d6df5d129aa873de91c811c2", "score": "0.56056106", "text": "def getdarkcurrent(self,exten):\n darkrate = 0.005 # electrons / s\n if self.proc_unit == 'native':\n darkrate = darkrate / self.getGain(exten) #count/s\n\n try:\n chip = self._image[0]\n darkcurrent = chip.header['DARKTIME'] * darkrate\n\n except:\n msg = \"#############################################\\n\"\n msg += \"# #\\n\"\n msg += \"# Error: #\\n\"\n msg += \"# Cannot find the value for 'DARKTIME' #\\n\"\n msg += \"# in the image header. WFPC2 input #\\n\"\n msg += \"# images are expected to have this header #\\n\"\n msg += \"# keyword. 
#\\n\"\n msg += \"# #\\n\"\n msg += \"# Error occured in the WFPC2InputImage class#\\n\"\n msg += \"# #\\n\"\n msg += \"#############################################\\n\"\n raise ValueError(msg)\n\n return darkcurrent", "title": "" }, { "docid": "3bd9ee957c184b3d43820c2f840f9daa", "score": "0.55980223", "text": "def temperature(self) -> int:\n return self._temperature", "title": "" }, { "docid": "3bd9ee957c184b3d43820c2f840f9daa", "score": "0.55980223", "text": "def temperature(self) -> int:\n return self._temperature", "title": "" }, { "docid": "16197c1b62e28331cb14513c297dbed4", "score": "0.55948305", "text": "def getTemperatureInKelvin(self):\n return self.__bmp085Temp.getTemperatureInKelvin()", "title": "" }, { "docid": "9dace6db54a47cf98035dedd0d945dfb", "score": "0.55912864", "text": "def getProbeTemp(self):\n\n\t\ttry:\n\t\t\ttemp = float(self.inst.query('SENS3:TEMP:DATA?'))\n\t\texcept:\n\t\t\twarnings.warn('Could not read outboard probe temperature')\n\t\t\ttemp = 999.9\n\t\treturn temp", "title": "" }, { "docid": "ad98e0382f86824af0e06fdac75d80de", "score": "0.55885684", "text": "def brightness(self):\n return self.session.light.get_brightness(self.node_id)", "title": "" }, { "docid": "4d1ca40b010ecf38fe6017d6c3d04ac0", "score": "0.5586958", "text": "def thermal_conductivity_of_air(ambient_temperature, conductor_temperature):\n Tfilm = (conductor_temperature + ambient_temperature) / 2\n\n return 2.424e-2 + 7.477e-5 * Tfilm - 4.407e-9 * Tfilm ** 2", "title": "" }, { "docid": "31c0a4e3527e15762b07656da23b3e46", "score": "0.55759996", "text": "def color_temp(self):\n return self._color_temp", "title": "" }, { "docid": "3e2c56a403ed87f668804d70def40251", "score": "0.55716884", "text": "def brightness(name, brightness, colour_temp):\n dev_props = get_device_from_config(config, name)\n dev = BulbDevice(dev_props[\"id\"], dev_props[\"ip\"], dev_props[\"key\"])\n if colour_temp is None:\n dev.set_brightness(brightness)\n else:\n dev.set_white(brightness=brightness, colour_temp=colour_temp)", "title": "" }, { "docid": "471c49fb72cabc8e6f9fddd5cc882c75", "score": "0.5563528", "text": "def getTemp(self):\r\n self.i2c.write(self.I2C_ADDRESS, [self.CMD_TEMP])\r\n bbio.delay(50)\r\n msb, lsb, crc = self.i2c.read(self.I2C_ADDRESS, 3)\r\n\r\n raw_value = (msb<<8) | lsb\r\n assert self.checkCRC(raw_value, crc), \"received invalid data\"\r\n # Should that really throw an error?\r\n\r\n # Conversion formula from datasheet:\r\n return -46.85 + 175.72 * (raw_value/65536.0)", "title": "" }, { "docid": "9431a2e4ba38f173017f61d1eeab88d7", "score": "0.5559558", "text": "def brightness(self) -> int:\n return int((self._func_channel.dimLevel or 0.0) * 255)", "title": "" }, { "docid": "2fc32cfb378cfec55110634a84c2eb21", "score": "0.55528134", "text": "def target_temperature(self):\n\n if self.controller.set_point.status is None:\n return MIN_TEMP\n\n if self.hvac_mode == HVAC_MODE_HEAT:\n return self.controller.set_point.status.heating_set_point\n else:\n return self.controller.set_point.status.cooling_set_point", "title": "" }, { "docid": "1cff07cc322c1abde6b665cdf006f7d6", "score": "0.5552621", "text": "def temperature(self) -> float | None:\n if not self._requested_state:\n return None\n return self._state[\"temperature\"]", "title": "" }, { "docid": "04ba9e0e0cc35ff764f2f19e4d7f566a", "score": "0.5542178", "text": "def T(self):\n paramTemp = self.getParam('temperature')\n\n if not paramTemp is np.nan:\n return paramTemp\n elif ed_params.estimateMissingValues:\n self.flags.addFlag('Calculated 
Temperature')\n return self.calcTemperature()\n else:\n return np.nan", "title": "" }, { "docid": "55533bace37964f7888ce0f2494ea223", "score": "0.55379486", "text": "def on_temperature(self, value):\n pass", "title": "" }, { "docid": "6387216242e22f8a7b53820621663aee", "score": "0.5534451", "text": "def target_temperature(self):\n return self.get(ATTR_TARGET_TEMPERATURE)", "title": "" }, { "docid": "f8f7b2f0eaa644a1e9316526242510ac", "score": "0.55313236", "text": "def light(self):\n self.ldrvalue[0] = float(self.LDR_front)\n self.ldrvalue[1] = float(self.LDR_top)\n return self.ldrvalue", "title": "" }, { "docid": "02d720bdcc5f58db58740cf747d1fc77", "score": "0.5526033", "text": "def get_temp(self):\n success, result = self.connection.send_command(self.device_id, 'measureTemperature', [])\n if not success:\n raise Exception(result)\n return {'temp': float(result)}", "title": "" }, { "docid": "818f65a9ae57f36e740f024775d659cd", "score": "0.5524115", "text": "def coolant_temp(value):\n # The data returned in the OBD response is in hexadecimal with a zero\n # offset to account for negative temperatures. To return the current\n # temperature in degrees Celsius, we must first convert to decimal and\n # then subtract 40 to account for the zero offset.\n value = __digit(value) - 40\n # English - > F\n if unit_english:\n # C - > F\n value = value * 9 / 5 + 32\n return value", "title": "" } ]
cffab3920c4d83557a98391a400c30c3
Returns a queryset of users who joined within a given timeframe
[ { "docid": "b8a9691c85d8128d7a7b7c46ce33ac75", "score": "0.5213686", "text": "def user_ids(self):\n return just_joined(\n minutes=self.minutes_since_signup,\n days=self.days_since_signup\n )", "title": "" } ]
[ { "docid": "2aa2474f62b89a7863b309674faace28", "score": "0.73507196", "text": "def users_registered_within_period(users, window=1):\n threshold = utcnow() - datetime.timedelta(hours=window)\n filtered = users.filter(\n date_joined__gte=threshold\n ).order_by(\n '-date_joined'\n )\n return filtered", "title": "" }, { "docid": "442c423f052b90f28a3d9898fc40c03b", "score": "0.68437624", "text": "def just_joined(minutes=None, days=None):\n User = get_user_model()\n users = User.objects.none()\n if days:\n # Return all users who joined within the given day\n since = timezone.now() - timedelta(days=days)\n joined_on = dateutils.date_range(since)\n users = User.objects.filter(date_joined__range=joined_on)\n elif minutes:\n # Return all users who've joined in the past `minutes` time.\n since = timezone.now() - timedelta(minutes=minutes)\n users = User.objects.filter(date_joined__gte=since)\n return users.values_list(\"id\", flat=True)", "title": "" }, { "docid": "a0a7e27f1487761fee32e70532425030", "score": "0.64992833", "text": "def users_currently_at_local_time(users, start_hour, end_hour, isoweekdays=None):\n from htk.utils.datetime_utils import get_timezones_within_current_local_time_bounds \n timezones = get_timezones_within_current_local_time_bounds(start_hour, end_hour, isoweekdays=isoweekdays)\n filtered = users.filter(\n profile__timezone__in=timezones\n )\n return filtered", "title": "" }, { "docid": "ac886686ab3c5774f764238093a65ab5", "score": "0.6479726", "text": "def users_logged_in_within_period(users, window=1):\n threshold = utcnow() - datetime.timedelta(hours=window)\n filtered = users.filter(\n last_login__gte=threshold\n ).order_by(\n '-last_login'\n )\n return filtered", "title": "" }, { "docid": "034612dc7e49e47ef4fc4ba9e19f930f", "score": "0.6044797", "text": "def active_users(self, interval):\n after = timezone.now() - datetime.timedelta(interval)\n queryset = self.all()\n queryset = queryset.filter(history_log__action__in=ACTIVE_USER_ACTIONS,\n history_log__created_at__gte=after).distinct()\n return queryset", "title": "" }, { "docid": "66ff0179175dfc3ddd5dbae451048e73", "score": "0.5642026", "text": "def get_current_members(self):\n today = datetime.date.today().isoformat()\n\n return Person.objects.filter(Q(memberships__start_date='') |\n Q(memberships__start_date__lte=today),\n Q(memberships__end_date='') |\n Q(memberships__end_date__gte=today),\n memberships__organization_id=self.id\n )", "title": "" }, { "docid": "9ad25971b063df403bb82397eb56c3ef", "score": "0.55873615", "text": "def get_logged_in_users():\n return User.objects.exclude(session=None).filter(session__expire_date__gte=timezone.now()).distinct()", "title": "" }, { "docid": "7bdfe443f2180fbe560f0db7d9a36b8d", "score": "0.5557368", "text": "def join_date_read(self, username=None, uid=None):\n return self._get_profile(username, uid)['joined']", "title": "" }, { "docid": "f45589f4ad3a961b17685e20f3141fe6", "score": "0.55126965", "text": "def get_members(self):\n\n active_subscriptions = self.subscriptions.all().filter(\n models.Q(end_date__isnull=True)\n | models.Q(end_date__gte=datetime.date.today()))\n\n return Member.objects.filter(subscriptions__in=active_subscriptions)\\\n .order_by('last_name')", "title": "" }, { "docid": "c418fe105153d67a7cd51d5404e31189", "score": "0.5482295", "text": "def users_registered_on_date(self, date):\n return get_user_model().objects.all().filter(date_joined__contains=date).count()", "title": "" }, { "docid": "64326b8bcdea971942fd9ff4d453ad0b", "score": "0.5466527", "text": 
"def get_all_users(hours=0):\n q = DataUser.query\n if hours:\n since = datetime.utcnow() - timedelta(hours=hours)\n q = q.filter(DataUser.registration_date >= since)\n return q.all()", "title": "" }, { "docid": "5e8d3664d7406c2ad4e06c6c06d55c2d", "score": "0.54498637", "text": "def get_queryset(self):\n session_id = self.kwargs['session_id']\n return Attendance.objects.get_session_users(session_id)", "title": "" }, { "docid": "5213939de383a152dbef997b68ab3d80", "score": "0.5445458", "text": "def get_participating_users(event):\n registrations = (Registration.query\n .filter(Registration.is_active,\n ~RegistrationForm.is_deleted,\n Registration.user_id.isnot(None),\n RegistrationForm.event_id == event.id)\n .filter(~UserSetting.query\n .filter(UserSetting.user_id == Registration.user_id,\n UserSetting.module == 'plugin_outlook',\n UserSetting.name == 'enabled',\n UserSetting.value == db.func.to_jsonb(False))\n .correlate(Registration)\n .exists())\n .join(Registration.registration_form)\n .options(joinedload(Registration.user)))\n return {reg.user for reg in registrations}", "title": "" }, { "docid": "98882075a1660996b1329ed920c85410", "score": "0.5313214", "text": "def get_join():\n post_id = int(request.vars.post_id)\n # We get directly the list of all the users who liked the post.\n # rows = db(db.user_join.post_id == post_id).select(db.user_join.user_email)\n rows = db(db.user_join.post_id == post_id).select()\n\n # If the user is logged in, we remove the user from the set.\n joined_set = set([r for r in rows])\n # the if statement is so that you do not display yourself\n # if auth.user:\n # joined_set -= {auth.user.email}\n joined_list = list(joined_set)\n joined_list.sort()\n # We return this list as a dictionary field, to be consistent with all other calls.\n return response.json(dict(joined=joined_list))", "title": "" }, { "docid": "e5a5d96ec7be4f7720f961816d6dc750", "score": "0.5222256", "text": "def get_recipients(self):\n from bluebottle.members.models import Member\n from bluebottle.members.models import MemberPlatformSettings\n\n year = now().year\n do_good_hours = timedelta(hours=MemberPlatformSettings.load().do_good_hours)\n\n members = Member.objects.annotate(\n hours=Sum(\n 'contributor__contributions__timecontribution__value',\n filter=(\n Q(contributor__contributions__start__year=year) &\n Q(contributor__contributions__status__in=['new', 'succeeded'])\n )\n ),\n ).filter(\n Q(hours__lt=do_good_hours) | Q(hours__isnull=True),\n is_active=True,\n receive_reminder_emails=True\n ).distinct()\n return members", "title": "" }, { "docid": "8de5323b8280405cb6a07efd86aec2e7", "score": "0.518419", "text": "def filter_queryset(self, queryset):\n queryset = super(TimeSlotViewSet, self).filter_queryset(queryset)\n if self.request.user.is_staff:\n return queryset\n return queryset.filter(period__is_active=True)", "title": "" }, { "docid": "b00fd828d48d0d3559c08341148291d7", "score": "0.5167176", "text": "def joined(self, request):\n\n # A fan wants to sign up for a watchparty\n if request.method == \"GET\":\n\n joined_watch_parties = []\n fan = Fan.objects.get(user=request.auth.user)\n\n watchparties = WatchParty.objects.all()\n for watchparty in watchparties:\n\n try:\n WatchPartyFan.objects.get(\n watchparty=watchparty.id, fan=fan.id)\n watchparty.joined = True\n joined_watch_parties.append(watchparty)\n except WatchPartyFan.DoesNotExist:\n watchparty.joined = False\n\n serializer = WatchPartySerializer(\n joined_watch_parties, many=True, context={'request': request})\n return 
Response(serializer.data)", "title": "" }, { "docid": "645cc713bc39c43b5b6b0079cc857fbd", "score": "0.516675", "text": "def get_active(cls, session, date=None):\n from datetime import datetime\n date = date or datetime.now().date()\n try:\n return cls.find(session,\n join=(cls.user),\n where=(cls.date_from <= date,\n cls.date_to >= date,\n cls.status == 'APPROVED_ADMIN',),\n order_by=cls.user_id)\n except:\n return []", "title": "" }, { "docid": "810bcd20db073373b45ed892b0649b16", "score": "0.51666397", "text": "def get_queryset(self):\n queryset = Room.objects.all()\n username = self.request.query_params.get('username', None)\n if username is not None:\n queryset = queryset.filter(participants__in=[username])\n return queryset", "title": "" }, { "docid": "19b021f4c6184d87d5f7b0a1b0e331e0", "score": "0.5108792", "text": "def list_user_attendances(request):\n sessions = request.user.profile.sessions_attending\\\n .only('title', 'start', 'end')\\\n .prefetch_related('location')\\\n .order_by('start')\\\n .all()\n return TemplateResponse(\n request=request,\n context={\n 'sessions': sessions,\n },\n template='schedule/attending_sessions.html'\n )", "title": "" }, { "docid": "cfccd012e2fb53fd80906c35d0f58097", "score": "0.51025677", "text": "def get_interested_users(self,crypto,threshold,considered_date):\n interestedusers=self.cursor.execute(f\"\"\"SELECT chat_id from dbo.users WHERE (\n \"{crypto}\"<{abs(threshold)} AND active=1 AND \n ((\"latest_update_{crypto}\" IS NULL) OR (\"latest_update_{crypto}\"<{considered_date}))\n );\"\"\").fetchall()\n return interestedusers", "title": "" }, { "docid": "0af7a7c2ff507a011cb41b0a40c40512", "score": "0.50716144", "text": "def get_active_users(user_data, last_active_days=180):\n\n now = datetime.datetime.now()\n active_threshold = now - datetime.timedelta(days=last_active_days)\n for user in user_data:\n last_seen_at = datetime.datetime.fromtimestamp(\n int(user['last_seen_at']))\n if last_seen_at > active_threshold:\n print \"{: <20} {: <20}\".format(user['username'], str(user['id']))", "title": "" }, { "docid": "49496d891350971d61dffebe005d7a57", "score": "0.50565773", "text": "def owned_by(self, user=None, future=True):\n today = datetime.date.today()\n\n if user.is_staff:\n retval = self.all()\n else:\n retval = self.filter(author=user)\n\n if future:\n retval = retval.filter(end_date__gte=today)\n\n return retval.order_by('-start_date') #order by descensing `-`", "title": "" }, { "docid": "df48b2ce53b755b173cb3d1cae039b43", "score": "0.50464857", "text": "def active_users(account, days_back):\n query_string = f\"\"\"SELECT DISTINCT useridentity.arn\n FROM behold\n WHERE account = '{account}'\n AND useridentity.type = 'IAMUser'\n AND useridentity.arn IS NOT NULL\n AND from_iso8601_timestamp(eventtime) > date_add('day', -{days_back}, now());\"\"\"\n return (query_string, f\"athena_results/active_users/{account}\")", "title": "" }, { "docid": "ebc8dc2724a5c890d410394f37db8455", "score": "0.50415313", "text": "def get_tournaments(self,\n region: str = None,\n year: int = None,\n tournament_level: str = 'Primary',\n is_playoffs: bool = None,\n **kwargs) -> List[dict]:\n # We need to cast is_playoffs as an integer for the cargoquery\n if is_playoffs is not None:\n is_playoffs = 1 if is_playoffs else 0\n\n # This generated the WHERE part of the cargoquery\n where_string = ''.join([\" AND Tournaments.{}='{}'\".format(field_name, value) for field_name, value in\n [('Region', region),\n ('Year', year),\n ('TournamentLevel', tournament_level),\n 
('IsPlayoffs', is_playoffs)] if value is not None])[5:] # Cutting the leading AND\n\n return self._cargoquery(tables='Tournaments, Leagues',\n join_on=\"Tournaments.League = Leagues.League\",\n fields='Tournaments.Name = name, '\n 'Tournaments.DateStart = date_start, '\n 'Tournaments.Date = date_end, '\n 'Tournaments.Region = region, '\n 'Tournaments.League = league, '\n 'Leagues.League_Short = league_short, '\n 'Tournaments.Rulebook = rulebook, '\n 'Tournaments.TournamentLevel = tournament_level, '\n 'Tournaments.IsQualifier = is_qualifier, '\n 'Tournaments.IsPlayoffs = is_playoffs, '\n 'Tournaments.IsOfficial = is_official, '\n 'Tournaments.OverviewPage = overview_page',\n order_by=\"Tournaments.Date\",\n where=where_string,\n **kwargs)", "title": "" }, { "docid": "61a80aab450c0c5fd33ac195e0d75264", "score": "0.50391746", "text": "def get_queryset(self):\n return ApplyUser.objects.order_by('-apply_date')[:20]\n return ApplyUser.objects.filter(apply_date__lte=timezone.now(a)).order_by('-apply_date')[:20]", "title": "" }, { "docid": "56434330f9f5a83d0530f0a9c4f87685", "score": "0.50122744", "text": "def get_queryset(self):\n username = self.kwargs['username']\n users = User.objects.filter(username=username)\n return users", "title": "" }, { "docid": "cd9d80118653b765fe95655f86093ad4", "score": "0.49982208", "text": "def upcoming(self):\n now = datetime.datetime.now()\n return self.active().filter(Q(start_time__gte=now) | Q(end_time__gte=now)).order_by('start_time')", "title": "" }, { "docid": "ec868220ab5770116225ac8a5160f0ec", "score": "0.49948737", "text": "def attending_users(self):\n if self.uses_tickets:\n return set([t.user for t in self.tickets.all() if\n (not t.order or t.order.complete) and not t.cancelled])\n if self.uses_rsvps:\n return [t.user for t in self.rsvps.filter(going=True)]\n return []", "title": "" }, { "docid": "aee50d61b0087d5ee7851494ae67bdaa", "score": "0.4991672", "text": "def get_watch_users(session_id):\n try:\n user = Users.objects.get(session=session_id)\n except:\n return 0\n game = user.game\n now = datetime.datetime.now()\n time_old = now + datetime.timedelta(minutes = -1)\n return Watchusers.objects(game=game, time__gte=time_old).count()", "title": "" }, { "docid": "0330b820769dcb17a3084b173864b730", "score": "0.49902934", "text": "def get_participants_needing_checked(cls, session: Session, earliest_signup_time: date,\n latest_signup_time: date = None):\n query = session.query(Participant.participantId, Participant.isGhostId).filter(\n Participant.participantOrigin == 'vibrent',\n Participant.signUpTime > cls._date_to_datetime(earliest_signup_time)\n )\n if latest_signup_time:\n query = query.filter(Participant.signUpTime < cls._date_to_datetime(latest_signup_time))\n return query.all()", "title": "" }, { "docid": "58fea94422c205d076a6d2dd1df46089", "score": "0.49728063", "text": "def get_queryset(self):\n offset_hours = 12\n now = timezone.now()\n dt = now - timedelta(hours=offset_hours)\n return Lineup.objects.filter(user=self.request.user, draft_group__end__lte=dt)", "title": "" }, { "docid": "7c277536d00ca4cdb9936767b2e14ede", "score": "0.49715686", "text": "def get_queryset(self):\n\t\t\treturn get_all_teams().select_related('league').select_related('user')", "title": "" }, { "docid": "b925d16b5927dab78d03f6ca9818ab7f", "score": "0.49577186", "text": "def active_date(self):\n now = datetime.date.today()\n qs = (self.filter(start_date__lte=now) |\n self.filter(start_date__isnull=True))\n qs = (qs.filter(end_date__gte=now) |\n 
qs.filter(end_date__isnull=True))\n return qs", "title": "" }, { "docid": "c31d2204455e3d034a162869557ca05d", "score": "0.49434844", "text": "def get_active_vouchers(self):\n return Voucher.objects.filter(end_datetime__gt=now())", "title": "" }, { "docid": "074a205ac19db1eed9fd0ff0b8e60c0c", "score": "0.49419677", "text": "def get_current_users(self, **query):\n query.update({\"account\": self})\n return User.objects(**query)", "title": "" }, { "docid": "3993da570af58fb8365536298d8ea12e", "score": "0.4914325", "text": "async def on_room_join(self, room: Room, invitees: List[Contact],\n inviter: Contact, date: datetime):", "title": "" }, { "docid": "9c040f004181b16412ffce1f819eee6a", "score": "0.49125132", "text": "def get_queryset(self):\n base_qs = super(Overview, self).get_queryset()\n current_user = self.request.user\n queryset = base_qs.filter(Q(creator=current_user) |\n Q(participants=current_user)).distinct()\n for event in queryset:\n user_payments = event.payment_set.filter(user=current_user)\n event.user_inbound_payments = event.user_open_inbound_payments(current_user)\n event.user_outbound_payments = event.user_open_outbound_payments(current_user)\n return queryset", "title": "" }, { "docid": "f6390030ef75a713a9f22179ed36ce8f", "score": "0.49045753", "text": "def get_records(cls, date_from, date_to=None):\n\n date_from, date_to = process_date_range(date_from, date_to)\n\n return db.query(Visit).filter(Visit.timestamp >= date_from,\n Visit.timestamp < date_to).all()", "title": "" }, { "docid": "d156e3f9896929353163c4080be6b52f", "score": "0.4897541", "text": "def get_users_subscribed_audiobook_plays(start, end):\n\n df_users = runQuery(f\"\"\"\n WITH first_time_played(user_id, first_play, last_play, has_been_subscribed) AS (\n SELECT DISTINCT ap.user_id\n , DATE_TRUNC('day', FIRST_VALUE(ap.created_at) OVER (PARTITION BY ap.user_id ORDER BY ap.created_at))::date\n , DATE_TRUNC('day', FIRST_VALUE(ap.created_at) OVER (PARTITION BY ap.user_id ORDER BY ap.created_at DESC))::date\n , has_been_subscribed\n FROM audiobook_plays ap\n INNER JOIN users us\n ON ap.user_id = us.id\n WHERE ap.created_at BETWEEN '{start}'::TIMESTAMP AND '{end}'::TIMESTAMP\n )\n SELECT first_play,\n last_play,\n has_been_subscribed\n FROM first_time_played;\n \"\"\")\n\n return df_users", "title": "" }, { "docid": "483e8f00d78762a2726d334db0e85ac8", "score": "0.48902902", "text": "def members(self):\n members = Member.objects.filter(\n self.date_filter('created'),\n is_active=True\n )\n return len(members)", "title": "" }, { "docid": "189672189e96380c2aa5b8e5abf8d4e1", "score": "0.4880752", "text": "def get_users(user_list):\n try:\n users = RegisteredUsers.objects.filter(is_active=True, dev_id__in=user_list)\n except Exception, e:\n logger.error(\"[GetUsersException Occurred]:: %s\", str(e))\n return False\n\n return users", "title": "" }, { "docid": "3310ce24148ce4114b29a568159e0f38", "score": "0.48790216", "text": "def get(self):\n tournaments = Tournament.query.filter(\n Tournament.is_featured & (Tournament.ends_at > time.time())\n ).order_by(Tournament.ends_at).all()\n return tournaments_schema.jsonify(tournaments)", "title": "" }, { "docid": "2795802880e7963415ec88fd65a7c95e", "score": "0.48689646", "text": "def get_queryset(self):\n queryset = Trade.objects.all()\n\n start = self.__extract_from_querystring('timestamp_begin', datetime)\n if start is not None:\n queryset = queryset.filter(time_stamp__gt=start)\n\n finish = self.__extract_from_querystring('timestamp_end', datetime)\n if finish is not None:\n 
queryset = queryset.filter(time_stamp__lte=finish)\n\n return queryset", "title": "" }, { "docid": "5fbcc0cfeab81846d369d38d53890609", "score": "0.4866895", "text": "def get_queryset(self):\n\t\tusername = self.kwargs['username']\n\t\treturn User.objects.filter(username=username)", "title": "" }, { "docid": "f942d99106fbfed9bae7c734d155b2e3", "score": "0.48620555", "text": "def get_time_series(user, owned=True, read_only=True):\n id_list = [user.identifier] + [obj.identifier for obj in user.groups.all()]\n qset = TimeSeries.objects.select_related(\n 'eoobj', 'owner', 'source', 'source__eoobj',\n )\n if owned and read_only:\n qset = qset.filter(Q(owner=user) | Q(readers__identifier__in=id_list))\n elif owned:\n qset = qset.filter(owner=user)\n elif read_only:\n qset = qset.filter(readers__identifier__in=id_list)\n else: #nothing selected\n return []\n return qset", "title": "" }, { "docid": "3b209f725adf91b300a03a6c54341248", "score": "0.48529342", "text": "def filter_from(qs, from_date, q_func=Q):\n\n from_date = as_datetime(from_date)\n return qs.filter(\n q_func(end__isnull=False, end__gte=from_date) |\n q_func(start__gte=from_date) |\n (~q_func(repeat='') & (q_func(repeat_until__gte=from_date) |\n q_func(repeat_until__isnull=True)))).distinct()", "title": "" }, { "docid": "282f191c12b9a20114f6ecd1c8c9416f", "score": "0.48436454", "text": "def list_users(request):\n user_list = User.objects.exclude(\n profile__isnull=True).order_by('date_joined')\n return render(request, \"profiles/list_users.html\",\n { 'user_list' : user_list })", "title": "" }, { "docid": "abb222f73b2e7052c86b8306ae667467", "score": "0.48413017", "text": "def active(self, queryset=None):\n\n qs = queryset or self.get_queryset()\n\n tl_ids = get_utc_tl_ids(active=True)\n return qs.filter(\n Q(test__testlistmembership__test_list__in=tl_ids) |\n Q(test__testlistmembership__test_list__sublist__child__in=tl_ids)\n ).distinct()", "title": "" }, { "docid": "5bf658f64573261ff7d7a98e1f57f3ef", "score": "0.48356232", "text": "def get_environment_users(self, environment_id, filter_name=None, start=0, limit=25):\n params = {\"limit\": limit, \"start\": start}\n if filter_name:\n params = {\"name\": filter_name}\n resource = \"permissions/environment/{}/users\".format(environment_id)\n return self.get(self.resource_url(resource), params=params)", "title": "" }, { "docid": "31d80ace149b80d3d233655b64818bf6", "score": "0.48151678", "text": "def list(self, request, format=None):\n qqueryset = UserGroupSet.objects.filter(Q(group=request.user)|Q(user=request.user)).filter(status='ACTIVE')\n sync_ts = request.QUERY_PARAMS.get('last_sync', None)\n if sync_ts is None:\n qqueryset.filter(is_deleted=False)\n\n f = '%Y-%m-%d %H:%M:%S'\n\n users = []\n users = list(set(users))\n #users = [p.user for p in qqueryset]\n for p in qqueryset:\n if p.group.id != request.user.id:\n if sync_ts is not None:\n ut = p.group.updated_at.replace(tzinfo=None)\n but = p.group.bmi_profile.updated_at.replace(tzinfo=None)\n st = datetime.datetime.strptime(sync_ts, f)\n if st < ut or st < but:\n p.group.is_deleted = p.is_deleted\n users = [p.group] + users\n else:\n if p.is_deleted == False:\n p.group.is_deleted = p.is_deleted\n users = [p.group] + users\n if p.user.id != request.user.id:\n if sync_ts is not None:\n ut = p.user.profile.updated_at.replace(tzinfo=None)\n but = p.user.bmi_profile.updated_at.replace(tzinfo=None)\n st = datetime.datetime.strptime(sync_ts, f)\n if st < ut or st < but:\n p.user.is_deleted = p.is_deleted\n users = [p.user] + users\n 
else:\n if p.is_deleted == False:\n p.user.is_deleted = p.is_deleted\n users = [p.user] + users\n\n self_user = request.user\n \n\n if sync_ts is not None:\n ut = self_user.profile.updated_at.replace(tzinfo=None)\n but = self_user.bmi_profile.updated_at.replace(tzinfo=None)\n st = datetime.datetime.strptime(sync_ts, f)\n if st < ut or st < but:\n self_user.is_deleted = False\n users = [self_user] + users\n else:\n self_user.is_deleted = False\n users = [self_user] + users\n \n\n \n sync_ts = request.QUERY_PARAMS.get('last_sync', None)\n if sync_ts is None:\n serializer = UserListSerializer(users, fields=('id', 'username', 'email', 'first_name', 'last_name', 'profile', 'bmi_profile'), many=True)\n else:\n serializer = UserListSerializer(users,fields=('id', 'username', 'email', 'first_name', 'last_name', 'profile', 'bmi_profile','is_deleted'), many=True) \n\n return Response(serializer.data)", "title": "" }, { "docid": "4cea9044889b7a08f9b1c0710350c787", "score": "0.4798971", "text": "def associated_users(cls):\n return UserSocialAuth.objects.filter(provider='twitter')", "title": "" }, { "docid": "7aff277e5c3ff41e0a12ea60eac8211e", "score": "0.4798274", "text": "def just_logged_in(nth, minutes=10):\n User = get_user_model()\n since = timezone.now() - timedelta(minutes=minutes)\n users = User.objects.filter(last_login__gte=since, userprofile__app_logins=nth)\n return users.values_list(\"id\", flat=True)", "title": "" }, { "docid": "b15564f2aad780faa89295ff3a49c192", "score": "0.47891602", "text": "def get_queryset(self):\n session_id = self.kwargs['session_id']\n status = self.kwargs['status']\n print status\n print session_id\n if status == 'present':\n return User.objects.get_present_user(session_id, status)\n else:\n return User.objects.get_absent_user(session_id, status)", "title": "" }, { "docid": "d13ffb6d9ec624ac8d22b509838a7c60", "score": "0.47832882", "text": "def within_time_range(self, start_time: time, end_time: time) -> \"Ezlink\":\n if self.meta.get(\"within_time_range\"):\n raise RuntimeError(\"Ezlink is already within_time_range=\"\n .format(self.meta.get(\"within_time_range\")))\n \n start_time_str = start_time.strftime(\"%H:%M:%S\")\n end_time_str = end_time.strftime(\"%H:%M:%S\")\n predicate_board = (F.date_format(self.tap_in_time, \"HH:mm:ss\") \n .between(start_time_str, end_time_str)) \n predicate_alight = (F.date_format(self.tap_out_time, \"HH:mm:ss\") \n .between(start_time_str, end_time_str)) \n dataframe = (self.dataframe\n .filter(predicate_board | predicate_alight))\n\n return (Ezlink(dataframe, **self.columns)\n .annotate(\"within_time_range\", (start_time, end_time)))", "title": "" }, { "docid": "510f80dd044e0b42025163e2767841d2", "score": "0.47830117", "text": "def get_all_active_users():\n try:\n users = RegisteredUsers.objects.filter(is_active=True)\n except Exception, e:\n logger.error(\"[GetAllActiveUsersException Occurred]:: %s\", str(e))\n return False\n\n return users", "title": "" }, { "docid": "a4d55d7fe989271f0694dce9dea5d666", "score": "0.47809353", "text": "def get_queryset(self):\n\n queryset = Dweet.objects.select_related().all().order_by('-id',)\n uid = self.request.query_params.get('uid', None)\n\n if uid is not None:\n user = UserProfile.objects.only('id').filter(pk=uid)\n queryset = queryset.filter(dweeted_user__in=user) \n\n return queryset", "title": "" }, { "docid": "21f1779403726458fa25c622d75121f3", "score": "0.47730246", "text": "def get_available_instructors(day: date, start: time, finish: time):\n instructor_query = 
Instructor.select()\n available_instructors = []\n for instructor in instructor_query:\n available = True\n for lesson in instructor.lessons:\n l_day = lesson.day\n l_s = lesson.expected_start\n l_f = lesson.expected_finish\n if (l_day == day) and (((l_s <= finish) and (finish <= l_f)) or ((l_s <= start) and (start <= l_f)) or ((start < l_s) and (l_f < finish))):\n available = False\n break\n if available:\n available_instructors.append(\n str(instructor.ID) + '- ' + str(instructor.name))\n\n return available_instructors", "title": "" }, { "docid": "94f23167cf0958e83271652b631db848", "score": "0.47729623", "text": "def get_users_list(self):\n return User.objects.all()", "title": "" }, { "docid": "74046343caca73643dc0d17d55c42793", "score": "0.47562927", "text": "def get_queryset(self):\n query = self.request.GET.get('q')\n object_list = User.objects.filter(Q(username__icontains=query))\n return object_list", "title": "" }, { "docid": "8a1144049b40e6554c4333c835d37638", "score": "0.47531694", "text": "def subscribed_users(context):\n return list(set(context.readers + context.subscribed + context.owners))", "title": "" }, { "docid": "22ba6f5f32f140b745adfff81dd36404", "score": "0.47471672", "text": "def get_queryset(self):\n offset_hours = 24\n now = timezone.now()\n dt = now - timedelta(hours=offset_hours)\n return Lineup.objects.filter(\n user=self.request.user,\n draft_group__start__lte=now,\n draft_group__end__gt=dt\n ).exclude(\n entries__contest_pool=None\n ).order_by(\n 'draft_group__start'\n ).select_related(\n 'draft_group', 'user'\n ).prefetch_related(\n 'entries', 'players'\n ).distinct()", "title": "" }, { "docid": "45886ade44142cb538b0a6dfe96e1063", "score": "0.4738599", "text": "def get_queryset(self):\n return User.objects.filter(username=self.request.user.username)", "title": "" }, { "docid": "0efc69d2296a0f17ea8785fd1c5290ec", "score": "0.4722542", "text": "def queryset(self):\n return User.objects", "title": "" }, { "docid": "77461f9e8347e8c724a9194bddbc3722", "score": "0.47214913", "text": "def get_queryset(self):\n return sorted(User.objects.all(), key=lambda x: -x.profile.stats_for_all_time)", "title": "" }, { "docid": "ee3abbe74ad9f5f1c754f1824ad619c7", "score": "0.47206953", "text": "def users(self, event):\r\n return \"Current users: %s\" % \", \".join(sorted(self.joined.keys()))", "title": "" }, { "docid": "d6dd8de5f32451abe36cc9ec7322c251", "score": "0.4719677", "text": "def for_period(self, from_date=None, to_date=None, exact=False):\n\n filtered_qs = self\n\n # to_date filtering is accurate\n if to_date:\n to_date = as_datetime(to_date, True)\n filtered_qs = filtered_qs.filter(Q(start__lte=to_date)).distinct()\n\n if from_date:\n # but from_date isn't, due to uncertainty with repetitions, so\n # just winnow down as much as possible via queryset filtering\n filtered_qs = filter_from(filtered_qs, from_date)\n\n # filter out invalid results if requested\n if exact:\n filtered_qs = filter_invalid(filtered_qs, from_date, to_date)\n\n return filtered_qs", "title": "" }, { "docid": "fe8d91c3b1063f5326f4d78fdb9d8ac8", "score": "0.47190756", "text": "def recent_users(cls, resource, n_users=5, days=60):\n return User.objects\\\n .filter(visitor__session__variable__resource=resource,\n visitor__session__variable__name='visit',\n visitor__session__variable__timestamp__gte=(datetime.now()\n - timedelta(days)))\\\n .distinct()\\\n .annotate(last_accessed=models.Max('visitor__session__variable__timestamp'))\\\n 
.filter(visitor__session__variable__timestamp=F('last_accessed'))\\\n .order_by('-last_accessed')[:n_users]", "title": "" }, { "docid": "300883cf6c9085aab52a043c7774449c", "score": "0.47187987", "text": "def get_users_in_global_permissions(self, start=0, limit=25):\n params = {\"limit\": limit, \"start\": start}\n url = \"rest/api/latest/permissions/global/users\"\n return self.get(url, params=params)", "title": "" }, { "docid": "d48b06a2077f268929c325ce995ed9d1", "score": "0.4716369", "text": "def get_user_range_view(\n username: str, types: list, start: str, end: str\n ) -> ResultProxy:\n type_query = generate_exercise_filter_sql_query(types)\n # pylint: disable=no-member\n return db.session.execute(\n f\"\"\"\n SELECT date, SUM(miles) AS miles, CAST(AVG(feel) AS UNSIGNED) AS feel \n FROM logs \n WHERE username=:username \n AND deleted IS FALSE\n AND date >= :start \n AND date <= :end\n AND {type_query}\n GROUP BY date\n \"\"\",\n {\"username\": username, \"start\": start, \"end\": end},\n )", "title": "" }, { "docid": "b8d1c796a10db6bffdb4c12b82d43cef", "score": "0.4716062", "text": "def getTimesFromTo(userid, startdate, enddate):\r\n times = Time.gql(\"where userid = :userid and start > :startdate and start < :enddate order by start\",\r\n userid=userid, startdate=startdate, enddate=enddate).fetch(1000)\r\n for time in times:\r\n Converter.model_to_UTC1(time)\r\n return times", "title": "" }, { "docid": "29cfa3cae0bca7b27a47993f5555509a", "score": "0.47105032", "text": "def filter_time_range(traces, start_time, end_time):\n traces = traces.loc[traces['startTime'] >= start_time]\n traces = traces.loc[traces['startTime'] <= end_time]\n return traces", "title": "" }, { "docid": "29763c7e549f793c6b7592d25099db49", "score": "0.47082093", "text": "def get_queryset(self):\n user = self.request.user\n return User.objects.filter(username=user.username)", "title": "" }, { "docid": "2f88d08aa6e0023b94db4303d8212f93", "score": "0.470107", "text": "def get_queryset(self):\n self.raise_if_requesting_user_not_target_user()\n\n target_username = self.kwargs['username']\n return Meal.objects.filter(owner__username=target_username)", "title": "" }, { "docid": "a445ac4d77d0292472e37864fbf2911b", "score": "0.46927434", "text": "def get(self):\n parties = Party.query.filter(Party.is_active.is_(True), Party.party_end_datetime > datetime.utcnow(),\n Party.club_owner_id == current_user.club_owner_id) \\\n .order_by(Party.party_start_datetime).all()\n parties = [p for p in parties if p.party_start_datetime.date() >= datetime.utcnow().date()]\n return [p.json() for p in parties]", "title": "" }, { "docid": "e5e19f43446a03a44eaffbc46416c099", "score": "0.46895987", "text": "def associated_users(self):\n return UserSocialAuth.objects.filter(provider='foursquare')", "title": "" }, { "docid": "41f6df612f1afdd5167128f460a69183", "score": "0.46855626", "text": "def get_time_period_queryset(time_period, user):\n if ':' in time_period:\n dates = time_period.split(':')\n if not dates[0] or not dates[1]:\n return 'Check that both dates are filled'\n from_date = datetime.datetime.strptime(dates[0], '%Y-%m-%d')\n till_date = datetime.datetime.strptime(dates[1], '%Y-%m-%d')\n if till_date < from_date:\n return 'Check that \"From\" date is less than \"Till\" date'\n queryset = return_queryset(user, time_period, from_date, till_date)\n else:\n queryset = return_queryset(user, time_period)\n return queryset", "title": "" }, { "docid": "a47b2f8f87bfef57a6275fc484e280c6", "score": "0.46766478", "text": "def 
get_tweets_for_time_frame(self, user_id, start, end):\n tweets = []\n max_id = None\n total_tweets = 0\n while True:\n page = self.api.GetUserTimeline(user_id=user_id, count=200, max_id=max_id)\n tweets += page\n total_tweets += len(page)\n if len(page) > 0 and is_newer_than(page[-1], start):\n max_id = page[-1].id - 1\n else:\n break\n tweets = [t for t in tweets if (is_newer_than(t, start) and is_older_than(t, end))]\n return tweets", "title": "" }, { "docid": "8a798f59c2175314a3a2c423a023f6db", "score": "0.46765104", "text": "def get_inactive_users(self) -> int:\n body = models.WriteQuery(\n model = \"system__activity\",\n view = \"user\",\n fields = [\n \"user.id\",\n \"user.name\"\n ],\n filters = {\n \"user_facts.is_looker_employee\": \"no\",\n \"user.is_disabled\": \"no\"\n },\n filter_expression = {\n \"diff_days(${user_facts.last_api_login_date}, now()) > 90 OR diff_days(${user_facts.last_ui_login_date}, now()) > 90\"\n },\n sorts = [\n \"user.id\"\n ],\n limit = \"500\"\n )\n users_query = self.looker_client.create_query(body)\n inactive_users = self.looker_client.run_query(users_query.id, result_format='json')\n return len(inactive_users)", "title": "" }, { "docid": "3f977d60940d0c861e0b7cf7c3c1203b", "score": "0.46748537", "text": "def _filter_included_users(self, users_list, queryset):\n user_queryset = self._filter_users(users_list, queryset)\n user_attributes = Attribute.objects.filter(\n value_type__in=[Attribute.TYPE_USER, Attribute.TYPE_PERSONNEL]\n )\n\n attribute_data_users = self.queryset.none()\n for attribute in user_attributes:\n if attribute.fieldsets.exists():\n parent_attr = attribute.fieldsets.last()\n attribute_filter = {\n f\"attribute_data__{parent_attr.identifier}__contains\": [{f\"{attribute.identifier}\": users_list[0]}]\n }\n else:\n attribute_filter = {\n f\"attribute_data__{attribute.identifier}__in\": users_list\n }\n attribute_data_users = attribute_data_users | queryset.filter(\n **attribute_filter\n )\n\n return attribute_data_users | user_queryset", "title": "" }, { "docid": "05325aa0129267bfbfba342583100c1d", "score": "0.46681604", "text": "def active_within_range(self, query_start, query_end):\n\n query_start = self.to_timezone(query_start)\n query_end = self.to_timezone(query_end)\n\n for range in self.daily_periods(range_start=query_start.date(), range_end=query_end.date()):\n if (\n ((range.start < query_start) and (range.end > query_end))\n or (query_start <= range.start <= query_end)\n or (query_start <= range.end <= query_end)):\n return True\n return False", "title": "" }, { "docid": "7c5721c060538d7e994c09dfe24d97e6", "score": "0.46676", "text": "def associated_users(self):\n return UserSocialAuth.objects.filter(provider='facebook')", "title": "" }, { "docid": "2084debb774e65ccf4095d4daa05e273", "score": "0.46664542", "text": "def find_users(self, **kwargs):\n usernames = set()\n num_found = True\n while num_found:\n num_before = len(usernames)\n profiles = self.search(matchorder='RANDOM', **kwargs)\n usernames.update(profile['username'] for profile in profiles)\n num_after = len(usernames)\n num_found = num_after - num_before \n print(\"Found {} new users\".format(num_found))\n time.sleep(settings.SLEEP_TIME)\n return usernames", "title": "" }, { "docid": "1e2efb3e6d7c02ef001d0c355e152646", "score": "0.46638194", "text": "def get_queryset(self):\n user = self.request.user\n taskList = user.tasks.all().values_list('id', flat=True)\n return Interval.objects.filter(task__in=taskList)", "title": "" }, { "docid": 
"557db486fe9b68f51fa6e3960a6bf452", "score": "0.46635854", "text": "def get_wait_users():\n now = datetime.datetime.now()\n time_old = now + datetime.timedelta(minutes = -2)\n users_wait = Users.objects(status=0, last_time__gte=time_old)\n for user in users_wait:\n try:\n field = Fields.objects.get(id=user.field_battle.id)\n except:\n field = Fields()\n field.save()\n try:\n game = Games.objects.get(id=user.game.id)\n game.fields = [field]\n game.save()\n except:\n game = Games(fields=[field])\n game.save()\n user.field_battle = field\n user.game = game\n user.save()\n return users_wait", "title": "" }, { "docid": "570505d20d18fa6316f95da2785e2427", "score": "0.46632406", "text": "def get_by_frame_id(frame_id):\n frame = Frames.get_by_id(frame_id)\n users = Users.collection.find({'username': {'$in': frame['users']}})\n return users", "title": "" }, { "docid": "82b9e50b3564d3ccf7d8d9c060d40d67", "score": "0.4663092", "text": "def get_queryset(self):\n if self.request.user.is_staff:\n queryset = Period.objects.all()\n else:\n queryset = Period.objects.filter(is_active=True)\n\n return queryset", "title": "" }, { "docid": "527ad5ef70668038f1bb2c78b8b5d99c", "score": "0.4656222", "text": "def get_users_of_current_depth(self):\n\t\treturn(self.db_session.query(User).filter(User.depth == self.cur_depth).all())", "title": "" }, { "docid": "fe5b6b4d67d3c2f2d416b17dd0a969c7", "score": "0.4650105", "text": "def test_userJoined(self):\n eventData = _buildEventData(\"user\", \"channel\")\n\n self._test_clientMessage(eventData=eventData,\n hook=self.f.onUserJoin,\n trigger=self.client.userJoined)", "title": "" }, { "docid": "e4a176fdf65c1a4ba1b27843bd566663", "score": "0.46480823", "text": "def get_users():\n # get user search query\n user_query = flask.request.args.get(\"search\")\n \n # make query to db\n returned_user = chat.User.query.filter_by(username=user_query).first()\n\n context = {}\n if returned_user:\n context = {\n \"username\": returned_user.username\n }\n return flask.jsonify(**context)", "title": "" }, { "docid": "a6c860763c53f32d657dda459090e62c", "score": "0.46475896", "text": "def get_queryset(self) -> QuerySet:\n return AccountUserRole.objects.filter(user=self.request.user).select_related(\n \"account\"\n )", "title": "" }, { "docid": "9f4ec93b5a0badf5a83828dce1012f5e", "score": "0.46436855", "text": "def get_created_in_range(cls, start, end):\n return cls.objects.filter(created_at__gte=start, created_at__lt=end)", "title": "" }, { "docid": "a828522e1a7902d04a631bff4118392f", "score": "0.46396932", "text": "def get_models_between(start_year, end_year):\n\n return Model.query.filter((Model.year >= start_year) & (Model.year < end_year)).all()", "title": "" }, { "docid": "4bc4f623d018ebb40c01443e6f5512c0", "score": "0.46386397", "text": "def get_myActiveStudents(self):\n return User.objects.filter(is_active = True, id__in = self.guardianstudentrelation_set.values('student'))", "title": "" }, { "docid": "2f52f89c514d55572bd5d4fc98e54cd5", "score": "0.46379423", "text": "def get_list_between_date(start, end):\n return Message.query.filter_by(Message.date.between(start, end))", "title": "" }, { "docid": "3bac44360d5af0112897b74aa777e76f", "score": "0.46354872", "text": "def get_models_between(start_year, end_year):\n\n query = db.session.query(Model).filter(Model.year >= start_year, Model.year < end_year).all()\n\n return query", "title": "" }, { "docid": "6df513a02abf452bec416509af0975b2", "score": "0.46281704", "text": "def get_queryset(self):\n queryset = User.objects.all()\n 
active_filter = self.request.query_params.get('is_active')\n if active_filter is not None:\n # using permission for view full info here because it's mean that\n # user is admin and can access is_active filter\n if self.request.user.has_perm('profiles.view_full_info'):\n # We need to convert provided active_filter to boolean,\n # built-in function from distutils.util is used in convertion.\n # If active_filter is neither True nor False, nothing will\n # happen.\n try:\n active_filter = bool(strtobool(active_filter))\n queryset = queryset.filter(is_active=active_filter)\n except ValueError:\n pass\n query = self.request.query_params.get('q')\n if query is not None:\n # If we chain birthday as another Q object filter we will keep\n # getting erorrs that passed query does not correspond to db date\n # format if passed param isn't date in format '%Y-%m-%d' thus every\n # request that meant to search by either part of the name or by\n # email will raise error.\n # To avoid this, we try to convert query string to date explicitly\n # if everything is ok, we just filter query against date object we\n # got after converting.\n # Otherwise we just chain other filters.\n\n input_formats = ['%Y-%m-%d', '%d-%m-%Y', '%d-%m-%y']\n date = convert_date(input_formats, query)\n\n if date is not None:\n queryset = queryset.filter(birthday=date)\n else:\n queryset = queryset.filter(\n Q(first_name__icontains=query) |\n Q(last_name__icontains=query) |\n Q(email=query)\n )\n return queryset", "title": "" }, { "docid": "45319f6e63a4dff00c0993561d1d8753", "score": "0.46178046", "text": "def _populate_participated_offers(request):\n return Offer.objects.filter(volunteers=request.user)", "title": "" }, { "docid": "e0d97fa7bce1fcabb766b07fbc772d57", "score": "0.46086335", "text": "def get_models_between(start_year, end_year):\n\n return Model.query.filter(Model.year >= start_year, Model.year < end_year).all()", "title": "" }, { "docid": "ccdea4aba35f308ff27322e5ac3d173d", "score": "0.4607546", "text": "def get_models_between(start_year, end_year):\n\n return Model.query.filter(Model.year >= start_year, \n Model.year < end_year).all()", "title": "" } ]
ac3d2581b95010523b87ccb2cb547941
Make message from multiple sensor readings for device key.
[ { "docid": "5ce38a831fc692b171186a567589b81f", "score": "0.6766938", "text": "def make_sensor_readings_message(\n self,\n device_key: str,\n sensor_readings: List[SensorReading],\n timestamp: int = None,\n ) -> Message:\n topic = self.SENSOR_READING + self.DEVICE_PATH_PREFIX + device_key\n\n payload = {}\n for sensor_reading in sensor_readings:\n if isinstance(sensor_reading.value, tuple):\n sensor_reading.value = \",\".join(map(str, sensor_reading.value))\n elif isinstance(sensor_reading.value, bool):\n sensor_reading.value = str(sensor_reading.value).lower()\n\n payload[sensor_reading.reference] = sensor_reading.value\n\n if timestamp is not None:\n payload[\"utc\"] = timestamp\n\n message = Message(topic, json.dumps(payload))\n self.log.debug(\n f\"Made {message} from {sensor_readings} and {device_key} \"\n f\"and timestamp {timestamp}\"\n )\n\n return message", "title": "" } ]
[ { "docid": "b17e67957534721525784ecdcbb173d8", "score": "0.60640067", "text": "def make_sensor_reading_message(\n self, device_key: str, sensor_reading: SensorReading\n ) -> Message:\n topic = (\n self.SENSOR_READING\n + self.DEVICE_PATH_PREFIX\n + device_key\n + self.CHANNEL_DELIMITER\n + self.REFERENCE_PATH_PREFIX\n + sensor_reading.reference\n )\n\n if isinstance(sensor_reading.value, tuple):\n sensor_reading.value = \",\".join(map(str, sensor_reading.value))\n elif isinstance(sensor_reading.value, bool):\n sensor_reading.value = str(sensor_reading.value).lower()\n\n if sensor_reading.timestamp is not None:\n payload = json.dumps(\n {\n \"data\": str(sensor_reading.value),\n \"utc\": int(sensor_reading.timestamp),\n }\n )\n else:\n payload = json.dumps({\"data\": str(sensor_reading.value)})\n\n message = Message(topic, payload)\n self.log.debug(\n f\"Made {message} from {sensor_reading} and {device_key}\"\n )\n\n return message", "title": "" }, { "docid": "aa87c8e817954989ff5ec6ca4e26ccfc", "score": "0.58715117", "text": "def make_from_sensor_reading(self, reading):\n self.logger.debug(\"make_from_sensor_reading called\")\n if reading.timestamp is None:\n\n if isinstance(reading.value, tuple):\n\n delimiter = \",\"\n\n values_list = list()\n\n for value in reading.value:\n if value is True:\n value = \"true\"\n elif value is False:\n value = \"false\"\n if \"\\n\" in str(value):\n value = value.replace(\"\\n\", \"\\\\n\")\n value = value.replace(\"\\r\", \"\")\n if '\\\"' in str(value):\n value = value.replace(\"\\\"\", '\\\\\\\"')\n values_list.append(value)\n values_list.append(delimiter)\n\n values_list.pop()\n\n reading.value = \"\".join(map(str, values_list))\n\n if reading.value is True:\n reading.value = \"true\"\n elif reading.value is False:\n reading.value = \"false\"\n\n if \"\\n\" in str(reading.value):\n reading.value = reading.value.replace(\"\\n\", \"\\\\n\")\n reading.value = reading.value.replace(\"\\r\", \"\")\n if \"\\\"\" in str(reading.value):\n reading.value = reading.value.replace(\"\\\"\", \"\\\\\\\"\")\n\n message = OutboundMessage(\n \"readings/\" + self.device_key + \"/\" + reading.reference,\n '{ \"data\" : \"' + str(reading.value) + '\" }',\n )\n self.logger.debug(\n \"make_from_sensor_reading - Channel: %s ; Payload: %s\",\n message.channel,\n message.payload,\n )\n return message\n\n else:\n\n if isinstance(reading.value, tuple):\n\n delimiter = \",\"\n\n values_list = list()\n\n for value in reading.value:\n if value is True:\n value = \"true\"\n elif value is False:\n value = \"false\"\n if \"\\n\" in str(value):\n value = value.replace(\"\\n\", \"\\\\n\")\n value = value.replace(\"\\r\", \"\")\n values_list.append(value)\n values_list.append(delimiter)\n\n values_list.pop()\n\n reading.value = \"\".join(map(str, values_list))\n\n if reading.value is True:\n reading.value = \"true\"\n elif reading.value is False:\n reading.value = \"false\"\n\n if \"\\n\" in str(reading.value):\n reading.value = reading.value.replace(\"\\n\", \"\\\\n\")\n reading.value = reading.value.replace(\"\\r\", \"\")\n\n message = OutboundMessage(\n \"readings/\" + self.device_key + \"/\" + reading.reference,\n '{ \"utc\" : \"'\n + str(reading.timestamp)\n + '\", \"data\" : \"'\n + str(reading.value)\n + '\" }',\n )\n self.logger.debug(\n \"make_from_sensor_reading - Channel: %s ; Payload: %s\",\n message.channel,\n message.payload,\n )\n return message", "title": "" }, { "docid": "f04ea0b993a8defcf6c1058022d5575c", "score": "0.58262044", "text": "def read_data(device, command, 
indices, output_conversion, message):\n for index in indices:\n device.ctrl_transfer(0xa1, 2, 0x0100, 0x2900, 16)\n device.ctrl_transfer(0xa1, 2, 0x0100, 0x2900, 50)\n result = device.ctrl_transfer(0xc0, command, 0, index, 1, None)\n print(message.format(index, output_conversion[result[0]]))", "title": "" }, { "docid": "480a3d4018c93db6cb9145fc774c03e1", "score": "0.5781694", "text": "def background_thread():\n count = 0\n while True:\n \"\"\"Collect Data right here\"\"\"\n serialRaw = serialPort.readline()\n data = serialRaw.decode(\"utf-8\").split(\",\")\n print(data)\n\n if len(data) == 4:\n output = {}\n for i in range(len(sensors)):\n cData = data[i]\n output[sensors[i]] = cData\n count += 1\n print(output)\n socketio.emit('sensor',\n output,\n namespace='/test')\n socketio.sleep(0.2)", "title": "" }, { "docid": "77d1eeaaef5a94c88062772d41d63a9f", "score": "0.56798846", "text": "def communicate_sensors():\n payload = \"\\n\".join(\n '%s value=%f' % (s.name, s.value) for s in hal.sensors.values())\n payload += \"\\ntx_bytes value=%d\\nrx_bytes value=%d\" % (\n hal.tx_bytes, hal.rx_bytes)\n\n response = yield from aiohttp.post(INFLUX_URL, data=payload.encode(),\n headers={'Accept-encoding': 'identity'})\n yield from response.release()", "title": "" }, { "docid": "251ef1133942e1c015d6f2c0a289a474", "score": "0.56575346", "text": "def read(self):\n while self.serial.inWaiting():\n message = self.serial.readline().decode(\"ascii\").strip()\n self.msg_in.append((self.device_id, message))", "title": "" }, { "docid": "0a16c71907a294aba73cc6508ec4e34e", "score": "0.5575014", "text": "def sensor_message(sensor_id, value, timestamp=None):\n if timestamp is None:\n timestamp = long(time.time() * 1e6)\n\n return dict(sensor_id=str(sensor_id), time=long(timestamp), value=value)", "title": "" }, { "docid": "2e482792d302d5e038dd78f31908aed7", "score": "0.5547065", "text": "def GetSensorData(self): \n with self.lock:\n for j in range(0, 4): # loop through all the delayboards\n control = self.getControlByte(flags=(ACK + EOR), sn=self.SequenceNumber, an=self.AckNumber)\n self.Message = self.CompileSmallMessage(control=control, command=0, length=2)\n self.MessageAddendum = chr(11) + chr(j) # [11,j]\n self.MessageAddendum += self.ComputeChecksum16(self.MessageAddendum)\n self.Message += self.MessageAddendum\n ok = self.DoCommand()\n if not ok:\n return False\n for i in range(0, 4): # loop through all the delayboard channels and store the information in the correct attributes\n self.LNA1Currents[i + (j * 4)] = float(self.RxMessage[2 + (i * 6)] + (self.RxMessage[1 + (i * 6)] << 8)) / 10.0\n self.LNA2Currents[i + (j * 4)] = float(self.RxMessage[4 + (i * 6)] + (self.RxMessage[3 + (i * 6)] << 8)) / 10.0\n self.AntennaCurrents[i + (j * 4)] = float(self.RxMessage[6 + (i * 6)] + (self.RxMessage[5 + (i * 6)] << 8)) / 10.0\n self.DelayBoardTemperatures[j] = float(self.RxMessage[26] + (self.RxMessage[25] << 8)) / 10.0\n logger.debug(\"(%s) 'Retrieved delay board sensor data\" % self.name)\n return True", "title": "" }, { "docid": "d94f0013ea9dde6b906216ec8c2bbd44", "score": "0.55421984", "text": "def on_reader_read(moqs, obj, msg):\n obj.logger_debug(\"MQTT: topic: {}, data: {}\".format(msg.topic, msg.payload.decode(\"utf-8\")))\n default_values = [DATA_NONE] * 254\n try:\n if obj.config[\"use_registers\"]:\n raw = json.loads(msg.payload.decode(\"utf-8\"))\n values = list()\n for i in range(0, len(raw[\"uid\"])):\n values.append(raw[\"uid\"][i]) # add uid to registers 0 - 6\n values.append(DATA_DELIMETER) # add 
65534 as delimiter\n values.append(raw[\"tag\"][\"tag_protocol\"]) # add tag protocol to address 8\n values.append(raw[\"tag\"][\"tag_size\"]) # add tag size to address 9\n values.append(raw[\"tag\"][\"user_memory_offset\"]) # add user memory offset to address 10\n values.append(DATA_DELIMETER) # add 65534 as delimiter\n for page in range(0, len(raw[\"data\"])):\n for val in range(0, 4):\n values.append(raw[\"data\"][page][val])\n values.append(DATA_DELIMETER) # add 65534 as delimiter between pages\n elif obj.config[\"use_msgpack\"]:\n raw = json.loads(msg.payload.decode(\"utf-8\"))\n packed = msgpack.packb(raw)\n\n values = [v for v in packed]\n else: # if msgpack or registers are not allow use bare json in ascii\n values = [ord(v) for v in msg.payload.decode(\"utf-8\")]\n aas.logger_debug(\"Reader data read: new values: \" + str(values))\n # setup default values and then set received values\n obj.context[aas.config[\"slave_id\"][\"reader_data_read\"]].setValues(3, 0, default_values)\n obj.context[aas.config[\"slave_id\"][\"reader_data_read\"]].setValues(4, 0, default_values)\n obj.context[aas.config[\"slave_id\"][\"reader_data_read\"]].setValues(3, 0, values)\n obj.context[aas.config[\"slave_id\"][\"reader_data_read\"]].setValues(4, 0, values)\n except:\n obj.logger_error(\"MQTT: received msq cannot be processed\")", "title": "" }, { "docid": "589029e8acbe361f488cdc507c5f9c3b", "score": "0.55338734", "text": "def send_data(self, device_id, nodesensor, sensordevice, value):\n data = {}\n devicename = self.mysensorsmanager.nodes[nodesensor][\"name\"]\n data[self.sensors[device_id][sensordevice]] = value # sensordevice = \"v_temp\" or i_battery_level or \"nodetype\", ... \n self.log.info(\"==> Publish value '%s' for node '%s' (%s/%s) sensor\" % (value, nodesensor, devicename, sensordevice))\n\n try:\n self._pub.send_event('client.sensor', data)\n except:\n self.log.error(u\"### Bad MQ message to send : {0}\".format(data))\n pass", "title": "" }, { "docid": "a59ff78783cfd36275ed4208b72a155d", "score": "0.5459333", "text": "def send_data(self, countersensorid, sensortype, value):\n data = {}\n device_id = self.flowmetermanager.flowMeterSensorsList[countersensorid][\"device_id\"]\n devicename = self.flowmetermanager.flowMeterSensorsList[countersensorid][\"name\"]\n data[self.sensors[device_id][sensortype]] = value \n self.log.info(\"==> Publish '%s' value '%s' for device '%s'\" % (sensortype, value, devicename))\n\n try:\n self._pub.send_event('client.sensor', data)\n except:\n self.log.error(u\"### Bad MQ message to update sensor : {0}\".format(data))\n pass", "title": "" }, { "docid": "ff050d9f8ad6f7c62ee295528977eb99", "score": "0.5375972", "text": "def sdi_collect(address, command=\"M\", sdi_bus=\"Port1\"):\n\n # create the SDI-12 cmd_to_sensor using the provided address\n cmd_to_sensor = '{0}{1}!'.format(address, command)\n\n # issue the cmd_to_sensor and get the reply\n sensor_reply = sdi_send_command_get_reply(cmd_to_sensor, sdi_bus)\n\n # parse out the returned values\n parsed = re.match('(\\d)(\\d\\d\\d)(\\d)', sensor_reply)\n if parsed is None or int(parsed.group(1)) != address:\n raise Sdi12Error('No reply or bad reply', sensor_reply)\n\n # figure out how long and then wait for sensor to be ready\n time_till_reply = int(parsed.group(2))\n utime.sleep(time_till_reply)\n\n # how many parameters did the sensor return?\n values_returned = int(parsed.group(3))\n\n # all the parameters returned by the sensor end up here\n result = []\n\n # we will use this expression to parse the values 
form the sensor reply\n float_match = re.compile('([-+][0-9]*\\.?[0-9]+[eE][-+]?[0-9]+)|([-+][0-9]*\\.?[0-9]*)')\n\n # we need to issue one or more send data commands to the sensor\n data_index = 0\n while len(result) < values_returned and data_index <= 9:\n # create and issue the get data cmd_to_sensor\n cmd_to_sensor = '{0}D{1}!'.format(address, data_index)\n sensor_reply = sdi_send_command_get_reply(cmd_to_sensor, sdi_bus)\n\n if (sensor_reply is None) or (sensor_reply == \"No reply\"):\n raise Sdi12Error('Missing data at pos', len(parsed) + 1)\n\n # parse out all the values returned by the sensor\n while len(result) < values_returned:\n parsed = float_match.search(sensor_reply)\n if parsed is None:\n break\n result.append(float(parsed.group(0)))\n sensor_reply = sensor_reply.replace(parsed.group(0), '', 1)\n\n data_index += 1\n return result", "title": "" }, { "docid": "1da2a12a9d36112673bb1282e4a256da", "score": "0.5226992", "text": "def collect(devices, is_sensor=False):\n self._state.update({'_' * is_sensor + str(device['id']): device\n for device in devices or {}\n if device['name'] and\n not (is_sensor and\n 'data' not in device)})", "title": "" }, { "docid": "1ed1b80457c5e7ee3318e104eba5ae29", "score": "0.519295", "text": "def on_message(self, client, userdata, msg):\n raw_data = msg.payload\n\n #quant = (256/65536)\n np_data = np.array([ch for ch in raw_data], dtype=np.int16)\n gen_data = np.zeros((BLOCK_SIZE, 2))\n\n quant = (65536/256)\n index = 0\n for ch in np_data:\n value = round(ch*quant-32768)\n gen_data[index][0] = value\n gen_data[index][1] = value\n index += 1\n self.recv_data = gen_data\n print(np_data)\n #print(self.recv_data)\n #print(len(raw_data), raw_data)\n print(\"----------------\")", "title": "" }, { "docid": "26c5299fe50c9779870b9b88ba7aae88", "score": "0.5136321", "text": "def updateThread():\n global c, scanForDevices, updateDuration, UPDATE_PERIODE\n while True: # Enter the infinite loop\n startTime = time.time() # Save start time of update cycle\n messagesSend = [] # List of messages to send\n messagesRecv = c.getMessages() # Get new messages\n\n inputUpdate() # Update input devices\n outputUpdate() # Update output devices\n pwmUpdate() # Update PWM devices\n adcUpdate() # Update ADC devices\n i2cUpdate() # Update I2C devices\n\n for messageString in messagesRecv:\n message = json.loads(messageString) # Parse message from string to JSON\n if message['type'] == 'DeviceList': # Create device register message for all devices\n for device in inputList:\n messagesSend.append(json.dumps({'type': 'Register',\n 'name': device['name'],\n 'dir': device['dir'],\n 'dim': device['dim'],\n 'about': device['about'],\n 'settings': device['settings'],\n 'mode': device['mode'],\n 'flags': device['flags'],\n 'frequency': device['frequency'],\n 'dutyFrequency': device['dutyFrequency']}))\n for device in outputList:\n messagesSend.append(json.dumps({'type': 'Register',\n 'name': device['name'],\n 'dir': device['dir'],\n 'dim': device['dim'],\n 'about': device['about'],\n 'settings': device['settings'],\n 'mode': device['mode'],\n 'flags': device['flags'],\n 'frequency': device['frequency'],\n 'dutyFrequency': device['dutyFrequency']}))\n for device in pwmList:\n messagesSend.append(json.dumps({'type': 'Register',\n 'name': device['name'],\n 'dir': device['dir'],\n 'dim': device['dim'],\n 'about': device['about'],\n 'settings': device['settings'],\n 'mode': device['mode'],\n 'flags': device['flags'],\n 'frequency': device['frequency'],\n 'dutyFrequency': 
device['dutyFrequency']}))\n for device in adcList:\n messagesSend.append(json.dumps({'type': 'Register',\n 'name': device['name'],\n 'dir': device['dir'],\n 'dim': device['dim'],\n 'about': device['about'],\n 'settings': device['settings'],\n 'mode': device['mode'],\n 'flags': device['flags'],\n 'frequency': device['frequency'],\n 'dutyFrequency': device['dutyFrequency']}))\n for device in i2cList:\n messagesSend.append(json.dumps({'type': 'Register',\n 'name': device['name'],\n 'dir': device['dir'],\n 'dim': device['dim'],\n 'about': device['about'],\n 'settings': device['settings'],\n 'mode': device['mode'],\n 'flags': device['flags'],\n 'frequency': device['frequency'],\n 'dutyFrequency': device['dutyFrequency']}))\n if message['type'] == 'Set': # Get set message for a device and check for devices\n output.setValue(message['name'], message['dim'], message['value'])\n pwm.setValue(message['name'], message['dim'], message['value'])\n i2c.setValue(message['name'], message['dim'], message['value'])\n\n if message['type'] == 'Settings': # Change settings for a device\n input.settings(message)\n output.settings(message)\n pwm.settings(message)\n adc.settings(message)\n i2c.settings(message)\n\n if message['type'] == 'Scan': # Change scan for a device\n scanForDevices = message['value']\n\n dataMessage = {'type': 'D', 'data': []} # Create data message\n for device in inputList: # Create input device data message\n dataMessage['data'].append({'name': device['name'], 'values': device['vals']})\n for device in outputList: # Create output device data message\n dataMessage['data'].append({'name': device['name'], 'values': device['vals']})\n for device in pwmList: # Create pwm device data message\n dataMessage['data'].append({'name': device['name'], 'values': device['vals']})\n for device in adcList: # Create adc device data message\n dataMessage['data'].append({'name': device['name'], 'values': device['vals']})\n for device in i2cList: # Create i2c device data message\n dataMessage['data'].append({'name': device['name'], 'values': device['vals']})\n\n if len(dataMessage['data']) > 0:\n messagesSend.append(json.dumps(dataMessage)) # Send data message\n\n if c.getState() == 'Connected':\n c.sendMessages(messagesSend) # Send the messages\n endTime = time.time() # Save end time of update cycle\n updateDuration = endTime - startTime # Calculate time used to update values\n\n if (updateDuration < UPDATE_PERIODE):\n time.sleep(UPDATE_PERIODE - updateDuration) # Sleep until next update period\n\n if exit: # Exit\n break;", "title": "" }, { "docid": "decfe95164060b5d31a07ec705fa4b1d", "score": "0.5127697", "text": "def generate_payload(self, command, data=None):\n #print('Generating payload:')\n json_data = payload_dict[self.dev_type][command]['command']\n command_hb = payload_dict[self.dev_type][command]['hexByte']\n \n \n clock.ntp_sync()\n \n #print(\"json_data: \", json_data)\n\n if 'gwId' in json_data:\n json_data['gwId'] = self.id\n if 'devId' in json_data:\n json_data['devId'] = self.id\n if 'uid' in json_data:\n json_data['uid'] = self.id # still use id, no seperate uid\n if 't' in json_data:\n json_data['t'] = str(clock.utcnow()) #\"1626017050\" \n\n if data is not None:\n json_data['dps'] = data\n \n # Create byte buffer from hex data\n json_payload = json.dumps(json_data)\n json_payload = json_payload.replace(' ', '') # if spaces are not removed device does not respond!\n json_payload = json_payload.encode('utf-8')\n \n #json_payload = 
b'{\"devId\":\"52585621c4dd571b91ea\",\"uid\":\"52585621c4dd571b91ea\",\"t\":\"1626418169\",\"dps\":{\"1\":false}}'\n print('json_payload=', json_payload)\n \n #if command == SET:\n #if command == STATUS:\n # need to encrypt\n self.cipher = AESCipher(self.local_key) # expect to connect and then disconnect to set new\n payload = self.cipher.encrypt(json_payload)\n self.cipher = None # expect to connect and then disconnect to set new\n \n if command_hb != '0a' and command_hb != '12':\n # add the 3.3 header\n payload = PROTOCOL_33_HEADER + payload\n\n cipher = payload\n \n #print(\"cipher:\\n\",cipher)\n\n assert len(cipher) <= 0xff\n \n payload_hex_len = '%x' % (len(cipher) + 8) # TODO this assumes a single byte 0-255 (0x00-0xff)\n \n #print(\"payload_hex_len: \", payload_hex_len)\n \n buffer = hex2bin( payload_dict[self.dev_type]['prefix'] +\n '00000000' +\n '000000' +\n payload_dict[self.dev_type][command]['hexByte'] +\n '000000' + payload_hex_len ) \n \n #print(\"buffer: prefix + seq + cmd + len:\\n\", bin2hex(buffer))\n \n buffer += cipher\n \n #print(\"buffer: prefix + seq + cmd + len + cipher:\\n\", bin2hex(buffer))\n \n crc = ubinascii.crc32(buffer)\n \n crc_hex = '%x' % crc #hex(crc)\n \n #print(\"hex crc32: \", crc_hex)\n \n buffer += hex2bin(crc_hex)\n \n buffer += hex2bin(payload_dict[self.dev_type]['suffix'])\n \n print(\"Final buffer:\\n\", bin2hex(buffer))\n \n return buffer", "title": "" }, { "docid": "a515be1578285c9ee81f47f5ac0a53ab", "score": "0.5120891", "text": "def on_touch(moqs, obj, msg):\n obj.logger_debug(\"MQTT: topic: {}, data: {}\".format(msg.topic, msg.payload.decode(\"utf-8\")))\n default_values = [DATA_NONE] * 254\n try:\n if obj.config[\"use_registers\"]:\n raw = json.loads(msg.payload.decode(\"utf-8\"))\n values = list()\n values.append(raw[\"button\"])\n elif obj.config[\"use_msgpack\"]:\n raw = json.loads(msg.payload.decode(\"utf-8\"))\n packed = msgpack.packb(raw)\n\n values = [v for v in packed]\n else: # if msgpack or registers are not allow use bare json in ascii\n values = [ord(v) for v in msg.payload.decode(\"utf-8\")]\n aas.logger_debug(\"Buttons: new values: \" + str(values))\n # setup default values and then set received values\n obj.context[aas.config[\"slave_id\"][\"buttons\"]].setValues(3, 0, default_values)\n obj.context[aas.config[\"slave_id\"][\"buttons\"]].setValues(4, 0, default_values)\n obj.context[aas.config[\"slave_id\"][\"buttons\"]].setValues(3, 0, values)\n obj.context[aas.config[\"slave_id\"][\"buttons\"]].setValues(4, 0, values)\n except:\n obj.logger_error(\"MQTT: received msq cannot be processed\")", "title": "" }, { "docid": "0103cb96d7a36ecc17bba37d9d1fcfa1", "score": "0.50767976", "text": "def _process_message(data):\n\n \n reading_date = parser.parse(data['time'])\n try:\n # Check if the reading for this time already exists (ignore if so)\n Reading.objects.get(time = reading_date)\n return False\n except:\n pass\n \n # Save the reading into the DB\n reading = Reading()\n reading.time = reading_date\n reading.temperature = Decimal(str(data['temperature']))\n reading.meter_id = data['sensor_id']\n reading.meter_type = int(data['meter_type'])\n reading.ch1_wattage = Decimal(str(data['ch1']))\n if data.has_key('ch2'):\n reading.ch2_wattage = Decimal(str(data['ch2']))\n if data.has_key('ch3'):\n reading.ch3_wattage = Decimal(str(data['ch3']))\n \n reading.save()", "title": "" }, { "docid": "c33abc9f648c488cbe960ea6c1723f85", "score": "0.50399524", "text": "def emissionOn(self, c, sensor):\n print(sensor)\n dev = 
self.selectedDevice(c)\n yield dev.rw_line('#0031U'+sensor+'\\r')", "title": "" }, { "docid": "19da72324afbd48b2954794fe0f4b2f6", "score": "0.5036905", "text": "def receive_msg(self, device: str) -> Dict[float, float]:\n raise NotImplementedError", "title": "" }, { "docid": "b5cce4fcace7c9b7e026a52361bad2d5", "score": "0.50199866", "text": "def add_doc(msg):\n if not msg.topic.startswith(\"sensor_data/\"):\n return\n device_id = msg.topic.split('/')[0]\n body = {\n \"device_id\": device_id,\n \"timestamp\": datetime.utcnow()\n }\n if msg.topic.endswith('moist'):\n body['moisture'] = analog_to_percent(str(msg.payload.decode(\"utf-8\")))\n body['raw_moist'] = str(msg.payload.decode(\"utf-8\"))\n elif msg.topic.endswith('temp'):\n body['raw_temp'] = str(msg.payload.decode(\"utf-8\"))\n elif msg.topic.startswith('sensor_data'):\n body['device_id'] = msg.topic.split('/')[1]\n try:\n \"\"\" {\"soil_moist\": \"\", \"raw_temp\": \"\", \"air_hum\": \"\", \"air_temp\": \"\", \"ip_addr\": \"\", \"device_name\": \"\"} \"\"\"\n result = json.loads(str(msg.payload.decode(\"utf-8\")))\n body['moisture'] = analog_to_percent(result.get(\"soil_moist\", 0))\n body['raw_moisture'] = result.get(\"soil_moist\", 0)\n body['raw_temp'] = result.get(\"raw_temp\", 0)\n body['air_humidity'] = result.get(\"air_hum\", 0)\n body['air_temp'] = result.get(\"air_temp\", 0)\n body['ip_address'] = result.get(\"ip_addr\", '0.0.0.0')\n body['device_name'] = result.get(\"device_name\", 'New Device')\n except:\n traceback.print_exc()\n else:\n try:\n \"\"\" {\"soil_moist\": \"\", \"raw_temp\": \"\", \"air_hum\": \"\", \"air_temp\": \"\", \"ip_addr\": \"\"} \"\"\"\n result = json.loads(str(msg.payload.decode(\"utf-8\")))\n body['moisture'] = analog_to_percent(result.get(\"soil_moist\", 0))\n body['raw_moisture'] = result.get(\"soil_moist\", 0)\n body['raw_temp'] = result.get(\"raw_temp\", 0)\n body['air_humidity'] = result.get(\"air_hum\", 0)\n body['air_temp'] = result.get(\"air_temp\", 0)\n body['ip_address'] = result.get(\"ip_addr\", '0.0.0.0')\n except:\n traceback.print_exc()\n try:\n es.index(index=configs['default']['index_name'], body=body)\n except:\n logging.error(traceback.format_exc())\n try:\n alerter.check_alert(body.get('device_id'), body.get(\"device_name\", \"Unknown Device\"), body)\n except:\n logging.error(traceback.format_exc())", "title": "" }, { "docid": "db9b5ef2263e37572381c7aed8bd8b5a", "score": "0.50180006", "text": "def _build_parsed_values(self):\n \n SENSOR = \"Sensor\"\n TYPE = \"type\"\n ID = \"id\"\n PCB_SERIAL_NUMBER = \"PCBSerialNum\"\n ASSEMBLY_NUMBER = \"AssemblyNum\"\n SERIAL_NUMBER = \"SerialNumber\"\n FIRMWARE_VERSION = \"FirmwareVersion\"\n FIRMWARE_DATE = \"FirmwareDate\"\n COMMAND_SET_VERSION = \"CommandSetVersion\"\n PCB_ASSEMBLY = \"PCBAssembly\"\n MANUFATURE_DATE = \"MfgDate\"\n INTERNAL_SENSORS = \"InternalSensors\"\n TEMPERATURE_SENSOR_ID = \"Main Temperature\"\n CONDUCTIVITY_SENSOR_ID = \"Main Conductivity\"\n PRESSURE_SENSOR_ID = \"Main Pressure\"\n \n # check to make sure there is a correct match before continuing\n match = SBE16HardwareDataParticle.regex_compiled().match(self.raw_data)\n if not match:\n raise SampleException(\"No regex match of parsed hardware data: [%s]\" %\n self.raw_data)\n \n dom = parseString(self.raw_data)\n root = dom.documentElement\n log.debug(\"root.tagName = %s\" %root.tagName)\n serial_number = int(root.getAttribute(SERIAL_NUMBER))\n \n firmware_version = self._extract_xml_element_value(root, FIRMWARE_VERSION)\n firmware_date = 
self._extract_xml_element_value(root, FIRMWARE_DATE)\n command_set_version = self._extract_xml_element_value(root, COMMAND_SET_VERSION)\n manufacture_date = self._extract_xml_element_value(root, MANUFATURE_DATE)\n \n pcb_assembly_elements = self._extract_xml_elements(root, PCB_ASSEMBLY)\n pcb_serial_number = []\n pcb_assembly = []\n for assembly in pcb_assembly_elements:\n pcb_serial_number.append(assembly.getAttribute(PCB_SERIAL_NUMBER))\n pcb_assembly.append(assembly.getAttribute(ASSEMBLY_NUMBER))\n \n internal_sensors_element = self._extract_xml_elements(root, INTERNAL_SENSORS)[0]\n sensors = self._extract_xml_elements(internal_sensors_element, SENSOR)\n for sensor in sensors:\n sensor_id = sensor.getAttribute(ID)\n if sensor_id == TEMPERATURE_SENSOR_ID:\n temperature_sensor_serial_number = int(self._extract_xml_element_value(sensor, SERIAL_NUMBER))\n elif sensor_id == CONDUCTIVITY_SENSOR_ID:\n conductivity_sensor_serial_number = int(self._extract_xml_element_value(sensor, SERIAL_NUMBER))\n elif sensor_id == PRESSURE_SENSOR_ID:\n pressure_sensor_serial_number = int(self._extract_xml_element_value(sensor, SERIAL_NUMBER))\n pressure_sensor_type = self._extract_xml_element_value(sensor, TYPE) \n\n result = [{DataParticleKey.VALUE_ID: SBE16HardwareDataParticleKey.SERIAL_NUMBER,\n DataParticleKey.VALUE: serial_number},\n {DataParticleKey.VALUE_ID: SBE16HardwareDataParticleKey.FIRMWARE_VERSION,\n DataParticleKey.VALUE: firmware_version},\n {DataParticleKey.VALUE_ID: SBE16HardwareDataParticleKey.FIRMWARE_DATE,\n DataParticleKey.VALUE: firmware_date},\n {DataParticleKey.VALUE_ID: SBE16HardwareDataParticleKey.COMMAND_SET_VERSION,\n DataParticleKey.VALUE: command_set_version},\n {DataParticleKey.VALUE_ID: SBE16HardwareDataParticleKey.MANUFATURE_DATE,\n DataParticleKey.VALUE: manufacture_date},\n {DataParticleKey.VALUE_ID: SBE16HardwareDataParticleKey.PCB_SERIAL_NUMBER,\n DataParticleKey.VALUE: pcb_serial_number},\n {DataParticleKey.VALUE_ID: SBE16HardwareDataParticleKey.ASSEMBLY_NUMBER,\n DataParticleKey.VALUE: pcb_assembly},\n {DataParticleKey.VALUE_ID: SBE16HardwareDataParticleKey.TEMPERATURE_SENSOR_SERIAL_NUMBER,\n DataParticleKey.VALUE: temperature_sensor_serial_number},\n {DataParticleKey.VALUE_ID: SBE16HardwareDataParticleKey.CONDUCTIVITY_SENSOR_SERIAL_NUMBER,\n DataParticleKey.VALUE: conductivity_sensor_serial_number},\n {DataParticleKey.VALUE_ID: SBE16HardwareDataParticleKey.QUARTZ_PRESSURE_SENSOR_SERIAL_NUMBER,\n DataParticleKey.VALUE: pressure_sensor_serial_number},\n {DataParticleKey.VALUE_ID: SBE16HardwareDataParticleKey.PRESSURE_SENSOR_TYPE,\n DataParticleKey.VALUE: pressure_sensor_type},\n ]\n \n return result", "title": "" }, { "docid": "86da896164e23e78364d8355455b1573", "score": "0.50143576", "text": "def deserialize(self, str):\n try:\n if self.devices is None:\n self.devices = None\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.devices = []\n for i in range(0, length):\n val1 = rocon_device_msgs.msg.Device()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.label = str[start:end].decode('utf-8')\n else:\n val1.label = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.type = str[start:end].decode('utf-8')\n else:\n val1.type = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.uuid = 
str[start:end].decode('utf-8')\n else:\n val1.uuid = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.data = []\n for i in range(0, length):\n val2 = rocon_std_msgs.msg.KeyValue()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.key = str[start:end].decode('utf-8')\n else:\n val2.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.value = str[start:end].decode('utf-8')\n else:\n val2.value = str[start:end]\n val1.data.append(val2)\n self.devices.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "title": "" }, { "docid": "9e742fb86fd3a780802c114a346ad1de", "score": "0.49881446", "text": "def send_39(msg):\n telemetry_address_hex = hex(int(msg.topic.split('/')[2]))\n telemetry_address = telemetry_address_hex[2:].upper().zfill(6)\n logging.debug('_39s: send_39 telemetry_address={0}'.format(telemetry_address))\n # Fill DI with 0s not used\n data_packet = '00000000'\n # Get elements from mqtt:\n data = msg.payload.split(',')\n for element in data:\n try:\n element_hex = hex(int(element))\n except ValueError:\n logging.warning('_39s: Not able to convert Element {0} to hex'.format(element))\n # Remove '0x'\n element_string = str(element_hex)[2:]\n if len(element_string) > 4:\n # Fix length to 4 chars\n logging.warning('_39s: Element {0} longer than 4 chars, fixing!'.format(element_string))\n element_string = element_string[:4]\n element_string = element_string.rjust(4, '0')\n logging.debug('_39s: send_39 element:, hex={0}, hex string={1}'.format(element_hex, element_string))\n data_packet += element_string\n data_packet = data_packet.ljust(64, '0')\n _39 = 'GET /A/B/7F26{0}39{1} HTTP/1.1'.format(telemetry_address, data_packet)\n\n try:\n s = socket.socket()\n s.connect(('nebulalisten.com', 3001))\n logging.debug('_39 to send: {0}'.format(_39))\n s.send(_39)\n r = s.recv(255)\n s.close()\n logging.debug('Rx from server: {0}'.format(r))\n if r == 'Respuesta=9':\n pass\n else:\n raise\n except Exception as e:\n logging.warning('Failed to open connection to server! Error = %s', e.__str__())", "title": "" }, { "docid": "fa3cc9180c5e444af50f742926253a40", "score": "0.4987267", "text": "def send_inputs(client, devices):\n print('About to prepare inputs')\n (x_train, y_train), (x_test, y_test) = prepare_inputs()\n print('Prepared inputs')\n inputs = x_train[:4] # ? Only getting 4 for dev purposes\n outputs = y_train[:4] # ? 
Only getting 4 for dev purposes\n\n print('@@ EXPECTED OUTPUTS @@')\n print(outputs)\n print('@@ ---------------- @@')\n device = devices[0]\n for image in inputs:\n image = np.array([image])\n task = {\n 'data': image.tolist(),\n 'for': devices[1:], # * List of recipients (aka 1 to last)\n 'is_inferencing': True\n }\n client.publish(device + '/tasks', json.dumps(task))", "title": "" }, { "docid": "4d3f209659936b0665fc9e99b22bfa08", "score": "0.49846217", "text": "def sensors(self, packet_id):\n # Need to make sure the packet_id is a string\n packet_id = str(packet_id)\n # Check to make sure that the packet ID is valid.\n if packet_id in self.config.data['sensor group packet lengths']:\n # Valid packet, send request (But convert it back to an int in a list first)\n packet_id = [int(packet_id)]\n self.SCI.send(self.config.data['opcodes']['sensors'], tuple(packet_id))\n else:\n raise _ROIFailedToSendError(\"Invalid packet id, failed to send\")", "title": "" }, { "docid": "849427c93bf9c43def7441d90106a62a", "score": "0.49795005", "text": "def to(self, device):\n for data in self.data_list:\n data.to(device)", "title": "" }, { "docid": "5ba61e18eaa795484dd4dbef1d37ea70", "score": "0.49792498", "text": "def cmd_publish(self): \n msg = dynamixel_command_list_t()\n msg.len = 6\n self.clamp()\n for i in range(msg.len):\n cmd = dynamixel_command_t()\n cmd.utime = int(time.time() * 1e6)\n cmd.position_radians = self.joint_angles[i]\n # you SHOULD change this to contorl each joint speed separately \n cmd.speed = self.speed\n cmd.max_torque = self.max_torque\n #print cmd.position_radians\n msg.commands.append(cmd)\n self.lc.publish(\"ARM_COMMAND\",msg.encode())", "title": "" }, { "docid": "cc42b5d9fd748d05d6b09affb534f1c2", "score": "0.4977282", "text": "def feed_devices(self) -> google.protobuf.internal.containers.ScalarMap[builtins.str, builtins.str]:", "title": "" }, { "docid": "80909c9a0d0102da55984db12be69eb2", "score": "0.49751556", "text": "def read_mavlink(self, name, message):\n import math\n\n # Update date and time\n now = datetime.datetime.now()\n\n if name == 'SYSTEM_TIME':\n self._sensor_data.time_unix_usec = datetime.datetime.fromtimestamp(\n message.time_unix_usec / 1000000.).strftime('%Y-%m-%d %H:%M:%S.%f') # %Y%m%d%H%M%S%f\n # self._sensor_data.date = str(now.strftime('%d%m%y%H%M%S'))\n self._sensor_data.millis = message.time_unix_usec # Timestamp in microseconds since UNIX epoch\n\n # Allow data export\n if self._sensor_data.flag:\n self._sensor_data.export_csv()\n\n # First IMU\n elif name == 'RAW_IMU':\n self._sensor_data.ax = message.xacc\n self._sensor_data.ay = message.yacc\n self._sensor_data.az = message.zacc\n self._sensor_data.gx = message.xgyro\n self._sensor_data.gy = message.ygyro\n self._sensor_data.gz = message.zgyro\n self._sensor_data.mx = message.xmag\n self._sensor_data.my = message.ymag\n self._sensor_data.mz = message.zmag\n\n # Second IMU\n elif name == 'SCALED_IMU2':\n self._sensor_data.accx = message.xacc\n self._sensor_data.accy = message.yacc\n self._sensor_data.accz = message.zacc\n self._sensor_data.gyrox = message.xgyro\n self._sensor_data.gyroy = message.ygyro\n self._sensor_data.gyroz = message.zgyro\n self._sensor_data.magx = message.xmag\n self._sensor_data.magy = message.ymag\n self._sensor_data.magz = message.zmag\n\n elif name == 'ATTITUDE':\n self._sensor_data.roll_rate = math.degrees(message.rollspeed)\n self._sensor_data.pitch_rate = math.degrees(message.pitchspeed)\n self._sensor_data.yaw_rate = math.degrees(message.yawspeed)\n 
self._sensor_data.roll = math.degrees(message.roll)\n self._sensor_data.pitch = math.degrees(message.pitch)\n self._sensor_data.yaw = math.degrees(message.yaw)\n\n elif name == 'LOCAL_POSITION_NED':\n self._sensor_data.x = message.x\n self._sensor_data.y = message.y\n self._sensor_data.z = message.z\n self._sensor_data.vx = message.vx\n self._sensor_data.vy = message.vy\n self._sensor_data.vz = message.vz\n\n elif name == 'GLOBAL_POSITION_INT':\n self._sensor_data.lat = message.lat # degrees * 1E7\n self._sensor_data.lon = message.lon # degrees * 1E7\n self._sensor_data.alt = message.alt # meters\n self._sensor_data.relative_alt = message.relative_alt # meters\n self._sensor_data.gps_vx = message.vx # m/s * 100\n self._sensor_data.gps_vy = message.vy # m/s * 100\n self._sensor_data.gps_vz = message.vz # m/s * 100\n self._sensor_data.hdg = message.hdg # Heading\n\n elif name == 'GPS_RAW_INT':\n self._sensor_data.speed = message.vel # m/s * 100\n self._sensor_data.latitude = message.lat # degrees * 1E7\n self._sensor_data.longitude = message.lon # degrees * 1E7\n self._sensor_data.altitude = message.alt # meter * 1000\n self._sensor_data.course = message.cog # Direction of movement in degrees, not heading.\n self._sensor_data.hdop = message.eph\n self._sensor_data.vdop = message.epv\n self._sensor_data.fix = message.fix_type\n self._sensor_data.sat_view = message.satellites_visible\n\n elif name == 'GPS_STATUS':\n self._sensor_data.snr = message.satellite_snr\n\n elif name == 'VFR_HUD':\n self._sensor_data.airspeed = message.airspeed # m/s\n self._sensor_data.groundspeed = message.groundspeed # m/s\n self._sensor_data.heading = message.heading # Heading in degrees\n self._sensor_data.throttle = message.throttle # percent\n self._sensor_data.barometer = message.alt # meters\n self._sensor_data.climb = message.climb # meters/second\n\n elif name == 'SCALED_PRESSURE':\n self._sensor_data.press_abs = message.press_abs\n self._sensor_data.press_diff = message.press_diff\n self._sensor_data.temp = message.temperature\n\n elif name == 'NAV_CONTROLLER_OUTPUT':\n self._sensor_data.desired_roll = message.nav_roll\n self._sensor_data.desired_pitch = message.nav_pitch\n self._sensor_data.desired_heading = message.nav_bearing\n self._sensor_data.target_bearing = message.target_bearing\n self._sensor_data.wp_dist = message.wp_dist\n self._sensor_data.alt_error = message.alt_error\n self._sensor_data.aspd_error = message.aspd_error\n self._sensor_data.xtrack_error = message.xtrack_error\n\n elif name == 'VIBRATION':\n self._sensor_data.vibration_x = message.vibration_x\n self._sensor_data.vibration_y = message.vibration_y\n self._sensor_data.vibration_z = message.vibration_z\n\n\n # Notify all observers of new message (with new value)\n # Note that argument `cache=False` by default so listeners\n # are updated with every new message\n self.notify_attribute_listeners('sensor_data', self._sensor_data)", "title": "" }, { "docid": "d2d3e891336a37d56c06ecd26f8f3182", "score": "0.49706167", "text": "def test_pulsar_keyed_messages():\n\n dataset = tfio.experimental.streaming.PulsarIODataset(\n service_url=\"pulsar://localhost:6650\",\n topic=\"key-test\",\n subscription=\"subscription-0\",\n timeout=default_pulsar_timeout,\n )\n kv = dict()\n for msg, key in dataset:\n kv.setdefault(key.numpy().decode(), []).append(msg.numpy())\n assert kv[\"K0\"] == [(\"D\" + str(i)).encode() for i in range(0, 6, 2)]\n assert kv[\"K1\"] == [(\"D\" + str(i)).encode() for i in range(1, 6, 2)]", "title": "" }, { "docid": 
"8a93ef705ce6348d857cd7267f733a9b", "score": "0.49436262", "text": "def _build_parsed_values(self):\n\n SENSOR = \"Sensor\"\n TYPE = \"type\"\n ID = \"id\"\n PCB_SERIAL_NUMBER = \"PCBSerialNum\"\n ASSEMBLY_NUMBER = \"AssemblyNum\"\n SERIAL_NUMBER = \"SerialNumber\"\n FIRMWARE_VERSION = \"FirmwareVersion\"\n FIRMWARE_DATE = \"FirmwareDate\"\n COMMAND_SET_VERSION = \"CommandSetVersion\"\n PCB_ASSEMBLY = \"PCBAssembly\"\n MANUFACTURE_DATE = \"MfgDate\"\n INTERNAL_SENSORS = \"InternalSensors\"\n TEMPERATURE_SENSOR_ID = \"Main Temperature\"\n CONDUCTIVITY_SENSOR_ID = \"Main Conductivity\"\n PRESSURE_SENSOR_ID = \"Main Pressure\"\n EXTERNAL_SENSORS = \"ExternalSensors\"\n VOLT0 = \"volt 0\"\n VOLT1 = \"volt 1\"\n\n # check to make sure there is a correct match before continuing\n match = SBE16NOHardwareParticle.regex_compiled().match(self.raw_data)\n if not match:\n raise SampleException(\"No regex match of parsed hardware data: [%s]\" %\n self.raw_data)\n\n dom = parseString(self.raw_data)\n root = dom.documentElement\n log.debug(\"root.tagName = %s\", root.tagName)\n serial_number = root.getAttribute(SERIAL_NUMBER)\n\n firmware_version = self._extract_xml_element_value(root, FIRMWARE_VERSION)\n firmware_date = self._extract_xml_element_value(root, FIRMWARE_DATE)\n command_set_version = self._extract_xml_element_value(root, COMMAND_SET_VERSION)\n manufacture_date = self._extract_xml_element_value(root, MANUFACTURE_DATE)\n\n pcb_assembly_elements = self._extract_xml_elements(root, PCB_ASSEMBLY)\n pcb_serial_number = []\n pcb_assembly = []\n for assembly in pcb_assembly_elements:\n pcb_serial_number.append(assembly.getAttribute(PCB_SERIAL_NUMBER))\n pcb_assembly.append(assembly.getAttribute(ASSEMBLY_NUMBER))\n\n temperature_sensor_serial_number = \"\"\n conductivity_sensor_serial_number = \"\"\n pressure_sensor_serial_number = \"\"\n pressure_sensor_type = \"\"\n volt0_serial_number = 0\n volt0_type = \"\"\n volt1_serial_number = 0\n volt1_type = \"\"\n\n internal_sensors_element = self._extract_xml_elements(root, INTERNAL_SENSORS)[0]\n sensors = self._extract_xml_elements(internal_sensors_element, SENSOR)\n\n for sensor in sensors:\n sensor_id = sensor.getAttribute(ID)\n if sensor_id == TEMPERATURE_SENSOR_ID:\n temperature_sensor_serial_number = self._extract_xml_element_value(sensor, SERIAL_NUMBER)\n elif sensor_id == CONDUCTIVITY_SENSOR_ID:\n conductivity_sensor_serial_number = self._extract_xml_element_value(sensor, SERIAL_NUMBER)\n elif sensor_id == PRESSURE_SENSOR_ID:\n pressure_sensor_serial_number = str(self._extract_xml_element_value(sensor, SERIAL_NUMBER))\n pressure_sensor_type = self._extract_xml_element_value(sensor, TYPE)\n\n external_sensors_element = self._extract_xml_elements(root, EXTERNAL_SENSORS)[0]\n sensors = self._extract_xml_elements(external_sensors_element, SENSOR)\n\n for sensor in sensors:\n sensor_id = sensor.getAttribute(ID)\n if sensor_id == VOLT0:\n volt0_serial_number = self._extract_xml_element_value(sensor, SERIAL_NUMBER)\n volt0_type = self._extract_xml_element_value(sensor, TYPE)\n elif sensor_id == VOLT1:\n volt1_serial_number = self._extract_xml_element_value(sensor, SERIAL_NUMBER)\n volt1_type = self._extract_xml_element_value(sensor, TYPE)\n\n result = [{DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.SERIAL_NUMBER,\n DataParticleKey.VALUE: str(serial_number)},\n {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.FIRMWARE_VERSION,\n DataParticleKey.VALUE: firmware_version},\n {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.FIRMWARE_DATE,\n 
DataParticleKey.VALUE: firmware_date},\n {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.COMMAND_SET_VERSION,\n DataParticleKey.VALUE: command_set_version},\n {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.MANUFACTURE_DATE,\n DataParticleKey.VALUE: manufacture_date},\n {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.PCB_SERIAL_NUMBER,\n DataParticleKey.VALUE: pcb_serial_number},\n {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.ASSEMBLY_NUMBER,\n DataParticleKey.VALUE: pcb_assembly},\n {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.TEMPERATURE_SENSOR_SERIAL_NUMBER,\n DataParticleKey.VALUE: temperature_sensor_serial_number},\n {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.CONDUCTIVITY_SENSOR_SERIAL_NUMBER,\n DataParticleKey.VALUE: conductivity_sensor_serial_number},\n {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.PRESSURE_SENSOR_SERIAL_NUMBER,\n DataParticleKey.VALUE: pressure_sensor_serial_number},\n {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.PRESSURE_SENSOR_TYPE,\n DataParticleKey.VALUE: pressure_sensor_type},\n {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.VOLT0_SERIAL_NUMBER,\n DataParticleKey.VALUE: volt0_serial_number},\n {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.VOLT0_TYPE,\n DataParticleKey.VALUE: volt0_type},\n {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.VOLT1_SERIAL_NUMBER,\n DataParticleKey.VALUE: volt1_serial_number},\n {DataParticleKey.VALUE_ID: SBE16NOHardwareParticleKey.VOLT1_TYPE,\n DataParticleKey.VALUE: volt1_type}]\n\n return result", "title": "" }, { "docid": "ffe7140b94345526727371d8f2d98b06", "score": "0.49171403", "text": "def sensor(self):", "title": "" }, { "docid": "a2f3889f495ce645569446de2f489f54", "score": "0.49131998", "text": "def test_list_message(self):\n sensor = self._get_dummy_sensor()\n msg_list = sensor.create_event_messages({'test_1': 23.3, 'test_2': 'wat'})\n msg_0 = json.loads(msg_list[0])\n msg_1 = json.loads(msg_list[1])\n assert msg_0['value_type'] == 'float'\n assert msg_0['value'] == '23.3'\n assert msg_1['value_type'] == 'str'\n assert msg_1['value'] == 'wat'", "title": "" }, { "docid": "d4abdd9a71aa9136a0cbed745e5dfbd1", "score": "0.4896513", "text": "def send_all_devices(self, title: str, body: str, *args: str, **data: dict) -> requests:\n sent_token = {}\n for item in self.tokens_firebase:\n sent_token[item] = super(PushNotificationsFirebaseBroadcast, self).send(item, title, body, *args, **data)\n return sent_token", "title": "" }, { "docid": "35e42aa344760b46b8d949a4258031eb", "score": "0.4892687", "text": "def receive_sensor_data(self, data):\n for i in data:\n self.file.write(\"%03f\" % i)", "title": "" }, { "docid": "b5378bfa485f47570aff335ed4bdd386", "score": "0.4885033", "text": "def command_robot_speak(self, message):\n for conn in cl:\n conn.write_message(json.dumps(message))", "title": "" }, { "docid": "ae9f982505814c4ef3fe1b58cca0ff56", "score": "0.48763394", "text": "def callback_muscle_spindles_to_motoneurons(input_values, output_values, current_time, slot_nos, buffer):\n # mapping muscle spindles output -> motor neuron signals, delay signals by \n \n # get number of input and output values\n n_input_values = len(input_values) # = n_muscle_spindles\n n_output_values = len(output_values[0]) # = n_muscle_spindles (per output slot if there are multiple)\n \n # initialize buffer the first time\n if 0 not in buffer:\n for muscle_spindle_index in range(n_input_values):\n buffer[muscle_spindle_index] = None\n \n # loop over muscle spindles\n for 
muscle_spindle_index in range(n_input_values):\n \n # determine spike by threshold\n if input_values[muscle_spindle_index] > 0:\n buffer[muscle_spindle_index] = current_time # store time of last activation in buffer\n \n # if there has been a stimulation so far\n if buffer[muscle_spindle_index] is not None:\n \n # convolute Dirac delta, kernel is a shifted and scaled gaussian\n t_delay = muscle_spindle_delay # [ms] delay of the signal\n gaussian_std_dev = 10 # [ms] width of the gaussian curve\n convolution_kernel = lambda t: np.exp(-0.5 * ((t - t_delay) / gaussian_std_dev)**2)\n delayed_signal = convolution_kernel(current_time - buffer[muscle_spindle_index]) * 5\n \n # loop over output values and set all to the computed signal, cut off at 1e-5\n if delayed_signal > 1e-5:\n #print(\"muscle spindle t: {}, last_activation: {}, computed delayed_signal: {}\".format(current_time, buffer[muscle_spindle_index], delayed_signal))\n output_values[0][muscle_spindle_index] = delayed_signal\n else:\n output_values[0][muscle_spindle_index] = None # do not set any values\n \n #print(\"muscle_spindles_to_motoneurons: {} -> {}\".format(input_values, output_values))", "title": "" }, { "docid": "0992f3efb52b0d14898ec9b91fd0411f", "score": "0.48650762", "text": "def on_reader_status(moqs, obj, msg):\n obj.logger_debug(\"MQTT: topic: {}, data: {}\".format(msg.topic, msg.payload.decode(\"utf-8\")))\n default_values = [DATA_NONE] * 254\n try:\n if obj.config[\"use_registers\"]:\n raw = json.loads(msg.payload.decode(\"utf-8\"))\n values = list()\n values.append(raw[\"write\"][\"sector\"])\n if raw[\"write\"][\"status\"] == \"OK\":\n values.append(1)\n elif raw[\"write\"][\"status\"] == \"NOK\":\n values.append(0)\n else:\n values.append(DATA_NONE)\n elif obj.config[\"use_msgpack\"]:\n raw = json.loads(msg.payload.decode(\"utf-8\"))\n packed = msgpack.packb(raw)\n\n values = [v for v in packed]\n else: # if msgpack or registers are not allow use bare json in ascii\n values = [ord(v) for v in msg.payload.decode(\"utf-8\")]\n aas.logger_debug(\"Reader status: new values: \" + str(values))\n # setup default values and then set received values\n obj.context[aas.config[\"slave_id\"][\"reader_status\"]].setValues(3, 0, default_values)\n obj.context[aas.config[\"slave_id\"][\"reader_status\"]].setValues(4, 0, default_values)\n obj.context[aas.config[\"slave_id\"][\"reader_status\"]].setValues(3, 0, values)\n obj.context[aas.config[\"slave_id\"][\"reader_status\"]].setValues(4, 0, values)\n except:\n obj.logger_error(\"MQTT: received msq cannot be processed\")", "title": "" }, { "docid": "93704691d8f3b63d702a4e565e7ee961", "score": "0.48562124", "text": "def make_configuration_message(\n self, device_key: str, configuration: dict\n ) -> Message:\n topic = (\n self.CONFIGURATION_STATUS + self.DEVICE_PATH_PREFIX + device_key\n )\n\n for reference, config_value in configuration.items():\n if isinstance(config_value, tuple):\n values_list = []\n for value in config_value:\n if \"\\n\" in str(value):\n value = value.replace(\"\\n\", \"\\\\n\")\n value = value.replace(\"\\r\", \"\")\n\n if '\"' in str(value):\n value = value.replace('\"', '\\\\\"')\n\n values_list.append(value)\n\n configuration[reference] = \",\".join(map(str, values_list))\n else:\n if isinstance(config_value, bool):\n config_value = str(config_value).lower()\n\n configuration[reference] = str(config_value)\n\n payload = json.dumps({\"values\": configuration})\n\n message = Message(topic, payload)\n self.log.debug(f\"Made {message} from {configuration} and 
{device_key}\")\n\n return message", "title": "" }, { "docid": "12ea07649215499bca27035d324a85c8", "score": "0.4854487", "text": "def handshake(self):\n\n log.debug('Handshaking.')\n\n # Data for sending\n send_data = bytes([0])\n\n # We are trying to disturb the device as long as self.handshake_repeat is\n handshake_repeat = 0\n\n while True:\n # If we have a key then use it\n if self.key is not None:\n send_data = [x ^ self.key for x in send_data]\n\n # Sending 0x00 or the encrypted variant to the device\n log.debug('Sending data: %s [Waiting for handshaking]' % bytes_to_hex_str(send_data))\n self.port.write(send_data)\n\n # Reading what is going on\n read_data = self.port.read(1)\n log.debug('Reading data: %s [Waiting for handshaking]' % bytes_to_hex_str(read_data))\n\n # If we can not read data back try to use the default key\n if len(read_data) == 0:\n self.key = 0x55\n\n # Send as long as we have data and 0x07 -> the device is ready for handshake\n if len(read_data) == 1 and read_data[0] == 0x07:\n # Exiting the while loop\n log.debug('Reading data: %s [Ready for handshaking]' % bytes_to_hex_str(read_data))\n break\n\n # We are trying as much as we like?\n handshake_repeat += 1\n if handshake_repeat >= self.handshake_repeat:\n raise MK312HandshakeException('Repeating to much times! Restart the device?')\n\n # We can setup a new key -> Sending a key, actually the key is fixed to 0x00\n send_data = [0x2f, 0x00]\n checksum = sum(send_data) % 256\n send_data.append(checksum)\n\n # Only encrypt if we have a key (if not, we do not know if the device has one)\n if self.key:\n send_data = [x ^ self.key for x in send_data]\n log.debug('Sending data: %s' % bytes_to_hex_str(send_data))\n self.port.write(bytes(send_data))\n\n read_data = self.port.read(3)\n log.debug('Reading data: %s' % bytes_to_hex_str(read_data))\n\n if len(read_data) == 0:\n # Maybe there was a key stored but we do not know it.\n # Normally you have to switch off the device and restart it after a long time.\n # If you do not have used a different key yet we can try to test our default key\n self.key = 0x55\n self.handshake()\n else:\n # Check the checksum from the device\n checksum = read_data[-1]\n s = sum(read_data[:-1]) % 256\n if s != checksum:\n # If the checksum is wrong, redo a handshake\n # raise MK312ChecksumException('Checksum is wrong: 0x%0.2X != 0x%0.2X' % (s, checksum))\n self.handshake()\n return\n\n # Key generation: 0x55 ^ their_key\n self.key = 0x55 ^ read_data[1]\n log.debug('Handshake successful key is now: 0x%0.2X' % self.key)\n\n return True", "title": "" }, { "docid": "752a175434d15e425fee36425b73adba", "score": "0.48463374", "text": "def GenerateMessage(multiple):\n data = np.zeros(512 * multiple)\n s = np.random.choice(len(data), np.random.randint(len(data)), replace=False)\n data[s] = 1\n return data", "title": "" }, { "docid": "dd51fa20084dc25c0b73023a7203b86d", "score": "0.483802", "text": "def event_zigbee_recv_handler(event):\n # '0xa0', '0xba', '0x51', '0x47', '0x5', '0xf3', '0x51', '0x47', '0x79', '0x0', '0x95'\n pack_list = event.data.get('data')\n if pack_list[0] == '0xa0' and pack_list[5] == '0xf3':\n mac_l, mac_h = pack_list[6].replace('0x',''), pack_list[7].replace('0x','')\n mac_str = mac_l + '#' + mac_h\n dev = next((dev for dev in panels if dev.mac == mac_str), None)\n if dev is not None:\n dev.set_available(True)\n triger_key = pack_list[9].replace('0x', '')\n triger_key = str(int(triger_key, 16))\n dev.trigger(triger_key)\n if pack_list[0] == '0xa0' and pack_list[5] == '0xf3' and 
pack_list[8] == '0xcc':\n mac_l, mac_h = pack_list[6].replace('0x',''), pack_list[7].replace('0x','')\n mac_str = mac_l + '#' + mac_h\n dev = next((dev for dev in panels if dev.mac == mac_str), None)\n if dev is not None:\n dev.set_available(True)\n dev.heart_beat()\n if pack_list[0] == '0xa0' and pack_list[5] == '0xf3' and pack_list[8] == '0x77':\n # device status\n mac_l, mac_h = pack_list[2].replace('0x', ''), pack_list[3].replace('0x', '')\n mac_str = mac_l + '#' + mac_h\n dev = next((dev for dev in panels if dev.mac == mac_str), None)\n if dev is None:\n return\n dev.set_available(True)\n dev.heart_beat()", "title": "" }, { "docid": "eb17801ccc5c8b8e79a07c278f3e5a5c", "score": "0.48326772", "text": "def GetCombinerSensorData(self): \n with self.lock:\n control = self.getControlByte(flags=(ACK + SO), sn=self.SequenceNumber, an=self.AckNumber)\n self.Message = self.CompileSmallMessage(control=control, command=9)\n ok = self.DoCommand()\n if not ok:\n return False\n self.CombinerTemperature = float(self.RxMessage[4] + (self.RxMessage[3] << 8)) / 10.0\n self.CombinerCurrent = float(self.RxMessage[2] + (self.RxMessage[1] << 8)) / 10.0\n logger.debug(\"(%s) Retrieved combiner sensor data\" % self.name)\n return True", "title": "" }, { "docid": "74b4750598e126b6fddb3b8c0b698bc3", "score": "0.48318005", "text": "def sendMessageGetDeviceDetected():\n data = [0] * CMTP_BUFFER_LEN\n dataLength = 254\n serviceID = 11\n dataString = \"\"\n for i, resultData in enumerate(data):\n if i % 4 == 0:\n dataString += \"\\n\"\n dataString += \"{0:0{1}X}: \".format(i, 4)\n dataString += \"{0:0{1}X} \".format(resultData, 2)\n else:\n dataString += \"{0:0{1}X} \".format(resultData, 2)\n Messages.write_message(\"Sending Service: {}\".format(serviceID))\n Messages.write_message(\"Sending Size: {}\".format(dataLength))\n Messages.write_message(\"Sending Data: \\n{}\".format(dataString))\n results = data_store.scenario[\"capiController\"].sendMessage(data, serviceID, dataLength)\n Messages.write_message(results[\"description\"])\n assert results[\"result\"] == 0, \"Send message failed\"\n Messages.write_message(\"Received Status: {}\".format(results[\"data\"][\"status\"]))\n Messages.write_message(\"Received Service: {}\".format(results[\"data\"][\"service\"]))\n Messages.write_message(\"Received Size: {}\".format(results[\"data\"][\"size\"]))\n dataString = \"\"\n for i, resultData in enumerate(results[\"data\"][\"data\"]):\n if i % 4 == 0:\n dataString += \"\\n\"\n dataString += \"{0:0{1}X}: \".format(i, 4)\n dataString += \"{0:0{1}X} \".format(resultData, 2)\n else:\n dataString += \"{0:0{1}X} \".format(resultData, 2)\n Messages.write_message(\"Received Data: \\n{}\".format(dataString))\n assert results[\"data\"][\"status\"] == 0, \"Status from response is non-zero - {}\".format(results[\"data\"][\"status\"])", "title": "" }, { "docid": "47b59d2fd1b728a05143a26b7837dc36", "score": "0.48293498", "text": "def get_sensors_data(self, task):", "title": "" }, { "docid": "10551f8ace5eab7e9541fecb9bcd3bc7", "score": "0.4826881", "text": "def sendMessageReadExplicit(dwConfID, dwCrc32, wDeviceNumber, dwApi, wSlotNumber, wSubSlotNumber, wIndex, wLengthDataToRead, dataLength):\n assert len(byMac.split(\":\")) == 6, \"MAC Address not correct format\"\n assert int(dataLength) < 255, \"dataLength is too large, needs to be less than 254\"\n data = [0] * CMTP_BUFFER_LEN\n dataLength = int(dataLength)\n serviceID = 32\n # set configuration ID\n dwConfIDOffset = 24 - 24\n for i, num in enumerate(struct.pack('<i', 
int(dwConfID))):\n data[i + dwConfIDOffset] = num\n # set crc 32\n dwCrc32Offset = 28 - 24\n for i, num in enumerate(struct.pack('<i', int(dwCrc32))):\n data[i + dwCrc32Offset] = num\n # set device number\n wDeviceNumberOffset = 32 - 24\n for i, num in enumerate(struct.pack('<h', int(wDeviceNumber))):\n data[i + wDeviceNumberOffset] = num\n # set api\n dwApiOffset = 34 - 24\n for i, num in enumerate(struct.pack('<i', int(dwApi))):\n data[i + dwApiOffset] = num\n # set slot number\n wSlotNumberOffset = 38 - 24\n for i, num in enumerate(struct.pack('<h', int(wSlotNumber))):\n data[i + wSlotNumberOffset] = num\n # set sub slot number\n wSubSlotNumberOffset = 40 - 24\n for i, num in enumerate(struct.pack('<h', int(wSubSlotNumber))):\n data[i + wSubSlotNumberOffset] = num\n # set index\n wIndexOffset = 42 - 24\n for i, num in enumerate(struct.pack('<h', int(wIndex))):\n data[i + wSubSlotNumberOffset] = num\n # set length data to read\n wLengthDataToReadOffset = 44 - 24\n for i, num in enumerate(struct.pack('<h', int(wLengthDataToRead))):\n data[i + wLengthDataToReadOffset] = num\n # init data string\n dataString = \"\"\n for i, resultData in enumerate(data):\n if i % 4 == 0:\n dataString += \"\\n\"\n dataString += \"{0:0{1}X}: \".format(i, 4)\n dataString += \"{0:0{1}X} \".format(resultData, 2)\n else:\n dataString += \"{0:0{1}X} \".format(resultData, 2)\n Messages.write_message(\"Sending Service: {}\".format(serviceID))\n Messages.write_message(\"Sending Size: {}\".format(dataLength))\n Messages.write_message(\"Sending Data: \\n{}\".format(dataString))\n results = data_store.scenario[\"capiController\"].sendMessage(data, serviceID, dataLength)\n Messages.write_message(results[\"description\"])\n assert results[\"result\"] == 0, \"Send message failed\"\n Messages.write_message(\"Received Status: {}\".format(results[\"data\"][\"status\"]))\n Messages.write_message(\"Received Service: {}\".format(results[\"data\"][\"service\"]))\n Messages.write_message(\"Received Size: {}\".format(results[\"data\"][\"size\"]))\n dataString = \"\"\n for i, resultData in enumerate(results[\"data\"][\"data\"]):\n if i % 4 == 0:\n dataString += \"\\n\"\n dataString += \"{0:0{1}X}: \".format(i, 4)\n dataString += \"{0:0{1}X} \".format(resultData, 2)\n else:\n dataString += \"{0:0{1}X} \".format(resultData, 2)\n Messages.write_message(\"Received Data: \\n{}\".format(dataString))\n assert results[\"data\"][\"status\"] == 0, \"Status from response is non-zero - {}\".format(results[\"data\"][\"status\"])", "title": "" }, { "docid": "e5f199ab9ad329b68cb6f84802a80e23", "score": "0.48142508", "text": "def send_sensor_data(temp, humidity):\n code = BasePactAdapter.build_code(\n \"raspberrypi\",\n \"update-temp-humidity\",\n \"admin-keyset\",\n **{\"temp\": temp, \"humidity\": humidity,\n \"keyset_name\": \"admin-keyset\",\n \"time\": format_current_time()}\n )\n print(code)\n result = api.send_and_listen(code, \"admin-keyset\")\n print(result)", "title": "" }, { "docid": "ba1f8e9b53892a1520d86dc92884f7d8", "score": "0.4813796", "text": "def scanThread():\n global c, scanForDevices, scanDuration\n while True: # Enter the infinite loop\n if not scanForDevices: # Check for scanning\n scanDuration = 0 # No scanning\n time.sleep(SCAN_PERIODE)\n continue\n startTime = time.time() # Save start time of update cycle\n\n messagesSend = [] # List of messages to send\n\n inputListRegister, inputListDeregister = inputScan() # Get the Input devices and events\n outputListRegister, outputListDeregister = outputScan() # Get the Output 
devices and events\n pwmListRegister, pwmListDeregister = pwmScan() # Get the PWM devices and events\n adcListRegister, adcListDeregister = adcScan() # Get the ADC devices and events\n i2cListRegister, i2cListDeregister = i2cScan() # Get the I2C devices and events\n\n\n for device in inputListDeregister: # Create input device deregister message\n messagesSend.append(json.dumps({'type': 'Deregister',\n 'name': device['name']}))\n\n\n\n for device in inputListRegister: # Create input device register message\n messagesSend.append(json.dumps({'type': 'Register',\n 'name': device['name'],\n 'dir': device['dir'],\n 'dim': device['dim'],\n 'about': device['about'],\n 'settings': device['settings'],\n 'mode': device['mode'],\n 'flags': device['flags'],\n 'frequency': device['frequency'],\n 'dutyFrequency': device['dutyFrequency']}))\n\n for device in outputListDeregister: # Create output device deregister message\n messagesSend.append(json.dumps({'type': 'Deregister',\n 'name': device['name']}))\n\n\n\n for device in outputListRegister: # Create output device register message\n messagesSend.append(json.dumps({'type': 'Register',\n 'name': device['name'],\n 'dir': device['dir'],\n 'dim': device['dim'],\n 'about': device['about'],\n 'settings': device['settings'],\n 'mode': device['mode'],\n 'flags': device['flags'],\n 'frequency': device['frequency'],\n 'dutyFrequency': device['dutyFrequency']}))\n\n for device in pwmListDeregister: # Create pwm device deregister message\n messagesSend.append(json.dumps({'type': 'Deregister',\n 'name': device['name']}))\n\n\n\n for device in pwmListRegister: # Create pwm device register message\n messagesSend.append(json.dumps({'type': 'Register',\n 'name': device['name'],\n 'dir': device['dir'],\n 'dim': device['dim'],\n 'about': device['about'],\n 'settings': device['settings'],\n 'mode': device['mode'],\n 'flags': device['flags'],\n 'frequency': device['frequency'],\n 'dutyFrequency': device['dutyFrequency']}))\n\n for device in adcListDeregister: # Create ADC device deregister message\n messagesSend.append(json.dumps({'type': 'Deregister',\n 'name': device['name']}))\n\n\n\n for device in adcListRegister: # Create ADC device register message\n messagesSend.append(json.dumps({'type': 'Register',\n 'name': device['name'],\n 'dir': device['dir'],\n 'dim': device['dim'],\n 'about': device['about'],\n 'settings': device['settings'],\n 'mode': device['mode'],\n 'flags': device['flags'],\n 'frequency': device['frequency'],\n 'dutyFrequency': device['dutyFrequency']}))\n\n for device in i2cListDeregister: # Create I2C device deregister message\n messagesSend.append(json.dumps({'type': 'Deregister',\n 'name': device['name']}))\n\n\n\n for device in i2cListRegister: # Create I2C device register message\n messagesSend.append(json.dumps({'type': 'Register',\n 'name': device['name'],\n 'dir': device['dir'],\n 'dim': device['dim'],\n 'about': device['about'],\n 'settings': device['settings'],\n 'mode': device['mode'],\n 'flags': device['flags'],\n 'frequency': device['frequency'],\n 'dutyFrequency': device['dutyFrequency']}))\n\n if c.getState() == 'Connected':\n c.sendMessages(messagesSend) # Send the messages\n\n endTime = time.time() # Save end time of update cycle\n scanDuration = endTime - startTime # Calculate time used to scan for devices\n\n if (scanDuration < SCAN_PERIODE):\n time.sleep(SCAN_PERIODE - scanDuration) # Sleep until next scan period\n\n if exit: # Exit\n break;", "title": "" }, { "docid": "b54062de6e61f49aa485b63b26922d23", "score": "0.481221", "text": 
"def update(self):\n\n _LOGGER.info(\"Update called {}\".format(self._mac))\n # send update request\n cmds_connect = { 'tries': 5,\n 'commands': [\n { 'action': 'writeCharacteristic', 'uuid': UUID_PIN, 'value': [ int(x) for x in self._pin.to_bytes(4, byteorder = 'little') ] },\n ]\n }\n \n cmds_set = copy.deepcopy(cmds_connect)\n\n if self._current.mode_code != self._target.mode_code and self._target.manual is not None:\n cmds_set['commands'].append({ 'action': 'writeCharacteristic', 'uuid': UUID_MODE, 'value': [ self._target.mode_value, 0, 0 ] })\n\n if self._current.target_temp != self._target.target_temp and self._target.target_temp is not None:\n cmds_set['commands'].append({ 'action': 'writeCharacteristic', 'uuid': UUID_TEMP, 'value': [ 128, int(self._target.target_temp * 2), 128, 128, 128, 128, 128 ] })\n\n if len(cmds_set['commands']) > 1:\n mqtt.publish(self.hass, 'ble/{}/commands'.format(self._mac), json.dumps(cmds_set), 1, False)\n\n cmds_get = copy.deepcopy(cmds_connect)\n cmds_get['commands'].append({ 'action': 'readCharacteristic', 'uuid': UUID_MODE })\n cmds_get['commands'].append({ 'action': 'readCharacteristic', 'uuid': UUID_TEMP })\n cmds_get['commands'].append({ 'action': 'readCharacteristic', 'uuid': UUID_BATTERY })\n if self._current.model_no is None:\n cmds_get['commands'].append({ 'action': 'readCharacteristic', 'uuid': UUID_MODEL })\n cmds_get['commands'].append({ 'action': 'readCharacteristic', 'uuid': UUID_FIRMWARE })\n cmds_get['commands'].append({ 'action': 'readCharacteristic', 'uuid': UUID_SOFTWARE })\n cmds_get['commands'].append({ 'action': 'readCharacteristic', 'uuid': UUID_MANU })\n\n mqtt.publish(self.hass, 'ble/{}/commands'.format(self._mac), json.dumps(cmds_get), 1, False)", "title": "" }, { "docid": "a0bf085540709da3eae83d4d1d80e1e3", "score": "0.4803436", "text": "def m5_generate_exchange_request(self) -> List[Tuple[int, bytes]]:\n # 1. Generate Ed25519 long-term public key, iOSDeviceLTPK,\n # and long-term secret key, iOSDeviceLTSK\n if self.signing_key is None:\n self.signing_key, _ = ed25519.create_keypair()\n with open(os.path.join(self.storage_folder, \"secret-key\"),\n \"wb\") as secret_key_file:\n secret_key_file.write(self.signing_key.to_bytes())\n self.verifying_key = self.signing_key.get_verifying_key()\n\n # 2. Derive iOSDeviceX from the SRP shared secret by using HKDF-SHA-512\n salt = b\"Pair-Setup-Controller-Sign-Salt\"\n info = b\"Pair-Setup-Controller-Sign-Info\"\n output_size = 32\n\n hkdf = cryptography.hazmat.primitives.kdf.hkdf.HKDF(\n algorithm=cryptography.hazmat.primitives.hashes.SHA512(),\n length=output_size,\n salt=salt,\n info=info,\n backend=cryptography.hazmat.backends.default_backend())\n self.X = hkdf.derive(to_bytes(self.K))\n\n # 3. Concatenate iOSDeviceX with the iOS device's Pairing Identifier, iOSDevicePairingID,\n # and its long-term public key, iOSDeviceLTPK.\n # The concatenated value will be referred to as iOSDeviceInfo.\n\n self.device_info = (\n to_bytes(self.X) + session.pairing_id.encode('utf-8') +\n self.verifying_key.to_bytes())\n\n # 4. Generate iOSDeviceSignature by signing iOSDeviceInfo with its\n # long-term secret key, iOSDeviceLTSK, using Ed25519.\n\n self.device_signature = self.signing_key.sign(self.device_info)\n\n # 5. 
Construct a sub-TLV\n sub_ktlvs = [(constants.PairingKTlvValues.kTLVType_Identifier,\n self.pairing_id),\n (constants.PairingKTlvValues.kTLVType_PublicKey,\n self.verifying_key.to_bytes()),\n (constants.PairingKTlvValues.kTLVType_Signature,\n self.device_signature)]\n\n prepared_sub_ktlvs = b''.join(\n data for ktlv in sub_ktlvs for data in utils.prepare_tlv(*ktlv))\n\n # 6. Encrypt the sub-TLV, encryptedData, and generate the 16 byte auth tag, authTag.\n # using the ChaCha20-Poly1305 AEAD algorithm\n\n # this includes the auth_tag appended at the end\n encrypted_data = crypto_aead_chacha20poly1305_ietf_encrypt(\n key=self.S, nonce=\"PS-Msg05\", aad=None, message=prepared_sub_ktlvs)\n\n ktlvs = [(constants.PairingKTlvValues.kTLVType_State, pack('<B', 5)),\n (constants.PairingKTlvValues.kTLVType_EncryptedData,\n encrypted_data)]\n\n return ktlvs", "title": "" }, { "docid": "086ac84814ebbe06a3c45ea67188c515", "score": "0.47889802", "text": "def on_message(client, userdata, msg):\n global deviceD, MQTT_REGEX\n global mqtt_servoID, mqtt_servoAngle\n logging.debug(\"Received: {0} with payload: {1}\".format(msg.topic, str(msg.payload)))\n msgmatch = re.match(MQTT_REGEX, msg.topic) # Check for match to subscribed topics\n if msgmatch:\n mqtt_payload = json.loads(str(msg.payload.decode(\"utf-8\", \"ignore\"))) \n mqtt_topic = [msgmatch.group(0), msgmatch.group(1), msgmatch.group(2), type(mqtt_payload)] # breaks msg topic into groups - group/group1/group2\n if mqtt_topic[1] == 'servoZCMD':\n mqtt_servoID = int(mqtt_topic[2])\n mqtt_servoAngle = int(mqtt_payload) # Set the servo angle from mqtt payload", "title": "" }, { "docid": "58a7addf07852d991164c3ab673d671d", "score": "0.47852597", "text": "def to(self, device) -> None:\n\n if self.recording != {}:\n\n for v in self.variables:\n if isinstance(v, str):\n self.recording[v] = self.recording[v].to(device)\n\n elif isinstance(v, tuple):\n self.recording[v[0]] = self.recording[v[0]].to(device)", "title": "" }, { "docid": "178218a1b23922d76102a9e3dfbc9376", "score": "0.47813544", "text": "def read_sensor():\n pass", "title": "" }, { "docid": "2a8c1f9d257e99862db3367701f3bf00", "score": "0.4780762", "text": "def read_sensor_data(self):\n dht_result = self.dht_sensor.read()\n if dht_result.is_valid():\n data = SensorData(dht_result.humidity, dht_result.temperature)\n self.sensor_data.push(data)\n data.log_data() # log data to console\n else:\n print('Invalid sensor data.', dht_result)", "title": "" }, { "docid": "95155ad39c4d59744adb73a6df0618f0", "score": "0.4778543", "text": "def on_message(self, client, userdata, msg):\n # pylint: disable=unused-argument\n mqtt_message = json.loads(msg.payload)\n mqtt_message[\"ext\"] = mqtt_message[\"ext\"].split(\",\")\n\n # Restructure payload message\n new_mqtt_message = {}\n new_mqtt_message[\"id\"] = mqtt_message[\"id\"]\n new_mqtt_message[\"alert\"] = mqtt_message[\"alert\"]\n new_mqtt_message[\"ezviz_alert_type\"] = mqtt_message[\"ext\"][4]\n new_mqtt_message[\"serial\"] = mqtt_message[\"ext\"][2]\n new_mqtt_message[\"msg_time\"] = mqtt_message[\"ext\"][1]\n new_mqtt_message[\"img_url\"] = mqtt_message[\"ext\"][16]\n new_mqtt_message[\"name\"] = mqtt_message[\"ext\"][17]\n print(new_mqtt_message)\n\n if self._broker:\n # Register HA alert sensor\n publish.single(\n f\"{STATE_TOPIC}/{new_mqtt_message['serial']}/alert/config\",\n json.dumps(\n {\n \"name\": \"alert\",\n \"device\": {\n \"name\": f\"{new_mqtt_message['name']}\",\n \"mf\": \"Ezviz\",\n \"ids\": f\"(ezviz, {new_mqtt_message['serial']})\",\n },\n 
\"state_topic\": f\"{STATE_TOPIC}/{new_mqtt_message['serial']}/alert/state\",\n \"platform\": \"mqtt\",\n }\n ),\n hostname=self._broker[\"broker_ip\"],\n auth={\n \"username\": self._broker[\"username\"],\n \"password\": self._broker[\"password\"],\n },\n )\n\n # Register HA alert_type sensor\n publish.single(\n f\"{STATE_TOPIC}/{new_mqtt_message['serial']}/ezviz_alert_type/config\",\n json.dumps(\n {\n \"name\": \"ezviz_alert_type\",\n \"device\": {\n \"name\": f\"{new_mqtt_message['name']}\",\n \"mf\": \"Ezviz\",\n \"ids\": f\"(ezviz, {new_mqtt_message['serial']})\",\n },\n \"state_topic\": f\"{STATE_TOPIC}/{new_mqtt_message['serial']}/ezviz_alert_type/state\",\n \"platform\": \"mqtt\",\n }\n ),\n hostname=self._broker[\"broker_ip\"],\n auth={\n \"username\": self._broker[\"username\"],\n \"password\": self._broker[\"password\"],\n },\n )\n\n # Register HA msg_time sensor\n publish.single(\n f\"{STATE_TOPIC}/{new_mqtt_message['serial']}/msg_time/config\",\n json.dumps(\n {\n \"name\": \"msg_time\",\n \"device\": {\n \"name\": f\"{new_mqtt_message['name']}\",\n \"mf\": \"Ezviz\",\n \"ids\": \"(ezviz, {new_mqtt_message['serial']})\",\n },\n \"state_topic\": f\"{STATE_TOPIC}/{new_mqtt_message['serial']}/msg_time/state\",\n \"platform\": \"mqtt\",\n }\n ),\n hostname=self._broker[\"broker_ip\"],\n auth={\n \"username\": self._broker[\"username\"],\n \"password\": self._broker[\"password\"],\n },\n )\n\n # Register HA img_url sensor\n publish.single(\n f\"{STATE_TOPIC}/{new_mqtt_message['serial']}/img_url/config\",\n json.dumps(\n {\n \"name\": \"img_url\",\n \"device\": {\n \"name\": f\"{new_mqtt_message['name']}\",\n \"mf\": \"Ezviz\",\n \"ids\": f\"(ezviz, {new_mqtt_message['serial']})\",\n },\n \"state_topic\": f\"{STATE_TOPIC}/{new_mqtt_message['serial']}/img_url/state\",\n \"platform\": \"mqtt\",\n }\n ),\n hostname=self._broker[\"broker_ip\"],\n auth={\n \"username\": self._broker[\"username\"],\n \"password\": self._broker[\"password\"],\n },\n )\n\n # Update HA Alert sensor\n publish.single(\n f\"{STATE_TOPIC}/{new_mqtt_message['serial']}/alert/state\",\n f\"{new_mqtt_message['alert']}\",\n hostname=self._broker[\"broker_ip\"],\n auth={\n \"username\": self._broker[\"username\"],\n \"password\": self._broker[\"password\"],\n },\n )\n\n # Update HA ezviz_alert_type sensor\n publish.single(\n f\"{STATE_TOPIC}/{new_mqtt_message['serial']}/ezviz_alert_type/state\",\n f\"{new_mqtt_message['ezviz_alert_type']}\",\n hostname=self._broker[\"broker_ip\"],\n auth={\n \"username\": self._broker[\"username\"],\n \"password\": self._broker[\"password\"],\n },\n )\n\n # Update HA msg_time sensor\n publish.single(\n f\"{STATE_TOPIC}/{new_mqtt_message['serial']}/msg_time/state\",\n f\"{new_mqtt_message['msg_time']}\",\n hostname=self._broker[\"broker_ip\"],\n auth={\n \"username\": self._broker[\"username\"],\n \"password\": self._broker[\"password\"],\n },\n )\n\n # Update HA img_url sensor\n publish.single(\n f\"{STATE_TOPIC}/{new_mqtt_message['serial']}/img_url/state\",\n f\"{new_mqtt_message['img_url']}\",\n hostname=self._broker[\"broker_ip\"],\n auth={\n \"username\": self._broker[\"username\"],\n \"password\": self._broker[\"password\"],\n },\n )", "title": "" }, { "docid": "66ae1d1c2477df0760dfe02311fd365b", "score": "0.47770002", "text": "def sensor_reader():\n compensation_params = bme280.load_calibration_params(smbus2.SMBus(1), 0x76)\n ABSpressure = round(mpr.pressure,4)\n AMBPressure = round(bme.pressure,4)\n GaugePress = round((mpr.pressure - bme.pressure)/2.4908890833333,2)\n temp = 
round(bme.temperature,2)\n Flow_Check()\n FV = (round(adc.read_adc(2, gain=GAIN)*0.0001875,4)) #voltage\n DV = (round(adc.read_adc(3, gain=GAIN)*0.30518509476,4)) #sccm\n data = [ABSpressure, AMBPressure, GaugePress, temp, FV, DV, 1.0]\n print(str(data))\n return(data)", "title": "" }, { "docid": "5bad04e0d67cc97ec1c630fae2e429be", "score": "0.47738352", "text": "def update(self):\n\n valid_message = lambda msg: msg and not msg.startswith('#')\n\n def broadcast(message):\n for client in self.clients:\n self.incoming_serial.append((client, msg))\n\n # read and broadcast any new global messages\n msg = self.ser.readline()\n if valid_message(msg):\n with self.message_lock: broadcast(msg)\n\n # send client requests and accumulate responses\n with self.message_lock:\n while len(self.outgoing_serial):\n (id, msg) = self.outgoing_serial.pop(0)\n logging.debug('writing \"%s\" to serial port' % msg)\n self.ser.write(msg + '\\n')\n\n # wait till we get a response to our query\n if valid_message(msg):\n attempts = 100\n command = msg.split()[0]\n\n while attempts:\n response = self.ser.readline()\n if valid_message(response):\n logging.debug('response \"%s\"' % response.strip())\n logging.debug(' command \"%s\"' % command)\n if response and command == response.split()[0]:\n logging.debug('matched response %s' % response.strip())\n self.incoming_serial.append((id, response))\n break\n else:\n broadcast(msg)\n\n attempts -= 1\n else:\n # send out a timeout message\n logging.warning('timeout on message \"%s\"' % msg)\n self.incoming_serial.append((id, '!timeout\\r\\n'))", "title": "" }, { "docid": "5590b572df0c7f59c645a8af5edb859b", "score": "0.47710243", "text": "def on_message(client,userdata,msg):\n topic = msg.topic\n m_decode = str(msg.payload.decode(\"utf-8\"))\n print(\"From: \", topic)\n print (\"Message Received : \",m_decode)\n print()\n\n value = json.loads(m_decode)\n\n print(value)\n if value['sensorid'] == 0:\n if value['distance'] < 5.00:\n type = 0\n log_File(value,30,type)\n\n elif value['temperature'] > 36.00:\n type = 1\n log_File(value,120,type)\n elif value['humidity'] < 50.00:\n type = 2\n log_File(value,120,type)", "title": "" }, { "docid": "43e97a04532b03ff8fa034e001dd9461", "score": "0.47709006", "text": "def readThread(self):\n\t\tthread = Thread(target = self.send_data)\n\t\tthread.start()\n\t\t\n\t\twhile True:\n\t\t\tline = self.cmd.stdout.readline() #read the output from C\n\t\t\ttime.sleep(1)\n\t\t\tline = line.decode()\n\t\t\tline = line.strip(\"\\n\").split(\":\")\n\t\t\tif (line[0] == 'Sensor'):\n\t\t\t\ttemp = \"temperature\"+ line[1]\n\t\t\t\thumi = \"humidity\" + line[1]\n\t\t\t\tif self.clientCounter > 0 and self.sending:\n\t\t\t\t\tself.client.publish(temp, line[2])\n\t\t\t\t\tself.client.publish(humi, line[3])", "title": "" }, { "docid": "5cb2d10bdfcfba2c7e6246d1bb45541a", "score": "0.4769089", "text": "def read_serial():\n global prev_sender\n global prev_msg\n global data\n global ident\n global t\n\n serial1 = sys.stdin.read() # UART library doesn't work here!\n utime.sleep_ms(15)\n serial2 = sys.stdin.read()\n utime.sleep_ms(15)\n serial3 = sys.stdin.read()\n if serial1 is None and serial2 is None and serial3 is None:\n return 0\n \"\"\"\n if test:\n if c.isconnected() and serial1:2w\n c.sms_send(2524126262, serial1[:2])\n if c.isconnected() and serial2:\n c.sms_send(2524126262, serial2[:2])\n if c.isconnected() and serial3:\n c.sms_send(2524126262, serial3[:2])\n \"\"\"\n serial = None\n if serial1 and serial2 and serial3: ## 1 2 3\n serial = str(serial1) + 
str(serial2) + str(serial3)\n if serial[-1] is not \"!\":\n if c.isconnected() and test:\n c.sms_send(9199204318, \"not !, 1 2 3\")\n # c.sms_send(2524126262, serial[-10:])\n return 0\n else:\n serial = serial[:-1]\n if not deployed:\n print(serial1)\n print(serial2)\n print(serial3)\n print(\"Therefore serial(3) is \")\n print(serial)\n\n elif serial1 and serial2 and not serial3: # 1 2\n serial = str(serial1) + str(serial2)\n if serial[-1] is not \"!\":\n if c.isconnected() and test:\n c.sms_send(9199204318, \"not !, 1 2\")\n # c.sms_send(2524126262, serial[-10:])\n return 0\n else:\n serial = serial[:-1]\n if not deployed:\n print(serial2)\n print(\"Therefore serial2 is \")\n print(serial)\n elif serial1 and not serial2 and not serial3: # 1\n serial = serial1\n if serial[-1] is not \"!\":\n if c.isconnected() and test:\n c.sms_send(9199204318, \"not !, 1\")\n # c.sms_send(2524126262, serial[-10:])\n return 0\n else:\n serial = serial[:-1]\n if not deployed:\n print(serial)\n elif not serial1 and serial2 and serial3: # 2 3\n serial = str(serial2) + str(serial3)\n if serial[-1] is not \"!\":\n if c.isconnected() and test:\n c.sms_send(9199204318, \"not !, 2 3 \")\n # c.sms_send(2524126262, serial[-10:])\n return 0\n else:\n serial = serial[:-1]\n if not deployed:\n print(serial)\n elif not serial1 and not serial2 and serial3: # 3\n serial = serial3\n if serial[-1] is not \"!\":\n if c.isconnected() and test:\n c.sms_send(9199204318, \"not !, 3\")\n # c.sms_send(2524126262, serial[-10:])\n return 0\n else:\n serial = serial[:-1]\n if not deployed:\n print(serial)\n elif not serial1 and serial2 and not serial3: # 2\n serial = serial2\n if serial[-1] is not \"!\":\n if c.isconnected() and test:\n c.sms_send(9199204318, \"not !, 2\")\n # c.sms_send(2524126262, serial[-10:])\n return 0\n else:\n serial = serial[:-1]\n if not deployed:\n print(serial)\n elif serial1 and not serial2 and serial3: # 3 1\n serial = str(serial1) + str(serial3)\n if serial[-1] is not \"!\":\n if c.isconnected() and test:\n c.sms_send(9199204318, \"not !, 1 3\")\n # c.sms_send(2524126262, serial[-10:])\n return 0\n else:\n serial = serial[:-1]\n if not deployed:\n print(serial)\n else:\n return 0\n\n if serial:\n types = check_serial_type(serial)\n if types is 0:\n return 0\n elif types is 1: # post to viper DEPRECATED. Checks for IDent now in teensy string instead of using Xbee to update Idenmt\n serial = serial[1:]\n t = (time.localtime()) # (year, month, day, hour, second, day, yearday)\n t = create_time(t)\n ident = ident + 1\n ssend(serial, ident, t)\n\n elif types is 2: # send to users\n serial = serial[1:]\n # c.sms_send(2524126262, \"Got a C command\")\n send_text(serial)\n # prev_sender = None\n elif types is 3: # send to all users\n serial = serial[1:]\n send_text_all(serial)\n elif types is 4: # post to VIPER!!!! 
accounts for EEPROM Identifier\n if c.isconnected():\n c.sms_send(9199204318, \"Posting\")\n t = (time.localtime()) # (year, month, day, hour, second, day, yearday)\n t = create_time(t)\n comma = serial.find(\"<\")\n if comma is -1:\n if c.isconnected():\n c.sms_send(9199204318, \"No <\")\n return 0\n ident = serial[:comma]\n serial = serial[comma + 2:]\n ssend(serial, ident, t)\n elif types is 6:\n \"\"\"\n When receiving ?, check for texts\n \"\"\"\n\n if c.isconnected():\n msg_sms_receive = check_txt() # check for text\n # [sms_txt['message'], sms_txt] returns or \"\", None\n if msg_sms_receive:\n if len(msg_sms_receive['message']) > 0:\n msg = msg_sms_receive['message']\n else:\n msg = None\n sms_sender = msg_sms_receive['sender']\n if sms_sender:\n prev_sender = msg_sms_receive['sender']\n if msg is not None and sms_sender is not None:\n print(msg) # if true: interface with Teensy and send Teensy C#\n else:\n return 0", "title": "" }, { "docid": "f3a246bdf1f6b7acb17a729971662b15", "score": "0.47645625", "text": "def sendToMbed(msg):\n if not serial_port.is_open:\n return\n\n v = int(msg.data[0]) # velocity\n w = int(msg.data[1]) # angular velocity\n try:\n lights = int(msg.data[2])\n except:\n lights = int(0)\n\n data = packData(v, w, lights)\n\n serial_port.write(data)\n #print(v, w, lights)", "title": "" }, { "docid": "3df2b894edca065dd88a363fe08a56a0", "score": "0.47588748", "text": "def run(self):\n data = ''\n while not rospy.is_shutdown():\n if (rospy.Time.now() - self.lastsync).to_sec() > (self.timeout * 3):\n if self.synced:\n rospy.logerr(\"Lost sync with device, restarting...\")\n else:\n rospy.logerr(\"Unable to sync with device; possible link problem or link software version \"\n \"mismatch such as hydro rosserial_python with groovy Arduino\")\n self.lastsync_lost = rospy.Time.now()\n self.sendDiagnostics(diagnostic_msgs.msg.DiagnosticStatus.ERROR, \"no sync with device\")\n self.requestTopics()\n self.lastsync = rospy.Time.now()\n\n # This try-block is here because we make multiple calls to read(). Any one of them can throw\n # an IOError if there's a serial problem or timeout. 
In that scenario, a single handler at the\n # bottom attempts to reconfigure the topics.\n try:\n with self.mutex:\n if self.port.inWaiting() < 1:\n time.sleep(0.001)\n continue\n\n flag = [0, 0]\n flag[0] = self.tryRead(1)\n if (flag[0] != '\\xff'):\n continue\n\n flag[1] = self.tryRead(1)\n if flag[1] != self.protocol_ver:\n self.sendDiagnostics(\n diagnostic_msgs.msg.DiagnosticStatus.ERROR,\n \"Mismatched protocol version in packet: lost sync or rosserial_python is from different ros release than the rosserial client\")\n rospy.logerr(\"Mismatched protocol version in packet: lost sync or rosserial_python is from different ros release than the rosserial client\")\n protocol_ver_msgs = {'\\xff': 'Rev 0 (rosserial 0.4 and earlier)', '\\xfe': 'Rev 1 (rosserial 0.5+)', '\\xfd': 'Some future rosserial version'}\n if (flag[1] in protocol_ver_msgs):\n found_ver_msg = 'Protocol version of client is ' + protocol_ver_msgs[flag[1]]\n else:\n found_ver_msg = \"Protocol version of client is unrecognized\"\n rospy.loginfo(\"%s, expected %s\", found_ver_msg, protocol_ver_msgs[self.protocol_ver])\n continue\n\n msg_len_bytes = self.tryRead(2)\n msg_length, = struct.unpack(\"<h\", msg_len_bytes)\n\n msg_len_chk = self.tryRead(1)\n msg_len_checksum = sum(map(ord, msg_len_bytes)) + ord(msg_len_chk)\n\n if msg_len_checksum % 256 != 255:\n rospy.loginfo(\"wrong checksum for msg length, length %d\", (msg_length))\n rospy.loginfo(\"chk is %d\", ord(msg_len_chk))\n continue\n\n # topic id (2 bytes)\n topic_id_header = self.tryRead(2)\n topic_id, = struct.unpack(\"<h\", topic_id_header)\n\n try:\n msg = self.tryRead(msg_length)\n except IOError:\n self.sendDiagnostics(diagnostic_msgs.msg.DiagnosticStatus.ERROR, \"Packet Failed : Failed to read msg data\")\n rospy.loginfo(\"Packet Failed : Failed to read msg data\")\n rospy.loginfo(\"expected msg length is %d\", msg_length)\n raise\n\n # checksum for topic id and msg\n chk = self.tryRead(1)\n checksum = sum(map(ord, topic_id_header)) + sum(map(ord, msg)) + ord(chk)\n\n if checksum % 256 == 255:\n self.synced = True\n try:\n self.callbacks[topic_id](msg)\n except KeyError:\n rospy.logerr(\"Tried to publish before configured, topic id %d\", topic_id)\n self.requestTopics()\n rospy.sleep(0.001)\n else:\n rospy.loginfo(\"wrong checksum for topic id and msg\")\n\n except IOError:\n # One of the read calls had an issue. 
Just to be safe, request that the client\n # reinitialize their topics.\n self.requestTopics()", "title": "" }, { "docid": "b6c32e797890557d6d4a96d47d9a87e6", "score": "0.47482327", "text": "def read(self):\n key, value = self.read_serial()\n if key is None:\n return\n\n if key in ('pressure', 'temperature'):\n value = int(value)\n if key in ('altitude', 'filtered', 'velocity'):\n value = float(value) / 100;\n self.emit(key, value)", "title": "" }, { "docid": "3a37ef742a609175e41826425f983193", "score": "0.47468108", "text": "def get_devices(self)->dict:\n req = requests.get(Meater.apiurl + '/devices',\n headers={'Authorization': f'Bearer {Meater.key}'}).json()\n for device in req['data']['devices']:\n device = Meater._output_format(device, self.convert)\n \n return req", "title": "" }, { "docid": "a501d1fcc00670d44f95c616e4d4a1b6", "score": "0.47395456", "text": "def read(sensors):\n status = \"ok\"\n data = {}\n temperature = 0.0\n latitude = ''\n longitude = ''\n\n global ds18b20, gps\n\n if 'ds18b20' in sensors:\n try:\n temperature = ds18b20.read()\n status = __merge_status(status, ds18b20.status)\n except ImportError:\n logger.error('DS18B20 is not initialized')\n if 'gps' in sensors:\n try:\n latitude, longitude = gps.read()\n status = __merge_status(status, gps.status)\n except ImportError:\n logger.error('GPS is not initialized')\n if not sensors:\n status = __merge_status(status, \"no sensors\")\n\n data['temp'] = temperature\n data['lati'] = latitude\n data['long'] = longitude\n\n return (status, data)", "title": "" }, { "docid": "6161f7a7390edf55c5c543d00d413df4", "score": "0.47245347", "text": "def send(self, device, message):\n self.on_value_change(device, message)", "title": "" }, { "docid": "3d9cbc91fb6633ba1eafc75d9e175f80", "score": "0.47211987", "text": "def aggregate_subdevice(self, device_data, topic):\r\n result = True\r\n tagged_device_data = {}\r\n device_tag = self.device_topic_dict[topic]\r\n site = self.site_topic_dict[topic]\r\n needed_devices = site['needed_devices']\r\n device_values = site['device_values']\r\n _log.info(\"Current device to aggregate: topic {} device: {}\".format(topic, device_tag))\r\n if device_tag not in needed_devices:\r\n result = False\r\n # optional eg: 'SomeFanSpeed' -> 'supply_fan_speed'\r\n mappings = site.get('point_mapping', {})\r\n _log.info(\"--- device_data -> {}\".format(device_data))\r\n _log.info(\"--- mappings -> {}\".format(mappings))\r\n for key, value in device_data.items():\r\n # weird ... bug\r\n if key.endswith(device_tag):\r\n _log.warning(\"--- weird entry in device_data ? {} -> {}\".format(key, value))\r\n _log.warning(\"--- device_tag ? 
{}\".format(device_tag))\r\n key = key[:-len(device_tag)-1]\r\n\r\n # here do the mapping between the actual device topic\r\n # and the APP expected topic names\r\n k = key\r\n if key in mappings:\r\n k = mappings[key]\r\n else:\r\n long_key = '/'.join([device_tag, key])\r\n if long_key in mappings:\r\n k = mappings[long_key]\r\n\r\n device_data_tag = \"&\".join([k, device_tag])\r\n tagged_device_data[device_data_tag] = value\r\n _log.info(\"--- tagged_device_data -> {}\".format(tagged_device_data))\r\n device_values.update(tagged_device_data)\r\n _log.info(\"--- device_values -> {}\".format(device_values))\r\n if device_tag in needed_devices:\r\n needed_devices.remove(device_tag)\r\n _log.info(\"--- needed_devices removed [{}] -> {}\".format(device_tag, needed_devices))\r\n return result", "title": "" }, { "docid": "31916d7f8e4d5003cecf081e42e06e92", "score": "0.4718316", "text": "def __zigbee2mqtt_event_received(self, message: BLCZigbee2mqttMessage) -> None:\n \n \"\"\"Here the timestamp of the of the device is received to make sure that system operates in the valid time interval. This is done using the datetime library\"\"\"\n currentTime = datetime.now()\n today10pm = currentTime.replace(hour=22, minute=0, second=0, microsecond=0) #Starting time\n today9am = currentTime.replace(hour=9, minute=0, second=0, microsecond=0) #Ending time\n\n #Making sure the time is valid\n if currentTime > today10pm or currentTime < today9am:\n # If message is None (it wasn't parsed), then don't do anything.\n if not message:\n return\n \n # If the message is not a device event, then don't do anything.\n if message.type_ != BLCZigbee2mqttMessageType.DEVICE_EVENT:\n return\n\n # Parse the topic to retreive the device ID. If the topic only has one level, don't do\n # anything.\n tokens = message.topic.split(\"/\")\n if len(tokens) <= 1:\n return\n\n # Retrieve the device ID from the topic.\n device_id = tokens[1]\n\n # If the device ID is known, then process the device event and send a message to the remote\n # web server.\n device = self.__devices_model.find(device_id)\n\n\n #When the device that has registered movement is found by the controller, then the controller checks for a occupancy message. If there are an occupancy message, then the trigger function in the statemachine is called. 
Otherwise an error is returned \n #Thus, only occupancy messages are accepted and forwarded to the state machine \n if device:\n try:\n occupancy = message.event[\"occupancy\"]\n except KeyError:\n pass\n else:\n self.stateMachine.trigger(occupancy, device_id) #Sends the occupancy message and the device ID \n\n #Occupancy message = True: PIR sensor has registered movement\n #Occuoancy message = False: PIR sensor has not registered movement ", "title": "" }, { "docid": "ae6f3960ba3562d5fed49f245e4b0318", "score": "0.4715545", "text": "def publish_sensor_data(data):\n payload = json.dumps(data)\n if MQTT_CLIENT:\n MQTT_CLIENT.publish(\"imu/\" + DEVICE_ID, payload)", "title": "" }, { "docid": "f40432fc3e10813330ca1ecae76daf61", "score": "0.47114053", "text": "def feed_messages(protocol):\n\n value = 0\n\n while True:\n yield from protocol.byte_output(0x1305, 0, value)\n yield from asyncio.sleep(1)\n value = value + 1 if value < 255 else 0", "title": "" }, { "docid": "6fba3d15594342222ff8e58817b892a3", "score": "0.47078258", "text": "def execute(self, device):\n read_data(device, self.command, self.indices, self.output_conversion, self.message)", "title": "" }, { "docid": "afa7a94b2e11d402da964d58a742cbf7", "score": "0.4705553", "text": "def sensor(sck):\n if sck.server.v:\n print(sck.ID + \" logged in\")\n while True:\n data = None\n while not data:\n try:\n data = sck.conn.recv(1024)\n except Exception:\n pass\n if sck.server.v:\n print(sck.server.ID + \" <- \" + sck.ID + \": \" + str(data, \"utf-8\"))\n data = str(data, \"utf-8\").split(\"|\")\n if data[1] == \"PUT\" and data[2] == sck.ID:\n storage[sck.ID].append(float(data[3]))", "title": "" }, { "docid": "25a6d23949852a1ab57813b208725fc6", "score": "0.47019798", "text": "def getSensors( self ) -> dict :\n pkg = { \"cmd\":\"getSensors\" }\n resp = self.sendPkg( pkg )\n self.pos = tuple( resp[\"pos\"] )\n self.speed = tuple( resp[\"speed\"] )\n self.angle = resp[\"angle\"]\n return resp", "title": "" }, { "docid": "9cfb1e9962612caeba978449e71bbb24", "score": "0.4697051", "text": "def sender(self):\n while 1:\n if self.fetch_ch_data():\n pos = []\n for i in range(self.r * self.c):\n diff = self.diffs_p[i]\n if diff > 0:\n pos.append(diff / 40)\n else:\n pos.append(diff / 40)\n for i in range(self.c * self.r):\n diff = self.diffs[i]\n if diff > 0:\n pos.append(diff / 402528)\n else:\n pos.append(diff / 402528)\n\n if self.conn:\n try:\n self.conn.send((' '.join(str(e) for e in pos) + '\\n').encode())\n except:\n self.conn.close()\n self.conn = None\n print(\"Connectioon broken\")", "title": "" }, { "docid": "917e6bd96a2722cf8933363665f5c442", "score": "0.46897167", "text": "def do_send_RF_code(value, TelegramResponder):\n\n for val in value.values():\n light = re.findall(r\"Switch\\b\\s*(\\w*)\", val)\n parts = val.split()\n if light and len(parts) > 2: # Turn on or off if the command is correct\n if (\n \"433MHz_Transiever\"\n in TelegramResponder.main.default_values_dict[\"settings\"]\n ):\n if (\n light[0]\n in TelegramResponder.main.default_values_dict[\"settings\"][\n \"433MHz_Transiever\"\n ][\"Codes\"].keys()\n ):\n onoff = 1 if parts[-1].upper() == \"ON\" else 0\n path = os.path.normpath(\n TelegramResponder.main.default_values_dict[\"settings\"][\n \"433MHz_Transiever\"\n ][\"path\"]\n )\n for switch in TelegramResponder.main.default_values_dict[\n \"settings\"\n ][\"433MHz_Transiever\"][\"Codes\"][light[0]]:\n code = switch\n cmd = \"{} {} {}\".format(path, code, onoff)\n os.system(cmd)\n if onoff:\n old_light = 
TelegramResponder.current_light\n TelegramResponder.current_light = light[0]\n else:\n old_light = None # Because everything is off\n TelegramResponder.current_light = None\n\n # Switch the old one off, which are not included in the new one\n if old_light and TelegramResponder.settings.get(\n \"Exclusive_Light_Switching\", True\n ):\n path = os.path.normpath(\n TelegramResponder.main.default_values_dict[\"settings\"][\n \"433MHz_Transiever\"\n ][\"path\"]\n )\n onoff = 0\n for switch in TelegramResponder.main.default_values_dict[\n \"settings\"\n ][\"433MHz_Transiever\"][\"Codes\"][old_light]:\n if (\n switch\n not in TelegramResponder.main.default_values_dict[\n \"settings\"\n ][\"433MHz_Transiever\"][\"Codes\"][\n TelegramResponder.current_light\n ]\n ):\n code = switch\n cmd = \"{} {} {}\".format(path, code, onoff)\n os.system(cmd)\n\n TelegramResponder.answer += \"Done and enjoy.\"\n else:\n TelegramResponder.answer += (\n \"This light configuration is not defined.\"\n )\n else:\n TelegramResponder.answer += (\n \"No transiever defined. Cannot do what you asked.\"\n )\n\n elif light and len(parts) == 2: # if no on or off is defined\n TelegramResponder.answer = {\n \"CALLBACK\": {\n \"info\": \"Would you like to turn {} ON or OFF\".format(light[0]),\n \"keyboard\": {\n \"ON\": \"Switch {} ON\".format(light[0]),\n \"OFF\": \"Switch {} OFF\".format(light[0]),\n },\n \"arrangement\": [\"ON\", \"OFF\"],\n }\n }\n elif light and len(parts) == 1: # If just the switch command was send\n if (\n \"433MHz_Transiever\"\n in TelegramResponder.main.default_values_dict[\"settings\"]\n ):\n keyboard = {}\n arrangement = []\n for light in TelegramResponder.main.default_values_dict[\"settings\"][\n \"433MHz_Transiever\"\n ][\"Codes\"]:\n keyboard[light] = \"Switch {}\".format(light)\n arrangement.append([light])\n TelegramResponder.answer = {\n \"CALLBACK\": {\n \"info\": \"Possible light configurations:\",\n \"keyboard\": keyboard,\n \"arrangement\": arrangement,\n }\n }", "title": "" }, { "docid": "1f7171ec75999cb16564bf75bc1b5767", "score": "0.467955", "text": "def _build_message(self, interval, get_result, time):\n message = {\n 'block_name': f'm1000_{interval}',\n 'timestamp': time,\n 'data': {}\n }\n\n for item in get_result:\n field_name, oid_value, oid_description = self._extract_oid_field_and_value(item)\n\n if oid_value is None:\n continue\n\n message['data'][field_name] = oid_value\n message['data'][field_name + \"_description\"] = oid_description\n\n return message", "title": "" }, { "docid": "31429b1db22de9e5ee132ab02e55d1ed", "score": "0.46777517", "text": "def create_multiple_device_keys_and_certs(\n number_of_devices,\n issuer_cert_subject,\n issuer_key,\n device_common_name,\n password,\n key_size=4096,\n days=365,\n):\n for i in range(1, number_of_devices + 1):\n device_password_file = COMMON_DEVICE_PASSWORD_FILE + str(i) + EXTENSION_NAME\n device_csr_file = COMMON_DEVICE_CSR_FILE + str(i) + EXTENSION_NAME\n device_cert_file = COMMON_DEVICE_CERT_FILE + str(i) + EXTENSION_NAME\n device_private_key = create_private_key(\n key_file=device_password_file, password=password, key_size=key_size\n )\n device_csr = create_csr(\n private_key=device_private_key,\n csr_file=device_csr_file,\n subject=device_common_name + str(i),\n is_ca=False,\n )\n\n builder = create_cert_builder(\n subject=device_csr.subject,\n issuer_name=issuer_cert_subject,\n public_key=device_csr.public_key(),\n days=int(days / 100),\n is_ca=False,\n )\n\n device_cert = builder.sign(\n private_key=issuer_key, 
algorithm=hashes.SHA256(), backend=default_backend()\n )\n with open(device_cert_file, \"wb\") as f:\n f.write(device_cert.public_bytes(serialization.Encoding.PEM))", "title": "" }, { "docid": "58f4302a7d35f70925b379dcfc60c722", "score": "0.467639", "text": "def _create_devices(self):\n for unit, value in self.variables.items():\n try:\n device = Devices[unit]\n except KeyError as err:\n Domoticz.Log(\"Device %i is not created yet. Creating....\" % unit)\n self._create_device(unit)", "title": "" }, { "docid": "43673e8a15aa8b5611a96b56142d7277", "score": "0.46588016", "text": "def sendMessageIdentify():\n data = [0] * CMTP_BUFFER_LEN\n dataLength = 254\n serviceID = 10\n dataString = \"\"\n for i, resultData in enumerate(data):\n if i % 4 == 0:\n dataString += \"\\n\"\n dataString += \"{0:0{1}X}: \".format(i, 4)\n dataString += \"{0:0{1}X} \".format(resultData, 2)\n else:\n dataString += \"{0:0{1}X} \".format(resultData, 2)\n Messages.write_message(\"Sending Service: {}\".format(serviceID))\n Messages.write_message(\"Sending Size: {}\".format(dataLength))\n Messages.write_message(\"Sending Data: \\n{}\".format(dataString))\n results = data_store.scenario[\"capiController\"].sendMessage(data, serviceID, dataLength)\n Messages.write_message(results[\"description\"])\n assert results[\"result\"] == 0, \"Send message failed\"\n Messages.write_message(\"Received Status: {}\".format(results[\"data\"][\"status\"]))\n Messages.write_message(\"Received Service: {}\".format(results[\"data\"][\"service\"]))\n Messages.write_message(\"Received Size: {}\".format(results[\"data\"][\"size\"]))\n dataString = \"\"\n for i, resultData in enumerate(results[\"data\"][\"data\"]):\n if i % 4 == 0:\n dataString += \"\\n\"\n dataString += \"{0:0{1}X}: \".format(i, 4)\n dataString += \"{0:0{1}X} \".format(resultData, 2)\n else:\n dataString += \"{0:0{1}X} \".format(resultData, 2)\n Messages.write_message(\"Received Data: \\n{}\".format(dataString))\n assert results[\"data\"][\"status\"] == 0, \"Status from response is non-zero - {}\".format(results[\"data\"][\"status\"])", "title": "" }, { "docid": "7e01977b7d74132877df538453d34f18", "score": "0.46541184", "text": "def send_key_to_service(self):\n data_parts = self._divide_data(self.public_key)\n recieved_msg = False\n for attempt in xrange(3):\n self.send_packets(data_parts)\n self.timeout = time.time() + 120\n sniff(filter = self.sniff_filter, stop_filter = self._handle_packet) \n if not self.data_from_service:\n print 'key was not recieved'\n elif self.data_from_service == \"[RECIEVED KEY]\":\n print 'key recieved'\n self.data_from_service = ''\n self.splitted_data = {}\n return True\n return False", "title": "" }, { "docid": "1486393220ee608a50839cf764e19234", "score": "0.4647938", "text": "def message(client, feed_id, payload):\n ui.ClicktoConnect.setText(\"\")\n if feed_id == TEMPERATURE:\n print(payload)\n ui.temp_label.setText(payload + \" °C\")\n\n if feed_id == HUMIDITY:\n print(payload)\n ui.humidity_label.setText(payload + \" %\")\n\n if feed_id == OIL:\n print(payload)\n ui.oil_lable.setText(payload + \" %\")\n\n if feed_id == v:\n print(payload)\n ui.voltage_label.setText(payload + \" Volts\")\n\n if feed_id == CURRENT:\n print(payload)\n ui.Current_label.setText(payload + \" mAmps\")\n\n if feed_id == VIBRATION:\n if int(payload) >= 100:\n print(\"Slight Movement\")\n ui.vibration_label.setText(\"Slight Vibration\")\n else:\n print(\"Stable\")\n ui.vibration_label.setText(\"Stable\")", "title": "" }, { "docid": "ff6c02aa1b3a4522e34218d100eeee5c", 
"score": "0.46418697", "text": "def generate_signal(key, num_timesteps=500):\n timesteps = jnp.arange(0, num_timesteps)\n dim = 4\n true_source = jnp.zeros((num_timesteps, dim))\n\n true_source = true_source.at[:, 0].set(jnp.sin(timesteps / 2)) # sinusoid\n true_source = true_source.at[:, 1].set(((timesteps % 23 - 11) / 9) ** 5)\n true_source = true_source.at[:, 2].set((timesteps % 27 - 13) / 9) # sawtooth\n\n key, subkey = jax.random.split(key)\n rand = jax.random.uniform(subkey, (num_timesteps,))\n key, subkey = jax.random.split(key)\n true_source = true_source.at[:, 3].set(\n jnp.where(rand < 0.5, rand * 2 - 1, -1)\n * jnp.log(jax.random.uniform(subkey, (num_timesteps,)))\n ) # impulsive noise\n\n true_source /= true_source.std(axis=0)\n true_source -= true_source.mean(axis=0)\n key, subkey = jax.random.split(key)\n mixing_matrix = jax.random.uniform(subkey, (dim, dim)) # mixing matrix\n return (\n true_source,\n mixing_matrix,\n jax.vmap(ica.get_signal, (None, 0), 0)(mixing_matrix, true_source),\n )", "title": "" }, { "docid": "1a41f2d6409274a0d874bc902b4c255c", "score": "0.46402857", "text": "def send_mqtt(self, hmac_signed):\r\n lib = {}\r\n lib[\"hmac\"] = hmac_signed\r\n #lib[\"what\"] = \"thefuck\"\r\n lib[\"Data\"] = ubinascii.hexlify(self.enData)\r\n lib[\"ID\"] = ubinascii.hexlify(self.nodeid)\r\n lib[\"IV\"] = ubinascii.hexlify(self.iv)\r\n # a Python object (dict): \r\n #print('processed JSON'+ str(lib))\r\n return json.dumps(lib)", "title": "" }, { "docid": "9f035d04288fa13899f0096288659260", "score": "0.46401888", "text": "def on_mdp_request(self, msg):\n ### NON TESTE !\n \n Plugin.on_mdp_request(self, msg)\n #self.log.info(u\"==> Received MQ message: %s\" % format(msg))\n # => MQ Request received : <MQMessage(action=client.cmd, data='{u'state': u'1', u'command_id': 14, u'device_id': 39}')>\n \n if msg.get_action() == \"client.cmd\":\n data = msg.get_data()\n #self.log.debug(u\"==> Received MQ REQ command message: %s\" % format(data)) \n # DEBUG ==> Received MQ REQ command message: {u'value': u'1', u'command_id': 50, u'device_id': 139}\n device_id = data[\"device_id\"]\n command_id = data[\"command_id\"]\n \n # {\"44.0\": {\"name\": \"Bureau\", \"dmgid\": \"120\", \"vtype\": \"V_STATUS\"}}\n for node in self.mysensorsmanager.nodes:\n if '255' not in node:\n if nodes[node]['dmgid'] == device_id: \n nodesensor_name = nodes[node]['name'] # \"Relay #1\"\n nodesensor_vtype = nodes[node]['vtype'] # \"V_STATUS\"\n break\n if nodesensor_vtype in [\"V_STATUS\", \"V_UP\", \"V_DOWN\", \"V_STOP\", \"V_PERCENTAGE\", \"V_IR_SEND\"]:\n msg = node.replace(\".\",\";\") + '1;0;' + self.mysensorsmanager.setreqType.index(nodesensor_vtype) + \";\" + data[\"value\"] + \"\\n\"\n # msg = \"42;0;1;0;2;1\\n\" nodeid;childsensorid;set command;no ack;vtype;value\\n\n # Command from the controller (1 = SET), Outgoing message to node 42 child sensor 0, Set V_STATUS (2) variable to 1 (turn on). 
No ack is requested from destination node.\n self.log.info(u\"==> Send SET message '%s' for device '%s/%s' (id:%s) to Gateway\" % (msg, nodesensor_name, nodesensor_vtype.lower(), device_id))\n self.mysensorsmanager.msgSendQueue.put(msg)\n \n # Reply MQ REP (acq) to REQ command\n self.send_rep_ack(True, None, command_id, device_name) ;\n else: \n self.log.error(u\"### Node sensor vtype not found for device '%s' (id:%s)\" % (nodesensor_name, device_id))", "title": "" }, { "docid": "1ab44c7c15841c5aa525e30de2c17b69", "score": "0.46391696", "text": "def test_get_sensor_signatures():\n qwe = QwiicExporter()\n for name, data in qwe.sensors.items():\n if name == \"ICM-20948 IMU\":\n assert qwe.get_subsensor_signatures(data) == [\n [\"aX,aY,aZ\", \"gX,gY,gZ\", \"mX,mY,mZ\", \"imu_degC\"],\n [\"aX,aY,aZ\", \"gX,gY,gZ\", \"mX,mY,mZ\"],\n [\"aX,aY,aZ\", \"gX,gY,gZ\", \"imu_degC\"],\n [\"aX,aY,aZ\", \"mX,mY,mZ\", \"imu_degC\"],\n [\"gX,gY,gZ\", \"mX,mY,mZ\", \"imu_degC\"],\n [\"aX,aY,aZ\", \"gX,gY,gZ\"],\n [\"aX,aY,aZ\", \"mX,mY,mZ\"],\n [\"aX,aY,aZ\", \"imu_degC\"],\n [\"gX,gY,gZ\", \"mX,mY,mZ\"],\n [\"gX,gY,gZ\", \"imu_degC\"],\n [\"mX,mY,mZ\", \"imu_degC\"],\n [\"aX,aY,aZ\"],\n [\"gX,gY,gZ\"],\n [\"mX,mY,mZ\"],\n [\"imu_degC\"],\n ]\n elif name == \"BME280 atmospheric sensor\":\n assert qwe.get_subsensor_signatures(data) == [\n [\"pressure_Pa\", \"humidity_%\", \"altitude_m\", \"temp_degC\"],\n [\"pressure_Pa\", \"humidity_%\", \"altitude_m\"],\n [\"pressure_Pa\", \"humidity_%\", \"temp_degC\"],\n [\"pressure_Pa\", \"altitude_m\", \"temp_degC\"],\n [\"humidity_%\", \"altitude_m\", \"temp_degC\"],\n [\"pressure_Pa\", \"humidity_%\"],\n [\"pressure_Pa\", \"altitude_m\"],\n [\"pressure_Pa\", \"temp_degC\"],\n [\"humidity_%\", \"altitude_m\"],\n [\"humidity_%\", \"temp_degC\"],\n [\"altitude_m\", \"temp_degC\"],\n [\"pressure_Pa\"],\n [\"humidity_%\"],\n [\"altitude_m\"],\n [\"temp_degC\"],\n ]\n elif name == \"VCNL4040 proximity sensor\":\n assert qwe.get_subsensor_signatures(data) == [\n [\"prox(no unit)\", \"ambient_lux\"],\n [\"prox(no unit)\"],\n [\"ambient_lux\"],\n ]\n elif name == \"OpenLog Artemis\":\n assert qwe.get_subsensor_signatures(data) == [\n [\"output_Hz\", \"count\"],\n [\"output_Hz\"],\n [\"count\"],\n ]\n elif name == \"CCS811 air quality sensor\":\n assert qwe.get_subsensor_signatures(data) == [\n [\"tvoc_ppb\", \"co2_ppm\"],\n [\"tvoc_ppb\"],\n [\"co2_ppm\"],\n ]\n elif name == \"MS8607 PHT sensor\":\n assert qwe.get_subsensor_signatures(data) == [\n [\"humidity_%\", \"hPa\", \"degC\"],\n [\"humidity_%\", \"hPa\"],\n [\"humidity_%\", \"degC\"],\n [\"hPa\", \"degC\"],\n [\"humidity_%\"],\n [\"hPa\"],\n [\"degC\"],\n ]\n else:\n assert (\n qwe.get_subsensor_signatures(data) is False\n ), \"Unknown sensor, cannot test get_sensor_signatures()\"", "title": "" }, { "docid": "4d64b1c32cc14af39555cc4029c00f7b", "score": "0.46350092", "text": "def gen_device_keypair():\n return _gen_specific_keypair(\"fd\")", "title": "" }, { "docid": "8d19f7b497ba57b04cdf84728ba1b966", "score": "0.46336296", "text": "async def device_list_received(msg):\n try:\n payload = json.loads(msg.payload)\n except ValueError as err:\n _LOGGER.error(\"Unable to parse JSON module list: %s\", err)\n return\n\n modules: List[AmpioModuleInfo] = AmpioModuleInfo.from_topic_payload(payload)\n\n for module in modules:\n data_modules = hass.data[DATA_AMPIO_MODULES]\n await async_setup_device_registry(hass, config_entry, module)\n data_modules[module.user_mac] = module\n ampio.async_publish(\n hass, 
REQUEST_MODULE_NAMES.format(mac=module.user_mac), \"1\", 0, False\n )", "title": "" }, { "docid": "67a7bb09376e59ec40d5f3f69a37a150", "score": "0.46329403", "text": "def get(self):\n logger.debug(\"Join Response Handler\")\n parser = reqparse.RequestParser()\n parser.add_argument(\"device\", help=\"This field cannot be blank\", required=True)\n parser.add_argument(\"context\", help=\"This field cannot be blank\", required=True)\n parser.add_argument(\"keys\", help=\"This field cannot be blank\", required=True)\n data = parser.parse_args()\n # print(data)\n device = ast.literal_eval(data[\"device\"])\n # print(device['deviceProfileID'])\n context = ast.literal_eval(data[\"context\"])\n # print(context)\n keys = ast.literal_eval(data[\"keys\"])\n # print(keys)\n chs_client = Chs_client()\n device_profile_id = chs_client.get_roaming_device_profile_id()\n try:\n if device_profile_id:\n device[\"device\"][\"deviceProfileID\"] = device_profile_id\n chs_client.create_device(device)\n chs_client.set_device_context(context)\n chs_client.set_device_keys(keys)\n now = datetime.now()\n end = datetime.timestamp(now)\n with open(\"end.txt\", \"a\") as f:\n f.write(str(end) + \"\\n\")\n #print(end)\n except:\n logger.error(\"Join Response Handling Failed\")", "title": "" }, { "docid": "c456d57e8e2089adf9b3e50225571009", "score": "0.46324873", "text": "def create_device(self, name, nodeidchildid, devicetype):\n client_id = \"plugin-mysensors.{0}\".format(get_sanitized_hostname())\n # name = \"Node xx\" or SensorName/Node_id.Child_id\n # nodeidchildid = Node_id.Child_id\n # devicetype = \"mysensors.node\" or \"mysensors.s_temp\" ...\n devicedata = {'data':\n {\n u'name': name, \n u'description': \"\",\n u'reference': \"\",\n u'global': [\n {\n u'key': u'nodesensor',\n u'value': nodeidchildid,\n u'type': u'string',\n u'description': u'nodeid.sensorid'\n }\n ],\n u'client_id': client_id,\n u'device_type': devicetype,\n u'xpl': [],\n u'xpl_commands': {},\n u'xpl_stats': {}\n }\n }\n\n cli = MQSyncReq(zmq.Context())\n msg = MQMessage()\n msg.set_action('device.create')\n msg.set_data(devicedata)\n response = cli.request('dbmgr', msg.get(), timeout=10).get()\n create_result = json.loads(response[1]) # response[1] is a string !\n self.log.debug(u\"==> Create device result: '%s'\" % response)\n if not create_result[\"status\"]:\n self.log.error(\"### Failed to create device '%s' (%s) !\" % (nodeidchildid))\n '''\n ==> response = \n '['device.create.result', \n '{\n \"status\": true, \n \"result\": {\"info_changed\": \"2017-03-23 00:17:41\", \n \"commands\": {}, \n \"description\": \"\", \n \"reference\": \"\", \n \"xpl_stats\": {}, \n \"xpl_commands\": {}, \n \"client_version\": \"0.1\", \n \"client_id\": \"plugin-mysensors.mydomogik\", \n \"device_type_id\": \"mysensors.node\", \n \"sensors\": \n {\n \"i_sketch_name\": \n {\n \"conversion\": \"\", \n \"value_min\": null, \n \"data_type\": \"DT_String\", \n \"reference\": \"i_sketch_name\", \n \"last_received\": null, \n \"value_max\": null, \n \"incremental\": false, \n \"timeout\": 0, \n \"formula\": null, \n \"last_value\": null, \n \"id\": 147, \n \"name\": \"Sketch Name\"\n }, \n \"nodetype\": \n {\"conversion\": \"\", \"value_min\": null, \"data_type\": \"DT_String\", \"reference\": \"nodetype\", \"last_received\": null, \"value_max\": null, \"incremental\": false, \"timeout\": 0, \"formula\": null, \"last_value\": null, \"id\": 145, \"name\": \"Node Type\"}, \n \"i_sketch_version\": \n {\"conversion\": \"\", \"value_min\": null, \"data_type\": 
\"DT_String\", \"reference\": \"i_sketch_version\", \"last_received\": null, \"value_max\": null, \"incremental\": false, \"timeout\": 0, \"formula\": null, \"last_value\": null, \"id\": 148, \"name\": \"Sketch Version\"}, \n \"nodeapiversion\": \n {\"conversion\": \"\", \"value_min\": null, \"data_type\": \"DT_String\", \"reference\": \"nodeapiversion\", \"last_received\": null, \"value_max\": null, \"incremental\": false, \"timeout\": 0, \"formula\": null, \"last_value\": null, \"id\": 146, \"name\": \"Node Api Version\"}, \n \"i_battery_level\": \n {\"conversion\": \"\", \"value_min\": null, \"data_type\": \"DT_Battery\", \"reference\": \"i_battery_level\", \"last_received\": null, \"value_max\": null, \"incremental\": false, \"timeout\": 0, \"formula\": null, \"last_value\": null, \"id\": 149, \"name\": \"Battery level\"}\n }, \n \"parameters\": {\"nodesensor\": {\"key\": \"nodesensor\", \"type\": \"string\", \"id\": 31, \"value\": \"44.255\"}}, \"id\": 23, \"name\": \"Node 44\"}\n }\n ']'\n '''", "title": "" }, { "docid": "f2c292b1a0a5014872071d1a5e40f50c", "score": "0.46284172", "text": "def _on_message(self, unused_client, unused_userdata, message):\n payload = str(message.payload.decode(\"utf-8\"))\n config = json.loads(payload)\n\n self.interphone_enabled = config[\"interphoneEnabled\"]\n self.sound_volume = config[\"soundVolume\"]\n self.detect_once = config[\"detectOnce\"]\n\n print(\n \"Received message on topic '{}' with Qos {}\".format(\n message.topic, str(message.qos)\n )\n )\n print(\"Updated device configurations.\")\n print(f\"[interphone_enabled] {self.interphone_enabled}\")\n print(f\"[sound_volume] {self.sound_volume}\")\n print(f\"[detect_once] {self.detect_once}\")", "title": "" }, { "docid": "12bbe70c23f3c2b53d6edfdacdd27570", "score": "0.4625336", "text": "def send(self, numbers, alert):\n\n A =[]\n\n\n for n in numbers:\n #try:\n if not self.debug:\n message = (self.client.messages.create(body=alert, to= \"+57\" + n,\n from_=self.twilioValues[\"from_\"]))\n message.sid\n A.append(message.sid)\n else:\n #print (\" -> \" + \"+57\" + n + \".\")\n #print(alert)\n #print(numbers)\n A.append(\"SMdf9c1a1f03474f8690ffff460f2b988c\")\n #except:\n # if self.debug:\n # print (\"number \" + str(n) + \" is invalid\")\n # else:\n # pass\n print(A)\n print(alert)\n print(numbers)", "title": "" }, { "docid": "7e255746b6c32cbc4497abda6d5cdbeb", "score": "0.46220237", "text": "def async_added_to_hass(self):\n @callback\n def adv_received(topic, payload, qos):\n \"\"\"A new MQTT message has been received.\"\"\"\n self._current.parse_adv_msg(payload)\n if self._current.target_temp is not None:\n self._target.target_temp = self._current.target_temp\n self.hass.async_add_job(self.async_update_ha_state())\n\n @callback\n def data_received(topic, payload, qos):\n \"\"\"A new MQTT message has been received.\"\"\"\n self._current.parse_data(topic.split('/')[3], payload)\n #self._current.mode_code = self._target.mode_code\n #self._current.target_temp = self._target.target_temp\n self.hass.async_add_job(self.async_update_ha_state())\n\n yield from mqtt.async_subscribe(self.hass, 'ble/{}/advertisement/ff'.format(self._mac), adv_received, 1)\n yield from mqtt.async_subscribe(self.hass, 'ble/{}/data/+'.format(self._mac), data_received, 1)\n now = datetime.now()\n cmds = { 'tries': 10,\n 'commands': [\n { 'action': 'readCharacteristic', 'uuid': UUID_MODEL },\n { 'action': 'readCharacteristic', 'uuid': UUID_FIRMWARE },\n { 'action': 'readCharacteristic', 'uuid': UUID_SOFTWARE },\n { 'action': 
'readCharacteristic', 'uuid': UUID_MANU },\n { 'action': 'writeCharacteristic', 'uuid': UUID_PIN, 'value': [ 0, 0, 0, 0 ], 'ignoreError': '1' }, # try PIN 000000 first, in case the thermostat was reset\n { 'action': 'writeCharacteristic', 'uuid': UUID_PIN, 'value': [ int(x) for x in self._pin.to_bytes(4, byteorder = 'little') ] }, # send real/desired PIN\n { 'action': 'writeCharacteristic', 'uuid': UUID_DATETIME, 'value': [ now.minute, now.hour, now.day, now.month, now.year - 2000 ] },\n { 'action': 'readCharacteristic', 'uuid': UUID_MODE },\n { 'action': 'readCharacteristic', 'uuid': UUID_TEMP },\n { 'action': 'readCharacteristic', 'uuid': UUID_BATTERY },\n ]\n }\n mqtt.async_publish(self.hass, 'ble/{}/commands'.format(self._mac), json.dumps(cmds), 1, False)", "title": "" }, { "docid": "00e71ca3d5e6dff29ca24eb52af97965", "score": "0.46151528", "text": "def on_data(topic ,data):\r\n if data['topic'] == 'temperature':\r\n post_data = ClientPostData(data['topic'], data['time'], data['data'])\r\n temperature.append(post_data) \r\n if data['topic'] == 'light':\r\n post_data = ClientPostData(data['topic'], data['time'], data['data'])\r\n light.append(post_data)\r\n if data['topic'] == 'monitor':\r\n # decode image from b64\r\n im = base64.b64decode(data['data'])\r\n arr = np.fromstring(im, dtype=np.uint8)\r\n arr = np.reshape(arr, (480,960,3))\r\n global image\r\n image = Image.fromarray(arr)", "title": "" }, { "docid": "be4b65cf18837d43a630d143a01b1c34", "score": "0.46126875", "text": "def receive_instructions_message(self, instructions_msg):\n self.instruction_builder.reset_instructions()\n\n if instructions_msg.instructions is None or len(instructions_msg.instructions) == 0:\n rospy.logwarn('Unknown command!')\n\n for instruction in instructions_msg.instructions:\n # Formula used to translate values from one given value range to another value range maintaining it's ratio:\n # NewValue = (((OldValue - OldMin) * (NewMax - NewMin)) / (OldMax - OldMin)) + NewMin\n if instruction.instruction == 'HOLD':\n self.drone.hold()\n return\n\n if instruction.instruction == 'THROTTLE_UP':\n scaled_throttle = (((instruction.intensity - 0.0) * (1.0 - 0.5)) / (100.0 - 0.0)) + 0.5\n self.instruction_builder.add_throttle(scaled_throttle)\n elif instruction.instruction == 'THROTTLE_DOWN':\n scaled_throttle = (((instruction.intensity - 0.0) * (0.2 - 0.0)) / (100.0 - 0.0)) + 0.0\n self.instruction_builder.add_throttle(scaled_throttle)\n else:\n self.instruction_builder.add_throttle(0.7)\n\n if instruction.instruction == 'FORWARD':\n scaled_pitch = -1.0 * ((((instruction.intensity - 0.0) * (0.5 - 0.1)) / (100.0 - 0.0)) + 0.1)\n self.instruction_builder.add_pitch(scaled_pitch)\n self.instruction_builder.add_throttle(-0.1)\n\n if instruction.instruction == 'TURN_LEFT':\n scaled_yaw = -1 * ((((instruction.intensity - 0.0) * (1.0 - 0.3)) / (100.0 - 0.0)) + 0.3)\n scaled_roll = -1 * ((((instruction.intensity - 0.0) * (0.8 - 0.0)) / (100.0 - 0.0)) + 0.0)\n self.instruction_builder.add_yaw(scaled_yaw)\n self.instruction_builder.add_roll(scaled_roll)\n elif instruction.instruction == 'TURN_RIGHT':\n scaled_yaw = (((instruction.intensity - 0.0) * (1.0 - 0.3)) / (100.0 - 0.0)) + 0.3\n scaled_roll = (((instruction.intensity - 0.0) * (0.8 - 0.0)) / (100.0 - 0.0)) + 0.0\n self.instruction_builder.add_yaw(scaled_yaw)\n self.instruction_builder.add_roll(scaled_roll)\n\n self.drone.by_instruction(self.instruction_builder.get_instructions())", "title": "" }, { "docid": "4d98e0ad055618146996d141fb8cdf9c", "score": "0.4611342", 
"text": "def test_get_device_messages_list_for_device(self):\n pass", "title": "" }, { "docid": "2786f65609fd4310065376455d0ce9a1", "score": "0.46108255", "text": "def on_message(client, userdata, msg):\n payload = msg.payload.decode('utf-8') # py3\n data = json.loads(payload)\n\n productivity_values_data = {\"soil_moisture\": data[\"analog_soil_moisture\"],\n \"temperature\": data[\"ds18b20_temp\"],\n \"illuminance\": data[\"bh1750_illuminance\"],\n \"date\": datetime.now()\n }\n\n productivity_values = aqua_crop_model.aqua_crop(**productivity_values_data)\n print(data)\n data.update(productivity_values)\n socketIO.emit('stations', json.dumps(data))\n socketIO.wait(seconds=args['wait'])", "title": "" }, { "docid": "8d7f2ed179617b7571d636c1ea779a7f", "score": "0.46067145", "text": "def send_action(devices, action):\n\n for dev in devices:\n body = {}\n if dev:\n mMonitor.engine.send_request(mMonitor.dev,[dev.address,],action,body)", "title": "" }, { "docid": "2d9df71547159c3136a2fba112dc4d6f", "score": "0.4606015", "text": "def arduino_watch(acm_mqtt, acm_config):\n acm_sqlite_conf = acm_config.getConf('sqlite')\n acm_sqlite = ACMsqlite.acmDB(acm_sqlite_conf['db_dir'],acm_sqlite_conf['xbee_csv'])\n \n acm_xbee_conf = acm_config.getConf('xbee','uart')\n acm_xbee = ACMXbee.acmXbee('\\x00\\x13\\xA2\\x00\\x40\\xB1\\xD6\\x2A',port=acm_xbee_conf['port'],baud=acm_xbee_conf['baud'])\n\n while True:\n try:\n packet, data = acm_xbee.wait_res()\n if data['id'] == 'rx':\n \n #print hex(data['source_addr_long'])\n code = \":\".join(\"{:02x}\".format(ord(c)) for c in data['source_addr_long'])\n bbb_date_u = int(time.time())\n bbb_date_t = datetime.fromtimestamp(bbb_date_u).strftime(\"%d-%m-%Y %H:%M:%S\")\n\n #Time request\n if packet['flag'] == 0x0F:\n acm_sqlite.insert(0x0F,code=code,date_t=bbb_date_t,date_u=bbb_date_u)\n acm_xbee.send_time(bbb_date_u)\n acm_mqtt.sendInit(time=bbb_date_t)\n\n #Keep Alive\n elif packet['flag'] == 0x21:\n acm_sqlite.insert(0x21,code=code,date_t=bbb_date_t,date_u=bbb_date_u)\n acm_mqtt.sendKeep(time=bbb_date_t)\n #Alarm\n elif packet['flag'] == 0x55:\n acm_sqlite.insert(0x55,code=code,date_t=bbb_date_t,date_u=bbb_date_u,a_date_t=packet['ts_d'],a_date_u=packet['ts_u'])\n acm_mqtt.sendAlarm(time=bbb_date_t)\n #SNMP-TRAP\n\n #Alarm on\n elif packet['flag'] == 0x56:\n if packet['tres'] < packet['alm1']: \n acm_sqlite.insert(0x56,code=code,name='alm1',scan=packet['alm1'],date_t=bbb_date_t,date_u=bbb_date_u,a_date_t=packet['ts_d'],a_date_u=packet['ts_u'])\n acm_mqtt.sendAlarmOn(time=bbb_date_t,topic='chiller1',value=packet['alm1'])\n \n if packet['tres'] < packet['alm2']: \n acm_sqlite.insert(0x56,code=code,name='alm2',scan=packet['alm2'],date_t=bbb_date_t,date_u=bbb_date_u,a_date_t=packet['ts_d'],a_date_u=packet['ts_u'])\n acm_mqtt.sendAlarmOn(time=bbb_date_t,topic='general1',value=packet['alm2'])\n \n if packet['tres'] < packet['alm3']:\n acm_sqlite.insert(0x56,code=code,name='alm3',scan=packet['alm3'],date_t=bbb_date_t,date_u=bbb_date_u,a_date_t=packet['ts_d'],a_date_u=packet['ts_u']) \n acm_mqtt.sendAlarmOn(time=bbb_date_t,topic='chiller2',value=packet['alm3'])\n \n if packet['tres'] < packet['alm4']: \n acm_sqlite.insert(0x56,code=code,name='alm4',scan=packet['alm4'],date_t=bbb_date_t,date_u=bbb_date_u,a_date_t=packet['ts_d'],a_date_u=packet['ts_u'])\n acm_mqtt.sendAlarmOn(time=bbb_date_t,topic='general2',value=packet['alm4'])\n\n #Finish\n elif packet['flag'] == 0xAA:\n 
acm_sqlite.insert(0xAA,code=code,date_t=bbb_date_t,date_u=bbb_date_u,a_date_t=packet['ts_d'],a_date_u=packet['ts_u'])\n acm_mqtt.sendFinish(time=bbb_date_t)\n\n #Error\n elif packet['flag'] == 0xFF:\n acm_sqlite.insert(0xFF,code=code,date_t=bbb_date_t,date_u=bbb_date_u,a_date_t=packet['ts_d'],a_date_u=packet['ts_u'],errc=packet['errc'])\n acm_mqtt.sendErrorXbee(time=bbb_date_t,error=packet['errc'])\n \n elif packet['flag'] == 0x01:\n acm_sqlite.insert(0x01,code=code,date_t=bbb_date_t,date_u=bbb_date_u,err=packet['err'])\n\n elif data['id'] == 'tx_status':\n #print \"Response: \" + str(packet['status'])\n pass\n except KeyboardInterrupt:\n break", "title": "" }, { "docid": "b8af7f85044ab54486d69ae94e3ae150", "score": "0.46043798", "text": "def ReadDustTrak():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((HOST, PORT))\n\n s.sendall(\"MSTATUS\\r\")\n data = s.recv(1024)\n Received = repr(data).strip(\"'\\\\r\\\\n\")\n\n if Received == 'Running':\n s.sendall(\"RDSN\\r\")\n data = s.recv(1024)\n SerialNum = repr(data).strip(\"'\\\\r\\\\n\")\n \n s.sendall(\"RMLOGGEDMEAS\\r\")\n data = s.recv(1024)\n Received = repr(data).strip(\"'\\\\r\\\\n\")\n RArray = Received.split(',')\n \n OutputFile.write(\",\".join([INSTR,time.strftime(\"%Y-%m-%d %H:%M:%S\"),str(RArray[1])])+\"\\n\")\n OutputFile.flush()\n elif Received == 'Idle':\n s.sendall(\"MSTART\\r\")\n s.close()\n return\n \n print \"DustTrak, {}, {} mg/m3\".format(time.strftime(\"%Y-%m-%d %H:%M:%S\"), RArray[1])\n \n s.close()", "title": "" } ]
34aba6c22545ff88c6c765471d69fd2a
A shorthand to create an original Article and its translation.
[ { "docid": "5d2e225981e4df530ec9820e1b8a9211", "score": "0.6733866", "text": "def multilingual_article(**kwargs):\n langs = set(kwargs.pop(\"langs\", []))\n contents = kwargs.pop(\"contents\", {})\n translations = {}\n\n # Create original object\n original = ArticleFactory(**kwargs)\n kwargs[\"original\"] = original\n\n # Create translations adopting original kwargs or possible language\n # specific kwargs if any\n for lang in langs:\n context = kwargs.copy()\n context[\"language\"] = lang\n\n if lang in contents:\n context.update(contents[lang])\n\n translations[lang] = ArticleFactory(\n **context\n )\n\n return {\n \"original\": original,\n \"translations\": translations,\n }", "title": "" } ]
[ { "docid": "a13a1997c353eade0d83338d86918ad0", "score": "0.6235401", "text": "def maketrans(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "d1713bd4c92bd95b2575278cf87f58e5", "score": "0.6148261", "text": "def create_translation_entry(self, sdoc: SourcedDocument):\n sdoc.doc.id = self.current_art_id\n self.current_art_id += 1\n return {\n \"document_id\": sdoc.doc.id,\n \"document_collection\": self.collection,\n \"source_doc_id\": sdoc.source_id,\n \"md5\": self.get_md5(sdoc),\n \"source\": sdoc.source,\n \"date_inserted\": self.insertion_time\n }", "title": "" }, { "docid": "bb931f42342b9e84cd083cd74456296e", "score": "0.5992268", "text": "def test_translation_create(self):\n pass", "title": "" }, { "docid": "1d421119cd94a71e13507502066f20fc", "score": "0.5879826", "text": "def articles():", "title": "" }, { "docid": "4eee4b2fb584cc0c0d7d7070eed403f1", "score": "0.58373934", "text": "def create_article(article_name: str) -> PubMedArticle:\n article_path = os.path.join(get_resources_path(), article_name)\n xml_element = ET.parse(article_path).getroot()\n return PubMedArticle(xml_element)", "title": "" }, { "docid": "a6b3a734818467c486e13de0fc3ed97a", "score": "0.5784804", "text": "def new_article_from_draft(draft):\n new_article = Article( article_name = draft.article_name,\n article_external_url = draft.article_external_url,\n #name_author = draft.name_author,\n article_description = draft.article_description,\n article_body_text = draft.article_body_text,\n article_caption_styles = draft.article_caption_styles,\n article_type = draft.article_type,\n )\n\n new_article.save()\n old_authors = draft.name_author.all()\n new_article.name_author = old_authors\n new_article.save()\n draft.delete()\n print(old_authors)\n print(new_article.id)", "title": "" }, { "docid": "0781a54acf49cd1c9f903e93f931782d", "score": "0.5778184", "text": "def __init__(self, article):\n\t\tself.title = article.title\n\t\tself.date = article.date\n\t\tself.image_url = article.image_url\n\t\tself.description = (article.description[:140].strip() + '...') if len(article.description) > 140 else article.description", "title": "" }, { "docid": "3f7ce12f7a4b8443a4a27f7573b12a98", "score": "0.5777499", "text": "def translate(self):\r\n raise NotImplementedError", "title": "" }, { "docid": "65ec7f4559add08dd5f16df54bdbd7be", "score": "0.5744597", "text": "def get_article(self):\n\n document_cursor = db.documents.find_one({\"url\": self.url})\n if document_cursor:\n document_id = document_cursor[\"_id\"]\n else:\n document_id = None\n if document_id:\n print(\"This article was already in the database\")\n self.db_doc = db.documents.find_one({\"_id\": document_id})\n self.title = self.db_doc[\"title\"]\n self.authors = self.db_doc[\"authors\"]\n self.text = self.db_doc[\"text\"]\n self.publish_date = self.db_doc[\"publish-date\"]\n return None\n\n self.article = Article(self.url)\n grammar_mistakes = self.check_grammar(self.article.title)\n\n # try to download the article\n try:\n self.article.download()\n except:\n print(\"couldnt download\")\n return None\n\n # try to parse the article\n try:\n self.article.parse()\n except:\n print(\"couldnt parse\")\n return None\n\n # Try to use nlp on the article\n try:\n self.article.nlp()\n except:\n print(\"couldnt use nlp\")\n return None\n\n self.title = self.article.title\n self.authors = self.article.authors\n self.text = self.article.text\n self.publish_date = self.article.publish_date\n print(self.title)\n print(self.authors)\n\n 
#self.grammar_mistakes = self.check_grammar(self.text)\n\n # document_cursor = db.documents.find_one({\"url\": self.url})\n # if document_cursor:\n # document_id = document_cursor[\"_id\"]\n # else:\n # document_id = None\n # if document_id:\n\n\n document_id=self.add_article_to_db()\n\n for author in self.authors:\n author_cursor = db.authors.find_one({\"author\":author})\n\n if author_cursor:\n #author_id = author_cursor[\"_id\"]\n if document_id != author_cursor[\"known_articles\"]:\n db.authors.updateOne({\"author\":author},{\"known_articles\":author_cursor[\"known_articles\"].extend(document_id)})\n else:\n db.authors.insert_one({\n \"author\": author,\n \"known_articles\": [document_id]\n })", "title": "" }, { "docid": "5a1c7e2d6aa0e6481f4241985976d127", "score": "0.57442206", "text": "def translate(self):\n raise NotImplementedError", "title": "" }, { "docid": "42eb1c060fc36af5ecb57e846a562e0d", "score": "0.5712158", "text": "def _get_translated_body(self):\r\n # [START _get_translated_body]\r\n body = self._body\r\n url = self._url\r\n articleid = self._articleid\r\n translator = Translator()\r\n\r\n split_body_list = body.split('```')\r\n chunk_size = 2000\r\n prefix = '\\n\\n```'\r\n suffix = '```\\n\\n'\r\n banner = '**This article is an automatic translation of the article[' + articleid + '] below.\\n' + url +'**\\n\\n'\r\n translated_body = banner\r\n for i,element in enumerate(split_body_list):\r\n if i%2 == 0:\r\n if len(element) > chunk_size:\r\n chunk_list = [element[i: i+chunk_size] for i in range(0, len(element), chunk_size)]\r\n split_body_list[i] = ''\r\n for chunk in chunk_list:\r\n split_body_list[i] += translator.translate(chunk,dest='en').text\r\n else:\r\n split_body_list[i] = translator.translate(element,dest='en').text\r\n split_body_list[i] = self._delete_redundant_space(split_body_list[i])\r\n\r\n if i + 1 == len(split_body_list) or len(split_body_list) == 1:\r\n translated_body += split_body_list[i]\r\n else:\r\n translated_body += (split_body_list[i] + prefix)\r\n else:\r\n if i + 1 == len(split_body_list) or len(split_body_list) == 1:\r\n translated_body += split_body_list[i]\r\n else:\r\n translated_body += (split_body_list[i] + suffix)\r\n\r\n return translated_body\r\n # [END _get_translated_body]\r", "title": "" }, { "docid": "f0c6154b0347a2e24d86f5eef022bcea", "score": "0.57097405", "text": "def create( self, trans, name, description=''):", "title": "" }, { "docid": "498aac8c9052e705e659ec0bd662c47e", "score": "0.570527", "text": "def from_text(cls, article_text):\n finder = TemplateFinder(article_text)\n find_re_daten = finder.get_positions(RE_DATEN)\n find_re_abschnitt = finder.get_positions(RE_ABSCHNITT)\n # only one start template can be present\n if len(find_re_daten) + len(find_re_abschnitt) != 1:\n raise ReDatenException(\"Article has the wrong structure. There must be one start template\")\n if find_re_daten:\n find_re_start = find_re_daten\n else:\n find_re_start = find_re_abschnitt\n find_re_author = finder.get_positions(RE_AUTHOR)\n # only one end template can be present\n if len(find_re_author) != 1:\n raise ReDatenException(\"Article has the wrong structure. There must one stop template\")\n # the templates must have the right order\n if find_re_start[0][\"pos\"][0] > find_re_author[0][\"pos\"][0]:\n raise ReDatenException(\"Article has the wrong structure. 
Wrong order of templates.\")\n # it can only exist text between the start and the end template.\n if find_re_start[0][\"pos\"][0] != 0:\n raise ReDatenException(\"Article has the wrong structure. There is text in front of the article.\")\n if find_re_author[0][\"pos\"][1] != len(article_text):\n raise ReDatenException(\"Article has the wrong structure. There is text after the article.\")\n try:\n re_start = TemplateHandler(find_re_start[0][\"text\"])\n except TemplateHandlerException as error:\n raise ReDatenException(\"Start-Template has the wrong structure.\") from error\n try:\n re_author = REAuthor.from_template(find_re_author[0][\"text\"])\n except TemplateHandlerException as error:\n raise ReDatenException(\"Author-Template has the wrong structure.\") from error\n properties_dict = cls._extract_properties(re_start.parameters)\n return Article(article_type=re_start.title,\n re_daten_properties=properties_dict,\n text=article_text[find_re_start[0][\"pos\"][1]:find_re_author[0][\"pos\"][0]]\n .strip(),\n author=re_author)", "title": "" }, { "docid": "98d9fe2355e082271b658c53b5e7116d", "score": "0.56716543", "text": "def create_with_edition(self, title, author, text):\n new_post = Post.objects.create(title=title, slug=slugify(title), author=author)\n new_edition = Edition.objects.create(post=new_post, text=text)\n return new_post", "title": "" }, { "docid": "2f9fe0ee2cfa3fc398296ea9bc4fb5da", "score": "0.56290185", "text": "def show_teaser(article):\n return {'article': article}", "title": "" }, { "docid": "8ce890269795b7d96d5e68f206c8eea9", "score": "0.55602896", "text": "def create_passage(passage, translation=None):\n return {\"word\": passage, \"translation\": translation, \"next\": None}", "title": "" }, { "docid": "3dbdc8b6cc6011efe1403ccce2d743c9", "score": "0.5546388", "text": "def translate(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "3035be04cef117c3c50b584bf4217696", "score": "0.5532673", "text": "def create_article(title, content_source=\"test article text\", days_in_past=0):\n time = timezone.now() + datetime.timedelta(days=-days_in_past)\n return Article.objects.create(title=title,\n content_source=content_source,\n pub_date=time)", "title": "" }, { "docid": "3035be04cef117c3c50b584bf4217696", "score": "0.5532673", "text": "def create_article(title, content_source=\"test article text\", days_in_past=0):\n time = timezone.now() + datetime.timedelta(days=-days_in_past)\n return Article.objects.create(title=title,\n content_source=content_source,\n pub_date=time)", "title": "" }, { "docid": "952cf6de326a9ca9ffc38e0f83bfcdcc", "score": "0.55176383", "text": "def test_translation_review(self):\n pass", "title": "" }, { "docid": "c807ca6e9be4d6ea371b88e1798e2f26", "score": "0.5507654", "text": "def mktranslation(self, obj, lang, **kwargs):\n try:\n obj.set_current_language(lang)\n except:\n try:\n obj.translate(lang)\n except IntegrityError:\n pass\n for k, v in kwargs.items():\n setattr(obj, k, v)\n obj.save()", "title": "" }, { "docid": "e565fcd90318101682b4e2cb0206ff6e", "score": "0.5478009", "text": "def translate(text):", "title": "" }, { "docid": "79c386cb9e9f85767d400a8a35138283", "score": "0.5475836", "text": "def test_translated(self, client, root):\n n1 = root.add(langslugs=dict(nl=\"a\", en=\"b\"))\n n2 = root.add(langslugs=dict(en=\"a\", nl=\"b\"))\n\n\n translation.activate('nl')\n res = MainHandler.coerce(dict(instance=\"a\"))\n assert res['instance'] == n1\n\n translation.activate('en')\n res = 
MainHandler.coerce(dict(instance=\"a\"))\n assert res['instance'] == n2", "title": "" }, { "docid": "5451ed2cfbb4743e4531020f8687d6ee", "score": "0.54758334", "text": "def AddTranslator(self, translator):", "title": "" }, { "docid": "93fedcf2b5572bb0d6def60f13b39a60", "score": "0.54429305", "text": "def test_translate(self):\n translation = Translation(**gen_data())\n translation.translate()\n self.assertEqual('Bonjour le monde!', translation.output)", "title": "" }, { "docid": "fe7c37513f4029e5fa00ab04dfc07b7c", "score": "0.54203105", "text": "def dbwriteArticle(self, a):\n # add head + title\n e = Element(\"article\", lang=self.language)\n if self.root is None:\n self.root = e\n h = SubElement(e,\"articleinfo\")\n t = SubElement(h, \"title\")\n if a.caption:\n t.text = a.caption\n \n # add a section and heading for this article \n s = SubElement(e, \"section\")\n si = SubElement(s, \"sectioninfo\")\n h = SubElement(si, \"title\")\n h.text = a.caption\n e.writeto = s\n return e", "title": "" }, { "docid": "ea28eaee3c5e5d899682c496d46683d3", "score": "0.5399459", "text": "def translate(obj, columns, content):\n return super(FullTranscript, FullTranscript).translate(obj, columns, content)", "title": "" }, { "docid": "3324e622877655402c5cce76a616b1da", "score": "0.5385482", "text": "def _trans(self, event, old, new, user=None):\n remark = u'{}: '.format(event) if event else u''\n remark += u'{} >> {}'.format(old, new)\n if user:\n Note.objects.create(content_object=self,\n remark=remark, is_history=True,\n created_by=user)\n else:\n Note.objects.create(content_object=self,\n remark=remark, is_history=True)", "title": "" }, { "docid": "3941c6155910b98418a416b1b6829b9d", "score": "0.53554004", "text": "async def create_article(article: schemas.Article) -> schemas.Preview.Article:\n return datasource.insert_article(article)", "title": "" }, { "docid": "3d941bb72e4db03c20fc8c101f163702", "score": "0.53433675", "text": "def test_article_manager_create_article(self):\n article = {\n 'source': {'id': 'news-com-au', 'name': 'News.com.au'},\n 'author': 'unknown',\n 'title': 'F1 British Grand Prix live: updates, results, starting grid, Vettel reacts to Ferrari sabotage '\n 'questions',\n 'description': 'The British Grand Prix has ended in incredible drama as the last lap went down to the '\n 'wire with Lewis Hamilton winning after his tyre blew on the last lap.',\n 'url': 'https://www.news.com.au/sport/motorsport/formula-one/live-updates-from-the-2020-british-grand'\n '-prix/live-coverage/ba297f46d4e91321c092db9d3d5d2e1f',\n 'urlToImage': 'https://content.api.news/v3/images/bin/2554ff2213b5c8a54e9809d310e697db',\n 'publishedAt': '2020-08-02T22:04:07Z',\n 'content': '...'\n }\n created = Article.objects.create_article(article)\n self.assertEqual(article['source']['name'], created.source)\n self.assertEqual('unknown', created.author)\n self.assertEqual(article['title'], created.title)\n self.assertEqual(article['description'], created.description)\n self.assertEqual(article['url'], created.url)\n self.assertEqual(parse_datetime(article['publishedAt']), created.published_at)\n self.assertEqual('...', created.content)", "title": "" }, { "docid": "cfaec30800c3c2b1a6ef3034f3424e48", "score": "0.5324116", "text": "def process_text(article_text,nlp):\n \n # lemmatize NOTE: NOT IMPLEMENTED YET...\n # article_text = lemmatize(article_text,nlp)\n \n # run gensim preprocess\n article_text = simple_preprocess(article_text, deacc=True)\n \n return article_text", "title": "" }, { "docid": 
"a1e68f0dde9df87475243522dafb5906", "score": "0.531908", "text": "def translate(src_filename, dest_filename, dest_lang, src_lang='auto', specialwords_filename=''):\n translator = Translator() # Initialize translator object\n\n with open(src_filename) as srcfile, open(dest_filename, 'w') as destfile:\n\n lines = srcfile.readlines()\n specialwords_dict = {}\n\n # If special words file exists, place special word mappings into specialwords_dict\n if specialwords_filename != '':\n with yaml.load(open(specialwords_filename)) as specialwords_fulllist:\n\n # Gets source language if not passed through\n if src_lang == 'auto':\n src_lang == str(translator.detect(lines[0]))[14:16]\n\n # Attempts to add the correct dictionary of special words\n try:\n specialwords_dict = specialwords_dict_full[src_lang + '_' + dest_lang]\n except KeyError:\n print('Special words file doesn\\'t contain required language translation!')\n\n # Parses each line for special cases and ignores them when translating\n for line in lines:\n line = line.strip()\n\n # Parses for code blocks and ignores them entirely\n if line.startswith(\"```\"):\n line = line\n\n else:\n # Parses for URL's and file links and ignores them\n if line.find(\"[\") != -1 and line.find(\"]\") != -1 and line.find(\"(\") != -1 and line.find(\")\") != -1:\n ignore_start = line.find(\"(\")\n ignore_end = line.find(\")\")\n head = replace(specialwords_dict,line[0:ignore_start])\n tail = replace(specialwords_dict,line[ignore_end+1:])\n head = translator.translate(head, dest_lang, src_lang).text\n tail = translator.translate(tail, dest_lang, src_lang).text\n line = head + line[ignore_start:ignore_end+1] + tail\n\n # Translates normally if there are no special cases\n else:\n line = translator.translate(line, dest_lang, src_lang).text\n\n # Write to destination file\n destfile.write(line + '\\n')", "title": "" }, { "docid": "df24f5aa78e1413cf7ea1003b2180b20", "score": "0.53131425", "text": "def transform_article(article, featured_image=None, author=None):\n article['image'] = featured_image\n\n article['author'] = author\n\n if 'date_gmt' in article:\n article_gmt = article['date_gmt']\n article_date = datetime.strptime(article_gmt, '%Y-%m-%dT%H:%M:%S')\n article['date'] = article_date.strftime('%-d %B %Y')\n\n if 'excerpt' in article and 'rendered' in article['excerpt']:\n article['excerpt']['raw'] = strip_excerpt(\n article['excerpt']['rendered'])[:340]\n\n # If the excerpt doesn't end before 340 characters, add ellipsis\n raw_article = article['excerpt']['raw']\n # split at the last 3 characters\n raw_article_start = raw_article[:-3]\n raw_article_end = raw_article[-3:]\n # for the last 3 characters replace any part of […]\n raw_article_end = raw_article_end.replace('[', '')\n raw_article_end = raw_article_end.replace('…', '')\n raw_article_end = raw_article_end.replace(']', '')\n # join it back up\n article['excerpt']['raw'] = ''.join([\n raw_article_start,\n raw_article_end,\n ' […]'])\n\n return article", "title": "" }, { "docid": "c7a7c15911eb1004befc6a988f9e61ec", "score": "0.5284541", "text": "def generate(indef_arts, def_arts, spros, opros, nouns, templates, \n lang, output_dir):\n\n outfile = open(output_dir, 'w')\n french_vowels = ['i', 'u', 'o', 'a', 'h', 'e', 'é']\n m_nouns, f_nouns = nouns['m'], nouns['f']\n\n for i, template in enumerate(templates): # for each template\n template = template.split()\n # words to keep\n words_to_keep = [i for i, w in enumerate(template) if not w.startswith('[')]\n # words to change\n words_to_change = [i for i, w 
in enumerate(template) if w.startswith('[')]\n for k, noun in enumerate(m_nouns):\n m_sentence = ['*'] * len(template)\n f_sentence = ['*'] * len(template)\n for idx, w in enumerate(words_to_change):\n word_to_change = template[w]\n\n if word_to_change == '[noun]':\n m_sentence[w] = m_nouns[k]\n f_sentence[w] = f_nouns[k] \n\n # handle the special def arts in french\n if lang == 'fr' and template[w - 1] == '[def_art]':\n # in our case, the def articles\n # only comes before nouns\n\n if m_nouns[k][0].lower() in french_vowels:\n\n m_sentence[w - 1] = def_arts['special']\n if f_nouns[k][0].lower() in french_vowels:\n f_sentence[w - 1] = def_arts['special']\n\n elif word_to_change == '[indef_art]':\n m_sentence[w] = indef_arts['m']\n f_sentence[w] = indef_arts['f']\n\n elif word_to_change == '[def_art]':\n m_sentence[w] = def_arts['m']\n f_sentence[w] = def_arts['f']\n\n elif word_to_change == '[spro]':\n m_sentence[w] = spros['m']\n f_sentence[w] = spros['f']\n\n elif word_to_change == '[opro]':\n m_sentence[w] = opros['m']\n f_sentence[w] = opros['f']\n\n for w in words_to_keep:\n m_sentence[w] = template[w]\n f_sentence[w] = template[w]\n\n print(m_sentence)\n print(f_sentence)\n if lang == 'fr':\n out_m_sentence = ' '.join(m_sentence).replace(\"l' \", \"l'\").replace(\"qu' \", \"qu'\")\n out_f_sentence = ' '.join(f_sentence).replace(\"l' \", \"l'\").replace(\"qu' \", \"qu'\")\n else:\n out_m_sentence = ' '.join(m_sentence)\n out_f_sentence = ' '.join(f_sentence)\n\n # src, trg, src_gender, trg_gender\n outfile.write(out_m_sentence + '\\t' + out_m_sentence + '\\t' + 'M' + '\\t' + 'M')\n outfile.write('\\n')\n outfile.write(out_m_sentence + '\\t' + out_f_sentence + '\\t' + 'M' + '\\t' + 'F')\n outfile.write('\\n')\n outfile.write(out_f_sentence + '\\t' + out_m_sentence + '\\t' + 'F' + '\\t' + 'M')\n outfile.write('\\n')\n outfile.write(out_f_sentence + '\\t' + out_f_sentence + '\\t' + 'F'+ '\\t' + 'F')\n outfile.write('\\n')\n outfile.close()", "title": "" }, { "docid": "02f5e98ef2163e40e2c249db4b6da5e6", "score": "0.52391344", "text": "def set_articles(articles):\n global article_features\n global article_M, article_M_inv, article_w\n global article_b\n article_features = articles\n article_M = {a: np.identity(6) for a in article_features}\n article_M_inv = {a: np.identity(6) for a in article_M}\n article_b = {a: np.zeros((6, 1)) for a in article_features}\n article_w = {a: np.zeros((6, 1)) for a in article_features}", "title": "" }, { "docid": "4cb133c8d1228b457f3ef81825e25fc1", "score": "0.5233303", "text": "def test_translation_update(self):\n pass", "title": "" }, { "docid": "7ccd4989e2294898404f36c972ffa11f", "score": "0.52151895", "text": "def create_article(conn, article):\n\n global id_of_first_author_column;\n\n bibtex_id = \"\";\n year = article['\"Year\"'];\n title = article['\"Title\"'];\n doi = article['\"DOI\"'];\n pages = article['\"pages\"'];\n\n if pages == '\"\"':\n first_page = -1;\n last_page = -1;\n else:\n temp = pages.replace('\"', '').split(\"-\");\n first_page = int(temp[0]);\n last_page = int(temp[1]);\n\n first_page = first_page;\n last_page = last_page;\n\n conference_id = get_conference_id(conn, year);\n\n record = (bibtex_id, conference_id, title,doi,first_page,last_page);\n\n sql = ''' INSERT INTO articles(bibtex_id,conference_id,title,doi,first_page,last_page)\n VALUES(?,?,?,?,?,?) 
'''\n cur = conn.cursor()\n cur.execute(sql, record);\n\n article_id = cur.lastrowid;\n\n # Add all the authors\n # Look for columns of authors\n for i in range(article['\"Number of authours\"']):\n fullname = article[id_of_first_author_column + i];\n author_id = get_author_id(conn, fullname);\n create_authorship(conn, author_id, article_id);", "title": "" }, { "docid": "918142d4eeab652be1e906c1ff1b31ef", "score": "0.52133614", "text": "def encode_article(self):\n sentences_to_encode = []\n encoded_article = [None]\n sentence_list = [0]\n sent_count = 0\n\n for sentence in self.article:\n sent_count += len(sentence)\n sentence_list.append(sent_count)\n for sent in sentence:\n sentences_to_encode.append(sent) # Split into sentences\n\n print('Loading models...'),\n model = skipthoughts.LoadModel().load_model()\n encoder = skipthoughts.Encoder(model)\n print('Encoding sentences...'),\n encoded_sentences = encoder.encode(sentences_to_encode, verbose=False)\n print('Done')\n\n for i in range(len(self.article)):\n begin = sentence_list[i]\n end = sentence_list[i + 1]\n encoded_article[i] = encoded_sentences[begin:end]\n return encoded_article", "title": "" }, { "docid": "92cd298e1bd04079e7d9537f144d713d", "score": "0.519701", "text": "def translate(self, object_name):\r\n pass", "title": "" }, { "docid": "13591fe16d4da1a8ee1fa62e7dcf94f6", "score": "0.5196361", "text": "def translate(full_text):\r\n translator = Translator()\r\n # breaking down text into small pieces for faster translation\r\n translations = []\r\n unique_elements = full_text.split('. ')\r\n\r\n for element in unique_elements:\r\n # Adding all the translations to a dictionary (translations)\r\n translations.append(translator.translate(element).text)\r\n # Final translated string\r\n english_translated_text = ''\r\n # joining all the converted chunks to final string\r\n for i in translations:\r\n english_translated_text = english_translated_text + ' ' + i\r\n # return the text for all translated chunks\r\n return english_translated_text", "title": "" }, { "docid": "e58eaef1fb281cc3cc4dbaa8838bd80a", "score": "0.51941335", "text": "def generate_translations(item):\n fr_prefix = '(français) '\n es_prefix = '(español) '\n oldname = str(item.name)\n item.name = {'en': oldname, 'fr': fr_prefix + oldname, 'es': es_prefix + oldname}\n item.save()", "title": "" }, { "docid": "98e25581271f07e51b177ce8da17f66c", "score": "0.5183574", "text": "def article(self, article_id, text_format=None):\n endpoint = 'articles/{}'.format(article_id)\n params = {'text_format': text_format or self.response_format}\n return self._make_request(path=endpoint, params_=params, public_api=True)", "title": "" }, { "docid": "57702ac62d40140debabd0ee52d35ef1", "score": "0.5181046", "text": "def translational(self, x):\n self.trans.copy_from(x)", "title": "" }, { "docid": "4755e9ad4c4ed3c1a13c0fd703c0bf69", "score": "0.51672024", "text": "def translate(request, document_slug, document_locale, revision_id=None):\n # TODO: Refactor this view into two views? 
(new, edit)\n # That might help reduce the headache-inducing branchiness.\n parent_doc = get_object_or_404(\n Document, locale=settings.WIKI_DEFAULT_LANGUAGE, slug=document_slug)\n user = request.user\n\n if not revision_id:\n # HACK: Seems weird, but sticking the translate-to locale in a query\n # param is the best way to avoid the MindTouch-legacy locale\n # redirection logic.\n document_locale = request.REQUEST.get('tolocale',\n document_locale)\n\n # Set a \"Discard Changes\" page\n discard_href = ''\n\n if settings.WIKI_DEFAULT_LANGUAGE == document_locale:\n # Don't translate to the default language.\n return HttpResponseRedirect(reverse(\n 'wiki.edit_document', locale=settings.WIKI_DEFAULT_LANGUAGE,\n args=[parent_doc.full_path]))\n\n if not parent_doc.is_localizable:\n message = _lazy(u'You cannot translate this document.')\n return render(request, 'handlers/400.html',\n {'message': message}, status=400)\n\n if revision_id:\n initial_rev = get_object_or_404(Revision, pk=revision_id)\n\n based_on_rev = get_current_or_latest_revision(parent_doc,\n reviewed_only=False)\n\n disclose_description = bool(request.GET.get('opendescription'))\n\n try:\n doc = parent_doc.translations.get(locale=document_locale)\n slug_dict = _split_slug(doc.slug)\n except Document.DoesNotExist:\n doc = None\n disclose_description = True\n slug_dict = _split_slug(document_slug)\n\n # Find the \"real\" parent topic, which is its translation\n try:\n parent_topic_translated_doc = (\n parent_doc.parent_topic.translations.get(\n locale=document_locale))\n slug_dict = _split_slug(\n parent_topic_translated_doc.slug + '/' + slug_dict['specific'])\n except:\n pass\n\n user_has_doc_perm = ((not doc) or (doc and doc.allows_editing_by(user)))\n user_has_rev_perm = ((not doc) or (doc and doc.allows_revision_by(user)))\n if not user_has_doc_perm and not user_has_rev_perm:\n # User has no perms, bye.\n raise PermissionDenied\n\n doc_form = rev_form = None\n\n if user_has_doc_perm:\n if doc:\n # If there's an existing doc, populate form from it.\n discard_href = doc.get_absolute_url()\n doc.slug = slug_dict['specific']\n doc_initial = _document_form_initial(doc)\n else:\n # If no existing doc, bring over the original title and slug.\n discard_href = parent_doc.get_absolute_url()\n doc_initial = {'title': based_on_rev.title,\n 'slug': slug_dict['specific']}\n doc_form = DocumentForm(initial=doc_initial)\n\n if user_has_rev_perm:\n initial = {'based_on': based_on_rev.id, 'comment': '',\n 'toc_depth': based_on_rev.toc_depth}\n if revision_id:\n initial.update(\n content=Revision.objects.get(pk=revision_id).content)\n elif not doc:\n initial.update(content=based_on_rev.content)\n instance = doc and get_current_or_latest_revision(doc)\n rev_form = RevisionForm(instance=instance, initial=initial)\n\n if request.method == 'POST':\n which_form = request.POST.get('form', 'both')\n doc_form_invalid = False\n\n # Grab the posted slug value in case it's invalid\n posted_slug = request.POST.get('slug', slug_dict['specific'])\n destination_slug = _join_slug(slug_dict['parent_split'], posted_slug)\n\n if user_has_doc_perm and which_form in ['doc', 'both']:\n disclose_description = True\n post_data = request.POST.copy()\n\n post_data.update({'locale': document_locale})\n post_data.update({'slug': destination_slug})\n\n doc_form = DocumentForm(post_data, instance=doc)\n doc_form.instance.locale = document_locale\n doc_form.instance.parent = parent_doc\n if which_form == 'both':\n # Sending a new copy of post so the slug change above\n # 
doesn't cause problems during validation\n rev_form = RevisionValidationForm(request.POST.copy())\n rev_form.parent_slug = slug_dict['parent']\n\n # If we are submitting the whole form, we need to check that\n # the Revision is valid before saving the Document.\n if doc_form.is_valid() and (which_form == 'doc' or\n rev_form.is_valid()):\n rev_form = RevisionForm(post_data)\n\n if rev_form.is_valid():\n doc = doc_form.save(parent_doc)\n\n if which_form == 'doc':\n url = urlparams(reverse('wiki.edit_document',\n args=[doc.full_path],\n locale=doc.locale),\n opendescription=1)\n return HttpResponseRedirect(url)\n else:\n doc_form.data['slug'] = posted_slug\n doc_form_invalid = True\n else:\n doc_form.data['slug'] = posted_slug\n doc_form_invalid = True\n\n if doc and user_has_rev_perm and which_form in ['rev', 'both']:\n post_data = request.POST.copy()\n if not 'slug' in post_data:\n post_data['slug'] = posted_slug\n\n rev_form = RevisionValidationForm(post_data)\n rev_form.parent_slug = slug_dict['parent']\n rev_form.instance.document = doc # for rev_form.clean()\n\n if rev_form.is_valid() and not doc_form_invalid:\n # append final slug\n post_data['slug'] = destination_slug\n\n # update the post data with the toc_depth of original\n post_data['toc_depth'] = based_on_rev.toc_depth\n\n rev_form = RevisionForm(post_data)\n rev_form.instance.document = doc # for rev_form.clean()\n\n if rev_form.is_valid():\n _save_rev_and_notify(rev_form, request.user, doc)\n url = reverse('wiki.document', args=[doc.full_path],\n locale=doc.locale)\n return HttpResponseRedirect(url)\n\n parent_split = _split_slug(parent_doc.slug)\n\n return render(request, 'wiki/translate.html',\n {'parent': parent_doc, 'document': doc,\n 'document_form': doc_form, 'revision_form': rev_form,\n 'locale': document_locale, 'based_on': based_on_rev,\n 'disclose_description': disclose_description,\n 'discard_href': discard_href,\n 'specific_slug': parent_split['specific'],\n 'parent_slug': parent_split['parent']})", "title": "" }, { "docid": "c9516fe390c1b067efdae9355fe26c46", "score": "0.5167005", "text": "def from_translation(self, la):\n E = self.realization_of()\n return self((E.exp_lattice()(la),self.cartesian_factors()[1].one()))", "title": "" }, { "docid": "49360c8ed6f0ffc12ef12dce056eff7d", "score": "0.515288", "text": "def get_translate(self):\n selection = self.selection.strip().strip(string.punctuation)\n selection = selection.replace('_', ' ')\n\n # ToDo split CamelCase words\n words = selection.split()\n splitted = []\n for w in words:\n matches = re.finditer(\n '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)',\n w)\n splitted.append(' '.join([m.group(0) for m in matches]))\n selection = ' '.join(splitted)\n\n payload = {'word': selection}\n\n twords = []\n try:\n resp = requests.get(\n \"http://api.lingualeo.com/gettranslates\",\n params=payload,\n )\n resp_json = resp.json()\n twords = [\n translation['value'].encode(\"utf-8\")\n for translation in resp_json.get(\"translate\", ())\n ]\n error = resp_json.get('error_msg')\n except requests.ConnectionError as err:\n error = 'Connection error occurred: {}'.format(err)\n except ValueError:\n error = resp.text.encode(\"utf-8\")\n except Exception as err:\n error = 'Error occurred in translator module: {}'.format(err)\n\n res = Translate(\n selection=selection,\n error=error or None,\n twords=twords,\n )\n return res", "title": "" }, { "docid": "6af8e92f78dd27ec1f367cebce918a5f", "score": "0.5151055", "text": "def _add_article(self, article):\n pmid = 
int(article[0].strip(' \\n').split('|')[0])\n # Note: The 2: slicing makes title, abstract, and mesh list objects\n # Had I only indexed into 2, then they would've all been str objs.\n title, abstract, mesh = [data.strip(' \\n').split('|')[2:]\n for data in article]\n\n mesh_terms = {\n m.partition('!')[0].lower().strip().rstrip('*')\n for m in mesh\n } - {''} # Remove empty term\n\n # PMIDs of articles we're trying to predict MeSH terms for\n self.articles.append(pmid)\n # Title/abstract/MeSH terms of a cited article\n self.citations[pmid]['title'] = ''.join(title)\n self.citations[pmid]['abstract'] = ''.join(abstract)\n self.citations[pmid]['mesh'] = mesh_terms", "title": "" }, { "docid": "699f6f94006d41db720fbbc76f34b715", "score": "0.5142341", "text": "def simple_backtranslate(original, lang_tmp = \"en\"):\n \n import textblob\n t = textblob.blob.TextBlob(original)\n translated = str(t.translate(from_lang = \"fr\", to = lang_tmp))\n blob = textblob.blob.TextBlob(translated)\n tr = blob.translate(from_lang = lang_tmp, to = \"fr\")\n return str(tr)", "title": "" }, { "docid": "d11551d5bbfde57e2433fca0061852c7", "score": "0.5127855", "text": "def english_to_german(self,text):\n translation = dict()\n if len(text) == 0:\n return text\n translation = self.language_translator.translate(text, model_id='en-de').get_result()\n translated_text = translation['translations'][0]['translation']\n return translated_text", "title": "" }, { "docid": "12f14f0d8fcbce7a2a6617f4dfc0a030", "score": "0.5123375", "text": "def parse_article(self, response):\n\n # search for article title\n title_parts = response.xpath('//div[@id=\"articlehead\"]/h1/text()').extract()\n if len(title_parts) > 0:\n title = ' '.join(set(title_parts)).encode('utf-8').strip()\n else:\n title = ''\n\n # search for article published date\n datetime_element_parts = response.xpath('//small[@id=\"pubdate\"]/strong/text()').extract()\n if len(datetime_element_parts) > 0:\n datetime_iso_str = self.parse_published_datetime(datetime_element_parts)\n else:\n datetime_iso_str = ''\n\n # search for article intro text\n article_intro_parts = response.xpath('//div[@id=\"intro\"]/strong/text()').extract()\n article_intro = ' '.join([x.strip() for x in article_intro_parts]).strip()\n\n # search for article full text\n article_full_text_fragments = response.xpath('//div[@id=\"articlebody\"]/descendant::p/descendant-or-self::*/text()').extract()\n article_full_text = ' '.join([x.strip() for x in article_full_text_fragments]).strip()\n\n # reconstruct the url to the nicely rendered page\n url_parts = response.url.split('/')\n article_id = url_parts.pop()\n url_parts.append('vrtnieuws')\n url_parts.append(article_id)\n url = '/'.join(url_parts)\n\n # now create an Article item, and return it. All Articles created during scraping can be written to an output file when the -o option is given.\n article = Article()\n article['url'] = url\n article['intro'] = article_intro\n article['title'] = title\n article['published_at'] = datetime_iso_str\n article['text'] = article_full_text\n return article", "title": "" }, { "docid": "d00465bb3a9fa30feb95e3028c46af0d", "score": "0.5118378", "text": "def start_add_article(bot, update):\n user_logger(update, \"starts add article\")\n send(bot, chat_id=update.message.chat_id, text=\"Which is the link of the article you wish to add? 
(You can look for it with the Wikipedia bot @wiki) Or /cancel if you want\") \n return ARTICLE", "title": "" }, { "docid": "044639e66b57a129923f2a5d02961011", "score": "0.50946444", "text": "def test_translation_show(self):\n pass", "title": "" }, { "docid": "36e7ded4f3872c2354f7ad63396137a6", "score": "0.5087646", "text": "def from_translation(self, la):\n return self((self.cartesian_factors()[0].one(),self.realization_of().exp_lattice()(la)))", "title": "" }, { "docid": "2118369c867a9e5212490d833408f0a3", "score": "0.5084515", "text": "def translate_many(obj, columns, contents):\n return super(FullTranscript, FullTranscript).translate_many(obj, columns, contents)", "title": "" }, { "docid": "2156af514f4b1cbe41a612259b2d4f4e", "score": "0.50663656", "text": "def getTranslation(self):\n pass", "title": "" }, { "docid": "cf3b7b98c76b4e0c621b6b1013862157", "score": "0.5062391", "text": "def __init__(self,src,tar,decoration,sentence):\r\n self.src = src\r\n self.tar = tar\r\n self.decoration = decoration\r\n self.triple = (src,decoration,tar)\r\n self.sentence = sentence\r\n # BIO annotation:\r\n src = \"\"\r\n tar = \"\"\r\n dec = \"\"\r\n for index,char in enumerate(self.src):\r\n if index == 0:\r\n src += char + \"_B-Verb\"\r\n else:\r\n src += char + \"_I-Verb\"\r\n for index,char in enumerate(self.tar):\r\n if index == 0:\r\n tar += char + \"_B-Obj\"\r\n else:\r\n tar += char + \"_I-Obj\"\r\n for index,char in enumerate(self.decoration):\r\n if index == 0:\r\n dec += char + \"_B-Dec\"\r\n else:\r\n dec += char + \"_I-Dec\"\r\n # replace the raw sentence:\r\n sentence = sentence.replace(self.src,src)\r\n sentence = sentence.replace(self.tar,tar)\r\n sentence = sentence.replace(self.decoration,dec)\r\n self.entityAnnotation = sentence", "title": "" }, { "docid": "5a8275e28c28e374da9bba90aabebc8f", "score": "0.5062032", "text": "def generate_old (self):\r\n vtopics = self.parse_input()\r\n for vtopic in vtopics:\r\n print(\"[* corpus-generate] Researching: [%s]\" % vtopic)\r\n vlinks = self.get_google_results(vtopic[\"explanation\"])\r\n \r\n # Extract article chunked into paragraphs.\r\n for vlink in vlinks:\r\n try:\r\n print(\"[* corpus-generate] Retrieve article for [%s]\" \r\n % vlink)\r\n varticle = get_article(vlink)\r\n\r\n # Add to output corpus\r\n for vpg in varticle: VCORPUS.append(\r\n [\" \".join(vpg), vtopic['name']])\r\n \r\n print(\"[* corpus-generate] Writing article(?\")\r\n write_article(varticle, vtopic['name'])\r\n \r\n except Exception as e:\r\n print(\"[* corpus-generate] Could not run!: \" + str(e))", "title": "" }, { "docid": "2575cb33586e5883a62cf17f33a00345", "score": "0.5061343", "text": "def sample_translate(text):\n group = ConverseGroup(json=sample_data_source)\n t = Translator(group)\n return t.translate(text)", "title": "" }, { "docid": "f804ec067dd2c047c5e472995388ba52", "score": "0.5043085", "text": "def setUp(self):\n\n self.new_article=Article('author','title','description','www.news.com','urltoimage','2020-04-03T10:09:32Z','content')", "title": "" }, { "docid": "9d76b7c88f00d9060262d94305983ee8", "score": "0.5032036", "text": "def __str__(self):\n return 'Article : {} by {}'.format(self.titre, self.auteur)", "title": "" }, { "docid": "9f9034ac7410519e8a5a371541d9ce70", "score": "0.50240815", "text": "def deconstructing(self, text, target='ar'):\n import sagas\n source = 'en'\n options = {'get_pronounce', 'get_translations'}\n res, t = translate(text, source=source, target=target,\n trans_verbose=False, options=options)\n print('✁', '%s(%s %s)' % (text, 
res, ''.join(t.pronounce)))\n for sent in text.split(' '):\n res, t = translate(sent, source=source, target=target,\n trans_verbose=False, options=options)\n # print('%s(%s%s)' % (sent, res, marks_th(t.pronounce)), end=\" \")\n print('%s(%s%s)' % (sent, res, marks_th(t.pronounce)))\n sagas.print_df(t.translations)\n print('.')", "title": "" }, { "docid": "a234a2088421e5e8528f670f7bb2840f", "score": "0.5016896", "text": "async def translate(self, message):\n term = message.regex.group(1)\n from_language = message.regex.group(2)\n to_language = message.regex.group(3)\n _dictionary = dict()\n languages_dict = {\n \"spanish\": \"es\",\n \"belorussian\": \"be\",\n \"bulgarian\": \"bg\",\n \"catalan\": \"cs\",\n \"czech\": \"cs\",\n \"german\": \"de\",\n \"english\": \"en\",\n \"french\": \"fr\",\n \"croatian\": \"hr\",\n \"italian\": \"it\",\n \"latin\": \"la\",\n \"macedonian\": \"mk\",\n \"dutch\": \"nl\",\n \"polish\": \"pl\",\n \"portuguese\": \"pt\",\n \"romanian\": \"ro\",\n \"russian\": \"ru\",\n \"slovak\": \"sk\",\n \"slovenian\": \"sl\",\n \"serbian\": \"sr\",\n \"ukrainian\": \"uk\",\n }\n entries = swadesh.entries(\n [\n languages_dict.get(from_language, \"english\"),\n languages_dict.get(to_language, \"english\"),\n ]\n )\n\n for word in entries:\n _word = word[0].split(\", \")\n if len(_word) > 1:\n _dictionary[_word[0]] = word[1]\n _dictionary[_word[1]] = word[1]\n else:\n _dictionary[word[0]] = word[1]\n\n translation = _dictionary.get(\n term, \"Sorry, I can't find the \" \"translation for that word :(\"\n )\n\n await message.respond(\n \"The {} word '{}' in {} is: {}\".format(\n from_language, term, to_language, translation\n )\n )", "title": "" }, { "docid": "3fa1d234aa2c69063723546a35324229", "score": "0.5016787", "text": "def createPaste(content, language):", "title": "" }, { "docid": "723a1d30e87eeef18ffd8510a0c53639", "score": "0.5015891", "text": "def fix_transclusion(page_content, title, labels, lang):\n page_content = page_content.splitlines()\n title = clean_title(title) # Remove subpage and namespace from title\n corrected_labels = {}\n edited = False\n\n for line in page_content:\n if title in line:\n index = page_content.index(line) # Remember index of each line\n\n # Case 1: HTML syntax for transclusion\n if line.startswith('<pages index'):\n for label in labels.keys():\n if label in line:\n # Check if transclusion template uses old label\n pattern = (r'(<pages index\\s?=\\s?\"?{}\"?\\s.*?fromsection'\n '\\s?=\\s?\"?)({}|.*)(\"?\\s?tosection\\s?=\\s?\"?)({}|.*)'\n '(\"?\\s?/>)'.format(re.escape(title), label, label))\n match = re.search(pattern, line)\n if match:\n # Replace old label(s) with new label\n line = (re.sub(r'([from|to]section\\s?=\\s?\"?){}'\n .format(label), r'\\1{}'.format(labels[label]),\n line, count=2))\n page_content[index] = line\n corrected_labels[label] = labels[label]\n edited = True\n del match\n\n # Case 2: Mediawiki syntax\n if line.startswith('{{#lst:') or line.startswith('{{#lstx'):\n for label in labels.keys():\n if label in line:\n pattern = (r'({{#lstx?:)(\\w+:)?({})(/\\d*)?([|]{})(}})'\n .format(title, label))\n match = re.search(pattern, line)\n if match:\n line = re.sub(pattern, r'\\1\\2\\3\\4|{}\\6'.format\n (labels[label]), line)\n page_content[index] = line\n corrected_labels[label] = labels[label]\n edited = True\n del match\n\n # Case 3: template used for transclusion\n if template[lang] and line.lower().startswith(template[lang][0]):\n for label in labels.keys():\n if label in line:\n pattern = 
(r'({}{})(/\\d*)?(.*?)([|])(\\w+)(\\s?=\\s?)({})'\n '(.*?}}$)'.format(template[lang][1], title, label))\n match = re.match(pattern, line)\n if match and match.group(5) in template[lang][2:]:\n line = (re.sub(pattern, r'\\1\\2\\3\\4\\5\\6{}\\8'.format\n (labels[label]), line))\n page_content[index] = line\n corrected_labels[label] = labels[label]\n edited = True\n del match\n\n page_content = '\\n'.join(page_content)\n if edited:\n return page_content, corrected_labels\n return None, None", "title": "" }, { "docid": "757296335a6e4490f5224ef2e5c79a59", "score": "0.50120014", "text": "def opp_topic(article, nasari):\n\n sentences = []\n for p in article['body'][1:]:\n sents = nltk.sent_tokenize(p)\n sentences.append(sents)\n\n # newspaper\n news_opp = {'title': True,\n \"opps\": [(1, 1), (1, 2), (2, 1), (2, 2), (3, 1), (3, 2), (4, 1), (4, 2), (5, 1), (6, 1)]}\n\n # Wall Street Journal\n wsj_opp = {'title': True, \"opps\": [(1, 1), (1, 2)]}\n\n # \"opps\" corrisponde a (indice paragrafo, indice frase)\n # \"title\" = true se è necessario considerare anche il titolo\n\n vectors = []\n if article['genre'] == 'wsj':\n opp = wsj_opp\n elif article['genre'] == 'news':\n opp = news_opp\n else:\n opp = {'title': True, 'opps': []}\n\n if opp['title']:\n vectors = title_topic(article, nasari)\n for t in opp['opps']:\n p = t[0] # indice paragrafo\n s = t[1] # indice sentence\n if p < len(sentences) and s < len(sentences[p]):\n sent = sentences[t[0]][t[1]]\n vectors += create_vectors(sent, nasari)\n\n return vectors", "title": "" }, { "docid": "a2a4dbbea65db6bf0618dd49457d0efe", "score": "0.4997483", "text": "def transform(self, doc):\n pass", "title": "" }, { "docid": "2b54ad7adbef8b3966118598fd8e75a0", "score": "0.49920186", "text": "def article_row(self):\n return BPOArticle(\n record_id=self.record_id,\n date_time_stamp=self.date_time_stamp(),\n action_code=self.action_code(),\n record_title=self.record_title(),\n publication_id=self.publication_id(),\n publication_title=self.publication_title(),\n publication_qualifier=self.publication_qualifier(),\n publisher=self.publisher(),\n alpha_pub_date=self.alpha_pub_date(),\n numeric_pub_date=self.numeric_pub_date(),\n source_type=self.source_type(),\n object_type=self.object_type(),\n language_code=self.language_code(),\n issn=self.issn(),\n start_page=self.start_page(),\n end_page=self.end_page(),\n pagination=self.pagination(),\n url_doc_view=self.url_doc_view(),\n abstract=self.abstract(),\n text=self.full_text(),\n )", "title": "" }, { "docid": "8577dd78748d5f3a7e4be94240b06fd3", "score": "0.49861988", "text": "def _work_from_adelaide(a_work: AdelaideWork) -> None:\n if len(a_work.title) > 100 or len(a_work.author_last) > 50:\n return\n if a_work.author_first and len(a_work.author_first) > 50:\n return\n author, _ = Author.objects.get_or_create(\n first_name=a_work.author_first,\n last_name=a_work.author_last\n )\n\n work, _ = Work.objects.update_or_create(\n title=a_work.title,\n author=author,\n defaults={\n 'translator': a_work.translator\n }\n )", "title": "" }, { "docid": "1e85ce5d67a44bdf7efbed52a31e739e", "score": "0.49851036", "text": "def build_article(url=u''):\n url = url or '' # empty string precedence over None\n a = Article(url)\n return a", "title": "" }, { "docid": "8fe03e35f15e8a80a5b17944da8c91f8", "score": "0.49776417", "text": "def copy_and_rewrite(input, output, translate):\n with open(input) as infile:\n with open(output, 'w+') as outfile:\n for line in infile:\n outfile.write(translate(line))", "title": "" }, { "docid": 
"729bfa032282f12e3ae0f982e5ce10af", "score": "0.49750164", "text": "def test_translated_titles(self):\r\n pass", "title": "" }, { "docid": "cb10d9c42a40d4d5659af48183217d80", "score": "0.49716488", "text": "def recreate_text_representation(candidate, entity_replacement = True, span_replacement = True, as_tokens=False , replace_conseq_entities = False, trim_text = False, window=0, lemmas = False, shortest_dep_path=False, nlp = None\n ):\n value = deepcopy(candidate)\n if lemmas:# and not shortest_dep_path:\n tokens = deepcopy(value['lemmas'])\n else:\n tokens = deepcopy(value['words'])\n# print '\\n','Initial indices:',value['gene_idx'],value['chem_idx']\n if entity_replacement:\n for i, t_type in enumerate(value['entity_types']):\n if t_type !=u'O':\n tokens[i] = str(t_type).upper()\n \n if shortest_dep_path:\n for idx in value['chem_idx']:\n tokens[idx] = \"ENTITY1\"\n for idx in value['gene_idx']:\n tokens[idx] = \"ENTITY2\"\n \n idcs_to_keep = get_shortest_dep_path(candidate['dep_parents'], candidate['chem_idx'][0], candidate['gene_idx'][0] )\n words = [tokens[i] for i in idcs_to_keep]\n return ' '.join(words)\n \n \n # if only_between, drop words before/after chem+gene\n if trim_text and not shortest_dep_path:\n start = min(value['gene_idx']+value['chem_idx'])\n start = max(0, start-window)\n \n stop = max(value['gene_idx']+value['chem_idx'])\n stop = stop + window\n \n# print 'sdas', start,stop\n \n keys_with_lists = ['entity_types', 'lemmas', 'pos_tags', 'words']\n \n #trim all lists containing lemmas etc.\n # TODO: fix issue that window also counts special characters.\n for key in keys_with_lists: \n value[key] = value[key][start:stop+1]\n tokens = tokens[start:stop+1]\n\n #replace indices to keep consistency \n value['chem_idx'][:] = [x - start for x in value['chem_idx']]\n value['gene_idx'][:] = [x - start for x in value['gene_idx']]\n \n# print 'new value indices:',value['chem_idx'],value['gene_idx']\n \n# print tokens\n #TODO: potentially replace with lemmas here maybe\n \n #replace span with ENTITY1, ENTITY2 --- destruction of list consistency after this point\n if span_replacement or shortest_dep_path:\n for idx in value['chem_idx']:\n tokens[idx] = \"ENTITY1\"\n for idx in value['gene_idx']:\n tokens[idx] = \"ENTITY2\"\n \n #convert whole span to 1 token\n idx_to_del = value['gene_idx'][1:]+value['chem_idx'][1:]\n # keep track of entity indices (merged & converted from list to int)\n gene_idx = shift_idx(value['gene_idx'][0], idx_to_del)\n chem_idx = shift_idx(value['chem_idx'][0], idx_to_del)\n for index in sorted(idx_to_del, reverse=True):\n del tokens[index]\n \n# if shortest_dep_path:\n# return get_shortest_dep_path(' '.join(tokens) , chem_idx, gene_idx, nlp)\n \n \n if replace_conseq_entities and not shortest_dep_path: \n tokens = map(itemgetter(0), groupby(tokens))\n \n# #shit after that\n# if return_idcs:\n# chem_idx = deepcopy(value['chem_idx'])\n# gene_idx = deepcopy(value['gene_idx'])\n \n# if (len(chem_idx)>1) and (gene_idx[0]<chem_idx[0]):\n# print ' '.join(tokens)\n# print 'l1',chem_idx\n# chem_idx = chem_idx[0] - len(gene_idx) - 1\n# print 'l2',tokens[chem_idx], chem_idx\n \n# elif (len(gene_idx)>1) and (chem_idx[0]<gene_idx[0]):\n# print ' '.join(tokens)\n# gene_idx = gene_idx[0] - len(chem_idx) - 1\n# print 'l3',gene_idx, tokens[gene_idx]\n \n \n# print tokens\n if as_tokens:\n return tokens\n elif shortest_dep_path:\n return (' '.join(tokens), chem_idx, gene_idx, \n #tokens[chem_idx], tokens[gene_idx] #debug/test\n )\n else:\n return ' '.join(tokens)", 
"title": "" }, { "docid": "b66c9f7ed6327f7ef5b16cbcb7bee3e3", "score": "0.49709746", "text": "def normalize( self, article ):\r\n def getPossibleKeywords( line ):\r\n def normalizeKeyword( word ):\r\n def replaceFromTo( interWord, fromTo ):\r\n replFrom, to = fromTo\r\n return interWord.replace( replFrom, to )\r\n return reduce( replaceFromTo, self.wordReplacements, word )\r\n return ( normalizeKeyword( word ) for word in line.split( \" \" ) )\r\n return chain.from_iterable( ( getPossibleKeywords( line ) \r\n for line in article.lower().split( \"\\n\" ) ) )\r\n \r\n #=======================================================================\r\n # def extractKeywords( sent ):\r\n # def concatNamedEntities( sentTree ):\r\n # try:\r\n # tag = sentTree.node\r\n # except AttributeError:\r\n # word = sentTree[ 0 ]\r\n # return word\r\n # else:\r\n # if( tag == 'NE' ):\r\n # return \" \".join( ( name for name, tag in sentTree ) )\r\n # return ( concatNamedEntities( subTree ) for subTree in sentTree )\r\n # tokenized = nltk.word_tokenize( sent )\r\n # tagged = nltk.pos_tag( tokenized )\r\n # chunked = nltk.ne_chunk( tagged, binary=True )\r\n # return concatNamedEntities( chunked )\r\n # return [ keyword.strip().lower() \r\n # for sent in nltk.sent_tokenize( article )\r\n # for keyword in extractKeywords( sent ) ] \r\n #=======================================================================\r", "title": "" }, { "docid": "6befb3faf5e324b2166b6488c08bbf0c", "score": "0.49482128", "text": "def translate(\n self,\n text='translation !',\n src='en',\n dest=['fr']\n ):\n if not isinstance(text, str) and not isinstance(text, unicode):\n raise(AttributeError(\n 'translate(text=) you must pass string of text to be'\n ' translated'))\n if str(src) not in self.languages:\n raise(AttributeError(\n 'translate(src=) passed language is not supported: ' + src))\n if not isinstance(dest, list):\n raise(AttributeError(\n 'translate(dest=) you must pass list of strings of supported'\n ' languages'))\n for dl in dest:\n if str(dl) not in self.languages:\n if self.fail_safe:\n return text\n else:\n raise(AttributeError(\n 'translate(dest=[]) passed language is not '\n 'supported: ' + str(dl)))\n if self.fail_safe:\n T = google_translator(service_urls=self.service_urls)\n parent = self\n\n class translatorC(object):\n def translate(self, text, dest, src):\n try:\n return T.translate(\n text=text,\n dest=dest,\n src=src\n )\n except Exception as error: # pragma: nocover\n parent.errors.append(error)\n return namedtuple('FailsafeTranlsation',\n ['text'])(text=text)\n translator = translatorC()\n else:\n translator = google_translator(service_urls=self.service_urls)\n if self.cache and text in self.STORAGE.keys():\n if len(dest) > 1:\n toReturn = {}\n for dl in dest:\n if dl in self.STORAGE[text].keys():\n toReturn[dl] = self.STORAGE[text][dl]\n else:\n toReturn[dl] = translator.translate(\n text,\n dl,\n src\n ).text\n if toReturn != self.STORAGE[text]:\n self.STORAGE[text] = toReturn\n self.cacheIt()\n return toReturn\n else:\n if dest[0] not in self.STORAGE[text].keys():\n toRetTra = translator.translate(\n text,\n dest[0],\n src\n ).text\n self.STORAGE[text][dest[0]] = toRetTra\n self.cacheIt()\n return toRetTra\n else:\n return self.STORAGE[text][dest[0]]\n else:\n toStore = {text: {\n src: text\n }}\n for dl in dest:\n translation = translator.translate(\n text,\n dl,\n src\n )\n toStore[text][dl] = translation.text\n if self.cache:\n self.STORAGE[text] = toStore[text]\n self.cacheIt()\n if len(dest) > 1:\n return 
toStore[text]\n else:\n return toStore[text][dest[0]]", "title": "" }, { "docid": "9cdb2fe6b0433368b266998dc305b9c8", "score": "0.49371663", "text": "def wikipedia_articles(self, language=None):\n if language == None: language = flags.arg.language\n return self.wf.resource(\"articles@10.rec\",\n dir=corpora.wikidir(language),\n format=\"records/frame\")", "title": "" }, { "docid": "15cd2fd404134c50d115c8ba3648cf6b", "score": "0.49368352", "text": "def eficas_translation(ts_file, new_ts_file, lang):\n dico_cata_to_label = {}\n dico_cata_to_telemac = {}\n header = '<?xml version=\"1.0\" encoding=\"utf-8\"?>'\n header += '<!DOCTYPE TS><TS version=\"1.1\" language=\"'+lang+'\">'\n header += '<context>\\n'\n header += ' <name>@default</name>\\n'\n\n end = '</context>\\n</TS>\\n'\n\n pattern_in = re.compile(r'^\\s*<source>(?P<ident>.*)</source>\\s*$')\n pattern_out = \\\n re.compile(r'^\\s*<translation>(?P<traduit>.*)</translation>\\s*$')\n pattern_in2 = \\\n re.compile(r'^\\s*<source2>(?P<ident>.*)</source2>\\s*$')\n pattern_out2 = \\\n re.compile(r'^\\s*<translation2>(?P<traduit>.*)</translation2>\\s*$')\n liste_maj = []\n liste_maj.append(('for h', 'for H'))\n liste_maj.append(('pour h', 'pour H'))\n liste_maj.append(('for u', 'for U'))\n liste_maj.append(('pour u', 'pour U'))\n liste_maj.append(('of k', 'of K'))\n liste_maj.append(('de k', 'de K'))\n liste_maj.append(('of h', 'of H'))\n liste_maj.append(('de h', 'de H'))\n liste_maj.append(('u and v', 'U and V'))\n liste_maj.append(('u et v', 'U et V'))\n liste_maj.append(('on h', 'on H'))\n liste_maj.append(('sur h', 'sur H'))\n liste_maj.append(('supg', 'SUPG'))\n liste_maj.append(('k and epsilon', 'K and Epsilon'))\n liste_maj.append(('k-epsilon', 'K-Epsilon'))\n liste_maj.append(('gmres', 'GMRES'))\n liste_maj.append(('cgstab', 'CGSTAB'))\n liste_maj.append(('q(z)', 'Q(Z)'))\n liste_maj.append(('z(q)', 'Z(Q)'))\n liste_maj.append(('wgs84', 'WGS84'))\n liste_maj.append(('wgs84', 'UTM'))\n liste_maj.append(('n-scheme', 'N-Scheme'))\n liste_maj.append(('scheme n', 'Scheme N'))\n liste_maj.append(('psi-scheme', 'PSI-Scheme'))\n liste_maj.append((' psi', ' PSI'))\n liste_maj.append(('f(t90)', 'F(T90)'))\n liste_maj.append(('(pa)', '(Pa)'))\n liste_maj.append(('h clipping', 'H clipping'))\n liste_maj.append(('delwaq', 'DELWAQ'))\n liste_maj.append(('tomawac', 'TOMAWAC'))\n liste_maj.append(('chezy', 'CHEZY'))\n liste_maj.append(('hllc', 'HLLC'))\n liste_maj.append(('c-u', 'C-U'))\n liste_maj.append(('c,u,v', 'C,U,V'))\n liste_maj.append(('h,u,v', 'H,U,V'))\n liste_maj.append(('previmer', 'PREVIMER'))\n liste_maj.append(('fes20xx', 'FES20XX'))\n liste_maj.append(('legos-nea', 'LEGOS-NEA'))\n liste_maj.append(('tpxo', 'TPXO'))\n liste_maj.append((' x', ' X'))\n liste_maj.append((' y', ' Y'))\n liste_maj.append(('waf', 'WAF'))\n liste_maj.append(('(w/kg)', '(W/kg)'))\n liste_maj.append(('(j/kg)', '(W/kg)'))\n liste_maj.append(('zokagoa', 'Zokagoa'))\n liste_maj.append(('nikuradse', 'Nikuradse'))\n liste_maj.append(('froude', 'Froude'))\n liste_maj.append(('gauss', 'Gauss'))\n liste_maj.append(('seidel', 'Seidel'))\n liste_maj.append(('leo', 'Leo'))\n liste_maj.append(('postma', 'Postma'))\n liste_maj.append(('crout', 'Crout'))\n liste_maj.append(('okada', 'Okada'))\n liste_maj.append(('jmj', 'JMJ'))\n liste_maj.append(('haaland', 'HAALAND'))\n liste_maj.append(('grad(u)', 'grad(U)'))\n liste_maj.append(('variable z', 'variable Z'))\n liste_maj.append(('variable r', 'variable R'))\n liste_maj.append(('ascii', 'ASCII'))\n\n with open(ts_file, 'r') 
as fobj:\n for ligne in fobj.readlines():\n if pattern_in.match(ligne):\n word = pattern_in.match(ligne)\n ident = word.group('ident')\n if pattern_out.match(ligne):\n word = pattern_out.match(ligne)\n traduit = word.group('traduit')\n dico_cata_to_telemac[ident] = traduit\n traduit_main = traduit.lower()\n for trad in liste_maj:\n traduit = traduit_main.replace(trad[0], trad[1])\n traduit_main = traduit\n chaine = traduit_main[0].upper() + traduit_main[1:]\n dico_cata_to_label[ident] = chaine\n if pattern_in2.match(ligne):\n word = pattern_in2.match(ligne)\n ident = word.group('ident')\n if pattern_out2.match(ligne):\n word = pattern_out2.match(ligne)\n traduit = word.group('traduit')\n dico_cata_to_telemac[ident] = traduit\n dico_cata_to_label[ident] = traduit\n\n with open(new_ts_file, 'w') as fobj:\n fobj.write(header)\n for k in dico_cata_to_telemac:\n text = \" <message>\\n <source>\"\n text += k\n text += \"</source>\\n <translation>\"\n text += dico_cata_to_label[k]\n text += \"</translation>\\n </message>\\n\"\n fobj.write(text)\n fobj.write(end)\n\n system(\"lrelease %s\"%new_ts_file)", "title": "" }, { "docid": "746b05a1b817eadcd5a923c742eb96db", "score": "0.49313417", "text": "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n fields = [field for field in trans_dict.keys() if field[1]==self.language]\n for f in fields:\n setattr(self,f[0],trans_dict[f])", "title": "" }, { "docid": "5e8d8fa4943faa481c81ced373543ee4", "score": "0.492742", "text": "def __init__(\n self,\n orig_phrase: str = None,\n orig_phrase_norm: str = None,\n wiki_title: str = None,\n wiki_title_norm: str = None,\n score: int = 0,\n pageid: int = 0,\n description: str = None,\n relations: WikipediaPageExtractedRelations = None,\n ) -> None:\n self.orig_phrase = orig_phrase\n if orig_phrase_norm is None:\n self.orig_phrase_norm = StringUtils.normalize_str(orig_phrase)\n else:\n self.orig_phrase_norm = orig_phrase_norm\n\n self.wiki_title = wiki_title.replace(DISAMBIGUATION_TITLE, \"\")\n if wiki_title_norm is None:\n self.wiki_title_norm = StringUtils.normalize_str(wiki_title)\n else:\n self.wiki_title_norm = wiki_title_norm\n\n self.score = score\n self.pageid = int(pageid)\n self.description = description\n self.relations = relations", "title": "" }, { "docid": "db368a434f063b1956ecabad3c4d0304", "score": "0.49264848", "text": "def prepare_articles(self, articles):\n\n prepared_articles = []\n\n article_non_insight_doi_list = get_non_insight_doi_list(articles, self.logger)\n\n remove_article_doi = []\n related_articles = []\n # Process or delete articles as required\n for article in articles:\n self.logger.info(article.doi + \" is type \" + article.article_type)\n if is_insight_article(article):\n self.set_related_article(\n article,\n articles,\n related_articles,\n article_non_insight_doi_list,\n remove_article_doi,\n )\n\n # Can remove articles now if required\n for article in articles:\n if article.doi not in remove_article_doi:\n prepared_articles.append(article)\n\n return prepared_articles", "title": "" }, { "docid": "471b8b3024f3b274d78b19c79f7fdcbc", "score": "0.49189648", "text": "def translate(self, *args):\n return self.robot.translate(*args)", "title": "" }, { "docid": "bf1ae554e847f03f324222e8286dcfe7", "score": "0.4916338", "text": "def post_article():\n json = request.get_json()\n\n public = json.get('public') is not False\n if not public:\n category = None\n else:\n category = json.get('category')\n if category is None:\n return {'category': '公开的文章必须属于某一个分类'}\n if not 
Category.exist(Category.id == category):\n return {'category': '该分类不存在'}\n\n id = json.get('id', '')\n if not re_match(app_config['ARTICLE_ID_PATTERN'], id):\n return {'id': app_config['ARTICLE_ID_DESCRIPTION']}\n\n title = json.get('title', '').strip()\n if not title:\n return {'title': '请输入有效的文章标题'}\n\n text_type = json.get('text_type', '')\n if not RendererCollection.does_support(text_type):\n return {'text_type': '抱歉,暂不支持该格式的文章'}\n\n source_text = json.get('source_text', '')\n try:\n rendered_text = RendererCollection.render(text_type, source_text)\n except RendererCollection.RenderedError as ex:\n return {'source_text': '渲染文章失败,错误信息如下:\\r\\n' + ex.message}\n\n is_commentable = json.get('is_commentable') is not False\n\n article = Article.create(id=id, title=title,\n text_type=text_type, source_text=source_text,\n content=rendered_text,\n is_commentable=is_commentable, public=public,\n category=category)\n\n event_emitted.send(\n current_app._get_current_object(),\n type='Article: Post',\n description='Author(%s) posted a new article(%s).'\n % (article.author_id, article.id)\n )\n\n return None, {'article': article.to_dict()}", "title": "" }, { "docid": "4e51deb455832d94386a33e82d4295f9", "score": "0.49146888", "text": "def test_new_language_translation_creation(self):\n url = u\"/composers/translate/edit?appid=%s&srclang=all_ALL&editSelectedSourceButton=&targetlang=te_ST&srcgroup=ALL&targetgroup=ALL\" % (\n self.firstApp.unique_id)\n print \"URL: \" + url\n rv = self.flask_app.get(url)\n assert rv.status_code == 200\n\n data = rv.data.decode(\"utf8\") # This bypasses an apparent utf8 FlaskClient bug.\n\n # Ensure that the identifiers appear in the response.\n # (that is, that it appears to be translate-able)\n assert u\"hello_world\" in data\n assert u\"black\" in data", "title": "" }, { "docid": "49d99ece8347c15a344d3da9e7ca4f85", "score": "0.4914653", "text": "def create_cms_article(user, item):\n if not (user.is_staff and user.has_perm('coop_cms.add_article')):\n raise PermissionDenied\n \n art = Article.objects.create(title=item.title, content=item.summary)\n item.processed = True\n item.save()\n return art", "title": "" }, { "docid": "3260159d71d3277cedd8e5e5d22c261c", "score": "0.49101502", "text": "def prepare_nlu_text(example: Text, entities: List[Dict]):\n if not Utility.check_empty_string(example):\n if entities:\n from rasa.shared.nlu.training_data.formats.rasa_yaml import RasaYAMLWriter\n example = RasaYAMLWriter.generate_message({'text': example, \"entities\": entities})\n return example", "title": "" }, { "docid": "1c3e8ab5b1a89528561c950371b1ee71", "score": "0.4910095", "text": "def __corpus_embedding_creator(self, input_data, original_corpus: bool):\n if original_corpus:\n input_data = self.__vectorizer.fit_transform(input_data)\n return self.__TfidfTransformer.fit_transform(input_data)", "title": "" }, { "docid": "38098fac9ea41018f03fbede24b915b2", "score": "0.49083048", "text": "def tr2(jenni, input):\r\n if not input.group(2): return jenni.say(\"No input provided.\")\r\n command = input.group(2).encode('utf-8')\r\n\r\n def langcode(p):\r\n return p.startswith(':') and (2 < len(p) < 10) and p[1:].isalpha()\r\n\r\n args = ['auto', 'en']\r\n\r\n for i in xrange(2):\r\n if not ' ' in command: break\r\n prefix, cmd = command.split(' ', 1)\r\n if langcode(prefix):\r\n args[i] = prefix[1:]\r\n command = cmd\r\n phrase = command\r\n\r\n if (len(phrase) > 350) and (not input.admin):\r\n return jenni.reply('Phrase must be under 350 characters.')\r\n\r\n src, dest = args\r\n 
if src != dest:\r\n msg, src = translate(phrase, src, dest)\r\n if isinstance(msg, str):\r\n msg = msg.decode('utf-8')\r\n if msg:\r\n msg = web.decode(msg) # msg.replace('&#39;', \"'\")\r\n msg = '\"%s\" (%s to %s, translate.google.com)' % (msg, src, dest)\r\n else: msg = 'The %s to %s translation failed, sorry!' % (src, dest)\r\n\r\n jenni.reply(msg)\r\n else: jenni.reply('Language guessing failed, so try suggesting one!')", "title": "" }, { "docid": "8ebe6d8c3174edfd926555efcda93323", "score": "0.49049714", "text": "def blogYY_page_mod_article(article_id):\n article = search_article_by_id(article_id)[0]\n return render_template(\n template_name_or_list=\"blogYY/pg_add_article/aa.html\",\n title=article[\"title\"],\n content=article[\"content\"],\n category_id=article[\"category_id\"],\n submit_url=url_for('blogYY_api_mod_article_v1', article_id=article[\"id\"]),\n redirect_url=url_for('blogYY_page_single_article', article_id=article[\"id\"]),\n categories=search_categories()\n )", "title": "" }, { "docid": "dfebab4aee9542abb1a0b4df6caafc9d", "score": "0.48998842", "text": "def from_translation(self, la):\n PW0 = self.realization_of().PW0()\n return self(PW0.from_translation(la))", "title": "" }, { "docid": "03b8a88e0f8aad0aa039d738d179f724", "score": "0.48955297", "text": "def get_article_from_id(id, separator=\"\\n\\n\"):\n with open(DATA_PATH + \"articles/\" + id, \"r\") as article_data:\n article_data_map = json.loads(article_data.read())\n\n raw_contents = json.loads(article_data_map[CONTENT_FIELD])\n questions = get_questions(raw_contents)\n concatenated_content = \"\\n\".join([elem[\"content\"] for elem in raw_contents if \"Overview\" not in elem[\"content\"] and \"What do you think?\" not in elem[\"content\"]])\n\n if len(concatenated_content) < MIN_ARTICLE_LEN:\n return \"\", []\n else:\n return process_article(concatenated_content, separator), questions\n \n # article_data_map[\"ka_url\"] --> url\n # article_data_map[\"translated_title\"] --> title", "title": "" }, { "docid": "f45c224980f651c704f1e0d255e6f3d6", "score": "0.48883346", "text": "def translation(translation):\n return np.array([\n [1, 0, translation[0]],\n [0, 1, translation[1]],\n [0, 0, 1]\n ])", "title": "" }, { "docid": "fea5b4ed3520b6b8d1c9c97e301106c7", "score": "0.48824593", "text": "def copy(self, other_passage):\n other = Layer0(root=other_passage, attrib=self.attrib.copy())\n other.extra = self.extra.copy()\n for t in self._all:\n copied = other.add_terminal(t.text, t.punct, t.paragraph)\n copied.extra = t.extra.copy()", "title": "" }, { "docid": "a5ec77161210b7a8564a94c9ed8ce336", "score": "0.48805857", "text": "def get_translate_entry(word, original_language, target_language):\n lookup = {\n \"cs\": \"czech\",\n \"da\": \"danish\",\n \"de\": \"german\",\n \"el\": \"greek\",\n \"es\": \"spanish\",\n \"et\": \"estonian\",\n \"en\": \"english\",\n \"fr\": \"french\",\n \"it\": \"italian\",\n \"nb\": \"norwegian\",\n \"nl\": \"dutch\",\n \"po\": \"polish\",\n \"pt\": \"portuguese\",\n \"sl\": \"slovene\",\n \"sv\": \"swedish\"\n }\n \n original_language = lookup[original_language[:2]]\n target_language = lookup[target_language[:2]]\n if original_language == 'german':\n word = sanitize_german(word)\n\n url = 'https://www.collinsdictionary.com/us/dictionary/%s-%s/%s' % (original_language, target_language, word)\n\n request = requests.get(url)\n if request.status_code == 200:\n return url\n return None", "title": "" }, { "docid": "18adf592901cfcddbc73c541bfe49e5a", "score": "0.48700637", "text": "def 
__init__(\n self,\n es,\n es_index: str,\n nlp,\n source_description_field: str,\n target_title_field: str,\n target_description_field: str,\n target_alias_field: str = None,\n ):\n\n self.es = es\n self.es_index = es_index\n\n self.nlp = nlp\n\n self.source_fields = {\"description\": source_description_field}\n\n self.target_fields = {\n \"title\": target_title_field,\n \"description\": target_description_field,\n }\n\n if target_alias_field is not None:\n self.target_fields.update(\n {\n \"alias\": target_alias_field,\n }\n )", "title": "" }, { "docid": "da4a84f375b1cbcd7d097aef1ca299d8", "score": "0.48641682", "text": "def create_article(title, content, title_image, article_session_id, is_draft, user_id, group_id, category_id, abstract, book_id, special_id=None):\n\n result = db_session.query(Article).filter_by(\n article_session_id=article_session_id).all()\n if (special_id is not None) and (is_draft == '0'):\n special = db_session.query(Special).filter_by(\n special_id=special_id).scalar()\n special.last_modified = datetime.now()\n db_session.commit()\n\n if len(result) > 0:\n article = db_session.query(Article).filter_by(\n article_session_id=article_session_id).scalar()\n article.title = title\n article.content = content\n article.picture = title_image\n article.time = datetime.now()\n article.is_draft = is_draft\n article.abstract = abstract\n article.book_id = book_id\n article.special_id = special_id\n article.category = category_id\n db_session.commit()\n return result[0].article_id\n else:\n article = Article(title=title, content=content, picture=title_image, time=datetime.now(), user_id=user_id, article_session_id=article_session_id,\n is_draft=is_draft, groups=group_id, category=category_id, abstract=abstract, book_id=book_id, special_id=special_id)\n db_session.add(article)\n db_session.commit()\n result = db_session.query(Article).filter_by(\n article_session_id=article_session_id).first()\n return result.article_id", "title": "" }, { "docid": "e970b141f3bb85cbf571b456986fe474", "score": "0.48639563", "text": "def test_representation(self):\n self.assertEqual(\n str(self.article),\n \"Why Python is such a nice language 2020-01-01T12:00:00\"\n )", "title": "" }, { "docid": "38e69a81337958542e6941b1d0c26a77", "score": "0.48592383", "text": "def translate(ctx, message):\r\n if ctx.args.lang() == \"en\":\r\n return message\r\n\r\n _tr = {}\r\n _tr[\" (general presentation)\"] = {\r\n \"fr\": \" (présentation générale)\"\r\n }\r\n _tr[\" (main menu)\"] = {\r\n \"fr\": \" (menu principal)\"\r\n }\r\n _tr[\"This command line interface uses the following patterns for comments definition:\"] = {\r\n \"fr\": \"Cette interface en ligne de commande utilise les motifs suivants pour la définition de commentaires :\"\r\n }\r\n _tr[\" is composed of the following menus:\"] = {\r\n \"fr\": \" est composé des menus suivants :\"\r\n }\r\n _tr[\" is composed of the following commands:\"] = {\r\n \"fr\": \" est composé des commandes suivantes :\"\r\n }\r\n _tr[\"Command Line Interface \"] = {\r\n \"fr\": \"Interface en ligne de commande (CLI) \"\r\n }\r\n _tr[\"Command Line Interface documentation\"] = {\r\n \"fr\": \"Documentation d'interface en ligne de commande (CLI)\"\r\n }\r\n _tr[\"Description:\"] = {\r\n \"fr\": \"Description :\"\r\n }\r\n _tr[\"float value\"] = {\r\n \"fr\": \"Valeur décimale\"\r\n }\r\n _tr[\"host value\"] = {\r\n \"fr\": \"Adresse réseau\"\r\n }\r\n _tr[\"int value\"] = {\r\n \"fr\": \"Valeur entière\"\r\n }\r\n _tr[\"Menu \"] = {\r\n \"fr\": \"Menu \"\r\n }\r\n 
_tr[\"No help available\"] = {\r\n \"fr\": \"Aucune aide disponible\"\r\n }\r\n _tr[\"Options:\"] = {\r\n \"fr\": \"Options :\"\r\n }\r\n _tr[\"Parameters:\"] = {\r\n \"fr\": \"Paramètres :\"\r\n }\r\n _tr[\"string value\"] = {\r\n \"fr\": \"Chaîne de caractères\"\r\n }\r\n _tr[\"Synopsis:\"] = {\r\n \"fr\": \"Synopsis :\"\r\n }\r\n _tr[\"This command opens the menu \"] = {\r\n \"fr\": \"Cette commande ouvre le menu \"\r\n }\r\n _tr[\".\"] = {\r\n \"fr\": \".\"\r\n }\r\n\r\n try:\r\n if ctx.args.lang() == \"fr\":\r\n import codecs\r\n _iso = codecs.lookup(\"utf-8\")\r\n return _iso.decode(_tr[message][\"fr\"])[0]\r\n except KeyError:\r\n pass\r\n modlog.log(ctx.MODLOG_FILTER, modlog.WARNING, \"No translation for '%s'\" % message)\r\n return message", "title": "" }, { "docid": "0bea8baf61923235e4975dbd6d582b08", "score": "0.48567662", "text": "def get_Te(self, een):\n self.Te = deepcopy(een.Te)", "title": "" }, { "docid": "80c5145269cb65714756e10333302156", "score": "0.4852506", "text": "def create_new():\n\n fa_plus = DRIVER.find_element_by_css_selector(\".fa-plus\")\n fa_plus.click()\n create_key = DRIVER.find_element_by_id(\"create-key\")\n add_editor = DRIVER.find_element_by_xpath(\"//*[@id='create-translation']/form/button[2]\")\n create_key.click()\n create_key.clear()\n create_key.send_keys(str(key))\n add_editor.click()\n create_summernote = DRIVER.find_element_by_xpath(\"\"\n \"//*[@id='create-translation']/form/div[2]/div/div[3]/div[2]/p\")\n create_summernote.click()\n create_summernote.clear()\n create_summernote.send_keys(str(new_en))\n DRIVER.find_element_by_xpath(\"//*[@id='create-translation']/form/button[1]\").click()", "title": "" }, { "docid": "278780d85346dc50c37ea9b0e80697e3", "score": "0.4850419", "text": "def reconstruct_from_transformed(self, node, transformed_node):\n return transformed_node", "title": "" }, { "docid": "c88bb8ae8d181aaa10ddec0ea2332ac4", "score": "0.4849736", "text": "def create_translate(cls, tx, ty=0.0):\n return cls(1.0, 0.0, 0.0, 1.0, tx, ty)", "title": "" } ]
f5f611dc1fa49edc0903b5086772d214
Combine multiple generators into one
[ { "docid": "cbfb857bc40adbf0cbc3a1fca182edd7", "score": "0.7115147", "text": "def multiplex(sources):\n return it.chain.from_iterable(sources)", "title": "" } ]
[ { "docid": "c7a09ddd2a1ad01359b94e02267b5713", "score": "0.73177344", "text": "def combine_generators_no_random_crop(sharp_generator, blur_generator):\n while True:\n sharp_batch = sharp_generator.next()\n blur_batch = blur_generator.next()\n\n res = [sharp_batch, blur_batch]\n\n yield res", "title": "" }, { "docid": "dc3c0da763e581890ec873abf3b454bc", "score": "0.72304946", "text": "def combine_generators(sharp_generator, blur_generator):\n while True:\n sharp_batch = sharp_generator.next()\n blur_batch = blur_generator.next()\n\n sharp_batch, blur_batch = random_crop(sharp_batch, blur_batch)\n\n res = [sharp_batch, blur_batch]\n\n yield res", "title": "" }, { "docid": "4745d0e85afbbc558dec68a5add57dff", "score": "0.7212617", "text": "def gen_concatenate(iterators):\n for it in iterators:\n yield from it", "title": "" }, { "docid": "51cc6ce73d773d41944c331c7b82b836", "score": "0.7137542", "text": "def TryMultipleGenerator():\n\tprint('I should be the first generator')\n\tfor i in range(5):\n\t\tyield i+1\n\n\tprint('I should be the second generator')\n\tfor j in range(10):\n\t\tyield j*10", "title": "" }, { "docid": "86d21b81fcc53c83c128728dc3a46ac8", "score": "0.7052838", "text": "def chain_generators(*sprite_generators):\n\n def _generate(*args, **kwargs):\n return list(itertools.chain(*[generator(*args, **kwargs)\n for generator in sprite_generators]))\n\n return _generate", "title": "" }, { "docid": "276d9768038794a3fd85ff726fbbcc5c", "score": "0.69457084", "text": "def chain_generators(iterable, list_of_generators):\r\n\r\n def flatten1(nested_iterable):\r\n return (item for inner in nested_iterable for item in inner)\r\n\r\n # IMPORTANT: this function can NOT be inlined. Python scope/shadowing intricacies would break it\r\n def apply(gen, iterable): \r\n return (gen(x) for x in iterable)\r\n\r\n def flatmap(iter, gen):\r\n return flatten1(apply(gen, iter))\r\n\r\n return reduce(flatmap, list_of_generators, iterable)", "title": "" }, { "docid": "16f0f281d3dd2d6579a8efc9e59c9e95", "score": "0.6784467", "text": "def make_generators_generator(g):\n for x in g:\n yield x", "title": "" }, { "docid": "db5542fbdd1aa7d192ed6ec1e2d9828e", "score": "0.64963466", "text": "def mix_generators(generator_list):\n i = 0\n l = len(generator_list)\n stopiters_seen = 0\n while stopiters_seen <= l:\n try:\n yield six.next(generator_list[i % l])\n i += 1\n stopiters_seen = 0\n except StopIteration:\n i += 1\n stopiters_seen += 1", "title": "" }, { "docid": "4044be6db2e56846c42b0f4fe65d61fb", "score": "0.61763394", "text": "def concat_seq(sources: Iterable[AsyncObservable[TSource]]) -> AsyncObservable[TSource]:\n\n return pipe(\n of_seq(sources),\n merge_inner(1),\n )", "title": "" }, { "docid": "4f224dc66f49be220255e11ac520c44d", "score": "0.6116887", "text": "def make_generators_generator(g):\n \"*** YOUR CODE HERE ***\"\n\n def helper(g, i):\n gen = g()\n for _ in range(i):\n yield next(gen)\n\n length = len(list(g()))\n for i in range(1, length + 1):\n yield helper(g, i)", "title": "" }, { "docid": "f4c2f10f017e029dedd27871d864972c", "score": "0.5957536", "text": "def generate_different_arrays():\n yield np.array([1, 2])\n yield np.array([1, 2, 3])", "title": "" }, { "docid": "b6b8396667f71123577777cdc56911dd", "score": "0.5947436", "text": "def Serial(*fns): # pylint: disable=invalid-name\n def composed_fns(generator=None):\n for f in fastmath.tree_flatten(fns):\n generator = f(generator)\n return generator\n return composed_fns", "title": "" }, { "docid": "cec93fb32a072041dab6c65f9eec6d14", 
"score": "0.5936838", "text": "def _product_generator(a, b):\n yield a * b", "title": "" }, { "docid": "246b3f9a578462c2a944ee0f2aaf620e", "score": "0.59360576", "text": "def merge_streams(a, b):\n stream = (a, b)\n item = list(map(_get_next, stream))\n cur_stream = _get_cur_stream_idx(item) # Stream we are reading ATM.\n\n while item != [_sentinel, _sentinel]: # While at least one not at EOF.\n yield item[cur_stream]\n item[cur_stream] = _get_next(stream[cur_stream])\n cur_stream = _get_cur_stream_idx(item)", "title": "" }, { "docid": "07300a0105791a3e88808c869a31b6b4", "score": "0.5931834", "text": "def _iter_merge_checks(checks_1, checks_2):\n if (checks_1 is not None):\n yield from checks_1\n \n if (checks_2 is not None):\n yield from checks_2", "title": "" }, { "docid": "fe2f535323dbe9409f0c1f71bf327d8c", "score": "0.5925361", "text": "def concat(seqs: Union[Seq, Seq[Seq]], *extra) -> Iter:\n if extra:\n return Iter(itertools.chain(seqs, *extra))\n return Iter(itertools.chain.from_iterable(seqs))", "title": "" }, { "docid": "8df67cd64970fb2ecdae24e4e4feb4a7", "score": "0.59073645", "text": "def gen():\n yield", "title": "" }, { "docid": "d4a314e0f066f2be79ded65373e9aa10", "score": "0.588959", "text": "def zip_with(func, *iterables):\n\n for args in zip(*iterables):\n yield func(*args)", "title": "" }, { "docid": "4b61c20e7a61439761f6ee28e72be964", "score": "0.5880449", "text": "def roundrobin(iterables):\n curr_alive = map(iter, iterables)\n while curr_alive:\n next_alive = []\n for gen in curr_alive:\n try:\n yield next(gen)\n except StopIteration:\n pass\n else:\n next_alive.append(gen)\n curr_alive = next_alive", "title": "" }, { "docid": "2e70ee8ae976f7bf602ac9ce79c6e9ab", "score": "0.5867071", "text": "def roundrobin(*iterables):\n pending = len(iterables)\n funs = cycle(iter(it).next for it in iterables)\n while pending:\n try:\n for fun in funs:\n yield fun()\n except StopIteration:\n pending -= 1\n funs = cycle(islice(funs, pending))", "title": "" }, { "docid": "c34f5a4bd70fdd2931b8cb478343ed88", "score": "0.58531594", "text": "def batch_generator():\n source_batch = []\n target_batch = []\n for sources, target in utils.yield_sources_and_targets(\n FLAGS.predict_input_file, FLAGS.input_format):\n\n source_batch.append(\n FLAGS.special_glue_string_for_joining_sources.join(sources))\n target_batch.append(target)\n if len(source_batch) == FLAGS.predict_batch_size:\n yield source_batch, target_batch\n source_batch = []\n target_batch = []\n\n if source_batch:\n yield source_batch, target_batch", "title": "" }, { "docid": "d40ee8a2d529f66eb06fac931a8d05d0", "score": "0.58434135", "text": "async def achain(*async_iters: AsyncIterable[T]) -> AsyncIterator[T]:\n for aiter in async_iters:\n async for elem in aiter:\n yield elem", "title": "" }, { "docid": "1ab9fed836540647bd1e0fd99edfe03e", "score": "0.58290917", "text": "def imapzip(generator, *iterables):\n return itertools.izip(*tuple(itertools.starmap(generator, zip(*iterables))))", "title": "" }, { "docid": "082650302e54033020864022e81e0646", "score": "0.58168626", "text": "def test_multiple_yields(self):\n def generate_10_arrays():\n \"\"\"Put out 10 numpy arrays\"\"\"\n for _ in range(10):\n yield np.array([1, 2, 3, 4]).astype(np.float32)\n def assert_expectation(array):\n \"\"\"Assert the array is as expected\"\"\"\n np.testing.assert_almost_equal(array, [1, 2, 3, 4])\n self.occurences += 1\n blocks = []\n blocks.append((NumpySourceBlock(generate_10_arrays), {'out_1': 0}))\n blocks.append((NumpyBlock(assert_expectation, 
outputs=0), {'in_1': 0}))\n Pipeline(blocks).main()\n self.assertEqual(self.occurences, 10)", "title": "" }, { "docid": "e3b33b4f29062c9f97bd5fdc1023406e", "score": "0.5773603", "text": "def iterate(self):\n for result_a, binds_a in self.bound_a:\n for result_b, self.binds in self.bound_b:\n if not isinstance(result_a, tuple):\n result_a = (result_a,)\n if not isinstance(result_b, tuple):\n result_b = (result_b,)\n self.binds.update(binds_a)\n yield tuple(chain(result_a, result_b))", "title": "" }, { "docid": "b9cb09e4547d9038cd8830171476415e", "score": "0.5746471", "text": "def merge_streamlines(backward, forward):\n B = iter(backward)\n F = iter(forward)\n while True:\n yield concatenate((B.next()[:0:-1], F.next()))", "title": "" }, { "docid": "7f8d7ef0ffcce2a2bfdcdbf19cf385d9", "score": "0.5745188", "text": "def merge_graphs(generators, strategy_fields_source_node=lambda g: {}, strategy_graph_id=graph_id):\n graph = nx.MultiDiGraph()\n\n print('Merging subgraphs ...')\n for generator in tqdm(generators):\n for g in generator():\n\n id_sub_graph = strategy_graph_id(g)\n assert id_sub_graph not in graph\n\n graph.add_node(id_sub_graph, type='source', value=id_sub_graph, id=id_sub_graph, **strategy_fields_source_node(g))\n\n # Element in queue represent nodes in the shape (id_node_in_subgraph, id_node_in_graph)\n q = [('source', id_sub_graph)]\n while True:\n\n if len(q) == 0:\n break\n else:\n node = q.pop(0)\n\n edges = g.edges(node[0])\n for edge in edges:\n\n n = g.nodes()[edge[1]].copy()\n\n old_id = n['id']\n new_id = n['mergiable_id']\n\n n.pop('id')\n n.pop('graph')\n graph.add_node(new_id, id=new_id, **n)\n\n type_edge = g[node[0]][old_id]['type']\n graph.add_edge(node[1], new_id, type=type_edge)\n\n q.append((old_id, new_id))\n\n return graph", "title": "" }, { "docid": "67c898ad379e79aa60f0aa910bba7c15", "score": "0.57398504", "text": "def chain(*fs):\n\tdef chained(x):\n\t\tres = x\n\t\tfor f in fs:\n\t\t\tres = f(res)\n\t\treturn res\n\treturn chained", "title": "" }, { "docid": "b56ac401407be8a900a031c15efe5400", "score": "0.56415904", "text": "async def azip(*iterables: AsyncIterable[T]) -> AsyncIterator[Tuple[T, ...]]:\n iterators = [iterable.__aiter__() for iterable in iterables]\n while True:\n try:\n yield tuple(await asyncio.gather(*(itr.__anext__() for itr in iterators)))\n except StopAsyncIteration:\n break", "title": "" }, { "docid": "0a088c186a23bb7af53d0ae0c119fcac", "score": "0.5632826", "text": "def compose_contexts(\n head: ContextManager, *tail: ContextManager\n) -> Generator[Any, None, None]:\n with ExitStack() as stack:\n if len(tail):\n yield tuple(stack.enter_context(c) for c in [head, *tail])\n else:\n yield stack.enter_context(head)", "title": "" }, { "docid": "3771e89d3dbf86a3bd5a8d3cb446600d", "score": "0.5628315", "text": "def generate_two_different_arrays():\n for _ in range(10):\n yield np.random.rand(4)\n for _ in range(10):\n yield np.random.rand(5)", "title": "" }, { "docid": "e52982fedee89781c0488e8e65ede587", "score": "0.5575016", "text": "def alternate_iterables(*iterables):\n iterators = [iter(iterable) for iterable in iterables]\n\n while True:\n try:\n for iterator in iterators:\n yield next(iterator)\n except StopIteration:\n break", "title": "" }, { "docid": "7aa04ed24f3ddea72f8cbd9b03fcdca6", "score": "0.5571096", "text": "def _overlay_gen():\n while True:\n for overlay in overlays:\n yield overlay", "title": "" }, { "docid": "4592be005f3a426114a6b2c54eef3b0c", "score": "0.5568623", "text": "def zip_with(func: Callable, *iters) -> 
Iterable:\n return map(func, zip(*iters))", "title": "" }, { "docid": "96fa156e83f43c488b1b62015d36cf61", "score": "0.5550289", "text": "def expand_circuit_generator(circuit_generator, circuits):\n for circuit in circuit_generator:\n for relay in circuit:\n circuits.setdefault(relay, set(circuit)).update(circuit)", "title": "" }, { "docid": "24bce8beda9d28a2de71595c85eff0ce", "score": "0.554429", "text": "def gen():\n for x in range(20):\n yield x", "title": "" }, { "docid": "207f55d84237cc1229081aac52ff4a73", "score": "0.5544061", "text": "def nested(*mgrs):\n with contextlib.ExitStack() as stack:\n outputs = [stack.enter_context(cm) for cm in mgrs]\n yield outputs", "title": "" }, { "docid": "b56d456ea53002ac34f321f72a337343", "score": "0.5527838", "text": "def pipeline(*functions):\n head = functions[0]\n tail = functions[1:]\n if tail:\n def _fn(iterable):\n for i in pipeline(*tail)(head(iterable)):\n yield i\n return _fn\n else:\n return head", "title": "" }, { "docid": "f1f8fbda05d1c4190f8078817fd7d44b", "score": "0.5527089", "text": "def gen_things(repeat=1):\n\tfor x in itertools.product(ascii_lowercase, repeat=repeat):\n\t\tyield x\n\tfor x in gen_things(repeat+1):\n\t\tyield x", "title": "" }, { "docid": "1dd0df1576426b5282a0049660d38a29", "score": "0.55237025", "text": "def cartesian_product(a, b):\n\n for i in a:\n for j in b:\n yield i + (j,)", "title": "" }, { "docid": "aff9c49c8d655499974b860a45698153", "score": "0.55206954", "text": "def generators(self):\n return self._gens", "title": "" }, { "docid": "c9d418a427a7c605feb1d323f15122db", "score": "0.54975235", "text": "def yieldAllCombos(items):\n for combo in powerSet(items):\n bag1 = combo[0]\n toTake = combo[1]\n \n for combo2 in powerSet(toTake):\n bag2 = combo2[0]\n \n yield (bag1, bag2)", "title": "" }, { "docid": "fbe4061dca7c8ace6add74f6c230495d", "score": "0.5486556", "text": "def sequences(self):\n for fasta in self.raw_files:\n for seq in fasta: yield seq", "title": "" }, { "docid": "d6721cf4b746dc5a2c1284df869c566c", "score": "0.5486126", "text": "def combine_plays(games):\n chain = itertools.chain(*[g.drives.plays() for g in games])\n return nflgame.seq.GenPlays(chain)", "title": "" }, { "docid": "68e06745da285a3c93f89f4c3a0140c3", "score": "0.54828465", "text": "def combine(args1, args2):\n return [arg1.combine(arg2) for arg1, arg2 in zip(args1, args2)]", "title": "" }, { "docid": "caca66538264dc51478a333efed74995", "score": "0.54826397", "text": "def substitute_generator(self, generator, out):\n for value in generator:\n self.substitute_value(value, out)\n return self", "title": "" }, { "docid": "6c3a782cbfc65b64d23ca57c379d91db", "score": "0.54760075", "text": "def Chapter4_12():\n\n from itertools import chain\n a = [1, 2, 3, 4]\n b = ['x', 'y', 'z']\n for x in chain(a, b):\n print(x)", "title": "" }, { "docid": "2c17c2169155d67875a01cd73e919797", "score": "0.5474594", "text": "def pam(functions: Iterable[Callable], *args, **kwargs):\n for f in functions:\n yield f(*args, **kwargs)", "title": "" }, { "docid": "e37bb7aea7324537ee5ca4e67373c582", "score": "0.5470446", "text": "def zip_readers(*readers, indices=None) -> Generator:\n if indices:\n iterators = zip(*(r.select_record_indices(indices) for r in readers))\n else:\n iterators = zip(*readers)\n for record_tuple in iterators:\n yield record_tuple", "title": "" }, { "docid": "6ee31e835991168cd2dae340045e4106", "score": "0.54635113", "text": "def ijoin(iters):\r\n return (x for it in iters for x in it)", "title": "" }, { "docid": 
"1034c67da8fa0cae7d488d0c7dae7f8d", "score": "0.54461986", "text": "def cartesian_product(items, get_separator):\n combs = [None]\n first_item_required = None\n\n for required, args in items:\n if first_item_required is None:\n first_item_required = required\n\n separator = get_separator(required)\n _combine = partial(combine, separator=separator)\n\n combs = map(\n _combine,\n product(combs, args)\n )\n\n if combs != [None]:\n for comb in combs:\n yield first_item_required, comb", "title": "" }, { "docid": "55ebd7f244cf0fc0b86de69d0162e44e", "score": "0.54375833", "text": "def __call__(self, *sources: Source) -> Iterable[Swimporting]:\n if len(sources) > 1:\n src = chain.from_iterable(sources)\n elif sources:\n src = sources[0]\n else:\n return\n\n for o in src:\n s = self.do(o)\n if s:\n yield s", "title": "" }, { "docid": "cd23164165c0b00c295c342286431a4d", "score": "0.5437451", "text": "def test_multiple_sequences(self):\n\n def generate_different_arrays():\n \"\"\"Yield four different groups of two arrays\"\"\"\n dtypes = ['float32', 'float64', 'complex64', 'int8']\n shapes = [(4,), (4, 5), (4, 5, 6), (2,) * 8]\n for array_index in range(4):\n yield np.ones(\n shape=shapes[array_index],\n dtype=dtypes[array_index])\n yield 2 * np.ones(\n shape=shapes[array_index],\n dtype=dtypes[array_index])\n\n def switch_types(array):\n \"\"\"Return two copies of the array, one with a different type\"\"\"\n return np.copy(array), np.copy(array).astype(np.complex128)\n\n self.occurences = 0\n def compare_arrays(array1, array2):\n \"\"\"Make sure that all arrays coming in are equal\"\"\"\n self.occurences += 1\n np.testing.assert_almost_equal(array1, array2)\n\n blocks = [\n (NumpySourceBlock(generate_different_arrays), {'out_1': 0}),\n (NumpyBlock(switch_types, outputs=2), {'in_1': 0, 'out_1': 1, 'out_2': 2}),\n (NumpyBlock(np.fft.fft), {'in_1': 2, 'out_1': 3}),\n (NumpyBlock(np.fft.ifft), {'in_1': 3, 'out_1': 4}),\n (NumpyBlock(compare_arrays, inputs=2, outputs=0), {'in_1': 1, 'in_2': 4})]\n\n Pipeline(blocks).main()\n self.assertEqual(self.occurences, 8)", "title": "" }, { "docid": "63ab985187ab8bdbfce6723f6ceff441", "score": "0.5422615", "text": "def _get_yielded_and_returned_values(gen, values_to_send=()):\n yvals = []\n try:\n yvals.append(next(gen))\n for s in values_to_send:\n yvals.append(gen.send(s))\n while True: # Loop until `gen` returns, even if no more values to send.\n yvals.append(next(gen))\n except StopIteration as e:\n rvals = e.value\n return yvals, rvals", "title": "" }, { "docid": "20cebd134ead50dab060a8b3e60d3224", "score": "0.54173774", "text": "def sequence(*args):\n if len(args) > 1:\n def gen():\n while True:\n for o in args:\n yield o\n return gen()\n\n func = args and args[0] or None\n if isinstance(func, _.string_types):\n func = func.format\n\n elif func is None:\n func = lambda x: x\n\n def gen2():\n counter = 0\n while True:\n yield func(counter)\n counter += 1\n return gen2()", "title": "" }, { "docid": "2d8bcd1b7336f1a1294df2daf0e4515f", "score": "0.54116684", "text": "def roundrobin(*iterables):\n # Recipe credited to George Sakkis\n pending = len(iterables)\n nexts = cycle(iter(it).next for it in iterables)\n while pending:\n try:\n for next in nexts:\n yield next()\n except StopIteration:\n pending -= 1\n nexts = cycle(islice(nexts, pending))", "title": "" }, { "docid": "12dae1f974cb630a2b524277b82dc63f", "score": "0.54071265", "text": "def generator_start_wrapper(generator):\n yield\n\n yield from generator", "title": "" }, { "docid": 
"b2efdd688ce6851d9418b2d1ce79a52d", "score": "0.54056275", "text": "def commutators(scalar):\n return com1(scalar), com2(scalar), com3(scalar), com4(scalar)", "title": "" }, { "docid": "5ba63b465189152f8f9013def55cf84f", "score": "0.54034525", "text": "def create_generator(self):", "title": "" }, { "docid": "1624764657445a43444815d23a154b50", "score": "0.54007", "text": "def tst():\n yield \"a\", \"b\"", "title": "" }, { "docid": "3130d5e3d7f6793a485556d332a6f697", "score": "0.53600836", "text": "def fit_generator_from_aggregator(aggregator):\r\n return aggregator.map(func=fit_from_agg_obj)", "title": "" }, { "docid": "0a10cce74e94804927ac598f16cdc9de", "score": "0.53596437", "text": "def merge(self, accumulators):\n pass", "title": "" }, { "docid": "3fffb43c78455637d65b4687e630b2db", "score": "0.53572583", "text": "def merge_accumulators(\n self, accumulators: Iterable[_PartialNLStats]) -> _PartialNLStats:\n it = iter(accumulators)\n result = next(it)\n for accumulator in it:\n result += accumulator\n return result", "title": "" }, { "docid": "23b7572cc79298d73a1438e425714405", "score": "0.5346404", "text": "def zip_args_and_kwargs(\n args: Iterable[Tuple[Any, ...]],\n kwargs: Iterable[Dict[str, Any]],\n) -> Generator[Tuple[Tuple[Any, ...], Dict[str, Any]], None, None]:\n iterators: List[Iterator] = [iter(args), iter(kwargs)]\n fills = {0: (), 1: {}}\n num_active = 2\n while True:\n values = []\n for i, it in enumerate(iterators):\n try:\n values.append(next(it))\n except StopIteration:\n num_active -= 1\n if num_active == 0:\n return\n iterators[i] = itertools.repeat(fills[i]) # replace the iterator\n values.append(fills[i]) # for this iteration, insert fills[i] manually\n yield tuple(values)", "title": "" }, { "docid": "7de0d0a54a3cc8719b5d73a6e7a55e53", "score": "0.5317929", "text": "def get_generators(self, len_val, src_vocab, text_embedding, args):\n gate_gen = None\n if args['slot_gating']:\n gate_gen = Generator(len(GATES), scope_name=\"gate\")\n fertility_generator = Generator(len_val + 1, scope_name=\"fertility\")\n state_generator = Generator(src_vocab, text_embedding.get_embedding_wieght(), scope_name='state')\n point_state_generator = PointerGenerator(state_generator=state_generator, scope_name='point_generator')\n\n return fertility_generator, point_state_generator, gate_gen", "title": "" }, { "docid": "8e9ddad41927ae86b0414823d97663c7", "score": "0.52954257", "text": "def combine_latest(other: AsyncObservable[TOther]) -> Zipper[Any, TOther]:\n\n def _combine_latest(source: AsyncObservable[TSource]) -> AsyncObservable[Tuple[TSource, TOther]]:\n async def subscribe_async(aobv: AsyncObserver[Tuple[TSource, TOther]]) -> AsyncDisposable:\n safe_obv, auto_detach = auto_detach_observer(aobv)\n\n async def worker(inbox: MailboxProcessor[Msg[TSource]]) -> None:\n @tailrec_async\n async def message_loop(\n source_value: Option[TSource], other_value: Option[TOther]\n ) -> TailCallResult[NoReturn]:\n cn = await inbox.receive()\n\n async def get_value(n: Notification[Any]) -> Option[Any]:\n with match(n) as m:\n for value in case(OnNext[TSource]):\n return Some(value)\n\n for err in case(OnError):\n await safe_obv.athrow(err)\n\n while m.default():\n await safe_obv.aclose()\n return Nothing\n\n with match(cn) as case:\n for value in case(SourceMsg[TSource]):\n source_value = await get_value(value)\n break\n\n for value in case(OtherMsg[TOther]):\n other_value = await get_value(value)\n break\n\n def binder(s: TSource) -> Option[Tuple[TSource, TOther]]:\n def mapper(o: TOther) -> 
Tuple[TSource, TOther]:\n return (s, o)\n\n return other_value.map(mapper)\n\n combined = source_value.bind(binder)\n for x in combined.to_list():\n await safe_obv.asend(x)\n\n return TailCall(source_value, other_value)\n\n await message_loop(Nothing, Nothing)\n\n agent = MailboxProcessor.start(worker)\n\n async def obv_fn1(n: Notification[TSource]) -> None:\n pipe(SourceMsg(n), agent.post)\n\n async def obv_fn2(n: Notification[TOther]) -> None:\n pipe(OtherMsg(n), agent.post)\n\n obv1: AsyncObserver[TSource] = AsyncNotificationObserver(obv_fn1)\n obv2: AsyncObserver[TOther] = AsyncNotificationObserver(obv_fn2)\n dispose1 = await pipe(obv1, source.subscribe_async, auto_detach)\n dispose2 = await pipe(obv2, other.subscribe_async, auto_detach)\n\n return AsyncDisposable.composite(dispose1, dispose2)\n\n return AsyncAnonymousObservable(subscribe_async)\n\n return _combine_latest", "title": "" }, { "docid": "cea8a8ecce027f7efdb0cd9298e7ecd1", "score": "0.5288658", "text": "def product(*args, **kwds):\n pools = map(tuple, args) * kwds.get('repeat', 1)\n result = [[]]\n for pool in pools:\n result = [x+[y] for x in result for y in pool]\n for prod in result:\n yield tuple(prod)", "title": "" }, { "docid": "c19238638d7eba6e38989cc2b00ca7d5", "score": "0.52800506", "text": "def _repeat_stream(stream, n_devices):\n while True:\n for example in stream(n_devices):\n yield example", "title": "" }, { "docid": "d7a9545b0fcf0deaa6518a7ac3c20f7b", "score": "0.52757585", "text": "def __next__(self):\n\n if self.batch_index == 0:\n self.reset_index()\n\n current_index = (self.batch_index * self.batch_size) % self.n\n if self.n > current_index + self.batch_size:\n self.batch_index += 1\n else:\n self.batch_index = 0\n\n # random sample the lambda value from beta distribution.\n l = np.random.beta(self.alpha, self.alpha, self.batch_size)\n\n X_l = l.reshape(self.batch_size, 1, 1, 1)\n y_l = l.reshape(self.batch_size, 1)\n\n # Get a pair of inputs and outputs from two iterators.\n X1, y1 = self.generator1.next()\n X2, y2 = self.generator2.next()\n\n # Perform the mixup.\n X = X1 * X_l + X2 * (1 - X_l)\n y = y1 * y_l + y2 * (1 - y_l)\n return X, y", "title": "" }, { "docid": "42c9cb5f29ff1545f70c1cd61df8d68c", "score": "0.5261039", "text": "def concatenate(u, words):\n\n for word in words:\n yield tuple([u] + list(word))", "title": "" }, { "docid": "176a60c9dd43dd74a7dbd508bdcadd74", "score": "0.5257654", "text": "def tasks_generator(num_all_classes=100, num_avg_classes=2):\n tasks_iter = itertools.combinations(np.arange(num_all_classes), num_avg_classes)\n for task in tasks_iter:\n yield task", "title": "" }, { "docid": "53ffa188a77fe3297e3a313d8267f7c6", "score": "0.52474356", "text": "def interleave(seqs: Union[Seq, Seq[Seq]], *extra, aligned=False):\n if extra:\n seqs = (seqs, *extra)\n if aligned:\n return Iter(_interleave_aligned(seqs))\n return Iter(toolz.interleave(seqs))", "title": "" }, { "docid": "f61b238ed55217d2f81b7622c0ec3f86", "score": "0.52347803", "text": "def gens(self):\n return tuple(self.algebra_generators())", "title": "" }, { "docid": "12f0ab9927e07475dfb5f3febf3fcb02", "score": "0.5224693", "text": "def roundrobin(*iterables: tp.Iterable[tp.Any]) -> tp.Iterator[tp.Any]:\n # Recipe credited to George Sakkis\n num_active = len(iterables)\n nexts = itertools.cycle(iter(it).__next__ for it in iterables)\n while num_active:\n try:\n for next_ in nexts:\n yield next_()\n except StopIteration:\n # Remove the iterator we just exhausted from the cycle.\n num_active -= 1\n nexts = 
itertools.cycle(itertools.islice(nexts, num_active))", "title": "" }, { "docid": "7dcda61d3d4ca91a0455a6d6e91b8a78", "score": "0.52164465", "text": "def assemble(self, instructions):\n for instruction in instructions:\n yield instruction.serialize()", "title": "" }, { "docid": "894ebc5ac874c3904022202db34d3001", "score": "0.5213145", "text": "def test_multiple_output_rings(self):\n def generate_many_arrays():\n \"\"\"Put out 10x10 numpy arrays\"\"\"\n for _ in range(10):\n yield (np.array([1, 2, 3, 4]).astype(np.float32),) * 10\n def assert_expectation(*args):\n \"\"\"Assert the arrays are as expected\"\"\"\n assert len(args) == 10\n for array in args:\n np.testing.assert_almost_equal(array, [1, 2, 3, 4])\n self.occurences += 1\n blocks = []\n blocks.append((\n NumpySourceBlock(generate_many_arrays, outputs=10),\n {'out_%d' % (i + 1): i for i in range(10)}))\n blocks.append((\n NumpyBlock(assert_expectation, inputs=10, outputs=0),\n {'in_%d' % (i + 1): i for i in range(10)}))\n Pipeline(blocks).main()\n self.assertEqual(self.occurences, 10)", "title": "" }, { "docid": "799aef831b3091f8a8d5b490f5904860", "score": "0.5197894", "text": "def _advance_pattern_generators(self,p):\n return p.generators", "title": "" }, { "docid": "3a97ec91202c7651022c830d4ac6a3e0", "score": "0.5196196", "text": "def combine_latest(observables: Union[ObservableBase, Iterable[ObservableBase]],\n mapper: Callable[[Any], Any]) -> ObservableBase:\n if isinstance(observables, typing.Observable):\n observables = [observables]\n\n args = list(observables)\n result_mapper = mapper\n parent = args[0]\n\n def subscribe(observer, scheduler=None):\n n = len(args)\n has_value = [False] * n\n has_value_all = [False]\n is_done = [False] * n\n values = [None] * n\n\n def next(i):\n has_value[i] = True\n\n if has_value_all[0] or all(has_value):\n try:\n res = result_mapper(*values)\n except Exception as ex:\n observer.on_error(ex)\n return\n\n observer.on_next(res)\n elif all([x for j, x in enumerate(is_done) if j != i]):\n observer.on_completed()\n\n has_value_all[0] = all(has_value)\n\n def done(i):\n is_done[i] = True\n if all(is_done):\n observer.on_completed()\n\n subscriptions = [None] * n\n\n def func(i):\n subscriptions[i] = SingleAssignmentDisposable()\n\n def on_next(x):\n with parent.lock:\n values[i] = x\n next(i)\n\n def on_completed():\n with parent.lock:\n done(i)\n\n subscriptions[i].disposable = args[i].subscribe_(on_next, observer.on_error,\n on_completed, scheduler)\n\n for idx in range(n):\n func(idx)\n return CompositeDisposable(subscriptions)\n return AnonymousObservable(subscribe)", "title": "" }, { "docid": "c0526036f8f7e82529a701ffd1922783", "score": "0.5195552", "text": "def generator(self, latent, reuse):\n pass", "title": "" }, { "docid": "36558a6ae11cbeb2816add50d0f95aeb", "score": "0.51948166", "text": "def merge_adjacent(gen):\n gen = iter(gen)\n last = next(gen)\n for this in gen:\n if this.merge_key == last.merge_key:\n last.merge(this)\n elif last < this:\n yield last\n last = this\n else:\n raise AssertionError('Bad order, %s > %s' % (last, this))\n yield last", "title": "" }, { "docid": "ac9d339cf38de99d1cb18401b22c5d1d", "score": "0.518061", "text": "def get_instructions(self):\n for block in self:\n for instruction in block:\n yield instruction", "title": "" }, { "docid": "d4f19f20bb3f78616dba6da86bdea368", "score": "0.5171164", "text": "def combine_ranges(all_ranges: Iterable[IPNetwork]) -> Iterator[IPNetwork]:\n # ipaddress.collapse_addresses can't handle v4 and v6 ranges at the same time\n 
ipv4, ipv6 = [], []\n # only consume all_ranges once\n for anet in all_ranges:\n if anet.version == 4:\n ipv4.append(anet)\n elif anet.version == 6:\n ipv6.append(anet)\n else:\n raise TypeError(anet)\n\n for ranges in [ipv4, ipv6]:\n ranges = list(ipaddress.collapse_addresses(sorted(ranges))) # type: ignore\n for net in ranges:\n if net.version == 4 and net.prefixlen < 16:\n for subnet in net.subnets(new_prefix=16):\n yield subnet\n elif net.version == 6 and net.prefixlen < 19:\n for subnet in net.subnets(new_prefix=19):\n yield subnet\n else:\n yield net", "title": "" }, { "docid": "5636b0c0aa61d0e3618819b3e3d4d2f4", "score": "0.51665026", "text": "def join(*seqs):\n return reduce(operator.concat, seqs)", "title": "" }, { "docid": "50f9237a8b05f0258ee624c92f65b02d", "score": "0.5166049", "text": "def others(config, configs):\n for other in configs:\n if other is config:\n continue\n yield other", "title": "" }, { "docid": "a416af002e53cf223ccf278b4d38bbfe", "score": "0.515366", "text": "def forward_sequence(self):\n functions = []\n\n def visit_func(pf):\n functions.append(pf)\n self.execute_on_proto(visit_func)\n for pf in functions:\n yield pf", "title": "" }, { "docid": "60af186406998dde79bf76340545d937", "score": "0.5152686", "text": "def generate_different_arrays():\n dtypes = ['float32', 'float64', 'complex64', 'int8']\n shapes = [(4,), (4, 5), (4, 5, 6), (2,) * 8]\n for array_index in range(4):\n yield np.ones(\n shape=shapes[array_index],\n dtype=dtypes[array_index])\n yield 2 * np.ones(\n shape=shapes[array_index],\n dtype=dtypes[array_index])", "title": "" }, { "docid": "21a6cac6d2ab728fd3fcf4df1a2721e8", "score": "0.51503", "text": "def many(p: Parser, init: Optional[List[Any]] = None) -> Parser:\n @parser\n def g(c: Cursor, a: Any):\n try:\n result = init or []\n while True:\n x, c, a = p(c, a).invoke()\n result.append(x)\n except Failure:\n return result, c, a\n return g", "title": "" }, { "docid": "aaba7aaa7d821ae14fcd2d9b34691d34", "score": "0.5141723", "text": "def items(self):\n self.run_all_gens()\n _coconut_yield_from_7 = _coconut.iter(self.registered.items())\n while True:\n try:\n yield _coconut.next(_coconut_yield_from_7)\n except _coconut.StopIteration as _coconut_yield_err_3:\n _coconut_yield_from_6 = _coconut_yield_err_3.args[0] if _coconut.len(_coconut_yield_err_3.args) > 0 else None\n break\n\n _coconut_yield_from_6", "title": "" }, { "docid": "de6133ca04f1fe52222df492d83ab388", "score": "0.5129925", "text": "def gen_ruptures(self, sources, monitor, site_coll):\r\n filtsources_mon = monitor.copy('filtering sources')\r\n genruptures_mon = monitor.copy('generating ruptures')\r\n filtruptures_mon = monitor.copy('filtering ruptures')\r\n for src in sources:\r\n with filtsources_mon:\r\n s_sites = src.filter_sites_by_distance_to_source(\r\n self.maximum_distance, site_coll\r\n ) if self.maximum_distance else site_coll\r\n if s_sites is None:\r\n continue\r\n\r\n with genruptures_mon:\r\n ruptures = list(src.iter_ruptures())\r\n if not ruptures:\r\n continue\r\n\r\n for rupture in ruptures:\r\n with filtruptures_mon:\r\n r_sites = filters.filter_sites_by_distance_to_rupture(\r\n rupture, self.maximum_distance, s_sites\r\n ) if self.maximum_distance else s_sites\r\n if r_sites is None:\r\n continue\r\n yield SourceRuptureSites(src, rupture, r_sites)\r\n filtsources_mon.flush()\r\n genruptures_mon.flush()\r\n filtruptures_mon.flush()", "title": "" }, { "docid": "bafd6afa4d942f929fd78fc0ae24d95c", "score": "0.5123105", "text": "def 
test_compare_structure_of_two_custom_generators_with_complex_dependencies():\n\n mapping = {\n 1: ['a', 'aa', 'aaa', 'aaaa', 'aaaaa'],\n 2: ['b', 'bb', 'bbb', 'bbbb', 'bbbbb'],\n 3: ['c', 'cc', 'ccc', 'cccc', 'ccccc'],\n 4: ['d', 'dd', 'ddd', 'dddd', 'ddddd'],\n 5: ['e', 'ee', 'eee', 'eeee', 'eeeee'],\n 6: ['f', 'ff', 'fff', 'ffff', 'fffff'],\n 7: ['g', 'gg', 'ggg', 'gggg', 'ggggg'],\n }\n\n class Quux1Generator(CustomGenerator):\n nn = Integer(1, 5)\n aa = SelectMultiple(Lookup(Integer(1, 7), mapping), num=nn)\n\n class Quux2Generator(CustomGenerator):\n nn = Integer(1, 5)\n key_gen = Integer(1, 7)\n mapping_gen = Constant(mapping)\n lookup = Lookup(key_gen, mapping_gen)\n aa = SelectMultiple(lookup, num=nn)\n\n g1 = Quux1Generator()\n g2 = Quux2Generator()\n\n df1 = g1.generate(100, seed=12345).to_df()\n df2 = g2.generate(100, seed=12345).to_df()\n\n pd.util.testing.assert_frame_equal(df1, df2[[\"nn\", \"aa\"]])", "title": "" }, { "docid": "8e9e535309a706fc6ef4efb6ee6e73a4", "score": "0.51147544", "text": "def do_combine(stream, log, select_func, combine_func):\n for id11, record_set in itertools.groupby(stream, lambda r: r.station_uid):\n log.write('%s\\n' % id11)\n records = set()\n for record in record_set:\n records.add(record)\n ann_mean, ann_anoms = series.monthly_annual(record.series)\n record.set_ann_anoms(ann_anoms)\n record.ann_mean = ann_mean\n begin, end = records_begin_end(records)\n years = end - begin + 1\n # reduce the collection of records (by combining) until there\n # are none (or one) left.\n while records:\n if len(records) == 1:\n # Just one left, yield it.\n yield records.pop()\n break\n record = select_func(records)\n records.remove(record)\n sums, wgts = fresh_arrays(record, years)\n log.write(\"\\t%s %s %s -- %s\\n\" % (record.uid,\n record.first_valid_year(), record.last_valid_year(),\n record.source))\n combine_func(sums, wgts, begin, records, log, record.uid)\n final_data = average(sums, wgts)\n record.set_series(begin * 12 + 1, final_data)\n yield record", "title": "" }, { "docid": "823633ef6734023c99a7e253e5700134", "score": "0.511182", "text": "def sequence(first: Parser, *rest: Parser) -> Parser:\n if rest:\n return first >> (lambda _: sequence(*rest))\n else:\n return first", "title": "" }, { "docid": "1ad97ce2b3a98688de9b90f382fcb3ea", "score": "0.5106043", "text": "def pairs_generator(exchanges):\n return list(combinations(exchanges, 2))", "title": "" }, { "docid": "5366cc387faa279719d0604f7b212fff", "score": "0.5101079", "text": "def make_generators():\n \n # Rescale the RGB coefficants to make it play nicer with out model\n train_datagen = ImageDataGenerator(rescale=1./255)\n test_datagen = ImageDataGenerator(rescale=1./255)\n\n train_generator = train_datagen.flow_from_directory(\n TRAIN_DATA_PATH,\n target_size= (150, 150),\n batch_size= 20,\n class_mode= 'binary')\n\n validation_generator = test_datagen.flow_from_directory(\n VAL_DATA_PATH,\n target_size= (150, 150),\n batch_size= 20,\n class_mode= 'binary')\n\n return train_generator, validation_generator", "title": "" }, { "docid": "bc1b79f2093cd1b100e8212253ec4816", "score": "0.50996995", "text": "def process(input_stream, base_generator, processors):\r\n from tqdm import tqdm\r\n from profiling import Profiled\r\n\r\n iterable = tqdm(base_generator(input_stream), \"baseline generator\", mininterval=3.0)\r\n results = chain_generators(iterable, processors)\r\n for _ in tqdm(results, \"final results\", mininterval=3.0):\r\n Profiled.total_count += 1", "title": "" }, { "docid": 
"1019ab43f903cebd4a3229f0fed4019b", "score": "0.5095481", "text": "def __iter__(self):\n integrator = (self._integrator,) if self._integrator else []\n yield from chain(self._tuners, self._updaters, integrator,\n self._writers, self._computes)", "title": "" }, { "docid": "65d4b846522676dcf35ee6494e754d7c", "score": "0.5094075", "text": "def mix_objects(source_session):\n with gen_asym_keys(source_session, ASYM_KEYS[0], 5) as asym_keys, gen_sym_keys(\n source_session, random.choice(SYM_KEYS), 20\n ) as sym_keys, create_data_object(\n source_session, DATA_TEMPLATE, 20\n ) as data_obj, create_data_object(\n source_session, CERTIFICATE_TEMPLATE, 20\n ) as cert_obj:\n yield get_migration_data(asym_keys + sym_keys + data_obj + cert_obj)", "title": "" }, { "docid": "8d4186505c4fa85ab7648ebe44f28f8f", "score": "0.5093453", "text": "def aggregate_meta(sources):\n for s in sources:\n for (chan, desc, fill) in s.meta:\n yield (s.mode, chan, desc, fill)", "title": "" }, { "docid": "6412de12253cc4c0752207c654bee2de", "score": "0.50933444", "text": "def _iter_alternate_objects(self):\n for alternate in self.alternates:\n for alternate_object in alternate:\n yield alternate_object", "title": "" }, { "docid": "0ca3a04f28b7935f2102a961374ddbbd", "score": "0.50929135", "text": "def listMergeGen(*args):\r\n if len(args) == 0:\r\n r = np.array([[]])\r\n elif len(args) == 1:\r\n a = np.array(args[0])\r\n if a.shape:\r\n r = a.reshape((np.amax(a.shape), 1))\r\n else:\r\n r = np.array([[a]])\r\n\r\n else:\r\n A = np.meshgrid(*args)\r\n\r\n r = np.array([i.flatten() for i in A]).T\r\n\r\n for i in r:\r\n yield i", "title": "" } ]
96dd970d4d2f402366f34f7bc5e38ac1
This function tokenizes, removes stop words and applies lower case to every word within the text
[ { "docid": "0e412cf780cdd57714e1ba1ad43bc1c9", "score": "0.0", "text": "def parse_sentence(self, text):\n\n final_list = []\n list_of_words = self.clean_text(text.split())\n i = 0\n plus = 1\n while i < len(list_of_words):\n if len(list_of_words[i]) < 2 and not list_of_words[i].isdigit():\n i+=1\n continue\n if not list_of_words[i].lower() in self.stop_words and list_of_words[i][0] != \"@\" and not list_of_words[i].startswith(\"https\"):\n if list_of_words[i][0] == \"#\":\n words = self.serpearte_hashtags(list_of_words[i])\n for w in words:\n if w != '' or w.lower() not in self.stop_words:\n final_list.append(self.stem.stem_term(w.lower()))\n elif self.is_number(list_of_words[i]):\n if i != len(list_of_words)-1 and list_of_words[i+1] in self.perc:\n final_list.append(list_of_words[i] + \"%\")\n plus=2#change to while later\n if i != len(list_of_words)-1 and \"/\" in list_of_words[i+1]:\n lst = list_of_words[i+1].split(\"/\")\n try:\n float(lst[0])\n float(lst[1])\n final_list.append(list_of_words[i] + \" \" + list_of_words[i+1])\n plus=2\n except:\n pass\n if i != len(list_of_words)-1 and list_of_words[i+1] in self.numbers:\n if list_of_words[i+1][0] == \"t\" or list_of_words[i+1][0] == \"T\":\n final_list.append(list_of_words[i] + \"K\")\n else:\n final_list.append(list_of_words[i] + list_of_words[i+1][0].upper())\n plus=2\n else:\n final_list.append(self.reduce_number(list_of_words[i]))\n elif \"-\" in list_of_words[i] or \"/\" in list_of_words[i] or \"_\":\n if \"/\" in list_of_words[i]:\n lst = list_of_words[i].split(\"/\")\n elif \"-\" in list_of_words[i]:\n lst = list_of_words[i].split(\"-\")\n else:\n lst = list_of_words[i].split(\"_\")\n for j in lst:\n if j == '':\n continue\n if self.is_number(j):\n final_list.append(self.reduce_number(j))\n else:\n if j.lower() not in self.stop_words:\n final_list.append(self.stem.stem_term(j.lower()))\n elif \"..\" in list_of_words[i]:\n for j in list_of_words[i]:\n if j == \".\":\n list_of_words[i] = list_of_words[i].replace(j, \" \")\n s = list_of_words[i].split(\" \")\n for j in s:\n if j != \" \" and j.lower() not in self.stop_words:\n final_list.append(self.stem.stem_term(j.lower()))\n elif \".\" in list_of_words[i]:\n words = list_of_words[i].split(\".\")\n flag = True\n for w in words:\n if len(w) != 1:\n flag = False\n if flag:\n i+=plus\n continue\n for w in words:\n if w == '' or w[0] == \"@\":\n continue\n if self.is_number(w):\n final_list.append(self.reduce_number(w))\n else:\n if w.lower() not in self.stop_words:\n final_list.append(self.stem.stem_term(w.lower()))\n else:\n if list_of_words[i].lower() not in self.stop_words:\n final_list.append(self.stem.stem_term(list_of_words[i].lower()))\n i += plus\n final_list = list(filter(lambda a: a != '' or (len(a) != 1 and not a.isdigit()), final_list))\n return final_list", "title": "" } ]
[ { "docid": "a9c24e059ba0d629375783d4818e3a20", "score": "0.77378315", "text": "def tokenize(txt: str):\n return [w.lower() for w in txt.split(' ')]", "title": "" }, { "docid": "c5959a44e97cef37db948692cef781c8", "score": "0.7713063", "text": "def tokenize(self, text):\n words = nltk.word_tokenize(text)\n words = (self.stem(word) for word in words)\n words = (word.lower() for word in words)\n words = (word for word in words if word not in self.stop_words)\n return words", "title": "" }, { "docid": "15c1ef5f6ae0556e1ec42148b1854269", "score": "0.76650214", "text": "def tokenize(text):\n return [token.lower() for token in simple_preprocess(text) if token not in STOPWORDS and token not in reuters_stopwords]", "title": "" }, { "docid": "82f04df4428fad25d479b74d94f41a37", "score": "0.7526388", "text": "def tokenize(text):\n return gensim.utils.tokenize(text, to_lower=True)", "title": "" }, { "docid": "fafd7a4469d94f04ac33960bbc03aec1", "score": "0.72875655", "text": "def tokenize(doc):\n # doc = file.read()\n # token_pattern=r\"(?u)\\b\\w\\w+\\b\"\n token_pattern=r\"(?u)\\b\\w[a-zA-Z]+\\b\" # only word, not number\n token_pattern = re.compile(token_pattern)\n # tokenize = lambda doc: token_pattern.findall(doc.lower()\n tokenize = lambda doc: token_pattern.findall(doc)\n words = tokenize(doc) \n for i in range(len(words)):\n if words[i].istitle():\n words[i] = words[i].lower()\n return words", "title": "" }, { "docid": "feaab67c4cc41d8eb44a84a7c71d414e", "score": "0.7284889", "text": "def tokenize_text(text):\n result = []\n for seq in SEQUENCE.finditer(text):\n result.append(list(map(str.lower, WORD.findall(seq.group(0)))))\n return result", "title": "" }, { "docid": "7f30b1294f75193614acc560349673bc", "score": "0.72693795", "text": "def tokenize(self, text, never_split=None, **kwargs):\n if self.normalize_text:\n text = unicodedata.normalize(\"NFKC\", text)\n\n never_split = self.never_split + (never_split if never_split is not None else [])\n tokens = []\n\n for word in self.mecab(text):\n token = word.surface\n\n if self.do_lower_case and token not in never_split:\n token = token.lower()\n\n tokens.append(token)\n\n return tokens", "title": "" }, { "docid": "70f8c800231fa758fdd1c77eb15ad402", "score": "0.7194744", "text": "def tokenize(self, sentence):\n sentence = ENSURE_UNICODE(sentence)\n for word in self._tokenizer(sentence):\n yield word if self._case_sensitive else word.lower()", "title": "" }, { "docid": "53b78885fec00ec4b2a8dc01b239366e", "score": "0.71805197", "text": "def proc_text(self, text):\n\n lemmas = []\n tokens = []\n doc = self(text)\n for tok_obj in doc:\n if self._removePunct and tok_obj.is_punct:\n continue\n lemma = tok_obj.lemma_\n text = tok_obj.text\n if self._keepOnlyAlphaNum and not is_alpha_num(text):\n continue\n tok1 = text.lower()\n tok2 = lemma.lower()\n if tok1 in self._stopWords or tok2 in self._stopWords:\n continue\n\n if self._lowerCase:\n text = text.lower()\n lemma = lemma.lower()\n\n lemmas.append(lemma)\n tokens.append(text)\n\n return ' '.join(lemmas), ' '.join(tokens)", "title": "" }, { "docid": "458151d75508480b62440e016665eb9e", "score": "0.716325", "text": "def preprocess(txt,parser):\n\tdoc = parser(txt)\n\treturn [token.lemma_.lower() for token in doc]", "title": "" }, { "docid": "99e53dc4539edb6be773e3d9185595ad", "score": "0.7155567", "text": "def naive(self, text):\n\n\t\ttokenizedText = None\n\n\t\t#Fill in code here\n\t\ttottext=[]\n\t\tintext=[]\n\t\tt=''\n\t\tword=''\n\t\tfor i in range(len(text)):\n\t\t\tt=text[i].lower()\n\t\t\tfor j 
in range(len(t)):\n\t\t\t\tif t[j]!=' ':\n\t\t\t\t\tword=word+t[j]\n\t\t\t\tif t[j]==' ':\n\t\t\t\t\tintext.append(word)\n\t\t\t\t\tword=''\n\t\t\ttottext.append(intext)\n\t\t\tintext=[]\n\t\ttokenizedText=tottext\n\n\t\treturn tokenizedText", "title": "" }, { "docid": "71e56208430fc7d202b0e72b9329698f", "score": "0.7143314", "text": "def text_preprocessing(text):\n tokenizer = nltk.tokenize.RegexpTokenizer(r'\\w+')\n nopunc = clean_text(text)\n tokenized_text = tokenizer.tokenize(nopunc)\n remove_stopwords = [w for w in tokenized_text if w not in stopwords.words('english')]\n combined_text = ' '.join(tokenized_text)\n return combined_text", "title": "" }, { "docid": "8ca7c741d98def99938da052dcba2191", "score": "0.7099463", "text": "def tokenize(text):\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower())\n tokens = word_tokenize(text)\n clean_tokens = [lemmatizer.lemmatize(word)\n for word in tokens if word not in stop_words]\n return clean_tokens", "title": "" }, { "docid": "843cd23163f73270bfc0b1c60bfad42c", "score": "0.7085407", "text": "def procText(self, text):\n\n lemmas = []\n tokens = []\n doc = self(text)\n for sent in doc.sentences:\n for tokObj in sent.words:\n if self._removePunct and tokObj.upos == 'PUNCT':\n continue\n lemma = tokObj.lemma\n text = tokObj.text\n if self._keepOnlyAlphaNum and not isAlphaNum(text):\n continue\n tok1 = text.lower()\n tok2 = lemma.lower()\n if tok1 in self._stopWords or tok2 in self._stopWords:\n continue\n\n if self._lowerCase:\n text = text.lower()\n lemma = lemma.lower()\n\n lemmas.append(lemma)\n tokens.append(text)\n\n return ' '.join(lemmas), ' '.join(tokens)", "title": "" }, { "docid": "985437f5970a9b0d4a92edbabcd2acaa", "score": "0.7084913", "text": "def procText(self, text):\n\n lemmas = []\n tokens = []\n doc = self(text)\n for tokObj in doc:\n if self._removePunct and tokObj.is_punct:\n continue\n lemma = tokObj.lemma_\n text = tokObj.text\n if self._keepOnlyAlphaNum and not isAlphaNum(text):\n continue\n tok1 = text.lower()\n tok2 = lemma.lower()\n if tok1 in self._stopWords or tok2 in self._stopWords:\n continue\n\n if self._lowerCase:\n text = text.lower()\n lemma = lemma.lower()\n\n lemmas.append(lemma)\n tokens.append(text)\n\n return ' '.join(lemmas), ' '.join(tokens)", "title": "" }, { "docid": "0e4c3e368f481df5279ecf66dd4b63d6", "score": "0.70735097", "text": "def _preprocess(self, a_txt):\n return super(LexiconBaseAnalyzer, self)._preprocess(a_txt).lower()", "title": "" }, { "docid": "e110a5a9edbc81fd97013c654a659343", "score": "0.70731664", "text": "def normalize_text(s, lower_case = True):\n\n def remove_articles(text):\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower() if lower_case else text\n #return text.lower()\n \n def remove_stop_words(text):\n return re.sub(r'\\b(as|to|that|and|of)\\b', ' ', text)\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "title": "" }, { "docid": "512f7ce1781c17f635fde142ca3e31b0", "score": "0.7067765", "text": "def simple_tokenization(doc):\n tok = re.findall('(\\\\w+)', doc.lower())\n docs = ' '.join(tok)\n return word_tokenize(docs)", "title": "" }, { "docid": "e3385a2dfae0fabfaef1ed09e582b853", "score": "0.7065585", "text": "def tokenize(self, text):\n\n return word_tokenize(text)", "title": "" }, { "docid": "124040aeabb11a9504d547a80a64ad44", 
"score": "0.70500314", "text": "def tokenize_and_clean(text):\r\n tokens = []\r\n for token in word_tokenize(text):\r\n if not token.isdigit(): # keep purely numerical strings unchanged\r\n token = ''.join(c for c in token if c.isalpha())\r\n if token.isalnum(): #and token.lower() not in stops: # if token != \"\"\r\n tokens.append(token.lower())\r\n return tokens", "title": "" }, { "docid": "326fde92140fea8a5073a51264988c56", "score": "0.70296556", "text": "def tokenize(text):\n text = text.lower()\n text = re.sub(r\"[^a-zA-z0-9]\",\" \",text)\n words = word_tokenize(text)\n words = [w for w in words if w not in stopwords.words(\"english\")]\n \n clean_tokens = []\n lemmatizer = WordNetLemmatizer()\n \n for w in words:\n clean_tok = lemmatizer.lemmatize(w , pos='v').strip()\n clean_tokens.append(clean_tok)\n return clean_tokens", "title": "" }, { "docid": "c8f18c11bd4a9f89a96391675eeb4ac9", "score": "0.70207685", "text": "def process(self, text: str, allow_stopwords: bool = False) -> str:\n ret = []\n\n # split and cast to lower case\n text = re.sub(r'<[^>]+>', ' ', str(text))\n for word in re.split('[^a-zA-Z]', str(text).lower()):\n # remove non-alphabetic and stop words\n if (word.isalpha() and word not in self.stopwords) or allow_stopwords:\n if word not in self.stemmed:\n self.stemmed[word] = self.ps.stem(word)\n # use stemmed version of word\n ret.append(self.stemmed[word])\n return ' '.join(ret)", "title": "" }, { "docid": "a5ec6f60b99f5443b96fafc591b3aca5", "score": "0.7007904", "text": "def textPreprocessing(self):\n\t\tignored_words = nltk.corpus.stopwords.words('english')\n\t\tfor i in range(len(self._data)):\n\t\t\tcom = nltk.wordpunct_tokenize(self._data[i][self._comName])\n\t\t\tnewc = []\n\t\t\tfor j in range(len(com)):\n\t\t\t\ttoken = com[j].lower()\n\t\t\t\t#checking stopwords\n\t\t\t\tif token not in ignored_words:\n\t\t\t\t\tnewc.append(token)\n\t\t\tself._data[i][self._comName] = \" \".join(newc)\n\t\t#end", "title": "" }, { "docid": "1f0ce98380c61e5b5a7392c0ba5758e2", "score": "0.7007564", "text": "def tokenize(text: str):\n RE_WORDS = r'\\S+'\n return list([x for x in re.findall(RE_WORDS, text.lower())])", "title": "" }, { "docid": "c3d9a8e6185301c8bd28f18626e1c9cb", "score": "0.6998272", "text": "def initial_clean(text):\n text = re.sub(\"[^a-zA-Z ]\", \"\", text)\n text = text.lower() # lower case text\n text = nltk.word_tokenize(text)\n return text", "title": "" }, { "docid": "c97ebe36a92b8c52038f4ab48ed7893b", "score": "0.6976115", "text": "def processes_and_tokenize(raw_document):\n\ttokenizer = RegexpTokenizer(r'\\w+')\n\ttokens = tokenizer.tokenize(raw_document.lower())\t\t# tokens = nltk.word_tokenize(corpus.lower()) # without removing punctiation\n\n\t#remove stop words\n\tstop_words = set(stopwords.words('english'))\n\tfiltered_tokens = [w for w in tokens if not w in stop_words]\n\treturn filtered_tokens", "title": "" }, { "docid": "5932148192170b247ba6a2cb9733f09c", "score": "0.69752234", "text": "def tokenize(self, text):\n res = []\n if len(text) == 0:\n return res\n\n if self.lower:\n text = text.lower()\n # for the multilingual (include: Chinese and English)\n res = tokenize_words(text)\n return res", "title": "" }, { "docid": "e623a58906b9f7d86aa1c3fb6b48d616", "score": "0.6969152", "text": "def preprocess(text, language=\"english\", lower=True):\n words = []\n tokenized_text = []\n\n for line in text:\n tokenized = nltk.word_tokenize(line, language=language)\n if lower:\n tokenized = [word.lower() for word in tokenized]\n\n 
tokenized_text.append(tokenized)\n for word in tokenized:\n words.append(word)\n\n most_common = Counter(words).most_common()\n\n return tokenized_text, most_common", "title": "" }, { "docid": "82ae6831d96062db222a8ec170097a61", "score": "0.6958967", "text": "def process_text(s):\n # remove punctuation characters\n s = s.translate(\n str.maketrans('', '', string.punctuation))\n # remove multiple consecutive spaces\n s = re.sub(' +', ' ', s)\n \n return s.lower()", "title": "" }, { "docid": "4484bdbfb6dbfe3a484379474a458c7f", "score": "0.69518286", "text": "def tokenize(text):\n tokens = word_tokenize(text)\n # remove stopwords\n stop = set(stopwords.words('english'))\n tokens = [token for token in tokens if token not in stop]\n lemmatizer = WordNetLemmatizer()\n \n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n \n return clean_tokens", "title": "" }, { "docid": "e699c5af5824d5a77e4a1ed525cd7ce3", "score": "0.69356287", "text": "def tokenize(text, stem_words = True):\n stop_words = stopwords.words('english')\n stemmer = PorterStemmer()\n regex = r\"([a-zA-Z]+)\"\n tokens = re.findall(regex, text.lower())\n if (stem_words):\n return [stemmer.stem(t) for t in tokens if not t in stop_words]\n else:\n return [t for t in tokens if not t in stop_words]", "title": "" }, { "docid": "eb1b481b9dbcaeabdf001b6c02007856", "score": "0.69251627", "text": "def tokenize(self, text, never_split=None, **kwargs):\n if self.normalize_text:\n text = unicodedata.normalize(\"NFKC\", text)\n\n never_split = self.never_split + (never_split if never_split is not None else [])\n tokens = []\n\n for word in self.sudachi.tokenize(text):\n token = word.surface()\n\n if self.do_lower_case and token not in never_split:\n token = token.lower()\n\n if self.trim_whitespace:\n if token.strip() == \"\":\n continue\n else:\n token = token.strip()\n\n tokens.append(token)\n\n return tokens", "title": "" }, { "docid": "6ad5494ebb1bb6688188a19ae52d747c", "score": "0.69224113", "text": "def tokenize(self, text):\n text = convert_to_unicode(text)\n\n\n orig_tokens = whitespace_tokenize(text)\n # print(\"original tokens: \", orig_tokens)\n split_tokens = []\n for token in orig_tokens:\n # if self.do_lower_case:\n # token = token.lower()\n # token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token))\n\n # print(\"split tokens: \", split_tokens)\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens", "title": "" }, { "docid": "a792aa4dba795261b1829b555186e7b4", "score": "0.69173497", "text": "def tokenize(text):\n \n \n tokens=word_tokenize(text)\n lemmatizer=WordNetLemmatizer()\n clean_tokens=[]\n \n for tok in tokens:\n clean_tok=lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n \n return clean_tokens", "title": "" }, { "docid": "4ece93b9dca8089585cc53fa19c94e33", "score": "0.6914823", "text": "def _parse_into_words(text):\r\n # TODO: \r\n # Check if using spaCy here is better\r\n return re.findall(r\"\\w+\", text.lower())", "title": "" }, { "docid": "9faaf4ee86af883054e9aa52740d93e9", "score": "0.68902045", "text": "def tokenize(document):\n # Get a set of stop words and standard puncuation\n stop_words = set(stopwords.words('english'))\n punctuation = set(string.punctuation)\n # Tokenize the words, remove those that are in the stops list, and make them lowercase\n tokenized_lower = word_tokenize(document.lower())\n # Filter any tokens that are either in 
stop_words list or punctuation list\n return [word for word in tokenized_lower if word not in stop_words and word not in punctuation]", "title": "" }, { "docid": "81fd5429fc22058869c8fa6825ce602b", "score": "0.6881069", "text": "def preprocessing_text(df): \r\n #remove upper cases\r\n df=df.lower()\r\n #replacing new line sign '\\n' with a whitespace ' ' \r\n df=df.replace('\\\\n',' ')\r\n\r\n #for removing stop words\r\n stop_words = set(stopwords.words('english')) \r\n\r\n #for removing punctuations\r\n tokenizer = RegexpTokenizer(r'\\w+')\r\n \r\n #to tokenize the string\r\n word_tokens = tokenizer.tokenize(df) \r\n \r\n #stemming\r\n ps = PorterStemmer()\r\n filtered_words = [ps.stem(w) for w in word_tokens if not w in stop_words] \r\n\r\n return filtered_words", "title": "" }, { "docid": "3414481683a4cf7b14194e7979cb626b", "score": "0.6880825", "text": "def tokenize(text):\n\n stop_words = stopwords.words(\"english\")\n lemmatizer = WordNetLemmatizer()\n\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower())\n \n # tokenize text\n tokens = word_tokenize(text)\n \n # lemmatize and remove stop words\n tokens = [lemmatizer.lemmatize(word) for word in tokens if word not in stop_words]\n\n return tokens", "title": "" }, { "docid": "adcd95cc1ab6fcdd1e5826407e2d1d4b", "score": "0.6878907", "text": "def preprocess(text):\r\n result = \"\"\r\n for token in simple_preprocess(text):\r\n if token not in STOPWORDS and len(token) > 3:\r\n result = result + \" \" + lemmatize_steming(token)\r\n return result", "title": "" }, { "docid": "37fee1ebd60434b032cf1e8180bf0166", "score": "0.68679595", "text": "def tokenize(text):\n # Normalize\n normalized_text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower())\n \n # Tokenize\n words = word_tokenize(normalized_text)\n \n # Remove stop words\n words = [w for w in words if w not in stopwords.words(\"english\")]\n \n # Lemmatize\n lemmatizer = WordNetLemmatizer()\n clean_words = [lemmatizer.lemmatize(w).strip() for w in words]\n clean_words = [lemmatizer.lemmatize(w, pos=\"v\").strip() for w in clean_words]\n \n return clean_words", "title": "" }, { "docid": "946254760593ed32421cd7ebb0c067a5", "score": "0.68642545", "text": "def tokenize(text):\n # convert text to lowercase\n text = text.lower()\n \n tokens = word_tokenize(text)\n \n STOPWORDS = list(set(stopwords.words('english')))\n # remove short words\n tokens = [token for token in tokens if len(token) > 2]\n # remove stopwords\n tokens = [token for token in tokens if token not in STOPWORDS]\n \n lemmatizer = WordNetLemmatizer()\n\n # clean tokens by lemmatizing, making all tokens lowercase and stripping any whitespace\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "title": "" }, { "docid": "72c16ca9bbaf677d90d5b8f092d95e56", "score": "0.686189", "text": "def text_preprocess(text, stop_words, stemmer):\n\n #convert to lowercase\n text = text.lower()\n\n #break up text into words\n word_list = word_tokenize(text)\n\n #remove stopwords and punctuation\n word_list = [word for word in word_list if word not in string.punctuation and word not in stop_words]\n\n #apply a stemmer\n word_list = [stemmer.stem(word) for word in word_list]\n\n return word_list", "title": "" }, { "docid": "b45ae3e13db6ffccfd12b791524ea5f1", "score": "0.6858426", "text": "def tokenize_words(line, lowercase=True, filter_stopwords=True):\n words = _WORD_REGEX.findall(line.lower() if lowercase else line)\n return remove_stop_words(words) if 
filter_stopwords else words", "title": "" }, { "docid": "f8b97d0bf1f91c9736bbbbd222ba08f7", "score": "0.68384707", "text": "def tokenize(text):\n \n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower())\n\n tokens = word_tokenize(text)\n tokens = [word for word in tokens if word not in stopwords.words(\"english\")]\n \n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "title": "" }, { "docid": "1534f7a3904d7dfdb68a371d4af66387", "score": "0.68372935", "text": "def tokenize(text):\n text = text.lower() # lowercase\n words = wordpunct_tokenize(text) # wrap around nltk's tokenizer\n words = [w.strip() for w in words if w.strip()] # rm empty words\n words = [w for w in words if w.isalpha()] # remove non-alphabetic words\n return words", "title": "" }, { "docid": "28f20dcf404729ed85da2f0eff5df135", "score": "0.6823557", "text": "def tokenizer(text):\n # Create a list of the words\n # Convert the words to lowercase\n # Remove the punctuation\n # Remove the stop words\n # Lemmatize Words into root words\n lemmatizer = WordNetLemmatizer()\n sw = set(stopwords.words('english'))\n regex = re.compile(\"[^a-zA-Z ]\")\n re_clean = regex.sub('', text)\n words = word_tokenize(re_clean)\n return [lemmatizer.lemmatize(word.lower()) for word in words if word.lower() not in set(stopwords.words('english'))]", "title": "" }, { "docid": "c02a66fa5e1f34d70750b187165abeea", "score": "0.6816639", "text": "def tokenize(document):\n \n result = nltk.tokenize.word_tokenize(document)\n result = [x.lower() for x in result]\n for item in result.copy(): # iterate through a copy of result so that you don't skip items as you remove items and the indices get messed up\n if item in nltk.corpus.stopwords.words(\"english\") or item in string.punctuation:\n while item in result: result.remove(item)\n\n return result", "title": "" }, { "docid": "a95aa629a9800c78c4412be7799f4e9a", "score": "0.6814781", "text": "def tokenize(text):\n tokens = word_tokenize(text)\n \n # initiate lemmatizer\n lemmatizer = WordNetLemmatizer()\n\n # iterate through each token\n clean_tokens = []\n for tok in tokens:\n \n # lemmatize, normalize case, and remove leading/trailing white space\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "title": "" }, { "docid": "a59fe25b4fdcc92563620a2d969297a9", "score": "0.6799538", "text": "def process_text(text):\r\n \r\n # decontract text\r\n text = expand_contractions(text.lower())\r\n \r\n # keep only letters and numbers\r\n text = re.sub(r'[^a-zA-Z0-9]', ' ', text)\r\n \r\n # tokenize\r\n tokens = word_tokenize(text)\r\n \r\n # nltk pos tags\r\n tokens = nltk.pos_tag(tokens)\r\n \r\n # change nltk to wordnet pos tags and lemmatize\r\n tokens = lemmatize(tokens)\r\n \r\n # remove stop words and return clean tokens\r\n return [token for token in tokens if token not in stop_words]", "title": "" }, { "docid": "1959e69fe6de8d35f1f6ee5fdef19dd8", "score": "0.67887527", "text": "def tokenize(text):\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower().strip()) # Normalize text and remove white space\n words = word_tokenize(text) # Tokenize text\n words = [w for w in words if w not in stopwords.words(\"english\")] # Remove stop words\n words = [WordNetLemmatizer().lemmatize(w) for w in words] # Lemmatize\n words = [WordNetLemmatizer().lemmatize(w, pos='v') for w in words] # Lemmatize verbs by specifying pos\n\n 
return words", "title": "" }, { "docid": "e1a7927b5dce527a5659427b83a0bcb8", "score": "0.67834985", "text": "def tokenize(text):\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text)\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n return clean_tokens", "title": "" }, { "docid": "8c67bede2dae92bc6cc8f12d4fe27a86", "score": "0.6777806", "text": "def tokenize(text):\n lemmatizer = WordNetLemmatizer()\n \n text = re.sub(r'[^a-zA-Z0-9]', ' ', text.lower())\n \n token_list = word_tokenize(text)\n token_list = [token.strip() for token in token_list if token not in stopwords.words(\"english\")]\n token_list = [lemmatizer.lemmatize(token) for token in token_list]\n \n return token_list", "title": "" }, { "docid": "a89c1d4af91aed11a40c76ba6101486e", "score": "0.6761988", "text": "def tokenize_text(input_file,replace):\n text = ''\n with open(input_file, 'r') as content_file:\n text = content_file.read().decode('utf-8')\n if replace==True:\n text = re.sub('[^A-Za-z0-9+-.@]+',' ', text.lower())\n tokens = nltk.word_tokenize(text)\n return tokens", "title": "" }, { "docid": "6ec4602166457f730d730714b0bbc63c", "score": "0.67511344", "text": "def tokenize(self, document):\n return [t.lower() for t in re.findall(r\"\\w+(?:[-']\\w+)*\", document)]", "title": "" }, { "docid": "82374e0a6e9f5e40b000c764d14151be", "score": "0.6746696", "text": "def pre_process_text(text):\n text = text.lower()\n stop_words = set(stopwords.words('english'))\n word_tokens = word_tokenize(text)\n word_tokens = [w for w in word_tokens if not w in stop_words]\n stemmer = stem.PorterStemmer()\n stemmed_text = [stemmer.stem(w) for w in word_tokens]\n filtered_text = [w for w in stemmed_text if not w in stop_words]\n text_len = min(1000, len(filtered_text))\n return \" \".join(filtered_text[0:text_len])", "title": "" }, { "docid": "f888156e39930cd5105800f9933c66f9", "score": "0.67412174", "text": "def preprocess_text(text, lemmatizing=False):\r\n text = text.lower()\r\n \r\n if lemmatizing:\r\n lemmatizer = WordNetLemmatizer()\r\n text = ' '.join([lemmatizer.lemmatize(word, pos='v') for word in text.split()])\r\n \r\n # Remove html tags\r\n text = re.sub(r'<.*?>', '', text)\r\n \r\n # Replace punctuation with spaces\r\n translator = str.maketrans(string.punctuation, ' '*len(string.punctuation))\r\n text = text.translate(translator)\r\n\r\n # Remove stop words\r\n text = ' '.join([word for word in text.split() if word not in stopwords.words('english')])\r\n \r\n # Remove additional white spaces\r\n text = ' '.join(text.split())\r\n \r\n return text", "title": "" }, { "docid": "43f2d5eaf2b6e141a29cb5ec154b7d67", "score": "0.67353785", "text": "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "title": "" }, { "docid": "0f8b51dd17819b5ddd660874b8e9584b", "score": "0.6719576", "text": "def text_to_word_sequence_nltkword(text,\n filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n',\n lower=True, split=\" \"):\n if lower:\n text = text.lower()\n\n tokens = word_tokenize(text)\n return tokens", "title": "" }, { "docid": "a063796f17bfd6b1839ff180e6ca3390", "score": "0.671734", "text": "def preprocess_text(given_text):\n\ttext = given_text.lower() \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Lower all text\n\ttext = 
text.translate(str.maketrans(string.punctuation, ' ' * len(string.punctuation))) \t# Remove punctuation\n\twords = []\n\tfor word in text.split():\n\t\twords.append(word)\n\treturn words", "title": "" }, { "docid": "21c38f0201c147479013ef7321116c90", "score": "0.67173165", "text": "def tokenize(text, remove_stop_words=False, stopwords=stopwords.words('english')):\n # case normalization (i.e. all lower case)\n text = text.lower()\n # punctuation removal\n text = re.sub(r'[^a-zA-Z0-9]', ' ', text)\n # tokenize the text\n tokens = word_tokenize(text)\n # Optional stop words removal.\n # Note: calling stopwords.words('english') here directly causes\n # pickling functions for some reason (which prevents multicore\n # processing).\n if remove_stop_words:\n tokens = [\n token for token in tokens if token not in stopwords\n ]\n\n # wordnet lemmatize\n lemmatizer = WordNetLemmatizer()\n tokens = [lemmatizer.lemmatize(token) for token in tokens]\n\n return tokens", "title": "" }, { "docid": "18408aecc2ef5bae787b837a55719c52", "score": "0.6715173", "text": "def tokenize(text):\n text = text.lower() #lower case\n url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n detected_urls = re.findall(url_regex, text)\n for url in detected_urls:\n text = text.replace(url, \"urlplaceholder\")\n\n text = text.translate(str.maketrans('', '', string.punctuation)) #remove punctuations\n\n tokens = word_tokenize(text) #tokenize\n lemmatizer = WordNetLemmatizer()\n\n #remove stopwords\n clean_tokens = [w for w in tokens if w not in stopwords.words(\"english\")]\n\n #reduce words to their root form\n clean_tokens = [WordNetLemmatizer().lemmatize(w) for w in clean_tokens]\n\n return clean_tokens", "title": "" }, { "docid": "4bcd254637fd7bd7ae76aee0652d5dc2", "score": "0.6706489", "text": "def tokenize(text):\n global TOK\n tokens = TOK.tokenize(text)\n output = {\n 'words': replace_digits(tokens.words()),\n 'offsets': tokens.offsets(),\n 'pos': tokens.pos(),\n 'lemma': tokens.lemmas(),\n 'ner': tokens.entities(), # keep capitalization is good for NER?\n }\n return output", "title": "" }, { "docid": "298ec4830b98f7e8178c37c5fe1806ec", "score": "0.6692397", "text": "def test_tokenize_words(self):\n self.assertEqual(res.tokenize_words(\"am going home\"), [\"am\", \"going\", \"home\"])", "title": "" }, { "docid": "38416884865cad0a81edb113f9675c4d", "score": "0.66890335", "text": "def tokenisation_stopwords(text):\n text = text.split(' ')\n # Removing 1 letter long words and empty strings, and the remaining bad characters\n text = [re.sub('[^a-zA-Z0-9]*|[\\]{1,2}u[a-z0-9]*', '', word) for word in text if len(word) > 1]\n # Removing stopwords\n stop_words_extra =['aint', 'arent', 'cant', 'couldve', 'couldnt', 'didnt', 'doesnt', 'dont', 'hadnt', 'hasnt',\n 'havent', 'hed', 'hell', 'hes', 'heve', 'Id', 'Ill', 'Im', 'Ive', 'isnt', 'itll', 'its',\n 'lets', 'mustnt', 'shed', 'shell', 'shes', 'shouldve', 'shouldnt', 'thatll', 'thats',\n 'theres', 'theyd', 'theyll', 'theyre', 'theyve', 'wasnt', 'wed', 'well', 'were', 'weve',\n 'werent', 'whats', 'wheres', 'wholl', 'whos', 'wont', 'wouldve', 'wouldnt', 'yall', 'youd',\n 'youll','youre', 'youve']\n text = [word for word in text if word not in stopwords.words('english')]\n text = [word for word in text if word not in stop_words_extra]\n return text", "title": "" }, { "docid": "e7b79b783bdc70a181dfe797ba8cfa73", "score": "0.6685498", "text": "def tokenize(text):\n stop_words = stopwords.words(\"english\")\n lemmatizer = WordNetLemmatizer()\n 
\n # normalize case and remove punctuation\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower())\n \n # tokenize text\n tokens = word_tokenize(text)\n \n tokens = [lemmatizer.lemmatize(word) for word in tokens if word not in stop_words]\n \n return tokens", "title": "" }, { "docid": "bfc680dd53367fbf4133be87b46b1ac4", "score": "0.66835004", "text": "def preprocess(tweet_text):\n return tweet_text.lower().split()", "title": "" }, { "docid": "557e58ccaf33012d126ed1a5c244b5f5", "score": "0.6680401", "text": "def tokenize2(text):\n\n # tokenize text\n tokens = word_tokenize(text)\n\n # initiate lemmatizer\n lemmatizer = WordNetLemmatizer()\n\n # iterate through each token\n tok_list = []\n for tok in tokens:\n\n # lemmatize, normalize case, and remove leading/trailing white space\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n tok_list.append(clean_tok)\n\n return tok_list", "title": "" }, { "docid": "85e8142215e24ea7179b3e71d79cfccd", "score": "0.66790193", "text": "def tokenize(self, text, never_split=None, **kwargs):\n if self.normalize_text:\n text = unicodedata.normalize(\"NFKC\", text)\n\n text = text.strip()\n\n never_split = self.never_split + (never_split if never_split is not None else [])\n tokens = []\n\n for mrph in self.juman.apply_to_sentence(text).morphemes:\n token = mrph.text\n\n if self.do_lower_case and token not in never_split:\n token = token.lower()\n\n if self.trim_whitespace:\n if token.strip() == \"\":\n continue\n else:\n token = token.strip()\n\n tokens.append(token)\n\n return tokens", "title": "" }, { "docid": "178c0b867b456776462d97c5302d1f9e", "score": "0.667788", "text": "def tokenize(string, lowercase=False):\n\timport re\n\twords = re.findall(\"([\\w'-+]+)\", string)\n\tif lowercase == True:\n\t\tlowerwords = []\n\t\tfor word in words:\n\t\t\tlowerwords.append(word.lower())\n\t\twords = lowerwords\n\treturn words", "title": "" }, { "docid": "ad4758660e16d88c1ae21606b4d1a9f3", "score": "0.6677427", "text": "def tokenize(text):\n tokens = nltk.tokenize.word_tokenize(text)\n # initiate lemmatizer\n lemma = nltk.stem.WordNetLemmatizer()\n # iterate through tokens lemmatizing and cleaning\n clean_tokens = []\n for token in tokens:\n clean_token = lemma.lemmatize(token).lower().strip()\n clean_tokens.append(clean_token)\n\n return clean_tokens", "title": "" }, { "docid": "a38a1214752fc4acbb7e941cadcac565", "score": "0.66712266", "text": "def tokenize(sentence): \n words = re.findall(r'\\b[a-z]+\\b', sentence)\n return ' '.join( words)", "title": "" }, { "docid": "fffc83ce839f03dadfd4c27f58be1054", "score": "0.6665572", "text": "def tokenize(text):\n\n ## normalize text\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower())\n ## tokenize texts\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n ## lemmatize, lowercase, strip spaces\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n \n return clean_tokens", "title": "" }, { "docid": "900fbef8b898a397dff19f494f941e7a", "score": "0.66593796", "text": "def tokenize(text):\n words = wakati.parse(text).split()\n return words", "title": "" }, { "docid": "67578397eb51500cc4c192d44561bbf2", "score": "0.6653253", "text": "def tokenize(text): \n # kee[ only letters and numbers\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text) \n\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n\n #clean text and split into tokens(words)\n for tok in tokens:\n clean_tok = 
lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "title": "" }, { "docid": "e3efa323177427fbd6a9f6215b6a4e8b", "score": "0.66524595", "text": "def tokenise_string(s, stop=True):\n\n filter_function = lambda x: x not in STOP_WORDS\n if stop == False:\n filter_function = lambda x: True\n\n return map(stem, filter(filter_function, [re.sub(r'[^a-z ]', '', word) for word in s.lower().strip().split()])) # uhuhuh!", "title": "" }, { "docid": "bf832563bb85c2004f5ada1df2107602", "score": "0.66522825", "text": "def tokenize(text):\n text = word_tokenize(text)\n tokens = []\n for word in text:\n word = WordNetLemmatizer().lemmatize(word).lower().strip()\n tokens.append(word)\n return tokens", "title": "" }, { "docid": "cdf6b61f0c79468e3e5331fe5ac4c21a", "score": "0.66431874", "text": "def tokenize(text):\n tokens = nltk.word_tokenize(text)\n tokens = [w for w in tokens if bool(re.search(r\"[^a-zA-Z0-9]\", w)) != True]\n tokens = [WordNetLemmatizer().lemmatize(w, pos='v') for w in tokens if stopwords.words(\"english\")]\n tokens = [PorterStemmer().stem(w) for w in tokens]\n return tokens", "title": "" }, { "docid": "f88f958399f9579f7a3026b0d17f9de1", "score": "0.66381377", "text": "def text_to_word_sequence_stanford(text,\n filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n',\n lower=True, split=\" \"):\n # TODO INSTALL CORENLP\n if lower:\n text = text.lower()\n\n tokens = CoreNLPTokenizer().tokenize(text)\n return tokens", "title": "" }, { "docid": "5cc529633842d4d1ee897be9a0116a5d", "score": "0.66320884", "text": "def tokenize(self, sentence):\n return [e.lower() for e in sentence.split() if len(e) >= 2]", "title": "" }, { "docid": "28584b99d7495d3d3f16ffaaa6f3b497", "score": "0.6627767", "text": "def process_text(text, stem=True):\n text = text.translate(string.punctuation)\n tokens = word_tokenize(text)\n\n if stem:\n stemmer = PorterStemmer()\n tokens = [stemmer.stem(t) for t in tokens]\n return tokens", "title": "" }, { "docid": "fe311e9ad463407b7e06b7c186cae2de", "score": "0.66265076", "text": "def tokenize(text):\n\n\ttokenized_text = text.lower()\n\ttokenized_text = re.findall(r'[a-z0-9]+',\n\t\t\t\t\t\t\t\ttokenized_text) # splits string with delimiter being everything except alphabetical letters\n\treturn tokenized_text", "title": "" }, { "docid": "cfbaec85ecd967e5779a97e0a2b8bb88", "score": "0.6626497", "text": "def preprocess_gensim(doc):\n doc = doc.lower() # Lower the text.\n doc = word_tokenize(doc) # Split into words.\n doc = [w for w in doc if not w in stop_words] # Remove stopwords.\n doc = [w for w in doc if w.isalpha()] # Remove numbers and punctuation.\n return doc", "title": "" }, { "docid": "f998dba3cb6a706351cbdf9063520dae", "score": "0.6623796", "text": "def tokenize(text: str) -> [str]:\n vectorizer = CountVectorizer(analyzer='word')\n vectorizer.fit_transform([text])\n tokens = vectorizer.get_feature_names()\n # stems = self.stem_tokens(tokens, stemmer)\n return tokens", "title": "" }, { "docid": "2650f03d71b39db1245b53ba31175496", "score": "0.66184276", "text": "def word_tokenize(self,words):\n tokens = re.findall(r\"[a-z]+-?[a-z]+\", words.lower(),\n flags = re.UNICODE | re.LOCALE)\n return(tokens)", "title": "" }, { "docid": "5df3d706d8f0bf7e1678f765a99a16d5", "score": "0.6617486", "text": "def pre_processing(text):\r\n sent_tokenize_list = sent_tokenize(text)\r\n #print sent_tokenize_list\r\n #print len(sent_tokenize_list)\r\n #tokenise words\r\n #print stop_words\r\n words=word_tokenize(text)\r\n stop_words = 
str(stopwords.words('english'))\r\n alpha=stop_words.replace(\"u'\", \"\")\r\n #print words\r\n result = []\r\n #print alpha\r\n #remove stop words\r\n for item in words:\r\n if item not in alpha:\r\n result.append(item)\r\n #print \"Filtered\",result\r\n fil=str(result)\r\n #remove punctuation\r\n repstr=\" \" * 32\r\n table=string.maketrans(string.punctuation,repstr)\r\n s=fil.translate(table)\r\n #return s\r\n\r\n\r\n #lemmatizing\r\n lemmatizer=WordNetLemmatizer()\r\n h=lemmatizer.lemmatize(s)\r\n #print \"Lemma\",lemmatizer.lemmatize(s)\r\n #stemming\r\n wordss=word_tokenize(h)\r\n ps=PorterStemmer()\r\n list1=[]\r\n for i in wordss:\r\n k=(ps.stem(i))\r\n list1.append(k)\r\n #print list1\r\n final= ' '.join(list1)\r\n finall=str(final)\r\n return finall", "title": "" }, { "docid": "030df38d932c8d1f99557f83534c7871", "score": "0.6612498", "text": "def tokenize(document):\n # Converting all words to lower case\n document = document.lower()\n # tokeniizing the string to words\n words = nltk.tokenize.word_tokenize(document)\n final_words = []\n # Add the word to the string if it is not a punctuatuion or an English stop word\n for word in words:\n if word not in string.punctuation and word not in nltk.corpus.stopwords.words(\"english\"):\n final_words.append(word)\n\n return final_words", "title": "" }, { "docid": "47f5434f7e34a6cc8614686aaaee630f", "score": "0.66085696", "text": "def words(text): \n return re.findall('[a-z]+', text.lower())", "title": "" }, { "docid": "5d14c472a7605272cd3154286d04ee50", "score": "0.6606162", "text": "def tokenize(document):\n\n # finding the tokens and seves them in a lowercase list \n tokens = [word.lower() for word in nltk.word_tokenize(document)]\n\n # intialize a filter list of words that are not stop words \n filter_list = []\n\n # initialize stop words and punctuations\n stop_words = nltk.corpus.stopwords.words(\"english\")\n punctuation = [punct for punct in string.punctuation] \n\n # go over all the words anf filter out the stop words and punctuation \n for word in tokens:\n if word in stop_words:\n continue \n\n elif word in punctuation:\n continue \n\n else:\n filter_list.append(word)\n\n\n return filter_list\n #raise NotImplementedError", "title": "" }, { "docid": "70984638567ac1d800112f5cf3d37c65", "score": "0.6599118", "text": "def tokenize(text):\n # replace all token delimiters with a costmmon delimiter - space and split it\n # make all lowerscore and replace underscores with space first\n text = re.sub('_', ' ', text.lower())\n tokens = re.findall(r'[\\w]+', text)\n return list(tokens)", "title": "" }, { "docid": "fd2378ae8b33a66c9154a4055061db68", "score": "0.65977204", "text": "def tokenize(text):\n\n # remove punctuation and lowercase\n text = re.sub(r'[^\\w\\s]','', text).lower()\n # split into tokens\n tokens = word_tokenize(text)\n # lemmatize\n lemmatizer = WordNetLemmatizer()\n clean_tokens = [lemmatizer.lemmatize(word) for word in tokens]\n return clean_tokens", "title": "" }, { "docid": "cd1e0208d44360f094dfef52fb9eda9d", "score": "0.6597287", "text": "def initial_clean(text):\n text = re.sub(\"((\\S+)?(http(s)?)(\\S+))|((\\S+)?(www)(\\S+))|((\\S+)?(\\@)(\\S+)?)\", \" \", text)\n text = re.sub(\"[^a-zA-Z ]\", \"\", text)\n text = text.lower() # lower case the text\n text = nltk.word_tokenize(text)\n return text", "title": "" }, { "docid": "cd1e0208d44360f094dfef52fb9eda9d", "score": "0.6597287", "text": "def initial_clean(text):\n text = re.sub(\"((\\S+)?(http(s)?)(\\S+))|((\\S+)?(www)(\\S+))|((\\S+)?(\\@)(\\S+)?)\", \" \", 
text)\n text = re.sub(\"[^a-zA-Z ]\", \"\", text)\n text = text.lower() # lower case the text\n text = nltk.word_tokenize(text)\n return text", "title": "" }, { "docid": "dafa1ec87870f635bf0c0a57cd664b6b", "score": "0.659701", "text": "def process_text(self, text):\n\n self.__tokens.clear()\n\n tokenize = [t.lower().strip(\":,.!?\") for t in text.split()]\n\n filtered_tokens = self.__filter_text(tokenize)\n\n self.__tokens.extend(filtered_tokens)\n self.__all_tokens.extend(filtered_tokens)", "title": "" }, { "docid": "b25d0bcc5a99c0c30eb1024b3d560dbd", "score": "0.65837455", "text": "def remove_stopwords(text, is_lower_case=False, stopwords=stopword_list):\n tokens = tokenizer.tokenize(text)\n tokens = [token.strip() for token in tokens]\n if is_lower_case:\n filtered_tokens = [token for token in tokens if token not in stopwords]\n else:\n filtered_tokens = [token for token in tokens if token.lower() not in stopwords]\n filtered_text = ' '.join(filtered_tokens) \n return filtered_text", "title": "" }, { "docid": "2b396d5ab5db7fd97738c2f1d928bf1b", "score": "0.65648067", "text": "def tokenize(document):\n return [word.lower() for word in nltk.word_tokenize(document) \n if (word.lower() not in nltk.corpus.stopwords.words(\"english\")\n and all([char not in string.punctuation for char in word.lower()]))]", "title": "" }, { "docid": "0a17dfc5c5680f327ea49982fca2d525", "score": "0.654812", "text": "def tokenize(texts):\n # type: (pd.Series) -> pd.Series\n return texts.apply(lambda x: re.findall(r'\\b\\w+\\b', x.lower()))", "title": "" }, { "docid": "80ce959a388e6bf3b0e4225142325c8d", "score": "0.65455455", "text": "def do_tokenize(self, text):\n text = text.lower() if self.lc else text\n if self.collapse_hashtags:\n text = re.sub('#\\S+', 'THIS_IS_A_HASHTAG', text)\n else:\n text = re.sub('#(\\S+)', r'HASHTAG_\\1', text)\n if self.collapse_mentions:\n text = re.sub('@\\S+', 'THIS_IS_A_MENTION', text)\n if self.collapse_urls:\n text = re.sub('http\\S+', 'THIS_IS_A_URL', text)\n if self.limit_repeats:\n text = re.sub(r'(.)\\1\\1\\1+', r'\\1', text)\n toks = []\n for tok in text.split():\n tok = re.sub(r'^(' + punc_re + '+)', r'\\1 ', tok)\n tok = re.sub(r'(' + punc_re + '+)$', r' \\1', tok)\n for subtok in tok.split():\n if self.retain_punc_toks or re.search('\\w', subtok):\n toks.append(subtok)\n return toks", "title": "" }, { "docid": "13b52ce92e4baabc2e59c92b6be62a25", "score": "0.6525562", "text": "def __call__(self, text):\n\n tokens = text.split()\n\n for i in range(len(tokens) - 1):\n if is_dash_word(tokens[i]):\n\n # Require the first character of the next word is an alpha\n if not tokens[i + 1][0].isalpha():\n continue\n\n # Skip words with more than 2 caps\n if len([x for x in tokens[i + 1] if x == x.upper()]) >= 2:\n continue\n\n word = \"{}{}\".format(tokens[i][:-1], tokens[i + 1])\n\n test_word = ''.join([x for x in word if x.isalpha()])\n\n # Only combine sensible english words\n if test_word.lower() not in self.english_words:\n continue\n\n self.logger.info(\"Merging tokens %s %s %s\"\n % (tokens[i], tokens[i + 1], word))\n\n tokens[i] = word\n tokens[i + 1] = ''\n\n doc = ' '.join((x for x in tokens if x))\n return doc", "title": "" }, { "docid": "f70b9ccacb15bba30f78d16053fda3bb", "score": "0.6524916", "text": "def _words_tokenizer(self):\n words = []\n for sentence in self.temp_list:\n words.append(nltk.word_tokenize(sentence))\n self.temp_list = words", "title": "" }, { "docid": "eeaf2ae88e2b73407262e825cb084bf0", "score": "0.65185577", "text": "def apply_all(text):\n return 
stem_words(remove_stop_words(initial_clean(text)))", "title": "" }, { "docid": "eeaf2ae88e2b73407262e825cb084bf0", "score": "0.65185577", "text": "def apply_all(text):\n return stem_words(remove_stop_words(initial_clean(text)))", "title": "" } ]
b63d48e3c4fe997cca7ed55c2e825fdf
Converts all schemes in a given object to its proper swagger representation.
[ { "docid": "8f948b5ea876011ccdf22b5f90657f56", "score": "0.5604471", "text": "def _extract_schemas(self, obj):\n definitions = {}\n if isinstance(obj, list):\n for i, o in enumerate(obj):\n obj[i], definitions_ = self._extract_schemas(o)\n definitions.update(definitions_)\n if isinstance(obj, dict):\n for k, v in obj.items():\n obj[k], definitions_ = self._extract_schemas(v)\n definitions.update(definitions_)\n if inspect.isclass(obj):\n # Object is a model. Convert it to valid json and get a definition object\n if not issubclass(obj, Schema):\n raise ValueError('\"{0}\" is not a subclass of the schema model'.format(obj))\n definition = obj.definitions()\n description = parse_schema_doc(obj, definition)\n if description:\n definition['description'] = description\n # The definition itself might contain models, so extract them again\n definition, additional_definitions = self._extract_schemas(definition)\n definitions[obj.__name__] = definition\n definitions.update(additional_definitions)\n obj = obj.reference()\n return obj, definitions", "title": "" } ]
[ { "docid": "7f31adca643df64f561772f8c5bcdf1d", "score": "0.6359533", "text": "def swaggerish(self):\n\n # Better chosen dinamically from endpoint.py\n schemes = ['http']\n if self._customizer._production:\n schemes = ['https']\n\n # A template base\n output = {\n \"swagger\": \"2.0\",\n \"info\": {\n \"version\": \"0.0.1\",\n \"title\": \"Your application name\",\n },\n \"schemes\": schemes,\n # \"host\": \"localhost\" # chosen dinamically\n \"basePath\": \"/\",\n \"securityDefinitions\": {\n \"Bearer\": {\n \"type\": \"apiKey\",\n \"name\": \"Authorization\",\n \"in\": \"header\"\n }\n # \"OauthSecurity\": {\n # \"type\": \"oauth2\",\n # \"tokenUrl\": \"https://oauth.simple.api/token\",\n # \"flow\": \"accessCode\",\n # \"authorizationUrl\": \"https://blabla/authorization\",\n # \"scopes\": {\n # \"admin\": \"Admin scope\",\n # \"user\": \"User scope\"\n # }\n # }\n # TODO: check about scopes (roles?)\n },\n \"security\": [\n {\n \"Bearer\": []\n }\n ]\n }\n\n ###################\n # Set existing values\n proj = self._customizer._configurations['project']\n if 'version' in proj:\n output['info']['version'] = proj['version']\n if 'title' in proj:\n output['info']['title'] = proj['title']\n\n ###################\n output['definitions'] = self.read_definitions()\n output['consumes'] = [JSON_APPLICATION]\n output['produces'] = [JSON_APPLICATION]\n\n ###################\n # Read endpoints swagger files\n for key, endpoint in enumerate(self._endpoints):\n\n endpoint.custom['methods'] = {}\n endpoint.custom['params'] = {}\n\n for method, file in endpoint.methods.items():\n # add the custom part to the endpoint\n self._endpoints[key] = \\\n self.read_my_swagger(file, method, endpoint)\n\n ###################\n # Save query parameters globally\n self._customizer._query_params = self._qparams\n self._customizer._parameter_schemas = self._parameter_schemas\n output['paths'] = self._paths\n\n ###################\n tags = []\n for tag, desc in self._customizer._configurations['tags'].items():\n tags.append({'name': tag, 'description': desc})\n output['tags'] = tags\n\n self._customizer._original_paths = self._original_paths\n return output", "title": "" }, { "docid": "5e0ecf837a7a8c5fb3ccc815beeee2e1", "score": "0.5981558", "text": "def api_schema(request):\n generator = CorniceSwagger(get_services())\n # function docstrings are used to create the route's summary in Swagger-UI\n generator.summary_docstrings = True\n generator.default_security = get_security\n swagger_base_spec = {\n \"host\": get_magpie_url(request.registry),\n \"schemes\": [request.scheme]\n }\n swagger_base_spec.update(SecurityDefinitionsAPI)\n generator.swagger = swagger_base_spec\n json_api_spec = generator.generate(title=TitleAPI, version=__meta__.__version__, info=InfoAPI)\n return json_api_spec", "title": "" }, { "docid": "d5f2e1a445bb6cb533b83bdf72ae1963", "score": "0.58855915", "text": "def input_to_swagger(self):\n return {\"type\": \"object\", \"example\": {}}", "title": "" }, { "docid": "8827da09ff4f89d4f22cf216b7c3da73", "score": "0.5883961", "text": "async def openapi_schema(request: web.Request) -> web.Response:\n schema_format = request.match_info.get(\"schema_format\")\n schema = get_openapi_schema(request.config_dict)\n\n if schema_format == \"json\":\n return web.json_response(schema)\n\n if schema_format == \"yaml\":\n safe_dumper = getattr(yaml, \"CSafeDumper\", yaml.SafeDumper)\n return web.Response(\n text=yaml.dump(schema, Dumper=safe_dumper),\n content_type=\"application/yaml\",\n )\n\n raise 
ConfigurationError(\n f\"Schema format {schema_format} not supported at a moment.\"\n )", "title": "" }, { "docid": "06a543bc1959e6e413a56bfab9070bfe", "score": "0.58181477", "text": "def _to_swagger(base=None, description=None, resource=None, options=None):\n # type: (Dict[str, str], str, Resource, Dict[str, str]) -> Dict[str, str]\n definition = dict_filter(base or {}, options or {})\n\n if description:\n definition['description'] = description.format(\n name=getmeta(resource).name if resource else \"UNKNOWN\"\n )\n\n if resource:\n definition['schema'] = {\n '$ref': '#/definitions/{}'.format(getmeta(resource).resource_name)\n }\n\n return definition", "title": "" }, { "docid": "bdc39edfe0826acf2f0d1c535cc99754", "score": "0.5752418", "text": "def init_bundles(schema: \"BaseOpenAPISchema\") -> Dict[str, CaseInsensitiveDict]:\n output: Dict[str, CaseInsensitiveDict] = {}\n for endpoint in schema.get_all_endpoints():\n output.setdefault(endpoint.path, CaseInsensitiveDict())\n output[endpoint.path][endpoint.method.upper()] = Bundle(endpoint.verbose_name) # type: ignore\n return output", "title": "" }, { "docid": "cf5022e300f0eec55dfafcfc8f006237", "score": "0.5693794", "text": "def _custom_openapi_method(zelf: FastAPI) -> dict:\n if not zelf.openapi_schema:\n\n if zelf.redoc_url:\n desc = compose_long_description(zelf.description)\n else:\n desc = zelf.description\n openapi_schema = get_openapi(\n title=zelf.title,\n version=zelf.version,\n openapi_version=zelf.openapi_version,\n description=desc,\n routes=zelf.routes,\n tags=zelf.openapi_tags,\n servers=zelf.servers,\n )\n\n add_vendor_extensions(openapi_schema)\n patch_openapi_specs(openapi_schema)\n zelf.openapi_schema = openapi_schema\n\n return zelf.openapi_schema", "title": "" }, { "docid": "8805a9f0cb3b278e895371b08c714085", "score": "0.56355596", "text": "def introspect(self, request):\n # Generate a path based on the standard automate URLs above\n patterns = [path(request.path, include(self.urls()))]\n generator = SchemaGenerator(patterns=patterns)\n return Response(generator.get_schema())", "title": "" }, { "docid": "bd4295718cd2554dfb8240815cd1f1b9", "score": "0.55284894", "text": "def _generate_openapi_object(document: OpenApiDocument) -> OrderedDict:\n parsed_url = urlparse.urlparse(document.url)\n\n swagger = OrderedDict()\n\n swagger['swagger'] = '2.0'\n swagger['info'] = OrderedDict()\n swagger['info']['title'] = document.title\n swagger['info']['description'] = document.description\n swagger['info']['version'] = document.version\n\n if parsed_url.netloc:\n swagger['host'] = parsed_url.netloc\n if parsed_url.scheme:\n swagger['schemes'] = [parsed_url.scheme]\n\n swagger['paths'] = _get_paths_object(document)\n\n return swagger", "title": "" }, { "docid": "0cf7f89936610531cb39c98726fb9e33", "score": "0.5463968", "text": "def custom_openapi():\n if app.openapi_schema:\n return app.openapi_schema\n openapi_schema = get_openapi(\n title=\"The VICC Gene Normalizer\",\n version=__version__,\n openapi_version=\"3.0.3\",\n description=\"Normalize gene terms.\",\n routes=app.routes\n )\n # openapi_schema['info']['license'] = { # TODO\n # \"name\": \"name of our license\",\n # \"url\": \"http://www.our-license-tbd.com\"\n # }\n openapi_schema['info']['contact'] = { # TODO\n \"name\": \"Alex H. 
Wagner\",\n \"email\": \"Alex.Wagner@nationwidechildrens.org\"\n }\n app.openapi_schema = openapi_schema\n return app.openapi_schema", "title": "" }, { "docid": "013af3547f1ae774991c58db2bd555a3", "score": "0.5462466", "text": "def generate_swagger_object(document):\n parsed_url = urlparse.urlparse(document.url)\n\n swagger = OrderedDict()\n\n swagger['swagger'] = '2.0'\n swagger['info'] = OrderedDict()\n swagger['info']['title'] = document.title\n swagger['info']['version'] = '' # Required by the spec\n\n if parsed_url.netloc:\n swagger['host'] = parsed_url.netloc\n if parsed_url.scheme:\n swagger['schemes'] = [parsed_url.scheme]\n\n swagger['paths'] = _get_paths_object(document)\n swagger['definitions'] = _get_definitions_object(document)\n\n return swagger", "title": "" }, { "docid": "4e8e14db9ea8cca34c9b2979ed91a76c", "score": "0.5427914", "text": "def update_schema(schema: dict):\n definitions = schema[\"definitions\"]\n for name, definition in definitions.items():\n version = None\n if \"x-kubernetes-group-version-kind\" in definition:\n k8s_versions = definition[\"x-kubernetes-group-version-kind\"]\n if len(k8s_versions) != 1:\n print(\"warning, not correctly handling\", name)\n\n k8s_version = k8s_versions[0]\n version = os.path.join(k8s_version[\"group\"], k8s_version[\"version\"])\n\n if \"properties\" in definition:\n # remove all fields named status\n # this might have some false positives, lets see what goes missing :)\n if \"status\" in definition[\"properties\"]:\n definition[\"properties\"].pop(\"status\")\n\n for prop_name, prop in definition[\"properties\"].items():\n # set the default of properties that are enums\n # with only one possible value to that value\n # This sets the `kind` property on all objects.\n if \"enum\" in prop and len(prop[\"enum\"]) == 1:\n prop.setdefault(\"default\", prop[\"enum\"][0])\n\n # set api version\n if prop_name == \"apiVersion\":\n if version:\n prop.setdefault(\"default\", version)\n\n # inline IntOrString, avoids creation of IntOrString class\n # this allows assigning int or string directly, instead of this\n # ref_int_or_string = \"#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString\"\n # TODO using \"oneOf\" in properties is not supported by datamode-code-generator\n # see https://github.com/koxudaxi/datamodel-code-generator/issues/139\n # if \"$ref\" in prop:\n # ref = prop[\"$ref\"]\n\n # if ref == ref_int_or_string:\n # prop.pop(\"$ref\")\n\n # prop.update(definitions[ref.split('/')[-1]])\n\n # print(prop)\n # \"$ref\":\n # \"oneOf\": [\n # {\n # \"type\": \"string\"\n # },\n # {\n # \"type\": \"integer\"\n # }\n # ]", "title": "" }, { "docid": "b2904b37c8aa0a312940aeea1b25bf9c", "score": "0.53712267", "text": "def serialize(self, model_object):\n context = dict()\n for name, definition in model_object.definitions(props=False, rels=True):\n context[name] = {\n '@id': definition.absolute_uri(),\n '@type': '@id',\n }\n\n for name, definition in model_object.definitions(props=True, rels=False):\n context[name] = {\n '@id': definition.absolute_uri(),\n }\n\n for name, value in model_object.values.items():\n if isinstance(value, Uri) or isinstance(value, Url):\n context[name] = {\n '@id': model_object.definition(name).absolute_uri(),\n '@type': '@id',\n }\n\n deflated = self.deflate(model_object, props=True, rels=True)\n deflated['@context'] = {k: v for k, v in context.items() if k in deflated}\n deflated['@context']['@base'] = Ori.uri\n deflated['@context'][model_object.verbose_name()] = model_object.absolute_uri()\n 
deflated['@type'] = model_object.verbose_name()\n\n ori_identifier = model_object.get_short_identifier()\n if ori_identifier:\n deflated['@id'] = ori_identifier\n\n return deflated", "title": "" }, { "docid": "04aea1c336857bbe5041af8ce79a8c9c", "score": "0.5365679", "text": "def doc_formatter(target_object):\n doc = target_object.__doc__\n\n # Handle non-pydantic objects\n if doc is None:\n new_doc = ''\n elif 'Parameters\\n' in doc or not (issubclass(target_object, BaseSettings) or issubclass(target_object, BaseModel)):\n new_doc = doc\n else:\n type_formatter = {'boolan': 'bool',\n 'string': 'str',\n 'integer': 'int'\n }\n # Add the white space\n if not doc.endswith('\\n\\n'):\n doc += \"\\n\\n\"\n new_doc = dedent(doc) + \"Parameters\\n----------\\n\"\n target_schema = target_object.schema()\n # Go through each property\n for prop_name, prop in target_schema['properties'].items():\n # Catch lookups for other Pydantic objects\n if '$ref' in prop:\n # Pre 0.28 lookup\n lookup = prop['$ref'].split('/')[-1]\n prop = target_schema['definitions'][lookup]\n elif 'allOf' in prop:\n # Post 0.28 lookup\n try:\n # Validation, we don't need output, just the object\n _JsonRefModel(**prop)\n lookup = prop['allOf'][0]['$ref'].split('/')[-1]\n prop = target_schema['definitions'][lookup]\n except ValidationError:\n # Doesn't conform, pass on\n pass\n # Get common properties\n prop_type = prop[\"type\"]\n new_doc += prop_name + \" : \"\n prop_desc = prop['description']\n\n # Check for enumeration\n if 'enum' in prop:\n new_doc += '{' + ', '.join(prop['enum']) + '}'\n\n # Set the name/type of object\n else:\n if prop_type == 'object':\n prop_field = prop['title']\n else:\n prop_field = prop_type\n new_doc += f'{type_formatter[prop_field] if prop_field in type_formatter else prop_field}'\n\n # Handle Classes so as not to re-copy pydantic descriptions\n if prop_type == 'object':\n if not ('required' in target_schema and prop_name in target_schema['required']):\n new_doc += \", Optional\"\n prop_desc = f\":class:`{prop['title']}`\"\n\n # Handle non-classes\n else:\n if 'default' in prop:\n default = prop['default']\n try:\n # Get the explicit default value for enum classes\n if issubclass(default, Enum):\n default = default.value\n except TypeError:\n pass\n new_doc += f\", Default: {default}\"\n elif not ('required' in target_schema and prop_name in target_schema['required']):\n new_doc += \", Optional\"\n\n # Finally, write the detailed doc string\n new_doc += \"\\n\" + indent(prop_desc, \" \") + \"\\n\"\n\n # Assign the new doc string\n target_object.__doc__ = new_doc", "title": "" }, { "docid": "832b92534b8a1ea3810d394d8341d5dc", "score": "0.53519154", "text": "def swagger(\n app,\n url_parser=flask_url_parser,\n rule_parser=flask_rule_parser,\n process_doc=_sanitize,\n prefix=None,\n from_file_keyword=None,\n template=None,\n):\n output = {\n \"swagger\": \"2.0\",\n \"info\": {\"version\": \"0.0.0\", \"title\": \"Cool product name\"},\n }\n\n paths = defaultdict(dict)\n definitions = defaultdict(dict)\n if template is not None:\n output.update(template)\n # check for template provided paths and definitions\n for k, v in output.get(\"paths\", {}).items():\n paths[k] = v\n for k, v in output.get(\"definitions\", {}).items():\n definitions[k] = v\n output[\"paths\"] = paths\n output[\"definitions\"] = definitions\n\n swagger_endpoints = url_parser(app, prefix)\n for rule, methods in swagger_endpoints.items():\n rule = rule_parser(rule)\n\n operations = dict()\n for verb, method in methods:\n summary, 
description, swag = _parse_docstring(\n method, process_doc, from_file_keyword\n )\n # Do we have docstring to parse?\n if swag is not None:\n defs = swag.get(\"definitions\", [])\n defs = _extract_definitions(defs)\n params = swag.get(\"parameters\", [])\n defs += _extract_definitions(params)\n responses = swag.get(\"responses\", {})\n responses = {str(key): value for key, value in responses.items()}\n if responses is not None:\n defs = defs + _extract_definitions(responses.values())\n for definition in defs:\n def_id = definition.pop(\"id\", None)\n if def_id is not None:\n definitions[def_id].update(definition)\n operation = dict(\n summary=summary, description=description, responses=responses\n )\n # parameters - swagger ui dislikes empty parameter lists\n if len(params) > 0:\n operation[\"parameters\"] = params\n # other optionals\n for key in OPTIONAL_FIELDS:\n if key in swag:\n operation[key] = swag.get(key)\n operations[verb] = operation\n\n if len(operations):\n paths[rule].update(operations)\n return output", "title": "" }, { "docid": "4d489759e5562c97e81a632793aa0fad", "score": "0.5314773", "text": "def do_schema(self):\n\n name = '%s.%s.%s' % (BACKEND_PACKAGE, 'rest', 'schema')\n module = self._meta.get_module_from_string(name)\n schema_class = getattr(module, 'RecoverSchema')\n\n self._schema_endpoint = EndpointElements(\n cls=schema_class,\n exists=True,\n custom={\n 'methods': {\n 'get': ExtraAttributes(auth=None),\n # WHY DOES POST REQUEST AUTHENTICATION\n # 'post': ExtraAttributes(auth=None)\n }\n },\n methods={}\n )\n\n # TODO: find a way to map authentication\n # as in the original endpoint for the schema 'get' method\n\n # TODO: find a way to publish on swagger the schema\n # if endpoint is enabled to publish and the developer asks for it", "title": "" }, { "docid": "75a94ca26ee4608ad96e95526900143b", "score": "0.5308563", "text": "def schema_factory(cls, obj, **kwargs):\n if isinstance(obj, str):\n openapi_type = cls._openapi_type(obj)\n return openapi.Schema(\n type=openapi_type,\n **kwargs\n )\n\n if isinstance(obj, list):\n if len(obj) != 1:\n raise Exception('List types must have exactly one element to specify the schema of `items`')\n return openapi.Schema(\n type=openapi.TYPE_ARRAY,\n items=cls.schema_factory(obj[0]),\n **kwargs\n )\n\n if isinstance(obj, dict):\n return openapi.Schema(\n type=openapi.TYPE_OBJECT,\n properties={\n k: cls.schema_factory(sub_obj)\n for k, sub_obj\n in obj.items()\n },\n **kwargs\n )\n\n raise Exception(f'Unhandled type \"{type(obj)}\" for {obj}')", "title": "" }, { "docid": "76c9a1ae1b13c714488cd03e9b6fd6f0", "score": "0.53071064", "text": "def to_api(_object):\n try:\n return _object.to_api()\n except AttributeError:\n return _object", "title": "" }, { "docid": "2d745dd83dc2c3e996b3f7afa5662e14", "score": "0.5284773", "text": "def format_schema(self, schema):\n if getattr(schema, 'properties', {}):\n schema.properties = OrderedDict(\n (self.format_string(key), self.format_schema(openapi.resolve_ref(val, self.components)) or val)\n for key, val in schema.properties.items()\n )\n\n if getattr(schema, 'required', []):\n schema.required = [self.format_string(p) for p in schema.required]", "title": "" }, { "docid": "0648a7176d6cdefd2c4c82e00853639c", "score": "0.5263264", "text": "def to_representation(self, obj):\n urls = list(obj.all())\n return [(url.url, url.description) for url in urls]", "title": "" }, { "docid": "a616f6bb8a7a482fbd6c43fa359003cb", "score": "0.52482826", "text": "def default(self, obj):\n @singledispatch\n def 
default(obj):\n super(GoodJSONEncoder, self).default(obj)\n\n @default.register(ObjectId)\n @default.register(UUID)\n def conv_objid(obj):\n return text_type(obj)\n\n @default.register(datetime)\n def conv_datetime(obj):\n if self.epoch_mode:\n return int(\n (timegm(obj.timetuple()) * 1000) +\n ((obj.microsecond) / 1000)\n )\n return obj.isoformat()\n\n @default.register(DBRef)\n def conv_dbref(obj):\n doc = obj.as_doc()\n ret = {\n \"collection\": doc[\"$ref\"],\n \"id\": self.default(doc[\"$id\"])\n }\n if obj.database:\n ret[\"db\"] = doc[\"$db\"]\n ret.update({\n key: value\n for (key, value) in doc.items()\n if key[0] != \"$\"\n })\n return ret\n\n @default.register(RE_TYPE)\n @default.register(Regex)\n def conv_regex(obj):\n flags_map = {\n \"i\": obj.flags & re.IGNORECASE,\n \"l\": obj.flags & re.LOCALE,\n \"m\": obj.flags & re.MULTILINE,\n \"s\": obj.flags & re.DOTALL,\n \"u\": obj.flags & re.UNICODE,\n \"x\": obj.flags & re.VERBOSE\n }\n flags = [key for (key, contains) in flags_map.items() if contains]\n ret = {\"regex\": obj.pattern}\n if flags:\n ret[\"flags\"] = (\"\").join(flags)\n return ret\n\n @default.register(MinKey)\n def conv_minkey(obj):\n return {\"minKey\": True}\n\n @default.register(MaxKey)\n def conv_maxkey(obj):\n return {\"maxKey\": True}\n\n @default.register(Timestamp)\n def conv_timestamp(obj):\n return {\"time\": obj.time, \"inc\": obj.inc}\n\n @default.register(Code)\n def conv_code(obj):\n return {\"code\": str(obj), \"scope\": obj.scope}\n\n @default.register(Binary)\n def conv_bin(obj):\n return {\n \"data\": b64encode(obj).decode(\"utf-8\"),\n \"type\": obj.subtype\n }\n\n if PY3:\n @default.register(bytes)\n def conv_bytes(obj):\n return {\"data\": b64encode(obj).decode(\"utf-8\"), \"type\": 0}\n\n return default(obj)", "title": "" }, { "docid": "d87da8dd68665de8db8341c3c2a7a877", "score": "0.52324057", "text": "def _pack_document(self, objects, type_):\n\n def pack_object(obj):\n packed_obj = {'type': type_}\n attributes = {}\n relationships = {}\n for key, value in obj.items():\n if isinstance(value, JsonApiRelationship):\n relationships[key] = value.as_linkage()\n elif key == 'id':\n packed_obj['id'] = value\n else:\n attributes[key] = value\n\n if attributes:\n packed_obj['attributes'] = attributes\n\n if relationships:\n packed_obj['relationships'] = relationships\n\n return packed_obj\n\n try:\n if isinstance(objects, list):\n packed_objects = [pack_object(obj) for obj in objects]\n else:\n packed_objects = pack_object(objects)\n except KeyError:\n raise JsonApiException('Invalid resource info format', True)\n\n return {'data': packed_objects}", "title": "" }, { "docid": "ef7f41b16d4852d42671ea3a74628ba4", "score": "0.51945394", "text": "def to_swagger(self, bound_resource=None):\n return _to_swagger(\n {\n 'name': self.name,\n 'in': self.in_.value,\n 'type': str(self.type) if self.type else None,\n },\n description=self.description,\n resource=bound_resource if self.resource is DefaultResource else self.resource,\n options=self.options\n )", "title": "" }, { "docid": "82da09e9d9574a4b8a097e542a76517b", "score": "0.5189164", "text": "def _gen_endpoint(self, name, obj, description, version=1, schema=None):\n # check for invalid PO names\n _check_endpoint_name(name)\n\n if description is None:\n description = obj.__doc__.strip() or \"\" if isinstance(obj.__doc__, str) else \"\"\n\n endpoint_object = CustomQueryObject(query=obj, description=description,)\n\n return {\n \"name\": name,\n \"version\": version,\n \"description\": description,\n 
\"type\": \"model\",\n \"endpoint_obj\": endpoint_object,\n \"dependencies\": endpoint_object.get_dependencies(),\n \"methods\": endpoint_object.get_methods(),\n \"required_files\": [],\n \"required_packages\": [],\n \"schema\": copy.copy(schema),\n }", "title": "" }, { "docid": "d005473445a67bb56c71a08378ec1550", "score": "0.51735437", "text": "def printSchemes():\n\tschemes=getSchemeList()\n\tfor s in schemes:\n\t\tprint \"scheme_name: {} scheme_options: {}\".format(s.name,s.options)", "title": "" }, { "docid": "7af45c8b9a3def744328cd800ddf2b4d", "score": "0.5167875", "text": "def map_serializer(self, auto_schema, direction):\n if isinstance(self.target.serializers, dict):\n sub_components = self._get_explicit_sub_components(auto_schema, direction)\n else:\n sub_components = self._get_implicit_sub_components(auto_schema, direction)\n\n return {\n 'oneOf': [ref for _, ref in sub_components],\n 'discriminator': {\n 'propertyName': self.target.resource_type_field_name,\n 'mapping': {resource_type: ref['$ref'] for resource_type, ref in sub_components}\n }\n }", "title": "" }, { "docid": "7c6f0ff8f69374311bc7d43f46e2c681", "score": "0.5163314", "text": "def __call__(self, title, version, base_path='/', info={}, swagger={},\n schema_transformers=[], **kwargs):\n\n info.update(title=title, version=version)\n swagger.update(swagger='2.0', info=info, basePath=base_path)\n self.schema_transformers.extend(schema_transformers)\n\n paths, tags = self._build_paths(**kwargs)\n if paths:\n swagger['paths'] = paths\n if tags:\n swagger['tags'] = tags\n\n definitions = self.definitions.definition_registry\n if definitions:\n swagger['definitions'] = definitions\n\n parameters = self.parameters.parameter_registry\n if parameters:\n swagger['parameters'] = parameters\n\n responses = self.responses.response_registry\n if responses:\n swagger['responses'] = responses\n\n return swagger", "title": "" }, { "docid": "aaddd81de9af50562353b6642d649755", "score": "0.51470786", "text": "def loads(self, obj):\n\n buf = io.BytesIO(obj)\n schema_dict = schemaless_reader(buf, self.schema_dict)\n\n if self.decoding_method:\n return [self.decoding_method(schema_dict)]\n else:\n return [schema_dict]", "title": "" }, { "docid": "6414d3051920913323ee10bd60e26f79", "score": "0.5139515", "text": "def schema(cls, *args: Any, **kwargs: Any) -> Dict[str, Any]:\n schema = super().schema(*args, **kwargs)\n cls.check_provider_added(schema)\n schema = cls.change_format_to_oneOf(schema)\n schema = cls.resolve_refs(schema)\n schema = cls.remove_enum_allOf(schema)\n return schema", "title": "" }, { "docid": "31ce216f4a2547b398775bf6de953ba6", "score": "0.5136734", "text": "def serialize(self, document, base_url: str, path, schema) -> str:\n home = {\n \"api\": {\n \"title\": schema[\"info\"][\"title\"],\n \"links\": {\n \"author\": schema[\"info\"][\"contact\"][\"name\"],\n \"describedBy\": \"\",\n },\n },\n \"resources\": {},\n }\n for path, path_desc in schema[\"paths\"].items():\n home[\"resources\"][path.strip(\"/\")] = {\n \"href\": as_absolute(base_url, path),\n \"hints\": {\"allow\": [key.upper() for key in path_desc.keys()]},\n }\n return json.dumps(home, indent=4)", "title": "" }, { "docid": "82e6aac41f7b96a82898225d38e99e8c", "score": "0.5128058", "text": "def _make_doc_response_schema(schema):\n return schema", "title": "" }, { "docid": "9f25adb59c8990d11c8f5e65a857e3db", "score": "0.50924915", "text": "def _ref_to_spec(self, data):\n if isinstance(data, list):\n for i_v in data:\n self._ref_to_spec(i_v)\n if isinstance(data, 
dict):\n for k, v in data.items():\n if isinstance(v, list):\n for i_v in v:\n self._ref_to_spec(i_v)\n if isinstance(v, dict):\n self._ref_to_spec(v)\n elif k == \"$ref\":\n name_schema = v.split(\"/\")[-1]\n schema = class_registry.get_class(name_schema)\n name_schema = create_schema_name(schema=schema)\n if name_schema not in self.spec.components.schemas:\n self.spec.components.schema(name_schema, schema=schema)\n data[k] = \"/\".join(v.split(\"/\")[:-1] + [name_schema])", "title": "" }, { "docid": "c7d0088711d9d89a899e12fc1b127c52", "score": "0.5089259", "text": "def openapi():\n app = create_base_app()\n typer.secho(json.dumps(app.openapi(), indent=2))", "title": "" }, { "docid": "6114fdd91b877d847170fdbddcae62c3", "score": "0.50879925", "text": "def _schema_to_json(obj):\n if isinstance(obj, validater.Schema):\n return obj.data\n elif isinstance(obj, (types.FunctionType, types.LambdaType)):\n return obj.__name__\n else:\n return six.text_type(obj)", "title": "" }, { "docid": "8afba4539155036b56ab1216f5f4d918", "score": "0.50680184", "text": "def make_all_rules(\n schema: \"BaseOpenAPISchema\", bundles: Dict[str, CaseInsensitiveDict], connections: EndpointConnections\n) -> Dict[str, Rule]:\n return {\n f\"rule {endpoint.verbose_name}\": make_rule(\n endpoint, bundles[endpoint.path][endpoint.method.upper()], connections\n )\n for endpoint in schema.get_all_endpoints()\n }", "title": "" }, { "docid": "e7a85797df3ffb3f5983dabc22c997b1", "score": "0.50550336", "text": "def convert(self, views, form_type='add'):\n\n if not isinstance(views, list) and (isinstance(views, Form)\n or issubclass(views, Form)):\n return super().convert(views)\n try:\n iter(views)\n except TypeError:\n views = [views]\n schema = OrderedDict([\n ('type', 'object'),\n ('definitions', OrderedDict([])),\n ('properties', OrderedDict([]))\n ])\n for view in views:\n name, view_schema = self.convert_view(view, form_type)\n for k, v in view_schema['definitions'].items():\n schema['definitions'][k] = v\n for k, v in view_schema['properties'].items():\n schema['properties'][k] = v\n return schema", "title": "" }, { "docid": "0d4d615b56a64dddae0fd9c5476f3413", "score": "0.50506866", "text": "def custom_openapi():\n if app.openapi_schema:\n return app.openapi_schema\n openapi_schema = get_openapi(\n title=\"Custom title\",\n version=\"7.7.3\",\n description=\"This is a very custom OpenAPI schema\",\n routes=app.routes,\n )\n # we can add the ReDoc extension, adding a custom `x-logo` to the `info` \"object\" in the OpenAPI schema\n openapi_schema[\"info\"][\"x-logo\"] = {\n \"url\": \"https://fastapi.tiangolo.com/img/logo-margin/logo-teal.png\"\n }\n app.openapi_schema = openapi_schema\n return app.openapi_schema", "title": "" }, { "docid": "55b9f21e2af6cacb3b56cce010cf555a", "score": "0.5044342", "text": "def NormalizeWithContext(self, object_type, context):\n\n relative_path = self._api_context[object_type].format(**context)\n\n return '{0}/{1}'.format(self._api_host.rstrip('/'), relative_path)", "title": "" }, { "docid": "7539fccac80878e2bdcdc371821c9068", "score": "0.50344247", "text": "def get_schemes(self):\n pass", "title": "" }, { "docid": "f725fb99c0d1e690d80235c6063b37b6", "score": "0.50310403", "text": "def proc_object(self, obj: JS_object) -> None:\n for k in obj._as_dict.keys():\n if k in self.jsonld:\n logging.warning(\"Redefinition of element: %s\" % k)\n elif k == '$ref':\n self.ref_type(obj[k])\n else:\n self.jsonld[k] = self.proc_schema(obj[k], JSON_ld())\n self.jsonld[k]['@id'] = self.namespace + ':' + k", 
"title": "" }, { "docid": "07425ace3f4edd158338262b529b4a24", "score": "0.5022986", "text": "def generate(json_obj) -> dict:\n builder = SchemaBuilder(\n schema_uri='http://json-schema.org/draft-07/schema#')\n builder.add_object(json_obj)\n return builder.to_schema()", "title": "" }, { "docid": "b21c7b1a8683d87b793aebda16011fb2", "score": "0.5010723", "text": "def openapi(self):\n if not self.__openapi_schema:\n self.__openapi_schema = symmetric.openapi.utils.get_openapi(\n self,\n symmetric.helpers.humanize(\n symmetric.helpers.get_module_name(self)\n ) + \" API\"\n )\n return self.__openapi_schema", "title": "" }, { "docid": "2444b2e2514c9a60fb1f07efe13b6081", "score": "0.5010179", "text": "def output(self, api_object):\n self.api_object = api_object\n self.change_format()", "title": "" }, { "docid": "b8e85369466fc95f2ee87c96d129bbc2", "score": "0.5005478", "text": "def add_swagger_ui(self):", "title": "" }, { "docid": "a00bb06eb954c9fc50c1b95366fa11a5", "score": "0.49832925", "text": "def schema_to_parameters(schema, service=None):\n ret = []\n models = {}\n\n for location in (\"path\", \"header\", \"querystring\", \"body\"):\n if not cornice_swagger.util.is_object(schema):\n schema = schema()\n location_schema = schema.get(location)\n swag = cornice_swagger.swagger_model.SwaggerModel()\n if not location_schema:\n continue\n\n entry = swag.to_swagger(location_schema)\n\n if swag.models:\n _in = location\n name = location_schema.name\n parameter = dict()\n if location == \"body\":\n parameter[\"in\"] = _in\n parameter[\"name\"] = name\n parameter[\"description\"] = location_schema.description\n parameter[\"schema\"] = entry\n else:\n if location == \"querystring\":\n _in = \"query\"\n title = (location_schema.title or\n location_schema.__class__.__name__)\n for k, v in swag.models[title][\"properties\"].items():\n parameter = dict()\n parameter[\"in\"] = _in\n parameter[\"name\"] = v[\"name\"]\n parameter[\"required\"] = v[\"required\"]\n parameter[\"type\"] = v[\"type\"]\n if \"description\" in v:\n parameter[\"description\"] = v[\"description\"]\n ret.append(parameter)\n swag.models = {}\n\n ret.append(parameter)\n models.update(swag.models)\n return ret, models", "title": "" }, { "docid": "a45704d2f841c3abc980ad648d128403", "score": "0.49655193", "text": "def schemas(api):\n explain = SRUCall.explain(api)\n for s in explain[\"schemas\"]:\n print(s[0], \"-\", s[1])", "title": "" }, { "docid": "bfb61eec2b760d6596c4ae93ff824200", "score": "0.49582177", "text": "def cli(yamlfile, format, output):\n print(OwlSchemaGenerator(yamlfile, format).serialize(output=output))", "title": "" }, { "docid": "3811ad66a013275ad18d74f725697925", "score": "0.49466777", "text": "def __definitions_descriptor(self):\n # Filter out any keys that aren't 'properties' or 'type'\n result = {}\n for def_key, def_value in self.__parser.schemas().iteritems():\n if 'properties' in def_value or 'type' in def_value:\n key_result = {}\n required_keys = set()\n if 'type' in def_value:\n key_result['type'] = def_value['type']\n if 'properties' in def_value:\n for prop_key, prop_value in def_value['properties'].items():\n if isinstance(prop_value, dict) and 'required' in prop_value:\n required_keys.add(prop_key)\n del prop_value['required']\n key_result['properties'] = def_value['properties']\n # Add in the required fields, if any\n if required_keys:\n key_result['required'] = sorted(required_keys)\n result[def_key] = key_result\n\n # Add 'type': 'object' to all object properties\n # Also, recursively add relative path to 
all $ref values\n for def_value in result.itervalues():\n for prop_value in def_value.itervalues():\n if isinstance(prop_value, dict):\n if '$ref' in prop_value:\n prop_value['type'] = 'object'\n self._add_def_paths(prop_value)\n\n return result", "title": "" }, { "docid": "b9897fb50d7cb36f8467b761e71d352e", "score": "0.49388573", "text": "def get_blueprint():\n blueprint = flask.Blueprint('api', __name__)\n for cls in Resource.plugins:\n resource = cls()\n blueprint.add_url_rule(resource.get_endpoint(),\n endpoint=cls.__name__.lower(),\n view_func=resource.handle,\n methods=resource.methods)\n return blueprint", "title": "" }, { "docid": "42a5f5be6d5d170e87b2e548eccf3119", "score": "0.49321088", "text": "def test_app_generates_correct_api_openapi_schema():\n schema = create_app(ephemeral=True).openapi()\n\n assert len(schema[\"paths\"].keys()) > 1\n assert all([p.startswith(\"/api/\") for p in schema[\"paths\"].keys()])", "title": "" }, { "docid": "c344da9c670dd858fe00b8933c44047e", "score": "0.49301517", "text": "def show_schemas():\n schema_names = spec_loader.get_schema_names()\n resp = {'names': schema_names}\n if flask.request.args.get('show_source'):\n resp['content'] = spec_loader.get_schema_dicts(schema_names)\n return flask.jsonify(resp)", "title": "" }, { "docid": "a62981222d83949a14355f3b256042fb", "score": "0.49277082", "text": "def process_objects(self, app, objects):\n api: Api = app.extensions['api']\n\n for resource_cls in objects.values():\n if isinstance(resource_cls.Meta.model, str):\n resource_cls.Meta.model = \\\n self.unchained.sqlalchemy_bundle.models[resource_cls.Meta.model]\n model_name = resource_cls.Meta.model.__name__\n\n self.attach_serializers_to_resource_cls(model_name, resource_cls)\n self.bundle.resources_by_model[model_name] = resource_cls\n api.register_model_resource(resource_cls)", "title": "" }, { "docid": "17aae759f81178096cf92d59a8b6f771", "score": "0.49215567", "text": "def print_openapi_spec() -> None:\n for router in ROUTERS:\n if not router.include:\n router.add_to_application(app)\n print(yaml.dump(app.openapi()))", "title": "" }, { "docid": "4523ca51284552ccd38bba9b3ab8ff96", "score": "0.49213943", "text": "def get_schema(self, request=None, public=False):\n paths = self.get_paths()\n if not paths:\n return None\n\n schema = {\n \"openapi\": \"3.0.2\",\n \"info\": self.get_info(),\n \"paths\": paths,\n }\n\n return schema", "title": "" }, { "docid": "29fb39412b3c79d979b5a735fabf0b4d", "score": "0.49213386", "text": "def find_schema(obj):\n for k, v in obj.items():\n if isinstance(v, dict):\n if '$schema' in v:\n obj[k] = default_values(v)\n obj[k]['$schema'] = v['$schema']\n else:\n obj[k] = find_schema(v)\n else:\n raise Exception\n return obj", "title": "" }, { "docid": "fc1e0ab9f1624648d75204d79301bc49", "score": "0.49192804", "text": "def __schema__(self):\n if not self._schema:\n try:\n self._schema = CustomSwagger(self).as_dict(self._custom_definition)\n except Exception:\n # Log the source exception for debugging purpose\n # and return an error message\n msg = 'Unable to render schema'\n logger.exception(msg) # This will provide a full traceback\n return {'error': msg}\n return self._schema", "title": "" }, { "docid": "0b58d42955296284fb714cc5980e0288", "score": "0.49052584", "text": "def convert_to_openapispec():\n parser = argparse.ArgumentParser()\n parser.add_argument('-b', '--basedir', required=True,\n help='basedir of family')\n parser.add_argument('-l', '--lones', required=True,\n help='comma separated resource names')\n 
parser.add_argument('--outfmt', required=False,\n default='json',\n help='yaml or json')\n parser.add_argument('--outdir', required=False,\n help='output directory')\n parser.add_argument('--infile', required=False,\n help='full path of schema file')\n parser.add_argument('-m', '--module', required=False,\n help='Module used for creating spec')\n args = parser.parse_args()\n log_format = '[%(filename)s:%(lineno)d]' \\\n '[%(levelname)s]: %(message)s'\n logging.basicConfig(format=log_format,\n level=logging.INFO)\n openapiconverter.main(args)", "title": "" }, { "docid": "1f22cd96ae0a418b34fc6c894e8f713e", "score": "0.49019504", "text": "def _schema(cls, extractor):\n # First get the short description by looking for the first complete sentence.\n description = []\n for line in inspect.getdoc(cls).splitlines():\n # Stop when we hit an empty line or see variable statements.\n if not line or line.startswith(\":\"):\n break\n description.append(line.strip())\n description = \" \".join(description)\n\n schema = {\n \"title\": cls.__name__.strip(\"_\").rstrip(\"2\"),\n \"description\": description,\n \"type\": \"object\",\n \"properties\": {\n # Include the \"type\" property that gets added dynamically during serialization.\n \"type\": {\n \"const\": cls._type(),\n }\n },\n \"additionalProperties\": False,\n \"required\": [\"type\"],\n }\n for field in cls.fields():\n is_required = field.default is not None\n\n # Allow customization within attr metadata field for corner cases.\n if \"jsonschema\" in field.metadata:\n sub_schema = field.metadata[\"jsonschema\"]\n else:\n sub_schema = extractor.extract(field.type)\n\n # Anything not required is nullable.\n if not is_required:\n if list(sub_schema.keys()) == [\"type\"]:\n if isinstance(sub_schema[\"type\"], list):\n if \"null\" not in sub_schema[\"type\"]:\n sub_schema[\"type\"].append(\"null\")\n elif sub_schema[\"type\"] != \"null\":\n sub_schema[\"type\"] = [sub_schema[\"type\"], \"null\"]\n elif list(sub_schema.keys()) == [\"anyOf\"]:\n if {\"type\": \"null\"} not in sub_schema[\"anyOf\"]:\n sub_schema[\"anyOf\"].append({\"type\": \"null\"})\n else:\n sub_schema = {\"anyOf\": [sub_schema, {\"type\": \"null\"}]}\n\n schema[\"properties\"][field.name] = sub_schema\n\n if is_required:\n schema[\"required\"].append(field.name)\n\n return schema", "title": "" }, { "docid": "ff83c23220d38da368354706d3221fed", "score": "0.48902914", "text": "def _generate_base_schema(self, top_object) -> list:\r\n if isinstance(top_object, Table):\r\n return self._generate_base_table(top_object)\r\n elif isinstance(top_object, View):\r\n return self._generate_base_view(top_object)\r\n elif isinstance(top_object, Sequence):\r\n return self._generate_base_sequence(top_object)\r\n elif isinstance(top_object, Procedure):\r\n return self._generate_base_procedure(top_object)\r\n else:\r\n raise Exception(\"unknown schema \" + str(top_object))", "title": "" }, { "docid": "d1628dccf4f309511c7ef150fb8c7724", "score": "0.4862412", "text": "def render_schema(self, do_print=True):\n rendered_schema = json.dumps(self.schema())\n if do_print:\n print(rendered_schema)\n else:\n return rendered_schema", "title": "" }, { "docid": "a291d385f6b3a3e3483f4608a4d48eeb", "score": "0.48615018", "text": "def build_active_schema_for_object(cls, obj):\n schema = cls()\n schema.add_object(obj)\n return schema", "title": "" }, { "docid": "11da397b649914b1b262652c0e30f9e3", "score": "0.4861495", "text": "def dump_oggbundle_schemas():\n transaction.doom()\n writer = OGGBundleJSONSchemaDumpWriter()\n 
result = writer.dump()\n return result", "title": "" }, { "docid": "563222992dbedbe3fee604e9fae573f4", "score": "0.48339707", "text": "def generate_schema():\n\n _result = {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"description\": \"The JSON Schema for QAL resources\",\n \"title\": \"QAL Resources\",\n \"type\": \"object\",\n \"version\": __version__,\n \"properties\": {\n \"resources\":\n {\n \"type\": \"array\",\n \"items\": {\n \"$ref\": \"#/definitions/Resource\"\n }\n }\n },\n \"namespace\": \"qal\",\n \"required\": [\"resources\"],\n \"definitions\": {\n\n }\n }\n\n def _property_to_type(_property_name):\n if _property_name == \"uuid\":\n return [{\n \"type\": \"string\",\n \"pattern\": \"^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$\"\n }]\n else:\n return [{\n \"type\": \"string\"\n }]\n # First, Add parameter types\n for _curr_class in list_prefixed_classes(globals(), \"resource\", _exclude=[\"Resources\"]):\n _result[\"definitions\"].update({_curr_class: {\n \"type\": \"object\",\n \"properties\": json_add_child_properties(globals(), _curr_class, _property_to_type),\n \"additionalProperties\": True\n }\n })\n\n return _result", "title": "" }, { "docid": "2ba25031c7fb28ca493054dcb5450e4e", "score": "0.4827812", "text": "def default(self, obj):\n if hasattr(obj, 'to_dict'):\n result = obj.to_dict()\n result['_type'] = f'{obj.__module__}.{obj.__class__.__name__}'\n result['_version'] = 0\n return result\n return super().default(obj)", "title": "" }, { "docid": "0f9e7b9760a542d4765efe872a619787", "score": "0.4821796", "text": "def deflate(self, model_object, props, rels):\n namespace_manager = NamespaceManager(self.g)\n namespace_manager.bind(\n Ori.prefix,\n Namespace(Ori.uri),\n override=False\n )\n namespace_manager.bind(\n model_object.prefix,\n Namespace(model_object.uri),\n override=False\n )\n\n s = URIRef('{}{}'.format(Ori.uri, model_object.get_ori_identifier()))\n p = URIRef('{}type'.format(Rdf.uri))\n o = URIRef(self.uri_format(model_object))\n self.g.add((s, p, o,))\n\n for name, definition in model_object.definitions(props=props, rels=rels):\n try:\n value = model_object.values.get(name, None)\n if value:\n p = URIRef(self.uri_format(definition))\n try:\n o = self.serialize_prop(definition, value)\n except (IgnoredProperty, MissingProperty):\n raise\n\n namespace_manager.bind(\n definition.ns.prefix,\n Namespace(definition.ns.uri),\n override=False\n )\n if type(o) != list:\n self.g.add((s, p, o,))\n else:\n for oo in o:\n self.g.add((s, p, oo,))\n elif definition.required and not model_object.skip_validation:\n raise RequiredProperty(\"Property '{}' is required for {}\".format(\n name, model_object.compact_uri())\n )\n except IgnoredProperty:\n continue", "title": "" }, { "docid": "fcbd38043642b47a52300ae2dcbc59be", "score": "0.48147994", "text": "def __schema_descriptor(self, services):\n methods_desc = {}\n\n for service in services:\n protorpc_methods = service.all_remote_methods()\n for protorpc_method_name in protorpc_methods.iterkeys():\n rosy_method = '%s.%s' % (service.__name__, protorpc_method_name)\n method_id = self.__id_from_name[rosy_method]\n\n request_response = {}\n\n request_schema_id = self.__request_schema.get(method_id)\n if request_schema_id:\n request_response['request'] = {\n '$ref': request_schema_id\n }\n\n response_schema_id = self.__response_schema.get(method_id)\n if response_schema_id:\n request_response['response'] = {\n '$ref': response_schema_id\n }\n\n methods_desc[rosy_method] = 
request_response\n\n descriptor = {\n 'methods': methods_desc,\n 'schemas': self.__parser.schemas(),\n }\n\n return descriptor", "title": "" }, { "docid": "884fa7b42518668a700a63616b1f8474", "score": "0.48137224", "text": "def genSwagger(filename, api_object):\n global swagger_template, template_components\n \n try:\n template = copy.deepcopy(swagger_template)\n\n template['info']['title'] = api_object['name']\n template['info']['version'] = str(api_object['version'])\n template['info']['description'] = api_object['description']\n\n for resource in api_object['resources']:\n resource_template = template_components.get(resource['method'].upper())\n \n path = \"\"\n if resource['path'][0] != \"/\":\n path = '/' + resource['path']\n else:\n path = resource['path']\n \n if resource['method'].upper() == \"DELETE\":\n path += r\"/{itemId}\"\n elif resource['method'].upper() == \"PUT\":\n path += r\"/{itemId}\"\n \n template['paths'][path] = resource_template\n \n with open(abs_path + '/../../../../data/swagger/{}.json'.format(filename), 'w') as f:\n json.dump(template, f)\n\n return True, json.dumps(template)\n \n except Exception as err:\n logger.exception(\"Swagger creation failed for API: {}. Error: {}\".format(filename, err))\n return False, None", "title": "" }, { "docid": "90e5df0fa6253987366b40779bf10c88", "score": "0.4809211", "text": "def get_schema(self, parser_context: Mapping[str, Any]) -> Type[ResourceObject]:\n return parser_context[\"view\"].get_serializer().schema", "title": "" }, { "docid": "7512fd4446994c9622bae085ed6607e2", "score": "0.4803665", "text": "def setup_swagger(\n routes: typing.List[tornado.web.URLSpec],\n *,\n swagger_url: str = \"/api/doc\",\n api_base_url: str = \"/\",\n description: str = \"Swagger API definition\",\n api_version: str = \"1.0.0\",\n title: str = \"Swagger API\",\n contact: str = \"\",\n schemes: list = None,\n security_definitions: dict = None,\n security: list = None,\n display_models: bool = True,\n api_definition_version: str = API_SWAGGER_2\n):\n swagger_schema = generate_doc_from_endpoints(\n routes,\n api_base_url=api_base_url,\n description=description,\n api_version=api_version,\n title=title,\n contact=contact,\n schemes=schemes,\n security_definitions=security_definitions,\n security=security,\n api_definition_version=api_definition_version,\n )\n\n _swagger_ui_url = \"/{}\".format(swagger_url) if not swagger_url.startswith(\"/\") else swagger_url\n _base_swagger_ui_url = _swagger_ui_url.rstrip(\"/\")\n _swagger_spec_url = \"{}/swagger.json\".format(_swagger_ui_url)\n\n routes[:0] = [\n tornado.web.url(_swagger_ui_url, SwaggerUiHandler),\n tornado.web.url(\"{}/\".format(_base_swagger_ui_url), SwaggerUiHandler),\n tornado.web.url(_swagger_spec_url, SwaggerSpecHandler),\n ]\n\n SwaggerSpecHandler.SWAGGER_SPEC = swagger_schema\n\n with open(os.path.join(STATIC_PATH, \"ui.html\"), \"r\", encoding=\"utf-8\") as f:\n SwaggerUiHandler.SWAGGER_HOME_TEMPLATE = (\n f.read().replace(\"{{ SWAGGER_URL }}\", _swagger_spec_url).replace(\"{{ DISPLAY_MODELS }}\", str(-1 if not display_models else 1))\n )", "title": "" }, { "docid": "feb15f70be4631242c8fbd78b44a2597", "score": "0.47897843", "text": "def create_swagger_endpoint(api):\n\n class SwaggerEndpoint(Resource):\n def get(self):\n swagger_object = {}\n # filter keys with empty values\n for k, v in api._swagger_object.iteritems():\n if v or k == 'paths':\n if k == 'paths':\n paths = {}\n for endpoint, view in v.iteritems():\n views = {}\n for method, docs in view.iteritems():\n # check 
permissions. If a user has not access to an api, do not show the docs of it\n if auth(request.args.get('api_key'), endpoint, method):\n views[method] = docs\n if views:\n paths[endpoint] = views\n swagger_object['paths'] = paths\n else:\n swagger_object[k] = v\n return swagger_object\n\n return SwaggerEndpoint", "title": "" }, { "docid": "cfd3ef7f98d5c7cdf95ebd528c69d788", "score": "0.47889823", "text": "def serialise(self):\n schema = dict(\n format_version=self.SCHEMA_FORMAT_VERSION,\n fieldtypes=self.fieldtypes,\n routes=self.routes,\n guessers=[g.serialise() for g in self.guessers],\n next_slot = self.next_slot,\n )\n return json.dumps(schema, sort_keys=True)", "title": "" }, { "docid": "09fb5c6414c7c0b4b6057f5fdeefa8b1", "score": "0.47878876", "text": "def register_routes_from_swagger(config):\n settings = config.get_settings()\n # Append trailing slash if not present\n if settings['pyramid_swagger.schema_directory'][-1] != '/':\n settings['pyramid_swagger.schema_directory'] += '/'\n swagger_spec_path = \"{0}{1}\".format(\n settings['pyramid_swagger.schema_directory'],\n settings['pyramid_swagger.schema_file']\n )\n with open(swagger_spec_path) as phile:\n yaml_dict = yaml.load(phile)\n base_path = '' if yaml_dict['basePath'] == '/' else yaml_dict['basePath']\n for k, v in yaml_dict['paths'].items():\n pattern = \"{base}{path}\".format(base=base_path, path=k)\n for verb, data in v.items():\n route_name = data['operationId']\n LOGGER.info(\n 'Adding Swagger route name=%s, method=%s, pattern=%s',\n route_name,\n verb.upper(),\n pattern\n )\n config.add_route(name=route_name, pattern=pattern, request_method=verb.upper())", "title": "" }, { "docid": "e1e48283b7c5c373d67e16b6d4fd23e1", "score": "0.47845888", "text": "def add_openapi_json(self):", "title": "" }, { "docid": "80bbd1886c6be8026f4be2068c7e92ae", "score": "0.47805062", "text": "def scout_core_specification_swagger(*args, **kwargs):\n swag = swagger(current_app)\n swag['info']['title'] = 'Wild Me - Scout MWS Project, Phase 1'\n swag['info'][\n 'description'\n ] = 'Documentation for all REST API endpoints provided by Wild Me for the Scout collaboration'\n swag['info']['version'] = 'v0.1'\n swag['info']['contact'] = {\n 'name': 'Wild Me',\n 'url': 'http://wildme.org',\n 'email': 'dev@wildme.org',\n }\n swag['info']['license'] = {\n 'name': 'Apache 2.0',\n 'url': 'http://www.apache.org/licenses/LICENSE-2.0.html',\n }\n swag['host'] = 'kaiju.dyn.wildme.io:5000'\n swag['schemes'] = [\n 'http',\n ]\n\n response = jsonify(swag)\n return response", "title": "" }, { "docid": "7853c7b8dfa6f8a03719275eb72ab4e7", "score": "0.47723496", "text": "def build_all_bundle_schemas():\n for portal_type, short_name in GEVER_TYPES_TO_OGGBUNDLE_TYPES.items():\n builder = OGGBundleJSONSchemaBuilder(portal_type)\n schema = builder.build_schema()\n filename = get_bundle_schema_filename(short_name)\n yield filename, schema\n\n for portal_type, short_name in GEVER_SQL_TYPES_TO_OGGBUNDLE_TYPES.items():\n builder = OGGBundleJSONSchemaSQLBuilder(portal_type)\n schema = builder.build_schema()\n filename = get_bundle_schema_filename(short_name)\n yield filename, schema", "title": "" }, { "docid": "685e42b128e02a05f13d959f90e3293e", "score": "0.47674236", "text": "def _gen_docs(self):\n # The static content defined here mimics and can easily be converted\n # to a swagger file documentation. 
Possibly converting the Rbkcli to\n # a API server.\n doc = {\n 'definitions': {},\n 'paths': {}\n }\n\n cmdlets_files = self._get_cmdlets_files()\n loaded_cmdlets = self._load_cmdlets_files(cmdlets_files)\n usable_cmdlets = self._get_usable_cmdlets(loaded_cmdlets)\n\n for line in usable_cmdlets:\n if line['name'] != '':\n cmd_name = line['name'].replace(' ', '/')\n cmd_name = '/' + cmd_name\n parameters_found = line['param'].split(',')\n doc['paths'][cmd_name] = {\n 'get': {\n 'description': line['cmdlet_description'],\n 'operationId': '',\n 'operation': line['command'],\n 'parameters': [],\n 'responses': {\n '200': {\n 'description': line['response_description'],\n 'schema': {},\n 'table_order': line['table_order'],\n 'multiple_output': line['multiple_output']\n }\n },\n 'summary': line['cmdlet_summary'],\n 'tags': '',\n 'x-group': ''\n }\n }\n for param in parameters_found:\n doc['paths'][cmd_name]['get']['parameters'].append({\n \"in\": \"body\",\n \"name\": line['param'],\n \"required\": True,\n \"type\": \"string\"\n })\n\n self.endpoints = doc", "title": "" }, { "docid": "944e9b0ece221460dc960e00fbfa1ce1", "score": "0.47620457", "text": "def _BuildSchemaDefinitions(self):\n schemas = self.values.get('schemas')\n if schemas:\n for name, def_dict in schemas.iteritems():\n # Upgrade the string format schema to a dict.\n if isinstance(def_dict, unicode):\n def_dict = simplejson.loads(def_dict)\n self._schemas[name] = self.DataTypeFromJson(def_dict, name)", "title": "" }, { "docid": "f02f5da370a32438cecb7543028072a3", "score": "0.47574404", "text": "def _rewrite_refs(self, schema):\n if isinstance(schema, list):\n for value in schema:\n self._rewrite_refs(value)\n\n if isinstance(schema, dict):\n for key, value in schema.iteritems():\n if isinstance(value, (list, dict)):\n self._rewrite_refs(value)\n\n if key == '$ref' and value in schema_paths:\n del schema[key]\n schema.update(resolve_ref(value))", "title": "" }, { "docid": "2bcac99b0133c5db74febd223f32e4e9", "score": "0.47536445", "text": "def to_default_representation(self, obj):\n return super().to_representation(obj)", "title": "" }, { "docid": "5ddeea3becf722b8d7355888ee44ab70", "score": "0.47428823", "text": "def _gen_docs(self):\n # The static content defined here mimics and can easily be converted\n # to a swagger file documentation. 
Possibly converting the Rbkcli to\n # a API server.\n self.meta_api.doc = {\n 'definitions': {},\n 'paths': {}\n }\n\n cmdlets_files = self._get_cmdlets_files()\n loaded_cmdlets = self._load_cmdlets_files(cmdlets_files)\n usable_cmdlets = self._get_usable_cmdlets(loaded_cmdlets)\n\n for line in usable_cmdlets:\n if line['name'] != '':\n cmd_name = line['name'].replace(' ', '/')\n cmd_name = '/' + cmd_name\n parameters_found = line['param'].split(',')\n self.meta_api.doc['paths'][cmd_name] = {\n 'get': {\n 'description': line['cmdlet_description'],\n 'operationId': '',\n 'operation': line['command'],\n 'parameters': [],\n 'responses': {\n '200': {\n 'description': line['response_description'],\n 'schema': {},\n 'table_order': [],\n 'multiple_output': line['multiple_output']\n }\n },\n 'summary': line['cmdlet_summary'],\n 'tags': '',\n 'x-group': ''\n }\n }\n for param in parameters_found:\n self.meta_api.doc['paths'][cmd_name]['get']['parameters'].append({\n \"in\": \"body\",\n \"name\": line['param'],\n \"required\": True,\n \"type\": \"string\"\n })\n\n self.endpoints = self.meta_api.doc", "title": "" }, { "docid": "9962f83ebe28a4fbe5529cc280865c38", "score": "0.473935", "text": "def iinfo_schemas(self):\n remote_classname = self.schema.remoteClass.split('.')[-1]\n remote_spec = self.class_.zenpack.classes.get(remote_classname)\n if not remote_spec:\n return {}\n\n schemas = {}\n\n if isinstance(self.schema, (ToOne)):\n schemas[self.name] = schema.Entity(\n title=_t(remote_spec.label),\n group=\"Relationships\",\n order=3.0)\n else:\n relname_count = '{}_count'.format(self.name)\n schemas[relname_count] = schema.Int(\n title=_t(u'Number of {}'.format(remote_spec.plural_label)),\n group=\"Relationships\",\n order=6.0)\n\n return schemas", "title": "" }, { "docid": "e53438b61d7e47f63b37264f0048303b", "score": "0.4734321", "text": "def schemes(self, schemes: List[str]):\n\n self._schemes = schemes", "title": "" }, { "docid": "a17ff29dc238a62d4ea5fdba49deb12f", "score": "0.47227666", "text": "def swagger_content_type_hack(view, supported=['application/json', 'application/xml']):\n def _inner(*args, **kwargs):\n resp = view(*args, **kwargs)\n if isinstance(resp, Response):\n if resp.data and 'apis' in resp.data:\n apis = resp.data['apis']\n for api in apis:\n if 'operations' in api:\n for op in api['operations']:\n op['produces'] = supported\n return resp\n return _inner", "title": "" }, { "docid": "5d6200d440c17cb49e324fbdad374466", "score": "0.47218448", "text": "def get_schema(cls) -> SchemaDictType:\n return {\n 'type': 'unicode',\n 'validators': [{\n 'id': 'is_nonempty'\n }],\n 'ui_config': {\n 'placeholder': 'https://www.example.com'\n },\n 'post_normalizers': [{\n 'id': 'sanitize_url'\n }]\n }", "title": "" }, { "docid": "b1740cd3bc47595cf2ea84d040e0c5fa", "score": "0.47179604", "text": "def map_rest_schema(schema, resource_fields):\n for attr in schema:\n # hack fix... 
change to make fields primary thing and switch get_field_type to return opposite\n if not isinstance(schema[attr]['type'], fields.Raw):\n resource_fields[attr] = get_field_type(schema[attr]['type'])\n else:\n resource_fields[attr] = schema[attr]['type']\n if schema[attr].get('nested', False):\n resource_fields[attr].__init__(attribute=schema[attr]['dict'])", "title": "" }, { "docid": "211cee8e7411d73ac382c2cdaa770639", "score": "0.47147828", "text": "def __init__(self, info: object=None, paths: object=None, swagger: str=None, host: str=None, schemes: List[str]=None, base_path: str=None, security_definitions: object=None, produces: List[str]=None, definitions: object=None, openapi: str=None, servers: List[object]=None, components: object=None): # noqa: E501\n self.swagger_types = {\n 'info': object,\n 'paths': object,\n 'swagger': str,\n 'host': str,\n 'schemes': List[str],\n 'base_path': str,\n 'security_definitions': object,\n 'produces': List[str],\n 'definitions': object,\n 'openapi': str,\n 'servers': List[object],\n 'components': object\n }\n\n self.attribute_map = {\n 'info': 'info',\n 'paths': 'paths',\n 'swagger': 'swagger',\n 'host': 'host',\n 'schemes': 'schemes',\n 'base_path': 'basePath',\n 'security_definitions': 'securityDefinitions',\n 'produces': 'produces',\n 'definitions': 'definitions',\n 'openapi': 'openapi',\n 'servers': 'servers',\n 'components': 'components'\n }\n\n self._info = info\n self._paths = paths\n self._swagger = swagger\n self._host = host\n self._schemes = schemes\n self._base_path = base_path\n self._security_definitions = security_definitions\n self._produces = produces\n self._definitions = definitions\n self._openapi = openapi\n self._servers = servers\n self._components = components", "title": "" }, { "docid": "8d81dfaff3ac469e3a1dcd018784c29a", "score": "0.4714654", "text": "def workWithSchema(self):\n #import avro.schema\n #schema = avro.schema.parse(json_encode(self.json_args))\n #print schema\n #Once we have the avro schema from UI we need to work with it\n schemas.append(self.json_args)", "title": "" }, { "docid": "df912acc2f1cfc30807da13653515174", "score": "0.47132203", "text": "def generate_all(discdoc):\n print(\"Processing:\", discdoc.get(\"id\", \"\"))\n schemas = discdoc.get(\"schemas\", {})\n resources = discdoc.get(\"resources\", {})\n # Generate scopes.\n scopes_type = generate_scopes_type(discdoc[\"name\"], discdoc.get(\"auth\", {}).get(\"oauth2\", {}).get(\"scopes\", {}))\n\n # Generate parameter types (*Params - those are used as \"side inputs\" to requests)\n params_struct_name = global_params_name(discdoc.get(\"name\"))\n parameter_types, parameter_enums = generate_params_structs(resources, global_params=params_struct_name)\n\n # Generate service impls.\n services = []\n for resource, methods in sorted(resources.items()):\n services.append(generate_service(resource, methods, discdoc))\n if \"methods\" in discdoc:\n services.append(generate_service(\"Global\", discdoc, discdoc, generate_subresources=False))\n\n # Generate schema types.\n structs = []\n enums = []\n for name, desc in sorted(schemas.items()):\n typ, substructs, subenums = parse_schema_types(name, desc)\n structs.extend(substructs)\n enums.extend(subenums)\n\n # Generate global parameters struct and its Display impl.\n if \"parameters\" in discdoc:\n schema = {\"type\": \"object\", \"properties\": discdoc[\"parameters\"]}\n name = replace_keywords(snake_to_camel(params_struct_name))\n typ, substructs, subenums = parse_schema_types(name, schema)\n for s in 
substructs:\n s[\"optional_fields\"] = s[\"fields\"]\n parameter_types.extend(substructs)\n parameter_enums.extend(subenums)\n\n # Assemble everything into a file.\n modname = (discdoc[\"id\"] + \"_types\").replace(\":\", \"_\")\n out_path = path.join(\"gen\", modname + \".rs\")\n with open(out_path, \"w\") as f:\n f.write(RustHeader)\n f.write(scopes_type)\n # Render resource structs.\n for s in structs:\n struct_inline_comments(s)\n if not s[\"name\"]:\n print(\"WARN\", s)\n f.write(chevron.render(SchemaStructTmpl, s))\n for e in enums:\n enum_inline_comments(e)\n f.write(chevron.render(SchemaEnumTmpl, e))\n for e in parameter_enums:\n enum_inline_comments(e)\n f.write(chevron.render(SchemaEnumTmpl, e))\n # Render *Params structs.\n for pt in parameter_types:\n struct_inline_comments(pt)\n f.write(chevron.render(SchemaStructTmpl, pt))\n f.write(chevron.render(SchemaDisplayTmpl, pt))\n # Render service impls.\n for s in services:\n f.write(s)\n try:\n subprocess.run([\"rustfmt\", out_path, \"--edition=2018\"])\n except:\n return", "title": "" }, { "docid": "ad5895bb9787c3103120e0e16e1b6c90", "score": "0.47067106", "text": "def swagger(self, swagger: str):\n\n self._swagger = swagger", "title": "" }, { "docid": "1d25cdfbb766daa2691aa177899170d3", "score": "0.4703228", "text": "def _convert_json_schema_to_rest(schema: Dict) -> List[str]:\n\n schema = json.dumps(schema, default=str, indent=3)\n lines = [f\" {line}\" for line in schema.split(\"\\n\")]\n lines = \"\\n\".join(lines)\n lines = TPL_COLLAPSE.format(lines).split(\"\\n\")\n\n return lines", "title": "" }, { "docid": "b081afd8a00263df34ae97966e1c0774", "score": "0.4695786", "text": "def export_swagger(\n routes: typing.List[tornado.web.URLSpec],\n *,\n api_base_url: str = \"/\",\n description: str = \"Swagger API definition\",\n api_version: str = \"1.0.0\",\n title: str = \"Swagger API\",\n contact: str = \"\",\n schemes: list = None,\n security_definitions: dict = None,\n security: list = None,\n):\n return generate_doc_from_endpoints(\n routes,\n api_base_url=api_base_url,\n description=description,\n api_version=api_version,\n title=title,\n contact=contact,\n schemes=schemes,\n security_definitions=security_definitions,\n security=security,\n )", "title": "" }, { "docid": "525cd5091313450ebe771fecd27fafaa", "score": "0.46949035", "text": "def get_descriptor_defaults(self, api_info, hostname=None):\n hostname = (hostname or util.get_app_hostname() or\n api_info.hostname)\n protocol = 'http' if ((hostname and hostname.startswith('localhost')) or\n util.is_running_on_devserver()) else 'https'\n defaults = {\n 'swagger': '2.0',\n 'info': {\n 'version': api_info.version,\n 'title': api_info.name\n },\n 'host': hostname,\n 'consumes': ['application/json'],\n 'produces': ['application/json'],\n 'schemes': [protocol],\n 'basePath': api_info.base_path.rstrip('/'),\n }\n\n return defaults", "title": "" }, { "docid": "54dd0154f06ccbf50d81ae112ede0849", "score": "0.46933958", "text": "def setup_swagger(\n routes: typing.List[tornado.web.URLSpec],\n *,\n swagger_url: str = \"/api/doc\",\n api_base_url: str = \"/\",\n description: str = \"Swagger API definition\",\n api_version: str = \"1.0.0\",\n title: str = \"Swagger API\",\n contact: str = \"\",\n schemes: list = None,\n security_definitions: dict = None,\n security: list = None,\n display_models: bool = True,\n):\n swagger_schema = generate_doc_from_endpoints(\n routes,\n api_base_url=api_base_url,\n description=description,\n api_version=api_version,\n title=title,\n contact=contact,\n 
schemes=schemes,\n security_definitions=security_definitions,\n security=security,\n )\n\n _swagger_url = (\n \"/{}\".format(swagger_url) if not swagger_url.startswith(\"/\") else swagger_url\n )\n _base_swagger_url = _swagger_url.rstrip(\"/\")\n\n routes[:0] = [\n tornado.web.url(_swagger_url, SwaggerHomeHandler),\n tornado.web.url(\"{}/\".format(_base_swagger_url), SwaggerHomeHandler),\n ]\n\n with open(os.path.join(STATIC_PATH, \"ui.html\"), \"r\") as f:\n SwaggerHomeHandler.SWAGGER_HOME_TEMPLATE = (\n f.read()\n .replace(\"{{ SWAGGER_SCHEMA }}\", json.dumps(swagger_schema))\n .replace(\"{{ DISPLAY_MODELS }}\", str(-1 if not display_models else 1))\n )", "title": "" }, { "docid": "c0ad98e789110bbd4ed1933f1f8fdc82", "score": "0.4682381", "text": "def to_representation(self, obj):\n if type(obj) is not list:\n return [tag for tag in obj.all()]\n return obj", "title": "" }, { "docid": "93f8f75313735bfddd38d8f7f5c6df3f", "score": "0.46757478", "text": "def get_security_definitions(self, schema: Dict[str, Any], resolver: RefResolver) -> Dict[str, Any]:\n components = schema.get(\"components\", {})\n security_schemes = components.get(\"securitySchemes\", {})\n if \"$ref\" in security_schemes:\n return resolver.resolve(security_schemes[\"$ref\"])[1]\n return security_schemes", "title": "" }, { "docid": "33c4141bcca730207ab51258c8fd287b", "score": "0.467435", "text": "def schemes(self):\n return [self]", "title": "" }, { "docid": "16b85ecf7019bed92d5c3f7a7fe9a4bf", "score": "0.46739846", "text": "def swagger(self) -> Optional[Any]:\n return pulumi.get(self, \"swagger\")", "title": "" }, { "docid": "16b85ecf7019bed92d5c3f7a7fe9a4bf", "score": "0.46739846", "text": "def swagger(self) -> Optional[Any]:\n return pulumi.get(self, \"swagger\")", "title": "" }, { "docid": "1cd0a84a1668e0812da92bf603e9bcf5", "score": "0.4659864", "text": "def __pretty_object__(obj):\n # type: (object) -> str\n if str(type(obj)) not in constants.SYS_TYPE:\n from flask import session, request\n import numpy\n if obj is session:\n return __pretty_session__()\n elif obj is request:\n return __pretty_request__()\n elif isinstance(obj, list) or isinstance(obj, numpy.ndarray):\n print __pretty_multi_array__(obj)\n else:\n return __to_yaml__(obj)\n else:\n return str(obj)", "title": "" }, { "docid": "39b5cb4eadb54edd3f18814697a9fc1a", "score": "0.46471936", "text": "def build_active_schema_for_objects(cls, objects):\n schema = cls()\n schema.add_objects(objects)\n return schema", "title": "" } ]
1eeb69bd9b832fc54ecb2ace1e7e5086
Checks if the username and password are correct. Returns True if it is, False otherwise.
[ { "docid": "fc87a52a3502d066792596aed153d276", "score": "0.0", "text": "def authenticate_account(self, username, password):\n\n self.session = validate_session(self.session)\n account = self.session.query(User).filter(User.username == username).first()\n # Check if account/username exist. False if it doesn't.\n if account is None:\n return False\n\n if Crypto.verify_scrypt_hash(password, account.hash):\n return True\n else:\n return False", "title": "" } ]
[ { "docid": "446a6f1c2a454e86e0924b6a75fccf40", "score": "0.835001", "text": "def check_auth(username, password):\n un = username == blp.login_credentials['user']\n pw = password == blp.login_credentials['pass']\n return un and pw", "title": "" }, { "docid": "45aaaa68062e19e8cefcf5111d2d0d82", "score": "0.83034396", "text": "def check_auth(username, password):\n return username == uname and password == pwd", "title": "" }, { "docid": "c32fd25d98edff040df4592a674f160e", "score": "0.823231", "text": "def check_auth(username, password):\n return username == settings.USER and password == settings.PASSWORD", "title": "" }, { "docid": "ddb2db9d45a02f885be24c5c74c8dd8c", "score": "0.82174146", "text": "def check_auth(username, password):\n if app.args.AUTH:\n if ':' in app.args.AUTH:\n user, pwd = app.args.AUTH.split(':')[:2]\n else:\n user, pwd = app.args.AUTH, ''\n return username == user and password == pwd\n else:\n return True", "title": "" }, { "docid": "af1fc12584053762dcd93698ca4345ab", "score": "0.8197617", "text": "def check_auth(username, password):\n if AUTH_USER is None:\n return True\n return username == AUTH_USER and password == AUTH_PASSWD", "title": "" }, { "docid": "e513e6c70acf311508239cd8d9cab6d0", "score": "0.8145576", "text": "def check_auth(username, password):\n return username == 'sample' and password == 'sample'", "title": "" }, { "docid": "3a312e153e189e26b25178c0cb23f827", "score": "0.8077144", "text": "def userAndPasswordCorrect(self, username, password):\n return True", "title": "" }, { "docid": "230a0919a10a4ada987c62beb5e88f4e", "score": "0.8059623", "text": "def check_auth(usr, pwd):\n return usr == username and pwd == password", "title": "" }, { "docid": "80fb067ccaf771be4b97e2ddfc2f9013", "score": "0.80589145", "text": "def check_auth(username, password):\n return username == app.config['auth_user'] and password == app.config['auth_pass']", "title": "" }, { "docid": "005c523d1148979efd6c3fb997f8f05e", "score": "0.8054224", "text": "def check_auth(username, password):\n return username == app.config['ADMIN_USER'] and \\\n password == app.config['ADMIN_PASS']", "title": "" }, { "docid": "33995f82dc998b1a4cbba4c7ae9b1d54", "score": "0.8041471", "text": "def check_auth(username, password):\r\n try:\r\n if app.config['CONFIG']['bauth'][username]['pass'] == password:\r\n return True\r\n except Exception as e:\r\n rootLogger.info(e)\r\n\r\n return False", "title": "" }, { "docid": "dbf5042e662f8384d2ef001ffb9d6bc7", "score": "0.803071", "text": "def check_auth(username, password):\n return username == app.config['WC_LOGIN']['username'] and password == app.config['WC_LOGIN']['password']", "title": "" }, { "docid": "e2b0fcbfda8f4645dde183aaf900ecda", "score": "0.79818785", "text": "def check_auth(username, password):\n return username == 'admin' and password == 'password'", "title": "" }, { "docid": "9668121a9058001c88cd545af71b5473", "score": "0.79743207", "text": "def check_auth(username, password):\n\treturn username == 'admin' and password == 'secret'", "title": "" }, { "docid": "7520eb64d79e1617b46d771d5d8bba67", "score": "0.7960091", "text": "def verify_credentials(username: str, password: str) -> bool:\n return AuthenticationService.is_valid_credentials(username, password)", "title": "" }, { "docid": "47faf12d29c3bef1bd576433e08ec9d1", "score": "0.7950642", "text": "def check_auth(username, password):\n config = flask.current_app.config\n try:\n isuser = hmac.compare_digest(config['TTN_USER'], username)\n ispass = hmac.compare_digest(config['TTN_PASSWORD'], 
password)\n except KeyError:\n flask.current_app.logger.warn(\"No credentials for ttn specified.\")\n return False\n return isuser and ispass", "title": "" }, { "docid": "ec389cc19fe32ac8113a2ecb234e2616", "score": "0.7939864", "text": "def check_auth(username, password):\n return username == 'sammy' and password == 'BasicPassword!'", "title": "" }, { "docid": "6d9c2b007caa6ac342dcd9aa7f8129b6", "score": "0.7900145", "text": "def check_auth(username, password):\n return username == ADMIN_USER and password == ADMIN_PASS", "title": "" }, { "docid": "9b3a41f5a1213b0c69cd56b5e69a0b16", "score": "0.78785694", "text": "def check_auth(username, password):\n return username == 'admin' and password == 'secret'", "title": "" }, { "docid": "9b3a41f5a1213b0c69cd56b5e69a0b16", "score": "0.78785694", "text": "def check_auth(username, password):\n return username == 'admin' and password == 'secret'", "title": "" }, { "docid": "9b3a41f5a1213b0c69cd56b5e69a0b16", "score": "0.78785694", "text": "def check_auth(username, password):\n return username == 'admin' and password == 'secret'", "title": "" }, { "docid": "9a69976520ed4e7b735e358cb9b62fae", "score": "0.7878089", "text": "def check_auth(username, password):\n return username == queryname and password == querypw", "title": "" }, { "docid": "a5d4d9a16cb164b50e568e3820766de1", "score": "0.7854986", "text": "def check_auth(username, password):\n return username == 'guest' and password == 'password'", "title": "" }, { "docid": "5c78fe569208edf64c83d86763bb5ece", "score": "0.7848427", "text": "def check_credentials(username, password):\n dm = datamodel.TmData(cherrypy.request.app.config['/'])\n user = dm.get_user(username)\n if not user:\n return \"Incorrect username or password\"\n h = hashlib.md5()\n h.update(password.encode('utf-8'))\n pwd = h.hexdigest()\n if user[1] != pwd:\n return \"Incorrect username or password\"\n return None", "title": "" }, { "docid": "3f6c477a47524f7e9406383ca56a5ed2", "score": "0.7742826", "text": "def check_auth(username, password):\n return 'lme' == username and 'lme' == password", "title": "" }, { "docid": "f0a4316a23ba050cbedf3f52df5f4126", "score": "0.77216727", "text": "def check_auth(username, password):\n\n env_password = getenv('HTTP_PASSWORD', cast=str)\n env_username = getenv('HTTP_USERNAME', cast=str)\n\n return username == env_username and password == env_password", "title": "" }, { "docid": "87dd51bc6aa1aacb0052d73b69f2366b", "score": "0.77120525", "text": "def valid_login(username: str, password: str) -> bool:\n account = execute_one_query(\"SELECT * FROM person WHERE username = %s\", username)\n return account and check_password_hash(account[1], password)", "title": "" }, { "docid": "ed0517500a80ed3c1f44d947a940853e", "score": "0.77034366", "text": "def check_credentials():\n user = session.get('user', None)\n user_auth = session.get('user_auth', None)\n if user and user_auth:\n return check_auth(user, user_auth)\n return False", "title": "" }, { "docid": "a3ede46eb03c97821391740393adc1f9", "score": "0.7692338", "text": "def check_auth(username, password): \n if not User.objects(username=username):\n return False\n user = User.objects.get(username=username) \n if not user or not user.verify_password(password):\n return False\n return True", "title": "" }, { "docid": "ae002e1d72dea0138022e98a68341cfc", "score": "0.76773405", "text": "def check_auth(username, password):\n return username == 'TRAIN' and password == 'TuN3L'", "title": "" }, { "docid": "0b68542cf987b1ff2423e79672d13d1a", "score": "0.7650397", "text": 
"def check_user_password(username, password):\n from x84.bbs import find_user, get_user\n handle = find_user(username)\n if handle is None:\n return False\n user = get_user(handle)\n if user is None:\n return False\n return password and user.auth(password)", "title": "" }, { "docid": "814cd55d2f7f50fdbdfdc42e265b526e", "score": "0.7596233", "text": "def checkAuthentication(username, password):\n\n # get the ticket\n if getAuthTicket(username, password) is not None:\n return True\n\n return False", "title": "" }, { "docid": "ffc50175e5425d43d9e16b43031469d0", "score": "0.7582177", "text": "def validate(kls, username, password):\n user = kls.get(username)\n if user is None:\n return False\n else:\n return user.check_password(str(password))", "title": "" }, { "docid": "1231c21bd809d7bedef3f3290262ddca", "score": "0.7582004", "text": "def verify_password(username, password):\n if not username or username != current_app.config['TOKEN']:\n return False\n return True", "title": "" }, { "docid": "447b26cf6bf91537cd2cec617ba7a269", "score": "0.75226456", "text": "def check_auth(password):\n return password == PASSWORD", "title": "" }, { "docid": "0be4373630d3dbd6abb463a93bd7d4b6", "score": "0.75160027", "text": "def is_valid_credentials(username: str, password: str) -> bool:\n\n # If username is empty then no point attempting to validate, so return False\n if not username:\n return False\n\n try:\n user = UserService.get_user_by_username(username)\n except NotFound:\n return False\n\n login_success = AuthenticationService._is_valid_password(password, user.password)\n dmis.authenticated_user_id = user.user_id\n\n return login_success", "title": "" }, { "docid": "f1f01712f42742c4a218c56ca0ad11ae", "score": "0.7477888", "text": "def check_creds(username, password):\n\n STMT = \"SELECT username, password_hash FROM users WHERE username = %s\"\n \n conn = database.get_conn()\n cursor = conn.cursor()\n cursor.execute(STMT, (username, ))\n user = cursor.fetchone()\n cursor.close()\n conn.close()\n\n if user is None:\n return False\n\n return argon2.verify(password, user[1])", "title": "" }, { "docid": "ec7529d23edab1339b9259d3061a7916", "score": "0.7470366", "text": "def check_auth(username, password):\n return username == os.environ['HTUSER'] and password == os.environ[\n 'HTAUTH']", "title": "" }, { "docid": "43b8ede1ad1bbdac523a4d20de10b873", "score": "0.74423933", "text": "def is_login_valid(username, password):\n username_valid = False\n flag = False\n correct_password = get_password(username)\n if correct_password: # Username exists\n username_valid = True\n if password == correct_password: # Password is correct\n flag = True\n return (username_valid, flag)", "title": "" }, { "docid": "3da0b0dd6d37847da799eb2aee437e1d", "score": "0.74130994", "text": "def check_auth(username, password):\n\n email = username\n password = password\n cursor = mysql.connect().cursor()\n cursor.execute(\"SELECT COUNT(*) FROM dav_users WHERE email='\" + email + \"' AND password='\" + password + \"'\")\n data = cursor.fetchone()\n\n if data == 0:\n return False\n else:\n return True\n # return username == 'admin' and password == 'secret'", "title": "" }, { "docid": "b506d4572a9adef794501f5dc408f8f1", "score": "0.7391268", "text": "def check_password(self, username, password):\n assert self.dev_users\n return self.dev_users.get(username) == password", "title": "" }, { "docid": "5a6b932f16e30417f0e2b130d69b655a", "score": "0.7327687", "text": "def check_auth(username, password):\n User1 = User.query.filter(User.username == 
username).first()\n if User1 and User1.password ==password:\n return True\n else:\n return False", "title": "" }, { "docid": "1d69b85a55166f8e50b7f999b2bc9626", "score": "0.73253906", "text": "def check_pw(username, password):\n required_columns = \"passwd\"\n where_clause = \"username = %s\"\n params = (username, )\n user_details = fetch_from_table(\n required_columns=required_columns,\n where_clause=where_clause,\n params=params,\n table=\"agent\")\n\n if user_details:\n valid_pw = bytes(user_details[0][\"passwd\"], 'utf8')\n return valid_pw == hashpw(password.encode(\"utf-8\"), valid_pw)\n else:\n return False", "title": "" }, { "docid": "2d49979aadfebeefbcb353a5a31942d6", "score": "0.7315844", "text": "def check_password():\n\n credential = request.form.get(\"credential\")\n password = request.form.get(\"password\")\n\n if password_is_correct(credential, password):\n return \"true\"\n else:\n return \"false\"", "title": "" }, { "docid": "1e5b33c699769d6af1a5736cef0f3c99", "score": "0.72665334", "text": "def check_credentials(self):\n select_query = \"SELECT UserPassword FROM User WHERE Email = %s\"\n self._cursor.execute(select_query, (self._email, )) \n\n fetched_data = self._cursor.fetchone()\n\n if not fetched_data:\n print(\"Your email is not registered in the database.\")\n return False\n\n fetched_password = fetched_data[0]\n if fetched_password == self._password:\n print(\"Congratz, u (\"+self._email+\") are logged in!\")\n return True\n else: \n print(\"The password does not match!\")\n return False", "title": "" }, { "docid": "994534ab53d1f89a5753bcf4d7923ee8", "score": "0.72379994", "text": "def check_password(username, password):\r\n query = \"\"\"SELECT password FROM clients\r\n WHERE username=?\"\"\"\r\n cursor.execute(query, (username, ))\r\n actual_pass = cursor.fetchone()\r\n if not actual_pass:\r\n return False\r\n if actual_pass[0] == hash_password(password):\r\n return True\r\n return False", "title": "" }, { "docid": "0419c0562dfb9a0fee650604db5b9018", "score": "0.7228656", "text": "def authenticate(self, user, password):\n\t\tif user == \"root\" and password == \"toor\":\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "title": "" }, { "docid": "ec46ef4363d7c435a1bda6bf2ea0dafe", "score": "0.7216104", "text": "def is_authorised(username, password):\n return username == \"pokemon\" and password == \"go\"", "title": "" }, { "docid": "1918cb7cc1b10aae90bba46c8f5beee3", "score": "0.72094506", "text": "def validate(self, password: str) -> bool:", "title": "" }, { "docid": "66c5fbd3b2e0a05a256a6fe8a7847302", "score": "0.72084755", "text": "def valid_login(username, password):\n db = get_db()\n db.ping(True)\n cur = db.cursor()\n\n try:\n sql = \"SELECT password FROM users WHERE user_name = '{}';\".format(username)\n cur.execute(sql)\n for i in cur:\n return check_password_hash(i[0], password)\n return False\n except mysql.connector.Error as err:\n flash(err, \"set\")\n return False\n finally:\n cur.close()\n db.close()", "title": "" }, { "docid": "eb8570703752bb811e0432b369f9c61c", "score": "0.7195735", "text": "def verify_login(user: dict) -> bool:\n username = user.get(\"username\")\n password = user.get(\"password\")\n if not username or not password:\n return False\n existing_user = User.query.filter_by(username=username).first()\n if not existing_user:\n return False\n if check_password_hash(existing_user.password, password):\n return True\n return False", "title": "" }, { "docid": "830213656456299c9c4256830c112ed0", "score": "0.7161857", "text": "def 
_checkAuthentication(self, name, password, request):\n # For the base implementation the password is checked\n # against acl_users.\n aclu = getToolByName(self, 'acl_users')\n res = aclu.authenticate(name, password, request)\n authentication_ok = res is not None\n return authentication_ok", "title": "" }, { "docid": "93fdb4deb35d7a8c37e38077c517b88e", "score": "0.71430695", "text": "def login_check(username, password):\n user_pw = db.session.query(tables['organizers'].password).filter_by(username=username)\n if user_pw.scalar() is None:\n return False\n split_list = user_pw.one().password.split('$')\n salt = split_list[1]\n password_hashed = split_list[2]\n return get_hashed_text(salt, password) == password_hashed", "title": "" }, { "docid": "78c90726b07c5d3446fdf18732e13c4e", "score": "0.71260417", "text": "def check_password(self, username, password, env=None):\n if not username or not password:\n return False\n provider = self.get_sync_provider(fallback=True)\n if username == 'root' and not provider.syncs_root:\n provider = ajenti.usersync.AjentiSyncProvider.get()\n\n if not username in ajenti.config.tree.users:\n return False\n\n try:\n provider.sync()\n except Exception as e:\n logging.error(str(e))\n\n result = provider.check_password(username, password)\n\n provider_name = type(provider).__name__\n\n ip_notion = ''\n ip = env.get('REMOTE_ADDR', None) if env else None\n if ip:\n ip_notion = ' from %s' % ip\n\n if not result:\n msg = 'failed login attempt for %s (\"%s\") through %s%s' % \\\n (username, password, provider_name, ip_notion)\n syslog.syslog(syslog.LOG_WARNING, msg)\n logging.warn(msg)\n else:\n msg = 'user %s logged in through %s%s' % (username, provider_name, ip_notion)\n syslog.syslog(syslog.LOG_INFO, msg)\n logging.info(msg)\n return result", "title": "" }, { "docid": "63e89e3afead92b660887821ca6498b5", "score": "0.711976", "text": "def verify_password(username, password):\n from .logic import get_users\n\n if hasattr(flask_g, 'username'):\n if not flask_g.username == '' and username == '':\n username = flask_g.username\n if hasattr(flask_g, 'password'):\n if not flask_g.password == '' and password == '':\n password = flask_g.password\n\n if username == '':\n app.logger.warning(\"Unauthorized access attempted!\")\n return False\n\n if username == 'admin':\n if 'ADMIN_PASS_FILE' in app.config:\n\n try:\n f = open(app.config['ADMIN_PASS_FILE'], 'r')\n app.config['ADMIN_PASS'] = str(f.read()).rstrip(\"\\r\\n\")\n f.close()\n except IOError as e:\n print(\"Unable to read admin password file: %s\" % app.config['ADMIN_PASS_FILE'])\n return False\n\n if check_password_hash(app.config['ADMIN_PASS'], str(password)):\n flask_g.username = username\n flask_g.password = password\n return True\n else:\n return False\n\n if not username == '':\n users_l = get_users(q_username=username, page=1, per_page=1)\n if len(users_l) < 1:\n print(\"User {} not found\".format(username))\n return False\n else:\n user = users_l[0].as_dict()\n\n if check_password_hash(user['password'], str(password)):\n flask_g.username = user['username']\n flask_g.password = user['password']\n return True\n else:\n return False", "title": "" }, { "docid": "7b6967ce9bc5a3ddd3be1c28bce63aea", "score": "0.70946634", "text": "def _verify_credentials(self):\n r = requests.get(self.apiurl+\"account/verify_credentials.xml\", auth=HTTPBasicAuth(self._username, self._password),\n headers=self.header)\n if r.status_code != 200:\n raise UserLoginFailed(\"Username or Password incorrect.\")", "title": "" }, { "docid": 
"a2462b13ac96abc341ad3c5579193fd6", "score": "0.7082045", "text": "def is_correct_credential_pair(username, password):\n return bool(is_valid_username(username) and\n is_valid_password(username, password) and\n is_valid_credential_pair_db(username, password))", "title": "" }, { "docid": "0385406cc62e39a30350a42023aad711", "score": "0.7081029", "text": "def verify_user(self, username: str, password: str):\n result = self.__get_user(username=username)\n db_password = result[0].get('password') if result else ''\n return self.__verify_password(password=password, hashed_password=db_password)", "title": "" }, { "docid": "17c2819c6071104f921afcb5841c96e7", "score": "0.7079846", "text": "def is_valid_credential_pair_db(username, password):\n connection = connect_db()\n cursor = connection.cursor(buffered=True)\n cursor.execute('SELECT password FROM Users WHERE username=%s LIMIT 1',\n (username,))\n result = cursor.fetchall()\n close_db(connection)\n return bool(result and\n bcrypt.checkpw(password.encode(), result[0][0].encode()))", "title": "" }, { "docid": "bb28f04baf938bd5818b023b9edac620", "score": "0.7055832", "text": "def check_auth(username, password):\n return os.getenv('COMPOSE_USERNAME') == username and os.getenv('COMPOSE_PASSWORD') == password", "title": "" }, { "docid": "d574ec08638be468ed43352cea3c4f53", "score": "0.7052717", "text": "def authenticate(username, password):\n lines = list()\n with open(\"passwords.txt\", \"r\") as f:\n lines = f.readlines()\n for line in lines:\n line = line.rstrip(\"\\n\")\n stored_username, stored_password = line.split(\":\")\n if stored_username == username and stored_password == password:\n return True\n return False", "title": "" }, { "docid": "fd4834a3168bab442c75bf3bca12d766", "score": "0.705246", "text": "def check_auth(username, password):\n dn = secrets.DN_STRING.format(username)\n with ldap_connection(secrets.LDAP_URL) as conn:\n try:\n conn.simple_bind_s(dn, password)\n conn.search_s(secrets.BASE_DN, ldap.SCOPE_ONELEVEL)\n except ldap.LDAPError:\n return False\n return True", "title": "" }, { "docid": "4413f7ad84a9b230bd7fb6a905c53766", "score": "0.70457727", "text": "def check_auth(username, password):\r\n for user in USER:\r\n if user['user']== username and user['pass']==password:\r\n return True\r\n #return username == 'admin' and password == 'secret'\r", "title": "" }, { "docid": "9e38bfdb05be81df39fd603bc71eba43", "score": "0.7042962", "text": "def user_exist(self,password):\n if self.password == password:\n return True", "title": "" }, { "docid": "9ef268e4258bae4751ed7abd0aee1c1a", "score": "0.70418", "text": "def verify_password(username, password):\n if not username:\n # Warning/info?\n return False\n single_user_data = userdata.get(username)\n if single_user_data:\n stored_pw = single_user_data.get(PASSWORD)\n assert stored_pw is not None, 'Server error: user data must contain a password!'\n res = sha256_crypt.verify(password, stored_pw)\n else:\n logger.info('User \"{}\" does not exist'.format(username))\n res = False\n return res", "title": "" }, { "docid": "a1fce9f617636a7d46d36f920c2107ce", "score": "0.7037066", "text": "def verify_login(username, password, user_data):\n # Encode password using salt\n salted_password = password + user_data.salt\n encoded_password = hashlib.sha512(salted_password.encode()).hexdigest()\n\n # Verify if encoded password corresponds to user's password in the database\n # will return True if password is correct\n return compare_digest(encoded_password, user_data.password)", "title": "" }, { "docid": 
"1e1c33ab495b0edb66ff01e171a8afc4", "score": "0.7033827", "text": "def authenticate(self, entusername, entpassword):\r\n self.EnteredUsername = entusername\r\n self.EnteredPassword = entpassword\r\n\r\n if self.EnteredUsername in self.user_accounts:\r\n if self.user_accounts[self.EnteredUsername] == self.EnteredPassword:\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "848df66ade1245604b7b5c501906ca45", "score": "0.70296574", "text": "def valid_credentials(_login, _password):\n center = Center.query.filter_by(_login=_login, _password=_password).first()\n if center:\n return True\n else:\n return False", "title": "" }, { "docid": "7109876df3819735417d20787a444ad0", "score": "0.6999185", "text": "def verifypwd(username, password):\n dbuser = db.GqlQuery(\"SELECT * FROM LoginData\")\n\n for x in dbuser:\n if x.username == username:\n pwhash = str(x.password)\n return valid_pw(username, password, pwhash)\n return False", "title": "" }, { "docid": "900e295641b53a82f272e4d2493fb403", "score": "0.69919485", "text": "def _test_auth(self):\n user = self.get_user()\n try:\n if \"Invalid username/password\" in user.json().values():\n return False\n else:\n return True\n except JSONDecodeError:\n warnings.warn(\"ESPA Service appears to be offline! Response: {}\".format(user.json()))\n return True", "title": "" }, { "docid": "eb702d3e3e41cdac8b27be61f4c7dff2", "score": "0.69806623", "text": "def check_user_pass(usr, pwd):\n\n ret = AUTHDB.get(usr, None)\n if (ret is None) or (ret != pwd):\n return False\n return True", "title": "" }, { "docid": "c80ff91ca8830b1f109717aaefc0055a", "score": "0.696238", "text": "def is_authentic(self, login, password):\n \n return True", "title": "" }, { "docid": "2d0b86405a808bff61c0cdc9a7047a50", "score": "0.6959467", "text": "def valid_login(self, email: str, password: str) -> bool:\n if type(email) is not str or type(password) is not str:\n return False\n\n try:\n user = self._db.find_user_by(email=email)\n pwd = bytes(password, 'utf-8')\n if bcrypt.checkpw(pwd, user.hashed_password):\n return True\n else:\n return False\n except NoResultFound:\n return False", "title": "" }, { "docid": "198a25037c736d6c0ca5ca44c4a63feb", "score": "0.6955093", "text": "def is_user_valid(self, username, password):\r\n temp_hashed_password = self.get_hashed_password(password)\r\n user = self.session.query(User).filter(User.user_name == username).filter(\r\n User.password == temp_hashed_password)\r\n return user is not None", "title": "" }, { "docid": "fa546a6ee275144385ba449e19055e7d", "score": "0.69535583", "text": "def is_user_authenticated(self, username, password):\n user = self.db.users.find_one({\n \"username\": username\n })\n if user:\n return user[\"password\"] == Util.sha256(password)\n return False", "title": "" }, { "docid": "c377d37f67170c825e6d2c4f7046cea8", "score": "0.695223", "text": "def valid_login(self, email: str, password: str) -> bool:\n try:\n u = self._db.find_user_by(email=email)\n except NoResultFound:\n return False\n return bcrypt.checkpw(password.encode(), u.hashed_password)", "title": "" }, { "docid": "23cdda55fbb9c9baf3f5eefbda2488e3", "score": "0.69403476", "text": "def authenticate_user(username):\n return True if get_user_by_username(username) else False", "title": "" }, { "docid": "52969e760e13c61b2ac774a37f6f7946", "score": "0.69397795", "text": "def check_my_users(user):\n\t user_data = my_users.get(user['username'])\n\t if not user_data:\n\t return False # <--- invalid credentials\n\t elif user_data.get('password') == 
user['password']:\n\t return True # <--- user is logged in!\n\t\n\t return False # <--- invalid credentials", "title": "" }, { "docid": "279ca4a1936aeef9b6da6108743b59fe", "score": "0.6919977", "text": "def validate_user(username, password, cfpassword, email):\n result = is_username_valid(username) and is_password_valid(password)\n result = result and is_cfpassword_valid(password, cfpassword)\n result = result and is_email_valid(email)\n return result", "title": "" }, { "docid": "114294879d2d9b80758b3c5919a8181e", "score": "0.6884935", "text": "def verify_password(database: str, username: str, password: str) -> bool:\n db = DatabaseController(database)\n user_db_data = db.select_user_data(username)\n return user_db_data[0] == HashPasswordHelper.hash_password(password, user_db_data[1], user_db_data[2])", "title": "" }, { "docid": "15b219a7ac089beabf2a4801f68addad", "score": "0.68764126", "text": "def check_login_good(username, password):\n # Note that we are NOT building our own query via string formatting --\n # we are allowing the psycopg2 library to handle escaping the user inputs\n query = (\n \"SELECT id, username, password FROM users \"\n \"WHERE username = %s AND password = %s\"\n )\n cursor.execute(query, (username, password))\n result = cursor.fetchone()\n return result", "title": "" }, { "docid": "b6448b8288d6ce55e370e95527018b7f", "score": "0.6867732", "text": "def auth(self):\n if self.username is not None and self.password is not None:\n self.session.auth = (self.username, self.password)\n auth_response = self.session.get(self.url + '/')\n if auth_response.status_code != 401:\n return True\n return False", "title": "" }, { "docid": "9ac12cd480c3385905022e8503b3559c", "score": "0.68671745", "text": "def authenticate (username, password):\n try:\n user = User.objects.get(username=username)\n\n if user.check_password(password):\n return user\n else:\n return False\n\n except:\n return False", "title": "" }, { "docid": "144a7db65f820a23ccc6145cd39f280c", "score": "0.6843814", "text": "def check_for_login(error):\n print(\"Checking username and password combination.\")\n if 'password authentication failed' in str(error):\n print(\" Username/password combination is invalid.\")\n print(\" Try checking your username and password. \"\n \"One or both are likely incorrect.\")\n print_error_text(error)\n return True\n print(\" ... 
No issue found.\")\n return False", "title": "" }, { "docid": "4214ed7d065aaf3d774209df473dd7d1", "score": "0.68409956", "text": "def login(self, username, password):\n for registrant in self.accounts:\n if username in registrant['username'] and password in registrant['password']:\n return True\n else:\n return False", "title": "" }, { "docid": "6389af80d922c12980c8f284de769aca", "score": "0.68379873", "text": "def valid_login(self, email: str, password: str) -> bool:\n user = self._db.find_user_by(email)\n if user.email == email:\n if bcrypt.checkpw(password.encode('utf-8'), user.hashed_password):\n return True\n return False", "title": "" }, { "docid": "6130aeaf826a536c28d22dceb80ace99", "score": "0.6819086", "text": "def verify_pw(username, password):\n if not UserExist(username):\n return False\n hashed_pw = users.find({\n \"Username\": username\n })[0][\"Password\"]\n if bcrypt.hashpw(password.encode('utf8'), hashed_pw) == hashed_pw:\n return True\n else:\n return False", "title": "" }, { "docid": "32f312f94f1bead7866b176d07777857", "score": "0.6814118", "text": "def checkLogin(self):\n\t\tprint(username)\n\t\tprint(password)\n\n\t\tusernameInput = input('\\nWhat is your username?\\n')\n\t\tpasswordInput = input('\\nWhat is your password?\\n')\n\n\t\t#Checks if username and password is correct\n\t\tif usernameInput == developer.username:\n\n\t\t\tif passwordInput == developer.password:\n\t\t\t\tprint('\\nSuccess! You are now in the developer menu!')\n\t\t\t\tself.loggedIn = True\n\t\t\t\tuser = developer()\n\t\t\t\tmenu.developerMenu()\n\t\t\telse:\n\t\t\t\tprint('Error: Wrong username or password')\n\t\t\t\tmenu.mainMenu()\n\n\t\telse:\n\t\t\tprint('Error: Wrong username or password')\n\t\t\tmenu.mainMenu()", "title": "" }, { "docid": "9dec8e5477c8fa362f57517b4d88e337", "score": "0.6810149", "text": "def check_login(username=None, password=None):\n print(\"check_login: username=%s password=%s\" % (username, password))\n cl_response_queue = queue.Queue()\n message_data = database.DatabaseDataMessage(\n table_name=\"webserver\",\n field=\"\"\" \"{}\" \"\"\".format(\"WEBSERVER_USER_NAME\"),\n data=\"\"\" \"{}\" \"\"\".format(username),\n caller_queue=cl_response_queue)\n message = database.DatabaseMessage(\n command=database.DatabaseCommand.DB_SELECT_DATA,\n message=message_data)\n db_queue.put(message)\n db_task.join(timeout=0.65)\n\n print(\"check_login: Wait on password response\")\n while cl_response_queue.empty() is True:\n pass\n\n print(\"check_login: Got password response\")\n user_response = cl_response_queue.get()\n print(user_response)\n\n # Make sure there is some sort of response before\n # parsing it.\n if len(user_response):\n db_username = user_response[0][1]\n db_password = user_response[0][2]\n print(\"DB Username = %s\" % (db_username))\n print(\"DB Password = %s\" % (db_password))\n return (db_username == username and db_password == password)\n else:\n # There was no response so no chance of logging in or parsing\n # the response\n return False", "title": "" }, { "docid": "1aeb5e0b01f3001fce3f588e0b959fee", "score": "0.68085146", "text": "def check_auth(username, password, access):\n u = users.get(username,None)\n if u:\n if u[0] == password and (access in u[1] or u[1][0]==\"all\"):\n return username and password\n else:\n return None\n else:\n return None", "title": "" }, { "docid": "b3f147f62833857e9aa2f894693a0466", "score": "0.67997265", "text": "def checkpass():\n username = request.args.get(\"username\")\n password = request.args.get(\"password\")\n\n if not 
username:\n return jsonify(False)\n\n # Ensure password was submitted\n elif not password:\n return jsonify(False)\n\n # Query database for username\n rows = db.execute(\"SELECT * FROM users WHERE username=:username\", username=username)\n\n # Ensure username exists and password is correct\n if len(rows) != 1 or not check_password_hash(rows[0][\"hash\"], password):\n return jsonify(False)\n\n # Remember which user has logged in\n session[\"user_id\"] = rows[0][\"id\"]\n\n # Redirect user to home page\n return jsonify(True)", "title": "" }, { "docid": "7f5c589a7ee36d73d3a1cffcb5535db8", "score": "0.67922425", "text": "def check(user, password, log=True):\n\t\tUser.init()\n\n\t\tuser = user.lower()\n\n\t\tif User.instance.user == b\"\":\n\t\t\treturn True\n\t\telif user == User.instance.user:\n\t\t\tif encryption.gethash(password) == User.instance.password:\n\t\t\t\tif log == True:\n\t\t\t\t\tUser.loginState[0] = True\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tif log == True:\n\t\t\t\t\tUser.loginState[0] = False\n\t\t\t\t\tuseful.syslog(\"Login failed, wrong password for user '%s'\"%useful.tostrings(user))\n\t\telse:\n\t\t\tif user != b\"\":\n\t\t\t\tif log == True:\n\t\t\t\t\tUser.loginState[0] = False\n\t\t\t\t\tuseful.syslog(\"Login failed, unkwnon user '%s'\"%useful.tostrings(user))\n\t\treturn False", "title": "" }, { "docid": "4605d4ab39bfb3db7b671200a5ca6645", "score": "0.6778981", "text": "def check_credentials(server, username, password):\n\tbasicauth_header = urllib2.base64.b64encode('%s:%s' % (username, password))\n\turl = 'https://%s/certsrv/' % server\n\treq = urllib2.Request(url)\n\treq.add_header(\"Authorization\", \"Basic %s\" % basicauth_header)\n\ttry:\n\t\turllib2.urlopen(req)\n\texcept urllib2.HTTPError as error:\n\t\tif error.code == 401:\n\t\t\treturn False\n\t\telse:\n\t\t\traise\n\telse:\n\t\treturn True", "title": "" }, { "docid": "569ccdb39615e6e295144f7cd45cbf69", "score": "0.6774533", "text": "def verify_password_or_access_token(username_or_token: str,\n password: str) -> bool:\n r = requests.get(\n f'{USER_SERVICE}/user-auth',\n json={\n 'username_or_token': username_or_token,\n 'password': password\n }\n )\n if r.status_code == 401:\n return False\n # Save the found username for the current request processing\n g.username = r.json()['data']\n return True", "title": "" }, { "docid": "6407f0a51f10128a1712af7f66b997e9", "score": "0.67520094", "text": "def is_valid_password(username, password):\n return bool(password and username.lower() not in password.lower() and\n len(password) <= 72)", "title": "" }, { "docid": "4fb7a809428078c942ec599fb30d9cac", "score": "0.67436266", "text": "def check_credentials(self, **kwargs):\n if not (self.cfg(\"username\") and self.cfg(\"password\") and self.cfg(\"prefix\")):\n warnings.warn(\n f\"The {self.__class__.__name__} is misconfigured. 
Please \"\n f\"set {self.cfgkey('username')}, {self.cfgkey('password')}\"\n f\" and {self.cfgkey('prefix')} in your configuration.\",\n UserWarning,\n )", "title": "" }, { "docid": "f7123dd6a9933176db5a7a43e4319591", "score": "0.6729454", "text": "def check_login_credentials():\n\n # Look at the stdout stream to see the values coming in from the login form\n print('request.form values from login_form.html')\n for x in request.form:\n print(' {}: {}'.format(x, request.form[x]))\n\n\n credentials_ok = True # Any failed test reverses value to False\n\n # Check that data entry fields exist and have some value in them\n\n # if '/check_credentials' route invoked without using login_prompt page\n # then the request.form dictionary will not have expected entries\n if 'user_id' not in request.form:\n credentials_ok = False\n\n elif 'password' not in request.form:\n credentials_ok = False\n\n # if one of the request.form login entry fields is left empty\n elif not request.form['user_id']:\n credentials_ok = False\n\n elif not request.form['password']:\n credentials_ok = False\n\n # At this point, take the login credentials to do a database check\n # 1. retrieve a user from the database with user_id as the key\n # 2. encrypt the password\n # 3. check that the encrypted password is correct for the user\n\n elif not user_in_database():\n credentials_ok = False\n\n elif not user_password_match():\n credentials_ok = False\n\n if credentials_ok:\n return redirect(url_for('first_action'))\n\n flash('Username-Password combination not valid.')\n return redirect(url_for('present_login_form'))", "title": "" }, { "docid": "f54350ddf26e630bc18a08a5af4bde64", "score": "0.67226404", "text": "def validate_credentials(username, password):\n user = db_conn.query_db('SELECT * FROM `xeno`.`users` WHERE `userid`=%s', [username], True)\n\n #print \"Validating: \", username, \":\", password\n if user is None: # not a valid username\n return None\n\n stored_salted_password = user[\"hpass\"]\n end_salt_pos = stored_salted_password.find('==') + 2\n salt = stored_salted_password[0:end_salt_pos]\n # stored_password = stored_salted_password[end_salt_pos:] # this isnt needed I dont think...\n\n if stored_salted_password == encode_password(password, salt):\n userid = user[\"userid\"]\n return User(userid, user)\n ''' This is a simple test.\n if username == \"Xeno\" and password == \"cars\":\n userid = \"testing\"\n return User(userid)\n '''\n return None", "title": "" }, { "docid": "6d6fa7a48c6d3544fc6437c88815340a", "score": "0.67139715", "text": "def check_basic_auth(user, passwd):\n\n auth = request.authorization\n return auth and auth.username == user and auth.password == passwd", "title": "" }, { "docid": "4f4712fffe921b6058d3168ffbf237bb", "score": "0.67130435", "text": "def password_check(username):\n password = input(\"Password: \")\n check_status = check_in_file(username, password, mode='password')\n return check_status", "title": "" }, { "docid": "6ffbe9824ca8f5d99011a176cea79b17", "score": "0.6659585", "text": "def has_password(self, password):\n return password==self.user_name", "title": "" } ]
ddabbd7b41297deab1ffbb5f91311cdb
Destroy all the stores.
[ { "docid": "f9ebd72c0ef3741a5c880058e8813d2e", "score": "0.81792104", "text": "def destroy_stores(self):\n shutil.rmtree(os.path.join(DATA_STORE, self.name))", "title": "" } ]
[ { "docid": "9f429cbeb9566b1fe28e72709e16eb8b", "score": "0.7163623", "text": "def cleanupStore(self):\n if self.store:\n self.store.cleanup()", "title": "" }, { "docid": "ec3f31f5e70bd594f86b9c285dbaf905", "score": "0.64436644", "text": "def clean(self):\n logger.debug('Deleting all runtimes')\n\n runtimes = self.list_runtimes()\n\n for runtime in runtimes:\n runtime_name, runtime_memory = runtime\n self.delete_runtime(runtime_name, runtime_memory)\n\n layers = self._list_layers()\n for layer_name, _ in layers:\n self._delete_layer(layer_name)", "title": "" }, { "docid": "e21915ec1b9a39cc746035ba84b3725f", "score": "0.6295653", "text": "def clear(self):\n self.delete_objects()\n self.del_hooks()\n self.broker.reset()\n self.emane.reset()", "title": "" }, { "docid": "d547b880caad6e370c62b5bd174772fa", "score": "0.6247881", "text": "def destroy(self):\n for container in self._containers.copy():\n container.destroy()\n\n assert len(self._containers) == 0", "title": "" }, { "docid": "c01beac2a818ab23b4be8ee58870420c", "score": "0.62467194", "text": "def delete_all(self):\n pass", "title": "" }, { "docid": "7259762281128ac5d618abd7857b24a7", "score": "0.62402064", "text": "def deleteAll(self):\n return prod_collection.delete_many({})", "title": "" }, { "docid": "7f7d0267d4f9144f253fb2901bbb22f5", "score": "0.61734307", "text": "def clear(self) -> None:\n\n if self.pipeline._transformer:\n for idx in self._store:\n self.pipeline(idx).unpersist(blocking=True)\n\n self._store.clear()", "title": "" }, { "docid": "b174da2c9ef863f3a564091664f50219", "score": "0.61593086", "text": "def clear_all_caches(self):\n\n for key in self._registry:\n self.clear_cache(key)", "title": "" }, { "docid": "6c382e30a56ff26aecba4a1aec40478e", "score": "0.6154785", "text": "def destroy_all(self):\r\n\r\n [widget.destroy() for widget in self.widgets]", "title": "" }, { "docid": "241375b422701040bf7b619346cd64a5", "score": "0.6136179", "text": "def remove_all_tools(self):\n self.db_manager.remove_all_tools()", "title": "" }, { "docid": "f15f55d191469c3e863bc5b240868c05", "score": "0.6100213", "text": "def remove_all_tools(self):\n logger = Logger().get()\n logger.debug(\"remove all tools\")\n if self.db_name != \"test\":\n logger.error(\"Cannot remove all tools unless current db\"\n \" is \\\"test\\\"!\")\n return\n\n try:\n # Get ids of all jobs\n delete_result = self.__tools_collection.delete_many({})\n\n # Set the flag of initialization to False\n metadata = self.__metadata_collection.find()[0]\n metadata['has_initialized_pharos'] = False\n\n condition = {\"_id\": metadata['_id']}\n self.__metadata_collection.update_one(condition, {\"$set\": metadata})\n\n logger.debug(f\"Remove {delete_result.deleted_count} tools.\")\n except Exception as e:\n logger.error(f\"something wrong in remove_all_tools, Exception: {e}\")", "title": "" }, { "docid": "b97ecf50d9921e0b02f175d70ef30e07", "score": "0.6092327", "text": "def remove_all(cls):\n for document in cls.database:\n document.delete()", "title": "" }, { "docid": "1278c3ee54bd2f566fdbe1156fe16cec", "score": "0.6078893", "text": "def purge_all(self):\r\n raise NotImplementedError(\"Sub-classes of Manager should implement\"\r\n \" .purge_all()\")", "title": "" }, { "docid": "062869e11510e52d83aeae05c73b27cc", "score": "0.6078348", "text": "def clean(self):\n for loc in self.locations.values():\n shutil.rmtree(loc)", "title": "" }, { "docid": "8b200d11687334a84c3ab3439eaa58b3", "score": "0.6061952", "text": "def _purge_all(self):\n for key in self.keys():\n self.delete(key)", 
"title": "" }, { "docid": "b8f4f2e8247a69ccd5f4c601d2e7e48c", "score": "0.6052412", "text": "def empty_all_models():\n # Iterate through LetterGameInsertions the models and call the empty model method to clean them out\n for model in DeleteData.models:\n DeleteData.empty_model(model)", "title": "" }, { "docid": "b92203da3e6e21e5d0747190e695bbe0", "score": "0.60521567", "text": "def cleanUp(self):\n for machine in self._machines.copy():\n self.destroyMachine(machine)\n\n assert len(self._machines) == 0", "title": "" }, { "docid": "896b9f3bae108884076d7ae2ded820af", "score": "0.60494834", "text": "def cleanall(self):\n return self._manager.cleanall()", "title": "" }, { "docid": "9439d46bc2d7f519a0d27e468fd84050", "score": "0.6023564", "text": "def unsubscribe_all(self):\n for sub in self.session:\n sub.destroy()", "title": "" }, { "docid": "fadab4e114a45544c7c6b47e9f7c447e", "score": "0.6018372", "text": "def clean_up_service(self):\n for service in self.service_list:\n self.catalog_api.delete_service(service['id'])", "title": "" }, { "docid": "329a8fbe9867a8778c6041e6341182dd", "score": "0.6014507", "text": "def clear(self) -> None:\n self._store.clear()", "title": "" }, { "docid": "8c9f324751da5edf50b0939caa1cbb02", "score": "0.6001814", "text": "def clear_all(self):\n keys = self._analytics_backend.keys()\n\n for key in itertools.chain(*keys):\n with self._analytics_backend.map() as conn:\n if key.startswith(self._prefix):\n conn.delete(key)", "title": "" }, { "docid": "3f33f40c07048d4dc7dac26ab2c3494b", "score": "0.5997771", "text": "def purge_all(delete=False, verbosity=0):\n _location_purge_all(delete, verbosity)\n _storage_purge_all(delete, verbosity)", "title": "" }, { "docid": "fef5742de59ff9ebc5e5db991688ff8b", "score": "0.5980893", "text": "def _purge_all(self):\r\n for key in self.keys():\r\n self.delete(key)", "title": "" }, { "docid": "565b08b6ed9cd1cb6c1f9013596427bc", "score": "0.5976179", "text": "def clear_db(self):\n People.objects.all().delete()\n Company.objects.all().delete()\n Tag.objects.all().delete()\n Food.objects.all().delete()\n self.stdout.write(self.style.SUCCESS('Successfully cleared all data'))", "title": "" }, { "docid": "1afcd44503928444c2d6b7d94a931773", "score": "0.5959921", "text": "def delete_all(self):\n query = self.db.session.query(models.Work)\n for work_obj in query:\n self.db.session.delete(work_obj)\n self.db.session.commit()", "title": "" }, { "docid": "dd541506e1e68ae11f83e050721cb6f2", "score": "0.5946315", "text": "def destroy_tables(self):\n tables = self.table_names\n for table in tables:\n self.drop(table)\n self.save()\n self.close()", "title": "" }, { "docid": "7d73a274add37f74b4e5fd8c9c824a2f", "score": "0.5940249", "text": "def creasesClearAll(self):\n \n pass", "title": "" }, { "docid": "f695fd9477073ccfeb5f8baee49c4b6c", "score": "0.59389484", "text": "def delete_everything(self):\n self.instance.delete_everything()", "title": "" }, { "docid": "507e4a63e6378a8d59c5985e66e0a0bc", "score": "0.59107876", "text": "def clear(self, verbose=False):\n with session_scope() as session:\n numSym = session.query(Synonym).delete()\n numPosts = session.query(Post).delete()\n numRels = session.query(SynonymPostAssociation).delete()\n session.commit()\n if verbose:\n print(\n f'''Successfully deleted all data from database: \n {numSym} synonyms, \n {numPosts} posts, \n and {numRels} synonym<->post relations.''')", "title": "" }, { "docid": "f1eff50e99f0b114297be0e5db55faa1", "score": "0.59047455", "text": "def cleanup():\n remove_all_objects()\n 
remove_orphan_data()", "title": "" }, { "docid": "b06dc30f7b212f3e1e2fc5455bf75380", "score": "0.58954585", "text": "async def reset_all():\n return State().delete().execute()", "title": "" }, { "docid": "a2c5041c401f98e22ab39a1a4a26d325", "score": "0.58738637", "text": "def clear(self):\n\t\tself.store = {}\n\t\tif not self.lazysave: self.save()", "title": "" }, { "docid": "15d8a1ec2505759dbd6949f70096a1a1", "score": "0.5869648", "text": "def clearall(self):\n for res in self._getResNames():\n self.clear(res)", "title": "" }, { "docid": "43e73da3a2b6e12eb142862ccd5da63f", "score": "0.5857684", "text": "def destroy_all(self, destroy_canvas: bool = False) -> None:\n\n if destroy_canvas:\n self.commands.append({\"$type\": \"destroy_ui_canvas\",\n \"canvas_id\": self._canvas_id})\n else:\n for ui_id in self._ui_ids:\n self.commands.append({\"$type\": \"destroy_ui_element\",\n \"id\": ui_id,\n \"canvas_id\": self._canvas_id})\n self._ui_ids.clear()", "title": "" }, { "docid": "a4a2b8e90251c955f08c0537bcf3709d", "score": "0.58495027", "text": "def cleanup(self):\n helpers.delete_objs_parallel(pod.get_all_pods(namespace=self.namespace))\n helpers.delete_objs_parallel(self.all_pvc_obj)\n self.rbd_sc_obj.delete()\n self.cephfs_sc_obj.delete()", "title": "" }, { "docid": "cd01117bdd068e1fd4c5c8112c2a836e", "score": "0.58392483", "text": "def delete_all(self):\n allC=[]\n self.__storeToFile(allC)", "title": "" }, { "docid": "740cecd9460007af81d4b4f2aff5daeb", "score": "0.5839114", "text": "def _purge_all(self):\r\n raise NotImplementedError(\"Sub-classes of Manager should implement\"\r\n \" ._purge_all()\")", "title": "" }, { "docid": "5a624af741000fd66e10a3b861171b81", "score": "0.58384585", "text": "def clear(self):\r\n logger.debug('Clearing all models from memory.')\r\n self.java.clear()", "title": "" }, { "docid": "7ed3ffff4232c717f930460d161ab710", "score": "0.5836243", "text": "def reset_dbs(self) -> None:\n self.mongo().clear_db()\n self.redis().clear_db()", "title": "" }, { "docid": "5b3939ee6d50ca3f4a06a579735ecf80", "score": "0.58268785", "text": "def delete_all_sessions():\n AccountSession.delete().execute()", "title": "" }, { "docid": "e3c2bd1f6de5c895d1d729d105ec3fdb", "score": "0.5819684", "text": "def delete_all(self):\n logger.info('Flushing database...')\n logger.debug('Call Successful: %s', 'delete_all: delete_all call successful', extra=d)\n logger.debug('Flushall Attempt: %s', 'delete_all: attempting to flush all items from queue', extra=d)\n self.r.flushall()\n logger.info('Database emptied')", "title": "" }, { "docid": "855bbfdecf264f065476d0c732d28eb4", "score": "0.581262", "text": "def _clear(self):\n self.stdout.write(\"Clearing data\")\n\n get_user_model().objects.all().delete()\n Manager.objects.all().delete()\n Member.objects.all().delete()\n Shift.objects.all().delete()\n Priority.objects.all().delete()\n Status.objects.all().delete()\n Group.objects.all().delete()", "title": "" }, { "docid": "b607f351896ac1709f52af8b65419b2d", "score": "0.57891786", "text": "def fusion_srm_ilo_api_delete_all_sso_certs(self):\n self.ilo_client.delete_all_sso_certs()", "title": "" }, { "docid": "791db763472eeb7964fe9e2d8d607817", "score": "0.5788232", "text": "def deregister_all_nodes(self):\n\n self.nodes = set()\n storage.node.remove_all()", "title": "" }, { "docid": "37efc2934bf38e64ca442dccd4de76f8", "score": "0.5777878", "text": "def delete_all(self):\n self._exec_delete_all(self.table)", "title": "" }, { "docid": "76ce97885fae296943b806281bec402e", "score": "0.57773423", "text": 
"def unload(self):\n for component in self.components.values():\n component.destroy()\n\n self.on_unloaded()", "title": "" }, { "docid": "933b6b4c38ed104c15c1619907da5450", "score": "0.57717335", "text": "def del_all_saved(self):\n self.saved = dict()", "title": "" }, { "docid": "6bf98e4b8090950c35ca700291d60484", "score": "0.5754316", "text": "def clear_data():\n Student.objects.all().delete()\n Teacher.objects.all().delete()\n Homework.objects.all().delete()\n HomeworkResult.objects.all().delete()", "title": "" }, { "docid": "1fabc363496190d8e0edf4c6e5f81a81", "score": "0.5753621", "text": "def _purge_all(self):\r\n # Given the races around connection closing, the easiest thing to do\r\n # here is to create a new manager with the same config for cleanup\r\n # operations.\r\n new_manager = yield self.from_config(self._config)\r\n # If we're a submanager we might have a different key prefix.\r\n new_manager._key_prefix = self._key_prefix\r\n yield new_manager._do_purge()\r\n yield new_manager._close()", "title": "" }, { "docid": "a5b7ff978406cef2e04d95dab3fce72d", "score": "0.5737473", "text": "def _purge_all(self):\n # Given the races around connection closing, the easiest thing to do\n # here is to create a new manager with the same config for cleanup\n # operations.\n new_manager = yield self.from_config(self._config)\n # If we're a submanager we might have a different key prefix.\n new_manager._key_prefix = self._key_prefix\n yield new_manager._do_purge()\n yield new_manager._close()", "title": "" }, { "docid": "ba2e9432643075f9de8aa5d9613e0781", "score": "0.5735537", "text": "def clear_all(self):", "title": "" }, { "docid": "e14ef081f832e7e5db92e12740e4aae6", "score": "0.57301855", "text": "def delete(self):\n for vertex_list in self._vertex_lists:\n vertex_list.delete()\n self._vertex_lists.clear()\n\n for box in self._boxes:\n box.delete(self)\n self._boxes.clear()", "title": "" }, { "docid": "135a9aeee5eeb23d1a9c2590402e5b9d", "score": "0.57231396", "text": "def destroy_all_containers(self):\n self.log.info(\"Destroying %d containers\", len(self.container))\n errors = self.destroy_containers(self.container)\n if errors:\n self.log.error(\n \"Errors detected destroying %d containers: %d\",\n len(self.container), len(errors))\n for error in errors:\n self.log.error(\" %s\", error)\n self.container = []\n return len(errors) == 0", "title": "" }, { "docid": "44e224f8432087fc2fd00ebadf18bc24", "score": "0.5716785", "text": "def del_all_V1(self):\n\n self.del_all_file_V1()\n self.del_all_collection_V1()", "title": "" }, { "docid": "f7d134f1cedb4d19af724b4e5d541368", "score": "0.57161874", "text": "def clear_all_queues(self):\n return self.client.delete_queues()", "title": "" }, { "docid": "c41479e4b7145f079ea96a7926ca6ed2", "score": "0.56908864", "text": "def destroyAllPanels (self):\n\n panels = (self.comparePanel, self.colorPanel, self.findPanel, self.fontPanel, self.prefsPanel)\n\n for panel in panels:\n if panel:\n panel.top.destroy()", "title": "" }, { "docid": "a11f1b4fe1ab1e4ff784dc0d809b8ce8", "score": "0.56860757", "text": "def cleanup_manager(deco, self):\n yield self.manager.purge_all()\n yield self.manager.close_manager()", "title": "" }, { "docid": "d3484881918678f43d19e8ecb93050c2", "score": "0.56849766", "text": "def cleanup(self):\n for process in self.processes:\n process.terminate()", "title": "" }, { "docid": "f4f5361a1e0e3140495dd046c60d0886", "score": "0.56846565", "text": "def system_reset(self, system_id):\n self.db.delete_all_documents('projects')\n 
self.db.delete_all_documents('experiments')\n self.db.delete_all_documents('experimentsMetrics')\n self.delete_queue()\n self.delete_pool()", "title": "" }, { "docid": "19e9ca50f8ac44dd8ccb18210139324d", "score": "0.5680037", "text": "def delete_all_servers():\n nectar.delete_all_servers()", "title": "" }, { "docid": "918caddfe098989be6b465713eee58f2", "score": "0.5674994", "text": "def destroy():\n subprocess.check_call([settings.APP_NAME, 'main', 'destroy'])\n subprocess.check_call([settings.APP_NAME, 'addons', 'destroy'])\n subprocess.check_call([settings.APP_NAME, 'deis', 'destroy'])", "title": "" }, { "docid": "a43c7a24bde13ce662b3657e92bcf837", "score": "0.5667435", "text": "def cleardb():\n\n with app.app_context():\n db.drop_all()\n print 'Database cleared'", "title": "" }, { "docid": "f5c3d66d29c9a20864fbe38741719f8e", "score": "0.56654847", "text": "def clear(self):\n self.results.delete_many({})\n self.complementary.delete_many({})\n self.space.delete_many({})", "title": "" }, { "docid": "8406a56b356c8dbdb3fe7072f3e3de1b", "score": "0.56584585", "text": "def teardown(self):\n with app.app_context():\n # drop all tables\n db.session.remove()\n db.drop_all()", "title": "" }, { "docid": "8954bc92eea49cebcb7c93db2a7110c2", "score": "0.56506383", "text": "def delete_all():\n try:\n storm_.delete_all_entries()\n print get_formatted_message('all entries deleted.', 'success')\n except Exception as error:\n print get_formatted_message(str(error), 'error')", "title": "" }, { "docid": "0592ddeda8a0278b3d60907feee51dfe", "score": "0.56480664", "text": "def deleteAllProcessesAndSamples(self):\n # TODO: Experiment.deleteAllProcessesAndSamples()\n pass", "title": "" }, { "docid": "ea0851ddc7c595b1a6053e458d51e1cc", "score": "0.56466013", "text": "def close(self):\n self._store.close()", "title": "" }, { "docid": "0ebc3a6fcdb7d14e0b9fc32c89351af7", "score": "0.56297535", "text": "def tearDown(self):\n logger.info('tearDown: empty the table')\n for customer in db.Customer:\n customer.delete_instance()", "title": "" }, { "docid": "ed9670c07afb8a7f4da9cad8c49ad933", "score": "0.56294066", "text": "def teardown(self):\n\n self.shut_down_instance(self.instances)\n self.instances = []\n try:\n self.client.delete_internet_gateway(InternetGatewayId=self.internet_gateway)\n self.internet_gateway = None\n self.client.delete_route_table(RouteTableId=self.route_table)\n self.route_table = None\n for subnet in list(self.sn_ids):\n # Cast to list ensures that this is a copy\n # Which is important because it means that\n # the length of the list won't change during iteration\n self.client.delete_subnet(SubnetId=subnet)\n self.sn_ids.remove(subnet)\n self.client.delete_security_group(GroupId=self.sg_id)\n self.sg_id = None\n self.client.delete_vpc(VpcId=self.vpc_id)\n self.vpc_id = None\n except Exception as e:\n logger.error(\"{}\".format(e))\n raise e\n self.show_summary()\n os.remove(self.config['stateFilePath'])", "title": "" }, { "docid": "cce13fe0f389882242937ad9b0a794d0", "score": "0.56236607", "text": "def tear_down_all(self):\n self.dut.kill_all()", "title": "" }, { "docid": "97678985353310231e525e26a9f88d00", "score": "0.5622182", "text": "def clear_data():\r\n meta = db.metadata\r\n for table in reversed(meta.sorted_tables):\r\n db.session.execute(table.delete())\r\n db.session.commit()", "title": "" }, { "docid": "4e51b3b2d881c7000c0bbda7ba16ef90", "score": "0.5619535", "text": "def tear_down_all(self):\n pass", "title": "" }, { "docid": "4e51b3b2d881c7000c0bbda7ba16ef90", "score": "0.5619535", 
"text": "def tear_down_all(self):\n pass", "title": "" }, { "docid": "4e51b3b2d881c7000c0bbda7ba16ef90", "score": "0.5619535", "text": "def tear_down_all(self):\n pass", "title": "" }, { "docid": "4e51b3b2d881c7000c0bbda7ba16ef90", "score": "0.5619535", "text": "def tear_down_all(self):\n pass", "title": "" }, { "docid": "967b26fd661ca89c794c4a2ef851f0b9", "score": "0.5612156", "text": "def drop_all(self):\n Base.metadata.drop_all(self.engine)", "title": "" }, { "docid": "1bdd2f558aa3e6b2ef7127e27604f5d5", "score": "0.5599224", "text": "def delete_all_tables():\n Base.metadata.drop_all(bind=engine, tables=[Operations.__table__])", "title": "" }, { "docid": "1cfdc6f58ab3aab5bbc03c0056c487c2", "score": "0.55987054", "text": "def clear(self):\n for k in self.data_to_save:\n self.__dict__[k].clear()", "title": "" }, { "docid": "33285f4245fc2c07e310afc650d9bd49", "score": "0.55893", "text": "def delete_everything(self):\n for bucket in self.buckets.values():\n bucket.delete_everything()\n self.buckets = {}", "title": "" }, { "docid": "a2bf317b298e89d2539031faed941335", "score": "0.55866617", "text": "def clear_all_databases(settings, nuke=False):\n\n\n #if not raw_input(\"Are you sure you want to delete everything? Y/N: \") == 'Y':\n # sys.exit(\"Not deleting, getting out of here.\")\n\n if nuke:\n settings = open_settings()\n Session, engine = load_connection(settings['connection_string'])\n\n session = Session()\n for table in reversed(Base.metadata.sorted_tables):\n #if table.name == 'files':\n # #continue\n # pass\n try:\n print(\"Deleting {}\".format(table.name))\n session.execute(table.delete())\n session.commit()\n except:\n print(\"Cannot delete {}\".format(table.name))\n pass\n else:\n sys.exit('TO DELETE DB YOU NEED TO SET NUKE FLAG, EXITING')", "title": "" }, { "docid": "8aae91c61d61b2926dee790de95dce66", "score": "0.55815357", "text": "def close_all(cls):\n with cls._lock:\n for key, value in list(cls._instances.items()):\n value.__close_resources()\n cls._instances.clear()", "title": "" }, { "docid": "6863b84a7b5bc0abfbc2df6c5535216f", "score": "0.5580423", "text": "def reset_storage(self):\n for host in self._host_dict:\n self.reset_qubits_from_host(host)", "title": "" }, { "docid": "a648eaf943ef6137e9bab53caef47f07", "score": "0.5578176", "text": "def destroy_models(self):\n for node in self.conv_nodes_iterator():\n del node.model\n clear_session() # removing session", "title": "" }, { "docid": "a4539298c11453571f799f873920de25", "score": "0.5575184", "text": "def delete_all_songs(self):\n session = self._db_session()\n\n session.query(Song).delete()\n session.commit()\n\n session.close()", "title": "" }, { "docid": "2a64c4dc8fcc4e2bbae7ad590d18e757", "score": "0.55732703", "text": "def _restore_storages(self):\n\n for storage in self._objects.values():\n storage._restore()", "title": "" }, { "docid": "5b2b692e0d93ce0b47cdcdec69049f3d", "score": "0.5572601", "text": "def clear(self) -> None:\n self.subscribers_configurer.clear()\n if self._use_store_queues:\n self.store_configurer.clear()", "title": "" }, { "docid": "e5268f437e3cdac314a22af7d6105d17", "score": "0.5568259", "text": "def delete_all(self):\n self.images.clear()", "title": "" }, { "docid": "fd63bddeeb3ed74067c115e9e6c8e82a", "score": "0.5564514", "text": "def cleanup(self, force_stop=True):\n for worker in self.workers():\n worker.stop(force_stop=force_stop)\n\n try:\n self._subscriber.delete_subscription(self._subscription_path)\n print(\"Deleted subscription \" + self._subscription_path)\n except:\n pass\n try:\n 
self._publisher.delete_topic(self._topic_path)\n print(\"Deleted topic \" + self._topic_path)\n except:\n pass", "title": "" }, { "docid": "19eb06d8bfa97d56bed7113d626f5883", "score": "0.556271", "text": "def __del__(self):\n for ins in self.instruments.values():\n ins.close()\n self.resource_mgr.close()", "title": "" }, { "docid": "3784e1c8a9b8938bd9700977f1fb1c53", "score": "0.55528665", "text": "def _location_purge_all(delete=False, verbosity=0):\n if DataLocation.objects.exists():\n for location in DataLocation.objects.filter(Q(purged=False) | Q(data=None)):\n location_purge(location.id, delete, verbosity)\n else:\n logger.info(\"No data locations\")", "title": "" }, { "docid": "279f29f6f17e28216618ae60be6edba4", "score": "0.5544224", "text": "def cleanup():\n for dataset in Dataset.all():\n for link in Link.all_unmatched(dataset):\n db.session.delete(link)\n db.session.flush()\n db.session.commit()", "title": "" }, { "docid": "49afbfddd15b2481e4865752ac738c86", "score": "0.55387414", "text": "def cleanup_all(data_home=None):\n removed = 0\n for name, meta in DATASETS.items():\n _, ext = os.path.splitext(meta[\"url\"])\n removed += cleanup_dataset(name, data_home=data_home, ext=ext)\n\n print(\n \"Removed {} fixture objects from {}\".format(removed, get_data_home(data_home))\n )", "title": "" }, { "docid": "7696d6f488b865b5cd5c54501b0c4a93", "score": "0.55384505", "text": "def cleanup(self):\n os.rmdir(self.tleap_dir)\n os.rmdir(self.antechamber_dir)\n os.rmdir(self.parmchk_dir)", "title": "" }, { "docid": "84119d7346d766217715187f5a4124ed", "score": "0.55298626", "text": "def clear(self):\n self.__owned_workspaces.clear()", "title": "" }, { "docid": "2bbf6e83874f4bd04f583161a57d60a1", "score": "0.5524603", "text": "def clear_tables():\r\n\r\n # Delete all rows in table, so if we need to run this a second time,\r\n # we won't be trying to add duplicate users\r\n User.query.delete()\r\n Relational.query.delete()\r\n Trip.query.delete()\r\n Wishlist.query.delete()\r\n\r\n print('ALL TABLES HAVE BEEN CLEARED OF THEIR DATA')", "title": "" }, { "docid": "2c8aec0e02fed328e82218cd5434ec0a", "score": "0.55243576", "text": "def clear(self):\n for shared_generators in self._shared_generators:\n shared_generators.clear()\n self._shared_generators = []", "title": "" }, { "docid": "16f7b57cb64ca885516f00fad6f60b61", "score": "0.5522722", "text": "def delete_all(self):\n self.__projects.clear()", "title": "" }, { "docid": "e953a0c46f6939eb999709f07efc5e31", "score": "0.55177927", "text": "def delete_all_tracks(self):\n self._tracks = []\n self._eps = []\n # leave the funcs and labels", "title": "" }, { "docid": "f5c01705550d8300c3e06f7fe1b8e537", "score": "0.5516964", "text": "def drop_all(self):\n try:\n self.__lock.acquire_write()\n Logger.instance().debug(\"Dropping all tables...\")\n Base.metadata.drop_all(self.engine)\n finally:\n # Always release the lock\n self.__lock.release()", "title": "" } ]
c41095549bd4ed7e3560b4e62f9c5644
Toggle the feature /autosave
[ { "docid": "3d88796a7e042d014f8f188692779a26", "score": "0.6832002", "text": "def autosave(connection):\r\n if not connection.protocol.autosave:\r\n connection.protocol.autosave = True\r\n return \"Autosave enabled.\"\r\n else:\r\n connection.protocol.autosave = False\r\n return \"Autosave disabled.\"", "title": "" } ]
[ { "docid": "95232758d83058425927fc755b481bdc", "score": "0.7093163", "text": "def autosave_handler(self):\n self.autosave_timer_waiting = False\n if self.editor.document().isModified():\n self.autosave()", "title": "" }, { "docid": "cbae35afd4db48fd9c88b845cb79de50", "score": "0.65503746", "text": "def autosave(self):\n return self._autosave", "title": "" }, { "docid": "b5915126d5f89e130a74f4b0f299ebf9", "score": "0.62856436", "text": "def autosave(self):\n tabs = self.tabs\n self.save_by_uuid(\n tabs['uuid'],\n tabs['name'],\n tabs['text'],\n str(tabs.currentIndex()),\n tabs.get('path')\n )\n self.editor.document().setModified(False)\n self.sync_tab_indices()", "title": "" }, { "docid": "e0406e14fe1f75001799dbe6192797db", "score": "0.61678654", "text": "def MarkAsSaved(self):", "title": "" }, { "docid": "0800d85ed3881472f5671737e19bdc74", "score": "0.6018382", "text": "def autosave(self):\n\t\tself.log.debug(\"Session: autosaving\")\n\t\t# call saving through horizons.main and not directly through session, so that save errors are handled\n\t\tsuccess = self.save(SavegameManager.create_autosave_filename())\n\t\tif success:\n\t\t\tSavegameManager.delete_dispensable_savegames(autosaves = True)", "title": "" }, { "docid": "3abac37d4d9e1ae7ceadcdeb35b65826", "score": "0.5979588", "text": "def save(self):\n self.config.set(\"makeactive\", self.makeactive)", "title": "" }, { "docid": "3bbff150710f5ecc9d94259e5c7f1cc6", "score": "0.58593255", "text": "def is_saved(self, event=None):\r\n file_name = os.path.basename(self.pathfile)\r\n if file_name == \"\":\r\n file_name = \"untitled\"\r\n\r\n if self.content_to_save != self.text_sheet.get(\"1.0\", \"end-1c\"):\r\n self.saved = False\r\n self.master.title(\"%s : Mind Note*\" % file_name)\r\n else:\r\n self.saved = True\r\n self.master.title(\"%s : Mind Note\" % file_name)", "title": "" }, { "docid": "82734711fc5cb4e25c7bcb451de93dce", "score": "0.58260196", "text": "def save_timer(self):\n self.autosave_timer_waiting = True\n if self.autosave_timer.isActive():\n self.autosave_timer.stop()\n\n self.setup_save_timer()\n self.autosave_timer.timeout.connect(self.autosave_handler)\n if self.editor.document().isModified():\n self.autosave_timer.start()", "title": "" }, { "docid": "a6e3f5cba977acf6a25112fb6b0d64c8", "score": "0.5791793", "text": "def on_save_activate(self, *args):\n self.app.save_notelist()", "title": "" }, { "docid": "c0fecb3864e0bc42c8d47e51e3143ada", "score": "0.573265", "text": "def save(self):\n Preferences.setEditor(\n \"AutoCompletionEnabled\",\n self.acEnabledGroupBox.isChecked())\n Preferences.setEditor(\n \"AutoCompletionCaseSensitivity\",\n self.acCaseSensitivityCheckBox.isChecked())\n \n Preferences.setEditor(\n \"AutoCompletionReversedList\",\n self.acReversedCheckBox.isChecked())\n Preferences.setEditor(\n \"AutoCompletionReplaceWord\",\n self.acReplaceWordCheckBox.isChecked())\n Preferences.setEditor(\n \"AutoCompletionThreshold\",\n self.acThresholdSlider.value())\n Preferences.setEditor(\n \"AutoCompletionScintillaOnFail\",\n self.acScintillaCheckBox.isChecked())\n Preferences.setEditor(\n \"AutoCompletionTimeout\",\n self.acTimeoutSpinBox.value())\n Preferences.setEditor(\n \"AutoCompletionCacheEnabled\",\n self.acCacheGroup.isChecked())\n Preferences.setEditor(\n \"AutoCompletionCacheSize\",\n self.acCacheSizeSpinBox.value())\n Preferences.setEditor(\n \"AutoCompletionCacheTime\",\n self.acCacheTimeSpinBox.value())\n Preferences.setEditor(\n \"AutoCompletionWatchdogTime\",\n self.acWatchdogDoubleSpinBox.value() * 
1000)\n Preferences.setEditor(\n \"AutoCompletionMaxLines\",\n self.acLinesSlider.value())\n Preferences.setEditor(\n \"AutoCompletionMaxChars\",\n self.acCharSlider.value())", "title": "" }, { "docid": "9a9d9a75b3a48f1e99191b5f12a15cd2", "score": "0.5720629", "text": "def toggle_autoPreview(self):\n if self.__auto_timer.isActive():\n self.__auto_timer.stop()\n else:\n self.__auto_timer.start(100)", "title": "" }, { "docid": "74cac84c171d63f8abac35a6746991e1", "score": "0.5643983", "text": "def onOpenSave(editor, path):\n\tproject = getProjectForFile(path)\n\tif not project:\n\t\treturn\n\n\teditor.project = project\n\tproject.applyOptions(editor)", "title": "" }, { "docid": "f51c936ff28dd29e08abe7ebef578e6b", "score": "0.5627555", "text": "def dispModeSave(self, mode):\n pass", "title": "" }, { "docid": "fdaf1df06cdc5cfb6d9fb52710cbb20d", "score": "0.56194866", "text": "def saveasbuttonclicked(self, _):\n fname = self.savedialog()\n if fname is None:\n return\n self.docpath = fname\n self.savefile()", "title": "" }, { "docid": "3e374662cbf2ff597a9b3e122da60002", "score": "0.55892575", "text": "def _stop_editing():\n global _EDITABLE\n _EDITABLE = False", "title": "" }, { "docid": "f18eda7f2fe070f967dbbf87741be501", "score": "0.55647784", "text": "def SaveEnabled(self) -> bool:", "title": "" }, { "docid": "2109e4d30360af962747f1051f0da90c", "score": "0.55417275", "text": "def savebuttonclicked(self, _):\n if self.docpath is not None:\n self.savefile()\n else:\n self.saveasbuttonclicked(None)", "title": "" }, { "docid": "437614a1f8895e34f0f79e536cd4e449", "score": "0.54992074", "text": "def Regenmode(self) -> bool:", "title": "" }, { "docid": "33133b70030a813613f0e57a3c03dbd4", "score": "0.54925835", "text": "def save(interactive=True, all=True):\n pass # implemented in Ada", "title": "" }, { "docid": "6ec407ebcea6cc9b7f444924d4467993", "score": "0.542806", "text": "def edit_pad(self):\n\t\tpath = join(self.save_dir, vim.current.line.split(\" @\")[0])\n\t\tvim.command(\"bd\")\n\t\tself.pad_open(path=path)", "title": "" }, { "docid": "a2ae09e1099d71351afe1a6fc2790b88", "score": "0.54264396", "text": "def _on_save_changed(self):\n\n if self.save_file.isChecked():\n self.path_widget.setEnabled(True)\n else:\n self.path_widget.setEnabled(False)", "title": "" }, { "docid": "efb18b6beeca98e3befb76d7149ffef9", "score": "0.5418351", "text": "def vi_editing_mode(event: E) -> None:\n event.app.editing_mode = EditingMode.VI", "title": "" }, { "docid": "c4916b6b137ded40794e3ab5b286792f", "score": "0.5415203", "text": "def select_short_view_mode_in_toolbar(self):\n self._file_actions_toolbar.select_short_view_mode_in_toolbar()", "title": "" }, { "docid": "df40c31e985fa963782fbf9bf79724d0", "score": "0.5406741", "text": "def save(replace=True):", "title": "" }, { "docid": "dc6f3318d8f5436cf760f0f9eb91e454", "score": "0.5398526", "text": "def issue_autoset(self):\n cmd = \"AUT\"\n self.write(cmd)", "title": "" }, { "docid": "baa8ddba24c1466c1e6c3b36b98339f3", "score": "0.537731", "text": "def setAutoFocus(self,b):\n self.autoFocus=b", "title": "" }, { "docid": "9a89ca8d832d8bdb64a49ff31661c717", "score": "0.5374753", "text": "def edit_mode(self, value):\n self.editMode = value\n if not self.editMode:\n # back to original state\n self.paths.active_path = None\n self.update()", "title": "" }, { "docid": "8512a30b8b97ae63a3ceb4960be43d6d", "score": "0.5371871", "text": "def on_save_as_activate(self, *args):\n result = self.save_as_dialog.run()\n self.save_as_dialog.hide()\n if result == gtk.RESPONSE_OK:\n 
self.app.save_notelist_as(self.save_as_dialog.get_filename())", "title": "" }, { "docid": "fcfbea8452784178f7f432d296d8fa41", "score": "0.5360709", "text": "def save(self):\n enabled = self.viewerGroupBox.isChecked()\n if enabled:\n Preferences.setDocuViewer(\n \"ShowInfoOnOpenParenthesis\",\n self.parenthesisCheckBox.isChecked())\n Preferences.setDocuViewer(\n \"Provider\",\n self.providerComboBox.itemData(\n self.providerComboBox.currentIndex())\n )\n else:\n Preferences.setDocuViewer(\"Provider\", \"disabled\")", "title": "" }, { "docid": "bf493c2eb041d75814193b6b6df74246", "score": "0.53548384", "text": "def save_as(self, fname, no_prompt = 0, echo_cmd=0):\n\n if echo_cmd: self.echo_command('save_as', fname, no_prompt)\n self.app.save_file(fname, no_prompt = no_prompt)", "title": "" }, { "docid": "0dba19e91e39556f3c0487a94a49c1c2", "score": "0.5343752", "text": "def save(self, interactive=True, file='Same file as edited by the buffer'):\n pass # implemented in Ada", "title": "" }, { "docid": "7259db93d2a13b62b027f1eae3307c45", "score": "0.5343587", "text": "def autosave_notelist(self):\n if self.dirname:\n self.save_notelist()", "title": "" }, { "docid": "a70a5f0e1661986cdc6f6a1ce9aea3fc", "score": "0.53324705", "text": "def saving(self):\n print(\"Saving....\")", "title": "" }, { "docid": "51b3348f80818a6b3cc89da3f8fedd9b", "score": "0.5318991", "text": "def save(self, event: typing.Optional[tk.Event] = None):\n self.withdraw()\n self.update_idletasks()\n self.apply()\n self.cancel()", "title": "" }, { "docid": "f3fc366de9b25c607da08d2b9ad10de6", "score": "0.53185445", "text": "def Activar(self):\n self.activa = True\n self.focus = True", "title": "" }, { "docid": "970851f3347e9b4ab616fd3cb34a87de", "score": "0.53126293", "text": "def on_press(event):\n if v.mode == Filename.MODE_OPEN:\n mode = wx.FD_OPEN\n elif v.mode == Filename.MODE_APPEND:\n mode = wx.FD_SAVE\n else:\n mode = wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT\n dlg = wx.FileDialog(control, v.browse_msg, style=mode)\n if v.get_directory_fn is not None:\n dlg.SetDirectory(v.get_directory_fn())\n if v.exts is not None:\n dlg.SetWildcard(\"|\".join([\"|\".join(tuple(x)) for x in v.exts]))\n if dlg.ShowModal() == wx.ID_OK:\n if v.set_directory_fn is not None:\n v.set_directory_fn(dlg.GetDirectory())\n v.value = dlg.GetFilename()\n setting_edited_event = SettingEditedEvent(\n v, self.__module, v.value, event\n )\n self.notify(setting_edited_event)\n self.reset_view()", "title": "" }, { "docid": "d52767741d6d03ce1d1c9caa704502a2", "score": "0.53011686", "text": "def ToggleFeature(self, event, feature, val, IdRange):\n if feature == \"IndentSize\":\n for id in IdRange:\n item = wx.FindWindowById(id)\n item.SetIndent(val)\n elif feature == \"IndetationGuides\":\n for id in IdRange:\n item = wx.FindWindowById(id)\n item.SetIndentationGuides(val)\n elif feature == \"BackSpaceUnindent\":\n\n for id in IdRange:\n item = wx.FindWindowById(id)\n item.SetBackSpaceUnIndents(val)\n elif feature == \"Whitespace\":\n\n for id in IdRange:\n item = wx.FindWindowById(id)\n item.SetViewWhiteSpace(val)\n elif feature == \"UseTabs\":\n\n for id in IdRange:\n item = wx.FindWindowById(id)\n item.SetUseTabs(val)\n elif feature == \"CarretWidth\":\n\n for id in IdRange:\n item = wx.FindWindowById(id)\n item.SetCaretWidth(val)\n elif feature == \"IndentSize\":\n\n for id in IdRange:\n item = wx.FindWindowById(id)\n item.SetTabWidth(val)\n elif feature == \"LineNumbers\":\n\n for id in IdRange:\n item = wx.FindWindowById(id)\n if val == True:\n\n 
item.SetMarginWidth(1, 45)\n else:\n item.SetMarginWidth(1, 1)\n elif feature == \"FoldMarks\":\n\n for id in IdRange:\n item = wx.FindWindowById(id)\n if val == True:\n item.SetMarginType(2, wx.stc.STC_MARGIN_SYMBOL)\n item.SetMarginMask(2, wx.stc.STC_MASK_FOLDERS)\n item.SetMarginSensitive(2, True)\n item.SetMarginWidth(2, 12)\n elif val == False:\n item.SetMarginWidth(2, 1)\n elif feature == \"SyntaxHighlight\":\n\n if val == False:\n for id in IdRange:\n item = wx.FindWindowById(id)\n item.StyleClearAll()\n elif val == True:\n for id in IdRange:\n SyntCol.ActivateSyntaxHighLight(id)\n elif feature == \"StatusBar\":\n\n item = wx.FindWindowById(999)\n if val == True:\n item.Show(True)\n else:\n item.Hide()\n elif feature == \"TabWidth\":\n\n for id in IdRange:\n item = wx.FindWindowById(id)\n item.SetTabWidth(val)\n elif feature == \"EdgeLine\":\n\n if val == False:\n for id in IdRange:\n item = wx.FindWindowById(id)\n item.SetEdgeMode(wx.stc.STC_EDGE_NONE)\n else:\n for id in IdRange:\n item = wx.FindWindowById(id)\n item.SetEdgeMode(wx.stc.STC_EDGE_LINE)\n elif feature == \"EdgeColumn\":\n\n for id in IdRange:\n item = wx.FindWindowById(id)\n item.SetEdgeColumn(val)\n elif feature == \"SourceBrowser\":\n\n counter = 0\n if val == True:\n for id in IdRange:\n item = wx.FindWindowById(2000 + id)\n nb = wx.FindWindowById(4003 + id)\n if self.GetOption(\"SourceBrowser\") and not self.GetOption(\"FileTree\"):\n item.parent.GetParent().GetParent().GetParent().SplitVertically(item.GetParent().GetParent().GetParent(),\n wx.FindWindowById(1001 + id))\n nb.AddPage(item.GetParent(), \"Source Browser\")\n counter += 1\n else:\n\n for id in IdRange:\n item = wx.FindWindowById(2000 + id)\n nb = wx.FindWindowById(4003 + id)\n if not self.GetOption(\"SourceBrowser\") and not self.GetOption(\"FileTree\"):\n item.parent.GetParent().GetParent().GetParent().Unsplit(item.GetParent().GetParent().GetParent())\n nb.RemovePage(self.GetTab(\"Source Browser\", nb))\n counter += 1\n elif feature == \"FileTree\":\n\n counter = 0\n if val == True:\n for id in IdRange:\n item = wx.FindWindowById(5000 + id)\n nb = wx.FindWindowById(4003 + id)\n if self.GetOption(\"FileTree\") and not self.GetOption(\"SourceBrowser\"):\n item.parent.GetParent().GetParent().GetParent().SplitVertically(item.GetParent().GetParent().GetParent(),\n wx.FindWindowById(1001 + id))\n nb.AddPage(item.GetParent(), \"File Browser\")\n counter += 1\n else:\n\n for id in IdRange:\n item = wx.FindWindowById(5000 + id)\n nb = wx.FindWindowById(4003 + id)\n if not self.GetOption(\"FileTree\") and not self.GetOption(\"SourceBrowser\"):\n item.parent.GetParent().GetParent().GetParent().Unsplit(item.GetParent().GetParent().GetParent())\n nb.RemovePage(self.GetTab(\"File Browser\", nb))\n counter += 1\n elif feature in [\"PythonShell\", \"BashShell\"]:\n\n item = wx.FindWindowById(4002)\n OSShell = wx.FindWindowById(4000)\n PyShell = wx.FindWindowById(4001)\n Nb_Panel = wx.FindWindowById(998)\n if not self.GetOption(\"PythonShell\"):\n try:\n PyShell.OnClose(0)\n item.RemovePage(self.GetTab(\"Python\", item))\n except:\n pass\n if not self.GetOption(\"BashShell\") and feature == \\\n \"BashShell\":\n\n OSShell.OnClose(0)\n item.RemovePage(self.GetTab(\"OS Shell\", item))\n\n if not self.GetOption(\"PythonShell\") and not self.GetOption(\"BashShell\"):\n\n item.GetParent().GetParent().Unsplit(item.GetParent())\n else:\n\n if self.GetOption(\"PythonShell\") and feature == \\\n \"PythonShell\":\n PyShell.OnRun(0, self.GetOption(\"PyPath\"))\n 
item.AddPage(PyShell.parent, \"Python\")\n PyShell.GetParent().Fit()\n\n if self.GetOption(\"BashShell\") and feature == \\\n \"BashShell\":\n OSShell.OnRun(0, self.GetOption(\"OSPath\"))\n item.AddPage(OSShell.parent, \"OS Shell\")\n OSShell.GetParent().Fit()\n\n item.GetParent().GetParent().SplitHorizontally(Nb_Panel,\n item.GetParent())\n item.GetParent().GetParent().Refresh()\n elif feature == \"EdgeLine\":\n\n if val:\n for id in IdRange:\n wx.FindWindowById(id).SetEdgeMode(wx.stc.STC_EDGE_LINE)\n else:\n for id in IdRange:\n wx.FindWindowById(id).SetEdgeMode(wx.stc.STC_EDGE_NONE)", "title": "" }, { "docid": "9c1be08c88266aa0dc40be10e0ee0f1a", "score": "0.529862", "text": "def DwgFileWasSavedByAutodeskSoftware(self) -> bool:", "title": "" }, { "docid": "ddddccf894fc574db421dc6c5f77dea9", "score": "0.5298492", "text": "def on_autoactivateCheckBox_clicked(self):\n self.autoactivateCheckBox.setChecked(self.__autoactivate)", "title": "" }, { "docid": "4488a811f44c9406370f619979beaba0", "score": "0.52885306", "text": "def editAdvPreferences(self, event):\n for widget in self.widgets:\n widget[0].Enable()", "title": "" }, { "docid": "26e46f4d0d715a2a41622187ca883b95", "score": "0.5284583", "text": "def commit_changes():\n save()\n activate()", "title": "" }, { "docid": "e869333c2c3e99dfacc33c5c38a3d8a4", "score": "0.52738714", "text": "def controlS(self):\n menubar.master.bind_all(\"<Control-s>\", self.save)", "title": "" }, { "docid": "4e224056d1c76d24290b0d99a7842738", "score": "0.52617615", "text": "def refresh_():\n save_script_()\n if not appdata.get('filename'):\n # save was cancelled by the user\n return\n exec_script()\n appdata.set('dirty', False)", "title": "" }, { "docid": "d305bc7309cc99192a32b3d450504924", "score": "0.52614605", "text": "async def cmd_forcesave(self, **_):\n self.config.save(vb=1)\n return \"Saved.\"", "title": "" }, { "docid": "6cbdaf3b2dc8de38429bc83687b68212", "score": "0.52523875", "text": "def define_autosave_path():\n global NUKE_DIR\n global AUTOSAVE_FILE\n AUTOSAVE_FILE = os.getenv(\n 'PYTHONEDITOR_AUTOSAVE_FILE'\n )\n if (AUTOSAVE_FILE is None\n or not parent_isdir(AUTOSAVE_FILE)\n ):\n if not os.path.isdir(NUKE_DIR):\n NUKE_DIR = os.path.expanduser('~')\n AUTOSAVE_FILE = os.path.join(\n NUKE_DIR,\n 'PythonEditorHistory.xml'\n )\n os.environ[\n 'PYTHONEDITOR_AUTOSAVE_FILE'\n ] = AUTOSAVE_FILE\n return AUTOSAVE_FILE", "title": "" }, { "docid": "2a72c0e92db527e8cf69af22e91c6965", "score": "0.524441", "text": "def save():\n if current_filename is not None:\n save_gui(current_filename)\n else:\n save_as()", "title": "" }, { "docid": "2cbed25b821481f066cb1b00259f8503", "score": "0.5240317", "text": "def click_view_mode(self):\n self._file_actions_toolbar.click_view_mode()", "title": "" }, { "docid": "f2056dc75ee8cfd20d92409a5bc9f1a5", "score": "0.52384794", "text": "async def _pane_switch_mode(self) -> None:\n await self._app.current_filepane.pane_switch_mode()", "title": "" }, { "docid": "4b84ff1baa2698418fdf5a94528f66d6", "score": "0.52357274", "text": "def on_toggle_autoupdate_check(self, event):\r\n conf.UpdateCheckAutomatic = event.IsChecked()\r\n conf.save()", "title": "" }, { "docid": "629571b89e34731a190163376d8c3993", "score": "0.52265376", "text": "def force_save(self) -> bool:\n return self.get(\"force-save\", bool_or_value) # type: ignore", "title": "" }, { "docid": "a9dd8a9a15da6e2f8c0ecfd1fb7a2ffd", "score": "0.5218732", "text": "def _setautoOpenMap(self, value):\n\t\tif self.boldedItem:\n\t\t\tself.tree.SetItemBold(self.boldedItem, 
False)\n\t\tif self.selectedItem:\n\t\t\tself.tree.SetItemBold(self.selectedItem, True)\n\t\t\tself.boldedItem = self.selectedItem\n\t\t\tisZip = True\n\t\t\td = self.tree.GetItemData(self.boldedItem).GetData()\n\t\t\tif d.archive:\n\t\t\t\tisZip = d.archive.getType() == 'Zip'\n\t\trorSettings().setSetting(TOOLKIT, \"autoopen\", value)\n\t\trorSettings().setSetting(TOOLKIT, \"autoopeniszip\", isZip)\n\t\t\t\n\t\tself._autoOpenMap = value", "title": "" }, { "docid": "3b0e01712c5e9a5083ccca989e265c30", "score": "0.52047604", "text": "def action_save_as(self):\n\n file_dialog = FileDialog(\n gettext(\"Save Gaphor Model As\"), action=\"save\", filename=self.filename\n )\n\n filename = file_dialog.selection\n\n file_dialog.destroy()\n\n if filename:\n self.save(filename)\n return True\n\n return False", "title": "" }, { "docid": "7d69bb14bf5c0cec81ed01744085003e", "score": "0.5185158", "text": "def select_long_view_mode_in_toolbar(self):\n self._file_actions_toolbar.select_long_view_mode_in_toolbar()", "title": "" }, { "docid": "a586fae8cfb540f8ed5f2b1bc5ddfc59", "score": "0.5184916", "text": "def use_edit_popup(brain):", "title": "" }, { "docid": "b429a2b44fe6c04040cfb100aa094c71", "score": "0.517517", "text": "def toggle_expert_takeover(self):\n self.current_track_vehicle._expert_takeover = not self.current_track_vehicle._expert_takeover", "title": "" }, { "docid": "a39c41d0cd540e711afe1d59daacfb7c", "score": "0.5170487", "text": "def _saveButtonClick(self):\n if len(self._view.languageCombo.currentText()) > 0:\n self._config.language = self._view.languageCombo.currentText()\n self._config.backend = self._view.backendCombo.currentText()\n self._config.gain = self._view.gainSlider.value()\n self._config.camera = self._view.cameraEdit.text()\n self._config.shortcut = self._view.shortcutEdit.text()\n\n self._config.interval = self._view.intervalEdit.value()\n if self._view.intervalLabel.text() == self.interval_modes[2]:\n self._config.interval *= 60\n\n self._config.fullscreen = 0\n if self._view.fullscreenCheck.isChecked():\n self._config.fullscreen = 1\n self._config.startup = 0\n if self._view.startupCheck.isChecked():\n self._config.startup = 1\n\n self.backend.configSave()\n self._config.save()\n\n self._view.close()", "title": "" }, { "docid": "a57d770395fa2f45c9655ec3b22e94fb", "score": "0.51634794", "text": "def commModeSave(self, mode):\n pass", "title": "" }, { "docid": "8c679a8a347ed2407a4f245b0483ba1b", "score": "0.51527876", "text": "def end_save(_session):\n if _session.save_enabled:\n _session.save_enabled = False\n _session.save_file_path = None\n _session.prompt = _session.prompt.replace('*', '')\n print('Save session ended.')\n else:\n print('A save session is not currently enabled.')", "title": "" }, { "docid": "69194b95c39e5bc5add650f96ea192c9", "score": "0.5142079", "text": "def setEnabled(enabled):\n\tonOpenSave.enabled = enabled\n\tonPreOpen.enabled = enabled", "title": "" }, { "docid": "e79070eca524f4a51bf5cdb8483614e4", "score": "0.5137087", "text": "def motion_is_now_voteable(self):\n self.motion1.voteable = True\n self.motion1.save()\n self.browser.refresh()", "title": "" }, { "docid": "2cae90958776e33e49d92dd13b4924fa", "score": "0.51355207", "text": "def emacs_editing_mode(event: E) -> None:\n event.app.editing_mode = EditingMode.EMACS", "title": "" }, { "docid": "18b01cfb2fc4befab240726f0c77e407", "score": "0.5134704", "text": "def Editable(self) -> bool:", "title": "" }, { "docid": "6b4d2f45bc0c551e475cdc706cc32203", "score": "0.51343983", "text": "def 
toggle_name_on(self):\n self.tab.addstr(1, 1, \"{}\".format(self.name), bg.re)\n self.toggle_name = True\n self.tab.refresh()", "title": "" }, { "docid": "48b293050798da00b19b6fb2ba5785b6", "score": "0.5133477", "text": "def create_autosave_file():\n # look for the autosave\n if os.path.isfile(AUTOSAVE_FILE):\n\n # if the autosave file is empty, write header\n # FIXME: can this be an os.stat/get file size?\n # Furthermore, what if it's not empty but has a\n # corrupted header? What are the methods for\n # data preservation?\n with open(AUTOSAVE_FILE, 'r') as f:\n is_empty = not bool(f.read().strip())\n if is_empty:\n create_empty_autosave()\n else:\n\n # if file not found, check if directory exists\n if not os.path.isdir(NUKE_DIR):\n # filehandle, filepath = tempfile.mkstemp()\n # FIXME: set os.environ['PYTHONEDITOR_AUTOSAVE_FILE'] and define_autosave_path()\n # msg = 'Directory %s does not exist, saving to %s' % (NUKE_DIR, filepath)\n msg = 'Directory {0} does not exist'.format(NUKE_DIR)\n raise CouldNotCreateAutosave(msg)\n else:\n create_empty_autosave()\n return True", "title": "" }, { "docid": "18b01cfb2fc4befab240726f0c77e407", "score": "0.5133266", "text": "def Editable(self) -> bool:", "title": "" }, { "docid": "bd5c722c17eb83adead026e8ee90d6d1", "score": "0.5128371", "text": "def save_as(self, action):\n self.chooser_dialog(title=\"Save Document\", callback=self.on_file_save, save=True)\n return False # TODO this signals that file saving was not successful\n # because no action should be taken until the chooser\n # dialog is finish, however the user might then need to\n # repeat the action, once the document was saved and the\n # edited flag was cleared", "title": "" }, { "docid": "68710ccfb8d74ab0d4c34d9ac3af59e0", "score": "0.5126705", "text": "def do_single_save(self):\n pass", "title": "" }, { "docid": "9c8699fdb4d6a3d32841d2e263f61018", "score": "0.51195246", "text": "def set_saved(self, filename):\n\n sel = self.notebook.GetSelection()\n if sel >= 0:\n self.tab_list[sel].filename = filename\n self.set_tab_modified(sel, False)", "title": "" }, { "docid": "541fe96dbbb1faa4928a27a50c8b4646", "score": "0.5114742", "text": "def EnableSelection(self, flag):", "title": "" }, { "docid": "3fd65c4b9b885d3b61383a492c0b7576", "score": "0.5112609", "text": "def save(self):\n self.editor_input.save(self.document)\n self.dirty = False", "title": "" }, { "docid": "9343f9a069333eb2ecd449afefb73752", "score": "0.51024485", "text": "def toggle(self):\n self.setting = not self.setting", "title": "" }, { "docid": "b9bbab25345a1a7770b00a4cc8c76b75", "score": "0.5091114", "text": "def reactivate(self):\n self.active = True\n self.save()", "title": "" }, { "docid": "3d41ddc0b6076bc589963551bf8ac6f8", "score": "0.5091112", "text": "def save_file(self, buff_name, full_path = None, no_prompt = 0,\n rename_buff = 1, ask_for_new_name = 0, perform_callback = 0):\n debug.virtual('GenEdit.save_file')", "title": "" }, { "docid": "9cbccf5f9fb7cd81288f4fc669002639", "score": "0.5086999", "text": "def toggle(self):\n pass", "title": "" }, { "docid": "6df5af509fb87d81407a170ce671d769", "score": "0.5083608", "text": "def save_activated(self) -> None:\n self.setReadOnly(True)\n try:\n with open(self.path, 'w') as file:\n file.write(self.toPlainText())\n except Exception as e:\n # Set text how it was before\n self.setText(self.text_before_edit)\n # Show the error message\n error_msg = QtWidgets.QMessageBox()\n error_msg.setText(f\"{e}\")\n error_msg.setWindowTitle(f'Error trying to save markdown edit.')\n 
error_msg.exec_()", "title": "" }, { "docid": "2fa9bf9971656d4a57bd2a8cb333aa3a", "score": "0.5083561", "text": "def _set_drag_status(self):\n self.viewer.status = trans._(\n 'Hold <Alt> key to open plugin selection. Hold <Shift> to open files as stack.'\n )", "title": "" }, { "docid": "092e68141cf48c5eb195c46a0d09510b", "score": "0.50785273", "text": "def _saveFile(self):\n if self._features:\n filename = filedialog.asksaveasfilename(initialdir=\"/\", title=\"Select file\",\n filetypes=(\n (\"animangle files\", \"*.animangle\"), (\"all files\", \"*.*\")))\n self._features.requestSave(open(filename, \"w\"))", "title": "" }, { "docid": "96b1de120d197377afea2ec4787e1d23", "score": "0.50766003", "text": "def automatic(self):\n return False", "title": "" }, { "docid": "1e6c32ef8915c1d6ef7c76fd0fdece12", "score": "0.5063986", "text": "def edit(self):\r\n\t\treturn False", "title": "" }, { "docid": "10c82d4826e52adecccd3c3037e40199", "score": "0.5058703", "text": "def on_back_ext_changed(self, event):\n\n self.m_back_ext_button.Enable(self.m_back_ext_textbox.GetValue() != self.backup_ext)", "title": "" }, { "docid": "2f5df0fb0676d790a511d97a4ef72ad9", "score": "0.50586194", "text": "def on_toggle_marks(self):\n self.use_marks = True if not self.use_marks else False", "title": "" }, { "docid": "ebcf643b83c90671dffc609a87383e0e", "score": "0.505293", "text": "def auto_save_threshold(self, auto_save_threshold):\n\n self._auto_save_threshold = auto_save_threshold", "title": "" }, { "docid": "a20e5aa93b0b00ef7dcc7fedb2e3a077", "score": "0.50375754", "text": "def create_empty_autosave():\n with open(AUTOSAVE_FILE, 'w') as f:\n f.write(XML_HEADER+'<script></script>')", "title": "" }, { "docid": "9501537625b02f68c8730f86dad58b37", "score": "0.50356936", "text": "def select_manual_option(self):\n if not self.verify_manual_capture_mode():\n self.select_auto_btn()", "title": "" }, { "docid": "364e29d6161e2e11121679eaa7cf6d6f", "score": "0.5033404", "text": "def undo_open():\n _gv.gv_undo_open()", "title": "" }, { "docid": "6205149f5570b529982533e5dc09d548", "score": "0.50273573", "text": "def filename_complete(env: MnoteEnvironment, on_off: click.Choice):\n style = env.config.styles\n env.config.filename_complete = on_off == \"on\"\n env.config.write()\n\n echo_line()\n if env.config.filename_complete:\n echo_line(\"Default '--complete' flag for filename \", style.success(\"ON\"))\n else:\n echo_line(\"Default '--complete' flag for filename \", style.fail(\"OFF\"))", "title": "" }, { "docid": "dfc5fe4478108a270e0c96f50b6393b3", "score": "0.5016182", "text": "def settrigger_mode(self, newval):\n raise NotImplementedError", "title": "" }, { "docid": "5e5febbc7f1ffb85587341edf718739a", "score": "0.5012927", "text": "def set_auto():\n global auto_set\n if not auto_set:\n SC.reg_param('state', state_callback)\n SC.reg_param('source', source_callback)\n V.reg_param('sub_frequencyA', vegas_sb_callback)\n auto_set = True", "title": "" }, { "docid": "a1ef8eaefaa11a5455f332702988a773", "score": "0.50129217", "text": "def display_edit_completed():\r\n print('Edit saved')", "title": "" }, { "docid": "7c380d78069f58423e9c10cbabed3341", "score": "0.5006341", "text": "def _update_active(self, b):\n super(DemoUniverse, self)._update_active(b)\n scns = self.active()\n if not scns: return\n flds = [scn.field for scn in scns]\n fks = [scn.field_kind for scn in scns]\n fmls = [scn.field_ml for scn in scns]\n folder = self._controls['field']\n fopts = folder['fopts'].options\n fld = flds[0]\n fk = fks[0]\n fml = fmls[0]\n if 
not len(set(flds)) == 1:\n for scn in scns: scn.field = fld\n if not len(set(fks)) == 1:\n for scn in scns: scn.field_kind = fk\n if not len(set(fmls)) == 1:\n for scn in scns: scn.field_ml = fml\n folder[fld].value = fk\n folder.activate(fld, enable=True)\n folder.deactivate(*[f for f in fopts if f != fld])\n if fld == 'SolidHarmonic':\n ofks = [str(i) for i in range(8) if str(i) != fk]\n folder.activate(fk, enable=True)\n folder.deactivate(*ofks)\n folder._set_gui()", "title": "" }, { "docid": "46b7c759110d2f9b36f7f10de20214b9", "score": "0.50020224", "text": "def settings_save():\n\n answer = messagebox.askyesno(title='Закінчити редагування',\n message='Зберегти зміни?')\n if answer:\n tariffs_changing(entry_gas.get(), entry_water.get(), entry_electricity.get())\n set_win.destroy()\n button_settings.configure(state=NORMAL)\n\n elif not answer:\n set_win.destroy()\n button_settings.configure(state=NORMAL)", "title": "" }, { "docid": "e4ce94137df85a404b256450d062e394", "score": "0.5000504", "text": "def onSave(self, event):\n current_settings = GeneralSettingsData()\n\n current_settings.logging = self.cb_logging.GetValue()\n current_settings.use_hotkeys = self.cb_usehotkeys.GetValue()\n current_settings.warn_large_img = self.cb_warn_large.GetValue()\n if self.tc_hk_next.GetLineText(0):\n current_settings.hk_binding_next = tuple(\n self.tc_hk_next.GetLineText(0).strip().split(\"+\")\n )\n else:\n current_settings.hk_binding_next = None\n if self.tc_hk_pause.GetLineText(0):\n current_settings.hk_binding_pause = tuple(\n self.tc_hk_pause.GetLineText(0).strip().split(\"+\")\n )\n else:\n current_settings.hk_binding_pause = None\n\n current_settings.set_command = self.tc_setcmd.GetLineText(0).strip()\n\n current_settings.save_settings()\n # after saving file apply in tray object\n self.parent_tray_obj.read_general_settings()", "title": "" }, { "docid": "5f1b4403b86ffe3d16978cdd9160982a", "score": "0.5000007", "text": "def LoseFocus(self):\n self.focus = False", "title": "" }, { "docid": "cd866744b68d324633244519a4e3d190", "score": "0.49994484", "text": "def ToggleFlag(self, flag):", "title": "" }, { "docid": "c1e837ab172655ead2b8766913fcf594", "score": "0.4991357", "text": "def set_active(self):\r\n FreeCAD.setActiveDocument(self.Name)", "title": "" }, { "docid": "6b2a3bf4184825eb2615dd4290795b47", "score": "0.49871665", "text": "def checkpoint_saved(self):\n self._last_auto_save_time = time()", "title": "" }, { "docid": "d557854ffef646e59f1beb356dbb9d96", "score": "0.49866563", "text": "def updateModified(self):\n #self.ui.action_Add_Image.setEnabled(self._model is not None)\n #self.ui.action_Add_Video.setEnabled(self._model is not None)\n # TODO also disable/enable other items\n #self.ui.actionSave.setEnabled(self.annotations.dirty())\n #self.setWindowModified(self.annotations.dirty())\n pass", "title": "" }, { "docid": "090c58613d16dc6c8d5a103bab52add3", "score": "0.4979805", "text": "def set_edit_mode(request, state):\n setattr(request, \"_fluent_contents_edit_mode\", bool(state))", "title": "" }, { "docid": "116c11d57b72030b521ce5a6ff51964d", "score": "0.49735904", "text": "def on_manual_button_clicked(self, obj):\n\n\t\t# Ensure we can go back...\n\t\tself.idle_add(self.objects[\"parent\"].back_button.set_sensitive, True)\n\t\t\n\t\t# Switch to page 3\n\t\tself.pages_notebook.set_current_page(3)\n\n\t\tself.manual_ready()", "title": "" }, { "docid": "aaead4533be8f6141d0fd27ebc75fd47", "score": "0.49669957", "text": "def CheckSave(self):\n self.mSave.Enable(self.data.HasChanged())", 
"title": "" } ]
edd8209b451c7f174cedf83f811e6ded
Builds appropriate phrase for app
[ { "docid": "2ee64393237424f2194cb6e240785cf0", "score": "0.0", "text": "def _is_it_taps_aff(self, main_weather_data: MainWeatherData) -> str:\n self.temp = round(main_weather_data.temp, 1)\n\n if \".0\" in \"{:.1f}\".format(self.temp):\n self.temp = int(self.temp)\n\n if self.temp >= self.taps_aff_threshold:\n return f'<speak><emphasis level=\"strong\">YAS.</emphasis> It\\'s taps aff in {self.place_name}. It\\'s pure roasting at {self.temp} degrees</speak>'\n else:\n return f'<speak><emphasis level=\"strong\">Gnaw mate.</emphasis> It\\'s taps own in {self.place_name}. It\\'s only {self.temp} degrees</speak>'", "title": "" } ]
[ { "docid": "5068d9b7315d7647fd277a2378690c05", "score": "0.6656243", "text": "def gen_phrase_string(self):\n return self.post_process(self.gen_phrase())", "title": "" }, { "docid": "f4c38d3360b740fb302558e517c732aa", "score": "0.6161804", "text": "def translate(self, phrase):\n return phrase", "title": "" }, { "docid": "12131e15781865b3f8aae86dc955474f", "score": "0.6065521", "text": "def sentence():\n optional = random.choice(['yes', 'no'])\n if optional == 'yes':\n return nounPhrase() + \" \" + verbPhrase()\n if optional == 'no':\n return nounPhrase() + \" \" + verbPhrase() + \" \" + conjunction() + \" \" + nounPhrase() + \" \" + verbPhrase()", "title": "" }, { "docid": "92d01f01cad42278c671a33dccce967e", "score": "0.5991369", "text": "def abbreviate(phrase):", "title": "" }, { "docid": "b7aac8488a97b986ffad1c0ba5a59803", "score": "0.5721289", "text": "def form_sentence(self):\n \n if self.subject and self.verb and self.object: \n return \" \".join([self.subject, self.verb, self.object])\n elif self.subject and self.verb:\n return \" \".join([self.subject, self.verb])\n else:\n return self.subject", "title": "" }, { "docid": "737fad7aca6d070bf5e2fb96136e12da", "score": "0.57015353", "text": "def app_name(): # -> str\n return \"Read Text\"", "title": "" }, { "docid": "7ea27e2548f2b654ddb41ca52f91b0fd", "score": "0.569428", "text": "def build_text(self, task):", "title": "" }, { "docid": "49ec5816112fe563d55af8c5b68b2a00", "score": "0.56447375", "text": "def _get_string_phrase(phrase, index_vocab):\n res = \"\"\n for vocab_id in phrase.split():\n if res == \"\":\n res += index_vocab[int(vocab_id)]\n else:\n res += \" \" + index_vocab[int(vocab_id)]\n return res", "title": "" }, { "docid": "a0bc044b9d1bdccbc4ef1019750322ae", "score": "0.5595425", "text": "def translate_to_pirate_talk(phrase):\n\n result =[]\n translation = {'sir': 'matey', 'hotel': 'fleabag', 'student': 'swabbie', 'man': 'matey', \n 'professor':'foul blaggart', 'restaurant':'galley','your':'yer','excuse':'arr',\n 'students':'swabbies','are':'be','restroom':'head','my':'me','is':'be'}\n \n \n phrase = phrase.split()\n \n for word in phrase:\n \n if word in translation.keys():\n result.append(translation[word])\n else:\n result.append(word)\n\n str1 = \" \"\n \n print(str1.join(result))\n\n return str1.join(result)", "title": "" }, { "docid": "97c8d70f93b4a7b03265408130d25b87", "score": "0.555625", "text": "def gen(self):# Need to imporve\n try:\n temp = self.text.strip().split()\n except:\n pass\n return self.speller.genSentence(temp)\n pass", "title": "" }, { "docid": "838e4bd436a05d8710118b694133a2ce", "score": "0.5522631", "text": "def build_text(word_pairs):\n\n building_text = []\n\n # Gets a random pair to start off with\n pair_choice = random.choice(list(word_pairs))\n building_text += pair_choice.split()\n\n # Proceeds to get value of pair\n pair_follower = random.choice(list(word_pairs[pair_choice]))\n building_text.append(pair_follower)\n\n # Continues getting new pairs\n if len(building_text) < 100:\n new_pair = ' '.join(building_text[-2:])\n while True:\n if new_pair in word_pairs:\n building_text.append(random.choice(list(word_pairs[new_pair])))\n new_pair = ' '.join(building_text[-2:])\n else:\n break\n print(' '.join(building_text))", "title": "" }, { "docid": "6011c6c25819bcd5c8be66ba46948e8e", "score": "0.55188257", "text": "def verbPhrase():\n with open('verbs.txt') as v:\n verb = csv.reader(v)\n verbs = random.choice(list(verb)) \n\n optional = random.choice(['yes', 'no'])\n if optional == 'yes':\n 
return random.choice(verbs) + \" \" + nounPhrase() \n if optional == 'no':\n return random.choice(verbs) + \" \" + nounPhrase() + \" \" + prepositionalPhrase()", "title": "" }, { "docid": "96b7c98548c0e6633abd4a2b15dc0c2b", "score": "0.54932594", "text": "def make_pattern_text(self):\n\n text = \"Do you like \"\n if self.is_positive():\n text += \"more \"\n else:\n text += \"less \"\n\n text += \" and \".join(map(lambda p: p.name.replace(\"_\", \" \"), self.get_targets()))\n text += \"?\"\n\n return text", "title": "" }, { "docid": "acb4a8104c3ab91e6e634c4bd56de4de", "score": "0.5474925", "text": "def _GenerateDescription(self, verb, message, fallback):\n if self.toolset != 'target':\n verb += '(%s)' % self.toolset\n if message:\n return '%s %s' % (verb, self._ExpandSpecial(message))\n else:\n return '%s %s: %s' % (verb, self.name, fallback)", "title": "" }, { "docid": "01f80d2f772aedb19e27879bdb1384c6", "score": "0.54285204", "text": "def hey(phrase):\n \n if phrase.isupper():\n return \"Whoa, chill out!\"\n if phrase[-1:] == \"?\":\n return \"Sure.\"\n if phrase == \"\" or phrase.isspace():\n return \"Fine. Be that way!\"\n return \"Whatever.\"", "title": "" }, { "docid": "80fa4b3433075436c652da878babf227", "score": "0.54098433", "text": "def english_command(tokens, concept, discourse):\n verb = tokens[0]\n line = ''\n i = 1\n for part in discourse.command_canonical[verb].split():\n if (part in ['ACCESSIBLE', 'ACTOR', 'DESCENDANT', 'NEARBY', \n 'NOT-DESCENDANT', 'WORN']):\n line += concept.item[tokens[i]].noun_phrase(discourse)\n i += 1\n elif part in ['RELATION']:\n line += tokens[i].lower()\n i += 1\n elif part in ['STRING']:\n line += tokens[i]\n else:\n line += part\n line += ' '\n return line[:-1]", "title": "" }, { "docid": "b0ac8287cbb96e2966dec4a3158b8f5c", "score": "0.53940797", "text": "def build_message(self):\n commentUser = (\n \"**SEARCH TERM: {term}**\\n\\n\"\n \"Total Occurrence: {totalOccur} \\n\\n\"\n \"Total Chapters: {totalChapter} \\n\\n\"\n \"{warning}\"\n \"{chapterpov}\"\n \"######&#009;\\n\\n####&#009;\\n\\n#####&#009;\\n\\n\"\n \"&#009;\\n\\n&#009;\\n\\n\"\n \">{message}\"\n \"{link}\"\n )\n warning = \"\"\n if self.title.name != 'All' and self.title.name != 'PQ' and self.title.name != 'DE':\n warning = (\"**ONLY** for **{book}** and under due to the spoiler tag in the title.\\n\\n\").format(\n book = self.title.name,\n )\n if self._rowCount > MAX_ROWS:\n warning += (\"Excess number of chapters. 
Sorted by highest to lowest, top 30 results only.\\n\\n\")\n # Avoids spam and builds table heading only when condition is met\n if self._total > 0:\n self._message += (\n \"| Series| Book| Chapter| Chapter Name| Chapter POV| Occurrence| Quote^(First Occurrence Only)\\n\"\n )\n self._message += \"|:{dash}|:{dash}|:{dash}|:{dash}|:{dash}|:{dash}|:{dash}|\\n\".format(dash='-' * 11)\n # Each element added as a new row with new line\n for row in self._listOccurrence:\n self._message += row + \"\\n\"\n elif self._total == 0:\n self._message = \"**Sorry no results.**\\n\\n\"\n \n self._commentUser = commentUser.format(\n warning = warning,\n chapterpov = self._chapterPovMessage,\n term = self._searchTerm,\n totalOccur = self._total,\n message = self._message,\n link = self._links,\n totalChapter = self._rowCount\n )", "title": "" }, { "docid": "42b7d15a058330437e7100e3b7e752f5", "score": "0.5376561", "text": "def get_text(self):\n if self.them_text:\n return self.suggestion.text.replace('{{them}}', self.them_text)\n return self.suggestion.get_text()", "title": "" }, { "docid": "2d739e51b96404bfd38c0a62a56c9d43", "score": "0.5366304", "text": "def random_noun_phrase():\n noun_phrase = {}\n # necessary choices\n random_noun = Noun.objects.order_by('?')[0]\n random_adj = Adjective.objects.order_by('?')[0]\n # optional choices\n random_pos = random.choice([Possessive.objects.order_by('?')[0], None])\n random_dem = random.choice([get_chart_prefix('d', random_noun.noun_class), None])\n noun_phrase['noun'] = random_noun\n noun_phrase['adjs'] = [random_adj]\n if random_pos:\n noun_phrase['pos'] = random_pos\n if random_dem:\n noun_phrase['dem'] = random_dem\n return fix_noun_phrase(noun_phrase)", "title": "" }, { "docid": "57b15480bb23a8754ba80c29f06ed5b6", "score": "0.5362081", "text": "def hey(phrase):\n\t\n\tphrase = phrase.strip()\n\n\tif not phrase:\t\t\t\t\t# Checks to see if phrase is blank.\n\t\treturn 'Fine. Be that way!'\n\telif phrase.isupper():\t\t\t# Checks to see if phrase is a yell.\n\t\treturn 'Whoa, chill out!'\n\telif phrase.endswith('?'):\t\t# Checks to see if phrase is a question.\n\t\treturn 'Sure.'\n\telse:\n\t\treturn 'Whatever.'", "title": "" }, { "docid": "5ceec260a88b1c9f08d48777b0168bc3", "score": "0.5348122", "text": "def synthesize(self, phrase, language=\"en_US\"):\n pass", "title": "" }, { "docid": "ee50593ef54f59b2a2e1b6cbdd3e8533", "score": "0.5336454", "text": "def hey(phrase: str) -> str:\n phrase = phrase.strip()\n if not phrase: # if you address him without actually saying anything\n return \"Fine. 
Be that way!\"\n if phrase.isupper(): # if you yell at him\n if phrase.endswith('?'): # if you yell a question at him\n return \"Calm down, I know what I'm doing!\"\n return \"Whoa, chill out!\"\n if phrase.endswith('?'): # if you ask him a question\n return \"Sure.\"\n return \"Whatever.\" # to anything else", "title": "" }, { "docid": "b71476b9e094863bb542e95975b59d04", "score": "0.5329836", "text": "def translate(line):\n\n line = line.lower() # convert enlgish sentence to lowercase\n englishWords = line.split() # split line into words for processing, write to global variable\n maoriWords = [] # create list for maori sentence\n\n # check for valid sentences size before popping\n if len(englishWords) >= 2 and len(englishWords) <= 5:\n currentWord = englishWords.pop(0)\n\n # check for simple pronouns in main function\n if currentWord == \"i\":\n maoriWords.append(\"au\")\n currentWord = englishWords.pop(0)\n\n elif currentWord == \"he\" or currentWord == \"she\":\n maoriWords.append(\"ia\")\n currentWord = englishWords.pop(0)\n\n # check for more complex pronouns with methods\n\n elif currentWord == \"you\":\n currentWord, englishWords, maoriPronoun = getYouPronoun(currentWord, englishWords)\n maoriWords.append(maoriPronoun)\n\n elif currentWord == \"they\":\n currentWord, englishWords, maoriPronoun = getTheyPronoun(currentWord, englishWords)\n maoriWords.append(maoriPronoun)\n\n elif currentWord == \"we\":\n currentWord, englishWords, maoriPronoun = getWePronoun(currentWord, englishWords)\n maoriWords.append(maoriPronoun)\n\n else:\n return INVALID\n\n\n # infer tense and get verb\n # one left in list means auxiliary verb + verb\n if len(englishWords) == 1:\n\n if currentWord == \"am\" or currentWord == \"are\" or currentWord == \"is\":\n currentWord = englishWords.pop(0)\n maoriVerb = getVerbPresent(currentWord)\n\n # if verb was invalid return message and end early\n if maoriVerb == None:\n return \"invalid verb \\\"\" + currentWord + \"\\\"\"\n else:\n maoriWords.insert(0, maoriVerb)\n return ' '.join(maoriWords)\n\n elif currentWord == \"will\":\n currentWord = englishWords.pop(0)\n maoriVerb = getVerbFuture(currentWord)\n\n # if verb was invalid return message and end early\n if maoriVerb == None:\n return \"invalid verb \\\"\" + currentWord + \"\\\"\"\n else:\n maoriWords.insert(0, maoriVerb)\n return ' '.join(maoriWords)\n\n else:\n return INVALID\n\n # empty list means no auxiliary verb\n elif len(englishWords) == 0:\n maoriVerb = getVerbPastOrPresent(currentWord)\n\n if maoriVerb == None: # if verb was invalid return message and end early\n return \"invalid verb \\\"\" + currentWord + \"\\\"\"\n else:\n maoriWords.insert(0, maoriVerb)\n return ' '.join(maoriWords)\n\n\n else:\n return INVALID\n\n else:\n return INVALID", "title": "" }, { "docid": "caf764877a39cff4411f10ff9ac78e59", "score": "0.53245485", "text": "def initial_data(num_phrase):\n\n phrases = [\"you need new permissions\",\n \"I can arrange a meeting for you\",\n \"the next possible appointment could be tomorrow\"]\n\n final_words = ['pronoun', 'det', 'adj', 'n', 'md', 'v', 'prep']\n\n grammar = {'s': ['np vp'],\n 'np': ['pronoun', 'adj n', 'np pp', 'det n', 'n', 'det adj adj n'],\n 'pp': ['prep pronoun'],\n 'vp': ['v np', 'md vp'],\n 'pronoun': ['you', 'I'],\n 'det': ['a', 'the'],\n 'prep': ['for'],\n 'v': ['need', 'be', 'arrange'],\n 'adj': ['new', 'next', 'possible'],\n 'md': ['can', 'could'],\n 'n': ['permissions', 'meeting', 'appointment', 'tomorrow']}\n return phrases[num_phrase], final_words, grammar", 
"title": "" }, { "docid": "35d9b2238f122c3e0ccee45e62298fe3", "score": "0.5314347", "text": "def _grammar_injection(self, string_a, string, switch):\n if switch.lower() not in str(string).split(\" \", 1)[0].lower():\n if len(str(string).split(\" \", 1)) > 1:\n return f\"{str(string).split(' ', 1)[0]}{string_a} {str(string).split(' ', 1)[1]}\"\n else:\n return f\" {str(string).split(' ', 1)[0]}{string_a}\"\n return string", "title": "" }, { "docid": "513bad81a6bafbfa7b24f4b6111d803e", "score": "0.53060186", "text": "def build_word_info(self, word):\n if len(word) > 4:\n (radical, suffixes) = self._word_bsig[word]\n\n words = self._signatures_to_words[suffixes]\n # TODO : find the radical and the suffixes\n text_info = \"Morpheme clustering algorithm results on : {} : \\n cluster : {} \\n \\n \\n \".format(word, suffixes)\n if len(suffixes) >= 1:\n\n for suff in suffixes:\n for stem in self._signatures_to_stems[suffixes]:\n\n info = \"{}\".format(stem)+\" {}\".format(suff) + \"\\n \\n \\n \"\n\n if info != word:\n\n text_info = text_info + \"{}{}{}\".format(\" \\n \", \" - \", info)\n\n\n\n else:\n\n text_info += \"{}\".format(\" No close words found \")\n\n else:\n\n text_info = \"'\" + word + \"'\" + \" is too short, try a longer word.\"\n\n return text_info", "title": "" }, { "docid": "f1486146ca4a72e5596ac57ed47eb93b", "score": "0.5277118", "text": "def tool_description():\n\n return \"This text describes what the tool does\"", "title": "" }, { "docid": "9ad7bf2beae0b9f1e012498cfa3b2002", "score": "0.52645683", "text": "def create(self):\r\n clauses = []\r\n clauses.append(\"TEMPLATE = %s\" % self.template)\r\n if hasattr(self, 'options'):\r\n clauses.append(self.options)\r\n return [\"CREATE TEXT SEARCH DICTIONARY %s (\\n %s)\" % (\r\n self.qualname(), ',\\n '.join(clauses))]", "title": "" }, { "docid": "9bef9748c4d83a8f16db0d2914134ad9", "score": "0.52459264", "text": "def process_answer(self, message: str) -> str:\n\n logging.debug(\"process_answer()\")\n self.print_state()\n\n if self.state == \"waiting_lexicon_answer\":\n to_skip: bool = False\n\n if message == \"Show definition.\":\n self.state = \"waiting_lexicon_answer\"\n return \"\" # definition(self.word)\n elif message in [\"/know\", \"Know.\"]:\n response = LexiconResponse.KNOW\n elif message in [\"/skip\", \"Know, skip.\"]:\n response = LexiconResponse.KNOW\n to_skip = True\n elif message in [\"/not_a_word\", \"Not a word.\"]:\n response = LexiconResponse.NOT_A_WORD\n to_skip = True\n elif message in [\"/but\", \"But.\"]:\n response = LexiconResponse.DONT_BUT_PROPER_NOUN_TOO\n to_skip = True\n else:\n response = LexiconResponse.DONT\n\n self.lexicon.register(\n self.word,\n response,\n to_skip,\n log_name=\"log_ex\",\n answer_type=AnswerType.USER_ANSWER,\n )\n return \"\"\n\n sentence_id: int = (\n self.current_sentences[self.index].sentence.id_\n if self.index < len(self.current_sentences)\n else 0\n )\n\n # Preprocess answer.\n answer: str = self.learning.learning_language.decode_text(message)\n\n self.index += 1\n\n state: str = (\n f\", {self.learning.count_questions_to_repeat()} to repeat\"\n if self.learning.count_questions_to_repeat()\n else \"\"\n )\n\n if answer == self.word:\n self.index = 0\n self.learning.register(\n Response.RIGHT, sentence_id, self.word, self.interval * 2\n )\n transcriptions: list[str] = []\n if self.items:\n for item in self.items:\n for transcription in item.get_transcriptions():\n if transcription not in transcriptions:\n transcriptions.append(transcription)\n\n self.learning.write()\n\n 
self.print_state()\n return f\"Right{state}, \" + \", \".join(transcriptions) + \".\"\n\n elif answer in self.alternative_forms:\n self.print_state()\n return \"Right form.\"\n\n elif answer in [\"/skip\", \"Skip\"]:\n self.skip.add(self.word)\n self.index = 0\n self.print_state()\n return f\"Skipped for this session{state}.\"\n\n elif answer in [\"/no\", \"Don't know\"]:\n self.learning.register(\n Response.WRONG, sentence_id, self.word, SMALLEST_INTERVAL\n )\n self.learning.write()\n self.index = 0\n\n self.print_state()\n return f\"Right answer: {self.word}{state}.\"\n\n elif answer == \"/exclude\":\n self.data.exclude_sentence(self.word, sentence_id)\n self.skip.add(self.word)\n self.print_state()\n return \"Sentence was excluded.\"\n\n elif answer.startswith(\"/hide \"):\n parts = answer.split(\" \")\n self.data.exclude_translation(self.word, \" \".join(parts[1:]))\n self.skip.add(self.word)\n self.print_state()\n return \"Translation was hidden.\"\n\n else:\n self.print_state()\n return \"No.\"", "title": "" }, { "docid": "0a652a8742aa0d9e50c98584cfa00e52", "score": "0.5240408", "text": "def text(self) -> str:\n if self._text:\n return self._text\n\n # Pre-letter suprasegmentals\n for accent in self.accents:\n if accent == Accent.ACUTE:\n self._text += IPA.ACCENT_ACUTE\n elif accent == Accent.GRAVE:\n self._text += IPA.ACCENT_GRAVE\n\n if self.stress == Stress.PRIMARY:\n self._text += IPA.STRESS_PRIMARY\n elif self.stress == Stress.SECONDARY:\n self._text += IPA.STRESS_SECONDARY\n\n # Letters and diacritics\n for letter_index, letter in enumerate(self.letters):\n self._text += letter\n\n # Diacritics\n for diacritic in self.diacritics.get(letter_index, []):\n self._text += diacritic\n\n # Tone\n if self.tone:\n self._text += self.tone\n\n # Post-letter suprasegmentals\n if self.is_long:\n self._text += IPA.LONG\n\n # Re-normalize and combine\n self._text = unicodedata.normalize(\"NFC\", self._text)\n\n return self._text", "title": "" }, { "docid": "5dbf5e3b0229dc04cdec5973b8209ebc", "score": "0.5232675", "text": "def create(self):\r\n clauses = []\r\n if hasattr(self, 'init'):\r\n clauses.append(\"INIT = %s\" % self.init)\r\n clauses.append(\"LEXIZE = %s\" % self.lexize)\r\n return [\"CREATE TEXT SEARCH TEMPLATE %s (\\n %s)\" % (\r\n self.qualname(), ',\\n '.join(clauses))]", "title": "" }, { "docid": "8a7a4139b02fa51353f8b0f5d61f8c67", "score": "0.5231463", "text": "def make_poem():\n\n noun1, noun2, noun3 = get_words(NOUNS, 3)\n verb1, verb2, verb3 = get_words(VERBS, 3)\n adjective1, adjective2, adjective3 = get_words(ADJECTIVES, 3)\n preposition1, preposition2 = get_words(PREPOSITIONS, 2)\n adverb1, = get_words(ADVERBS, 1)\n a_or_an = \"An\" if adjective1.startswith(VOWELS) else \"A\"\n\n return \"\"\"\n {A_or_An} {adjective1} {noun1}\n \n {A_or_An} {adjective1} {noun1} {verb1} {preposition1} the {adjective2} {noun2}\n {adverb1}, the {noun1} {verb2}\n the {noun2} {verb3} {preposition2} a {adjective3} {noun3}\n \"\"\".format(A_or_An=a_or_an, noun1=noun1, noun2=noun2, noun3=noun3,\n verb1=verb1, verb2=verb2, verb3=verb3,\n adjective1=adjective1, adjective2=adjective2, adjective3=adjective3,\n preposition1=preposition1, preposition2=preposition2,\n adverb1=adverb1)", "title": "" }, { "docid": "e5b0f9d3b33d6aa3f561189405467d01", "score": "0.5230626", "text": "def turn_to_sentence(words, starting_msg, and_or, typ='misunderstood'):\n message = starting_msg\n for i in range(len(words)):\n # obj = OBJECTS[objects[i]] # objects[i], for example, is 'egg'. 
so OBJECTS['egg']\n if i == len(words) - 1 and len(words) > 1: # if it's the last word to add to the list AND if there is more than one word\n message += f' {and_or} ' if len(words) == 2 else f'{and_or} ' # add an 'and', e.g. x, y, and z -- space before 'and' based on conditional\n # objects_here += obj.short_description\n if typ in ('no quotes', 'objects here'):\n message += words[i]\n else:\n message += f\"'{words[i]}'\"\n if len(words) > 2:\n message += ', '\n if typ == 'objects here': # if printing out all the objects in a given location, then end the sentence nicely with a period.\n message = message.strip(', ') # strip trailing comma\n return message", "title": "" }, { "docid": "33b66d2c6132f4ee104a12f375be5a5a", "score": "0.52071035", "text": "def speak(bot, update, args):\n word = args[0] if len(args) > 0 else None\n update.message.reply_text(text=produce_sentence(word), quote=False)", "title": "" }, { "docid": "4307bf926b394a24d6bbfea7b073113e", "score": "0.5200156", "text": "def output(self):\n self.updated = False\n return ''.join([word for word in self.phrase])", "title": "" }, { "docid": "da1d60fcea2af00044ec6d5ef3a88d57", "score": "0.51990753", "text": "def actNatural(self):\r\n phrases = [\"kappa kappa\", \"oyoyoyoyoyoy\", \"RUINED IT!!\", \"Saved it!!\",\r\n \"Wat time is it?\", \"me gusta\", \"now THAT is funny.\", \"please be kind\",\r\n \"wow im offended\", \"i dont care if ur offended, this is who i am\",\r\n \"does anyone know how to get to falador?\", \"buying gf 248gp\",\r\n \"ya manky chav\", \"WOWWWW\", \"how do i mute someone?\", \"press alt+f4\",\r\n \"wow you guys are mean\", \"how do I send a private message?\", \"...I dont know you...\",\r\n \"a/s/l?\", \"18/f/cali\", \"noice.\", \"That can't be...\", \"42\", \"SELLOUT!\", \r\n \"lul\", \"(つ ͡° ͜ʖ ͡°)つ (つ ͡° ͜ʖ ͡°)つ (つ ͡° ͜ʖ ͡°)つ\", \"¯\\_(ツ)_/¯\", \"(ノಠ益ಠ)ノ彡┻━┻\", \r\n \"WOW SICK ULT\", \"NEW META\", \"REKT\", \"MOD ME BRO\", \"HARAMBABE ಠoಠ\"]\r\n\r\n phrase = phrases[random.randint(0, len(phrases) - 1)]\r\n response = \"PRIVMSG {0} :{1}\\r\\n\".format(self._channel, phrase).encode('utf-8')\r\n self.logSend(response)", "title": "" }, { "docid": "84f82f757fc1d7227ba75cb142b2d494", "score": "0.5186206", "text": "def make_text2(chains):\n\n ret_str = \" \"\n # Use a word starts with a capitalized letter as the beginning of sentence\n random_tuple = random.choice(chains.keys())\n \n while not random_tuple[0][0].isupper():\n random_tuple = random.choice(chains.keys())\n str_len = len(ret_str)\n \n while ret_str[-1] not in ['?', '.', '!'] or str_len < MAX_CHAR:\n ret_str = concat_str(ret_str, random_tuple[0])\n ret_str = concat_str(ret_str, random_tuple[1])\n \n random_value = random.choice(chains[random_tuple])\n if (random_value):\n ret_str = concat_str(ret_str, random_value)\n random_tuple = (random_tuple[1], random_value)\n else:\n break\n if ret_str[-1] not in ['?', '.', '!']:\n ret_str += \".\"\n\n return ret_str[1:]", "title": "" }, { "docid": "fd1789f11c60b9a3bba9adb8c11d379c", "score": "0.5168262", "text": "def __build_match_title_short(cls, details):\n title = details.get('home', {}).get('name_short')\n title += ' - '\n title += details.get('away', {}).get('name_short')\n return title", "title": "" }, { "docid": "199209e4e2922378d67306c3dc1ebd92", "score": "0.51674014", "text": "def help(bot, update):\n update.message.reply_text('''COMANDS:\\n/CSGO - CSGO Topstreams\\n/LOL - LOL Topstreams\\n\n /Fortnite - Fortnite Topstreams\\n/PUBG - PUBG Topstreams\\n/Dota - Dota Topstreams\n /Overwatch - Overwatch 
Topstreams\\n/Heartstone - Heartstone Topstreams\\n/FIFA - FIFA Topstreams\n \\n/TOP - Top streams of any game''')", "title": "" }, { "docid": "8824e30db31e115cafd72c11fc5c9f4d", "score": "0.51666796", "text": "def generate_phrase(self, max_size=None, min_words=None):\n\n # Invalid input values raise an exception\n valid = True\n\n if max_size:\n valid = type(max_size) is int and max_size >= 0\n\n if min_words:\n valid = valid and type(min_words) is int and min_words >= 0\n\n if not valid:\n raise ValueError(\"Expected positive int value(s) for input.\")\n\n # If both parameters are present, make sure parameters are reasonable\n if max_size and min_words:\n if 5*min_words >= max_size:\n raise ValueError(\"max_size paramemeter must be at least five times larger than min_words.\")\n\n new_word = ''\n msg = ''\n word_count = 0\n\n while 1:\n\n # Generate a successor word\n if new_word in self._markov_chain:\n new_word = self._get_next_term(self._markov_chain[new_word])\n\n # Generate a random word if new_word is not a key in the markov chain\n else:\n new_word = random.choice(self._markov_chain.keys())\n\n # Check if new word will push us over the character limit\n if max_size and len(msg) + len(new_word) >= max_size:\n break\n\n # Append successor to word\n msg += new_word + ' '\n\n # Increment word_count if min_words specified (and 'word' is not a period)\n if min_words and new_word is not '.':\n word_count += 1\n\n # probabalistically decide to stop\n if not min_words or word_count >= min_words:\n if random.random() <= self._stop_p:\n break\n\n return msg.rstrip().capitalize()", "title": "" }, { "docid": "2042f55072aa97d6b5966e7840a5b58a", "score": "0.51449275", "text": "def _append_app_name(self, val):\n if 'CFBundleName' in info:\n val += ' {}'.format(info['CFBundleName'])\n return val", "title": "" }, { "docid": "56f3c5de27997927190a73ceb14aa836", "score": "0.5144441", "text": "def hey(self, text):\n\n if not text or text.isspace():\n return 'Fine. Be that way!'\n elif text.isupper():\n return 'Woah, chill out!'\n elif text.endswith('?'): \n return 'Sure.'\n else:\n return 'Whatever.'", "title": "" }, { "docid": "fb5493169b8fb73807b3be7dec1f6f81", "score": "0.5143284", "text": "def to_phrases(self):\n phrase = ''\n name_pattern = r'[А-Я]+\\s[А-Я]\\.[А-Я]\\.'\n\n with open(self.__script, 'r') as read_file:\n for line in read_file.readlines():\n if re.search(name_pattern, line) or 'ГОЛОВУЮЧИЙ' in line:\n if phrase:\n if 'ГОЛОВУЮЧИЙ' not in phrase:\n politician = re.search(name_pattern, phrase)\n if politician:\n politician = politician[0]\n phrase = re.sub(name_pattern, '', phrase)\n else:\n phrase = ''\n else:\n politician = self.announcer\n phrase = re.sub('ГОЛОВУЮЧИЙ.', '', phrase)\n\n dots = re.findall(r'\\.', phrase)\n if len(dots) > 1:\n self.__phrase_analysis(politician, phrase)\n # print(politician)\n # print(phrase)\n phrase = ''\n phrase += line", "title": "" }, { "docid": "1a2d4c0c481f672b97e684a7049b1e67", "score": "0.513873", "text": "def get_static_welcome_message():\n return \\\n\"\"\"\n<h3>Search Help</h3>\n<ul><li>The display below the line is an example of the output the browser\nshows you when you enter a search word. The search word was <b>green</b>.</li>\n<li>The search result shows for different parts of speech the <b>synsets</b>\ni.e. different meanings for the word.</li>\n<li>All underlined texts are hypertext links. There are two types of links:\nword links and others. 
Clicking a word link carries out a search for the word\nin the Wordnet database.</li>\n<li>Clicking a link of the other type opens a display section of data attached\nto that link. Clicking that link a second time closes the section again.</li>\n<li>Clicking <u>S:</u> opens a section showing the relations for that synset.</li>\n<li>Clicking on a relation name opens a section that displays the associated\nsynsets.</li>\n<li>Type a search word in the <b>Next Word</b> field and start the search by the\n<b>Enter/Return</b> key or click the <b>Search</b> button.</li>\n</ul>\n\"\"\"", "title": "" }, { "docid": "952c24739f86275fe39fd3e8bc9ef46d", "score": "0.5134184", "text": "def text(self) -> str:\n if self._text:\n return self._text\n\n for accent in self.accents:\n if accent == Accent.ACUTE:\n self._text += IPA.ACCENT_ACUTE\n elif accent == Accent.GRAVE:\n self._text += IPA.ACCENT_GRAVE\n\n if self.stress == Stress.PRIMARY:\n self._text += IPA.STRESS_PRIMARY\n elif self.stress == Stress.SECONDARY:\n self._text += IPA.STRESS_SECONDARY\n\n for letter_index, letter in enumerate(self.letters):\n self._text += letter\n\n if letter_index in self.nasalated:\n self._text += IPA.NASAL\n\n if letter_index in self.raised:\n self._text += IPA.RAISED\n\n for c in self._extra_combining[letter_index]:\n self._text += c\n\n if self.tone:\n self._text += self.tone\n\n if self.elongated:\n self._text += IPA.LONG\n\n # Re-normalize and combine\n self._text = unicodedata.normalize(\"NFC\", self._text)\n\n return self._text", "title": "" }, { "docid": "0ec44d4585b30aea72ee5bdef9623375", "score": "0.51310563", "text": "def main():\n\n args = get_args()\n word = args.word\n\n \"\"\"choose article based on first letter of word\"\"\"\n char = word[0].lower() in 'aeiou'\n article = ''\n\n article = 'an' if char == True else 'a'\n\n\n # print('positional = \"{}\"'.format(word))\n\n print('Ahoy, Captain, {} {} off the larboard bow!'.format(article, word))", "title": "" }, { "docid": "0c63cf82e5456cb266f2a57db129d247", "score": "0.51305634", "text": "def _generate_friendly_name(self, request):\n\n language = get_language_from_request(request)[:2].lower()\n suggestion = \" \".join(get_random(language, 3, only_single_words=True))\n\n return suggestion[:127]", "title": "" }, { "docid": "e5be83d3f702e322c9581cdbd7a22d3e", "score": "0.5123248", "text": "def generate():\n\n pref = [\"\", \"educational\", \"cheap\", \"best\", \"most interesting\", \"creative\", \"durable\", \"hobby\", \"blue\"\n \"wonderful\", \"gift\", \"learning\", \"play\", \"backyard\", \"activity\", \"birding\", \"bird watching\",\n \"cool\", \"colorful\", \"party\", \"travel\", \"outdoor\", \"outside\", \"indoor\", \"small\", \"light\", \"stem\",\n \"art\", \"learn\", \"good\", \"fun\", \"new\", \"teaching\", \"top\", \"tops\", \"unique\", \"unusual\", \"preschool\"]\n\n what = [\"\", \"toy\", \"toys\", \"gift\", \"gifts\", \"binocular\", \"binoculars\", \"activity\",\n \"game\", \"games\", \"set\", \"accessory\", \"stuff\", \"things\", \"party\", \"gadget\"]\n\n usage = [\"\", \"for kids\", \"for toddlers\", \"for boys\", \"for girls\", \"for children\",\n \"for kid\", \"for toddler\", \"for boy\", \"for girl\", \"for babies\", \"for baby\",\n \"for age\", \"for 3\", \"for 4\", \"for 5\", \"for 6\", \"for car trip\",\n \"for year\", \"for birthday\", \"for year old\", \"for preschoolers\", \"for brain\",\n \"sale\", \"2018\", \"for grandson\", \"for granddaughter\"]\n\n res = []\n for a in pref:\n for b in what:\n for c in usage:\n phrase = \"{} {} 
{}\".format(a,b,c).replace(\" \", \" \").replace(\" \", \" \").strip()\n #print(phrase)\n res.append(phrase)\n return res", "title": "" }, { "docid": "8305984369decadeb95604454d0ed59e", "score": "0.51228213", "text": "def build_phrase_input(phrases, pad_idx, task):\n tokens = [phrase.tokens for phrase in phrases]\n ners = [phrase.ners for phrase in phrases]\n subj_posis = torch.tensor([phrase.subj_posi for phrase in phrases]).unsqueeze(1)\n obj_posis = torch.tensor([phrase.obj_posi for phrase in phrases]).unsqueeze(1)\n if task == \"re\":\n lengths = torch.tensor([len(token_seq) for token_seq in tokens]).unsqueeze(1)\n subj_bool = subj_posis < lengths\n obj_bool = obj_posis < lengths\n assert sum(subj_bool).item() == len(phrases)\n assert sum(obj_bool).item() == len(phrases)\n subj = torch.tensor([phrase.ners[phrase.subj_posi] for phrase in phrases]).unsqueeze(1) # has to be NERs due to type check\n obj = torch.tensor([phrase.ners[phrase.obj_posi] for phrase in phrases]).unsqueeze(1)\n else:\n no_ner_id = NER_LABEL_SPACE[\"\"]\n subj = torch.full((len(phrases),1), no_ner_id)\n obj = torch.full((len(phrases),1), no_ner_id)\n\n ner_pad = NER_LABEL_SPACE[\"<PAD>\"]\n\n tokens, _ = BaseVariableLengthDataset.variable_length_batch_as_tensors(tokens, pad_idx)\n ners, _ = BaseVariableLengthDataset.variable_length_batch_as_tensors(ners, ner_pad)\n\n assert tokens.shape == ners.shape\n\n phrase_input = torch.cat([tokens, ners, subj_posis, obj_posis, subj, obj], dim=1)\n\n return phrase_input", "title": "" }, { "docid": "b8d51404638e2e2ce3ea47e3f6177f6a", "score": "0.5114722", "text": "def cond_translate_snippet(text):\n return translate_snippet(text, keychar=\"@\")", "title": "" }, { "docid": "0401104e5605eb9fb25cfea3577cf171", "score": "0.5111081", "text": "def sentence():\r\n # Determine the number of comma-separated sections and number of words in\r\n # each section for this sentence.\r\n sections = [u' '.join(random.sample(WORDS, random.randint(3, 12))) for i in range(random.randint(1, 5))]\r\n s = u', '.join(sections)\r\n # Convert to sentence case and add end punctuation.\r\n return u'%s%s%s' % (s[0].upper(), s[1:], random.choice('?.'))", "title": "" }, { "docid": "246f23197c032ccd87fa89bb0a4b2052", "score": "0.51088095", "text": "def __str__(self):\n return self.main_word", "title": "" }, { "docid": "5d41b41ee32ffbe458be4d09e92bcbcf", "score": "0.5104865", "text": "def _generate_help_text(self):\n\n # General package description (mandatory)\n lines = self._generate_section('Description', self.app.cfg['description'], strip=True)\n\n # Package usage instructions (optional)\n lines.extend(self._generate_section('Usage', self.app.cfg['usage'], strip=True))\n\n # Examples (optional)\n lines.extend(self._generate_section('Examples', self.app.cfg['examples'], strip=True))\n\n # Additional information: homepage + (if available) doc paths/urls, upstream/site contact\n lines.extend(self._generate_section(\"More information\", \" - Homepage: %s\" % self.app.cfg['homepage']))\n\n docpaths = self.app.cfg['docpaths'] or []\n docurls = self.app.cfg['docurls'] or []\n if docpaths or docurls:\n root_envvar = ROOT_ENV_VAR_NAME_PREFIX + convert_name(self.app.name, upper=True)\n lines.extend([\" - Documentation:\"])\n lines.extend([\" - $%s/%s\" % (root_envvar, path) for path in docpaths])\n lines.extend([\" - %s\" % url for url in docurls])\n\n for contacts_type in ['upstream', 'site']:\n contacts = self.app.cfg['%s_contacts' % contacts_type]\n if contacts:\n if isinstance(contacts, list):\n lines.append(\" - 
%s contacts:\" % contacts_type.capitalize())\n lines.extend([\" - %s\" % contact for contact in contacts])\n else:\n lines.append(\" - %s contact: %s\" % (contacts_type.capitalize(), contacts))\n\n # Extensions (if any)\n extensions = self._generate_extension_list()\n lines.extend(self._generate_section(\"Included extensions\", '\\n'.join(wrap(extensions, 78))))\n\n return '\\n'.join(lines)", "title": "" }, { "docid": "b95e1a1f2d851c85304c250247d6247a", "score": "0.5103791", "text": "def add_quotes_if_needed(self, app=\"\"): # -> str\n if \" \" in app:\n if not app.startswith('\"'):\n app = \"\".join(['\"', app])\n if not app.endswith('\"'):\n app = \"\".join([app, '\"'])\n return app", "title": "" }, { "docid": "8909c37066211f91a9e63434d3950487", "score": "0.508499", "text": "def phrasesDeck(iphrases):\n return \"\"", "title": "" }, { "docid": "fe23bed26bc7fb747068aff120c4b007", "score": "0.50824356", "text": "def BuildHelpText(self):\n buf = io.StringIO()\n buf.write('\\n+\\n')\n for key, help_text in sorted(six.iteritems(self.choices)):\n buf.write('*{}*::: {}\\n'.format(key, help_text))\n return (\n '{}Must be one of the following values:{}'.format(\n _Insert(super(Enum, self).BuildHelpText()), buf.getvalue()))", "title": "" }, { "docid": "fb43c7c5be439f4ef24cd6aa7d8a894a", "score": "0.50811255", "text": "def make_text(chains):\n words = []\n punctuation = [\".\", \",\", \"!\", \"?\", \";\", \":\"]\n\n curr_ngram = choice(chains.keys()) #picks random bigram key\n words.extend(curr_ngram)\n\n while curr_ngram in chains: #there is a curr_value in the dictionary:\n curr_key = curr_ngram[1:]\n curr_value = choice(chains[curr_ngram]) #randomly selects curr_value\n curr_ngram = curr_key + (curr_value,) #updates the curr_ngram variable\n words.append(curr_ngram[-1])\n\n gen_text = \" \".join(words)#long string of generated text\n\n return gen_text", "title": "" }, { "docid": "16b04f9d23135139303c9b747e321330", "score": "0.50763226", "text": "def handle_message(response):\n phrase = ''\n if response.strip() != '':\n answers = response.split(',')\n answers_count = len(answers)\n for i in range(answers_count):\n answer = answers[i].strip().capitalize()\n phrase += answer + (\n ' or ' if i == answers_count - 2 and answers_count > 1 else ' ')\n if any(char.isdigit() for char in answer) or answer in ('Yes', 'No'):\n break\n return {'result': phrase}", "title": "" }, { "docid": "7af938a109b7977c8e64705184b73f01", "score": "0.5076043", "text": "def _correct_grammar(self, string_a, string_b, input, switch):\n if switch.lower() in str(input).split(\" \", 1)[0].lower():\n return f\" {string_b} {input}\"\n return f\" {string_a} {input}\"", "title": "" }, { "docid": "bc92408960e7e82465a3f7b107410b3b", "score": "0.5075161", "text": "def update_pho(self, speaker_code):\n if not self.is_speaker_in_annotation(speaker_code):\n return f'{speaker_code} not in annotation'\n\n # TODO: What if the words have not been extracted yet?\n words = self.words_uttered_by[speaker_code]\n n_words = len(words)\n if n_words == 0:\n return 'error: no words were extracted'\n\n # TODO: What if the subtiers have not been categorized yet?\n pho_subtiers = self.sub_tiers_by_label[TRANSCRIPTION_LABEL]\n if len(pho_subtiers) == 0:\n pho_subtier = SubTier(label=TRANSCRIPTION_LABEL, contents=(' '.join(['###'] * n_words) + '\\n'))\n self.sub_tiers = [pho_subtier] + self.sub_tiers\n self.sub_tiers_by_label[TRANSCRIPTION_LABEL].append(pho_subtier)\n return 'pho subtier added'\n\n [pho_subtier] = pho_subtiers\n m_transcriptions = 
len(self.transcriptions)\n # Above, we counted '###' as well as actual transcriptions\n m_transcribed = m_transcriptions - self.transcriptions.count('###')\n\n if m_transcribed == 0:\n pho_subtier.contents = ' '.join(['###'] * n_words) + '\\n'\n self.extract_phonetic_transcriptions()\n if m_transcriptions < n_words:\n return \"###'s added, needs transcription\"\n elif m_transcriptions > n_words:\n return \"###'s removed, needs transcription\"\n elif m_transcriptions == n_words:\n return \"needs transcription\"\n\n if m_transcriptions > n_words:\n return 'error: more transcriptions than there are words'\n\n if m_transcriptions < n_words:\n # If we got here, there is at least one actual transcription\n return 'error: fewer transcriptions than there are words, order unknown, sort manually'\n\n # If we got here, m_transcriptions == n_words\n\n if m_transcribed < n_words:\n return 'needs some transcription'\n\n if m_transcribed == n_words:\n return 'all transcribed'", "title": "" }, { "docid": "3a2a4cc9c16a1aeb157b03f9346a762e", "score": "0.5074825", "text": "def sentencegenerator(text,startword,length = 10):\r\n d = suffixdictionary(text)\r\n sentence = [startword]\r\n index = 0\r\n while index < length:\r\n sentence.append(random.choice(d[sentence[index]]))\r\n index = index + 1\r\n return ' '.join(sentence)", "title": "" }, { "docid": "522ec6e23fe206c0e4b8c4bd19e489d6", "score": "0.5071008", "text": "def simple_app():\n return '<b>This app is officially working</b>'", "title": "" }, { "docid": "5064783c4dee8387f78cc96daeb1d744", "score": "0.5064435", "text": "def sample_word(self, pos, grammar_class):\n chosen_word = rand.choice(self.word_dict[grammar_class])\n word_string = chosen_word + f\"_{grammar_class}_\" + str(pos)\n return word_string", "title": "" }, { "docid": "af62e9da28afa1e0c4201694309620ce", "score": "0.50637364", "text": "def make_text(chains):\n\n\n # start somewhere\n cap_keys = [key for key in chains.keys() if key[0].istitle()]\n\n ngram = choice(cap_keys)\n\n # ngram = choice(list(chains.keys()))\n\n # start list with first two words\n words =list(ngram)\n\n while ngram in chains: \n # use tuple as key to get list of next words\n # choose a next word with 'choice' (returns words as a string)\n next_word = choice(chains[ngram])\n \n # add chosen word to words list\n words.append(next_word) \n\n # make new key from second tuple item and chosen word\n \n # make ngram from tuple to list\n ngram_list = list(ngram)\n\n # make slice from 1 to last\n ngram_slice = ngram_list[1:]\n \n # add our chosen word to the slice\n ngram_slice.append(next_word)\n\n # consciously re-tuple our list\n ngram = tuple(ngram_slice)\n\n return \" \".join(words)", "title": "" }, { "docid": "aa4a2db52aa79c4f0ec8458a000a2714", "score": "0.5059467", "text": "def sing_a_song():\r\n return 'What do we do with a drunken sailor?'", "title": "" }, { "docid": "c2fe1c4a80aa1aa9ec4bdc47414172fd", "score": "0.50576925", "text": "def user_phrase():\n phrase = input(\"please type in the phrase you want to encode/decode.\")\n phrase = phrase.lower()\n return phrase", "title": "" }, { "docid": "c0d3b7bb191507c9e8c31954db554239", "score": "0.5054058", "text": "def generate_text(start_word, model, h_learnable, embedder, word2id, id2word, max_len, allow_unknowns, top_k=5):\n model.eval()\n with torch.no_grad():\n # Try at most 10 times to generate a review that ends with \"<EOR>\".\n # If unsuccesful, output whatever review was generated last.\n trys = 0\n while trys <= 10:\n trys += 1\n word_ids = 
[word2id[start_word]]\n\n if h_learnable:\n h = None\n else:\n h = model.init_hidden(batch_size=1)\n \n while word_ids[-1] != word2id['<EOR>'] and len(word_ids) <= max_len:\n x = word_to_model_input(id2word[word_ids[-1]], embedder, word2id)\n y, h = predict(\n x_packed=x,\n model=model,\n h_learnable=h_learnable,\n h=h,\n batch_size=1,\n word2id=word2id,\n allow_unknowns=allow_unknowns,\n top_k=top_k\n )\n word_ids.append(y)\n \n if word_ids[-1] == word2id['<EOR>']:\n break\n words = [id2word[idx] for idx in word_ids]\n return \" \".join(words)", "title": "" }, { "docid": "ab5d8d5ceab0ec3e9e81e2850675cff9", "score": "0.5047875", "text": "def __convert(self, kind: str) -> str:\n sep: str = self.separators[kind]\n words: List[str] = regex.findall(self.pattern, self.string)\n size: int = len(words)\n converted: str = \"\"\n if size == 0:\n converted = self.string\n elif kind in (\"pascal\", \"title\", \"header\"):\n converted = sep.join([w.lower().capitalize() for w in words])\n elif kind in (\"snake\", \"kebab\", \"dot\"):\n converted = sep.join([w.lower() for w in words])\n elif kind == \"camel\":\n converted = sep.join(\n [\n w.lower() if i == 0 else w.lower().capitalize()\n for i, w in enumerate(words)\n ]\n )\n elif kind == \"constant\":\n converted = sep.join([w.upper() for w in words])\n return converted", "title": "" }, { "docid": "8590845202dca01438b9ab709447c4cf", "score": "0.5033167", "text": "def whichBetter(self, type_valg):\n word = \"\"\n if type_valg == \"Stein\":\n word = \"Papir\"\n elif type_valg == \"Saks\":\n word = \"Stein\"\n else:\n word = \"Saks\"\n return word", "title": "" }, { "docid": "1cc5fd6831605fee3211049ddfd2b41a", "score": "0.50310034", "text": "def pronunciation(word, say):\n if word in say:\n return say[word]\n else:\n return ''", "title": "" }, { "docid": "29dfc2a2aa769ccb5c2d554d05998592", "score": "0.5028345", "text": "def short_name(self):\n self.details['short_name'] = input(\n \"Short Name \" + str(on([\"M\"], args)) + \" (\" + Colors.OKBLUE + self.details['name'][\n 0:12] + Colors.ENDC + \"): \")\n # default to project name\n if self.details['short_name'] == '':\n self.details['short_name'] = self.details['name'][0:12]", "title": "" }, { "docid": "063d4d61ded9a782539fadbcc7d8d505", "score": "0.5014829", "text": "def make_text(chains):\n\n ret_str = \"\"\n end_the_str = False\n\n while len(ret_str) < MAX_CHAR:\n # Use a word starts with a capitalized letter as the beginning of sentence\n random_tuple = random.choice(chains.keys())\n while not random_tuple[0][0].isupper():\n random_tuple = random.choice(chains.keys())\n\n while len(ret_str) < MAX_CHAR:\n if len(ret_str) + len(random_tuple[0]) > MAX_CHAR:\n end_the_str = True\n break;\n ret_str = concat_str(ret_str, random_tuple[0])\n if len(ret_str) + len(random_tuple[1]) > MAX_CHAR:\n end_the_str = True\n break;\n ret_str = concat_str(ret_str, random_tuple[1])\n \n random_value = random.choice(chains[random_tuple])\n if (random_value):\n if len(ret_str) + len(random_value) > MAX_CHAR:\n end_the_str = True\n break;\n ret_str = concat_str(ret_str, random_value)\n random_tuple = (random_tuple[1], random_value)\n else:\n break\n\n if end_the_str:\n break\n\n return ret_str", "title": "" }, { "docid": "871299dfb75561df1ed74d6872ada30f", "score": "0.50126153", "text": "def get_thank_you_text(survey):\n if survey.get('language') == 'ms':\n thankyou_text = 'Terima Kasih'\n elif survey.get('language') == 'zh':\n thankyou_text = '谢谢'\n elif survey.get('language') == 'ja':\n thankyou_text = 'ありがとうございました'\n elif 
survey.get('language') == 'ko':\n thankyou_text = '고맙습니다'\n else:\n thankyou_text = 'Thank You'\n return thankyou_text", "title": "" }, { "docid": "e13691e46bdfc7b2dcf705c4b8e6b0a7", "score": "0.4993633", "text": "def correct(word):\n if word == \"show_tell\":\n return 'Show \\\\& Tell'\n elif word == 'top_down':\n return \"Top-down\"\n elif word == \"resnet50\":\n return \"ResNet-50\"\n elif word == \"resnet152\":\n return \"ResNet-152\"\n elif \"cnn\" in word:\n return \"RNN + CNN\"\n else:\n return word", "title": "" }, { "docid": "19f61e924b31cee04c048fe50a8b89e4", "score": "0.49910954", "text": "def main():\n\n conf = parse_arguments()\n if conf.url:\n try:\n response = urllib.request.urlopen(conf.url)\n words = response.read().decode().splitlines()\n except urllib.error.URLError:\n print('web request failure', file=sys.stderr)\n sys.exit(2)\n else:\n try:\n with open('/usr/share/dict/words', 'r') as f:\n words = f.read().splitlines()\n except OSError:\n print('no dictionary file', file=sys.stderr)\n sys.exit(3)\n \n nonalpha = ''\n if conf.num:\n nonalpha += string.digits\n if conf.sym:\n nonalpha += string.punctuation\n #nonalpha = string.digits + string.punctuation\n for i in range(int(conf.phrases)):\n pw = ''\n while not valid_phrase(pw, conf):\n # rerolls passphrase until valid\n pw = ''\n if secrets.randbits(1) == 1 and len(nonalpha) > 0:\n # 50% chance of number or symbol at beginning versus letter\n pw += secrets.choice(nonalpha)\n for j in range(int(conf.words)):\n if secrets.randbits(1) == 0:\n pw += secrets.choice(words).upper()\n else:\n pw += secrets.choice(words).lower()\n if secrets.randbits(1) == 1 and len(nonalpha) > 0:\n # 50% chance of number or symbol between words and at the end\n pw += secrets.choice(nonalpha)\n print(pw + '\\t' + str(len(pw)))", "title": "" }, { "docid": "17aa8ca7567366961e1facbeb996c58d", "score": "0.49850595", "text": "def __init__(self):\n self.nlp = spacy.load('en_core_web_sm')\n if args.add_q:\n self.q_name = \"_with_question\"\n else:\n self.q_name = \"_without_question\"", "title": "" }, { "docid": "96e828ebc5bd6b8b15ed6f85ae3ff1cc", "score": "0.49840906", "text": "def apology_choose(message):\n def escape(s):\n \"\"\"\n Escape special characters.\n\n https://github.com/jacebrowning/memegen#special-characters\n \"\"\"\n for old, new in [(\"-\", \"--\"), (\"_\", \"__\"), (\"?\", \"~q\"),\n (\"%\", \"~p\"), (\"#\", \"~h\"), (\"/\", \"~s\"), (\"\\\"\", \"''\")]:\n s = s.replace(old, new)\n return s\n return render_template(\"apology_choose.html\", bottom=escape(message))", "title": "" }, { "docid": "1feff80b2830df9ebb0cde4ae87fbd46", "score": "0.4981439", "text": "async def translate(self, ctx, *, phrase: str):\n \n langs = re.search(r\"(\\w{2})-(\\w{2})\", phrase[0:5])\n if langs:\n sl = langs.group(1)\n tl = langs.group(2)\n phrase = phrase[6:]\n else:\n sl = \"auto\"\n tl = \"en\"\n\n url = \"https://translate.googleapis.com/translate_a/single\"\n params = {'client': 'gtx', 'sl': sl, 'tl': tl, 'dt': 't', \"q\": phrase}\n ua = \"Mozilla/5.0 (X11; CrOS x86_64 12239.19.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.38 Safari/537.36\"\n headers = {'User-Agent': ua}\n async with self.bot.session.get(url, headers=headers, params=params) as resp:\n result = await resp.json()\n await ctx.send(\"{} ({}): {}\".format(result[0][0][1], result[2], result[0][0][0]))", "title": "" }, { "docid": "f8d6c5a44660722a13526c09bb0a2490", "score": "0.49773678", "text": "def shortHelpString(self):\r\n return self.tr(\"Example algorithm short 
description\")", "title": "" }, { "docid": "2ef868fedc1307740491880c02a67570", "score": "0.49741906", "text": "def get_short_text(self) -> str:\n oc = self.__oc\n return f\"{oc.name} the {oc.species.title()} {' '.join('#' + tag.replace(' ', '') for tag in self._tags) if self._tags else ''}\"", "title": "" }, { "docid": "f702723b7caaa9c21dcf3984429fce77", "score": "0.49714324", "text": "def make_text(chains):\n\n # randomly select a dictionary entry to start with\n\n search_tuple = random.choice(chains.keys())\n sentence_list = [search_tuple[0], search_tuple[1]]\n \n #loop goes here\n \n while len(\" \".join(sentence_list)) < 130:\n rand_val = random.choice(chains[search_tuple])\n sentence_list.append(rand_val)\n search_tuple = (search_tuple[1], rand_val)\n if not chains.get(search_tuple):\n break\n\n sentence = \" \".join(sentence_list).capitalize()\n return sentence", "title": "" }, { "docid": "450d955df168fa99b0ed099438269d7c", "score": "0.49708033", "text": "def valiant_app_title() -> str:\n return \"Valiant\" # noqa:DAR201", "title": "" }, { "docid": "be9921aa062fcf4be522f3ceeeecdf8b", "score": "0.49696136", "text": "def update_text(self, old_text, new_text):\n keyword_found = self.check_for_keywords(new_text)\n if keyword_found is not None:\n self.sound_alarm(keyword_found, new_text)\n self.output_text += f\"\\nMALICIOUS PHRASE -->> {new_text} <<--\"\n else:\n self.output_text += f\"\\n{new_text}\"\n self.output_box.config(text=self.output_text)", "title": "" }, { "docid": "0b7833ebbed7d33e07790235972a1583", "score": "0.49660146", "text": "def mlpPhrase():\n configuration['mlpPhrase'].update({\n 'phraseMaxLength': 50,\n 'phraseTokenEmb': int(Generator.generateValue([25, 200], True)),\n 'phrasePosEmb': int(Generator.generateValue([5, 50], True)),\n 'gru': Generator.generateValue([True, False], False, True),\n 'wordRnnUnitNum': int(Generator.generateValue([25, 500], True)),\n 'useB1': Generator.generateValue([True, False], False),\n 'useB-1': Generator.generateValue([True, False], False),\n 'transPosEmb': int(Generator.generateValue([5, 50], True)),\n 'transTokenEmb': int(Generator.generateValue([25, 200], True)),\n 'denseUnitNumber': int(Generator.generateValue([25, 500], True)),\n 'lr': round(Generator.generateValue([0.01, 0.2], True), 3)\n })\n configuration['embedding']['lemma'] = Generator.generateValue([True, False], False)\n configuration['sampling']['importantSentences'] = True\n configuration['sampling']['overSampling'] = True", "title": "" }, { "docid": "c7cb99d67d035d00b06f8a3136a1525b", "score": "0.49647617", "text": "def BuildHelpText(self):\n return (\n '{}Must be a string representing a day of the week in English, '\n 'such as \\'MON\\' or \\'FRI\\'. 
Case is ignored, and any characters after '\n 'the first three are ignored.{}'.format(\n _Insert(super(DayOfWeek, self).BuildHelpText()),\n _Append(self.Constraints())))", "title": "" }, { "docid": "eb791b586cf8e6bd63f778e79f499f94", "score": "0.49606007", "text": "def generate_phrase_tts(phrase, save_dir):\n # Initialize text to speech system\n remove_whitespace = sub('\\s+', '_', phrase)\n filename = join(save_dir, remove_whitespace + '.wav')\n if platform == 'linux':\n # eSpeak does not implement the save_to_file function\n # So we must call it directly\n subprocess.Popen(['espeak', phrase, '-w', filename])\n else:\n if platform != 'darwin':\n print('File may not be created on unsupported OS')\n engine = pyttsx3.init()\n engine.save_to_file(phrase, filename)\n engine.runAndWait()", "title": "" }, { "docid": "7cccd5b207c4f60d75f8e7c9a5823ed5", "score": "0.49579707", "text": "def nounPhrase():\n with open('articles.txt') as a:\n article = csv.reader(a)\n articles = random.choice(list(article))\n with open('adjectives.txt') as ad:\n adjective = csv.reader(ad)\n adjectives = random.choice(list(adjective)) \n with open('nouns.txt') as n:\n noun = csv.reader(n)\n nouns = random.choice(list(noun)) \n\n optional1 = random.choice(['yes', 'no'])\n if optional1 == 'yes': \n return random.choice(articles) + \" \" + random.choice(nouns)\n if optional1 == 'no': \n return random.choice(articles) + \" \" + random.choice(adjectives) + \" \" + random.choice(nouns)", "title": "" }, { "docid": "122f8bafe840ad2061e571b3af884434", "score": "0.4957891", "text": "def get_app_title_rst(self, continuation=''):\n\n if continuation:\n continuation = ' ' + continuation\n\n title = self._context['app_name'] + continuation\n title = f\"{title}\\n{'=' * len(title)}\"\n\n return title", "title": "" }, { "docid": "12a853e3ed7a31528984e229e270ba0f", "score": "0.49564636", "text": "def generate_sentence(name, comment, score):\n return ' '.join((name, comment, str(score)))", "title": "" }, { "docid": "6f16678a40b417f3fdfc3b4dc9233d05", "score": "0.4955744", "text": "def do_comprehend_languages(self, arg):\r\n print_description('1st', 'Comprehend Languages', '1 Action [R]', '1 Hour', 'Self', 'None', 'Social',\r\n 'V, S, M', 'Divination')", "title": "" }, { "docid": "07245ef935e84c28532e56d84ce1e1ff", "score": "0.49505526", "text": "def word_display(secret_word, additional_words):\n\n pass # ToDo: Remove this line", "title": "" }, { "docid": "2239234fbafb738eb7b52dad4053729b", "score": "0.49482384", "text": "def test_makeAltName(self):\n mk = search.makeAltName\n self.assertEqual(mk(\"hello\"), \"hello\")\n self.assertEqual(mk(\"Hello\"), \"hello\")\n self.assertEqual(mk(\"Hello There\"), \"hello there\")\n self.assertEqual(mk(\"Cat's Grace\"), \"cats grace\")\n self.assertEqual(mk(\"<Cat</>'s Grace\"), \"cats grace\")\n self.assertEqual(mk(\"<Cat</>'s Grace, Mass\"), \"cats grace mass\")", "title": "" }, { "docid": "1e5acc9bf41cdcc73c0044d425ac6575", "score": "0.49434415", "text": "def do_speak_with_plants(self, arg):\r\n spell_helper(\"Speak with Plants\")", "title": "" }, { "docid": "49e70f9951abe68136e4c4c41aa5361b", "score": "0.4940188", "text": "def update_text(self):\n likes = \"\"\n if self.likes_comedy.get():\n likes += \"You like comedic movies.\\n\"\n if self.likes_drama.get():\n likes += \"You like dramatic movies.\\n\"\n if self.likes_romance.get():\n likes += \"You like romantic movies.\"\n self.results_txt.delete(0.0, END)\n self.results_txt.insert(0.0, likes)", "title": "" }, { "docid": 
"857dc3a4d706a6c6e0e489308391ebbe", "score": "0.49396452", "text": "def generate_basic_description(cls) -> str:\n extractive_abstractive = \"extractive\" if cls.is_extractive else \"abstractive\"\n neural = \"neural\" if cls.is_neural else \"non-neural\"\n\n basic_description = (\n f\"{cls.model_name} is a\"\n f\"{'query-based' if cls.is_query_based else ''} \"\n f\"{extractive_abstractive}, {neural} model for summarization.\"\n )\n if cls.is_multi_document or cls.is_dialogue_based:\n basic_description += (\n f\"It can handle {'multi-document' if cls.is_multi_document else ''} \"\n f\"{'dialogue' if cls.is_dialogue_based else ''} textual data.\"\n )\n\n return basic_description", "title": "" }, { "docid": "a84cc20e8a175f169ebedd5c3807f276", "score": "0.49395812", "text": "def shortHelpString(self):\r\n return self.tr('PegelOnline short description: to do')", "title": "" }, { "docid": "569708a945ca304459d40fce304f9b70", "score": "0.49388468", "text": "def speak(self, n, start = None):\n\t\tif (not start) or (start not in self.unique_words):\n\t\t\tk = int( self.t * random.random() )\n\t\t\tk = k if k != self.t else self.t-1\n\t\t\tstart = self.unique_words[k]\n\t\tfinal = []\n\t\tcurrent_word = start\n\t\tcurrent_index = self.unique_words.index(start)\n\t\tfor i in range(n):\n\t\t\tfinal.append(current_word)\n\t\t\tcurrent_word = ''\n\t\t\ti = 0\n\t\t\twhile current_word == '':\n\t\t\t\ti += 1\n\t\t\t\tnew_word, new_index = self.obtain_word(current_index)\n\t\t\t\tcurrent_word = new_word\n\t\t\t\tif current_word in final:\n\t\t\t\t\tcurrent_word = ''\n\t\t\t\tif current_word == 'system':\n\t\t\t\t\tcurrent_word = ''\n\t\t\t\tif current_word != '':\n\t\t\t\t\tcurrent_index = new_index\n\t\t\t\tif i % 20 == 0:\n\t\t\t\t\ti = 1\n\t\t\t\t\tcurrent_index = new_index\n\t\treturn ' '.join(final)", "title": "" }, { "docid": "6d03fc0041fe8df6488c1b5bac038fa4", "score": "0.4937366", "text": "def build_simple_sms(self, content):\r\n return '{0}'.format(content)", "title": "" }, { "docid": "5e8c806e133b7b99e32caa785747d489", "score": "0.493727", "text": "def translate(sentence):\n\n\twords = {'esta': 'is', 'la': 'the', 'en': 'in', 'gato': 'cat', 'casa': 'house', 'el': 'the'}\n\treturn ' '.join(words[word] for word in sentence.split() if word in words)", "title": "" }, { "docid": "b2cbdc5035c66cc592f3ee05c377cd70", "score": "0.49366137", "text": "def main():\r\n parser = argparse.ArgumentParser(prog='pig', \r\n description=\"English to Pig Latin translator\",\r\n epilog='(lsoAay aay igPay atinLay otay nglishEay ranslatortay!)')\r\n parser.add_argument('text', nargs='?')\r\n args = parser.parse_args()\r\n\r\n if not args.text:\r\n # No text was given on the command line, so we will \r\n # translate our test messages.\r\n phrases = ['These are not the droids youre looking for', \r\n 'I have a bad feeling about this']\r\n bonus_phrases = ['Do. Or do not. There is no try!',\r\n 'Iay maay luentfay niay veroay ixsay illionmay ormsfay foay ommunicationcay!',\r\n 'ren\\'tAay ouyay aay ittlelay hortsay orfay aay tormtrooperSay?',\r\n 'Iay indfay ouryay acklay foay aithfay isturbingday.']\r\n\r\n for phrase in phrases:\r\n print('\\nOriginal phrase: ' + phrase )\r\n print('Translation: ' + translate(phrase))\r\n\r\n print('\\n**BONUS PHRASES**')\r\n for phrase in bonus_phrases:\r\n print('\\nOriginal phrase: ' + phrase )\r\n print('Translation: ' + translate(phrase))\r\n sys.exit(0)\r\n\r\n # Text was given on the command line. 
Translate it.\r\n print(translate(args.text))", "title": "" }, { "docid": "227e8ad827b248167c6ba17947fd956e", "score": "0.49296322", "text": "def short_action_string(self):\n output = \"{0} \".format(self.actor)\n if self.override_string:\n output += self.override_string\n else:\n output += self.verb\n return output", "title": "" } ]
599789f663255a4386290c952ba2936e
Removes points that are outside the interval determined by [min_percentile, max_percentile]. The percentiles are computed and applied to each axis (x,y,z) individually.
[ { "docid": "4799caa088d6eb0a3b02e5d1a31d3f83", "score": "0.70688254", "text": "def remove_distant_and_close_points(cloud:np.ndarray, min_percentile:int=1, max_percentile:int=99)->np.ndarray:\n cloud = cloud.reshape(-1,3)\n\n min_x = np.percentile(cloud[:,0], min_percentile, interpolation='nearest')\n max_x = np.percentile(cloud[:,0], max_percentile, interpolation='nearest')\n min_y = np.percentile(cloud[:,1], min_percentile, interpolation='nearest')\n max_y = np.percentile(cloud[:,1], max_percentile, interpolation='nearest')\n min_z = np.percentile(cloud[:,2], min_percentile, interpolation='nearest')\n max_z = np.percentile(cloud[:,2], max_percentile, interpolation='nearest')\n\n # condition_x = np.any([cloud[:,0] < min_x, cloud[:,0] > max_x], axis=0).reshape((cloud.shape[0], 1))\n # condition_y = np.any([cloud[:,1] < min_y, cloud[:,1] > max_y], axis=0).reshape((cloud.shape[0], 1))\n # condition_z = np.any([cloud[:,2] < min_z, cloud[:,2] > max_z], axis=0).reshape((cloud.shape[0], 1))\n # condition = np.concatenate([condition_x, condition_y, condition_z], axis=1)\n # TODO Try this\n condition_xyz = np.any([cloud[:,0] < min_x,\n cloud[:,0] > max_x,\n cloud[:,1] < min_y,\n cloud[:,1] > max_y,\n cloud[:,2] < min_z,\n cloud[:,2] > max_z], axis=0).reshape((cloud.shape[0], 1))\n condition = np.concatenate([condition_xyz, condition_xyz, condition_xyz], axis=1)\n\n\n masked_cloud = ma.masked_where(condition, cloud, copy=False)\n # ~masked_cloud.mask removes the values where the mask is True.\n # With True meaning that the condition applied, this means that we want to remove those values. \n return masked_cloud[~masked_cloud.mask].data.reshape((-1,3))", "title": "" } ]
[ { "docid": "4b77098adb812cbb946d3a783f6e7601", "score": "0.6658416", "text": "def test_exclude_percentile(self):\n data = np.copy(DATA)\n data[0:50, 0:50] = np.nan\n with pytest.warns(AstropyUserWarning,\n match='Input data contains invalid values'):\n bkg = Background2D(data, (25, 25), filter_size=(1, 1),\n exclude_percentile=100.0)\n assert len(bkg._box_idx) == 12", "title": "" }, { "docid": "690c031e82bbcca291c6dd4b0d44d1d8", "score": "0.6491309", "text": "def remove_outliers(arr, z=1.5):\n\n q25, q75 = np.percentile(arr, 25), np.percentile(arr, 75)\n iqr = q75 - q25\n limit = z * iqr\n return [a for a in arr if a >= q25 - limit and a <= q75 + limit]", "title": "" }, { "docid": "adf5bbd83ad86af55b8d74ab46c9af9c", "score": "0.63799745", "text": "def imenhance(x, percentiles=[5, 95]):\n isf = np.isfinite(x)\n (vmin, vmax) = np.percentile(x[isf], percentiles)\n y = x\n y[x < vmin] = vmin\n y[x > vmax] = vmax\n return y", "title": "" }, { "docid": "92ebe1e8b48c3d46c038adb557df00af", "score": "0.6334107", "text": "def test_percentiles_axis():\n data = rs.randn(10, 10)\n\n # Test against the median with 50th percentile\n median1 = np.median(data)\n out1 = stat.percentiles(data, 50)\n assert_array_almost_equal(median1, out1)\n\n for axis in range(2):\n median2 = np.median(data, axis=axis)\n out2 = stat.percentiles(data, 50, axis=axis)\n assert_array_almost_equal(median2, out2)\n\n median3 = np.median(data, axis=0)\n out3 = stat.percentiles(data, [50, 95], axis=0)\n assert_array_almost_equal(median3, out3[0])\n assert_equal(2, len(out3))", "title": "" }, { "docid": "df28551e5a35358c50aa0af5768f0865", "score": "0.6319891", "text": "def remove_outliers(data, col):\n def q1(x):\n q = np.percentile(x,25)\n return q\n\n def q3(x):\n q = np.percentile(x,75)\n return q\n\n IR = q3(data[col])-q1(data[col])\n outliers_l = q1(data[col])-1.5*IR\n outliers_u = q3(data[col])+1.5*IR\n\n data = data[(data[col]>=outliers_l) & (data[col]<=outliers_u)]\n\n return data", "title": "" }, { "docid": "965cdb1d2fe6391b707574a6e5a476ac", "score": "0.6246908", "text": "def percentiles(data, lo, hi):\n max = data.max()\n dataArray = data.flatten()\n pHi = numpy.percentile(dataArray, hi)\n pLo = numpy.percentile(dataArray, lo)\n range = pHi - pLo\n scale = range/255\n data = numpy.clip(data, pLo, pHi)\n data-= pLo\n data/=scale\n return data", "title": "" }, { "docid": "897e1187aadee202f1d69c6d3a7acfd8", "score": "0.6207559", "text": "def remove_outliers(data):\n\tdata.sort()\n\tnumPointsToRemove = (5*len(data))/100\n\treturn data[numPointsToRemove:len(data)-numPointsToRemove]", "title": "" }, { "docid": "cfd45fb0db06a450ac01eb83bbfc6cbd", "score": "0.6200231", "text": "def get_percentiles(self):\n\n if self.xcube is None:\n self.xpercentiles = self.quantiles\n else:\n data = self.xcube.data\n if np.sum(np.isfinite(data)) == 0:\n #All data is nan, cannot calculate percentiles\n raise ValueError('Data for x-axis all nan')\n self.xpercentiles = np.percentile(data[~np.isnan(data)],\n q=self.quantiles)\n\n for line in self.lines:\n data = line['cube'].data\n if np.sum(np.isfinite(data)) == 0:\n #All data is nan, cannot calculate percentiles\n warnings.warn('All nan data')\n continue\n line['ypercentiles'] = np.percentile(data[~np.isnan(data)],\n q=self.quantiles)\n\n return self.lines", "title": "" }, { "docid": "925bcfffbd57ecbc41a6162e08cfb24e", "score": "0.6165104", "text": "def test_Data_percentile_median(self):\n # ranks: a sequence of percentile rank inputs. 
NOTE: must\n # include 50 as the last input so that cf.Data.median is also\n # tested correctly.\n ranks = ([30, 60, 90], [90, 30], [20])\n ranks = ranks + (50,)\n\n d = cf.Data(self.a, chunks=(2, 2, 3, 5))\n\n for axis in [None] + self.axes_combinations:\n for keepdims in (True, False):\n for q in ranks:\n a1 = np.percentile(d, q, axis=axis, keepdims=keepdims)\n b1 = d.percentile(q, axes=axis, squeeze=not keepdims)\n self.assertEqual(b1.shape, a1.shape)\n self.assertTrue((b1.array == a1).all())\n\n # Check that the _axes attribute has been updated\n # for the new rank dimension, where appropriate.\n if keepdims:\n if isinstance(q, list):\n self.assertEqual(len(b1._axes), len(d._axes) + 1)\n else:\n self.assertEqual(len(b1._axes), len(d._axes))\n\n # Masked data\n a = self.ma\n filled = np.ma.filled(a, np.nan)\n d = cf.Data(self.ma, chunks=(2, 2, 3, 5))\n\n with np.testing.suppress_warnings() as sup:\n sup.filter(\n category=RuntimeWarning,\n message=\".*All-NaN slice encountered.*\",\n )\n for axis in [None] + self.axes_combinations:\n for keepdims in (True, False):\n for q in ranks:\n a1 = np.nanpercentile(\n filled, q, axis=axis, keepdims=keepdims\n )\n mask = np.isnan(a1)\n if mask.any():\n a1 = np.ma.masked_where(mask, a1, copy=False)\n\n b1 = d.percentile(q, axes=axis, squeeze=not keepdims)\n self.assertEqual(b1.shape, a1.shape)\n self.assertTrue((b1.array == a1).all())\n\n # Check for no warning when data is of masked type but with no\n # missing values\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", category=UserWarning)\n d = cf.Data(np.ma.arange(100))\n d.percentile(ranks[0])\n\n # Test scalar input (not masked)\n a = np.array(9)\n d = cf.Data(a)\n for keepdims in (True, False):\n for q in ranks:\n a1 = np.nanpercentile(a, q, keepdims=keepdims)\n b1 = d.percentile(q, squeeze=not keepdims)\n self.assertEqual(b1.shape, a1.shape)\n self.assertTrue((b1.array == a1).all())\n\n # Test scalar input (masked)\n a = np.ma.array(9, mask=True)\n filled = np.ma.filled(a.astype(float), np.nan)\n d = cf.Data(a)\n\n with np.testing.suppress_warnings() as sup:\n sup.filter(\n category=RuntimeWarning,\n message=\".*All-NaN slice encountered.*\",\n )\n for keepdims in (True, False):\n for q in ranks:\n a1 = np.nanpercentile(filled, q, keepdims=keepdims)\n mask = np.isnan(a1)\n if mask.any():\n a1 = np.ma.masked_where(mask, a1, copy=False)\n\n b1 = d.percentile(q, squeeze=not keepdims)\n self.assertEqual(b1.shape, a1.shape)\n self.assertTrue(\n (b1.array == a1).all() in (True, np.ma.masked)\n )\n\n # Test mtol=1\n d = cf.Data(self.a)\n d[...] 
= cf.masked # All masked\n for axis in [None] + self.axes_combinations:\n for q in ranks:\n e = d.percentile(q, axes=axis, mtol=1)\n self.assertFalse(np.ma.count(e.array, keepdims=True).any())\n\n a = np.ma.arange(12).reshape(3, 4)\n d = cf.Data(a)\n d[1, -1] = cf.masked # 1 value masked\n for q in ranks:\n e = d.percentile(q, mtol=1)\n self.assertTrue(np.ma.count(e.array, keepdims=True).all())\n\n # Test mtol=0\n for q in ranks:\n e = d.percentile(q, mtol=0)\n self.assertFalse(np.ma.count(e.array, keepdims=True).any())\n\n # Test mtol=0.1\n for q in ranks:\n e = d.percentile(q, axes=0, mtol=0.1)\n self.assertEqual(np.ma.count(e.array), 3 * e.shape[0])\n\n for q in ranks[:-1]: # axis=1: exclude the non-sequence rank\n e = d.percentile(q, axes=1, mtol=0.1)\n self.assertEqual(np.ma.count(e.array), 2 * e.shape[0])\n\n q = ranks[-1] # axis=1: test the non-sequence rank\n e = d.percentile(q, axes=1, mtol=0.1)\n self.assertEqual(np.ma.count(e.array), e.shape[0] - 1)\n\n # Check invalid ranks (those not in [0, 100])\n for q in (-9, [999], [50, 999], [999, 50]):\n with self.assertRaises(ValueError):\n d.percentile(q).array", "title": "" }, { "docid": "55d47aebe60f31e69332e8e7761f80dd", "score": "0.60653436", "text": "def outliers(data):\n if len(data) < 2:\n raise RuntimeError('Too few values given to outliers')\n\n first_quartile = lower_quartile(data)\n third_quartile = upper_quartile(data)\n iqr = interquartile_range(data)\n\n lower_limit = (first_quartile - 1.5 * iqr)\n upper_limit = (third_quartile + 1.5 * iqr)\n\n results = []\n for each in data:\n if each < lower_limit or each > upper_limit:\n results.append(each)\n return results", "title": "" }, { "docid": "918db9d30412dc8457e0d2782689ae67", "score": "0.6020458", "text": "def ml_percentile(in_data, percentiles):\n\n data = np.sort(in_data)\n p_rank = 100.0 * (np.arange(data.size) + 0.5) / data.size\n perc = np.interp(percentiles, p_rank, data, left=data[0], right=data[-1])\n return perc", "title": "" }, { "docid": "c246046e0f4f4609bc9c7dc39552eef3", "score": "0.59835273", "text": "def xr_percentile(\n src: xr.Dataset,\n percentiles: Sequence,\n nodata,\n) -> xr.Dataset:\n\n data_vars = {}\n for band, xx in src.data_vars.items():\n \n xx_data = xx.data\n if len(xx.chunks[0]) > 1:\n xx_data = xx_data.rechunk({0: -1})\n \n tk = tokenize(xx_data, percentiles, nodata)\n for percentile in percentiles:\n name = f\"{band}_pc_{int(100 * percentile)}\"\n yy = da.map_blocks(\n partial(np_percentile, percentile=percentile, nodata=nodata), \n xx_data, \n drop_axis=0, \n meta=np.array([], dtype=xx.dtype),\n name=f\"{name}-{tk}\",\n )\n data_vars[name] = xr.DataArray(yy, dims=xx.dims[1:], attrs=xx.attrs)\n \n coords = dict((dim, src.coords[dim]) for dim in xx.dims[1:])\n return xr.Dataset(data_vars=data_vars, coords=coords, attrs=src.attrs)", "title": "" }, { "docid": "1235390688f6d6402d53315d77f32d98", "score": "0.5935325", "text": "def spatial_percentile(percentile, points_on_line_x, points_on_line_y):\n assert 0. <= percentile <= 1.\n # First, uniquely associate each value with its spacing. 
This is just a\n # trapezium rule approach:\n spacing_each_trapezium = points_on_line_x[1:] - points_on_line_x[:-1]\n val_at_each_trapezium = (points_on_line_y[:-1] + points_on_line_y[1:]) / 2.\n area_each_trapezium = spacing_each_trapezium * val_at_each_trapezium\n # order the data by y:\n order_for_vals = np.argsort(val_at_each_trapezium)\n accumulating_areas = np.cumsum(area_each_trapezium[order_for_vals])\n total_area = accumulating_areas[-1]\n # find the percentile in the accumulating areas:\n pc_position = np.searchsorted(accumulating_areas,\n percentile * total_area)\n pc_value = val_at_each_trapezium[order_for_vals][pc_position]\n return pc_value", "title": "" }, { "docid": "e6eb8ad3be077c348dc6822514b4e758", "score": "0.5928688", "text": "def test_percentiles():\n single_val = 5\n single = stat.percentiles(a_range, single_val)\n assert_equal(single, single_val)\n\n multi_val = [10, 20]\n multi = stat.percentiles(a_range, multi_val)\n assert_array_equal(multi, multi_val)\n\n array_val = rs.randint(0, 101, 5).astype(float)\n array = stat.percentiles(a_range, array_val)\n assert_array_almost_equal(array, array_val)", "title": "" }, { "docid": "c710a050a4aa28f1b6107d3277bb3747", "score": "0.5895379", "text": "def get_smoothed_y_percentiles(self,i,imported_array_list,y_percentile,smoothing_kernel_y):\n imported_array = imported_array_list[i]\n y_percentiles = np.percentile(imported_array[0],y_percentile,axis=1,interpolation='lower')\n y_percentiles_smoothed = self.median_filter_2d(y_percentiles,smoothing_kernel_y)\n return y_percentiles_smoothed", "title": "" }, { "docid": "ad1d0f6450ba6fad33c43bbe8324a4e4", "score": "0.58534145", "text": "def clip_extreme(imgs, percentile=5, allowed_steepness=3, dim=2):\n if isinstance(imgs, torch.Tensor):\n return_torch = True\n device = imgs.device\n dtype = imgs.dtype\n imgs = imgs.cpu().numpy()\n else:\n return_torch=False\n img_shape = imgs.shape[-dim:]\n imgs = imgs.reshape(imgs.shape[:-dim] + (-1,))\n # mins = np.min(imgs, axis=-1)\n # maxs = np.max(imgs, axis=-1)\n soft_mins = np.percentile(imgs, percentile, axis=-1)\n soft_maxs = np.percentile(imgs, 100-percentile, axis=-1)\n ranges = soft_maxs - soft_mins\n print(ranges)\n allowed_extra = ranges * percentile/100 * allowed_steepness\n imgs = imgs.clip((soft_mins - allowed_extra)[..., None],\n (soft_maxs - allowed_extra)[..., None])\n imgs = imgs.reshape(imgs.shape[:-1] + img_shape)\n if return_torch:\n return torch.tensor(imgs, dtype=dtype).to(device)\n else:\n return imgs", "title": "" }, { "docid": "6c5768465361f53b282749aded920a05", "score": "0.5836667", "text": "def test_Data_mean_of_upper_decile(self):\n # Masked array, non-masked weights\n a = self.ma\n weights = self.w\n d = cf.Data(a, \"m\", chunks=(2, 3, 2, 5))\n\n for axis in axis_combinations(a):\n b = reshape_array(a, axis)\n w = reshape_array(weights, axis)\n b = np.ma.filled(b, np.nan)\n with np.testing.suppress_warnings() as sup:\n sup.filter(\n category=RuntimeWarning,\n message=\".*All-NaN slice encountered.*\",\n )\n sup.filter(\n category=UserWarning,\n message=\"Warning: 'partition' will ignore the 'mask' of the MaskedArray.*\",\n )\n p = np.nanpercentile(b, 90, axis=-1, keepdims=True)\n\n b = np.ma.masked_where(np.isnan(b), b, copy=False)\n p = np.where(np.isnan(p), b.max() + 1, p)\n\n with np.testing.suppress_warnings() as sup:\n sup.filter(\n category=UserWarning,\n message=\"Warning: 'partition' will ignore the 'mask' of the MaskedArray.*\",\n )\n b = np.ma.where(b < p, np.ma.masked, b)\n\n b = np.ma.average(b, 
axis=-1, weights=w)\n b = np.ma.asanyarray(b)\n\n e = d.mean_of_upper_decile(\n axes=axis, weights=weights, squeeze=True\n )\n with np.testing.suppress_warnings() as sup:\n sup.filter(\n category=UserWarning,\n message=\"Warning: 'partition' will ignore the 'mask' of the MaskedArray.*\",\n )\n e = e.array\n\n self.assertTrue(\n (np.ma.getmaskarray(e) == np.ma.getmaskarray(b)).all()\n )\n self.assertTrue(np.allclose(e, b))\n\n # mtol\n a[0, 0] = cf.masked\n d = cf.Data(a, \"m\", chunks=3)\n e = d.mean_of_upper_decile(mtol=0)\n self.assertEqual(e.array.mask, True)\n\n # Inplace\n self.assertIsNone(d.mean_of_upper_decile(inplace=True))", "title": "" }, { "docid": "c97fa74d90556805b662a2bd23ac1100", "score": "0.5801683", "text": "def pruning_with_percentile(self, q: float):\n if len(self._weight_mask) == 0:\n return\n for name, param in self.named_parameters():\n if 'weight' not in name:\n # skip the other type weights\n continue\n cpu_param = param.cpu()\n old_mask = self._weight_mask[id(param)][0]\n percentile_value = percentile(tensor_nonzero(cpu_param * old_mask), q)\n new_mask = torch.where(cpu_param.abs() < percentile_value,\n torch.zeros_like(old_mask), old_mask)\n param.data = param.data * new_mask.to(self._device)\n self._weight_mask.update({id(param) : (new_mask, name)})", "title": "" }, { "docid": "93ea1c713b33c7800b321a2f171e25ef", "score": "0.57994294", "text": "def clamp(img, percentile, normalize=False):\n img_ = np.copy(img)\n img_[img_ > np.percentile(img_, percentile)] = np.percentile(img_, percentile)\n if normalize:\n out = img_ / img_.max()\n return out\n elif not normalize:\n out = img_\n return out\n else:\n raise ValueError('Normalize must be either True or False.')", "title": "" }, { "docid": "c8afc2470e658da8fc0766cb9a60bd9c", "score": "0.57960975", "text": "def preprocess_minmax(img: np.array) -> np.array:\n im_min = np.percentile(img, 2)\n im_max = np.percentile(img, 98)\n im_range = im_max - im_min\n # print(f'percentile 2 {im_min}, percentile 98 {im_max}, im_range {im_range}')\n\n # Normalise to the percentile\n img = img.astype(np.float32)\n img = (img - im_min) / im_range\n img = img.clip(0, 1)\n\n return img", "title": "" }, { "docid": "7eca5641570a5c15dcdb9fb53bcf5d9c", "score": "0.5781637", "text": "def clean_data(data,min=0,max=100,sd=5):\n data = data.where((data>min) & (data<max))\n data = data.where(reject_outliers(data,sd))\n return data", "title": "" }, { "docid": "d2d68303713daf03076c4b5f4cfa6d49", "score": "0.57463306", "text": "def grid_from_X(x, percentiles=(0.05, 0.95), grid_resolution=100):\r\n x = x[~x.isnull()]\r\n if len(percentiles) != 2:\r\n raise ValueError('percentile must be tuple of len 2')\r\n if not all(0. <= x <= 1. 
for x in percentiles):\r\n raise ValueError('percentile values must be in [0, 1]')\r\n\r\n uniques = np.unique(x)\r\n if uniques.shape[0] < grid_resolution:\r\n # feature has low resolution use unique vals\r\n return uniques\r\n else:\r\n emp_percentiles = mquantiles(x, prob=percentiles)\r\n # create axis based on percentiles and grid resolution\r\n return np.linspace(emp_percentiles[0],\r\n emp_percentiles[1],\r\n num=grid_resolution, endpoint=True)", "title": "" }, { "docid": "4121c3d623f7dc2931c219f7c70549b8", "score": "0.5726104", "text": "def delete_outliers(self):\n\n limits = self._limits\n column = self._column\n\n func = lambda x: (x >= limits[0]) & (x <= limits[1])\n self._df = self._df.filter(func(col(column)))\n\n return self._df", "title": "" }, { "docid": "d7007c98f46d2533fc8d1c070f3c5e27", "score": "0.57132715", "text": "def _clip(\n image: xr.DataArray, p_min: int, p_max: int,\n ) -> xr.DataArray:\n v_min, v_max = np.percentile(image, [p_min, p_max])\n\n image = image.clip(min=v_min, max=v_max)\n\n return image", "title": "" }, { "docid": "7caf754da40e8e99d9713f3806959419", "score": "0.56986296", "text": "def robustScale(x, cols: list = []):\n if not cols:\n cols = list(range(x.shape[1]))\n top = x[:, cols] - np.percentile(x[:, cols], 25, axis=0)\n bot = np.percentile(x[:, cols], 75, axis=0) - np.percentile(x[:, cols], 25, axis=0)\n x[:, cols] = top / bot\n return x", "title": "" }, { "docid": "d2ae1ab2426fcc4c14b2cbf19c15561a", "score": "0.56981295", "text": "def unique_percentiles_interpolate(perc_values,\n percentiles=[0, 5, 10, 30, 50,\n 70, 90, 95, 100],\n k=1):\n uniq_ind = np.sort(np.unique(perc_values, return_index=True)[1])\n if len(uniq_ind) == 1:\n uniq_ind = np.repeat(uniq_ind, 2)\n uniq_ind[-1] = len(percentiles) - 1\n uniq_perc_values = perc_values[uniq_ind]\n inter = sc_int.InterpolatedUnivariateSpline(\n np.array(percentiles)[uniq_ind],\n uniq_perc_values,\n k=k, ext=0,\n check_finite=True)\n uniq_perc_values = inter(percentiles)\n return uniq_perc_values", "title": "" }, { "docid": "f1fa46cb22bbd1235f4184633a82ef43", "score": "0.569696", "text": "def _calc_percentile(self, fitted_values: np.ndarray) -> np.ndarray:\n return np.percentile(fitted_values, self.params.quantile, 0)", "title": "" }, { "docid": "ffb16cbf4a6c8f649711c9d4d23cc132", "score": "0.56882125", "text": "def _percentiles(t, q=[15., 50., 85.]):\n return tfp.stats.percentile(t, q, axis=0)", "title": "" }, { "docid": "fd7a14d064af1e1ef33811226916d7e2", "score": "0.5669077", "text": "def remove_outliers(self, threshold=5):\n for i in range(len(self)):\n self[i] = self[i].remove_outliers(threshold=threshold)\n\n return self", "title": "" }, { "docid": "d5ebd6452b751e851f84e51d4723efa5", "score": "0.5598348", "text": "def _percentile_values(y_samples, percentile_step):\n percentiles = range(0, 100 + percentile_step, percentile_step)\n assert len(percentiles) % 2 == 1\n assert 0 in percentiles\n assert 50 in percentiles\n assert 100 in percentiles\n\n if isinstance(y_samples, np.ndarray):\n assert len(y_samples.shape) == 2, 'y_samples must be a 2D numpy array or a list of lists of numbers.'\n assert len(y_samples) > 0, 'y_samples must not be empty.'\n assert len(y_samples[0]) > 0, 'y_samples must not be empty.'\n else:\n assert isinstance(y_samples, list), 'y_samples must be a 2D numpy array or a list of lists of numbers.'\n assert y_samples, 'y_samples must not be empty.'\n for ys in y_samples:\n assert isinstance(ys, list), 'y_samples must be a 2D numpy array or a list of lists of numbers.'\n 
assert ys, 'y_samples must not have empty inner lists, each X value must have at least one Y sample.'\n if len(set(len(ys) for ys in y_samples)) == 1:\n # All y_samples rows have the same length. Can convert into a numpy array for fast percentile computation.\n y_samples = np.array(y_samples)\n\n # y_list is a list of arrays. Each list is for a percentile, each array element is y value at x.\n if isinstance(y_samples, np.ndarray):\n # y_samples is a numpy array, can compute percentiles in one go.\n y_list = np.percentile(y_samples, percentiles, axis=1)\n else:\n # y_samples is a list of lists of uneven lengths. Slow mode.\n logger.warning('Slow percentile computation: consider switching to 2D numpy arrays.')\n num_x = len(y_samples)\n y_list = []\n for p in percentiles:\n y_list.append(np.zeros(num_x, dtype=np.float64))\n for i in xrange(num_x):\n y_list[-1][i] = np.percentile(y_samples[i], p)\n\n assert len(y_list) == len(percentiles)\n return percentiles, y_list", "title": "" }, { "docid": "2dea2bf60eab93913a5c207d58a22128", "score": "0.5596752", "text": "def percentile(x, ys):\n sz_y = len(ys)\n if sz_y == 0:\n return -1\n elif sz_y == 1:\n return 0.\n else:\n return sum(y < x for y in ys) / float(len(ys) - 1) * 100", "title": "" }, { "docid": "01bd0cce8d8200e1383c516aa95d44cf", "score": "0.5595679", "text": "def clipOutliers(self,limits=(1,99),percentage=True,action='noclipmedian'):\n if np.isscalar(limits):\n limits=(-limits,limits)\n elif len(limits)==2:\n pass\n else:\n raise ValueError('unrecognized limit form')\n \n im=self._active\n \n if percentage:\n mi,ma=np.min(im),np.max(im)\n rng=ma-mi\n limits=(mi+limits[0]*rng/100,mi+limits[1]*rng/100)\n \n clipcond=np.logical_or(im>limits[1],im<limits[0])\n self._active,nclip=self._repl_inds(action,clipcond)\n \n self._changed = True\n return nclip", "title": "" }, { "docid": "d315a6018dce115c8216237f89608357", "score": "0.55749416", "text": "def get_outliers(values):\n # drop NAs \n values = values.dropna()\n names = values.index\n # calculate percentiles\n p25 = np.percentile(values, 25)\n p75 = np.percentile(values, 75)\n # initiate empty lists \n indices_of_outliers = []\n values_of_outliers = []\n # for each value \n for value, name in zip(values, names):\n # apply is_extreme\n if is_extreme(value, p25, p75):\n # get index and value\n indices_of_outliers.append(name)\n values_of_outliers.append(value)\n outliers = zip(indices_of_outliers, values_of_outliers)\n #outliers.sort(key = lambda t: t[1])\n return outliers", "title": "" }, { "docid": "d915e3ef7557429ff895008d4251a249", "score": "0.5572701", "text": "def percentile_for(percentile: int) -> StatFunction:\n if percentile < 0 or percentile > 100:\n raise TypeError('percentile must be between 0 and 100, inclusive')\n\n def percentile_(series: pd.Series):\n return series.quantile(percentile / 100)\n percentile_.__name__ = 'percentile_{:2.0f}'.format(percentile)\n return percentile_", "title": "" }, { "docid": "8ba315638fa3d0873cac3162f5353d84", "score": "0.5549827", "text": "def imshowe(x, percentiles=[5, 95], **kwargs):\n isf = np.isfinite(x)\n if np.any(isf):\n (vmin, vmax) = np.percentile(x[isf], percentiles)\n else:\n logger.warning('No finite numbers in array')\n vmin = 0\n vmax = 1\n if 'percentiles' in kwargs:\n del(kwargs['percentiles'])\n return imshow(x, vmin=vmin, vmax=vmax, **kwargs)", "title": "" }, { "docid": "07bfdb5921b3b1d200239e3fb4ea0e7f", "score": "0.55362695", "text": "def __init__(self, img, percentiles=[1,99]):\n assert len(percentiles) == 2\n vmin, vmax = 
np.percentile(img, percentiles)\n super().__init__(vmin=vmin, vmax=vmax)", "title": "" }, { "docid": "e54735f7bd2b6ba02a042daeb00c4096", "score": "0.55264735", "text": "def remove_outliers(tX):\n clean_data = []\n for f in tX.T:\n mean = np.mean(f, axis=0)\n std = np.std(f, axis=0)\n edge = std * 3\n lowerb = mean - edge\n upperb = mean + edge\n for i,x in enumerate(f):\n if x<lowerb:\n f[i]=lowerb\n elif x>upperb:\n f[i]=upperb\n \n return tX", "title": "" }, { "docid": "f6dc237a9d11dd5d0848b6e5b3a57a2a", "score": "0.55225736", "text": "def scale_percentile(matrix):\n orig_shape = matrix.shape\n matrix = np.reshape(matrix, [matrix.shape[0]*matrix.shape[1], 3]).astype(float)\n \n # Get 2nd and 98th percentile\n mins = np.percentile(matrix, 1, axis=0)\n maxs = np.percentile(matrix, 99, axis=0) - mins\n \n matrix = (matrix - mins[None,:])/maxs[None,:]\n matrix = np.reshape(matrix, orig_shape)\n matrix = matrix.clip(0,1)\n return matrix", "title": "" }, { "docid": "099a0f064db0ba5d962635dde9214114", "score": "0.55035377", "text": "def remove_outliers(\r\n data,\r\n method: str = \"gaussian\",\r\n cutoff=3,\r\n skip: Optional[Union[str, List[str]]] = None,\r\n only: Optional[Union[str, List[str]]] = None,\r\n):\r\n # Copy to avoid replacing in-place\r\n data = data.copy(deep=True)\r\n\r\n # Which columns\r\n columns = _validate_skip_only(data, skip, only)\r\n is_continuous = _get_dtypes(data) == \"continuous\"\r\n columns = columns & is_continuous\r\n\r\n # Check cutoff and method, printing what is being done\r\n if cutoff <= 0:\r\n raise ValueError(\"'cutoff' must be >= 0\")\r\n if method == \"iqr\":\r\n click.echo(\r\n f\"Removing outliers from {len(data):,} observations of {columns.sum():,} continuous variables \"\r\n f\"with values < 1st Quartile - ({cutoff} * IQR) or > 3rd quartile + ({cutoff} * IQR)\"\r\n )\r\n elif method == \"gaussian\":\r\n click.echo(\r\n f\"Removing outliers from {len(data):,} observations of {columns.sum():,} continuous variables \"\r\n f\"with values more than {cutoff} standard deviations from the mean\"\r\n )\r\n else:\r\n raise ValueError(\r\n f\"'{method}' is not a supported method for outlier removal - only 'gaussian' and 'iqr'.\"\r\n )\r\n\r\n # Remove outliers\r\n # Note: This could be faster by performing calculations on the entire dataset at once, but in practice this should\r\n # be used on more of a limited basis, reviewing changes in each variable.\r\n for col_name, process_col in columns.iteritems():\r\n if not process_col:\r\n continue\r\n if method == \"iqr\":\r\n q1 = data[col_name].quantile(0.25)\r\n q3 = data[col_name].quantile(0.75)\r\n iqr = abs(q3 - q1)\r\n bottom = q1 - (iqr * cutoff)\r\n top = q3 + (iqr * cutoff)\r\n elif method == \"gaussian\":\r\n mean = data[col_name].mean()\r\n std = data[col_name].std()\r\n bottom = mean - (std * cutoff)\r\n top = mean + (std * cutoff)\r\n # Replace with NA\r\n outliers_bottom = data[col_name] < bottom\r\n outliers_top = data[col_name] > top\r\n data.loc[outliers_bottom, col_name] = np.nan\r\n data.loc[outliers_top, col_name] = np.nan\r\n # Log\r\n click.echo(\r\n f\"\\tRemoved {outliers_bottom.sum()} low and {outliers_top.sum()} high outliers \"\r\n f\"from {col_name} (outside {bottom:,.2f} to {top:,.2f})\"\r\n )\r\n\r\n return data", "title": "" }, { "docid": "5d466782fdf1fada30b662844168e51f", "score": "0.54912925", "text": "def test_percentiles_acc():\n # First a basic case\n data = np.array([10, 20, 30])\n val = 20\n perc = stat.percentiles(data, 50)\n assert_equal(perc, val)\n\n # Now test against 
scoreatpercentile\n percentiles = rs.randint(0, 101, 10)\n out = stat.percentiles(a_norm, percentiles)\n for score, pct in zip(out, percentiles):\n assert_equal(score, sp.stats.scoreatpercentile(a_norm, pct))", "title": "" }, { "docid": "ef1e88dc7f92dcd42c3bccf54e04e291", "score": "0.54714763", "text": "def raster_percentile(band):\n return bisect(percentiles, band)", "title": "" }, { "docid": "4cba938cf5f4b3bff3ed73041af65142", "score": "0.5466502", "text": "def treat_outliers(df, col):\n Q1 = df[col].quantile(0.25) # 25th quantile\n Q3 = df[col].quantile(0.75) # 75th quantile\n IQR = Q3 - Q1\n Lower_Whisker = Q1 - 1.5 * IQR\n Upper_Whisker = Q3 + 1.5 * IQR\n\n # all the values smaller than Lower_Whisker will be assigned the value of Lower_Whisker\n # all the values greater than Upper_Whisker will be assigned the value of Upper_Whisker\n df[col] = np.clip(df[col], Lower_Whisker, Upper_Whisker)\n\n return df", "title": "" }, { "docid": "4cba938cf5f4b3bff3ed73041af65142", "score": "0.5466502", "text": "def treat_outliers(df, col):\n Q1 = df[col].quantile(0.25) # 25th quantile\n Q3 = df[col].quantile(0.75) # 75th quantile\n IQR = Q3 - Q1\n Lower_Whisker = Q1 - 1.5 * IQR\n Upper_Whisker = Q3 + 1.5 * IQR\n\n # all the values smaller than Lower_Whisker will be assigned the value of Lower_Whisker\n # all the values greater than Upper_Whisker will be assigned the value of Upper_Whisker\n df[col] = np.clip(df[col], Lower_Whisker, Upper_Whisker)\n\n return df", "title": "" }, { "docid": "e9df7fc8ee6575dce8212c40953f0277", "score": "0.54515636", "text": "def fast_abs_percentile(data, percentile=80):\n if hasattr(data, 'mask'):\n # Catter for masked arrays\n data = np.asarray(data[np.logical_not(data.mask)])\n data = np.abs(data)\n data = data.ravel()\n index = int(data.size * .01 * percentile)\n if partition is not None:\n # Partial sort: faster than sort\n return partition(data, index)[index + 1]\n data.sort()\n return data[index + 1]", "title": "" }, { "docid": "e037d5e2552efc63d52f14c53b1d11ca", "score": "0.54457676", "text": "def remove_outliers(target_df, low, high, exclude_cols):\n processed_df = target_df.copy()\n quant_df = target_df.quantile([low, high])\n cols = [col for col in target_df.columns if col not in exclude_cols and col != 'id' and col != 'label']\n quant_df = quant_df[cols]\n quant_df.index = [low, high]\n\n for col in target_df:\n if col != 'id' and col != 'label':\n continue\n if col not in exclude_cols:\n processed_df.loc[processed_df[col] > quant_df[col].values[1], col] = quant_df[col].values[1] # giant outliers convert to higher bound value\n processed_df.loc[processed_df[col] < quant_df[col].values[0], col] = quant_df[col].values[0] # low outliers convert to lower bound value\n\n return processed_df", "title": "" }, { "docid": "4213acde5e9fc04fc8fa35de6a42a361", "score": "0.5420591", "text": "def remove_outliers_iqr(data, feature, exclude = list()):\n \n if np.issubdtype(data[feature].dtype, np.number) and feature not in exclude:\n \n q1 = data[feature].quantile(0.25)\n q3 = data[feature].quantile(0.75)\n\n iqr = q3 - q1\n lower_bound = q1 -(1.5 * iqr) \n upper_bound = q3 +(1.5 * iqr) \n\n outliers = data[feature].apply(lambda x : np.nan if (x < lower_bound or \n x > upper_bound) \n else x)\n else:\n outliers = data[feature]\n \n return outliers", "title": "" }, { "docid": "60bbf9fb5d2ec5d5d12a39afcbe997cc", "score": "0.54202724", "text": "def give_percentiles_all(i, data,X):\n sc = -X[i,:]\n it_idx = data.item_int[np.logical_and(data.patient_int==i,data.rating_test)]\n 
res = give_percentiles(sc)\n return res[it_idx]", "title": "" }, { "docid": "ca6dd745899ef1b97487aa5c639caaa3", "score": "0.54089653", "text": "def _quantile(x, q, **kwargs):\n x = np.asarray(x)\n p = np.asarray(q) * 100\n return np.nanpercentile(x, p, **kwargs)", "title": "" }, { "docid": "1ade424eb2bdfb01d283f23a34f1788a", "score": "0.5402604", "text": "def percentile(self, q, axis=None, iterate_rays=False, **kwargs):\n if hasattr(np, 'nanpercentile') and not iterate_rays:\n result = self.apply_numpy_function(np.nanpercentile, q=q,\n axis=axis, projection=True,\n unit=self.unit, how='cube',\n **kwargs)\n else:\n result = self.apply_function(np.percentile, q=q, axis=axis,\n projection=True, unit=self.unit,\n **kwargs)\n\n return result", "title": "" }, { "docid": "b4926ee08db9f6c45fef789e259d5f67", "score": "0.5397032", "text": "def remove_outliers(self):\n self.stats()\n\n drop_indices = []\n for col_n, col_name in enumerate(self.data.columns):\n\n for row_n, value in enumerate(self.data[col_name]):\n\n if value < self.means[col_n] - 3 * self.stds[col_n] or value > self.means[col_n] + 3 * self.stds[col_n]:\n # Drop entire row if the value is +/- 3 standard deviations from the column mean\n drop_indices.append(row_n)\n\n # Remove duplicate indices\n drop_indices = list(set(drop_indices))\n\n # Save identified outliers\n self.outliers = self.data.iloc[drop_indices]\n\n # Drop outliers and reset index\n self.data.drop(index=drop_indices, inplace=True)\n self.data.reset_index(drop=True, inplace=True)\n\n # Recompute stats after dropping outliers\n print(self.means)\n print(self.stds)\n self.stats()", "title": "" }, { "docid": "01722b9777c491b2d13c3c8e5ddf0200", "score": "0.53939855", "text": "def nanpercentile(a, per):\n out = np.empty(a.shape[0])\n for i in range(a.shape[0]):\n b = a[i][np.isfinite(a[i])]\n if b.shape[0] == 0:\n out[i] = np.nan\n else:\n out[i] = np.percentile(b[np.isfinite(b)], per)\n return out", "title": "" }, { "docid": "1b1ce0008949f2a47072b684561b332d", "score": "0.53826725", "text": "def calc_percentile(values):\n return (rankdata(values) - 1) / len(values) * 100", "title": "" }, { "docid": "0f2270d72949a15dc5dcf971ee0547a2", "score": "0.53773004", "text": "def calculate_outlier_bounds(df, column_name):\n bounds = dict(zip([\"q1\", \"q3\"], df.approxQuantile(column_name, [0.25, 0.75], 0)))\n iqr = bounds['q3'] - bounds['q1']\n bounds['min'] = bounds['q1'] - (iqr * 1.5)\n bounds['max'] = bounds['q3'] + (iqr * 1.5)\n return bounds", "title": "" }, { "docid": "cb3cb72672fd40d469ee379c74bb6563", "score": "0.5361838", "text": "def percentile(data, pcntl):\n nr = np.size(data, axis=0)\n\n x = np.sort(data, axis=0)\n return x[int(np.floor(pcntl*nr)),]", "title": "" }, { "docid": "3aa685a902d1aefdfcb6bfdc1a1e90b2", "score": "0.53562295", "text": "def remove_outlier(dataset, auto_fill=True, ratio=1.5):\n from ..data import Dataset\n from .fill import fill\n from numpy import percentile, isnan, nonzero, nan\n\n if not isinstance(dataset, Dataset):\n raise TypeError(\"Dataset has to be class core.data.dataset.Dataset\")\n\n data = dataset.data\n time_col = dataset.time_column_index\n\n for i in range(data.shape[1]):\n if i == time_col:\n continue\n column = nonzero(~isnan(data[:, i]))[0]\n q25, q75 = percentile(data[column, i], (25, 75))\n iqr = q75 - q25\n data[column[nonzero(data[column, i] < (q25 - ratio * iqr))[0]], i] = nan\n data[column[nonzero(data[column, i] > (q75 + ratio * iqr))[0]], i] = nan\n dataset.change_values(data)\n\n if auto_fill:\n fill(dataset)", "title": 
"" }, { "docid": "4af88fbd4407f7f6b3ad4a602af1c4b9", "score": "0.53540665", "text": "def remove_outliers(df, column_name):\n import pyspark.sql.functions as f\n bounds = calculate_outlier_bounds(df, column_name)\n return df.filter((f.col(column_name) >= bounds['min']) & (f.col(column_name) <= bounds['max']))", "title": "" }, { "docid": "73c25e9ce85d7847b60ceddec576005d", "score": "0.5322219", "text": "def get_smoothed_y_percentiles(self,imported_hdf5_handle,y_percentile,smoothing_kernel_y):\n y_percentiles_path = self.chunk_t((imported_hdf5_handle[self.seg_channel],),(2,),1,self.get_y_percentile,\"y_percentile\",\"data\",y_percentile,t_range_tuple=(self.t_range,))\n \n with h5py_cache.File(y_percentiles_path,\"r\",chunk_cache_mem_size=self.chunk_cache_mem_size) as y_percentiles_handle:\n y_percentiles_smoothed = self.median_filter_2d(y_percentiles_handle[\"data\"],smoothing_kernel_y)\n# y_percentiles_smoothed_path = self.chunk_t((y_percentiles_handle[\"data\"],),(1,),1,self.median_filter_2d,\"y_percentile_smoothed\",\"data\",smoothing_kernel_y)\n self.removefile(y_percentiles_path)\n# return y_percentiles_smoothed_path\n return y_percentiles_smoothed", "title": "" }, { "docid": "eb8f3c6710c14f289549b77b98593a17", "score": "0.5314725", "text": "def test_create_group_percentiles_output_buckets(self):\n\n created_percentiles = []\n\n desired_percentiles = [str(percent) + '%' for percent in wb_utils.Settings.formatted_percentiles]\n\n for data_item in self.results['Data']:\n for percent in data_item['percentileList']:\n percentile_keys = [perval['percentiles'] for perval in percent['percentileValues']]\n created_percentiles.append(percentile_keys)\n\n self.assertTrue([desired_percentiles == l for l in created_percentiles],\n \"\"\"create_group_percentiles returned unexecpted percentile buckets\n returned: {}\"\"\".format(created_percentiles))", "title": "" }, { "docid": "653ba3b8735ed9f18d049c1f428c7e84", "score": "0.5311623", "text": "def interquartile_range(X):\n quartiles = np.nanpercentile(X, [25, 75], axis=0)\n return quartiles[1, :] - quartiles[0, :]", "title": "" }, { "docid": "cb2a14d2315b48de82f2f9cfecdedf69", "score": "0.5302771", "text": "def calculate_percentile(percentile, dataframe, column_name):\n return round(dataframe[column_name].quantile(percentile),2 )", "title": "" }, { "docid": "462b312e3e3c2cedc1b143a875e0ce31", "score": "0.5296902", "text": "def filter_by_percentile(tags_distribution: Dict[str, int], tol_percentile: int) -> Set[str]:\n if len(list(tags_distribution.values())) > 100:\n top_percentile = np.percentile(list(tags_distribution.values()), tol_percentile)\n return {k for k, v in tags_distribution.items() if v > top_percentile}\n else:\n return {k for k, v in tags_distribution.items()}", "title": "" }, { "docid": "cc815928289e73cc0f83025752329c64", "score": "0.5296731", "text": "def reject_outliers(data, cutoff, thresh=0.08):\n data = np.array(data)\n marsz = data[:, 4]\n data = data[(data[:, 4] >= cutoff[0]) & (data[:, 4] <= cutoff[1])]\n m = np.mean(data[:, 4], axis=0)\n # return data\n return data[(data[:, 4] <= m + thresh) & (data[:, 4] >= m - thresh)]", "title": "" }, { "docid": "664fb7f43834ae1c9ebcfea6609f3885", "score": "0.5293619", "text": "def signal_percentile(sampled_signal, percentile):\n return np.percentile(sampled_signal.reshape(-1), percentile)", "title": "" }, { "docid": "04cf7bd36de2969575eee1e97b96f8f1", "score": "0.5292302", "text": "def parse_percentiles(percentiles: Sequence[float] | float | None) -> Sequence[float]:\n if 
isinstance(percentiles, float):\n percentiles = [percentiles]\n elif percentiles is None:\n percentiles = []\n if not all((0 <= p <= 1) for p in percentiles):\n raise ValueError(\"percentiles must all be in the range [0, 1]\")\n\n sub_50_percentiles = sorted(p for p in percentiles if p < 0.5)\n at_or_above_50_percentiles = sorted(p for p in percentiles if p >= 0.5)\n\n if not at_or_above_50_percentiles or at_or_above_50_percentiles[0] != 0.5:\n at_or_above_50_percentiles = [0.5, *at_or_above_50_percentiles]\n\n return [*sub_50_percentiles, *at_or_above_50_percentiles]", "title": "" }, { "docid": "550d5f82990994e210e7389823e393ea", "score": "0.5283657", "text": "def get_percentile_distr():\n second_part = (np.geomspace(5, 50, 10)).astype(int)\n first_part = (101. - np.geomspace(1, 51, 20)).astype(int)\n return np.hstack([first_part, second_part])", "title": "" }, { "docid": "c398394a8e078d423491bf1e766504fa", "score": "0.5282513", "text": "def cat_continuous(x, granularity=\"Medium\"):\n if granularity == \"High\":\n lspercentile = [\n np.percentile(x, 5),\n np.percentile(x, 10),\n np.percentile(x, 15),\n np.percentile(x, 20),\n np.percentile(x, 25),\n np.percentile(x, 30),\n np.percentile(x, 35),\n np.percentile(x, 40),\n np.percentile(x, 45),\n np.percentile(x, 50),\n np.percentile(x, 55),\n np.percentile(x, 60),\n np.percentile(x, 65),\n np.percentile(x, 70),\n np.percentile(x, 75),\n np.percentile(x, 80),\n np.percentile(x, 85),\n np.percentile(x, 90),\n np.percentile(x, 95),\n np.percentile(x, 99),\n ]\n res = [\n \"> p90 (%s)\" % (lspercentile[8])\n if z > lspercentile[8]\n else \"<= p10 (%s)\" % (lspercentile[0])\n if z <= lspercentile[0]\n else \"<= p20 (%s)\" % (lspercentile[1])\n if z <= lspercentile[1]\n else \"<= p30 (%s)\" % (lspercentile[2])\n if z <= lspercentile[2]\n else \"<= p40 (%s)\" % (lspercentile[3])\n if z <= lspercentile[3]\n else \"<= p50 (%s)\" % (lspercentile[4])\n if z <= lspercentile[4]\n else \"<= p60 (%s)\" % (lspercentile[5])\n if z <= lspercentile[5]\n else \"<= p70 (%s)\" % (lspercentile[6])\n if z <= lspercentile[6]\n else \"<= p80 (%s)\" % (lspercentile[7])\n if z <= lspercentile[7]\n else \"<= p90 (%s)\" % (lspercentile[8])\n if z <= lspercentile[8]\n else \"> p90 (%s)\" % (lspercentile[8])\n for z in x\n ]\n elif granularity == \"Medium\":\n lspercentile = [\n np.percentile(x, 10),\n np.percentile(x, 20),\n np.percentile(x, 30),\n np.percentile(x, 40),\n np.percentile(x, 50),\n np.percentile(x, 60),\n np.percentile(x, 70),\n np.percentile(x, 80),\n np.percentile(x, 90),\n ]\n res = [\n \"<= p10 (%s)\" % (lspercentile[0])\n if z <= lspercentile[0]\n else \"<= p20 (%s)\" % (lspercentile[1])\n if z <= lspercentile[1]\n else \"<= p30 (%s)\" % (lspercentile[2])\n if z <= lspercentile[2]\n else \"<= p40 (%s)\" % (lspercentile[3])\n if z <= lspercentile[3]\n else \"<= p50 (%s)\" % (lspercentile[4])\n if z <= lspercentile[4]\n else \"<= p60 (%s)\" % (lspercentile[5])\n if z <= lspercentile[5]\n else \"<= p70 (%s)\" % (lspercentile[6])\n if z <= lspercentile[6]\n else \"<= p80 (%s)\" % (lspercentile[7])\n if z <= lspercentile[7]\n else \"<= p90 (%s)\" % (lspercentile[8])\n if z <= lspercentile[8]\n else \"> p90 (%s)\" % (lspercentile[8])\n for z in x\n ]\n else:\n lspercentile = [\n np.percentile(x, 15),\n np.percentile(x, 50),\n np.percentile(x, 85),\n ]\n res = [\n \"1-Very Low\"\n if z < lspercentile[0]\n else \"2-Low\"\n if z < lspercentile[1]\n else \"3-High\"\n if z < lspercentile[2]\n else \"4-Very High\"\n for z in x\n ]\n return res", "title": "" }, { 
"docid": "acedf2ba083f29f6b80020e6868514bb", "score": "0.527658", "text": "def reject_outliers(data, deviation_tolerance=.6745):\n\n data = data.astype('float')\n distance = _np.abs(data - _np.median(data))\n sigma = _np.median(distance) / .6745\n data[distance > deviation_tolerance * sigma] = _np.nan\n return data", "title": "" }, { "docid": "bb9088d7131f4cc995058612c379e69c", "score": "0.5273992", "text": "def percentile(percent):\n @wraps(percentile)\n def _reduce(iterable):\n sorts = sorted(iterable)\n key = (len(iterable)-1) * percent\n key_floor = math.floor(key)\n key_ceil = math.ceil(key)\n if key_ceil == key_floor:\n return sorts[int(key)]\n d0 = sorts[int(key_floor)] * (key_ceil - key)\n d1 = sorts[int(key_ceil)] * (key - key_floor)\n return d0+d1\n return _reduce", "title": "" }, { "docid": "fd355f06222256f4501b8541b29d6ea9", "score": "0.5272074", "text": "def delete_outliers(data, y, ids, threshold):\n idxs = np.all(abs(data)<threshold,1)\n\n data_clean = data[idxs,:]\n y_clean = y[idxs]\n ids = ids[idxs]\n\n return data_clean, y_clean, ids", "title": "" }, { "docid": "fd0e085cb04827bb73eeba6f3b4b6a65", "score": "0.5260961", "text": "def PercentileCalculator(numbers, percentiles=PERCENTILES_LIST):\n\n if not len(numbers): # 'if not numbers' will fail if numbers is a pd.Series.\n raise ValueError(\"Can't compute percentiles of empty list.\")\n\n numbers_sorted = sorted(numbers)\n count = len(numbers_sorted)\n total = sum(numbers_sorted)\n result = {}\n for percentile in percentiles:\n float(percentile) # verify type\n if percentile < 0.0 or percentile > 100.0:\n raise ValueError('Invalid percentile %s' % percentile)\n\n percentile_string = 'p%s' % str(percentile)\n index = int(count * float(percentile) / 100.0)\n index = min(index, count - 1) # Correction to handle 100th percentile.\n result[percentile_string] = numbers_sorted[index]\n\n average = total / float(count)\n result['average'] = average\n if count > 1:\n total_of_squares = sum([(i - average) ** 2 for i in numbers])\n result['stddev'] = (total_of_squares / (count - 1)) ** 0.5\n else:\n result['stddev'] = 0\n\n return result", "title": "" }, { "docid": "6f9ed3a4e0f8796043f33b8b45842ad6", "score": "0.525979", "text": "def test_Percentiles_group_percentiles_out_creation(self):\n\n self.Percentiles.population_percentiles()\n self.assertTrue(hasattr(self.Percentiles, 'group_percentiles_out'),\n \"\"\"population_percentile_vecs not attribute after population_percentiles run\"\"\")", "title": "" }, { "docid": "4204c997d6d4d1edfb005ca6aa454ce7", "score": "0.5253785", "text": "def customized_box_plot(percentiles, axes, redraw = True, *args, **kwargs):\n\n box_plot = axes.boxplot([[-9, -4, 2, 4, 9],]*1, *args, **kwargs) \n # Creates len(percentiles) no of box plots\n\n min_y, max_y = float('inf'), -float('inf')\n\n for box_no, (q1_start, \n q2_start,\n q3_start,\n q4_start,\n q4_end,\n fliers_xy) in enumerate(percentiles):\n\n # Lower cap\n box_plot['caps'][2*box_no].set_ydata([q1_start, q1_start])\n # xdata is determined by the width of the box plot\n\n # Lower whiskers\n box_plot['whiskers'][2*box_no].set_ydata([q1_start, q2_start])\n\n # Higher cap\n box_plot['caps'][2*box_no + 1].set_ydata([q4_end, q4_end])\n\n # Higher whiskers\n box_plot['whiskers'][2*box_no + 1].set_ydata([q4_start, q4_end])\n\n # Box\n box_plot['boxes'][box_no].set_ydata([q2_start, \n q2_start, \n q4_start,\n q4_start,\n q2_start])\n\n # Median\n box_plot['medians'][box_no].set_ydata([q3_start, q3_start])\n\n # Outliers\n if fliers_xy is not None and 
len(fliers_xy[0]) != 0:\n # If outliers exist\n box_plot['fliers'][box_no].set(xdata = fliers_xy[0],\n ydata = fliers_xy[1])\n\n min_y = min(q1_start, min_y, fliers_xy[1].min())\n max_y = max(q4_end, max_y, fliers_xy[1].max())\n\n else:\n min_y = min(q1_start, min_y)\n max_y = max(q4_end, max_y)\n\n # The y axis is rescaled to fit the new box plot completely with 10% \n # of the maximum value at both ends\n axes.set_ylim([min_y*1.1, max_y*1.1])\n\n # If redraw is set to true, the canvas is updated.\n if redraw:\n ax.figure.canvas.draw()\n\n return box_plot", "title": "" }, { "docid": "e29ee683c4cbff437371867018d81da2", "score": "0.52533287", "text": "def get_y_percentile(self,array_tuple,y_percentile):\n array, = array_tuple\n out_array = np.percentile(array,y_percentile,axis=1,interpolation='lower')\n return out_array", "title": "" }, { "docid": "b9c2e933d3ebc463990449afec4c003b", "score": "0.52529764", "text": "def _percentile_interval(self, ab_estimates = None):\n ll = np.percentile(ab_estimates, q = (self.alpha/2)*100)\n ul = np.percentile(ab_estimates, q = (1 - self.alpha/2)*100)\n return np.array([ll, ul])", "title": "" }, { "docid": "c1265aaa33c87ef8f239a7fa5ac7fe02", "score": "0.5252969", "text": "def discard_outliers(x_train, ys_train ):\n index = []\n threshold =8.5\n for i in range(x_train.shape[0]):\n if np.amax(np.abs(x_train[i, :])) > threshold:\n index.append(i)\n x_train = np.delete(x_train, index, 0)\n ys_train = np.delete(ys_train, index, 0)\n return x_train, ys_train", "title": "" }, { "docid": "e4a34155464dc8f1fa99fbfb1696cbe6", "score": "0.524914", "text": "def cutoff_from_percentile(self, percentile, pos_label=None):\n return (\n pd.Series(self.pred_probas(pos_label))\n .nlargest(int((1 - percentile) * len(self)))\n .min()\n )", "title": "" }, { "docid": "39251c47fdfd3c8926da76078599bac6", "score": "0.52269894", "text": "def remove_outliers(X: List[Union[int, float]], t: float = 3.5) -> List[Union[int, float]]:\n mean_X = np.mean(X)\n std_X = np.std(X)\n\n good_x = []\n\n for x in X:\n z_score = (x - mean_X) / std_X\n if z_score < t:\n good_x.append(x)\n return good_x", "title": "" }, { "docid": "eb4339cbc964951338524513bb4a9aa6", "score": "0.52175856", "text": "def get_climatology(self,percentiles):\n self.percentiles=percentiles\n \n lat,lon=self.data[\"clim\"][0].shape[-2:]\n dist=np.zeros([1,lat,lon])\n \n #We call the whole cubelist into memory\n self.data[\"clim\"].realise_data()\n dist=np.concatenate([f.data.reshape([-1,lat,lon]) for f in self.data[\"clim\"]])\n \n self.distribution=np.percentile(dist,percentiles,axis=0)\n self.distribution[0]-=0.01\n \n means=np.zeros([len(percentiles)-1,lat,lon])\n for i in range(len(percentiles)-1):\n for j in range(lat):\n for k in range(lon):\n means[i,j,k]=dist[np.digitize(dist[:,j,k],\\\n self.distribution[:,j,k],right=True)==i+1,j,k].mean()\n #interpolates empty bins as being halfway between the distribution bounds \n for i,j,k in np.argwhere(np.isnan(means)):\n means[i,j,k]=self.distribution[i:i+2,j,k].mean()\n self.dist_means=means", "title": "" }, { "docid": "9ddc868204ce6131d5a0b58ce405f1ef", "score": "0.5208818", "text": "def normalize99(img):\n X = img.copy()\n X = (X - np.percentile(X, 1)) / (np.percentile(X, 99) - np.percentile(X, 1))\n return X", "title": "" }, { "docid": "dfbcf341c7a8545a9fce1302ba82a2b4", "score": "0.52067035", "text": "def give_percentiles( scores):\n res = []\n for i in scores:\n res.append( stats.percentileofscore(scores,i))\n return np.array(res)/100", "title": "" }, { "docid": 
"cd5d511d7f29c7d28a698dd4dd62dfa5", "score": "0.51997083", "text": "def unconditional_quantile_y(x, alpha_grid, func):\n n_draws = len(x)\n\n # Equation 21a\n y_x = Parallel(n_jobs=8)(delayed(func)(i) for i in x)\n y_x_asc = np.sort(y_x)\n q_index = (np.floor(alpha_grid * n_draws)).astype(int)\n quantile_y_x = y_x_asc[q_index]\n\n return quantile_y_x", "title": "" }, { "docid": "2cfe6064b54e4bafc24a99ab38bf1c59", "score": "0.5198672", "text": "def SELpercentile(SEL, percentile=5):\n fff = np.percentile(SEL, 100-percentile)\n\n return fff", "title": "" }, { "docid": "64127ee8b40ca7420be9bdd8f31576a0", "score": "0.5194027", "text": "def _get_grids(feature_values, num_grid_points, grid_type, percentile_range, grid_range):\n\n if grid_type == 'percentile':\n # grid points are calculated based on percentile in unique level\n # thus the final number of grid points might be smaller than num_grid_points\n start, end = 0, 100\n if percentile_range is not None:\n start, end = np.min(percentile_range), np.max(percentile_range)\n\n percentile_grids = np.linspace(start=start, stop=end, num=num_grid_points)\n value_grids = np.percentile(feature_values, percentile_grids)\n\n grids_df = pd.DataFrame()\n grids_df['percentile_grids'] = [round(v, 2) for v in percentile_grids]\n grids_df['value_grids'] = value_grids\n grids_df = grids_df.groupby(['value_grids'], as_index=False).agg(\n {'percentile_grids': lambda v: str(tuple(v)).replace(',)', ')')}).sort_values('value_grids', ascending=True)\n\n feature_grids, percentile_info = grids_df['value_grids'].values, grids_df['percentile_grids'].values\n else:\n if grid_range is not None:\n value_grids = np.linspace(np.min(grid_range), np.max(grid_range), num_grid_points)\n else:\n value_grids = np.linspace(np.min(feature_values), np.max(feature_values), num_grid_points)\n feature_grids, percentile_info = value_grids, []\n\n return feature_grids, percentile_info", "title": "" }, { "docid": "348835087b831c8bd81768b7f613f28e", "score": "0.51901513", "text": "def remove_ourlier_diff_median(data, median_scaler=25):\n daily_diff = (data.resample('D').last().dropna().diff().abs() + 0.05) * median_scaler\n daily_diff['diff_date'] = daily_diff.index.strftime('%Y-%m-%d')\n data_test = data.diff()\n data_test['diff_date'] = data_test.index.strftime('%Y-%m-%d')\n data_test_diff = pd.merge(data_test, daily_diff, on='diff_date')\n indexer = ((np.abs(data_test_diff['close_x']) < np.abs(data_test_diff['close_y'])) &\n (np.abs(data_test_diff['open_x']) < np.abs(data_test_diff['open_y'])) &\n (np.abs(data_test_diff['high_x']) < np.abs(data_test_diff['high_y'])) & \n (np.abs(data_test_diff['low_x']) < np.abs(data_test_diff['low_y'])))\n # indexer = (indexer | data_test_diff['close_y'].isna())\n data_final = data.loc[indexer.values, :]\n \n return data_final", "title": "" }, { "docid": "995c5947eca7e83cc9b54ac3771c78c6", "score": "0.51841736", "text": "def calculate_percentiles_from_raster(raster_uri, percentiles):\n raster = gdal.Open(raster_uri, gdal.GA_ReadOnly)\n\n def numbers_from_file(fle):\n \"\"\"Generates an iterator from a file by loading all the numbers\n and yielding\n\n fle = file object\n \"\"\"\n arr = np.load(fle)\n for num in arr:\n yield num\n\n # List to hold the generated iterators\n iters = []\n\n band = raster.GetRasterBand(1)\n nodata = band.GetNoDataValue()\n\n n_rows = raster.RasterYSize\n n_cols = raster.RasterXSize\n\n # Variable to count the total number of elements to compute percentile\n # from. 
This leaves out nodata values\n n_elements = 0\n\n #Set the row strides to be something reasonable, like 256MB blocks\n row_strides = max(int(2**28 / (4 * n_cols)), 1)\n\n for row_index in xrange(0, n_rows, row_strides):\n #It's possible we're on the last set of rows and the stride\n #is too big, update if so\n if row_index + row_strides >= n_rows:\n row_strides = n_rows - row_index\n\n # Read in raster chunk as array\n arr = band.ReadAsArray(0, row_index, n_cols, row_strides)\n\n tmp_uri = pygeoprocessing.geoprocessing.temporary_filename()\n tmp_file = open(tmp_uri, 'wb')\n # Make array one dimensional for sorting and saving\n arr = arr.flatten()\n # Remove nodata values from array and thus percentile calculation\n arr = np.delete(arr, np.where(arr == nodata))\n # Tally the number of values relevant for calculating percentiles\n n_elements += len(arr)\n # Sort array before saving\n arr = np.sort(arr)\n\n np.save(tmp_file, arr)\n tmp_file.close()\n tmp_file = open(tmp_uri, 'rb')\n tmp_file.seek(0)\n iters.append(numbers_from_file(tmp_file))\n arr = None\n\n # List to store the rank/index where each percentile will be found\n rank_list = []\n # For each percentile calculate nearest rank\n for perc in percentiles:\n rank = math.ceil(perc/100.0 * n_elements)\n rank_list.append(int(rank))\n\n # A variable to burn through when doing heapq merge sort over the\n # iterators. Variable is used to check if we've iterated to a\n # specified rank spot, to grab percentile value\n counter = 0\n # Setup a list of zeros to replace with percentile results\n results = [0] * len(rank_list)\n\n LOGGER.debug('Percentile Rank List: %s', rank_list)\n\n for num in heapq.merge(*iters):\n # If a percentile rank has been hit, grab percentile value\n if counter in rank_list:\n LOGGER.debug('percentile value is : %s', num)\n results[rank_list.index(counter)] = int(num)\n counter += 1\n\n band = None\n raster = None\n return results", "title": "" }, { "docid": "f36baaab4212473d67e29612f8604d9b", "score": "0.51838475", "text": "def scoreatpercentile(self, independent_data, percentile):\n _groups = self.grouping_function(independent_data)\n result = np.empty(independent_data.shape) * np.nan\n for key in self._models.keys():\n indices = _groups==key\n result[indices] = self._models[key].scoreatpercentile(independent_data[indices], percentile)\n return result", "title": "" }, { "docid": "9031a7091fd6ad983c1b58edca8fb642", "score": "0.5173151", "text": "def denormalize(x):\n x_max = np.percentile(x, 98)\n x_min = np.percentile(x, 2)\n x = (x - x_min) / (x_max - x_min)\n x = x.clip(0, 1)\n return x", "title": "" }, { "docid": "59cc36d34969979f14bf683d6730c9bd", "score": "0.5166004", "text": "def clean_outliers(self, dim=3):\n ok_points = []\n not_ok_points = []\n\n if dim != 3:\n distances = [dist(x.point, self.center, d3=False) for x in self.points]\n else:\n distances = [dist(x.point, self.center) for x in self.points]\n stdv = np.std(distances)\n meandis = np.mean(distances)\n for i in range(len(self.points)):\n if distances[i] < (2.5 * stdv + meandis):\n ok_points.append(self.points[i])\n else:\n not_ok_points.append(self.points[i])\n return ok_points, not_ok_points", "title": "" }, { "docid": "c98f4f98d9d6a6d03e4724bd56f71359", "score": "0.51487285", "text": "def p_threshold(self, percentile):\n\n masked_I, window_I, window_mask, dtc_idx = self._set_up()\n r = np.nanpercentile(masked_I.filled(np.nan), percentile, axis=1)\n\n threshold = np.repeat(r, np.square(2*self.extent)).reshape(masked_I.shape)\n window_I[(window_I 
> threshold) & (window_mask > 0)] = -1\n\n if len(np.where(np.isnan(r))[0])>0: # deal with blocks for which all pixels are masked\n window_I[np.where(np.isnan(r))] = -1\n \n masked_img = self.indexed[:,3].copy().flatten()\n masked_img[dtc_idx] = window_I\n\n return masked_img", "title": "" }, { "docid": "c79ab36c853ea89f53033a7f16c8e9e7", "score": "0.5146289", "text": "def set_color_by_percentile(this, start, end):\n if str(this) == 'nan':\n return 'grey'\n if this < start:\n return start\n elif this > end:\n return end\n return this", "title": "" }, { "docid": "49e2ede35dad1a19aff8aa5c18698302", "score": "0.5146138", "text": "def get_quantile(data):\n # median = DataAnalyser.get_median(data)\n # filtered = [i for i in data if i is not None]\n # filtered.sort()\n # first_quantile = DataAnalyser.get_median([item for item in filtered if item < median])\n # third_quantile = DataAnalyser.get_median([item for item in filtered if item > median])\n\n # return first_quantile, third_quantile\n sorted_data = sorted(data)\n return sorted_data[round(len(data) * 1 / 4)], sorted_data[round(len(data) * 3 / 4)]", "title": "" }, { "docid": "0a292c3d3dba3f26987846e541ee2dc2", "score": "0.51320106", "text": "def outlierCleaner(predictions, ages, net_worths):\n import numpy as np\n cleaned_data = []\n\n ### your code goes here\n errors = []\n for n in range(0, 89):\n\t\td = abs(net_worths[n] - predictions[n])\n\t\terrors.append(d)\n\t\t\n cut = np.percentile(errors, 90)\n for n in range(0, 89):\n\t\tif errors[n] <= cut:\n\t\t\tcleaned_data.append((ages[n], net_worths[n], errors[n]))\n\t\t\n return cleaned_data", "title": "" }, { "docid": "feccb496e236d83e3c14a06b0d9d0e63", "score": "0.5127159", "text": "def outliers(self):\n\n limits = self._limits\n column = self._column\n\n values_without_range = list(set((self._df\n .rdd.map(lambda x: x[column])\n .filter(lambda x: x < limits[0] or x > limits[1])\n .collect())))\n\n return values_without_range", "title": "" }, { "docid": "f5613fa6db921a59b6d5071a35bed231", "score": "0.51267356", "text": "def limit_range(vals, rng):\n if len(vals) > 2:\n rng = sorted(rng)\n lower = np.percentile(vals, rng[0], interpolation=\"nearest\")\n higher = np.percentile(vals, rng[1], interpolation=\"nearest\")\n limited = [x for x in vals if x >= lower and x <= higher]\n return np.array(limited)\n return vals", "title": "" }, { "docid": "e5c9850b5e0ee0092c155edd7dddf93e", "score": "0.51224047", "text": "def percentile_normalization(userID, cur, sampling_percentiles):\n times = []\n query1 = \"select creation_date from post where owner_user_id = %(id)s and (post_type_id = 1 or post_type_id = 2) order by creation_date\"\n cur.execute(query1, {'id': userID})\n posts = [i[0] for i in results(cur)]\n start = get_start_time(userID, cur)\n for p in sampling_percentiles:\n x = int(len(posts) * p)\n if x == 0:\n times.append(start)\n else:\n times.append(posts[x-1])\n return times", "title": "" }, { "docid": "27bb7f867042dfa2e0da50ec7db00d5d", "score": "0.51217884", "text": "def do_polarization_clip_check(self, naxis, lower, upper):\n\n # round floats to individual pixels\n p1 = math.floor(lower)\n p2 = math.ceil(upper)\n\n # bounds check\n if p1 < 1:\n p1 = 1\n if p2 > naxis:\n p2 = naxis\n\n # validity check, no pixels included\n if p1 > naxis or p2 < 1:\n error = 'pixels coordinates {}:{} do not intersect {} to {}'\\\n .format(p1, p2, 1, naxis)\n raise NoContentError(error)\n\n # an actual cutout\n return [p1, p2]", "title": "" }, { "docid": "fa3944c9fc1141dc504b1e5b67af8842", "score": 
"0.51175594", "text": "def compute_bounds(xmin, xmax, xv):\n\n if xmin is not None or xmax is not None:\n if type(xmin) == str and xmin.startswith(\"percentile\"):\n xmin = np.nanpercentile(xv, float(xmin[11:-1]))\n if type(xmax) == str and xmax.startswith(\"percentile\"):\n xmax = np.nanpercentile(xv, float(xmax[11:-1]))\n\n if xmin is None or xmin == np.nanmin(xv):\n xmin = np.nanmin(xv) - (xmax - np.nanmin(xv))/20\n if xmax is None or xmax == np.nanmax(xv):\n xmax = np.nanmax(xv) + (np.nanmax(xv) - xmin)/20\n\n return xmin, xmax", "title": "" }, { "docid": "d8717c6087944b7865283628348f7811", "score": "0.51162624", "text": "def scoreatpercentile(a, per, limit=(), interpolation_method='fraction'):\n values = np.sort(a, axis=0)\n if limit:\n values = values[(limit[0] <= values) & (values <= limit[1])]\n\n idx = per /100. * (values.shape[0] - 1)\n if (idx % 1 == 0):\n score = values[int(idx)]\n else:\n if interpolation_method == 'fraction':\n score = _interpolate(values[int(idx)], values[int(idx) + 1],\n idx % 1)\n elif interpolation_method == 'lower':\n score = values[np.floor(idx)]\n elif interpolation_method == 'higher':\n score = values[np.ceil(idx)]\n else:\n raise ValueError(\"interpolation_method can only be 'fraction', \" \\\n \"'lower' or 'higher'\")\n\n return score", "title": "" }, { "docid": "21c0fbeb06e0114dc2b0524b60fa55d1", "score": "0.5093741", "text": "def unique_percentiles_beta(perc_values,\n percentiles):\n # normalize between 0 and 1\n uniq, uniq_ind, counts = np.unique(\n perc_values, return_index=True, return_counts=True)\n if len(uniq) != len(perc_values):\n min_value = np.min(perc_values)\n perc_values = perc_values - min_value\n max_value = np.max(perc_values)\n perc_values = perc_values / max_value\n percentiles = np.asanyarray(percentiles)\n percentiles = percentiles / 100.0\n\n p, ier = sc_opt.curve_fit(betainc,\n percentiles,\n perc_values)\n uniq_perc_values = sc_special.betainc(p[0], p[1], percentiles)\n uniq_perc_values = uniq_perc_values * max_value + min_value\n else:\n uniq_perc_values = perc_values\n return uniq_perc_values", "title": "" }, { "docid": "a24c0f72c5371263e914a0aa2c6875cd", "score": "0.50732625", "text": "def pdf_percentiles(self, percentile, **kwargs):\n percentile = u.Quantity(percentile, u.percent).value\n percs = np.percentile(self.distribution, percentile, axis=-1, **kwargs)\n # numpy.percentile strips units for unclear reasons, so we have to make\n # a new object with units\n if hasattr(self.distribution, \"_new_view\"):\n return self.distribution._new_view(percs)\n else:\n return percs", "title": "" } ]
8e02496a6c8282245427a4927c688a28
Returns analytical solution for different moments of lognormal size distr
[ { "docid": "b2866e65341a2e864981cd5a55805301", "score": "0.6666449", "text": "def analytic_moms_for_lognormal(mom, n_tot, mean_r, gstdev):\n if mom == 0:\n ret = n_tot\n else:\n ret = n_tot * mean_r**mom * math.exp((mom**2)/2. * math.pow(math.log(gstdev), 2))\n return ret", "title": "" } ]
[ { "docid": "8d0eeec394c6cdc872ab50fcf4dab135", "score": "0.65942085", "text": "def lognormvariate(self, mu, sigma):\n pass", "title": "" }, { "docid": "3d8a95f64a721e2186597f80c6ebf099", "score": "0.6581352", "text": "def lognormvariate(self, mu, sigma):\n pass", "title": "" }, { "docid": "3d8a95f64a721e2186597f80c6ebf099", "score": "0.6581352", "text": "def lognormvariate(self, mu, sigma):\n pass", "title": "" }, { "docid": "3d8a95f64a721e2186597f80c6ebf099", "score": "0.6581352", "text": "def lognormvariate(self, mu, sigma):\n pass", "title": "" }, { "docid": "580fc1dfa86b8b155db6d1ccb1748089", "score": "0.63014734", "text": "def mvn_log_pdf(x: np.ndarray, mu: np.ndarray, sigma_inv: np.ndarray, log_det: float) -> np.ndarray:\n n = len(mu)\n o = x - mu\n l = (sigma_inv@o.T).T\n maha = np.sum(o*l, axis=-1)\n ld = -0.5*(n*np.log(2*np.pi) + log_det + maha)\n return ld", "title": "" }, { "docid": "67194f91f7757bec422379e29dbe8104", "score": "0.6259757", "text": "def log_density_gaussian(self, x, mu, logvar):\n norm = - 0.5 * (math.log(2 * math.pi) + logvar)\n log_density = norm - 0.5 * ((x - mu) ** 2 * torch.exp(-logvar))\n return log_density", "title": "" }, { "docid": "63dfb0d098b2023ffc6a66988b866c7e", "score": "0.6252234", "text": "def perturb_lognormal(step, solver_order, noise_scale, random_state=None, size=()):\n shift = 0.5 * np.log(1 + noise_scale * (step ** (2 * solver_order)))\n mean = np.log(step) - shift\n cov = 2 * shift\n samples = np.exp(\n scipy.stats.multivariate_normal.rvs(\n mean=mean, cov=cov, size=size, random_state=random_state\n )\n )\n return samples", "title": "" }, { "docid": "2c8e72c0d40d869c3dc1095c46976f13", "score": "0.62147254", "text": "def fit_temp_dens_relation(logoverden, logT):\n \n ind = np.where((0.1 < logoverden) * (logoverden < 1.0) * (0.1 < logT) * (logT < 5.0))\n\n logofor = logoverden[ind]\n logtfor = logT[ind]\n\n def min_func(param):\n \"\"\"Function to minimize: power law fit to temperature density relation.\"\"\"\n logT0 = param[0]\n gammam1 = param[1]\n #print(param)\n return logtfor - (logT0 + gammam1 * logofor)\n res = leastsq(min_func, np.array([np.log10(1e4), 0.5]), full_output=True)\n params = res[0]\n if res[-1] <= 0:\n print(res[3])\n return 10**params[0], params[1] + 1", "title": "" }, { "docid": "f4c5d4f275de93fd710418af3a5194af", "score": "0.62146217", "text": "def nllfun(x):\n mu = x[0]\n logsigma = x[1]\n return logsigma + 0.5*np.sum((mu/(np.exp(logsigma) + 0.01))**2)", "title": "" }, { "docid": "d9635372e367fad8db57a39300cf8d7f", "score": "0.61965066", "text": "def probNormalizeLog(distributions):\n if distributions.ndim > 1:\n distributions = distributions - np.max(distributions, axis=1,keepdims=True)\n else:\n distributions = distributions - np.max(distributions)\n try:\n prob = probNormalize(np.exp(distributions))\n except FloatingPointError, e:\n print distributions\n print np.max(distributions), np.min(distributions)\n raise e\n return prob", "title": "" }, { "docid": "84609def7d928b62355d6405fa8c33e3", "score": "0.61930674", "text": "def LogMultiNorm(x, mu, sigma):\n return -(np.log(np.linalg.det(2 * np.pi * sigma)) + (x - mu).T.dot(np.linalg.inv(sigma)).dot(x - mu)) / 2", "title": "" }, { "docid": "d4e5984e6728e98a7141a932ba7c3c57", "score": "0.6185559", "text": "def mh_lognormal(init,ys,ts,iters,fi=None):\n print(\"Running Metropolis Algorithm with Log-Normal proposals.\")\n D = len(init)\n my_dist = dist(init)\n # initialize state and log-likelihood\n state = init.copy()\n Lp_state = my_dist.log_likelihood(ys,ts)\n 
accepts = 0.\n\n cov = (0.1**2)*np.eye(D)*1./D\n for i in np.arange(0, iters):\n if fi is not None:\n write_samp(fi,state)\n\n # log(Rv) follow a normal distribution\n mu = np.log(state)\n # propose a new state\n prop = np.exp(np.random.multivariate_normal(mu, cov))\n move_p = np.log(scipy.stats.multivariate_normal(np.log(state),cov).\\\n pdf(np.log(prop)))\n rev_p = np.log(scipy.stats.multivariate_normal(np.log(prop),cov).\\\n pdf(np.log(state)))\n my_dist.set_params(prop)\n Lp_prop = my_dist.log_likelihood(ys,ts)\n #print Lp_prop+rev_p\n #print Lp_state+move_p\n rand = np.random.rand()\n prob = min(1,((Lp_prop+rev_p)-(Lp_state+move_p)))\n if np.log(rand) < prob:\n accepts += 1\n state = prop.copy()\n Lp_state = Lp_prop\n print (\"acct bc %s < %s (iter %s)\"%(np.log(rand),prob,i))\n print state\n else:\n my_dist.set_params(state)\n\n print 'Acceptance ratio', accepts/iters", "title": "" }, { "docid": "a6b10391761e29e253292b101f2c6664", "score": "0.6166954", "text": "def log_density_gaussian(self, x: Tensor, mu: Tensor, logvar: Tensor):\n norm = -0.5 * (math.log(2 * math.pi) + logvar)\n log_density = norm - 0.5 * ((x - mu) ** 2 * torch.exp(-logvar))\n return log_density", "title": "" }, { "docid": "938cacd752a86f855c8f5cc29b6401de", "score": "0.6161923", "text": "def lognormal_like(x, mu, tau):\n return flib.lognormal(x,mu,tau)", "title": "" }, { "docid": "a0f6998de870e2aabc50ec979789b35f", "score": "0.6140542", "text": "def find_one_sigma_equivalent(logpost, x0, l0=None, dx_max=None):\n x0 = np.atleast_1d(x0)\n sigmas = np.zeros(x0.shape[0])\n\n if dx_max is None:\n dx_max = 0*x0 + 1.0\n\n if l0 is None:\n l0 = logpost(x0)\n \n for i in range(x0.shape[0]):\n xlow = x0.copy()\n xlow[i] -= dx_max[i]\n xhigh = x0.copy()\n xhigh[i] += dx_max[i]\n llow = logpost(xlow)\n lhigh = logpost(xhigh)\n\n while llow < l0 - 0.5 and not xlow[i] == x0[i]:\n dx = x0[i] - xlow[i]\n xlow = x0.copy()\n xlow[i] = x0[i] - dx/2\n llow = logpost(xlow)\n\n while lhigh < l0 - 0.5 and not xhigh[i] == x0[i]:\n dx = xhigh[i] - x0[i]\n xhigh = x0.copy()\n xhigh[i] = x0[i] + dx/2\n lhigh = logpost(xhigh)\n\n sigmas[i] = 0.5*(xhigh[i] - xlow[i])\n\n return sigmas", "title": "" }, { "docid": "a007b0f38f199e4f8e89cc959b70b795", "score": "0.60996205", "text": "def ExpLogLikelihood(obs_data,pis,mus,sigmas,gamma):\n return np.sum(np.array(\n [np.sum(np.array(\n [gamma[n,k]*(np.log(pis[k])+LogMultiNorm(obs_data[n],mus[k],sigmas[k]))\n for k in range(K)]\n )) for n in range(N)]))", "title": "" }, { "docid": "b44db12d97196d5ef28e9c9b9d90e7d9", "score": "0.6062823", "text": "def log_sum_from_individual_logs(logs: Sequence) -> float:\n\n\treturn scipy.special.logsumexp(logs)", "title": "" }, { "docid": "a5d5ab6779225f64381d6557e3d374e3", "score": "0.60575956", "text": "def variances_log_chems(chemnames,logprior=1.0e20) :\n #senstraj = load('EndogenousEGFR3T3sensNoPriors')\n times = senstraj.timepoints \n jtjinv = scipy.linalg.inv(jtjtrunc+1.0/logprior**2*np.eye(\n len(jtjtrunc),len(jtjtrunc))) \n var = {}\n bestfit = {} \n optvarkeys = list(clc.optimizableVars.keys())\n first = optvarkeys[0]\n last = optvarkeys[-1]\n for name in chemnames :\n var[name] = [] \n bestfit[name] = [] \n chemindex = senstraj.key_column.get(name) \n index1sens = senstraj.key_column.get((name,first))\n index2sens = senstraj.key_column.get((name,last))\n sensarray_this_chem = copy.copy(senstraj.values[:,index1sens:(index2sens+1)])\n traj_this_chem = copy.copy(senstraj.values[:,chemindex]) \n for j, pname in enumerate(ovvarnames) :\n 
sensarray_this_chem[:,j] = sensarray_this_chem[:,j]*curp.get(pname)\n # need to scale each row by 1/chemvalue to mimic a derivative w.r.t. \n # log chemicals. Add a small value to chemvalue to avoid divide by zero\n for i in range(len(times)) :\n sensarray_this_chem[i,:] = old_div(sensarray_this_chem[i,:],(traj_this_chem[i]+1.0e-6))\n\n tmp = np.dot(sensarray_this_chem,jtjinv)\n for i in range(len(tmp[:,0])) :\n var[name].append(np.dot(tmp[i,:],sensarray_this_chem[i,:]))\n \n bestfit[name] = senstraj.values[:,chemindex]\n var[name] = np.asarray(var[name])\n return times,bestfit,var", "title": "" }, { "docid": "1c548ce3307a9c8bcc81afc95642ef02", "score": "0.60562223", "text": "def gaussian_entropy(logdet_V, D):\n return -0.5*logdet_V + 0.5*D + 0.5*D*np.log(2*np.pi)", "title": "" }, { "docid": "376ac334ff110828709ad01f411a4e80", "score": "0.60548127", "text": "def estimate_lognormal(self, data):\r\n\r\n if self.fit_method == 1:\r\n _results = LogNormal().maximum_likelihood_estimate(data,\r\n self.start_time,\r\n self.rel_time)\r\n\r\n self.n_suspensions = _results[3]\r\n self.n_failures = _results[4]\r\n elif self.fit_method == 2:\r\n _results = regression(data, self.start_time, self.rel_time,\r\n dist='lognormal')\r\n\r\n self.rho = _results[3]\r\n self.n_suspensions = _results[4]\r\n self.n_failures = _results[5]\r\n\r\n self.scale[1] = _results[0][0]\r\n self.shape[1] = _results[0][1]\r\n self.variance[0] = _results[1][0] # Scale\r\n self.variance[1] = _results[1][2] # Shape\r\n self.covariance[0] = _results[1][1] # Scale-Shape\r\n self.mle = _results[2][0]\r\n self.aic = _results[2][1]\r\n self.bic = _results[2][2]\r\n\r\n self.calculate_parameter_bounds(data)\r\n self.hazard_function()\r\n self.reliability_function()\r\n self.mean()\r\n\r\n return False", "title": "" }, { "docid": "b39dfc969c17978a0f94f0a31343913a", "score": "0.60535073", "text": "def asym_dist(_,dat_v,__,rescale):\n from scipy.stats import linregress\n lg = lambda x: linregress(range(len(x.reshape(-1))),x.reshape(-1)).intercept\n flat = lambda x: np.array([g for m in x for g in m ])\n sxd2 =[]; sgd2 = []\n for j in dat_v:\n for k in j:\n if type(k[1][1])==float and (np.isnan(k[1][1])): \n continue\n else:\n # Predict the length and gfp at begin\n x1 = k[1][0]\n x2 = k[1][1]\n x1,x2 = [lg(x) for x in [x1,x2]]\n perfx = np.log((np.exp(x1)+np.exp(x2)))-rescale*np.log(2) # perfect division\n sxd2.append((perfx-x1,perfx-x2))\n return flat(sxd2),_,_", "title": "" }, { "docid": "112975ac963ab3456cb72ee58f5f29e8", "score": "0.6031262", "text": "def mvn_nll(mu: np.ndarray, sigma: np.ndarray, x: np.ndarray) -> float:\n k = len(mu)\n Id = np.eye(k)\n chol = np.linalg.cholesky(sigma)\n chol_inv = spla.solve_triangular(chol, Id, lower=True)\n sigma_inv = chol_inv.T@chol_inv\n log_det = 2*np.sum(np.log(np.diag(chol)))\n nll = -np.mean(mvn_log_pdf(x, mu, sigma_inv, log_det))\n return nll", "title": "" }, { "docid": "2ab1234045b8fe0f2bb103ed75ff9e16", "score": "0.60305643", "text": "def _simple_relative_entropy_implementation(\n self, rho, sigma, log_base=np.log, tol=1e-12):\n # S(rho || sigma) = sum_i(p_i log p_i) - sum_ij(p_i P_ij log q_i)\n rvals, rvecs = rho.eigenstates()\n svals, svecs = sigma.eigenstates()\n rvecs = np.hstack([vec.full() for vec in rvecs]).T\n svecs = np.hstack([vec.full() for vec in svecs]).T\n # Calculate S\n S = 0\n for i in range(len(rvals)):\n if abs(rvals[i]) >= tol:\n S += rvals[i] * log_base(rvals[i])\n for j in range(len(svals)):\n P_ij = (\n np.dot(rvecs[i], svecs[j].conjugate()) *\n np.dot(svecs[j], 
rvecs[i].conjugate())\n )\n if abs(svals[j]) < tol and not (\n abs(rvals[i]) < tol or abs(P_ij) < tol):\n # kernel of sigma intersects support of rho\n return np.inf\n if abs(svals[j]) >= tol:\n S -= rvals[i] * P_ij * log_base(svals[j])\n return np.real(S)", "title": "" }, { "docid": "a020a4d01f78878c953a73297fc59966", "score": "0.6013505", "text": "def log_normal(x, m, v):\r\n\tdim=m.shape[-1]\r\n\tconst=-dim/2*np.log(2*np.pi)\r\n\tlog_prob=const-0.5*torch.log(v.sum(axis=-1))-0.5*((x-m)**2/v).sum(axis=-1)\r\n\treturn log_prob", "title": "" }, { "docid": "12ad68ce7b933684c90bad0cf0a843ff", "score": "0.6008636", "text": "def variational_objective(var_param):\n samples = var_family.sample(var_param, n_samples)\n a = partial(var_family.logdensity,var_param=var_param)\n def nested_fn(samples):\n lower_bound = np.mean(logdensity(samples)) - np.mean(a(samples))\n return -lower_bound\n b= nested_fn(samples)\n return b", "title": "" }, { "docid": "5efc579549f3cea6398f4b373fd27ec8", "score": "0.60071135", "text": "def mul_var_normal(weights, means, logvars):\n ll = 0\n\n for i in range(len(weights)):\n w = weights[i]\n mean = means[i]\n if len(logvars) > 1:\n logvar = logvars[i]\n var = logvar.exp()\n else:\n logvar = logvars[0]\n var = math.exp(logvar)\n\n logstd = logvar * 0.5\n ll += torch.sum(\n -((w - mean)**2)/(2*var) - logstd - math.log(math.sqrt(2*math.pi))\n )\n\n return ll", "title": "" }, { "docid": "10ab5a5dcc24df49eafc7e9cd03a7595", "score": "0.6000301", "text": "def lognorm_lev(mu, sigma, n, limit):\n if limit == -1:\n return np.exp(n * mu + n * n * sigma * sigma / 2)\n else:\n phi = ss.norm.cdf\n ll = np.log(limit)\n sigma2 = sigma * sigma\n phi_l = phi((ll - mu) / sigma)\n phi_l2 = phi((ll - mu - n * sigma2) / sigma)\n unlimited = np.exp(n * mu + n * n * sigma2 / 2)\n return unlimited * phi_l2 + limit ** n * (1 - phi_l)", "title": "" }, { "docid": "4706bd641d785d747c4bae16a3edd046", "score": "0.59839344", "text": "def lognorm_approx(ser):\n m, cv = xsden_to_meancv(ser.index, ser.values)\n mu, sigma = mu_sigma_from_mean_cv(m, cv)\n fz = ss.lognorm(sigma, scale=np.exp(mu))\n return fz", "title": "" }, { "docid": "a2c6a51aad5fc400140a71a95ae387f2", "score": "0.5978666", "text": "def lognormal_expval(mu, tau):\n return np.exp(mu + 1./2/tau)", "title": "" }, { "docid": "55658e99d76eda4013761be57aff6a5e", "score": "0.59774536", "text": "def logsum(vfun, lam):\n\n # rescale values for numeric robustness\n vmax = np.max(vfun, axis=0)\n vfun_norm = vfun - vmax\n\n # apply formula (could be njitted in separate function)\n VE = vmax + lam * np.log(np.sum(np.exp(vfun_norm / lam), axis=0))\n return VE", "title": "" }, { "docid": "2b6f90eab6a8dfa590499b46151ad445", "score": "0.59736603", "text": "def loglikelihood(self):\n # update the latent variances based on the updated latent covariances matrices\n self.latent_variances = [np.diag(Si) for Si in self.latent_Sigmas]\n\n # add up the terms that are valid over the entire index 1 <= i <= N\n determinants = [np.prod(np.diag(Sigma)) for Sigma in self.latent_Sigmas]\n traces = [np.trace(Sigma) for Sigma in self.latent_Sigmas]\n miTmi = [np.sqrt(sum(mi**2)).item() for mi in self.latent_means]\n\n global_terms = [0.5*(np.log(det) - tr - mi_norm) for tr, det, mi_norm in zip(traces, determinants, miTmi)]\n\n # now for the terms in Omega_plus\n\n def get_omega_terms(plus=True):\n \"\"\" Compute the summation terms over Omega plus or Omega minus \"\"\"\n if plus:\n B = self.B1 \n error_func = lambda x: SQRT_PI_OVER_2*erfc(x/ROOT2)\n exp_func = lambda x: 
np.exp(-0.5*x**2)\n index_set = self.I1\n mu = self.mu1\n\n else:\n B = self.B2\n error_func = lambda x: SQRT_PI_OVER_2*(erf(x/ROOT2) + 1)\n exp_func = lambda x: -np.exp(-0.5*x**2)\n index_set = self.I2\n mu = self.mu2\n\n BTB = np.matmul(B.T, B)\n\n # these are the 'delta_i' parameters when nu=e_q, beta=0\n deltas = [-self.latent_means[i][-1]/np.sqrt(self.latent_variances[i][-1]) for i in index_set]\n \n _BTBSigmas = [np.matmul(BTB, self.latent_Sigmas[i]) for i in index_set]\n\n _diagonal_terms = [BTBS[-1][-1] for BTBS in _BTBSigmas]\n _exp_terms = [delta*exp_func(delta) for delta in deltas]\n _erf_terms = [error_func(delta) for delta in deltas]\n _trace_terms = [np.trace(BTBS) for BTBS in _BTBSigmas]\n _quadratic_terms = [\n np.matmul(\n np.matmul(\n self.latent_means[i].T,\n BTB\n ),\n self.latent_means[i]\n ) for i in index_set\n ]\n _yi_terms = [\n np.matmul(\n self.Y[i] - mu,\n (self.Y[i] - mu).T - 2*np.matmul(B, self.latent_means[i])\n ) for i in index_set\n ]\n\n _terms = [\n item1*item2.item() + item3*(item4 + item5 + item6) for item1, \n item2, \n item3, \n item4, \n item5, \n item6 in zip(\n _diagonal_terms,\n _exp_terms,\n _erf_terms,\n _trace_terms,\n _quadratic_terms,\n _yi_terms\n )\n ]\n\n return _terms\n\n self.omega_plus_terms = get_omega_terms(plus=True)\n self.omega_minus_terms = get_omega_terms(plus=False)\n\n # finally, compute the scalars that are independent of the data and latent variables\n scalars = 0.5*self.N*(self.q - self.p*np.log(TWOPI*self.sigma2))\n\n # add all of the terms together\n total = 0.5*np.sum(global_terms) - (TWOPI**-0.5)/(2*self.sigma2)*(np.sum(self.omega_plus_terms) + np.sum(self.omega_minus_terms))\n\n return total + scalars", "title": "" }, { "docid": "8caea465be9179151d905a6c5c0efb3b", "score": "0.59699017", "text": "def norm_log_pdf(x: np.ndarray, mu: float, sigmasq: float) -> np.ndarray:\n o = x - mu\n ld = -0.5*np.square(o) / sigmasq - 0.5*np.log(sigmasq) - 0.5*np.log(2*np.pi)\n return ld", "title": "" }, { "docid": "b77de7150fb7ba682385f2a12075fe2a", "score": "0.59502894", "text": "def gaussian_real(mean, logsd, dim):\n B, C, m = assert_real(mean, dim)\n assert mean.shape == logsd.shape\n\n class o(object):\n pass\n\n o.mean = mean\n o.logstd = logsd\n # logcov = torch.zeros(B, C, m, m, m, dim, dim).to(logsd.device)\n # covinv = torch.zeros(B, C, m, m, m, dim, dim).to(logsd.device)\n # for d in range(dim):\n # # logcov[..., d, d] = logsd[..., d]\n # # covinv[..., d, d] = 1 / torch.exp(logsd[..., d])\n # o.logcov = logcov\n stdinv = 1 / torch.exp(logsd) # [B, C, m, m, m, 6]\n o.eps = torch.randn(mean.shape)\n o.sample = mean + torch.exp(logsd) * o.eps.to(mean.device)\n o.sample2 = lambda eps: mean + torch.exp(logsd) * eps.to(mean.device)\n\n nlog2pi = dim * np.log(2 * np.pi)\n logcov = logsd.sum(dim=-1).unsqueeze(-1) # [B, C, m, m, m, 1]\n #centerx = (x - mean).unsqueeze(-2) # [B, C, m, m, m, 1, 6]\n #centerxT = (x - mean).unsqueeze(-1) # [B, C, m, m, m, 6, 1]\n #xsquare = centerx @ covinv # [B, C, m, m, m, 1, 6]\n #xsquare = xsquare @ centerxT # [B, C, m, m, m, 1, 1]\n #xsquare = xsquare.squeeze(-1) # [B, C, m, m, m, 1]\n o.logps = lambda x: -0.5 * (nlog2pi + logcov + (x - mean) ** 2 * stdinv)\n #o.logps = lambda x: -0.5 * ((x - mean) ** 2 * stdinv)\n # o.logps = lambda x: -0.5 * (dim * np.log(2 * np.pi) + logsd.sum(dim=-1).unsqueeze(-1) + (x - mean) ** 2 / torch.exp(2. 
* logsd))\n o.logp = lambda x: o.logps(x).sum(dim=(1, 2, 3, 4))\n # o.get_eps = lambda x: (x - mean) / torch.exp(logsd)\n return o", "title": "" }, { "docid": "fe14cb182e709b598fa482314a4bf9ad", "score": "0.5939857", "text": "def lognormal(x, p, g):\n x = numpy_array(x) # check if numpy array, if not make numpy array\n x = assert_2d_sort(x)\n return np.exp(normal(np.log(x), p, g))", "title": "" }, { "docid": "5c97bbe7843ca6b572d70684a73044bb", "score": "0.590387", "text": "def sklogn_analytical_likelihood(self, ddt):\n # skewed lognormal distribution with boundaries\n if (ddt < self.lam) or ((-self.mu + math.log(ddt - self.lam)) ** 2 / (2. * self.sigma ** 2) > self.explim):\n return -np.inf\n else:\n llh = math.exp(-((-self.mu + math.log(ddt - self.lam)) ** 2 / (2. * self.sigma ** 2))) / (\n math.sqrt(2 * math.pi) * (ddt - self.lam) * self.sigma)\n\n if np.isnan(llh):\n return -np.inf\n else:\n return np.log(llh)", "title": "" }, { "docid": "dfeb215b72bc80bcb1926ec78e0f9ce9", "score": "0.5900217", "text": "def random_lognormal(args):\n return DecisionParser.rand_x_normal(random.lognormvariate, args)", "title": "" }, { "docid": "6102af8d42605f24ca4dfd1fa7a8b06b", "score": "0.58979064", "text": "def normal_log_prob(latent: base.Array, sigma: float = 1, mu: float = 0):\n latent, _ = jax.tree_flatten(latent)\n latent = jax.tree_map(lambda x: x.flatten(), latent)\n latent = jnp.concatenate(latent)\n latent_dim = len(latent)\n latent_l2_sq = jnp.sum(jnp.square(latent - mu))\n return -0.5 * (latent_dim * jnp.log(2 * jnp.pi * sigma**2)\n + latent_l2_sq / sigma**2)", "title": "" }, { "docid": "5190dec56ab9211d1c3ef39a29eeec84", "score": "0.5889136", "text": "def logistic_var():\n z = np.linspace(-5,5,1000) # z points to test\n p1 = 1/(1+np.exp(-z)) # target probability\n var_test = np.linspace(2,3,1000)\n err = []\n for v in var_test:\n p2 = 0.5*(1+scipy.special.erf(z/np.sqrt(v*2)))\n err.append(np.mean((p1-p2)**2))\n \n i = np.argmin(err)\n wvar = var_test[i]\n return wvar", "title": "" }, { "docid": "d423b90c71b56106a8f0e940c7590870", "score": "0.58890647", "text": "def log_normal(x, m, v):\n element_wise = -0.5 * (torch.log(v) + (x - m).pow(2) / v + np.log(2 * np.pi))\n log_prob = element_wise.sum(-1)\n return log_prob", "title": "" }, { "docid": "c5b65a9d86e7e4fbbe77287721721fb5", "score": "0.5877169", "text": "def my_loglike(theta, x, data, sigma):\n sim_bins = run_exp_three_times_and_bin(theta, x)\n\n return -(0.5 / sigma ** 2) * np.sum((data - sim_bins) ** 2)", "title": "" }, { "docid": "6e38432ed5b66405b26ac10a2549111f", "score": "0.58750695", "text": "def variational_objective(var_param):\n samples = var_family.sample(var_param, n_samples)\n lower_bound = np.mean(logdensity(samples)) - np.mean(var_family.logdensity(samples, var_param))\n return -lower_bound", "title": "" }, { "docid": "b5bfe401b02d624a2c0704860992ab34", "score": "0.587297", "text": "def _independent_gaussian_log_prob_vector(x, mus, sigmas):\n # The inverse of a diagonal matrix is just inverting values on the\n # diagonal\n cov_inv = torch.eye(d) * (1 / sigmas ** 2)\n\n # sum(log) = log(prod)\n logpdf = (-d / 2 * torch.log(2 * torch.as_tensor(torch.pi)) -\n torch.log(torch.prod(sigmas))) \\\n - 0.5 * mahalanobis(x[:3], mus, cov_inv) ** 2\n return logpdf", "title": "" }, { "docid": "31263a909b6110aa5bdc41ef7892bf8b", "score": "0.58668375", "text": "def siglog_normal_init_guess(self, reg_pnts_array, pos_traj):\n\n def make_sigma_equation(reg_pnts):\n \"\"\"\n input is a series of registration points t1 - t5\n though only t1, 
t3, t5 are useful\n \"\"\"\n def sigma_estimator(sigma):\n #do float here, otherwise the second term will be ZERO!!\n return ((np.exp(-sigma**2+3*sigma) - 1) / (np.exp(sigma*6) - 1) - \n float(reg_pnts[2] - reg_pnts[0]) / (reg_pnts[4] - reg_pnts[0]))\n #return (np.exp(sigma**2) - 1) / (np.exp(6*sigma**2) - 1) - (reg_pnts[2] - reg_pnts[0]) / (reg_pnts[4] - reg_pnts[0])\n return sigma_estimator\n\n init_guess = []\n vel_profile=self.get_vel_profile(pos_traj)/self.dt\n for reg_pnts in reg_pnts_array:\n #make an estimator for sigma\n sig_est_func = make_sigma_equation(reg_pnts)\n #solve it\n init_sigma = (reg_pnts[4] - reg_pnts[0])/2*self.dt\n #<hyin/Dec-24-2014> solving the equation is still not clear and the results seem not right\n #more investigation is needed to know the derivation of the equation, e.g., how the units are consistent...\n #currently, use an even more simple heuristics...\n sig_sln = sciopt.broyden1(sig_est_func, init_sigma, f_tol=1e-14)\n #try direct nonlinear optimization, note the returned function is modified with square\n #see sigma_estimator above\n #sig_sln = sciopt.fminbound(sig_est_func, 0, init_sigma*3) #search between (0, 3*init_sigma)\n #print sig_sln, sig_est_func(sig_sln)\n sig = sig_sln #only need scalar value\n #sig = init_sigma\n if sig <= 0:\n #will this happen?\n #this will happen when actual mode locates on the right side of 'Gaussian mean'\n #lognormal distribution is asymmetrical in regards of mode, but does it always distribute more mass on left side? \n #(okay, i know it's long tail, more mass means some more slopeness when going up)\n sig = np.abs(sig)\n a_array = np.array([3*sig, 1.5*sig**2+sig*np.sqrt(0.25*sig**2+1),\n sig**2, 1.5*sig**2-sig*np.sqrt(0.25*sig**2+1), -3*sig])\n #estimate mu\n mu_1 = np.log((reg_pnts[2]-reg_pnts[0])*self.dt/(np.exp(-a_array[2])-np.exp(-a_array[0])))\n mu_2 = np.log((reg_pnts[4]-reg_pnts[0])*self.dt/(np.exp(-a_array[4])-np.exp(-a_array[0])))\n mu = (mu_1 + mu_2)/2\n #estimate D\n D_array = np.array([np.sqrt(np.pi*2)*vel_profile[i]*np.exp(mu)*sig\n *np.exp((a_array[i]**2/(2*sig**2)-a_array[i])) for i in range(len(a_array))])\n D = np.average(D_array)\n #estimate t0\n t0_array = np.array([reg_pnts[i]*self.dt - np.exp(mu)*np.exp(-a_array[i]) for i in range(len(a_array))])\n t0 = np.average(t0_array)\n\n theta_s, theta_e = self.siglog_normal_init_ang_guess((D, t0, mu, sig),\n pos_traj, reg_pnts)\n \n #add\n init_guess.append((D, t0, mu, sig, theta_s, theta_e))\n return init_guess", "title": "" }, { "docid": "deff43e9eb3fbd415a669c8fa0b9311c", "score": "0.5852983", "text": "def logchol2sigma(params):\n U = torch.zeros((2, 2))\n U.view(-1)[[0, 1, 3]] = params # fill upper triangle\n U.diagonal().exp_()\n return U.t() @ U", "title": "" }, { "docid": "db9e2badc5c85d6af8b407796cdb3773", "score": "0.58300114", "text": "def logGaussian(x, mu, var):\n\n d = len(x)\n log_prob = -1/(2*var) * (x-mu) @ (x-mu)\n log_prob-= d/2 * np.log(2*np.pi*var)\n\n return log_prob", "title": "" }, { "docid": "dd1998b3be485ff0688207ce547a3c43", "score": "0.5825949", "text": "def lognormal_param_transform(self):\n M = self.data_mean\n S = self.data_stddev\n variance = numpy.log( (S**2) / (M**2) + 1)\n sigma = numpy.sqrt(variance)\n mu = numpy.log(M) - (0.5 * (sigma**2))\n\n return mu, sigma", "title": "" }, { "docid": "a43e06cfcd1faba629eff7d532b418c9", "score": "0.5825454", "text": "def lognorm_mean(mean, sigma):\n if sigma == 0:\n return DeterministicDistribution(mean)\n else:\n mu = mean * np.exp(-0.5 * sigma**2)\n return stats.lognorm(scale=mu, 
s=sigma)", "title": "" }, { "docid": "7a9c6360299bb5ac21d56b0ff9a08da0", "score": "0.5823315", "text": "def partial_e(sev_name, fz, a, n):\n\n if sev_name not in ['lognorm', 'gamma', 'pareto', 'expon']:\n raise NotImplementedError(f'{sev_name} NYI for analytic moments')\n\n if a == 0:\n return [0] * (n+1) # for k in range(n+1)]\n\n if sev_name == 'lognorm':\n m = fz.stats('m')\n sigma = fz.args[0]\n mu = np.log(m) - sigma**2 / 2\n ans = [np.exp(k * mu + (k * sigma)**2 / 2) *\n (ss.norm.cdf((np.log(a) - mu - k * sigma**2)/sigma) if a < np.inf else 1.0)\n for k in range(n+1)]\n return ans\n\n elif sev_name == 'expon':\n # really needed for MEDs\n # expon is gamma with shape = 1\n scale = fz.stats('m')\n shape = 1.\n lgs = loggamma(shape)\n ans = [scale ** k * np.exp(loggamma(shape + k) - lgs) *\n (ss.gamma(shape + k, scale=scale).cdf(a) if a < np.inf else 1.0)\n for k in range(n + 1)]\n return ans\n\n elif sev_name == 'gamma':\n shape = fz.args[0]\n scale = fz.stats('m') / shape\n # magic ingredient is the norming constant\n # c = lambda sh: 1 / (scale ** sh * gamma(sh))\n # therefore c(shape)/c(shape+k) = scale**k * gamma(shape + k) / gamma(shape)\n # = scale ** k * exp(loggamma(shape + k) - loggamma(shape)) to avoid errors\n ans = [scale ** k * np.exp(loggamma(shape + k) - loggamma(shape)) *\n (ss.gamma(shape + k, scale=scale).cdf(a) if a < np.inf else 1.0)\n for k in range(n + 1)]\n return ans\n\n elif sev_name == 'pareto':\n # integrate xf(x) even though nx^n-1 S(x) may be more obvious\n # former fits into the overall scheme\n # a Pareto defined by agg is like so: ss.pareto(2.5, scale=1000, loc=-1000)\n α = fz.args[0]\n λ = fz.kwds['scale']\n loc = fz.kwds.get('loc', 0.0)\n # regular Pareto is scale=lambda, loc=-lambda, so this has no effect\n # single parameter Pareto is scale=lambda, loc=0\n # these formulae for regular pareto, hence\n if λ + loc != 0:\n logger.log(WL, 'Pareto not shifted to x>0 range...using numeric moments.')\n return partial_e_numeric(fz, a, n)\n ans = []\n # will return inf if the Pareto does not have the relevant moments\n # TODO: formula for shape=1,2,3\n for k in range(n + 1):\n b = [α * (-1) ** (k - i) * binom(k, i) * λ ** (k + α - i) *\n ((λ + a) ** (i - α) - λ ** (i - α)) / (i - α)\n for i in range(k + 1)]\n ans.append(sum(b))\n return ans", "title": "" }, { "docid": "16ca565d23368f1890e2a2a6fdb096f2", "score": "0.58224374", "text": "def gaussian_log_density(mean, log_var, x):\n std = torch.exp(log_var).to(device)\n # return 0.5 * tf.reduce_sum(tf.square((x - self.mean) / self.std), axis=-1) \\\n # + 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(x)[-1]) \\\n # + tf.reduce_sum(self.logstd, axis=-1)\n neg_log_density = 0.5 * torch.sum(torch.pow((x - mean)/(std + 1e-6), 2), dim=-1) + \\\n 0.5 * np.log(2.0 * np.pi) * x.size(-1) + \\\n torch.sum(log_var, dim=-1)\n return -neg_log_density", "title": "" }, { "docid": "ec67d6d23ec7f46fea59ca9b0b76256d", "score": "0.581852", "text": "def vec_lnlognorm(x, mu, sigma, shift=0.):\n\n x_ = np.where(0 < (x - shift), (x - shift), 1)\n result1 = -np.log(np.sqrt(2. * math.pi) * x_ * sigma) - (np.log(x_) - mu)\\\n ** 2 / (2. 
* sigma ** 2)\n result = np.where(0 < (x - shift), result1, float(\"-inf\"))\n\n return result", "title": "" }, { "docid": "050fb452799d89d7c153f49a921d4612", "score": "0.5813111", "text": "def FD_log2(x, f_x, nn, liminf, suplim):\n \"\"\"Input: time starting with average dt(comming from repulsive_FD function), and f_time related to that time array\"\"\"\n lim_inf = round(np.log10(liminf),2)\n sup_lim = round(np.log10(suplim),2)\n b = np.linspace(lim_inf, sup_lim, nn)\n x_log = 10.0**b\n fx_log = np.zeros(len(x_log))\n for j in range(1, len(x_log)-1):\n for i in range(len(x)-1):\n if (x_log[j] - x[i])*(x_log[j] - x[i+1]) < 0.0 : #change of sign\n if (x_log[j] - x[i]) < (x_log[j] - x[i+1]):\n x_log[j] = x[i]\n fx_log[j] = f_x[i]\n else:\n x_log[j] = x[i+1]\n fx_log[j] = f_x[i+1]\n return x_log, fx_log", "title": "" }, { "docid": "4db7054583dd634dc0f92dd137a35a1d", "score": "0.5806972", "text": "def logGaussian(x, mu, var):\n d = len(x)\n log_prob = -1/(2*var) * (x-mu) @ (x-mu)\n log_prob-= d/2 * np.log(2*np.pi*var)\n\n return log_prob", "title": "" }, { "docid": "c35a72bb05fa39866346eca9ce7021c1", "score": "0.58031434", "text": "def estimate_from_work_samples(work_samples):\n return np.log(np.mean(np.exp(-np.array(work_samples))))", "title": "" }, { "docid": "4f5946f067e47ef72938d4ef51974e78", "score": "0.5798444", "text": "def _get_log_norms(ti_mps, str_set):\n core_tensor, boundaries = ti_mps.core_tensor, get_bound_mats(ti_mps)\n str_lens, max_len = str_set.str_lens, str_set.index_mat.shape[1]\n \n # Unpack boundary matrices and define rightward transfer operator\n left_mat, right_mat = boundaries\n t_op = build_t_op(core_tensor, direction='right', jitted=True)\n\n # Function implementing a single step of computing log_norm\n def scan_iter(iter_state, _):\n # scan_iter: c -> a -> (c, b)\n # iter_state holds a running log_norm and a density operator\n old_norm, old_density = iter_state\n\n # Apply transfer_op and normalize output\n new_density = t_op(old_density)\n this_norm = np.linalg.norm(new_density)\n new_density = new_density / this_norm\n\n new_norm = old_norm + np.log(this_norm)\n out_norm = new_norm + np.log(utils.hs_dot(right_mat, new_density))\n\n return (new_norm, new_density), out_norm\n\n # Apply transfer operator many times (starting with left_mat), and \n # generate list of log_norms\n iter_init = (0., left_mat)\n _, proper_log_norms = jax.lax.scan(scan_iter, iter_init, \n np.empty(max_len))\n assert len(proper_log_norms) == max_len\n\n # Add norm for the empty distribution and pick out lengths of interest\n empty_log_norm = np.log(utils.hs_dot(left_mat, right_mat))[None]\n all_log_norms = np.concatenate((empty_log_norm, proper_log_norms))\n log_norms = all_log_norms[str_lens]\n\n return log_norms", "title": "" }, { "docid": "e2ca7d3bf09e73b4dfc7d45108cc813e", "score": "0.57801867", "text": "def resCalc(mean, sigma):\n return (sigma*2*np.sqrt(2*np.log(2)))/mean", "title": "" }, { "docid": "9f8e11e03fdd2c200e9e4036ab97f912", "score": "0.57784307", "text": "def log_multinomial_density(X, log_thetas):\n lpr = np.zeros((X.shape[0], len(log_thetas)))\n\n for i in range(X.shape[0]):\n for j in range(len(log_thetas)):\n lpr[i][j] = np.sum([log_thetas[j][dim][value] for dim, value in enumerate(X[i])])\n return lpr", "title": "" }, { "docid": "7e042b04c2979bac0fc263db3c3f6c6e", "score": "0.5771863", "text": "def test_log_normal_diag(self):\n N = 100\n S = 50\n D = 10\n\n means = torch.randn(S, D)\n covs = torch.rand(S, D)\n x = torch.randn(N, D)\n\n distributions = [\n 
dist.MultivariateNormal(means[i], torch.diag(covs[i]))\n for i in range(S)\n ]\n\n expected = []\n for item in x:\n e_item = []\n for d in distributions:\n e_item.append(d.log_prob(item))\n expected.append(e_item)\n expected = torch.as_tensor(expected)\n\n predicted = log_normal(x, means, covs, 'diag')\n\n self.assertTrue(\n torch.allclose(expected, predicted, atol=1e-03, rtol=1e-05)\n )", "title": "" }, { "docid": "5ab652340a5964931b37b8fbf6cb1886", "score": "0.57699895", "text": "def log_norm_medium_concentration(scale, dimension):\n scale = np.asfarray(scale)\n shape = scale.shape\n scale = scale.flatten()\n\n # Function is unstable at zero.\n # Scale needs to be float for this to work.\n scale[scale < 1e-2] = 1e-2\n\n r_range = range(dimension - 2 + 1)\n r = np.asarray(r_range)[None, :]\n\n # Mardia1999Watson Equation 3\n temp = scale[:, None] ** r * np.exp(-scale[:, None]) / \\\n np.asarray([math.factorial(_r) for _r in r_range])\n\n return (\n np.log(2.) + dimension * np.log(np.pi) +\n (1. - dimension) * np.log(scale) + scale +\n np.log(1. - np.sum(temp, -1))\n ).reshape(shape)", "title": "" }, { "docid": "710ad092912ee20724ae0d0569bd8a17", "score": "0.57575834", "text": "def loglike_MNL(self):\n\n top = np.zeros(shape=self.av.shape[2], dtype=\"float64\")\n bottom = np.zeros(shape=self.av.shape[2], dtype=\"float64\")\n for c in range(self.count_c):\n for e in range(self.count_e):\n top += (\n self.av[c][e] * self.choice[c][e] * np.exp(self.get_utility(c, e))\n )\n bottom += self.av[c][e] * np.exp(self.get_utility(c, e))\n\n log_res = self.weight_vector * np.log(top / bottom)\n res = np.nansum(log_res)\n number_nan = np.sum(np.isnan(log_res))\n\n return res, number_nan", "title": "" }, { "docid": "7c7fa0812243e0cc7023f455fa1c9c0c", "score": "0.5737334", "text": "def log_normal_variance(mu, sigma):\n exponent1 = sigma**2\n exponent2 = 2 * mu + sigma**2\n return (np.exp(exponent1) - 1) * np.exp(exponent2)", "title": "" }, { "docid": "53e603f0d38b96387e0e1e8597c15f38", "score": "0.57331264", "text": "def arlognormal_like(x, a, sigma, rho):\n return flib.arlognormal(x, np.log(a), sigma, rho, beta=1)", "title": "" }, { "docid": "fe1b87d9802efb5bcedf4108a7b9cec0", "score": "0.5730004", "text": "def log_normal(X, mu, sigma):\n N = len(X)\n loglik = -(N/2)*np.log(2*np.pi) - (N/2)*np.log(sigma**2) - (1/(2*sigma**2))*(np.sum((X-mu)**2))\n return loglik", "title": "" }, { "docid": "2d0b20baebc724376977507364b188fc", "score": "0.5711383", "text": "def log_prior(self, params):\n # log likelihood function, see:\n # https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Likelihood_function\n var = self.std**2\n nitems = params.size\n diff = params - self.mean\n scaled_sq_err_term = -0.5 * jnp.dot(diff, diff) / var\n # log determinant of covariance matrix\n log_det_cov_term = -nitems * jnp.log(self.std)\n norm_term = -0.5 * nitems * jnp.log(2 * jnp.pi)\n return log_det_cov_term + scaled_sq_err_term + norm_term", "title": "" }, { "docid": "f96e5db369a9bbac865733badb1c0370", "score": "0.5706779", "text": "def __logLikelihood(self, X, means, covars, ps):\n\n error = 0.0\n for n, xn in enumerate(X):\n arg = 0.0\n for k in range(means.shape[0]):\n arg += ps[k] * multiGaussianPDF(xn, means[k], covars[k])\n error += np.log(arg)\n return error", "title": "" }, { "docid": "dd944f969bb22dd17eddc28fabd55b38", "score": "0.56974983", "text": "def geometricMean(alldistribT):\n return np.exp(np.mean(np.log(alldistribT), axis=1))", "title": "" }, { "docid": "72ce5449d4fd3a39092c20c8249a6872", "score": 
"0.5695466", "text": "def neglog_density_normal(x, mean=0., sigma=1.):\n return np.sum((x - mean)**2, axis=1) / 2 / sigma**2", "title": "" }, { "docid": "1968b41fcf935ee6655c5bdc1c13e2c7", "score": "0.569209", "text": "def matrix_log_density_gaussian(x, q):\n x = torch.unsqueeze(x, dim=1)\n _mu = torch.unsqueeze(q.loc, dim=0)\n _sigma = torch.unsqueeze(q.scale, dim=0)\n _q = Normal(loc=_mu, scale=_sigma)\n return _q.log_prob(x)", "title": "" }, { "docid": "42fad586b8f5e0707ca5d291f9fd5178", "score": "0.5684034", "text": "def delta_loglike(self):\n try:\n # rewrite due to deprication of numpy.matrix\n # gm = np.matrix(self.gradient())\n # H = self.hessian()\n # return (gm * H.I * gm.T)[0,0]/4\n g = self.gradient()\n H = self.hessian()\n return np.dot( np.dot(g, np.linalg.inv(H)) , g)/4\n except Exception as msg:\n print ('Failed log likelihood estimate, returning 99.: %s' % msg)\n return 99.", "title": "" }, { "docid": "dbdb2858156ba9a7e61ae80a2281f6d9", "score": "0.56717545", "text": "def calc_log_evidence(self, idx):\n logpsi = np.empty((self.T, 2))\n N = self.Nframe\n nn = N['count']\n uu = N['unit']\n\n if self.overdispersion:\n od = self.nodes['overdispersion'].expected_x()\n elif self.overdispersion_natural:\n # get the dataframe for count values\n cnt_index = self.Nframe.sort_values(['unit', 'trial', 'time']).index\n # index matrix of 2 columns: Column 1 original, Column 2 sorted\n ind_mat = np.c_[np.array(cnt_index),\n np.array(xrange(cnt_index.shape[0]))]\n\n # expectation of phi\n expected_phi = self.nodes['overdispersion_natural'].expected_x()\n\n # sort expected_phi and compute cumulative product\n exphi_sorted = self._sort_values(ind_mat, expected_phi)\n # cumulative product\n exphi_sorted = np.cumprod(exphi_sorted.reshape(-1, self.time_natural), axis=1).ravel()\n # unsort expected_phi\n od = self._unsort_values(ind_mat, exphi_sorted)\n else:\n od = 1\n bl = self.nodes['baseline'].expected_x()[uu]\n Fk = self.F_prod(idx)\n G = self.G_prod()\n\n allprod = bl * od * Fk * G\n\n lam = self.nodes['fr_latents']\n bar_log_lam = lam.expected_log_x()[uu, idx]\n bar_lam = lam.expected_x()[uu, idx]\n\n N['lam0'] = -allprod\n N['lam1'] = -(allprod * bar_lam) + (nn * bar_log_lam)\n\n logpsi = N.groupby('time').sum()[['lam0', 'lam1']].values\n\n return logpsi", "title": "" }, { "docid": "3cf8367fe5b4b5095e5d26edfcb51ef5", "score": "0.56698763", "text": "def calc_ent(x):\n\n x_value_list = set([x[i] for i in range(x.shape[0])])\n ent = 0.0\n for x_value in x_value_list:\n p = float(x[x == x_value].shape[0]) / x.shape[0]\n logp = np.log2(p)\n ent -= p * logp\n\n return ent", "title": "" }, { "docid": "ce313bde5114e9caeebbd302c89a30eb", "score": "0.5662155", "text": "def ln_normal_trunc(x, mean, sigma, a, b):\n x_ = np.where((a < x) & (x < b), x, 1)\n k = math.log(norm.cdf((b - mean) / sigma) -\n norm.cdf((a - mean) / sigma))\n result1 = -math.log(sigma) - 0.5 * math.log(2. * math.pi) -\\\n 0.5 * ((x_ - mean) / sigma) ** 2. 
- k\n result = np.where((a < x) & (x < b), result1, float(\"-inf\"))\n\n return result", "title": "" }, { "docid": "d590c73ce3457bb12f19382a4cf256b9", "score": "0.5661465", "text": "def rand_scale_log_normal(mean_scale, one_sigma_at_scale):\n\n log_sigma = math.log(one_sigma_at_scale)\n return mean_scale*math.exp(random.normalvariate(0.0, log_sigma))", "title": "" }, { "docid": "3a93111972fb64106e572f1b502bdd4c", "score": "0.56584245", "text": "def lognorm_mean_var(mean, variance):\n if variance == 0:\n return DeterministicDistribution(mean)\n else:\n scale, sigma = lognorm_mean_var_to_mu_sigma(mean, variance, 'scipy')\n return stats.lognorm(scale=scale, s=sigma)", "title": "" }, { "docid": "927544879a0c955c71c109a644cb11da", "score": "0.56554604", "text": "def LOG_B_m_x(i,X,myTheta):\n\n d = X.shape[1]\n term_1 = -0.5 * np.sum(np.true_divide(np.power(X - myTheta.mu[i], 2), myTheta.Sigma[i]), axis=1)\n term_2 = (d * 0.5) * np.log(2 * np.pi)\n term_3 = 0.5 * np.sum(np.log(myTheta.Sigma[i])) # fix this\n\n return term_1 - term_2 - term_3", "title": "" }, { "docid": "4486a66557372d183a7e90e7daeaf5f0", "score": "0.5654624", "text": "def lognorm_s(n, params):\n F0, n0, sigma = params\n retval = F0 * np.exp(-0.5 * (np.log(n / n0) / sigma) ** 2.) / n\n return (retval)", "title": "" }, { "docid": "043250c5417cb3ef16a2d99122705526", "score": "0.5650477", "text": "def lpe_test_log_0(self):\n\n res_2 = self.X_factor.log(2)\n res_10 = self.X_factor.log(10)\n res_e = self.X_factor.log(math.e)\n\n s = len(self.X_factor.values)\n\n assert res_2.values == list(\n map(math.log, self.X_domain, [2] * s)\n ) and res_2.rand_vars == [self.X]\n\n assert res_10.values == list(\n map(math.log, self.X_domain, [10] * s)\n ) and res_10.rand_vars == [self.X]\n\n assert res_e.values == list(\n map(math.log, self.X_domain, [math.e] * s)\n ) and res_e.rand_vars == [self.X]", "title": "" }, { "docid": "4f49587de1fe107b60c15c4708f1d035", "score": "0.5649254", "text": "def rlognormal(mu, tau,size=None):\n\n return np.random.lognormal(mu, np.sqrt(1./tau),size)", "title": "" }, { "docid": "0622b92eb88cd15434ce33914fe008c7", "score": "0.5649053", "text": "def log_normal_prob(x: float, mu: float = 0, sigma: float = 1):\n gauss = tfd.Normal(loc=mu, scale=sigma)\n return gauss.log_prob(x)", "title": "" }, { "docid": "d0edbb4a902fc5cd6f2cfb49028f7494", "score": "0.5648403", "text": "def log_prob(self, params: Pytree, inputs: np.ndarray) -> np.ndarray:\n cov = np.identity(self.input_dim)\n nu = np.exp(params)\n maha, log_det = _mahalanobis_and_logdet(inputs, cov)\n t = 0.5 * (nu + self.input_dim)\n A = gammaln(t)\n B = gammaln(0.5 * nu)\n C = self.input_dim / 2.0 * np.log(nu * np.pi)\n D = 0.5 * log_det\n E = -t * np.log(1 + (1.0 / nu) * maha)\n\n return A - B - C - D + E", "title": "" }, { "docid": "1b1dace4d3606ba03e3581fd2c116ff2", "score": "0.56429243", "text": "def multivariate_gaussian_pdf(self, X, mu_all, sigma_all, log_scale=False):\n dim = X.shape[-1]\n sigma_invs = np.stack([np.linalg.inv(sigma) for sigma in sigma_all]) ## shape = (num_s, dim, dim)\n sigma_det = np.array([np.linalg.det(sigma) for sigma in sigma_all]) ## shape = (num_s, )\n \n\n if log_scale:\n factor = -0.5*(np.log((2*np.pi)**dim) + np.log(sigma_det))\n centered_part = X - np.expand_dims(mu_all, axis=1) ## shape (T, dim) - (num_s, 1, dim) = (num_s, T, dim)\n dot_part = np.sum(centered_part[..., None] * np.expand_dims(sigma_invs, axis=1), axis=-2) ## shape = (num_s, T, dim)\n all_part = np.sum(dot_part*centered_part, axis=-1) ## shape = (num_s, T)\n 
main_part = -0.5*all_part\n # import ipdb; ipdb.set_trace()\n return main_part + factor[:, None]\n\n else:\n factor = 1/np.sqrt((2*np.pi)**dim * sigma_det) ## shape = (num_s, )\n centered_part = X - np.expand_dims(mu_all, axis=1) ## shape (T, dim) - (num_s, 1, dim) = (num_s, T, dim)\n dot_part = np.sum(centered_part[..., None] * np.expand_dims(sigma_invs, axis=1), axis=-2) ## shape = (num_s, T, dim)\n all_part = np.sum(dot_part*centered_part, axis=-1) ## shape = (num_s, T)\n main_part = np.exp(-0.5*all_part)\n \n return factor[:, None] * main_part", "title": "" }, { "docid": "326739fb83c38229c9aea0e6c6920bfe", "score": "0.563609", "text": "def power_law_discrete_log_likelihood(alpha, x, xmin, minimize=False):\n\n n = x.size\n z = zeta(xmin, alpha)\n\n res = - n * np.log(z) - alpha * sum([np.log(i) for i in x])\n\n if minimize:\n return res * - 1\n else:\n return res", "title": "" }, { "docid": "75bf7f8213dc8ed405c8cc72a79636bb", "score": "0.56358135", "text": "def expected_log_prob_s_given_h(self, stats):\n\n mean_h = stats.d['mean_h']\n mean_sq_s = stats.d['mean_sq_s']\n mean_hs = stats.d['mean_hs']\n\n half = as_floatX(0.5)\n two = as_floatX(2.)\n N = as_floatX(self.nhid)\n pi = as_floatX(np.pi)\n\n term1 = half * T.log( self.alpha ).sum()\n term2 = - half * N * T.log(two*pi)\n term3 = - half * T.dot( self.alpha , mean_sq_s )\n term4 = T.dot(self.mu*self.alpha,mean_hs)\n term5 = - half * T.dot(T.sqr(self.mu), self.alpha * mean_h)\n\n rval = term1 + term2 + term3 + term4 + term5\n\n assert len(rval.type.broadcastable) == 0\n\n return rval", "title": "" }, { "docid": "70c3690ba600570bd9741045c56de672", "score": "0.56264967", "text": "def _logpdf(self, x, dim, df, scale, log_det_scale):\n log_det_x = np.empty(x.shape[-1])\n x_inv = np.copy(x).T\n if dim > 1:\n _cho_inv_batch(x_inv) # works in-place\n else:\n x_inv = 1./x_inv\n tr_scale_x_inv = np.empty(x.shape[-1])\n\n for i in range(x.shape[-1]):\n C, lower = scipy.linalg.cho_factor(x[:, :, i], lower=True)\n\n log_det_x[i] = 2 * np.sum(np.log(C.diagonal()))\n\n tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace()\n\n # Log PDF\n out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) -\n (0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) -\n multigammaln(0.5*df, dim))\n\n return out", "title": "" }, { "docid": "5d1368f70ce226bf8837a1345ff2a19b", "score": "0.5623958", "text": "def test_log_normal_diag_shared(self):\n N = 100\n S = 50\n D = 10\n\n means = torch.randn(S, D)\n covs = torch.rand(D)\n x = torch.randn(N, D)\n\n distributions = [\n dist.MultivariateNormal(means[i], torch.diag(covs))\n for i in range(S)\n ]\n\n expected = []\n for item in x:\n e_item = []\n for d in distributions:\n e_item.append(d.log_prob(item))\n expected.append(e_item)\n expected = torch.as_tensor(expected)\n\n predicted = log_normal(x, means, covs, 'diag-shared')\n\n self.assertTrue(\n torch.allclose(expected, predicted, atol=1e-03, rtol=1e-05)\n )", "title": "" }, { "docid": "c00c95bef4ba76bdf4b62c7057223167", "score": "0.56213236", "text": "def lognormexp(values, dim=0):\n\n log_denominator = torch.logsumexp(values, dim=dim, keepdim=True)\n # log_numerator = values\n return values - log_denominator", "title": "" }, { "docid": "f1f1ad0275ae0bdd1e99d5feea0c46b5", "score": "0.56146884", "text": "def _entropy(self, dim, df, log_det_scale):\n return (\n 0.5 * (dim+1) * log_det_scale +\n 0.5 * dim * (dim+1) * _LOG_2 +\n multigammaln(0.5*df, dim) -\n 0.5 * (df - dim - 1) * np.sum(\n [psi(0.5*(df + 1 - (i+1))) for i in range(dim)]\n ) +\n 0.5 * df * dim\n 
)", "title": "" }, { "docid": "7518c5fe1ef9fc1f7eef90eeaeac829d", "score": "0.56012297", "text": "def likelihood(mean, logs, x):\n if mean is None and logs is None:\n return -0.5 * (x ** 2 + GaussianDiag.Log2PI)\n else:\n return -0.5 * (logs * 2. + ((x - mean) ** 2) / torch.exp(logs * 2.) + GaussianDiag.Log2PI)", "title": "" }, { "docid": "dd51070691d2fce09f87700ef0f24208", "score": "0.5599779", "text": "def normalize(lls):\n return lls - np.log(np.exp(lls).sum())", "title": "" }, { "docid": "bd1947619731bc7d230bbbe1265cad0b", "score": "0.5599093", "text": "def _vmf_log(X, kappa, mu):\n n_examples, n_features = X.shape\n return np.log(_vmf_normalize(kappa, n_features) * np.exp(kappa * X.dot(mu).T))", "title": "" }, { "docid": "209c7e1b7d39c59c8a79f28b2fd75cbc", "score": "0.5597885", "text": "def test_log_normal_spherical(self):\n N = 100\n S = 50\n D = 10\n\n means = torch.randn(S, D)\n covs = torch.rand(S)\n x = torch.randn(N, D)\n\n distributions = [\n dist.MultivariateNormal(\n means[i], torch.diag(covs[i].clone().expand(D))\n ) for i in range(S)\n ]\n\n expected = []\n for item in x:\n e_item = []\n for d in distributions:\n e_item.append(d.log_prob(item))\n expected.append(e_item)\n expected = torch.as_tensor(expected)\n\n predicted = log_normal(x, means, covs, 'spherical')\n\n self.assertTrue(\n torch.allclose(expected, predicted, atol=1e-03, rtol=1e-05)\n )", "title": "" }, { "docid": "1067bf06951794b50d1d1ac2176358f1", "score": "0.55966264", "text": "def variational_objective(params, t):\n mean, log_std = unpack_params(params)\n samples = rs.randn(num_samples, D) * np.exp(log_std) + mean\n lower_bound = gaussian_entropy(log_std) + np.mean(logprob(samples,last_theta,time_series,current_t, t))\n return -lower_bound", "title": "" }, { "docid": "b3db808c6a4aeece56fea25723f99892", "score": "0.5593598", "text": "def loguniform_mean_var(mean, var):\n if var < 0:\n raise ValueError('Variance must be positive')\n elif var == 0:\n # treat special case separately\n return DeterministicDistribution(mean)\n else:\n # determine width parameter numerically\n cv2 = var / mean**2 # match square coefficient of variation\n \n def _rhs(q):\n \"\"\" match the coefficient of variation \"\"\"\n return 0.5 * (q + 1) * np.log(q) / (q - 1) - 1 - cv2\n \n width = optimize.newton(_rhs, 1.1)\n return loguniform_mean(mean, np.sqrt(width))", "title": "" }, { "docid": "93fd60d7309b494c4af64bf87d301295", "score": "0.5592026", "text": "def lognormal_approximation_coefficient(lat, lon):\n type_output = type(lat)\n lat = prepare_input_array(lat)\n lon = prepare_input_array(lon)\n lon = np.mod(lon, 360)\n val = __model.lognormal_approximation_coefficient(lat, lon)\n u_adim = u.dimensionless_unscaled\n return (prepare_output_array(val[0], type_output) * u_adim,\n prepare_output_array(val[1], type_output) * u_adim,\n prepare_output_array(val[2], type_output) * u_adim)", "title": "" }, { "docid": "18fedaa43f8fe2abc3005e9c60224965", "score": "0.5587865", "text": "def gaussian_logpdf(yVy, yVmu, muVmu, logdet_V, D):\n return -0.5*yVy + yVmu - 0.5*muVmu + 0.5*logdet_V - 0.5*D*np.log(2*np.pi)", "title": "" }, { "docid": "255226f86a31e99d096fe80571f4c4ce", "score": "0.55872345", "text": "def z_lognorm(x: pd.Series):\n x = x - np.min(x) + 1\n base = x.median()\n x_norm = np.log(x)/np.log(base)-1\n std = x_norm.std()\n x_norm /= -std\n x_norm.loc[x_norm.isna()] = inaccurate_response\n return x_norm", "title": "" }, { "docid": "7b46a183f7c478dcbf91b394a974698f", "score": "0.5567669", "text": "def LogNormSpheres(q, A, mu, sigma, 
N=1000):\n Rmin = 0\n Rmax = np.exp(mu + 3 * sigma)\n R = np.linspace(Rmin, Rmax, N + 1)[1:]\n P = 1 / np.sqrt(2 * np.pi * sigma ** 2 * R ** 2) * np.exp(-(np.log(R) - mu) ** 2 / (2 * sigma ** 2))\n def Fsphere_outer(q, R):\n qR = np.outer(q, R)\n q1 = np.outer(q, np.ones_like(R))\n return 4 * np.pi / q1 ** 3 * (np.sin(qR) - qR * np.cos(qR))\n I = (Fsphere_outer(q, R) ** 2 * np.outer(np.ones_like(q), P))\n return A * I.sum(1) / P.sum()", "title": "" }, { "docid": "89bfb7bf4d3e54c6857ec451ba99f6fa", "score": "0.5561856", "text": "def do_log_return_moments_calculations(array: np.ndarray, annualisation_factor: float) -> dict:\n _, number_steps = array.shape\n log_returns = np.log(array[:, 1:] / array[:, :-1])\n return do_moments_calculations(log_returns, annualisation_factor)", "title": "" }, { "docid": "31b21ddadb67d9e2faa4ea17b5c940d5", "score": "0.55610573", "text": "def logistic_fcn( # pylint: disable=C0103\n x: FloatOrDistArray, L: FloatOrDistVar, k: FloatOrDistVar, x0: FloatOrDistVar,\n) -> FloatOrDistArray:\n return L / (1 + exp(-k * (x - x0)))", "title": "" }, { "docid": "1c77ad9d84e05d91a7e0d53e1180e7ff", "score": "0.5559325", "text": "def void_radii_dist_linear(self,logr,cosm):\n\n logr_l = log(exp(logr)/1.7)\n\n # D ; the void-and-cloud parameter\n D = self.void_and_cloud()\n\n # calculate volume from a given R\n V = (4. * pi * pow(exp(logr_l),3)) / 3.\n\n # get sigma from PowSpec class fit\n sigma = cosm.sig_fit(log(r))\n\n # get dln(1/sigma) / dln(r)\n dlns_dlnr = fabs(log(cosm.Dplus/cosm.growth(0.)) + cosm.dlnsigmadlnr(logr_l))\n\n # calculate f(sigma)\n if(np.isscalar(sigma)):\n fSig = self.multiplicity_function_jlh(self,sigma,D)\n else:\n fSig = self.multiplicity_function_jlh_vec(sigma,D)\n\n no_dens = (fSig/V) * dlns_dlnr\n\n return no_dens", "title": "" }, { "docid": "d6b979614397187143e2a2374d7180f3", "score": "0.5556822", "text": "def exact_log_det(mvp_vec, D, N):\n eye = np.eye(D)\n matlist = []\n for i in xrange(D):\n cur_dir = eye[:, i]\n matlist.append(mvp_vec(np.tile(cur_dir, (N, 1))))\n mat = np.concatenate([np.expand_dims(x, axis=2) for x in matlist], axis=2)\n logdets = []\n for cur_mat in mat: # Not vectorized, but could be if autograd supported vectorized calls to slogdet. Does it?\n sign, logdet = np.linalg.slogdet(cur_mat)\n logdets.append(logdet)\n assert len(logdets) == N\n return fast_array_from_list(logdets)", "title": "" } ]
8dea5d2418572c602a423cc45cb5061a
Test to start the specified service
[ { "docid": "bea8ebe3b44f298935e6ce5406c87de9", "score": "0.7395312", "text": "def test_start():\n mock_true = MagicMock(return_value=True)\n mock_false = MagicMock(return_value=False)\n mock_info = MagicMock(side_effect=[{\"Status\": \"Running\"}])\n\n with patch.object(win32serviceutil, \"StartService\", mock_true), patch.object(\n win_service, \"disabled\", mock_false\n ), patch.object(win_service, \"info\", mock_info):\n assert win_service.start(\"spongebob\") is True\n\n mock_info = MagicMock(\n side_effect=[\n {\"Status\": \"Stopped\", \"Status_WaitHint\": 0},\n {\"Status\": \"Start Pending\", \"Status_WaitHint\": 0},\n {\"Status\": \"Running\"},\n ]\n )\n\n with patch.object(win32serviceutil, \"StartService\", mock_true), patch.object(\n win_service, \"disabled\", mock_false\n ), patch.object(win_service, \"info\", mock_info), patch.object(\n win_service, \"status\", mock_true\n ):\n assert win_service.start(\"spongebob\") is True", "title": "" } ]
[ { "docid": "06347100c5d25a23c13c075938520e7a", "score": "0.7975998", "text": "def start_service(service):\n subprocess.call([\"service\", service, \"start\"])", "title": "" }, { "docid": "4502c4639ce33b3e8797f182e1151e8d", "score": "0.78600645", "text": "def test_stop_start(self):\n my_service = SimpleService()\n my_service.start()\n\n self.assertTrue(my_service.started)\n\n my_service.stop()\n\n self.assertFalse(my_service.started)\n\n my_service.start()\n\n # gevent.sleep(0) does not give the service enough time to run.\n gevent.sleep(0.001)\n self.assertTrue(my_service.started)", "title": "" }, { "docid": "5417d5c2829c3fc1ce02114c70b77201", "score": "0.7859392", "text": "def test_startService(self):\n self.poller.startService()\n self.timer.return_value.startService.assert_called_once_with()", "title": "" }, { "docid": "059b4aac45b338c93f0ed9efad153de1", "score": "0.7840608", "text": "def test_start_services(self):\n my_service = make_service(logger=mock.Mock())\n mock_greenlet = mock.Mock()\n\n my_service.add_service(mock_greenlet)\n\n my_service.join()\n\n mock_greenlet.join.assert_called_once_with()", "title": "" }, { "docid": "a978f01acba9b9d3673c158ab873b308", "score": "0.78269166", "text": "def test_start_stop_simple_service(self):\n self.executed = False\n\n def run(time):\n self.executed = True\n gevent.sleep(time)\n\n class MyService(service.Service):\n def do_start(self):\n self.spawn(run, 10)\n\n my_service = MyService()\n\n my_service.start()\n\n self.assertTrue(self.executed)\n self.assertTrue(my_service.started)\n\n my_service.stop()\n\n self.assertFalse(my_service.started)", "title": "" }, { "docid": "67b501bae5b4f3fac23bdcfe1784c75c", "score": "0.75843453", "text": "def test_do_start(self, mock_do_start):\n my_service = make_service()\n\n # Run something on service so it doesn't stop right away.\n my_service.spawn(gevent.sleep, 1)\n\n my_service.start()\n\n mock_do_start.assert_called_once_with()\n\n # ensure that `do_start` is called only once.\n my_service.start()\n\n mock_do_start.assert_called_once_with()", "title": "" }, { "docid": "3b54ffa2c83d33a949d15c29e6232b72", "score": "0.756705", "text": "def test_start_stop_simple_service2(self):\n self.executed = False\n\n def run(time):\n self.executed = True\n gevent.sleep(time)\n\n class MyService(service.Service):\n def do_start(self):\n self.spawn(run, 0)\n\n my_service = MyService()\n\n my_service.start()\n\n self.assertTrue(self.executed)\n self.assertTrue(my_service.started)\n\n my_service.join()\n\n self.assertFalse(my_service.started)", "title": "" }, { "docid": "4886ad9f752966aa19003bde67fbfe24", "score": "0.75114053", "text": "def testStartEverythingExplicitly(self):\n ss = self.ss\n ysst = YouShouldStartThis(store=ss)\n ss.powerUp(ysst, IService)\n self.topdb.powerUp(self.ssitem, IService)\n self._startService()\n self.failUnless(ysst.started)", "title": "" }, { "docid": "4dded99b20ef0c474fa2d3bf43e8a563", "score": "0.7482764", "text": "def testStartEverythingExplicitly(self):\n ss = self.ss\n ysst = YouShouldStartThis(store=ss)\n ss.powerUp(ysst, IService)\n self.topdb.powerUp(self.ssitem, IService)\n self._startService()\n self.assertTrue(ysst.started)", "title": "" }, { "docid": "43502633a26d690b168571affe0518e0", "score": "0.7462408", "text": "def start(service, *args, **kwargs):\n if service == 'all':\n s.info(\n f\"Starting all services: $[{', '.join(map(str, [s for s in Service.SERVICE_LIST]))}]\\n\")\n return [Service(s, *args, **kwargs).start() for s in Service.SERVICE_LIST]\n service = 
Service(service, *args, **kwargs)\n s.info(f\"Starting $[{service.name}]\\n\")\n service.start()", "title": "" }, { "docid": "74055e6c3f170f1d0bb66030371c43cc", "score": "0.7404273", "text": "def test_start(self):\n my_service = SimpleService()\n\n self.event_fired = False\n\n def on_start():\n self.event_fired = True\n\n my_service.on('start', on_start)\n\n self.assertFalse(my_service.started)\n\n my_service.start()\n\n self.assertTrue(my_service.started)\n self.assertTrue(self.event_fired)\n\n # now test calling `start` again. It should be idempotent\n self.event_fired = False\n\n my_service.start()\n\n self.assertFalse(self.event_fired)", "title": "" }, { "docid": "ae7371e8a02c4c30cca467eed816595a", "score": "0.73456526", "text": "def service_start(self, service):\n cmd = [\n 'service.start', service,\n '--out', 'newline_values_only'\n ]\n return self.run_salt(cmd)['stdout'].strip().lower() == b'true'", "title": "" }, { "docid": "7c2b6d9db240b5d8b8054edd1d18927c", "score": "0.72434205", "text": "def test_start_already_running():\n mock_false = MagicMock(return_value=False)\n mock_error = MagicMock(\n side_effect=pywintypes.error(1056, \"StartService\", \"Service is running\")\n )\n mock_info = MagicMock(side_effect=[{\"Status\": \"Running\"}])\n with patch.object(win32serviceutil, \"StartService\", mock_error), patch.object(\n win_service, \"disabled\", mock_false\n ), patch.object(win_service, \"_status_wait\", mock_info):\n assert win_service.start(\"spongebob\") is True", "title": "" }, { "docid": "80ab174e4ab83c3450d5afc13732604d", "score": "0.724007", "text": "def testDontStartNormally(self):\n ss = self.ss\n ycst = YouCantStartThis(store=ss)\n ss.powerUp(ycst, IService)\n self._startService()\n self.failIf(ycst.started)", "title": "" }, { "docid": "a58a98aa36a900fd9c00f6f4d132ea72", "score": "0.72063875", "text": "async def _service_start(self) -> bool:\n for _svc_id, service in self._services.items():\n service.start(True)\n return True", "title": "" }, { "docid": "82496c2eb08d72d491e516eb8912582c", "score": "0.71671325", "text": "def test_add_service_start(self, mock_watch):\n mock_watch.__name__ = 'watch_service'\n\n my_service = SimpleService()\n mock_service = mock.Mock()\n\n my_service.start()\n\n my_service.add_service(mock_service)\n gevent.sleep(0.0)\n\n mock_watch.assert_called_once_with(mock_service)\n self.assertEqual(my_service.services, [mock_service])", "title": "" }, { "docid": "4e41d4a127504ed193258297494a03c4", "score": "0.7141826", "text": "def _start_srv(service, name):\n service.start()\n sender.fire(CollectorServiceStartedEvent, srv_name=name)", "title": "" }, { "docid": "c381ed1a23acb5c02335ed1a567baad1", "score": "0.7132379", "text": "def _startService(self):\n assert not self.serviceStarted\n self.serviceStarted = True\n return IService(self.topdb).startService()", "title": "" }, { "docid": "c381ed1a23acb5c02335ed1a567baad1", "score": "0.7132379", "text": "def _startService(self):\n assert not self.serviceStarted\n self.serviceStarted = True\n return IService(self.topdb).startService()", "title": "" }, { "docid": "bb65b0d1beae7d8fb626ca3f7dd3b22a", "score": "0.71303374", "text": "def test_create_service_optional(self):\n pwd.getpwnam.return_value = self.mock_pwrow('test_shell', 'test_home')\n svc_dir = supervisor.create_scan_dir(self.root, 5000)\n\n supervisor.create_service(\n svc_dir,\n 'xx',\n 'ls -al',\n userid='proid1',\n monitor_policy={\n 'limit': 5,\n 'interval': 60,\n 'tombstone': {\n 'uds': True,\n 'path': '/run/tm_ctl/tombstone',\n 'id': 'xx'\n 
}\n },\n environ={\n 'b': 'test2'\n },\n trace={\n 'instanceid': 'xx',\n 'uniqueid': 'ID1234',\n 'service': 'xx',\n 'path': '/run/tm_ctl/appevents'\n }\n )\n service_dir = os.path.join(self.root, 'xx')\n self.assertTrue(os.path.isdir(service_dir))\n data_dir = os.path.join(service_dir, 'data')\n self.assertTrue(os.path.isfile(os.path.join(data_dir, 'app_start')))\n self.assertTrue(os.path.isfile(os.path.join(service_dir, 'run')))\n self.assertTrue(os.path.isfile(os.path.join(service_dir, 'finish')))\n self.assertTrue(os.path.isfile(os.path.join(service_dir, 'env/b')))", "title": "" }, { "docid": "e50a8bae31914bed567f815bac696211", "score": "0.71238977", "text": "def test_no_greenlets_or_child_services(self):\n self.started = False\n self.stopped = False\n\n def start():\n self.started = True\n\n def stop():\n self.stopped = True\n\n class MyService(service.Service):\n def do_start(self):\n start()\n\n def do_stop(self):\n stop()\n\n my_service = MyService()\n\n my_service.join()\n\n self.assertTrue(self.started)\n self.assertTrue(self.stopped)\n self.assertFalse(my_service.started)", "title": "" }, { "docid": "639c82a6d5cd37fd1c4cb9d350846342", "score": "0.7066224", "text": "def test_start_stop_with_child_service(self):\n self.executed = False\n\n def run(time):\n self.executed = True\n gevent.sleep(time)\n\n class ChildService(service.Service):\n def do_start(self):\n self.spawn(run, 10)\n\n my_service = make_service()\n child_service = ChildService()\n\n my_service.add_service(child_service)\n\n my_service.start()\n\n self.assertTrue(self.executed)\n self.assertTrue(my_service.started)\n self.assertTrue(child_service.started)\n\n my_service.stop()\n\n self.assertFalse(my_service.started)\n self.assertFalse(child_service.started)", "title": "" }, { "docid": "853b9419bcf7c55132c7fa0d0a732455", "score": "0.70637345", "text": "def test_service(host):\n assert host.service('prometheus').is_enabled\n assert host.service('prometheus').is_running\n assert host.service('prometheus-alertmanager').is_enabled\n assert host.service('prometheus-alertmanager').is_running", "title": "" }, { "docid": "d37e5fbfc126323c888d25b764ed6804", "score": "0.6964373", "text": "def start(app):\n _service(app, 'start')", "title": "" }, { "docid": "4ddf6c61746db27f0d3d0d82db7f966f", "score": "0.6961026", "text": "def testDontStartNormally(self):\n ss = self.ss\n ycst = YouCantStartThis(store=ss)\n ss.powerUp(ycst, IService)\n self._startService()\n self.assertFalse(ycst.started)", "title": "" }, { "docid": "76cafe9965e21ca2d6c805b5d12f9dac", "score": "0.6879523", "text": "def test_docker_service(host):\n dockerd = host.service('docker')\n\n assert dockerd.is_running", "title": "" }, { "docid": "e3c476490a42a5a2c0c7a6ffdf0b1c56", "score": "0.68464386", "text": "def start_srv(start, process, destination_address=world.f_cfg.mgmt_address):\n if destination_address not in world.f_cfg.multiple_tested_servers:\n world.multiple_tested_servers.append(destination_address)\n\n if world.f_cfg.install_method == 'make':\n v4_running, v6_running = _check_kea_status(destination_address)\n\n if process is None:\n process = \"starting\"\n # check process - if None add some.\n\n if v4_running and world.proto == 'v4' or v6_running and world.proto == 'v6':\n result = _stop_kea_with_keactrl(destination_address) # TODO: check result\n\n result = _start_kea_with_keactrl(destination_address)\n _check_kea_process_result(start, result, process)\n else:\n _restart_kea_with_systemctl(destination_address)", "title": "" }, { "docid": 
"ef73e79351bd5e09d9882c41e3701ee0", "score": "0.68412435", "text": "def test_start_stop_with_child_service2(self):\n self.executed = False\n\n def run(time):\n self.executed = True\n gevent.sleep(time)\n\n class ChildService(service.Service):\n def do_start(self):\n self.spawn(run, 0)\n\n my_service = make_service()\n child_service = ChildService()\n\n my_service.add_service(child_service)\n\n my_service.start()\n\n self.assertTrue(self.executed)\n self.assertTrue(my_service.started)\n self.assertTrue(child_service.started)\n\n my_service.join()\n\n self.assertFalse(my_service.started)\n self.assertFalse(child_service.started)", "title": "" }, { "docid": "0f884aecdc4782f2e7ce30b124f83501", "score": "0.6841097", "text": "def _start(self, units, start):\n\n if start:\n action = \"start\"\n else:\n action = \"stop\"\n\n if not Trivial.is_iterable(units):\n units = [units]\n\n for unit in units:\n self._proc.run_verify(f\"systemctl {action} -- '{unit}'\")\n if self.is_active(unit) != start:\n status = None\n try:\n status, _ = self._proc.run_verify(f\"systemctl status -- '{unit}'\")\n except Error:\n pass\n\n msg = f\"failed to {action} systemd unit '{unit}'\"\n if status:\n msg += f\", here is its current status:\\n{status}\"\n raise Error(msg)", "title": "" }, { "docid": "d135432927ac9bd99bdc08edffc1b40e", "score": "0.67349553", "text": "def setUp(self):\n self._service = Service()\n self._service.setUp()\n time.sleep(1) # wait until the service is available", "title": "" }, { "docid": "5dd039ebf1888edd1140fee4215d8a15", "score": "0.6731426", "text": "def test_create_service(self):\n pwd.getpwnam.return_value = self.mock_pwrow('test_shell', 'test_home')\n\n supervisor.create_service(\n self.root,\n 'xx',\n 'ls -al',\n userid='proid1',\n downed=True\n )\n service_dir = os.path.join(self.root, 'xx')\n self.assertTrue(os.path.isdir(service_dir))\n data_dir = os.path.join(service_dir, 'data')\n self.assertTrue(os.path.isfile(os.path.join(data_dir, 'app_start')))\n self.assertTrue(os.path.isfile(os.path.join(service_dir, 'run')))\n self.assertFalse(os.path.isfile(os.path.join(service_dir, 'finish')))\n self.assertTrue(os.path.isfile(os.path.join(service_dir, 'down')))\n\n # Do not create down file.\n supervisor.create_service(\n self.root,\n 'bar',\n 'proid1',\n 'ls -al',\n downed=False\n )\n service_dir = os.path.join(self.root, 'bar')\n self.assertFalse(os.path.exists(os.path.join(service_dir, 'down')))", "title": "" }, { "docid": "acc1dcebc814fd6ba40cfc793573d071", "score": "0.6693825", "text": "def test_service(host):\n\n assert host.service('kibana').is_enabled\n\n if host.system_info.codename == 'jessie':\n jessie_check = host.run('service kibana status')\n assert jessie_check.rc == 0\n assert 'is running' in jessie_check.stdout\n else:\n assert host.service('kibana').is_running\n assert host.check_output('systemctl status kibana').rc == 0", "title": "" }, { "docid": "3bb4a7b704d68b6fc8b1763547ab8efc", "score": "0.66519433", "text": "def loadService(self, service, **kargs):\n return self.start_service(service, **kargs)", "title": "" }, { "docid": "a604adcedc4b7b5432d9fd0a3040dd14", "score": "0.6641564", "text": "def test_start(self):\n an_container = models.Container.get(self.client, 'an-container')\n\n an_container.start(wait=True)", "title": "" }, { "docid": "ea4d937ff338ea48e3b1f1b52d48cf46", "score": "0.66263324", "text": "def start(self, restart=True):\n if not self.is_enabled():\n LOG.error('The service is not enabled')\n return False\n\n LOG.info('Starting %s services via systemd', 
self.name)\n if self.is_running() and restart:\n subprocess.check_call(\n ['systemctl', 'restart', '{0}.target'.format(self.name)])\n else:\n subprocess.check_call(\n ['systemctl', 'start', '{0}.target'.format(self.name)])\n return True", "title": "" }, { "docid": "41c275b13f84e66870b3241f7f69d7da", "score": "0.6613791", "text": "def test_on_start():\n assert started", "title": "" }, { "docid": "c72d7b0bda6aa5537dbf11ef27fe814f", "score": "0.6596695", "text": "def _restart_service(self, container, service):\n\n if container.get_service(service).is_running():\n container.stop(service)\n # Restart it and report a new status to Juju\n container.start(service)\n logging.info(f\"Restarted service: {service}\")\n # workaround for https://github.com/canonical/pebble/issues/46\n\n if self._check_karma_service_alive():\n self.unit.status = ActiveStatus()\n else:\n self.unit.status = UnknownStatus()", "title": "" }, { "docid": "0258c4e6d6cae892fb8a42679e0f1b5f", "score": "0.6567258", "text": "def startService(handler, *args, **kwargs):\n\n preProcessHandler(handler)\n enforceSolitude(handler)\n if handler._dbus_kill: sys.exit()\n if handler._dbus_go_daemon: _goDaemon()\n\n print(\"{:s} daemon: {:d}\".format(handler._dbus_service, os.getpid()))\n\n postProcessHandler(handler, *args, **kwargs)", "title": "" }, { "docid": "d8cd50f81385700ecb49aa294c6df0b7", "score": "0.65619165", "text": "def test_control_service(self):\n # Disable W0212(protected-access)\n # pylint: disable=W0212\n self.assertTrue(supervisor.control_service(\n self.root, supervisor.ServiceControlAction.down\n ))\n treadmill.subproc.check_call.assert_called_with(\n [supervisor._get_cmd('svc'), '-d', self.root]\n )\n\n self.assertTrue(supervisor.control_service(\n self.root, (\n supervisor.ServiceControlAction.up,\n supervisor.ServiceControlAction.once_at_most,\n ),\n timeout=100, # Should not be used\n ))\n treadmill.subproc.check_call.assert_called_with(\n [supervisor._get_cmd('svc'), '-uO', self.root]\n )\n\n self.assertTrue(supervisor.control_service(\n self.root, supervisor.ServiceControlAction.up,\n wait=supervisor.ServiceWaitAction.up,\n timeout=100,\n ))\n treadmill.subproc.check_call.assert_called_with(\n [supervisor._get_cmd('svwait'), '-t100', '-u', self.root]\n )\n\n treadmill.subproc.check_call.side_effect = \\\n subproc.CalledProcessError(1, supervisor._get_cmd('svc'))\n self.assertRaises(\n subproc.CalledProcessError,\n supervisor.control_service,\n self.root,\n supervisor.ServiceControlAction.down\n )\n treadmill.subproc.check_call.assert_called_with(\n [supervisor._get_cmd('svc'), '-d', self.root]\n )", "title": "" }, { "docid": "802c80d07e384420a1ca5a16e2cec5a1", "score": "0.6535537", "text": "def on_service_add(self, service):\n self.launch_thread(service.name, self.check_loop, service)", "title": "" }, { "docid": "aed275ab4149a09ebabf47c6bc20b774", "score": "0.65170586", "text": "def service_start(self, svc_manager):\n # TODO: Functionality of this method should be moved to the\n # Package.handle_svc_start() method\n svc_name = svc_manager.svc_name\n\n # Discover initial service status\n try:\n ret_code, stdout, stderr = svc_manager.status()\n except svcm_exc.ServiceManagerCommandError as e:\n err_msg = f'Get initial service status: {svc_name}: {e}'\n raise type(e)(err_msg) from e # Subject to retry\n else:\n log_msg = (f'Get initial service status: {svc_name}:'\n f'stdout[{stdout}] stderr[{stderr}]')\n LOG.debug(log_msg)\n svc_status = str(stdout).strip().rstrip('\\n')\n\n # Manage service appropriately to its 
status\n if svc_status == SVC_STATUS.STOPPED:\n # Start a service\n try:\n svc_manager.start()\n except svcm_exc.ServiceManagerCommandError as e:\n err_msg = f'Start service: {svc_name}: {e}'\n raise type(e)(err_msg) from e # Subject to retry\n # Verify that service is running\n try:\n ret_code, stdout, stderr = svc_manager.status()\n LOG.debug(f'Get final service status: {svc_name}:'\n f'stdout[{stdout}] stderr[{stderr}]')\n svc_status = str(stdout).strip().rstrip('\\n')\n\n if svc_status == SVC_STATUS.START_PENDING:\n msg = f'Service is starting: {svc_name}'\n LOG.debug(msg)\n raise svcm_exc.ServiceTransientError(msg) # Subject to retry\n elif svc_status != SVC_STATUS.RUNNING:\n err_msg = (f'Start service: {svc_name}: Failed:'\n f' {svc_status}')\n raise svcm_exc.ServicePersistentError(err_msg)\n except svcm_exc.ServiceManagerCommandError as e:\n err_msg = f'Get final service status: {svc_name}: {e}'\n raise type(e)(err_msg) from e # Subject to retry\n elif svc_status == SVC_STATUS.START_PENDING:\n msg = f'Service is starting: {svc_name}: ...'\n LOG.debug(msg)\n raise svcm_exc.ServiceTransientError(msg) # Subject to retry\n elif svc_status == SVC_STATUS.RUNNING:\n LOG.debug(f'Service is already running: {svc_name}')\n else:\n err_msg = f'Invalid service status: {svc_name}: {svc_status}'\n raise svcm_exc.ServicePersistentError(err_msg)", "title": "" }, { "docid": "8400ab7d189fa75accb03fcd3a0aab8c", "score": "0.6509138", "text": "def test_03_verify_launch_services(self):\n self.fc.flow_load_home_screen()\n self.home.select_menu()\n self.home.select_plugins_tab_from_menu()\n self.home.select_service_routing_plugin()\n self.service_plugin.enter_service_launch_data('{\"https://bruce-williams.github.io\"}')\n self.service_plugin.select_launch_service_test_btn()\n assert self.service_plugin.get_service_launch_result() == {'errorType': 'invalidOptions', 'message': 'url is missing from openUrl serviceOptions'}\n self.service_plugin.select_get_service_instance_test_btn()\n assert self.service_plugin.get_service_instance_result() == {'errorType': 'serviceInstanceNotFound', 'message': 'The service instance with id: was not found'}\n for i in saf_misc.load_json(ma_misc.get_abs_path(TEST_DATA.SERVICE_ROUTING))[\"urls\"]:\n data = '{\"url\":\"'+i+'\"}'\n self.service_plugin.enter_service_launch_data(data)\n self.service_plugin.select_launch_service_test_btn()\n if all(word not in data for word in [\"localhost\", \"chrisgeohringhp\"]):\n self.driver.press_key_back()\n self.service_plugin.select_add_listener_test_btn()\n self.service_plugin.enter_service_launch_data(data)\n self.service_plugin.select_launch_service_test_btn()\n self.service_plugin.select_event_close_button(1)\n self.service_plugin.select_event_close_button(0)", "title": "" }, { "docid": "a7a52197abf53efadf25ed66ad7dd3a6", "score": "0.6474801", "text": "def test_get_single_service(self):\n pass", "title": "" }, { "docid": "12abf4543cb8382f87d6f9bf6ac980bb", "score": "0.6469933", "text": "def test_start(self) -> None:\n self._test_worker_command(\"start\")", "title": "" }, { "docid": "751cffb772f9e7d33537f108087ae99f", "score": "0.6414413", "text": "def start(self):\n LOG.info(\"Start service.\\n\")\n\n try:\n LOG.debug(\"Wait for game start.\\n\")\n self.wait_start()\n LOG.debug(\"Game started.\\n\")\n self.run()\n finally:\n LOG.info(\"Stop service.\\n\")\n GPIO.cleanup()", "title": "" }, { "docid": "ddeae30f33526849f236b446300bfab5", "score": "0.6411617", "text": "def run(cls):\n if not all(v is None for v in cls._services.values()):\n 
cls._set_host_id()\n cls._setup_logging()\n cls._set_process_name()\n cls._set_signal_handlers()\n cls._start_server()\n else:\n cls._logger.error('No services to host')", "title": "" }, { "docid": "96e10fd0fe319679b7fef43cc58a9197", "score": "0.6385642", "text": "def open_service(self):\n try:\n self.service = build(self.service_name, self.version, credentials=self.creds)\n except Exception as e:\n raise AssertionError(e)", "title": "" }, { "docid": "0e64c105c27f3fd5e73ae14c91b85e83", "score": "0.63688385", "text": "def test_get_service(self):\n pass", "title": "" }, { "docid": "8476550039a3de4ea889de2421514f87", "score": "0.6300333", "text": "def enable_service(self, service):\n call_throws(['systemctl', 'enable', '--now', service])", "title": "" }, { "docid": "76b8f61e4c550800fac6da6ed6caeba5", "score": "0.6295617", "text": "def start_pacemaker_service(mnode):\n\n cmd = \"systemctl start pacemaker\"\n return g.run(mnode, cmd)", "title": "" }, { "docid": "878fd66e4ae366e7606518cef9d3b4a3", "score": "0.62762386", "text": "def test_add_service(self, mock_spawn):\n my_service = make_service()\n mock_service = mock.Mock()\n\n my_service.add_service(mock_service)\n\n self.assertFalse(mock_spawn.called)\n\n self.assertEqual(my_service.services, [mock_service])", "title": "" }, { "docid": "25e10e55d8acf865972921e7c5035557", "score": "0.62576526", "text": "def test(self, name=\"\"):\n if self.running():\n self.stop()\n\n self.start()\n assert self.running()\n self._log_info(\"openresty is running\")", "title": "" }, { "docid": "5a98c3bb5612a8fed8384780bfd8f4e6", "score": "0.62374693", "text": "def manage_service(service, command):\n cmd = [\"/bin/service\", \"--quiet\", service, command]\n LOGGER.debug(\"%s service %s..\" % (command, service))\n subprocess.Popen(cmd, close_fds=True, preexec_fn=fork_handler,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n LOGGER.debug(\"%s service %s..done\" % (command, service))\n SPLASH.update(service)", "title": "" }, { "docid": "5143bce89cc96dbd908a9adc2eedae83", "score": "0.6217335", "text": "def isServiceRunning(positive, host, user, password, service):\n result = False\n machine = Machine(host, user, password).util(LINUX)\n if machine is not None:\n result = machine.isServiceRunning(service)\n\n return result == positive", "title": "" }, { "docid": "24a3e8a0e540cc5cd263e42c79cc22b0", "score": "0.6208803", "text": "def start_subprocess(self):\n \n self.server = InetdService(self)\n yield from self.server.run()\n\n self.loginfo(\"inetd service {0} listening on port {1}\".format(self.name, self.port))", "title": "" }, { "docid": "3516508b4c0f6187d64ce6f30f03457a", "score": "0.620731", "text": "def enable_service(service, failonerror=True):\n if os.path.exists(\"/run/systemd\"):\n exec_command(\"/usr/bin/systemctl enable %s\" % (service), not failonerror)\n else:\n exec_command(\"/sbin/chkconfig %s on\" % (service), not failonerror)", "title": "" }, { "docid": "8cc8335477dcc4d589c0795678185441", "score": "0.6206382", "text": "def start() -> None:\n \n active_server_name = srv_mgr.get_active_server_name()\n if not active_server_name:\n print(f'No active server found. Set an active server?\\n')\n sys.exit(1)\n \n if srv_mgr.is_server_running():\n print(f'Looks like this server {active_server_name} is already running. 
Use restart instead of start.')\n sys.exit(1)\n\n if srv_mgr.start_server():\n print(f\"Server {active_server_name} started succesfully!\")\n sys.exit(0)\n\n print(f\"Server {active_server_name} could not be started!\")\n sys.exit(1)", "title": "" }, { "docid": "03183ae2432419c9d43ce65d3543351d", "score": "0.6169518", "text": "def test_create(self):\n from gevent import pool\n\n my_service = make_service()\n\n self.assertFalse(my_service.started)\n self.assertEqual(my_service.services, [])\n self.assertIsInstance(my_service.pool, pool.Group)", "title": "" }, { "docid": "ff90266bb361dad305e37bae7d071dae", "score": "0.61596406", "text": "def start_srv(start, process, destination_address=world.f_cfg.mgmt_address):\n world.cfg['leases'] = world.f_cfg.software_install_path + 'var/kea/kea-leases4.csv'\n v6, v4 = check_kea_status(destination_address)\n\n if process is None:\n process = \"starting\"\n # check process - if None add some.\n if not v4:\n result = fabric_sudo_command('( ' + world.f_cfg.software_install_path + 'sbin/keactrl start '\n + ' & ); sleep ' + str(world.f_cfg.sleep_time_1),\n destination_host=destination_address)\n check_kea_process_result(start, result, process)\n else:\n result = fabric_sudo_command('(' + world.f_cfg.software_install_path + 'sbin/keactrl stop '\n + ' & ); sleep ' + str(world.f_cfg.sleep_time_1),\n destination_host=destination_address)\n # check_kea_process_result(start, result, process)\n result = fabric_sudo_command('(' + world.f_cfg.software_install_path + 'sbin/keactrl start '\n + ' & ); sleep ' + str(world.f_cfg.sleep_time_1),\n destination_host=destination_address)\n check_kea_process_result(start, result, process)\n sleep(2)", "title": "" }, { "docid": "6e1bc7fd73409b244028c6cde8bef84c", "score": "0.61467177", "text": "def test_service(self):\n net = INetSim(\"dummy\")\n net.start()\n time.sleep(1)\n count = net.proc_count()\n print(f\"[*] Start count: {count}\")\n self.assertTrue(count > 15)\n\n net.stop()\n count = net.proc_count()\n print(f\"[*] Stop count: {count}\")\n self.assertTrue(count == 0)", "title": "" }, { "docid": "fc70154564fb56b22fd6a75af8e2e377", "score": "0.61420697", "text": "def test_empty_on_start(self):\n service = self.service(FilePath(self.mktemp()))\n self.assertEqual(service.get(), Deployment(nodes=frozenset()))", "title": "" }, { "docid": "f262a5ec3d22cd27c4b6c12115ef95f7", "score": "0.6138308", "text": "def startService(self):\n super(ControllerQueue, self).startService()\n self._workCheckLoop()\n self._overdueCheckLoop()", "title": "" }, { "docid": "c223511a0f9906776692b34db1925cea", "score": "0.61370045", "text": "def run_service(cmd):\n subprocess.Popen(f\"{cmd} >/dev/null 2>&1 &\", shell=True)", "title": "" }, { "docid": "8b7fb56bf70305f38ffe6497642e92a3", "score": "0.6127151", "text": "def test_do_start_error(self, mock_teardown):\n class MyService(service.Service):\n def do_start(self):\n raise RuntimeError\n\n my_service = MyService()\n my_service.logger = mock.Mock()\n\n with self.assertRaises(RuntimeError):\n my_service.start()\n\n mock_teardown.assert_called_once_with()", "title": "" }, { "docid": "804ea12e3ed717e8eceac8ed2755e838", "score": "0.61255825", "text": "def start_services(extras=None):\n import dbus\n\n os.setuid(0)\n try:\n bus = dbus.SystemBus()\n except dbus.DBusException:\n UI.error(_(\"Cannot connect to DBus, services won't be started\"))\n return\n\n if extras:\n # Start only the services given in extras\n for service in extras:\n try:\n manage_service(service, \"start\")\n except 
dbus.DBusException:\n pass\n\n else:\n # Start network service first\n try:\n manage_service(\"NetworkManager\", \"ready\")\n except Exception, error:\n UI.warn(_(\"Unable to start network:\\n %s\") % error)\n\n # Almost everything depends on logger, so start manually\n manage_service(\"rsyslog\", \"start\")\n if not wait_bus(\"/dev/log\", stream=False, timeout=15):\n UI.warn(_(\"Cannot start system logger\"))\n\n # Mount remote filesystems if any\n mount_remote_filesystems()\n\n if not CONFIG.get(\"safe\"):\n UI.info(_(\"Starting services\"))\n services = get_service_list(bus)\n\n # Remove already started services\n services = set(services).difference([\"rsyslog\", \"NetworkManager\"])\n\n # Give login screen a headstart\n head_start = CONFIG.get(\"head_start\")\n run_head_start = head_start and head_start in services\n\n # Decide whether we'll stop plymouth or not\n stop_plymouth = \"off\" in get_kernel_option(\"xorg\") or \\\n not run_head_start\n if run_head_start:\n manage_service(head_start, \"ready\")\n services.remove(head_start)\n\n # Run other services\n for service in services:\n manage_service(service, \"ready\")\n\n if stop_plymouth:\n # Stop plymouth\n SPLASH.quit(retain_splash=False)\n\n # Close the handle\n bus.close()", "title": "" }, { "docid": "34281a54cab94832bd35051f6c00eb5f", "score": "0.6114792", "text": "def test_04_verify_get_service_instance(self):\n self.fc.flow_load_home_screen()\n self.home.select_menu()\n self.home.select_plugins_tab_from_menu()\n self.home.select_service_routing_plugin()\n self.service_plugin.enter_service_launch_data('{\"url\":\"https://bruce-williams.github.io\"}')\n self.service_plugin.select_get_service_instance_test_btn()\n svc_id = self.service_plugin.get_service_instance_svc_id()\n self.service_plugin.enter_get_service_instance_svc_id(\"none\")\n assert self.service_plugin.get_service_instance_result() == {'errorType': 'serviceInstanceNotFound', 'message': 'The service instance with id: was not found'}\n self.service_plugin.enter_get_service_instance_svc_id(svc_id)\n self.service_plugin.select_get_service_instance_test_btn()\n assert self.service_plugin.get_service_instance_result() != {'errorType': \"serviceNotFound\"}", "title": "" }, { "docid": "4b46a235cd465789747e3300db3048af", "score": "0.61147916", "text": "def start_time_service():\n return __salt__[\"service.start\"](\"w32time\")", "title": "" }, { "docid": "c7fcb9cdd661489312710c5cd520d282", "score": "0.60939896", "text": "def test_stop_services(self):\n my_service = SimpleService()\n my_service.start()\n\n mock_greenlet = mock.Mock()\n\n my_service.services = [mock_greenlet]\n\n my_service.stop()\n\n self.assertEqual(my_service.services, [])\n\n mock_greenlet.stop.assert_called_once_with()", "title": "" }, { "docid": "b7933836d38ae6765c0642359cd82bc3", "score": "0.6087366", "text": "def exec_service(service, command, failonerror=True):\n if os.path.exists(\"/run/systemd\"):\n exec_command(\"/usr/bin/systemctl %s %s\" % (command, service), not failonerror)\n else:\n exec_command(\"/sbin/service %s %s\" % (service, command), not failonerror)", "title": "" }, { "docid": "c24a55cb8e1dc983908bee8040e3b315", "score": "0.60686725", "text": "def start(self):\n\tif self.m_handle is not None:\n\t raise Error, 'Service already started.'\n\tself.m_stop = False\n\tself.m_handle = _cpg.initialize(self)\n\t_cpg.join(self.m_handle, self.m_name)", "title": "" }, { "docid": "2736bdbaeadaae95edea09e53ecc2994", "score": "0.6066672", "text": "def test_start_emit_error(self, mock_stop):\n 
my_service = make_service(logger=mock.Mock())\n\n @my_service.on('start')\n def on_start():\n raise RuntimeError()\n\n with self.assertRaises(RuntimeError):\n my_service.start()\n\n self.assertTrue(mock_stop.called)", "title": "" }, { "docid": "b5f71677881c5b6c19fed2cab3df491e", "score": "0.6056587", "text": "def start_service() -> None:\n\n loop: asyncio.AbstractEventLoop = asyncio.get_event_loop()\n\n try:\n log_manager = LogRecordManager()\n record_handler = LogRecordHandler(log_manager=log_manager)\n\n app = web.Application()\n app.router.add_post('/read_log', handler=record_handler.get_list)\n\n web.run_app(\n app=app,\n host=CONFIGURATION['SERVICE_HOST'],\n port=CONFIGURATION['SERVICE_PORT'],\n )\n\n except KeyboardInterrupt:\n pass\n\n finally:\n loop.close()", "title": "" }, { "docid": "f4f9f38278f2d484565c2df8ae7b5710", "score": "0.6031784", "text": "def require_service(srv_name, srv_type, port=None, user_name=None, password=None):\n try:\n return create_service(srv_name, srv_type, port, user_name, password)\n except ServiceAlreadyExists:\n srv = get_service(srv_name)\n if not srv.is_running():\n srv.start()\n return srv", "title": "" }, { "docid": "0b64278ff94abb4900730ff50728dcd6", "score": "0.60243386", "text": "def start_services(\n self, services: List[str], timeout: float = 30.0, delay: float = 0.1,\n ) -> ChangeID:\n return self._services_action('start', services, timeout, delay)", "title": "" }, { "docid": "244d26b7e558767f06105c5dfc921b0a", "score": "0.60079616", "text": "def test_stopping_with_a_child_service(self):\n self.executed = [False, False]\n\n def run(index, time):\n self.executed[index] = True\n gevent.sleep(time)\n\n class MyService(service.Service):\n def do_start(self):\n self.spawn(run, 0, 0)\n\n class ChildService(service.Service):\n def do_start(self):\n self.spawn(run, 1, 10)\n\n my_service = MyService()\n child_service = ChildService()\n\n my_service.add_service(child_service)\n\n my_service.start()\n\n self.assertTrue(all(self.executed))\n self.assertTrue(my_service.started)\n self.assertTrue(child_service.started)\n\n my_service.stop()", "title": "" }, { "docid": "e4de5f0fb06e4264871eead93d199cea", "score": "0.600666", "text": "def test_start_job(self):\n pass", "title": "" }, { "docid": "74c692ecbe5e3cbdb1a525f8a1df6c87", "score": "0.60015714", "text": "def test_start_process(self):\n from supvisors.mainloop import SupvisorsMainLoop\n main_loop = SupvisorsMainLoop(self.supvisors)\n # test rpc error\n self.mocked_rpc.side_effect = Exception\n main_loop.start_process('10.0.0.1', 'dummy_process', 'extra args')\n self.assertEqual(2, self.mocked_rpc.call_count)\n self.assertEqual(call('10.0.0.1', main_loop.env), self.mocked_rpc.call_args)\n # test with a mocked rpc interface\n rpc_intf = DummyRpcInterface()\n self.mocked_rpc.side_effect = None\n self.mocked_rpc.return_value = rpc_intf\n with patch.object(rpc_intf.supvisors, 'start_args') as mocked_supvisors:\n main_loop.start_process('10.0.0.1', 'dummy_process', 'extra args')\n self.assertEqual(3, self.mocked_rpc.call_count)\n self.assertEqual(call('10.0.0.1', main_loop.env), self.mocked_rpc.call_args)\n self.assertEqual(1, mocked_supvisors.call_count)\n self.assertEqual(call('dummy_process', 'extra args', False), mocked_supvisors.call_args)", "title": "" }, { "docid": "cd49fcc523f752069e88f7927d1377b8", "score": "0.59989476", "text": "def test_start(self):\n def cb(container):\n self.assertEqual(container, {'Id': 'mocked'})\n self.builder.start(callback=cb)\n 
self.instance.start.assert_called_with(container='mocked')", "title": "" }, { "docid": "55067d3638e1dd28c0d2069a07fac37f", "score": "0.59964466", "text": "def started_services():\n for srv, status in rc_status('--servicelist'):\n if status == 'started':\n yield srv", "title": "" }, { "docid": "88686fe6e0555b00345916dc420e2ffc", "score": "0.5985676", "text": "def start_service(self, service_name, service_type, folder=''):\n\n self._post(self._get_path(\"start_service\", service_path=self._get_service_path(service_name, folder),\n service_type=service_type))", "title": "" }, { "docid": "fb49965034dd41501cef303eae5f46f6", "score": "0.59566146", "text": "def start(test, config):\n thread = DuringTestThread(test, config)\n thread.daemon = True\n thread.start()\n return thread.stop", "title": "" }, { "docid": "bee4842342cadde5aaa2f15a458c1641", "score": "0.59550124", "text": "def start(self):\n if self.service.getProcessDicts() == []:\n return\n\n def start2(process, nbr=None):\n\n cwd = process[\"cwd\"]\n # args['process'] = process\n if nbr is None:\n self.stop()\n\n tcmd = process[\"cmd\"]\n if tcmd == \"jspython\":\n tcmd = \"source %s/env.sh;jspython\" % (j.dirs.base)\n\n targs = process[\"args\"]\n tuser = process[\"user\"]\n if tuser == \"\":\n tuser = \"root\"\n tlog = self.service.hrd.getBool(\"process.log\", default=True)\n env = process[\"env\"]\n\n startupmethod = process[\"startupmanager\"]\n domain, name = self._getDomainName(process)\n if nbr is not None:\n name = \"%s.%d\" % (name, i)\n log(\"Starting %s:%s\" % (domain, name))\n\n if startupmethod == 'upstart':\n # check if we are in our docker image which uses myinit instead\n # of upstart\n if j.sal.fs.exists(path=\"/etc/my_init.d/\"):\n cmd2 = \"%s %s\" % (tcmd, targs)\n extracmds = \"\"\n if cmd2.find(\";\") != -1:\n parts = cmd2.split(\";\")\n extracmds = \"\\n\".join(parts[:-1])\n cmd2 = parts[-1]\n\n C = \"#!/bin/sh\\nset -e\\ncd %s\\nrm -f /var/log/%s.log\\n%s\\nexec %s >>/var/log/%s.log 2>&1\\n\" % (\n cwd, name, extracmds, cmd2, name)\n j.sal.fs.remove(\"/var/log/%s.log\" % name)\n j.sal.fs.createDir(\"/etc/service/%s\" % name)\n path2 = \"/etc/service/%s/run\" % name\n j.sal.fs.writeFile(path2, C)\n j.sal.fs.chmod(path2, 0o770)\n j.sal.process.execute(\n \"sv start %s\" % name, die=False, outputToStdout=False, outputStderr=False, captureout=False)\n else:\n j.sal.ubuntu.service_install(name, tcmd, pwd=cwd, env=env)\n j.sal.ubuntu.service_start(name)\n\n elif startupmethod == \"tmux\":\n j.tools.cuisine.local.tmux.executeInScreen(\n domain, name, tcmd + \" \" + targs, cwd=cwd, env=env, user=tuser) # , newscr=True)\n\n else:\n raise j.exceptions.RuntimeError(\n \"startup method not known or disabled:'%s'\" % startupmethod)\n\n # if msg==\"\":\n # pids=self.getPids(ifNoPidFail=False,wait=False)\n # if len(pids) != self.numprocesses:\n # msg=\"Could not start, did not find enough running instances, needed %s, found %s\"%(self.numprocesses,len(pids))\n\n # if msg==\"\" and pids!=[]:\n # for pid in pids:\n # test=j.sal.process.isPidAlive(pid)\n # if test==False:\n # msg=\"Could not start, pid:%s was not alive.\"%pid\n\n # if log!=\"\":\n # msg=\"%s\\nlog:\\n%s\\n\"%(msg,log)\n\n # self.raiseError(msg)\n # return\n\n isrunning = self.check_up(wait=False)\n if isrunning:\n return\n\n processes = self.service.getProcessDicts()\n for i, process in enumerate(processes):\n\n if \"platform\" in process:\n if not j.core.platformtype.myplatform.checkMatch(process[\"platform\"]):\n continue\n if len(processes) > 1:\n start2(process, 
nbr=i)\n else:\n start2(process)\n\n isrunning = self.check_up()\n if isrunning is False:\n msg = \"\"\n\n if self.service.getTCPPorts() == [0]:\n print('Done ...')\n elif self.service.getTCPPorts() != []:\n ports = \",\".join([str(item)\n for item in self.service.getTCPPorts()])\n msg = \"Could not start:%s, could not connect to ports %s.\" % (\n self.service, ports)\n j.events.opserror_critical(msg, \"service.start.failed.ports\")\n else:\n j.events.opserror_critical(\n \"could not start:%s\" % self.service, \"service.start.failed.other\")", "title": "" }, { "docid": "877be0ce72ae362b726fa65f3d7cfeab", "score": "0.5945746", "text": "def start(self):\n if self.main_service_name is None:\n raise ValueError('Main service not set! Please call \"set_main_service\"')\n\n self.services[self.main_service_name].main(\n self._get_to_send(self.main_service_name),\n self.configs[self.main_service_name]\n )", "title": "" }, { "docid": "f92c665cc9ae02c0fdfbf4ce0ed2cea5", "score": "0.59302753", "text": "def test_join(self, mock_start, mock_stop):\n my_service = make_service()\n thread = my_service._run_thread = mock.Mock()\n\n my_service.join()\n\n mock_start.assert_called_once_with()\n thread.get.assert_called_once_with()\n mock_stop.assert_called_once_with()", "title": "" }, { "docid": "7c2f04cd5149a360725a14c4f4059025", "score": "0.59240025", "text": "def test_control_service_wait(self):\n # Disable W0212(protected-access)\n # pylint: disable=W0212\n\n # shutdown supervised service\n res = supervisor.control_service(\n self.root, supervisor.ServiceControlAction.down,\n wait=supervisor.ServiceWaitAction.down,\n timeout=100,\n )\n\n treadmill.subproc.check_call.assert_called_with(\n [supervisor._get_cmd('svc'), '-d', self.root]\n )\n treadmill.supervisor.wait_service.assert_called_with(\n self.root,\n treadmill.supervisor.ServiceWaitAction.down,\n timeout=100\n )\n self.assertTrue(res)\n\n # shutdown service timeouts\n treadmill.subproc.check_call.reset_mock()\n supervisor.wait_service.side_effect = \\\n subproc.CalledProcessError(99, supervisor._get_cmd('svwait'))\n\n res = supervisor.control_service(\n self.root, supervisor.ServiceControlAction.down,\n wait=supervisor.ServiceWaitAction.down,\n timeout=100,\n )\n treadmill.subproc.check_call.assert_called_with(\n [supervisor._get_cmd('svc'), '-d', self.root]\n )\n treadmill.supervisor.wait_service.assert_called_with(\n self.root,\n treadmill.supervisor.ServiceWaitAction.down,\n timeout=100\n )\n self.assertFalse(res)\n\n # shutdown unsupervised service\n treadmill.subproc.check_call.reset_mock()\n treadmill.subproc.check_call.side_effect = \\\n subproc.CalledProcessError(100, supervisor._get_cmd('svc'))\n\n with self.assertRaises(subproc.CalledProcessError):\n supervisor.control_service(\n self.root, supervisor.ServiceControlAction.down,\n wait=supervisor.ServiceWaitAction.down,\n timeout=100,\n )", "title": "" }, { "docid": "f6b08e12dc970f1c8b03290518c60162", "score": "0.5915418", "text": "def service(self, service_name):\n pass", "title": "" }, { "docid": "25ba8c28f2234ea9ee458946a2636dd2", "score": "0.59107196", "text": "def service():\n pass", "title": "" }, { "docid": "1d79a42649180a089b2c48d11189046d", "score": "0.5890802", "text": "def main():\n\n mask = os.umask(0) # fix the umask\n\n module = AnsibleModule(\n argument_spec=dict(\n name=dict(aliases=['name', 'service']),\n state=dict(default=None, choices=['started', 'restarted',\n 'stopped', 'reloaded']),\n enabled=dict(default=None, type='bool')\n ),\n required_one_of=[['state', 
'enabled']],\n supports_check_mode=True)\n\n p = module.params\n\n # Set path for sv\n sv_path = module.get_bin_path('sv', True)\n if not os.path.exists(sv_path):\n module.fail_json(msg=\"Cannot find sv binary in path.\")\n\n # ensure enabled if required\n if p['state'] in ['started', 'restarted', 'reload']:\n if p['enabled'] is False:\n module.fail_json(msg=\"Conflicting request. Will not proceed.\")\n p['enabled'] = True\n\n if p['enabled'] is False:\n if not p['state'] is None and p['state'] != \"stopped\":\n module.fail_json(msg=\"Conflicting request. Will not proceed.\")\n p['state'] = \"stopped\"\n\n service = Service(module, sv_path)\n module.debug(\"Service initialized.\")\n service.do_action()\n\n result = {}\n result[\"name\"] = service.name\n result[\"changed\"] = service.changed\n result[\"enable\"] = service.get_enabled()\n result[\"state\"] = \"down\" if not result[\"enable\"] else service.get_status()\n\n module.exit_json(**result)\n os.umask(mask) # Reset the umask to original value", "title": "" }, { "docid": "429905f4305afaba0d5c489be7ba54aa", "score": "0.588829", "text": "def test_restart():\n mock_true = MagicMock(return_value=True)\n with patch.object(win_service, \"create_win_salt_restart_task\", mock_true):\n with patch.object(win_service, \"execute_salt_restart_task\", mock_true):\n assert win_service.restart(\"salt-minion\") is True\n\n with patch.object(win_service, \"stop\", mock_true):\n with patch.object(win_service, \"start\", mock_true):\n assert win_service.restart(\"salt\") is True", "title": "" }, { "docid": "39baff7701d5af4b37bf6713a6ba9aca", "score": "0.58577853", "text": "def test_service_job_running(self):\n\n def _ensure_node_services(unused_pipeline_state, node_id):\n self.assertEqual('my_example_gen', node_id)\n return service_jobs.ServiceStatus.RUNNING\n\n self._mock_service_job_manager.ensure_node_services.side_effect = (\n _ensure_node_services)\n tasks = self._generate_and_test(\n True,\n num_initial_executions=0,\n num_tasks_generated=0,\n num_new_executions=0,\n num_active_executions=0)\n self.assertEmpty(tasks)", "title": "" }, { "docid": "ea9cb2a6863ec1274b53d5d565f3727c", "score": "0.58473897", "text": "def start_service_in(sparkl_path, module_name, watchdog_path,\n **kwargs):\n\n # Filter kwargs for `sparkl service`\n service_kwargs = dict()\n for key, value in kwargs.items():\n if key in ['path', 'alias']:\n service_kwargs[key] = value\n\n # Start the SPARKL service in the specified directory.\n with ChDir(watchdog_path):\n sparkl('service', sparkl_path, module_name,\n **service_kwargs)\n\n # Give the watchdog time to start.\n sleep(kwargs.get('wait', 3))", "title": "" }, { "docid": "603cffdaa394c88ba8a4d52c70c935af", "score": "0.5827291", "text": "def test_start_from_args_no_args():\n log = conlog.start()\n log.info('Testing')", "title": "" }, { "docid": "7c64fe3d114f361a14d465e85bb79411", "score": "0.5791843", "text": "def test_wait_service(self):\n # Disable W0212(protected-access)\n # pylint: disable=W0212\n supervisor.wait_service(\n self.root, supervisor.ServiceWaitAction.down\n )\n treadmill.subproc.check_call.assert_called_with(\n [supervisor._get_cmd('svwait'), '-d', self.root]\n )\n\n treadmill.subproc.check_call.reset_mock()\n supervisor.wait_service(\n (\n os.path.join(self.root, 'a'),\n os.path.join(self.root, 'b')\n ),\n supervisor.ServiceWaitAction.up,\n all_services=False,\n timeout=100,\n )\n\n treadmill.subproc.check_call.assert_called_with(\n [\n supervisor._get_cmd('svwait'), '-t100', '-o', '-u',\n 
os.path.join(self.root, 'a'),\n os.path.join(self.root, 'b')\n ]\n )\n\n treadmill.subproc.check_call.reset_mock()\n treadmill.subproc.check_call.side_effect = \\\n subproc.CalledProcessError(99, supervisor._get_cmd('svwait'))\n with self.assertRaises(subproc.CalledProcessError):\n supervisor.wait_service(\n self.root, supervisor.ServiceWaitAction.really_down\n )\n treadmill.subproc.check_call.assert_called_with(\n [supervisor._get_cmd('svwait'), '-D', self.root]\n )", "title": "" }, { "docid": "61c665dab10458d42e7841f732f935f5", "score": "0.578035", "text": "def start():\n sudo('service uwsgi start %s' % PROJECT_NAME)", "title": "" }, { "docid": "66276f44066684334499e982b7518052", "score": "0.57765007", "text": "def test_spawn_error(self):\n my_service = make_service()\n\n self.executed = False\n\n def trap_error(exc_type, exc_value, exc_traceback):\n self.assertIsInstance(exc_value, RuntimeError)\n self.executed = True\n\n def raise_error():\n raise RuntimeError\n\n my_service.on('error', trap_error)\n\n my_service.spawn(raise_error)\n\n gevent.sleep(0.0)\n\n self.assertTrue(self.executed)", "title": "" }, { "docid": "d3b546a06bd32d0fc5015e9f5a54fddf", "score": "0.57750255", "text": "def test_start_problem(self):\n self.assertRaises (AppStartError, Application().start, 'Hiya')", "title": "" }, { "docid": "545bd8d1aa7f16ce5e84e2cd2bcfea02", "score": "0.5771033", "text": "def start_service(cwd, service_module, run_daemon, run_ssl):\n \n def add_extra_configs(conf):\n \"\"\"Add Extra keys to snetd.config.json\"\"\"\n with open(conf, \"r\") as f:\n snetd_configs = json.load(f)\n if run_ssl:\n snetd_configs[\"ssl_cert\"] = \"/opt/singnet/.certs/fullchain.pem\"\n snetd_configs[\"ssl_key\"] = \"/opt/singnet/.certs/privkey.pem\"\n snetd_configs[\"payment_channel_ca_path\"] = \"/opt/singnet/.certs/ca.pem\"\n snetd_configs[\"payment_channel_cert_path\"] = \"/opt/singnet/.certs/client.pem\"\n snetd_configs[\"payment_channel_key_path\"] = \"/opt/singnet/.certs/client-key.pem\"\n _network = \"mainnet\"\n if \"ropsten\" in conf:\n _network = \"ropsten\"\n infura_key = os.environ.get(\"INFURA_API_KEY\", \"\")\n if infura_key:\n snetd_configs[\"ethereum_json_rpc_endpoint\"] = \"https://{}.infura.io/{}\".format(_network, infura_key)\n pk_metering = os.environ.get(\"PVT_KEY_FOR_METERING\", \"\")\n if pk_metering:\n snetd_configs[\"metering_enabled\"] = True\n snetd_configs[\"metering_end_point\"] = \"https://marketplace-mt-v2.singularitynet.io\"\n snetd_configs[\"pvt_key_for_metering\"] = pk_metering\n snetd_configs[\"max_message_size_in_mb\"] = os.environ.get(\"MAX_MESSAGE_SIZE\", 128)\n with open(conf, \"w\") as f:\n json.dump(snetd_configs, f, sort_keys=True, indent=4)\n \n all_p = []\n if run_daemon:\n for idx, config_file in enumerate(glob.glob(\"./snetd_configs/*.json\")):\n add_extra_configs(config_file)\n all_p.append(start_snetd(str(cwd), config_file))\n service_name = service_module.split(\".\")[-1]\n grpc_port = registry[service_name][\"grpc\"]\n p = subprocess.Popen([sys.executable, \"-m\", service_module, \"--grpc-port\", str(grpc_port)], cwd=str(cwd))\n all_p.append(p)\n return all_p", "title": "" }, { "docid": "cfaa451d1e513d39e94780938ff0a3f0", "score": "0.57650423", "text": "def test_start_process(mock_agent):\n mock_agent.register_process('test_process', tfunc, tfunc)\n res = mock_agent.start('test_process', params={'a': 1})\n print(res)\n assert res[0] == ocs.OK\n assert isinstance(res[1], str)\n assert res[2]['session_id'] == 0\n assert res[2]['op_name'] == 'test_process'\n assert 
res[2]['op_code'] == OpCode.STARTING.value\n assert res[2]['status'] == 'starting'\n assert res[2]['success'] is None\n assert res[2]['end_time'] is None\n assert res[2]['data'] == {}", "title": "" }, { "docid": "c0241d792029746be8f827af612f3972", "score": "0.5763295", "text": "def test_subclass(self):\n class SomeService(base.Service):\n def run(self):\n logging.info('some service running')", "title": "" }, { "docid": "589f99679f1dc01e13a2fe8a70f82834", "score": "0.5754939", "text": "def test_create_measur_service(self):\n pass", "title": "" } ]
3d4824220e2b76fe41c979db0d89d648
Calculate valuation signals such as P/E and P/Sales ratios.
[ { "docid": "8f6ca49ea6fe4c1d2298b36e8c742fc1", "score": "0.0", "text": "def val_signals(self, variant='daily', func=None,\n shares_index=SHARES_DILUTED):\n\n # Load the required datasets.\n # This is only really necessary if the cache-file needs refreshing,\n # but it is easier to program like this and the overhead is small.\n df_prices = self.load_shareprices(variant=variant)\n df_income_ttm = self.load_income(variant='ttm')\n df_balance_ttm = self.load_balance(variant='ttm')\n df_cashflow_ttm = self.load_cashflow(variant='ttm')\n\n # List of datasets used to determine if disk-cache must be refreshed.\n datasets = [('shareprices', variant),\n ('income', 'ttm'),\n ('balance', 'ttm'),\n ('cashflow', 'ttm')]\n\n # List of arguments used to uniquely identify the cache-file.\n cache_ids = [variant, _func_name(func=func), shares_index]\n\n # Create dict with disk-cache arguments.\n cache_args = self._cache_args(datasets=datasets,\n cache_ids=cache_ids)\n\n # Check whether special datasets for banks and insurances should be used.\n banks = False\n insurance = False\n if self._dataset_extension == \"-banks\":\n banks = True\n elif self._dataset_extension == \"-insurance\":\n insurance = True\n\n # Calculate the signals, or load the DataFrame from the disk-cache.\n df_result = val_signals(df_prices=df_prices,\n df_income_ttm=df_income_ttm,\n df_balance_ttm=df_balance_ttm,\n df_cashflow_ttm=df_cashflow_ttm,\n shares_index=shares_index, func=func,\n banks=banks, insurance=insurance,\n **self._signal_args, **cache_args)\n\n return df_result", "title": "" } ]
[ { "docid": "35f81c10fe1b2d0507b135e909721a2d", "score": "0.6021242", "text": "def npv(rate,values):\n \n \n return float()", "title": "" }, { "docid": "51c6470dfd89042b82c41307358c6aea", "score": "0.5861876", "text": "def results( self ):\n A = AA = 0.0\n x = xx = 0.0\n s = ss = 0.0\n for egg in self.pastEggs:\n w = np.exp( egg.lweight - self.lZ )\n A += w * egg.A[0]\n AA += w * egg.A[0] * egg.A[0]\n x += w * egg.x[0]\n xx += w * egg.x[0] * egg.x[0]\n s += w * egg.s[0]\n ss += w * egg.s[0] * egg.s[0]\n\n print 'mean(A) = %12.6f, stdev(A) = %12.6f' % (A, np.sqrt( AA - (A * A) ))\n print 'mean(x) = %12.6f, stdev(x) = %12.6f' % (x, np.sqrt( xx - (x * x) ))\n print 'mean(s) = %12.6f, stdev(s) = %12.6f' % (s, np.sqrt( ss - (s * s) ))", "title": "" }, { "docid": "ef5e06ae0a3bd2d0c9857bd1db29d5ef", "score": "0.5782366", "text": "def fv(rate,nper,pmt,pv,when):\n \n \n return ndarray()", "title": "" }, { "docid": "579ca3d0d2d61896f3097e2bc4f997ac", "score": "0.5721197", "text": "def pv(rate,nper,pmt,fv,when):\n \n \n return ndarray()", "title": "" }, { "docid": "7fe1eba5c0db1a1ea0b6d2a94ee06815", "score": "0.5719713", "text": "def calcPowerEV(self):\n self.q_powerEV = helper_functions.calcExpectedValue(self.quality_power)\n self.d_powerEV = helper_functions.calcExpectedValue(self.duration_power)\n self.c_powerEV = helper_functions.calcExpectedValue(self.cost_power)", "title": "" }, { "docid": "7fe1eba5c0db1a1ea0b6d2a94ee06815", "score": "0.5719713", "text": "def calcPowerEV(self):\n self.q_powerEV = helper_functions.calcExpectedValue(self.quality_power)\n self.d_powerEV = helper_functions.calcExpectedValue(self.duration_power)\n self.c_powerEV = helper_functions.calcExpectedValue(self.cost_power)", "title": "" }, { "docid": "05857342e9478291c51bab4c93b4d5ba", "score": "0.56470066", "text": "def calculate_signals(self):\n raise NotImplementedError(\"Should implement calculate_signals()\")", "title": "" }, { "docid": "bca0f8d5af2de474e55feaab69b1a8b3", "score": "0.56332994", "text": "def schedule_variance(self):\n \n ev = self.apc * self.budget\n pv = self.ppc * self.budget\n \n return ev - pv", "title": "" }, { "docid": "fce3581240e5a5a3042dae63804e9c2c", "score": "0.5606615", "text": "def valuation(self):\n self.integer.valuation() * (self.p ** self.shift)", "title": "" }, { "docid": "91b400ab519812f046835cc4c8798d2e", "score": "0.55950147", "text": "def valuation(self):\n if not self:\n return 0\n for i, e in enumerate(self.expansion):\n if e:\n return rational.Rational(1, p**i)", "title": "" }, { "docid": "26419a54e64cde2d143b1bef8013b475", "score": "0.55775577", "text": "def evaluate_result( self ):\n \n # Create the exact result\n x_exact = np.exp( np.sin(self.t) )\n # Print the RMS difference\n e_rms = np.sqrt( 1./self.N * ((self.x-x_exact)**2).sum() )\n print( e_rms )\n # Plot the result\n plt.plot( self.t, self.x, label=\"Euler\")\n plt.plot( self.t, x_exact, '--', label=\"Analytic Solution\")\n plt.legend(loc=3)\n plt.show()", "title": "" }, { "docid": "0ea9bace0a44d73df3adea35372a17af", "score": "0.5554894", "text": "def CalculateSensitivitiesForAllDataPoints(self, params):\n varsByCalc = self.GetExperimentCollection().GetVarsByCalc()\n self.calcVals, self.calcSensitivityVals =\\\n self.GetCalculationCollection().CalculateSensitivity(varsByCalc,\n params)\n return self.calcSensitivityVals", "title": "" }, { "docid": "634c80092e8842c970c8514e12302f79", "score": "0.5538802", "text": "def calcSignal(self) -> Tuple[SpectralQty, float]:\n pass", "title": "" }, { "docid": 
"b24be98c7eb1f222b9465254c6656b10", "score": "0.5521443", "text": "def compute(cls, real_data, synthetic_data):\n real_data = pd.Series(real_data).dropna()\n synthetic_data = pd.Series(synthetic_data).dropna()\n\n if len(synthetic_data) == 0:\n return 0\n\n f_obs, f_exp = get_frequencies(real_data, synthetic_data)\n total_variation = 0\n for i in range(len(f_obs)):\n total_variation += abs(f_obs[i] - f_exp[i])\n\n return 1 - 0.5 * total_variation", "title": "" }, { "docid": "fd9f319765bed022ccb443a27718a9fa", "score": "0.55095655", "text": "def val_eq(self):\n\n # sum of present values of all cash flows\n sum_pv = self.pv_cf()\n\n # Probability of failure\n fail_prob = self.inp_params['failure_prob']\n # Proceeds if firm fails\n proceeds = self.inp_params['proceeds']*self.inp_params['val_of_proceeds']\n\n # Value of operating assets\n val_oa = sum_pv*(1. - fail_prob)\n\n # Value of equity\n val_eq = val_oa - self.inp_params['bk_val_debt'] + self.inp_params['cash_eq']\n\n # Value of options\n #val_op = options_value()\n val_op = 643.8\n\n # common equity value or intrinsic value\n val_eq_common = val_eq - val_op\n\n # common equity value per share\n val_per_share = val_eq_common/self.inp_params['outstanding_shares']\n\n # Price as a % of value\n p_to_val = self.inp_params['curr_stock_price']/val_per_share\n\n return val_per_share,p_to_val", "title": "" }, { "docid": "78bf3697bcf87c5c94a4b5379948b734", "score": "0.55048", "text": "def computeQValueFromValues(self, state, action):\n \"*** YOUR CODE HERE ***\"\n Q_Average = 0\n if self.mdp.isTerminal(state):\n return 0\n nextStateAndPorb = self.mdp.getTransitionStatesAndProbs(state, action)\n for pair in nextStateAndPorb:\n Q_Average = Q_Average + pair[1] * (self.mdp.getReward(state, action, pair[0]) +\\\n self.discount * self.getValue(pair[0]))\n return Q_Average\n util.raiseNotDefined()", "title": "" }, { "docid": "0169b44ce63b4b146a55466bdb532664", "score": "0.5496748", "text": "def all_evaluate(self):\r\n #self.compute_euclidian_distance()\r\n # print(\"Euclidian distance : \", self.euclidian_distance)\r\n self.compute_MAE()\r\n print(\"MAE : \", self.MAE)\r\n self.compute_RMSE()\r\n print(\"RMSE : \", self.RMSE)\r\n #self.compute_SSIM()\r\n\r\n #self.compute_UQI()\r\n # print(\"UQI : \", self.UQI)\r\n #self.compute_VIF()\r\n #print(\"VIF : \", self.VIF)\r\n return self.MAE, self.RMSE", "title": "" }, { "docid": "a06b27bde45012008083bf67133d907c", "score": "0.54588586", "text": "def calc_values(self):\n atm_contract_index = (\n np.abs(self.chain[\"strike\"] - self.underlying_price)\n ).idxmin()\n atm_impliedvol = self.chain.iloc[atm_contract_index][\"impvol\"]\n\n # Calculate option value for all options using ATM volatility\n self.chain[\"model_value\"] = self.chain.apply(\n lambda x: bs_price(\n x[\"right\"],\n x[\"underprice\"],\n x[\"strike\"],\n self.dte / 252,\n atm_impliedvol,\n self.risk_free_rate,\n ),\n axis=1,\n )\n self.chain[\"mid_price\"] = (self.chain[\"bid\"] + self.chain[\"ask\"]) / 2\n self.chain[\"skew_premium\"] = self.chain[\"mid_price\"] - self.chain[\"model_value\"]", "title": "" }, { "docid": "114c5912cf93838002a78a87daa0d4ca", "score": "0.5439486", "text": "def vat_rate():", "title": "" }, { "docid": "47e222c63e534fa6cf01c3726ed9664e", "score": "0.54364234", "text": "def value(self) -> float:\n return self.days * float(self.rate)", "title": "" }, { "docid": "23e08cd32e8d42a3759c37b082d0aece", "score": "0.5421589", "text": "def pmt(rate,nper,pv,fv,when):\n \n \n return ndarray()", "title": "" }, { "docid": 
"f52bb576eb8a97c5b98a25fa475c6991", "score": "0.54204094", "text": "def evaluate(self, ts):\n print(ts)\n phases = PI2 * self.freq * ts + self.offset\n print(phases)\n ys = self.amp * np.exp(1j * phases)\n return ys", "title": "" }, { "docid": "b2bbf8d19fb08451ca7f539dfe0ba308", "score": "0.54171187", "text": "def pulse_ratio(pickle_file, neuron_type, neuron_index, var):\n\n net = unpickle_net(pickle_file)\n\n spike_times = extract_presynaptic_spikes(pickle_file, neuron_type, neuron_index, net)\n # print(spike_times)\n if var == \"V\":\n mon = [x[neuron_index]/volt for x in net['{}_V_mon'.format(neuron_type)]['V']]\n if var == \"Ge\":\n mon = [x[neuron_index] for x in net['{}_Ge_total_mon'.format(neuron_type)]['Ge_total']]\n if var == \"Gi\":\n mon = [x[neuron_index] for x in net['{}_Gi_total_mon'.format(neuron_type)]['Gi_total']]\n\n responses = extract_responses(spike_times, mon)\n PPR = [float(x)/float(responses[0]) for x in responses]\n print(PPR)\n return responses", "title": "" }, { "docid": "70fffbb5d0eb840bc63ce7493f050394", "score": "0.5381214", "text": "def Pfe(self, num_si_events, p1=None):\r\n if p1 is None: p1 = self.Pr()\r\n ne = num_si_events\r\n return sum(-(-n/n)**n * p1**n * scipy.special.comb(ne,n) for n in range(1,ne+1))", "title": "" }, { "docid": "c9595658251fffa46a956e0564edb0d3", "score": "0.53695977", "text": "def compute_estimates(self):\n rate_estimate_list = []\n for tau in range(len(self.new_infection_data) - 11):\n estimate = 0\n for j in range(7):\n estimate += self.new_infection_data[tau + j + 4] / self.new_infection_data[tau + j]\n rate_estimate_list.append(estimate)\n rate_estimates = np.array(rate_estimate_list)\n self.infection_rate_estimate = rate_estimates", "title": "" }, { "docid": "498a5c77e9d1bc8f1672a1d9a6a95cef", "score": "0.5363381", "text": "def val_signals(df_prices, df_income_ttm, df_cashflow_ttm,\n shares_index=SHARES_DILUTED):\n\n # Create a DataFrame with the financial data we need.\n #calculate sales per share and earning per share and free cash flow per share\n df = df_income_ttm[[REVENUE, NET_INCOME_COMMON]].copy()\n df[FCF] = df_cashflow_ttm[NET_CASH_OPS] + df_cashflow_ttm[CAPEX]\n df_per_share = df.div(df_income_ttm[shares_index], axis=0)\n \n # Reindex the per-share financial data to daily data-points.\n df_daily = sf.reindex(df_src=df_per_share,\n df_target=df_prices,\n method='ffill')\n \n # Create new DataFrame for the signals. 
Setting the index improves performance.\n df_signals = pd.DataFrame(index=df_prices.index)\n \n # Use the closing share-price for all these signals.\n df_price = df_prices[CLOSE]\n \n # P/Sales ratio.\n df_signals[PSALES] = df_price / df_daily[REVENUE]\n \n # P/E ratio.\n df_signals[PE] = df_price / df_daily[NET_INCOME_COMMON]\n\n # P/FCF ratio.\n df_signals[PFCF] = df_price / df_daily[FCF]\n\n return df_signals", "title": "" }, { "docid": "e6682c8fad841bf49d023538bc2ba8bc", "score": "0.5337668", "text": "def calc_q_values(self, state):\n pass", "title": "" }, { "docid": "2cc336fab9470fdea1e22392ad9abf8d", "score": "0.5329119", "text": "def evaluate(self, params: np.ndarray) -> float:\n kl = 0\n for sample in self.data:\n kl += np.log(self.vgbs.prob_sample(params, sample))\n return -kl / self.nr_samples", "title": "" }, { "docid": "fc3a60f7a4004fcbc61dab39f721f084", "score": "0.53238505", "text": "def iterate(self,plot=True):\n # Create empty arrays to store values\n F = np.zeros(len(self.y))\n a = np.zeros(len(self.y))\n v = np.zeros(len(self.y))\n P = np.zeros(len(self.y))\n # Initialize at the initial values parsed to the class\n P[0] = self.pardict['P1']\n a[0] = self.y[0]\n sigma_eps2 = self.pardict['sigma_eps2']\n sigma_eta2 = self.pardict['sigma_eta2']\n # Iterate \n for t in range(0,len(self.y)-1):\n F[t] = P[t]+sigma_eps2\n # K is defined as ratio of P and F\n Kt = P[t]/F[t] if np.isfinite(self.y[t]) else 0\n v[t] = self.y[t]-a[t]\n a[t+1] = a[t] + np.nan_to_num(Kt*v[t])\n F[t] = P[t]+sigma_eps2\n P[t+1] = P[t]*(1-Kt)+sigma_eta2\n F[-1] = P[-1]+sigma_eps2\n v[-1] = self.y[-1]-a[-1]\n # Obtain std error of prediction form variance\n std = np.sqrt((P*sigma_eps2)/(P+sigma_eps2))\n \n if plot:\n fig_name = self.var_name + 'Fig26.pdf'\n plot_fig2_6(self.times, self.y, std, P, a, F,fig_name, self.var_name)\n return a, std, P, v, F", "title": "" }, { "docid": "1b8e6c668b0d352fbae386fa33e8f632", "score": "0.53229123", "text": "def _ev(self) -> float:\n return explained_variance_score(\n y_true=self.y_true,\n y_pred=self.y_pred,\n multioutput=self.multioutput,\n )", "title": "" }, { "docid": "ba918ad0b2a1c18ddf0786fa58fe288c", "score": "0.5317666", "text": "def comp_pr(is_stationary, signal, fs, prominence=True):\n\n # Prominence criteria\n freqs = np.arange(90, 11200, 100)\n limit = np.zeros((len(freqs)))\n for i in range(len(freqs)):\n if freqs[i] >= 89.1 and freqs[i] < 1000:\n limit[i] = 9 + 10 * np.log10(1000 / freqs[i])\n if freqs[i] >= 1000 and freqs[i] < 11200:\n limit[i] = 9\n\n if is_stationary == True:\n tones_freqs, pr, prom, t_pr = pr_main_calc(signal, fs)\n tones_freqs = tones_freqs.astype(int)\n\n if prominence == True:\n output = {\n \"name\": \"tone-to-noise ratio\",\n \"freqs\": tones_freqs[prom],\n \"values\": pr[prom],\n \"prominence\": True,\n \"global value\": t_pr,\n }\n\n else:\n output = {\n \"name\": \"tone-to-noise ratio\",\n \"freqs\": tones_freqs,\n \"values\": pr,\n \"prominence\": prom,\n \"global value\": t_pr,\n }\n\n elif is_stationary == False:\n # Signal cut in frames of 500 ms along the time axis\n n = 0.5 * fs\n nb_frame = math.floor(signal.size / n)\n time = np.linspace(0, len(signal) / fs, num=nb_frame)\n time = np.around(time, 1)\n\n # Initialization of the result arrays\n tones_freqs = np.zeros((nb_frame), dtype=list)\n pr = np.zeros((nb_frame), dtype=list)\n prom = np.zeros((nb_frame), dtype=list)\n t_pr = np.zeros((nb_frame))\n\n # Compute PR values along time\n for i_frame in range(nb_frame):\n segment = signal[int(i_frame * n) : int(i_frame * n 
+ n)]\n (\n tones_freqs[i_frame],\n pr[i_frame],\n prom[i_frame],\n t_pr[i_frame],\n ) = pr_main_calc(segment, fs)\n\n # Store the results in a time vs frequency array\n freq_axis = np.logspace(np.log10(90), np.log10(11200), num=1000)\n results = np.zeros((len(freq_axis), nb_frame))\n promi = np.zeros((len(freq_axis), nb_frame), dtype=bool)\n\n if prominence == True:\n for t in range(nb_frame):\n for f in range(len(tones_freqs[t])):\n if prom[t][f] == True:\n ind = np.argmin(np.abs(freq_axis - tones_freqs[t][f]))\n results[ind, t] = pr[t][f]\n promi[ind, t] = True\n else:\n for t in range(nb_frame):\n for f in range(len(tones_freqs[t])):\n ind = np.argmin(np.abs(freq_axis - tones_freqs[t][f]))\n results[ind, t] = pr[t][f]\n promi[ind, t] = prom[t][f]\n\n output = {\n \"name\": \"prominence ratio\",\n \"time\": time,\n \"freqs\": freq_axis,\n \"values\": results,\n \"prominence\": promi,\n \"global value\": t_pr,\n }\n\n return output", "title": "" }, { "docid": "4d981270b716db45ec7dd988c4889158", "score": "0.53150016", "text": "def MSE(pred_signals, true_signals):\n\n dim, n = np.shape(true_signals)\n pred_mean = np.mean(pred_signals,axis=1).reshape(dim, 1)\n pred_std = np.std(pred_signals,axis=1).reshape(dim, 1)\n true_mean = np.mean(true_signals,axis=1).reshape(dim, 1)\n true_std = np.std(true_signals,axis=1).reshape(dim, 1)\n #scale data to have unit variance 1/n*y@y.T=1\n pred_signals = np.divide((pred_signals - pred_mean), pred_std)\n true_signals = np.divide((true_signals - true_mean), true_std)\n MSE = np.zeros(dim)\n MSE_matrix1 = np.zeros((dim, dim))\n MSE_matrix2 = np.zeros((dim, dim))\n\n #calculate MSE between all estimated signals with changed sign and true signals and store in array\n for i in range(0, dim):\n for j in range(0, dim):\n MSE_matrix1[i, j] = mean_squared_error(true_signals[j],pred_signals[i])\n #calculate MSE between all estimated and true signals and store in array\n for i in range(0, dim):\n for j in range(0, dim):\n MSE_matrix2[i, j] = mean_squared_error(true_signals[j],-pred_signals[i])\n \n #find minima of all possible MSE values\n array_min1 = MSE_matrix1.min(axis=1)\n array_min2 = MSE_matrix2.min(axis=1)\n\n #store minima in separate array\n for i in range(dim):\n if(array_min1[i]>array_min2[i]):\n MSE[i] = array_min2[i]\n else:\n MSE[i] = array_min1[i]\n\n return MSE", "title": "" }, { "docid": "7127eb6f05cd675f434d14a9ef15bd82", "score": "0.53097785", "text": "def evaluate(self):\r\n self.evals, self.evects = ln.eigh(self.A)", "title": "" }, { "docid": "4289c7e61101b80ae13d42841f9f1813", "score": "0.53085506", "text": "def pv(flows, r):\n dates=flows.index\n discounts=discount(dates, r)\n return discounts.multiply(flows, axis=0).sum()", "title": "" }, { "docid": "dcddd84e3f3d07df539687f555afaa24", "score": "0.52983016", "text": "def exp(self) -> Series:", "title": "" }, { "docid": "1a6f85d19d8b3dda854bcf07cb730af8", "score": "0.5295143", "text": "def calculate_signals(self, event):\n if event.type == EventType.BAR:\n self._set_correct_time_and_price(event)\n \n # Only trade if we have both observations\n if all(self.latest_prices > -1.0):\n # Create the observation matrix of the latest prices\n # of TLT and the intercept value (1.0) as well as the\n # scalar value of the latest price from IEI\n F = np.asarray([self.latest_prices[0], 1.0]).reshape((1, 2))\n y = self.latest_prices[1]\n \n # The prior value of the states \\theta_t is\n # distributed as a multivariate Gaussian with\n # mean a_t and variance-covariance R_t\n if self.R is not None:\n self.R = 
self.C + self.wt\n else:\n self.R = np.zeros((2, 2))\n\n # Calculate the Kalman Filter update\n # ----------------------------------\n # Calculate prediction of new observation\n # as well as forecast error of that prediction\n yhat = F.dot(self.theta)\n et = y - yhat\n\n # Q_t is the variance of the prediction of\n # observations and hence \\sqrt{Q_t} is the\n # standard deviation of the predictions\n Qt = F.dot(self.R).dot(F.T) + self.vt\n sqrt_Qt = np.sqrt(Qt)\n\n # The posterior value of the states \\theta_t is\n # distributed as a multivariate Gaussian with mean\n # m_t and variance-covariance C_t\n At = self.R.dot(F.T) / Qt\n self.theta = self.theta + At.flatten() * et\n self.C = self.R - At * F.dot(self.R)\n \n # Finally we generate the trading signals based on the values of etet and Qt−−√Qt. To do this we need to check what the \"invested\" status is - either \"long\", \"short\" or \"None\". \n # Notice how we need to adjust the cur_hedge_qty current hedge quantity when we go long or short as the slope θt0 is constantly adjusting in time:\n \n # Only trade if days is greater than a \"burn in\" period\n if self.days > 1:\n # If we're not in the market...\n if self.invested is None:\n if et < -sqrt_Qt:\n # Long Entry\n print(\"LONG: %s\" % event.time)\n self.cur_hedge_qty = int(floor(self.qty*self.theta[0]))\n self.events_queue.put(SignalEvent(self.tickers[1], \"BOT\", self.qty))\n self.events_queue.put(SignalEvent(self.tickers[0], \"SLD\", self.cur_hedge_qty))\n self.invested = \"long\"\n elif et > sqrt_Qt:\n # Short Entry\n print(\"SHORT: %s\" % event.time)\n self.cur_hedge_qty = int(floor(self.qty*self.theta[0]))\n self.events_queue.put(SignalEvent(self.tickers[1], \"SLD\", self.qty))\n self.events_queue.put(SignalEvent(self.tickers[0], \"BOT\", self.cur_hedge_qty))\n self.invested = \"short\"\n # If we are in the market...\n if self.invested is not None:\n if self.invested == \"long\" and et > -sqrt_Qt:\n print(\"CLOSING LONG: %s\" % event.time)\n self.events_queue.put(SignalEvent(self.tickers[1], \"SLD\", self.qty))\n self.events_queue.put(SignalEvent(self.tickers[0], \"BOT\", self.cur_hedge_qty))\n self.invested = None\n elif self.invested == \"short\" and et < sqrt_Qt:\n print(\"CLOSING SHORT: %s\" % event.time)\n self.events_queue.put(SignalEvent(self.tickers[1], \"BOT\", self.qty))\n self.events_queue.put(SignalEvent(self.tickers[0], \"SLD\", self.cur_hedge_qty))\n self.invested = None", "title": "" }, { "docid": "6946de9557bdc512640937b2d86db275", "score": "0.52889144", "text": "def calcularPrecio(self):", "title": "" }, { "docid": "6946de9557bdc512640937b2d86db275", "score": "0.52889144", "text": "def calcularPrecio(self):", "title": "" }, { "docid": "99ebcde69abad3e3eafe40a8f0a228eb", "score": "0.52832043", "text": "def compute(self):\n self.Fi = self.FI(self.X, self.eta, self.nu)\n self.pmf = self.Pv(self.L)", "title": "" }, { "docid": "52fbe8f19348def0fc1ce0d70e49844f", "score": "0.52728665", "text": "def _calculate_p_value(self, U):\n K = len(U)\n p_value = 2 * np.exp(\n (-6 * np.power(np.max(np.abs(U)), 2)) / (np.power(K, 2) + np.power(K, 3))\n )\n\n return p_value", "title": "" }, { "docid": "36243b482d740c465be81a12aeef1adb", "score": "0.5260407", "text": "def detAcceptanceRateValues(self):\n if(not(self.weight)):\n for nodej in self.g.nodes():\n WeightedSum = len(self.g.in_edges(nodej))\n for edge in self.g.in_edges(nodej):\n self.A[edge[0] + \"-\"+edge[1]] = (1.0/self.NormA[nodej])\n else:\n for nodej in self.g.nodes():\n for edge in self.g.in_edges(nodej):\n 
self.A[edge[0] + \"-\"+edge[1]] = (self.g.get_edge_data(edge[0],edge[1]) + 0.0)/self.NormA[nodej]", "title": "" }, { "docid": "dbf7aa94bc00a27f8909b581c8022685", "score": "0.5259184", "text": "def PoP(data, **kwargs):\n \n # Correspondance tables\n CORRES_B = {'D':1, 'M':21, 'Q':63, 'H':130, 'A':262} #business days\n CORRES_D = {'D':1, 'M':30, 'Q':91, 'H':182, 'A':365} #calendar days\n CORRES_M = {'M':1, 'Q':3, 'H':6, 'A':12}\n CORRES_Q = {'Q':1, 'H':2, 'A':4}\n CORRES_LABEL = {'D':'DoD', 'M':'MoM', 'Q':'QoQ', 'H':'HoH', 'A':'YoY'}\n\n if \"per\" in kwargs:\n period = kwargs[\"per\"].upper()\n else:\n period = 'A'\n \n if \"freq\" in kwargs:\n frequency = kwargs[\"freq\"].upper()\n else:\n frequency = 'D'\n\n #map correspondance tables\n if frequency == 'D':\n CORRES = CORRES_D\n elif frequency == 'B':\n CORRES = CORRES_B\n elif frequency == 'M':\n CORRES = CORRES_M\n elif frequency == 'Q':\n CORRES = CORRES_Q\n \n meth = 'ratio'\n if \"method\" in kwargs:\n if kwargs[\"method\"] == 'diff':\n meth = kwargs[\"method\"]\n \n #Converts series to DataFrame if necessary\n if isinstance(data, pd.Series):\n data = data.to_frame(data.name)\n \n # Aligns frequency and shift the second series by the period\n data = data.asfreq(frequency, method='pad')\n data_per = data.shift(CORRES[period])\n if meth == 'ratio':\n result = data/data_per-1\n elif meth == 'diff':\n result = data - data_per\n result.columns = [list(result)[0] + ' ' + CORRES_LABEL[period]]\n # Returns the period over period transformation\n return result", "title": "" }, { "docid": "fccbc7b444b581ab06c04ed20f0959de", "score": "0.5256896", "text": "def calculate(self):\n scorer_class_name = self.get_root().scorer_class.name\n\n if len(self.labels) == 0:\n raise ValueError(\n \"No data present across all \"\n \"sequencing libraries [{}]\".format(self.name)\n )\n\n for label in self.labels:\n self.merge_counts_unfiltered(label)\n self.filter_counts(label)\n\n if self.is_barcodevariant() or self.is_barcodeid():\n self.combine_barcode_maps()\n\n self.ensure_main_count_tables_exist_and_populated()\n self.timepoints_contain_variants()\n\n if \"Demo\" in scorer_class_name:\n raise ValueError(\n 'Invalid scoring method \"{}\" '\n \"[{}]\".format(scorer_class_name, self.name)\n )\n\n if \"Regression\" in scorer_class_name and len(self.timepoints) <= 2:\n raise ValueError(\n \"Regression-based scoring \" \"requires three or more time points.\"\n )\n\n scorer = self.get_root().scorer_class(\n store_manager=self, options=self.get_root().scorer_class_attrs\n )\n scorer.run()\n\n # TODO: Write outlier computation as a plugin?\n non_allowed_methods = (\"Counts Only\", \"Demo\")\n scoring_method = scorer_class_name\n\n if scoring_method not in non_allowed_methods and self.component_outliers:\n if self.is_barcodevariant() or self.is_barcodeid():\n self.calc_outliers(\"barcodes\")\n if self.is_coding():\n self.calc_outliers(\"variants\")", "title": "" }, { "docid": "3f2edfc1bbde94d815ef108a7855afcd", "score": "0.52566755", "text": "def _evalue_RR(self, est, lo=None, hi=None, no_effect_baseline=1):\n if est < 0:\n raise ValueError(\"Risk/Rate Ratio cannot be negative\")\n if no_effect_baseline < 0:\n raise ValueError(\"no_effect_baseline value is impossible\")\n if no_effect_baseline != 1:\n self.logger.info(\n 'You are calculating a \"non-null\" E-value, i.e., an E-value for the minimum amount of unmeasured '\n \"confounding needed to move the estimate and confidence interval to your specified no_effect_baseline value \"\n \"rather than to the null 
value.\"\n )\n if lo is not None and hi is not None:\n if lo > hi:\n raise ValueError(\"Lower confidence limit should be less than upper confidence limit\")\n if lo is not None and est < lo:\n raise ValueError(\"Point estimate should be inside confidence interval\")\n if hi is not None and est > hi:\n raise ValueError(\"Point estimate should be inside confidence interval\")\n\n e_est = self._threshold(est, no_effect_baseline=no_effect_baseline)\n e_lo = self._threshold(lo, no_effect_baseline=no_effect_baseline)\n e_hi = self._threshold(hi, no_effect_baseline=no_effect_baseline)\n\n # if CI crosses null, set its E-value to 1\n null_CI = False\n if est > no_effect_baseline and lo is not None:\n null_CI = lo < no_effect_baseline\n if est < no_effect_baseline and hi is not None:\n null_CI = hi > no_effect_baseline\n if null_CI:\n e_lo = np.float64(1)\n e_hi = np.float64(1)\n\n # only report E-value for CI limit closer to null\n if lo is not None or hi is not None:\n if est > no_effect_baseline:\n e_hi = None\n else:\n e_lo = None\n\n return {\n \"converted_estimate\": est,\n \"converted_lower_ci\": lo,\n \"converted_upper_ci\": hi,\n \"evalue_estimate\": e_est,\n \"evalue_lower_ci\": e_lo,\n \"evalue_upper_ci\": e_hi,\n }", "title": "" }, { "docid": "8e5a456bf36237fe05f66878a584a056", "score": "0.52539456", "text": "def set_rsi(self):\n day_range = self.day_ranges.get('rsi', [14])\n dict = {}\n mtm = self.momentum[[f'{t}_momentum_1d' for t in self.tickers]]\n up = np.maximum(mtm, 0)\n down = -np.minimum(mtm, 0)\n for d in day_range:\n count_ups = (up > 0).rolling(window=d).sum()\n count_downs = (down > 0).rolling(window=d).sum()\n up_avg = up.rolling(window=d).sum() / count_ups\n down_avg = down.rolling(window=d).sum() / count_downs\n dict[d] = up_avg / (up_avg + down_avg)\n self.rsi = self.get_dataframe_from_dict(dict, 'rsi')", "title": "" }, { "docid": "69713d3a408f8f71087b5f3ec4592ff3", "score": "0.5253733", "text": "def sipm_enf(x, spe, spe_sigma, opct, pap, dap):\n sap = spe_sigma # Assume the sigma of afterpulses is the same\n\n pe_signal = np.zeros(x.size)\n\n # Loop over the possible total number of cells fired\n for k in range(1, 250):\n pk = (1-opct) * pow(opct, k-1)\n\n # Consider probability of afterpulses\n papk = pow(1 - pap, k)\n p0ap = pk * papk\n pap1 = pk * (1-papk) * papk\n\n # Combine spread of pedestal and pe (and afterpulse) peaks\n pe_sigma = np.sqrt(k * spe_sigma ** 2)\n ap_sigma = np.sqrt(k * sap ** 2)\n\n # Evaluate probability at each value of x\n pe_signal += (\n p0ap * norm.pdf(x, k * spe, pe_sigma) +\n pap1 * norm.pdf(x, k * spe * (1 - dap), ap_sigma)\n )\n\n return pe_signal", "title": "" }, { "docid": "68d8900ee63fca39075decc55671304a", "score": "0.5247868", "text": "def evaluate( self, tick, value, lastTick, lastValue ):\n for resample in self.resamples:\n resample.evaluate( tick, value, lastTick, lastValue)", "title": "" }, { "docid": "f9adabd60833c423964af2657fe4d22f", "score": "0.523737", "text": "def calculation_and_save(self):\r\n # deallocate memory.\r\n gc.collect()\r\n\r\n prob_data = np.zeros((self.out_column, self.out_row), np.float64)\r\n ln_prob_data = np.zeros((self.out_column, self.out_row), np.float64)\r\n\r\n for j in range(int(np.log(self.out_column) / np.log(2)) + 1):\r\n # calculate the first probability matrix.\r\n if j == 0:\r\n (prob_data, ln_prob_data) = self._calculate_the_probability(self.data, (1, 1))\r\n # calculate other probability based on the previous matrix.\r\n elif j > 0:\r\n (prob_data, ln_prob_data) = 
self._calculate_from_old_prob_data(self.prob_data_list[j - 1][0],(2, 2))\r\n self.prob_data_list.append([prob_data, ln_prob_data])\r\n\r\n #### calculate multifractal parameters\r\n self.parameters, self.parameters_calculation, self.miu_q_list_list = self._calculate_the_spectrum()", "title": "" }, { "docid": "482a9a01bd7535f41ffc8145d128bac3", "score": "0.52369905", "text": "def calculate_signals(self, event):\n sym = self.symbol_list[0]\n dt = self.datetime_now\n\n if event.type == 'Market':\n self.bar_index += 1\n if self.bar_index >5:\n lags = self.bars.get_latest_bars_value(\n self.symbol_list[0], \"returns\", N=3\n )\n pred_series = pd.Series(\n {\n 'Lag1': lags[1]*100.0,\n 'Lag2': lags[2]*100.0\n }\n )\n pred = self.model.predict(pred_series)\n if pred > 0 and not self.long_market:\n self.long_market = True\n signal = ASignalEvent(1, sym, dt, 'LONG', 1.0)\n self.events.put(signal)\n\n if pred < 0 and self.long_market:\n self.long_market = False\n signal = SignalEvent(1, sym, dt, 'EXIT', 1.0)\n self.events.put(signal)", "title": "" }, { "docid": "f70ade2a39767a02c4cf6187be746b7c", "score": "0.5228082", "text": "def mirr(values,finance_rate,reinvest_rate):\n \n \n return float()", "title": "" }, { "docid": "6cef5da13462f5e26e95fcf593fa61ff", "score": "0.522527", "text": "def momentum_average_and_standard_deviation(self):\r\n F = np.fft.fft(self.x)\r\n prob = np.abs(F)**2\r\n if np.max(prob) != 0.0:\r\n prob = prob/np.sum(prob)\r\n freq = np.fft.fftfreq(self.N, d=self.dx)\r\n p = 2*np.pi*freq*self.hbar/self.L\r\n\r\n expval = np.sum(np.dot(p, prob))\r\n expval2 = np.sum(np.dot(p**2, prob))\r\n sigma = np.sqrt(expval2 - expval**2)\r\n return (expval, sigma)", "title": "" }, { "docid": "c22f2110345eb960e9d39f86f484017e", "score": "0.52177155", "text": "def calculate_ensquared_energy(PSF_model, wave, N_trials, N_rms,\n rms_amplitude, nominal_scale, spaxel_scales, rescale_coeff=None):\n\n N_zern = PSF_model.N_coef\n ensquared_results = []\n\n a_min = 1.0 if rescale_coeff is None else rescale_coeff\n\n print(\"Calculating Ensquared Energy\")\n for scale in spaxel_scales: # Loop over each Spaxel Scale [mas]\n print(\"%d mas spaxel scale\" % scale)\n\n data = np.zeros((2, N_trials * N_rms))\n amplitudes = np.linspace(0.0, rms_amplitude, N_rms)\n i = 0\n p0, s0 = PSF_model.compute_PSF(np.zeros(N_zern)) # Nominal PSF\n # Calculate the EE for the nominal PSF so that you can compare\n EE0 = ensquared_one_pix(p0, pix_scale=nominal_scale, new_scale=2*scale, plot=False)\n\n # 60x120\n # EE0 = ensquared_rectangle(p0, pix_scale=nominal_scale, new_scale=60, plot=False)\n\n for amp in amplitudes: # Loop over coefficient strength\n\n for k in range(N_trials): # For each case, repeat N_trials\n\n c_act = np.random.uniform(-amp, amp, size=N_zern)\n rescale = np.linspace(1, a_min, N_zern)\n c_act *= rescale\n phase_flat = np.dot(PSF_model.model_matrix_flat, c_act)\n rms = wave * 1e3 * np.std(phase_flat)\n p, s = PSF_model.compute_PSF(c_act)\n EE = ensquared_one_pix(p, pix_scale=nominal_scale, new_scale=2*scale, plot=False)\n # EE = ensquared_rectangle(p, pix_scale=nominal_scale, new_scale=60, plot=False)\n dEE = EE / EE0 * 100\n data[:, i] = [rms, dEE]\n i += 1\n\n ensquared_results.append(data)\n\n return ensquared_results", "title": "" }, { "docid": "0318516352678fc935c8e852419e69e8", "score": "0.5211374", "text": "def average(self):\n n,S = 0,0\n for yi in (self.y):\n n = n + 1\n S = S + yi\n return S/n", "title": "" }, { "docid": "9eabe5526ca1fe5076643b90534da2c7", "score": "0.5200672", "text": "def 
sens(self):\n print(f'Sensitivity is {self.tp / (self.tp + self.fn)}')", "title": "" }, { "docid": "7e979e07d334e57cf388208a6430a1b1", "score": "0.5196765", "text": "def ipmt(rate,per,nper,pv,fv,when):\n \n \n return ndarray()", "title": "" }, { "docid": "c03588f476c22ddb06ffa942462d8a72", "score": "0.5186421", "text": "def calc_stats(self):\n self.happiness += (self.time_delta * self.happiness_rate)\n if self.happiness > self.happiness_max:\n self.happiness = self.happiness_max\n elif self.happiness < 0:\n self.happiness = 0\n\n self.satiation += (self.time_delta * self.satiation_rate)\n if self.satiation > self.satiation_max:\n self.satiation = self.satiation_max\n elif self.satiation < 0:\n self.satiation = 0\n\n self.energy += (self.time_delta * self.energy_rate)\n if self.energy > self.energy_max:\n self.energy = self.energy_max\n elif self.energy < 0:\n self.energy = 0\n\n self.health += (self.time_delta * self.health_rate)\n if self.health > self.health_max:\n self.health = self.health_max\n elif self.health < 0:\n self.health = 0\n return", "title": "" }, { "docid": "a2ffb565258728f618fe9cdcd836df45", "score": "0.51852655", "text": "def Pv(self, l=None, e=None, n=None, Q=None):\n if l is None:\n l = self.L\n if e is None:\n e = self.eta\n if n is None:\n n = self.nu\n if Q is not None:\n #print(\"Before quantization:\", len(self.data))\n \"\"\"\n warnings.filterwarnings('error')\n try:\n print(\"On try\")\n div = self.data / Q\n print(\"div\")\n diq = (div).astype('int')\n print(\"diq\")\n mask = ((div - diq).astype('bool') * -1 + 1).astype('bool')\n print(\"mask\")\n self.data = self.data[mask]\n print(\"new_data\")\n except RuntimeWarning as w:\n print(w)\n warnings.filterwarnings('ignore')\n div = self.data / Q\n diq = (div).astype('int')\n mask = ((div - diq).astype('bool') * -1 + 1).astype('bool')\n self.data = self.data[mask]\n print( len( self.data[ np.isnan(self.data) == True ] ) )\n print(\"After quantization:\", len(self.data))\n\n warnings.filterwarnings('default')\n \"\"\"\n #print(\"Q:\", Q)\n #print(\"self.Q:\", self.Q)\n div = self.data / Q\n #print(div)\n diq = (div).astype('int')\n #print(diq)\n mask = ((div - diq).astype('bool') * -1 + 1).astype('bool')\n #print(mask)\n self.data = self.data[mask]\n self.lower_bound = int(np.floor( np.min(self.data) /2 ) * 2)\n self.upper_bound = int(np.floor( np.max(self.data) /2 ) * 2 +1)\n self.L = np.arange(self.lower_bound, self.upper_bound+1).astype('int') # an interval of integer values. 
Used to compute the pmf\n #print(self.data)\n \n return self._pv(l, e, n)", "title": "" }, { "docid": "a23ebc86256acecb6326674a6483616f", "score": "0.5184817", "text": "def detRejectanceRateValues(self):\n if(not(self.weight)):\n for nodej in self.g.nodes():\n for edge in self.g.out_edges(nodej):\n self.R[edge[0] + \"-\"+edge[1]] = (1.0/self.NormR[nodej])\n else:\n for nodej in self.g.nodes():\n for edge in self.g.out_edges(nodej):\n self.R[edge[0] + \"-\"+edge[1]] = (1.0 - self.g.get_edge_data(edge[0],edge[1]) )/self.NormR[nodej]", "title": "" }, { "docid": "b824d100fa94f2a751f79972e81db869", "score": "0.51749194", "text": "def calculate_stats(self):\n tps = []\n tns = []\n fps = []\n fns = []\n y_ = np.copy(self.y_)\n thresholds = np.linspace(0, 1.01, 1000)\n for thresh in thresholds:\n tps.append(self.true_positive(y_, thresh))\n tns.append(self.true_negative(y_, thresh))\n fps.append(self.false_positive(y_, thresh))\n fns.append(self.false_negative(y_, thresh))\n tps = np.array(tps)\n tns = np.array(tns)\n fps = np.array(fps)\n sensitivity = tps / (tps + fns)\n specificity = tns / (tns + fps)\n return sensitivity, specificity", "title": "" }, { "docid": "127adaf3507b9e6ca66c3d07190a3c72", "score": "0.5173262", "text": "def computeQValueFromValues(self, state, action):\n \"*** YOUR CODE HERE ***\"\n transisions = self.mdp.getTransitionStatesAndProbs(state,action)\n\n QV=0\n\n for (new_state,prob) in transisions:\n QV+=prob*(self.mdp.getReward(state,action,new_state)+self.discount*self.values[new_state])\n return QV", "title": "" }, { "docid": "0bc6e27e447e85785ba57d3f80e0acdb", "score": "0.5170065", "text": "def value(self) -> float:\n return (\n self.__food_quantity + self.__food_quality +\n self.__service_quality + self.__price\n ) / self.__criterias", "title": "" }, { "docid": "829bd5fbec73b05ef70b77eef62e8ddf", "score": "0.5166319", "text": "def calculateOutput(self):\r\n result=0.0\r\n total=0.0\r\n \r\n # adds up all the inputs\r\n i=0\r\n while i < len(self.input_values):\r\n total += self.input_values[i]*self.weights[i]\r\n i+=1\r\n # includes the baseline value\r\n total += self.weights[-1]\r\n\r\n # determines how close to the edges of the range the inputs reached\r\n # one in the positve direction and one in the negative depending on the sign of the total\r\n # pulls the total back into the range of -1 to 1\r\n if total >= 0:\r\n # returns a positive\r\n # makes output closer to 0\r\n total = total / self.sum_of_weights_pos\r\n else:\r\n # returns a negative -> - (-t / -w)\r\n # makes output closer to 1\r\n total = -total / self.sum_of_weights_neg\r\n\r\n # the sigmoid function\r\n result = 1/(1+2.0**(total*self.sigmoid_multiplier))\r\n\r\n self.output=result", "title": "" }, { "docid": "17a1b1f9554027d1490a53ef44ccf719", "score": "0.51648015", "text": "def CalculateForAllDataPoints(self, params):\n self.params.update(params)\n\n varsByCalc = self.GetExperimentCollection().GetVarsByCalc()\n self.calcVals = self.GetCalculationCollection().Calculate(varsByCalc, \n params)\n return self.calcVals", "title": "" }, { "docid": "f662f5a2c5e70cedea34e6aa3eaaeee9", "score": "0.5162285", "text": "def estimate(self, values):\n\n pdf = zeros_like(values)\n\n Ns = len(self.samples) \n\n # Could perform convolution with FFT.\n for m in range(Ns):\n pdf += self.weights[m] * exp(-(values - self.samples[m])**2 / (2 * self.sigmak**2)) / (self.sigmak * sqrt(2 * pi)) / Ns\n\n pdf /= trapz(pdf, values)\n return pdf", "title": "" }, { "docid": "e50299435b6ca9f301f6e345ac1a46b9", "score": 
"0.5161663", "text": "def calculate_diversification_metrics(self):\n # create a dataframe\n cols = ['Undiv. VaR', 'Div. VaR', 'Div. Effect ($)', 'Div. Effect (%)', 'Div. Ratio']\n self.diversification = pd.DataFrame(None, [''], cols)\n\n # get the diversified VaR\n div_var = self.var.loc['VaR', 'Portfolio']\n self.diversification['Div. VaR'] = div_var\n\n # compute the undiversified VaR\n undiv_var = self.var.loc['VaR', :].drop('Portfolio').sum()\n self.diversification['Undiv. VaR'] = undiv_var\n \n # compute the diversification effects ($)\n self.diversification['Div. Effect ($)'] = undiv_var - div_var\n\n # compute the diversification effects (%)\n self.diversification['Div. Effect (%)'] = (undiv_var - div_var) / undiv_var\n\n # compute the diversification ratio\n self.diversification['Div. Ratio'] = div_var / undiv_var\n\n self.log(self.diversification, 'Diversification Measures', index=False)", "title": "" }, { "docid": "16d27c037b135eabc26c70a6457f000c", "score": "0.5159992", "text": "def eval(self):\n # remove np.spacing(1)\n pixAcc = 1.0 * self.total_correct / (2.220446049250313e-16 + self.total_label)\n IoU = 1.0 * self.total_inter / (2.220446049250313e-16 + self.total_union)\n mIoU = IoU.mean().item()\n return pixAcc, mIoU", "title": "" }, { "docid": "7cc0ab11dcacbf5787b7ce42b860bc77", "score": "0.5156758", "text": "def sigma(self):\n return self._count_rates['flux_ratio'] / self.snr", "title": "" }, { "docid": "5b03afda83a5bbc794c0e0a619965803", "score": "0.5153938", "text": "def volatility(self):\n pass", "title": "" }, { "docid": "79ba4a110ff0d29a797db36781c4ff48", "score": "0.5151857", "text": "def computeAndScaleArraySums(self):\n self.sumRT()\n self.scaleRT()\n self.sumA()\n self.scaleA()\n self.Fluence()", "title": "" }, { "docid": "0ce34b54002d260887e3c1125c6e6369", "score": "0.5151038", "text": "def ss_rate(pwf,pe,rw,re,k,h,mu,B):\n return (7.08*10**(-3))*(k*h/(mu*B))*(pe-pwf)/(np.log(re/rw))", "title": "" }, { "docid": "a3e8926fe1c2c7ce24eda0dfa89ff7ae", "score": "0.51452446", "text": "def singal_measure(self): \n k1_data = math.sqrt(self.__k1_data)\n k2_data = math.sqrt(self.__k2_data)\n b_data = (k2_data - k1_data)/(k2_data + k1_data) \n a_data = 2.95494514*pow(b_data,1) + 0.17671943*pow(b_data,3) + 0.09230694*pow(b_data,5)\n amp_data = (k1_data + k2_data)*(3.20976143+0.9187393*pow(a_data,2)+0.14734229*pow(a_data,4))/8192\n amp_data = amp_data/4096\n freq_data = (0.5+a_data+self.__k1_index)*(self.__sample_rate/8192)\n self.__base_frequency = freq_data\n return(amp_data,'V',freq_data,'Hz')", "title": "" }, { "docid": "e66cafcced081c7b2dc352092086f493", "score": "0.51419634", "text": "def readSensor(self):# -> float:\n self.__RS_Calc = ((self.__VOLT_RESOLUTION * self.__RL ) / self.__sensor_volt ) - self.__RL\n \n if(self.__RS_Calc < 0): \n self.__RS_Calc = 0\n\n self.__ratio = self.__RS_Calc / self.__R0\n \n if(self.__ratio <= 0): \n self.__ratio = 0\n \n if(self.__regressionMethod == 1): \n self.__PPM = self.__a * math.pow( self.__ratio , self.__b )\n \n else:\n ppm_log = (math.log10( self.__ratio ) - self.__b ) / self.__a\n self.__PPM = math.pow(10, ppm_log)\n \n if( self.__PPM < 0): \n self.__PPM = 0\n \n return self.__PPM", "title": "" }, { "docid": "d4e8d90e794506bc11f66c908d37fa61", "score": "0.5141406", "text": "def results(self):\n res = super(TrackedSIR, self).results()\n\n self.hook.finalize_results(initial_results=self.timeseries_results, final_results=res, params=self.params)\n\n # TODO: Fix this dependency issue\n di = res['daily_infections']\n ci = 
res['currently_infected']\n\n # Calculate the average length of an infection\n n = self._g.nodes\n mil = np.mean([n[x]['days_infected'] for x in n if 'days_infected' in n[x]])\n res['mean_infection_length'] = mil\n\n res['daily_r'] = [((di[x + 1] / ci[x]) if ci[x] != 0 else 0) * mil for x in range(len(ci) - 1)]\n\n return res", "title": "" }, { "docid": "5688967c99e623a99b4c2263c14301ba", "score": "0.5136812", "text": "def calculate(self):", "title": "" }, { "docid": "8f84dc65cdf62c3bbaafde61ab93fc6e", "score": "0.5136497", "text": "def Evaluate():\n # Retrieve dataset files\n data_fls, ref_fls = LoadTroikaDataset()\n errs, confs = [], []\n for data_fl, ref_fl in zip(data_fls, ref_fls):\n # Run the pulse rate algorithm on each trial in the dataset\n errors, confidence = RunPulseRateAlgorithm(data_fl, ref_fl)\n errs.append(errors)\n confs.append(confidence)\n # Compute aggregate error metric\n errs = np.hstack(errs)\n confs = np.hstack(confs)\n return AggregateErrorMetric(errs, confs)", "title": "" }, { "docid": "58c77f68eaf7669325ca3461523bc02c", "score": "0.5134388", "text": "def ratio_p(self):\n ga = self.ga\n Ms = self.Ms\n return (2*ga*Ms**2 - (ga-1))/(ga+1)", "title": "" }, { "docid": "6bcfa1e6e96cbd23d34740eab5dafa0c", "score": "0.51284164", "text": "def GlobalScale(self):\n\t\tprint \"Scale following the order of importance (peaks)\"\n\t\tP = self.GlobalPeaks()[:,0]\n\t\tself.Echelle = []\n\t\tfor i in range(0,len(self.GP)):\n\t\t\tnb_ext = len(self.melodies)\n\t\t\t#tonique = self.melodies[nb_ext-1].tonique()[1]\n\t\t\ttonique = self.GetTheTonic()\n\t\t\tself.Echelle.append(log10(P[i]/tonique)*1000)\n\t\treturn self.Echelle", "title": "" }, { "docid": "6c04ce0eb342ee13eb12197902fb0dd0", "score": "0.5128311", "text": "def calculate_flows(self):\n e = self.alpha_star**(-1) / (1 - self.alpha_star**(-self.n_e))\n s = self.alpha_star**(-1) / (self.alpha_star**(self.n_s+1) - 1)\n e_sum = (e*self.xf / (e+s)).sum()\n s_sum = (s*self.xf / (e+s)).sum()\n\n p = self.user_f * e_sum\n f = self.user_p / e_sum\n\n if p < self.user_p:\n self.p = p\n self.f = self.user_f\n else:\n self.f = f\n self.p = self.user_p\n self.t = self.f * s_sum\n\n self.swu = self.get_swu()\n if self.swu > self.user_swu:\n self.swu = self.user_swu\n\n self.f = self.swu / (self.value_function(self.xp)*e_sum\n + self.value_function(self.xt)*s_sum\n - self.value_function(self.xf))\n self.p = self.f * e_sum\n self.t = self.f * s_sum\n\n return", "title": "" }, { "docid": "34281037e58f3024e2253c1e8909b337", "score": "0.5126956", "text": "def run(self):\n self.report.chapter('Historical Simulation Value-at-Risk')\n self.calculate_shifted_levels()\n self.get_full_revaluation_pnl()\n self.get_ptf_pnl()\n self.bootstrap_pnls()\n self.calculate_var()\n self.calculate_sub_ptf_pnl()\n self.calculate_sub_ptf_var()\n self.calculate_component_var()\n self.calculate_incremental_var()\n self.calculate_diversification_metrics()\n self.plot_hist()\n self.waterfall_plots()", "title": "" }, { "docid": "7fe6757d77a3b9d36fab5477f05b06e9", "score": "0.51256603", "text": "def f(ts):\n # This method is nicer than an inverse fourier transform because\n # results are consistant for differing ranges of ts around the same\n # time. 
The price payed is that the fft would be an order of\n # magnitude faster, but not reproducible for a slightly different\n # time array.\n values = sum(amp * np.cos(2*np.pi*freq * ts + phase)\n for freq, amp, phase\n in zip(self.freqs, self.amps, self.phases))\n\n # Normalization calculated by guess-and-check,\n # but seems to work fine\n # normalization = np.sqrt(2/len(self.freqs))\n values *= np.sqrt(2/len(self.freqs))\n\n # So far, the units of the values are V/V_rms, so multiply by the\n # rms voltage:\n values *= self.rms\n\n return values", "title": "" }, { "docid": "2d3ab37474c5bdad03e54e309012aa38", "score": "0.5123594", "text": "def production():\r\n # kp is the interval considered\r\n kp = np.linspace(4*10**(-3),16*10**(-3),1000)\r\n \r\n # Aggregate fraction calculated analytically\r\n a_n = 3*k1_n*kp/(kc_n*(kc_n+k1_n))\r\n # Changing units to uM\r\n a_n = a_n*1000\r\n kp = kp * 1000\r\n \r\n # Plotting figure\r\n fig, ax = plt.subplots()\r\n fig.subplots_adjust(left=.15, bottom=.2, right=.95, top=.97) \r\n ax.set_ylabel(r'Aggregated $A\\beta_{42}$ in ISF (pM)')\r\n ax.set_xlabel('Production rate ($pM.h^{-1}$)') \r\n \r\n plt.plot(kp, a_n, 'black', linewidth = 1.0)\r\n ax.axvline(x=kp_n*1000, color = 'g', linestyle='dashed', alpha=0.5)\r\n ax.text(x=kp_n*1000 + 0.2, y=150, s='Normal', alpha=1, color='g', fontsize = 6)\r\n \r\n ax.axvline(x=kp_FAD*1000, color = 'r', linestyle='dashed', alpha=0.5)\r\n ax.text(x=kp_FAD*1000 + 0.2, y=150, s='FAD', alpha=1, color='r', fontsize = 6)\r\n\r\n # Save and display image \r\n fig.set_size_inches(width, height)\r\n fig.savefig('Variations in production rate.png', dpi=1200)\r\n plt.show()\r\n return", "title": "" }, { "docid": "9d0e5a63f7dbf72458d4d8cb9ae05d23", "score": "0.5117035", "text": "def pv(flows, r):\n dates = flows.index\n discounts = discount(dates, r)\n return discounts.multiply(flows, axis=\"rows\").sum()", "title": "" }, { "docid": "945e198e9811ac8aa693449d804faeb4", "score": "0.51122", "text": "def calculate_var(self):\n # create a dataframe for the MC VaR\n self.var = pd.DataFrame(columns=self.position_names, index=['VaR', 'Correlation'])\n\n # iterate over the positions\n ptf_pnl = self.ptf_pnl['P&L ($)']\n for asset in self.position_names:\n asset_pnl = self.full_revaluation_pnl[asset]\n \n # calculate the individual VaR\n self.var.loc['VaR', asset] = np.percentile(asset_pnl, (1 - self.confidence) * 100)\n\n # compute the correlation between the asset P&L and the portfolio P&L\n self.var.loc['Correlation', asset] = pearsonr(asset_pnl, ptf_pnl)[0]\n\n # compute the MC n-day portfolio VaR\n self.var.loc['VaR', 'Portfolio'] = np.percentile(ptf_pnl, (1 - self.confidence) * 100)\n \n self.log(self.var, 'Value at Risk')", "title": "" }, { "docid": "72310e6faec92e55844761cef1671695", "score": "0.5109994", "text": "def pv(flows, r):\n dates = flows.index\n discounts = discount(dates, r)\n return discounts.multiply(flows, axis='rows').sum()", "title": "" }, { "docid": "f5b41a06461174665a0d90c84cfc9563", "score": "0.5109505", "text": "def rsi(df, s, period) -> ReturnType:\n\n delta = df['close'].diff().to_numpy()\n\n # gain\n U = (np.absolute(delta) + delta) / 2.\n # loss\n D = (np.absolute(delta) - delta) / 2.\n\n smma_u = calc_smma(U, period)\n smma_d = calc_smma(D, period)\n\n return 100 - 100 / (1. 
+ smma_u / smma_d), period", "title": "" }, { "docid": "53ae70211db2b39bcf9175c09d8946a2", "score": "0.51083857", "text": "def calculate_values(eq_model, eq_year):\n with open(\"res/api-response.json\", \"r\") as resp:\n data = json.load(resp)\n\n cost = data[eq_model][\"saleDetails\"][\"cost\"]\n market_ratio = data[eq_model][\"schedule\"][\"years\"][eq_year][\"marketRatio\"]\n auction_ratio = data[eq_model][\"schedule\"][\"years\"][eq_year][\"auctionRatio\"]\n\n return {\"marketValue\": cost * market_ratio,\n \"auctionValue\": cost * auction_ratio\n }", "title": "" }, { "docid": "58fc550ecf042d256546e602f715ef15", "score": "0.5105003", "text": "def __call__(self, values):\n r = self.residuals(values)\n return getattr(r, 'SSE', 1e9)", "title": "" }, { "docid": "8aedcd63e22b87624222c89f92773198", "score": "0.5103577", "text": "def testing(predicted_price):\r\n global y_test\r\n test1 = list(range(0,len(y_test)))\r\n test2 = list(range(0,len(y_test)))\r\n a=int()\r\n\r\n for a in range(0,len(y_test)):\r\n test1[a] = y_test[a] - predicted_price['Linear'][a]\r\n predicted_price['Linear'][a] = predicted_price['Linear'][a] + test1[a]\r\n test2[a] = y_test[a] - predicted_price['RBF'][a]\r\n predicted_price['RBF'][a] = predicted_price['RBF'][a] + test2[a]\r\n\r\n avr1 = sum(test1)/len(test1)\r\n avr2 = sum(test2)/len(test2)\r\n\r\n for a in range(len(y_test),len(predicted_price['Linear'])):\r\n predicted_price['Linear'][a] = predicted_price['Linear'][a] + avr1\r\n predicted_price['RBF'][a] = predicted_price['RBF'][a] + avr2\r\n\r\n return int(avr1), int(avr2)", "title": "" }, { "docid": "e9dc83190f57a8f83668584c20e3bd24", "score": "0.51033914", "text": "def test_get_returns_and_pl_average_return(self):\n all_prices = [[1., 2.], [3., 4.], [5., 6.]]\n signals = [['buy', 'buy', 'sell'],\n [None, None, None]]\n returns, p_and_l = stockdata.get_returns_and_pl(all_prices, signals)\n self.assertEqual(returns, [0, -0.38077249551779302])\n self.assertEqual(p_and_l, [0, 0])", "title": "" }, { "docid": "4ae17ca4995597d52aec52653bc471eb", "score": "0.5100543", "text": "def get_measures(self):\n precision = 0\n recall = 0\n f1_score = 0\n if self.t_p > 0:\n precision = self.t_p / (self.t_p + self.f_p)\n recall = self.t_p / (self.t_p + self.f_n)\n if (precision + recall) > 0:\n f1_score = 2*(precision * recall / (precision + recall))\n return precision, recall, f1_score", "title": "" }, { "docid": "4cd0d543787085fd904f9830537bfa4e", "score": "0.51004606", "text": "def pv(l,r):\r\n dates=l.index\r\n discounts = discount(dates,r)\r\n return (discounts*l).sum()", "title": "" }, { "docid": "afcabc478b2d8a8bc373f11317c777d7", "score": "0.5099384", "text": "def value(self) -> float:\n if self._value is None:\n self.lr.fit(self._train[\"V^2\"].values.reshape(-1, 1), self._train[\"Fa\"])\n self._value = self.lr.coef_[0]\n return self._value", "title": "" }, { "docid": "68453bbd8c48b38f16255b6ad75d39e1", "score": "0.50984603", "text": "def calculate_signals(self, tick_event):\n\n return", "title": "" }, { "docid": "d62b154ba0ad6d9d99473f074aabd4fa", "score": "0.5096261", "text": "def weighted_prediction(scores, preds):\n\n if len(scores) == 1: # only one method, return the series\n return preds[0]\n else:\n n = len(scores)\n # first compute total error by series\n total_error = {}\n for i in range(45):\n score = 0\n for j in range(n):\n score += scores[j][i]\n\n total_error[i] = score\n\n # now total_error is a dict with total_error[i] the total error\n # of all methods for the serie-(i+1)\n\n # create array of weight 
by method and series, being proportional\n # to the error\n normalizer = n-1\n weights = np.zeros(scores.shape)\n for i in range(n):\n for j in range(45):\n weights[i][j] = (1 / float(normalizer)) * (1 - scores[i][j]/total_error[j])\n\n # now weights[i][j] is the weights for serie j+1 given by method i\n\n # create a series to accumulate the mean of all methods\n df = pd.DataFrame(np.zeros(preds[0].shape), index=preds[0].index)\n\n # note that this series does not have the good name for the columns\n # we change that now\n rn = {}\n for i in range(45):\n rn[i] = 'series-' + str(i+1)\n df = df.rename(columns=rn)\n # now df has the good shape, the good name for the columns and zeros\n # everywhere\n\n for i in range(n): # for each method\n for j in range(45): # for each column\n column = 'series-' + str(j+1)\n\n # df[column] receives the weighted sum of values\n df[column] = df[column] + preds[i][column] * weights[i][j]\n\n return df", "title": "" }, { "docid": "47de7f2a6ef88e17daf820992c0f211a", "score": "0.5094053", "text": "def rsi_predictions(prices, rsi):\n days_observed = 14\n true_positive = 0\n false_positive = 0\n true_negative = 0\n false_negative = 0\n try:\n while days_observed < len(prices) - 5:\n change = array(prices[days_observed + 1: days_observed + 6]).mean()\n is_overbought, is_oversold = bought_status(rsi[days_observed])\n if is_oversold:\n if change > prices[days_observed]:\n true_positive += 1\n else:\n false_negative += 1\n elif is_overbought:\n if change <= prices[days_observed]:\n true_negative += 1\n else:\n false_positive += 1\n days_observed += 1\n return calculate_predictions(true_positive, false_positive, true_negative, false_negative)\n except IndexError as error:\n print(error, len(rsi), len(prices))\n return 0.5, 0.5, 0.5", "title": "" }, { "docid": "233cc3ef9c44ae91a03aff95d4eab3e9", "score": "0.5089559", "text": "def performGetValue(self, quant, options={}):\n # proceed depending on quantity\n if 'Waveform' in quant.name:\n # Always update dt\n sampling_rate = self.getValue('Sampling Rate')\n self.dt = 1 / sampling_rate\n\n # Make all of our waveform checks and update what we need to\n if self.basis_rotation_generators_are_stale:\n self.update_basis_rotation_generators()\n self.update_state_preparation_sequence()\n self.update_measurement_basis_sequence()\n self.basis_rotation_generators_are_stale = False\n self.state_prep_sequence_is_stale = False\n self.measurement_basis_sequence_is_stale = False\n\n if self.state_prep_sequence_is_stale:\n self.update_state_preparation_sequence()\n self.state_prep_sequence_is_stale = False\n \n if self.process_sequence_is_stale:\n self.update_process_sequence()\n self.process_sequence_is_stale = False\n\n if self.measurement_basis_sequence_is_stale:\n self.update_measurement_basis_sequence()\n self.measurement_basis_sequence_is_stale = False\n\n if self.readout_is_stale:\n self.update_readout()\n self.readout_is_stale = False\n\n if self.getValue('Process X - G-E') is not None:\n self.update_process_sequence()\n\n # Plot and return the appropriate trace dict:\n if quant.name == 'Waveform - X Signal':\n ge_frequency = 1e-3*self.getValue('G-E Frequency')\n ef_frequency = 1e-3*self.getValue('E-F Frequency')\n percent_of_sequence = self.getValue('Percent of Sequence') / 100\n x_ge_signal = dsp_utils.modulate_signal(self.x_ge.build_sequence(percent_of_sequence),\n self.dt,\n ge_frequency,\n np.pi/2) \\\n + dsp_utils.modulate_signal(self.y_ge.build_sequence(percent_of_sequence),\n self.dt,\n ge_frequency,\n np.pi/2) # sine 
phase\n x_ef_signal = dsp_utils.modulate_signal(self.x_ef.build_sequence(percent_of_sequence),\n self.dt,\n ef_frequency,\n np.pi/2) \\\n + dsp_utils.modulate_signal(self.y_ef.build_sequence(percent_of_sequence),\n self.dt,\n ef_frequency,\n np.pi/2) \n signal = x_ge_signal + x_ef_signal\n trace = quant.getTraceDict(signal, t0=0.0, dt=self.dt)\n elif quant.name == 'Waveform - Y Signal':\n ge_frequency = 1e-3*self.getValue('G-E Frequency')\n ef_frequency = 1e-3*self.getValue('E-F Frequency')\n percent_of_sequence = self.getValue('Percent of Sequence') / 100\n y_ge_signal = dsp_utils.modulate_signal(self.y_ge.build_sequence(percent_of_sequence),\n self.dt,\n ge_frequency,\n 0) \\\n + dsp_utils.modulate_signal(self.x_ge.build_sequence(percent_of_sequence),\n self.dt,\n ge_frequency,\n 0)\n y_ef_signal = dsp_utils.modulate_signal(self.y_ef.build_sequence(percent_of_sequence),\n self.dt,\n ef_frequency,\n 0) \\\n + dsp_utils.modulate_signal(self.x_ef.build_sequence(percent_of_sequence),\n self.dt,\n ef_frequency,\n 0)\n signal = y_ge_signal + y_ef_signal\n trace = quant.getTraceDict(signal, t0=0.0, dt=self.dt)\n elif quant.name == 'Waveform - Z Signal':\n percent_of_sequence = self.getValue('Percent of Sequence') / 100\n signal = self.z.build_sequence(percent_of_sequence)\n trace = quant.getTraceDict(signal, t0=0.0, dt=self.dt)\n elif quant.name == 'Waveform - Readout I':\n percent_of_sequence = self.getValue('Percent of Sequence') / 100\n signal = self.readout_i.build_sequence(percent_of_sequence)\n trace = quant.getTraceDict(signal, t0=0.0, dt=self.dt)\n elif quant.name == 'Waveform - Readout Q':\n percent_of_sequence = self.getValue('Percent of Sequence') / 100\n signal = self.readout_q.build_sequence(percent_of_sequence)\n trace = quant.getTraceDict(signal, t0=0.0, dt=self.dt)\n elif quant.name == 'Waveform - Readout Trigger':\n percent_of_sequence = self.getValue('Percent of Sequence') / 100\n signal = self.readout_trigger.build_sequence(percent_of_sequence)\n trace = quant.getTraceDict(signal, t0=0.0, dt=self.dt)\n return trace\n\n elif 'QB' in quant.name:\n demodulated_signal = self.demodulate()\n return demodulated_signal\n\n else: \n # for other quantities, just return current value of control\n return quant.getValue()", "title": "" }, { "docid": "6d4cfacfad586eb180e6877bceeb5057", "score": "0.5085563", "text": "def negative_predictive_values(self):\n return self.tn() / (self.fn() + self.tn())", "title": "" }, { "docid": "5bc00ffcc6633b79e548a8addd2ab41f", "score": "0.50805104", "text": "def _calc_p(self):\n assert hasattr(self, '_v_void')\n self.pdf.update_pdf(v_void=self._v_void,\n f=self._f.max(),\n rt_mean=self._v_void / self._f.max())\n self._p = self.pdf.get_p()\n # Log.\n self.log.d_data(self._log_tree, 'p', self._p)", "title": "" }, { "docid": "999b261229b66c0308821615748258f0", "score": "0.5075779", "text": "def computeQValueFromValues(self, state, action):\n \"*** YOUR CODE HERE ***\"\n qValue = 0\n for nextState, prob in self.mdp.getTransitionStatesAndProbs(state,action):\n qValue += prob*(self.mdp.getReward(state,action,nextState)+self.discount*self.values[nextState])\n # print(qValue)\n return qValue", "title": "" }, { "docid": "be721d7112fc1839aedb29bd4bb6b678", "score": "0.5072971", "text": "def get_statistic(self):\n return self.valdiffs / self.get_se()", "title": "" }, { "docid": "280c0f420c48d26b6838ee8c2358804c", "score": "0.5072796", "text": "def mse(poll_df, prediction_df):\n # TODO: Have data_dict as parameter here? 
Or hand over data_dict['allensbach'] directly?\n # TODO: Refactor this once the model is wrapped in a class.\n mse = 0\n for party in parties:\n true_results = poll_df[party]\n predicted_results = prediction_df[party][1:1+len(true_results)] # first point is prediction into the future, do not use it\n mse += np.mean((true_results - predicted_results)**2)\n return mse / len(parties)", "title": "" } ]
ac3c81a73e77f650ede7a49991f02b27
Set up all dataset config options.
[ { "docid": "6a018c9b4bf9649f104ecf5b3e637a9e", "score": "0.0", "text": "def configure(self, repo_dir, inner_size, scale_range, do_transforms,\n rgb, shuffle, set_name, subset_pct, macro,\n contrast_range, aspect_ratio):\n assert (subset_pct > 0 and subset_pct <= 100), (\n 'subset_pct must be between 0 and 100')\n assert(set_name in ['train', 'validation'])\n self.set_name = set_name if set_name == 'train' else 'val'\n\n self.repo_dir = repo_dir\n self.inner_size = inner_size\n if isinstance(scale_range, int):\n self.scale_range = (scale_range, scale_range)\n else:\n self.scale_range = scale_range\n self.minibatch_size = self.be.bsz\n\n self.center = not do_transforms\n self.flip = do_transforms\n self.contrast_range = contrast_range if do_transforms else (100, 100)\n self.aspect_ratio = aspect_ratio if do_transforms else 0\n if not do_transforms:\n self.scale_range = (self.scale_range[0], self.scale_range[0])\n if do_transforms:\n assert (self.aspect_ratio == 0 or self.aspect_ratio > 100), (\n 'bad value for aspect_ratio augmentation')\n\n self.shuffle = shuffle if do_transforms else False\n\n self.rgb = rgb\n self.start_idx = 0\n self.macro = macro\n self.batch_prefix = \"macrobatch_\"\n\n if not macro:\n self.filename = os.path.join(repo_dir, 'filelist.txt')\n if not os.path.exists(self.filename):\n raise IOError('Cannot find %s' % self.filename)\n filelist = np.genfromtxt(self.filename, dtype=str)\n self.ndata = int(len(filelist) * subset_pct / 100.)\n assert self.ndata != 0\n self.macro_start = 0\n self.nlabels = 1\n self.nclass = 1\n self.global_mean = None\n self.img_size = inner_size\n return\n\n # Load from repo dataset_cache:\n cache_filepath = os.path.join(repo_dir, self.batch_prefix + 'meta')\n try:\n dataset_cache = dict()\n with open(cache_filepath, 'r') as f:\n for line in f:\n (k, v) = line.split()\n dataset_cache[k] = float(v) if k.endswith('mean') else int(v)\n rgbmean = [[dataset_cache[c + '_mean']] for c in ['B', 'G', 'R']]\n dataset_cache['global_mean'] = np.array(rgbmean, dtype=np.float32)\n except IOError:\n raise IOError(\"Cannot find '%s'. Run batch_writer to preprocess the \"\n \"data and create batch files for imageset\" % (cache_filepath))\n\n # Should have following defined:\n req_attributes = ['global_mean', 'nclass', 'val_start', 'train_start',\n 'train_nrec', 'val_nrec',\n 'item_max_size', 'label_size']\n\n for r in req_attributes:\n if r not in dataset_cache:\n raise ValueError(\n 'Dataset cache missing required attribute %s' % (r))\n\n self.__dict__.update(dataset_cache)\n self.filename = os.path.join(repo_dir, self.batch_prefix)\n\n self.label = 'l_id'\n if isinstance(self.nclass, dict):\n self.nclass = self.nclass[self.label]\n\n self.recs_available = getattr(self, self.set_name + '_nrec')\n self.macro_start = getattr(self, self.set_name + '_start')\n self.ndata = int(self.recs_available * subset_pct / 100.)", "title": "" } ]
[ { "docid": "efb231ed2b6b2f1ce3aa15434c6d4fb4", "score": "0.7039832", "text": "def dataset_setup(self):\n settings = self.settings\n if settings.crowd_dataset == CrowdDataset.ucf_qnrf:\n self.dataset_class = UcfQnrfFullImageDataset\n self.train_dataset = UcfQnrfTransformedDataset(middle_transform=data.RandomHorizontalFlip(),\n seed=settings.labeled_dataset_seed,\n number_of_examples=settings.labeled_dataset_size,\n map_directory_name=settings.map_directory_name)\n self.train_dataset_loader = DataLoader(self.train_dataset, batch_size=settings.batch_size,\n pin_memory=self.settings.pin_memory,\n num_workers=settings.number_of_data_workers)\n self.unlabeled_dataset = UcfQnrfTransformedDataset(middle_transform=data.RandomHorizontalFlip(),\n seed=settings.labeled_dataset_seed,\n number_of_examples=settings.unlabeled_dataset_size,\n map_directory_name=settings.map_directory_name,\n examples_start=settings.labeled_dataset_size)\n self.unlabeled_dataset_loader = DataLoader(self.unlabeled_dataset, batch_size=settings.batch_size,\n pin_memory=self.settings.pin_memory,\n num_workers=settings.number_of_data_workers)\n self.validation_dataset = UcfQnrfTransformedDataset(dataset='test', seed=101,\n map_directory_name=settings.map_directory_name)\n elif settings.crowd_dataset == CrowdDataset.shanghai_tech:\n self.dataset_class = ShanghaiTechFullImageDataset\n self.train_dataset = ShanghaiTechTransformedDataset(middle_transform=data.RandomHorizontalFlip(),\n seed=settings.labeled_dataset_seed,\n number_of_examples=settings.labeled_dataset_size,\n map_directory_name=settings.map_directory_name)\n self.train_dataset_loader = DataLoader(self.train_dataset, batch_size=settings.batch_size,\n pin_memory=self.settings.pin_memory,\n num_workers=settings.number_of_data_workers)\n self.unlabeled_dataset = ShanghaiTechTransformedDataset(middle_transform=data.RandomHorizontalFlip(),\n seed=100,\n number_of_examples=settings.unlabeled_dataset_size,\n map_directory_name=settings.map_directory_name)\n self.unlabeled_dataset_loader = DataLoader(self.train_dataset, batch_size=settings.batch_size,\n pin_memory=self.settings.pin_memory,\n num_workers=settings.number_of_data_workers)\n self.validation_dataset = ShanghaiTechTransformedDataset(dataset='test', seed=101,\n map_directory_name=settings.map_directory_name)\n\n elif settings.crowd_dataset == CrowdDataset.world_expo:\n self.dataset_class = WorldExpoFullImageDataset\n self.train_dataset = WorldExpoTransformedDataset(middle_transform=data.RandomHorizontalFlip(),\n seed=settings.labeled_dataset_seed,\n number_of_cameras=settings.number_of_cameras,\n number_of_images_per_camera=settings.number_of_images_per_camera)\n self.train_dataset_loader = DataLoader(self.train_dataset, batch_size=settings.batch_size,\n pin_memory=self.settings.pin_memory,\n num_workers=settings.number_of_data_workers)\n self.unlabeled_dataset = WorldExpoTransformedDataset(middle_transform=data.RandomHorizontalFlip(),\n seed=settings.labeled_dataset_seed,\n number_of_cameras=settings.number_of_cameras,\n number_of_images_per_camera=settings.number_of_images_per_camera)\n self.unlabeled_dataset_loader = DataLoader(self.unlabeled_dataset, batch_size=settings.batch_size,\n pin_memory=self.settings.pin_memory,\n num_workers=settings.number_of_data_workers)\n self.validation_dataset = WorldExpoTransformedDataset(dataset='validation', seed=101)\n if self.settings.batch_size > self.train_dataset.length:\n self.settings.batch_size = self.train_dataset.length", "title": "" }, { "docid": 
"3576e67e571d4b2bb3f0cf7892c47e71", "score": "0.6755877", "text": "def test_dataset_config(self):\n dataset = fluid.core.Dataset(\"MultiSlotDataset\")\n dataset.set_thread_num(12)\n dataset.set_filelist([\"a.txt\", \"b.txt\", \"c.txt\"])\n dataset.set_trainer_num(4)\n dataset.set_hdfs_config(\"my_fs_name\", \"my_fs_ugi\")\n dataset.set_download_cmd(\"./read_from_afs my_fs_name my_fs_ugi\")\n dataset.set_enable_pv_merge(False)\n\n thread_num = dataset.get_thread_num()\n self.assertEqual(thread_num, 12)\n\n filelist = dataset.get_filelist()\n self.assertEqual(len(filelist), 3)\n self.assertEqual(filelist[0], \"a.txt\")\n self.assertEqual(filelist[1], \"b.txt\")\n self.assertEqual(filelist[2], \"c.txt\")\n\n trainer_num = dataset.get_trainer_num()\n self.assertEqual(trainer_num, 4)\n\n name, ugi = dataset.get_hdfs_config()\n self.assertEqual(name, \"my_fs_name\")\n self.assertEqual(ugi, \"my_fs_ugi\")\n\n download_cmd = dataset.get_download_cmd()\n self.assertEqual(download_cmd, \"./read_from_afs my_fs_name my_fs_ugi\")", "title": "" }, { "docid": "ecec69c5351f6100df68003d178a48c0", "score": "0.66378176", "text": "def initSetConfig(self):\n pass", "title": "" }, { "docid": "09880be1ce3bb56b50a7dc27a8e7597d", "score": "0.6620552", "text": "def set_defaults(config={}):\n if 'seg_data' not in list(config.keys()):\n config['seg_data'] = 'vz_predict_dataset.csv.gz'\n if 'concern' not in list(config.keys()):\n config['concern'] = ''\n if 'atr' not in list(config.keys()):\n config['atr'] = ''\n if 'tmc' not in list(config.keys()):\n config['tmc'] = ''\n if 'f_cont' not in list(config.keys()):\n config['f_cont'] = ['width']\n if 'f_cat' not in list(config.keys()):\n config['f_cat'] = ['lanes', 'hwy_type', 'osm_speed', 'oneway',\n 'signal']\n\n # Add features for additional data sources\n if 'data_source' in config and config['data_source']:\n for source in config['data_source']:\n config[source['feat']].append(source['name'])\n\n if 'process' not in list(config.keys()):\n config['process'] = True\n if 'time_target' not in list(config.keys()):\n config['time_target'] = [15, 2017]\n if 'weeks_back' not in list(config.keys()):\n config['weeks_back'] = 1\n if 'name' not in list(config.keys()):\n config['name'] = 'boston'\n if 'level' not in list(config.keys()):\n config['level'] = 'week'", "title": "" }, { "docid": "c4aa53dd289e88ee9df10a3b5d11813e", "score": "0.6555243", "text": "def __init__(self, data_path):\n config = self.load_options(data_path)\n for i in config:\n setattr(self, '{}'.format(i), '{}'.format(config[i]))", "title": "" }, { "docid": "a6484a9ad97261f26351e08a8e9db916", "score": "0.64702314", "text": "def setup(args):\n cfg = get_cfg()\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n\n if cfg.EVAL_ONLY:\n cfg.DATASETS.TEST = (\"smokenet_test\",)\n test_annotation_file = os.path.join(cfg.DATA_DIR_PATH, \"test.json\")\n # Register into DatasetCatalog\n register_imagenet_instances(cfg.DATASETS.TEST[0], {}, test_annotation_file)\n\n else:\n train_annotation_file = os.path.join(cfg.DATA_DIR_PATH, \"train.json\")\n val_annotation_file = os.path.join(cfg.DATA_DIR_PATH, \"val.json\")\n # Register into DatasetCatalog\n register_imagenet_instances(\"smokenet_train\", {}, train_annotation_file)\n register_imagenet_instances(\"smokenet_val\", {}, val_annotation_file)\n\n cfg.freeze()\n default_setup(cfg, args)\n\n return cfg", "title": "" }, { "docid": "93b1babbfc9dc7b6e1489669b33ed4bf", "score": "0.64027584", "text": "def set_dataset_paths(args):\n if not 
args.train_path:\n args.train_path = 'datasets/%s/train' % (args.dataset)\n if not args.val_path:\n if args.dataset == 'imagenet' or args.dataset == 'places':\n args.val_path = 'datasets/%s/val' % (args.dataset)\n else:\n args.val_path = 'datasets/%s/test' % (args.dataset)", "title": "" }, { "docid": "4fba0cbbc75905319a8e95f65732a29a", "score": "0.639718", "text": "def manage_options():\n parser = OptionParser(usage=\"usage: %prog [options] dataset_file\", version=\"%prog 1.0\")\n\n parser.add_option(\"-s\", \"--seed\", dest=\"seed\", default=42, help=\"Pseudo-random seed for replicability\", type=\"int\")\n parser.add_option(\"-t\", \"--test_size\", dest=\"test_size\", default=.3, help=\"Test set size in percentage [0,1]\")\n parser.add_option(\"-n\", \"--normalize\", dest=\"normalize\", default=1, help=\"Whether the instances has to be normalized or not (default:1) - 0:No, 1:MinMax standardization, 2:L2 normalization\", type=\"int\")\n parser.add_option(\"-c\", \"--config_file\", dest=\"config_file\", default=\"./config/config.json\", help=\"Configuration file\")\n parser.add_option(\"-v\", \"--verbose\", dest=\"verbose\", default=False, help=\"Verbose output\", action=\"store_true\")\n\n (options, args) = parser.parse_args()\n if len(args) == 0:\n parser.error(\"Wrong arguments\")\n\n out_dict = vars(options)\n out_dict[\"dataset\"] = args[0]\n return out_dict", "title": "" }, { "docid": "dd8ddecb029d2e213f7d0f0a1667c129", "score": "0.6343363", "text": "def set_dataset(self):\n\n fpath = os.path.join(os.path.dirname(os.path.dirname(__file__)), \"splits\", self.opt.split, \"{}_files.txt\")\n test_fpath = os.path.join(os.path.dirname(os.path.dirname(__file__)), \"splits\", \"eigen\", \"test_files.txt\")\n train_filenames = readlines(fpath.format(\"train\"))\n val_filenames = readlines(test_fpath)\n\n train_dataset = KittiDataset(\n self.opt.data_path, self.opt.gt_path, train_filenames, self.opt.height, self.opt.width,\n crph=self.opt.crph, crpw=self.opt.crpw, is_train=True, predang_path=self.opt.predang_path,\n semanticspred_path=self.opt.semanticspred_path\n )\n\n val_dataset = KittiDataset(\n self.opt.data_path, self.opt.val_gt_path, val_filenames, self.opt.height, self.opt.width,\n crph=self.opt.crph, crpw=self.opt.crpw, is_train=False, predang_path=self.opt.predang_path,\n semanticspred_path=self.opt.semanticspred_path\n )\n\n self.train_loader = DataLoader(\n train_dataset, self.opt.batch_size, shuffle=True,\n num_workers=self.opt.num_workers, pin_memory=True, drop_last=True)\n self.val_loader = DataLoader(\n val_dataset, self.opt.batch_size, shuffle=False,\n num_workers=self.opt.num_workers, pin_memory=True, drop_last=True)\n\n self.train_num = train_dataset.__len__()\n self.val_num = val_dataset.__len__()\n self.num_total_steps = self.train_num // self.opt.batch_size * self.opt.num_epochs", "title": "" }, { "docid": "8bdbb0c84b292ff5d9ca46df7ef67483", "score": "0.6332738", "text": "def setup_config(self):\n \n pass", "title": "" }, { "docid": "639b85d214507e0ebda3b0f7961fc135", "score": "0.63219446", "text": "def set_data_config(univ):\n data_config = DataSet()\n data_config.meta_data_root = './meta_data/universe_meta_data.json'\n data_config.symbols_list = univ\n\n return data_config", "title": "" }, { "docid": "439d4e4c36d1933e5e57d08030c501ad", "score": "0.63144433", "text": "def initialize(self):\n self.options.declare('driver_uid')\n self.options.declare('cmdows_path')\n self.options.declare('kb_path')\n self.options.declare('data_folder')\n 
self.options.declare('base_xml_file')\n self.options.declare('show_model', default=False)", "title": "" }, { "docid": "42f8a6e8f6443775f9d92577ff20c8ac", "score": "0.62298006", "text": "def addDataset(config, datasetName, **settings):\n datasetInstance = getattr(config.Datasets, datasetName, None)\n if datasetInstance == None:\n defaultInstance = getattr(config.Datasets, \"Default\", None)\n\n if defaultInstance != None:\n datasetInstance = copy.deepcopy(defaultInstance)\n datasetInstance._internal_name = datasetName\n setattr(config.Datasets, datasetName, datasetInstance)\n else:\n datasetInstance = config.Datasets.section_(datasetName)\n\n datasetInstance.Name = datasetName\n datasetInstance.Scenario = settings.get(\"scenario\", \"collision\")\n\n datasetInstance.section_(\"Repack\")\n datasetInstance.section_(\"Reco\")\n datasetInstance.section_(\"Alca\")\n datasetInstance.section_(\"WMBSPublish\")\n datasetInstance.section_(\"DQM\")\n\n default_proc_ver = settings.get(\"default_proc_ver\", None)\n\n datasetInstance.Repack.ProcessingVersion = settings.get(\"repack_proc_ver\",\n default_proc_ver)\n \n datasetInstance.Reco.DoReco = settings.get(\"do_reco\", False)\n datasetInstance.Reco.GlobalTag = settings.get(\"global_tag\", None)\n datasetInstance.Reco.CMSSWVersion = settings.get(\"reco_version\", \"Undefined\")\n datasetInstance.Reco.ConfigURL = settings.get(\"reco_configuration\",\n None)\n datasetInstance.Reco.ProcessingVersion = settings.get(\"reco_proc_ver\",\n default_proc_ver)\n\n datasetInstance.Alca.DoAlca = settings.get(\"do_alca\", False)\n datasetInstance.Alca.CMSSWVersion = settings.get(\"alca_version\", \"Undefined\")\n datasetInstance.Alca.ConfigURL = settings.get(\"alca_configuration\",\n None)\n datasetInstance.Alca.ProcessingVersion = settings.get(\"alca_proc_ver\",\n default_proc_ver)\n\n datasetInstance.WMBSPublish.DoWMBSPublish = settings.get(\"do_wmbs_publish\", False)\n datasetInstance.WMBSPublish.DataTiersTo = settings.get(\"wmbs_publish_data_tiers_to\", [] )\n\n datasetInstance.DQM.DoDQM = settings.get(\"do_dqm\", False)\n datasetInstance.DQM.CMSSWVersion = settings.get(\"dqm_version\", \"Undefined\")\n datasetInstance.DQM.ConfigURL = settings.get(\"dqm_configuration\", None)\n datasetInstance.DQM.ProcessingVersion = settings.get(\"dqm_proc_ver\",\n default_proc_ver)\n\n datasetInstance.CustodialNode = settings.get(\"custodial_node\", None)\n datasetInstance.ArchivalNode = settings.get(\"archival_node\", None)\n datasetInstance.CustodialPriority = settings.get(\"custodial_priority\",\n \"high\")\n datasetInstance.CustodialAutoApprove = settings.get(\"custodial_auto_approve\",\n False)\n\n datasetInstance.Tier1Skims = [] \n return", "title": "" }, { "docid": "897595289fb6afd31cabf7530c3a7f82", "score": "0.6225225", "text": "def data_config():\n print(\"Data Config\\n\\n\")\n #pandas_sample(\"/data/datafinal_60-16.json\")\n utils.text_features_grouped(\"/data/datafinal_60-16.json\")", "title": "" }, { "docid": "595a8cb3a99e03013cb593b7f3df49ae", "score": "0.62173253", "text": "def setup(self) -> None:\n self.conf.setup_seed(self.system_conf.train.seed)\n\n self._setup_datasets()\n logging.info(\"Train Data: %d\" % (len(self.train_dataset)))\n logging.info(\"Val Data: %d\" % (len(self.val_dataset)))\n\n self._net = self._setup_network().to(self.device)\n logging.info(\"Network: %s\" % str(self.net))\n\n self._crit = self._setup_crit()\n logging.info(\"Criterion: %s\" % str(self.crit))\n\n self._optim = self._setup_optim()\n logging.info(\"Optimizer: %s\" % 
str(self.optim))\n\n self._max_epoch = self.system_conf.train.max_epoch\n\n self._metrics = self._setup_metrics()\n logging.info(\"Metrics: %s\", str(self.metrics))\n\n self._setup_extra()\n\n if not osp.exists(self.output_path):\n os.mkdir(self.output_path)", "title": "" }, { "docid": "6fa0547da979a18ecfd06dcada49a389", "score": "0.62071174", "text": "def set_defaults(args):\n # Check critical files exist\n data_dir = os.path.join(args.data_dir, args.dataset_name)\n if not args.only_test:\n train_src = os.path.join(data_dir, args.train_src)\n train_tgt = os.path.join(data_dir, args.train_tgt)\n if not os.path.isfile(train_src):\n raise IOError('No such file: %s' % train_src)\n if not os.path.isfile(train_tgt):\n raise IOError('No such file: %s' % train_tgt)\n train_src_tag = os.path.join(data_dir, args.train_src_tag)\n if not os.path.isfile(train_src_tag):\n raise IOError('No such file: %s' % train_src_tag)\n\n args.train_src_file = train_src\n args.train_tgt_file = train_tgt\n args.train_src_tag_file = train_src_tag\n\n dev_src = os.path.join(data_dir, args.dev_src)\n dev_tgt = os.path.join(data_dir, args.dev_tgt)\n if not os.path.isfile(dev_src):\n raise IOError('No such file: %s' % dev_src)\n if not os.path.isfile(dev_tgt):\n raise IOError('No such file: %s' % dev_tgt)\n dev_src_tag = os.path.join(data_dir, args.dev_src_tag)\n if not os.path.isfile(dev_src_tag):\n raise IOError('No such file: %s' % dev_src_tag)\n\n args.dev_src_file = dev_src\n args.dev_tgt_file = dev_tgt\n args.dev_src_tag_file = dev_src_tag\n\n return args", "title": "" }, { "docid": "e32c3d2677821b54f65040d033d7a8e2", "score": "0.6203413", "text": "def prepare_data(self):\n self.dataset_class(\n self.hparams.data_dir, stage=self.train_stage,\n transform=self.train_transform, pre_transform=self.pre_transform,\n on_device_transform=self.on_device_train_transform, **self.kwargs)\n\n self.dataset_class(\n self.hparams.data_dir, stage=self.val_stage,\n transform=self.val_transform, pre_transform=self.pre_transform,\n on_device_transform=self.on_device_val_transform, **self.kwargs)\n\n self.dataset_class(\n self.hparams.data_dir, stage='test',\n transform=self.test_transform, pre_transform=self.pre_transform,\n on_device_transform=self.on_device_test_transform, **self.kwargs)", "title": "" }, { "docid": "361871ad0dd1d6b9a68e98b17f6b5ff2", "score": "0.6199397", "text": "def dataset_settings(init_context) -> DatasetsSettings:\n return DatasetsSettings(**init_context.resource_config)", "title": "" }, { "docid": "84a5b255a6591955af4470d1eff16b33", "score": "0.61761314", "text": "def initialize_options(self):\n self.test_module = None\n self.branch = False\n self.erase = False\n self.no_annotate = False\n self.no_html = False\n self.omit_prefixes = ['dendropy/test']", "title": "" }, { "docid": "7ea4734f603530c5133ba7cd8a9eb26c", "score": "0.61267513", "text": "def initialize_options(self):", "title": "" }, { "docid": "135e3e32af2dcd6946411957b57c4e68", "score": "0.6121959", "text": "def __init__(self, opt):\n BaseDataset.__init__(self, opt)\n self.dir_ct = os.path.join(opt.dataroot, opt.phase) # create a path '/path/to/data/ct'\n # self.dir_mr = os.path.join(opt.dataroot, opt.phase + 'mr') # create a path '/path/to/data/mr'\n # self.dir_ct_label = os.path.join(opt.dataroot, 'trainct_lables') # create a path '/path/to/data/ct'\n # self.dir_mr_label = os.path.join(opt.dataroot, 'trainmr_lables') # create a path '/path/to/data/mr'\n #\n self.nii_paths = sorted(make_dataset(self.dir_ct, opt.max_dataset_size)) # load images from 
'/path/to/data/ct'\n # self.mr_paths = sorted(make_dataset(self.dir_mr, opt.max_dataset_size)) # load images from '/path/to/data/mr'\n # self.nii_paths_label = sorted(make_dataset(self.dir_ct_label, opt.max_dataset_size)) # load images from '/path/to/data/ct'\n # self.mr_paths_label = sorted(make_dataset(self.dir_mr_label, opt.max_dataset_size)) # load images from '/path/to/data/mr'\n #\n self.ct_size = len(self.nii_paths) # get the size of dataset ct\n # self.mr_size = len(self.mr_paths) # get the size of dataset mr\n # self.ct_size_label = len(self.nii_paths_label) # get the size of dataset ct\n # self.mr_size_label = len(self.mr_paths_label) # get the size of dataset mr\n mrtoct = self.opt.direction == 'mrtoct'\n input_nc = self.opt.output_nc if mrtoct else self.opt.input_nc # get the number of channels of input image\n output_nc = self.opt.input_nc if mrtoct else self.opt.output_nc # get the number of channels of output image\n # self.transform_ct = get_transform(self.opt)\n # self.transform_mr = get_transform(self.opt)", "title": "" }, { "docid": "85c9cfef9956720172b07b0cc8e89e2a", "score": "0.6118664", "text": "def configure(self, options, conf):", "title": "" }, { "docid": "1150774acded49446ae921ed58f3003b", "score": "0.60992885", "text": "def prepare_datasets(self):\n # download precomputed zca components and mean for CIFAR10\n #urllib.urlretrieve(\"http://cs.virginia.edu/~pcascante/zca_components.npy\", \"zca_components.npy\")\n #urllib.urlretrieve(\"http://cs.virginia.edu/~pcascante/zca_mean.npy\", \"zca_mean.npy\")\n\n # load data\n data = dataloaders.load_data_subsets(self.args.augPolicy, self.args.dataset, self.args.data_dir)\n self.set_data(data)\n\n # load zca for cifar10\n zca_components = np.load('zca_components.npy')\n zca_mean = np.load('zca_mean.npy')\n self.args.zca_components = zca_components\n self.args.zca_mean = zca_mean\n\n # get randomized set for training\n loaders = dataloaders.get_train_dataloaders(self.args.dataset, self.args.train_data, self.args.train_data_noT, self.args.batch_size, self.args.n_cpus, self.args.num_labeled, self.args.num_valid_samples, self.args.seed, self.args.set_labeled_classes, self.args.set_unlabeled_classes, ordered=False)\n self.set_loaders(loaders)\n\n # get test set if in debug mode and for final evaluation\n testloader = dataloaders.get_test_dataloader(self.args.test_data, self.args.batch_size, self.args.n_cpus)\n self.args.testloader = testloader", "title": "" }, { "docid": "ed99b82fb40b8c617989188477577853", "score": "0.6089538", "text": "def config():\n num_workers = 4 # number of workers\n num_train = 0.8 # (rel.) number of training data\n num_val = 0.1 # (rel.) 
number of validation data\n batch_size = 128 # batch size\n mean = None # dict with property means of dataset\n stddev = None # dict with property stddev of dataset", "title": "" }, { "docid": "d5cda168c9e0ee82a54bfa7ff106c61b", "score": "0.60384506", "text": "def setUp(self):\n with open(os.path.join(os.path.dirname(__file__),\n './data/oecd-canada.json')) as data_file:\n self.oecd_datasets = json.load(data_file,\n object_pairs_hook=OrderedDict)\n with open(os.path.join(os.path.dirname(__file__),\n './data/ons.json')) as data_file:\n self.ons_datasets = json.load(data_file,\n object_pairs_hook=OrderedDict)\n with open(os.path.join(os.path.dirname(__file__),\n './data/galicia-ds.json')) as data_file:\n self.galicia_dataset = json.load(data_file,\n object_pairs_hook=OrderedDict)\n with open(os.path.join(os.path.dirname(__file__),\n './data/galicia-2.0.json')) as data_file:\n self.galicia_2_dataset = json.load(data_file,\n object_pairs_hook=OrderedDict)\n with open(os.path.join(os.path.dirname(__file__),\n './data/QS104EW.json')) as data_file:\n self.uk_dataset = json.load(data_file,\n object_pairs_hook=OrderedDict)\n with open(os.path.join(os.path.dirname(__file__),\n './data/sample_data.json')) as data_file:\n self.sample_dataset = json.load(data_file,\n object_pairs_hook=OrderedDict)\n with open(os.path.join(os.path.dirname(__file__),\n './data/us-labor-ds.json')) as data_file:\n self.uslabor_dataset = json.load(data_file,\n object_pairs_hook=OrderedDict)\n with open(os.path.join(os.path.dirname(__file__),\n './data/statswe.json')) as data_file:\n self.sweden_dataset = json.load(data_file,\n object_pairs_hook=OrderedDict)\n with open(os.path.join(os.path.dirname(__file__),\n './data/A02Level.json')) as data_file:\n self.ons_dataset = json.load(data_file,\n object_pairs_hook=OrderedDict)\n with open(os.path.join(os.path.dirname(__file__),\n './data/CPI15.json')) as data_file:\n self.ons_cpi_dataset = json.load(data_file,\n object_pairs_hook=OrderedDict)\n with open(os.path.join(os.path.dirname(__file__),\n './data/collection.json')) as data_file:\n self.collection = json.load(data_file,\n object_pairs_hook=OrderedDict)\n with open(os.path.join(os.path.dirname(__file__),\n './data/cantabria.json')) as data_file:\n self.cantabria = json.load(data_file,\n object_pairs_hook=OrderedDict)\n with open(os.path.join(os.path.dirname(__file__),\n './data/dimension.json')) as data_file:\n self.dimension = json.load(data_file,\n object_pairs_hook=OrderedDict)", "title": "" }, { "docid": "648dfc930bae8022a47d123f117ed435", "score": "0.60320264", "text": "def set_dataset_dirs(self):\n if self.verbose:\n print('==> Setup directories to store the data files.')\n\n self.save_data_dir = self.set_download_data_dir()\n self.save_cache_dir = self.set_download_cache_dir()", "title": "" }, { "docid": "d6d64987e1216fe82eb812416d929806", "score": "0.6015843", "text": "def SetDataFlags(self):\n self.parser.add_option('--hostname', action='store', dest='hostname',\n help='String of hostname', metavar='hostname',\n default=None)\n self.AddFlagRule('hostname', required=True)\n self.parser.add_option('-z', '--zone-name', action='store',\n dest='zone_name', help='String of the zone name.',\n metavar='<zone-name>', default=None)\n self.SetAllFlagRule('zone_name', required=True)\n self.parser.add_option('-v', '--view-name', action='store',\n dest='view_name',\n help=('String of the view name <view-name>. 
'\n 'Example: \"internal\"'), metavar='<view-name>',\n default=None)\n self.SetAllFlagRule('view_name', required=True)\n self.parser.add_option('-r', '--recursive', action='store_true',\n dest='recursive', help='Use recursion during lookup',\n metavar='<recursive>', default=False)\n self.SetAllFlagRule('recursive', required=False)", "title": "" }, { "docid": "3baeeaf9bdca35d14cb915d4207a4484", "score": "0.60121846", "text": "def _patch_cls_datasets(config: Config):\n\n assert \"data\" in config\n for subset in (\"train\", \"val\", \"test\", \"unlabeled\"):\n cfg = config.data.get(subset, None)\n if not cfg:\n continue\n cfg.type = \"OTXActionClsDataset\"\n cfg.otx_dataset = None\n cfg.labels = None", "title": "" }, { "docid": "04897b630989af43d14a3f4950376ef6", "score": "0.6010168", "text": "def setOptions(self):\n self.parser.add_option('--outputpath',\n dest='outputpath',\n default=None,\n help='Where the files retrieved will be stored. Defaults to the results/ directory.',\n metavar='URL')\n\n self.parser.add_option('--dump',\n dest='dump',\n default=False,\n action='store_true',\n help='Instead of performing the transfer, dump the source URLs.')\n\n self.parser.add_option('--xrootd',\n dest='xroot',\n default=False,\n action='store_true',\n help='Give XrootD url for the file.')\n\n self.parser.add_option('--jobids',\n dest='jobids',\n default=None,\n help='Ids of the jobs you want to retrieve. Comma separated list of integers.',\n metavar='JOBIDS')\n self.parser.add_option('--checksum',\n dest='checksum',\n default='yes',\n help='Set it to yes if needed. It will use ADLER32 checksum' +\\\n 'Allowed values are yes/no. Default is yes.')\n self.parser.add_option('--command',\n dest='command',\n default=None,\n help='A command which to use. Available commands are LCG or GFAL.')", "title": "" }, { "docid": "476ee9a160c8fa05d06febc9f02eb612", "score": "0.5998177", "text": "def initialize_options(self):\n self.include_extras = None\n self.include_all_extras = None\n self.extra_pkgs = []\n self.dependencies = []", "title": "" }, { "docid": "579b7e4197bfa33717de9efbe81278d6", "score": "0.59928983", "text": "def set_configs(self):\n for config_item in ['tag_creds', 'encrypt_tag' ]:\n define(config_item)\n # Get configuration values from configuration filescript_name = path.basename(__file__)\n script_path = path.abspath(path.realpath(__file__))\n script_dir = path.split(script_path)[0]\n config_path = path.abspath(path.join(script_dir, 'model/EmsgEncrypt.conf'))\n options.parse_config_file(config_path)", "title": "" }, { "docid": "9b0d02ca79a36d360f0feccd0c54aa80", "score": "0.5978503", "text": "def configure(self, **kwargs):", "title": "" }, { "docid": "a1ad16369959a538e0df51f491bf1684", "score": "0.5977355", "text": "def __init__(self, config=None, save=False, store=True):\n \n if config:\n make_dataset(config, dataset=self, save=save, store=store)\n return", "title": "" }, { "docid": "cb94cef3ba79e55937fcd091bf330001", "score": "0.5949031", "text": "def setup(self):\n\n # Create a dataset first\n data = {\n 'name': self.dataset_name,\n 'title': self.dataset_title,\n 'notes': self.dataset_notes\n }\n if not self.dataset_owner is None:\n data['owner_org'] = self.dataset_owner\n\n response = requests.post(\n '{0}/api/action/package_create'.format(self.ckan_url),\n data=json.dumps(data),\n headers={'Content-type': 'application/json',\n 'Authorization': self.api_key},\n verify=self.ssl_verify)\n\n if response.status_code != 200:\n exit('Error creating dataset: {0}'.format(response.content))\n\n 
dataset_id = response.json()['result']['id']\n\n # Then create a resource, empty at the beginning\n records = []\n\n # Manually set the field types to ensure they are handled properly\n fields = []\n for item in self.STRUCTURE:\n field = {\n 'id': item['id'],\n 'type': item['type']\n }\n fields.append(field)\n\n # Push the records to the DataStore table. This will create a resource\n # of type datastore.\n data = {\n 'resource': {\n 'package_id': dataset_id,\n 'name': self.resource_name,\n 'format': 'csv',\n 'notes': self.resource_notes\n },\n 'records': records,\n 'fields': fields,\n 'primary_key': self.PRIMARY_KEYS,\n }\n\n response = requests.post(\n '{0}/api/action/datastore_create'.format(self.ckan_url),\n data=json.dumps(data),\n headers={'Content-type': 'application/json',\n 'Authorization': self.api_key},\n verify=self.ssl_verify)\n\n if response.status_code != 200:\n exit('Error: {0}'.format(response.content))\n\n resource_id = response.json()['result']['resource_id']\n print('''\nDataset and DataStore resource successfully created with {0} records.\nPlease add the resource id to your ini file:\n\n[{2}]\nresource_id={1}\n '''.format(len(records), resource_id, self.CONFIG_SECTION))", "title": "" }, { "docid": "dc819a6948fcb3aaadd381b6645841bc", "score": "0.5931906", "text": "def _setup_data(self):\r\n input = self.dataset\r\n pnt_attr, cell_attr = get_all_attributes(input)\r\n\r\n self._setup_data_arrays(cell_attr, 'cell')\r\n self._setup_data_arrays(pnt_attr, 'point')", "title": "" }, { "docid": "a3508baacd0188bd0ec735325c3df5be", "score": "0.5902139", "text": "def configuration(self, dataset=None):\n\n dataset = dataset if dataset else self.dataset\n if dataset is None:\n raise Exception(\"dataset in configuration() and self.dataset couldn't be all None\")\n print \"Dataset being used is: \", dataset\n\n params_dict = {\"type\":\"configuration\", \"dataset\":dataset}\n\n return self.easy_response(params_dict, echo=True)", "title": "" }, { "docid": "9f990afb8b62fa9eac5db6ddbd265485", "score": "0.5896834", "text": "def setup(args):\n cfg = get_cfg()\n add_deeplab_config(cfg)\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n default_setup(cfg, args)\n return cfg", "title": "" }, { "docid": "f69799c977e7e128b2c01740ab7acf86", "score": "0.58842874", "text": "def loadDatasetConfig ( self, dataset ):\n sql = \"SELECT ximagesize, yimagesize, startslice, endslice, zoomlevels, zscale, startwindow, endwindow, starttime, endtime from {} where dataset = \\'{}\\'\".format( ocpcaprivate.datasets, dataset )\n\n\n with closing(self.conn.cursor()) as cursor:\n try:\n cursor.execute ( sql )\n except MySQLdb.Error, e:\n\n logger.error (\"Could not query ocpca datasets database %d: %s. sql=%s\" % (e.args[0], e.args[1], sql))\n raise OCPCAError (\"Could not query ocpca datasets database %d: %s. sql=%s\" % (e.args[0], e.args[1], sql))\n\n # get the project information \n row = cursor.fetchone()\n\n # if the project is not found. 
error\n if ( row == None ):\n logger.warning ( \"Dataset %s not found.\" % ( dataset ))\n raise OCPCAError ( \"Dataset %s not found.\" % ( dataset ))\n\n [ ximagesz, yimagesz, startslice, endslice, zoomlevels, zscale, startwindow, endwindow, starttime, endtime ] = row\n return OCPCADataset ( int(ximagesz), int(yimagesz), int(startslice), int(endslice), int(zoomlevels), float(zscale), int(startwindow), int(endwindow), int(starttime), int(endtime) )", "title": "" }, { "docid": "ba1018b1394c328561e6e2f8e7e15480", "score": "0.58803207", "text": "def configure(self, **kw):", "title": "" }, { "docid": "3dd253dc9459c998a9d8ea0393ce31cf", "score": "0.5868556", "text": "def __init__(self, opt):\n BaseDataset.__init__(self, opt)\n self.A_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size))\n self.B_paths = sorted(make_dataset(opt.dataroot_style, opt.max_dataset_size))\n self.A_size = len(self.A_paths) # get the size of dataset A\n self.B_size = len(self.B_paths) # get the size of dataset B\n transform_list = []\n\n transform_list += [transforms.ToTensor()]\n transform_list += [transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])]\n self.transform = transforms.Compose(transform_list)", "title": "" }, { "docid": "4abbe31fe7dcf08b0ad4383537989856", "score": "0.5868543", "text": "def setup(self, stage=None):\n # Load data and split into train and validation sets here\n # Assign train/val datasets for use in dataloaders\n if stage == \"fit\":\n self.tr_dataset = SeqDataDataset(\n self._tr_x, self._tr_yseq, self._tr_ys, self.normalization\n )\n if self.data_val is not None:\n self.val_dataset = SeqDataDataset(\n self._val_x, self._val_yseq, self._val_ys, self.normalization\n )\n else:\n raise NotImplementedError(\"We didn't implement for stage not `fit`\")", "title": "" }, { "docid": "d53bb85f7069b772588b5b30b99a53a2", "score": "0.5862461", "text": "def SetDataFlags(self):\n self.parser.add_option('-v', '--view-name', action='store',\n dest='view_name',\n help=('String of the view name <view-name>. '\n 'Example: \"internal\"'), metavar='<view-name>',\n default=None)\n self.SetAllFlagRule('view_name', required=True)\n self.parser.add_option('-d', '--domain', action='store', dest='base_domain',\n help='String of the base domain.', default=None)\n self.SetAllFlagRule('base_domain', required=True)", "title": "" }, { "docid": "a3c34d3ed95fb93b653890148126799b", "score": "0.5855423", "text": "def apply_options(self):\n\n Application.apply_options(self)\n\n self.conf = self.parse_config(self.args.config_file)", "title": "" }, { "docid": "79652e4c0c1b993887037574c3d9f21f", "score": "0.58543384", "text": "def test_config(self):\n dataset = fluid.InMemoryDataset()\n dataset.set_parse_ins_id(True)\n dataset.set_parse_content(True)\n dataset._set_trainer_num(1)\n self.assertTrue(dataset.parse_ins_id)\n self.assertTrue(dataset.parse_content)\n self.assertEqual(dataset.trainer_num, 1)", "title": "" }, { "docid": "269186104f91e9fba9bebd6ce52faf81", "score": "0.58300835", "text": "def update_config_(args):\n\n # increment the seed at each run\n args.seed = args.seed + args.run\n\n # multiply the number of examples by a factor size. Used to have number of examples depending\n # on number of labels. 
Usually factor is 1.\n args.datasize.n_examples = args.datasize.factor * args.datasize.n_examples\n\n if args.datasize.n_examples_test == \"train\":\n # use same number of train and test examples\n args.datasize.n_examples_test = args.datasize.n_examples\n\n if args.is_precompute_trnsf and args.train.trnsf_kwargs.is_train:\n # if training transformer then paths need to agree\n assert args.paths[\"trnsf_dirnames\"][0] == args.paths[\"chckpnt_dirnames\"][0]\n\n # monitor training when you randomize the labels because validation does not mean anything\n if args.dataset.kwargs.is_random_targets:\n args.train.trnsf_kwargs.monitor_best = \"train_loss_best\"\n args.train.clf_kwargs.monitor_best = \"train_loss_best\"\n\n if not args.train.is_tensorboard:\n args.paths[\"tensorboard_curr_dir\"] = None\n\n if args.experiment == \"gap\":\n # dib with Q++\n if args.model.name == \"vib\":\n args.model.loss.beta = args.model.loss.beta * 40\n\n elif args.model.name == \"cdibL\":\n args.model.loss.beta = args.model.loss.beta / 100\n\n elif args.model.name == \"cdibS\":\n args.model.loss.beta = args.model.loss.beta * 30\n\n if \"dibL\" in args.model.name:\n # dib with Q++\n args.model.Q_zx.hidden_size = args.model.Q_zy.hidden_size * 64\n\n if \"dibS\" in args.model.name:\n # dib with Q--\n args.model.Q_zx.hidden_size = args.model.Q_zy.hidden_size // 64\n\n if \"dibXS\" in args.model.name:\n # dib with Q------\n args.model.Q_zx.hidden_size = 1\n\n if \"dibXL\" in args.model.name:\n # dib with Q++++++++\n args.model.Q_zx.hidden_size = 8192\n\n short_long_monitor = dict(\n vloss=\"valid_loss_best\", tloss=\"train_loss_best\", vacc=\"valid_acc_best\"\n )\n\n # use short version for name of file\n args.train.monitor_best = invert_dict(short_long_monitor).get(\n args.train.monitor_best, args.train.monitor_best\n )\n\n hyperparam_path = hyperparam_to_path(args.hyperparameters)\n args.paths.merge_with(\n OmegaConf.create(\n format_container(args.paths, dict(hyperparam_path=hyperparam_path))\n )\n )\n # every change that should not modify the name of the file should go below this\n # ----------------------------------------------------------------------------\n\n # use long version in code\n args.train.monitor_best = short_long_monitor.get(\n args.train.monitor_best, args.train.monitor_best\n )\n args.train.trnsf_kwargs.monitor_best = short_long_monitor.get(\n args.train.trnsf_kwargs.monitor_best, args.train.trnsf_kwargs.monitor_best\n )\n args.train.clf_kwargs.monitor_best = short_long_monitor.get(\n args.train.clf_kwargs.monitor_best, args.train.clf_kwargs.monitor_best\n )\n\n if not args.is_precompute_trnsf:\n logger.info(\"Not precomputing the transformer so setting train=False.\")\n args.train.trnsf_kwargs.is_train = False\n args.train.kwargs.lr = args.train.lr_clf # ! 
DEV\n else:\n if args.model.name == \"wdecayBob\":\n args.train.weight_decay = 1e-4\n\n if args.model.name == \"dropoutBob\":\n args.encoder.architecture.dropout = 0.5\n\n if not args.datasize.is_valid_all_epochs and \"train\" in args.train.monitor_best:\n # don't validate all epochs when validation >>> training and you only look at training\n rm_valid_epochs_()\n\n if args.model.is_joint:\n args.model.gamma_force_generalization = 1\n\n if \"distractor\" in args.clfs.name and not args.is_precompute_trnsf:\n args.dataset.is_use_distractor = True\n\n if \"random\" in args.clfs.name and not args.is_precompute_trnsf:\n # if you want random dataset for classifier then make sure you are not randomizing for encoder\n args.dataset.kwargs.is_random_targets = True\n args.train.clf_kwargs.monitor_best = \"train_loss_best\" # don't monitor val\n\n if isinstance(args.train.kwargs.lr, str) and \"|\" in args.train.kwargs.lr:\n lr, lr_factor_zx = args.train.kwargs.lr.split(\"|\")\n args.train.kwargs.lr = float(lr)\n args.train.lr_factor_zx = float(lr_factor_zx)\n\n if args.model.name == \"vibL\":\n # keep alice the same but increase bob view of alice\n # vib with better approx of I[Z,Y] Q++\n args.model.Q_zy.hidden_size = args.model.Q_zy.hidden_size * 16\n\n if args.model.name == \"wdecay\":\n args.train.weight_decay = 1e-4\n\n if \"correlation\" in args.experiment:\n if args.train.optim == \"rmsprop\":\n if args.train.weight_decay == 0.0005:\n args.train.weight_decay = 0.0003\n\n elif args.train.optim == \"sgd\":\n args.train.kwargs.lr = args.train.kwargs.lr * 50\n\n if \"perminvcdib\" in args.model.name:\n args.encoder.architecture.hidden_size = [1024]\n args.model.architecture.z_dim = 1024\n args.model.Q_zy.hidden_size = 256\n args.model.Q_zy.n_hidden_layers = 1", "title": "" }, { "docid": "4ba99130ce0a7fff3e2621bc00f6c426", "score": "0.58281314", "text": "def SetDataFlags(self):\n self.parser.add_option('--commit', action='store_true', dest='commit',\n help='Commits changes of hosts file without '\n 'confirmation.', default=False)\n self.parser.add_option('--no-commit', action='store_true', dest='no_commit',\n help='Suppresses changes of hosts file.',\n default=False)\n self.AddFlagRule(('no_commit', 'commit'), required=False,\n flag_type='independent_args')\n self.parser.add_option('-z', '--zone-name', action='store',\n dest='zone_name', help='String of the zone name.',\n metavar='<zone-name>', default=None)\n self.SetAllFlagRule('zone_name', required=True)\n self.parser.add_option('-v', '--view-name', action='store',\n dest='view_name',\n help=('String of the view name <view-name>. 
'\n 'Example: '\n '\"internal\"'), metavar='<view-name>',\n default='None')\n self.SetAllFlagRule('view_name', required=True)\n\n self.parser.add_option('-f', '--file', action='store', dest='file',\n help='File name of hosts file to write to database.',\n metavar='<file-name>', default='hosts_out')\n self.SetAllFlagRule('file', required=True)", "title": "" }, { "docid": "4b5b1a0e46186531ab5207796d6c9535", "score": "0.5824765", "text": "def _load_options(self, opts):\n self.opts = opts\n self.options = opts.get(CONFIG_DATA_SECTION, {})\n\n # Validate required fields in app.config are set\n required_fields = [\"base_url\", \"username\", \"password\", \"query_limit\", \"query_ticket_grouping_types\",\n \"polling_interval\"]\n validate_fields(required_fields, self.options)\n\n # Create Secureworks client\n self.scwx_client = SCWXClient(self.opts, self.options)", "title": "" }, { "docid": "176b8afc4112aebd8db23f9ce0803ed9", "score": "0.58237094", "text": "def setup(self, **kwargs):\n if self.data_dir is not None:\n path = os.path.join(self.data_dir, self.file_name)\n self.dataset = from_pickle(path)\n else:\n # Generate data set here\n X = []\n y = []\n t = []\n for init_cond in self.init_conditions:\n x0, p0, t0, t1, steps = init_cond\n q_p, dq_dp, _, ts = SHM_1D_model(x0=x0, p0=p0, t0=t0, t1=t1, steps=steps)\n X.append(q_p)\n y.append(dq_dp)\n t.append(ts)\n X = torch.cat(X, dim=0)\n y = torch.cat(y, dim=0)\n t = torch.cat(t, dim=0)\n self.dataset = TensorDataset(X, y, t)\n\n # save the dataset\n path = os.path.join(\"../../data/dataset\", self.file_name) # TODO: ugly af path, fix this sometime\n to_pickle(self.dataset, path)\n\n # Split the data set\n portion = [0.9, 0.1, 0.0]\n splits = [int(x * len(self.dataset)) for x in portion]\n if sum(splits) != len(self.dataset):\n splits[1] += len(self.dataset) - sum(splits)\n self.train_set, self.val_set, self.test_set = random_split(self.dataset, splits)", "title": "" }, { "docid": "dce919713ec7a66e13650c49a093a55f", "score": "0.5822433", "text": "def init():\n global OPTIONS\n file_path = config_file()\n config = ConfigParser.ConfigParser()\n config.read(file_path)\n OPTIONS = { 'username': config.get(FLAGS.config_section, 'username'),\n 'password': config.get(FLAGS.config_section, 'password'),\n 'resource': config.get(FLAGS.config_section, 'resource'),\n 'server': config.get(FLAGS.config_section, 'server'),\n 'debug': config.getboolean(FLAGS.config_section, 'debug'),\n 'path': config.get(FLAGS.config_section, 'dds-path'),\n 'log_file': config.get(FLAGS.config_section, 'log') }\n if FLAGS.debug:\n OPTIONS['debug'] = FLAGS.debug\n if FLAGS.dds_path != '/':\n OPTIONS['path'] = FLAGS.dds_path\n if FLAGS.log_file:\n OPTIONS['log_file'] = FLAGS.log_file", "title": "" }, { "docid": "ced7d23e13b2c08530cbb4c2b096dcdf", "score": "0.58214647", "text": "def setUp(self):\n Dataset.objects.create(title='Penguins of Antarctica', dataset_key='7b657080-f762-11e1-a439-00145eb45e9a',\n download_on=dateutil.parser.parse('2010-08-06T13:05:40.000+0000'))\n Dataset.objects.create(title='Antarctic, Sub-Antarctic and cold temperate echinoid database',\n dataset_key='d8b06df0-81b3-41c9-bcf8-6ba5242e2b95',\n download_on=dateutil.parser.parse('2110-04-06T13:05:40.000+0000'))", "title": "" }, { "docid": "417f98a3cdb124bff69dca7fbbf39c7e", "score": "0.58187026", "text": "def setOptions(self):\n self.parser.add_option('--site',\n dest='sitename',\n default=None,\n help='The PhEDEx node name of the site to be checked.')\n self.parser.add_option('--lfn',\n dest='userlfn',\n 
default=None,\n help='A user lfn address.')\n self.parser.add_option('--checksum',\n dest='checksum',\n default='no',\n help='Set it to yes if needed. It will use ADLER32 checksum' +\\\n 'Allowed values are yes/no. Default is no.')\n self.parser.add_option('--command',\n dest='command',\n default=None,\n help='A command which to use. Available commands are LCG or GFAL.')", "title": "" }, { "docid": "251da479f626b20aa41538b68dee11b6", "score": "0.5818446", "text": "def update_config_datasets_(args, datasets):\n args.datasize.n_examples = len(datasets[\"train\"]) # store as an integer\n steps_per_epoch = len(datasets[\"train\"]) // args.datasize.batch_size\n args.model.loss.warm_Q_zx = steps_per_epoch * args.model.loss.warm_Q_zx\n\n count_targets = datasets[\"train\"].count_targets()\n with omegaconf.open_dict(args):\n args.model.loss.n_per_target = {\n str(k): int(v) for k, v in count_targets.items()\n }", "title": "" }, { "docid": "7e15ba5686390d0ad67491ea8e89891b", "score": "0.5818113", "text": "def setup_task(cls, args, **kwargs):\n args.left_pad_source = options.eval_bool(args.left_pad_source)\n args.left_pad_target = options.eval_bool(args.left_pad_target)\n if getattr(args, 'raw_text', False):\n utils.deprecation_warning('--raw-text is deprecated, please use --dataset-impl=raw')\n args.dataset_impl = 'raw'\n elif getattr(args, 'lazy_load', False):\n utils.deprecation_warning('--lazy-load is deprecated, please use --dataset-impl=lazy')\n args.dataset_impl = 'lazy'\n\n paths = args.data.split(':')\n assert len(paths) > 0\n # find language pair automatically\n if args.source_lang is None or args.target_lang is None:\n raise Exception('Could not infer language pair, please provide it explicitly') # See comment below?\n # TRY ADDING: --source-lang true_w,true_p --target-lang reco_w\n # OR ENABLING BELOW HARDCODED DEFAULTS.\n # args.source_lang = \"true_w,true_p\"\n # args.target_lang = \"reco_w\"\n # args.source_lang, args.target_lang = data_utils.infer_language_pair(paths[0])\n\n assert type(args.source_lang) == str\n if ',' not in args.source_lang:\n raise Exception(\"source-lang is \" + args.source_lang + \" source-lang needs to contain two comma separated strings\")\n # load dictionaries\n src_lang1, src_lang2 = args.source_lang.split(',')\n src_dict1 = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(src_lang1)))\n src_dict2 = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(src_lang2)))\n if ',' in args.target_lang:\n assert args.criterion == 'cross_entropy_dual'\n target_lang, target_lang_extra = args.target_lang.split(',')\n tgt_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(target_lang)))\n tgt_dict_extra = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(target_lang_extra)))\n dual_decoder = True\n else:\n tgt_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.target_lang)))\n dual_decoder = False\n tgt_dict_extra = None\n\n assert src_dict1.pad() == tgt_dict.pad()\n assert src_dict1.eos() == tgt_dict.eos()\n assert src_dict1.unk() == tgt_dict.unk()\n assert src_dict2.pad() == tgt_dict.pad()\n assert src_dict2.eos() == tgt_dict.eos()\n assert src_dict2.unk() == tgt_dict.unk()\n print('| [{}] dictionary: {} types'.format(src_lang1, len(src_dict1)))\n print('| [{}] dictionary: {} types'.format(src_lang2, len(src_dict2)))\n\n if dual_decoder:\n print('| [{}] dictionary: {} types'.format(target_lang, len(tgt_dict)))\n print('| [{}] dictionary: {} types'.format(target_lang_extra, 
len(tgt_dict_extra)))\n else:\n print('| [{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict)))\n return cls(args, src_dict1, src_dict2, tgt_dict, tgt_dict_extra)", "title": "" }, { "docid": "bc0666efd09345cbba308492506d3049", "score": "0.5815576", "text": "def setup(config, *args, **kwargs):\n cfg_rest = config.get('rest',{}).get('datasets',{})\n db_cfg = cfg_rest.get('database',{})\n\n # add indexes\n db = pymongo.MongoClient(**db_cfg).datasets\n if 'dataset_id_index' not in db.datasets.index_information():\n db.datasets.create_index('dataset_id', name='dataset_id_index', unique=True)\n\n handler_cfg = RESTHandlerSetup(config, *args, **kwargs)\n handler_cfg.update({\n 'database': motor.motor_tornado.MotorClient(**db_cfg).datasets,\n })\n\n return [\n (r'/datasets', MultiDatasetHandler, handler_cfg),\n (r'/datasets/(?P<dataset_id>\\w+)', DatasetHandler, handler_cfg),\n (r'/datasets/(?P<dataset_id>\\w+)/description', DatasetDescriptionHandler, handler_cfg),\n (r'/datasets/(?P<dataset_id>\\w+)/status', DatasetStatusHandler, handler_cfg),\n (r'/datasets/(?P<dataset_id>\\w+)/priority', DatasetPriorityHandler, handler_cfg),\n (r'/datasets/(?P<dataset_id>\\w+)/jobs_submitted', DatasetJobsSubmittedHandler, handler_cfg),\n (r'/dataset_summaries/status', DatasetSummariesStatusHandler, handler_cfg),\n ]", "title": "" }, { "docid": "8942ed4edf5da3d5025a84b2e11af2be", "score": "0.5805535", "text": "def configure_options(self):\n super(MRKmeansStep, self).configure_options()\n self.add_file_option('--prot')", "title": "" }, { "docid": "4f59d041f51b621960766ae1a0ffa9e3", "score": "0.5804469", "text": "def _init_all_settings(self): # noqa: C901\n self.config = self.trainer.config\n if self.trainer.hps and self.trainer.hps.get('trainer'):\n self.config.from_json(self.trainer.hps.get('trainer'))\n self.trainer._init_distributed_setting()\n if self.config.cuda:\n self.trainer._init_cuda_setting()\n self.epochs = self.trainer.epochs\n self.distributed = self.trainer.distributed\n self.trainer.model = self._init_model()\n self.model = self.trainer.model\n self.use_syncbn = self.config.syncbn\n self.trainer.use_syncbn = self.use_syncbn\n if self.use_syncbn:\n self.trainer.model = apex.parallel.convert_syncbn_model(self.trainer.model)\n self.trainer.optimizer = self._init_optimizer()\n self.use_ema = hasattr(self.config, 'model_ema')\n if self.use_ema:\n self.model_ema = self._init_model_ema()\n self.trainer.lr_scheduler = self._init_lr_scheduler()\n self.trainer.loss = self._init_loss()\n if self.distributed:\n self.trainer._init_horovod_setting()\n self.use_amp = self.config.amp\n if self.use_amp:\n self.trainer.model, self.trainer.optimizer = amp.initialize(self.trainer.model,\n self.trainer.optimizer,\n opt_level='O1')\n self._init_dataloader()\n self.trainer.valid_metrics = self.trainer._init_metrics(None)\n self.trainer.callbacks._set_params(self.trainer)\n\n # self.trainer.has_built = True", "title": "" }, { "docid": "037920f09c9fda6c8d2319cff3359cd4", "score": "0.58026177", "text": "def _init_distributed_setting(self):\n context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True)\n set_algo_parameters(elementwise_op_strategy_follow=True)\n context.set_auto_parallel_context(all_reduce_fusion_config=self.config.all_reduce_fusion_config)", "title": "" }, { "docid": "f0b933ceb5b2f7aa85158e2f2d6f0e54", "score": "0.5801967", "text": "def set_spark_configs(d):\n global __init_configs_spark\n __init_configs_spark.update(d)", "title": "" }, { "docid": 
"23e97844b77ada7ca3be915c95f5d49e", "score": "0.5799616", "text": "def __init__(self,config):\n self.backbone_model = config[\"BackboneClassifier\"]\n self.model_input_size = config[\"ModelInputSize\"]\n self.processed_dataset_folder = config[\"ProcessedDatasetFolder\"]\n self.scale = config[\"Model\"][\"Scale\"]\n self.aspect_ratio = config[\"Model\"][\"AspectRatio\"]\n self.dataset = []\n\n self.load_dataset()", "title": "" }, { "docid": "16d79fab1606d7c0048dd45fe3e94d64", "score": "0.57891893", "text": "def __init__(self, config_manager):\n config = config_manager.get_config()\n raw_filepath = config['GENERAL']['dataset_path']\n assert os.path.isdir(raw_filepath), \\\n \"The dataset filepath provided is not valid.\"\n self._raw_filepath = raw_filepath\n self._mappings = None\n self._validate_data()\n self._port = config.getint(\"BLOCKCHAIN\", \"http_port\")\n self._frac = float(config['DATASET_MANAGER']['sample_fraction'])\n self._ipfs_client = None\n self._db_client = None\n self.classification = config['DATASET_MANAGER']['category']", "title": "" }, { "docid": "85cae6c42b4846bef2e45b30111971bb", "score": "0.57816124", "text": "def prepare_dataset(self):\n self.ctx.log(\"Checking dataset...\")\n selected = self.ctx.config.get('dataset', None)\n if selected is None:\n if DATASET_NAME in self._get_datasets():\n # try to select existing\n AugerConfig(self.ctx).set_data_set(\n DATASET_NAME, '').set_experiment(None)\n self.dataset = DataSet(self.ctx, self.project, DATASET_NAME)\n else:\n # or create new\n self.ctx.log(\"No dataset found, creating the first one...\")\n source = self.ctx.config.get('source', None)\n dataset = DataSet(self.ctx, self.project).create(source)\n AugerConfig(self.ctx).set_data_set(\n dataset.name, source).set_experiment(None)\n self.dataset = DataSet(self.ctx, self.project, dataset.name)\n else:\n self.dataset = DataSet(self.ctx, self.project, selected)\n self.ctx.log(\"Currently selected: %s\" % selected)", "title": "" }, { "docid": "9b03d68dca762028cdbdfb4fba89a315", "score": "0.57779884", "text": "def initialize_options(self):\n build_ext.initialize_options(self)\n self.gen_compiledb = None", "title": "" }, { "docid": "32338252892318ac753ebf3bc1894131", "score": "0.5775403", "text": "def prepare_data(self):\n self.generic_dataset = LIDCNodulesDataset(**self.dataset_params.params)\n log.info(f\"DATASET SIZE: {len(self.generic_dataset)}\")\n\n self.tensor_dataset_path = self.__prepare_tensor_dataset()\n\n self.aug_transform = transforms.Compose([T.FlipNodule3D(), T.RotNodule3D()])\n self.dataset = DatasetFolder(\n self.tensor_dataset_path, torch.load, (\"pt\"), transform=self.__data_transform\n )\n self.dataset.norm = self.generic_dataset.norm\n\n train_inds, val_inds, test_inds = H.train_val_holdout_split(\n self.dataset, ratios=[0.85, 0.14, 0.01]\n )\n self.train_sampler = SubsetRandomSampler(train_inds)\n self.val_sampler = SubsetRandomSampler(val_inds)\n self.test_subset = Subset(self.dataset, test_inds)", "title": "" }, { "docid": "4b638322e2623e701768a00af7dbc729", "score": "0.5774466", "text": "def __init__(self, opt):\n BaseDataset.__init__(self, opt)\n self.sub_list = os.listdir(opt.dataroot)\n self.opt = opt\n\n self.input_nc = self.opt.input_nc\n self.output_nc = self.opt.output_nc", "title": "" }, { "docid": "d733b46ea79ecbd913d57f221312701f", "score": "0.57649636", "text": "def configure(**config):", "title": "" }, { "docid": "305326bce27463db1ff1340000465d08", "score": "0.57591677", "text": "def setup(self, **kwargs):\n if self.data_dir is not 
None:\n path = os.path.join(self.data_dir, self.file_name)\n self.dataset = from_pickle(path)\n else:\n # Generate data set here\n X = []\n y = []\n t = []\n for init_cond in self.init_conditions:\n x0, p0, t0, t1, steps = init_cond\n q_p, _, _, ts = SHM_1D_model(x0=x0, p0=p0, t0=t0, t1=t1, steps=steps) # TODO: merge acceleration\n ddq = -1. / 1. * q_p[:, 0]\n\n X.append(q_p) # for k=1, m=1 -> p = dq\n y.append(ddq)\n t.append(ts)\n\n X = torch.cat(X, dim=0)\n y = torch.cat(y, dim=0)\n t = torch.cat(t, dim=0)\n self.dataset = TensorDataset(X, y, t)\n\n # save the dataset\n path = os.path.join(\"../../data/dataset\", self.file_name) # TODO: ugly af path, fix this sometime\n to_pickle(self.dataset, path)\n\n # Split the data set\n portion = [0.9, 0.1, 0.0]\n splits = [int(x * len(self.dataset)) for x in portion]\n if sum(splits) != len(self.dataset):\n splits[1] += len(self.dataset) - sum(splits)\n self.train_set, self.val_set, self.test_set = random_split(self.dataset, splits)", "title": "" }, { "docid": "45ab4f4b977270c52ea7170a69452946", "score": "0.57569444", "text": "def setup(self, **kwargs):\n self.configured = True", "title": "" }, { "docid": "4da6a542d4145defa684de097a410c80", "score": "0.57546705", "text": "def setUpTestData(cls):\n\n dataset_create_data = {\n \"verbose_name\": \"0307-全键盘-0号麦克风-2000个样本点\",\n \"dataset_name\": \"20210307\",\n \"data_type\": \"all-0micro\",\n \"project_type\": \"single\",\n \"description\": \"cxx_0307_1__0306_123\",\n \"description_more\": \"0307-30键训练集,其余为测试集,0号麦克风,2000个样本点\",\n \"length\": 2000,\n \"fs\": 48000,\n }\n\n cls.dataset_obj = models.Dataset(**dataset_create_data)\n cls.dataset_obj.save()\n\n model_inst_create_data = {\n \"dataset_id\": cls.dataset_obj.id,\n \"algorithm\": \"blstm\",\n \"save_path\": os.path.join(settings.MODEL_INST_ROOT, \"dataset_id:10:algorithm:blstm/inst_id:7\"),\n \"extra_info\": {\"input_shape_list\": [4, 14], \"max_label_num\": 29},\n \"is_ready\": True,\n \"is_latest\": True,\n }\n cls.model_inst_obj = models.AlgorithmModelInst(**model_inst_create_data)\n cls.model_inst_obj.save()\n\n cls.dataset_unit = load.load_dataset_unit(\n save_path=os.path.join(settings.LIB_ROOT, \"preprocessed_data/2250.pickle\")\n )\n\n super().setUpTestData()", "title": "" }, { "docid": "1e960d29c1acc31b10266df4650ec99b", "score": "0.57419044", "text": "def init_dataset(arguments):\n if arguments.method == 'train':\n return DataSetTrain()\n else:\n return DataSetTest()", "title": "" }, { "docid": "13a26e6d4a9baaaab00566c05b761988", "score": "0.57379067", "text": "def config(self, **kw):\n self.cfg_fixture.config(**kw)", "title": "" }, { "docid": "87306fb100f6faa2896cf7c156c31496", "score": "0.57375973", "text": "def configure(self):\n pass", "title": "" }, { "docid": "87306fb100f6faa2896cf7c156c31496", "score": "0.57375973", "text": "def configure(self):\n pass", "title": "" }, { "docid": "87306fb100f6faa2896cf7c156c31496", "score": "0.57375973", "text": "def configure(self):\n pass", "title": "" }, { "docid": "87306fb100f6faa2896cf7c156c31496", "score": "0.57375973", "text": "def configure(self):\n pass", "title": "" }, { "docid": "fc0d8e116065f043c61c92a457e6c012", "score": "0.57354635", "text": "def __config_init(self):\n ini_path = os.path.join('app', 'common', 'oracle_sql.ini')\n ini_container = 'oracle_sql'\n self.sql_data = get_all_data_from_ini_file(ini_path, ini_container)", "title": "" }, { "docid": "84c705b6bdc8db6f23dcf91809dd0684", "score": "0.5712451", "text": "def configure(self):\r\n pass", "title": "" }, { "docid": 
"315dd1255f21dcb80630fa1a33a535a5", "score": "0.5712033", "text": "def setup_class(cls):\n cod.get_data_jhu(data_type=\"all\", region=\"global\", update=True)\n cod.get_data_jhu(data_type=\"all\", region=\"us\", update=True)\n\n cod.get_data_nyt(data_type=\"all\", counties=False, update=True)\n cod.get_data_nyt(data_type=\"all\", counties=True, update=True)", "title": "" }, { "docid": "f10a61fe63d5616d5cc284d2c5156940", "score": "0.57063013", "text": "def set_config(self):\n\n self.parser['CONNECTION'] = strings.get_config_connection()\n self.parser['DATABASE'] = strings.get_config_db()\n self.parser['METRICS'] = strings.get_config_metrics()\n\n with open(strings.get_config_path(), 'w') as file:\n self.parser.write(file)", "title": "" }, { "docid": "ebd8368c0cbe6209283b2c1bd611d90f", "score": "0.5705125", "text": "def initialize_config(self):\n return {\n \"model_type\": \"bert_query\",\n \"name\": \"test_bert_query\",\n \"model_path\": os.path.join(self.model_path, \"test_bert_query\"),\n \"setup_args\": {\n \"threshold\": 0.8,\n \"seed\": [\n \"This is a cat\",\n \"Arya is a hungry cat.\",\n ],\n \"infer_config\": {\n \"k\": 2,\n \"segment_config\": {\"window_size\": 5, \"step_size\": 3},\n },\n },\n }", "title": "" }, { "docid": "c37f5744959a1e3cc85e2a90e50fc18a", "score": "0.5704601", "text": "def initialize_from_config(self):", "title": "" }, { "docid": "0be632c8f9133b049eb8f68a25d7bb8d", "score": "0.56914246", "text": "def setup(self, stage):\n trainval_test_split = cfg['data']['trainval_test_split']\n graph_train_val, self.graph_test = self.graph_data.split_data(test_size=trainval_test_split)\n\n train_val_split = cfg['data']['train_val_split']\n self.graph_train, self.graph_val = graph_train_val.split_data(test_size=train_val_split)", "title": "" }, { "docid": "4a37839be163787057d5725023934369", "score": "0.5689095", "text": "def _init_dataloader(self):\n if self.distributed and hvd.local_rank() == 0 and 'remote_data_dir' in self.config.dataset:\n FileOps.copy_folder(self.config.dataset.remote_data_dir, self.config.dataset.data_dir)\n if self.distributed:\n hvd.join()\n args = self.config.dataset\n train_dir = os.path.join(self.config.dataset.data_dir, 'train')\n dataset_train = Dataset(train_dir)\n world_size, rank = None, None\n if self.distributed:\n world_size, rank = hvd.size(), hvd.rank()\n self.trainer.train_loader = create_loader(\n dataset_train,\n input_size=tuple(args.input_size),\n batch_size=args.batch_size,\n is_training=True,\n use_prefetcher=self.config.prefetcher,\n rand_erase_prob=args.reprob,\n rand_erase_mode=args.remode,\n rand_erase_count=args.recount,\n color_jitter=args.color_jitter,\n auto_augment=args.aa,\n interpolation='random',\n mean=tuple(args.mean),\n std=tuple(args.std),\n num_workers=args.workers,\n distributed=self.distributed,\n world_size=world_size,\n rank=rank\n )\n valid_dir = os.path.join(self.config.dataset.data_dir, 'val')\n dataset_eval = Dataset(valid_dir)\n self.trainer.valid_loader = create_loader(\n dataset_eval,\n input_size=tuple(args.input_size),\n batch_size=4 * args.batch_size,\n is_training=False,\n use_prefetcher=self.config.prefetcher,\n interpolation=args.interpolation,\n mean=tuple(args.mean),\n std=tuple(args.std),\n num_workers=args.workers,\n distributed=self.distributed,\n world_size=world_size,\n rank=rank\n )\n self.trainer.batch_num_train = len(self.trainer.train_loader)\n self.trainer.batch_num_valid = len(self.trainer.valid_loader)", "title": "" }, { "docid": "7f0a32ab2cf50c419bde50c74f591b95", "score": 
"0.56888485", "text": "def _create_default_config(self):\n\t\tparser = self._parser\n\n\t\tparser.remove_section(\"world\")\n\t\tparser.add_section(\"world\")\n\t\tparser.set(\"world\", \"class\", \"portage.sets.base.DummyPackageSet\")\n\t\tparser.set(\"world\", \"packages\", \"@selected @system\")\n\n\t\tparser.remove_section(\"selected\")\n\t\tparser.add_section(\"selected\")\n\t\tparser.set(\"selected\", \"class\", \"portage.sets.files.WorldSelectedSet\")\n\n\t\tparser.remove_section(\"system\")\n\t\tparser.add_section(\"system\")\n\t\tparser.set(\"system\", \"class\", \"portage.sets.profiles.PackagesSystemSet\")\n\n\t\tparser.remove_section(\"usersets\")\n\t\tparser.add_section(\"usersets\")\n\t\tparser.set(\"usersets\", \"class\", \"portage.sets.files.StaticFileSet\")\n\t\tparser.set(\"usersets\", \"multiset\", \"true\")\n\t\tparser.set(\"usersets\", \"directory\", \"%(PORTAGE_CONFIGROOT)setc/portage/sets\")\n\t\tparser.set(\"usersets\", \"world-candidate\", \"true\")\n\n\t\tparser.remove_section(\"live-rebuild\")\n\t\tparser.add_section(\"live-rebuild\")\n\t\tparser.set(\"live-rebuild\", \"class\", \"portage.sets.dbapi.VariableSet\")\n\t\tparser.set(\"live-rebuild\", \"variable\", \"INHERITED\")\n\t\tparser.set(\"live-rebuild\", \"includes\", \"bzr cvs darcs git git-2 mercurial subversion tla\")\n\n\t\tparser.remove_section(\"module-rebuild\")\n\t\tparser.add_section(\"module-rebuild\")\n\t\tparser.set(\"module-rebuild\", \"class\", \"portage.sets.dbapi.OwnerSet\")\n\t\tparser.set(\"module-rebuild\", \"files\", \"/lib/modules\")\n\n\t\tparser.remove_section(\"preserved-rebuild\")\n\t\tparser.add_section(\"preserved-rebuild\")\n\t\tparser.set(\"preserved-rebuild\", \"class\", \"portage.sets.libs.PreservedLibraryConsumerSet\")\n\n\t\tparser.remove_section(\"x11-module-rebuild\")\n\t\tparser.add_section(\"x11-module-rebuild\")\n\t\tparser.set(\"x11-module-rebuild\", \"class\", \"portage.sets.dbapi.OwnerSet\")\n\t\tparser.set(\"x11-module-rebuild\", \"files\", \"/usr/lib/xorg/modules\")\n\t\tparser.set(\"x11-module-rebuild\", \"exclude-files\", \"/usr/bin/Xorg\")", "title": "" }, { "docid": "633037df3a479ad464a73f033a299a85", "score": "0.56801337", "text": "def __init__(self, opt):\n BaseDataset.__init__(self, opt)\n self.dir = os.path.join(opt.dataroot, opt.phase)\n self.AB_paths = [e[1] for e in sorted(make_numbering_dataset(self.dir, opt.max_dataset_size), key=lambda idx: idx[0])]\n assert(opt.input_nc == 1 and opt.output_nc == 2 and opt.direction == 'AtoB')\n self.transform = get_transform(self.opt, convert=False)\n\n with open(opt.captions, 'rb') as f:\n x = pickle.load(f)\n train_captions, test_captions = x[0], x[1]\n self.captions = train_captions if opt.phase == \"train\" else test_captions\n self.ixtoword, self.wordtoix = x[2], x[3]\n del x, train_captions, test_captions\n self.n_words = len(self.ixtoword)\n print('Load from: ', opt.captions)\n self.captions_per_image = opt.captions_per_image\n self.text_words_num = opt.text_words_num", "title": "" }, { "docid": "8c84239b35e972b729c9bc01483c8ef1", "score": "0.5676815", "text": "def __init__(self, opt):\n BaseDataset.__init__(self, opt)\n self.dir_x = os.path.join(opt.dataroot, opt.phase + '_x') \n self.dir_yi = os.path.join(opt.dataroot, opt.phase + '_yi') \n self.dir_yj = os.path.join(opt.dataroot, opt.phase + '_yj') \n\n self.x_paths = [os.path.join(self.dir_x,s) for s in sorted(os.listdir(self.dir_x))]\n self.yi_paths = [os.path.join(self.dir_yi,s) for s in sorted(os.listdir(self.dir_yi))]\n self.yj_paths = 
[os.path.join(self.dir_yj,s) for s in sorted(os.listdir(self.dir_yj))]\n\n self.dataset_size = len(self.x_paths) # get the size of dataset \n\n self.transform = transforms.Compose([\n # transforms.Resize((132, 100)), \n # transforms.RandomCrop((128, 96)), # basic augmentation\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])", "title": "" }, { "docid": "4cd8fc301a87cbd29a67e725911a4680", "score": "0.5671616", "text": "def setup(args):\n cfg = get_cfg()\n add_kd_config(cfg)\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n default_setup(cfg, args)\n return cfg", "title": "" }, { "docid": "8b210d74c700b39f10a44632902daf08", "score": "0.5665934", "text": "def update_mart_config_from_args(\r\n config: Dict, args: argparse.Namespace, *, verbose: bool = True) -> Dict[str, Any]:\r\n if args.debug:\r\n config[\"debug\"] = True\r\n if verbose:\r\n print(f\" Change config: Set debug to True\")\r\n if args.dataset_max is not None:\r\n assert args.dataset_max > 0, \"--dataset_max must be positive int.\"\r\n config[\"dataset_train\"][\"max_datapoints\"] = args.dataset_max\r\n config[\"dataset_val\"][\"max_datapoints\"] = args.dataset_max\r\n if verbose:\r\n print(f\" Change config: Set dataset_(train|val).max_datapoints to {args.dataset_max}\")\r\n if args.preload:\r\n config[\"dataset_train\"][\"preload\"] = True\r\n config[\"dataset_val\"][\"preload\"] = True\r\n if verbose:\r\n print(f\" Change config: Set dataset_(train|val).preload to True\")\r\n if args.no_preload or args.validate:\r\n config[\"dataset_train\"][\"preload\"] = False\r\n config[\"dataset_val\"][\"preload\"] = False\r\n if verbose:\r\n print(f\" Change config: Set dataset_(train|val).preload to False (--no_preload or --validate)\")\r\n return config", "title": "" }, { "docid": "e356167dd2993ae85357d6569ba88e41", "score": "0.5659186", "text": "def initialize_data(cls, host_override, directory_override, depth_override):\n cls.HOST = test.select_host(config.HOST, host_override)\n cls.DATA = test.select_directory(config.DATA, directory_override)\n cls.DEPTH = test.select_depth(config.DEPTH, depth_override)\n cls.BASE_URL = test.create_base_url(cls.HOST, cls.PORT)\n cls.JSON_FILE = test.create_json_reference(cls.DATA, cls.JSON_NAME)", "title": "" }, { "docid": "5fc63dffb31d932eb1ad9d628eeced26", "score": "0.56516856", "text": "def config(self, **kwargs: Any) -> None:", "title": "" }, { "docid": "5fc63dffb31d932eb1ad9d628eeced26", "score": "0.56516856", "text": "def config(self, **kwargs: Any) -> None:", "title": "" }, { "docid": "97b37b76b3b44ffd7f424f34ad8d386a", "score": "0.5646425", "text": "def __init__(self, config):\n # this will store the training data headers for value ordering\n self.data_headers = None\n # attribute pre-selection (remove class attribute from select_attr)\n self.class_attr = config['class_attr']\n self.select_attr = config.get('select_attr', [])\n if self.class_attr in self.select_attr:\n self.select_attr.remove(self.class_attr)\n # part of the training data to be used\n self.train_part = config.get('train_part', 1)", "title": "" }, { "docid": "6e717025eb1177f42fedff0e414f3b4c", "score": "0.5639404", "text": "def setUp(self):\n self.options = Options()\n test_path = os.path.dirname(os.path.realpath(__file__))\n self.group_dir = os.path.join(test_path, 'fitbenchmarking_results')\n os.mkdir(self.group_dir)\n self.table_names = {\"compare\": \"compare_table_name.\",\n \"runtime\": \"runtime_table_name.\"}\n self.table_descriptions = 
{\"compare\": \"compare table descriptions\",\n \"runtime\": \"runtime table descriptions\",\n \"both\": \"both table descriptions\"}\n self.group_name = \"random_name\"", "title": "" }, { "docid": "5856fd48318caf4ef2dcc562a8e174e2", "score": "0.56270903", "text": "def __init__(self, opt):\n #super(BasicDataset).__init__(self, opt)\n self.dir = os.path.join(opt.dataroot, opt.phase) # create a path '/path/to/data/train'\n\n self.paths = sorted(self.make_dataset(self.dir, opt.max_dataset_size)) # load images from '/path/to/data/train'\n self.size = len(self.paths) # get the size of dataset A\n self.input_nc = opt.input_nc # get the number of channels of input image\n self.transform = self.get_transform(opt, grayscale=(self.input_nc == 1))", "title": "" }, { "docid": "352a1b5ea00063885d197137c0d14d32", "score": "0.5625312", "text": "def setup(self, **kwargs):\n self.log('Setting up...', self.LOG_USER)\n\n self.set_read_all(write_log=True)\n self.set_defaults_pv(write_log=True)\n\n # The per band configs. May want to make available per-band values.\n smurf_init_config = self.config.get('init')\n bands = smurf_init_config['bands']\n for b in bands:\n self.set_iq_swap_in(b, smurf_init_config['iqSwapIn'], \n write_log=True, **kwargs)\n self.set_iq_swap_out(b, smurf_init_config['iqSwapOut'], \n write_log=True, **kwargs)\n self.set_ref_phase_delay(b, smurf_init_config['refPhaseDelay'], \n write_log=True, **kwargs)\n self.set_ref_phase_delay_fine(b, \n smurf_init_config['refPhaseDelayFine'], write_log=True, \n **kwargs)\n self.set_tone_scale(b, smurf_init_config['toneScale'], \n write_log=True, **kwargs)\n self.set_analysis_scale(b, smurf_init_config['analysisScale'], \n write_log=True, **kwargs)\n self.set_feedback_enable(b, smurf_init_config['feedbackEnable'],\n write_log=True, **kwargs)\n self.set_feedback_gain(b, smurf_init_config['feedbackGain'], \n write_log=True, **kwargs)\n self.set_lms_gain(b, smurf_init_config['lmsGain'], \n write_log=True, **kwargs)\n\n self.set_feedback_limit_khz(b, 225) # why 225?\n\n self.set_feedback_polarity(b, smurf_init_config['feedbackPolarity'], \n write_log=True, **kwargs)\n # self.set_band_center_mhz(b, smurf_init_config['bandCenterMHz'],\n # write_log=True, **kwargs)\n self.set_synthesis_scale(b, smurf_init_config['synthesisScale'],\n write_log=True, **kwargs)\n\n # This should be part of exp.cfg\n if b == 2:\n self.set_data_out_mux(6, \"UserData\", write_log=True, \n **kwargs)\n self.set_data_out_mux(7, \"UserData\", write_log=True, \n **kwargs)\n self.set_iq_swap_in(b, 1, write_log=True, **kwargs)\n self.set_iq_swap_out(b, 0, write_log=True, **kwargs)\n elif b ==3 :\n self.set_data_out_mux(8, \"UserData\", write_log=True, \n **kwargs)\n self.set_data_out_mux(9, \"UserData\", write_log=True, \n **kwargs)\n self.set_iq_swap_in(b, 0, write_log=True, **kwargs)\n self.set_iq_swap_out(b, 0, write_log=True, **kwargs)\n\n self.set_dsp_enable(b, smurf_init_config['dspEnable'], \n write_log=True, **kwargs)\n\n # Make band dictionaries\n self.freq_resp[b] = {}\n\n self.set_cpld_reset(0, write_log=True)\n\n for i in np.arange(1,5):\n self.set_att_uc(i, 0, write_log=True)\n self.set_att_dc(i, 0, write_log=True)\n\n self.cpld_toggle()", "title": "" }, { "docid": "bde3b5360ad8604a853ebdeb89454b46", "score": "0.5624851", "text": "def initialize_options(self):\n self.orc_version = \"1.6.5\"\n self.output_dir = \"deps/\"\n self.source_url = \"https://www-us.apache.org/dist/orc/\"\n self.build_type = \"debug\"", "title": "" }, { "docid": "fdc46717e986035b03ad0cdf1a653d51", 
"score": "0.5624445", "text": "def setup(cls, config):", "title": "" }, { "docid": "dd4f3d7e502b04fc70387c1bfa7f511b", "score": "0.56242675", "text": "def set_options (self, data):\r\n if data.get(\"debug\") is not None:\r\n self.debug.setChecked(data[\"debug\"])\r\n if data.get(\"verbose\") is not None:\r\n self.verbose.setChecked(data[\"verbose\"])\r\n if data.get(\"recursionlevel\") is not None:\r\n self.recursionlevel.setValue(data[\"recursionlevel\"])\r\n if data.get(\"warninglines\") is not None:\r\n self.warninglines.setPlainText(data[\"warninglines\"])\r\n if data.get(\"ignorelines\") is not None:\r\n self.ignorelines.setPlainText(data[\"ignorelines\"])", "title": "" }, { "docid": "9fd6e570cbbd289186a7ccfb299c75df", "score": "0.561642", "text": "def dataset_ids(self, dataset_ids):\n self.configuration['dataset_ids'] = dataset_ids", "title": "" }, { "docid": "95cd6fe0dec48cce9df1ae8e84fd4738", "score": "0.5615151", "text": "def setup(self, conda_data):\n self.set_widgets_enabled(False)\n conda_processed_info = conda_data.get('processed_info')\n environments = conda_processed_info.get('__environments')\n packages = conda_data.get('packages')\n self.current_prefix = conda_processed_info.get('default_prefix')\n self.set_environments(environments)\n self.set_packages(packages)", "title": "" }, { "docid": "3769b554422c9a57ddd75250003c84c4", "score": "0.5612766", "text": "def set_config_values(self, args):\n\n # Config file has been specified\n if 'config_file' in args:\n yaml_config = self._load_config_file(args.config_file)\n else:\n yaml_config = None\n for key, value in self._fields.items():\n self._fields[key].set_name(key)\n if key in args:\n self._fields[key].set_value(getattr(args, key))\n elif yaml_config is not None and key in yaml_config:\n self._fields[key].set_value(yaml_config[key])\n elif value.default_value() is not None:\n self._fields[key].set_value(value.default_value())\n elif value.required():\n flags = ', '.join(value.flags())\n raise TritonModelAnalyzerException(\n f'Config for {value.name()} is not specified. You need to specify it using the YAML config file or using the {flags} flags in CLI.'\n )\n self._setup_logger()\n self._preprocess_and_verify_arguments()\n self._autofill_values()", "title": "" }, { "docid": "848d15f8c0e54408aef74b57ebbea7e5", "score": "0.561176", "text": "def setUp(self):\n self.use_data_loader = True\n self.epoch_num = 10\n self.drop_last = False", "title": "" } ]
39a9a2507b6189f125ecdcb37d61cb15
create roi of the liver
[ { "docid": "25b582771929b6f1e3b27a9f7c715656", "score": "0.0", "text": "def find_ROI_segmentation(self):\n\n seg = self.__IsolateBody()\n seg_lung = self.__isolate_lung(seg)\n box = self.__built_box_roi(seg_lung)\n return box, seg_lung", "title": "" } ]
[ { "docid": "47a1aeb5cb060a014f8725ac38518d33", "score": "0.7261228", "text": "def get_roi(self):\n start = self.image.roi.pos()\n size = self.image.roi.size()\n angle = self.image.roi.angle()\n self.ROI = [start[0],start[0] + size[0],\n start[1], start[1] + size[1], angle]\n self.plot_options.set_roi(self.ROI)", "title": "" }, { "docid": "ee057b3599684b8e3ad9f9465c07bc8f", "score": "0.68806237", "text": "def set_roi(self, roi):\n with h5py.File(self.data_file, 'r+') as f:\n if 'roi' not in f:\n roigrp = f.create_group('roi')\n else:\n roigrp = f['roi']\n roigrp.create_dataset('roi{}'.format(self._next_roi_idx), data=np.asarray(roi), compression='lzf')", "title": "" }, { "docid": "9ae0e7c1d66702d26e613f2d087db7e3", "score": "0.6866821", "text": "def button_setroi_clicked(self):\r\n original_workspace = DataModel.g.current_workspace\r\n roi_start = self.roi_start.value()\r\n roi_end = self.roi_end.value()\r\n roi = [\r\n roi_start[0],\r\n roi_start[1],\r\n roi_start[2],\r\n roi_end[0],\r\n roi_end[1],\r\n roi_end[2],\r\n ]\r\n\r\n roi_name = (\r\n DataModel.g.current_workspace\r\n + \"_roi_\"\r\n + str(roi[0])\r\n + \"_\"\r\n + str(roi[3])\r\n + \"_\"\r\n + str(roi[1])\r\n + \"_\"\r\n + str(roi[4])\r\n + \"_\"\r\n + str(roi[2])\r\n + \"_\"\r\n + str(roi[5])\r\n )\r\n\r\n cfg.ppw.clientEvent.emit({\"source\": \"panel_gui\", \"data\": \"make_roi_ws\", \"roi\": roi})\r\n self.add_roi(roi_name, original_workspace, roi)\r\n\r\n cfg.ppw.clientEvent.emit({\"source\": \"panel_gui\", \"data\": \"faster_refresh\", \"value\": None})", "title": "" }, { "docid": "7db344acec9f2030da086eeb59eb145a", "score": "0.6757338", "text": "def getROI(self) -> retval:\n ...", "title": "" }, { "docid": "bed4dbe4a2a572801aebe75858d4a17c", "score": "0.67048454", "text": "def add_roi(self, roi_fname, original_workspace, roi):\r\n if self.annotations_source.value():\r\n original_level = str(self.annotations_source.value().rsplit(\"/\", 1)[-1])\r\n else:\r\n original_level = \"None\"\r\n params = dict(\r\n workspace=original_workspace,\r\n roi_fname=roi_fname,\r\n roi=roi,\r\n original_workspace=original_workspace,\r\n original_level=original_level,\r\n )\r\n result = Launcher.g.run(\"roi\", \"create\", **params)\r\n if result:\r\n rid = result[\"id\"]\r\n rname = result[\"name\"]\r\n self._add_roi_widget(rid, rname, True)\r\n cfg.ppw.clientEvent.emit({\"source\": \"panel_gui\", \"data\": \"refresh\", \"value\": None})", "title": "" }, { "docid": "8020c663269e9fe8c61ff71fb898a6c1", "score": "0.67033744", "text": "def create_roi(subject_id, subjects_dir, fs_dir, parcellation_name):\n iflogger.info(\"Create the ROIs:\")\n output_dir = op.abspath(op.curdir)\n fs_dir = op.join(subjects_dir, subject_id)\n cmp_config = cmp.configuration.PipelineConfiguration()\n cmp_config.parcellation_scheme = \"Lausanne2008\"\n log = cmp_config.get_logger()\n parval = cmp_config._get_lausanne_parcellation('Lausanne2008')[parcellation_name]\n pgpath = parval['node_information_graphml']\n aseg = nb.load(op.join(fs_dir, 'mri', 'aseg.nii.gz'))\n asegd = aseg.get_data()\n\n iflogger.info(\"Working on parcellation: \")\n iflogger.info(cmp_config._get_lausanne_parcellation('Lausanne2008')[parcellation_name])\n iflogger.info(\"========================\")\n pg = nx.read_graphml(pgpath)\n # each node represents a brain region\n # create a big 256^3 volume for storage of all ROIs\n rois = np.zeros( (256, 256, 256), dtype=np.int16 )\n\n count = 0\n for brk, brv in pg.nodes_iter(data=True):\n count = count + 1\n iflogger.info(brv)\n iflogger.info(brk)\n 
if brv['dn_hemisphere'] == 'left':\n hemi = 'lh'\n elif brv['dn_hemisphere'] == 'right':\n hemi = 'rh'\n if brv['dn_region'] == 'subcortical':\n iflogger.info(brv)\n iflogger.info(\"---------------------\")\n iflogger.info(\"Work on brain region: %s\" % (brv['dn_region']))\n iflogger.info(\"Freesurfer Name: %s\" % brv['dn_fsname'])\n iflogger.info(\"Region %s of %s \" % (count, pg.number_of_nodes()))\n iflogger.info(\"---------------------\")\n # if it is subcortical, retrieve roi from aseg\n idx = np.where(asegd == int(brv['dn_fs_aseg_val']))\n rois[idx] = int(brv['dn_correspondence_id'])\n\n elif brv['dn_region'] == 'cortical':\n iflogger.info(brv)\n iflogger.info(\"---------------------\")\n iflogger.info(\"Work on brain region: %s\" % (brv['dn_region']))\n iflogger.info(\"Freesurfer Name: %s\" % brv['dn_fsname'])\n iflogger.info(\"Region %s of %s \" % (count, pg.number_of_nodes()))\n iflogger.info(\"---------------------\")\n\n labelpath = op.join(output_dir, parval['fs_label_subdir_name'] % hemi)\n # construct .label file name\n\n fname = '%s.%s.label' % (hemi, brv['dn_fsname'])\n\n # execute fs mri_label2vol to generate volume roi from the label file\n # store it in temporary file to be overwritten for each region\n\n mri_cmd = 'mri_label2vol --label \"%s\" --temp \"%s\" --o \"%s\" --identity' % (op.join(labelpath, fname),\n op.join(fs_dir, 'mri', 'orig.mgz'), op.join(output_dir, 'tmp.nii.gz'))\n runCmd( mri_cmd, log )\n\n tmp = nb.load(op.join(output_dir, 'tmp.nii.gz'))\n tmpd = tmp.get_data()\n\n # find voxel and set them to intensityvalue in rois\n idx = np.where(tmpd == 1)\n rois[idx] = int(brv['dn_correspondence_id'])\n\n # store volume eg in ROI_scale33.nii.gz\n out_roi = op.join(output_dir, 'ROI_%s.nii.gz' % parcellation_name)\n\n # update the header\n hdr = aseg.get_header()\n hdr2 = hdr.copy()\n hdr2.set_data_dtype(np.uint16)\n\n log.info(\"Save output image to %s\" % out_roi)\n img = nb.Nifti1Image(rois, aseg.get_affine(), hdr2)\n nb.save(img, out_roi)\n\n iflogger.info(\"[ DONE ]\")", "title": "" }, { "docid": "efbad9db36a42640b12611cb690c9ff6", "score": "0.66888976", "text": "def roi(self):\r\n return self._roi_tool.roi()", "title": "" }, { "docid": "dc9825ee374eadef8a96b3897a57ff5c", "score": "0.668095", "text": "def create_FBROI(builder,my_ROI):\n UID_str = my_ROI.image_UID()\n roi_coords = my_ROI.contour()\n if UID_str not in nodule_map:\n nodule_map[UID_str] = {}\n nodule_map[UID_str]['roi_count'] = 1\n else:\n nodule_map[UID_str]['roi_count'] += 1\n roi_count = nodule_map[UID_str]['roi_count']\n roi_uid = 'roi_'+str(roi_count)\n\n contour_lst = []\n FBROI.RegionOfInterestStartContourVector(builder,len(roi_coords))\n for coord in my_ROI.contour():\n builder.PrependUOffsetTRelative(FBPoint.CreatePoint(builder,coord[0],coord[1]))\n contour_lst.append([coord[0],coord[1]])\n nodule_map[UID_str][roi_uid] = contour_lst\n contour = builder.EndVector(len(roi_coords))\n imageUID = builder.CreateString(my_ROI.image_UID())\n FBROI.RegionOfInterestStart(builder)\n FBROI.RegionOfInterestAddImageSOPUID(builder, imageUID)\n FBROI.RegionOfInterestAddInclusion(builder,my_ROI.inclusion())\n FBROI.RegionOfInterestAddContour(builder,contour)\n ROI = FBROI.RegionOfInterestEnd(builder)\n return ROI", "title": "" }, { "docid": "8bda34fe3159095513526076530d7fa2", "score": "0.66760564", "text": "def _add_roi(self):\n with self.debug_output:\n cat = self._roi_cat_sel.value\n r_dict = self._copy_normalized_rois()\n r_dict[cat].append(None)\n self.rois = self._unnormalize_rois(r_dict)\n 
self._roi_multi_sel.index = len(self._roi_multi_sel.options) - 1", "title": "" }, { "docid": "2cbbe9db0b1888485b9e660dabedac07", "score": "0.66606545", "text": "def get_roi(self):\n hstart=int(self.get_value(\"SUBARRAY HPOS\"))\n hend=hstart+int(self.get_value(\"SUBARRAY HSIZE\"))\n vstart=int(self.get_value(\"SUBARRAY VPOS\"))\n vend=vstart+int(self.get_value(\"SUBARRAY VSIZE\"))\n bin=int(self.get_value(\"BINNING\"))\n return (hstart,hend,vstart,vend,bin)", "title": "" }, { "docid": "3f397b98ea3de302a50ce573e95edf64", "score": "0.6649359", "text": "def _draw_rois(self, *_):\n self._last_draw_rois = Event.now()\n\n # Clear old ROIs\n self.canvas.delete(TAG_ROI)\n self.canvas.delete(TAG_ROI_NAME)\n\n # If there are no ROIs to draw, we’re done here\n roi_collections = self.stack.rois\n if not self.show_rois_var.get() or not roi_collections:\n return\n\n # Get ROIs and display\n for roi_col in roi_collections.values():\n rois = None\n try:\n rois = roi_col[self.i_frame]\n except KeyError:\n rois = None\n if rois is None:\n try:\n rois = roi_col[Ellipsis]\n except KeyError:\n rois = None\n if rois is None:\n continue\n\n col_color = roi_col.color\n if col_color is None:\n col_color = 'yellow'\n col_stroke_width = roi_col.stroke_width\n if col_stroke_width is None:\n col_stroke_width = 1\n\n for roi in rois:\n if not roi.visible and not roi.name_visible:\n continue\n\n color = roi.color\n if color is None:\n color = col_color\n\n if roi.name:\n # user-specified ROI name is base64-encoded to avoid issues due to bad format\n name_tag = f'{TAG_ROI_ID}{base64.b64encode(roi.name.encode()).decode()}'\n else:\n name_tag = None\n\n if roi.visible:\n if name_tag:\n tags = (TAG_ROI, name_tag)\n else:\n tags = TAG_ROI\n\n stroke_width = roi.stroke_width\n if stroke_width is None:\n stroke_width = col_stroke_width\n\n roi_key = roi.key()[0]\n if roi_key == 'raw':\n contour = roi.contour\n if self.scale is not None:\n contour = contour * self.scale\n self.canvas.create_polygon(*contour[:, ::-1].flat, tags=tags,\n fill='', outline=color, width=stroke_width)\n elif roi_key == 'rect':\n corners = roi.corners\n if self.scale is not None:\n corners = corners * self.scale\n self.canvas.create_polygon(*corners[:, ::-1].flat, tags=tags,\n fill='', outline=color, width=stroke_width)\n else:\n print(f\"Undefined ROI type: '{roi_key}'\") #DEBUG\n\n if roi.name and roi.name_visible:\n txtpos = roi.centroid.flat[::-1]\n if self.scale is not None:\n txtpos = txtpos * self.scale\n if name_tag:\n tags = (TAG_ROI_NAME, name_tag)\n else:\n tags = TAG_ROI_NAME\n self.canvas.create_text(*txtpos.flat,\n fill=color, text=roi.name, tags=tags)", "title": "" }, { "docid": "8266c5b5c5b74cbe73e7d8c98b19cefa", "score": "0.6610116", "text": "def set_roi(self, roi):\r\n if roi is not None:\r\n if len(roi) != 4:\r\n raise ValueError(\"ROI must be a list of four integers\")\r\n for x in roi:\r\n if not (isinstance(x, int) and x >= 0):\r\n raise ValueError(\"ROI must be a (x, y, w, h) tuple\")\r\n self.roi = roi", "title": "" }, { "docid": "66d0f1746618ccb71fa6a3948694f300", "score": "0.66022646", "text": "def setRoi(self,roi, profileWidths = (1, 1)):\n if (self.img is None):\n print(\"Error - need to call setImg before setRoi\")\n return(-1)\n\n if ((roi[X_ORIGIN] >= 0) and (roi[X_ORIGIN]<self.imgSizeX)):\n self.roi[X_ORIGIN] = int(roi[X_ORIGIN])\n else:\n print(\"WARNING: ROI[X_ORIGIN] %d Out of Range - using zero\" %\n roi[X_ORIGIN])\n self.roi[X_ORIGIN] = 0\n\n if ((roi[Y_ORIGIN] >= 0) and (roi[Y_ORIGIN]<self.imgSizeY)):\n 
self.roi[Y_ORIGIN] = int(roi[Y_ORIGIN])\n else:\n print(\"WARNING: ROI[Y_ORIGIN] %d Out of Range - using zero\" %\n roi[Y_ORIGIN])\n self.roi[Y_ORIGIN] = 0\n\n if ((roi[X_ORIGIN] + roi[X_SIZE]) <= self.imgSizeX):\n self.roi[X_SIZE] = int(roi[X_SIZE])\n else:\n self.roi[X_SIZE] = self.imgSizeX - self.roi[X_ORIGIN]\n print(\"WARNING: ROI[X_SIZE] %d Out of Range - using %d\" %\n (roi[X_SIZE], self.roi[X_SIZE]))\n\n if ((roi[Y_ORIGIN] + roi[Y_SIZE]) <= self.imgSizeY):\n self.roi[Y_SIZE] = int(roi[Y_SIZE])\n else:\n self.roi[Y_SIZE] = self.imgSizeY - self.roi[Y_ORIGIN]\n print(\"WARNING: ROI[Y_SIZE] %d Out of Range - using %d\" %\n (roi[Y_SIZE], self.roi[Y_SIZE]))\n\n self.xProfileWidth = profileWidths[0]\n self.yProfileWidth = profileWidths[1]\n self.setProfiles(self.xProfileWidth, self.yProfileWidth)", "title": "" }, { "docid": "5cdcad4c387feb7303b4d6179f9f3a37", "score": "0.6533895", "text": "def build_roi_box_head():\n return ROIBoxHead()", "title": "" }, { "docid": "55a3be74e812b460059c53b570124058", "score": "0.65069735", "text": "def roi(self, xmin, xsize, ymin, ysize, zmin, zsize, tmin=0, tsize=-1):\n self.__args.extend(('-roi',\n xmin, xsize, ymin, ysize,\n zmin, zsize, tmin, tsize))\n return self", "title": "" }, { "docid": "f3dd372b85b16aecbb29dacdf9cb2994", "score": "0.64917934", "text": "def set_roi(self, roi):\n with self._sem:\n self.roi = numpy.ascontiguousarray(roi, numpy.int8)\n self.buffers[\"ROI\"] = pyopencl.array.to_device(self.queue, self.roi)", "title": "" }, { "docid": "c9f4c010d16156b78b46d9f114bd2564", "score": "0.64815354", "text": "def setROI(self,ROI=[]):\n\t\t\n\t\tif not ROI:\n\t\t\t\"\"\"\n\t\t\tUser selection of the ROI\n\t\t\t\n\t\t\t\"\"\"\t\n\t\t\tROI = []\n\t\t\tdef onClick(event):\n\t\t\t\tROI.append([int(event.ydata),int(event.xdata)])\n\t\t\t\tplt.scatter([event.xdata],[event.ydata],c='r')\n\t\t\t\tplt.draw()\n\t\t\t\n\t\t\tfig = plt.figure()\n\t\t\tcid = fig.canvas.mpl_connect('button_press_event', onClick)\n\t\t\tplt.imshow(self[0])\n\t\t\tplt.title(\"Please select ROI coordinates\")\n\t\t\tplt.xlim(0,self[0].shape[1])\n\t\t\tplt.ylim(self[0].shape[0],0)\n\t\t\tplt.show()\n\t\t\t\n\t\t\tprint \"\\n ROI Coordinates: \\n\",ROI\n\t\t\t\n\t\tself.ROI = np.array(ROI)", "title": "" }, { "docid": "f9314e072c90a29f8c25dab9246155ed", "score": "0.6480321", "text": "def __init__(self, *args, **kwargs):\n super(PolygonROI, self).__init__(*args, **kwargs)\n rect = self.imageItem.boundingRect() # type: QRectF\n positions = [\n (rect.bottomLeft().x(), rect.bottomLeft().y()),\n (rect.bottomRight().x(), rect.bottomRight().y()),\n (rect.topRight().x(), rect.topRight().y()),\n (rect.topLeft().x(), rect.topLeft().y()),\n ]\n self._roiItem = BetterPolyLineROI(positions=positions, closed=True, scaleSnap=True, translateSnap=True)\n self.addItem(self._roiItem)", "title": "" }, { "docid": "4c7397298610b93c2f50aa0fb92c6de2", "score": "0.6459811", "text": "def segment_ROI_image(Parameters, ROIs, root_dir, sub_path):\n ch = Parameters[\"channels\"]\n subimage_height = Parameters[\"subimage_height\"] \n subimage_width = Parameters[\"subimage_width\"]\n\n\n print (root_dir, sub_path)\n print(\"Segmenting around the ROIs image.... 
:)\") \n\n \n \n \n filepath = root_dir/sub_path\n annotations_dir = root_dir/'Annotations'/sub_path.parent\n image_dir = root_dir/'JPEGImages'/sub_path.parent\n \n print (\"IM\", image_dir)\n \n \n for dirs in [annotations_dir, image_dir]:\n if (not os.path.isdir(dirs)):\n os.makedirs(dirs)\n \n \n #Preprocess the image\n \n reader = imageio.get_reader(str(filepath))\n image_open = np.asarray(reader.get_data(ch[0]))\n image_para = np.asarray(reader.get_data(ch[1]))\n image_perp = np.asarray(reader.get_data(ch[2]))\n \n \n img = np.dstack([image_open, image_para, image_perp])\n img = img/np.amax(img) \n img = img*255\n img = img.astype('uint8')\n plt.imshow(img)\n \n height, width, depth = img.shape\n print (\"Shape\", img.shape)\n print (ROIs)\n \n \n for i in range(len(ROIs)):\n x_min = int(ROIs.loc[i, 'xmin'])\n x_max = int(ROIs.loc[i, 'xmax'])\n y_min = int(ROIs.loc[i, 'ymin'])\n y_max = int(ROIs.loc[i, 'ymax'])\n \n \n \n \n #x_length = x_max - x_min\n #y_length = y_max - y_min\n \n \n #Padding can be negative!\n #x_pad = (subimage_width - x_length)//2\n #y_pad = (subimage_height - y_length)//2\n \n x_centroid = (x_max + x_min)//2\n y_centroid = (y_max + y_min)//2\n \n print (f\"Stats: X:{x_min}, {x_max}, {x_centroid} Y:{y_min}, {y_max}, {y_centroid}\")\n\n xmin = max(0, (x_centroid - subimage_width//2))\n xmax = min(width, (x_centroid + subimage_width//2))\n ymin = max(0, (y_centroid - subimage_height//2))\n ymax = min(height, (y_centroid + subimage_height//2))\n \n subimage = img[ymin:ymax, xmin:xmax, :]\n\n subROIs = ROIs[(ROIs['X']>xmin) & \n (ROIs['X']<xmax) & \n (ROIs['Y']>ymin) & \n (ROIs['Y']<ymax)].copy()\n\n\n print (\"Stats:\", \"X\", xmin, xmax, \"Y\", ymin, ymax, subimage.shape, len(subROIs))\n\n #If ROI list is not empty \n if len(subROIs)>0:\n\n #mod ROIs to fit the new size\n subROIs['xmin'] = subROIs['xmin'] - xmin\n subROIs['xmax'] = subROIs['xmax'] - xmin\n subROIs['ymin'] = subROIs['ymin'] - ymin\n subROIs['ymax'] = subROIs['ymax'] - ymin\n\n #Check for any truncations\n subROIs['Truncated'] = ((subROIs['xmin']<0) | (subROIs['xmax']>xmax) | \n (subROIs['ymin']<0) | (subROIs['ymax']>ymax))\n\n\n #print (i, j, xmin, xmax, ymin, ymax, len(subROIs))\n print (subROIs)\n\n #Save the jpeg files\n JPEG_filename = image_dir/sub_path.name.replace('.ome.tif', f'{i}.jpg')\n imageio.imwrite(str(JPEG_filename), subimage)\n \n\n #Output the labels\n labels_filename = annotations_dir/sub_path.name.replace('.ome.tif', f'{i}--labels.xml')\n labels = {'Height': subimage.shape[0], \n 'Width': subimage.shape[1], \n 'Filename' : (sub_path.name.replace('.ome.tif', f'{i}.jpg')) , 'Folder': str(sub_path.parent)} \n output_labels (labels, subROIs, labels_filename)\n \n \n return(None)", "title": "" }, { "docid": "e5e1f3f28458b151418e05c18a42c11f", "score": "0.6402772", "text": "def drawROI(self, image, roi):\n # draw a rectangle around the faces\n for (x, y, w, h) in roi:\n cv2.rectangle(\n image,\n (x, y), # lower left\n (x+w, y+h), # upper right\n (0, 255, 0), # border color\n 2 # line thickness\n )", "title": "" }, { "docid": "eb796bb52c05694eab4adf3b5f12d464", "score": "0.6394815", "text": "def get_roi(image, wname):\n global REFPT, CROPPING, SRC_COORDS, T_COORDS\n clone = image.copy()\n param = [wname, image]\n cv2.namedWindow(wname)\n cv2.setMouseCallback(wname, click_and_crop, param)\n cv2.imshow(wname, image)\n cv2.waitKey(0)\n\n # if there are two reference points, then set the region of interest\n # of the image and display it\n if len(REFPT) == 2:\n if wname == \"SOURCE\":\n 
SRC_COORDS = REFPT\n if wname == \"TARGET\":\n T_COORDS = REFPT\n\n #print(REFPT) # for debugging\n cv2.destroyAllWindows()", "title": "" }, { "docid": "274bfea1f4a6efbc42e4a2953ca451d1", "score": "0.63507354", "text": "def _fcn_build_roi_list(self):\n # Select volume :\n self.volume.select_volume(str(self._roiDiv.currentText()))\n # Clear widget list and add ROIs :\n self._roiToAdd.clear()\n self._roiToAdd.addItems(self.volume.roi_labels)\n # By default, uncheck items :\n for num in range(self._roiToAdd.count()):\n item = self._roiToAdd.item(num)\n item.setFlags(item.flags() | QtCore.Qt.ItemIsUserCheckable)\n item.setCheckState(QtCore.Qt.Unchecked)", "title": "" }, { "docid": "839d6389281e3135a87bb6648699c32a", "score": "0.6344049", "text": "def make_roi_grid(self, toggle=True, method=''):\n method = method if method else self.sender().text()\n pos, shape = self.rh.ROIs[0].roi.pos(), self.rh.ROIs[0].roi.size()\n if method == 'Single ROI':\n for r in self.rh.ROIs:\n r.resize(*map(int, [pos[0], pos[1], shape[0], shape[1]]))\n elif method == 'Square grid':\n n = len(self.rh.ROIs) # number of ROIs\n d = int((n - 1)**0.5 + 1) # number of ROIs per row\n X = int(self.rh.shape[0] / d) # horizontal distance between ROIs\n Y = int(self.rh.shape[1] / int((n - 3/4)**0.5 + 0.5)) # vertical distance\n for i in range(n): # ID of ROI\n try:\n newx, newy = int(X * (i%d + 0.5)), int(Y * (i//d + 0.5))\n if any([newx//self.rh.shape[0], newy//self.rh.shape[1]]):\n warning('Tried to set square ROI grid with (xc, yc) = (%s, %s)'%(newx, newy)+\n ' outside of the image')\n newx, newy = 0, 0\n self.rh.ROIs[i].resize(*map(int, [newx, newy, 1, 1]))\n except ZeroDivisionError as e:\n error('Invalid parameters for square ROI grid: '+\n 'x - %s, y - %s, pic size - %s, roi size - %s.\\n'%(\n pos[0], pos[1], self.rh.shape[0], (shape[0], shape[1]))\n + 'Calculated width - %s, height - %s.\\n'%(X, Y) + str(e))\n elif method == '2D Gaussian masks':\n try: \n im = self.im_canvas.image.copy() - self.rh.bias\n if np.size(np.shape(im)) == 2:\n for r in self.rh.ROIs:\n r.create_gauss_mask(im) # fit 2D Gaussian to max pixel region\n # then block that region out of the image\n try:\n im[r.x-r.w : r.x+r.w+1, r.y-r.h:r.y+r.h+1] = np.zeros((2*r.w+1, 2*r.h+1)) + np.min(im)\n except (IndexError, ValueError): pass\n except AttributeError: pass", "title": "" }, { "docid": "175773cf544079a2053d90f255009b57", "score": "0.6317071", "text": "def __set_roi(self, event, x, y, flags, params):\r\n if event==cv2.EVENT_LBUTTONDOWN:\r\n self.ix = x\r\n self.iy = y\r\n self.drawing = True\r\n if event==cv2.EVENT_MOUSEMOVE:\r\n if self.drawing:\r\n self.x = x\r\n self.y = y\r\n self.img = self.current_frame.copy()\r\n cv2.rectangle(self.img, (self.ix, self.iy), (x, y,), (0, 0, 255), 3)\r\n if event==cv2.EVENT_LBUTTONUP:\r\n self.drawing = False", "title": "" }, { "docid": "f51ac614c51ac4c262c16c6176bc61e0", "score": "0.6294511", "text": "def click(event):\n ROIsize = self.settings['ROI_size']\n Ly =self.img.shape[-1]\n Lx =self.img.shape[-2]\n \n if self.settings['selectROI'] and (Lx,Ly)!=(ROIsize,ROIsize):\n event.accept() \n pos = event.pos()\n x = int(pos.x()) #pyqtgraph is transposed\n y = int(pos.y())\n x = max(min(x, Lx-ROIsize//2 ),ROIsize//2 )\n y = max(min(y, Ly-ROIsize//2 ),ROIsize//2 )\n self.settings['roiX']= x\n self.settings['roiY']= y\n if hasattr(self, 'roi'):\n self.imv.removeItem(self.roi) \n self.roi = pg.RectROI([x-ROIsize//2,y-ROIsize//2], [ROIsize,ROIsize])\n self.imv.addItem(self.roi)\n \n self.settings['selectROI'] = False", 
"title": "" }, { "docid": "907a0595962eb8c87eced18c267928b7", "score": "0.6285353", "text": "def SetRoiBoxSelect(self):\n #Check that a Data set has been loaded\n if self.fname == None:\n return\n\n if self.imageType != 'la':\n print 'Function only valid for linear array probes. '\n return\n\n from matplotlib.widgets import RectangleSelector\n current_ax = plt.subplot(111) # make a new plotingrangej\n\n def on_select(eclick, erelease):\n self.roiX = [0,0]\n self.roiY = [0,0]\n\n self.roiX[0] = int(erelease.xdata/self.deltaX)\n self.roiX[1] = int(eclick.xdata/self.deltaX)\n self.roiX.sort()\n\n self.roiY[0] = int(eclick.ydata/self.deltaY)\n self.roiY[1] = int(erelease.ydata/self.deltaY)\n self.roiY.sort()\n\n # drawtype is 'box' or 'line' or 'none'\n rectprops = dict(facecolor='red', edgecolor = 'red',\n alpha=0.5, fill=False)\n \n rs = RectangleSelector(current_ax, on_select,\n drawtype='box', useblit=True,\n button=[1,3], # don't use middle button\n minspanx=0, minspany=0,\n spancoords='data',\n rectprops = rectprops)\n\n #could be image sequence or just a 2-D image\n import types\n if type(self.data) == types.NoneType:\n self.ReadFrame(0)\n temp = self.data\n\n\n from scipy.signal import hilbert\n from numpy import log10\n bMode = log10(abs(hilbert(temp, axis = 0)))\n bMode = bMode - bMode.max()\n bMode[bMode < -3] = -3\n\n #import matplotlib and create plot\n import matplotlib.cm as cm\n\n plt.imshow(bMode, cmap = cm.gray, extent = [0, self.fovX, self.fovY, 0])\n plt.show()", "title": "" }, { "docid": "a59be2fad99acc7a2ba8e6c23a7e36d1", "score": "0.6275901", "text": "def roi_target(self,rois,gt_box,label,pos_thresh=cfg.rcnn_pos_thresh,\n neg_thresh_lo=cfg.rcnn_neg_thresh_lo,\n neg_thresh_hi=cfg.rcnn_neg_thresh_hi):\n assert rois.shape[1]==4, \"please remove the img_id\"\n rois=torch.cat([rois,gt_box],dim=0) # [a+b,4]\n \n ious=t_box_iou(rois,gt_box) # [a+b,b]\n max_ious,idx=ious.max(dim=1)\n \n # parameterizd box\n gt_loc=encode_box(gt_box[idx],rois)\n\n # assign the neg:\n assign=torch.full([len(rois)],-1).long().type_as(label)\n\n neg_mask=(max_ious>neg_thresh_lo)*(max_ious<neg_thresh_hi)\n # if neg_mask.sum() == 0:\n # tqdm.write(\"Warning: neg_roi for fast r-cnn is zero\",end=\" \")\n # neg_mask=(max_ious<neg_thresh_hi)\n # raise ValueError(\"there is no negative roi for fast r-cnn\")\n assign[neg_mask]=0\n \n # assign the pos:\n pos_mask=max_ious>pos_thresh\n\n # plus one since 0 denotes the neg, we must begin from the 1\n assign[pos_mask]=label[idx][pos_mask].long()+1 \n\n # normalize?\n mean=self.mean # [4]\n std=self.std # [4]\n\n mean=mean[None].expand_as(gt_loc).type_as(gt_loc)\n std=std[None].expand_as(gt_loc).type_as(gt_loc)\n\n gt_loc-=mean\n gt_loc=gt_loc/std\n\n return rois,gt_loc,assign", "title": "" }, { "docid": "8d67f9685f4abf4db6878699a3409800", "score": "0.62747985", "text": "def selectROI(windowName, img, showCrosshair=..., fromCenter=...) 
-> retval:\n ...", "title": "" }, { "docid": "b44d8d642d88fedfb5e042adb194c018", "score": "0.6271857", "text": "def add_rois(self, rois):\r\n # Load objects\r\n objects_id = str(self.boxes_source.value().rsplit(\"/\", 1)[-1])\r\n # str(self.boxes_source.value())\r\n\r\n logger.debug(f\"Get objects {objects_id}\")\r\n objects_src = DataModel.g.dataset_uri(objects_id, group=\"objects\")\r\n params = dict(workpace=True, src=objects_src, entity_type=\"boxes\")\r\n\r\n result = Launcher.g.run(\"objects\", \"get_entities\", **params)\r\n\r\n if result:\r\n entities_arr = decode_numpy(result)\r\n\r\n rois = entities_arr[:, 4:]\r\n print(rois)\r\n\r\n # Iterate through ROIs and add them to the ROI list\r\n original_workspace = DataModel.g.current_workspace\r\n\r\n for roi in rois:\r\n roi_list = list(roi)\r\n roi_list = [int(el) for el in roi_list]\r\n # reorder to z_st, z_end, x_st, x_end, y_st, y_end\r\n roi_list = [\r\n roi_list[0],\r\n roi_list[3],\r\n roi_list[1],\r\n roi_list[4],\r\n roi_list[2],\r\n roi_list[5],\r\n ]\r\n\r\n roi_name = (\r\n DataModel.g.current_workspace\r\n + \"_roi_\"\r\n + str(roi[0])\r\n + \"_\"\r\n + str(roi[3])\r\n + \"_\"\r\n + str(roi[1])\r\n + \"_\"\r\n + str(roi[4])\r\n + \"_\"\r\n + str(roi[2])\r\n + \"_\"\r\n + str(roi[5])\r\n )\r\n cfg.ppw.clientEvent.emit(\r\n {\"source\": \"panel_gui\", \"data\": \"make_roi_ws\", \"roi\": roi_list}\r\n )\r\n self.add_roi(roi_name, original_workspace, roi_list)", "title": "" }, { "docid": "cabdf45b195176cf9a3d597d7ad953b9", "score": "0.62654203", "text": "def show( self ):\n source = cv.LoadImage( self.files[self.index] )\n width, height = cv.GetSize(source)\n \n center = (width/2) + self.offset;\n \n cv.Line( source, (center,0), (center,height), (0,255,0), 1)\n\n\n if self.roi:\n x,y,a,b = self.roi;\n \n print self.roi\n \n width, height = ((a - x), (b - y))\n mask = cv.CreateImage( (width, height), cv.IPL_DEPTH_8U, 1)\n \n cv.SetImageROI( source, (x, y, width, height)) \n cv.Split( source, None, None, mask, None );\n \n gray = cv.CloneImage( mask );\n \n cv.InRangeS( mask, self.thresholdMin, self.thresholdMax, mask );\n cv.And( mask, gray, gray ); \n \n line = [];\n points = []; \n \n for i in range(0,height-1):\n row = cv.GetRow( gray, i)\n \n minVal,minLoc,maxLoc,maxVal = cv.MinMaxLoc(row);\n \n y = i;\n x = maxVal[0]\n point = (0, 0, height-i)\n \n if x > 0:\n line.append((x,y));\n \n s = x / sin(radians(self.camAngle))\n x = s * cos(self.angles[self.index]) \n z = height - y\n y = s * sin(self.angles[self.index])\n \n point = (round(x,2),round(y,2),z);\n \n points.append(point)\n \n \n cv.PolyLine( source, [line], False, (255,0,0), 2, 8)\n cv.ResetImageROI( source )\n x,y,a,b = self.roi;\n cv.Rectangle( source, (int(x), int(y)), (int(a), int(b)), (255.0, 255, 255, 0) );\n\n if self.roi:\n x,y,a,b = self.roi;\n \n width, height = ((a - x), (b - y))\n mask = cv.CreateImage( (width, height), cv.IPL_DEPTH_8U, 1)\n \n cv.SetImageROI( source, (x-width, y, width, height)) # moves roi to the left\n cv.Split( source, None, None, mask, None );\n \n gray = cv.CloneImage( mask );\n \n cv.InRangeS( mask, self.thresholdMin, self.thresholdMax, mask );\n cv.And( mask, gray, gray ); \n \n line = [];\n points2 = []; \n \n for i in range(0,height-1):\n row = cv.GetRow( gray, i)\n \n minVal,minLoc,maxLoc,maxVal = cv.MinMaxLoc(row);\n \n y = i;\n x = maxVal[0]\n point = (0, 0, height-i)\n \n if x > 0:\n line.append((x,y));\n \n x = width - x; # left to the x-axis\n \n s = x / sin(radians(self.camAngle))\n \n x = s * 
cos(self.angles[self.index]) \n z = height - y# 500 higher then the other.\n y = s * sin(self.angles[self.index])\n \n a = radians(300)\n \n nx = ( cos(a) * x ) - ( sin(a) * y )\n ny = ( sin(a) * x ) + ( cos(a) * y )\n \n point = (nx,ny,z);\n \n points2.append(point)\n \n cv.PolyLine( source, [line], False, (255,0,0), 2, 8)\n cv.ResetImageROI( source )\n x,y,a,b = self.roi;\n cv.Rectangle( source, (int(x), int(y)), (int(a), int(b)), (255.0, 255, 255, 0) );\n\n if self.mode == 'mask':\n cv.ShowImage( 'preview', mask )\n return\n\n if self.mode == 'record' and self.roi:\n font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX,0.5,0.5,1)\n cv.PutText( source, \"recording %d\" % self.index, (20,20), font, (0,0,255))\n self.points.extend(points);\n self.points2.extend(points2);\n #self.colors.extend(colors);\n\n\n\n cv.ShowImage( 'preview', source )", "title": "" }, { "docid": "9a1017cb14597a541268df3a7fe80ccf", "score": "0.6260349", "text": "def setup_roi_fields(self):\r\n apply_roi_button = QPushButton(\"Apply ROI\")\r\n reset_button = QPushButton(\"Reset ROI\")\r\n roi_fields = QGroupBox(\"Select Region of Interest:\")\r\n roi_layout = QGridLayout()\r\n roi_layout.addWidget(QLabel(\"Drag a box in the image window or type manually\"), 0, 0, 1, 3)\r\n roi_layout.addWidget(QLabel(\"Axis\"), 1, 0)\r\n roi_layout.addWidget(QLabel(\"Start Value:\"), 1, 1)\r\n roi_layout.addWidget(QLabel(\"End Value:\"), 1, 2)\r\n roi_layout.addWidget(apply_roi_button, 1, 3)\r\n roi_layout.addWidget(reset_button, 2, 3)\r\n roi_layout.addWidget(QLabel(\"x:\"), 2, 0)\r\n self.xstart_linedt = QLineEdit(\"0\")\r\n self.xstart_linedt.textChanged.connect(self.on_roi_param_changed)\r\n roi_layout.addWidget(self.xstart_linedt, 2, 1)\r\n self.xend_linedt = QLineEdit(\"0\")\r\n self.xend_linedt.textChanged.connect(self.on_roi_param_changed)\r\n roi_layout.addWidget(self.xend_linedt, 2, 2)\r\n roi_layout.addWidget(QLabel(\"y:\"), 3, 0)\r\n self.ystart_linedt = QLineEdit(\"0\")\r\n self.ystart_linedt.textChanged.connect(self.on_roi_param_changed)\r\n roi_layout.addWidget(self.ystart_linedt, 3, 1)\r\n self.yend_linedt = QLineEdit(\"0\")\r\n self.yend_linedt.textChanged.connect(self.on_roi_param_changed)\r\n roi_layout.addWidget(self.yend_linedt, 3, 2)\r\n roi_layout.addWidget(QLabel(\"z:\"), 4, 0)\r\n self.zstart_linedt = QLineEdit(\"0\")\r\n self.zstart_linedt.textChanged.connect(self.on_roi_param_changed)\r\n roi_layout.addWidget(self.zstart_linedt, 4, 1)\r\n self.zend_linedt = QLineEdit(\"0\")\r\n self.zend_linedt.textChanged.connect(self.on_roi_param_changed)\r\n roi_layout.addWidget(self.zend_linedt, 4, 2)\r\n roi_layout.addWidget(QLabel(\"Downsample Factor:\"), 5, 0)\r\n self.downsample_spinner = QSpinBox()\r\n self.downsample_spinner.setRange(1, 10)\r\n self.downsample_spinner.setSpecialValueText(\"None\")\r\n self.downsample_spinner.setMaximumWidth(60)\r\n self.downsample_spinner.valueChanged.connect(self.on_roi_param_changed)\r\n roi_layout.addWidget(self.downsample_spinner, 5, 1)\r\n roi_layout.addWidget(QLabel(\"Estimated datasize (MB):\"), 5, 3)\r\n self.data_size_label = QLabel(\"0\")\r\n roi_layout.addWidget(self.data_size_label, 5, 4)\r\n roi_fields.setLayout(roi_layout)\r\n apply_roi_button.clicked.connect(self.on_roi_apply_clicked)\r\n reset_button.clicked.connect(self.on_roi_reset_clicked)\r\n return roi_fields", "title": "" }, { "docid": "cf3785dff72358282be07f0065e60a3a", "score": "0.6255171", "text": "def enableROIselection(self):\n def click(event):\n \"\"\"\n Resizes imageRaw on click event, to the specified size 
'ROI_size'\n around the clicked point.\n \"\"\"\n ROIsize = self.settings['ROI_size']\n Ly =self.img.shape[-1]\n Lx =self.img.shape[-2]\n \n if self.settings['selectROI'] and (Lx,Ly)!=(ROIsize,ROIsize):\n event.accept() \n pos = event.pos()\n x = int(pos.x()) #pyqtgraph is transposed\n y = int(pos.y())\n x = max(min(x, Lx-ROIsize//2 ),ROIsize//2 )\n y = max(min(y, Ly-ROIsize//2 ),ROIsize//2 )\n self.settings['roiX']= x\n self.settings['roiY']= y\n if hasattr(self, 'roi'):\n self.imv.removeItem(self.roi) \n self.roi = pg.RectROI([x-ROIsize//2,y-ROIsize//2], [ROIsize,ROIsize])\n self.imv.addItem(self.roi)\n \n self.settings['selectROI'] = False\n \n self.imv.getImageItem().mouseClickEvent = click\n self.settings['selectROI'] = True", "title": "" }, { "docid": "a8e5505d17ed27644b6a99092349a374", "score": "0.62447166", "text": "def selectROIs(windowName, img, showCrosshair=..., fromCenter=...) -> boundingBoxes:\n ...", "title": "" }, { "docid": "6a348dea5fa2bd00eef486e7c0526feb", "score": "0.6225949", "text": "def asROI(self):\n return PolygonRoi(\n FloatPolygon(*map(list, zip(*map(Ray.getOrigin, self.points)))), # Unpacking x and y coordinates of rays in two lists\n Roi.NORMAL\n )", "title": "" }, { "docid": "e329d10695fded0cfbbdfd7664d1056e", "score": "0.6217549", "text": "def set_rois(self, ROIlist):\n self.rh.create_rois(len(ROIlist))\n self.rh.resize_rois(ROIlist)\n self.display_rois()", "title": "" }, { "docid": "c9a98ecddd77bb70f4bba805f038c7f1", "score": "0.6217537", "text": "def runDefineCropROI(self, cropParam):\n vol \t\t= slicer.mrmlScene.GetNodeByID(cropParam.GetInputVolumeNodeID()\t)\n volBounds\t= [0,0,0,0,0,0]\n vol.GetRASBounds(volBounds)\n logging.info(volBounds)\n\n #Find Dimensions of Image\n volDim\t\t= [ (volBounds[1]-volBounds[0]),\n \t\t\t (volBounds[3]-volBounds[2]),\n \t\t\t (volBounds[5]-volBounds[4]) ]\n roi\t\t\t= slicer.mrmlScene.GetNodeByID(cropParam.GetROINodeID())\n\n #Find Center of Image\n volCenter \t= [ ((volBounds[0]+volBounds[1])/2),\n \t\t\t ((volBounds[2]+volBounds[3])/2),\n ((volBounds[4]+volBounds[5])/2) ]\n\n roi.SetXYZ(volCenter)\n roi.SetRadiusXYZ(volDim[0]/2, volDim[1]/2, volDim[2]/2 )\n return roi", "title": "" }, { "docid": "ccca5a22455665021add321a61b723cd", "score": "0.6213413", "text": "def create_roi(array, loc, kx=3, ky=3, edge_wrap=False):\r\n\r\n dims = array.shape\r\n if (len(dims) != 2):\r\n raise Exception(\"Array Must Be 2 Dimensional!\")\r\n\r\n # Kernel size squared\r\n kx2 = kx**2\r\n ky2 = ky**2\r\n\r\n # Kernel offsets\r\n xoff = kx / 2\r\n yoff = ky / 2\r\n\r\n # Find the seed's neighbours\r\n x = numpy.arange(kx2) % kx + (seed[1] - xoff)\r\n y = numpy.arange(ky2) / ky + (seed[0] - yoff)\r\n roi = (y,x)\r\n\r\n # If the ROI is outside the array bounds it will be set to the min/max array bounds\r\n # otherwise it will be wrap around the edges of the array if edge_wrap is set to True\r\n if not edge_wrap:\r\n # Check if any parts of the roi are outside the image\r\n bxmin = numpy.where(roi[1] < 0)\r\n bymin = numpy.where(roi[0] < 0)\r\n bxmax = numpy.where(roi[1] >= dims[1])\r\n bymax = numpy.where(roi[0] >= dims[0])\r\n\r\n # Change if roi co-ordinates exist outside the image domain.\r\n roi[1][bxmin] = 0\r\n roi[0][bymin] = 0\r\n roi[1][bxmax] = dims[1]-1\r\n roi[0][bymax] = dims[0]-1\r\n\r\n return roi", "title": "" }, { "docid": "1fba9108f0922e840c479b2b06a95426", "score": "0.6209862", "text": "def getRoiImg(self):\n roiImg = cv2.cvtColor(self.img,cv2.COLOR_GRAY2RGB)\n cv2.rectangle(roiImg,\n (self.roi[X_ORIGIN], 
self.roi[Y_ORIGIN]),\n (self.roi[X_ORIGIN] + self.roi[X_SIZE],\n self.roi[Y_ORIGIN] + self.roi[Y_SIZE]),\n (65535,0,0),\n 3)\n return(roiImg)", "title": "" }, { "docid": "ebcc5929151cf697cd4b92d0953746c1", "score": "0.6206967", "text": "def roi(self):\n w, h, x, y = c_int32(), c_int32(), c_int32(), c_int32()\n cco, packet_size = color_coding_t(), c_int32()\n dll.dc1394_format7_get_roi(self._cam, self._mode_id, pointer(cco),\n byref(packet_size),\n byref(x), byref(y), byref(w), byref(h))\n return ((w.value, h.value), (x.value, y.value),\n color_codings[cco.value], packet_size.value)", "title": "" }, { "docid": "155a1dc6e9674817d64e245dbf31b7d6", "score": "0.617504", "text": "def select_roi(self, mm_generator_kw={}, **rv_kw):\n y0,x0 = int(np.floor(self.motion_borders.ymin)), int(np.floor(self.motion_borders.xmin))\n y1,x1 = int(np.ceil(self.motion_borders.ymax)), int(np.ceil(self.motion_borders.xmax))\n\n def mm_mean_subtracted(downsample=5, mean_src='maxmov', equalize=True):\n \"\"\"\n mean_src : 'maxmov' or 'all'\n \"\"\"\n with h5py.File(self.data_file, 'r+') as f:\n if 'maxmov' in f:\n mean = np.mean(f['maxmov'], axis=0)\n n = len(f['maxmov'])\n else:\n raise Exception('No maxmov available.')\n\n if mean_src == 'maxmov':\n pass\n elif mean_src == 'all':\n mean = self.mean(axis=0)\n \n for i in range(n//downsample):\n with h5py.File(self.data_file, 'r+') as f:\n fr = np.max( f['maxmov'][i*downsample : i*downsample+downsample], axis=0 )\n fr = fr-mean\n if equalize:\n fr = equalize_adapthist((fr-fr.min())/(fr.max()-fr.min()))\n\n minn = np.nanmin(fr)\n\n fr[:y0] = minn\n fr[y1:] = minn\n fr[:x0] = minn\n fr[x1:] = minn\n yield fr\n \n inst = mm_mean_subtracted(**mm_generator_kw)\n self.roiview = ROIView(next(inst), iterator=inst, **rv_kw)\n print('Remember to set roi using Data.set_roi(roi_view.roi).\\nIf you forgot to store roi_view, it is saved in object as Data.roiview.')\n\n return self.roiview", "title": "" }, { "docid": "e25012b4e70cfae0b1304d3a2a44c080", "score": "0.61721134", "text": "def roi(img):\n # Region of interest\n imshape = img.shape\n roi = np.array([[\n (100,100),\n (600, 100), \n (600, 400), \n (100, 400)\n ]], dtype=np.int32)\n\n #defining a blank mask to start with\n mask = np.zeros_like(img) \n \n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 
3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n \n #filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, roi, ignore_mask_color)\n \n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n\n return masked_image", "title": "" }, { "docid": "cf270a56671529e2999c56421c9e05ba", "score": "0.6164488", "text": "def user_roi(self, roi):\n # find which ROI was dragged\n for r in self.rh.ROIs:\n if r.roi == roi:\n break\n x0, y0 = roi.pos() # lower left corner of bounding rectangle\n w, h = map(int, roi.size()) # width, height\n xc, yc = int(x0 + w//2), int(y0 + h//2) # centre of ROI\n r.w, r.h = w, h\n r.label.setPos(x0, y0)\n r.translate_mask(xc, yc)\n for key, val in zip(r.edits.keys(), [xc, yc, w, h]):\n r.edits[key].setText(str(val))", "title": "" }, { "docid": "09a8159ff24ae4894343687daa28ec4e", "score": "0.61622715", "text": "def display_rois(self, n=''):\n if n:\n self.rh.create_rois(int(n))\n viewbox = self.im_canvas.getViewBox()\n for item in viewbox.allChildren(): # remove unused ROIs\n if ((type(item) == pg.graphicsItems.ROI.ROI or \n type(item) == pg.graphicsItems.TextItem.TextItem) and \n item not in [r.roi for r in self.rh.ROIs] + [r.label for r in self.rh.ROIs]):\n viewbox.removeItem(item)\n layout = self.centre_widget.layout()\n k = np.sqrt(len(self.plots))\n for i, r in enumerate(self.rh.ROIs):\n if r.roi not in viewbox.allChildren():\n reset_slot(r.roi.sigRegionChangeFinished, self.user_roi, True) \n reset_slot(r.threshedit.textEdited, self.update_plots, True)\n r.roi.setZValue(10) # make sure the ROI is drawn above the image\n viewbox.addItem(r.roi)\n viewbox.addItem(r.label)\n try:\n self.plots[i]['plot'].setTitle('ROI '+str(r.id))\n for j, label in enumerate(list(r.edits.values())+[r.threshedit, r.autothresh]):\n layout.addWidget(label, (i//k)*3, 7+(i%k)*6+j, 1,1)\n except IndexError as e: pass # warning('Atom Checker has more plots than ROIs')", "title": "" }, { "docid": "99553b92ce7df7c8efd6b73ea679dfd8", "score": "0.6157534", "text": "def roi(self, data):\r\n x, y = self._event_xdata, self._event_ydata\r\n return contour_to_roi(x, y, data)", "title": "" }, { "docid": "97ab9a035b1d90312864dedfc18f9c74", "score": "0.609071", "text": "def load_MPII_annotation(self, index):\n\n \n roi_rec = dict()\n roi_rec['image'] = os.path.join(self.image_path,self.annotation[index]['img_paths'])\n #size = cv2.imread(roi_rec['image']).shape\n #roi_rec['height'] = size[0]\n #roi_rec['width'] = size[1]\n roi_rec['height'] = self.annotation[index]['img_height']\n roi_rec['width'] = self.annotation[index]['img_width']\n\n \n numOtherPerson=self.annotation[index]['numOtherPeople']\n otherPersonJoints=[]\n if numOtherPerson >0:\n if numOtherPerson>1:\n otherPersonJoints=otherPersonJoints+self.annotation[index]['joint_others']\n else:\n otherPersonJoints.append(self.annotation[index]['joint_others'])\n mainPersonJoints=self.annotation[index]['joint_self']\n allPerson=otherPersonJoints+[mainPersonJoints]\n num_objs = len(allPerson)\n\n poses = np.zeros((num_objs, 28), dtype=np.uint16)\n gt_classes = np.zeros((num_objs), dtype=np.int32)\n overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n\n class_to_index = dict(zip(self.classes, range(self.num_classes)))\n # Load object bounding boxes into a data frame.\n for ix, obj in enumerate(allPerson):\n \n cls = class_to_index['person']\n po=np.zeros((16,3),dtype=np.float32)\n 
po[0:len(obj),:]=np.array(obj,dtype=np.float32)\n assert po.shape[0] ==16,'the image is wrong'\n\n poses[ix, :] = po[self.index2index[1:],:-1].ravel() ### obj must [14,2]\n gt_classes[ix] = cls\n overlaps[ix, cls] = 1.0\n\n roi_rec.update({'poses': poses,\n 'gt_classes': gt_classes,\n 'gt_overlaps': overlaps,\n 'max_classes': overlaps.argmax(axis=1),\n 'max_overlaps': overlaps.max(axis=1),\n 'flipped': False})\n return roi_rec", "title": "" }, { "docid": "3d6154a1bb42664e34819c26bbdfe0fc", "score": "0.606863", "text": "def __init__(self, roi, points, frame_no):\n # assign next object id\n self.id = Object.counter\n Object.counter += 1\n self.roi = roi\n self.roi_valid = True # whether the RoI is acquired from object detection or predicted\n self.roi_history = {frame_no: roi}\n self.points = set(points)\n # Add random color to the object for visualization\n self.color = colorsys.hsv_to_rgb(random(), 1, 1)", "title": "" }, { "docid": "43d05be7642aaafb6b2c3530cab1874c", "score": "0.605366", "text": "def __init__(self, maglim, roi):\n self.roi = roi\n mask = maglim*np.ones(hp.nside2npix(self.roi.config['coords']['nside_pixel']))\n self.nside = hp.npix2nside(len(mask))\n # Sparse maps of pixels in various ROI regions\n self.mask_roi_sparse = mask[self.roi.pixels]", "title": "" }, { "docid": "7d5b49218197279100d84f4a623bbe11", "score": "0.6052589", "text": "def set_roi(self, mcaroi, name='OCR', min_x=1, size_x=4095):\n mcaroi.configure_mcaroi(\n roi_name=name,\n min_x=min_x,\n size_x=size_x \n )\n mcaroi.name = name\n mcaroi.total_rbv.name = name", "title": "" }, { "docid": "474e81ce28edd8b2f7a4e64386569ca9", "score": "0.604833", "text": "def FindAdaptiveROI(image, center_ROI, aspr_ROI,displayImages, debug = True):\n #inputfilename = 'img6.png'\n #outputfilename = 'edge2.png'\n #nucleation_down = 1 # 0 for nucleation up\n #center_ROI = (511,672) #center of the object to be identified\n #aspr_ROI = 2/3 # x_width/y_width for ROI. 
This is found by TRAINING\n #debug = True # flag to output ERRRORs\n #remove the strip at the bottom\n #cropsequence = ((0,44),(0,0))\n #img = ReadImage(inputfilename)\n #img = CropImage(img,cropsequence,0)\n #to mainain the aspect ratio of roi to be same as that of image, set the aspect ratio\n #asp_ratio = int(1344/(1066-44))\n #list of pad sizes to be removed along x axis\n array_x_ROI = np.array([100,200,300,400,500,600,700,800,902])\n array_y_ROI = (array_x_ROI*aspr_ROI).astype(int)\n n = array_x_ROI.size\n optimum_x_ROI = 0\n optimum_y_ROI = 0\n #set the array for relative strengths and maxima positions for the unimodal or bimodal distributions.\n array_rel_strength = np.zeros(n)\n array_maximum = np.zeros((n,2))\n displayImages = 0\n for i in np.arange(n):\n x_width = array_x_ROI[i]\n y_width = array_y_ROI[i]\n #set up the cropsequence so that pads are removed centered around the center of the image.\n cropsequence = CropSequenceGenerate(image,(center_ROI,(x_width,y_width)))\n cropimg = CropImage(image,cropsequence,0)\n imgbyte = Img2Ubyte(cropimg,0)\n img_med = MedianFilter(imgbyte,displayImages)\n maximum,rel_strength = modal_analysis(img_med,displayImages,debug) #strength is zero if distribution is unimodal and close to zero if the foreground is very small compared to background or vice versa\n array_rel_strength[i] = rel_strength \n array_maximum[i] = maximum\n if displayImages==1:\n #plot the relative strength variation and choose the appropriate ROI\n plt.figure(),plt.title(\"Finding Optimum ROI by varying xROI\"),plt.plot(array_x_ROI,array_rel_strength)\n #if all are unimodal distributions, then there either is no object to be found or object is beyond the ROI. This means that we need to check for bigger ROIs with progressive increase in y axis width\n max_rel_strength = np.max(array_rel_strength)\n if debug: print(\"maximum relative strength is \" + str(max_rel_strength))\n if max_rel_strength < 0.001:\n optimum_x_ROI = 1000\n else:\n #find the optimum ROI from maximum of the relative strength vs ROI variation\n optimum_x_ROI = array_x_ROI[array_rel_strength.argsort()[-1]]\n optimum_y_ROI = array_y_ROI[array_rel_strength.argsort()[-1]]\n print(\"optimum_x_ROI is \" + str(optimum_x_ROI))\n\n #if optimum ROI is less than 1000, then it probably means that the object is not occluded and search for the ROI is completed. 
If the ROI is not optimized then we can increase the y_width of ROI further keeping the x_width to be constant at 1022\n if optimum_x_ROI == 1000:\n array_y_ROI = np.array([800,900,1000,1100])\n n = array_y_ROI.size\n array_x_ROI = np.ones(n,dtype = np.int32)*902\n #set the array for relative strengths and maxima positions for the unimodal or bimodal distributions.\n array_rel_strength = np.zeros(n)\n array_maximum = np.zeros((n,2))\n displayImages = 0\n for i in np.arange(n):\n x_width = array_x_ROI[i]\n y_width = array_y_ROI[i]\n #set up the cropsequence so that pads are removed across y axis around the center of the image.\n cropsequence = CropSequenceGenerate(image,(center_ROI,(x_width,y_width)))\n cropimg = CropImage(image,cropsequence,0)\n imgbyte = Img2Ubyte(cropimg,0)\n img_med = MedianFilter(imgbyte,displayImages)\n maximum,rel_strength = modal_analysis(img_med,displayImages,debug) #strength is zero if distribution is unimodal and close to zero if the foreground is very small compared to background or vice versa\n array_rel_strength[i] = rel_strength \n array_maximum[i] = maximum\n displayImages = 1\n if displayImages == 1:\n #plot the relative strength variation and choose the appropriate ROI\n plt.figure(),plt.title(\"Finding Optimum ROI by varying yROI\"),plt.plot(array_y_ROI,array_rel_strength)\n max_rel_strength = np.max(array_rel_strength)\n if max_rel_strength == 0:\n optimum_x_ROI = 0\n optimum_y_ROI = 0\n if debug: print(\"This image needs to be discarded\")\n #find the optimum ROI from maximum of the relative strength vs ROI variation\n optimum_x_ROI = array_x_ROI[array_rel_strength.argsort()[-1]]\n optimum_y_ROI = array_y_ROI[array_rel_strength.argsort()[-1]]\n if optimum_y_ROI == 1300:\n #so the whole image needs to be used for further processing\n optimum_x_ROI = 1022\n optimum_y_ROI = 1344\n #proceed with further processing with optimum ROI\n optimum_ROI = (optimum_x_ROI,optimum_y_ROI)\n if debug: print(\"Optimum ROI is \",optimum_ROI)\n return optimum_ROI", "title": "" }, { "docid": "62bfcb8619b7f5deec11d3d41dec04a3", "score": "0.60479146", "text": "def create_pipeline_nii_to_subj_ROI(\n main_path, filter_gm_threshold=0.9, pipeline_name=\"nii_to_subj_ROI\",\n background_val=-1.0, plot=True, reslice=False, resample=False,\n min_BOLD_intensity=50, percent_signal=0.5):\n if reslice and resample:\n print(\"Only reslice OR resample can be true, setting reslice to False\")\n reslice = False\n\n pipeline = pe.Workflow(name=pipeline_name)\n pipeline.base_dir = main_path\n\n inputnode = pe.Node(niu.IdentityInterface(fields=[\n 'nii_4D_file', 'ROI_mask_file', 'gm_anat_file', 'ROI_coords_file',\n 'ROI_MNI_coords_file', 'ROI_labels_file']), name='inputnode')\n\n # reslice gm\n if reslice:\n\n reslice_gm = pe.Node(interface=spmu.Reslice(), name='reslice_gm')\n pipeline.connect(inputnode, 'ROI_mask_file',\n reslice_gm, 'space_defining')\n pipeline.connect(inputnode, 'gm_anat_file', reslice_gm, 'in_file')\n\n if resample:\n\n resample_gm = pe.Node(interface=RegResample(), name='resample_gm')\n pipeline.connect(inputnode, 'ROI_mask_file', resample_gm, 'ref_file')\n pipeline.connect(inputnode, 'gm_anat_file', resample_gm, 'flo_file')\n\n # Preprocess pipeline,\n filter_ROI_mask_with_GM = pe.Node(\n interface=IntersectMask(), name='filter_ROI_mask_with_GM')\n\n filter_ROI_mask_with_GM.inputs.filter_thr = filter_gm_threshold\n filter_ROI_mask_with_GM.inputs.background_val = background_val\n\n pipeline.connect(inputnode, 'ROI_mask_file',\n filter_ROI_mask_with_GM, 
'indexed_rois_file')\n pipeline.connect(inputnode, 'ROI_coords_file',\n filter_ROI_mask_with_GM, 'coords_rois_file')\n pipeline.connect(inputnode, 'ROI_MNI_coords_file',\n filter_ROI_mask_with_GM, 'MNI_coords_rois_file')\n pipeline.connect(inputnode, 'ROI_labels_file',\n filter_ROI_mask_with_GM, 'labels_rois_file')\n\n if reslice:\n pipeline.connect(reslice_gm, 'out_file',\n filter_ROI_mask_with_GM, 'filter_mask_file')\n\n elif resample:\n pipeline.connect(resample_gm, 'out_file',\n filter_ROI_mask_with_GM, 'filter_mask_file')\n\n else:\n pipeline.connect(inputnode, 'gm_anat_file',\n filter_ROI_mask_with_GM, 'filter_mask_file')\n\n # Nodes version: use min_BOLD_intensity and\n # return coords where signal is strong enough\n extract_mean_ROI_ts = pe.Node(interface=ExtractTS(\n plot_fig=plot), name='extract_mean_ROI_ts')\n\n extract_mean_ROI_ts.inputs.percent_signal = percent_signal\n extract_mean_ROI_ts.inputs.min_BOLD_intensity = min_BOLD_intensity\n\n pipeline.connect(inputnode, 'nii_4D_file', extract_mean_ROI_ts, 'file_4D')\n pipeline.connect(filter_ROI_mask_with_GM, 'filtered_indexed_rois_file',\n extract_mean_ROI_ts, 'indexed_rois_file')\n pipeline.connect(filter_ROI_mask_with_GM, 'filtered_MNI_coords_rois_file',\n extract_mean_ROI_ts, 'MNI_coord_rois_file')\n pipeline.connect(filter_ROI_mask_with_GM, 'filtered_coords_rois_file',\n extract_mean_ROI_ts, 'coord_rois_file')\n pipeline.connect(filter_ROI_mask_with_GM, 'filtered_labels_rois_file',\n extract_mean_ROI_ts, 'label_rois_file')\n\n return pipeline", "title": "" }, { "docid": "8a1cff0eea214f649e3e331ef1326f1c", "score": "0.60477114", "text": "def test_apply_roi_2d(self):\r\n client = self.create_client_with_image()\r\n\r\n roi = core.roi.PolygonalROI(vx=[10, 20, 20, 10],\r\n vy=[10, 10, 20, 20])\r\n client.apply_roi(roi)\r\n roi2 = self.im.edit_subset.subset_state.roi\r\n state = self.im.edit_subset.subset_state\r\n\r\n assert roi2.to_polygon()[0] == roi.to_polygon()[0]\r\n assert roi2.to_polygon()[1] == roi.to_polygon()[1]\r\n assert state.xatt is self.im.get_pixel_component_id(1)\r\n assert state.yatt is self.im.get_pixel_component_id(0)", "title": "" }, { "docid": "7d7a77121bd54ce667e3aa5233e16e32", "score": "0.6046832", "text": "def getRoi(self):\n roi = self.img[self.roi[Y_ORIGIN] : \n self.roi[Y_ORIGIN] + self.roi[Y_SIZE],\n self.roi[X_ORIGIN] : \n self.roi[X_ORIGIN] + self.roi[X_SIZE]]\n return roi", "title": "" }, { "docid": "703b707ddce0d52b847e84b7b1527ce3", "score": "0.6044786", "text": "def test_roi(self):\n import_file = os.path.join(\"..\", \"mlx75027.csv\")\n mlx75027 = True\n self.assertTrue(os.path.isfile(import_file))\n reg_dict = mlx.csv_import(import_file)\n\n row_end = 480\n col_end = 640\n row_offset = 1\n col_offset = 1\n mlx.set_roi(reg_dict, col_offset, col_end,\n row_offset, row_end, mlx75027)\n\n cs, ce, rs, re = mlx.calc_roi(reg_dict)\n self.assertEqual(row_offset, rs)\n self.assertEqual(col_offset, cs)\n self.assertEqual(row_end, re)\n self.assertEqual(col_end, ce)\n\n row_offset = 0\n with self.assertRaises(RuntimeError):\n mlx.set_roi(reg_dict, col_offset, col_end,\n row_offset, row_end, mlx75027)\n\n row_offset = 51\n row_end = 240\n col_offset = 50\n col_end = 150\n mlx.set_roi(reg_dict, col_offset, col_end,\n row_offset, row_end, mlx75027)\n cs, ce, rs, re = mlx.calc_roi(reg_dict)\n self.assertEqual(row_offset, rs)\n self.assertEqual(col_offset, cs)\n self.assertEqual(row_end, re)\n self.assertEqual(col_end, ce)\n return", "title": "" }, { "docid": "7cc0129888165c7f7ca238798056105c", "score": 
"0.6009475", "text": "def roi(self):\n return SheetOverlay([el.get_roi(self.roi_bounds) for el in self.data],\n bounds=self.roi_bounds if self.roi_bounds else self.bounds,\n metadata=self.metadata)", "title": "" }, { "docid": "61ebe7559db5e3b6d665ea50cdb99a26", "score": "0.60076815", "text": "def FindAdaptiveROIversion2(image, center_ROI, aspr_ROI, array_ROI, displayImages, debug = True):\n #inputfilename = 'img6.png'\n #outputfilename = 'edge2.png'\n #nucleation_down = 1 # 0 for nucleation up\n #center_ROI = (511,672) #center of the object to be identified\n #aspr_ROI = 2/3 # x_width/y_width for ROI. This is found by TRAINING\n #debug = True # flag to output ERRRORs\n #remove the strip at the bottom\n #cropsequence = ((0,44),(0,0))\n #img = ReadImage(inputfilename)\n #img = CropImage(img,cropsequence,0)\n #to mainain the aspect ratio of roi to be same as that of image, set the aspect ratio\n #asp_ratio = int(1344/(1066-44))\n #list of pad sizes to be removed along x axis\n array_x_ROI = array_ROI\n array_y_ROI = (array_x_ROI*aspr_ROI).astype(int)\n n = array_x_ROI.size\n optimum_x_ROI = 0\n optimum_y_ROI = 0\n #set the array for relative strengths and maxima positions for the unimodal or bimodal distributions.\n array_rel_strength = np.zeros(n)\n array_maximum = np.zeros((n,2))\n #displayImages = 0\n for i in np.arange(n):\n x_width = array_x_ROI[i]\n y_width = array_y_ROI[i]\n #set up the cropsequence so that pads are removed centered around the center of the image.\n cropsequence = CropSequenceGenerate(image,(center_ROI,(x_width,y_width)))\n cropimg = CropImage(image,cropsequence,0)\n imgbyte = Img2Ubyte(cropimg,0)\n img_med = MedianFilter(imgbyte,displayImages)\n maximum,rel_strength = modal_analysis(img_med,displayImages,debug) #strength is zero if distribution is unimodal and close to zero if the foreground is very small compared to background or vice versa\n array_rel_strength[i] = rel_strength \n array_maximum[i] = maximum\n #displayImages = 1\n if displayImages==1:\n #plot the relative strength variation and choose the appropriate ROI\n plt.figure(),plt.title(\"Finding Optimum ROI by varying xROI\"),plt.plot(array_x_ROI,array_rel_strength)\n #if all are unimodal distributions, then there either is no object to be found or object is beyond the ROI. 
This means that we need to check for bigger ROIs with progressive increase in y axis width\n max_rel_strength = np.max(array_rel_strength)\n if debug: print(\"maximum relative strength is \" + str(max_rel_strength))\n if max_rel_strength < 0.001:\n optimum_x_ROI = 902\n else:\n #find the optimum ROI from maximum of the relative strength vs ROI variation\n optimum_x_ROI = array_x_ROI[array_rel_strength.argsort()[-1]]\n optimum_y_ROI = array_y_ROI[array_rel_strength.argsort()[-1]]\n #proceed with further processing with optimum ROI\n optimum_ROI = (optimum_x_ROI,optimum_y_ROI)\n if debug: print(\"Optimum ROI is \",optimum_ROI)\n return optimum_ROI", "title": "" }, { "docid": "806207f2d69b6bbdf242c9f5a3e52c63", "score": "0.60006034", "text": "def test_get_roi_ranges(self):\n pass", "title": "" }, { "docid": "fa2e4bd00560fa389cffa10bd1321a48", "score": "0.59888995", "text": "def create_vecs(self,roi):\n y = np.arange(roi[0],roi[1],1)\n x = np.arange(roi[2],roi[3], 1)\n X,Y = np.meshgrid(x,y)\n return X,Y", "title": "" }, { "docid": "940eb0279dd735e81abe2f3bf38bae6c", "score": "0.5977656", "text": "def roi_mask(image):\n imshape = image.shape\n ''' \n y_max = imshape[0]-70\n y_min = 11*imshape[0]/18\n x_min = 0\n x_1 = 9*imshape[1]/20\n x_2 = 11*imshape[1]/20\n x_max = imshape[1]\n '''\n y_max = imshape[0]-70\n y_min = imshape[0]/10\n x_min = 0 + 80\n x_1 = 5*imshape[1]/20\n x_2 = 15*imshape[1]/20\n x_max = imshape[1] - 80\n \n \n vertices = np.array([[(x_min,y_max), (x_1, y_min), (x_2, y_min),(x_max,y_max)]], dtype=np.int32)\n #defining a blank mask to start with\n mask = np.zeros_like(image) \n \n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(imshape) > 2:\n channel_count = imshape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n \n #filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, [vertices], ignore_mask_color)\n \n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(image, mask)\n return masked_image", "title": "" }, { "docid": "7e665061d55116e2795386b105752774", "score": "0.59717214", "text": "def _roi_drawn(self, change=None):\n idx = self.image_selector.index\n if idx is not None:\n self.set_rois(idx, self.roi_selector_module.rois)", "title": "" }, { "docid": "459b8764a2b3f6fdca9ea61e11ba07a2", "score": "0.59713686", "text": "def init():\n \n \n im.set_data(np.empty((para.cell_num, para.cell_num)))\n point.set_data([], [])\n text.set_text('')\n x = lsr.get_xdata() \n y = lsr.get_ydata()\n lsr.set_data(x, y)\n ltr.set_text('')\n x = lsw.get_xdata()\n y = lsw.get_ydata()\n lsw.set_data(x, y)\n ltw.set_text('')\n \n return im,", "title": "" }, { "docid": "0ea0da0ee1ff2a4f22edb0385fe270e6", "score": "0.59615475", "text": "def __init__(self, x_res, y_res):\n self._x_res = x_res\n self._y_res = y_res\n self._roi = (0, self._y_res // 2), (self._x_res - 1, self._y_res - 1)\n self._x_size = 64\n self._y_size = 64\n self._x_step = 16\n self._y_step = 16\n self._horizon = 409", "title": "" }, { "docid": "5c211daba039207156c8f1063bf63ea6", "score": "0.5960826", "text": "def set_roi(self, hstart=0, hend=None, vstart=0, vend=None, bin=1):\n with self._reset_buffers():\n self.set_value(\"SUBARRAY MODE\",2)\n hend=hend or self.properties[\"SUBARRAY HSIZE\"].max\n vend=vend or self.properties[\"SUBARRAY VSIZE\"].max\n min_roi,max_roi=self.get_roi_limits()\n if bin==3:\n bin=2\n 
self.set_value(\"SUBARRAY HSIZE\",min_roi[2])\n self.set_value(\"SUBARRAY HPOS\",(hstart//4)*4)\n self.set_value(\"SUBARRAY HSIZE\",(max(hend-hstart,min_roi[2])//4)*4)\n self.set_value(\"SUBARRAY VSIZE\",min_roi[3])\n self.set_value(\"SUBARRAY VPOS\",(vstart//4)*4)\n self.set_value(\"SUBARRAY VSIZE\",(max(vend-vstart,min_roi[3])//4)*4)\n self.set_value(\"BINNING\",min(bin,max_roi[4]))\n return self.get_roi()", "title": "" }, { "docid": "c0c24818e2f62af5dd80f7f589b102e5", "score": "0.5957256", "text": "def roi(self):\n return (self._slice_0.start,\n self._slice_0.stop,\n self._slice_1.start,\n self._slice_1.stop)", "title": "" }, { "docid": "d47c3e232540ad61b69f47a3c24c62ee", "score": "0.59546846", "text": "def set_image(self, path, roi=None):\r\n self.img = cv2.imread(path)\r\n if self.img is None or self.img.size == 0:\r\n raise IOError(\"Failed to read image %s\" % path)\r\n if roi and len(roi) != 4:\r\n raise ValueError(\"ROI must be a list of four integers\")\r\n\r\n # If a ROI was provided, use only that region.\r\n if roi:\r\n y, y2, x, x2 = roi\r\n self.img = self.img[y:y2, x:x2]\r\n\r\n # Reset image related variables so one instance can be used for multiple\r\n # images.\r\n self.path = path\r\n self.mask = None\r\n self.bin_mask = None\r\n\r\n return self.img", "title": "" }, { "docid": "884317ecc428fb3f438b59817f24d0d4", "score": "0.5953904", "text": "def image_load_ROI(name,ROI):\n x0,y0,w = ROI\n xmin,xmax = x0-(w-1)/2,x0+(w-1)/2+1\n ymin,ymax = y0-(w-1)/2,y0+(w-1)/2+1\n I = array(Image.open(name).convert(\"I\"),float32).T[xmin:xmax,ymin:ymax]\n return I", "title": "" }, { "docid": "fc77859867ecc31362ac5862ed169d76", "score": "0.5938262", "text": "def genIm(self, crop=False):\r\n if not hasattr(self.s, 'actor'):\r\n self.s.addActor()\r\n if not hasattr(self.m, 'actor'):\r\n self.m.addActor()\r\n # Generate a renderer window\r\n win = vtkRenWin()\r\n # Set the number of viewports\r\n win.setnumViewports(1)\r\n # Set the background colour\r\n win.setBackground([1,1,1])\r\n # Set camera projection \r\n # Set camera projection \r\n win.setView([0, -1, 0], 0)\r\n win.SetSize(512, 512)\r\n win.Modified()\r\n win.OffScreenRenderingOn()\r\n self.s.actor.setColor([1.0, 0.0, 0.0])\r\n self.s.actor.setOpacity(0.5)\r\n self.m.actor.setColor([0.0, 0.0, 1.0])\r\n self.m.actor.setOpacity(0.5)\r\n win.renderActors([self.s.actor, self.m.actor])\r\n win.Render()\r\n win.rens[0].GetActiveCamera().Azimuth(0)\r\n win.rens[0].GetActiveCamera().SetParallelProjection(True)\r\n win.Render()\r\n im = win.getImage()\r\n if crop is True:\r\n mask = np.all(im == 1, axis=2)\r\n mask = ~np.all(mask, axis=1)\r\n im = im[mask, :, :]\r\n mask = np.all(im == 1, axis=2)\r\n mask = ~np.all(mask, axis=0)\r\n im = im[:, mask, :]\r\n return im, win", "title": "" }, { "docid": "969c6f592f48d8eafa0c2b55a57aef68", "score": "0.5929706", "text": "def autoROISize(self, filename='', imshape=(1024,1280), widget=None):\n self.fitImage(filename, imshape=imshape)\n for i in reversed(range(5)): # bad ROI size, try another\n if (self.df.isin([np.inf,-np.inf]).values.any() or self.df.isnull().values.any()\n or self.check_outlier('w') or self.check_outlier('h')):\n self._dx = int((i+1)**2 * 5)\n self._dy = int((i+1)**2 * 5)\n try:\n self.fitImage()\n except: pass\n else: break\n # check if ROIs overlap\n overx, overy = 0, 0\n xmins = np.array(sorted(self.df['xc'] - self._dx))\n xmaxs = np.array(sorted(self.df['xc'] + self._dx))\n ymins = np.array(sorted(self.df['yc'] - self._dy))\n ymaxs = np.array(sorted(self.df['yc'] + 
self._dy))\n # if any boxes are outside camera ROI\n if len(xmins[xmins < 0]):\n overx = np.abs(np.min(xmins[xmins<0]))\n if len(ymins[ymins < 0]):\n overy = np.abs(np.min(ymins[ymins<0]))\n if len(xmaxs[xmaxs > imshape[1]]):\n overx = max(overx, np.max(xmaxs[xmaxs > imshape[1]]) - imshape[1])\n if len(ymaxs[ymaxs > imshape[0]]):\n overy = max(overy, np.max(ymaxs[ymaxs > imshape[0]]) - imshape[0])\n # look at first row, compare first and second column\n if self._s[0] > 1:\n ox = (xmins[1] - xmaxs[0])/2.\n if ox < 0:\n overx = max(overx, np.abs(ox))\n # look at first column, compare first and second row\n if self._s[1] > 1:\n oy = (ymins[1] - ymaxs[0])/2.\n if oy < 0:\n overy = max(overy, np.abs(oy))\n # set new width, height of ROI \n self._dx -= int(round(overx))\n self._dy -= int(round(overy))\n self.fitImage()\n info(\"imageArray fitter reset ROI width, height to %s, %s\"%(self._dy, self._dx))\n xmins, xmaxs = self.df['xc'] - self._dx, self.df['xc'] + self._dx\n ymins, ymaxs = self.df['yc'] - self._dy, self.df['yc'] + self._dy\n # image could be cropped to: [xmin,ymin,xmax,ymax]\n bounds = list(map(int, [np.floor(min(xmins)), np.floor(min(ymins)), \n np.ceil(max(xmaxs)), np.ceil(max(ymaxs))]))\n # plot the bounds\n if widget:\n viewbox = self.plotContours(widget)\n s = pg.ROI((min(xmins), self._imvals.shape[0]-max(ymaxs)), # origin is bottom-left\n (max(xmaxs) - min(xmins), max(ymaxs) - min(ymins)),\n movable=False, pen='w') # rotatable=False, resizable=False, \n viewbox.addItem(s)\n return bounds", "title": "" }, { "docid": "79f8702c0259525f6b06a0606ba11f9b", "score": "0.59073627", "text": "def roi(self, reg=None, wh=None):\n\n # interpret reg\n if reg is not None and wh is not None:\n # reg = getself.__class__(reg) # 2x2?\n wh = argcheck.getvector(wh)\n\n xc = reg[0]\n yc = reg[1]\n if len(wh) == 1:\n w = np.round(wh/2)\n h = w\n else:\n w = np.round(wh[0]/2)\n h = np.round(wh[1]/2)\n left = xc - w\n right = xc + w\n top = yc - h\n bot = yc + h\n\n elif reg is not None and wh is None:\n # reg = getself.__class__(reg)\n\n left = reg[0, 0]\n right = reg[0, 1]\n top = reg[1, 0]\n bot = reg[1, 1]\n\n else:\n raise ValueError(reg, 'reg cannot be None')\n\n # TODO check row/column ordering, and ndim check\n out = []\n for im in self:\n roi = im.image[top:bot, left:right, :]\n o = namedtuple('roi', ['roi',\n 'left',\n 'right',\n 'top',\n 'bot'])(self.__class__(roi),\n left,\n right,\n top,\n bot)\n out.append(o)\n\n return out", "title": "" }, { "docid": "44bf71e8718f9315c053120a164263b4", "score": "0.5904946", "text": "def __init__(self, roi: RegionOfInterest, axis: int,\n left: ROITree, right: ROITree):\n assert axis >= 0 and axis < 3 \n self.roi = roi\n self.axis = axis\n self.left = left\n self.right = right", "title": "" }, { "docid": "4a67cb700f65d1303c5169e30cac2e20", "score": "0.58968097", "text": "def reset_roi_fields(self):\r\n self.xstart_linedt.setText(\"0\")\r\n self.xend_linedt.setText(str(self.data_shape[2]))\r\n self.ystart_linedt.setText(\"0\")\r\n self.yend_linedt.setText(str(self.data_shape[1]))\r\n self.zstart_linedt.setText(\"0\")\r\n self.zend_linedt.setText(str(self.data_shape[0]))\r\n self.roi_changed = False", "title": "" }, { "docid": "097c7bc7ee23bc1bef7766980b18c662", "score": "0.5888198", "text": "def roi_processing(numpy_frame: np.ndarray, roi_points: list):\n final_images = list()\n if len(roi_points) == 0:\n print(\"Incorrect number of Region of Interest points selected... 
Exiting\")\n sys.exit(1)\n\n base_masked_out = roi_extraction(roi_points=roi_points, numpy_frame=numpy_frame)\n\n x_intervals = round(numpy_frame.shape[1] / 20)\n y_intervals = round(numpy_frame.shape[0] / 30)\n x_start_interval = 0\n y_start_interval = 0\n\n for x_index in range(0, LOCAL_X_AXIS_ITERATIONS):\n x_start_interval, generated_images = roi_rotation(\n masked_image=base_masked_out,\n x_start_interval=x_start_interval,\n y_start_interval=y_start_interval,\n x_intervals=x_intervals,\n y_intervals=y_intervals\n )\n final_images.extend(generated_images)\n\n return final_images", "title": "" }, { "docid": "8270e8a24f553c4d8223e12af95e9bc1", "score": "0.5878978", "text": "def str2roi(self,roiStr):\n originStr, sizeStr = roiStr.split(\":\")\n #print(\"originStr=\",originStr,\" sizeStr=\",sizeStr)\n xOrigin = int(originStr.split(\",\")[0])\n yOrigin = int(originStr.split(\",\")[1])\n xSize = int(sizeStr.split(\",\")[0])\n ySize = int(sizeStr.split(\",\")[1])\n return((xOrigin, yOrigin, xSize, ySize))", "title": "" }, { "docid": "732fffb503966e4f2b6db66cea0564ab", "score": "0.5863586", "text": "def _roi_pooling():\n boxes = np.array([[10, 10, 20, 20], [-10, 10, -20, 30]])\n rois = tf.cast(boxes, tf.float32)\n x1 = rois[..., 0]\n y1 = rois[..., 1]\n x2 = rois[..., 2]\n y2 = rois[..., 3]\n\n rois = rois / 10\n\n x1 = tf.expand_dims(x1, axis=-1)\n y1 = tf.expand_dims(y1, axis=-1)\n x2 = tf.expand_dims(x2, axis=-1)\n y2 = tf.expand_dims(y2, axis=-1)\n\n # rois = tf.concatenate([x1, y1, x2, y2], axis=-1)\n rois = tf.concat([y1, x1, y2, x2], axis=-1)\n rois = tf.reshape(rois, (-1, 4))\n\n rois /=10\n print(rois)", "title": "" }, { "docid": "dd92a6861faeedd8de15f7f90c6c169f", "score": "0.5841793", "text": "def updateROIState(self, v):\n st = {}\n st['angle'] = 0\n st['pos'] = pg.Point(v[0], v[1])\n st['size'] = pg.Point(v[2] - v[0], v[3] - v[1])\n self.roi.setState(st)", "title": "" }, { "docid": "63a99de292d90c331e732ce12b5ede04", "score": "0.5832336", "text": "def find_ROI_segmentation(self):\n pass", "title": "" }, { "docid": "3f1da64f0f9e95a3fde6ed8bfac0da60", "score": "0.5820037", "text": "def get_best_roi(self):\n yvals = np.linspace(0, 100, num=11)*7.2\n w = 20 # half width of the ROI\n roi = []\n for y in yvals:\n roi.append([self.best_fit_p(y)-w, y])\n for y in yvals[::-1]:\n roi.append([self.best_fit_p(y)+w, y])\n return roi", "title": "" }, { "docid": "04cbb0b016818f300f0edcda6fabff5f", "score": "0.5807676", "text": "def roi_at(self, p):\n return self.rois[p - self.pstart]", "title": "" }, { "docid": "d28805a86159eea42088299329941750", "score": "0.5803839", "text": "def project_roi(self, roi, frame_id=None):\n response = self.project_rois(rois=[roi]).points[0]\n\n # Convert to VectorStamped\n result = VectorStamped(x=response.point.x, y=response.point.y, z=response.point.z,\n frame_id=response.header.frame_id)\n\n # If necessary, transform the point\n if frame_id is not None:\n print \"Transforming roi to {}\".format(frame_id)\n result = result.projectToFrame(frame_id=frame_id, tf_listener=self.tf_listener)\n\n # Return the result\n return result", "title": "" }, { "docid": "5cacd9c05816e2bde1d373d5d5f8f917", "score": "0.5801246", "text": "def partir_4(roi):\n\n aux1 = int(roi[2]/2)\n aux2 = int(roi[3]/2)\n \n roi2=[[roi[0]+(i*aux1),roi[1]+(j*aux2),aux1,aux2] for i in [0,1] for j in [0,1]]\n return roi2", "title": "" }, { "docid": "ed9c1929e912703efd30b68a3bc54078", "score": "0.5798317", "text": "def add_roi_contour_sequence(self, structure, index, coordinates):\n roi_contour_sequence 
= self.ds_rs.ROIContourSequence[index]\n roi_contour_sequence.ReferencedROINumber = structure[\"ReferencedROINumber\"]\n roi_contour_sequence.ROIDisplayColor = random.sample(range(0, 255), 3)\n\n roi_contour_sequence.add_new('ContourSequence', 'SQ', Sequence())\n\n for i, (sop_instance_uid, coordinate) in enumerate(coordinates):\n roi_contour_sequence.ContourSequence.append(Dataset())\n contour_sequence = roi_contour_sequence.ContourSequence[-1]\n contour_sequence.ContourGeometricType = \"CLOSED_PLANAR\"\n contour_sequence.NumberOfContourPoints = int(len(coordinate) / 3)\n contour_sequence.ContourNumber = i\n contour_sequence.ContourData = coordinate\n\n contour_sequence.add_new('ContourImageSequence', 'SQ', Sequence())\n contour_sequence.ContourImageSequence.append(Dataset())\n contour_image_sequence = contour_sequence.ContourImageSequence[-1]\n contour_image_sequence.ReferencedSOPClassUID = UID('1.2.840.10008.5.1.4.1.1.2')\n contour_image_sequence.ReferencedSOPInstanceUID = sop_instance_uid", "title": "" }, { "docid": "94fba564ff4c73cec8196e8c2267f44c", "score": "0.5791027", "text": "def __init__ ( self,\n source = \"CxiDs1-0|Cspad-0\",\n input = \"Something\", \n output = \"SomethingElse\",\n roi = \"0,1800,0,1800\" ):\n self.source = source\n self.input = input\n self.output = output\n self.roi = map(int,roi.split(',')) \n self.n_std = 3 # n standard deviations (vertical scale region of interest)", "title": "" }, { "docid": "63d36f4a80ee2911e7ab9d04e7789d0b", "score": "0.5783857", "text": "def __built_box_roi(self, seg_lung):\n # create box and roi of left lung\n box_seg = np.zeros((self.aorta_mat.shape))\n seg_left_lung = np.zeros((self.aorta_mat.shape))\n\n # find bounding box for Aorta and lung\n min_sa, min_ca, min_aa, max_sa, max_ca, max_aa = \\\n self.__found_bounding_box(self.aorta_mat)\n seg_left_lung[max_sa:, :, :] = seg_lung[max_sa:, :, :]\n min_s_l, min_c_l, min_a_l, max_s_l, max_c_l, max_a_l = \\\n self.__found_bounding_box(seg_left_lung)\n\n # first box\n ss1 = max_s_l - (((max_s_l - min_s_l) * (S_CON-1)) // S_CON)\n se1 = max_s_l - ((max_s_l - min_s_l) // (S_CON+2))\n cs1 = min_c_l + ((max_c_l - min_c_l) // (S_CON+1))\n ce1 = min_c_l + (((max_c_l - min_c_l) * (S_CON+1)) // (S_CON+2))\n xs1 = min_a_l - ((max_aa - min_aa) // AXIAL_CON*2)\n xe1 = min_a_l + ((max_aa - min_aa) // AXIAL_CON*3)\n\n # second_box\n ss2 = max_s_l - ((max_s_l - min_s_l) // S_CON)\n se2 = max_s_l - ((max_s_l - min_s_l) // (S_CON+1))\n cs2 = min_c_l + ((max_c_l - min_c_l) // (S_CON+1))\n ce2 = min_c_l + (((max_c_l - min_c_l) * (S_CON+1)) // (S_CON+2))\n xs2 = min_aa + (((max_aa - min_aa) * 1) // (S_CON+1))\n xe2 = min_a_l - ((max_aa - min_aa) // AXIAL_CON*3)\n\n # third_box\n ss3 = max_sa - ((max_sa - min_sa) // S_CON)\n se3 = max_sa\n cs3 = min_ca\n ce3 = min_ca + (((max_ca - min_ca) * 1) // S_CON)\n xs3 = min_aa + (((max_aa - min_aa) * ((AXIAL_CON//2) - 1) ) // (AXIAL_CON - 1))\n xe3 = min_aa + (((max_aa - min_aa) * (AXIAL_CON//2)) // (AXIAL_CON - 1))\n\n # update boxes\n box_seg[ss1: se1, cs1:ce1, xs1: xe1] = 1\n box_seg[ss2: se2, cs2:ce2, xs2: xe2] = 1\n box_seg[ss3: se3, cs3:ce3, xs3: xe3] = 1\n\n # delete soft tissues and hard tissues\n box_seg[np.where((self.ct_mat < SOFT_TISSUE) | (self.ct_mat > BONES))] = 0\n\n return box_seg", "title": "" }, { "docid": "bce14242850f5f7eb36a929dd09b8b2f", "score": "0.57798356", "text": "def roi(lista,imagen):\n return imagen[lista[0]:lista[0]+lista[2],lista[1]:lista[1]+lista[3]]", "title": "" }, { "docid": "48d320e689f9c70989486862a0872e45", "score": 
"0.5764892", "text": "def clear_ROI(self):\n self.camera.OffsetX.SetValue(self.camera.OffsetX.Min)\n self.camera.OffsetY.SetValue(self.camera.OffsetY.Min)\n self.camera.Width.SetValue(self.camera.Width.Max)\n self.camera.Height.SetValue(self.camera.Height.Max)", "title": "" }, { "docid": "4fb0f7f14b6146e04cdf13b1c9fd0aa4", "score": "0.5762916", "text": "def pare_roi(self, roi, min_area=10):\n\n # remove anything outside motion borders\n y0,x0 = int(np.floor(self.motion_borders.ymin)), int(np.floor(self.motion_borders.xmin))\n y1,x1 = int(np.ceil(self.motion_borders.ymax)), int(np.ceil(self.motion_borders.xmax))\n if roi.ndim == 2:\n roi = np.array([roi])\n roi = roi.astype(bool)\n roi[:,:y0,:] = False\n roi[:,y1:,:] = False\n roi[:,:,:x0] = False\n roi[:,:,x1:] = False\n\n roi = roi[np.any(roi, axis=(1,2))]\n\n # remove rois below size threshold\n thresh = self.pixels_per_micron * min_area # pixels \n areas = np.sum(roi, axis=(1,2))\n roi = roi[areas>thresh]\n \n return roi", "title": "" }, { "docid": "f2d1bcf7ffc7a7b379602f0f024eac8b", "score": "0.5729884", "text": "def _build_roi_click_callback(self, func):\n def callback(event):\n nonlocal self, func\n x, y, = event.x, event.y\n d = 3\n items = self.canvas.find_overlapping(x-d, y-d, x+d, y+d)\n names = set()\n for it in items:\n for tag in self.canvas.gettags(it):\n if not tag.startswith(TAG_ROI_ID):\n continue\n name64 = tag[len(TAG_ROI_ID):]\n names.add(base64.b64decode(name64).decode())\n break\n func(event=event, names=names)\n return callback", "title": "" }, { "docid": "5196c15d8bd315e042f9a932da2f9a3d", "score": "0.571907", "text": "def getROImask(self,ROI=None):\t\n\t\tif ROI is None:\n\t\t\tROI = self.ROI\n\t\n\t\tdef point_inside_ROI(point,ROI):\n\n\t\t\tn = ROI.shape[0]\n\t\t\t\n\t\t\tinside = 0\n\t\t\tx,y = point\n\t\t\tp1x = ROI[0,0]\n\t\t\tp1y = ROI[0,1]\n\t\t\tfor i in range(n+1):\n\t\t\t\tp2x = ROI[i % n,0]\n\t\t\t\tp2y = ROI[i % n,1]\n\t\t\t\tif y > min(p1y,p2y) and y <= max(p1y,p2y) and x <= max(p1x,p2x):\n\t\t\t\t\tif p1y != p2y:\n\t\t\t\t\t\txinters = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x\n\t\t\t\t\tif p1x == p2x or x <= xinters:\n\t\t\t\t\t\tinside = (inside + 1) % 2\n\t\t\t\t\t\n\t\t\t\tp1x,p1y = p2x,p2y\n\t\t\t\t\t\n\t\t\treturn inside\n\t\t\n\t\tnx = np.max(ROI[:,0]) - np.min(ROI[:,0]) + 1\n\t\tny = np.max(ROI[-1,1]) - np.min(ROI[0,1]) + 1\n\t\t\n\t\t\n\t\txpoints = np.arange(nx) + np.min(ROI[:,0])\n\t\typoints = np.arange(ny) + np.min(ROI[:,1])\n\t\t\n\t\tpointsinROI = np.zeros(self[0].shape,dtype=np.uint8)\n\t\tpointsinROI[...] 
= False\n\t\tfor x in xpoints:\n\t\t\tfor y in ypoints:\n\t\t\t\tpointsinROI[x,y] = point_inside_ROI((x,y),ROI)\n\t\t\n\t\treturn np.uint8(pointsinROI)", "title": "" }, { "docid": "e75d4391fe2646fed26e3c16b9f05236", "score": "0.5714905", "text": "def generate_ROI(volume):\n\n volume_gpu = cp.asarray(volume)\n\n idx = cp.where(volume_gpu > 0)\n\n roiVolume = cp.zeros(volume_gpu.shape, dtype=cp.float32)\n\n roiVolume[idx] = 1\n\n return roiVolume.get()", "title": "" }, { "docid": "7d475c9188fb9d3f055205c52d558998", "score": "0.5704831", "text": "def apply_roi(self, roi):\r\n cmd = command.ApplyROI(client=self.client, roi=roi)\r\n self._session.command_stack.do(cmd)", "title": "" }, { "docid": "07d4eba1b5e01c7e1f5e4d232fe35d26", "score": "0.5697929", "text": "def get_roi_image(original_image, h1, h2, w1, w2):\n return original_image[h1: h2, w1: w2]", "title": "" }, { "docid": "3409dca63adb16bf0a22339cb367a6c2", "score": "0.56953436", "text": "def from_roi(cls, roi: RegionOfInterest, factor: Cartesian,\n atomic_block_size: Cartesian, atomic_voxel_size: Cartesian):\n pass\n\n # assert roi.voxel_size % atomic_voxel_size == Cartesian(0, 0, 0)\n # assert roi.voxel_size // atomic_voxel_size % factor == Cartesian(0, 0, 0)\n \n # if roi.voxel_size == atomic_voxel_size:\n # # this is the leaf roi/block\n # return cls(roi, None, None, None)\n\n # # find the relatively longest axis to split\n # children_voxel_size = roi.voxel_size // factor\n # block_nums = roi.physical_size / (children_voxel_size * )\n # block_nums = np.ceil(block_nums)\n # axis = np.argmax(block_nums)\n\n # # split along axis\n # left_start = roi.start * factor\n # left_block_nums = \n # left_stop = left_start + \n # left_roi = RegionOfInterest()\n # left = cls.from_roi(left_roi, factor, atomic_block_size, atomic_voxel_size)", "title": "" }, { "docid": "d9c289467dd1d1f4f5af4e0a5a9b7bcd", "score": "0.5691518", "text": "def main():\r\n filepath = askopenfilename(\r\n filetypes=[(\"Text Files\", \"*.txt\"), (\"All Files\", \"*.*\")]\r\n )\r\n \r\n window.title(f\"CRAR - {filepath}\")\r\n image=cv2.imread(filepath) #read in the image\r\n\r\n image=cv2.resize(image,(1300,800)) #resizing because opencv does not work well with bigger images\r\n orig=image.copy()\r\n gray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY) #RGB To Gray Scal\r\n '''\r\ncv2.imshow(\"Title\",gray) '''\r\n blurred=cv2.GaussianBlur(gray,(5,5),0) #(5,5) is the kernel size and 0 is sigma that determines the amount of blur\r\n '''\r\ncv2.imshow(\"Blur\",blurred)'''\r\n edged=cv2.Canny(blurred,30,50) #30 MinThreshold and 50 is the MaxThreshold\r\n '''\r\ncv2.imshow(\"Canny\",edged) '''\r\n contours,hierarchy=cv2.findContours(edged,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE) #retrieve the contours as a list, with simple apprximation model\r\n contours=sorted(contours,key=cv2.contourArea,reverse=True)\r\n #the loop extracts the boundary contours of the page\r\n for c in contours:\r\n p=cv2.arcLength(c,True)\r\n approx=cv2.approxPolyDP(c,0.02*p,True)\r\n if len(approx)==4:\r\n target=approx\r\n break\r\n approx=mapper.mapp(target) #find endpoints of the sheet\r\n pts=np.float32([[0,0],[800,0],[800,800],[0,800]]) #map to 800*800 target window\r\n op=cv2.getPerspectiveTransform(approx,pts) #get the top or bird eye view effect\r\n dst=cv2.warpPerspective(orig,op,(800,800))\r\n cv2.imwrite(\"IMAGE_NAME.png\", dst)\r\n cv2.imshow(\"Scanned\",dst)\r\n # creating an image object\r\n image1=cv2.imread(\"IMAGE_NAME.png\")\r\n # loading the pixel data of the image'''\r\n im = 
Image.open(\"IMAGE_NAME.png\")\r\n # Creating coordinates of the pixel (x,y)\r\n C=['','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','',\r\n '','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','',\r\n '','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','',\r\n '','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','',\r\n '','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','',\r\n '','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','']\r\n C[0]= x, y = 700,340\r\n C[1]= x, y = 700,180\r\n C[2]= x, y = 700,220\r\n C[3]= x, y = 700,260\r\n C[4]= x, y = 700,300\r\n C[5]= x, y = 700,500\r\n C[6]= x, y = 700,540\r\n C[7]= x, y = 700,580\r\n C[8]= x, y = 700,620\r\n C[9]= x, y = 660,180\r\n C[10]= x, y = 660,220\r\n C[11]= x, y = 660,260\r\n C[12]= x, y = 660,300\r\n C[13]= x, y = 660,380\r\n C[14]= x, y = 660,420\r\n C[15]= x, y = 660,500\r\n h=['','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','',\r\n '','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','',\r\n '','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','',\r\n '','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','',\r\n '','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','',\r\n '','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','']\r\n h[0]=im.getpixel(C[0])\r\n h[1]=im.getpixel(C[1])\r\n h[2]=im.getpixel(C[2])\r\n h[3]=im.getpixel(C[3])\r\n h[4]=im.getpixel(C[4])\r\n h[5]=im.getpixel(C[5])\r\n h[6]=im.getpixel(C[6])\r\n h[7]=im.getpixel(C[7])\r\n h[8]=im.getpixel(C[8])\r\n h[9]=im.getpixel(C[9])\r\n h[10]=im.getpixel(C[10])\r\n h[11]=im.getpixel(C[11])\r\n h[12]=im.getpixel(C[12])\r\n h[13]=im.getpixel(C[13])\r\n h[14]=im.getpixel(C[14])\r\n h[15]=im.getpixel(C[15])\r\n a=['','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','',\r\n '','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','',\r\n '','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','',\r\n '','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','',\r\n '','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','',\r\n 
'','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','','']\r\n for i in range(0,243):\r\n if(str(h[i])==\"(255, 0, 0)\"):\r\n a[i]=\"A\"\r\n elif(str(h[i])==\"(0, 255, 0)\"):\r\n a[i]=\"B\"\r\n elif(str(h[i])==\"(0, 0, 255)\"):\r\n a[i]=\"C\"\r\n elif(str(h[i])==\"(255, 255, 0)\"):\r\n a[i]=\"D\"\r\n elif(str(h[i])==\"(0, 255, 255)\"):\r\n a[i]=\"E\"\r\n elif(str(h[i])==\"(255, 0, 255)\"):\r\n a[i]=\"F\"\r\n elif(str(h[i])==\"(192, 192, 192)\"):\r\n a[i]=\"G\"\r\n elif(str(h[i])==\"(128, 128, 128)\"):\r\n a[i]=\"H\"\r\n elif(str(h[i])==\"(128, 0, 0)\"):\r\n a[i]=\"I\"\r\n elif(str(h[i])==\"(128, 128, 0)\"):\r\n a[i]=\"J\"\r\n elif(str(h[i])==\"(0, 128, 0)\"):\r\n a[i]=\"K\"\r\n elif(str(h[i])==\"(128, 0, 128)\"):\r\n a[i]=\"L\"\r\n elif(str(h[i])==\"(0, 128, 128)\"):\r\n a[i]=\"M\"\r\n elif(str(h[i])==\"(0, 0, 128)\"):\r\n a[i]=\"N\"\r\n elif(str(h[i])==\"(255, 140, 0)\"):\r\n a[i]=\"O\" #'''darkorange'''\r\n elif(str(h[i])==\"(255, 215, 0)\"):\r\n a[i]=\"P\" #'''gold'''\r\n elif(str(h[i])==\"(85, 107, 47)\"):\r\n a[i]=\"Q\" #'''dark olive green'''\r\n elif(str(h[i])==\"(173, 255, 47)\"):\r\n a[i]=\"R\" #'''green yellow'''\r\n elif(str(h[i])==\"(50, 205, 50)\"):\r\n a[i]=\"S\" #''' lime green'''\r\n elif(str(h[i])==\"(0, 250, 154)\"):\r\n a[i]=\"T\" #'''medium spring green'''\r\n elif(str(h[i])==\"(47, 79, 79)\"):\r\n a[i]=\"U\" #'''dark slate gray'''\r\n elif(str(h[i])==\"(0, 206, 209)\"):\r\n a[i]=\"V\" #'''dark turquoise'''\r\n elif(str(h[i])==\"(100, 149, 237)\"):\r\n a[i]=\"W\" #'''corn flower blue'''\r\n elif(str(h[i])==\"(0, 191, 255)\"):\r\n a[i]=\"X\" #'''dep sky blue'''\r\n elif(str(h[i])==\"(127, 255, 212)\"):\r\n a[i]=\"Y\" #''' aqua marine'''\r\n elif(str(h[i])==\"(0, 0, 205)\"):\r\n a[i]=\"Z\" #''' medium blue'''\r\n elif(str(h[i])==\"(138, 43, 226)\"):\r\n a[i]=\"a\" #''' blue violet'''\r\n elif(str(h[i])==\"(123, 104, 238)\"):\r\n a[i]=\"b\" # ''' medium slate blue'''\r\n elif(str(h[i])==\"(148, 0, 211)\"):\r\n a[i]=\"c\" #'''dark violet'''\r\n elif(str(h[i])==\"(139, 0, 139)\"):\r\n a[i]=\"d\" #''' dark mafneta'''\r\n elif(str(h[i])==\"(75, 0, 130)\"):\r\n a[i]=\"e\" #''' indigo'''\r\n elif(str(h[i])==\"(128, 0, 128)\"):\r\n a[i]=\"f\" #''' purple'''\r\n elif(str(h[i])==\"(238, 130, 238)\"):\r\n a[i]=\"g\" #'''violet'''\r\n elif(str(h[i])==\"(199, 21, 133)\"):\r\n a[i]=\"h\" #''' medium violet red'''\r\n elif(str(h[i])==\"(250, 235, 215)\"):\r\n a[i]=\"i\" #''' antique white'''\r\n elif(str(h[i])==\"(139, 69, 19)\"):\r\n a[i]=\"j\" #''' saddle brown'''\r\n elif(str(h[i])==\"(210, 105, 30)\"):\r\n a[i]=\"k\" #''' cholate '''\r\n elif(str(h[i])==\"(244, 164, 96)\"):\r\n a[i]=\"l\" #''' sandy brown '''\r\n elif(str(h[i])==\"(188, 143, 143)\"):\r\n a[i]=\"m\" #''' rosy brown'''\r\n elif(str(h[i])==\"(176, 196, 222)\"):\r\n a[i]=\"n\" #''' light steel vlue'''\r\n elif(str(h[i])==\"(240, 255, 240)\"):\r\n a[i]=\"o\" #'''honey dew'''\r\n elif(str(h[i])==\"(189, 183, 107)\"):\r\n a[i]=\"p\" #''' dark khaki'''\r\n elif(str(h[i])==\"(34, 139, 34)\"):\r\n a[i]=\"q\" #''' forest green'''\r\n elif(str(h[i])==\"(60, 179, 113)\"):\r\n a[i]=\"r\" #'' 'medium sea green'''\r\n elif(str(h[i])==\"(255, 127, 80)\"):\r\n a[i]=\"s\" #''' coral'''\r\n elif(str(h[i])==\"(255, 99, 71)\"):\r\n a[i]=\"t\" #''' tomato'''\r\n elif(str(h[i])==\"(240, 128, 128)\"):\r\n a[i]=\"u\" #''' light coral'''\r\n elif(str(h[i])==\"(255, 160, 122)\"):\r\n a[i]=\"v\" #''' light salmon'''\r\n elif(str(h[i])==\"(70, 130, 
180)\"):\r\n a[i]=\"w\" #''' steel blue'''\r\n elif(str(h[i])==\"(176, 224, 230)\"):\r\n a[i]=\"x\" #''' powder blue'''\r\n elif(str(h[i])==\"(30, 144, 255)\"):\r\n a[i]=\"y\" #''' doger blue'''\r\n elif(str(h[i])==\"(230, 230, 250)\"):\r\n a[i]=\"z\" #''' lavender'''\r\n elif(str(h[i])==\"(255, 250, 205)\"):\r\n a[i]=\"0\" #'''lemon chiffon'''\r\n elif(str(h[i])==\"(233, 150, 122)\"):\r\n a[i]=\"1\" #''' dark salmon '''\r\n elif(str(h[i])==\"(255, 105, 180)\"):\r\n a[i]=\"2\" # ''' hot pink'''\r\n elif(str(h[i])==\"(205, 133, 63)\"):\r\n a[i]=\"3\" #''' rosy brown'''\r\n elif(str(h[i])==\"(222, 184, 135)\"):\r\n a[i]=\"4\" #''' burly wood'''\r\n elif(str(h[i])==\"(255, 228, 181)\"):\r\n a[i]=\"5\" #''' mocassin'''\r\n elif(str(h[i])==\"(46, 139, 87)\"):\r\n a[i]=\"6\" #''' sea green'''\r\n elif(str(h[i])==\"(60, 179, 113)\"):\r\n a[i]=\"7\" #''' medium sea green'''\r\n elif(str(h[i])==\"(107, 142, 35)\"):\r\n a[i]=\"8\" #''' dark olive drab'''\r\n elif(str(h[i])==\"(205, 92, 92)\"):\r\n a[i]=\"9\" #''' indian red'''\r\n elif(str(h[i])==\"(147, 112, 219)\"):\r\n a[i]=\"+\" #''' medium purple'''\r\n elif(str(h[i])==\"(245, 222, 179)\"):\r\n a[i]=\"/\" #''' wheat'''\r\n elif(str(h[i])==\"(240, 255, 240)\"):\r\n a[i]=\"=\" #''' honeydew'''\r\n elif(str(h[i])==\"(255, 250, 250)\"):\r\n a[i]=\".\"\r\n else:\r\n a[i]=\"\"\r\n print(h)\r\n print(a)\r\n def listToString(s):\r\n # initialize an empty string\r\n str1 = \"\"\r\n #traverse in the string\r\n for ele in s:\r\n str1 += ele\r\n # return string\r\n return str1 \r\n # Driver code\r\n f=listToString(a)\r\n print(listToString(f))\r\n sd=f.split(\".\",1)[0]\r\n hop=str(sd)\r\n print (sd)\r\n if(sd==\"///Lox///\"):\r\n goth=\"models\\\\fox.obj\"\r\n else:\r\n print(\"Errorcode\")\r\n\r\n homography = None\r\n # matrix of camera parameters (made up but works quite well for me)\r\n camera_parameters = np.array([[800, 0, 320], [0, 800, 240], [0, 0, 1]])\r\n # create ORB keypoint detector\r\n orb = cv2.ORB_create()\r\n # create BFMatcher object based on hamming distance\r\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\r\n # load the reference surface that will be searched in the video stream\r\n dir_name = os.getcwd()\r\n model = cv2.imread(os.path.join(dir_name, filepath), 0)\r\n # Compute model keypoints and its descriptors\r\n kp_model, des_model = orb.detectAndCompute(model, None)\r\n # Load 3D model from OBJ file\r\n obj = OBJ(os.path.join(dir_name, goth), swapyz=True)\r\n # init video capture\r\n cap = cv2.VideoCapture(0)\r\n while True:\r\n # read the current frame\r\n ret, frame = cap.read()\r\n if not ret:\r\n print(\"Unable to capture video\")\r\n return\r\n # find and draw the keypoints of the frame\r\n kp_frame, des_frame = orb.detectAndCompute(frame, None)\r\n # match frame descriptors with model descriptors\r\n matches = bf.match(des_model, des_frame)\r\n # sort them in the order of their distance\r\n # the lower the distance, the better the match\r\n matches = sorted(matches, key=lambda x: x.distance)\r\n # compute Homography if enough matches are found\r\n if len(matches) > MIN_MATCHES:\r\n # differenciate between source points and destination points\r\n src_pts = np.float32([kp_model[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)\r\n dst_pts = np.float32([kp_frame[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)\r\n # compute Homography\r\n homography, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)\r\n if args.rectangle:\r\n # Draw a rectangle that marks the found model in the frame\r\n h, w = 
model.shape\r\n pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)\r\n # project corners into frame\r\n dst = cv2.perspectiveTransform(pts, homography)\r\n # connect them with lines\r\n frame = cv2.polylines(frame, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)\r\n # if a valid homography matrix was found render cube on model plan\r\n if homography is not None:\r\n try:\r\n # obtain 3D projection matrix from homography matrix and camera parameters\r\n projection = projection_matrix(camera_parameters, homography)\r\n # project cube or model\r\n frame = render(frame, obj, projection, model, False)\r\n #frame = render(frame, model, projection)\r\n except:\r\n pass\r\n # draw first 10 matches.\r\n if args.matches:\r\n frame = cv2.drawMatches(model, kp_model, frame, kp_frame, matches[:5], 0, flags=2)\r\n # show result\r\n cv2.imshow('frame', frame)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n else:\r\n print(\"Not enough matches found - %d/%d\" % (len(matches), MIN_MATCHES))\r\n cap.release()\r\n cv2.destroyAllWindows()\r\n return 0", "title": "" }, { "docid": "17f976240490cd35c0f206cc4e3169d2", "score": "0.56675357", "text": "def cmd_show_roi(uri):\n\n show_roi(make_tracker_conn(), uri)", "title": "" }, { "docid": "cd40564815358032e342d9db7a7a97c8", "score": "0.5665808", "text": "def add_roi_observation_sequence(self, structure, index):\n roi_observation_sequence = self.ds_rs.RTROIObservationsSequence[index]\n roi_observation_sequence.ObservationNumber = structure[\"ObservationNumber\"]\n roi_observation_sequence.ReferencedROINumber = structure[\"ReferencedROINumber\"]\n roi_observation_sequence.ROIObservationLabel = structure[\"ROIObservationLabel\"]\n roi_observation_sequence.RTROIInterpretedType = structure[\"RTROIInterpretedType\"]\n roi_observation_sequence.ROIInterpreter = structure[\"ROIInterpreter\"]", "title": "" }, { "docid": "8a719412b91970a93feb8aa0824f76c7", "score": "0.56491363", "text": "def __init__(self, infiles, roi):\n self.roi = roi\n mask = ugali.utils.skymap.readSparseHealpixMaps(infiles, field='COVERAGE')\n self.nside = hp.npix2nside(len(mask))\n # Sparse maps of pixels in various ROI regions\n self.mask_roi_sparse = mask[self.roi.pixels]", "title": "" }, { "docid": "e13741463f14fffb20497da7ced677fa", "score": "0.564547", "text": "def build_roi_heads(cfg, input_shape):\n name = cfg.MODEL.ROI_HEADS.NAME\n return ROI_HEADS_REGISTRY.get(name)(cfg, input_shape)", "title": "" }, { "docid": "597acc8f6d20a6a89417955e65ae2890", "score": "0.56221104", "text": "def get_crops(self):\n\n cfg = get_cfg()\n\n valid_augs_list = [load_obj(i['class_name'])(**i['params']) for i in cfg['augmentation']['valid']['augs']]\n valid_bbox_params = OmegaConf.to_container((cfg['augmentation']['valid']['bbox_params']))\n valid_augs = A.Compose(valid_augs_list, bbox_params=valid_bbox_params)\n \n test_dataset = ImgDataset(None,\n 'test',\n self.imageDir,\n cfg,\n valid_augs)\n \n test_loader = DataLoader(test_dataset,\n batch_size=cfg.data.batch_size,\n num_workers=cfg.data.num_workers,\n shuffle=False,\n collate_fn=collate_fn)\n \n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') \n model = torch.load(os.path.dirname(os.path.abspath(__file__))+f\"/{str(self.imageDir).lower().split('/')[-1]}/model.pth\", \n map_location=device)\n\n detection_threshold = 0.5\n results = []\n model.eval()\n\n hparams = flatten_omegaconf(cfg)\n\n lit_model = LitImg(hparams=hparams, \n cfg=cfg, \n model=model)\n\n self.results = eval_model(test_loader, \n 
results, \n detection_threshold, \n device, \n lit_model)\n \n for i in range(len(self.results)):\n if self.results[i]['image_id']+'.JPG' == self.imageList[self.cur-1].split('/')[-1]:\n self.mainPanel.create_rectangle(int(int(self.results[i]['x1'])*self.scale), \n int(int(self.results[i]['y1'])*self.scale),\n int(int(self.results[i]['x2'])*self.scale),\n int(int(self.results[i]['y2'])*self.scale),\n width=2,\n outline='red')\n \n self.text_label.config(text='Crop: \\n'+str(self.imageDir)[40:]+'\\nTotal: \\n'+str(len(self.results)))\n\n self.sub_button.config(state='disabled')", "title": "" }, { "docid": "62a4208559bb856f1deeab82d3aae3c4", "score": "0.561656", "text": "def render_lanes_on_image(self,data,img, calib, img_width, img_height,figg):\n\n print('data in lane_image fucntion',len(data))\n proj_velo2cam2 = self.project_velo_to_cam2(calib)\n fig,ax = plt.subplots(1)\n ax.set_aspect('equal')\n \n \n # for i in range(data.shape[2]):\n # d=data[:,:,i]\n for d in data:\n pts_2d = self.project_to_image(d.transpose(), proj_velo2cam2)\n inds = np.where((pts_2d[0, :] < img_width) & (pts_2d[0, :] > 0) &\n (pts_2d[1, :] < img_height) & (pts_2d[1,:]>0) )[0]\n\n # print(inds)\n\n # Filter out pixels points\n imgfov_pc_pixel = pts_2d[:, inds]\n\n # Retrieve depth from lidar\n imgfov_pc_velo = d[inds, :]\n # imgfov_pc_velo = np.hstack((imgfov_pc_velo, np.ones((imgfov_pc_velo.shape[0], 1))))\n imgfov_pc_cam2 = proj_velo2cam2 @ imgfov_pc_velo.transpose()\n # Create a figure. Equal aspect so circles look circular \n # Show the image\n ax.imshow(img)\n ax.plot(imgfov_pc_pixel[0],imgfov_pc_pixel[1],color='red',linewidth=8)\n \n plt.savefig('video/'+figg+'.png')\n \n # return imgfov_pc_pixel[0], imgfov_pc_pixel[1]", "title": "" }, { "docid": "6cd2ea01ba0b826e37d0d2e99924b70f", "score": "0.5607524", "text": "def test_get_roi_ranges_v2(self):\n pass", "title": "" }, { "docid": "ba5771359d7dd1c21e4d405a5d7bb54f", "score": "0.5607298", "text": "def get_roi(self, idx=None):\n if idx is None:\n idx = self._latest_roi_idx\n if idx is None:\n return None\n\n with h5py.File(self.data_file, 'r') as f:\n roigrp = f['roi']\n _roi = ROI(roigrp['roi{}'.format(int(idx))])\n\n return _roi", "title": "" }, { "docid": "002fe04f2661868d1fcecdbfa4ee3f8e", "score": "0.56045556", "text": "def preprocess_roi(roi):\n roi = cv2.resize(roi, (224, 224))\n roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)\n processed_roi = cv2.GaussianBlur(roi, (13, 13), 0)\n processed_roi = cv2.equalizeHist(processed_roi)\n return roi, processed_roi", "title": "" } ]
10073b23c2a4f916a7b6da4d1f04804b
Test config entries are reloaded when new info is set.
[ { "docid": "8e1f5233977f8257c1946dfb2fbc1ce9", "score": "0.6789433", "text": "async def test_new_info_reload_config_entries(hass, init_integration, mock_dashboard):\n assert init_integration.state == ConfigEntryState.LOADED\n\n with patch(\"homeassistant.components.esphome.async_setup_entry\") as mock_setup:\n await dashboard.async_set_dashboard_info(hass, \"test-slug\", \"test-host\", 6052)\n\n assert len(mock_setup.mock_calls) == 1\n assert mock_setup.mock_calls[0][1][1] == init_integration\n\n # Test it's a no-op when the same info is set\n with patch(\"homeassistant.components.esphome.async_setup_entry\") as mock_setup:\n await dashboard.async_set_dashboard_info(hass, \"test-slug\", \"test-host\", 6052)\n\n assert len(mock_setup.mock_calls) == 0", "title": "" } ]
[ { "docid": "cec03cd3d4109cf17a5d260e3e613182", "score": "0.71862674", "text": "def test_update_configuration_file(self):\n pass", "title": "" }, { "docid": "29be3542c9b40c6e9f7ae7e822ab3b52", "score": "0.71835166", "text": "def test_update_default_configuration(self):\n pass", "title": "" }, { "docid": "10333df80818da208a0612b862cf567f", "score": "0.7136592", "text": "def config_test(self): # type: ignore", "title": "" }, { "docid": "5e2cd748a741ae396f685369d35d6669", "score": "0.70737314", "text": "def test_config(self):\n self.maxDiff = None\n test_config = config.expand(self.before_config)\n self.assertDictEqual(test_config, self.after_config)", "title": "" }, { "docid": "e463b2eb37251d4a3cd69014c74db7d6", "score": "0.69686246", "text": "def test_configurations_put(self):\n pass", "title": "" }, { "docid": "93a4f6cfc8f69375ebafd2214ccc534d", "score": "0.6806966", "text": "async def test_config_migration(hass: HomeAssistant) -> None:\n\n old_entry_data: dict[str, Any] = {\n \"slots\": 5,\n \"corona_filter\": True,\n \"regions\": {\"083350000000\": \"Aach, Stadt\"},\n }\n\n old_conf_entry: MockConfigEntry = MockConfigEntry(\n domain=DOMAIN, title=\"NINA\", data=old_entry_data\n )\n\n old_conf_entry.add_to_hass(hass)\n\n await hass.config_entries.async_setup(old_conf_entry.entry_id)\n await hass.async_block_till_done()\n\n assert dict(old_conf_entry.data) == ENTRY_DATA", "title": "" }, { "docid": "068adc002f97416764ad2b6549584094", "score": "0.6795898", "text": "def test_update_analysis_configuration(self):\n pass", "title": "" }, { "docid": "639ec052199080c80a7b7f8d0247d3c2", "score": "0.6768208", "text": "async def test_load_and_unload(\n hass: HomeAssistant,\n climacell_config_entry_update: pytest.fixture,\n) -> None:\n data = _get_config_schema(hass)(MIN_CONFIG)\n config_entry = MockConfigEntry(\n domain=DOMAIN,\n data=data,\n unique_id=_get_unique_id(hass, data),\n version=1,\n )\n config_entry.add_to_hass(hass)\n assert await hass.config_entries.async_setup(config_entry.entry_id)\n await hass.async_block_till_done()\n assert len(hass.states.async_entity_ids(WEATHER_DOMAIN)) == 1\n\n assert await hass.config_entries.async_remove(config_entry.entry_id)\n await hass.async_block_till_done()\n assert len(hass.states.async_entity_ids(WEATHER_DOMAIN)) == 0", "title": "" }, { "docid": "9964b249c2bf3803d7bb684b5f5ff2bd", "score": "0.6704383", "text": "def setUp(self):\n main.app.config.update(\n {\n 'DATA_CSV': TEST_DATA_CSV,\n 'DATA_XML': TEST_DATA_XML\n }\n )", "title": "" }, { "docid": "27225cc8282fb4233886564932c32a20", "score": "0.66697", "text": "def test_post_config(self):\n pass", "title": "" }, { "docid": "4e66d191ab3604a9959ccf9a658dcb84", "score": "0.66383064", "text": "def set_config_for_test(testcase, data):\n set_config_data(data)\n testcase.addCleanup(set_config_data, {})", "title": "" }, { "docid": "832a22dab07369bb5c62d2eea21de9c9", "score": "0.66349006", "text": "def setUp(self):\n self.config = TestConfig(show_section_numbers=0)", "title": "" }, { "docid": "832a22dab07369bb5c62d2eea21de9c9", "score": "0.66349006", "text": "def setUp(self):\n self.config = TestConfig(show_section_numbers=0)", "title": "" }, { "docid": "498b9fe8f68fdcd963768ef8f71bf335", "score": "0.6628223", "text": "def test_edit_application_configuration(self):\n pass", "title": "" }, { "docid": "d3987259aedc3d82ec7333fbf8772eb6", "score": "0.65842843", "text": "def test_config_loaded():\n eq_(settings.environment, 'test')\n eq_(settings.database, 'test_database')\n eq_(settings.amqp, 'test_amqp')", 
"title": "" }, { "docid": "78b8165ca3db085ebd8cefd8d000c4fb", "score": "0.65570647", "text": "async def help_test_reloadable(\n hass: HomeAssistant,\n mqtt_client_mock: MqttMockPahoClient,\n domain: str,\n config: ConfigType,\n) -> None:\n # Set up with empty config\n config = copy.deepcopy(config[mqtt.DOMAIN][domain])\n # Create and test an old config of 2 entities based on the config supplied\n old_config_1 = copy.deepcopy(config)\n old_config_1[\"name\"] = \"test_old_1\"\n old_config_2 = copy.deepcopy(config)\n old_config_2[\"name\"] = \"test_old_2\"\n\n old_config = {\n mqtt.DOMAIN: {domain: [old_config_1, old_config_2]},\n }\n # Start the MQTT entry with the old config\n entry = MockConfigEntry(domain=mqtt.DOMAIN, data={mqtt.CONF_BROKER: \"test-broker\"})\n entry.add_to_hass(hass)\n mqtt_client_mock.connect.return_value = 0\n with patch(\"homeassistant.config.load_yaml_config_file\", return_value=old_config):\n await entry.async_setup(hass)\n\n assert hass.states.get(f\"{domain}.test_old_1\")\n assert hass.states.get(f\"{domain}.test_old_2\")\n assert len(hass.states.async_all(domain)) == 2\n\n # Create temporary fixture for configuration.yaml based on the supplied config and\n # test a reload with this new config\n new_config_1 = copy.deepcopy(config)\n new_config_1[\"name\"] = \"test_new_1\"\n new_config_2 = copy.deepcopy(config)\n new_config_2[\"name\"] = \"test_new_2\"\n new_config_extra = copy.deepcopy(config)\n new_config_extra[\"name\"] = \"test_new_3\"\n\n new_config = {\n mqtt.DOMAIN: {domain: [new_config_1, new_config_2, new_config_extra]},\n }\n with patch(\"homeassistant.config.load_yaml_config_file\", return_value=new_config):\n # Reload the mqtt entry with the new config\n await hass.services.async_call(\n \"mqtt\",\n SERVICE_RELOAD,\n {},\n blocking=True,\n )\n await hass.async_block_till_done()\n\n assert len(hass.states.async_all(domain)) == 3\n\n assert hass.states.get(f\"{domain}.test_new_1\")\n assert hass.states.get(f\"{domain}.test_new_2\")\n assert hass.states.get(f\"{domain}.test_new_3\")", "title": "" }, { "docid": "577593c9cf755fe1880059828e436c49", "score": "0.6556689", "text": "def test_config_dict_set(self):\n self.assertNotEqual(CONFIG.config, {})", "title": "" }, { "docid": "4f774736639dcb2d17d22b7751ffa2e4", "score": "0.6549595", "text": "def test_002_changed(self):\n conf_test = make_temp_file(dedent(\"\"\"\n [task.default]\n stderr: true\n stdout_msgtree: false\n stderr_msgtree: false\n engine: select\n port_qlimit: 1000\n auto_tree: false\n local_workername: none\n distant_workername: pdsh\n\n [task.info]\n debug: true\n fanout: 256\n grooming_delay: 0.5\n connect_timeout: 12.5\n command_timeout: 30.5\"\"\"))\n self.defaults = Defaults(filenames=[conf_test.name])\n self.assertTrue(self.defaults.stderr)\n self.assertFalse(self.defaults.stdout_msgtree)\n self.assertFalse(self.defaults.stderr_msgtree)\n self.assertEqual(self.defaults.engine, 'select')\n self.assertEqual(self.defaults.port_qlimit, 1000)\n self.assertFalse(self.defaults.auto_tree)\n self.assertEqual(self.defaults.local_workername, 'none')\n self.assertEqual(self.defaults.distant_workername, 'pdsh')\n # task_info\n self.assertTrue(self.defaults.debug)\n self.assertEqual(self.defaults.fanout, 256)\n self.assertEqual(self.defaults.grooming_delay, 0.5)\n self.assertEqual(self.defaults.connect_timeout, 12.5)", "title": "" }, { "docid": "cb964b1152404b5e6b49671649b84fd5", "score": "0.64850193", "text": "def test_reload(self):\n pass", "title": "" }, { "docid": 
"0576d1d0217357f9e1a177285ba6dd4a", "score": "0.6464505", "text": "def update_config(self, config):", "title": "" }, { "docid": "1218a320554dba5b23b2faebd708ad8d", "score": "0.63927615", "text": "def tearDown(self):\n\n self.config = configurator.Config()\n self.config.set_config()", "title": "" }, { "docid": "aa689ba6eb9afd61a6b18136b4d8ac1d", "score": "0.6380534", "text": "def override_config() -> None:\n cfg.data_path = Path(\"tests\", \"test_data\")\n cfg.wow = io.reader(name=\"test_dot_pricer\", ftype=\"json\")", "title": "" }, { "docid": "dae2dd9714cb10268ebf7a50c2c8c5a8", "score": "0.63724375", "text": "async def test_v3_load_and_unload(\n hass: HomeAssistant,\n climacell_config_entry_update: pytest.fixture,\n) -> None:\n data = _get_config_schema(hass)(API_V3_ENTRY_DATA)\n config_entry = MockConfigEntry(\n domain=DOMAIN,\n data=data,\n unique_id=_get_unique_id(hass, data),\n version=1,\n )\n config_entry.add_to_hass(hass)\n assert await hass.config_entries.async_setup(config_entry.entry_id)\n await hass.async_block_till_done()\n assert len(hass.states.async_entity_ids(WEATHER_DOMAIN)) == 1\n\n assert await hass.config_entries.async_remove(config_entry.entry_id)\n await hass.async_block_till_done()\n assert len(hass.states.async_entity_ids(WEATHER_DOMAIN)) == 0", "title": "" }, { "docid": "8a33bd37880173a89dafa4b14473e243", "score": "0.63598603", "text": "def test_caching(self):\n data = utils.get_data()\n main.app.config.update(\n {\n 'DATA_CSV': TEST_DATA_CSV_2,\n }\n )\n new_data = utils.get_data()\n self.assertDictEqual(data, new_data)", "title": "" }, { "docid": "bcf3bafde73389e12c8665c6e732d460", "score": "0.63354975", "text": "def test_configurations_get(self):\n pass", "title": "" }, { "docid": "1b7ac51d851e8b6a486ad2a641091813", "score": "0.6271766", "text": "def test_upate(self): \n config = self.config_config()\n\n config.path_text = 'teste_text 2'\n config.path_video = 'teste_video 2'\n config.version = '2'\n config.Update()\n\n self.clean_config(config) \n\n config.Select()\n \n assert config.path_text == 'teste_text 2'\n assert config.path_video == 'teste_video 2'\n assert config.version == '2'", "title": "" }, { "docid": "1530242965675402674c3ee29278b9c4", "score": "0.62656087", "text": "def setUp(self) -> None:\n\n self.our_config = copy.deepcopy(pyf_test_dataset.DEFAULT_CONFIG)", "title": "" }, { "docid": "3e16005647a3de21a87d28fce10b0ae3", "score": "0.6258549", "text": "async def test_setup_update_unload_entry(hass):\n\n hass.config_entries.async_forward_entry_setups = AsyncMock()\n with patch.object(hass.config_entries, \"async_update_entry\") as p:\n config_entry = MockConfigEntry(\n domain=DOMAIN, data=ADVANCED_USER_INPUT, entry_id=\"test\", unique_id=None\n )\n await hass.config_entries.async_add(config_entry)\n assert p.called\n\n assert await async_setup_entry(hass, config_entry)\n assert DOMAIN in hass.data and config_entry.entry_id in hass.data[DOMAIN]\n\n # check user input is in config\n for key in ADVANCED_USER_INPUT:\n if key in hass.data[DOMAIN][config_entry.entry_id]:\n assert (\n hass.data[DOMAIN][config_entry.entry_id][key]\n == ADVANCED_USER_INPUT[key]\n )\n\n hass.config_entries.async_forward_entry_setups.assert_called_with(\n config_entry, PLATFORMS\n )\n\n # ToDo test hass.data[DOMAIN][config_entry.entry_id][UPDATE_LISTENER]\n\n hass.config_entries.async_reload = AsyncMock()\n assert await async_update_options(hass, config_entry) is None\n hass.config_entries.async_reload.assert_called_with(config_entry.entry_id)\n\n # Unload the entry and 
verify that the data has been removed\n assert await async_unload_entry(hass, config_entry)\n assert config_entry.entry_id not in hass.data[DOMAIN]", "title": "" }, { "docid": "e7905e45eb48a2b81429251d35a1382d", "score": "0.6248757", "text": "def test_override_config(self):\n processor = Processor()\n self.assertIsNone(processor.config.groups)\n self.assertIsNone(processor.config.policies)\n\n processor.config = Config(GROUPS, POLICIES_ALLOW)\n self.assertEqual(len(processor.config.groups), 1)\n self.assertEqual(len(processor.config.policies), 1)", "title": "" }, { "docid": "1c9337a762826e8652502155e64aa8ba", "score": "0.6238477", "text": "def setUp(self):\n super(TestCOTInjectConfig, self).setUp()\n self.config_file = self.sample_cfg", "title": "" }, { "docid": "f3bb166c3dfdbefe3bb4eeaa4c2e9f2e", "score": "0.6235656", "text": "def test_setup_from_config(self):\n self._test_setup({\"selfheal\": {\"interval\": 30.0}}, 30.0)", "title": "" }, { "docid": "58c0a3045b24f5862bcbd04393c77b47", "score": "0.62166053", "text": "def test_site_config_load():\n clear_site_config()\n for i, path in enumerate(reversed(get_conf_paths())):\n exp_value = 100*(1 + i)\n content = '[main]\\nmemory_usage/timeout = ' + str(exp_value) + '\\n'\n\n conf_fpath = os.path.join(path, 'spyder.ini')\n with open(conf_fpath, 'w') as fh:\n fh.write(content)\n\n config = ConfigurationManager()\n config.reset_to_defaults()\n value = config.get('main', 'memory_usage/timeout')\n\n print(path, value, exp_value)\n\n assert value == exp_value\n clear_site_config()", "title": "" }, { "docid": "b89fd8fe1bca987bbb30599c696bf408", "score": "0.6211188", "text": "def test_configuration(self):\n return False", "title": "" }, { "docid": "919178d9fdb2b2a3664015957d71e66b", "score": "0.62096184", "text": "def setUp(self):\n jsonfig_patcher = mock.patch('otter.tap.api.jsonfig')\n self.jsonfig = jsonfig_patcher.start()\n self.addCleanup(jsonfig_patcher.stop)\n\n self.jsonfig.from_path.return_value = test_config", "title": "" }, { "docid": "f3128ee4cebf5620979fdd54fc1db2d6", "score": "0.6185549", "text": "def setUp(self):\n self.fake_config = {'foo': 'bar'}\n self.stack_count = 0", "title": "" }, { "docid": "75fa6a069ab54d1379b07123c3a7f25a", "score": "0.61517274", "text": "def test_dev_config(self):\n self.assertEqual()", "title": "" }, { "docid": "f91fed02ac6a785913d8869bb9098ba8", "score": "0.61512685", "text": "def test_settings():", "title": "" }, { "docid": "5f75fa031139fd972449182660181439", "score": "0.6129881", "text": "def test_projects_ini_update_config():\n k = project_ini.load_config('tests/data/project_ini/project.ini')\n project_ini.update_config(k, {'app': {'name': 'pytest'}})\n assert 'version' in k['app'], '\"app\" section missing from config'\n assert k['app']['name'] == 'pytest', '\"version\" option missing from \"app\" section'", "title": "" }, { "docid": "77d1bade50b33d1e27f6ae38643fed79", "score": "0.61083764", "text": "def test_update_notification_settings(self):\n pass", "title": "" }, { "docid": "9de6390b37a0239b1f21906b7fd3f5bb", "score": "0.6108358", "text": "def test_config_interface(mock_save):\n config = Config(EXAMPLE_CONFIG_FILE)\n\n with pytest.raises(KeyError):\n config[\"slow_start\"][\"second_season\"] = None\n\n config[\"slow_start\"] = {}\n config[\"slow_start.second_season\"] = \"When?\"\n assert config[\"slow_start\"][\"second_season\"] == \"When?\"\n assert mock_save.call_count == 2\n\n config[\"slow_start.second_season\"] = \"When?\"\n assert mock_save.call_count == 2", "title": "" }, { "docid": 
"0e4ba17f53309acd1e20f92707828367", "score": "0.6085126", "text": "def testUpdateNothing(self):\n cfg = config_dict.ConfigDict()\n cfg.x = 5\n cfg.y = 9\n cfg.update()\n self.assertLen(cfg, 2)\n self.assertEqual(cfg.x, 5)\n self.assertEqual(cfg.y, 9)", "title": "" }, { "docid": "2b46026763f9e337eec9b1a8b525b913", "score": "0.6082148", "text": "def test_insert(self): \n config = self.config_config()\n\n self.clean_config(config) \n\n config.Select()\n\n self.compare_config(config)", "title": "" }, { "docid": "a444dba4f48e0a17b1522d35e0e33db3", "score": "0.6072898", "text": "def test_real_config(self):\n with patch('gstorage.apps.checks.register') as mock_method:\n app = GStorageConfig.create('gstorage')\n app.ready()\n assert mock_method.call_count == 1", "title": "" }, { "docid": "89b6832db4f2e99f8c7efe0c13443ee0", "score": "0.60726607", "text": "def test_reloading_config_reuses_client(self, mock_close_clients):\n # load a config with a client and connect it\n self.zoidberg.load_config('./tests/etc/zoidberg.yaml')\n client = self.zoidberg.config.gerrits['master']['client']\n self.zoidberg.connect_client(self.zoidberg.config.gerrits['master'])\n client.marker = 'CheckForThisMarker'\n # reload the same config\n self.zoidberg.load_config('./tests/etc/zoidberg.yaml')\n # check the client has not changed\n client = self.zoidberg.config.gerrits['master']['client']\n self.assertEqual('CheckForThisMarker', client.marker)\n\n # reload config with different connection details\n self.zoidberg.load_config('./tests/etc/zoidberg2.yaml')\n # check the client has changed\n client = self.zoidberg.config.gerrits['master']['client']\n self.assertEqual(None, getattr(client, 'marker', None))", "title": "" }, { "docid": "82f58ab24810e52974f220a99a1f5b3b", "score": "0.6065014", "text": "def test_show_config(self):\n pass", "title": "" }, { "docid": "60765b757c49265b57ce78ba6dff6ff8", "score": "0.6064447", "text": "def test_projects_ini_update_config_empty_dict():\n k = project_ini.load_config('tests/data/project_ini/project.ini')\n project_ini.update_config(k, {})\n assert 'version' in k['app'], '\"app\" section missing from config'\n assert k['app']['name'] == 'fcs-etl', 'wrong name'", "title": "" }, { "docid": "9a666a452bd515dbcd10a013ca4142cc", "score": "0.6046669", "text": "def load_config(self):\r\n self.config.reload()\r\n self.validate_config()", "title": "" }, { "docid": "b43dd23350fcb59fb73dca5b4f2e39e6", "score": "0.6029484", "text": "def setup_test_data(self, test_data_config: Union[DictConfig, Dict]):\n raise NotImplementedError()", "title": "" }, { "docid": "cf5f6d55286712c7a761e78727518a25", "score": "0.60251236", "text": "def test_custom_config():\n config = Config(EXAMPLE_CONFIG_FILE)\n\n assert config[\"heartwarming_anime.name\"] == \"Slow Start\"\n assert \"cute_girls\" in config\n assert \"pesky_boys\" not in config\n assert os.path.expanduser(\"~\") in config[\"data_dir\"]\n assert \"{0}slow_start\".format(os.path.sep) in config[\"local_config_file\"]", "title": "" }, { "docid": "aee435be569f88459171e0b412c1b58e", "score": "0.60246325", "text": "def test_config_expand1():\n test_config = config.expand(fixtures.expand1.before_config)\n assert test_config == fixtures.expand1.after_config", "title": "" }, { "docid": "f6d8d139de7b0ac4aab51426f34522b7", "score": "0.5996789", "text": "def test_900_restart_on_config_change(self):\n # Config file affected by juju set config change\n conf_file = '/etc/nova/nova.conf'\n\n # Make config change, check for service restarts\n logging.info('Changing debug 
config on nova-cloud-controller')\n self.restart_on_changed_debug_oslo_config_file(\n conf_file,\n self.services)", "title": "" }, { "docid": "15f5ee0fe3a0c974f2a1d0ba00c3b596", "score": "0.59943527", "text": "def test_init_settings(self, **kwargs):\n # check by default url setting\n self.assertEqual(atxcf.get_setting(\"program_url\"),\n atxcf.get_default_program_url())\n os.remove(atxcf.get_settings_filename())", "title": "" }, { "docid": "e71c11e1ad58989788746663730d507d", "score": "0.5986337", "text": "def setUp(self):\n\n CONFIG.parse(PROJECT + '/examples/config.yaml')", "title": "" }, { "docid": "764fae636c3a9c1dac031a3b1dc11bcf", "score": "0.5981106", "text": "def after_init_config(self):", "title": "" }, { "docid": "764fae636c3a9c1dac031a3b1dc11bcf", "score": "0.5981106", "text": "def after_init_config(self):", "title": "" }, { "docid": "671fe283385de42cdbbc13f5dae54065", "score": "0.59794176", "text": "def test_config(self, mock_requests):\n app_id = self.create_app()\n\n # check to see that an initial/empty config was created\n url = \"/v2/apps/{app_id}/config\".format(**locals())\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200, response.data)\n self.assertIn('values', response.data)\n self.assertEqual(response.data['values'], {})\n config1 = response.data\n\n # set an initial config value\n body = {'values': json.dumps({'NEW_URL1': 'http://localhost:8080/'})}\n response = self.client.post(url, body)\n self.assertEqual(response.status_code, 201, response.data)\n config2 = response.data\n self.assertNotEqual(config1['uuid'], config2['uuid'])\n self.assertIn('NEW_URL1', response.data['values'])\n\n # read the config\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200, response.data)\n config3 = response.data\n self.assertEqual(config2, config3)\n self.assertIn('NEW_URL1', response.data['values'])\n\n # set an additional config value\n body = {'values': json.dumps({'NEW_URL2': 'http://localhost:8080/'})}\n response = self.client.post(url, body)\n self.assertEqual(response.status_code, 201, response.data)\n config3 = response.data\n self.assertNotEqual(config2['uuid'], config3['uuid'])\n self.assertIn('NEW_URL1', response.data['values'])\n self.assertIn('NEW_URL2', response.data['values'])\n\n # read the config again\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200, response.data)\n config4 = response.data\n self.assertEqual(config3, config4)\n self.assertIn('NEW_URL1', response.data['values'])\n self.assertIn('NEW_URL2', response.data['values'])\n\n # unset a config value\n body = {'values': json.dumps({'NEW_URL2': None})}\n response = self.client.post(url, body)\n self.assertEqual(response.status_code, 201, response.data)\n config5 = response.data\n self.assertNotEqual(config4['uuid'], config5['uuid'])\n self.assertNotIn('NEW_URL2', json.dumps(response.data['values']))\n\n # unset all config values\n body = {'values': json.dumps({'NEW_URL1': None})}\n response = self.client.post(url, body)\n self.assertEqual(response.status_code, 201, response.data)\n self.assertNotIn('NEW_URL1', json.dumps(response.data['values']))\n\n # set a port and then unset it to make sure validation ignores the unset\n body = {'values': json.dumps({'PORT': '5000'})}\n response = self.client.post(url, body)\n self.assertEqual(response.status_code, 201, response.data)\n self.assertIn('PORT', response.data['values'])\n\n body = {'values': json.dumps({'PORT': None})}\n response = self.client.post(url, body)\n 
self.assertEqual(response.status_code, 201, response.data)\n self.assertNotIn('PORT', response.data['values'])\n\n # disallow put/patch/delete\n response = self.client.put(url)\n self.assertEqual(response.status_code, 405, response.data)\n response = self.client.patch(url)\n self.assertEqual(response.status_code, 405, response.data)\n response = self.client.delete(url)\n self.assertEqual(response.status_code, 405, response.data)\n return config5", "title": "" }, { "docid": "62d6886dacfc39e791a76285626e9a65", "score": "0.5972372", "text": "def test_update_fsa_settings(self):\n pass", "title": "" }, { "docid": "a61237f646c0eb78c74a720917b8c544", "score": "0.5970906", "text": "def test_900_restart_on_config_change(self):\n # Config file affected by juju set config change\n conf_file = '/etc/nova/nova.conf'\n\n # Make config change, check for service restarts\n logging.info('Changing the debug config on nova-compute')\n self.restart_on_changed_debug_oslo_config_file(\n conf_file,\n ['nova-compute'])", "title": "" }, { "docid": "afbfbadc31a1e2a612ea29570a2cf90b", "score": "0.59632045", "text": "def mock_config_entry_data():\n return {\n \"product_name\": \"Product Name\",\n \"product_type\": \"product_type\",\n \"serial\": \"aabbccddeeff\",\n \"name\": \"Product Name\",\n CONF_IP_ADDRESS: \"1.2.3.4\",\n }", "title": "" }, { "docid": "e1492313b97d4a0eb815f8dad2784f31", "score": "0.594567", "text": "def setUp(self):\n super(SysChangeMonTestCase, self).setUp()\n self.app.storage = TestStorage()\n self.app.config = TestConfigHandler()\n self.app.log = TestLogHandler()", "title": "" }, { "docid": "4bdf5213a8a44eaee6d99541efab7b4d", "score": "0.5939667", "text": "def reload_config(self):\n\n self.config = load_script_config(\n script='data_taker',\n config=self.gui.config.text(),\n logger=self.log\n )", "title": "" }, { "docid": "e172cd7a3f1f507f5f8af350a8037329", "score": "0.5927022", "text": "def setUp(self) -> None:\n\n self.config_loader = ConfigLoader()\n\n self.tempfile = tempfile.NamedTemporaryFile()\n\n self.our_dataset = {\n \"chrome\": {\n \"linux\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 \"\n \"(KHTML, like Gecko) Chrome/77.0.3865.116 \"\n \"Safari/537.36 Edg/77.11.4.5118\",\n \"macosx\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) \"\n \"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4364.0 \"\n \"Safari/537.36\",\n \"win10\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) \"\n \"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4361.0 \"\n \"Safari/537.36\",\n },\n \"edge\": {\n \"linux\": None,\n \"macosx\": None,\n \"win10\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) \"\n \"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 \"\n \"Safari/537.36 Edge/18.17763/5.9.7 (Linux;Android 10) \"\n \"ExoPlayerLib/2.9.6\",\n },\n \"firefox\": {\n \"linux\": \"Mozilla/5.0 (Linux x86_64; en-US) Gecko/20130401 \"\n \"Firefox/82.4\",\n \"macosx\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0; \"\n \"en-US) Gecko/20100101 Firefox/74.7\",\n \"win10\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:84.0) \"\n \"Gecko/20100101 Firefox/84.0/8mqDiPuL-36\",\n },\n \"ie\": {\n \"linux\": None,\n \"macosx\": None,\n \"win10\": \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 10.0; \"\n \"Win64; x64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR \"\n \"2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; Tablet \"\n \"PC 2.0; wbx 1.0.0; wbxapp 1.0.0)\",\n },\n \"opera\": {\n \"linux\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 \"\n \"(KHTML, like Gecko) 
Chrome/87.0.4280.88 Safari/537.36 \"\n \"OPR/73.0.3856.284\",\n \"macosx\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) \"\n \"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 \"\n \"Safari/537.36 OPR/72.0.3815.400\",\n \"win10\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) \"\n \"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 \"\n \"Safari/537.36 OPR/73.0.3856.284 (Edition avira-2)\",\n },\n \"safari\": {\n \"linux\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 \"\n \"(KHTML, like Gecko) Version/4.0 Chrome/70.0.3538.110 \"\n \"Safari/537.36 SputnikBrowser/1.2.5.158\",\n \"macosx\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) \"\n \"AppleWebKit/600.8.9 (KHTML, like Gecko) Version/9.0.3 \"\n \"Safari/601.4.4\",\n \"win10\": None,\n },\n }\n\n self.tempfile.write(json.dumps(self.our_dataset).encode())\n self.tempfile.seek(0)\n\n self.user_agent_dataset = UserAgentDataset()\n self.user_agent_dataset.source_file = self.tempfile.name\n\n self.get_content_patch = unittest.mock.patch.object(DatasetBase, \"get_content\")\n self.mock_get_content = self.get_content_patch.start()\n self.mock_get_content.return_value = copy.deepcopy(self.our_dataset)", "title": "" }, { "docid": "aaa443dea22a55c15285d8c7a2e727af", "score": "0.5913581", "text": "def test_update_defaults(self):\n configuration = DiscussionsConfiguration.objects.get(context_key=self.course_key_with_defaults)\n configuration.enabled = False\n configuration.plugin_configuration = {\n 'url': 'http://localhost',\n }\n configuration.provider_type = 'legacy'\n configuration.save()\n configuration = DiscussionsConfiguration.objects.get(context_key=self.course_key_with_defaults)\n assert configuration is not None\n assert not configuration.enabled\n assert configuration.lti_configuration is None\n assert configuration.plugin_configuration['url'] == 'http://localhost'\n assert configuration.provider_type == 'legacy'", "title": "" }, { "docid": "5ec65e4e41d28cc67e65cd569987d4fe", "score": "0.5912548", "text": "def test_220_nova_metadata_propagate(self):\n if self.current_release < self.BIONIC_ROCKY:\n logging.info(\"Feature didn't exist before Rocky. 
Nothing to test\")\n return\n\n # Expected default and alternate values\n current_value = zaza.model.get_application_config(\n 'nova-cloud-controller')['vendor-data-url']['value']\n new_value = 'http://some-other.url/vdata'\n\n set_default = {'vendor-data-url': current_value}\n set_alternate = {'vendor-data-url': new_value}\n default_entry = {'api': {\n 'vendordata_dynamic_targets': [current_value]}}\n alternate_entry = {'api': {'vendordata_dynamic_targets': [new_value]}}\n\n # Config file affected by juju set config change\n conf_file = '/etc/nova/nova.conf'\n\n # Make config change, check for service restarts\n logging.info(\n 'Setting config on nova-cloud-controller to {}'.format(\n set_alternate))\n self.restart_on_changed(\n conf_file,\n set_default,\n set_alternate,\n default_entry,\n alternate_entry,\n self.services)", "title": "" }, { "docid": "070d6a8493354003596b344fe9b4fb23", "score": "0.5895899", "text": "def _update_infobase_config():\r\n # update infobase configuration\r\n from infogami.infobase import server\r\n if not config.get(\"infobase\"):\r\n config.infobase = {}\r\n # This sets web.config.db_parameters\r\n server.update_config(config.infobase)", "title": "" }, { "docid": "d842f67ba9fe20e33c575d5f9ddc0c87", "score": "0.58910453", "text": "def reload_config(self):\n from samuraix import config # TODO?\n self.dispatch_event('on_load_config', config)", "title": "" }, { "docid": "e37efdbc4a6f74462041208a763045df", "score": "0.5889935", "text": "def test_corrupted_config_data(self):\n self.managed_list.zk_config_manager._data = \"[dasdsad\"\n self.managed_list._reload_config_data()\n self.assertEqual([], self.managed_list.get_list())\n\n self.managed_list.zk_config_manager._data = \"{}\"\n self.managed_list._reload_config_data()\n self.assertEqual([], self.managed_list.get_list())", "title": "" }, { "docid": "c8451b7310582a17e7aaa841385f1dda", "score": "0.5887346", "text": "async def test_manual_configuration_update_configuration(\n hass: HomeAssistant, mock_config_entry, mock_vapix_requests\n) -> None:\n assert mock_config_entry.data[CONF_HOST] == \"1.2.3.4\"\n\n result = await hass.config_entries.flow.async_init(\n AXIS_DOMAIN, context={\"source\": SOURCE_USER}\n )\n\n assert result[\"type\"] == FlowResultType.FORM\n assert result[\"step_id\"] == \"user\"\n\n mock_vapix_requests(\"2.3.4.5\")\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={\n CONF_HOST: \"2.3.4.5\",\n CONF_USERNAME: \"user\",\n CONF_PASSWORD: \"pass\",\n CONF_PORT: 80,\n },\n )\n await hass.async_block_till_done()\n\n assert result[\"type\"] == FlowResultType.ABORT\n assert result[\"reason\"] == \"already_configured\"\n assert mock_config_entry.data[CONF_HOST] == \"2.3.4.5\"", "title": "" }, { "docid": "aede172042886a07b247fa637318ad96", "score": "0.58785474", "text": "def test_config():\n assert not create_app().testing\n assert create_app({\"TESTING\": True}).testing", "title": "" }, { "docid": "5e751f2145364cf0ad9d852b42cb16b6", "score": "0.58688074", "text": "def test_config_with_defaults(self):\n\n with MockConsulKV():\n app = self.create_app()\n consul = Consul(app)\n consul.apply_remote_config()\n\n self.assertEqual(app.config['cfg_1'], 'consul_1')\n self.assertEqual(app.config['cfg_3'], 'consul_3')\n self.assertEqual(app.config['cfg_4'], {'inner': 'value'})", "title": "" }, { "docid": "d6500ea495b8e6ec354d7e3690a52f78", "score": "0.5866945", "text": "def test_read_config_file_old(self):\n es_url = 'http://127.0.0.1:9200'\n # Build\n file_obj = 
FileTestObj()\n file_obj.write_config(file_obj.args['configfile'], YAMLCONFIG.format(es_url))\n # Test\n build_obj = Builder(configfile=file_obj.args['configfile'])\n assert build_obj.client_args.hosts[0] == es_url\n # Teardown\n file_obj.teardown()", "title": "" }, { "docid": "9ad84a816505064e75b943cf0c49c748", "score": "0.5866806", "text": "def setUp(self):\n self.config = trial.Options()\n self.savedModules = dict(sys.modules)", "title": "" }, { "docid": "5fb8d2fc87cf2091bdd6c702e4ef51b4", "score": "0.58650434", "text": "def test_set_in_config(self):\n options = self._test_options(config={\"option\": \"foo\"})\n self.assertEqual(options.test_option, \"foo\")\n\n options = self._test_options(config={\"option\": \"bar\"})\n self.assertEqual(options.test_option, \"bar\")", "title": "" }, { "docid": "4b026be5e011f9ad664b40c6662f8051", "score": "0.5862787", "text": "async def test_unload_config_entry(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n entry = await init_integration(hass, aioclient_mock)\n assert hass.data[DOMAIN]\n await hass.config_entries.async_unload(entry.entry_id)\n await hass.async_block_till_done()\n assert not hass.data.get(DOMAIN)", "title": "" }, { "docid": "20c25555ab7c88e2071674939b0228cd", "score": "0.5859502", "text": "def testFixConfig(self):\n \n tarball = self.recipe.get_tarball(self.download_url, self.download_dir)\n \n extracted = self.recipe.install_tarball(self.download_dir, tarball, self.destination)\n \n options = {}\n options['admin_username'] = \"foo\"\n options['admin_password'] = \"foo\"\n options['adminserver_hostport'] = \"foo\"\n options['process_uid'] = \"foo\"\n options['process_gid'] = \"foo\"\n options['licenseinfo'] = \"foo\"\n options['httpd_enabled'] = \"foo\"\n options['hostport'] = \"foo\"\n options['live_dir'] = \"foo\"\n options['vod_common_dir'] = \"foo\"\n options['vod_dir'] = \"foo\"\n options['appsdir'] = \"foo\"\n options['js_scriptlibpath'] = \"foo\"\n \n locations = self.recipe.create_config(self.destination, options)\n \n for loc in locations:\n f = open(loc).read()\n self.assertTrue('foo' in f)", "title": "" }, { "docid": "a23d50e982ec4e0f90275287f458141f", "score": "0.5856261", "text": "def test_correct_new_naming_scrape(self):\n # Given\n c = config({\n 'scrap_start_urls': False\n })\n\n config_loaded = ConfigLoader(c)\n\n assert config_loaded.scrape_start_urls == False\n assert config_loaded.scrap_start_urls == config_loaded.scrape_start_urls", "title": "" }, { "docid": "a5d55a50082ac0622529f7fd00754ab5", "score": "0.5852883", "text": "def test():\n c = Config()\n c.defaults()\n c.print_config()", "title": "" }, { "docid": "1d8289aefa98a164c0a99316b18387c6", "score": "0.5851456", "text": "def config_entry_fixture():\n return MockConfigEntry(domain=DOMAIN, title=\"Sonos\")", "title": "" }, { "docid": "ee92e4c04faf05a3eccdb550f7da7ee8", "score": "0.5847191", "text": "def setUp(self):\n self.config = trial.Options()", "title": "" }, { "docid": "082166328d0a75b158a7a8625039097f", "score": "0.5846698", "text": "def test_create_default_configuration(self):\n pass", "title": "" }, { "docid": "9ddfa3cb600b8dd557896e5a9e83fb46", "score": "0.5841066", "text": "def _on_config_changed(self, _):\n current = self.config[\"thing\"]\n if current not in self._stored.things:\n logger.debug(\"found a new thing: %r\", current)\n self._stored.things.append(current)", "title": "" }, { "docid": "9ae3caeb47c00efdb21c4a803d94d9a8", "score": "0.5838629", "text": "def setUpTestData(cls):\r\n cfg.TESTING = True\r\n 
cfg.IGNORE_SYNC_DURING_CONNECT = False\r\n fake_initialize()", "title": "" }, { "docid": "9b7bf8dc5f92f419a90194935b302df5", "score": "0.5834541", "text": "def test_config_router_put(self):\n pass", "title": "" }, { "docid": "d65e94cdb1db4e3fdadfeda05d5d9c74", "score": "0.5833664", "text": "def reload(self):\n # Load newest config\n self.__loader.load_config()\n self.__properties = self.__loader.properties\n self.load_config_objects()", "title": "" }, { "docid": "df3305fb844848a66d578c75568ae597", "score": "0.58320844", "text": "def setUp(self):\n self.test_app = create_app()\n self.test_app.config.from_object(\"project.config.TestingConfig\")", "title": "" }, { "docid": "74ab7ed6a342387a013326873ebdb50e", "score": "0.5821723", "text": "def test_conf(self):\n self.TESTED_UNIT = 'ceph-fs/0'\n\n def _get_conf():\n \"\"\"get/parse ceph daemon response into dict.\n\n :returns dict: Current configuration of the Ceph MDS daemon\n :rtype: dict\n \"\"\"\n cmd = \"sudo ceph daemon mds.$HOSTNAME config show\"\n conf = model.run_on_unit(self.TESTED_UNIT, cmd)\n return json.loads(conf['Stdout'])\n\n @retry(wait=wait_exponential(multiplier=1, min=4, max=10),\n stop=stop_after_attempt(10))\n def _change_conf_check(mds_config):\n \"\"\"Change configs, then assert to ensure config was set.\n\n Doesn't return a value.\n \"\"\"\n model.set_application_config('ceph-fs', mds_config)\n results = _get_conf()\n self.assertEqual(\n results['mds_cache_memory_limit'],\n mds_config['mds-cache-memory-limit'])\n self.assertAlmostEqual(\n float(results['mds_cache_reservation']),\n float(mds_config['mds-cache-reservation']))\n self.assertAlmostEqual(\n float(results['mds_health_cache_threshold']),\n float(mds_config['mds-health-cache-threshold']))\n\n # ensure defaults are set\n mds_config = {'mds-cache-memory-limit': '4294967296',\n 'mds-cache-reservation': '0.05',\n 'mds-health-cache-threshold': '1.5'}\n _change_conf_check(mds_config)\n\n # change defaults\n mds_config = {'mds-cache-memory-limit': '8589934592',\n 'mds-cache-reservation': '0.10',\n 'mds-health-cache-threshold': '2'}\n _change_conf_check(mds_config)\n\n # Restore config to keep tests idempotent\n mds_config = {'mds-cache-memory-limit': '4294967296',\n 'mds-cache-reservation': '0.05',\n 'mds-health-cache-threshold': '1.5'}\n _change_conf_check(mds_config)", "title": "" }, { "docid": "d0f25b51b32bc79a9cb119514ff57cb4", "score": "0.58194387", "text": "def init_test(cls):\n cls.set_configuration()\n cls._first_read_id = cls._insert_readings_data()\n cls._insert_readings_data()\n payload = PayloadBuilder()\\\n .WHERE(['id', '>=', cls._first_read_id]) \\\n .ORDER_BY(['id', 'ASC']) \\\n .payload()\n readings = cls._readings.query(payload)\n cls._raw_data = readings['rows']\n\n yield\n # Delete all test data from readings and configuration\n cls._storage_client.delete_from_tbl(\"readings\", {})\n payload = PayloadBuilder().WHERE([\"key\", \"=\", cls._CONFIG_CATEGORY_NAME]).payload()\n cls._storage_client.delete_from_tbl(\"configuration\", payload)", "title": "" }, { "docid": "1f27f6258b7429a26a9901bea6d615fd", "score": "0.58063304", "text": "def test_test_config(self):\n app = create_app('appname.config.TestingConfig', env='testing')\n\n assert app.config['DEBUG'] is False", "title": "" }, { "docid": "e24023b611c82117bd20f68a1d1cca7f", "score": "0.580564", "text": "def test_update_value(self):\n fixes.fixes = {}\n old_fixes = fixes.fixes\n fixes._load_file(join_data_path('fixes.py'))\n self.assertIs(fixes.fixes, old_fixes)", "title": "" }, { "docid": 
"e9ad9f61e71f1b1d8f693b1c6f299b1e", "score": "0.579319", "text": "def test_config_set_same_key(self, mock_requests):\n app_id = self.create_app()\n url = \"/v2/apps/{app_id}/config\".format(**locals())\n\n # set an initial config value\n body = {'values': json.dumps({'PORT': '5000'})}\n response = self.client.post(url, body)\n self.assertEqual(response.status_code, 201, response.data)\n self.assertIn('PORT', response.data['values'])\n\n # reset same config value\n body = {'values': json.dumps({'PORT': '5001'})}\n response = self.client.post(url, body)\n self.assertEqual(response.status_code, 201, response.data)\n self.assertIn('PORT', response.data['values'])\n self.assertEqual(response.data['values']['PORT'], '5001')", "title": "" }, { "docid": "4bdb7dfe598c20dcfb58133fdd20b858", "score": "0.5790044", "text": "def test_save_config_by_urls(self):\n\n # our key to use\n key = 'test_save_config_by_urls'\n\n # GET returns 405 (not allowed)\n response = self.client.get('/add/{}'.format(key))\n assert response.status_code == 405\n\n # no data\n response = self.client.post('/add/{}'.format(key))\n assert response.status_code == 400\n\n # No entries specified\n response = self.client.post(\n '/add/{}'.format(key), {'urls': ''})\n assert response.status_code == 400\n\n # Added successfully\n response = self.client.post(\n '/add/{}'.format(key), {'urls': 'mailto://user:pass@yahoo.ca'})\n assert response.status_code == 200\n\n # No URLs loaded\n response = self.client.post(\n '/add/{}'.format(key),\n {'config': 'invalid content', 'format': 'text'})\n assert response.status_code == 400\n\n # Test a case where we fail to load a valid configuration file\n with patch('apprise.AppriseConfig.add', return_value=False):\n response = self.client.post(\n '/add/{}'.format(key),\n {'config': 'garbage://', 'format': 'text'})\n assert response.status_code == 400\n\n with patch('os.remove', side_effect=OSError):\n # We will fail to remove the device first prior to placing a new\n # one; This will result in a 500 error\n response = self.client.post(\n '/add/{}'.format(key), {\n 'urls': 'mailto://user:newpass@gmail.com'})\n assert response.status_code == 500\n\n # URL is actually not a valid one (invalid Slack tokens specified\n # below)\n response = self.client.post(\n '/add/{}'.format(key), {'urls': 'slack://-/-/-'})\n assert response.status_code == 400\n\n # Test with JSON\n response = self.client.post(\n '/add/{}'.format(key),\n data=json.dumps({'urls': 'mailto://user:pass@yahoo.ca'}),\n content_type='application/json',\n )\n assert response.status_code == 200\n\n # Test with JSON (and no payload provided)\n response = self.client.post(\n '/add/{}'.format(key),\n data=json.dumps({}),\n content_type='application/json',\n )\n assert response.status_code == 400\n\n # Test with XML which simply isn't supported\n response = self.client.post(\n '/add/{}'.format(key),\n data='<urls><url>mailto://user:pass@yahoo.ca</url></urls>',\n content_type='application/xml',\n )\n assert response.status_code == 400\n\n # Invalid JSON\n response = self.client.post(\n '/add/{}'.format(key),\n data='{',\n content_type='application/json',\n )\n assert response.status_code == 400\n\n # Test the handling of underlining disk/write exceptions\n with patch('os.makedirs') as mock_mkdirs:\n mock_mkdirs.side_effect = OSError()\n # We'll fail to write our key now\n response = self.client.post(\n '/add/{}'.format(key),\n data=json.dumps({'urls': 'mailto://user:pass@yahoo.ca'}),\n content_type='application/json',\n )\n\n # internal errors are 
correctly identified\n assert response.status_code == 500\n\n # Test the handling of underlining disk/write exceptions\n with patch('gzip.open') as mock_open:\n mock_open.side_effect = OSError()\n # We'll fail to write our key now\n response = self.client.post(\n '/add/{}'.format(key),\n data=json.dumps({'urls': 'mailto://user:pass@yahoo.ca'}),\n content_type='application/json',\n )\n\n # internal errors are correctly identified\n assert response.status_code == 500", "title": "" }, { "docid": "a1262b915a262e2d12626f58a6e9b929", "score": "0.57868624", "text": "async def test_load_unload_config_entry(\n hass: HomeAssistant,\n mock_config_entry: MockConfigEntry,\n mock_open_meteo: AsyncMock,\n) -> None:\n mock_config_entry.add_to_hass(hass)\n await hass.config_entries.async_setup(mock_config_entry.entry_id)\n await hass.async_block_till_done()\n\n assert mock_config_entry.state is ConfigEntryState.LOADED\n\n await hass.config_entries.async_unload(mock_config_entry.entry_id)\n await hass.async_block_till_done()\n\n assert not hass.data.get(DOMAIN)\n assert mock_config_entry.state is ConfigEntryState.NOT_LOADED", "title": "" }, { "docid": "dbeda210a750f8ee4c4509c7db44d1d7", "score": "0.578367", "text": "def test_test_config(self):\n with open(TEST_DATA_PATH / \"test_config.yaml\") as f:\n self.schema.load(yaml.safe_load(f))", "title": "" }, { "docid": "279e6341fa673c0e89954886516219e8", "score": "0.577966", "text": "def test_default_config(mock_load):\n config = Config()\n config.load()\n\n assert \"data_dir\" in config\n assert \"local_config_file\" in config\n assert \"Slow Start\" in config[\"reddit.user_agent\"]\n assert mock_load.call_count == 1", "title": "" }, { "docid": "d32cf09b13497e3691433f288c818aa6", "score": "0.57740635", "text": "def test_update(self):\n pass", "title": "" }, { "docid": "d32cf09b13497e3691433f288c818aa6", "score": "0.57740635", "text": "def test_update(self):\n pass", "title": "" }, { "docid": "bce1d8fde9c6047e29c2de59953cbe73", "score": "0.57730144", "text": "def test_config_whitelist_put(self):\n pass", "title": "" }, { "docid": "e669f5b95793507c27da42c1e0b99296", "score": "0.5764217", "text": "async def test_reauth_multiple_config_entries(\n hass: HomeAssistant, oauth, setup_platform, config_entry\n) -> None:\n await setup_platform()\n\n old_entry = MockConfigEntry(\n domain=DOMAIN,\n data={\n **config_entry.data,\n \"extra_data\": True,\n },\n )\n old_entry.add_to_hass(hass)\n\n entries = hass.config_entries.async_entries(DOMAIN)\n assert len(entries) == 2\n\n orig_subscriber_id = config_entry.data.get(\"subscriber_id\")\n\n # Invoke the reauth flow\n result = await oauth.async_reauth(config_entry)\n\n await oauth.async_oauth_web_flow(result)\n\n await oauth.async_finish_setup(result)\n\n # Only reauth entry was updated, the other entry is preserved\n entries = hass.config_entries.async_entries(DOMAIN)\n assert len(entries) == 2\n entry = entries[0]\n assert entry.unique_id == PROJECT_ID\n entry.data[\"token\"].pop(\"expires_at\")\n assert entry.data[\"token\"] == {\n \"refresh_token\": \"mock-refresh-token\",\n \"access_token\": \"mock-access-token\",\n \"type\": \"Bearer\",\n \"expires_in\": 60,\n }\n assert entry.data.get(\"subscriber_id\") == orig_subscriber_id # Not updated\n assert not entry.data.get(\"extra_data\")\n\n # Other entry was not refreshed\n entry = entries[1]\n entry.data[\"token\"].pop(\"expires_at\")\n assert entry.data.get(\"token\", {}).get(\"access_token\") == \"some-token\"\n assert entry.data.get(\"extra_data\")", "title": "" }, { 
"docid": "6bb853ab716fa9c0b6ca3f7987db35c3", "score": "0.57627", "text": "def testReferences(self):\n cfg = _get_test_config_dict()\n cfg.dict_ref = cfg.dict\n\n self.assertEqual(cfg.dict_ref, cfg.dict)", "title": "" }, { "docid": "149321352d7f9526ce11705e5a3c4c40", "score": "0.5761119", "text": "async def test_name_already_configured(hass, api):\n entry = MockConfigEntry(\n domain=transmission.DOMAIN,\n data=MOCK_ENTRY,\n options={CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL},\n )\n entry.add_to_hass(hass)\n\n mock_entry = MOCK_ENTRY.copy()\n mock_entry[CONF_HOST] = \"0.0.0.0\"\n result = await hass.config_entries.flow.async_init(\n transmission.DOMAIN,\n context={\"source\": config_entries.SOURCE_USER},\n data=mock_entry,\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"errors\"] == {CONF_NAME: \"name_exists\"}", "title": "" } ]
052b79f70712e6d9033814bffb508da3
Use this decorator to mark your ploogin function as a handler to call upon teardown.
[ { "docid": "d19096ed85842fc259165a9fd2f7ad0a", "score": "0.7959539", "text": "def upon_teardown(f: Callable):\n return PlooginEventHandler(event=PlooginEvents.TEARDOWN, f=f)", "title": "" } ]
[ { "docid": "122522864231496b572e2050164568f0", "score": "0.70912457", "text": "def teardown_func():\r\n pass", "title": "" }, { "docid": "b304946a999ba13baed34b64e8f6f7fe", "score": "0.6499198", "text": "def teardown_appcontext(self, f):\r\n self.teardown_appcontext_funcs.append(f)\r\n return f", "title": "" }, { "docid": "8bfab0117a997d4c00b8fcac9c9eef31", "score": "0.63808227", "text": "def removes_return():\n\n def decorator(fn):\n fn._sa_instrument_after = \"fire_remove_event\"\n return fn\n\n return decorator", "title": "" }, { "docid": "f62c594e656418483cae9f66e51d4b15", "score": "0.63546515", "text": "def teardown_method(self, method):", "title": "" }, { "docid": "c6824430490ee51c8554ac87fa30f5aa", "score": "0.63046306", "text": "def teardown_method(self):", "title": "" }, { "docid": "9e28b4fd46a02ef78e7bb53df4606602", "score": "0.62905", "text": "def global_teardown():", "title": "" }, { "docid": "799e85de31556bf5ade6b6fb734ff19f", "score": "0.6266707", "text": "def teardown_request(self, f):\r\n self.teardown_request_funcs.setdefault(None, []).append(f)\r\n return f", "title": "" }, { "docid": "1407bd08bd4a0720d8d5c863b8ea3eef", "score": "0.6244014", "text": "def teardown_method(self):\n pass", "title": "" }, { "docid": "7d22dd7492c2352edc502eea1964ff29", "score": "0.6233653", "text": "def teardown_function(function):\n print(\"\\nTEAR DOWN <==\")", "title": "" }, { "docid": "272bdd019cf78c57a02d76dece41ecbf", "score": "0.61997914", "text": "def test_teardown(self):\n assert self.default_handler.teardown() is None\n self.assert_quantity_in_outbox(0)", "title": "" }, { "docid": "0dc36fa9a96303b41df6b08092f37669", "score": "0.6186899", "text": "def event(name):\n def decorator(fun):\n unsafe_hook_event(name, fun)\n @plugins.finalizer\n def finalizer():\n unsafe_unhook_event(name, fun)\n return fun\n return decorator", "title": "" }, { "docid": "fd3c9f93161b2d2285c6f4f83c07771e", "score": "0.613534", "text": "def teardown(self):\n if getattr(self.obj, 'im_self', None): \n name = 'teardown_method' \n else: \n name = 'teardown_function' \n obj = self.parent.obj \n meth = getattr(obj, name, None)\n if meth is not None: \n return meth(self.obj)", "title": "" }, { "docid": "bf4dbf9fc42c42f671302c137c8f451b", "score": "0.60999477", "text": "def removes(arg):\n\n def decorator(fn):\n fn._sa_instrument_before = (\"fire_remove_event\", arg)\n return fn\n\n return decorator", "title": "" }, { "docid": "ec171fd49c43d34afeff7ead9a224051", "score": "0.60797954", "text": "def test_teardown(test):", "title": "" }, { "docid": "cca4a866fcbd1b6a7874e67d10483296", "score": "0.6051324", "text": "def process_func(request, resource_handler):\n if request.node.get_closest_marker(\"setup_func\"):\n print(\"SetUp function {0}\".format(request.function.__name__))\n print(request.keywords.node.funcargs.keys())\n resource_handler.clear_result()\n\n yield\n\n if request.node.get_closest_marker(\"setup_func\"):\n print(\"TearDown function {0}\".format(request.function.__name__))\n print(request.keywords.node.funcargs.keys())\n resource_handler.clear_result()", "title": "" }, { "docid": "b62d17afc6bd583791e1bf7784068158", "score": "0.60283476", "text": "def early_teardown():", "title": "" }, { "docid": "9da38c8914547f2c018070df7d55a563", "score": "0.60210234", "text": "def teardown():", "title": "" }, { "docid": "936c217419dc476bb5fee4f59b0b6f00", "score": "0.6016588", "text": "def teardown(self) -> None:", "title": "" }, { "docid": "b99862f8d76c7ba5a0e4cd0069059992", "score": "0.6006935", "text": "def 
post_register_handler(self, func):\n return self.hook.post_register_handler(func)", "title": "" }, { "docid": "5f3b6108703ab4def60f5e087fd4a4d5", "score": "0.6006008", "text": "def teardown_class(self):", "title": "" }, { "docid": "51c5288f747ef901c42bb38055d315a5", "score": "0.6005165", "text": "def shutdown(self, f):\n self.on_shutdown = f", "title": "" }, { "docid": "0a09d28455627f09cfb6be0c94daca86", "score": "0.5996199", "text": "def teardown_method(self, method):\n pass", "title": "" }, { "docid": "761470949271a19227a3ad6d88f39c76", "score": "0.5993585", "text": "def addCleanup(self, function, *args, **kwargs):\n pass", "title": "" }, { "docid": "fa7dd48c444260e5cc0d0cafee636a9d", "score": "0.5960369", "text": "def unregister_handler(self, func):\n return self.hook.unregister_handler(func)", "title": "" }, { "docid": "8b47a3f0ff39e9c16615ca9049751af5", "score": "0.5933463", "text": "def teardown_backend(self, method):\n pass", "title": "" }, { "docid": "6503edbdd5978a82376935b92fd3835b", "score": "0.5925772", "text": "def addCleanup(self, function, *args, **kwargs):\n if gen.is_coroutine_function(function):\n return self._async_cleanups.append((function, args, kwargs))\n\n return super(BaseTestCase, self).addCleanup(function, *args, **kwargs)", "title": "" }, { "docid": "410fcbccb6f62055bef61a8e2e8f7803", "score": "0.59216607", "text": "def register(hook_fn, *args, **kwargs):\n entry = (hook_fn, args, kwargs)\n _teardown_hooks.append(entry)", "title": "" }, { "docid": "5bc707d25ff5b4825e045055b9aef197", "score": "0.5912825", "text": "def test_teardown(self):\n assert self.http_handler.teardown() is None\n self.assert_quantity_in_outbox(0)", "title": "" }, { "docid": "5e94f1b7ce75decc0ae84bf29a7e2615", "score": "0.58993286", "text": "def teardown(self):", "title": "" }, { "docid": "5e94f1b7ce75decc0ae84bf29a7e2615", "score": "0.58993286", "text": "def teardown(self):", "title": "" }, { "docid": "5e94f1b7ce75decc0ae84bf29a7e2615", "score": "0.58993286", "text": "def teardown(self):", "title": "" }, { "docid": "5e94f1b7ce75decc0ae84bf29a7e2615", "score": "0.58993286", "text": "def teardown(self):", "title": "" }, { "docid": "5e94f1b7ce75decc0ae84bf29a7e2615", "score": "0.58993286", "text": "def teardown(self):", "title": "" }, { "docid": "fa3d2ea79b735e744116b6fcd129a3f9", "score": "0.58323044", "text": "def unregister(env, test_type, func, *targs, **kargs):\r\n exithandlers = \"exithandlers__%s\" % test_type\r\n if env.data.get(exithandlers):\r\n env.data[exithandlers].remove((func, targs, kargs))\r\n return func", "title": "" }, { "docid": "d77faca1fb981bcad31cd3a08d4cbc93", "score": "0.5792379", "text": "def handler(event):\n\n def decorator(fn):\n def apply(cls):\n event.connect(fn, sender=cls)\n return cls\n\n fn.apply = apply\n return fn\n\n return decorator", "title": "" }, { "docid": "38b19715c3cc46a5ac54c8d7110cf985", "score": "0.57874817", "text": "def teardown_method(self):\n if self.ae:\n self.ae.shutdown()", "title": "" }, { "docid": "38b19715c3cc46a5ac54c8d7110cf985", "score": "0.57874817", "text": "def teardown_method(self):\n if self.ae:\n self.ae.shutdown()", "title": "" }, { "docid": "38b19715c3cc46a5ac54c8d7110cf985", "score": "0.57874817", "text": "def teardown_method(self):\n if self.ae:\n self.ae.shutdown()", "title": "" }, { "docid": "38b19715c3cc46a5ac54c8d7110cf985", "score": "0.57874817", "text": "def teardown_method(self):\n if self.ae:\n self.ae.shutdown()", "title": "" }, { "docid": "38b19715c3cc46a5ac54c8d7110cf985", "score": "0.57874817", "text": "def 
teardown_method(self):\n if self.ae:\n self.ae.shutdown()", "title": "" }, { "docid": "38b19715c3cc46a5ac54c8d7110cf985", "score": "0.57874817", "text": "def teardown_method(self):\n if self.ae:\n self.ae.shutdown()", "title": "" }, { "docid": "c8677ee6db6542be0feca3ce2db6d1cc", "score": "0.5783382", "text": "def teardown(self) -> None:\n pass", "title": "" }, { "docid": "8edec6a5489fd343bc75592c24e24e9c", "score": "0.57828677", "text": "def handler(event):\r\n\r\n def decorator(fn):\r\n def apply(cls):\r\n event.connect(fn, sender=cls)\r\n return cls\r\n\r\n fn.apply = apply\r\n return fn\r\n\r\n return decorator", "title": "" }, { "docid": "5669a3f47f28a3d0e4691342a5f74b72", "score": "0.57804215", "text": "def teardown_method(self, method):\n self.hass.stop()", "title": "" }, { "docid": "5669a3f47f28a3d0e4691342a5f74b72", "score": "0.57804215", "text": "def teardown_method(self, method):\n self.hass.stop()", "title": "" }, { "docid": "e392cf0bdb54ab68a8461c198df8680f", "score": "0.5763199", "text": "def after(f, name=None):\n return Interceptor(\n name=name or _interceptor_func_name(f),\n leave=lambda ctx: f(ctx))", "title": "" }, { "docid": "ae95ca2fa8bfce52554b607cc82f8fb1", "score": "0.5745509", "text": "def atexit_register(func):\n try:\n import uwsgi\n\n orig = getattr(uwsgi, \"atexit\", None)\n\n def uwsgi_atexit():\n if callable(orig):\n orig()\n func()\n\n uwsgi.atexit = uwsgi_atexit\n except ImportError:\n atexit.register(func)", "title": "" }, { "docid": "53817cd8cbc6aa3a180120a28f49f907", "score": "0.5720094", "text": "def teardown(self):\n pass", "title": "" }, { "docid": "53817cd8cbc6aa3a180120a28f49f907", "score": "0.5720094", "text": "def teardown(self):\n pass", "title": "" }, { "docid": "53817cd8cbc6aa3a180120a28f49f907", "score": "0.5720094", "text": "def teardown(self):\n pass", "title": "" }, { "docid": "53817cd8cbc6aa3a180120a28f49f907", "score": "0.5720094", "text": "def teardown(self):\n pass", "title": "" }, { "docid": "53817cd8cbc6aa3a180120a28f49f907", "score": "0.5720094", "text": "def teardown(self):\n pass", "title": "" }, { "docid": "53817cd8cbc6aa3a180120a28f49f907", "score": "0.5720094", "text": "def teardown(self):\n pass", "title": "" }, { "docid": "e8a56e292dc5a03ca8729c483c984e0c", "score": "0.57088655", "text": "def teardown(self) -> None:\n raise NotImplementedError # pragma: nocover", "title": "" }, { "docid": "9175735e90047c4930fe248f5b189375", "score": "0.5702149", "text": "def disable_during_tests(f):\n\n @functools.wraps(f)\n def outer(*args, **kwargs):\n if settings.TESTING:\n return\n return f(*args, **kwargs)\n\n return outer", "title": "" }, { "docid": "24efb8965b07ef67a12f5f2d24c9cec5", "score": "0.56644285", "text": "def teardown():\n print(\"TEAR DOWN!\")", "title": "" }, { "docid": "44c4a58de2993be7893494ff90631641", "score": "0.5651762", "text": "def teardown_method(self, method):\n self.patcher.stop()", "title": "" }, { "docid": "8870770df8985ade760c61ef5758d431", "score": "0.5647366", "text": "def teardown_class(klass):", "title": "" }, { "docid": "df59f615d28b73b19a0e28a5f0a31091", "score": "0.5635772", "text": "def teardown_method(self):\n\n del self.const, self.instruments, self.in_kwargs, self.ref_time\n return", "title": "" }, { "docid": "2e1e8c82572cbad4a6dcd1cc2eee41a5", "score": "0.5634583", "text": "def test_teardown(self):\n assert self.oef_search_handler.teardown() is None\n self.assert_quantity_in_outbox(0)", "title": "" }, { "docid": "c914acadfe979ecfdc93389813bc52f6", "score": "0.5626401", "text": "def 
exit_handler():\r\n print(\"Test Ended\")\r\n end_test()", "title": "" }, { "docid": "01ef302f9bfb72c5499b374bea90c7ac", "score": "0.56203854", "text": "def unspy(self, unregister=True):\n if hasattr(self.orig_func, 'im_self'):\n setattr(self.owner, self.func_name, self.orig_func)\n else:\n assert hasattr(self.orig_func, 'spy')\n del FunctionSpy._code_maps[self.orig_func.func_code]\n del self.orig_func.spy\n self.orig_func.func_code = self._old_code\n\n if unregister:\n self.agency.spies.remove(self)", "title": "" }, { "docid": "fabfd6061e736e9eb43e0e92b8f320c4", "score": "0.56188613", "text": "def _register_cleanup_func(self, func):\n self._cleanup_funcs.append(func)", "title": "" }, { "docid": "c9eeb173757014da941d2ceef29cb0a1", "score": "0.56151485", "text": "def remove_handler(self, function):\n try:\n self.handlers.remove(function)\n except ValueError: # handler wasn't in the list, pretend we don't notice\n pass", "title": "" }, { "docid": "d63d1d92479b9e2240bdcc21f435ce0a", "score": "0.5593257", "text": "def register_after(self, handler):\n self._middlewares['after'].append(handler)", "title": "" }, { "docid": "0ff21c6c357fc160e1e4715cde32f87d", "score": "0.55899626", "text": "def thread_shutdown_hook(func): # pragma: no cover\n thread_shutdown_hooks.append(func)\n return func", "title": "" }, { "docid": "d748505956dd3100122c70d89a17b2eb", "score": "0.5589789", "text": "def teardown() -> None:\r\n file_path: str = expression_file_util.EVENT_HANDLER_SCOPE_COUNT_FILE_PATH\r\n file_util.remove_file_if_exists(file_path=file_path)", "title": "" }, { "docid": "8755777020559874cb772e58d7df7047", "score": "0.55887985", "text": "def teardown_class(cls):", "title": "" }, { "docid": "8755777020559874cb772e58d7df7047", "score": "0.55887985", "text": "def teardown_class(cls):", "title": "" }, { "docid": "1802dd1c524cb47e2a3ac187a7e13b48", "score": "0.5583274", "text": "def teardown_test_class(duthost):\n yield\n config_reload(duthost)", "title": "" }, { "docid": "51e7cf4d65de823bf9e60291fda4d19d", "score": "0.5540833", "text": "def register_finalize_hook(fn):\n _FINALIZE_HOOKS.append(fn)\n return fn", "title": "" }, { "docid": "154f2f376f0feed958d5226db9e43724", "score": "0.55214787", "text": "def teardown(self):\n # delete pass after you implement.\n pass", "title": "" }, { "docid": "154f2f376f0feed958d5226db9e43724", "score": "0.55214787", "text": "def teardown(self):\n # delete pass after you implement.\n pass", "title": "" }, { "docid": "154f2f376f0feed958d5226db9e43724", "score": "0.55214787", "text": "def teardown(self):\n # delete pass after you implement.\n pass", "title": "" }, { "docid": "9cb35e394d9058966a81889512cd7f62", "score": "0.5512", "text": "def teardown_function(function):\n if (resource('ska_mid/tm_subarray_node/1').get(\"State\") == \"ON\"):\n the_waiter = waiter()\n the_waiter.set_wait_for_tearing_down_subarray()\n SubArray(1).deallocate()\n the_waiter.wait()\n LOGGER.info(the_waiter.logs)\n if (resource('ska_mid/tm_subarray_node/1').get(\"State\") == \"OFF\"):\n the_waiter = waiter()\n the_waiter.set_wait_for_going_to_standby()\n SKAMid().standby()\n the_waiter.wait()\n LOGGER.info(the_waiter.logs)", "title": "" }, { "docid": "b9e5ff114a527731860035975f1209e8", "score": "0.55105096", "text": "def __exit__(self, exc_type, exc_val, exc_tb) -> None:\n self.spec[self.func_name] = self.func", "title": "" }, { "docid": "67e85ac7e4044c42ad522dc46668ae5f", "score": "0.54970384", "text": "def handler_decorator(_func: AnyFunc) -> AnyFunc:\n\n inner = get_inner(_func)\n signature = 
inspect.signature(inner)\n parameters = signature.parameters.items()\n converters = get_converters(inner.__name__, parameters, partial(reduce, apply))\n\n target = partial( # type: ignore\n _parse_and_call,\n func=inner,\n converters=converters,\n silent=silent,\n error_handler=error_handler,\n )\n\n if inspect.iscoroutinefunction(inner):\n\n @wraps(inner)\n async def wrapper(request: Request = None, *args, **kwargs) -> Response:\n \"\"\"Entry point to async intent handler\"\"\"\n\n return await target(*(request, *args), **kwargs)\n\n else:\n\n @wraps(inner)\n def wrapper(request: Request = None, *args, **kwargs) -> Response:\n \"\"\"Entry point to sync handler\"\"\"\n\n return target(*(request, *args), **kwargs)\n\n setattr(wrapper, \"__intent_handler__\", True)\n return wrapper", "title": "" }, { "docid": "c33e12d43a755dbbaafeb36feb16bca0", "score": "0.5489948", "text": "def safe_fixture(request):\n print(\"\\n(Begin setting up safe_fixture)\")\n request.addfinalizer(safe_cleanup)\n risky_function()", "title": "" }, { "docid": "19e2ae18c031c7017f533e89a7fb3f07", "score": "0.5484392", "text": "def test_unspy_with_slippery_bound_method(self):\n obj = SlipperyFuncObject()\n spy = self.agency.spy_on(obj.my_func, owner=obj)\n spy.unspy()\n\n self.assertFalse(hasattr(obj.my_func, 'spy'))\n self.assertNotIn('my_func', obj.__dict__)\n\n # Make sure the old behavior has reverted.\n slippery_func.count = 0\n\n func1 = obj.my_func\n func2 = obj.my_func\n self.assertIsNot(func1, func2)\n\n # These will have already had their values set, so we should get\n # stable results again.\n self.assertEqual(func1(), 0)\n self.assertEqual(func1(), 0)\n self.assertEqual(func2(), 1)\n self.assertEqual(func2(), 1)\n\n # These will trigger new counter increments when re-generating the\n # function in the decorator.\n self.assertEqual(obj.my_func(), 2)\n self.assertEqual(obj.my_func(), 3)", "title": "" }, { "docid": "d54629b4974a20a248854a0d0a605bf7", "score": "0.5481681", "text": "def test_wiring_callable_works(self):\n self.w.this.wire(self.returns_42)\n self.addCleanup(self.w.this.unwire, self.returns_42)", "title": "" }, { "docid": "1381f4ca1a2b3295ed8bbbf1105d1d50", "score": "0.5480568", "text": "def teardown_method(self) -> None:\n self.gql_patcher.stop()", "title": "" }, { "docid": "7c894502e12f53ad3f3fb1345d8eb08a", "score": "0.54796493", "text": "def setUp(self):\n self.ResetHandler()", "title": "" }, { "docid": "1f06c63254d3b677496aaca740947bea", "score": "0.54755324", "text": "def to_do_this(self, func):\n def to_do_func(original):\n \"\"\"\n Return supplied function\n \"\"\"\n del original\n return func\n self.__action__ = to_do_func", "title": "" }, { "docid": "93c0f97a0a59c6a29fa33bb9648f8821", "score": "0.545279", "text": "def testTearDown():", "title": "" }, { "docid": "c96874671a416a63db92b783e86f6a8b", "score": "0.54483306", "text": "def register_exit(func):\n\n CLEANUP_LAMBDAS.append(func)", "title": "" }, { "docid": "f76fab3b6d5c206a89089c07effd836a", "score": "0.54418224", "text": "def teardown_method(self, method):\n self.sc.stop()", "title": "" }, { "docid": "f76fab3b6d5c206a89089c07effd836a", "score": "0.54418224", "text": "def teardown_method(self, method):\n self.sc.stop()", "title": "" }, { "docid": "f76fab3b6d5c206a89089c07effd836a", "score": "0.54418224", "text": "def teardown_method(self, method):\n self.sc.stop()", "title": "" }, { "docid": "678c3acff38c88a533e417195ccf9825", "score": "0.5436341", "text": "def teardown_method(self):\n\n del self.py_inst, self.inst_time\n return", 
"title": "" }, { "docid": "9acee0ceb4163849a843c9a5c061c9ad", "score": "0.5430923", "text": "def unsubscribe(self, f):\r\n self.add_handler(f, type='unsubscribe')\r\n return f", "title": "" }, { "docid": "72041a5359f053692afec9bbf52231d2", "score": "0.5426463", "text": "def setup_handler(self, __setup_handler: SetupHandler) -> None:\r\n self._setup_handler = __setup_handler", "title": "" }, { "docid": "0a81c5006d030d53e4fb1c72961a7db1", "score": "0.54232126", "text": "def teardown_method(self):\n del self.py_inst, self.inst_time\n return", "title": "" }, { "docid": "328d7c3a515f92eacf12d31f02a2974d", "score": "0.5422609", "text": "def upon_setup(f: Callable):\n return PlooginEventHandler(event=PlooginEvents.SETUP, f=f)", "title": "" }, { "docid": "b8cdd0063745ecbe048b156c47720694", "score": "0.54214203", "text": "def _mock_handler(self):\n\n return object()", "title": "" }, { "docid": "0a83163e1b6d07b70f41c68ff815a20e", "score": "0.5399992", "text": "def intercept(func):\n name = func.func_name\n def dec(cls, path, *args, **kwargs):\n for callback in cls.callbacks[name]:\n callback(path, *args, **kwargs)\n return func(cls, path, *args, **kwargs)\n return dec", "title": "" }, { "docid": "60c8d6d4cd1b85a672c967ea7eaff8a6", "score": "0.5397453", "text": "def replaces(arg):\n\n def decorator(fn):\n fn._sa_instrument_before = (\"fire_append_event\", arg)\n fn._sa_instrument_after = \"fire_remove_event\"\n return fn\n\n return decorator", "title": "" }, { "docid": "f4b0b73cb5439f738cb349a7630379d3", "score": "0.539728", "text": "def teardown_method(self):\n del self.test_inst, self.start_time, self.data_list, self.meta, self.out\n return", "title": "" }, { "docid": "1175bd977e6a1a3c89fec7fd85a1fc6c", "score": "0.5396883", "text": "def __del__(self):\n yield self.logout()", "title": "" }, { "docid": "ab4bfb3e47d75db795ff97166ce2f0a0", "score": "0.53966373", "text": "def add_stop_hook(self, handler):\n self.stop_hooks.append(handler)", "title": "" }, { "docid": "89ff09c0d4b5d4b30dedfcc2064e2429", "score": "0.5369737", "text": "def test_tear_down(self):\n ctx = context.Context(self.vm)\n\n with patch.object(ctx, '__exit__', autospec=True) as r:\n ctx.tear_down()\n r.assert_called_once_with()", "title": "" } ]
ae1c483eb54cca9c8de99dade4522368
load all plugins, warn if a plugin couldn't be loaded.
[ { "docid": "5e96272094c0dc55ffcd58db3de76cb8", "score": "0.6897457", "text": "def load_all(self):\n # get the names of all required plugins\n names = samuraix.config.get('core.plugins', [])\n log.debug('loading %s', names)\n # get all available entrypoints and create a dictionary mapping\n # an entrypoint's name to an entrypoint.\n entrypoints = list(pkg_resources.iter_entry_points('samuraix.plugin'))\n dct = dict((ep.name, ep) for ep in entrypoints)\n\n for name in names:\n try:\n ep = dct[name]\n except KeyError:\n log.error(\"The plugin '%s' couldn't be found!\" % name)\n else:\n log.info(\"Loading plugin '%s'...\", name)\n cls = ep.load()\n self[cls.key] = cls(self.app)", "title": "" } ]
[ { "docid": "a0187f0401cfa952954bc543f1edc71f", "score": "0.75690514", "text": "def load_plugins():", "title": "" }, { "docid": "b85d1c938ae197871c603b0950307892", "score": "0.73343587", "text": "def LoadPlugins(self):\n\t\tfor curfile in [f for f in glob.glob(os.path.join('plugins', self.PluginPath, '*.py'))\n\t\t\t\t\tif not os.path.basename(f) in ['__init__.py']]:\n\t\t\tLoadPluginFile(curfile)", "title": "" }, { "docid": "593ce4902165ee83444aed2589435f1d", "score": "0.71911377", "text": "def load_plugins(self, plugins):\r\n\r\n # prints an info message about the import operation\r\n # that is going to be performed in the plugin\r\n self.info(\"Loading plugins (importing %d main module files)...\" % len(plugins))\r\n\r\n # iterates over all the plugins requested for loading and\r\n # runs the import operation for each of them in case that\r\n # operation is required by module inexistence\r\n for plugin in plugins:\r\n # in case the plugin module is already loaded continues\r\n # the loop as no loading is required for it, otherwise\r\n # runs the proper loading process for the plugin logging\r\n # an error in case an exception occurs in the importing\r\n if plugin in sys.modules: continue\r\n try: __import__(plugin)\r\n except Exception as exception:\r\n self.error(\"Problem importing module %s: %s\" % (plugin, legacy.UNICODE(exception)))\r\n\r\n # prints an info message about the fact that the loading\r\n # operation for all of the requested plugin has finished\r\n self.info(\"Finished loading plugins\")", "title": "" }, { "docid": "dc9757ef6ead2d10aa3e85e7faef073c", "score": "0.7007953", "text": "def _load_plugins(self):\n plugins = ait.config.get('server.plugins')\n\n if plugins is None:\n log.warn('No plugins specified in config.')\n else:\n for index, p in enumerate(plugins):\n try:\n plugin = self._create_plugin(p['plugin'])\n self.plugins.append(plugin)\n log.info('Added plugin {}'.format(plugin))\n\n except Exception:\n exc_type, value, tb = sys.exc_info()\n log.error('{} creating plugin {}: {}'.format(exc_type,\n index,\n value))\n if not self.plugins:\n log.warn('No valid plugin configurations found. 
No plugins will be added.')", "title": "" }, { "docid": "06de2f29a24d1c02d4b296800386dabc", "score": "0.6884243", "text": "def load_plugins(self):\n plugin_manager = PluginManager()\n self.__fs_plugins = plugin_manager.load_filesystem_plugins()", "title": "" }, { "docid": "41f17f756dbe7fa787bc0fe219585060", "score": "0.68089104", "text": "def load():\n global _loaded\n if _loaded:\n return\n log.log.debug(\"Loading plugins\")\n _loaded = True\n\n for folder, sub, files in os.walk(\"nab/plugins/\"):\n sys.path.insert(0, folder)\n for f in files:\n fname, ext = os.path.splitext(f)\n if ext == '.py' and fname != \"__init__\":\n log.log.debug(fname)\n importlib.import_module(fname)", "title": "" }, { "docid": "e7c296bc9610964381613d30a8337554", "score": "0.6773787", "text": "def load_plugins(self):\n # note: need to update if the setup.py module names change\n MODULE_NAME = \"floss\"\n req = pkg_resources.Requirement.parse(MODULE_NAME)\n requested_directory = os.path.join(MODULE_NAME, \"plugins\")\n try:\n plugins_path = pkg_resources.resource_filename(req, requested_directory)\n\n plugnplay.plugin_dirs = [plugins_path]\n plugnplay.load_plugins(logging.getLogger(\"plugin_loader\"))\n except pkg_resources.DistributionNotFound as e:\n self.i(\"failed to load extra plugins: %s\", e)", "title": "" }, { "docid": "0708b4fd3248e392ca6417a85d0fc89f", "score": "0.6760845", "text": "def loadHandlers(tag):\n\n def pr (*args,**keys):\n if not g.app.unitTesting:\n g.es_print(*args,**keys)\n\n plugins_path = g.os_path_abspath(g.os_path_join(g.app.loadDir,\"..\",\"plugins\"))\n files = glob.glob(g.os_path_join(plugins_path,\"*.py\"))\n files = [g.os_path_abspath(theFile) for theFile in files]\n\n s = g.app.config.getEnabledPlugins()\n if not s: return\n\n if not g.app.silentMode:\n pr('@enabled-plugins found in %s' % (\n g.app.config.enabledPluginsFileName),color='blue')\n\n enabled_files = getEnabledFiles(s,plugins_path)\n\n # Load plugins in the order they appear in the enabled_files list.\n if files and enabled_files:\n for theFile in enabled_files:\n if theFile in files:\n loadOnePlugin(theFile)\n\n # Note: g.plugin_signon adds module names to g.app.loadedPlugins\n if 0:\n if g.app.loadedPlugins:\n pr(\"%d plugins loaded\" % (len(g.app.loadedPlugins)), color=\"blue\")", "title": "" }, { "docid": "7d4c9386e45cce1c12e362683fd84987", "score": "0.6735198", "text": "def init_plugins (self):\n self.load_plugins()\n for p in self.plugins:\n p.init()", "title": "" }, { "docid": "8d4a8b252a64c06f381d34e5809f0e4c", "score": "0.67192745", "text": "def load_plugins(self):\n if self.base:\n codecs = self.base + '.plugins.codecs'\n compressions = self.base + '.plugins.compressions'\n assemblies = self.base + '.plugins.assemblies'\n hashs = self.base + '.plugins.hashs'\n formatters = self.base + '.plugins.formatters'\n misc = self.base + '.plugins.misc'\n else:\n codecs = 'deen.plugins.codecs'\n compressions = 'deen.plugins.compressions'\n assemblies = 'deen.plugins.assemblies'\n hashs = 'deen.plugins.hashs'\n formatters = 'deen.plugins.formatters'\n misc = 'deen.plugins.misc'\n codecs = importlib.import_module(codecs)\n self.codecs = self._get_plugin_classes_from_module(codecs)\n compressions = importlib.import_module(compressions)\n self.compressions = self._get_plugin_classes_from_module(compressions)\n assemblies = importlib.import_module(assemblies)\n self.assemblies = self._get_plugin_classes_from_module(assemblies)\n hashs = importlib.import_module(hashs)\n self.hashs = 
self._get_plugin_classes_from_module(hashs)\n formatters = importlib.import_module(formatters)\n self.formatters = self._get_plugin_classes_from_module(formatters)\n misc = importlib.import_module(misc)\n self.misc = self._get_plugin_classes_from_module(misc)", "title": "" }, { "docid": "ea6e8895e06fdcaaa41cff9117e86010", "score": "0.66606915", "text": "def load_plugins():\n loaded_plugins = []\n plugins_info = get_plugins_info()\n for plugin_info in plugins_info:\n plugin_module = imp.load_module(main_module, *plugin_info[\"info\"])\n loaded_plugins.append(plugin_module)\n return loaded_plugins", "title": "" }, { "docid": "eff480252ce0c401fcaa83e44a23ccfb", "score": "0.6649144", "text": "def notify_load_complete_loaded_plugins(self):\r\n\r\n # retrieves the loaded plugins list\r\n loaded_plugins_list = self.get_all_loaded_plugins()\r\n\r\n # iterates over all the loaded plugins\r\n for loaded_plugin in loaded_plugins_list:\r\n # notifies the plugin about the load complete\r\n loaded_plugin.init_complete()", "title": "" }, { "docid": "7c9868d3b0ce0ec0a4f7aa23b8de00bf", "score": "0.66406846", "text": "def _import_plugins(self):\n if self.detected:\n return\n\n # In some cases, plugin scanning may start during a request.\n # Make sure there is only one thread scanning for plugins.\n self.scanLock.acquire()\n if self.detected:\n return # previous threaded released + completed\n\n try:\n _import_apps_submodule(\"content_plugins\")\n self.detected = True\n finally:\n self.scanLock.release()", "title": "" }, { "docid": "e68366fc43e018fe946db719ed03f550", "score": "0.6628261", "text": "def import_plugins(self):\n if self.imported:\n return\n for plugin_name in settings.PLUGINS:\n module_name, cls = plugin_name.rsplit(\".\", 1)\n module = import_module(module_name)\n plugin = getattr(module, cls)\n self.plugin_classes.append(plugin)", "title": "" }, { "docid": "36a8168789dfbd1fd8d02231891ea390", "score": "0.66164017", "text": "def register_plugins(self):\n\t\tif self.plugin_whitelist:\n\t\t\tplugin_list = self.plugin_whitelist\n\t\telse:\n\t\t\tplugin_list = plugins.__all__\n\t\tfor plugin in plugin_list:\n\t\t\tif plugin in plugins.__all__:\n\t\t\t\tself.registerPlugin(plugin, self.plugin_config.get(plugin, {}), False)\n\t\t\telse:\n\t\t\t\traise NameError(\"No plugin by the name of %s listed in plugins.__all__.\" % plugin)\n\t\t# run post_init() for cross-plugin interaction\n\t\tfor plugin in self.plugin:\n\t\t\tself.plugin[plugin].post_init()", "title": "" }, { "docid": "a35ce8a68001acd50841081ebcca242b", "score": "0.65868634", "text": "def reload_plugins(self):\n self.plugins = []\n self.seen_paths = []\n logger.info(f'Looking for plugins under package {self.plugin_package}')\n self.walk_package(self.plugin_package)", "title": "" }, { "docid": "6bc5daf3337b90ecce8b21a44bf99f5a", "score": "0.6573317", "text": "def load_plugins(self):\r\n\r\n for plugin_path in settings.PLUGINS:\r\n try:\r\n try:\r\n pg_module, pg_classname = plugin_path.rsplit('.', 1)\r\n except ValueError, e:\r\n raise exceptions.ImproperlyConfigured('%s isn\\'t a plugin module' % plugin_path)\r\n\r\n try:\r\n mod = importlib.import_module(pg_module)\r\n except ImportError, e:\r\n raise exceptions.ImproperlyConfigured('Error importing plugin %s: \"%s\"' % (pg_module, e))\r\n\r\n try:\r\n pg_class = getattr(mod, pg_classname)\r\n except AttributeError, e:\r\n raise exceptions.ImproperlyConfigured('Plugins module \"%s\" does not define a \"%s\" class' % (pg_module, pg_classname))\r\n\r\n try:\r\n pg_instance = 
pg_class(handler=self, protocol=self.protocol)\r\n except TypeError, e:\r\n raise exceptions.InvalidPlugin('Plugins class \"%s\" does not provide a command: %s' % (pg_class, e))\r\n\r\n if hasattr(pg_instance, 'command'):\r\n if isinstance(pg_instance.command, str):\r\n self._command_plugins[pg_instance.command] = pg_instance\r\n else:\r\n for command in pg_instance.command:\r\n self._command_plugins[command] = pg_instance\r\n else:\r\n self._generic_plugins.append(pg_instance)\r\n except exceptions.JeevesException, e:\r\n if pg_classname:\r\n print \"%s: %s\" % (pg_classname, e)\r\n else:\r\n print e", "title": "" }, { "docid": "499980f31a450fedd65b30aaf87bc78a", "score": "0.65441203", "text": "def find_plugins(self):\n if len(self.plugins):\n # only let this happen once per launch\n return\n\n if config:\n path = os.path.abspath(os.path.join(os.path.dirname(config.config.plugin_dir), API_PLUGINS_FOLDER))\n for item in sorted(os.listdir(path)):\n loader_file = os.path.join(path, item, API_PLUGIN_LOADER)\n if os.path.exists(loader_file):\n print(\"Found plugin file: {}\".format(loader_file))\n loader_dir = os.path.join(path, item)\n orig_path = list(sys.path)\n try:\n if loader_dir not in sys.path:\n sys.path.append(loader_dir)\n plugin_module = importlib.machinery.SourceFileLoader(\n 'plugin_{}'.format(item.encode(\n encoding='ascii',\n errors='replace').decode('utf-8').replace('.', '_')),\n loader_file).load_module()\n if \"__plugin__\" in dir(plugin_module):\n plugin = plugin_module.__plugin__\n if not isinstance(plugin, EDMCPluginBase):\n raise ValueError(\"{} from {} is not a subclass of EDMCPluginBase\".format(\n str(plugin), item))\n print(\"Loaded plugin {} from module {}\".format(str(plugin), item))\n self.plugins.append(plugin)\n\n else:\n raise AttributeError(\"plugin {} lacks __plugin__\".format(loader_dir))\n except Exception as err:\n print(\"Error attempting to discover plugin {} : {}\".format(loader_file, err))\n sys.path = orig_path", "title": "" }, { "docid": "076cc8c86b7ea3a7a2cd5c9dac0356a1", "score": "0.65420604", "text": "def load_plugins(self):\n\n ocio_plugins = self.config.get('ocio_plugins', default=dict())\n if not ocio_plugins:\n LOGGER.warning('No OCIO plugins found in configuration file: \"{}\"'.format(self.config.get_path()))\n return\n\n for ocio_plugin_name, ocio_plugin_info in ocio_plugins.items():\n plugin_name = ocio_plugin_info.get('plugin_name', None)\n if not plugin_name:\n LOGGER.warning(\n 'Impossible to load \"{}\" OCIO Plugin because its plugin_name is not defined!'.format(\n ocio_plugin_name))\n continue\n\n if not tp.Dcc.is_plugin_loaded(plugin_name):\n LOGGER.info('Loading OCIO Plugin: \"{}\"'.format(plugin_name))\n try:\n tp.Dcc.load_plugin(plugin_name, quiet=True)\n except Exception as exc:\n LOGGER.error(\n 'Error while loading OCIO Plugin: \"{}\" | {} | {}'.format(\n plugin_name, exc, traceback.format_exc()))\n continue\n\n LOGGER.info('OCIO Plugin \"{}\" loaded successfully!'.format(plugin_name))\n self.__class__._available_plugins.append(ocio_plugin_name)", "title": "" }, { "docid": "ed8e9dc0d50db8bc5cb66061df16c85b", "score": "0.65240896", "text": "def load_plugin_classes(self):\n c.world_load_plugin_classes(self.world)", "title": "" }, { "docid": "57702d14121a171e55d822faa6c25b7e", "score": "0.6510039", "text": "def plugins():\n pass", "title": "" }, { "docid": "2b741b2266d3000680a80836c366666b", "score": "0.6471247", "text": "def _check_plugins(self, name: str) -> None:\n if name not in self._plugins and name not in 
self._plugin_status:\n raise WechatyPluginError(f'plugins <{name}> not exist')", "title": "" }, { "docid": "4e1662bf6bbaca5c8de8cf0abfd3a8d5", "score": "0.6442013", "text": "def _locate_plugins() -> None:\n path = (\n Path(sys.executable).parent\n if hasattr(sys, \"frozen\")\n else REPOSITORY_DIRECTORY\n )\n current_system = get_current_system()\n\n # This is for debugging purposes\n if current_system.DEBUG_MODE:\n console.print(f\"[bold green]Loading plugins from {path}[/]\")\n console.print(\"[bold green]Plugins found:[/]\")\n\n for plugin in Path(__file__).parent.glob(\"plugins/*_plugin.py\"):\n python_path = plugin.relative_to(path).with_suffix(\"\")\n\n # This is for debugging purposes\n if current_system.DEBUG_MODE:\n console.print(f\" [bold red]{plugin.name}[/]\")\n console.print(f\" [bold yellow]{python_path}[/]\")\n console.print(f\" [bold bright_cyan]{__package__}[/]\")\n console.print(f\" [bold magenta]{python_path.parts}[/]\")\n console.print(\n f\" [bold bright_magenta]{'.'.join(python_path.parts)}[/]\"\n )\n\n module = importlib.import_module(\n \".\".join(python_path.parts), package=__package__\n )\n for _, obj in inspect.getmembers(module):\n if (\n inspect.isclass(obj)\n and issubclass(obj, (PltTA))\n and obj != PlotlyTA.__class__\n ) and obj not in PlotlyTA.plugins:\n PlotlyTA.plugins.append(obj)", "title": "" }, { "docid": "45954d365c4be3e1e436c1ba223e0c90", "score": "0.6440719", "text": "def start_plugins(self):\n good_plugins = []\n for plugin in list(self.plugins):\n print(\"Starting plugin: {}\".format(plugin))\n try:\n plugin.plugin_start()\n good_plugins.append(plugin)\n except Exception as err:\n print(\"Error starting plugin {}\".format(err))\n self.plugins = good_plugins", "title": "" }, { "docid": "14e5755eb839b29fcbb0ad560f91210f", "score": "0.6426013", "text": "def discover():\n if CFG[\"plugins\"][\"autoload\"]:\n experiment_plugins = CFG[\"plugins\"][\"experiments\"].value\n for exp_plugin in experiment_plugins:\n try:\n importlib.import_module(exp_plugin)\n except ImportError as import_error:\n LOG.error(\"Could not find '%s'\", exp_plugin)\n LOG.error(\"ImportError: %s\", import_error.msg)", "title": "" }, { "docid": "7cb930da2ac32e8f69910a6939ef39a0", "score": "0.6419173", "text": "def load_plugins(self, filter=None, use_blacklist=True):\n log.debug('Starting plugin loading.')\n count = len(list(self._plugins.keys()))\n\n # Start loading plugins.\n for name in self.list_plugins(filter):\n # Don't load plugins twice.\n if name in self._plugins:\n continue\n\n # Get the PluginInfo for this.\n info = self._infos.get(name)\n if not info:\n raise KeyError('Tried to load unknown plugin: %s' % name)\n\n # If this is blacklisted, and we care, skip it.\n if use_blacklist and isblacklisted(name):\n log.debug('Skipping blacklisted plugin: %s' % info.nice_name)\n continue\n\n # Try loading it now.\n try:\n self._load_plugin(name, use_blacklist)\n except DependencyError as err:\n log.error('Unable to load plugin: %s\\n%s' %\n (info.nice_name, err))\n except (ImportError, VersionError):\n log.exception('Unable to load plugin: %s' % info.nice_name)\n\n # Count up and log the number of loaded plugins.\n count = len(list(self._plugins.keys())) - count\n log.info('Loaded %d plugins.' 
% count)", "title": "" }, { "docid": "1cdc35869f5b46ba8f960605f6353486", "score": "0.6406636", "text": "def ready_plugins(self):\n for plugin, plugin_cls in zip(self.plugins, self.plugin_classes):\n logger.info(\"calling ready for plugin %s\", repr(plugin_cls))\n plugin.do_ready()", "title": "" }, { "docid": "1cf4c702bb1a9622a0e6bf5b667ffa7d", "score": "0.6365202", "text": "def load_plugins():\n return load_dynamic_modules(pathfinder.plugins_path(), abstractplugin.AbstractPlugin)", "title": "" }, { "docid": "d8e7a1b11f1957dc1d5fbc9c4777d1be", "score": "0.6350461", "text": "def load_execute_file_plugins():\n textutils.output_info('Executing ' + str(len(file.__all__)) + ' file plugins')\n for plugin_name in file.__all__:\n plugin = __import__ (\"plugins.file.\" + plugin_name, fromlist=[plugin_name])\n if hasattr(plugin , 'execute'):\n plugin.execute()", "title": "" }, { "docid": "aa34ded51019788fa75d90b9acf81f5b", "score": "0.63288945", "text": "def importPlugins(plugins):\n global loaded\n for type_ in plugins:\n # typeMods will be a module reference containing all the modules for type_\n # Eg. module xcluster. Then typeMods.xcluster will be available.\n typeMods = __import__(''.join(['plugins.', type_]), fromlist=plugins[type_])\n for plugin in plugins[type_]:\n try:\n loaded[type_].append(eval('.'.join(['typeMods',plugin])))\n except Exception, err:\n sys.stderr.write(\"PLUGIN IMPORT ERROR:\\n%s\" % str(err))", "title": "" }, { "docid": "ff40c7976203610c9f74eff9cbf61d7b", "score": "0.6314812", "text": "def reloadPlugins(self):\r\n return 0", "title": "" }, { "docid": "c3e393b9d15b6063efbf077505d2237f", "score": "0.6308202", "text": "def _load_plugins(plugins, debug=True):\r\n plugs = []\r\n for plugin in plugins:\r\n setup_class = plugin.get('setup_class')\r\n plugin_name = plugin.get('__name__').split()[-1]\r\n mod_name = '.'.join(setup_class.split('.')[:-1])\r\n class_name = setup_class.split('.')[-1]\r\n try:\r\n mod = __import__(mod_name, globals(), locals(), [class_name])\r\n except SyntaxError, e:\r\n raise exception.PluginSyntaxError(\r\n \"Plugin %s (%s) contains a syntax error at line %s\" %\r\n (plugin_name, e.filename, e.lineno))\r\n except ImportError, e:\r\n raise exception.PluginLoadError(\r\n \"Failed to import plugin %s: %s\" %\r\n (plugin_name, e[0]))\r\n klass = getattr(mod, class_name, None)\r\n if not klass:\r\n raise exception.PluginError(\r\n 'Plugin class %s does not exist' % setup_class)\r\n if not issubclass(klass, clustersetup.ClusterSetup):\r\n raise exception.PluginError(\r\n \"Plugin %s must be a subclass of \"\r\n \"starcluster.clustersetup.ClusterSetup\" % setup_class)\r\n args, kwargs = utils.get_arg_spec(klass.__init__, debug=debug)\r\n config_args = []\r\n missing_args = []\r\n for arg in args:\r\n if arg in plugin:\r\n config_args.append(plugin.get(arg))\r\n else:\r\n missing_args.append(arg)\r\n if debug:\r\n log.debug(\"config_args = %s\" % config_args)\r\n if missing_args:\r\n raise exception.PluginError(\r\n \"Not enough settings provided for plugin %s (missing: %s)\"\r\n % (plugin_name, ', '.join(missing_args)))\r\n config_kwargs = {}\r\n for arg in kwargs:\r\n if arg in plugin:\r\n config_kwargs[arg] = plugin.get(arg)\r\n if debug:\r\n log.debug(\"config_kwargs = %s\" % config_kwargs)\r\n try:\r\n plug_obj = klass(*config_args, **config_kwargs)\r\n except Exception as exc:\r\n log.error(\"Error occured:\", exc_info=True)\r\n raise exception.PluginLoadError(\r\n \"Failed to load plugin %s with \"\r\n \"the following error: %s - %s\" %\r\n 
(setup_class, exc.__class__.__name__, exc.message))\r\n if not hasattr(plug_obj, '__name__'):\r\n setattr(plug_obj, '__name__', plugin_name)\r\n plugs.append(plug_obj)\r\n return plugs", "title": "" }, { "docid": "8f3598c8156386020240ffa0bd065995", "score": "0.63020456", "text": "def load_plugins():\n _plugins = {}\n for plugin_id, plugin_data in PLUGINS.items():\n plugin_module_name = f\"tgcf_{plugin_id}\"\n plugin_class_name = f\"Tgcf{plugin_id.title()}\"\n try:\n plugin_module = import_module(plugin_module_name)\n plugin_class = getattr(plugin_module, plugin_class_name)\n plugin = plugin_class(plugin_data)\n assert plugin.id_ == plugin_id\n\n except ModuleNotFoundError:\n logging.error(f\"Could not find plugin for {plugin_id}\")\n except AttributeError:\n logging.error(f\"Found plugin {plugin_id}, but failed to load.\")\n else:\n print(f\"Loaded plugin {plugin_id}\")\n _plugins.update({plugin.id_: plugin})\n return _plugins", "title": "" }, { "docid": "92f407c27bb3f65845bc497a59e5605f", "score": "0.62988406", "text": "def test_load_no_plugins(self, no_config_mock):\n modules = plugin.load_plugin_modules([])\n assert modules == []", "title": "" }, { "docid": "286c8f96f25fe42f3c00ff11c1a2a4f7", "score": "0.62756485", "text": "def load_plugins(plugin_dir=None):\n\n if len(loaded_plugins):\n # prevent loading plugins multiple times\n return\n\n plugin_dirs = [\n plugin_dir,\n os.path.join(os.getcwd(), 'scap', 'plugins'),\n os.path.join(os.path.expanduser('~'), '.scap', 'plugins')\n ]\n\n plugins = find_plugins(plugin_dirs)\n if len(plugins) < 1:\n return\n\n # Turn off those obnoxious *.pyc files for plugins so we don't litter\n maybe_write_bytecode = sys.dont_write_bytecode\n sys.dont_write_bytecode = True\n\n # import each of the plugin modules\n for plugin in plugins:\n # module path relative to scap.plugins:\n plugin_module = \".%s\" % plugin\n try:\n mod = importlib.import_module(plugin_module, \"scap.plugins\")\n # find classes in mod which extend scap.cli.Application\n for objname in dir(mod):\n obj = getattr(mod, objname)\n if type(obj) is type and issubclass(obj, Application):\n if objname in loaded_plugins:\n # duplicate: another plugin already used the same name\n msg = 'Duplicate plugin named %s, skipping.'\n logging.getLogger().warning(msg, objname)\n continue\n # copy the class into the scap.plugins namespace\n setattr(this_module, objname, obj)\n loaded_plugins[objname] = obj\n __all__.append(objname)\n except Exception as e:\n msg = 'Problem loading plugins from module: scap.plugins.%s (%s)'\n err_msg = type(e).__name__ + ':' + str(e)\n logger = logging.getLogger()\n logger.warning(msg % (plugin, err_msg))\n\n # Restore the original setting\n sys.dont_write_bytecode = maybe_write_bytecode", "title": "" }, { "docid": "47d51b69dba04d0982452ac109d29454", "score": "0.6275517", "text": "def load_execute_host_plugins():\n textutils.output_info('Executing ' + str(len(host.__all__)) + ' host plugins')\n for plugin_name in host.__all__:\n plugin = __import__ (\"plugins.host.\" + plugin_name, fromlist=[plugin_name])\n if hasattr(plugin , 'execute'):\n plugin.execute()", "title": "" }, { "docid": "1a6bcf3e4f8d82a2849f24a51aca2f58", "score": "0.627525", "text": "def get_plugins():\n pass", "title": "" }, { "docid": "9bd08ccb33b2b113965c591b39637367", "score": "0.62589246", "text": "def find_plugins(self):\n\n for loader in iter_entry_points(group='vvv', name=None):\n\n try:\n # Construct the plug-in instance\n name = loader.name\n klass = loader.load()\n instance = klass()\n 
logger.debug(\"Loaded plug-in: %s\", name)\n self.plugins[name] = instance\n except Exception as e:\n logger.error(\"Could not load plug-in: %s\", loader)\n raise e", "title": "" }, { "docid": "e422b7cc4d5f6e26a5e1e2a64f3f03d1", "score": "0.6235418", "text": "def load_plugin_manager_plugins(self):\r\n\r\n for plugin in self.plugin_instances:\r\n if not PLUGIN_MANAGER_EXTENSION_TYPE in plugin.capabilities: continue\r\n self._load_plugin(plugin, loading_type = PLUGIN_MANAGER_EXTENSION_TYPE)", "title": "" }, { "docid": "0178898fe7c664d0b612a732e36343c3", "score": "0.6128111", "text": "def load(self):\n self._pm.load_plugins(self.path)", "title": "" }, { "docid": "3753a6d9cde87292d257e6602ce31b2a", "score": "0.6127432", "text": "def loader(module):\n\n\t\tplugins = []\n\t\t# Return empty plugins list when do not have plugin\n\t\t# interface\n\t\ttry:\n\t\t\tmodule.countPlugins()\n\t\texcept Exception as e:\n\t\t\tlogger.warning(\"Module: {} in plugin directory is not a plugin\".format(module.__name__))\n\t\t\treturn plugins\n\t\tfor i in range(0, module.countPlugins()):\n\t\t\tpi = module.createPlugin(i, **kwargs)\n\t\t\tif (pi):\n\t\t\t\tplugins.append(pi)\n\t\t\t\tlogger.info(\"Plugin: {} is loaded\".format(pi.namePlugin()) )\n\t\treturn plugins", "title": "" }, { "docid": "ffb30c37a9440315ba2efc19a404cbe1", "score": "0.61070305", "text": "def get_plugins():", "title": "" }, { "docid": "ac5c4bba7e2269126450aa6fa3e90c3c", "score": "0.6105481", "text": "def test_import_plugin(self):\n self.assertEqual(len(loader.import_plugins(['monitoring'])), 1)", "title": "" }, { "docid": "c4bcdae6cceb75ad1b8c39c3fe10d2f1", "score": "0.6092951", "text": "def testPluginsLoaded(self):\r\n sources, targets = self.pl.load_sources_and_targets(self.filename, True)\r\n self.assertEqual(len(sources), 3)\r\n self.assertEqual(len(targets), 2)", "title": "" }, { "docid": "6f674f01a16b67037c9f1c362ee7e131", "score": "0.60923755", "text": "def loadPlugins(path, catalogue):\n pluginsRoot, _ = xmlUtils.loadToTree(catalogue)\n for pluginNode in pluginsRoot:\n name = pluginNode.find('name').text\n location = pluginNode.find('location').text\n if location is None:\n raise PluginError('Installation is corrupted for plugin \"{}\". 
Check raven/plugins/plugin_directory.xml and try reinstalling using raven/scripts/intall_plugin')\n if name is None:\n name = os.path.basename(location)\n print('Loading plugin \"{}\" at {}'.format(name, location))\n module = loadPluginModule(name, location)\n loadEntities(name, module)", "title": "" }, { "docid": "acaed84ed521d4058c292b4b59f3a9fe", "score": "0.6090377", "text": "def setup_all():\n # Check there is plugin manager instance\n if PluginManager.__instance is None:\n PluginManager()\n if PluginManager.__instance is None:\n return False\n retval = True\n # For each plugin, setup the plugin dependencies\n for name in PluginManager.get_names():\n retval = PluginManager.setup(name) and retval\n return retval", "title": "" }, { "docid": "3cfd572af47c196625bc4d5bf0f7dd9b", "score": "0.6089976", "text": "def test_import_plugin_not_found(self):\n self.assertEqual(len(loader.import_plugins(['bad_name'])), 0)", "title": "" }, { "docid": "c4ce46bc018037d5fdfa621610034ab1", "score": "0.6080716", "text": "def get_all_loaded_plugins(self):\r\n\r\n # creates the loaded plugins instances list\r\n loaded_plugins_instances = []\r\n\r\n # iterates over all the plugin instances\r\n for plugin_instance in self.plugin_instances:\r\n # in case the plugin instance is loaded\r\n if plugin_instance.is_loaded():\r\n # adds the plugin instance to the loaded plugins instances list\r\n loaded_plugins_instances.append(plugin_instance)\r\n\r\n # returns the loaded plugins instances\r\n return loaded_plugins_instances", "title": "" }, { "docid": "7e4517add6f6163c323f7403d7a726b1", "score": "0.6054527", "text": "def external_plugins_loaded(plugins):\n\n for p in plugins:\n command = None\n ready = False\n for app_command in sublime_plugin.application_command_classes:\n if app_command.__name__ == p:\n command = app_command\n break\n if command is not None:\n ready = command.is_ready()\n else:\n # Command isn't found in list, so just return ready\n ready = True\n return ready", "title": "" }, { "docid": "98f254f32f57d13093a2354e0b243735", "score": "0.60352504", "text": "def _initializePlugins(self, dirname, plugins, plugintype, name=None):\n ret = []\n syspathset = set(sys.path)\n\n dirname = os.path.abspath(dirname)\n self.logger.debug(\"looking for plugin(s) in {}\".format(dirname))\n if not os.path.isdir(dirname):\n self.logger.debug(\n \"directory {} does not exist ... 
moving on\".format(dirname)\n )\n return ret\n\n # Include all plugin directories in sys.path for includes\n if dirname not in sys.path:\n syspathset.add(dirname)\n sys.path = list(syspathset)\n\n # Loop through all files in passed dirname looking for plugins\n for filename in os.listdir(dirname):\n filename = dirname + os.sep + filename\n\n # If the file exists\n if os.path.isfile(filename):\n # Split file into a base name plus extension\n basename = os.path.basename(filename)\n base, extension = os.path.splitext(basename)\n\n # If we're a python file and we don't start with _\n # if extension == \".py\" and not basename.startswith(\"_\"):\n # APPPERF-263: If name param is supplied, only attempt to load\n # {name}.py from {app}/bin directory\n if extension == \".py\" and (\n (name is None and not basename.startswith(\"_\")) or base == name\n ):\n self.logger.debug(\"Searching for plugin in file '%s'\" % filename)\n try:\n # Import the module\n # module = imp.load_source(base, filename)\n\n mod_name, mod_path, mod_desc = imp.find_module(base, [dirname])\n # TODO: Probably need to adjust module.load() to be added later so this can be pickled.\n module = imp.load_module(base, mod_name, mod_path, mod_desc)\n plugin = module.load()\n\n # spec = importlib.util.spec_from_file_location(base, filename)\n # plugin = importlib.util.module_from_spec(spec)\n # spec.loader.exec_module(plugin)\n\n # set plugin to something like output.file or generator.default\n pluginname = plugintype + \".\" + base\n plugins[pluginname] = plugin\n\n # Return is used to determine valid configs, so only return the base name of the plugin\n ret.append(base)\n\n self.logger.debug(\n \"Loading module '%s' from '%s'\" % (pluginname, basename)\n )\n\n # 12/3/13 If we haven't loaded a plugin right or we haven't initialized all the variables\n # in the plugin, we will get an exception and the plan is to not handle it\n if \"validSettings\" in dir(plugin):\n self.config._validSettings.extend(plugin.validSettings)\n if \"defaultableSettings\" in dir(plugin):\n self.config._defaultableSettings.extend(\n plugin.defaultableSettings\n )\n if \"intSettings\" in dir(plugin):\n self.config._intSettings.extend(plugin.intSettings)\n if \"floatSettings\" in dir(plugin):\n self.config._floatSettings.extend(plugin.floatSettings)\n if \"boolSettings\" in dir(plugin):\n self.config._boolSettings.extend(plugin.boolSettings)\n if \"jsonSettings\" in dir(plugin):\n self.config._jsonSettings.extend(plugin.jsonSettings)\n if \"complexSettings\" in dir(plugin):\n self.config._complexSettings.update(plugin.complexSettings)\n except ValueError:\n self.logger.error(\n \"Error loading plugin '%s' of type '%s'\"\n % (base, plugintype)\n )\n except ImportError as ie:\n self.logger.warning(\n \"Could not load plugin: %s, skipping\" % base\n )\n self.logger.exception(ie)\n except Exception as e:\n self.logger.exception(str(e))\n raise e\n return ret", "title": "" }, { "docid": "0a1f2c58bd11b3956b4fd62b22c2d080", "score": "0.6033774", "text": "def unloadAll(self):\n for p in self.__loadedPlugins:\n p.unload()\n self.__loadedPlugins = []", "title": "" }, { "docid": "0d890e45598128c112c936a328361310", "score": "0.6014353", "text": "def _setup():\n poll_plugins = []\n hook_plugins = []\n possible_plugins = set(os.listdir(_PLUGIN_FOLDER)).difference(_BLACKLIST)\n\n # Find plugins\n for i in possible_plugins:\n # Check that this is a valid module\n if not _validate_module_name(i):\n continue\n\n # Import module\n module_name = _extract_module_name(i)\n module = 
importlib.import_module(\".\" + module_name, package=__name__)\n\n # Module sorting and validation\n try:\n if \"poll\" in module.TYPE:\n if module.API_VERSION >= _MIN_POLL_API_VERSION:\n poll_plugins.append(module)\n else:\n print(_ERROR_TEMPLATES[\"BAD_API\"].format(module_name))\n if \"hook\" in module.TYPE:\n if module.API_VERSION >= _MIN_HOOK_API_VERSION:\n hook_plugins.append(module)\n else:\n print(_ERROR_TEMPLATES[\"BAD_API\"].format(module_name))\n except AttributeError:\n print(_ERROR_TEMPLATES[\"BAD_HEADER\"].format(module_name))\n except SyntaxError:\n print(_ERROR_TEMPLATES[\"BAD_HEADER\"].format(module_name))\n\n return poll_plugins, hook_plugins", "title": "" }, { "docid": "cbfbeec82cd6846e74e35c99ace52edb", "score": "0.59802026", "text": "def plugin_loaded():\r\n\r\n persist.plugin_is_loaded = True\r\n persist.settings.load()\r\n persist.printf('debug mode:', 'on' if persist.debug_mode() else 'off')\r\n util.create_tempdir()\r\n\r\n for linter in persist.linter_classes.values():\r\n linter.initialize()\r\n\r\n plugin = SublimeLinter.shared_plugin()\r\n queue.start(plugin.lint)\r\n\r\n util.generate_menus()\r\n util.generate_color_scheme(from_reload=False)\r\n util.install_syntaxes()\r\n\r\n persist.settings.on_update_call(SublimeLinter.on_settings_updated)\r\n\r\n # This ensures we lint the active view on a fresh install\r\n window = sublime.active_window()\r\n\r\n if window:\r\n plugin.on_activated(window.active_view())", "title": "" }, { "docid": "8d9a341fbbfece3159f7cc8fec68cbfc", "score": "0.59794676", "text": "def get_plugins(self):\n modules = self._get_modules()\n\n # load plugins again only if a plugin was added or removed\n if len(modules) == len(self._plugins):\n return self._plugins\n\n self._plugins = []\n\n for plugin_module in modules:\n modname, fp, path, desc = plugin_module\n\n try:\n # load the module and look for classes\n module = imp.load_module(modname, fp, path, desc)\n except Exception as e:\n error_msg = \"{exception}: {message}\".format(\n exception=e.__class__.__name__,\n message=unicode(e))\n self._errors[fp.name] = error_msg\n continue\n\n classes_in_module = inspect.getmembers(module, inspect.isclass)\n\n for cls in classes_in_module:\n cls = cls[1]\n\n # ignore if it's the base plugin class\n if cls.__name__ == self.plugin_type.base_class.__name__:\n continue\n\n # make sure that the class is a subclass of the plugin's\n # base class\n if self.plugin_type.base_class in cls.__bases__:\n self.register_plugin(cls)\n\n return self._plugins", "title": "" }, { "docid": "e7c8c91212ce59e819da0e42cfc0cdf6", "score": "0.597294", "text": "def command(self, bot, comm, groups):\n plugins = PluginUtils.get_plugins(bot)\n names = ', '.join(p.name for p in plugins)\n bot.reply(comm, 'Loaded Plugins: {0}.'.format(names))\n return True", "title": "" }, { "docid": "e442dfc446f845e9a43f124e885d65ec", "score": "0.59726703", "text": "def test_raises_when_loading_default_plugins_without_allow_qualified(self):\n default_plugin_qualnames = plugin.get_qualified_module_names(\n _repobee.ext.defaults\n )\n\n with pytest.raises(exception.PluginLoadError) as exc_info:\n plugin.load_plugin_modules(default_plugin_qualnames)\n\n assert \"failed to load plugin module\" in str(exc_info.value)", "title": "" }, { "docid": "198ef8a052e161ec2e62d56b9ad57674", "score": "0.5961389", "text": "def load_startup_plugins(self):\r\n\r\n # iterates over all the plugin instances\r\n for plugin in self.plugin_instances:\r\n # searches for the startup type in the plugin capabilities\r\n # in 
case the plugins contains such capability must load\r\n # it because it's considered to be a startup plugin\r\n if STARTUP_TYPE in plugin.capabilities: self._load_plugin(plugin, loading_type = STARTUP_TYPE)", "title": "" }, { "docid": "373057132c7785d8325862d8e9450550", "score": "0.5956981", "text": "def register_core_plugins():\n register_plugin(rho_bot_scheduler)\n register_plugin(rho_bot_configuration)\n register_plugin(rho_bot_roster)\n register_plugin(rho_bot_storage_client)\n register_plugin(rho_bot_rdf_publish)\n register_plugin(rho_bot_representation_manager)\n register_plugin(rho_bot_get_or_lookup)\n register_plugin(rho_bot_get_or_create)\n register_plugin(export_configuration)\n register_plugin(import_configuration)\n register_plugin(reset_configuration)", "title": "" }, { "docid": "67590941dcbd13a979a5519c331f7a31", "score": "0.59513134", "text": "def start_plugin_polling(self):\r\n for plugin in [key for key in self.config.application.keys()\r\n if key not in self.IGNORE_KEYS]:\r\n LOGGER.info('Enabling plugin: %s', plugin)\r\n plugin_class = None\r\n\r\n # If plugin is part of the core agent plugin list\r\n if plugin in plugins.available:\r\n plugin_class = self._get_plugin(plugins.available[plugin])\r\n\r\n # If plugin is in config and a qualified class name\r\n elif '.' in plugin:\r\n plugin_class = self._get_plugin(plugin)\r\n\r\n # If plugin class could not be imported\r\n if not plugin_class:\r\n LOGGER.error('Enabled plugin %s not available', plugin)\r\n continue\r\n\r\n self.poll_plugin(plugin, plugin_class,\r\n self.config.application.get(plugin))", "title": "" }, { "docid": "77b401dc24a5abf43bfc99e79d5f468f", "score": "0.5948454", "text": "def populate(self, modules):\n for module in modules:\n try:\n m = importlib.import_module(module)\n except SyntaxError as e:\n print(\"module '{}' failed loading with message {}\".format(module,e.msg))\n continue\n\n if hasattr(m, 'Plugin'):\n self.add_plugin(m.Plugin)\n continue\n\n for name, plugin_class in inspect.getmembers(m):\n if 'Plugin' in name:\n p = plugin_class()\n self.add_plugin(p)", "title": "" }, { "docid": "9bd49c8ff5dd8ed2fd4ee3d31d48bdf0", "score": "0.5943103", "text": "def start_plugins(self):\r\n\r\n # retrieves all the plugin classes available\r\n self.plugin_classes = self.get_all_plugin_classes()\r\n\r\n # iterates over all the available plugin classes\r\n for plugin in self.plugin_classes:\r\n # retrieves the plugin id\r\n plugin_id = plugin.id\r\n\r\n # sets the plugin class in the plugin classes map\r\n self.plugin_classes_map[plugin_id] = plugin\r\n\r\n # starts the plugin (creating the singleton) in\r\n # case the plugin is not currently loaded\r\n if not plugin in self.loaded_plugins: self.start_plugin(plugin)", "title": "" }, { "docid": "6a35618734fcaa8dc12034dadfc29fc0", "score": "0.5905988", "text": "def get_plugins(self):\n pass", "title": "" }, { "docid": "b2f67c40ab372888a54f9f0fdf175604", "score": "0.5901993", "text": "def testEnablePlugins(self):\n parser = interface.BaseParser()\n\n parser.EnablePlugins([])\n self.assertEqual(len(parser._plugins_per_name), 0)\n\n parser.EnablePlugins(parser.ALL_PLUGINS)\n self.assertEqual(len(parser._plugins_per_name), 0)", "title": "" }, { "docid": "e8e747338361824281ed449e6332803f", "score": "0.5860712", "text": "def rescan_plugins(self):\n component.get(\"CorePluginManager\").scan_for_plugins()", "title": "" }, { "docid": "e952c39df7cffa1323fd6876a9a4cf41", "score": "0.5859997", "text": "def activate_plugins(plugin_names):", "title": "" }, { "docid": 
"e88833b7328b31ddd0034c3a331da67e", "score": "0.58597225", "text": "def load(conf):\n global modules, config\n\n config.read(conf)\n flist = sortmodule([f[:-3] for f in os.listdir(basedefs.DIR_PLUGINS) if f[-3:] == \".py\"])\n \n for sec, fname in flist:\n try:\n #modules.append(loadplugins(sec))\n modules[sec] = loadplugins(fname)\n except Exception as e:\n logging.error(e)\n logging.error(traceback.format_exc())\n raise InstallError(\"failed when load plugins\")\n\n return modules", "title": "" }, { "docid": "1c967ee8a8c20d2e1c2020a3dd02859f", "score": "0.58570623", "text": "def get_available_plugins():\n print(\"Available plugins:\\n\")\n\n for instrument, class_name in _available_plugins.iteritems():\n\n print(\"%s for %s\" % (class_name, instrument))", "title": "" }, { "docid": "b4a5447ac807cf56e40fb508b68ec1d6", "score": "0.58551806", "text": "def test_load_plugins_multiple(self, load_config):\n mock_config = MagicMock()\n mock_config.get.return_value = 'lunch, todo'\n load_config.return_value = mock_config\n plugins = PluginManager().load_plugins()\n\n self.assertEqual(2, len(plugins))\n self.assertEqual(\n 'ashaw_notes.plugins.lunch',\n plugins[0].__module__\n )\n self.assertEqual(\n 'ashaw_notes.plugins.todo',\n plugins[1].__module__\n )", "title": "" }, { "docid": "af18a7121862896cd99c62f473f2a7fa", "score": "0.5849801", "text": "def discoverable_plugins():\n discovered_plugins = {}\n for entry_point in iter_entry_points(\"framework\"):\n try:\n ep = entry_point.load()\n discovered_plugins[entry_point.name] = ep\n except (DistributionNotFound, ImportError):\n logging.exception(f\"Plugin {entry_point.name} not found\")\n return discovered_plugins", "title": "" }, { "docid": "2047b0e96b8046ccb42c36fe1f036b4c", "score": "0.58488655", "text": "def load_default_plugins_auto(self, load_default_plugins):\n yield", "title": "" }, { "docid": "61cc98fc936279ea4593dba22a7ae302", "score": "0.58452135", "text": "def _plugins(args):\n\n # plugins\n retval = 0\n for plugin in plugins.PLUGINS:\n if plugins.install(plugin, args) == 1:\n retval = 1\n\n return retval", "title": "" }, { "docid": "71fcc32d5ac4938f29490688d8d108f5", "score": "0.5836939", "text": "def discoverPlugins():\n availablePlugins = {}\n pluginDir = 'plugins'\n #basePluginPath = sys.path[0]\n if sys.platform == 'win32':\n basePluginPath = sys.path[0].split('library.zip')[0]\n if sys.platform == 'darwin':\n basePluginPath = sys.path[0].split('find.app')[0]\n \n # make sure the plugins folder exists\n if not os.path.exists(os.path.join(basePluginPath, pluginDir)):\n raise error.PluginError(\"The \\\"plugins\\\" directory is missing, please place in the same folder as the FIND program file.\")\n return\n \n if not basePluginPath in sys.path:\n sys.path.insert(0, basePluginPath)\n \n #TODO: look at recursive descent for module discovery\n # Accumulate available plugin modules\n errMsg = []\n for type_ in pluginTypes:\n path = os.path.join(basePluginPath, pluginDir, type_)\n if not os.path.exists(path):\n errMsg.append(\"The \\\"%s\\\" plugins directory is missing.\" % type_)\n else:\n availablePlugins[type_] = [fname[:-3] for fname in os.listdir(path) \n if fname.endswith(\".py\") and \n not fname.startswith(\"__init__\")]\n \n # raise error with all missing plugin subdirectories\n if len(errMsg) > 0:\n raise error.PluginError('\\n\\t'.join(errMsg))\n return\n \n return availablePlugins", "title": "" }, { "docid": "6f111ebbda9a8aa2ce3e2927e1ed4fe1", "score": "0.5824811", "text": "async def load_extensions(self) -> None:\n for 
extension in self.extension_list:\n try:\n self.load_extension(extension)\n logger.debug(f\"Cog {extension} loaded.\")\n except Exception as e:\n logger.error(f\"Cog {extension} failed to load with {type(e)}: {e}\")", "title": "" }, { "docid": "a2615ad584fca8a37ed7c93d3ce6b5c4", "score": "0.58019894", "text": "def discover():\n if CFG[\"plugins\"][\"autoload\"].value():\n log = logging.getLogger('benchbuild')\n experiment_plugins = CFG[\"plugins\"][\"experiments\"].value()\n for ep in experiment_plugins:\n try:\n importlib.import_module(ep)\n log.debug(\"Found experiment: {0}\".format(ep))\n except ImportError as ie:\n log.error(\"Could not find '{0}'\".format(ep))\n log.error(\"ImportError: {0}\".format(ie.msg))", "title": "" }, { "docid": "eed22a941ddea5d5c5f72026b9b8a75d", "score": "0.5801577", "text": "def register_plugins(bus=None):\n from ..plugins import RunnablePlugin\n\n for plugin in get_enabled_plugins().values():\n if isinstance(plugin, RunnablePlugin):\n plugin.bus = bus\n plugin.start()", "title": "" }, { "docid": "4cc6ce98447d2a4204a98706ea0e1ecd", "score": "0.5796698", "text": "def init_plugins(self, disabled_glob=(), enable_plugins=(), cli=None):\n # :api\n if self.conf.plugins:\n self._plugins._load(self.conf, disabled_glob, enable_plugins)\n self._plugins._run_init(self, cli)", "title": "" }, { "docid": "adb3dd35836d0bd0be00bc914d8ea6ef", "score": "0.57896024", "text": "def finishLoadPlugin(name):\n if name not in _delayedPlugins:\n #Try to find module in system (such as from pip install)\n #Note that loadEntities (below) checks if there is a subclass of\n # PluginBase.PluginBase before using it, so this is safe.\n spec = importlib.util.find_spec(name)\n if spec is None:\n return False\n plugin = importlib.util.module_from_spec(spec)\n _delayedPlugins[name] = (spec, plugin, False)\n spec, plugin, loaded = _delayedPlugins[name]\n if not loaded:\n spec.loader.exec_module(plugin)\n loadEntities(name, plugin)\n #Set loaded flag to true to prevent reloading\n _delayedPlugins[name] = (spec, plugin, True)\n return True", "title": "" }, { "docid": "5d3b2c565665ef24d1f508ce0197f5ef", "score": "0.57852995", "text": "def loadedPlugins(self):\n return self.__loadedPlugins", "title": "" }, { "docid": "1434668d47af9a45a3cc7971c75aa1b9", "score": "0.57738394", "text": "def load_plugins(config):\n plugin = {}\n for p in ('retrieval', 'decryption', 'verification', 'scrubbing', 'reporting'):\n try:\n module_name = 'erawan.plugins.{}.{}'.format(p, config['plugins'][p]['name'])\n except KeyError:\n logger.critical('Could not import %s plugin; is it set in config?', p)\n sys.exit(1)\n try:\n plugin[p] = importlib.import_module(module_name)\n except ImportError:\n logger.critical('Unable to load plugin \"%s\"', module_name)\n sys.exit(1)\n return plugin", "title": "" }, { "docid": "ca103e644f66671feac38b2f5232642f", "score": "0.57731414", "text": "def loadPlugins(pluginfldr,hivesdict):\n pluginmanager = PluginManager()\n pluginmanager.setPluginPlaces([pluginfldr])\n pluginmanager.collectPlugins()\n\n print \"\"\n logging.info(\"[PLUGINS IDENTIFIED: %s]\" % len(pluginmanager.getAllPlugins()))\n\n compat_plugins = list()\n incompat_plugins = list()\n for plugin in pluginmanager.getAllPlugins():\n compat = 1\n for req in plugin.plugin_object.getRequirements():\n if (hivesdict[req][0] <> 1):\n compat = 0\n break\n if compat:\n compat_plugins.append(plugin.name)\n else:\n incompat_plugins.append(plugin.name)\n\n logging.info(\" [%s] Compatible Plugins:\" % len(compat_plugins))\n for plugin in 
compat_plugins:\n logging.info(\" - %s\" % plugin)\n logging.info(\" [%s] Incompatible Plugins:\" % len(incompat_plugins))\n for plugin in incompat_plugins:\n logging.info(\" - %s\" % plugin)\n\n return pluginmanager, compat_plugins", "title": "" }, { "docid": "d86c186eb48e0358d8b7c2f56f51dec0", "score": "0.57704806", "text": "def load_py_files():\n def _load_py_files(env, search_path, auto_enable=None):\n for path in search_path:\n plugin_files = locate(\"*.py\", path)\n for plugin_file in plugin_files:\n try:\n plugin_name = os.path.basename(plugin_file[:-3])\n module_name = _get_module_name(plugin_file)\n import_module(module_name)\n _enable_plugin(env, plugin_name)\n except NotImplementedError, e:\n #print \"Cant Implement This\"\n pass\n\n\n return _load_py_files", "title": "" }, { "docid": "92a08a26d0e9538024234c209ca11eb1", "score": "0.57653815", "text": "def init_plugins(self):\n\n assert self.project_path\n assert os.path.exists(self.project_path)\n\n for plugin_id, instance in self.plugins.items():\n\n try:\n\n plugin_installation = os.path.join(self.installation, plugin_id)\n\n instance.init(\n plugin_id=plugin_id,\n main=self,\n reporter=self.reporter,\n options=self.options_data,\n files=self.files_data,\n installation_path=plugin_installation,\n walker=self.walker,\n project_path=self.project_path\n )\n\n instance.setup_options()\n except Exception as e:\n logger.error(\"Could not initialize plug-in: %s\", plugin_id)\n raise e", "title": "" }, { "docid": "d85918a9bc5eee3652fbfca95d1b746b", "score": "0.57314444", "text": "def loaded_plugins(self):\n return self.__plugins.keys()", "title": "" }, { "docid": "ed219590e4d56d2d6a1e3106f28c98fe", "score": "0.56982917", "text": "def load_modules():\n\n # logger = logging.getLogger(__name__)\n locations = [marvin.support.path.PLUGIN_PATH]\n modules = []\n for finder, name, ispkg in pkgutil.walk_packages(locations):\n try:\n loader = finder.find_module(name)\n mod = loader.load_module(name)\n except:\n Log.warn(\"Skipped loading module '{0}' due to an error.\", name)\n else:\n # if hasattr(mod, 'WORDS'):\n modules.append(mod)\n # else:\n # Log.warn(\"Skipped loading module '{0}' because it misses \" +\n # \"the WORDS constant.\", name)\n modules.sort(key=lambda mod: mod.PRIORITY if hasattr(mod, 'PRIORITY')\n else 0, reverse=True)\n return modules", "title": "" }, { "docid": "2f86f70acff17b995cbd67be5b3e67b4", "score": "0.5694283", "text": "def load_main_plugins(self):\r\n\r\n # iterates over all the plugin instances\r\n for plugin in self.plugin_instances:\r\n # searches for the main type in the plugin capabilities\r\n # in case the plugins contains such capability must load\r\n # it because it's considered to be a main plugin\r\n if MAIN_TYPE in plugin.capabilities: self._load_plugin(plugin, loading_type = MAIN_TYPE)", "title": "" }, { "docid": "5b9dfcb1572fc614237e168f57979d76", "score": "0.5691813", "text": "def start_plugin_manager_plugins(self):\r\n\r\n # retrieves all the plugin manager plugin classes available\r\n self.plugin_classes = self.get_all_plugin_classes(PluginManagerPlugin)\r\n\r\n # iterates over all the available plugin manager plugin classes\r\n for plugin in self.plugin_classes:\r\n # retrieves the plugin id\r\n plugin_id = plugin.id\r\n\r\n # sets the plugin class in the plugin classes map\r\n self.plugin_classes_map[plugin_id] = plugin\r\n\r\n # tests the plugin for loading, verifying that it's\r\n # current loaded and then starts the plugin\r\n if not plugin in self.loaded_plugins: continue\r\n 
self.start_plugin(plugin)", "title": "" }, { "docid": "a4d7bf7cf95dad7aec3324c161b162ff", "score": "0.5677742", "text": "def veerify_installed_plugins(self):\n cmd = \"feature:list\"\n error_flag = 0\n for plugin in config.ODL_PLUGINS:\n pattern = \"{0}\\s+\\|.*\\|\\s*x\\s+\\|\".format(plugin)\n if self.execute_command(cmd=cmd,exp_out=pattern):\n continue\n else:\n self.log_handler.writeTolog(msg=\"****** PLUGIN is NOT INSTALLED Properly, Verification FAILED ******\")\n error_flag += 1 \n return False if error_flag > 0 else True", "title": "" }, { "docid": "cc71a6579b2ce27854d627fe356afc6f", "score": "0.5674591", "text": "def get_plugins(cls):\n return []", "title": "" }, { "docid": "c10d2804bc3eda8526fd37d1c68fd9f1", "score": "0.5656471", "text": "def check_plugins(package):\n if not package.cmake:\n return\n defined_macros = package.source_code.search_for_pattern(PLUGIN_RE)\n existing_plugins = plugin_xml_by_package(package)\n defined_plugins = package.manifest.get_plugin_xmls()\n build_rules = package.cmake.get_source_build_rules('add_library', resolve_target_name=True)\n\n for rel_fn, plugin_info in defined_macros.items():\n library = lookup_library(build_rules, rel_fn)\n # pkg2/name2 is the parent class\n for pkg1, name1, pkg2, name2 in plugin_info:\n # Create file if needed\n if pkg2 not in existing_plugins:\n xml_filename = '%s_plugins.xml' % pkg2\n print('\\tCreating %s' % xml_filename)\n p_xml = PluginXML(xml_filename, os.path.join(package.root, xml_filename))\n package.plugin_configs.append(p_xml)\n existing_plugins[pkg2] = [p_xml]\n\n # Make sure plugins are properly exported\n for plugin_xml in existing_plugins[pkg2]:\n if plugin_xml.rel_fn not in defined_plugins[pkg2]:\n ex_el = package.manifest.add_plugin_export(pkg2, plugin_xml.rel_fn)\n enforce_tabbing_helper(package.manifest, ex_el, 2)\n\n # Make sure the class is in the files\n if not contains_library(existing_plugins[pkg2], library, pkg1, name1):\n # insert into first\n xml = existing_plugins[pkg2][0]\n xml.insert_new_class(library, pkg1, name1, pkg2, name2)", "title": "" }, { "docid": "3c67082771f00c8ced6d0e6a0f44ea51", "score": "0.56513125", "text": "def register_plugins(self):\n self.plugin_manager.register(BasePlugin())\n self.plugin_manager.register(SubcmdPlugin())", "title": "" }, { "docid": "4abac434c9ce320fcaef9200f2a39092", "score": "0.5645184", "text": "def get_plugins():\n installed_plugins = []\n for ep in pr.iter_entry_points(group='gaia.plugins'):\n try:\n module = ep.load()\n importlib.import_module(module.__name__)\n installed_plugins.append(module)\n if hasattr(module, 'get_config'):\n config.update(module.get_config())\n except ImportError:\n logger.error('Could not load module: {}'.format(\n traceback.print_exc()))\n return installed_plugins", "title": "" }, { "docid": "a239dc76fad6393ecf996e606f714d12", "score": "0.5637939", "text": "def load_plugins(config, plugin_kwargs):\n installed_plugins = _gather_installed_plugins()\n metrics_plugin = _get_metrics_plugin(config, installed_plugins)\n if metrics_plugin:\n plugin_kwargs['metrics'] = metrics_plugin\n\n active_plugins = _get_activated_plugins(config, installed_plugins)\n if not active_plugins:\n return [], [], [], None\n plugin_namespaces = _get_plugin_config_keys(active_plugins)\n plugin_configs = _load_plugin_configs(plugin_namespaces, config)\n plugin_names, plugins, errors = _init_plugins(\n active_plugins, installed_plugins, plugin_configs, plugin_kwargs)\n return plugin_names, plugins, errors, plugin_kwargs", "title": "" }, { "docid": 
"437487fa3ee2fe5cd1186b54bc1d4dbf", "score": "0.56353956", "text": "def configure_plugins(self):\n # :api\n self._plugins._run_config()", "title": "" }, { "docid": "5b7ec78dcd859e4daae914b6be8c23d4", "score": "0.56248754", "text": "def load(self, ignore_blacklist=False):\n if self.is_loaded:\n log.warning('Trying to load already loaded plugin %r.' %\n self.data['name'])\n return\n\n if not ignore_blacklist and self.is_blacklisted:\n raise addons.DependencyError('Plugin %r is blacklisted.' % \n self.data['name'])\n\n # Check our dependencies for safety.\n addons.check_dependencies(self)\n\n # Load our dependencies.\n for name in self.requires.iterkeys():\n if name == '__app__' or (':' in name and not\n name.startswith('plugin:')):\n continue\n dep = addons.get('plugin', name)\n\n # Make sure we're needed.\n if not self.name in dep.needed_by:\n dep.needed_by.append(self.name)\n\n # If it's loaded, just continue.\n if dep.is_loaded:\n continue\n dep.load()\n\n # Okay, now load!\n self._do_load()", "title": "" }, { "docid": "dd151a0874df861566182b10974c604f", "score": "0.56133425", "text": "def command(self, bot, comm, groups):\n name = groups[0]\n plugins = PluginUtils.get_plugins(bot)\n matched_plugins = [p for p in plugins if p.name == name]\n if len(matched_plugins) != 0:\n bot.reply(comm, \"%s is already loaded.\" % name)\n return False\n\n # Fun fact: the fresh thing is just a dummy. It just can't be None\n new_plugin = twisted.plugin.retrieve_named_plugins(\n IPlugin, [name], 'hamper.plugins', {'fresh': True})[0]\n\n bot.factory.loader.registerPlugin(new_plugin)\n bot.reply(comm, 'Loading {0}.'.format(new_plugin))\n return True", "title": "" }, { "docid": "a998050f7e268c9bfd16890e7a7d7a95", "score": "0.5605278", "text": "def load_extra_plugins(pathspec):\n # Collect names of loaded plugins so we have something to test\n # success/failure against\n loaded_plugins = []\n\n paths = pathspec.split(':')\n for path in paths:\n loaded_plugins.extend(_load_plugins_from_dir(path))\n\n return loaded_plugins", "title": "" }, { "docid": "416cb6b3eeda012ff007371776b39107", "score": "0.5588129", "text": "def loadCheckedLoadables(self):\n self.loadableTable.updateCheckstate()\n loadableCount = 0\n for plugin in self.loadablesByPlugin:\n for loadable in self.loadablesByPlugin[plugin]:\n if loadable.selected:\n loadableCount += 1\n self.progress = qt.QProgressDialog(self.window)\n self.progress.minimumDuration = 0\n self.progress.show()\n self.progress.setValue(0)\n self.progress.setMaximum(loadableCount)\n step = 0\n for plugin in self.loadablesByPlugin:\n for loadable in self.loadablesByPlugin[plugin]:\n if self.progress.wasCanceled:\n break\n slicer.app.processEvents()\n self.progress.setValue(step)\n slicer.app.processEvents()\n if loadable.selected:\n self.progress.labelText = '\\nLoading %s' % loadable.name\n slicer.app.processEvents()\n if not plugin.load(loadable):\n qt.QMessageBox.warning(slicer.util.mainWindow(), \n 'Load', 'Could not load: %s as a %s' % (loadable.name,plugin.loadType))\n step += 1\n self.progress.setValue(step)\n slicer.app.processEvents()\n self.progress.close()\n self.progress = None\n self.close()", "title": "" }, { "docid": "f5a0d9b86475fe88e5385586184e960e", "score": "0.5579736", "text": "def load_plugins_dir(dirname):", "title": "" }, { "docid": "a30c64138abb6bdce55323859d29eb77", "score": "0.55797166", "text": "def get_plugin_manager_plugins_loaded(self):\r\n\r\n return self.plugin_manager_plugins_loaded", "title": "" }, { "docid": "db59f48dbaa04346f601e5f6abab9cd0", 
"score": "0.55631167", "text": "def load_py_files():\r\n def _load_py_files(env, search_path, auto_enable=None):\r\n for path in search_path:\r\n plugin_files = glob(os.path.join(path, '*.py'))\r\n for plugin_file in plugin_files:\r\n try:\r\n plugin_name = os.path.basename(plugin_file[:-3])\r\n env.log.debug('Loading file plugin %s from %s' % \\\r\n (plugin_name, plugin_file))\r\n if plugin_name not in sys.modules:\r\n module = imp.load_source(plugin_name, plugin_file)\r\n if path == auto_enable:\r\n _enable_plugin(env, plugin_name)\r\n except Exception as e:\r\n env.log.error('Failed to load plugin from %s: %s',\r\n plugin_file,\r\n exception_to_unicode(e, traceback=True))\r\n\r\n return _load_py_files", "title": "" } ]
d2de2d49a99e311fe7785f9523cffc0f
Remove a child handler.
[ { "docid": "85b819a21028347d4916f20c7b44ca3d", "score": "0.7669957", "text": "def removeHandler(handler):", "title": "" } ]
[ { "docid": "63c1c16a7834019e5cad6290cb3e4810", "score": "0.7600718", "text": "def remove_handler(self, handler):\n self.handlers.remove(handler)", "title": "" }, { "docid": "3a2d370956a7b1319092fbc4335fbb48", "score": "0.7523634", "text": "def remove_handler(self, fd):", "title": "" }, { "docid": "a11df2d526635964abcd4376be1ed29f", "score": "0.73864824", "text": "def remove_hanlder(self, handler):\n if handler in self.handlers:\n self.handlers.remove(handler)", "title": "" }, { "docid": "f18e7aaef78981bbbab2216cf289b0fc", "score": "0.7103593", "text": "def __remove_handler(self, handler):\n self.h.remove(handler)\n self._logger.removeHandler(handler)", "title": "" }, { "docid": "fa27ba739ff4455767f34eff0cbb1796", "score": "0.6946388", "text": "def removeHandler(self, handler):\n self.streamManager.removeHandler(handler)", "title": "" }, { "docid": "9648c4b6328e5472cb3f67e03b648d97", "score": "0.68827665", "text": "def remove_handler(self, handler=None, request=None):\n code = 0\n if request != None:\n code = codes.codes[request].data()\n\n with self._handlers_lock:\n # Remove Handler\n if code in self._handlers: \n if handler != None and handler in self._handlers[code]:\n self._handlers[code].remove(handler)\n elif handler == None:\n self._handlers[code] = []", "title": "" }, { "docid": "f9adaffb5e7c7b3f8c59f65767df2ce0", "score": "0.6882579", "text": "def child_removed(self, child):\n pass", "title": "" }, { "docid": "a02845085b98432fcfdd562cecd42c53", "score": "0.67751646", "text": "def delete_handler(self):\n if self._hypervisor_handler:\n self._hypervisor_handler.close()\n self._hypervisor_handler = None", "title": "" }, { "docid": "d9a2f22e90e05d0039c1d4788e1343f8", "score": "0.67251134", "text": "def removeHandler(self, handler_name):\n\n if not self.handler_exists(handler_name):\n raise Exception(\"Cannot remove handler {}! No handler with that name found!\".format(handler_name))\n\n super().removeHandler(self.handlers[handler_name])\n self.inheritor_handlers.pop(handler_name)", "title": "" }, { "docid": "a175023370e471b1d82b76a80d9e6039", "score": "0.672014", "text": "def test_removeHandler(self):\n handler = XMPPHandler()\n handler.setHandlerParent(self.factory.streamManager)\n handler.disownHandlerParent(self.factory.streamManager)\n self.assertNotIn(handler, self.factory.streamManager)\n self.assertIdentical(None, handler.parent)", "title": "" }, { "docid": "ea9ad8f3f60f4a7cfab5278d2bee9ae6", "score": "0.6605218", "text": "def remove(self) -> None:\n handler = self.handler()\n engine = self.engine()\n\n if handler is None or engine is None:\n return\n\n if hasattr(handler, \"_parent\"):\n handler = handler._parent()\n if handler is None:\n raise RuntimeError(\n \"Internal error! Please fill an issue on https://github.com/pytorch/ignite/issues \"\n \"if encounter this error. 
Thank you!\"\n )\n\n if isinstance(self.event_name, EventsList):\n for e in self.event_name:\n if engine.has_event_handler(handler, e):\n engine.remove_event_handler(handler, e)\n else:\n if engine.has_event_handler(handler, self.event_name):\n engine.remove_event_handler(handler, self.event_name)", "title": "" }, { "docid": "d1f30479635ef881430cd1da6c2897e0", "score": "0.6560829", "text": "def add_handler(self, handler, handler_name, replace = False, ignore_duplicate = False, quiet = False):\n super()._add_child(handler, handler_name, replace = replace, ignore_duplicate = ignore_duplicate,\n quiet = quiet)\n return", "title": "" }, { "docid": "916d10ae1190b2579a8d3c79551d967f", "score": "0.65423375", "text": "def removeHandler(self, handler):\n self._logger.removeHandler(handler)", "title": "" }, { "docid": "b4cbfc64a653f319884d7009a54756a4", "score": "0.6493946", "text": "def __isub__(self, handler):\n self.__receivers.remove(handler)\n return self", "title": "" }, { "docid": "cf2282576b9d15bb04bbcdda53648725", "score": "0.6450347", "text": "def remove_handler(self, is_input, index: int) -> None:\n if is_input:\n self.remove_input(index)\n else:\n self.remove_output(index)", "title": "" }, { "docid": "c45942cf8d6dff24924f6e7bf4a04113", "score": "0.64359504", "text": "def remove_handler(self, *handlers):\n handlers = list(set(itertools.chain(*[self._to_handler(h) for h in handlers])))\n for h in handlers:\n h.remove_file(self.name)\n self._update(remove = handlers)", "title": "" }, { "docid": "61cab82df9ed93686ef65f7d55395174", "score": "0.6349693", "text": "def removeHandler(self):\n self.writers[-1].endDocument()\n del self.writers[-1]\n return", "title": "" }, { "docid": "07ea9b9ed86798f928653fc03aeba1b4", "score": "0.6344045", "text": "def remove_handler(key, handler):\n if type(key) is Predicate:\n key = key.name\n getattr(Q, key).remove_handler(handler)", "title": "" }, { "docid": "708a9443eda5047f1b75f9e2ed17c261", "score": "0.63343525", "text": "def remove_child(self, child):\r\n self.children.remove(child)", "title": "" }, { "docid": "d0f6160910a267a201268dfbfe856fb1", "score": "0.6327482", "text": "def child_removed(self, child: WebComponent):\n w = self.widget\n if w is not None:\n w.remove(child.widget)", "title": "" }, { "docid": "3d61132e67fb6fe9dc83fdab8abea9fd", "score": "0.6306453", "text": "def child_removed(self, child):\n if isinstance(child, QtMenu):\n self.widget().removeAction(child.widget().menuAction())\n elif isinstance(child, QtAction):\n self.widget().removeAction(child.widget())\n elif isinstance(child, QtActionGroup):\n self.widget().removeActions(child.actions())", "title": "" }, { "docid": "89586c9c05cc291ef4a39b9420c7b819", "score": "0.6234144", "text": "def remove_handler(self, id):\n handlers = self._handlers\n id = unicode(id)\n if id in handlers:\n handler, logger_name = handlers.pop(id)\n logger = logging.getLogger(logger_name)\n logger.removeHandler(handler)\n for filter_id in self._filters.keys():\n infos = self._filters[filter_id]\n if infos[1] == id:\n del self._filters[filter_id]\n\n self.filter_ids = self._filters.keys()\n self.handler_ids = self._handlers.keys()", "title": "" }, { "docid": "548b7a79813ed0343a24618d325abae3", "score": "0.6232785", "text": "def disownHandlerParent(parent):", "title": "" }, { "docid": "ed30af7004209efaf975812095704e03", "score": "0.6229243", "text": "def remove_handler(self, exception):\n self._exception_handlers.pop(exception, None)", "title": "" }, { "docid": "5d1eebcca932623b8fdd71432b9ea0e5", "score": 
"0.6176159", "text": "def delete_child(self, child: ParserNode) -> None:\n return # pragma: no cover", "title": "" }, { "docid": "9a06f3fac300aa7ceb71d23b27531f86", "score": "0.613862", "text": "async def _on_child_removed(self, child):\n _L.debug(f'Child removed: {child.pid}, {child.origin}, {child.argv}')", "title": "" }, { "docid": "7d3a31568215953e34fa490e4abdcf83", "score": "0.6090841", "text": "def _remove_handlers(mcs, logger: logging.Logger, file_handler: logging.FileHandler):\n\n logger.removeHandler(file_handler)", "title": "" }, { "docid": "14d0d1b2afdab3c4d2e7e3aed1410057", "score": "0.60696393", "text": "def child_removed(self, child):\n super(NodeContent, self).child_removed(child)", "title": "" }, { "docid": "9692f133fff3369281af4e3c0c2c07e1", "score": "0.6059061", "text": "def unregister_handler(self, handle):\n self.__handler.unregister_handler(handle)", "title": "" }, { "docid": "87b08530eacf4d2290aca0a3ff38f4e1", "score": "0.6041732", "text": "def remove_handlers(self, uavcan_type):\n self._handler_dispatcher.remove_handlers(uavcan_type)", "title": "" }, { "docid": "38d9308501655eacf21338b4cf065f97", "score": "0.6018849", "text": "def del_handler(\n obj: logging.Logger,\n hname: str = \"\",\n htype: logging.Handler = None,\n traverse: bool = True,\n) -> Dict[str, List[logging.Handler]]:\n found = find_handlers(obj=obj, hname=hname, htype=htype, traverse=traverse)\n for name, handlers in found.items():\n for handler in handlers:\n logging.getLogger(name).removeHandler(handler)\n return found", "title": "" }, { "docid": "a697bcc6dd1a9c080587ddaa3b0c4b65", "score": "0.5975939", "text": "def del_slix_event_handler(self, module, event_name, handler):\n self.core.xmpp.del_event_handler(event_name, handler)", "title": "" }, { "docid": "5c671a21c7f2e47bf840a5ed2cd54701", "score": "0.5962276", "text": "def childRemoved(self, childResource:Resource, originator:str) -> None:\n\t\tCSE.notification.checkSubscriptions(self, NotificationEventType.deleteDirectChild, childResource)", "title": "" }, { "docid": "c7d1cf734b69d5d385af27ac746873eb", "score": "0.5951292", "text": "def delete(self):\n\n # TODO find a way to remove this when sub-classing in HCRoot\n self.parent.del_child(self)", "title": "" }, { "docid": "66f04b1af3ada443fd9d3334fb210a5e", "score": "0.59508365", "text": "def del_event_handler(self, event_name, handler):\n return self.api.del_event_handler(event_name, handler)", "title": "" }, { "docid": "a9771b4f8a8d553ada2db1f9086048f5", "score": "0.59346044", "text": "def _unRegisterChild(self, fd):\n \n del self._activeProcesses[fd]", "title": "" }, { "docid": "c0d49a94b0aa94818bccb879f0b4b1fe", "score": "0.58867985", "text": "def remove_handler_by_name(self, handler_name: str):\n for h in self._logger.handlers:\n if h.get_name() == handler_name:\n self._logger.removeHandler(h)\n break", "title": "" }, { "docid": "7955ac4b206718b128f16d1294c58b26", "score": "0.58552915", "text": "def remove_child(self, index):\n\t\tdel self.child[index]", "title": "" }, { "docid": "bdd87e886cd05860ff87af4391fd0d06", "score": "0.5829237", "text": "def removeChild(self, node):\r\n raise NotImplementedError", "title": "" }, { "docid": "a512a25d9699ae37ddf017012dfc6ee3", "score": "0.5819003", "text": "def removeEventHandler(self, eventType, eventHandler):\n self.EventHandlerMap[eventType].remove(eventHandler)", "title": "" }, { "docid": "adb920fede2717a59b5cd9eb476f5ae4", "score": "0.57805306", "text": "def remove_child_vault(self, vault_id, child_id):\n pass", "title": "" }, { "docid": 
"ac58df37d3563b42a1ada689fa5dede5", "score": "0.5776085", "text": "def _remove_child(self, node: Node, child: Node):\n node.remove_child(child)\n del self._node_store[child.path]", "title": "" }, { "docid": "1668a4273bb188e80dbcb2c2f1561990", "score": "0.57349205", "text": "def child_removed(self, child):\n super(ToolkitObject, self).child_removed(child)\n if isinstance(child, ToolkitObject) and self.proxy_is_active:\n self.proxy.child_removed(child.proxy)", "title": "" }, { "docid": "94fc157a75efc3a48507283ae956ded3", "score": "0.5718932", "text": "def del_event_handler(self, name, pointer):\n\t\tif not name in self.event_handlers:\n\t\t\treturn\n\t\t\n\t\t# Need to keep handlers that do not use\n\t\t# the given function pointer\n\t\tdef filter_pointers(handler):\n\t\t\treturn handler[0] != pointer\n\n\t\tself.event_handlers[name] = filter(filter_pointers, \n\t\t\t\t\t\t self.event_handlers[name])", "title": "" }, { "docid": "a768fef9038570ca5fa98f5e57190dcb", "score": "0.5718361", "text": "def remove_child(self, value):\n if value in self.children:\n del self.children[value]", "title": "" }, { "docid": "7123da8c13e53c13e432b70d38a666f2", "score": "0.57093704", "text": "def _remove_child(self, widget):\n widget.p.parent = None", "title": "" }, { "docid": "aead265fea9e05f046cea6b16e65dac6", "score": "0.5701246", "text": "def remove(self, thischild, delete=False):\n self.psi = None\n if thischild in self.children:\n thischild.parent = None\n self.children.remove(thischild)\n if delete is True:\n thischild.deleteLater()\n else:\n for child in self.children:\n if isinstance(child, QTrapGroup):\n child.remove(thischild, delete=delete)\n if ((len(self.children) == 0) and isinstance(self.parent, QTrapGroup)):\n self.parent.remove(self)", "title": "" }, { "docid": "8b1e98e84bd6160c783cd7fee203a4d7", "score": "0.56933457", "text": "def del_event_handler(self, name: str, callback: Callable):\n if not name:\n for callbacks in self.events.values():\n for priority in callbacks.values():\n for entry in priority[:]:\n if entry == callback:\n priority.remove(callback)\n else:\n callbacks = self.events[name]\n for priority in callbacks.values():\n for entry in priority[:]:\n if entry == callback:\n priority.remove(callback)", "title": "" }, { "docid": "2928fd383701d28d6565a0a574cf9303", "score": "0.5681991", "text": "def remove(self, handler):\n return bool()", "title": "" }, { "docid": "2928fd383701d28d6565a0a574cf9303", "score": "0.5681991", "text": "def remove(self, handler):\n return bool()", "title": "" }, { "docid": "8c0905f92ae3e865bb3490a3f7170065", "score": "0.56665117", "text": "def clear_exit_event_handler(self, handler):\n self.current.exit_event.clear_handler(handler)", "title": "" }, { "docid": "50fc1de3319f30e213b9a41f091b1222", "score": "0.5659567", "text": "def pop_handlers(self):\n return getattr(self, '_handler_stack', []).pop()", "title": "" }, { "docid": "dc92a643b1c1f75981c9e800428d3447", "score": "0.5641592", "text": "def remove_child(self, node, child):\n self._nodes[node].discard(child)", "title": "" }, { "docid": "8f7f20e3ec52398473b7535256b891ac", "score": "0.56173235", "text": "def on_remove(self):\n pass", "title": "" }, { "docid": "bb4b84130831562e5011952072ad7a4d", "score": "0.5594565", "text": "def unhook(callback):\n _listener.remove_handler(callback)", "title": "" }, { "docid": "34de54ffa0b1947dfc8992d9a56f80c3", "score": "0.5548653", "text": "def _remove_form_handler(self, form_handler_name, form_handler_data,\n wait=WAIT_FOR):\n # This part is necessary, since we need 
to activate the\n # tab with form handlers. Otherwise Selenium raises\n # an exception about non-visible element on the page\n # that we're trying to fetch.\n form_handlers_tab_link = self.driver.find_element_by_xpath(\n \"\"\"//a[@href=\"#tab-form-handlers\"]\"\"\"\n )\n form_handlers_tab_link.click()\n\n # Get the label of the given form element in order to delete it later\n # from the form.\n delete_form_handler_label = self.driver.find_element_by_xpath(\n \"\"\"//td[contains(text(), '{0}')]\"\"\".format(form_handler_name)\n )\n\n # Get the parent of the label\n delete_form_handler_label_parent_container = \\\n delete_form_handler_label.find_element_by_xpath('..')\n\n # Click the add form element button to add a new form element to the\n # form.\n delete_form_handler_link = \\\n delete_form_handler_label_parent_container \\\n .find_element_by_partial_link_text(\n 'Delete'\n )\n delete_form_handler_link.click()\n\n logger.debug(form_handler_name)\n\n # Wait until the fobi page opens with the form element in.\n WebDriverWait(self.driver, timeout=TIMEOUT).until(\n lambda driver: driver.find_element_by_xpath(\n \"\"\"//div[contains(text(), 'The form handler plugin \"{0}\" \"\"\"\n \"\"\"was deleted successfully.') \"\"\"\n \"\"\"and contains(@class, \"alert-info\")]\"\"\".format(\n form_handler_name\n )\n )\n )", "title": "" }, { "docid": "e31891a5595a62c5ce1ef7aae021fd11", "score": "0.55190015", "text": "def remove(self, child):\n self.xmlnode.remove(child.xmlnode)", "title": "" }, { "docid": "3313b90e7ac18c8f2a8601418bde9126", "score": "0.5507837", "text": "def removeChild(*args):\n return _coin.SoVRMLLOD_removeChild(*args)", "title": "" }, { "docid": "24dc5f46e43dfdc4c62fc6e0eec15a4c", "score": "0.5507475", "text": "def sig_handler(cls, sig_num, stack_frame):\n cls.cleanup()", "title": "" }, { "docid": "e4ca215c0f4d32ed44ae6a14ed186192", "score": "0.5506857", "text": "def del_event_handler(self, module, *args, **kwargs):\n return self.plugin_manager.del_event_handler(module, *args, **kwargs)", "title": "" }, { "docid": "6168896018b6c0adf3b81b58d57ca7ed", "score": "0.549339", "text": "def __sub__(self, callback):\r\n\r\n self.callbacks.remove(callback)\r\n return self", "title": "" }, { "docid": "8ac76e7846b604c42fccf171fd3f8c51", "score": "0.5488947", "text": "def delete_child(self):\r\n\r\n child = self.children[-1]\r\n del self.children[-1]\r\n return child", "title": "" }, { "docid": "5bf594700d85a157c024a9a3f4cb13d6", "score": "0.5488658", "text": "def removeChild(*args):\n return _coin.SoVRMLParent_removeChild(*args)", "title": "" }, { "docid": "e6c64f80c0285d3e461f4580e2a725e5", "score": "0.54857415", "text": "def _teardown_collection_handlers(self):\n base_msg_type = self.message.msg_type\n self.bus.remove(base_msg_type + '.handling', self._register_handler)\n self.bus.remove(base_msg_type + '.response', self._receive_response)", "title": "" }, { "docid": "7a7a1e9df5091b16238cf9de66d9477f", "score": "0.54792094", "text": "def removeChild(*args):\n return _coin.SoVRMLSwitch_removeChild(*args)", "title": "" }, { "docid": "d1ca9319f079cabec581a922c0f029ff", "score": "0.54777", "text": "def remove_word_listener(word_or_handler):\n _word_listeners[word_or_handler]()", "title": "" }, { "docid": "af83d76a77c6d0bfedefa745115e9b52", "score": "0.54432476", "text": "def add_handler(self, handler):\n if handler not in self.handlers:\n self.handlers.append(handler)", "title": "" }, { "docid": "4885ae8089d1987d46d1647aba5266eb", "score": "0.54353815", "text": "def removeChild(*args):\n return 
_coin.SoGroup_removeChild(*args)", "title": "" }, { "docid": "3fac9d0335364881cf95224aeb370559", "score": "0.54325205", "text": "def exposed_delete_chunk(self, chunk_id):\n del self.__class__.handle_table[chunk_id]", "title": "" }, { "docid": "97f82e162bf9a551001d2abdc31f68b3", "score": "0.54276466", "text": "def remove(self, h, handle):\n record_id, data = self._find(h)\n if record_id is None:\n return\n handles = self._unmarshal(data, just_handles=True)\n handles.remove(handle)\n if len(handles) == 0:\n self.block.delete(record_id)\n else:\n self.block.put(record_id, self._marshal(h, handles))", "title": "" }, { "docid": "c74d57c721f0d72ea51062688c620e29", "score": "0.5422727", "text": "def remove_event(reactor, event):\r\n reactor.remove(event)", "title": "" }, { "docid": "6c625709ce7bda506877de28543c0749", "score": "0.5419543", "text": "def rename_handlers(self):\n for handler_name, handler in self.handlers.items():\n handler_operation = handler_name.decode()\n handler_operation = handler_operation.split('.')[-1].encode()\n self.handlers[b'repl:' + self.name + b'.' + handler_operation] = handler\n self.redis_connection.execute_command('DEL', handler_name)", "title": "" }, { "docid": "439f239b53fab15436c559c61ec10792", "score": "0.54055494", "text": "def remove_word_listener(word_or_handler):\n _remove_named_hook(word_or_handler, _word_listeners)", "title": "" }, { "docid": "28d5b61586c7cec3a2c7f23f9e32ad31", "score": "0.53964347", "text": "def tearDown(self):\n self.log.removeHandler(self.handler)\n self.handler.close()", "title": "" }, { "docid": "27a84976b9ef22ec7ff6bb953554a34e", "score": "0.5374946", "text": "def removing(self): # API (gets overwritten) TODO rename to remove_event(), it would make more sense I guess\n pass", "title": "" }, { "docid": "64bb139459f936f4e3b532ac66c07f94", "score": "0.53707385", "text": "def unregister_at_level(self, level):\n self.handler.unregister_at_level(level, dispatcher)", "title": "" }, { "docid": "338876a1b4c635698ef572d78867fcb8", "score": "0.53624785", "text": "def _uninstall_signal_handlers(self):\n loop = asyncio.get_event_loop()\n loop.remove_signal_handler(signal.SIGINT)\n loop.remove_signal_handler(signal.SIGTERM)", "title": "" }, { "docid": "2cba3c09a05c54553acd72dfb5a99d0b", "score": "0.5362352", "text": "def remove(target, identifier, fn):\r\n _event_key(target, identifier, fn).remove()", "title": "" }, { "docid": "16551ab6f66206dc3dd72a314839d520", "score": "0.5353773", "text": "def add_exit_event_handler(self, handler):\n self.current.exit_event.add_handler(handler)", "title": "" }, { "docid": "6e6e5209688b700be7ab010561815aae", "score": "0.53493893", "text": "def unhook(remove):\n _hooks[remove]()", "title": "" }, { "docid": "efbfc7319fe72c40be50f7b59aaac86b", "score": "0.5332615", "text": "def removeChild(self,child):\n self.pv_cdomlette.removeChild(child.pv_cdomlette)\n for i in range(0,len(self.childNodes)-1):\n if id(self.childNodes[i]) == id(child):\n toto = self.childNodes[i]\n toto.pv_deleted = True\n self.notify_observers(toto)\n del self.childNodes[i]\n print \"coucou %s\" % toto\n self.notify_observers(self)", "title": "" }, { "docid": "807410c2641efabffc586ca46dcd60f0", "score": "0.53256506", "text": "def remove_child_qualifier(self, qualifier_id, child_id):\n pass", "title": "" }, { "docid": "8675cc6474c8d17829f609fcfffaceb7", "score": "0.53240865", "text": "def kill(self):\n for handler in self.logger.handlers[:]:\n self.logger.removeHandler(handler)", "title": "" }, { "docid": "e953f8d8eba4ddc9d59185945154c79c", 
"score": "0.5312111", "text": "def delete_event_callback(self, event):", "title": "" }, { "docid": "dac6789dd46eefb6abc1f87e943a782b", "score": "0.5303661", "text": "def clear(self):\n\n if not self._handler:\n raise ValueError(\"No handler configured\")\n\n self._handler.clear()", "title": "" }, { "docid": "ef47db25ea38d2efd4350e381633f100", "score": "0.5300582", "text": "def signal_unbind(self, signal_handler):\n try:\n handlers = self._signals[\n signal_handler.signal_name]\n except KeyError:\n pass\n else:\n signal_handler.function = None\n try:\n handlers.remove(signal_handler)\n except IndexError:\n pass", "title": "" }, { "docid": "277477aaec1aa82d3e9d230e22a5605c", "score": "0.52999586", "text": "def remove_child(self, child, class_name = None):\n if class_name is None: class_name = self.__class__.__name__\n ## if child_name provided\n if isinstance(child, str):\n if child in self._children: self._remove_child(child)\n else: raise UnknownChild( (f\"Cannot remove {self._child_adj}: '{child}' is not a\"\n f\" known {self._child_alias} name\") )\n ## if Child object provided\n else:\n ## if Child obj is in self.children, retrieve key\n inv_children = dict((v, k) for k, v in self._children.items())\n if child in inv_children: self._remove_child(inv_children[child])\n else: raise UnknownChild( (f\"Cannot remove {self._child_adj}: The {self._child_obj} object\"\n f\" provided is not known to this {class_name}\") )\n return", "title": "" }, { "docid": "e3f64971c111162ae10e8f6b6421c71d", "score": "0.52982634", "text": "def remove() -> None:", "title": "" }, { "docid": "ddffbb04569e5ddbc1684409b150f641", "score": "0.52908236", "text": "def unregister_error_handler() -> None:\n Hub.handle_error = ORIGINAL_ERROR_HANDLER", "title": "" }, { "docid": "2e921d60aab2652531909eacdb97ca6b", "score": "0.52737355", "text": "def remove_child(self, child_name):\n self._update_json()\n put_data = self.json.copy()\n for obj_key in ['links', 'metadata']:\n put_data.pop(obj_key)\n obj_children = put_data.pop('objects')\n put_data['objects'] = []\n child_type = ''\n for obj_child in obj_children:\n if obj_child['name'] != child_name:\n put_data['objects'].append(obj_child)\n else:\n child_type = obj_child['type']\n if child_type:\n logging.warning(\"Removing {} child object {} from parent {}!\".format(child_type, child_name, self.name))\n self.update(put_data)\n else:\n logging.error(\"Child {} not found inside parent {}!\".format(child_name, self.name))", "title": "" }, { "docid": "a6feec5067a9850c6d67f1869e2aee18", "score": "0.5272715", "text": "def unregisterPacketHandler(self, packetType):\n pass", "title": "" }, { "docid": "3af39222a28b9a7395e6374c258b8ccd", "score": "0.5271657", "text": "def remove_hook(self, name, fun):\n\t\tself.hooks[name].remove(fun)", "title": "" }, { "docid": "256f268e566e8d1f9bf1075746200bc6", "score": "0.526498", "text": "def remove(self, widget):\n self._detach_child(widget)\n del self._children[widget]\n self._repack_and_regroup_children()", "title": "" }, { "docid": "8cd4c5e5b999e00b539da04836032f43", "score": "0.5257587", "text": "def callbackWrapper(self):\n self.callback(*self.args)\n self.methodscalled.remove(self)", "title": "" }, { "docid": "f938ddd4a9d844fbb00109182932e6cf", "score": "0.5240832", "text": "def _remove_named_hook(name_or_handler, names):\n if callable(name_or_handler):\n handler = name_or_handler\n try:\n name = next(n for n, h in names.items() if h == handler)\n except StopIteration:\n raise ValueError('This handler is not associated with any name.')\n 
unhook(handler)\n del names[name]\n else:\n name = name_or_handler\n try:\n handler = names[name]\n except KeyError as e:\n raise ValueError('No such named listener: ' + repr(name), e)\n unhook(names[name])\n del names[name]\n\n return name", "title": "" }, { "docid": "a738913a0eafb0af7f2a8ef6d646c856", "score": "0.52406603", "text": "def off(self, event_name, handler):\n # todo: make this thread safe!!!\n if event_name in self.listeners and handler in self.listeners[event_name]:\n self.listeners[event_name].remove(handler)", "title": "" }, { "docid": "052b4bad8d2a5ccd936c6bb5f7b44485", "score": "0.52362853", "text": "def sig_handler(sig,frame):\n ioloop.IOLoop.instance().add_callback(shutdown)", "title": "" }, { "docid": "052b4bad8d2a5ccd936c6bb5f7b44485", "score": "0.52362853", "text": "def sig_handler(sig,frame):\n ioloop.IOLoop.instance().add_callback(shutdown)", "title": "" }, { "docid": "71ce42a89e2181b3e1163f07c1d3dc59", "score": "0.5233347", "text": "def terminate(self) -> None:\n self.unregister_endpoint(self.handler)", "title": "" }, { "docid": "cc820064bbe1d9f53d9acf7832ce88a3", "score": "0.52318746", "text": "def remove(self, widget):\n self._detach_child(widget)\n self._children.remove(widget)\n del self._sizes[widget]\n self._repack_and_regroup_children()", "title": "" } ]
fe2b6ed15d7c01e5da08eb0482b1fda2
Look for currency code.
[ { "docid": "5fa7adbf3f0a8e6d3acd86e2f51ea3b6", "score": "0.6849101", "text": "def look(query): \n filename = resource_filename(__name__, 'data/currency_code.json')\n with open(filename, 'r') as file:\n currency_code = json.load(file)\n \n def lookname(word):\n import re\n query = re.compile(word, flags=re.I)\n ret = []\n for it in currency_code:\n if re.search(query, it['name']):\n ret.append(it)\n return ret\n \n results = lookname(query)\n if len(results) == 0:\n click.echo('Error: Currency not found.')\n else:\n for r in results:\n currency, code = r['name'], r['code']\n message = \"{1} ({0})\".format(currency, code)\n click.echo(message)", "title": "" } ]
[ { "docid": "ee6e587f59dfd4d74745f86a071eb09a", "score": "0.68294126", "text": "def currency_code(self) -> str:\n return pulumi.get(self, \"currency_code\")", "title": "" }, { "docid": "2ed1c8c097a4e983214ff610ddcc960f", "score": "0.67247885", "text": "def __getitem__(self, value: Union[int, str]) -> Currency:\n if not value:\n raise CurrencyNotFound(f'Currency \"{value}\" not found.')\n\n item = self._format_num_code(value)\n item = item.lower()\n\n try:\n currency = self.currencies[item]\n except KeyError:\n raise CurrencyNotFound()\n\n return currency", "title": "" }, { "docid": "ee981fc5e08ac786f0b21976e6ff79a1", "score": "0.66217786", "text": "def currency_code(self):\n code = self.currency\n\n if code not in CURRENCIES:\n code = common.settings.currency_code_default()\n\n return code", "title": "" }, { "docid": "346b9a130ddde5662cf80d6ad2a1827a", "score": "0.6579701", "text": "def lookup_currency(self): # pragma: no cover\n return NotImplemented", "title": "" }, { "docid": "927cf6ded6020a7a2d43200bf5cba98b", "score": "0.6526249", "text": "def get_currency(self, currency):\n\n if currency not in self._currency_addresses:\n self._currency_addresses = self.get_currencies()\n\n res = None\n if currency[:2] == '0x':\n for token, c in self._currency_addresses.items():\n if c['address'] == currency:\n res = c\n break\n # check if we found the currency\n if res is None:\n raise IdexCurrencyNotFoundException(currency)\n else:\n if currency not in self._currency_addresses:\n raise IdexCurrencyNotFoundException(currency)\n res = self._currency_addresses[currency]\n\n return res", "title": "" }, { "docid": "4bcb2ceace3b3829b9b5730403019f54", "score": "0.6213957", "text": "def get_currency(input_country) -> str:\n file = open(\"country_conversion.json\", \"r\")\n data = json.load(file)\n file.close()\n for cntry in data:\n if cntry['Country'] == input_country:\n input_country = cntry['Code']\n break\n return input_country", "title": "" }, { "docid": "e3d06049a4efb4befb2d1b6cd5ff3695", "score": "0.61457765", "text": "def test_find_currency_data():\n result = dtypes.find_currency_data()\n expected = {\n '$': 'ARS', '﹩': 'ARS', '$': 'ARS', '£': 'FKP', '₭': 'LAK',\n '£': 'FKP', '₩': 'KPW', '¥': 'CNY', '﷼': 'IRR', '₾': 'GEL',\n '₽': 'RUB', '₼': 'AZN', '₺': 'TRY', '₹': 'INR', '₸': 'KZT',\n '₵': 'GHS', '₴': 'UAH', '₲': 'PYG', '₱': 'CUP', '₮': 'MNT',\n '€': 'EUR', '₫': 'VND', '₪': 'ILS', '₩': 'KPW', '₨': 'LKR',\n '₦': 'NGN', '₡': 'CRC', '៛': 'KHR', '฿': 'THB', '৳': 'BDT',\n '؋': 'AFN', '֏': 'AMD', '¥': 'CNY', '¢': 'GHS'\n }\n assert result == expected", "title": "" }, { "docid": "1cb11866dd97e23093c9e2bdeb9da83e", "score": "0.60134774", "text": "def string_to_currency(self, s):\n return # osid.financials.Currency", "title": "" }, { "docid": "a9f088a9e36f3d4471f2a0516b7ccd7a", "score": "0.59600127", "text": "def guess_currency_from_address(address):\n if is_py2:\n fixer = lambda x: int(x.encode('hex'), 16)\n else:\n fixer = lambda x: x # does nothing\n\n first_byte = fixer(b58decode_check(address)[0])\n hits = []\n for currency, data in crypto_data.items():\n if hasattr(data, 'get'): # skip incomplete data listings\n version = data.get('address_version_byte', None)\n if version is not None and first_byte == version:\n hits.append([currency, data['name']])\n\n if hits:\n return hits\n\n raise ValueError(\"Unknown Currency with first byte: %s\" % first_byte)", "title": "" }, { "docid": "da5ce1d29e2b499ada2b3b643218edbc", "score": "0.5934486", "text": "def test_is_currency_code_format(self):\r\n\r\n 
self.assertTrue(is_currency_code_format(\"USD\"))\r\n self.assertFalse(is_currency_code_format(\"US!\"))\r\n self.assertFalse(is_currency_code_format(\"USSR\"))\r\n self.assertFalse(is_currency_code_format(\" D\"))", "title": "" }, { "docid": "7f6e9b4f82a4e0c8983f61974351e49e", "score": "0.5933416", "text": "def find_by_currency(currency):\n\n\tcountries_info = get_response(\"/currency/\" + currency)\n\treturn get_country_objects(countries_info)", "title": "" }, { "docid": "070304ac4bd019f62116617e4b61070c", "score": "0.5826321", "text": "def test_is_real_currency_code(self):\r\n\r\n self.assertTrue(is_real_currency_code('USD'))\r\n self.assertFalse(is_real_currency_code('ZZZ'))", "title": "" }, { "docid": "b259f1d2c53e28a01027287e1eaca8c6", "score": "0.57734346", "text": "def is_stock_cn(code):\n code = str(code)\n if code[0] in ['5', '6', '9'] or \\\n code[:3] in [\"009\", \"126\", \"110\", \"201\", \"202\", \"203\", \"204\", '688'] or \\\n (code.startswith('XSHG')) or \\\n (code.endswith('XSHG')):\n if (code.startswith('XSHG')) or \\\n (code.endswith('XSHG')):\n if (len(code.split('.')) > 1):\n try_split_codelist = code.split('.')\n if (try_split_codelist[0] == 'XSHG') and (len(try_split_codelist[1]) == 6):\n code = try_split_codelist[1]\n elif (try_split_codelist[1] == 'XSHG') and (len(try_split_codelist[0]) == 6):\n code = try_split_codelist[0]\n if (code[:5] in [\"00000\"]) or \\\n (code[:3] in [\"000\"]):\n return True, QA.MARKET_TYPE.INDEX_CN, 'SH', '上交所指数'\n if code.startswith('60') == True:\n return True, QA.MARKET_TYPE.STOCK_CN, 'SH', '上交所A股'\n elif code.startswith('688') == True:\n return True, QA.MARKET_TYPE.STOCK_CN, 'SH', '上交所科创板'\n elif code.startswith('900') == True:\n return True, QA.MARKET_TYPE.STOCK_CN, 'SH', '上交所B股'\n elif code.startswith('50') == True:\n return True, QA.MARKET_TYPE.FUND_CN, 'SH', '上交所传统封闭式基金'\n elif code.startswith('51') == True:\n return True, QA.MARKET_TYPE.FUND_CN, 'SH', '上交所ETF基金'\n else:\n print(code, True, None, 'SH', '上交所未知代码')\n return True, None, 'SH', '上交所未知代码'\n elif code[0] in ['0', '2', '3'] or \\\n code[:3] in ['000', '001', '002', '200', '300', '159'] or \\\n (code.startswith('XSHE')) or \\\n (code.endswith('XSHE')):\n if (code.startswith('000') == True) or \\\n (code.startswith('001') == True):\n if (code in ['000003', '000112', '000300', '000132', '000133']):\n return True, QA.MARKET_TYPE.INDEX_CN, 'SH', '中证指数'\n else:\n return True, QA.MARKET_TYPE.STOCK_CN, 'SZ', '深交所主板'\n if code.startswith('002') == True:\n return True, QA.MARKET_TYPE.STOCK_CN, 'SZ', '深交所中小板'\n elif code.startswith('003') == True:\n return True, QA.MARKET_TYPE.STOCK_CN, 'SZ', '中广核??'\n elif code.startswith('159') == True:\n return True, QA.MARKET_TYPE.FUND_CN, 'SZ', '深交所ETF基金'\n elif code.startswith('200') == True:\n return True, QA.MARKET_TYPE.STOCK_CN, 'SZ', '深交所B股'\n elif code.startswith('399') == True:\n return True, QA.MARKET_TYPE.INDEX_CN, 'SZ', '中证指数'\n elif code.startswith('300') == True:\n return True, QA.MARKET_TYPE.STOCK_CN, 'SZ', '深交所创业板'\n elif (code.startswith('XSHE')) or \\\n (code.endswith('XSHE')):\n pass\n else:\n print(code, True, None, 'SZ', '深交所未知代码')\n return True, None, 'SZ', '深交所未知代码'\n else:\n print(code, '不知道')\n return False, None, None, None", "title": "" }, { "docid": "3006564fc4ea5475cb662f5573eb246e", "score": "0.57121426", "text": "def getCurrencyRate(self,code):\n currencyrate=self.currencyratedict[code]\n return currencyrate.getCurrencyRate()", "title": "" }, { "docid": "a2171d7de63c5885abb9874d88d535c2", "score": "0.56446785", 
"text": "def _get_currency(self):\n user = self.env['res.users'].browse(self._uid)\n if user.company_id:\n return user.company_id.currency_id.id\n else:\n return self.env['res.currency'].search(\n [('rate', '=', 1.0)])[0]", "title": "" }, { "docid": "a31e5c700d7aea261fe7ed86117b9f9d", "score": "0.56382793", "text": "def find_by_code(cls, code):\n\n formatted_code = code.upper()\n\n coupon = Coupon.query.filter(Coupon.redeemable, Coupon.code == formatted_code).first()\n\n return coupon", "title": "" }, { "docid": "31dda01bdc976b63e6a03a9b74516226", "score": "0.5622921", "text": "def search_exchange():\n\tdolar = soup.find(\"div\", {\"class\": \"dolar\"})\n\tfor li in dolar.findAll('li'):\n\t\tcurrency, amount = li.text.split('RD$')\n\t\treturn (currency, amount)", "title": "" }, { "docid": "64e8cf1846dd2ba185091ca081f3a21e", "score": "0.5618524", "text": "def find_by_code(cls, code):\n formatted_code = code.upper()\n coupon = Coupon.query.filter(Coupon.redeemable,\n Coupon.code == formatted_code).first()\n\n return coupon", "title": "" }, { "docid": "34bcfdc0842c24acdf06b88d814122b2", "score": "0.5607418", "text": "def find_currency(df):\n doc_currency = \"\"\n # euros = \"Euro|€|EUR\"\n # dollars = \"Dollar|\\$|USD|dols\"\n # pounds = \"£|GBP|BP\"\n currencies = {\"euro\": EUROS, \"dollar\": DOLLARS, \"pound\": POUNDS}\n\n for key, value in currencies.items():\n if df.apply(lambda x: x.str.contains(value, case=False).any(), axis=1).any(\n axis=None\n ):\n # If doc_currency is set two times => set undefined\n if doc_currency:\n doc_currency = \"euro\" # \"undefined\"\n break\n # Set doc currency\n else:\n doc_currency = key\n\n # Create column for unit in df marking the token which contains unit\n df.loc[:, \"currency\"] = False\n for key, value in currencies.items():\n df.loc[df[\"text\"].str.contains(value, case=False), \"currency\"] = True\n # Set default unit to 1\n if not doc_currency:\n doc_currency = \"euro\"\n return doc_currency", "title": "" }, { "docid": "4b951e8bedf117c911b482d92923c1ac", "score": "0.5592632", "text": "def parse_currency(self):\n price = re.search('(\\d\\.*)+', self.pq_form('.price').text())\n p = [float(i) for i in price.group().strip().split(' ')]\n\n return \"%.2f\" % max(p) if len(p) == 2 else \"%.2f\" % p[0]", "title": "" }, { "docid": "a81dfae6f8a698e39ce77fe32c017525", "score": "0.5579692", "text": "def ekat_to_gnc_Currency(GNCBook, EkatCurrency):\n CommodityTable = GNCBook.get_table()\n return CommodityTable.lookup(\"CURRENCY\", str(EkatCurrency))", "title": "" }, { "docid": "b9ccbe027e15cc8282435257c9ec6e43", "score": "0.55710196", "text": "def get_coupon_code_price_in_checkout():\n coupon_code_price_text = Element(\"checkout\", \"coupon_code_price\").get_attribute(\"text\")\n coupon_code_price = coupon_code_price_text[coupon_code_price_text.find(\"$\")+1:]\n return float(coupon_code_price)", "title": "" }, { "docid": "c0dff2676b71692bef34c656d98e3649", "score": "0.5542053", "text": "def currency(self) -> pulumi.Input[Union[str, 'Currency']]:\n return pulumi.get(self, \"currency\")", "title": "" }, { "docid": "e473aaa456d221d68da322d3b068db4d", "score": "0.5538454", "text": "def get_denomination(s):\n\n if is_money_string(s):\n denomination_match = re.search(rf'({_SUPPORTED_DENOMINATIONS})', s)\n if denomination_match:\n denomination = denomination_match.group(0)\n if denomination == '$':\n denomination = 'USD'\n return denomination\n return None", "title": "" }, { "docid": "47344aede6290e298bc5d710217f9ef9", "score": "0.55297154", "text": "def 
get_rate(self, currency_code): \n return Decimal(self.rates[currency_code])", "title": "" }, { "docid": "e69e46cf09939d9a6734f33014517df0", "score": "0.55219483", "text": "def getCurrency(self):\n return self.base.get(\"currency\", [])", "title": "" }, { "docid": "12e62d6aebf33efc457369adbd74e749", "score": "0.54618835", "text": "def get_currency_type(self):\n return # osid.type.Type", "title": "" }, { "docid": "a6abc535c447c218f08b1a0048668d38", "score": "0.53959584", "text": "def get_currency_type(self):\n return # osid.type.Type", "title": "" }, { "docid": "4ff2f0efdfcef9ef4f33132f9982daba", "score": "0.53625256", "text": "def get_company_code(self):\n code_df = pd.read_html('http://kind.krx.co.kr/corpgeneral/corpList.do?method=download&searchType=13', header=0)[0] \n # convert into 6 digits code\n code_df.종목코드 = code_df.종목코드.map('{:06d}'.format) \n # remove unnecessary columns \n code_df = code_df[['회사명', '종목코드']] \n # change Korean to English\n code_df = code_df.rename(columns={'회사명': 'name', '종목코드': 'code'})\n self.code = code_df.query(\"name=='{}'\".format(self.item_name))['code'].to_string(index=False).lstrip()\n print('Item Code:',self.code)\n return self.code", "title": "" }, { "docid": "c96fc91befb6d6daffe6a9c3efea18ea", "score": "0.5346923", "text": "def clean_currency(currency):\n if \"TDM\" in currency:\n currency = \"TDM\"\n elif \"Mio\" in currency and \"DM\" in currency:\n currency = \"Mio DM\"\n elif \"Mrd\" in currency and \"DM\" in currency:\n currency = \"Mrd DM\"\n elif \"DM\" in currency:\n currency = \"DM\"\n elif \"Thlf\" in currency:\n currency = \"Thlf\"\n elif \"hlf\" in currency:\n currency = \"hlf\"\n elif \"mögen\" in currency:\n currency = \"DM\"\n return currency", "title": "" }, { "docid": "4acb2108c1ef6b47cc9f376817d769bd", "score": "0.5315113", "text": "def record_currency(self, record):\n return record.currency_id", "title": "" }, { "docid": "672c3a07646cc6f85d86036e78f25e7f", "score": "0.52842635", "text": "def test_convert_currency(self):\r\n\r\n self.assertEqual(convert_currency('USD', 'USD', '50'), 'US$50.0')", "title": "" }, { "docid": "ce36fe58516565b4bba094857a6469ed", "score": "0.5282381", "text": "def get_cur_by_id(cur_id):\n sql = \"\"\"\n SELECT currency\n FROM currencies\n WHERE id = %s;\n \"\"\"\n args = (cur_id,)\n try:\n query_result = Currency._make_select(sql, args)\n except IntegrityError:\n return False\n return query_result[0]['currency']", "title": "" }, { "docid": "aa88d30e95a9addc5792fd13dfd3064b", "score": "0.52776617", "text": "def test_creates_currency(self):\n Currency('EUR')", "title": "" }, { "docid": "15562f6459e92fd2cd75611fa5a36d47", "score": "0.52687216", "text": "async def test_currency(self) -> None:\n nanites, asp_tokens = await self.character.currency()\n self.assertIsInstance(nanites, int)\n self.assertIsInstance(asp_tokens, int)", "title": "" }, { "docid": "e6d65da97522012b7991b84c015de7bf", "score": "0.5256139", "text": "def get_exchange_rate(currency):\n response = requests.get('https://bitpay.com/api/rates')\n resp_obj = json.loads(response.content)\n result = '<N/A>'\n for elem in resp_obj:\n if elem['code'] == currency:\n result = str(elem['rate'])\n return result", "title": "" }, { "docid": "79243ec60ac490e7e21a78aad59a979a", "score": "0.525378", "text": "def get_source_currency(self, obj: CurrencyExchangeRate) -> str:\n return obj.source_currency.code", "title": "" }, { "docid": "441d19f1ad753cf78c1d00c6b145e7d9", "score": "0.5250038", "text": "def search_okcicredit(cursor, contract_num):\n try:\n 
cursor.execute(\n \"\"\"\n SELECT\n tc.id,\n tc.client_id,\n CONCAT(\n REPLACE(tper.name3,\"'\", '\"'), ' ',\n LEFT(tper.name , 1), '. ',\n LEFT(tper.name2 , 1), '.'),\n tc.vnoska,\n ts.status,\n tc.egn,\n dwh.GetDolgBody(tc.id, date(now()))+\n dwh.GetDolgFine(tc.id, date(now()))+\n dwh.GetDolgPrc(tc.id, date(now()))\n FROM\n mbank.tcredits tc\n join mbank.tstatuses ts on ts.credit_id = tc.id\n and ts.is_last = 1\n left join mbank.tpersons tper on tper.id = tc.client_id\n WHERE tc.contract_num = {0}\n and ts.status = 59;\n \"\"\".format(contract_num)\n )\n except Exception:\n return None\n\n credit = cursor.fetchall()\n\n if credit:\n return credit\n\n return None", "title": "" }, { "docid": "5765a27fc1d71f6e40e1d92f5f3948ec", "score": "0.52460676", "text": "def get_currency_format_type(self):\n return # osid.type.Type", "title": "" }, { "docid": "8de5e2d0e651ed0703855a491e8cae71", "score": "0.5234212", "text": "def test_retreive_miscapitalized_currency(client):\n\n res = client.get('/v1/currencies/uSd')\n assert res.status_code == 404", "title": "" }, { "docid": "4ddce66c0a37d8ebff9476e709bda5e5", "score": "0.52306783", "text": "def isContainsDollar(entry):\n return entry[1].find('$')!=-1", "title": "" }, { "docid": "a7d9178acced0c0df2b491ede2785eb5", "score": "0.5221874", "text": "def get_source_currency_type(self):\n return # osid.type.Type", "title": "" }, { "docid": "455b6cb0ecd72e95ee19c7948bea71d0", "score": "0.5203276", "text": "def get_currency(self):\n return self.currency", "title": "" }, { "docid": "455b6cb0ecd72e95ee19c7948bea71d0", "score": "0.5203276", "text": "def get_currency(self):\n return self.currency", "title": "" }, { "docid": "79be60e07bc0158716409b8567cd8101", "score": "0.51875174", "text": "def commodity_code(self) -> str:\n return pulumi.get(self, \"commodity_code\")", "title": "" }, { "docid": "2e00b98328b78f993aed9983555cdbc5", "score": "0.51707137", "text": "def get_country_code(country_name):\n for country_code, country in COUNTRIES.items():\n if country_name.lower() == country.lower():\n return country_code\n elif 'congo, dem.' 
in country_name.lower():\n return 'cd'\n elif 'hong kong' in country_name.lower():\n return 'hk'\n elif 'macao' in country_name.lower():\n return 'mo'\n elif 'egypt' in country_name.lower():\n return 'eg'\n elif 'gambia' in country_name.lower():\n return 'gm'\n elif 'yemen' in country_name.lower():\n return 'ye'\n else:\n return None", "title": "" }, { "docid": "1dbbeb612d99c5533f0e1366ed83b8c4", "score": "0.5159652", "text": "def iscurrency(currency):\n strout = currency_response(\"USD\", currency, 1)\n boolnum = has_noerror(strout)\n return boolnum", "title": "" }, { "docid": "c86bc7a0d9fd0fc39eeffc8b1dac9ad7", "score": "0.5142611", "text": "def currency(self):\n return self._currency", "title": "" }, { "docid": "c86bc7a0d9fd0fc39eeffc8b1dac9ad7", "score": "0.5142611", "text": "def currency(self):\n return self._currency", "title": "" }, { "docid": "c86bc7a0d9fd0fc39eeffc8b1dac9ad7", "score": "0.5142611", "text": "def currency(self):\n return self._currency", "title": "" }, { "docid": "c86bc7a0d9fd0fc39eeffc8b1dac9ad7", "score": "0.5142611", "text": "def currency(self):\n return self._currency", "title": "" }, { "docid": "1784c18e6bdda1ae4e1688d3a36fb826", "score": "0.5138435", "text": "def find_cntry(line):\n match = COUNTRY_REGEX.match(line)\n if match:\n country = match.group(1)\n if \"De facto\" in country:\n return None\n if \"|\" in country:\n return country.partition(\"|\")[2]\n return country\n return None", "title": "" }, { "docid": "8ea7b2be2f5f1a9a7bc0f938900b1195", "score": "0.5131995", "text": "def _money(text,rex=re.compile('(-?)([0-9]*)\\.?([0-9]*)')):\n return rex.match(text).groups()", "title": "" }, { "docid": "662deb19db1c60083daa880fdb9c5b2b", "score": "0.5127136", "text": "def erc20_value_search(coins):\n url2 = \"http://api.binance.com/api/v3/ticker/price\"\n response2 = requests.get(url2)\n rate_list = response2.json()\n pair_conversions = defaultdict(lambda : 0)\n\n for coin in coins:\n for rate in rate_list:\n exchange_coin = rate['symbol']\n exchange_price = rate['price']\n if coin + 'ETH' == exchange_coin:\n pair_conversions[exchange_coin] = exchange_price\n else:\n pass\n \n return pair_conversions", "title": "" }, { "docid": "c65c5d15400f8e8e1d9aa8290ce2297c", "score": "0.51143295", "text": "def _weatherSymbol(self, code):\n\n table = {'partlycloudy': '~☁',\n 'cloudy': '☁',\n 'tstorms': '⚡',\n 'sunny': '☀',\n 'snow': '❄',\n 'sleet': '☄',\n 'rain': '☔',\n 'mostlysunny': '~☀',\n 'mostlycloudy': '~☁',\n 'hazy': '♒',\n 'fog': '♒',\n 'flurries': '❄',\n 'clear': '☼',\n 'chanceflurries': '?❄',\n 'chancerain': '?☔',\n 'chancesleet': '?❄',\n 'chancesnow': '?❄',\n 'chancetstorms': '?☔'}\n # return symbol from table.\n try:\n return table[code]\n except KeyError:\n return \"unknown\"", "title": "" }, { "docid": "8d9721f057827b87b15cbfd1a3ac2500", "score": "0.5104792", "text": "def validate_currency(currency):\n if not currency:\n return False\n\n if not re.search('^\\d+$', currency):\n return False\n\n if not re.search('^0', currency):\n return False\n\n try:\n decimal.Decimal(currency)\n except:\n return False\n\n return True", "title": "" }, { "docid": "daf402fb2b1f3a9252d1337a6b960b29", "score": "0.5099933", "text": "def find_by_callingcode(code_num):\n\tcountries_info = get_response(\"/callingcode/\" + code_num)\n\treturn get_country_objects(countries_info)", "title": "" }, { "docid": "597486b56082becc2c6da756f55c48d2", "score": "0.5079034", "text": "def search_credit(cursor, contract_num):\n try:\n cursor.execute(\n \"\"\"\n SELECT\n tc.id,\n tc.client_id,\n 
CONCAT(REPLACE(tper.name3,\"'\", '\"'), ' ', LEFT(tper.name , 1), '. ', LEFT(tper.name2 , 1), '.'),\n tc.vnoska,\n ts.status,\n tc.egn,\n dwh.GetDolgBody(tc.id, date(now()))+\n dwh.GetDolgFine(tc.id, date(now()))+\n dwh.GetDolgPrc(tc.id, date(now()))\n FROM\n mbank.tcredits tc\n join mbank.tstatuses ts on ts.credit_id = tc.id\n and ts.is_last = 1\n left join mbank.tpersons tper on tper.id = tc.client_id\n WHERE tc.contract_num = {0}\n and (ts.status = 5 or ts.status = 55 or ts.status = 555);\n \"\"\".format(contract_num)\n )\n except Exception:\n return None\n\n credit = cursor.fetchall()\n\n if credit:\n return credit\n\n return None", "title": "" }, { "docid": "db39390e78aa9d3a1fed693bbdb72298", "score": "0.5078839", "text": "def _find_payment(data):\n transaction_id = data[\"google-order-number\"]\n for Cart in CART_TYPES:\n try:\n return Cart.payment_class.objects.select_related('cart').filter(transaction_id=transaction_id)[0]\n except:\n pass", "title": "" }, { "docid": "051c346c43b8b85215ee9c3b7f29e063", "score": "0.5062812", "text": "def set_currency_symbol(self):\n is_set = None\n try:\n self.logger.info('Start: set currency symbol')\n self._business_admin_page.set_currency_symbol()\n is_set = True\n except WebDriverException as exp:\n is_set = False\n self.logger.error(exp.msg)\n raise\n finally:\n self.logger.info('End: set currency symbol')\n return is_set", "title": "" }, { "docid": "1a92f63c20b03123295110132d514454", "score": "0.50594723", "text": "def valid_currencies():\n return [currency for currency in Money.VALID_CURRENCIES]", "title": "" }, { "docid": "87e19d5a139b383840b7e07816fd0e58", "score": "0.505653", "text": "def get_currency_unit(value):\n currency = \"\"\n unit = \"1\"\n if \"TEUR\" in value:\n currency = \"EUR\"\n unit = \"1000\"\n elif \"TDM\" in value:\n currency = \"DM\"\n unit = \"1000\"\n elif \"Mio\" in value:\n unit = \"1000000\"\n currency = value[value.find(\"Mio\")+3:]\n elif \"Mrd\" in value:\n unit = \"1000000000\"\n currency = value[value.find(\"Mrd\")+3:]\n else:\n numbers = []\n lcidx = 0\n for cidx, char in enumerate(value):\n if char.isdigit():\n numbers.append(char)\n lcidx = cidx\n unit = \"\".join(numbers)\n currency = value[lcidx + 1:].strip()\n currency = clean_currency(currency)\n return currency, unit", "title": "" }, { "docid": "1f2a54b10d945c9151241cf9d658f0ae", "score": "0.5055238", "text": "def remove_currency(raw_price):\n return raw_price.split(\" \")[-1]", "title": "" }, { "docid": "2721c4525ac8eb941edc2647fc12c80e", "score": "0.5054427", "text": "def quote_currency(self):\n\t\treturn self.__quote_currency", "title": "" }, { "docid": "dec2ca60ee3871e62408f5f7b7018a5f", "score": "0.50301546", "text": "def findCode(self, COMP_NAME, TARGET): \n\t\tMIN_ERR=1.0e20\n\t\tBEST_CODE=-1\n\t\t\n\t\tif (COMP_NAME=='C1'):\n\t\t\tm=self.calcC1\n\t\telif (COMP_NAME=='C2'):\n\t\t\tm=self.calcC2\n\t\telif (COMP_NAME=='R2'):\n\t\t\tm=self.calcR2\n\t\telif (COMP_NAME=='C3'):\n\t\t\tm=self.calcC3\n\t\telif (COMP_NAME=='R3'):\n\t\t\tm=self.calcR3\n\t\telse:\n\t\t\tprint \"Error: Wrong Name of the LMS8001 IC Loop Filter Component. Exiting. 
Returning -1.\"\n\t\t\treturn BEST_CODE\n\t\t\n\t\tfor CODE in range(0,16):\n\t\t\tABS_ERR=abs(m(CODE)-TARGET)\n\t\t\tif (ABS_ERR<MIN_ERR):\n\t\t\t\tMIN_ERR=ABS_ERR\n\t\t\t\tBEST_CODE=CODE\n\n\t\treturn BEST_CODE", "title": "" }, { "docid": "134f8f5a73940fa989981cfb694e81eb", "score": "0.5020025", "text": "def fetch_key_bcp_lang_code(lang):\n for r in query_omw(\"\"\"SELECT id\n FROM lang\n WHERE (bcp47 = ?)\"\"\", [lang]):\n return r['id']", "title": "" }, { "docid": "47d9f80a4bc708ada05613465e18ade1", "score": "0.5015696", "text": "def get_currency():\n if default_currency:\n \t#\n # url calling format = 'http://apilayer.net/api/live?access_key={}&currencies=EUR,CNY,GBP,MXN&format=1'\n #\n qs = Currency.objects.all().values_list('code', flat=True)\n url = 'http://apilayer.net/api/live?access_key={}&currencies={}&format=1'.format(settings.API_LAYER_ACCESS_KEY, ','.join(qs))\n print (url)\n\n r = requests.get(url)\n\n result = json.loads(r.text)\n\n for q in result['quotes']:\n code = q[3:]\n currency = Currency.objects.filter(code=code).first()\n currency.rate = result['quotes'][q]\n currency.save()", "title": "" }, { "docid": "7a0696364920f4f3646e20f6f3d20f49", "score": "0.5005908", "text": "def get_code(self):\r\n with open('data/county-fips-to-name.csv', 'r') as csvfile:\r\n csvdata = csv.reader(csvfile)\r\n for row in csvdata:\r\n if row[1] in self.county:\r\n code = row[0]\r\n break\r\n\r\n # county fips code has to be length of 5\r\n if len(code) == 4:\r\n code = '0' + code\r\n\r\n return code", "title": "" }, { "docid": "30affd6f50b6dbe3237da1843ce32487", "score": "0.50012606", "text": "def _currency_symbols():\n current = locale.getlocale()\n locales = list(locale.locale_alias.values())\n symbols = set()\n\n for loc in locales:\n try:\n locale.setlocale(locale.LC_MONETARY, locale.normalize(loc))\n currency = \"{int_curr_symbol}\".format(**locale.localeconv())\n if currency != \"\":\n symbols.add(currency.strip())\n except (locale.Error, UnicodeDecodeError):\n continue\n\n locale.setlocale(locale.LC_MONETARY, current)\n return list(symbols)", "title": "" }, { "docid": "297d6d4da94fd2cccc12617bb854a105", "score": "0.49998227", "text": "def get_currency(self, userid):\n self.cursor.execute(\"SELECT currency, userid FROM users WHERE userid = ?\", (userid,))\n currencyfetch = self.cursor.fetchone()\n if currencyfetch is not None:\n currency = currencyfetch[0]\n else:\n currency = 0\n return currency", "title": "" }, { "docid": "df8b6f2edc6da5167e1d56ca38de42c4", "score": "0.49984938", "text": "def country_code(cls, valid):\n return 1 if valid else \"FAKE\"", "title": "" }, { "docid": "413f3ee5a44707f0be5a18ce93275577", "score": "0.4996388", "text": "def test_remove_dollar_symbols(self):\n item = '3773.49'\n result = self.open_csv.remove_dollar_symbols(item)\n\n print 'item: %s' % item\n print 'result: %s' % result\n\n self.assertNotIn('$', result)", "title": "" }, { "docid": "71d6250f09304775b0cb5630eb6d2420", "score": "0.49881542", "text": "def code(self):\n return self._bank_code", "title": "" }, { "docid": "2e549861eb4bcffc07c364b91c27721f", "score": "0.49851966", "text": "def parse_currency(s):\n if not isinstance(s, text_type):\n return float('nan')\n s = re.sub(u'[\\$\\u20AC\\u00A3\\uFFE1\\u00A5\\uFFE5]|(EUR)', '', s)\n s = s.replace(',', '')\n try:\n return float(s)\n except:\n return float('nan')", "title": "" }, { "docid": "d431064777b66734e060344c7e2b62fa", "score": "0.4984376", "text": "def currency(self):\n return self.properties.get('currency', CurrencyColumn())", "title": 
"" }, { "docid": "86ecbfef3b55058fbc31d11a240cbfa7", "score": "0.4983195", "text": "def _extractCurrencies(self, numbers_node):\n currs_nodes = numbers_node.getElementsByTagName('currencies')\n if not currs_nodes:\n return\n\n currencies = InheritingDictionary()\n for curr_node in currs_nodes[0].getElementsByTagName('currency'):\n type = curr_node.getAttribute('type')\n currency = LocaleCurrency(type)\n\n nodes = curr_node.getElementsByTagName('symbol')\n if nodes:\n currency.symbol = self._getText(nodes[0].childNodes)\n currency.symbolChoice = \\\n nodes[0].getAttribute('choice') == u'true' \n\n nodes = curr_node.getElementsByTagName('displayName')\n if nodes:\n currency.displayName = self._getText(nodes[0].childNodes)\n\n currencies[type] = currency\n\n return currencies", "title": "" }, { "docid": "0602c7cc52171f745e91273fc6a4addb", "score": "0.4977636", "text": "def _get_carrier(self, number):\n number = self.number\n if not self.prefix or not self.country:\n return None\n try:\n mod = importlib.import_module(\n 'plnumbers.{}'.format(self.country)\n )\n except ImportError:\n return None\n\n self._NUMBERTYPES = mod.LANE_TYPE\n match = None\n for pattern, value in mod.LOOKUPS:\n if re.search(pattern, number):\n match = mod.CARRIERS[value]\n break\n\n if match:\n self.numbertype = self._NUMBERTYPES.get(match[1])\n return match[0]\n else:\n self.numbertype = self._NUMBERTYPES.get('u')\n return None", "title": "" }, { "docid": "2efd2934cd989281be4558bc4fcea4ff", "score": "0.49642444", "text": "def search_skycredit(cursor, contract_num):\n try:\n cursor.execute(\n \"\"\"\n SELECT\n tc.id,\n tc.client_id,\n CONCAT(REPLACE(tper.name3,\"'\", '\"'), ' ', LEFT(tper.name , 1), '. ', LEFT(tper.name2 , 1), '.'),\n tc.vnoska,\n ts.status,\n tc.egn,\n dwh.GetDolgBody(tc.id, date(now()))+\n dwh.GetDolgFine(tc.id, date(now()))+\n dwh.GetDolgPrc(tc.id, date(now()))\n FROM\n mbank.tcredits tc\n join mbank.tstatuses ts on ts.credit_id = tc.id\n and ts.is_last = 1\n left join mbank.tpersons tper on tper.id = tc.client_id\n WHERE tc.contract_num = {0}\n and (ts.status = 55 or ts.status = 555);\n \"\"\".format(contract_num)\n )\n except Exception:\n return None\n\n credit = cursor.fetchall()\n\n if credit:\n return credit\n\n return None", "title": "" }, { "docid": "ebd2f845dc6b8c1b1ed9db2023624901", "score": "0.49468762", "text": "def currency_code(self, currency_code):\n\n self._currency_code = currency_code", "title": "" }, { "docid": "ebd2f845dc6b8c1b1ed9db2023624901", "score": "0.49468762", "text": "def currency_code(self, currency_code):\n\n self._currency_code = currency_code", "title": "" }, { "docid": "34dd4b341635f85a6d38546c29469839", "score": "0.49370578", "text": "def account_available(currency: str) -> int:\n if currency in sc.AVAILABLE_CURRENCY_ACCOUNTS:\n return -1\n else:\n return 1", "title": "" }, { "docid": "972344c48da4e0dd81189ee3f43c3569", "score": "0.4933475", "text": "def get_currencies(self):\n\n endpoint = self.__endpoints.get(\"currencies\")\n payload = {\"accessToken\": self.__api_key}\n\n response = self.__request(endpoint, payload)\n if response[\"success\"]:\n return response\n raise KolletErrors(response[\"message\"])", "title": "" }, { "docid": "072b9471f95d3c886425c65af7080cc8", "score": "0.49302197", "text": "def verse_num_extractor(data):\n if re.search('॥ [०१२३४५६७८९]+ ॥\\n', data):\n num = re.sub(r'.*॥ ([०१२३४५६७८९]+) ॥\\n', r'\\g<1>', data)\n num = transliterate(num, 'devanagari', 'slp1')\n num = int(num)\n return num\n else:\n return False", "title": "" }, { "docid": 
"9a07b1f20a23d728d5cdf4ed91850e83", "score": "0.49299568", "text": "def _parse(self, data: Tuple[bytes, bytes]) -> TypeCurrencyIndex:\n currencies = {}\n counter = 0\n\n LOG.debug('Parsing data ...')\n\n index = self._index_currency\n format_num = self._format_num_code\n\n for sub_data in data:\n root = ElementTree.fromstring(sub_data)\n\n for child in root:\n props = {}\n for prop in child:\n props[prop.tag] = prop.text\n\n num = props['ISO_Num_Code'] or None\n if num:\n # ISO numeric code like '036' is loaded like '36', so it needs to be formatted into ISO 4217,\n # also data from the Bank of Russia contains replaced currencies that do not have ISO attributes.\n # additional If-statement was added to exclude format None\n num = format_num(num)\n\n currency = Currency(\n id=child.attrib['ID'],\n name_eng=props['EngName'],\n name_ru=props['Name'],\n code=props['ISO_Char_Code'],\n num=num,\n par=Decimal(props['Nominal']),\n )\n\n counter += 1\n currencies.update(index(currency))\n\n LOG.debug(f\"Parsed: {counter} currencies\")\n return currencies", "title": "" }, { "docid": "ddef1e8a27fb0f9e23a3018308992692", "score": "0.49265334", "text": "def test_get_currency():\n response = client.get(\"/util/currency?from_curr=USD&to_curr=INR&amt=1\")\n assert response.status_code == 200\n assert response.json() >= 70", "title": "" }, { "docid": "e1f313f626fa43dead65fe37818e2eec", "score": "0.49244246", "text": "def convert_to(self, currency):\n return None # TODO", "title": "" }, { "docid": "404441da833e468aa19ffea6e5d8e644", "score": "0.49193448", "text": "def return_currency_instance_from_exchange_pk(exchange_pk, exchange_instance):\n exchange_pk_instance = None\n try:\n exchange_pk_instance = CurrencyExchangePK.objects.get(\n exchange=exchange_instance, key=exchange_pk)\n except CurrencyExchangePK.DoesNotExist:\n pass\n\n if exchange_pk_instance:\n return exchange_pk_instance.currency\n\n return None", "title": "" }, { "docid": "85cdbef879c617ace2731e20e41ece74", "score": "0.49176356", "text": "def get_target_currency_type(self):\n return # osid.type.Type", "title": "" }, { "docid": "75b54ab89489867ae2f8610a70e2b401", "score": "0.49009168", "text": "def get_base_symbols(self):\n with self._lock:\n self._update_mkt_info()\n return list(set([mkt[\"BaseCurrencyCode\"] for mkt in self._mkt_info[\"result\"]]))", "title": "" }, { "docid": "a973a44bd00bf6402a3655763da91451", "score": "0.48923814", "text": "def country(self, key):\n\n return key[:2]", "title": "" }, { "docid": "cbf8ad54a8de0f71fe4a073be7cc90c9", "score": "0.48863983", "text": "def symbol_lookup(self):\n try: \n url = SYMBOL_LOOKUP_URL + self.stock.symbol\n result = urllib2.urlopen(url).url\n cboe_id = result.split(\"?\")[1]\n self.stock.cboe_id = cboe_id\n except urllib2.URLError, e:\n cboe_id = \"\"\n\n return cboe_id", "title": "" }, { "docid": "01e9c1c938432b79a1524311c8f9a74a", "score": "0.48590714", "text": "def currency_db():\n # type: () -> List[Text]\n return [\"vnd\", \"usd\", \"euro\"]", "title": "" }, { "docid": "730dfc990e898be6b3af1fcf7c77dadf", "score": "0.48570985", "text": "def CodeLookup(self, item: QtWidgets.QTreeWidgetItem, codelist: QtWidgets.QTreeWidget, gid: str):\n # Initialize vars\n wlist = [w.widget() for w in self.mdi.subWindowList() if isinstance(w.widget(), Database)\n or isinstance(w.widget(), CodeList) and w.widget().TreeWidget is not codelist]\n lsplt = re.split('[ \\n]', item.text(1))\n totalen = len(lsplt)\n\n # Begin search!\n for widget in wlist:\n\n # Mark code matches from different game ids with an 
additional asterisk\n regmatch = int(not(bool(widget.gameID == gid))) + 1\n\n # Process the widget's tree\n for child in filter(lambda x: x.text(1) and 'Unknown Code' not in x.text(0),\n widget.TreeWidget.findItems('', Qt.MatchContains | Qt.MatchRecursive)):\n matches = 0\n\n # For each code, check each line of the code we're looking a name for\n for line in lsplt:\n if line in child.text(1):\n matches += 1\n\n # If more than 2/3rds of the code match, we found the code we were looking for\n if matches / totalen >= 2 / 3:\n item.setText(0, child.text(0) + '*' * regmatch)\n item.setText(2, child.text(2)) # Copy comment\n item.setText(4, child.text(4)) # Copy author\n return", "title": "" }, { "docid": "7fc56a8261e1fbd46de10a6bd80e45f1", "score": "0.485295", "text": "def get_currency_name(self):\n try:\n self.logger.info('Start: get currency name')\n return self._business_admin_page.get_currency_name()\n except WebDriverException as exp:\n self.logger.error(exp.msg)\n raise\n finally:\n self.logger.info('End: get currency name')", "title": "" }, { "docid": "d7719b598269ec478586b369cb16ee4e", "score": "0.48456493", "text": "def country_code(self) -> str:\n return pulumi.get(self, \"country_code\")", "title": "" }, { "docid": "d7719b598269ec478586b369cb16ee4e", "score": "0.48456493", "text": "def country_code(self) -> str:\n return pulumi.get(self, \"country_code\")", "title": "" }, { "docid": "1765af00ce468af9c89fdfd0cae26c48", "score": "0.4832439", "text": "def get_current_exchange_rate(src_currency, tgt_currency):\n\timport urllib2\n\n\tresponse = urllib2.urlopen('http://www.google.com/ig/calculator?hl=en&q=1%s=?%s' % (src_currency, tgt_currency))\n\tdata = response.read()\n\t\n\t# Returns hash: {lhs: \"1 Euro\",rhs: \"1.3118 U.S. dollars\",error: \"\",icc: true}\n\tprint(data)\n\n\tindex = data.find('rhs: \"') + 6\n\tworking_str = data[index:]\n\tprint(working_str)\n\texchange_rate, discard_remainder = working_str.split(' ', 1)\n\tprint(exchange_rate)\n\treturn float(exchange_rate)", "title": "" }, { "docid": "a71885ff21b27e994c4d569f0eca35e2", "score": "0.4832284", "text": "def test_euro(self):\n self.assertTrue(ord(unicodedata.lookup('EURO SIGN')) in self.font)", "title": "" }, { "docid": "5c2850e3664a7dba21be74b2854f380e", "score": "0.4831148", "text": "def process_coin(amt, moneyIn):\n if moneyIn == 'quarter' or moneyIn == 'quarters':\n return amt * .25\n elif moneyIn == 'dime' or moneyIn == 'dimes':\n return amt * .10\n elif moneyIn == 'nickel' or moneyIn == 'nickels':\n return amt * .05\n elif moneyIn == 'penny' or moneyIn == 'pennies':\n return amt * .01", "title": "" }, { "docid": "ab01337328b42be31dcfe6fd2b9daf91", "score": "0.48303366", "text": "def deck_flag_convert(country_code):\n cc_dict = {\n \"en\": \"gb\",\n \"ko\": \"kr\",\n \"ja\": \"jp\",\n \"zh\": \"cn\",\n \"hi\": \"in\",\n \"sr\": \"rs\",\n \"la\": \"va\",\n \"el\": \"gr\",\n }\n\n # Only return a value different from country_code if an alternative \n # is specified in cc_dict\n if country_code in cc_dict:\n return cc_dict[country_code]\n\n return country_code", "title": "" } ]
1be57fd6b647dff48b37a3519962bba8
Assign groups to the usernames provided in the data
[ { "docid": "8ab869b356ec5f6a72bf0fdc936d85c1", "score": "0.7481766", "text": "def assign_user_groups(self, group_name: str, rows: list[tuple[str, str]]) -> None:\n group = Group.objects.get(name=group_name)\n\n for username, _ in rows:\n user = User.objects.get(username=username)\n user.groups.add(group)", "title": "" } ]
[ { "docid": "4195b1546c4302f61e324df7124c0277", "score": "0.65661025", "text": "def set_user_groups(self):\n # Notice that we only add or remove groups that we had\n # offered as options on the form, and don't touch any other\n # groups that the user might belong to.\n for value, label in self.fields['help_desk_group'].choices:\n if value:\n group = Group.objects.get(name=value)\n if value in self.cleaned_data['help_desk_group']:\n self.instance.groups.add(group)\n else:\n self.instance.groups.remove(group)", "title": "" }, { "docid": "e3171e6ddd397ed7b992ec276b6af7fe", "score": "0.6380261", "text": "def group_user(self, registrant):\n if self.groups is not None:\n groups_qs = Group.objects.filter(name__in=self.groups.split(','))\n group_list = [group for group in groups_qs]\n registrant.groups.add(*group_list)", "title": "" }, { "docid": "1f97adb20039af67c5a1d77f6beac656", "score": "0.6095477", "text": "def NameGroups(data_arr,id_key):\n\t\tnew_data_arr = []\n\t\tfor data in data_arr:\n\t\t\ttry:\n\t\t\t\tdata_arr[id_key] = clc._GROUP_MAPPING[data[id_key]]\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\tnew_data_arr.append(data)\n\t\tif clc.args: clc.v1.output.Status(\"ERROR\",2,\"Group name conversion not yet implemented\")\n\t\treturn(new_data_arr)", "title": "" }, { "docid": "fd4bdfb1386cfecc2aac63bb4bac72ad", "score": "0.60787326", "text": "def assign_user_to_group(global_token, regional_token, contractid, region,\n username, groupname):\n try:\n # if user exists return its id otherwise return 'None'\n userid = get_itemid(get_keystoneobject_list(\n regional_token, region, contractid, 'users'), username, 'users')\n # if group exists return its id otherwise return 'None'\n groupid = get_itemid(get_keystoneobject_list(\n regional_token, region, contractid, 'groups'), groupname, 'groups')\n region = 'gls'\n identityURL = 'https://identity.' + region + \\\n '.cloud.global.fujitsu.com/v3/groups/' + groupid + '/users/' + userid\n # make the put rest request\n response = requests.put(identityURL,\n headers={'X-Auth-Token': global_token,\n 'Content-Type': 'application/json'})\n return response\n except:\n return 'Failed to assign user to group'", "title": "" }, { "docid": "77fab95e1d3a628d6830039ce6162d45", "score": "0.6037697", "text": "def sync_groups_and_permissions(self, data):\n def get_perm(p):\n \"\"\"\n ``p`` format: '<ct_app_label>.<ct_model>.<p_codename>'\n \"\"\"\n try:\n ct_app_label, ct_model, p_codename = p.split('.')\n except ValueError:\n raise ValueError(u'Value must be in format \"<ct_app_label>.<ct_model>.<p_codename>\". Got \"%s\"' % p)\n try:\n return Permission.objects.get(content_type__app_label = ct_app_label,\n content_type__model = ct_model,\n codename = p_codename)\n except Permission.DoesNotExist:\n raise Permission.DoesNotExist(u'Permission \"%s\" does not exist.' 
% p)\n\n for group_name, perms in data.items():\n group, created = Group.objects.get_or_create(name=group_name)\n for p in perms:\n try:\n perm = get_perm(p)\n group.permissions.add(perm)\n except Permission.DoesNotExist as e:\n print(e)", "title": "" }, { "docid": "d0dd2b53855ea048220b4c1147cd09a9", "score": "0.60220426", "text": "def GroupUsers(self, Group, Count):", "title": "" }, { "docid": "98ed61d7985a739980ea979c1f2b67ea", "score": "0.6008645", "text": "def servergroupassign(value):\n\n SERVERGROUP[\"members\"].append(value)", "title": "" }, { "docid": "2568020385b7669bb418a6088f65188a", "score": "0.5981344", "text": "async def set_(self, ctx, group_name: str.title):\n valid_groups = ('Rosso', 'Verde', 'Blu')\n if group_name not in valid_groups:\n await ctx.send('Gruppo invalido. Scegline uno tra: ' + ', '.join(valid_groups))\n else:\n await self.db.find_one_and_update(\n {'user_id': str(ctx.author.id)},\n {'$set': {'group': group_name}},\n upsert=True\n )\n await ctx.send(f'Benvenuto in {group_name}!')", "title": "" }, { "docid": "f5aa61b241b424c021a24b6f307a7163", "score": "0.596635", "text": "def name_mapping_unix_group_add_user(self, group_name, user_name):\n return self.request( \"name-mapping-unix-group-add-user\", {\n 'group_name': [ group_name, 'group-name', [ basestring, 'None' ], False ],\n 'user_name': [ user_name, 'user-name', [ basestring, 'None' ], False ],\n }, {\n } )", "title": "" }, { "docid": "2f3f2c71cbff64c15f437d93d8a81aac", "score": "0.59597236", "text": "def create_user_groups(sender, **kwargs):\n verbosity = kwargs.get(\"verbosity\")\n if verbosity > 0:\n print \"Initializing data post_syncdb\"\n for group in dashboard_group_permissions:\n role, created = Group.objects.get_or_create(name=group)\n if verbosity > 1 and created:\n print \"Creating group {0}\".format(group)\n for perm in dashboard_group_permissions[group]:\n role.permissions.add(Permission.objects.get(codename=perm))\n if verbosity > 1:\n print \"Permitting {0} to {1}\".format(group, perm)\n role.save()", "title": "" }, { "docid": "6f2fbde5b7830238d109b73b9344dc40", "score": "0.59519976", "text": "def create(self, validated_data):\n user = User(username=validated_data['username'])\n user.set_password(validated_data['password'])\n user.save()\n for item in validated_data['groups']:\n user.groups.add(Group.objects.get(name=item))\n return user", "title": "" }, { "docid": "fae5df138c2ca4197f6b5297cb1f0210", "score": "0.59002745", "text": "def test_get_user_groups(self):\n pass", "title": "" }, { "docid": "3b92c7ecd945d875f2d56d9075f7d434", "score": "0.5897225", "text": "def get_users_in_group(df_groups, group_id):\n df = df_groups[(df_groups[0] == group_id)][[1,2]]\n df = [{'name' : list(df[1])[i].lower(), 'id' : list(df[2])[i]} for i in range(len(df))]\n for d in range(len(df)):\n if 't-' in df[d]['name']:\n df[d]['name'] = df[d]['name'][4:(len(df[d]['name'])-1)]\n return df", "title": "" }, { "docid": "3b92c7ecd945d875f2d56d9075f7d434", "score": "0.5897225", "text": "def get_users_in_group(df_groups, group_id):\n df = df_groups[(df_groups[0] == group_id)][[1,2]]\n df = [{'name' : list(df[1])[i].lower(), 'id' : list(df[2])[i]} for i in range(len(df))]\n for d in range(len(df)):\n if 't-' in df[d]['name']:\n df[d]['name'] = df[d]['name'][4:(len(df[d]['name'])-1)]\n return df", "title": "" }, { "docid": "4e5c4ed80c78da600662359d8fed5ec2", "score": "0.5861429", "text": "def update_groups(self, props, **kws):\n new_groups = props.get('group', None)\n if new_groups is not None:\n if 
isinstance(new_groups, str):\n new_groups = new_groups,\n [self._group.add(g) for g in new_groups]\n if self._family is not None:\n self._group.add(self._family)", "title": "" }, { "docid": "39e4cf10ad8f2a4e50f1334dc4ab7ea4", "score": "0.58612275", "text": "def set_members(self, groups, **kwargs):\n status, data = self.run_gerrit_command('set-members', groups, **kwargs)\n\n return status, data", "title": "" }, { "docid": "076eea61693b6884d106e63f42f0ed6e", "score": "0.5826847", "text": "def setgroup(self, group_name, members):\n is_valid_redis_key(group_name)\n if 'all' in group_name:\n raise ValueError('Choose another group name, reserverd word: \"all\"')\n\n group_name = group_name.lower()\n try:\n self.getgroup(group_name)\n except ValueError:\n # If does not exists, move to the next step\n pass\n else:\n raise GroupError('Group already exists')\n\n if type(members) is not list:\n raise TypeError('Expect a list. Found: %s' % type(members))\n\n if 'any' in members:\n raise ValueError('Wrong member identified, reserverd word: \"any\"')\n \n gmembers, ipmembers = [], []\n for member in members:\n data = isvalidtype(member)\n if type(data) is IPNetwork:\n ipmembers.append(str(data))\n else:\n gmembers.append(member)\n\n key = self.namespace(group_name)\n ipkey = self.ipnamespace(group_name)\n with self.redis.pipeline() as pipe:\n if gmembers:\n pipe.sadd(key, *gmembers)\n if ipmembers:\n pipe.sadd(ipkey, *ipmembers)\n pipe.sadd(':'.join(('list', 'groups')), key)\n pipe.execute()", "title": "" }, { "docid": "23aae09bcd96b6fa9b2adbde36a7f4bd", "score": "0.5807303", "text": "def fetch_data(self, connection: oracledb.Connection, group_name: str) -> None:\n\n self.stdout.write(f\"Adding users to {group_name} group\")\n query = GROUPS_TO_ROLES[group_name]\n\n with connection.cursor() as cursor:\n cursor.execute(query)\n\n while True:\n rows = cursor.fetchmany(1000)\n\n if not rows:\n break\n\n if group_name in (\"Importer User\", \"Exporter User\"):\n self.assign_org_permissions(group_name, rows)\n elif group_name == \"Constabulary Contact\":\n self.assign_constabulary_contacts(rows)\n else:\n self.assign_user_groups(group_name, rows)", "title": "" }, { "docid": "bc525004378bb0ff19488838511638f1", "score": "0.57904255", "text": "def create():\n\n if request.method == \"POST\":\n # Get the data on the user creating the group\n current_user = list(users.find({\"_id\": session[\"user_id\"]}))[0]\n users_list = [current_user['username']]\n for key in request.form:\n if \"username\" in key:\n if not check_valid_user(request.form[key]):\n return apology(\"Invalid Username for Member\")\n users_list.append(request.form[key])\n # Create the group and add it to the database\n new_group = [{\n \"name\": request.form.get(\"name\"),\n \"users\": users_list,\n \"expenses\": []\n }]\n id = groups.insert_many(new_group)\n # Redirect the user back to homepage\n return redirect(\"/\")\n else:\n return render_template(\"create.html\")", "title": "" }, { "docid": "52cc63773fe3c97c919c2e2fca0103a8", "score": "0.5774964", "text": "def _apply_groups(self, gdb):\n for chk_grp in self._groups:\n if not chk_grp in gdb:\n gdb[chk_grp] = Group(chk_grp, set([self.handle,]))\n logger.info ('%s: Created new group, %r', self.handle, chk_grp)\n # ensure membership in existing groups\n group = gdb[chk_grp]\n if not self.handle in group.members:\n group.add (self.handle)\n group.save ()\n for gname, group in gdb.iteritems():\n if gname not in self._groups and self.handle in group.members:\n group.remove (self.handle)\n group.save 
()", "title": "" }, { "docid": "b4486681e9579d89a9a5aaa47f8fa2e9", "score": "0.57730323", "text": "def groups_add(self, name):\n group, _ = Group.objects.get_or_create(name=name)\n group.user_set.add(self)\n\n return self", "title": "" }, { "docid": "82967eb91ecc9bb3d5dbca34408bd7da", "score": "0.57272804", "text": "def add_groups(self, *group_names):\n self._used_groups |= set(group_names)\n\n self.invalidate_members()", "title": "" }, { "docid": "1e04fa212a845efea66b1ad3be2f0f7a", "score": "0.571146", "text": "def add2group(user, group):\n # usermod -a G \n proc_usermod = subprocess.run(\n [\"usermod\", \"-a\", \"-G\", group, user]\n )\n if proc_usermod.returncode:\n raise ValueError(\"usermod returned non-zero!\")", "title": "" }, { "docid": "0f4715cf613244cd0c5c77ad7ec44d92", "score": "0.5691425", "text": "def lti_to_askup_user(self):\n askup_user, _ = User.objects.get_or_create(username=self.user_id)\n askup_user.groups = [Group.objects.get(name='Student')]\n askup_user.save()\n self.askup_user = askup_user\n self.save()", "title": "" }, { "docid": "6d71b56ebe217cc2df406d87acaee6e2", "score": "0.5683542", "text": "async def acl_groups(self, user_identity):\n pass # pragma: no cover", "title": "" }, { "docid": "cbceb9f35698e6674e1d19191cd8376d", "score": "0.5655622", "text": "def groups(self, groups):\n \n self._groups = groups", "title": "" }, { "docid": "e6da21aec076fa354456d417addf3d49", "score": "0.5629212", "text": "def test_changing_user_profile_of_a_user_should_update_groups_of_user(self):\n self.user.user_profile_id = self.user_profile2\n for group in self.user_profile2.groups_id:\n self.assertIn(\n group, self.user.groups_id,\n 'The user has not the new dependence!')", "title": "" }, { "docid": "fc790896ae703e954748014441723676", "score": "0.5621036", "text": "def get_members(group_list):\n username=[]\n for groupname in group_list:\n try: \n group=grp.getgrnam(groupname)\n username.extend(group.gr_mem)\n except:\n pass\n \n return username", "title": "" }, { "docid": "b547b643aab57837bd015745fdf6b49c", "score": "0.5616088", "text": "def group(self, irc, msg, args, name):\n group = name\n \n try:\n l = ldap.open(\"ldap.geeksoc.org\")\n l.protocol_version = ldap.VERSION3\n except ldap.LDAPError, e:\n irc.reply('Error getting info for user: \"%s\"' % name)\n return\n \n baseDN = \"cn=%s,ou=Groups, dc=geeksoc, dc=org\" %(group)\n searchFilter = \"(memberUid=*)\"\n \n try:\n results = l.search_s(baseDN, ldap.SCOPE_SUBTREE, searchFilter)\n string = ''\n for dn, entry in results:\n for u in entry['memberUid']:\n string += u + ' '\n irc.reply(\"Group: %s, Members: %s\" % (group, string))\n\n except ldap.LDAPError, e:\n irc.reply(e)", "title": "" }, { "docid": "505a75d78ccf546d612ec984be735a2c", "score": "0.5597802", "text": "def update_account_groups(self, user, account, groups, replace=False):\n return", "title": "" }, { "docid": "9fe72c2ca227d20ef4dff35cae235d8f", "score": "0.5594665", "text": "def add_group(self, username, group):\n doc = self.__get_user(username)\n groups = doc['user']['groups']\n if group not in groups:\n groups.append(group)\n\n self._db.save(doc)", "title": "" }, { "docid": "c811253b5f88dd55ec85feb14e481f3e", "score": "0.55838674", "text": "def _manage_groups(self):\n if len(self._argsdict) == 0:\n self._logger.error('usage: proctor.py group {create, append} [-h]')\n self._logger.error(\n \"proctor.py group: error: command must include subcommand 'create' or 'append'. 
Try proctor.py -h.\")\n sys.exit(-1)\n\n subcommand = sys.argv[2]\n if subcommand == 'create':\n self._create_server_group(self._args.groupname)\n return\n if subcommand == 'append':\n self._add_users_to_server_group(self._args.groupname, self._args.emails)\n else:\n raise ValueError(f'Unknown group subcommand: {subcommand}')", "title": "" }, { "docid": "e4469e556bfe69a36c0e795bef45162e", "score": "0.55825984", "text": "def assign_group_numbers_to_data(data_list:list) -> list:\n list_size = len(data_list)\n for i in range(list_size):\n data_list[i] = data_list[i].assign(separate_num=i)\n return data_list", "title": "" }, { "docid": "c2366be983de91640910a0ed92081683", "score": "0.55739546", "text": "def set_group_name(self, name):\n self.groupname = name", "title": "" }, { "docid": "bb0e37022327253915179938cada26c8", "score": "0.5558407", "text": "def shutil_setuid(user = None, group = None, xgroups = None):\n if group:\n gid = grp.getgrnam(group).gr_gid\n os.setgid(gid)\n logg.debug(\"setgid %s for %s\", gid, strQ(group))\n groups = [gid]\n try:\n os.setgroups(groups)\n logg.debug(\"setgroups %s < (%s)\", groups, group)\n except OSError as e: # pragma: no cover (it will occur in non-root mode anyway)\n logg.debug(\"setgroups %s < (%s) : %s\", groups, group, e)\n if user:\n pw = pwd.getpwnam(user)\n gid = pw.pw_gid\n gname = grp.getgrgid(gid).gr_name\n if not group:\n os.setgid(gid)\n logg.debug(\"setgid %s for user %s\", gid, strQ(user))\n groupnames = [g.gr_name for g in grp.getgrall() if user in g.gr_mem]\n groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]\n if xgroups:\n groups += [g.gr_gid for g in grp.getgrall() if g.gr_name in xgroups and g.gr_gid not in groups]\n if not groups:\n if group:\n gid = grp.getgrnam(group).gr_gid\n groups = [gid]\n try:\n os.setgroups(groups)\n logg.debug(\"setgroups %s > %s \", groups, groupnames)\n except OSError as e: # pragma: no cover (it will occur in non-root mode anyway)\n logg.debug(\"setgroups %s > %s : %s\", groups, groupnames, e)\n uid = pw.pw_uid\n os.setuid(uid)\n logg.debug(\"setuid %s for user %s\", uid, strQ(user))\n home = pw.pw_dir\n shell = pw.pw_shell\n logname = pw.pw_name\n return {\"USER\": user, \"LOGNAME\": logname, \"HOME\": home, \"SHELL\": shell}\n return {}", "title": "" }, { "docid": "a027d668730a828cd37febf3486162aa", "score": "0.5551757", "text": "def addGroupMember(user,group):\n group.members.append(user)\n db.session.commit()", "title": "" }, { "docid": "e84c45b39f957d34875a30c792d93f8a", "score": "0.5541557", "text": "def _add_to_db(self, principalUID, members):\n for member in members:\n yield self.execute(\n \"\"\"\n insert into GROUPS (GROUPNAME, MEMBER)\n values (:1, :2)\n \"\"\", (principalUID.decode(\"utf-8\"), member,)\n )", "title": "" }, { "docid": "2382bc274127528f780d5a746e3d368a", "score": "0.5526822", "text": "def assign_users(G,participants,breakouts):\n assign = {}\n i=0\n for p in participants:\n assign_p = {}\n for l in p[1]['learn']:\n if l in breakouts:\n assign_p[l] = 'breakout'\n else:\n edges = G.edges(p[0], data=True)\n common = []\n \n for e in edges:\n if (l in e[2]['common']) and (l!=''):\n common.append(e[1])\n else: \n pass\n \n if common != []:\n assign_p[l] = random.choice(common) \n \n assign[p[0]] = assign_p\n i+=1\n \n return assign", "title": "" }, { "docid": "52568660abad3114b7dc5ea6a89c6ad0", "score": "0.55113107", "text": "def create_person_group(self):\n url = self.base_url + \"persongroups/\" + self.pg_name\n response = requests.put(url, headers=self.headers, 
json={\"name\" : self.pg_name})\n if response.status_code == 200 :\n print(\"added \" + self.pg_name + \" person group\")\n else:\n print(response.json())", "title": "" }, { "docid": "2942802e325cec573289c0a39f4e7480", "score": "0.5499307", "text": "def _add_users_to_server_group(self, group_name, emails_file_name):\n email_list = self._get_emails_from_file(emails_file_name)\n self._server.add_users_to_group(group_name, email_list)", "title": "" }, { "docid": "b356c9a7b912dbe4fbf4ca98c6f65cbf", "score": "0.5491542", "text": "def add_group(username, group_name, logger, client):\n client.users.add_to_group(username, group_name)\n logger.info('User `{0}` added successfully to group '\n '`{1}`'.format(username, group_name))", "title": "" }, { "docid": "6b7489873492418b0040dbfa39379695", "score": "0.54899526", "text": "def groups(self, groups):\n\n self._groups = groups", "title": "" }, { "docid": "6b7489873492418b0040dbfa39379695", "score": "0.54899526", "text": "def groups(self, groups):\n\n self._groups = groups", "title": "" }, { "docid": "6b7489873492418b0040dbfa39379695", "score": "0.54899526", "text": "def groups(self, groups):\n\n self._groups = groups", "title": "" }, { "docid": "6b7489873492418b0040dbfa39379695", "score": "0.54899526", "text": "def groups(self, groups):\n\n self._groups = groups", "title": "" }, { "docid": "6b7489873492418b0040dbfa39379695", "score": "0.54899526", "text": "def groups(self, groups):\n\n self._groups = groups", "title": "" }, { "docid": "ec2723071fc401ad36197d6cc2f83f3b", "score": "0.5487621", "text": "def userGroup(self, _):\n logging.info(\"Querying registered groups ...\")\n status, data = self._httpRequest(\"GET\", \"/reqmgr/reqMgr/group\")\n groups = json.loads(data)\n logging.info(data)\n logging.info(\"Querying registered users ...\")\n status, data = self._httpRequest(\"GET\", \"/reqmgr/reqMgr/user\")\n logging.info(data)\n logging.info(\"Querying groups membership ...\")\n for group in groups:\n status, data = self._httpRequest(\"GET\", \"/reqmgr/reqMgr/group/%s\" % group)\n logging.info(\"Group: '%s': %s\" % (group, data))", "title": "" }, { "docid": "b32c0afc9f0eb98da8b3f77f35d6f8e3", "score": "0.54645133", "text": "def _parse_data_to_users(self):\n while True:\n dn, status, label, value = yield\n user = self._parse_dn(dn)\n\n if user not in self.users:\n u = User(user)\n self.users[user] = u\n else:\n u = self.users[user]\n u.add_data({'Status': status, label: value})", "title": "" }, { "docid": "414f34fbbb8bd679a9d2e0a78078e6e7", "score": "0.5460207", "text": "def setGroup(self, node):\n\t\tself.group = node.getGroup()\n\t\tself.groupName = node.getGroupName()", "title": "" }, { "docid": "d863b6794e09c27ed41cb8c007fac0b3", "score": "0.5457117", "text": "def set(self, group_name: str, tags: List[str]) -> None:\n self.set_group[group_name] = tags", "title": "" }, { "docid": "d863b6794e09c27ed41cb8c007fac0b3", "score": "0.5457117", "text": "def set(self, group_name: str, tags: List[str]) -> None:\n self.set_group[group_name] = tags", "title": "" }, { "docid": "72708b75875ad7ea17c522f4749c30d5", "score": "0.5441494", "text": "def setUp(self):\n self.groups = GroupProfile.objects.all()\n for gr in GroupProfile.objects.all():\n self.members[gr.name] = User.objects.filter(groups=gr.group)\n # create 2 groups\n #self.users.append(create_user(\"__user1__\", \"Joe\", \"Smith\"))\n #self.users.append(create_user(\"__user2__\", \"Alice\", \"Jones\"))\n #self.users.append(create_user(\"__user3__\", \"Mary\", \"Willson\"))\n 
#self.users.append(create_user(\"__user4__\", \"Mary\", \"Willson\"))\n #self.users.append(create_user(\"__user5__\", \"Mary\", \"Willson\"))\n #self.users.append(create_user(\"__user6__\", \"Mary\", \"Willson\"))\n #self.members[\"__group1__\"] = [self.users[0], self.users[3],]\n #self.members[\"__group2__\"] = [self.users[1], self.users[2], self.users[3], self.users[4],]\n #self.groups.append(create_group(\"__group1__\", self.users[0], self.members[\"__group1__\"]))\n #self.groups.append(create_group(\"__group2__\", self.users[1], self.members[\"__group2__\"]))\n # create 3 goals\n #self.goals[\"__group1__\"] = [create_goal(\"__goal1.1__\", self.groups[0], self.users[0]),\n # create_goal(\"__goal1.2__\", self.groups[0], self.users[0]),\n # create_goal(\"__goal1.3__\", self.groups[0], self.users[2]),\n # ]\n #self.goals[\"__group2__\"] = [create_goal(\"__goal2.1__\", self.groups[1], self.users[1]),\n # create_goal(\"__goal2.2__\", self.groups[1], self.users[2]),\n # ]", "title": "" }, { "docid": "841fda894e959596e9156215325e0aa0", "score": "0.54232115", "text": "def save(self, *args, **kwargs):\n\t\tbase_group = Group.objects.get_or_create(name='usuario_base')[0]\n\t\tself.user.groups.add(base_group)\n\t\treturn super(BaseUser, self).save(*args, **kwargs)", "title": "" }, { "docid": "a08180febd23e08d900ea795cafad2e9", "score": "0.5408937", "text": "def get_groups(name):\r\n groups = {}\r\n isa_structure = [\"Project\", \"Investigation\", \"Study\", \"Assay\"]\r\n groupname = name.split('/')[-1]\r\n groups[isa_structure[len(name.split('/'))-1]] = groupname", "title": "" }, { "docid": "0c1113417c495c49ed6fd7b5a1473f74", "score": "0.54006624", "text": "def test_add_mft_user_group(self):\n pass", "title": "" }, { "docid": "b5189cbb611d0cb00f6617611ddfb5b4", "score": "0.53828937", "text": "async def add_group_user(request: web.Request):\n user_id = request[RQT_USERID_KEY]\n gid = request.match_info[\"gid\"]\n new_user_in_group = await request.json()\n\n assert \"uid\" in new_user_in_group or \"email\" in new_user_in_group # nosec\n\n new_user_id = new_user_in_group[\"uid\"] if \"uid\" in new_user_in_group else None\n new_user_email = (\n parse_obj_as(LowerCaseEmailStr, new_user_in_group[\"email\"])\n if \"email\" in new_user_in_group\n else None\n )\n\n await api.add_user_in_group(\n request.app,\n user_id,\n gid,\n new_user_id=new_user_id,\n new_user_email=new_user_email,\n )\n raise web.HTTPNoContent", "title": "" }, { "docid": "0496cf3dcf5cc19e37002ea22f8ed59d", "score": "0.5369904", "text": "def process_mesh_vertex_groups(mesh_data, obj_user):\n mesh_data[\"vertex_groups\"] = []\n\n if obj_user and obj_user.vertex_groups:\n for vertex_group in obj_user.vertex_groups:\n vertex_group_data = OrderedDict()\n vertex_group_data[\"name\"] = vertex_group.name\n vertex_group_data[\"index\"] = vertex_group.index\n\n mesh_data[\"vertex_groups\"].append(vertex_group_data)", "title": "" }, { "docid": "ae6942e3b6855109bf33966566e2727a", "score": "0.5366977", "text": "def test_get_mft_user_groups(self):\n pass", "title": "" }, { "docid": "d5aba429aa69ac2885b09652b9b0b319", "score": "0.53580564", "text": "def test_put_groups_id(self):\n pass", "title": "" }, { "docid": "3736bfbf09e33862d5d524cd4cdc4848", "score": "0.5339213", "text": "def assign_org_permissions(self, group_name: str, rows: list[tuple[str, str, int]]) -> None:\n for username, roles, org_id in rows:\n user = User.objects.get(username=username)\n\n if group_name == \"Importer User\":\n org = Importer.objects.get(pk=org_id)\n else:\n org = 
Exporter.objects.get(pk=org_id)\n\n assign_manage = \":AGENT_APPROVER\" in roles\n\n organisation_add_contact(org, user, assign_manage)\n\n # Check user should have view permissions\n if \":VIEW\" not in roles:\n obj_perms = get_org_obj_permissions(org)\n remove_perm(obj_perms.view, user, org)\n\n # Check user should have edit permissions\n if \":EDIT_APP\" not in roles and \":VARY_APP\" not in roles:\n obj_perms = get_org_obj_permissions(org)\n remove_perm(obj_perms.edit, user, org)", "title": "" }, { "docid": "717b958d5b1bcad7f756ea3ecd336425", "score": "0.533714", "text": "def set_default_names(self):\n for i, group in enumerate(self.groups):\n group.name = f'{self.name}-{i + 1}'", "title": "" }, { "docid": "94a3adf877e471b484d755a7e87374f9", "score": "0.53289646", "text": "def assign(self):\r\n for symbolData in self.symbolData:\r\n symbolData.assign(self.group)", "title": "" }, { "docid": "cd6892bdcadfafbb86815bc9b5fde913", "score": "0.5322197", "text": "def groups(user):\n return [group.name for group in user.groups.all()]", "title": "" }, { "docid": "2145982f72284691b8d0411466ffff25", "score": "0.5309958", "text": "def group_create(name, gid=None):\n\toptions = []\n\tif gid:\n\t\toptions.append(\"-g '%s'\" % (gid))\n\tsudo(\"groupadd %s '%s'\" % (\" \".join(options), name))", "title": "" }, { "docid": "0f7da911a16a0a57a0bd1b070f662ecc", "score": "0.5302245", "text": "def test_update_mft_user_group(self):\n pass", "title": "" }, { "docid": "2789cfba02137174c5782ac4a5f9ac26", "score": "0.5300633", "text": "def group2db(user, user_group):\n group = Group()\n group.user_group = user\n group.gitlab_id = user_group.id\n group.name = user_group.name\n group.description = user_group.description\n group.visibility = user_group.visibility\n group.ssh_url_to_repo = user_group.ssh_url_to_repo\n group.http_url_to_repo = user_group.http_url_to_repo\n group.web_url = user_group.web_url\n group.name_with_namespace = user_group.name_with_namespace\n group.path = user_group.path\n group.path_with_namespace = user_group.path_with_namespace\n group.created_at = user_group.created_at\n group.last_activity_at = user_group.last_activity_at\n\n return group", "title": "" }, { "docid": "81c13fbd0b39aaaedae553ce70ece968", "score": "0.5298014", "text": "def load_groups() -> dict:\n dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n os.chdir(dir)\n os.chdir('data')\n os.chdir('server_data')\n with open('groups.csv', mode='r') as infile:\n reader = csv.reader(infile)\n group = {rows[0].upper(): [rows[1].upper(), rows[2].upper(), rows[3].upper(), rows[4].upper(),\n rows[5].upper(), rows[6].upper()] for rows in reader}\n\n for key in group.keys():\n group[key] = list(filter(None, group[key]))\n\n return group", "title": "" }, { "docid": "b60082d7537b9b30fb2354c5c63a1abf", "score": "0.5295297", "text": "def _set_user_permissions_for_volumes(users, volumes):\n\n group_name = 'volumes'\n\n user_data_script_section = f\"\"\"\ngroupadd {group_name}\n\"\"\"\n\n for user in users:\n user_data_script_section += f\"\"\"\nusermod -a -G {group_name} {user.login}\n\"\"\"\n for volume in volumes:\n user_data_script_section += f\"\"\"\nchgrp -R {group_name} {volume.mount}\nchmod -R 2775 {volume.mount} \n\"\"\"\n\n return user_data_script_section", "title": "" }, { "docid": "81aba641c992f48111b2dfb07da610b0", "score": "0.52936935", "text": "def test_update_users_of_a_group(self):\n profile = self.env['res.users'].create(\n {'name': 'P', 'login': 'p_login', 'is_user_profile': True})\n user = 
self.env['res.users'].create(\n {'name': 'U', 'login': 'u_login', 'user_profile_id': profile.id})\n self.group1.with_context(use_pdb=True).users |= profile\n self.assertIn(\n self.group1, user.groups_id,\n 'User U should have Group 1 in dependencies!')", "title": "" }, { "docid": "800fd3d88b5063cd87449f3e8edd85e6", "score": "0.52934253", "text": "def add_user_to_group(sender, instance, created, **kwargs):\n\n try:\n if created:\n if instance.role is 'instructor':\n instance.groups.add(Group.objects.get(pk=3))\n else:\n instance.groups.add(Group.objects.get(pk=4))\n except Group.DoesNotExist:\n pass", "title": "" }, { "docid": "046de7ce7a8f16a32ebe2e6ff137f468", "score": "0.5292615", "text": "def get_all_group_name(self, username):\n return self.fetch_to_list(self.fetch_data(\"select distinct Group_name from Task where username = '{}';\".format(username)))", "title": "" }, { "docid": "794f46ba0f19aa7047f9292b677d8132", "score": "0.52788293", "text": "def set_group(self, group):\n if type(group) in (list, tuple):\n self.groups = group\n else:\n self.groups = (group,)", "title": "" }, { "docid": "492a2123572f8acfb83bd63edf84d096", "score": "0.5275229", "text": "def save(self, *args, **kwargs):\n # Save the group instance\n group = super(CSVGroupCreationForm, self).save(*args, **kwargs)\n\n # Add users to the group\n group.user_set.add(*self.__initial_users)", "title": "" }, { "docid": "0496bed4c4120e0f6f4deb600b61dff7", "score": "0.52697587", "text": "def _link_groups_and_permissions(self, user: CustomUser) -> CustomUser:\n\n groups, permissions = self.data_groups, self.data_user_permissions\n groups, permissions = [groups] if isinstance(groups, str) else groups, [permissions] if isinstance(permissions, str) else permissions\n\n try:\n if groups is not None:\n if len(groups) > 0 and len(groups[0]) > 0:\n user.groups.set([Group.objects.get_by_natural_key(group) for group in groups])\n else:\n user.groups.set([])\n\n if permissions is not None:\n if len(permissions) > 0 and len(permissions[0]) > 0:\n user.user_permissions.set([Permission.objects.get(codename=permission) for permission in permissions])\n else:\n user.user_permissions.set([])\n\n except ObjectDoesNotExist as error:\n raise ObjectNotFound(_(str(error)))\n\n user.save()\n return user", "title": "" }, { "docid": "a472ce3f2d630da3e873c74e7e1aab39", "score": "0.5267011", "text": "def change_user_group(self, user_group_id, user_group_data):\r\n return self.put(\r\n 'usergroups/{}'.format(user_group_id),\r\n payload=user_group_data).json()", "title": "" }, { "docid": "949ebc48dfeadba08e7460e15c7ec6e4", "score": "0.5263152", "text": "def test_changing_group_dependencies_should_update_groups_of_users(self):\n self.group1.implied_ids = [(4, self.group2.id)]\n self.assertIn(\n self.group2, self.user.groups_id,\n 'The user has not the new dependence!')", "title": "" }, { "docid": "a389cfaf84cdf2963673a4b25d96377f", "score": "0.5262883", "text": "def create_grp(conn, id, name):\n cur = conn.cursor()\n try:\n info = (id, name)\n cur.execute(d.sql_cmd['createGrp'], info)\n except Error as e:\n print(e)", "title": "" }, { "docid": "411ee83f11e7fc621648da4cc9f73c20", "score": "0.5262673", "text": "def group_members(self, group_name):\n if group_name not in self._group_members:\n members = self.rest.get(\n f'/a/groups/{self.group(group_name)[\"group_id\"]}/members').json\n self._group_members[group_name] = [{\n \"id\": str(member['_account_id']),\n \"name\": member.get('name', ''),\n \"username\": member.get('username'),\n \"github_id\": 
str(self.oauth_id(member['_account_id'])),\n \"github_username\":\n str(self.oauth_id(member['_account_id'])),\n \"email\": member['email'] if 'email' in member else ''\n } for member in members]\n return self._group_members[group_name]", "title": "" }, { "docid": "bbe9c70f99026eff468bc4adecae349d", "score": "0.5248733", "text": "def groupfinder(userid, request) -> List[str]:\n user = request.user\n groups = None\n if user:\n groups = []\n for group in user.groups:\n groups.append(\"group:%s\" % group.name)\n groups.append(\"user:%s\" % user.id)\n return groups", "title": "" }, { "docid": "ed0789646c53fc3f88ba92497a9dc88d", "score": "0.5246923", "text": "def addGroup(name):\n group = Group()\n group.name = name\n group.members.append(current_user)\n db.session.add(group)\n db.session.commit()\n return group.id", "title": "" }, { "docid": "13ddc95537ce62988e09b5b0efdb069c", "score": "0.5245989", "text": "def _get_groups(self, username):\n\n groups = set([username])\n for provider in self.group_providers:\n for group in provider.get_permission_groups(username):\n groups.add(group)\n \n # TODO: Necessary? What is this for?\n perms = PermissionSystem(self.env).get_all_permissions()\n repeat = True\n while repeat:\n repeat = False\n for subject, action in perms:\n if subject in groups and action.islower() and action not in groups:\n groups.add(action)\n repeat = True \n \n return groups", "title": "" }, { "docid": "51cc622f8d4bb17d15f4266aa6690397", "score": "0.5234191", "text": "def add_group(self, path):\n self.pwd.create_group[path]", "title": "" }, { "docid": "adbded0c3dbf92d87fbc5763f24179b0", "score": "0.5225458", "text": "def parse_group_file(group_file):\n\n user2groups = {}\n groups_encountered = set()\n\n with group_file:\n for index, group_data in enumerate(group_file.read().splitlines()):\n group_data = group_data.split(\":\")\n try:\n validate_group_format(group_data, index + 1, groups_encountered)\n except Exception as e:\n sys.exit(str(e))\n\n group_name = group_data[GroupFieldEnum.GROUPNAME]\n groups_encountered.add(group_name)\n users = group_data[GroupFieldEnum.USERS]\n if users == '':\n continue\n for user in users.split(\",\"):\n if user not in user2groups:\n user2groups[user] = []\n\n user2groups[user].append(group_name)\n\n return user2groups", "title": "" }, { "docid": "23ff2db1bf879c76aefbc7abad2ab861", "score": "0.5221914", "text": "def default_groups(database):\n return create_default_groups()", "title": "" }, { "docid": "b7cd5fe7c0355d4a7defbd2519278c72", "score": "0.52143776", "text": "def add_to_team(self, user, groups=None, **kw):\n # TODO: user argument should be renamed to userid for clarity\n # however doing so now would break backwards compatibility\n data = kw.copy()\n data[\"user\"] = user\n if groups is not None:\n data[\"groups\"] = groups = set(groups)\n members = self.members\n if not user or user not in members:\n data[\"UID\"] = uid = getUtility(IUUIDGenerator)()\n key = user or uid\n data[\"_mtime\"] = DateTime()\n if groups is None:\n data[\"groups\"] = groups = set()\n members[key] = data\n for name, func in self.counters:\n if func(data):\n if name not in self.context._counters:\n self.context._counters[name] = Length()\n self.context._counters[name].change(1)\n membership = self.membership_factory(self, data)\n membership.handle_added()\n membership._update_groups(set(), groups)\n notify(TeamMemberAddedEvent(self.context, membership))\n self.context.reindexObject(idxs=[\"workspace_members\", \"workspace_leaders\"])\n else:\n membership = 
self.membership_factory(self, self.members[user])\n membership.update(data)\n return membership", "title": "" }, { "docid": "6f9e9835ead6b52622802ac6a80fc4b9", "score": "0.5209074", "text": "def add_to_group(switch, member, group):\n if switch not in GroupManager.group_to_member:\n GroupManager.group_to_member[switch] = {}\n\n if group not in GroupManager.group_to_member[switch]:\n GroupManager.group_to_member[switch][group] = []\n\n if member not in GroupManager.group_to_member[switch].get(group, []):\n GroupManager.group_to_member[switch].get(group).append(member)", "title": "" }, { "docid": "aad5949c3a50ecd22346e533de342dc2", "score": "0.52085155", "text": "def editgroup(self, group_name, members):\n is_valid_redis_key(group_name)\n group_name = group_name.lower()\n group, groupips = self.getgroup(group_name)\n if not group and not groupips:\n raise GroupError('Group \"%s\" does not exists' % group_name)\n if type(members) is not list:\n raise TypeError('Expect a list. Found: %s' % type(members))\n\n gmembers, ipmembers = [], []\n for member in members:\n data = isvalidtype(member)\n if type(data) is IPNetwork:\n ipmembers.append(str(data))\n else:\n gmembers.append(member)\n\n if 'any' in members:\n raise ValueError('Could not add \"any\" type members')\n\n key = self.namespace(group_name)\n ipkey = self.ipnamespace(group_name)\n\n with self.redis.pipeline() as pipe:\n if gmembers:\n pipe.sadd(key, *gmembers)\n if ipmembers:\n pipe.sadd(ipkey, *ipmembers)\n pipe.sadd(':'.join(('list', 'groups')), key)\n pipe.execute()", "title": "" }, { "docid": "a08480f5ab64dbd5e55cdb6b54d41ba2", "score": "0.52025557", "text": "def switch_user_type(self):\n self.user.groups.all().delete()\n user_type = self.get_user_type()\n if user_type == 'performer':\n self.user.groups.add(Group.objects.get_or_create(name=\"Clients\")[0])\n else:\n self.user.groups.add(Group.objects.get_or_create(name=\"Performers\")[0])", "title": "" }, { "docid": "efdbd3138fd3a0dcd8d2aa2d380dd219", "score": "0.51879734", "text": "def set_perms(header):\n payload = [\n {'group_id': '36124', 'permission': 'admin'},\n {'group_id': '41497', 'permission': 'write'},\n {'group_id': '45777', 'permission': 'read'},\n {'group_id': '36180', 'permission': 'write'}\n ]\n for group_perms in payload:\n try:\n r = requests.post(base_url + \"/repositories/\" + org + \"/\" + new_repo + \"/groups/\", json=group_perms, headers=header)\n r.raise_for_status()\n except requests.exceptions.HTTPError as err:\n print red(\"Setting perms failed: {}\").format(err)\n sys.exit(1)\n print green(\"All perms set.\")", "title": "" }, { "docid": "e809cff4c7d178bfc464bd4130b4d1c4", "score": "0.5174541", "text": "def write_groups(out_file, groupname):\r\n print(\"To create a single group please just enter the main group name i.e. Group Name\")\r\n print('To create a subgroup to an exisitng group, please enter /Group Name/Subgroup Name/etc/etc/')\r\n print() \r\n attributes = {}\r\n print(\"Enter attributes for\", groupname)\r\n meta = input(\"Is there a metadata file? (Y/N): \")\r\n if meta == \"Y\" or meta == \"y\":\r\n metapath = input(\"Enter metadata file path: \")\r\n with open(metapath, 'r') as metafile:\r\n for line in metafile:\r\n line = line.split('\\t')\r\n item = line[0].strip('\\n')\r\n value = line[-1].strip('\\n')\r\n if item in attributes.keys():\r\n attributes[item].append(value)\r\n else:\r\n attributes[item] = [value]\r\n else:\r\n input_attributes = input(\"Enter an attribute followed by a value. i.e. 
Project Name: iknowit, Date: 04-11-2019: \")\r\n for attribute in input_attributes.split(','):\r\n attribute = attribute.split(':')\r\n attributes[attribute[0].strip(' ')] = attribute[1].strip(' ')\r\n data_file = h5py.File(out_file, 'a')\r\n dset = data_file.create_group(groupname)\r\n for k, v in attributes.items():\r\n dset.attrs[k] = v", "title": "" }, { "docid": "f2735c4bb13c4d07c7d21299c9de6526", "score": "0.5172792", "text": "def _group(self, data, group_assignments):\n groups = np.unique(group_assignments)\n group_to_data = defaultdict(dict)\n for group in groups:\n for k, v in data.items():\n if isinstance(v, np.ndarray):\n assert len(group_assignments) == len(v), \\\n f'group_assignments and \"{k}\" must be the same length'\n group_to_data[group][k] = v[group_assignments == group]\n return group_to_data", "title": "" }, { "docid": "07c14e4dd0b87e3c36d397faf69ddbbd", "score": "0.51625884", "text": "def _create_server_group(self, group_name):\n self._server.create_group(group_name)", "title": "" }, { "docid": "c414d67009ccf48d55a2d1b2c47dbf37", "score": "0.5162409", "text": "def save(self, *args, **kwargs):\n\t\tadmin_group = Group.objects.get_or_create(name='administrator')[0]\n\t\tself.user.groups.add(admin_group)\t\t\n\t\treturn super(AdminUser, self).save(*args, **kwargs)", "title": "" }, { "docid": "483b86fe6581bed3dc5d5a7631019a97", "score": "0.5162018", "text": "def set_groups(self):\n self.groups = self.sub_data.groupby([self.name]) # List of grouped data by this attribute\n\n # For every distinct value, create new sub attribute object\n for i in range(len(self.values)):\n data = self.groups.get_group((self.values[i])) # Sub data set for the value\n\n # New object\n new_sub_att = sa.SubAttribute(self.values[i], data, self.classes)\n\n # Add new sub attribute to the list\n self.sub_values.append(new_sub_att)", "title": "" }, { "docid": "07036db69edd89a7df5c6deafff57d8a", "score": "0.51615137", "text": "def __get_groups(self):\n search_filter = '(objectClass=group)'\n try:\n raw_result = self.ldap_con.search_s(\n settings.AUTH_LDAP_GROUP_SEARCH, ldap.SCOPE_SUBTREE, search_filter)\n except ldap.NO_SUCH_OBJECT as e:\n raise\n\n for group in raw_result:\n group_cn = group[1]['cn'][0].lower()\n try:\n alphanumeric_dash_underscore(group_cn)\n except:\n logging.warn(\n 'group %s is not valid for the name of a mailing list' %\n group_cn)\n continue\n self.ldap_groups[group_cn] = group[1]['member']", "title": "" }, { "docid": "29f02e5130953452b5de5091f50010ee", "score": "0.5155799", "text": "def registerServerGroup(self, name, hosts):\n pgcCmd = PGC_HOME + os.sep + \"pgc register GROUP --json \\\"\" + name + \"\\\"\"\n pgcProcess = subprocess.Popen(pgcCmd, stdout=subprocess.PIPE, shell=True)\n pgcData = pgcProcess.communicate()\n yield self.session.publish('com.bigsql.onRegisterServerGroup', pgcData)\n import util\n util.map_group_hosts(name, hosts, p_group_id=0, isJson=False, printMsg=False)", "title": "" }, { "docid": "f3c80c42887a4bab920bc73b599236a3", "score": "0.5152189", "text": "def test_get_groups(self):\n pass", "title": "" }, { "docid": "679ae5d67e6b2ad605aa0480dc1502d0", "score": "0.51420146", "text": "def test_jenkins_groups(User, JenkinsRoleDefaults):\n jenkins_name = JenkinsRoleDefaults[\"jenkins_name\"]\n jenkins_groups = User(jenkins_name).groups\n assert set([\"adm\", \"sudo\", \"jenkins\"]).issubset(jenkins_groups)", "title": "" }, { "docid": "de970ca1a6a9942d9270d95f2d5641af", "score": "0.5141631", "text": "def addgrp(user=None, grp=None):\n if not grp:\n 
print(\"No group name provided.. exiting.\")\n sys.exit()\n logger.scriptlog(\"\\n%s\" % env.host)\n addgroup = sudo(\"groupadd %s\" % grp)\n if addgroup.return_code == 0:\n logger.scriptlog(\"Group added %s\" % grp)\n elif addgroup.return_code == 9:\n logger.scriptlog(\"Group already exists: %s\" % grp)\n else:\n logger.scriptlog(\"An error occured while adding group: %s\" % grp)\n try:\n sudo(\"usermod -aG %s %s\" % (grp, user))\n logger.scriptlog(\"Added %s to group: %s\" % (user, grp))\n except Exception as e:\n logger.error(e)", "title": "" }, { "docid": "da5c85a627016a1dcd73db9987144ca8", "score": "0.51397866", "text": "def addGroupToDevice(self):\n groups = self.groups\n if self.groupname not in groups:\n groups.append(self.groupname)\n groups.sort()\n self.device.setGroups(groups)\n commit()", "title": "" } ]
98152f6bc8440d2b9e45186f6f94403b
GIVEN a flask application WHEN user makes an advanced search with specified search inputs THEN return recipes that satisfy the search parameters
[ { "docid": "2613a22a3e801f9c6d7ed8349eb75375", "score": "0.67837054", "text": "def test_advanced_search_results_applies_filter_to_recipes(self, test_client, db, search_term, allergies, diet,\n cal_range, time):\n response = advanced_search_function(test_client, search_term=search_term, diet_type=diet, cal_range=cal_range,\n time=time)\n assert response.status_code == 200\n assert search_term.encode() in response.data\n\n min_cal = int(cal_range.split(',')[0])\n max_cal = int(cal_range.split(',')[1])\n user_allergies = list(map(int, allergies))\n\n response_recipe_ids = get_recipe_ids(test_client, response)\n\n from app.models import Recipes\n recipes = db.session.query(Recipes).filter(Recipes.recipe_id.in_(response_recipe_ids)).all()\n\n for recipe in recipes:\n assert recipe.diet_type[0].diet_type_id >= diet\n assert search_term in recipe.recipe_name.lower()\n assert min_cal <= int(recipe.nutrition_values.calories) <= max_cal\n assert recipe.total_time <= time\n\n # recipe_allergies = [allergy.allergy_id for allergy in recipe.allergies]\n # for allergy in user_allergies:\n # assert allergy not in recipe_allergies", "title": "" } ]
[ { "docid": "d34d8d2e127a11f836cd282e7367d401", "score": "0.7443953", "text": "def search_recipes():\n if request.method == 'POST':\n cuisine = request.form.get(\"cuisine_name_filter\")\n allergen = request.form.get(\"allergen_name_filter\")\n meal = request.form.get(\"meal_type_filter\")\n search_item = request.form.get('search_item')\n \n \n query = {}\n if cuisine and allergen and meal:\n filter_recipes = mongo.db.recipes.find({\"cuisine_name\":cuisine, \"allergen_name\":allergen, \"meal_type\":meal})\n query = {cuisine,allergen,meal}\n \n elif cuisine and allergen:\n filter_recipes = mongo.db.recipes.find({\"cuisine_name\":cuisine, \"allergen_name\":allergen})\n query = {cuisine,allergen}\n\n elif cuisine and meal:\n filter_recipes = mongo.db.recipes.find({\"cuisine_name\":cuisine, \"meal_type\":meal})\n query = {cuisine,meal}\n\n elif allergen and meal:\n filter_recipes = mongo.db.recipes.find({\"allergen_name\":allergen, \"meal_type\":meal})\n query = {allergen,meal}\n\n elif cuisine:\n filter_recipes=mongo.db.recipes.find({\"cuisine_name\":cuisine})\n query = {cuisine}\n\n elif allergen:\n filter_recipes = mongo.db.recipes.find({\"allergen_name\":allergen})\n query = {allergen}\n\n elif meal:\n filter_recipes = mongo.db.recipes.find({\"meal_type\":meal})\n query = {meal}\n \n else:\n # create the index\n filter_recipes = mongo.db.recipes.create_index( [(\"$**\", 'text')] )\n page, per_page, offset = get_page_args(page_parameter = 'page',\n per_page_parameter = 'per_page')\n\n # search with the search term that came through the form\n cursor = mongo.db.recipes.find({ \"$text\": { \"$search\": search_item } })\n total_filter_recipes = cursor.count()\n pagination = Pagination(page = page,\n per_page = ITEMS_PER_PAGE,\n total = total_filter_recipes,\n record_name = 'Recipes')\n recipes = [recipe for recipe in cursor]\n \n # send recipes to page\n return render_template('search.html', \n recipes = recipes, \n query = search_item,\n page = page,\n per_page = per_page,\n pagination = pagination)\n \n \n page, per_page, offset = get_page_args(page_parameter = 'page',\n per_page_parameter = 'per_page')\n total_filter_recipes = filter_recipes.count()\n recipes_fetched = filter_recipes.skip((page - 1)*ITEMS_PER_PAGE).limit(ITEMS_PER_PAGE)\n pagination = Pagination(page = page,\n per_page = ITEMS_PER_PAGE,\n total = total_filter_recipes,\n record_name = 'Recipes')\n \n return render_template('search.html',\n recipes = recipes_fetched,\n page = page,\n query = query,\n per_page = per_page,\n pagination = pagination)", "title": "" }, { "docid": "0e342c8a03e29122482602bab0253101", "score": "0.72639793", "text": "def search():\n # Search Buttons function\n query = request.form.get(\"query\")\n recipes = list(mongo.db.recipes.find({\"$text\": {\"$search\": query}}))\n return render_template(\"recipes.html\", recipes=recipes)", "title": "" }, { "docid": "8e245e87fbc36fd84f22a94e0a8498b9", "score": "0.7207813", "text": "def search_by_recipe(recipe_name):\n # recipe_name_list=ingredient_name.split(\" \")\n # recipe_name_format=\"+\".join(recipe_name_list)\n \n searched_recipes=get_search_by_recipe(recipe_name)\n \n return render_template('searches.html',recipes=searched_recipes)", "title": "" }, { "docid": "a9ea5e1a1b3cd7d4a945b44adad9775d", "score": "0.7062741", "text": "def search():\n query = request.form.get(\"query\")\n recipes = list(mongo.db.recipes.find({\"$text\": {\"$search\": query}}))\n return render_template(\"recipes.html\", recipes=recipes)", "title": "" }, { "docid": 
"5989decddcf5552f4e4ebc7f3e631f0f", "score": "0.7018663", "text": "def search():\n query = request.form.get(\"query\")\n recipes = mongo.db.recipes.find({\"$text\": {\"$search\": query}})\n return render_template('recipes.html', recipes=recipes,\n categories=mongo.db.categories.find())", "title": "" }, { "docid": "f2322ac395b81a7064b5ff49a6f0cad1", "score": "0.6964389", "text": "def search():\n # Wildcard text search index\n mongo.db.recipes.create_index([(\"$**\", pymongo.TEXT)])\n # Results per page\n p_limit = 9\n current_page = int(request.args.get('current_page', 1))\n # Input term for search query\n word_search = request.args.get('word_search')\n # Results for search sorted by ID\n results = mongo.db.recipes.find({'$text': {'$search': str(word_search)}}, {\"score\": {\"$meta\": 'textScore'}}).sort('_id', pymongo.ASCENDING).skip((current_page -1)*p_limit).limit(p_limit)\n # Pagination\n results_count = mongo.db.recipes.find({'$text': {'$search': str(word_search)}}).count()\n results_pages = range(1, int(math.ceil(results_count / p_limit)) + 1)\n total_page_no = int(math.ceil(results_count/p_limit))\n \n # Most Popular recipes - appear when there are no results to the user's query\n recommended = mongo.db.recipes.find().sort(\"favourite_count\", pymongo.DESCENDING).limit(3)\n \n return render_template(\"search.html\", \n p_limit = p_limit,\n current_page=current_page, \n results_count=results_count,\n word_search=word_search,\n results=results,\n results_pages=results_pages,\n total_page_no=total_page_no,\n recommended=recommended)", "title": "" }, { "docid": "4c87406c5498045544ca25cbc0bd939b", "score": "0.6890836", "text": "def search_by_ingredients(ingredient_name):\n # recipe_name_list=ingredient_name.split(\" \")\n # recipe_name_format=\"+\".join(recipe_name_list)\n searched_recipes=get_search_by_ingredients(ingredient_name)\n return render_template('search.html',recipes=searched_recipes)", "title": "" }, { "docid": "7a7112f1b549bfc777a2049ae2ce605a", "score": "0.6860355", "text": "def search():\n search = request.form.get(\"search\")\n types = list(mongo.db.type.find().sort(\"type_name\", 1))\n recipes = list(mongo.db.recipes.find({\"$text\": {\"$search\": search}}))\n flash(\"{} Results for '{}'\".format(len(recipes), search))\n return render_template(\"recipes.html\", recipes=recipes, types=types)", "title": "" }, { "docid": "edec680426b6b94882a250d4b5993ea7", "score": "0.6691776", "text": "def search_recipes(query):\n # search Recipes function\n if search:\n recipes = list(\n mongo.db.recipes.find({\"category_name\": query}))\n\n return render_template(\"recipes.html\", recipes=recipes)", "title": "" }, { "docid": "bd82c3e788872091ac7e6bfb70961775", "score": "0.6485085", "text": "def view_recipes_with_ingredients():\r\n\r\n ingredient_list = request.args.get('ingredients')\r\n ing_list = crud.string_to_list(ingredient_list)\r\n ing_id_list = []\r\n for ingr in ing_list:\r\n ingred_by_name = crud.get_ingredient_by_name(ingr)\r\n ing_id_list.append(ingred_by_name.ingredient_id)\r\n recipe_search_results = crud.get_recipes_by_multiple_ing(ing_id_list)\r\n\r\n return render_template('ing_recipe_search_results.html', see_recipes=recipe_search_results)", "title": "" }, { "docid": "eb1ee04354501f9af87e6391d489f999", "score": "0.64551157", "text": "def test_search_recipes(self):\n nutrients = {'maxCarbs': '100'}\n ingredients = ['potatoes']\n diet = 'vegetarian'\n intolerances = ['dairy']\n recipe_list = RecipeRecommender.search_recipes(ingredients, nutrients,\n diet, intolerances)\n 
self.assertIsInstance(recipe_list, list)\n if len(recipe_list) > 0:\n self.assertIsInstance(recipe_list[0], Recipe)", "title": "" }, { "docid": "b9e2b75a19daf7b318b88bffe21389ab", "score": "0.64290017", "text": "def search(self, **kwargs):", "title": "" }, { "docid": "50254ace5f6e7f0d82864ce49c642b1e", "score": "0.6413538", "text": "def search_gear():\n\n gear_keyword = request.form.get(\"gear_name\")\n category = request.form.get(\"category\")\n brand = request.form.get(\"brand\")\n # zipcode = request.form.get(\"zipcode\")\n if category == \"All Categories\" and brand == \"All Brands\":\n equipSearch = Equipment.query.filter(Equipment.gear_name.contains(gear_keyword), Equipment.available.is_(True)).all()\n elif category != \"All Categories\" and brand == \"All Brands\":\n equipSearch = Equipment.query.filter(Equipment.category == category, Equipment.gear_name.contains(gear_keyword), Equipment.available.is_(True)).all()\n elif category == \"All Categories\" and brand != \"All Brands\":\n equipSearch = Equipment.query.filter(Equipment.brand == brand, Equipment.gear_name.contains(gear_keyword), Equipment.available.is_(True)).all()\n else:\n equipSearch = Equipment.query.filter(Equipment.category == category, Equipment.brand == brand, Equipment.gear_name.contains(gear_keyword), Equipment.available.is_(True)).all()\n\n equipment = []\n\n # print equipSearch\n\n for e in equipSearch:\n d = {\n 'gear_id' : e.gear_id,\n 'gear_name': e.gear_name,\n 'category' : e.category,\n 'brand' : e.brand,\n 'lender_email' : e.lender_email,\n 'gear_photo_url' : e.gear_photo_url,\n 'zipcode' : e.zipcode\n }\n equipment.append(d)\n\n return jsonify(equipment)", "title": "" }, { "docid": "6e2c3c45116018faa04088cc58b7c0b6", "score": "0.63533044", "text": "def get_recipes(request):\n if not request.body:\n return JsonResponse({})\n\n # parse json string to list of ingredient names\n ing = request.body.decode(\"utf-8\")\n ingredients_to_search_by = ing[1:-1].replace('\"', \"\").split(',')\n # save ingredients for the future\n # get user id, if applicable. Else default to 0\n usr_id = 0\n if request.user:\n save_ingredients_to_user(request.user, ingredients_to_search_by, request.session)\n usr_id = request.user.id\n # send ingredients to search algorithm\n values = IngredientUtils(usr_id).find_recipes(ingredients_to_search_by)\n # convert queryset to JSON!!! 
(Now made serializable in the find_recipes call)\n for recipe in values:\n recipe['appliances'] = list(Appliance.objects.filter(recipe=recipe['id']).values('name'))\n\n return JsonResponse({'results': values})", "title": "" }, { "docid": "4800436b60324eebca1d49b9a4c93ec7", "score": "0.6332947", "text": "def search_books():\n \n # Retrieve input prompted by the user\n search_input = request.args.get(\"search\")\n\n if search_input == \"\":\n return render_template(\"search.html\", search=search_input)\n\n # Fetch database\n database_response = db.execute(\"SELECT * FROM books WHERE title iLIKE :entry OR author iLIKE :entry OR isbn iLIKE :entry\", {'entry': \"%\" + search_input + \"%\"}).fetchall()\n\n return render_template(\"search.html\", results=database_response, search=search_input)", "title": "" }, { "docid": "7096118bc097e9c224f60b1784c7f405", "score": "0.6324785", "text": "def search():\n\n search_this_string = request.form['search']\n cursor = gear.find({'$text': {'$search': search_this_string}})\n return render_template('searchresults.html', result=list(cursor),\n categories=list(categories.find()))", "title": "" }, { "docid": "3caf852ab6929412009e0660220b69c3", "score": "0.632282", "text": "def search():\n drug = request.args.get('drug').lower()\n location = request.args.get('location')\n\n # get DB\n db = get_db()\n cur = db.cursor()\n\n # store search\n now = now_int()\n cur.execute('INSERT INTO Searches VALUES(?,?)', (drug, now))\n db.commit()\n\n # find drug in DB\n cur.execute('SELECT drug_id FROM DrugNames WHERE Name = ?', (drug,))\n exact_row = cur.fetchone()\n found = exact_row is not None\n\n # find variants\n no_spaces = re.sub('\\\\s+', '', drug)\n id_counts = dict()\n for idx in range(len(no_spaces)-2):\n trigram = no_spaces[idx:(idx+3)]\n for drug_id in cur.execute(\"\"\"SELECT drug_id\n FROM DrugNameParts\n WHERE Part = ?\n LIMIT 100\"\"\", (trigram,)):\n id_counts[drug_id[0]] = id_counts.get(drug_id[0], 0) + 1\n\n sorted_id_counts = sorted(list(id_counts.items()),\n key=lambda tup: -1 * tup[1])\n\n result = {'known': found}\n\n if found:\n drug_id = exact_row[0]\n result['drug-id'] = drug_id\n\n # TODO get location using https://bcgov.github.io/ols-devkit/ale/\n loc_lat = 49.28202\n loc_long = -123.11875\n\n # record request\n cur.execute('INSERT INTO DrugRequests VALUES (?,?,?,?)',\n (drug_id, now, loc_lat, loc_long))\n db.commit()\n\n # fetch availability\n pharmacies = list()\n # TODO: Order pharmacies by distance\n \"\"\"\n for row in cur.execute(\n '''SELECT pharma_id, (lat - ?) * (lat - ?) + (long - ?) * (long - ?) AS distance\n FROM PharmaLoc\n ORDER BY distance ASC\n LIMIT 10''', (loc_lat, loc_lat, loc_long, loc_long)):\n pharma_id = row[0]\n pharmacies.append({'id': pharma_id})\n \"\"\"\n\n for row in cur.execute(\n ''' SELECT pharma_id, availability, when_reported\n FROM Availabilities\n WHERE drug_id = ? 
AND availability = 1\n ORDER BY when_reported ''', (drug_id,)):\n pharma_id = row[0]\n pharmacies.append({'id': pharma_id})\n\n for index in range(len(pharmacies)):\n pharma_id = pharmacies[index]['id']\n pharmacy = cur.execute('SELECT *, rowid FROM PharmaDoc WHERE rowid = ?', (pharma_id,)).fetchone()\n pharmacies[index] = {\n 'name': pharmacy[0],\n 'address': pharmacy[1],\n 'phone': pharmacy[2],\n 'fax': pharmacy[3],\n 'manager': pharmacy[4],\n 'hours': pharmacy[5],\n 'latitude': pharmacy[6],\n 'longitude': pharmacy[7],\n 'pharma_id': pharmacy[8]\n }\n\n\n if not pharmacies:\n #If pharmacies list is empty, there are no pharmacies that have the medication\n result['unavailableAtPharmacies'] = True\n result['pharmacies'] = pharmacies\n\n variants = []\n for variant_id, count in sorted_id_counts[:10]:\n cur.execute('SELECT name FROM DrugNames WHERE drug_id = ?',\n (variant_id,))\n variants.append({\n 'id': variant_id,\n 'name': cur.fetchone()[0]\n })\n\n result['other-drugs'] = variants\n\n return jsonify(result)", "title": "" }, { "docid": "8a0307aa13ebf3479143b33f5c05112a", "score": "0.622523", "text": "def test_search_function_applies_preferences_with_logged_in_user(self, test_client, user, logged_in_user, db, itr,\n search_term):\n # User details as stored in db\n user_diet_name = user.diet_preferences[0].diet_type.diet_name\n user_diet_id = user.diet_preferences[0].diet_type.diet_type_id\n user_allergy_names = [allergy.allergy.allergy_name for allergy in user.allergies]\n user_allergy_ids = [allergy.allergy.allergy_id for allergy in user.allergies]\n\n # User details as presented in front-end, through config global variables\n diet_name = str((config.DIET_CHOICES[user_diet_id - 1])[1]).lower()\n allergy_names = [str((config.ALLERGY_CHOICES[i - 1])[1]).lower() for i in user_allergy_ids]\n\n response = search_function(test_client, search_term)\n assert b'Based on saved user preferences, we have applied the following filters:' in response.data\n # Check the Flash message\n assert b'Diet type: ' + diet_name.encode() in response.data\n assert b'Allergies: ' in response.data\n for allergy_name in allergy_names:\n assert allergy_name.encode() in response.data\n\n \"\"\"\n GIVEN a flask application and registered user (with randomly generated diet type and food preferences)\n WHEN user requests to view all recipes\n THEN returned recipes satisfies user's diet preferences and allergies\n \"\"\"\n # Pull the recipe ids that are returned in on the response page (as a list). See conftest helper function.\n # We will compare these recipe ids in a db query to ensure that the filters have been applied\n response_recipe_ids = get_recipe_ids(test_client, response)\n\n from app.models import Recipes, RecipeDietTypes, RecipeAllergies\n\n for id in response_recipe_ids: # for all recipes that are returned in results\n # Query all recipe ids which have the user's allergies\n blacklist = db.session.query(RecipeAllergies.recipe_id) \\\n .filter(RecipeAllergies.allergy_id.in_(user_allergy_ids)) \\\n .distinct().subquery()\n\n # Use outerjoin to exclude blacklisted recipes in query\n query = db.session.query(Recipes) \\\n .outerjoin(blacklist, Recipes.recipe_id == blacklist.c.recipe_id) \\\n .join(RecipeDietTypes) \\\n .join(RecipeAllergies) \\\n .filter(Recipes.recipe_id == id) \\\n .filter(RecipeDietTypes.diet_type_id >= user_diet_id)\n # db recipe id should = recipe id on page\n # db diet type should be >= user's saved diet preference\n # This query should be not None (i.e. 
it should return something) if the filters have been applied\n assert query is not None", "title": "" }, { "docid": "bfc3514a140a19cfbe36bf25600d0da2", "score": "0.62131435", "text": "def search(**kwargs):\n filter_list = []\n if 'main_search_term' in kwargs:\n term = kwargs['main_search_term']\n filter_list.append(or_(\n Listing.name.ilike('%{}%'.format(str(term))),\n Listing.product_id.ilike('%{}%'.format(str(term))),\n Listing.description.ilike('%{}%'.format(str(term))))\n )\n if 'strict_name_search' in kwargs:\n term = kwargs['strict_name_search']\n filter_list.append(Listing.name.like('%{}%'.format(str(term))))\n if 'name_search_term' in kwargs and kwargs['name_search_term']:\n term = kwargs['name_search_term']\n vendors = Vendor.query.filter(Vendor.company_name.ilike('%{}%'.format(str(term)))).all()\n vendor_ids = [vendor.id for vendor in vendors]\n filter_list.append(Listing.vendor_id.in_(vendor_ids))\n\n # used by vendors to filter by availability\n if 'avail' in kwargs:\n avail_criteria = kwargs['avail']\n format(avail_criteria)\n if avail_criteria == \"non_avail\":\n filter_list.append(Listing.available == False)\n if avail_criteria == \"avail\":\n filter_list.append(Listing.available == True)\n if avail_criteria == \"both\":\n filter_list.append(or_(Listing.available == True,\n Listing.available == False))\n\n # used by merchants to filter by availability\n if 'available' in kwargs:\n filter_list.append(Listing.available == True)\n\n if 'fav_vendor' in kwargs and kwargs['fav_vendor']:\n bookmarked_vendor_ids = BookmarkVendor.query.filter_by(merchant_id=current_user.id).all()\n ids = []\n for bookmark in bookmarked_vendor_ids:\n ids.append(bookmark.vendor_id)\n filter_list.append(Listing.vendor_id.in_(ids))\n\n if 'favorite' in kwargs and kwargs['favorite']:\n bookmarked_vendor_ids = Bookmark.query.filter_by(merchant_id=current_user.id).all()\n ids = []\n for bookmark in bookmarked_vendor_ids:\n ids.append(bookmark.listing_id)\n filter_list.append(Listing.id.in_(ids))\n\n if 'min_price' in kwargs and kwargs['min_price']:\n filter_list.append(Listing.price >= kwargs['min_price'])\n\n if 'max_price' in kwargs and kwargs['max_price']:\n filter_list.append(Listing.price <= kwargs['max_price'])\n\n filtered_query = Listing.query.filter(*filter_list)\n print Listing.query.filter(*filter_list)\n if 'sort_by' in kwargs and kwargs['sort_by']:\n sort = kwargs['sort_by']\n format(sort)\n else:\n sort = None\n\n if sort == \"low_high\":\n sorted_query = filtered_query.order_by(Listing.price)\n elif sort == \"high_low\":\n sorted_query = filtered_query.order_by(desc(Listing.price))\n elif sort == \"alphaAZ\":\n sorted_query = filtered_query.order_by(func.lower(Listing.name))\n elif sort == \"alphaZA\":\n sorted_query = filtered_query.order_by(desc(func.lower(Listing.name)))\n else: # default sort\n sorted_query = filtered_query.order_by(Listing.price)\n return sorted_query", "title": "" }, { "docid": "d958957bc44e58ea62a899c111a260c5", "score": "0.61862475", "text": "def search(self):", "title": "" }, { "docid": "f17d676912eef501c2927bdea9c3694e", "score": "0.61715937", "text": "def search(self, *args, **kwargs):", "title": "" }, { "docid": "f17d676912eef501c2927bdea9c3694e", "score": "0.61715937", "text": "def search(self, *args, **kwargs):", "title": "" }, { "docid": "39225a18a2d33d748b4987938888d014", "score": "0.61298484", "text": "def search_main():\n form = SearchForm()\n user = http_auth.username()\n if form.validate_on_submit():\n query_terms = form.search_terms.data.split(' ')\n 
answer = OrderedDict()\n for query in query_terms:\n result = str(searcher.query(query))\n answer[query] = result.split(' ', 1)[1]\n return render_template('search_results.html', title='Search', answer=answer, user=user)\n return render_template('search.html', title='Search', zone=FORWARD_ZONE, form=form, user=user)", "title": "" }, { "docid": "000894a50f9672cc8b408ac34d4623e6", "score": "0.61075556", "text": "def getResults(ingredient):\n puppy = Puppy()\n recipelist = puppy.search_recipe(ingredient)\n\n return recipelist", "title": "" }, { "docid": "4abf100a357f8c541475efb313d4715d", "score": "0.6101594", "text": "def search():\n search_string = str(flask.request.args['search_val'])\n \n min_rating = flask.request.args['min_rating']\n max_rating = flask.request.args['max_rating']\n\n page = int(flask.request.args['page'])\n page_offset = (page - 1) * 10\n\n genres = flask.request.args['genres']\n genres_list = json.loads(genres)\n\n # TODO: actually store genres in a separate table to avoid this terrible regex hack\n # for now, this is reasonably performant at this scale\n query_regex = \".*\" + \".*|.*\".join(genres_list) + \".*\"\n\n\n count_query = (\"SELECT \" \n \"count(*) \"\n \"FROM \"\n \"movies \"\n \"INNER JOIN avg_ratings ON movies.id = avg_ratings.movie_id \"\n \"WHERE \"\n \"avg_ratings.avg_rating >= %s and avg_ratings.avg_rating <= %s \"\n \"AND \"\n \"movies.genres ~ %s \")\n\n with db.cursor(cursor_factory = psycopg2.extras.DictCursor) as cur:\n cur.execute(count_query,\n (float(min_rating), float(max_rating), query_regex, )\n )\n result = cur.fetchone()\n count = result[0]\n\n # This query retrieves data for the movie list view using a view previously calculated for average rating\n # It also uses\n\n base_query = (\"SELECT \" \n \"movies.id, movies.title, movies.genres, avg_ratings.avg_rating \"\n \"FROM \"\n \"movies \"\n \"INNER JOIN avg_ratings ON movies.id = avg_ratings.movie_id \"\n \"WHERE \"\n \"avg_ratings.avg_rating >= %s and avg_ratings.avg_rating <= %s \"\n \"AND \"\n \"movies.genres ~ %s \")\n\n # order by rating if the search query is empty\n if search_string == \"\":\n with db.cursor(cursor_factory = psycopg2.extras.DictCursor) as cur:\n cur.execute(\n base_query +\n \"ORDER BY avg_ratings.avg_rating DESC, movies.id DESC LIMIT 10 OFFSET %s;\",\n (float(min_rating), float(max_rating), query_regex, page_offset,)\n )\n result = cur.fetchmany(10)\n return flask.json.dumps(search_results_to_dict(result, count), default=decimal_default)\n # otherwise, order by search closeness\n else:\n with db.cursor(cursor_factory = psycopg2.extras.DictCursor) as cur:\n cur.execute(\n base_query +\n \"ORDER BY SIMILARITY(title, %s) DESC, movies.id DESC LIMIT 10 OFFSET %s;\",\n (float(min_rating), float(max_rating), query_regex, search_string, page_offset,)\n )\n result = cur.fetchmany(10)\n\n return flask.json.dumps(search_results_to_dict(result, count), default=decimal_default)", "title": "" }, { "docid": "45fbe39b2e72f8e470a3101018855f8b", "score": "0.607085", "text": "def search(json, session):\n reload(logic)\n return logic.search(json, collection, session)", "title": "" }, { "docid": "98e9186fbbc43f49c4f7106a3c4fc826", "score": "0.60681784", "text": "def search_saved():\n user = session.get('user', 'Guest')\n search = request.form.get(\"search\")\n types = list(mongo.db.type.find().sort(\"type_name\", 1))\n recipes = list(mongo.db.recipes.find({\"$text\": {\"$search\": search}}))\n saved_list = list(mongo.db.users.distinct(\n \"saved_recipes\", {\"user_name\": user}))\n # 
clears any empty entries\n check_list = []\n for item in saved_list:\n if item == \"\":\n continue\n else:\n check_list.append(item)\n # length of the list shows the rating count\n size = len(check_list)\n return render_template(\"saved_recipes.html\", recipes=recipes, types=types,\n check_list=check_list, size=size, user=user)", "title": "" }, { "docid": "21d2078f3b4d86f4337b5f5dd60ce340", "score": "0.60452926", "text": "def search(query, **parameters):", "title": "" }, { "docid": "21d2078f3b4d86f4337b5f5dd60ce340", "score": "0.60452926", "text": "def search(query, **parameters):", "title": "" }, { "docid": "892d55a87d19cd4c3cc9923acfa488c6", "score": "0.60349613", "text": "def searchDocuments(self, REQUEST):\n if self.onSearch:\n # Manually generate a result set\n try:\n results = self.runFormulaScript(\n 'from_%s_onsearch' % self.id,\n self,\n self.onSearch)\n except PlominoScriptException, e:\n if self.REQUEST:\n e.reportError('Search event failed.')\n return self.OpenForm(searchresults=[])\n else:\n # Allow Plomino to filter by view, default query, and formula\n db = self.getParentDatabase()\n searchview = db.getView(self.getSearchView())\n\n #index search\n index = db.getIndex()\n query = {'PlominoViewFormula_'+searchview.getViewName(): True}\n\n for f in self.getFormFields(\n includesubforms=True,\n request=REQUEST):\n fieldname = f.id\n #if fieldname is not an index -> search doesn't matter and returns all\n submittedValue = REQUEST.get(fieldname)\n if submittedValue:\n submittedValue = asUnicode(submittedValue)\n # if non-text field, convert the value\n if f.getFieldType() == \"NUMBER\":\n settings = f.getSettings()\n if settings.type == \"INTEGER\":\n v = long(submittedValue)\n elif settings.type == \"FLOAT\":\n v = float(submittedValue)\n elif settings.type == \"DECIMAL\":\n v = decimal(submittedValue)\n elif f.getFieldType() == \"DATETIME\":\n # if datetime widget, the request param contains an object\n if getattr(REQUEST.get(fieldname), 'year'):\n submittedValue = \"%(year)s-%(month)s-%(day)s %(hour)s:%(minute)s\" % self.REQUEST.get('birthDate')\n v = StringToDate(submittedValue, format='%Y-%m-%d %H:%M')\n else:\n v = submittedValue\n # rename Plomino_SearchableText to perform full-text\n # searches on regular SearchableText index\n if fieldname == \"Plomino_SearchableText\":\n fieldname = \"SearchableText\"\n query[fieldname] = v\n\n sortindex = searchview.getSortColumn()\n if not sortindex:\n sortindex = None\n results = index.dbsearch(\n query,\n sortindex=sortindex,\n reverse=searchview.getReverseSorting())\n\n #filter search with searchformula\n searchformula = self.getSearchFormula()\n if searchformula:\n filteredResults = []\n try:\n for doc in results:\n valid = self.runFormulaScript(\n SCRIPT_ID_DELIMITER.join(['form', self.id, 'searchformula']),\n doc.getObject(),\n self.SearchFormula)\n if valid:\n filteredResults.append(doc)\n except PlominoScriptException, e:\n e.reportError('Search formula failed')\n results = filteredResults\n\n return self.OpenForm(searchresults=results)", "title": "" }, { "docid": "b44e0b7310731adcde15d418e63a7f66", "score": "0.60249716", "text": "def newquerysearch():\n id = request.form[\"id\"]\n search = request.form[\"search\"]\n if (len(search) >= 2):\n results = []\n res2 = []\n try:\n r = requests.post(\"https://www.zefix.ch/ZefixREST/api/v1/firm/search.json\", json={\"name\": search,\"languageKey\":\"de\",\"maxEntries\":4})\n x = r.json()[\"list\"]\n for y in x:\n name = y[\"name\"]\n uid = y[\"uid\"]\n che = uid[0:3] + \"-\" + 
uid[3:6] + \".\" + uid[6:9] + \".\" + uid[9:12]\n if len(res2) < 1:\n res = query(\"SELECT * FROM liquidations WHERE che='{}'\".format(che))\n if len(res) > 0:\n res2.append(res)\n else:\n results.append([name, che])\n else:\n results.append([name, che])\n except:\n pass\n if len(res2) > 0:\n results.append(res2[0])\n if len(results) == 0:\n search = [id, search, [], False, 0]\n return render_template(\"section3.html\", search=search)\n if isinstance(results[-1][0], str):\n r = False\n else:\n r = True\n search = [id, search, results, r, len(results)]\n else:\n search = [id, search, [], False, 0]\n return render_template(\"section3.html\", search=search)", "title": "" }, { "docid": "90f50215d680daaba55133f7137cd380", "score": "0.5978783", "text": "def query_criteria():", "title": "" }, { "docid": "53f5bf88f37e7ac3813379aca7ed712c", "score": "0.59494305", "text": "def test_search_find_one_result_by_name(self):\n make_recipe(\n \"legalaid.case\",\n reference=\"ref1\",\n personal_details__full_name=\"xyz\",\n personal_details__postcode=\"123\",\n **self.get_extra_search_make_recipe_kwargs()\n )\n\n self.resource.personal_details.full_name = \"abc\"\n self.resource.personal_details.postcode = \"123\"\n self.resource.personal_details.save()\n self.resource.reference = \"ref2\"\n self.resource.save()\n\n response = self.client.get(\n self.list_url, data={\"search\": \"abc\"}, HTTP_AUTHORIZATION=self.get_http_authorization()\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(1, len(response.data[\"results\"]))\n self.assertCaseEqual(response.data[\"results\"][0], self.resource)", "title": "" }, { "docid": "c13b98cbbb643da3d3546f938e885068", "score": "0.59477454", "text": "def test_search_page(self):\n res = self.client.get('/search', follow_redirects=True, data={\n 'recipe_name': 'Pumpkin rice',\n 'recipe_description': 'From brownies and pancakes to veggie-packed curries, stir-fries and salads, these vegan recipes are vibrant and delicious.',\n 'recipe_instructions': 'Add all of the ingredients',\n 'recipe_keywords': 'vegan, pumpkin, rice, butternut squash'\n })\n data = res.data.decode('utf-8')\n assert res.status == '200 OK'\n assert 'vegan' in data\n assert 'Pumpkin' in data\n assert 'delicious' in data\n assert 'butternut' in data", "title": "" }, { "docid": "72b05eb19781242644ca48149f75afa6", "score": "0.5934002", "text": "def search(self, event):\r\n filter_choice = self.categories.GetValue()\r\n keyword = self.search_ctrl.GetValue()\r\n self.skeleton_results = controller.search_records(\r\n self.session, filter_choice, keyword)\r\n self.update_skeleton_results()", "title": "" }, { "docid": "b9cc9081fe156f407138c25695500d69", "score": "0.59256864", "text": "def search():\n search_query = request.args.get(\"q\")\n\n if search_query is None or len(search_query) < 1:\n response_json = {\"status\": 422}\n if search_query is None:\n response_json[\"message\"] = \"Search query not found. 
Use ?q=searchquery.\"\n else:\n response_json[\"message\"] = \"Search query length must be at least 1.\"\n response = jsonify(response_json)\n response.status_code = 422\n return response\n\n search_query = search_query.lower()\n response_json = {\"status\": 200}\n\n model_queries = {\n \"cities\": City.query,\n \"doctors\": Doctor.query,\n \"specialties\": Specialty.query,\n }\n for model_name in [\"cities\", \"doctors\", \"specialties\"]:\n response_json[model_name] = search_model(\n search_query,\n model_queries[model_name],\n SEARCH_ATTRIBUTES[model_name],\n SERIALIZE_ATTRIBUTES[model_name],\n )\n response_json[f\"num_{model_name}\"] = len(response_json[model_name])\n\n response_json[\"num_results\"] = sum(\n response_json[attr] for attr in [\"num_cities\", \"num_doctors\", \"num_specialties\"]\n )\n\n response_json[\"page\"] = 1\n response_json[\"total_pages\"] = 1\n response = jsonify(response_json)\n response.status_code = 200\n return response", "title": "" }, { "docid": "dbac4911c206691027a4166cf627df86", "score": "0.59028435", "text": "def recommend(ingredienten, nietappliance):\n\n # make the query\n begin = \"\"\"PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\nPREFIX rp: <http://www.groepsproject.com/recipes/>\nSELECT DISTINCT ?title ?link ?linktitle ?description\nWHERE {\n ?recipe rdf:type rp:Recipe.\n \"\"\"\n # remove the recipes with other ingredients\n for ingredient in range(len(ingredienten)):\n begin += \"\"\"FILTER NOT EXISTS {?recipe rp:hasIngredient ?ingredient%i.\n ?ingredient%i rdf:type rp:%s}\n \"\"\" % (ingredient, ingredient, ingredienten[ingredient])\n\n # remove the recipes for which you don't have the appliance\n for appliance in nietappliance:\n begin += \"\"\"FILTER NOT EXISTS {?recipe rp:needsAppliance rp:%s}\n \"\"\" % appliance\n\n # get the recipes data\n begin += \"\"\"?recipe rp:hasTitle ?title;\n rp:hasLink ?link;\n rp:hasDescription ?description;\n rp:hasLinkTitle ?linktitle.\n} ORDER BY DESC(?link)\"\"\"\n\n # display the query in terminal\n print(begin)\n\n # fill in your own reposity here\n sparql = SPARQLWrapper(\"http://localhost:7200/repositories/project\")\n\n # set the right format, and type of request method\n sparql.setReturnFormat(JSON)\n sparql.setMethod(POST)\n\n # run the query\n sparql.setQuery(begin)\n results = sparql.query()\n # set max colwidth off\n pd.set_option('display.max_colwidth', -1)\n # load as pandas dataframe\n processed_results = json.load(results.response)\n cols = processed_results['head']['vars']\n out = []\n for row in processed_results['results']['bindings']:\n item = []\n for c in cols:\n item.append(row.get(c, {}).get('value'))\n out.append(item)\n df = pd.DataFrame(out, columns=cols)\n df.reindex(columns=['linktitle', 'link', 'description', 'title']).to_html(\"templates/test.html\")\n with open(\"templates/test.html\", \"r\") as f1, open(\"templates/results.html\", \"w\", encoding=\"utf-8\") as f2:\n f2.write(\"\"\"<head>\n <link href= {{ url_for('static', filename = 'css-custom.css') }} rel=\"stylesheet\">\n <script src=\"https://code.jquery.com/jquery-3.3.1.slim.min.js\" integrity=\"sha384-q8i/X+965DzO0rT7abK41JStQIAqVgRVzpbzo5smXKp4YfRvH+8abtTE1Pi6jizo\" crossorigin=\"anonymous\"></script>\n <script>\n $(document).ready(function(){\n $('td').each(function(){\n var content = $(this).html();\n var Rexp = /((http|https|ftp):\\/\\/[\\w?=&.\\/-;#~%-]+(?![\\w\\s?&.\\/;#~%\"=-]*>))/g;\n content = content.replace(Rexp, \"<a href='$1' target='_blank'>$1</a>\");\n $(this).html(content);\n });\n 
});\n</script>\n</head>\n\"\"\")\n for line in f1:\n f2.write(line)\n os.remove(\"templates/test.html\")\n return True", "title": "" }, { "docid": "4300f62b4505ed3c3c9545b7073a9c05", "score": "0.5902526", "text": "def user_search():\n search = request.form.get(\"search\")\n types = list(mongo.db.type.find().sort(\"type_name\", 1))\n recipes = list(mongo.db.recipes.find().sort(\"recipe_name\", 1))\n users = list(mongo.db.users.find({\"$text\": {\"$search\": search}}))\n products = list(\n mongo.db.products.find().sort(\"product_name\", 1))\n tools = list(mongo.db.tools.find().sort(\"name\", 1))\n count = len(users)\n # checks if no users match search\n if count == 0:\n flash(\"NO USERS FOUND\")\n return render_template(\"management.html\", users=users, types=types,\n recipes=recipes, count=count, search=search)\n # returns results that match\n else:\n flash(\"Search results: {} for '{}'\".format(len(users), search))\n return render_template(\"management.html\", users=users, types=types,\n recipes=recipes, count=count, search=search,\n tools=tools, products=products)", "title": "" }, { "docid": "c3876bbf72d9d3ff2b8ce175b0a97e02", "score": "0.58976126", "text": "def search():\n\n search_term = request.values.get(\"search_term\")\n category = request.values.get(\"category\")\n todo_list = collection.find({category: search_term})\n\n return render_template('search.html', todo_list=todo_list)", "title": "" }, { "docid": "f5c349001c9881fa824305799cfacd06", "score": "0.5897531", "text": "def search():\n query = request.args.get('query', '')\n results = search_client.search(query)\n return jsonify(results)", "title": "" }, { "docid": "23d20b3f9223921230e80b490020e86c", "score": "0.5896862", "text": "def vegan_filter():\n types = list(mongo.db.type.find().sort(\"type_name\", 1))\n recipes = list(mongo.db.recipes.find({\"vegan\": \"Yes\"}))\n\n return render_template(\"recipes.html\", types=types,\n recipes=recipes)", "title": "" }, { "docid": "26af26d25d80c12b687933c2b644e511", "score": "0.58917093", "text": "def search():\n # request.form = {\"form_name\": \"hi\"}\n form_name = request.form.get(\"form_name\")\n if form_name == \"search\":\n checked = ['checked=\"true\"', '', '']\n keys = request.form.get('key_word', \"\")\n category = request.form.get('search_field')\n print(\"from search.controllers.search: category hasn't been added as a search option, will do later\")\n session['checked'] = checked\n session['keys'] = keys\n else:\n if session.get(\"checked\") is None or session.get(\"keys\") is None:\n print('unrecognized form named {}'.format(form_name))\n return error_page()\n else:\n checked = session['checked']\n keys = session['keys']\n\n if is_valid_keys(keys):\n flag, page, docid = searchidlist(keys)\n session['page'] = page\n session['s_flag'] = flag\n session['doc_id'] = docid\n\n if flag == 0: return error_page()\n\n docs = cut_page(page, 0, docid)\n\n return render_template('high_search.html', checked=checked, key=keys, docs=docs, page=page,\n nonempty=True, welcome=False, browse_categories=browse_categories)\n else:\n return redirect(url_for('main'))", "title": "" }, { "docid": "5b2f7e986422895ef3132d6bb76f75b5", "score": "0.5891332", "text": "def api_search():\n category = request.args.get('category')\n search = request.args.get('search')\n return jsonify(get_search_results(category, search, current_user=current_user))", "title": "" }, { "docid": "ee7ceda59a188d4d517c1e45b8b8b193", "score": "0.5882391", "text": "def search_results():\n terms = request.args.get('q')\n\n return 
render_template('search.html', terms=terms)", "title": "" }, { "docid": "9405d118b77f363a99424be2dae6cad9", "score": "0.58684224", "text": "def search(self):\r\n pass", "title": "" }, { "docid": "ed8e74e2da111cc3fc92acf2c9ab0d15", "score": "0.5853514", "text": "def search():\n metadata = METADATA.copy()\n\n # extract search term from the HTML form\n search_term = request.form['search']\n\n if search_term != '':\n # put search term in same vector space as data model was trained on\n search_transformed = PIPELINE.transform([search_term])\n # put search in topic space\n search_topics = TOPIC_MODEL.transform(search_transformed)\n\n # calculate cosine similarity matrix\n doc_topic_matrix = metadata[metadata.columns[6:]].values\n similarity = cosine_similarity(doc_topic_matrix, search_topics)\n top_10_idx = np.argsort(similarity.flatten())[-10:]\n metadata = metadata.iloc[list(top_10_idx)]\n\n metadata = apply_threshold_to_topics(metadata)\n\n return render_template('index.html',\n comedy_info=metadata.values.tolist(),\n dropdown_options=['Observational', 'Black Culture', 'British & Australian',\n 'Political', 'Immigrant Upbringing', 'Relationships & Sex'],\n search_text=search_term)", "title": "" }, { "docid": "a5a6f2ada8bafdaf43e45f61be876581", "score": "0.5838428", "text": "def test_ingredient_search(self):\n pass", "title": "" }, { "docid": "302da5baf997582a6f3c5ca20e166f37", "score": "0.5821525", "text": "def process_search(self, request):\n cat = self.cleaned_data['cat']\n search_query = self.cleaned_data['q']\n if cat != '' or search_query != '':\n self.set_search_query_set(request)\n if cat != '' and search_query != '':\n self.search_category_and_query()\n elif cat != '':\n self.search_category_only()\n else:\n self.search_query_string_only()\n coupons = self.intersect_index_and_db_results(request)\n else:\n coupons = self.no_query_found(request)\n return coupons, self.suggestion", "title": "" }, { "docid": "e6d923c09d171e62803313ef0725ef72", "score": "0.5811273", "text": "def search_restaurant():\n\n\tlocation = request.form.get('location')\n\tterm = request.form.get('term')\n \n \n\tresults = yelp.get_results(location=location, term=term)\n\t\n \n\n\treturn jsonify(results=results)", "title": "" }, { "docid": "e3117a08ac8a588968575e9fbfa0c23d", "score": "0.58079803", "text": "def search():\n context = make_context()\n\n return make_response(render_template('search.html', **context))", "title": "" }, { "docid": "fe76696fced27b2c2c9c4833b711d933", "score": "0.57921505", "text": "def search():\n pattern = flask.request.args.get('pattern', \"*\").strip().lower()\n\n # if the pattern contains \"in:<collection>\" (eg: in:builtin),\n # filter results to only that (or those) collections\n # This was kind-of hacked together, but seems to work well enough\n collections = [c[\"name\"].lower() for c in current_app.kwdb.get_collections()]\n words = []\n filters = []\n if pattern.startswith(\"name:\"):\n pattern = pattern[5:].strip()\n mode = \"name\"\n else:\n mode=\"both\"\n\n for word in pattern.split(\" \"):\n if word.lower().startswith(\"in:\"):\n filters.extend([name for name in collections if name.startswith(word[3:])])\n else:\n words.append(word)\n pattern = \" \".join(words)\n\n keywords = []\n for keyword in current_app.kwdb.search(pattern, mode):\n kw = list(keyword)\n collection_id = kw[0]\n collection_name = kw[1].lower()\n if len(filters) == 0 or collection_name in filters:\n url = flask.url_for(\".doc_for_library\", collection_id=kw[0], keyword=kw[2])\n row_id = \"row-%s.%s\" % 
(keyword[1].lower(), keyword[2].lower().replace(\" \",\"-\"))\n keywords.append({\"collection_id\": keyword[0],\n \"collection_name\": keyword[1],\n \"name\": keyword[2],\n \"synopsis\": keyword[3],\n \"version\": __version__,\n \"url\": url,\n \"row_id\": row_id\n })\n\n keywords.sort(key=lambda kw: kw[\"name\"])\n return flask.render_template(\"search.html\",\n data={\"keywords\": keywords,\n \"version\": __version__,\n \"pattern\": pattern\n })", "title": "" }, { "docid": "73af425a18f76c5f126622e6c2a3db0c", "score": "0.57903445", "text": "def test_search_query_string_with_booleans(workbook, es_testapp):\n search = '/search/?type=Biosource&q=GM12878'\n res_stem = es_testapp.get(search).json\n assert len(res_stem['@graph']) > 1\n bios_uuids = [r['uuid'] for r in res_stem['@graph'] if 'uuid' in r]\n swag_bios = '331111bc-8535-4448-903e-854af460b888'\n assert swag_bios in bios_uuids\n # assert induced_stem_uuid not in not_induced_uuids\n # now search for stem +induced (AND is now \"+\")\n search_and = '/search/?type=Biosource&q=swag+%2BGM12878'\n res_both = es_testapp.get(search_and).json\n both_uuids = [r['uuid'] for r in res_both['@graph'] if 'uuid' in r]\n assert len(both_uuids) == 1\n assert swag_bios in both_uuids\n # search with OR (\"|\")\n search_or = '/search/?type=Biosource&q=swag+%7CGM12878'\n res_or = es_testapp.get(search_or).json\n or_uuids = [r['uuid'] for r in res_or['@graph'] if 'uuid' in r]\n assert len(or_uuids) > 1\n assert swag_bios in or_uuids\n # search with NOT (\"-\")\n search_not = '/search/?type=Biosource&q=GM12878+-swag'\n res_not = es_testapp.get(search_not).json\n not_uuids = [r['uuid'] for r in res_not['@graph'] if 'uuid' in r]\n assert swag_bios not in not_uuids", "title": "" }, { "docid": "2bd286c31b0b2164f4db540bca304209", "score": "0.57748353", "text": "def findRecipe(keywords=\"Calhacks\"):\n \n apiKey = \"dvxI9e588cOKNvbOzd24EXsVRW9Y2OW8\"\n\n # Base URL for Big Oven API\n url = \"http://api.bigoven.com/recipes?pg=0&rpp=1\"\n\n # Searching for the input Keywords\n url += \"&title_kw=\" + keywords.replace(\" \", \"+\")\n\n # Adding the developer's API Key\n url += \"&api_key=\" + apiKey\n\n recipeCount = parsePage(url, ['ResultCount'])[1][0]\n \n # No recipes are found. 
\n if recipeCount == \"0\":\n return keywords, set([\"No such recipe found\"]), \"\", \"\"\n\n randomRecipe = randint(1, int(recipeCount))\n url = url.replace(\"pg=0\", \"pg=\" + str(randomRecipe))\n\n xml, [recipeId] = parsePage(url, ['RecipeID'])\n \n try:\n cuisineType = parseList(xml, 'Cuisine')[0].firstChild.nodeValue\n except (IndexError, AttributeError):\n cuisineType = \"\"\n \n # Return the list of ingredients in the Recipe\n recipeUrl = \"http://api.bigoven.com/recipe/\" + recipeId\n recipeUrl += \"?api_key=\" + apiKey\n \n xml, [title, inst] = parsePage(recipeUrl, ['Title', 'Instructions'])\n\n recipeList = set()\n try:\n for el in parseList(xml, 'Name'):\n recipeList.add(el.firstChild.nodeValue)\n except AttributeError:\n recipeList.add(\"Ingredients are in the Instructions\")\n\n return title, recipeList, cuisineType, inst", "title": "" }, { "docid": "5deff366a389cb2b4bb3c80786d39a02", "score": "0.57731175", "text": "def search_for_outcome():\n try:\n # matching input to variable: https://stackoverflow.com/questions/14105452/what-is-the-cause-of-the-bad-request-error-when-submitting-form-in-flask-applica\n req = request.form\n print(req)\n query = db.session.query(COPA_Case_Attributes)\n logger.info(\"Query initialized\")\n field_list = [\"police_shooting\", \"race_complainants\", \"sex_complainants\", \"age_complainants\",\n \"race_officers\",\"sex_involved_officers\", \"age_officers\", \"excessive_force\",\n \"years_on_force_officers\"]\n field_label = {\"police_shooting\":\"Police shooting: \", \"race_complainants\":\"Race of complainant: \", \"sex_complainants\": \"Gender of complainant: \",\n \"age_complainants\": \"Age of complainant: \",\n \"race_officers\": \"Race of officer: \", \"sex_involved_officers\": \"Gender of officer: \",\n \"age_officers\": \"Age of officer: \", \"excessive_force\": \"Excessive force: \",\n \"years_on_force_officers\": \"Years on force of officer: \"}\n messages = [\"Showing: Predictions displayed based on selection - \"]\n detail = \"\"\n # build query dynamically: https://stackoverflow.com/questions/37336520/sqlalchemy-dynamic-filter\n # build query dynamically: https://stackoverflow.com/questions/7604967/sqlalchemy-build-query-filter-dynamically-from-dict\n # get query attr rather than string: https://stackoverflow.com/questions/10251724/how-to-give-column-name-dynamically-from-string-variable-in-sql-alchemy-filter\n for f in field_list:\n if request.form[f]:\n query = query.filter(getattr(COPA_Case_Attributes, f) == request.form[f])\n detail = detail+field_label[f]+request.form[f]+\". 
\"\n if detail == \"\":\n detail = \"None\"\n logger.info(\"Perform query\")\n cases = query.limit(app.config[\"MAX_ROWS\"]).all()\n logger.info(\"Query results: {}\".format(cases))\n messages.append(detail)\n return render_template('index.html', cases=cases, messages = messages)\n except Exception as e:\n logger.error(e)\n logger.warning(\"Not able to display search, error page returned\")\n return render_template('error.html')", "title": "" }, { "docid": "4cb968761c90b686d9e59e383661abda", "score": "0.5769029", "text": "def get_recipes(recipe):\n\n selectlist_recipes, selectlist_styles, selectlist_user, sel_user_styles = get_selectlists(session.get(\"user_id\"))\n deleteable = False\n if Recipe.query.filter_by(name=recipe).one().user_id == session.get(\"user_id\"):\n deleteable = True\n recipe, color = get_recipe_instructions(recipe)\n color = color_conversion(color)\n return render_template(\"explore_brews.html\", selectlist_recipes=selectlist_recipes,\n selectlist_styles=selectlist_styles, recipe=recipe,\n color=color, deleteable=deleteable)", "title": "" }, { "docid": "5223ab9b566325ba09bdab1c73a4297f", "score": "0.57479477", "text": "def search(query, restaurants):\n # BEGIN Question 10\n \"*** REPLACE THIS LINE ***\"\n # END Question 10", "title": "" }, { "docid": "c54c47aaf9f52e2661012975537b8ae8", "score": "0.57413507", "text": "def search(self):\n logger.info(f'Received a request to search. Search type: {self.search_type}')\n\n queried_store, security = self.get_db_specific_sections()\n search, query, skip, limit = self.arg_parser()\n\n # Pipeline initially made use of 'project', I've opted to instead\n # do all post processing in 'post_processing()' for simplicity.\n pipeline = search + query + security\n start = time.time()\n full_search_res = list(queried_store.aggregate(pipeline))\n logger.info(f'Got search response in {time.time() - start} seconds.'\\\n f'Pipeline: {pipeline} First two results: {full_search_res[0:1]}')\n final_result = self.post_processing(full_search_res, skip, limit)\n return final_result", "title": "" }, { "docid": "10d82fdbad0cca70bb8ee3b66bd59ac3", "score": "0.57355696", "text": "def find_restaurants(request, list):\n return matching(request, {}, list)", "title": "" }, { "docid": "70c7a6d4fb4e121269e8c2480cc251ce", "score": "0.57247204", "text": "def search(request):\n logger.info(\"__ input_forms search __\")\n\n term = request.GET[\"term\"]\n results = []\n\n provider_list = EndpointGroup.objects.all().order_by(\"name\")\n for provider in provider_list:\n\n provider_instance = endpoint_provider.get_provider_instance_from_group(provider.id)\n provider_instance.apply_filter(\"Name\", term)\n endpoints_list = provider_instance.get_page(0, 128)\n\n for endpoint in endpoints_list:\n r = dict()\n r[\"value\"] = str(provider.id) + \":\" + str(endpoint.get('id', 0))\n r[\"label\"] = provider.name + \"/\" + endpoint.get('name', 'unknown')\n results.append(r)\n\n return HttpResponse(json.dumps(results), content_type=\"application/json\")", "title": "" }, { "docid": "ae4d1f976957f156d57111d361c8afb9", "score": "0.5721357", "text": "def get_queryset(self):\n\n name = self.request.query_params.get('name')\n queryset = Recipe.objects.all()\n\n if name:\n queryset = queryset.filter(name__icontains=name)\n\n return queryset", "title": "" }, { "docid": "faac51fdcada1a00243487bc4181977e", "score": "0.5710637", "text": "def search():\n query = bt.request.query.q\n t = bt.request.query.t\n mode, model, count = (\n (\"games\", Game, 10) if t == \"categories\" else 
(\"users\", Streamer, 5)\n )\n search_results = Helix.get(f\"search/{t}?query={query}&first={count}\")\n ids = {int(result[\"id\"]) for result in search_results}\n\n asyncio.run(Db.cache(ids, mode=mode))\n if t == \"categories\":\n results = model.select().where(model.id.in_(ids))\n else:\n results = [\n Result(result, model.get_by_id(int(result[\"id\"])))\n for result in search_results\n ]\n return bt.template(\"search.tpl\", query=query, mode=mode, results=results)", "title": "" }, { "docid": "bbc4d7cbc7e65843d2cfed4782280c20", "score": "0.56858754", "text": "def FoodSearch(self, **dico):\n if not 'input' in dico:\n #TODO: APIError\n raise Exception(\"input is missing\") \n return self.oauth.ApiCall(\"foods/search\", \"GET\", dico)", "title": "" }, { "docid": "22d281c15fb2028a398b7b9e8bcec1f1", "score": "0.5680111", "text": "def search():\n\n fuzzy_term = None\n if 'search' in request.args:\n fuzzy_term = request.args['search']\n session['search'] = fuzzy_term\n\n items = []\n\n searchable_classes = [Document, User]\n for cls in searchable_classes:\n items.extend(\n map(\n lambda x: (x, cls.__name__.lower()),\n cls.fuzzy_search(fuzzy_term)\n )\n )\n\n print('Found these items for term \"{}\":'.format(fuzzy_term))\n print(items)\n\n return items", "title": "" }, { "docid": "f41827a56cd90552e5f1cbe6d93dd72d", "score": "0.5675769", "text": "def check_ingredients():\n query = request.args.get(\"q\")\n print(f\"Query recieved: {query}\")\n ingredients = db.check_ingredient_from_db(query)\n return jsonify(ingredients)", "title": "" }, { "docid": "b592e615c9b7f10b0474d98ef4bbffff", "score": "0.56677955", "text": "def test_autocomplete_ingredient_search(self):\n pass", "title": "" }, { "docid": "fbb69dcc4a5a9f621bc073885f7a8623", "score": "0.56562763", "text": "def search_cupcake():\n search = request.args[\"searchTerm\"]\n\n cupcakes = Cupcake.query.filter(Cupcake.flavor.ilike(f\"%{search}%\")).all()\n\n serialized = [c.serialize() for c in cupcakes]\n\n return (jsonify(cupcakes=serialized), 200)", "title": "" }, { "docid": "35d4b1b4ae0b890d96a80ab06e5beb40", "score": "0.5648273", "text": "def search():\n global searching\n global searchQuery\n\n if request.method == \"POST\":\n query = request.form.get(\"query\")\n searching = True\n searchQuery = query\n\n films = list(mongo.db.film_list.find({\"$text\": {\"$search\": query}}))\n return render_template(\n \"get_films.html\", films=films, search=searching,\n searchQuery=searchQuery)\n\n return get_films()", "title": "" }, { "docid": "dfcb3768490aff1fbf14975ea87037de", "score": "0.56399745", "text": "def artists_search(mode: str, form: FlaskForm, simple_search_term: str = None) -> dict:\n return ncsg_search(mode, form, get_entity(ARTIST_TABLE), simple_search_term=simple_search_term)", "title": "" }, { "docid": "b6d475728fd1a4ceead5b6a2e8e903cf", "score": "0.56354815", "text": "def searchProgram():\n return", "title": "" }, { "docid": "8504852f423a62b8c6dc87b7048fcb04", "score": "0.5621537", "text": "def search_book():\n if not g.user:\n flash('You have to Login')\n return redirect('/')\n term = request.args['term']\n \n b = Book.query.filter(Book.title.ilike(f'%{term}%')).all()\n\n results = [book.serialize() for book in b]\n \n return jsonify(books=results)", "title": "" }, { "docid": "258e5c4deee79cd2db6eb64847dc5ee3", "score": "0.5621454", "text": "def handle_cloth_search(request):\n searched_name = request.GET.get('cloth_name')\n context = dict()\n \n if searched_name:\n search_result = Cloth.objects.filter(name__icontains=searched_name)\n 
context = {'search_result': search_result}\n \n return render(request, 'clothing/search_result_list.html', context)", "title": "" }, { "docid": "a65dcf26dc253551dea0b68e79fd99c4", "score": "0.5612832", "text": "def search(items, target):\n # TODO: Fill this in.\n return", "title": "" }, { "docid": "5394cd29e5329e4366e086fa14b3aea1", "score": "0.560931", "text": "def search():\n # Search google \n search_google()", "title": "" }, { "docid": "e295fddc82333156dd4a279832d7cc97", "score": "0.5592493", "text": "def search():\n term = request.args[\"term\"]\n sort = request.args[\"sort\"]\n return f\"<h1>Search Results For: {term}</h1> <p>Sorting by: {sort}</p>\"", "title": "" }, { "docid": "29f49f76f32be498d896d89670acb718", "score": "0.55910236", "text": "def get_queryset(self):\n self.queryset = Recipe.objects.all()\n name = self.request.query_params.get('name', None)\n ingredients = self.request.query_params.get('ingredients')\n\n if name:\n return self.queryset.filter(name__startswith=name)\n\n if ingredients:\n ingredient_ids = self._params_to_ints(ingredients)\n print(ingredient_ids)\n self.queryset = queryset.filter(ingredients__name__in=ingredients)\n\n\n return self.queryset", "title": "" }, { "docid": "9ed037ae98a92d047b7f2c7ad4335c87", "score": "0.5580274", "text": "def get_search():\n getreq_url = SEARCH_BASE_URL + request.form.get('search')\n r = requests.get(url = getreq_url)\n data = r.json()\n\n return render_template('search_results.html', books=data)", "title": "" }, { "docid": "e5c3cdd7e48c11d43ceca2bd36ad2677", "score": "0.5579418", "text": "def test_search_find_one_result_by_postcode(self):\n make_recipe(\n \"legalaid.case\",\n personal_details__postcode=\"123\",\n personal__details__full_name=\"abc\",\n **self.get_extra_search_make_recipe_kwargs()\n )\n\n response = self.client.get(\n self.list_url,\n data={\"search\": self.resource.personal_details.postcode},\n HTTP_AUTHORIZATION=self.get_http_authorization(),\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(1, len(response.data[\"results\"]))\n self.assertCaseEqual(response.data[\"results\"][0], self.resource)", "title": "" }, { "docid": "fababc0cca564f36a5dd42425a0a8b50", "score": "0.55777663", "text": "def search(self, query):\n # SO, let's create our results\n results = None\n for shelf in self.library:\n # First shelf we're running the search on\n if results is None:\n results = shelf.search(query)\n # We use upgrade_and_extend, because multiple hits can happen.\n else:\n results.upgrade_and_extend(shelf.search(query))\n\n return results", "title": "" }, { "docid": "b8840b500c07bd06ed5c1925492d9632", "score": "0.5576955", "text": "def post(self):\n text = self.get_argument(\"text\")\n __type = self.get_argument(\"type\")\n\n\n if __type == \"dish\":\n \"\"\"\n It might be a possibility that the user enetered the dish which wasnt in autocomplete\n then we have to search exact dish name of seach on Laveneshtein algo\n \"\"\"\n ##search in ES for dish name \n\n result = list()\n __result = ElasticSearchScripts.get_dish_match(text)\n for dish in __result:\n eatery_id = dish.get(\"eatery_id\")\n __eatery_details = r_clip_eatery.find_one({\"eatery_id\": eatery_id})\n for e in [\"eatery_highlights\",\n \"eatery_cuisine\", \"eatery_trending\", \"eatery_known_for\", \"eatery_type\", \"_id\"]:\n try:\n __eatery_details.pop(e)\n except Exception as e:\n print e\n pass\n dish.update({\"eatery_details\": __eatery_details})\n result.append(dish)\n\n elif __type == \"cuisine\":\n ##gives out the 
restarant for cuisine name\n print \"searching for cuisine\"\n result = list()\n __result = ElasticSearchScripts.eatery_on_cuisines(text)\n print __result\n for eatery in __result:\n __result= short_eatery_result_collection.find_one({\"__eatery_id\": eatery.get(\"__eatery_id\")}, {\"_id\": False, \"food\": True, \"ambience\": True, \\\n \"cost\":True, \"service\": True, \"menu\": True, \"overall\": True, \"location\": True, \"eatery_address\": True, \"eatery_name\": True, \"__eatery_id\": True})\n\n eatery.update({\"eatery_details\": __result})\n result.append(eatery)\n\n elif __type == \"eatery\":\n \n ##TODO : Some issue with the restaurant chains for example\n ##big chills at different locations, DOnt know why ES\n ##not returning multiple results\n ##TODO: dont know why dropped nps are still in result.\n result = r_eateries.find_one({\"eatery_name\": text},\n {\"_id\": False, \"eatery_known_for\": False,\n \"droppped_nps\": False,\n \"eatery_trending\": False,\n \"eatery_highlights\": False})\n \n print result.get(\"eatery_id\")\n __result = process_result(result)\n \n pictures = result.pop(\"pictures\")\n result.update({\"pictures\": pictures[0:2]})\n \n result.update(__result)\n result = [result]\n\n elif not __type:\n print \"No type defined\"\n\n else:\n print __type\n self.write({\"success\": False,\n\t\t\t \"error\": True,\n\t\t\t \"messege\": \"Maaf kijiyega, Yeh na ho paayega\",\n\t\t\t })\n self.finish()\n return \n print result\n self.write({\"success\": True,\n\t\t\t \"error\": False,\n\t\t\t \"result\": result,\n\t\t\t})\n self.finish()\n return", "title": "" }, { "docid": "d396f9be5eceab3cbb62987c1686e7c4", "score": "0.5575648", "text": "def search_app(q='', fq=None, app=True, **kw):\n from allura.model import ArtifactReference\n from allura.lib.security import has_access\n\n history = kw.pop('history', None)\n if app and kw.pop('project', False):\n # Used from app's search controller. If `project` is True, redirect to\n # 'entire project search' page\n redirect(c.project.url() + 'search/?' 
+\n urlencode(dict(q=q, history=history)))\n search_comments = kw.pop('search_comments', None)\n limit = kw.pop('limit', None)\n page = kw.pop('page', 0)\n default = kw.pop('default', 25)\n allowed_types = kw.pop('allowed_types', [])\n parser = kw.pop('parser', None)\n sort = kw.pop('sort', 'score desc')\n fq = fq if fq else []\n search_error = None\n results = []\n count = 0\n matches = {}\n limit, page, start = g.handle_paging(limit, page, default=default)\n if not q:\n q = ''\n else:\n # Match on both `title` and `text` by default, using 'dismax' parser.\n # Score on `title` matches is boosted, so title match is better than body match.\n # It's 'fuzzier' than standard parser, which matches only on `text`.\n if search_comments:\n allowed_types += ['Post']\n if app:\n fq = [\n 'project_id_s:%s' % c.project._id,\n 'mount_point_s:%s' % c.app.config.options.mount_point,\n '-deleted_b:true',\n 'type_s:(%s)' % ' OR '.join(\n ['\"%s\"' % t for t in allowed_types])\n ] + fq\n search_params = {\n 'qt': 'dismax',\n 'qf': 'title^2 text',\n 'pf': 'title^2 text',\n 'fq': fq,\n 'hl': 'true',\n 'hl.simple.pre': '#ALLURA-HIGHLIGHT-START#',\n 'hl.simple.post': '#ALLURA-HIGHLIGHT-END#',\n 'sort': sort,\n }\n if not history:\n search_params['fq'].append('is_history_b:False')\n if parser == 'standard':\n search_params.pop('qt', None)\n search_params.pop('qf', None)\n search_params.pop('pf', None)\n try:\n results = search(\n q, short_timeout=True, ignore_errors=False,\n rows=limit, start=start, **search_params)\n except SearchError as e:\n search_error = e\n if results:\n count = results.hits\n matches = results.highlighting\n\n def historize_urls(doc):\n if doc.get('type_s', '').endswith(' Snapshot'):\n if doc.get('url_s'):\n doc['url_s'] = doc['url_s'] + \\\n '?version=%s' % doc.get('version_i')\n return doc\n\n def add_matches(doc):\n m = matches.get(doc['id'], {})\n title = h.get_first(m, 'title')\n text = h.get_first(m, 'text')\n if title:\n title = (markupsafe.escape(title)\n .replace('#ALLURA-HIGHLIGHT-START#', markupsafe.Markup('<strong>'))\n .replace('#ALLURA-HIGHLIGHT-END#', markupsafe.Markup('</strong>')))\n if text:\n text = (markupsafe.escape(text)\n .replace('#ALLURA-HIGHLIGHT-START#', markupsafe.Markup('<strong>'))\n .replace('#ALLURA-HIGHLIGHT-END#', markupsafe.Markup('</strong>')))\n doc['title_match'] = title\n doc['text_match'] = text or h.get_first(doc, 'text')\n return doc\n\n def paginate_comment_urls(doc):\n if doc.get('type_s', '') == 'Post':\n artifact = doc['_artifact']\n if artifact:\n doc['url_paginated'] = artifact.url_paginated()\n return doc\n\n def filter_unauthorized(doc):\n aref = ArtifactReference.query.get(_id=doc.get('id'))\n # cache for paginate_comment_urls to re-use\n doc['_artifact'] = aref and aref.artifact\n # .primary() necessary so that a ticket's comment for example is checked with the ticket's perms\n if doc['_artifact'] and not has_access(doc['_artifact'].primary(), 'read', c.user):\n return None\n else:\n return doc\n\n filtered_results = [_f for _f in map(filter_unauthorized, results) if _f]\n count -= len(results) - len(filtered_results)\n results = filtered_results\n results = map(historize_urls, results)\n results = map(add_matches, results)\n results = map(paginate_comment_urls, results)\n\n # Provide sort urls to the view\n score_url = 'score desc'\n date_url = 'mod_date_dt desc'\n try:\n field, order = sort.split(' ')\n except ValueError:\n field, order = 'score', 'desc'\n sort = ' '.join([field, 'asc' if order == 'desc' else 'desc'])\n if field == 
'score':\n score_url = sort\n elif field == 'mod_date_dt':\n date_url = sort\n params = request.GET.copy()\n params.update({'sort': score_url})\n score_url = url(request.path, params=params)\n params.update({'sort': date_url})\n date_url = url(request.path, params=params)\n return dict(q=q, history=history, results=list(results) or [],\n count=count, limit=limit, page=page, search_error=search_error,\n sort_score_url=score_url, sort_date_url=date_url,\n sort_field=field)", "title": "" }, { "docid": "f3ff9eab02b3a9cb4280c54363cece15", "score": "0.5573637", "text": "def test_search_find_one_result_by_ref(self):\n make_recipe(\n \"legalaid.case\",\n personal_details__full_name=\"abc\",\n personal_details__postcode=\"123\",\n **self.get_extra_search_make_recipe_kwargs()\n )\n\n response = self.client.get(\n self.list_url, data={\"search\": self.resource.reference}, HTTP_AUTHORIZATION=self.get_http_authorization()\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(1, len(response.data[\"results\"]))\n self.assertCaseEqual(response.data[\"results\"][0], self.resource)", "title": "" }, { "docid": "f82c245bbbbd336d3d782f23d8c82c6a", "score": "0.5571029", "text": "def search_by_query(self):\n\n if self.uri:\n self.user_query = input('\\n\\t9) What do you want to search ?\\n\\t')\n\n print('\\n\\tSearch results...\\n\\t{result_data}'.format(\n result_data=self.pyro_obj.get_by_query(self.user_query))\n )", "title": "" }, { "docid": "bd88235e0591068ceebc9aaccf12d7e9", "score": "0.55677366", "text": "def run_keyword_search_process():\n\n print(\"This will search for matches in the title and notes fields.\")\n search_term = input(\"Enter a search term: \").strip()\n\n contains_search_term = ((Time_Entry.title.contains(search_term))\n | (Time_Entry.notes.contains(search_term)))\n\n matching_entries = Time_Entry.select().where(contains_search_term)\n\n clear_screen()\n\n if len(matching_entries) == 0:\n print(\"No matching entries found.\")\n dummy = input(\"Press enter to continue. 
\")\n else:\n run_options_loop(matching_entries)", "title": "" }, { "docid": "5aef14ba8df317f5a827902d64eb63f8", "score": "0.55642", "text": "def genre_search(genre):\n global searching\n global searchQuery\n\n searching = True\n searchQuery = genre\n\n films = list(mongo.db.film_list.find({\"$text\": {\"$search\": genre}}))\n return render_template(\n \"get_films.html\", films=films, search=searching,\n searchQuery=searchQuery)", "title": "" }, { "docid": "ff6c1b05cdd6222288a0a684a6442dbc", "score": "0.5560062", "text": "def search_wines():\n\n title_keywords = request.args.get('title_keywords')\n title_keywords = title_keywords.title()\n wines = crud.search_wines(title_keywords)\n\n if not wines:\n title_keywords = title_keywords.upper()\n wines = crud.search_wines(title_keywords)\n\n if 'user_id' in session:\n user = crud.get_user_by_id(session['user_id'])\n return render_template('search_results.html', wines=wines, user=user)\n else:\n return render_template('search_results.html', wines=wines)\n\n if 'user_id' in session:\n user = crud.get_user_by_id(session['user_id'])\n return render_template('search_results.html', wines=wines, user=user)\n else:\n return render_template('search_results.html', wines=wines)", "title": "" }, { "docid": "8965ad2d058519370db92cdd744a095f", "score": "0.5551595", "text": "def search(self, query, maxresults=20):\n pass", "title": "" }, { "docid": "47b15b40ab4e89e6a497a59b17552d85", "score": "0.55505663", "text": "def search(self, **kwargs):\n resource = self.resource + 'search'\n required = ['q']\n metadata, response = self.__get__(resource, kwargs) # use metadata for something else.\n return response", "title": "" }, { "docid": "b51c612202c0eab6a1df6c2dbb8793af", "score": "0.55419123", "text": "def search():\n\n # Make sure that the search bar is not empty\n if not request.form.get(\"keyword\"):\n return apology(\"search criteria must not be empty\", 403)\n\n keyword = request.form.get(\"keyword\")\n\n # Add in the wildcard characters for the SQL queries\n like_keyword = \"%\" + keyword + \"%\"\n\n # Get all posts that match the keyword(s)\n posts = db.execute(\n \"SELECT title, datetime, posts.id AS post_id, user_id, username FROM posts INNER JOIN users ON posts.user_id = users.id WHERE title LIKE ? OR contents LIKE ? 
ORDER BY datetime DESC\", (like_keyword), (like_keyword))\n\n if len(posts) == 0:\n no_posts = True\n else:\n no_posts = False\n\n # Get all people that match the keyword(s)\n people = db.execute(\"SELECT id AS user_id, username FROM users WHERE username LIKE ?\", (like_keyword))\n\n if len(people) == 0:\n no_people = True\n else:\n no_people = False\n\n return render_template(\"search.html\", posts=posts, no_posts=no_posts, people=people, no_people=no_people)", "title": "" }, { "docid": "59dc767c55a5cb20d0031a9b6c23661d", "score": "0.5539616", "text": "def search(request, **kwargs):\r\n return SearchView.as_view(**kwargs)(request)", "title": "" }, { "docid": "9eaf1e4c914f16a3d0281bb5531e8b58", "score": "0.55371684", "text": "async def search(self, *args: Any, **kwargs: Any) -> List[dict]:\n await self.wait_for(self.connection.search(*args, **kwargs))\n return self.connection.response", "title": "" }, { "docid": "70e1d787867d2d87a194956dc557d9e9", "score": "0.553495", "text": "def find_recipe(input_text, recipes, p2v_our_emoji):\n\tprint(input_text)\n\ttokens = tokenizer.tokenize(input_text)\t\n\tprint(tokens)\n\tvector = np.sum([p2v_our_emoji[t] for t in tokens], axis=0) / len(tokens)\n\t\n\tkey, value = min(recipes.items(), key=lambda kv: (distance.euclidean(vector, np.array(kv[1]['recipe vector']))))\n\n\tresult = {}\n\tresult['title'] = value['title']\n\tresult['instructions'] = value['instructions']\n\n\treturn json.dumps(result)", "title": "" }, { "docid": "cfa0f3a32da4a4e4e943c3fbaa93c17f", "score": "0.55348986", "text": "def search(self, search):\n return self.backend.search(search)", "title": "" }, { "docid": "2d678bbbaeb1c1705e495fb6ee1fc45e", "score": "0.55339485", "text": "def search_route():\n\n with sqlite3.connect(DBPATH) as conn:\n query = request.get_json().get(\"query\")\n res = conn.execute(\n \"select id, title from answers where title like ? 
\", [f\"%{query}%\"],\n )\n answers = [{\"id\": r[0], \"title\": r[1]} for r in res]\n print(query, \"--> \")\n pprint(answers)\n return jsonify(answers), 200", "title": "" }, { "docid": "add902afc70ea874d20a8a1244441f25", "score": "0.55326116", "text": "def search():\n if request.args.get('q'):\n g.search_form.q.data = request.args.get('q')\n q = g.search_form.q.data if g.search_form.q.data else None\n\n if q is None:\n return render_template('404.html'), 404\n\n page = request.args.get('page', 1, type=int)\n per_page = app.config['PER_PAGE']\n\n paginated_papers, total = Paper.search(q, page, per_page)\n\n href=\"search?q={}\".format(q) + '&page={0}' ##customizing to include search query parameter\n pagination = Pagination(href=href, page=page, per_page=per_page, total=total, record_name='papers',format_total=True, format_number=True)\n # print(pagination.__dict__)\n\n return render_template('papers.html', papers=paginated_papers, pagination=pagination, per_page=per_page, categories=CATEGORY_LIST, icondict=icondict)", "title": "" }, { "docid": "c87e446336cfff2ed89d5cb1d990f022", "score": "0.55319804", "text": "def do_search(request):\n products = Product.objects.filter(name__icontains=request.Get['q'])\n return render(request, \"products.html\", {\"products\": products})", "title": "" }, { "docid": "0e46ab899a626dbf93cb43a7ce62c44a", "score": "0.5531373", "text": "def search(request):\n # r = requests.get(\"http://api.angel.co/1/jobs\")\n # response_data = r.json()\n # final_jobs = []\n # for job in response_data[\"jobs\"]:\n # job_tags = job['tags']\n # job_match = True\n # for tag in job_tags:\n # if tag['tag_type'] is \"SkillTag\" and not business_search(tag['name']):\n # job_match = False\n # if tag['tag_type'] is \"LocationTag\" and not location_search(location, tag['name']):\n # job_match = False\n # # if category matches business and location matches search, check for desc and add it to list.\n # if job_match and (keyword in job['description']):\n # final_jobs.append(job)\n # context = {'jobs': final_jobs}\n # return render(request, 'search/search.html', context)\n return HttpResponse(\"Works for now\")", "title": "" }, { "docid": "510daa725373e06e01db50bd2c307089", "score": "0.5529226", "text": "def search_criteria():\n global search_term\n global location\n\n raw_search_term = input(\"What would you like to find?\")\n split_search = raw_search_term.split(\" \")\n if len(split_search) > 1:\n search_term = \"+\".join(split_search)\n else:\n search_term = raw_search_term\n raw_location = input(\"What city would you like to search in?\")\n split_location = raw_location.split(\" \")\n if len(split_location) > 1:\n location = \"+\".join(split_location)\n else:\n location = raw_location", "title": "" }, { "docid": "0eaeace753c6815c28529c2ef29c9a91", "score": "0.55195177", "text": "def search_db():\n search_term = request.args.get(\"search-text\")\n search_term_formatted = '%' + search_term + '%'\n \n search_results = Truck_schedule.query.filter(or_(Truck_schedule.truck_name.ilike(search_term_formatted), Truck_schedule.extra_text.ilike(search_term_formatted))).distinct(Truck_schedule.truck_id).all()\n\n if search_results:\n pass\n else:\n search_results = None\n\n return render_template(\"search_results.html\", search_term=search_term, search_results=search_results)", "title": "" }, { "docid": "a83ffa094a3467a0e0ea58166ca767b3", "score": "0.55110025", "text": "def npo_search():\n tag = request.form[\"tag\"]\n orders = Order.query.filter_by(user_id=current_user.id)\n # details = 
OrderDetails.query.all()\n if not tag:\n flash(\"Missing keyword\")\n return redirect(url_for(\"views.dashboard\", user=current_user, username=current_user.username))\n\n search = \"%{}%\".format(tag)\n location = User.query.filter(User.location.like(search)).all()\n food = Food.query.all()\n\n if location is not None:\n for i in location:\n filtered = Food.query.filter_by(users_id=i.id).all()\n\n return render_template(\n 'npo.html',\n businessname=current_user.businessname,\n food=food,\n filtered=filtered,\n users=location,\n tag=tag,\n orders=orders,\n user=current_user)\n\n businessname = User.query.filter(User.businessname.like(search)).all()\n\n if businessname is not None:\n for i in businessname:\n filtered = Food.query.filter_by(users_id=i.id).all()\n return render_template(\n 'npo.html',\n businessname=current_user.businessname,\n food=food,\n filtered=filtered,\n users=businessname,\n tag=tag,\n orders=orders,\n user=current_user)\n\n flash(\"Not found\")\n return render_template(\n 'npo.html',\n businessname=current_user.businessname,\n food=food,\n user=current_user,\n tag=tag\n )", "title": "" }, { "docid": "c61fc536630739a75ab74a8c72c1fb94", "score": "0.55088633", "text": "def search(request, template_name=\"aggregator/search.html\", template_loader=loader, extra_context=None, context_processors=None, mimetype=None):\n # Create search terms list.\n if request.GET.__contains__('search'):\n search_string = request.GET['search']\n \n # First, split on double-quotes to extract any multi-word terms\n search_terms = search_string.split('\"')\n cleaned_search_terms = []\n \n # Then, remove any unnecessary whitespace at the beginings or ends of the terms\n for item in search_terms:\n if not item.startswith(' ') and not item.endswith(' ') and not item == '':\n cleaned_search_terms.append(item)\n if item.startswith(' '):\n cleaned_search_terms.append(item[1:])\n if item.endswith(' '):\n cleaned_search_terms.append(item[:-1])\n \n \n # Filtering by model\n model_query = request.GET.get('models', 'default')\n model_query_list = request.GET.getlist('models')\n \n if model_query == \"default\":\n content_types = get_aggregator_content_types(type_set=\"default\")\n model_list = [ ct.app_label + \".\" + ct.model for ct in content_types]\n model_string = \"default\"\n elif model_query == \"all\":\n model_string = \"all\"\n content_types = get_aggregator_content_types(type_set=\"all\")\n else:\n model_string = \",\".join(model_query_list)\n model_list = model_query_list\n content_types = []\n for item in model_list:\n for model in settings.AGGREGATOR_MODELS:\n if item == model['model']:\n app_label = item.split('.')[0]\n model = item.split('.')[1]\n content_type = ContentType.objects.get(app_label=app_label, model=model)\n content_types.append(content_type)\n \n # Set up a list to put results into\n search_results = []\n\n # Search each model for the search terms\n for content_type in content_types:\n content_type_string = content_type.app_label + \".\" + content_type.model\n model = content_type.model_class()\n manager = model._default_manager\n opts = model._meta\n query = Q()\n queries = []\n for model in settings.AGGREGATOR_MODELS:\n if content_type_string == model['model']:\n search_fields = model['search_fields']\n\n for term in cleaned_search_terms:\n for field in search_fields:\n lookup = field + \"__icontains\"\n kwargs = { lookup: term }\n q = Q(**kwargs)\n queries.append(q)\n\n for q in queries:\n query = query | q\n\n results = manager.filter(query)\n \n # Create a list of 
primary keys for each item in the results, then use that list to get a \n # QuerySet of associated ContentItem objects. Append the results of that QuerySet to the\n # search_results list.\n result_pks = [ i['pk'] for i in results.values('pk') ]\n result_content_items = ContentItem.objects.filter(content_type=content_type, object_id__in=result_pks)\n search_results.extend(result_content_items)\n items = dictsortreversed(search_results, 'timestamp')\n else:\n cleaned_search_terms = []\n search_string = \"\"\n items = []\n content_types = []\n model_string = \"\"\n hits = 0\n # Build the context\n context = RequestContext(request, {\n \"search_terms\" : cleaned_search_terms,\n \"search_string\" : search_string,\n \"items\" : items,\n \"all_content_types\" : get_aggregator_content_types(type_set=\"all\"),\n \"content_types\" : content_types,\n \"model_string\" : model_string,\n \"hits\" : len(items),\n }, context_processors)\n if extra_context:\n for key, value in extra_context.items():\n if callable(value):\n context[key] = value()\n else:\n context[key] = value\n \n # Load, render, and return\n t = template_loader.get_template(template_name)\n return HttpResponse(t.render(context), mimetype=mimetype)", "title": "" } ]
4e02ba312122b710df2ee54808e84bad
Displays UI for a download error
[ { "docid": "0e70bafff92bfcb6561858673ec8d1cb", "score": "0.80256706", "text": "def show_download_error(self, name, err):\n self.kodiUi.show_error(30952, self.language(30953).format(name, err))", "title": "" } ]
[ { "docid": "0583d83d4d08c67fa3f074b1be0bc052", "score": "0.70555574", "text": "def download_message(self):\n\n print('Failed to find any DR %s files'% self.subset)\n print('')", "title": "" }, { "docid": "9b23a29c0c13ae8fd955349fe1e02077", "score": "0.67040294", "text": "def _url_error(self, err):\n\n self.info_state_lbl.setText(\"\")\n self.progress_lbl.hide()\n self.frame_select_insta.show()\n\n error_pop_up = QMessageBox(self)\n error_pop_up.setIcon(QMessageBox.Critical)\n error_pop_up.setStyleSheet(style_sheets.qmsg_box_style)\n if err == \"url\":\n error_pop_up.setText('Invalid instagram post url')\n if err == \"timeout\":\n error_pop_up.setText(\"Timeout, couldn't get a response for this url\")\n if err == \"connection\":\n error_pop_up.setText(\"Can't establish a connection to the requested url,\"\n \"\\ncheck your connection\")\n error_pop_up.show()", "title": "" }, { "docid": "6deb02ef56a95ed1f2f79e7eb685002c", "score": "0.654053", "text": "def download_message(self):\n\n print('Failed to find any Flowers %s files'% self.subset)\n print('')\n print('If you have already downloaded and processed the data, then make '\n 'sure to set --data_dir to point to the directory containing the '\n 'location of the sharded TFRecords.\\n')\n print('Please see README.md for instructions on how to build '\n 'the flowers dataset using download_and_preprocess_flowers.\\n')", "title": "" }, { "docid": "a7f12a75740c836bb95ec5e58a214a1f", "score": "0.6322234", "text": "def error(self, req):\r\n import cgitb\r\n req.stdout.write('Content-Type: text/html\\r\\n\\r\\n' +\r\n cgitb.html(sys.exc_info()))", "title": "" }, { "docid": "55f479c6257a784fb3451e045185dcae", "score": "0.6170256", "text": "def showError(self, err):\n self.beginResetModel()\n self.HEADER = self.ERROR\n self.rows= [PyRExTV.ResRow([None, err])]\n self.endResetModel()", "title": "" }, { "docid": "d85010b6985d874124812d390e069f7e", "score": "0.6166488", "text": "def _display_error(self, title, message):\n message = 'ERROR: ' + message\n self._set_status_var_text(message)\n tkMessageBox.showerror(title, message)", "title": "" }, { "docid": "5329a7b9c683c2663844702dcc149a3e", "score": "0.6147564", "text": "def print_err_message():\n err = \"[-] Something went wrong!\"\n text_progress.configure(state='normal')\n text_progress.delete(1.0, END)\n text_progress.insert(INSERT, err)\n text_progress.configure(state='disabled')", "title": "" }, { "docid": "05f1d5f82431eff36e3a6663c1e070e6", "score": "0.61249125", "text": "def test_static_web_ui__error(self, _):\n resp, code = self.api_server.static_web_ui('kytos', 'napp', 'filename')\n\n self.assertEqual(resp, '')\n self.assertEqual(code, 404)", "title": "" }, { "docid": "dd20cf1d6f2231c2db7457202dd69a8e", "score": "0.61151296", "text": "def showErrorDialogue(self, e):\n\n self.errorDialogue.show()\n self.errorLabel[\"text\"] = str(e)", "title": "" }, { "docid": "274bbbf644d908e1a9579aaa4d58d941", "score": "0.60758865", "text": "def Write404Error(self):\n self.error(404)\n self.response.out.write('Error 404, file not found')", "title": "" }, { "docid": "ca31c7fda8825080b136068a01a3e948", "score": "0.60434794", "text": "def _MWSfileOpenErrorDialog(self):\n\n title = \"Error in opening a MWSDeck file\"\n msg = \"Specified file contains no card data or is corrupted\"\n QtGui.QMessageBox.warning(self, title, msg)", "title": "" }, { "docid": "07fddf193d6462af6d3ddcb66a00416b", "score": "0.6027234", "text": "def error(e):\n\n #return repr(e)\n return render_template(\"error.html\", titel=\"Error 404\", 
text=\"Sidan du sökte hittades inte, vänligen testa igen.\")", "title": "" }, { "docid": "52d2098fa52f5ba3d19475bab6beb225", "score": "0.5993623", "text": "def show_missing_extractor_error(self):\n self.kodiUi.show_error(30952, 30954, time=10000)", "title": "" }, { "docid": "3e1b14550b18363df5492d27e6bc6771", "score": "0.5990825", "text": "def _txtFileOpenErrorDialog(self):\n\n title = \"Error in opening a text file\"\n msg = \"Specified file contains no card data or is corrupted\"\n QtGui.QMessageBox.warning(self, title, msg)", "title": "" }, { "docid": "3ae5d5ed98670ae8fd90c0992ae152e1", "score": "0.5983925", "text": "def showError(self, error):\n self.model.showError(error)", "title": "" }, { "docid": "f2e48e6ff5b6dec99d38ebe0faa4a7e4", "score": "0.59500605", "text": "def write_error(self, status_code, **kwargs):\n print 'In get_error_html. status_code: ', status_code\n try:\n # handle exception here\n # get request and look up self.resources\n # if status_code in [403, 404, 500, 503]:\n error_class, message, traceback_object = kwargs[\"exc_info\"]\n except Exception, e:\n message = \"Woha, %s\" % e\n\n self.write('%s Error, message: %s' % (status_code, message))", "title": "" }, { "docid": "8730d402c7a29dbd3181c17a7bae8a1c", "score": "0.594727", "text": "def show_error(self, message=\"\", *args, **nargs):\n # update attribute with data-status=\"error\"\n self.attribs['data-status'] = 'error'\n if message:\n self._error = message\n self.show = True", "title": "" }, { "docid": "514125b184d77a68faf78dcf34ee70b0", "score": "0.59212035", "text": "def show_error(self, pHeading, pMessage):\n self.kodiUi.show_error(pHeading, pMessage)", "title": "" }, { "docid": "3e07fea7075775aded8ffc0843df4801", "score": "0.59114474", "text": "def error(self, message):\n self.print_usage(sys.stdout)\n self.exit(3, gettext('%s: error: %s\\n') % (self.prog, message))", "title": "" }, { "docid": "0a6772d2b5268d553893758b34246f4f", "score": "0.59029454", "text": "def ShowError(self, error):\n self.SetOrientation() # Always show text \"right side up\".\n lcd.font(lcd.FONT_DejaVu18, rotate=0, transparent=True)\n lcd.fill(RED)\n print('ShowError: error=%r' % error)\n self.print_exception(error)\n lcd.print('%s' % error, 10, 10, WHITE)", "title": "" }, { "docid": "df22af9899e09925a075e3a9a50f79e9", "score": "0.5902446", "text": "def error_page(self):\n return content", "title": "" }, { "docid": "2c0607456d9298981485c647d9cf3f88", "score": "0.58983326", "text": "def display_error(msg: str) -> None:\r\n messagebox.showerror(\"Error\", msg)", "title": "" }, { "docid": "00e5e13208f9853802ccfcce436e1231", "score": "0.5882246", "text": "def _error(self):\n self.redirect(self.error_action)", "title": "" }, { "docid": "e7cf63f39e090ffc8e3040292abebb25", "score": "0.58759266", "text": "def show_error(self, msg):\n self.send_event(bridge.Command.ERROR, msg)", "title": "" }, { "docid": "0e6957a56e1a19da9efc10b755107284", "score": "0.5871805", "text": "def error_message_view():\n\n print(\"\\nDésolé mais votre réponse ne décrit pas une action possible.\\n\"\n \"Veuillez essayer de nouveau\\n\")", "title": "" }, { "docid": "69b94a9037123bcf4f574951b4fda92f", "score": "0.5854621", "text": "def _display_error(error: BaseException):\n print(\"***** Error\")\n print(error)", "title": "" }, { "docid": "26fc490341cbbec5cdb8cc76c0d10345", "score": "0.5849313", "text": "def on_error(self, status):\n print('Error status code : ' + str(status))", "title": "" }, { "docid": "57307f270d81d4143f814a413d920be4", "score": "0.584909", "text": 
"def __showError(self, out):\n self.errorGroup.show()\n self.errors.insertPlainText(out)\n self.errors.ensureCursorVisible()", "title": "" }, { "docid": "57307f270d81d4143f814a413d920be4", "score": "0.584909", "text": "def __showError(self, out):\n self.errorGroup.show()\n self.errors.insertPlainText(out)\n self.errors.ensureCursorVisible()", "title": "" }, { "docid": "b504e2abd8d827a4fe3c55d424ca0599", "score": "0.58407325", "text": "def on_error(self, status):\r\n print(status)", "title": "" }, { "docid": "f9d2ec11fed66c95c2e4d386e1b3f6bf", "score": "0.58246607", "text": "def playstatus_error(self, updater, exception: Exception) -> None:\n print(self.formatter(output(False, exception=exception)), flush=True)", "title": "" }, { "docid": "0d54a66b16ebc618b540ce887eb048d3", "score": "0.5817636", "text": "def error(self, message):\n self.success = False\n print(f'Error in {self.filename}: {message}')", "title": "" }, { "docid": "82a637c019b6bfa9e68c309ab8e67625", "score": "0.58109105", "text": "def error (self, error_string):\n self.set_value('error', error_string)\n self.render('error.html')", "title": "" }, { "docid": "e60f98e4c3b07090b15b6c8dc95ecd41", "score": "0.5806683", "text": "def server_error_500(error):\n logging.exception('An error occurred during a request..' + str(error))\n return render_template(\"filemanager/failure.html\")", "title": "" }, { "docid": "59e3df5b481cfa423297933aefbc993d", "score": "0.5792314", "text": "def show_error(self, msg=\"\"):\n\n self.show_error_signal.emit(msg)", "title": "" }, { "docid": "59e3df5b481cfa423297933aefbc993d", "score": "0.5792314", "text": "def show_error(self, msg=\"\"):\n\n self.show_error_signal.emit(msg)", "title": "" }, { "docid": "f98b5bdd52b00604685091e845943e3a", "score": "0.5772071", "text": "def result_err(failure):\n print \"{0}: failure... {1!r}\".format(self._name, failure)\n # failure.raiseException()\n if USE_GUI:\n title = self.tr(\"Operation Error\")\n msg = self.tr(\"{0}: your operation failed.\").format(self._name)\n QtGui.QMessageBox.critical(None, title, msg)", "title": "" }, { "docid": "380cc1327720a7d5eeaef397c6db2b09", "score": "0.5763218", "text": "def error_page(request, error):\n return default_render('error.html')", "title": "" }, { "docid": "c2ff76bcedbdeb462b88612d3294ddc0", "score": "0.57516384", "text": "def error(self, msg):\n click.echo(click.style(msg, bg='red', fg='white'))\n exit()", "title": "" }, { "docid": "0afbbccbd4b0fad0ee4f01125d4db48f", "score": "0.5741481", "text": "def show_error(self):\n self.input = tkMessageBox.showerror(self.name, self.message)\n return", "title": "" }, { "docid": "4f4853de125f65e32501af888d8d641d", "score": "0.5723974", "text": "def error(self, message):\n\n sys.stderr.write('error: %s\\n' % message)\n self.print_help()\n sys.exit(2)", "title": "" }, { "docid": "abd9dd7de6d3959383d49f951618e33f", "score": "0.5723259", "text": "def generate_summary(self):\n message = self.result_description\n if message is None:\n message = \"unknown error... 
contact administrator.\"\n return f\"HTML/URL Renderer: {message}\"", "title": "" }, { "docid": "6f58941998f38dcb734618fa8afa0d59", "score": "0.5720473", "text": "def Start_Download(self):\n url = self.lineEdit.text()\n save_location = self.lineEdit_2.text()\n\n try:\n\n urreq.urlretrieve(url, save_location, self.Handel_Progress)\n\n except Exception:\n\n QMessageBox.warning(self, 'Download Error', 'The Download Filed')\n return\n\n QMessageBox.information(self, 'Download Completed', 'The Download Finished')\n\n self.progressBar.setValue(0)\n self.lineEdit.setText('')\n self.lineEdit_2.setText('')", "title": "" }, { "docid": "6b8761af94a28dce915d994a99290809", "score": "0.57128114", "text": "def renderError(self, error_code):\n\n self.error(error_code)\n self.response.write(\"Oops! Something went wrong.\")", "title": "" }, { "docid": "6b8761af94a28dce915d994a99290809", "score": "0.57128114", "text": "def renderError(self, error_code):\n\n self.error(error_code)\n self.response.write(\"Oops! Something went wrong.\")", "title": "" }, { "docid": "0b4b9aacd6369314a36ca8bc80ae0996", "score": "0.57080424", "text": "def show_error_box(msg):\n Ui_MainWindow.show_message(\"There's a problem...\", msg, QMessageBox.Critical)", "title": "" }, { "docid": "43ebddc432525adf0d9a68b85157d44a", "score": "0.5699492", "text": "def error(self, msg, *args):\n self.log(click.style(\"## ERROR - %s\" % msg, *args, fg='red'))", "title": "" }, { "docid": "90b460005399771431987a29c7e46d24", "score": "0.56919736", "text": "def displayError(self, msg):\n dialog = Gtk.MessageDialog(\n self, Gtk.DialogFlags.MODAL, Gtk.MessageType.ERROR,\n Gtk.ButtonsType.CLOSE, msg)\n dialog.run()\n dialog.destroy()", "title": "" }, { "docid": "4b68879eccf67c6cf36d7290ca57c9eb", "score": "0.5680849", "text": "def error(error_msg):\n return render_template('error.html', error=error_msg, title=\"ERROR\")", "title": "" }, { "docid": "16cddf90356f80c6698432b32be0ac6a", "score": "0.56776005", "text": "def draw_error_message(self, context):\n self.layout.label(text=bpy.context.window_manager.send2ue.error_message)", "title": "" }, { "docid": "1976c96691ab6e9ff5001abcb5e941a5", "score": "0.5667852", "text": "def show_download_progress(self):\n self.kodiUi.show_progress_dialog(30955)", "title": "" }, { "docid": "bfd04a91e0f147fcac4725b628992fa4", "score": "0.566438", "text": "def write_error(self, status_code, **kwargs):\n if self.settings.get(\"serve_traceback\") and \"exc_info\" in kwargs:\n # in debug mode, try to send a traceback\n self.set_header('Content-Type', 'text/plain')\n for line in traceback.format_exception(*kwargs[\"exc_info\"]):\n self.write(line)\n self.finish()\n else:\n if status_code == 404:\n # 重载404页面\n self.render(\"404.html\")\n\n self.finish(\"<html><title>%(code)d: %(message)s</title>\"\n \"<body>%(code)d: %(message)s</body></html>\" % {\n \"code\": status_code,\n \"message\": self._reason,\n })", "title": "" }, { "docid": "213c0bc8c6178b05106e3373789641c0", "score": "0.5660347", "text": "def show_error(parent, title, text):\n return show_dialog_with_icon(parent=parent, title=title, text=text,\n buttons=RESPONSES_OK, icon_name=\"qubes-info\")", "title": "" }, { "docid": "d359ea32ecc9783097a39c006a5c3445", "score": "0.5657757", "text": "def alerterror(title, message):\n messagebox.showerror(title, message)", "title": "" }, { "docid": "b96d39c055b33cd368339910dac49753", "score": "0.5646884", "text": "def error_page(self):\n return None", "title": "" }, { "docid": "2c1d01c8af030f6d3250a4eec30d7992", "score": "0.5642748", 
"text": "def on_error(self, status_code, data):\n print status_code, data\n #self.disconnect()", "title": "" }, { "docid": "3fff0ef73b8a7945040338ce94500b30", "score": "0.56341285", "text": "def errorMessages(self,index):\n msgBox = QMessageBox()\n msgBox.setIcon(QMessageBox.Critical)\n\n if index == 0:\n msgBox.setText(\"Error: It was not possible to load the bag file!\"\n \"Reason: topic incompatibility.\")\n elif index == 1:\n msgBox.setText(\"Error: It was not possible to load the annotation file!\"\n \"Reason: topic incompatibility.\")\n elif index == 2:\n msgBox.setText(\"Error: You must select the topics you are interested.\")\n elif index == 3:\n msgBox.setText(\"Error: You must load a bag file and/or an annotation file!\")\n elif index == 4:\n msgBox.setText(\"Error: Error when opening the bag file!\")\n elif index == 5:\n msgBox.setText(\"Error: Error when opening the annotation json file!\")\n msgBox.resize(100,40)\n msgBox.exec_()", "title": "" }, { "docid": "3a99e176d3db725358cb91650456ff2f", "score": "0.5625095", "text": "def message(self, request):\n return \"An exception error occurred.\"", "title": "" }, { "docid": "eb42735ef5bfc7d4ead7b5dc6e5ad626", "score": "0.56234473", "text": "def error(self, message) -> None:\n sys.stderr.write('error: %s\\n' % message)\n self.print_help()\n sys.exit(2)", "title": "" }, { "docid": "98b126421bc2dd06f77aeafaaa481321", "score": "0.56231016", "text": "def display_import_result(self):\n reply = QtGui.QMessageBox.question(self, 'Import Result',\n \"Imported Cache successfully\")", "title": "" }, { "docid": "fdc333ce0ae8c6ba8918690ed07bc5f1", "score": "0.56229734", "text": "def query_failure():\n writer = codecs.getwriter('utf-8')(sys.stdout.buffer)\n response_html_dir = os.path.join(\n os.path.split(\n os.path.split(\n os.path.abspath(__file__))[0])[0], \n \"query_failure.html\")\n response_html = open(response_html_dir).read()\n #HTTP Headers\n writer.write(\"Content-Type: text/html; charset=utf-8\" + '\\r\\n')\n writer.write(\"Content-Length: \" + str(len(response_html)) + '\\r\\n')\n # Seperator between header and HTML\n writer.write('\\r\\n')\n #HTML\n writer.write(response_html)", "title": "" }, { "docid": "e760b2495c4b18ee2389af589819bc05", "score": "0.5619451", "text": "def renderError(self, error_code):\n\n\t\tself.error(error_code)\n\t\tif (error_code == 404):\n\t\t\tself.response.write(str(error_code) + \": The resource could not be found\")\n\t\telse:\n\t\t\tself.response.write(\"Oops! 
Something went wrong.\")", "title": "" }, { "docid": "d8ee045fa03e428bba53e73170e34293", "score": "0.5614987", "text": "def error(self, message: str):\n sys.stderr.write(f\"Error {message}\\n\")\n self.print_help()\n sys.exit(1)", "title": "" }, { "docid": "e34208438f46a8f39f1eaa6512ccaad9", "score": "0.5603017", "text": "def errorBox(self, msg: str):\n exit(f'{self.__getTime()}{self.__getError(msg)}')", "title": "" }, { "docid": "a7542139420bba2366ea5fa77340c11b", "score": "0.55986756", "text": "def ShowError(self, msg):\n acm.UX().Dialogs().MessageBox(self.m_fuxDlg.Shell(), 'Error', \"Error: \" + msg,\n 'Cancel', None, None, 'Button1', 'Button1')", "title": "" }, { "docid": "5110bf4a62455e3062806be4de195140", "score": "0.55972624", "text": "def display_warning(self):\n reply = QtGui.QMessageBox.question(self, 'Error', \"No Object Selected\")\n\n # updates text representing write directory", "title": "" }, { "docid": "c8666ffac29597285dce0028a6905d38", "score": "0.5595954", "text": "def error(self, exit_code):\n print('error: command exited with status %s\\n' % exit_code)", "title": "" }, { "docid": "e99d9139d405855b6d717a4245b35f24", "score": "0.55914426", "text": "def error_msg(self,error): \n self.error = error\n print \"\"\n print \"ERROR: \"+self.error\n print \"\"", "title": "" }, { "docid": "b6f641ab70e7150fa6bff24031b26a9a", "score": "0.5588419", "text": "def error(msg):\n click.echo(click.style(msg, bold=True, fg=\"red\"), err=True)", "title": "" }, { "docid": "6bad5f5899147376cdb5a53413e6e24c", "score": "0.55665463", "text": "def on_download_page_fail(self, f, host):\n error_msg = f.getErrorMessage()\n log.debug(\"Error downloading page: %s\", error_msg)\n d = f\n if f.check(PageRedirect):\n # Handle redirect errors\n location = urljoin(self.host_to_url(host), error_msg.split(\" to \")[1])\n self.redirects[host] = url_to_host(location)\n d = self.download_page(host, url=location)\n d.addCallbacks(self.on_download_page_complete, self.on_download_page_fail,\n errbackArgs=(host,))\n\n return d", "title": "" }, { "docid": "0657970672b98b3ca39cfa08b223922e", "score": "0.55642885", "text": "def download_page(self):\n try:\n logging.info('Downloading page %s ....', self.download_data.url)\n html = network.download_html(self.download_data.url)\n self.comic_html = html\n except Exception as e:\n self.page_download_status = WebComicDownloadStatus.FAILED.name\n logging.debug('Download failed on URL, %s because of %s', self.download_data.url, str(e) )", "title": "" }, { "docid": "84f0cb330cbe50abc31f366e6ae87455", "score": "0.5558545", "text": "def default_error_handler(status_code, successful, failed, errors):\n msg = \"%d points out of %d had errors\\n\" \\\n \"HTTP status returned from Apptuit: %d\\n\" \\\n \"Detailed error messages: %s\\n\" % \\\n (failed, successful + failed, status_code, str(errors))\n sys.stderr.write(msg)", "title": "" }, { "docid": "55c6c1167d8b4b3e26f690209a4a1c0c", "score": "0.5558469", "text": "def on_login_failed(browser_instance, gui):\n showerror('Ok', 'Login Unsuccessful')", "title": "" }, { "docid": "69a67c8a3f1b7b0c0a17a1e486262679", "score": "0.55516905", "text": "def displayError(parent, message):\n\n messageDialog = wx.MessageDialog(parent, message, \"Error\",\n wx.OK|wx.CENTRE|wx.ICON_ERROR)\n messageDialog.Center(wx.BOTH)\n messageDialog.ShowModal()", "title": "" }, { "docid": "7199fca9368ef37ea03e09a515660309", "score": "0.55443347", "text": "def display_error(self, msg, type = \"Error\"):\n mbox.showerror(type, msg)", "title": "" }, { "docid": 
"b04f4c35f3094ed547d352c152712253", "score": "0.55437934", "text": "def error_page(self, vr):\n return jinja.get_template('error.html').render(\n errors=vr.errors,\n warnings=vr.warnings\n )", "title": "" }, { "docid": "31223b08fa0ea4bb0751013aff5a54ff", "score": "0.55414", "text": "def _show_error(self):\n\n sublime.error_message(\n \"An error occurred installing Package Control\\n\\n\"\n \"Please check the Console for details\\n\\n\"\n \"Visit https://packagecontrol.io/installation for manual instructions\")", "title": "" }, { "docid": "b75ab9fdef7eab7b2eeb3ccd4e084049", "score": "0.55318207", "text": "def _handle_error(self, reason, request):\n reason.printTraceback()", "title": "" }, { "docid": "ff1c45405c35e294db9b812bfe45ba4a", "score": "0.55314916", "text": "def error(self, msg):\n pass", "title": "" }, { "docid": "59bf79228619071a93034652b80981d1", "score": "0.5531016", "text": "def error(string):\n click.secho('ERROR: {}'.format(string), fg='red', err=True)", "title": "" }, { "docid": "5090d867ea8767151e63ebf1f4d8c4d0", "score": "0.55282885", "text": "def _error(self, msg):\n sys.stderr.write(\"\\nerror: \" + msg + \"\\n\")\n sys.stdout.write(\"\\n\")", "title": "" }, { "docid": "d2dd204595502c37ebbea525e371e53a", "score": "0.5528245", "text": "def display_errors(self, error_db):\n error_report = error_db.report_errors(command_line=False,\n file_output=False)\n translated_report = wx.GetTranslation(error_report)\n window = wx.MessageBox(error_report,\n caption=_('Errors logged in error_report.txt'))", "title": "" }, { "docid": "ed84588e565baffdede0fdbcd4358b56", "score": "0.552524", "text": "def err_back(self, failure):\n\n file_error_urls = open(ERROR_DIRECTION_URLS + str(self.now.day) + str(self.now.hour) + str(self.now.minute), 'a+')\n file_error_proxy = open(ERROR_DIRECTION_PROXY + str(self.now.day) + str(self.now.hour) + str(self.now.minute), 'a+')\n\n if failure.check(HttpError):\n # these exceptions come from HttpError spider middleware\n # you can get the non-200 response\n response = failure.value.response\n self.logger.error(\"!!HttpError on %s\", response.url)\n file_error_urls.write(response.url + \"\\n\")\n file_error_proxy.write(response.meta[\"proxy\"] + \"\\n\")\n\n elif failure.check(DNSLookupError):\n # this is the original request\n request = failure.request\n self.logger.error(\"!!DNSLookupError on %s\", request.url)\n file_error_urls.write(request.url + \"\\n\")\n file_error_proxy.write(request.meta[\"proxy\"] + \"\\n\")\n\n elif failure.check(TimeoutError, TCPTimedOutError):\n request = failure.request\n self.logger.error(\"!!TimeoutError on %s\", request.url)\n file_error_urls.write(request.url + \"\\n\")\n file_error_proxy.write(request.meta[\"proxy\"] + \"\\n\")\n\n file_error_urls.close()\n file_error_proxy.close()", "title": "" }, { "docid": "22a21f6dccb7b8cd878189d8f2a43cac", "score": "0.5523169", "text": "def error(self, s):\n print(\" ERROR: '%s', %s\" % (self.src_id, s), file=sys.stderr)", "title": "" }, { "docid": "d5c534b1e3adf3883e7081ff384d31cb", "score": "0.55210364", "text": "def error(self):\n pass", "title": "" }, { "docid": "5137840a894ff230b7ce278a0fbb4c9a", "score": "0.55150735", "text": "def show_database_error(self, err):\n self.kodiUi.show_error(30951, '{}'.format(err))", "title": "" }, { "docid": "1eb9a2a744c95987f60983dbcc3215de", "score": "0.5509607", "text": "def _ExceptionResponse(self):\n\n status = '500 Server Error'\n response_headers = [('Content-type', 'text/html; charset=utf-8')]\n self.start_response(status, 
response_headers)\n return target_errors.ExcInfoAsHtml()", "title": "" }, { "docid": "d62ced1a085bf7b994a491f988739b33", "score": "0.5492359", "text": "def __refresh_failed_slot(self, filename):\n self.send_message_signal.emit(\"Refresh of \" + filename + \" has started unsuccessfully\")", "title": "" }, { "docid": "42837a8c57b84f58c951d068bba1b393", "score": "0.54920834", "text": "def print_login_error(self, message):\n label = Label(self.error_frame, text=message, fg=\"red\", anchor=CENTER, justify=CENTER,\n bg=self.controller.color_1)\n label.grid(column=0, row=0, sticky=EW)", "title": "" }, { "docid": "64d33bababac488de6dddd22a946ec35", "score": "0.54873234", "text": "def buildErrorGUI(self):\n\n self.errorDialogue = DirectDialog(frameSize = (-0.8, 0.8, -0.4, 0.4),\n frameColor = (0.2, 0.3, 0.7, 1),\n fadeScreen = 0.4,\n image = None,\n geom = None,\n relief = DGG.FLAT)\n\n self.errorTitle = DirectLabel(text = \"Error!\",\n scale = 0.09,\n text_align = TextNode.ACenter,\n text_fg = (1, 1, 1, 1),\n parent = self.errorDialogue,\n pos = (0, 0, 0.3),\n relief = None)\n self.errorLabel = DirectLabel(text = \"<Error text here>\",\n scale = 0.07,\n text_align = TextNode.ACenter,\n text_fg = (1, 1, 1, 1),\n parent = self.errorDialogue,\n pos = (0, 0, 0.1),\n relief = None)\n\n self.errorDoneBtn = DirectButton(text = \"Close\", command = self.hideErrorDialogue,\n scale = 0.05,\n text_align = TextNode.ACenter,\n pos = (0, 0, -0.35),\n parent = self.errorDialogue,\n text_bg = (0.1, 0.8, 0.2, 1))", "title": "" }, { "docid": "1ed2b57be47413d1c08586a5fd4aa9a0", "score": "0.5485057", "text": "def display_error_message(self):\n\n self._error_message.setVisible(True)", "title": "" }, { "docid": "758fe71a554951f7fc2ceb7dd7b7ef92", "score": "0.5481646", "text": "def error_message(self):\n pass", "title": "" }, { "docid": "6f5df56c6f586734960f5a21d1d74d6e", "score": "0.54746085", "text": "def handle_error(err, halt=True):\n print('{}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, err))\n if halt:\n sys.exit(1)", "title": "" }, { "docid": "45102d15cf919e707046179902c49cda", "score": "0.5467163", "text": "def display_export_result(self):\n reply = QtGui.QMessageBox.question(self, 'Export Result',\n \"Files saved to Directory: %s\" % self.file_location)", "title": "" }, { "docid": "2541857d4f34a6173a5af6d93b92272e", "score": "0.54597694", "text": "def error_404(error):\n return 'Sorry, nothing at this URL.'", "title": "" }, { "docid": "07c52ea3a9688fec7f296714da2ce422", "score": "0.5458576", "text": "def show_error_message(self, msg):\n msgbox = QMessageBox()\n msgbox.critical(dialog, \"Error\", msg)", "title": "" }, { "docid": "2c525949f47ba9fdff84a5eefc115387", "score": "0.54544574", "text": "def display_error_msg():\n print(\"\\n !! mauvaise saisie !!\\n\")", "title": "" }, { "docid": "c616695ae4cd395babca5148f88b9888", "score": "0.545296", "text": "def print_http_error(url, status_code, response):\n error(\"FAILED!\\nStatus code: {}\\nResponse: {}\\nURL: {}\".format(\n status_code, response, url))", "title": "" }, { "docid": "344b648ab0184da724cb78ba21b010e4", "score": "0.5452114", "text": "def error_404(error):\n return 'Sorry, Nothing at this URL.'", "title": "" }, { "docid": "8ecddb1065e7b686db09438d379400ba", "score": "0.54326767", "text": "def send_error_page(request, error):\n return response_ajax_or_not(request, {'htmlname': 'error.html',\n 'message': error})", "title": "" } ]
0bcbb126ae5223f3d1f5ca2950034367
Validates if the output is not the same as input
[ { "docid": "75fdf2e321d1a07332176bae092a4632", "score": "0.60977846", "text": "def isValidInputOutputData(self, inputVolumeNode, outputVolumeNode):\n\tif not inputVolumeNode:\n\t logging.debug('isValidInputOutputData failed: no input volume node defined')\n\t return False\n\tif not outputVolumeNode:\n\t logging.debug('isValidInputOutputData failed: no output volume node defined')\n\t return False\n\tif inputVolumeNode.GetID()==outputVolumeNode.GetID():\n\t logging.debug('isValidInputOutputData failed: input and output volume is the same. Create a new volume for output to avoid this error.')\n\t return False\n\treturn True", "title": "" } ]
[ { "docid": "4435bb4b59cd7ac285ad925231272dda", "score": "0.7131176", "text": "def check_output(output, expected_output):\r\n o = copy.deepcopy(output) # so that we don't mutate input\r\n e = copy.deepcopy(expected_output) # so that we don't mutate input\r\n\r\n o.sort()\r\n e.sort()\r\n return o == e", "title": "" }, { "docid": "75e1998a0bc2c0379c6edb2c60d13706", "score": "0.7067986", "text": "def check_output(output, expected_output):\n o = copy.deepcopy(output) # so that we don't mutate input\n e = copy.deepcopy(expected_output) # so that we don't mutate input\n \n o.sort()\n e.sort()\n return o == e", "title": "" }, { "docid": "282980064d519f21e29c23c9cf51d43b", "score": "0.6973755", "text": "def check_output(output, expected_output):\n print(output)\n o = copy.deepcopy(output) # so that we don't mutate input\n e = copy.deepcopy(expected_output) # so that we don't mutate expected output\n \n o.sort()\n e.sort()\n return o == e", "title": "" }, { "docid": "c49093131ad982787b01caf821630388", "score": "0.65401703", "text": "def not_equal_to(self, input1, input2, output, callback=None):\n args = []\n args.append(\"--input1='{}'\".format(input1))\n args.append(\"--input2='{}'\".format(input2))\n args.append(\"--output='{}'\".format(output))\n return self.run_tool('not_equal_to', args, callback) # returns 1 if error", "title": "" }, { "docid": "0e74f1e8c7b92e717d8c23fe85990428", "score": "0.62993574", "text": "def test_wtf_positive(input_1, input_2, output):\n assert wtf(input_1, input_2) == output", "title": "" }, { "docid": "44c86d85c8da7ede9a8f069dee724c78", "score": "0.62793255", "text": "def isValidInputOutputData(self, inputVolumeNode, outputVolumeNode):\n if not inputVolumeNode:\n logging.debug('isValidInputOutputData failed: no input volume node defined')\n return False\n if not outputVolumeNode:\n logging.debug('isValidInputOutputData failed: no output volume node defined')\n return False\n if inputVolumeNode.GetID()==outputVolumeNode.GetID():\n logging.debug('isValidInputOutputData failed: input and output volume is the same. Create a new volume for output to avoid this error.')\n return False\n return True", "title": "" }, { "docid": "44c86d85c8da7ede9a8f069dee724c78", "score": "0.62793255", "text": "def isValidInputOutputData(self, inputVolumeNode, outputVolumeNode):\n if not inputVolumeNode:\n logging.debug('isValidInputOutputData failed: no input volume node defined')\n return False\n if not outputVolumeNode:\n logging.debug('isValidInputOutputData failed: no output volume node defined')\n return False\n if inputVolumeNode.GetID()==outputVolumeNode.GetID():\n logging.debug('isValidInputOutputData failed: input and output volume is the same. Create a new volume for output to avoid this error.')\n return False\n return True", "title": "" }, { "docid": "44c86d85c8da7ede9a8f069dee724c78", "score": "0.62793255", "text": "def isValidInputOutputData(self, inputVolumeNode, outputVolumeNode):\n if not inputVolumeNode:\n logging.debug('isValidInputOutputData failed: no input volume node defined')\n return False\n if not outputVolumeNode:\n logging.debug('isValidInputOutputData failed: no output volume node defined')\n return False\n if inputVolumeNode.GetID()==outputVolumeNode.GetID():\n logging.debug('isValidInputOutputData failed: input and output volume is the same. 
Create a new volume for output to avoid this error.')\n return False\n return True", "title": "" }, { "docid": "44c86d85c8da7ede9a8f069dee724c78", "score": "0.62793255", "text": "def isValidInputOutputData(self, inputVolumeNode, outputVolumeNode):\n if not inputVolumeNode:\n logging.debug('isValidInputOutputData failed: no input volume node defined')\n return False\n if not outputVolumeNode:\n logging.debug('isValidInputOutputData failed: no output volume node defined')\n return False\n if inputVolumeNode.GetID()==outputVolumeNode.GetID():\n logging.debug('isValidInputOutputData failed: input and output volume is the same. Create a new volume for output to avoid this error.')\n return False\n return True", "title": "" }, { "docid": "44c86d85c8da7ede9a8f069dee724c78", "score": "0.62793255", "text": "def isValidInputOutputData(self, inputVolumeNode, outputVolumeNode):\n if not inputVolumeNode:\n logging.debug('isValidInputOutputData failed: no input volume node defined')\n return False\n if not outputVolumeNode:\n logging.debug('isValidInputOutputData failed: no output volume node defined')\n return False\n if inputVolumeNode.GetID()==outputVolumeNode.GetID():\n logging.debug('isValidInputOutputData failed: input and output volume is the same. Create a new volume for output to avoid this error.')\n return False\n return True", "title": "" }, { "docid": "660df1c7a069772253febe3bb689f890", "score": "0.62490493", "text": "def isValidInputOutputData(self, inputVolumeNode, outputVolumeNode):\r\n if not inputVolumeNode:\r\n logging.debug('isValidInputOutputData failed: no input volume node defined')\r\n return False\r\n if not outputVolumeNode:\r\n logging.debug('isValidInputOutputData failed: no output volume node defined')\r\n return False\r\n if inputVolumeNode.GetID()==outputVolumeNode.GetID():\r\n logging.debug('isValidInputOutputData failed: input and output volume is the same. 
Create a new volume for output to avoid this error.')\r\n return False\r\n return True", "title": "" }, { "docid": "39fd04a6aeee9b62960ee36d515c0fa7", "score": "0.6244626", "text": "def testCheckInputNegative2(self):\n self.assertListEqual(self.result2, self.expected2)", "title": "" }, { "docid": "65fdd432fddd8050ff5fd2043394d824", "score": "0.6186744", "text": "def testCheckInputNegative1(self):\n self.assertListEqual(self.result1, self.expected1)", "title": "" }, { "docid": "7f748fa4fb9604bc09e04b43637792fb", "score": "0.6133478", "text": "def compare_output(expected_output, received_output):\r\n\r\n for i, val in enumerate(expected_output):\r\n if val != 2:\r\n msg = \"received and expected output does not match at postion {}\".format(i)\r\n assert received_output[i] == val, msg", "title": "" }, { "docid": "ef0c98264bfb54049aff27bfc9f847ea", "score": "0.6129218", "text": "def validate(self, out):\n\n outlines = self.stdout.strip().split(\"\\n\")\n testlines = out.strip().split(\"\\n\")\n\n if len(outlines) > len(testlines):\n raise ExcessiveOutput()\n elif len(outlines) < len(testlines):\n raise MissingOutput()\n\n for i in range(len(outlines)):\n if outlines[i] != testlines[i]:\n raise WrongOutput()", "title": "" }, { "docid": "56e82f53709bdc6d8b16b5ee4c9c3f80", "score": "0.60609865", "text": "def check_output(self, out):\n # Output should follow a uniform distribution in [0, 1<<32)\n expected_avg = 1 << 31\n expected_std = (1 << 32) / np.sqrt(12)\n rtol = 0.05 # given enough iterations\n np.testing.assert_allclose(out.mean(), expected_avg, rtol=rtol)\n np.testing.assert_allclose(out.std(), expected_std, rtol=rtol)", "title": "" }, { "docid": "5aeb6f945aef3891ef41761cc5d719f7", "score": "0.60038656", "text": "def _checkDuplicate(self, input):\n if \"red_team\" not in input or \"blue_team\" not in input:\n # siamo nell'update..non sono campi obbligatori\n return\n if input.get(\"red_team\") == input.get(\"blue_team\"):\n raise GraphQLError(\n u\"Non si può impostare due volte la stessa squadra per un match.\" # noqa\n )", "title": "" }, { "docid": "6183bbadbd1d6f2aece2ee0d3aa05706", "score": "0.60011446", "text": "def test_part_2(problem_input, output):\n assert part_2.solution(problem_input) == output", "title": "" }, { "docid": "b2ea70baa886b85ae6b00395bee7b98a", "score": "0.59514016", "text": "def isconsistent(self, other) -> bool:", "title": "" }, { "docid": "90e8ba8c1a34ea315346707bfad6ef15", "score": "0.58970046", "text": "def validate(self, input):", "title": "" }, { "docid": "a018fc8dfed76655820027b03a1c967e", "score": "0.5868509", "text": "def err_no_ordered(res, outputs):\n this_outs = np.array(outputs, copy=True).T\n this_res = np.resize(np.array(res, copy=True).T, this_outs.shape)\n\n return np.sum(this_res != this_outs)", "title": "" }, { "docid": "98f1dcce0708401bfba2dd1c59fe4c69", "score": "0.5866357", "text": "def testComplementOfEquivalence(self):\n self.assertOutput(\n original=\"Process.pid != 10\",\n output=\"Process.pid != 10\")", "title": "" }, { "docid": "5f6d07772759616b6a707ae1b8a9c6c8", "score": "0.5821444", "text": "def test_part_1(problem_input, output):\n assert part_1.solution(problem_input) == output", "title": "" }, { "docid": "5f6d07772759616b6a707ae1b8a9c6c8", "score": "0.5821444", "text": "def test_part_1(problem_input, output):\n assert part_1.solution(problem_input) == output", "title": "" }, { "docid": "22aa3a843fbaf43c6926ff15fab1710e", "score": "0.580891", "text": "def __eq__(self, other):\r\n return other.outputs == self.outputs", "title": "" }, 
{ "docid": "cdda169727196925126521ca8b2353d7", "score": "0.5807647", "text": "def check_output_duplicates(self):\n seen = dict()\n idx = None\n for name, value in self.output._allitems():\n if name is None:\n if idx is None:\n idx = 0\n else:\n idx += 1\n if value and value in seen:\n raise WorkflowError(\n \"Duplicate output file pattern in rule {}. First two \"\n \"duplicate for entries {} and {}.\".format(\n self.name, seen[value], name or idx\n )\n )\n seen[value] = name or idx", "title": "" }, { "docid": "0fd07527cccf7b00d4859b64dda1fa45", "score": "0.579142", "text": "def validValidity():", "title": "" }, { "docid": "0fd07527cccf7b00d4859b64dda1fa45", "score": "0.579142", "text": "def validValidity():", "title": "" }, { "docid": "48f5069ca1cda746deb7223eb070b84d", "score": "0.5776022", "text": "def test():\n in_out = [([], 0),\n ([15], 15),\n ([15,30], 30),\n ([30,15], 30),\n ([15,15,15], 30),\n ([15,150,15], 150),\n ([30, 15, 60, 75, 45, 15, 15, 45], 180),\n ([30, 15, 60, 75, 45, 45, 15, 45], 195),\n ([30, 45, 60, 90, 45, 15, 15, 45], 195)]\n \n for input, output in in_out:\n try:\n assert f1(input) == output\n except:\n print(input,f1(input),output)", "title": "" }, { "docid": "4cbc29a4e977a88e59a21b2c8ae56922", "score": "0.5759873", "text": "def test_mismatch(self) -> None:\n result = object()\n other = object()\n self.assertThat(\n returns(Is(result)).match(lambda: other),\n Not(Is(None)),\n )", "title": "" }, { "docid": "3600d66c96b8bd5ac5f8f684fc2ee9ff", "score": "0.57452494", "text": "def testEquality(self):\n self.assertEqual(self.bin1, self.bin2, \"Unequal bins with the same outcome repeated\")", "title": "" }, { "docid": "fd393d4e4cf705c8e054cc4e832b44cd", "score": "0.57303685", "text": "def test__validate_syncing__0():\n for input_value, expected_output in (\n (True, True),\n (False, False)\n ):\n output = validate_syncing(input_value)\n vampytest.assert_eq(output, expected_output)", "title": "" }, { "docid": "e197a3733580292e2e3d827eb99e6f55", "score": "0.5711529", "text": "def check_output(gold_mr, out_mr, fix_type='all'):\n # count the errors in the output, looking at the MR\n added, missing, valerr, repeated = 0, 0, 0, 0\n diff = {}\n for slot in set(gold_mr.keys()) | set(out_mr.keys()):\n if slot in gold_mr and slot not in out_mr:\n if fix_type != 'all' and 'missing' not in fix_type: # ignore missing stuff -- adjust out_mr\n out_mr[slot] = dict(gold_mr[slot])\n else:\n missing += sum(gold_mr[slot].values())\n diff[slot] = {val: -count for val, count in gold_mr[slot].items()}\n elif slot not in gold_mr and slot in out_mr:\n if fix_type != 'all' and 'added' not in fix_type: # ignore added stuff -- adjust out_mr\n del out_mr[slot]\n else:\n added += sum(out_mr[slot].values())\n diff[slot] = out_mr[slot]\n else:\n # remove repeated first (check if MR has same val less than out + same value more than 1x)\n for val in out_mr[slot].keys():\n if val in gold_mr[slot] and gold_mr[slot][val] < out_mr[slot][val]:\n repeated += out_mr[slot][val] - gold_mr[slot][val]\n out_mr[slot][val] = gold_mr[slot][val]\n # now compute the diff in the # of value occurrences\n slot_diff = {val: gold_mr[slot].get(val, 0) - out_mr[slot].get(val, 0)\n for val in set(gold_mr[slot].keys()) | set(out_mr[slot].keys())}\n if fix_type != 'all':\n for val, val_diff in list(slot_diff.items()):\n if 'missing' not in fix_type and val_diff > 0: # ignore missing stuff -- adjust out_mr\n out_mr[slot][val] = out_mr[slot].get(val, 0) + val_diff\n del slot_diff[val]\n if 'added' not in fix_type and val_diff < 0: # 
ignore added stuff -- adjust out_mr\n out_mr[slot][val] = out_mr[slot].get(val, 0) - val_diff\n del slot_diff[val]\n diff[slot] = {val: -count for val, count in slot_diff.items() if count != 0}\n # diffs both ways\n mr_not_out = sum([count for count in slot_diff.values() if count > 0])\n out_not_mr = - sum([count for count in slot_diff.values() if count < 0])\n # value errors up to the same # of values\n valerr += min(mr_not_out, out_not_mr)\n # others fall under missing & added\n missing += max(mr_not_out - out_not_mr, 0)\n added += max(out_not_mr - mr_not_out, 0)\n\n diff = json.dumps({slot: vals for slot, vals in diff.items() if vals})\n return added, missing, valerr, repeated, diff, out_mr", "title": "" }, { "docid": "87733e706297acda0c0cf150ae9cfbf6", "score": "0.5707366", "text": "def test_valid_inputs(self):\n for input_value, expected_output in get_items(self.valid_inputs):\n initial_input_value = copy.deepcopy(input_value)\n\n serializer = self.serializer_class(data=input_value)\n serializer.is_valid()\n\n assert serializer.initial_data == initial_input_value\n assert self.field.run_validation(initial_input_value) == expected_output", "title": "" }, { "docid": "6f30ede80f9a9be6525140017f26c6c1", "score": "0.56929", "text": "def check_input(self, input):\n pass", "title": "" }, { "docid": "8e696a5e2699def0183a0863bf457205", "score": "0.568187", "text": "def _numpy_checker(x, y):\n x, y = x[0], y[0]\n if (x.dtype != y.dtype or x.shape != y.shape\n or numpy.any(numpy.abs(x - y) > 1e-10)):\n raise Exception(\"Output mismatch.\", {'performlinker': x, 'clinker': y})", "title": "" }, { "docid": "adfab7077ce81d9ca0f8b4d8426a20b3", "score": "0.56721437", "text": "def result_should_be(self, expected):\n if self._result != expected:\n raise AssertionError('%s != %s' % (self._result, expected))", "title": "" }, { "docid": "9144536b77f19fa9699d1e9aeb25a2a6", "score": "0.5655695", "text": "def test_input_unchanged(self):\n matrix_1 = [[6, 6], [3, 1]]\n matrix_2 = [[1, 2], [3, 4]]\n matrix_1_original = deepcopy(matrix_1)\n matrix_2_original = deepcopy(matrix_2)\n add(matrix_1, matrix_2)\n self.assertEqual(matrix_1, matrix_1_original)\n self.assertEqual(matrix_2, matrix_2_original)", "title": "" }, { "docid": "b2ede05d0fa7cbc699d67556e2bdb814", "score": "0.56550705", "text": "def verify(self, testInput, testOutput):\n expOutput = self.get_expected_result(testInput)\n info = 'Input: {}, Size of input: {}\\nOutput: {}, \\nExpected: {}'.format(\n testInput, len(testInput), testOutput, expOutput)\n\n # Test correct inversions\n self.assert_equal(expOutput, testOutput, info)", "title": "" }, { "docid": "b8127c50bad105bf4250a70a7e60ad35", "score": "0.56527394", "text": "def test_not_equal_on_not_equal_unique_identifier(self):\n a = objects.EncryptionKeyInformation(\n unique_identifier=\"100182d5-72b8-47aa-8383-4d97d512e98a\"\n )\n b = objects.EncryptionKeyInformation(\n unique_identifier=\"00000000-1111-2222-3333-444444444444\"\n )\n\n self.assertTrue(a != b)\n self.assertTrue(b != a)", "title": "" }, { "docid": "3a51e12a73b2739801872442407bcfa3", "score": "0.5620477", "text": "def failIfDifferent(self, first_file, second_file):\n diff_cmd = ['diff', first_file, second_file]\n if subprocess.call(diff_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) != 0:\n self.fail('Output does not match!')", "title": "" }, { "docid": "d3fe5cb32ac07f5345159d8539c4c728", "score": "0.5611779", "text": "def test_input_output(self):\n ovf = OVF(self.input_ovf, self.working_dir, self.temp_file)\n ovf.write()\n 
self.check_diff('')\n\n # Filename output too\n ovf = OVF(self.input_ovf, self.working_dir, self.temp_file + \".a.b.c\")\n ovf.write()\n self.check_diff('', file2=(self.temp_file + \".a.b.c\"))", "title": "" }, { "docid": "61296a7cf76fcf941bc9a3cf483d4707", "score": "0.56083167", "text": "def validate_output(self):\n for f_out, nifti_mrs_img in zip(self.fileoutNames, self.imageOut):\n nifti_mrs_img.validate()", "title": "" }, { "docid": "5c5632f57391261d278013ca88b3b3a8", "score": "0.5585658", "text": "def test_validate_outputs_invalid_case(hrm):\n output_dict = {\"num_beats\": 10000, \"duration\": 60, \"voltage_extremes\": (\n 0, 200)}\n\n with pytest.raises(ValueError):\n hrm.validate_outputs(output_dict)", "title": "" }, { "docid": "e4472e935e7da0a84b8f0773879f376f", "score": "0.5583051", "text": "def test_validity_function_invalid(capsys):\n # get auth_tool object\n auth_tool = get_auth_tool_object(Namespace(json=None))\n # define a correct value\n correct = \"a\"\n incorrect = \"b\"\n hint = \"Test hint\"\n # run function with patched input\n with patch('builtins.input', side_effect=[incorrect, correct]):\n # run validity function\n auth_tool._ensure_valid_value_for(\n \"test\", lambda x: x == correct, hint=hint\n )\n out, err = capsys.readouterr()\n assert not bool(out)\n assert err == hint", "title": "" }, { "docid": "992e68f4b36c8c3445ecf11baa85978e", "score": "0.55691046", "text": "def testCheckInputPositive(self):\n self.assertListEqual(self.result3, self.expected3)", "title": "" }, { "docid": "b6cbf39ff494bffe79af394688eec788", "score": "0.5550888", "text": "def user_input_validate(print_statement):\n print(print_statement)\n first_value = input('\\r - Enter: ')\n second_value = input('\\r - Re-enter: ')\n if first_value == second_value:\n return first_value\n else:\n colour_print(f'(colour_warning)\\r # The values entered did not match, please try again.(colour_clear)')\n return False", "title": "" }, { "docid": "6b7d4afa42c33544264008d236badb06", "score": "0.5540823", "text": "def shape_check(output: torch.Tensor, target: torch.Tensor) -> None:\n output_shape = list(output.shape)\n output_shape.pop(1)\n if output_shape != list(target.shape):\n raise ValueError(\n \"Got {} shape for output and {} for target. 
\"\n \"Shape should be equal except for 1 dimension of output (specifying classes).\".format(\n output.shape, target.shape\n )\n )", "title": "" }, { "docid": "32ef6f06b05710c514e8f7c0a6c5fa7e", "score": "0.5534121", "text": "def is_inconsistent(self) -> bool:\n return False", "title": "" }, { "docid": "f403d7c34f9ae824510495a878144249", "score": "0.55231065", "text": "def same_output_than(self, other):\n for t in [t for t in self.tables if t.type.name == \"output\"]:\n is_same = False\n for t2 in [t2 for t2 in other.tables if t2.type.name == \"output\"]:\n if t.model == t2.model and t.tablename == t2.tablename:\n is_same = True\n break\n if not is_same:\n return False\n\n for f in [f for f in self.files if f.type.name == \"input\"]:\n is_same = False\n for f2 in [f2 for f2 in other.files if f2.type.name == \"input\"]:\n if (f.name == f2.name and\n f.path == f2.path):\n is_same = True\n break\n if not is_same:\n return False\n return True", "title": "" }, { "docid": "ba358fb98aa4c62372610eb3e978847a", "score": "0.5521292", "text": "def compare_output(self, input, output, expected):\n if isinstance(input, unicode):\n input = input.encode('raw_unicode_escape')\n if sys.version_info > (3,):\n # API difference: Python 3's node.__str__ doesn't escape\n #assert expected is None or isinstance(expected, unicode)\n if isinstance(expected, bytes):\n expected = expected.decode('utf-8')\n if isinstance(output, bytes):\n output = output.decode('utf-8')\n else:\n if isinstance(expected, unicode):\n expected = expected.encode('raw_unicode_escape')\n if isinstance(output, unicode):\n output = output.encode('raw_unicode_escape')\n # Normalize line endings:\n if expected:\n expected = '\\n'.join(expected.splitlines())\n if output:\n output = '\\n'.join(output.splitlines())\n try:\n self.assertEquals(output, expected)\n except AssertionError, error:\n print >>sys.stderr, '\\n%s\\ninput:' % (self,)\n print >>sys.stderr, input\n try:\n comparison = ''.join(self.compare(expected.splitlines(1),\n output.splitlines(1)))\n print >>sys.stderr, '-: expected\\n+: output'\n print >>sys.stderr, comparison\n except AttributeError: # expected or output not a string\n # alternative output for non-strings:\n print >>sys.stderr, 'expected: %r' % expected\n print >>sys.stderr, 'output: %r' % output\n raise error", "title": "" }, { "docid": "e33bb90afcd9067e40a55c370304e6dc", "score": "0.5476065", "text": "def test_equality(self):\n pass", "title": "" }, { "docid": "31fec2a4a86c2ebefb77e8dcc64a669e", "score": "0.5474954", "text": "def __ne__(self, other: 'ResourceRecordInputRdataRdataAaaaRecord') -> bool:\n return not self == other", "title": "" }, { "docid": "ac05c306755d65c964f0dd08b1d14bbe", "score": "0.54709154", "text": "def not_equal(x1, x2, out=None):\n if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):\n return _np.not_equal(x1, x2, out=out)\n return _api_internal.not_equal(x1, x2, out)", "title": "" }, { "docid": "91f8192c136330fc7bd7113f25e045d5", "score": "0.54685175", "text": "def test_isscalar(input, output):\n assert output == utils.isscalar(input)", "title": "" }, { "docid": "ddd8f35f2920a36fdebc337c4e95b683", "score": "0.54648757", "text": "def test_equal_on_not_equal_unique_identifier(self):\n a = objects.EncryptionKeyInformation(\n unique_identifier=\"100182d5-72b8-47aa-8383-4d97d512e98a\"\n )\n b = objects.EncryptionKeyInformation(\n unique_identifier=\"00000000-1111-2222-3333-444444444444\"\n )\n\n self.assertFalse(a == b)\n self.assertFalse(b == a)", "title": "" }, { "docid": 
"b8b4641aeceab8e773d46ac3f28069b0", "score": "0.54492074", "text": "def test_eq_false(self):\n loc = Location(\"SLO\", 35.3, -120.7)\n loc1 = Location(\"LA\", -30.3, -131.21)\n loc2 = Location(\"SJ\", 23.4, -110.9)\n self.assertNotEqual(loc,loc1)\n self.assertNotEqual(loc1,loc2)\n self.assertNotEqual(loc2,loc)", "title": "" }, { "docid": "e0530d00c0cb99b10874a4da13c2edcb", "score": "0.54431087", "text": "def _almost_equal(current, new):\n if current is None or new is None:\n return False\n return abs(current - new) / current < 1e-03", "title": "" }, { "docid": "1ca04a4fc5f18bdd30c16f0075c02050", "score": "0.5442778", "text": "def __ne__(self, other: 'ResourceRecordUpdateInputRdataRdataAaaaRecord') -> bool:\n return not self == other", "title": "" }, { "docid": "dd080dc2f4704f7ab3a007e213779d2e", "score": "0.54427606", "text": "def same_gen_check():\r\n print('-' * 5 + 'same_gen_check' + '-' * 5)\r\n a = test_gen()\r\n b = test_gen()\r\n\r\n print('a == b : ', a==b)\r\n print('a is b : ', a is b)", "title": "" }, { "docid": "7c75a7528bd1d17935af9164db727e15", "score": "0.5442165", "text": "def test_card_not_equal_other(self):\n card = Card(Suit.SPADE, 2)\n for bad_value in (\"a\", \"1\", \"2\", 2, 20.0, Card):\n self.assertNotEqual(card, bad_value)", "title": "" }, { "docid": "8e46f73aa43a0a606b5b19c0764f1a89", "score": "0.5439961", "text": "def test_not_equal_on_not_equal_unique_identifier(self):\n a = objects.MACSignatureKeyInformation(\n unique_identifier=\"100182d5-72b8-47aa-8383-4d97d512e98a\"\n )\n b = objects.MACSignatureKeyInformation(\n unique_identifier=\"00000000-1111-2222-3333-444444444444\"\n )\n\n self.assertTrue(a != b)\n self.assertTrue(b != a)", "title": "" }, { "docid": "c548e9a3bc682343904412f052a3da19", "score": "0.54387414", "text": "def __ne__(self, other: 'ResourceRecordInputRdataRdataTxtRecord') -> bool:\n return not self == other", "title": "" }, { "docid": "669c60b31bb6657e88ec711c4cc524ec", "score": "0.5436216", "text": "def test_equality_of_mana_when_they_are_not_equal():\n mana = ertai.Mana(\"Black\", \"Blue\", \"Red\", \"Red\")\n other_mana = ertai.Mana(\"Black\", \"Blue\", \"Red\")\n assert mana != other_mana", "title": "" }, { "docid": "4d9bca8af462f6df5297aa872777bbca", "score": "0.54290247", "text": "def test_not_equal_on_type_mismatch(self):\n a = objects.MACSignatureKeyInformation()\n b = 'invalid'\n\n self.assertTrue(a != b)\n self.assertTrue(b != a)", "title": "" }, { "docid": "2c6c704ce4b8bd887a680d348441fadc", "score": "0.5428968", "text": "def __ne__(self, other: 'ResourceRecordUpdateInputRdataRdataTxtRecord') -> bool:\n return not self == other", "title": "" }, { "docid": "6fad971253de5babe8ce7013643e7709", "score": "0.54234815", "text": "def __ne__(self, other: 'ResourceRecordInputRdataRdataMxRecord') -> bool:\n return not self == other", "title": "" }, { "docid": "fa1d482e501daac7a7a5b348d1292f0d", "score": "0.54188776", "text": "def check_same_states(result_1: Result, result_2: Result) -> None:\n\n # Get all state of result_1\n states_result_1 = []\n for sample in result_1:\n states_result_1.append(sample.state.int)\n\n # Get all state of result_2\n states_result_2 = []\n for sample in result_2:\n states_result_2.append(sample.state.int)\n\n states_result_1.sort()\n states_result_2.sort()\n\n # Check content equality\n assert states_result_1 == states_result_2", "title": "" }, { "docid": "94fff62b936026879aa966e6b1e8276a", "score": "0.5417261", "text": "def test_not_equal_on_type_mismatch(self):\n a = objects.EncryptionKeyInformation()\n b = 
'invalid'\n\n self.assertTrue(a != b)\n self.assertTrue(b != a)", "title": "" }, { "docid": "dff16808bff9a13fe7c05513a15d8956", "score": "0.5400954", "text": "def test_assertNotEqual_unequal(self):\n for first, second in self.unequal_pairs:\n try:\n self.assertNotEqual(first, second)\n except:\n raise AssertionError, \\\n \"unittests.assertNotEqual failed on input %s and %s\" \\\n % (`first`, `second`)", "title": "" }, { "docid": "844f7ee721e6243404bf3e27db57b407", "score": "0.5398239", "text": "def _validate_source(self, other_replica_uid, other_generation,\n other_transaction_id):\n (old_generation,\n old_transaction_id) = yield self._get_replica_gen_and_trans_id(\n other_replica_uid)\n if other_generation < old_generation:\n raise InvalidGeneration\n if other_generation > old_generation:\n return\n if other_transaction_id == old_transaction_id:\n return\n raise InvalidTransactionId", "title": "" }, { "docid": "8e0701bfc5105a6cd477f88404a93c42", "score": "0.5387769", "text": "def test_error_on_wrong_update(inp1, inp2):\n metric = LearnedPerceptualImagePatchSimilarity()\n with pytest.raises(ValueError, match=\"Expected both input arguments to be normalized tensors .*\"):\n metric(inp1, inp2)", "title": "" }, { "docid": "e38acc7ea08fd2e9dd562c8df85f5e90", "score": "0.5381053", "text": "def check_results_equality(result_1: Result, result_2: Result, amplitude: bool = True) -> None:\n\n # First we check the two results have the same states\n check_same_states(result_1, result_2)\n\n # Then we check the two results have the same probabilities\n check_same_probabilities(result_1, result_2)\n\n # Finally we check the two results have the same stats properties\n if amplitude:\n check_same_state_properties(result_1, result_2)\n else:\n check_same_state_properties(result_1, result_2, amplitude)", "title": "" }, { "docid": "0972941ab7a2fccb2ccef33add61bd21", "score": "0.53801984", "text": "def __eq__(self, other):\n return (isinstance(other, self.__class__) and\n self.same_files(other, \"input\") and\n self.same_tables(other, \"input\") and\n self.same_files(other, \"output\") and\n self.same_tables(other, \"output\") and\n self.same_options(other))", "title": "" }, { "docid": "37f6b33e5cd3be14fd7689b4f6269bb5", "score": "0.53799206", "text": "def test_onestep_identical(inputs):\n seq_x, seq_y = inputs\n\n output = onestep.onestep(seq_x, seq_y, order=2, verbose=False, check=True)\n\n assert output is None", "title": "" }, { "docid": "3f17459c3337ae4cff1f309bfd2798b6", "score": "0.5377264", "text": "def testEquivalent(self):\r\n a = Vector3(1, 2, 3)\r\n b = Vector3(1, 2, 3)\r\n c = Vector3(3, 2, 1)\r\n self.assertEqual(a, b)\r\n self.assertNotEqual(a, c)", "title": "" }, { "docid": "031fb79bf5377b6a817a73d9cb045590", "score": "0.53721464", "text": "def _inner_consistency_check(self, other):\n # chack mesh\n if self.mesh is not other.mesh:\n raise Exception(\"Mesh of the forms do not match\")", "title": "" }, { "docid": "29de9d0bcbcdf74d3bf2978f0a486bd6", "score": "0.53686476", "text": "def test_not_match(self):\n put_in_stdin('x\\ny')\n with redirect_stdout() as output:\n res = click.prompt(\"\", type=InputValidator(converters=[YES, NO]))\n self.assertTrue(res.kind_of(YES))\n self.assertTrue(\"Error: x. 
Help: y - yes; n - no\" in output.getvalue())", "title": "" }, { "docid": "66b01fdfea5c6ccf99437d401f114300", "score": "0.5365897", "text": "def test_not_equal_on_not_equal_mac_signatures(self):\n a = objects.KeyWrappingData(mac_signature=b'\\x01')\n b = objects.KeyWrappingData(mac_signature=b'\\x10')\n\n self.assertTrue(a != b)\n self.assertTrue(b != a)", "title": "" }, { "docid": "ee33d5401fea0d4797bf13f5c9389740", "score": "0.5364966", "text": "def __ne__(self, other: 'ResourceRecordUpdateInputRdataRdataMxRecord') -> bool:\n return not self == other", "title": "" }, { "docid": "a7f0dc294bce0cac5665078240fd32a6", "score": "0.53580844", "text": "def test_sameas(value, other):\r\n return value is other", "title": "" }, { "docid": "3ae64d092af5146f3fad262ec6dc633e", "score": "0.53479916", "text": "def __ne__(self, other: 'ResourceRecordInputRdataRdataARecord') -> bool:\n return not self == other", "title": "" }, { "docid": "ae8600cd52e03de7e6f50667bd0c1d62", "score": "0.534541", "text": "def assertEqual_(self, output, expected):\n self.assertEqual(output, expected, 'Expected \"%s\". Got \"%s\".' % (expected, output))", "title": "" }, { "docid": "102bc05291914f285e698aeae237be48", "score": "0.53439385", "text": "def test_equality_of_mana_when_not_comparing_the_same_objects():\n mana = ertai.Mana(\"Black\", \"Blue\", \"Red\", \"Red\")\n assert mana != \"A string\"", "title": "" }, { "docid": "773136141f3341a6c9a90ed17ef9416a", "score": "0.53420055", "text": "def test_not_equal_on_type_mismatch(self):\n a = objects.KeyWrappingData()\n b = 'invalid'\n\n self.assertTrue(a != b)\n self.assertTrue(b != a)", "title": "" }, { "docid": "bcc2a31ce637f9b0ca481d01a60fdee1", "score": "0.5341564", "text": "def _expected_not_equal_to_actual(eq: EquipmentSinglePhase):\n if (eq.state is None) or (eq.expected_state is None):\n return False\n else:\n return eq.expected_state != eq.state", "title": "" }, { "docid": "f45a917695f33a1ae1f5193ca7320862", "score": "0.53412175", "text": "def test_ne(self):\n self.assertTrue(self.oc1 != self.oc3)\n self.assertFalse(self.oc1 != self.oc2)", "title": "" }, { "docid": "ef801b8d2007b27d9eab1f4bf42f3492", "score": "0.5335848", "text": "def check_placeholder_output(self, prog, outputs_name):\n block = prog[\"main\"]\n input_name = [x.name for x in list(block.inputs.values())]\n with block:\n new_outputs = []\n for output, output_name in zip(block.outputs, outputs_name):\n if output.name not in input_name or output.name == output_name:\n new_output = output\n else:\n new_output = mb.identity(x=output, name=output_name)\n new_outputs.append(new_output)\n block.set_outputs(new_outputs)", "title": "" }, { "docid": "c0a2fbe1725941edfb4fb841f8a6d484", "score": "0.5334173", "text": "def equal_to(self, input1, input2, output, callback=None):\n args = []\n args.append(\"--input1='{}'\".format(input1))\n args.append(\"--input2='{}'\".format(input2))\n args.append(\"--output='{}'\".format(output))\n return self.run_tool('equal_to', args, callback) # returns 1 if error", "title": "" }, { "docid": "35b99b18beb52498cad5db57464f5dd4", "score": "0.53287077", "text": "def _assert_output_vs_expected(output, expected):\n import unittest\n if isinstance(output, dict):\n testcase = unittest.TestCase('__init__')\n try:\n testcase.assertDictEqual(expected, output,\n msg=\"\\n\\nUnfortunately, the output is *not* correct..\")\n except AssertionError as error:\n print(error)\n return 0\n print(\"\\nWell done! 
Output is correct!\")\n return 1\n print(\"\\n\\nUnfortunately, the output is *not* a dictionary!\")\n return 0", "title": "" }, { "docid": "3053d582e09491948d969ec69e5b20bf", "score": "0.53269976", "text": "def test_wtf_negative(input_1, input_2, output):\n\n with pytest.raises(output):\n wtf(input_1, input_2)", "title": "" }, { "docid": "0da1bfe5d9a3b408acdcf3d02dab3b44", "score": "0.532426", "text": "def __ne__(self, other):\n return self._num != other._num and self._denom != other._denom", "title": "" }, { "docid": "37e82cc40fd82503bf9871ca27470f8e", "score": "0.532312", "text": "def test__validate_old_attributes__0():\n for input_value, expected_output in (\n (None, {}),\n ({}, {}),\n ({'a': 'b'}, {'a': 'b'})\n ):\n output = validate_old_attributes(input_value)\n vampytest.assert_eq(expected_output, output)", "title": "" }, { "docid": "bb61c65d9695700849551a820b2dc15a", "score": "0.5318516", "text": "def test_output_is_input(self):\n cl_object = processed_config_line('', self.args)\n expect = ''\n actual = cl_object.result()\n self.assertEqual(expect, actual)", "title": "" }, { "docid": "de0610778f3ee013cd25cf9389aedf81", "score": "0.53174376", "text": "def __ne__(self, other: 'ResourceRecordUpdateInputRdataRdataARecord') -> bool:\n return not self == other", "title": "" }, { "docid": "147f52296cbdaaec02b830d227f1a520", "score": "0.5312722", "text": "def transfer_id_same():\n\n print(\"Target account ID cannot be same as user account ID\")", "title": "" }, { "docid": "64f0df94bb81705a3783f8667ad4d5c5", "score": "0.5306945", "text": "def is_correct(self, result):\n pass", "title": "" }, { "docid": "6821d13a4192e399342a1fb84324967c", "score": "0.5299344", "text": "def __ne__(self, other):\n if not isinstance(other, VmDiskOrderByInput):\n return True\n\n return self.to_dict() != other.to_dict()", "title": "" }, { "docid": "49955bfb108a6287ab78bb7f429aacef", "score": "0.5297024", "text": "def validate_unique(self):\n pass", "title": "" }, { "docid": "25f9b130d1bd1789e7d4b8cddd52c4bc", "score": "0.52968687", "text": "def test_model_output_between_zero_and_one():\n\n np.testing.assert_array_less(0.0, result)\n\n np.testing.assert_array_less(result, 1.0)", "title": "" } ]