Dataset columns:

  query            string     lengths 9 to 3.4k
  document         string     lengths 9 to 87.4k
  metadata         dict
  negatives        sequence   lengths 4 to 101
  negative_scores  sequence   lengths 4 to 101
  document_score   string     lengths 3 to 10
  document_rank    string     102 distinct values
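The columns above describe a code-retrieval triplet dataset: each row pairs a natural-language query with a positive code document, a list of mined negative snippets, and their similarity scores. A minimal sketch of inspecting one row follows; the dataset path is a placeholder, not this card's real identifier.

# A minimal sketch, assuming the dataset is hosted on the Hugging Face Hub.
# "org/code-retrieval-triplets" is a placeholder path, not the real identifier.
from datasets import load_dataset

ds = load_dataset("org/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])             # natural-language query
print(row["document"])          # positive code snippet
print(len(row["negatives"]))    # 4 to 101 mined negative snippets
print(row["negative_scores"])   # one score per negative (stored as strings here)
print(row["document_score"], row["document_rank"])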
Add headers to both force latest IE rendering engine or Chrome Frame, and also to cache the rendered page for 10 minutes.
def add_header(r):
    r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
    r.headers["Pragma"] = "no-cache"
    r.headers["Expires"] = "0"
    r.headers['Cache-Control'] = 'public, max-age=0'
    return r
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=60'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=600'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=600'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=600'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=600'\n return response", "def add_header(response):\r\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\r\n response.headers['Cache-Control'] = 'public, max-age=0'\r\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n 
response.headers['Cache-Control'] = 'public, max-age=0'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=0'\n return response", "def add_header(response):\r\n # response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\r\n response.headers['Cache-Control'] = 'no-cache, no-store'\r\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Pragma'] = 'no-cache'\n response.headers['Cache-Control'] = 'no-cache, no-store' #'public, max-age=0'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = \"IE=Edge,chrome=1\"\n response.headers['Cache-Control'] = \"no-cache, no-store, must-revalidate, public, max-age=0\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response", "def add_header(r):\r\n r.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\r\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate, public, max-age=0\"\r\n r.headers[\"Pragma\"] = \"no-cache\"\r\n r.headers[\"Expires\"] = \"0\"\r\n return r", "def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n r.headers['Cache-Control'] = 'public, max-age=0'\n return r", "def add_header(response):\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'\n response.headers['Pragma'] = 'no-cache'\n response.headers['Expires'] = '-1'\n return response", "def add_header(request):\n request.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n request.headers[\"Pragma\"] = \"no-cache\"\n request.headers[\"Expires\"] = \"0\"\n 
request.headers['Cache-Control'] = 'public, max-age=0'\n return request", "def add_header(r):\r\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\r\n r.headers[\"Pragma\"] = \"no-cache\"\r\n r.headers[\"Expires\"] = \"0\"\r\n r.headers['Cache-Control'] = 'public, max-age=0'\r\n return r", "def add_header(r):\r\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\r\n r.headers[\"Pragma\"] = \"no-cache\"\r\n r.headers[\"Expires\"] = \"0\"\r\n r.headers['Cache-Control'] = 'public, max-age=0'\r\n return r", "def add_header(response):\n\n # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control\n response.cache_control.no_store = True\n return response", "def add_header(response):\n\n # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control\n response.cache_control.no_store = True\n return response", "def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers['Cache-Control'] = 'public, max-age=0'\n return r", "def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers[\"Cache-Control\"] = \"public, max-age=0\"\n return r", "def add_header(r):\n\tr.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n\tr.headers[\"Pragma\"] = \"no-cache\"\n\tr.headers[\"Expires\"] = \"0\"\n\tr.headers['Cache-Control'] = 'public, max-age=0'\n\treturn r", "def add_header(response):\n response.cache_control.public = True\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n response.headers[\"Pragma\"] = \"no-cache\"\n response.headers[\"Expires\"] = \"0\"\n return response", "def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n return r", "def AddHeader(r):\n\tr.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n\tr.headers[\"Pragma\"] = \"no-cache\"\n\tr.headers[\"Expires\"] = \"0\"\n\tr.headers['Cache-Control'] = 'public, max-age=0'\n\treturn r", "def add_header(req):\n\n req.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n req.headers[\"Pragma\"] = \"no-cache\"\n req.headers[\"Expires\"] = \"0\"\n req.headers['Cache-Control'] = 'public, max-age=0'\n return req", "def add_headers(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n return r", "def add_header(req):\n req.headers[\"Cache-Control\"] = \"no-cache\"\n return req", "def SetCacheHeaders(self, response):\n headers = framework_helpers.StaticCacheHeaders()\n for name, value in headers:\n response.headers[name] = value", "def add_header(r):\n r.headers['Acess-Control-Allow-Origin'] = '*'\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers['Cache-Control'] = 'public, max-age=0'\n return r", "def app_nocache(app):\n @app.after_request\n def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers['Cache-Control'] = 'public, max-age=0'\n return r", "def add_cache_headers(headers, cached):\n # type: (dict, dict) -> None\n if cached[\"etag\"] is not None:\n 
headers[\"If-None-Match\"] = cached[\"etag\"]\n if cached[\"last_modified\"] is not None:\n headers[\"If-Modified-Since\"] = datetime_to_httpdate(cached[\"last_modified\"])", "def nocache(response):\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, max-age=0'\n return response", "def SetCachingHeaders(self, revalidate):\n max_age = self.MAX_AGE\n #self.response.headers['Expires'] = email.Utils.formatdate(\n # time.time() + max_age, usegmt=True)\n cache_control = []\n if self.PUBLIC:\n cache_control.append('public')\n cache_control.append('max-age=%d' % max_age)\n if revalidate:\n cache_control.append('must-revalidate')\n self.response.headers['Cache-Control'] = ', '.join(cache_control)", "def SetCachingHeaders(self, revalidate):\n max_age = self.MAX_AGE\n #self.response.headers['Expires'] = email.Utils.formatdate(\n # time.time() + max_age, usegmt=True)\n cache_control = []\n if self.PUBLIC:\n cache_control.append('public')\n cache_control.append('max-age=%d' % max_age)\n if revalidate:\n cache_control.append('must-revalidate')\n self.response.headers['Cache-Control'] = ', '.join(cache_control)", "def _send_regenerated_head(self, content):\n self.send_response(200)\n self.send_header(\"Content-type\", 'text/html')\n self.send_header(\"Content-Length\", len(content))\n self.send_header(\"Last-Modified\", self.date_time_string())\n self.end_headers()", "def do_PREPARE_STANDARD_WEBSITE_HEADERS(self):\n\n self.send_response(200)\n self.headers.add_header('accept-ranges', 'bytes')\n self.headers.add_header('X-Content-Type-Options', 'nosniff')\n self.headers.add_header('X-Frame-Options', 'sameorigin')", "def disable_caching(self):\n\n def after_request(r: flask.Response):\n if 'Cache-Control' not in r.headers:\n r.headers['Cache-Control'] = 'no-store'\n return r\n\n self.after_request(after_request)", "def register_caching(app):\n if 'DEBUG' in app.config and app.config['DEBUG']:\n @app.after_request\n def after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate, public, max-age=0\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response", "def disable_cache(response):\n\n response.headers['Cache-Control'] = 'max-age=0, no-cache, no-store, must-revalidate, private'\n response.headers['Pragma'] = 'no-cache'\n response.headers['Expires'] = '0'\n return response", "def cache_control(value):\n response = view_get()\n response.headers[\"Cache-Control\"] = \"public, max-age={0}\".format(value)\n return response", "def cache():\n is_conditional = request.headers.get(\"If-Modified-Since\") or request.headers.get(\n \"If-None-Match\"\n )\n\n if is_conditional is None:\n response = view_get()\n response.headers[\"Last-Modified\"] = http_date()\n response.headers[\"ETag\"] = uuid.uuid4().hex\n return response\n else:\n return status_code(304)", "def never_cache_preview(response):\n response.cache_control.max_age = 0\n response.cache_control.no_cache = True\n response.cache_control.must_revalidate = True\n response.cache_control.no_store = True\n return response", "def never_cache_preview(self, response):\n response.cache_control.max_age = 0\n response.cache_control.no_cache = True\n response.cache_control.must_revalidate = True\n response.cache_control.no_store = True\n return response", "def main(request, response):\n header = request.GET.first(b\"header\")\n\n response.headers.set(b\"Origin-Policy\", header)\n response.headers.set(b\"Content-Type\", b\"text/html\")\n\n return u\"\"\"\n 
<!DOCTYPE html>\n <meta charset=\"utf-8\">\n <title>Origin policy bad header subframe</title>\n \"\"\"", "def set_res_headers(response):\n response.headers[\"Server\"] = \"OurTieba\"\n response.headers[\"X-Content-Type-Options\"] = \"nosniff\"\n response.headers[\"X-Frame-Options\"] = \"sameorigin\"\n if app.config.get(\"ENABLE_CSP\"):\n response.headers[\"Content-Security-Policy\"] = \"script-src \" + \" \".join(WHITELIST) + \"; object-src 'self'\"\n return response", "async def cache_control(request, handler):\n if request.path.startswith(\"/static/\"):\n\n def add_headers(obj):\n obj.headers[\"Cache-Control\"] = \"max-age=3600\"\n\n else:\n\n def add_headers(obj):\n obj.headers[\"Cache-Control\"] = \"no-store\"\n\n try:\n response = await handler(request)\n add_headers(response)\n return response\n except aiohttp.web.HTTPException as exc:\n add_headers(exc)\n raise", "def _SetCommonResponseHeaders(self):\n frame_policy = self.app.config.get('framing_policy', constants.DENY)\n frame_header_value = constants.X_FRAME_OPTIONS_VALUES.get(\n frame_policy, '')\n if frame_header_value:\n self.response.headers['X-Frame-Options'] = frame_header_value\n\n hsts_policy = self.app.config.get('hsts_policy',\n constants.DEFAULT_HSTS_POLICY)\n if self.request.scheme.lower() == 'https' and hsts_policy:\n include_subdomains = bool(\n hsts_policy.get('includeSubdomains', False))\n subdomain_string = '; includeSubdomains' if include_subdomains else ''\n hsts_value = 'max-age=%d%s' % (int(hsts_policy.get('max_age')),\n subdomain_string)\n self.response.headers['Strict-Transport-Security'] = hsts_value\n\n self.response.headers['X-XSS-Protection'] = '1; mode=block'\n self.response.headers['X-Content-Type-Options'] = 'nosniff'\n\n csp_policy = self.app.config.get(\n 'csp_policy', constants.DEFAULT_CSP_POLICY)\n report_only = False\n if 'reportOnly' in csp_policy:\n report_only = csp_policy.get('reportOnly')\n del csp_policy['reportOnly']\n header_name = ('Content-Security-Policy%s' %\n ('-Report-Only' if report_only else ''))\n policies = []\n for (k, v) in csp_policy.iteritems():\n policies.append('%s %s' % (k, v))\n csp = '; '.join(policies)\n\n # Set random nonce per response\n csp = csp % {'nonce_value': self.csp_nonce}\n\n self.response.headers.add(header_name, csp)", "def _setHeaders(self):\r\n if not self.headers_set:\r\n self.headers_set = 1\r\n for key in self.headers_out.keys():\r\n self._response.setHeader(key, self.headers_out[key])\r\n self._response.setContentType(self.content_type)", "def renderPage(c, page, request = None, response = None, cache = True, indexing = False):\n if request is None:\n # page rendered within a feed or batch context\n key = \"soup:\" + '_' + page.headers['name']\n else:\n # page rendered for online viewing or indexing\n key = \"soup:\" + page.headers['name']\n if not cache:\n return subRender(c,page,request,response,indexing)\n else:\n if \"x-cache-control\" in page.headers.keys():\n control = page.headers[\"x-cache-control\"].lower()\n m = MAX_AGE_REGEX.match(control)\n if m:\n seconds = int(m.group(3))\n try:\n if (c.cache.mtime(key) + seconds) < time.time():\n del(c.cache[key])\n except KeyError:\n pass\n try:\n if c.store.mtime(page.headers['name']) > c.cache.mtime(key):\n del(c.cache[key])\n raise KeyError\n else:\n return c.cache[key]\n except KeyError:\n c.cache[key] = buffer = subRender(c,page,request,response,indexing)\n return buffer\n # end else", "def test_cache_control_headers_on_apis(flask_app):\n rv = flask_app.get('api/v1/')\n headers = rv.headers\n 
assert headers.get('Cache-Control') == 'no-cache, no-store, must-revalidate, max-age=0'\n assert headers.get('Pragma') == 'no-cache'", "def setHeader(object, event):\n\n request = event.request\n\n if isThemeEnabled(request):\n request.environ['HTTP_X_THEME_ENABLED'] = True", "def add_default_headers(headers):\n headers[\"Allow\"] = \", \".join(server_constants.SUPPORTED_METHODS)\n headers[\"Connection\"] = \"keep-alive\"\n headers[\"Date\"] = get_rfc_822_time()", "def _update_headers(self):\n if not self._header_updated:\n headers = self.head_obj(self._client, self._spec)\n self._headers.update(headers)\n self._header_updated = True" ]
[ "0.82239413", "0.8223081", "0.8223081", "0.8223081", "0.8223081", "0.8196386", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8113264", "0.8087198", "0.7973406", "0.7866322", "0.76620805", "0.7237988", "0.71606845", "0.7153763", "0.7153763", "0.7151734", "0.7151734", "0.714256", "0.7137825", "0.706971", "0.7060523", "0.7042182", "0.6963361", "0.68843406", "0.68829817", "0.6805885", "0.6749851", "0.66758037", "0.65287817", "0.6459716", "0.6357989", "0.63252634", "0.63252634", "0.61986053", "0.5990623", "0.5956298", "0.5954509", "0.5940061", "0.5841903", "0.57935256", "0.5705751", "0.5703662", "0.5653951", "0.55910814", "0.558507", "0.54570967", "0.5443477", "0.5422608", "0.5404912", "0.5289727", "0.5279486", "0.5271359" ]
0.71290123
63
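The objective metadata marks each row for triplet training over (query, document, negatives). As an illustrative sketch, not something this card prescribes, a row could be expanded into sentence-transformers InputExample triplets like this:

# Illustrative only: the choice of sentence-transformers' InputExample is an
# assumption; any (anchor, positive, negative) consumer would work the same way.
from sentence_transformers import InputExample

def row_to_triplets(row, max_negatives=5):
    triplets = []
    for negative in row["negatives"][:max_negatives]:
        triplets.append(
            InputExample(texts=[row["query"], row["document"], negative])
        )
    return triplets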
Returns a list of the currently connected players (on the MC server). First tries to hit the cache to see if this has been checked recently. If there is no cache entry, queries the Minecraft server's zombiepygman API to get the list of currently connected players.
def _get_connected_player_list(self):
    if not zpgapi.is_zgp_api_enabled():
        # API is not configured, skip this.
        return []

    cache_key = 'api_connected_players'
    cache_val = cache.get(cache_key)

    if cache_val != None:
        return cache_val

    api = zpgapi.get_zpg_api_iface()
    try:
        api_response = api.cmd_list_connected()
        cache_val = api_response['player_list']
    except urllib2.URLError:
        # Error with zombiepygman.
        # This will get cached, but that's OK. It will prevent request
        # pileup on the gunicorn workers.
        cache_val = []

    cache.set(cache_key, cache_val, 60)
    return cache_val
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_players(self):\n return self.server.status().players.online", "def get_players():\n nfl_players = redis_cache('nfl_players_key', NFL_Player_2015.query.all)\n return nfl_players", "def players(self):\n return self.currents.player", "def get_players(self):\n\n # Append the current player to the list and return it\n players_list = list(self._players.queue)\n players_list.append(self._current_player)\n return players_list", "def getPlayers(self):\n\t\tself.server.playerMutex.lock()\n\t\tplayers = [ (player[0], player[1][3]) for player in self.server.players.items() ]\n\t\tself.server.playerMutex.unlock()\n\t\treturn players", "def get_players(self):\r\n return self.players.values()", "def players(self):\n return self._get(\"players\")", "def ready_players(self):\n return self.players.filter_by(sitting_out=False).join(players_active).all()", "def active_players(self):\n return self.players.join(players_active).all()", "def _player_list(self):\n game = self.ctrl.game\n return game.players[self.i_to_player_id(0)], game.players[self.i_to_player_id(1)]", "def current_players(self):\n return self.previous_event.current_players", "def get_players():\n return [Mpris_Player(item)\n for item in Mpris_Utils.get_session().list_names()\n if re.match(Mpris_Interfaces.MEDIA_PLAYER, item) > 0]", "def get_all_game_players(self):\n return GamePlayer.objects.filter(game=self)", "def get_all_players(self):\n\n self._logger.debug(\"Getting player list\")\n\n try:\n self.check_if_db_connected()\n cursor = self._db_conn.cursor()\n cursor.execute(\"SELECT first_name, last_name, nickname, time FROM player \\\n ORDER BY time DESC\")\n players = cursor.fetchall()\n\n except MySQLdb.OperationalError:\n self._logger.error(\"MySQL operational error occured\")\n traceback.print_exc()\n raise exceptions.DBConnectionError(\"Cannot connect to MySQL server\")\n\n except MySQLdb.ProgrammingError:\n self._logger.error(\"MySQL programming error\")\n traceback.print_exc()\n raise exceptions.DBSyntaxError(\"MySQL syntax error\")\n\n else:\n return players", "def getPlayers(self):\n return iter(self.players)", "def get_online_list(self) -> list:\n return self._get_json(self._URLS['GetOnlineList'])[1:]", "def get_player_list():\r\n return list(\r\n pymongo.MongoClient('mongodb://localhost:27017/')['wows']['na_player_list'].find( # !!!!!!!!!!!!!!!!!!!!!!!!!\r\n {'scraped': False}, {'_id': 0, 'player_id': 1, 'player_name': 1, 'clan': 1}\r\n )\r\n )", "async def get_players(self):\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/leaderboard/3v3?locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n output = {}\r\n for player in range(0, 965):\r\n output[int(player)] = data['rows'][player]\r\n with open('Pvp_Players.json', 'w') as pvp_players:\r\n json.dump(output, pvp_players)\r\n return output", "def redis_client_list(self):\n def func(server):\n return server.server.client_list()\n self.__run_redis_cmd(func)", "def getPlayerList(self):\n return(self.playerList)", "def get_active_players(self, season):\n try:\n cursor = self.conn.cursor()\n command = '''\n SELECT Player\n FROM InLeague\n WHERE League IN (SELECT L_ID\n FROM League\n WHERE Season = ?)\n '''\n cursor.execute(command, (season,))\n players = []\n for p in cursor.fetchall():\n players.append(p[0])\n return players\n except BaseException as e:\n self.log.log_error('Fehler beim laden der aktiven Spieler', e)\n 
raise e", "def _get_live_games(self):\n response = requests.get(self._get_score_url())\n if response.status_code == 200:\n return [g for g in response.json()['games'] if g['status']['state'] == self.desired_game_state]", "def getPlayers(self):\n players = []\n for pgp in self.sandboxplayergroupplayer_set.filter(quit=False):\n players.append(pgp.player)\n return players", "async def fetch_games(self):\n return await self.http.get_game_list()", "def players(self):\n return self._get_by_class(Player)", "def current_wifi_clients(self) -> list:\n self._parse_clients_info()\n return self._current_wifi_clients", "def players(self) -> List[Player]:\n return [self.white_player, self.black_player]", "def getConnectedUsers(self):\n\n\t\treturn self.connectedUsers", "def players(self):\n return Player.objects.filter(team=self)", "def list_available_clients(self):\n connected_clients = self.all_clients.keys()\n return connected_clients", "def get_played(self):\n return self.get_challenges().filter(status__in=('D', 'P'))", "def get_registered_clients(self):\n return self.hub.get_registered_clients(self.get_private_key())", "def channels(self):\n return [channel for channel in self.client.channels if channel.has_nick(self)]", "def media_players(self) -> List[dict]:\n return self.items_by_domain(\"media_player\")", "def playlists(self):\n return self._playlists", "async def get_cache_names(self) -> list:\n conn = await self.random_node()\n return await cache_get_names_async(conn)", "def get_active_cache(reactor, connection, tenant_id, group_id):\n eff = CassScalingGroupServersCache(tenant_id, group_id).get_servers(True)\n disp = get_working_cql_dispatcher(reactor, connection)\n d = perform(disp, eff)\n return d.addCallback(lambda (servers, _): {s['id']: s for s in servers})", "def get_free_games(self) -> List[Game]:", "def get_current_user_games_playing():\n return Game.get_user_games_playing(users.GetCurrentUser())", "def get_cached_games(self, user_id: str) -> Optional[List[types.GameInformation]]:\n raise NotImplementedError", "def get_players(self, address):\n room = None\n\n if self.config.get('wc_room_workaround', True):\n room = self.room_manager.find_room_for_client_ip(address[0])\n\n if room is None:\n room = self.room\n\n return room.players", "def get_currently_playing(self):\r\n return requests.get(\r\n f\"{API_URL}/me/player/currently-playing\",\r\n headers={\r\n \"Accept\": \"application/json\",\r\n \"Authorization\": f\"Bearer {self.access_token}\"\r\n }\r\n )", "def get_user_games_playing(user):\n if not user: return []\n playing = db.Query(GamePlayer).filter('user =', user)\n return [p.game for p in playing]", "def _get_player_info(self):\n return [player._player_info() for player in self.players.values()]", "def online(self):\n api_call = self.presence()\n if api_call.get('ok'):\n # retrieve all users so we can find our bot\n return api_call.get('online')\n return None", "def get_sessions(self):\n return self.current_sessions", "def GetPlaylists(self):\n return self.__playlists.copy()", "def clients(self):\n self.update_results()\n return self._clients", "def get_counters():\n servers = get_servers()\n\n online_players = sum([server.players.current for server in servers])\n active_servers = sum([1 for server in servers if server.players.current > 0])\n total_servers = len(servers)\n\n return (online_players, active_servers, total_servers)", "def list(self):\n\t\tif self.client is None:\n\t\t\traise UsageError(\"Not connected!\")\n\t\treturn self.client.list_conns()", "def players(self) 
-> dict[int, Player]:\n return self._players", "def get_players(self, all=False):\n if all:\n return self.all_players\n else:\n return self.players", "async def do_playerlist():\n\n download = urllib.request.urlopen(server_api2)\n data = json.loads(download.read())\n player_list = []\n try:\n for i in data['players']['sample']:\n player_list.append(i['name'])\n except KeyError:\n if data['online'] == False:\n await bot.send_message(c, 'Failed. The server is offline.')\n return\n else:\n await bot.send_message(c, 'There are no players online.')\n return\n string = ''\n for i in player_list:\n string += '{}, '.format(i)\n await bot.send_message(c, string)", "def get_playlists(self):\n if self.youtube is None:\n self.youtube = __get_client()\n return self.youtube.playlists().list(part=\"snippet\", mine=True)\\\n .execute()", "def waiting_clients(self):\n return self.storage.iterkeys()", "def get_player_order(session):\n players = [session.current_player()]\n for p in session.players():\n if p is not session.current_player():\n players.append(p)\n return players", "def get_all_games():\n games = brain.get_all_games()\n return games", "def _retrieve_plays(self):\n try:\n recents = self._spotify._get(\"me/player/recently-played\", limit=50)\n except SpotifyException as se:\n if 'The access token expired' in se.msg:\n self._renew_tokens()\n recents = self._spotify._get(\"me/player/recently-played\", limit=50)\n else:\n raise\n self._plays = recents['items']", "async def listplayers(self, ctx, *, server_name=None):\n if server_name:\n server_name = server_name.replace('_', ' ').title()\n msg = await ctx.send(f'**Getting Data for the {server_name} server**')\n await ctx.channel.trigger_typing()\n resp = await self.bot.aio_session.get(\n f'{self.bot.api_base}/rcon/{ctx.guild.id}/{server_name}/listplayers/',\n headers=self.bot.auth_header\n )\n if resp.status == 200:\n message = '\\n'.join(await resp.json())\n await ctx.channel.trigger_typing()\n await msg.delete()\n await ctx.send(f'**Players currently on the {server_name} server:**\\n{message}')\n return\n elif resp.status < 500:\n message = (await resp.json()).get('details', 'There was a problem. Please try again')\n else:\n message = \"There was an error on my server. I have notified the maintainers.\"\n await ctx.send(message)\n else:\n futures = []\n resp = await self.bot.aio_session.get(\n f'{self.bot.api_base}/rcon/{ctx.guild.id}/',\n headers=self.bot.auth_header\n )\n if resp.status != 200:\n await ctx.send('There was a problem getting the servers for this guild.')\n return\n guild_servers = await resp.json()\n for server in guild_servers:\n msg = await ctx.send(f'**Getting Data for the {server[\"name\"]} server**')\n\n # noinspection PyShadowingNames\n async def _listplayers(server_name: str, msg: discord.Message):\n resp = await self.bot.aio_session.get(\n f'{self.bot.api_base}/rcon/{ctx.guild.id}/{server_name}/listplayers/',\n headers=self.bot.auth_header\n )\n if resp.status == 200:\n message = '\\n'.join(await resp.json())\n await ctx.channel.trigger_typing()\n await msg.delete()\n await ctx.send(f'**Players currently on the {server_name} server:**\\n{message}')\n return\n elif resp.status < 500:\n message = f'Error getting data for {server_name}' + \\\n (await resp.json()).get('details', 'Please try again')\n else:\n message = \"There was an error on my server. 
I have notified the maintainers.\"\n await ctx.send(message)\n\n futures.append(_listplayers(msg=msg, server_name=server['name']))\n if futures:\n asyncio.ensure_future(asyncio.gather(*futures))\n else:\n await ctx.send('There are no available servers for this guild.')", "def get_clients(self, channel):\n if channel not in self.clients.keys():\n return []\n return self.clients[channel]", "def get_channels_for(self, server, nick):\n channels = []\n for channel in self.serverchans[server.lower()].values():\n if irc.strings.lower(nick) in channel.users:\n channels.append(channel)\n return channels", "def fusion_api_get_active_sessions(self):\n return self.loginsession.get_active_sessions()", "def peer_list_active(self):\n return self.client.call('GET', self.name + 'peer-list/active')", "def clients(self):\n return self._clients", "def channels(self):\n if not self.is_loaded():\n return []\n else:\n return ipmi_channels()", "def get_users_for(self, server, channame):\n skey = server.lower()\n ckey = irc.strings.lower(channame)\n users = []\n if skey in self.serverchans and ckey in self.serverchans[skey]:\n users = self.serverchans[skey][ckey].users.keys()\n return users", "def get_sessions(self):\n\n return self.all_sessions", "async def tod_list(self, ctx, *args):\n message = \"__Currently Playing__\\n\"\n if len(self.players) == 0:\n message = \"There are currently no users playing.\"\n for player in self.players:\n message += f\"> {str(player)[:-5]}\\n\"\n await ctx.send(message)", "async def get() -> list:\n if _cache is None:\n await _update()\n return _cache", "def peer_list_reachable(self):\n return self.client.call('GET', self.name + 'peer-list/reachable')", "def get_winners_of_game(self):\n return self.game_winners", "async def players(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n if room not in tod_games:\n await amor_manager.say(\"Truth Or Dare not in progress in {}\".format(room))\n return\n\n await amor_manager.say(\"Current Players: {}\".format(\", \".join(tod_games[room]['participants'].keys())))", "def get_connections(self, name):\n cls, pending, connected = self._proxies[name]\n return list(connected)", "def playerStandings():\n\n getPlayers = \"SELECT id, name, wins, matches FROM playerstats ORDER BY wins DESC\"\n players = executeQuery({'dbname': 'tournament', 'query' : getPlayers, 'type' : 'find'})\n return players", "def get_scnlist_tilecache(self):\n scns2tilecache = list()\n if self.calc_scn_tilecache():\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n logger.debug(\"Perform query to find scene.\")\n query_result = ses.query(EDDSentinel1ASF).filter(\n sqlalchemy.or_(\n EDDSentinel1ASF.ExtendedInfo.is_(None),\n sqlalchemy.not_(EDDSentinel1ASF.ExtendedInfo.has_key('tilecache'))),\n EDDSentinel1ASF.Invalid == False,\n EDDSentinel1ASF.ARDProduct == True).order_by(EDDSentinel1ASF.Acquisition_Date.asc()).all()\n if query_result is not None:\n for record in query_result:\n scns2tilecache.append(record.PID)\n ses.close()\n logger.debug(\"Closed the database session.\")\n return scns2tilecache", "def show_cache(print_out=True):\n out = []\n out.append('# PVName ChannelID/Context Connected?')\n out.append('#--------------------------------------------')\n for context, context_chids in list(_cache.items()):\n for vname, val in 
list(context_chids.items()):\n chid = val['chid']\n if len(vname) < 15:\n vname = (vname + ' '*15)[:15]\n out.append(\" %s %s/%s %s\" % (vname, repr(chid),\n repr(context),\n isConnected(chid)))\n out = strjoin('\\n', out)\n if print_out:\n write(out)\n else:\n return out", "def get(self):\n if path.exists(self.cachefile):\n self.invalidion()\n full_cache = self._get_all()\n return full_cache\n else:\n return []", "def db_get_channels(self, guildID: int):\n query = \"SELECT rowid, * FROM wormhole_channel WHERE guildID = ?\"\n channels = self.bot.db_query(query, (guildID,), astuple=True)\n # come as: (rowid, name, channelID, guildID, type, webhookID,\n # webhookTOKEN)\n res: List[WormholeChannel] = []\n for row in channels:\n res.append(WormholeChannel(*row[1:5]))\n res[-1].id = row[0]\n return res if len(res) > 0 else None", "def player_has_active_games(self, player):\n return self.filter(active=True, finished=False, player=player)", "def get_online_users(self):\n self.data = {}\n iq = self.xmpp.plugin['xep_0050'].send_command(\n config['domain'],\n ONLINE_USERS)\n sessionid = iq['command']['sessionid']\n\n form = self.xmpp.plugin['xep_0004'].make_form(ftype='submit')\n field = form.add_field(\n ftype='hidden',\n type='hidden',\n var='FORM_TYPE',\n value=ADMIN)\n field['type'] = 'hidden'\n form.add_field(var='max_items', value='100')\n\n result = self.xmpp.plugin['xep_0050'].send_command(\n config['domain'],\n ONLINE_USERS,\n sessionid=sessionid,\n payload=form)\n\n field = result['command']['form']['fields']['onlineuserjids']\n\n if not field['value']: # no user online, abort\n return\n\n for item in field['value'].split('\\n'):\n self.get_user_statistics(item)\n Gdk.threads_enter()\n for jid in self.data:\n data = self.data[jid]\n row = self.store.append(None, [\n jid, int(data['rostersize']),\n len(data['onlineresources']),\n '', '', False\n ])\n resources = self.data[jid]['onlineresources']\n for index, resource in enumerate(resources):\n self.store.append(row,\n [\n '%s/%s' % (jid, resource),\n int(data['rostersize']),\n len(data['onlineresources']),\n data['ipaddresses'][index][1],\n data['ipaddresses'][index][0],\n False\n ]\n )\n countries = []\n for addr in data['ipaddresses']:\n countries.append(addr[1])\n countries = Counter(countries)\n self.store.set_value(row, 3, max(countries.items(), key=lambda tup: tup[1])[0])\n Gdk.threads_leave()", "async def cgames(self, ctx):\r\n server = ctx.message.server\r\n members = server.members\r\n\r\n freq_list = {}\r\n for member in members:\r\n if member != None and member.game != None and member.game.name != None and not member.bot:\r\n if member.game.name not in freq_list:\r\n freq_list[member.game.name] = 0\r\n freq_list[member.game.name]+=1\r\n\r\n sorted_list = sorted(freq_list.items(), key=operator.itemgetter(1), reverse = True) \r\n\r\n if not freq_list:\r\n await self.bot.say(\"Surprisingly, no one is playing anything.\")\r\n else: \r\n # create display\r\n msg = \"```These are the server's most played games at the moment: \\n\\n\"\r\n msg += \"{:<25s}{:>25s}\\n\".format(\"Game:\", \"# Playing:\")\r\n max_games = min(len(sorted_list), 10)\r\n for i in range(max_games):\r\n game, freq = sorted_list[i]\r\n if len(game) > 25:\r\n trunc_game = game [0:21] + \"...\"\r\n msg+= \"{:<25s}{:>25d}\\n\".format(trunc_game, freq_list[game])\r\n else:\r\n msg+= \"{:<25s}{:>25d}\\n\".format(game, freq_list[game])\r\n msg += \"```\" \r\n await self.bot.say(msg)", "def get_reachable_servers(self) -> List[Server]:\n pass", "def get_online():\n print( 
\"Online CPUs:\" + \"\".join( f\" {cpu}\" for cpu in _cpu.get_online_cpus() ) )", "async def list(self, ctx):\n\n cursor = await db.execute(\"Select MessageID, TimeEnding, Members, ChannelID from Giveaway \"\n \"where GuildID = ? and Ended = ?\", (ctx.guild.id, False))\n result = await cursor.fetchall()\n\n for i, tup in enumerate(result):\n try:\n msg = await ctx.guild.get_channel(tup[3]).fetch_message(tup[0])\n tup = list(tup)\n tup[0] = msg\n result[i] = tup\n except:\n result.remove(tup)\n await db.execute(\"Delete from Giveaway where MessageID = ?\", (tup[0],))\n await db.commit()\n\n if not result:\n return await send_embed(ctx, \"No active giveaways on this server.\", negative=True)\n\n embeds = []\n fields = []\n\n for i, tup in enumerate(result, start=1):\n fields.append((str(tup[0].id),\n f\"Prize: {tup[0].embeds[0].author.name}\\n\"\n f\"{tup[2]} possible winners\\n\"\n f\"Ends at {datetime.utcfromtimestamp(tup[1]).strftime('%Y-%m-%d %H:%M:%S')}\"))\n\n if i % 10 == 0 or i == len(result):\n embed = discord.Embed(\n colour=discord.Colour.blue(),\n title=\"Active Giveaways\"\n )\n\n for field in fields:\n embed.add_field(name=field[0], value=field[1], inline=False)\n\n embeds.append(embed)\n fields = []\n\n await self.bot.paginate(ctx, embeds)", "def games_played(self):\n return self._games_played", "def func(self):\n player = self.caller\n session_list = [\n ob\n for ob in SESSIONS.get_sessions()\n if ob.account and ob.account.show_online(player)\n ]\n session_list = sorted(session_list, key=lambda o: o.account.key.lower())\n sparse = \"sparse\" in self.switches\n watch_list = player.db.watching or []\n if self.cmdstring == \"doing\":\n show_session_data = False\n else:\n show_session_data = player.check_permstring(\n \"Immortals\"\n ) or player.check_permstring(\"Wizards\")\n total_players = len(set(ob.account for ob in session_list))\n number_displayed = 0\n already_counted = []\n public_members = []\n if \"org\" in self.switches:\n from world.dominion.models import Organization\n\n try:\n org = Organization.objects.get(name__iexact=self.args)\n if org.secret:\n raise Organization.DoesNotExist\n except Organization.DoesNotExist:\n self.msg(\"Organization not found.\")\n return\n public_members = [\n ob.player.player\n for ob in org.members.filter(deguilded=False, secret=False)\n ]\n if show_session_data:\n table = prettytable.PrettyTable(\n [\"{wPlayer Name\", \"{wOn for\", \"{wIdle\", \"{wRoom\", \"{wClient\", \"{wHost\"]\n )\n for session in session_list:\n pc = session.get_account()\n if pc in already_counted:\n continue\n if not session.logged_in:\n already_counted.append(pc)\n continue\n delta_cmd = pc.idle_time\n if \"active\" in self.switches and delta_cmd > 1200:\n already_counted.append(pc)\n continue\n if \"org\" in self.switches and pc not in public_members:\n continue\n delta_conn = time.time() - session.conn_time\n plr_pobject = session.get_puppet()\n plr_pobject = plr_pobject or pc\n base = str(session.get_account())\n pname = self.format_pname(session.get_account())\n char = pc.char_ob\n if \"watch\" in self.switches and char not in watch_list:\n already_counted.append(pc)\n continue\n if not char or not char.item_data.fealty:\n fealty = \"---\"\n else:\n fealty = char.item_data.fealty\n if not self.check_filters(pname, base, fealty):\n already_counted.append(pc)\n continue\n pname = crop(pname, width=18)\n if (\n session.protocol_key == \"websocket\"\n or \"ajax\" in session.protocol_key\n ):\n client_name = \"Webclient\"\n else:\n # Get a sane client name to 
display.\n client_name = session.protocol_flags.get(\"CLIENTNAME\")\n if not client_name:\n client_name = session.protocol_flags.get(\"TERM\")\n if client_name and client_name.upper().endswith(\"-256COLOR\"):\n client_name = client_name[:-9]\n\n if client_name is None:\n client_name = \"Unknown\"\n\n client_name = client_name.capitalize()\n\n table.add_row(\n [\n pname,\n time_format(delta_conn)[:6],\n time_format(delta_cmd, 1),\n hasattr(plr_pobject, \"location\")\n and plr_pobject.location\n and plr_pobject.location.dbref\n or \"None\",\n client_name[:9],\n isinstance(session.address, tuple)\n and session.address[0]\n or session.address,\n ]\n )\n already_counted.append(pc)\n number_displayed += 1\n else:\n if not sparse:\n table = prettytable.PrettyTable([\"{wPlayer name\", \"{wFealty\", \"{wIdle\"])\n else:\n table = prettytable.PrettyTable([\"{wPlayer name\", \"{wIdle\"])\n\n for session in session_list:\n pc = session.get_account()\n if pc in already_counted:\n continue\n if not session.logged_in:\n already_counted.append(pc)\n continue\n if \"org\" in self.switches and pc not in public_members:\n continue\n delta_cmd = pc.idle_time\n if \"active\" in self.switches and delta_cmd > 1200:\n already_counted.append(pc)\n continue\n if not pc.db.hide_from_watch:\n base = str(pc)\n pname = self.format_pname(pc, lname=True, sparse=sparse)\n char = pc.char_ob\n if \"watch\" in self.switches and char not in watch_list:\n already_counted.append(pc)\n continue\n if not char or not char.item_data.fealty:\n fealty = \"---\"\n else:\n fealty = str(char.item_data.fealty)\n if not self.check_filters(pname, base, fealty):\n already_counted.append(pc)\n continue\n idlestr = self.get_idlestr(delta_cmd)\n if sparse:\n width = 30\n else:\n width = 55\n pname = crop(pname, width=width)\n if not sparse:\n table.add_row([pname, fealty, idlestr])\n else:\n table.add_row([pname, idlestr])\n already_counted.append(pc)\n number_displayed += 1\n else:\n already_counted.append(pc)\n is_one = number_displayed == 1\n if number_displayed == total_players:\n string = \"{wPlayers:{n\\n%s\\n%s unique account%s logged in.\" % (\n table,\n \"One\" if is_one else number_displayed,\n \"\" if is_one else \"s\",\n )\n else:\n string = (\n \"{wPlayers:{n\\n%s\\nShowing %s out of %s unique account%s logged in.\"\n % (\n table,\n \"1\" if is_one else number_displayed,\n total_players,\n \"\" if total_players == 1 else \"s\",\n )\n )\n self.msg(string)", "def NowPlaying(self):\n self.logger.debug(\"Fetching currently playing information\")\n try:\n xbmc = Server(self.url('/jsonrpc', True))\n player = xbmc.Player.GetActivePlayers()[0]\n playerid = player['playerid']\n\n if player['type'] == 'video':\n playerprop = ['speed', 'position', 'time', 'totaltime',\n 'percentage', 'subtitleenabled', 'currentsubtitle',\n 'subtitles', 'currentaudiostream', 'audiostreams']\n itemprop = ['thumbnail', 'showtitle', 'season', 'episode', 'year', 'fanart']\n\n elif player['type'] == 'audio':\n playerprop = ['speed', 'position', 'time', 'totaltime', 'percentage']\n itemprop = ['thumbnail', 'title', 'artist', 'album', 'year', 'fanart']\n\n app = xbmc.Application.GetProperties(properties=['muted', 'volume'])\n player = xbmc.Player.GetProperties(playerid=playerid, properties=playerprop)\n item = xbmc.Player.GetItem(playerid=playerid, properties=itemprop)\n\n return {'playerInfo': player, 'itemInfo': item, 'app': app}\n except:\n self.logger.debug(\"Unable to fetch currently playing information!\")\n return", "def get_games_from_database (self):\n r = 
requests.get (self.url_endpoint)\n if (r.status_code != 200):\n print (\"Failed to get games:\\n\", r.text)\n return r\n \n games = json.loads (r.text)['games']\n return_list = []\n for game in games:\n return_list.append (game['game_state'])\n return return_list", "def get_memcached_hosts():\n cache_info = settings.CACHES[DEFAULT_FORWARD_CACHE_ALIAS]\n backend = cache_info['BACKEND']\n locations = cache_info.get('LOCATION', [])\n\n if 'memcached' not in backend or not locations:\n locations = []\n elif not isinstance(locations, list):\n locations = [locations]\n\n return locations", "def get_cached_polling_list(self, objs):\n return [self._cache[obj.pk] for obj in objs]", "def get_chatrooms(self):\n return list(self.chatrooms)", "def list():\n\n return {\"cncs\": [{\"id\": id.split(\"/\")[-1]} for id in sorted(flask.current_app.redis.keys(\"/cnc/*\"))]}", "def get_cached_playlist(self):\n if self.cached_playlist is None:\n self.get_playlist()\n return self.cached_playlist", "def known_nodes(self) -> List[Client]:\n return list(self.in_memory_client_registry.values())", "def current_user_playlists(self, limit: int = 20, offset: int = 0):\n return self._get('me/playlists', limit=limit, offset=offset)", "def playlists(self):\r\n return v3.Playlists(self)", "def list_connections(self):\n return self.network.list_connections()", "def get_players():\n return {\"X\": play_human, \"O\": play_ai}", "def get_global_active_list(self):\n return self.api.get_active_global_version_manager()", "def all(self, skip_cache=False):\n now = _time_ms(datetime.datetime.utcnow())\n if skip_cache or now - self._last_updated > CACHE_LIMIT:\n self._process_stations()\n return self._stations_lst" ]
[ "0.7143071", "0.68768233", "0.6365127", "0.635414", "0.62231076", "0.60774004", "0.6067745", "0.60203606", "0.6001943", "0.5917321", "0.5900113", "0.58974326", "0.5869177", "0.5867893", "0.5851105", "0.58382463", "0.5821852", "0.57409716", "0.5719336", "0.57017183", "0.5654622", "0.5652596", "0.5646992", "0.5596401", "0.557703", "0.5576145", "0.5571599", "0.5567744", "0.5566548", "0.55665255", "0.553591", "0.55246294", "0.5456939", "0.5448428", "0.544581", "0.54296166", "0.53979546", "0.53881973", "0.53747195", "0.53562135", "0.53498954", "0.5349476", "0.5344395", "0.53426754", "0.5342432", "0.5334593", "0.53341645", "0.5320952", "0.53069943", "0.5297473", "0.528417", "0.5284144", "0.52837825", "0.5271782", "0.5250296", "0.52478486", "0.52311593", "0.52089304", "0.52013594", "0.51913434", "0.5188242", "0.51776063", "0.5175422", "0.5164698", "0.515862", "0.5146897", "0.5140317", "0.51324326", "0.51294714", "0.51248175", "0.51178986", "0.5109089", "0.5107864", "0.51050663", "0.50960404", "0.50940025", "0.509193", "0.5090698", "0.50829077", "0.507294", "0.5067299", "0.50646293", "0.5062332", "0.5062079", "0.5061087", "0.50605154", "0.50558364", "0.5053", "0.504154", "0.50297093", "0.50289583", "0.5024387", "0.50228655", "0.50158066", "0.50048435", "0.49979216", "0.49952006", "0.49900636", "0.49810633", "0.49692932" ]
0.82015663
0
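Because document_score and document_rank sit alongside the per-negative scores, a consumer can drop negatives that the mining model scored above the positive (in the first row the positive scores 0.7129 while several negatives score above 0.82). A rough sketch, assuming the scores are comparable similarities and treating the 0.05 margin as an arbitrary choice:

# A sketch: keep only negatives scoring below the positive minus a margin.
# Scores are stored as strings in this dataset, so cast before comparing.
def filter_negatives(row, margin=0.05):
    threshold = float(row["document_score"]) - margin
    return [
        (neg, float(score))
        for neg, score in zip(row["negatives"], row["negative_scores"])
        if float(score) < threshold
    ]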
Ensure the value of 'done' is set to False when creating an item
def test_done_default_value_is_False(self):
    item = Item(name = "A test item")
    self.assertEqual(item.name, "A test item")
    self.assertFalse(item.done)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_done_value_can_be_set_to_True(self):\n item = Item(name = \"A test item\", done = True)\n self.assertEqual(item.name, \"A test item\")\n self.assertTrue(item.done)", "def test_create(self):\n Todo = self.env[\"todo.task\"]\n task = Todo.create({'name': 'Test Task'})\n self.assertItemsEqual(task.is_done, False)", "def test_mark_completed(self):\n event = Event.objects.all()[0]\n\n todo = TodoItem.objects.create(\n event=event, completed=False, title=\"Test TODO1\",\n due=datetime.date.today(), additional=\"\",\n )\n\n assert todo.completed is False\n\n self.client.get(reverse('todo_mark_completed', args=[todo.pk]))\n todo.refresh_from_db()\n\n assert todo.completed is True", "def _is_done(self):\n pass", "def is_done():\n return False", "def action_set_done(self):\n self.ensure_one()\n self.write({\"state\": \"done\"})\n self.credit_control_line_ids.write({\"state\": \"done\"})\n return True", "def create_items(sender, instance, **kwargs):\n if instance.item_id is None and instance.item is None:\n item = Item()\n if hasattr(instance, 'active'):\n item.active = getattr(instance, 'active')\n item.save()\n instance.item = item", "def test_mark_incompleted(self):\n event = Event.objects.all()[0]\n\n todo = TodoItem.objects.create(\n event=event, completed=True, title=\"Test TODO2\",\n due=datetime.date.today(), additional=\"\",\n )\n\n assert todo.completed is True\n\n self.client.get(reverse('todo_mark_incompleted', args=[todo.pk]))\n todo.refresh_from_db()\n\n assert todo.completed is False", "def add_item(todo_list, todo_new_item):\n check = True\n try:\n todo_list.append(todo_new_item)\n except todo_list:\n print(\"Could not add new item to todo list\")\n check = False\n\n return check", "def create(self):\n return (True == self.client.put(self.name).getBodyData(\"ok\"))", "def settle_self(self):\n self.state = 'completed'\n self.save()\n self.safe_post()", "def test_create_item_good(test_client, item):\n\n response = test_client.post(BASE_URL,\n data=json.dumps(item),\n content_type='application/json')\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 201\n assert data['item']['name'] == item['name']\n assert data['item']['value'] == item['value']\n assert data['item']['id'] > 0", "def take_item(self, item):\r\n if len(self.items) <= 2:\r\n self.items.append(item)\r\n if self.got_both():\r\n self.working = True", "def done(self):\n return False", "def create_item(self, user: User, **kwargs) -> None:", "def _isDone(self) -> bool:\n pass", "def create_item():\n\n data = request.get_json()\n title = data.get(\"title\", None)\n description = data.get(\"description\", None)\n due_date = data.get(\"due_date\", None)\n list_id = data.get(\"list_id\", None)\n\n if title is None or list_id is None:\n return abort(400, description=f\"List ID and title cannot be null!\")\n\n list_to_append = ToDoList.query.filter(ToDoList.id == list_id).first()\n\n if list_to_append is None:\n return abort(404, description=f\"List ID {list_id} does not exist!\")\n\n if due_date is not None:\n try:\n due_date = datetime.datetime.strptime(due_date, DATE_FORMAT)\n except ValueError:\n return abort(400, description=f\"Date format must be YYYY-MM-DD HH:MM\")\n\n new_item = Task(\n title=title,\n description=description,\n status=\"pending\",\n due_date=due_date,\n list_id=list_id,\n )\n db.session.add(new_item)\n db.session.commit()\n\n return make_response(json.dumps(new_item.serialize()))", "def test_create_item_missing_value(test_client, item_without_value):\n\n response = 
test_client.post(BASE_URL,\n data=json.dumps(item_without_value),\n content_type='application/json')\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 400\n assert data['error'] == app.BAD_REQUEST", "def action_done(self):\n if not self.date_done:\n self.date_done = fields.Datetime.now()\n if self.state_rapel == '1':\n self.generate_rapel()\n self.state = 'done'", "def done(self) -> bool:", "def is_item_complete(self, item):\n return (item.get('id') and\n item.get('name') and\n 'description' in item and\n 'image' in item)", "def test_create_item(self):\n item = self.item\n\n self.assertTrue(isinstance(item, Item))\n self.assertEqual(item.name, \"Test Item\")", "def complete_todo(self, todo: Todo):\n todo.completed = True\n self.todo_client.put_todo(todo)", "def create_work_item(self):", "def test_vault_create_new_vault_item(self):\n pass", "def test_create_a_todo(self):\n # hit the API endpoint\n response = self.make_a_request(\n kind=\"post\",\n version=\"v1\",\n data=self.valid_data\n )\n self.assertEqual(response.data, self.valid_data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n # test with invalid data\n response = self.make_a_request(\n kind=\"post\",\n version=\"v1\",\n data=self.invalid_data\n )\n self.assertEqual(\n response.data[\"message\"],\n \"TODO item requires state, due_date and text\"\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_add_item_using_post(self):\n pass", "def pending(self):\n self.state = Step.State.PENDING", "def mark_as_done(self):\n self.status = \"DONE\"", "def mark_as_done(self):\n\n done = self.in_progress_scroll_cell.get()\n if done is None:\n self.master.show_error_popup('No Item', 'There is no item in the list to mark as done')\n return\n self.in_progress_scroll_cell.remove_selected_item()\n self.done_scroll_cell.add_item(done)", "def post(self):\n task = self.params.task\n task.completed = not task.completed\n task.put()\n render_json(self, obj=task.as_json())", "def done(self):\n self.status = 'completed'\n self.end = datetime.datetime.now()\n self.save()", "def _isDone(self):\n return self.steps >= self.max_steps or len(self.food_ids) <= 0", "def createNewItem(request):\n newItem = ItemSerializer(data=request.data)\n if newItem.is_valid():\n newItem.save()\n return Response(newItem.data, status=status.HTTP_201_CREATED)\n\n fail = {\n \"item\" : \"item is not valid\"\n }\n return JsonResponse(fail)", "def test_make_draft(self):\r\n # Make problem public.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'publish': 'make_public'}\r\n )\r\n self.assertIsNotNone(self.get_item_from_modulestore(self.problem_usage_key, False))\r\n # Now make it draft, which means both versions will exist.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'publish': 'create_draft'}\r\n )\r\n # Update the draft version and check that published is different.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'metadata': {'due': '2077-10-10T04:00Z'}}\r\n )\r\n published = self.get_item_from_modulestore(self.problem_usage_key, False)\r\n self.assertIsNone(published.due)\r\n draft = self.get_item_from_modulestore(self.problem_usage_key, True)\r\n self.assertEqual(draft.due, datetime(2077, 10, 10, 4, 0, tzinfo=UTC))", "def add_item(self):\n item = LibGen.create_item()\n if not self.item_exists(item.call_number):\n self.item_list[item.call_number] = item\n print(f\"Item({item.call_number}) bas been added.\")\n else:\n print(\"This item already 
exists.\")", "def createItem(self, item):\r\n try:\r\n self.feed_handler.createItem(item.link, item.title, item.descr,\r\n item.source, item.channelURL)\r\n self.feed_passed = self.feed_passed + 1\r\n except Exception, ex: \r\n # Remove comment for detailed information on feed item created\r\n #print ex\r\n pass", "def can_mark_as_done(self):\n if (not self.event_store.done) and \\\n ((not self.file_submission_required) or self.event_store.has_file_submission) and \\\n (not self.contains_questions):\n return True\n return False", "def test_new_item(self):\n\n\t\titem_id = mock_item()[0]\n\t\tself.assertEqual(item_id, 1)", "def test_completed(self):\n return False", "def test_create_item_missing_name(test_client, item_without_name):\n\n response = test_client.post(BASE_URL,\n data=json.dumps(item_without_name),\n content_type='application/json')\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 400\n assert data['error'] == app.BAD_REQUEST", "def force_input(self, foodItem):\r\n\r\n self.identified_food = foodItem\r\n self.has_been_checked = False", "def test_completed_dont_count(self):\r\n self._login_admin()\r\n\r\n # add out completed one\r\n q = ImportQueue(\r\n username=u'admin',\r\n file_path=u'testing.txt'\r\n )\r\n q.completed = datetime.now()\r\n q.status = 2\r\n DBSession.add(q)\r\n transaction.commit()\r\n\r\n # now let's hit the import page, we shouldn't get a form, but instead a\r\n # message about our import\r\n res = self.app.get('/admin/import')\r\n\r\n self.assertTrue('<form' in res.body, \"We should have a form\")", "def is_created(self) -> bool:\n return self.state == Order.OrderState.CREATED.choice_value", "def force_done(self):\n\n if self.can_done():\n return self.done()\n else:\n # we can not set that quest to done regularly, so we force it\n # nobody gets any experience and we might need a special notification for this\n self.quest.done = True\n self.quest.save()\n signals.quest_done.send(None, quest=self.quest)", "def test_adding_item_to_list(create_shopping_item, create_shopping_list):\n shopping_list = create_shopping_list\n items_before = shopping_list.items.values_list().count()\n new_item = create_shopping_item\n shopping_list.items.add(new_item)\n items_after = shopping_list.items.values_list().count()\n assert items_after > items_before\n assert items_before == 0\n assert items_after == 1", "async def additem(self, ctx, *, name: str):\n try:\n item = dict()\n item[\"name\"] = name\n check = lambda x: x.channel is ctx.channel and x.author is ctx.author\n await ctx.send(await _(ctx, \"Describe the item (a description for the item)\"))\n response = await self.bot.wait_for(\"message\", timeout=120, check=check)\n if response.content.lower() == \"cancel\":\n await ctx.send(await _(ctx, \"Cancelling!\"))\n return\n\n item[\"description\"] = response.content\n item[\"meta\"] = dict()\n\n await ctx.send(\n await _(ctx, \"Additional information? (Attributes formatted in a list i.e `color: 400, value: 200` \"\n \"Set an image for this item with the `image` key i.e. `image: http://image.com/image.png` \"\n \"Set this item as usable by adding `used` key i.e. 
`used: You open the jar and the bird flies away`\"))\n while True:\n response = await self.bot.wait_for(\"message\", timeout=60, check=check)\n if response.content.lower() == \"cancel\":\n await ctx.send(await _(ctx, \"Cancelling!\"))\n return\n elif response.content.lower() == \"skip\":\n await ctx.send(await _(ctx, \"Skipping!\"))\n break\n else:\n try:\n if \"\\n\" in response.content:\n res = response.content.split(\"\\n\")\n else:\n res = response.content.split(\",\")\n for val in res:\n key, value = val.split(\": \")\n key = key.strip().casefold()\n value = value.strip()\n item[\"meta\"][key] = value\n else:\n break\n except:\n await ctx.send(await _(ctx, \"Invalid syntax, try again.\"))\n await self.bot.di.new_item(ctx.guild, ServerItem(**item))\n await ctx.send(await _(ctx, \"Item successfully created\"))\n\n except asyncio.TimeoutError:\n await ctx.send(await _(ctx, \"Timed out! Try again\"))", "def complete(self):\n self.completed = peewee.datetime.date.today()\n self.save()", "def test_put_bucketlist_item(self):\r\n data = {\"name\": \"bucketlist item name\", \"completed\": \"true\"}\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertNotEqual(item.name, \"bucketlist item name\")\r\n self.assertFalse(item.completed)\r\n\r\n response = self.put_bucketlist_item(email, _pword, bucketlist.id, 1, data)\r\n result = json.loads(response.data.decode('utf-8'))\r\n item2 = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertEqual(response.status, '201 CREATED')\r\n self.assertEqual(item2.name, \"bucketlist item name\")\r\n self.assertTrue(item2.completed)", "def add_item_to_inventory(game, *args):\n (item, action_description, already_done_description) = args[0]\n if not game.is_in_inventory(item):\n print_bold(action_description)\n game.add_to_inventory(item)\n print_italic(\"You've just got a {item}.\".format(item=item.name))\n else:\n print_italic(already_done_description)\n return False", "def disable_if_done(self, commit=True):\n if self._is_billing_complete() and not self.disabled:\n self.disabled = True\n\n if commit:\n self.save()", "def uncomplete(self):\n ### TODO: needs test code for code coverage!\n ## (it has been tested through the calendar-cli test code)\n if not hasattr(self.vobject_instance.vtodo, \"status\"):\n self.vobject_instance.vtodo.add(\"status\")\n self.vobject_instance.vtodo.status.value = \"NEEDS-ACTION\"\n if hasattr(self.vobject_instance.vtodo, \"completed\"):\n self.vobject_instance.vtodo.remove(self.vobject_instance.vtodo.completed)\n self.save()", "def test_add_item_at_using_put(self):\n pass", "def is_done(self):\n return self._done", "def get_isDone(self):\n pass", "def done(self):\n raise NotImplementedError()", "def test_set_task_incomplete_view(self):\n self.task.status = Task.STATUS_CHOICES.ready_for_review\n self.task.save()\n pk = self.task.pk\n url = reverse('set_task_incomplete', kwargs={'pk': pk})\n response = self.client.post(url)\n self.assertEqual(response.status_code, 302)\n task = Task.objects.get(pk=pk)\n self.assertEqual(task.status, Task.STATUS_CHOICES.incomplete)", "def test_create_drives_drive_suspend_item(self):\n pass", "def test_creating_shopping_item(create_shopping_item, create_user):\n owner = create_user\n shopping_item = create_shopping_item\n 
assert shopping_item.owner == owner", "def test_create_item(self):\n\n url = reverse('stock-item-create')\n\n response = self.client.get(url, {'part': 1}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)\n\n response = self.client.get(url, {'part': 999}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)\n\n # Copy from a valid item, valid location\n response = self.client.get(url, {'location': 1, 'copy': 1}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)\n\n # Copy from an invalid item, invalid location\n response = self.client.get(url, {'location': 999, 'copy': 9999}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)", "def process_item(self, item, spider):\n task = SpiderTask.objects.get(id=spider.task_id)\n dj_item = Item.objects.create(task=task, **item)\n return dj_item", "def done(self):\n return self._info['status'] == 'DONE'", "def _advance_to_pending(self):\n if all(signup.status != GameSignup.REGISTERED for signup in self.signups.all()):\n try:\n with transaction.atomic():\n self.status = self.PENDING\n self._create_characters()\n self.save()\n except DatabaseError:\n pass\n else:\n raise ValidationError('All user signups must be accepted, rejected, or withdrawn before continuing.')", "def is_final_item(item_id):\n return \"into\" not in items[\"data\"][str(item_id)]", "def can_update_order_items(self) -> bool:\n return self.is_created or self.is_pending", "def test_create_new_shopping_list(create_shopping_list):\n shopping_list = create_shopping_list\n assert shopping_list.items.values_list().count() == 0\n assert shopping_list.budget == 0", "def _apply_item(self, item: Item) -> bool:\n if self.locked:\n self.__locked = item.item_type != self.__key\n return not self.locked", "def process_item(self, item, spider):\n\n session = self.Session()\n\n try:\n CreateOrUpdate().create_or_update(item=item, session=session)\n session.commit()\n except:\n # undo in case of errors\n session.rollback()\n raise\n finally:\n session.close()\n\n return item", "def test_0_0_create(self):\n\n self.assertTrue(self.b1)", "def test_profile_is_complete(empty_field):\n profile = ProfileFactory.create()\n if empty_field:\n setattr(profile, empty_field, \"\")\n assert profile.is_complete is False\n else:\n assert profile.is_complete is True", "def verify_done():\n if SAVE_EXISTENT == []:\n print \"\\nCan't generate the invoice because You have not bought\"\n press_enter()\n reset()\n show_products()\n sell_products()\n else:\n reset()\n invoice()\n press_enter()\n delete_lists()\n reset()\n main_menu()", "def create_item():\n #if not request.json:\n # abort(400)\n parser = reqparse.RequestParser()\n parser.add_argument('item_code', type=int, required=False, help=\"Item code missing\")\n parser.add_argument('item_name', type=str, required=True, help=\"Item name missing\")\n parser.add_argument('size', type=str, required=True, help=\"Size missing\")\n parser.add_argument('color', type=str, required=True, help=\"Color missing\")\n parser.add_argument('quality', type=str, required=True, help=\"Quality missing\")\n parser.add_argument('username', type=str, required=True, help=\"Username missing\")\n args = parser.parse_args(strict=True)\n user_code = get_user_code(args['username'])\n if user_code is None:\n return make_response(jsonify({'error': 'User does not exists'}), 400)\n new_item = dict(\n item_code = args['item_code'],\n item_name = args['item_name'],\n 
size_code = get_size_code( args['size']),\n color_code = get_color_code( args['color']),\n quality_code = get_quality_code( args['quality'])\n )\n try:\n u = models.Items(**new_item)\n db.session.add(u)\n db.session.commit()\n except sqlalchemy.exc.IntegrityError, e:\n return make_response(jsonify({'error': 'item code already exists.'}), 400)\n\n return make_response(jsonify({'success': True}))", "def _apply_item(self, item: Item) -> bool:\n return False", "def test_create_draft_with_update(self):\r\n # Make problem public.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'publish': 'make_public'}\r\n )\r\n self.assertIsNotNone(self.get_item_from_modulestore(self.problem_usage_key, False))\r\n # Now make it draft, which means both versions will exist.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={\r\n 'metadata': {'due': '2077-10-10T04:00Z'},\r\n 'publish': 'create_draft'\r\n }\r\n )\r\n published = self.get_item_from_modulestore(self.problem_usage_key, False)\r\n self.assertIsNone(published.due)\r\n draft = self.get_item_from_modulestore(self.problem_usage_key, True)\r\n self.assertEqual(draft.due, datetime(2077, 10, 10, 4, 0, tzinfo=UTC))", "def test_adding_a_new_item_with_no_supply(self): \n print '\\n'\n logger.debug('Add a new item to a current PO via PUT')\n print '\\n'\n \n #Verifying po in database\n self.assertEqual(self.po.id, 1)\n self.assertEqual(self.po.items.count(), 1)\n self.assertEqual(self.po.grand_total, Decimal('129.58'))\n self.assertEqual(timezone('Asia/Bangkok').normalize(self.po.order_date).date(), datetime.datetime.now().date())\n item = self.po.items.all()[0]\n self.assertEqual(item.id, 1)\n self.assertEqual(item.quantity, 10)\n self.assertEqual(item.total, Decimal('121.1'))\n \n modified_po_data = copy.deepcopy(base_purchase_order)\n modified_po_data['items'][1]['unit_cost'] = Decimal('11.99')\n modified_po_data['items'][1]['comments'] = 'test change'\n modified_po_data['items'][1]['description'] = \"test description change\"\n del modified_po_data['items'][1]['supply']\n resp = self.client.put('/api/v1/purchase-order/1/',\n format='json',\n data=modified_po_data)\n \n #Verify the response\n self.assertEqual(resp.status_code, 200, msg=resp)\n po = resp.data\n self.assertEqual(po['id'], 1)\n self.assertEqual(po['supplier']['id'], 1)\n self.assertEqual(po['vat'], 7)\n #self.assertEqual(Decimal(po['grand_total']), Decimal('74.85'))\n self.assertEqual(po['discount'], 0)\n self.assertEqual(po['revision'], 1)\n self.assertEqual(len(po['items']), 2)\n #self.assertEqual(po['status'], 'PAID')\n #Check the new pdf\n #webbrowser.get(\"open -a /Applications/Google\\ Chrome.app %s\").open(po['pdf']['url'])\n \n item1 = po['items'][0]\n logger.debug(item1)\n self.assertEqual(item1['id'], 2)\n self.assertEqual(item1['quantity'], Decimal('10.0000000000'))\n self.assertEqual(item1['description'], u'Pattern: Maxx, Col: Blue')\n self.assertEqual(Decimal(item1['unit_cost']), Decimal('12.1100'))\n self.assertEqual(Decimal(item1['total']), Decimal('121.10'))\n\n item2 = po['items'][1]\n logger.debug(item2)\n self.assertEqual(item2['id'], 3)\n self.assertEqual(item2['quantity'], Decimal('3.0000000000'))\n self.assertEqual(item2['comments'], 'test change')\n self.assertEqual(item2['description'], 'test description change')\n self.assertEqual(Decimal(item2['unit_cost']), Decimal('11.99'))\n self.assertEqual(Decimal(item2['total']), Decimal('35.97'))\n \n #Verify database record\n po = PurchaseOrder.objects.get(pk=1)\n \n 
self.assertEqual(po.supplier.id, 1)\n #self.assertEqual(timezone('Asia/Bangkok').normalize(po.order_date), datetime.datetime.now().date())\n self.assertEqual(po.vat, 7)\n self.assertEqual(po.grand_total, Decimal('168.06'))\n self.assertEqual(po.items.count(), 2)\n \n # Check new item in the database\n item2_d = po.items.all().order_by('id')[1]\n self.assertEqual(item2_d.id, 3)\n self.assertEqual(item2_d.description, 'test description change')\n self.assertEqual(item2_d.comments, 'test change')\n self.assertEqual(item2_d.quantity, 3)\n self.assertEqual(item2_d.unit_cost, Decimal('11.99'))\n self.assertEqual(item2_d.total, Decimal('35.97'))\n\n # Check new supply product in the database\n products = SupplyProduct.objects.filter(supply=item2_d.supply, supplier=self.po.supplier)\n self.assertEqual(products.count(), 1)\n product = products.all()[0]\n self.assertEqual(product.supply.id, item2_d.supply.id)\n self.assertEqual(product.supplier.id, self.po.supplier.id)\n self.assertEqual(product.cost, Decimal('11.99'))", "def markSuccess(self, *args):\n self.add(True)", "def post_create(self, state):\n\n self.id = self.get_flags_from_list(self.id)\n self.flags = self.get_flags_from_list(self.flags)", "def purchase_item(self):\r\n self.purchased_callback()\r\n self.status = 'purchased'\r\n self.fulfilled_time = datetime.now(pytz.utc)\r\n self.save()", "def test_put_item_wrong_id(self):\r\n data = {\"name\": \"bucketlist item name\", \"completed\": \"true\"}\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=0).first()\r\n self.assertFalse(item)\r\n\r\n response = self.put_bucketlist_item(email, _pword, bucketlist.id, 0, data)\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '404 NOT FOUND')\r\n self.assertEqual(\r\n result['message'],\r\n 'Bucketlist Item with ID {} not found in the database. 
You have requested this URI '\\\r\n '[/api/v1/bucketlist/1/items/0] but did you mean /api/v1/bucketlist/<int:bucketlist_id>/items/'\\\r\n ' or /api/v1/bucketlist/<int:bucketlist_id> or /api/v1/bucketlist ?'.format(0)\r\n )", "def test_fail_repeated_buckelist_item(self):\r\n user = User.query.filter_by(email=\"test@test.com\").first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n response = self.add_bucketlist_item(\"test@test.com\", \"test\", bucketlist.id, \"test item\")\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '409 CONFLICT')\r\n self.assertEqual(result['message'], 'Bucketlist Item Exists')\r\n new_item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n self.assertEqual(item_no, new_item_no)", "def done(self) -> bool:\n return pulumi.get(self, \"done\")", "def create_meal():", "def form_valid(self, form):\n context = self.get_context_data()\n ingredients = context['ingredients']\n # .atomic() - If there is an exception, the changes are rolled back.\n with transaction.atomic():\n form.instance.author = self.request.user\n self.object = form.save()\n if ingredients.is_valid():\n ingredients.instance = self.object\n ingredients.save()\n return super(RecipeCreate, self).form_valid(form)", "def set_delayed_test_to_done(self, guid_):\n db = DatabaseManager()\n query = \"\"\"UPDATE delayedTestData\n SET done=TRUE\n WHERE guid=%(guid)s\n AND done=FALSE\"\"\"\n db.execute_query_and_close(query, {\"guid\": guid_})\n return True", "def create_flight_needs_task(self):\n duration = self.trip.arrival_date_time - self.trip.departure_date_time\n if duration > timedelta(hours=2):\n self.tasks.append(self.trip.tasks.create(\n title=\"Flight Must Have !\",\n comments=\"It's a long flight ! 
Don't forget your earplugs and your sleep mask.\",\n category=TaskCategory.objects.get(name=\"Others\"),\n deadline=self.trip.departure_date_time - timedelta(days=1)\n ))\n else:\n self.tasks.append(self.trip.tasks.create(\n title=\"Flight Must Have !\",\n comments=\"Take some food and some drinks for your flight\",\n category=TaskCategory.objects.get(name=\"Others\"),\n deadline=self.trip.departure_date_time - timedelta(days=1)\n ))", "def xtest_adding_a_new_item_with_no_supply(self): \n print '\\n'\n logger.debug('Add a new item to a current PO via PUT')\n print '\\n'\n \n #Verifying po in database\n self.assertEqual(self.po.id, 1)\n self.assertEqual(self.po.items.count(), 1)\n self.assertEqual(self.po.grand_total, Decimal('129.58'))\n self.assertEqual(timezone('Asia/Bangkok').normalize(self.po.order_date).date(), datetime.datetime.now().date())\n item = self.po.items.all()[0]\n self.assertEqual(item.id, 1)\n self.assertEqual(item.quantity, 10)\n self.assertEqual(item.total, Decimal('121.1'))\n \n modified_po_data = copy.deepcopy(base_purchase_order)\n modified_po_data['items'][1]['unit_cost'] = Decimal('11.99')\n modified_po_data['items'][1]['comments'] = 'test change'\n modified_po_data['items'][1]['description'] = \"test description change\"\n modified_po_data['status'] = 'PROCESSED'\n\n logger.debug(modified_po_data)\n\n resp = self.client.put('/api/v1/purchase-order/1/',\n format='json',\n data=modified_po_data)\n \n #Verify the response\n self.assertEqual(resp.status_code, 200, msg=resp)\n po = resp.data\n self.assertEqual(po['id'], 1)\n self.assertEqual(po['supplier']['id'], 1)\n self.assertEqual(po['vat'], 7)\n #self.assertEqual(Decimal(po['grand_total']), Decimal('74.85'))\n self.assertEqual(po['discount'], 0)\n self.assertEqual(po['revision'], 1)\n self.assertEqual(len(po['items']), 2)\n #self.assertEqual(po['status'], 'PAID')\n #Check the new pdf\n #webtbrowser.get(\"open -a /Applications/Google\\ Chrome.app %s\").open(po['pdf']['url'])\n \n item1 = po['items'][0]\n logger.debug(item1)\n self.assertEqual(item1['id'], 2)\n self.assertEqual(item1['quantity'], '10.0000000000')\n self.assertEqual(item1['description'], u'Pattern: Maxx, Col: Blue')\n self.assertEqual(Decimal(item1['unit_cost']), Decimal('12.1100'))\n self.assertEqual(Decimal(item1['total']), Decimal('121.10'))\n\n item2 = po['items'][1]\n logger.debug(item2)\n self.assertEqual(item2['id'], 3)\n self.assertEqual(item2['quantity'], '3.0000000000')\n self.assertEqual(item2['comments'], 'test change')\n self.assertEqual(item2['description'], 'test description change')\n self.assertEqual(Decimal(item2['unit_cost']), Decimal('11.99'))\n self.assertEqual(Decimal(item2['total']), Decimal('35.97'))\n \n #Verify database record\n po = PurchaseOrder.objects.get(pk=1)\n \n self.assertEqual(po.supplier.id, 1)\n self.assertEqual(po.status, 'PROCESSED')\n #self.assertEqual(timezone('Asia/Bangkok').normalize(po.order_date), datetime.datetime.now().date())\n self.assertEqual(po.vat, 7)\n self.assertEqual(po.grand_total, Decimal('168.07'))\n self.assertEqual(po.items.count(), 2)\n \n # Check new item in the database\n item2_d = po.items.all().order_by('id')[1]\n self.assertEqual(item2_d.id, 203)\n self.assertEqual(item2_d.description, 'test description change')\n self.assertEqual(item2_d.comments, 'test change')\n self.assertEqual(item2_d.quantity, 3)\n self.assertEqual(item2_d.unit_cost, Decimal('11.99'))\n self.assertEqual(item2_d.total, Decimal('35.97'))\n\n # Check new supply product in the database\n products = 
SupplyProduct.objects.filter(supply=item2_d.supply, supplier=self.po.supplier)\n self.assertEqual(products.count(), 1)\n product = products.all()[0]\n self.assertEqual(product.supply.id, item2_d.supply.id)\n self.assertEqual(product.supplier.id, self.po.supplier.id)\n self.assertEqual(product.cost, Decimal('11.99'))", "def update_or_create_delivery(self, orderitem_data):", "def set_order_done():\n data = select_data_source()\n user = data['user']\n order_id = data['id']\n \n if check_user_permission(user) : return permission_denied_return\n \n db = database.getdb()\n \n ### Check if is valid.\n \n cmd = 'select passed from orders where id==\"{0}\"'.format(order_id)\n order_valid = db.execute(cmd).fetchall()[0][0]\n if order_valid == 0 :\n return finish_invalid_return\n \n ### Check if is done.\n cmd = 'select done from orders where id==\"{0}\"'.format(order_id)\n order_done = db.execute(cmd).fetchall()[0][0]\n if order_done != 0 :\n return finish_done_return\n \n ### All check done.\n ### Set it to done.\n cmd = 'update orders set done=1 where id==\"{0}\"'.format(order_id)\n db.execute(cmd)\n db.commit()\n print('user sets order {0} to be done.'.format(user))\n \n return finish_complete_return", "def testOwnershipAfterCreate(self):\n self.simulateATGUIInteraction(task='create')\n self.failUnlessEqual(self.person.getOwnerTuple()[1], 'abc123')", "def mark_as_done(self):\n if self.can_mark_as_done():\n return self.__set_completion_status(True)\n return False", "def testAssistantOwnershipAfterCreate(self):\n self.failUnless(self._testAssistantOwnershipAfter(task='create'), \"designated assistant is not listed as an owner\")", "def set_goal_done(self):\n self.has_goal = False\n self.last_goal_wait = False", "def done(self):", "def done(self):", "def test_deleted(self):\n event = Event.objects.all()[0]\n\n todo = TodoItem.objects.create(\n event=event, completed=False, title=\"Test TODO3\",\n due=datetime.date.today(), additional=\"\",\n )\n\n assert todo in event.todoitem_set.all()\n\n self.client.get(reverse('todo_delete', args=[todo.pk]))\n\n assert event.todoitem_set.all().count() == 0", "def test_promote_goes_no_further_than_done(self):\n todo = Todo.create(title=\"Thing to do\", status=Todo.CHOICES[-1][0])\n assert todo.status == todo.CHOICES[-1][0]\n\n todo.promote()\n\n assert todo.status == todo.CHOICES[-1][0]", "def test_create_drives_drive_smartfail_item(self):\n pass", "def test_todo_create(client):\n # creates Model.Todo object\n with db.atomic():\n todo = models.Todo.create(name='Shopping')\n\n assert isinstance(todo, models.Todo)\n assert todo.name == 'Shopping'\n assert hasattr(todo, 'id')\n assert todo.id is 1", "def test_edited(self):\n event = Event.objects.all()[0]\n\n todo = TodoItem.objects.create(\n event=event, completed=False, title=\"Test TODO4\",\n due=datetime.date.today(), additional=\"\",\n )\n\n url, form = self._get_initial_form('todo_edit', todo.pk)\n form['title'] = \"Test TODO4 - new title\"\n form['completed'] = True\n form['additional'] = ''\n\n rv = self.client.post(reverse('todo_edit', args=[todo.pk]), form)\n assert rv.status_code == 302\n\n todo.refresh_from_db()\n\n assert todo.title == \"Test TODO4 - new title\"\n assert todo.completed is True", "def mark_succeed(self):\n self.status = self.SUCCEED\n self.traceback = None\n self.save(update_fields={'status', 'traceback', 'updated_at'})" ]
[ "0.73595536", "0.69080955", "0.6256873", "0.6184523", "0.61320096", "0.60282576", "0.6025874", "0.59891725", "0.58720154", "0.5870808", "0.58685875", "0.5864126", "0.5850043", "0.5843042", "0.580223", "0.5788047", "0.5772935", "0.5769292", "0.5767891", "0.5756782", "0.57303107", "0.5727828", "0.5699209", "0.5697862", "0.56792533", "0.5639623", "0.56324", "0.56297165", "0.5627014", "0.5625253", "0.5571627", "0.5563974", "0.5557099", "0.5545007", "0.5521362", "0.5512483", "0.54838", "0.5479526", "0.547357", "0.54658234", "0.54554105", "0.5450185", "0.54468936", "0.54430217", "0.5432793", "0.5430676", "0.5414472", "0.5409174", "0.54023296", "0.5395431", "0.5394366", "0.5393998", "0.53900206", "0.53863215", "0.5383111", "0.53772426", "0.53721863", "0.5364068", "0.53637606", "0.5362962", "0.53569275", "0.5348478", "0.5343172", "0.5334324", "0.5333612", "0.532779", "0.5319638", "0.5314896", "0.5308468", "0.5308196", "0.5305818", "0.5304481", "0.53035533", "0.52960604", "0.52800995", "0.5275389", "0.5259387", "0.5257812", "0.5256503", "0.5255377", "0.5252906", "0.52475715", "0.5244743", "0.52412325", "0.5240018", "0.5238479", "0.52344406", "0.52330565", "0.5229771", "0.5226098", "0.5215788", "0.52130973", "0.52127033", "0.52127033", "0.5212057", "0.5211499", "0.5210598", "0.52052736", "0.5204462", "0.51987386" ]
0.7237571
1
Ensure the value of 'done' is True when set to True when creating an item
def test_done_value_can_be_set_to_True(self):
    item = Item(name = "A test item", done = True)
    self.assertEqual(item.name, "A test item")
    self.assertTrue(item.done)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_done_default_value_is_False(self):\n item = Item(name = \"A test item\")\n self.assertEqual(item.name, \"A test item\")\n self.assertFalse(item.done)", "def test_create(self):\n Todo = self.env[\"todo.task\"]\n task = Todo.create({'name': 'Test Task'})\n self.assertItemsEqual(task.is_done, False)", "def _is_done(self):\n pass", "def action_set_done(self):\n self.ensure_one()\n self.write({\"state\": \"done\"})\n self.credit_control_line_ids.write({\"state\": \"done\"})\n return True", "def is_done():\n return False", "def test_mark_completed(self):\n event = Event.objects.all()[0]\n\n todo = TodoItem.objects.create(\n event=event, completed=False, title=\"Test TODO1\",\n due=datetime.date.today(), additional=\"\",\n )\n\n assert todo.completed is False\n\n self.client.get(reverse('todo_mark_completed', args=[todo.pk]))\n todo.refresh_from_db()\n\n assert todo.completed is True", "def done(self) -> bool:", "def done(self):\n return False", "def _isDone(self) -> bool:\n pass", "def mark_as_done(self):\n self.status = \"DONE\"", "def action_done(self):\n if not self.date_done:\n self.date_done = fields.Datetime.now()\n if self.state_rapel == '1':\n self.generate_rapel()\n self.state = 'done'", "def test_mark_incompleted(self):\n event = Event.objects.all()[0]\n\n todo = TodoItem.objects.create(\n event=event, completed=True, title=\"Test TODO2\",\n due=datetime.date.today(), additional=\"\",\n )\n\n assert todo.completed is True\n\n self.client.get(reverse('todo_mark_incompleted', args=[todo.pk]))\n todo.refresh_from_db()\n\n assert todo.completed is False", "def settle_self(self):\n self.state = 'completed'\n self.save()\n self.safe_post()", "def create(self):\n return (True == self.client.put(self.name).getBodyData(\"ok\"))", "def is_item_complete(self, item):\n return (item.get('id') and\n item.get('name') and\n 'description' in item and\n 'image' in item)", "def done(self):\n self.status = 'completed'\n self.end = datetime.datetime.now()\n self.save()", "def complete_todo(self, todo: Todo):\n todo.completed = True\n self.todo_client.put_todo(todo)", "def done(self):\n return self._info['status'] == 'DONE'", "def is_done(self):\n return self._done", "def test_completed(self):\n return False", "def take_item(self, item):\r\n if len(self.items) <= 2:\r\n self.items.append(item)\r\n if self.got_both():\r\n self.working = True", "def _isDone(self):\n return self.steps >= self.max_steps or len(self.food_ids) <= 0", "def add_item(todo_list, todo_new_item):\n check = True\n try:\n todo_list.append(todo_new_item)\n except todo_list:\n print(\"Could not add new item to todo list\")\n check = False\n\n return check", "def test_create_item_good(test_client, item):\n\n response = test_client.post(BASE_URL,\n data=json.dumps(item),\n content_type='application/json')\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 201\n assert data['item']['name'] == item['name']\n assert data['item']['value'] == item['value']\n assert data['item']['id'] > 0", "def get_isDone(self):\n pass", "def done(self) -> bool:\n return pulumi.get(self, \"done\")", "def force_done(self):\n\n if self.can_done():\n return self.done()\n else:\n # we can not set that quest to done regularly, so we force it\n # nobody gets any experience and we might need a special notification for this\n self.quest.done = True\n self.quest.save()\n signals.quest_done.send(None, quest=self.quest)", "def create_items(sender, instance, **kwargs):\n if instance.item_id is None and instance.item is None:\n item = 
Item()\n if hasattr(instance, 'active'):\n item.active = getattr(instance, 'active')\n item.save()\n instance.item = item", "def can_mark_as_done(self):\n if (not self.event_store.done) and \\\n ((not self.file_submission_required) or self.event_store.has_file_submission) and \\\n (not self.contains_questions):\n return True\n return False", "def mark_as_done(self):\n\n done = self.in_progress_scroll_cell.get()\n if done is None:\n self.master.show_error_popup('No Item', 'There is no item in the list to mark as done')\n return\n self.in_progress_scroll_cell.remove_selected_item()\n self.done_scroll_cell.add_item(done)", "def done(self):\n raise NotImplementedError()", "def complete(self):\n self.completed = peewee.datetime.date.today()\n self.save()", "def set_order_done():\n data = select_data_source()\n user = data['user']\n order_id = data['id']\n \n if check_user_permission(user) : return permission_denied_return\n \n db = database.getdb()\n \n ### Check if is valid.\n \n cmd = 'select passed from orders where id==\"{0}\"'.format(order_id)\n order_valid = db.execute(cmd).fetchall()[0][0]\n if order_valid == 0 :\n return finish_invalid_return\n \n ### Check if is done.\n cmd = 'select done from orders where id==\"{0}\"'.format(order_id)\n order_done = db.execute(cmd).fetchall()[0][0]\n if order_done != 0 :\n return finish_done_return\n \n ### All check done.\n ### Set it to done.\n cmd = 'update orders set done=1 where id==\"{0}\"'.format(order_id)\n db.execute(cmd)\n db.commit()\n print('user sets order {0} to be done.'.format(user))\n \n return finish_complete_return", "def markSuccess(self, *args):\n self.add(True)", "def pending(self):\n self.state = Step.State.PENDING", "def mark_as_done(self):\n if self.can_mark_as_done():\n return self.__set_completion_status(True)\n return False", "def done(self):", "def done(self):", "def test_create_item(self):\n item = self.item\n\n self.assertTrue(isinstance(item, Item))\n self.assertEqual(item.name, \"Test Item\")", "def post(self):\n task = self.params.task\n task.completed = not task.completed\n task.put()\n render_json(self, obj=task.as_json())", "def test_create_a_todo(self):\n # hit the API endpoint\n response = self.make_a_request(\n kind=\"post\",\n version=\"v1\",\n data=self.valid_data\n )\n self.assertEqual(response.data, self.valid_data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n # test with invalid data\n response = self.make_a_request(\n kind=\"post\",\n version=\"v1\",\n data=self.invalid_data\n )\n self.assertEqual(\n response.data[\"message\"],\n \"TODO item requires state, due_date and text\"\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def set_goal_done(self):\n self.has_goal = False\n self.last_goal_wait = False", "def mark_succeed(self):\n self.status = self.SUCCEED\n self.traceback = None\n self.save(update_fields={'status', 'traceback', 'updated_at'})", "def done(self) -> bool:\n return self._done", "def done(self):\n return self._is_done", "def test_make_draft(self):\r\n # Make problem public.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'publish': 'make_public'}\r\n )\r\n self.assertIsNotNone(self.get_item_from_modulestore(self.problem_usage_key, False))\r\n # Now make it draft, which means both versions will exist.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'publish': 'create_draft'}\r\n )\r\n # Update the draft version and check that published is different.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n 
data={'metadata': {'due': '2077-10-10T04:00Z'}}\r\n )\r\n published = self.get_item_from_modulestore(self.problem_usage_key, False)\r\n self.assertIsNone(published.due)\r\n draft = self.get_item_from_modulestore(self.problem_usage_key, True)\r\n self.assertEqual(draft.due, datetime(2077, 10, 10, 4, 0, tzinfo=UTC))", "def task_done(self):\n\t\ttry:\n\t\t\tself.logger.debug('Im trying mark queue job item as done')\n\t\t\tself.queue.task_done()\n\t\t\tself.logger.debug('Queue job item mark as done')\n\t\t\treturn True\n\t\texcept ValueError, e:\n\t\t\tself.logger.error('Error method task_done, error: %s'%(e),exc_info=True)\n\t\t\treturn False", "def test_vault_create_new_vault_item(self):\n pass", "def create_item(self, user: User, **kwargs) -> None:", "def is_complete(self):\n pass", "def verify_done():\n if SAVE_EXISTENT == []:\n print \"\\nCan't generate the invoice because You have not bought\"\n press_enter()\n reset()\n show_products()\n sell_products()\n else:\n reset()\n invoice()\n press_enter()\n delete_lists()\n reset()\n main_menu()", "def is_created(self) -> bool:\n return self.state == Order.OrderState.CREATED.choice_value", "def set_delayed_test_to_done(self, guid_):\n db = DatabaseManager()\n query = \"\"\"UPDATE delayedTestData\n SET done=TRUE\n WHERE guid=%(guid)s\n AND done=FALSE\"\"\"\n db.execute_query_and_close(query, {\"guid\": guid_})\n return True", "def test_add_item_using_post(self):\n pass", "def _update_done(self) -> None:\n if None not in (self._last_readback, self._last_setpoint):\n is_done = self.done_comparator(self._last_readback, self._last_setpoint)\n done_value = int(is_done)\n if done_value != self.done.get():\n self.done.put(done_value, internal=True)", "def create_item():\n\n data = request.get_json()\n title = data.get(\"title\", None)\n description = data.get(\"description\", None)\n due_date = data.get(\"due_date\", None)\n list_id = data.get(\"list_id\", None)\n\n if title is None or list_id is None:\n return abort(400, description=f\"List ID and title cannot be null!\")\n\n list_to_append = ToDoList.query.filter(ToDoList.id == list_id).first()\n\n if list_to_append is None:\n return abort(404, description=f\"List ID {list_id} does not exist!\")\n\n if due_date is not None:\n try:\n due_date = datetime.datetime.strptime(due_date, DATE_FORMAT)\n except ValueError:\n return abort(400, description=f\"Date format must be YYYY-MM-DD HH:MM\")\n\n new_item = Task(\n title=title,\n description=description,\n status=\"pending\",\n due_date=due_date,\n list_id=list_id,\n )\n db.session.add(new_item)\n db.session.commit()\n\n return make_response(json.dumps(new_item.serialize()))", "def create_work_item(self):", "def mark_successful(self):\r\n self.require_item()\r\n\r\n url = '{0}/mark_successful'.format(self.get_url())\r\n request = http.Request('PUT', url)\r\n\r\n return request, parsers.parse_empty", "def action_done(self):\n pass", "def test_profile_is_complete(empty_field):\n profile = ProfileFactory.create()\n if empty_field:\n setattr(profile, empty_field, \"\")\n assert profile.is_complete is False\n else:\n assert profile.is_complete is True", "def disable_if_done(self, commit=True):\n if self._is_billing_complete() and not self.disabled:\n self.disabled = True\n\n if commit:\n self.save()", "def _is_done(self, observations):\n raise NotImplementedError()", "def _is_done(self, observations):\n raise NotImplementedError()", "def _is_done(self, observations):\n raise NotImplementedError()", "def _is_done(self, observations):\n raise 
NotImplementedError()", "def _is_done(self, observations):\n raise NotImplementedError()", "def _is_done(self, observations):\n raise NotImplementedError()", "def success(self):\n self.succeeded = True", "def test_completed_dont_count(self):\r\n self._login_admin()\r\n\r\n # add out completed one\r\n q = ImportQueue(\r\n username=u'admin',\r\n file_path=u'testing.txt'\r\n )\r\n q.completed = datetime.now()\r\n q.status = 2\r\n DBSession.add(q)\r\n transaction.commit()\r\n\r\n # now let's hit the import page, we shouldn't get a form, but instead a\r\n # message about our import\r\n res = self.app.get('/admin/import')\r\n\r\n self.assertTrue('<form' in res.body, \"We should have a form\")", "def test_create_item_missing_value(test_client, item_without_value):\n\n response = test_client.post(BASE_URL,\n data=json.dumps(item_without_value),\n content_type='application/json')\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 400\n assert data['error'] == app.BAD_REQUEST", "def test_adding_item_to_list(create_shopping_item, create_shopping_list):\n shopping_list = create_shopping_list\n items_before = shopping_list.items.values_list().count()\n new_item = create_shopping_item\n shopping_list.items.add(new_item)\n items_after = shopping_list.items.values_list().count()\n assert items_after > items_before\n assert items_before == 0\n assert items_after == 1", "def purchase_item(self):\r\n self.purchased_callback()\r\n self.status = 'purchased'\r\n self.fulfilled_time = datetime.now(pytz.utc)\r\n self.save()", "def test_put_bucketlist_item(self):\r\n data = {\"name\": \"bucketlist item name\", \"completed\": \"true\"}\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertNotEqual(item.name, \"bucketlist item name\")\r\n self.assertFalse(item.completed)\r\n\r\n response = self.put_bucketlist_item(email, _pword, bucketlist.id, 1, data)\r\n result = json.loads(response.data.decode('utf-8'))\r\n item2 = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertEqual(response.status, '201 CREATED')\r\n self.assertEqual(item2.name, \"bucketlist item name\")\r\n self.assertTrue(item2.completed)", "def test_new_item(self):\n\n\t\titem_id = mock_item()[0]\n\t\tself.assertEqual(item_id, 1)", "def _advance_to_pending(self):\n if all(signup.status != GameSignup.REGISTERED for signup in self.signups.all()):\n try:\n with transaction.atomic():\n self.status = self.PENDING\n self._create_characters()\n self.save()\n except DatabaseError:\n pass\n else:\n raise ValidationError('All user signups must be accepted, rejected, or withdrawn before continuing.')", "def force_input(self, foodItem):\r\n\r\n self.identified_food = foodItem\r\n self.has_been_checked = False", "def can_update_order_items(self) -> bool:\n return self.is_created or self.is_pending", "def done(self):\n return self._done.get()", "def test_set_task_incomplete_view(self):\n self.task.status = Task.STATUS_CHOICES.ready_for_review\n self.task.save()\n pk = self.task.pk\n url = reverse('set_task_incomplete', kwargs={'pk': pk})\n response = self.client.post(url)\n self.assertEqual(response.status_code, 302)\n task = Task.objects.get(pk=pk)\n self.assertEqual(task.status, Task.STATUS_CHOICES.incomplete)", "def add_item(self):\n item = 
LibGen.create_item()\n if not self.item_exists(item.call_number):\n self.item_list[item.call_number] = item\n print(f\"Item({item.call_number}) bas been added.\")\n else:\n print(\"This item already exists.\")", "def test_0_0_create(self):\n\n self.assertTrue(self.b1)", "async def additem(self, ctx, *, name: str):\n try:\n item = dict()\n item[\"name\"] = name\n check = lambda x: x.channel is ctx.channel and x.author is ctx.author\n await ctx.send(await _(ctx, \"Describe the item (a description for the item)\"))\n response = await self.bot.wait_for(\"message\", timeout=120, check=check)\n if response.content.lower() == \"cancel\":\n await ctx.send(await _(ctx, \"Cancelling!\"))\n return\n\n item[\"description\"] = response.content\n item[\"meta\"] = dict()\n\n await ctx.send(\n await _(ctx, \"Additional information? (Attributes formatted in a list i.e `color: 400, value: 200` \"\n \"Set an image for this item with the `image` key i.e. `image: http://image.com/image.png` \"\n \"Set this item as usable by adding `used` key i.e. `used: You open the jar and the bird flies away`\"))\n while True:\n response = await self.bot.wait_for(\"message\", timeout=60, check=check)\n if response.content.lower() == \"cancel\":\n await ctx.send(await _(ctx, \"Cancelling!\"))\n return\n elif response.content.lower() == \"skip\":\n await ctx.send(await _(ctx, \"Skipping!\"))\n break\n else:\n try:\n if \"\\n\" in response.content:\n res = response.content.split(\"\\n\")\n else:\n res = response.content.split(\",\")\n for val in res:\n key, value = val.split(\": \")\n key = key.strip().casefold()\n value = value.strip()\n item[\"meta\"][key] = value\n else:\n break\n except:\n await ctx.send(await _(ctx, \"Invalid syntax, try again.\"))\n await self.bot.di.new_item(ctx.guild, ServerItem(**item))\n await ctx.send(await _(ctx, \"Item successfully created\"))\n\n except asyncio.TimeoutError:\n await ctx.send(await _(ctx, \"Timed out! 
Try again\"))", "def set_done(self):\n self._set_new_trajectory()", "def test_add_item_at_using_put(self):\n pass", "def item_done(self, rsp=None):\n if self.current_item is None:\n raise error_classes.UVMSequenceError(\"You must call get_next_item before calling item_done\")\n\n with self.current_item.finish_condition:\n self.current_item.finish_condition.notify_all()\n self.current_item = None\n if rsp is not None:\n self.put_response(rsp)", "def item_status(item_id):\n\n item_completed = request.form.get(\"item_completed\", \"off\")\n list_id = request.form[\"list_id\"]\n\n item_completed = item_completed == \"on\"\n\n to_do_item = ToDoItem.query.get(item_id)\n to_do_item.completed = item_completed\n db.session.commit()\n\n return redirect(f\"/lists/{list_id}\")", "def complete(self):\n self._is_complete = True", "def is_final_item(item_id):\n return \"into\" not in items[\"data\"][str(item_id)]", "def add_item_to_inventory(game, *args):\n (item, action_description, already_done_description) = args[0]\n if not game.is_in_inventory(item):\n print_bold(action_description)\n game.add_to_inventory(item)\n print_italic(\"You've just got a {item}.\".format(item=item.name))\n else:\n print_italic(already_done_description)\n return False", "def has_success(self, value: bool):\n self._has_success = value", "def declareDone(self, cmd):\n pass", "def post_create(self, state):\n\n self.id = self.get_flags_from_list(self.id)\n self.flags = self.get_flags_from_list(self.flags)", "def uncomplete(self):\n ### TODO: needs test code for code coverage!\n ## (it has been tested through the calendar-cli test code)\n if not hasattr(self.vobject_instance.vtodo, \"status\"):\n self.vobject_instance.vtodo.add(\"status\")\n self.vobject_instance.vtodo.status.value = \"NEEDS-ACTION\"\n if hasattr(self.vobject_instance.vtodo, \"completed\"):\n self.vobject_instance.vtodo.remove(self.vobject_instance.vtodo.completed)\n self.save()", "def test_set_task_ready_view(self):\n self.task.status = Task.STATUS_CHOICES.incomplete\n self.task.save()\n pk = self.task.pk\n url = reverse('set_task_ready', kwargs={'pk': pk})\n response = self.client.post(url)\n self.assertEqual(response.status_code, 302)\n task = Task.objects.get(pk=pk)\n self.assertEqual(task.status, Task.STATUS_CHOICES.ready_for_review)\n self.assertIsNotNone(task.completed_at)", "def test_create_draft_with_update(self):\r\n # Make problem public.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'publish': 'make_public'}\r\n )\r\n self.assertIsNotNone(self.get_item_from_modulestore(self.problem_usage_key, False))\r\n # Now make it draft, which means both versions will exist.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={\r\n 'metadata': {'due': '2077-10-10T04:00Z'},\r\n 'publish': 'create_draft'\r\n }\r\n )\r\n published = self.get_item_from_modulestore(self.problem_usage_key, False)\r\n self.assertIsNone(published.due)\r\n draft = self.get_item_from_modulestore(self.problem_usage_key, True)\r\n self.assertEqual(draft.due, datetime(2077, 10, 10, 4, 0, tzinfo=UTC))", "def mark_ready_for_review(self, user: User) -> None:\n from .exceptions import OperationForbiddenError, OrderEmptyError\n\n # If order is not in the \"CREATED\" state, raise an\n # OperationForbiddenError\n if not self.is_created:\n raise OperationForbiddenError(\n self.STATE_CHANGE_FORBIDDEN_ERROR_MSG % {\n 'current_state': Order.OrderState.get_choice_display(\n self.state\n ),\n 'new_state': Order.OrderState.PENDING.choice_display\n }\n )\n\n # If the order's 
item list is empty, raise an OrderEmptyError\n if not self.orderitem_set.exists():\n raise OrderEmptyError(\n self,\n 'An order should contain at least one Order item before it '\n 'can be marked as \"PENDING\".'\n )\n\n # Update the order to \"PENDING\" state\n self.update(user, state=Order.OrderState.PENDING.choice_value)", "def test_creating_shopping_item(create_shopping_item, create_user):\n owner = create_user\n shopping_item = create_shopping_item\n assert shopping_item.owner == owner", "def _apply_item(self, item: Item) -> bool:\n return False", "def _apply_item(self, item: Item) -> bool:\n if self.locked:\n self.__locked = item.item_type != self.__key\n return not self.locked", "def done(self):\n # We have the same statement twice to try and avoid updating.\n if self.state == 'completed':\n return True\n if not self._updating:\n self.update()\n if self.state == 'completed':\n return True\n return False" ]
[ "0.7198816", "0.6800873", "0.65567577", "0.6417597", "0.6363705", "0.6359525", "0.6172014", "0.6137984", "0.61083496", "0.6047177", "0.60278106", "0.6007678", "0.59839237", "0.59442586", "0.590649", "0.5906427", "0.5846286", "0.5799823", "0.5794618", "0.5780171", "0.57660437", "0.576509", "0.57642", "0.57324827", "0.5731095", "0.57280755", "0.5707831", "0.57019037", "0.56942606", "0.56815094", "0.567984", "0.5643761", "0.56241024", "0.56067586", "0.560519", "0.55809426", "0.55773294", "0.55773294", "0.5563361", "0.55377764", "0.5535513", "0.5518558", "0.5517754", "0.5506543", "0.54961216", "0.54914993", "0.5490628", "0.5478996", "0.54610014", "0.545527", "0.54505694", "0.5440443", "0.54366016", "0.54357326", "0.5435471", "0.54188025", "0.53909177", "0.5383671", "0.5383504", "0.53823173", "0.53802246", "0.5376005", "0.5376005", "0.5376005", "0.5376005", "0.5376005", "0.5376005", "0.5375713", "0.53656274", "0.5364057", "0.5358287", "0.53570443", "0.53542006", "0.5348473", "0.53243905", "0.5312222", "0.5304633", "0.5301657", "0.5301065", "0.53010494", "0.5300992", "0.5299016", "0.52895004", "0.52833635", "0.5277366", "0.5272898", "0.5271825", "0.5266586", "0.5266204", "0.52651036", "0.5261712", "0.5261423", "0.525941", "0.52578276", "0.52565026", "0.5254656", "0.52534413", "0.5250691", "0.52501553", "0.5242349" ]
0.76738805
0
Ensure the string value of the object is equal to the item name
def test_object_name_is_equal_to_item_name(self):
    item = Item(name = "A test item")
    self.assertEqual(str(item), "A test item")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_str(self):\n item = self.item\n\n self.assertEqual(str(item), self.item_raw['name'])", "def _valid_object_with_name(ui_object):\n return ui_object.obj_name", "def test_name(self):\n self.assertTrue(type(x.name) == str)", "def test_values_single(self):\n input_item = self.item_class(name=\"foo\")\n il = ItemLoader(item=input_item)\n self.assertEqual(il._values.get(\"name\"), [\"foo\"])", "def test_name3(self):\n new = self.value()\n self.assertEqual(type(new.name), str)", "def test_name(self):\n node = self.create(ObjectNodeItem, UML.ObjectNode)\n name = node.shape.icon.children[1]\n\n node.subject.name = \"Blah\"\n\n assert \"Blah\" == name.text()", "def test_name_attribute_assignment(self):\n self.assertNotIn('aldous', self.__dict__)\n self.aldous\n self.assertIn('aldous', self.__dict__)\n self.assertIs(self.__dict__['aldous'], self.aldous)", "def set_name(self, item_name):\r\n self.name = item_name", "def test_printing_shoppping_item_returns_name(create_shopping_item):\n item = create_shopping_item\n assert item.__str__() == 'shopping item one'", "def __getitem__(self, item):\n return self._object_names[item]", "def test_from_name(self, testdata: TestData) -> None:\n for record in testdata['observation_type']:\n assert ObservationType.from_name(record['name']).name == record['name']", "def validate(self, name):\n return name in self.dict", "def test_asset_name():\n\n invalid = {}\n inventory_ = copy.deepcopy(self._inventory)\n inventory_[\"assets\"].append(invalid)\n\n for name in (\"mixedCaseOk\",\n \"lowercaseok\",\n \"underscore_ok\"):\n invalid[\"name\"] = name\n\n inventory.save(\n name=self._project[\"name\"],\n config=self._config,\n inventory=inventory_\n )\n\n for name in (\"spaces not ok\",\n \"special~characters$not^ok\",\n \"dash-not-ok\"):\n invalid[\"name\"] = name\n\n assert_raises(\n schema.ValidationError,\n inventory.save,\n name=self._project[\"name\"],\n config=self._config,\n inventory=inventory_\n )", "def _check_name(self):\n\t\tpass", "def test_name_property_ro(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n try:\n v1.name = 'bar'\n except AttributeError:\n passed = True\n else:\n passed = False\n\n self.assertTrue(passed)", "def test_set_value_not_str(self) -> None:\n\n expected = False\n actual = self.helper.set_name(self.test_name).exists()\n\n self.assertEqual(expected, actual)\n\n self.assertRaises(TypeError, lambda: self.helper.set_value([\"Hello\", \"World!\"]))", "def need_name(dictionary, raise_error=True):\r\n return key_checker(['name'])(dictionary, raise_error)", "def test_correct_upload_item(upload_items: List[JSONDict]) -> None:\n validated = UploadItem(**upload_items[0])\n assert validated.dict() == upload_items[0]", "def test_property_name(self):\n \n name = self.location.name\n\n self.assertIsInstance(name, str)\n self.assertRaises(DataObjectError, \n setattr(self, \"name\", \"Bogus Location name\")\n )", "def exists(self, obj):\n\t\tif obj.get('name') and obj.get('type'):\n\t\t\treturn self.db.sql(\"select name from `%s` where name=%s\" % \\\n\t\t\t \t(obj['type'],'%s'), obj['name'])", "def test_should_name_field(self):\n self.assertIn(\"name\", self.fields)", "def test_string(self):\n\n new_jawn = Amenity()\n name = getattr(new_jawn, \"name\")\n self.assertIsInstance(name, str)", "def getName(cls, itemValue):\n for name, value in cls.iterate():\n if itemValue == value:\n return name\n\n raise ValueError('Value {0} not found in {1}'.format(itemValue, cls.__name__))", "def check_attributes(self):\n 
self.assertEqual(type(self.amenity_1.name), str)", "def test_keep_single_value(self):\n input_item = self.item_class(name=\"foo\")\n il = ItemLoader(item=input_item)\n loaded_item = il.load_item()\n self.assertIsInstance(loaded_item, self.item_class)\n self.assertEqual(ItemAdapter(loaded_item).asdict(), {\"name\": [\"foo\"]})", "def _check_key_name(cls, name):\n return (isinstance(name, basestring) and\n re.match('^[A-Za-z][A-Za-z0-9_]*$', name) and\n not hasattr(cls, name))", "def test_name(self):\n place = Place()\n self.assertTrue(hasattr(place, \"name\"))\n self.assertEqual(type(place.name), str)\n self.assertEqual(place.name, \"\")", "def check_all_objects_have_names(self):\n for entity in crest.get_all_crest_objects(self.model):\n assert entity._name is not None, f\"Object {entity} has no name\"", "def test_strings(self):\n\n for cls in [IndependentMoney, Beneficiary, CommitteeBenefactor,\n OtherBenefactor, PersonBenefactor, Benefactor,\n PartyBenefactor, Committee]:\n if cls.objects.all().count() == 0: # bad :(\n try:\n obj = cls()\n except:\n continue\n else:\n obj = cls.objects.all()[0]\n\n self.assertNotIn('Object', str(obj), cls.__name__)\n self.assertNotIn('Object', unicode(obj), cls.__name__)\n\n self.assertNotEqual('', str(obj), cls.__name__)\n self.assertNotEqual('', unicode(obj), cls.__name__)", "def get_item_name(self, i):\n for item in self.items:\n if item['id'] == i:\n return item['localized_name']\n return 'Unknown Item'", "def test_required_name_attribute_is_in_the_request_payload_and_has_a_value(self):\n with self.client:\n token = self.get_user_token()\n res = self.client.put(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token),\n data=json.dumps(dict(name='')),\n content_type='application/json'\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 400)\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'No attribute or value was specified, nothing was changed')", "def item(self, item_name):\n\tself.log.info('Not implemented yet... 
Sorry!')\n\tpass", "def _valid_typable_object_with_name(ui_object, platform=Platform.ANDROID):\n if platform == Platform.ANDROID:\n return (ui_object.obj_type in _TYPABLE_OBJECT_DESC.keys() and\n _valid_object_with_name(ui_object))\n else:\n assert False, 'Wrong Platform'", "def test_name(self):\n insta = Amenity()\n self.assertTrue(hasattr(insta, \"name\"))\n self.assertEqual(insta.name, \"\")", "def name_test(item):\n return f\"{item['params']['interface']}:{item['expected']['state']}\"", "def getName(self,item):\n return item.s", "def _valid_clickable_object_with_name(ui_object, platform=Platform.ANDROID):\n return (not _valid_typable_object_with_name(ui_object, platform) and\n _valid_object_with_name(ui_object))", "def is_valid_menu_item(self, item_name: str) -> bool:\n return item_name in self._items()", "def validate_name(self, value):\n if not value:\n raise serializers.ValidationError(\"Name cannot be null\")\n return value", "def test_name(self):\n inst = Amenity()\n self.assertTrue(hasattr(inst, \"name\"))\n self.assertEqual(inst.name, \"\")", "def test_string_representation(self) -> None:\n item = Item(text=\"some text\")\n self.assertEqual(str(item), \"some text\")", "def __getattr__(self, item: str): # noqa: U100", "def __str__(self):\n return self.item_name", "def test_name_empty_string(self):\r\n self.name = \"\"", "def test_badge_should_have_name(self):\n\n badge = self.get_sample_badge()\n self.assertIsInstance(badge.name, str)", "def test_name(self):\r\n name, path, args, kwargs = self.field.deconstruct()\r\n self.assertIsNone(name)\r\n self.field.set_attributes_from_name(\"segments\")\r\n name, path, args, kwargs = self.field.deconstruct()\r\n self.assertEqual(name, \"segments\")", "def __contains__(self, item: object) -> bool:\n if isinstance(item, tuple) and len(item) == 2:\n var, value = item\n else:\n return False\n if isinstance(var, str):\n if var and var[0] == '$':\n var = var[1:]\n try:\n return self._mapping._fixup[var.casefold()].value == conv_kv(value)\n except KeyError:\n return False\n return False", "def test_name_property(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n expected = 'foo'\n\n self.assertEqual(v1.name, expected)", "def attr(self, name):\r\n return Assert(getattr(self.obj, name))", "def test_add_value_singlevalue_singlevalue(self):\n input_item = self.item_class(name=\"foo\")\n il = ItemLoader(item=input_item)\n il.add_value(\"name\", \"bar\")\n loaded_item = il.load_item()\n self.assertIsInstance(loaded_item, self.item_class)\n self.assertEqual(ItemAdapter(loaded_item).asdict(), {\"name\": [\"foo\", \"bar\"]})", "def test_only_guid(only_guid_item: JSONDict) -> None:\n validated = UploadItem(**only_guid_item)\n assert validated.dict() == only_guid_item", "def test_name_already_exists(self) -> None:\n with pytest.raises(IntegrityError):\n ObservationType.add({'name': 'clear', 'units': 'mag',\n 'description': 'Un-filtered apparent magnitude.'})", "def test_make_order_with_name_invalid(self):\n response = self.api_test_client.post('{}/orders'.format(\n self.BASE_URL), json={\n 'item_name': 10, 'item_price': 50, 'quantity': 3\n }, headers={'Content-Type': 'application/json'})\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response_as_json(\n response)['message'], 'Bad request. 
Item name must be a string')", "def check_name(self, node):\n assert \"name\" in node, \"Package node does not contain attribute 'node'\"\n assert len(node[\"name\"]) >= 1, \"Expecting at least one 'name' value\"\n # TODO: add more thorough checks", "def get_name(self):\n if self.name != None: return self.name\n else: return self.get_name_from_items(self.items.values())", "def __getitem__(self, name):\n return self._items[name.lower()][1]", "def _check_value(self,val,obj=None):\n if not val in self.objects:\n # CEBALERT: can be called before __init__ has called\n # super's __init__, i.e. before attrib_name has been set.\n try:\n attrib_name = self._attrib_name\n except AttributeError:\n attrib_name = \"\"\n raise ValueError(\"%s not in Parameter %s's list of possible objects\" \\\n %(val,attrib_name))", "def test_display_name(self):\r\n def verify_name(source_usage_key, parent_usage_key, expected_name, display_name=None):\r\n usage_key = self._duplicate_item(parent_usage_key, source_usage_key, display_name)\r\n duplicated_item = self.get_item_from_modulestore(usage_key, draft=True)\r\n self.assertEqual(duplicated_item.display_name, expected_name)\r\n return usage_key\r\n\r\n # Display name comes from template.\r\n dupe_usage_key = verify_name(self.problem_usage_key, self.seq_usage_key, \"Duplicate of 'Multiple Choice'\")\r\n # Test dupe of dupe.\r\n verify_name(dupe_usage_key, self.seq_usage_key, \"Duplicate of 'Duplicate of 'Multiple Choice''\")\r\n\r\n # Uses default display_name of 'Text' from HTML component.\r\n verify_name(self.html_usage_key, self.seq_usage_key, \"Duplicate of 'Text'\")\r\n\r\n # The sequence does not have a display_name set, so category is shown.\r\n verify_name(self.seq_usage_key, self.chapter_usage_key, \"Duplicate of sequential\")\r\n\r\n # Now send a custom display name for the duplicate.\r\n verify_name(self.seq_usage_key, self.chapter_usage_key, \"customized name\", display_name=\"customized name\")", "def get(self, item_name):\n if isinstance(item_name, BaseItem):\n return item_name\n return self.all_items.get(item_name)", "def contains(dumpable_object, item_name) -> bool:\n str_io = io.StringIO()\n with redirect_stdout(str_io):\n dumpable_object.dump()\n return item_name in str_io.getvalue()", "def test_item_id(item):\n assert item.item_id == 'exopy_pulses.Item'", "def clean_mysteries(self, item):\n if \"[???]\" in item['name']:\n item['name'] = item['name'][6:]", "def items_contains_name(items, name):\n ret = 0\n # Loops all items and saves the searched one\n for x in range(len(items)):\n if items[x]['name'] == name:\n ret = x\n return ret", "def __contains__(self, item):\n return item.upper() in self.keys", "def test_reserved_name(self):\n with self.assertRaises(ValidationError):\n field_name_validator('_id')", "def name(self, value):\n\n if value is not None:\n assert is_string(value), (\n ('\"{0}\" attribute: \"{1}\" is not a \"string\" like object!'\n ).format('name', value))\n self._name = value", "def __eq__(self, name):\n return self.name == name", "def testName(self):\n dis_meta = DiseaseMeta()\n\n self.util.stringTypeTest(self, dis_meta, \"name\")\n\n self.util.stringPropertyTest(self, dis_meta, \"name\")", "def the_name_should_reflect_in_the_state_of_the_device(name):\n assert web_app.check_value_in_state(\"name\",name)", "def update_element_name(self, items, new_name):\n if new_name != '':\n for i in items:\n if i.text() == new_name:\n #print(\"Name already exists\")\n msgBox = QMessageBox()\n msgBox.setIcon(QMessageBox.Information)\n 
msgBox.setText(\"Element with this name already exists.\")\n msgBox.setWindowTitle(\"QMessageBox Example\")\n msgBox.setStandardButtons(QMessageBox.Ok)\n msgBox.exec()\n return False\n return new_name\n else:\n if self.list_of_elements.count() == 0:\n new_name = self.element_name+\"_\"+str(0)\n return new_name\n\n for i in range(0, self.list_of_elements.count()+1):\n new_name = self.element_name+\"_\"+str(i)\n exists = self.list_of_elements.findItems(new_name,\n QtCore.Qt.MatchExactly)\n if len(exists) == 0:\n return new_name\n return False", "def test_create_item(self):\n item = self.item\n\n self.assertTrue(isinstance(item, Item))\n self.assertEqual(item.name, \"Test Item\")", "def verify_unique_names(items):\n unique_names = set([item['name'] for item in items])\n if len(unique_names) != len(items):\n raise ClientException(\"Error: Duplicate sequence names found.\", ErrorType.INVALID_SEQUENCE_DATA)", "def test_interaction_accepts_name():\n dmi = DMI(1)\n assert hasattr(dmi, 'name')", "def has_item(self, item_name):\n if item_name in self.item_list:\n return True\n return False", "def _set_unless_fail(self, schema, key, object_name, object_type, *args):\n if (not object_name.isupper()) and ('\"' not in object_name):\n # then this must be a chaotic evil case-sensitive object name\n object_name = '\"%s\"' % object_name\n try: \n schema[key] = object_type(object_name, *args) \n except AttributeError: \n if self.omit_error_objects: \n LOG.warning(\"Couldn't get details for %s\" % (key, )) \n else: \n raise", "def test_Stock_output_named_tuple_vs_dictionary_1():\n assert Stock_tuple[0][0] == Stock_list_dict[0]['name'], \"Name is not getting stored properly\"", "def test_name(self):\n molecule1 = Molecule()\n molecule1.name = None\n\n molecule2 = Molecule()\n molecule2.name = \"\"\n assert molecule1.name == molecule2.name\n\n name = \"benzene\"\n molecule = Molecule()\n molecule.name = name\n assert molecule.name == name", "def test_name(self):\n molecule1 = Molecule()\n molecule1.name = None\n\n molecule2 = Molecule()\n molecule2.name = \"\"\n assert molecule1.name == molecule2.name\n\n name = \"benzene\"\n molecule = Molecule()\n molecule.name = name\n assert molecule.name == name", "def test_name_set(self):\n name = \"Field Name\"\n field = basic.flag(name=name)\n\n self.assertEqual(name, field.name)\n\n self.assertEqual(name, field.name)", "def test_set_name_not_str(self) -> None:\n\n given = [\"Hello\", \"World\"]\n\n self.assertRaises(TypeError, lambda: self.helper.set_name(given))", "def check_dog_name(dog):\n if not isinstance(dog.name, str):\n raise NotStringError(\"Dog name entered is not a string\")", "def resolve_name(self, object_name, accept_unquoted_strs=False):\n\n # assume elt is a string\n if isinstance(object_name, (float, int)):\n return object_name\n elif isinstance(object_name, str) and self.quoted_string_re.match(object_name):\n # quoted strings are not interpreted as names\n return object_name\n\n elif isinstance(object_name, str) and object_name in self.name_mapping:\n return self.name_mapping[object_name]\n else:\n if accept_unquoted_strs:\n return object_name\n else:\n raise ValueError(f\"unknown name (or type): {object_name}\")", "def test_location_name(self):\n self.assertIsInstance(self.location.name, str)\n self.assertEqual(self.location.name, \"Chez Toto\")", "def test_str_method(self):\n _name = 'test-name'\n el = MarkerId(_name)\n self.assertEqual(el.__str__(), _name)", "def test_str(self):\n faction = self.faction\n\n self.assertEqual(str(faction), 
self.faction_raw['name'])", "def __getitem__(self, item):\n if type(item) == str:\n return self.__dict__[item]\n else:\n return self.__dict__", "def Item(self) -> str:", "def Item(self) -> str:", "def Item(self) -> str:", "def test_name_attribute_is_set_in_bucket_creation_request(self):\n with self.client:\n response = self.client.post(\n '/bucketlists',\n headers=dict(Authorization='Bearer ' + self.get_user_token()),\n data=json.dumps({}),\n content_type='application/json'\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 400)\n self.assertTrue(data['status'], 'failed')\n self.assertTrue(data['message'], 'Missing name attribute')", "def handle_dict_items(self, object, name, old, new):\n raise NotImplementedError", "def tesName(self):\n place = Place()\n self.assertTrue(hasattr(place, \"name\"))\n self.assertEqual(place.name, \"\")", "def test_loads_item_valid(self):\n item: Item = Item.Schema().loads(json.dumps(item_valid))\n assert item.product_type == item_valid[\"product-type\"]\n assert item.artist_markup == item_valid[\"artist-markup\"]\n assert item.options == item_valid[\"options\"]\n assert item.quantity == item_valid[\"quantity\"]", "def test_title(self):\n\n # list instead of string\n self.validator.adata.uns[\"title\"] = [\"title\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: '['title']' in 'uns['title']' is not valid, \"\n \"it must be a string.\"\n ],\n )", "def setName(self, value):\n dataDict = self.__dict__\n if (value is not None):\n if (isinstance(value, memops.api.Implementation.String.PythonType)):\n pass\n else:\n raise ApiError(\"\"\"%s.setName:\n memops.Implementation.Text input is not of a valid type\"\"\" % self.qualifiedName\n + \": %s\" % (value,)\n )\n\n _lenValue = len(value)\n if (_lenValue > 254):\n raise ApiError(\"\"\"%s.setName:\n memops.Implementation.Text maximum length is 254 violated by value\"\"\" % self.qualifiedName\n + \": %s\" % (value,)\n )\n\n if (_lenValue < 1):\n raise ApiError(\"\"\"%s.setName:\n memops.Implementation.Text: Empty string not allowed\"\"\" % self.qualifiedName\n + \": %s\" % (value,)\n )\n\n topObject = dataDict.get('topObject')\n currentValue = self.getName()\n notInConstructor = not (dataDict.get('inConstructor'))\n\n root = topObject.__dict__.get('memopsRoot')\n notOverride = not (root.__dict__.get('override'))\n notIsReading = not (topObject.__dict__.get('isReading'))\n notOverride = (notOverride and notIsReading)\n if (notIsReading):\n if (notInConstructor):\n if (not (topObject.__dict__.get('isModifiable'))):\n raise ApiError(\"\"\"%s.setName:\n Storage not modifiable\"\"\" % self.qualifiedName\n + \": %s\" % (topObject,)\n )\n\n if (dataDict.get('isDeleted')):\n raise ApiError(\"\"\"%s.setName:\n called on deleted object\"\"\" % self.qualifiedName\n )\n\n if (value == currentValue):\n return\n\n self.varName = value\n if (notIsReading):\n if (notInConstructor):\n topObject.__dict__['isModified'] = True", "def _name_check(self, name, *args, chk_dict=None):\n if name is not None and len(name) > 0:\n lst = list(args)\n lst.append(name)\n if self._key_check(lst, chk_dict=chk_dict):\n result = EnvironmentDict._EXISTS\n else:\n result = EnvironmentDict._VALID\n else:\n result = EnvironmentDict._INVALID\n raise ValueError(f'Invalid name: {name}')\n return result", "def test_name_validation(self, attr):\n kwargs = {'kind': POSITIONAL_ONLY, attr: 3}\n with pytest.raises(TypeError) as excinfo:\n FParameter(**kwargs)\n assert 
excinfo.value.args[0] == \\\n '{} must be a str, not a {}'.format(attr, 3)", "def test_unique_item_properties_failed(self):\n check_value = [{\"a\": 1, \"b\": 3}, {\"a\": 1, \"b\": 2}]\n\n with pytest.raises(AssertionError):\n unique_item_properties(check_value, \"a\")", "def _check_name(self, symbol):\n if symbol.type == self.scanner.NAME:\n return True\n else:\n return False", "def test_name_returner(self):\n test = self.data.name_returner()\n self.assertIn(('Trevor', 'Harvey'), test)\n self.assertIn(('Nik', 'Silver'), test)" ]
[ "0.72171146", "0.6880246", "0.6773977", "0.6383563", "0.6352", "0.6319917", "0.62903845", "0.6150476", "0.60850054", "0.60359854", "0.60222614", "0.5974848", "0.5964395", "0.59447414", "0.5942534", "0.59195846", "0.59191364", "0.5898834", "0.58966243", "0.5865687", "0.5844299", "0.5837067", "0.58190876", "0.5812556", "0.58083355", "0.58058465", "0.57844996", "0.57691634", "0.5719298", "0.5715222", "0.57096416", "0.5698853", "0.5693931", "0.5686329", "0.5679273", "0.566962", "0.5648079", "0.5647352", "0.56388485", "0.56384635", "0.5625968", "0.5620058", "0.5616981", "0.55975693", "0.55973667", "0.5571425", "0.5548816", "0.55369425", "0.5533843", "0.5531167", "0.5518983", "0.5513655", "0.5511047", "0.5507225", "0.5496487", "0.549247", "0.54840446", "0.54817796", "0.5477511", "0.5475861", "0.54718345", "0.5467018", "0.5464534", "0.546378", "0.54610986", "0.5446647", "0.54458493", "0.54427654", "0.54423475", "0.5440325", "0.5435829", "0.54345834", "0.54334337", "0.54302275", "0.5428316", "0.54281", "0.5423855", "0.5423855", "0.54204595", "0.54182494", "0.5414868", "0.5398972", "0.5398853", "0.5397711", "0.5395053", "0.53935754", "0.53926665", "0.53926665", "0.53926665", "0.5391117", "0.5381291", "0.53810346", "0.53809595", "0.53722274", "0.5370931", "0.5369512", "0.536641", "0.53645647", "0.5356146", "0.534965" ]
0.7843017
0
Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here.
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
    if output_folder is None:
        output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
    evaluator_list = []
    evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
    if evaluator_type == "sem_seg":
        return SemSegEvaluator(
            dataset_name, distributed=True, output_dir=output_folder, num_classes=4, ignore_label=255
        )
    if evaluator_type == "cityscapes_sem_seg":
        assert (
            torch.cuda.device_count() >= comm.get_rank()
        ), "CityscapesEvaluator currently do not work with multiple machines."
        return CityscapesSemSegEvaluator(dataset_name)
    if len(evaluator_list) == 0:
        raise NotImplementedError(
            "no Evaluator for the dataset {} with the type {}".format(
                dataset_name, evaluator_type
            )
        )
    if len(evaluator_list) == 1:
        return evaluator_list[0]
    return DatasetEvaluators(evaluator_list)
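For a custom dataset, an evaluator can instead be created manually in the training or test script, skipping the evaluator_type lookup entirely. The following is a minimal sketch assuming a detectron2-style setup: "my_dataset_val" is a hypothetical registered dataset name, and the keyword-argument COCOEvaluator constructor is assumed (older detectron2 releases use the positional (dataset_name, cfg, distributed, output_folder) form shown above).

import os

from detectron2.data import build_detection_test_loader
from detectron2.evaluation import COCOEvaluator, inference_on_dataset


def evaluate_custom_dataset(cfg, model, dataset_name="my_dataset_val"):
    # Write evaluation artifacts next to the other inference outputs.
    output_dir = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
    # COCOEvaluator works for any dataset registered in COCO-style format;
    # swap in SemSegEvaluator or a custom DatasetEvaluator subclass as needed.
    evaluator = COCOEvaluator(dataset_name, output_dir=output_dir)
    val_loader = build_detection_test_loader(cfg, dataset_name)
    # Returns a dict of metrics, e.g. {"bbox": {"AP": ...}} for COCO-style evaluation.
    return inference_on_dataset(model, val_loader, evaluator)

Routing the evaluator through inference_on_dataset keeps the evaluation loop, distributed gathering, and logging consistent with the Trainer-based path, while leaving the choice of evaluator class entirely to the script.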
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"sem_seg\", \"coco_panoptic_seg\"]:\n evaluator_list.append(\n SemSegEvaluator(\n dataset_name,\n distributed=True,\n num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,\n ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n output_dir=output_folder,\n )\n )\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if evaluator_type == \"coco_panoptic_seg\":\n evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))\n if evaluator_type == \"cityscapes_instance\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesInstanceEvaluator(dataset_name)\n if evaluator_type == \"cityscapes_sem_seg\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesSemSegEvaluator(dataset_name)\n elif evaluator_type == \"pascal_voc\":\n return PascalVOCDetectionEvaluator(dataset_name)\n elif evaluator_type == \"lvis\":\n return LVISEvaluator(dataset_name, cfg, True, output_folder)\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n elif len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)", "def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"sem_seg\", \"coco_panoptic_seg\"]:\n evaluator_list.append(\n SemSegEvaluator(\n dataset_name,\n distributed=True,\n num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,\n ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n output_dir=output_folder,\n )\n )\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if evaluator_type == \"coco_panoptic_seg\":\n evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))\n if evaluator_type == \"cityscapes_instance\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesInstanceEvaluator(dataset_name)\n if evaluator_type == \"cityscapes_sem_seg\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesSemSegEvaluator(dataset_name)\n elif evaluator_type == \"pascal_voc\":\n return PascalVOCDetectionEvaluator(dataset_name)\n elif evaluator_type == \"lvis\":\n return LVISEvaluator(dataset_name, cfg, True, output_folder)\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n elif len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)", "def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, 
\"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n if len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)", "def custom_build_evaluator(cls, cfg, dataset_name, dataset, output_folder=None):\n dump_train = cfg.GLOBAL.DUMP_TRAIN\n return build_evaluator(cfg, dataset_name, dataset, output_folder, dump=dump_train)", "def evaluate(self, dataset):\n\t\tpass", "def default_builder(self, dataset_name, eval_dataset_name):\n builder = tfds.builder(dataset_name, data_dir=self.data_dir)\n self.default_builder_obj = builder\n shard_spec = self.build_shard_spec()\n logging.info('Training on TFDS dataset %s with split %s',\n dataset_name, 'train' + shard_spec)\n train_data = builder.as_dataset(split='train' + shard_spec,\n shuffle_files=self.shuffle_train_files)\n\n if eval_dataset_name is None:\n logging.info('Evaluating on TFDS dataset %s with split %s',\n dataset_name, 'validation' + shard_spec)\n eval_data = self.default_eval_builder(builder, shard_spec)\n else:\n eval_dataset, *eval_split = eval_dataset_name.split(':')\n if not eval_split:\n eval_split = 'validation'\n else:\n eval_split = eval_split[0]\n logging.info('Evaluating on TFDS dataset %s with split %s',\n eval_dataset, eval_split + shard_spec)\n eval_builder = tfds.builder(eval_dataset, data_dir=self.data_dir)\n eval_data = eval_builder.as_dataset(split=eval_split + shard_spec,\n shuffle_files=False)\n return train_data, eval_data", "def evaluate(self, eval_data, eval_labels, eval_input_fn=\"default\"):\n # Validations:\n # If it is of type str, make sure is a valid\n if isinstance(eval_input_fn, str):\n # We use a list in case we want to extend in the future.\n if eval_input_fn in [\"default\"]:\n if eval_input_fn == \"default\":\n # pylint: disable=no-member\n eval_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": eval_data},\n y=eval_labels,\n num_epochs=1,\n shuffle=False\n )\n\n eval_res = self.classifier.evaluate(input_fn=eval_input_fn)\n return eval_res", "def eval(self, dataset=None, criterion=None):\n # Recover the defaults, if missing\n dataset, criterion = self._resolve_defaults(testset=dataset, criterion=criterion)\n # Sample the test batch\n inputs, targets = dataset.sample(self._config)\n # Compute and return the evaluation result\n return criterion(self.run(inputs), targets)", "def build_evaluator(cfg: CfgNode) -> EvaluatorBase:\n name = cfg[\"name\"]\n evaluator = simple_build(name, cfg, EVALUATORS)\n return evaluator", "def _evaluate(dataset: dict, name: str, metrics=None):\n if metrics is None:\n metrics = ['Accuracy', 'AUROC', 'AUPRC', 'Precision', 'Recall', 'F1', 'F2']\n measures = [dataset[metric] for metric in metrics]\n measures.insert(0, name)\n return measures", "def _create_evaluators(self):\n pass", "def _get_iterator(self, dataset_type, eval_mode, **kwargs):", "def get_eval_data() -> GraphDataset:\n _load_data_if_needed()\n return eval_data", "def _create_default_metrics_eval_func(self, grid_points):\n eval_func = set([self._get_default_eval_func(point.metrics) for point in grid_points])\n if len(eval_func) != 1:\n raise RuntimeError(\"Error in retrieving evaluation function\")\n return list(eval_func)[0]", 
"def build_train_and_eval_datasets(self,\n dataset_name,\n eval_dataset_name,\n paracrawl_size=PARACRAWL_DEFAULT_SIZE,\n newscommentary_size=None,\n newscomment_sample_ratio=1.0):\n self.paracrawl_size = paracrawl_size\n if newscommentary_size:\n self.newscommentary_size = newscommentary_size\n self.newscomment_sample_ratio = newscomment_sample_ratio\n if dataset_name in self.custom_dataset.keys():\n logging.info('Building custom datatset: %s', dataset_name)\n return self.custom_dataset[dataset_name]()\n else:\n logging.info('Building DEFAULT datatset: %s', dataset_name)\n return self.default_builder(dataset_name, eval_dataset_name)", "def load_eval_datasets(cfg):\n # Temporarily change dataset type to be frame_by_frame\n cur_dataset_type = cfg.dataset_type\n if cfg.dataset_type == 'graph_net':\n cfg.dataset_type = 'single_frame_graph_net'\n else:\n cfg.dataset_type = 'frame_by_frame'\n\n # Get the evaluation (frame by frame) datasets\n train_set, val_set, test_set = get_split_datasets(cfg.dataset)\n\n # Restore dataset type\n cfg.dataset_type = cur_dataset_type\n return train_set, val_set, test_set", "def evaluator(*args, clusters: bool=True, configuration: Union[AnyStr, List[AnyStr], bool]=\"\",\n enable: bool=True, info: bool=True, name: Union[AnyStr, bool]=\"\", nodeType:\n Union[AnyStr, List[AnyStr], bool]=\"\", nodeTypeChildren: bool=True, priority:\n Union[int, bool]=0, valueName: Union[AnyStr, bool]=\"\", q=True, query=True,\n **kwargs)->Union[List[AnyStr], Any]:\n pass", "def create_dataset(dataset_type, soruce, opts): \n\n p = PreProcessor(dataset_type, opts)\n\n # If we are NOT running \"implementation.py\", we read the data from file\n if dataset_type == \"train\" or dataset_type == \"dev\" or dataset_type == \"test\":\n path_to_data = soruce\n p.read_labelled_data(path_to_data) \n # Otherwise, we read the sentence that \"implementation.py\" gave us\n elif dataset_type == \"submit\":\n submission_sentence = soruce\n p.read_test_data(submission_sentence)\n\n # Encode all the data to a list of torchTensors\n encoded_tokens, encoded_pred, encoded_tokens_pos, encoded_labels = p.encode_all_data()\n # Create SRL dataset\n dataset = SRLDataset(x=encoded_tokens, pr=encoded_pred, p=encoded_tokens_pos, y=encoded_labels)\n print(\"{} dataset size is {}\".format(dataset_type, len(dataset)))\n\n if dataset_type == \"train\" or dataset_type == \"dev\" or dataset_type == \"test\":\n return dataset\n elif dataset_type == \"submit\":\n return dataset, p.list_l_original_predicates", "def _evaluate(model):\n _recompile(model)\n if isinstance(eval_dataset, tuple):\n eval_images, eval_labels = eval_dataset\n return model.evaluate(\n eval_images, eval_labels, verbose=verbose, return_dict=True)\n else:\n return model.evaluate(eval_dataset, verbose=verbose, return_dict=True)", "def evaluate(self, test_data, test_labels):\n raise NotImplementedError", "def create_data_set(file_location, eval_data):\n data = np.load(os.path.join(DATA_LOCATION, file_location))\n print(file_location)\n for name in cell_names:\n print(name+\":\"+str(data[name].shape[0]))\n data_set = dict()\n data_set_labels = dict()\n for i, name in enumerate(cell_names):\n if not eval_data:\n # make the random data consistent across runs\n np.random.seed(1)\n # Shuffle the data\n perm = np.arange(data[name].shape[0])\n np.random.shuffle(perm)\n data_set[name] = data[name][perm]\n else:\n data_set[name] = data[name]\n data_set_labels[name] = to_categorical(np.full((data_set[name].shape[0], 1), i, dtype=int), NUM_CLASSES)\n return 
data_set, data_set_labels", "def evaluator(test_config: TestConfig, criterion: nn.Module, model: nn.Module,\n device: torch.device) -> Engine:\n metrics, eval_metric, *_ = test_config\n metrics['loss'] = Loss(criterion,\n output_transform=lambda data: (data[0], data[1]))\n val_evaluator = create_supervised_evaluator(model, metrics, device,\n prepare_batch=prepare_batch)\n return val_evaluator", "def evaluate(parser):\n required_args = (\n 'train_tfrecord',\n 'valid_tfrecord',\n 'predicted_data',\n 'actual_data',\n )\n cli_args = add_all_args(parser, EVALUATION, *required_args)\n evaluator = Evaluator(\n input_shape=cli_args.input_shape,\n model_configuration=cli_args.model_cfg,\n train_tf_record=cli_args.train_tfrecord,\n valid_tf_record=cli_args.valid_tfrecord,\n classes_file=cli_args.classes,\n max_boxes=cli_args.max_boxes,\n iou_threshold=cli_args.iou_threshold,\n score_threshold=cli_args.score_threshold,\n )\n predicted = pd.read_csv(cli_args.predicted_data)\n actual = pd.read_csv(cli_args.actual_data)\n evaluator.calculate_map(\n prediction_data=predicted,\n actual_data=actual,\n min_overlaps=cli_args.min_overlaps,\n display_stats=cli_args.display_stats,\n save_figs=cli_args.save_figs,\n plot_results=cli_args.plot_stats,\n )", "def eval(self):\n dataset = self.config.dataset\n class_config = dataset.class_config\n # it might make sense to make excluded_groups a field in an EvalConfig\n # in the future\n excluded_groups = ['train_scenes']\n\n scene_id_to_cfg = {s.id: s for s in dataset.all_scenes}\n\n @lru_cache(maxsize=len(dataset.all_scenes))\n def build_scene(scene_id: str) -> Scene:\n cfg = scene_id_to_cfg[scene_id]\n scene = cfg.build(\n class_config, self.tmp_dir, use_transformers=True)\n return scene\n\n # build and run each EvaluatorConfig for each scene group\n for e in self.config.evaluators:\n for group_name, group_ids in dataset.scene_groups.items():\n if group_name in excluded_groups:\n continue\n if len(group_ids) == 0:\n log.info(f'Skipping scene group \"{group_name}\". '\n 'Empty scene group.')\n continue\n group_scenes = (build_scene(id) for id in group_ids)\n evaluator = e.build(\n class_config, scene_group=(group_name, group_scenes))\n\n log.info(f'Running {type(evaluator).__name__} on '\n f'scene group \"{group_name}\"...')\n try:\n evaluator.process(group_scenes, self.tmp_dir)\n except FileNotFoundError:\n log.warn(f'Skipping scene group \"{group_name}\". 
'\n 'Either labels or predictions are missing for '\n 'some scene.')", "def evaluate(self, eval_data, eval_labels, eval_input_fn):\n raise NotImplementedError(\"Method must be implemented by subclass\")", "def eval_test(eval_fn, group1, group2, verbose = 0):\n # Only allow known-safe eval_fn's\n if eval_fn in [ 'my_classifier' ]:\n return evaluate(globals()[eval_fn], group1, group2, verbose)\n else:\n raise Exception(\"Error: Tester tried to use an invalid evaluation function: '%s'\" % eval_fn)", "def eval_test(eval_fn, group1, group2, verbose = 0):\n # Only allow known-safe eval_fn's\n if eval_fn in [ 'my_classifier' ]:\n return evaluate(globals()[eval_fn], group1, group2, verbose)\n else:\n raise Exception, \"Error: Tester tried to use an invalid evaluation function: '%s'\" % eval_fn", "def eval_test(eval_fn, group1, group2, verbose = 0):\n # Only allow known-safe eval_fn's\n if eval_fn in [ 'my_classifier' ]:\n return evaluate(globals()[eval_fn], group1, group2, verbose)\n else:\n raise Exception, \"Error: Tester tried to use an invalid evaluation function: '%s'\" % eval_fn", "def CreateValidationDataset(all_arrays):\n validation_dataset = Dataset()\n validation_dataset._addData(all_arrays[2])\n validation_dataset._addData(all_arrays[7])\n return validation_dataset", "def __prepare_val_dataset(dataset, save_prefix='tmp', data_shape=512, verbose=True):\r\n supported_datasets = ['coco', 'voc']\r\n if isinstance(dataset, ExternalDataset):\r\n if dataset.dataset_type.lower() not in supported_datasets:\r\n raise UserWarning(\"dataset_type must be one of: \", supported_datasets)\r\n\r\n dataset_root = dataset.path\r\n\r\n if dataset.dataset_type.lower() == 'voc':\r\n from gluoncv.data import VOCDetection\r\n\r\n dataset = VOCDetection(root=dataset_root,\r\n splits=[(2007, 'test')])\r\n val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=dataset.classes)\r\n return dataset, val_metric\r\n elif dataset.dataset_type.lower() == 'coco':\r\n from gluoncv.data import COCODetection\r\n\r\n dataset = COCODetection(root=dataset_root, splits='instances_val2017',\r\n skip_empty=False)\r\n val_metric = COCODetectionMetric(\r\n dataset, os.path.join(save_prefix, 'eval'), cleanup=False, data_shape=(data_shape, data_shape))\r\n return dataset, val_metric\r\n elif isinstance(dataset, DetectionDataset) or issubclass(type(dataset), DetectionDataset):\r\n eval_metric = DetectionDatasetCOCOEval(dataset.classes, data_shape)\r\n dataset.set_image_transform(ImageToNDArrayTransform())\r\n dataset.set_target_transform(BoundingBoxListToNumpyArray())\r\n return dataset, eval_metric\r\n else:\r\n print(\"Dataset type {} not supported\".format(type(dataset)))\r\n return dataset, None", "def runner_decrator(cls):\n\n def custom_build_evaluator(cls, cfg, dataset_name, dataset, output_folder=None):\n \"\"\"\n Create evaluator(s) for a given dataset.\n This uses the special metadata \"evaluator_type\" associated with each builtin dataset.\n For your own dataset, you can simply create an evaluator manually in your\n script and do not have to worry about the hacky if-else logic here.\n \"\"\"\n dump_train = cfg.GLOBAL.DUMP_TRAIN\n return build_evaluator(cfg, dataset_name, dataset, output_folder, dump=dump_train)\n\n def custom_test_with_TTA(cls, cfg, model):\n # In the end of training, run an evaluation with TTA\n # Only support some R-CNN models.\n logger.info(\"Running inference with test-time augmentation ...\")\n model = GeneralizedRCNNWithTTA(cfg, model)\n res = cls.test(cfg, model, 
output_folder=os.path.join(cfg.OUTPUT_DIR, \"inference_TTA\"))\n res = OrderedDict({k + \"_TTA\": v for k, v in res.items()})\n return res\n\n cls.build_evaluator = classmethod(custom_build_evaluator)\n cls.test_with_TTA = classmethod(custom_test_with_TTA)\n\n return cls", "def get_eval_dataloader(self, eval_dataset=None):\n if self.eval_dataloader is not None:\n return self.eval_dataloader\n\n if eval_dataset is None and self.eval_dataset is None:\n raise ValueError(\"Trainer: evaluation requires an eval_dataset.\")\n eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset\n assert self.schema is not None, \"schema is required to generate Eval Dataloader\"\n return T4RecDataLoader.parse(self.args.data_loader_engine).from_schema(\n self.schema,\n self.eval_dataset_or_path,\n self.args.per_device_eval_batch_size,\n max_sequence_length=self.args.max_sequence_length,\n drop_last=self.args.dataloader_drop_last,\n shuffle=False,\n shuffle_buffer_size=self.args.shuffle_buffer_size,\n )", "def simple_dataset() -> Dataset:\n graph = Dataset()\n graph.default_context.add((EGSCHEME.subject, EGSCHEME.predicate, EGSCHEME.object))\n graph.default_context.add((EGURN.subject, EGURN.predicate, EGURN.object))\n graph.default_context.add((EGDC.subject, EGDC.predicate, Literal(\"typeless\")))\n graph.get_context(EGSCHEME.graph).add(\n (EGSCHEME.subject, EGSCHEME.predicate, EGSCHEME.object)\n )\n graph.get_context(EGSCHEME.graph).add(\n (EGSCHEME.subject, EGSCHEME.predicate, Literal(12))\n )\n graph.get_context(EGSCHEME.graph).add(\n (\n EGDC.subject,\n EGDC.predicate,\n Literal(\"ๆ—ฅๆœฌ่ชžใฎ่กจ่จ˜ไฝ“็ณป\", lang=\"jpx\"),\n )\n )\n graph.get_context(EGSCHEME.graph).add(\n (EGURN.subject, EGSCHEME.predicate, EGSCHEME.subject)\n )\n graph.get_context(EGURN.graph).add(\n (EGSCHEME.subject, EGSCHEME.predicate, EGSCHEME.object)\n )\n graph.get_context(EGURN.graph).add((EGSCHEME.subject, EGDC.predicate, EGDC.object))\n graph.get_context(EGURN.graph).add(\n (EGSCHEME.subject, EGDC.predicate, Literal(\"XSD string\", datatype=XSD.string))\n )\n return graph", "def evaluate(\n self,\n test_dataset: Union[Dataset, InstanceDataset],\n batch_size: int = 16,\n lazy: bool = False,\n output_dir: Optional[Union[str, Path]] = None,\n verbose: bool = True,\n ) -> Dict[str, Any]:\n trainer = Trainer(self, lazy=lazy)\n\n return trainer.test(\n test_dataset, batch_size=batch_size, output_dir=output_dir, verbose=verbose\n )", "def evaluator(model, config, test_dir=None):\n shottype = config.shottype\n dataset = config.data_set\n seed = config.seed\n if test_dir is None:\n test_data_gen_dir, _, _ = _generator_dir(\n config=config, target_gen=\"test\", data_dir=None\n )\n if test_dir is not None:\n print(\"Evaluating directory: '{}'.\".format(test_dir))\n test_data_gen_dir, _, _ = _generator_dir(\n config=config, target_gen=\"test\", data_dir=test_dir\n )\n score = model.evaluate_generator(test_data_gen_dir)\n print(\n \"Test metrics: \"\n \"Loss: {:.4f}, \"\n \"Accuracy: {:.4f}, \"\n \"Top 3 accuracy: {:.4f}\".format(score[0], score[1], score[2])\n )\n return score", "def evaluator(self, candidates, args):\n\t\traise NotImplementedError", "def _generate_datasets(self):\n\n degrade_test = False\n if self._opts['degrade_step'] == 'test':\n degrade_test = True\n\n use_trainset_for_tests = UseTrainForTest.IDENTICAL # can be different in few shot workflow\n\n train_dataset, test_dataset = self._gen_datasets_with_options(self._opts['train_classes'],\n self._opts['test_classes'],\n 
is_superclass=self._opts['superclass'],\n class_proportion=self._opts['class_proportion'],\n degrade_test=degrade_test,\n degrade_type=self._opts['degrade_type'], # only relevant if degrade_test = True\n degrade_val=self._opts['min_val'], # only relevant if degrade_test = True\n recurse_train=self._is_train_recursive(),\n recurse_test=self._is_inference_recursive(),\n num_batch_repeats=self._opts['num_repeats'],\n recurse_iterations=self._opts['recurse_iterations'],\n evaluate_step=self._opts['evaluate'],\n use_trainset_for_tests=use_trainset_for_tests,\n invert_images=self._opts['invert_images'],\n min_val=self._opts['min_val'])\n return train_dataset, test_dataset", "def evaluator(evaluate):\r\n @functools.wraps(evaluate)\r\n def ecspy_evaluator(candidates, args):\r\n fitness = []\r\n for candidate in candidates:\r\n fitness.append(evaluate(candidate, args))\r\n return fitness\r\n ecspy_evaluator.single_evaluation = evaluate\r\n return ecspy_evaluator", "def createDataset(template_name, dataset, dataset_enabled):\n\n # disable FutureWarning from trimboth\n # caused by conflict of scipy 1.10 and numpy 1.15\n warnings.simplefilter(action='ignore', category=FutureWarning)\n\n # determine if there are sufficient images to initialize the dataset\n if(not dataset_enabled and len(dataset[1]) >= 50):\n # if so get values\n dataset_values = np.array(dataset[1])\n\n # use 25% trimmed mean\n filtered_dataset_values = trimboth(dataset_values, 0.25)\n\n # filter out outliers using standard deviation\n #mean = np.mean(dataset_values)\n #std_dev = np.std(dataset_values)\n #filtered_dataset_values = dataset_values[abs(dataset_values - mean) < 2 * std_dev]\n\n # determine standard deviation and mean for filtered dataset\n filtered_mean = np.mean(filtered_dataset_values)\n filtered_std_dev = np.std(filtered_dataset_values)\n num_filtered_values = filtered_dataset_values.size\n\n # update dataset array\n dataset = [[filtered_mean, filtered_std_dev, num_filtered_values],[]]\n\n # output to user\n print(\"\\n\\nDataset Created:\")\n print(\"Mean: %0.2f MSE\" % filtered_mean)\n print(\"Standard Deviation: %0.2f MSE\" % filtered_std_dev)\n print(\"Number of Values: %d\" % num_filtered_values)\n\n # convert from numpy to list\n elif dataset_enabled: dataset = np.array(dataset).tolist()\n\n # write changes to config file\n config = configparser.ConfigParser()\n config.read('./AppData/preferences.cfg')\n config.set('Template Registration Dataset', template_name, \\\n str(dataset).replace(\"array(\", \"\").replace(\")\", \"\"))\n with open('./AppData/preferences.cfg', 'w') as configfile:\n config.write(configfile)", "def evaluator(self, candidates, args):\r\n raise NotImplementedError", "def create_dataset(dataset_name):\n dataset_as_lower = dataset_name.lower()\n if dataset_as_lower in _datasets_from_keras.keys():\n data_details = _datasets_from_keras[dataset_as_lower]\n (x_train, y_train), (x_test, y_test) = data_details['data'].load_data()\n else:\n raise IOError(\"Dataset {0} is NOT supported\".format(dataset_name))\n\n # Performing pre-processing specifically for images datasets.\n if data_details['data type'] == 'image':\n x_train = _pre_process_images(x_train, data_details)\n x_test = _pre_process_images(x_test, data_details)\n\n return x_train, y_train, x_test, y_test", "def evaluationset(self, batchsize=None, flatten=True):\n if batchsize is None:\n batchsize = self.batchsize\n\n return self.GENERATOR(self.x_eval, self.y_eval, batchsize, flatten=flatten, evaluate=True)", "def 
create_multi_node_evaluator(actual_evaluator, communicator):\n\n actual_evaluator._mn_original_evaluate = actual_evaluator.evaluate\n actual_evaluator._mn_communicator = communicator\n\n def new_evaluate(self):\n local_mean_dict = self._mn_original_evaluate()\n global_mean_dict = {\n name:\n self._mn_communicator.allreduce_obj(\n value) / self._mn_communicator.size\n for name, value in sorted(local_mean_dict.items())\n }\n return global_mean_dict\n\n actual_evaluator.evaluate = six.create_bound_method(\n new_evaluate, actual_evaluator)\n return actual_evaluator", "def build_evaluation(self, predictions, examples, **kwargs):\n return {}", "def make_dataset(dataset_name):\n return {\n\n 'duc': DUCDataset(),\n\n 'icsi-asr': ICSIASRDataset(),\n 'icsi-ht': ICSIHumanTranscriptDataset(),\n\n 'inspec-train': InspectTrainingDataset(),\n 'inspec-val': InspectValidationDataset(),\n 'inspec-test': InspectTestDataset(),\n\n 'nus': NUSDataset()\n\n }[dataset_name]", "def make_eval_input_tensors(dataset, dataset_name, \n trial_split='val',\n update_params=None,\n save_file=True,\n return_dict=True,\n save_path=\"eval_input.h5\"):\n assert isinstance(dataset, NWBDataset), \"`dataset` must be an instance of NWBDataset\"\n assert dataset_name in PARAMS.keys(), f\"`dataset_name` must be one of {list(PARAMS.keys())}\"\n assert isinstance(trial_split, (pd.Series, np.ndarray, list)) or trial_split in ['train', 'val', 'test'], \\\n \"Invalid `trial_split` argument. Please refer to the documentation for valid choices\"\n\n # Fetch and update params\n params = PARAMS[dataset_name].copy()\n if update_params is not None:\n params.update(update_params)\n # Add filename extension if necessary\n if not save_path.endswith('.h5'):\n save_path = save_path + '.h5'\n\n # Unpack params\n spk_field = params['spk_field']\n hospk_field = params['hospk_field']\n make_params = params['make_params'].copy()\n make_params['allow_nans'] = True\n \n # Prep mask\n trial_mask = _prep_mask(dataset, trial_split)\n\n # Make output spiking arrays and put into data_dict\n if not np.any(dataset.trial_info[trial_mask].split == 'test'):\n eval_dict = make_stacked_array(dataset, [spk_field, hospk_field], make_params, trial_mask)\n data_dict = {\n 'eval_spikes_heldin': eval_dict[spk_field],\n 'eval_spikes_heldout': eval_dict[hospk_field],\n }\n else:\n eval_dict = make_stacked_array(dataset, [spk_field], make_params, trial_mask)\n data_dict = {\n 'eval_spikes_heldin': eval_dict[spk_field],\n }\n\n # Save and return data\n if save_file:\n save_to_h5(data_dict, save_path, overwrite=True)\n if return_dict:\n return data_dict", "def evaluate(self, data, category, dims=None, overall=True):\n n_data = len(data)\n eval_scores = [{} for _ in range(n_data)]\n\n if dims == None:\n eval_dims = self.dimensions\n else:\n assert isinstance(dims, list)\n eval_dims = dims\n\n for dim in eval_dims:\n output_list, ref_list = [], []\n for i in range(n_data):\n output_list.append(data[i]['system_output'])\n ref_list.append(data[i]['reference'])\n\n input_list = add_question(dimension=dim, output=output_list, ref=ref_list, task=self.task)\n score = self.scorer.score(input_list, self.task, category, dim)\n\n for i in range(n_data):\n eval_scores[i][dim] = score[i]\n\n # Customize your overall score here.\n if overall == True:\n for i in range(n_data):\n eval_scores[i]['overall'] = np.mean(list(eval_scores[i].values()))\n\n return eval_scores", "def _get_evaluators(self):\n if self._evaluator_overrides is not None:\n return self._evaluator_overrides\n return 
self._create_evaluators()", "def evaluate(self, data, category, dims=None, overall=True):\n n_data = len(data)\n eval_scores = [{} for _ in range(n_data)]\n\n if dims == None:\n eval_dims = self.dimensions\n else:\n assert isinstance(dims, list)\n eval_dims = dims\n\n for dim in eval_dims:\n # Calculate average sentence-level scores for 'consistency' and 'fluency'\n if dim == 'consistency' or dim == 'fluency':\n src_list, output_list = [], []\n n_sents = [] # the number of sentences in each generated summary\n for i in range(n_data):\n source = data[i]['source']\n system_outputs = sent_tokenize(data[i]['system_output'])\n n_sents.append(len(system_outputs))\n for j in range(len(system_outputs)):\n src_list.append(source)\n output_list.append(system_outputs[j])\n input_list = add_question(dimension=dim, output=output_list, src=src_list, task=self.task)\n sent_score = self.scorer.score(input_list, self.task, category, dim)\n\n # Get average score for each sample\n start_idx = 0\n score = []\n for cur_n_sent in n_sents:\n # prevent denominator from being 0\n score.append(sum(sent_score[start_idx:start_idx + cur_n_sent]) / (cur_n_sent + 1e-6))\n start_idx += cur_n_sent\n\n # Calculate summary-level score for 'coherence' and 'relevance'\n elif dim == 'coherence' or dim == 'relevance':\n src_list, output_list, ref_list = [], [], []\n for i in range(n_data):\n src_list.append(data[i]['source'])\n output_list.append(data[i]['system_output'])\n if dim == 'relevance':\n ref_list.append(data[i]['reference'])\n input_list = add_question(dimension=dim, output=output_list, src=src_list, ref=ref_list, task=self.task)\n score = self.scorer.score(input_list, self.task, category, dim)\n\n # Please customize other dimensions here for summarization\n else:\n raise NotImplementedError('The input format for this dimension is still undefined. 
\\\n Please customize it first.')\n\n for i in range(n_data):\n eval_scores[i][dim] = score[i]\n\n # Customize your overall score here.\n if overall == True:\n for i in range(n_data):\n eval_scores[i]['overall'] = np.mean(list(eval_scores[i].values()))\n\n return eval_scores", "def get_dataset(args):\n\n if args['experiment']['dataset'] == Dataset.mindsets:\n xs, ys, cs = make_mindsets(mindset_sizes=args['dataset']['mindset_sizes'],\n nb_questions=args['dataset']['nb_questions'],\n nb_useless=args['dataset']['nb_useless'],\n noise=args['dataset']['noise'],\n seed=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys, cs=cs)\n\n if args['experiment']['dataset'] == Dataset.questionnaire_likert:\n xs, ys, cs = make_likert_questionnaire(nb_samples=args['dataset']['nb_samples'],\n nb_features=args['dataset']['nb_features'],\n nb_mindsets=args['dataset']['nb_mindsets'],\n centers=args['dataset']['centers'],\n range_answers=args['dataset']['range_answers'],\n seed=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys, cs=cs)\n\n if args['experiment']['dataset'] == Dataset.retinal:\n xs, ys = load_RETINAL(root_path=args['root_dir'],\n nb_bins=args['dataset']['nb_bins'],\n max_idx=args['dataset']['max_idx'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.moons:\n xs, ys = make_moons(n_samples=args['dataset']['n_samples'],\n noise=args['dataset']['noise'],\n random_state=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.breast_cancer_wisconsin:\n xs, ys = load_CANCER(args['dataset']['nb_bins'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.SBM:\n A, ys, G = load_SBM(block_sizes=args['dataset']['block_sizes'],\n p_in=args['dataset']['p'],\n p_out=args['dataset']['q'],\n seed=args['experiment']['seed'])\n\n return Data(ys=ys, A=A, G=G)\n\n if args['experiment']['dataset'] == Dataset.gaussian_mixture:\n xs, ys = make_blobs(n_samples=args['dataset']['blob_sizes'],\n centers=args['dataset']['blob_centers'],\n n_features=args['dataset']['blob_centers'],\n cluster_std=args['dataset']['blob_variances'],\n random_state=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.LFR:\n A, ys, G = load_LFR(nb_nodes=args['dataset']['nb_nodes'],\n tau1=args['dataset']['tau1'],\n tau2=args['dataset']['tau2'],\n mu=args['dataset']['mu'],\n average_degree=args['dataset']['average_degree'],\n min_community=args['dataset']['min_community'],\n seed=args['experiment']['seed'])\n\n return Data(ys=ys, A=A, G=G)\n\n if args['experiment']['dataset'] == Dataset.wave:\n df = pd.read_csv('datasets/waveform.csv')\n xs = df[df.columns[:-1]].to_numpy()\n ys = df[df.columns[-1]].to_numpy()\n\n return Data(xs=xs, ys=ys)\n\n raise ValueError('Wrong name for a dataset')", "def evaluate(self, dataset):\n return self.model.evaluate(dataset.X_val, dataset.y_val)", "def get_eval_input_fn(self, input_name):\n def eval_input_fn():\n # Get filenames\n data_dir = pathlib.Path(self._TEST_DATA_DIR)\n list_ds = tf.data.Dataset.list_files(str(data_dir / '*'))\n\n # Create data pre-processing functions\n funcs = self._get_data_preprocessing_fns()\n\n # Get labeled dataset\n ds = list_ds.map(funcs.process_path, num_parallel_calls=AUTOTUNE)\n # Format conversion\n ds = ds.map(funcs.convert_format, num_parallel_calls=AUTOTUNE)\n # Resizing\n ds = ds.map(funcs.resize, num_parallel_calls=AUTOTUNE)\n\n # Prepare for tf.estimator\n ds = ds.map(lambda img, label: ({input_name: img}, 
label))\n\n # Batch, prefetch\n ds = ds.batch(self._TEST_BATCH_SIZE)\n ds = ds.prefetch(buffer_size=self._PREFETCH_BUFFER_SIZE)\n\n return ds\n return eval_input_fn", "def _generate_evaluaters(self):\n evaluators = []\n for para_key in self.parameter[1]:\n for value in self.parameter[1][para_key]:\n evaluators.append(evaluaterSearch.evaluaterSearch(self.parameter[2], [para_key, value]))\n self.evaluators = evaluators", "def create_data(storage, df, df_contains='xy', y_col_name=None, y_pred_col_name=None):\n return DataFactory.factories[storage].create(df, df_contains, y_col_name, y_pred_col_name)", "def create_setops_evaluator(\r\n base_model,\r\n classifier,\r\n setops_model,\r\n metrics={},\r\n device=None):\r\n if device:\r\n base_model.to(device)\r\n classifier.to(device)\r\n setops_model.to(device)\r\n\r\n def _inference(engine, batch):\r\n\r\n base_model.eval()\r\n classifier.eval()\r\n setops_model.eval()\r\n\r\n with torch.no_grad():\r\n input_a, input_b, target_a, target_b = _prepare_batch(batch, device=device)\r\n\r\n #\r\n # Apply the classification model\r\n #\r\n embed_a = base_model(input_a)\r\n output_a = classifier(embed_a)\r\n embed_b = base_model(input_b)\r\n output_b = classifier(embed_b)\r\n\r\n #\r\n # Apply the setops model.\r\n #\r\n outputs_setopt = setops_model(embed_a, embed_b)\r\n fake_a, fake_b, a_S_b, b_S_a, a_U_b, b_U_a, a_I_b, b_I_a, \\\r\n a_S_b_b, b_S_a_a, a_I_b_b, b_I_a_a, a_U_b_b, b_U_a_a, \\\r\n a_S_b_I_a, b_S_a_I_b, a_S_a_I_b, b_S_b_I_a = \\\r\n [classifier(o) for o in outputs_setopt]\r\n fake_a_em, fake_b_em = outputs_setopt[:2]\r\n\r\n #\r\n # Calculate the target setops operations\r\n #\r\n target_a_bt = target_a.type(torch.cuda.ByteTensor)\r\n target_b_bt = target_b.type(torch.cuda.ByteTensor)\r\n\r\n target_a_I_b = target_a_bt & target_b_bt\r\n target_a_U_b = target_a_bt | target_b_bt\r\n target_a_S_b = target_a_bt & ~target_a_I_b\r\n target_b_S_a = target_b_bt & ~target_a_I_b\r\n\r\n target_a_I_b = target_a_I_b.type(torch.cuda.FloatTensor)\r\n target_a_U_b = target_a_U_b.type(torch.cuda.FloatTensor)\r\n target_a_S_b = target_a_S_b.type(torch.cuda.FloatTensor)\r\n target_b_S_a = target_b_S_a.type(torch.cuda.FloatTensor)\r\n\r\n return dict(\r\n outputs={\r\n \"real class a\": output_a,\r\n \"real class b\": output_b,\r\n \"fake class a\": fake_a,\r\n \"fake class b\": fake_b,\r\n \"a_S_b class\": a_S_b,\r\n \"b_S_a class\": b_S_a,\r\n \"a_U_b class\": a_U_b,\r\n \"b_U_a class\": b_U_a,\r\n \"a_I_b class\": a_I_b,\r\n \"b_I_a class\": b_I_a,\r\n \"fake embed a\": fake_a_em,\r\n \"fake embed b\": fake_b_em,\r\n },\r\n targets={\r\n \"class a\": target_a,\r\n \"class b\": target_b,\r\n \"a_S_b class\": target_a_S_b,\r\n \"b_S_a class\": target_b_S_a,\r\n \"a_U_b class\": target_a_U_b,\r\n \"a_I_b class\": target_a_I_b,\r\n \"embed a\": embed_a,\r\n \"embed b\": embed_b,\r\n }\r\n )\r\n\r\n engine = Engine(_inference)\r\n\r\n for name, metric in metrics.items():\r\n metric.attach(engine, name)\r\n\r\n return engine", "def evaluate(args, model, tokenizer, eval_dataset, eval_dataloader, task_name, model_type, split, step):\n model.eval()\n processor = MoralStoriesProcessor()\n results = dict()\n softmax = torch.nn.Softmax(dim=1)\n\n # Eval!\n logger.info('***** Running evaluation on the validation / test set *****')\n logger.info(' Num examples = %d', len(eval_dataset))\n logger.info(' Batch size = %d', args.eval_batch_size)\n batch_losses = list()\n eval_loss = 0.0\n micro_loss, macro_loss = 0.0, 0.0\n num_batches, num_tokens = 0, 0\n preds = None\n 
soft_preds = None\n out_label_ids = None\n # Perform a single evaluation step\n for batch in tqdm(eval_dataloader, desc='Evaluating', mininterval=10, ncols=100):\n batch = tuple(t.to(args.device) for t in batch)\n with torch.no_grad():\n if 'gen' not in task_name:\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'token_type_ids': batch[2] if model_type == 'bert' else None,\n 'labels': batch[3]}\n else:\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'labels': batch[3]}\n if 'gpt2' not in model_type:\n # Prepare decoder inputs and labels for enc-dec models\n inputs['labels'] = batch[3][:, 1:].contiguous() # shift\n decoder_input_ids = batch[3][:, :-1].clone() # shift\n decoder_input_ids[decoder_input_ids == -100] = tokenizer.pad_token_id # remove masking\n inputs['decoder_input_ids'] = decoder_input_ids.contiguous()\n\n outputs = model(**inputs)\n\n tmp_eval_loss, logits = outputs[:2]\n soft_logits = softmax(logits)\n eval_loss += tmp_eval_loss.mean().item()\n batch_losses.append(tmp_eval_loss.item())\n\n if 'gen' not in task_name:\n if preds is None:\n preds = logits.detach().cpu().numpy()\n soft_preds = soft_logits.detach().cpu().numpy()\n out_label_ids = inputs['labels'].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n soft_preds = np.append(soft_preds, soft_logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)\n else:\n # Obtain per-token loss for perplexity computation\n batch_loss = get_token_loss(args, logits, batch[3], batch[4], model_type=model_type)\n macro_loss += batch_loss.mean().item()\n micro_loss += batch_loss.sum().item()\n num_batches += 1\n num_tokens += batch_loss.view(-1).shape[0]\n\n # Compute and update evaluation metric values\n if 'gen' not in task_name:\n # Isolate model predictions\n preds = np.argmax(preds, axis=1)\n soft_preds = soft_preds.tolist()\n curr_result = compute_cls_metrics(preds, out_label_ids)\n else:\n macro_perplexity = torch.exp(torch.tensor(macro_loss / num_batches)).item()\n micro_perplexity = torch.exp(torch.tensor(micro_loss / num_tokens)).item()\n curr_result = {'macro_perplexity': macro_perplexity,\n 'micro_perplexity': micro_perplexity}\n\n if len(results.keys()) == 0:\n for k, v in curr_result.items():\n results[k] = [v]\n else:\n for k, v in curr_result.items():\n results[k].append(v)\n\n # Log metrics\n output_eval_file = os.path.join(args.output_dir, 'results_{}_{}.txt'.format(task_name, split))\n with open(output_eval_file, 'a') as writer:\n logger.info('***** Eval results *****')\n writer.write('STEP: {:s}\\n'.format(str(step)))\n for key in sorted(curr_result.keys()):\n logger.info(' %s = %s', key, str(curr_result[key]))\n writer.write('%s = %s\\n' % (key, str(curr_result[key])))\n\n # Log predictions\n if 'gen' not in task_name:\n output_pred_file = \\\n os.path.join(args.output_dir, 'predictions_{}_{}_{}.lst'.format(task_name, split, step))\n with open(output_pred_file, 'w') as writer:\n logger.info('***** Write predictions *****')\n for pred in preds:\n writer.write('{}\\n'.format(processor.get_labels()[pred]))\n\n # Maintain a single metrics file\n if os.path.exists(args.output_dir):\n with open(os.path.join(args.output_dir, 'metrics_{}_{}.json'.format(task_name, split)), 'w') as f:\n f.write(json.dumps(results))\n f.close()\n\n # Report mean dev loss\n mean_eval_loss = eval_loss / len(eval_dataloader)\n logging.info('\\n' + '*' * 10)\n logging.info('Mean development 
loss: {:.4f}'.format(mean_eval_loss))\n logging.info('*' * 10 + '\\n')\n\n return results, mean_eval_loss, preds, soft_preds", "def to_evaluation_dataset(self, path=None, feature_names=None) -> EvaluationDataset:\n return EvaluationDataset(\n data=self._features,\n targets=self._targets,\n path=path,\n feature_names=feature_names,\n )", "def test_dataset_autogen(autogen_dataset):\n train_dummy = \"eget, venenatis a, magna. Lorem ipsum dolor sit amet, consectetuer\"\n val_dummy = \"leo. Vivamus nibh dolor, nonummy ac, feugiat non, lobortis quis,\"\n test_dummy = \"turpis egestas. Aliquam fringilla cursus purus. Nullam scelerisque neque sed\"\n\n assert autogen_dataset.train[0][0] == train_dummy\n assert autogen_dataset.train[0][1] == '8'\n assert len(autogen_dataset.train) == 64\n\n assert autogen_dataset.val[0][0] == val_dummy\n assert autogen_dataset.val[0][1] == '1'\n assert len(autogen_dataset.val) == 16\n\n assert autogen_dataset.test[0][0] == test_dummy\n assert autogen_dataset.test[0][1] == '6'\n assert len(autogen_dataset.test) == 20", "def specific_evaluator(self, evaluator: Path, bundle: Bundle):\n pass", "def make_eval_target_tensors(dataset, dataset_name, \n train_trial_split='train',\n eval_trial_split='val',\n update_params=None,\n save_file=True,\n return_dict=True,\n save_path=\"target_data.h5\",\n include_psth=False):\n assert isinstance(dataset, NWBDataset), \"`dataset` must be an instance of NWBDataset\"\n assert dataset_name in PARAMS.keys(), f\"`dataset_name` must be one of {list(PARAMS.keys())}\"\n assert isinstance(train_trial_split, (pd.Series, np.ndarray, list)) or train_trial_split in ['train', 'val', 'test'], \\\n \"Invalid `train_trial_split` argument. Please refer to the documentation for valid choices\"\n assert isinstance(eval_trial_split, (pd.Series, np.ndarray, list)) or eval_trial_split in ['train', 'val', 'test'], \\\n \"Invalid `eval_trial_split` argument. 
Please refer to the documentation for valid choices\"\n \n # Fetch and update params\n params = PARAMS[dataset_name].copy()\n if update_params is not None:\n params.update(update_params)\n # Add filename extension if necessary\n if not save_path.endswith('.h5'):\n save_path = save_path + '.h5'\n\n # unpack params\n spk_field = params['spk_field']\n hospk_field = params['hospk_field']\n make_params = params['eval_make_params'].copy()\n behavior_source = params['behavior_source']\n behavior_field = params['behavior_field']\n behavior_make_params = _prep_behavior(dataset, params.get('lag', None), make_params)\n eval_tensor_params = params.get('eval_tensor_params', {}).copy()\n fp_len = params['fp_len']\n fp_steps = fp_len / dataset.bin_width\n\n # Properly name output fields based on submission bin width\n suf = '' if (dataset.bin_width == 5) else f'_{dataset.bin_width}'\n \n # Prep masks\n train_mask = _prep_mask(dataset, train_trial_split)\n eval_mask = _prep_mask(dataset, eval_trial_split)\n if isinstance(eval_trial_split, str) and eval_trial_split == 'test':\n ignore_mask = dataset.trial_info.split == 'none'\n else:\n ignore_mask = ~(train_mask | eval_mask)\n\n if not ('align_field' in make_params and 'align_range' in make_params):\n # Stack jagged arrays by padding with NaNs if uneven trials\n train_dict = make_jagged_array(dataset, [hospk_field], make_params, train_mask, **eval_tensor_params)[0]\n eval_dict = make_jagged_array(dataset, [hospk_field], make_params, eval_mask, **eval_tensor_params)[0]\n else:\n # Make standard 3d arrays\n eval_dict = make_stacked_array(dataset, [hospk_field], make_params, eval_mask)\n if behavior_source == 'data':\n # Use `make_jagged_arrays` for RTT, in case some data is cut short at edges\n btrain_dict = make_jagged_array(dataset, [behavior_field], behavior_make_params, train_mask)[0]\n beval_dict = make_jagged_array(dataset, [behavior_field], behavior_make_params, eval_mask)[0]\n \n # Retrieve behavioral data\n if behavior_source == 'trial_info':\n train_behavior = dataset.trial_info[train_mask][behavior_field].to_numpy().astype('float')\n eval_behavior = dataset.trial_info[eval_mask][behavior_field].to_numpy().astype('float')\n else:\n train_behavior = btrain_dict[behavior_field]\n eval_behavior = beval_dict[behavior_field]\n # Mask some behavioral data if desired\n if 'behavior_mask' in params:\n if callable(params['behavior_mask']):\n train_behavior_mask = params['behavior_mask'](dataset.trial_info[train_mask])\n eval_behavior_mask = params['behavior_mask'](dataset.trial_info[eval_mask])\n else:\n train_behavior_mask, eval_behavior_mask = params['behavior_mask']\n train_behavior[~train_behavior_mask] = np.nan\n eval_behavior[~eval_behavior_mask] = np.nan\n \n # Prepare forward prediction spiking data\n fp_make_params = _prep_fp(make_params, fp_steps, dataset.bin_width)\n fp_dict = make_stacked_array(dataset, [spk_field, hospk_field], fp_make_params, eval_mask)\n \n # Construct data dict\n data_dict = {\n dataset_name + suf: {\n 'eval_spikes_heldout': eval_dict[hospk_field],\n 'train_behavior': train_behavior,\n 'eval_behavior': eval_behavior,\n 'eval_spikes_heldin_forward': fp_dict[spk_field],\n 'eval_spikes_heldout_forward': fp_dict[hospk_field],\n }\n }\n\n # Include `decode_masks` to train separate decoders for different data\n if 'decode_masks' in params:\n if callable(params['decode_masks']):\n train_decode_mask = params['decode_masks'](dataset.trial_info[train_mask])\n eval_decode_mask = params['decode_masks'](dataset.trial_info[eval_mask])\n 
else:\n train_decode_mask, eval_decode_mask = params['decode_masks']\n data_dict[dataset_name + suf]['train_decode_mask'] = train_decode_mask\n data_dict[dataset_name + suf]['eval_decode_mask'] = eval_decode_mask\n \n # Calculate PSTHs if desired\n if include_psth:\n psth_params = params.get('psth_params', None)\n if psth_params is None:\n logger.warning(\"PSTHs are not supported for this dataset, skipping...\")\n else:\n psths = _make_psth(dataset, eval_mask, ignore_mask, **psth_params)\n data_dict[dataset_name + suf]['psth'] = psths\n\n # Save and return data\n if save_file:\n save_to_h5(data_dict, save_path, overwrite=True)\n if return_dict:\n return data_dict", "def get_dataset(opts):\n dataset_type = opts.dataset_params.dataset_type\n if dataset_type in 'synth':\n return synthgraph.SynthGraphDataset(opts, opts.dataset_params)\n elif dataset_type in 'synthnoise':\n return synthgraph.SynthNoiseGraphDataset(opts, opts.dataset_params)\n elif dataset_type in 'synthoutlier':\n return synthgraph.SynthOutlierGraphDataset(opts, opts.dataset_params)\n elif dataset_type in 'rome16kgeom':\n return spreal.GeomKNNRome16KDataset(opts, opts.dataset_params)\n elif dataset_type in 'graffiti':\n return graffiti.GraffitiDataset(opts, opts.dataset_params)\n else:\n print(\"ERROR: Dataset type {} not implemented yet\".format(dataset_type))\n sys.exit(1)", "def _eval_input_fn():\n features_placeholder = {\n k: tf.compat.v1.placeholder(v.dtype, v.shape)\n for k, v in six.iteritems(features)\n }\n if use_multi_head:\n placeholder = tf.compat.v1.placeholder(labels.dtype, labels.shape)\n labels_placeholder = {\n _PRIMARY_HEAD: placeholder,\n _SECONDARY_HEAD: placeholder,\n }\n else:\n labels_placeholder = tf.compat.v1.placeholder(labels.dtype, labels.shape)\n dataset = tf.data.Dataset.from_tensors(\n (features_placeholder, labels_placeholder))\n iterator = tf.compat.v1.data.make_initializable_iterator(dataset)\n if use_multi_head:\n feed_dict = {\n labels_placeholder[head_name]: labels\n for head_name in labels_placeholder\n }\n else:\n feed_dict = {labels_placeholder: labels}\n\n feed_dict.update(\n {features_placeholder[k]: features[k] for k in features_placeholder})\n iterator_initializer_hook.iterator_initializer_fn = (\n lambda sess: sess.run(iterator.initializer, feed_dict=feed_dict))\n return iterator.get_next()", "def __init__(self, experiment_file: str, data_config: dict, data_splitter_config: dict = None,\n load_experiment: bool = False, save_experiment: bool = True):\n self.data_cfg = data_config\n self.experiment_name = experiment_file.split(os_sep)[-1].removesuffix('.cfg')\n self.experiment_folder = os_sep.join(experiment_file.split(os_sep)[:-1])\\\n + os_sep + self.experiment_name + os_sep\n self.save_experiment = save_experiment\n\n # Hotfix ast_literal_eval doesn't allow nan\n def backstep_ast_literal_eval(s: pd.Series) -> list:\n output = []\n know_type = None\n for value in s:\n try:\n output.append(obj := ast.literal_eval(value))\n if know_type is None:\n know_type = type(obj)\n know_type = know_type()\n except ValueError:\n if know_type is None:\n output.append('Unknown')\n else:\n output.append(know_type)\n if know_type is None:\n raise ValueError\n correct_output = [x if x != 'Unknown' else know_type for x in output]\n return correct_output\n\n if data_splitter_config is not None:\n self.data = pd.read_csv(self.data_cfg['datafile'], usecols=self.data_cfg['use_columns'])\n # TEMPORARY\n # self.data = self.data.dropna()\n # END\n for column in self.data_cfg['ast_columns']:\n 
self.data[column] = backstep_ast_literal_eval(self.data[column])\n self.data = self.data.set_index(self.data_cfg['index_columns'])\n\n self.splitter_cfg = data_splitter_config\n self.data_splitter = InterfaceFactory.get_interface('Splitters', self.splitter_cfg['method'])\n for data_name, dataframe in self.data_splitter.iget_splitter(self.data, **self.splitter_cfg['parameters']).items():\n setattr(self, data_name, dataframe)\n self.eval_data = 'test'\n elif load_experiment:\n # If implement multiple eval datasets, use dynamic loading with setattr()\n self.train = pd.read_csv(self.experiment_folder + 'train.csv', index_col=self.data_cfg['index_columns'])\n self.test = pd.read_csv(self.experiment_folder + 'test.csv', index_col=self.data_cfg['index_columns'])\n for column in self.data_cfg['ast_columns']:\n self.train[column] = backstep_ast_literal_eval(self.train[column])\n self.test[column] = backstep_ast_literal_eval(self.test[column])\n dw_columns = [col for col in self.test.columns if col[:3] in ['R__', 'N__', 'F__', 'M__']]\n for col in dw_columns:\n self.test[col] = self.test[col].apply(lambda x: ast.literal_eval(x))\n # Multiple eval datasets not implemented\n self.eval_data = 'test'\n else:\n raise NotImplementedError", "def create_eval(self):\n self.ev_id = \"ev-\" + base64.b32encode(os.urandom(10)).decode(\"ascii\")\n self.ev_name = \"Evaluation: \" + self.ml_name\n self._ml.create_evaluation(\n EvaluationId=self.ev_id,\n EvaluationName=self.ev_name,\n MLModelId=self.ml_id,\n EvaluationDataSourceId=self.fold.eval_ds_id\n )\n logger.info(\"Created Evaluation \" + self.ev_id)", "def load_eval_dataset(data_dir):\n\tball_images = load_ball_images_to_memory(data_dir)\n\tgen = functools.partial(data_generator, data_dir, ball_images)\n\treturn tf.data.Dataset.from_generator(gen, (tf.float32, tf.float32))", "def eval_input_fn(features, labels, batch_size):\n #features=dict(features)\n features = dataframetodict(features)\n if labels is None:\n # No labels, use only features.\n inputs = features\n else:\n inputs = (features, labels)\n\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n\n # Batch the examples\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n\n # Return the dataset.\n return dataset", "def evaluate(self, X, y, hypes={}, n_splits=5, shuffle=True, standardize=True, groups=None):\n \n ### SET HYPERPARAMETERS ###\n model = clone(self.estimator) # Gotta do this otherwise funky things happen\n model.set_params(**hypes)\n \n ### INITIALIZE SCORING DATAFRAME ###\n fractions = ['train', 'val']\n scoring_metrics = ['mae', 'mape', 'medape', 'pearson', 'spearman']\n score_columns = pd.MultiIndex.from_product([fractions, scoring_metrics]) # This sets up a heirarchical index for the results dataframe\n score = pd.DataFrame(columns=score_columns)\n\n ### SET UP X-VALIDATION ###\n \n if groups is not None:\n cv = model_selection.LeaveOneGroupOut()\n splitter = enumerate(cv.split(X,y,groups))\n else:\n cv = model_selection.KFold(n_splits=n_splits, shuffle=shuffle)\n splitter = enumerate(cv.split(X,y))\n\n ### RUN CV AND SCORE MODEL ###\n last_splits = [] # Keep track of split indices for forensics\n for idx, (train, val) in splitter:\n\n X_train = X.iloc[train,:]; y_train = y.iloc[train]\n X_val = X.iloc[val,:]; y_val = y.iloc[val]\n \n if standardize:\n std = preprocessing.StandardScaler()\n std.fit(X_train)\n X_train, X_val = std.transform(X_train), std.transform(X_val)\n\n # if idx==0:\n # for v 
in ['X_train','y_train','X_val','y_val']:\n # print('{} shape: {}'.format(v, eval('{}.shape'.format(v))))\n\n ### INSTANTIATE AND FIT MODEL ###\n last_splits.append((train, val))\n model.fit(X_train, y_train)\n\n for frac in ['train','val']:\n \n # y_true will either be y_train or y_val depending on what 'frac' is. Kind of hacky.\n y_true = eval('y_'+frac)\n y_pred = model.predict(eval('X_'+frac))\n \n # Calculate MAE\n score.loc[idx, (frac,'mae')] = \\\n metrics.mean_absolute_error(y_true, y_pred)\n \n # Calculate MAPE\n score.loc[idx, (frac,'mape')] = \\\n mean_absolute_percentage_error(y_true, y_pred)\n \n # Calculate MedAPE\n score.loc[idx, (frac,'medape')] = \\\n median_absolute_percentage_error(y_true, y_pred)\n\n # Calculate pearson\n score.loc[idx, (frac,'pearson')] = \\\n stats.pearsonr(y_true, y_pred)[0]\n\n # Calculate spearman\n score.loc[idx, (frac,'spearman')] = \\\n stats.spearmanr(y_true, y_pred)[0]\n\n self.estimator = model\n self.last_scores = score\n self.last_hypes = hypes\n self.last_splits = last_splits\n\n return score", "def evaluate(self,\n data: Union[\"SparkXShards\",\n \"SparkDataFrame\",\n \"TFDataset\",\n \"ray.data.Dataset\",\n Callable],\n batch_size: int=32,\n num_steps: Optional[int]=None,\n verbose: Union[str, int]=1,\n sample_weight: Optional[\"np.ndarray\"]=None,\n callbacks: Optional[List[\"Callback\"]]=None,\n data_config: Optional[Dict]=None,\n feature_cols: Optional[List[str]]=None,\n label_cols: Optional[List[str]]=None) -> Dict:\n if not isinstance(data, types.FunctionType):\n invalidInputError(isinstance(batch_size, int) and batch_size > 0,\n \"batch_size should be a positive integer\")\n else:\n # batch_size can be None if the return of data_creator already generates batches\n if batch_size:\n invalidInputError(isinstance(batch_size, int) and batch_size > 0,\n \"batch_size should be a positive integer\")\n # Use the local batch size for each worker to convert to XShards\n if batch_size:\n local_batch_size = batch_size // self.num_workers\n if local_batch_size <= 0:\n local_batch_size = 1\n else:\n local_batch_size = None\n logger.info(\"Starting validation step.\")\n params = dict(\n batch_size=batch_size,\n verbose=verbose,\n sample_weight=sample_weight,\n steps=num_steps,\n callbacks=callbacks,\n data_config=data_config,\n )\n from bigdl.orca.data import SparkXShards\n from bigdl.orca.data.tf.data import Dataset\n from bigdl.orca.data.tf.tf2_data import TF2Dataset\n\n data, _ = maybe_dataframe_to_xshards(data,\n validation_data=None,\n feature_cols=feature_cols,\n label_cols=label_cols,\n mode=\"evaluate\",\n num_workers=self.num_workers,\n accept_str_col=True,\n shard_size=local_batch_size)\n\n if isinstance(data, SparkXShards):\n # Make sure each worker can get at least one data partition\n if data.num_partitions() < self.num_workers:\n data = data.repartition(self.num_workers)\n if data._get_class_name() == 'pandas.core.frame.DataFrame':\n data = process_xshards_of_pandas_dataframe(data, feature_cols, label_cols)\n ray_xshards = RayXShards.from_spark_xshards(data) # type:ignore\n worker_stats = self._evaluate_ray_xshards(ray_xshards, params)\n elif isinstance(data, Dataset):\n ray_xshards = TF2Dataset(data).get_ray_xshards(self.num_workers)\n worker_stats = self._evaluate_ray_xshards(ray_xshards, params)\n elif isinstance(data, ray.data.Dataset):\n shards = data.split(n=self.num_workers, locality_hints=self.remote_workers)\n\n remote_worker_stats = []\n for shard, worker in zip(shards, self.remote_workers):\n params[\"data_creator\"] = 
self.process_ray_dataset(shard,\n label_cols,\n feature_cols,\n data_config)\n remote_worker_stats.append(worker.validate.remote(**params))\n worker_stats = ray.get(remote_worker_stats)\n else: # data_creator functions; should return Iter or DataLoader\n params[\"data_creator\"] = data # type:ignore\n params_list = [params] * self.num_workers\n\n worker_stats = ray.get([w.validate.remote(**params_list[i])\n for i, w in enumerate(self.remote_workers)])\n stats = worker_stats[0].copy()\n return stats", "def evaluateWithSeveralMetrics(self, dataset, metricSets=None):\n if metricSets is None: # all metrics\n metricSets = [{\"metricName\": \"areaUnderROC\"},\n {\"metricName\": \"areaUnderPR\"},\n {\"metricName\": \"precisionAtGivenRecall\", \"metricParams\": {\"recallValue\": 0.05}}] \n resultMetricSets = [None for _ in range(len(metricSets))]\n pagrs = []\n for i in range(len(metricSets)):\n params = metricSets[i]\n if params[\"metricName\"] != \"precisionAtGivenRecall\":\n value = self.evaluate(dataset, params)\n if len(params.keys()) == 1:\n key = params[\"metricName\"]\n else:\n key = params[\"metricName\"] + \" at recallValue \" + str(params[\"metricParams\"][\"recallValue\"])\n resultMetricSets[i] = {key:value}\n else: \n pagrs.append([i,params[\"metricParams\"][\"recallValue\"]])\n continue\n if None in resultMetricSets:\n pr_params = {\"metricName\": \"precisionAtGivenMultipleRecalls\", \"metricParams\": {\"recallValues\": [x[1] for x in pagrs]}}\n precisions = self.evaluate(dataset, pr_params)\n i = 0\n for item in pagrs:\n key = \"precisionAtGivenRecall\" + \" at recallValue \" + str(pagrs[i][1])\n resultMetricSets[item[0]] = {key:precisions[i]}\n i += 1 \n \n return resultMetricSets", "def setup_evaluation(evalfile, solufile, tolerance, evalstring=False):\n if evalstring:\n evaluation = IPETEvaluation.fromXML(evalfile)\n else:\n evaluation = IPETEvaluation.fromXMLFile(evalfile[\"path\"])\n\n evaluation.set_grouptags(True)\n evaluation.set_validate(solufile)\n evaluation.set_feastol(tolerance)\n return evaluation", "def evaluate_with_metrics(self, dataset, metrics, *args, **kwargs):\n\n utils.assert_raise(isinstance(metrics, dict), ValueError,\n '\"metrics\" must be a dict with metric_name -> metric_function')\n result = dict()\n\n for sample in dataset:\n output = self.predict(sample)\n\n for key, call in metrics.items():\n holder = result.get(key, list())\n holder.append(call(output, sample))\n\n result[key] = holder\n\n return result", "def create(self, validated_data):\n\n # Use an atomic transaction for managing dataset and authors\n with transaction.atomic():\n # Pop off authors data, if exists\n author_data = []\n sites_data = []\n plots_data = []\n variables_data = []\n if \"authors\" in validated_data.keys():\n author_data = validated_data.pop('authors')\n\n if \"sites\" in validated_data.keys():\n sites_data = validated_data.pop('sites')\n\n if \"plots\" in validated_data.keys():\n plots_data = validated_data.pop('plots')\n\n if \"variables\" in validated_data.keys():\n variables_data = validated_data.pop('variables')\n\n # Create dataset first\n dataset = DataSet.objects.create(**validated_data)\n dataset.clean()\n dataset._change_reason = f'Created Dataset Metadata'\n dataset.save()\n\n # save the author data\n reasons = set()\n if len(author_data) > 0:\n reasons.add(\"authors\")\n self.add_authors(author_data, dataset)\n for obj in sites_data:\n reasons.add(\"sites\")\n dataset.sites.add(obj)\n for obj in plots_data:\n reasons.add(\"plots\")\n dataset.plots.add(obj)\n for 
obj in variables_data:\n reasons.add(\"variables\")\n dataset.variables.add(obj)\n\n if len(reasons) > 0:\n dataset._change_reason = f'Added {\", \".join(reasons)}'\n dataset.save()\n\n return dataset", "def evaluator(self, evaluator):\n self.__evaluator = evaluator", "def evaluate(self, sess, outputs, dSet=None, dataX=None, dataY=None):\n\n feed_dict = {self.isTraining : False}\n\n if (dataX is not None) and (dataY is not None):\n feed_dict[self.features] = dataX\n feed_dict[self.labels] = dataY\n return sess.run(output, feed_dict) \n\n if dSet is \"train\":\n sess.run(self.train_iter.initializer)\n feed_dict[self.handle] = self.train_handle\n elif dSet is \"val\":\n sess.run(self.valid_iter.initializer)\n feed_dict[self.handle] = self.valid_handle\n elif dSet is \"test\":\n sess.run(self.test_iter.initializer)\n feed_dict[self.handle] = self.test_handle\n else:\n print(\"ERROR: Do not recognize dataset {}\".format(dSet))\n raise RuntimeError\n\n runningOutput = []\n while True:\n # Get batch data\n try :\n runningOutput.append(sess.run(output, feed_dict))\n except tf.errors.OutOfRangeError:\n break\n\n return runningOutput", "def evaluate(self, dataset, metric='auto', verbose=True, batch_size=64):\n if(batch_size < 1):\n raise ValueError(\"'batch_size' must be greater than or equal to 1\")\n\n extracted_features = self._extract_features(dataset, verbose=verbose, batch_size=batch_size)\n extracted_features[self.target] = dataset[self.target]\n return self.classifier.evaluate(extracted_features, metric = metric)", "def evaluate(self, test_set, predicted_values, certainty):\r\n\r\n if self.classification_type == \"classification\":\r\n self.classification_evaluation(test_set, predicted_values, certainty)\r\n elif self.classification_type == \"regression\":\r\n self.regression_evaluation(test_set, predicted_values)", "def test_eval_values(sdc_builder, sdc_executor, gcp, data_drift, eval_value):\n bucket_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n dataset_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n table_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n data = '\\n'.join(json.dumps(rec) for rec in ROWS_IN_DATABASE)\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n # Dev raw data source\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='JSON',\n raw_data=data,\n stop_after_first_batch=True)\n\n # Build Expression Evaluator\n expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')\n expression_evaluator.set_attributes(header_attribute_expressions=[\n {'attributeToSet': 'sdc.dataset.name',\n 'headerAttributeExpression': dataset_name},\n {'attributeToSet': 'sdc.table.name',\n 'headerAttributeExpression': table_name}]\n )\n\n # Google BigQuery destination stage\n dataset_config = \"${record:attribute('sdc.dataset.name')}\" if eval_value in {\"DATASET\", \"BOTH\"} else dataset_name\n table_config = \"${record:attribute('sdc.table.name')}\" if eval_value in {\"TABLE\", \"BOTH\"} else table_name\n bigquery = pipeline_builder.add_stage(name=DESTINATION_STAGE_NAME)\n bigquery.set_attributes(project_id=gcp.project_id,\n dataset=dataset_config,\n table=table_config,\n bucket=bucket_name,\n enable_data_drift=data_drift,\n create_table=data_drift,\n create_dataset=data_drift,\n purge_stage_file_after_ingesting=True)\n\n dev_raw_data_source >> expression_evaluator >> bigquery\n\n pipeline = pipeline_builder.build().configure_for_environment(gcp)\n\n bigquery_client = 
gcp.bigquery_client\n dataset_ref = DatasetReference(gcp.project_id, dataset_name)\n\n try:\n logger.info(f'Creating temporary bucket {bucket_name}')\n bucket = gcp.retry_429(gcp.storage_client.create_bucket)(bucket_name)\n\n if not data_drift:\n logger.info('Creating dataset %s and table %s using Google BigQuery client ...', dataset_name, table_name)\n bigquery_client.create_dataset(dataset_ref)\n table = bigquery_client.create_table(Table(dataset_ref.table(table_name), schema=SCHEMA))\n\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n if data_drift:\n table = bigquery_client.get_table(f'{dataset_name}.{table_name}')\n\n # Verify by reading records using Google BigQuery client\n data_from_bigquery = [tuple(row.values()) for row in bigquery_client.list_rows(table)]\n data_from_bigquery.sort()\n\n expected_data = [tuple(v for v in d.values()) for d in ROWS_IN_DATABASE]\n\n assert len(data_from_bigquery) == len(expected_data)\n assert data_from_bigquery == expected_data\n finally:\n _clean_up_bigquery(bigquery_client, dataset_ref)\n _clean_up_gcs(gcp, bucket, bucket_name)", "def __get_dataset_type(dataset):\n op_type = None\n if isinstance(dataset, de.ShuffleDataset):\n op_type = OpName.SHUFFLE\n elif isinstance(dataset, de.MindDataset):\n op_type = OpName.MINDRECORD\n elif isinstance(dataset, de.BatchDataset):\n op_type = OpName.BATCH\n elif isinstance(dataset, de.SyncWaitDataset):\n op_type = OpName.BARRIER\n elif isinstance(dataset, de.ZipDataset):\n op_type = OpName.ZIP\n elif isinstance(dataset, de.ConcatDataset):\n op_type = OpName.CONCAT\n elif isinstance(dataset, de.MapDataset):\n op_type = OpName.MAP\n elif isinstance(dataset, de.FilterDataset):\n op_type = OpName.FILTER\n elif isinstance(dataset, de.RepeatDataset):\n op_type = OpName.REPEAT\n elif isinstance(dataset, de.SkipDataset):\n op_type = OpName.SKIP\n elif isinstance(dataset, de.TakeDataset):\n op_type = OpName.TAKE\n elif isinstance(dataset, de.ImageFolderDatasetV2):\n op_type = OpName.IMAGEFOLDER\n elif isinstance(dataset, de.GeneratorDataset):\n op_type = OpName.GENERATOR\n elif isinstance(dataset, de.TransferDataset):\n op_type = OpName.DEVICEQUEUE\n elif isinstance(dataset, de.RenameDataset):\n op_type = OpName.RENAME\n elif isinstance(dataset, de.TFRecordDataset):\n op_type = OpName.TFREADER\n elif isinstance(dataset, de.ProjectDataset):\n op_type = OpName.PROJECT\n elif isinstance(dataset, de.MnistDataset):\n op_type = OpName.MNIST\n elif isinstance(dataset, de.ManifestDataset):\n op_type = OpName.MANIFEST\n elif isinstance(dataset, de.VOCDataset):\n op_type = OpName.VOC\n elif isinstance(dataset, de.Cifar10Dataset):\n op_type = OpName.CIFAR10\n elif isinstance(dataset, de.Cifar100Dataset):\n op_type = OpName.CIFAR100\n elif isinstance(dataset, de.CelebADataset):\n op_type = OpName.CELEBA\n elif isinstance(dataset, de.RandomDataset):\n op_type = OpName.RANDOMDATA\n elif isinstance(dataset, de.TextFileDataset):\n op_type = OpName.TEXTFILE\n else:\n raise ValueError(\"Unsupported DatasetOp\")\n\n return op_type", "def get_dataset(args):\n\n if args.dataset == 'cifar':\n data_dir = 'data/cifar/'\n apply_transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n train_dataset = datasets.CIFAR10(data_dir, train=True, download=True,\n transform=apply_transform)\n\n test_dataset = datasets.CIFAR10(data_dir, train=False, download=True,\n transform=apply_transform)\n\n # sample training data amongst users\n if 
args.iid:\n user_groups = cifar_iid(train_dataset, args.num_users)\n else:\n if args.unequal:\n # Chose euqal splits for every user\n user_groups = cifar_noniid(train_dataset, args.num_users)\n else:\n user_groups = cifar_noniid_class(train_dataset, args.num_users, args.class_per_user)\n \n elif args.dataset == 'mnist' or 'fmnist':\n if args.dataset == 'mnist':\n data_dir = 'data/mnist/'\n else:\n data_dir = 'data/fmnist/'\n\n apply_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))])\n\n train_dataset = datasets.MNIST(data_dir, train=True, download=True,\n transform=apply_transform)\n\n test_dataset = datasets.MNIST(data_dir, train=False, download=True,\n transform=apply_transform)\n\n # sample training data amongst users\n if args.iid:\n # Sample IID user data from Mnist\n user_groups = mnist_iid(train_dataset, args.num_users)\n else:\n # Sample Non-IID user data from Mnist\n if args.unequal:\n # Chose uneuqal splits for every user\n user_groups = mnist_noniid_unequal(train_dataset, args.num_users)\n else:\n # Chose euqal splits for every user\n user_groups = mnist_noniid_class(train_dataset, args.num_users, args.class_per_user)\n\n return train_dataset, test_dataset, user_groups", "def evaluation(store, evaluation_obj):\n evaluation_obj['institute'] = store.institute(evaluation_obj['institute_id'])\n evaluation_obj['case'] = store.case(evaluation_obj['case_id'])\n evaluation_obj['variant'] = store.variant(evaluation_obj['variant_specific'])\n evaluation_obj['criteria'] = {criterion['term']: criterion for criterion in\n evaluation_obj['criteria']}\n evaluation_obj['classification'] = ACMG_COMPLETE_MAP[evaluation_obj['classification']]\n return evaluation_obj", "def evaluate(self, dataset):\n logging.info('Start evaluation')\n\n loss, predictions, labels = self.run_one_epoch(dataset, RunnerPhase.VALIDATE)\n\n metrics_dict = self.metric_class.get_metrics_dict(predictions, labels)\n\n eval_info = self.metric_class.metrics_dict_to_str(metrics_dict)\n\n logging.info(eval_info)\n\n logging.info('Evaluation finished')\n\n return metrics_dict", "def evaluate(self, session, *args, evaluate_data_iterator=None, **kwargs):\n\n raise NotImplementedError(\"Implement evaluate() method\")", "def evaluate(QualityMeasure,ModelClass,dataset,subgroup,target1,target2): \r\n evaluator = {\r\n QualityMeasure.SCD: evaluate_scd,\r\n }\r\n return evaluator.get(QualityMeasure)(ModelClass,dataset,subgroup,target1,target2)", "def data_creator(config):\n train_dataset, val_dataset = LinearDataset(2, 5), LinearDataset(2, 5)\n train_loader = DataLoader(train_dataset, batch_size=config[\"batch_size\"])\n val_loader = DataLoader(val_dataset, batch_size=config[\"batch_size\"])\n return train_loader, val_loader", "def eval_input_fn(features, labels, batch_size):\n features = dict(features)\n if labels is None:\n # No labels, use only features.\n inputs = features\n else:\n inputs = (features, labels)\n\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n\n # Batch the examples\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n\n return dataset", "def predict_and_eval_in_val(self, sess, tst_reader, metrics):\n raise NotImplementedError(\"\"\"please customize predict_and_eval_in_val\"\"\")", "def _create_model_and_data(\n dataset_name: str, use_synthetic_data: bool\n) -> Tuple[constants.ModelFnType, constants.FederatedDatasetsType,\n constants.ProcessFnType, 
constants.SplitDataFnType, str]:\n if dataset_name == 'emnist':\n return emnist.create_model_and_data(\n num_local_epochs=_TRAIN_EPOCHS.value,\n train_batch_size=_TRAIN_BATCH_SIZE.value,\n use_synthetic_data=use_synthetic_data)\n elif dataset_name == 'stackoverflow':\n return stackoverflow.create_model_and_data(\n num_local_epochs=_TRAIN_EPOCHS.value,\n train_batch_size=_TRAIN_BATCH_SIZE.value,\n use_synthetic_data=use_synthetic_data)\n elif dataset_name == 'landmark':\n return landmark.create_model_and_data(\n num_local_epochs=_TRAIN_EPOCHS.value,\n train_batch_size=_TRAIN_BATCH_SIZE.value,\n use_synthetic_data=use_synthetic_data)\n elif dataset_name == 'ted_multi':\n return ted_multi.create_model_and_data(\n num_local_epochs=_TRAIN_EPOCHS.value,\n train_batch_size=_TRAIN_BATCH_SIZE.value,\n use_synthetic_data=use_synthetic_data)\n raise ValueError(f'Accepted dataset names: {constants.DATASET_NAMES}, but '\n f'found {dataset_name}. Please provide a valid name.')", "def eval_input_fn(features, labels, batch_size):\n features = dict(features)\n if labels is None:\n # No labels, use only features.\n inputs = features\n else:\n inputs = (features, labels)\n\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n\n # Batch the examples\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n\n # Return the dataset.\n return dataset", "def create_dataset(self, config, rng):\n raise NotImplementedError()", "def __prepare_dataset(dataset, verbose=True):\r\n supported_datasets = ['coco', 'voc']\r\n if isinstance(dataset, ExternalDataset):\r\n if dataset.dataset_type.lower() not in supported_datasets:\r\n raise UserWarning(\"ExternalDataset dataset_type must be one of: \", supported_datasets)\r\n\r\n dataset_root = dataset.path\r\n\r\n if verbose:\r\n print(\"Loading {} type dataset...\".format(dataset.dataset_type))\r\n\r\n if dataset.dataset_type.lower() == 'voc':\r\n from gluoncv.data import VOCDetection\r\n\r\n dataset = VOCDetection(root=dataset_root,\r\n splits=[(2007, 'trainval'), (2012, 'trainval')])\r\n\r\n elif dataset.dataset_type.lower() == 'coco':\r\n from gluoncv.data import COCODetection\r\n\r\n dataset = COCODetection(root=dataset_root,\r\n splits=['instances_train2017'])\r\n if verbose:\r\n print(\"ExternalDataset loaded.\")\r\n return dataset\r\n elif isinstance(dataset, DetectionDataset) or issubclass(type(dataset), DetectionDataset):\r\n dataset.set_image_transform(ImageToNDArrayTransform())\r\n dataset.set_target_transform(BoundingBoxListToNumpyArray())\r\n return dataset\r\n else:\r\n raise ValueError(\"Dataset type {} not supported\".format(type(dataset)))", "def eval(x: jnp.ndarray, data_batch: Dataset, **kwargs) -> jnp.ndarray:\n pass", "def evaluate(self, data, category, dims=None, overall=True):\n n_data = len(data)\n eval_scores = [{} for _ in range(n_data)]\n\n if dims == None:\n eval_dims = self.dimensions\n else:\n assert isinstance(dims, list)\n eval_dims = dims\n\n for dim in eval_dims:\n # Calculate summation score for 'engagingness'\n if dim == 'engagingness':\n src_list, output_list, context_list = [], [], []\n n_sents = [] # the number of sentences in each generated response\n for i in range(n_data):\n source = data[i]['source']\n context = data[i]['context']\n system_outputs = sent_tokenize(data[i]['system_output'])\n n_sents.append(len(system_outputs))\n for j in range(len(system_outputs)):\n src_list.append(source)\n context_list.append(context)\n 
output_list.append(system_outputs[j])\n input_list = add_question(dimension=dim,\n output=output_list,\n src=src_list,\n context=context_list,\n task=self.task)\n sent_score = self.scorer.score(input_list, self.task, category, dim)\n\n # Get the summation score for each sample\n start_idx = 0\n score = []\n for cur_n_sent in n_sents:\n score.append(sum(sent_score[start_idx:start_idx + cur_n_sent]))\n start_idx += cur_n_sent\n\n # Calculate turn-level score for other dimensions\n elif dim in ['naturalness', 'coherence', 'groundedness', 'understandability']:\n src_list, output_list, context_list = [], [], []\n for i in range(n_data):\n src_list.append(data[i]['source'])\n output_list.append(data[i]['system_output'])\n context_list.append(data[i]['context'])\n input_list = add_question(dimension=dim,\n output=output_list,\n src=src_list,\n context=context_list,\n task=self.task)\n score = self.scorer.score(input_list, self.task, category, dim)\n\n # Please customize other dimensions here for summarization\n else:\n raise NotImplementedError('The input format for this dimension is still undefined. \\\n Please customize it first.')\n\n for i in range(n_data):\n eval_scores[i][dim] = score[i]\n\n # Customize your overall score here.\n if overall == True:\n for i in range(n_data):\n eval_scores[i]['overall'] = np.mean(list(eval_scores[i].values()))\n\n return eval_scores", "def eval_input_fn(features, labels, batch_size):\n features=dict(features)\n if labels is None:\n # No labels, use only features.\n inputs = features\n else:\n inputs = (features, labels)\n\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n\n # Batch the examples\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n\n # Return the dataset.\n return dataset", "def eval_input_fn(features, labels, batch_size):\n features=dict(features)\n if labels is None:\n # No labels, use only features.\n inputs = features\n else:\n inputs = (features, labels)\n\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n\n # Batch the examples\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n\n # Return the dataset.\n return dataset", "def eval_input_fn(features, labels, batch_size):\n features=dict(features)\n if labels is None:\n # No labels, use only features.\n inputs = features\n else:\n inputs = (features, labels)\n\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n\n # Batch the examples\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n\n # Return the dataset.\n return dataset", "def eval_input_fn(features, labels, batch_size):\n features=dict(features)\n if labels is None:\n inputs = features\n else:\n inputs = (features, labels)\n\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n \n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n\n return dataset", "def evaluate(hparams, summary_dir, num_gpus, model_type, eval_set, eval_size,\n eval_shard, data_dir, num_targets, dataset, validate, seed,\n shuffled, shift, pad, batch_size=100, checkpoint=None):\n output_dir = summary_dir\n load_dir = summary_dir + '/train/'\n summary_dir += '/eval/' + FLAGS.dataset + '/' + eval_set\n with tf.Graph().as_default():\n features = get_features(eval_set, batch_size, num_gpus, data_dir,\n num_targets, dataset, validate, 
evaluate=True,\n seed=seed, shuffled=shuffled, shift=shift,\n pad=pad, eval_shard=eval_shard)\n model = models[model_type](hparams)\n result, _ = model.multi_gpu(features, num_gpus)\n test_writer = tf.summary.FileWriter(summary_dir)\n seen_step = -1\n paused = 0\n while paused < 360:\n print('start evaluation, model defined')\n if checkpoint:\n step = extract_step(checkpoint)\n last_checkpoint = checkpoint\n else:\n step, last_checkpoint = find_checkpoint(load_dir, seen_step)\n if step == -1:\n time.sleep(60)\n paused += 1\n else:\n paused = 0\n seen_step = step\n run_experiment(load_eval, last_checkpoint, test_writer,\n eval_experiment, model, result,\n eval_size // batch_size, features=features,\n eval_set=eval_set, output_dir=output_dir,\n unsupervised=hparams.unsupervised,\n num_gpus=num_gpus)\n if checkpoint:\n break\n\n test_writer.close()", "def _check_evaluate_implementation(self) -> None:\n logging.debug(f\"Evaluate_batch_defined: {self._evaluate_batch_defined()}.\")\n logging.debug(f\"Evaluate full dataset defined: {self._evaluate_full_dataset_defined()}.\")\n check.not_eq(\n self._evaluate_batch_defined(),\n self._evaluate_full_dataset_defined(),\n \"Please define exactly one of: `evaluate_batch()` or `evaluate_full_dataset()`. \"\n \"For most use cases `evaluate_batch()` is recommended is recommended because \"\n \"it can be parallelized across all devices.\",\n )", "def generate_gene_set_data(\n data,\n genes,\n gene_name_type=\"entrez\",\n gene_set_category=\"c6.all\",\n metric=\"mean\",\n standardize=False,\n data_dir=\"../../Data/examples/Gene_Sets/MSigDB.v7.0/\",\n):\n\n sample_name = None\n if isinstance(data, pd.DataFrame):\n sample_name = data.index\n data = data.values\n elif not isinstance(data, np.ndarray):\n print(\"Input data must be a numpy array or pandas data frame\")\n sys.exit(1)\n\n if standardize:\n scaler = StandardScaler()\n data = scaler.fit_transform(data)\n\n genes = [str(i) for i in genes]\n\n if gene_name_type == \"entrez\":\n gene_set_category = gene_set_category + \".v7.0.entrez.gmt\"\n if gene_name_type == \"symbols\":\n gene_set_category = gene_set_category + \".v7.0.symbols.gmt\"\n f = open(data_dir + gene_set_category, \"r\")\n x = f.readlines()\n gene_sets = {}\n for i in range(len(x)):\n temp = x[i].split(\"\\n\")[0].split(\"\\t\")\n gene_sets[temp[0]] = temp[2:]\n\n gene_set_data = np.empty((data.shape[0], len(gene_sets)))\n gene_set_data.fill(np.nan)\n gene_set_names = np.array(list(gene_sets.keys()))\n for i in range(len(gene_set_names)):\n idi = np.where(np.isin(genes, gene_sets[gene_set_names[i]]))[0]\n if len(idi) > 0:\n if metric == \"sum\":\n gene_set_data[:, i] = np.nansum(data[:, idi], axis=1)\n elif metric == \"max\":\n gene_set_data[:, i] = np.nanmax(data[:, idi], axis=1)\n elif metric == \"min\":\n gene_set_data[:, i] = np.nanmin(data[:, idi], axis=1)\n elif metric == \"abs_mean\":\n gene_set_data[:, i] = np.nanmean(np.absolute(data[:, idi]), axis=1)\n elif metric == \"abs_maximum\":\n gene_set_data[:, i] = np.nanmax(np.absolute(data[:, idi]), axis=1)\n else: # 'mean'\n gene_set_data[:, i] = np.nanmean(data[:, idi], axis=1)\n\n if sample_name is None:\n gene_set_data = pd.DataFrame(gene_set_data, columns=gene_set_names)\n else:\n gene_set_data = pd.DataFrame(\n gene_set_data, columns=gene_set_names, index=sample_name\n )\n keep_id = np.where(np.sum(np.invert(pd.isna(gene_set_data)), axis=0) > 0)[0]\n gene_set_data = gene_set_data.iloc[:, keep_id]\n\n return gene_set_data", "def eval_data(dataset):\n # If dataset.num_examples is 
not divisible by BATCH_SIZE\n # the remainder will be discarded.\n # Ex: If BATCH_SIZE is 64 and training set has 55000 examples\n # steps_per_epoch = 55000 // 64 = 859\n # num_examples = 859 * 64 = 54976\n #\n # So in that case we go over 54976 examples instead of 55000.\n steps_per_epoch = dataset.num_examples // BATCH_SIZE\n num_examples = steps_per_epoch * BATCH_SIZE\n total_acc, total_loss = 0, 0\n sess = tf.get_default_session()\n for step in range(steps_per_epoch):\n batch_x, batch_y = dataset.next_batch(BATCH_SIZE)\n loss, acc = sess.run([loss_op, accuracy_op], feed_dict={x: batch_x, y: batch_y})\n total_acc += (acc * batch_x.shape[0])\n total_loss += (loss * batch_x.shape[0])\n return total_loss/num_examples, total_acc/num_examples" ]
[ "0.71811223", "0.7178588", "0.7118122", "0.62564474", "0.6133015", "0.5892592", "0.5807464", "0.5648055", "0.5615826", "0.5607056", "0.55071527", "0.54952985", "0.5480512", "0.5452857", "0.54165035", "0.54085374", "0.5403301", "0.5399699", "0.5389954", "0.5385437", "0.53759825", "0.5367189", "0.5319838", "0.53112316", "0.53108835", "0.5299113", "0.52670974", "0.52670974", "0.5256187", "0.5223315", "0.5216478", "0.5203548", "0.5200659", "0.51986414", "0.5183397", "0.514251", "0.51401275", "0.5124436", "0.5119179", "0.5112114", "0.51031375", "0.5081295", "0.50769883", "0.5074288", "0.5069736", "0.50678647", "0.5066806", "0.50655234", "0.50621474", "0.5061822", "0.5050357", "0.50427264", "0.5026052", "0.50180155", "0.50101984", "0.5007401", "0.50070065", "0.49877933", "0.49842486", "0.49611038", "0.49604434", "0.49492973", "0.49484202", "0.49418917", "0.4934474", "0.4933944", "0.49332225", "0.49175996", "0.49111134", "0.4902983", "0.49006227", "0.489936", "0.4892493", "0.48797113", "0.48781732", "0.48749447", "0.48707908", "0.48648342", "0.4864039", "0.48547885", "0.48544204", "0.48512828", "0.4848911", "0.48448712", "0.48440814", "0.48426124", "0.48394355", "0.4830945", "0.48300105", "0.4828041", "0.48278025", "0.48265034", "0.4824333", "0.4824333", "0.4824333", "0.4821715", "0.4818165", "0.4807153", "0.48005012", "0.4797276" ]
0.69981503
3
Create a postvalidator function that makes sure the value of this item is a key in the sibling dictionary 'sib_name'. Raises a ValueError if not. This generally assumes siblings[sib_name] is a required CategoryElement.
def is_sib_key(sib_name): def is_sib_key_val(siblings, value): if value not in siblings[sib_name].keys(): raise ValueError( "Must be a key of {}, but got {}" .format(sib_name, value)) return value return is_sib_key_val
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate(self):\n for search_tag_name in self.get_search_tag_names():\n search_tag_obj = Tag(search_tag_name)\n for search_tag_value in self.get_search_tag_values(search_tag_name):\n for new_tag_name in self.get_new_tag_names(search_tag_name, search_tag_value):\n new_tag_obj = Tag(new_tag_name)\n new_tag_value = self.get_new_tag_value(search_tag_name, search_tag_value, new_tag_name)\n if new_tag_obj.repeatable:\n if not isinstance(new_tag_value, list):\n raise KeyError('%s needs a list'%(new_tag_name))\n else:\n if isinstance(new_tag_value, list):\n raise KeyError('%s needs a scalar value'%(new_tag_name))", "def check_items_slugs(cls, slugs, registry):\n for m in registry.items():\n for i in m[1]['items'].items():\n for slug in slugs:\n try:\n item = i[1]['_class'].objects.get(slug=slug)\n raise ItemAttributeChoicesSlugsDuplicateItemInstanceSlug(cls, item)\n except ObjectDoesNotExist:\n pass", "def validate_children(self, source, **kwargs):\n # TODO cache this loaded data keyed on a hashed version of kwargs\n children = self._load_json(\"children\", source, **kwargs)\n self._validate_against_schema(\"children\", children)\n\n strand = getattr(self, \"children\", [])\n\n # Loop the children and accumulate values so we have an O(1) check\n children_keys = {}\n for child in children:\n children_keys[child[\"key\"]] = children_keys.get(child[\"key\"], 0) + 1\n\n # Check there is at least one child for each item described in the strand\n # TODO add max, min num specs to the strand schema and check here\n for item in strand:\n strand_key = item[\"key\"]\n if children_keys.get(strand_key, 0) <= 0:\n raise exceptions.InvalidValuesContents(f\"No children found matching the key {strand_key}\")\n\n # Loop the strand and add unique keys to dict so we have an O(1) check\n strand_keys = {}\n for item in strand:\n strand_keys[item[\"key\"]] = True\n\n # Check that each child has a key which is described in the strand\n for child in children:\n child_key = child[\"key\"]\n if not strand_keys.get(child_key, False):\n raise exceptions.InvalidValuesContents(\n f\"Child with key '{child_key}' found but no such key exists in the 'children' strand of the twine.\"\n )\n\n # TODO Additional validation that the children match what is set as required in the Twine\n return children", "def _validate(self, value, name):\n validated = self._validate_impl(value, name)\n return self._validate_post(value, name, validated)", "def check_categories_slugs(cls, slugs):\n CategoryModel = apps.get_model(settings.DJCAT_CATEGORY_MODEL)\n for node in CategoryModel.objects.all():\n if node.slug in slugs:\n raise ItemAttributeChoicesSlugsDuplicateWithcCategory(cls, node)", "def check_dict_alg(dic, validator, entry_list, messages, whole_validator, current_elem):\n for node in validator:\n new_list = dc(entry_list)\n node_value = validator[node]\n if node != 'isReference':\n if not ('isReference' in node_value and len(entry_list) == 0):\n if is_operator(node):\n handle_operator(\n node, dic, validator, new_list, messages, whole_validator, current_elem\n )\n elif is_leaf(node_value):\n new_list.append(node)\n check_leaf(node_value, dic, new_list, messages, current_elem)\n else:\n new_list.append(node)\n check_dict_alg(\n dic, node_value, new_list, messages, whole_validator, current_elem\n )", "def GetSubkeyByName(self, name):", "def grandparent_splitter(fn, valid_name=\"valid\", train_name=\"train\"):\n gp = fn.parent.parent.name\n if gp == valid_name:\n return True\n elif gp == train_name:\n return False\n return", "def 
hasSiblings():", "def validate_unique_taxon_slugs(cls, values):\n if 'attributes' in values:\n # count occurrence of each taxon slug in attributes\n attributes: List[FdqModelAttribute] = values['attributes']\n taxon_slugs = cls._get_available_attrs_taxon_slugs(attributes)\n\n taxon_slugs_counter = Counter(taxon_slugs)\n\n multiple_taxon_slugs = [\n taxon_slug for taxon_slug, occurrence in taxon_slugs_counter.items() if occurrence > 1\n ]\n if len(multiple_taxon_slugs):\n raise ValueError('Following fields are mapped more than once - ' + ','.join(multiple_taxon_slugs))\n\n return values", "def test_split_nested_class_from_key_lambda(self):\n part1, part2 = class_dependency.split_nested_class_from_key(\n 'pkg.name.class$$Lambda$1')\n self.assertEqual(part1, 'pkg.name.class')\n self.assertEqual(part2, '$Lambda$1')", "def validate(self, source_value):\n errors = defaultdict(list)\n\n for field in self.get_mapping().fields:\n value = get_attribute(source_value, field.name)\n try:\n field.is_valid(value)\n except ValidationError as e:\n errors[field.name].append(e.message)\n\n if errors:\n raise ValidationError(errors)\n else:\n return super(Nested, self).validate(source_value)", "def OnRenameAccept(self, item, value):\r\n\r\n le = TreeEvent(wxEVT_TREE_END_LABEL_EDIT, self.GetId())\r\n le._item = item\r\n le.SetEventObject(self)\r\n le._label = value\r\n le._editCancelled = False\r\n\r\n return not self.GetEventHandler().ProcessEvent(le) or le.IsAllowed()", "def validate(self, item):\n attempt, pkg_analyzer, journal_and_issue_data = item[:3]\n j_publisher_name = journal_and_issue_data.get('journal', {}).get('publisher_name', None)\n if j_publisher_name:\n data = pkg_analyzer.xml\n xml_publisher_name = data.findtext('.//journal-meta/publisher/publisher-name')\n\n if xml_publisher_name:\n if self._normalize_data(xml_publisher_name) == self._normalize_data(j_publisher_name):\n r = [models.Status.ok, 'Valid publisher name: ' + xml_publisher_name]\n else:\n r = [models.Status.error, 'Mismatched data: %s. Expected: %s' % (xml_publisher_name, j_publisher_name)]\n else:\n r = [models.Status.error, 'Missing data: publisher name']\n else:\n r = [models.Status.error, 'Missing data: publisher name, in scieloapi']\n return r", "def post_validated(self, struct, item, value):\n return value", "def _duplicate_child_allowed_check(self):\n\n for rule in self.options[\n 'parent_allows_duplicate_child']:\n if self.lineage_test(rule):\n return True\n return False", "def validateName(self, info):\n for name, childInfo in info.devices.iteritems():\n if name != childInfo.name:\n raise ConfigurationNameMismatchError(name, childInfo.name)\n self.validateName(childInfo)", "def _validate_post(self, value, name, result):\n return result", "def validate_insert(self, s, internal=True):\n super(FieldSet, self).validate_insert(s, internal) # mandatory check\n if s and s not in [d[0] for d in self.details]:\n valid = []\n for k,v in self.details:\n valid.append(\"%s=%s\" % (k, v))\n raise FilemanError(\"\"\"Value [%s] is not valid. 
must be one of: %s\"\"\" % (s, \", \".join(valid)))", "def _validate(self, instance, value):", "def _validate_impl(self, value, name):\n raise NotImplementedError()", "def check_attr_key(cls, registry):\n for m in registry.items():\n for i in m[1]['items'].items():\n for a in i[1]['attrs'].items():\n if a[1]['key'] == cls.attr_key:\n raise ItemAttributeKeyDuplicate(a[1]['class'], cls, cls.attr_key)", "def __getitem__(self, item):\n if self.child_keys is None:\n self.child_keys = sorted(self.children.keys(), key=str.lower)\n return self.children[self.child_keys[item]]", "def _handleInput(self, paramInput):\n super()._handleInput(paramInput)\n for child in paramInput.subparts:\n if child.getName() == \"state\":\n outcome = child.parameterValues[\"outcome\"]\n value = child.value\n self.mapping[outcome] = value\n try:\n float(outcome)\n self.isFloat = True\n except:\n self.isFloat = False\n if outcome in self.values:\n self.raiseAnError(IOError,'Categorical distribution has identical outcomes')\n else:\n self.values.add(float(outcome) if self.isFloat else outcome)\n else:\n self.raiseAnError(IOError,'Invalid xml node for Categorical distribution; only \"state\" is allowed')\n self.initializeDistribution()\n self.upperBoundUsed = True\n self.lowerBoundUsed = True", "def handle_operator(node, dic, validator, entry_list, messages, whole_validator, current_elem):\n if node == '$reference':\n new_list = dc(entry_list)\n new_list.append(validator[node])\n check_dict_alg(\n dic, whole_validator[validator[node]], new_list, messages, whole_validator, current_elem\n )\n elif node == '$forElem':\n traversed_dic = traverse_dict(dic, entry_list)\n if traversed_dic is not None:\n for elem in traversed_dic:\n new_list = dc(entry_list)\n new_list.append(elem)\n check_dict_alg(\n dic, validator['$forElem'], new_list, messages, whole_validator, elem\n )\n else:\n add_message(messages, current_elem, \"Error in traversing dict!\")\n elif node.startswith('$selection__'):\n select_type = node.split('__')[1]\n select_dic = traverse_dict(dic, entry_list)\n if select_type in select_dic:\n select = select_dic[select_type]\n rest_validator = validator[node][select]\n check_dict_alg(dic, rest_validator, entry_list, messages, whole_validator, current_elem)\n else:\n add_message(\n messages, current_elem, \"Could not find \" + select_type + \" in \" + str(entry_list)\n )\n elif node.startswith('$exists__'):\n # TODO handle it somehow...\n pass", "def test_basic_singleton_key_error(self):\n\n schema = {\n ('root', str): {\n ('sample node', str, 'sample'): ('node', str, r'[a-z]*')\n }\n }\n data = {'root': {'not sample': 'node'}}\n\n ERRORS = lws.return_errors()\n expected_schema = {\n ('root', 'root'): [('sample node', ERRORS['key'])]\n }\n expected_data = {\n ('root', 'root'): [('not sample', ERRORS['key'])]\n }\n\n assert dict(lws.validate_schema(schema, data)) == expected_schema\n assert dict(lws.validate_data(schema, data)) == expected_data", "def _check_nested(self, key, self_val, nested):\n nested_val = getattr(nested, key)\n assert self_val == nested_val, \\\n \"selector['{}']='{}' in '{}' doesn't match header['{}']='{}' in nested file '{}'.\".format(\n key, self_val, self.filename, key, nested_val, nested.filename)", "def test_process_label_in_node(self):\n tree = Node(children=[\n Node(\"Defining secret phrase.\", label=['AB', 'a']),\n Node(\"Has secret phrase. 
Then some other content\", \n label=['AB', 'b'])\n ], label=['AB'])\n t = Terms(tree)\n t.scoped_terms = {\n ('AB',): [Ref(\"secret phrase\", \"AB-a\", (9,22))]\n }\n # Term is defined in the first child\n self.assertEqual([], t.process(tree.children[0]))\n self.assertEqual(1, len(t.process(tree.children[1])))", "def process_item(self, item, spider):\n session = self.Session()\n product = Product()\n subcategory = Subcategory()\n category = Category()\n product.name = item[\"title\"]\n product.source = item[\"source\"]\n if 'rate' in item:\n product.rate = item[\"rate\"]\n if 'safety' in item:\n product.safety = item[\"safety\"]\n if 'quality' in item:\n product.quality = item[\"quality\"]\n subcategory.name = item[\"subcategory\"]\n category.name = item[\"category\"]\n\n # Check for product duplicate\n exist_product = session.query(Product).filter_by(name = product.name).first()\n if exist_product is not None:\n exist_product.rate = product.rate\n exist_product.safety = product.safety\n exist_product.quality = product.quality\n exist_product.source = product.source\n else:\n # Check for subcategory duplicate\n exist_subcategory = session.query(Subcategory).filter_by(name = subcategory.name).first()\n if exist_subcategory is not None:\n exist_subcategory.products.append(product)\n else:\n subcategory.products.append(product)\n # Check for category duplicate\n exist_category = session.query(Category).filter_by(name = category.name).first()\n if exist_category is not None:\n exist_category.subcategories.append(subcategory)\n else:\n category.subcategories.append(subcategory)\n \n try:\n session.add(product)\n except:\n session.rollback()\n raise\n\n try:\n session.commit()\n\n except:\n session.rollback()\n raise\n\n finally:\n session.close()\n\n return item", "def compare(self, subnode) -> bool:\n\t\t# OK the node if it has a different name.\n\t\tif subnode.name != self.name:\n\t\t\treturn True\n\t\t# Alter self if incorrect type\n\t\tself._get_true_type(subnode.get_typestring())\n\t\t# Add filenames\n\t\tif subnode.name == \"File\":\n\t\t\tself.filenames.update(subnode.filenames)", "def clean_parent(self):\r\n data = self.cleaned_data['parent']\r\n if data == self.instance:\r\n raise forms.ValidationError(\r\n _('A category cannot be parent of itself.'))\r\n return data", "def validate(self, node):", "def process_key(self, key, value, fields, rule_processing_key, tag_index):\r\n if fields[key].type == 'function':\r\n if not fields[key].function(rule_processing_key, tag_index):\r\n rule_response = \"Field has Invalid Value:\\t\" + str(rule_processing_key[METADATA][tag_index][key])\r\n return False, rule_response\r\n\r\n elif fields[key].type == 'regex':\r\n fields[key].attributefound()\r\n if re.fullmatch(fields[key].regex, value):\r\n fields[key].attributevalid()\r\n self.required_fields_index[fields[key].position].increment_count()\r\n elif re.fullmatch(fields[key].regex, str(value).upper()):\r\n rule_processing_key[METADATA][tag_index][key] = str(value).upper()\r\n fields[key].attributevalid()\r\n self.required_fields_index[fields[key].position].increment_count()\r\n else:\r\n rule_response = \"Field has Invalid Value:\\t\" + str(rule_processing_key[METADATA][tag_index][key])\r\n fields[key].attributeinvalid()\r\n return False, rule_response\r\n\r\n return True, \"\"", "def exists(self, words):\n current_node = self.root\n\n for word in words:\n if word not in current_node.children:\n return self.notfoundhandler\n current_node = current_node.children[word]\n\n if current_node.handler == 
None:\n return self.notfoundhandler\n return current_node.handler", "def sibling(self, v):\n # method here", "def _contains_in_self_or_parent(self, name: str) -> bool:\n return name in self", "def validate_custom_name(self, name):\n if not re.match( r'(/?[a-zA-Z_][a-zA-Z0-9_]*)+$', name):\n raise ValueError('Invalid name for node (%s)' % name)\n return", "def _test_sibl_invalid_relations_should_fail(client, json_headers, invalids,\n status_code=400):\n api_endpoint_documents = \"invenio_app_ils_relations.docid_relations\"\n api_endpoint_series = \"invenio_app_ils_relations.serid_relations\"\n\n for invalid in invalids:\n first_pid = invalid[\"first_pid\"]\n first_pid_type = invalid[\"first_pid_type\"]\n second_pid = invalid[\"second_pid\"]\n second_pid_type = invalid[\"second_pid_type\"]\n relation_type = invalid[\"relation_type\"]\n\n api_endpoint = (\n api_endpoint_documents\n if first_pid_type == \"docid\"\n else api_endpoint_series\n )\n\n url = url_for(api_endpoint, pid_value=first_pid)\n payload = {\n \"pid\": second_pid,\n \"pid_type\": second_pid_type,\n \"relation_type\": relation_type,\n }\n\n res = client.post(url, headers=json_headers, data=json.dumps(payload))\n assert res.status_code == status_code\n if status_code == 400:\n error = json.loads(res.data.decode(\"utf-8\"))\n assert \"message\" in error\n assert first_pid in error[\"message\"]\n assert second_pid in error[\"message\"]", "def validate(prop, string, node, match, entry_start, entry_end):\n return True", "def validateNameValue(value):\n ret = libxml2mod.xmlValidateNameValue(value)\n return ret", "def validate(self, instance, value):", "def validate(self, instance, value):", "def check_cls_choices_slugs(cls, slugs):\n for s in slugs:\n if settings.DJCAT_ITEM_SLUG_DELIMITER in s:\n raise ItemAttributeChoicesSlugNotValid(cls)\n\n if not len(set(slugs)) == len(slugs):\n raise ItemAttributeChoicesSlugsDuplicate(cls)", "def find_sibling(self, hashv):\n address = self.nodes[hashv]['address']\n if address == '':\n return None, None\n addr_n = int(address, 2)\n if addr_n % 2 == 1:\n sibl_n = addr_n - 1\n left_sibl = True\n else:\n sibl_n = addr_n + 1\n left_sibl = False\n sibl_address = int_to_address(sibl_n, len(address))\n if sibl_address in self.addresses:\n return self.addresses[sibl_address], left_sibl\n else:\n return hashv, False", "def validate_sub(self, sub, *args, **kwargs):\n raise NotImplementedError('Subclasses must implement this method.')", "def isValid(self, s: str) -> bool:\n stack = list()\n for c in s:\n if c in Solution.corresponding_parenthesis:\n stack.append(Solution.corresponding_parenthesis[c])\n elif not stack or stack.pop() != c:\n return False\n return not stack", "def validate(self, name):\n return name in self.dict", "def keyDependsOnKey(self, k1Name, k2Name):\n if k1Name == k2Name: return 0\n k1 = self.infoKinds[k1Name]\n k2 = self.infoKinds[k2Name]\n if k1.superNames != k2.superNames:\n allSuperK1 = set()\n toDoK1 = list(k1.superNames)\n allSuperK2 = set()\n toDoK2 = list(k2.superNames)\n while (len(toDoK1) > 0 or len(toDoK2) > 0):\n if len(toDoK1) > 0:\n el1Name = toDoK1.pop()\n if k2Name == el1Name:\n return 1\n el1 = self.infoKinds[el1Name]\n if el1.kindStr in self and not el1.kindStr in allSuperK1:\n toDoK1.append(el1.kindStr)\n for subEl in el1.superNames:\n if not subEl in allSuperK1:\n toDoK1.append(subEl)\n allSuperK1.update(el1.superNames)\n if len(toDoK2) > 0:\n el2Name = toDoK2.pop()\n if k1Name == el2Name:\n return -1\n el2 = self.infoKinds[el2Name]\n if el2.kindStr in self and not 
el2.kindStr in allSuperK2:\n toDoK2.append(el2.kindStr)\n for subEl in el2.superNames:\n if not subEl in allSuperK2:\n toDoK2.append(subEl)\n allSuperK2.update(el2.superNames)\n return None", "def mt_score_CHILD(signame):\n return ((signame, score(DE, LINCS, signame)))", "def _validate(self):\n if not self._children:\n self._leaves = {self.name}\n return self._leaves, [self.name]\n\n self._leaves = set()\n class_names = [self.name]\n total = 0.0\n for asset_class, ratio in self._children:\n assert ratio >= 0.0 and ratio <= 1.0, (\n f'Bad ratio provided to Asset Class ({ratio})')\n total += ratio\n temp_leafs, temp_classes = asset_class._validate()\n self._leaves.update(temp_leafs)\n class_names += temp_classes\n\n # Check if all percentages add up to 100%\n assert abs(total - 1) < 1e-6, (\n f'Sum of sub-classes is not 100% (actual: {total * 100}%)')\n\n return self._leaves, class_names", "def test_tag_keys_dynamic_field_validation_failure(self):\n tag_keys = [\"valid_tag\"]\n query_params = {\"bad_tag\": \"*\"}\n serializer = OCIGroupBySerializer(data=query_params, tag_keys=tag_keys)\n with self.assertRaises(serializers.ValidationError):\n serializer.is_valid(raise_exception=True)", "def validate_subclassof_field(self, subclassof_value):\n subclassof_value = dict2list(subclassof_value)\n for record in subclassof_value:\n if record[\"@id\"] not in self.all_classes:\n raise KeyError('Value of subclassof : {} is not defined in the schema.'.format(record[\"@id\"]))", "def test_tag_keys_dynamic_field_validation_success(self):\n tag_keys = [\"valid_tag\"]\n query_params = {\"valid_tag\": \"*\"}\n serializer = OCIGroupBySerializer(data=query_params, tag_keys=tag_keys)\n self.assertTrue(serializer.is_valid())", "def getName(cls, itemValue):\n for name, value in cls.iterate():\n if itemValue == value:\n return name\n\n raise ValueError('Value {0} not found in {1}'.format(itemValue, cls.__name__))", "def handle_item(item):\n\n if item.metadata.name == \"jeffsbooks\":\n if 'deepsecurity-policy' in item.metadata.labels:\n logger.info(item.metadata)\n policy_name = item.metadata.labels['deepsecurity-policy']\n assign_cluster_policy(dsm, policy_name, item)\n else:\n raise Rejection(\"Label 'deepsecurity-policy' missing from {}:{}\".format(item.metadata.namespace, item.metadata.name), 'MissingPolicy')\n\n return item #We aren't changing any data, so simply return the item.", "def check_catalog_item_choices_slugs(cls, slugs, registry):\n for m in registry.items():\n for i in m[1]['items'].items():\n for a in [a for a in i[1]['attrs'].items() if a[1]['type'] == 'choice']:\n choices = a[1].get('choices')\n if len(set(slugs) & set(choices)):\n raise ItemAttributeChoicesSlugsDuplicateInCatalogItem(cls, a[1].get('_class'))", "def traverse(self, s):\n current, parent, offset = self.root, None, 0\n for p, k in enumerate(s):\n if offset == 0:\n if k not in current.children:\n raise ConstructionError(\"Suffix {suffix} not found in the tree.\")\n parent = current\n current = current.children[k]\n offset += 1\n else:\n if k == self.get_char_at_offset(current, offset):\n offset += 1\n else:\n raise ConstructionError(\"Suffix {suffix} not found in tree.\")\n if offset == current.label.length:\n offset = 0\n\n return current, parent, offset", "def insert_suffix(self, prefix, idx):\n parent_pos = self.path_to_matching_prefix(prefix)[-1]\n\n has_inserted = False\n for child_pos in self.children(parent_pos):\n if child_pos.element()._label[0] == prefix[0]:\n # Intermediate node is added between parent and child.\n j = 0\n 
while j < len(child_pos.element()._label) and \\\n child_pos.element()._label[j] == prefix[j]:\n j += 1\n\n # Update tree structure\n intermediate_pos = self._add(parent_pos, self._SuffixNode(prefix[:j], -1))\n intermediate_node = self._validate(intermediate_pos)\n\n child_node = self._validate(child_pos)\n child_node._parent = intermediate_node\n intermediate_node._children[child_node] = child_node\n parent_node = self._validate(parent_pos)\n del parent_node._children[child_node]\n\n # Set label of child node to be unmatched part of child label.\n child_pos.element()._label = child_pos.element()._label[j:]\n # create new leaf node containing unmatched part of suffix.\n self._add(intermediate_pos, self._SuffixNode(prefix[j:], idx))\n # break from for loop.\n has_inserted = True\n break\n\n # New node is inserted as child of parent.\n if not has_inserted:\n self._add(parent_pos, self._SuffixNode(prefix, idx))", "def visit_tertiary_node(self, node, children):\n return {node.rule_name: children[0]}", "def __contains__(self, key):\n\n if type(key) != self.type:\n return False\n\n first_char = key[:1]\n others = key[1:]\n\n if first_char not in self.children:\n return False\n\n if len(first_char) != 0 and len(others) == 0:\n node = self.children[first_char]\n\n if node.value is None:\n return False\n\n return True\n else:\n return others in self.children[first_char]", "def checkUniqueChild(RelationShipList, child):\r\n if not RelationShipList:\r\n return 1\r\n for i in RelationShipList:\r\n if i[0] == child:\r\n return 0\r\n return 1", "def _handle_key_for_individual(self, key, value, i_name, relation_concept_role_mappings):\n\n # get the role (also called property)\n property_object = self.name_mapping.get(key)\n if not property_object:\n # key_name was not found\n return None\n\n if isinstance(value, str):\n value_object = self.name_mapping.get(value)\n else:\n value_object = None\n accept_unquoted_strs = str in property_object.range\n\n if property_object in self.relation_concept_main_roles:\n if relation_concept_role_mappings is not None:\n # save the relevant information for later processing. value is still a unparsed\n relation_concept_role_mappings[property_object] = value\n return None\n elif isinstance(value, list):\n property_values = self.get_objects_from_sequence(value, accept_unquoted_strs)\n elif isinstance(value, str) and value_object:\n property_values = value_object\n elif isinstance(value, (float, int, str)):\n # todo: raise exception for unallowed unquoted strings here\n property_values = value\n else:\n msg = (\n f\"Invalid type ({type(value)}) for property '{key}' of individual '{i_name}'.\"\n f\"Expected int, float, str or list.\"\n )\n raise TypeError(msg)\n\n return {key: property_values}", "def _validateChildren(self) -> None:\n\t\t# Check whether we already are in validation the children (ie prevent unfortunate recursion by the Dispatcher)\n\t\tif self.__validating:\n\t\t\treturn\n\t\tself.__validating = True\n\n\t\t# Only get the CINs in raw format. 
Instantiate them as resources if needed\n\t\tcinsRaw = cast(List[JSON], sorted(CSE.storage.directChildResources(self.ri, T.CIN, raw = True), key = lambda x: x['ct']))\n\t\tcni = len(cinsRaw)\t\t\t\n\t\t\t\n\t\t# Check number of instances\n\t\tif (mni := self.mni) is not None:\n\t\t\twhile cni > mni and cni > 0:\n\t\t\t\t# Only instantiate the <cin> when needed here for deletion\n\t\t\t\tcin = Factory.resourceFromDict(cinsRaw[0]).resource\n\t\t\t\tL.isDebug and L.logDebug(f'cni > mni: Removing <cin>: {cin.ri}')\n\t\t\t\t# remove oldest\n\t\t\t\t# Deleting a child must not cause a notification for 'deleteDirectChild'.\n\t\t\t\t# Don't do a delete check means that CNT.childRemoved() is not called, where subscriptions for 'deleteDirectChild' is tested.\n\t\t\t\tCSE.dispatcher.deleteResource(cin, parentResource = self, doDeleteCheck = False)\n\t\t\t\tdel cinsRaw[0]\t# Remove from list\n\t\t\t\tcni -= 1\t# decrement cni when deleting a <cin>\n\n\t\t# Calculate cbs of remaining cins\n\t\tcbs = sum([ each['cs'] for each in cinsRaw])\n\n\t\t# check size\n\t\tif (mbs := self.mbs) is not None:\n\t\t\twhile cbs > mbs and cbs > 0:\n\t\t\t\t# Only instantiate the <cin> when needed here for deletion\n\t\t\t\tcin = Factory.resourceFromDict(cinsRaw[0]).resource\n\t\t\t\tL.isDebug and L.logDebug(f'cbs > mbs: Removing <cin>: {cin.ri}')\n\t\t\t\t# remove oldest\n\t\t\t\tcbs -= cin.cs\n\t\t\t\t# Deleting a child must not cause a notification for 'deleteDirectChild'.\n\t\t\t\t# Don't do a delete check means that CNT.childRemoved() is not called, where subscriptions for 'deleteDirectChild' is tested.\n\t\t\t\tCSE.dispatcher.deleteResource(cin, parentResource = self, doDeleteCheck = False)\n\t\t\t\tdel cinsRaw[0]\t# Remove from list\n\t\t\t\tcni -= 1\t# decrement cni when deleting a <cin>\n\n\t\t# Some attributes may have been updated, so store the resource \n\t\tself['cni'] = cni\n\t\tself['cbs'] = cbs\n\t\tself.dbUpdate()\n\t\n\t\t# End validating\n\t\tself.__validating = False", "def _testKeySubNsAdd(self):\n if len(self._getKeyList()) == 0 and len(self._getSubNsList()) == 0:\n parent = self.parent()\n if parent:\n parent._newChild(self.path[-1])", "def validate(self, attrs):\n tag_name = attrs['tag_name']\n club = attrs['club']\n request = self.context['request']\n profile = UserProfile.objects.get(user=request.user)\n if (club not in profile.get_club_privileges() and\n club not in profile.get_workshop_privileges().values_list('club', flat=True)):\n raise serializers.ValidationError(\"You are not allowed to create tag for this club\")\n if Tag.objects.filter(tag_name=tag_name, club=club):\n raise serializers.ValidationError(\"The tag already exists for this club\")\n return attrs", "def _conversion_checks(item, keys, box_config, check_only=False, pre_check=False):\n if box_config['box_duplicates'] != 'ignore':\n if pre_check:\n keys = list(keys) + [item]\n key_list = [(k, _safe_attr(k, camel_killer=box_config['camel_killer_box'], replacement_char=box_config['box_safe_prefix'])) for k in keys]\n if len(key_list) > len(set(x[1] for x in key_list)):\n seen = set()\n dups = set()\n for x in key_list:\n if x[1] in seen:\n dups.add('{0}({1})'.format(x[0], x[1]))\n seen.add(x[1])\n if box_config['box_duplicates'].startswith('warn'):\n warnings.warn('Duplicate conversion attributes exist: {0}'.format(dups))\n else:\n raise BoxError('Duplicate conversion attributes exist: {0}'.format(dups))\n if check_only:\n return\n for k in keys:\n if item == _safe_attr(k, camel_killer=box_config['camel_killer_box'], 
replacement_char=box_config['box_safe_prefix']):\n return k", "def pre_validated(self, struct, item, value):\n return value", "def validate(self, name, values):\r\n \r\n pass", "def validate_sub_block_attributes(self, value, prop_name):\n cbi = self.cbi\n if cbi is None:\n return value\n if len(value) != cbi[-1]:\n raise properties.ValidationError(\n \"{} attributes must have length equal to \"\n \"total number of sub blocks\".format(prop_name),\n prop=prop_name,\n instance=self,\n reason=\"invalid\",\n )\n return value", "def test_getSiblingExists(self):\n d = self.contentStore1.getSiblingObject(self.testObject.objectId)\n def _cb(o):\n self.o = o\n d.addCallback(_cb)\n self.assertIdentical(self.o, self.testObject)", "def post_process_cif_category(cif, category_name):\n if not cif[category_name]: # nothing in the category => should be removed\n cif.pop(category_name)\n return\n\n for k, v in cif[category_name].items():\n if isinstance(v, list):\n if len(v) == 1:\n cif[category_name][k] = v[0]\n\n if not v:\n cif.pop(category_name)\n return", "def __call__(self, **kwargs):\n lst = [s for s in self.parent if s.name_ == self.name_ and (\n all([k in s and s[k][0] == v for k, v in kwargs.items()]) or\n (all([k[0] == 'v' for k in kwargs]) and all([s[int(k[1:])] == v for k, v in kwargs.items()]))\n )]\n assert len(lst) == 1\n return lst[0]", "def validate(self):\r\n # Check KeyError\r\n try:\r\n self.fields[\"product_name_fr\"]\r\n self.fields[\"generic_name\"]\r\n self.fields[\"url\"]\r\n self.fields[\"nutrition_grade_fr\"]\r\n self.fields[\"categories\"]\r\n self.fields[\"stores\"]\r\n self.fields[\"brands\"]\r\n except KeyError:\r\n return False\r\n\r\n # Check empty field and lenght of generic_name\r\n for key, value in self.fields.items():\r\n if value == '':\r\n return False\r\n break\r\n if key == \"generic_name\":\r\n if len(value) > 255:\r\n return False\r\n\r\n try:\r\n self.categories = ProductFromApiToDatabase.clean_tag(\r\n self.fields[\"categories\"], 100)\r\n self.stores = ProductFromApiToDatabase.clean_tag(\r\n self.fields[\"stores\"], 45)\r\n self.brands = ProductFromApiToDatabase.clean_tag(\r\n self.fields[\"brands\"], 45)\r\n self.category_index = self.categories.index(self.category)\r\n except KeyError:\r\n return False\r\n except ValueError:\r\n return False\r\n except AttributeError:\r\n self.errors += 1\r\n print(self.errors)\r\n return False", "def sibling(self, segment):\n return self.__class__(self._url.sibling(_encode_reserved(segment)))", "def __delitem__(self, x):\r\n x = str(x) # convert int to string\r\n if (len(x) > 1) and all([c in 'LR123456789' for c in x]): # binary string of the form LLLRLR or 1213 (or mixed)\r\n del self._namedkid[x[0]][x[1:]]\r\n elif x in self._namedkid:\r\n child = self._namedkid[x]\r\n # Delete from name dictionary\r\n for name, kid in self._namedkid.items():\r\n if kid is child: del self._namedkid[name]\r\n # Delete from list of children\r\n for i, kid in enumerate(self.children):\r\n if kid is child: del self.children[i]\r\n else:\r\n raise AttributeError, \"The subtree \" + x + \" does not exist\"", "def _validate_tags(\n instance: typing.Dict[str, typing.Any],\n schema: typing.Dict[str, typing.Any], path: typing.List[str],\n strict: bool = False\n) -> None:\n if not isinstance(instance, dict):\n raise ValidationError('instance must be dict', path)\n valid_keys = {'_type', 'tags'}\n required_keys = valid_keys\n schema_keys = set(instance.keys())\n invalid_keys = schema_keys - valid_keys - opt_federation_keys\n if invalid_keys:\n raise 
ValidationError('unexpected keys in schema: {}'.format(invalid_keys), path)\n missing_keys = required_keys - schema_keys\n if missing_keys:\n raise ValidationError('missing keys in schema: {}'.format(missing_keys), path)\n if instance['_type'] != 'tags':\n raise ValidationError('expected _type \"tags\"', path)\n if not isinstance(instance['tags'], list):\n raise ValidationError('tags must be list', path)\n errors = []\n tags = []\n for index, item in enumerate(instance['tags']):\n if not isinstance(item, str):\n errors.append(ValidationError('invalid tag type: {}'.format(type(item)), path + ['tags', str(index)]))\n elif item in tags:\n errors.append(ValidationError('duplicate tag: {}'.format(item), path + ['tags', str(index)]))\n elif item.lower() != item:\n errors.append(ValidationError('tag not lowercase: {}'.format(item), path + ['tags', str(index)]))\n elif any(c not in 'abcdefghijklmnopqrstuvwxyz0123456789_-รครถรผรŸ' for c in item):\n errors.append(ValidationError('tag contains invalid character: {}'.format(item), path + ['tags', str(index)]))\n elif strict and all(c in string.digits for c in item) and not flask.current_app.config['ENABLE_NUMERIC_TAGS']:\n errors.append(ValidationError('numeric tags are not supported', path + ['tags', str(index)]))\n else:\n tags.append(item)\n\n if len(errors) == 1:\n raise errors[0]\n elif len(errors) > 1:\n raise ValidationMultiError(errors)", "def validate_unique_domain(self, cleaned_data):\n if self.instance:\n if self._meta.model.objects.exclude(id=self.instance.id).\\\n filter(user=self.request.user, domain=cleaned_data.get('domain')).exists():\n self._errors[\"domain\"] = self.error_class([ugettext('Shop with this name already exists')])\n del cleaned_data[\"domain\"]", "def has_name(self, name: str) -> bool:\n return name in self.child_tags", "def subNode(self, name):\n for nd in self.kids:\n if nd.name == name:\n return nd\n raise LookupError( 'name not found \"' + name + '\"' )", "def test_translate_struct_dict_nonunique_key(self):\n root = netapp_api.NaElement('root')\n child = [{'e1': 'v1', 'e2': 'v2'}, {'e1': 'v3'}]\n root.translate_struct(child)\n self.assertEqual(len(root.get_children()), 3)\n children = root.get_children()\n for c in children:\n if c.get_name() == 'e1':\n self.assertIn(c.get_content(), ['v1', 'v3'])\n else:\n self.assertEqual(c.get_content(), 'v2')", "def _validate_entry(\n self,\n the_id: str,\n the_name: str,\n the_parent: str) -> Tuple[str, str, str]:\n\n # validate inputs\n if not (isinstance(the_id, str) and isinstance(the_name, str) and isinstance(the_parent, str)):\n raise TypeError(\n 'the_id, the_name, and the_parent must all be string type, got '\n 'types {}, {}, {}'.format(type(the_id), type(the_name), type(the_parent)))\n the_id = the_id.strip()\n the_name = the_name.strip()\n the_parent = the_parent.strip()\n\n # verify that values are permitted and sensible\n if the_id == '':\n raise ValueError('the_id value `` is reserved.')\n if the_name == '':\n raise ValueError('the_name value `` is not permitted.')\n if the_id == the_parent:\n raise ValueError('the_id cannot be the same as the_parent.')\n\n # try to determine parent from name if not a valid id\n if the_parent != '' and the_parent not in self.labels:\n prospective_parent = self.get_id_from_name(the_parent)\n if prospective_parent is None:\n raise ValueError('the_parent {} matches neither an existing id or name.'.format(the_parent))\n the_parent = prospective_parent\n\n return the_id, the_name, the_parent", "def validateNamesValue(value):\n ret = 
libxml2mod.xmlValidateNamesValue(value)\n return ret", "def _validate_value(\n cls,\n attribute: models.Attribute,\n value_data: dict,\n is_swatch_attr: bool,\n ):\n value = value_data.get(\"name\")\n if value is None:\n raise ValidationError(\n {\n cls.ATTRIBUTE_VALUES_FIELD: ValidationError(\n \"The name field is required.\",\n code=AttributeErrorCode.REQUIRED.value,\n )\n }\n )\n\n if is_swatch_attr:\n cls.validate_swatch_attr_value(value_data)\n else:\n cls.validate_non_swatch_attr_value(value_data)\n\n slug_value = value\n value_data[\"slug\"] = slugify(unidecode(slug_value))\n\n attribute_value = models.AttributeValue(**value_data, attribute=attribute)\n try:\n attribute_value.full_clean()\n except ValidationError as validation_errors:\n for field, err in validation_errors.error_dict.items():\n if field == \"attribute\":\n continue\n errors = []\n for error in err:\n error.code = AttributeErrorCode.INVALID.value\n errors.append(error)\n raise ValidationError({cls.ATTRIBUTE_VALUES_FIELD: errors})", "def check_name_duplication(self, other):\n self_names = set(\n [node.get(\"name\") for node in self.root.findall(\"./*[@name]\")])\n other_names = set(\n [node.get(\"name\") for node in other.root.findall(\"./*[@name]\")])\n if len(set.intersection(self_names, other_names)):\n raise NameDuplicationError()", "def test_translate_struct_dict_nonunique_key(self):\n root = netapp_api.NaElement('root')\n child = [{'e1': 'v1', 'e2': 'v2'}, {'e1': 'v3'}]\n root.translate_struct(child)\n self.assertEqual(3, len(root.get_children()))\n children = root.get_children()\n for c in children:\n if c.get_name() == 'e1':\n self.assertIn(c.get_content(), ['v1', 'v3'])\n else:\n self.assertEqual('v2', c.get_content())", "def _validate_link_name(klass, name):\n split_name = name.split(\"__\")\n if len(split_name) > 1:\n relationship_name = split_name[0]\n if relationship_name not in klass.__relationships_fields_set__:\n raise ValueError(f\"'{relationship_name}' is not a valid relationship for {klass.__name__}.\")", "def _test_entry_to_criterion_match(self, entry, criterion):\r\n parent = self._get_parent_node(entry, criterion[\"key\"])\r\n parent_type = type(parent)\r\n if parent_type == list:\r\n unicriterions = { k.decode('utf8', \"replace\"): v.decode('utf8', \"replace\") for k, v in criterion[\"value\"].items() }\r\n for item in parent:\r\n if set(unicriterions.items()) <= set({ k: v for k, v in item.items() }.items()):\r\n return True\r\n return False\r\n if parent_type == str or int:\r\n return str(parent) == str(criterion[\"value\"])", "def _validate_duplicate_names(res_data, name, _id=None):\n if _id:\n for data in res_data:\n if data.get(\"name\") == name and data.get(\"id\") != _id:\n return False\n return True\n else:\n for data in res_data:\n if data.get(\"name\") == name:\n return False\n return True", "def superbubble_detection(self, sorted_node):\n\n source = sorted_node[0]\n path = []\n bubble = {}\n\n # transverse topologically sorted node\n for i in sorted_node: \n \n count_prefix = 0\n # check if it is the exit of bubble\n for node in self.prefix[i]:\n if node in path:\n count_prefix += 1\n\n # if it the potential exit of the bubble\n if count_prefix == 2:\n # if it the last node\n if len(self.suffix[i]) == 0:\n bubble[source] = i\n break\n # if it is the real exit\n for temp in self.suffix[i]:\n if len(self.prefix[temp]) == 1:\n # add source and target to dictionary \n bubble[source] = i\n source = i\n path.clear()\n break\n\n for j in self.suffix[i]:\n if len(self.suffix[i]) == 1 and 
len(path) == 0:\n source = j\n\n if j != source:\n path.append(j)\n\n return bubble", "def test_add_category_slug_special(self):\n self.add_success(self.test_data['polo-shirts'])\n assert self.verify_object(dict(name='Polo Shirts', slug='polo-shirts'))\n\n self.add_fail(self.test_data['symbols'], '')\n\n self.add_success('Add')", "def validate_category(self, data):\n try:\n if data['category_name'] == \"\":\n return \"Category_name cannot be blank\"\n if 'category_name' not in data.keys():\n return \"Enter category_name\"\n if not re.match(r\"^[a-zA-Z0-9 _]*$\", data['category_name']):\n return \"category name should contain alphanumerics only\"\n if len(data.keys()) > 1:\n return \"Invalid fields added\"\n else:\n return \"category_valid\"\n except KeyError:\n return \"Add required keys\"", "def _is_valid_for(self, object, name, value):\n if self.is_valid_for(value):\n return value\n\n self.error(object, name, value)", "def _is_valid_for(self, object, name, value):\n if self.is_valid_for(value):\n return value\n\n self.error(object, name, value)", "def validate_present(self, obj):\n for k, v in obj.items():\n func = self.validation.get(k)\n if func:\n func(k, v)", "def _checkSigns(self, node):\n\n # Check whether I am the correct node\n try:\n if node.name == MAGICVAR:\n # If i am, return the \"direct hit\" code.\n return 1\n except AttributeError:\n pass\n\n # We keep an index to see what child we are checking. This \n # is important for binary operations, were we are only interested\n # in the second part. (\"a-10\" has to change to \"a+10\", \n # but \"10-a\" shouldn't change to \"+10-a\")\n index = 0\n\n # Recursively check my children\n for child in ast.iter_child_nodes( node ):\n retVal = self._checkSigns( child )\n # Direct hit. The child I just searched contains the magicvar.\n # Check whether this node is one of the special cases.\n if retVal == 1:\n # Unary substitution.\n if isinstance(node, UnarySub):\n self.negative = True\n self.unary = True\n # Binary substitution. Only the second child is of importance.\n elif isinstance(node, Sub) and index == 1:\n self.negative = True\n # Binary addition. Only the second child is of importance.\n elif isinstance(node, Add) and index == 1:\n self.add = True\n # Return the \"bail out\" code, whether we found some\n # special case or not. There can only be one magicvar in the\n # code, so once that is found we can stop looking.\n return 0\n # If the child returns a bail out code, we leave this routine\n # without checking the other children, passing along the\n # bail out code.\n elif retVal == 0:\n return 0 # Nothing more needs to be done.\n\n # Next child.\n index += 1\n\n # We searched all children, but couldn't find any magicvars. 
\n return -1", "def validate(self, data):\n if HashTag.objects.filter(name=data[\"hash_tag\"][\"name\"]).first() is None:\n raise serializers.ValidationError(\n {\"hash_tag\": data[\"hash_tag\"][\"name\"] + \" \" + _(\"HashTag does not exist.\")}\n )\n\n return data", "def series_statement_added_entry_corporate_name(self, key, value):\n indicator_map1 = {\n \"0\": \"Inverted name\",\n \"1\": \"Jurisdiction name\",\n \"2\": \"Name in direct order\"}\n indicator_map2 = {\n \"0\": \"Main entry not represented by pronoun\",\n \"1\": \"Main entry represented by pronoun\"}\n field_map = {\n 'p': 'name_of_part_section_of_a_work',\n '6': 'linkage',\n 'u': 'affiliation',\n 'b': 'subordinate_unit',\n '4': 'relator_code',\n 'x': 'international_standard_serial_number',\n 'n': 'number_of_part_section_meeting',\n 'a': 'corporate_name_or_jurisdiction_name_as_entry_element',\n '8': 'field_link_and_sequence_number',\n 'k': 'form_subheading',\n 't': 'title_of_a_work',\n 'e': 'relator_term',\n 'l': 'language_of_a_work',\n 'c': 'location_of_meeting',\n 'g': 'miscellaneous_information',\n 'f': 'date_of_a_work',\n 'd': 'date_of_meeting_or_treaty_signing',\n 'v': 'volume_sequential_designation',\n }\n\n order = utils.map_order(field_map, value)\n\n if key[3] in indicator_map1:\n order.append('type_of_corporate_name_entry_element')\n\n if key[4] in indicator_map2:\n order.append('pronoun_represents_main_entry')\n\n return {\n '__order__': tuple(order) if len(order) else None,\n 'name_of_part_section_of_a_work': utils.force_list(\n value.get('p')\n ),\n 'linkage': value.get('6'),\n 'affiliation': value.get('u'),\n 'subordinate_unit': utils.force_list(\n value.get('b')\n ),\n 'relator_code': utils.force_list(\n value.get('4')\n ),\n 'international_standard_serial_number': value.get('x'),\n 'number_of_part_section_meeting': utils.force_list(\n value.get('n')\n ),\n 'corporate_name_or_jurisdiction_name_as_entry_element': value.get('a'),\n 'field_link_and_sequence_number': utils.force_list(\n value.get('8')\n ),\n 'form_subheading': utils.force_list(\n value.get('k')\n ),\n 'title_of_a_work': value.get('t'),\n 'relator_term': utils.force_list(\n value.get('e')\n ),\n 'language_of_a_work': value.get('l'),\n 'location_of_meeting': value.get('c'),\n 'miscellaneous_information': value.get('g'),\n 'date_of_a_work': value.get('f'),\n 'date_of_meeting_or_treaty_signing': utils.force_list(\n value.get('d')\n ),\n 'volume_sequential_designation': value.get('v'),\n 'type_of_corporate_name_entry_element': indicator_map1.get(key[3]),\n 'pronoun_represents_main_entry': indicator_map2.get(key[4]),\n }", "def create_validation_function(name_of_slot):\n def validate_slot(\n self,\n value: Text,\n dispatcher: CollectingDispatcher,\n tracker: Tracker,\n domain: Dict[Text, Any],\n ) -> Dict[Text, Any]:\n \"\"\"Validate user input.\"\"\"\n\n if value.lower() in self.answers_db()[name_of_slot]:\n # validation succeeded, set the value of the slot to \n # user-provided value\n return {name_of_slot: value}\n else:\n # find the closest answer by some measure (edit distance?)\n choices = self.answers_db()[name_of_slot]\n answer = process.extractOne(value.lower(), choices)\n\n # check to see if distnace is greater than some threshold\n if answer[1] < 45:\n # if so, set slot to \"other\"\n return {name_of_slot: \"other\"}\n else:\n return {name_of_slot: answer[0]}\n \n return(validate_slot)", "def _uniqueness_check(self, cls, unique_in = None, **attr):\n # under the same datasource, only 1 subsystem, 1 neuropil, 1 tract of the name can exist\n # 
under the same neuropil, only 1 neuron of the name can exist\n # multiple (collections of) synapses can exist between two neurons\n if cls == 'Species':\n tmp = self.sql_query(\n \"\"\"select from Species where (name = \"{name}\" or \"{name}\" in synonyms) and stage = \"{stage}\" and sex = \"{sex}\" \"\"\".format(\n name = attr['name'], stage = attr['stage'], sex = attr['sex']))\n if len(tmp):\n objs = tmp.node_objs\n if attr['name'] in [obj.name for obj in objs]:\n raise NodeAlreadyExistError(\"\"\"Species {name} at {stage} stage ({sex}) already exists with rid = {rid}\"\"\".format(\n name = attr['name'], stage = attr['stage'], sex = attr['sex'], rid = objs[0]._id))\n else:\n for obj in objs:\n if attr['name'] in obj.synonyms:\n raise NodeAlreadyExistError(\n \"\"\"Species {name} (as its synonym) at {stage} stage ({sex}) already exists with rid = {rid}, use name {formalname} instead\"\"\".format(\n name = attr['name'], stage = attr['stage'], sex = attr['sex'], rid = obj._id, formalname = obj.name))\n elif cls == 'DataSource':\n objs = self.find_objs('DataSource', name=attr['name'], version=attr['version'])\n #if self.exists(cls, name = attr['name'], version = attr['version']):\n if len(objs):\n raise NodeAlreadyExistError(\"\"\"{} Node with attributes {} already exists with rid = {}\"\"\".format(\n cls, ', '.join([\"\"\"{} = {}\"\"\".format(key, value) \\\n for key, value in attr.items()]), objs[0]._id))\n elif cls == 'Neurotransmitter':\n tmp = self.sql_query(\n \"\"\"select from Neurotransmitter where name = \"{name}\" or \"{name}\" in synonyms\"\"\".format(\n name = attr['name']))\n if len(tmp):\n objs = tmp.node_objs\n if attr['name'] in [obj.name for obj in objs]:\n raise NodeAlreadyExistError(\"\"\"Neurotransmitter {name} already exists with rid = {rid}\"\"\".format(\n name = attr['name'], rid = objs[0]._id))\n return objs\n else:\n for obj in objs:\n if attr['name'] in obj.synonyms:\n raise NodeAlreadyExistError(\n \"\"\"Neurotransmitter {name} (as its synonym) already exists with rid = {rid}, use name {formalname} instead\"\"\".format(\n name = attr['name'], rid = obj._id, formalname = obj.name))\n elif cls in ['Subsystem', 'Neuropil', 'Subregion', 'Tract']:\n # TODO: synonyms are not checked against existing names and synonyms\n if not isinstance(unique_in, models.DataSource):\n raise TypeError('To check the uniqueness of a {} instance, unique_in must be a DataSource object'.format(cls))\n tmp = self.sql_query(\n \"\"\"select from (select from {cls} where name = \"{name}\" or \"{name}\" in synonyms) let $q = (select from (select expand($parent.$parent.current.in('Owns'))) where @class='{ucls}' and @rid = {rid}) where $q.size() = 1\"\"\".format(\n rid = unique_in._id, cls = cls, name = attr['name'], ucls = unique_in.element_type))\n if len(tmp):\n objs = tmp.node_objs\n if attr['name'] in [obj.name for obj in objs]:\n raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists under DataSource {ds} version {version}, rid = {rid}\"\"\".format(\n cls = cls, name = attr['name'],\n ds = unique_in.name,\n version = unique_in.version, rid = objs[0]._id))\n else:\n for obj in objs:\n if attr['name'] in obj.synonyms:\n raise NodeAlreadyExistError(\n \"\"\"{cls} {name} already exists as a synonym of {cls} {formalname} under DataSource {ds} version {version}, rid = {rid}\"\"\".format(\n cls = cls, name = attr['name'], formalname = obj.name,\n ds = unique_in.name,\n version = unique_in.version, rid = obj._id))\n # Alternatively, try:\n # tmp = self.sql_query(\n # \"\"\"select from {cls} 
where name = \"{name}\" or \"{name}\" in synonyms\"\"\".format(\n # cls = cls, name = attr['name']))\n # ds = tmp.owned_by(cls = 'DataSource').has(rid = datasource)\n # if len(ds):\n # tmp1 = tmp.has(name = attr['name'])\n # if len(tmp1.owned_by(cls = 'DataSource').has(rid = datasource)):\n # raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists under DataSource {ds} version {version}\"\"\".format(\n # cls = cls, name = attr['name'],\n # ds = datasource.name,\n # version = datasource.version))\n # else:\n # all_synonym_objs = (tmp - tmp1).node_objs\n # for obj in objs:\n # if len(QueryWrapper.from_rids(obj._id).has(cls = 'DataSource').has(rid = datasource)):\n # raise NodeAlreadyExistError(\n # \"\"\"{cls} {name} already exists as a synonym of {cls} {formalname} under DataSource {ds} version {version}\"\"\".format(\n # cls = cls, name = attr['name'], formalname = obj.name,\n # ds = datasource.name,\n # version = datasource.version))\n\n # Alternatively 2, try: (will be slow when it has a lot of Owns edges)\n # tmp = sql_query(\n # \"\"\"\n # select from (select expand(out('Owns')[@class = \"{cls}\"]) from {rid}) where name = \"{name}\" or \"{name}\" in synonyms\n # \"\"\"\n # )\n # elif cls in ['Subregion']:\n # if not isinstance(unique_in, models.Neuropil):\n # raise TypeError('To check the uniqueness of a {} instance, unique_in must be a Neuropil object'.format(cls))\n # tmp = self.sql_query(\n # \"\"\"select from (select from {cls} where name = \"{name}\" or \"{name}\" in synonyms) let $q = (select from (select expand($parent.$parent.current.in('Owns'))) where @class='ucls' and @rid = {rid}) where $q.size() = 1\"\"\".format(\n # rid = unique_in._id, cls = cls, name = attr['name'], ucls = unique_in.element_type))\n # if len(tmp):\n # objs = tmp.node_objs\n # if attr['name'] in [obj.name for obj in objs]:\n # raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists under Neuropil {ds}\"\"\".format(\n # cls = cls, name = attr['name'],\n # ds = unique_in.name))\n # else:\n # for obj in objs:\n # if name in obj.synonyms:\n # raise NodeAlreadyExistError(\n # \"\"\"{cls} {name} already exists as a synonym of {cls} {formalname} under Neuropil {ds}\"\"\".format(\n # cls = cls, name = attr['name'], formalname = obj.name,\n # ds = unique_in.name))\n elif cls in ['Neuron', 'NeuronFragment']:\n # TODO: synonyms are not checked against existing names and synonyms\n if not isinstance(unique_in, models.DataSource):\n raise TypeError('To check the uniqueness of a {} instance, unique_in must be a DataSource object'.format(cls))\n tmp = self.sql_query(\n \"\"\"select from (select from {cls} where uname = \"{name}\") let $q = (select from (select expand($parent.$parent.current.in('Owns'))) where @class='{ucls}' and @rid = {rid}) where $q.size() = 1\"\"\".format(\n rid = unique_in._id, cls = cls, name = attr['name'], ucls = unique_in.element_type))\n if len(tmp):\n objs = tmp.node_objs\n raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists with rid = {rid}, under DataSource {ds} version {version}\"\"\".format(\n cls = cls, name = attr['name'], rid = objs[0]._id,\n ds = unique_in.name,\n version = unique_in.version))\n elif cls == 'Circuit':\n if not isinstance(unique_in, models.DataSource):\n raise TypeError('To check the uniqueness of a {} instance, unique_in must be a DataSource object'.format(cls))\n tmp = self.sql_query(\n \"\"\"select from (select from {cls} where name = \"{name}\") let $q = (select from (select expand($parent.$parent.current.in('Owns'))) where @class='{ucls}' and 
@rid = {rid}) where $q.size() = 1\"\"\".format(\n rid = unique_in._id, cls = cls, name = attr['name'], ucls = unique_in.element_type))\n if len(tmp):\n objs = tmp.node_objs\n if attr['name'] in [obj.name for obj in objs]:\n raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists under DataSource {ds} version {version}, rid = {rid}\"\"\".format(\n cls = cls, name = attr['name'],\n ds = unique_in.name,\n version = unique_in.version, rid = objs[0]._id))\n elif cls == 'ArborizationData':\n if not isinstance(unique_in, (models.Neuron, models.Synapse)):\n raise TypeError('To check the uniqueness of a ArborizationData instance, unique_in must be a Neuron or a Synapse object')\n tmp = self.sql_query(\n \"\"\"select from (select expand(out(HasData)) from {rid}) where @class = 'ArborizationData' \"\"\".format(rid = unique_in._id))\n if len(tmp):\n raise NodeAlreadyExistError(\"\"\"ArborizationData already exists for {node} {uname} with rid = {rid}. Use NeuroArch.update_{node}_arborization to update the record\"\"\".format(\n node = unique_in.element_type.lower(), rid = tmp.node_objs[0]._id, uname = unique_in.uname))\n else:\n raise TypeError('Model type not understood.')\n return True", "def minidom_namednodemap_has_key(self, key): \n if type(key) is types.TupleType:\n return self._attrsNS.has_key(key)\n else:\n return self._attrs.has_key(key)" ]
[ "0.45828247", "0.4572428", "0.45380762", "0.44724888", "0.42696497", "0.42584473", "0.42416134", "0.41420826", "0.41245428", "0.41224957", "0.4097658", "0.4080648", "0.40750405", "0.40694186", "0.40613383", "0.4061312", "0.40393326", "0.40243196", "0.3988627", "0.3969611", "0.39599988", "0.3955689", "0.39445496", "0.39434463", "0.39084497", "0.3879124", "0.3877231", "0.38762397", "0.3867846", "0.38664564", "0.3866444", "0.38636956", "0.385796", "0.38324332", "0.38262537", "0.38123137", "0.380791", "0.37841558", "0.3778691", "0.37780994", "0.3773963", "0.3773963", "0.37711117", "0.3765519", "0.37638947", "0.37613028", "0.37607405", "0.3757254", "0.3753696", "0.37533727", "0.3742191", "0.37386817", "0.37331378", "0.37329784", "0.37205487", "0.37197983", "0.3715381", "0.37152204", "0.3709749", "0.3707622", "0.37051287", "0.36986706", "0.36981675", "0.36979324", "0.36932805", "0.3692403", "0.369152", "0.36883092", "0.36866465", "0.36820823", "0.36785123", "0.3675143", "0.3671919", "0.36702493", "0.3667211", "0.36661735", "0.36587957", "0.3657813", "0.3650015", "0.3646825", "0.36451542", "0.36451465", "0.36419445", "0.36345893", "0.36281735", "0.36275214", "0.36246714", "0.36237478", "0.3623455", "0.36227998", "0.362077", "0.3613928", "0.3613928", "0.3599225", "0.35920364", "0.35853738", "0.3582225", "0.3580652", "0.3576123", "0.35699975" ]
0.62593156
0
89 / 89 test cases passed.
def maxTurbulenceSize(self, arr: List[int]) -> int:
    if len(arr) == 1:
        return 1
    ret = 1
    tmp_ret = 0
    last_flag = None
    for i in range(1, len(arr)):
        if arr[i] == arr[i - 1]:
            current_flag = None
        else:
            current_flag = arr[i] > arr[i - 1]
        if current_flag is None:
            ret = max(ret, tmp_ret)
            tmp_ret = 1
        elif last_flag is None or last_flag == current_flag:
            ret = max(ret, tmp_ret)
            tmp_ret = 2
        else:
            tmp_ret += 1
        last_flag = current_flag
    return max(ret, tmp_ret)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_T01():", "def test_T4():", "def test_T4():", "def exercise_b2_106():\r\n pass", "def exercise_b2_113():\r\n pass", "def test_basic(self):\n self.assertEqual(solution(\"\"\"11111\n19991\n19191\n19991\n11111\"\"\"), 6)\n self.assertEqual(solution(\"\"\"5483143223\n2745854711\n5264556173\n6141336146\n6357385478\n4167524645\n2176841721\n6882881134\n4846848554\n5283751526\"\"\"), 195)", "def exercise_b2_53():\r\n pass", "def exercise_b2_107():\r\n pass", "def test_4_4_1_1(self):\n pass", "def exercise_b2_70():\r\n pass", "def test_T3():", "def test_T3():", "def exercise_b2_56():\r\n pass", "def exercise_b2_52():\r\n pass", "def exercise_b2_93():\r\n pass", "def exercise_b2_69():\r\n pass", "def exercise_b2_98():\r\n pass", "def test_4():", "def test_T0():", "def exercise_b2_39():\r\n pass", "def exercise_b2_95():\r\n pass", "def test_task107_main_logic(number, expected_value):\r\n assert algo.Task107.main_logic(number) == expected_value", "def test_task108_main_logic(number, expected_value):\r\n assert algo.Task108.main_logic(number) == expected_value", "def test_task87_main_logic(number, quantity, expected_value):\r\n assert algo.Task87.main_logic(number, quantity) == expected_value", "def test_2():\n results = base_tests()\n correct = {\n \"Consequence\": \"synonymous_variant\",\n \"Codons\": \"tgC/tgT\",\n \"Amino_acids\": \"C\",\n \"Gene\": \"ENSG00000130164\",\n \"SYMBOL\": \"LDLR\",\n \"Feature\": \"ENST00000558013\",\n \"EXON\": \"2/18\",\n \"PolyPhen\": \"\",\n \"SIFT\": \"\",\n \"Protein_position\": \"27/858\",\n 'BIOTYPE\"': \"protein_coding\",\n }\n assert results[0] == correct", "def exercise_b2_82():\r\n pass", "def test_task243b_main_logic(number, expected_value):\r\n assert algo.Task243b.main_logic(number) == expected_value", "def exercise_b2_26():\r\n pass", "def test_apply_endorsements(self):", "def test_task331b_main_logic(number, expected_value):\r\n assert algo.Task331b.main_logic(number) == expected_value", "def test_task243a_main_logic(number, expected_value):\r\n assert algo.Task243a.main_logic(number) == expected_value", "def test_task88a_main_logic(number, expected_value):\r\n assert algo.Task88a.main_logic(number) == expected_value", "def test_task331a_main_logic(number, expected_value):\r\n assert algo.Task331a.main_logic(number) == expected_value", "def test_5():", "def test_3():", "def test_anglicize1to19():\n print('Testing anglicize1to19')\n\n result = funcs.anglicize1to19(1)\n introcs.assert_equals(\"one\", result)\n\n result = funcs.anglicize1to19(2)\n introcs.assert_equals(\"two\", result)\n\n result = funcs.anglicize1to19(3)\n introcs.assert_equals(\"three\", result)\n\n result = funcs.anglicize1to19(4)\n introcs.assert_equals(\"four\", result)\n\n result = funcs.anglicize1to19(5)\n introcs.assert_equals(\"five\", result)\n\n result = funcs.anglicize1to19(6)\n introcs.assert_equals(\"six\", result)\n\n result = funcs.anglicize1to19(7)\n introcs.assert_equals(\"seven\", result)\n\n result = funcs.anglicize1to19(8)\n introcs.assert_equals(\"eight\", result)\n\n result = funcs.anglicize1to19(9)\n introcs.assert_equals(\"nine\", result)\n\n result = funcs.anglicize1to19(10)\n introcs.assert_equals(\"ten\", result)\n\n result = funcs.anglicize1to19(11)\n introcs.assert_equals(\"eleven\", result)\n\n result = funcs.anglicize1to19(12)\n introcs.assert_equals(\"twelve\", result)\n\n result = funcs.anglicize1to19(13)\n introcs.assert_equals(\"thirteen\", result)\n\n result = funcs.anglicize1to19(14)\n introcs.assert_equals(\"fourteen\", result)\n\n result = 
funcs.anglicize1to19(15)\n introcs.assert_equals(\"fifteen\", result)\n\n result = funcs.anglicize1to19(16)\n introcs.assert_equals(\"sixteen\", result)\n\n result = funcs.anglicize1to19(17)\n introcs.assert_equals(\"seventeen\", result)\n\n result = funcs.anglicize1to19(18)\n introcs.assert_equals(\"eighteen\", result)\n\n result = funcs.anglicize1to19(19)\n introcs.assert_equals(\"nineteen\", result)", "def test_task86a_main_logic(number, expected_value):\r\n assert algo.Task86a.main_logic(number) == expected_value", "def test_T2():", "def test_T2():", "def test_task559_main_logic(number, expected_value):\r\n assert algo.Task559.main_logic(number) == expected_value", "def exercise_b2_86():\r\n pass", "def test_get_digits():\n assert(get_digits(333) != (0, 0, 0))\n assert(get_digits(333) == (3, 3, 3))\n assert(get_digits(100) == (1, 0, 0))\n assert(get_digits(571) == (5, 7, 1))\n assert(get_digits(0) == (0, 0, 0))\n assert(get_digits(999) == (9, 9, 9))\n print(\"All unit tests passed\")", "def test_update9(self):\n pass", "def exercise_b2_43():\r\n pass", "def test_task88b_main_logic(number, expected_value):\r\n assert algo.Task88b.main_logic(number) == expected_value", "def test_is_old_papernum(self):\n self.assertFalse(util.is_old_papernum(\"9106001\"))\n self.assertTrue(util.is_old_papernum(\"9107001\"))\n self.assertFalse(util.is_old_papernum(\"9200001\"))\n self.assertTrue(util.is_old_papernum(\"9201001\"))\n self.assertTrue(util.is_old_papernum(\"0703999\"))\n self.assertFalse(util.is_old_papernum(\"0704001\"))", "def testBeliefs1sk(self):", "def test_task88c(input_value, expected_value):\r\n assert algo.Task88c.main_logic(input_value) == expected_value", "def test_task178e(input_value, expected_value):\r\n assert algo.Task178e.main_logic(input_value) == expected_value", "def test_sanity(self):\n self.assertEquals(2 + 2, 4)", "def test_compare(self):", "def test_task88d(input_value, expected_value):\r\n assert algo.Task88d.main_logic(input_value) == expected_value", "def test_task226_main_logic(number1, number2, expected_value):\r\n assert algo.Task226.main_logic(number1, number2) == expected_value", "def exercise_b2_27():\r\n pass", "def test_open_fill(self):", "def test_task330_main_logic(number, expected_value):\r\n assert list(algo.Task330.main_logic(number)) == expected_value", "def test_decode_barcode_8_ok(self):\r\n self.assertEqual(decode_barcode_8(self.valid_bc_1),\r\n (self.valid_bc_1, 0))\r\n self.assertEqual(decode_barcode_8(self.valid_bc_2),\r\n (self.valid_bc_2, 0))\r\n self.assertEqual(decode_barcode_8(self.valid_bc_3),\r\n (self.valid_bc_3, 0))\r\n self.assertEqual(decode_barcode_8(self.valid_bc_4),\r\n (self.valid_bc_4, 0))\r\n self.assertEqual(decode_barcode_8(self.valid_bc_5),\r\n (self.valid_bc_5, 0))", "def test_more_values(self):\n self.assertEqual(poker(100 * [self.fh]), 100 * [self.fh])", "def test_get_game_diff(self):\n pass", "def test_task554_main_logic(number, expected_value):\r\n assert algo.Task554.main_logic(number + 1) == expected_value", "def test_task86b_main_logic(number, expected_value):\r\n assert algo.Task86b.main_logic(number) == expected_value", "def test_02_visit_again(self):", "def test_change_provisioned_throughput_usual_case():", "def test_T1():", "def test_T1():", "def test_anglicize20to99():\n print('Testing anglicize20to99')\n\n result = funcs.anglicize20to99(35)\n introcs.assert_equals(\"thirty five\", result)\n\n result = funcs.anglicize20to99(50)\n introcs.assert_equals(\"fifty\", result)\n\n result = funcs.anglicize20to99(99)\n 
introcs.assert_equals(\"ninety nine\", result)", "def test_task178b_main_logic(sequence, expected_value):\r\n assert algo.Task178b.main_logic(sequence) == expected_value", "def test_basic_execution(self):", "def test_if_array_is_good(self):\n testing_param = random.randint(1, 100)\n self.assertEqual(self.exercice.main(testing_param),\n list(range(2, testing_param+1, 2)))", "def test_task178d(input_value, expected_value):\r\n assert algo.Task178d.main_logic(input_value) == expected_value", "def test_encode():", "def test_01_lighting(self):", "def test_hackerrank_sample1(self):\n result = find_digits(12)\n self.assertEquals(result, 2)", "def test_example_day9_pt2():\n assert find_pt2(ex_data, 127) == 62", "def test_num_buses_5(self):\n actual = a1.num_buses(1001)\n expected = 21\n self.assertEqual(actual,expected)", "def test_match_left_none():\r\n runmatch(lcode_left_none)", "def test_task178c_main_logic(sequence, expected_value):\r\n assert algo.Task178c.main_logic(sequence) == expected_value", "def test_examples():\n assert nz_bank_validate(*'01-902-0068389-00'.split('-'))\n assert nz_bank_validate(*'08-6523-1954512-001'.split('-'))\n assert nz_bank_validate(*'26-2600-0320871-032'.split('-'))", "def test_calc_waiting_1():\n print '\\nTesting calc_waiting_1'\n expected = 14\n actual = sim.calc_waiting(26, 32, 20)\n if expected == actual:\n print 'calc_waiting(26, 32, 20) test passed.'\n else:\n print 'calc_waiting(26, 32, 20) test failed.'\n print 'expected: ', expected, ' actual: ', actual\n print ''", "def test_passed():\n pass", "def test_check_barcode(self):\r\n self.assertEqual(check_barcode('AA', None, ['AA']), (False, 'AA',\r\n False))\r\n self.assertEqual(check_barcode('GCATCGTCCACA', 'golay_12',\r\n ['GCATCGTCAACA']), (2, 'GCATCGTCAACA', True))\r\n # num errors for golay code is currently in bits\r\n self.assertEqual(check_barcode('GGTT', 4, ['TTTT']), (2, 'TTTT', True))", "def test_part1_example1(example1):\n assert aoc.part1(example1) == 2 + 2 + 654 + 33583", "def test_problem1b():\n print()\n print('--------------------------------------------------')\n print('Testing the problem1b function:')\n print('--------------------------------------------------')\n\n ####################################################################\n # THESE TESTS ARE ALREADY DONE. DO NOT CHANGE THEM.\n # You may add more tests if you want,\n # but you are not required to do so.\n ####################################################################\n\n # Test 1:\n expected = True\n answer = problem1b(17, 2)\n print()\n print('Test 1 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 2:\n expected = False\n answer = problem1b(18, 2)\n print()\n print('Test 2 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 3:\n expected = True\n answer = problem1b(85, 3)\n print()\n print('Test 3 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 4:\n expected = True\n answer = problem1b(89, 3)\n print()\n print('Test 4 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 5:\n expected = False\n answer = problem1b(90, 3)\n print()\n print('Test 5 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. 
****')\n\n # Test 6:\n expected = False\n answer = problem1b(449, 4)\n print()\n print('Test 6 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 7:\n expected = True\n answer = problem1b(450, 4)\n print()\n print('Test 7 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 8:\n expected = True\n answer = problem1b(457, 4)\n print()\n print('Test 8 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 9:\n expected = False\n answer = problem1b(458, 4)\n print()\n print('Test 9 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 10:\n expected = False\n answer = problem1b(569, 5)\n print()\n print('Test 10 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 11:\n expected = True\n answer = problem1b(570, 5)\n print()\n print('Test 11 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 12:\n expected = True\n answer = problem1b(571, 5)\n print()\n print('Test 12 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 13:\n expected = False\n answer = problem1b(572, 5)\n print()\n print('Test 13 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 14:\n expected = True\n answer = problem1b(15610, 6)\n print()\n print('Test 14 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 15:\n expected = False\n answer = problem1b(15600, 6)\n print()\n print('Test 15 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 16:\n expected = False\n answer = problem1b(10000, 6)\n print()\n print('Test 16 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 17:\n expected = True\n answer = problem1b(5861, 6)\n print()\n print('Test 17 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 18:\n expected = False\n answer = problem1b(5862, 6)\n print()\n print('Test 18 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. 
****')", "def test_02_this_step_will_fail(self):\n\n self.assertIn(5, arr)", "def test_case_01(self):\n if True:\n self.fail()", "def test_task555(input_value, expected_value):\r\n assert list(algo.Task555.main_logic(input_value)) == expected_value", "def test_get_list8(self):\n pass", "def test_hackerrank_sample2(self):\n result = find_digits(1012)\n self.assertEquals(result, 3)", "def test_contains_true(self):\n self.assertTrue('1.SKM7.640188' in self.tester)", "def test_contains_true(self):\n self.assertTrue('1.SKM7.640188' in self.tester)", "def test_03_visit_special(self):", "def test_calc_waiting():\n print '\\nTesting calc_waiting'\n expected = 142\n actual = sim.calc_waiting(106, 35, 71)\n if expected == actual:\n print 'calc_waiting(106, 35, 71) test passed.'\n else:\n print 'calc_waiting(106, 35, 71) test failed.'\n print 'expected: ', expected, ' actual: ', actual\n print ''", "def test_quick_answer(self):\n pass", "def test_len(self):\n self.assertEqual(len(self.tester), 27)", "def test_len(self):\n self.assertEqual(len(self.tester), 27)", "def problem_298():\n pass", "def test3(self):\n cases = (\n (2**10*'a',),\n (2**10*'abcd',),\n #(2**20*'a',), # 1 MB, takes about 160 sec. on a 233 Mhz Pentium.\n )\n\n for i in range(len(cases)):\n res = self.compare(cases[i][0])\n if res is not None:\n d1, d2 = res\n message = cases[i][0]\n self.print_diff(message, d1, d2)\n assert res is None", "def test_num_buses_4(self):\n actual = a1.num_buses(71)\n expected = 2\n self.assertEqual(actual,expected)", "def test_uparforvarg(self):", "def test_active_inference_SPM_1b(self):", "def test_golay600_codes(self):\r\n for bc in golay600:\r\n corr, num_errs = golay.decode(bc)\r\n self.assertEqual(corr, bc)\r\n self.assertEqual(num_errs, 0)" ]
[ "0.6906367", "0.6610178", "0.6610178", "0.65972733", "0.6592675", "0.65818673", "0.6549294", "0.6506812", "0.64938533", "0.64341944", "0.6431647", "0.6431647", "0.6419195", "0.6403939", "0.6397478", "0.6384199", "0.63813823", "0.6361559", "0.6293006", "0.62877226", "0.625898", "0.62479085", "0.61775064", "0.6167871", "0.616109", "0.6158607", "0.6128992", "0.6112889", "0.6098218", "0.60980594", "0.60936797", "0.6078805", "0.60656446", "0.6058948", "0.6053124", "0.60426474", "0.60393107", "0.6028942", "0.6028942", "0.60210353", "0.60198873", "0.6014646", "0.6012042", "0.60066444", "0.5992745", "0.5990048", "0.5981694", "0.59771246", "0.5969459", "0.5965444", "0.59634274", "0.59563226", "0.59432983", "0.5942362", "0.5941129", "0.5931594", "0.5921285", "0.59187424", "0.5910295", "0.5905344", "0.5900954", "0.59007263", "0.58972824", "0.5895201", "0.5895201", "0.58935046", "0.5892783", "0.5888732", "0.5880208", "0.5877335", "0.5861459", "0.5860668", "0.58581793", "0.58560073", "0.585521", "0.5853683", "0.5849589", "0.5847466", "0.5844844", "0.58414227", "0.58373386", "0.5832972", "0.58315307", "0.58304346", "0.58212984", "0.581168", "0.5806215", "0.5805412", "0.5804456", "0.5804456", "0.58029044", "0.58007663", "0.58006376", "0.57948864", "0.57948864", "0.57884425", "0.5787467", "0.57863253", "0.57847947", "0.5781872", "0.57817066" ]
0.0
-1
Try to find a newer version for the test; if the next version has already failed, stop further upgrades.
def increase_version(self):
    # type: (...) -> bool
    if self.type == RequirementType.FINAL_LATEST_VERSION:
        return False
    # get the latest version that may work
    if self.type == RequirementType.NOT_LATEST_VERSION:
        # get version between the current and last
        version_to_test = self.package.get_middle_version(
            self.version, self.error_version)
        if not version_to_test:
            self.type = RequirementType.FINAL_LATEST_VERSION
            return False
    else:
        version_to_test = self.package.last_version
        if version_to_test == self.version:
            # The latest version is already installed
            return False
    self.previous_version = self.version
    self.version = version_to_test
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upgrade_to_latest_but_same_version(self):\n with pytest.raises(\n ClickException,\n match=r\"The .* with id '.*' already has version .*. Nothing to upgrade.\",\n ):\n self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:latest\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )", "def test_upgrade_with_auto_upgrade_latest_engine_enabled():", "def test_upgrade_to_same_version(self):\n with pytest.raises(\n ClickException,\n match=r\"The .* with id '.*' already has version .*. Nothing to upgrade.\",\n ):\n self.runner.invoke(\n cli,\n [\"upgrade\", *self.LOCAL, self.ITEM_TYPE, str(self.ITEM_PUBLIC_ID)],\n standalone_mode=False,\n catch_exceptions=False,\n )", "def test_version_check_update_available(self):\n output = self.run_command(\"selfupdate --check bennr01:selfupdate_test_future\", exitcode=0)\n self.assertIn(\"Target: bennr01:selfupdate_test_future\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Already at latest version\", output)\n self.assertIn(\"New version available\", output)\n self.assertNotIn(\"Error: \", output)", "def test_upgrade_non_vendor(self):\n with pytest.raises(\n ClickException,\n match=r\"The .* with id '.*' already has version .*. Nothing to upgrade.\",\n ):\n self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:100.0.0\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )", "def test_do_upgrade(self):\n with self.with_config_update():\n result = self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:latest\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0", "def test_wait_for_upgrade(self):\n self.run_test_suites(self.wait_for_upgrade_test_suite_list)", "def test_can_downgrade(self):\n self.change_status(self.version_1_2_0, amo.STATUS_PENDING)\n for v in Version.objects.filter(pk__gte=self.version_1_2_1):\n v.delete()\n version, file = self.get('1.2', self.version_int,\n self.app, self.platform)\n\n assert version == self.version_1_1_3", "def test_version_check_outdated(self):\n output = self.run_command(\"selfupdate --check bennr01:selfupdate_test_outdated\", exitcode=0)\n self.assertIn(\"Target: bennr01:selfupdate_test_outdated\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertIn(\"Already at latest version\", output)\n self.assertNotIn(\"New version available\", output)\n self.assertNotIn(\"Error: \", output)", "def test_higher_version_always_preferred(self):\n try:\n self.prepare()\n self.assertEquals((1, 2, 4), compute_version(\n get_git_describe(repository_directory=self.repo, fix_environment=True, accepted_tag_pattern='repo-*')\n ))\n finally:\n rmtree(self.repo)\n os.chdir(self.oldcwd)", "def test_upgrade_plan_all_fine(setup, skuba):\n\n setup_kubernetes_version(skuba)\n out = skuba.cluster_upgrade_plan()\n\n assert out.find(\n \"Congratulations! 
You are already at the latest version available\"\n ) != -1", "def test_sequential_version_numbers(tmp_path):\n db_path = tmp_path / Path('v19_log.db')\n upgrade_manager = UpgradeManager(db_filename=db_path)\n\n old_db_filename = tmp_path / Path('v16_log.db')\n storage = None\n\n upgrade_functions = [Mock(), Mock(), Mock()]\n\n upgrade_functions[0].return_value = 17\n upgrade_functions[1].return_value = 18\n upgrade_functions[2].return_value = 19\n\n with patch('raiden.storage.sqlite.RAIDEN_DB_VERSION', new=16):\n storage = setup_storage(old_db_filename)\n storage.update_version()\n\n with ExitStack() as stack:\n stack.enter_context(patch(\n 'raiden.utils.upgrades.UPGRADES_LIST',\n new=upgrade_functions,\n ))\n stack.enter_context(patch(\n 'raiden.utils.upgrades.RAIDEN_DB_VERSION',\n new=19,\n ))\n older_db_file = stack.enter_context(patch('raiden.utils.upgrades.older_db_file'))\n older_db_file.return_value = str(old_db_filename)\n\n upgrade_manager.run()\n\n upgrade_functions[0].assert_called_once_with(ANY, 16, 19)\n upgrade_functions[1].assert_called_once_with(ANY, 17, 19)\n upgrade_functions[2].assert_called_once_with(ANY, 18, 19)\n\n assert get_db_version(str(db_path)) == 19", "def test_up_to_date(self):\n last_public_release = get_pypi_version()\n self.assertFalse(update_available(last_public_release))", "def test_getNextVersion(self):\n now = date.today()\n major = now.year - VERSION_OFFSET\n version = Version(\"twisted\", major, 9, 0)\n self.assertEquals(getNextVersion(version, now=now),\n Version(\"twisted\", major, 10, 0))", "def test_upgrade(self):\n with cd(self.latest_agent_name):\n latest_agent_items = set(\n ItemRemoveHelper(self.load_mock_context())\n .get_agent_dependencies_with_reverse_dependencies()\n .keys()\n )\n\n with cd(self.agent_name):\n self.runner.invoke( # pylint: disable=no-member\n cli,\n [\"--skip-consistency-check\", \"upgrade\", \"--local\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n agent_items = set(\n ItemRemoveHelper(self.load_mock_context())\n .get_agent_dependencies_with_reverse_dependencies()\n .keys()\n )\n assert latest_agent_items == agent_items\n\n # upgrade again to check it workd with upgraded version\n with cd(self.agent_name):\n self.runner.invoke( # pylint: disable=no-member\n cli,\n [\"--skip-consistency-check\", \"upgrade\", \"--local\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n agent_items = set(\n ItemRemoveHelper(self.load_mock_context())\n .get_agent_dependencies_with_reverse_dependencies()\n .keys()\n )\n assert latest_agent_items == agent_items\n\n # compare both configuration files, except the agent name and the author\n upgraded_agent_dir = Path(self.agent_name)\n latest_agent_dir = Path(self.latest_agent_name)\n lines_upgraded_agent_config = (\n (upgraded_agent_dir / DEFAULT_AEA_CONFIG_FILE).read_text().splitlines()\n )\n lines_latest_agent_config = (\n (latest_agent_dir / DEFAULT_AEA_CONFIG_FILE).read_text().splitlines()\n )\n # the slice is because we don't compare the agent name and the author name\n assert lines_upgraded_agent_config[2:] == lines_latest_agent_config[2:]\n\n # compare vendor folders.\n assert are_dirs_equal(\n upgraded_agent_dir / \"vendor\", latest_agent_dir / \"vendor\"\n )", "def test_case03(self):\n version1 = versions.get_version_power(\"1.1.1\")\n version2 = versions.get_version_power(\"0.2.1\")\n self.assertGreater(version1, version2)", "def checkVersion(self):\n try:\n respInfo = self._reqSession.get(self._host + \"/static/pythonSDKVersion.txt\")\n if respInfo.status_code 
!= 200 or len(respInfo.text) > 20:\n return\n latestVersion = respInfo.text.strip()\n import eventregistry._version as _version\n currentVersion = _version.__version__\n for (latest, current) in zip(latestVersion.split(\".\"), currentVersion.split(\".\")):\n if int(latest) > int(current):\n logger.info(\"==============\\nYour version of the module is outdated, please update to the latest version\")\n logger.info(\"Your version is %s while the latest is %s\", currentVersion, latestVersion)\n logger.info(\"Update by calling: pip install --upgrade eventregistry\\n==============\")\n return\n # in case the server mistakenly has a lower version that the user has, don't report an error\n elif int(latest) < int(current):\n return\n except:\n pass", "def _checkUpdateNeeded(self):\n try:\n currentVersionLine = str(subprocess.run(['pacman', '-Q', '-i', self._name],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True).stdout)\n currentVersion = re.sub(r'.*Version\\s*: ([\\d|\\.]*)-.*', r'\\1', currentVersionLine).split('.')\n newVersion = self._version.split('.')\n for i in range(0, min(len(currentVersion), len(newVersion))):\n if currentVersion[i].isdigit():\n # TODO: test if new version is only digits too, two of them should be the same anyway\n if int(newVersion[i]) > int(currentVersion[i]):\n return True\n if int(newVersion[i]) < int(currentVersion[i]):\n return False\n return len(newVersion) > len(currentVersion)\n except subprocess.CalledProcessError:\n # Package not found: to be installed then\n return True", "def test_minor(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n new_version_parts[1] = int(new_version_parts[1]) + 1\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is True", "def test_patch(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n new_version_parts[2] = int(new_version_parts[2]) + 1\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is True", "def test_upgrade_apply_from_previous(setup, platform, skuba):\n\n setup_kubernetes_version(skuba, PREVIOUS_VERSION)\n\n outs = {}\n for (r, n) in [(\"master\", 0), (\"worker\", 0)]:\n node = \"my-{}-{}\".format(r, n)\n outs[node] = skuba.node_upgrade(\"apply\", r, n)\n\n master = outs[\"my-master-0\"]\n assert master.find(\"successfully upgraded\") != -1\n\n worker = outs[\"my-worker-0\"]\n assert worker.find(\"successfully upgraded\") != -1", "def _is_method_version_upgrade(current_version, new_version):\n return version.parse(current_version) < version.parse(new_version)", "def test_installed_beta_no_newer_stable(self):\n self.change_version(self.version_1_2_2, '1.2beta')\n self.change_status(self.version_1_2_2, amo.STATUS_BETA)\n\n version, file = self.get('1.2beta', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_1", "def test_getNextVersionAfterYearChange(self):\n now = date.today()\n major = now.year - VERSION_OFFSET\n version = Version(\"twisted\", major - 1, 9, 0)\n self.assertEquals(getNextVersion(version, now=now),\n Version(\"twisted\", major, 0, 0))", "async def test_warn_upgrade_old_install(config: Config, time: Time):\n with open(config.get(Setting.DATA_CACHE_FILE_PATH), \"w\") as f:\n data = {\n \"upgrades\": [\n {\n \"prev_version\": str(Version.default()),\n \"new_version\": \"0.108.1\",\n \"date\": time.now().isoformat()\n }\n ]\n }\n json.dump(data, f)\n cache = DataCache(config, time)\n assert cache.notifyForIgnoreUpgrades\n assert not 
cache._config.get(Setting.IGNORE_UPGRADE_BACKUPS)", "def test1_version(self):\n lVersion = rdbhdb.__version__.split('.')\n nVersion = need_version.split('.')\n self.assert_(lVersion >= nVersion, rdbhdb.__version__)", "def test_finder_detects_latest_already_satisfied_find_links(data: TestData) -> None:\n req = install_req_from_line(\"simple\")\n # the latest simple in local pkgs is 3.0\n latest_version = \"3.0\"\n satisfied_by = Mock(\n location=\"/path\",\n version=parse_version(latest_version),\n )\n req.satisfied_by = satisfied_by\n finder = make_test_finder(find_links=[data.find_links])\n\n with pytest.raises(BestVersionAlreadyInstalled):\n finder.find_requirement(req, True)", "def checkNewVersionAvailable(self):\n try:\n current = open('docs/VERSION', 'r').read()\n \"\"\"\n Fix bug#13\n \"\"\"\n available = urllib2.urlopen('https://sourceforge.net/p/pytbull/code/ci/master/tree/docs/VERSION?format=raw', timeout=self.timeout).read()\n if current!=available:\n return available.split('\\n')[0]\n else:\n return 0\n except Exception, err:\n print \"***ERROR in checkNewVersionAvailable: %s\" % err\n print \"If you use a proxy, check your configuration.\"\n sys.exit()", "def check_for_updates(package_name, latest_version_str, our_version_str=VERSION):\n our = dict()\n latest = dict()\n for version, suffix in ((our, our_version_str), (latest, latest_version_str)):\n for part in ['major', 'minor', 'patch']:\n version[part], _, suffix = suffix.partition('.')\n version[part] = int(version[part])\n version['suffix'] = suffix\n\n for part in ['major', 'minor', 'patch', 'suffix']:\n if latest[part] > our[part]:\n if part == 'major':\n sys.exit(messages['UpdateRequired'].format(package_name))\n else:\n print >> sys.stderr, messages['UpdateAvailable'].format(package_name)\n return", "async def test_warn_upgrade_new_install(config: Config, time: Time):\n cache = DataCache(config, time)\n assert not cache.notifyForIgnoreUpgrades\n assert cache._config.get(Setting.IGNORE_UPGRADE_BACKUPS)", "def test_upgrade_required_mock(self):\n with patch(\n \"aea.cli.upgrade.ItemUpgrader.check_upgrade_is_required\",\n return_value=\"100.0.0\",\n ):\n result = self.runner.invoke(\n cli,\n [\n \"-v\",\n \"DEBUG\",\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:latest\",\n ],\n catch_exceptions=False,\n )\n assert result.exit_code == 0", "async def check_new_version(now):\n result = await get_newest_version(hass, huuid, include_components)\n\n if result is None:\n return\n\n newest, releasenotes, android, apt = result\n\n # Load data from supervisor on hass.io\n if hass.components.hassio.is_hassio():\n newest = hass.components.hassio.get_homeassistant_version()\n\n # Validate version\n if StrictVersion(newest) > StrictVersion(current_version):\n _LOGGER.info(\"The latest available version is %s\", newest)\n info = 'Dostฤ™pna jest nowa wersja ' + newest + '. 
' + releasenotes\n hass.states.async_set(\n ENTITY_ID, info, {\n ATTR_FRIENDLY_NAME: 'Aktualizacja',\n \"icon\": \"mdi:update\",\n \"reinstall_dom_app\": True,\n \"reinstall_android_app\": android,\n \"apt\": apt\n }\n )\n # add all entities to keep the order\n # hass.async_add_job(\n # hass.services.async_call(\n # 'group',\n # 'set', {\n # \"object_id\": \"dom_system_version\",\n # \"entities\": [\n # \"sensor.version_info\",\n # \"script.ais_update_system\",\n # \"camera.remote_access\",\n # \"input_boolean.ais_remote_access\",\n # \"sensor.ais_secure_android_id_dom\",\n # \"script.ais_scan_network_devices\",\n # \"script.ais_restart_system\",\n # \"script.ais_stop_system\"]}))\n\n hass.states.async_set(\n 'script.ais_update_system', 'off', {\n ATTR_FRIENDLY_NAME: ' Zainstaluj aktualizacjฤ™',\n \"icon\": \"mdi:download\"\n }\n )\n\n else:\n info = 'Twรณj system jest aktualny, wersja ' + newest + '. '\n info += releasenotes\n hass.states.async_set(\n ENTITY_ID, info, {\n ATTR_FRIENDLY_NAME: 'Wersja',\n \"icon\": \"mdi:update\",\n \"reinstall_dom_app\": False,\n \"reinstall_android_app\": False,\n \"apt\": apt\n }\n )\n hass.states.async_set(\n 'script.ais_update_system', 'off', {\n ATTR_FRIENDLY_NAME: ' Sprawdลบ dostฤ™pnoล›ฤ‡ aktualizacji',\n \"icon\": \"mdi:refresh\"\n }\n )\n _LOGGER.info(\n \"You are on the latest version (%s) of Assystent domowy\", newest)", "def test_pre_release(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n if len(new_version_parts) > 4:\n new_version_parts[4] = int(new_version_parts[4]) + 1\n elif len(new_version_parts) > 3:\n new_version_parts.append(1)\n else:\n new_version_parts.extend(['a', 1])\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is True", "def test_ifVersionIsCorrect():\n \n for name in config.toTest:\n testConfig = dynamicallyLoadModule(name)\n if \"version\" in testConfig.config:\n print \"Version: \"+ testConfig.config[\"name\"]\n yield assertionFunctions.checkIfVersionIsExact, testConfig.config\n \n if \"minimum_version\" in testConfig.config:\n print \"Minimum Version: \"+ testConfig.config[\"name\"]\n yield assertionFunctions.checkIfMinimumVersionIsMet, testConfig.config", "def test_check_no_download(self):\n output = self.run_command(\"selfupdate --check\", exitcode=0)\n contains_latest_version = (\"Already at latest version\" in output)\n contains_new_version = (\"New version available\" in output)\n assert (contains_latest_version or contains_new_version)\n self.assertNotIn(\"Url: \", output)\n self.assertNotIn(\"Update completed.\", output)\n self.assertNotIn(\"Failed to update. 
Please try again.\", output)", "def test_get_next_version(self):\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual(None, ver)", "def test_version_check_does_not_exist(self):\n output = self.run_command(\"selfupdate --check selfupdate_test_does_not_exist\", exitcode=0)\n self.assertIn(\"Target: ywangd:selfupdate_test_does_not_exist\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Already at latest version\", output)\n self.assertNotIn(\"New version available\", output)\n self.assertIn(\"Error: \", output)", "def check(self):\n current = self._get_current()\n # There is no version, so don't attempt to upgrade\n if current[-1]:\n return False\n\n highest = self._get_highest_version()\n return highest > current", "def test_major(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n new_version_parts[0] = int(new_version_parts[0]) + 1\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is False", "def test_python_version(container, python_next_version=\"3.10\"):\n LOGGER.info(f\"Checking that python version is lower than {python_next_version}\")\n c = container.run(\n tty=True,\n command=[\"start.sh\"],\n )\n cmd = c.exec_run(\"python --version\")\n output = cmd.output.decode(\"utf-8\")\n assert \"ERROR\" not in output\n assert \"WARNING\" not in output\n actual_python_version = version.parse(output.split()[1])\n assert actual_python_version < version.parse(\n python_next_version\n ), f\"Python version shall be lower than {python_next_version}\"", "def _check_for_updated_ballet() -> Optional[str]:\n latest = _get_latest_ballet_version_string()\n current = ballet.__version__\n parse = packaging.version.parse\n if latest and parse(latest) > parse(current):\n return latest\n else:\n return None", "def download_updates_if_available(self):\n current_version = self.get_version(self.get_module_and_path(self._main_dir))\n latest_version = self.get_latest_version()\n\n print('Checking version... ')\n print('\\tCurrent version: ', current_version)\n print('\\tLatest version: ', latest_version)\n\n if not latest_version:\n return False\n\n if (not current_version) or (latest_version > current_version):\n print('Updating...')\n if not self.path_exists(self._module):\n os.mkdir(self._module)\n\n # Check if there's a botched download already. 
If next directory already exists remove it and tree.\n if self.path_exists(self.get_module_and_path('next')):\n self.rmtree(self.get_module_and_path('next')) # Remove the 'next' directory and contents.\n\n # Create the next directory and download the source files.\n os.mkdir(self.get_module_and_path('next'))\n self.download_all_files(self._github_repo + '/contents/' + self._main_dir, latest_version)\n\n # Last step is to write the .version file only if we have completed the download\n with open(self.get_module_and_path('next/.version'), 'w') as versionfile:\n versionfile.write(latest_version)\n versionfile.close()\n\n return True\n return False", "def test_above_24_latest_version(self):\n self.data['version'] = ''\n self.data['appVersion'] = '28.0'\n\n up = self.get(self.data)\n rdf = up.get_rdf()\n assert rdf.find('20202020.01') > -1", "def test_release_update_available_MINOR(self):\n NEXT = '%d.%d-%d' % (MAJOR, MINOR + 1, 0)\n self._uri({\n '%d.%d/maintained/%s/all/Packages.gz' % (MAJOR, MINOR + 1, NEXT): DATA,\n })\n next = self.u.release_update_available()\n self.assertEqual(NEXT, next)", "def check_all():\n for package, version in required_versions.items():\n try:\n module = importlib.import_module(package)\n except ImportError:\n return\n else:\n if StrictVersion(version) > StrictVersion(module.__version__):\n raise RuntimeError(\"Your version of %s is too old - it must be at least %s\" % (\n package,\n version,\n ))", "def test_backup_restore_after_online_upgrade(self):\n if self.initial_version[:1] == \"5\" and self.upgrade_versions[0][:1] >= \"7\":\n self.log.error(\"\\n\\n\\n*** ERROR: Direct upgrade from {0} to {1} does not support.\\\n Test will skip\\n\\n\"\\\n .format(self.initial_version[:5], self.upgrade_versions[0][:5]))\n return\n servers = copy.deepcopy(self.servers)\n self.vbuckets = self.initial_vbuckets\n if len(servers) != 4:\n self.fail(\"\\nThis test needs exactly 4 nodes to run! 
\")\n\n self._install(servers)\n count = 0\n nodes_fail_to_install = []\n for server in servers:\n ready = RestHelper(RestConnection(server)).is_ns_server_running(60)\n if ready:\n count += 1\n else:\n nodes_fail_to_install.append(server.ip)\n if count < len(servers):\n self.fail(\"Some servers may not install Couchbase server: {0}\"\\\n .format(nodes_fail_to_install))\n\n if not self.disable_diag_eval_on_non_local_host:\n self.enable_diag_eval_on_non_local_hosts()\n cmd = 'curl -g {0}:8091/diag/eval -u {1}:{2} '.format(self.master.ip,\n self.master.rest_username,\n self.master.rest_password)\n cmd += '-d \"path_config:component_path(bin).\"'\n bin_path = subprocess.check_output(cmd, shell=True)\n try:\n bin_path = bin_path.decode()\n except AttributeError:\n pass\n if \"bin\" not in bin_path:\n self.fail(\"Check if cb server install on %s\" % self.master.ip)\n else:\n self.cli_command_location = bin_path.replace('\"', '') + \"/\"\n\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n rebalance = self.cluster.async_rebalance(servers[:self.nodes_init],\n [servers[int(self.nodes_init) - 1]], [])\n rebalance.result()\n self.sleep(15)\n self.add_built_in_server_user()\n rest = RestConnection(self.master)\n cb_version = rest.get_nodes_version()\n initial_compression_mode = \"off\"\n if 5.5 > float(cb_version[:3]):\n self.compression_mode = initial_compression_mode\n\n rest.create_bucket(bucket='default', ramQuotaMB=512,\n compressionMode=self.compression_mode)\n self.buckets = rest.get_buckets()\n self._load_all_buckets(self.master, gen, \"create\", 0)\n\n \"\"\" create index \"\"\"\n if self.create_gsi:\n if \"5\" > rest.get_nodes_version()[:1]:\n if self.gsi_type == \"forestdb\":\n self.fail(\"Need to set param self.gsi_type=memory_optimized\")\n rest.set_indexer_storage_mode(storageMode=\"memory_optimized\")\n else:\n rest.set_indexer_storage_mode(storageMode=\"plasma\")\n self.create_indexes()\n self.backup_create()\n if self.backupset.number_of_backups > 1:\n self.log.info(\"Start doing multiple backup\")\n for i in range(1, self.backupset.number_of_backups + 1):\n self._backup_restore_with_ops()\n else:\n self.backup_cluster_validate()\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, self.backupset.number_of_backups + 1)\n self.sleep(5)\n self.backup_list()\n\n \"\"\" Start to online upgrade using swap rebalance \"\"\"\n self.initial_version = self.upgrade_versions[0]\n if self.force_version_upgrade:\n self.initial_version = self.force_version_upgrade\n self.sleep(self.sleep_time,\n \"Pre-setup of old version is done. Wait for online upgrade to: \"\n \"{0} version\".format(self.initial_version))\n self.product = 'couchbase-server'\n self._install(servers[2:])\n self.sleep(self.sleep_time,\n \"Installation of new version is done. 
Wait for rebalance\")\n self.log.info(\n \"Rebalanced in upgraded nodes and rebalanced out nodes with old version\")\n add_node_services = [self.add_node_services]\n if \"-\" in self.add_node_services:\n add_node_services = self.add_node_services.split(\"-\")\n\n self.cluster.rebalance(servers, servers[2:], servers[:2],\n services=add_node_services)\n self.sleep(15)\n self.backupset.cluster_host = servers[2]\n \"\"\" Upgrade is done \"\"\"\n self.log.info(\"** Upgrade is done **\")\n healthy = False\n timeout = 0\n while not healthy:\n healthy = RestHelper(RestConnection(self.backupset.cluster_host)).is_cluster_healthy()\n if not healthy:\n if timeout == 120:\n self.fail(\"Node %s is not ready after 2 mins\" % self.backupset.cluster_host)\n else:\n self.sleep(5, \"Wait for server up \")\n timeout += 5\n else:\n healthy = True\n if \"5\" <= RestConnection(servers[2]).get_nodes_version()[:1]:\n for user in self.users_check_restore:\n user_name = user.replace('[', '_').replace(']', '_')\n testuser = [{'id': user_name, 'name': user_name,\n 'password': 'password'}]\n rolelist = [{'id': user_name, 'name': user_name,\n 'roles': user}]\n\n self.log.info(\"**** add built-in '%s' user to node %s ****\" % (testuser[0][\"name\"],\n servers[2].ip))\n RbacBase().create_user_source(testuser, 'builtin', servers[2])\n\n self.log.info(\"**** add '%s' role to '%s' user ****\" % (rolelist[0][\"roles\"],\n testuser[0][\"name\"]))\n status = RbacBase().add_user_role(rolelist, RestConnection(servers[2]), 'builtin')\n self.log.info(status)\n if self.backupset.number_of_backups_after_upgrade:\n self.backupset.number_of_backups += \\\n self.backupset.number_of_backups_after_upgrade\n if \"5\" <= RestConnection(servers[2]).get_nodes_version()[:1]:\n self.add_built_in_server_user(node=servers[2])\n for i in range(1, self.backupset.number_of_backups_after_upgrade + 2):\n self.log.info(\"_backup_restore_with_ops #{0} started...\".format(i))\n validate_dir_struct = True\n if i > 2:\n validate_dir_struct = False\n self._backup_restore_with_ops(node=self.backupset.cluster_host, repeats=1,\n validate_directory_structure=validate_dir_struct)\n self.backup_list()\n\n \"\"\" merged after upgrade \"\"\"\n if self.after_upgrade_merged:\n self.backupset.start = 1\n self.backupset.end = len(self.backups)\n self.backup_merge_validate()\n self.backup_list()\n\n backupsets = [self.backupset]\n if \"5\" <= RestConnection(servers[2]).get_nodes_version()[:1]:\n for user in self.users_check_restore:\n new_backupset = copy.deepcopy(self.backupset)\n new_backupset.restore_cluster_host_username = user.replace('[', '_').replace(']', '_')\n backupsets.append(new_backupset)\n for backupset in backupsets:\n self.backupset = backupset\n if self.bucket_flush:\n self.log.info(\"Start to flush bucket\")\n rest = RestConnection(servers[2])\n rest.flush_bucket()\n else:\n self.bucket_helper.delete_bucket_or_assert(self.backupset.cluster_host,\n \"default\", self)\n \"\"\" Re-create default bucket on upgrade cluster \"\"\"\n RestConnection(servers[2]).create_bucket(bucket='default',\n ramQuotaMB=512,\n compressionMode=self.compression_mode)\n self.sleep(5)\n self.total_buckets = len(self.buckets)\n\n if self.after_upgrade_merged:\n self.backupset.end = 1\n\n \"\"\" restore back to cluster \"\"\"\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")\n if self.create_gsi:\n self.verify_gsi()", "def test_new_version_no_op(mocker, state, slack, clusters):\n state.exists.return_value = True\n state.get.return_value = 
upgrade_version # same version, already notified\n ouw.notify_cluster_new_version(clusters, state=state, slack=slack)\n assert slack.chat_post_message.call_count == 0\n assert state.add.call_count == 0", "def check_update():\n try:\n raw_version = urllib.urlopen(VERSIONFILE)\n except IOError as e:\n print UPDATE_FAIL + \"can't fetch version file: \" + str(e)\n else:\n if raw_version.getcode() == 200:\n remote_version = raw_version.read().rstrip()\n if remote_version != VERSION:\n print(UPDATE_WARN + \"version \" + remote_version + \" is available, you have version \"\n + VERSION + \"\\n\\t\" + \"to update run: \" + UPDATECOMMAND)\n else:\n print UPDATE_FAIL + \"can't fetch version file\"", "def upgrade_myro(url=None, version=None):\n if url == None:\n url = \"http://www.betterbots.com/upgrade/\"\n if version != None:\n version = version.split(\".\")\n install_count = 0\n if not url.startswith(\"http://\"):\n print \"Looking for Myro upgrades in file\", url, \"...\"\n install_count += import_file(url) # which is a filename\n else: \n print \"Looking for Myro upgrades at\", url, \"...\"\n myro_ver = myro_version.split(\".\")\n # go to site, check for latest greater than our version\n infp = urllib.urlopen(url)\n contents = infp.read()\n lines = contents.split(\"\\n\")\n infp.close()\n for filename in lines:\n filename = filename.strip()\n if filename != \"\" and filename[0] != '#':\n print \"Considering\", filename, \"...\"\n if filename.startswith(\"myro-upgrade-\"):\n end = filename.index(\".zip\")\n patch_ver = filename[13:end].split(\".\")\n if (version != None): # get specific version\n if map(int, patch_ver) == map(int, version):\n print \" Downloading...\"\n install_count += import_url(url + filename)\n elif map(int, patch_ver) > map(int, myro_ver):\n # download it\n print \" Downloading...\"\n install_count += import_url(url + filename)\n if install_count > 0:\n print \"Done upgrading! 
Please exit and restart Python and Myro\"\n else:\n print \"Nothing to upgrade in Myro; it's up-to-date.\"\n return install_count", "def test_release_update_available_PATCH(self):\n NEXT = '%d.%d-%d' % (MAJOR, MINOR, PATCH + 1)\n self._uri({\n '%d.%d/maintained/%s/all/Packages.gz' % (MAJOR, MINOR, NEXT): DATA,\n })\n next = self.u.release_update_available()\n self.assertEqual(NEXT, next)", "def test_higher_version_preferred_even_when_tag_is_on_top_of_the_tree(self):\n try:\n self.prepare(tag_latest_version=True)\n self.assertEquals((1, 3, 0), compute_version(\n get_git_describe(repository_directory=self.repo, fix_environment=True, accepted_tag_pattern='repo-*')\n ))\n finally:\n rmtree(self.repo)\n os.chdir(self.oldcwd)", "def test_version_time_error_bad_version(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('version time bad_version \"%s\"'\n % self._test_date)\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def check_for_updates():\n last_version = str(request.urlopen(__source__).read().decode(\"utf8\"))\n if str(open(__file__).read()) != last_version:\n log.warning(\"Theres new Version available!, Update from \" + __source__)\n else:\n log.info(\"No new updates!,You have the lastest version of this app.\")", "def test_version_missing(self):\r\n self.assertIsNone(self._version_test(self.no_version))", "def _is_version_uptodate(self):\n logging.info(\"Checking tesseract version\")\n cmd = '%s -v' % (self.binary)\n logging.info(cmd) \n try:\n ret_output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)\n except CalledProcessError:\n # Could not run tesseract\n error(self.msgs['TS_MISSING'])\n\n ver_str = '0.0.0'\n for line in ret_output.splitlines():\n if 'tesseract' in line:\n ver_str = line.split(' ')[1]\n if ver_str.endswith('dev'): # Fix for version strings that end in 'dev'\n ver_str = ver_str[:-3]\n\n # Iterate through the version dots\n ver = [int(x) for x in ver_str.split('.')]\n req = [int(x) for x in self.required.split('.')]\n\n # Aargh, in windows 3.02.02 is reported as version 3.02 \n # SFKM\n if str(os.name) == 'nt':\n req = req[:2]\n\n version_good = False\n for i,num in enumerate(req):\n if len(ver) < i+1:\n # This minor version number is not present in tesseract, so it must be\n # lower than required. 
(3.02 < 3.02.01)\n break\n if ver[i]==num and len(ver) == i+1 and len(ver)==len(req):\n # 3.02.02 == 3.02.02\n version_good = True\n continue\n if ver[i]>num:\n # 4.0 > 3.02.02\n # 3.03.02 > 3.02.02\n version_good = True\n break\n if ver[i]<num:\n # 3.01.02 < 3.02.02\n break\n \n return version_good, ver_str", "def test_finder_detects_latest_already_satisfied_pypi_links() -> None:\n req = install_req_from_line(\"initools\")\n # the latest initools on PyPI is 0.3.1\n latest_version = \"0.3.1\"\n satisfied_by = Mock(\n location=\"/path\",\n version=parse_version(latest_version),\n )\n req.satisfied_by = satisfied_by\n finder = make_test_finder(index_urls=[\"http://pypi.org/simple/\"])\n\n with pytest.raises(BestVersionAlreadyInstalled):\n finder.find_requirement(req, True)", "def _next_version(self, dirpath: str) -> int:\n try:\n version_re = re.compile(r\"version_(\\d+)\")\n\n def is_valid_version(v: str):\n return version_re.search(v) is not None\n\n versions = tuple(filter(is_valid_version, os.listdir(dirpath)))\n if not versions:\n # No versions yet\n return 0\n current_version = natsorted(versions, reverse=True)[0]\n # Get the version number using the version pattern\n current_version = int(version_re.search(current_version).group(1))\n return current_version + 1\n except Exception as e:\n logger.warning(f\"Starting from version 0 because of error: {e}\")\n return 0", "def test_request_estable_version(self):\n current_stable_version = get_stable_version()\n self.assertIsNotNone(current_stable_version)", "def test_existing_over_wheel_priority(self, data: TestData) -> None:\n req = install_req_from_line(\"priority\")\n latest_version = \"1.0\"\n satisfied_by = Mock(\n location=\"/path\",\n version=parse_version(latest_version),\n )\n req.satisfied_by = satisfied_by\n finder = make_test_finder(find_links=[data.find_links])\n\n with pytest.raises(BestVersionAlreadyInstalled):\n finder.find_requirement(req, True)", "def upgrade(self, version):\n try:\n version = int(version)\n except:\n if version != 'latest':\n self.logger.error('Unable to parse version \"{}\"'.format(version))\n return\n\n # check the current db version\n current_version = self.inspect()\n if current_version is None:\n self.logger.error('Unable to inspect your database. '\n 'Perhaps you need to run \\'jambi inpsect\\'?')\n return\n\n # get the migrations\n migrations = self.find_migrations()\n latest_version = migrations[-1][1] if any(migrations) else 0\n migrations = tuple(filter(lambda x: x[1] > current_version, migrations))\n\n if current_version > latest_version:\n self.logger.error('Your database version is higher than the '\n 'current database version. '\n '(current: {}, latest: {})'.format(current_version,\n latest_version))\n elif current_version == latest_version:\n self.logger.info('You are already up to date. '\n '(version: {})'.format(current_version))\n return\n\n # filter out migrations that are beyond the desired version\n if version == 'latest':\n version = latest_version\n migrations = tuple(filter(lambda x: x[1] <= version, migrations))\n if not any(migrations):\n self.logger.info('You are already up to date. 
'\n '(version: {})'.format(current_version))\n return\n\n # run the migrations\n self.logger.info('Now performing the migration to version {}...'.format(version))\n self.db.connect()\n with self.db.atomic():\n for n, v, m in migrations:\n self.logger.info('>>> [{}] Attempting...'.format(v))\n migrator = PostgresqlMigrator(self.db)\n upgrades = m.upgrade(migrator)\n migrate(*upgrades)\n self._set_version(v)\n self.logger.info('>>> [{}] Success!'.format(v))\n self.db.close()\n self.logger.info('Successfully migrated to version {}...'.format(version))\n return", "def check_conflict(ctx, config):\n log.info('Checking for old test directory...')\n testdir = misc.get_testdir(ctx)\n processes = ctx.cluster.run(\n args=[\n 'test', '!', '-e', testdir,\n ],\n wait=False,\n )\n failed = False\n for proc in processes:\n try:\n proc.wait()\n except run.CommandFailedError:\n log.error('Host %s has stale test directory %s, check lock and cleanup.', proc.remote.shortname, testdir)\n failed = True\n if failed:\n raise RuntimeError('Stale jobs detected, aborting.')", "def check_updates(self):\n try:\n if not common.latest_version(version):\n self.update_notify()\n except:\n self.neterror()", "def check_version():\r\n\r\n session.forget()\r\n session._unlock(response)\r\n\r\n new_version, version_number = check_new_version(request.env.web2py_version,\r\n WEB2PY_VERSION_URL)\r\n\r\n if new_version == -1:\r\n return A(T('Unable to check for upgrades'), _href=WEB2PY_URL)\r\n elif new_version != True:\r\n return A(T('web2py is up to date'), _href=WEB2PY_URL)\r\n elif platform.system().lower() in ('windows','win32','win64') and os.path.exists(\"web2py.exe\"):\r\n return SPAN('You should upgrade to version %s' % version_number)\r\n else:\r\n return sp_button(URL('upgrade_web2py'), T('upgrade now')) \\\r\n + XML(' <strong class=\"upgrade_version\">%s</strong>' % version_number)", "def test_version_add_error_already_exists(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('version add 1.0 \"%s\"' % self._test_date)\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def test_version_remove_error_bad_version(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('version remove bad_version')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def upgrade_if_clean(dburl):\n alembic_cfg = alembic_config(dburl)\n engine = create_engine(dburl)\n script_ = script.ScriptDirectory.from_config(alembic_cfg)\n if not table_exists('results_schema_versions', engine):\n logger.info(\"No results_schema_versions table exists, which means that this installation \"\n \"is fresh. Upgrading db.\")\n upgrade_db(dburl=dburl)\n return\n with engine.begin() as conn:\n current_revision = conn.execute(\n 'select version_num from results_schema_versions limit 1'\n ).scalar()\n logger.debug(\"Database's triage_metadata schema version is %s\", current_revision)\n triage_head = script_.get_current_head()\n logger.debug(\"Code's triage_metadata schema version is %s\", triage_head)\n database_is_ahead = not any(\n migration.revision == current_revision\n for migration in script_.walk_revisions()\n )\n if database_is_ahead:\n raise ValueError(\n f\"Your database's results schema version, {current_revision}, is not a known \"\n \"revision to this version of Triage. Usually, this happens if you use a branch \"\n \"with a new results schema version and upgrade the database to that version. 
\"\n \"To use this version of Triage, you will likely need to check out that branch \"\n f\"and downgrade to {triage_head}\",\n )\n elif current_revision != triage_head:\n raise ValueError(\n f\"Your database's results schema revision, {current_revision}, is out of date \"\n \"for this version of Triage. However, your database can be upgraded to this \"\n \"revision. If you would like to upgrade your database from the console, and \"\n \"you've installed Triage, you may execute `triage db upgrade`. \"\n \"If the `triage` command is unavailable, (because you are running Triage directly \"\n \" from a repository checkout), then `manage alembic upgrade head`. \"\n \"The database changes may take a long time on a heavily populated database. \"\n \"Otherwise, you can also downgrade your Triage version to match your database.\"\n )", "async def _upgrade_db(self) -> None:\n cur_version = await self._get_db_version()\n for n in range(cur_version + 1, sql_data.CUR_VERSION + 1):\n log.msg('Upgrading database to version %d' % n)\n if n in sql_data.SQL_UPGRADES:\n for command in sql_data.SQL_UPGRADES[n]:\n await self.operation(command)\n if cur_version != sql_data.CUR_VERSION:\n await self._set_db_version(sql_data.CUR_VERSION)", "def test_get_version(self):\n pass", "def test_release_update_available_CURRENT(self):\n NEXT = '%d.%d-%d' % (MAJOR, MINOR + 1, 0)\n self._ucr({\n 'repository/online/component/a': 'yes',\n 'repository/online/component/a/version': 'current',\n })\n self._uri({\n '%d.%d/maintained/%s/all/Packages.gz' % (MAJOR, MINOR + 1, NEXT): DATA,\n })\n self.assertRaises(U.RequiredComponentError, self.u.release_update_available, errorsto='exception')", "def test_get_next_version_PATCH(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR, MAJOR, MINOR, PATCH + 1): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR, PATCH + 1), ver)", "def test_compare_local_version_is_older(self):\n\n given = \"2.34.0.dev (Hello, World)\"\n expected = True\n actual = Version.compare(given)\n\n self.assertEqual(expected, actual)", "def upgrade_test_mixed(self):\n self.upgrade_scenario(mixed_version=True)", "def test_component_update_available_UPGRADE(self):\n MockPopen.mock_stdout = 'Inst a [old] (new from)'\n self.assertTrue(self.u.component_update_available())", "def test_no_version(self):\r\n errstring = \"unknown version\"\r\n with self.assertRaisesRegexp(ValueError, errstring):\r\n convert_between_versions(self.no_version, self.result_dir)", "def test_backup_restore_after_offline_upgrade(self):\n upgrade_version = self.input.param(\"upgrade_version\", \"5.0.0-3330\")\n if upgrade_version == \"5.0.0-3330\":\n self.fail(\"\\n *** Need param 'upgrade_version=' to run\")\n\n backup_service_test = self.input.param(\"backup_service_test\", False)\n\n if backup_service_test:\n backup_service_hook = BackupServiceHook(self.servers[1], self.servers, self.backupset, self.objstore_provider)\n self.cli_command_location = \"/opt/couchbase/bin\"\n\n self._install(self.servers)\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n rebalance = self.cluster.async_rebalance(self.servers[:2], [self.servers[1]],\n [])\n rebalance.result()\n self.add_built_in_server_user()\n RestConnection(self.master).create_bucket(bucket='default', ramQuotaMB=512)\n self.buckets = RestConnection(self.master).get_buckets()\n self.total_buckets = len(self.buckets)\n self._load_all_buckets(self.master, gen, 
\"create\", 0)\n self.backup_create()\n self.backup_cluster_validate()\n self.sleep(5)\n BucketOperationHelper.delete_bucket_or_assert(self.master, \"default\", self)\n\n \"\"\" Start to upgrade \"\"\"\n if self.force_version_upgrade:\n upgrade_version = self.force_version_upgrade\n upgrade_threads = self._async_update(upgrade_version=upgrade_version,\n servers=self.servers[:2])\n for th in upgrade_threads:\n th.join()\n self.log.info(\"Upgraded to: {ver}\".format(ver=upgrade_version))\n self.sleep(30)\n\n \"\"\" Re-create default bucket on upgrade cluster \"\"\"\n RestConnection(self.master).create_bucket(bucket='default', ramQuotaMB=512)\n self.sleep(5)\n\n # Create a backup node and perform a backup service import repository and restore\n if backup_service_test:\n backup_service_hook.backup_service.replace_services(self.servers[1], ['kv,backup'])\n backup_service_hook.backup_service.import_repository(self.backupset.directory, self.backupset.name, \"my_repo\")\n backup_service_hook.backup_service.take_one_off_restore(\"imported\", \"my_repo\", 20, 20)\n backup_service_hook.cleanup()\n return\n\n \"\"\" Only server from Spock needs build in user\n to access bucket and other tasks\n \"\"\"\n if \"5\" <= RestConnection(self.master).get_nodes_version()[:1]:\n self.add_built_in_server_user()\n for user in self.users_check_restore:\n user_name = user.replace('[', '_').replace(']', '_')\n testuser = [{'id': user_name, 'name': user_name,\n 'password': 'password'}]\n rolelist = [{'id': user_name, 'name': user_name,\n 'roles': user}]\n\n self.log.info(\"**** add built-in '%s' user to node %s ****\" % (testuser[0][\"name\"],\n self.master.ip))\n RbacBase().create_user_source(testuser, 'builtin', self.master)\n\n self.log.info(\"**** add '%s' role to '%s' user ****\" % (rolelist[0][\"roles\"],\n testuser[0][\"name\"]))\n RbacBase().add_user_role(rolelist, RestConnection(self.master), 'builtin')\n\n backupsets = [self.backupset]\n if \"5\" <= RestConnection(self.master).get_nodes_version()[:1]:\n for user in self.users_check_restore:\n new_backupset = copy.deepcopy(self.backupset)\n new_backupset.restore_cluster_host_username = user.replace('[', '_').replace(']', '_')\n backupsets.append(new_backupset)\n for backupset in backupsets:\n self.backupset = backupset\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")\n BucketOperationHelper().delete_bucket_or_assert(self.backupset.cluster_host,\n \"default\", self)", "def test_get_next_version_MINOR(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR + 1, MAJOR, MINOR + 1, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR + 1, 0), ver)", "def test_version(self):\n pass", "def _update_version_watch(self, new_version, _):\n if new_version is None:\n self._stopped = True\n return False\n\n persistent_update_version = retry_data_watch_coroutine(\n self.version_node, self.update_version\n )\n main_io_loop = IOLoop.instance()\n main_io_loop.add_callback(persistent_update_version, new_version)", "def test_upgrade_to_non_registered(self):\n with pytest.raises(\n ClickException,\n match=r\".* with id .* is not registered. Please use the `add` command. 
Aborting...\",\n ):\n self.runner.invoke(\n cli,\n [\n \"-v\",\n \"DEBUG\",\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n \"nonexits/dummy:0.0.0\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )", "def test_init_unsupported_version(self, monkeypatch, runway_config, runway_context):\n monkeypatch.setattr(MODULE + \".__version__\", \"1.3\")\n with pytest.raises(SystemExit) as excinfo:\n assert not Runway(runway_config, runway_context)\n assert excinfo.value.code == 1", "def given_version_is_newer(original_version, new_version):\n orig_version_parts = [int(x) for x in original_version.replace('-', '.').split('.')]\n new_version_parts = [int(x) for x in new_version.replace('-', '.').split('.')]\n\n return new_version_parts > orig_version_parts", "def test_new_version_notify(mocker, state, slack, clusters):\n state.exists.return_value = True\n state.get.return_value = old_version # different version\n ouw.notify_cluster_new_version(clusters, state=state, slack=slack)\n assert slack.chat_post_message.call_count == 1\n assert state.add.call_count == 1", "def test_changeVersions(self):\n self._testVersionChanging(8, 2, 3)", "def test_release_update_available_MAJOR(self):\n NEXT = '%d.%d-%d' % (MAJOR + 1, 0, 0)\n self._uri({\n '%d.%d/maintained/%s/all/Packages.gz' % (MAJOR + 1, 0, NEXT): DATA,\n })\n next = self.u.release_update_available()\n self.assertEqual(NEXT, next)", "def test_package_can_not_upgraded_cause_required(self):\n with self.with_config_update():\n with patch(\n \"aea.cli.upgrade.ItemRemoveHelper.check_remove\",\n return_value=(\n set([PackageId(\"connection\", PublicId(\"test\", \"test\", \"0.0.1\"))]),\n set(),\n dict(),\n ),\n ), pytest.raises(\n ClickException,\n match=r\"Can not upgrade .* because it is required by '.*'\",\n ):\n self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:latest\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )", "def test_compatibility_of_version_of_installed_rpm_packages(self):\n incompatible_packages = get_incompatible_packages()\n error_msg = linesep + 'List of incompatible packages: '\n for package in incompatible_packages:\n error_msg += linesep + package\n self.assertFalse(incompatible_packages, error_msg)", "def fail_check(version, num):\n f1 = open(\"replace/outputs/t\" + str(num), 'r')\n f2 = open(\"replace/outputs/v\" + str(version) + \"/t\" + str(num), 'r')\n ret = f1.readlines() != f2.readlines()\n f1.close()\n f2.close()\n return ret", "def have_python_version(name, cache={}):\n if name not in cache:\n cache[name] = os.system(name + ' -c \"import test.test_support\"') == 0\n return cache[name]", "def test_live_migration_common_check_service_different_version(self):\n dest = 'dummydest'\n instance_id = self._create_instance()\n i_ref = db.instance_get(self.context, instance_id)\n\n # compute service for destination\n s_ref = self._create_compute_service(host=i_ref['host'])\n # compute service for original host\n s_ref2 = self._create_compute_service(host=dest,\n hypervisor_version=12002)\n\n # mocks\n driver = self.scheduler.driver\n self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')\n driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)\n\n self.mox.ReplayAll()\n self.assertRaises(exception.DestinationHypervisorTooOld,\n self.scheduler.driver._live_migration_common_check,\n self.context, i_ref, dest, False)\n\n db.instance_destroy(self.context, instance_id)\n db.service_destroy(self.context, 
s_ref['id'])\n db.service_destroy(self.context, s_ref2['id'])", "def test_version_rename_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('version rename 1.0 9.9')\n rv, output = self._execute('version list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def test_compare_local_version_is_newer(self):\n\n given = \"1.15.0.dev (Hello, World)\"\n expected = False\n actual = Version.compare(given)\n\n self.assertEqual(expected, actual)", "def downgrade():\n pass", "def downgrade():\n pass", "def test_present_in_both_db(self):\n for i in range(5):\n price = find_cheapest_price(\"Star Wars: Episode VI - Return of the Jedi\")\n if price is \"69.5\":\n break\n time.sleep(1)\n self.assertTrue(price == \"69.5\")", "def test_16_24_newest_hotfix(self):\n self.data['version'] = '20130826.01'\n self.data['appVersion'] = '16.0.2'\n\n up = self.get(self.data)\n rdf = up.get_rdf()\n assert rdf.find('20202020.01') > -1", "def checkIfMinimumVersionIsMet(testConfig):\n assert \"name\" in testConfig\n assert \"binary\" in testConfig\n assert \"minimum_version\" in testConfig\n \n #Set default version command as \"testConfig[\"name\"] --version\"\n #Otherwise, use testConfig[\"version_command\"]\n if \"version_command\" in testConfig:\n versionCommand = testConfig[\"version_command\"]\n else:\n versionCommand = testConfig[\"binary\"]+r\" --version\"\n \n #Run the version command, grab stdout and stderr\n p = subprocess.Popen(versionCommand.split(), stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n versionOut,versionError = p.communicate()\n versionOut = str(versionOut)+str(versionError)\n \n #Find all instances of something that could be the version number in the output\n installedVersion = re.findall(r\"([0-9]+\\.)+[0-9]+\", versionOut)\n \n #Go through all the matches, if anything starts with our expected version,\n #Set test as pass\n testPass=False\n for version in installedVersion:\n if LooseVersion(str(version)) >= LooseVersion(testConfig[\"minimum_version\"]):\n testPass=True\n break\n \n assert testPass,\"\\nVersion output was :\"+versionOut+\\\n \"\\nExpected minimum version: \"+testConfig[\"minimum_version\"]+\\\n \"\\n Test failed.\"", "def check_first_write(plugin, data_version):\n backend = plugin.backend\n\n logging.info(\"Comparing backup version {} versus first write version {}\".format(\n backend.version, data_version\n ))\n\n if backend.version == data_version - 1:\n logging.info(\"Versions match up\")\n return True\n\n elif backend.prev_version == data_version - 1 and plugin.backend.rewind():\n logging.info(\"Last changes not applied, rewinding non-committed transaction\")\n return True\n\n elif backend.prev_version > data_version - 1:\n kill(\"Core-Lightning seems to have lost some state (failed restore?). Emergency shutdown.\")\n\n else:\n kill(\"Backup is out of date, we cannot continue safely. 
Emergency shutdown.\")", "def wait_for_upgrade(self, timeout=60):\n ret = None\n try:\n ret = self.upgradeprocess.wait(timeout=timeout)\n except psutil.TimeoutExpired as timeout_ex:\n msg = \"StarterManager: Upgrade command [%s] didn't finish in time: %d\" % (\n str(self.basedir),\n timeout,\n )\n raise TimeoutError(msg) from timeout_ex\n logging.info(\n \"StarterManager: Upgrade command [%s] exited: %s\",\n str(self.basedir),\n str(ret),\n )\n if ret != 0:\n raise Exception(\"Upgrade process exited with non-zero reply\")", "def _check_version(version):\n # Update cache if needed.\n if _check_version._versions_cache is None:\n log.debug(\"Loading versions cache ...\")\n _check_version._versions_cache = __salt__[\"netbeans.list_versions\"]()\n\n # Convert latest.\n if version is None or version == \"latest\":\n return __salt__[\"netbeans.pick_latest_version\"](\n _check_version._versions_cache\n )\n\n # Check if version is available.\n if version not in _check_version._versions_cache:\n return None\n return version", "def test_nothing_to_upgrade(self, mock_click_echo):\n agent_config = self.load_agent_config(self.agent_name)\n result = self.run_cli_command(\"upgrade\", cwd=self._get_cwd())\n assert result.exit_code == 0\n mock_click_echo.assert_any_call(\"Starting project upgrade...\")\n mock_click_echo.assert_any_call(\n f\"Checking if there is a newer remote version of agent package '{agent_config.public_id}'...\"\n )\n mock_click_echo.assert_any_call(\n \"Package not found, continuing with normal upgrade.\"\n )\n mock_click_echo.assert_any_call(\"Everything is already up to date!\")" ]
[ "0.7359393", "0.7041746", "0.68207544", "0.6809215", "0.6583295", "0.6510883", "0.64602184", "0.6447396", "0.64244986", "0.6342098", "0.63395315", "0.63182235", "0.63094175", "0.63028187", "0.63012606", "0.6289545", "0.62604177", "0.6240322", "0.61509264", "0.61496556", "0.61277044", "0.61260206", "0.61152864", "0.6104232", "0.6092955", "0.6092449", "0.607802", "0.60421145", "0.60185343", "0.6007684", "0.60049254", "0.59885", "0.5976896", "0.5952415", "0.59447294", "0.5944203", "0.5943514", "0.5918846", "0.5880632", "0.5877131", "0.58701885", "0.5858867", "0.58515936", "0.58514917", "0.584417", "0.58399594", "0.5836475", "0.5793118", "0.5791331", "0.5784909", "0.57783335", "0.575956", "0.57518435", "0.57495433", "0.57415855", "0.57390684", "0.57275283", "0.5727056", "0.5715648", "0.57154596", "0.5708868", "0.5707093", "0.5704887", "0.5697222", "0.569626", "0.56783724", "0.5676324", "0.56681097", "0.5661378", "0.56586736", "0.56486034", "0.5648226", "0.5647849", "0.56367093", "0.56221026", "0.56219673", "0.5613755", "0.5603212", "0.5588393", "0.5584133", "0.5578369", "0.55761874", "0.5572836", "0.55622065", "0.55612105", "0.5548001", "0.5547366", "0.553536", "0.55340075", "0.5530751", "0.55270094", "0.55253714", "0.55253714", "0.55243313", "0.5512053", "0.55088675", "0.5508722", "0.5503629", "0.5496778", "0.54964536" ]
0.6330563
11
get requirements file line.
def get_line(self): # type: () -> str line = "{}=={}".format(self.name, self.version) if self.type != RequirementType.LATEST_VERSION: line += ' # ' + TEMPLATES[self.type] if self.type == RequirementType.NOT_LATEST_VERSION: line = line.replace(r'(\S*)', self.error_version) return line + '\n'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_line(self, path, line):\n\t\tlines = self.find_source(path)\n\t\tif lines == None:\n\t\t\treturn None\n\t\telse:\n\t\t\ttry:\n\t\t\t\treturn lines[line - 1]\n\t\t\texcept IndexError:\n\t\t\t\treturn None", "def find_requirements():\n with open(\"requirements.txt\", 'r') as f:\n return f.read().splitlines()", "def _get_relevant_line(self):\n # () -> (Phi.Line)\n line_name = self._get_line_name()\n print(\"looking for \"+str(line_name))\n return Phi.findLine(line_name)", "def GetLine(line):\r\n pass", "def parse_requirements(requirements_file='requirements.txt'):\n lines = []\n with open(requirements_file) as reqs:\n for _ in reqs:\n line = _.split('#')[0]\n if line.strip():\n lines.append(line)\n return lines", "def get_requirements():\n with open('requirements.txt') as fd:\n lines = fd.read().splitlines()\n requires, links = [], []\n for line in lines:\n if line.startswith('git+'):\n links.append(line)\n elif line:\n requires.append(line)\n return requires, links", "def _get_dependencies(requirements_file: Path) -> List[str]:\n lines = requirements_file.read_text().strip().split('\\n')\n return [line for line in lines if not line.startswith('#')]", "def get_requirements(req):\n\n install_requires = []\n with open(req) as f:\n for line in f:\n if not line.startswith(\"#\"):\n install_requires.append(line.strip())\n return install_requires", "def get_requirements():\n\n with open('requirements.txt', 'r') as f:\n requirements = f.readlines()\n requires = []\n for require in requirements:\n if require.startswith(\"#\") or require.startswith(\"\\n\"):\n continue\n else:\n requires.append(require.replace(\"\\n\", \"\"))\n return requires", "def findRequirements():\n return [\n line.strip()\n for line in open(\"requirements.txt\").readlines()\n if not line.startswith(\"#\")\n ]", "def get_requirements_path():\n\n root = os.path.dirname(os.path.abspath(__file__))[:-13]\n req_path = os.path.join(root, 'requirements.txt')\n\n return req_path", "def get_readme_line(self, test_name, line_match):\n return self.get_output_line(test_name, line_match, \"README\")", "def get_requirement_info():\n links, requirements = [], []\n info = {'dependency_links': links, 'install_requires': requirements}\n requirements_path = 'requirements.txt'\n\n if not os.path.isfile(requirements_path):\n print('requirements.txt not found. 
Did you forget it?')\n return info\n\n reqs = filter(None, map(str.strip, open(requirements_path)))\n for line in reqs:\n if is_http(line):\n i = line.find('#egg=')\n if i == -1:\n raise SetupError('Missing \\'#egg=\\' in requirement link.')\n links.append(line[:i])\n requirements.append(line[i+5:])\n else:\n requirements.append(line)\n return info", "def parse_requirement(req_text):\n req_text = req_text.strip()\n if not req_text:\n return None\n if req_text[0] == \"#\":\n return None\n return pkg_resources.Requirement.parse(req_text)", "def read_requirements(filepath):\n with open(filepath, 'r') as fd:\n return fd.read().split('\\n')", "def parse_requirements(fn):\n with open(fn) as f:\n rv = []\n for line in f:\n line = line.strip()\n if not line or line.startswith('#'):\n continue\n rv.append(line)\n return rv", "def first_line(self):\n with open(self.file_path) as file:\n return file.readline()", "def read_requirements():\r\n reqs_path = os.path.join('.', 'requirements.txt')\r\n with open(reqs_path, 'r') as f:\r\n requirements = [line.rstrip() for line in f]\r\n return requirements", "def test_req_file_parse_egginfo_end_of_line_with_url(tmpdir):\n with open(tmpdir.join(\"req1.txt\"), \"w\") as fp:\n fp.write(\"https://example.com/foo.tar.gz#egg=wat\")\n\n finder = PackageFinder([], [], session=PipSession())\n reqs = list(parse_requirements(tmpdir.join(\"req1.txt\"), finder,\n session=PipSession()))\n\n assert len(reqs) == 1\n assert reqs[0].name == \"wat\"", "def get_requirements():\n name = 'pypeit/requirements.txt'\n\n requirements_file = os.path.join(os.path.dirname(__file__), name)\n install_requires = [line.strip().replace('==', '>=') for line in open(requirements_file)\n if not line.strip().startswith('#') and line.strip() != '']\n return install_requires", "def read_requirements():\n reqs_path = path.join('.', 'requirements.txt')\n with open(reqs_path, 'r') as f:\n requirements = [line.rstrip() for line in f]\n return requirements", "def read_requirements():\n with open('requirements.txt') as f:\n requirements = f.readlines()\n return [element.strip() for element in requirements]", "def get_version(rel_path: str) -> str:\n for line in read(rel_path).splitlines():\n if line.startswith(\"VERSION\"):\n delim = '\"' if '\"' in line else \"'\"\n return line.split(delim)[1]\n raise RuntimeError(\"Unable to find version string.\")", "def _path_and_line(self):\n path, line = (re.match(r'-r (.*) \\(line (\\d+)\\)$',\n self._req.comes_from).groups())\n return path, int(line)", "def getLine(self):\n\t\tif len(self._completeLines) > 0:\n\t\t\treturn self._completeLines.pop(0)\n\t\telse:\n\t\t\treturn None", "def readline(self) -> Optional[str]:", "def _update_properties_file(self, lines, filename):\n found_version_line = False\n if filename.endswith('cogent-requirements.txt'):\n for lineno, line in enumerate(lines):\n if 'packages/source/c/cogent' in line:\n found_version_line = True\n break\n if found_version_line:\n if self.Verbose:\n print 'Version string found on line %d' % lineno\n http_base = lines[lineno].rsplit('/',1)[0]\n lines[lineno] = '%s/PyCogent-%s.tgz\\n' % (http_base, self.Version)\n else:\n print \"No version string found in %s\" % filename\n return (lines, found_version_line)", "def get_input(line):\n tex_input_filename_re = r\"\"\"{[^}]*\"\"\"\n m = re.search(tex_input_filename_re, line)\n return m.group()[1:]", "def parse_requirements_txt():\n root = os.path.dirname(os.path.abspath(__file__))\n\n requirements = []\n dependencies = []\n\n with open(os.path.join(root, 
'requirements.txt'), 'r') as f:\n for line in f.readlines():\n line = line.rstrip()\n if not line or line.startswith('#'):\n continue\n\n egg = re.match('git\\+.*#egg=(.*)$', line)\n if egg is not None:\n egg = egg.groups()[0]\n requirements.append(egg)\n dependencies.append(line)\n else:\n requirements.append(line)\n\n return requirements, dependencies", "def parse_requirements_file(filename):\n with open(filename) as input_file:\n return input_file.read().splitlines()", "def get_line(file, linenum):\n try:\n with open(file, \"r\") as f:\n return f.readlines()[linenum - 1].replace(\"\\n\", \"\")\n except:\n return f\"[ERROR]: could not open '{file}'\"", "def get_output_line(self, test_name, line_match, doc_name):\n subpath = self.get_subdirectory(test_name)\n with open(os.path.join(subpath, doc_name),\"r\") as rfile:\n rlines = rfile.readlines()\n for rline in rlines:\n if line_match in rline:\n return rline.strip()\n return None", "def get_valid_requirements(req_path):\n return [r for r in open(req_path, \"r\").readlines() if r[0] != \"#\"]", "def read_requirements(path=\"requirements.txt\"):\n full_path = os.path.join(LOCAL_DIR, path)\n\n def yield_line(path):\n with open(path, \"r\") as fid:\n for line in fid.readlines():\n yield line\n\n return [\n requirement.strip()\n for requirement in yield_line(full_path)\n if not requirement.startswith(\"#\")\n ]", "def parse_requirements(filename, *args, **kwargs):\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith(\"#\")]", "def read_requirements(file_name):\n reqs = read_file(file_name).splitlines()\n if not reqs:\n raise RuntimeError(\n \"Unable to read requirements from the %s file\"\n \"That indicates this copy of the source code is incomplete.\"\n % file_name\n )\n return reqs", "def line(self):\n return self[\"line\"]", "def line(self):\n return self[\"line\"]", "def line(self):\n return self[\"line\"]", "def line(self):\n return self[\"line\"]", "def get_version(rel_path):\n for line in read(rel_path).splitlines():\n if line.startswith(\"__version__\"):\n delim = '\"' if '\"' in line else \"'\"\n return line.split(delim)[1]\n raise RuntimeError(f\"Unable to find a valid __version__ string in {rel_path}.\")", "def get(self):\n\t\t\n\t\treturn self.line", "def __get_line(file_path: str, line_no: int, errors: str = 'ignore') -> str:\n try:\n with open(file_path, mode='r',\n encoding='utf-8', errors=errors) as f:\n for line in f:\n line_no -= 1\n if line_no == 0:\n return line\n return ''\n except IOError:\n LOG.error(\"Failed to open file %s\", file_path)\n return ''", "def getLineInformation(line):\n \n pass", "def get_line(file: str, line: str, logfile: str) -> Union[str, bool]:\n current_file = read_file_log(file, logfile)\n m = re_search(rf\"{line}\", current_file)\n if m is not None:\n return m.group(0)\n return False", "def get_input_line(self, lineno):\n try: return self.input_lines[lineno - 1]\n except IndexError: return None", "def parse_requirements(filename):\r\n lineiter = (line.strip() for line in open(filename))\r\n return [line for line in lineiter if line and not line.startswith(\"#\")]", "def req_from_line(line: str) -> Req:\n before, after = re.match(rgx, line).groups()\n return (before, after)", "def get_install_requires():\n requirements = []\n for line in open('requirements.txt').readlines():\n # skip to next iteration if comment or empty line\n if line.startswith('#') or line == '' or line.startswith('http') or line.startswith('git'):\n continue\n 
# add line to requirements\n requirements.append(line)\n return requirements", "def test_req_file_parse_comment_start_of_line(tmpdir):\n with open(tmpdir.join(\"req1.txt\"), \"w\") as fp:\n fp.write(\"# Comment \")\n\n finder = PackageFinder([], [], session=PipSession())\n reqs = list(parse_requirements(tmpdir.join(\"req1.txt\"), finder,\n session=PipSession()))\n\n assert not reqs", "def _parse_line(self, line):\n with open(self._manifest.path, 'r') as manifest_file:\n if isinstance(line, str):\n assert line in self.BASE_INFORMATION.keys(), \\\n 'An attempt to get non-existent information from the manifest'\n for _ in range(self.BASE_INFORMATION[line]):\n fline = manifest_file.readline()\n return json.loads(fline)[line]\n else:\n assert self._index, 'No prepared index'\n offset = self._index[line]\n manifest_file.seek(offset)\n properties = manifest_file.readline()\n return json.loads(properties)", "def get_requirements():\n requirements_list = []\n\n if not os.path.isfile(REQUIREMENTS_FILE):\n # Check if requirements file did not exist.\n return requirements_list\n\n with open(REQUIREMENTS_FILE) as reqs:\n for install in reqs:\n requirements_list.append(install.strip())\n\n return requirements_list", "def parse_requirements(filename):\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith(\"#\")]", "def parse_requirements(filename):\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith(\"#\")]", "def parse_requirements(filename):\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith(\"#\")]", "def get_line(cls, node):\n return cls.lines[node.lineno - 1].strip()", "def _get_requirements_file_contents() -> Dict[str, str]:\n requirements_contents: Dict[str, str] = collections.defaultdict()\n with utils.open_file(\n common.COMPILED_REQUIREMENTS_FILE_PATH, 'r') as f:\n trimmed_lines = (line.strip() for line in f.readlines())\n for line_num, line in enumerate(trimmed_lines, start=1):\n if not line or line.startswith('#') or line.startswith('--hash='):\n continue\n\n if line.startswith('git'):\n match = GIT_DIRECT_URL_REQUIREMENT_PATTERN.match(line)\n if not match:\n raise Exception(\n '%r on line %d of %s does not match '\n 'GIT_DIRECT_URL_REQUIREMENT_PATTERN=%r' % (\n line, line_num,\n common.COMPILED_REQUIREMENTS_FILE_PATH,\n GIT_DIRECT_URL_REQUIREMENT_PATTERN.pattern))\n library_name, version_string = match.group(2, 1)\n\n else:\n library_name, version_string = line.split(' ')[0].split('==')\n\n # Libraries with different case are considered equivalent libraries:\n # e.g 'Flask' is the same library as 'flask'. 
Therefore, we\n # normalize all library names in order to compare libraries without\n # ambiguities.\n normalized_library_name = (\n normalize_python_library_name(library_name))\n requirements_contents[normalized_library_name] = version_string\n return requirements_contents", "def parse_requirements(filename):\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith('#')]", "def get_first_line(file: str) -> str:\n with open(file) as f:\n return f.readline().split('\\n')[0]", "def _get_line(self, line: int) -> str:\n line_offsets_with_sentinel = self._line_offsets + [len(self._text)]\n return self._text[line_offsets_with_sentinel[line]:line_offsets_with_sentinel[line+1]]", "def get_first_line(filename):\n try:\n with open(filename, \"r\") as ff:\n first_line = ff.readlines()[0].strip(\" \\n\\r\")\n except FileNotFoundError: # pragma: no cover\n first_line = \"xxx\"\n return first_line", "def find_the_path(filename):\n input_file = open(os.path.join(os.path.dirname(__file__), filename), 'r')\n steps = set()\n requirements = defaultdict(set)\n\n for line in input_file:\n matcher = re.match(\"Step\\s(\\w).*step\\s(\\w).*\", line)\n if matcher is not None:\n precondition = matcher.group(1)\n postcondition = matcher.group(2)\n\n steps.add(precondition)\n steps.add(postcondition)\n requirements[postcondition].add(precondition)\n else:\n raise ValueError(f\"Could not find a match with {line}\")\n answer = \"\"\n sorted_steps = sorted(steps)\n while len(sorted_steps) > 0:\n for step in sorted_steps:\n requirements_met = True\n for requirement in requirements[step]:\n if requirement in sorted_steps:\n requirements_met = False\n break\n if requirements_met:\n answer += step\n sorted_steps.remove(step)\n break\n return answer", "def load_requirements(fn):\n with open(fn, 'r') as f:\n return [x.rstrip() for x in list(f) if x and not x.startswith('#')]", "def getTextLine(self, index, path):\n\n # Set data to none by default\n data = None\n\n # Check if the file is accessible\n if not os.path.exists(path):\n self.logger.error(\"Cannot find file at path provided in config file: \".format(path))\n return None\n\n # Try to read text file and return the value of the last line in the file\n try:\n with open(path, 'r') as file:\n data = file.readline(index)\n except IOError:\n self.logger.error(\"Error when opening file, check if file is readable: \".format(path))\n finally:\n return data", "def read_requirements(*parts):\n requirements = []\n for line in read(*parts).splitlines():\n line_2 = re.sub(\n \"(\\s*)?#(?!egg=).*$\", # the space immediately before the hash mark, the hash mark, and anything that follows it, but not \"#egg=\" fragments\n \"\", # replace with a blank string\n line,\n )\n line_3 = re.sub(\n \"(\\s*)?-r.*$\", # we also can't reference other requirement files\n \"\", # replace with a blank string\n line_2,\n )\n if line_3: # i.e. 
we have a non-zero-length string\n requirements.append(line_3)\n return requirements", "def get_version(file_data):\n for individual in file_data:\n if 'opam-version' in individual:\n version = individual.split('\"')\n return version[1]", "def get_build_line(latest_build):\n proc = Popen([\"osg-koji\", \"buildinfo\", latest_build],\n stdout=PIPE)\n build_line = proc.stdout.readline().decode(\"latin-1\").strip()\n ret = proc.wait()\n if ret != 0 or not build_line:\n return\n return build_line", "def GetLine(self):\r\n retline = None\r\n outline = None\r\n try:\r\n retline= str(self.file.readline())\r\n except IOError:\r\n self.tracking.SetError(type(self).__name__, sys._getframe().f_code.co_name, \"cannot read a line from\" )\r\n finally: \r\n #outline1 = retline.replace(\"/\",\"\")\r\n #if( (retline !=\"\") and (retline !=\"\\n\")) :\r\n outline = str(retline)\r\n return outline.replace(\"+\",\"\")\r\n #return unicodedata.normalize('NFKD', outline).encode('ascii','ignore')\r", "def get_requirements():\n raw_requirements = read(\"requirements.txt\")\n requirements = []\n dependencies = []\n\n for req in raw_requirements.splitlines():\n req = req.strip()\n if not req:\n continue\n\n if req.startswith(\"#\"):\n continue\n\n if \"+\" in req:\n dependencies.append(req)\n else:\n requirements.append(req)\n\n return requirements, dependencies", "def is_requirement(line):\n # Remove whitespace at the start/end of the line\n line = line.strip()\n\n # Skip blank lines, comments, and editable installs\n return not (\n line == '' or\n line.startswith('-r') or\n line.startswith('#') or\n line.startswith('-e') or\n line.startswith('git+')\n)", "def extract_requiremens(file):\n\n with open(file, 'r') as file:\n return file.read().splitlines()", "def parse_requirements(filename):\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not\n line.startswith(\"#\")]", "def parse_requirements(filename):\n try:\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith(\"#\")]\n except OSError:\n return []", "def read_line(filename):\n line = \"Unknown\"\n try:\n with open(filename) as f:\n line = f.readline().strip()\n finally:\n return line", "def get_min_required(self) -> str:\n version = self.terraform_block.get(\"required_version\")\n\n if version:\n if re.match(r\"^!=.+\", version):\n LOGGER.error(\n \"min required Terraform version is a negation (%s) \"\n \"- unable to determine required version\",\n version,\n )\n sys.exit(1)\n else:\n version = re.search(r\"[0-9]*\\.[0-9]*(?:\\.[0-9]*)?\", version)\n if version:\n LOGGER.debug(\"detected minimum Terraform version is %s\", version)\n return version.group(0)\n LOGGER.error(\n \"Terraform version specified as min-required, but unable to \"\n \"find a specified version requirement in this module's tf files\"\n )\n sys.exit(1)", "def _get_line(self):\n line = self.file.readline(self.maxline + 1)\n if len(line) > self.maxline:\n print(f\"ERROR: got more than {self.maxline} bytes\")\n if not line:\n print(\"Received EOF\")\n if line[-2:] == CRLF:\n line = line[:-2]\n elif line[-1:] in CRLF:\n line = line[:-1]\n return line + CRLF", "def _read_sourced_path(self, line):\n # type: (str)->tp.Optional[str]\n if line.startswith('source '):\n sline = [x.strip() for x in line.split()]\n sline.pop(0)\n path = ' '.join(sline)\n if not os.path.isabs(path):\n current_root = self._root_interfaces_path\n if os.path.isfile(current_root):\n current_root = 
os.path.dirname(current_root)\n path = os.path.join(current_root, path)\n return path\n return None", "def get_install_requires_version():\n require_str = \"pyscaffold>={major}.{minor}a0,<{next_major}.0a0\"\n major, minor, *rest = (parse_version(pyscaffold_version)\n .base_version.split('.'))\n next_major = int(major) + 1\n return require_str.format(major=major, minor=minor, next_major=next_major)", "def parse_requirements(filename):\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith((\"#\", \"--\"))]", "def get_match(field, line):\n match = re.search(field, line)\n if match:\n return match.group(1)", "def findInLine(self) -> str:\n raise NotImplementedError", "def get_requires(path=REQUIRE_PATH):\n for line in read(path).splitlines():\n line = line.strip()\n if line and not line.startswith('#'):\n yield line", "def get_requirements(*args):\n requirements = set()\n with open(get_absolute_path(*args)) as handle:\n for line in handle:\n # Strip comments.\n line = re.sub(r'^#.*|\\s#.*', '', line)\n # Ignore empty lines\n if line and not line.isspace():\n requirements.add(re.sub(r'\\s+', '', line))\n return sorted(requirements)", "def get_header(file):\n with open(file, 'r') as f:\n return f.readline()", "def discover_requirements_sha(\n path=\"playbooks/defaults/repo_packages/openstack_services.yml\"\n):\n with open(path, \"r\") as os_repos_yaml:\n repos = yaml.safe_load(os_repos_yaml)\n return repos[\"requirements_git_install_branch\"]", "def readline(self) -> str | None:", "def parse_reqs(req_path='./requirements.txt'):\n req = []\n with codecs.open(req_path, 'r') as handle:\n # remove comments and empty lines\n lines = (line.strip() for line in handle\n if line.strip() and not line.startswith('#'))\n\n for line in lines:\n # check for nested requirements files\n if line.startswith('-r'):\n # recursively call this function\n req += parse_reqs(req_path=line[3:])\n\n else:\n # add the line as a new requirement\n req.append(line)\n\n return req", "def _parse_requirements(path: pathlib.Path):\n lines = [line.strip() for line in path.read_text().splitlines() if line]\n return [line for line in lines if not line.startswith('#')]", "def is_requirement(line):\n # Remove whitespace at the start/end of the line\n line = line.strip()\n\n # Skip blank lines, comments, and editable installs\n return not (\n line == '' or\n line.startswith('-r') or\n line.startswith('#') or\n line.startswith('-e') or\n line.startswith('git+')\n )", "def line(self):\n\n\t\treturn self.__line", "def parse_req_file(req_file, verbatim=False):\n req_list = []\n requirements = req_file.readlines()\n for requirement in requirements:\n requirement_no_comments = requirement.split(\"#\")[0].strip()\n\n # if matching requirement line (Thing==1.2.3), update dict, continue\n req_match = re.match(\n r\"\\s*(?P<package>[^\\s\\[\\]]+)(?P<extras>\\[\\S+\\])?==(?P<version>\\S+)\",\n requirement_no_comments,\n )\n req_ignore = requirement.strip().endswith(\" # norot\")\n\n if req_match:\n req_list.append(\n (req_match.group(\"package\"), req_match.group(\"version\"), req_ignore)\n )\n elif requirement_no_comments.startswith(\"-r\"):\n try:\n base_dir = os.path.dirname(os.path.abspath(req_file.name))\n except AttributeError:\n print(\n \"Recursive requirements are not supported in URL based \" \"lookups\"\n )\n continue\n\n # replace the -r and ensure there are no leading spaces\n file_name = requirement_no_comments.replace(\"-r\", \"\").strip()\n new_path = 
os.path.join(base_dir, file_name)\n try:\n if verbatim:\n req_list.append((None, requirement, req_ignore))\n req_list.extend(parse_req_file(open(new_path), verbatim=verbatim))\n except IOError:\n print(\"Failed to import {}\".format(file_name))\n elif verbatim:\n req_list.append((None, requirement, req_ignore))\n return req_list", "def read_requirements():\n reqs_path = os.path.join(__location__, 'requirements.txt')\n with open(reqs_path, encoding='utf8') as f:\n reqs = [line.strip() for line in f if not line.strip().startswith('#')]\n\n names = []\n links = []\n for req in reqs:\n if '://' in req:\n links.append(req)\n else:\n names.append(req)\n return {'install_requires': names, 'dependency_links': links}", "def parse_requirements(filename):\n lines = (line.strip() for line in open(filename))\n return [line.strip() for line in lines if line and not line.strip().startswith(\"#\")]", "def test_url_preserved_line_req(self):\n url = 'git+http://foo.com@ref#egg=foo'\n req = InstallRequirement.from_line(url)\n assert req.link.url == url", "def __call__( self, line ):\n return self.__getitem__( line )", "def __call__( self, line ):\n return self.__getitem__( line )", "def test_req_file_parse_comment_end_of_line_with_url(tmpdir):\n with open(tmpdir.join(\"req1.txt\"), \"w\") as fp:\n fp.write(\"https://example.com/foo.tar.gz # Comment \")\n\n finder = PackageFinder([], [], session=PipSession())\n reqs = list(parse_requirements(tmpdir.join(\"req1.txt\"), finder,\n session=PipSession()))\n\n assert len(reqs) == 1\n assert reqs[0].link.url == \"https://example.com/foo.tar.gz\"", "def _get_requirement_text(node):\n if isinstance(node, tree.Name):\n # This happens when the users write a constant variable\n return node.name # pragma: no cover\n\n if isinstance(node, tree.String):\n # The most common occurrence - people just write the requirement as a string\n #\n # Remove the surrounding \"\"s of the string\n #\n return node.value.strip(\"'\").strip('\"')\n\n return \"\"", "def report_extract_request(red):\n line_req = red.get_text_line(0) # First\n if REQ_KEY not in line_req: return None\n req_opts= line_req[line_req.index(REQ_KEY)+len(REQ_KEY):]\n return req_opts", "def requires(self):\n\t\treturn (self.source_name, )" ]
[ "0.6740572", "0.6701391", "0.65815175", "0.64647526", "0.6415879", "0.6412485", "0.6349819", "0.63256943", "0.63117", "0.6291996", "0.62328476", "0.6201596", "0.6177119", "0.6160064", "0.6157574", "0.615133", "0.6138522", "0.6128042", "0.6090447", "0.6074949", "0.6072613", "0.6064727", "0.6062027", "0.6029496", "0.6026504", "0.5988322", "0.59870327", "0.59679586", "0.59501535", "0.5935482", "0.5934801", "0.5906755", "0.59063935", "0.5904332", "0.5891468", "0.58881843", "0.5887182", "0.5887182", "0.5887182", "0.5887182", "0.5875182", "0.5871748", "0.58641666", "0.5862754", "0.5840322", "0.58355755", "0.5801073", "0.5783933", "0.57561606", "0.57400805", "0.5736761", "0.57307196", "0.57290363", "0.57290363", "0.57290363", "0.57120275", "0.5700869", "0.56942636", "0.5693693", "0.56906426", "0.5690233", "0.5682941", "0.5680446", "0.5677051", "0.56760734", "0.5674078", "0.56592447", "0.56558883", "0.5650922", "0.5647933", "0.564106", "0.5632226", "0.5630451", "0.5625946", "0.56234974", "0.562067", "0.5616603", "0.5612679", "0.56119055", "0.5609232", "0.56074643", "0.56057674", "0.5602314", "0.5602256", "0.5591556", "0.5587219", "0.5579921", "0.5549055", "0.55464274", "0.5545767", "0.55431396", "0.55380386", "0.55372906", "0.5523345", "0.54998535", "0.54998535", "0.54776657", "0.54593223", "0.5456564", "0.54547817" ]
0.6781809
0
Split line on text and comment
def split_line(self, line):  # type: (str) -> tuple
    parts = [s.strip() for s in line.split('#', 1)]
    package = parts[0]
    comment = parts[1] if len(parts) >= 2 else ''
    return package, comment
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_comment(cls, code):\r\n if '#' not in code: return code\r\n #: Remove comments only (leave quoted strings as they are)\r\n subf = lambda m: '' if m.group(0)[0]=='#' else m.group(0)\r\n return re.sub(cls.re_pytokens, subf, code)", "def to_multi_line_comment(text: str) -> str:\n pass", "def standalone_comment_split(\n line: Line, features: Collection[Feature], mode: Mode\n) -> Iterator[Line]:\n if not line.contains_standalone_comments(0):\n raise CannotSplit(\"Line does not have any standalone comments\")\n\n current_line = Line(\n mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets\n )\n\n def append_to_line(leaf: Leaf) -> Iterator[Line]:\n \"\"\"Append `leaf` to current line or to new line if appending impossible.\"\"\"\n nonlocal current_line\n try:\n current_line.append_safe(leaf, preformatted=True)\n except ValueError:\n yield current_line\n\n current_line = Line(\n line.mode, depth=line.depth, inside_brackets=line.inside_brackets\n )\n current_line.append(leaf)\n\n for leaf in line.leaves:\n yield from append_to_line(leaf)\n\n for comment_after in line.comments_after(leaf):\n yield from append_to_line(comment_after)\n\n if current_line:\n yield current_line", "def DropComment(text):\n grp = re.compile(r'/\\*[^/]*\\*/').split(text)\n result = string.join(grp);\n grp = re.compile(r'//.*').split(result);\n result = string.join(grp);\n #result = string.join(result.split('\\n')) #remove the line break\n return(' '+result);", "def to_single_line_comment(text: str) -> str:\n pass", "def splitBodyLines(cls, text):\n\n def remove_comments(line):\n \"\"\"\n Returns the given line stripped of any comments.\n \"\"\"\n hashPos = line.find('#')\n return line[:hashPos] if hashPos >= 0 else line\n\n # Remove comments, strip whitespace, and return only non-blank lines\n lines = map(str.strip, map(remove_comments, text.splitlines()))\n return [l for l in lines if l]", "def splitdefines(txt):\n pre = []\n c = []\n for line in txt.split(\"\\n\"):\n if line.startswith(\"#\"):\n pre.append(line)\n else:\n c.append(line)\n return pre, c", "def get_title_block(txt):\n res = \"\"\n in_title = False\n for line in txt.splitlines():\n if line.startswith(\"#!\"):\n in_title = True\n res += clean_comment(line)\n continue\n if in_title:\n if line.startswith(\"#\"):\n res += clean_comment(line)\n else:\n break\n return res", "def line_split(self, line):\n parts = []\n part = None\n quote = None\n for c in line:\n if part is None and not self.is_space(c):\n quote = c if self.is_quote(c) else None\n part = c if quote is None else \"\"\n elif part is not None and quote is None and not self.is_space(c):\n part += c\n elif part is not None and quote is not None:\n if c != quote:\n part += c\n else:\n parts.append(part)\n part = None\n quote = None\n elif part is not None and quote is None and self.is_space(c):\n parts.append(part)\n part = None\n quote = None\n if part is not None:\n parts.append(part)\n return parts", "def block_comments(code):\n block = list()\n for line in code:\n if bool(line.strip()): # If line is not empty\n if line.strip()[0] == '!': # If the first character of the string is the start of a comment it adds it\n block.append(identify_comment(line))\n elif bool(line.strip()): # If the first character of the string is not the start of a comment or its not empty it exits\n break\n return block", "def parse_space_in_comment(comment):\n max_spaces_dict = {}\n for line in comment:\n if (not line.strip()) or line.find(\" \") == -1:\n # empty line or line do not have spaces in 
it.\n continue\n max_spaces_dict[line] = max(len(list(v)) for k, v in groupby(line) if k == \" \")\n\n sep = [(line.index(\" \" * count) + count) for line, count in max_spaces_dict.items()]\n sep.sort()\n count_dict = {len(list(v)):k for k, v in groupby(sep)}\n\n if max(count_dict.keys()) < 3:\n return {}, comment\n\n comment_dict = {}\n # more than 3 lines following the same pattern, extract from it.\n sep_position = count_dict[max(count_dict.keys())] - 1\n debug(\"found boundary: %s\" % sep_position)\n\n def line_match_pattern(line, position, prev_line=None, next_line=None, recursive=True):\n \"\"\"\n for a line to match a pattern, its next line or its prev line must\n also match the pattern. Notice that the function would call itself\n to see if its next/prev line matches the pattern. So we used a flag\n to stop it from going deeper into the loop.\n \"\"\"\n if line.strip() and len(line) <= position + 1:\n return False\n if not (line[position] == \" \" and line[position+1] != \" \"):\n # The line itself must match the pattern.\n return False\n if (prev_line is None) and (next_line is None) and recursive:\n print(\"##### Bad way to call this function. ####\")\n return False\n\n if not recursive:\n # If we do not go deeper, then the current line just match the pattern.\n return True\n\n if prev_line and prev_line.strip() and not (line_match_pattern(prev_line, position, recursive=False)):\n return False\n\n if next_line and next_line.strip() and not (line_match_pattern(next_line, position, recursive=False)):\n return False\n\n return True\n\n comment_copy = copy(comment)\n for index, line in enumerate(comment_copy):\n if (not line.strip()) or line.find(\" \") == -1 or len(line) < sep_position:\n # empty line, or line has no space, or line to short.\n continue\n if index == 0:\n if line_match_pattern(line, sep_position, next_line=comment_copy[1]):\n key = line[:sep_position].strip(STRIPS)\n value = line[sep_position:].strip(STRIPS)\n debug(\"space || found %s: %s\" % (key, value))\n comment_dict[key] = value\n comment.remove(line)\n else:\n debug(\"First line, but it does not match\")\n continue\n elif index == len(comment_copy)-1:\n if line_match_pattern(line, sep_position, prev_line=comment_copy[-1]):\n key = line[:sep_position].strip(STRIPS)\n value = line[sep_position:].strip(STRIPS)\n debug(\"space || found %s: %s\" % (key, value))\n comment_dict[key] = value\n comment.remove(line)\n else:\n debug(\"last line, but it does not match\")\n continue\n elif line_match_pattern(line, sep_position, prev_line=comment_copy[index-1], next_line=comment_copy[index+1]):\n key = line[:sep_position].strip(STRIPS)\n value = line[sep_position:].strip(STRIPS)\n debug(\"space || found %s: %s\" % (key, value))\n comment_dict[key] = value\n comment.remove(line)\n return comment_dict, comment", "def _parse_comment(i, doc):\n\n if doc[i].strip() != \"/**\":\n raise ParseFailure(i, \"Expected beginning of block comment\")\n\n e = i + 1\n while e < len(doc) and doc[e].strip() != \"*/\":\n e += 1\n\n return e + 1, [x.rstrip() for x in doc[i + 1: e]]", "def test_remove_single_line_comments_annotation():\n\n\tinput_ = \"\"\"line1\n\t\t\t\tline2 \n\t\t\t\t//comment\n\t\t\t\t//@Test //comment\n\t\t\t\t//comment\n\t\t\t\tline3 \"\"\"\n\n\texpect = \"\"\"line1\n\t\t\t\tline2 \n\t\t\t\t\n\t\t\t\t//@Test //comment\n\t\t\t\t\n\t\t\t\tline3 \"\"\"\n\n\tassert aunit.remove_single_line_comments(input_) == expect", "def splitLine(text):\r\n sp = text.split(\" \")\r\n try:\r\n a = sp[0]\r\n b = \" \".join(sp[1:])\r\n 
except:\r\n a = text\r\n b = \"\"\r\n return a, b", "def __clean_line_comments(self):\n self.lines = [l for l in self.lines if not l.startswith(\"//\") and len(l) != 0]", "def __clean_line_comments(self):\n self.lines = [l for l in self.lines if not l.startswith(\"//\") and len(l) != 0]", "def _parse_comments(reader):\n regex = r'\\s*(#|\\/{2}).*$'\n regex_inline = r'(:?(?:\\s)*([A-Za-z\\d\\.{}]*)|((?<=\\\").*\\\"),?)(?:\\s)*(((#|(\\/{2})).*)|)$'\n\n pipe = []\n for line in reader:\n if re.search(regex, line):\n if re.search(r'^' + regex, line, re.IGNORECASE): continue\n elif re.search(regex_inline, line):\n pipe.append(re.sub(regex_inline, r'\\1', line))\n else:\n pipe.append(line)\n return \"\\n\".join(pipe)", "def Split_to_Lines(self):\r\n\r\n line = []\r\n word = \"\"\r\n comment = False\r\n String = False\r\n for i in range(0, len(self.Code)):\r\n if self.Code[i] == '\\n':\r\n if word != '':\r\n if (String is True) and (word[0] != word[len(word) - 1]):\r\n return False\r\n line.append(word)\r\n if len(line) != 0:\r\n self.Code_Lines.append(line)\r\n if len(line) >= 2:\r\n if line[0] == \"end\":\r\n break\r\n word = \"\"\r\n line = []\r\n comment = False\r\n String = False\r\n elif not comment:\r\n if self.Code[i] == ' ':\r\n if not String:\r\n if word != \"\" and word != '':\r\n line.append(str(word))\r\n word = \"\"\r\n else:\r\n word += self.Code[i]\r\n else:\r\n if self.Code[i] == '\"':\r\n if not String:\r\n if word != \"\":\r\n if word != '':\r\n line.append(word)\r\n word = '\"'\r\n String = True\r\n elif word[0] == self.Code[i]:\r\n String = False\r\n word += self.Code[i]\r\n if word != '':\r\n line.append(word)\r\n word = \"\"\r\n else:\r\n word += self.Code[i]\r\n elif self.Code[i] == '\\'':\r\n if not String:\r\n if word != \"\":\r\n if word != '':\r\n line.append(word)\r\n word = '\\''\r\n String = True\r\n elif word[0] == self.Code[i]:\r\n String = False\r\n word += self.Code[i]\r\n if word != '':\r\n line.append(word)\r\n word = \"\"\r\n else:\r\n word += self.Code[i]\r\n else:\r\n if String:\r\n word += self.Code[i]\r\n else:\r\n if self.Code[i] == ';':\r\n comment = True\r\n\r\n elif self.Code[i] in self.Special_Symbols:\r\n if word != '':\r\n line.append(word)\r\n line.append(self.Code[i])\r\n word = \"\"\r\n else:\r\n line.append(self.Code[i])\r\n\r\n else:\r\n word += self.Code[i].lower()\r\n\r\n return self.Code_Lines", "def clean_comments(self):\n new_lines = list()\n for line in self.lines:\n if ((not line.startswith(\"//\")) & (not line.isspace()) &\n (not line.startswith(\"/*\") & (not line.startswith(\"*/\")))):\n line = Parser.strip_line(line)\n new_lines.append(line)\n self.lines = new_lines", "def clean_comment(line):\n if line.startswith(\"#!\"):\n line = line[2:]\n else:\n line = line[1:]\n if line.startswith(\" \"):\n line = line[1:]\n if not line.endswith('\\n'):\n line += '\\n'\n return line", "def getHTMLComments(self, text):\n return self.doSpecial(text, '<!--', '-->', self.fParseHTMLComments)", "def test_remove_single_line_comments_noannotation():\n\n\tinput_ = \"\"\"line1\n\t\t\t\tline2 \n\t\t\t\t//comment\n\t\t\t\tline3 \"\"\"\n\n\texpect = \"\"\"line1\n\t\t\t\tline2 \n\t\t\t\t\n\t\t\t\tline3 \"\"\"\n\n\tassert aunit.remove_single_line_comments(input_) == expect", "def split_pun(comment):\n abb_regex = get_abbreviations_regex()\n \n comment = re.sub(r'([\\W_]+)',r' \\1 ',comment,flags=re.IGNORECASE)\n \n comment = re.sub(r' \\' ','\\'',comment,flags=re.IGNORECASE)\n \n comment = re.sub(r' \\. 
','.',comment,flags=re.IGNORECASE)\n \n comment = re.sub(abb_regex,r\" . \",comment,flags=re.IGNORECASE)\n \n comment = re.sub(r'\\s{2,}',\" \",comment,flags=re.IGNORECASE)\n \n return comment", "def listFromLines(lines):\n reComment = re.compile('#.*')\n temp = [reComment.sub('',x).strip() for x in lines.split('\\n')]\n temp = [x for x in temp if x]\n return temp", "def _split_line( self, data_list, line_num, text ):\n\t\t# if blank line or context separator, just add it to the output list\n\t\tif not line_num:\n\t\t\tdata_list.append( ( line_num, text ) )\n\t\t\treturn\n\n\t\t# if line text doesn't need wrapping, just add it to the output list\n\t\tsize = len( text )\n\t\tmax_len = self._wrapcolumn\n\t\tif ( size <= max_len ) or ( ( size - ( text.count( '\\0' ) * 3 ) ) <= max_len ):\n\t\t\tdata_list.append( ( line_num, text ) )\n\t\t\treturn\n\n\t\t# scan text looking for the wrap point, keeping track if the wrap\n\t\t# point is inside markers\n\t\ti = 0\n\t\tn = 0\n\t\tmark = ''\n\t\twhile n < max_len and i < size:\n\t\t\tif text[i] == '\\0':\n\t\t\t\ti += 1\n\t\t\t\tmark = text[i]\n\t\t\t\ti += 1\n\t\t\telif text[i] == '\\1':\n\t\t\t\ti += 1\n\t\t\t\tmark = ''\n\t\t\telse:\n\t\t\t\ti += 1\n\t\t\t\tn += 1\n\n\t\t# wrap point is inside text, break it up into separate lines\n\t\tline1 = text[:i]\n\t\tline2 = text[i:]\n\n\t\t# if wrap point is inside markers, place end marker at end of first\n\t\t# line and start marker at beginning of second line because each\n\t\t# line will have its own table tag markup around it.\n\t\tif mark:\n\t\t\tline1 += '\\1'\n\t\t\tline2 = '\\0' + mark + line2\n\n\t\t# tack on first line onto the output list\n\t\tdata_list.append( ( line_num, line1 ) )\n\n\t\t# use this routine again to wrap the remaining text\n\t\tself._split_line( data_list, '>', line2 )", "def splitlines(self) -> List[String]:\n pass", "def separate_comments(self):\n if not hasattr(self, 'cleaned_html'):\n self.cleaned_html = self.clean_html()\n \n self.separated_comments = self.cleaned_html.split(self.post_splitter)\n return self.separated_comments", "def Comment(self, comment):\n self.script.append(\"\")\n for i in comment.split(\"\\n\"):\n self.script.append(\"# \" + i)\n self.script.append(\"\")", "def _split_lines(self, lines, separator_marker):\n result = []\n current_group = []\n for line in lines:\n if re.match(rf'[^\\S\\n]*{separator_marker}\\w+(\\(.*\\))?:', line):\n if current_group:\n result.append(current_group)\n current_group = []\n current_group.append(line)\n if current_group:\n result.append(current_group)\n return result", "def lines_to_blocks(text):\n n_sep = text.count('\\n\\n')\n n_lines = text.count('\\n')\n #approximate ratio of double newlines vs single newline: 40\n if int(n_sep/n_lines*100) > 40:\n text = re.sub('\\n\\n', '\\n',text)\n #try to split it up with topic indicators such as numbers or bullet points\n text = re.sub(r'[0-9]+[.]', '\\n',text)\n text = re.sub('โ€ข', '\\n',text)\n return text", "def strip_comments(tokens):\n prev_typ = None\n prev_end_col = 0\n for typ, tok, (start_row, start_col), (end_row, end_col), line in tokens:\n if typ in (tokenize.NL, tokenize.NEWLINE):\n if prev_typ in (tokenize.NL, tokenize.NEWLINE):\n start_col = 0\n else:\n start_col = prev_end_col\n end_col = start_col + 1\n elif typ == tokenize.COMMENT and start_row > 2:\n continue\n prev_typ = typ\n prev_end_col = end_col\n yield typ, tok, (start_row, start_col), (end_row, end_col), line", "def tokenize(fp):\n for line in fp:\n line = line.strip()\n if line[0] == '#':\n 
continue\n for tok in line.split():\n yield tok", "def split(in_string):\n in_string_list = ['']\n delphi_string_list = []\n\n # The first part is always a non-delphi part, even if it is just an empty string.\n delphi = False\n\n for line in in_string.splitlines(keepends=True):\n if not delphi:\n if line.startswith('// delphi begin') or line.startswith('/* delphi begin'):\n delphi = True\n in_string_list[len(in_string_list) - 1] = remove_comments(in_string_list[len(in_string_list) - 1])\n delphi_string_list.append('')\n else:\n in_string_list[len(in_string_list) - 1] += line\n else:\n if line.startswith('// delphi end') or line.startswith('delphi end */'):\n delphi_string_list[len(delphi_string_list) - 1] = delphi_string_list[len(delphi_string_list) - 1][:-1]\n delphi = False\n in_string_list.append('')\n else:\n delphi_string_list[len(delphi_string_list) - 1] += line\n\n # The last part must be a non-delphi part. It can also be just a single empty line.\n assert not delphi\n in_string_list[len(in_string_list) - 1] = remove_comments(in_string_list[len(in_string_list) - 1])\n\n return in_string_list, delphi_string_list", "def dedent(comment):\n commentLines = comment.split('\\n')\n if len(commentLines) < 2:\n cleaned = list(map(str.lstrip, commentLines))\n else:\n spc = 0\n for char in commentLines[1]:\n if char in string.whitespace:\n spc = spc + 1\n else:\n break\n #now check other lines\n cleaned = []\n for line in commentLines:\n for i in range(min(len(line),spc)):\n if line[0] in string.whitespace:\n line = line[1:]\n cleaned.append(line)\n return '\\n'.join(cleaned)", "def test_whitespace_before_comment(parallel, read_tab):\n text = \"a\\tb\\tc\\n # comment line\\n1\\t2\\t3\"\n table = read_tab(text, parallel=parallel)\n expected = Table([[1], [2], [3]], names=(\"a\", \"b\", \"c\"))\n assert_table_equal(table, expected)", "def handleCommentLine(sLine, iLine):\r\n\tglobal sEType, sEVar, sEData, iIndent\r\n\r\n\t# Work out the indentation level to operate at.\r\n\t# This is only done once for each comment block.\r\n\tif iIndent < 0:\r\n\t\tiIndent = (len(sLine) - len(sLine.lstrip())) / 4\r\n\r\n\t# If there is no '@' symbol, save as much data as we can from the commentline.\r\n\tif START_SYMBOL not in sLine:\r\n\r\n\t\t# If we are a directive which only accepts single line values then anything extra is a remark.\r\n\t\tif sEType in (PARAM, RETURN, AUTHOR, DATE):\r\n\t\t\thandleExistingData(iIndent)\r\n\t\t\tsEType = REMARK\r\n\t\t\tsEData = \"\"\r\n\r\n\t\t# Get the data from the line and append it if it is exists.\r\n\t\tsData = dataFromLine(sLine)\r\n\t\tif len(sData) > 0:\r\n\t\t\t# If we already have data, insert a breakline.\r\n\t\t\tif sEData:\r\n\t\t\t\tsEData += BREAK + sData\r\n\r\n\t\t\t# Otherwise do not.\r\n\t\t\telse:\r\n\t\t\t\tsEData = sData\r\n\t\t\r\n\t\t# If we have an end comment on this line, exit the comment by returning false.\r\n\t\tif CLOSE_COMMENT in sLine:\r\n\t\t\thandleExistingData(iIndent)\r\n\t\t\tendComment()\r\n\t\t\treturn False\r\n\t\treturn True\r\n\r\n\t# Since the line does contain an '@' symbol, push any existing data.\r\n\thandleExistingData(iIndent)\r\n\r\n\t# If this line contains an '@' symbol then work out what is after it.\r\n\tsEType = sLine.split(START_SYMBOL)[1].split(\" \")[0]\r\n\r\n\t# If the comment data type is BRIEF\r\n\tif sEType == BRIEF:\r\n\t\tsEData = dataFromString(sLine, sLine.find(BRIEF) + len(BRIEF) + 1)\r\n\r\n\telif sEType == PARAM:\r\n\t\tsTemp = dataFromString(sLine, sLine.find(PARAM) + len(PARAM) + 1)\r\n\t\tiChop = 
sTemp.find(\" \") + 1\r\n\t\tsEData = sTemp[iChop:]\r\n\t\tsEVar = sTemp[:iChop].rstrip()\r\n\r\n\telif sEType == RETURN:\r\n\t\tsEData = dataFromString(sLine, sLine.find(RETURN) + len(RETURN) + 1)\r\n\r\n\telif sEType == DATE:\r\n\t\tsEData = dataFromString(sLine, sLine.find(DATE) + len(DATE) + 1)\r\n\r\n\telif sEType == AUTHOR:\r\n\t\tsEData = dataFromString(sLine, sLine.find(AUTHOR) + len(AUTHOR) + 1)\r\n\r\n\t# If we have an end comment on this line, exit the comment by returning false.\r\n\tif CLOSE_COMMENT in sLine:\r\n\t\thandleExistingData(iIndent)\r\n\t\tendComment()\r\n\t\treturn False\r\n\treturn True", "def _extract_comment_values(line, status, comment, sources):\n \n line = line.strip( )\n\n if len(line) > 1:\n if line[1] == ':':\n sources = re.split(',[\\s]*', line[2:].strip( ))\n elif line[1] == '.':\n comment = line[2:].strip( )\n elif line[1] == ',':\n flags = re.split(',[\\s]*', line[2:].strip( ))\n if not 'fuzzy' in flags:\n status = store.STATUS_COMPLETE\n \n return status, comment, sources", "def chunks(text):\n lines = []\n for line in text.splitlines():\n lines.append(re.sub(' {2,}', ' ', line.strip()))\n return '\\n'.join(lines).split('\\n\\n')", "def __remove_fortran_comments(self, line):\n if self._in_block_comment:\n log.error('Before Fortran line is processed, in a block comment?')\n\n new_chars = []\n line_len = len(line)\n ignore_spaces = False\n for i in range(line_len):\n if line[i] == ' ':\n if not ignore_spaces:\n ignore_spaces = True\n new_chars.append(' ')\n elif line[i] == '!':\n new_chars.append(' ')\n break\n else:\n ignore_spaces = False\n new_chars.append(line[i])\n\n if self._in_block_comment:\n log.error('Processing Fortran comment left state in a block comment?')\n\n new_line = ''.join(new_chars)\n return new_line", "def split_text(text: str) -> List[Dict[str, str]]:\n # split into paragraphs\n lines = text.splitlines()\n groups = common.group_list(lines, lambda a, _: a.strip() == '')\n paras = ['\\n'.join(item) for empty_line, item in groups if not empty_line]\n\n def _fallback(p, type):\n logging.warn(f'Wrong {type} format:\\n' + p)\n cells.append({'type': 'text', 'source': p})\n\n cells = []\n for p in paras:\n lines = p.splitlines() + ['']\n p += '\\n'\n if p.startswith('#'):\n # parse title\n if not _is_mark(lines[1:]):\n _fallback(p, 'title')\n else:\n m = re.match(r'#+ *', lines[0])\n cells.append({\n 'type': 'title',\n 'prefix': m[0],\n 'source': lines[0][m.span()[1]:],\n 'mark': '\\n'.join(lines[1:])})\n elif p.startswith('$$'):\n # parse equations\n m = re.findall(r'\\$\\$', p)\n if len(m) != 2:\n _fallback(p, 'equation')\n else:\n cells.append({'type': 'equation', 'source': p})\n elif p.startswith('!['):\n # parse images\n if not lines[0].strip().endswith(')') or not _is_mark(lines[1:]):\n _fallback(p, 'image')\n else:\n cells.append({'type': 'image', 'source': p})\n elif p.startswith('|'):\n # parse table\n for i, l in enumerate(lines):\n if not l.startswith('|'):\n break\n if not _is_mark(lines[i:]):\n _fallback(p, 'equation')\n else:\n cells.append({'type': 'table', 'source': p})\n else:\n groups = common.group_list(lines, _list)\n for prefix, item in groups:\n if len(prefix.split('__')) == 2:\n prefix = prefix.split('__')[0]\n source = '\\n'.join(item)[len(prefix):]\n if prefix == '':\n cells.append({'type': 'text', 'source': source})\n else:\n cells.append({\n 'type': 'list',\n 'prefix': prefix,\n 'source': source})\n return cells", "def comment_stripper(iterator):\n for line in iterator:\n if line [:1] == '#':\n continue\n if not 
line.strip ():\n continue\n yield line", "def comment_content(c):\n content = str(c)[4:-3]\n return content.strip()", "def lify_split_buffers(lines):\n code_len = len(lines)\n for pos in range(code_len):\n line = lines[pos]\n if line.find('variable=buf_data_split') != -1:\n # Search for the variable declaration section\n decl_pos = -1\n prev_pos = pos - 1\n while prev_pos >= 0:\n prev_line = lines[prev_pos]\n if prev_line.find('Variable Declaration') != -1:\n decl_pos = prev_pos\n break\n prev_pos -= 1\n # Move the two code lines at [pos - 1] and [pos] to [decl_pos] and [decl_pos + 1]\n indent = lines[decl_pos].find('/*')\n line1 = ' ' * indent + lines[pos - 1].lstrip()\n line2 = ' ' * indent + lines[pos].lstrip()\n del lines[pos - 1]\n del lines[pos - 1]\n lines.insert(decl_pos, line1)\n lines.insert(decl_pos + 1, line2)\n\n return lines", "def test_comments(self):\n fp = FilePath(self.mktemp())\n fp.setContent('something\\n#commented\\ncool')\n self.assertEqual(list(inventoryReader(fp.path)), ['something', 'cool'])", "def tokenize(line):\n line = line.strip()\n tokens = deque()\n permanent = line.startswith('@')\n if permanent:\n line = line[1:]\n while line:\n token, line, comment = OrdersParser._get_token(line)\n if comment:\n return (tokens, permanent, token)\n else:\n tokens.append(token)\n \n return (tokens, permanent, None)", "def trim_comment(line):\n if ';' not in line:\n return (line, None)\n\n comment_start = line.index(';')\n before_comment = line[:comment_start]\n spaces_before_comment = len(before_comment) - len(before_comment.rstrip())\n comment = line[comment_start:]\n return (before_comment.rstrip(), spaces_before_comment * ' ' + comment)", "def split_into_sections(text):\n headings_regex = re.compile(\n r'^={1,6}.*?={1,6}(?: *<!--.*?-->)?\\s*$', flags=re.M\n )\n sections = list()\n last_match_start = 0\n for match in headings_regex.finditer(text):\n match_start = match.start()\n if match_start > 0:\n sections.append(text[last_match_start:match_start])\n last_match_start = match_start\n sections.append(text[last_match_start:])\n return sections", "def test_parse_multiline_comment(self):\n source_code = dedent(\"\"\"\\\n /**\n * this is a doc comment that stretches over\n * more than one line\n */\n int main()\n {\n return 0;\n }\n \"\"\")\n result = self.parser.parse(source_code.splitlines())\n assert_equal(result, {\n \"int main()\": (\"this is a doc comment that stretches over \"\n \"more than one line\")})", "def parse_lit(self, lines):\n comment_char = \"#\" # TODO: move this into a directive option\n comment = re.compile(r\"^\\s*{}[ \\n]\".format(comment_char))\n section_test = lambda val: bool(comment.match(val))\n\n sections = []\n for is_doc, group in itertools.groupby(lines, section_test):\n if is_doc:\n text = [comment.sub(\"\", i).rstrip(\"\\r\\n\") for i in group]\n else:\n text = [i.rstrip(\"\\r\\n\") for i in group]\n\n sections.append((is_doc, text))\n\n return sections", "def process_all(self):\n global multi_comment_line_mode\n multi_comment_line_mode = False\n for line in self.fileToProcess:\n line = line.strip() # creating a strip line, with no whitespace in the beginning and in the end\n # multi_comment_line_mode = False\n # first, we want to filter all the lines which are comments or part of comments\n while line != '':\n ignoring_status,newline = self.shouldTheLineBeIgnored(line)\n if ignoring_status:\n break # we are ignoring the line\n elif (not ignoring_status) and (newline != '') and newline != '$endOfMultiLine':\n line = newline\n continue\n 
elif not ignoring_status and newline == '$endOfMultiLine':\n break\n else:\n line = self.isThereApartToIgnore(line) #getting the good line\n line = line.strip()\n if line.endswith('$endOfMultiLine'):\n # line = line[:-1]\n line = line[:-15]\n # in this case we don't want to ignore the current line\n # if multi_comment_line_mode:\n # # this comes from the\n list_of_line_strings = re.split('(\\W)', line) # the default of this method is to remove all the white spaces\n list_of_line_strings = list(filter(None, list_of_line_strings))\n global i\n i = 0\n global first_index\n first_index = 0\n global second_index\n second_index = 0\n len_of_list = len(list_of_line_strings)\n while i < len_of_list:\n # first adding the string literals\n if (list_of_line_strings[i] == '\"' and i == 0) or (i>0 and list_of_line_strings[i] == '\"' and\n list_of_line_strings[i-1]!='*'):\n first_index = i\n i = i + 1\n if i == len(list_of_line_strings):\n break\n while list_of_line_strings[i] != '\"':\n i = i + 1\n if i>=len(list_of_line_strings):\n # in case it's the end\n i = first_index\n break\n else:\n continue\n second_index = i\n list_of_line_strings[first_index:second_index + 1] = [\n ''.join(list_of_line_strings[first_index:second_index + 1])]\n i = i + 2\n len_of_list = len(list_of_line_strings)\n else:\n i = i + 1\n j=0\n global skip_mode\n skip_mode = False\n global counter\n counter = 0\n for string in list_of_line_strings:\n if j != len(list_of_line_strings)-1:\n j+=1\n if counter == 1:\n counter = 0\n continue\n if skip_mode and not (string == '*' and list_of_line_strings[j] == '/'):\n continue\n if skip_mode and string == '*' and list_of_line_strings[j] == '/':\n skip_mode = False\n counter = 1\n continue\n if string == \"/\" and (list_of_line_strings[j] == \"/\" ):\n # this is a comment that appeared in the line\n break # in this case, there are no more chars to read because it's a note\n if string == \"/\" and list_of_line_strings[j] == \"*\":\n skip_mode = True\n counter = 1\n continue # entering a skip mode\n if string.strip() == '':\n continue\n self.currStringToProcess = string\n type = self.tokenType()\n self.createToken(type,self.currStringToProcess)\n break", "def parse_tab_in_comment(comment):\n line_has_tab = [line.find(\"\\t\") != -1 for line in comment]\n if not any(line_has_tab):\n return {}, comment\n\n comment_dict = {}\n debug(\"parsing tabs in comment\")\n consecutive_true_count = max(len(list(v)) for k, v in groupby(line_has_tab) if k)\n # need to have 3 consecutive tab formatted lines.\n if consecutive_true_count >= 3:\n # for these lines, split by tab, strip non-alphabets.\n for line in copy(comment):\n if line.find(\"\\t\") != -1:\n key, value = line.rsplit(\"\\t\", 1)\n key = key.strip(STRIPS)\n value = value.strip(STRIPS)\n if re.compile(r'[0-9]{2,4}[ -.][0-9]{2}[ -.][0-9]{2}( [0-9]{2}:[0-9]{2}(:[0-9]{2})?)?').match(key):\n # if key is a datetime obj, ignore it.\n continue\n debug(\"tab || found %s: %s\" % (key, value))\n comment_dict[key] = value\n comment.remove(line)\n return comment_dict, comment", "def _is_comment_line(self):\n pattern = re.compile(r\"^(\\s)*(//)+\")\n return pattern.search(self._line)", "def _split_to_tokens(self, file_content: str):\n cur_token = ''\n string_started = False\n for line in file_content.split('\\n'):\n cur_token = ''\n line = line.strip()\n if line.startswith('#'): # skip comments\n continue\n for char in line:\n if string_started:\n if char == '\"': # string ended\n self._add_non_empty_token(cur_token)\n cur_token = '' # start of a new 
string\n string_started = False\n else:\n cur_token += char\n elif char == '\"':\n self._add_non_empty_token(cur_token)\n cur_token = '' # start of a new string\n string_started = True\n elif (char == \" \" and not string_started) or char == '\\n':\n self._add_non_empty_token(cur_token)\n cur_token = ''\n elif char in [':', '{', '}', '[', ']', ',']:\n self._add_non_empty_token(cur_token)\n self._tokens.append(char)\n cur_token = ''\n else:\n cur_token += char\n self._add_non_empty_token(cur_token)\n self._add_non_empty_token(cur_token)", "def test_splitDelimiters(self):\n r = irc.split(\"xx yyz\", 2)\n self.assertEqual([\"xx\", \"yy\", \"z\"], r)\n r = irc.split(\"xx\\nyyz\", 2)\n self.assertEqual([\"xx\", \"yy\", \"z\"], r)", "def _split(self):\n text = self.md\n self.parts = parts = []\n self.headers = headers = []\n lines = []\n\n # Split in parts\n for line in text.splitlines():\n if line.startswith((\"# \", \"## \", \"### \", \"#### \", \"##### \")):\n # Finish pending lines\n parts.append(\"\\n\".join(lines))\n lines = []\n # Process header\n level = len(line.split(\" \")[0])\n title = line.split(\" \", 1)[1]\n title_short = title.split(\"(\")[0].split(\"<\")[0].strip().replace(\"`\", \"\")\n headers.append((level, title_short))\n parts.append((level, title_short, title))\n else:\n lines.append(line)\n parts.append(\"\\n\".join(lines))\n\n # Now convert all text to html\n for i in range(len(parts)):\n if not isinstance(parts[i], tuple):\n parts[i] = markdown.markdown(parts[i], extensions=[]) + \"\\n\\n\"", "def del_comm(self, blocks=False):\n logging.debug('Delete comments from text')\n if not(self.check()):\n raise GcodeError(\"Invalid g-codes\")\n temp = []\n comment = re.compile(';\\ .*')\n for line in self.blocks:\n n = comment.search(line)\n if n:\n line = line[:n.span()[0]]\n line = line.strip()\n if line != \"\":\n temp.append(line)\n if blocks:\n return temp\n return \"\\n\".join(temp)", "def strip_comments(blob, delim='#'):\n lines = blob.split('\\n')\n return '\\n'.join([line for line in lines if line.strip()[0] != delim])", "def prep_difflines(content):\n return [ x+\"\\n\" for x in content.split(\"\\n\") ]", "def __format_lines(cls, lines):\n\n result = []\n\n for line in [x for x in lines if x]:\n if not line.startswith(\"#\"):\n if \"#\" in line:\n line = line[: line.find(\"#\")]\n\n if \"\\t\" in line or \" \" in line:\n splited_line = line.split()\n\n for element in splited_line[:1]:\n if element:\n line = element\n break\n result.append(line)\n\n return result", "def chunk_split(cls, text):\n parts = []\n current = []\n for line in text.splitlines():\n size = sum(len(part) + 1 for part in current)\n extra = len(line)\n if size + extra >= 2000:\n if current:\n # The message is full, split here.\n parts.append(\"\\n\".join(current))\n current.clear()\n if extra >= 2000:\n # The line itself is too long, split on whitespace instead.\n *lines, line = wrap(line, 2000, expand_tabs=False, replace_whitespace=False)\n parts.extend(lines)\n current.append(line)\n if current:\n parts.append(\"\\n\".join(current))\n return parts", "def strip_comments(line):\n if \"#\" in line:\n return line[:line.find(\"#\")]\n else:\n return line", "def fParseHTMLComments(self, match):\n before, commenttext, after = match.groups()\n commenttext = self.shelve(commenttext)\n return '<!--%s-->' % commenttext", "def splitline (self, line):\n\t\treturn line.split('\\t')", "def comment():", "def deleteComments(self: Self, event: Event = None) -> None:\n #@+<< deleteComments docstring >>\n 
#@+node:ekr.20171123135625.37: *3* << deleteComments docstring >>\n #@@pagewidth 50\n #@-<< deleteComments docstring >>\n c, p, u, w = self, self.p, self.undoer, self.frame.body.wrapper\n #\n # \"Before\" snapshot.\n bunch = u.beforeChangeBody(p)\n #\n # Initial data.\n head, lines, tail, oldSel, oldYview = self.getBodyLines()\n if not lines:\n g.warning('no text selected')\n return\n # The default language in effect at p.\n language = c.frame.body.colorizer.scanLanguageDirectives(p)\n if c.hasAmbiguousLanguage(p):\n language = c.getLanguageAtCursor(p, language)\n d1, d2, d3 = g.set_delims_from_language(language)\n #\n # Calculate the result.\n changed, result = False, []\n if d1:\n # Remove the single-line comment delim in front of each line\n d1b = d1 + ' '\n n1, n1b = len(d1), len(d1b)\n for s in lines:\n i = g.skip_ws(s, 0)\n if g.match(s, i, d1b):\n result.append(s[:i] + s[i + n1b :])\n changed = True\n elif g.match(s, i, d1):\n result.append(s[:i] + s[i + n1 :])\n changed = True\n else:\n result.append(s)\n else:\n # Remove the block comment delimiters from each line.\n n2, n3 = len(d2), len(d3)\n for s in lines:\n i = g.skip_ws(s, 0)\n j = s.find(d3, i + n2)\n if g.match(s, i, d2) and j > -1:\n first = i + n2\n if g.match(s, first, ' '):\n first += 1\n last = j\n if g.match(s, last - 1, ' '):\n last -= 1\n result.append(s[:i] + s[first:last] + s[j + n3 :])\n changed = True\n else:\n result.append(s)\n if not changed:\n return\n #\n # Set p.b and w's text first.\n middle = ''.join(result)\n p.b = head + middle + tail # Sets dirty and changed bits.\n w.setAllText(head + middle + tail)\n #\n # Set the selection range and scroll position.\n i = len(head)\n j = ins = max(i, len(head) + len(middle) - 1)\n w.setSelectionRange(i, j, insert=ins)\n w.setYScrollPosition(oldYview)\n #\n # \"after\" snapshot.\n u.afterChangeBody(p, 'Indent Region', bunch)", "def parse_comment(comment: Union[Token, PsuedoToken]) -> str:\n # Happens when there is no documentation comment in the source file for the\n # item.\n spelling = comment.spelling\n if spelling is None:\n return \"\"\n\n # Comments from clang start at the '/*' portion, but if the comment itself\n # is indented subsequent lines will have too much indent.\n # Transform::\n #\n # \"/**\\n * hello some comment\\n * on multiple lines\\n */\"\n #\n # into::\n #\n # \"/**\\n * hello some comment\\n * on multiple lines\\n */\"\n indent = \" \" * (comment.extent.start.column - 1)\n indented_comment = indent + spelling\n dedented_comment = textwrap.dedent(indented_comment)\n\n # Notes on the regex here.\n # Option 1 '\\s?\\*/?'\n # This piece will match comment lines that start with '*' or ' *'.\n # This will also match a trailing '*/' for the end of a comment\n #\n # Option 2 '^/\\*+<?'\n # This will match the start of a comment '/*' and consume any\n # subsequent '*'. 
This is also meant to catch '/**<' for trailing comments.\n #\n # Option 3 '\\*+/'\n # Matches any and all '*' up to the end of the comment string.\n contents = re.sub(\n r\"^\\s?\\*/?|^/\\*+<?|\\*+/\",\n lambda x: len(x.group(0)) * \" \",\n dedented_comment,\n flags=re.MULTILINE,\n )\n\n contents = textwrap.dedent(contents)\n\n # there may still be left over newlines so only strip those, but leave any\n # whitespaces.\n contents = contents.strip(\"\\n\")\n\n return contents", "def removeHtmlComments(self, text):\n sb = []\n start = text.find(u'<!--')\n last = 0\n while start != -1:\n end = text.find(u'-->', start)\n if end == -1:\n break\n end += 3 \n \n spaceStart = max(0, start-1)\n spaceEnd = end\n while text[spaceStart] == u' ' and spaceStart > 0:\n spaceStart -= 1\n while text[spaceEnd] == u' ':\n spaceEnd += 1\n \n if text[spaceStart] == u'\\n' and text[spaceEnd] == u'\\n':\n sb.append(text[last:spaceStart])\n sb.append(u'\\n')\n last = spaceEnd+1\n else:\n sb.append(text[last:spaceStart+1])\n last = spaceEnd\n \n start = text.find(u'<!--', end)\n sb.append(text[last:])\n return u''.join(sb)", "def test_split_line(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first\n line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.5\", \"1.5\"),\n after_sel=(\"2.0\", \"2.0\"),\n command_name=\"split-line\",\n )", "def _PreParse(line: str) -> str:\n line = line.rstrip(\"\\n\")\n\n commentIndex = line.find(\"/\")\n\n # no comment found\n if commentIndex == - 1:\n return line\n\n # truncate\n return line[0:commentIndex]", "def _parse_comments(s):\n i = iter(s.split(\",\"))\n\n rv = []\n try:\n while True:\n # get the flags and text of a comment part\n flags, text = next(i).split(':', 1)\n\n if len(flags) == 0:\n rv.append(('OTHER', text, text, text, \"\"))\n # parse 3-part comment, but ignore those with O flag\n elif 's' in flags and 'O' not in flags:\n ctriple = [\"TRIPLE\"]\n indent = \"\"\n\n if flags[-1] in string.digits:\n indent = \" \" * int(flags[-1])\n ctriple.append(text)\n\n flags, text = next(i).split(':', 1)\n assert flags[0] == 'm'\n ctriple.append(text)\n\n flags, text = next(i).split(':', 1)\n assert flags[0] == 'e'\n ctriple.append(text)\n ctriple.append(indent)\n\n rv.append(ctriple)\n elif 'b' in flags:\n if len(text) == 1:\n rv.insert(0, (\"SINGLE_CHAR\", text, text, text, \"\"))\n except StopIteration:\n return rv", "def split_by_lines(text, remove_empty=False):\n\tlines = text.splitlines()\n\t\n\treturn remove_empty and [line for line in lines if line.strip()] or lines", "def remove_comments(line):\n hashPos = line.find('#')\n return line[:hashPos] if hashPos >= 0 else line", "def lines(filename, exclude_imports=True, exclude_comments=True, exclude_tests=True, exclude_globals=True, exclude_blank=True, verbose=False, is_c=False, s=None):\n if s is None:\n s = open(filename, 'rt').read()\n\n L = s.split('\\n')\n \n # Hack to strip out triple and single quote string lines in a heuristic (unreliable) way, which avoids parsing Cython\n if not is_c:\n for i in range(len(L)):\n if L[i].strip().startswith(\"'\") and L[i].strip().endswith(\"'\"):\n L[i] = ''\n i = 0\n while i < len(L):\n found = False\n for triple_quote in ['\"\"\"', \"'''\"]:\n if L[i].strip().startswith(triple_quote):\n L[i] = L[i].strip()[3:]\n for j in range(i, len(L)):\n if triple_quote in L[j]:\n found = True\n L[j] = ''\n if found:\n break\n i = j+1\n if 
not found:\n i += 1\n else:\n begin_comment = '/*'\n end_comment = '*/'\n i = 0\n while i < len(L):\n found = False\n if begin_comment in L[i]:\n rest = L[i][L[i].index(begin_comment)+len(begin_comment):]\n L[i] = L[i][:L[i].index(begin_comment)]\n if end_comment in rest:\n found = True\n i += 1\n else:\n for j in range(i+1, len(L)):\n if end_comment in L[j]:\n found = True\n L[j] = L[j][L[j].index(end_comment)+len(end_comment):]\n else:\n L[j] = ''\n if found:\n break\n i = j + 1\n if not found:\n i += 1\n\n# util.print_header('Lines before exclude_tests:' + filename, '\\n'.join(L))\n\n # Hack to strip out def test() and other methods in a heuristic (unreliable) way, which avoids parsing Cython\n if exclude_tests:\n # Also exclude makeColorMatrix so that our camera pipe is apples-to-apples comparable with reported lines in Halide paper\n if not is_c:\n methods = 'test run_test_all mandelbrot_gray mandelbrot_color composite_numpy composite_numexpr makeColorMatrix'.split()\n else:\n methods = ['int main', 'void main']\n i = 0\n while i < len(L):\n L_i_strip = L[i].strip()\n if ((not is_c and (any(L_i_strip.startswith('def ' + method) for method in methods) or\n any(L_i_strip.startswith('cdef ' + method) for method in methods))) or\n (is_c and (any(L_i_strip.startswith(method) for method in methods)))):\n L[i] = ''\n for j in range(i+1, len(L)):\n L_j_strip = L[j].strip()\n c_ok = True\n if is_c:\n c_ok = L_j_strip != '{' and L_j_strip != '}'\n if not L[j].startswith(' ') and not L[j].startswith('\\t') and not len(L[j].strip()) == 0 and c_ok:\n break\n else:\n L[j] = ''\n i = j\n elif (L[i].strip().startswith('test(') or L[i].strip().startswith('run_test_all(')) and not is_c:\n L[i] = ''\n i += 1\n else:\n i += 1\n\n# util.print_header('Lines before exclude_imports:' + filename, '\\n'.join(L))\n if exclude_imports:\n if not is_c:\n L = [x for x in L if not x.lstrip().startswith('import') and not x.lstrip().startswith('cimport') and not x.startswith('cdef extern')]\n else:\n L = [x for x in L if not x.lstrip().startswith('#include')]\n# util.print_header('Lines before exclude_comments:' + filename, '\\n'.join(L))\n if exclude_comments:\n if not is_c:\n L = [x for x in L if not x.lstrip().startswith('#') and not x.strip() == 'pass']\n else:\n L = [x for x in L if not x.lstrip().startswith('//')]\n# util.print_header('Lines before exclude_globals:' + filename, '\\n'.join(L))\n if exclude_globals and not is_c:\n L = [x for x in L if (x.startswith(' ') or x.startswith('\\t') or x.startswith('def') or x.startswith('cdef')) and (not x.lstrip().startswith('has_'))]\n# util.print_header('Lines before exclude_blank:' + filename, '\\n'.join(L))\n\n if is_c:\n # Also exclude makeColorMatrix so that C camera pipe is apples-to-apples comparable with reported lines in Halide paper\n L = [x for x in L if not x.lstrip().startswith('matrix_3200') and not x.lstrip().startswith('matrix_7000')]\n if exclude_blank:\n L = [x for x in L if not len(x.strip()) == 0]\n\n if verbose:\n util.print_header('Final lines for:' + filename, '\\n'.join(L))\n\n return len(L)", "def _parse_comment_trail(self): # type: () -> Tuple[str, str, str]\n if self.end():\n return \"\", \"\", \"\"\n\n comment = \"\"\n comment_ws = \"\"\n self.mark()\n\n while True:\n c = self._current\n\n if c == \"\\n\":\n break\n elif c == \"#\":\n comment_ws = self.extract()\n\n self.mark()\n self.inc() # Skip #\n\n # The comment itself\n while not self.end() and not self._current.is_nl() and self.inc():\n pass\n\n comment = self.extract()\n 
self.mark()\n\n break\n elif c in \" \\t\\r,\":\n self.inc()\n else:\n break\n\n if self.end():\n break\n\n while self._current.is_spaces() and self.inc():\n pass\n\n trail = \"\"\n if self._idx != self._marker or self._current.is_ws():\n trail = self.extract()\n\n return comment_ws, comment, trail", "def parse(input):\n\t\n\toutput = []\n\t\n\t# Comment delimiter of the docstring\n\tcommentDelim = '\"\"\"'\n\t\n\t# Some regexes\n\ttriggerRe = re.compile(\"^(\\s*)(def .+:|class .+:)\")\n\tcommentStartRe = re.compile('^\\s*(%s)' % commentDelim)\n\tcommentEndRe = re.compile('(%s)\\s*$' % commentDelim)\n\temptyRe = re.compile(\"^\\s*$\")\n\thashLineRe = re.compile(\"^\\s*#.*$\")\n\timportLineRe = re.compile(\"^\\s*(import |from .+ import)\")\n\t\n\t# split input into lines\n\tlines = input.split(\"\\n\")\n\t\n\t# flags, buffers, ...\n\tfileHeadFlag = True\n\ttriggerWordFlag = False\n\tcommentFlag = False\n\tcomment = []\n\ttriggerWs = \"\"\n\ttriggerLines = None\n\t\n\t# process each line\n\tfor line in enumerate(lines):\n\n\t\tmatch = re.search(triggerRe, line[1])\n\t\tif match:\n\t\t\tif triggerWordFlag and triggerLines:\n\t\t\t\toutput.append(\"\\n\".join(triggerLines))\n\t\t\t\n\t\t\ttriggerWordFlag = True\n\t\t\ttriggerWs = match.group(1)\n\t\t\tfileHeadFlag = False\n\t\t\ttriggerLines = [line[1]]\n\t\t\tcontinue\n\n\t\t# file header or active keyword trigger?\n\t\tif fileHeadFlag or triggerWordFlag:\n\t\t\t# comment end of multiline comment found\n\t\t\tif re.search(commentEndRe, line[1]) and commentFlag:\n\t\t\t\tcomment.append( line[1][ : line[1].rfind(commentDelim) ] )\n\t\t\t\toutput.append(makeCommentBlock(comment, triggerWs, (triggerLines is None)))\n\t\t\t\tif triggerLines:\n\t\t\t\t\toutput.append(\"\\n\".join(triggerLines))\n\t\t\t\tcomment = []\n\t\t\t\tcommentFlag = False\n\t\t\t\ttriggerWs = \"\"\n\t\t\t\ttriggerLines = None\n\t\t\t\ttriggerWordFlag = False\n\t\t\t\t\n\t\t\t# comment start found\n\t\t\telif re.search(commentStartRe, line[1]):\n\t\n\t\t\t\tif re.search(commentEndRe, line[1][line[1].find(commentDelim)+len(commentDelim) :]):\n\t\t\t\t\t# singleline comment\n\t\t\t\t\tcomment.append(line[1][line[1].find(commentDelim)+len(commentDelim) : line[1].rfind(commentDelim)])\n\t\t\t\t\toutput.append(makeCommentBlock(comment, triggerWs))\n\t\t\t\t\t\n\t\t\t\t\tif triggerLines:\n\t\t\t\t\t\toutput.append(\"\\n\".join(triggerLines))\n\t\t\t\t\t\t\n\t\t\t\t\tcomment = []\n\t\t\t\t\tcommentFlag = False\n\t\t\t\t\ttriggerWs = \"\"\n\t\t\t\t\ttriggerLines = None\n\t\t\t\t\ttriggerWordFlag = False\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\t# multiline comment begin\n\t\t\t\t\tcommentFlag = True\n\t\t\t\t\tcomment.append(\n\t\t\t\t\t\tline[1][line[1].find(commentDelim)+len(commentDelim):]\n\t\t\t\t\t)\n\t\n\t\t\t# active multiline comment -> append comment\n\t\t\telif commentFlag:\n\t\t\t\tcomment.append(line[1])\n\t\t\t\n\t\t\t# still searching for comment\n\t\t\telif re.search(emptyRe, line[1]):\n\t\t\t\tif triggerLines:\n\t\t\t\t\ttriggerLines.append(line[1])\n\t\t\t\telse:\n\t\t\t\t\toutput.append(line[1])\n\t\t\t\n\t\t\t# searching for file header\n\t\t\telif fileHeadFlag:\n\t\t\t\tif not (re.search(hashLineRe, line[1]) or re.search(emptyRe, line[1]) or re.search(importLineRe, line[1])):\n\t\t\t\t\t# fileheader over -> disable search\n\t\t\t\t\tfileHeadFlag = False\n\t\t\t\toutput.append(line[1])\n\t\t\t\n\t\t\t# no comment, disable comment search mode\n\t\t\telse:\n\t\t\t\ttriggerWordFlag = False\n\t\t\t\tif 
triggerLines:\n\t\t\t\t\toutput.append(\"\\n\".join(triggerLines))\n\t\t\t\ttriggerLines = None\n\t\t\t\toutput.append(line[1])\n\t\t\n\t\t# just append the line\n\t\telse:\n\t\t\toutput.append(line[1])\n\t\n\t# return output\n\treturn \"\\n\".join(output)", "def do_comments(self, line):\n for comment in self.review.comments():\n print(comment)", "def format_lines(unprocessed_text: str) -> List[List[str]]:\n stored_lines: List[List[str]] = []\n new_line: List = []\n new_word: str = \"\"\n for char in unprocessed_text:\n if char != \"\\n\":\n if char != \" \" and char.isalpha():\n new_word += char\n else:\n new_line.append(new_word)\n new_word = \"\"\n else:\n stored_lines.append(new_line)\n new_line = []\n return stored_lines", "def _chunk(self, string):\n #~ a = r'\\**\\s*(?:a\\.?|\\(?a\\))' #SMA option dot now required\n a = r'\\**\\s*(?:a\\.|\\(?a\\))'\n b = r'\\**\\s*(?:b\\.|\\(?b\\))'\n c = r'\\**\\s*(?:c\\.|\\(?c\\))'\n d = r'\\**\\s*(?:d\\.|\\(?d\\))'\n e = r'\\**\\s*(?:e\\.|\\(?e\\))'\n l = r'\\s+.+?\\s+'\n # last option trucated here \\/\n regex = r\"({a}{line}{b}{line}{c}{line}(?:{d}{line})(?:{e}.*?)?)\\n?\".format(\n a=a, b=b, c=c, d=d, e=e, line=l, \n )\n p = re.compile(regex, re.IGNORECASE | re.DOTALL)\n\n self._tokens = p.split(string)", "def make_sentences(comment):\n sentences = [sent for sent in split_single(comment)]\n return sentences", "def __ingest_c_block_comments(self, line, position):\n\n pos = position\n while self._in_block_comment and pos < len(line):\n if pos + 1 < len(line) and line[pos] == '*' and line[pos + 1] == '/':\n self._in_block_comment = False\n pos += 2\n pos += 1\n return pos - position", "def _split_line(line):\n # This method encapsulates the recognition of an unusual Unix format\n # variant (see ticket http://ftputil.sschwarzer.net/trac/ticket/12 ).\n line_parts = line.split()\n FIELD_COUNT_WITHOUT_USERID = 8\n FIELD_COUNT_WITH_USERID = FIELD_COUNT_WITHOUT_USERID + 1\n if len(line_parts) < FIELD_COUNT_WITHOUT_USERID:\n # No known Unix-style format\n raise ftputil.error.ParserError(\"line '{}' can't be parsed\".format(line))\n # If we have a valid format (either with or without user id field), the\n # field with index 5 is either the month abbreviation or a day.\n try:\n int(line_parts[5])\n except ValueError:\n # Month abbreviation, \"invalid literal for int\"\n line_parts = line.split(None, FIELD_COUNT_WITH_USERID - 1)\n else:\n # Day\n line_parts = line.split(None, FIELD_COUNT_WITHOUT_USERID - 1)\n USER_FIELD_INDEX = 2\n line_parts.insert(USER_FIELD_INDEX, None)\n return line_parts", "def _get_multi_line_comment(node):\n return _get_comment_from_node(node)", "def AutoSplitlines(self):\n\t\ttry:\n\t\t\tends_with_cr = self.content.endswith('\\n')\n\t\t\tself.lines = self.content.splitlines()\n\t\t\tyield\n\t\tfinally:\n\t\t\tself.content = '\\n'.join(self.lines)\n\t\t\tif ends_with_cr:\n\t\t\t\tself.content += '\\n'", "def extract_want_line_capabilities(text):\n split_text = text.rstrip().split(b\" \")\n if len(split_text) < 3:\n return text, []\n return (b\" \".join(split_text[:2]), split_text[2:])", "def print_comment_v(text):\n print_comment(text, True)", "def split_tagged_text_into_chunks(text):\n if not sentinel_d.get(\"repatt1\"):\n patt1 = r\"(<t(?:ag)?.*?(?<=/)(?:t(?:ag)?)?>)\"\n sentinel_d.update(\n repatt1=re.compile(patt1, flags=re.IGNORECASE | re.DOTALL)\n )\n return [chunk for chunk in sentinel_d[\"repatt1\"].split(text) if chunk]", "def extract_comment_py():\n debug(\"extract comment from a python script.\")\n for line in 
CURRENT_BUFFER[:3]:\n if re.search(r\"coding[:=]\\s*([-\\w.]+)\", line):\n pattern = re.compile(r\"coding[:=]\\s*(?P<encoding>[-\\w.]+)\")\n globals()['ENCODING'] = pattern.search(line).group('encoding')\n debug(\"found encoding: %s\" % globals()['ENCODING'])\n\n lines = list(CURRENT_BUFFER)\n for (i, iline) in enumerate(lines[:10]):\n # find \"\"\" or ''' in the first few lines.\n if '\"\"\"' in iline or \"'''\" in iline:\n # find the end of it.\n breaker = '\"\"\"' if '\"\"\"' in iline else \"'''\"\n for j, jline in enumerate(lines[i+1:]):\n if breaker in jline:\n # found it, format the comment a little bit.\n if j == 0:\n # in the same line, this is a one line comment.\n return [jline[jline.index(breaker)+3:jline.rindex(breaker)]]\n else:\n lines[i] = lines[i][lines[i].index(breaker)+3:]\n lines[i+j+1] = lines[i+j+1][:lines[i+j+1].rindex(breaker)]\n return lines[i:i+j+1]\n else:\n # end of the comment is not found.\n return\n else:\n # comment might start with #\n return extract_comment_sh(python_style=True)", "def process_line(line):\n [label, text] = line.split('\\t')\n return text.split()", "def parse_lines(contents):\n # Nested comments not handled.\n in_comment = False\n stripped_contents = \"\"\n for char in contents.lower():\n if char == \"{\":\n in_comment = True\n elif char == \"}\":\n in_comment = False\n elif not in_comment and not char.isspace():\n stripped_contents += char\n return stripped_contents.split(\";\")", "def test_commentline_same_comment_glue(create):\n\n comment = create(CommentItem, Comment)\n line = create(CommentLineItem)\n\n connect(line, line.head, comment)\n glued = allow(line, line.tail, comment)\n assert not glued", "def get_comment(view, pt):\n\n shell_vars = view.meta_info(\"shellVariables\", pt)\n if not shell_vars:\n return ([], [])\n\n # transform the list of dicts into a single dict\n all_vars = {}\n for v in shell_vars:\n if 'name' in v and 'value' in v:\n all_vars[v['name']] = v['value']\n\n line_comments = []\n block_comments = []\n\n # transform the dict into a single array of valid comments\n suffixes = [\"\"] + [\"_\" + str(i) for i in range(1, 10)]\n for suffix in suffixes:\n start = all_vars.setdefault(\"TM_COMMENT_START\" + suffix)\n end = all_vars.setdefault(\"TM_COMMENT_END\" + suffix)\n\n if start and end is None:\n line_comments.append((start,))\n elif start and end:\n block_comments.append((start, end))\n\n return (line_comments, block_comments)", "def block_comment(self):\n while (\n not (self.peek() == \"*\" and self.peek_next() == \"/\")\n and not self.is_at_end()\n ):\n if self.peek() == \"\\n\":\n self.line += 1\n self.advance()\n\n if self.peek() == \"*\" and self.peek_next() == \"/\":\n self.advance(spaces=2)\n\n return None", "def collect_tokens(line):\n tokens = []\n try:\n for tok in tokenize.generate_tokens(StringIO(line).readline):\n token = Token(tok)\n if not token.string.strip(): # ignore spaces\n continue\n if token.type == tokenize.COMMENT:\n break\n tokens.append(token)\n except tokenize.TokenError:\n return []\n except Exception as e:\n print(\"%s raised in utils.collect_tokens\" % repr(e))\n\n return tokens", "def parseComments(data):\n global comments\n reviewBegins = '<div style=\"margin-left:0.5em;\">'\n reviewEnds = '<div style=\"padding-top: 10px; clear: both; width: 100%;\">'\n stars_line = 'margin-right:5px;'\n stars = re.compile('\\d+.\\d+ out of 5 stars')\n header_line = '<span style=\"vertical-align:middle;\"'\n helpful_line ='people found the following review helpful'\n helpful = re.compile('\\d+ of \\d+ 
people found the following review helpful')\n reviewText = '<span class=\"h3color tiny\">' # Actual review\n\n boundaries = commentsStartStopLineNmbr(data)\n for i in range(boundaries[0], boundaries[1] + 1):\n if reviewBegins in data[i]:\n curcomment = Comment()\n while reviewEnds not in data[i]:\n # Parse stars\n if stars_line in data[i]:\n stars_found = re.search(stars, data[i])\n if stars_found != None:\n curcomment.stars = stars_found.group()\n # Parse header\n elif header_line in data[i]:\n line = data[i]\n begin = line.find('<b>') + 3\n end = line.find('</b>')\n curcomment.header = line[begin : end]\n # Parse helpfulness\n elif helpful_line in data[i]:\n helpful_found = data[i].replace(\",\", \"\")\n helpful_found = re.search(helpful, helpful_found)\n if helpful_found != None:\n curcomment.helpful = helpful_found.group()\n # Parse body text\n elif reviewText in data[i]:\n i += 3\n if '<span class=\"small\"' in data[i]: # Yep, dirty trick :(\n i += 3\n data[i] = stripHtmlTags(data[i])\n curcomment.comment = re.sub(\"\\s+\", \" \", data[i])\n i += 1\n comments.append(curcomment.getonelinecomment())\n #comments.append(curcomment.__repr__())", "def split_excerpt(input):\n\n lines = input.split(\"\\n\")\n for i,line in enumerate(lines):\n if line.find(BALISE_TOKEN) != -1:\n break\n\n before, after = lines[:i], lines[i+1:]\n\n return '\\n'.join(before), '\\n'.join(after)", "def format_text(text: TTextType) -> typing.Iterator[TViewLine]:\n for line in text.splitlines():\n yield [(\"text\", line)]", "def parser(filename: str):\r\n f = open(filename, 'r')\r\n lines = []\r\n for line in f:\r\n line = line.split(\"//\", 1)[0] # discard comments\r\n line = line.strip()\r\n if line: # discard pure whitespace lines\r\n lines.append(line)\r\n f.close()\r\n return lines", "def split(text):\n articles = re.split(\"<doc>\", text)\n del articles[0]\n return articles", "def clean(text):\n lines = text.split('\\n')\n\n indx = range(len(lines))\n indx.reverse()\n for i in indx:\n temp = lines[i].strip()\n if temp == '' or temp.startswith('#'):\n del lines[i]\n else:\n lines[i] = temp\n\n return lines", "def test_comments(self):\n\n comment_str = \"# This is a comment\\n# This is another comment\"\n doc = parser.parse(comment_str)\n\n self.assertEqual(len(doc.children()), 2)" ]
[ "0.6692685", "0.66081315", "0.65925145", "0.6545137", "0.6362264", "0.6226867", "0.62236226", "0.6117526", "0.6067819", "0.60001516", "0.59967846", "0.5966611", "0.59597796", "0.5954498", "0.59440887", "0.59440887", "0.5927107", "0.5925445", "0.59013635", "0.5877957", "0.5875666", "0.58529496", "0.58512974", "0.58387405", "0.58302045", "0.58242655", "0.58213", "0.5811949", "0.5803566", "0.5784433", "0.5784127", "0.57700264", "0.5760233", "0.5748944", "0.57476306", "0.5742514", "0.57390386", "0.5735618", "0.5726375", "0.5712556", "0.57021284", "0.5696829", "0.56840914", "0.5681457", "0.5665776", "0.56652516", "0.56636566", "0.5660469", "0.56515926", "0.56338483", "0.56126845", "0.5612406", "0.56031984", "0.56014067", "0.56005394", "0.5600211", "0.55993026", "0.55919045", "0.5589795", "0.55893046", "0.55860907", "0.5574589", "0.5570929", "0.5568863", "0.5567358", "0.556135", "0.55605215", "0.5556726", "0.55497307", "0.55489165", "0.5547294", "0.5543438", "0.5533715", "0.55262864", "0.55195326", "0.5516322", "0.5514418", "0.5511656", "0.54927886", "0.5489426", "0.5488591", "0.5478774", "0.5476819", "0.5466759", "0.54645807", "0.54582787", "0.54582644", "0.5451884", "0.54515636", "0.5442529", "0.54337144", "0.541993", "0.5410355", "0.54070246", "0.5402898", "0.5386753", "0.5378259", "0.53759885", "0.5370184", "0.536717" ]
0.6687648
1
Calculate cosine distance between two vectors
def findCosineDistance(vector1, vector2):
    vec1 = vector1.flatten()
    vec2 = vector2.flatten()

    a = np.dot(vec1.T, vec2)
    b = np.dot(vec1.T, vec1)
    c = np.dot(vec2.T, vec2)
    return 1 - (a / (np.sqrt(b) * np.sqrt(c)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cosine_distance(u, v):\n #print u,v\n return numpy.dot(u, v) / (math.sqrt(numpy.dot(u, u)) * math.sqrt(numpy.dot(v, v)))", "def cosine_distance(u, v):\n return numpy.dot(u, v) / (math.sqrt(numpy.dot(u, u)) * math.sqrt(numpy.dot(v, v)))", "def cosine_similarity(v1: Vector, v2: Vector) -> float:\n return dot_product(v1, v2) / (vector_len(v1) * vector_len(v2))", "def cosine_similarity(cls, vec_a, vec_b):\n return np.dot(vec_a, vec_b) / \\\n (np.linalg.norm(vec_a) * np.linalg.norm(vec_b))", "def cosine(vector_1, vector_2):\n\n def _norm(_v):\n return np.sqrt(sum([x ** 2 for x in _v.values()]))\n\n numerator = dot_product(vector_1, vector_2)\n denominator = _norm(vector_1) * _norm(vector_2)\n if denominator == 0:\n return -1\n return numerator / denominator", "def cosine_dist(d1, d2):\n suma=0\n for x in d1:\n if x in d2:\n suma+=(d1[x]*d2[x])\n sqrt1=0\n sqrt2=0\n for i in d1:\n sqrt1+=math.pow(d1[i],2)\n for i in d2:\n sqrt2+=math.pow(d2[i],2)\n return 1-suma/(math.sqrt(sqrt1)*math.sqrt(sqrt2))", "def cosine_distance(a, b, axis=1):\n a_norm = np.dot(a,a)**.5\n b_norm = np.sum(b**2, axis=axis)**.5\n return np.dot(b,a)/(a_norm*b_norm)", "def get_cosine(vec1, vec2):\n OPS = get_current_ops()\n v1 = OPS.to_numpy(OPS.asarray(vec1))\n v2 = OPS.to_numpy(OPS.asarray(vec2))\n return numpy.dot(v1, v2) / (numpy.linalg.norm(v1) * numpy.linalg.norm(v2))", "def cosine_distance(point1, point2):\n cos_dist = 0\n length_point1 = norm(point1)\n length_point2 = norm(point2)\n cos_dist = 1 - (dot_product(point1, point2)/(length_point1 * length_point2))\n return cos_dist", "def cosine_distance(x1, x2):\n x1 = tf.cast(x1, dtype=tf.float32)\n x2 = tf.cast(x2, dtype=tf.float32)\n\n # dot product between rows of `x_1` and rows of `x_2`\n # \"ij,ij->i\" := output[i] = sum_j x1[i, j] * x2[i, j]\n cos_thetas = tf.linalg.einsum(\"ij,ij->i\", x1, x2)\n cos_distances = 1 - cos_thetas\n\n # deal with numerical inaccuracies setting small negatives to zero\n cos_distances = tf.maximum(cos_distances, 0.0)\n\n return cos_distances", "def cosine_similarity(vec1, vec2) -> float:\n return np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))", "def cosine_similarity(v1, v2):\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))", "def cosine_similarity(a, b):\n cs = dot_product(a, b)/(norm(a) * norm(b))\n return cs", "def compute_cosine_sim(vec1, vec2):\r\n\r\n vec1 = np.array(vec1)\r\n vec2 = np.array(vec2)\r\n return np.dot(vec1, vec2)/(norm(vec1) * norm(vec2))", "def cosine_distance(x1, x2, dim=1, eps=1e-8):\n w12 = torch.sum(x1 * x2, dim)\n w1 = torch.norm(x1, 2, dim)\n w2 = torch.norm(x2, 2, dim)\n return 1 - (w12 / (w1 * w2).clamp(min=eps)).squeeze()", "def cosine_similarity(vector_x, vector_y):\n if(len(vector_x)!=len(vector_y)):\n raise Exception('Vectors must be the same dimensions')\n \n return 1-np.dot(vector_x,vector_y)/(np.linalg.norm(vector_x)*np.linalg.norm(vector_y))", "def tf_cosine_distance(self, a, b):\n normalize_a = tf.nn.l2_normalize(a, -1)\n normalize_b = tf.nn.l2_normalize(b, -1)\n cos_similarity = tf.reduce_sum(\n tf.multiply(normalize_a, normalize_b), axis=-1, keep_dims=True\n )\n return (1.0 - cos_similarity) / 2.0", "def cos(\r\n vec1: torch.FloatTensor, vec2: torch.FloatTensor, dim: int = -1\r\n) -> torch.FloatTensor:\r\n return torch.sum(vec1 * vec2, dim=dim) / (\r\n vec1.norm(dim=dim) * vec2.norm(dim=dim) + EPS\r\n )", "def cosine_collection_distance(x1, x2):\n x1 = tf.cast(x1, dtype=tf.float32)\n x2 = tf.cast(x2, dtype=tf.float32)\n\n # dot product between rows of 
`x1` and columns of `x2` transpose\n cos_thetas = tf.linalg.matmul(x1, x2, transpose_b=True)\n pairwise_distances = 1 - cos_thetas\n\n # deal with numerical inaccuracies setting small negatives to zero\n pairwise_distances = tf.maximum(pairwise_distances, 0.0)\n\n return pairwise_distances", "def cos_sim(v1: Union[np.ndarray, np.iterable, int, float], v2: Union[np.ndarray, np.iterable, int, float]) -> float:\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))", "def cos_sim(v1, v2):\r\n return np.inner(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))", "def cosine_similarity(self, v1: np.ndarray, v2: np.ndarray) -> float:\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))\n # return cosine_similarity(v1, v2)[0][0]", "def cos_sim(vec1, vec2):\n if len(vec1) != len(vec2):\n print 'dimension does not agree.'\n numerator_sum = 0 \n for i in range(len(vec1)):\n numerator_sum = numerator_sum + vec1[i]*vec2[i]\n \n denom = np.linalg.norm(vec1) * np.linalg.norm(vec2)\n \n return numerator_sum/denom", "def cosine_distance(A, B):\n\n A = A / T.sqrt(T.sum(A ** 2, axis=1)).reshape((-1, 1))\n B = B / T.sqrt(T.sum(B ** 2, axis=1)).reshape((-1, 1))\n D = T.dot(A, T.transpose(B))\n\n return 1 - D", "def __cos_sim(self, v1, v2):\n if np.count_nonzero(v1) == 0 or np.count_nonzero(v2) == 0:\n # whenever at least one of the vectors is all zeros, spatial.distance.cosine will fail by returning nan\n ret = 0\n else:\n ret = 1 - spatial.distance.cosine(v1, v2)\n return ret", "def compute_cosine_sim(vec1, vec2):\n numer = np.dot(vec1.reshape((300,)), vec2.reshape((300,)))\n denom = np.sqrt(np.sum(np.square(vec1.reshape(300, )))) * np.sqrt(\n np.sum(np.square(vec2.reshape(300, ))))\n\n similarity = numer / denom\n\n return similarity", "def cosine_similarity(self, x, y):\n return np.dot(x, y) / (np.linalg.norm(x) * np.linalg.norm(y))", "def cosine_sim(a: np.ndarray, \n b: np.ndarray \n ) -> float:\n return (\n 1 + a.dot(b) / \n (np.linalg.norm(a)*np.linalg.norm(b))\n ) / 2", "def cosine_similarity(v1, v2):\n sim = np.sum(v1*v2)/np.sqrt(np.sum(v1**2))/np.sqrt(np.sum(v2**2))\n return sim", "def vector_cosine_angle(vec_1:tuple, vec_2:tuple)->float:\n if is_zero_vector(vec_1) or is_zero_vector(vec_2):\n return None\n return dot_product(vec_1, vec_2) / (magnitude(vec_1) * magnitude(vec_2))", "def cosine_sim_counters(a, b):\n union_ab = sorted((a | b).keys())\n veca = np.array([a[element] if element in a else 0 for element in union_ab])\n vecb = np.array([b[element] if element in b else 0 for element in union_ab])\n return np.dot(veca, vecb) / (np.linalg.norm(veca) * np.linalg.norm(vecb))", "def distance(self, word1, word2):\n\n return scipy.spatial.distance.cosine(self.vectors.get(word1), self.vectors.get(word2))", "def cosine_distance(tensor1, tensor2, dtype=tf.float32):\n tensor1 = tf.convert_to_tensor(tensor1, dtype)\n tensor2 = tf.convert_to_tensor(tensor2, dtype)\n\n dot_prod = tf.reduce_sum(tf.multiply(tensor1, tensor2), -1)\n norm1 = tf.norm(tensor1, axis=-1)\n norm2 = tf.norm(tensor2, axis=-1)\n\n norm12 = norm1 * norm2\n cos12 = dot_prod / norm12\n\n sim = tf.where(tf.math.is_nan(cos12), tf.zeros_like(cos12), cos12)\n\n # if we need to correct this to angular distance, acos(1.000001) is nan)\n sim = tf.clip_by_value(sim, -1., 1.)\n return 1 - sim", "def cos_vecs(x, y):\r\n _t = np.sum((x * y), axis=1)\r\n norm_x = np.linalg.norm(x, axis=1, keepdims=True)\r\n norm_y = np.linalg.norm(y, axis=1, keepdims=True)\r\n _t = np.reshape(_t, (-1, 1))\r\n ret = _t / (norm_x * norm_y + 
1e-10)\r\n return ret", "def pcosine(u, v):\n\n # validate vectors like scipy does\n u = ssd._validate_vector(u)\n v = ssd._validate_vector(v)\n\n dist = 1. - np.abs(np.dot(u, v) / (linalg.norm(u) * linalg.norm(v)))\n\n return dist", "def CosineSimilarity(test_vec, source_vecs):\n cos_dist = 0\n for source_vec in source_vecs:\n cos_dist += FacePredictor.findCosineDistance(test_vec, source_vec)\n return cos_dist / len(source_vecs)", "def cosine_similarity(vec_x, vec_y):\n sim_prod = 0.0\n len_x = 0\n len_y = 0\n\n for ngram in vec_x:\n len_x += vec_x[ngram] ** 2\n\n for ngram in vec_y:\n len_y += vec_y[ngram] ** 2\n\n len_x = math.sqrt(len_x)\n len_y = math.sqrt(len_y)\n\n for ngram in vec_x:\n if ngram in vec_y:\n sim_prod += vec_x[ngram] * vec_y[ngram]\n\n return sim_prod / (len_x * len_y)", "def cosine_sim_collections(a, b):\n setab = sorted(set(a) | set(b))\n countera, counterb = Counter(a), Counter(b)\n veca = [countera[element] if element in a else 0 for element in setab]\n vecb = [counterb[element] if element in b else 0 for element in setab]\n return dot(veca, vecb) / (norm(veca) * norm(vecb))", "def _vector_dist(self, vec1, vec2):\r\n return sqrt(sum([(float(v1) - float(v2)) ** 2 for v1, v2 in\r\n zip(vec1, vec2)]))", "def cosine_similarity(x1, x2, dim=1, eps=1e-8):\n w12 = torch.sum(x1 * x2, dim)\n w1 = torch.norm(x1, 2, dim)\n w2 = torch.norm(x2, 2, dim)\n return (w12 / (w1 * w2).clamp(min=eps)).squeeze()", "def compute_cosine_similarity(base_vector, target_vector):\n\n np.seterr(all='print')\n cosine_similarity = 0\n\n try:\n base_vector = np.longdouble(base_vector)\n target_vector = np.longdouble(target_vector)\n vector_dot_products = np.dot(base_vector, target_vector)\n vector_norms = np.linalg.norm(base_vector) * np.linalg.norm(target_vector)\n cosine_similarity = np.divide(vector_dot_products, vector_norms)\n\n if vector_norms == 0.0:\n print 'Error in vec in compute_cosine_similarity'\n print target_vector\n\n except Exception, e:\n print(str(e))\n\n return cosine_similarity", "def _cosine_distance(a, b, data_is_normalized=False):\n if not data_is_normalized:\n a = np.asarray(a) / np.linalg.norm(a, axis=1, keepdims=True)\n b = np.asarray(b) / np.linalg.norm(b, axis=1, keepdims=True)\n return 1. - np.dot(a, b.T)", "def _cosine_distance(a, b, data_is_normalized=False):\n if not data_is_normalized:\n a = np.asarray(a) / np.linalg.norm(a, axis=1, keepdims=True)\n b = np.asarray(b) / np.linalg.norm(b, axis=1, keepdims=True)\n return 1. 
- np.dot(a, b.T)", "def cosine_similarity(x1, x2, dim=1, eps=1e-8):\r\n w12 = torch.sum(x1 * x2, dim)\r\n w1 = torch.norm(x1, 2, dim)\r\n w2 = torch.norm(x2, 2, dim)\r\n return (w12 / (w1 * w2).clamp(min=eps)).squeeze()", "def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:\n return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))", "def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:\n return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)", "def cosine_similarity(u, v):\n\n distance = 0.0\n\n ### START CODE HERE ###\n # Compute the dot product between u and v (โ‰ˆ1 line)\n dot = np.dot(u, v)\n # Compute the L2 norm of u (โ‰ˆ1 line)\n norm_u = np.sqrt(np.dot(u, u))\n\n # Compute the L2 norm of v (โ‰ˆ1 line)\n norm_v = np.sqrt(np.dot(v, v)) ##np.linalg.norm(u)\n # Compute the cosine similarity defined by formula (1) (โ‰ˆ1 line)\n cosine_similarity = dot / (norm_u * norm_v)\n ### END CODE HERE ###\n\n return cosine_similarity", "def cosine_similarity(a, b):\n if a.ndim != 1 or b.ndim != 1:\n raise InvalidShapeException(a,b)\n\n if len(a) != len(b):\n raise InvalidLengthException(a,b)\n \n mag_a = np.linalg.norm(a)\n mag_b = np.linalg.norm(b)\n\n return np.dot(a,b)/(mag_a*mag_b)", "def dist(self, point_a, point_b):\n # TODO(nina): case np.dot(unit_vec, unit_vec) != 1\n if np.all(point_a == point_b):\n return 0.\n\n point_a = vectorization.expand_dims(point_a, to_ndim=2)\n point_b = vectorization.expand_dims(point_b, to_ndim=2)\n\n n_points_a, _ = point_a.shape\n n_points_b, _ = point_b.shape\n\n assert (n_points_a == n_points_b\n or n_points_a == 1\n or n_points_b == 1)\n\n n_dists = np.maximum(n_points_a, n_points_b)\n dist = np.zeros((n_dists, 1))\n\n norm_a = self.embedding_metric.norm(point_a)\n norm_b = self.embedding_metric.norm(point_b)\n inner_prod = self.embedding_metric.inner_product(point_a, point_b)\n\n cos_angle = inner_prod / (norm_a * norm_b)\n mask_cos_greater_1 = np.greater_equal(cos_angle, 1.)\n mask_cos_less_minus_1 = np.less_equal(cos_angle, -1.)\n mask_else = ~mask_cos_greater_1 & ~mask_cos_less_minus_1\n\n dist[mask_cos_greater_1] = 0.\n dist[mask_cos_less_minus_1] = np.pi\n dist[mask_else] = np.arccos(cos_angle[mask_else])\n\n return dist", "def cosine_similarity(u: np.ndarray, v: np.ndarray) -> np.float64:\n assert u.shape[0] == v.shape[0], \"Input vector must have same shape.\"\n uv = 0\n uu = 0\n vv = 0\n for i in range(u.shape[0]):\n uv += u[i] * v[i]\n uu += u[i] * u[i]\n vv += v[i] * v[i]\n cosine_score = 0\n if uu != 0 and vv != 0:\n cosine_score = uv / np.sqrt(uu * vv)\n return np.float64(cosine_score)", "def cosine_similarity(a, b):\n\n numerator = tf.reduce_sum(tf.multiply(a, b), axis=1)\n denominator = tf.multiply(tf.norm(a, axis=1), tf.norm(b, axis=1))\n cos_similarity = numerator/denominator\n return cos_similarity", "def cosine_sim(self, u_name, v_name):\n u_vector = self.get_vector(u_name)\n v_vector = self.get_vector(v_name)\n u_norm = self.vectors_norm_db.get(u_name)\n v_norm = self.vectors_norm_db.get(v_name)\n\n numerator = self.scalar_product(u_vector.items, v_vector.items)\n denominator = float(u_norm) * float(v_norm)\n \n try:\n # round the cosine similarity two digits after the decimal point \n cosine = round(numerator / denominator, 2)\n except ZeroDivisionError:\n logging.error(\"division by zero for %s and %s !\" \\\n % (u_name, v_name))\n cosine = 0\n \n logging.debug(\"%s %s = %s \" \\\n % (u_name, v_name, cosine))\n \n return cosine", "def 
cosine_value(self,doc_vector,query_vector):\n\t\tvalue=0;i=0;\n\t\tunit_vector_query=self.unit_vector(query_vector);\n\t\tunit_vector_doc=self.unit_vector(doc_vector);\n\t\titerate=0\n\t\tfor word in query_vector:\n\t\t\tif word in doc_vector:\n\t\t\t\tvalue+=query_vector[word]*doc_vector[word]\n\t\tif unit_vector_query != 0:\n\t\t\tvalue = value/(unit_vector_query*unit_vector_doc)\n\t\telse:\n\t\t\tvalue = 0\n\t\treturn value", "def safe_cosine_sim(x, y):\n l2x = fluid.layers.l2_normalize(x, axis=-1)\n l2y = fluid.layers.l2_normalize(y, axis=-1)\n cos = fluid.layers.reduce_sum(l2x * l2y, dim=1, keep_dim=True)\n return cos", "def cosine_similarity(v1, v2):\n # Cosine Sim:\n # Get the words that both have in common\n\n v1words = set(v1.keys())\n v2words = set(v2.keys())\n\n numerator_words = v1words.intersection(v2words)\n\n # Multiply and sum those counts\n numerator = 0.0\n for word in numerator_words:\n numerator += v1[word] * v2[word]\n\n\n # Divide by the sqrt of the product of the sum of the squares of the counts\n denominator = math.sqrt(math.magnitude(list(v1.values())) * math.magnitude(list(v2.values())))\n\n return numerator/denominator", "def distance_func(a, b, w_eucl, w_cos):\n d = w_cos * (1 - torch.nn.functional.cosine_similarity(a.view(1, -1), b)) + \\\n w_eucl * torch.nn.functional.pairwise_distance(a.view(1, -1), b, p=2)\n return d", "def cosine_distances(X, Y):\n # should not need X_norm_squared because if you could precompute that as\n # well as Y, then you should just pre-compute the output and not even\n # call this function.\n if X is Y:\n X = Y = np.asanyarray(X)\n else:\n X = np.asanyarray(X)\n Y = np.asanyarray(Y)\n\n if X.shape[1] != Y.shape[1]:\n raise ValueError(\"Incompatible dimension for X and Y matrices\")\n\n return 1. 
- ssd.cdist(X, Y, 'cosine')", "def distance(a, b):\n return (np.sum((a - b)**2))**0.5", "def cos_sim(u, v):\n return np.vdot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))", "def cosine_similarity(self, source_doc, input_doc):\n vectorizer = self.vectorizer or TfidfVectorizer(tokenizer=PlagiarismDetector.tokenize_and_stem, stop_words='english')\n tfidf = vectorizer.fit_transform([source_doc, input_doc])\n return ((tfidf * tfidf.T).A)[0, 1]", "def euclidean_distance(a, b):\n return np.linalg.norm(a - b)", "def similarity_vec(self, vec1: numpy.ndarray, vec2: numpy.ndarray, metric='cosine') -> float:\n if numpy.count_nonzero(vec1) == 0 or numpy.count_nonzero(vec2) == 0:\n if metric == 'cosine':\n return 0.\n else:\n return 0.\n\n vec1 = vec1.reshape((1, -1))\n vec2 = vec2.reshape((1, -1))\n if metric == 'cosine':\n return (1 - distance.cdist(vec1, vec2, metric=metric).reshape(-1))[0]\n else:\n return distance.cdist(vec1, vec2, metric=metric).reshape(-1)[0]", "def get_dist(text1, text2, wv):\n t1 = lookup(text1, wv)\n t2 = lookup(text2, wv)\n dist = cos_sim(t1, t2)\n return dist", "def distance(v1, v2):\r\n return magnitude(*subtract(v2, v1))", "def distance(a, b):\n return math.sqrt((a[0]-b[0])**2 + (a[1]-b[1])**2)", "def cosin_sim_pairs(a, b):\n wordsA = set(a.keys())\n wordsB = set(b.keys())\n inter = wordsA.intersection(wordsB)\n if(len(inter) == 0):\n return 0.0\n aa, bb, ab = 0, 0, 0\n for k in inter:\n aa += a[k] ** 2\n bb += b[k] ** 2\n ab += a[k] * b[k]\n for k in wordsA - inter:\n aa += a[k] ** 2\n for k in wordsB - inter:\n bb += b[k] ** 2\n return ab / float(math.sqrt(aa) * math.sqrt(bb))", "def cosine_distance(user1: User, user2: User) -> float:\r\n common_animes = set.intersection(set(user1.neighbor_anime.keys()),\r\n set(user2.neighbor_anime.keys()))\r\n if len(common_animes) == 0:\r\n return 1\r\n numerator = sum(anime.neighbor_users[user1] * anime.neighbor_users[user2]\r\n for anime in common_animes)\r\n denominator = _square_rooted(user1, common_animes) * _square_rooted(user2, common_animes)\r\n try:\r\n return 1 - (numerator / denominator)\r\n except ZeroDivisionError:\r\n print(user1.username)\r\n for anime in common_animes:\r\n print(anime.neighbor_users[user1])\r\n print(user2.username)\r\n for anime in common_animes:\r\n print(anime.neighbor_users[user2])\r\n return 1", "def euclidean_distance(vec1, vec2):\n return numpy.linalg.norm(vec1 - vec2)", "def distance(a,b): \r\n return math.sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)", "def cosine_similarity(v1, v2):\n v1_len = 0\n v2_len = 0\n dot_product = 0\n\n for context_id, count in v1.items():\n v1_len += count ** 2\n if context_id in v2:\n dot_product += count*v2[context_id]\n for count in v2.values():\n v2_len += count ** 2\n\n v1_len = math.sqrt(v1_len)\n v2_len = math.sqrt(v2_len)\n return dot_product/(v1_len * v2_len)", "def cosine_distances(X, Y):\r\n D = np.zeros((X.shape[0],Y.shape[0]))\r\n \r\n for X_idx in range(X.shape[0]):\r\n for Y_idx in range(Y.shape[0]): \r\n \r\n D[X_idx,Y_idx] = 1 - (np.dot(X[X_idx,:],Y[Y_idx,:]) / (np.sqrt(np.dot(X[X_idx,:], X[X_idx,:]))* np.sqrt(np.dot(Y[Y_idx,:], Y[Y_idx,:])))) \r\n return D", "def pairwise_cosine_similarity(x, y):\n x = torch.div(x, torch.sqrt(torch.max(torch.sum(x ** 2), 1e-12)))\n y = torch.div(y, torch.sqrt(torch.max(torch.sum(y ** 2), 1e-12)))\n return torch.mm(x, torch.transpose(y, 1, 0))", "def dist(first, other):\n if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):\n return length(sub(first,other))", "def dist(v1: vect2d, v2: vect2d) -> 
float:\n d = ((v2.x - v1.x)**2 + (v2.y - v1.y)**2) ** 0.5\n return d", "def dist(v1, v2):\n return ( (v1[0] - v2[0])**2 + (v1[1] - v2[1])**2 )**0.5", "def distance(a, b):\n return math.sqrt((b[0]-a[0])**2 + (b[1]-a[1])**2)", "def dist_cosine(src, dest, qgraml=None):\n\treturn 1 - sim_cosine(src, dest, qgraml)", "def euclidean_distance(vector1, vector2):\n e_dist = [(v1 - v2) ** 2 for v1, v2 in zip(vector1, vector2)]\n e_dist = math.sqrt(sum(e_dist))\n return e_dist", "def cosine_distance(m, f):\n return (1 - ((m * f).sum() / (norm(m) * norm(f))))", "def cosine(xs: Tensor, ys: Tensor, epsilon: float = 1e-8) -> Tensor:\n mat = xs @ ys.t()\n x_norm = xs.norm(2, dim=1) + epsilon\n y_norm = ys.norm(2, dim=1) + epsilon\n x_diag = (1 / x_norm).diag()\n y_diag = (1 / y_norm).diag()\n return x_diag @ mat @ y_diag", "def cosine(arr1, arr2):\n\n if arr1 is None or arr2 is None:\n return np.NaN\n if not isinstance(arr1, list):\n arr1 = [arr1]\n if any(pd.isnull(arr1)):\n return np.NaN\n if not isinstance(arr2, list):\n arr2 = [arr2]\n if any(pd.isnull(arr2)):\n return np.NaN\n # Create cosine measure object\n measure = sm.Cosine()\n # Call the function to compute the cosine measure.\n return measure.get_raw_score(arr1, arr2)", "def get_angle(v1, v2):\n return np.arccos(np.dot(v1, v2))", "def get_cosine_similarity(word2vec: Word2Vec) -> np.ndarray:\n return cosine_similarity(word2vec.wv.vectors)", "def get_cosine_similarity(doc1, doc2):\n count_vectorizer = CountVectorizer(stop_words='english')\n sparse_matrix = count_vectorizer.fit_transform(raw_documents=[doc1, doc2])\n dtm = sparse_matrix.todense()\n df_dtm = pd.DataFrame(data=dtm, \n columns=count_vectorizer.get_feature_names(), \n index=['doc1', 'doc2'])\n similarity_matrix = cosine_similarity(df_dtm, df_dtm)\n similarity_score = round(similarity_matrix[0][1], 6)\n return similarity_score", "def calculate_cosine_dist(main_text, new_text):\n wordbag = set(\" \".join([main_text, new_text]).split(\" \"))\n dot_prod = 0\n main_text = main_text.split(\" \")\n new_text = new_text.split(\" \")\n\n for word in wordbag:\n if word in main_text and word in new_text:\n # only worth looking at if word is in both. 
Otherwise dot prod = 0\n count_A = sum(np.array(main_text) == word)\n count_B = sum(np.array(new_text) == word)\n dot_prod += count_A * count_B\n\n return float(dot_prod) / (len(main_text) * len(new_text))", "def distance_point_point(a, b):\n ab = subtract_vectors(b, a)\n return length_vector(ab)", "def distance(a,b):\n return np.sqrt( (x(a)-x(b))**2 + (y(a)-y(b))**2 )", "def vector_angle(v1, v2):\n cos_theta = np.dot(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2)\n # Clip ensures that cos_theta is within -1 to 1 by rounding say -1.000001 to -1 to fix numerical issues\n angle = np.arccos(np.clip(cos_theta, -1, 1))\n\n return angle", "def compute_cosine_similarity(self):\n cos_matrix = []\n for i in range(len(self.train_vec)):\n val = self.vec1 * self.train_vec[i]\n cos_matrix.append(val[0])\n out = np.argmax(cos_matrix)\n print(self.train_output[out])", "def cos(self) -> np.float64:\n\n return (self.node2.x - self.node1.x) / self.get_length()", "def by_distance_vectors(self, string_1, string_2):\n string_1 = self.kywrds.by_frequency(string_1)\n string_2 = self.kywrds.by_frequency(string_2)\n model = self.doc2vec_model[0]\n doc_vec_1 = model.infer_vector(string_1)\n doc_vec_2 = model.infer_vector(string_2)\n return spatial.distance.cosine(doc_vec_1, doc_vec_2)", "def EuclideanDistanceSq( self, a, b ):\n if not (type(a) == list or type(a) == Vector):\n a = [a]\n if not (type(b) == list or type(a) == Vector):\n b = [b]\n assert len(a) == len(b)\n sqDist = 0\n for x,y in zip(a,b):\n sqDist += (x-y)**2\n return sqDist", "def angle_between_vectors(a, b):\n return math.acos(dot_product(a, b) / (length(a) * length(b)))", "def calculate_cosine_similarity(self):\n tfidf_matrix = self.calculate_tfidf()\n\n cosine_similarity = linear_kernel(tfidf_matrix, tfidf_matrix) # Cosine similarity matrix calculation\n\n return cosine_similarity", "def euclidean_distance(s1,s2): \n tmpsum = 0\n \n for index,value in enumerate(s1):\n tmpsum += (s1[index]-s2[index])**2\n \n return math.sqrt(tmpsum)", "def get_euclid_dist(vec_1, vec_2):\n\n\treturn np.sqrt(np.sum(np.fabs(vec_1 - vec_2), axis=1)).flatten()", "def euclidean_distance(vector_1, vector_2) -> float:\n\n\n before_square_root = 0\n for i in range(len(vector_1)):\n before_square_root += (vector_1[i] - vector_2[i])**2\n\n d = math.sqrt(before_square_root)\n print(d)\n return(d)", "def _arccosine(self, s1, s2, tf_embs):\n tf_pi = tf.constant(np.pi, dtype=tf.float64)\n mat1 = tf.gather(tf_embs, s1)\n mat2 = tf.gather(tf_embs, s2)\n tf_norms = tf.constant(self.norms, dtype=tf.float64, name='norms')\n norms1 = tf.gather(tf_norms, s1)\n norms2 = tf.gather(tf_norms, s2)\n dot = tf.matmul(mat1, tf.transpose(mat2))\n norms = tf.matmul(norms1, tf.transpose(norms2))\n # We clip values due to numerical errors\n # which put some values outside the arccosine range.\n cosine = tf.clip_by_value(dot / norms, -1, 1)\n angle = tf.acos(cosine)\n # The 0 vector has norm 0, which generates a NaN.\n # We catch these NaNs and replace them with pi,\n # which ends up returning 0 similarity.\n angle = tf.select(tf.is_nan(angle), tf.ones_like(angle) * tf_pi, angle)\n return 1 - (angle / tf_pi)", "def distance_sq(self, other_vector):\n return sum((x - y) ** 2 for x, y in zip(self.vector, other_vector))", "def distance(a, b):\n return vincenty((float(a.longitude), float(a.latitude)),\n (float(b.longitude), float(b.latitude))).km" ]
[ "0.8144758", "0.806103", "0.7972851", "0.79563916", "0.7951864", "0.77495205", "0.7739488", "0.7737394", "0.7714112", "0.7667365", "0.76573527", "0.7630341", "0.7616587", "0.76105106", "0.76048666", "0.7574436", "0.7551911", "0.7540636", "0.7515504", "0.7509719", "0.7507556", "0.7460115", "0.7421564", "0.74071646", "0.7367841", "0.73649263", "0.7355743", "0.7353771", "0.73448557", "0.72659564", "0.72462475", "0.7225103", "0.7214073", "0.71971864", "0.7189994", "0.71704376", "0.7143493", "0.71243775", "0.7113887", "0.70865095", "0.7063107", "0.70570695", "0.70570695", "0.7055245", "0.7037807", "0.7028861", "0.7020519", "0.6997602", "0.69719195", "0.69653475", "0.6934389", "0.6917237", "0.6904156", "0.69036734", "0.687425", "0.6845646", "0.6840872", "0.67999285", "0.679873", "0.67895085", "0.6783437", "0.6781249", "0.67744017", "0.6773737", "0.6770984", "0.67687833", "0.67390805", "0.67313105", "0.6722819", "0.67228144", "0.67198527", "0.6718481", "0.6714711", "0.67065275", "0.6703688", "0.66916233", "0.6674616", "0.6668081", "0.6663044", "0.6660839", "0.666016", "0.66563255", "0.66500485", "0.6644937", "0.6643245", "0.6635336", "0.66325134", "0.6632062", "0.6607269", "0.65900975", "0.6588401", "0.65842855", "0.6581276", "0.65772134", "0.6572456", "0.6567926", "0.6563725", "0.6558432", "0.6555727", "0.6542679" ]
0.8303037
0
Verify the similarity of one vector to a group of vectors of one class
def CosineSimilarity(test_vec, source_vecs):
    cos_dist = 0
    for source_vec in source_vecs:
        cos_dist += FacePredictor.findCosineDistance(test_vec, source_vec)
    return cos_dist / len(source_vecs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_similarity(self):\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'dog.n.01'), 1))\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'mammal.n.01'), 0.180901358))", "def test_dice_similarity_compiled():\n vector1 = np.array([1, 1, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = dice_similarity(vector1, vector1)\n score12 = dice_similarity(vector1, vector2)\n score22 = dice_similarity(vector2, vector2)\n\n assert score12 == 2 * 2/6, \"Expected different score.\"\n assert score11 == score22 == 1.0, \"Expected different score.\"", "def exactClassify(sim_vec_dict):\n\n print('Exact classification of %d record pairs' % (len(sim_vec_dict)))\n\n class_match_set = set()\n class_nonmatch_set = set()\n\n # Iterate over all record pairs\n #\n for (rec_id_tuple, sim_vec) in sim_vec_dict.items():\n\n sim_sum = sum(sim_vec) # Sum all attribute similarities\n\n if sim_sum == len(sim_vec): # All similarities were 1.0\n class_match_set.add(rec_id_tuple)\n else:\n class_nonmatch_set.add(rec_id_tuple)\n\n print(' Classified %d record pairs as matches and %d as non-matches' % \\\n (len(class_match_set), len(class_nonmatch_set)))\n print('')\n\n return class_match_set, class_nonmatch_set", "def calc_euclidean_similarity(vec_1, vec_2):\n sim = 0\n vec_1 = vec_1.reshape((vec_1.shape[1],))\n vec_2 = vec_2.reshape((vec_2.shape[1],))\n vec_1_nnz = np.nonzero(vec_1)[0]\n print vec_1_nnz\n # import ipdb; ipdb.set_trace()\n vec_2_nnz = np.nonzero(vec_2)[0]\n print vec_2_nnz\n intersect = set(vec_1_nnz) & set(vec_2_nnz)\n if len(intersect) > 0:\n error_squares = [pow(vec_1[arg] - vec_2[arg], 2) for arg in intersect]\n sim = 1.0 / (1 + np.sqrt(np.sum(error_squares)))\n return sim", "def test_dice_similarity():\n vector1 = np.array([1, 1, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = dice_similarity.py_func(vector1, vector1)\n score12 = dice_similarity.py_func(vector1, vector2)\n score22 = dice_similarity.py_func(vector2, vector2)\n\n assert score12 == 2 * 2/6, \"Expected different score.\"\n assert score11 == score22 == 1.0, \"Expected different score.\"", "def test_most_similar_with_vector_input(self):\n expected = [\n 'dog.n.01',\n 'canine.n.02',\n 'hunting_dog.n.01',\n 'carnivore.n.01',\n 'placental.n.01',\n ]\n input_vector = self.vectors['dog.n.01']\n predicted = [result[0] for result in self.vectors.most_similar([input_vector], topn=5)]\n self.assertEqual(expected, predicted)", "def test_dice_similarity_all_zeros_compiled():\n vector1 = np.array([0, 0, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = dice_similarity(vector1, vector1)\n score12 = dice_similarity(vector1, vector2)\n score22 = dice_similarity(vector2, vector2)\n\n assert score11 == score12 == 0.0, \"Expected different score.\"\n assert score22 == 1.0, \"Expected different score.\"", "def test_sim(vec_x, vec_y, feature_list, func):\n feature_map_x = create_feature_map(vec_x, feature_list)\n feature_map_y = create_feature_map(vec_y, feature_list)\n\n if func == 0:\n return cosine_similarity(feature_map_x, feature_map_y)\n\n return minmax(feature_map_x, feature_map_y)", "def compare_vectors(v1, v2):\n if len(v1) == len(v2):\n distance = 0\n for i in xrange(len(v1)):\n distance += (v1[i] - v2[i]) ** 2\n return distance\n else:\n print \"vector not match in dimensions\"", "def test_cosine_similarity_compiled():\n vector1 = np.array([1, 1, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = cosine_similarity(vector1, vector1)\n score12 = cosine_similarity(vector1, vector2)\n score22 = 
cosine_similarity(vector2, vector2)\n\n assert score12 == 2 / np.sqrt(2 * 4), \"Expected different score.\"\n assert score11 == score22 == 1.0, \"Expected different score.\"", "def test_dice_similarity_all_zeros():\n vector1 = np.array([0, 0, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = dice_similarity.py_func(vector1, vector1)\n score12 = dice_similarity.py_func(vector1, vector2)\n score22 = dice_similarity.py_func(vector2, vector2)\n\n assert score11 == score12 == 0.0, \"Expected different score.\"\n assert score22 == 1.0, \"Expected different score.\"", "def test_vector_dist(self):\r\n v1 = [1, 4, 2]\r\n v2 = [-1, 12, 4]\r\n\r\n exp = 8.48528137424\r\n obs = self.best._vector_dist(v1, v2)\r\n assert_almost_equal(exp, obs)\r\n\r\n v1 = [1, 2, 100, 4, 2]\r\n v2 = [-1, 12, 4, 12, 99]\r\n\r\n exp = 137.087563258\r\n obs = self.best._vector_dist(v1, v2)\r\n assert_almost_equal(exp, obs)", "def test_cosine_similarity():\n vector1 = np.array([1, 1, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = cosine_similarity.py_func(vector1, vector1)\n score12 = cosine_similarity.py_func(vector1, vector2)\n score22 = cosine_similarity.py_func(vector2, vector2)\n\n assert score12 == 2 / np.sqrt(2 * 4), \"Expected different score.\"\n assert score11 == score22 == 1.0, \"Expected different score.\"", "def test_cosine_similarity_all_zeros_compiled():\n vector1 = np.array([0, 0, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = cosine_similarity(vector1, vector1)\n score12 = cosine_similarity(vector1, vector2)\n score22 = cosine_similarity(vector2, vector2)\n\n assert score11 == score12 == 0.0, \"Expected different score.\"\n assert score22 == 1.0, \"Expected different score.\"", "def test_dice_similarity_matrix():\n vectors1 = np.array([[1, 1, 0, 0],\n [0, 0, 1, 1]])\n vectors2 = np.array([[0, 1, 1, 0],\n [1, 0, 1, 1]])\n\n scores = dice_similarity_matrix.py_func(vectors1, vectors2)\n expected_scores = np.array([[0.5, 0.4],\n [0.5, 0.8]])\n assert scores == pytest.approx(expected_scores, 1e-7), \"Expected different scores.\"", "def test_dice_similarity_matrix_compiled():\n vectors1 = np.array([[1, 1, 0, 0],\n [0, 0, 1, 1]])\n vectors2 = np.array([[0, 1, 1, 0],\n [1, 0, 1, 1]])\n\n scores = dice_similarity_matrix(vectors1, vectors2)\n expected_scores = np.array([[0.5, 0.4],\n [0.5, 0.8]])\n assert scores == pytest.approx(expected_scores, 1e-7), \"Expected different scores.\"", "def distance(self, u, v):\n numerator = np.dot(u,v)\n denominator = np.linalg.norm(u) * np.linalg.norm(v)\n similarity = numerator/(denominator +1e-7)\n return similarity", "def test_cosine_similarity_all_zeros():\n vector1 = np.array([0, 0, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = cosine_similarity.py_func(vector1, vector1)\n score12 = cosine_similarity.py_func(vector1, vector2)\n score22 = cosine_similarity.py_func(vector2, vector2)\n\n assert score11 == score12 == 0.0, \"Expected different score.\"\n assert score22 == 1.0, \"Expected different score.\"", "def similarity(self, e1, e2):\n\t\tpass", "def similarity_euclid(matrix, business1, business2):\n selected_features = matrix.loc[business1].notna() & matrix.loc[business2].notna()\n\n if not selected_features.any():\n return 0\n\n features1 = matrix.loc[business1][selected_features]\n features2 = matrix.loc[business2][selected_features]\n distance = math.sqrt(((features1 - features2) ** 2).sum())\n\n if distance is np.nan:\n return 0\n\n return 1 / (1 + distance)", "def compare_vectors(word_vector1, word_vector2):\n all_words = 
list(set(word_vector1).union(set(word_vector2)))\n frequency_dict1 = word_frequencies(word_vector1)\n frequency_dict2 = word_frequencies(word_vector2)\n\n frequency_vector1 = [frequency_dict1.get(word, 0) for word in all_words]\n frequency_vector2 = [frequency_dict2.get(word, 0) for word in all_words]\n\n return similarity(frequency_vector1, frequency_vector2)", "def test_cosine_similarity_matrix_compiled():\n vectors1 = np.array([[1, 1, 0, 0],\n [1, 0, 1, 1]])\n vectors2 = np.array([[0, 1, 1, 0],\n [0, 0, 1, 1]])\n\n scores = cosine_similarity_matrix(vectors1, vectors2)\n expected_scores = np.array([[0.5, 0.],\n [0.40824829, 0.81649658]])\n assert scores == pytest.approx(expected_scores, 1e-7), \"Expected different scores.\"", "def similarity(self, token1, token2):\n vec1 = self.get_vector(token1)\n vec2 = self.get_vector(token2)\n assert vec1 is not None and vec2 is not None, \"Cannot compute similarity between None type vectors.\"\n if not self.normalize:\n # if model not loaded as normalized embeddings \n vec1 = vec1 / np.linalg.norm(vec1)\n vec2 = vec2 / np.linalg.norm(vec2)\n return np.dot(vec1, vec2)", "def cosine_similarity(v1, v2):\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))", "def centrality(similarity, vector, vectors):\n return 1.0/len(vectors)*sum([similarity(vector,y) for y in vectors\\\n if y != vector])", "def dice_similarity(u: np.ndarray, v: np.ndarray) -> np.float64:\n u_and_v = np.bitwise_and(u != 0, v != 0)\n u_abs_and_v_abs = np.abs(u).sum() + np.abs(v).sum()\n dice_score = 0\n if u_abs_and_v_abs != 0:\n dice_score = 2.0 * np.float64(u_and_v.sum()) / np.float64(u_abs_and_v_abs)\n return dice_score", "def similarity_vec(self, vec1: numpy.ndarray, vec2: numpy.ndarray, metric='cosine') -> float:\n if numpy.count_nonzero(vec1) == 0 or numpy.count_nonzero(vec2) == 0:\n if metric == 'cosine':\n return 0.\n else:\n return 0.\n\n vec1 = vec1.reshape((1, -1))\n vec2 = vec2.reshape((1, -1))\n if metric == 'cosine':\n return (1 - distance.cdist(vec1, vec2, metric=metric).reshape(-1))[0]\n else:\n return distance.cdist(vec1, vec2, metric=metric).reshape(-1)[0]", "def cosine_similarity(vec_x, vec_y):\n sim_prod = 0.0\n len_x = 0\n len_y = 0\n\n for ngram in vec_x:\n len_x += vec_x[ngram] ** 2\n\n for ngram in vec_y:\n len_y += vec_y[ngram] ** 2\n\n len_x = math.sqrt(len_x)\n len_y = math.sqrt(len_y)\n\n for ngram in vec_x:\n if ngram in vec_y:\n sim_prod += vec_x[ngram] * vec_y[ngram]\n\n return sim_prod / (len_x * len_y)", "def test_cosine_similarity_matrix():\n vectors1 = np.array([[1, 1, 0, 0],\n [1, 0, 1, 1]])\n vectors2 = np.array([[0, 1, 1, 0],\n [0, 0, 1, 1]])\n\n scores = cosine_similarity_matrix.py_func(vectors1, vectors2)\n expected_scores = np.array([[0.5, 0.],\n [0.40824829, 0.81649658]])\n assert scores == pytest.approx(expected_scores, 1e-7), \"Expected different scores.\"", "def test_distance(self):\n self.assertTrue(np.allclose(self.vectors.distance('dog.n.01', 'mammal.n.01'), 4.5278745))\n self.assertEqual(self.vectors.distance('dog.n.01', 'dog.n.01'), 0)", "def test_cossim(self):\n metrics = SimilarityMetrics()\n test1 = metrics.cosine_similarity(np.asarray([1,1]),np.asarray([-1,1]))\n np.testing.assert_almost_equal(test1,0.0)\n\n test2 = metrics.cosine_similarity(np.asarray([1,-1]),np.asarray([-1,1]))\n np.testing.assert_almost_equal(test2,-1.0)\n\n test3 = metrics.cosine_similarity(np.asarray([1,1]),np.asarray([1,1]))\n np.testing.assert_almost_equal(test3,1.0)", "def cosine_similarity(cls, vec_a, vec_b):\n return np.dot(vec_a, vec_b) / 
\\\n (np.linalg.norm(vec_a) * np.linalg.norm(vec_b))", "def compute_similarities_from_vec(self, dataset, a):\n self.model.fit(dataset.X, a)\n return self.model.coef_", "def test_splits_similarity(self):\n a_train = torch.as_tensor(\n [\n [1, 1, 2],\n [2, 1, 3],\n [1, 2, 3],\n [4, 1, 5],\n [5, 1, 6],\n ]\n )\n a_test = torch.as_tensor(\n [\n [4, 2, 6],\n ]\n )\n b_train = torch.as_tensor(\n [\n [1, 1, 2],\n [2, 1, 3],\n [1, 2, 3],\n [4, 1, 5],\n [4, 2, 6],\n ]\n )\n b_test = torch.as_tensor(\n [\n [5, 1, 6],\n ]\n )\n\n a_train_tf = CoreTriplesFactory.create(a_train)\n a_test_tf = CoreTriplesFactory.create(a_test)\n b_train_tf = CoreTriplesFactory.create(b_train)\n b_test_tf = CoreTriplesFactory.create(b_test)\n\n steps = splits_steps([a_train_tf, a_test_tf], [b_train_tf, b_test_tf])\n self.assertEqual(2, steps)\n\n similarity = splits_similarity([a_train_tf, a_test_tf], [b_train_tf, b_test_tf])\n self.assertEqual(1 - steps / 6, similarity)", "def matcher(features1, features2):\n #TODO: write a matching function\n #Performing the L2-Norm\n new_features1=[]\n new_features2=[]\n for itr in range(5):\n [rootOfSquare1,rootOfSquare2] = sumOfSquares(features1[itr],features2[itr])\n new_features1.append(np.array(features1[itr])/rootOfSquare1)\n new_features2.append(np.array(features2[itr])/rootOfSquare2)\n indices = []\n for itr in range(len(new_features1)):\n findMinDist=[]\n #findMaxCosineVal=[]\n for itr2 in range(len(new_features2)):\n f1 = new_features1[itr]\n f2 = new_features2[itr2]\n\n #For evaluating the cosine similarity\n # [rootOfSquare1,rootOfSquare2] = sumOfSquares(f1,f2)\n # numerator = np.array(f1)*np.array(f2)\n # numeratorSum = sum(numerator)\n # denominator = rootOfSquare1*rootOfSquare2\n # cosine = np.divide(numeratorSum,denominator)\n # findMaxCosineVal.append(cosine)\n\n #For evaluating the similarity based on euclidean distance\n Dist = np.array(f1) - np.array(f2)\n sum=0\n for i in Dist:\n sum=sum+math.pow(i,2)\n rootOfSum = math.sqrt(sum)\n findMinDist.append(rootOfSum)\n # print \"itr: \", itr, \" Matching scores: \", findMaxCosineVal\n # bestMatch = findMaxCosineVal.index(max(findMaxCosineVal))\n bestMatch = findMinDist.index(min(findMinDist))\n indices.append([itr,bestMatch])\n return indices", "def cosine_similarity(vector_x, vector_y):\n if(len(vector_x)!=len(vector_y)):\n raise Exception('Vectors must be the same dimensions')\n \n return 1-np.dot(vector_x,vector_y)/(np.linalg.norm(vector_x)*np.linalg.norm(vector_y))", "def test_ssd_similarity_measure_values():\n \n patch1 = torch.tensor([1.3, 4.5, 7.2, 0.2, -0.6])\n patch2 = torch.tensor([0.2, 4.4, 7.6, 0.1, 1.3])\n\n ssd = ssd_similarity_measure(patch1, patch2)\n assert np.isclose(ssd, 5.0, atol=1e-2)", "def similarize(test, target):\n test = np.array(test)\n assert len(test) == 3,'vector must be dim 3'\n angle = angle_between_vectors(test,target)\n if angle > np.pi/2:\n test = -test\n return test", "def analyze_similarities():\r\n print('Total number of candidate pairs:', len(pairs))\r\n print(f'\\nNumber of actual item pairs in the train set: {pairs[\"true_match\"].sum()}\\n')\r\n\r\n for feature in ['text_score', 'image_score', 'txt_img_score', 'words_ratio', 'txt_img_words']:\r\n\r\n # Check distribution of True and False predictions for various similarity scores\r\n print('-' * 50)\r\n print(f'\\nDistribution of True/False predictions for {feature}')\r\n for thr in (0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95):\r\n print('-' * 50)\r\n print(f'Similarity score over {thr}')\r\n pairs_sample = pairs[pairs[feature] 
>= thr]\r\n print(f'Number of similar item pairs: {len(pairs_sample)}')\r\n print(pairs_sample['true_match'].value_counts(normalize=True))\r\n\r\n # Check if identical phash can be used to improve the accuracy\r\n same_phash = pairs[pairs['phash_match'] == 1]\r\n different_phash = pairs[pairs['phash_match'] == 0]\r\n\r\n print('\\nFor item pairs with the same phash:')\r\n print(same_phash['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(same_phash))\r\n\r\n print('\\nFor item pairs with different phash:')\r\n print(different_phash['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(different_phash))\r\n\r\n # Check if numbers in titles can be used to improve the accuracy\r\n same_numbers = pairs[pairs['nums_match'] == 1]\r\n different_numbers = pairs[pairs['nums_match'] == 0]\r\n\r\n print('\\nFor item pairs with the same numbers:')\r\n print(same_numbers['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(same_numbers))\r\n\r\n print('\\nFor item pairs with different numbers:')\r\n print(different_numbers['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(different_numbers))", "def test_poincare_distance(self):\n vector_1 = self.vectors['dog.n.01']\n vector_2 = self.vectors['mammal.n.01']\n\n distance = self.vectors.vector_distance(vector_1, vector_2)\n self.assertTrue(np.allclose(distance, 4.5278745))\n\n distance = self.vectors.vector_distance(vector_1, vector_1)\n self.assertTrue(np.allclose(distance, 0))", "def evaluate_similarity(kv: KeyedVectors, X, y):\n mean_vector = np.mean(kv.vectors, axis=0, keepdims=True)\n missing_words = np.sum(np.isin(X, kv.index2word, invert=True))\n if missing_words > 0:\n logging.warning(\"Missing {} words. 
Will replace them with mean vector\".format(missing_words))\n get = np.vectorize(gensim_helper.get_vector, signature='(),(),(m)->(m)')\n timer = mytimer.Timer(\"getting vectors for words\")\n wv_x = get(X, kv, mean_vector)\n timer.stop()\n a = wv_x[:, 0]\n b = wv_x[:, 1]\n # timer = mytimer.Timer()\n # a = np_helper.normalize_over_cols_2d(a)\n # b = np_helper.normalize_over_cols_2d(b)\n # scores = np.diag(np.matmul(a, b.T))\n # timer.stop()\n # print(scores.shape)\n #\n # A = np.vstack(kv.get(word, mean_vector) for word in X[:, 0])\n # B = np.vstack(kv.get(word, mean_vector) for word in X[:, 1])\n timer = mytimer.Timer()\n scores = np.array([v1.dot(v2.T) / (np.linalg.norm(v1) * np.linalg.norm(v2)) for v1, v2 in zip(a, b)])\n timer.stop()\n # print(scores.shape)\n return scipy.stats.spearmanr(scores, y)", "def test_distances_with_vector_input(self):\n input_vector = self.vectors['dog.n.01']\n distances = self.vectors.distances(input_vector, ['mammal.n.01', 'dog.n.01'])\n self.assertTrue(np.allclose(distances, [4.5278745, 0]))\n\n distances = self.vectors.distances(input_vector)\n self.assertEqual(len(distances), len(self.vectors.vocab))\n self.assertTrue(np.allclose(distances[-1], 10.04756))", "def similarity_function(feature1, feature2):\n # 256 HOG, 18 HSV, 512 Encoder\n # weight color more if using the full vector\n if len(feature1) > 785:\n salient1 = feature1[256:256 + 18].copy() # be careful not to modify feature vector in place\n salient2 = feature2[256:256 + 18].copy()\n feature1 = feature1.copy()\n feature2 = feature2.copy()\n feature1[256:256 + 18] = salient1 * 10\n feature2[256:256 + 18] = salient2 * 10\n\n abs_distance = np.abs(feature1 - feature2)\n return np.sum(abs_distance)", "def _compare_vector(arr1, arr2):\n\n length = len(arr1)\n if len(arr2) != length:\n return False\n\n for i in range(length):\n element_1 = float(arr1[i])\n element_2 = float(arr2[i])\n\n\n diff = abs(abs(element_1) - abs(element_2))\n if diff != 0.0:\n rel = diff / min(abs(element_1), abs(element_2))\n \n # For a basis set, a relatively coarse comparison\n # should be acceptible\n if rel > 1.0e-10:\n return False\n\n return True", "def test_most_similar(self):\n expected = [\n 'canine.n.02',\n 'hunting_dog.n.01',\n 'carnivore.n.01',\n 'placental.n.01',\n 'mammal.n.01'\n ]\n predicted = [result[0] for result in self.vectors.most_similar('dog.n.01', topn=5)]\n self.assertEqual(expected, predicted)", "def compare_stability_matrices(ism1, ism2): \n \n import scipy as sp\n import sklearn as sk\n\n ism1=sk.preprocessing.normalize(ism1,norm='l2')\n ism2=sk.preprocessing.normalize(ism2,norm='l2')\n distance=sp.spatial.distance.correlation(ism1.ravel(), ism2.ravel())\n similarity= 1-distance\n return similarity", "def test_multiclass_compare(self):\n dataset = make_fixture(binary=False, split=True)\n\n oz = ClassBalance()\n assert oz.fit(dataset.y.train, dataset.y.test) is oz\n assert oz._mode == COMPARE\n\n # oz.finalize()\n self.assert_images_similar(oz)", "def similarity(centroid_a, centroid_b):\n \n vector_a = centroid_a.centroid_vector\n vector_b = centroid_b.centroid_vector\n \n length_a = centroid_a.length\n length_b = centroid_b.length\n \n dotproduct = 0.0\n\n for key, value in vector_a.iteritems():\n if key in vector_b: # if both vectors have the key\n dotproduct += (value * vector_b[key])\n\n return float(dotproduct / (length_a * length_b))", "def test_sad_similarity_measure_values():\n \n patch1 = torch.tensor([1.3, 4.5, 7.2, 0.2, -0.6])\n patch2 = torch.tensor([0.2, 4.4, 7.6, 0.1, 1.3])\n\n sad = 
sad_similarity_measure(patch1, patch2)\n\n assert np.isclose(sad, 3.6, atol=1e-2)", "def vec_equal_multiset(vec_1, vec_2):\n if vec_1.shape != vec_2.shape:\n return False\n return (np.sort(vec_1) == np.sort(vec_2)).all()", "def overlap_similarity(vect1, vect2, normalize=False, use_bigrams=False):\n overlap = len(set(vect1).intersection(set(vect2)))\n\n if use_bigrams:\n overlap += len(set(bigrams(vect1)).intersection(set(bigrams(vect2))))\n\n if not normalize:\n return overlap\n \n if overlap == 0:\n return 0\n \n return overlap / (math.log10(len(vect1)) + math.log10(len(vect2)))", "def cosine_similarity(v1: Vector, v2: Vector) -> float:\n return dot_product(v1, v2) / (vector_len(v1) * vector_len(v2))", "def cosine_similarity(self, v1: np.ndarray, v2: np.ndarray) -> float:\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))\n # return cosine_similarity(v1, v2)[0][0]", "def test4(self):\n tgts = [ ('CCC(O)C(=O)O',\n ('CCC','OCC','OCC=O','OCCO','CCCC','OC=O','CC(O)C')),\n ]\n for smi,matches in tgts:\n m = Chem.MolFromSmiles(smi)\n fp1 = Chem.RDKFingerprint(m,2,7,9192,4,0)\n obs = fp1.GetOnBits()\n for match in matches:\n m2 = Chem.MolFromSmiles(match)\n fp2 = Chem.RDKFingerprint(m2,2,7,9192,4,0)\n v1,v2 = DataStructs.OnBitProjSimilarity(fp2,fp1)\n assert feq(v1,1.0000),'substruct %s not properly contained in %s'%(match,smi)", "def main():\n\n measures = Similarity()\n\n input1=sys.argv[1]\n vect1=np.loadtxt(fname = input1)\n \n input2=sys.argv[2]\n vect2=np.loadtxt(fname = input2)\n\n print measures.cosine_similarity(normBySum(vect1), normBySum(vect2))\n \n\n\n #print measures.cosine_similarity2(vect1, vect2)\n\n #print measures.jaccard_similarity([0,1,2,5,6],[0,2,3,5,7,9])", "def cosine_similarity(vec1, vec2) -> float:\n return np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))", "def calculate_similarity(self, tweets):\r\n if (len(tweets) == 1):\r\n return 0\r\n vectors = self.vectorizer.vectorize_data(tweets, False)\r\n\r\n temp = cosine_similarity(vectors[0:-1], vectors)\r\n temp = [item for sublist in temp for item in sublist]\r\n sim = sum(temp) / len(temp)\r\n return sim", "def test_most_similar_to_given(self):\n predicted = self.vectors.most_similar_to_given('dog.n.01', ['carnivore.n.01', 'placental.n.01', 'mammal.n.01'])\n self.assertEqual(predicted, 'carnivore.n.01')", "def self_similarity_matrix(feature_vectors):\n norm_feature_vectors, mean, std = at.normalize_features([feature_vectors.T])\n norm_feature_vectors = norm_feature_vectors[0].T\n sim_matrix = 1.0 - distance.squareform(\n distance.pdist(norm_feature_vectors.T, 'cosine'))\n return sim_matrix", "def cosine_similarity(v1, v2):\n sim = np.sum(v1*v2)/np.sqrt(np.sum(v1**2))/np.sqrt(np.sum(v2**2))\n return sim", "def cosine_similarity(u, v):\n\n distance = 0.0\n\n ### START CODE HERE ###\n # Compute the dot product between u and v (โ‰ˆ1 line)\n dot = np.dot(u, v)\n # Compute the L2 norm of u (โ‰ˆ1 line)\n norm_u = np.sqrt(np.dot(u, u))\n\n # Compute the L2 norm of v (โ‰ˆ1 line)\n norm_v = np.sqrt(np.dot(v, v)) ##np.linalg.norm(u)\n # Compute the cosine similarity defined by formula (1) (โ‰ˆ1 line)\n cosine_similarity = dot / (norm_u * norm_v)\n ### END CODE HERE ###\n\n return cosine_similarity", "def get_similarity(df):\n count = CountVectorizer()\n count_matrix = count.fit_transform(df[\"bag_of_words\"])\n cosine_sim = cosine_similarity(count_matrix, count_matrix)\n return cosine_sim", "def test_similarity_list():\n list1 = [\"a\", \"b\", \"c\"]\n list2 = [\"b\", \"c\", \"d\", \"e\"]\n 
similarity = pm.compute_similarity_for_list(list1, list2)\n nose.tools.ok_(abs(similarity - 2/3) < tests.FLOAT_DELTA, \"Wrong list similarity\")\n similarity = pm.compute_similarity_for_list(list2, list1) # intentionally asymmetric\n nose.tools.ok_(abs(similarity - 1/2) < tests.FLOAT_DELTA, \"Wrong list similarity\")", "def similar(a, b):\n return SequenceMatcher(None, a, b).ratio()", "def similar(a, b):\n return SequenceMatcher(None, a, b).ratio()", "def try1():\n path = '/Users/mayankkejriwal/datasets/eswc2017/disasters/'\n model = Word2Vec.load_word2vec_format(path+'GoogleNews-vectors-negative300.bin', binary=True)\n model.init_sims(replace=True)\n keys = ['charlotte', 'Charlotte', 'yorktown', 'LA']\n for key in keys:\n try:\n # print model.most_similar(positive=['woman', 'king'], negative=['man'])\n j = model[key]\n print 'found...',\n print key\n except KeyError:\n print 'not found...',\n print key\n continue\n print model.similarity('charlotte', 'carolina')\n print model.similarity('LA', 'California')", "def cosine_sim_collections(a, b):\n setab = sorted(set(a) | set(b))\n countera, counterb = Counter(a), Counter(b)\n veca = [countera[element] if element in a else 0 for element in setab]\n vecb = [counterb[element] if element in b else 0 for element in setab]\n return dot(veca, vecb) / (norm(veca) * norm(vecb))", "def find_similars(self, test_set):\n\n tfidf = TfidfVectorizer(lowercase=False, sublinear_tf=True)\n tfidf_matrix = tfidf.fit_transform(self.train_str)\n\n # Calling only transform on test so that idf calculated on train data\n test_str = [' '.join(q.title) for q in test_set]\n test_tfidf = tfidf.transform(test_str)\n\n simis = self.calculate_similarity(tfidf_matrix, test_tfidf)\n return simis", "def test_poincare_distances_batch(self):\n vector_1 = self.vectors['dog.n.01']\n vectors_2 = self.vectors[['mammal.n.01', 'dog.n.01']]\n distances = self.vectors.vector_distance_batch(vector_1, vectors_2)\n self.assertTrue(np.allclose(distances, [4.5278745, 0]))", "def distance_metric(u, v):\n if len(u) != len(v):\n raise Exception(\n \"Distance metric not valid for differently sized vectors\")\n sum = 0.\n for i in range(len(u)):\n sum += ((u[i] - v[i]) ** 2)\n return math.sqrt(sum)", "def compute_similarity(site_a, site_b):\n return np.linalg.norm(site_a - site_b)", "def test_qsvm_multiclass_all_pairs(self):\n training_input = {'A': np.asarray([[0.6560706, 0.17605998], [0.25776033, 0.47628296],\n [0.8690704, 0.70847635]]),\n 'B': np.asarray([[0.38857596, -0.33775802], [0.49946978, -0.48727951],\n [0.49156185, -0.3660534]]),\n 'C': np.asarray([[-0.68088231, 0.46824423], [-0.56167659, 0.65270294],\n [-0.82139073, 0.29941512]])}\n\n test_input = {'A': np.asarray([[0.57483139, 0.47120732], [0.48372348, 0.25438544],\n [0.48142649, 0.15931707]]),\n 'B': np.asarray([[-0.06048935, -0.48345293], [-0.01065613, -0.33910828],\n [0.06183066, -0.53376975]]),\n 'C': np.asarray([[-0.74561108, 0.27047295], [-0.69942965, 0.11885162],\n [-0.66489165, 0.1181712]])}\n\n total_array = np.concatenate((test_input['A'], test_input['B'], test_input['C']))\n\n aqua_globals.random_seed = self.random_seed\n feature_map = SecondOrderExpansion(feature_dimension=get_feature_dimension(training_input),\n depth=2,\n entangler_map=[[0, 1]])\n try:\n svm = QSVM(feature_map, training_input, test_input, total_array,\n multiclass_extension=AllPairs())\n\n quantum_instance = QuantumInstance(BasicAer.get_backend('qasm_simulator'),\n shots=self.shots,\n seed_simulator=aqua_globals.random_seed,\n 
seed_transpiler=aqua_globals.random_seed)\n result = svm.run(quantum_instance)\n self.assertAlmostEqual(result['testing_accuracy'], 0.444444444, places=4)\n self.assertEqual(result['predicted_classes'], ['A', 'A', 'C', 'A',\n 'A', 'A', 'A', 'C', 'C'])\n except NameError as ex:\n self.skipTest(str(ex))", "def test_distances(self):\n\n cent_1 = np.array([0.5, 0.5])\n verts_1 = np.array([[0., 1.], [0., 0.], [1., 0.], [1., 1.]])\n cent_2 = cent_1 - 0.5\n verts_2 = verts_1 - np.array([0.5, 0.5])\n\n # Compare the center-vertex distances between point sets with rigidly shifted coordinates\n self.assertTrue(all(po.cvdist(verts_1, cent_1) == po.cvdist(verts_2, cent_2)))\n # Compare the vertex-vertex distances between point sets with rigidly shifted coordinates\n self.assertTrue(all(po.vvdist(verts_1) == po.vvdist(verts_2)))", "def fashion_similarity(input_txt, features, keys):\n feature_index = keys.index(input_txt)\n input_vector = features[feature_index]\n\n scores = [similarity_function(input_vector, partner) for partner in features]\n return scores", "def test_jaccard_similarity_matrix_compiled():\n vectors1 = np.array([[1, 1, 0, 0],\n [0, 0, 1, 1]])\n vectors2 = np.array([[0, 1, 1, 0],\n [1, 0, 1, 1]])\n\n scores = jaccard_similarity_matrix(vectors1, vectors2)\n expected_scores = np.array([[1/3, 1/4],\n [1/3, 2/3]])\n assert scores == pytest.approx(expected_scores, 1e-7), \"Expected different scores.\"", "def test_jaccard_similarity_matrix():\n vectors1 = np.array([[1, 1, 0, 0],\n [0, 0, 1, 1]])\n vectors2 = np.array([[0, 1, 1, 0],\n [1, 0, 1, 1]])\n\n scores = jaccard_similarity_matrix.py_func(vectors1, vectors2)\n expected_scores = np.array([[1/3, 1/4],\n [1/3, 2/3]])\n assert scores == pytest.approx(expected_scores, 1e-7), \"Expected different scores.\"", "def compute_cosine_sim(vec1, vec2):\n numer = np.dot(vec1.reshape((300,)), vec2.reshape((300,)))\n denom = np.sqrt(np.sum(np.square(vec1.reshape(300, )))) * np.sqrt(\n np.sum(np.square(vec2.reshape(300, ))))\n\n similarity = numer / denom\n\n return similarity", "def similarity_function_old(feature1, feature2):\n f1Magnitude = feature1.dot(feature1)\n f2Magnitude = feature2.dot(feature2)\n return 1 - feature1.dot(feature2) / (f1Magnitude * f2Magnitude)", "def similarity(self, word1, word2):\n common_vect = +np.ones(self.nEmbed) * 10000\n if word1 not in self.vocab and word2 in self.vocab:\n id_word_2 = self.w2id[word2]\n w1 = common_vect\n w2 = self.U[id_word_2]\n elif word1 in self.vocab and word2 not in self.vocab:\n id_word_1 = self.w2id[word1]\n w1 = self.U[id_word_1]\n w2 = common_vect\n elif word1 not in self.vocab and word2 not in self.vocab:\n w1 = common_vect\n w2 = common_vect\n else:\n id_word_1 = self.w2id[word1]\n id_word_2 = self.w2id[word2]\n w1 = self.U[id_word_1]\n w2 = self.U[id_word_2]\n\n # scalair = w1.dot(w2)/np.linalg.norm(w1,w2)\n similarity = w1.dot(w2) / (np.linalg.norm(w1) * np.linalg.norm(w2))\n # similarity = 1 / (1 + np.exp(-scalair))\n # similarity = scalair / (np.linalg.norm(w1) * np.linalg.norm(w2))\n return similarity", "def test_qsvm_multiclass_one_against_all(self):\n training_input = {'A': np.asarray([[0.6560706, 0.17605998], [0.25776033, 0.47628296],\n [0.8690704, 0.70847635]]),\n 'B': np.asarray([[0.38857596, -0.33775802], [0.49946978, -0.48727951],\n [0.49156185, -0.3660534]]),\n 'C': np.asarray([[-0.68088231, 0.46824423], [-0.56167659, 0.65270294],\n [-0.82139073, 0.29941512]])}\n\n test_input = {'A': np.asarray([[0.57483139, 0.47120732], [0.48372348, 0.25438544],\n [0.48142649, 0.15931707]]),\n 
'B': np.asarray([[-0.06048935, -0.48345293], [-0.01065613, -0.33910828],\n [0.06183066, -0.53376975]]),\n 'C': np.asarray([[-0.74561108, 0.27047295], [-0.69942965, 0.11885162],\n [-0.66489165, 0.1181712]])}\n\n total_array = np.concatenate((test_input['A'], test_input['B'], test_input['C']))\n\n aqua_globals.random_seed = self.random_seed\n feature_map = SecondOrderExpansion(feature_dimension=get_feature_dimension(training_input),\n depth=2,\n entangler_map=[[0, 1]])\n try:\n svm = QSVM(feature_map, training_input, test_input, total_array,\n multiclass_extension=OneAgainstRest())\n quantum_instance = QuantumInstance(BasicAer.get_backend('qasm_simulator'),\n shots=self.shots,\n seed_simulator=aqua_globals.random_seed,\n seed_transpiler=aqua_globals.random_seed)\n result = svm.run(quantum_instance)\n expected_accuracy = 0.444444444\n expected_classes = ['A', 'A', 'C', 'A', 'A', 'A', 'A', 'C', 'C']\n self.assertAlmostEqual(result['testing_accuracy'], expected_accuracy, places=4)\n self.assertEqual(result['predicted_classes'], expected_classes)\n except NameError as ex:\n self.skipTest(str(ex))", "def compute_cosine_similarity(self):\n cos_matrix = []\n for i in range(len(self.train_vec)):\n val = self.vec1 * self.train_vec[i]\n cos_matrix.append(val[0])\n out = np.argmax(cos_matrix)\n print(self.train_output[out])", "def testEquals(self):\n v1 = Vector(3, 4, 5)\n assert (v1 == [3, 4, 5])\n assert (v1 != [0, 2, 4])\n v2 = Vector(3.0, 4.0, 5.000)\n assert (v1 == v2)\n v3 = Vector(2, 2)\n assert v1 != v3", "def test_similarity_measure_size_compatibility():\n\n patch1 = torch.randn(size=(4, 6, 2))\n patch2 = torch.randn(size=(4, 6, 2))\n\n ssd_similarity_measure(patch1, patch2)\n sad_similarity_measure(patch1, patch2)\n assert True # just check if the ssd calculation was successfull\n\n patch1 = torch.randn(size=(4, 3))\n patch2 = torch.randn(size=(4, 3))\n\n ssd_similarity_measure(patch1, patch2)\n sad_similarity_measure(patch1, patch2)\n assert True # just check if the ssd calculation was successfull\n\n patch1 = torch.randn(size=(5,))\n patch2 = torch.randn(size=(5,))\n\n ssd_similarity_measure(patch1, patch2)\n sad_similarity_measure(patch1, patch2)\n assert True # just check if the ssd calculation was successfull\n\n patch1 = torch.randn(size=(3, 7, 2, 4))\n patch2 = torch.randn(size=(3, 7, 2, 4))\n\n ssd_similarity_measure(patch1, patch2)\n sad_similarity_measure(patch1, patch2)\n assert True # just check if the ssd calculation was successful", "def _attentive_matching(self, h1, h2, cosine_matrix, w):\n # h1 * weights, (batch_size, h1_timesteps, mp_dim, embedding_size)\n h1 = self._time_distributed_multiply(h1, w)\n # attentive vector (batch_size, h1_timesteps, embedding_szie)\n attentive_vec = self._mean_attentive_vectors(h2, cosine_matrix)\n # attentive_vec * weights, (batch_size, h1_timesteps, mp_dim, embedding_size)\n attentive_vec = self._time_distributed_multiply(attentive_vec, w)\n # matching vector, (batch_size, h1_timesteps, mp_dim)\n matching = self._cosine_similarity(h1, attentive_vec)\n return matching", "def calculate_similarity(self, cluster0, cluster1):\n\n def compare_spectrum(spectrum0, spectrum1): \n \"\"\"\n Compare a pair of spectra to decide the\n order. 
\n :param: pair of spectra\n :return: 0 equal, -1 spectrum0 is less,\n 1, spectrum0 is bigger.\n \"\"\"\n title0 = spectrum0.get_title() \n title1 = spectrum1.get_title() \n if(title0 < title1): \n return -1\n elif(title0 > title1): \n return 1\n else:\n return 0\n # end of compare_spectrum function\n\n spectra0 = self.sorted_spectra_dict[cluster0.id]\n spectra1 = self.sorted_spectra_dict[cluster1.id]\n\n (n,i,j) = (0,0,0)\n while(i<len(spectra0) and j<len(spectra1)):\n comp_score = compare_spectrum(spectra0[i], spectra1[j])\n if(comp_score < 0):\n i += 1\n elif(comp_score > 0):\n j += 1\n else: \n n += 1\n i += 1\n j += 1\n similarity_score = 0.5 * (n/len(spectra0) + n/len(spectra1))\n return (n,similarity_score)", "def embedding_similarity(model, validation_pairs):\n scores = dict()\n for pair in validation_pairs:\n author1 = pair[0]\n author2 = pair[1]\n scores[author1 + ' ' +\n author2] = cosine_similarity(model.wv[author1], model.wv[author2])\n return scores", "def compute_all_similarities(self,A,a):\n pass", "def weightedSimilarityClassify(sim_vec_dict, weight_vec, sim_thres):\n\n assert sim_thres >= 0.0 and sim_thres <= 1.0, sim_thres\n\n # Check weights are available for all attributes\n #\n first_sim_vec = list(sim_vec_dict.values())[0]\n assert len(weight_vec) == len(first_sim_vec), len(weight_vec)\n\n print('Weighted similarity based classification of %d record pairs' % \\\n (len(sim_vec_dict)))\n print(' Weight vector: %s' % (str(weight_vec)))\n print(' Classification similarity threshold: %.3f' % (sim_thres))\n\n class_match_set = set()\n class_nonmatch_set = set()\n\n weight_sum = sum(weight_vec) # Sum of all attribute weights\n\n # Iterate over all record pairs\n #\n for (rec_id_tuple, sim_vec) in sim_vec_dict.items():\n # ******* Implement weighted similarity classification ********************\n\n pass # Add your code here\n\n # ************ End of your code *******************************************\n\n print(' Classified %d record pairs as matches and %d as non-matches' % \\\n (len(class_match_set), len(class_nonmatch_set)))\n print('')\n\n return class_match_set, class_nonmatch_set", "def cosine_similarity(u: np.ndarray, v: np.ndarray) -> np.float64:\n assert u.shape[0] == v.shape[0], \"Input vector must have same shape.\"\n uv = 0\n uu = 0\n vv = 0\n for i in range(u.shape[0]):\n uv += u[i] * v[i]\n uu += u[i] * u[i]\n vv += v[i] * v[i]\n cosine_score = 0\n if uu != 0 and vv != 0:\n cosine_score = uv / np.sqrt(uu * vv)\n return np.float64(cosine_score)", "def overlap_similarity(box, other_boxes):\n return jaccard(np.expand_dims(box, axis=0), other_boxes).squeeze(0)", "def struct_sim(image1: np.ndarray, image2: np.ndarray, **kwargs) -> np.ndarray:\n n, h, w = image1.shape\n assert (n, h, w) == image2.shape\n ssim = np.zeros(n)\n for ii in range(n):\n ssim[ii] = structural_similarity(image1[ii], image2[ii], **kwargs)\n return ssim", "def wordSimilarityRatio(sent_1,sent_2):", "def test_distances(self):\n distances = self.vectors.distances('dog.n.01', ['mammal.n.01', 'dog.n.01'])\n self.assertTrue(np.allclose(distances, [4.5278745, 0]))\n\n distances = self.vectors.distances('dog.n.01')\n self.assertEqual(len(distances), len(self.vectors.vocab))\n self.assertTrue(np.allclose(distances[-1], 10.04756))", "def get_subscore_mixvec(vec1, vec2, matrixi=matrix, gap_s=gap_s, gap_e=gap_e):\n score = 0\n for i in range(len(vec1)):\n n1s=np.nonzero(vec1[i][:-2])[0]\n n2s=np.nonzero(vec2[i][:-2])[0]\n for n1 in n1s:\n k1=vec1[i][n1]\n for n2 in n2s:\n k2=vec2[i][n2]\n 
pair=(aalist[n1],aalist[n2])\n score += _blosum_match(pair, matrix)*k1*k2\n score += gap_s*max(vec1[i][-2],vec2[i][-2])\n score += gap_e*max(vec1[i][-1],vec2[i][-1])\n return score", "def find_similar_vectors(data, ids, k, k_large,\n n_clusters,\n metric, score_threshold):\n n, d = data.shape\n k = n if k > n else k\n k_large = n if k_large > n else k_large\n n_clusters = n if n < n_clusters else n_clusters\n quantizer = faiss.IndexFlat(d, metric)\n index = faiss.IndexIVFFlat(quantizer, d, n_clusters)\n index.train(data)\n index.add(data)\n\n # Make an expansive search to determine the base level of\n # similarity in this space as the mean similarity of documents\n # in the close vicinity\n index.nprobe = 100\n D, I = index.search(data, k_large)\n base_similarity = D.mean(axis=1) # Calculate the mean distance\n\n # Now subset only the top k results\n D = D[:,:k] # Distances\n I = I[:,:k] # Indexes of the k results\n\n # Extract similar vectors\n similar_vectors = {}\n for _id, all_ids, sims, base in zip(ids, ids[I], D, base_similarity):\n _id = str(_id) # FAISS returns ids as strings\n scores = (base - sims) / base\n over_threshold = scores > score_threshold\n # If no similar results, noting that the query vector is always\n # found so there will always be one result\n if over_threshold.sum() <= 1:\n continue\n results = {i: float(s) for i, s in zip(all_ids, scores)\n if s > score_threshold # Ignore low scores\n and _id != i # Ignore the query vector itself\n and i not in similar_vectors} # Don't duplicate results\n # Possible that there are no similar vectors,\n # depending on the score_threshold\n if len(results) == 0:\n continue\n similar_vectors[_id] = results\n return similar_vectors", "def __eq__(self, candidate):\n return np.linalg.norm(self.components()\n -\n candidate.components()) < 1.e-7", "def plot_similarity(self) -> None:\n if isinstance(self.model, FastTextWrapper):\n self.valid_data[\"vector\"] = self.valid_data[\"text\"].apply(\n lambda x: self.model.inference(word_tokenize(x), sentence_level=True))\n else:\n self.valid_data[\"vector\"] = self.valid_data[\"text\"].apply(\n lambda x: self.model.inference(word_tokenize(x))[0])\n messages = list(self.valid_data[\"label\"])\n vectors = list(self.valid_data[\"vector\"])\n similarity_matrix(messages=messages, vectors=vectors, name=self.folder, save_path=self.base_path)", "def matching_score(self,set1, set2):\n set_set1=set(set1)\n set_set2=set(set2)\n '''print(\" set_set12\")\n print(set_set1)\n print(set_set2)'''\n return len(set_set1.intersection(set_set2)) ** 2 / (float(len(set1)) * len(set2))\n #return len(set_set1.intersection(set_set2)) / len(set_set1.union(set_set2))", "def test_u_distance_correlation_vector(self):\n return self._test_u_distance_correlation_vector_generic(\n vector_type=float\n )", "def vector_equal(v1,v2):\n if (v2.x - 0.001 <= v1.x <= v2.x + 0.001) and \\\n (v2.y - 0.001 <= v1.y <= v2.y + 0.001) and \\\n (v2.z - 0.001 <= v1.z <= v2.z + 0.001):\n return True" ]
[ "0.750954", "0.66709244", "0.65119016", "0.6507448", "0.65028244", "0.6483979", "0.6449786", "0.64489347", "0.6439888", "0.64329153", "0.6320735", "0.6308931", "0.63024855", "0.62732375", "0.6246762", "0.6246572", "0.6237279", "0.6201954", "0.61760354", "0.6173955", "0.61729133", "0.61533636", "0.6135139", "0.61227435", "0.611243", "0.6078673", "0.60696894", "0.60645086", "0.6062002", "0.60385287", "0.60381615", "0.60366553", "0.60303104", "0.6028681", "0.6005616", "0.59987366", "0.59954315", "0.5990278", "0.598052", "0.5979897", "0.59676147", "0.59664494", "0.59505117", "0.5949019", "0.5933383", "0.5932526", "0.59304917", "0.5925627", "0.58912736", "0.58722353", "0.5863176", "0.5844122", "0.5843843", "0.5836479", "0.5835242", "0.58122593", "0.5798864", "0.57854235", "0.57838726", "0.57813", "0.5772642", "0.57565224", "0.5748781", "0.5744634", "0.5744634", "0.57414293", "0.5738743", "0.57256633", "0.5717469", "0.5715109", "0.57123256", "0.57086056", "0.5704999", "0.570484", "0.56992227", "0.56972104", "0.56887513", "0.5685194", "0.5674773", "0.5665544", "0.56622905", "0.5661554", "0.56566083", "0.5647436", "0.5646709", "0.56453246", "0.5644074", "0.56430185", "0.5642685", "0.56418824", "0.5634779", "0.5614067", "0.5601282", "0.56011283", "0.5597001", "0.5588865", "0.5587945", "0.5585853", "0.5582776", "0.55816114" ]
0.55804986
100
Method to compile the BigQuery-specific script execution command.
def generate_provider_specific_cmd_list(script, driver, output, error):
    cmd_list = [driver, FLAGS.bq_project_id, FLAGS.bq_dataset_id, script, output, error]
    return cmd_list
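A minimal sketch of how the returned command list might be consumed; the _Flags stand-in, the flag values, the script path, and the driver path below are assumptions made for illustration only and are not part of the record above.

class _Flags:
    # Assumed stand-in for the gflags/absl-style FLAGS object referenced by the snippet above.
    bq_project_id = "example-gcp-project"
    bq_dataset_id = "example_dataset"

FLAGS = _Flags()

def generate_provider_specific_cmd_list(script, driver, output, error):
    # Positional layout: driver, project id, dataset id, script, stdout file, stderr file.
    return [driver, FLAGS.bq_project_id, FLAGS.bq_dataset_id, script, output, error]

cmd = generate_provider_specific_cmd_list(
    script="queries/q1.sql",   # hypothetical script path
    driver="./bq_driver.sh",   # hypothetical driver executable
    output="q1.out",
    error="q1.err",
)
print(" ".join(cmd))  # in practice the assembled list would be handed to a subprocess call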
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _define_script_command(command_name,\n parent_shell,\n bootstrap_script,\n container_path,\n scripts_path,\n script):\n script_fragment = \"\\\"{}\\\"\".format(script) if script else \"\"\n parent_shell.define_command(command_name,\n \"python \\\"{bootstrap}\\\" \"\n \"-d \\\"{container}\\\" \"\n \"-r \\\"{scripts}\\\" \"\n \"-s {script}\"\n \"\".format(bootstrap=bootstrap_script,\n container=container_path,\n scripts=scripts_path,\n script=script_fragment))", "def writeScript( tspec, xdb, plat, \\\n vvtestdir, projdir, srcdir, \\\n onopts, offopts,\n scriptname ):\n \n scriptbasename = os.path.basename( scriptname )\n \n idstr = tspec.getName()\n for (n,v) in tspec.getParameters().items():\n idstr = idstr + ' ' + n + '=' + v\n \n line_list = []\n \n # start the csh script\n \n line_list.extend([ \\\n '#!/bin/csh -f', '',\n '# clear variables and aliases to prevent the users',\n '# environment from interferring with the test',\n 'unalias rm',\n 'unalias ls',\n 'unalias cp',\n 'unalias ln',\n 'unalias mv',\n 'unalias set',\n 'unalias setenv',\n 'unalias echo',\n 'unset newbaseline',\n 'unsetenv newbaseline', '',\n 'set analyze = 0',\n 'echo \"++++++++++++++++ begin ' + idstr + '\"', '',\n 'echo \" \"',\n 'set have_diff = no', '' ])\n \n # the common database might have variables to clear from the environment\n \n s = xdb.getClear()\n if s != None:\n line_list.append( s )\n \n # option parsing is for things that can only be known during invocation\n \n line_list.extend( [ \\\n '',\n '# parse command line options',\n '@ i = 1',\n 'while ($i <= $#argv)',\n ' switch (\"$argv[$i]\")',\n ' case --baseline:',\n ' set newbaseline = 1',\n ' breaksw',\n ' case --mpirun_opts:',\n ' @ i += 1',\n ' setenv MPI_OPT \"$argv[$i]\"',\n ' echo \"MPI_OPT=$MPI_OPT\"',\n ' breaksw',\n ' case --execute_analysis_sections:',\n ' set analyze = 1',\n ' breaksw',\n ' endsw',\n ' @ i += 1',\n 'end', '',\n '' ] )\n \n # set variables guaranteed to be defined for each test\n \n line_list.extend( [ \\\n '',\n '# variables defined for all tests',\n 'set NAME = \"' + tspec.getName() + '\"',\n 'set PLATFORM = ' + plat.getName(),\n 'echo \"PLATFORM = $PLATFORM\"',\n 'set COMPILER = ' + plat.getCompiler(),\n 'echo \"COMPILER = $COMPILER\"',\n 'set TOOLSET_DIR = ' + vvtestdir,\n 'echo \"TOOLSET_DIR = $TOOLSET_DIR\"' ] )\n if projdir:\n line_list.append( 'set PROJECT = ' + projdir )\n else:\n line_list.append( 'set PROJECT =' )\n line_list.extend( [ \\\n 'echo \"PROJECT = $PROJECT\"',\n 'set ON = \"' + '+'.join(onopts) + '\"',\n 'echo ON = \"$ON\"',\n 'set OFF = \"' + '+'.join(offopts) + '\"',\n 'echo OFF = \"$OFF\"',\n 'set np = ' + str(tspec.getParameters().get('np',0)),\n 'set SRCDIR = \"' + srcdir + '\"',\n 'set XMLDIR = \"' + srcdir + '\"',\n 'echo \"XMLDIR = $XMLDIR\"' ] )\n \n # set variables defined by the platform\n \n line_list.extend( [ '', '# variables defined by the platform' ] )\n for (k,v) in plat.getEnvironment().items():\n line_list.extend( [ \\\n 'setenv ' + k + ' \"' + v + '\"',\n 'echo \"' + k + ' = $' + k + '\"' ] )\n \n # set defines and variables contained in the common database\n \n line_list.extend( [ '',\n '######## common database definitions ########', '' ] )\n \n for cs in xdb.getDefines():\n dfn = cs.getDefine( plat.getName() )\n assert dfn != None\n line_list.append( dfn )\n \n for cs in xdb.getVariables():\n \n varL = cs.getVariable( plat.getName() )\n assert varL != None\n assert len(varL) == 2\n \n vname = varL[0]\n if len(varL[1]) == 2:\n # a path list is to be used to define the 
variable\n assert vname != None\n paths = varL[1][0]\n flags = varL[1][1]\n line_list.extend( [ \\\n 'foreach p (' + ' '.join(paths) + ')',\n ' if ( -e $p ) then',\n ' set ' + vname + ' = \"$p ' + flags + '\"',\n ' break',\n ' endif',\n 'end' ] )\n else:\n # a script fragment is to be used to define the variable\n assert len(varL[1]) == 1\n line_list.append( varL[1][0] )\n \n if vname != None:\n line_list.extend( [ \\\n 'if ( $?' + vname + ' ) then',\n ' echo \"' + vname + ' = $' + vname + '\"',\n 'else',\n ' echo \"' + vname + ' is not defined\"',\n 'endif', '' ] )\n \n # set the problem parameter variables\n \n line_list.extend( [ '', '# parameters defined by the test' ] )\n for (k,v) in tspec.getParameters().items():\n line_list.extend( [ \\\n 'set ' + k + ' = ' + v,\n 'echo \"' + k + ' = $' + k + '\"' ] )\n \n # put the baseline fragment in before the file removal occurs\n \n line_list.extend( [ '',\n '# copy baseline files if this is a rebaselining execution' ] )\n line_list.extend( [ \\\n 'if ($?newbaseline) then',\n ' set echo', '' ] )\n\n frag = tspec.getBaselineScript()\n if frag:\n line_list.append( frag )\n\n line_list.extend( [ \\\n ' exit 0',\n 'endif', '' ] )\n \n # finally, add the main execution fragments\n\n if not tspec.isAnalyze():\n \n line_list.extend( [ '',\n '######## main execution fragments ########', '' ] )\n \n for name,content,exitstat,analyze in tspec.getExecutionList():\n \n if name == None:\n if not analyze:\n line_list.extend( [ '', 'if ( $analyze == 0 ) then' ] )\n \n # a raw csh script fragment\n line_list.append( content )\n \n if not analyze:\n line_list.extend( [ 'endif', '' ] )\n \n else:\n # a named executable block\n cs = xdb.findContent( name )\n if cs != None:\n \n # get the common script fragment from the common specification\n frag = cs.getContent( plat.getName() )\n \n if frag != None and frag:\n # substitute the content from the test into $(CONTENT) patterns\n frag = content_replace_re.sub( content, frag )\n \n # the invocation of the script fragment may expect and handle\n # non-zero exit statuses\n x = \"0\"\n if exitstat != None:\n if exitstat == \"fail\": x = \"1\"\n elif exitstat == \"anyexit\" or exitstat == \"any\": x = \"-1\"\n frag = expect_status_replace_re.sub( x, frag )\n \n if not cs.isAnalyze():\n line_list.extend( [ '', 'if ( $analyze == 0 ) then' ] )\n \n line_list.append( frag )\n \n if not cs.isAnalyze():\n line_list.extend( [ 'endif', '' ] )\n \n else:\n # could not find the name in the database; make sure the test fails\n line_list.extend( [ \\\n '''echo \"*** error: the test specification file refers to the \"'\"'\"''' + \\\n name +'\"'+ \"\"\"'\"'\"\"\",\n 'echo \" execute fragment, but the fragment database did\"',\n 'echo \" not contain that fragment name\"',\n 'exit 1', '' ] )\n \n else:\n line_list.append('################ begin analyze script')\n line_list.append('')\n\n paramset = tspec.getParameterSet()\n if paramset != None:\n psetD = paramset.getParameters()\n if len(psetD) > 0:\n # provide the parameter names and values that formed the\n # children tests\n for n,L in psetD.items():\n n2 = '_'.join( n )\n L2 = [ '/'.join( v ) for v in L ]\n line_list.append( 'set PARAM_'+n2+' = ( ' + ' '.join(L2) + ' )' )\n line_list.append( 'echo \"PARAM_'+n2+' = $PARAM_'+n2+'\"' )\n line_list.append('')\n\n line_list.extend( tspec.getAnalyzeScript().split( os.linesep ) )\n line_list.append('')\n line_list.append('################ end analyze script')\n line_list.append('')\n \n # lastly, check for a diff status\n \n 
line_list.extend( [ \\\n '',\n '# check for a diff status before quitting',\n 'echo \" \"',\n 'if ( \"$have_diff\" != \"no\" ) then',\n ' echo \"*** at least one diff test showed differences; exiting diff\"',\n ' exit ' + str(diffExitStatus),\n 'endif',\n 'echo \"++++++++++++++++ SUCCESS: ' + idstr + '\"' ] )\n \n line_list.append( 'exit 0' )\n \n fp = open( scriptname, 'w' )\n for l in line_list:\n fp.write( l + '\\n' )\n fp.close()\n \n perm = stat.S_IMODE( os.stat(scriptname)[stat.ST_MODE] )\n perm |= stat.S_IXUSR\n try:\n os.chmod( scriptname, perm )\n except:\n pass", "def compile(self) -> str:\n compiled_command = (\n f\"{PUMP_ADDRESS[self.target_pump_num]}\"\n f\"{self.target_syringe}\"\n f\"{self.command}{self.command_value}\"\n )\n\n if self.parameter_value:\n compiled_command += f\"{self.optional_parameter}{self.parameter_value}\"\n\n return compiled_command + self.execution_command", "def job_script(self):\n quoted_arguments = quote_arguments([self._command_template])\n quoted_environment = quote_environment(self.env_dict)\n job_header_lines = \"\\n\".join(\n \"%s = %s\" % (k, v) for k, v in self.job_header_dict.items()\n )\n return self._script_template % {\n \"shebang\": self.shebang,\n \"job_header\": job_header_lines,\n \"quoted_environment\": quoted_environment,\n \"quoted_arguments\": quoted_arguments,\n \"executable\": self.executable,\n }", "def build_step(self):\n run_cmd('./compile.sh', log_all=True, simple=True, log_ok=True)", "def _run_compile_cmd(arg_map):\n code_type = arg_map['code_type']\n project_root_dir = arg_map['project_root_dir']\n project_env_name = arg_map['project']\n runlevel_name = arg_map['runlevel']\n project_env = ProjectEnv.from_string(project_env_name)\n runlevel = RunLevel.from_string(runlevel_name)\n exit_code = _compile(project_root_dir, code_type, project_env, runlevel)\n return exit_code", "def executescript(c, of, debug = False):\n\tquery_list = []\n\tquery_list_candidates = of.readlines()\n\tfor line in query_list_candidates:\n\t\t# process out comment lines\n\t\tif line.startswith(\"--\"):\n\t\t\tpass\n\t\telse:\n\t\t\tif line.strip() != \"\":\n\t\t\t\tquery_list.append(line.strip())\n\tquery_list = \" \".join(query_list).split(';')\n\tfor query in query_list:\n\t\tif query.strip():\n\t\t\tif debug:\n\t\t\t\tprint \"executescript [status] : executing query:\\n\\t%s\\n\" % (query.strip())\n\t\t\tc.execute(query.strip())", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--no-parameterize\",\n action=\"store_true\",\n help=\"Generate a query without parameters\",\n )\n parser.add_argument(\n \"--source-table\",\n type=str,\n help=\"Name of Glean table\",\n default=\"org_mozilla_fenix_stable.metrics_v1\",\n )\n args = parser.parse_args()\n\n # If set to 1 day, then runs of copy_deduplicate may not be done yet\n submission_date = (\n \"date_sub(current_date, interval 2 day)\"\n if args.no_parameterize\n else \"@submission_date\"\n )\n header = (\n \"-- Query generated by: python3 -m \"\n \"bigquery_etl.glam.clients_daily_scalar_aggregates \"\n f\"--source-table {args.source_table}\"\n + (\" --no-parameterize\" if args.no_parameterize else \"\")\n )\n\n schema = get_schema(args.source_table)\n unlabeled_metric_names = get_scalar_metrics(schema, \"unlabeled\")\n labeled_metric_names = get_scalar_metrics(schema, \"labeled\")\n unlabeled_metrics = get_unlabeled_metrics_sql(unlabeled_metric_names).strip()\n labeled_metrics = get_labeled_metrics_sql(labeled_metric_names).strip()\n\n if not unlabeled_metrics and not 
labeled_metrics:\n print(header)\n print(\"-- Empty query: no probes found!\")\n sys.exit(1)\n print(\n render_main(\n header=header,\n source_table=args.source_table,\n submission_date=submission_date,\n attributes=ATTRIBUTES,\n unlabeled_metrics=unlabeled_metrics,\n labeled_metrics=labeled_metrics,\n ping_type=ping_type_from_table(args.source_table),\n )\n )", "def execute(self, task, script, **kwargs):\n locals().update(kwargs)\n exec(script)", "def _build_container_script(self,\n name,\n settings,\n logger):\n raise NotImplementedError(\"'_build_container_script' not implemented.\")", "def execute_statement(self, bql_statement_ast, pretty=True, timing=False, plots=None, yes=False,\n debug=False, pandas_df=None, pandas_output=True, key_column=None,\n return_raw_result=False, force_output=False):\n if timing:\n start_time = time.time()\n\n parser_out = None\n # TODO move pyparsing objects out of client into parser\n if debug:\n parser_out = self.parser.parse_single_statement(bql_statement_ast)\n else:\n try:\n parser_out = self.parser.parse_single_statement(bql_statement_ast)\n except Exception as e:\n raise utils.BayesDBParseError(str(e))\n if parser_out is None:\n print(\"Could not parse command. Try typing 'help' for a list of all commands.\")\n return\n elif not parser_out:\n return\n\n method_name, args_dict, client_dict = parser_out\n if client_dict is None:\n client_dict = {}\n\n # Do stuff now that you know the user's command, but before passing it to engine.\n if method_name == 'execute_file':\n return dict(message='execute_file', bql_string=open(args_dict['filename'], 'r').read())\n elif method_name == 'update_codebook':\n _, codebook_rows = data_utils.read_csv(client_dict['codebook_path'], has_header=True)\n # TODO: require specific codebook_header values? Or don't require a header,\n # and if the first value in the header is actually a data column name, assume\n # the first row is codebook data, not a header.\n\n # Create a dict indexed by column name\n codebook = dict()\n for codebook_row in codebook_rows:\n codebook[codebook_row[0]] = dict(zip(['short_name', 'description', 'value_map'],\n codebook_row[1:]))\n\n args_dict['codebook'] = codebook\n elif (method_name == 'drop_btable') and (not yes):\n # If dropping something, ask for confirmation.\n print(\"Are you sure you want to permanently delete this btable, and all associated \"\n \"models, without any way to get them back? Enter 'y' if yes.\")\n user_confirmation = raw_input()\n if 'y' != user_confirmation.strip():\n return dict(message=\"Operation canceled by user.\")\n elif (method_name == 'drop_models') and (not yes):\n # If dropping something, ask for confirmation.\n print(\"Are you sure you want to permanently delete model(s), without any way to get \"\n \"them back? Enter 'y' if yes.\")\n user_confirmation = raw_input()\n if 'y' != user_confirmation.strip():\n return dict(message=\"Operation canceled by user.\")\n elif method_name == 'load_models':\n pklpath = client_dict['pkl_path']\n try:\n model_data = pickle.load(gzip.open(self.parser.get_absolute_path(pklpath), 'rb'))\n except IOError as e:\n if pklpath[-7:] != '.pkl.gz':\n if pklpath[-4:] == '.pkl':\n model_data = pickle.load(open(self.parser.get_absolute_path(pklpath), 'rb'))\n else:\n pklpath = pklpath + \".pkl.gz\"\n model_data = pickle.load(gzip.open(self.parser.get_absolute_path(pklpath),\n 'rb'))\n else:\n raise utils.BayesDBError('Models file %s could not be found.' 
% pklpath)\n # This is the more recent version, where schema is stored with models.\n if 'schema' in model_data.keys():\n args_dict['models'] = model_data['models']\n args_dict['model_schema'] = model_data['schema']\n # This support older saved models, where only the model info was stored.\n else:\n args_dict['models'] = model_data\n args_dict['model_schema'] = None\n\n # Older versions of model_schema just had a str cctype as the dict items.\n # Newest version has a dict of cctype and parameters. Use this values to\n # test the recency of the models.\n model_schema = args_dict['model_schema']\n if model_schema:\n model_schema_itemtype = type(model_schema[model_schema.keys()[0]])\n else:\n model_schema_itemtype = None\n\n if model_schema is None or model_schema_itemtype != dict:\n args_dict['model_schema'] = None\n if not yes:\n print \"\"\"WARNING! The models you are currently importing were saved without a schema\n or without detailed column parameters (probably from a previous version).\n\n If you are loading models into the same table from which you created them, problems\n are unlikely, unless you have dropped models and then updated the schema.\n\n If you are loading models into a different table from which you created them, you\n should verify that the table schemas are the same.\n\n Please use \"SAVE MODELS FROM <btable> TO <filename.pkl.gz>\" to create an updated copy of your models.\n\n Are you sure you want to load these model(s)?\n \"\"\"\n user_confirmation = raw_input()\n if 'y' != user_confirmation.strip():\n return dict(message=\"Operation canceled by user.\")\n elif method_name == 'create_btable':\n if pandas_df is None:\n header, rows = data_utils.read_csv(client_dict['csv_path'])\n else:\n header, rows = data_utils.read_pandas_df(pandas_df)\n args_dict['header'] = header\n args_dict['raw_T_full'] = rows\n args_dict['key_column'] = key_column\n args_dict['subsample'] = False\n\n if 'codebook_path' in client_dict:\n _, codebook_rows = data_utils.read_csv(client_dict['codebook_path'],\n has_header=True)\n # TODO: require specific codebook_header values? Or don't require a header,\n # and if the first value in the header is actually a data column name, assume\n # the first row is codebook data, not a header.\n\n # Create a dict indexed by column name\n codebook = dict()\n for codebook_row in codebook_rows:\n codebook[codebook_row[0]] = dict(zip(['short_name', 'description', 'value_map'],\n codebook_row[1:]))\n args_dict['codebook'] = codebook\n else:\n warning = dedent(\"\"\"\n WARNING!\n\n You are creating a btable without a codebook, which will make interpretation\n of results more difficult. Codebooks should be in CSV format with each row\n corresponding to one column of the original data. The codebook should have four\n columns:\n\n 1. actual column name\n 2. short column description\n 3. long column description\n 4. value map (optional, only used for categorical columns - should be in JSON\n format)\n \"\"\")\n print(warning)\n\n # Display warning messages and get confirmation if btable is too large.\n # Ask user if they want to turn on subsampling.\n max_columns = 200\n max_rows = 1000\n max_cells = 100000\n message = None\n if not yes:\n if len(rows[0]) > max_columns:\n message = \"The btable you are uploading has %d columns, but BayesDB is \" \\\n \"currently designed to support only %d columns. If you proceed, \" \\\n \"performance may suffer unless you set many columns' datatypes to \" \\\n \"'ignore'. Would you like to continue? 
Enter 'y' if yes.\" \\\n % (len(rows[0]), max_columns)\n if len(rows) > max_rows:\n message = \"The btable you are uploading has %d rows, but BayesDB is currently \"\\\n \"designed to support only %d rows. If you proceed, performance may \"\\\n \"suffer. Would you like to continue? Enter 'y' to continue without \"\\\n \"subsampling, 'n' to abort, 's' to continue by subsampling %d rows, \"\\\n \"or a positive integer to specify the number of rows to be \"\\\n \"subsampled.\" % (len(rows), max_rows, max_rows)\n if len(rows[0])*len(rows) > max_cells:\n message = \"The btable you are uploading has %d cells, but BayesDB is currently\"\\\n \" designed to support only %d cells. If you proceed, performance may\"\\\n \" suffer unless you enable subsampling. Enter 'y' to continue \"\\\n \" without subsampling, 'n' to abort, 's' to continue by subsampling \"\\\n \"%d rows, or a positive integer to specify the number of rows to be \"\\\n \"subsampled.\" % (len(rows)*len(rows[0]), max_cells, max_rows)\n if message is not None:\n print(message)\n user_confirmation = raw_input()\n if 'y' == user_confirmation.strip():\n pass\n elif 'n' == user_confirmation.strip():\n return dict(message=\"Operation canceled by user.\")\n elif 's' == user_confirmation.strip():\n args_dict['subsample'] = min(max_rows, len(rows))\n elif utils.is_int(user_confirmation.strip()):\n args_dict['subsample'] = int(user_confirmation.strip())\n else:\n return dict(message=\"Operation canceled by user.\")\n elif method_name in ['label_columns', 'update_metadata']:\n if client_dict['source'] == 'file':\n header, rows = data_utils.read_csv(client_dict['csv_path'])\n args_dict['mappings'] = {key: value for key, value in rows}\n\n # Call engine.\n result = self.call_bayesdb_engine(method_name, args_dict, debug)\n\n # If error occurred, exit now.\n if 'error' in result and result['error']:\n if pretty:\n print(result['message'])\n if force_output:\n return result\n else:\n return result['message']\n else:\n return result\n\n # Do stuff now that engine has given you output, but before printing the result.\n result = self.callback(method_name, args_dict, client_dict, result)\n\n if return_raw_result:\n raw_result = {\n 'result': result,\n 'method_name': method_name,\n 'client_dict': client_dict}\n print(\"returning raw result for %s\" % (method_name))\n return raw_result\n\n assert type(result) != int\n\n if timing:\n end_time = time.time()\n print('Elapsed time: %.2f seconds.' 
% (end_time - start_time))\n\n if plots is None:\n plots = 'DISPLAY' in os.environ.keys()\n\n if 'matrix' in result and (plots or client_dict['filename']):\n # Plot matrices\n plotting_utils.plot_matrix(result['matrix'], result['column_names'], result['title'],\n client_dict['filename'])\n if pretty:\n if 'column_lists' in result:\n print(self.pretty_print(dict(column_lists=result['column_lists'])))\n\n if force_output:\n return result\n else:\n return self.pretty_print(result)\n else:\n return result\n if ('plot' in client_dict and client_dict['plot']):\n if (plots or client_dict['filename']):\n # Plot generalized histograms or scatterplots\n\n try:\n plotting_M_c = result['metadata_full']['M_c_full']\n except KeyError:\n plotting_M_c = result['M_c']\n\n plot_remove_key = method_name in ['select', 'infer']\n plotting_utils.plot_general_histogram(result['column_names'], result['data'],\n plotting_M_c, result['schema_full'],\n client_dict['filename'],\n client_dict['scatter'],\n remove_key=plot_remove_key)\n return self.pretty_print(result)\n else:\n if 'message' not in result:\n result['message'] = \"\"\n result['message'] = \"Your query indicates that you would like to make a plot, but \"\\\n \"in order to do so, you must either enable plotting in a \"\\\n \"window or specify a filename to save to by appending 'SAVE \"\\\n \"TO <filename>' to this command.\\n\" + result['message']\n\n if pretty:\n pp = self.pretty_print(result)\n print(pp)\n\n # Print warnings last so they're readable without scrolling backwards.\n if 'warnings' in result:\n \"\"\" Pretty-print warnings. \"\"\"\n for warning in result['warnings']:\n print('WARNING: %s' % warning)\n\n if pandas_output and 'data' in result and 'column_labels' in result:\n result_pandas_df = data_utils.construct_pandas_df(result)\n return result_pandas_df\n else:\n return result", "def execute(self, code, environment = dict()):\r\n if not self.config.get('scripting', 'enable') and type(code) == str:\r\n self.send(code, log = False)\r\n else:\r\n if type(code) == str:\r\n c = compile(code, 'errors.log', 'exec')\r\n else:\r\n c = code\r\n eval(c, self.getEnvironment(environment))", "def parse_script_cmd(self, line):\n line, _ = self.find_vars_in_str(line)\n words = line.split()\n words[1] = gen_parse.rm_quotation_marks(words[1])\n filepath = gen_io.get_abs_path(words[1])\n if len(words) == 2:\n self.exec_python_script(filepath)\n else:\n if words[2] == 'python':\n self.exec_python_script(filepath)\n elif words[2] == \"bash\":\n self.exec_bash_script(filepath)\n else:\n self.print_error(f\"'{words[2]}' scripts not yet suported\")", "def pyscript_subcommand():", "def run_script():\n # pylint: disable=unsupported-assignment-operation\n script_source.data['script'] = [inp_script.value]", "def prepare_sub_script(i):\n\n run_cmd=''\n\n target_os_cfg=i['target_os_cfg']\n\n remote=False\n if target_os_cfg.get('remote','')=='yes': \n remote=True\n\n script_name=i['run_script']\n\n script_path=''\n if 'run_script_uoa' in i and i['run_script_uoa']!='':\n# cm_kernel.print_for_con('')\n# cm_kernel.print_for_con('Preparing path for OS script '+i['run_script_uoa']+' ...')\n\n ii={'cm_run_module_uoa':ini['cfg']['cm_modules']['os.script'],\n 'cm_action':'load',\n 'cm_data_uoa':i['run_script_uoa']}\n r=cm_kernel.access(ii)\n if r['cm_return']>0: return r\n\n script_cfg=r['cm_data_obj']['cfg']\n script_path=r['cm_path']\n\n if 'scripts' not in script_cfg or i['run_script'] not in script_cfg['scripts']:\n return {'cm_return':1, 'cm_error':'can\\'t find script in 
os.script configuration'}\n\n script_name=script_cfg['scripts'][script_name]\n\n script_name+=target_os_cfg['script_ext']\n\n run_name=script_name\n if script_path!='':\n run_name=os.path.join(script_path, run_name)\n elif 'exec_prefix' in target_os_cfg and target_os_cfg['exec_prefix']!='': \n run_name=target_os_cfg['exec_prefix']+run_name\n\n if target_os_cfg.get('set_executable','')!='':\n p=target_os_cfg['set_executable']+' '+run_name\n x=os.system(p)\n\n run_cmd=''\n if remote and target_os_cfg.get('no_script_execution','')=='yes':\n r=cm_kernel.load_array_from_file({'cm_filename':run_name})\n if r['cm_return']>0: return r\n a=r['cm_array']\n for x in a:\n xx=x.strip()\n if xx!='' and not xx.startswith(target_os_cfg['rem']):\n if run_cmd!='': run_cmd+=target_os_cfg['env_separator']+' '\n run_cmd+=xx\n run_name=''\n else:\n run_cmd=run_name\n\n if i.get('run_cmd','')!='': run_cmd+=' '+i['run_cmd']\n\n if i.get('run_cmd_out1','')!='': run_cmd+=' 1>'+i['run_cmd_out1']\n if i.get('run_cmd_out2','')!='': run_cmd+=' 2>'+i['run_cmd_out2']\n\n\n return {'cm_return':0, 'run_cmd':run_cmd}", "def run_sql(self, sql):\n def mk_run_sql_q(sql):\n return {\n 'type' : 'run_sql',\n 'args': {\n 'sql' : sql\n }\n }\n return self.v1q(mk_run_sql_q(sql))", "def __cmd_builder(self):\n self.cmd = 'python -m lizard \"%s\" ' % self.get_proj_path()\n args = \"\"\n if self.get_cyclo_args():\n args = self.get_cyclo_args()\n exclude = \",\".join(str(x) for x in self.get_cyclo_exclude() if x is not None)\n if exclude:\n exclude = ','.join(' -x \"{0}\"'.format(w) for w in exclude.rstrip().split(','))\n self.cmd = self.cmd + args + \" \" + exclude + \" --csv\"\n print(self.cmd) # pragma: no mutate", "def gen_qsub_script(\n crop, batch_ids=None, *, scheduler=\"sge\", **kwargs\n): # pragma: no cover\n warnings.warn(\n \"'gen_qsub_script' is deprecated in favour of \"\n \"`gen_cluster_script` and will be removed in the future\",\n FutureWarning,\n )\n return gen_cluster_script(crop, scheduler, batch_ids=batch_ids, **kwargs)", "def _build_container_script(self, name, job_settings, logger):\n if not isinstance(job_settings, dict) or \\\n not isinstance(name, basestring):\n logger.error(\"Singularity Script malformed\")\n return None\n\n if 'image' not in job_settings or 'command' not in job_settings or\\\n 'max_time' not in job_settings:\n logger.error(\"Singularity Script malformed\")\n return None\n\n script = '#!/bin/bash -l\\n\\n'\n\n # NOTE an uploaded script could also be interesting to execute\n if 'pre' in job_settings:\n for entry in job_settings['pre']:\n script += entry + '\\n'\n\n# ################### Torque settings ###################\n if 'nodes' in job_settings:\n resources_request = \"nodes={}\".format(job_settings['nodes'])\n\n if 'tasks_per_node' in job_settings:\n resources_request += ':ppn={}'.format(\n job_settings['tasks_per_node'])\n\n script += '#PBS -l walltime={}\\n'.format(resources_request)\n else:\n if 'tasks_per_node' in job_settings:\n logger.error(\n r\"Specify 'tasks_per_node' while 'nodes' is not specified\")\n\n # if 'tasks' in job_settings:\n # script += '#qsub -n ' + str(job_settings['tasks']) + '\\n'\n\n script += '#PBS -l walltime={}\\n\\n'.format(job_settings['max_time'])\n# #######################################################\n\n script += '\\n# DYNAMIC VARIABLES\\n\\n'\n\n # NOTE an uploaded script could also be interesting to execute\n if 'pre' in job_settings:\n for entry in job_settings['pre']:\n script += entry + '\\n'\n\n script += '\\nmpirun singularity exec '\n\n if 
'home' in job_settings and job_settings['home'] != '':\n script += '-H ' + job_settings['home'] + ' '\n\n if 'volumes' in job_settings:\n for volume in job_settings['volumes']:\n script += '-B ' + volume + ' '\n\n # add executable and arguments\n script += job_settings['image'] + ' ' + job_settings['command'] + '\\n'\n\n # NOTE an uploaded script could also be interesting to execute\n if 'post' in job_settings:\n for entry in job_settings['post']:\n script += entry + '\\n'\n\n return script", "def generic_script(contents,\n job_name=None,\n stdout='/dev/null',\n stderr='/dev/null',\n shebang='#!/bin/bash',\n numnodes=None,\n numcpu=None,\n queue=None,\n walltime=None,\n mem=None,\n pmem=None):\n me = __file__\n current_time = time.strftime('%H:%M %D')\n\n if job_name is None:\n job_name = 'unnamed_job'\n\n if numnodes is None:\n numnodes = str(config.getint('pbs','numnodes'))#str(configuration.numnodes)\n\n if numcpu is None:\n numcpu = str(config.getint('pbs','numprocs'))#str(configuration.numprocs)\n\n if pmem:\n pmem = ',pmem=' + pmem\n else:\n pmem=''\n\n\n if mem:\n mem = ',mem=' + mem\n else:\n mem=''\n\n\n if queue is None:\n queue = config.read('pbs','queue')#configuration.queue\n\n additional_configuration_lines = []\n\n if queue is not None:\n additional_configuration_lines.append(\"#PBS -q %(queue)s\" % locals())\n\n if walltime is None:\n walltime = config.getint('pbs','walltime')#configuration.walltime\n\n if walltime is not None:\n additional_configuration_lines.append(\"#PBS -l walltime=%(walltime)s\" % locals())\n\n additional_configuration = '\\n'.join(additional_configuration_lines)\n\n the_script = \"\"\"#!/bin/bash\n# Created by pbs.py\n#PBS -N unnamed_job\n#PBS -l npcus=1\n#PBS -l walltime=1:00:00\n#PBS -j oe\n\n{ad_confg}\n\n{c}\n\"\"\".format(ad_confg = additional_configuration, c=contents)\n\n return the_script", "def _compile_endpoint_command(endpoint_file, out):\n compile_endpoint_command(endpoint_file, out)", "def compile_script(s):\n f = io.BytesIO()\n for t in s.split():\n if t in OPCODE_TO_INT:\n f.write(bytes_from_int(OPCODE_TO_INT[t]))\n elif (\"OP_%s\" % t) in OPCODE_TO_INT:\n f.write(bytes_from_int(OPCODE_TO_INT[\"OP_%s\" % t]))\n else:\n if (t[0], t[-1]) == ('[', ']'):\n t = t[1:-1]\n if len(t) == 1:\n t = \"0\" + t\n if t[:2] == \"0x\":\n t = t[2:]\n t = binascii.unhexlify(t.encode(\"utf8\"))\n f.write(t)\n return f.getvalue()", "def compile_run(\n path,\n host,\n params={}\n ):\n\n compiled_path = MyCLI.compile(path)\n MyCLI.run(compiled_path, host, params)", "def execute(\n c7n_config: C7nCfg,\n data_dir: PathLike = Path(\"data\").joinpath(\"query\"),\n telemetry_disabled: bool = True,\n):\n run(\n c7n_config, data_dir=data_dir, telemetry_disabled=telemetry_disabled, dryrun=False,\n )", "def _cmd_builder(self, test_config):\n arg_str = ''\n for key, value in sorted(test_config['args'].items()):\n arg_str += '--{} {} '.format(key, value)\n return test_config['pycmd'].format(arg_str)", "def execute(self, args=\"\"):\r\n return super(PythonScript, self).execute(_EXECUTABLE, args)", "def compile() -> str:\n content = request.get_json()\n code = injection + \"\\n\" + content[\"input_code\"]\n stdin = content[\"input_stdin\"]\n args = [\n \"python\",\n \"-c\",\n code,\n config[\"blocked\"][\"imports\"],\n config[\"blocked\"][\"functions\"],\n ]\n\n process = Popen(\n args,\n stdin=PIPE,\n stdout=PIPE,\n stderr=PIPE,\n encoding=config[\"process\"][\"encoding\"],\n )\n\n try:\n stdout, stderr = process.communicate(\n stdin, 
timeout=float(config[\"process\"][\"timeout\"])\n )\n except TimeoutExpired:\n return json.dumps(\n {\n \"output\": \"The program takes too long to execute\",\n \"error\": str(config[\"process\"][\"timeout\"]),\n }\n )\n\n return json.dumps({\"output\": str(stdout), \"error\": str(stderr)})", "def _make_publish_sql(self):\n\n ### publish.sql header and instructions\n publish_txt = \"\"\"\n /*\n Query para publicar a tabela.\n\n Esse รฉ o lugar para:\n - modificar nomes, ordem e tipos de colunas\n - dar join com outras tabelas\n - criar colunas extras (e.g. logs, proporรงรตes, etc.)\n\n Qualquer coluna definida aqui deve tambรฉm existir em `table_config.yaml`.\n\n # Alรฉm disso, sinta-se ร  vontade para alterar alguns nomes obscuros\n # para algo um pouco mais explรญcito.\n\n TIPOS:\n - Para modificar tipos de colunas, basta substituir STRING por outro tipo vรกlido.\n - Exemplo: `SAFE_CAST(column_name AS NUMERIC) column_name`\n - Mais detalhes: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types\n */\n \"\"\"\n\n # remove triple quotes extra space\n publish_txt = inspect.cleandoc(publish_txt)\n publish_txt = textwrap.dedent(publish_txt)\n\n # add create table statement\n project_id_prod = self.client[\"bigquery_prod\"].project\n publish_txt += f\"\\n\\nCREATE VIEW {project_id_prod}.{self.dataset_id}.{self.table_id} AS\\nSELECT \\n\"\n\n # sort columns by is_partition, partitions_columns come first\n\n if self._is_partitioned():\n columns = sorted(\n self.table_config[\"columns\"],\n key=lambda k: (k[\"is_partition\"] is not None, k[\"is_partition\"]),\n reverse=True,\n )\n else:\n columns = self.table_config[\"columns\"]\n\n # add columns in publish.sql\n for col in columns:\n name = col[\"name\"]\n bigquery_type = (\n \"STRING\"\n if col[\"bigquery_type\"] is None\n else col[\"bigquery_type\"].upper()\n )\n\n publish_txt += f\"SAFE_CAST({name} AS {bigquery_type}) {name},\\n\"\n ## remove last comma\n publish_txt = publish_txt[:-2] + \"\\n\"\n\n # add from statement\n project_id_staging = self.client[\"bigquery_staging\"].project\n publish_txt += (\n f\"FROM {project_id_staging}.{self.dataset_id}_staging.{self.table_id} AS t\"\n )\n\n # save publish.sql in table_folder\n (self.table_folder / \"publish.sql\").open(\"w\", encoding=\"utf-8\").write(\n publish_txt\n )", "def run_codeql_query(query, database, output, search_path):\n # --search-path is required when the CLI needs to upgrade the database scheme.\n subprocess_run([\"codeql\", \"query\", \"run\", query, \"--database\", database,\n \"--output\", output + \".bqrs\", \"--search-path\", search_path])\n subprocess_run([\"codeql\", \"bqrs\", \"decode\", output + \".bqrs\",\n \"--format=csv\", \"--no-titles\", \"--output\", output])\n os.remove(output + \".bqrs\")", "def execute(cmd_string):\n pass", "def compile(self, **options):\n pass", "def script(self):", "def build_commands(self) -> list:\r\n commands: list = []\r\n\r\n arguments = CommandArguments()\r\n\r\n compiler_path: str = self.options.compiler_path\r\n flags_path: str = self.options.flags_path\r\n output_path: str = self.options.output_path\r\n\r\n if self.options.no_incremental_build:\r\n psc_paths: dict = self.psc_paths\r\n else:\r\n psc_paths = self._try_exclude_unmodified_scripts()\r\n\r\n # add .psc scripts whose .pex counterparts do not exist\r\n for object_name, script_path in self.missing_scripts.items():\r\n if object_name not in psc_paths.keys():\r\n psc_paths[object_name] = script_path\r\n\r\n source_import_paths = 
deepcopy(self.import_paths)\r\n\r\n # TODO: depth sorting solution is not foolproof! parse psc files for imports to determine command order\r\n for object_name, script_path in psc_paths.items():\r\n import_paths: list = self.import_paths\r\n\r\n if self.options.game_type != GameType.FO4:\r\n object_name = script_path\r\n\r\n # remove unnecessary import paths for script\r\n if self.options.game_type == GameType.FO4:\r\n for import_path in reversed(self.import_paths):\r\n if self._can_remove_folder(import_path, object_name, script_path):\r\n import_paths.remove(import_path)\r\n\r\n arguments.clear()\r\n arguments.append(compiler_path, enquote_value=True)\r\n arguments.append(object_name, enquote_value=True)\r\n arguments.append(flags_path, key='f', enquote_value=True)\r\n arguments.append(';'.join(import_paths), key='i', enquote_value=True)\r\n arguments.append(output_path, key='o', enquote_value=True)\r\n\r\n if self.options.game_type == GameType.FO4:\r\n # noinspection PyUnboundLocalVariable\r\n if self.release:\r\n arguments.append('-release')\r\n\r\n # noinspection PyUnboundLocalVariable\r\n if self.final:\r\n arguments.append('-final')\r\n\r\n if self.optimize:\r\n arguments.append('-op')\r\n\r\n arg_s = arguments.join()\r\n commands.append(arg_s)\r\n\r\n self.import_paths = source_import_paths\r\n\r\n return commands", "def gen_cmd(dali_root_dir, file_list, process_includes=False):\n if not file_list:\n return [\"true\"]\n cmd = [\"python\",\n os.path.join(dali_root_dir, \"third_party\", \"cpplint.py\"),\n \"--quiet\",\n \"--linelength=100\",\n \"--headers=h,cuh\",\n \"--root=\" + os.path.join(dali_root_dir, \"include\" if process_includes else \"\")]\n cmd.extend(file_list)\n return cmd", "def render_main(**kwargs):\n env = Environment(loader=PackageLoader(\"bigquery_etl\", \"glam/templates\"))\n main_sql = env.get_template(\"clients_daily_scalar_aggregates_v1.sql\")\n return reformat(main_sql.render(**kwargs))", "def script_generator(self):\n analyze_tool = \"/home/haihuam/Projects/RepPoints/mmdetection/tools/analyze_logs.py\"\n ex_options = self.global_setting.get('analyze_options', str())\n py = self.global_setting.get('python', sys.executable)\n if os.access(py, os.X_OK):\n content = \"set -e \\n\" \n content += \"cd %s \\n\"%(self.run_dir)\n content += \"%s %s plot_curve *.log.json \"%(py, analyze_tool)\n content += \"--keys loss loss_cls loss_pts_init \"\n content += \"loss_pts_refine \"\n content += \"--out losses.pdf %s &> analyze.log \\n\"%(ex_options)\n\n content += \"touch analyze.done \\n\"\n self.script_content = content\n else:\n print(\"Error: %s is not executable.\"%py)\n sys.exit(0)", "def execute(script, data, outfile):\n parsed = load_script(script)\n input_data = pd.read_csv(data)\n\n output_data = parsed.execute(input_data)\n output_data.to_csv(outfile, index=False)", "def _compile(self, tocompile, parameters):\n compiler = self.dialect.statement_compiler(self.dialect, tocompile, parameters)\n compiler.compile()\n return compiler", "def run_query_target_bigquery(self, query):\n return db.run_query_bigquery(\n query, project=self.get_conn_env_var('TARGET_BIGQUERY', 'PROJECT')\n )", "def _compile(self, source: str, filename: str) -> CodeType:\n return compile(source, filename, \"exec\") # type: ignore", "def PyHiew_ExecuteScript(script, g, strip_path = False):\r\n PY_COMPILE_ERR = None\r\n try:\r\n execfile(script, g)\r\n except Exception, e:\r\n PY_COMPILE_ERR = str(e) + \"\\n\" + traceback.format_exc()\r\n PY_COMPILE_ERR = PY_COMPILE_ERR.replace(\r\n 
script[:-len(os.path.basename(script))],\r\n '')\r\n if PYHIEW_SHOW_EXEC_ERRORS:\r\n MessageBox(PY_COMPILE_ERR)\r\n\r\n return PY_COMPILE_ERR", "def call_script(self, script):\n filename, callable = script.rsplit(':', 1)\n filename = os.path.abspath(filename)\n module = imp.load_source('script', filename)\n script = getattr(module, callable.strip())\n\n try:\n script(self.options, self.buildout, self.augmented_environment())\n except TypeError:\n # BBB: Support hook scripts that do not take the environment as\n # the third parameter\n script(self.options, self.buildout)", "def run_script (script, *l) :\n if not os.path.exists (script) :\n raise PQHException (\"file %s not found\" % script)\n py = get_interpreter_path ()\n cmd = \"%s %s\" % (py, script)\n if len (l) > 0 :\n cmd += \" \" + \" \".join ( [str (x) for x in l])\n out,err = run_cmd (cmd)\n return out,err", "def visit_copy_command(element, compiler, **kw):\n qs = \"\"\"COPY {table}{columns} FROM :data_location\n WITH CREDENTIALS AS :credentials\n {format}\n {parameters}\"\"\"\n parameters = []\n bindparams = [\n sa.bindparam(\n 'data_location',\n value=element.data_location,\n type_=sa.String,\n ),\n sa.bindparam(\n 'credentials',\n value=element.credentials,\n type_=sa.String,\n ),\n ]\n\n if element.format == Format.csv:\n format_ = 'FORMAT AS CSV'\n if element.quote is not None:\n format_ += ' QUOTE AS :quote_character'\n bindparams.append(sa.bindparam(\n 'quote_character',\n value=element.quote,\n type_=sa.String,\n ))\n elif element.format == Format.json:\n format_ = 'FORMAT AS JSON AS :json_option'\n bindparams.append(sa.bindparam(\n 'json_option',\n value=element.path_file,\n type_=sa.String,\n ))\n elif element.format == Format.avro:\n format_ = 'FORMAT AS AVRO AS :avro_option'\n bindparams.append(sa.bindparam(\n 'avro_option',\n value=element.path_file,\n type_=sa.String,\n ))\n elif element.format == Format.orc:\n format_ = 'FORMAT AS ORC'\n elif element.format == Format.parquet:\n format_ = 'FORMAT AS PARQUET'\n elif element.format == Format.fixed_width and element.fixed_width is None:\n raise sa_exc.CompileError(\n \"'fixed_width' argument required for format 'FIXEDWIDTH'.\")\n else:\n format_ = ''\n\n if element.delimiter is not None:\n parameters.append('DELIMITER AS :delimiter_char')\n bindparams.append(sa.bindparam(\n 'delimiter_char',\n value=element.delimiter,\n type_=sa.String,\n ))\n\n if element.fixed_width is not None:\n parameters.append('FIXEDWIDTH AS :fixedwidth_spec')\n bindparams.append(sa.bindparam(\n 'fixedwidth_spec',\n value=_process_fixed_width(element.fixed_width),\n type_=sa.String,\n ))\n\n if element.compression is not None:\n parameters.append(Compression(element.compression).value)\n\n if element.manifest:\n parameters.append('MANIFEST')\n\n if element.accept_any_date:\n parameters.append('ACCEPTANYDATE')\n\n if element.accept_inv_chars is not None:\n parameters.append('ACCEPTINVCHARS AS :replacement_char')\n bindparams.append(sa.bindparam(\n 'replacement_char',\n value=element.accept_inv_chars,\n type_=sa.String\n ))\n\n if element.blanks_as_null:\n parameters.append('BLANKSASNULL')\n\n if element.date_format is not None:\n parameters.append('DATEFORMAT AS :dateformat_string')\n bindparams.append(sa.bindparam(\n 'dateformat_string',\n value=element.date_format,\n type_=sa.String,\n ))\n\n if element.empty_as_null:\n parameters.append('EMPTYASNULL')\n\n if element.encoding is not None:\n parameters.append('ENCODING AS ' + Encoding(element.encoding).value)\n\n if element.escape:\n 
parameters.append('ESCAPE')\n\n if element.explicit_ids:\n parameters.append('EXPLICIT_IDS')\n\n if element.fill_record:\n parameters.append('FILLRECORD')\n\n if element.ignore_blank_lines:\n parameters.append('IGNOREBLANKLINES')\n\n if element.ignore_header is not None:\n parameters.append('IGNOREHEADER AS :number_rows')\n bindparams.append(sa.bindparam(\n 'number_rows',\n value=element.ignore_header,\n type_=sa.Integer,\n ))\n\n if element.dangerous_null_delimiter is not None:\n parameters.append(\"NULL AS '%s'\" % element.dangerous_null_delimiter)\n\n if element.remove_quotes:\n parameters.append('REMOVEQUOTES')\n\n if element.roundec:\n parameters.append('ROUNDEC')\n\n if element.time_format is not None:\n parameters.append('TIMEFORMAT AS :timeformat_string')\n bindparams.append(sa.bindparam(\n 'timeformat_string',\n value=element.time_format,\n type_=sa.String,\n ))\n\n if element.trim_blanks:\n parameters.append('TRIMBLANKS')\n\n if element.truncate_columns:\n parameters.append('TRUNCATECOLUMNS')\n\n if element.comp_rows:\n parameters.append('COMPROWS :numrows')\n bindparams.append(sa.bindparam(\n 'numrows',\n value=element.comp_rows,\n type_=sa.Integer,\n ))\n\n if element.comp_update:\n parameters.append('COMPUPDATE ON')\n elif element.comp_update is not None:\n parameters.append('COMPUPDATE OFF')\n\n if element.max_error is not None:\n parameters.append('MAXERROR AS :error_count')\n bindparams.append(sa.bindparam(\n 'error_count',\n value=element.max_error,\n type_=sa.Integer,\n ))\n\n if element.no_load:\n parameters.append('NOLOAD')\n\n if element.stat_update:\n parameters.append('STATUPDATE ON')\n elif element.stat_update is not None:\n parameters.append('STATUPDATE OFF')\n\n if element.region is not None:\n parameters.append('REGION :region')\n bindparams.append(sa.bindparam(\n 'region',\n value=element.region,\n type_=sa.String\n ))\n\n columns = ' (%s)' % ', '.join(\n compiler.preparer.format_column(column) for column in element.columns\n ) if element.columns else ''\n\n qs = qs.format(\n table=compiler.preparer.format_table(element.table),\n columns=columns,\n format=format_,\n parameters='\\n'.join(parameters)\n )\n\n return compiler.process(sa.text(qs).bindparams(*bindparams), **kw)", "def main() -> None:\n parser: argparse.ArgumentParser = argparse.ArgumentParser(\n description=\"Set default expiration for BigQuery datasets and optionally tables within a specified Google Cloud Project.\"\n )\n parser.add_argument(\"project_id\", help=\"Google Cloud Project ID\")\n parser.add_argument(\"dataset_name\", help=\"BigQuery Dataset Name\")\n parser.add_argument(\"-d\", \"--days\", type=int, required=True, help=\"Number of days for expiration\")\n\n # Create a mutually exclusive group for --all-tables and --table\n table_group = parser.add_mutually_exclusive_group()\n table_group.add_argument(\"-a\", \"--all-tables\", action=\"store_true\", help=\"Set expiration for all tables\")\n table_group.add_argument(\"-t\", \"--table\", type=str, help=\"Regex pattern for tables to set expiration\")\n\n parser.add_argument(\"-s\", \"--skip-tables\", type=str, help=\"Regex pattern to skip tables that should not be affected\")\n parser.add_argument(\"-n\", \"--dry-run\", action=\"store_true\", help=\"Dry run, show changes without applying them\")\n args: argparse.Namespace = parser.parse_args()\n\n try:\n client: bigquery.Client = bigquery.Client(project=args.project_id)\n dataset_id = f\"{args.project_id}.{args.dataset_name}\"\n dataset_obj: bigquery.Dataset = client.get_dataset(dataset_id) 
# Fetch dataset directly\n\n # Only change the dataset's default expiration if neither --all-tables nor --table is provided\n if not (args.all_tables or args.table):\n set_expiration(client, dataset_obj, args.days, args.dry_run)\n\n if args.all_tables or args.table:\n handle_tables(client, dataset_obj, args.days, args.table, args.skip_tables, args.dry_run)\n\n except KeyboardInterrupt:\n print(\"\\nOperation canceled by user. Exiting...\")\n sys.exit(0)\n except NotFound as e:\n print(f\"Error: {str(e)}\")\n sys.exit(1)", "def main():\n #Set up objects\n client = BigQueryClient()\n writer = FileWriter()\n\n #Send data from big query to a given file.\n # 500 is the limit of data points fetched.\n client.produce_json_data(writer, 500)", "def _compile_and_execute(self, distribution_root):\n\n # compile the script\n compiler = path.join(distribution_root, \"py/sepcompiler.py\")\n compilation_error = subprocess.call([sys.executable, compiler,\n self.source_name,\n self.binary_name])\n if compilation_error:\n return COMPILATION_FAILED\n\n # execute the script\n try:\n interpreter = path.join(distribution_root, \"bin/09.exe\")\n if not path.exists(interpreter):\n interpreter = path.join(distribution_root, \"bin/09\")\n if not path.exists(interpreter):\n sys.stderr.write(\"Interpreter not found.\\n\")\n sys.exit(2)\n\n output = subprocess.check_output([interpreter, self.binary_name])\n output = output.decode(sys.getdefaultencoding())\n output = output.replace(\"\\r\\n\", \"\\n\")\n except subprocess.CalledProcessError:\n return EXECUTION_FAILED\n\n # store output for later\n self.output = output\n\n # check the output for correctness\n expected = self.expected_output()\n if expected is None:\n self.write_actual_output(output)\n return FIRST_RUN\n elif expected != output:\n self.write_actual_output(output)\n return WRONG_OUTPUT\n else:\n return OK", "def operation_script(self, subcommand: Command, args: Command) -> ShellScript:\n\n return ShellScript(\n f\"{self.command.to_script()} {subcommand.to_script()} \"\n f\"{self.options.to_script()} {args.to_script()}\")", "def main(argv, out=print):\n opts = parser.parse_args(argv[1:])\n out(generate_sql(vars(opts)))", "def _execute(self):\n\n self.time_point(tag=\"execution\")\n\n main = self.import_engine_as_python_function()\n\n output_file = os.path.join(\n self.params[\"output_dir_path\"], self.params[\"output_file\"]\n )\n\n input_file = os.path.join(\n self.params[\"input_dir_path\"], self.params[\"input_file\"]\n )\n\n translations = self.params['translations']['_grouped_by_translated_key']\n\n pyqms_params = {\n \"PERCENTILE_FORMAT_STRING\": None,\n \"M_SCORE_THRESHOLD\": None,\n \"ELEMENT_MIN_ABUNDANCE\": None,\n \"MIN_REL_PEAK_INTENSITY_FOR_MATCHING\": None,\n \"REQUIRED_PERCENTILE_PEAK_OVERLAP\": None,\n \"MINIMUM_NUMBER_OF_MATCHED_ISOTOPOLOGUES\": None,\n \"INTENSITY_TRANSFORMATION_FACTOR\": None,\n \"UPPER_MZ_LIMIT\": None,\n \"LOWER_MZ_LIMIT\": None,\n \"MZ_TRANSFORMATION_FACTOR\": None,\n \"REL_MZ_RANGE\": None,\n \"REL_I_RANGE\": None,\n \"INTERNAL_PRECISION\": None,\n \"MAX_MOLECULES_PER_MATCH_BIN\": None,\n \"SILAC_AAS_LOCKED_IN_EXPERIMENT\": None,\n \"BUILD_RESULT_INDEX\": None,\n \"MACHINE_OFFSET_IN_PPM\": None,\n \"FIXED_LABEL_ISOTOPE_ENRICHMENT_LEVELS\": None,\n \"MZ_SCORE_PERCENTILE\": None,\n }\n sugarpy_params = {}\n sugarpy_params[\"charges\"] = list(\n range(\n self.params[\"translations\"][\"precursor_min_charge\"],\n self.params[\"translations\"][\"precursor_max_charge\"] + 1,\n )\n )\n\n for translated_key, translation_dict 
in translations.items():\n if translated_key == \"REL_MZ_RANGE\":\n if self.params[\"translations\"][\"ms_level\"] == 1:\n print(\n \"\"\"\n [ WARNING ] precursor_mass_tolerance_plus and precursor_mass_tolerance_minus\n [ WARNING ] need to be combined for SugarPy (use of symmetric tolerance window).\n [ WARNING ] The arithmetic mean is used.\n \"\"\"\n )\n pyqms_params[\"REL_MZ_RANGE\"] = (\n float(\n self.params[\"translations\"][\"precursor_mass_tolerance_plus\"]\n )\n + float(\n self.params[\"translations\"][\n \"precursor_mass_tolerance_minus\"\n ]\n )\n ) / 2.0\n if (\n self.params[\"translations\"][\"precursor_mass_tolerance_unit\"]\n == \"da\"\n ):\n pyqms_params[\n \"REL_MZ_RANGE\"\n ] = ursgal.ucore.convert_dalton_to_ppm(\n pyqms_params[\"REL_MZ_RANGE\"],\n base_mz=self.params[\"translations\"][\"base_mz\"],\n )\n else:\n pyqms_params[\"REL_MZ_RANGE\"] = self.params[\"translations\"][\n \"frag_mass_tolerance\"\n ]\n if self.params[\"translations\"][\"frag_mass_tolerance_unit\"] == \"da\":\n pyqms_params[\n \"REL_MZ_RANGE\"\n ] = ursgal.ucore.convert_dalton_to_ppm(\n pyqms_params[\"REL_MZ_RANGE\"],\n base_mz=self.params[\"translations\"][\"base_mz\"],\n )\n pyqms_params[\"REL_MZ_RANGE\"] = pyqms_params[\"REL_MZ_RANGE\"] * 1e-6\n elif translated_key in pyqms_params.keys():\n pyqms_params[translated_key] = list(translation_dict.values())[0]\n elif \"charge\" in translated_key:\n continue\n elif translated_key == \"mzml_file\":\n sugarpy_params[translated_key] = list(translation_dict.values())[0][0]\n elif len(translation_dict) == 1:\n sugarpy_params[translated_key] = list(translation_dict.values())[0]\n else:\n print(\n \"The translatd key \",\n translated_key,\n \" maps on more than one ukey, but no special rules have been defined\",\n )\n print(translation_dict)\n sys.exit(1)\n sugarpy_params[\"pyqms_params\"] = pyqms_params\n sugarpy_params[\"ident_file\"] = input_file\n sugarpy_params[\"output_file\"] = output_file\n sugarpy_params[\"force\"] = True\n\n out = main(**sugarpy_params)\n\n self.print_execution_time(tag=\"execution\")\n return out", "def sql_cell(source, output_dataset=None, validate=False):\n # If the validate flag is true create a package index that contains the\n # SQL package declaration\n if validate:\n packages = {sql.PACKAGE_SQL: pckg.PackageIndex(sql.SQL_COMMANDS)}\n else:\n packages = None\n arguments = [md.ARG(id=sql.PARA_SQL_SOURCE, value=source)]\n if not output_dataset is None:\n arguments.append(\n md.ARG(id=sql.PARA_OUTPUT_DATASET, value=output_dataset)\n )\n return md.ModuleCommand(\n sql.PACKAGE_SQL,\n sql.SQL_QUERY,\n arguments=arguments,\n packages=packages\n )", "def _create_execute_blastdbcmd(execute_command):\n\n def execute_blastdbcmd(input_file: str, sequence_file: str, database: str):\n cmd = \"{} -db {} -entry_batch {} > {}\".format(\n BLASTDBCMD_CMD, database, input_file, sequence_file)\n execute_command(cmd)\n\n return execute_blastdbcmd", "def RUN_CMD(self) -> str:\n args = \" \\ \\n \".join(CONFIG.WEBPACK.ARGS)\n return f\"{CONFIG.WEBPACK.BIN} \\ \\n {args}\"", "def binary_compile_cmd(self):\n ld = self.nvcc_options_json[\"ld\"]\n objcopy = self.nvcc_options_json[\"objcopy\"]\n cmd = \" \".join([ld, \"-r -b binary -o {target} {src}\"])\n # Support models with >2GB constants on Linux only\n if is_linux():\n cmd += (\n f\" && {objcopy} --rename-section\"\n \" .data=.lrodata,alloc,load,readonly,data,contents\"\n \" {target} {target}\"\n )\n return cmd", "def compile_and_run(self, desired_result, input, limit):\n cfg = 
desired_result.configuration.data\n compile_result = self.compile(cfg, 0)\n return self.run_precompiled(desired_result, input, limit, compile_result, 0)", "def execute():", "def pytest_configure():\n exec(open(\"script/generate_sql\").read())", "async def compile_command(self, ctx, *, codeblock: str):\n regex = re.compile(r\"(\\w*)\\s*(?:```)(\\w*)?([\\s\\S]*)(?:```$)\")\n matches = regex.findall(codeblock)\n if not matches:\n embed = Embed(color=Color.blurple())\n embed.set_author(\n name=f\"Could not find codeblock.\", icon_url=self.client.user.avatar_url\n )\n await ctx.send(embed=embed)\n lang = matches[0][0] or matches[0][1]\n if not lang:\n embed = Embed(color=Color.blurple())\n embed.set_author(\n name=f\"Could not find language hinted in the codeblock.\",\n icon_url=self.client.user.avatar_url,\n )\n code = matches[0][2]\n result = await self._run_code(lang=lang, code=code)\n await self._send_result(ctx, result)", "def Scriptgen(self, *args, **kwargs):\n # type: (*Any, **Any) -> Union[str, None]\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"scriptgen\", payload=payload, response_object=None)", "def executescript(self, script: SQLQuery) -> \"Cursor\":\n return self.execute(script)", "def __compile_subroutine_body(self):\r\n self.compile_statements()", "def get_command(self):\n if os.name == 'posix':\n code_command = self._get_code_command_linux()\n elif os.name == 'nt':\n code_command = self._get_code_command_windows()\n command = self._build_command(code_command)\n return command", "def runScript(path=None):\n if path:\n exec(compile(open(path, \"rb\").read(), path, 'exec'))", "def execute():\n\tfrappe.reload_doc(\"Custom\", \"doctype\", \"Custom Script\")\n\n\tfrappe.db.sql(\"\"\"\n\t\tUPDATE `tabCustom Script` SET enabled=1\n\t\"\"\")", "def compile(self, code, options=''):\n try:\n data = self.client.cli.compile_contract(body=dict(\n code=code,\n options=options\n ))\n return data.bytecode\n except OpenAPIClientException as e:\n raise ContractError(e)", "def build_js(command_subclass):\n subclass_run = command_subclass.run\n def run(self):\n self.run_command(\"build_js\")\n subclass_run(self)\n command_subclass.run = run\n return command_subclass", "def script(self, code):\r\n LOG(\"Executing script \" + repr(code))\r\n cmd = MsgHelper.createMessage(Messages.CMD_SCRIPT)\r\n cmd[Messages.FIELD_SCRIPT] = code\r\n cmd[Messages.FIELD_FORCE] = True\r\n self.mailbox.push( cmd, high_priority = False )", "def compile_and_run(self, desired_result, input, limit):\n cfg = desired_result.configuration.data\n compile_result = self.compile(cfg, 0)\n return self.run_precompiled(\n desired_result, input, limit, compile_result, 0)", "def gen_script(model: onnx.ModelProto, output_file: str = None) -> str:\n current_dir = os.path.dirname(os.path.realpath(__file__))\n env = jinja2.Environment(loader=jinja2.FileSystemLoader(current_dir + '/templates/'))\n model_header_render = gen_model_header(env, model)\n imports, main_function, sub_functions = gen_graph_functions(env, model.graph)\n\n wdir = \"\"\n if len(imports) > 0:\n # need to set wdir to enable imports\n wdir = util.resolve_systemds_root() + \"/scripts\"\n\n main_template = env.get_template(\"main.dml.jinja\")\n result_render = main_template.render(\n title=\"This file was generated by onnx-systemds\",\n model_header_render=model_header_render,\n wdir=wdir,\n imports=imports,\n main_function=main_function,\n 
sub_functions=sub_functions\n )\n if output_file:\n directory = os.path.dirname(output_file)\n if len(directory) > 0:\n os.makedirs(directory, exist_ok=True)\n with open(output_file, 'w') as f:\n f.write(result_render)\n\n return result_render", "def execute_script(script, variables):\n code = compile(script, 'fake-filename', 'exec')\n output = io.StringIO()\n with contextlib.redirect_stdout(output):\n exec(code, variables)\n output = output.getvalue()\n return output", "def _process_bigquery():\n config_path = os.path.join(app.app.config['DATASET_CONFIG_DIR'],\n 'bigquery.json')\n table_names = []\n participant_id_column = ''\n sample_id_column = ''\n sample_file_columns = []\n if os.path.isfile(config_path):\n bigquery_config = _parse_json_file(config_path)\n table_names = bigquery_config['table_names']\n participant_id_column = bigquery_config['participant_id_column']\n sample_id_column = bigquery_config.get('sample_id_column', '')\n sample_file_columns = bigquery_config.get('sample_file_columns', {})\n table_names.sort()\n\n table_names_dict = OrderedDict()\n for full_table_name in table_names:\n splits = full_table_name.rsplit('.', 2)\n if len(splits) != 3:\n raise ValueError(\n 'Unknown format for table name %s. Expected BigQuery project_id.dataset_id.table_name'\n % full_table_name)\n project_id, dataset_id, table_name = splits\n client = bigquery.Client(project=project_id)\n dataset_ref = client.dataset(dataset_id, project=project_id)\n table_ref = dataset_ref.table(table_name)\n description = client.get_table(table_ref).description\n table_names_dict[full_table_name] = description\n\n # A dict from table name (project_id.dataset_id.table_name) to table description.\n app.app.config['TABLES'] = table_names_dict\n app.app.config['PARTICIPANT_ID_COLUMN'] = participant_id_column\n app.app.config['SAMPLE_ID_COLUMN'] = sample_id_column\n app.app.config['SAMPLE_FILE_COLUMNS'] = sample_file_columns", "def execute(self, code):\n code = code()\n\n # Build an AST tree from the Python code, to get the line number of each statement\n try:\n nodes = compiler.parse(code).getChildNodes()[0].getChildNodes()\n lines = [node.lineno - 1 for node in nodes]\n except:\n self.executions += '>>> ' + code + '\\n' + ''.join(traceback.format_exception(*sys.exc_info())[4:])\n return\n\n code = code.splitlines()\n\n with IDEFrameContext.exec_lock:\n stdout = sys.stdout\n\n try:\n # Iterate over all the statements\n for (a, b) in zip(lines, lines[1:] + [None]):\n sys.stdout = StringIO()\n\n source = code[a:b]\n\n try:\n # Execute the statement using this local and global context\n frame = self.get_frame()\n exec compile('\\n'.join(source), '<web>', 'single', 0, 1) in frame.f_locals, frame.f_globals\n except:\n print ''.join(traceback.format_exception(*sys.exc_info())[2:]).rstrip()\n\n self.executions += '\\n'.join([('... 
' if line.startswith(' ') else '>>> ') + line for line in source]) + '\\n' + sys.stdout.getvalue()\n finally:\n sys.stdout = stdout", "def main():\n sys.argv.pop(0)\n (cmd, var, args) = process_options(sys.argv[:])\n execute(cmd, var, args)", "def compile_package(self):\n build_package = [\n self.mock,\n '--root=%s' % self.root,\n '--arch=%s' % self.arch,\n '--shell',\n '/build_package.sh',\n \n ]\n output, errors = self._run_command(build_package)", "def run_script(self, params, config_no):\n raise NotImplementedError()", "def compile_function(self, function, arguments):", "def get_command(self):\n return 'date && cd ' + \\\n os.path.join(ChronosJob.cloud_path_dict[self.cloud], \\\n 'userfiles', self.job_dir_relative_path) + \\\n ' && python3 /home/src/gene_prioritization.py ' + \\\n ' -run_directory ./' + \\\n ' -run_file run.yml' + \\\n ' && date;'", "def execute():\n pass", "def _BuildCommand(self, command_name, parameter_files=None, **kwargs):\n command = [YCSB_EXE, command_name, self.database]\n\n parameters = self.parameters.copy()\n parameters.update(kwargs)\n\n # Adding -s prints status which includes average throughput per sec.\n if _THROUGHPUT_TIME_SERIES.value and command_name == 'run':\n command.append('-s')\n parameters['status.interval'] = _STATUS_INTERVAL_SEC\n\n # These are passed as flags rather than properties, so they\n # are handled differently.\n for flag in self.FLAG_ATTRIBUTES:\n value = parameters.pop(flag, None)\n if value is not None:\n command.extend(('-{0}'.format(flag), str(value)))\n\n for param_file in list(self.parameter_files) + list(parameter_files or []):\n command.extend(('-P', param_file))\n\n for parameter, value in parameters.items():\n command.extend(('-p', '{0}={1}'.format(parameter, value)))\n\n return 'cd %s && %s' % (YCSB_DIR, ' '.join(command))", "def Non_VASP_Script(my_project):\n\n WORKFLOWS = my_project['Workflow']\n Workflow_Params = WORKFLOWS['Steps'][2]\n Workflow_name = Workflow_Params['NAME']\n job_dir = my_project['NAME'] + Workflow_Params['NAME']\n chkpt = job_dir + '.json'\n prev_filter = Workflow_Params['Continue']['Filter']\n prev_chkpt = Workflow_Params['Continue']['Source']\n Script = Workflow_Params['Script']\n executable = Script['Executable']\n non_arg_inputs = Script['NonArgInput']\n arg_inputs = Script['ArgInput']\n\n rerun_paths = continue_job_inputs(chkpt_files= prev_chkpt,\\\n user_filters=prev_filter)\n\n # Run the script now at the rerun_paths\n for r in rerun_paths:\n if inputs:\n shutil.copy(inputs, r)\n os.chdir(r)\n print ('Running {0} in {1}'.format(executable, r))\n script_output = sp.run([executable]+ arg_inputs, stdout=sp.PIPE).stdout.decode('utf-8')\n \n\n return None", "def exec(self,**kwargs):\r\n pass", "async def _compile(ctx, code: Option(str, \"Brainfuck code to compile into python\")):\n compiled = bot.brainfuck.compile(code)\n await send_code(ctx, compiled.code, lang=\"py\")", "def _get_sys_cmd(file_path_input, fold_coverage, file_path_output_prefix):\n return", "def compile_contract(file: str, class_call: str) -> str:\n print(f\"Compiling {file}.py ....\")\n exit_code = os.system(\n f\"~/smartpy-cli/SmartPy.sh compile contract/contracts/{file}.py \\\"{class_call}\\\" contract/build\")\n if exit_code != 0:\n raise Exception(f\"Failed to compile Contract : {file}.py\")", "def airflow_commands():\n pass", "def run_built_executable(self, name, *args, **kw):\n raise NotImplementedError", "def run_new_sql(self):\n\n pass", "def _get_sys_cmd(self, file_path_input, fold_coverage, file_path_output_prefix):\n 
assert self.validate_file(file_path_input)\n assert isinstance(fold_coverage, (int, float))\n assert self.validate_dir(file_path_output_prefix, only_parent=True)\n\n error_profile = os.path.join(self._directory_error_profiles)\n\n arguments = [\n '--data-type', \"CLR\",\n '--model_qc', os.path.join(error_profile + \"/model_qc_clr\"),\n '--depth', str(fold_coverage),\n '--seed', str(self._get_seed()),\n '--prefix', file_path_output_prefix\n ]\n if self._fragment_size_mean is not None:\n arguments.extend([\n '--length-mean', str(self._fragment_size_mean),\n ])\n if self._fragment_size_standard_deviation is not None:\n arguments.extend([\n '--length-sd', str(self._fragment_size_standard_deviation),\n ])\n\n arguments.extend([\n file_path_input,\n ])\n \n if self._logfile:\n arguments.append(\">> '{}'\".format(self._logfile))\n\n cmd = \"{exe} {args}\".format(exe=self._file_path_executable, args=\" \".join(arguments))\n return cmd", "def make_command(self, bam_file):\n command = \"\"\n var_sites = \"-knownSites {}\".format(self.known)\n obam = self.rebase_file(bam_file)\n\n if not self.recal_table:\n self.recal_table = self.rebase_file(bam_file)\n self.recal_table = self.replace_extension_with(\".grp\", bam_file)\n command += (\"java -Xms{xms} -Xmx{xmx} -Djava.io.tmpdir={tmp} -jar\"\n \" {GATK} -T BaseRecalibrator -R {ref} -I {ibam} -o \"\n \"{recal_table} {knownsites} && \").format(\n xms=self.get_mem(0.98),\n xmx=self.get_mem(0.99),\n tmp=self.tmp_dir,\n GATK=self.GATK,\n ref=self.reference,\n ibam=bam_file,\n recal_table=self.recal_table,\n knownsites=var_sites\n ) # Create Recalibration Table\n\n command += (\"java -Xms{xms} -Xmx{xmx} -Djava.io.tmpdir={tmp} -jar \"\n \"{GATK} -T PrintReads -R {ref} -I {ibam} -BQSR \"\n \"{recal_table} -o {obam}\").format(\n xms=self.get_mem(0.98),\n xmx=self.get_mem(0.99),\n tmp=self.tmp_dir,\n GATK=self.GATK,\n ref=self.reference,\n ibam=bam_file,\n recal_table=self.recal_table,\n obam=obam\n ) # Recalibrate the BAM file\n\n return (command)", "def build_script(self):\n return self._build_script", "def main(custom_commandline = None):\n parser = argparse.ArgumentParser(\n description = 'Gapy v%s - GAP8 Utility' % __version__,\n prog = 'gapy',\n fromfile_prefix_chars = '@',\n formatter_class = argparse.ArgumentDefaultsHelpFormatter)\n \n #\n # Common options shared by Gapy and sub-command #\n common.appendCommonOptions(parser)\n \n #\n # Fetch target Json config\n #\n (config, system) = common.importConfig(parser)\n \n #\n # Append sub-commands\n #\n subparsers = parser.add_subparsers(\n dest = 'operation',\n help = 'Gapy operation. 
Run gapy {command} -h for additional help')\n appendOperations(parser, subparsers, config)\n \n # Every operation matches a module function called operationFunc.\n for operation in subparsers.choices.keys():\n assert operation in globals(), \"%s should be a module function\" % operation\n \n argcomplete.autocomplete(parser)\n args = parser.parse_args(custom_commandline)\n \n common.parseOptions(args, config)\n\n if args.operation is None:\n parser.print_help()\n sys.exit(1)\n \n operationFunc = globals()[args.operation].operationFunc\n operationFunc(args, config, system)\n try: # Clean up AddrFilenamePairAction files\n for address, argfile in args.addr_filename:\n argfile.close()\n except AttributeError:\n pass", "def script_generator(self):\n py = self.global_setting.get('python', sys.executable)\n ex_options = self.global_setting.get('evaluate_options', str())\n train_py = \"/home/haihuam/Projects/RepPoints/mmdetection/tools/train.py\"\n if os.access(py, os.X_OK):\n content = \"set -e \\n\"\n content += \"export CUDA_VISIBLE_DEVICES=\" + \\\n \",\".join(self.selected_gpus)+ \" \\n\"\n content += \"cd %s \\n\"%(self.run_dir)\n \n content += \"%s %s %s --work_dir %s --validate %s &> train.log \\n\"%(py, \n train_py,\n self.setting['config_file'],\n self.run_dir,\n ex_options)\n content += \"touch evaluate.done \\n\"\n\n self.script_content = content\n else:\n print(\"Error: %s is not executable.\"%py)\n sys.exit(0)", "def code():", "def compile_python(self):\n if(self.input == \"\"):\n stdout = subprocess.run(\n [\"python\", self.id+\".py\"], stdout=subprocess.PIPE).stdout.decode('utf-8')\n self.output = stdout\n if(len(stdout) == 0):\n self.output = subprocess.run(\n [\"python\", self.id+\".py\"], stderr=subprocess.PIPE).stderr.decode('utf-8')\n self.status = 0 # error\n else:\n self.status = 1 # success\n else:\n pass", "def compile_asm(self, src, dst):\n cmd = [self.get_command(), \"-S\", src, \"-o\", dst] + self.__compiler_flags + self.__compiler_flags_extra + self.__definitions + self.__include_directories\n (so, se) = run_command(cmd)\n if 0 < len(se) and is_verbose():\n print(se)", "def cmd(self):\n\n command = [NODE_FILE, _js, '--reporter', 'jslint', '--stdin']\n #command.append('--literate')\n\n return command", "def _UpdateScripts(benchmark_spec, vm):\n benchmark = benchmark_spec.benchmark\n vm = vm or benchmark_spec.vms[0]\n\n config_sed = []\n config_sed += [(r'DGXSYSTEM=.*', fr'DGXSYSTEM=\\\"{DGXSYSTEM}\\\"')]\n gpus_per_node = nvidia_driver.QueryNumberOfGpus(vm)\n config_sed.append((\n r'DGXNGPU=.*', fr'DGXNGPU={gpus_per_node}\\n'\n fr'export CUDA_VISIBLE_DEVICES={\",\".join([str(gpu_number) for gpu_number in range(gpus_per_node)])}'\n ))\n config_sed += [(r'DGXNSOCKET=.*',\n fr'DGXNSOCKET={vm.CheckLsCpu().socket_count}')]\n config_sed += [(r'DGXSOCKETCORES=.*',\n fr'DGXSOCKETCORES={vm.CheckLsCpu().cores_per_socket}')]\n\n model = 'maskrcnn' if MASK in benchmark else benchmark\n framework = 'mxnet' if RESNET in benchmark else 'pytorch'\n script_path = (\n fr'$HOME/training_results_{VERSION.value}/NVIDIA/benchmarks/{model}/'\n fr'implementations/{framework}')\n\n config_files = [CONFIG]\n\n if MASK in benchmark:\n config_sed = _GetChangesForMask(config_sed)\n config_files = ['config_DGXA100.sh']\n\n elif RESNET in benchmark:\n config_sed = _GetChangesForResnet(config_sed)\n config_files = ['config_DGXA100_common.sh', 'config_DGXA100.sh']\n UpdateScriptForSmallGpuMem(vm)\n\n elif BERT in benchmark:\n config_sed = _GetChangesForBert(config_sed)\n config_files = 
['config_DGXA100_common.sh', 'config_DGXA100_1x8x56x1.sh']\n\n vm.RemoteCommand(\n f'cd {script_path} && '\n f'sed \"{SedPairsToString(config_sed)}\" '\n f'{\" \".join(config_files)} > {CONFIG} && '\n f'chmod 755 {CONFIG} && '\n f'sed -i \"2 i source {CONFIG}\" run_and_time.sh && '\n f'sed -i \"2 i source {CONFIG}\" run_with_docker.sh')", "def run_script(self):\n pass", "def write_qsub_script(self, filename, echo=False):\n\n buf = ['#!/usr/bin/env qsub', '# Written using SGE module']\n\n for option, value in self.args.__dict__.items():\n if value is True:\n value = ''\n\n if option not in ['command', 'command_args', 'xterm_args']:\n if isinstance(value, list):\n val = ' '.join(value)\n else:\n val = str(value)\n\n buf.append(' '.join(['#', '-' + option, val]))\n\n args = getattr(self.args, 'command_args', [])\n args = getattr(self.args, 'xterm_args', args)\n\n buf.append(' '.join([self.args.command] + args))\n\n if echo:\n print('\\n'.join(buf))\n\n f = open(filename, 'w')\n f.write('\\n'.join(buf))\n f.close()", "def compile (self):\n print(\"*** compiling the inno setup script ***\")\n progpath = get_nt_platform_vars()[0]\n cmd = r'%s\\Inno Setup 5\\iscc.exe' % progpath\n subprocess.check_call([cmd, self.pathname])" ]
[ "0.58411306", "0.5736632", "0.55719966", "0.5505636", "0.54719853", "0.5344112", "0.5325198", "0.5318884", "0.53028625", "0.5297254", "0.528932", "0.5273765", "0.5254984", "0.5248845", "0.5227913", "0.51987875", "0.5188278", "0.51837045", "0.5174086", "0.5140885", "0.5131505", "0.5123826", "0.51228386", "0.5118536", "0.5094359", "0.50572914", "0.5056427", "0.5044482", "0.5032013", "0.5027674", "0.50129247", "0.5011682", "0.50107366", "0.5008402", "0.5005652", "0.5004604", "0.49981186", "0.49972022", "0.49887323", "0.49874866", "0.49854344", "0.49805164", "0.49794793", "0.49710265", "0.49693438", "0.49660677", "0.49382553", "0.49242896", "0.49187845", "0.4897956", "0.4893873", "0.48938015", "0.48931292", "0.4882908", "0.48828775", "0.48814797", "0.48715672", "0.48676708", "0.4867475", "0.48624438", "0.4859988", "0.48501265", "0.48384663", "0.48317748", "0.48316047", "0.4819822", "0.48111406", "0.48061973", "0.48051658", "0.48039815", "0.48037335", "0.47966316", "0.4778154", "0.47770727", "0.47704697", "0.476814", "0.47559273", "0.47543892", "0.47527072", "0.47515747", "0.47503075", "0.47493497", "0.4747633", "0.47459358", "0.4737124", "0.47334367", "0.47280127", "0.47239533", "0.47178376", "0.47171226", "0.47146183", "0.47065887", "0.4687787", "0.4686574", "0.4679362", "0.46752974", "0.46660894", "0.46640387", "0.46637475", "0.4653496", "0.4650918" ]
0.0
-1
Add index operation with name to the operations given.
def add_index_operation(self, name, operations):\n    if name not in self._index_operations:\n        self._add_io(name, operations)\n    else:\n        raise AttributeError("An index operation with the name {} was already taken".format(name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_index(self, name, func):\n assert name not in self.indices\n info_name = 'index:%s:%s' % (self.info['name'], name)\n info = self.store._get_info(info_name, index_for=self.info['name'])\n index = Index(self, info, func)\n self.indices[name] = index\n if IndexKeyBuilder:\n self._index_keys = IndexKeyBuilder(self.indices.values()).build\n return index", "def _apply_index_op(db, op):\n if 'createIndexes' not in op['o']:\n return\n o = op['o']\n coll_name = o['createIndexes']\n key = list(o['key'].items())\n name = o['name']\n return db[coll_name].create_index(key, name=name)", "def add_operation(self, op):\n\n self.operations[op.name] = op", "def __call__(self, op):\n self._handle_renameCollection(op)\n if self.regex.match(op['ns']):\n ns = self.regex.sub(self.new_ns, op['ns']).rstrip(\".\")\n logging.debug(\"renaming %s to %s\", op['ns'], ns)\n op['ns'] = ns\n if op['ns'].endswith('.system.indexes'):\n # index operation; update ns in the op also.\n self(op['o'])\n self._handle_create(op)", "def addOp(self, op):\n self.operations << op", "def AddOperation(self, op):\n self._operations.append(op)", "def register_operation(self, name, result, args, kwargs):\r\n if not isinstance(result, autodiff.tensor.Tensor):\r\n result = autodiff.tensor.Tensor(result, graph=self)\r\n args = [x if isinstance(x, autodiff.tensor.Tensor) \r\n else autodiff.tensor.Tensor(x, graph=self) for x in args]\r\n self.operation_map[result.id] = Operation(name, result, args, kwargs)", "def add_target_and_index(self, name, sig, signode):\n key = normalize_object_name(name)\n if key in self.state.document.ids:\n return\n\n signode['names'].append(name)\n signode['ids'].append(key)\n signode['first'] = not self.names\n self.indexnode['entries'].append(\n ('single', 'JSON Objects; {}'.format(name), key, '', None))", "def add_op(self, op):\n self._operations.append(op)", "def _register_operation(self, **operation):\n name = operation[\"name\"]\n if name in self.operations:\n raise ValueError(\"operation name already registered: {}\".format(name))\n self.operations[name] = _Operation({**operation, \"resource\": self})", "def operation(self, name):\n\n try:\n return self.operations[name]\n except KeyError:\n return self.operation_not_found(name)", "def add(self, name, index = None):\n if index is None:\n while self.indexDict.has_key(self.count):\n self.count += 1\n index = self.count\n self.fieldDict[name] = index\n self.indexDict[index] = name", "def add(self, **kwargs) -> None:\n self.append(Operation(**kwargs))", "def add_impala_operation(op, name, database):\n udf.add_impala_operation(op, name, database)", "def invoke(self, op):\n for rename in self:\n rename(op)", "def addop(name, fields, args=None, alias=False):\n\n namespace = {\"fields\": fields, \"alias\": alias}\n\n if args is not None:\n namespace[\"args\"] = args\n\n # Dynamically create the \"name\" object\n type(name, (mn_pinky,), namespace)", "def add_repair_operator(\n self, op: _OperatorType, name: Optional[str] = None\n ):\n logger.debug(f\"Adding repair operator {op.__name__}.\")\n self._r_ops[name if name else op.__name__] = op", "def register_op(op_name, **kwargs):\n _DEFAULT_SCOPE[TargetRegistry].register_op(op_name, **kwargs)\n return", "def instantiate_indexor(prefix, width):\n stdlib = py_ast.Stdlib()\n name = py_ast.CompVar(NAME_SCHEME[\"index name\"].format(prefix=prefix))\n add_name = py_ast.CompVar(f\"{prefix}_add\")\n cells = [\n py_ast.Cell(name, stdlib.register(width)),\n py_ast.Cell(add_name, stdlib.op(\"add\", width, signed=False)),\n 
]\n\n init_name = py_ast.CompVar(NAME_SCHEME[\"index init\"].format(prefix=prefix))\n init_group = py_ast.Group(\n init_name,\n connections=[\n py_ast.Connect(\n py_ast.ConstantPort(width, 2 ** width - 1), py_ast.CompPort(name, \"in\")\n ),\n py_ast.Connect(\n py_ast.ConstantPort(1, 1), py_ast.CompPort(name, \"write_en\")\n ),\n py_ast.Connect(\n py_ast.CompPort(name, \"done\"), py_ast.HolePort(init_name, \"done\")\n ),\n ],\n )\n\n upd_name = py_ast.CompVar(NAME_SCHEME[\"index update\"].format(prefix=prefix))\n upd_group = py_ast.Group(\n upd_name,\n connections=[\n py_ast.Connect(\n py_ast.ConstantPort(width, 1), py_ast.CompPort(add_name, \"left\")\n ),\n py_ast.Connect(\n py_ast.CompPort(name, \"out\"), py_ast.CompPort(add_name, \"right\")\n ),\n py_ast.Connect(\n py_ast.CompPort(add_name, \"out\"), py_ast.CompPort(name, \"in\")\n ),\n py_ast.Connect(\n py_ast.ConstantPort(1, 1), py_ast.CompPort(name, \"write_en\")\n ),\n py_ast.Connect(\n py_ast.CompPort(name, \"done\"), py_ast.HolePort(upd_name, \"done\")\n ),\n ],\n )\n\n return (cells, [init_group, upd_group])", "def _add_default_op(op_name):\n _add_op(\"__%s__\"%op_name, getattr(operator, op_name))", "def set_operation_name(self, operation_name):\n return self", "def _add_default_ops(op_name):\n _add_default_op(op_name)\n _add_default_reverse_op(op_name)", "def AddIndex(self, target):\n if \"w\" not in self.mode:\n raise IOError(\"FileStoreImage %s is not in write mode.\", self.urn)\n predicate = (\"index:target:%s\" % target).lower()\n data_store.DB.MultiSet(self.urn, {predicate: target}, token=self.token,\n replace=True, sync=False)", "def add_index(self, index):\n self.add_index_sig(IndexSignature.from_index(index))", "def add_index(self, index):\n self.add_index_sig(IndexSignature.from_index(index))", "def getOperationByName(self, name):\n for item in self.operations:\n if item.name == name:\n return item\n raise KeyError, \"No operation named %s\" % name", "def document_add(index_name, doc_type, doc, doc_id=None):\n resp = es.index(index=index_name, doc_type=doc_type, body=doc, id=doc_id)\n print(resp)", "def add_operations_from(self, obj):\n\n for name in dir(obj):\n op = getattr(obj, name)\n if isinstance(op, Operation):\n self.add_operation(op)", "def apply(db, op):\n dbname = op['ns'].split('.')[0] or \"admin\"\n _db = db[dbname]\n return _get_index_handler(db)(_db, op) or _apply_regular(_db, op)", "def set_operation_name(self, operation_name: str) -> 'Span':\n with self.update_lock:\n self.operation_name = operation_name\n return self", "def getOperationByName(self, name):\n for item in self.operations:\n if item.name == name:\n return item\n raise KeyError(\"No operation named %s\" % name)", "def getOperationByName(self, name):\n for item in self.operations:\n if item.name == name:\n return item\n raise KeyError(\"No operation named %s\" % name)", "def add_workspace_to_index(self, ctx, params):\n # ctx is the context object\n #BEGIN add_workspace_to_index\n #END add_workspace_to_index\n pass", "def register(operation_key, *param_keys):\n\n def decorator(operation_fn):\n _operations[operation_key] = Operation(operation_fn, param_keys)\n return operation_fn\n\n return decorator", "def create_index(self, db_name):\n\t\tindex_func_path = self._get_index_func_filepath(db_name)\n\t\t\n\t\tif os.path.isfile(index_func_path):\n\t\t\t# create index request payload from predefined file\t\n\t\t\twith open(index_func_path, 'r') as content_file:\n\t\t\t\tpayload = content_file.read()\n\t\t\n\t\t\tprint (\"Create index using function 
in: {}\".format(index_func_path))\n\t\t\turl = \"https://{}/{}/_design/view\".format(\n\t\t\t\tself.cloudanthost, db_name)\n\t\t\tresponse = self.r.put(url, data=payload)\n\t\t\tassert response.status_code == 201", "def _add_op(attr_name, op):\n def closure(self, other):\n return VTKNoneArray._op(self, other, op)\n closure.__name__ = attr_name\n attr[attr_name] = closure", "def add_op(self, expr):\n from cascada.bitvector import operation\n assert isinstance(expr, operation.Operation)\n assert not self.contain_op(expr)\n name = \"{}{}\".format(self.id_prefix, self.counter)\n self.counter += 1\n identifier = core.Variable(name, expr.width)\n self.table[identifier] = expr\n\n return identifier", "def _add_to_index_operations(self, which, reconstrained, what, warning):\n if warning and reconstrained.size > 0:\n # TODO: figure out which parameters have changed and only print those\n print(\"WARNING: reconstraining parameters {}\".format(self.hierarchy_name() or self.name))\n index = self._raveled_index()\n which.add(what, index)\n return index", "def _add_to_index_operations(self, which, reconstrained, what, warning):\n if warning and reconstrained.size > 0:\n # TODO: figure out which parameters have changed and only print those\n print(\"WARNING: reconstraining parameters {}\".format(self.hierarchy_name() or self.name))\n index = self._raveled_index()\n which.add(what, index)\n return index", "def create_index():", "def add_elementwise(self, op, inputs, name=None):\n input_names = [self._maybe_add_const(input, \"elementwise_input\") \\\n for input in inputs]\n return self._build_op(op, input_names, name=name)", "def create_ops(self):\n return self._create_ops", "def add_mode_index(self) -> None:", "def _add_to_index( env, meta_dict, file_str, logger ):\n global adapter_glob\n if adapter_glob is not None:\n adapter = adapter_glob\n else:\n logger.warning( u\"Connecting to index...\" )\n adapter = adapter_file.adapter(env)\n adapter_glob = adapter\n doc = document(\n env[\"metadata\"][\"known_keys\"].keys(),\n meta_dict,\n env,\n )\n return adapter.add(doc, boosts=env[\"metadata\"][\"boosts\"])\n #logger.info(u\"Added to index [%s]\", file_str)", "def remove_operation(self, name):\n\n del self.operations[name]", "async def index_documents(self, app_id, namespace, index_name, documents):\n collection = get_collection_name(app_id, namespace, index_name)\n solr_documents = [_to_solr_document(doc) for doc in documents]\n await self.solr.put_documents(collection, solr_documents)", "def add_default_numeric_op(op_name):\n add_numeric_op(\"__%s__\"%op_name, getattr(operator, op_name))", "def operate(\n self, op: OperatorType, *other: Any, **kwargs: Any\n ) -> Operators:\n raise NotImplementedError(str(op))", "def add_default_numeric_op(op_name):\n add_numeric_op(\"__%s__\"%op_name)", "def create_or_update(\n self,\n index_name: str,\n prefer: Union[str, _models.Enum0],\n index: IO,\n allow_index_downtime: Optional[bool] = None,\n if_match: Optional[str] = None,\n if_none_match: Optional[str] = None,\n request_options: Optional[_models.RequestOptions] = None,\n *,\n content_type: str = \"application/json\",\n **kwargs: Any\n ) -> _models.SearchIndex:", "def list_operations():", "def insert_index(self):\n pass", "def add_index_sig(self, index_sig):\n self.index_sigs.append(index_sig)", "def add_index_sig(self, index_sig):\n self.index_sigs.append(index_sig)", "def transformSingle(self, name):\n for op in self.operations:\n name = op.apply(name)\n return name", "def create_index(self, index_name):\n 
print(f\"Creating {index_name} index started \\n\")\n add_index = '//*[@id=\"content-react\"]/div/div/button'\n create_new_index_btn_sitem = self.locator_finder_by_xpath(add_index)\n create_new_index_btn_sitem.click()\n time.sleep(2)\n\n print(f\"selecting {index_name} from the list\\n\")\n\n if index_name == 'Persistent':\n # selecting persistent index's filed\n persistent_field = \"/html//input[@id='fields']\"\n persistent_field_sitem = self.locator_finder_by_xpath(persistent_field)\n persistent_field_sitem.click()\n persistent_field_sitem.send_keys('name')\n\n # selecting persistent index's name\n persistent_name = \"/html//input[@id='name']\"\n persistent_name_sitem = self.locator_finder_by_xpath(persistent_name)\n persistent_name_sitem.click()\n persistent_name_sitem.send_keys(index_name)\n\n # selecting persistent index's extra value\n extra_value = \"/html//input[@id='storedValues']\"\n extra_value_sitem = self.locator_finder_by_xpath(extra_value)\n extra_value_sitem.click()\n extra_value_sitem.send_keys('email, likes')\n\n # selecting persistent index's sparse value\n sparse = \"(//span[@aria-hidden='true'])[1]\"\n sparse_sitem = self.locator_finder_by_xpath(sparse)\n sparse_sitem.click()\n\n # selecting persistent index's duplicate array value\n duplicate_array = '//*[@id=\"content-react\"]/div/div[3]/form/div/div[1]/div[11]/label/span/span'\n duplicate_array_sitem = self.locator_finder_by_xpath(duplicate_array)\n duplicate_array_sitem.click()\n\n memory_cache = '//*[@id=\"content-react\"]/div/div[3]/form/div/div[1]/div[15]/label/span/span'\n memory_cache_sitem = self.locator_finder_by_xpath(memory_cache)\n memory_cache_sitem.click()\n\n elif index_name == 'Geo':\n self.select_desired_index_from_the_list('Geo Index')\n # selecting geo index's filed\n geo_field = \"/html//input[@id='fields']\"\n geo_field_sitem = self.locator_finder_by_xpath(geo_field)\n geo_field_sitem.click()\n geo_field_sitem.send_keys('region')\n\n # selecting geo index's name\n geo_name = \"/html//input[@id='name']\"\n geo_name_sitem = self.locator_finder_by_xpath(geo_name)\n geo_name_sitem.click()\n geo_name_sitem.send_keys(index_name)\n\n elif index_name == 'Fulltext':\n self.select_desired_index_from_the_list('Fulltext Index')\n # selecting fullText index's filed\n full_text_field = \"/html//input[@id='fields']\"\n full_text_field_sitem = self.locator_finder_by_xpath(full_text_field)\n full_text_field_sitem.click()\n full_text_field_sitem.send_keys('region')\n\n # selecting fullText index's name\n full_text_name = \"/html//input[@id='name']\"\n full_text_name_sitem = self.locator_finder_by_xpath(full_text_name)\n full_text_name_sitem.click()\n full_text_name_sitem.send_keys(index_name)\n\n # selecting fullText index's min length\n min_length = \"/html//input[@id='minLength']\"\n min_length_sitem = self.locator_finder_by_xpath(min_length)\n min_length_sitem.click()\n min_length_sitem.send_keys()\n\n elif index_name == 'TTL':\n self.select_desired_index_from_the_list('TTL Index')\n # selecting ttl index's filed\n ttl_field = \"/html//input[@id='fields']\"\n ttl_field_sitem = self.locator_finder_by_xpath(ttl_field)\n ttl_field_sitem.click()\n ttl_field_sitem.send_keys('region')\n\n # selecting ttl index's name\n ttl_name = \"/html//input[@id='name']\"\n ttl_name_sitem = self.locator_finder_by_xpath(ttl_name)\n ttl_name_sitem.click()\n ttl_name_sitem.send_keys(index_name)\n\n ttl_expire = \"/html//input[@id='expireAfter']\"\n ttl_expire_sitem = self.locator_finder_by_xpath(ttl_expire)\n 
ttl_expire_sitem.click()\n ttl_expire_sitem.send_keys(1000)\n\n elif index_name == 'Inverted Index':\n action = ActionChains(self.driver)\n self.select_desired_index_from_the_list('Inverted Index')\n\n fields = \"(//div[contains(@class,'css-1d6mnfj')])[2]\"\n fields_sitem = self.locator_finder_by_xpath(fields)\n fields_sitem.click()\n action.send_keys('region').send_keys(Keys.ENTER).send_keys('name').send_keys(Keys.ENTER).perform()\n time.sleep(1)\n\n analyzer = \"//*[text()='Analyzer']\"\n analyzer_sitem = self.locator_finder_by_xpath(analyzer)\n analyzer_sitem.click()\n action.send_keys(Keys.DOWN).send_keys(Keys.ENTER).perform()\n time.sleep(1)\n\n include_all_fields = \"//*[text()='Include All Fields']\"\n include_all_fields_sitem = self.locator_finder_by_xpath(include_all_fields)\n include_all_fields_sitem.click()\n time.sleep(1)\n\n track_all_position = \"//*[text()='Track List Positions']\"\n track_all_position_sitem = self.locator_finder_by_xpath(track_all_position)\n track_all_position_sitem.click()\n time.sleep(1)\n\n search_fields = \"//*[text()='Search Field']\"\n search_fields_sitem = self.locator_finder_by_xpath(search_fields)\n search_fields_sitem.click()\n time.sleep(1)\n\n general_name = \"//*[text()='Name']\"\n general_name_sitem = self.locator_finder_by_xpath(general_name)\n general_name_sitem.click()\n action.send_keys('Inverted').perform()\n time.sleep(1)\n\n general_writebuffer_idle = \"//*[text()='Writebuffer Idle']\"\n general_writebuffer_idle_sitem = self.locator_finder_by_xpath(general_writebuffer_idle)\n general_writebuffer_idle_sitem.click()\n action.key_down(Keys.CONTROL).\\\n send_keys(\"a\").\\\n key_up(Keys.CONTROL).send_keys(Keys.BACKSPACE).\\\n send_keys(100).perform()\n time.sleep(1)\n\n general_writebuffer_active = \"//*[text()='Writebuffer Active']\"\n general_writebuffer_active_sitem = self.locator_finder_by_xpath(general_writebuffer_active)\n general_writebuffer_active_sitem.click()\n action.key_down(Keys.CONTROL). \\\n send_keys(\"a\"). \\\n key_up(Keys.CONTROL).send_keys(Keys.BACKSPACE). \\\n send_keys(1).perform()\n time.sleep(1)\n\n general_writebuffer_size_max = \"//*[text()='Writebuffer Size Max']\"\n general_writebuffer_size_max_sitem = self.locator_finder_by_xpath(\n general_writebuffer_size_max)\n general_writebuffer_size_max_sitem.click()\n action.key_down(Keys.CONTROL). \\\n send_keys(\"a\"). \\\n key_up(Keys.CONTROL).send_keys(Keys.BACKSPACE). \\\n send_keys(33554438).perform()\n time.sleep(1)\n\n general_cleanup_startup_steps = \"//*[text()='Cleanup Interval Step']\"\n general_cleanup_startup_steps_sitem = self.locator_finder_by_xpath(\n general_cleanup_startup_steps)\n general_cleanup_startup_steps_sitem.click()\n action.key_down(Keys.CONTROL). \\\n send_keys(\"a\"). \\\n key_up(Keys.CONTROL).send_keys(Keys.BACKSPACE). \\\n send_keys(3).perform()\n time.sleep(1)\n\n general_commit_interval = \"//*[text()='Commit Interval (msec)']\"\n general_commit_interval_sitem = self.locator_finder_by_xpath(\n general_commit_interval)\n general_commit_interval_sitem.click()\n action.key_down(Keys.CONTROL). \\\n send_keys(\"a\"). \\\n key_up(Keys.CONTROL).send_keys(Keys.BACKSPACE). \\\n send_keys(1010).perform()\n time.sleep(1)\n\n general_consolidation_interval = \"//*[text()='Consolidation Interval (msec)']\"\n general_consolidation_interval_sitem = self.locator_finder_by_xpath(\n general_consolidation_interval)\n general_consolidation_interval_sitem.click()\n action.key_down(Keys.CONTROL). \\\n send_keys(\"a\"). 
\\\n key_up(Keys.CONTROL).send_keys(Keys.BACKSPACE). \\\n send_keys(1010).perform()\n time.sleep(1)\n\n primary_sort = \"//*[text()='Primary Sort']\"\n primary_sort_sitem = self.locator_finder_by_xpath(\n primary_sort)\n primary_sort_sitem.click()\n time.sleep(1)\n\n primary_sort_field = \"//*[text()='Field']\"\n primary_sort_field_sitem = self.locator_finder_by_xpath(\n primary_sort_field)\n primary_sort_field_sitem.click()\n action.key_down(Keys.CONTROL). \\\n send_keys(\"a\"). \\\n key_up(Keys.CONTROL).send_keys(Keys.BACKSPACE). \\\n send_keys(\"name\").perform()\n time.sleep(1)\n\n stored_value = \"//*[text()='Stored Values']\"\n stored_value_sitem = self.locator_finder_by_xpath(\n stored_value)\n stored_value_sitem.click()\n time.sleep(1)\n\n action.key_down(Keys.CONTROL). \\\n send_keys(\"a\"). \\\n key_up(Keys.CONTROL).send_keys(Keys.BACKSPACE). \\\n send_keys(\"age\").perform()\n time.sleep(1)\n\n consolidation_policy = \"//*[text()='Consolidation Policy']\"\n consolidation_policy_sitem = self.locator_finder_by_xpath(\n consolidation_policy)\n consolidation_policy_sitem.click()\n time.sleep(1)\n\n segment_min = \"//*[text()='Segments Min']\"\n segment_min_sitem = self.locator_finder_by_xpath(\n segment_min)\n segment_min_sitem.click()\n action.key_down(Keys.CONTROL). \\\n send_keys(\"a\"). \\\n key_up(Keys.CONTROL).send_keys(Keys.BACKSPACE). \\\n send_keys(2).perform()\n time.sleep(1)\n\n segment_max = \"//*[text()='Segments Max']\"\n segment_max_sitem = self.locator_finder_by_xpath(\n segment_max)\n segment_max_sitem.click()\n action.key_down(Keys.CONTROL). \\\n send_keys(\"a\"). \\\n key_up(Keys.CONTROL).send_keys(Keys.BACKSPACE). \\\n send_keys(12).perform()\n time.sleep(1)\n\n segment_byte_max = \"//*[text()='Segments Bytes Max']\"\n segment_byte_max_sitem = self.locator_finder_by_xpath(\n segment_byte_max)\n segment_byte_max_sitem.click()\n action.key_down(Keys.CONTROL). \\\n send_keys(\"a\"). \\\n key_up(Keys.CONTROL).send_keys(Keys.BACKSPACE). \\\n send_keys(5368709120).perform()\n time.sleep(1)\n\n segment_bytes_floor = \"//*[text()='Segments Bytes Floor']\"\n segment_bytes_floor_sitem = self.locator_finder_by_xpath(\n segment_bytes_floor)\n segment_bytes_floor_sitem.click()\n action.key_down(Keys.CONTROL). \\\n send_keys(\"a\"). \\\n key_up(Keys.CONTROL).send_keys(Keys.BACKSPACE). \\\n send_keys(5368709128).perform()\n time.sleep(1)\n\n else:\n self.navbar_goto(\"collections\")\n print(\"Selecting computed values collections. 
\\n\")\n col = '//*[@id=\"collection_ComputedValueCol\"]/div/h5'\n self.locator_finder_by_xpath(col).click()\n time.sleep(1)\n\n self.select_index_menu()\n\n create_new_index_btn_sitem = self.locator_finder_by_xpath(add_index)\n create_new_index_btn_sitem.click()\n time.sleep(2)\n\n print('ZKD Index (EXPERIMENTAL)')\n zkd_field = \"/html//input[@id='fields']\"\n zkd_field = self.locator_finder_by_xpath(zkd_field)\n zkd_field.click()\n zkd_field.send_keys('x,y')\n\n # selecting ZKD index's name\n zkd_name = \"/html//input[@id='name']\"\n zkd_name_sitem = self.locator_finder_by_xpath(zkd_name)\n zkd_name_sitem.click()\n zkd_name_sitem.send_keys(index_name)\n\n\n # create the index\n create_btn = \"//*[text()='Create']\"\n create_btn_sitem = self.locator_finder_by_xpath(create_btn)\n create_btn_sitem.click()\n time.sleep(2)", "def add_sql_operation(self, app_label, sql_name, operation, dependencies):\n deps = [(dp[0], SQL_BLOB, dp[1], self._sql_operations.get(dp)) for dp in dependencies]\n\n self.add_operation(app_label, operation, dependencies=deps)\n self._sql_operations[(app_label, sql_name)] = operation", "def __getitem__(self, index: Any) -> ColumnOperators:\n return self.operate(getitem, index)", "def ops2alg(ops):\n return Model(cardinality=len(ops[0]), \n operations=dict([\"h\"+str(i),list(ops[i])] for i in range(len(ops))))", "def add(self, name, command):", "def add_word(self,word,index):\r\n\r\n #with shelf\r\n if self.using_shelf:\r\n\r\n if word in self.word_dict:\r\n\r\n self.word_dict[word].add(str(index))\r\n else:\r\n self.word_dict[word] = {str(index)}\r\n\r\n\r\n #with database\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname, word,)\r\n db_cursor.execute(\"INSERT OR REPLACE \"\r\n +\"INTO all_words \"\r\n +\"(word, notebook)\"\r\n +\" VALUES (?,?);\",value_tuple)\r\n value_tuple = (notebookname, word, str(index))\r\n db_cursor.execute(\"INSERT OR REPLACE\"\r\n +\" INTO word_to_indexes \"\r\n +\"(notebook, word, note_index)\"\r\n +\" VALUES (?,?,?);\",\r\n value_tuple)", "def add_identity(self, input_name, name=None):\n return self._build_op('Identity', [input_name], name=name)", "def ensure_operators_name(config):\n if is_v1_config(config):\n return\n i = 1\n for process in [\"preprocess\", \"postprocess\"]:\n process_config = config.get(process)\n if process_config:\n for op_config in process_config:\n op_type = op_config.get(\"op\")\n if op_type:\n op_config.setdefault(\"name\", \"%s_%d\" % (op_type, i))\n i += 1", "def register_inverted_index(self, name, min = None, max = None, split = None, ignore = None):\n # Verify specified name doesn't already exist as some object attribute.\n for object_name, object_type in self._object_types.items():\n if name in object_type[1] and name != object_type[1][name][2]:\n raise ValueError, \"Inverted index name '%s' conflicts with registered attribute in object '%s'\" % \\\n (name, object_name)\n\n if split is None:\n # Default split regexp is to split words on\n # alphanumeric/digits/underscore boundaries.\n split = re.compile(\"[\\W_\\d]+\", re.U)\n elif isinstance(split, basestring):\n split = re.compile(split, re.U)\n\n if name not in self._inverted_indexes:\n self._db_query('INSERT INTO inverted_indexes VALUES(?, \"objectcount\", 0)', (name,))\n # Create the tables needed by the inverted index.\n self._lock.acquire()\n self._db.executescript(CREATE_IVTIDX_TEMPLATE.replace('%IDXNAME%', name))\n self._lock.release()\n else:\n defn = self._inverted_indexes[name]\n if min == defn['min'] and max == defn['max'] and split 
== defn['split'] and \\\n ignore == defn['ignore']:\n # Definition unchanged, nothing to do.\n return\n\n defn = {\n 'min': min,\n 'max': max,\n 'split': split,\n 'ignore': ignore,\n }\n\n self._db_query(\"INSERT OR REPLACE INTO inverted_indexes VALUES(?, 'definition', ?)\",\n (name, buffer(cPickle.dumps(defn, 2))))\n\n defn['objectcount'] = 0\n self._inverted_indexes[name] = defn", "def add_command(self, expr, name, priority=0):\n add_expr = Group(expr).setResultsName(name)\n for i, (p, _) in enumerate(self.commands):\n if priority >= p:\n self.commands.insert(i, (priority, add_expr))\n break\n else:\n self.commands.append((priority, add_expr))\n self.reinit_exprs()", "def set_index_name(self, name, axis=0):\n self.get_axis(axis).name = name", "def create(\n self,\n index: IO,\n request_options: Optional[_models.RequestOptions] = None,\n *,\n content_type: str = \"application/json\",\n **kwargs: Any\n ) -> _models.SearchIndex:", "def operation_not_found(self, name):\n raise OperationError(\"Operation '%s' not found\" % name)", "def register_inverted_index(self, name, min = None, max = None, split = None, ignore = None):\n # Verify specified name doesn't already exist as some object attribute.\n for object_name, object_type in self._object_types.items():\n if name in object_type[1] and name != object_type[1][name][2]:\n raise ValueError(\"Inverted index name '%s' conflicts with registered attribute in object '%s'\" % \\\n (name, object_name))\n\n if split is None:\n # Default split regexp is to split words on\n # alphanumeric/digits/underscore boundaries.\n split = re.compile(u\"(\\d+)|[_\\W]\", re.U)\n elif isinstance(split, str):\n split = re.compile(tostr(split), re.U)\n\n if name not in self._inverted_indexes and not self._readonly:\n self._db_query('INSERT INTO inverted_indexes VALUES(?, \"objectcount\", 0)', (name,))\n # Create the tables needed by the inverted index.\n with self._lock:\n self._db.executescript(CREATE_IVTIDX_TEMPLATE.replace('%IDXNAME%', name))\n elif name in self._inverted_indexes:\n defn = self._inverted_indexes[name]\n if min == defn['min'] and max == defn['max'] and split == defn['split'] and \\\n ignore == defn['ignore']:\n # Definition unchanged, nothing to do.\n return\n\n if self._readonly:\n raise DatabaseReadOnlyError('upgrade_to_py3() must be called before database can be modified')\n\n defn = {\n 'min': min,\n 'max': max,\n 'split': split,\n 'ignore': ignore,\n }\n\n self._db_query(\"INSERT OR REPLACE INTO inverted_indexes VALUES(?, 'definition', ?)\",\n (name, self._pickle(defn)))\n\n defn['objectcount'] = 0\n self._inverted_indexes[name] = defn\n self.commit()", "def create_or_update(\n self,\n index_name: str,\n prefer: Union[str, _models.Enum0],\n index: _models.SearchIndex,\n allow_index_downtime: Optional[bool] = None,\n if_match: Optional[str] = None,\n if_none_match: Optional[str] = None,\n request_options: Optional[_models.RequestOptions] = None,\n *,\n content_type: str = \"application/json\",\n **kwargs: Any\n ) -> _models.SearchIndex:", "def update_index_by_name(self, doc_name):\n\t\tdocument = self.get_document_to_index(doc_name)\n\t\tif document:\n\t\t\tself.update_index(document)", "def add_default_numeric_ops(op_name):\n add_default_numeric_op(op_name)\n add_default_reverse_numeric_op(op_name)", "def add_default_numeric_ops(op_name):\n add_default_numeric_op(op_name)\n add_default_reverse_numeric_op(op_name)", "def append(self):\n target_index = get_index_from_alias(self.alias_name)\n if not target_index:\n self.replace()\n else:\n 
self.index_all(target_index)", "def alterTableAddIndex(database: str, table: str, indexName: str, columns: list) -> int:\n\n bd = _database(database)\n\n if bd:\n\n tb = _table(database, table)\n\n if tb:\n\n tb[\"index\"].insert([indexName, table, columns])\n return 0\n\n else:\n return 3\n\n else:\n return 2", "def create_index(self, table_name, index, timeout):\n _abstract()", "def create_index(self, table_name, index, timeout):\n _abstract()", "def create_index(\n self,\n ) -> Callable[\n [datastore_admin.CreateIndexRequest], Awaitable[operations_pb2.Operation]\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"create_index\" not in self._stubs:\n self._stubs[\"create_index\"] = self.grpc_channel.unary_unary(\n \"/google.datastore.admin.v1.DatastoreAdmin/CreateIndex\",\n request_serializer=datastore_admin.CreateIndexRequest.serialize,\n response_deserializer=operations_pb2.Operation.FromString,\n )\n return self._stubs[\"create_index\"]", "def add_operation(session, node_id, operation, operation_sent=None,\n operation_received=None, username='system_user'):\n session = validate_session(session)\n add_oper = Operations(node_id, operation, operation_sent,\n operation_received, username\n )\n if add_oper:\n session.add(add_oper)\n session.commit()\n return add_oper", "def index(self, name, file, passages, index_name=\"default\"):\n raise NotImplementedError()", "def add_operation(self):\n arg1 = self.memory[self.memory[self._cursor + 1]]\n arg2 = self.memory[self.memory[self._cursor + 2]]\n arg3 = self.memory[self._cursor + 3]\n self.memory[arg3] = arg1 + arg2\n # print(f'Cursor: {self._cursor}\\tAssigning position {position} with value {n1 + n2}')\n self._cursor += 4\n return", "def add_numeric_op(attr_name, op):\n def closure(self, other):\n return VTKCompositeDataArray._numeric_op(self, other, op)\n closure.__name__ = attr_name\n attr[attr_name] = closure", "def get_ops (self, names):\n return operator.attrgetter(names)(self.core) if isinstance(names,str) else [\n operator.attrgetter(n)(self.core) for n in names ]", "def operation(self, other=None, operator=None):\n terms = [self]\n if other is not None and operator is not EmptyQuery:\n terms.append(other)\n return Operation(terms, operator=operator)", "def rename(self, renames):\n def operation(*args):\n args = [renames[i] for i in args]\n result = self(*args)\n try:\n return renames.index(result)\n except:\n # TODO esto no tiene sentido\n # hace falta porque a veces una subestructura no\n # es subestructura con alguna funcion, y hay que poder traducirla\n return result\n\n return FO_Operation(operation, d_universe=list(range(len(renames))), arity=self.arity())", "def create_index(self, index_name, body):\n if self.es.indices.exists(index_name):\n print(\"deleting '%s' index...\" % index_name)\n res = self.es.indices.delete(index=index_name)\n print(\" response: '%s'\" % res)\n\n print(\"creating '%s' index...\" % index_name)\n res = self.es.indices.create(index=index_name, body=body)\n print(\" response: '%s'\" % res)", "def add_constraint(name, indexes, constraint_func):\n name_base = name\n for _ in range(len(indexes)):\n name_base += \"_{}\"\n\n for index in itertools.product(*indexes):\n name = name_base.format(*index)\n con = constraint_func(index)\n\n constraints.append((con, name))", "def test_add_to_index(koan, assert_index_includes_added_file):\n koan.shell('')\n 
koan.shell('')\n koan.shell('')", "def _create_update_index(self) -> Result[Ok, Err]:\n collection_status = self.collection\n if collection_status.is_err():\n return collection_status\n collection: MongoCollection = collection_status.ok()\n\n def check_index_keys(current_keys, new_index_keys):\n current_keys.sort()\n new_index_keys.sort()\n return current_keys == new_index_keys\n\n syft_obj = self.settings.object_type\n\n unique_attrs = getattr(syft_obj, \"__attr_unique__\", [])\n object_name = syft_obj.__canonical_name__\n\n new_index_keys = [(attr, ASCENDING) for attr in unique_attrs]\n\n try:\n current_indexes = collection.index_information()\n except BaseException as e:\n return Err(str(e))\n index_name = f\"{object_name}_index_name\"\n\n current_index_keys = current_indexes.get(index_name, None)\n\n if current_index_keys is not None:\n keys_same = check_index_keys(current_index_keys[\"key\"], new_index_keys)\n if keys_same:\n return Ok()\n\n # Drop current index, since incompatible with current object\n try:\n collection.drop_index(index_or_name=index_name)\n except Exception:\n return Err(\n f\"Failed to drop index for object: {object_name} with index keys: {current_index_keys}\"\n )\n\n # If no new indexes, then skip index creation\n if len(new_index_keys) == 0:\n return Ok()\n\n try:\n collection.create_index(new_index_keys, unique=True, name=index_name)\n except Exception:\n return Err(\n f\"Failed to create index for {object_name} with index keys: {new_index_keys}\"\n )\n\n return Ok()", "def create_index(index_name):\n resp = es.indices.create(index=index_name)\n print(resp)", "def get_operation_by_name(operation_name: str) -> Operation:\n client = vmwareengine_v1.VmwareEngineClient()\n request = GetOperationRequest()\n request.name = operation_name\n return client.get_operation(request)", "def __init__(self, name=None, description=None):\n super().__init__()\n self.name = name or getattr(self, \"name\", type(self).__name__.lower())\n self.description = description or getattr(self, \"description\", None) or self.__doc__ or self.__class__.__name__\n self.operations = {}\n for function in (attr for attr in (getattr(self, nom) for nom in dir(self)) if callable(attr)):\n try:\n operation = function._roax_operation_\n except:\n continue # ignore undecorated functions\n self._register_operation(**operation)", "def create_index(self):\r\n\r\n #### Begin functionality here\r\n\r\n return()", "def simple_index():\n examples = [\n benchmark.Example(\n inputs=[\n [12, 34, 56, 78],\n -2,\n ],\n output=56,\n ),\n ]\n constants = []\n description = 'Index into a tensor'\n target_program = 'in1[in2]'\n source = 'handwritten task'\n return benchmark.Benchmark(examples=examples,\n constants=constants,\n description=description,\n target_program=target_program,\n source=source,\n name='simple_index')", "def _es_push_indexes(self, content):\n for c in self.es_clients:\n c.create_index(content)", "def add_index(self, column_list, name=None, unique=False):\n columns = self._join_cols(column_list)\n if not name:\n name = self.new_index_name('_'.join(column_list), unique)\n\n self.execute(self.commands.add_index(self.name, name, columns, unique))\n self.commit()", "def addCommand(self, name, func, resultType=None, globalName=False):\n\n if globalName and \".\" in name:\n raise Exception(\"Invalid global name: %s!\" % name)\n elif not globalName and len(name.split(\".\")) != 2:\n raise Exception(\"Command names should always match namespace.name! 
Tried with: %s!\" % name)\n\n commands = self.__commands\n\n if name in commands:\n raise Exception(\"Command %s already exists!\" % name)\n\n commands[name] = {\n \"func\" : func,\n \"type\" : resultType\n }", "def _insert_op(self, op):", "def add_op_to_opstring(\n qsim_op: cirq.GateOperation,\n qubit_to_index_dict: Dict[cirq.Qid, int],\n opstring: qsim.OpString,\n):\n qsim_gate = qsim_op.gate\n gate_kind = _cirq_gate_kind(qsim_gate)\n if gate_kind not in {qsim.kX, qsim.kY, qsim.kZ, qsim.kI1}:\n raise ValueError(f\"OpString should only have Paulis; got {gate_kind}\")\n if len(qsim_op.qubits) != 1:\n raise ValueError(f\"OpString ops should have 1 qubit; got {len(qsim_op.qubits)}\")\n\n is_controlled = isinstance(qsim_gate, cirq.ControlledGate)\n if is_controlled:\n raise ValueError(f\"OpString ops should not be controlled.\")\n\n qubits = [qubit_to_index_dict[q] for q in qsim_op.qubits]\n qsim.add_gate_to_opstring(gate_kind, qubits, opstring)", "def construct_indexing_command(yaml_data):\n print('='*10, 'Indexing', '='*10)\n index_command = [\n os.path.join(yaml_data['root'], yaml_data['index_command']),\n '-collection', yaml_data['collection'],\n '-generator', yaml_data['generator'],\n '-threads', str(yaml_data['threads']),\n '-input', yaml_data['input'],\n '-index', 'lucene-index.{0}.pos+docvectors{1}'.format(yaml_data['name'], '+rawdocs' if 'storeRawdocs' in yaml_data['index_options'] else '')\n ]\n index_command.extend(yaml_data['index_options'])\n return index_command" ]
[ "0.68858343", "0.6698154", "0.64274466", "0.6417586", "0.63482445", "0.6007961", "0.5947242", "0.5861968", "0.5853543", "0.57812166", "0.57532567", "0.57404816", "0.57012236", "0.5667653", "0.5642528", "0.5634883", "0.56331265", "0.5619371", "0.55785316", "0.5549118", "0.55464095", "0.55158126", "0.5503876", "0.5477135", "0.5477135", "0.54508245", "0.5447577", "0.54331696", "0.53988826", "0.5366012", "0.5364457", "0.5364457", "0.5328185", "0.5321456", "0.5315841", "0.52987677", "0.526957", "0.52435255", "0.52435255", "0.5226725", "0.52156776", "0.5189369", "0.5186127", "0.5136779", "0.51342326", "0.51206917", "0.5105691", "0.5103587", "0.5102006", "0.50899065", "0.5079994", "0.50753397", "0.5067556", "0.5067556", "0.5042267", "0.50338906", "0.5031976", "0.5006995", "0.49906424", "0.4985376", "0.49817425", "0.4976201", "0.497404", "0.4966959", "0.49635088", "0.49628258", "0.49625424", "0.4961201", "0.49563956", "0.49532452", "0.4950398", "0.4949601", "0.4949601", "0.4945903", "0.4942888", "0.49376485", "0.49376485", "0.49240825", "0.49220404", "0.49206442", "0.49002284", "0.49001536", "0.48981562", "0.4897275", "0.48856843", "0.48852035", "0.488426", "0.48605707", "0.48568758", "0.48566052", "0.48457992", "0.48293558", "0.48249304", "0.48224542", "0.4818457", "0.48121235", "0.48116657", "0.48111594", "0.48097378", "0.48091227" ]
0.85664165
0
Return the offset of the param inside this parameterized object. This does not need to account for shaped parameters, as it basically just sums up the parameter sizes which come before param.
def _offset_for(self, param): if param.has_parent(): p = param._parent_._get_original(param) if p in self.parameters: return reduce(lambda a,b: a + b.size, self.parameters[:p._parent_index_], 0) return self._offset_for(param._parent_) + param._parent_._offset_for(param) return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def offset(self):\n return self.__offset", "def offset(self):\n return self.__offset", "def get_offset(self):\n return self.offset", "def wm_offset(self):\n return self.get_par(\"offset\")", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def Offset(self) -> int:", "def Offset(self) -> int:", "def Offset(self) -> int:", "def offset(self):\n\n return self._offset", "def get_pos(self, mode, param, param_idx):\n\n if mode == 0:\n return param[param_idx]\n elif mode == 1:\n return self.ptr + param_idx + 1\n elif mode == 2:\n return self.r + param[param_idx]", "def offset(self) -> Tuple[int, int]:\n return (self.ioffset[0].to_pixels(self.parent.width),\n self.ioffset[1].to_pixels(self.parent.height))", "def axis_offset(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"axis_offset\")", "def axis_offset(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"axis_offset\")", "def axis_offset(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"axis_offset\")", "def offset(self):\n return self.query.offset", "def offset(self):\r\n return self._get_instantiation()[3]", "def get_field_relative_offset(self, field_name):\n return self.__field_offsets__[field_name]", "def get_input_offset(self):\n return ELFLING_PADDING + len(self.__data) - 4", "def min_offset(self):\n return self.offset", "def offset(self):\n return _PositionD(self._dx, self._dy)", "def get_offset(self):\r\n offset = self.offset\r\n\r\n if 'offset' in self.request_data:\r\n offset = self.request_data['offset']\r\n\r\n try:\r\n offset = int(offset)\r\n except ValueError:\r\n raise BadRequest(\"Invalid offset '%s' provided. Please provide an integer.\" % offset)\r\n\r\n if offset < 0:\r\n raise BadRequest(\"Invalid offset '%s' provided. 
Please provide a positive integer >= 0.\" % offset)\r\n\r\n return offset", "def parameter_index(self):\n return self._parameter_index", "def GetOffset(self, *args, **kwargs):\n pass", "def offset(self):\n try:\n return self._annotations[EventData.PROP_OFFSET].decode('UTF-8')\n except (KeyError, AttributeError):\n return None", "def _raveled_index_for(self, param):\n from ..param import ParamConcatenation\n if isinstance(param, ParamConcatenation):\n return np.hstack((self._raveled_index_for(p) for p in param.params))\n return param._raveled_index() + self._offset_for(param)", "def elemoffset(self):\n return self.offset // self.itemsize", "def pixel_offset(self, x, y):\n return y * self.width + x", "def get_offset(self, tuple_of_slices, shape):\n raise NotImplementedError()", "def _raveled_index_for(self, param):\n from .param import ParamConcatenation\n if isinstance(param, ParamConcatenation):\n return np.hstack((self._raveled_index_for(p) for p in param.params))\n return param._raveled_index() + self._offset_for(param)", "def param(self):\n return self._param", "def get_drawing_offset(self) -> Tuple2IntType:\n return self._drawing_offset", "def offset(self):\n return self.unpack_dword(0x0)", "def _param(self) ->nn.Parameter:\n return next(self.parameters())", "def offset(self):\n self._fetch_if_needed()\n return self._offset", "def top_offset(self):\n raise NotImplementedError", "def getOffset(self):\n return _libsbml.Unit_getOffset(self)", "def pos(self):\n return self.bbox().pos(self.offset)", "def param(self, param_nb: int) -> int:\n mode = get_digit_right_to_left(self.modes, param_nb - 1)\n param_index = self.pointer + param_nb\n if mode == 1:\n # immediate mode\n return param_index\n if mode == 2:\n # relative mode\n return self.relative_base + self.program[param_index]\n else:\n # position mode\n return self.program[param_index]", "def strides_shape_param(self, param):\n\t\tindex = self.variables['strides_format'].index(param)\n\t\treturn self.variables['strides'].shape[index]", "def countParam(self):\n return self.decl.args[mpi_array_calls[self.decl.name][self.pos]]", "def get_field_absolute_offset(self, field_name):\n return self.__file_offset__ + self.__field_offsets__[field_name]", "def offset(self):\r\n return self.buf[0].unib[9:11]", "def findParameter(self, pos):\n text = self.text()\n comma_pos = text.find(',', pos)\n if comma_pos == -1:\n comma_pos = len(text)\n left_comma = text.rfind(',', 0, comma_pos) + 1\n left_eq = text.rfind('=', 0, comma_pos) + 1\n left_delim = max(left_comma, left_eq)\n start = left_delim\n length = comma_pos - left_delim\n return start, length", "def local_param_size(self):\n size = 0\n for s in self.symbols[-1]:\n if self.symbols[-1][s].type == 'procedure': continue\n if not self.symbols[-1][s].isparam: continue\n size += 1\n return size", "def get_alignment_offset(self):\n\n return 0", "def smpte_offset(self) -> int:\n return self.__smpte_offset", "def get_offset_value():\n # TODO rename it 'get_margin_value'\n # should be greater than 2 (maybe 1 is enough)\n return 5", "def getVarIndexOffset(self) -> Optional[int]:\n m = self.varIndexBasePlusOffsetRE.search(self.description)\n if not m:\n return None\n return int(m.group(1))", "def chain_offset(self):\n return self._chain_offset", "def vsGetOffset(self, name, offset=0):\n nameparts = name.split('.')\n namedepth = len(nameparts) - 1\n depth = 0\n for fname,field in self.vsGetFields():\n if nameparts[depth] == fname:\n if depth == namedepth:\n return offset\n depth += 1\n return 
field.vsGetOffset('.'.join(nameparts[depth:]), offset=offset)\n offset += len(field)\n raise Exception(\"Invalid Field Specified!\")", "def get_offset(self):\n if self.shift >= 12 and not isinstance(self, OffsetVariable):\n return OffsetVariable(\n rank=self.rank,\n name=self.name,\n shift=self.shift,\n units=self.units,\n parent=self.parent,\n )\n return self", "def get_pos(self):\n self.__param_lock.acquire()\n pos = self.__pos\n self.__param_lock.release()\n return pos", "def offset_point(self,base, offset):\r\n return (base[0] + offset[0], base[1] + offset[1])", "def _as_parameter_(self):\n return POINT(self.x, self.y)", "async def get_focus_offset(self, **kwargs: Any) -> float:\n return 0", "def get_page_offset(self) -> int:\n page_offset: int = self.page_number\n if page_offset <= 1:\n return 0\n return (page_offset - 1) * self.get_page_size()", "def dtype_offset( dtype, name = None ):\n if name:\n # get the dtype for the named value\n # the offset is the second value\n return dtype.fields[ name ][ 1 ]\n else:\n return 0", "def param_to_point(self, param):\n return self.p1 + param * (self.p2 - self.p1)", "def max_offset(self):\n return self.offset + self.filesize - 1", "def input_position(self) -> int:\n return self._input_position", "def point_to_param(self, pt):\n r = self.p2 - self.p1\n return (pt - self.p1).dot(r) / r.square()", "def _get_parameter_count(self):\n parameters_d = 5;\n size_h = self.model.size_h\n return (size_h - 1) + size_h * (\n (size_h - 1) + parameters_d + (self.model.size_aa - 1) + \n (self.model.size_ss - 1) + (self.model.size_cis - 1)\n )", "def get_feature_offset(meta):\n with_label = meta.get(\"with_label\", False)\n with_match_id = meta.get(\"with_match_id\", False)\n id_range = meta.get(\"id_range\", 0)\n\n if with_match_id:\n if not id_range:\n id_range = 1\n\n offset = id_range\n if with_label:\n offset += 1\n\n return offset", "def tell(self):\n return self._offset", "def offset_from_start(self, part):\n index = self.parts.index(part)\n return sum([p.length for p in self.parts[:index]])", "def Offset(*args, **kwargs):\n return _gdi_.NativePixelData_Accessor_Offset(*args, **kwargs)", "def offset(self, offset):\n raise NotImplementedError(\"This should have been implemented.\")", "def tell(self):\n return self.offset", "def get_a_param(self):\n value = self.memory[self.exec_ptr + 1]\n Vm.validate_value(value)\n return value", "def _get_next_offset(self):\n return self.__offset", "def find_offset(self,value):\n return self.header.find_offset(value)", "def paddings_shape_param(self, param):\n\t\tindex = self.variables['paddings_format'].index(param)\n\t\treturn self.variables['paddings'].shape[index]", "def get_shapeOffset(self):\n try:\n _str_func = ' get_shapeOffset'.format(self)\n log.debug(\"|{0}| >> ... 
[{1}]\".format(_str_func,self)+ '-'*80)\n \n ml_check = self.getBlockParents()\n ml_check.insert(0,self)\n \n for mBlock in ml_check:\n l_attrs = ['controlOffset','skinOffset']\n for a in l_attrs:\n if mBlock.hasAttr(a):\n v = mBlock.getMayaAttr(a)\n log.debug(\"|{0}| >> {1} attr found on rigBlock: {2} | {3}\".format(_str_func,a,v,mBlock.mNode)) \n return v \n return 1\n except Exception,err:cgmGEN.cgmExceptCB(Exception,err,msg=vars())", "def word_offset(signame, argname):\n return \"CCP_%s_%s_WORD_OFFSET\" % (\n signame.upper(), argname.upper())", "def num_param(self):\n return len(self._parameters)", "def extend_param(self):\n return self._extend_param", "def pos(self):\n return (self.raw - self.raw_zero) / self.ratio", "def get_position(self) -> Tuple[int]:\n return self.position.copy()", "def get_params(self):\r\n return (self.w[:-1], self.w[-1])", "def Offset(*args, **kwargs):\n return _gdi_.Region_Offset(*args, **kwargs)", "def getPos(self):\n pos = [None,None]\n try:\n for i in self.itemType.find('parameters'):\n paramType = i.find('type').text.strip()\n if paramType.startswith('position-x'):\n pos[0] = round(float(self.params[i.find('name').text]))\n if paramType.startswith('position-y'):\n pos[1] = round(float(self.params[i.find('name').text]))\n except:\n pos = [-1,-1]\n return pos", "def get_dependent_param_points(self, param):\n if param == SHAPE_STRING:\n return self.shape_at, self.shape_value\n elif param == LOCATION_STRING:\n return self.loc_at, self.loc_value\n elif param == SCALE_STRING:\n return self.scale_at, self.scale_value\n else:\n err_msg = \"Parameter '{}' is unknown.\".format(param)\n raise ValueError(err_msg)", "def __call__(self, x):\n x, fmt = _convert_input(x, self.param_dim)\n result = self._offsets + x\n return _convert_output(result, fmt)", "def stride(self):\n\t\treturn self.strides_shape_param('W')", "def get_position(self):\n\n return (self._fileobj.tell() - self._pos) * 8 - self._bits", "def aic(self):\n return 2*self.number_of_parameters() - 2*self.ll[-1]", "def relative_position(self) -> Tuple[int, int]:\n return self.position[0] - self.region.rect.x, self.position[1] - self.region.rect.y", "def relative_position(self) -> Tuple[int, int]:\n return self.position[0] - self.region.rect.x, self.position[1] - self.region.rect.y", "def _calculate_offset(self, anchor_type, size=(0, 0)):\n w, h = self._surf.get_size()\n w2, h2 = size\n\n if anchor_type == 'topleft':\n return spyral.Vec2D(0, 0)\n elif anchor_type == 'topright':\n return spyral.Vec2D(w - w2, 0)\n elif anchor_type == 'midtop':\n return spyral.Vec2D((w - w2) / 2., 0)\n elif anchor_type == 'bottomleft':\n return spyral.Vec2D(0, h - h2)\n elif anchor_type == 'bottomright':\n return spyral.Vec2D(w - w2, h - h2)\n elif anchor_type == 'midbottom':\n return spyral.Vec2D((w - w2) / 2., h - h2)\n elif anchor_type == 'midleft':\n return spyral.Vec2D(0, (h - h2) / 2.)\n elif anchor_type == 'midright':\n return spyral.Vec2D(w - w2, (h - h2) / 2.)\n elif anchor_type == 'center':\n return spyral.Vec2D((w - w2) / 2., (h - h2) / 2.)\n else:\n return spyral.Vec2D(anchor_type) - spyral.Vec2D(w2, h2)", "def get_position(self) -> typing.Tuple[int, int]:\n raise NotImplementedError", "def get(self):\n return self.x-self.offset", "def get_param_idx(model, target_name):\n for i, (param_name, param) in enumerate(model.get_parameters().items()):\n if param_name == target_name:\n return i", "def data_offset(self):\n if self.raw_data_length() < 5 or self.raw_data_length() >= 0x80000000:\n return 
self.absolute_offset(0x8)\n else:\n return self.abs_offset_from_hbin_offset(self.unpack_dword(0x8))" ]
[ "0.65088356", "0.65088356", "0.6396925", "0.63015145", "0.6291252", "0.6291252", "0.6291252", "0.6291252", "0.6291252", "0.6291252", "0.6291252", "0.62532693", "0.62532693", "0.62532693", "0.62496376", "0.62265295", "0.61739165", "0.61539537", "0.61539537", "0.61539537", "0.61262417", "0.6096628", "0.60391563", "0.6031847", "0.6002478", "0.59846246", "0.59754264", "0.5955708", "0.5952924", "0.59426564", "0.5919122", "0.5918565", "0.59156764", "0.5899489", "0.5879652", "0.5850257", "0.5849471", "0.5832588", "0.5819702", "0.5798088", "0.5768284", "0.5759162", "0.5739973", "0.5730045", "0.571736", "0.57130414", "0.56650287", "0.5661091", "0.566092", "0.56535083", "0.56412345", "0.5632512", "0.563096", "0.5611889", "0.55522496", "0.55486584", "0.5547278", "0.55140036", "0.5511098", "0.55048496", "0.55033845", "0.54977906", "0.5487567", "0.5482972", "0.5479308", "0.5469212", "0.54560846", "0.5436348", "0.5434381", "0.54233664", "0.53802943", "0.53801507", "0.53793603", "0.536975", "0.53591233", "0.5349167", "0.5340663", "0.53337336", "0.5324466", "0.53135663", "0.53123814", "0.5310006", "0.52983326", "0.5285311", "0.5284802", "0.5267793", "0.5262269", "0.52579904", "0.52536845", "0.5238217", "0.5229262", "0.52106804", "0.52026147", "0.52026147", "0.5198052", "0.5194524", "0.5186592", "0.5186073", "0.5172832" ]
0.79988617
1
Get the raveled index for a param: an int array containing the indexes of the flattened param inside this parameterized logic. !Warning! Be sure to call this method on the highest parent of a hierarchy, as it uses the fixes to do its work.
def _raveled_index_for(self, param): from ..param import ParamConcatenation if isinstance(param, ParamConcatenation): return np.hstack((self._raveled_index_for(p) for p in param.params)) return param._raveled_index() + self._offset_for(param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _raveled_index_for(self, param):\n from .param import ParamConcatenation\n if isinstance(param, ParamConcatenation):\n return np.hstack((self._raveled_index_for(p) for p in param.params))\n return param._raveled_index() + self._offset_for(param)", "def _raveled_index_for_transformed(self, param):\n ravi = self._raveled_index_for(param)\n if self._has_fixes():\n fixes = self._fixes_\n ### Transformed indices, handling the offsets of previous fixes\n transformed = (np.r_[:self.size] - (~fixes).cumsum())\n return transformed[ravi[fixes[ravi]]]\n else:\n return ravi", "def _get_indexes(self, participants):\n tr_idx = int(np.floor(self.tr_size*len(participants)))\n j = self.val_size + self.tr_size\n val_idx = int(np.floor(j*len(participants)))\n return tr_idx, val_idx", "def ravel_index(x, dims):\n i = 0\n for dim, j in zip(dims, x):\n i *= dim\n i += j\n return i", "def get_param_indexes(self):\n self.debug.start_function('get_param_indexes')\n\n for i, key in enumerate(self.mcmc_version.param_keys):\n self.param_idxs[key] = i\n for i, key in enumerate(self.mcmc_version.interp_keys):\n self.interp_idxs[key] = i\n\n self.debug.end_function()", "def _get_array_index(array_path):\n\n if not array_path.startswith('@'):\n raise XJPathError('Array index must start from @ symbol.')\n array_path = array_path[1:]\n if array_path == 'last':\n return -1\n if array_path == 'first':\n return 0\n if array_path.isdigit() or (array_path.startswith('-')\n and array_path[1:].isdigit()):\n return int(array_path)\n else:\n raise XJPathError('Unknown index reference', (array_path,))", "def _raveled_index(self):\n return np.r_[:self.size]", "def _raveled_index(self):\n return np.r_[:self.size]", "def tree_idx(tree,j1,J1,J2):\n j = j1\n for k in np.arange(J1+1,J2+1,1):\n j = tree[k]['IDX'][j]\n \n j2 = j\n return j2", "def get_indexed_param(self):\n switcher_index = self.input_param(\"switch_index\").value \n indexed_param = self.input_param(\"index_%s\" % switcher_index)\n if indexed_param is None:\n raise Exception(\"Switch index value for %s is out of bouned.\" % self)\n return indexed_param", "def pndindex(*args):\r\n return np.ndindex(*args)", "def _get_chunk_indexer(self, array):\n if self.data.num_chunks == 1:\n return np.broadcast_to(0, len(array))\n return np.digitize(array, self.offsets[1:])", "def getbaraidx(self,idx_,sub_,weights_):\n maxnum_ = self.getbaraidxinfo((idx_))\n i_ = ctypes.c_int32()\n j_ = ctypes.c_int32()\n num_ = ctypes.c_int64()\n _sub_minlength = (maxnum_)\n if (maxnum_) > 0 and sub_ is not None and len(sub_) != (maxnum_):\n raise ValueError(\"Array argument sub is not long enough: Is %d, expected %d\" % (len(sub_),(maxnum_)))\n if isinstance(sub_,numpy.ndarray) and not sub_.flags.writeable:\n raise ValueError(\"Argument sub must be writable\")\n if sub_ is None:\n raise ValueError(\"Argument sub may not be None\")\n if isinstance(sub_, numpy.ndarray) and sub_.dtype is numpy.dtype(numpy.int64) and sub_.flags.contiguous:\n _sub_copyarray = False\n _sub_tmp = ctypes.cast(sub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif sub_ is not None:\n _sub_copyarray = True\n _sub_np_tmp = numpy.zeros(len(sub_),numpy.dtype(numpy.int64))\n _sub_np_tmp[:] = sub_\n assert _sub_np_tmp.flags.contiguous\n _sub_tmp = ctypes.cast(_sub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _sub_copyarray = False\n _sub_tmp = None\n \n _weights_minlength = (maxnum_)\n if (maxnum_) > 0 and weights_ is not None and len(weights_) != (maxnum_):\n raise ValueError(\"Array 
argument weights is not long enough: Is %d, expected %d\" % (len(weights_),(maxnum_)))\n if isinstance(weights_,numpy.ndarray) and not weights_.flags.writeable:\n raise ValueError(\"Argument weights must be writable\")\n if weights_ is None:\n raise ValueError(\"Argument weights may not be None\")\n if isinstance(weights_, numpy.ndarray) and weights_.dtype is numpy.dtype(numpy.float64) and weights_.flags.contiguous:\n _weights_copyarray = False\n _weights_tmp = ctypes.cast(weights_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif weights_ is not None:\n _weights_copyarray = True\n _weights_np_tmp = numpy.zeros(len(weights_),numpy.dtype(numpy.float64))\n _weights_np_tmp[:] = weights_\n assert _weights_np_tmp.flags.contiguous\n _weights_tmp = ctypes.cast(_weights_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _weights_copyarray = False\n _weights_tmp = None\n \n res = __library__.MSK_XX_getbaraidx(self.__nativep,idx_,maxnum_,ctypes.byref(i_),ctypes.byref(j_),ctypes.byref(num_),_sub_tmp,_weights_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n i_ = i_.value\n _i_return_value = i_\n j_ = j_.value\n _j_return_value = j_\n num_ = num_.value\n _num_return_value = num_\n if _sub_copyarray:\n sub_[:] = _sub_np_tmp\n if _weights_copyarray:\n weights_[:] = _weights_np_tmp\n return (_i_return_value,_j_return_value,_num_return_value)", "def pndindex(*args):\n return np.ndindex(*args)", "def getbaraidxij(self,idx_):\n i_ = ctypes.c_int32()\n j_ = ctypes.c_int32()\n res = __library__.MSK_XX_getbaraidxij(self.__nativep,idx_,ctypes.byref(i_),ctypes.byref(j_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n i_ = i_.value\n _i_return_value = i_\n j_ = j_.value\n _j_return_value = j_\n return (_i_return_value,_j_return_value)", "def getind(self,start,end,blk):\n\n if blk is None:\n # Return all blocks\n blk = np.arange(self.ind[start].size)\n\n ind=np.array([])\n for k,val in enumerate(blk):\n ind=np.append(ind,np.arange(self.ind[start][val],self.ind[end][val]))\n return ind.astype(int)", "def get_index(band_nums,chan_num):\n ch_index=np.searchsorted(band_nums,chan_num)\n return int(ch_index)", "def get_array_index_permutations(param):\n indices = list()\n\n try:\n for d in reversed(param.get(\"dimensions\")):\n i = list()\n for x in range(0, d.get(\"len\")):\n i.append(x)\n indices.append(i)\n\n array_dereferences = list(itertools.product(*indices))\n return array_dereferences\n\n except TypeError:\n return list()", "def get_index_param(self, list_of_parameters_and_redshift, multiple_redshift=False):\n idx = pd.IndexSlice\n if multiple_redshift:\n ind = idx[self.data_type,list_of_parameters_and_redshift[0]] # first value is the redshift\n else :\n ind = idx[self.data_type,:]\n for i in range (self.num_parameters):\n if multiple_redshift:\n ind += idx[:,list_of_parameters_and_redshift[i+1]] # first value is the redshift\n else : \n ind += idx[:,list_of_parameters_and_redshift[i]] \n return ind", "def get_ray_index_for_grid_point(ray, grid_idx, n_depth_pts):\n if ray.mu < 0:\n return (grid_idx)\n else:\n return (n_depth_pts - (grid_idx + 1))", "def ravel_indices(shape, *args):\n new_positions = []\n for arg in args:\n new_positions.append(np.ravel_multi_index(arg, shape))\n return new_positions", "def return_inds(arr, target):\n\n # Convert list to numpy array\n arr = np.array(arr)\n # Determine all possible combinations, excluding combinations of the same number\n arr_combs = 
list(combinations(arr, 2))\n \n # Determine the sum of each combination\n sum_arr = np.array(list((map(sum, arr_combs)))) \n \n # Determine the index where the sum is equal to our target\n vals = arr_combs[np.where(sum_arr == target)[0][0]]\n \n # Determine the two indices\n ind_1 = np.where(arr == vals[0])[0][0]\n ind_2 = np.where(arr == vals[1])[0][0]\n\n return ind_1, ind_2", "def getbaraidxij(self,idx_): # 3\n res,resargs = self.__obj.getbaraidxij(idx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _i_return_value,_j_return_value = resargs\n return _i_return_value,_j_return_value", "def get_grid_index_for_ray_point(ray, ray_idx, n_depth_pts):\n if ray.mu < 0:\n return (ray_idx)\n else:\n return (n_depth_pts - ray_idx - 1)", "def _offset_for(self, param):\n if param.has_parent():\n p = param._parent_._get_original(param)\n if p in self.parameters:\n return reduce(lambda a,b: a + b.size, self.parameters[:p._parent_index_], 0)\n return self._offset_for(param._parent_) + param._parent_._offset_for(param)\n return 0", "def _offset_for(self, param):\n if param.has_parent():\n p = param._parent_._get_original(param)\n if p in self.parameters:\n return reduce(lambda a,b: a + b.size, self.parameters[:p._parent_index_], 0)\n return self._offset_for(param._parent_) + param._parent_._offset_for(param)\n return 0", "def _to_flat_index(self, idx_in):\n idx_in = tuple([np.array(z, ndmin=1, copy=False) for z in idx_in])\n msk = np.all(np.stack([t < n for t, n in zip(idx_in, self.shape)]), axis=0)\n idx = np.ravel_multi_index(\n tuple([t[msk] for t in idx_in]), self.shape, mode=\"wrap\"\n )\n\n return idx, msk", "def one_dim_index(self, i, j):\n return int(i + j * self.nx)", "def mainIndices(self):\n return self.i1, self.i2", "def i_index(self, coord):\n return coord + 1 if coord + 1 > self.dimensions - 1 else 0", "def unravel_index(ijk, n):\n \n if type(ijk) is int:\n return ijk\n if len(ijk)==1:\n return ijk[0]\n\n assert (np.diff(ijk)>0).all()\n assert all([i<n for i in ijk])\n\n ix = sum([int(binom(n-1-i,len(ijk)-1)) for i in range(ijk[0])])\n for d in range(1, len(ijk)-1):\n if (ijk[d]-ijk[d-1])>1:\n ix += sum([int(binom(n-i-1,len(ijk)-d-1)) for i in range(ijk[d-1]+1, ijk[d])])\n ix += ijk[-1] -ijk[-2] -1\n return ix", "def parent_id(neuron, selected_index):\n parent_id = np.array([], dtype=int)\n for i in selected_index:\n p = neuron.parent_index[i]\n while(~np.any(selected_index == p)):\n p = neuron.parent_index[p]\n (ind,) = np.where(selected_index == p)\n parent_id = np.append(parent_id, ind)\n return parent_id", "def recursive_index_decode(int_array, max=32767, min=-32768):\n out_arr = []\n decoded_val = 0\n for item in int_array.tolist():\n if item==max or item==min:\n decoded_val += item\n else:\n decoded_val += item\n out_arr.append(decoded_val)\n decoded_val = 0\n return numpy.asarray(out_arr,dtype=numpy.int32)", "def index(self, x) -> int:\n pass", "def getIntArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def getbarcidx(self,idx_,sub_,weights_):\n maxnum_ = self.getbarcidxinfo((idx_))\n j_ = ctypes.c_int32()\n num_ = ctypes.c_int64()\n _sub_minlength = (maxnum_)\n if (maxnum_) > 0 and sub_ is not None and len(sub_) != (maxnum_):\n raise ValueError(\"Array argument sub is not long enough: Is %d, expected %d\" % (len(sub_),(maxnum_)))\n if isinstance(sub_,numpy.ndarray) and not sub_.flags.writeable:\n raise ValueError(\"Argument sub must be writable\")\n if sub_ is None:\n raise ValueError(\"Argument sub may not be None\")\n if 
isinstance(sub_, numpy.ndarray) and sub_.dtype is numpy.dtype(numpy.int64) and sub_.flags.contiguous:\n _sub_copyarray = False\n _sub_tmp = ctypes.cast(sub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif sub_ is not None:\n _sub_copyarray = True\n _sub_np_tmp = numpy.zeros(len(sub_),numpy.dtype(numpy.int64))\n _sub_np_tmp[:] = sub_\n assert _sub_np_tmp.flags.contiguous\n _sub_tmp = ctypes.cast(_sub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _sub_copyarray = False\n _sub_tmp = None\n \n _weights_minlength = (maxnum_)\n if (maxnum_) > 0 and weights_ is not None and len(weights_) != (maxnum_):\n raise ValueError(\"Array argument weights is not long enough: Is %d, expected %d\" % (len(weights_),(maxnum_)))\n if isinstance(weights_,numpy.ndarray) and not weights_.flags.writeable:\n raise ValueError(\"Argument weights must be writable\")\n if weights_ is None:\n raise ValueError(\"Argument weights may not be None\")\n if isinstance(weights_, numpy.ndarray) and weights_.dtype is numpy.dtype(numpy.float64) and weights_.flags.contiguous:\n _weights_copyarray = False\n _weights_tmp = ctypes.cast(weights_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif weights_ is not None:\n _weights_copyarray = True\n _weights_np_tmp = numpy.zeros(len(weights_),numpy.dtype(numpy.float64))\n _weights_np_tmp[:] = weights_\n assert _weights_np_tmp.flags.contiguous\n _weights_tmp = ctypes.cast(_weights_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _weights_copyarray = False\n _weights_tmp = None\n \n res = __library__.MSK_XX_getbarcidx(self.__nativep,idx_,maxnum_,ctypes.byref(j_),ctypes.byref(num_),_sub_tmp,_weights_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n j_ = j_.value\n _j_return_value = j_\n num_ = num_.value\n _num_return_value = num_\n if _sub_copyarray:\n sub_[:] = _sub_np_tmp\n if _weights_copyarray:\n weights_[:] = _weights_np_tmp\n return (_j_return_value,_num_return_value)", "def find_pivot_idx(arr: List[int]) -> int:\n\n def _find_pivot_idx_rec(arr: List[int], low: int, high: int):\n # base cases for recussion\n if high < low:\n return -1 # cannot find\n if high == low:\n return high\n # ----------------------------\n\n mid = (low + high) // 2\n print(\"mid=\", mid)\n\n assert mid < high\n # consider if we pin-point the pivot\n if (arr[mid] > arr[mid+1]):\n return mid\n\n if (arr[mid-1]> arr[mid]):\n return mid-1\n\n\n if arr[low] >= arr[mid]:\n return _find_pivot_idx_rec(arr, low, mid-1)\n\n # if arr[mid+1] >= arr[high]:\n return _find_pivot_idx_rec(arr, mid+1, high)\n\n # -------------------\n ret = _find_pivot_idx_rec(arr, 0, len(arr)-1)\n if ret == -1:\n raise ValueError(\"Cannot find the pivot point.\")\n\n return ret", "def sub2ind(self, ix, iy):\n idx = np.ravel_multi_index((ix, iy), self.shape)\n return idx", "def GetCellLinearIndex(self, vtkAMRBox, p_int, p_int_1, p_int_2, p_int=..., p_int=..., p_int=...):\n ...", "def get_main_points(neuron):\n (branch_index,) = np.where(neuron.branch_order[neuron.n_soma:] == 2)\n (endpoint_index,) = np.where(neuron.branch_order[neuron.n_soma:] == 0)\n selected_index = np.union1d(branch_index + neuron.n_soma,\n endpoint_index + neuron.n_soma)\n selected_index = np.append(range(neuron.n_soma), selected_index)\n return selected_index", "def get_child_indices(idx: int):\n return 2 * idx + 1, 2 * idx + 2", "def get_index(self, x, y):\n i = (y - self.y0) // self.dy\n j = (x - self.x0) // self.dx\n i = min(max(i, 0), self.n-1)\n j = min(max(j, 
0), self.m-1)\n return [i, j]", "def GetEdgeArray(self, p_int):\n ...", "def update_idx(tmp, array):\n for i in range(0, len(array)):\n if list(tmp).count(i)>1:\n # Some index in idx may be duplicated because some leaves have the same scalar value.\n idx = [index for index, value in enumerate(tmp) if value == i]\n new = np.array(tmp)[idx]+np.array(array[idx]).argsort()*0.1\n for j in range(0, len(idx)):\n tmp[idx[j]]=new[j]\n sort_list = sorted(tmp)\n idx = []\n for i in range(0, len(tmp)):\n idx.append(sort_list.index(tmp[i]))\n return idx", "def parent_idx(idx):\n return (idx - 1) >> 1", "def get_current_index(self, index):\n\n if self.method == 1:\n current_idx = np.where((self.unassigned_data[0,:]==self.unassigned_data_relax[0,index]) & \\\n (self.unassigned_data[1,:]==self.unassigned_data_relax[1,index]) & \\\n (self.unassigned_data[2,:]==self.unassigned_data_relax[2,index]) & \\\n (self.unassigned_data[3,:]==self.unassigned_data_relax[3,index]) & \\\n (self.unassigned_data[4,:]==self.unassigned_data_relax[4,index]))\n else:\n current_idx = np.where((self.unassigned_data[0,:]==self.unassigned_data_relax[0,index]) & \\\n (self.unassigned_data[1,:]==self.unassigned_data_relax[1,index]) & \\\n (self.unassigned_data[2,:]==self.unassigned_data_relax[2,index]) & \\\n (self.unassigned_data[3,:]==self.unassigned_data_relax[3,index]))\n\n current_idx = current_idx[0][0]\n\n return current_idx", "def _get_indx(self, t):\n t = np.array(t)\n a = (t[:, np.newaxis] <= self._data['stop']) & (t[:, np.newaxis] >=\n self._data['start'])\n return np.array([np.where(row)[0][0] for row in a])", "def get_pulling_indices(self, weight):\n pass", "def _get_indexer_level_0(self, target) -> npt.NDArray[np.intp]:\n lev = self.levels[0]\n codes = self._codes[0]\n cat = Categorical.from_codes(codes=codes, categories=lev, validate=False)\n ci = Index(cat)\n return ci.get_indexer_for(target)", "def tril_indices_from(arr,k=0):\r\n if not arr.ndim==2 and arr.shape[0] == arr.shape[1]:\r\n raise ValueError(\"input array must be 2-d and square\")\r\n return tril_indices(arr.shape[0],k)", "def get_data_index(self, data, data_point):\n\n if self.method == 1:\n idx = np.where((data[0,:]==data_point[0]) & \\\n (data[1,:]==data_point[1]) & \\\n (data[2,:]==data_point[2]) & \\\n (data[3,:]==data_point[3]) & \\\n (data[4,:]==data_point[4]))\n else:\n idx = np.where((data[0,:]==data_point[0]) & \\\n (data[1,:]==data_point[1]) & \\\n (data[2,:]==data_point[2]) & \\\n (data[3,:]==data_point[3]))\n\n idx = idx[0][0]\n\n return idx", "def get_locs(self, seq) -> npt.NDArray[np.intp]:\n\n # must be lexsorted to at least as many levels\n true_slices = [i for (i, s) in enumerate(com.is_true_slices(seq)) if s]\n if true_slices and true_slices[-1] >= self._lexsort_depth:\n raise UnsortedIndexError(\n \"MultiIndex slicing requires the index to be lexsorted: slicing \"\n f\"on levels {true_slices}, lexsort depth {self._lexsort_depth}\"\n )\n\n if any(x is Ellipsis for x in seq):\n raise NotImplementedError(\n \"MultiIndex does not support indexing with Ellipsis\"\n )\n\n n = len(self)\n\n def _to_bool_indexer(indexer) -> npt.NDArray[np.bool_]:\n if isinstance(indexer, slice):\n new_indexer = np.zeros(n, dtype=np.bool_)\n new_indexer[indexer] = True\n return new_indexer\n return indexer\n\n # a bool indexer for the positions we want to take\n indexer: npt.NDArray[np.bool_] | None = None\n\n for i, k in enumerate(seq):\n lvl_indexer: npt.NDArray[np.bool_] | slice | None = None\n\n if com.is_bool_indexer(k):\n if len(k) != n:\n raise 
ValueError(\n \"cannot index with a boolean indexer that \"\n \"is not the same length as the index\"\n )\n lvl_indexer = np.asarray(k)\n\n elif is_list_like(k):\n # a collection of labels to include from this level (these are or'd)\n\n # GH#27591 check if this is a single tuple key in the level\n try:\n lvl_indexer = self._get_level_indexer(k, level=i, indexer=indexer)\n except (InvalidIndexError, TypeError, KeyError) as err:\n # InvalidIndexError e.g. non-hashable, fall back to treating\n # this as a sequence of labels\n # KeyError it can be ambiguous if this is a label or sequence\n # of labels\n # github.com/pandas-dev/pandas/issues/39424#issuecomment-871626708\n for x in k:\n if not is_hashable(x):\n # e.g. slice\n raise err\n # GH 39424: Ignore not founds\n # GH 42351: No longer ignore not founds & enforced in 2.0\n # TODO: how to handle IntervalIndex level? (no test cases)\n item_indexer = self._get_level_indexer(\n x, level=i, indexer=indexer\n )\n if lvl_indexer is None:\n lvl_indexer = _to_bool_indexer(item_indexer)\n elif isinstance(item_indexer, slice):\n lvl_indexer[item_indexer] = True # type: ignore[index]\n else:\n lvl_indexer |= item_indexer\n\n if lvl_indexer is None:\n # no matches we are done\n # test_loc_getitem_duplicates_multiindex_empty_indexer\n return np.array([], dtype=np.intp)\n\n elif com.is_null_slice(k):\n # empty slice\n if indexer is None and i == len(seq) - 1:\n return np.arange(n, dtype=np.intp)\n continue\n\n else:\n # a slice or a single label\n lvl_indexer = self._get_level_indexer(k, level=i, indexer=indexer)\n\n # update indexer\n lvl_indexer = _to_bool_indexer(lvl_indexer)\n if indexer is None:\n indexer = lvl_indexer\n else:\n indexer &= lvl_indexer\n if not np.any(indexer) and np.any(lvl_indexer):\n raise KeyError(seq)\n\n # empty indexer\n if indexer is None:\n return np.array([], dtype=np.intp)\n\n pos_indexer = indexer.nonzero()[0]\n return self._reorder_indexer(seq, pos_indexer)", "def index_lvl(a, thlds):\n if isinstance(a, (float, int)):\n return bisect_right(thlds, a)\n\n res = np.zeros(len(a))\n for thld in thlds:\n res += thld <= a\n return res.astype(int)", "def get_view_idx(self, parent_idx):\n a = self.p_rows.index(i)\n b = self.p_cols.index(j)\n return (a, b)", "def _index_list(self, level, node):\n if level >= self._max_level:\n raise ValueError(\"Invalid level: greater than `max_level`\")\n\n if node >= 2**level:\n raise ValueError(\"Invalid node\")\n\n return 2**level + node - 1", "def indices(self):\n return tuple([slice(*r) for r in self.location])", "def get_parent_index(index):\n if index == 0:\n return 0\n if index % 2 == 0:\n return int(index / 2 - 1)\n return int(index / 2)", "def GetPointToOneRingPointsArray(self, p_int):\n ...", "def _idxs_postformat_array(self):\n self.idxs = np.array(self.idxs)", "def get_ind(self,*q):\n try:\n if( len(q) == 1 ):\n x = q[0][:,0]\n y = q[0][:,1]\n z = q[0][:,2]\n else:\n x = q[0]\n y = q[1]\n z = q[2]\n try:\n cx = (x+0.5).astype(na.int32)\n cy = (y+0.5).astype(na.int32)\n cz = (z+0.5).astype(na.int32)\n except:\n cx = int(x+0.5)\n cy = int(y+0.5)\n cz = int(z+0.5)\n ind = cx + cy*self.dim[0]+cz*self.dim[0]*self.dim[1]\n return ind\n except Exception as error:\n print(error)\n return None", "def getbaraidx(self,idx_,sub,weights): # 3\n maxnum_ = self.getbaraidxinfo((idx_))\n if sub is None: raise TypeError(\"Invalid type for argument sub\")\n _copyback_sub = False\n if sub is None:\n sub_ = None\n else:\n try:\n sub_ = memoryview(sub)\n except TypeError:\n try:\n _tmparr_sub = 
array.array(\"q\",sub)\n except TypeError:\n raise TypeError(\"Argument sub has wrong type\")\n else:\n sub_ = memoryview(_tmparr_sub)\n _copyback_sub = True\n else:\n if sub_.format != \"q\":\n sub_ = memoryview(array.array(\"q\",sub))\n _copyback_sub = True\n if sub_ is not None and len(sub_) != (maxnum_):\n raise ValueError(\"Array argument sub has wrong length\")\n if weights is None: raise TypeError(\"Invalid type for argument weights\")\n _copyback_weights = False\n if weights is None:\n weights_ = None\n else:\n try:\n weights_ = memoryview(weights)\n except TypeError:\n try:\n _tmparr_weights = array.array(\"d\",weights)\n except TypeError:\n raise TypeError(\"Argument weights has wrong type\")\n else:\n weights_ = memoryview(_tmparr_weights)\n _copyback_weights = True\n else:\n if weights_.format != \"d\":\n weights_ = memoryview(array.array(\"d\",weights))\n _copyback_weights = True\n if weights_ is not None and len(weights_) != (maxnum_):\n raise ValueError(\"Array argument weights has wrong length\")\n res,resargs = self.__obj.getbaraidx(idx_,maxnum_,sub_,weights_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _i_return_value,_j_return_value,_num_return_value = resargs\n if _copyback_weights:\n weights[:] = _tmparr_weights\n if _copyback_sub:\n sub[:] = _tmparr_sub\n return _i_return_value,_j_return_value,_num_return_value", "def select(self, arr):\n\n return arr[self.relative_degree_idxs]", "def get_index(corners, i, jk):\n if type(jk) != list:\n jk = list(jk)\n assert corners.shape[1] == 3\n sol = np.where(np.bitwise_or(np.all(corners == [i] + jk, axis=1), \n np.all(corners == [i] + jk[::-1], axis=1)))[0]\n if len(sol) > 0: \n return sol[0]", "def index(self) -> int:", "def get_index_from_well(self, well):\n pass", "def _staticneighs_get_corestored_by_inds_notslice(self, inds):\n inds = [inds] if type(inds) == int else inds\n idxs = [self.idxs[i] for i in inds]\n idxs = np.array(idxs) if type(self.idxs) == np.ndarray else idxs\n if self.sp_relative_pos is not None:\n sp_relative_pos = [self.sp_relative_pos[i] for i in inds]\n else:\n sp_relative_pos = None\n return idxs, sp_relative_pos", "def _get_split_indices(self):\n\n cumsum = np.cumsum(\n np.concatenate((np.array([0], dtype=np.int8), self.split_sizes)))\n \n fold_inds = np.array(\n [(cumsum[n], cumsum[n + 1]) for n in range(self.n_splits)])\n\n return fold_inds", "def _staticneighs_get_corestored_by_inds_slice(self, inds):\n inds = [inds] if type(inds) == int else inds\n idxs = self.idxs\n if self.sp_relative_pos is not None:\n sp_relative_pos = [self.sp_relative_pos[i] for i in inds]\n else:\n sp_relative_pos = None\n return idxs, sp_relative_pos", "def _notstaticneighs_get_corestored_by_inds_slice(self, inds):\n inds = [inds] if type(inds) == int else inds\n idxs = self.idxs\n if self.sp_relative_pos is not None:\n sp_relative_pos = []\n for k in range(len(self.sp_relative_pos)):\n sp_relative_pos += [[self.sp_relative_pos[k][i] for i in inds]]\n else:\n sp_relative_pos = None\n return idxs, sp_relative_pos", "def ijk_array_to_indices(self, ijk_array):\n blocks = self.parent_block_count\n if not blocks:\n raise AttributeError(\"parent_block_count is required to calculate index\")\n if not isinstance(ijk_array, (list, tuple, np.ndarray)):\n raise ValueError(\"ijk_array must be a list of length-3 ijk values\")\n ijk_array = np.array(ijk_array)\n if len(ijk_array.shape) != 2 or ijk_array.shape[1] != 3:\n raise ValueError(\"ijk_array must be n x 3 array\")\n if not 
np.array_equal(ijk_array, ijk_array.astype(np.uint32)):\n raise ValueError(\"ijk values must be non-negative integers\")\n if np.any(np.max(ijk_array, axis=0) >= blocks):\n raise ValueError(\n \"ijk must be less than parent_block_count in each dimension\"\n )\n index = np.ravel_multi_index(\n multi_index=ijk_array.T,\n dims=blocks,\n order=\"F\",\n )\n return index", "def encode_to_flat_array_index(row, column, matrix):\n return row * matrix.cols + column", "def get_indexes(self, variable, *args):\n\n return [get_subset_idxs(data, min, max)\n for data, (min, max) in args]", "def starting_values(self, resids: NDArray) -> NDArray:", "def indXtoJ(indX):\n return np.unravel_index(indX % xx.size, xx.shape)", "def index_col(self, i0, i1, j0, j1):\n edges = self.h5['indexes']['bin1_offset'][i0:i1 + 1]\n index = []\n for lo1, hi1 in zip(edges[:-1], edges[1:]):\n if hi1 - lo1 > 0:\n bin2 = self.h5['pixels']['bin2_id'][lo1:hi1]\n mask = (bin2 >= j0) & (bin2 < j1)\n index.append(lo1 + np.flatnonzero(mask))\n if not index:\n return np.array([], dtype=int)\n else:\n return np.concatenate(index, axis=0)", "def getIndexRef(row, col, frame, midRange, maxIndex): \n indexRef = numpy.zeros(12).reshape(4,3)\n indexRef[:,0] = numpy.arange(-1, 3) * midRange + row\n indexRef[:,1] = numpy.arange(-1, 3) * midRange + col\n indexRef[:,2] = numpy.arange(-1, 3) * midRange + frame\n \n for i in range(4):\n for j in range(3):\n if indexRef[i,j] < 0 or indexRef[i,j] > maxIndex:\n indexRef[i,j] = -1\n return indexRef", "def get_indexes(self, dataset: BaseDataset) -> int:\n\n index = [np.random.randint(0, len(dataset)) for _ in range(1)]\n\n return index", "def GetItemIds(vDataItem,tid):\r\n track_ids = np.array(vDataItem.GetTrackIds())\r\n le = np.array(vDataItem.GetTrackEdges())\r\n wh = (track_ids == tid)\r\n idx = le[wh,:]\r\n idx = np.append(idx[:,0],idx[-1,1])\r\n return idx", "def index(self, arr, idx, temp = True, name = None):\n \n temp = temp or name is not None\n \n arr_t = arr.type\n\n if isinstance(arr_t, ScalarT):\n # even though it's not correct externally, it's\n # often more convenient to treat indexing\n # into scalars as the identity function.\n # Just be sure to catch this as an error in\n # the user's code earlier in the pipeline.\n return arr\n if isinstance(arr_t, TupleT):\n if isinstance(idx, Const):\n idx = idx.value\n\n assert isinstance(idx, int), \\\n \"Index into tuple must be an integer, got %s\" % idx\n if isinstance(idx, Const):\n idx = idx.value\n proj = self.tuple_proj(arr, idx)\n if temp:\n return self.assign_temp(proj, \"tuple_elt%d\" % idx if name is None else name)\n else:\n return proj\n\n if self.is_tuple(idx):\n indices = self.tuple_elts(idx)\n elif hasattr(idx, '__iter__'):\n indices = tuple(map(wrap_if_constant,idx))\n else:\n indices = (wrap_if_constant(idx),)\n\n n_required = arr_t.rank\n n_indices = len(indices)\n if n_indices < n_required:\n # all unspecified dimensions are considered fully sliced\n extra = (syntax_helpers.slice_none,) * (n_required - n_indices)\n indices = indices + extra\n\n if len(indices) > 1:\n idx = self.tuple(indices, \"index_tuple\" if name is None else name)\n else:\n idx = indices[0]\n\n t = arr_t.index_type(idx.type)\n idx_expr = Index(arr, idx, type=t)\n if temp:\n return self.assign_temp(idx_expr, \"array_elt\" if name is None else name)\n else:\n return idx_expr", "def find_index(self):\n current = self.from_grid\n #find index of \"*\"\n for x in range(len(current)):\n for y in range(len(current[x])):\n if current[x][y] == \"*\":\n index = 
(x,y)\n return index", "def _tree_field_indices(self):\n\n if self._tfi is not None:\n return self._tfi\n\n self.arbor._grow_tree(self)\n self._tfi = np.array([node.tree_id for node in self._tree_nodes])\n return self._tfi", "def parameter_index(self):\n return self._parameter_index", "def master_ndindex(self): # itermaster_indices(self):\n return itertools_product(\n *[range(*r) for r in self.location]\n ) # TODO check", "def loc_data_idx(loc_idx):\n retval = []\n for i in as_tuple(loc_idx):\n if isinstance(i, slice) and i.step is not None and i.step == -1:\n if i.stop is None:\n retval.append(slice(0, i.start+1, -i.step))\n else:\n retval.append(slice(i.stop+1, i.start+1, -i.step))\n elif isinstance(i, slice) and i.step is not None and i.step < -1:\n if i.stop is None:\n lmin = i.start\n while lmin >= 0:\n lmin += i.step\n retval.append(slice(lmin-i.step, i.start+1, -i.step))\n else:\n retval.append(slice(i.stop+1, i.start+1, -i.step))\n elif is_integer(i):\n retval.append(slice(i, i+1, 1))\n else:\n retval.append(i)\n return as_tuple(retval)", "def get_pent_idx(pent):\n pidx = 0\n for i in range(pent.shape[0]):\n for j in range(pent.shape[1]):\n if pent[i][j] != 0:\n pidx = pent[i][j]\n break\n if pidx != 0:\n break\n if pidx == 0:\n return -1\n return pidx - 1", "def linear_search(arr: IntList, query: int) -> int:\n arr_len: int = len(arr)\n for idx in range(arr_len):\n if arr[idx] == query:\n return idx\n return -1", "def getBinIndices(self, linear_index):\n return linear_index / self.magic_array % self.nbins_across_dims", "def get_node_indices_and_levels(nd: np.ndarray):\n indices = []\n lvs = []\n for j in range(1, nd.shape[0]):\n if j == 1:\n indices = nd[j]\n lvs = nd[j + 1]\n elif j % 2 != 0 and j > 1:\n indices = np.append(indices, nd[j])\n elif j % 2 == 0 and j > 2:\n lvs = np.append(lvs, nd[j])\n return indices, lvs", "def _index(tensor_3d, tensor_2d):\n x, y, z = tensor_3d.size()\n t = tensor_3d.reshape(x * y, z)\n tt = tensor_2d.reshape(x * y)\n v = t[torch.arange(x * y), tt]\n v = v.reshape(x, y)\n return v", "def ids_to_index(self, ids):\n index = (ids[0]*self._div + ids[1])*self.batch_per_file +ids[2]\n return(index)", "def right_child_idx(idx):\n return (idx + 1) << 1", "def _get_indices_from_iss(self, iss):\n iss = [iss] if type(iss) not in [np.ndarray, list] else iss\n if self.iss is not None:\n inds = []\n for i in iss:\n inds.append(list(self.iss).index(i))\n# else:\n# inds = iss\n return inds", "def get_index_3d_from_pos(self, pos):\n pos_wrapped = [wrap(x, L) for x, L in zip(pos, self.L)]\n index = [np.digitize(x, b) for x, b in zip(pos_wrapped, self.bins)]\n # subtract 1 from each index because np starts counting from 1\n index = [n-1 for n in index]\n return index", "def get_p_idx(self, node_idx):\n idx = (node_idx + 1) / 2 - 1\n return idx", "def get_local_indices(self, part, ctx):\n return self.map_to_global(\n F.arange(0, self.local_size(part), ctx=ctx), part\n )", "def get_parent_idx(self, view_idx):\n a, b = view_idx\n R, C = self.shape\n i = self.p_rows[a]\n j = self.p_cols[b]\n return (i, j)", "def _prog_field_indices(self):\n\n if self._pfi is not None:\n return self._pfi\n\n self.arbor._grow_tree(self)\n self._pfi = np.array([node.tree_id for node in self._prog_nodes])\n return self._pfi", "def loc2d(a,extremum='max'):\n forma=a.shape\n if len(forma)>2:raise \"Array dimension > 2\"\n if extremum!='min' and extremum!='max':\n raise 'Which extremum are you looking for?'\n x=ravel(a)\n if extremum=='min': i=argmin(x)\n else: i=argmax(x)\n i1=i/forma[1]\n 
i2=i%forma[1]\n return i1,i2", "def tril_indices_from(arr, k=0):\r\n if not arr.ndim == 2 and arr.shape[0] == arr.shape[1]:\r\n raise ValueError(\"input array must be 2-d and square\")\r\n return tril_indices(arr.shape[0], k)", "def __call__(self, *args):\n return args[self.i_dim]" ]
[ "0.71273017", "0.6010891", "0.59065056", "0.5804188", "0.58011395", "0.57862777", "0.57440937", "0.57440937", "0.57353306", "0.56626016", "0.56598043", "0.5625494", "0.5600237", "0.5588847", "0.5567432", "0.55587304", "0.55522966", "0.554781", "0.55181646", "0.5507306", "0.5506176", "0.549819", "0.5472546", "0.5460384", "0.54576814", "0.54576814", "0.54547673", "0.54545677", "0.5453246", "0.5439349", "0.541475", "0.5410925", "0.54090905", "0.54057467", "0.53998935", "0.5396655", "0.5393203", "0.5390229", "0.53772527", "0.537689", "0.53729", "0.53725594", "0.53710854", "0.53699034", "0.53590447", "0.535869", "0.53518236", "0.5331674", "0.53127295", "0.5300649", "0.53001887", "0.5293751", "0.5279391", "0.52713114", "0.52689946", "0.52655876", "0.5265093", "0.52581346", "0.5255241", "0.5253048", "0.52474797", "0.5247243", "0.5246158", "0.5244258", "0.52398866", "0.5236568", "0.5229946", "0.5225791", "0.5218637", "0.5216555", "0.5210089", "0.52002627", "0.519943", "0.5198048", "0.5193441", "0.5192343", "0.51889014", "0.5187412", "0.5183876", "0.51815706", "0.5173014", "0.51658005", "0.51538306", "0.5151522", "0.5144127", "0.51412994", "0.51407564", "0.51363623", "0.5135326", "0.5133245", "0.5131233", "0.5130042", "0.5128837", "0.5125917", "0.5123759", "0.51192826", "0.51174635", "0.51172507", "0.51144534", "0.5110717" ]
0.71871
0
Get the raveled index for a param within the transformed parameter array (optimizer array): an int array containing the indexes of the flattened param inside this parameterized logic. !Warning! Be sure to call this method on the highest parent of a hierarchy, as it uses the fixes to do its work. If you do not know what you are doing, do not use this method; it will have unexpected returns!
def _raveled_index_for_transformed(self, param): ravi = self._raveled_index_for(param) if self._has_fixes(): fixes = self._fixes_ ### Transformed indices, handling the offsets of previous fixes transformed = (np.r_[:self.size] - (~fixes).cumsum()) return transformed[ravi[fixes[ravi]]] else: return ravi
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _raveled_index_for(self, param):\n from ..param import ParamConcatenation\n if isinstance(param, ParamConcatenation):\n return np.hstack((self._raveled_index_for(p) for p in param.params))\n return param._raveled_index() + self._offset_for(param)", "def _raveled_index_for(self, param):\n from .param import ParamConcatenation\n if isinstance(param, ParamConcatenation):\n return np.hstack((self._raveled_index_for(p) for p in param.params))\n return param._raveled_index() + self._offset_for(param)", "def _raveled_index(self):\n return np.r_[:self.size]", "def _raveled_index(self):\n return np.r_[:self.size]", "def get_param_indexes(self):\n self.debug.start_function('get_param_indexes')\n\n for i, key in enumerate(self.mcmc_version.param_keys):\n self.param_idxs[key] = i\n for i, key in enumerate(self.mcmc_version.interp_keys):\n self.interp_idxs[key] = i\n\n self.debug.end_function()", "def get_indexed_param(self):\n switcher_index = self.input_param(\"switch_index\").value \n indexed_param = self.input_param(\"index_%s\" % switcher_index)\n if indexed_param is None:\n raise Exception(\"Switch index value for %s is out of bouned.\" % self)\n return indexed_param", "def _offset_for(self, param):\n if param.has_parent():\n p = param._parent_._get_original(param)\n if p in self.parameters:\n return reduce(lambda a,b: a + b.size, self.parameters[:p._parent_index_], 0)\n return self._offset_for(param._parent_) + param._parent_._offset_for(param)\n return 0", "def _offset_for(self, param):\n if param.has_parent():\n p = param._parent_._get_original(param)\n if p in self.parameters:\n return reduce(lambda a,b: a + b.size, self.parameters[:p._parent_index_], 0)\n return self._offset_for(param._parent_) + param._parent_._offset_for(param)\n return 0", "def ravel_index(x, dims):\n i = 0\n for dim, j in zip(dims, x):\n i *= dim\n i += j\n return i", "def get_analytically_computed_optimization_parameter_indices(self):\n indices = []\n if '/offsetParameterIndices' in self.f:\n indices.extend(self.f['/offsetParameterIndices'])\n\n if '/scalingParameterIndices' in self.f:\n indices.extend(self.f['/scalingParameterIndices'])\n\n if '/sigmaParameterIndices' in self.f:\n indices.extend(self.f['/sigmaParameterIndices'])\n\n return list(set(indices))", "def optimizer_array(self, p):\n f = None\n if self.has_parent() and self.constraints[__fixed__].size != 0:\n f = np.ones(self.size).astype(bool)\n f[self.constraints[__fixed__]] = FIXED\n elif self._has_fixes():\n f = self._fixes_\n if f is None:\n self.param_array.flat = p\n [np.put(self.param_array, ind, c.f(self.param_array.flat[ind]))\n #py3 fix\n #for c, ind in self.constraints.iteritems() if c != __fixed__]\n for c, ind in self.constraints.items() if c != __fixed__]\n else:\n self.param_array.flat[f] = p\n [np.put(self.param_array, ind[f[ind]], c.f(self.param_array.flat[ind[f[ind]]]))\n #py3 fix\n #for c, ind in self.constraints.iteritems() if c != __fixed__]\n for c, ind in self.constraints.items() if c != __fixed__]\n #self._highest_parent_.tie.propagate_val()\n\n self._optimizer_copy_transformed = False\n self.trigger_update()", "def parameter_index(self):\n return self._parameter_index", "def get_pulling_indices(self, weight):\n pass", "def _get_indexes(self, participants):\n tr_idx = int(np.floor(self.tr_size*len(participants)))\n j = self.val_size + self.tr_size\n val_idx = int(np.floor(j*len(participants)))\n return tr_idx, val_idx", "def _get_indexer_level_0(self, target) -> npt.NDArray[np.intp]:\n lev = self.levels[0]\n codes = 
self._codes[0]\n cat = Categorical.from_codes(codes=codes, categories=lev, validate=False)\n ci = Index(cat)\n return ci.get_indexer_for(target)", "def get_param_idx(model, target_name):\n for i, (param_name, param) in enumerate(model.get_parameters().items()):\n if param_name == target_name:\n return i", "def get_ray_index_for_grid_point(ray, grid_idx, n_depth_pts):\n if ray.mu < 0:\n return (grid_idx)\n else:\n return (n_depth_pts - (grid_idx + 1))", "def get_current_index(self, index):\n\n if self.method == 1:\n current_idx = np.where((self.unassigned_data[0,:]==self.unassigned_data_relax[0,index]) & \\\n (self.unassigned_data[1,:]==self.unassigned_data_relax[1,index]) & \\\n (self.unassigned_data[2,:]==self.unassigned_data_relax[2,index]) & \\\n (self.unassigned_data[3,:]==self.unassigned_data_relax[3,index]) & \\\n (self.unassigned_data[4,:]==self.unassigned_data_relax[4,index]))\n else:\n current_idx = np.where((self.unassigned_data[0,:]==self.unassigned_data_relax[0,index]) & \\\n (self.unassigned_data[1,:]==self.unassigned_data_relax[1,index]) & \\\n (self.unassigned_data[2,:]==self.unassigned_data_relax[2,index]) & \\\n (self.unassigned_data[3,:]==self.unassigned_data_relax[3,index]))\n\n current_idx = current_idx[0][0]\n\n return current_idx", "def __getitem__(self, i):\n return self._optimizer_list[i]", "def get_grid_index_for_ray_point(ray, ray_idx, n_depth_pts):\n if ray.mu < 0:\n return (ray_idx)\n else:\n return (n_depth_pts - ray_idx - 1)", "def _get_target_index(self):\n return (self.index + self.source_window * (not self.overlapping) +\n self.offset)", "def get_index_param(self, list_of_parameters_and_redshift, multiple_redshift=False):\n idx = pd.IndexSlice\n if multiple_redshift:\n ind = idx[self.data_type,list_of_parameters_and_redshift[0]] # first value is the redshift\n else :\n ind = idx[self.data_type,:]\n for i in range (self.num_parameters):\n if multiple_redshift:\n ind += idx[:,list_of_parameters_and_redshift[i+1]] # first value is the redshift\n else : \n ind += idx[:,list_of_parameters_and_redshift[i]] \n return ind", "def select(self, arr):\n\n return arr[self.relative_degree_idxs]", "def optimizer_array(self):\n if self.__dict__.get('_optimizer_copy_', None) is None or self.size != self._optimizer_copy_.size:\n self._optimizer_copy_ = np.empty(self.size)\n\n if not self._optimizer_copy_transformed:\n self._optimizer_copy_.flat = self.param_array.flat\n #py3 fix\n #[np.put(self._optimizer_copy_, ind, c.finv(self.param_array[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__]\n [np.put(self._optimizer_copy_, ind, c.finv(self.param_array[ind])) for c, ind in self.constraints.items() if c != __fixed__]\n if self.has_parent() and (self.constraints[__fixed__].size != 0 or self._has_ties()):\n fixes = np.ones(self.size).astype(bool)\n fixes[self.constraints[__fixed__]] = FIXED\n return self._optimizer_copy_[np.logical_and(fixes, self._highest_parent_.tie.getTieFlag(self))]\n elif self._has_fixes():\n return self._optimizer_copy_[self._fixes_]\n\n self._optimizer_copy_transformed = True\n\n return self._optimizer_copy_", "def roi(path, adjust_index=True):\n mat = loadmat(path)\n roi_idx = np.ravel(mat['roi_sources'])\n if adjust_index:\n roi_idx -= 1\n return roi_idx", "def ravel_indices(shape, *args):\n new_positions = []\n for arg in args:\n new_positions.append(np.ravel_multi_index(arg, shape))\n return new_positions", "def update_idx(tmp, array):\n for i in range(0, len(array)):\n if list(tmp).count(i)>1:\n # Some index in idx may be duplicated 
because some leaves have the same scalar value.\n idx = [index for index, value in enumerate(tmp) if value == i]\n new = np.array(tmp)[idx]+np.array(array[idx]).argsort()*0.1\n for j in range(0, len(idx)):\n tmp[idx[j]]=new[j]\n sort_list = sorted(tmp)\n idx = []\n for i in range(0, len(tmp)):\n idx.append(sort_list.index(tmp[i]))\n return idx", "def get_main_points(neuron):\n (branch_index,) = np.where(neuron.branch_order[neuron.n_soma:] == 2)\n (endpoint_index,) = np.where(neuron.branch_order[neuron.n_soma:] == 0)\n selected_index = np.union1d(branch_index + neuron.n_soma,\n endpoint_index + neuron.n_soma)\n selected_index = np.append(range(neuron.n_soma), selected_index)\n return selected_index", "def get_array_index_permutations(param):\n indices = list()\n\n try:\n for d in reversed(param.get(\"dimensions\")):\n i = list()\n for x in range(0, d.get(\"len\")):\n i.append(x)\n indices.append(i)\n\n array_dereferences = list(itertools.product(*indices))\n return array_dereferences\n\n except TypeError:\n return list()", "def one_dim_index(self, i, j):\n return int(i + j * self.nx)", "def tree_idx(tree,j1,J1,J2):\n j = j1\n for k in np.arange(J1+1,J2+1,1):\n j = tree[k]['IDX'][j]\n \n j2 = j\n return j2", "def tril_indices_from(arr,k=0):\r\n if not arr.ndim==2 and arr.shape[0] == arr.shape[1]:\r\n raise ValueError(\"input array must be 2-d and square\")\r\n return tril_indices(arr.shape[0],k)", "def return_inds(arr, target):\n\n # Convert list to numpy array\n arr = np.array(arr)\n # Determine all possible combinations, excluding combinations of the same number\n arr_combs = list(combinations(arr, 2))\n \n # Determine the sum of each combination\n sum_arr = np.array(list((map(sum, arr_combs)))) \n \n # Determine the index where the sum is equal to our target\n vals = arr_combs[np.where(sum_arr == target)[0][0]]\n \n # Determine the two indices\n ind_1 = np.where(arr == vals[0])[0][0]\n ind_2 = np.where(arr == vals[1])[0][0]\n\n return ind_1, ind_2", "def parent_id(neuron, selected_index):\n parent_id = np.array([], dtype=int)\n for i in selected_index:\n p = neuron.parent_index[i]\n while(~np.any(selected_index == p)):\n p = neuron.parent_index[p]\n (ind,) = np.where(selected_index == p)\n parent_id = np.append(parent_id, ind)\n return parent_id", "def recompressionIdx(self, opt=1):\n maskCr = np.full(len(self.raw), False)\n if opt == 1:\n maskCr[self.brkIdx1] = True\n maskCr[self.brkIdx2] = True\n elif opt == 2:\n maskCr[self.brkIdx1: self.brkIdx2+1] = True\n elif opt == 3:\n maskCr[self.brkIdx1: self.brkIdx3+1] = True\n # -- Linear regresion\n sigmaCr = self.raw['stress'].iloc[maskCr]\n sigmaCrlog = np.log10(sigmaCr)\n eCr = self.raw['e'].iloc[maskCr]\n idxCrInt, idxCr = polyfit(sigmaCrlog, eCr, deg=1)\n r2Cr = r2_score(\n y_true=eCr, y_pred=polyval(sigmaCrlog, [idxCrInt, idxCr]))\n self.maskCr = maskCr\n self.r2Cr = r2Cr\n self.idxCr = abs(idxCr)\n self.idxCrInt = idxCrInt\n return", "def pndindex(*args):\r\n return np.ndindex(*args)", "def get_p_idx(self, node_idx):\n idx = (node_idx + 1) / 2 - 1\n return idx", "def _get_chunk_indexer(self, array):\n if self.data.num_chunks == 1:\n return np.broadcast_to(0, len(array))\n return np.digitize(array, self.offsets[1:])", "def _index(tensor_3d, tensor_2d):\n x, y, z = tensor_3d.size()\n t = tensor_3d.reshape(x * y, z)\n tt = tensor_2d.reshape(x * y)\n v = t[torch.arange(x * y), tt]\n v = v.reshape(x, y)\n return v", "def get_ind(self,*q):\n try:\n if( len(q) == 1 ):\n x = q[0][:,0]\n y = q[0][:,1]\n z = q[0][:,2]\n else:\n x = q[0]\n y = 
q[1]\n z = q[2]\n try:\n cx = (x+0.5).astype(na.int32)\n cy = (y+0.5).astype(na.int32)\n cz = (z+0.5).astype(na.int32)\n except:\n cx = int(x+0.5)\n cy = int(y+0.5)\n cz = int(z+0.5)\n ind = cx + cy*self.dim[0]+cz*self.dim[0]*self.dim[1]\n return ind\n except Exception as error:\n print(error)\n return None", "def sub2ind(self, ix, iy):\n idx = np.ravel_multi_index((ix, iy), self.shape)\n return idx", "def sorted_index(self) -> np.ndarray:\n return np.argsort(self.result_array.sum(axis=1))[::-1]", "def reconstruct_input(self, ix):", "def _prog_field_indices(self):\n\n if self._pfi is not None:\n return self._pfi\n\n self.arbor._grow_tree(self)\n self._pfi = np.array([node.tree_id for node in self._prog_nodes])\n return self._pfi", "def get_view_idx(self, parent_idx):\n a = self.p_rows.index(i)\n b = self.p_cols.index(j)\n return (a, b)", "def _idxs_postformat_array(self):\n self.idxs = np.array(self.idxs)", "def _get_index(self):\n\n return self.index\n\n # to avoid having differences bigger than 2pi", "def pndindex(*args):\n return np.ndindex(*args)", "def get_index(self, x, y):\n i = (y - self.y0) // self.dy\n j = (x - self.x0) // self.dx\n i = min(max(i, 0), self.n-1)\n j = min(max(j, 0), self.m-1)\n return [i, j]", "def _get_index(self, orb: int, sz: float = None):\n if orb >= self.n_orbitals:\n raise IndexError(\"requested orbital index outside of the hilbert space\")\n spin_idx = self._spin_index(sz)\n return spin_idx * self.n_orbitals + orb", "def getbaraidxij(self,idx_): # 3\n res,resargs = self.__obj.getbaraidxij(idx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _i_return_value,_j_return_value = resargs\n return _i_return_value,_j_return_value", "def i_index(self, coord):\n return coord + 1 if coord + 1 > self.dimensions - 1 else 0", "def mainIndices(self):\n return self.i1, self.i2", "def getbaraidxij(self,idx_):\n i_ = ctypes.c_int32()\n j_ = ctypes.c_int32()\n res = __library__.MSK_XX_getbaraidxij(self.__nativep,idx_,ctypes.byref(i_),ctypes.byref(j_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n i_ = i_.value\n _i_return_value = i_\n j_ = j_.value\n _j_return_value = j_\n return (_i_return_value,_j_return_value)", "def calc_nearest_ind(self, robot_pose):\n pass", "def parent_idx(idx):\n return (idx - 1) >> 1", "def get_best_idx(patient, plan_type, stop=False):\n if plan_type in ['clinical', 'default']:\n return 0\n plan = get_plan(patient, plan_type)\n util_vec = plan.opt_result.func_vals\n if stop:\n stop_idx = get_stop_idx(util_vec)\n util_vec = util_vec[:stop_idx + 1]\n return np.argmin(util_vec)", "def GetCellLinearIndex(self, vtkAMRBox, p_int, p_int_1, p_int_2, p_int=..., p_int=..., p_int=...):\n ...", "def unravel_index(ijk, n):\n \n if type(ijk) is int:\n return ijk\n if len(ijk)==1:\n return ijk[0]\n\n assert (np.diff(ijk)>0).all()\n assert all([i<n for i in ijk])\n\n ix = sum([int(binom(n-1-i,len(ijk)-1)) for i in range(ijk[0])])\n for d in range(1, len(ijk)-1):\n if (ijk[d]-ijk[d-1])>1:\n ix += sum([int(binom(n-i-1,len(ijk)-d-1)) for i in range(ijk[d-1]+1, ijk[d])])\n ix += ijk[-1] -ijk[-2] -1\n return ix", "def tril_indices_from(arr, k=0):\r\n if not arr.ndim == 2 and arr.shape[0] == arr.shape[1]:\r\n raise ValueError(\"input array must be 2-d and square\")\r\n return tril_indices(arr.shape[0], k)", "def advanced_indexing_op(input, index):\n batch_size = tf.shape(input)[0]\n max_length = int(input.get_shape()[1])\n dim_size = int(input.get_shape()[2])\n index = tf.range(0, batch_size) 
* max_length + (index - 1)\n flat = tf.reshape(input, [-1, dim_size])\n relevant = tf.gather(flat, index)\n return relevant", "def getbaraidx(self,idx_,sub_,weights_):\n maxnum_ = self.getbaraidxinfo((idx_))\n i_ = ctypes.c_int32()\n j_ = ctypes.c_int32()\n num_ = ctypes.c_int64()\n _sub_minlength = (maxnum_)\n if (maxnum_) > 0 and sub_ is not None and len(sub_) != (maxnum_):\n raise ValueError(\"Array argument sub is not long enough: Is %d, expected %d\" % (len(sub_),(maxnum_)))\n if isinstance(sub_,numpy.ndarray) and not sub_.flags.writeable:\n raise ValueError(\"Argument sub must be writable\")\n if sub_ is None:\n raise ValueError(\"Argument sub may not be None\")\n if isinstance(sub_, numpy.ndarray) and sub_.dtype is numpy.dtype(numpy.int64) and sub_.flags.contiguous:\n _sub_copyarray = False\n _sub_tmp = ctypes.cast(sub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif sub_ is not None:\n _sub_copyarray = True\n _sub_np_tmp = numpy.zeros(len(sub_),numpy.dtype(numpy.int64))\n _sub_np_tmp[:] = sub_\n assert _sub_np_tmp.flags.contiguous\n _sub_tmp = ctypes.cast(_sub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _sub_copyarray = False\n _sub_tmp = None\n \n _weights_minlength = (maxnum_)\n if (maxnum_) > 0 and weights_ is not None and len(weights_) != (maxnum_):\n raise ValueError(\"Array argument weights is not long enough: Is %d, expected %d\" % (len(weights_),(maxnum_)))\n if isinstance(weights_,numpy.ndarray) and not weights_.flags.writeable:\n raise ValueError(\"Argument weights must be writable\")\n if weights_ is None:\n raise ValueError(\"Argument weights may not be None\")\n if isinstance(weights_, numpy.ndarray) and weights_.dtype is numpy.dtype(numpy.float64) and weights_.flags.contiguous:\n _weights_copyarray = False\n _weights_tmp = ctypes.cast(weights_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif weights_ is not None:\n _weights_copyarray = True\n _weights_np_tmp = numpy.zeros(len(weights_),numpy.dtype(numpy.float64))\n _weights_np_tmp[:] = weights_\n assert _weights_np_tmp.flags.contiguous\n _weights_tmp = ctypes.cast(_weights_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _weights_copyarray = False\n _weights_tmp = None\n \n res = __library__.MSK_XX_getbaraidx(self.__nativep,idx_,maxnum_,ctypes.byref(i_),ctypes.byref(j_),ctypes.byref(num_),_sub_tmp,_weights_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n i_ = i_.value\n _i_return_value = i_\n j_ = j_.value\n _j_return_value = j_\n num_ = num_.value\n _num_return_value = num_\n if _sub_copyarray:\n sub_[:] = _sub_np_tmp\n if _weights_copyarray:\n weights_[:] = _weights_np_tmp\n return (_i_return_value,_j_return_value,_num_return_value)", "def comp_amplification_index(self):\n \n self.grid_tuning_in=self.inputs.grid_tuning_in\n self.grid_tuning_out=gl.comp_grid_tuning_index(self.L,self.nx,(self.r[0:self.n_e**2,:]).T) \n self.grid_tuning_out_inhib=gl.comp_grid_tuning_index(self.L,self.nx,(self.r[self.n_e**2:,:]).T)\n\n self.grid_amp_index=self.grid_tuning_out/self.grid_tuning_in", "def get_img_indices():\n if K.image_dim_ordering() == 'th':\n return 0, 1, 2, 3\n else:\n return 0, 3, 1, 2", "def get_index_from_well(self, well):\n pass", "def __getitem__(self, index) -> torch.nn.Parameter:\n return self.parameters[index]", "def _reparam(self):\n\n k_fe, k_re, k_re2 = self.k_fe, self.k_re, self.k_re2\n k_tot = k_fe + k_re2\n ix = np.tril_indices(self.k_re)\n\n lin = []\n for k in range(k_fe):\n e = 
np.zeros(k_tot)\n e[k] = 1\n lin.append(e)\n for k in range(k_re2):\n lin.append(np.zeros(k_tot))\n\n quad = []\n for k in range(k_tot):\n quad.append(np.zeros((k_tot, k_tot)))\n ii = np.tril_indices(k_re)\n ix = [(a,b) for a,b in zip(ii[0], ii[1])]\n for i1 in range(k_re2):\n for i2 in range(k_re2):\n ix1 = ix[i1]\n ix2 = ix[i2]\n if (ix1[1] == ix2[1]) and (ix1[0] <= ix2[0]):\n ii = (ix2[0], ix1[0])\n k = ix.index(ii)\n quad[k_fe+k][k_fe+i2, k_fe+i1] += 1\n for k in range(k_tot):\n quad[k] = 0.5*(quad[k] + quad[k].T)\n\n return lin, quad", "def getNeighboursCriteriaIndex(seg,mergedSegments,updatedSpeed,inversedIndex,weights,minValidData):\n neighbours = getNeighbours(seg,mergedSegments,inversedIndex)\n if len(neighbours) == 0 : return pd.Series(index=[[],[]])\n\n df = pd.Series(index=[np.array([seg]*len(neighbours)),neighbours])\n return pd.Series(df.index.map( lambda x: computePairCriteria(*x,mergedSegments,updatedSpeed,inversedIndex,weights,minValidData)).values,df.index)", "def _get_param_names_transformed(self):\r\n n = self._get_param_names()\r\n\r\n # remove/concatenate the tied parameter names\r\n if len(self.tied_indices):\r\n for t in self.tied_indices:\r\n n[t[0]] = \"<tie>\".join([n[tt] for tt in t])\r\n remove = np.hstack([t[1:] for t in self.tied_indices])\r\n else:\r\n remove = np.empty(shape=(0,), dtype=np.int)\r\n\r\n # also remove the fixed params\r\n if len(self.fixed_indices):\r\n remove = np.hstack((remove, np.hstack(self.fixed_indices)))\r\n\r\n # add markers to show that some variables are constrained\r\n for i, t in zip(self.constrained_indices, self.constraints):\r\n for ii in i:\r\n n[ii] = n[ii] + t.__str__()\r\n\r\n n = [nn for i, nn in enumerate(n) if not i in remove]\r\n return n", "def index(self, arr, idx, temp = True, name = None):\n \n temp = temp or name is not None\n \n arr_t = arr.type\n\n if isinstance(arr_t, ScalarT):\n # even though it's not correct externally, it's\n # often more convenient to treat indexing\n # into scalars as the identity function.\n # Just be sure to catch this as an error in\n # the user's code earlier in the pipeline.\n return arr\n if isinstance(arr_t, TupleT):\n if isinstance(idx, Const):\n idx = idx.value\n\n assert isinstance(idx, int), \\\n \"Index into tuple must be an integer, got %s\" % idx\n if isinstance(idx, Const):\n idx = idx.value\n proj = self.tuple_proj(arr, idx)\n if temp:\n return self.assign_temp(proj, \"tuple_elt%d\" % idx if name is None else name)\n else:\n return proj\n\n if self.is_tuple(idx):\n indices = self.tuple_elts(idx)\n elif hasattr(idx, '__iter__'):\n indices = tuple(map(wrap_if_constant,idx))\n else:\n indices = (wrap_if_constant(idx),)\n\n n_required = arr_t.rank\n n_indices = len(indices)\n if n_indices < n_required:\n # all unspecified dimensions are considered fully sliced\n extra = (syntax_helpers.slice_none,) * (n_required - n_indices)\n indices = indices + extra\n\n if len(indices) > 1:\n idx = self.tuple(indices, \"index_tuple\" if name is None else name)\n else:\n idx = indices[0]\n\n t = arr_t.index_type(idx.type)\n idx_expr = Index(arr, idx, type=t)\n if temp:\n return self.assign_temp(idx_expr, \"array_elt\" if name is None else name)\n else:\n return idx_expr", "def get_parent_idx(self, view_idx):\n a, b = view_idx\n R, C = self.shape\n i = self.p_rows[a]\n j = self.p_cols[b]\n return (i, j)", "def param(self, param_nb: int) -> int:\n mode = get_digit_right_to_left(self.modes, param_nb - 1)\n param_index = self.pointer + param_nb\n if mode == 1:\n # immediate mode\n return 
param_index\n if mode == 2:\n # relative mode\n return self.relative_base + self.program[param_index]\n else:\n # position mode\n return self.program[param_index]", "def lpol2index(ar):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", np.ComplexWarning)\n ar = array_like(ar, \"ar\")\n index = np.nonzero(ar)[0]\n coeffs = ar[index]\n return coeffs, index", "def indices(self, fit):\r\n lam = self.lam_reeval if self.lam_reeval else 2 + len(fit) / 20\r\n reev = int(lam) + ((lam % 1) > np.random.rand())\r\n return np.argsort(array(fit, copy=False)[:2 * (reev + 1)])[:reev]", "def _get_permutated_segments_indices(\n self, randomized: bool, random_state: Optional[np.random.mtrand.RandomState]\n ) -> np.ndarray:\n idx = np.arange(self.dy.size)\n\n if randomized:\n if random_state is None:\n random_state = np.random.RandomState()\n idx = random_state.permutation(idx)\n return idx", "def get_train_input(self, prev, i):\n pass", "def get_index(corners, i, jk):\n if type(jk) != list:\n jk = list(jk)\n assert corners.shape[1] == 3\n sol = np.where(np.bitwise_or(np.all(corners == [i] + jk, axis=1), \n np.all(corners == [i] + jk[::-1], axis=1)))[0]\n if len(sol) > 0: \n return sol[0]", "def get_boundary_position_of_index():\n function = LegacyFunctionSpecification() \n function.must_handle_array = True\n for x in ['i','j','k']:\n function.addParameter(x, dtype='i', direction=function.IN)\n function.addParameter('index_of_boundary', dtype='i', direction=function.IN, default = 1)\n for x in ['x','y','z']:\n function.addParameter(x, dtype='d', direction=function.OUT)\n function.addParameter('number_of_points', 'i', function.LENGTH) \n function.result_type = 'i'\n return function", "def calc_param(self,tri_coord,node_coord):\n x_basis, y_basis = node_coord\n A = []\n for j,coord in enumerate(tri_coord):\n xi, yi = coord \n A.append([1.0,xi,yi])\n if xi == x_basis and yi == y_basis:\n special = j\n continue \n\n b = np.zeros(3,dtype=float)\n b[special] = 1.0\n\n param = np.linalg.solve(A,b)\n\n return param[1:] #ignore /alpha because its falls out of gradient\n\n # print(calc_param((0,1,11),0))", "def index(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"index\")", "def GetEdgeArray(self, p_int):\n ...", "def starting_values(self, resids: NDArray) -> NDArray:", "def _get_params_transformed(self):\r\n x = self._get_params()\r\n [np.put(x, i, t.finv(x[i])) for i, t in zip(self.constrained_indices, self.constraints)]\r\n\r\n to_remove = self.fixed_indices + [t[1:] for t in self.tied_indices]\r\n if len(to_remove):\r\n return np.delete(x, np.hstack(to_remove))\r\n else:\r\n return x", "def _to_flat_index(self, idx_in):\n idx_in = tuple([np.array(z, ndmin=1, copy=False) for z in idx_in])\n msk = np.all(np.stack([t < n for t, n in zip(idx_in, self.shape)]), axis=0)\n idx = np.ravel_multi_index(\n tuple([t[msk] for t in idx_in]), self.shape, mode=\"wrap\"\n )\n\n return idx, msk", "def get_pos(self, mode, param, param_idx):\n\n if mode == 0:\n return param[param_idx]\n elif mode == 1:\n return self.ptr + param_idx + 1\n elif mode == 2:\n return self.r + param[param_idx]", "def get_index(self, u):\n if u == self.grid[-1]: # check if u equals last knot\n# index = len(self.grid) - 2 # pick next to last index\n index = (self.grid < u).argmin() - 1\n else:\n index = (self.grid > u).argmax() - 1\n return index", "def __call__(self, *args):\n return args[self.i_dim]", "def encode_to_flat_array_index(row, column, matrix):\n return row * matrix.cols + column", "def indXtoJ(indX):\n return 
np.unravel_index(indX % xx.size, xx.shape)", "def rindex(self, sub) -> int:\n pass", "def idx(self):\n if self._idx is None:\n self._idx = list(np.where(self.polar_angle < self.polar_max)[0])\n return self._idx", "def return_BMU_coord(self, sess, input_array):\n output = sess.run([self.distance_matrix,self.distance_argmin], feed_dict={self.input_placeholder: input_array})\n index = output[1] #flatten index\n row = index/self.tot_cols\n col = index - (row*self.tot_cols)\n return index, (row,col)", "def _get_Pij(self): \n \n with tf.name_scope(\"getting_Pij\"):\n \n n_splits = int(self.dim_input / self.per_split_feats)\n n_divisible = n_splits * self.per_split_feats\n X_split = tf.split(self.X_transformed[:,0:n_divisible], n_splits, axis=1)\n X_split.append(self.X_transformed[:,n_divisible:])\n \n # get norm along first feature set\n normAX = X_split[0][None, :, :] - X_split[0][:, None, :]\n normAX = tf.reduce_sum(normAX ** 2, axis=2)\n \n for split in range(1, len(X_split)): \n \n # Expand dims of AX to [n_samples, n_samples, n_features], where\n # each \"channel\" in the third dimension is the difference between\n # one sample and all other samples along one feature\n norm_thisFeatureSet = X_split[split][None, :, :] - \\\n X_split[split][:, None, :]\n \n norm_thisFeatureSet = tf.reduce_sum(norm_thisFeatureSet ** 2, axis=2)\n \n # add to existing cumulative sum \n normAX = normAX + norm_thisFeatureSet\n \n # Calculate Pij, the probability that j will be chosen \n # as i's neighbor, for all i's. Pij has shape\n # [n_samples, n_samples] and ** is NOT symmetrical **.\n # Because the data is normalized using softmax, values\n # add to 1 in rows, that is i (central patients) are\n # represented in rows\n denomSum = tf.reduce_sum(tf.exp(-normAX), axis=0)\n epsilon = 1e-50\n denomSum = denomSum + epsilon \n \n self.Pij = tf.exp(-normAX) / denomSum[:, None]", "def index(self, x) -> int:\n pass", "def _tree_field_indices(self):\n\n if self._tfi is not None:\n return self._tfi\n\n self.arbor._grow_tree(self)\n self._tfi = np.array([node.tree_id for node in self._tree_nodes])\n return self._tfi", "def _target(self, data):\n relative_values = abs(data - data.mean())\n index = relative_values.idxmax()\n value = relative_values[index]\n return index, value", "def break_index(self, **kwargs):\n return self.peak_indices(**kwargs)[0][-1]", "def random_pipeline_index(self) -> int:\n indices = np.arange(len(self.transforms))\n return np.random.choice(indices, p=self.prob)", "def __getHints(self, p):\n st = bisect.bisect_left(self.index, (p[:self.ln], -1)) # binary search\n en = bisect.bisect_right(self.index, (p[:self.ln], sys.maxsize)) # binary search\n hits = self.index[st:en] # this range of elements corresponds to the hits\n return [h[1] for h in hits] # return just the offsets", "def getbaraidx(self,idx_,sub,weights): # 3\n maxnum_ = self.getbaraidxinfo((idx_))\n if sub is None: raise TypeError(\"Invalid type for argument sub\")\n _copyback_sub = False\n if sub is None:\n sub_ = None\n else:\n try:\n sub_ = memoryview(sub)\n except TypeError:\n try:\n _tmparr_sub = array.array(\"q\",sub)\n except TypeError:\n raise TypeError(\"Argument sub has wrong type\")\n else:\n sub_ = memoryview(_tmparr_sub)\n _copyback_sub = True\n else:\n if sub_.format != \"q\":\n sub_ = memoryview(array.array(\"q\",sub))\n _copyback_sub = True\n if sub_ is not None and len(sub_) != (maxnum_):\n raise ValueError(\"Array argument sub has wrong length\")\n if weights is None: raise TypeError(\"Invalid type for argument 
weights\")\n _copyback_weights = False\n if weights is None:\n weights_ = None\n else:\n try:\n weights_ = memoryview(weights)\n except TypeError:\n try:\n _tmparr_weights = array.array(\"d\",weights)\n except TypeError:\n raise TypeError(\"Argument weights has wrong type\")\n else:\n weights_ = memoryview(_tmparr_weights)\n _copyback_weights = True\n else:\n if weights_.format != \"d\":\n weights_ = memoryview(array.array(\"d\",weights))\n _copyback_weights = True\n if weights_ is not None and len(weights_) != (maxnum_):\n raise ValueError(\"Array argument weights has wrong length\")\n res,resargs = self.__obj.getbaraidx(idx_,maxnum_,sub_,weights_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _i_return_value,_j_return_value,_num_return_value = resargs\n if _copyback_weights:\n weights[:] = _tmparr_weights\n if _copyback_sub:\n sub[:] = _tmparr_sub\n return _i_return_value,_j_return_value,_num_return_value" ]
[ "0.73500043", "0.72873694", "0.5873878", "0.5873878", "0.58434486", "0.56710553", "0.5656544", "0.5656544", "0.5554051", "0.554955", "0.5519457", "0.5428711", "0.536669", "0.5312925", "0.5258314", "0.5250333", "0.51969874", "0.51883346", "0.5186911", "0.51863664", "0.5186337", "0.51770914", "0.516617", "0.5163142", "0.5151234", "0.5121344", "0.5119788", "0.5118742", "0.5083041", "0.5078002", "0.5072892", "0.5072276", "0.506761", "0.50606245", "0.5046647", "0.5031889", "0.50298274", "0.50292176", "0.5029212", "0.5013643", "0.50081724", "0.5006871", "0.49985757", "0.49888927", "0.4975878", "0.49720314", "0.49677593", "0.4959604", "0.4944059", "0.49359828", "0.4933573", "0.49323124", "0.49259922", "0.4923054", "0.49223685", "0.49218774", "0.49177703", "0.49077058", "0.49047592", "0.4903401", "0.48987186", "0.48943895", "0.48859736", "0.48827907", "0.48762706", "0.48717818", "0.487148", "0.4870419", "0.4869457", "0.4865104", "0.4859796", "0.485883", "0.48556253", "0.4843685", "0.482156", "0.4817827", "0.481587", "0.4813228", "0.478976", "0.4776428", "0.47755745", "0.47710785", "0.47655928", "0.47548902", "0.4751959", "0.47489744", "0.47475672", "0.47451463", "0.47424826", "0.4739707", "0.473835", "0.47327244", "0.47222367", "0.47188815", "0.4718832", "0.47138518", "0.47124842", "0.47067618", "0.47048852", "0.46932316" ]
0.69185156
2
Flattened array of ints, specifying the index of this object. This has to account for shaped parameters!
def _raveled_index(self):
    return np.r_[:self.size]
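A minimal usage sketch of how a raveled index like this is typically combined with a parent offset to address a shaped parameter inside a flat parameter vector. The ToyParam class and its offset attribute are hypothetical illustrations (only NumPy is assumed), not part of the library whose code is quoted above.

import numpy as np

class ToyParam:
    # Hypothetical stand-in for a shaped parameter with `size` elements.
    def __init__(self, shape, offset=0):
        self.shape = shape
        self.size = int(np.prod(shape))
        self.offset = offset  # position of this parameter block in the flat array

    def _raveled_index(self):
        # flat indices local to this parameter: 0 .. size-1
        return np.r_[:self.size]

    def raveled_index_in_parent(self):
        # shift the local indices by the parent's offset for this block
        return self._raveled_index() + self.offset

p = ToyParam(shape=(2, 3), offset=5)
print(p.raveled_index_in_parent())  # [ 5  6  7  8  9 10]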
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _idxs_postformat_array(self):\n self.idxs = np.array(self.idxs)", "def flatten_idx(idx, axis=-1):\n idx = numpy.asanyarray(idx)\n if not idx.dtype.kind in ('i', 'u'):\n idx = idx.astype(int)\n preshape = idx.shape[:axis]\n postshape = idx.shape[axis:]\n stride = int(numpy.product(postshape[1:])) #1 if applied to empty\n #The index on this axis moves stride elements in flat\n outidx = idx.flatten() * stride #makes a copy\n #First add the offsets to get us to [..., idx @ axis = 0, 0...)\n outidx += numpy.repeat(\n numpy.arange(0, len(outidx), int(numpy.product(postshape)),\n dtype=idx.dtype),\n numpy.product(postshape))\n #Now offsets for non-zero on the trailing axes [0, 0, ... 0@axis, ...]\n outidx += numpy.tile(numpy.arange(0, stride, dtype=idx.dtype),\n int(numpy.product(preshape)) * idx.shape[axis])\n return outidx", "def flatten(self):\n xv, yv = np.meshgrid(self.columns, self.index, indexing='xy')\n return np.array([xv.ravel(), yv.ravel(), self.values.ravel()])", "def _to_flat_index(self, idx_in):\n idx_in = tuple([np.array(z, ndmin=1, copy=False) for z in idx_in])\n msk = np.all(np.stack([t < n for t, n in zip(idx_in, self.shape)]), axis=0)\n idx = np.ravel_multi_index(\n tuple([t[msk] for t in idx_in]), self.shape, mode=\"wrap\"\n )\n\n return idx, msk", "def getIntArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def indices(self):\n return tuple([slice(*r) for r in self.location])", "def flatten(self):\n\n if self.ndim == 1:\n return self.copy()\n\n return ArrayCoordinates1d(self.coordinates.flatten(), **self.properties)", "def getShortArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def master_ndindex(self): # itermaster_indices(self):\n return itertools_product(\n *[range(*r) for r in self.location]\n ) # TODO check", "def atom_idxs(self):\n\n return np.array([atom.atom_idxs for atom in self])", "def matrix_to_flat(self, idx_rows):\n idx = []\n for i in range(self.nts):\n idx.append(self._matrix_to_flat_by_ts(idx_rows, i))\n return idx", "def flatten(x):\n return x.view(x.size(0), -1)", "def flatten(self):\n return [e for es in self.array for e in es]", "def batch_flatten(this,x):\n shape = x.get_shape().as_list()[1:]\n if None not in shape:\n return tf.reshape(x, [-1, int(np.prod(shape))])\n return tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))", "def batch_flatten(this,x):\n shape = x.get_shape().as_list()[1:]\n if None not in shape:\n return tf.reshape(x, [-1, int(np.prod(shape))])\n return tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))", "def FlattenModelData(y, i):\n outs = np.array([y[j][i][0] for j in range(len(y))])\n return outs", "def to_flat_index(self) -> Index: # type: ignore[override]\n return Index(self._values, tupleize_cols=False)", "def _flatten(self):\n n = self.B\n idx = self.nodect - 1\n self.seq = []\n while n is not None:\n n['idx'] = idx\n self.seq.insert(0, n)\n idx -= 1\n n = n['pred']", "def flatten(self, arr):\n shape = arr.shape\n return arr.reshape(shape[0] * shape[1], *shape[2:])", "def GetPointToIncidentEdgesArray(self, p_int):\n ...", "def reconstruct_input(self, ix):", "def flatten(self) -> np.ndarray:\n\n return self.data.copy()", "def flatten(a, start=0, count=2):\n s = a.shape\n return np.reshape(a, s[:start] + (-1,) + s[start+count:])", "def flatten_npar(np_array):\n \n itr = len(np_array)\n start = np_array[0]\n \n for i in range(1,itr):\n start = np.hstack((start,np_array[i]))\n \n return(np.array(start))", "def array_form(self):\n return tuple(self)", "def flatten(self):\n pass", "def do_flatten(obj):\n if type(obj) == 
list:\n return np.array(obj).flatten()\n return obj.flatten()", "def flatten(x):\n return reshape(x, (x.shape[0], -1))", "def flatten_stimulus(stimulus):\n n, h, w = stimulus.shape\n return stimulus.reshape((n, h * w))", "def get(self, idx_in):\n shape_out = idx_in[0].shape\n idx_flat_in, msk_in = self._to_flat_index(idx_in)\n idx, msk = find_in_array(idx_flat_in, self.idx)\n val_out = np.full(shape_out, self._fill_value)\n val_out.flat[np.flatnonzero(msk_in)[msk]] = self._data[idx[msk]]\n return np.squeeze(val_out)", "def unstacked_index(size, index):\n return index % size, index // size", "def ravel_indices(shape, *args):\n new_positions = []\n for arg in args:\n new_positions.append(np.ravel_multi_index(arg, shape))\n return new_positions", "def row(self):\n return self.reshape((1, self.size))", "def GetPointToIncidentFacesArray(self, p_int):\n ...", "def flatten():", "def GetPointToOneRingPointsArray(self, p_int):\n ...", "def batch_indices(self):\n b = self.batch_size\n return [np.arange(i*b, i*b+b) for i in range(self.num_batches)]", "def _flatten(self, inputT, size):\n return tf.reshape(inputT, (-1, size))", "def _indarray(np_array):\n return skil_client.INDArray(\n ordering='c',\n shape=list(np_array.shape),\n data=np_array.reshape(-1).tolist()\n )", "def flatten_array(X_input):\r\n X_input_flat = np.array([x.flatten() for x in X_input])\r\n return X_input_flat", "def index_object(idxs=None):", "def serialize_flatten(name, value, array_indices=True):\r\n # call the recursive function that returns a tuple of tuples\r\n return tuple(serialize_flatten_rec(name, value, array_indices))", "def row(self, index: int) -> List[int]:\n return self.matrix[index - 1]", "def _multi_index(indexes, shape):\n indexes = indexes if isinstance(indexes, typing.Sequence) else (indexes,)\n if any(isinstance(i, type(Ellipsis)) for i in indexes):\n raise IndexError('Ellipsis index currently is not supported.')\n # Fill the right-most elements.\n indexes = indexes + (slice(0, None, None),) * (len(shape) - len(indexes))\n # Convert to positive index.\n positive_indexes = []\n for i, index in enumerate(indexes):\n if isinstance(index, slice):\n index = slice(\n index.start or 0, index.stop or shape[i], index.step or 1\n )\n positive_indexes.append(\n slice(\n index.start + shape[i] if index.start < 0 else index.start,\n index.stop + shape[i] if index.stop < 0 else index.stop,\n # Negative step means index backward, no need to convert to\n # positive interger.\n index.step,\n )\n )\n elif isinstance(index, int):\n positive_indexes.append(index + shape[i] if index < 0 else index)\n else:\n raise TypeError(f'Not supported index type {index}.')\n return tuple(positive_indexes)", "def indices(self):\n slice_list = []\n for axis in range(self.ndim):\n if axis in self.displayed:\n slice_list.append(slice(None))\n else:\n if self.clip:\n p = np.clip(\n self.point[axis],\n np.round(self.range[axis][0]),\n np.round(self.range[axis][1]) - 1,\n )\n else:\n p = self.point[axis]\n p = np.round(p / self.range[axis][2]).astype(int)\n slice_list.append(p)\n return tuple(slice_list)", "def GetEdgeArray(self, p_int):\n ...", "def wrap(self, flatten_x):\n batch_size = flatten_x.size(0)\n x = torch.reshape(flatten_x, (batch_size, self.num_frames, self.num_ticks_per_frame, -1))\n return x", "def one_dim_index(self, i, j):\n return int(i + j * self.nx)", "def flatten(self):\n return DataArray([s for s in self.unstructured()])", "def unflatten(self, flat, unused_shaped_like):\n return next(flat)", "def getLongArray2D(self) -> 
typing.List[typing.List[int]]:\n ...", "def get(self):\n return _n.reshape(self.next_x, self.original_shape)", "def indXtoJ(indX):\n return np.unravel_index(indX % xx.size, xx.shape)", "def _shaped_arange(*shape):\n return np.random.randn(np.prod(shape)).astype(np.float32).reshape(\n *shape\n ) * np.prod(shape)", "def _flatten_parameters(self):\n [m.flatten_parameters() for m in self._to_flatten]", "def flat(self):\n return Op('flat', self)", "def getBinIndices(self, linear_index):\n return linear_index / self.magic_array % self.nbins_across_dims", "def flatten(X):\n N = X.shape[-1]\n flat = np.zeros((N, 3072))\n for idx, i in enumerate(range(N)):\n # if not idx:\n # print(X[:,:,:,i].reshape(3072))\n flat[i] = X[:,:,:,i].reshape(3072)\n return flat", "def _get_args(idx, *args):\n new_args = []\n for arg in list(args[0]):\n if isinstance(arg, Iterable):\n new_args.append(arg[idx])\n else:\n new_args.append(arg)\n\n return new_args", "def BatchCreator(self, j, n_batch):\n j_start = (j-1)*n_batch + 1\n j_end = j*n_batch + 1\n ind = np.arange(start= j_start, stop=j_end, step=1)\n return ind", "def batched_index_select(input, dim, index):\n views = [input.shape[0]] + [1 if i != dim else -1 for i in range(1, len(input.shape))]\n expanse = list(input.shape)\n expanse[0] = -1\n expanse[dim] = -1\n index = index.view(views).expand(expanse)\n return torch.gather(input, dim, index)", "def __getitem__(self, index):\n if index == Ellipsis:\n index = tuple(self.dim*[slice(None)])\n\n if len(index) < self.dim:\n # --- Add extra dims to index if needed\n index = list(index)\n for i in range(len(index), self.dim):\n index.append(slice(None))\n index = tuple(index)\n\n if self.dim == 2:\n return self._getitem2d(index)\n elif self.dim == 3:\n return self._getitem3d(index)", "def batch_flatten(x):\n shape = x.get_shape().as_list()[1:]\n if None not in shape:\n return tf.reshape(x, [-1, int(np.prod(shape))])\n return tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))", "def batch_flatten(x):\n shape = x.get_shape().as_list()[1:]\n if None not in shape:\n return tf.reshape(x, [-1, int(np.prod(shape))])\n return tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))", "def transform(self, x):\n res = [x[i] for i in range(len(x))\n if i not in self.index_value_pairs]\n return res if isinstance(x, list) else np.asarray(res)", "def _extend_index_dim(input_index, new_index, new_index_max):\n # Construct an iterator from new_index\n if isinstance(new_index, (int, np.integer)):\n it = [new_index]\n else:\n if isinstance(new_index, slice):\n # slices don't work very well with multi-dimensional circular mappings.\n it = _conv_slice_to_list(slice_obj=new_index, stop_def=new_index_max)\n else:\n it = new_index\n # Index extension\n if input_index is None:\n output = []\n for i in it:\n output.append(tuple([i]))\n return output\n else:\n output = []\n for _i in input_index:\n output_row = []\n for i in it:\n output_row.append(tuple(list(_i) + [i]))\n output.append(output_row)\n return output", "def _flatten(params):\n params, _ = tree_flatten(params)\n return jnp.concatenate([jnp.reshape(param, [-1]) for param in params])", "def flatten(self):\n return tuple(map(lambda i: 1 if i > 0 else 0, self.freq()))", "def make_indices(dimensions):\n\n level = len(dimensions)\n\n if level == 1:\n return range(dimensions[0])\n\n indices = [[]]\n\n while level:\n\n _indices = []\n\n for j in range(dimensions[level - 1]):\n\n _indices += [[j] + i for i in indices]\n\n indices = _indices\n\n level -= 1\n\n try:\n return [tuple(i) for i in indices]\n 
except TypeError:\n return indices", "def GetFaceToAdjacentFacesArray(self, p_int):\n ...", "def sub2ind(self, ix, iy):\n idx = np.ravel_multi_index((ix, iy), self.shape)\n return idx", "def flatten_numpy(ndarray):\n return np.reshape(ndarray, (-1,), 'F')", "def loc_data_idx(loc_idx):\n retval = []\n for i in as_tuple(loc_idx):\n if isinstance(i, slice) and i.step is not None and i.step == -1:\n if i.stop is None:\n retval.append(slice(0, i.start+1, -i.step))\n else:\n retval.append(slice(i.stop+1, i.start+1, -i.step))\n elif isinstance(i, slice) and i.step is not None and i.step < -1:\n if i.stop is None:\n lmin = i.start\n while lmin >= 0:\n lmin += i.step\n retval.append(slice(lmin-i.step, i.start+1, -i.step))\n else:\n retval.append(slice(i.stop+1, i.start+1, -i.step))\n elif is_integer(i):\n retval.append(slice(i, i+1, 1))\n else:\n retval.append(i)\n return as_tuple(retval)", "def __getitem__(self, index: list) -> (np.array, np.array):\n # Generate indexes of the batch\n indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]\n\n # Find list of IDs\n list_IDs_temp = [self.list_IDs[k] for k in indexes]\n\n # Generate data\n X, M = self.__data_generation(list_IDs_temp)\n\n return X, M", "def get_x(self):\n return self.x[:self.nump, :]", "def a(a,N): \n a=np.ravel(a, order='F') # Same order\n return a", "def a(a,N): \n a=np.ravel(a, order='F') # Same order\n return a", "def flatten_array(self):\n numel = self.xyz_array[:, :, 0].size # Number of elements in dataset\n self.flat_array = np.zeros([self._len_z, numel]) # Create array to hold flattened array\n\n # Loop through each dimension (dataset) and flatten it into new array\n for dim in range(self._len_z):\n self.flat_array[dim, :] = np.ravel(self.xyz_array[:, :, dim])", "def flatten_parameters(self):", "def get_scatter_indices_for_bboxes(flatted_indices, batch_size, total_bboxes):\n indices_size = len(flatted_indices)\n scatter_indices = tf.concat(flatted_indices, 1)\n return tf.reshape(scatter_indices, (batch_size, total_bboxes, indices_size))", "def encode_to_flat_array_index(row, column, matrix):\n return row * matrix.cols + column", "def mainIndices(self):\n return self.i1, self.i2", "def flatten(self, x):\n return np.concatenate([c.flatten(xi) for c, xi in zip(self.spaces, x)])", "def sliceshape(slicetuple, totshape):\n res = []\n for i,s in enumerate(slicetuple):\n if isinstance(s,int):\n #n = 1\n pass\n else:\n i0,i1,istep = s.indices(totshape[i])\n n = (i1-i0)//istep\n res.append(n)\n return res", "def gather(x, idx, method=2):\n\n if method == 0:\n return x[idx]\n elif method == 1:\n x = x.unsqueeze(1)\n x = x.expand((-1, idx.shape[-1], -1))\n idx = idx.unsqueeze(2)\n idx = idx.expand((-1, -1, x.shape[-1]))\n return x.gather(0, idx)\n elif method == 2:\n for i, ni in enumerate(idx.size()[1:]):\n x = x.unsqueeze(i+1)\n new_s = list(x.size())\n new_s[i+1] = ni\n x = x.expand(new_s)\n n = len(idx.size())\n for i, di in enumerate(x.size()[n:]):\n idx = idx.unsqueeze(i+n)\n new_s = list(idx.size())\n new_s[i+n] = di\n idx = idx.expand(new_s)\n return x.gather(0, idx)\n else:\n raise ValueError('Unkown method')", "def _asarray1d(arr, copy=False):\n if copy:\n return asarray(arr).flatten()\n else:\n return asarray(arr).ravel()", "def _normalize_index(self, index: int):\n if index < 0:\n return len(self) + index\n else:\n return index", "def _simplify_index(indices, shape):\n # First clean up and check indices, unpacking ellipsis and boolean arrays\n indices = da.slicing.normalize_index(indices, shape)\n out = 
[]\n axis = 0\n for index in indices:\n if index is not np.newaxis:\n length = shape[axis]\n axis += 1\n # If there is 1-D fancy index on this axis, try to convert to slice\n if isinstance(index, np.ndarray) and index.ndim == 1:\n try:\n index = _range_to_slice(index)\n except ValueError:\n pass\n else:\n index = da.slicing.normalize_slice(index, length)\n out.append(index)\n return tuple(out)", "def advanced_indexing_op(input, index):\n batch_size = tf.shape(input)[0]\n max_length = int(input.get_shape()[1])\n dim_size = int(input.get_shape()[2])\n index = tf.range(0, batch_size) * max_length + (index - 1)\n flat = tf.reshape(input, [-1, dim_size])\n relevant = tf.gather(flat, index)\n return relevant", "def recursive_index_decode(int_array, max=32767, min=-32768):\n out_arr = []\n decoded_val = 0\n for item in int_array.tolist():\n if item==max or item==min:\n decoded_val += item\n else:\n decoded_val += item\n out_arr.append(decoded_val)\n decoded_val = 0\n return numpy.asarray(out_arr,dtype=numpy.int32)", "def ordered_indices(self):\r\n return np.arange(len(self), dtype=np.int64)", "def _create_flatten(cls, onnx_node, inputs, opset_version):\n factor = onnx_node.getattr('axis', 1)\n if factor < 0:\n # in order to support the negative axis\n factor = len(inputs[0].shape) + factor\n\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return None, forward(axis=factor)", "def flattened_indices_from_row_col_indices(row_indices, col_indices, num_cols):\n return (row_indices * num_cols) + col_indices", "def state_from_id(index, dims_state_grid):\n\n entries = [index] * len(dims_state_grid)\n for i in range(1, len(dims_state_grid)):\n value = 1\n for j in range(i, len(dims_state_grid)):\n value *= dims_state_grid[j]\n for k in range(i - 1, len(dims_state_grid)):\n if k == i - 1:\n entries[k] //= value\n else:\n entries[k] %= value\n\n out = np.array(object=entries)\n\n return out", "def index2d(src, idx):\n broadcast_to = P.BroadcastTo(idx.shape)\n offs = broadcast_to(P.range(Tensor(0, mindspore.int32),\n Tensor(idx.shape[0], mindspore.int32),\n Tensor(1, mindspore.int32))[:, None])\n idx = idx + (offs()) * idx.shape[1]\n\n return src.view(-1)[idx.view(-1)].view(idx.shpe)", "def duplicate_flatten(size: int) -> List[List[int]]:\n duplicate = []\n for _ in range(size):\n temp = [-1] * size\n duplicate.append(temp)\n return duplicate", "def unflatten(self, x):\n dims = [c.flat_dim for c in self.spaces]\n flat_x = np.split(x, np.cumsum(dims)[:-1])\n return tuple(c.unflatten(xi) for c, xi in zip(self.spaces, flat_x))", "def _asarray(self, vec):\n shape = self.domain[0][0].shape + self.pshape\n arr = np.empty(shape, dtype=self.domain.dtype)\n for i, xi in enumerate(vec):\n for j, xij in enumerate(xi):\n arr[..., i, j] = xij.asarray()\n\n return arr", "def flattenImage(input_array):\r\n shp = np.size(input_array)\r\n return np.reshape(input_array, (shp,))" ]
[ "0.65175533", "0.62058824", "0.5974461", "0.59025675", "0.58936995", "0.58421", "0.58367753", "0.57612103", "0.5670626", "0.5655063", "0.5648909", "0.56180966", "0.5589195", "0.55857176", "0.55857176", "0.5580362", "0.55702096", "0.5560747", "0.544082", "0.5435741", "0.54188216", "0.5409668", "0.53965384", "0.5367334", "0.53597945", "0.5351686", "0.5341007", "0.52925646", "0.5280118", "0.52661866", "0.52611285", "0.5256359", "0.52527046", "0.52487624", "0.5247055", "0.524415", "0.5242159", "0.5231501", "0.5228461", "0.5195386", "0.5180177", "0.5172795", "0.51692456", "0.5161007", "0.5160354", "0.5155424", "0.515318", "0.5135404", "0.5124941", "0.5120456", "0.5118928", "0.51058185", "0.51023453", "0.5096304", "0.50954634", "0.5094139", "0.50928885", "0.50887007", "0.50881743", "0.5080389", "0.5077059", "0.5072143", "0.50647575", "0.50647575", "0.50589573", "0.5048208", "0.5047156", "0.5045411", "0.504235", "0.5034239", "0.50310206", "0.5030486", "0.5030197", "0.50243735", "0.5024333", "0.50241196", "0.50241196", "0.50136197", "0.5009852", "0.4994477", "0.4993231", "0.49893743", "0.49876082", "0.49783012", "0.49714625", "0.49661586", "0.49639437", "0.4962034", "0.49616212", "0.4958802", "0.4941388", "0.49397555", "0.49366266", "0.49348867", "0.49333957", "0.49313772", "0.49312207", "0.49266088", "0.49158248" ]
0.53458685
27
Helper preventing duplicated code. This adds the given what (transformation, prior, etc.) to the parameter index operations which. reconstrained are the reconstrained indices. A warning is printed when reconstraining parameters if warning is True.
def _add_to_index_operations(self, which, reconstrained, what, warning):
    if warning and reconstrained.size > 0:
        # TODO: figure out which parameters have changed and only print those
        print("WARNING: reconstraining parameters {}".format(self.hierarchy_name() or self.name))
    index = self._raveled_index()
    which.add(what, index)
    return index
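A hedged sketch of how a helper like this is typically driven from a constrain-style method. ToyIndexOperations, ToyParam, and the constrain wrapper below are hypothetical illustrations assuming only NumPy; they are not the quoted library's actual API.

import numpy as np

class ToyIndexOperations:
    # Hypothetical container mapping a constraint/transformation to flat indices.
    def __init__(self):
        self._store = {}

    def add(self, what, index):
        self._store.setdefault(what, []).extend(np.atleast_1d(index).tolist())

    def __getitem__(self, what):
        return np.array(self._store.get(what, []), dtype=int)

class ToyParam:
    def __init__(self, name, size):
        self.name = name
        self.size = size
        self.constraints = ToyIndexOperations()

    def hierarchy_name(self):
        return self.name

    def _raveled_index(self):
        return np.r_[:self.size]

    def _add_to_index_operations(self, which, reconstrained, what, warning):
        # same logic as the function above, repeated so the sketch runs standalone
        if warning and reconstrained.size > 0:
            print("WARNING: reconstraining parameters {}".format(self.hierarchy_name() or self.name))
        index = self._raveled_index()
        which.add(what, index)
        return index

    def constrain(self, transform, warning=True):
        # nothing was constrained before in this toy, so nothing gets reconstrained
        reconstrained = np.array([], dtype=int)
        return self._add_to_index_operations(self.constraints, reconstrained, transform, warning)

p = ToyParam("rbf.variance", 3)
print(p.constrain("+ve"))    # [0 1 2]
print(p.constraints["+ve"])  # [0 1 2]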
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def warn_inplace(exc, nav, repl_pairs, local_opt):\r\n if isinstance(exc, InconsistencyError):\r\n return\r\n return NavigatorOptimizer.warn(exc, nav, repl_pairs, local_opt)", "def ensure_default_constraints(self,warn=False):\n positive_strings = ['variance','lengthscale', 'precision']\n for s in positive_strings:\n for i in self.grep_param_names(s):\n if not (i in self.all_constrained_indices()):\n name = self._get_param_names()[i]\n self.constrain_positive(name)\n if warn:\n print \"Warning! constraining %s postive\"%name", "def _optimise(self):\n pass", "def reset_parameters(self, p: Dict[str, ArrayType]):\n super().reset_parameters(p)\n if self.method == \"trust-constr\":\n if self.opt.nk:\n self._constraints[\"k\"].A = csc_matrix(self.opt.M(self.p).toarray())\n self._constraints[\"k\"].lb = -self.opt.c(self.p).toarray().flatten()\n if self.opt.na:\n eq = -self.opt.b(self.p).toarray().flatten()\n self._constraints[\"a\"].A = csc_matrix(self.opt.A(self.p).toarray())\n self._constraints[\"a\"].lb = eq\n self._constraints[\"a\"].ub = eq\n if self._constraints:\n self.minimize_input[\"constraints\"] = list(self._constraints.values())", "def __adjust(self, *args):\n return \"adjust\"", "def optimize_parameters(self):\n pass", "def optimize_parameters(self):\n pass", "def optimize_parameters(self):\n pass", "def _parameters_changed_notification(self, me, which=None):\n self._optimizer_copy_transformed = False # tells the optimizer array to update on next request\n self.parameters_changed()", "def test_wrong_parameters(self):\n with self.assertWarns(RuntimeWarning):\n Parameters(1, mu=3, lambda_=2)", "def accept_optimize():\n pass", "def _check_inputs(node, storage_map, r_vals, dr_vals, active_nodes,\r\n clobber_dr_vals=True,\r\n perform=None, warn_input_not_reused=True):\r\n destroyed_idx_list = []\r\n destroy_map = getattr(node.op, 'destroy_map', {})\r\n for o_pos, i_pos_list in destroy_map.iteritems():\r\n destroyed_idx_list.extend(i_pos_list)\r\n destroyed_res_list = [node.inputs[i] for i in destroyed_idx_list]\r\n\r\n actually_inplace_outputs = []\r\n dmap = getattr(node.op, 'destroy_map', {})\r\n for oo, ii in dmap.iteritems():\r\n out_var = storage_map[node.outputs[oo]][0]\r\n in_var = storage_map[node.inputs[ii[0]]][0]\r\n if _may_share_memory(out_var, in_var):\r\n actually_inplace_outputs.append(node.outputs[oo])\r\n\r\n if warn_input_not_reused and destroyed_res_list:\r\n if isinstance(node.op, OutputGuard):\r\n # The point of OutputGuard is to be declared as destructive\r\n # while not destroying anything\r\n continue\r\n if out_var is not in_var:\r\n _logger.warning(\"Optimization Warning: input idx %d marked \"\r\n \"as destroyed was not changed for node '%s'\",\r\n ii[0], str(node))\r\n\r\n vmap = getattr(node.op, 'view_map', {})\r\n for oo, ii in vmap.iteritems():\r\n out_var = storage_map[node.outputs[oo]][0]\r\n in_var = storage_map[node.inputs[ii[0]]][0]\r\n if _may_share_memory(out_var, in_var):\r\n actually_inplace_outputs.append(node.outputs[oo])\r\n\r\n if warn_input_not_reused:\r\n # We don't try to optimize simple scalar and empty ndarray,\r\n # as this is not worth our time. 
This happen at least in\r\n # Subtensor when the output is a scalar But this depend on\r\n # the version of numpy!\r\n if getattr(out_var, 'size', 2) <= 1:\r\n continue\r\n if isinstance(node.op, OutputGuard):\r\n # This class is not in the final graph.\r\n continue\r\n if not _may_share_memory(out_var, in_var):\r\n _logger.warning(\"Optimization Warning: input idx %d marked \"\r\n \"as viewed but new memory allocated by node '%s'\",\r\n ii[0], str(node))\r\n\r\n for r_idx, r in enumerate(node.inputs):\r\n if not r.type.values_eq(r_vals[r], storage_map[r][0]):\r\n # some input node 'r' got changed by running the node\r\n # this may or may not be ok...\r\n if r in destroyed_res_list:\r\n # ok, we expected r to be destroyed\r\n if node in active_nodes:\r\n if dr_vals.get(r, (0, node))[1] is not node:\r\n # bad: there should only be one active node that destroys any variable\r\n raise Exception('failure in topological ordering')\r\n if clobber_dr_vals:\r\n dr_vals[r] = (storage_map[r][0], node) #no copy, this is the last use of this variable\r\n storage_map[r][0] = None #make sure that dr_vals[r] doens't get used again\r\n else:\r\n raise BadDestroyMap(node, r_idx, r_vals[r],\r\n storage_map[r][0], perform)\r\n\r\n return actually_inplace_outputs", "def propose_optimize():\n pass", "def _discretize(self, constraints_object):\n pass", "def _constraints_other(self):\n pass", "def addConstraint(constraint, problem):\n problem += constraint", "def check_invalid_args_general(config):\n # Not mathematically correct, but might be required if prior is not\n # appropriate.\n if hasattr(config, 'kl_scale') and config.kl_scale != 1.0:\n warnings.warn('Prior matching term will be scaled by %f.'\n % config.kl_scale)\n\n if hasattr(config, 'store_final_model') and \\\n hasattr(config, 'train_from_scratch') and \\\n config.store_final_model and config.train_from_scratch:\n warnings.warn('Note, when training from scratch, the final model is ' +\n 'only trained on the last task!')", "def ensure_default_constraints(self):\r\n positive_strings = ['variance', 'lengthscale', 'precision', 'decay', 'kappa']\r\n # param_names = self._get_param_names()\r\n currently_constrained = self.all_constrained_indices()\r\n to_make_positive = []\r\n for s in positive_strings:\r\n for i in self.grep_param_names(\".*\" + s):\r\n if not (i in currently_constrained):\r\n to_make_positive.append(i)\r\n if len(to_make_positive):\r\n self.constrain_positive(np.asarray(to_make_positive))", "def change_priorities(self,idxs, errors): \n for i in range(len(idxs)):\n self.update(idxs[i] , errors[i])", "def __relational_restriction_incorrect_parameter_vs_parameter(self):\n strTestName = 'Parameter higher or equal to a parameter (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iRefParameter1', 'Int parameter')\n RxCSObject.paramType('iRefParameter1', int)\n\n # Now, the parameter 2 must be lower or equal to 3*iRefParameter-4\n RxCSObject.paramAddMan('iParameter2', 'Int parameter')\n RxCSObject.paramType('iParameter2', int)\n RxCSObject.paramHE('iParameter2', 'iRefParameter1', mul=3, add=-4) # In English, iParameter must be higher than 4\n\n RxCSObject.iRefParameter1 = 3\n RxCSObject.iParameter2 = 4\n\n self.__parametersCheck_error(RxCSObject, RelationalError, strTestName)", "def __call__(self, p, q, verbosity=1, warn=True):\n if self.exactly_zero: return 0.0 # shortcut for trivial case\n if self.weight == 0:\n return _np.sum(_np.abs(q - p)) / 2\n\n #Set parameter values\n 
self.P.value[:] = p[:]\n self.Q.value[:] = q[:]\n\n treg_factor_ok = False\n self.Treg_factor.value = self.initial_treg_factor\n while not treg_factor_ok:\n\n obj1 = self._obj(self.t_params)\n if REBUILD:\n self._rebuild_problem()\n else:\n self._build_problem()\n\n self.prob.solve(solver=remove_kicked(self.solver), verbose=(verbosity > 1),\n **default_cvxpy_args(self.solver))\n\n failed = self.T.value is None # or self.resid_tvd.value is None\n\n if not failed: # sanity check\n t_chk = self.build_transfer_mx(self.T_params.value)\n assert(_np.linalg.norm(_np.abs(self.T.value) - t_chk) < 1e-6)\n\n self.warning_msg = None\n if failed:\n if self.solver == \"SCS\":\n #raise ValueError(\"ResidualTVD: Convex optimizer failure\")\n for eps in [1e-5, 1e-4, 1e-3, 1e-2, 1e-1]:\n if REBUILD:\n self._rebuild_problem()\n else:\n self._build_problem()\n self.prob.solve(solver=remove_kicked(self.solver), verbose=(verbosity > 1), eps=eps)\n failed = self.T.value is None # or self.resid_tvd.value is None\n\n if not failed:\n t_chk = self.build_transfer_mx(self.T_params.value)\n assert(_np.linalg.norm(self.T.value - t_chk) < 1e-6)\n\n if eps > 1e-4:\n self.warning_msg = (\"ResidualTVD: Needed to increase eps to %g.\"\n \" The resulting ResidualTVD values are less precise.\") % eps\n if warn: print(self.warning_msg)\n break\n else:\n raise ValueError(\"ResidualTVD: Convex optimizer failure\")\n else:\n raise ValueError(\"ResidualTVD: Convex optimizer failure\")\n\n #check that Treg_factor term doesn't dominate\n\n # Update: just leave this alone, since norm-penalty doesn't get reported - TODO later\n treg_factor_ok = True\n\n # ------------------------------------------------------------------\n #EXPERIMENTAL algorithms for updating Treg_factor ------------------\n # ------------------------------------------------------------------\n\n #resid_tvd = self._obj(self.T_params.value)\n #if resid_tvd > 10 * self.Treg_factor.value * _np.linalg.norm(self.T_params.value, 1):\n # Treg_factor_ok = True\n #else:\n # self.Treg_factor.value = resid_tvd / 10 # self.Treg_factor.value / 10\n\n #obj2 = self._obj(self.T_params.value)\n #if obj2 < obj1:\n # Treg_factor_ok = True\n #else:\n # #maybe penalty term dominated - reduce norm(tparams) penalty term\n # self.T_params.value[:] = self.t_params[:] #REVERT\n # self.T.value[:, :] = _np.sum([self.t_params[ind] * self.t_basis[ind]\n # for ind in range(self.dim)], axis=0) + _np.eye(self.n) # REVERT\n # self.Treg_factor.value = self.Treg_factor.value / 10\n # if self.Treg_factor.value > 1e-7:\n # print(\"REDUCING treg factor to: \", self.Treg_factor.value)\n # else:\n # Treg_factor_ok = True # give up!\n\n if self.Treg_factor.value != self.initial_treg_factor:\n if verbosity > 0: print(\"NOTE: Treg_factor was reduced to %g.\" % self.Treg_factor.value)\n #_warnings.warn((\"Initial Treg_factor (%g) was too large, and was reduced to %g.\"\n # \" Consider reducing the initial value to avoid repeating calculations.\")\n # % (self.initial_treg_factor, self.Treg_factor.value))\n\n obj2 = self._obj(self.T_params.value)\n if obj2 <= obj1:\n self.t_params[:] = self.T_params.value[:]\n else:\n print_revert_msg(\"ResidualTVD failed to reduce objective function (%g > %g)\", (obj2, obj1), verbosity)\n self.T_params.value[:] = self.t_params[:]\n self.T.value[:, :] = self.build_transfer_mx(self.t_params)\n\n return self._obj(self.t_params) # not self.obj.value b/c that has additional norm regularization", "def test_creation_incorrect_change_hardbounds():\n with pytest.raises(ValueError) as 
__:\n value = 1\n int_a = param.Integer(value=value, hardbounds=[0, 10])\n int_a.hardbounds = [0, 10, 20]", "def constraints(self, x):\n pass", "def test_creation_incorrect_change_softbounds():\n with pytest.raises(ValueError) as __:\n value = 1\n int_a = param.Integer(value=value, softbounds=[0, 10])\n int_a.softbounds = [0, 10, 20]", "def warning(self, *args, **kwargs):", "def change_priorities(self,idxs,errors):\n #print(\"Indecies \",idxs)\n for i,idx in enumerate(idxs):\n self.update(idx, errors[i])", "def constraints(self):\n ...", "def __relational_restriction_correct_parameter_vs_parameter(self):\n strTestName = 'Parameter lower or equal to a parameter (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iRefParameter1', 'Int parameter')\n RxCSObject.paramType('iRefParameter1', int)\n\n # Now, the parameter 2 must be lower or equal to 3*iRefParameter-4\n RxCSObject.paramAddMan('iParameter2', 'Int parameter')\n RxCSObject.paramType('iParameter2', int)\n RxCSObject.paramLE('iParameter2', 'iRefParameter1', mul=3, add=-4) # In English, iParameter must be higher than 4\n\n RxCSObject.iRefParameter1 = 3\n RxCSObject.iParameter2 = 5\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def add_constraint(self, constraint, problem):\n problem += constraint", "def _perturbInPlaceHard(self):\n die", "def le_inplace(a,b):", "def pointConstraint(*args, layer: AnyStr=\"\", maintainOffset: bool=True, name: Union[AnyStr,\n bool]=\"\", offset: Union[List[float, float, float], bool]=None, remove:\n bool=True, skip: Union[AnyStr, List[AnyStr]]=\"\", targetList: bool=True,\n weight: Union[float, bool]=0.0, weightAliasList: bool=True, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass", "def optimizer_array(self, p):\n f = None\n if self.has_parent() and self.constraints[__fixed__].size != 0:\n f = np.ones(self.size).astype(bool)\n f[self.constraints[__fixed__]] = FIXED\n elif self._has_fixes():\n f = self._fixes_\n if f is None:\n self.param_array.flat = p\n [np.put(self.param_array, ind, c.f(self.param_array.flat[ind]))\n #py3 fix\n #for c, ind in self.constraints.iteritems() if c != __fixed__]\n for c, ind in self.constraints.items() if c != __fixed__]\n else:\n self.param_array.flat[f] = p\n [np.put(self.param_array, ind[f[ind]], c.f(self.param_array.flat[ind[f[ind]]]))\n #py3 fix\n #for c, ind in self.constraints.iteritems() if c != __fixed__]\n for c, ind in self.constraints.items() if c != __fixed__]\n #self._highest_parent_.tie.propagate_val()\n\n self._optimizer_copy_transformed = False\n self.trigger_update()", "def optimize_policy(self, all_samples_data, log=True):\n raise NotImplementedError", "def test_patch_hyperflex_feature_limit_internal(self):\n pass", "def warning(self, *args, **kwargs): # real signature unknown\n pass", "def optimise(w, w_delta):\n return w.assign(w - w_delta)", "def simplify_modifications(self, simulationProblem, objFunction, fitness):\n constraintsOrig = self.constraints.copy()\n for k in constraintsOrig.keys():\n del self.constraints[k]\n try:\n res = simulationProblem.simulate(self)\n newFitness = objFunction.get_fitness(res)\n except Exception:\n newFitness = -1.0\n if round(fitness, 12) != round(newFitness, 12):\n self.constraints[k] = constraintsOrig[k]", "def unconstrain_positive(self):\n self.unconstrain(Logexp())", "def redefineProblem(self):\n self.formulation = cp.Problem(self.obj, self.constraints)", "def redefineProblem(self):\n 
self.formulation = cp.Problem(self.obj, self.constraints)", "def _warn_immutability(self):\n for info, group in self._all_opt_infos():\n opt = info['opt']\n if opt.mutable:\n continue\n groupname = group.name if group else 'DEFAULT'\n try:\n old, _ = opt._get_from_namespace(self._namespace, groupname)\n except KeyError:\n old = None\n try:\n new, _ = opt._get_from_namespace(self._mutable_ns, groupname)\n except KeyError:\n new = None\n if old != new:\n LOG.warning(\"Ignoring change to immutable option \"\n \"%(group)s.%(option)s\",\n {\"group\": groupname, \"option\": opt.name})", "def soft_update(target, source, tau):\n for target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)", "def relax(self):\n # print(\"putin\", self.level.rhs.reshape(-1)[:])\n # print(\"getout\", self.solver(self.level.rhs.reshape(-1)))\n\n self.level.mid[:] = self.solver(self.level.rhs.reshape(-1)).reshape(self.level.mid.shape)", "def update_params(self):\n if self.clip > 0:\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)\n self.optimizer.step()", "def do_relax(self):\n raise NotImplementedError", "def parameter_update(self, X, X_mask):\n data_with_mask = np.hstack([X, X_mask])\n X_pred, X_MC_preds = self.predict(data_with_mask)\n X[X_mask] *= self.keep_coeff\n X[X_mask] += self.weight_update_coeff * X_pred[X_mask]\n return X, X_MC_preds", "def constrain_fn(model, model_args, model_kwargs, transforms, params):\n params_constrained = transform_fn(transforms, params)\n substituted_model = substitute(model, base_param_map=params_constrained)\n model_trace = trace(substituted_model).get_trace(*model_args, **model_kwargs)\n return {k: model_trace[k]['value'] for k, v in params.items() if k in model_trace}", "def _objective_grad(self, transformed_free_parameters):\n # get the fixed indices and add to the transformed parameters\n free = np.logical_not(self._fixed_indicies)\n transformed_parameters = self._previous_parameters\n transformed_parameters[free] = transformed_free_parameters\n try:\n # untransform and internalize parameters\n self.parameters = self._untransform_parameters(transformed_parameters)\n # compute objective and gradient in untransformed space\n (objective, gradient) = self.log_likelihood(return_gradient=True)\n objective = -objective # since we want to minimize\n gradient = -gradient\n # ensure the values are finite\n if not np.isfinite(objective):\n logger.debug('objective is not finite')\n if not np.all(np.isfinite(gradient[free])):\n logger.debug('some derivatives are non-finite')\n # transform the gradient \n gradient = self._transform_gradient(self.parameters, gradient)\n except (LinAlgError, ZeroDivisionError, ValueError):\n logger.error('numerical issue computing log-likelihood or gradient')\n raise\n # get rid of the gradients of the fixed parameters\n free_gradient = gradient[free]\n\n # call the counter if ness\n if self._counter is not None:\n msg='log-likelihood=%.4g, gradient_norm=%.2g'\\\n % (-objective, np.linalg.norm(gradient))\n if self._counter.backup is None or self._counter.backup[0]<-objective:\n self._counter(msg=msg,store=(-objective,self.parameters.copy()))\n else: # don't update backup\n self._counter(msg=msg)\n return objective, free_gradient", "def pre_modify(self):\n return 0", "def recompressionIdx(self, opt=1):\n maskCr = np.full(len(self.raw), False)\n if opt == 1:\n maskCr[self.brkIdx1] = True\n maskCr[self.brkIdx2] = True\n elif opt == 2:\n 
maskCr[self.brkIdx1: self.brkIdx2+1] = True\n elif opt == 3:\n maskCr[self.brkIdx1: self.brkIdx3+1] = True\n # -- Linear regresion\n sigmaCr = self.raw['stress'].iloc[maskCr]\n sigmaCrlog = np.log10(sigmaCr)\n eCr = self.raw['e'].iloc[maskCr]\n idxCrInt, idxCr = polyfit(sigmaCrlog, eCr, deg=1)\n r2Cr = r2_score(\n y_true=eCr, y_pred=polyval(sigmaCrlog, [idxCrInt, idxCr]))\n self.maskCr = maskCr\n self.r2Cr = r2Cr\n self.idxCr = abs(idxCr)\n self.idxCrInt = idxCrInt\n return", "def unconstrain_negative(self):\n self.unconstrain(NegativeLogexp())", "def sub_inplace(a, b):", "def onSkipSegLimit(self):\r\n profprint()\r\n #research\r\n logic = self.logic\r\n logic.placeAxialLimitMarker(assign=False)", "def poleVectorConstraint(*args, layer: AnyStr=\"\", name: Union[AnyStr, bool]=\"\", remove:\n bool=True, targetList: bool=True, weight: Union[float, bool]=0.0,\n weightAliasList: bool=True, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[List[AnyStr], Any]:\n pass", "def rechargeHint(self):\n if self.hints < 8:\n self.hints = self.hints + 1", "def CompilationRelaxations(self) -> int:", "def apply_default_constraints(self):\n try:\n self.apply_secthresh(pipeline_weaksec(self.koi))\n except NoWeakSecondaryError:\n logging.warning('No secondary eclipse threshold set for {}'.format(self.koi))\n self.set_maxrad(default_r_exclusion(self.koi))", "def internal_write_or_check(self, orig_query, orig_varenv, mode):\n\n # make sure we have a Varenv object, not just a dictionary\n if isinstance(orig_varenv, Varenv):\n varenv = orig_varenv\n else:\n varenv = Varenv(orig_varenv, self.lookup)\n\n transaction_id = varenv.get('tid')\n\n # check that the user can write to $permission\n\n # stages:\n pprintlog(\n 'LOW_%s' % str(mode),\n orig_query,\n transaction_id=transaction_id,\n level=log_util.DEBUG,\n push=True)\n\n # add \"original_query\" decorators.\n query = self.make_orig(orig_query)\n self.make_parents(query, True)\n\n # check that $user, $permission, $attribution all check out OK\n scope.check_write_defaults(self, varenv)\n\n # propagate @insert and @delete to links as :insert and :delete\n self.add_implied_writes(query, varenv)\n\n # create QueryPrimitives\n self.add_query_primitives(query, varenv, mode)\n\n # hook QueryPrimitives together\n self.link_query_primitives_root(query, varenv, mode)\n\n dumplog('LOW_%s_QUERY' % str(mode), query)\n\n dumplog('INITIAL_%s_PRIMITIVES' % str(mode),\n [x.node for x in elements(query, dict)])\n\n # run (recursively) all the preparations\n self.dispatch_prepares(query, varenv)\n\n dumplog('PREPARED_%s_PRIMITIVES' % str(mode),\n [x.node for x in elements(query, dict)])\n\n # check the query is not trying to do the same thing in two places.\n self.check_circularity(query, varenv)\n\n # this is the point where we check that the writes are legal\n # and set the scopes.\n self.check_write_access(query, varenv)\n\n if mode is WriteMode:\n # generate the writes\n self.generate_write_queries(query, varenv)\n else:\n self.generate_check_responses(query, varenv)\n\n dumplog('COMPLETED_%s_PRIMITIVES' % str(mode),\n [x.node for x in elements(query, dict)])\n\n # dump the filtered write tree\n write_result = self.generate_write_result(query, varenv)\n\n pprintlog(\n 'LOW_%s_RESULT' % str(mode),\n write_result,\n transaction_id=transaction_id,\n level=log_util.DEBUG,\n pop=True)\n\n return write_result\n\n # figure out how far the write goes\n # book it as a potential write\n\n # figure out the query for the other side if we have a non-empty one\n # recursively prepare 
that query\n\n # get the list of writes\n # pipeline them to the graph\n # parse out the new guids\n # put it all back together\n # success!!\n\n # turn QPs into graph query.\n #self.run_query(query,WriteMode)", "def set_internal(self):\n self.internal_bisect = True # pragma: no cover", "def _update_parallel_coef_constraints(self, x):\n n_features = x.shape[1]\n xi_final = np.zeros((n_features, n_features))\n\n # Todo: parallelize this for loop with Multiprocessing/joblib\n if self.model_subset is None:\n self.model_subset = range(n_features)\n elif np.max(np.abs(self.model_subset)) >= n_features:\n raise ValueError(\n \"A value in model_subset is larger than the number \"\n \"of features in the candidate library\"\n )\n for i in self.model_subset:\n print(\"Model \", i)\n xi = cp.Variable(n_features)\n # Note that norm choice below must be convex,\n # so thresholder must be L1 or L2\n if (self.thresholder).lower() in (\"l1\", \"weighted_l1\"):\n if self.thresholds is None:\n cost = cp.sum_squares(x[:, i] - x @ xi) + self.threshold * cp.norm1(\n xi\n )\n else:\n cost = cp.sum_squares(x[:, i] - x @ xi) + cp.norm1(\n self.thresholds[i, :] @ xi\n )\n if (self.thresholder).lower() in (\"l2\", \"weighted_l2\"):\n if self.thresholds is None:\n cost = (\n cp.sum_squares(x[:, i] - x @ xi)\n + self.threshold * cp.norm2(xi) ** 2\n )\n else:\n cost = (\n cp.sum_squares(x[:, i] - x @ xi)\n + cp.norm2(self.thresholds[i, :] @ xi) ** 2\n )\n prob = cp.Problem(\n cp.Minimize(cost),\n [xi[i] == 0.0],\n )\n try:\n prob.solve(\n max_iter=self.max_iter,\n eps_abs=self.tol,\n eps_rel=self.tol,\n verbose=self.verbose_cvxpy,\n )\n if xi.value is None:\n warnings.warn(\n \"Infeasible solve on iteration \"\n + str(i)\n + \", try changing your library\",\n ConvergenceWarning,\n )\n xi_final[:, i] = xi.value\n # Annoying error coming from L2 norm switching to use the ECOS\n # solver, which uses \"max_iters\" instead of \"max_iter\", and\n # similar semantic changes for the other variables.\n except TypeError:\n prob.solve(\n max_iters=self.max_iter,\n abstol=self.tol,\n reltol=self.tol,\n verbose=self.verbose_cvxpy,\n )\n if xi.value is None:\n warnings.warn(\n \"Infeasible solve on iteration \"\n + str(i)\n + \", try changing your library\",\n ConvergenceWarning,\n )\n xi_final[:, i] = xi.value\n except cp.error.SolverError:\n print(\"Solver failed on model \", str(i), \", setting coefs to zeros\")\n xi_final[:, i] = np.zeros(n_features)\n return xi_final", "def __relational_restriction_incorrect_parameter_vs_number(self):\n strTestName = 'A parameter lower than a number (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('iParameter1', 'Int parameter')\n RxCSObject.paramType('iParameter1', int)\n RxCSObject.paramH('iParameter1', 2, mul=0.5, add=3) # In English, iParameter must be higher than 4\n\n RxCSObject.iParameter1 = 4\n\n self.__parametersCheck_error(RxCSObject, RelationalError, strTestName)", "def _remove_from_index_operations(self, which, transforms):\n if len(transforms) == 0:\n transforms = which.properties()\n removed = np.empty((0,), dtype=int)\n for t in list(transforms):\n unconstrained = which.remove(t, self._raveled_index())\n removed = np.union1d(removed, unconstrained)\n if t is __fixed__:\n self._highest_parent_._set_unfixed(self, unconstrained)\n\n return removed", "def _remove_from_index_operations(self, which, transforms):\n if len(transforms) == 0:\n transforms = which.properties()\n removed = np.empty((0,), dtype=int)\n for t in list(transforms):\n unconstrained = 
which.remove(t, self._raveled_index())\n removed = np.union1d(removed, unconstrained)\n if t is __fixed__:\n self._highest_parent_._set_unfixed(self, unconstrained)\n\n return removed", "def hard_update(self,target, source):\n\t\tfor target_param, param in zip(target.parameters(), source.parameters()):\n\t\t\t\ttarget_param.data.copy_(param.data)", "def additional_cloning_checks(self):\n pass", "def reset_parameters(self) -> None:\n \n self.classifier.apply(xavier)\n if len(self.old_cols) > 0:\n self.adaptor1.apply(xavier)\n self.adaptor2.apply(xavier)", "def _modify_penalty(self, new_penalty, penalty_name):\n if not new_penalty:\n return\n phase_idx = new_penalty.phase\n\n # Copy to self.original_values so it can be save/load\n self.original_values[penalty_name].add(deepcopy(new_penalty))\n\n if penalty_name == \"objective_functions\":\n ObjectiveFunction.add_or_replace(self, self.nlp[phase_idx], new_penalty)\n elif penalty_name == \"constraints\":\n ConstraintFunction.add_or_replace(self, self.nlp[phase_idx], new_penalty)\n elif penalty_name == \"parameters\":\n Parameters.add_or_replace(self, new_penalty)\n else:\n raise RuntimeError(\"Unrecognized penalty\")", "def addConstraint(self, updatedData):\n x = None # TODO: retrive x from updated data.\n y = None # TODO: retrive y from updated data.\n\n x = np.asarray(x)\n y = np.asarray(y)\n cons = self.delta\n l = cp.sum(cp.multiply(y, x @ self.theta) - cp.logistic(x @ self.theta))\n cons -= l\n self.constraints.append(cons <= 0)", "def create_keep_in_constraint(self,der=2,limit=1e1,weight=1e5):\n print(\"Creating Keep in constraint\")\n constr = dict()\n constr['constraint_type'] = \"ellipsoid\"\n constr['weight'] = self.accel_weight\n constr['keep_out'] = False\n constr['der'] = der\n constr['x0'] = np.zeros(3)\n A = np.matrix(np.identity(3))\n limit = self.accel_lim\n A[0,0] = 1/limit**2\n A[1,1] = 1/limit**2\n A[2,2] = 1/limit**2\n constr['rot_mat'] = np.identity(3)\n constr['A'] = A\n\n\n self.qr_polytraj.add_constraint(constr['constraint_type'],constr,dynamic_weighting=False,sum_func=False)\n\n # self.qr_polytraj.run_astro()\n # self.update_path_markers()\n # acc_wp = self.get_accel_at_waypoints(\"main\")\n # self.interactive_marker_worker.make_controls(self.qr_polytraj.waypoints)\n # self.interactive_marker_worker.update_controls(self.qr_polytraj.waypoints,acc_wp = acc_wp)", "def warn(exc, self, optimizer):\r\n _logger.error(\"SeqOptimizer apply %s\" % str(optimizer))\r\n _logger.error(\"Traceback:\")\r\n _logger.error(traceback.format_exc())\r\n if config.on_opt_error == 'raise':\r\n raise exc\r\n elif config.on_opt_error == 'pdb':\r\n pdb.post_mortem(sys.exc_info()[2])", "def reindex(self):", "def reindex(self):", "def forward(self,add,mod,dat):\n if(mod.shape[0] != self.__nm or dat.shape[0] != self.__nd):\n raise Exception(\"lint forward: input shapes do not match those passed to constructor\")\n\n if(add == False):\n dat[:] = 0.0\n\n forward_lint(self.__om,self.__dm,self.__nm,self.__nd,self.__crd,mod,dat)", "def checkAndUpdateAllPossibilities(self, G, PD, prevPat):\n ### removing candidate if any other action was performed on it ###\n if 'shrink' not in prevPat.pat_type:\n if prevPat.pat_type in ['merge']:\n for p in prevPat.prev_order:\n if p in self.Data:\n del self.Data[p]\n elif prevPat.pat_type in ['update', 'split', 'remove']:\n if prevPat.prev_order in self.Data:\n del self.Data[prevPat.prev_order]\n if self.gtype == 'U':\n for k,v in self.Data.items():\n if len(set(v['Pat'].NL).intersection(set(prevPat.NL))) > 1:\n 
self.updateConstraintEvaluation(G, PD, k, 2)\n else:\n self.updateConstraintEvaluation(G, PD, k, 1)\n else:\n for k,v in self.Data.items():\n inInt = len(set(v['Pat'].inNL).intersection(set(prevPat.inNL)))\n outInt = len(set(v['Pat'].outNL).intersection(set(prevPat.outNL)))\n if inInt > 1 and outInt > 1:\n self.updateConstraintEvaluation(G, PD, k, 2)\n else:\n self.updateConstraintEvaluation(G, PD, k, 1)\n return", "def objective(self, param):\n self.__init__(param, self.data)\n # return self.rmse() + self.penalty()\n return self.rmse() + self.penalty()", "def unconstrain(self, regexp):\r\n matches = self.grep_param_names(regexp)\r\n\r\n # tranformed contraints:\r\n for match in matches:\r\n self.constrained_indices = [i[i <> match] for i in self.constrained_indices]\r\n\r\n # remove empty constraints\r\n tmp = zip(*[(i, t) for i, t in zip(self.constrained_indices, self.constraints) if len(i)])\r\n if tmp:\r\n self.constrained_indices, self.constraints = zip(*[(i, t) for i, t in zip(self.constrained_indices, self.constraints) if len(i)])\r\n self.constrained_indices, self.constraints = list(self.constrained_indices), list(self.constraints)\r\n\r\n # fixed:\r\n self.fixed_values = [np.delete(values, np.nonzero(np.sum(indices[:, None] == matches[None, :], 1))[0]) for indices, values in zip(self.fixed_indices, self.fixed_values)]\r\n self.fixed_indices = [np.delete(indices, np.nonzero(np.sum(indices[:, None] == matches[None, :], 1))[0]) for indices in self.fixed_indices]\r\n\r\n # remove empty elements\r\n tmp = [(i, v) for i, v in zip(self.fixed_indices, self.fixed_values) if len(i)]\r\n if tmp:\r\n self.fixed_indices, self.fixed_values = zip(*tmp)\r\n self.fixed_indices, self.fixed_values = list(self.fixed_indices), list(self.fixed_values)\r\n else:\r\n self.fixed_indices, self.fixed_values = [], []", "def copy_optimizer_params_to_model(named_params_model, named_params_optimizer):\n for (name_opti, param_opti), (name_model, param_model) in zip(named_params_optimizer, named_params_model):\n if name_opti != name_model:\n warnings.warn(\"name_opti != name_model: {} {}\".format(name_opti, name_model))\n raise ValueError\n param_model.data.copy_(param_opti.data)", "def ge_inplace(a,b):", "def update(\r\n params: hk.Params,\r\n opt_state: OptState,\r\n batch, label, agreement\r\n ) -> Tuple[hk.Params, OptState]:\r\n # grads = jax.grad(loss)(params, batch, label)\r\n # grads_masked = (gradient_per_sample if use_ilc else gradient)(params, batch, label) # (gradient_per_sample)(params, batch, label)\r\n # sum_grad_masked_regularized = jax.tree_multimap(lambda x,y:x+y,grads_masked,gradient_reg(params))\r\n # grads = sum_grad_masked_regularized\r\n # updates, opt_state = opt.update(grads, opt_state)\r\n # new_params = optax.apply_updates(params, updates)\r\n\r\n grads_samples = gradient_per_sample(params, batch, label)\r\n ANDmask = and_mask(agreement)\r\n\r\n masked_grads,_ = ANDmask.update(grads_samples, opt_state)\r\n reg_grads = gradient_reg(params)\r\n\r\n sum_grad_masked_regularized = jax.tree_multimap(lambda x,y:x+y,masked_grads,reg_grads)\r\n \r\n updates,_ = opt.update(sum_grad_masked_regularized, opt_state)\r\n\r\n new_params = optax.apply_updates(params, updates)\r\n\r\n return new_params, opt_state", "def check_params(self, model_params):\n\n comm = self.comm\n for param, policy in self.noise_policy.items():\n low_bound, up_bound, absify, low_bound_diagonal = policy\n new_pvalue = model_params[param]\n if np.isscalar(new_pvalue): # Param to be noisified is scalar\n if comm.rank == 0:\n if 
new_pvalue < low_bound:\n print(\"check_params: Reset lower bound of %s\" % param)\n new_pvalue = low_bound\n if new_pvalue >= up_bound:\n print(\"check_params: Reset upper bound of %s\" % param)\n new_pvalue = up_bound\n if absify:\n print(\"check_params: Taking abs of %s\" % param)\n new_pvalue = np.abs(new_pvalue)\n if (\n low_bound_diagonal is not None\n ): # when using isotropic instead of full matrix\n if new_pvalue < low_bound_diagonal:\n print(\"check_params: Reset lower bound of %s (diagonal)\" % param)\n new_pvalue = low_bound_diagonal\n new_pvalue = comm.bcast(new_pvalue)\n else:\n if comm.rank == 0:\n if (new_pvalue < low_bound).any():\n print(\"check_params: Reset lower bound of %s\" % param)\n if (new_pvalue >= up_bound).any():\n print(\"check_params: Reset upper bound of %s\" % param)\n new_pvalue = np.maximum(low_bound, new_pvalue)\n new_pvalue = np.minimum(up_bound, new_pvalue)\n if absify:\n print(\"check_params: Taking abs of %s\" % param)\n new_pvalue = np.abs(new_pvalue)\n if low_bound_diagonal is not None:\n mask = np.diag(new_pvalue) < low_bound_diagonal\n if mask.any():\n print(\"check_params: Reset lower bound of %s (diagonal)\" % param)\n new_pvalue[np.diag(mask)] = low_bound_diagonal\n comm.Bcast([new_pvalue, MPI.DOUBLE])\n model_params[param] = new_pvalue\n\n return model_params", "def test_update_hyperflex_feature_limit_internal(self):\n pass", "def allow_warnings(self, allow_warnings):\n self._allow_warnings = allow_warnings", "def _update_optimizer(self, hyperparameters, score, fit=True):\n if self.do_maximize:\n score = -score\n self.optimizer_result = self.optimizer.tell(hyperparameters, score, fit=fit)", "def modifyNotValuableComponents(self):\n # Nothing to do\n pass", "def useHint(self):\n self.hints = self.hints - 1", "def nop_minifier(arg):\n return arg", "def reduced_cost(data, indexing, cf, cf_prime, v, k, uw_sample_count=None):\n # copy indexing and move v to k\n cpy = indexing.copy()\n cpy[v] = k\n vs = np.arange(data.shape[0])\n points = None\n if uw_sample_count is None:\n # compute all possible 3-ary subsets that contain v\n points = np.array(np.meshgrid(v,vs,vs)).T.reshape(-1,3)\n points = points[ (points[:,0] != points[:,1]) & \n (points[:,0] != points[:,2]) & \n (points[:,1] < points[:,2]) ]\n else:\n # sample 3-ary subsets that contain v\n pointsv = np.ones((uw_sample_count,1), dtype=np.int)*v\n allowed = np.arange(indexing.shape[0])[np.arange(indexing.shape[0]) != v]\n pointsuw = sample_noreplace(allowed, uw_sample_count, 2)\n points = np.concatenate((pointsv, pointsuw), axis=1)\n # the result is the difference between the original indexing\n # and the indexing after the move-operation\n result = (partial_cost(data, indexing, points, cf, cf_prime) -\n partial_cost(data, cpy, points, cf, cf_prime))\n return result", "def _applyLimitsOfQuantification(self, onlyLLOQ=False, **kwargs):\n\n sampleMetadata = copy.deepcopy(self.sampleMetadata)\n featureMetadata = copy.deepcopy(self.featureMetadata)\n intensityData = copy.deepcopy(self._intensityData)\n expectedConcentration = copy.deepcopy(self.expectedConcentration)\n calibration = copy.deepcopy(self.calibration)\n if ((not hasattr(self, 'sampleMetadataExcluded')) | (not hasattr(self, 'featureMetadataExcluded')) | (not hasattr(self, 'intensityDataExcluded')) | (not hasattr(self, 'expectedConcentrationExcluded')) | (not hasattr(self, 'excludedFlag'))):\n sampleMetadataExcluded = []\n featureMetadataExcluded = []\n intensityDataExcluded = []\n expectedConcentrationExcluded = []\n excludedFlag = []\n 
else:\n sampleMetadataExcluded = copy.deepcopy(self.sampleMetadataExcluded)\n featureMetadataExcluded = copy.deepcopy(self.featureMetadataExcluded)\n intensityDataExcluded = copy.deepcopy(self.intensityDataExcluded)\n expectedConcentrationExcluded = copy.deepcopy(self.expectedConcentrationExcluded)\n excludedFlag = copy.deepcopy(self.excludedFlag)\n\n ## Check input columns\n if 'LLOQ' not in featureMetadata.columns:\n raise AttributeError('the featureMetadata[\\'LLOQ\\'] column is absent')\n if onlyLLOQ==False:\n if 'ULOQ' not in featureMetadata.columns:\n raise AttributeError('featureMetadata[\\'ULOQ\\'] column is absent')\n\n ## Features only Monitored are not processed and passed untouched (concatenated back at the end)\n untouched = (featureMetadata['quantificationType'] == QuantificationType.Monitored).values\n if sum(untouched) != 0:\n print('The following features are only monitored and therefore not processed for LOQs: ' + str(featureMetadata.loc[untouched, 'Feature Name'].values.tolist()))\n untouchedFeatureMetadata = featureMetadata.loc[untouched, :]\n featureMetadata = featureMetadata.loc[~untouched, :]\n untouchedIntensityData = intensityData[:, untouched]\n intensityData = intensityData[:, ~untouched]\n untouchedExpectedConcentration = expectedConcentration.loc[:, untouched]\n expectedConcentration = expectedConcentration.loc[:, ~untouched]\n # same reordering of the calibration\n if isinstance(calibration, dict):\n untouchedCalibFeatureMetadata = calibration['calibFeatureMetadata'].loc[untouched, :]\n calibration['calibFeatureMetadata'] = calibration['calibFeatureMetadata'].loc[~untouched, :]\n untouchedCalibIntensityData = calibration['calibIntensityData'][:, untouched]\n calibration['calibIntensityData'] = calibration['calibIntensityData'][:, ~untouched]\n untouchedCalibExpectedConcentration = calibration['calibExpectedConcentration'].loc[:, untouched]\n calibration['calibExpectedConcentration'] = calibration['calibExpectedConcentration'].loc[:, ~untouched]\n\n\n ## Exclude features without required information\n unusableFeat = featureMetadata['LLOQ'].isnull().values & (featureMetadata['quantificationType'] != QuantificationType.QuantOther).values\n if not onlyLLOQ:\n unusableFeat = unusableFeat | (featureMetadata['ULOQ'].isnull().values & (featureMetadata['quantificationType'] != QuantificationType.QuantOther).values)\n if sum(unusableFeat) != 0:\n print(str(sum(unusableFeat)) + ' features cannot be pre-processed:')\n print('\\t' + str(sum(unusableFeat)) + ' features lack the required information to apply limits of quantification')\n # store\n sampleMetadataExcluded.append(sampleMetadata)\n featureMetadataExcluded.append(featureMetadata.loc[unusableFeat, :])\n intensityDataExcluded.append(intensityData[:, unusableFeat])\n expectedConcentrationExcluded.append(expectedConcentration.loc[:, unusableFeat])\n excludedFlag.append('Features')\n #remove\n featureMetadata = featureMetadata.loc[~unusableFeat, :]\n intensityData = intensityData[:, ~unusableFeat]\n expectedConcentration = expectedConcentration.loc[:, ~unusableFeat]\n if isinstance(calibration, dict):\n calibration['calibFeatureMetadata'] = calibration['calibFeatureMetadata'].loc[~unusableFeat, :]\n calibration['calibIntensityData'] = calibration['calibIntensityData'][:, ~unusableFeat]\n calibration['calibExpectedConcentration'] = calibration['calibExpectedConcentration'].loc[:, ~unusableFeat]\n\n\n ## Values replacement (-inf / +inf)\n # iterate over the features\n for i in range(0, featureMetadata.shape[0]):\n # 
LLOQ\n if not numpy.isnan(featureMetadata['LLOQ'].values[i]):\n toReplaceLLOQ = intensityData[:, i] < featureMetadata['LLOQ'].values[i]\n intensityData[toReplaceLLOQ, i] = -numpy.inf\n\n # ULOQ\n if not onlyLLOQ:\n if not numpy.isnan(featureMetadata['ULOQ'].values[i]):\n toReplaceULOQ = intensityData[:, i] > featureMetadata['ULOQ'].values[i]\n intensityData[toReplaceULOQ, i] = numpy.inf\n\n\n ## Add back the untouched monitored features\n if sum(untouched) != 0:\n featureMetadata = pandas.concat([featureMetadata, untouchedFeatureMetadata], axis=0, sort=False)\n intensityData = numpy.concatenate((intensityData, untouchedIntensityData), axis=1)\n expectedConcentration = pandas.concat([expectedConcentration, untouchedExpectedConcentration], axis=1, sort=False)\n # reorder the calib\n if isinstance(calibration, dict):\n calibration['calibFeatureMetadata'] = pandas.concat([calibration['calibFeatureMetadata'], untouchedCalibFeatureMetadata], axis=0, sort=False)\n calibration['calibIntensityData'] = numpy.concatenate((calibration['calibIntensityData'], untouchedCalibIntensityData), axis=1)\n calibration['calibExpectedConcentration'] = pandas.concat([calibration['calibExpectedConcentration'], untouchedCalibExpectedConcentration], axis=1, sort=False)\n\n # Remove excess info\n featureMetadata.reset_index(drop=True, inplace=True)\n expectedConcentration.reset_index(drop=True, inplace=True)\n if isinstance(calibration, dict):\n calibration['calibFeatureMetadata'].reset_index(drop=True, inplace=True)\n calibration['calibExpectedConcentration'].reset_index(drop=True, inplace=True)\n\n ## return dataset with limits of quantification applied\n self.featureMetadata = featureMetadata\n self._intensityData = intensityData\n self.expectedConcentration = expectedConcentration\n self.calibration = calibration\n self.sampleMetadataExcluded = sampleMetadataExcluded\n self.featureMetadataExcluded = featureMetadataExcluded\n self.intensityDataExcluded = intensityDataExcluded\n self.expectedConcentrationExcluded = expectedConcentrationExcluded\n self.excludedFlag = excludedFlag\n if sum(unusableFeat) != 0:\n # featureMask size will be wrong, requires a reinitialisation\n self.initialiseMasks()\n\n ## Output and Log\n print('Values <LLOQ replaced by -inf')\n if not onlyLLOQ:\n print('Values >ULOQ replaced by +inf')\n if isinstance(calibration, dict):\n print('\\n')\n\n # log the modifications\n if onlyLLOQ:\n logLimits = 'Limits of quantification applied to LLOQ'\n else:\n logLimits = 'Limits of quantification applied to LLOQ and ULOQ'\n if sum(untouched) != 0:\n logUntouchedFeatures = ' ' + str(sum(untouched)) + ' features only monitored and not processed: ' + str(untouchedFeatureMetadata.loc[:, 'Feature Name'].values.tolist()) + '.'\n else:\n logUntouchedFeatures = ''\n self.Attributes['Log'].append([datetime.now(), '%s (%i samples, %i features). 
LLOQ are replaced by -inf.%s' % (logLimits, self.noSamples, self.noFeatures, logUntouchedFeatures)])", "def inform_all_except(source, message, constraints):\n for c in constraints:\n if c != source:\n c[message]()", "def _transform_gradients(self, g):\n\n x = self._get_params()\n g[self.constrained_positive_indices] = g[self.constrained_positive_indices]*x[self.constrained_positive_indices]\n g[self.constrained_negative_indices] = g[self.constrained_negative_indices]*x[self.constrained_negative_indices]\n [np.put(g,i,g[i]*(x[i]-l)*(h-x[i])/(h-l)) for i,l,h in zip(self.constrained_bounded_indices, self.constrained_bounded_lowers, self.constrained_bounded_uppers)]\n [np.put(g,i,v) for i,v in [(t[0],np.sum(g[t])) for t in self.tied_indices]]\n if len(self.tied_indices) or len(self.constrained_fixed_indices):\n to_remove = np.hstack((self.constrained_fixed_indices+[t[1:] for t in self.tied_indices]))\n return np.delete(g,to_remove)\n else:\n return g", "def _update_non_learnable_var(old_var: NestedMap, new_var: NestedMap,\n var_params: ParamsT) -> NestedMap:\n if not base_layer.var_not_trainable(var_params):\n assert new_var is None\n return old_var\n elif not in_pmap:\n # No aggregation is needed.\n assert new_var is not None\n return new_var\n elif base_layer.var_requires_mean_sync(var_params):\n assert new_var is not None\n return _synchronize_vars_using_mean(new_var, old_var)\n else:\n raise ValueError('Non-trainable variables must have a cross-replica '\n 'synchronization method specified.')", "def _ensure_unsupported_params_unchanged(optimizer_params, supported_params,\n unsupported_params):\n error_template = (\n \"Optimizer parameter %s is unsupported for TPU embeddings. Please \"\n \"construct a new optimizer for embedding if you wish to use this setting \"\n \"for model training. Note if you are using a dynamic learning rate \"\n \"schedule, the use of a new embedding specific optimizer will not \"\n \"automatically carry over your learning rate schedule. 
The learning rate \"\n \"will stay the same as the learning rate when the embedding layer was \"\n \"first defined (which is probably not the intended behavior).\")\n\n for attr in [\"clipnorm\", \"clipvalue\"]:\n if getattr(optimizer_params, attr, None) is not None:\n raise ValueError(error_template % attr)\n\n config = optimizer_params.get_config()\n constructor_args = {p: config[p] for p in supported_params}\n reference = optimizer_params.__class__(**constructor_args)\n reference_config = reference.get_config()\n for p in unsupported_params:\n if config[p] != reference_config[p]:\n raise ValueError(error_template % p)", "def lt_inplace(a,b):", "def set_prior(self, prior, warning=True):\n repriorized = self.unset_priors()\n self._add_to_index_operations(self.priors, repriorized, prior, warning)\n\n from .domains import _REAL, _POSITIVE, _NEGATIVE\n if prior.domain is _POSITIVE:\n self.constrain_positive(warning)\n elif prior.domain is _NEGATIVE:\n self.constrain_negative(warning)\n elif prior.domain is _REAL:\n rav_i = self._raveled_index()\n assert all(all(False if c is __fixed__ else c.domain is _REAL for c in con) for con in self.constraints.properties_for(rav_i)), 'Domain of prior and constraint have to match, please unconstrain if you REALLY wish to use this prior'", "def _replacement(\n self,\n pos: np.ndarray,\n neg: np.ndarray,\n rules,\n index: int,\n ratio: float = 2 / 3\n ):\n rest = rules[:index] + rules[index + 1:]\n\n new_pos = _unbound_rule_list(pos, rest)\n\n if len(new_pos) > 2:\n\n pos_grow, pos_prune, neg_grow, neg_prune = _split_instances(new_pos, neg, ratio, self.random_state)\n\n new_rule = self._grow_rule(pos=pos_grow, neg=neg_grow)\n\n pruned_rule = _pruning_optimization(\n pos_prune=pos_prune,\n neg_prune=neg_prune,\n rule=new_rule,\n rules=rules,\n index=index\n )\n return pruned_rule\n else:\n return rules[index]", "def updateParameters(self):\r\n\r\n\t\tif self.approach.altered:\r\n\t\t\tself.transform.enabled = True\r\n\r\n\t\t\tif self.approach.value == 'Locations in the DEM generated from field observations':\r\n\t\t\t\tself.predefined_pattern.enabled = False\r\n\t\t\t\tself.pattern_workspace.enabled = False\r\n\t\t\t\tself.point_matrix_size.enabled = True\r\n\t\t\t\tself.point_vectors.enabled = True\r\n\t\t\t\tself.mapping_field.enabled = True\r\n\t\t\t\tself.move_to_max.enabled = True\r\n\t\t\t\tself.output_sim_matrix.enabled = True\r\n\t\t\t\tself.mh_dil_val.enabled = False\r\n\r\n\t\t\t\tself.mh_iteration.enabled = False\r\n\t\t\t\tself.mh_iteration.value = False\r\n\t\t\t\tself.output_table.enabled = False\r\n\t\t\t\tself.output_raster_workspace.enabled = False\r\n\t\t\t\tself.output_raster_workspace.value = ''\r\n\r\n\t\t\telif self.approach.value == 'Locations in the DEM versus pre-defined pattern':\r\n\t\t\t\tself.predefined_pattern.enabled = True\r\n\t\t\t\tself.point_matrix_size.enabled = True\r\n\t\t\t\tself.point_vectors.enabled = True\r\n\t\t\t\tself.mapping_field.enabled = True\r\n\t\t\t\tself.move_to_max.enabled = True\r\n\t\t\t\tself.mh_dil_val.enabled = True\r\n\t\t\t\tself.mh_iteration.enabled = True\r\n\t\t\t\tself.output_table.enabled = True\r\n\t\t\t\tself.output_sim_matrix.enabled = False\r\n\t\t\t\tself.output_sim_matrix.value = ''\r\n\t\t\t\tself.output_raster_workspace.enabled = False\r\n\t\t\t\tself.output_raster_workspace.value = ''\r\n\r\n\t\t\telse: # seek pre-defined pattern in DEM\r\n\t\t\t\tself.predefined_pattern.enabled = True\r\n\t\t\t\tself.point_matrix_size.enabled = True\r\n\t\t\t\tself.mh_iteration.enabled = 
True\r\n\t\t\t\tself.output_raster_workspace.enabled = True\r\n\t\t\t\tself.point_vectors.enabled = False\r\n\t\t\t\tself.point_vectors.value = ''\r\n\t\t\t\tself.mapping_field.enabled = False\r\n\t\t\t\tself.move_to_max.enabled = False\r\n\t\t\t\tself.move_to_max.value = False\r\n\t\t\t\tself.mh_dil_val.enabled = True\r\n\t\t\t\tself.output_sim_matrix.enabled = False\r\n\t\t\t\tself.output_sim_matrix.value = ''\r\n\t\t\t\tself.output_table.enabled = False\r\n\t\t\t\tself.output_table.value = ''\r\n\r\n\t\tif self.mh_iteration.altered:\r\n\r\n\t\t\tif self.mh_iteration.value is True:\r\n\t\t\t\tself.mh_dil_start.enabled = True\r\n\t\t\t\tself.mh_dil_stop.enabled = True\r\n\t\t\t\tself.mh_dil_step.enabled = True\r\n\t\t\t\tself.mh_dil_val.enabled = False\r\n\t\t\t\tself.mh_dil_val.value = 1\r\n\r\n\t\t\telse:\r\n\t\t\t\tif self.approach.value == 'Locations in the DEM generated from field observations':\r\n\t\t\t\t\tself.mh_dil_val.enabled = False\r\n\t\t\t\t\tself.mh_dil_val.value = 1\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.mh_dil_val.enabled = True\r\n\r\n\t\t\t\tself.mh_dil_start.enabled = False\r\n\t\t\t\tself.mh_dil_stop.enabled = False\r\n\t\t\t\tself.mh_dil_step.enabled = False\r\n\t\t\t\tself.mh_dil_start.value = 0.01\r\n\t\t\t\tself.mh_dil_stop.value = 1\r\n\t\t\t\tself.mh_dil_step.value = 0.1\r\n\r\n\t\tif self.move_to_max.altered:\r\n\t\t\tif self.move_to_max.value is True:\r\n\t\t\t\tself.move_to_max_distance.enabled = True\r\n\t\t\telse:\r\n\t\t\t\tself.move_to_max_distance.enabled = False\r\n\t\t\t\tself.move_to_max_distance.value = 3\r\n\r\n\t\tif self.transform.altered:\r\n\t\t\tif self.transform.value == 'Work directly on the elevation matrix':\r\n\t\t\t\tself.size_of_the_cell.enabled = False\r\n\t\t\telif self.transform.value == 'Perform a local translation':\r\n\t\t\t\tself.size_of_the_cell.enabled = False\r\n\t\t\telif self.transform.value == 'Compute slopes' or self.transform.value == \\\r\n\t\t\t\t\t'Compute slopes and perform local translation':\r\n\t\t\t\tself.size_of_the_cell.enabled = True\r\n\r\n\t\tif self.predefined_pattern.altered:\r\n\t\t\tif self.predefined_pattern.value == 'Custom pattern':\r\n\t\t\t\tself.pattern_workspace.enabled = True\r\n\r\n\t\t\t\tself.mh_iteration.value = False\r\n\t\t\t\tself.mh_iteration.enabled = False\r\n\t\t\t\tself.mh_dil_start.enabled = False\r\n\t\t\t\tself.mh_dil_stop.enabled = False\r\n\t\t\t\tself.mh_dil_step.enabled = False\r\n\t\t\t\tself.mh_dil_start.value = 0.01\r\n\t\t\t\tself.mh_dil_stop.value = 1\r\n\t\t\t\tself.mh_dil_step.value = 0.1\r\n\t\t\t\tself.mh_dil_val.enabled = False\r\n\t\t\t\tself.mh_dil_val.value = 1\r\n\t\t\telse:\r\n\t\t\t\tself.pattern_workspace.enabled = False", "def composes_inplace_with(self):\n pass", "def _gradient_clipping(grad, param, non_zero, eps=1e-3, threshold=1e-2):\n norm_grad = non_zero(_axis_aware_euclidian_norm(grad))\n norm_param = lax.max(_axis_aware_euclidian_norm(param), eps)\n dynamic_threshold = threshold * (norm_param / norm_grad)\n return jnp.where(dynamic_threshold < 1., grad * dynamic_threshold, grad)" ]
[ "0.6421251", "0.57638377", "0.53960013", "0.5364344", "0.52853006", "0.52772886", "0.52772886", "0.52772886", "0.52637357", "0.52537453", "0.52390754", "0.5176815", "0.51558375", "0.514963", "0.51430506", "0.5140084", "0.5106313", "0.5041951", "0.49778527", "0.4977233", "0.4967542", "0.49671486", "0.49668357", "0.49666795", "0.4944968", "0.4941914", "0.4939274", "0.49331748", "0.49250418", "0.49204156", "0.48857087", "0.48809445", "0.48769072", "0.4836729", "0.48363814", "0.4828534", "0.48284316", "0.48207834", "0.48150554", "0.4802658", "0.4802658", "0.480243", "0.4781665", "0.4778343", "0.47707167", "0.47687444", "0.47605565", "0.47509056", "0.47381482", "0.47356996", "0.47282007", "0.47219726", "0.47163945", "0.4713881", "0.47090217", "0.46998805", "0.46983644", "0.46974206", "0.46915603", "0.46836025", "0.46827716", "0.4678894", "0.46771002", "0.46771002", "0.46686798", "0.46663547", "0.46637323", "0.46611074", "0.46538988", "0.46534917", "0.4652945", "0.46515238", "0.46515238", "0.46494123", "0.46477473", "0.46432433", "0.4628377", "0.46280548", "0.46280074", "0.4618904", "0.46154138", "0.46142775", "0.46096924", "0.4609173", "0.46059915", "0.46006134", "0.45972005", "0.45881745", "0.45825538", "0.45822608", "0.45724708", "0.45679194", "0.4567262", "0.45649135", "0.456369", "0.45598462", "0.45585003", "0.45575774", "0.45552766" ]
0.71344423
1
Helper preventing copy code. Remove given what (transform prior etc) from which param index ops.
def _remove_from_index_operations(self, which, transforms): if len(transforms) == 0: transforms = which.properties() removed = np.empty((0,), dtype=int) for t in list(transforms): unconstrained = which.remove(t, self._raveled_index()) removed = np.union1d(removed, unconstrained) if t is __fixed__: self._highest_parent_._set_unfixed(self, unconstrained) return removed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _remove_operator(self, operator):", "def remove_extra_index_from_context_actions(context_action_dict):\n keys_to_keep = {'initial_value', 'replacement_value'}\n for question in context_action_dict:\n for obj_dct in context_action_dict[question]:\n total_keys = set(obj_dct.keys())\n keys_to_remove = total_keys - keys_to_keep\n for key in keys_to_remove:\n obj_dct.pop(key)\n return context_action_dict", "def neg_inplace(a):", "def remove_unused_args(args, thnn_args):\n def clean_name(name):\n name = name[:name.index('[')] if '[' in name else name\n if name.endswith('_'):\n name = name[:-1]\n return name\n uses = set([clean_name(arg['name']) for arg in thnn_args])\n uses.add('output_mask')\n args = [arg for arg in args if arg['name'] in uses]\n for arg in args:\n if 'default' in arg:\n del arg['default']\n return args", "def exclude(self, *args, **kwargs):", "def removeInputCopies(self):\n for p in self.assoc.parlist:\n if int(p['group']) == 1:\n _img = p['image'].datafile\n shutil.move(p['orig_filename'],_img)", "def remove(self):\n self.inp.inputs.discard(self)\n self.out.outputs.discard(self)", "def inverse_transform(self, X, copy=...):\n ...", "def removed(self, comp):\n\t\tpass", "def remove(func):", "def composes_inplace_with(self):\n pass", "def fast_inplace_check(inputs):\r\n fgraph = inputs[0].fgraph\r\n protected_inputs = [f.protected for f in fgraph._features if isinstance(f,theano.compile.function_module.Supervisor)]\r\n protected_inputs = sum(protected_inputs,[])#flatten the list\r\n protected_inputs.extend(fgraph.outputs)\r\n\r\n inputs = [i for i in inputs if\r\n not isinstance(i,graph.Constant)\r\n and not fgraph.destroyers(i)\r\n and i not in protected_inputs]\r\n return inputs", "def _tf_remove_noise_op(self):\n remove_noise_ops = list()\n for var, noise in zip(self.model_variables, self.noise):\n remove_noise_ops.append(tf1.assign_add(var, -noise))\n ret = tf.group(*tuple(remove_noise_ops))\n with tf1.control_dependencies([ret]):\n return tf.no_op()", "def sub_inplace(a, b):", "def _revert(self):\n if self.kwargs.get(\"collect\"):\n remove_exported_collect_data(self.kwargs[\"collect\"])", "def remove_parameters(self):\n self.parameters = []", "def _op_inplace(self, op: str, other: t.Any) -> te.Self:\n if hasattr(self.__members__, op):\n if isinstance(other, InspectableSet):\n other = other.__members__\n if getattr(self.__members__, op)(other) is NotImplemented:\n return NotImplemented\n return self\n return NotImplemented", "def ignore(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass", "def _untransform(self, X: Tensor) -> Tensor:\n pass # pragma: no cover", "def remove_action(self, action_index):\n self.pipeline.drop(action_index, inplace=True)", "def discard(self):\n for f in self.featureNames:\n self.data = self.data[self.data[:,self._getFIdx(f)] != '-99999']\n return", "def remove_incompatible_operations(pipelines):\n\n def find_duplicates(pipelines):\n for idx in range(len(pipelines)):\n for idx_ in range(idx + 1, len(pipelines)):\n if pipelines[idx] == pipelines[idx_]:\n return idx\n return -1\n\n\n def _remove_illegal_combination(pipelines, combination):\n illegal_pipes = []\n pipelines_ = []\n for idx, pipeline in enumerate(pipelines):\n combination_ = list(set.intersection(set(pipeline.keys()), set(combination)))\n actives = [pipeline[key] != None for key in pipeline if key in combination_]\n\n if sum(actives) > 1:\n illegal_pipes.append(idx) # Store the index of bad combination\n for param in combination_: # 
Generate substituting legal combinations\n if pipeline[param] != None: # we need to make new pipe\n pipeline_ = pipeline.copy()\n for param_ in combination_: # Set ALL conflicting parameters to None\n pipeline_[param_] = None\n pipeline_[param] = pipeline[param] # Set current parameter back to original value\n pipelines_.append(pipeline_)\n\n new_pipelines = [i for j, i in enumerate(pipelines) if j not in illegal_pipes]\n # new_pipelines.extend(pipelines_)\n return new_pipelines, pipelines_\n\n illegal_combinations = [['BASELINE', 'MSC', 'EMSC', 'RNV', 'SNV', 'LSNV'],\n ['SMOOTH', 'SAVGOL']]\n\n for combination in illegal_combinations:\n pipelines, new_pipes = _remove_illegal_combination(pipelines, combination)\n\n pipelines.extend(new_pipes)\n pipelines_set = {json.dumps(pipeline, sort_keys=True) for pipeline in pipelines}\n pipelines = [json.loads(item) for item in pipelines_set]\n\n\n return pipelines", "def _RemoveFromCloneList(self, clone, attrNamesToClone):\n attrNamesToClone = super(EquationUnit, self)._RemoveFromCloneList(clone, attrNamesToClone)\n \n dontClone = [\"_Funcs\", \"_FuncsDefs\"]\n \n for name in dontClone:\n if name in attrNamesToClone:\n attrNamesToClone.remove(name)\n \n return attrNamesToClone", "def get_other_params(step):\n params = copy.copy(step.get('parameters', {}))\n for to_remove in ['input', 'inputs', 'output', 'outputs', 'src_output', 'tgt_output']:\n if to_remove in params:\n del params[to_remove]\n return params", "def remove_ops(self):\n return self._remove_ops", "def _removeOutOfRangeTransformer(self, working_stats, params):\n\n choices = [int(choice) for choice, subsets in working_stats.iteritems()\n if [value for value in subsets if value > 0]]\n\n min_choice = min(choices)\n max_choice = max(choices)\n\n for choice in working_stats.keys():\n if int(choice) < min_choice or int(choice) > max_choice:\n del working_stats[choice]\n\n return working_stats", "def _prune_parameter_by_idx(self,\n scope,\n params,\n pruned_idx,\n pruned_axis,\n place,\n lazy=False,\n only_graph=False,\n param_shape_backup=None,\n param_backup=None):\n if params[0].name() in self.pruned_list[pruned_axis]:\n return\n for param in params:\n assert isinstance(param, VarWrapper)\n param_t = scope.find_var(param.name()).get_tensor()\n if param_backup is not None and (param.name() not in param_backup):\n param_backup[param.name()] = copy.deepcopy(np.array(param_t))\n pruned_param = self.pruner.prune_tensor(\n np.array(param_t), pruned_idx, pruned_axis, lazy=lazy)\n if not only_graph:\n param_t.set(pruned_param, place)\n ori_shape = param.shape()\n\n if param_shape_backup is not None and (\n param.name() not in param_shape_backup):\n param_shape_backup[param.name()] = copy.deepcopy(param.shape())\n new_shape = list(param.shape())\n new_shape[pruned_axis] = pruned_param.shape[pruned_axis]\n param.set_shape(new_shape)\n _logger.debug(\n '|----------------------------------------+----+------------------------------+------------------------------|'\n )\n _logger.debug('|{:^40}|{:^4}|{:^30}|{:^30}|'.format(\n str(param.name()),\n str(pruned_axis), str(ori_shape), str(param.shape())))\n self.pruned_list[pruned_axis].append(param.name())", "def removeAutoSaveFilter(filter):", "def __delitem__(self, i):\n # An element of a policy function can't be deleted", "def or__inplace(a,b):", "def remove_extra_index(actions_structure, type_object):\n for i, action_dict in enumerate(actions_structure):\n for obj_dict in action_dict['context'][type_object]:\n obj_dict.pop('main_index')", "def 
remove_virtual_inputargs(state, link_state_tuples):\n inputargs = link_state_tuples[0][0].target.inputargs\n\n i = 0\n while i < len(inputargs):\n if inputargs[i] in state:\n del inputargs[i]\n for lnk, _ in link_state_tuples:\n del lnk.args[i]\n else:\n i += 1\n return", "def nop_minifier(arg):\n return arg", "def f_remove(self, *args):\n for arg in args:\n arg = self.f_translate_key(arg)\n if arg in self._data:\n del self._data[arg]\n else:\n raise AttributeError(\n \"Your result `%s` does not contain %s.\" % (self.name_, arg)\n )", "def _removeInsufficientTransformer(self, working_stats, params):\n\n for choice, subsets in working_stats.items():\n sufficient_values = [value for value in subsets if value > 0]\n if not sufficient_values:\n del working_stats[choice]\n\n return working_stats", "def delop(self, mask, target, args):\n config = self.config\n try:\n del config[args['<mask>']]\n except KeyError:\n yield \"Operator not found!\"\n else:\n self.bot.db[self.key] = config\n yield \"Deleted operator.\"", "def le_inplace(a,b):", "def strip_profiles_copy(profiles):\n return strip_profiles(lambda x: x['operation'] == 'copy', profiles)", "def exclude_from_prefixing(self, inp):\n raise NotImplementedError", "def keep_params(self, base_key, *params):\n self.params[base_key + '.' + '|'.join(params)] = keep_params(\n self.params[base_key], *params)", "def _TransformHidden(self, _):\n raise NotImplementedError()", "def cut_params(params, exclude):\n for ex_var, ex_list in exclude.items():\n for ex in ex_list:\n if ex in params[ex_var]:\n print(f'Excluding {ex_var}={ex:.3f} from grid')\n ex_idx = np.searchsorted(params[ex_var], ex)\n params[ex_var] = np.delete(params[ex_var], [ex_idx])", "def prepare(self, is_excluding, lists, kind, fn, values):\n pass", "def remove_discarded(self):\n while self.shrink_target.has_discards:\n discarded = []\n\n for ex in self.shrink_target.examples:\n if ex.discarded and (not discarded or ex.start >= discarded[-1][-1]):\n discarded.append((ex.start, ex.end))\n\n assert discarded\n\n attempt = bytearray(self.shrink_target.buffer)\n for u, v in reversed(discarded):\n del attempt[u:v]\n\n if not self.incorporate_new_buffer(attempt):\n break", "def unusedFromKDOTDataPreparation():", "def _transform_inputs(self) -> None:\n self.inputs = None if self.inputs == {} else self.inputs", "def discard(self, value):\r\n raise NotImplementedError", "def _make_soft_copy_ops(tau, target_vars, online_vars):\n return [_make_soft_copy_op(tau, target_vars[var_name], online_vars[var_name])\n for var_name in target_vars.keys()]", "def _make_soft_copy_ops(tau, target_vars, online_vars):\n return [_make_soft_copy_op(tau, target_vars[var_name], online_vars[var_name])\n for var_name in target_vars.keys()]", "def remove_aliases(self):\n for k, v in iteritems(self.argspec):\n if 'aliases' in v:\n for alias in v['aliases']:\n if alias in self.params:\n self.params.pop(alias)", "def remove():", "def delete_params(self, base_key, *params):\n self.params[\n base_key + '.no_' + '|'.join(params)] = delete_params(\n self.params[base_key], *params)", "def slice(self, evidence={}):\n return self.condition(evidence)\n \n \n\n# def eliminate(self, elimVars, elimOp):\n # TODO: awkward way to define this; convert to more direct implementation?\n for v in elimVars:\n if len(self.markovBlanket(v)) > 2: raise ValueError(\"Cannot eliminate {} with {} (>2) neighbors\".format(v,len(self.markovBlanket(v))))\n flist = self.factorsWith(v)\n gm_model = GraphModel(flist); print(gm_model); 
gm_model.eliminate([v],elimOp)\n fnew = gm_model.factors[0]\n self.removeFactors(flist); # doesn't quite work? numerical roundoff issues?\n self.L[v,:] = 0; self.L[:,v] = 0; self.h[v] = 0; # TODO: better to mark as removed? how?\n self.addFactors([fnew])\n # TODO: \"remove\" variable by setting states = 0? \"known value\" = 0?", "def removeAutoSaveDeleteFilter(filter):", "def auto_omit():\n\tpass", "def _revert(self):\n if self.kwargs.get(\"collect\"):\n remove_collect_medias(self.kwargs[\"collect\"])", "def _prune(self, idx):\n idx = list(idx)\n neurons = []\n for nold in self.neurons:\n k = nold[1] # number of neurons\n ix1 = [i for i in idx if i < k] # index for current neuron type\n idx = [i-k for i in idx if i >= k]\n func = nold[0]\n number = len(ix1)\n W = nold[2][:, ix1]\n bias = nold[3][ix1]\n neurons.append((func, number, W, bias))\n self.neurons = neurons", "def remove(self, board):\n for c in board.copy():\n while self in c:\n index = tuple(c.inputs.values()).index(self)\n key = tuple(c.inputs.keys())[index]\n c.inputs[key] = None\n # fixes possible memory leak\n self.inputs = {k: None for k, v in self.inputs.items()}", "def prepareToDelete(self):\n pass", "def on_unassign(self):", "def discard(self, param):\n self._data.discard(param)", "def psi_inplace(a):", "def clean(x):\r\n return ensure_sorted_indices(remove0(x))", "def keep_params(self, base_key, *params):\n self.params[base_key + \".\" + \"|\".join(params)] = self.keep_params_s(self.params[base_key], params)", "def _clean_inputs(self, inputs):\n return inputs", "def without(self, *args):\n return self.reject(lambda x: x in args)", "def remove_unuseful(remove_fields: np.ndarray, remove_values: np.ndarray):\n remove_fields = remove_fields[[0, 1, 2, 3, 4, 6]]\n remove_values = remove_values[:, [0, 1, 2, 3, 4, 6]]\n return remove_fields, remove_values", "def difference_update(self, other):\n if isinstance(other, (list, np.ndarray, MultiLabelIndexCollection)):\n label_ind = flattern_multilabel_index(other, self._label_size)\n for j in label_ind:\n self.discard(j)\n elif isinstance(other, tuple):\n self.discard(other)\n else:\n raise TypeError(\n \"A list or np.ndarray is expected if multiple indexes are \"\n \"contained. 
Otherwise, a tuple should be provided\")\n return self", "def _remove_copyset(mapping: MutableMapping[T, CopySet['Entity']], key: T, ent: 'Entity') -> None:\n copyset = mapping.get(key, None)\n if copyset is not None:\n copyset.discard(ent)\n if not copyset:\n del mapping[key]", "def copySpecial():\n depNode = nuke.dependencies(nuke.selectedNode())\n dependNode = nuke.dependentNodes(nuke.INPUTS or nuke.HIDDEN_INPUTS or nuke.EXPRESSIONS, [nuke.selectedNode()])\n i = 0\n if dependNode[0].Class() in ['Scene', 'MergeGeo']:\n i = nuke.inputs(dependNode[0])+1\n\n nuke.nodeCopy(nukescripts.cut_paste_file())\n\n for node in nuke.allNodes():\n node['selected'].setValue(0)\n\n nuke.nodePaste(nukescripts.cut_paste_file())\n\n newNode = nuke.selectedNode()\n newNode.setInput(0, depNode[0])\n dependNode[0].setInput(i+1, newNode)", "def transform(self, X, copy=...):\n ...", "def transform(self, X, copy=...):\n ...", "def transform(self, X, copy=...):\n ...", "def test_removing_index(self):", "def gt_inplace(a,b):", "def mod_inplace(a, b):", "def remove_some_extraneous_information(variant):\n for key in ['xpos','xstop','vep_annotations',]: variant.pop(key, None)", "def prune(self, arguments):\n puts_err(colored.red(\"Not implemented!\"))", "def untie_everything(self):\r\n self.tied_indices = []", "def sanitize_clone(self):\n pass", "def _scrub_kwargs(kwargs: Dict[str, Any]) -> Dict[str, Any]:\n keywords_to_scrub: List[str] = ['extra_arguments', 'kernel_id']\n scrubbed_kwargs = kwargs.copy()\n for kw in keywords_to_scrub:\n scrubbed_kwargs.pop(kw, None)\n\n return scrubbed_kwargs", "def remove(self, src: int, dst: int) -> None:\n if src and dst is None:\n raise ValueError(\n \"tfgraph and dst must not be None \")\n self.run_tf([tf.scatter_nd_add(self.A_tf, [[src, dst]], [-1.0]),\n tf.scatter_nd_add(self.out_degrees_tf, [[src, 0]], [-1.0]),\n tf.scatter_nd_add(self.in_degrees_tf, [[0, dst]], [-1.0])])\n self.m -= 1\n self._notify(np.array([src, dst]), -1)", "def local_inplace_remove0(node):\r\n # If inplace is not enabled, enable it and replace that op with a\r\n # new op which has inplace enabled\r\n if isinstance(node.op, sparse.Remove0) and not node.op.inplace:\r\n new_op = node.op.__class__(inplace=True)\r\n new_node = new_op(*node.inputs)\r\n return [new_node]\r\n return False", "def clear_transforms(self): # -> None:\n ...", "def _transform_args(self) -> None:\n self.args = None if self.args == [] else self.args", "def second_inplace(a):", "def trim_features():\n pass", "def remove_unused_keys(cop):\n delete_these = [\n 'officer_atty',\n 'officer_atty_firm',\n 'case_id',\n 'cop_first_name',\n 'cop_middle_initial',\n 'cop_last_name',\n 'entered_by',\n 'entered_when',\n 'fact_checked_by',\n 'fact_checked_when',\n 'matched_by',\n 'matched_when'\n ]\n\n for key in delete_these:\n del cop[key]\n\n return cop", "def discard_tile(self):\n raise NotImplemented()", "def removeEquates(self, instruction: ghidra.program.model.listing.Instruction, operandIndex: int) -> None:\n ...", "def removeEquate(self, instruction: ghidra.program.model.listing.Instruction, operandIndex: int) -> None:\n ...", "def _remove(self):\n pass", "def removeControl(*args):", "def removeControl(*args):", "def removeControl(*args):", "def removeControl(*args):", "def _sig_without(sig: inspect.Signature, param: Union[int, str]) -> inspect.Signature:\n if isinstance(param, int):\n params = list(sig.parameters.values())\n params.pop(param)\n else:\n params = [p for name, p in sig.parameters.items() if name != param]\n return 
sig.replace(parameters=params)", "def replace(self, *args, **kwargs): # real signature unknown\r\n pass", "def remove(self):" ]
[ "0.5864179", "0.57993364", "0.5767263", "0.5759645", "0.57012475", "0.5697558", "0.5676129", "0.56178457", "0.559176", "0.55809194", "0.5571276", "0.5533166", "0.5462391", "0.5455604", "0.54388994", "0.54100037", "0.539974", "0.5399218", "0.53672886", "0.53319484", "0.53278744", "0.53207713", "0.5289455", "0.52678233", "0.52660394", "0.5256599", "0.5255282", "0.51993924", "0.5191167", "0.51896566", "0.5189032", "0.5187028", "0.5183714", "0.51722825", "0.5161783", "0.51475406", "0.5142097", "0.51419455", "0.5140921", "0.5120206", "0.5108198", "0.51034266", "0.51016974", "0.5097072", "0.50936866", "0.508502", "0.5078213", "0.5074307", "0.5074307", "0.50717974", "0.5069383", "0.5059219", "0.50522023", "0.5049542", "0.5047758", "0.504682", "0.5019303", "0.5017427", "0.5016358", "0.5002013", "0.49987632", "0.49960414", "0.49904746", "0.4989557", "0.49855834", "0.4984688", "0.49811167", "0.4979188", "0.49787572", "0.49779487", "0.49777704", "0.49777704", "0.49777704", "0.4964254", "0.49595118", "0.49558234", "0.4952272", "0.49511918", "0.49500576", "0.49497437", "0.49449012", "0.49443555", "0.49443275", "0.49403858", "0.4938235", "0.49381977", "0.49291152", "0.49277747", "0.49218047", "0.49203193", "0.49168873", "0.4913716", "0.49117965", "0.49117965", "0.49117965", "0.49117965", "0.49027678", "0.490158", "0.48981068" ]
0.65478945
1
Emit a JSON representation of a given row
def format(self, row): return json.dumps(row.print_fields)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write(self, row: Optional[Any] = None):\n self.fout.write('{}\\n'.format(json.dumps(row, cls=self.encoder)))", "def row_to_json(row: sqlite3.Row) -> str:\n d = {}\n for key in row.keys():\n d[key] = row[key]\n\n return json.dumps(d)", "def __data_row_to_json(self, row):\n raw_data = {}\n raw_data[\"body\"] = row.body\n raw_data[\"score_hidden\"] = row.score_hidden\n raw_data[\"archived\"] = row.archived\n raw_data[\"name\"] = row.name\n raw_data[\"author\"] = row.author\n raw_data[\"author_flair_text\"] = row.author_flair_text\n raw_data[\"downs\"] = row.downs\n raw_data[\"created_utc\"] = row.created_utc\n raw_data[\"subreddit_id\"] = row.subreddit_id\n raw_data[\"link_id\"] = row.link_id\n raw_data[\"parent_id\"] = row.parent_id\n raw_data[\"score\"] = row.score\n raw_data[\"retrieved_on\"] = row.retrieved_on\n raw_data[\"controversiality\"] = row.controversiality\n raw_data[\"gilded\"] = row.gilded\n raw_data[\"id\"] = row.id\n raw_data[\"subreddit\"] = row.subreddit\n raw_data[\"ups\"] = row.ups\n raw_data[\"distinguished\"] = row.distinguished\n raw_data[\"author_flair_css_class\"] = row.author_flair_css_class\n\n return json.dumps(raw_data)", "def write_rows(self, rows: Union[List[dict], dict]):\n rows = listify(rows)\n flat = self.get_arg_value(\"json_flat\")\n\n indent = None if flat else 2\n prefix = \" \" * indent if indent else \"\"\n\n for row in rows:\n if self._first_row:\n pre = \"\" if flat else \"\\n\"\n else:\n pre = \"\\n\" if flat else \",\\n\"\n\n self._first_row = False\n self._fd.write(pre)\n\n value = json.dumps(row, indent=indent)\n value = textwrap.indent(value, prefix=prefix) if indent else value\n self._fd.write(value)\n del value, row", "def to_json_line(bq_row):\n row = dict()\n for key in bq_row:\n row[key] = bq_row[key]\n\n # default=str converts non JSON serializable objects to str eg datetime.datetime\n row_json = json.dumps(row, default=str)\n return row_json.encode('utf-8')", "def convert_to_json(self, rows):\n\t\tjson_list = []\n\t\tfor row in rows:\n\t\t\tjson_record = {}\n\t\t\tjson_record[\"movie_id\"] = row[0]\n\t\t\tjson_record[\"title\"] = change_title(row[1])\n\t\t\tjson_record[\"genres\"] = row[2][:5]\n\t\t\tjson_record[\"imdb_id\"] = row[3]\n\t\t\tjson_record[\"tmdb_id\"] = row[4]\n\t\t\tjson_record[\"rating\"] = row[5]\n\t\t\tjson_record[\"number_of_ratings\"] = row[6]\n\t\t\tjson_record[\"weighted_rating\"] = row[7]\n\t\t\tjson_record[\"release_year\"] = row[8]\n\t\t\tjson_record[\"img_path\"] = row[9]\n\t\t\tjson_record[\"description\"] = row[10]\n\t\t\tjson_record[\"director\"] = row[11]\n\t\t\tjson_record[\"length\"] = row[12]\n\t\t\tjson_list.append(json_record)\n\t\treturn json.dumps(json_list, indent = 4)", "def row_list_to_json(rows: List[sqlite3.Row]) -> str:\n l = []\n for row in rows:\n l.append(row_to_json(row))\n\n return json.dumps(l)", "def format_row(self, row):\n raise NotImplementedError()", "def _json_export(self, exppath):\n # TODO: Settle on JSON format for colortable\n pass", "def write_row(self, data):\n raise NotImplementedError()", "def toJSON(self, file_path=str) -> None:\n try:\n return(exportJSON([value[0] for value in self.table.items()], file_path))\n except Exception as error:\n print(f\"Error: self.toJSON({file_path}) -> {error}\")", "def kvp_writer_udf(row, fm_config):\n\n # get handler, that includes defaults\n xml2kvp_defaults = XML2kvp(**fm_config)\n\n # convert XML to kvp\n xml2kvp_handler = XML2kvp.xml_to_kvp(\n row.document, return_handler=True, handler=xml2kvp_defaults)\n\n # loop through and 
convert lists/tuples to multivalue_delim\n for k, v in xml2kvp_handler.kvp_dict.items():\n if type(v) in [list, tuple]:\n xml2kvp_handler.kvp_dict[k] = xml2kvp_handler.multivalue_delim.join(\n v)\n\n # mixin other row attributes to kvp_dict\n xml2kvp_handler.kvp_dict.update({\n 'record_id': row.record_id,\n 'combine_id': row.combine_id\n })\n\n # return JSON line\n return json.dumps(xml2kvp_handler.kvp_dict)", "def gen_json(self, show_headers=True, show_tags=True, use_objects=False):\n is_first = True\n yield \"[\\n\"\n if use_objects:\n for row in self:\n if is_first:\n is_first = False\n yield json.dumps(row.dictionary, sort_keys=True, indent=2)\n else:\n yield \",\\n\" + json.dumps(row.dictionary, sort_keys=True, indent=2)\n else:\n for raw in self.gen_raw(show_headers, show_tags):\n if is_first:\n is_first = False\n yield json.dumps(raw)\n else:\n yield \",\\n\" + json.dumps(raw)\n yield \"\\n]\\n\"", "def to_json(self):\n\t\treturn self._dataframe.reset_index().to_json(orient=\"records\")", "def _jsonify(self):\n return self.experiment_record.to_ddb_record()", "def json(self) -> CellJson:\n\n return {\"id\": self.id, \"content\": self.content, \"data\": self.data}", "def format(self, table):\n #return table.data.to_json()\n data = _replace_nans(table.as_array().tolist())\n if hasattr(data, \"strip\") or \\\n (not hasattr(data, \"__getitem__\") and \\\n not hasattr(data, \"__iter__\")):\n # data is not a list/tuple => wrap it\n data = [ data ]\n v = {\n 'offset': table.offset,\n 'data': data,\n 'headers': table.headers,\n 'types': table.types,\n }\n if table.sizes is not None:\n v[\"sizes\"] = table.sizes\n return json.dumps(v, cls=ExtEncoder)", "def write_row(row: dict):\n row = {k: format_float(v) for k, v in row.items()}\n writer.writerow(row)\n csvfile.flush()", "def _serialize_row(self, data):\n if isinstance(data, str):\n return data\n\n if isinstance(data, np.ndarray):\n data = np.ndarray.flatten(data)\n\n if hasattr(data, \"__len__\"):\n if len(data) == 0:\n raise ValueError(\"Cannot serialize empty array\")\n csv_buffer = io.StringIO()\n csv_writer = csv.writer(csv_buffer, delimiter=\",\")\n csv_writer.writerow(data)\n return csv_buffer.getvalue().rstrip(\"\\r\\n\")\n\n raise ValueError(\"Unable to handle input format: %s\" % type(data))", "def write(self, row):\n bytes = struct.pack(self.pack_format, *row)\n self.f.write(bytes)", "def encode_record(record):\n return json.dumps(record)", "def to_json(self, orient=\"columns\", double_precision=10,\n force_ascii=True):\n return dumps(self, orient=orient, double_precision=double_precision,\n ensure_ascii=force_ascii)", "def dumps(row):\n return cPickle.dumps(row)", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent = 2, sort_keys = True) + \"\\n\"", "def to_json(self, record: Mapping[str, Any]) -> str:\n return self.json_lib.dumps(record, cls=ObjectEncoder)", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def _set_outputrow(self, v):\n super(Row, self).__setattr__(\"__output__\", v)", "def json(self, update=False):\n return json.dumps(self.export(update=update), indent=4)", "def process_row(self, row: Union[List[dict], dict]) -> List[dict]:\n rows = listify(row)\n rows = self.do_pre_row(rows=rows)\n row_return = [{\"internal_axon_id\": row[\"internal_axon_id\"]} for row in rows]\n rows = self.do_row(rows=rows)\n self.write_rows(rows=rows)\n del rows, row\n return row_return", "def as_json(self):", "def run(self, row, **kwargs):\n self.source = row\n 
kwargs['output'] = self.__graph__()\n super(CSVRowProcessor, self).run(**kwargs)\n return kwargs['output']", "def write_jason(df):\n\n\t# set Country as index of dataframe\n\tdf = df.set_index('Country')\n\n\t# write datafram to jason file \n\tdf = df.to_json('eda.json', orient='index')", "def write(self, row):\n self.append_rows.append(tuple(row))", "def to_json_string(self):\n\t\treturn json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def write_archive_row(self, row: ArchiveRow):\n raise NotImplementedError() # pragma: no cover", "def thrift_to_json(o):\r\n # ident=2 tells python to pretty-print, and put a newline after each comma. Therefore we\r\n # set the comma separator to have no space after it. Otherwise it'll be difficult to embed\r\n # a golden string for comparisons (since our editors strip spaces at the ends of lines).\r\n return json.dumps(o, sort_keys=True, cls=ThriftJSONEncoder, indent=2, separators=(',', ': '))", "def to_python(self):\r\n # Adding rows\r\n items = []\r\n for row in self.rows:\r\n formatted_row = [_format_python_value(v) for v in row]\r\n items.append(dict(zip(self.columns, formatted_row)))\r\n return items", "def to_json(self, orient=\"index\", double_precision=10, force_ascii=True):\n return dumps(self, orient=orient, double_precision=double_precision,\n ensure_ascii=force_ascii)", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def fetch_row_into_str(self, row):\n\n\t\tstr_row = \"\"\n\t\tfor value in row:\t\t\n\t\t\tstr_row = str_row + str(value) + ' | \\t\\t'\n\t\treturn str_row[:-5]", "def format(self, table):\n #return table.data.to_json()\n m = table.as_array()\n rank = len(m.shape)\n is_table = len(table.headers)<=5 or (len(table.headers)>5 and (table.headers[0] != '0' or table.headers[1] != '1' or table.headers[2] != '2' ))\n\n if rank<3 and is_table:\n v = []\n for i in range(len(table.headers)):\n vv = {\n 'offset': table.offset,\n 'header': table.headers[i],\n 'type': table.types[i],\n 'data': _replace_nans(m[:,i].tolist()) if rank>1 else _replace_nans(m.tolist()),\n }\n if table.sizes is not None:\n vv[\"size\"] = table.sizes[0]\n v.append(vv)\n else:\n # if hasattr(data, \"strip\") or \\\n # (not hasattr(data, \"__getitem__\") and \\\n # not hasattr(data, \"__iter__\")):\n # # data is not a list/tuple => wrap it\n # data = [ data ]\n v = {\n 'offset': table.offset,\n #'headers': table.headers,\n 'type': table.types[0],\n 'data': _replace_nans(m.tolist()),\n }\n if table.sizes is not None:\n v[\"size\"] = table.sizes\n\n return json.dumps(v, cls=ExtEncoder)", "def 
to_json_string(self) -> None:\n return json.dumps(self.to_dict(), indent=2) + \"\\n\"", "def format_json(self,query_results):\n results=query_results.data\n factory=factory_json()\n dump=factory.dumps(results)\n print(dump)\n # TODO return output for this\n return \"\"", "def emit(self, record):\n full_record_msg_str = str(record.msg)\n rows = [\n (\n record.asctime,\n record.levelname,\n record.pathname,\n record.funcName,\n record.lineno,\n full_record_msg_str,\n ),\n ]\n\n if len(full_record_msg_str) > GOOGLE_SHEETS_MAX_CELL_CHAR_LENGTH:\n rows = []\n # split row into multiple\n for i in range(\n 0, len(full_record_msg_str), GOOGLE_SHEETS_MAX_CELL_CHAR_LENGTH\n ):\n rows.append(\n (\n record.asctime,\n record.levelname,\n record.pathname,\n record.funcName,\n record.lineno,\n full_record_msg_str[i : i + GOOGLE_SHEETS_MAX_CELL_CHAR_LENGTH],\n ),\n )\n\n with self._pending_rows_mutex:\n for row in rows:\n self._pending_rows.append(row)", "def to_json_string(self):\n\t\treturn json.dumps(dataclasses.asdict(self), indent=2, sort_keys=True) + \"\\n\"", "def row_to_incident(row):\n incident = {}\n raw = {underscoreToCamelCase(k): convert_to_string_if_datetime(v) for k, v in row.items()}\n incident[\"rawJSON\"] = json.dumps(raw)\n incident_name_field = demisto.params().get(\"incident_name_field\")\n if incident_name_field and incident_name_field in raw:\n incident[\"name\"] = raw[incident_name_field]\n return incident", "def serialize(self, row):\n # (Dict[str, Any]) -> Tuple[str, Dict[str, Any]]\n\n rowid_column = self.id_column.name\n if rowid_column not in row:\n message = 'INSERT/UPDATE requires \"{rowid}\" column. Missing in: {values}'.format(\n rowid=rowid_column, values=row\n )\n log2pg(message, logging.ERROR)\n # The insert or update cannot proceed so the transaction should abort.\n # It can happen that the log2pg method is unimplemented, so this\n # value error will abort the operation.\n #\n # https://multicorn.org/implementing-a-fdw/#error-reporting\n # logging.ERROR:\n # Maps to a PostgreSQL ERROR message. An ERROR message is passed\n # to the client, as well as in the server logs. An ERROR message\n # results in the current transaction being aborted. 
Think about\n # the consequences when you use it!\n raise ValueError(message)\n\n document_id = row.pop(rowid_column)\n columns_by_name = self.columns_by_name\n data = {\n key: columns_by_name[key].serialize(value)\n for key, value in row.items()\n if key in columns_by_name\n }\n\n return document_id, data", "def represent_row(self, row, prefix=None):\n\n # Custom Row (with the Orgs left-joined)\n organisation_id = row[\"project_activity_organisation.organisation_id\"]\n if organisation_id:\n return self.org_represent(organisation_id)\n else:\n # Fallback to name\n name = row[\"project_activity.name\"]\n if name:\n return s3_str(name)\n else:\n return current.messages[\"NONE\"]", "def represent_row(self, row, prefix=None):\n\n # Custom Row (with the Orgs left-joined)\n organisation_id = row[\"project_activity_organisation.organisation_id\"]\n if organisation_id:\n return self.org_represent(organisation_id)\n else:\n # Fallback to name\n name = row[\"project_activity.name\"]\n if name:\n return s3_str(name)\n else:\n return current.messages[\"NONE\"]", "def json(cls):\n def _json_impl(agg, record):\n if not agg: agg = {}\n for (k, v) in record.items():\n logger.info(\"k: \" + str(k) + \", v: \" + str(v))\n if agg.has_key(k):\n if happy.flow.isIterable(v):\n agg[k].extend(v)\n else:\n agg[k].append(v)\n else:\n if happy.flow.isIterable(v):\n agg[k] = v\n else:\n agg[k] = [v]\n return agg\n return _json_impl", "def to_json_string(self) -> None:\n return json.dumps(dataclasses.asdict(self)) + \"\\n\"", "def __str__(self):\n s = '<Row';\n for column_number, value in enumerate(self.values):\n s += \"\\n \" + str(self.columns[column_number]) + \"=\" + str(value)\n s += \"\\n>\"\n return s", "def _write_json(\n output_path, records\n):\n output_path.write_text(json.dumps(records))", "def response(row):\n return row['response']", "def row(self):\n return self[\"row\"]", "def add_row(self, row):\n ...", "def tojson(self) -> ty.Text:\n return json.dumps(self.todict())", "def print_row():\n print_line()", "def serialize(self):\n return {'id': self.id,\n 'rowId': self.id,\n 'organization': self.organization.name,\n 'name': self.name,\n }", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def append_row(self):\r\n values = []\r\n vals_to_insert = ''\r\n\r\n for key in Output.COLUMNS:\r\n values.append(str(self[key]))\r\n\r\n # Replace any Quotes in parsed record with double quotes\r\n for i in values:\r\n vals_to_insert += i.replace('\"', '\"\"') + 
'\",\"'\r\n\r\n vals_to_insert = '\"' + vals_to_insert[:-3] + '\"'\r\n insert_sqlite_db(vals_to_insert)", "def json_out(self, data):\n\t\treturn json.dumps(data)", "def serialize(self):\n return {'id': self.id,\n 'rowId': self.id,\n 'organization': self.organization.name,\n 'name': self.name,\n 'key': self.key,\n 'timeAdded': datetime_to_str(self.time_added),\n }", "def conform_output_data(rowdict,fields_to_show=''):\n rowdict['TimeStamp'] = str(rowdict['TimeStamp'])\n if fields_to_show:\n rowdict= removed_fields(fields_to_show, rowdict)\n return rowdict", "def to_json(self):\n pass", "def converttojson(edge_df):\n\tedge_df_str = edge_df.copy()\n\tfor idx, col in enumerate(edge_df.columns):\n\t\tfirst_row_element = edge_df.iloc[0, idx]\n\t\tif isinstance(first_row_element, list) or isinstance(first_row_element, dict):\n\t\t\tedge_df_str[col] = edge_df[col].apply(json.dumps)\n\t\t\tprint('Field \"{}\" of class {} converted to json string'.format(col, type(first_row_element)))\n\t\t#else:\n\t\t#\tprint(col,type(edge_df[col][0]))\n\treturn edge_df_str", "def lines_():\n query = f\"\"\"\nSELECT script_l, `name`, episode\nFROM script\nINNER JOIN characters\nON characters.char_id = script.characters_char_id\nINNER JOIN episodes\nON episodes.ep_id = script.episodes_ep_id\n\"\"\"\n data = pd.read_sql_query(query, engine)\n return data.to_json(orient=\"records\")", "def users_json(self, rows=None, sidx=None, _search=None, searchField=None,\n searchOper=None, searchString=None, page=None, sord=None, nd=None): # 1 line # 2 lines\n t1 = time.clock()\n header = [\"value\", \"flags\", \"source\", \"evidence_type\", \"creation_time\", \"time\", \"useby\", \"owner\", \"comment\"] # 3 lines\n reslist = []\n genshi_tmpl = LoadGenshiTemplate(cherrypy.session.get('cur_session'), cherrypy.session.get('username'))\n cur_component = cherrypy.session.get('cur_component')\n cur_context = cherrypy.session.get('cur_context') \n if cur_component != 'None':\n #print \"getting new\"\n context = cur_context.split()\n um = cherrypy.session.get('um')\n reslist = um.get_evidence_new(context, cur_component)\n cherrypy.session['cur_component'] = 'None'\n else:\n #print \"getting default\"\n cherrypy.session['cur_component'] = 'firstname'\n reslist = um.get_evidence_new()\n\n #users_list = test_data_to_list(test_data) # 4 lines\n evdlist = []\n i = 0\n #{'comment': None, 'evidence_type': 'explicit', 'creation_time': 1322914468.889158, 'value': 'Bob',\n #'source': 'Jane', 'flags': [], 'time': None, 'owner': 'Jane', 'objectType': 'Evidence', 'useby': None}\n myEvd = []\n\n if type(reslist) is ListType:\n for res in reslist:\n print \"Inside user_json \"\n myEvd = [0]*10\n myEvd[0] = i\n for key, value in res.__dict__.items():\n #print \"%s:%s\"%(key, value)\n for item in header:\n if item == key:\n #print \"key: %s %s--\"%(item,key)\n if key == 'creation_time' or key == 'time' or key == 'useby':\n if value:\n import datetime\n value = datetime.datetime.fromtimestamp(int(value)).strftime('%d/%m/%Y %H:%M:%S')\n elif key == 'flags':\n if value:\n value = ''.join(value)\n else:\n value=\"None\"\n __index = header.index(item)\n #print \"%s in %d\" %(value,__index+1)\n myEvd[__index+1]=value\n evdlist.append(myEvd)\n i = i+1\n #print \"Evidence: %d\" %i\n #for val in myEvd:\n # print val\n\n import my_jqGrid\n result_page = my_jqGrid.jqgrid_json(self, evdlist, header, rows=rows, sidx=sidx, _search=_search,\n searchField=searchField, searchOper=searchOper, searchString=searchString, page=page, sord=sord)\n\n t2 = time.clock()\n print 
'user-json took %0.3fms' % ((t2-t1)*1000.0)\n write_log('notice','Show evidence list operation successful')\n\n return result_page\n\n else:\n #print reslist\n e = reslist\n write_log('error','Show evidence list Operation Failed; Error:'+str(e))\n modeltree = cherrypy.session.get('modeltree')\n return genshi_tmpl.greeting_template(e, \"Evidencelist upload\", modeltree)", "def Result(row, schema):\r\n return dict(zip(schema.fields(), row))", "def rosterRowData(self):", "def json_formatter(components):\n columns = cfg['columns']\n\n newList = [] # New list of only dictionaries with column attributes to marshall\n\n for component in components:\n newComp = {}\n\n for column in columns:\n try:\n newComp[column] = component[column]\n except:\n newComp[column] = cfg['emptyValue']\n\n newList.append(newComp)\n\n result = json.dumps(newList)\n\n # Save the json file\n save_path = args.output_file\n try:\n with open(save_path, \"w\") as file:\n file.write(result)\n\n Logger.Debug(\"Output saved to\", save_path)\n\n return save_path\n\n except:\n Logger.Error(\"Could not save output to\", save_path)", "def to_json(self):\n return json.dumps({\n \"header\": self.header,\n \"transactions\": self._transactions\n })", "def json_temp_ges(df):\n\n json_str = (\n df.groupby(\n [\n \"sensor_id\",\n \"measure_name\",\n \"run_id\",\n \"ventilation_rate\",\n \"num_dehumidifiers\",\n \"lighting_shift\",\n \"scenario_type\",\n ],\n as_index=True,\n )\n .apply(\n lambda x: x[\n [\n \"prediction_value\",\n \"prediction_index\",\n \"run_id\",\n \"time\",\n \"timestamp\",\n ]\n ].to_dict(orient=\"records\")\n )\n .reset_index()\n .rename(columns={0: \"Values\"})\n .to_json(orient=\"records\")\n )\n return json_str", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + '\\n'", "def writerow(self, row):\n data = []\n basestring_type = six.string_types[0]\n for value in row:\n if not isinstance(value, basestring_type):\n value = '%s' % (value,)\n data.append(value.encode('utf-8'))\n self.writer.writerow(data)\n data = self.queue.getvalue()\n data = data.decode('utf-8')\n self.stream.write(data)\n self.queue.truncate(0)", "def represent_row(self, row):\n\n # Custom Row (with the Orgs left-joined)\n organisation_id = row[\"project_activity_organisation.organisation_id\"]\n if organisation_id:\n return self.org_represent(organisation_id)\n else:\n # Fallback to name\n name = row[\"project_activity.name\"]\n if name:\n return s3_str(name)\n else:\n return current.messages[\"NONE\"]", "def output_json(data, code, headers=None):\n #data[\"timestamp\"] = datetime.now()\n return jsonify(data)", "def row_to_obj(self, row, cur):\n obj = tornado.util.ObjectDict()\n for val, desc in zip(row, cur.description):\n obj[desc.name] = val\n return obj", "def display_raw_data(df):\n raw_data_lenght=df.shape[0]\n #loop through from 0 to number of rows in steps of 5\n for i in range(0,raw_data_lenght,5):\n response=input('\\n Do you want examin a perticular user data? 
Type \\'yes \\'or \\'no \\'\\n')\n if response.lower()!='yes':\n break\n \n data=df.iloc[i: i+5].to_json(orient='records',lines=True).split('\\n')\n for row in data:\n passed=json.loads(row)\n j_row=json.dumps(passed,indent=3)\n print(j_row)", "def write_row(ws,rowx,row_array):\n for colx, value in enumerate(row_array):\n try:\n float(value)\n type_to_write=\"Float\"\n except ValueError:\n type_to_write=\"String\"\n if type_to_write==\"Float\":\n ws.write(rowx, colx, float(value))\n elif type_to_write==\"String\":\n ws.write(rowx, colx, value)", "def to_json_string(self):\n \n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"" ]
[ "0.745525", "0.7241681", "0.72320414", "0.6897235", "0.68846", "0.6736316", "0.65954936", "0.65526325", "0.6160118", "0.61541754", "0.6141491", "0.61156374", "0.6115557", "0.6070374", "0.60693794", "0.6057097", "0.5938166", "0.5934188", "0.58886075", "0.58313084", "0.5826782", "0.5772088", "0.5771293", "0.5747949", "0.57186973", "0.5710404", "0.57039976", "0.57014555", "0.56853765", "0.5676864", "0.5666237", "0.5660431", "0.56513274", "0.56433225", "0.5636727", "0.5609868", "0.56066674", "0.5593856", "0.55901825", "0.55901825", "0.55901825", "0.55901825", "0.55901825", "0.55901825", "0.55901825", "0.55901825", "0.55901825", "0.55901825", "0.55893314", "0.5580297", "0.5577089", "0.5576821", "0.55767024", "0.55549735", "0.55548483", "0.5554524", "0.55468", "0.55468", "0.5524163", "0.5520337", "0.55202997", "0.5516048", "0.55084395", "0.5507321", "0.54977894", "0.5495149", "0.54832435", "0.54755825", "0.5471413", "0.5471413", "0.5471413", "0.5471413", "0.5471413", "0.5471413", "0.5471413", "0.5471413", "0.5471413", "0.5471413", "0.5471413", "0.54574865", "0.5451491", "0.5441355", "0.54331595", "0.54326624", "0.5432331", "0.5427357", "0.5426857", "0.54239863", "0.542317", "0.5422581", "0.5422504", "0.54202354", "0.5416143", "0.5415794", "0.5410793", "0.5408864", "0.54060537", "0.54037035", "0.5399587", "0.5388039" ]
0.7718322
0
return a header for the row
def header(self, fields): return fields
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _horizontal_header(self):\n return self.header()", "def _horizontal_header(self):\n return self.header()", "def __get_header_tags(self):\n tag = \"<th>{}</th>\"\n\n return (tag * len(self.__rows)).format(*self.__rows)", "def _generateRowHeader(self, obj, **args):\n result = []\n header = self._script.utilities.rowHeaderForCell(obj)\n if not header:\n return result\n\n text = self._script.utilities.displayedText(header)\n if not text:\n return result\n\n roleString = self.getLocalizedRoleName(obj, pyatspi.ROLE_ROW_HEADER)\n if args.get('mode') == 'speech':\n if settings.speechVerbosityLevel == settings.VERBOSITY_LEVEL_VERBOSE \\\n and not args.get('formatType') in ['basicWhereAmI', 'detailedWhereAmI']:\n text = \"%s %s\" % (text, roleString)\n elif args.get('mode') == 'braille':\n text = \"%s %s\" % (text, roleString)\n\n result.append(text)\n return result", "def header(self):\n\n data = {}\n data['latitude'] = self.latitude()\n data['latitude_unc'] = self.latitude_unc()\n data['longitude'] = self.longitude()\n data['longitude_unc'] = self.longitude_unc()\n data['uid'] = self.uid()\n data['n_levels'] = self.n_levels()\n data['year'] = self.year()\n data['month'] = self.month()\n data['day'] = self.day()\n data['time'] = self.time()\n data['cruise'] = self.cruise()\n data['probe_type'] = self.probe_type()\n \n header = pd.Series(data)\n\n return header", "def first_header():\n return \"\"\"\n<th>Target\n<th>Date\n<th colspan=\"2\">UT\n<th>Exp\n<th>Cycle\n<th>No. of\n<th>Filters\n<th>XxY\n<th>Speed\n<th>NX1xNY1\n<th>X1\n<th>Y1\n<th>NX2xNY2\n<th>X2\n<th>Y2\n<th>Grat.\n<th>Slit\n<th>Slit\n<th>ID\n<th>PI\n<th align=\"left\">Comment\n\"\"\"", "def construct_header(self): \n \n # create the individual labels\n hdr_bits = [hb.format(hdr) for hb, hdr in zip(self.row_base, self.headers)]\n \n # stick it all together and return with hdr_sep underneath\n hdr_str = f\"|{'|'.join(hdr_bits)}|\\n\"\n return hdr_str + self.hdr_sep * (len(hdr_str)-1) + \"\\n\"", "def _horizontal_header(self):\n return self.horizontalHeader()", "def header(self, cols, parent_row):\n out = []\n for col in cols:\n if col == 'gau_id':\n out.append(self.name_for('Geographies', parent_row['geography_id']))\n elif col == 'oth_1_id':\n out.append(self.name_for('OtherIndexes', parent_row['other_index_1_id']))\n elif col == 'oth_2_id':\n out.append(self.name_for('OtherIndexes', parent_row['other_index_2_id']))\n else:\n out.append(col)\n return out", "def _generateColumnHeader(self, obj, **args):\n result = []\n header = self._script.utilities.columnHeaderForCell(obj)\n if not header:\n return result\n\n text = self._script.utilities.displayedText(header)\n if not text:\n return result\n\n roleString = self.getLocalizedRoleName(obj, pyatspi.ROLE_COLUMN_HEADER)\n if args.get('mode') == 'speech':\n if settings.speechVerbosityLevel == settings.VERBOSITY_LEVEL_VERBOSE \\\n and not args.get('formatType') in ['basicWhereAmI', 'detailedWhereAmI']:\n text = \"%s %s\" % (text, roleString)\n elif args.get('mode') == 'braille':\n text = \"%s %s\" % (text, roleString)\n\n result.append(text)\n return result", "def generate_header_row(self):\n weekday_abbrev = (\"M\",\"T\",\"W\",\"T\",\"F\",\"S\",\"S\")\n weeknames = self._create_week_dates_text()\n self.add_element('<thead class=\"fixedHeader\" style=\"width: 986px;\">',True,0)\n self.add_element('<tr id=\"datesRow\">', True, 2)\n self.add_element('<th class=\"attTblHeaderName attTblHeaderDateFill\" id=\"id_tablehdr_fill\" ></th>', True, 4)\n th = '<th class=\"attTblHeaderDate\" 
id=\"id_tablehdr_firstweek\" colspan=\"7\">%s</th>' %weeknames[0]\n self.add_element(th, True, 4)\n th = '<th class=\"attTblHeaderDate\" id=\"id_tablehdr_firstweek\" colspan=\"7\">%s</th>' %weeknames[1]\n self.add_element(th, True, 4)\n self.add_element('</tr>' , True, 2)\n self.add_element('<tr id=\"daysRow\">', True, 2)\n th_name = \\\n '<th class=\"attTblHeaderBase attTblHeaderName ui-widget-header\" name=\"headerTdName\" id=\"id_table_header\">Name</td>'\n self.add_element(th_name, True, 4)\n for column_index in xrange(0, self.total_days_count):\n day = self.start_date + timedelta(column_index)\n day_text = weekday_abbrev[day.weekday()]\n week = self._compute_week_index(column_index)\n date_ordinal = day.toordinal()\n if (not SchoolDB.models.StudentAttendanceRecord.is_valid(\n self.dayperiod_type[column_index* 2])):\n title_text = day.strftime(\"%a, %b %d %Y\") + \\\n \" Future day so it cannot be set.\"\n th_type = \"headerTdNed ui-state-disabled\"\n elif (not SchoolDB.models.StudentAttendanceRecord.is_schoolday(\n self.dayperiod_type[column_index * 2])):\n title_text = day.strftime(\"%a, %b %d %Y\") + \\\n \" \" + self.day_description[column_index] + \\\n \" so it cannot be set.\"\n th_type = \"headerTdNsd ui-state-disabled\"\n else:\n title_text = day.strftime(\"%a, %b %d %Y\") + \\\n \" \" + self.day_description[column_index]\n th_type = \"headerTdSd\"\n #fix for last to clean up border\n if (column_index == self.total_days_count - 1):\n modifier = \"attTblHeaderRight\"\n else:\n modifier = \"\"\n th_text = \\\n '<th id=\"attDay-%s\" class=\"attTblHeaderBase attSelectable ui-widget-header %s %s\" title=\"%s\" >%s</th>' \\\n %(column_index, th_type, modifier, title_text, day_text)\n self.add_element(th_text, True, 4)\n #th_text = '<th class=\"attTblHeaderBase attTblHeaderFiller\" name=\"headerTdFiller\" id=\"headerTdFiller\" colspan=\"2\"></th>'\n #self.add_element(th_text, True, 4)\n self.add_element('</tr>', True, 2, True)\n self.add_element('</thead>', True, 0)\n self.add_element('</table>')\n return self.html_table", "def header(self) -> List:\n return self.rows[0]", "def table_header(self):\n title = 'HYPERPARAMETER FINE-TUNING RESULTS'\n title_len = len(title)\n extra_spaces = self.max_length - title_len\n left_spaces = extra_spaces // 2\n right_spaces = extra_spaces - left_spaces - 1\n\n return '| ' + (left_spaces * ' ') + title + (right_spaces * ' ') + ' |\\n'", "def get_header(\n self,\n source: TestCaseReport,\n depth: int,\n row_idx: int,\n ) -> RowData:\n row_data = RowData(\n start=row_idx,\n content=const.EMPTY_ROW,\n style=RowStyle(line_below=(1, colors.black)),\n )\n\n row_data += super(MultiTestRowBuilder, self).get_header(\n source, depth, row_data.end\n )\n\n summary = \", \".join(\n [\n \"{} {}\".format(count, status)\n for count, status in source.counter.items()\n if status != \"total\"\n ]\n )\n\n if \"run\" in source.timer:\n summary += \", total run time: {}.\".format(\n format_duration(source.timer[\"run\"].elapsed)\n )\n\n row_data.append(\n content=[summary, \"\", \"\", \"\"],\n style=[\n RowStyle(\n font=(const.FONT, const.FONT_SIZE_SMALL),\n left_padding=const.INDENT * depth,\n end_column=0,\n ),\n RowStyle(bottom_padding=0, top_padding=0, valign=\"TOP\"),\n ],\n )\n\n return row_data", "def format_report_header(self):", "def header(self):\r\n # favour Column.header\r\n column_header = self.column.header\r\n if column_header:\r\n return column_header\r\n # fall back to automatic best guess\r\n return self.verbose_name", "def header(self) -> list:\n cols 
= self.data.columns.tolist()\n header = [\"index\"]\n for col_int in cols:\n header.append(col_int)\n return header", "def design_report_header(self):\n rstr = nl() + \" \" + nl() + t('table border-collapse= \"collapse\" border=\"1px solid black\" width=100%') + nl()\n rstr += t('tr') + nl()\n row = [0, '<object type= \"image/PNG\" data= \"cmpylogoSeatAngle.png\" height=60 ></object>',\n '<font face=\"Helvetica, Arial, Sans Serif\" size=\"3\">Created with</font>' \"&nbsp\" \"&nbsp\" \"&nbsp\" \"&nbsp\" \"&nbsp\" '<object type= \"image/PNG\" data= \"Osdag_header.png\" height=60 ''&nbsp\" \"&nbsp\" \"&nbsp\" \"&nbsp\"></object>']\n rstr += html_space(1) + t('td colspan=\"2\" align= \"center\"') + space(row[0]) + row[1] + t('/td') + nl()\n rstr += html_space(1) + t('td colspan=\"2\" align= \"center\"') + row[2] + t('/td') + nl()\n rstr += t('/tr') + nl()\n\n rstr += t('tr') + nl()\n rstr += design_summary_row(0, \"Company Name\", \"detail\", text_two=self.company_name, is_row=False)\n rstr += design_summary_row(0, \"Project Title\", \"detail\", text_two=self.project_title, is_row=False)\n rstr += t('/tr') + nl()\n\n rstr += t('tr') + nl()\n rstr += design_summary_row(0, \"Group/Team Name\", \"detail\", text_two=self.group_team_name, is_row=False)\n rstr += design_summary_row(0, \"Subtitle\", \"detail\", text_two=self.sub_title, is_row=False)\n rstr += t('/tr') + nl()\n\n rstr += t('tr') + nl()\n rstr += design_summary_row(0, \"Designer\", \"detail\", text_two=self.designer, is_row=False)\n rstr += design_summary_row(0, \"Job Number\", \"detail\", text_two=self.job_number, is_row=False)\n rstr += t('/tr') + nl()\n\n rstr += t('tr') + nl()\n rstr += design_summary_row(0, \"Date\", \"detail\", text_two=time.strftime(\"%d /%m /%Y\"), is_row=False)\n rstr += design_summary_row(0, \"Client\", \"detail\", text_two=self.client, is_row=False)\n rstr += t('/tr')\n rstr += t('/table') + nl() + \" \" + nl()\n\n rstr += t('hr')\n rstr += t('/hr') + nl() + \" \" + nl()\n return rstr", "def header(self, format=None):\n return [\" ID \",\n \"East\",\n \"North\",\n \"TARGET ELEV\",\n \" LENGTH\",\n \" AZ\",\n \" DIP\",\n \"PLAN ELEV\"]", "def buildStatsTableHeader(self, table):\n heading = table.thead.tr\n heading.th('No')\n heading.th('Begin probe')\n heading.th('End probe')\n heading.th('Min')\n heading.th('Max')\n heading.th('Median')\n heading.th('Mean')\n heading.th('{}%'.format(self.percentile1))\n heading.th('{}%'.format(self.percentile2))\n heading.th('Standard Deviation')", "def header(self):\n\n return [c.name for c in self.columns]", "def header(self):\r\n raise NotImplementedError", "def get_header(col_current, col_shift):\n header = col_current\n for i in range(col_shift):\n header = header.right\n return header", "def headerData(self, section, orientation, role=Qt.DisplayRole):\n if(orientation == Qt.Horizontal and role == Qt.DisplayRole):\n if(section == Columns.Date):\n return \"#\"\n elif(section == Columns.Code):\n return \"Code\"\n elif(section == Columns.User):\n return \"User\"\n elif(section == Columns.Tags):\n return \"Privileges\"\n elif(section == Columns.TimesRequested):\n return \"Times requested\"", "def tsv_header(self):\n return self.tsv_lines[0]", "def second_header():\n return \"\"\"\n<th>\n<th>start\n<th>start\n<th>end\n<th>(secs)\n<th>time\n<th>frames\n<th>\n<th>bin\n<th>\n<th>\n<th>\n<th>\n<th>\n<th>\n<th>\n<th>\n<th>width\n<th>angle\n<th>\n<th>\n<th>\n<th>\n\"\"\"", "def row_headers(self) -> Sequence[str]:\n return self._row_headers", "def headerData(self, i, orientation, 
role=myqt.Qt.ItemDataRole.DisplayRole):\n if (\n orientation == myqt.Qt.Orientation.Horizontal\n and role == myqt.Qt.ItemDataRole.DisplayRole\n ):\n return self.column[i]\n return None", "def __make_header__(self):\n header = lashead.Header(point_format=0)\n return header", "def headerData(self, column, orientation, role):\n if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:\n return QtCore.QVariant(self.header[column])\n return QtCore.QVariant()", "def header(self):\n ...", "def headerData(self, section, orientation:int, role:int=QtCore.Qt.DisplayRole):\n# if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:\n# return 'Parameter Set'\n\n if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:\n\n nodetype=inspect(type(self.__checknode)).relationships['restrictions'].mapper.class_\n column=nodetype.__qtmap__[section]\n return column.info.get('qt_label')", "def construct_table(self):\n table_str = self.header_row\n row_lbls, col_lbls = self.get_idxvals()\n for r,rlbl in enumerate(row_lbls):\n row_data = [self.data[rlbl,clbl] for clbl in col_lbls]\n table_str += self.construct_row(r, row_data)\n \n return table_str", "def header(self):\n return self[0]", "def headerData(self, sidx, orientation, role):\n res = None\n if orientation == qtc.Qt.Vertical and role == qtc.Qt.DecorationRole and sidx == self._sel.give_conds_nb():\n res = qtc.QVariant(GC.load_icon(\"wizards/add.png\"))\n elif role == qtc.Qt.DisplayRole:\n res = QNULL\n if orientation == qtc.Qt.Horizontal:\n res = self._headers[sidx]\n elif role == qtc.Qt.DecorationRole and orientation == qtc.Qt.Vertical:\n res = qtc.QVariant(GC.load_icon(\"wizards/remove.png\"))\n if res is None:\n res = AbsTableModel.headerData(self, sidx, orientation, role)\n return res", "def get_header(header_row):\n header = {}\n header['station'], c1, c2, c3, date, time, tz = header_row.split()\n header['short_model'] = c1\n header['model'] = f'{c1} {c2} {c3}' \n header['runtime'] = dateutil.parser.parse(f'{date} {time} {tz}')\n return header", "def headerData(self, sidx, orientation, role):\n res = None\n if role == qtc.Qt.DisplayRole:\n res = QNULL\n if orientation == qtc.Qt.Horizontal:\n res = self._headers[sidx]\n elif role == qtc.Qt.DecorationRole and orientation == qtc.Qt.Vertical:\n res = qtc.QVariant(GC.load_icon(\"wizards/remove.png\"))\n if res is None:\n res = AbsTableModel.headerData(self, sidx, orientation, role)\n return res", "def columnTitles(self):\n \n pass", "def columnTitles(self):\n \n pass", "def add_header(worksheet, row, header_format):\n worksheet.merge_range(row, 0, row+1, 0, 'โ„–', cell_format=header_format)\n worksheet.merge_range(row, 1, row+1, 1, 'ะคะฐะผะธะปะธั ะธะผั ะพั‚ั‡ะตัั‚ะฒะพ', cell_format=header_format)\n worksheet.merge_range(row, 2, row+1, 2, 'ะ”ะพะปะถะฝะพัั‚ัŒ', cell_format=header_format)\n worksheet.merge_range(row, 3, row, 4, 'ะขะตะปะตั„ะพะฝั‹', cell_format=header_format)\n worksheet.write(row+1, 3, 'ะกะปัƒะถะตะฑะฝั‹ะน', header_format)\n worksheet.write(row+1, 4, 'ะœะพะฑะธะปัŒะฝั‹ะน', header_format)\n return row+2", "def add_header(self, *column_headers):\n header = \"<tr>\"\n header += \" \".join(f\"<th>{header}</th> \" for header in column_headers)\n header += \"</tr>\\n\"\n self.result += header", "def _generateColumnHeaderIfToggleAndNoText(self, obj, **args):\n # If we're reading just a single cell in speech, the new\n # header portion is going to give us this information.\n #\n if args['mode'] == 'speech' and not args.get('readingRow', False):\n 
return []\n\n result = []\n descendant = self._script.utilities.realActiveDescendant(obj)\n label = self._script.utilities.displayedText(descendant)\n if not label and self._script.utilities.hasMeaningfulToggleAction(obj):\n accHeader = self._script.utilities.columnHeaderForCell(obj)\n result.append(accHeader.name)\n return result", "def table(self, header, body):\n return header + body", "def CSVHeader(self):\n \t\n return ','.join('\"{}\"'.format(Statistics.attrs[i][1]) \n for i in sorted(Statistics.attrs.keys()))", "def header(self, text, level, raw=None):\n return [[MdStyleInstructionCell('h{}'.format(level))] + text]", "def generate_header(self, header=None):\n if header is None:\n header = self.header\n\n lines = [self.PREFIX_HEAD + '!b']\n for k, v in header.items():\n if k in ('labels', 'categories'):\n v = ', '.join(v)\n elif k == 'draft':\n v = repr(v)\n lines.append(self.HEADER_FMT % (k, v))\n lines.append(self.PREFIX_END)\n return '\\n'.join([_f for _f in lines if _f]) + '\\n'", "def csv_make_header(self, fileobj, title, comment=\"\"):\n #Line of header info\n \n fileobj.write(csv_line( ['Notes'] + [x.name for x in self.angles] + ['Wait For/n', 'Value'] ) )", "def get_headers(df):\n return df.columns.values", "def get_csv_header(verbose=False):\n if verbose:\n return \"Time,Raw Time,Name,ID,Value\\n\"\n else:\n return \"Time,Name,Value\\n\"", "def headers(self):\n return [column.header if column else '' for column in self.columns]", "def md_header(tabular_data: Union[pd.DataFrame, object],\n headers: tuple = None,\n showindex: Union[bool, None] = False,\n formats: Union[dict, str, Iterable[str]] = None,\n **kwargs) -> str:\n return md_table(tabular_data, headers=headers, showindex=showindex,\n formats=formats, return_headers_only=True, **kwargs)", "def _get_header(self, headline, column_widths):\n header = []\n header_underline = []\n header_widths = map(len, headline)\n\n for width, header_width in zip(column_widths, header_widths):\n width = max(header_width, width)\n\n item = '-' * width\n header_underline.append(item)\n\n header.append(headline)\n header.append(header_underline)\n\n return header", "def _format_header(self):\n return self._format_dict(self.header)", "def headerData(self, section, orientation, role = Qt.DisplayRole):\n\n if role != Qt.DisplayRole:\n return QVariant()\n\n if orientation == Qt.Vertical and role == Qt.DisplayRole:\n value = section + 1\n return QVariant(str(value))\n elif orientation == Qt.Horizontal and role == Qt.DisplayRole:\n value = ef.col_num_to_string(int(section)+1)\n return QVariant(str(value))\n else:\n return QVariant('')", "def to_header(self):\n\n return self._header_block", "def get_export_header(self):\n\n name = self.get_name()\n\n if (self.name == \"input::nodes\"):\n\n name = \"user-specified\"\n\n grp_string = self.get_grp_string()\n\n if grp_string != \"\":\n\n grp_string = \" \" + grp_string\n\n return \"\\n!*!Label \" + self.path[1] + \" ..\" + grp_string + \" .. 
\" + name + \"\\n\"", "def get_headerRow(self, sheetname, row):\n\n sheet = self.wb.sheet_by_name(sheetname)\n headers = []\n\n for i in range(0, sheet.ncols):\n value = str(sheet.cell(row, i).value)\n\n if value:\n headers.append(value.lower())\n\n return headers", "def make_table_header(*headers):\n return [[Cell(h, bold=True) for h in headers]]", "def header( self ):\n\t\treturn '; '.join( [ '='.join(i) for i in self.items() ] )", "def write_header(self):\n lines = [\"\"]\n\n for key in self._header_keys:\n value = self.get_attr_from_name(key)\n if isinstance(value, list):\n value = \",\".join([f\"{v:.1f}\" for v in value])\n elif isinstance(value, (float)):\n value = f\"{value:.7f}\"\n elif isinstance(value, (int)):\n value = f\"{value:.0f}\"\n\n key = (\n key.replace(\"_\", \" \")\n .title()\n .replace(\" \", \"\")\n .replace(\"MTEdit.\", \"MTEdit:\")\n )\n\n lines.append(f\"${key}={value.capitalize()}\")\n\n return lines", "def format_header(line):\n cols = []\n for n, item in enumerate(line.replace('#', '').strip().split('\\t')):\n if item == 'id/name':\n cols.append('name')\n elif item == 'brass_score':\n cols.append('score')\n elif item.startswith('strand') and n > 9:\n if item == 'strand1':\n cols.append('transcript1_strand')\n elif item == 'strand2':\n cols.append('transcript2_strand')\n else:\n raise ValueError(\"Unknown strand value column {0} - {1}\".format(n, item))\n else:\n cols.append(item.lower().replace(' ', '_').replace('/', '_').replace('-', '_'))\n return cols", "def add_heading_row(self, heading):\n # A bit of space if we're not the very first row\n if self.cur_row != 0:\n self.grid.addWidget(QtWidgets.QLabel(''), self.cur_row, 0, 1, 2)\n self.cur_row += 1\n\n # Now the actual header\n label = QtWidgets.QLabel('<b>{}</b>'.format(heading))\n self.grid.addWidget(label, self.cur_row, 0, 1, 2, QtCore.Qt.AlignCenter)\n self.cur_row += 1\n return label", "def get_header():\n title = \"\"\"\n ___ __\n | o _|_ _|_ _ ._ (_ _ ._ _|_ o ._ _ _ ._ _|_ /\\ ._ _. 
| _ o _\n | \\/\\/ | |_ |_ (/_ | __) (/_ | | |_ | | | | (/_ | | |_ /--\\ | | (_| | \\/ _> | _>\n /\"\"\"\n\n sub_title = \"Get sentiments from your tweets fast and easy!\"\n header = bcolors.HEADER + title + bcolors.ENDC + \"\\n\" + bcolors.WARNING + \"\\t\\t\" + sub_title + bcolors.ENDC + \"\\n\"\n return header", "def to_header(self):\n if not self.filled:\n return ''\n\n return \"\\n\".join(self.data)", "def formatHeaderNames(self):\n listaNomiGiorniSettimana = ['Lun',\n 'Mar',\n 'Mer',\n 'Gio',\n 'Ven',\n 'Sab',\n 'Dom']\n\n for colonna, giorno in enumerate(listaNomiGiorniSettimana):\n item = QTableWidgetItem()\n item.setText(giorno)\n if colonna > 4:\n brush = QBrush(Qt.red)\n item.setForeground(brush)\n self.table.setHorizontalHeaderItem(colonna, item)\n\n # self.table.setHorizontalHeaderLabels(listaGiorniSettimana)", "def open_thead(self) -> str:\n self.html_table = self.html_table + \"\"\"<thead>\\n\"\"\"\n return self.html_table", "def table_header(columns=None, url_query=None, sort=None, fugue_icons=False,\n sort_variable_name='sort'):\n new_columns = []\n for column in columns:\n if isinstance(column.show_conditions, tuple):\n func, arg = column.show_conditions\n if func(arg):\n new_columns.append(column)\n else:\n new_columns.append(column)\n return {\n 'columns': new_columns,\n 'sort': sort,\n 'url_query': url_query,\n 'fugue_icons': fugue_icons,\n 'sort_variable_name': sort_variable_name,\n }", "def headerData(self, section, orientation, role):\n headers = [\"Constituancy\", \"Lab\", \"Con\", \"LD\"]\n\n if role == qc.Qt.DisplayRole and orientation == qc.Qt.Horizontal:\n return qc.QVariant(headers[section])\n\n return qc.QVariant()", "def header(self, **args):\n return self.pageConfig['header'] % self.pageConfig", "def get_header():\n str_list = ['specimennumber','speciesid','group','family','genus','species','scientificname', \\\n 'commonname','country','state','county','locality','latitude','longitude', \\\n 'source','accuracy','drainagename','centroidtype','huc8name','huc8', \\\n 'huc10name','huc10','huc12name','huc12','date','year','month','day','status','comments', \\\n 'recordtype','disposal','museumcatnumber','freshmarineintro','references']\n return str_list", "def csv_make_header(self, fileobj, title, comment=\"\"):\n #Line of header info\n fileobj.write(csv_line( ['Comment'] + [x.name.lower() for x in self.angles] + ['Wait For', 'Value'] ) )", "def test_header_row(self):\n header_row = self.view_class().header_row\n if not header_row:\n return\n\n response = self.view_class().get()\n # Some formatting needs to be done so that the header row\n # is compliant with the CSV dialect - all fields need\n # to be quoted.\n quoted_header_row = '\"{}\"'.format('\",\"'.join(header_row))\n self.assertContains(response, quoted_header_row)", "def header(self):\r\n return self.__header", "def get_header_table(self , dt, ds = '' , all_ds = '', length = ''):\n index_low = self.unique_dates[ds]['indices'][dt]['low']\n #index_up = self.unique_dates[best_ds]['indices'][dt]['up'] \n hd = self.data[ds]['header_table'][index_low:index_low+length] \n hd['duplicates'] = all_ds \n \n return hd", "def render_table_start(header, title):\n num_columns = len(header)\n entries = ['\\color[HTML]{FFFFFF}' + '{}'.format(clean_latex(x)) for x in header]\n if num_columns > 1: \n line = '} & {'.join(entries)\n else:\n line = entries[0]\n columns_format = '{|' + '|'.join(['l']*num_columns) + '|}'\n print('\\\\begin{tabular}' + '{}'.format(columns_format) + '\\n'\n ' ' + '\\\\hline\\n' +\n ' ' + 
'\\\\multicolumn{' + str(num_columns) + '}' +\n '{|c|}' + '{' + title + '}' + ' \\\\\\\\\\n' +\n ' ' + '\\\\rowcolor[HTML]{333333}\\n' +\n ' ' + '{' + line + '}' + ' \\\\\\\\' \n )", "def __str__(self):\n if self.row_count > 0:\n texttable = Texttable(200)\n texttable.add_rows(self.rows)\n texttable.set_deco(Texttable.HEADER)\n return texttable.draw()\n else:\n return '<empty table>'", "def generate_header(gene, variant):\n return '>{}_{}'.format(gene, variant)", "def print_row(content, isHeader= False):\n\trow_str =\"\"\n\trow_str =\"<tr>\"\n\tfor index in range(len(content)):\n\t\tif isHeader:\n\t\t\trow_str +=\"<th>\"\n\t\telse:\n\t\t\trow_str +=\"<td>\"\n\t\trow_str += str(content[index])\n\t\tif isHeader:\n\t\t\trow_str +=\"</th>\"\n\t\telse:\n\t\t\trow_str +=\"</td>\"\n\trow_str += \"</tr>\"\n\trow_str += NEW_LINE_STR\n\treturn row_str", "def headerData(self, section, orientation, role):\n headers = [\"Lab\", \"Con\", \"LD\"]\n\n if role == qc.Qt.DisplayRole:\n if orientation == qc.Qt.Vertical:\n return qc.QVariant(headers[section])\n\n return qc.QVariant(\"Vote (%)\")\n\n return qc.QVariant()", "def buildheader(self):\n \n lines = {}\n for k in self._d:\n lines[self._d[k]]='# %d %s'%(self._d[k],k.upper())\n #sort the new keys\n nkeys= lines.keys()\n nkeys.sort()\n #join them together with newlines\n ans = ''\n for k in nkeys:\n ans=ans+\"%s\\n\"%lines[k]\n return ans", "def getTableHeader(self, filename):\n hdr = \"\"\n with open(filename, \"r\") as f:\n for line in f:\n if line[0] == \">\":\n hdr += line\n else:\n return hdr", "def get_header(sheet: xlrd.sheet.Sheet,\n datemode: int = DEFAULT_DATEMODE) -> Tuple[str]:\n values = Worksheet.get_row_values(sheet, 0, datemode)\n return tuple(str(val) for val in values)", "def _html_table_headers(self, row_axes, col_axes):\n dsh = self.get_dshape()\n nb_blank_cols = len(row_axes) * 2 # nb of blank cols preprended to\n # each line of the column header\n nb_rows = int(np.prod([dsh[a] for a in row_axes]))\n nb_cols = int(np.prod([dsh[a] for a in col_axes]))\n # col header\n if nb_blank_cols > 0:\n blank_cells = ['']\n blank_cells_attrs = [{'colspan': str(nb_blank_cols)}]\n else:\n blank_cells = []\n blank_cells_attrs = []\n col_header = []\n nb_repets = 1\n span = nb_cols\n for a in col_axes:\n dom = [str(v)\n for v in self.get_domain(a)] # TODO: better dv format\n span /= len(dom)\n # row showing the axis label\n col_header.append(html_list_to_row(blank_cells + [a], 'h',\n blank_cells_attrs +\n [{'colspan': nb_cols}]))\n # row showing domain values\n col_header.append(html_list_to_row(blank_cells + dom * nb_repets, 'h',\n blank_cells_attrs +\n [{'colspan': str(span)}] *\n len(dom) * nb_repets))\n nb_repets *= len(dom)\n\n # row header\n # initialization of all rows because row filling wont be sequential:\n row_header = [[] for i in range(nb_rows)]\n nb_repets = 1\n span = nb_rows\n for a in row_axes:\n # 1st row contains all axis labels:\n row_header[0].append(html_cell(html_div(a, {'class': 'rotate'}), 'h',\n {'rowspan': nb_rows}))\n\n # dispatch domain values across corresponding rows:\n dom = [str(v)\n for v in self.get_domain(a)] # TODO: better dv format\n span /= len(dom)\n for idv, dv in enumerate(dom * nb_repets):\n row_header[\n idv * span].append(html_cell(dv, 'h', {'rowspan': span}))\n\n nb_repets *= len(dom)\n\n return [''.join(r) for r in row_header], col_header", "def writeTableHeader(self, fileName, variant=0):\r\n # research\r\n w = slicer.modules.NeedleFinderWidget\r\n l = w.logic\r\n if not variant:\r\n 
l.exportEvaluation(['user','case','maxTipHD','maxHD', 'avgHD', 'stdHD', 'medHD',\r\n 'nNeedles','nOutliers','outliers',\r\n 'radiusNeedle',\r\n 'lenghtNeedle',\r\n 'radiusMax',\r\n 'numberOfPointsPerNeedle',\r\n 'nbRotatingIterations',\r\n 'stepSize',\r\n 'gradientPonderation',\r\n 'exponent',\r\n 'gaussianAttenuationButton',\r\n 'sigma',\r\n 'algoV',\r\n 'case',\r\n t.strftime(\"%d/%m/%Y\"), t.strftime(\"%H:%M:%S\")\r\n ], fileName)\r\n else:\r\n l.exportEvaluation(['user','case','tipHD','HD', 'man.-seg_', 'ID1', 'ID2',\r\n 'outlier?',\r\n 'radiusNeedle',\r\n 'lenghtNeedle',\r\n 'radiusMax',\r\n 'numberOfPointsPerNeedle',\r\n 'nbRotatingIterations',\r\n 'stepSize',\r\n 'gradientPonderation',\r\n 'exponent',\r\n 'gaussianAttenuationButton',\r\n 'sigma',\r\n 'algoV',\r\n #'case',\r\n t.strftime(\"%d/%m/%Y\"), t.strftime(\"%H:%M:%S\")\r\n ], fileName)", "def table_heading_cell_html(text: str) -> str:\n return \"<th>{}</th>\".format(text)", "def header(self):\n header = _HEADER_DEFAULT.copy()\n bounds = self.bounds\n header.update({'point_return_count': [len(self), 0, 0, 0, 0],\n 'x_offset': round(bounds.minx),\n 'y_offset': round(bounds.miny),\n 'z_offset': round(bounds.minz),\n 'x_min': bounds.minx,\n 'y_min': bounds.miny,\n 'z_min': bounds.minz,\n 'x_max': bounds.maxx,\n 'y_max': bounds.maxy,\n 'z_max': bounds.maxz})\n\n return laspy.header.Header(**header)", "def _get_headers(self, data):\n if not self._headers:\n self._headers = list(map(lambda col: col.lower(), data.columns))\n return self._headers", "def getHeader(self):\n return self.data.header", "def csv_make_header(self, fileobj, title, comment=\"\"):\n fileobj.write(csv_line( [\"#Title:\", title] ) )\n fileobj.write(csv_line( [\"#Comment:\", comment] ) )\n #Any other useful comment s trings?\n fileobj.write('#\"First column is the sample phi motor rotation, in radians\"\\n' )\n fileobj.write('#\"Next 6 columns are the XY leg positions in mm, relative to the central (neutral) position.\"\\n' )\n fileobj.write('#\"Next are 2 columns for the stopping criterion parameters.\"\\n' )\n #Line of header info\n fileobj.write(csv_line( ['Phi', 'LegA_X', 'LegA_Y', 'LegB_X', 'LegB_Y', 'LegC_X', 'LegC_Y', 'CountFor', 'CountValue', 'Comment'] ) )", "def open_cell_head(self, width) -> str:\n self.html_table = self.html_table + \"\"\"<th width=\"\"\" + str(width) + \"\"\">\"\"\"\n return self.html_table", "def header(self):\n return self._header", "def header(self):\n return self._header", "def header(self):\n return self._header", "def get_tbl_headers(rows):\n tbl_header = rows.pop(0)\n tbl_headers = {}\n for index, header_name in enumerate(tbl_header.find_all('th')):\n if header_name.text in conf.TABLE_HEADER_COLS:\n tbl_headers[header_name.text] = index\n return tbl_headers", "def format_header(self, header):\n raise NotImplementedError()", "def get_tfsheader(tfsfile):\n headerdata = pd.read_csv(tfsfile, delim_whitespace=True, nrows=44, index_col=None)\n headerdata.columns = ['AT', 'NAME', 'TYPE', 'VALUE']\n return headerdata[['NAME', 'VALUE']]", "def __print_header():\n __collen[\"id\"] = max(__collen[\"id\"], 2) # min is \"ID\"\n __collen[\"name\"] = max(__collen[\"name\"], 14) # min is \"Subvolume Name\"\n __collen[\"used_lim\"] = max(__collen[\"used_lim\"], 10) # min is \"Max (Used)\"\n __collen[\"excl_lim\"] = max(__collen[\"excl_lim\"], 11) # min is \"Max (Excl.)\"\n print(\"ID{:s} | Subvolume Name{:s} | {:s}Used | {:s}Max (Used) | {:s}Exclusive | {:s}Max (Excl.)\".format(\n \" \"*(__collen[\"id\"]-2),\n \" \"*(__collen[\"name\"]-14),\n 
\" \"*(MAX_SIZE-4),\n \" \"*(__collen[\"used_lim\"]-10),\n \" \"*(MAX_SIZE-9),\n \" \"*(__collen[\"excl_lim\"]-11)))", "def gen_question_header_cell(question_number):\n return nbformat.v4.new_markdown_cell(f\"### Question {question_number}\")", "def write_header(worksheet, curr_row, cols, data_cols, header_format, stages):\n\n ### Merge range function takes the locations of the cells to merge, the data\n ### to write and the cell format. A sample input would look like:\n ### worksheet.merge_range(\"A0:B1\", \"Location\", cell_format_obj)\n ### The above call will merge 4 cells: A0, A1, B0, B1 and fill it with the\n ### value \"Location\". \n \n end_row = curr_row + CELL_HT[\"location\"]\n row_range = cols[0] + str(curr_row) + \":\" + cols[0] + str(end_row)\n worksheet.merge_range(row_range, \"Location\", header_format)\n \n num_pop_cols = sum(map(lambda i: \"pop\" in i, data_cols)) - 1\n num_tfr_cols = sum(map(lambda i: \"tfr\" in i, data_cols)) - 1\n\n col_end = 0\n for i, stage in enumerate(stages):\n \n if stage == \"pop\":\n unit_txt = \" (in millions)\"\n stage_txt = \"Population\"\n col_range = num_pop_cols\n else:\n unit_txt = \"\"\n stage_txt = \"Total Fertility Rate\"\n col_range = num_tfr_cols\n \n col_st = col_end + 1\n col_end = col_st + col_range\n \n curr_row_copy = curr_row\n end_row = curr_row_copy + CELL_HT[\"stage\"]\n\n row_range = (\n cols[col_st] + str(curr_row_copy) + \":\" +\n cols[col_end] + str(end_row)\n )\n \n col_txt = stage_txt + unit_txt\n worksheet.merge_range(row_range, col_txt, header_format)\n\n curr_row_copy = end_row + 1\n end_row = curr_row_copy + CELL_HT[\"stage\"]\n \n col_st_copy = col_st\n \n for column in data_cols:\n if stage in column: \n row_range = cols[col_st_copy] + str(curr_row_copy)\n worksheet.write(row_range, COL_NAME_MAP[column], header_format)\n col_st_copy += 1\n \n\n return end_row + 1", "def parseRowHeader(self, i, j) :\n rowHeaderValue = \"\"\n\n # Don't attach the cell value to the namespace if it's already a URI\n isURI = urlparse(str(self.source_cell.value))\n if isURI.scheme and isURI.netloc:\n rowHeaderValue = URIRef(self.source_cell.value)\n else:\n self.source_cell_value_qname = self.source_cell.value\n rowHeaderValue = Literal(self.source_cell_value_qname)\n \n # Get the properties to use for the row headers\n prop = self.property_dimensions[j]\n self.row_dimensions.setdefault(i,{})\n self.row_dimensions[i][self.namespaces['scope'][prop]]= rowHeaderValue\n \n return" ]
[ "0.7744287", "0.7744287", "0.7658061", "0.7620999", "0.75094855", "0.74332565", "0.73744935", "0.73059195", "0.7254804", "0.7210276", "0.7183111", "0.7131767", "0.71054065", "0.7100797", "0.7089827", "0.69185275", "0.69134706", "0.6910354", "0.6902813", "0.6865558", "0.68538827", "0.6803872", "0.680191", "0.67953295", "0.67706007", "0.67384773", "0.6728348", "0.67046094", "0.66957074", "0.66948605", "0.6687135", "0.66805017", "0.66478723", "0.6635034", "0.6633647", "0.6615817", "0.6612278", "0.6610185", "0.6610185", "0.6594182", "0.6581404", "0.6581174", "0.6568275", "0.65229136", "0.64867914", "0.6481882", "0.646347", "0.64597833", "0.6439779", "0.6439197", "0.64361864", "0.64326364", "0.6427784", "0.6419711", "0.640646", "0.6391361", "0.6387896", "0.63853544", "0.6380809", "0.63789815", "0.6378839", "0.6366219", "0.6358375", "0.6356475", "0.6345774", "0.63450265", "0.63407177", "0.6335196", "0.6326933", "0.63239956", "0.6310511", "0.6302238", "0.6291987", "0.62861663", "0.6284136", "0.6274648", "0.62689203", "0.6257699", "0.62559927", "0.6242186", "0.6239284", "0.62375665", "0.6226749", "0.62254405", "0.6222863", "0.62209857", "0.6220104", "0.6217665", "0.6204932", "0.62047035", "0.62023604", "0.62023604", "0.62023604", "0.61971694", "0.61677426", "0.6163003", "0.61588407", "0.61506236", "0.61388844", "0.61321497" ]
0.62879777
73
Populate the class with the json info
def populate(self, fid1, fid2): self.input1 = json.load(fid1) self.input2 = json.load(fid2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, json):\n\n self.id = json[\"id\"]\n self.alternateId = json[\"alternateId\"]\n self.name = json[\"name\"]\n\n if \"description\" in json:\n self.description = json[\"description\"]\n\n if \"episodeCount\" in json:\n self.episodeCount = json[\"episodeCount\"]\n\n if \"seasonNumbers\" in json:\n self.seasonNumbers = json[\"seasonNumbers\"]\n\n if \"image\" in json:\n self.image = Image(json[\"image\"])", "def __json_init__(cls, **kwargs):\n return cls(**kwargs)", "def __init__(self, data):\n self.json = data\n self.id = data.get(\"ID\", None)\n self.name = data.get(\"Name\", None)\n self.domain = data.get(\"Domain\", None)\n self.brand_id = data.get(\"BrandID\", None)\n self.account_id = data.get(\"AccountID\", None)\n self.brand_name = data.get(\"BrandName\", None)\n self.country_id = data.get(\"CountryID\", None)\n self.country_name = data.get(\"CountryName\", None)\n self.account_name = data.get(\"AccountName\", None)\n self.pre_order = data.get(\"PreOrder\", None)\n self.type_id = data.get(\"TypeID\", None)\n self.type_name = data.get(\"TypeName\", None)\n self.nominal_code_id = data.get(\"NominalCodeID\", None)\n self.external_shop_id = data.get(\"ExtShopID\", None)\n self.pseudo_stock_level_type = data.get(\"PseudoStockLevelType\", None)\n self.currency_symbol = data.get(\"CurrencySymbol\", None)\n self.loyalty_point_per_value = data.get(\"LoyaltyPointPerValue\", None)\n self.loyalty_value_per_point = data.get(\"LoyaltyValuePerPoint\", None)\n self.disabled = data.get(\"disabled\", None)\n self.deleted = data.get(\"deleted\", None)\n self.note = data.get(\"Note\", None)", "def __init__(self, json_dict, endpoint):\n self._values = json_dict\n self._endpoint = endpoint", "def __init__(self, order_json):\n self.shop = order_json['shop']\n self.size = order_json['size']\n self.customer_name = order_json['name']\n self.drink_name = order_json['drink']\n self.customer_number = order_json['customer_number']\n self.location = order_json['location']\n self.details = order_json['details']", "def __init__(self):\n self.info = dict()", "def __init__(self, json):\n\n self.height = json[\"height\"]\n self.width = json[\"width\"]\n self.src = json[\"src\"]", "def __init__(self):\n self.data = json.loads(resource_string(__name__, 'data/oz_postcodes.json'))", "def __init__(self):\n with open('info.json') as file:\n self.info = json.load(file)\n file.close()\n self.count = 0", "def __init__(self, data):\n\t\tassert isinstance(data, str), \"Data location must be provided in type 'str'!\"\n\t\t\n\t\t# load the location provided\n\t\tdata = json.loads(open(data).read())\n\n\t\t# check for correct format\n\t\tassert isinstance(data, list), \"Data must be of type 'list'!\"\n\n\t\tfor element in data:\n\t\t\tassert isinstance(element, dict), \"Each element of data must be of type 'dict'!\"\n\n\t\tself.data = data", "def from_json(self, json_data):\n try:\n data = json_data.decode()\n except Exception:\n data = json_data\n self.__dict__ = json.loads(data)", "def load_class(self):\n if not os.path.exists(self.savefile):\n self.save_class()\n\n with open(self.savefile, \"r\") as f:\n data = json.load(f)\n for key, value in data.items():\n # set every dict key to an atribute of the class\n setattr(self, key, value) # self.key = value", "def __init__(self, json_obj=None):\n # deserialize\n if json_obj:\n self.__dict__ = json_obj # type: dict\n\n else:\n self.frames = [] # type: list<dict>\n self.number = ''\n self.ref = ''\n self.title = ''", "def __init__(self, json):\n\n self.id = 
json[\"id\"]\n self.alternateId = json[\"alternateId\"]\n\n if \"airDate\" in json:\n self.airDate = datetime.strptime(json[\"airDate\"], '%Y-%m-%dT%H:%M:%SZ')\n\n if \"name\" in json:\n self.name = json[\"name\"]\n\n if \"title\" in json:\n self.title = json[\"title\"]\n\n if \"description\" in json:\n self.description = json[\"description\"]\n\n if \"episode\" in json:\n self.episode = json[\"episode\"]\n\n if \"episodeNumber\" in json:\n self.episodeNumber = json[\"episodeNumber\"]\n else:\n self.episodeNumber = None\n\n if \"season\" in json:\n self.season = json[\"season\"]\n\n if \"seasonNumber\" in json:\n self.seasonNumber = json[\"seasonNumber\"]\n else:\n self.seasonNumber = None\n\n if \"publishStart\" in json:\n self.publishStart = datetime.strptime(json[\"publishStart\"], '%Y-%m-%dT%H:%M:%SZ')\n\n if \"publishEnd\" in json:\n self.publishEnd = datetime.strptime(json[\"publishEnd\"], '%Y-%m-%dT%H:%M:%SZ')\n\n if \"videoDuration\" in json:\n self.videoDuration = timedelta(milliseconds=json[\"videoDuration\"])\n\n if \"isFreePlayable\" in json:\n self.isFreePlayable = json[\"isFreePlayable\"]\n\n if \"isPlayable\" in json:\n self.isPlayable = json[\"isPlayable\"]\n\n if \"isNew\" in json:\n self.isNew = json[\"isNew\"]\n\n if \"image\" in json:\n self.image = Image(json[\"image\"])", "def __init__(self, response_dict={}):\n self.id = response_dict.get('id')\n self.name = response_dict.get('name')\n self.image_url = response_dict.get('imageUrl')\n self.subtype = response_dict.get('subtype')\n self.supertype = response_dict.get('supertype')\n self.ability = response_dict.get('ability')\n self.hp = response_dict.get('hp')\n self.retreat_cost = response_dict.get('retreatCost')\n self.number = response_dict.get('number')\n self.artist = response_dict.get('artist')\n self.rarity = response_dict.get('rarity')\n self.series = response_dict.get('series')\n self.set = response_dict.get('set')\n self.set_code = response_dict.get('setCode')\n self.types = response_dict.get('types')\n self.attacks = response_dict.get('attacks')\n self.weaknesses = response_dict.get('weaknesses')\n self.resistances = response_dict.get('resistances')", "def __init__(\n self,\n response: dict\n ):\n\n self.__name = read_value(\n \"name\", response, str, True)\n self.__uuid = read_value(\n \"uuid\", response, str, True)\n self.__note = read_value(\n \"note\", response, str, True)\n self.__location = read_value(\n \"location\", response, str, True)\n self.__datacenter_uuid = read_value(\n \"datacenter.uuid\", response, str, True)\n self.__row_uuids = read_value(\n \"rows.uuid\", response, str, False)\n self.__row_count = read_value(\n \"rowCount\", response, int, True)\n self.__rack_count = read_value(\n \"rackCount\", response, int, True)\n self.__host_count = read_value(\n \"hostCount\", response, int, True)", "def __init__(self, api, site_id):\n self.api = api\n self.site_id = site_id\n\n self.data = {}\n self.attributes = {}", "def __init__(self, json_data, id_mappings):\n self._data = json_data\n self._id_mappings = id_mappings\n self._driven_routes = []\n self._invoices = []", "def __init__(self, json_data=None, unique=False):\n\n self.uid = str(uuid.uuid4().fields[-1])[:8]\n self.unique = unique\n if json_data is not None:\n self.from_json(json_data)", "def __init__(self):\n # Dict of minecraft object in form of \"dict[id] = name\"\n self.data_values = dict()\n self.parser = self.setup_parser()", "def load(cls, data):\n if isinstance(data, dict):\n print('>>> dict')\n else:\n print('>>> obj')\n # cls_fields 
= fields(cls)\n init()", "def __init__(self, mobile_attck_json):\n self.mobile_attck = mobile_attck_json", "def __init__(self, tableValues=None, json=None):\n if tableValues is not None:\n self.class_id = tableValues[0]\n self.crop_id = tableValues[1]\n self.target = tableValues[2]\n self.type = tableValues[3]\n self.latitude = tableValues[4]\n self.longitude = tableValues[5]\n self.orientation = tableValues[6]\n self.shape = tableValues[7]\n self.background_color = tableValues[8]\n self.alphanumeric = tableValues[9]\n self.alphanumeric_color = tableValues[10]\n self.description = tableValues[11]\n self.submitted = tableValues[12]\n elif json is not None:\n for prop in self.allProps():\n if prop in json:\n setattr(self, prop, json[prop])", "def __init__(self, class_name):\n self.class_name = class_name.lower()\n\n try:\n if _req.json is not None:\n self.parse.json = _req.json\n\n if bool(_req.form):\n self.parse.form = _req.form.to_dict(flat=False)\n\n if bool(_req.files):\n self.parse.file = _req.files.to_dict(flat=False)\n\n if bool(_req.args):\n self.parse.args = _req.args\n except AttributeError:\n pass", "def __init__(self, response):\n self.response = response\n self.json = response.json()\n self.text = response.text\n try:\n for key in response.json():\n value = response.json()[key]\n setattr(self, key, sanitize(key, value))\n\n except Exception, e:\n # It is possible that json is empty and throws: TypeError: 'NoneType' object is not iterable\n if self._Hoiio.debuglevel > 0:\n print 'Exception: %s' % e\n import traceback\n traceback.print_exc()\n raise HoiioException", "def __init__(self, tmp_json):\n super(Template, self).__init__(tmp_json)", "def __init__(self, request):\n self._data = request.get_json(silent=True)", "def __init__(self, config_file_name=\"config.json\"):\n with open(config_file_name, \"r\") as config:\n f = dict(json.load(config))\n for key, value in f.items():\n setattr(self, key, value)", "def __init__(self, code=None, details=None, json=None):\n self.code = code\n self.details = details\n self.json = json", "def __init__(self,\n json_dict: dict,\n clazz,\n factory_method = None,\n prescience: PrescienceClient = None\n ):\n self.page_class = clazz\n if factory_method is not None:\n self.factory_method = factory_method\n else:\n self.factory_method = clazz\n\n self.metadata = MetadataPageResult(json_dict=json_dict['metadata'])\n self.content = [self.factory_method(x, prescience) for x in json_dict['content']]\n self.json_dict = json_dict", "def load_json(self):\n\n self.load_json_str(self.get_json_str())", "def load_from_json(self, json={}):\n\n self.original_json = json\n for key in self.__dict__.keys():\n if key in json:\n setattr(self, key, json[key])\n\n self.set_expiration()", "def __init__(self):\n self.ju = ju.JSONUtil()\n self.apiC = api.API()", "def __init__(self, file):\n self.__config = file\n with open(self.__config) as json_file:\n data = json.load(json_file)\n self.__data = data", "def to_init_json(self) -> JSON:\n pass", "def set_model_from_json(self, json):\n self.enable_auto_reply = get_value_from_json(json, \"enableAutoReply\")\n self.response_subject = get_value_from_json(json, \"responseSubject\")\n self.response_body_plain_text = json.get(\"responseBodyPlainText\")\n self.response_body_html = json.get(\"responseBodyHtml\")\n self.restrict_to_contacts = get_value_from_json(json, \"restrictToContacts\")\n self.restrict_to_domain = json.get(\"restrictToDomain\")\n self.start_time = get_value_from_json(json, \"startTime\")\n self.end_time = 
get_value_from_json(json, \"endTime\")\n return self", "def __init__(self, rest_class, client, data):\n if not isinstance(data, dict):\n raise TypeError('Object must be a dictionary')\n\n # copy all items in dict\n for key, value in data.iteritems():\n self[key] = value\n\n super(AnonStackObject, self).__init__(rest_class, client)", "def __init__(self, data: dict = {}):\n pass", "def __init__(self, **kwargs):\n self.data_dict = dict()\n self.data_list = dict()\n self.user_id = kwargs[\"user_id\"]", "def from_dict(self, json_data: Dict) -> None:\n self.package_name = json_data[\"name\"]\n # self.package_path = Path(json_data[\"path\"])\n self.description = json_data[\"description\"]\n self.mpy_version = json_data[\"mpy_version\"]\n self._publish = json_data[\"publish\"]\n self.hash = json_data[\"hash\"]\n self.stub_hash = json_data[\"stub_hash\"]\n # create folder\n if not self.package_path.exists():\n self.package_path.mkdir(parents=True, exist_ok=True)\n # create the pyproject.toml file\n self.create_update_pyproject_toml()\n # set pkg version after creating the toml file\n self.pkg_version = json_data[\"pkg_version\"]\n self.stub_sources = []\n for name, path in json_data[\"stub_sources\"]:\n if path.startswith(\"stubs/\"):\n path = path.replace(\"stubs/\", \"\")\n self.stub_sources.append((name, Path(path)))", "def __init__(self, file_name=None):\n # deserialize\n if file_name:\n if os.path.isfile(file_name):\n self.__dict__ = load_json_object(file_name)\n else:\n raise IOError('The file {0} was not found.'.format(file_name))\n else:\n self.checking_entity = ''\n self.checking_level = '1'\n self.comments = ''\n self.contributors = ''\n self.publish_date = datetime.today().strftime('%Y-%m-%d')\n self.source_text = 'en'\n self.source_text_version = ''\n self.version = ''", "def __init__(self, data={}):\n self._update_(data)", "def setUpClass(self):\n self.content_type = \"application/json\"\n self.product_payload = {\"name\": \"Olive Oil\"}", "def __init__(self):\n\n self.question_list = self.read_quiz_json()", "def __init__(self):\n\n # open json config file that reads in information\n config_path = open(\"config.json\", \"r\")\n config_json = config_path.read()\n config_dict = json.loads(config_json)\n\n # assign object variables\n self.project_id = config_dict[\"project-id\"]\n self.bucket_name = config_dict[\"bucket-name\"]\n self.location_id = config_dict[\"key-location\"]\n self.key_ring_id = config_dict[\"key-ring-id\"]\n self.crypto_key_id = config_dict[\"crypto-key-id\"]\n self.service_account_email = config_dict[\"service-account-email\"]\n\n # close the file\n config_path.close()", "def __init__(self, **entries):\n\n self.jsonOptions = {}\n for (key, value) in entries.iteritems():\n webObj = _WebObject(value)\n self.jsonOptions[key] = webObj\n self.__dict__[key] = webObj", "def deserialize(self):\n with open(os.path.join(self.root_path, self._data_file), 'r') as file:\n data = json.load(file)\n for key, val in data.items():\n self.__dict__[key] = val", "def __init__(self, data: dict):\n self._data = {\n '': 'Location', # this is required\n 'street': '',\n 'suburb': '',\n 'location': '',\n 'stop': ''\n }\n\n self._data.update(data)", "def __init__(self, filename):\n #Opening the file and storing its contents in a list\n with open(filename) as fp:\n self.data = json.load(fp)", "def __init__(self, dict):\n self.dict = dict", "def __init__(self, dict):\n self.dict = dict", "def fromJson(json):\r\n raise NotImplementedError(\"Returns instance\")", "def from_dict(self, data):\n for 
field in [\"first_name\", \"last_name\", \"username\", \n \"email\", \"city\", \"state\", \"active_plan\"]:\n if field in data:\n setattr(self, field, data[field])", "def from_json(self, data: str) -> None:\n self.clear()\n self.extend(json.loads(data))", "def __init__(self, res):\n self.fromResponseObj(res)", "def __init__(self, res):\n self.fromResponseObj(res)", "def __init__(self, collection, json_data = None):\n\n super(Entity, self).__init__(json_data)\n self.collection = collection\n self.return_error_code = True", "def __init__(self, json_data=None):\r\n super(NSPatset, self).__init__()\r\n self.options = {'name': '',\r\n 'comment': '',\r\n 'indextype': '',\r\n\t 'description': '',\r\n '__count': ''}\r\n \r\n self.resourcetype = NSPatset.get_resourcetype()\r\n \r\n if not (json_data is None):\r\n for key in json_data.keys():\r\n if key in self.options.keys():\r\n self.options[key] = json_data[key]", "def populate(cls):\n raise NotImplementedError", "def reload_from_json(self, json):\n if json:\n self.__dict__ = json", "def __init__(self):\n\t\tsuper().__init__()\n\t\t\n\t\t# Typically a list of data here\n\t\t# Typically a dict of header keys and values here", "def __init__(self, preston, base_url):\n self.data = {}\n self._preston = preston\n self.base_url = base_url", "def json_load(self, json):\n #print('json to load ' + str(json))\n for key in self.__dict__.keys():\n if key in json:\n if isinstance(getattr(self, key), BaseResource):\n getattr(self, key).json_load(json[key])\n elif is_reference_type(json[key]):\n ref = ReferenceType()\n ref.json_load(json[key])\n setattr(self, key, ref)\n else:\n setattr(self, key, json[key])", "def __init__(self, json_str: object = None, json_file_path: object = None) -> None:\n self.data = None\n if json_str is None and json_file_path is None:\n # raise Exception(\"Invalid file path or json string. 
Please provide valid file path for json data or provide json string\")\n print(\"No valid json file has been loaded\")\n if json_str is None:\n with open(json_file_path) as file:\n self.data = json.load(file)\n else:\n self.data = json.loads(json_str)\n # if self.data is not None:", "def __init__(self, keydata):\n if isinstance(keydata, basestring):\n keydata = json.loads(keydata)\n assert isinstance(keydata, dict), keydata\n self.dict = keydata", "def __init__(self, other):\n if type(other) == dict:\n for k in other:\n if type(other[k]) == dict:\n other[k] = JsonFactory(other[k])\n self.__dict__ = other\n else:\n raise PJFInvalidType(other, dict)", "def __init__(self, keydata=None):\n if isinstance(keydata, basestring):\n keydata = json.loads(keydata)\n assert keydata is None or isinstance(keydata, dict), keydata\n self.dict = keydata if keydata is not None else {}", "def __init__(self, json):\n\n if \"show\" not in json or \"videos\" not in json:\n raise Exception(\"Invalid JSON.\")\n\n self.show = Show(json[\"show\"])\n self.seasons = []\n for seasonNumber in self.show.seasonNumbers:\n try:\n season_json = json[\"videos\"][\"episode\"][str(seasonNumber)]\n except KeyError:\n continue\n self.seasons.append(Season(seasonNumber, season_json))\n\n self.specials = []\n if \"standalone\" in json[\"videos\"]:\n for special in json[\"videos\"][\"standalone\"]:\n self.specials.append(Episode(special))", "def __init__(self):\n \n # base_path == this floder\n base_path = os.path.dirname(os.path.abspath(__file__))\n #\n self._curvetypes = app.app_utils.read_json_file(\n os.path.join(base_path, CURVETYPES_FILE)\n )\n # \n self._parametersdict = app.app_utils.read_json_file(\n os.path.join(base_path, PARAMETERS_FILE)\n )", "def __init__(self):\n self.dict = {}", "def __init__(self):\n self.dict = {}", "def _load_from_json(self, data):\n if \"errors\" in data:\n # TODO: handle responses with more than one error\n data = data[\"errors\"][0]\n self.code = data[\"code\"]\n if \"message\" in data:\n self.message = data[\"message\"]\n else:\n self.message = data[\"detail\"]", "def build(self, data: dict):", "def __init__(self, name):\n self.name = name\n self.data = {}\n self.use_success = False", "def __init__(self, data):\n self.data = data\n return", "def __init__(self, response):\n self.response = response\n self.object = response['object']\n self.event_id = response['event_id']\n self.created_at = response['created_at']\n self.data = response['data']\n self.request = response['request']\n self.event_type = response['type']\n self.livemode = response['livemode']", "def _update(self, json_task):\n self._name = json_task['name']\n self._shortname = json_task.get('shortname')\n self._profile = json_task['profile']\n self._pooluuid = json_task.get('pooluuid')\n self._instancecount = json_task.get('instanceCount')\n self._advanced_range = json_task.get('advancedRanges')\n\n if 'resourceDisks' in json_task and json_task['resourceDisks']:\n self._resource_objects_ids = json_task['resourceDisks']\n self._resource_type = Disk\n elif 'resourceBuckets' in json_task and json_task['resourceBuckets']:\n self._resource_objects_ids = json_task['resourceBuckets']\n self._resource_type = Bucket\n\n if len(self._resource_objects_ids) != \\\n len(self._resource_objects):\n del self._resource_objects[:]\n\n if 'resultDisk' in json_task and json_task['resultDisk']:\n self._result_object_id = json_task['resultDisk']\n self._result_type = Disk\n elif 'resultBucket' in json_task and json_task['resultBucket']:\n 
self._result_object_id = json_task['resultBucket']\n self._result_type = Bucket\n\n if 'status' in json_task:\n self._status = json_task['status']\n self._creation_date = _util.parse_datetime(json_task['creationDate'])\n if 'errors' in json_task:\n self._errors = [Error(d) for d in json_task['errors']]\n else:\n self._errors = []\n\n if 'constants' in json_task:\n for constant in json_task['constants']:\n self.constants[constant.get('key')] = constant.get('value')\n\n self._uuid = json_task['uuid']\n self._state = json_task['state']\n self._tags = json_task.get('tags', None)\n if 'resultsCount' in json_task:\n if self._rescount < json_task['resultsCount']:\n self._dirty = True\n self._rescount = json_task['resultsCount']\n\n if 'resultsBlacklist' in json_task:\n self._results_blacklist = json_task['resultsBlacklist']\n if 'resultsWhitelist' in json_task:\n self._results_whitelist = json_task['resultsWhitelist']\n if 'snapshotWhitelist' in json_task:\n self._snapshot_whitelist = json_task['snapshotWhitelist']\n if 'snapshotBlacklist' in json_task:\n self._snapshot_blacklist = json_task['snapshotBlacklist']\n\n if 'completedInstances' in json_task:\n self._completed_instances = [CompletedInstance(x) for x in json_task['completedInstances']]\n else:\n self._completed_instances = []", "def __init__(self, json_body):\n if 'Active' in json_body:\n self._active = json_body['Active']\n else:\n self._active = None\n self._serial = json_body['Serial']\n self._name = json_body['Name']\n self._version = json_body['Version']\n self._credentials = decrypt_password(json_body['LocalCredentials'])\n self._auto_update = json_body['AutoUpdate']\n self._new_version_available = json_body['NewVersionAvailable']\n self._product_type = json_body['ProductType']\n self._network_device = None\n self._connected = False\n self._mqtt = None\n self._callback_message = []\n self._device_available = False\n self._current_state = None\n self._state_data_available = Queue()\n\n self._search_device_queue = Queue()\n self._connection_queue = Queue()", "def __init__(self, api):\n self.api = api\n self.data = None", "def _init_(self):\n self.res = {}", "def __init__(\n self,\n response: dict\n ):\n\n self.__more = read_value(\n \"more\", response, bool, True)\n self.__total_count = read_value(\n \"totalCount\", response, int, True)\n self.__filtered_count = read_value(\n \"filteredCount\", response, int, True)\n self.__items = read_value(\n \"items\", response, Room, True)", "def __init__(self):\n self.__dict__ = dict()\n self.load()", "def __init__(self,\n resp_data,\n ):\n self.raw_data = resp_data.dict()\n\n # Packet parsed for host db processing\n self.parsed_data = {'insert': {'product': self.get_product_pack(),\n 'selling_status': self.get_selling_status_pack(),\n 'shipping_info': self.get_shipping_info_pack(),\n 'listing_info': self.get_listing_info_pack(),\n },\n 'items_received': resp_data.dict()['searchResult']['_count']}", "def __init__(self):\n\n # initialise the empty mappings dictionary\n self.data = {\n 'loan_id': None,\n 'product': None,\n 'origination_date': None,\n 'reversion_date': None,\n 'rate_term': None,\n 'loan_amount': None,\n 'initial_rate': None,\n 'reversion_rate': None,\n 'term': None,\n 'interest_only_amount': None,\n 'upfront_fees': None,\n 'upfront_costs': None,\n 'entity_eir': None\n }", "def __init__(self, item, item_clazz):\n self.key = item['key']\n self.stat = item['value']\n self.item_clazz = item_clazz", "def __init__(self, src_path, json, *args, **kwargs):\n super(SimState, 
self).__init__(*args, **kwargs)\n\n self.src_path = src_path\n self.json = json\n self.name = json['name']\n self.super = json['super']\n\n self.vars = []\n for json_var in json['vars']:\n self.vars.append(SimVar(json_var))", "def __init__(self, number, json):\n\n self.number = number\n self.episodes = []\n for episode in json:\n self.episodes.append(Episode(episode))", "def __init__(self, data):\n self.game_id = data['game_id']\n self.home_players = [Player(x) for x in data['home_team']['players']]\n self.home_coaches = [Coach(x) for x in data['home_team']['coaches']]\n self.away_players = [Player(x) for x in data['away_team']['players']]\n self.away_coaches = [Coach(x) for x in data['away_team']['coaches']]\n self.umpires = [Umpire(x) for x in data['umpires']]", "def fromJson(cls, inJson):\n\n return cls.fromDict(json.loads(inJson))", "def fromJson(cls, inJson):\n\n return cls.fromDict(json.loads(inJson))", "def fromJson(cls, inJson):\n\n return cls.fromDict(json.loads(inJson))", "def __init__(self, lat, lon, grid, api):\n self.lat = lat\n self.lon = lon\n self.city = grid['city']\n self.county = grid['county']\n self.village = grid['village']\n self.api = api\n self.result = {}", "def __init__(self, config_file):\n with open(config_file, 'r') as file:\n self.config = json.load(file)\n self.set_config(self.config)", "def __init__(self):\n self.structure = {}", "def __init__(self, data):\n self.data = data\n self.model_func = DecisionTree._deserialize_decision_tree_from_json(data[\"model\"])", "def __init__(self, quota_json):\n super(Quota, self).__init__(quota_json)", "def __init__(self):\n self.content = dict()", "def __init__(self, data: dict):\n self.name = data['name'] # type: str\n self.type = data['type'] # type: str\n self.group = data['group'] # type: str\n self.pin = data['pin'] # type: int\n self.topic = (\n self.group + '/' +\n self.type + '/' +\n self.name) # type: str", "def __from_json__(self, properties: dict):\r\n # Look for units first so the temperatures are set correctly.\r\n value = properties.pop(\"temperature_scale\", None)\r\n if value is not None:\r\n eval(f\"self.set_temperature_scale('{value}')\")\r\n\r\n # Let superclass handle the rest\r\n super().__from_json__(properties)", "def __init__(self):\n\n try:\n # read attributes from attributes file\n with open(const.Storage.ATTRIBUTES) as attributes_file:\n # read the file and parse it to JSON data\n json_data = attributes_file.read()\n attributes = json.loads(json_data)\n\n # set attributes\n self.id = str(attributes[\"id\"])\n self.length = float(attributes[\"length\"])\n self.width = float(attributes[\"width\"])\n except OSError:\n raise OSError(\"The attributes file could not be opened.\")", "def parse_config(self, json_dict):\n self.Checked = json_dict['Checked']\n self.Files = json_dict['Files']\n self.Notes = json_dict['Notes']\n self.field_config = json_dict['field_config']\n self.active_config = json_dict['active_config']\n self.moving_bed_type = json_dict['moving_bed_type']" ]
[ "0.7457732", "0.74043804", "0.7343072", "0.71182615", "0.7032978", "0.6789153", "0.66915345", "0.66697764", "0.66629", "0.66505814", "0.66464657", "0.6644336", "0.6605952", "0.66035146", "0.65517664", "0.65442574", "0.6533766", "0.65309536", "0.65301555", "0.65257514", "0.6520427", "0.6518397", "0.650829", "0.6483783", "0.64836687", "0.6480981", "0.64383465", "0.64065087", "0.6385307", "0.6375035", "0.6354695", "0.63459444", "0.63432807", "0.6325771", "0.6305469", "0.6279574", "0.6256307", "0.6247723", "0.62411267", "0.62368804", "0.6233538", "0.62330884", "0.62089795", "0.6193976", "0.6193969", "0.61912435", "0.61822027", "0.6180652", "0.6174353", "0.6157329", "0.6157329", "0.6140306", "0.6140157", "0.6136077", "0.6131895", "0.6131895", "0.61227083", "0.6118568", "0.61138374", "0.6111796", "0.61080605", "0.6105449", "0.6097585", "0.6093546", "0.6078999", "0.6075186", "0.6069635", "0.6066976", "0.60623324", "0.6053914", "0.6053914", "0.60425055", "0.60320616", "0.60279095", "0.6025403", "0.6014775", "0.60049206", "0.5993346", "0.5987665", "0.59860367", "0.5985545", "0.5979101", "0.5975831", "0.59755975", "0.5974792", "0.59731835", "0.5969368", "0.59642494", "0.5961851", "0.5961851", "0.5961851", "0.5958079", "0.5952536", "0.59514207", "0.5942479", "0.59402645", "0.5936463", "0.59312683", "0.5928801", "0.591438", "0.59027016" ]
0.0
-1
Creates a dictionary of nodes listed by curie id from answers 1 and 2
def make_node_dict(self): if self.input1 is None or self.input2 is None: raise Exception("Missing input: please run the populate() method first") self.node_dict1 = {} for node in self.input1['knowledge_graph']['nodes']: self.node_dict1[node['id']] = node self.node_dict2 = {} for node in self.input2['knowledge_graph']['nodes']: self.node_dict2[node['id']] = node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_nodes_by_id(ntwrk, nodeid):\r\n return {k: v for el in ntwrk\r\n for k, v in el.items() if k == nodeid}", "def node_diff(self):\n if self.input1 is None or self.input2 is None:\n raise Exception(\"Missing input: please run the populate() method first\")\n if self.node_dict1 is None or self.node_dict2 is None:\n self.make_node_dict()\n # Initialize dictonaries to keep track of the nodes in respnse 1 and response 2\n g1={}\n g2={}\n # Set to keep track of the union of all curie ids\n curie_set = set()\n for curie in self.node_dict1.keys():\n g1[curie] = {}\n # intersection is only in the g1 dictionary\n g1[curie]['intersection'] = set()\n # node section keeps track of node ids associated with each node i.e. \"n0\"\n g1[curie]['node'] = set()\n curie_set.add(curie)\n for curie in self.node_dict2.keys():\n g2[curie] = {}\n # node section keeps track of node ids associated with each node i.e. \"n0\"\n g2[curie]['node'] = set()\n curie_set.add(curie)\n node_names1 = []\n node_names2 = []\n\n # extract all node ids (i.e. \"n0\",\"n1\",ect...)\n if len(self.input1['question_graph']['nodes'])>0:\n if 'id' in self.input1['question_graph']['nodes'][0]:\n node_names1 = [x['id'] for x in self.input1['question_graph']['nodes']]\n elif 'node_id' in self.input1['question_graph']['nodes'][0]:\n node_names1 = [x['node_id'] for x in self.input1['question_graph']['nodes']]\n if len(self.input2['question_graph']['nodes'])>0:\n if 'id' in self.input2['question_graph']['nodes'][0]:\n node_names2 = [x['id'] for x in self.input2['question_graph']['nodes']]\n elif 'node_id' in self.input2['question_graph']['nodes'][0]:\n node_names2 = [x['node_id'] for x in self.input2['question_graph']['nodes']]\n \n # initialize the result dictonary\n diff_dict = {}\n diff_dict[\"-1|-1\"] = {'intersection':[],'g1-g2':[],'g2-g1':[]}\n # initialize node id tuple keys\n for id1 in node_names1:\n for id2 in node_names2:\n diff_dict[id1+\"|\"+id2] = {'intersection':[],'g1-g2':[],'g2-g1':[]}\n # iterate through answers\n for answer1 in self.input1['answers']:\n for answer2 in self.input2['answers']:\n for id1 in answer1['node_bindings'].keys():\n # This is to handle cases where answer node id has a list or a string\n if isinstance(answer1['node_bindings'][id1], str):\n bindings1 = [answer1['node_bindings'][id1]]\n elif isinstance(answer1['node_bindings'][id1], list):\n bindings1 = answer1['node_bindings'][id1]\n for curie1 in bindings1:\n # store node id\n g1[curie1]['node'].add(id1)\n for id2 in answer2['node_bindings'].keys():\n # This is to handle cases where answer node id has a list or a string\n if isinstance(answer2['node_bindings'][id2], str):\n bindings2 = [answer2['node_bindings'][id2]]\n elif isinstance(answer2['node_bindings'][id2], list):\n bindings2 = answer2['node_bindings'][id2]\n for curie2 in bindings2:\n # store node id\n g2[curie2]['node'].add(id2)\n if curie1 == curie2:\n # stor intersection tuple\n g1[curie1]['intersection'].add(id1+\"|\"+id2)\n # iterate through all curies\n for curie in curie_set:\n # check if curie is from answer 1\n if curie in g1.keys():\n # check if in intersection\n if len(g1[curie]['intersection'])>0:\n diff_dict[\"-1|-1\"]['intersection'] += [self.node_dict1[curie]]\n for id1 in node_names1:\n for id2 in node_names2:\n node_tuple = id1+\"|\"+id2\n if id1 in g1[curie]['node'] and id2 in g2[curie]['node']:\n diff_dict[node_tuple]['intersection'] += [self.node_dict1[curie]]\n elif id1 in g1[curie]['node']:\n diff_dict[node_tuple]['g1-g2'] += [self.node_dict1[curie]]\n elif id2 in 
g2[curie]['node']:\n diff_dict[node_tuple]['g2-g1'] += [self.node_dict1[curie]]\n # If not in intersection store in g1-g2\n else:\n diff_dict[\"-1|-1\"]['g1-g2'] += [self.node_dict1[curie]]\n for id1 in g1[curie]['node']:\n # iterate through all answer 2 ids\n for id2 in node_names2:\n diff_dict[id1+\"|\"+id2]['g1-g2'] += [self.node_dict1[curie]]\n # if not in g1 but in g2 then in g2-g1\n elif curie in g2.keys():\n diff_dict[\"-1|-1\"]['g2-g1'] += [self.node_dict2[curie]]\n for id2 in g2[curie]['node']:\n # iterate through all answer 1 ids\n for id1 in node_names1:\n diff_dict[id1+\"|\"+id2]['g2-g1'] += [self.node_dict2[curie]]\n return diff_dict", "def ans():\n ret = {}\n for i in range(12):\n ret[ind[i]] = ans2[ind[i]]\n ret['id']=\"id\"\n return jsonify(ret)", "def get_answers(self):\r\n answers = {}\r\n for ielt in self.ielements:\r\n ie_id = ielt.get('id')\r\n answers[ie_id] = {'rectangle': ielt.get('rectangle'), 'regions': ielt.get('regions')}\r\n\r\n return answers", "def __node_rep(self):\n node_list_dict = {}\n for (i, beam) in enumerate(self.beams):\n if str(beam['n1']) not in node_list_dict.keys():\n node_list_dict[str(beam['n1'])] = 1\n else:\n node_list_dict[str(beam['n1'])] += 1\n if str(beam['n2']) not in node_list_dict.keys():\n node_list_dict[str(beam['n2'])] = 1\n else:\n node_list_dict[str(beam['n2'])] += 1\n return node_list_dict", "def all_in_edges_of_node(self, id1: int) -> dict:\n if id1 in self.Nodes:\n ans = {}\n for i, j in self.Edges.items():\n if id1 in j:\n ans[i] = j[id1]\n return ans\n return {}", "def create_nodes_and_edges(list_of_nodes_, adjacency_matrix_):\n\n # Random numbers for the labels\n random_numbers = np.arange(len(list_of_nodes_))\n np.random.shuffle(random_numbers)\n print random_numbers\n\n # Update the nodes: Every node gets told how many other nodes know it\n for node in sorted(list_of_nodes_, key=lambda x: x.id):\n node.knows = int(sum(np.ravel(adjacency_matrix[node.id])))\n node.known_by = int(np.ravel(sum(adjacency_matrix[:,node.id])))\n\n # Update the nodes: Every node gets its questionaire answers\n for node in sorted(list_of_nodes_, key=lambda x: x.id):\n try:\n with open('./data-answers/{}.csv'.format(node.id), 'r') as f:\n answers = f.readlines()\n node.age = answers[0].strip() if (answers[0].strip() and answers[0].strip() != '-1') else \"'?'\"\n node.academies = answers[1].strip() if (answers[1].strip() and answers[1].strip() != '-1') else \"'?'\"\n node.waylength = answers[2].strip() if (answers[2].strip() and answers[2].strip() != '-1') else \"'?'\"\n node.hiking = answers[3].strip() if (answers[3].strip() and answers[3].strip() != '-1') else \"'?'\"\n node.lake = answers[4].strip() if (answers[4].strip() and answers[4].strip() != '-1') else \"'?'\"\n node.choir = answers[5].strip() if (answers[5].strip() and answers[5].strip() != '-1') else \"'?'\"\n node.games = answers[6].strip() if (answers[6].strip() and answers[6].strip() != '-1') else \"'?'\"\n node.drinks = answers[7].strip() if (answers[7].strip() and answers[7].strip() != '-1') else \"'?'\"\n node.sleep = answers[8].strip() if (answers[8].strip() and answers[8].strip() != '-1') else \"'?'\"\n node.number = answers[9].strip() if (answers[9].strip() and answers[9].strip() != '-1') else \"'?'\"\n node.hotness = answers[10].strip() if (answers[10].strip() and answers[10].strip()!= '-1') else \"'?'\"\n node.hookups = answers[11].strip() if (answers[11].strip() and answers[11].strip()!= '-1') else \"'?'\"\n node.description = answers[12].strip() if (answers[12].strip() and 
answers[12].strip()!= '-1') else \"'?'\"\n \n except IOError:\n node.age = \"'?'\"\n node.academies = \"'?'\"\n node.waylength = \"'?'\"\n node.hiking = \"'?'\"\n node.lake = \"'?'\"\n node.choir = \"'?'\"\n node.games = \"'?'\"\n node.drinks = \"'?'\"\n node.sleep = \"'?'\"\n node.number = \"'?'\"\n node.hotness = \"'?'\"\n node.hookups = \"'?'\"\n node.description = \"?\"\n\n with open('nodes-and-edges.js', 'w+') as f:\n\n # Write the code for the Nodes to the file\n # This is just the preamble\n f.write('// The nodes for the graph \\n')\n f.write('var nodes = [ \\n')\n\n # And these are the actual data\n for node in sorted(list_of_nodes_, key=lambda x: x.id):\n pos = xy_from_group(node.group)\n f.write('\\t{{ id: {id}, '\n 'label: \"{random_number}\", '\n 'title: \"<small style=\\'font-family: Roboto Slab;\\'>'\n# 'Name: {label} <br>'\n# 'Fach: {major} <br>'\n 'AG: {group} <br>'\n '---<br>'\n 'Kennt {knows} Leute <br>'\n 'Wird gekannt von {known_by} Leuten <br>'\n '---<br>'\n 'Alter: {age} <br>'\n 'Anzahl Sommerakademien: {academies} <br>'\n 'Anfahrtsdauer: {waylength} <br>'\n 'Wander-Tage: {hiking} <br>'\n 'See-Tage: {lake} <br>'\n 'Chor-Tage: {choir} <br>'\n 'Spieleabende: {games} <br>'\n 'Beitrag zur Barkasse: {drinks} <br>'\n 'Schlaf pro Nacht: {sleep} <br>'\n 'Lieblingszahl: {number} <br>'\n 'Eigene Attraktivitรคt: {hotness} <br>'\n 'Hookup-Schรคtzung: {hookups} <br>'\n 'Neubeuern in einem Wort: {description}'\n '</small>\", '\n 'value: {value}, '\n 'group: {group}, '\n 'knows: {knows}, '\n 'known_by: {known_by}, '\n 'x: {x}, '\n 'y: {y}, '\n 'color: {{ border: \"{border}\", '\n 'background: \"{background}\", '\n 'highlight: {{ border: \"{border}\", '\n 'background: \"{background}\" }} }}, '\n 'original_color: {{ border: \"{border}\", '\n 'background: \"{background}\", '\n 'highlight: {{ border: \"{border}\", '\n 'background: \"{background}\" }} }}, '\n 'age: {age}, '\n 'academies: {academies}, '\n 'waylength: {waylength}, '\n 'hiking: {hiking}, '\n 'lake: {lake}, '\n 'choir: {choir}, '\n 'games: {games}, '\n 'drinks: {drinks}, '\n 'sleep: {sleep}, '\n 'number: {number}, '\n 'hotness: {hotness}, '\n 'hookups: {hookups}, '\n 'description: \"{description}\" }},\\n'\n .format(id=node.id,\n random_number=random_numbers[node.id],\n label=node.name,\n major=node.major,\n group=node.group,\n x=pos[0],\n y=pos[1],\n knows=node.knows,\n known_by=node.known_by,\n value=node.known_by,\n border=DarkColor(int(node.group)),\n background=LightColor(int(node.group)),\n age=node.age,\n academies=node.academies,\n waylength=node.waylength,\n hiking=node.hiking,\n lake=node.lake,\n choir=node.choir,\n games=node.games,\n drinks=node.drinks,\n sleep=node.sleep,\n number=node.number,\n hotness=node.hotness,\n hookups=node.hookups,\n description=node.description))\n\n # Close the Node array properly\n f.write(']; \\n\\n\\n')\n\n # Create the edges...\n f.write('var edges = [\\n')\n\n # Now loop over the adjacency matrix to calculate the edges\n n_people = len(adjacency_matrix_)\n id = 0\n for row in range(n_people):\n for col in range(row):\n\n # CASE 1: Both people said they know each other.\n # We draw an undirected edge between them\n if adjacency_matrix_[row, col] and adjacency_matrix_[col, row]:\n startnode = get_node_by_id(list_of_nodes_, row)\n color = DarkColor(int(startnode.group))\n f.write('\\t{{ id: {}, from: {}, to: {}, '\n 'color: \"{}\", original_color: \"{}\"}},\\n'\n .format(id, row, col, color, color))\n id += 1\n\n # CASE 2: Person in row knows person in col, but not vice versa\n if 
adjacency_matrix_[row, col] and not adjacency_matrix_[col, row]:\n startnode = get_node_by_id(list_of_nodes_, row)\n color = DarkColor(int(startnode.group))\n f.write('\\t{{ id: {}, from: {}, to: {}, arrows: \"to\", '\n 'color: \"{}\", original_color: \"{}\"}},\\n'\n .format(id, row, col, color, color))\n id += 1\n\n # CASE 3: Person in col knows person in row, but not vice versa\n if not adjacency_matrix_[row, col] and adjacency_matrix_[col, row]:\n startnode = get_node_by_id(list_of_nodes_, col)\n color = DarkColor(int(startnode.group))\n f.write('\\t{{ id: {}, from: {}, to: {}, arrows: \"to\", '\n 'color: \"{}\", original_color: \"{}\"}},\\n'\n .format(id, col, row, color, color))\n id += 1\n\n # Close the Edges array properly\n f.write('];')\n\n print 'Created nodes-and-edges.js!'", "def __init__(self, nodes):\n self.parents = {}\n self.ranks = {}\n\n for node in nodes:\n self.parents[node] = node\n self.ranks[node] = 0", "def make_nodes_and_paths(friends_lst):\n\n # nodes = {}\n\n # for item in friends_lst:\n # friend1, friend2, group = item\n # for person in pair:\n # if not nodes.get(person):\n # nodes[person] = pair[1]\n\n # nodes = [{'name': person, 'friend': nodes[person]} for person in nodes.keys()]\n\n nodes = {}\n for item in friends_lst:\n friend1, friend2, group = item\n if not nodes.get(friend1):\n nodes[friend1] = group\n elif nodes.get(friend1) > group:\n nodes[friend1] = group\n\n nodes = [{'name': person, 'group': nodes[person]} for person in nodes.keys()]\n\n index_nodes = {}\n for idx, n in enumerate(nodes):\n index_nodes[n['name']] = (idx, n['group'])\n\n paths = []\n\n # paths.append({'source': item[1], 'target': item[0]})\n\n for item in friends_lst:\n # one = User.query.get(item.user_id)\n # two = User.query.get(item.friend_id)\n source, target, group = item\n paths.append({'source': index_nodes[source][0], 'target': index_nodes[target][0]})\n\n # print nodes\n # print index_nodes\n # print paths\n\n return nodes, paths", "def dictize(self):\n dict = {}\n for node in self.sort():\n logger.debug(\"Dictize: id %s has name %s\" % (node._id, node.name))\n x = node._kwargs()\n dict[node._id]={\"klass\":node.__class__.__name__, \n \"kwargs\": x,\n \"children\":[child._id for child in node.children()]}\n return dict", "def node_mapping(self):\n ...", "def buildNodesDict(self):\n # Get relevant nodes from TANA ca_jc, intersect with BUS_ROUTE_TRAVERSAL_EDGES.\n # Then get the X,Y for the features.\n arcpy.env.workspace = PublicTransit.WORKING_GDB\n arcpy.AddXY_management(PublicTransit.RELEVANT_NODES)\n nodes = arcpy.SearchCursor(PublicTransit.RELEVANT_NODES, \"\", \"\",\n \"ID_hash; POINT_X; POINT_Y\", \"\")\n self.nodesDict = dict()\n numNodes = int(arcpy.GetCount_management(PublicTransit.RELEVANT_NODES).getOutput(0))\n print \"Found %d nodes\" % numNodes\n for node in nodes:\n self.nodesDict[node.ID_hash] = Node(node.ID_hash, node.POINT_X, node.POINT_Y)\n del node\n del nodes", "def get_question_answers(self):\r\n # dict of (id, correct_answer)\r\n answer_map = dict()\r\n for response in self.responders.keys():\r\n results = self.responder_answers[response]\r\n answer_map.update(results)\r\n\r\n # include solutions from <solution>...</solution> stanzas\r\n for entry in self.tree.xpath(\"//\" + \"|//\".join(solution_tags)):\r\n answer = etree.tostring(entry)\r\n if answer:\r\n answer_map[entry.get('id')] = contextualize_text(answer, self.context)\r\n\r\n log.debug('answer_map = %s', answer_map)\r\n return answer_map", "def assign_no_to_node(self,list):\n list = 
sorted(list)\n d = {}\n for i,node in enumerate(list):\n #print i,node\n d[node] = i \n return d,len(d)", "def process_answer(ans):\n\n #TODO: check whether need type coversion?\n ans['parentid'] = int(ans['parentid'])\n ## I remain comments here, maybe can do some sentiment analysis to evaluate score of answer\n return ans", "def _init_catalog_node(catalog, pid, lid=None, rid=None):\n if pid not in catalog: catalog[pid] = {'_langs': {}}\n if lid is not None:\n if lid not in catalog[pid]['_langs']: catalog[pid]['_langs'][lid] = {'_res': {}, 'language': {}}\n if lid is not None and rid is not None:\n if rid not in catalog[pid]['_langs'][lid]['_res']: catalog[pid]['_langs'][lid]['_res'][rid] = {}", "def create_nodes(self):\n # Create a special dictionary that will raise an error if a key is\n # updated. This avoids the\n nodes = NodeDict()\n\n return create_solph_nodes_from_data(self.input_data, nodes)", "def get_mapped_answers(self):\r\n answers = (\r\n dict([(ie.get('id'), ie.get(\r\n 'rectangle')) for ie in self.ielements]),\r\n dict([(ie.get('id'), ie.get('regions')) for ie in self.ielements]))\r\n return answers", "def make_dict(\n nn,\n q_id,\n polarity,\n context_cond,\n cat,\n subcat,\n answer_info,\n bias_targets,\n version,\n notes,\n context,\n question,\n ans_list,\n ans_place,\n):\n this_dict = {\n \"example_id\": nn,\n \"question_index\": q_id,\n \"question_polarity\": polarity,\n \"context_condition\": context_cond,\n \"category\": cat,\n \"answer_info\": answer_info,\n \"additional_metadata\": {\n \"subcategory\": subcat,\n \"stereotyped_groups\": bias_targets,\n \"version\": version,\n \"source\": notes,\n },\n \"context\": context.strip(),\n \"question\": question.strip(),\n \"ans0\": ans_list[0],\n \"ans1\": ans_list[1],\n \"ans2\": ans_list[2],\n \"label\": ans_place,\n }\n return this_dict", "def __init__(self):\n self.kids = [{}]\n self.root = 0\n self.vocabular = set([])", "def __init__(self):\n self.head = Node(float('-inf'))\n self.tail = Node(float('inf'))\n self.head.next = self.tail\n self.tail.prev = self.head\n # value 1, 2, 3, key: hello or abc\n self.cntKey = {}\n # key : hello or abc, cnt value 1, 2, 3\n self.keyCnt = {}", "def _create_connections(self):\n self.predecessors = {}\n self.successors = {}\n for nd in self.nodes:\n self.predecessors[nd.name] = []\n self.successors[nd.name] = []\n\n for (nd_out, nd_in) in self.edges:\n self.predecessors[nd_in.name].append(nd_out)\n self.successors[nd_out.name].append(nd_in)", "def get_my_questions(user_id):\n questions = select_query(\n \"SELECT q_id,question FROM question WHERE question.user_id = (%s) ORDER BY create_time DESC \", user_id)\n\n answers = select_query(\n \"SELECT answer.q_id, answer.answer, answer.a_id,answer.is_answer FROM answer Left JOIN question on answer.q_id=question.q_id WHERE question.user_id =(%s)\", user_id)\n my_questions = {q[0]: copy.deepcopy(\n Question(q[1], q_id=q[0], user_id=user_id)) for q in questions}\n\n for a in answers:\n my_questions[a[0]]['answers'].append((a[1], a[2], a[3]))\n return my_questions.values()", "def parser(self, answer):\n result = {}\n for rrsets in answer.response.answer:\n for item in rrsets.items:\n rdtype = self.get_type_name(item.rdtype)\n\n if item.rdtype == self.get_type_id('A'):\n if result.has_key(rdtype):\n result[rdtype].append(item.address)\n else:\n result[rdtype] = [item.address]\n return result", "def display_possible_answers(question):\n answers = question['incorrect'] + [question['correct']]\n random.shuffle(answers)\n answer_dict = {}\n for 
i, answer in enumerate(answers):\n answer_dict[str(i + 1)] = answer\n print(f\"{i + 1}: {answer}\\n\")\n return answer_dict", "def node_info(self) -> dict:\r\n location_str = f\"{self.location[0]},{str(self.location[1])},{str(self.location[2])}\"\r\n return {\"id\": self.key, \"pos\": location_str}", "def _process_nodes(self):\n # Sort the nodes by metanode type, then by id\n self.node_df = self.node_df.sort_values(['label', 'id']).reset_index(drop=True)\n # Get all the ids\n self.nodes = self.node_df['id']\n # Get mapping from the index to the node ID (one to many so need different one for each node type)\n self.index_to_nid = dict()\n for group_name, group in self.node_df.groupby('label'):\n self.index_to_nid[group_name] = group['id'].reset_index(drop=True).to_dict()\n # Get the reverse mapping (many to one so don't need to separate based on type).\n self.nid_to_index = dict()\n for mapper in self.index_to_nid.values():\n for index, nid in mapper.items():\n self.nid_to_index[nid] = index\n # Finally, we need a mapper from id to node type\n self.id_to_metanode = self.node_df.set_index('id')['label'].to_dict()\n # And from node type to a list of ids\n self.metanode_to_ids = dict()\n for group_name, group in self.node_df.groupby('label'):\n self.metanode_to_ids[group_name] = group['id'].tolist()\n # One more mapper of id to name\n self.nid_to_name = self.node_df.set_index('id')['name'].to_dict()", "def _get_identifiers_from_kbs(self) -> dict:\n id_mapping_dict = defaultdict(set)\n\n for kb in self.kbs:\n sys.stdout.write('\\n%s \\n' % kb.name)\n for p in tqdm.tqdm(kb.pathways, total=len(kb.pathways)):\n for ent in p.entities:\n id_set = list(set(ent.xrefs))\n if len(id_set) == 1:\n id_mapping_dict[id_set.pop()] = set([])\n for p, q in itertools.combinations(id_set, 2):\n id_mapping_dict[p].add(q)\n id_mapping_dict[q].add(p)\n\n return id_mapping_dict", "def all_in_edges_of_node(self, id1: int) -> dict:\n return self.edges_in[id1]", "def _docMapping(self):\n doc2quests = defaultdict(list)\n for q, d in self.quest2doc.items():\n doc2quests[d].append(q)\n return doc2quests", "def make_complete_graph(num_nodes):\r\n result = {}\r\n for idx in range(0,num_nodes):\r\n result[idx] = set([])\r\n for jdx in range(0,num_nodes):\r\n if (idx!=jdx):\r\n result[idx].add(jdx)\r\n return result", "def generateGraph(mids, chaptersField, labelsField):\n output = \"digraph G { \\n\"\n # On ne traite que les chapitres qui ont actives le graphe\n chapts = chapters.graphChapters()\n # le dico nodes contient une liste pour chaque chapitre. 
Chaque liste\n # contient tous les neuds (un par note) presents dans ce chapitre, et\n # representes par des tuples (noteId, label)\n nodes = {}\n for mid in mids:\n chapterField = chaptersField[mid]\n labelField = labelsField[mid]\n for id, flds in mw.col.db.execute(\"\"\"\n SELECT id, flds FROM notes WHERE mid=%d\n \"\"\" % mid):\n fields = splitFields(flds)\n chapter = fields[chapterField]\n if not chapter in chapts:\n continue\n label = fields[labelField]\n if(not chapter in nodes):\n nodes[chapter] = []\n nodes[chapter].append((id, label))\n # On genere les noeuds, dans des clusters (un par chapitre)\n notes = []\n for chap in nodes:\n output += \"\"\"subgraph cluster_%d {\n node [style=filled];\n label = \"%s\";\n color=blue;\n \"\"\" % (chapts[chap], chap)\n for n in nodes[chap]:\n output += \"\"\"n%d [label=\"%s\", URL=\"%d\"];\\n\"\"\" % (n[0], n[1], n[0])\n notes.append(n)\n output += \"\"\"\n }\\n\"\"\"\n # Puis on ajoute tous les liens ..\n for n in notes:\n for nid in mw.col.db.execute(\"\"\"SELECT N.noteId FROM `PATH.links` AS L\n JOIN `PATH.match` AS M ON M.id = L.matchId\n JOIN `PATH.nodes` AS N ON M.nodeId = N.id\n WHERE L.noteId = %d\"\"\" % (n[0])):\n output += \"\"\"n%d -> n%d;\\n\"\"\" % (nid[0], n[0])\n output += \"}\"\n generateGraphImage(output)", "def read_graph_nodes(dict1,node_list):\n\t#print fname\n\t#dict1={}\n\t#with open(fname,'r') as fp:\n\t#\tfor line in fp:\n\t#\t\tline=line.rstrip('\\n')\n\t#\t\tn_id,name=line.split('\\t')\n\t#\t\tdict1[int(n_id)]=name\n\thuman_readable_node_list=[]\n\tfor node in node_list:\n\t\tif int(node) in dict1:\n\t\t\t#print 'found'\n\t\t\thuman_readable_node_list.append(dict1[node])\n\t\telse:\n\t\t\thuman_readable_node_list.append('-1')\n\treturn human_readable_node_list", "def gen_nodes(self):\n self.nodes = []\n for i in range(self.num_nodes):\n self.nodes.append(Node(self.fk))", "def getselected_nodes(self):\n self.selected_nodes = {}\n for path in self.options.selected_nodes:\n sel_data = path.rsplit(':', 2)\n path_id = sel_data[0]\n sub_path = int(sel_data[1])\n sel_node = int(sel_data[2])\n if path_id not in self.selected_nodes:\n self.selected_nodes[path_id] = {sub_path: [sel_node]}\n else:\n if sub_path not in self.selected_nodes[path_id]:\n self.selected_nodes[path_id][sub_path] = [sel_node]\n else:\n self.selected_nodes[path_id][sub_path].extend([sel_node])", "def thread(comments):\r\n \r\n ret = {'root': []}\r\n for comment in comments:\r\n if not comment.parent_id:\r\n ret['root'].append(comment)\r\n else:\r\n if comment.parent_id not in ret:\r\n ret[comment.parent_id] = []\r\n ret[comment.parent_id].append(comment)\r\n return ret", "def create_ner_tags_dict():\r\n global ne_tags_set, ner_to_id, ne_tags, id_to_ner\r\n\r\n ne_tags = list(ne_tags_set) + ['[CLS]', '[SEP]']\r\n ne_tags.sort()\r\n id_to_ner = {idx: tag for idx, tag in enumerate(ne_tags)}\r\n ner_to_id = {tag: idx for idx, tag in enumerate(ne_tags)}\r\n print(f'Total NER tag size: {len(ne_tags)}; Tags: {ne_tags}')", "def get_answers(self):\r\n anshtml = '<span class=\"openended-answer\"><pre><code>{0}</code></pre></span>'.format(self.answer)\r\n return {self.answer_id: anshtml}", "def __init__(self):\n\n self.nodes = {}", "def get_nodes(wf_results):\n return {node.fullname: node for node in wf_results.nodes}", "def picksomequestions():\n answers = dict()\n for question in nlist:\n answers[question[0]] = question[1]\n if len(answers.keys()) > 49:\n break\n\n return answers", "def map(self, group_id=None, block_id=None, answer_id=None, group_instance=None, 
answer_instance=None):\n result = {}\n for answer in self.filter(group_id, block_id, answer_id, group_instance, answer_instance):\n answer_id = answer['answer_id']\n answer_id += \"_\" + str(answer['answer_instance']) if answer['answer_instance'] > 0 else ''\n\n result[answer_id] = answer['value']\n\n return OrderedDict(sorted(result.items(), key=lambda t: natural_order(t[0])))", "def retrieveTrees(c):\n\n all_nodes = dict()\n root_nodes = list()\n c.execute('''SELECT id, parent_id, title FROM node''')\n data_db = c.fetchall()\n \n # Initialize nodes list\n for data_line in data_db:\n db_child_id = data_line[0]\n db_parent_id = data_line[1]\n child_title = data_line[2]\n \n node = Node(db_child_id, child_title)\n all_nodes[db_child_id] = node\n if not db_parent_id:\n root_nodes.append(node)\n \n # Create relations\n for data_line in data_db:\n db_child_id = data_line[0]\n db_parent_id = data_line[1]\n if db_parent_id:\n all_nodes[db_parent_id].append(all_nodes[db_child_id])\n \n return (all_nodes, root_nodes,)", "def fromRootToListDic(t,labels = []):\r\n print \"creating list of dictionaries from Root Tuple\"\r\n N=t.GetEntries()\r\n labels2 = []\r\n list=[]\r\n a=t.GetListOfBranches()\r\n if not labels:\r\n\t for branch in a:\r\n\t\t labels.append(branch.GetName())\r\n for label in labels:\r\n\t if hasattr(t,label): labels2.append(label)\r\n\t else: print \"ignoring \", label\r\n for i in range(N):\r\n thing={}\r\n t.GetEntry(i)\r\n for label in labels2:\r\n thing[label]=getattr(t,label)\r\n list.append(thing)\r\n labels = []\r\n return list", "def get_nodes(self, ids):\n return [self.node_labels[i] for i in ids]", "def make_neighbor_db(data):\n acted_with = {}\n for i, j, _ in data:\n # the setdefault method lets us avoid checking for ourselves whether an\n # actor is aclready in the dictionary.\n # see https://docs.python.org/3/library/stdtypes.html#dict.setdefault\n acted_with.setdefault(i, set()).add(j)\n acted_with.setdefault(j, set()).add(i)\n return acted_with", "def construct_authorities_for_answers(answers):\n domains = [rr._dn for rr in answers]\n glue = {\"authority\": [], \"additional\": []}\n for domain in domains:\n auth = construct_ns_rr_from_cache(domain)\n additional = construct_a_rr_from_cache(auth._nsdn)\n if auth:\n glue[\"authority\"].append(auth)\n if additional:\n glue[\"additional\"].append(additional)\n return (glue[\"authority\"], glue[\"additional\"])", "def get_all_questions(user_id):\n questions = select_query(\n \"SELECT q_id,question, user_id FROM question\")\n my_questions = {q[0]: copy.deepcopy(\n Question(q[1], q_id=q[0], user_id=q[2])) for q in questions}\n\n answers = select_query(\n \"SELECT answer.q_id, answer.answer, answer.a_id, answer.is_answer FROM answer Left JOIN question on answer.q_id=question.q_id\")\n for a in answers:\n my_questions[a[0]]['answers'].append((a[1], a[2], a[3]))\n return my_questions.values()", "def __getitem__(self, idx):\n\n text, label = self.data[idx]\n ids = self.get_ids(text)\n\n return {\"ids\": ids, \"label\": label}", "def nodes_mapped(instance):\n G, mapping = instance.network()\n node_dict = instance.network_nodes_species()\n\n node_dict_mapped = {}\n\n for old_label, new_label in mapping.items():\n for node, ammentity in node_dict.items():\n if old_label == node:\n node_dict_mapped[new_label] = ammentity\n\n return node_dict_mapped", "def node_dictionary():\r\n\r\n classes = node_subclasses(Node)\r\n dictionary = {}\r\n\r\n for c in classes:\r\n try:\r\n name = c.identifier()\r\n dictionary[name] = c\r\n except 
AttributeError:\r\n # If node does not provide identifier, we consider it to be\r\n # private or abstract class\r\n pass\r\n\r\n return dictionary", "def nodes_names_map(self):\n return {nd.name: nd for nd in self.nodes}", "def get_node_a(name, taxid, pathway, topology, psi_mi_to_sql_object):\n\n # Testing if the node is already in the database\n node_dict = psi_mi_to_sql_object.get_node(name, node_tax_id=taxid)\n\n if not node_dict:\n node_dict = {\n \"name\" : 'Uniprot:' + name,\n \"tax_id\": taxid,\n \"alt_accession\": None,\n 'pathways': pathway,\n \"aliases\": None,\n \"topology\": topology\n }\n\n return node_dict", "def _get_comment_map(self):\r\n def _visit(obj):\r\n res = []\r\n for child in obj.get('children', []):\r\n res.append((child['id'], child))\r\n if 'children' in child:\r\n res += _visit(child)\r\n return res\r\n return dict(_visit(self.thread))", "def _get_nodes(self):\n viewpoint = \"shiva_{}\".format(cherrypy.session[\"id\"])\n messages_db = self.mongo[viewpoint][\"messages\"]\n people_db = self.mongo[viewpoint][\"people\"]\n #\n senders = messages_db.distinct(\"sender\")\n owner_id = cherrypy.session[\"id\"]\n nodes = list()\n for sender in senders:\n person = people_db.find_one({\"id\": sender})\n if person is None:\n name = \"id{}\".format(sender)\n else:\n name = person[\"display_name\"]\n records = list(messages_db.aggregate([{\n \"$match\": {\n \"$or\": [\n {\"sender\": owner_id, \"receiver\": sender},\n {\"sender\": sender, \"receiver\": owner_id}\n ]\n }\n }, {\"$group\": {\"_id\": None, \"count\": {\"$sum\": 1}}}]))\n if not records:\n records = 0\n else:\n records = records[0][\"count\"]\n info = \"Total records: {}\".format(records)\n history_link = \"/vk/read?id={}\".format(sender)\n statistics_link = \"#\"\n if records > 0:\n nodes.append({\n \"id\": sender,\n \"name\": name,\n \"info\": info,\n \"records\": records,\n \"history_link\": history_link,\n \"statistics_link\": statistics_link\n })\n #\n return nodes", "def create_nodes(self):", "def __create_node(self, from_node_id, to_node_id):\n #ensure from_node_id and start_node_id is not the same\n if from_node_id == to_node_id:\n print(\"Cannot insert same node\")\n return\n \n # 1. declare two variable nodes\n n1 = n2 = None\n \n # 2. check if exist\n for x in self.__node:\n if x.getId()==from_node_id:\n n1 = x\n if x.getId()==to_node_id:\n n2 = x\n\n # 3. 
if n1 or n2 is None, create from_node_id / to_node_id\n if n1 is None:\n n1 = Node(from_node_id)\n self.__node.append(n1)\n \n if n2 is None:\n n2 = Node(to_node_id)\n self.__node.append(n2)\n\n #return from_node and to_node\n return n1, n2", "def extract_nodes(file_name, file_name_out):\n with open(file_name, 'r') as file_in:\n nodes = {} # dict of player and unique id\n uid = 1\n for line in file_in:\n fields = parse_line(line)\n player = format_name(fields[0])\n if player not in nodes:\n nodes[player] = uid\n uid += 1\n\n with open(file_name_out, 'w') as file_out:\n print('id,label', file=file_out)\n for player in nodes:\n print(nodes[player], player, sep=',', file=file_out)\n\n return nodes", "def get_graph_dictionary(self):\n nodes = {}\n n = 0\n for node in self.__nodes:\n nodes[n] = tuple(node.get_data())\n n += 1\n\n edges = set()\n for edge in self.__edges:\n new_edge = (edge.get_node_a().get_id(), edge.get_node_b().get_id())\n edges.add(new_edge)\n\n graph_dict = {}\n graph_dict[\"nodes\"] = nodes\n graph_dict[\"edges\"] = edges\n\n return graph_dict", "def get_hits(nodes: Dict[int, PhyloNode], rank: str, taxids: List[int]) -> Dict[int, int]:\n\n hits = {}\n for taxid in taxids:\n if taxid not in nodes:\n continue\n hit = get_ancestor_of_rank(nodes[taxid], rank)\n # pigeonhole ancestors of taxons\n if not hit:\n continue\n if hit in hits:\n hits[hit] += 1\n else:\n hits[hit] = 1\n return hits", "def _make_answer_dict(self, choice_list):\r\n\r\n answer_dict = {}\r\n for index, choice_answers_pair in enumerate(choice_list):\r\n # Choice is whether this choice is correct\r\n # Answers contains a list of answers to textinpts for the choice\r\n choice, answers = choice_answers_pair\r\n\r\n if choice:\r\n # Radio/Checkbox inputs in choicetext problems follow\r\n # a naming convention that gives them names ending with \"bc\"\r\n choice_id = \"1_2_1_choiceinput_{index}bc\".format(index=index)\r\n choice_value = \"choiceinput_{index}\".format(index=index)\r\n answer_dict[choice_id] = choice_value\r\n # Build the names for the numtolerance_inputs and add their answers\r\n # to `answer_dict`.\r\n for ind, answer in enumerate(answers):\r\n # In `answer_id` `index` represents the ordinality of the\r\n # choice and `ind` represents the ordinality of the\r\n # numtolerance_input inside the parent choice.\r\n answer_id = \"1_2_1_choiceinput_{index}_numtolerance_input_{ind}\".format(\r\n index=index,\r\n ind=ind\r\n )\r\n answer_dict[answer_id] = answer\r\n\r\n return answer_dict", "def get_relation_dict(word: str, num: int):\n global database_time, other_time\n label = str(word + \"_\" + str(num))\n\n # These two cases are used in finding relations of \"last\" words, words we find at the end of a sentence\n if num == -1:\n label = str(word + \"_last1\")\n elif num == -2:\n label = str(word + \"_last2\")\n\n start = time.time()\n\n # response = word_relation_table.get_item(\n # Key = {\n # \"id\": label\n # }\n # )\n\n end = time.time()\n diff = end - start\n database_time += diff\n # data = response['Item']['words']\n data = word_relation_lookup(label)\n start = time.time()\n\n data1 = __clean_noise(data, 0.0025)\n if __get_json_sum(data1) == 0:\n data1 = __clean_noise(data, 0.00125)\n print(\"switchin it up\")\n if __get_json_sum(data1) == 0:\n print(\"switchin it up11111\")\n data1 = data\n\n end = time.time()\n diff = end - start\n other_time += diff\n return dict(data1)", "def make_mapping(items):\n compid, nodes = items\n nodes = list(nodes)\n base_node = min(nodes)\n return 
[(node,base_node) for node in nodes if node != base_node]", "def add_nodes(list_of_ids, G, singleGraph):\r\n road_set=set()\r\n for id, pm, dir, coords, hwy in list_of_ids:\r\n id_dict=dict(lat=coords[0], lon=coords[1], dire=dir, mile=pm, road=hwy)\r\n G.add_node(id, id_dict)\r\n singleGraph.add_node(id)\r\n singleGraph.position[id]=(coords[1], coords[0])\r\n road_set.add(int(hwy))\r\n print 'road set: ', road_set\r\n return road_set, G, singleGraph", "def all_out_edges_of_node(self, id1: int) -> dict:\n if id1 in self.Nodes:\n return self.Edges[id1]\n return {}", "def network_nodes_species(self):\n G, mapping = self.network()\n waste, resources, intmed_products = self.amenities()\n\n node_dict = {}\n\n for nd in G:\n # print(nd)\n if isinstance(nd, int):\n node_dict[nd] = \"r\"\n elif nd in self.commodity:\n node_dict[nd] = \"Xc\"\n elif nd in waste:\n node_dict[nd] = \"w\"\n elif nd in resources:\n node_dict[nd] = \"Xr\"\n elif nd in intmed_products:\n node_dict[nd] = \"InPr\"\n\n return node_dict", "def new_branch_tree(tree, ids):\n branch_tree = {}\n branch_tree[\"selftext\"] = tree[\"selftext\"]\n branch_tree[\"title\"] = tree[\"title\"]\n branch_tree[\"id\"] = tree[\"id\"]\n branch_tree[\"comments\"] = {}\n for id in ids[1:]:\n branch_tree[\"comments\"][id] = tree[\"comments\"][id]\n return branch_tree", "def build_nodes(jsonData):\n nodes = {}\n\n def _process_cookiestr(cookieStr):\n \"\"\"\n parses a dictionary of req/resp calls to extract the cookie information\n returns a list of cookies set on this domain\n \"\"\"\n cookie_list = []\n # parses cookie str if a cookie has been set\n for cookie in cookieStr.split('\\n'):\n c = {}\n for cook in cookie.split(';'):\n token = cook.split('=', 1)\n if len(token) < 2: \n # usually this is just a flag e.g HTTPOnly, HTTPSOnly\n continue\n c[token[0]] = token[1]\n cookie_list.append(c)\n return cookie_list \n \n def _check_node(d):\n try:\n domain_node = nodes[d]\n except KeyError:\n isBug, bug_name, bug_type = ADREGEX.search(domain)\n domain_node = WebNode(domain, isBug, bug_name, bug_type)\n nodes[d] = domain_node\n return domain_node \n \n #jsonData contains all the domains and all the req/resp pairs made to them\n #iterating over the domains first\n for domain, dval in jsonData.items():\n # but first check if a node for this domain has been created or not\n domain_node = _check_node(domain)\n cookie_list = []\n # iterating thru all the req/resp pairs on a domain\n for info in dval:\n domainPath = info['domainPath']\n referrerPath = info['referrerPath']\n referrer = info['referrer']\n cookieBool = info['cookie'] \n \n parsed_cookie = None \n if cookieBool:\n cookieStr = info['cookiestr']\n parsed_cookie = _process_cookiestr(cookieStr)\n cookie_list.append(parsed_cookie)\n domain_node.add_reqresp({'domainPath' : domainPath,\n 'referrer' : referrer,\n 'referrerPath' : referrerPath,\n 'cookieList' : parsed_cookie\n })\n # making sure that we also create the node for the referrer\n referrer_node = _check_node(referrer)\n referrer_node.add_child(domain_node)\n domain_node.add_parent(referrer_node)\n domain_node.set_cookies(cookie_list)\n return nodes", "def __init__(self):\n self.adjList = {}", "def make_unpack_map(node):\n return dict(zip(node.names, node.iternodes()))", "def create_node2edges_on2freq_grid(self):\n trip_id2model = pickle.load(open('pickles/trip_id2model.pickle','rb'))\n old_trip_id = -1\n model = trip_id2model[1]\n sub_x = 5\n sub_y = 5\n node2edges_on2sub_grid2points = {}\n for line in self.lines:\n trip_id,lat,lon = 
normalize_simple(line)\n if trip_id != old_trip_id:\n #print trip_id\n model = trip_id2model[trip_id]\n old_trip_id = trip_id\n node = self.gps_to_node(lat,lon)\n if node == -1:\n continue\n #print \"pushed through\"\n incident_edges = self.incident_edges(node)\n edges_on = []\n for edge in incident_edges:\n if model[edge] == 1:\n edges_on.append(edge)\n edges_on.sort()\n edges_on = tuple(edges_on)\n min_lat,max_lat,min_lon,max_lon = self.coords_to_min_max_lat_lon(self.node_to_coords(node))\n\n sub_row,sub_col = gen_gps_to_coords(lat,lon,sub_x,sub_y,min_lat,max_lat,min_lon,max_lon)\n sub_tuple = (sub_row,sub_col)\n if node not in node2edges_on2sub_grid2points:\n node2edges_on2sub_grid2points[node] = {}\n edges_on2sub_grid2points = node2edges_on2sub_grid2points[node]\n if edges_on not in edges_on2sub_grid2points:\n edges_on2sub_grid2points[edges_on] = defaultdict(list)\n sub_grid2points = edges_on2sub_grid2points[edges_on]\n points = sub_grid2points[sub_tuple]\n node2edges_on2sub_grid2points[node][edges_on][sub_tuple].append([lat,lon])\n #points.append([lat,lon])\n\n print node2edges_on2sub_grid2points.keys()\n print node2edges_on2sub_grid2points[2].keys()\n print node2edges_on2sub_grid2points[2][(2,3)].keys()\n \n node2edges_on2median = {}\n for node in node2edges_on2sub_grid2points:\n print node\n edges_on2sub_grid2points = node2edges_on2sub_grid2points[node]\n node2edges_on2median[node] = {}\n for edges_on in edges_on2sub_grid2points:\n sub_grid2points = edges_on2sub_grid2points[edges_on]\n best_spot = (-1,-1)\n best_score = 0\n for spot in sub_grid2points:\n score = len(sub_grid2points[spot])\n if score > best_score:\n best_score = score\n best_spot = spot\n node2edges_on2median[node][edges_on] = list_median(sub_grid2points[spot])\n \n with open('pickles/node2edges_on2median-%d-%d.pickle' % (self.rows,self.cols),'wb') as output:\n pickle.dump(node2edges_on2median,output)", "def makeGraph(locations, distances):\n graph = dict() # maps (lat, lng) to Node\n for location,distance in zip(locations, distances):\n currLocation = location[0]\n neighbors = location[1:]\n makeNode(currLocation, neighbors, distance[1:], graph)\n return graph", "def build_node_graph(self):\n G = pgv.AGraph(strict=False, directed=True)\n temp_dict = defaultdict(int) #key - from_to_ip, val - counter\n\n for i, ip in enumerate(self.node_graph_dict.keys()):\n G.add_node(ip, shape='rect', label='%d' % i)\n logger.info(\"All nodes added\")\n\n for ip, droplist in self.node_graph_dict.iteritems():\n for gnid, dropids in droplist:\n for did in dropids:\n tip = self.gnid_ip_dict[self.oid_gnid_dict[did]]\n k = '{0}_{1}'.format(ip, tip)\n temp_dict[k] += 1\n\n for k, v in temp_dict.iteritems():\n ks = k.split('_')\n G.add_edge(ks[0], ks[1], weight=v)\n\n return G", "def _build_nodes_dict(self, graph):\n nodes_dict = {}\n for node, data in graph.nodes_iter(data=True):\n nodes_dict.update({node: data['label']})\n return nodes_dict", "def adj_nodes(self, node) -> dict:\n if node in self._adj:\n return self._adj[node]\n else: # is this a good idea to return empty when node is not there?\n return {}", "def get_election(self, id: int) -> dict:", "def answers_db() -> Dict[str, List]:\n return{\"lawyer\":[\"either\",\"other\",\"law\",\"boy\"],\n \"cot_caught\":[\"different\",\"other\",\"same\"],\n \"second_person_plural\":[\"other\",\"y'all\",\"yins\",\n \"you\",\"you'uns\",\"you all\",\"you guys\",\"you lot\",\n \"yous, youse\"],\n \"yard_sale\":[\"car boot\",\"car boot sale\",\n \"carport sale\",\"garage sale\",\"jumble (sale)\",\n 
\"other\",\"patio sale\",\"rummage sale\",\"sidewalk sale\",\n \"stoop sale\",\"tag sale\",\"thrift sale\",\"yard sale\"],\n \"verge\":[\"beltway\",\"berm\",\"curb strip\",\n \"I have no word for this\",\"other\",\"parking\",\n \"terrace\",\"tree lawn\",\"verge\"],\n \"sandwich\":[\"baguette\",\"bomber\",\"grinder\",\"hero\",\n \"hoagie\",\"I have no word for this\",\"Italian sandwich\",\n \"other\",\"poor boy\",\"sarney\",\"sub\"],\n \"firefly\":[\"firefly\",\"I have no word for this\",\n \"I use lightning bug and firefly interchangeably\",\n \"lightning bug\",\"other\",\"peenie wallie\"],\n \"crawfish\":[\"craw\",\"crawdad\",\"crawfish\",\"crayfish\",\n \"crowfish\",\"I have no word for this critter\",\"mudbug\",\"other\"],\n \"shoes\":[\"gymshoes\",\"I have no general word for this\",\n \"jumpers\",\"other\",\"runners\",\"running shoes\",\"sand shoes\",\n \"shoes\",\"sneakers\",\"tennis shoes\",\"trainers\"],\n \"bug\":[\"basketball bug\",\"centipede\",\"doodle bug\",\n \"I have no idea what this creature is\",\n \"I know what this creature is, but have no word for it\",\n \"millipede\",\"other\",\"pill bug\",\"potato bug\",\"roll-up bug\",\n \"roly poly\",\"sow bug\",\"twiddle bug\",\"wood louse\"],\n \"kitty_corner\":[\"catercorner\",\"catty-corner\",\n \"I can only use \\\"diagonal\\\" for this\",\"I have no term for this\",\n \"kitacorner\",\"kitty-corner\",\"kitty cross\",\"kitty wampus\",\"other\"],\n \"highway\":[\"a freeway has limited access (no stop lights, no intersections), whereas a highway can have stop lights and intersections\",\n \"a freeway is bigger than a highway\",\n \"a freeway is free (i.e., doesn't charge tolls); a highway isn't\",\n \"expressway\",\"freeway\",\"highway\",\"other\",\"parkway\",\n \"throughway/thru-way\",\"turnpike\"],\n \"rain_sun\":[\"fox's wedding\",\"I have no term or expression for this\",\n \"liquid sun\",\"monkey's wedding\",\"other\",\"pineapple rain\",\"sunshower\",\n \"the devil is beating his wife\",\"the wolf is giving birth\"],\n \"frosting\":[\"both\",\"frosting\",\"icing\",\n \"icing is thinner than frosting, white, and/or made of powdered sugar and milk or lemon juice\",\n \"neither\",\"other\"],\n \"side_road\":[\"access road\",\"feeder road\",\"frontage road\",\n \"gateway\",\"I've never heard of this concept\",\"other\",\n \"service road\",\"we have them but I have no word for them\"],\n \"water_fountain\":[\"bubbler\",\"drinking fountain\",\"other\",\"water bubbler\",\n \"water fountain\"],\n \"beverage\":[\"cocola\",\"coke\",\"dope\",\"fizzy drink\",\n \"lemonade\",\"other\",\"pop\",\"soda\",\"soft drink\",\"tonic\"],\n \"rubbernecking\":[\"curiosity delay\",\"gapers' block\",\n \"gapers' delay\",\"gawk block\",\"I have no word for this\",\n \"Lookie Lou\",\"other\",\"rubberneck\",\"rubbernecking\",\n \"rubbernecking is the thing you do, not the traffice jam\"],\n \"halloween\":[\"cabbage night\",\"devil's eve\",\"devil's night\",\n \"gate night\",\"goosy night\",\"I have no word for this\",\n \"mischief night\",\"other\",\"trick night\"],\n \"brew_thru\":[\"beer barn\",\"beverage barn\",\"bootlegger\",\"brew thru\",\n \"I have never heard of such a thing\",\"other\",\"party barn\",\n \"we have these in my area, but we have no special term for them\"]}", "def map_to_homo_nid(self, ids, ntype):\n ...", "def map_to_homo_nid(self, ids, ntype):\n ...", "def show_tree(data):\r\n tree_dict = []\r\n ids = []\r\n for i in data:\r\n d = dict()\r\n if i[0] in ids:\r\n continue\r\n ids.append(i[0])\r\n d[\"id\"] = i[0]\r\n d[\"name\"] = 
i[1]\r\n d[\"parent_id\"] = i[2]\r\n tree_dict.append(d)\r\n return {\"nodes\": tree_dict}", "def nodeid_to_index(G):\n\n d = {node_id: i for i, node_id in enumerate(G.nodes)}\n\n return d", "def serialize(self):\n out = {\"nodes\":[],\n \"idmap\":{i:x for i,x in enumerate(sorted(self.nodes))}} #map for decoding\n r_idmap = {x:i for i,x in out[\"idmap\"].items()} #map for encoding\n for name, node in self.nodes.items():\n tmp = {\"name\": name,\n \"text\": node.text,\n \"neighbors\": []}\n for rtype, dest in node.outgoing_relations:\n if dest in r_idmap:\n tmp[\"neighbors\"].append([\"relation\", rtype, str(r_idmap[dest])])\n else:\n tmp[\"neighbors\"].append([\"literal\", rtype, dest])\n for atype, attribute in node.attributes:\n tmp[\"neighbors\"].append([\"literal\", atype, attribute])\n out[\"nodes\"].append(tmp)\n return json.dumps(out,sort_keys=True)", "def _build_votes_dict(self, participants):\n # more idiomatic way to do this?\n pvotes = {}\n for p in participants:\n pid = p.vote.id()\n pvcount = pvotes.get(pid, 0)\n pvotes[pid] = pvcount + 1\n logging.info(\"in _build_votes_dict, got pvotes: %s\", pvotes)\n return pvotes", "def buildDict(self, words):\n for word in words:\n self.word_set.add(word)\n for candidate in self.candidates(word):\n self.neighbors[candidate] += 1", "def results(self) -> Dict[str, Any]:\n return self.nodes", "def get_winners():\n\n context = {}\n\n # url\n context[\"url\"] = flask.request.path\n context['winners'] = []\n\n # Database\n db = quiplash.model.get_db()\n\n # Grab all the votes\n cur = db.execute(\"SELECT * FROM votes\",)\n votes = cur.fetchall()\n\n # Group votes by question id\n questionsToVotes = {}\n for vote in votes:\n if vote['questionid'] not in questionsToVotes:\n questionsToVotes[vote['questionid']] = []\n\n questionsToVotes[vote['questionid']].append(vote)\n\n playersAnswered = {}\n for row in db.cursor().execute(\"SELECT * FROM questions\"):\n newQuestion = {}\n newQuestion['question'] = row['question']\n newQuestion['questionid'] = row['questionid']\n\n # Grab all the votes\n votesForQ = []\n if row['questionid'] in questionsToVotes:\n votesForQ = questionsToVotes[row['questionid']]\n\n # Grab players who answered these questions\n cur = db.execute(\n (\"SELECT * FROM players WHERE name = \\'%s\\'\") % (row['name1']),)\n playerA = cur.fetchone()\n cur = db.execute(\n (\"SELECT * FROM players WHERE name = \\'%s\\'\") % (row['name2']),)\n playerB = cur.fetchone()\n\n # Get votes for each player\n newQuestion['votesA'] = []\n newQuestion['votesB'] = []\n for vote in votesForQ:\n if playerA['playerid'] == vote['playerid']:\n newQuestion['votesA'].append(vote['name'])\n else:\n newQuestion['votesB'].append(vote['name'])\n\n # Determine winners\n if len(newQuestion['votesA']) > len(newQuestion['votesB']):\n newQuestion['winnerName'] = playerA['name']\n newQuestion['loserName'] = playerB['name']\n else:\n newQuestion['winnerName'] = playerB['name']\n newQuestion['loserName'] = playerA['name']\n\n # Grab player's answer\n newQuestion[\"ansA\"] = playerA['ans1'] if row[\n 'name1'] not in playersAnswered else playerA['ans2']\n newQuestion[\"ansB\"] = playerB['ans1'] if row[\n 'name2'] not in playersAnswered else playerB['ans2']\n\n playersAnswered[row['name1']] = True\n playersAnswered[row['name2']] = True\n\n context['winners'].append(newQuestion)\n\n return flask.jsonify(**context)", "def visit_node_face(self, node, children):\n nface = {k:v[0] for k,v in children.results.items()}\n return nface", "def make_complete_graph(num_nodes):\n\tif 
num_nodes <= 0:\n\t\treturn {}\n\tdict_graph = {}\n\tfor node in range(num_nodes):\n\t\tnode_set = set()\n\t\tfor neighbor in range(num_nodes):\n\t\t\tif node != neighbor:\n\t\t\t\tnode_set.add(neighbor)\n\t\tdict_graph[node] = node_set\n\n\treturn dict_graph", "def get_node_b(name, taxid, pathway, topology, psi_mi_to_sql_object):\n\n # Testing if the node is already in the database\n node_dict = psi_mi_to_sql_object.get_node(name, node_tax_id=taxid)\n\n if not node_dict:\n node_dict = {\n \"name\": 'Uniprot:' + name,\n \"tax_id\": taxid,\n \"alt_accession\": None,\n 'pathways': pathway,\n \"aliases\": None,\n \"topology\": topology\n }\n\n return node_dict", "def linear(comments):\r\n\r\n return {'root': comments}", "def associate_node_id(tr, node=\"\"):\n return {\"id\": tr.get_uml_id(name=node)}", "def make_complete_graph(num_nodes):\n\tif num_nodes <= 0:\n\t\treturn {}\n\telse:\n\t\tdict_graph = {}\n\t\tfor node in range(num_nodes):\n\t\t\tnode_set = set()\n\t\t\tfor neighbor in range(num_nodes):\n\t\t\t\tif node != neighbor:\n\t\t\t\t\tnode_set.add(neighbor)\n\t\t\tdict_graph[node] = node_set\n\n\treturn dict_graph", "def dict_list(query_id, qrels):\n rel_list = []\n for query_dict in qrels:\n if int(query_dict['query_num']) == query_id:\n rel_list.append(query_dict)\n\n return rel_list", "def __init__(self, adjacency={}):\r\n self.d = dict(adjacency)", "def make_complete_graph(num_nodes):\r\n if num_nodes < 1:\r\n return dict()\r\n else:\r\n new_dict = dict()\r\n for node in range(num_nodes):\r\n other_nodes = range(num_nodes)\r\n other_nodes.pop(node)\r\n new_dict[node]=set(other_nodes)\r\n return new_dict", "def get_descendant_objective_id_terms(self):\n return # osid.search.terms.IdTerm", "def _get_answer_map(self):\r\n answer_map = {}\r\n for inputfield in self.inputfields:\r\n correct_option = self._find_option_with_choice(\r\n inputfield, 'correct')\r\n if correct_option is not None:\r\n input_id = inputfield.get('id')\r\n answer_map[input_id] = correct_option.get('description')\r\n return answer_map", "def get_answers(self):\r\n if len(self.answer_ids) > 1:\r\n return self.default_answer_map\r\n if self.expect:\r\n return {self.answer_ids[0]: self.expect}\r\n return self.default_answer_map", "def load_word2index(self):\n word2index = {}\n with open(self.nodes_file, 'r') as reader:\n for index, line in enumerate(reader):\n node = line.strip()\n word2index[node] = index\n\n return word2index", "def get_child_2_parent_dict(direct=False, type_=None, verbose=False):\n select_statement = \"SELECT ontologies.child, ontologies.parent FROM ontologies\"\n extend_stmt = \"\"\n if type_ is not None:\n extend_stmt += \" WHERE ontologies.type='{}'\".format(type_)\n if direct:\n extend_stmt += \" AND ontologies.direct=TRUE\"\n else:\n if direct:\n extend_stmt += \" WHERE ontologies.direct=TRUE\"\n\n sql_statement = select_statement + extend_stmt + \";\"\n result = get_results_of_statement(sql_statement)\n child_2_parent_dict = {}\n if verbose:\n print(sql_statement)\n print(\"Number of rows fetched: \", len(result))\n for res in result:\n child, parent = res\n if child not in child_2_parent_dict:\n child_2_parent_dict[child] = {parent}\n else:\n child_2_parent_dict[child].update([parent])\n return child_2_parent_dict" ]
[ "0.5874153", "0.5803899", "0.5788026", "0.5769955", "0.56848216", "0.56081706", "0.5563815", "0.55347794", "0.5533629", "0.55147934", "0.5420318", "0.53809404", "0.53226113", "0.53205335", "0.52443105", "0.52339405", "0.5148043", "0.5130552", "0.5121005", "0.5114256", "0.5098566", "0.50605094", "0.5052158", "0.50452065", "0.5038137", "0.50350416", "0.50346714", "0.5018171", "0.501794", "0.5008823", "0.5008426", "0.4998185", "0.4992468", "0.498238", "0.49822018", "0.49782777", "0.49704245", "0.4969099", "0.49666142", "0.49609345", "0.49580103", "0.49517098", "0.49497873", "0.49489102", "0.49380758", "0.49366164", "0.492869", "0.492433", "0.49219444", "0.49143198", "0.49105567", "0.49039835", "0.4903906", "0.4899413", "0.48845312", "0.48839", "0.48720554", "0.48690715", "0.48659235", "0.48577556", "0.4854529", "0.4851783", "0.48417318", "0.48398897", "0.48342043", "0.4832386", "0.4815972", "0.4810546", "0.4809791", "0.4806211", "0.4805551", "0.4802605", "0.48010892", "0.4800954", "0.47915953", "0.47915807", "0.47902632", "0.47866136", "0.47866136", "0.47840908", "0.47837344", "0.477715", "0.47697356", "0.4765937", "0.47580883", "0.47579518", "0.47565478", "0.47522768", "0.4750918", "0.4741651", "0.4740543", "0.47341207", "0.4733697", "0.4729617", "0.4727961", "0.47271037", "0.4718349", "0.47180858", "0.47163865", "0.4716198" ]
0.60742897
0
Runs through all of the nodes in the json responses storing the intersection and set differences into a dictionary organized by tuples of node ids or the tuple (-1, -1) for all nodes.
def node_diff(self): if self.input1 is None or self.input2 is None: raise Exception("Missing input: please run the populate() method first") if self.node_dict1 is None or self.node_dict2 is None: self.make_node_dict() # Initialize dictonaries to keep track of the nodes in respnse 1 and response 2 g1={} g2={} # Set to keep track of the union of all curie ids curie_set = set() for curie in self.node_dict1.keys(): g1[curie] = {} # intersection is only in the g1 dictionary g1[curie]['intersection'] = set() # node section keeps track of node ids associated with each node i.e. "n0" g1[curie]['node'] = set() curie_set.add(curie) for curie in self.node_dict2.keys(): g2[curie] = {} # node section keeps track of node ids associated with each node i.e. "n0" g2[curie]['node'] = set() curie_set.add(curie) node_names1 = [] node_names2 = [] # extract all node ids (i.e. "n0","n1",ect...) if len(self.input1['question_graph']['nodes'])>0: if 'id' in self.input1['question_graph']['nodes'][0]: node_names1 = [x['id'] for x in self.input1['question_graph']['nodes']] elif 'node_id' in self.input1['question_graph']['nodes'][0]: node_names1 = [x['node_id'] for x in self.input1['question_graph']['nodes']] if len(self.input2['question_graph']['nodes'])>0: if 'id' in self.input2['question_graph']['nodes'][0]: node_names2 = [x['id'] for x in self.input2['question_graph']['nodes']] elif 'node_id' in self.input2['question_graph']['nodes'][0]: node_names2 = [x['node_id'] for x in self.input2['question_graph']['nodes']] # initialize the result dictonary diff_dict = {} diff_dict["-1|-1"] = {'intersection':[],'g1-g2':[],'g2-g1':[]} # initialize node id tuple keys for id1 in node_names1: for id2 in node_names2: diff_dict[id1+"|"+id2] = {'intersection':[],'g1-g2':[],'g2-g1':[]} # iterate through answers for answer1 in self.input1['answers']: for answer2 in self.input2['answers']: for id1 in answer1['node_bindings'].keys(): # This is to handle cases where answer node id has a list or a string if isinstance(answer1['node_bindings'][id1], str): bindings1 = [answer1['node_bindings'][id1]] elif isinstance(answer1['node_bindings'][id1], list): bindings1 = answer1['node_bindings'][id1] for curie1 in bindings1: # store node id g1[curie1]['node'].add(id1) for id2 in answer2['node_bindings'].keys(): # This is to handle cases where answer node id has a list or a string if isinstance(answer2['node_bindings'][id2], str): bindings2 = [answer2['node_bindings'][id2]] elif isinstance(answer2['node_bindings'][id2], list): bindings2 = answer2['node_bindings'][id2] for curie2 in bindings2: # store node id g2[curie2]['node'].add(id2) if curie1 == curie2: # stor intersection tuple g1[curie1]['intersection'].add(id1+"|"+id2) # iterate through all curies for curie in curie_set: # check if curie is from answer 1 if curie in g1.keys(): # check if in intersection if len(g1[curie]['intersection'])>0: diff_dict["-1|-1"]['intersection'] += [self.node_dict1[curie]] for id1 in node_names1: for id2 in node_names2: node_tuple = id1+"|"+id2 if id1 in g1[curie]['node'] and id2 in g2[curie]['node']: diff_dict[node_tuple]['intersection'] += [self.node_dict1[curie]] elif id1 in g1[curie]['node']: diff_dict[node_tuple]['g1-g2'] += [self.node_dict1[curie]] elif id2 in g2[curie]['node']: diff_dict[node_tuple]['g2-g1'] += [self.node_dict1[curie]] # If not in intersection store in g1-g2 else: diff_dict["-1|-1"]['g1-g2'] += [self.node_dict1[curie]] for id1 in g1[curie]['node']: # iterate through all answer 2 ids for id2 in node_names2: diff_dict[id1+"|"+id2]['g1-g2'] += 
[self.node_dict1[curie]] # if not in g1 but in g2 then in g2-g1 elif curie in g2.keys(): diff_dict["-1|-1"]['g2-g1'] += [self.node_dict2[curie]] for id2 in g2[curie]['node']: # iterate through all answer 1 ids for id1 in node_names1: diff_dict[id1+"|"+id2]['g2-g1'] += [self.node_dict2[curie]] return diff_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dict_sets_intersection_test(self, data):\n\n data_info = self.get_data_info(data)\n finished = []\n\n for part in data:\n for union_part in data:\n if part != union_part and union_part not in finished:\n data[part].intersection(data[union_part])\n finished.append(part)\n\n return data_info", "def dict_un_lists_intersection_test(self, data):\n\n data_info = self.get_data_info(data)\n finished = []\n\n for part in data:\n for union_part in data:\n union = []\n if part != union_part and union_part not in finished:\n for node in data[part]:\n if node in data[union_part]:\n union.append(node)\n finished.append(part)\n\n return data_info", "def make_node_dict(self):\n if self.input1 is None or self.input2 is None:\n raise Exception(\"Missing input: please run the populate() method first\")\n self.node_dict1 = {}\n for node in self.input1['knowledge_graph']['nodes']:\n self.node_dict1[node['id']] = node\n self.node_dict2 = {}\n for node in self.input2['knowledge_graph']['nodes']:\n self.node_dict2[node['id']] = node", "def intersect(self, rays): \n result = {}\n \n if bool(self._merged):\n result[\"x\"], result[\"y\"], result[\"z\"], result[\"valid\"], result[\"ray_u\"], \\\n result[\"trig_u\"], result[\"trig_v\"], result[\"gather_ray\"], \\\n result[\"gather_trig\"] = self._intersection(\n rays[\"x_start\"],\n rays[\"y_start\"],\n rays[\"z_start\"],\n rays[\"x_end\"],\n rays[\"y_end\"],\n rays[\"z_end\"],\n self._merged[\"xp\"],\n self._merged[\"yp\"],\n self._merged[\"zp\"],\n self._merged[\"x1\"],\n self._merged[\"y1\"],\n self._merged[\"z1\"],\n self._merged[\"x2\"],\n self._merged[\"y2\"],\n self._merged[\"z2\"],\n self.intersect_epsilion,\n self.size_epsilion,\n self.ray_start_epsilion\n )\n \n result[\"norm\"] = tf.gather(\n self._merged[\"norm\"],\n result[\"gather_trig\"]\n )\n \n return result", "def test_intersection_edges(self):\n path = os.path.join(get_file_dir(), 'data', 'GO_edges_intersection_of.json')\n with open(path, 'rt') as json_file:\n json_files = []\n for data in json_file:\n json_files.append(json.loads(data))\n for entry in json_files:\n if entry[\"id\"] == \"GO:0000082__GO:0044843__\":\n self.assertEqual(entry[\"from\"], \"GO_term/GO:0000082\")\n self.assertEqual(entry[\"to\"], \"GO_term/GO:0044843\")\n self.assertEqual(entry[\"intersection_type\"], \"\")\n if entry[\"id\"] == \"GO:0000082__GO:0000278__part_of\":\n self.assertEqual(entry[\"from\"], \"GO_term/GO:0000082\")\n self.assertEqual(entry[\"to\"], \"GO_term/GO:0000278\")\n self.assertEqual(entry[\"intersection_type\"], \"part_of\")", "def test_intersection(self, client):\n\n expected = {\n 'a': [0,2,4,6,8],\n 'b': [4,6,8,10,12,14,16],\n 'result': [4,6,8]\n }\n\n res = client.post('/api/v1/intersection', json={'a': expected['a'], 'b': expected['b'] })\n assert res.status_code == 200\n assert res.json['data'] == expected['result']\n assert res.json['status'] == 2000", "def merge_duplicate_nodes(self):\n merges={}\n xys={}\n for n in self.valid_node_iter():\n k=tuple(self.nodes['x'][n])\n if k in xys:\n merges[n]=xys[k]\n self.merge_nodes(xys[k],n)\n else:\n xys[k]=n\n return merges", "def get_gene_id_dict(list_of_results):\n dict1 = {}\n for i, dict2 in enumerate(list_of_results):\n key = dict2[\"GeneID\"]\n if key in dict1.keys():\n # list1 = dict1[key]\n # list1.append(list_of_results[i])\n # dict1[key] = list1\n # list1.append(list_of_results[i])\n dict1[key].append(list_of_results[i])\n else:\n dict1[key] = [list_of_results[i]]\n return dict1", "def __node_rep(self):\n node_list_dict = {}\n for (i, 
beam) in enumerate(self.beams):\n if str(beam['n1']) not in node_list_dict.keys():\n node_list_dict[str(beam['n1'])] = 1\n else:\n node_list_dict[str(beam['n1'])] += 1\n if str(beam['n2']) not in node_list_dict.keys():\n node_list_dict[str(beam['n2'])] = 1\n else:\n node_list_dict[str(beam['n2'])] += 1\n return node_list_dict", "def get_nodes(self):\n return_set = set()\n for key in self._main_dictionary:\n return_set.add(key)\n return return_set", "def alldiff():\n res = {'Computation-alldiff-0': {'Experiment': 'alldiff',\n 'Parameters': {'w': 5, 'x': 1, 'z': 4},\n 'Results': {'f1': 15, 'f2': 51}},\n 'Computation-alldiff-1': {'Experiment': 'alldiff',\n 'Parameters': {'w': 6, 'x': 1, 'z': 4},\n 'Results': {'f1': 16, 'f2': 61}},\n 'Computation-alldiff-2': {'Experiment': 'alldiff',\n 'Parameters': {'w': 5, 'x': 2, 'z': 4},\n 'Results': {'f1': 25, 'f2': 52}},\n 'Computation-alldiff-3': {'Experiment': 'alldiff',\n 'Parameters': {'w': 6, 'x': 2, 'z': 4},\n 'Results': {'f1': 26, 'f2': 62}},\n 'Computation-alldiff-4': {'Experiment': 'alldiff',\n 'Parameters': {'w': 5, 'x': 3, 'z': 4},\n 'Results': {'f1': 35, 'f2': 53}},\n 'Computation-alldiff-5': {'Experiment': 'alldiff',\n 'Parameters': {'w': 6, 'x': 3, 'z': 4},\n 'Results': {'f1': 36, 'f2': 63}}}\n\n # Notice the ordering\n domain = {'x':[\"1\", \"2\", \"3\"], 'w':[\"5\", \"6\"]}\n metadata = {'z':\"4\"}\n parameters = [\"x\", \"w\"]\n parameters.sort()\n metrics = [\"f1\", \"f2\"]\n metrics.sort()\n exp_name = \"alldiff\"\n return exp_name, metadata, parameters, domain, metrics, res", "def all_in_edges_of_node(self, id1: int) -> dict:\n if id1 in self.Nodes:\n ans = {}\n for i, j in self.Edges.items():\n if id1 in j:\n ans[i] = j[id1]\n return ans\n return {}", "def _get_identifiers_from_kbs(self) -> dict:\n id_mapping_dict = defaultdict(set)\n\n for kb in self.kbs:\n sys.stdout.write('\\n%s \\n' % kb.name)\n for p in tqdm.tqdm(kb.pathways, total=len(kb.pathways)):\n for ent in p.entities:\n id_set = list(set(ent.xrefs))\n if len(id_set) == 1:\n id_mapping_dict[id_set.pop()] = set([])\n for p, q in itertools.combinations(id_set, 2):\n id_mapping_dict[p].add(q)\n id_mapping_dict[q].add(p)\n\n return id_mapping_dict", "def _get_intersections():\n with _get_mongo_client() as client:\n coll = client[mongo_database]['locations']\n return coll.find({'intersection_number': {'$exists': True}}, {'_id': False})", "def differentNodesForNode(ntupleSet,nodeList,verbose=False):\n nodesPerNode = dict(zip(nodeList,[[] for n in range(len(nodeList))]))\n for ntuple in ntupleSet:\n for nodeInTuple in ntuple:\n nodesPerNode[nodeInTuple].extend(ntuple)\n \n for a,v in nodesPerNode.iteritems():\n nodesPerNode[a] = set(v)\n \n return nodesPerNode", "def results(self) -> Dict[str, Any]:\n return self.nodes", "def buildNodesDict(self):\n # Get relevant nodes from TANA ca_jc, intersect with BUS_ROUTE_TRAVERSAL_EDGES.\n # Then get the X,Y for the features.\n arcpy.env.workspace = PublicTransit.WORKING_GDB\n arcpy.AddXY_management(PublicTransit.RELEVANT_NODES)\n nodes = arcpy.SearchCursor(PublicTransit.RELEVANT_NODES, \"\", \"\",\n \"ID_hash; POINT_X; POINT_Y\", \"\")\n self.nodesDict = dict()\n numNodes = int(arcpy.GetCount_management(PublicTransit.RELEVANT_NODES).getOutput(0))\n print \"Found %d nodes\" % numNodes\n for node in nodes:\n self.nodesDict[node.ID_hash] = Node(node.ID_hash, node.POINT_X, node.POINT_Y)\n del node\n del nodes", "def process_data(self, json_dict: dict):\n all_token_ids = []\n all_level_ids = []\n all_synset_ids = []\n all_lemma_ids = []\n 
all_is_highway = []\n all_targets = []\n\n def tokenize(lemma_):\n return self.tokenizer(\n lemma_,\n add_special_tokens=False,\n truncation=True,\n is_split_into_words=True,\n return_token_type_ids=False,\n ).input_ids\n\n def add_lemma(lemma_, abs_level_, synset_id_, is_highway_):\n lemma_token_ids = tokenize([lemma_])\n n_tokens_ = len(lemma_token_ids)\n token_ids.extend(lemma_token_ids)\n level_ids.extend([self.level_to_id[abs_level_]] * n_tokens_)\n synset_ids.extend([synset_id_] * n_tokens_)\n lemma_ids.extend([lemma_ids[-1] + 1] * n_tokens_)\n is_highway.extend([is_highway_] * n_tokens_)\n\n # Go through all JSON entries\n for synset in tqdm(json_dict.values()):\n token_ids = []\n level_ids = []\n synset_ids = [0]\n lemma_ids = [0]\n is_highway = []\n\n lemmas = [l.replace(\"_\", \" \") for l in synset[\"lemmas\"]]\n abs_level = (\"current\", \"current\")\n\n # Save all lemmas of the current node\n synset_token_ids = self.tokenizer.batch_encode_plus(lemmas,\n add_special_tokens=False,\n return_token_type_ids=False).input_ids\n all_targets.append(synset_token_ids)\n\n for level in (\"hypernyms\", \"hyponyms\"):\n for sub_synset in synset[level].values():\n if \"lemmas\" in sub_synset:\n lemmas = [l.replace(\"_\", \" \") for l in sub_synset[\"lemmas\"]]\n abs_level = (level, \"current\")\n synset_id = synset_ids[-1] + 1\n\n # Add the synset's lemma that is on highway\n highway_lemma = lemmas.pop(0)\n add_lemma(highway_lemma, abs_level, synset_id, True)\n\n # Add the synset's other lemmas\n for lemma in lemmas:\n add_lemma(lemma, abs_level, synset_id, False)\n\n for sub_level in (\"hypernyms\", \"hyponyms\"):\n for sub_sub_lemmas in sub_synset[sub_level].values():\n lemmas = [l.replace(\"_\", \" \") for l in sub_sub_lemmas]\n abs_level = (level, sub_level)\n synset_id = synset_ids[-1] + 1\n\n # Add the synset's lemma that is on highway\n highway_lemma = lemmas.pop(0)\n add_lemma(highway_lemma, abs_level, synset_id, True)\n\n # Add the synset's other lemmas\n for lemma in lemmas:\n add_lemma(lemma, abs_level, synset_id, False)\n\n # Append the global lists\n all_token_ids.append(token_ids)\n all_level_ids.append(level_ids)\n all_synset_ids.append(synset_ids[1:])\n all_lemma_ids.append(lemma_ids[1:])\n all_is_highway.append(is_highway)\n\n data = (\n all_token_ids,\n all_level_ids,\n all_synset_ids,\n all_lemma_ids,\n all_is_highway,\n all_targets\n )\n\n return data", "def transform_response_for_loading(response, schema, test_execute_start_time=None):\n if not test_execute_start_time:\n test_execute_start_time = datetime.utcnow()\n\n # flatten the dictionaries and add to flats list\n flats: list = list()\n nodes: list = nl('nodes', response)\n node_list: list = nodes[0]\n for unique_node in node_list:\n\n flat: dict = dict()\n v2_schema = [i[0] for i in schema]\n for key in v2_schema:\n flat.setdefault(key, '')\n flat['node_updateTimestamp'] = unique_node['updateTimestamp']\n flat['node_id'] = unique_node['id']\n\n dimensions: dict = dict()\n try:\n dimensions = unique_node['staticData']['dimensions']\n except KeyError:\n pass # handle with check for value below\n\n if dimensions:\n for key, value in dimensions.items():\n if not value:\n value = ''\n flat[key] = value\n\n # vessel in node\n vessel: dict = unique_node['staticData']\n for k, v in vessel.items():\n if k == \"updateTimestamp\":\n flat[\"staticData_updateTimestamp\"] = v\n elif k == 'timestamp':\n flat['staticData_timestamp'] = v\n else:\n if not k == 'dimensions':\n if not v:\n v = ''\n flat[k] = v\n\n # lastPositionUpdate 
in node\n lastPositionUpdate: dict = dict()\n try:\n lastPositionUpdate: dict = unique_node['lastPositionUpdate']\n except BaseException as e:\n logger.error(e)\n logger.error(\"Could be there is no lastPositionUpdate\")\n\n if lastPositionUpdate:\n for k, v in lastPositionUpdate.items():\n if k == \"updateTimestamp\":\n flat[\"lastPositionUpdate_updateTimestamp\"] = v\n elif k == 'timestamp':\n flat['lastPositionUpdate_timestamp'] = v\n else:\n if not v:\n v = ''\n flat[k] = v\n\n # currentVoyage in node\n currentVoyage: dict = dict()\n try:\n currentVoyage = unique_node['currentVoyage']\n except BaseException as e:\n logger.error(e)\n logger.error(\"Could be there is no currentVoyage\")\n if currentVoyage:\n for k, v in currentVoyage.items():\n if k == \"updateTimestamp\":\n flat['currentVoyage_updateTimestamp'] = v\n elif k == 'timestamp':\n flat['currentVoyage_timestamp'] = v\n elif k == 'matchedPort':\n try:\n flat['matchedPort_matchScore'] = currentVoyage['matchedPort']['matchScore']\n except (KeyError, TypeError):\n continue\n\n port: dict = dict()\n try:\n port = currentVoyage['port']\n except (KeyError, TypeError):\n continue\n centerPoint: dict = dict()\n try:\n centerPoint = port['centerPoint']\n\n except (KeyError, TypeError):\n continue\n if centerPoint:\n flat['port_name'] = centerPoint['matchedPort']['name']\n flat['port_unlocode'] = centerPoint['matchedPort']['unlocode']\n latitude = centerPoint['latitude']\n longitude = centerPoint['longitude']\n flat['matchedPort_latitude'] = latitude\n flat['matchedPort_long'] = longitude\n else:\n if not v:\n v = ''\n flat[k] = v\n try:\n # in case somehow these got into the dictionary\n del flat['dimensions']\n del flat['currentVoyage']\n del flat['matchedPort']\n except KeyError:\n pass\n flats.append(flat)\n return flats", "def get_nodes(wf_results):\n return {node.fullname: node for node in wf_results.nodes}", "def get_incident_nodes(self):\n # return the set of incident edges\n return \\\n {\n self.first_incident_node,\n self.second_incident_node\n }", "def get_structs(self, ignore=\"^(rel_|frame_)\"):\n triples = list(self.get_triples())\n # get nodes per predicate and roles per nodes\n predicates = {} # node : predicate (set of nodes)\n roles = defaultdict(list) # subject_node : [rel]\n nodes = {} # id : node\n for s,p,o in triples:\n sid, oid = int(s.id), int(o.id)\n nodes[sid] = s\n nodes[oid] = o\n if p == \"pred\":\n pred =(predicates.get(sid, {sid}) |\n predicates.get(oid, {oid}))\n for node in pred:\n predicates[node] = pred\n elif not re.match(ignore, p):\n if sid not in predicates:\n predicates[sid] = {sid}\n roles[sid].append((p, oid))\n # output a dict per predicate with nodes per role\n for pnodes in set(map(tuple, predicates.values())):\n pid = sorted(pnodes)[0]\n result = defaultdict(list) # list of nodes per role\n for node in pnodes:\n result['predicate'].append(nodes[node])\n for p, oid in roles[node]:\n node_ids = self.get_descendants(oid, triples, ignore=ignore)\n result[p] += [nodes[n] for n in node_ids]\n yield dict(result.iteritems()) # convert to regular dict", "def get_common():\n body: t.Any = request.json\n check_error({'input': {'first': {}, 'second': {}}}, body)\n response_first = rpc_search({'input': body['input']['first']})\n response_second = rpc_search({'input': body['input']['second']})\n\n modules_first = response_first['yang-catalog:modules']['module']\n modules_second = response_second['yang-catalog:modules']['module']\n\n if len(modules_first) == 0 or len(modules_second) == 0:\n abort(404, 
description='No hits found either in first or second input')\n\n output_modules_list = []\n names = []\n for mod_first in modules_first:\n for mod_second in modules_second:\n if mod_first['name'] == mod_second['name']:\n if mod_first['name'] not in names:\n names.append(mod_first['name'])\n output_modules_list.append(mod_first)\n if len(output_modules_list) == 0:\n abort(404, description='No common modules found within provided input')\n return {'output': output_modules_list}", "def all_ids(self) -> Set[int]:\n return {node_id for _, (node_id, _) in self.nodes.items()}", "def get_hits(nodes: Dict[int, PhyloNode], rank: str, taxids: List[int]) -> Dict[int, int]:\n\n hits = {}\n for taxid in taxids:\n if taxid not in nodes:\n continue\n hit = get_ancestor_of_rank(nodes[taxid], rank)\n # pigeonhole ancestors of taxons\n if not hit:\n continue\n if hit in hits:\n hits[hit] += 1\n else:\n hits[hit] = 1\n return hits", "def common_peers(self, i, j):\n ir = self.get(i, self.router.network)\n jr = self.get(j, self.router.network)\n \n if not ir or not jr:\n return []\n\n ir = [tuple(p['node']) for p in ir if p['transactions']]\n jr = [tuple(p['node']) for p in jr if p['transactions']]\n\n result = list(set(ir).intersection(jr))\n log(\"cmn: %s %s %i: %s\" % (i, j, len(result), result))\n return result", "def dict_get_nodekeys_recursive(d):\n nodekeys = set(d.keys())\n for nk in nodekeys:\n # print \"nodekey\", nk\n # print \"graphkeys\", d[nk]['params'].keys()\n if 'graph' in d[nk]['params']:\n # print \"graphkeys\", d[nk]['params']['graph'].keys()\n nodekeys = nodekeys.union(dict_get_nodekeys_recursive(d[nk]['params']['graph']))\n return nodekeys", "def intersection(llist_1, llist_2):\n hashmap = {}\n return_linked_list = LinkedList()\n node = llist_1.get_head()\n while node:\n hashmap[node.get_value()] = 0\n node = node.get_next()\n node = llist_2.get_head()\n while node:\n if node.get_value() in hashmap:\n if hashmap[node.get_value()] == 1:\n node= node.get_next()\n continue\n\n return_linked_list.append(node.get_value())\n hashmap[node.get_value()] = 1\n node = node.get_next()\n if return_linked_list.size() == 0:\n return 'No intersections found'\n return return_linked_list", "def getNodesAndDistances():\n\n\tglobal width, height\n\n\t# First we generate the list\n\n\tprint \"\\tGetting node list...\"\n\t\n\tnodeDict = {}\n\n\tfor y in range(height):\n\t\tfor x in range(width):\n\t\t\ttheType = getSquare(x, y)\n\n\t\t\tprint \"\\t\\tGetting list for node (%d, %d) of type %d...\" % (x, y, theType)\n\n\t\t\ttempList = getNodeList(x, y, theType)\n\n\t\t\tif tempList == []:\n\t\t\t\tprint \"\\t\\t\\tNo nodes here.\"\n\t\t\telse:\n\t\t\t\tfor i in range(len(tempList)):\n\t\t\t\t\tnode = tempList[i]\n\t\t\t\t\tnodeName = node[0]\n\t\t\t\t\tnodeDict[nodeName] = node[1:]\t# Everything but the first element\n\t\t\t\t\tprint \"\\t\\t\\tAdded node '%s'...\" % nodeName\n\n\tprint \"\\tDone getting node list (%d nodes)...\" % (len(nodeDict.keys()))\n\tprint \"\"\n\n\t# Now that we've got that, we get a list of pairs\n\n\tpairList = getPairList(nodeDict)\n\n\t# Now we calculate the distance between every pair of nodes that connect\n\n\tprint \"\"\n\tprint \"\\tCreateing dictionary of distances between connected nodes...\"\n\n\tdistanceDict = {}\n\n\tfor tuple in pairList:\n\t\t(nodeA, nodeB) = tuple\n\t\tprint \"\\t\\tCalculating distance between '%s' and '%s'...\" % (nodeA, nodeB)\n\t\tdistance = distanceBetween(nodeA, nodeB, nodeDict)\n\t\tpairName = \"%s%s\" % (nodeA, nodeB)\n\t\tdistanceDict[pairName] = 
distance\n\t\tprint \"\\t\\t\\tDistace was %f.\" % (distance)\n\n\tprint \"\\tDone creating dictionary of node differences (%d pairs).\" % (len(distanceDict.keys()))\n\n\treturn nodeDict, distanceDict", "def intersection(arrays):\n # Your code here\n hash = {}\n hash2 = {}\n for i in range(len(arrays[0])):\n hash[arrays[0][i]] = i\n\n for key in hash:\n if key in arrays[1]:\n hash2[key] = hash[key]\n print(hash2)\n \n for i in range(2, len(arrays)):\n for key in hash2:\n if key not in arrays[i]:\n hash2[key] = None\n\n list1 = [key for key in hash2 if hash2[key] != None] \n result = list1\n\n return result", "def get_answers(self):\r\n answers = {}\r\n for ielt in self.ielements:\r\n ie_id = ielt.get('id')\r\n answers[ie_id] = {'rectangle': ielt.get('rectangle'), 'regions': ielt.get('regions')}\r\n\r\n return answers", "def diff():\n return {\n '_meta': {\n 'civicpy_version': civicpy_version,\n 'metakb_version': __version__,\n 'date_harvested': date.today().strftime('%Y%m%d')\n },\n 'genes': {\n 'DELETE': [\n {\n \"id\": 3,\n \"name\": \"test_remove\"\n }\n ],\n 'INSERT': [],\n 'UPDATE': [\n {\n '2778': {\n 'aliases': {\n '$insert': [\n (1, 'MIF2')\n ]\n }\n\n }\n }\n ]\n },\n 'variants': {\n 'DELETE': [],\n 'INSERT': [],\n 'UPDATE': [\n {\n '27': {\n '$delete': ['entrez_name']\n }\n }\n ]\n },\n 'assertions': {\n 'DELETE': [],\n 'INSERT': [\n {\n \"id\": 1,\n \"description\": \"description\"\n }\n ],\n 'UPDATE': []\n },\n 'evidence': {\n 'INSERT': [],\n 'DELETE': [],\n 'UPDATE': [\n {\n \"358\": {\"variant_origin\": \"Somatic\"}\n }\n ]\n }\n }", "def intersect_categories_reduced(categories, set_nodes):\r\n\tcategories_reduced = {}\r\n\tfor key, values in categories.items():\r\n\t aux_list = set_nodes.intersection(values)\r\n\t if(len(aux_list) > 3500): # Each category must have more than 3500 nodes\r\n\t categories_reduced[key] = aux_list \r\n\treturn categories_reduced", "def _unconvert_hashring_nodes(self, hashring_nodes):\n results = []\n for service_node in hashring_nodes or []:\n node_data = {\n \"service_info\": service_node.service_info.to_json(),\n \"data\": service_node.data\n }\n node = self.hashring_watch.HashringNode(\n token=service_node.token,\n data=json.dumps(node_data))\n results.append(node)\n return results", "def data():\n result = {}\n for thread in DATA.threads:\n result[thread] = [formatNode(node) for node in DATA.threads[thread].tree]\n return json.dumps({\n 'checkpoints': DATA.checkpoints,\n 'threads': result\n })", "def ans():\n ret = {}\n for i in range(12):\n ret[ind[i]] = ans2[ind[i]]\n ret['id']=\"id\"\n return jsonify(ret)", "def flatten(orig):\n\n\t# Empty dictionary\n\tdata = {}\n\tfor c in orig['tree']['children']:\n\t\t# in operator\n\t\tif 'children' in c:\n\t\t\tfor c2 in c['children']:\n\t\t\t\tif 'children' in c2:\n\t\t\t\t\tfor c3 in c2['children']:\n\t\t\t\t\t\tif 'children' in c3:\n\t\t\t\t\t\t\tfor c4 in c3['children']:\n\t\t\t\t\t\t\t\tif (c4['category'] == 'personality'):\n\t\t\t\t\t\t\t\t\tdata[c4['id']] = c4['percentage']\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif (c3['category'] == 'personality'):\n\t\t\t\t\t\t\t\tdata[c3['id']] = c3['percentage']\n\n\treturn data", "def _convert_hashring_nodes(self, hashring_nodes):\n results = []\n for node in hashring_nodes or []:\n node_data = json.loads(node.data)\n service_info = ServiceInfo.from_json(node_data[\"service_info\"])\n service_node = ServiceHashringNode(\n token=node.token,\n service_info = service_info, \n data=node_data[\"data\"])\n results.append(service_node)\n return results", "def 
get_graph_dictionary(self):\n nodes = {}\n n = 0\n for node in self.__nodes:\n nodes[n] = tuple(node.get_data())\n n += 1\n\n edges = set()\n for edge in self.__edges:\n new_edge = (edge.get_node_a().get_id(), edge.get_node_b().get_id())\n edges.add(new_edge)\n\n graph_dict = {}\n graph_dict[\"nodes\"] = nodes\n graph_dict[\"edges\"] = edges\n\n return graph_dict", "def get_nodes_dict(self):\n return self._nodes_dict.copy()", "def to_dict(self):\n result = {\"document_name\": self.root_node()[\"meta\"], \"nodes\": []}\n for node, data in self.traverse():\n successors = list(self.successors(node))\n predecessors = list(self.predecessors(node))\n result[\"nodes\"].append(\n {\"key\": node, \"content\": data, \"successors\": successors, \"predecessors\": predecessors})\n return result", "def main():\n users = {}\n\n with open('../the_data.json', 'r') as f:\n the_data = json.loads(f.read())\n\n user_data = the_data['data']\n the_user_ids = json.loads(the_data['user_ids_list'])\n \n def p(vector):\n vector = json.loads(vector)\n return {field: vector[i] for i,field in enumerate(the_data['vector_fields'])}\n\n result = {}\n for step in range(1, 20):\n step = str(step)\n\n users = {}\n the_user_ids_for_this_step = []\n for uid in the_user_ids:\n try:\n users[uid] = p(user_data[uid][step])\n the_user_ids_for_this_step.append(uid) \n except:\n pass\n\n for user_id in the_user_ids_for_this_step: \n nearest = computeNearestNeighbor(user_id,\n users,\n distance_algorithm='minkowski')\n # print user_id\n if user_id not in result:\n result[user_id] = {}\n\n result[user_id][step] = nearest[:3]\n\n\n\n # print result\n\n for u in result.keys():\n woha = []\n print '%s, step_count: %s' % (u, user_data[u]['step_count'])\n ls = result[u].keys()\n ls.sort()\n for s in ls: \n print s\n for near in result[u][s]:\n if near[1] in woha:\n ulala = '>'*woha.count(near[1])\n else:\n ulala = ''\n woha.append(near[1])\n print '\\t'*int(s), '%s %s, %s, step_count: %s' % (ulala, near[1], near[0], user_data[near[1]]['step_count'])\n\n print", "def __find_union(dicts: list):\n global other_time\n start = time.time()\n union_list = []\n while len(dicts) > 1:\n word_count = {}\n final_dict = {}\n for d in dicts:\n # print(json.dumps(d, indent=4))\n for word in d:\n if word not in word_count:\n word_count[word] = 1\n else:\n word_count[word] = word_count[word] + 1\n for item in word_count:\n if word_count[item] == len(dicts):\n union_list.append(item)\n for d in dicts:\n for word in d:\n if word in union_list:\n if word not in final_dict:\n final_dict[word] = d[word]\n else:\n final_dict[word] += d[word]\n if len(final_dict) > 0:\n # print(len(dicts))\n end = time.time()\n diff = end - start\n other_time += diff\n # print(json.dumps(final_dict, indent=4))\n return (final_dict)\n else:\n\n dicts = dicts[:len(dicts) - 1]\n\n end = time.time()\n diff = end - start\n other_time += diff\n return {}", "def get_clusters(nodes: Dict[int, PhyloNode], hits: Dict[int, int], cluster_degree: int) \\\n -> Dict[PhyloNode, List[int]]:\n\n clusters = {}\n # pigeonhole ancestors of a specific degree of hits\n for taxid in hits.keys():\n cluster = nodes[taxid]\n for _ in range(cluster_degree):\n if cluster.parent is not None:\n cluster = cluster.parent\n if cluster in clusters:\n clusters[cluster].append(taxid)\n else:\n clusters[cluster] = [taxid]\n return clusters", "def _merge_identities(self, request):\n identities = []\n for agent in self.server.agents():\n response = agent.forward_request(request)\n\n for key_blob, key_comment in 
self._parse_identities(response):\n # Record where each identity came from\n hex_blob = ''.join('{:02x}'.format(b) for b in key_blob)\n if hex_blob in self._identity_map and \\\n self._identity_map[hex_blob] != agent:\n LOG.error(\"identity %s duplicated in %s and %s by %s\",\n hex_blob, agent, self._identity_map[hex_blob],\n self.username)\n\n self._identity_map[hex_blob] = agent\n\n identity = (key_blob, key_comment)\n identities.append(identity)\n\n return self._build_identities_answer(identities)", "def create_intersection_report(self, data=None):\n\n if not data:\n data = self.build_dict_of_sets(self.data_dict)\n\n self.intersection_report_data = []\n finished = []\n\n for part in data:\n for union_part in data:\n if part != union_part and union_part not in finished:\n union_set = data[part].intersection(data[union_part])\n self.intersection_report_data.append([part, union_part, union_set])\n finished.append(part)", "def linkage(self):\n self.tree = {}\n un_linked = []\n for i in range(len(self.leaves)):\n leaf = self.leaves[i]\n un_linked.append({\n 'id': i,\n 'x': 0,\n 'y': 0,\n 'value': 0,\n 'set': leaf,\n 'children': []\n })\n pass\n while len(un_linked) > 1:\n # for i in tqdm(range(len(un_linked))):\n # print(\"Linking... {} nodes left\".format(len(un_linked)))\n for node in un_linked:\n for d in node['set']:\n node['x'] += d['x']\n node['y'] += d['y']\n node['value'] += d['value']\n pass\n node['x'] /= len(node['set'])\n node['y'] /= len(node['set'])\n node['value'] /= len(node['set'])\n pass\n # min_dif = ((un_linked[1]['x'] - un_linked[0]['x']) ** 2 + (un_linked[1]['y'] - un_linked[0]['y']) ** 2) \\\n # * self._alpha + (un_linked[1]['value'] - un_linked[0]['value']) * (1 - self._alpha)\n min_dif = ((un_linked[1]['x'] - un_linked[0]['x']) ** 2 + (un_linked[1]['y'] - un_linked[0]['y']) ** 2)\n min_cp = [0, 1]\n for i in range(len(un_linked) - 1):\n for j in range(i + 1, len(un_linked)):\n # dif = self._alpha * ((un_linked[j]['x'] - un_linked[i]['x']) ** 2\n # + (un_linked[j]['x'] - un_linked[i]['x']) ** 2) \\\n # + (1 - self._alpha) * (un_linked[j]['value'] - un_linked[i]['value'])\n dif = ((un_linked[j]['x'] - un_linked[i]['x']) ** 2\n + (un_linked[j]['x'] - un_linked[i]['x']) ** 2)\n if dif < min_dif:\n min_dif = dif\n min_cp = [i, j]\n pass\n pass\n pass\n set_a = []\n for each in un_linked[min_cp[0]]['set']:\n set_a.append(each)\n pass\n for each in un_linked[min_cp[1]]['set']:\n set_a.append(each)\n pass\n next_un_linked = []\n new_children = []\n if len(un_linked[min_cp[0]]['children']) != 0:\n new_children.append({'children': un_linked[min_cp[0]]['children'],\n 'value': len(un_linked[min_cp[0]]['set'])})\n pass\n else:\n new_children.append({'id': un_linked[min_cp[0]]['id'],\n 'value': len(un_linked[min_cp[0]]['set'])})\n if len(un_linked[min_cp[1]]['children']) != 0:\n new_children.append({'children': un_linked[min_cp[1]]['children'],\n 'value': len(un_linked[min_cp[1]]['set'])})\n pass\n else:\n new_children.append({'id': un_linked[min_cp[1]]['id'],\n 'value': len(un_linked[min_cp[1]]['set'])})\n pass\n next_un_linked.append({\n 'x': 0,\n 'y': 0,\n 'value': 0,\n 'set': set_a,\n 'children': new_children\n })\n del un_linked[min_cp[0]]['set']\n del un_linked[min_cp[0]]['x']\n del un_linked[min_cp[0]]['y']\n # del un_linked[min_cp[0]]['value']\n del un_linked[min_cp[1]]['set']\n del un_linked[min_cp[1]]['x']\n del un_linked[min_cp[1]]['y']\n # del un_linked[min_cp[1]]['value']\n for s in range(len(un_linked)):\n if s not in min_cp:\n next_un_linked.append(un_linked[s])\n 
pass\n pass\n un_linked = next_un_linked\n pass\n del un_linked[0]['set']\n del un_linked[0]['x']\n del un_linked[0]['y']\n # del un_linked[0]['value']\n self.tree = un_linked[0]\n self._count = 0\n\n self.tree = self._resolve(self.tree)\n return", "def index_nodes(self):\n out = {}\n\n #avg = np.mean(list(self.rtype_vectors.values()),axis=0)\n\n\n #for name, node in self.nodes.items():\n # tmp1 = [self.rtype_vectors[rtype]\n # for rtype, dest in node.outgoing_relations] or [NULL_VEC()]\n # tmp2 = [permute_rtype_vector(self.rtype_vectors[rtype])\n # for rtype, prev in node.incoming_relations] or [NULL_VEC()]\n\n # net = tmp1 + tmp2\n\n # #out[name] = np.asarray(net).mean(axis=0)\n # #out[name] = np.asarray(net).sum(axis=0)\n # v = np.asarray(net).sum(axis=0)\n # if v.any():\n # out[name] = v/max(v)#softmax(v/max(v))\n # else:\n # out[name] = v\n\n\n #avg = np.mean(list(out.values()),axis=0)\n\n #maxm = np.max(list(out.values()),axis=0)\n\n ####normalize everything\n #for r,v in out.items():\n # if v.any():\n # #out[r] = v / sqrt(v.dot(v))\n # out[r] = softmax((v-avg)/maxm)\n\n\n\n # PCA method 0001701\n rmap = self.rtype_vectors\n data = np.zeros((len(self.nodes), JACCARD_DIMENSIONS), dtype=np.float)\n ix = 0\n for node in self.nodes.values():\n\n #compute weighted average of each relation type\n tmp = [rmap[rtype] for \n rtype, dest in node.outgoing_relations] + \\\n [permute_rtype_vector(rmap[rtype]) for \n rtype, prev in node.incoming_relations]\n\n v = np.asarray(tmp).mean(axis=0) if tmp else NULL_VEC()\n\n #normalize\n if v.any():\n data[ix] = v / sqrt(v.dot(v))\n else:\n data[ix] = v\n ix += 1\n\n #eliminate projection onto first 7 principal components\n d2 = data - PCA(data, 7)\n\n #order of nodes is preserved\n for i,v in enumerate(self.nodes):\n out[v] = softmax(d2[i])\n\n return out", "def headsofdiff(h1, h2):\n res = unfi.set(b'heads(%ln %% %ln)', h1, h2)\n return {ctx.node() for ctx in res}", "def get_nodes():\n\n host = str(request.args['host'])\n days = float(request.args['days'])\n\n to_time = int(time.time())\n to_day = int(time.strftime('%Y%m%d', time.gmtime(float(to_time))))\n from_time = to_time-int(days*24*60*60)\n from_day = int(time.strftime('%Y%m%d', time.gmtime(float(from_time))))\n day_in=''\n for x in range(from_day, to_day+1):\n day_in = day_in + ',' + str(x)\n day_in=re.sub(r\"^,\", \"\", day_in)\n day_in=re.sub(r\",$\", \"\", day_in)\n query = \"SELECT * FROM metrics WHERE host='\" + str(host) + \"' and date IN (\"\n query = query + str(day_in) + \") and time>=\" + str(int(int(from_time)*1000)) + \" and time<=\"\n query = query + str(int(int(to_time)*1000)) + \" ALLOW FILTERING\"\n rows = session.execute(query);\n reply={}\n last_value={}\n for r in rows:\n if str(r.host) not in reply:\n reply[r.host]={}\n last_value[r.host]={}\n if str(r.metric) not in reply[r.host]:\n reply[r.host][r.metric]=[]\n last_value[r.host][r.metric]=int(r.value)\n continue\n real_value = (r.value-last_value[r.host][r.metric])/60\n\tlast_value[r.host][r.metric]=int(r.value)\n reply[str(r.host)][r.metric].append({ 'value': int(real_value),\n 'time': str(r.time) })\n return json.dumps(reply)", "def merge_par_results(res):\n nres = {}\n for r in res:\n nres.update(r)\n return nres", "def get_induced_subgraph(graph: Graph, nodes: Nodes):\n return {node: graph[node].intersection(nodes) for node in nodes}", "def parse_json_result(out):\n\n result = json.loads(out)\n \n assert len(result['Call']) > 0\n assert len(result['Call'][0]['Witnesses']) > 0\n \n witness = 
result['Call'][0]['Witnesses'][0]['Value']\n \n class identitydefaultdict(collections.defaultdict):\n def __missing__(self, key):\n return key\n \n preds = collections.defaultdict(list)\n env = identitydefaultdict()\n \n for atom in witness:\n parsed,dummy = parse_terms(atom)\n preds[parsed[0]['predicate']].append(parsed)\n return preds", "def compute_nodeset(data):\n xset = NodeSet()\n for nodeset in data.split():\n xset.update(nodeset)\n return xset", "def get_master_data():\n data = RAW_RESPONSE.json()\n STATES = set()\n CITIES = set()\n DISTRICTS = set()\n for each in data[\"raw_data\"]:\n\n if each[\"detectedstate\"]!='':\n\n RAW_STATES.add(each[\"detectedstate\"])\n STATES.add(each[\"detectedstate\"].lower().strip().replace(\" \",\"\"))\n if each[\"detecteddistrict\"]!='':\n RAW_DISTRICTS.add(each[\"detecteddistrict\"])\n DISTRICTS.add(each[\"detecteddistrict\"].lower().strip().replace(\" \",\"\"))\n if each[\"detectedcity\"]!='':\n RAW_CITIES.add(each[\"detectedcity\"])\n CITIES.add(each[\"detectedcity\"].lower().strip().replace(\" \",\"\"))\n STATES = list(filter(None, STATES))\n DISTRICTS = list(filter(None, DISTRICTS))\n CITIES = list(filter(None, CITIES))\n\n return STATES, DISTRICTS, CITIES", "def get_feature_differences(self):\n feats = self.get_feature_sets()\n\n # create a dictionary storing the features which are distinct\n diff_dict = dict()\n\n # compare each dataset against each and collect differences\n for i in range(len(feats)):\n for j in range(i + 1, len(feats)):\n # take union from differences\n diff_dict[i, j] = feats[i].difference(feats[j]).union(feats[j].difference(feats[i]))\n\n return diff_dict", "def get_nodes_by_id(ntwrk, nodeid):\r\n return {k: v for el in ntwrk\r\n for k, v in el.items() if k == nodeid}", "def _match_identical_nodes(self):\n\n for job_name_b in self._topo_b_nodes:\n for job_name_a in self._unresolved_a_nodes:\n if self._is_node_identical(job_name_a, job_name_b):\n self._identical_nodes[job_name_b] = job_name_a\n self._unresolved_a_nodes.remove(job_name_a)\n self._unresolved_b_nodes.remove(job_name_b)\n break", "def map_uses(self):\n out = {}\n for node in self.nodes.values():\n baddies = set()#track incomplete connections and relegate to attributes\n for rtype, dest in node.outgoing_relations:\n try:\n self.nodes[dest].add_predecessor(rtype, node.name)\n out.setdefault(rtype, set()).add((node.name, dest))\n except KeyError:\n baddies.add((rtype, dest))\n for rtype, dest in baddies:\n node.remove_relation(rtype, dest)\n node.add_attribute(rtype, dest)\n\n atc = node.attributes.copy()\n #check if any attributes have corresponding nodes\n for atype, attrib in atc:\n if attrib in self.nodes:\n node.remove_attribute(atype, attrib)\n node.add_relation(atype, attrib)\n self.nodes[attrib].add_predecessor(atype, node.name)\n out.setdefault(atype, set()).add((node.name, attrib))\n \n return out", "def _process_nodes(self):\n # Sort the nodes by metanode type, then by id\n self.node_df = self.node_df.sort_values(['label', 'id']).reset_index(drop=True)\n # Get all the ids\n self.nodes = self.node_df['id']\n # Get mapping from the index to the node ID (one to many so need different one for each node type)\n self.index_to_nid = dict()\n for group_name, group in self.node_df.groupby('label'):\n self.index_to_nid[group_name] = group['id'].reset_index(drop=True).to_dict()\n # Get the reverse mapping (many to one so don't need to separate based on type).\n self.nid_to_index = dict()\n for mapper in self.index_to_nid.values():\n for index, nid in 
mapper.items():\n self.nid_to_index[nid] = index\n # Finally, we need a mapper from id to node type\n self.id_to_metanode = self.node_df.set_index('id')['label'].to_dict()\n # And from node type to a list of ids\n self.metanode_to_ids = dict()\n for group_name, group in self.node_df.groupby('label'):\n self.metanode_to_ids[group_name] = group['id'].tolist()\n # One more mapper of id to name\n self.nid_to_name = self.node_df.set_index('id')['name'].to_dict()", "def all_in_edges_of_node(self, id1: int) -> dict:\n return self.edges_in[id1]", "def get_common_ipv4():\n common_ips_set = set() # Set to eliminate duplicates\n try:\n for i in get_data():\n if 'ips' in i and i['serviceArea'] == 'Common' and i[\"expressRoute\"] == True:\n common_ips = i['ips']\n for j in common_ips:\n if type(ipaddress.ip_network(j)) is ipaddress.IPv4Network:\n common_ips_set.add(j)\n common_ips_lst = list(common_ips_set)\n common_ips_dic = {'microsoft365CommonAndOfficeOnlineIPv4': common_ips_lst}\n return get_json(common_ips_dic)\n except ValueError as e:\n print(e)\n except TypeError as e:\n print(e)", "def evaluate_predictions_from_jsons( # TODO: Change name to end2end_evaluate ?\n ground_truth_tree_jsons: List[Dict],\n prediction_tree_jsons: List[Dict],\n node_types: List[str] = None,\n) -> Dict[str, float]:\n node_types = node_types or [\"ml\", \"module\", \"model\"]\n\n ground_truth_trees = TreeNode.read_from_jsons(ground_truth_tree_jsons, [])\n predictions_trees = TreeNode.read_from_jsons(prediction_tree_jsons, [])\n\n node_type_to_percentage_errors = {}\n for node_type in node_types:\n assert node_type in (\"model\", \"module\", \"ml\")\n\n id_to_gold_energy = {}\n for tree in ground_truth_trees:\n for attribute_object in tree.get_subtree_nodes_attributes(\n [node_type], [\"id\", \"gold_energy\"]\n ):\n id_to_gold_energy[attribute_object[\"id\"]] = attribute_object[\n \"gold_energy\"\n ]\n\n id_to_predicted_energy = {}\n for tree in predictions_trees:\n for attribute_object in tree.get_subtree_nodes_attributes(\n [node_type], [\"id\", \"predicted_energy\"]\n ):\n id_to_predicted_energy[attribute_object[\"id\"]] = attribute_object[\n \"predicted_energy\"\n ]\n\n expected_ids = id_to_gold_energy.keys()\n gold_energies = [id_to_gold_energy[id_] for id_ in expected_ids]\n predicted_energies = []\n for id_ in expected_ids:\n predicted_energy = id_to_predicted_energy.get(id_, None)\n\n if not predicted_energy:\n print(\n f\"WARNING: No predicted energy found for node-id {id_}. 
Force setting 0.\"\n )\n predicted_energy = 0\n\n predicted_energies.append(predicted_energy)\n\n percentage_error = get_percentage_error_list(gold_energies, predicted_energies)\n node_type_to_percentage_errors[node_type] = round(percentage_error, 2)\n\n return node_type_to_percentage_errors", "def jsonify(node) -> Dict:\n return {\n **{\n \"u{0}\".format(i):\n ui.tolist() for (i, ui) in enumerate(node.u)\n },\n **{\n \"observed\": node.observed\n },\n **{\n \"phi{0}\".format(i):\n phii.tolist() for (i, phii) in enumerate(node.phi)\n },\n **{\n \"f\": node.f.tolist(),\n \"g\": node.g.tolist()\n }\n }", "def diff_sets(self):\n self.difference = self.urls_from_json - self.urls_from_datastore", "def get_keys_from_list():\n json_data = request.get_json()\n\n d = dict()\n d['elements'] = list()\n settings.setOptionsFile(get_info('uid'))\n fn = settings.getHistoROOTFileName()\n rfn = settings.getReferenceROOTFileName()\n# open root file stored in the root database\n f = ROOT.TFile(fn)\n# open reference root file stored in the root database\n rf = ROOT.TFile(rfn)\n\n for values in json_data.itervalues():\n for k in values:\n subd = dict()\n subd[\"index\"] = k[\"index\"]\n if fn != k[\"file\"]: \n fn = k[\"file\"]\n settings.setHistoROOTFileName(fn)\n f = ROOT.TFile(fn)\n print \"histogram :>>>>>: \",k[\"histogram\"]\n subd[\"data\"] = eval(cppyy.gbl.getDictionary(f,k[\"histogram\"]))\n if rfn != k[\"referenceFile\"]: \n rfn = k[\"referenceFile\"]\n settings.setReferenceROOTFileName(rfn)\n rf = ROOT.TFile(rfn)\n subd[\"refdata\"] = eval(cppyy.gbl.getDictionary(rf,k[\"reference\"]))\n d['elements'].append(subd)\n\n f.Close()\n rf.Close()\n\n return jsonify(d)", "def _create_connections(self):\n self.predecessors = {}\n self.successors = {}\n for nd in self.nodes:\n self.predecessors[nd.name] = []\n self.successors[nd.name] = []\n\n for (nd_out, nd_in) in self.edges:\n self.predecessors[nd_in.name].append(nd_out)\n self.successors[nd_out.name].append(nd_in)", "def extract_hinge_nodes(status, nodes_sign, with_distances=False):\n res = {}\n for v, (_, distance_from_root, nb_incident_hinge_lines, _) in iteritems(status):\n if v in nodes_sign or nb_incident_hinge_lines >= 3:\n res[v] = distance_from_root\n if with_distances:\n return res\n return [_[0] for _ in sorted(iteritems(res), key=lambda x: (x[1], x[0]))]", "def findSameSubtrees(self):\n\n collapsable = {}\n\n for i in range(0, len(list(self.nodes))):\n for j in range(i + 1, len(list(self.nodes))):\n # Be careful, non-zero based indexing here\n if self.isSameTree(self.nodes[i + 1], self.nodes[j + 1]):\n # Note time complexity of isSameTree\n collapsable[self.nodes[i + 1]] = self.nodes[j + 1]\n\n return collapsable", "def __get_merge_nodelist(induced_ordering):\n duplicate = list(induced_ordering) # create a copy of the list containing the induced ordering\n return {duplicate.pop(), duplicate.pop()} # return the vertices corresponding to the s-t cut", "def iter_node_map(self):\n return self.d_inv.keys()", "def match_cluster_sets(cs1, cs2):\n\n matr = [[len(cl1.bibs & cl2.bibs) for cl2 in cs2.clusters] for cl1 in cs1.clusters]\n mapping = maximized_mapping(matr)\n return dict((cs1.clusters[mappy[0]], cs2.clusters[mappy[1]]) for mappy in mapping)", "def aggregate_interference_results(interference_results: List[Dict]) -> Dict:\n if not interference_results:\n return {}\n\n inr_current_scan: DefaultDict = defaultdict(int)\n inr_n_days_scan: DefaultDict = defaultdict(int)\n for result in interference_results:\n network_name = 
result[\"network_name\"]\n inr_db = result[\"inr_curr_power\"].get(\"snr_avg\")\n if inr_db is None:\n continue\n\n if result[\"is_n_day_avg\"]:\n inr_n_days_scan[(result[\"rx_node\"], result[\"rx_from_node\"])] += pow(\n 10, inr_db / 10\n )\n else:\n inr_current_scan[(result[\"rx_node\"], result[\"rx_from_node\"])] += pow(\n 10, inr_db / 10\n )\n\n aggregated_results: Dict = {\n \"current\": get_link_inr(network_name, inr_current_scan),\n \"n_day_avg\": get_link_inr(network_name, inr_n_days_scan),\n }\n return aggregated_results", "def get_viewpoints(\n data, graphs, feature_reader,\n):\n scan_list = set(item[\"scan\"] for item in data)\n viewpoints = {}\n for scan in scan_list:\n graph_viewpoints = set(graphs[scan].nodes())\n feats_viewpoints = feature_reader.viewpoints[scan]\n viewpoints[scan] = feats_viewpoints.intersection(graph_viewpoints)\n return viewpoints", "def intersection(set_1, set_2):\n intersection_list = []\n\n for number in set_1:\n if number in set_2:\n intersection_list.append(number)\n \n print(\"Intersection:\", intersection_list)\n return set_1, set_2", "def get_nodes(self, uri):\n node = self.get(uri)\n\n _nodes = {}\n for k, v in node.items():\n if isinstance(v, dict):\n _nodes[k] = v\n\n return _nodes", "def test_disjoint_edges(self):\n path = os.path.join(get_file_dir(), 'data', 'GO_edges_disjoint_from.json')\n with open(path, 'rt') as json_file:\n json_files = []\n for data in json_file:\n json_files.append(json.loads(data))\n for entry in json_files:\n if entry[\"id\"] == \"GO:0044848__GO:0051179__disjoint_from\":\n self.assertEqual(entry[\"from\"], \"GO_term/GO:0044848\")\n self.assertEqual(entry[\"to\"], \"GO_term/GO:0051179\")", "def cluster(self):\n\t\tself.index[\"cluster\"] = {}\n\n\t\tfor item in self.index[\"items\"]:\n\t\t\tself.index[\"cluster\"][item] = [{\"weight\" : float(len(set(self.index[\"items\"][item]).intersection( set(self.index[\"items\"][id]))))/float(len(self.index[\"items\"][item])) , \"name\" : id, \"authority\" : set(self.index[\"items\"][item]).intersection( set(self.index[\"items\"][id])) } for id in self.index[\"items\"] if id != item and len(set(self.index[\"items\"][item]).intersection( set(self.index[\"items\"][id]))) >= 1]\n\n\t\treturn self.index", "def get_locations(self):\n self.locations = {} # reset dictionary\n for node in self.extant_p:\n if node.host not in self.locations:\n self.locations.update({node.host: []})\n self.locations[node.host].append(node)", "def to_json(self):\n\n tcluster = {\"clusters\": [], \"matchings\": None}\n if self.matching is not None:\n tcluster[\"matchings\"] = self.matching\n elif self.matched is not None:\n tcluster[\"matchings\"] = self.matched\n\n for tid in self.get_observation_ids():\n ct = self.get_clustering_at(tid)\n partition = {\n \"tid\": tid,\n \"communities\": ct.named_communities,\n \"algorithm\": ct.method_name,\n \"params\": ct.method_parameters,\n \"overlap\": ct.overlap,\n \"coverage\": ct.node_coverage,\n }\n tcluster[\"clusters\"].append(partition)\n\n return json.dumps(tcluster)", "def getIntersections(self):\n\t\treturn self.intersections", "def network_to_dict(self):\n return reduce(lambda x,y: x.update(y) or x, \n [self.downstream(root) for root in self.roots])", "def get_nodes(self):\n self.get_status()\n old_api = self.version[0] <= '3'\n if old_api:\n certs_path = \"%s/certificate_statuses/*\" % (self.environment)\n nodeinfo_path_tpl = \"{env}/node/{node}\"\n else:\n certs_path = \"puppet-ca/v1/certificate_statuses/no_key?environment=%s\" % (self.environment)\n 
nodeinfo_path_tpl = \"puppet/v3/node/{node}?environment={env}\"\n\n csts = self._send('GET', certs_path)\n nodes_names = []\n for cst in csts:\n nodes_names.append(cst['name'])\n\n all_nodes = []\n for nname in nodes_names:\n path = nodeinfo_path_tpl.format(node=nname, env=self.environment)\n nodeinfo = self._send('GET', path)\n if old_api:\n nodeinfo = self._from_pson(nodeinfo['data'])\n else:\n nodeinfo = self._from_pson(nodeinfo)\n if 'parameters' in nodeinfo:\n node = nodeinfo['parameters']\n if self.onlynodes:\n if not (node.get('hostname') in self.onlynodes or\n node.get('ipaddress') in self.onlynodes or\n node.get('fqdn') in self.onlynodes or\n node.get('uuid') in self.onlynodes):\n continue\n all_nodes.append(node)\n\n return all_nodes", "def merge_results(res1, res2):\n empty = []\n keys = set(res1).union(res2)\n return dict((k, res1.get(k, empty) + res2.get(k, empty)) for k in keys)", "def ConstrDict(raw_data):\n if (path.exists(\"processed_out.txt\") and\n path.exists(\"processed_in.txt\")):\n with open(\"processed_out.txt\") as out:\n global out_edges\n out_edges = pickle.load(out)\n with open(\"processed_in.txt\") as fin:\n global in_edges\n in_edges = pickle.load(fin)\n print len(in_edges.keys())\n with open(\"nodes.txt\") as n:\n global nodes\n nodes = pickle.load(n)\n print \"nodes: \", len(nodes)\n else:\n # read each line and construct a dictionary to store\n # sources and destinations\n for line in raw_data: \n splitted_line = line.split()\n # source is the first element in a line, the rest of elements\n # are destinations\n threshold = 10000\n src, dests = splitted_line[0], splitted_line[1:threshold]\n # if src is not in the dictionary, create a key-value pair for\n # this src\n out_edges.setdefault(src, set())\n\n # put all destinations into the list of the corresponding src\n out_edges[src].update(set(dests))\n\n # construct a set to store all nodes appearing\n nodes.add(src)\n nodes.update(set(dests))\n\n # create the list of inedges for each node\n for i in out_edges[src]:\n in_edges.setdefault(i, set())\n in_edges[i].add(src)\n\n nodes = list(nodes)\n # shuffle the order of nodes\n shuffle(nodes)\n\n with open(\"processed_out.txt\", \"wb\") as out:\n pickle.dump(out_edges, out)\n with open(\"processed_in.txt\", \"wb\") as fin:\n pickle.dump(in_edges, fin)\n with open(\"nodes.txt\", \"wb\") as n:\n pickle.dump(nodes, n)\n\n\n # construct edge list\n for src, dests in out_edges.iteritems():\n pairs = [(src, dest) for dest in dests if (src, dest) not in\n exists]\n edges.extend(pairs)", "def nodes(self):\n return set(self.values())", "def diff_json(response_data, assert_data):\n if isinstance(response_data, dict):\n \"\"\" dict format \"\"\"\n for key in assert_data:\n if key not in response_data:\n info = \"โŒ Response data has no key: {}\".format(key)\n print(info)\n AssertInfo.data.append(info)\n for key in response_data:\n if key in assert_data:\n \"\"\" recursion \"\"\"\n diff_json(response_data[key], assert_data[key])\n else:\n info = \"๐Ÿ’ก Assert data has not key: {}\".format(key)\n print(info)\n elif isinstance(response_data, list):\n \"\"\" list format \"\"\"\n if len(response_data) == 0:\n print(\"response is []\")\n if len(response_data) != len(assert_data):\n print(\"list len: '{}' != '{}'\".format(len(response_data), len(assert_data)))\n\n if isinstance(response_data[0], dict):\n response_data = sorted(response_data, key=lambda x: x[list(response_data[0].keys())[0]])\n else:\n response_data = sorted(response_data)\n if isinstance(assert_data[0], 
dict):\n assert_data = sorted(assert_data, key=lambda x: x[list(assert_data[0].keys())[0]])\n else:\n assert_data = sorted(assert_data)\n\n for src_list, dst_list in zip(response_data, assert_data):\n \"\"\" recursion \"\"\"\n diff_json(src_list, dst_list)\n else:\n if str(response_data) != str(assert_data):\n info = \"โŒ Value are not equal: {}\".format(response_data)\n print(info)\n AssertInfo.data.append(info)", "def json_flat_diff(a, b):\r\n\r\n res_a = {}\r\n res_b = {}\r\n for key in set(a.keys()).union(set(b.keys())):\r\n a_value = a.get(key)\r\n b_value = b.get(key)\r\n if a_value != b_value:\r\n res_a[key] = a_value\r\n res_b[key] = b_value\r\n # Mind the parentheses below lest you return ({}, None) if res_a is None.\r\n return (res_a, res_b) if res_a != {} else None", "def update_intersections(self, start_node, end_node):\n\n # paths is a list that will contains a list of shortest paths (list of nodes)\n try:\n path = nx.shortest_path(self.map, start_node, end_node)\n for node in range(len(path)-1):\n start = path[node]\n end = path[node+1]\n self.map.nodes[start]['count'] += 1\n self.map.nodes[end]['count'] += 1\n self.map.edges[start, end, 0]['count'] += 1\n except:\n print(\"Cannot find path between, \", start_node, end_node)\n \n\n # increment edge count for an edge denoted by path", "def _build_nodes_dict(self, graph):\n nodes_dict = {}\n for node, data in graph.nodes_iter(data=True):\n nodes_dict.update({node: data['label']})\n return nodes_dict", "def dependencies(self):\n tree_to_heads = {}\n for tree in reversed(list(self.all_subtrees())):\n if len(tree):\n head = tree.head()\n assert head.span() in tree_to_heads\n tree_to_heads[tree.span()] = tree_to_heads[head.span()]\n\n for subtree in tree:\n subhead = tree_to_heads[subtree.span()]\n if subhead.span() != head.span():\n yield (head, subhead)\n else:\n tree_to_heads[tree.span()] = tree", "def org_diff(lst_dicts, media_type, main_server):\n diff_dict = {}\n # todo-me pull posters from connected servers\n\n for mtype in media_type:\n meta_lst = []\n seen = {}\n missing = []\n unique = []\n print('...combining {}s'.format(mtype))\n for server_lst in lst_dicts:\n for item in server_lst[mtype]:\n if mtype == 'movie':\n title = u'{} ({})'.format(item.title, item.year)\n else:\n title = item.title\n\n # Look for duplicate titles\n if title not in seen:\n seen[title] = 1\n meta_lst.append(get_meta(item))\n else:\n # Duplicate found\n if seen[title] >= 1:\n # Go back through list to find original\n for meta in meta_lst:\n if meta['title'] == title:\n # Append the duplicate server's name\n meta['server'].append(item._server.friendlyName)\n thumb_url = '{}{}?X-Plex-Token={}'.format(\n item._server._baseurl, item.thumb, item._server._token)\n meta['thumb'].append(thumb_url)\n seen[title] += 1\n # Sort item list by Plex rating\n # Duplicates will use originals rating\n meta_lst = sorted(meta_lst, key=lambda d: d['rating'], reverse=True)\n diff_dict[mtype] = {'combined': {\n 'count': len(meta_lst),\n 'list': meta_lst}}\n\n print('...finding {}s missing from {}'.format(\n mtype, main_server))\n for item in meta_lst:\n # Main Server name is alone in items server list\n if main_server not in item['server']:\n missing.append(item)\n # Main Server name is absent in items server list\n elif main_server in item['server'] and len(item['server']) == 1:\n unique.append(item)\n diff_dict[mtype].update({'missing': {\n 'count': len(missing),\n 'list': missing}})\n\n print('...finding {}s unique to {}'.format(\n mtype, main_server))\n 
diff_dict[mtype].update({'unique': {\n 'count': len(unique),\n 'list': unique}})\n\n return diff_dict", "def get_synset_stats(\n synsets_to_urls: Dict[str, List[str]]\n) -> Dict[str, Any]:\n stats = {}\n for synset in synsets_to_urls:\n urls = synsets_to_urls[synset]\n n_urls = len(urls)\n n_flickr_urls = 0\n for url in urls:\n if url.find('flickr') != -1:\n n_flickr_urls += 1\n stats[synset] = {\"n_urls\": n_urls, \"n_flickr_urls\": n_flickr_urls}\n return stats", "def intersect(self, rays):\n raise NotImplementedError", "def differentNTuplesForNode(ntupleSet,nodeList,verbose=False):\n ntuplesPerNode = dict(zip(nodeList,[[] for n in range(len(nodeList))]))\n for ntuple in ntupleSet:\n ntuple.sort()\n joinedTuple = \"\".join(ntuple)\n for nodeInTuple in ntuple:\n ntuplesPerNode[nodeInTuple].append(joinedTuple)\n \n for a,v in ntuplesPerNode.iteritems():\n ntuplesPerNode[a] = set(v)\n \n return ntuplesPerNode", "def get_resource_results():\n resource_results = {}\n resource_results['checks'] = []\n resource_results['green'] = 0\n resource_results['red'] = 0\n resource_results['orange'] = 0\n resource_results['blue'] = 0\n resource_results['failed_accounts'] = 0\n resource_results['total_accounts'] = 0\n resource_results['total_checks'] = 0\n\n # Defaults for when no data is reported, working towards having modules be\n # modular / optional\n resource_results['blue_percent'] = 100\n resource_results['red_percent'] = 0\n resource_results['orange_percent'] = 0\n resource_results['green_percent'] = 0\n resource_results['working_percentage'] = 100\n\n # Check how many accounts failed from each module and add them to the\n # total failed accounts. If the data from a module is considered stale\n # then all of it's accounts will be considered failed.\n milliseconds_since_epoch = time.time() * 1000\n for module in get_all_data('resources_success:*'):\n module_success_json = get_data(module)\n module_success = json.loads(module_success_json)[0]\n resource_results['total_accounts'] += module_success['total_accounts']\n resource_results['total_checks'] += module_success['total_checks']\n milliseconds_since_epoch_module_data_is_valid_until = module_success['valid_until']\n if milliseconds_since_epoch > milliseconds_since_epoch_module_data_is_valid_until:\n resource_results['failed_accounts'] += module_success['total_accounts']\n logger.error('Data for {} is stale, please check the daemon is functioning properly'.format(module))\n else:\n resource_results['failed_accounts'] += module_success['failed_accounts']\n\n # We will count checks in so we can compare it against the number of checks\n # reported by the daemon\n checks_found = 0\n # Get list of keys in the format resources:module#uuid\n for host in get_all_data('resources:*'):\n try:\n # Storing lists with only one value since when I convert\n # dictionaries to json and store them in redis they come back as\n # strings, I am working around this by storing lists,\n # ast.literal_eval also works\n host_data = json.loads(get_data(host))[0]\n resource_results['checks'].append(host_data)\n # get the health status colour of the current check, and then add\n # one to the number of checks with that health status\n resource_results[host_data['health_status']] += 1\n checks_found += 1\n except Exception as e:\n # I would rather log to uwsgi's log but I'll sort this out later\n logger.error('Data for {} is not in a valid format: {}'.format(host, e))\n\n # If we are getting back old checks that are no-longer reporting hence\n # are not in the total_checks 
variable then they have failed.\n # If we are getting back less checks than we stored then something has\n # gone really wrong or we caught the weekly cron that clears the keys.\n if resource_results['total_checks'] != checks_found:\n logger.info('The number of checks stored in the database doesn\\'t '\\\n 'match the number reported by the daemon, it is likely some '\\\n 'servers are no-longer reporting, run '\\\n 'resources_list_unreporting_servers.py to look into this.')\n\n # The number of checks we are outputing is authoritive over the number\n # we expected to be there, at the moment we are just logging the fact they\n # were different, it would be nice to have a visual display or send an\n # email but there isn't a correct place to do this at the moment\n resource_results['total_checks'] = checks_found\n\n total_results = resource_results['green'] + resource_results['red'] + resource_results['orange'] + resource_results['blue']\n if total_results != 0:\n resource_results['red_percent'] = ( resource_results['red'] / total_results ) * 100\n resource_results['orange_percent'] = ( resource_results['orange'] / total_results ) * 100\n resource_results['blue_percent'] = ( resource_results['blue'] / total_results ) * 100\n # I want the percentage to always be 100 and green seems the most\n # disposable / least affected by any rounding issues\n resource_results['green_percent'] = 100 - ( resource_results['red_percent'] + resource_results['orange_percent'] + resource_results['blue_percent'] )\n\n resource_results['working_percentage'] = 100 - (( resource_results['failed_accounts'] / resource_results['total_accounts'] ) * 100 )\n resource_results['working_accounts'] = resource_results['total_accounts'] - resource_results['failed_accounts']\n logger.debug('working_percentage: {}'.format(resource_results['working_percentage']))\n return resource_results", "def get_common_ipv6():\n common_ips_set = set() # Set to eliminate duplicates\n try:\n for i in get_data():\n if 'ips' in i and i['serviceArea'] == 'Common' and i[\"expressRoute\"] == True:\n common_ips = i['ips']\n for j in common_ips:\n if type(ipaddress.ip_network(j)) is ipaddress.IPv6Network:\n common_ips_set.add(j)\n common_ips_lst = list(common_ips_set)\n common_ips_dic = {'microsoft365CommonAndOfficeOnlineIPv6': common_ips_lst}\n return get_json(common_ips_dic)\n except ValueError as e:\n print(e)\n except TypeError as e:\n print(e)", "def calc_distances(client_list):\n distances = {}\n for x in client_list:\n distances[x] = {}\n for y in client_list:\n distances[x][y] = dis(x, y)\n return distances", "def get_neighbors(graph):\n neighbor_dict = dict()\n for node in graph:\n neighbor_dict[node] = set(graph[node])\n return neighbor_dict", "def __find_correlations(self, results):\n\n for result in results[:self.__result_limit]:\n\n # pub without venue\n if len(result['ven']) == 0:\n result['alternative'] = []\n\n with self.vix.searcher(weighting=Frequency) as vs:\n vq_parse = QueryParser('key', self.vix.schema).parse(result['pub']['crossref'])\n tresult = vs.search(vq_parse, limit=None, )\n if len(tresult) != 0:\n result['ven'] = {}\n result['added'] = 1\n for attr in tresult[0].items():\n result['ven'][attr[0]] = attr[1]\n\n self.__output.append(result)\n\n # venue without pub or venue with a list of pubs\n elif len(result['pub']) == 0 or (\n isinstance(result['pub'], list) and len(result['pub']) > 1):\n result['alternative'] = []\n\n with self.pix.searcher(weighting=Frequency) as ps:\n pq_parse = QueryParser('crossref', 
self.pix.schema).parse(result['ven']['key'])\n tresult = ps.search(pq_parse, limit=None, )\n\n if len(tresult):\n plist = []\n tmp = dict()\n for el in tresult:\n for attr in el.items():\n if attr[0] == 'title' and attr[1] not in [x['title'] for x in result['pub']]:\n plist.append(attr[1])\n break\n\n result['alternative'] = plist\n self.__output.append(result)\n\n # mixed case\n elif len(self.__output) == 0 or not result['ven']['key'] in [x['key'] for x in self.__output]:\n lis = [x for x in results if len(x['ven']) and x['ven']['key'] == result['ven']['key']]\n tmp = {}\n if len(lis) <= 1:\n tmp = {'key': result['pub']['key'],\n 'score': result['score'],\n 'pub': [x['pub'] for x in lis],\n 'ven': result['ven'],\n 'alternative': list()}\n else:\n tmp = {'key': result['ven']['key'],\n 'score': result['score'],\n 'pub': [x['pub'] for x in lis],\n 'ven': result['ven'],\n 'alternative': list()}\n plist = []\n with self.pix.searcher() as ps:\n pq_parse = QueryParser('crossref', self.pix.schema).parse(tmp['key'])\n tresult = ps.search(pq_parse, limit=None, )\n if len(tresult):\n for el in tresult:\n for attr in el.items():\n if attr[0] == 'title' and attr[1] not in [x['title'] for x in tmp['pub']]:\n plist.append(attr[1])\n break\n\n tmp['alternative'] = plist\n self.__output.append(tmp)" ]
[ "0.57611847", "0.57074016", "0.5648599", "0.5608913", "0.55996966", "0.54121363", "0.539312", "0.5247284", "0.5185026", "0.51842594", "0.51647484", "0.51572686", "0.51484066", "0.5144395", "0.5093303", "0.5067902", "0.5056289", "0.5055382", "0.5027136", "0.5016611", "0.50049776", "0.49992225", "0.4978968", "0.49493864", "0.49396846", "0.49309868", "0.49214655", "0.4914161", "0.48999932", "0.48992977", "0.4892552", "0.48794404", "0.48768398", "0.48744082", "0.4869861", "0.4825929", "0.4819075", "0.48154446", "0.48143855", "0.48101348", "0.4789253", "0.47891802", "0.47885507", "0.47844106", "0.47665566", "0.47641113", "0.47627583", "0.47587466", "0.47582805", "0.47508216", "0.4747329", "0.47424603", "0.47419626", "0.4740783", "0.47393903", "0.47389296", "0.4730277", "0.4729513", "0.47274753", "0.47208166", "0.4719442", "0.47188738", "0.4708247", "0.46915054", "0.46899122", "0.46898934", "0.46897972", "0.46845332", "0.46833125", "0.46762553", "0.46745798", "0.4669126", "0.46672842", "0.4661652", "0.46525565", "0.46446085", "0.4637975", "0.46303543", "0.46289054", "0.46283978", "0.4627098", "0.46215352", "0.46206802", "0.46175784", "0.4606413", "0.46058813", "0.46038193", "0.46016625", "0.45956302", "0.4595194", "0.45909187", "0.4589613", "0.45889533", "0.45868623", "0.45856723", "0.458427", "0.45839643", "0.45770463", "0.45749918", "0.45702058" ]
0.6736763
0
Create matrix associated with kernel interpolation.
def interp_matrix(qpnts, spnts, npgrid, nsamp, deg_max): # Initialize A = np.zeros((nsamp,npgrid)) # Create matrix for i in xrange(nsamp): for j in xrange(npgrid): cosTheta = np.dot(spnts[i], qpnts[j]) if(abs(cosTheta)>1): cosTheta = np.sign(cosTheta) A[i,j] = inv_funk_radon_kernel(cosTheta, deg_max) return A
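A minimal runnable sketch of the construction in the interp_matrix document above, written for illustration only: it assumes a simple Gaussian function of the dot product as a stand-in kernel, since inv_funk_radon_kernel is not defined in the snippet, and the names gaussian_kernel and interp_matrix_demo are hypothetical helpers introduced just for this sketch.

import numpy as np

def gaussian_kernel(cos_theta, width=0.5):
    # Hypothetical stand-in for inv_funk_radon_kernel: a smooth function of
    # the cosine of the angle between a sample direction and a grid point.
    return np.exp(-(1.0 - cos_theta) / width)

def interp_matrix_demo(qpnts, spnts, kernel=gaussian_kernel):
    # qpnts: (npgrid, 3) unit vectors of the interpolation grid
    # spnts: (nsamp, 3) unit vectors of the sample directions
    # Each entry A[i, j] evaluates the kernel at the angle between sample i
    # and grid point j, mirroring the double loop in interp_matrix but vectorized.
    cos_theta = np.clip(spnts @ qpnts.T, -1.0, 1.0)   # (nsamp, npgrid)
    return kernel(cos_theta)

# Usage: random unit vectors for the grid and the samples.
rng = np.random.default_rng(0)
qpnts = rng.normal(size=(20, 3))
qpnts /= np.linalg.norm(qpnts, axis=1, keepdims=True)
spnts = rng.normal(size=(8, 3))
spnts /= np.linalg.norm(spnts, axis=1, keepdims=True)
A = interp_matrix_demo(qpnts, spnts)
print(A.shape)   # (8, 20): one row per sample, one column per grid point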
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _calculate_interpolation_matrix(self, X):\n G = np.zeros((len(X), self.hidden_shape))\n for data_point_arg, data_point in enumerate(X):\n for center_arg, center in enumerate(self.centers):\n if self.mode == 'gaus':\n G[\n data_point_arg, center_arg] = \\\n self._kernel_function_gaussian(\n self.sigma_g[center_arg], center,\n data_point, self.ampls[center_arg])\n elif self.mode == 'lorz':\n G[\n data_point_arg, center_arg] = \\\n self._kernel_function_lorentzian(\n self.sigma_l[center_arg], center,\n data_point, self.ampls[center_arg])\n elif self.mode == 'psed':\n G[\n data_point_arg, center_arg] = \\\n self._kernel_function_pseudovoigt(\n self.alpha[center_arg],\n self.sigma_g[center_arg],\n self.sigma_l[center_arg], center,\n data_point,\n self.ampls[center_arg])\n return G", "def interpolate_matrix(matrix):", "def _get_kernel_matrix(self, x):\n m = len(x)\n kernel_matrix = np.zeros((m, m))\n for i in range(m):\n for j in range(i, m):\n if self.kernel == 'quadratic':\n kernel_result = np.dot(x[i], x[j])\n if self.degree > 1:\n kernel_result = (1 + kernel_result)**self.degree\n else:\n kernel_result = np.exp(-1 * norm(x[i] - x[j])**2 / self.s**2)\n kernel_matrix[i, j] = kernel_matrix[j, i] = kernel_result\n\n return kernel_matrix", "def _gaussian_kernel_matrix(x, y, sigmas):\n def norm(v):\n return tf.reduce_sum(tf.square(v), 1)\n beta = 1. / (2. * (tf.expand_dims(sigmas, 1)))\n dist = tf.transpose(norm(tf.expand_dims(x, 2) - tf.transpose(y)))\n s = tf.matmul(beta, tf.reshape(dist, (1, -1)))\n kernel = tf.reshape(tf.reduce_sum(tf.exp(-s), 0), tf.shape(dist))\n return kernel", "def compute_kernel_matrix(x,y,sigma):\n m = len(x)\n\n s = np.zeros((m,m))\n for i in range(len(x)):\n for j in range(i+1):\n s[i,j] = np.exp(-((x[i]-y[j])**2)/(2*sigma**2))\n for i in range(2,m):\n for j in range(0,i):\n s[i,j] = s[j,i]\n return s", "def oned_bilinear(kernel,phi,test,w_g):\n M = np.dot(np.dot(test.T,np.diag(kernel*w_g)),phi)\n return M", "def interp_matrix_new(qpnts, spnts, npgrid, nsamp, deg_max):\n # Initialize\n A = np.zeros((nsamp,npgrid))\n\n # Create matrix\n for i in xrange(nsamp):\n for j in xrange(npgrid):\n cosTheta = np.dot(spnts[i], qpnts[j])\n if(abs(cosTheta)>1):\n cosTheta = np.sign(cosTheta)\n A[i,j] = inv_funk_radon_even_kernel(cosTheta, deg_max)\n return A", "def make_k_matrix(self):\r\n K = self.uv_vol + self.Epsilon * self.guv_vol + \\\r\n (self.Epsilon / self.Beta) * self.uv_bound\r\n return K", "def get_kernel_matrix(self):\n with self.get_lock().read_lock():\n return self._kernel", "def build_mat(self):\n for row, s in enumerate(self.S):\n for col, t in enumerate(self.T):\n\n if self.symmetric and row > col:\n pass\n\n else:\n self.mat[row, col] = self.kernel(s, t, self.n)\n\n if self.symmetric:\n self.mat = self.symmetrize(self.mat)\n else:\n for idx, s in enumerate(self.S):\n self.test_normalization[idx] = self.kernel(s, s, self.n)", "def kernel_matrix(self, x_i, x_j, jitter_dims=None):\n sq_dist = (tf.reshape(tf.reduce_sum(x_i ** 2, 1), [-1, 1])\n + tf.reduce_sum(x_j ** 2, 1)\n - 2 * tf.matmul(x_i, tf.transpose(x_j)))\n k = tf.exp(self.tf_log_amp) * tf.exp(-1.0/tf.exp(self.tf_log_length_scale) * sq_dist)\n if jitter_dims is not None:\n k += jitter * tf.eye(jitter_dims[0], jitter_dims[1])\n return k", "def formK(x, y, kernel, cl):\n\n if kernel == 'se':\n k = lambda x,y: np.exp(-np.sum((x-y)**2)/2/cl**2)\n else:\n raise('Kernel %s not implemented' %(kernel))\n\n # form kernel matrix\n K = np.zeros((x.shape[0], y.shape[0]))\n for i in range(len(x)):\n for j in 
range(len(y)):\n K[i,j] = k(x[i], y[j])\n\n return K", "def conv_matrix(matrix, kernel):", "def _build_interpolation_matrix(src_graph, dst_graph):\n\n ds = remap.compute_interpolation_weights(src_graph, dst_graph, method='conservative', normalization='fracarea') # destareaโ€™\n\n # Sanity checks.\n np.testing.assert_allclose(ds.src_grid_center_lat, src_graph.signals['lat'])\n np.testing.assert_allclose(ds.src_grid_center_lon, src_graph.signals['lon'])\n np.testing.assert_allclose(ds.dst_grid_center_lat, dst_graph.signals['lat'])\n np.testing.assert_allclose(ds.dst_grid_center_lon, dst_graph.signals['lon'])\n np.testing.assert_allclose(ds.src_grid_frac, 1)\n np.testing.assert_allclose(ds.dst_grid_frac, 1)\n np.testing.assert_allclose(ds.src_grid_imask, 1)\n np.testing.assert_allclose(ds.dst_grid_imask, 1)\n\n col = ds.src_address\n row = ds.dst_address\n dat = ds.remap_matrix.squeeze()\n # CDO indexing starts at 1\n row = np.array(row) - 1\n col = np.array(col) - 1\n weights = sparse.csr_matrix((dat, (row, col)))\n assert weights.shape == (dst_graph.n_vertices, src_graph.n_vertices)\n\n # Destination pixels are normalized to 1 (row-sum = 1).\n # Weights represent the fractions of area attributed to source pixels.\n np.testing.assert_allclose(weights.sum(axis=1), 1)\n # Interpolation is conservative: it preserves area.\n np.testing.assert_allclose(weights.T @ ds.dst_grid_area, ds.src_grid_area)\n\n # Unnormalize.\n weights = weights.multiply(ds.dst_grid_area.values[:, np.newaxis])\n\n # Another way to assert that the interpolation is conservative.\n np.testing.assert_allclose(np.asarray(weights.sum(1)).squeeze(), ds.dst_grid_area)\n np.testing.assert_allclose(np.asarray(weights.sum(0)).squeeze(), ds.src_grid_area)\n\n return weights", "def kernel_temp(self, x):\n x = np.atleast_2d(x)\n n, d = x.shape\n\n # Vectorized implementation\n kxx = self.kernel(x, x) # (n, n)\n assert_shape(kxx, (n, n))\n\n k_xx = np.zeros((n, n, d))\n k_x_x = np.zeros((n, n, d))\n\n for l in xrange(d):\n if l % 100 == 0:\n print \"\\tkxx, k_xx, k_x_x: l = %d ...\" % l\n\n neg_l_x = self.neg_inv(x, l)\n k_xx[:, :, l] = self.kernel(neg_l_x, x)\n k_x_x[:, :, l] = self.kernel(neg_l_x, neg_l_x)\n\n return [kxx, k_xx, k_x_x]", "def generator_matrix(self):\n self.generator_mat = np.zeros((self.k, self.n), dtype=int)\n A_matrix = np.ones((self.k, self.n-self.k), dtype=int)\n\n identity_i = np.identity(self.k, dtype=int)\n self.generator_mat[:, :self.k] = identity_i\n\n # This loop edits the A_matrix to make the column vectors linearly ind.\n for x in range(self.n-self.k):\n A_matrix[x, x] = 0\n\n self.generator_mat[:, self.k:] = A_matrix\n\n# for i in range(self.k):\n# print(self.generator_mat[i,:])\n\n return self.generator_mat", "def get_kernel_matrix(self, measurement_manager):\n # Matrix multiplication not used as matrix returned by\n # .get_probed_basis() may be too big to be stored in available memory.\n # The sum() builtin not used as it uses + operator instead of augmented\n # assignment, thus it may be less memory-efficient than the loop below.\n cross_kernel = 0\n for component, ROW in zip(self._field_components,\n self._pre_kernel):\n cross_kernel += np.outer(measurement_manager.probe(component),\n ROW)\n return cross_kernel", "def normalise_kernel(K_temp):\n\n nb_item = K_temp.shape[0]\n K_norm = np.zeros((nb_item, nb_item))\n for i in range(nb_item):\n for j in range(i, nb_item):\n K_norm[i, j] = K_temp[i, j] / math.sqrt(K_temp[i, i] * K_temp[j, j])\n K_norm[j, i] = K_norm[i, j]\n\n return K_norm", "def 
gaussian_kernel_matrix(x, y, sigmas):\n\n beta = 1. / (2. * (tf.expand_dims(sigmas, 1)))\n norm = lambda x: tf.reduce_sum(tf.square(x), 1)\n dist = tf.transpose(norm(tf.expand_dims(x, 2) - tf.transpose(y)))\n s = tf.matmul(beta, tf.reshape(dist, (1, -1)))\n return tf.reshape(tf.reduce_sum(tf.exp(-s), 0), tf.shape(dist))", "def _pmatrix(kn_u, kn_d, thickness):\n p = np.zeros((kn_u.size, 4, 4), np.complex128)\n\n p0 = np.exp(complex(0, 1) * kn_u * thickness)\n p1 = np.exp(complex(0, 1) * kn_d * thickness)\n\n p[:, 0, 0] = 1 / p0\n p[:, 1, 1] = p0\n p[:, 2, 2] = 1 / p1\n p[:, 3, 3] = p1\n\n return p", "def get_ky_mat(hyps: np.ndarray, name: str,\n kernel, cutoffs=None, hyps_mask=None,\n n_cpus=1, n_sample=100):\n\n training_data = _global_training_data[name]\n size = len(training_data)\n size3 = 3*size\n\n if (n_cpus is None):\n n_cpus = mp.cpu_count()\n if (n_cpus == 1):\n k_mat = get_ky_mat_pack(\n hyps, name, 0, size, 0, size, True,\n kernel, cutoffs, hyps_mask)\n else:\n\n # initialize matrices\n block_id, nbatch = partition_cr(n_sample, size, n_cpus)\n\n result_queue = mp.Queue()\n children = []\n for wid in range(nbatch):\n s1, e1, s2, e2 = block_id[wid]\n children.append(\n mp.Process(\n target=queue_wrapper,\n args=(result_queue, wid,\n get_ky_mat_pack,\n (hyps, name, s1, e1, s2, e2,\n s1==s2, kernel, cutoffs, hyps_mask\n )\n )\n )\n )\n\n # Run child processes.\n for c in children:\n c.start()\n\n # Wait for all results to arrive.\n k_mat = np.zeros([size3, size3])\n for _ in range(nbatch):\n wid, result_chunk = result_queue.get(block=True)\n s1, e1, s2, e2 = block_id[wid]\n k_mat[s1*3:e1*3, s2*3:e2*3] = result_chunk\n if (s1 != s2):\n k_mat[s2*3:e2*3, s1*3:e1*3] = result_chunk.T\n\n # Join child processes (clean up zombies).\n for c in children:\n c.join()\n\n # matrix manipulation\n del result_queue\n del children\n\n sigma_n, _, __ = obtain_noise_len(hyps, hyps_mask)\n\n ky_mat = k_mat\n ky_mat += sigma_n ** 2 * np.eye(size3)\n\n return ky_mat", "def bilinear_interpolation_kernel(in_channels, out_channels, ksize):\n\n factor = (ksize + 1) / 2\n if ksize % 2 == 1:\n center = factor - 1\n else:\n center = factor - 0.5\n og = np.ogrid[:ksize, :ksize]\n k = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)\n \n W = np.zeros((in_channels, out_channels, ksize, ksize)).astype(np.float32)\n W[range(in_channels), range(out_channels), :, :] = k\n return W", "def _generate_transform(self):\n A = np.zeros([self.m, self.n])\n idx = np.arange(self.m)\n for i in idx:\n A[i, max(i-self. k+1, 0):min(i+1, self.n)] = np.ones(\n min(i+1, self.n)-max(i-self. 
k+1, 0)) / self.k\n return A", "def get_kernel_matrix(self, x1, x2):\n hash_value = id(x1) + id(x2)\n if hash_value not in self._cached_norm_matrix:\n k = euclidean_distances(x1, x2, squared=True)\n self._cached_norm_matrix[hash_value] = k\n\n return numpy.exp(self._cached_norm_matrix[hash_value] * (-0.5 / (self.par_sigma ** 2)))", "def kernel(self):\n\n # Create a blank kernel the appropriate size\n kernel = np.zeros((self.n_rows, self.n_cols), dtype=np.int)\n\n # Iterate through the offsets, turning on the correct pixels\n for offset in self.offsets:\n row, col = offset\n if np.all(offset == self.index):\n kernel[row, col] = 2\n else:\n kernel[row, col] = 1\n\n # Ensure that the index pixel is not zero for footprints where the\n # index pixel is not part of the footprint\n if kernel[self.index[0], self.index[1]] == 0:\n kernel[self.index[0], self.index[1]] = 3\n return kernel", "def _compute_t_matrix(self):\n self.t_matrix = self._kronecker_product(\n tf.diag(tf.reshape(self.likelihood_variances, [-1])),\n tf.eye(self.n_points_int, dtype=tf.float64))\n return", "def CreateMatrix(self) -> BaseMatrix:", "def CreateMatrix(self) -> BaseMatrix:", "def gaussian_kernel_matrix(x, y, sigmas):\n\n beta = 1. / (2. * (tf.expand_dims(sigmas, 1)))\n norm = lambda x: tf.reduce_sum(input_tensor=tf.square(x), axis=1)\n dist = tf.transpose(a=norm(tf.expand_dims(x, 2) - tf.transpose(a=y)))\n s = tf.matmul(beta, tf.reshape(dist, (1, -1)))\n return tf.reshape(tf.reduce_sum(input_tensor=tf.exp(-s), axis=0), tf.shape(input=dist))", "def build_spmatrix(om, numpoints, im_size, grid_size, n_shift, order, alpha):\n spmat = -1\n\n ndims = om.shape[0]\n klength = om.shape[1]\n\n # calculate interpolation coefficients using kb kernel\n def interp_coeff(om, npts, grdsz, alpha, order):\n gam = 2 * np.pi / grdsz\n interp_dist = om / gam - np.floor(om / gam - npts / 2)\n Jvec = np.reshape(np.array(range(1, npts + 1)), (1, npts))\n kern_in = -1 * Jvec + np.expand_dims(interp_dist, 1)\n\n cur_coeff = np.zeros(shape=kern_in.shape, dtype=np.complex)\n indices = abs(kern_in) < npts / 2\n bess_arg = np.sqrt(1 - (kern_in[indices] / (npts / 2))**2)\n denom = special.iv(order, alpha)\n cur_coeff[indices] = special.iv(order, alpha * bess_arg) / denom\n cur_coeff = np.real(cur_coeff)\n\n return cur_coeff, kern_in\n\n full_coef = []\n kd = []\n for i in range(ndims):\n N = im_size[i]\n J = numpoints[i]\n K = grid_size[i]\n\n # get the interpolation coefficients\n coef, kern_in = interp_coeff(om[i, :], J, K, alpha[i], order[i])\n\n gam = 2 * np.pi / K\n phase_scale = 1j * gam * (N - 1) / 2\n\n phase = np.exp(phase_scale * kern_in)\n full_coef.append(phase * coef)\n\n # nufft_offset\n koff = np.expand_dims(np.floor(om[i, :] / gam - J / 2), 1)\n Jvec = np.reshape(np.array(range(1, J + 1)), (1, J))\n kd.append(np.mod(Jvec + koff, K) + 1)\n\n for i in range(len(kd)):\n kd[i] = (kd[i] - 1) * np.prod(grid_size[i + 1:])\n\n # build the sparse matrix\n kk = kd[0]\n spmat_coef = full_coef[0]\n for i in range(1, ndims):\n Jprod = np.prod(numpoints[:i + 1])\n # block outer sum\n kk = np.reshape(\n np.expand_dims(kk, 1) + np.expand_dims(kd[i], 2),\n (klength, Jprod)\n )\n # block outer prod\n spmat_coef = np.reshape(\n np.expand_dims(spmat_coef, 1) *\n np.expand_dims(full_coef[i], 2),\n (klength, Jprod)\n )\n\n # build in fftshift\n phase = np.exp(1j * np.dot(np.transpose(om),\n np.expand_dims(n_shift, 1)))\n spmat_coef = np.conj(spmat_coef) * phase\n\n # get coordinates in sparse matrix\n trajind = np.expand_dims(np.array(range(klength)), 1)\n 
trajind = np.repeat(trajind, np.prod(numpoints), axis=1)\n\n # build the sparse matrix\n spmat = coo_matrix((\n spmat_coef.flatten(),\n (trajind.flatten(), kk.flatten())),\n shape=(klength, np.prod(grid_size))\n )\n\n return spmat", "def _generate_distance_kernel_matrix(self):\n with self._rw_lock.read_lock():\n # Create matrix whose elements are the distances between all row\n # permutations\n fmat = self._feature_mat # shorter name\n num_rows = fmat.shape[0]\n\n # distance kernel is a square matrix based on feature samples\n dist_kernel = np.mat(np.ndarray((num_rows,)*2))\n self._log.info(\"Creating distance kernel with shape %s\",\n dist_kernel.shape)\n\n timer_log = logging.getLogger('.'.join((self.__module__,\n self.__class__.__name__,\n \"SimpleTimer\")))\n\n for i in xrange(num_rows - 1):\n with SimpleTimer('computing distances from row %d to [%d-%d]'\n % (i, i+1, num_rows-1), timer_log):\n dist_kernel[i, i] = 1.0\n for j in xrange(i + 1, num_rows):\n dist = self._histogram_intersection_distance(fmat[i],\n fmat[j])\n dist_kernel[i, j] = dist_kernel[j, i] = dist\n dist_kernel[-1, -1] = 1.0\n return dist_kernel", "def compute_matrix(self):\n\n fac = self.a / self.dx ** 2\n\n diagonal = np.ones(self.nx) * 2 * fac\n lower = np.ones(self.nx - 1) * -fac\n upper = np.ones(self.nx - 1) * -fac\n\n matrix = sp.diags(\n diagonals=[diagonal, lower, upper],\n offsets=[0, -1, 1], shape=(self.nx, self.nx),\n format='csr')\n\n return matrix", "def test_gauss_kernel():\n\n gauss = gauss_kernel(2, 5)\n\n assert gauss.shape == (5, 5)\n assert gauss[2, 2] == 0.039788735772973836", "def _calc_matrix(self):\n\t\tz = self.zoom\n\t\talloc = self.allocation\n\t\tif self.image:\n\t\t\tiw, ih = self.image.get_width(), self.image.get_height()\n\t\telse:\n\t\t\tiw, ih = 0, 0\n#\t\tif __debug__: print self._vadj.lower, self._vadj.value, self._vadj.upper\n\t\t\n\t\ti2w = cairo.Matrix(\n\t\t\tz,0,\n\t\t\t0,z,\n\t\t\t-self._hadj.value if alloc.width < iw*z else (alloc.width - iw*z)/2, \n\t\t\t-self._vadj.value if alloc.height < ih*z else (alloc.height - ih*z)/2,\n\t\t\t)\n\t\t\n\t\tself._i2w_matrix = i2w\n\t\t\n\t\tw2i = cairo.Matrix(*i2w) #copy\n\t\tw2i.invert()\n\t\tself._w2i_matrix = w2i", "def conv2mat(im_shape,kernel,format=\"channels_first\",dtype=_dtype):\n\tif len(im_shape)==2:\n\t\tim_size_y, im_size_x = im_shape\n\t\tnum_channels = 1\n\telse:\n\t\tif format==\"channels_last\":\n\t\t\tim_size_y, im_size_x, num_channels = im_shape\n\t\telse:\n\t\t\tnum_channels, im_size_y, im_size_x = im_shape\n\n\tker_size_y, ker_size_x = kernel.shape\n\tker_size = kernel.size\n\tkernel = kernel.ravel()\n\n\tmat_size_x = im_size_x * im_size_y\n\tmat = sp.dia_matrix( (mat_size_x,mat_size_x), dtype=dtype )\n\n\t# diagonal blocks corresponding to the rows of the kernel\n\tfor j in range(-(ker_size_y//2),ker_size_y//2+1):\n\t\t# diagonal of the block corresponding to the given row of the kernel\n\t\t# correlation\n\t\tdiag = sp.eye(im_size_y,im_size_y,-j,dtype=dtype)\n\t\t# # convolution\n\t\t# diag = sp.eye(im_size_y,im_size_y,-j,dtype=dtype)\n\n\t\t# contribution from convolution matrix corresponding to the given row of the kernel\n\t\tmat += sp.kron( diag, convmat(im_size_x,kernel[ker_size//2-ker_size_x//2-j*ker_size_x:ker_size//2-ker_size_x//2-(j-1)*ker_size_x],dtype=dtype) ) #.astype(dtype=dtype)\n\n\tif num_channels>1:\n\t\tif format==\"channels_last\":\n\t\t\tdiag_ch = sp.eye(num_channels,num_channels,dtype=dtype)\n\t\t\treturn sp.kron(mat,diag_ch) #.tocsr()\n\t\telse:\n\t\t\tdiag_ch = 
sp.eye(num_channels,num_channels,dtype=dtype)\n\t\t\treturn sp.kron(diag_ch,mat) #.tocsr()\n\n\treturn mat #.tocsr()", "def c_():\r\n c = np.array([[0, 0], [0, 100], [100, 100], [100, 80], [20, 80],\r\n [20, 20], [100, 20], [100, 0], [0, 0]])\r\n return c", "def transform(self,G):\n\n n = len(self.G_train_)\n nt = len(G)\n #Ks = sp.zeros((n,1))\n kernel_matrix = sp.zeros((nt,n))\n \n# for j in range(n):\n# Ks[j] = sp.sqrt(aGMKernel(self.G_train_[j],self.G_train_[j],self.alpha,self.gamma))\n# \n# for i in range(nt):\n# Kts = sp.sqrt(aGMKernel(G[i],G[i],self.alpha,self.gamma))\n# for j in range(n):\n# kernel_matrix[i,j] = aGMKernel(G[i],self.G_train_[j],self.alpha,self.gamma)/Kts/Ks[j]\n \n for i in range (nt):\n for j in range(n):\n kernel_matrix[i,j] = aGMKernel(G[i],self.G_train_[j],self.alpha, self.gamma)\n \n \n return kernel_matrix", "def kernel_factory(s, m1, m2):\r\n m_max = max(m1, m2)\r\n A = np.zeros([s, m_max, m_max], dtype=complex)\r\n symmetry = random.choice([2, 3, 4, 6])\r\n half_sym = np.floor(symmetry / 2).astype('int')\r\n lowest_k = 0.5\r\n highest_k = 3\r\n k = np.zeros([s, symmetry])\r\n for level in range(s):\r\n k[level, :] = np.random.uniform(lowest_k, highest_k, symmetry)\r\n\r\n x, y = np.meshgrid(np.linspace(-1, 1, m_max), np.linspace(-1, 1, m_max))\r\n # dist = np.sqrt(x * x + y * y)\r\n # theta = np.arctan(x / y)\r\n arb_angle = np.random.uniform(0, 2 * np.pi)\r\n for direction in range(symmetry):\r\n ang = direction * 180 / symmetry\r\n ang = arb_angle + ang * np.pi / 180\r\n r = (x * np.cos(ang) + np.sin(ang) * y)\r\n phi = np.random.uniform(0, 2 * np.pi)\r\n for i in range(s):\r\n A[i, :, :] += np.cos(2 * np.pi * k[i, direction % half_sym] * r)\r\n\r\n # Adding normal decay\r\n sigma = np.random.uniform(0.3, 0.6)\r\n decay = gaussian_window(m_max, m_max, sigma)\r\n A = np.multiply(np.abs(A), decay)\r\n # Normalizing:\r\n A = sphere_norm_by_layer(A)\r\n return A", "def create_p1(shape, ki, kj):\n a = np.zeros(shape, dtype=np.int)\n for i, row in enumerate(a):\n for j, col in enumerate(row):\n a[i][j] = ki[i] * kj[j]\n return a", "def interpolation_matrix(m):\n return np.nanmean(m,axis=1)", "def heatbasic(u0,T,K):\n import numpy as np\n N = len(u0)-1\n\n dx = 1.0/N;\n dt = 0.1/K;\n x = np.linspace(0,1,N+1)\n\n u = np.copy(u0)\n\n u_history = [u]\n\n A = np.zeros( (N+1,N+1) )\n for i in range(1,N):\n A[i,i-1] = 1;\n A[i,i] = -2;\n A[i,i+1] = 1\n A = A * dt/(dx*dx)\n A = A + np.eye(N+1)\n A[0,0] = 0.0;\n A[N,N] = 0.0;\n\n for k in range(K):\n u = np.dot(A,u)\n u_history.append(u)\n\n return u_history", "def make_covariance_matrix(points, kernel):\n\n dim = len(points)\n p1 = np.reshape(points, (dim, 1, -1))\n p2 = np.reshape(points, (dim, -1, 1))\n\n return kernel(p1, p2)", "def get_stain_matrix(I):", "def get_ky_mat_pack(hyps: np.ndarray, name: str,\n s1: int, e1: int, s2: int, e2: int,\n same: bool, kernel, cutoffs, hyps_mask):\n\n\n # initialize matrices\n training_data = _global_training_data[name]\n size1 = (e1-s1)*3\n size2 = (e2-s2)*3\n k_mat = np.zeros([size1, size2])\n\n ds = [1, 2, 3]\n\n # calculate elements\n args = from_mask_to_args(hyps, hyps_mask, cutoffs)\n\n for m_index in range(size1):\n x_1 = training_data[int(math.floor(m_index / 3))+s1]\n d_1 = ds[m_index % 3]\n if (same):\n lowbound = m_index\n else:\n lowbound = 0\n for n_index in range(lowbound, size2):\n x_2 = training_data[int(math.floor(n_index / 3))+s2]\n d_2 = ds[n_index % 3]\n kern_curr = kernel(x_1, x_2, d_1, d_2, *args)\n # store kernel value\n k_mat[m_index, n_index] = kern_curr\n 
if (same):\n k_mat[n_index, m_index] = kern_curr\n\n return k_mat", "def createK(obsSites,kernelSites,kernel=gaussianKernel,dist=euclidDist,d=4):\n r = len(obsSites)\n n = len(kernelSites)\n K = np.zeros([r,n])\n for i in range(r):\n for j in range(n):\n K[i,j] = kernel(dist(obsSites[i],kernelSites[j]),d)\n return K", "def kernel(self):\n V = self.matrix().kernel()\n D = self.domain()\n if not D.is_ambient():\n # Transform V to ambient space\n # This is a matrix multiply: we take the linear combinations of the basis for\n # D given by the elements of the basis for V.\n B = V.basis_matrix() * D.basis_matrix()\n V = B.row_module(D.base_ring())\n return self.domain().submodule(V, check=False)", "def createSensorModel(self):\n sensorMatrix = np.zeros((1, self.n*self.m))\n w = self.w\n w2 = 2*w+1\n div = float(w2 * w2)\n\n for i in self.board:\n x1, y1 = i\n line = []\n for j in self.board:\n x2, y2 = j\n\n cond1 = (x2 >= x1 - w and x2 <= x1 + w)\n cond2 = (y2 >= y1 - w and y2 <= y1 + w)\n\n if (cond1 and cond2):\n line.append(1/div)\n else:\n line.append(0)\n\n sensorMatrix = np.vstack((sensorMatrix, line))\n\n return sensorMatrix[1:, :]", "def makeGaussianKernel(sigma: float) -> np.ndarray:\n\n # Your code here.\n kernel_size = 8*sigma+1\n kernel = np.zeros([kernel_size,kernel_size], dtype=float)\n center = kernel_size//2\n \n \n s = 2*(sigma**2)\n sum_val = 0\n for i in range(0,kernel_size):\n for j in range(0,kernel_size):\n x = i-center\n y = j-center\n kernel[i,j] = np.exp(-(x**2+y**2) / s)\n sum_val += kernel[i,j]\n #/(np.pi * s)\n sum_val = 1/sum_val\n print(\"here is the kernel\", kernel*sum_val)\n return kernel*sum_val", "def expansion_matrix_du(self):\n row = self._base_nlp._upper_d_map\n nnz = len(self._base_nlp._upper_d_map)\n col = np.arange(nnz, dtype=np.int)\n data = np.ones(nnz)\n return csr_matrix((data, (row, col)), shape=(self.nd, nnz))", "def c_src_kernel_tiling(self, node, nodename):\r\n\r\n #The kernel is intended to be structured roughly like this:\r\n \"\"\"\r\n static __global__ void kernel()\r\n {\r\n for (int v = blockIdx.y; v < dim0; v += gridDim.x)\r\n {\r\n for (int w = blockIdx.y; w < dim1; w += gridDim.y)\r\n {\r\n for (int x = threadIdx.x; x < dim2; x += blockDim.x)\r\n {\r\n for (int y = threadIdx.y; y < dim3; y += blockDim.y)\r\n {\r\n for (int z = threadIdx.z; z < dim4; z += blockDim.z)\r\n {\r\n out[v * out_stride[0] + ...] 
= f(in1[...], in2[...])\r\n }\r\n }\r\n }\r\n }\r\n }\r\n }\r\n\r\n \"\"\"\r\n\r\n nd = node.outputs[0].type.ndim\r\n sio = StringIO()\r\n #print 'C_SRC_KERNEL', sio.getvalue()\r\n\r\n if nd in (4,):\r\n # print some leading comments to make the code easier to read\r\n for ipos, i in enumerate(node.inputs):\r\n print >> sio, \"// Input \", ipos, str(i.type)\r\n for ipos, i in enumerate(node.outputs):\r\n print >> sio, \"// Output \", ipos, str(i.type)\r\n print >> sio, \"static __global__ void kernel_%s_%s_%s(unsigned int numEls\" %(\r\n self.scalar_op.__class__.__name__,\r\n nodename,\r\n 'tiling%i'%nd)\r\n if (nd):\r\n print >> sio, \"\\t,\", \", \".join(\"const int dim%i\" % i for i in xrange(nd))\r\n #declare inputs\r\n for ipos, i in enumerate(node.inputs):\r\n s = \", \".join([\"const float * i%i_data\" % ipos] + list(\"int i%i_str_%i\" % (ipos, d) for d in xrange(nd)))\r\n print >> sio, \"\\t,\", s\r\n #declare outputs\r\n for ipos, i in enumerate(node.outputs):\r\n s = \", \".join([\"float * o%i_data\" % ipos] + list(\"int o%i_str_%i\" % (ipos, d) for d in xrange(nd)))\r\n print >> sio, \"\\t,\", s\r\n #print >> sio, \"\\t,\", \", \".join(\"int o%i_str_%i\" % (ipos, d) for d in xrange(nd))\r\n #print >> sio, \"\\t,\", \"float * o%i_data\" % ipos\r\n print >> sio, \"\\t)\\n{\"\r\n\r\n # For each input that is a scalar which has been broadcasted to a tensor,\r\n # load it into a local variable\r\n print >> sio, \" __shared__ float value0[%i];\" % len(node.inputs)\r\n print >> sio, \" __shared__ int shared_dims[%(nd)s];\" % locals()\r\n #print >> sio, \" __shared__ int shared_i_str[%(n_in)s][%(nd)s]\"\r\n print >> sio, \" if ((threadIdx.x == 0) && (threadIdx.y == 0)) {\"\r\n for ipos, i in enumerate(node.inputs):\r\n if _logical_scalar(i):\r\n print >> sio, \" value0[%i] = i%i_data[0];\" % (ipos, ipos)\r\n for ipos in xrange(nd):\r\n print >> sio, \" shared_dims[%i] = dim%i;\" % (ipos, ipos)\r\n print >> sio, \" }\"\r\n print >> sio, \" __syncthreads();\"\r\n\r\n\r\n if (nd == 4):\r\n print >> sio, \"\"\"\r\n for (int pos0 = blockIdx.x; pos0 < shared_dims[0]; pos0 += gridDim.x)\r\n {\r\n for (int pos1 = blockIdx.y; pos1 < shared_dims[1]; pos1 += gridDim.y)\r\n {\r\n //for (int pos2 = threadIdx.x; pos2 < shared_dims[2]; pos2 += blockDim.x)\r\n for (int pos2 = threadIdx.y; pos2 < shared_dims[2]; pos2 += blockDim.y)\r\n {\r\n //for (int pos3 = threadIdx.y; pos3 < shared_dims[3]; pos3 += blockDim.y)\r\n for (int pos3 = threadIdx.x; pos3 < shared_dims[3]; pos3 += blockDim.x)\r\n {\r\n \"\"\"\r\n else:\r\n raise NotImplementedError()\r\n\r\n for ipos, i in enumerate(node.inputs):\r\n if not _logical_scalar(i):\r\n print >> sio, \" const float * ii_i%i_data = i%i_data;\" % (ipos, ipos)\r\n for ipos, i in enumerate(node.outputs):\r\n print >> sio, \" float * ii_o%i_data = o%i_data;\" % (ipos, ipos)\r\n for d in xrange(nd):\r\n for ipos, i in enumerate(node.inputs):\r\n if not _logical_scalar(i):\r\n print >> sio, \" ii_i%i_data += pos%i * i%i_str_%i;\" % (ipos, d, ipos, d)\r\n for ipos, i in enumerate(node.outputs):\r\n print >> sio, \" ii_o%i_data += pos%i * o%i_str_%i;\" % (ipos, d, ipos, d)\r\n\r\n # perform the scalar operation on the input and output references\r\n #TODO: What if the scalar_op needs support_code??\r\n task_code = self.scalar_op.c_code(\r\n Apply(self.scalar_op,\r\n [scalar.Scalar(dtype = input.type.dtype)() for input in node.inputs],\r\n [scalar.Scalar(dtype = output.type.dtype)() for output in node.outputs])\r\n , nodename + '_scalar_'\r\n , 
get_str_list_logical_scalar(node, value_str='value0[%i]')\r\n , ['ii_o%i_data[0]'%ipos for ipos, i in enumerate(node.outputs)]\r\n , sub=dict(fail='return;')) #TODO: set a failure code somehow!!!\r\n print >> sio, \" \", task_code\r\n\r\n print >> sio, \" }\" * nd\r\n\r\n #TODO: insert runtime stride checks that select the best loop order either here, or in\r\n # the host code that launched the kernel (host code probably better spot)\r\n\r\n #indent = \" \"*(4*d+7)\r\n #for ipos, i in enumerate(node.inputs):\r\n #print >> sio, indent, \"const float * i%i\" % ipos, '= i%i_data', ''\r\n print >> sio, \"}\"\r\n\r\n print sio.getvalue()\r\n return sio.getvalue()", "def intrinsic_matrix_from_focal(cx, cy, f):\n return np.array([[f, 0, cx], [0, f, cy], [0, 0, 1]])", "def compute_interp_matrix(self, interpolator, dlen):\r\n\t\tinterplen = len(interpolator)\r\n\t\tassert np.mod(interplen,2)==1, \"Interpolator legnth must be odd\"\r\n\t\tassert interplen<=dlen, \"Interpolator length must be less than dictionary template length\"\r\n\r\n\t\tinterpolator_flipped = np.flip(interpolator, axis=0)\r\n\r\n\t\tstart_clip = int((dlen-1)/2)\r\n\t\tend_clip = start_clip + dlen\r\n\t\tmtx = np.zeros((dlen, 2*dlen-1))\r\n\r\n\t\tfor idx in np.arange(dlen):\r\n\t\t\tstart_idx = start_clip+idx-int(interplen/2)\r\n\t\t\tend_idx = start_idx + interplen\r\n\t\t\tmtx[idx, start_idx : end_idx] = interpolator_flipped\r\n\r\n\t\tshift_mat = mtx[:, start_clip:end_clip]\r\n\r\n\t\treturn shift_mat", "def Get_2d_smoothed_activation( MNI_coords, kernel_width=10 ):\n MNI_coords = MNI_coords[:, :2].astype('int') + 100\n\n arr = np.zeros((200,200))\n arr[ MNI_coords[:,0], MNI_coords[:,1]] = 1\n\n return gaussian_filter( arr, kernel_width )", "def expansion_matrix_dl(self):\n\n row = self._base_nlp._lower_d_map\n nnz = len(self._base_nlp._lower_d_map)\n col = np.arange(nnz, dtype=np.int)\n data = np.ones(nnz)\n return csr_matrix((data, (row, col)), shape=(self.nd, nnz))", "def get_matrix(df, features, output):\n #add a constant column as coefficient for w0\n df[\"constant\"] = 1.0\n feature_x, output_y = df[features].astype(float), df[output].astype(int)\n return feature_x, output_y", "def build_Xij_inv_matrix(self,Nmesh=64):\n H0, F = self.cosmo.H0, self.cosmo.F\n Lbox = self.attrs['Lbox']\n kgrid = initialize_kgrid(Nmesh,Lbox)\n kmag_grid = np.linalg.norm(kgrid,axis=3)\n w_grid = self.cosmo.Pk_lin(kmag_grid)*(1/Lbox**3)*np.exp(-kmag_grid*kmag_grid*self.RG*self.RG)\n k2 = kmag_grid**2\n k2[0,0,0] = 1 \n #----------------------------------------------------\n cspace = np.arange(0,18)\n \n xij_tensor = [[np.sum(np.conj(Hhats[i](kgrid,k2,H0,F))*Hhats[j](kgrid,k2,H0,F)*w_grid)\n for j in cspace[self.cmask]] for i in cspace[self.cmask]]\n \n xij_tensor = np.array(xij_tensor)\n self.xij_tensor_inv = np.linalg.inv(xij_tensor.real)", "def PNmatrix(t, inp):\n from pypride.vintlib import taitime, eop_iers, t_eph, ter2cel, load_cats\n # precess to date\n ''' set dates: '''\n\n tstamp = t.datetime\n mjd = np.floor(t.mjd)\n UTC = (tstamp.hour + tstamp.minute / 60.0 + tstamp.second / 3600.0) / 24.0\n JD = mjd + 2400000.5\n\n ''' compute tai & tt '''\n TAI, TT = taitime(mjd, UTC)\n\n ''' load cats '''\n _, _, eops = load_cats(inp, 'DUMMY', 'S', ['GEOCENTR'], tstamp)\n\n ''' interpolate eops to tstamp '''\n UT1, eop_int = eop_iers(mjd, UTC, eops)\n\n ''' compute coordinate time fraction of CT day at GC '''\n CT, dTAIdCT = t_eph(JD, UT1, TT, 0.0, 0.0, 0.0)\n\n ''' rotation matrix IERS '''\n r2000 = ter2cel(tstamp, eop_int, dTAIdCT, 
'iau2000')\n # print(r2000[:,:,0])\n\n return r2000", "def numpy_compute_kernel_matrices(x, x_star, sigma_f=1, l=1):\r\n start_time = time()\r\n\r\n\r\n xx, yy = np.meshgrid(x, x, sparse=True)\r\n xx_star2, yy_star2 = np.meshgrid(x_star, x_star, sparse=True)\r\n xx_star, yy_star = np.meshgrid(x, x_star, sparse=True)\r\n\r\n K = kernel_function_1D(xx, yy, sigma_f, l)\r\n K_star2 = kernel_function_1D(xx_star2, yy_star2, sigma_f, l)\r\n K_star = kernel_function_1D(xx_star, yy_star, sigma_f, l)\r\n\r\n print(\"--- %s ---\" % seconds_to_str((time() - start_time)))\r\n\r\n return (K, K_star2, K_star)", "def write_kernel(w, k):\n w.writeln(\"void {k}(const Image<int>& in, Image<int>& out\".format(k=k.name))\n # write the tap signal in the function argument list\n for tapName in k.rtapNames:\n #tapType = k.edges[tapName].dtype\n #tapCType = dtypeMap[tapType]\n tapCType = getCType(k.edges[tapName])\n for indices in expand_range(k.edges[tapName].dim):\n w.writeln(\"\\t, {type} {sig}\".format(type=tapCType, sig=mangle((tapName, indices))))\n w.writeln(\")\")\n w.writeln(\"{\")\n w.indent()\n # TODO: insert size error checking into C code here\n\n w.writeln(\"for(int y = 0; y < in.height(); y++){\")\n w.indent()\n w.writeln(\"for(int x = 0; x < in.width(); x++){\")\n w.indent()\n\n \n # Grab the register declaration for the partial-pixel output and blow it into\n # the complete list of input registers\n startName = k.ppoutName\n #startType = k.edges[startName].dtype\n #startCType = dtypeMap[startType]\n startCType = getCType(k.edges[startName])\n for indices in expand_range(k.edges[startName].dim):\n # HACK: work with multi-channel or single-channel images\n z_idx = 0\n if len(indices) == 3:\n z_idx = indices[2]\n\n w.writeln(\"{type} {reg} = in(x+{xoff}, y+{yoff}, {z});\".format(\n type=startCType,\n reg=mangle((startName, indices)),\n xoff=(indices[0]-k.centroid[0]), \n yoff=(indices[1]-k.centroid[1]), z=z_idx))\n \n # Set up the constants\n for const in k.constants:\n # TODO: be careful here, because we need to be consistent with naming/indexing\n # TODO: handle int/float; infer datatype in parser\n w.writeln(\"const float {reg} = {val};\".format(reg=mangle((const[0], [0])), val=const[1]))\n \n w.writeln(\"\")\n\n\n #Special Register Examples for Reduce:\n #fix_17_0 pixel_out_pos[1:0] # Location of Reduce pixel in output image\n #fix_17_0 centroid_pos[1:0] # Location of Centroid in input image\n if \"centroid_pos\" in k.specialRegs:\n w.writeln(\"int centroid_pos_0 = x;\")\n w.writeln(\"int centroid_pos_1 = y;\")\n\n if \"pixel_out_pos\" in k.specialRegs:\n w.writeln(\"int pixel_out_pos_0 = x;\")\n w.writeln(\"int pixel_out_pos_1 = y;\")\n \n # Create a list of (name, index) tuples representing the valid (i.e., evaluated) signal\n validRegs = [(startName, i) for i in expand_range(k.edges[startName].dim)]\n validRegs += [(tapName, i) for tapName in k.rtapNames \n for i in expand_range(k.edges[tapName].dim)]\n validRegs += [(regName, i) for regName in k.specialRegs \n for i in expand_range(k.edges[regName].dim)]\n validRegs += [(c[0], [0]) for c in k.constants]\n \n # Make a copy of the list of operations which we can remove stuff from\n unprocessed = dict(k.ops)\n \n # Process all the operations\n while len(unprocessed) > 0:\n progress = False\n for opKey in unprocessed:\n op = k.ops[opKey]\n # Find an operation that can be evaluated\n if opOk(op, validRegs):\n #dtype = k.edges[op.result[0]].dtype\n #dtype = dtypeMap[dtype] # Look up the C-equivalent for this type\n dtype = 
getCType(k.edges[op.result[0]])\n # TODO: include integer/fraction width\n \n # TODO: error checking that we have the right number of operands - this should be done in the parser, actually\n # Evaluate it\n if op.name in ['max', 'min']:\n write_complex_op(w, op, dtype)\n elif op.name == \"sum\": \n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=str.join(' + ', mangle(op.operands))))\n elif op.name == \"mv\":\n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=mangle(op.operands[0])))\n elif op.name == \"add\":\n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=str.join(' + ', mangle(op.operands))))\n elif op.name == \"sub\":\n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=str.join(' - ', mangle(op.operands))))\n elif op.name == \"mult\":\n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=str.join(' * ', mangle(op.operands))))\n elif op.name == \"div\":\n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=str.join(' / ', mangle(op.operands))))\n\n elif op.name == \"lshift\":\n w.writeln(\"{dtype} {dst} = {op1} << {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"rshift\":\n w.writeln(\"{dtype} {dst} = {op1} >> {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"and\":\n w.writeln(\"{dtype} {dst} = {op1} & {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"or\":\n w.writeln(\"{dtype} {dst} = {op1} | {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"ne\":\n w.writeln(\"{dtype} {dst} = {op1} != {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"eq\":\n w.writeln(\"{dtype} {dst} = {op1} == {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"lt\":\n w.writeln(\"{dtype} {dst} = {op1} < {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"lte\":\n w.writeln(\"{dtype} {dst} = {op1} <= {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"gt\":\n w.writeln(\"{dtype} {dst} = {op1} > {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"gte\":\n w.writeln(\"{dtype} {dst} = {op1} >= {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"not\":\n w.writeln(\"{dtype} {dst} = !{src};\".format(dtype=dtype, dst=mangle(op.result), src=mangle(op.operands[0])))\n elif op.name == \"abs\":\n w.writeln(\"{dtype} {dst} = ({src} >= 0) ? {src} : (-{src});\".format(dtype=dtype, dst=mangle(op.result), src=mangle(op.operands[0])))\n elif op.name == \"inv\":\n w.writeln(\"{dtype} {dst} = -{src};\".format(dtype=dtype, dst=mangle(op.result), src=mangle(op.operands[0])))\n\n elif op.name == \"mux\":\n w.writeln(\"{dtype} {dst} = {cond} ? 
{op1} : {op2};\".format(dtype=dtype, dst=mangle(op.result), \\\n cond=mangle(op.operands[0]), op1=mangle(op.operands[1]), op2=mangle(op.operands[2])))\n else:\n print \"Unhandled operator \" + opKey\n \n validRegs.append(op.result)\n # Remove it from the list\n unprocessed.pop(opKey)\n progress = True\n break # We changed the list, so we gotta start over\n \n # If we went through the whole list without finding any ops to evaluate,\n # something is wrong and we need to give up.\n if progress is False:\n print \"Failed to evaluate some ops!\"\n for opKey in unprocessed:\n print \"\\t %s %s\" % (unprocessed[opKey].name, unprocessed[opKey].result)\n break\n \n for indices in expand_range(k.edges[k.sink].dim):\n #writeln('printf(\"result: %f\\\\n\", {reg});'.format(reg=mangle((k.sink, indices))))\n # TODO: make this handle depths other than 3\n w.writeln('out(x,y,{z}) = {reg};'.format(z=indices[0], reg=mangle((k.sink, indices))))\n\n w.unindent()\n w.writeln(\"}\")\n w.unindent()\n w.writeln(\"}\")\n w.unindent()\n w.writeln(\"} // END %s\" % k.name)\n w.writeln(\"\\n\")", "def build_interpolator(self):\n\n self.grid_dims = np.append([len(self.gridpoints[p])\n for p in self.labels],\n self.output.shape[-1])\n self.xgrid = tuple([self.gridpoints[l] for l in self.gridpoints])\n self.ygrid = np.zeros(self.grid_dims)\n for x, y in zip(self.X, self.output):\n self.ygrid[tuple(x)] = y\n self.interpolator = RegularGridInterpolator(self.xgrid, self.ygrid)", "def smooth_with_kernel(expr_matrix, kernel, transpose='auto'):\n if isinstance(kernel, str):\n with open(kernel, 'rb') as f:\n kernel = pickle.load(f)\n\n transpose = _need_transpose(expr_matrix, kernel) if transpose=='auto' else transpose\n\n if transpose:\n return pd.DataFrame(np.dot(expr_matrix.T, kernel), index=expr_matrix.columns,\n columns=expr_matrix.index).T\n else:\n return pd.DataFrame(np.dot(expr_matrix, kernel), index=expr_matrix.index,\n columns=expr_matrix.columns)", "def expansion_matrix_c(self):\n row = np.zeros(0)\n nnz = 0\n col = np.arange(nnz, dtype=np.int)\n data = np.zeros(nnz)\n return csr_matrix((data, (row, col)), shape=(self.ng, nnz))", "def _kernel(self, x, y, t):\n return (self.C / (2 * np.pi * self.sigma_x * self.sigma_y * t)) * \\\n tf.exp(- self.beta * t - (tf.square(x)/tf.square(self.sigma_x) + tf.square(y)/tf.square(self.sigma_y)) / (2*t))", "def make_matrix(self, rs, ths, xs, ys, vals):\n\n\t\tps = ravel_multi_index((hstack((rs, rs)), hstack((ths, self.thbins - ths - 1))), (self.rbins, self.thbins))\n\t\tcs = ravel_multi_index((hstack((xs, ys)), hstack((ys, xs))), (self.cbins, self.cbins))\n\n\t\treturn csr_matrix((hstack((vals, vals)), (ps, cs)), (self.rbins*self.thbins, self.cbins**2))", "def expansion_matrix_d(self):\n row = self._base_nlp._d_map\n nnz = len(self._base_nlp._d_map)\n col = np.arange(nnz, dtype=np.int)\n data = np.ones(nnz)\n return csr_matrix((data, (row, col)), shape=(self.ng, nnz))", "def makeenv(self):\n eps=np.ones((self.nx,self.ny))*const.epsilon_0\n mu=np.ones((self.nx,self.ny))*const.mu_0\n\n eps[:20,:] *= self.q #adself.ds a space of higher permittivity \n eps[-20:,:] *= self.q #adself.ds a space of higher permittivity \n eps[:,:20] *= self.q #adself.ds a space of higher permittivity \n eps[:,-20:] *= self.q #adself.ds a space of higher permittivity \n #mu[:20,:] /= self.q #adself.ds a space of higher permittivity \n #mu[-20:,:] /= self.q #adself.ds a space of higher permittivity \n #mu[:,:20] /= self.q #adself.ds a space of higher permittivity \n #mu[:,-20:] /= self.q #adself.ds a space 
of higher permittivity \n\n return eps, mu", "def create_Uxx_mat(x):\n \n\n dx = x[1] - x[0]\n \n x_int = np.arange(1,len(x)-1,dtype=int)\n #create u_{xx} matrix operator\n Uxx_mat_row = np.hstack((x_int,x_int,x_int))\n Uxx_mat_col = np.hstack((x_int-1,x_int,x_int+1))\n Uxx_entry = (1/(dx**2))*np.hstack((np.ones(len(x)-2),-2*np.ones(len(x)-2),(np.ones(len(x)-2))))\n\n Uxx_mat_row_bd = np.hstack((0,0,len(x)-1,len(x)-1))\n Uxx_mat_col_bd = np.hstack((0,1,len(x)-2,len(x)-1))\n Uxx_entry_bd = (1/(dx**2))*np.hstack((-2,2,2,-2))\n\n\n Uxx_mat_row = np.hstack((Uxx_mat_row,Uxx_mat_row_bd))\n Uxx_mat_col = np.hstack((Uxx_mat_col,Uxx_mat_col_bd))\n Uxx_entry = np.hstack((Uxx_entry,Uxx_entry_bd))\n\n return sparse.coo_matrix((Uxx_entry,(Uxx_mat_row,Uxx_mat_col)))", "def conv(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n out = np.zeros((Hi, Wi))\n\n # For this assignment, we will use edge values to pad the images.\n # Zero padding as used in the previous assignment can make\n # derivatives at the image boundary very big.\n \n pad_width0 = Hk // 2\n pad_width1 = Wk // 2\n pad_width = ((pad_width0,pad_width0),(pad_width1,pad_width1))\n padded = np.pad(image, pad_width, mode='edge') \n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n kernel = np.flipud(np.fliplr(kernel)) # flip h/v\n for h in range(Hi):\n for w in range(Wi):\n out[h, w] = np.sum(np.multiply(kernel, padded[h : h + Hk, w : w + Wk]))\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return out", "def stiffnessMatrix (bsp, knotlist,p, nbquad):\n\n\tS = np.zeros((bsp.N-2, bsp.N-2))\n\t\n\tfor line in range(1, bsp.N-1):\n\t\t\n\t\tfor column in range(1, bsp.N-1):\n\t\t\n\t\t\tfor iknot in range(len(knotlist)-1):\n\t\t\t\tS[line-1, column-1] = S[line-1, column-1] + legendreGauss(bilinearForm, nbquad, knotlist[iknot], knotlist[iknot+1], line, bsp, ind2=column)\n\t\n\treturn S;", "def get_intrinsic_mat(params):\n return np.asarray(\n [\n [params[0], 0.0, params[1]],\n [0.0, params[2], params[3]],\n [0.0, 0.0, 1.0],\n ]\n )", "def makeTimeInterpolatedMatrix(df, num_interpolation=10):\n times = df.index.tolist()\n time_last = times[0]\n matrix = []\n # For each pair of times\n for time in times[1:]:\n time_incr = (time - time_last)/num_interpolation\n arr_last = np.array(df.loc[time_last, :])\n arr_cur = np.array(df.loc[time, :])\n arr_incr = (arr_cur - arr_last)/num_interpolation\n # For each interpolation\n for idx in range(num_interpolation):\n arr = arr_last + idx*arr_incr\n arr = np.insert(arr, 0, time_last + idx*time_incr)\n matrix.append(arr)\n time_last = time\n return np.array(matrix)", "def make_result_matrix(T):\n result_matrix = []\n # Uniform sampled distribution\n distribution = np.random.choice([1, 0], T, p=[.1, .9])\n place_holder = np.random.randn(T)\n place_holder[distribution] = np.nan # Masking\n\n # This block is to un-flatten the 25 element matrix into a 5*5 matrix\n for j in range(T):\n temp = []\n for i in range(T):\n temp.append(place_holder[i])\n result_matrix.append(temp)\n\n result_matrix = np.array(result_matrix)\n\n return result_matrix", "def generator_matrix(self):\n C = self.code()\n F = C.base_ring()\n Cor = C.original_code()\n G = Cor.generator_matrix()\n k = C.dimension()\n extra_col = [-sum(G.rows()[i]) for i in range(k)]\n extra_col = matrix(F, k, 1, extra_col)\n return G.augment(extra_col)", "def projection_kernel(self, dataset, testset, C):\n N = dataset.shape[0]\n D = 
testset.shape[0]\n K = np.zeros((D, N), dtype=float)\n for i in range(D):\n for j in range(N):\n K[i, j] = self.Gaussian_Kernel(testset[i], dataset[j], C)\n\n return K", "def _init_transformation_matrix(self):\n # Set up basic transformation matrix\n c_transform = np.zeros((self.n_beads, self.n_beads))\n\n # Get auxiliary array with bead indices\n n = np.arange(1, self.n_beads + 1)\n\n # for k = 0\n c_transform[0, :] = 1.0\n\n for k in range(1, self.n_beads // 2 + 1):\n c_transform[k, :] = np.sqrt(2) * np.cos(2 * np.pi * k * n / self.n_beads)\n\n for k in range(self.n_beads // 2 + 1, self.n_beads):\n c_transform[k, :] = np.sqrt(2) * np.sin(2 * np.pi * k * n / self.n_beads)\n\n if self.n_beads % 2 == 0:\n c_transform[self.n_beads // 2, :] = (-1) ** n\n\n # Since matrix is initialized as C(k,n) does not need to be transposed\n c_transform /= np.sqrt(self.n_beads)\n c_transform = torch.from_numpy(c_transform)\n\n return c_transform", "def _create_kernel(sm_times, sm_freqs, kernel='hanning'):\n # frequency dependent kernels\n if isinstance(sm_times, (np.ndarray, list, tuple)):\n sm_freqs = 1 # force 1hz smoothing\n kernels = [_create_kernel(\n sm, sm_freqs, kernel=kernel) for sm in sm_times]\n return kernels\n\n # frequency independent kernels\n if kernel == 'square':\n return np.full((sm_freqs, sm_times), 1. / (sm_times * sm_freqs))\n elif kernel == 'hanning':\n hann_t, hann_f = np.hanning(sm_times), np.hanning(sm_freqs)\n hann = hann_f.reshape(-1, 1) * hann_t.reshape(1, -1)\n return hann / np.sum(hann)\n else:\n raise ValueError(f\"No kernel {kernel}\")", "def __init__(self, dim): #, length_scale, length_scale_bounds=()):\n# assert isinstance(column, (list, tuple, int)), \"must be int or list of ints\"\n# self.column = [column] if isinstance(column, int) else column\n# assert all(isinstance(i, int) for i in self.column), \"must be integers\"\n self.dim = dim\n \n kernels = [Projection([c]) for c in range(dim)]\n\n # combine the kernels into a single product kernel\n self.kernel = reduce(lambda k0, k1 : k0 * k1, kernels)", "def build_augmented_matrix(self):\r\n for row in range(self.SIZE):\r\n self.matrix[row].append(self.result[row])", "def _compute_s_matrix(self, system_std_dev: tf.Tensor) -> None:\n self.s_matrix_inv = self._kronecker_product(\n tf.diag(tf.reshape(tf.ones_like(system_std_dev, dtype=tf.float64)\n / system_std_dev, [-1])),\n tf.eye(self.n_points_int, dtype=tf.float64))\n return", "def buildKernel(self):\n\t\tkernel = list()\n\n\t\tif not self.spreadsheet_transposed:\t\t\t# If the spreadsheet is NOT transposed, i.e., the spreadsheet contains rows, transpose it so it contains columns\n\t\t\tself.transpose(1)\n\n\t\t# CALL THESE JUST ONCE BEFORE LOOP(S)\n\t\tappend = kernel.append\n\t\tlower = str.lower\n\t\tformat = str.format\n\t\t# - - - - - - - - - - - - - - - - - -\n\n\t\tfor c in xrange(len(self.spreadsheet)):\t\t# Iterate through the spreadsheet's columns to search for the \"kernel\"\n\t\t\tcolumn = self.spreadsheet[c]\n\n\t\t\tif lower(column[0]) == \"kernel\":\t\t# Examines to see if the column contains the kernel\n\t\t\t\tleftColumn = self.spreadsheet[c-1]\t# Assigns the left side of the kernel to this variable\n\t\t\t\trightColumn = self.spreadsheet[c+1]\t# Assigns the right side of the kernel to this variable\n\n\t\t\t\tfor r in xrange(len(column)):\t# Iterate through the column to create a string from the left kernel, center kernel, and right kernel columns\n\t\t\t\t\tif r != 0:\n\t\t\t\t\t\tappend(format(\"{0}\", lower(leftColumn[r] + column[r] + 
rightColumn[r])))\t# Append string to kernel list\n\n\t\treturn kernel", "def get_global_stiffness_matrix(self) -> NDArray[np.float64]:\n\n s = self.sin()\n c = self.cos()\n matrix_helper = [\n c ** 2,\n c * s,\n -(c ** 2),\n -c * s,\n c * s,\n s ** 2,\n -c * s,\n -(s ** 2),\n -(c ** 2),\n -c * s,\n c ** 2,\n c * s,\n -c * s,\n -(s ** 2),\n c * s,\n s ** 2,\n ]\n return (\n self.youngs_modulus\n * self.area\n / self.get_length()\n * np.array(matrix_helper, dtype=np.float64).reshape(4, 4)\n )", "def _build_dist(self):\n lamb = self.params['lamb']\n p = self.params['p']\n\n jac = self.jacobian\n # build D on grids\n xg, yg, mask = self._mask_grid()\n r_max = self._r_max(xg, yg, mask)\n d_mat = self._psf_grid(xg, yg, r_max=r_max)\n # E[yy^T]\n j_j_w = np.dot(jac, jac.transpose())\n r_mat = np.diag(np.diag(j_j_w) ** p)\n jac_inv = la.inv(j_j_w + lamb*r_mat)\n # RM = E[xx^T] / E[yy^T]\n h_mat = np.dot(np.dot(d_mat, jac.transpose()), jac_inv)\n return h_mat", "def getMatrix(self, scale):\n \n kbx = np.sqrt(self.kbxn**2. + self.kbxSF**2.) # rms kbeta\n kby = np.sqrt(self.kbyn**2. + self.kbySF**2.) # rms kbeta\n\n kbx = kbx / scale.lg\n kby = kby / scale.lg\n \n matx = np.zeros([2,2])\n\n if (kbx > 0.):\n matx[0][0] = np.cos(kbx*self.undlen)\n matx[0][1] = 1. /kbx * np.sin(kbx*self.undlen)\n matx[1][0] = -kbx * np.sin(kbx*self.undlen)\n matx[1][1] = np.cos(kbx*self.undlen)\n else: # just a free space drift\n matx[0][0] = 1.\n matx[0][1] = self.undlen\n matx[1][0] = 0.\n matx[1][1] = 1.\n\n maty = np.zeros([2,2])\n\n if (kby > 0.):\n maty[0][0] = np.cos(kby*self.undlen)\n maty[0][1] = 1. /kby * np.sin(kby*self.undlen)\n maty[1][0] = -kby * np.sin(kby*self.undlen)\n maty[1][1] = np.cos(kby*self.undlen)\n else: # just a free space drift\n maty[0][0] = 1.\n maty[0][1] = self.undlen\n maty[1][0] = 0.\n maty[1][1] = 1.\n \n return matx, maty", "def lte_rate_matrix_block(phi_block, electron_density):\n lte_rate_vector_block = -1.0 * np.hstack([*phi_block.values, -1.0])\n lte_rate_matrix_block = np.diag(lte_rate_vector_block)\n n_e_initial = np.ones(len(phi_block)) * electron_density\n n_e_matrix = np.diag(n_e_initial, 1)\n lte_rate_matrix_block += n_e_matrix\n lte_rate_matrix_block[-1, :] = 1.0\n return lte_rate_matrix_block", "def create_kernel(\n self,\n ) -> pyabc.StochasticKernel:\n def kernel_fun(x, x_0, t, par) -> float:\n \"\"\"The kernel function.\"\"\"\n # the kernel value is computed by amici already\n return x['llh']\n\n # create a kernel from function, returning log-scaled values\n kernel = pyabc.distance.SimpleFunctionKernel(\n kernel_fun, ret_scale=pyabc.distance.SCALE_LOG)\n\n return kernel", "def make_project_matrix(X):\n X = np.mat(X)\n return np.eye(X.shape[0]) - (X*(np.linalg.inv(X.T*X)*X.T))", "def conv(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n out = np.zeros((Hi, Wi))\n\n # For this assignment, we will use edge values to pad the images.\n # Zero padding will make derivatives at the image boundary very big,\n # whereas we want to ignore the edges at the boundary.\n pad_width0 = Hk // 2\n pad_width1 = Wk // 2\n pad_width = ((pad_width0,pad_width0),(pad_width1,pad_width1))\n padded = np.pad(image, pad_width, mode='edge')\n\n ### YOUR CODE HERE\n for i in range(Hi):\n for j in range(Wi):\n out[i,j] = np.sum(padded[i : i + Hk, j : j + Wk] * np.flip(kernel))\n ### END YOUR CODE\n\n return out", "def np_image_matrix(self):\n return np.array(self.crop_image())", "def form_matrix_yt(w):\r\n M = np.zeros((len(w),len(w)))\r\n for i in range(len(w)):\r\n for j in 
range(len(w)):\r\n M[i,j] = YoungTableaux(w[i],w[j]).CMNR()\r\n return M", "def _get_kriging_matrix(self, n, exact_values):\n\n xyz = np.concatenate((self.X_ADJUSTED[:, np.newaxis],\n self.Y_ADJUSTED[:, np.newaxis],\n self.Z_ADJUSTED[:, np.newaxis]), axis=1)\n d = cdist(xyz, xyz, 'euclidean')\n a = np.zeros((n+1, n+1))\n a[:n, :n] = - self.variogram_function(self.variogram_model_parameters, d)\n if not exact_values:\n if self.variogram_model == 'linear':\n np.fill_diagonal(a, self.variogram_model_parameters[1])\n elif self.variogram_model != 'custom':\n np.fill_diagonal(a, self.variogram_model_parameters[2])\n else :\n np.fill_diagonal(a, 0.)\n a[n, :-1] = 1.0\n a[:-1, n] = 1.0\n\n return a", "def init():\n\tN = np.int32(DIM) #prepare for stitching\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN)/DIM\n\tHII_scale = np.float32(BOX_LEN)/HII_DIM\n\tshape = (N,N,N)\n\t\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\n\tkernel_source = open(cmd_folder+\"/initialize.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'VOLUME': VOLUME,\n\t\t'DIM': DIM\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_kernel = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tadj_complex_conj = main_module.get_function(\"adj_complex_conj\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d, np.int32(DIM), block=block_size, grid=grid_size)\n\n\t#import IPython; IPython.embed()\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d_imag, np.int32(DIM), block=block_size, grid=grid_size)\n\n\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\n\t#adj_complex_conj(largebox_d, DIM, block=block_size, grid=grid_size)\n\tlargebox = largebox_d.get()\n\t#np.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox)\n\n\t#save real space box before smoothing\n\tplan = Plan(shape, dtype=np.complex64)\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox_d.real.get_async())\n\n\t#save real space box after smoothing and subsampling\n\t# host largebox is still in k space, no need to reload from disk\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tHII_filter(largebox_d, N, ZERO, smoothR, block=block_size, grid=grid_size);\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tsubsample_kernel(largebox_d.real, smallbox_d, N, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_grid_size) #subsample in real space\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), smallbox_d.get_async())\n\n\t# reload the 
k-space box for velocity boxes\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\t\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,DIM), dtype=np.complex64)\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(num), block=block_size, grid=grid_size)\n\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=grid_size)\n\t\tplan.execute(largevbox_d, inverse=True)\n\t\tlargevbox_d /= scale**3\n\t\t#import IPython; IPython.embed()\n\t\tsubsample_kernel(largevbox_d.real, smallbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_grid_size)\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallbox_d.get())\n\n\treturn", "def _make_gaussian_matrix(\n data_count: int,\n feature_count: int,\n) -> np.ndarray:\n return np.random.randn(data_count, feature_count)", "def compute_matrix(*params, **hyperparams):\n phi = params[0]\n d, t = hyperparams[\"dimension\"]\n\n if qml.math.get_interface(phi) == \"tensorflow\":\n p = qml.math.exp(1j * qml.math.cast_like(phi, 1j))\n minus_p = qml.math.exp(-1j * qml.math.cast_like(phi, 1j))\n zeros = qml.math.zeros_like(p)\n\n columns = []\n for i in range(t):\n columns.append(\n [p if j == i else zeros for j in range(t)]\n if i < d\n else [minus_p if j == i else zeros for j in range(t)]\n )\n r = qml.math.stack(columns, like=\"tensorflow\", axis=-2)\n return r\n\n arg = 1j * phi\n prefactors = qml.math.array([1 if index < d else -1 for index in range(t)], like=phi)\n\n if qml.math.ndim(arg) == 0:\n return qml.math.diag(qml.math.exp(arg * prefactors))\n\n diags = qml.math.exp(qml.math.outer(arg, prefactors))\n return qml.math.stack(qml.math.diag(d) for d in diags)", "def getBmatrix(H, kernMat, Gexp, wexp, *argv):\n n = kernMat.shape[0];\n ns = kernMat.shape[1];\n nl = ns - 2;\n r = np.zeros(n); \t # vector of size (n);\n\n # furnish relevant portion of Jacobian and residual\n\n # Kmatrix = np.dot((1./Gexp).reshape(n,1), np.ones((1,ns)));\n Kmatrix = np.dot((wexp/Gexp).reshape(n,1), np.ones((1,ns)))\n Jr = -kernelD(H, kernMat) * Kmatrix; \n\n # if plateau then unfurl G0\n if len(argv) > 0:\n G0 = argv[0]\n # r = (1. - kernel_prestore(H, kernMat, G0)/Gexp)\n r = wexp * (1. - kernel_prestore(H, kernMat, G0)/Gexp)\n\n else:\n # r = (1. - kernel_prestore(H, kernMat)/Gexp)\n r = wexp * (1. 
- kernel_prestore(H, kernMat)/Gexp)\n \n B = np.dot(Jr.T, Jr) + np.diag(np.dot(r.T, Jr))\n\n return B", "def initialize_Rs(self):\n return [np.eye(K.shape[0]) for K in self.Ks]", "def _calc_kernel(self,\n freq_1: float,\n time_1: float,\n freq_2: float,\n time_2: float,\n dagg: tuple\n ) -> Tuple[ndarray, ndarray]:\n dt = self._process_tensor.dt\n #pieces of kernel consist of some combination of phases and\n #Bose-Einstein factors\n n_1, n_2 = 0, 0\n if self._temp > 0:\n n_1 += np.exp(-freq_1/self._temp) / (1 - np.exp(-freq_1/self._temp))\n n_2 += np.exp(-freq_2/self._temp) / (1 - np.exp(-freq_2/self._temp))\n\n ker_dim = int(np.round(time_2 / dt))\n # calculate index corresponding to t_1\n switch = int(np.round(time_1 / dt))\n re_kernel = np.zeros((ker_dim, ker_dim), dtype = NpDtype)\n im_kernel = np.zeros((ker_dim, ker_dim), dtype = NpDtype)\n\n tpp_index, tp_index = np.meshgrid(\n np.arange(ker_dim), np.arange(ker_dim),\n indexing='ij') #array of indices for each array element\n regions = {\n 'a': (slice(switch), slice(switch)), #(0->t_1, 0->t_1)\n 'b': (slice(switch), slice(switch, None)), #(0->t_1, t_1->t)\n 'c': (slice(switch, None), slice(switch, None))} #(t_1->t, t_1->t)\n\n def phase(region, swap_ts = False):\n tk = tp_index[regions[region]]\n tkp = tpp_index[regions[region]]\n if tk.size == 0 or tkp.size == 0:\n return 0\n a = -1j * ((2*dagg[0] - 1)) * freq_2\n b = -1j * ((2*dagg[1] - 1)) * freq_1\n if swap_ts:\n a, b = b, a\n if region in ('a','c'):\n ph = np.triu(\n np.exp(a * (tk+1)*dt + b * (tkp+1)*dt) / (a * b), k = 1)\n ph -= np.triu(\n np.exp(a * (tk+1)*dt + b * tkp*dt) / (a * b), k = 1)\n ph -= np.triu(\n np.exp(a * tk*dt + b * (tkp+1)*dt) / (a * b), k = 1)\n ph += np.triu(\n np.exp(a * tk*dt + b * tkp*dt) / (a * b), k = 1)\n sel = np.diag(tk)\n di = -np.exp((a * (sel + 1) + b * sel) * dt) / (a * b)\n if a + b != 0:\n di += np.exp((a + b) * (sel + 1) * dt) / (b * (a+b))\n di += np.exp((a + b) * sel * dt) / (a * (a+b))\n else:\n di += (1 + a * sel * dt + b * (sel + 1) * dt) / (a * b)\n ph += np.diag(di)\n else:\n ph = np.exp(a * (tk+1)*dt + b * (tkp+1)*dt) / (a * b)\n ph -= np.exp(a * (tk+1)*dt + b * tkp*dt) / (a * b)\n ph -= np.exp(a * tk*dt + b * (tkp+1)*dt) / (a * b)\n ph += np.exp(a * tk*dt + b * tkp*dt) / (a * b)\n return ph\n\n\n if dagg == (0, 1):\n re_kernel[regions['a']] = phase('a') + phase('a', 1)\n\n re_kernel[regions['b']] = phase('b')\n\n im_kernel[regions['a']] = ((2*n_1 + 1) * phase('a') -\n (2*n_2 + 1) * phase('a', 1))\n\n im_kernel[regions['b']] = (2*n_1 + 1) * phase('b')\n\n im_kernel[regions['c']] = -2 * (n_1 + 1) * phase('c')\n\n elif dagg == (1, 0):\n re_kernel[regions['a']] = phase('a') + phase('a', 1)\n\n re_kernel[regions['b']] = phase('b')\n\n im_kernel[regions['a']] = ((2*n_1 + 1) * phase('a') -\n (2*n_2 + 1) * phase('a', 1))\n\n im_kernel[regions['b']] = (2*n_1 + 1) * phase('b')\n\n im_kernel[regions['c']] = 2 * n_1 * phase('c')\n\n elif dagg == (1, 1):\n re_kernel[regions['a']] = -(phase('a') + phase('a', 1))\n\n re_kernel[regions['b']] = -phase('b')\n\n im_kernel[regions['a']] = ((2*n_1 + 1) * phase('a') +\n (2*n_2 + 1) * phase('a', 1))\n\n im_kernel[regions['b']] = (2*n_1 + 1) * phase('b')\n\n im_kernel[regions['c']] = 2 * (n_1 + 1) * phase('c')\n\n elif dagg == (0, 0):\n re_kernel[regions['a']] = -(phase('a') + phase('a', 1))\n\n re_kernel[regions['b']] = -phase('b')\n\n im_kernel[regions['a']] = -((2*n_2 + 1) * phase('a', 1) +\n (2*n_1 + 1) * phase('a'))\n\n im_kernel[regions['b']] = -(2*n_1 + 1) * phase('b')\n\n im_kernel[regions['c']] = 
-2 * n_1 * phase('c')\n\n re_kernel = np.triu(re_kernel) #only keep triangular region\n im_kernel = np.triu(im_kernel)\n return re_kernel, im_kernel", "def _build(self):\n ary = np.zeros( (3,3,3), float )\n ary[0,0,0] = ary[1,1,1] = ary[0,1,2] = ary[1,0,2] = 1.\n ary[0,2,0] = ary[0,2,2] = ary[2,0,0] = ary[2,0,2] = 0.5\n ary[1,2,1] = ary[1,2,2] = ary[2,1,1] = ary[2,1,2] = 0.5\n ary[2,2,0] = ary[2,2,1] = 0.25\n ary[2,2,2] = 0.5\n return ary", "def _create_temperature_grid():\n\n temperatures_kelvins = numpy.full(NUM_GRID_POINTS, MIN_TEMPERATURE_KELVINS)\n\n for i in range(1, NUM_GRID_POINTS):\n if FIRST_POINT_IN_FRONT < i <= LAST_POINT_IN_FRONT:\n this_diff_kelvins = FRONT_GRADIENT_KELVINS_PT01 + 0.\n else:\n this_diff_kelvins = DEFAULT_GRADIENT_KELVINS_PT01 + 0.\n\n temperatures_kelvins[i] = (\n temperatures_kelvins[i - 1] + this_diff_kelvins\n )\n\n return temperatures_kelvins", "def tmatrix(self, source, drain):\n if self.gf_r is None:\n self.gf()\n\n G_sd = self.device.connections[drain].dot(self.gf_r).dot(self.device.connections[source].conj().T)\n\n gamma_source = self.calculators[source].gamma()\n gamma_drain = self.calculators[drain].gamma()\n\n vals, vecs = linalg.eig(gamma_source)\n vald, vecd = linalg.eig(gamma_drain)\n\n return numpy.diag((-self.calculators[drain].v_r.real)**.5).\\\n dot(self.calculators[drain].states_ri).dot(G_sd).\\\n dot(self.calculators[source].states_li.conj().T).\\\n dot(numpy.diag(self.calculators[source].v_l.real**.5))", "def nonsquare_matrix_mult(matrix):\n\n #Setup openCL\n dev, ctx, queue = setup_CL()\n\n #openCL Kernel\n #Naive approach\n kernel_code = \"\"\"\n #define MATRIX_ROW_SIZE %(matrix_row_size)s\n #define MATRIX_COL_SIZE %(matrix_col_size)s\n\n __kernel void func(__global float* a, __global float* b, __global float* transposed) {\n\n unsigned int i = get_local_id(0);\n __local float tmp[MATRIX_ROW_SIZE*MATRIX_COL_SIZE];\n\n //Initialize tmp to 0\n //Initialize output b to 0 for this thread\n for(int k=0; k<MATRIX_COL_SIZE*MATRIX_ROW_SIZE; k++){\n tmp[k] = 0;\n }\n\n for(int k=0; k<MATRIX_ROW_SIZE; k++){\n b[k + MATRIX_ROW_SIZE*get_group_id(0)] = 0;\n }\n\n //Transpose output\n transposed[i*MATRIX_ROW_SIZE+get_group_id(0)]=a[i+get_local_size(0)*get_group_id(0)];\n\n for(int j=0; j < MATRIX_ROW_SIZE; j++){\n tmp[j+MATRIX_ROW_SIZE*i] = a[i+get_local_size(0)*get_group_id(0)]*a[i+j*MATRIX_COL_SIZE];\n }\n\n // Store to output\n for(int j=0; j < MATRIX_ROW_SIZE; j++){\n for(int k=0; k < MATRIX_COL_SIZE; k++){\n if(i==0){\n b[j + MATRIX_ROW_SIZE*get_group_id(0)] += tmp[j+MATRIX_ROW_SIZE*k];\n }\n }\n }\n barrier(CLK_LOCAL_MEM_FENCE);\n }\n \"\"\"\n\n #Move data to device\n matrix_float = matrix.astype(np.float32)\n matrix_gpu = cl.array.to_device(queue, matrix_float)\n transposeMult_gpu = cl.array.empty(queue, (matrix.shape[0], matrix.shape[0]), np.float32)\n transposed_gpu = cl.array.empty(queue, (matrix.shape[1],matrix.shape[0]), np.float32)\n\n matrix_row_size = np.int32(matrix.shape[0])\n matrix_col_size = np.int32(matrix.shape[1])\n\n #Calculate workItems, workGroup size, workGroups for input\n matrix_val_count = matrix_float.shape[0]*matrix_float.shape[1]\n xWorkItems = min(int(matrix_row_size),1024)\n yWorkItems = min(int(matrix_col_size),1024)\n totalWorkItems = float(xWorkItems*yWorkItems)\n groups = np.int(max(np.ceil(matrix_val_count / xWorkItems),1))\n\n # print(\"workItems: %s, matrix_val_count: %s, groups: %s\" % (totalWorkItems, matrix_val_count, groups))\n\n # update template with current runtime requirements\n kernel = kernel_code % 
{\n 'matrix_row_size': matrix_row_size,\n 'matrix_col_size': matrix_col_size\n }\n\n #Launch kernel and time it\n #Set global ID, workItems, workGroups\n prg = cl.Program(ctx, kernel).build()\n start = time.time()\n event = prg.func(queue, (yWorkItems*xWorkItems,1),(groups,1), matrix_gpu.data, transposeMult_gpu.data, transposed_gpu.data)\n\n #event.wait()\n runtime = time.time()-start\n\n #Save output\n transposedMult = transposeMult_gpu.get()\n transposed = transposed_gpu.get()\n\n # print('openCL_opt0 %d x %d transpose-mult time: %.2E' % (matrix.shape[0], matrix.shape[1], runtime))\n # print('openCL_opt0 transposed==goldenTransposed: %s' % np.allclose(transposed, np.transpose(matrix)))\n # print('openCL_opt0 mult==goldenMult: %s' % np.allclose(transposedMult, matrix.dot(np.transpose(matrix))))\n if not(np.allclose(transposedMult, matrix.dot(np.transpose(matrix)))):\n # print('Original Matrix:\\n %s' % matrix)\n print('openCL_opt0 transposed val:\\n %s' % transposed)\n print('golden transpose-mult:\\n %s' % matrix.dot(np.transpose(matrix)))\n transposedMult[(transposedMult>0) & (transposedMult<1)] = -1\n print('openCL_opt0 mult val:\\n %s' % transposedMult)\n print('openCL_opt0 transpose-mult:\\n %s' % np.isclose(transposedMult,matrix.dot(np.transpose(matrix))))\n # print('--------------------')\n\n return [transposedMult, runtime]" ]
[ "0.6601181", "0.5991661", "0.59131616", "0.5783467", "0.5772154", "0.5769376", "0.5727504", "0.571512", "0.5695456", "0.56801045", "0.5649581", "0.56263125", "0.56191933", "0.55479634", "0.54599106", "0.54450864", "0.5416208", "0.54120284", "0.54111737", "0.5408678", "0.5330335", "0.53215504", "0.5311331", "0.5304429", "0.5302084", "0.5282557", "0.5280879", "0.5280879", "0.5279878", "0.52218014", "0.5221516", "0.5166585", "0.51631075", "0.51578563", "0.5156029", "0.5138328", "0.51328987", "0.5127175", "0.51079917", "0.5105579", "0.5096786", "0.508741", "0.50663954", "0.506401", "0.5062737", "0.5052265", "0.50426686", "0.5040259", "0.5040006", "0.5039424", "0.5038771", "0.50374", "0.5033515", "0.5032085", "0.49958384", "0.49882242", "0.4986986", "0.4986514", "0.496909", "0.4965269", "0.49600902", "0.49530604", "0.4944436", "0.49400908", "0.49393263", "0.4933858", "0.4929446", "0.49233308", "0.492074", "0.4917314", "0.4913255", "0.4911998", "0.49074602", "0.49068737", "0.49034554", "0.4900525", "0.4898098", "0.48977366", "0.48831907", "0.48816225", "0.48800004", "0.48786494", "0.48728952", "0.486966", "0.48694625", "0.48621556", "0.4861167", "0.4852716", "0.48521483", "0.48466995", "0.48448992", "0.48373464", "0.48372343", "0.4836366", "0.4831263", "0.48272285", "0.48268318", "0.48260692", "0.48241568", "0.482107" ]
0.5754054
6
Create matrix associated with inversion based on Aganj et al. formalism.
def interp_matrix_new(qpnts, spnts, npgrid, nsamp, deg_max):
    # Initialize
    A = np.zeros((nsamp,npgrid))

    # Create matrix
    for i in xrange(nsamp):
        for j in xrange(npgrid):
            cosTheta = np.dot(spnts[i], qpnts[j])
            if(abs(cosTheta)>1):
                cosTheta = np.sign(cosTheta)
            A[i,j] = inv_funk_radon_even_kernel(cosTheta, deg_max)
    return A
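For context, a minimal sketch of how the document's matrix construction could be exercised is shown below. It is not part of the dataset record: the stand-in kernel, the random unit vectors, and the array sizes are assumptions made only so the example runs; the real inv_funk_radon_even_kernel comes from the Aganj et al. formalism named in the query.

import numpy as np

def inv_funk_radon_even_kernel(cos_theta, deg_max):
    # Stand-in stub so the sketch runs; NOT the real Aganj et al. kernel.
    return cos_theta ** 2

def interp_matrix_new(qpnts, spnts, npgrid, nsamp, deg_max):
    # Same construction as the document above, with range() replacing Python 2 xrange().
    A = np.zeros((nsamp, npgrid))
    for i in range(nsamp):
        for j in range(npgrid):
            cos_theta = np.dot(spnts[i], qpnts[j])
            if abs(cos_theta) > 1:  # clamp numerical overshoot past +/-1
                cos_theta = np.sign(cos_theta)
            A[i, j] = inv_funk_radon_even_kernel(cos_theta, deg_max)
    return A

# Assumed example inputs: random unit vectors on the sphere.
rng = np.random.default_rng(0)
qpnts = rng.normal(size=(20, 3))
qpnts /= np.linalg.norm(qpnts, axis=1, keepdims=True)
spnts = rng.normal(size=(15, 3))
spnts /= np.linalg.norm(spnts, axis=1, keepdims=True)
A = interp_matrix_new(qpnts, spnts, npgrid=20, nsamp=15, deg_max=8)
print(A.shape)  # -> (15, 20)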
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n # TODO - your code here\n inverse = []\n if self.h == 1:\n temp = []\n temp.append(1/self.g[0][0])\n inverse.append(temp)\n else:\n identity_matrix = identity(self.h)\n det_term = 1/self.determinant()\n trace_term = self.trace()\n # implement intermediate scaling step locally\n # trace_x_I = trace_term * identity_matrix\n trace_x_I = []\n for i in range(len(self.g)):\n temp_row = []\n for j in range(len(self.g[i])):\n temp_row.append(trace_term * identity_matrix[i][j])\n trace_x_I.append(temp_row)\n # implement sub-traction locally\n # sub_term = trace_x_I - self.g\n sub_term = []\n for i in range(len(trace_x_I)):\n temp_row = []\n for j in range(len(trace_x_I[i])):\n temp_row.append(trace_x_I[i][j] - self.g[i][j])\n sub_term.append(temp_row)\n # implement final scaling step locally\n # inverse = det_term * sub_term\n inverse = []\n for i in range(len(sub_term)):\n temp_row = []\n for j in range(len(sub_term[i])):\n temp_row.append(det_term * sub_term[i][j])\n inverse.append(temp_row)\n return Matrix(inverse)\n # TODO - your code here", "def inverse(self):\r\n \r\n Mi=mat4()\r\n d=self.determinant()\r\n for i in range(4):\r\n for j in range(4):\r\n sign=1-((i+j)%2)*2\r\n m3=self._submat(i,j)\r\n Mi[j,i]=sign*m3.determinant()/d\r\n return Mi", "def get_stain_matrix(I):", "def incidence_matrix(self):\n try: \n return self._incidence_matrix\n except AttributeError:\n self._incidence_matrix = matrix(ZZ, len(self.Vrepresentation()), \n len(self.Hrepresentation()), 0)\n for V in self.Vrep_generator():\n for H in self.Hrep_generator():\n if self._is_zero(H*V):\n self._incidence_matrix[V.index(),H.index()] = 1\n\n return self._incidence_matrix", "def getInverseMatrix(self) -> CMatrix4:\n ...", "def inverse(self):\n # TODO\n # detA\n if not self.is_square():\n raise(\n ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(\n NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n\n mD = self.determinant()\n if self.h == 1:\n if self.g[0][0] = 0:\n raise(NotImplementedError,\n \"The 1x1 Matrix contains 0 can't inverse\")\n else:\n return [[1 / self.g[0][0]]] \n for i in range(self.h): # Calculates the inverse of a 2x2 Matrix.\n my_Matrix = zeroes(2, 2)\n my_Matrix.g[1][1] = self.g[0][0] / mD\n my_Matrix.g[0][0] = self.g[1][1] / mD\n my_Matrix.g[0][1] = - self.g[0][1] / mD\n my_Matrix.g[1][0] = - self.g[1][0] / mD\n return my_Matrix\n\n # trace A\n # ไธŽ็Ÿฉ้˜ตTraceA * I identity ๅ•ไฝ็Ÿฉ้˜ต", "def inverseN(self):\r\n result = Matrix(self.rows, self.columns)\r\n for r in range(self.rows):\r\n for c in range(self.columns):\r\n result.mat[r][c] = self.cofactor(r, c)\r\n result.out()\r\n result = result.transpose()\r\n det = self.determinant()\r\n print(\"1/(\" + str(det) + \")\")\r\n result.out()\r\n return result", "def initiateVMatrixes():\n global v, vNew, vExact\n # Initialize the grid to 0\n v = np.zeros((n+1, n+1)) # matrix of v, index are i: row, j:column\n # Set the boundary conditions\n for i in range(1,n):\n v[0,i] = 10\n v[n,i] = 10\n v[i,0] = 10\n v[i,n] = 10\n # Exact solution\n vExact = np.copy(v)\n for i in range(1,n):\n for j in range(1,n):\n vExact[i,j] = 10\n # Initial guess\n for i in range(1,n):\n for j in range(1,n):\n v[i,j] = 0.9*vExact[i,j]\n vNew = np.copy(v)", "def _dmatrix(kn_u, kn_d):\n d = 
np.zeros((kn_u.size, 4, 4), np.complex128)\n d_inv = np.zeros_like(d)\n\n d[:, 0, 0] = 1\n d[:, 0, 1] = 1\n d[:, 1, 0] = kn_u\n d[:, 1, 1] = -kn_u\n\n d[:, 2, 2] = 1\n d[:, 2, 3] = 1\n d[:, 3, 2] = kn_d\n d[:, 3, 3] = -kn_d\n\n # an analytic matrix inverse saves time\n inv_kn_u = 0.5 / kn_u\n inv_kn_d = 0.5 / kn_d\n\n d_inv[:, 0, 0] = 0.5\n d_inv[:, 0, 1] = inv_kn_u\n d_inv[:, 1, 0] = 0.5\n d_inv[:, 1, 1] = -inv_kn_u\n\n d_inv[:, 2, 2] = 0.5\n d_inv[:, 2, 3] = inv_kn_d\n d_inv[:, 3, 2] = 0.5\n d_inv[:, 3, 3] = -inv_kn_d\n\n return d, d_inv", "def build_Xij_inv_matrix(self,Nmesh=64):\n H0, F = self.cosmo.H0, self.cosmo.F\n Lbox = self.attrs['Lbox']\n kgrid = initialize_kgrid(Nmesh,Lbox)\n kmag_grid = np.linalg.norm(kgrid,axis=3)\n w_grid = self.cosmo.Pk_lin(kmag_grid)*(1/Lbox**3)*np.exp(-kmag_grid*kmag_grid*self.RG*self.RG)\n k2 = kmag_grid**2\n k2[0,0,0] = 1 \n #----------------------------------------------------\n cspace = np.arange(0,18)\n \n xij_tensor = [[np.sum(np.conj(Hhats[i](kgrid,k2,H0,F))*Hhats[j](kgrid,k2,H0,F)*w_grid)\n for j in cspace[self.cmask]] for i in cspace[self.cmask]]\n \n xij_tensor = np.array(xij_tensor)\n self.xij_tensor_inv = np.linalg.inv(xij_tensor.real)", "def build_stoichiometric_matrix(incidence_matrix, complexes_matrix):\n\n #@ is matrix multiplication\n\n #print(\"complexes matrix\")\n\n #This is matrix N in toric paper\n return complexes_matrix.transpose() @ incidence_matrix", "def I(n):\n identity = Matrix(n,n)\n print identity.matrix\n index = 0 \n for i in range(identity.nrows):\n for j in range(identity.ncols):\n identity.matrix[i][index] = 1\n index += 1\n\n\n flat = []\n for i in range(identity.nrows):\n for j in range(identity.ncols):\n flat.append(identity.matrix[i][j])\n\n\n return identity", "def inv(self, Am):\r\n # Section 1: MAmke sure Am cAmn be inverted.\r\n self.check_squareness(Am)\r\n self.check_non_singular(Am)\r\n \r\n # Section 2: MAmke copies of Am & I, AmM & IM, to use for row ops\r\n n = len(Am)\r\n AmM = self.copy_matrix(Am)\r\n I = self.identity_matrix(n)\r\n IM = self.copy_matrix(I)\r\n \r\n # Section 3: Perform row operAmtions\r\n indices = list(range(n)) # to Amllow flexible row referencing ***\r\n for fd in range(n): # fd stAmnds for focus diAmgonAml\r\n fdScAmler = 1.0 / AmM[fd][fd]\r\n # FIRST: scAmle fd row with fd inverse. 
\r\n for j in range(n): # Use j to indicAmte column looping.\r\n AmM[fd][j] *= fdScAmler\r\n IM[fd][j] *= fdScAmler\r\n # SECOND: operAmte on Amll rows except fd row Ams follows:\r\n for i in indices[0:fd] + indices[fd+1:]: \r\n # *** skip row with fd in it.\r\n crScAmler = AmM[i][fd] # cr stAmnds for \"current row\".\r\n for j in range(n): \r\n # cr - crScAmler * fdRow, but one element Amt Am time.\r\n AmM[i][j] = AmM[i][j] - crScAmler * AmM[fd][j]\r\n IM[i][j] = IM[i][j] - crScAmler * IM[fd][j]\r\n \r\n return IM", "def vandermonde_matrix(x):\n m = size(x) \n n = m+1\n V = ones((m, n))\n for j in range(1, n):\n for i in range(0, m):\n V[i,j] = pow(x[i],j) \n return V", "def inverse(self):\n self.check_square()\n\n\n N = self.rows\n\n inverse = make_matrix(N, N)\n\n # Solve on a per-column basis using Ax = b formalism\n for j in range(N):\n b = make_matrix(N, 1)\n b[j, 0] = 1\n\n x = self.solve_linear_system(b)\n\n for i in range(N):\n inverse[i, j] = x[i, 0]\n\n return inverse", "def CreateMatrix(self) -> BaseMatrix:", "def CreateMatrix(self) -> BaseMatrix:", "def Dinvmatrix(N):\r\n import numpy as np\r\n D = np.zeros((N,N,2))\r\n D[:,:,0] = np.diag((np.append(np.ones((1,int(N/2))),np.zeros((1,int(N/2))))))\r\n D[:,:,1] = np.diag((np.append(np.zeros((1,int(N/2))),np.ones((1,int(N/2))))))\r\n return D", "def pseudoInversa(J):\n\tJinv = np.linalg.pinv(J)\n\treturn Jinv", "def inverse(self):\n # find the determinant of the matrix\n determinant = self.determinant()\n # find the matrix of minors of the matrix\n matrix_of_minors = self.matrix_of_minors()\n # find the cofactor of the matrix of minors\n cofactor_matrix = self.cofactor_matrix(matrix_of_minors)\n # find the transpose of the cofactor matrix\n transpose_cofactor_matrix = self.transpose(cofactor_matrix)\n # find the adjugate (inverse) matrix\n inverse_matrix = self.adjugate_matrix(determinant, transpose_cofactor_matrix)\n\n return inverse_matrix", "def calculate_posvij_matrices(main_tetrad_ark):\n\n # Import all the possible solutions to the Vij matrices\n vij_possibilities = matrix_outerprod_calc.illuminator_of_elfes()\n vij_matrices = []\n\n print(\" \")\n print(\" Calculating Vij matrices\")\n print(\" \")\n # for i in range(0, len(main_tetrad_ark)):\n for i in range(0, len(vij_possibilities)):\n tet_i = [x[1] for x in main_tetrad_ark[i]]\n tri_tet = [np.transpose(i) for i in tet_i]\n print(\"# ********************************\")\n # print(\" \")\n print(\"MATRIX i: \", i)\n print(\" \")\n for j in range(0, len(main_tetrad_ark)):\n tet_j = [x[1] for x in main_tetrad_ark[j]]\n trj_tet = [np.transpose(j) for j in tet_j]\n vij_temp = []\n # print(\"# ********************************\")\n print(\" \")\n print(\"MATRIX j: \", j)\n temp_zero = np.zeros((4,4), dtype=int)\n for x in range(0,len(tet_i)):\n test_1half = np.dot(tri_tet[x],tet_j[x])\n test_2half = np.dot(trj_tet[x],tet_i[x])\n test_difs = np.subtract(test_1half, test_2half)\n # print(\" \")\n # print(test_difs)\n temp_mat = np.dot(tri_tet[x],tet_j[x]) - np.dot(trj_tet[x],tet_i[x])\n vij_temp.append(temp_mat)\n # print(\"\")\n temp_add1 = np.add(vij_temp[0], vij_temp[1])\n temp_add2 = np.add(temp_add1, vij_temp[2])\n tempf = np.add(temp_add2, vij_temp[3])\n # tempf = np.divide(temp_add3, 2)\n for ijx in vij_possibilities:\n if np.array_equal(temp_addf, ijx[0]):\n print(\"*************$$$$$$$$$$$$$$$$$$***************** \")\n print(\"l-solution found:\", ijx[1])\n print(temp_addf)\n print(\"\")\n print(ijx[0])\n if np.array_equal(temp_addf, temp_zero):\n pass\n else:\n 
vij_matrices.append(temp_addf)\n # print(\"\")\n print(temp_addf)\n # vij_matrices.append(temp_addf)\n vijmats_size = sys.getsizeof(vij_matrices)\n print(\"Size of Vij Matrices list: bytes / kilobytes:\", vijmats_size, vijmats_size/1024)\n print(\"Length of Vij Matrices\")\n print(len(vij_matrices))\n print(vij_matrices)\n pass", "def generator_matrix(self):\n self.generator_mat = np.zeros((self.k, self.n), dtype=int)\n A_matrix = np.ones((self.k, self.n-self.k), dtype=int)\n\n identity_i = np.identity(self.k, dtype=int)\n self.generator_mat[:, :self.k] = identity_i\n\n # This loop edits the A_matrix to make the column vectors linearly ind.\n for x in range(self.n-self.k):\n A_matrix[x, x] = 0\n\n self.generator_mat[:, self.k:] = A_matrix\n\n# for i in range(self.k):\n# print(self.generator_mat[i,:])\n\n return self.generator_mat", "def inversion(self, index=0):\r\n i = [(j[0]+index) % 12 for j in self.__full]\r\n return TWToneMatrix(i)", "def inversion(origin=(0, 0, 0)):\n mat = -np.eye(4)\n mat[3, 3] = 1\n mat[0:3, 3] = 2 * np.array(origin)\n return SymmOp(mat)", "def matrix_inv(mat):\n\ta = mat[0,0]\n\tb = mat[0,1]\n\tc = mat[0,2]\n\td = mat[1,0]\n\te = mat[1,1]\n\tf = mat[1,2]\n\tg = mat[2,0]\n\th = mat[2,1]\n\ti = mat[2,2]\n\n\tdet = b*f*g + c*d*h + a*e*i - a*f*h - b*d*i - c*e*g\n\n\tinvmat = np.zeros((3,3))\n\tinvmat[0,0] = (e*i - f*h) / det\n\tinvmat[0,1] = (c*h - b*i) / det\n\tinvmat[0,2] = (b*f - c*e) / det\n\tinvmat[1,0] = (f*g - d*i) / det\n\tinvmat[1,1] = (a*i - c*g) / det\n\tinvmat[1,2] = (c*d - a*f) / det\n\tinvmat[2,0] = (d*h - e*g) / det\n\tinvmat[2,1] = (b*g - a*h) / det\n\tinvmat[2,2] = (a*e - b*d) / det\n\treturn invmat", "def create_design_matrix(self):\n self.design_matrix = np.zeros([self.n, self.p])\n self.design_matrix[:,0] = 1.0 #First comlum is 1 (bias term)\n\n for i in range(self.n):\n for j in range(1,self.p):\n self.design_matrix[i,j] = self.phi(self.x[i],j)\n\n self.design_eigvals = np.linalg.eigvals(self.design_matrix.T@self.design_matrix)", "def calculate_posvij_matrices(main_tetrad_ark):\n\n\t# Import all the possible solutions to the Vij matrices\n\tvij_possibilities = matrix_outerprod_calc.illuminator_of_elfes()\n\tvij_matrices = []\n\n\tprint(\"\t\t\t\t\t\t\t\")\n\tprint(\"\tCalculating Vij matrices\")\n\tprint(\"\t\t\t\t\t\t\t\")\n\t# for i in range(0, len(main_tetrad_ark)):\n\tfor i in range(0, len(vij_possibilities)):\n\t\ttet_i = [x[1] for x in main_tetrad_ark[i]]\n\t\ttri_tet = [np.transpose(i) for i in tet_i]\n\t\tprint(\"# ********************************\")\n\t\t# print(\"\t\t\t\t\t\t\t\t \")\n\t\tprint(\"MATRIX i: \", i)\n\t\tprint(\"\t\t\t\t\t\t\t\t \")\n\t\tfor j in range(0, len(main_tetrad_ark)):\n\t\t\ttet_j = [x[1] for x in main_tetrad_ark[j]]\n\t\t\ttrj_tet = [np.transpose(j) for j in tet_j]\n\t\t\tvij_temp = []\n\t\t\t# print(\"# ********************************\")\n\t\t\tprint(\"\t\t\")\n\t\t\tprint(\"MATRIX j: \", j)\n\t\t\ttemp_zero = np.zeros((4,4), dtype=int)\n\t\t\tfor x in range(0,len(tet_i)):\n\t\t\t\ttest_1half = np.dot(tri_tet[x],tet_j[x])\n\t\t\t\ttest_2half = np.dot(trj_tet[x],tet_i[x])\n\t\t\t\ttest_difs = np.subtract(test_1half, test_2half)\n\t\t\t\t# print(\" \")\n\t\t\t\t# print(test_difs)\n\t\t\t\ttemp_mat = np.dot(tri_tet[x],tet_j[x]) - np.dot(trj_tet[x],tet_i[x])\n\t\t\t\tvij_temp.append(temp_mat)\n\t\t\t\t# print(\"\")\n\t\t\ttemp_add1 = np.add(vij_temp[0], vij_temp[1])\n\t\t\ttemp_add2 = np.add(temp_add1, vij_temp[2])\n\t\t\ttempf = np.add(temp_add2, vij_temp[3])\n\t\t\t# tempf = np.divide(temp_add3, 2)\n\t\t\tfor 
ijx in vij_possibilities:\n\t\t\t\tif np.array_equal(temp_addf, ijx[0]):\n\t\t\t\t\tprint(\"*************$$$$$$$$$$$$$$$$$$***************** \")\n\t\t\t\t\tprint(\"l-solution found:\", ijx[1])\n\t\t\t\t\tprint(temp_addf)\n\t\t\t\t\tprint(\"\")\n\t\t\t\t\tprint(ijx[0])\n\t\t\tif np.array_equal(temp_addf, temp_zero):\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tvij_matrices.append(temp_addf)\n\t\t\t# print(\"\")\n\t\t\tprint(temp_addf)\n\t\t\t# vij_matrices.append(temp_addf)\n\t\tvijmats_size = sys.getsizeof(vij_matrices)\n\t\tprint(\"Size of Vij Matrices list: bytes / kilobytes:\", vijmats_size, vijmats_size/1024)\n\tprint(\"Length of Vij Matrices\")\n\tprint(len(vij_matrices))\n\tpass", "def _init_model(self):\n self.A_inv = np.zeros(shape=(self.numUsers, self.d, self.d))\n self.b = np.zeros(shape=(self.numUsers, self.d))\n self.w = np.zeros(shape=(self.numUsers, self.d))\n for i, mat in enumerate(self.A_inv):\n self.A_inv[i] = np.eye(self.d)", "def to_s_matrix(w,v):\n pass", "def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n\n # TODO - your code here\n if self.h == 1:\n inverse = [[1/self.g[0][0]]];\n else:\n a = self.g[0][0];\n b = self.g[0][1];\n c = self.g[1][0];\n d = self.g[1][1];\n if(a*d==b*c):\n raise ValueError('matrix does not have a inverse!');\n else:\n weigh = 1/(a*d-b*c);\n inverse = [[weigh*d,weigh*-1*b],[weigh*-1*c,weigh*a]];\n return Matrix(inverse);", "def make_project_matrix(X):\n X = np.mat(X)\n return np.eye(X.shape[0]) - (X*(np.linalg.inv(X.T*X)*X.T))", "def _calc_matrix(self):\n\t\tz = self.zoom\n\t\talloc = self.allocation\n\t\tif self.image:\n\t\t\tiw, ih = self.image.get_width(), self.image.get_height()\n\t\telse:\n\t\t\tiw, ih = 0, 0\n#\t\tif __debug__: print self._vadj.lower, self._vadj.value, self._vadj.upper\n\t\t\n\t\ti2w = cairo.Matrix(\n\t\t\tz,0,\n\t\t\t0,z,\n\t\t\t-self._hadj.value if alloc.width < iw*z else (alloc.width - iw*z)/2, \n\t\t\t-self._vadj.value if alloc.height < ih*z else (alloc.height - ih*z)/2,\n\t\t\t)\n\t\t\n\t\tself._i2w_matrix = i2w\n\t\t\n\t\tw2i = cairo.Matrix(*i2w) #copy\n\t\tw2i.invert()\n\t\tself._w2i_matrix = w2i", "def make_matrix(self):\n self.leftmost_element()\n self.rightmost_element()\n self.interior_element()\n\n #Transforms all sympy symbolic expressions for the lagrange polynomials into callable functions.\n self.psi_funcs = [sym.lambdify([self.x], self.psi[i], modules = \"numpy\") for i in range(3*self.Ne)]", "def _get_inv(self):\n m,d = self.B.shape\n Im = np.eye(m)\n Id = np.eye(d)\n BBt = self.B@self.B.T\n I_BBt_inv = np.linalg.pinv(Im + BBt)\n \n return (1/self.alpha)*(Id - self.B.T@( I_BBt_inv@self.B/self.alpha))", "def compute_trans_matrix( self, n_components ):\n matrix = np.zeros((n_components,n_components))\n matrix[-1,-1] = 1.\n\n for i in range(0,matrix.shape[0]-1):\n matrix[i,i:i+2] = [0.5,0.5]\n\n return matrix", "def create_A_matrix(J,sigma):\n A = np.zeros((3,J),dtype=complex)\n A[0,1] = 0\n A[0,2:] = -sigma\n A[1,:] = 1+2*sigma\n A[1,0] = 1\n A[1,-1] = 1\n A[2,:-2] = -sigma\n A[2,-2] = 0\n return A", "def HamiltonianMatrix(self):\n self.Inter = sp.Matrix([[0,self.t],[self.t,0]])\n self.Intra1 = sp.Matrix([[0,v],[w,0]])\n self.Intra2 = sp.Matrix([[0,w],[v,0]])\n H = sp.Matrix([])\n for i in range(1, self.N+1):\n fila = sp.Matrix([])\n for j in range(1, self.N+1):\n if j==i:\n fila = fila.row_join(self.Inter)\n elif j==i+1:\n fila = 
fila.row_join(self.Intra1)\n elif j==i-1:\n fila = fila.row_join(self.Intra2)\n else:\n fila = fila.row_join(sp.Matrix([[0,0],[0,0]]))\n H = H.col_join(fila) \n H.simplify()\n #printer = StrPrinter()\n #print(H.table(printer,align='center'))\n self.H = H", "def invertMatrixZN(M, N):\n n = M.shape[0] # shape = (nzeilen, nspalten), also shape[0] = nzeilen\n M = M.copy() # nicht an der Originalmatrix rumspielen\n I = np.identity(n, int) # Einheitsmatrix -> wird spรคter das Ergebnis\n for row in range(n):\n if not invertierbar(M[row, row], N):\n # mรผssen Zeilen tauschen\n for j in range(row+1, n):\n if invertierbar(M[j, row], N):\n tmp = M[row, :].copy()\n M[row, :] = M[j, :]\n M[j, :] = tmp\n tmp = I[row, :].copy()\n I[row, :] = I[j, :]\n I[j, :] = tmp\n break\n else:\n # hier kommen wir hin wenn die for-Schleife nicht durch ein\n # break beendet wurde, also keine geeignete Zeile zum Tauschen\n # existiert\n raise ValueError(\"Matrix nicht invertierbar\")\n # Zeile mit dem Inversen des Pivot-Elements multiplizieren, um eine 1\n # auf der Diagonalen zu erreichen\n faktor = invertZN(M[row, row], N)\n M[row, :] = (M[row, :] * faktor) % N\n I[row, :] = (I[row, :] * faktor) % N\n \n # Nullen unterhalb des aktuellen Pivots erzeugen\n for j in range(row + 1, n):\n if invertierbar(M[j, row], N):\n faktor = invertZN(M[j, row], N)\n M[j, :] = (M[j, :] * faktor - M[row, :]) % N\n I[j, :] = (I[j, :] * faktor - I[row, :]) % N\n elif M[j, row] != 0:\n # In Z_N kรถnnen Nullteiler auftreten, z.B. die 8 in Z_{12}.\n # Um dort eine 0 zu erzeugen, mรผssen wir mit dem kgV der beiden\n # Zahlen multiplizieren. Da ggt*kgv = mn gilt, kรถnnen wir dazu\n # den bereits implementierten ggt-Algorithmus nehmen.\n faktor = N * M[j, row] // krypto1.ggT(N, M[j, row])\n M[j, :] = (M[j, :] * faktor) % N\n I[j, :] = (I[j, :] * faktor) % N\n # jetzt haben wir eine obere Dreiecksmatrix. 
Um daraus eine Diagonalmatrix\n # zu machen, mรผssen wir nun noch einmal von unten nach oben durchgehen\n # um die Eintrรคge oberhalb der Diagonalen zu Nullen zu machen.\n for row in range(n-1, -1, -1):\n for j in range(row + 1, n):\n faktor = M[row, j]\n M[row, :] = (M[row, :] - faktor*M[j, :]) % N\n I[row, :] = (I[row, :] - faktor*I[j, :]) % N\n return I", "def laplace_matrix(self):\n n = self.number_of_vertices\n laplace_matrix = np.zeros((n, n))\n for i in range(n):\n laplace_matrix[i][i] = 1\n vertice = self.list_of_vertices[i]\n for edge in vertice.edges_list:\n laplace_matrix[i][edge.linked[1].index] = 1\n return laplace_matrix", "def invariant_bilinear_form(self):\n from sage.matrix.constructor import identity_matrix\n m = identity_matrix(self.base_ring(), self.degree())\n m.set_immutable()\n return m", "def matI(a):\n shape=matShape(a)\n if shape[0]!=shape[1]: raise ValueError\n n=shape[0]\n ret=matZeros((n,n*2))\n for i in range(n):\n for j in range(n):\n matSet(ret,i,j,matGet(a,i,j))\n for i in range(n):\n matSet(ret,i,i+n,1)\n for row in range(n):\n rm=row\n ap=abs(matGet(ret,rm,row))\n for rint in range(row+1,n):\n p=abs(matGet(ret,rint,row))\n if ap<p:\n ap=p\n rm=rint\n if 0.000000001 > ap:\n return matCopy(a) # Not invertible\n di=matGet(ret,rm,row)\n if rm!=row:\n for i in range(n*2):\n t=matGet(ret,rm,i)\n matSet(ret,rm,i,matGet(ret,row,i))\n matSet(ret,row,i,t)\n idi=1.0/di\n for rint in range(row+1,n):\n f=idi*matGet(ret,rint,row)\n if f!=0:\n for co in range(row,n*2):\n matSet(ret,rint,co,matGet(ret,rint,co)-f*matGet(ret,row,co))\n row=n-1\n while row>=0:\n ic=1.0/matGet(ret,row,row)\n for rint in range(row):\n icx=ic*matGet(ret,rint,row)\n if icx!=0:\n for co in range(row, n*2):\n matSet(ret,rint,co,matGet(ret,rint,co)-icx*matGet(ret,row,co))\n matSet(ret,row,row,ic*matGet(ret,row,row))\n for co in range(n,n*2):\n matSet(ret,row,co,ic*matGet(ret,row,co))\n row-=1\n return matPart(ret,0,n,n,n*2)", "def ion_matrix(ion_coefficients, atomic_number, ion_number):\n offdiag = np.zeros(atomic_number)\n index = ion_coefficients.index\n for i in index:\n offdiag[i] = ion_coefficients.loc[i]\n diag = np.hstack([-offdiag, np.zeros(1)])\n return (np.diag(diag) + np.diag(offdiag, k=-1))[ion_number, :]", "def makeIncidenceMatrix(self, variable, token, mechanism, network):\n model = self.model\n size = self.size_of_variable( variable )\n mat = np.zeros(size)\n for i, (label, node) in enumerate(model.nodes.items()):\n if node.named_network in self.nw_nnw_dict[network]:\n for j, (arc_label, arc) in enumerate(model.arcs.items()):\n if str(node.label) == str(arc.source) and mechanism == arc.mechanism:\n mat[i, j] = -1.\n elif str(node.label) == str(arc.sink) and mechanism == arc.mechanism:\n mat[i, j] = 1.\n return mat", "def __init__(self, probabilities, states):\n\n dim = np.shape(states[0])[0] #length of the state vector\n matrix_representation = np.zeros((dim, dim)) #build empty matrix\n\n for (prob, state) in zip(probabilities, states):\n matrix_representation += prob * state @ np.conjugate(state).T\n\n self.matrix_rep = matrix_representation", "def generate_adj_matrix(vertices, inhibition_degree=2):\n value_pool = [-1, 1, 0] + [0]*10 + [1]*2\n matrix = [[0 for _ in xrange(vertices)] for _ in xrange(vertices)]\n for i, row in enumerate(matrix):\n for j, element in enumerate(row):\n if i > j:\n if row.count(-1) + row.count(1) >= 2 and row.count(-1) != 0:\n matrix[i][j] = 0\n elif row.count(-1) == 1:\n matrix[i][j] = rnd.choice(value_pool)\n elif row.count(-1) == 2:\n matrix[i][j] = 
0\n else:\n matrix[i][j] = rnd.choice(value_pool)\n matrix = [list(i) for i in zip(*matrix)]\n return matrix", "def compute_matrix(self):\n\n fac = self.a / self.dx ** 2\n\n diagonal = np.ones(self.nx) * 2 * fac\n lower = np.ones(self.nx - 1) * -fac\n upper = np.ones(self.nx - 1) * -fac\n\n matrix = sp.diags(\n diagonals=[diagonal, lower, upper],\n offsets=[0, -1, 1], shape=(self.nx, self.nx),\n format='csr')\n\n return matrix", "def mounting_matrix(self):\n # fmt: off\n count = 0\n for x in range(self.ntheta):\n self.M[count][count] = 1\n self.f[count][0] = self.p_in\n count = count + self.nz - 1\n self.M[count][count] = 1\n self.f[count][0] = self.p_out\n count = count + 1\n count = 0\n for x in range(self.nz - 2):\n self.M[self.ntotal - self.nz + 1 + count][1 + count] = 1\n self.M[self.ntotal - self.nz + 1 + count][self.ntotal - self.nz + 1 + count] = -1\n count = count + 1\n count = 1\n j = 0\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i][self.ntheta - 1])\n self.M[count][self.ntotal - 2 * self.nz + count] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1, j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][self.ntheta - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n self.M[count][count + 1] = d\n e = (1 / self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = self.nz + 1\n for j in range(1, self.ntheta - 1):\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i, j - 1])\n self.M[count][count - self.nz] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1][j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][j - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n self.M[count][count + 1] = d\n e = (1 / self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = count + 2\n count = 1\n for j in range(self.ntheta - 1):\n for i in range(1, self.nz - 1):\n if j == 0:\n self.f[count][0] = (self.c0w[i][j] - self.c0w[i][self.ntheta - 1]) / self.dtheta\n else:\n self.f[count][0] = (self.c0w[i, j] - self.c0w[i, j - 1]) / self.dtheta\n count = count + 1\n count = count + 2\n # fmt: on", "def generate_design_matrix(bs, v):\n #design matrix has 1 in all first rows (to detect image b=0 SIGNAL)\n x = np.ones((bs.shape[0], 7))\n for i, b in enumerate(bs):\n if b == 0:\n x[i, 1:] = 0\n else:\n x[i, 1] = -b*v[0, i]**2\n x[i, 2] = -b*2*v[0, i]*v[1, i]\n x[i, 3] = -b*2*v[0, i]*v[2, i]\n x[i, 4] = -b*v[1, i]**2\n x[i, 5] = -b*2*v[1, i]*v[2, i]\n x[i, 6] = -b*v[2, i]**2\n return x", "def inverse(self):\n if self.determinant() != 0:\n ops = reduce_to_red_echelon(self.data.copy(), True)[1]\n matrix = identity_matrix(self.n_rows).data\n \n if ops:\n if isinstance(ops[0], str):\n ops = [ops]\n \n for op in ops:\n if op[0] == 'swap':\n matrix = row_swap(matrix, op[1], op[2])\n elif op[0] == 'multiplication':\n matrix = row_multiply(matrix, op[1], op[2])\n elif op[0] == 'subtract':\n matrix = row_subtract(matrix, op[1], op[2], op[3])\n else:\n raise ValueError('Row operation not recognized')\n else:\n raise ValueError('Matrix has a determinant of 0 and is not invertible')\n return Matrix(matrix)", "def Omat(self):\n if self.standard:\n return np.matrix(((0, -1, 0), (0, 0, 1), (-1, 0, 0)))\n else:\n return np.matrix(((0, 
0, 1), (0, 1, 0), (-1, 0, 0)))", "def prepare(self):\n ls=len(self.v)\n self.S=numpy.zeros(ls)\n self.A=numpy.zeros((ls,ls))\n\n for k,v in self.e.items():\n b,e=k\n bi,ei=self.rv[b],self.rv[e]\n self.A[bi,bi]-=v\n self.A[bi,ei]+=v", "def init_needleman_wunsch_matrix(self):\r\n empty_matrix = self.empty_matrix() # Building on the previous definition, this will give you an empty matrix\r\n for i in range(len(self.s2)+1):\r\n for j in range(len(self.s1)+1):\r\n empty_matrix[0][i] = -i\r\n empty_matrix[j][0] = -j\r\n return empty_matrix", "def build_augmented_matrix(self):\r\n for row in range(self.SIZE):\r\n self.matrix[row].append(self.result[row])", "def _buildMatrix(self, SparseMatrix, Ncells, MaxFaces, coeff):\n return (0, 0)", "def __neg__(self):\n # \n # TODO - your code here\n #\n result = [];\n for row in self.g:\n result.append([-1*n for n in row]);\n \n return Matrix(result);", "def relaxation_matrix(self, uphill, downhill):\n world.KK = numpy.zeros((2,2), dtype=numpy.float64)\n Kup = 1.0/float(uphill)\n world.KK[0,0] = -Kup\n world.KK[1,0] = Kup\n Kdn = 1.0/float(downhill)\n world.KK[0,1] = Kdn\n world.KK[1,1] = -Kdn", "def T(self):\n # TODO - your code here\n transpose = []\n for col in range(self.w):\n new_row = []\n for row in range(self.h):\n new_row.append(self.g[row][col])\n transpose.append(new_row)\n return Matrix(transpose)\n # TODO - your code here", "def design_matrix(nonlinear_p, data, prior):\n P, ecc, omega, M0 = nonlinear_p[:4] # we don't need the jitter here\n\n t = data._t_bmjd\n t0 = data._t_ref_bmjd\n zdot = cy_rv_from_elements(t, P, 1., ecc, omega, M0, t0, 1e-8, 128)\n\n M1 = np.vander(t - t0, N=prior.poly_trend, increasing=True)\n M = np.hstack((zdot[:, None], M1))\n\n return M", "def toMatrix(self,v):\n return Matrix([[v.x],[v.y],[v.z]])", "def build_mat(self):\n for row, s in enumerate(self.S):\n for col, t in enumerate(self.T):\n\n if self.symmetric and row > col:\n pass\n\n else:\n self.mat[row, col] = self.kernel(s, t, self.n)\n\n if self.symmetric:\n self.mat = self.symmetrize(self.mat)\n else:\n for idx, s in enumerate(self.S):\n self.test_normalization[idx] = self.kernel(s, s, self.n)", "def create_Uxx_mat(x):\n \n\n dx = x[1] - x[0]\n \n x_int = np.arange(1,len(x)-1,dtype=int)\n #create u_{xx} matrix operator\n Uxx_mat_row = np.hstack((x_int,x_int,x_int))\n Uxx_mat_col = np.hstack((x_int-1,x_int,x_int+1))\n Uxx_entry = (1/(dx**2))*np.hstack((np.ones(len(x)-2),-2*np.ones(len(x)-2),(np.ones(len(x)-2))))\n\n Uxx_mat_row_bd = np.hstack((0,0,len(x)-1,len(x)-1))\n Uxx_mat_col_bd = np.hstack((0,1,len(x)-2,len(x)-1))\n Uxx_entry_bd = (1/(dx**2))*np.hstack((-2,2,2,-2))\n\n\n Uxx_mat_row = np.hstack((Uxx_mat_row,Uxx_mat_row_bd))\n Uxx_mat_col = np.hstack((Uxx_mat_col,Uxx_mat_col_bd))\n Uxx_entry = np.hstack((Uxx_entry,Uxx_entry_bd))\n\n return sparse.coo_matrix((Uxx_entry,(Uxx_mat_row,Uxx_mat_col)))", "def matIxs( n ):\n rows, cols = np.indices( (n,n) )\n row = rows.flatten()\n col = cols.flatten()\n \n return map( lambda x: Vector( x[0], x[1] ), zip( col, row ) )", "def _makeDerivativeMatrix(self, index, a):\n num_neurons = a\n jaccob_matrix = np.zeros(shape=(num_neurons, num_neurons)) # ie S=3, shape 3X3\n # dx_func = self.__getDerivative(self.layers[index]['trans_func'])\n dx_func = self._getTransFunc(self.layers[index]['trans_func']).derivative\n for i in range(num_neurons):\n # diagonal matrix\n a_val = self.layers[index]['a_output'][i]\n jaccob_matrix[i][i] = dx_func(a_val)\n return jaccob_matrix", "def bc_matrix(params):\r\n w = params['w']\r\n kx = 
params['kx']\r\n d_list = params['d_list']\r\n ex_list = params['ex_list']\r\n ez_list = params['ez_list']\r\n kz_list = params['kz_list']\r\n N = len(d_list)\r\n assert N == len(d_list) == len(ex_list) == len(ez_list) == len(kz_list)\r\n assert N >= 2\r\n assert d_list[0] == d_list[-1] == inf\r\n \r\n # delta = e^{i * kz * d}, i.e. phase change across each layer\r\n # delta[0] and delta[-1] are undefined and are not used.\r\n delta_list = [cmath.exp(1j * kz_list[i] * d_list[i]) for i in range(N)]\r\n \r\n Ex_up_over_H_up_list = [kz_list[i] / (w * ex_list[i] * nu.eps0)\r\n for i in range(N)]\r\n Ex_down_over_H_down_list = [-a for a in Ex_up_over_H_up_list]\r\n Ez_up_over_H_up_list = [-kx / (w * ez_list[i] * nu.eps0) for i in range(N)]\r\n Ez_down_over_H_down_list = Ez_up_over_H_up_list[:]\r\n \r\n mat = np.zeros((2*N-2, 2*N-2), dtype=complex)\r\n \r\n for row_now in range(N-1):\r\n # This row concerns continuity of Ex across the boundary between\r\n # layer_under and layer_over (under and over the boundary respectively)\r\n layer_under = row_now\r\n layer_over = layer_under + 1\r\n # up_under_index is the column index in mat that gets multiplied by\r\n # H_{up} in layer_under.\r\n up_under_index = 2 * layer_under - 1\r\n down_under_index = 2 * layer_under\r\n up_over_index = 2 * layer_over - 1\r\n down_over_index = 2 * layer_over\r\n \r\n if layer_under != 0:\r\n assert 0 <= up_under_index < 2*N-2\r\n mat[row_now, up_under_index] = (\r\n Ex_up_over_H_up_list[layer_under] * delta_list[layer_under])\r\n mat[row_now, down_under_index] = Ex_down_over_H_down_list[layer_under]\r\n mat[row_now, up_over_index] = -Ex_up_over_H_up_list[layer_over]\r\n if layer_over != N-1:\r\n assert 0 <= down_over_index < 2*N-2\r\n mat[row_now, down_over_index] = (\r\n -Ex_down_over_H_down_list[layer_over] * delta_list[layer_over])\r\n\r\n for row_now in range(N-1, 2*N-2):\r\n # This row concerns continuity of eps_z * Ez across the boundary between\r\n # layer_under and layer_over (under and over the boundary respectively)\r\n layer_under = row_now - (N-1)\r\n layer_over = layer_under + 1\r\n # up_under_index is the column index in mat that gets multiplied by\r\n # H_{up} in layer_under.\r\n up_under_index = 2 * layer_under - 1\r\n down_under_index = 2 * layer_under\r\n up_over_index = 2 * layer_over - 1\r\n down_over_index = 2 * layer_over\r\n \r\n if layer_under != 0:\r\n assert 0 <= up_under_index < 2*N-2\r\n mat[row_now, up_under_index] = (ez_list[layer_under] *\r\n Ez_up_over_H_up_list[layer_under] * delta_list[layer_under])\r\n mat[row_now, down_under_index] = (ez_list[layer_under] *\r\n Ez_down_over_H_down_list[layer_under])\r\n mat[row_now, up_over_index] = (-ez_list[layer_over] * \r\n Ez_up_over_H_up_list[layer_over])\r\n if layer_over != N-1:\r\n assert 0 <= down_over_index < 2*N-2\r\n mat[row_now, down_over_index] = (-ez_list[layer_over] *\r\n Ez_down_over_H_down_list[layer_over] * delta_list[layer_over])\r\n \r\n return mat", "def _vect_matrix_inverse(A):\n identity = np.identity(A.shape[2], dtype=A.dtype)\n return np.array([np.linalg.solve(x, identity) for x in A])", "def identity_matrix(self, n):\r\n IdM = self.zeros_matrix(n, n)\r\n for i in range(n):\r\n IdM[i][i] = 1.0\r\n \r\n return IdM", "def matrix_neumann2D(Omega,Nx,Ny):\r\n \r\n hx = (Omega[1]-Omega[0])/Nx\r\n hy = (Omega[3]-Omega[2])/Ny\r\n hx2 = hx*hx\r\n hy2 = hy*hy\r\n\r\n # Les inconnues sont numรฉrotรฉs de 0 ร  Nx suivant x et 0 ร  Ny\r\n # suivant y. 
La taille du problรจme est donc (Nx+1)*(Ny+1).\r\n\r\n # Pour -Laplacien(u), la matrice est constituรฉe de (Ny+1)x(Ny+1)\r\n # blocs de taille (Nx+1)x(Nx+1), de la forme\r\n #\r\n # A = [ A0 B ]\r\n # [ B A1 B ]\r\n # [ B A1 B ]\r\n # [ . . . ]\r\n # [ B A1 B ]\r\n # [ B A0 ]\r\n #\r\n # Au final, on peut commencer ร  remplir avec des diagonales\r\n N = (1+Nx)*(1+Ny)\r\n diags = np.zeros((5,N))\r\n # La diagonale est constante\r\n diags[2,:] = 2./hx2+2./hy2\r\n # Diagonale -1\r\n diags[1,:] = -1./hx2 # en gรฉnรฉral\r\n diags[1,np.arange(Nx,N,Nx+1)] = 0. # bord gauche\r\n diags[1,np.arange(Nx-1,N,Nx+1)] = -2./hx2 # bord droit\r\n # Diagonale +1\r\n diags[3,:] = -1./hx2 # en gรฉnรฉral\r\n diags[3,np.arange(0,N,Nx+1)] = 0. # bord droit\r\n diags[3,np.arange(1,N,Nx+1)] = -2./hx2 # bord gauche\r\n # Diagonale -(Nx+1)\r\n diags[0,:] = -1./hy2 # en gรฉnรฉral\r\n diags[0,(Nx+1)*(Ny-1):(Nx+1)*Ny] = -2./hy2 # bord bas\r\n # Diagonale +(Nx+1)\r\n diags[4,:] = -1./hy2 # en gรฉnรฉral\r\n diags[4,Nx+1:2*(Nx+1)] = -2./hy2 # bord haut\r\n\r\n # Construction de la matrice creuse de u --> -Laplacien(u)\r\n A = sp.spdiags(diags,[-(Nx+1),-1,0,1,(Nx+1)], (Nx+1)*(Ny+1),\r\n (Nx+1)*(Ny+1), format=\"csc\")\r\n\r\n return A", "def formAdjacencyMatrix(self):\n self.adjacencyMatrix = dict()\n for i in self.node:\n self.adjacencyMatrix[i] = dict()\n for j in self.node:\n self.adjacencyMatrix[i][j] = 0\n \n for ij in self.link:\n self.adjacencyMatrix[self.link[ij].tail][self.link[ij].head] = 1", "def inverseTransformationMatrix(self,index=None):\n if self.method == 'pca':\n if index is not None:\n coordinateIndex = distribution1D.vectori_cxx(len(index))\n for i in range(len(index)):\n coordinateIndex[i] = index[i]\n matrixDim = self._distribution.getInverseTransformationMatrixDimensions(coordinateIndex)\n inverseTransformation = self._distribution.getInverseTransformationMatrix(coordinateIndex)\n else:\n matrixDim = self._distribution.getInverseTransformationMatrixDimensions()\n inverseTransformation = self._distribution.getInverseTransformationMatrix()\n row = matrixDim[0]\n column = matrixDim[1]\n # convert 1D vector to 2D array\n L = np.atleast_1d(inverseTransformation).reshape(row,column)\n else:\n self.raiseAnError(NotImplementedError,' inverse transformationMatrix is not yet implemented for ' + self.method + ' method')\n return L", "def create_B_matrix(J,sigma):\n diaUp = sigma*np.ones(J-1,dtype=complex)\n diaUp[0] = 0\n diaDown = sigma*np.ones(J-1,dtype=complex)\n diaDown[J-2] = 0\n dia = (1-2*sigma)*np.ones(J,dtype=complex)\n dia[0] = 1\n dia[-1] = 1\n return diags([diaUp,dia,diaDown], [1,0,-1], (J, J), format='csr')", "def _inverse_affine_matrix(self) -> np.ndarray:\n raise NotImplementedError", "def buildmatrix(self, diffusion_coefficients):\n a = []\n\n for i in range(self.size):\n line = np.zeros(self.size)\n if i == 0:\n line[i] = -(diffusion_coefficients[i] + 2*diffusion_coefficients[i+1] + diffusion_coefficients[i+2])\n line[i+1] = diffusion_coefficients[i+1] + diffusion_coefficients[i+2]\n elif i == self.size-1:\n line[i - 1] = diffusion_coefficients[i] + diffusion_coefficients[i+1]\n line[i] = -(diffusion_coefficients[i] + 2*diffusion_coefficients[i+1] + diffusion_coefficients[i+2])\n else:\n line[i-1] = diffusion_coefficients[i] + diffusion_coefficients[i+1]\n line[i] = -(diffusion_coefficients[i] + 2*diffusion_coefficients[i+1] + diffusion_coefficients[i+2])\n line[i+1] = diffusion_coefficients[i+1] + diffusion_coefficients[i+2]\n a.append(line)\n a = np.asarray(a)\n\n return a", "def 
_precession_matrix(oldequinox, newequinox):\n return earth._precession_matrix_besselian(oldequinox.byear, newequinox.byear)", "def compute_limit_matrix(gamma, adjacency, n_states):\n num_states = n_states\n identity = np.eye(num_states)\n return np.linalg.inv(identity - gamma * adjacency / 6)", "def dens_matrix(state):\n size = len(state)\n state_conj = np.conj(state)\n dm = np.zeros((size,) * 4, dtype=complex)\n\n for p1 in range(size):\n for p2 in range(size):\n for p1_ in range(size):\n for p2_ in range(size):\n dm[p1, p2, p1_, p2_] = state[p1, p2] * state_conj[p1_, p2_]\n\n return dm", "def identMatrix(size):\n returnvalue = Matrix()\n for i in range(size):\n newrow = [0] * size\n newrow[i] = 1\n returnvalue.addRow(*newrow)\n return returnvalue", "def tomatrix(self, ai_patch):\n V = self.space\n# print \"------------\"\n# print \"geo.npatchs : \", V.geometry.npatchs\n# print \"patch id : \", ai_patch\n# print \"dim : \", V.dim\n# print \"shape : \", V.geometry[ai_patch].shape\n if V.dim == 1 :\n [li_n_1] = V.geometry[ai_patch].shape\n return self.com.pyfem.field_to_matrix_1d ( self.id, ai_patch \\\n , li_n_1 )\n if V.dim == 2 :\n [li_n_1, li_n_2] = V.geometry[ai_patch].shape\n return self.com.pyfem.field_to_matrix_2d ( self.id, ai_patch \\\n , li_n_1, li_n_2 )\n if V.dim == 3 :\n [li_n_1, li_n_2, li_n_3] = V.geometry[ai_patch].shape\n return self.com.pyfem.field_to_matrix_3d ( self.id \\\n , ai_patch, li_n_1, li_n_2, li_n_3 )", "def designMatrix(self,x,m):\n\n phi = []\n\n for i in x:\n matric = []\n for j in range(0, m + 1):\n matric.append(np.power(i,j))\n phi.append(matric)\n return np.asarray(phi)", "def InverseMatrix(matrix,vector):\r\n # Unveri reversible matrix\r\n if Determinant(matrix, 1) == 0:\r\n print(\"Error,Singular Matrix\\n\")\r\n return\r\n # result matrix initialized as singularity matrix\r\n result = MakeIMatrix(len(matrix), len(matrix))\r\n # loop for each row\r\n for i in range(len(matrix[0])):\r\n # turn the pivot into 1 (make elementary matrix and multiply with the result matrix )\r\n # pivoting process\r\n matrix, vector = RowXchange(matrix, vector)\r\n elementary = MakeIMatrix(len(matrix[0]), len(matrix))\r\n elementary[i][i] = 1/matrix[i][i]\r\n result = MultiplyMatrix(elementary, result)\r\n matrix = MultiplyMatrix(elementary, matrix)\r\n # make elementary loop to iterate for each row and subtracrt the number below (specific) pivot to zero (make\r\n # elementary matrix and multiply with the result matrix )\r\n for j in range(i+1, len(matrix)):\r\n elementary = MakeIMatrix(len(matrix[0]), len(matrix))\r\n elementary[j][i] = -(matrix[j][i])\r\n matrix = MultiplyMatrix(elementary, matrix)\r\n result = MultiplyMatrix(elementary, result)\r\n\r\n\r\n # after finishing with the lower part of the matrix subtract the numbers above the pivot with elementary for loop\r\n # (make elementary matrix and multiply with the result matrix )\r\n for i in range(len(matrix[0])-1, 0, -1):\r\n for j in range(i-1, -1, -1):\r\n elementary = MakeIMatrix(len(matrix[0]), len(matrix))\r\n elementary[j][i] = -(matrix[j][i])\r\n matrix = MultiplyMatrix(elementary, matrix)\r\n result = MultiplyMatrix(elementary, result)\r\n\r\n return result", "def _inv22_vectorized(M):\n assert (M.ndim == 3)\n assert (M.shape[-2:] == (2, 2))\n M_inv = np.empty_like(M)\n delta_inv = np.reciprocal(M[:, 0, 0]*M[:, 1, 1] - M[:, 0, 1]*M[:, 1, 0])\n M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv\n M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv\n M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv\n M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv\n 
return M_inv", "def x_matrix(a):\r\n a0 = a[0]\r\n a1 = a[1]\r\n a2 = a[2]\r\n return np.array([[0, -a2, a1],\r\n [a2, 0, -a0],\r\n [-a1, a0, 0]])", "def inv(self):\n return MoebGen(self._d / self._det, - self._b / self._det, - self._c / self._det, self._a / self._det)", "def z_operator_matrix(self):\n n, r_1, r_2, k = self.n, self.r_1, self.r_2, self.k\n\n # Use the row vector [ A2^T 0 I ], which commutes with the check matrix.\n check_mat = np.zeros((k, n), dtype='int')\n check_mat[:, 0:r_1] = np.transpose(self.parity_check_c1[:, (r_1 + r_2):n])\n check_mat[:, (r_1 + r_2):n] = np.identity(k)\n return check_mat", "def _init_vertex_adjacency_matrix(self, verbose=False):\n self._init_from_cdd_input(self.cdd_Vrepresentation(),\n '--adjacency', verbose)", "def basis_vector_matrix(*e):\n return np.array(e).T", "def gen_matrix(e):\n\tif e < 1:\n\t\treturn None\n\tm_list = [[[1, 2], [3, 0]]]\n\t_b = m_list[0]\n\tfor n in xrange(1, e):\n\t\tm = m_list[n - 1]\n\t\tm_list.append(\n\t\t\t[\n\t\t\t\t[4 * i + _b[0][0] for i in m[0]] + [4 * i + _b[0][1] for i in m[0]],\n\t\t\t\t[4 * i + _b[0][0] for i in m[1]] + [4 * i + _b[0][1] for i in m[1]],\n\t\t\t\t[4 * i + _b[1][0] for i in m[0]] + [4 * i + _b[1][1] for i in m[0]],\n\t\t\t\t[4 * i + _b[1][0] for i in m[1]] + [4 * i + _b[1][1] for i in m[1]],\n\t\t\t]\n\t\t)\n\treturn m_list", "def inv(in_A):\n Q,R = qr(in_A)\n QT = Q.T\n N = shape(in_A)[0]\n \n for n in range(N-1,-1,-1):\n Rnn = R[n,n]\n R[n,:] /= Rnn\n QT[n,:] /= Rnn\n for m in range(n+1,N):\n Rnm = R[n,m]\n R[n,m] = 0\n QT[n,:] -= QT[m,:]*Rnm\n\n return QT", "def reflection_matrix(v):\n n = len(v)\n v = np.array(v)[np.newaxis]\n return np.eye(n) - 2 * np.dot(v.T, v)", "def polynomial_matrix(order):\n \n matrix = np.identity(order)\n for i in range(order-1):\n matrix[i,i+1] = 1\n return matrix", "def trans_o(self):\n temp_array = []\n for j in range(self.O.shape[1]):\n for i in range(self.V.shape[1]):\n if self.V[0, i] == self.O[0, j]:\n temp_array.append(i)\n self.O = mat(temp_array)", "def invredc(A, B, C, D, y, v):\n # Description to help the user\n\n # calculate the number of samples of the output\n N = np.shape(y)[1] # the number of samples is the number of columns of y\n\n # calculate system's dimensions: number of states, number of inputs and number of outputs\n n = A.shape[0] # number of states\n # m=B.shape[1] # number of inputs, maybe it's not necessary\n p = C.shape[0] # number of outputs\n\n # A. 
Output Basis Change\n # here the output basis change and its important quantities and matrices are calculated\n\n # rank of the feedforward matrix:\n r = np.linalg.matrix_rank(D)\n\n # to calculate the S1 matrix, we have partitioned the matrix into [S1a;S2a]\n # firstly, we obtain S1a\n # since D0 must possess full row rank (rank(D0)=r), a simple way to do that is to use the scipy.linalg.orth function\n D0 = (scilin.orth(D.transpose())).transpose()\n # calculating S1a as a solution of the problem S1a*D=D0 using the pseudoinverse (Moore-Penrose inverse):\n S1at = scilin.pinv(D.transpose()) @ D0.transpose()\n S1a = S1at.transpose()\n # S1b is the null space (kernel) of D from the left\n S1b = (scilin.null_space(D.transpose())).transpose()\n # assembling the S1 matrix\n S1 = np.concatenate((S1a, S1b), axis=0) # axis=0 concatenate vertically (row wise)\n\n # the C2 matrix is obtained by a partition of S1*C, which can by also obtained with the use of S1b\n # calculating C2\n C2 = S1b @ C\n # rank of C2\n q = np.linalg.matrix_rank(C2)\n\n # calculating the matrix S2, which is very similar to S1, and it is also partitioned as S2=[S2a;S2b]\n # since C2bar has to possess full row rank (rank(C2)=q)\n C2tilde = (scilin.orth(C2.transpose())).transpose()\n # calculating S2a as a solution of the problem S2a*C2=C2bar using the pseudoinverse (Moore-Penrose inverse):\n S2at = scilin.pinv(C2.transpose()) @ C2tilde.transpose()\n S2a = S2at.transpose()\n # S2b is the null space (kernel) of C2 from the left\n S2b = (scilin.null_space(C2.transpose())).transpose()\n # assembling the S2 matrix\n S2 = np.concatenate((S2a, S2b), axis=0) # axis=0 concatenate vertically (row wise)\n\n # now that we have S1 and S2, we can assemble the S matrix\n # we defined the notation: S=Sa*S1, where Sa is partitioned as Sa=[I 0;0 S2]=[Sa1 Sa2]\n # partitions of Sa\n Sa11 = np.identity(r)\n Sa12 = np.zeros((r, p - r))\n Sa21 = np.zeros((p - r, r))\n Sa22 = S2\n # assembling the columns of Sa, Sa=[Sa1 Sa2]\n Sa1 = np.concatenate((Sa11, Sa21), axis=0) # concatenate vertically (row wise)\n Sa2 = np.concatenate((Sa12, Sa22), axis=0) # concatenate vertically (row wise)\n # finally, assembling the matrix Sa:\n Sa = np.concatenate((Sa1, Sa2), axis=1) # concatenate horizontally (column wise)\n # obtaining the S matrix by the multiplication\n S = Sa @ S1\n\n # doing the transformation of the output ytilde=Sy\n ytilde = S @ y\n # we'll not partition the output yet, first, we'll do the State-Space Basis Change\n\n # B. State-Space Basis Change\n # in this section we'll do the state-space basis change of the system\n\n # the first step is the calculation of the transformation matrix, as defined in the paper\n # we'll call T^{-1} as M, so C2tilde*M=[0 I]. And we'll partition M as M=[M1 M2]. C2tilde*M=[C2tilde*M1 C2tilde*M2]\n # since rank(C2tilde)=q, nullity(C2tilde)=n-q\n # M1 can be defined as a basis of the null space of C2tilde\n M1 = scilin.null_space(C2tilde)\n # and M2 is the solution of the equation C2tilde*M2=I. 
To calculate this solution, we'll use the pseudoinverse again\n M2 = scilin.pinv(C2tilde)\n # now, we assemble the M matrix with the concatenate function\n M = np.concatenate((M1, M2), axis=1) # concatenate horizontally (column wise)\n # finally, we calculate the T matrix by inverting M\n T = np.linalg.inv(M)\n\n # now, we proceed to the transformation of the state-space matrices\n # transformation of the system's dynamic matrix\n Atilde = T @ A @ M\n # transformation of the system's input matrix\n Btilde = T @ B\n # transformation of the system's output matrix\n Ctilde = C @ M\n # transformation of the system's feedforward matrix (it's the same)\n # Dtilde=D # actually, this step is not necessary\n # transformation of the additional system input v\n vtilde = T @ v\n\n # in the next step, we need to partition the new system's matrices and outputs\n\n # partition of the outputs\n # y1 has r lines and N columns\n y1 = ytilde[0:r, :]\n # y2 has q lines and N columns, and it starts at the r+1 line (which in python is the r line since the vector index starts at 0)\n y2 = ytilde[r : r + q, :]\n # y3 is irrelevant, then, it will be neglected\n\n # partitioning the system matrices\n # firstly, the system's dynamic matrix Atilde\n A11 = Atilde[0 : n - q, 0 : n - q]\n A12 = Atilde[0 : n - q, n - q : n]\n A21 = Atilde[n - q : n, 0 : n - q]\n A22 = Atilde[n - q : n, n - q : n]\n # the system's input matrix Btilde\n B1 = Btilde[0 : n - q, :]\n B2 = Btilde[n - q : n, :]\n # the system's output matrix Ctilde\n C11 = Ctilde[0:r, 0 : n - q]\n C12 = Ctilde[0:r, n - q : n]\n\n # partition the additional input vtilde\n v1 = vtilde[0 : n - q, :]\n v2 = vtilde[n - q : n, :]\n\n # C. Reduction of State-Space Dimension\n # now, we'll do the reduction of the state-space system\n\n # following the equations in the paper\n # calculating y1hat\n y1hat = y1 - C12 @ y2\n # we have to discard the last sample to make the dimensions of y1hat and y2hat match\n y1hat = y1hat[:, 0 : N - 1]\n\n # calculating y2hat\n # preallocating variables before the loop\n y2hat = np.zeros((q, N - 1))\n # running the loop\n for k in range(\n 0, N - 1\n ): # the loop has to run N-1 times, from 0 to N-2, because of y2[k+1] on the equation\n y2hat[:, k] = y2[:, k + 1] - A22 @ y2[:, k] - v2[:, k]\n\n # assembling the reduced system's output vector\n yhat = np.concatenate((y1hat, y2hat), axis=0)\n\n # calculating the additional input vhat\n vhat = v1 + A12 @ y2\n # discarding the last sample\n vhat = vhat[:, 0 : N - 1]\n\n # now, we'll assemble the reduced state-space system\n # reduced system's dynamic matrix\n Ahat = A11\n # reduced system's input matrix\n Bhat = B1\n # reduced system's output matrix\n Chat = np.concatenate((C11, A21), axis=0) # concatenate vertically (row wise)\n # reduced system's feedforward matrix\n Dhat = np.concatenate((D0, B2), axis=0) # concatenate vertically (row wise)\n # calculating rhat, the new rank of the feedforward matrix Dhat (an important quantity of the algorithm)\n rhat = np.linalg.matrix_rank(Dhat)\n\n # calculating the new dimension of the reduced system\n # reduced system's state vector dimension\n nhat = n - q\n # reduced system's output vector dimension\n phat = r + q\n\n return Ahat, Bhat, Chat, Dhat, yhat, vhat, nhat, phat, rhat", "def interaction_matrix(self):\n\n self.int_M_called = True\n int_M = np.zeros((self.n, self.n))\n for k in range(self.n):\n for j in range(k+1):\n o = self.attribute_interactions(k, j)\n int_M[k, j] = o.rel_total_ig_ab # Store total information gain\n int_M[j, k] = 
o.rel_total_ig_ab # TODO: Maybe storing interactions too is not a bad idea\n # TODO: We can than easily sort either by total gain or by positive interaction\n for k in range(self.n):\n int_M[k, k] = self.info_gains[self.data.domain.attributes[k].name]\n self.int_matrix = Orange.misc.distmatrix.DistMatrix(int_M)", "def contract(tensor):\n temp = np.einsum('ikma, jlan', tensor, tensor)\n M = np.zeros((tensor.shape[0]**2, tensor.shape[1]**2, tensor.shape[2], tensor.shape[3]))\n for i,j,k,l,m,n in it.product(*[range(x) for x in temp.shape]):\n M[i + tensor.shape[0]*j, k + tensor.shape[1]*l, m, n] = temp[i,j,k,l,m,n]\n return M", "def inverse (x_ij):\n\n Hij = xyzrph2matrix (x_ij)\n Rji = Hij[0:3, 0:3]\n tij = Hij[0:3,3]\n Rij = Rji.transpose ()\n tji = -Rij.dot (tij)\n Hji = numpy.zeros ((4,4))\n Hji[0:3,0:3] = Rij\n Hji[0:3,3] = tji\n Hji[3,3] = 1\n return matrix2xyzrph (Hji)", "def _build_adjacency_matrix_1(self):\n\n from scipy import sparse as sparse\n \n down_neighbour = np.empty(self.tri.npoints)\n\n for node in range (0,self.tri.npoints):\n down_neighbour[node] = self.neighbour_array_lo_hi[node][0]\n\n # Build a matrix of downhill-ness - one entry per node ! \n \n size = self.tri.npoints\n row_array = np.empty(size)\n col_array = np.empty(size)\n down_array = np.ones(size)\n\n # Catch cases where node is local low point (i.e. it is its own low neighbour)\n\n for row in range(0, self.tri.npoints): \n row_array[row] = row\n col_array[row] = down_neighbour[row]\n if row == down_neighbour[row]:\n down_array[row] = 0.0\n \n\n downMCOO = sparse.coo_matrix( (down_array, (row_array, col_array)), shape=(size,size) ).T \n\n self.adjacency1 = downMCOO.tocsr() \n\n # Catch pathological cases - sometimes if there is a flat spot on the boundary, then \n # the filling method above will produce a non-square matrix. This is caused by\n # repetition of values in the COO list which are summed on conversion.\n\n if downMCOO.shape[0] != downMCOO.shape[1]:\n # This approach works but is a lot slower\n\n print \"\"\"\n Warning: the downhill matrices require a slow build method. 
This is probably\n Because there are degeneracies in the slope - particularly at the boundaries\n A small random perturbation is usually enough to fix this problem\n \"\"\"\n downMat = sparse.lil_matrix((size, size))\n\n for row in range(0, self.tri.npoints): \n downMat[down_neighbour[row],row] = 1.0\n\n for row in range(0, self.tri.npoints): \n if down_neighbour[row] == row:\n downMat[row,row] = 0.0\n \n self.adjacency1 = downMat.T.tocsr() \n \n return", "def to_matrix(self):\n return numpy.array([[1, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 1, 0, 0, 0, 0]], dtype=complex)", "def posdef_inv_matrix_inverse(tensor, identity, damping):\n return tf.matrix_inverse(tensor + damping * identity)", "def assign_vertices(self):\n CV_matrix = np.zeros((self.n_c, self.n_v, 3))\n for i in range(3):\n CV_matrix[self.tris[:, i], np.arange(self.n_v), i] = 1\n self.CV_matrix = CV_matrix\n return self.CV_matrix", "def PNmatrix(t, inp):\n from pypride.vintlib import taitime, eop_iers, t_eph, ter2cel, load_cats\n # precess to date\n ''' set dates: '''\n\n tstamp = t.datetime\n mjd = np.floor(t.mjd)\n UTC = (tstamp.hour + tstamp.minute / 60.0 + tstamp.second / 3600.0) / 24.0\n JD = mjd + 2400000.5\n\n ''' compute tai & tt '''\n TAI, TT = taitime(mjd, UTC)\n\n ''' load cats '''\n _, _, eops = load_cats(inp, 'DUMMY', 'S', ['GEOCENTR'], tstamp)\n\n ''' interpolate eops to tstamp '''\n UT1, eop_int = eop_iers(mjd, UTC, eops)\n\n ''' compute coordinate time fraction of CT day at GC '''\n CT, dTAIdCT = t_eph(JD, UT1, TT, 0.0, 0.0, 0.0)\n\n ''' rotation matrix IERS '''\n r2000 = ter2cel(tstamp, eop_int, dTAIdCT, 'iau2000')\n # print(r2000[:,:,0])\n\n return r2000", "def to_matrix(self):\n return numpy.array([[1, 0],\n [0, 1]], dtype=complex)", "def _prepare_outer_matrix(self):\n self._mat_plane = numpy.array([\n self._scaling[0], 0, 0, 0,\n 0, self._scaling[1], 0, 0,\n 0, 0, 1, 0,\n self.i_border[0], -self.i_border[1], 0, 1\n ], dtype=numpy.float32)" ]
[ "0.6711361", "0.6592599", "0.6546603", "0.64961314", "0.6466997", "0.64629614", "0.6427826", "0.6343991", "0.6329846", "0.6236044", "0.62210953", "0.621212", "0.62022287", "0.6199758", "0.6198103", "0.6197292", "0.6197292", "0.61816674", "0.615353", "0.61416143", "0.6140042", "0.61152947", "0.61004704", "0.60901827", "0.6069185", "0.606875", "0.6064555", "0.6063728", "0.6061032", "0.60497963", "0.6043066", "0.60291797", "0.60219485", "0.6000006", "0.5982535", "0.5982381", "0.59762526", "0.59754705", "0.5962733", "0.5960508", "0.59497917", "0.59272546", "0.592514", "0.5916936", "0.5915357", "0.58969", "0.58847356", "0.58817214", "0.58277935", "0.58231676", "0.58150035", "0.5812486", "0.5811324", "0.58054537", "0.58027905", "0.5789336", "0.5782189", "0.57779473", "0.57758385", "0.57746345", "0.57713544", "0.5770569", "0.57643676", "0.5760892", "0.5747489", "0.5746705", "0.57398677", "0.57371646", "0.57358134", "0.57357746", "0.57337576", "0.57318443", "0.57306767", "0.572502", "0.5706198", "0.5706074", "0.57057005", "0.5702166", "0.5700753", "0.5697774", "0.56904626", "0.56893516", "0.56859463", "0.5685334", "0.56822944", "0.5678086", "0.56778854", "0.56758916", "0.5669488", "0.56658846", "0.56627285", "0.56562084", "0.5651296", "0.5651279", "0.5649791", "0.5648469", "0.56469214", "0.56415284", "0.56405896", "0.56351995", "0.56242627" ]
0.0
-1
Create random signal on the sphere
def rand_sig(u, b, n, theta):
    # Locally used names
    from numpy import dot, exp

    # Diffusion tensor parameters -- diffusion along x-axis
    lambda1 = 1700e-6
    lambda2 = 300e-6
    lambda3 = 300e-6

    # diagonal diffusion tensor for "prolate white matter"
    D1 = np.diag([lambda1, lambda2, lambda3])
    D2 = D1
    D3 = D1

    # rotation of diffusion tensor
    rotationMatrix = rotation3Dy(theta)
    D1 = dot(dot(rotationMatrix,D1),rotationMatrix.T)
    #
    rotationMatrix = rotation3Dz(-theta)
    D2 = dot(dot(rotationMatrix,D2),rotationMatrix.T)

    angle = np.arccos(np.cos(theta)*np.cos(theta))*180/np.pi

    # XXX - check with cory these semantics
    if n==1:
        s = exp(-b * dot(u, dot(D1,u)) )  # Single mode
    elif n==2:
        s = 0.5 * (exp(-b * dot(u, dot(D1,u)) ) + exp(-b * dot(u, dot(D2,u)) ) )
    elif n==3:
        s = (1.0/3) * (exp(-b * dot(u, dot(D1,u)) ) + exp(-b * dot(u, dot(D2,u)) ) + exp(-b * dot(u, dot(D3,u)) ) )

    return (angle,s)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rndSphere():\n sph = [0,0,0]\n \n sph[2] = random.uniform(-1.0,1.0)\n z2 = math.sqrt(1.0 - sph[2]*sph[2])\n phi = (2. * math.pi) * random.random()\n sph[0] = z2 * math.cos(phi)\n sph[1] = z2 * math.sin(phi)\n \n return sph", "def randomPointOnSphere(r):\n x = np.random.normal()\n y = np.random.normal()\n z = np.random.normal()\n point = np.array([x, y, z])\n point *= r/(x**2 + y**2 + z**2)**.5\n return point", "def sphere(self, x):\r\n # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]\r\n return sum((x+0)**2)", "def arndSphere(N):\n sph = np.empty( (N,3), np.float64 )\n \n sph[:,2] = np.random.uniform(-1.0,1.0,N) # z-coordinates\n z2 = np.sqrt(1.0 - sph[:,2]**2)\n phi = (2.0 * math.pi) * np.random.random( N )\n sph[:,0] = z2 * np.cos(phi) # x \n sph[:,1] = z2 * np.sin(phi) # y\n \n return sph", "def on_sphere():\n vec = np.random.standard_normal(3)\n return vec / np.linalg.norm(vec)", "def random_spherepos(n):\n signs = np.sign(rand.uniform(-1,1,size=n))\n thetas = Angle(np.arccos(rand.uniform(size=n)*signs),unit=u.rad) #random b/w 0 and 180\n phis = Angle(rand.uniform(0,2*np.pi,size=n),unit=u.rad)\n c = SkyCoord(phis,thetas,1,representation='physicsspherical')\n return c", "def onsphere(size=None):\n xy = oncircle(size)\n z = 2.*random(xy.shape[:-1] + (1,)) - 1.\n xy *= sqrt(1. - z*z)\n return concatenate((xy, z), axis=-1)", "def generate_sphere_full():\n \n num_voxels = 31\n c = (15.0, 15.0, 15.0)\n\n data_x = []\n data_y = []\n data_z = []\n data_intensity = []\n\n volume = numpy.zeros((num_voxels, num_voxels, num_voxels))\n\n for x in range(num_voxels):\n for y in range(num_voxels):\n for z in range(num_voxels):\n\n if numpy.sqrt((x-c[0])**2 + (y-c[1])**2 + (z-c[2])**2) - 7.5 < 1.5:\n data_x.append(x)\n data_y.append(y)\n data_z.append(z)\n data_intensity.append(200.0)\n\n volume[x,y,z] = 200.0\n\n\n return data_x, data_y, data_z, data_intensity, volume", "def rand_sphere(d0):\n p1 = np.random.randn(d0, 3)\n m = np.sqrt(np.sum(p1**2, axis=1))\n\n rad = pow(np.random.rand(d0), 1.0 / 3.0)\n return (p1.T * (rad / m)).T", "def createSphere( position=(0,0,0), radius=1, colour=(0.6,0.6,0.6), samplesY = 20, samplesXZ = 20 ):\r\n return createEllipsoid( position, (radius,radius,radius), colour, samplesY, samplesXZ )", "def xrndSphere(n):\n for i in xrange(n):\n yield rndSphere()", "def sphere_generator(command_line_arguments):\n start_simulation(parse_command_line_arguments(command_line_arguments))", "def sample_spherical(self):\n vec = np.random.randn(self.dims, self.arms)\n vec /= np.linalg.norm(vec, axis=0)\n self.contexts = vec.T", "def random_spherical(R, N=10000, R0=0):\n\tu1 = numpy.random.random(size=N)\n\tr = u1 ** (1./3.) * R + R0\n\tu2 = numpy.random.random(size=N) * 2 -1\n\tphi = numpy.random.random(size=N) * 2 * math.pi\n\tx = numpy.sqrt(1-u2**2) * numpy.cos(phi) * r\n\ty = numpy.sqrt(1-u2**2) * numpy.sin(phi) * r\n\tz = u2 * r\n\treturn x, y, z", "def insphere(size=None):\n if size is None:\n size = ()\n else:\n try:\n size = tuple(size)\n except TypeError:\n size = (size,)\n n = int(prod(size))\n if n < 70:\n # For small n, interpreted overhead dominates. Using sin and cos\n # results in fewer interpreted instructions than rejection method.\n # Compiled code should never use this algorithm.\n mu, phi, z = random((3,) + size + (1,))\n mu = 2.*mu - 1.\n phi *= 2. * pi\n s = sqrt(1. - mu)\n return z**(1./3.) 
* concatenate((s*cos(phi), s*sin(phi), mu), axis=-1)\n # Beats this:\n # p = onsphere(size)\n # return p * random(p.shape[:-1] + (1,)) ** (1./3.)\n # For large n, higher intrinsic cost of sin and cos compared to\n # rejection method dominates, and it is worth taking a few more\n # interpreted instructions to benefit from the superior algorithm.\n nmore = n\n p = []\n fac = 6./pi # 1/prob random point in unit sphere\n while nmore > 0:\n m = int((nmore + 5.*sqrt(nmore))*fac) # 99.9+% chance of nmore\n q = 2.*random((m, 3)) - 1.\n q = q[(q * q).sum(axis=-1) < 1., :]\n nmore -= len(q)\n p.append(q)\n return concatenate(p)[:n].reshape(size + (3,))", "def test_random_sphere_vector():\n\ttest_vector = o_gen_instance.generate_random_sphere_vector()\n\tassert isinstance(test_vector, np.ndarray)\n\tassert test_vector.shape == (3,)\n\tfor component in test_vector:\n\t\tassert component != 0.\n\tassert np.isclose(np.linalg.norm(test_vector), 1.0)", "def new_sphere_particle():\n function = LegacyFunctionSpecification()\n function.must_handle_array = True\n function.addParameter('index_of_the_particle', dtype='int32', direction=function.OUT, description =\n \"\"\"\n An index assigned to the newly created particle.\n This index is supposed to be a local index for the code\n (and not valid in other instances of the code or in other codes)\n \"\"\"\n )\n for par in [\"x\", \"y\", \"z\"]:\n function.addParameter(par, dtype='float64', unit=generic_unit_system.length, direction=function.IN, \n description = \"The initial position vector of the particle\")\n function.addParameter('radius', dtype='float64', unit=generic_unit_system.length, direction=function.IN, description = \"The radius of the particle\")\n for par in [\"red\", \"green\", \"blue\"]:\n function.addParameter(par, dtype='float64', direction=function.IN, \n description = \"The RGB color of the particle\")\n function.addParameter(\"alpha\", dtype='float64', direction=function.IN, description = \"The opacity of the particle\", default = 1.0)\n function.addParameter('npoints', dtype='int32', direction=function.LENGTH)\n function.result_type = 'int32'\n return function", "def sphere_cart()\ndef simulator(nparticles, ninteractions, vacradius, vesradius):\n for i in range(nparticles):\n #neutron = neutron_func(i)\n energy = 14E6\n phi = calc_phi()\n theta = calc_theta()\n xneut = 0\n yneut = 0\n zneut = 0\n d = collision_distance(phi, theta, xneut, zneut)\n r = -np.log(random.random(seed))/sigma_t(energy)\n j = 0\n while (j <= ninteractions)\n xneut = sphere_cart(scatter(energy, A)[0:2])", "def uniform_sphere(batch_size, dim, epsilon=1, ord=2):\r\n\r\n random = numpy.random.randn(batch_size, dim)\r\n random /= numpy.repeat(numpy.linalg.norm(random, ord=ord, axis=1).reshape(-1, 1), axis=1, repeats=dim)\r\n random *= epsilon\r\n\r\n return random", "def partsphere(self, x):\r\n self.counter += 1\r\n # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]\r\n dim = len(x)\r\n x = array([x[i % dim] for i in range(2*dim)])\r\n N = 8\r\n i = self.counter % dim\r\n #f = sum(x[i:i + N]**2)\r\n f = sum(x[np.random.randint(dim, size=N)]**2)\r\n return f", "def signal_generation(Es):\n size = 3 * 10**5\n low = 1\n high = 9\n\n rint = np.random.randint(low, high, size)\n signal = np.zeros((size, 2))\n # Mapping, regardless of the grey coding\n signal[:, 0] = map(lambda m: (Es)**0.5 * cos(2 * pi * m / 8), rint)\n signal[:, 1] = map(lambda m: (Es)**0.5 * sin(2 * pi * m / 8), rint)\n return rint, signal", "def surfaceIntSphere(r: float) -> float:\n 
return 4.0 * np.pi * r * r", "def psi_random(self):\n return np.random.normal(size=[1, self.model.config.latent_size])", "def Sphere(self,radius=1.0, npoints=10):\n\n # RESET MESH\n self.__reset__()\n\n from math import pi, cos, sin\n from meshpy.tet import MeshInfo, build\n from meshpy.geometry import generate_surface_of_revolution, EXT_OPEN, GeometryBuilder\n\n r = radius\n\n points = npoints\n dphi = pi/points\n\n def truncate(r):\n if abs(r) < 1e-10:\n return 0\n else:\n return r\n\n rz = [(truncate(r*sin(i*dphi)), r*cos(i*dphi)) for i in range(points+1)]\n\n geob = GeometryBuilder()\n geob.add_geometry(*generate_surface_of_revolution(rz,\n closure=EXT_OPEN, radial_subdiv=10))\n\n mesh_info = MeshInfo()\n geob.set(mesh_info)\n\n mesh = build(mesh_info)\n\n self.points = np.asarray(mesh.points)\n self.elements = np.asarray(mesh.elements)\n # self.faces = np.asarray(mesh.faces)\n # self.edges = np.asarray(self.edges)\n self.nelem = self.elements.shape[0]\n self.element_type = \"tet\"\n\n\n # GET EDGES & FACES - NONE ASSIGNMENT IS NECESSARY OTHERWISE IF FACES/EDGES ALREADY EXIST\n # THEY WON'T GET UPDATED\n self.faces = None\n self.edges = None\n self.GetBoundaryFacesTet()\n self.GetBoundaryEdgesTet()\n\n # CHECK MESH\n points = self.points[np.unique(self.faces),:]\n if not np.isclose(np.linalg.norm(points,axis=1),radius).all():\n raise ValueError(\"MeshPy could not construct a valid linear mesh for sphere\")", "def generate_sphere_points(n):\r\n points = []\r\n inc = math.pi * (3 - math.sqrt(5))\r\n offset = 2 / float(n)\r\n for k in range(int(n)):\r\n y = k * offset - 1 + (offset / 2)\r\n r = math.sqrt(1 - y*y)\r\n phi = k * inc\r\n points.append([math.cos(phi)*r, y, math.sin(phi)*r])\r\n return points", "def __init__(self, n=65, radius=1, port_distance_from_surface=.07):\n super(SilicaSphere, self).__init__()\n particle = mb.load('O=[Si]=O', smiles=True)\n particle.name = 'Silica'\n particle.add(mb.Port(anchor=particle[1]), label='out')\n\n # Generate 65 points on the surface of a unit sphere.\n pattern = mb.SpherePattern(n)\n # Magnify the unit sphere by the provided radius.\n pattern.scale(radius)\n\n particles = pattern.apply(particle, orientation='normal', compound_port='out')\n self.add(particles, label='silica_[$]')\n\n # Create particles and Ports at pattern positions.\n for i, pos in enumerate(pattern.points):\n particle = mb.load('O=[Si]=O', smiles=True)\n\n particle.translate_to(pos)\n self.add(particle, \"silica_{}\".format(i))\n port = mb.Port(anchor=particle)\n self.add(port, \"port_{}\".format(i))\n\n # Make the top of the port point toward the positive x axis.\n port.spin(-pi/2, [0, 0, 1]) \n # Raise up (or down) the top of the port in the z direction.\n port.spin(-arcsin(pos[2]/radius), [0, 1, 0]) \n # Rotate the Port along the z axis.\n port.spin(arctan2(pos[1], pos[0]), [0, 0, 1]) \n # Move the Port a bit away from the surface of the Sphere.\n port.translate(pos/radius * port_distance_from_surface)\n\n for bond in self.bonds():\n self.remove_bond(bond)", "def drawSphere3D(x0,y0,z0, radius, hres, vres):\n dislin.sphe3d(x0,y0,z0, radius, hres, vres)", "def sphvol(r):\n return (4./3.)*np.pi*(r**3.)", "def noisysphere(self, x, noise=4.0, cond=1.0):\r\n return self.elli(x, cond=cond) * (1 + noise * np.random.randn() / len(x))", "def sphere_sample(radius, num_pts=500):\n position_list = []\n for _ in range(int(num_pts)):\n # see https://stackoverflow.com/questions/33976911/\n # 
generate-a-random-sample-of-points-distributed-on-the-surface-of-a-unit-sphere/33977530#33977530\n # for discussion on this algorithm\n\n vec = np.random.normal(0, 1, 3) # select three random points (if normal dist no skip needed)\n vec /= np.linalg.norm(vec) # normalize vector\n vec *= radius # lengthen vector to desired radius\n position_list.append(list(vec))\n\n return position_list", "def _fast_sphere_pattern(n, radius):\n phi = (1 + np.sqrt(5)) / 2\n long_incr = 2*np.pi / phi\n dz = 2.0 / float(n)\n bands = np.arange(n)\n z = bands * dz - 1.0 + (dz/2.0)\n r = np.sqrt(1.0 - z*z)\n az = bands * long_incr\n x = r * np.cos(az)\n y = r * np.sin(az)\n points = np.column_stack((x, y, z)) * np.asarray([radius])\n\n return points", "def random_quaternions(count=100):\n rands = np.random.rand(count,3)\n root_1 = np.sqrt(rands[:,0])\n minus_root_1 = np.sqrt(1-rands[:,0])\n two_pi_2 = np.pi*2*rands[:,1]\n two_pi_3 = np.pi*2*rands[:,2]\n \n res = np.zeros((count,4))\n res[:,0] = minus_root_1*np.sin(two_pi_2)\n res[:,1] = minus_root_1*np.cos(two_pi_2)\n res[:,2] = root_1*np.sin(two_pi_3)\n res[:,3] = root_1*np.cos(two_pi_3)\n \n return res", "def surface_point(radius):\n z=random.uniform(-1,1)\n chi=random.uniform(0,2*numpy.pi)\n x=numpy.sqrt(1-z*z)*numpy.cos(chi)\n y=numpy.sqrt(1-z*z)*numpy.sin(chi)\n return radius*numpy.array([x,y,z])", "def randImS():\n u = 2*np.random.random()-1\n theta = 2*math.pi*np.random.random()\n h = np.zeros((1,4))\n h[0,1] = np.cos(theta)*np.sqrt(1-u**2)\n h[0,2] = np.sin(theta)*np.sqrt(1-u**2)\n h[0,3] = u\n return h", "def sphere_volume(r):\n\treturn 4/3. * math.pi * r ** 3", "def sphere_generator():\n\n sphericalRadius = np.sqrt(N / (4 * np.pi * pointDensity))\n sphericalThreshold = sphericalRadius * np.arccos(1 - 2 * thresholdFrac)\n\n data_sphere = []\n # np.random.seed(2020)\n for r in range(num_graphs):\n coords = sample_spherical(N, sphericalRadius, 3)\n # computes the adjacency matrix\n Adj_Matrix = np.zeros((N, N))\n for i in range(N):\n for j in range(N):\n a = coords[:, i]\n b = coords[:, j]\n dot_prod = np.dot(a, b)/sphericalRadius**2\n dot_prod = min(dot_prod, 1) # <-- sometimes np.dot returns 1.00000000002, messing up np.arccos()\n\n \"\"\" note that when np.arrcos gets 1, it returns a nan \"\"\"\n theta = np.arccos(dot_prod) # gets the angle between a and b (in radians)\n\n # ij_dist = np.linalg.norm(a-b) # calculate euclidean distance\n ij_dist = sphericalRadius * theta # arclength distance\n if ij_dist < sphericalThreshold:\n Adj_Matrix[i, j] = 1 # nodes that are connected are assigned a 1 in the matrix\n\n data_sphere.append(Adj_Matrix)\n\n return data_sphere", "def sphere_example():\n env = holodeck.make(\"MazeWorld-FinishMazeSphere\")\n\n # This command is to constantly rotate to the right\n command = 2\n for i in range(10):\n env.reset()\n for _ in range(1000):\n state, reward, terminal, _ = env.step(command)\n\n # To access specific sensor data:\n pixels = state[\"RGBCamera\"]\n orientation = state[\"OrientationSensor\"]\n\n # For a full list of sensors the sphere robot has, view the README", "def sphere_volume(r):\n return (4/3) * 3.14159 * r**3", "def _random_spherical_position(u):\n n = u.size\n nhalf = n // 2\n cos_t = 2 * u[:nhalf] - 1\n phi = 2 * np.pi * u[nhalf:]\n\n sin_t = np.sqrt((1.0 - cos_t * cos_t))\n\n x = sin_t * np.cos(phi)\n y = sin_t * np.sin(phi)\n z = cos_t\n\n return x, y, z", "def build_hemisphere(self, n_phis, n_thetas):\n index = glGenLists(1)\n phis = [float(i)*numpy.pi/float(n_phis/2) for i in range(n_phis/2+1)]\n 
phi_pairs = zip(phis, phis[1:])\n thetas = [float(i)*numpy.pi/float(n_thetas) for i in range(-n_thetas/2, n_thetas/2+1)]\n theta_pairs = zip(thetas, thetas[1:])\n glNewList(index, GL_COMPILE)\n glBegin(GL_QUADS)\n for phi1,phi2 in phi_pairs:\n dot1 = min(max(numpy.cos(phi1), 0.0), 1.0)\n dot2 = min(max(numpy.cos(phi2), 0.0), 1.0)\n for th1,th2 in theta_pairs:\n glTexCoord1f(dot1)\n glVertex3f(numpy.sin(phi1)*numpy.cos(th1), numpy.sin(phi1)*numpy.sin(th1), numpy.cos(phi1))\n glTexCoord1f(dot2)\n glVertex3f(numpy.sin(phi2)*numpy.cos(th1), numpy.sin(phi2)*numpy.sin(th1), numpy.cos(phi2))\n glTexCoord1f(dot2)\n glVertex3f(numpy.sin(phi2)*numpy.cos(th2), numpy.sin(phi2)*numpy.sin(th2), numpy.cos(phi2))\n glTexCoord1f(dot1)\n glVertex3f(numpy.sin(phi1)*numpy.cos(th2), numpy.sin(phi1)*numpy.sin(th2), numpy.cos(phi1))\n glEnd()\n glEndList()\n return index", "def sphere_sre(solution):\n a = 0\n bias = 0.2\n x = solution.get_x()\n x1 = x[:10]\n x2 = x[10:]\n value1 = sum([(i-bias)*(i-bias) for i in x1])\n value2 = 1/len(x) * sum([(i-bias)*(i-bias) for i in x2])\n return value1 + value2", "def test_sphere(self, sample_rate, num_channels, bits_per_sample):\n duration = 1\n path = self.get_temp_path(\"data.sph\")\n sox_utils.gen_audio_file(path, sample_rate, num_channels, duration=duration, bit_depth=bits_per_sample)\n info = self._info(path)\n assert info.sample_rate == sample_rate\n assert info.num_frames == sample_rate * duration\n assert info.num_channels == num_channels\n assert info.bits_per_sample == bits_per_sample\n assert info.encoding == \"PCM_S\"", "def sample_ar1(n, phi, sigma_e=1.0, size=1):\n x = sigma_e * np.random.randn(n, size)\n x[0] = x[0] * np.sqrt(1 / (1 - phi**2))\n for i in range(1, n):\n x[i] = x[i] + x[i - 1] * phi\n return x.T.squeeze()", "def sphere(\n network,\n pore_diameter='pore.diameter'\n):\n return 4/3*_pi*(network[pore_diameter]/2)**3", "def sample_sphere(n, truncate=True):\n point_dfs = []\n accumulated_samples = 0\n while accumulated_samples < n:\n # (2*r)^3 / (4/3 pi r^3) = 6/pi\n iter_npoints = min(int(np.round((n-accumulated_samples)*6/np.pi)),\n max_iter_npoints)\n # do 3-sigma more\n iter_npoints = iter_npoints + np.int(3*np.sqrt(iter_npoints))\n iter_npoints = max(iter_npoints, min_iter_npoints)\n\n x = np.random.uniform(-1, 1, iter_npoints)\n y = np.random.uniform(-1, 1, iter_npoints)\n z = np.random.uniform(-1, 1, iter_npoints)\n\n r = np.sqrt(x*x+y*y+z*z)\n in_sphere = r < 1.0\n\n r = r[in_sphere]\n x = x[in_sphere]/r\n y = y[in_sphere]/r\n z = z[in_sphere]/r\n\n theta = np.arccos(z)\n phi = np.arctan2(y, x)\n ra = (np.degrees(phi) + 360) % 360\n decl = 90.0-np.degrees(theta)\n\n new_df = pd.DataFrame({'ra': ra, 'decl': decl})\n new_df = new_df[['ra', 'decl']]\n\n point_dfs.append(new_df)\n new_samples = ra.shape[0]\n accumulated_samples += new_samples\n info('completed %d samples' % accumulated_samples)\n\n points = pd.concat(point_dfs)\n if truncate:\n points = points[:n]\n\n points.reset_index(drop=True, inplace=True)\n points.index.rename('sample_idx', inplace=True)\n\n return points", "def random_uniform_within_circle():\n rho = np.sqrt(np.random.uniform(0, 1))\n phi = np.random.uniform(0, 2 * np.pi)\n x = rho * np.cos(phi)\n y = rho * np.sin(phi)\n return np.array([x, y])", "def U(self, *args):\n return _Bnd.Bnd_Sphere_U(self, *args)", "def sphere(self, path, args):\n note_name = args[0]\n hands = args[1]\n probability = args[2]\n force = args[3]\n \n print \"got sphere : %s '%s', %d, %f, %f\" % (path, \n note_name, \n hands, \n probability,\n 
force)\n\n bowls = {'c2': (0, 127),\n 'd2': (0, 127)}\n\n speed_min = 0\n speed_max = 1\n\n try:\n bowl_lower = bowls[note_name][0]\n bowl_upper = bowls[note_name][1]\n except KeyError:\n bowl_lower = 0\n bowl_upper = 127\n\n bowl_range = bowl_upper - bowl_lower\n step = float(speed_max) / bowl_range\n\n print step, bowl_range\n \n midi_vel = int(min(math.ceil(bowl_lower + (float(force*force + 0.1) / step) + 5), 127))\n\n print midi_vel\n\n if hands == RIGHT_HAND:\n engine._TheEngine().process(NoteOnEvent(engine.in_ports()[0],\n settings.MIDI_HAMMER_CHANNEL,\n note_number(note_name),\n midi_vel)\n )\n\n elif hands == LEFT_HAND:\n engine._TheEngine().process(NoteOnEvent(engine.in_ports()[0],\n settings.MIDI_REPEAT_CHANNEL,\n note_number(note_name),\n midi_vel)\n )", "def rgb_sphere(n=128):\n sphere = np.zeros((n, n, 3), dtype=np.uint8)\n\n for x in range(n):\n xx = (n // 2 - x) / (n // 2)\n for y in range(n):\n yy = (n // 2 - y) / (n // 2)\n if xx**2 + yy**2 > 1:\n continue\n zz = np.sqrt(1 - xx**2 - yy**2)\n\n sphere[x, y, :] = _vec_to_rgb(xx, yy, zz)\n\n return sphere", "def circle(n=5000, r=1, noise=0.05):\n phis = 2 * np.pi * np.random.rand(n)\n x = [[r * np.sin(phi), r * np.cos(phi)] for phi in phis]\n x = np.array(x)\n x = x + noise * np.random.randn(n, 2)\n return x", "def random(cls):\n r1, r2, r3 = np.random.random(3)\n\n q1 = sqrt(1.0 - r1) * (sin(2 * pi * r2))\n q2 = sqrt(1.0 - r1) * (cos(2 * pi * r2))\n q3 = sqrt(r1) * (sin(2 * pi * r3))\n q4 = sqrt(r1) * (cos(2 * pi * r3))\n\n return cls(q1, q2, q3, q4)", "def get_s0_random_xy(N):\n s0x= []\n s0y = []\n s0z = []\n for i in range(N):\n s0z.append(0)\n r = 2 * np.pi * random.random()\n s0x.append(cos(r))\n s0y.append(sin(r))\n norm = np.linalg.norm([s0x [-1],s0y [-1],s0z [-1] ])\n s0x [-1] = s0x[-1] / norm\n s0y [-1] = s0y[-1] / norm\n s0z [-1] = s0z[-1] / norm\n return np.concatenate((s0x,s0y,s0z),axis = 0)", "def newMiniSurvey(cls, telescope, minRa, maxRa, direction):\n\n # TODO rotation isn't fully implemented yet\n rotation = next(cls.rotationGenerator.rotations(telescope))\n\n # get a tiling over the whole sphere\n tiling = Tiling.ThomsonTiling()\n allPointings = tiling.getTiling(config.tilingDensity)\n\n # now take the pointings and rotate them around the z and x\n # axes so they are randomly dithered on the sphere\n zDither = np.random.randn()\n xDither = np.random.randn()\n \n # rotating around z is easy\n allPointings[:,0] += zDither\n\n # rotating around x is harder\n sin = np.sin\n cos = np.cos\n phi = allPointings[:,0]\n theta = allPointings[:,1]\n newPhi = np.arctan2(-sin(xDither)*sin(theta) + cos(xDither)*cos(theta)*sin(phi),\n cos(theta)*cos(phi))\n newPhi %= 2 * np.pi\n newTheta = np.arcsin(cos(xDither)*sin(theta) + sin(xDither)*cos(theta)*sin(phi))\n allPointings = np.vstack([newPhi, newTheta]).T\n\n # TODO should rotate around y as well or else you get structured\n # noise in the final result\n\n # calculate min/maxDec that correspond to the passed-in direction\n # and modify min/maxRa if we're in the zenith dec band\n if direction == config.NORTH:\n minDec = telescope.latitude + config.zenithBuffer\n maxDec = config.maxDec\n # min/maxRa remain unchanged\n elif direction == config.SOUTH:\n minDec = config.minDec\n maxDec = telescope.latitude - config.zenithBuffer\n # min/maxRa remain unchanged\n elif direction == config.EAST:\n minDec = telescope.latitude - config.zenithBuffer\n maxDec = telescope.latitude + config.zenithBuffer\n minRa += config.zenithBuffer + config.zenithBufferOffset\n maxRa += 
config.zenithBuffer + config.zenithBufferOffset\n else:\n raise ValueError(\"Invalid direction: \" + str(direction))\n\n # choose the subset of pointings that lie in the min/maxRa/Dec rectangle\n validRa = utils.areRasInRange(allPointings[:,0], (minRa, maxRa))\n validDec = ((minDec < allPointings[:,1]) &\n (maxDec > allPointings[:,1]))\n validMask = validRa & validDec\n\n pointings = allPointings[np.where(validMask)]\n\n # create visitpairs from the calculated pointings\n visitPairs = [VisitPair(pointing[0], pointing[1], rotation)\n for pointing in pointings]\n\n return set(visitPairs)", "def mHollowSphere(a=3, b=6, N=250):\n a = float(a)\n b = float(b)\n N = int(N)\n rmin = 0\n rmax = 2*b\n dr = (rmax-rmin)/float(N)\n r = np.zeros((N))\n g = np.zeros((N))\n for i in range(N):\n r[i] = rmin+i*dr\n g[i] = 0\n if r[i] >= a and r[i] < b:\n g[i] = (r[i]-a)/(b-a)/np.power(r[i], 2)\n elif r[i] >= b:\n g[i] = 1/np.power(r[i], 2)\n return r, g", "def random_cloud(envelope, seed=None, impulse=False, events=None, do_amp=True, do_mask=False):\n\n (N_X, N_Y, N_frame) = envelope.shape\n amps = 1.\n if impulse:\n fx, fy, ft = get_grids(N_X, N_Y, N_frame)\n phase = -2*np.pi*(N_X/2*fx + N_Y/2*fy + N_frame/2*ft)\n F_events = np.exp(1j * phase)\n elif events is None:\n np.random.seed(seed=seed)\n phase = 2 * np.pi * np.random.rand(N_X, N_Y, N_frame)\n F_events = np.exp(1j * phase)\n if do_amp:\n # see Galerne, B., Gousseau, Y. & Morel, J.-M. Random phase textures: Theory and synthesis. IEEE Transactions in Image Processing (2010). URL http://www.biomedsearch.com/nih/Random-Phase-Textures-Theory-Synthesis/20550995.html. (basically, they conclude \"Even though the two processes ADSN and RPN have different Fourier modulus distributions (see Section 4), they produce visually similar results when applied to natural images as shown by Fig. 11.\")\n F_events *= np.random.randn(N_X, N_Y, N_frame)\n else:\n F_events = np.fft.fftn( events[:, :, :] )\n F_events = np.fft.fftshift(F_events)\n\n Fz = F_events * envelope\n\n # de-centering the spectrum\n Fz = np.fft.ifftshift(Fz)\n Fz[0, 0, 0] = 0. 
# removing the DC component\n z = np.fft.ifftn(Fz).real\n if do_mask:\n fx, fy, ft = get_grids(N_X, N_Y, N_frame)\n z *= get_mask(fx, fy, ft)\n\n return z", "def _randomSO3():\n u1 = np.random.random()\n u2 = np.random.random()\n u3 = np.random.random()\n R = np.array(\n [\n [np.cos(2 * np.pi * u1), np.sin(2 * np.pi * u1), 0],\n [-np.sin(2 * np.pi * u1), np.cos(2 * np.pi * u1), 0],\n [0, 0, 1],\n ]\n )\n v = np.array([np.cos(2 * np.pi * u2) * np.sqrt(u3), np.sin(2 * np.pi * u2) * np.sqrt(u3), np.sqrt(1 - u3)])\n H = np.identity(3) - 2 * v * np.transpose([v])\n return -np.dot(H, R)", "def V(self, *args):\n return _Bnd.Bnd_Sphere_V(self, *args)", "def create_unit_sphere(recursion_level=2):\n if recursion_level > 7 or recursion_level < 1:\n raise ValueError(\"recursion_level must be between 1 and 7\")\n return unit_octahedron.subdivide(recursion_level - 1)", "def test1_constuctor(self):\n\n center = np.asarray([0, 0, 0], dtype=float)\n radius = 1.\n c = Spherical_Gaussian(center, radius)\n self.assertTrue(c.radius_eff == radius)\n self.assertTrue(np.allclose(c.center, center))", "def create_unit_hemisphere(recursion_level=2):\n sphere = create_unit_sphere(recursion_level)\n return HemiSphere.from_sphere(sphere)", "def getSphere(radius, colour, distance):\n\n mesh_sphere = o3d.geometry.TriangleMesh.create_sphere(radius=radius)\n mesh_sphere.translate([distance, 0, 0])\n mesh_sphere.compute_vertex_normals()\n mesh_sphere.paint_uniform_color(colour)\n\n return mesh_sphere", "def randomSO3():\n\tu1 = random.random()\n\tu2 = random.random()\n\tu3 = random.random()\n\tR = array([[cos(2*pi*u1), sin(2*pi*u1), 0], [-sin(2*pi*u1), cos(2*pi*u1), 0], [0, 0, 1]])\n\tv = array([cos(2*pi*u2)*sqrt(u3), sin(2*pi*u2)*sqrt(u3), sqrt(1-u3)])\n\tH = identity(3)-2*v*transpose([v])\n\t#print \"v\", v\n\t#print \"vvT\", v*transpose([v])\n\t#print \"H\", H\n\t#print linalg.det(R), linalg.det(H)\n\t#print H, v * transpose([v])\n\treturn - dot(H, R)", "def hsv_black_sphere(n=128):\n sphere = np.zeros((n, n, 3), dtype=np.uint8)\n\n for x in range(n):\n xx = (-n // 2 + x) / (n // 2)\n for y in range(n):\n yy = (-n // 2 + y) / (n // 2)\n if xx**2 + yy**2 > 1:\n continue\n zz = np.sqrt(1 - xx**2 - yy**2)\n\n direction = np.arctan2(yy, xx)\n inclination = np.arcsin(zz)\n\n if direction < 0:\n direction += np.pi\n\n sphere[x, y, :] = _orientation_to_hsv(direction, inclination)\n\n return sphere", "def create_sphere_actor(unstructured_grid) -> vtkActor:\n\n # Set sphere\n sphere = vtkSphereSource()\n sphere.SetRadius(0.1)\n\n # Set sphere glyph 3D\n glyph = create_glyph_3d(unstructured_grid, sphere)\n\n # Set sphere mapper\n mapper = vtkPolyDataMapper()\n mapper.SetInputConnection(glyph.GetOutputPort())\n\n # Set sphere actor\n actor = vtkActor()\n actor.SetMapper(mapper)\n\n return actor", "def _generate_signal(self):\n x = np.arange(self.n, dtype='float')\n resample = np.random.rand(self.n) >= self.proba\n resample[0] = True # randomly initialize first sample\n x[resample] = np.random.randn(np.sum(resample))\n for i in x[~resample]:\n x[int(i)] = x[int(i)-1]\n return x", "def create_sphere(lat=10, lng=10, color=COLOR_WHITE):\n if lat >= 3 and lng >= 3:\n obj = glGenLists(1)\n glNewList(obj, GL_COMPILE)\n glPushMatrix()\n glColor4fv(color)\n try:\n glutSolidSphere(1.0, lat, lng)\n except:\n if not _ERRS[0]:\n printGLError(\n \"la version actual de OpenGL no posee la funcion glutSolidSphere\")\n _ERRS[0] = True\n glPopMatrix()\n glEndList()\n return obj\n else:\n raise Exception(\n \"La latitud y longitud de la figura deben 
ser mayores a 3\")", "def samplePatchOnSphere(phi, theta, delta, size, rng, degrees=True):\n u = rng.uniform(size=size)\n v = rng.uniform(size=size)\n phi = np.radians(phi)\n theta = np.radians(theta)\n delta = np.radians(delta)\n\n phivals = 2. * delta * u + (phi - delta)\n phivals = np.where(phivals >= 0., phivals, phivals + 2. * np.pi)\n\n # use conventions in spherical coordinates\n # theta = np.pi/2.0 - theta\n thetamax = theta + delta\n thetamin = theta - delta\n\n # if thetamax > np.pi or thetamin < 0. :\n # raise ValueError('Function not implemented to cover wrap around poles')\n\n # Cumulative Density Function is cos(thetamin) - cos(theta) / cos(thetamin) - cos(thetamax)\n a = np.cos(thetamin) - np.cos(thetamax)\n thetavals = np.arccos(-v * a + np.cos(thetamin))\n\n if degrees:\n return np.degrees(phivals), np.degrees(thetavals)\n else:\n return phivals, thetavals", "def __init__(self):\n # start x position\n self.x = random.randrange(size_x)\n # start y position\n self.y = - random.randrange(100)\n # drift x (amount of change each loop along the x axis)\n self.dx = random.randrange(3) - random.randrange(6)\n # drift y (amount of change each loop along the y axis)\n self.dy = random.randrange(1, 20) + random.randrange(4)\n # the size of the circular snowflake\n self.size = random.randrange(1, 4)\n # the colour of the snowflake (from sludgy grey to snowy white)\n c = random.randrange(200, 256)\n self.color = [c, c, c]", "def sinwave(scene):\n # create an empty homogeneous transformation\n matrix = np.eye(4)\n # set Y as cos of time\n matrix[1][3] = np.cos(time.time()) * 2\n # set Z as sin of time\n matrix[2][3] = np.sin(time.time()) * 3\n\n # take one of the two spheres arbitrarily\n node = s.graph.nodes_geometry[0]\n # apply the transform to the node\n scene.graph.update(node, matrix=matrix)", "def sample_radii(size=1):\n interp_func = InterpolatedUnivariateSpline(m_grid, np.log(r_grid), k=1)\n return np.exp(interp_func(np.random.uniform(0, 1, size=size))) * u.kpc", "def random_init(self, shape):\n return np.random.randn(shape[0],shape[1])*0.01", "def sample_ellipsoid(p0, covmat, size=1):\n return np.random.multivariate_normal(np.atleast_1d(p0),\n np.atleast_2d(covmat),\n size=size)", "def new_uniform_spherical_particle_distribution(number_of_particles, size, total_mass, **keyword_arguments):\n particles = Particles(number_of_particles)\n particles.mass = total_mass * 1.0 / number_of_particles\n x, y, z = UniformSphericalDistribution(number_of_particles, **keyword_arguments).result\n particles.x = size * x\n particles.y = size * y\n particles.z = size * z\n return particles", "def random_three_vector():\n phi = config.random.uniform(0, np.pi * 2)\n costheta = config.random.uniform(-1, 1)\n\n theta = np.arccos(costheta)\n x = np.sin(theta) * np.cos(phi)\n y = np.sin(theta) * np.sin(phi)\n z = np.cos(theta)\n\n return x, y, z", "def add_sphere(self):\n self.scenes[self.current_scene].add_object(Sphere())\n self.redraw()", "def half_circle(n=5000, r=1, noise=0.05):\n phis = np.pi * np.random.rand(n)\n x = [[r * np.sin(phi), r * np.cos(phi)] for phi in phis]\n x = np.array(x)\n x = x + noise * np.random.randn(n, 2)\n return x", "def test_sphere_init():\n Sphere(5)", "def random():\n # Define lattice spacing as a multiple of the particle radius\n # using the formula a = 4 r/sqrt(3). Systems which are ordered\n # are probably mostly filled, so use a distribution which goes from\n # zero to one, but leaving 90% of them within 80% of the\n # maximum bcc packing. 
Lattice distortion values are empirically\n # useful between 0.01 and 0.7. Use an exponential distribution\n # in this range 'cuz its easy.\n radius = 10**np.random.uniform(1.3, 4)\n d_factor = 10**np.random.uniform(-2, -0.7) # sigma_d in 0.01-0.7\n dnn_fraction = np.random.beta(a=10, b=1)\n dnn = radius*4/np.sqrt(3)/dnn_fraction\n pars = dict(\n #sld=1, sld_solvent=0, scale=1, background=1e-32,\n dnn=dnn,\n d_factor=d_factor,\n radius=radius,\n )\n return pars", "def create_sphere(self, position):\n\n # Create source\n source = vtk.vtkSphereSource()\n source.SetCenter(0, 0, 0)\n source.SetRadius(1.e3)\n source.SetPhiResolution(40)\n source.SetThetaResolution(40)\n\n # Mapper\n mapper = vtk.vtkPolyDataMapper()\n if vtk.VTK_MAJOR_VERSION <= 5:\n mapper.SetInput(source.GetOutput())\n else:\n mapper.SetInputConnection(source.GetOutputPort())\n\n # Actor\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n actor.GetProperty().SetColor(1, 0.5, 0.5)\n actor.GetProperty().SetAmbient(0.5)\n actor.GetProperty().SetOpacity(0.8)\n actor.SetPosition(position)\n\n # Return actor\n return actor", "def sample_torsion(phi, psi, kappa_scalar=8, random_state=1):\n \n phi_sample = torch.tensor(np.round([randvonmises(phi, i, kappa_scalar, random_state) for i in range(phi.shape[3])], 4))\n psi_sample = torch.tensor(np.round([randvonmises(psi, i, kappa_scalar, random_state) for i in range(psi.shape[3])], 4))\n return phi_sample, psi_sample", "def test_points_on_1sphere_8x():\n points = generate.points_on_1sphere(8, 'x')\n assert np.allclose(points[0], cst.quat1)\n assert np.allclose(points[2], cst.quatx90)\n assert np.allclose(points[4], cst.quatx)", "def sphere(geometry,\n psd_name,psd_shape,psd_loc,psd_scale,\n pore_seed='pore.seed',\n psd_offset=0,\n **kwargs):\n import scipy.stats as spst\n prob_fn = getattr(spst,psd_name)\n P = prob_fn(psd_shape,loc=psd_loc,scale=psd_scale)\n value = P.ppf(geometry[pore_seed])+psd_offset\n return value", "def _sample_z(self, a):\n r = np.random.rand()\n return ((1 + r * (a - 1))**2) / a", "def sample_hypersphere(n_samples, sample_shape, radius, l_norm=2, mode='sphere', sample_gen=None, seed=None):\n\n if sample_gen is not None:\n assert seed is None, \"Can't provide individual seeds if using the multi-threaded generator.\"\n assert sample_shape == sample_gen.shape\n\n # Get precalculated samples from the generator\n gauss = np.empty(shape=(n_samples, np.prod(sample_shape)), dtype=np.float64)\n for i in range(n_samples):\n gauss[i] = sample_gen.get_normal().reshape(-1)\n else:\n if seed is not None:\n np.random.seed(seed)\n gauss = np.random.normal(size=(n_samples, np.prod(sample_shape)))\n\n # Norm to\n norm = np.linalg.norm(gauss, ord=l_norm, axis=1)\n perturbation = (gauss / norm[:, np.newaxis])\n\n # Sphere: sample only the surface of the hypersphere.\n # Ball: sample inside the sphere. 
Note: this is probably not uniform.\n if mode == 'sphere':\n perturbation *= radius\n elif mode == 'ball':\n perturbation *= np.random.uniform(low=0.0, high=radius, size=(n_samples, 1))\n else:\n raise ValueError(\"Unknown sampling mode.\")\n\n perturbation = np.reshape(perturbation, (n_samples,) + sample_shape)\n\n return perturbation", "def simpleSphere(precision):\n b = polyhedron([vertex(0, 0, 1)], [], [])\n rot1 = vector(math.pi / precision, 0, 0)\n for i in range(2 * precision + 1):\n b.rotateSweep(rot1, (0, 0, 0))\n rot2 = vector(0, 0, math.pi / precision)\n for j in range(precision):\n b.rotateSweep(rot2, (0, 0, 0))\n return b", "def make_ball(self, scale, color):\n sphere = Sphere()\n sphere.set_location(0, 0, 0)\n sphere.set_size(scale, scale, scale)\n sphere.set_color(c=color)\n return sphere", "def make_ball(self, scale, color):\n sphere = Sphere()\n sphere.set_location(0, 0, 0)\n sphere.set_size(scale, scale, scale)\n sphere.set_color(c=color)\n return sphere", "def random_vector_in_unit_ball():\n x = np.random.normal(loc=0.0, scale=1.0, size=(numSamples, self.dim))\n z = np.random.exponential(scale=1.0, size=(numSamples,))\n d = (np.sum(np.square(x), axis=1) + z) ** 0.5\n d = d[:, np.newaxis]\n return x / d", "def testSphereRadius(self):\n sp = nigel.SphereSelection(nb, radius=10)\n self.assertEqual(sp.n, 9)", "def sphere_volume(radius : number) -> number:\n volume = 4/3*(pi*radius*radius*radius)\n return volume", "def random_nfw_ellipsoid(ran_key, conc, a=1, b=1, c=1):\n x, y, z = random_nfw_spherical_coords(ran_key, conc)\n return a * x, b * y, c * z", "def hemisphere_point(radius, inclination):\n if radius <= 0:\n raise AssertionError('Radius mast be grater than 0')\n\n alpha = np.random.rand() * pi*2\n r_small = radius*sin(radians(inclination))\n r = np.random.rand() * r_small\n\n # Find points on the sphere\n x = r * cos(alpha)\n y = r * sin(alpha)\n z = sqrt(radius**2 - x**2 - y**2)\n\n return x, y, z", "def generate_fake_noise(inputs, size):\n return np.random.normal(-0.0289923828125, 1.9391296947313124, (inputs, size)).astype(np.float32)", "def test_sphere(self):\n fun = get_problem('sphere', self.dimension)\n self.assertEqual(fun(self.array), 0.0)", "def random_point():\n theta = numpy.arccos(2. * numpy.random.random() - 1)\n phi = 2 * numpy.pi * numpy.random.random()\n return theta, phi", "def sphere_volume(sphere_radius):\n return (4 / 3 * np.pi * sphere_radius**3)", "def make_circle(self):\n A = 2*np.random.rand(self.m, self.n)-1\n b = np.sign(np.sum(A**2, 1) - self.radius)\n return A, b", "def rand(cls):\n q_vec = np.random.rand(4)\n q=Quat(q_vec)\n q.normalize()\n return q", "def rand_unit_vect_3D():\n xyz = np.random.normal(size=3)\n mag = sum(i**2 for i in xyz) ** .5\n return xyz / mag", "def generate_gaussian():\n amp = 10 * numpy.random.chisquare(3)\n width = numpy.random.chisquare(3)\n mean = numpy.random.uniform(-10 + width, 10 - width)\n x = numpy.linspace(-10, 10, 500)\n y = amp * numpy.exp(- (x - mean) ** 2 / width ** 2)\n add_noise(y, 0.1)\n return x, y", "def sphere(indiv):\n return sum([ x ** 2 for x in indiv])" ]
[ "0.7532667", "0.7204255", "0.71878636", "0.7109239", "0.70131403", "0.69970477", "0.6976129", "0.69127107", "0.69005173", "0.6890995", "0.6522725", "0.64420277", "0.6441751", "0.64262915", "0.6411878", "0.6391545", "0.6374727", "0.6287322", "0.6277766", "0.6242343", "0.6238088", "0.6192554", "0.6169456", "0.6126314", "0.6091389", "0.60883534", "0.6065728", "0.6053637", "0.60523427", "0.6035075", "0.5997573", "0.5997456", "0.596249", "0.5931715", "0.59093666", "0.590801", "0.59059626", "0.59057695", "0.58992803", "0.5886878", "0.5872868", "0.58580077", "0.5852174", "0.58468646", "0.58337575", "0.583153", "0.5829255", "0.58269626", "0.58212495", "0.5820256", "0.58195376", "0.58150077", "0.58147156", "0.58051014", "0.5785811", "0.57830757", "0.57763225", "0.5765192", "0.5758721", "0.5744116", "0.57415855", "0.5737198", "0.5722305", "0.5716711", "0.5710558", "0.5707367", "0.5701833", "0.5694499", "0.5685118", "0.5675328", "0.5673009", "0.5666706", "0.5659791", "0.56528336", "0.56477076", "0.5634631", "0.56333584", "0.5628827", "0.5622046", "0.5621232", "0.56037974", "0.56021416", "0.5594144", "0.5593348", "0.5585083", "0.5574921", "0.5574921", "0.5561236", "0.55580425", "0.5557319", "0.5543476", "0.55395633", "0.55338055", "0.55224144", "0.5521558", "0.5519532", "0.55086935", "0.55084664", "0.54987663", "0.54962337", "0.5493132" ]
0.0
-1
Reproducing kernel: calculate the inverse Funk-Radon transform of the reproducing kernel for the space of spherical harmonics of maximum degree N.
def inv_funk_radon_kernel(mu, N):
    # Check that -1 <= mu <= 1
    mu = np.clip(mu, -1, 1)

    # Need Legendre polynomials
    legPolys = legp(mu, N)
    p_at_zero = legp(0, N)

    coefs = 2*np.arange(0, N+1, 2) + 1

    ker = coefs*legPolys[::2]/p_at_zero[::2]

    return ker.sum() / (8*np.pi)
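Note on this record: the document above evaluates, at mu = cosine of the angle between two unit vectors, the truncated series K(mu) = (1/(8*pi)) * sum over even degrees l <= N of (2l+1) * P_l(mu) / P_l(0); this closed form is read off the code, not stated in the source. The sketch below is a minimal, self-contained reference that reproduces the same computation using SciPy's Legendre evaluator in place of the record's undefined legp() helper. The function name inv_funk_radon_kernel_ref, the SciPy dependency, and the example vectors are illustrative assumptions, not part of the dataset.

import numpy as np
from scipy.special import eval_legendre

def inv_funk_radon_kernel_ref(mu, N):
    # Same quantity as the record's document, computed without legp():
    # sum over even degrees l of (2l+1) * P_l(mu) / P_l(0), divided by 8*pi.
    mu = np.clip(mu, -1, 1)
    degrees = np.arange(0, N + 1, 2)        # even degrees only
    coefs = 2 * degrees + 1                 # (2l + 1) weights
    ratio = eval_legendre(degrees, mu) / eval_legendre(degrees, 0.0)
    return (coefs * ratio).sum() / (8 * np.pi)

# Example: kernel value for two unit vectors u, v with mu = u . v
u = np.array([0.0, 0.0, 1.0])
v = np.array([0.0, np.sin(0.3), np.cos(0.3)])
print(inv_funk_radon_kernel_ref(u @ v, N=8))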
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inv_funk_radon_even_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n p_at_zero = legp(0, N)\n\n coefs_num = 2*np.arange(0, N+1) + 1\n coefs_den = np.arange(2,N+1,2) * (np.arange(2,N+1,2) + 1)\n\n ker = coefs_num[2::2]*legPolys[2::2] / (p_at_zero[2::2] * coefs_den)\n\n return ker.sum() / (8.0*np.pi*np.pi)", "def init_stitch(N):\n\tif N is None:\n\t\tN = np.int32(HII_DIM) #prepare for stitching\n\tMETA_GRID_SIZE = DIM/N\n\tM = np.int32(HII_DIM/META_GRID_SIZE)\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN/DIM)\n\tprint 'scale', scale\n\tHII_scale = np.float32(BOX_LEN/HII_DIM)\n\tshape = (DIM,DIM,N)\n\tstitch_grid_size = (DIM/(block_size[0]),\n\t\t\t\t\t\tDIM/(block_size[0]),\n\t\t\t\t\t\tN/(block_size[0]))\n\tHII_stitch_grid_size = (HII_DIM/(block_size[0]),\n\t\t\t\t\t\tHII_DIM/(block_size[0]),\n\t\t\t\t\t\tM/(block_size[0]))\n\t#ratio of large box to small size\n\tkernel_source = open(cmd_folder+\"/initialize_stitch.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'DIM': DIM, \n\t\t'VOLUME': VOLUME,\n\t\t'META_BLOCKDIM': N\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_stitch = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\tplan2d = Plan((np.int64(DIM), np.int64(DIM)), dtype=np.complex64)\n\tplan1d = Plan((np.int64(DIM)), dtype=np.complex64)\n\tprint \"init pspec\"\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\t#hbox_large = pyfftw.empty_aligned((DIM, DIM, DIM), dtype='complex64')\n\thbox_large = np.zeros((DIM, DIM, DIM), dtype=np.complex64)\n\t#hbox_small = np.zeros(HII_shape, dtype=np.float32)\n\t#hbox_large = n\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\n\t# Set up pinned memory for transfer\n\t#largebox_hs = cuda.aligned_empty(shape=shape, dtype=np.float32, alignment=resource.getpagesize())\n\tlargebox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.float32)\n\tlargecbox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.complex64)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tprint \"init boxes\"\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t# MRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=meta_x*N**3)\n\t\tinit_stitch(largebox_d, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tinit_stitch(largebox_d_imag, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largecbox_pin.copy()\n\t#if want to get velocity need to use this\n\tif True:\n\t\tprint \"saving kbox\"\n\t\tnp.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\n\tprint \"Executing FFT on 
device\"\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint hbox_large.dtype\n\tprint \"Finished FFT on device\"\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\t\n\tif True:\n\t\tprint \"loading kbox\"\n\t\thbox_large = np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\t#cuda.memcpy_htod_async(largebox_d, largebox_pin)\n\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tHII_filter(largebox_d, DIM, np.int32(meta_z), ZERO, smoothR, block=block_size, grid=stitch_grid_size);\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largebox_d.get_async()\n\t#import IPython; IPython.embed()\n\tprint \"Executing FFT on host\"\n\t#hbox_large = hifft(hbox_large).astype(np.complex64).real\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint \"Finished FFT on host\"\n\t#import IPython; IPython.embed()\n\n\t# for meta_x in xrange(META_GRID_SIZE):\n\t# \tfor meta_y in xrange(META_GRID_SIZE):\n\t# \t\tfor meta_z in xrange(META_GRID_SIZE):\n\t# \t\t\tlargebox_d = gpuarray.to_gpu(hbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N])\n\t# \t\t\tHII_filter(largebox_d, N, np.int32(meta_x), np.int32(meta_y), np.int32(meta_z), ZERO, smoothR, block=block_size, grid=grid_size);\n\t# \t\t\thbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N] = largebox_d.get()\n\t#plan = Plan(shape, dtype=np.complex64)\n\t#plan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\n\n\t# This saves a large resolution deltax\n\n\t\n\tprint \"downsampling\"\n\tsmallbox_d = gpuarray.zeros((HII_DIM,HII_DIM,M), dtype=np.float32)\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\t#largebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tlargebox_d /= scale**3 #\n\t\tsubsample_kernel(largebox_d, smallbox_d, DIM, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size) #subsample in real space\n\t\thbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallbox_d.get_async()\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), hbox_small)\n\t#import IPython; IPython.embed()\n\n\n\t# To get velocities: reload the k-space box\n\thbox_large = np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\thvbox_large = np.zeros((DIM, DIM, DIM), dtype=np.float32)\n\thvbox_small = np.zeros(HII_shape, dtype=np.float32)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,N), dtype=np.complex64)\n\tsmallvbox_d = gpuarray.zeros((HII_DIM, HII_DIM, M), dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\t\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(meta_z), np.int32(num), block=block_size, 
grid=stitch_grid_size)\n\t\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=stitch_grid_size)\n\t\t\tprint hvbox_large.shape, largevbox_d.shape\n\t\t\thvbox_large[:, :, meta_z*N:(meta_z+1)*N] = largevbox_d.get_async()\n\t\thvbox_large = fft_stitch(N, plan2d, plan1d, hvbox_large, largevbox_d).real\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargevbox_d = gpuarray.to_gpu_async(hvbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\tsubsample_kernel(largevbox_d.real, smallvbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size)\n\t\t\thvbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallvbox_d.get_async()\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallvbox_d.get())\n\n\treturn", "def even_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n \n\n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs[0::2]*legPolys[0::2] \n\n return ker.sum() / (4.0*np.pi)", "def even_kernel_der(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n #Derivatives of Legendre polynomials\n DlegPolys = legp_der(mu, N)\n \n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs[0::2]*DlegPolys[0::2] \n\n return ker.sum() / (4.0*np.pi)", "def kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n \n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs*legPolys \n\n return ker.sum() / (4.0*np.pi)", "def f(k):\n return k * k * k * k * pk(k, suppression) * spherical_jn(2, k * r)", "def f(k):\n return k * k * k * k * pk(k, suppression) * spherical_jn(0, k * r)", "def f(k):\n return k * k * pk(k, suppression) * spherical_jn(0, k * r)", "def f(k):\n return k * k * k * pk(k, suppression) * spherical_jn(1, k * r)", "def nd_kernel(n):\n n = int(n)\n total_size = 3**n\n mid_point = int((3**n - 1)/2)\n kern = np.zeros(total_size, dtype=bool)\n for i in range(n):\n kern[mid_point-3**i] = True\n kern[mid_point+3**i] = True\n new_shape = 3*np.ones(n, dtype=int) \n unnormed_kern = kern.reshape(new_shape)\n return unnormed_kern/unnormed_kern.sum()", "def init():\n\tN = np.int32(DIM) #prepare for stitching\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN)/DIM\n\tHII_scale = np.float32(BOX_LEN)/HII_DIM\n\tshape = (N,N,N)\n\t\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\n\tkernel_source = open(cmd_folder+\"/initialize.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'VOLUME': VOLUME,\n\t\t'DIM': DIM\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_kernel = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tadj_complex_conj = main_module.get_function(\"adj_complex_conj\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d, np.int32(DIM), block=block_size, grid=grid_size)\n\n\t#import IPython; IPython.embed()\n\tlargebox_d_imag = gpuarray.zeros(shape, 
dtype=np.float32)\n\tinit_kernel(largebox_d_imag, np.int32(DIM), block=block_size, grid=grid_size)\n\n\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\n\t#adj_complex_conj(largebox_d, DIM, block=block_size, grid=grid_size)\n\tlargebox = largebox_d.get()\n\t#np.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox)\n\n\t#save real space box before smoothing\n\tplan = Plan(shape, dtype=np.complex64)\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox_d.real.get_async())\n\n\t#save real space box after smoothing and subsampling\n\t# host largebox is still in k space, no need to reload from disk\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tHII_filter(largebox_d, N, ZERO, smoothR, block=block_size, grid=grid_size);\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tsubsample_kernel(largebox_d.real, smallbox_d, N, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_grid_size) #subsample in real space\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), smallbox_d.get_async())\n\n\t# reload the k-space box for velocity boxes\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\t\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,DIM), dtype=np.complex64)\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(num), block=block_size, grid=grid_size)\n\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=grid_size)\n\t\tplan.execute(largevbox_d, inverse=True)\n\t\tlargevbox_d /= scale**3\n\t\t#import IPython; IPython.embed()\n\t\tsubsample_kernel(largevbox_d.real, smallbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_grid_size)\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallbox_d.get())\n\n\treturn", "def filter_wrapped_phase(image, k):\n ny, nx = image.shape\n assert(ny == nx) ## assert a square image for simplicity\n if (k%2 == 0):\n print(\"k has to be an integer!\")\n return\n N = nx\n i, j = np.arange(N), np.arange(N)\n ii, jj = np.meshgrid(i, j)\n filt_psi = np.zeros((N,N))\n\n inside = (jj[k/2:N-(k/2), k/2:N-(k/2)].flatten(), ii[k/2:N-(k/2), k/2:N-(k/2)].flatten())\n krange = np.linspace(-1 * (k/2), (k/2), k, dtype = 'int64') ## amount of added spaces, if k = 5, it ranges from -2 to 2\n krange_tile = np.tile(krange * N, (k, 1)).T ## tile them to make a (k/2)**2 matrix, containing for instance -2N, -N, 0, N, 2N for k=5\n k_tile = np.tile(krange, (k, 1)) ## tile to add to krange_tile\n coords_add = (krange_tile + k_tile).flatten() ## all coordinates, in a (k/2)**2 matrix, from -2N - 2: -2N + 2, -N-2 : -N+2 , -2 : 2, N -2 : N +2, 2N -2 : 2N +2\n inside = np.ravel_multi_index(inside, (N, N))\n coords_add = np.tile(coords_add, (len(inside), 1)) ## stack all differences to add to inside\n inside_tile = np.tile(inside, (coords_add.shape[1],1)).T ## stack all inside to add to 
differences\n all_coords = inside_tile + coords_add### a matrix of len(inside) x (k/2)**2 with all coordinates in a k x k square around a certain coordinate\n unrav_coords = np.unravel_index(all_coords, (N, N)) ## unraveled coordinates of all coordinates\n sum_sin_psi = np.sum(np.sin(image[unrav_coords]), axis = 1) ## sum over a sin (psi) over a k x k square\n sum_cos_psi = np.sum(np.cos(image[unrav_coords]), axis = 1) ## sum over a cos (psi) over a k x k square\n psi_app = np.arctan2(sum_sin_psi, sum_cos_psi)\n filt_psi[np.unravel_index(inside, (N, N))] = psi_app \n\n #### top layers\n for i in range(k/2):\n ## for indices directly above the \"inside square\"\n top = (jj[i, k/2:N-(k/2)].flatten(), ii[i, k/2: N - (k/2)].flatten())\n coords_add = (krange_tile + k_tile)[(k/2)-i:, :].flatten()\n top = np.ravel_multi_index(top, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n top_tile = np.tile(top, (coords_add.shape[1],1)).T\n top_coords = top_tile + coords_add\n unrav_coords = np.unravel_index(top_coords, (N, N))\n sum_sin_top = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_top = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_top = np.arctan2(sum_sin_top, sum_cos_top)\n filt_psi[np.unravel_index(top, (N, N))] = psi_top\n\n ## indices directly below the \"inside square\"\n bot = (jj[N- 1 - i, k/2:N-(k/2)].flatten(), ii[N-1-i, k/2: N - (k/2)].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:(k/2) + 1 + i, :].flatten()\n bot = np.ravel_multi_index(bot, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n bot_tile = np.tile(bot, (coords_add.shape[1],1)).T\n bot_coords = bot_tile + coords_add\n unrav_coords = np.unravel_index(bot_coords, (N, N))\n sum_sin_bot = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_bot = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_bot = np.arctan2(sum_sin_bot, sum_cos_bot)\n filt_psi[np.unravel_index(bot, (N, N))] = psi_bot\n\n ## indices directly left of the \"inside square\"\n left = (jj[k/2:N-(k/2), i].flatten(), ii[k/2:N-(k/2), i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, (k/2)-i:].flatten()\n left = np.ravel_multi_index(left, (N, N))\n coords_add = np.tile(coords_add, (len(left), 1))\n left_tile = np.tile(left, (coords_add.shape[1],1)).T\n left_coords = left_tile + coords_add\n unrav_coords = np.unravel_index(left_coords, (N, N))\n sum_sin_left = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_left = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_left = np.arctan2(sum_sin_left, sum_cos_left)\n filt_psi[np.unravel_index(left, (N, N))] = psi_left\n\n ## indices directly left of the \"inside square\"\n right = (jj[k/2:N-(k/2), N - 1 - i].flatten(), ii[k/2:N-(k/2), N - 1 - i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, :(k/2)+1+i].flatten()\n right = np.ravel_multi_index(right, (N, N))\n coords_add = np.tile(coords_add, (len(right), 1))\n right_tile = np.tile(right, (coords_add.shape[1],1)).T\n right_coords = right_tile + coords_add\n unrav_coords = np.unravel_index(right_coords, (N, N))\n sum_sin_right = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_right = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_right = np.arctan2(sum_sin_right, sum_cos_right)\n filt_psi[np.unravel_index(right, (N, N))] = psi_right\n \n ## calculate boundaries diagonals\n left_t, right_t, left_b, right_b = (i, i), (i, -1 -i), (-1 - i, i), (-1 - i, -1 - i) \n left_t, right_t, 
left_b, right_b = (jj[left_t], ii[left_t]), (jj[right_t], ii[right_t]), (jj[left_b], ii[left_b]), (jj[right_b], ii[right_b])\n left_t, right_t, left_b, right_b = np.ravel_multi_index(left_t, (N, N)), np.ravel_multi_index(right_t, (N, N)), np.ravel_multi_index(left_b, (N, N)), np.ravel_multi_index(right_b, (N, N))\n coord_mat = krange_tile + k_tile\n coords_add_lt, coords_add_rt, coords_add_lb, coords_add_rb = coord_mat[(k/2)-i:, (k/2)-i:].flatten(), coord_mat[(k/2)-i:, :(k/2)+1+i].flatten(), coord_mat[:(k/2)+i+1, (k/2)-i:].flatten(), coord_mat[:(k/2)+i+1, :(k/2)+i+1].flatten()\n coords_add_tot = np.vstack((coords_add_lt, coords_add_rt, coords_add_lb, coords_add_rb))\n lt_tile, rt_tile, lb_tile, rb_tile = np.tile(left_t, (coords_add_lt.shape[0],1)).T, np.tile(right_t, (coords_add_lt.shape[0],1)).T, np.tile(left_b, (coords_add_lt.shape[0],1)).T, np.tile(right_b, (coords_add_lt.shape[0],1)).T\n coords_tile_tot = np.squeeze(np.stack((lt_tile, rt_tile, lb_tile, rb_tile)))\n coords_tot = coords_add_tot + coords_tile_tot\n unrav_coords = np.unravel_index(coords_tot, (N, N))\n sum_sin_diag = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_diag = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_diag = np.arctan(sum_sin_diag, sum_cos_diag)\n filt_psi[np.unravel_index(np.stack((left_t, right_t, left_b, right_b)), (N, N))] = psi_diag\n\n return filt_psi", "def moffat_kernel(n_fwhm,beta,r_s):\n\n x_length = int(n_rs * r_s + 0.5) #Add 0.5 to approximate to nearest integer\n y_length = x_length\n \n\n x, y = mgrid[-x_length:x_length+1, -y_length:y_length+1]\n\t\n m = 1. /((1+(x**2+y**2)/r_s**2)**beta)\n\t\t\n\n return m / m.sum()", "def cs4243_filter(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n ###Your code here####\n # pad image to handle border pixels\n pad_height = (int)((Hk - 1)/2)\n pad_width = (int)((Wk - 1)/2)\n image_pad = pad_zeros(image, pad_height, pad_width)\n\n # Flip the kernel horizontal and vertical\n kernel = cs4243_rotate180(kernel)\n \n # compute effective output size, assume stride=1\n out_height = 1 + Hi - Hk + 2*pad_height\n out_width = 1 + Wi - Wk + 2*pad_width\n \n # get initial nodes of receptive fields\n recep_fields_h = [i for i in range(out_height)]\n recep_fields_w = [i for i in range(out_width)]\n \n for i in recep_fields_h:\n for j in recep_fields_w: \n # get receptive area\n recep_area = image_pad[i:i+Hk, j:j+Wk] \n\n # multiply recep_area with kernel\n conv_sum = 0.0\n for y in range(Hk):\n for x in range(Wk): \n conv_sum += kernel[y][x] * recep_area[y][x]\n filtered_image[i, j] = conv_sum\n ###\n\n return filtered_image", "def dilate_kernel(self, kernel, dilation):\n if dilation == 0:\n return kernel \n # inside padding based on the scaling law\n dilation = torch.tensor(dilation).float()\n delta = dilation%1\n\n d_in = torch.ceil(dilation**2).int()\n new_in = kernel.shape[2] + (kernel.shape[2]-1)*d_in\n\n d_h = torch.ceil(dilation).int()\n new_h = kernel.shape[3] + (kernel.shape[3]-1)*d_h\n\n d_w = torch.ceil(dilation).int()\n new_w = kernel.shape[4] + (kernel.shape[4]-1)*d_h\n\n new_kernel = torch.zeros(kernel.shape[0], kernel.shape[1], new_in, new_h, new_w)\n new_kernel[:,:,::(d_in+1),::(d_h+1), ::(d_w+1)] = kernel\n dilate_factor = 1\n \n new_kernel = F.pad(new_kernel, ((kernel.shape[4]-1)//2, (kernel.shape[4]-1)//2)*3)\n\n dilate_factor = (new_kernel.shape[-1] - 1 - (kernel.shape[4]-1)*(delta))/(new_kernel.shape[-1] - 1) \n\n grid = torch.meshgrid(torch.linspace(-1, 1, new_in)*(dilate_factor**2), 
\n torch.linspace(-1, 1, new_h)*dilate_factor, \n torch.linspace(-1, 1, new_w)*dilate_factor)\n\n grid = torch.cat([grid[2].unsqueeze(0).unsqueeze(-1), \n grid[1].unsqueeze(0).unsqueeze(-1), \n grid[0].unsqueeze(0).unsqueeze(-1)], dim = -1).repeat(kernel.shape[0],1,1,1,1)\n\n new_kernel = F.grid_sample(new_kernel, grid) \n \n return new_kernel[:,:,-kernel.shape[2]:]", "def kernel_factory(s, m1, m2):\r\n m_max = max(m1, m2)\r\n A = np.zeros([s, m_max, m_max], dtype=complex)\r\n symmetry = random.choice([2, 3, 4, 6])\r\n half_sym = np.floor(symmetry / 2).astype('int')\r\n lowest_k = 0.5\r\n highest_k = 3\r\n k = np.zeros([s, symmetry])\r\n for level in range(s):\r\n k[level, :] = np.random.uniform(lowest_k, highest_k, symmetry)\r\n\r\n x, y = np.meshgrid(np.linspace(-1, 1, m_max), np.linspace(-1, 1, m_max))\r\n # dist = np.sqrt(x * x + y * y)\r\n # theta = np.arctan(x / y)\r\n arb_angle = np.random.uniform(0, 2 * np.pi)\r\n for direction in range(symmetry):\r\n ang = direction * 180 / symmetry\r\n ang = arb_angle + ang * np.pi / 180\r\n r = (x * np.cos(ang) + np.sin(ang) * y)\r\n phi = np.random.uniform(0, 2 * np.pi)\r\n for i in range(s):\r\n A[i, :, :] += np.cos(2 * np.pi * k[i, direction % half_sym] * r)\r\n\r\n # Adding normal decay\r\n sigma = np.random.uniform(0.3, 0.6)\r\n decay = gaussian_window(m_max, m_max, sigma)\r\n A = np.multiply(np.abs(A), decay)\r\n # Normalizing:\r\n A = sphere_norm_by_layer(A)\r\n return A", "def ghosal_edge_v2(img,Ks,kmin=0,kmax=1000,lmax=0.5,phimin=1,thresholding=True,debug=False,mirror=False):\n\t# gather image properties before its altered\n\tni,nj = np.shape(img)\n\t# Ks must be odd\n\tif Ks%2 != 1:\n\t\tprint(\"Ks must be odd! Continuing with Ks = Ks-1\")\n\t\tKs = Ks-1\n\t# define the rectangular kernels\n\t#Vc00 = np.zeros((Ks,Ks),dtype=complex) # not needed\n\tVc11 = np.zeros((Ks,Ks),dtype=complex)\n\tVc20 = np.zeros((Ks,Ks),dtype=complex)\n\tofs = 1 *(1-1/Ks) # offset for centering kernel around 0,0\n\tfor i in range(Ks):\n\t\tfor j in range(Ks):\n\t\t\tKx = 2*j/Ks-ofs # limits of integration between -1 and 1\n\t\t\tKy = 2*i/Ks-ofs\n\t\t\tif Kx**2+Ky**2 <= 1: # only a circle\n\t\t\t\t#Vc00[i,j] = 1 # the conjugate of V00 # not needed\n\t\t\t\tVc11[i,j] = Kx-Ky*1j # ...\n\t\t\t\tVc20[i,j] = 2*Kx**2+2*Ky**2-1\n\t# mirror the edges to avoid edge effects from convolution\n\tif mirror:\n\t\tthick = int((Ks-1)/2)\n\t\timg = np.concatenate((img[:,(thick-1)::-1],img,img[:,:-(thick+1):-1]),1)\n\t\timg = np.concatenate((img[(thick-1)::-1,:],img,img[:-(thick+1):-1,:]),0)\n\t\tmode = \"valid\"\n\telse:\n\t\tmode = \"same\"\n\t\n\t# do the convolution with the images to get the zernike moments\n\tAnorm = lambda n : (n+1)/np.pi\t# a normalization value\n\t#A00 = scig.convolve2d(img,Vc00,mode='same') # not needed\n\tA11 = Anorm(1)*scig.oaconvolve(img,Vc11,mode=mode)\n\tA20 = Anorm(2)*scig.oaconvolve(img,Vc20,mode=mode)\n\n\tphi = np.arctan(np.imag(A11)/zero_to_small(np.real(A11)))\n\tAl11 = np.real(A11)*np.cos(phi)+np.imag(A11)*np.sin(phi)\n\tl = np.real(A20)/Al11 # A20 has no imaginary component so A20 = A'20\n\tl = np.minimum(l,1-SMALL) # chop off those that go beyond the kernel boundaries\n\tl = np.maximum(l,-1+SMALL)\n\tk = abs(3*Al11/(2*(1-l**2)**(3/2))) \n\t\n\tif thresholding==True:\n\t\t# conditions\n\t\tphi_c = abs(phi)>phimin\n\t\tl_c = abs(l)<lmax\n\t\tk_c = (k<kmax) & (k>kmin)\n\t\tvalid = phi_c & (k_c & l_c)\n\telif thresholding==False:\n\t\tvalid = np.ones_like(k)\n\t# define a grid of pixel positions\n\ti,j = 
np.meshgrid(np.arange(nj),np.arange(ni))\n\t\n\t# get a list of the valid relevant parameters \n\ti = i[valid]\n\tj = j[valid]\n\t#\tk = k[valid] # not necessary\n\tl = l[valid]\n\tphi = phi[valid]\n\t\n\t# convert to the subpixel position\n\ti_s = i+l*Ks/2*np.cos(phi)\n\tj_s = j+l*Ks/2*np.sin(phi)\n\t\n\t# put all detected points in a vector of (x,y) values\n\tedg = np.squeeze((j_s,i_s)).transpose()\n\torg = np.squeeze((j,i)).transpose()\n\tif debug==True:\n\t\treturn edg, org, k, l, phi\n\telse:\n\t\treturn edg, org", "def cs4243_filter_fast(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n ###Your code here####\n \n # pad image to handle border pixels\n pad_height = (int)((Hk - 1)/2)\n pad_width = (int)((Wk - 1)/2)\n image_pad = pad_zeros(image, pad_height, pad_width)\n\n # Flip the kernel horizontal and vertical\n kernel = cs4243_rotate180(kernel)\n \n # compute effective output size, assume stride=1\n out_height = 1 + Hi - Hk + 2*pad_height\n out_width = 1 + Wi - Wk + 2*pad_width\n \n # get initial nodes of receptive fields\n recep_fields_h = [i for i in range(out_height)]\n recep_fields_w = [i for i in range(out_width)]\n \n for i in recep_fields_h:\n for j in recep_fields_w: \n # get receptive area\n recep_area = image_pad[i:i+Hk, j:j+Wk] \n filtered_image[i, j] = np.multiply(kernel, recep_area).sum()\n ###\n\n return filtered_image", "def cs4243_filter_faster(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n ###Your code here####\n \n # pad image to handle border pixels\n pad_height = (int)((Hk - 1)/2)\n pad_width = (int)((Wk - 1)/2)\n image_pad = pad_zeros(image, pad_height, pad_width)\n \n # compute effective output size, assume stride=1\n out_height = 1 + Hi - Hk + 2*pad_height\n out_width = 1 + Wi - Wk + 2*pad_width\n \n # get initial nodes of receptive fields\n recep_fields_h = [i for i in range(out_height)]\n recep_fields_w = [i for i in range(out_width)]\n \n # extract receptive area into matrix of shape (Hi*Wi, Hk*Wk)\n recep_areas = []\n for i in recep_fields_h:\n for j in recep_fields_w:\n recep_areas.append(image_pad[i: i+Hk, j: j+Wk].reshape(-1))\n out = np.stack(recep_areas)\n \n # Flip the kernel horizontal and vertical\n kernel = cs4243_rotate180(kernel).reshape(Hk*Wk, 1)\n \n # dot product kernel and receptive areas\n filtered_image = np.dot(out, kernel).reshape(Hi, Wi)\n \n ###\n\n return filtered_image", "def test_uv_degrid_gaussian_kernel():\n\n layout = read_layout(layout_path=f\"{test_data}/test_mwa.txt\")\n xyz = enh_xyz(layout=layout, latitude=mwa_geo.latitude.radians)\n uvw = xyz_uvw(xyz=xyz, freq=freq, dec0=mwa_geo.latitude.radians, ha0=0)\n uv = uv_degrid(\n max_lambda=1400, nside=20, uvw=uvw, sigma=3, kersize=21, kernel=\"gaussian\"\n )\n\n assert uv.shape == (20, 20)\n assert uv[0, 0] == 1.295932713086053e-05", "def kernel(n):\r\n return [(k, n - abs(k)) for k in range(-n, n + 1)]", "def n2f(n):\n k = 4.0 * np.pi**2 * codata.value('electron mass') * codata.value('electric constant') / codata.value('elementary charge')**2\n return np.sqrt(n/k)", "def f2n(f):\n k = 4.0 * np.pi**2 * codata.value('electron mass') * codata.value('electric constant') / codata.value('elementary charge')**2\n return k * f**2", "def eg3(N_train=1000, N_test=500, depend_ratio_train=0.8, depend_ratio_test=0.2, feature_num=10, stable_ratio=0.4):\n\n def eg3_kernel(n, p, stable_ratio=0.4, depend_ratio=0.8):\n p_stable = int(p * stable_ratio)\n p_noise = p - p_stable\n 
stable_feature = np.random.randn(n, p_stable)\n noise_feature_dependent = np.zeros([n, p_noise])\n noise_feature_independent = np.random.randn(n, p_noise)\n for i in range(p_noise):\n noise_feature_dependent[:, i] = stable_feature[:, i % p_stable] + stable_feature[:,\n (i + 1) % p_stable] + 2 * np.random.randn(\n n) # still need noise\n noise_depend_label = np.random.uniform(0, 1, n).reshape(-1, 1)\n noise_depend_label = np.concatenate([noise_depend_label] * p_noise, axis=1)\n noise_feature = np.where(noise_depend_label < depend_ratio, noise_feature_dependent, noise_feature_independent)\n\n b = np.zeros([p_stable, 1])\n linear_len = int(p_stable / 2)\n\n for i in range(linear_len): # linear part\n b[i, 0] = (-1) ** i * (i % 3 + 1) * p / 3\n for i in range(linear_len, b.shape[0]): # nonlinear part\n b[i, 0] = p / 2\n\n linear_part = np.matmul(stable_feature[:, :linear_len], b[:linear_len, 0])\n nolinear_part = np.zeros([n, 1])\n for i in range(linear_len, b.shape[0]):\n temp = stable_feature[:, i % p_stable] * stable_feature[:, (i + 1) % p_stable] * b[i, 0]\n temp = temp.reshape(-1, 1)\n nolinear_part += temp\n\n Y = linear_part.reshape(-1, 1) + nolinear_part + np.random.randn(n, 1)\n\n data = {}\n data['stable'] = stable_feature\n data['noise'] = noise_feature\n data['Y'] = Y\n data['params'] = b\n data['kernel'] = 'eg3'\n return data\n\n data_train = eg3_kernel(n=N_train, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_train)\n data_test = eg3_kernel(n=N_test, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_test)\n return data_train, data_test", "def DisLayerSN(ndf, k):\n d_in = 2**k \n d_out = 2**(k+1)\n\n out = nn.Sequential(nn.utils.spectral_norm(\n nn.Conv2d(ndf*d_in, ndf*d_out, kernel_size, stride=stride, padding=padding, bias=False)), \n nn.BatchNorm2d(ndf * d_out), \n nn.LeakyReLU(0.2, inplace=True) )\n return out", "def kernel(self):\n\n # Create a blank kernel the appropriate size\n kernel = np.zeros((self.n_rows, self.n_cols), dtype=np.int)\n\n # Iterate through the offsets, turning on the correct pixels\n for offset in self.offsets:\n row, col = offset\n if np.all(offset == self.index):\n kernel[row, col] = 2\n else:\n kernel[row, col] = 1\n\n # Ensure that the index pixel is not zero for footprints where the\n # index pixel is not part of the footprint\n if kernel[self.index[0], self.index[1]] == 0:\n kernel[self.index[0], self.index[1]] = 3\n return kernel", "def bilinear_interpolation_kernel(in_channels, out_channels, ksize):\n\n factor = (ksize + 1) / 2\n if ksize % 2 == 1:\n center = factor - 1\n else:\n center = factor - 0.5\n og = np.ogrid[:ksize, :ksize]\n k = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)\n \n W = np.zeros((in_channels, out_channels, ksize, ksize)).astype(np.float32)\n W[range(in_channels), range(out_channels), :, :] = k\n return W", "def aGMKernel(Ni,Nj,alpha,gamma):\n \n #Dimension of data\n d = Ni.mu.size\n I = sp.eye(d)\n\n ##Normalisation\n deltaMean = (Ni.mu-Nj.mu).reshape(d,)\n SigmaSum = alpha * (Ni.Sigma+Nj.Sigma) + I/gamma\n Kij = (linalg.det(2*gamma*alpha * Ni.Sigma + I) * linalg.det(2*gamma*alpha * Nj.Sigma + I))**0.25\n Kij *= sp.exp(-0.5*sp.dot(deltaMean.T,linalg.solve(SigmaSum,deltaMean)))\n Kij /= sp.sqrt(linalg.det(SigmaSum*gamma)) \n \n return Kij", "def GenLayerSN(ngf, k):\n d_in = 2**k \n d_out = 2**(k-1)\n out = nn.Sequential( nn.utils.spectral_norm(\n nn.ConvTranspose2d(ngf * d_in, ngf * d_out, kernel_size, stride, padding, bias=False)),\n nn.BatchNorm2d(ngf * 
d_out),\n nn.ReLU(True) )\n return out", "def rk4_sde(self, x, rv_n):\n a21 = 2.71644396264860\n a31 = - 6.95653259006152\n a32 = 0.78313689457981\n a41 = 0.0\n a42 = 0.48257353309214\n a43 = 0.26171080165848\n a51 = 0.47012396888046\n a52 = 0.36597075368373\n a53 = 0.08906615686702\n a54 = 0.07483912056879\n\n q1 = 2.12709852335625\n q2 = 2.73245878238737\n q3 = 11.22760917474960\n q4 = 13.36199560336697\n\n n = self.mp.params[0]; k = self.mp.params[1];\n gamma = self.mp.params[2]; dt = self.mp.params[3];\n\n if x.get_shape()[1] > 1:\n evolve_fun = self.evolve_system\n else:\n evolve_fun = self.evolve\n\n x1 = x\n k1 = dt * evolve_fun(x1, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x2 = x1 + a21 * k1\n k2 = dt * evolve_fun(x2, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x3 = x1 + a31 * k1 + a32 * k2\n k3 = dt * evolve_fun(x3, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x4 = x1 + a41 * k1 + a42 * k2\n k4 = dt * evolve_fun(x4, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x_new = x1 + a51 * k1 + a52 * k2 + a53 * k3 + a54 * k4\n\n return tf.cast(x_new, tf.float32)", "def test():\n\n S = \"cells interlinked within cells interlinked\"\n T = \"within one stem and dreadfully distinct\"\n\n n = 2\n\n res = kernel(S, T, n)\n\n print(res)\n print('k(car, car, 1) = ', kernel('car', 'car', 1),\n 'should be 3*lambda^2 = .75')\n print('k(car, car, 2) = ', kernel('car', 'car', 2),\n ' should be lambda^6 + 2*lambda^4 = 0.140625')\n print('k(car, car, 3) = ', kernel('car', 'car', 3),\n 'should be lambda^6 = 0.0156')\n\n print('normkernel(cat, car, 1) = ', normkernel('cat', 'car', 1),\n 'should be 2/3')\n print('kernel(cat, car, 2) = ', kernel('cat', 'car', 2),\n 'should be lambda^4 = 0.0625')\n print('normkernel(cat, car, 2) = ', normkernel('cat', 'car', 2),\n 'should be 1/(2+lambda^2) = 0.44444')\n\n print(\n kernel(\"AxxxxxxxxxB\", \"AyB\", 2),\n 'should be =0.5^14 = 0.00006103515625')\n print(\n kernel(\"AxxxxxxxxxB\", \"AxxxxxxxxxB\", 2),\n 'should be 12.761724710464478')\n\n print(kernel(\"ab\", \"axb\", 2), 'should be =0.5^5 = 0.03125')\n print(kernel(\"ab\", \"abb\", 2), 'should be 0.5^5 + 0.5^4 = 0.09375')\n print(normkernel(\"ab\", \"ab\", 2), 'should be 1')\n print(normkernel(\"AxxxxxxxxxB\", \"AxxxxxxxxxB\", 2), 'should be 1')\n\n kss = [0.580, 0.580, 0.478, 0.439, 0.406, 0.370]\n for x in range(1, 7):\n print(x,\n normkernel(\"science is organized knowledge\",\n \"wisdom is organized life\", x), 'should be',\n kss[x - 1])", "def calc_ked_WFI(self):\n\n #Initialize kinetic energy density\n self.ked_WFI = np.zeros( (self.grid.Nelem, 1))\n\n #Figure out the number of occupied orbitals\n if self.m == 0:\n if self.pol == 1:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n else:\n Nocc = np.floor(self.N)\n nu = self.N - Nocc\n\n else:\n #m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n Nocc = np.floor(self.N / 4)\n nu = self.N / 4 - Nocc\n else:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n\n #Construct density\n for i in range(int(Nocc)):\n # print(\"phi from pssolver\", self.phi)\n # print(\"phi subset\", self.phi[:,i])\n # print(\"integrate returns\", self.grid.integrate( self.phi[:,i]**2 )**0.5)\n\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:,i]**2 )**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += (phi_norm * (self.H0 @ phi_norm)) / self.grid.w[:, None]\n\n #If we are doing fractional robitals and are non-integer\n if self.FRACTIONAL is True and nu != 0:\n #Normalized orbital\n phi_norm = self.phi[:,i] / 
self.grid.integrate( self.phi[:, Nocc+1]**2)**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += nu * ( phi_norm * (self.H0 @ phi_norm) ) / self.grid.w[:, None]\n\n #Scale densities appropriately\n if self.m == 0:\n if self.pol == 1: #Unpolarized electrons\n self.ked_WFI = 2 * self.ked_WFI\n\n else: # m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n self.ked_WFI = 4 * self.ked_WFI\n else:\n self.ked_WFI = 2 * self.ked_WFI", "def disp_surf_calc(kc_x_max, kc_z_max, m_i, wp_e):\n\n # Make vectors of the wave numbers\n kc_z = np.linspace(1e-6, kc_z_max, 35)\n kc_x = np.linspace(1e-6, kc_x_max, 35)\n\n # Turn those vectors into matrices\n kc_x_mat, kc_z_mat = np.meshgrid(kc_x, kc_z)\n\n # Find some of the numbers that appear later in the calculations\n kc_ = np.sqrt(kc_x_mat ** 2 + kc_z_mat ** 2) # Absolute value of k\n theta_ = np.arctan2(kc_x_mat, kc_z_mat) # The angle between k and B\n wc_i = 1 / m_i # The ion gyro frequency\n wp_i = wp_e / np.sqrt(m_i) # The ion plasma frequency\n wp_ = np.sqrt(wp_e ** 2 + wp_i ** 2) # The total plasma frequency\n\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # For every k_perp and k_par, turn the dispersion relation into a\n # polynomial equation and solve it.\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # The polynomial coefficients are calculated\n pol_koeff_8 = -2 * kc_ ** 2\n pol_koeff_8 -= (1 + wc_i ** 2 + 3 * wp_ ** 2) * np.ones(kc_.shape)\n pol_koeff_6 = (2 * kc_ ** 2 + wp_ ** 2) * (1 + wc_i ** 2 + 2 * wp_ ** 2)\n pol_koeff_6 += kc_ ** 4 + (wp_ ** 2 + wc_i) ** 2\n pol_koeff_4 = -kc_ ** 4 * (1 + wc_i ** 2 + wp_ ** 2)\n pol_koeff_4 -= 2 * kc_ ** 2 * (wp_ ** 2 + wc_i) ** 2\n pol_koeff_4 -= (kc_ * wp_) ** 2 * (1 + wc_i ** 2 - wc_i) * (\n 1 + np.cos(theta_) ** 2)\n pol_koeff_4 -= wp_ ** 2 * (wp_ ** 2 + wc_i) ** 2\n pol_koeff_2 = kc_ ** 4 * (wp_ ** 2 * (1 + wc_i ** 2 - wc_i) * np.cos(\n theta_) ** 2 + wc_i * (wp_ ** 2 + wc_i))\n pol_koeff_2 += kc_ ** 2 * wp_ ** 2 * wc_i * (wp_ ** 2 + wc_i) * (\n 1 + np.cos(theta_) ** 2)\n pol_koeff_0 = -kc_ ** 4 * wc_i ** 2 * wp_ ** 2 * np.cos(theta_) ** 2\n\n w_final = np.zeros((10, len(kc_z), len(kc_x)))\n\n # For each k, solve the equation\n for k_z, k_x in itertools.product(range(len(kc_z)), range(len(kc_x))):\n disp_polynomial = [1, 0, pol_koeff_8[k_z, k_x], 0,\n pol_koeff_6[k_z, k_x], 0, pol_koeff_4[k_z, k_x],\n 0, pol_koeff_2[k_z, k_x], 0, pol_koeff_0[k_z, k_x]]\n # theoretically should be real (A. Tjulin)\n w_temp = np.real(np.roots(disp_polynomial))\n # We need to sort the answers to get nice surfaces.\n w_final[:, k_z, k_x] = np.sort(w_temp)\n\n n2_ = kc_ ** 2 / w_final ** 2\n v_ph_c = np.sqrt(1. 
/ n2_)\n va_c = 1 / (wp_e * np.sqrt(m_i))\n v_ph_va = v_ph_c / va_c\n\n diel_tensor = _calc_diel(kc_, w_final, theta_, wp_e, wp_i, wc_i)\n\n e_x, e_y, e_z, e_per, e_tot, e_pol = _calc_e(diel_tensor)\n e_par = (kc_x_mat * e_x + kc_z_mat * e_z) / kc_\n\n b_x, b_y, b_z, b_par, b_per, b_pol, b_tot = _calc_b(kc_x_mat, kc_z_mat,\n w_final, e_x, e_y, e_z)\n\n dk_x, dk_z = [kc_x_mat[1], kc_z_mat[1]]\n dw_x, dw_z = [np.zeros(w_final.shape) for _ in range(2)]\n dw_x[:, :, 1:] = np.diff(w_final, axis=2)\n dw_z[:, 1:, :] = np.diff(w_final, axis=1)\n v_x, v_z = [dw_ / dk for dw_, dk in zip([dw_x, dw_z], [dk_x, dk_z])]\n\n s_par, s_tot = _calc_s(e_x, e_y, e_z, b_x, b_y, b_z)\n\n # Compute ion and electron velocities\n v_ex, v_ey, v_ez, v_ix, v_iy, v_iz = _calc_vei(m_i, wc_i, w_final,\n e_x, e_y, e_z)\n\n # Ratio of parallel and perpendicular to B speed\n vepar_perp = v_ez * np.conj(v_ez)\n vepar_perp /= (v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey))\n vipar_perp = v_iz * np.conj(v_iz)\n vipar_perp /= (v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy))\n\n # Total particle speeds\n v_e2 = v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey) + v_ez * np.conj(v_ez)\n v_i2 = v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy) + v_iz * np.conj(v_iz)\n\n # Ion and electron energies\n m_e = -1\n en_e = 0.5 * m_e * v_e2\n en_i = 0.5 * m_i * v_i2\n\n # Ratio of particle and field energy densities\n ratio_part_field = _calc_part2fields(wp_e, en_e, en_i, e_tot, b_tot)\n\n # Continuity equation\n dn_e_n, dn_i_n, dne_dni = _calc_continuity(kc_x_mat, kc_z_mat, w_final,\n v_ex, v_ez, v_ix, v_iz)\n\n dn_e_n_db_b = dn_e_n / b_tot\n dn_i_n_db_b = dn_i_n / b_tot\n\n dn_e_n_dbpar_b = dn_e_n / b_par\n dn_i_n_dbpar_b = dn_i_n / b_par\n\n dn_e = dn_e_n * wp_e ** 2\n k_dot_e = e_x * kc_x_mat + e_z * kc_z_mat\n k_dot_e = np.sqrt(k_dot_e * np.conj(k_dot_e))\n\n # Build output dict\n extra_param = {\"Degree of electromagnetism\": np.log10(b_tot / e_tot),\n \"Degree of longitudinality\": np.abs(e_par) / e_tot,\n \"Degree of parallelity E\": e_z / e_tot,\n \"Degree of parallelity B\": np.sqrt(\n b_z * np.conj(b_z)) / b_tot,\n \"Ellipticity E\": e_pol, \"Ellipticity B\": b_pol,\n \"E_part/E_field\": np.log10(ratio_part_field),\n \"v_g\": np.sqrt(v_x ** 2 + v_z ** 2),\n \"v_ph/v_a\": np.log10(v_ph_va),\n \"E_e/E_i\": np.log10(en_e / en_i),\n \"v_e/v_i\": np.log10(np.sqrt(v_e2 / v_i2)),\n \"v_epara/v_eperp\": np.log10(vepar_perp),\n \"v_ipara/v_iperp\": np.log10(vipar_perp),\n \"dn_e/dn_i\": np.log10(dne_dni),\n \"(dn_e/n)/ (dB/B)\": np.log10(dn_e_n_db_b),\n \"(dn_i/n)/(dB/B)\": np.log10(dn_i_n_db_b),\n \"(dn_i/n)/(dBpar/B)\": np.log10(dn_i_n_dbpar_b),\n \"(dn_e/n)/(dB/B)\": np.log10(dn_e / k_dot_e),\n \"(dn_e/n)/(dBpar /B)\": np.log10(dn_e_n_dbpar_b),\n \" Spar/Stot\": s_par / s_tot}\n\n for k, v in zip(extra_param.keys(), extra_param.values()):\n extra_param[k] = np.transpose(np.real(v), [0, 2, 1])\n\n kx_ = np.transpose(kc_x_mat)\n kz_ = np.transpose(kc_z_mat)\n wf_ = np.transpose(w_final, [0, 2, 1])\n\n return kx_, kz_, wf_, extra_param", "def ghosal_edge(img,Ks,thr=1,thrmax=0.995,lmin = 0.5,phimin=1.4,thresholding=True, debug=False):\n\ttotaltime = time.time()\n\tkerneltime = time.time()\n\t# Ks must be odd\n\tif Ks%2 != 1:\n\t\tprint(\"Ks must be odd! 
Continuing with Ks = Ks-1\")\n\t\tKs = Ks-1\n\t# define the rectangular kernels\n\t#Vc00 = np.zeros((Ks,Ks),dtype=complex)\n\tVc11 = np.zeros((Ks,Ks),dtype=complex)\n\tVc20 = np.zeros((Ks,Ks),dtype=complex)\n\tofs = 1 *(1-1/Ks) # offset for centering kernel around 0,0\n\tfor i in range(Ks):\n\t\tfor j in range(Ks):\n\t\t\tKx = 2*j/Ks-ofs # limits of integration between -1 and 1\n\t\t\tKy = 2*i/Ks-ofs\n\t\t\tif Kx**2+Ky**2 <= 1: # only a circle\n\t\t\t\t#Vc00[i,j] = 1 # the conjugate of V00\n\t\t\t\tVc11[i,j] = Kx-Ky*1j # ...\n\t\t\t\tVc20[i,j] = 2*Kx**2+2*Ky**2-1\n\tkerneltime = time.time() - kerneltime\n\t\n\t# Kernel Plots\n\t#\tVCplot = Vc00\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real w K Vc00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag w K Vc00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = Vc11\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real w K Vc11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag w K Vc11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = Vc20\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real w K Vc20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag w K Vc20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t\n\t# do the convolution with the images to get the zernike moments\n\tAnorm = lambda n : (n+1)/np.pi\t# a normalization value\n\tconvolvetime = time.time()\n\t#A00 = scig.convolve2d(img,Vc00,mode='same')\n\t#\tA11 = Anorm(1)*scig.convolve2d(img,Vc11,mode='same')\n\t#\tA20 = Anorm(2)*scig.convolve2d(img,Vc20,mode='same')\n\tA11 = Anorm(1)*scig.oaconvolve(img,Vc11,mode='same')\n\tA20 = Anorm(2)*scig.oaconvolve(img,Vc20,mode='same')\n\tconvolvetime = time.time() - convolvetime\n\t# Plot Zernike moments\n\t#\tVCplot = A00\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = A11\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = A20\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t\n\tparamstime = time.time()\n\t# calculate the edge paramters\n\t#\ttanphi = np.imag(A11)/np.real(A11)\n\t#\tphi = np.arctan(tanphi)\n\t#\tcosphi = np.cos(phi)\n\t#\tsinphi = cosphi*tanphi\n\t#\tAl11 = np.real(A11)*cosphi+np.imag(A11)*sinphi\n\t\n\tphi = np.arctan(np.imag(A11)/np.real(A11))\n\tAl11 = np.real(A11)*np.cos(phi)+np.imag(A11)*np.sin(phi)\n\t\n\t#\tAl11 = A11*np.exp(-phi*1j)\n\tl = A20/Al11 # A20 has no imaginary component so A20 = A'20\n\n\tk = 3*Al11/(2*(1-l**2)**(3/2))\n\tparamstime = time.time() - paramstime\n\t\n\t# Plot edge paramters\n\t#\tVCplot = phi\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real phi\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag phi\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = Al11\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A\\'11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag 
A\\'11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = l\n\t#\tplt.pcolormesh(np.real(VCplot))#,vmin=-5,vmax=5\n\t#\tplt.title(\"real l\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot)) # ,vmin=-5,vmax=5\n\t#\tplt.title(\"imag l\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = k\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real k\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag k\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t\n\t\n\ttreattime = time.time()\n\tif thresholding==True:\n\t\t# do the thresholding\n\t\tif (thrmax<0)&(thr>0):\n\t\t\tknorm = np.sort(k.flatten())[[int(thr*np.size(k)),int(thrmax*np.size(k))]]\n\t\t\tidx = (abs(l)<lmin)&(abs(phi)>phimin)&(abs(k)>knorm[0])\n\t\telif thrmax>0:\n\t\t\tknorm = np.sort(k.flatten())[[int(thr*np.size(k)),int(thrmax*np.size(k))]]\n\t\t\tidx = (abs(l)<lmin)&(abs(phi)>phimin)&(abs(k)>knorm[0])&(abs(k)<knorm[1])\n\t\telif thr<0:\n\t\t\tidx = (abs(l)<lmin)&(abs(phi)>phimin)\n\t\t\tknorm = np.sort(k[idx].flatten())[int(thr)]\n\t\t\tidx = idx&(abs(k)>abs(knorm))\n\t\tne = np.sum(idx)\n\telif thresholding==False:\n\t\traise ValueError(\"this option is not still uncer development\")\n\t\t# no thresholding\n\t\tidx = np.ones(np.shape(l),dtype=bool)\n\t\tne =np.sum(idx)\n\telse:\n\t\traise ValueError(\"thresholding should be boolean\")\n\t\n\t# put all detected points in a vector of (x,y) values\n\tedg = np.zeros((ne,2))\n\torg = np.zeros((ne,2))\n\tnx,ny = np.shape(img)\n\te = 0\n\tfor i in range(nx):\n\t\tfor j in range(ny):\n\t\t\tif idx[i,j]:\n\t\t\t\tedg[e]=np.array([i,j]) + l[i,j]*Ks/2*np.array(\n\t\t\t\t\t[np.sin(phi[i,j]),-np.cos(phi[i,j])])\n\t\t\t\torg[e]=np.array([i,j])\n\t\t\t\te +=1\n\ttreattime = time.time() - treattime\n\ttotaltime = time.time() - totaltime\n\tprint(\"total %0.5f\tconvolution %0.5f\tthresholding %0.5f\tparamters %0.5f\tkernel %0.5f\"%(totaltime,convolvetime,treattime,paramstime,kerneltime))\n\t\n\tif debug==True:\n\t\treturn edg, org, k, l, phi\n\telse:\n\t\treturn edg, org", "def gauss_kernel(n_fwhm,sigma):\n\n x_length = int(n_fwhm * sigma + 0.5) #Add 0.5 to approximate to nearest integer\n y_length = x_length\n \n \n x, y = mgrid[-x_length:x_length+1, -y_length:y_length+1]\n g = numpy.exp(-(x**2/(2*(float(sigma)**2))+y**2/(2*(float(sigma)**2))))\n return g / g.sum()", "def get_blur_kernel(n):\n return [1/n**2] * n**2", "def __init__(self,\n max_freq_in: int,\n channels: int,\n *grid_args,\n non_linearity: str = 'elu',\n max_freq_out: int = None,\n moorepenrose: bool = True,\n repr_type: str = 'spherical',\n **grid_kwargs):\n super().__init__()\n self.channels = channels\n if repr_type not in ['spherical', 'so3']:\n raise ValueError(f'repr_type must be one of [spherical, so3], given: {repr_type}')\n self.repr_type = repr_type\n self.non_linearity = self.get_nonlin(non_linearity)\n max_freq_out = max_freq_out or max_freq_in\n\n self.gspace = gspaces.rot3dOnR3()\n rho = self.get_representation(max_freq_in)\n rho_bl = self.get_representation(max_freq_out)\n\n self.dim = rho.size\n self.in_type = enn.FieldType(self.gspace, [rho]*channels)\n self.out_type = enn.FieldType(self.gspace, [rho_bl]*channels)\n\n grid = self.get_grid(*grid_args, **grid_kwargs)\n A = self.build_sensing_matrix(rho, grid, max_freq_in)\n A_inv = self.build_reconstruction_matrix(rho_bl, grid, max_freq_out, moorepenrose=moorepenrose)\n\n self.register_buffer('A', torch.tensor(A, dtype=torch.get_default_dtype()))\n self.register_buffer('Ainv', 
torch.tensor(A_inv, dtype=torch.get_default_dtype()))", "def rasm_mode(self, K, MAX_ITER=40):\r\n #old_Ki_f = np.zeros((self.N, 1))\r\n\r\n #Start f's at zero originally of if we have gone off track, try restarting\r\n if self.old_Ki_f is None or self.bad_fhat:\r\n old_Ki_f = np.random.rand(self.N, 1)/50.0\r\n #old_Ki_f = self.Y\r\n f = np.dot(K, old_Ki_f)\r\n else:\r\n #Start at the old best point\r\n old_Ki_f = self.old_Ki_f.copy()\r\n f = self.f_hat.copy()\r\n\r\n new_obj = -np.inf\r\n old_obj = np.inf\r\n\r\n def obj(Ki_f, f):\r\n return -0.5*np.dot(Ki_f.T, f) + self.noise_model.logpdf(f, self.data, extra_data=self.extra_data)\r\n\r\n difference = np.inf\r\n epsilon = 1e-7\r\n #step_size = 1\r\n #rs = 0\r\n i = 0\r\n\r\n while difference > epsilon and i < MAX_ITER:\r\n W = -self.noise_model.d2logpdf_df2(f, self.data, extra_data=self.extra_data)\r\n\r\n W_f = W*f\r\n grad = self.noise_model.dlogpdf_df(f, self.data, extra_data=self.extra_data)\r\n\r\n b = W_f + grad\r\n W12BiW12Kb, _ = self._compute_B_statistics(K, W.copy(), np.dot(K, b))\r\n\r\n #Work out the DIRECTION that we want to move in, but don't choose the stepsize yet\r\n full_step_Ki_f = b - W12BiW12Kb\r\n dKi_f = full_step_Ki_f - old_Ki_f\r\n\r\n f_old = f.copy()\r\n def inner_obj(step_size, old_Ki_f, dKi_f, K):\r\n Ki_f = old_Ki_f + step_size*dKi_f\r\n f = np.dot(K, Ki_f)\r\n # This is nasty, need to set something within an optimization though\r\n self.tmp_Ki_f = Ki_f.copy()\r\n self.tmp_f = f.copy()\r\n return -obj(Ki_f, f)\r\n\r\n i_o = partial_func(inner_obj, old_Ki_f=old_Ki_f, dKi_f=dKi_f, K=K)\r\n #Find the stepsize that minimizes the objective function using a brent line search\r\n #The tolerance and maxiter matter for speed! Seems to be best to keep them low and make more full\r\n #steps than get this exact then make a step, if B was bigger it might be the other way around though\r\n #new_obj = sp.optimize.minimize_scalar(i_o, method='brent', tol=1e-4, options={'maxiter':5}).fun\r\n new_obj = sp.optimize.brent(i_o, tol=1e-4, maxiter=10)\r\n f = self.tmp_f.copy()\r\n Ki_f = self.tmp_Ki_f.copy()\r\n\r\n #Optimize without linesearch\r\n #f_old = f.copy()\r\n #update_passed = False\r\n #while not update_passed:\r\n #Ki_f = old_Ki_f + step_size*dKi_f\r\n #f = np.dot(K, Ki_f)\r\n\r\n #old_obj = new_obj\r\n #new_obj = obj(Ki_f, f)\r\n #difference = new_obj - old_obj\r\n ##print \"difference: \",difference\r\n #if difference < 0:\r\n ##print \"Objective function rose\", np.float(difference)\r\n ##If the objective function isn't rising, restart optimization\r\n #step_size *= 0.8\r\n ##print \"Reducing step-size to {ss:.3} and restarting optimization\".format(ss=step_size)\r\n ##objective function isn't increasing, try reducing step size\r\n #f = f_old.copy() #it's actually faster not to go back to old location and just zigzag across the mode\r\n #old_obj = new_obj\r\n #rs += 1\r\n #else:\r\n #update_passed = True\r\n\r\n #old_Ki_f = self.Ki_f.copy()\r\n\r\n #difference = abs(new_obj - old_obj)\r\n #old_obj = new_obj.copy()\r\n difference = np.abs(np.sum(f - f_old)) + np.abs(np.sum(Ki_f - old_Ki_f))\r\n #difference = np.abs(np.sum(Ki_f - old_Ki_f))/np.float(self.N)\r\n old_Ki_f = Ki_f.copy()\r\n i += 1\r\n\r\n self.old_Ki_f = old_Ki_f.copy()\r\n\r\n #Warn of bad fits\r\n if difference > epsilon:\r\n self.bad_fhat = True\r\n warnings.warn(\"Not perfect f_hat fit difference: {}\".format(difference))\r\n elif self.bad_fhat:\r\n self.bad_fhat = False\r\n warnings.warn(\"f_hat now perfect again\")\r\n\r\n self.Ki_f = Ki_f\r\n return 
f", "def _vrms2(x, y, inc_deg,\n surf_lum, sigma_lum, qobs_lum,\n surf_pot, sigma_pot, qobs_pot,\n beta, tensor, sigmaPsf, normPsf,\n pixSize, pixAng, step, nrad, nang):\n # Axisymmetric deprojection of both luminous and total mass.\n # See equation (12)-(14) of Cappellari (2008)\n #\n inc = np.radians(inc_deg)\n\n qintr_lum = qobs_lum**2 - np.cos(inc)**2\n if np.any(qintr_lum <= 0):\n raise RuntimeError('Inclination too low q < 0')\n qintr_lum = np.sqrt(qintr_lum)/np.sin(inc)\n if np.any(qintr_lum < 0.05):\n raise RuntimeError('q < 0.05 components')\n dens_lum = surf_lum*qobs_lum / (sigma_lum*qintr_lum*np.sqrt(2*np.pi))\n\n qintr_pot = qobs_pot**2 - np.cos(inc)**2\n if np.any(qintr_pot <= 0):\n raise RuntimeError('Inclination too low q < 0')\n qintr_pot = np.sqrt(qintr_pot)/np.sin(inc)\n if np.any(qintr_pot < 0.05):\n raise RuntimeError('q < 0.05 components')\n dens_pot = surf_pot*qobs_pot / (sigma_pot*qintr_pot*np.sqrt(2*np.pi))\n\n # Define parameters of polar grid for interpolation\n #\n w = sigma_lum < np.max(np.abs(x)) # Characteristic MGE axial ratio in observed range\n\n if w.sum() < 3:\n qmed = np.median(qobs_lum)\n else:\n qmed = np.median(qobs_lum[w])\n\n rell = np.sqrt(x**2 + (y/qmed)**2) # Elliptical radius of input (x, y)\n\n psfConvolution = (np.max(sigmaPsf) > 0) and (pixSize > 0)\n\n # Kernel step is 1/4 of largest value between sigma(min) and 1/2 pixel side.\n # Kernel half size is the sum of 3*sigma(max) and 1/2 pixel diagonal.\n #\n if (nrad*nang > x.size) and (not psfConvolution): # Just calculate values\n\n xPol = x\n yPol = y\n\n else: # Interpolate values on polar grid\n\n if psfConvolution: # PSF convolution\n if step == 0:\n step = max(pixSize/2., np.min(sigmaPsf))/4.\n mx = 3*np.max(sigmaPsf) + pixSize/np.sqrt(2)\n else: # No convolution\n step = np.min(rell.clip(1)) # Minimum radius of 1pc\n mx = 0\n\n # Make linear grid in log of elliptical radius RAD and eccentric anomaly ANG\n # See Appendix A\n #\n rmax = np.max(rell) + mx # Major axis of ellipse containing all data + convolution\n logRad = np.linspace(np.log(step), np.log(rmax), nrad) # Linear grid in np.log(rell)\n ang = np.linspace(0, np.pi/2, nang) # Linear grid in eccentric anomaly\n radGrid, angGrid = map(np.ravel, np.meshgrid(np.exp(logRad), ang))\n xPol = radGrid*np.cos(angGrid)\n yPol = radGrid*np.sin(angGrid) * qmed\n\n # The model Vrms computation is only performed on the polar grid\n # which is then used to interpolate the values at any other location\n #\n wm2Pol = np.empty_like(xPol)\n mgePol = np.empty_like(xPol)\n for j in range(xPol.size):\n wm2Pol[j] = quadva(_integrand, [0., 1.],\n args=(dens_lum, sigma_lum, qintr_lum,\n dens_pot, sigma_pot, qintr_pot,\n xPol[j], yPol[j], inc, beta, tensor))[0]\n mgePol[j] = np.sum(surf_lum * np.exp(-0.5/sigma_lum**2 *\n (xPol[j]**2 + (yPol[j]/qobs_lum)**2)))\n\n\n if psfConvolution: # PSF convolution\n\n nx = np.ceil(rmax/step)\n ny = np.ceil(rmax*qmed/step)\n x1 = np.linspace(-nx, nx, 2*nx)*step\n y1 = np.linspace(-ny, ny, 2*ny)*step\n xCar, yCar = np.meshgrid(x1, y1) # Cartesian grid for convolution\n\n # Interpolate MGE model and Vrms over cartesian grid\n #\n r1 = 0.5*np.log(xCar**2 + (yCar/qmed)**2) # Log elliptical radius of cartesian grid\n e1 = np.arctan2(np.abs(yCar/qmed), np.abs(xCar)) # Eccentric anomaly of cartesian grid\n\n wm2Car = bilinear_interpolate(logRad, ang, wm2Pol.reshape(nang, nrad), r1, e1)\n mgeCar = bilinear_interpolate(logRad, ang, mgePol.reshape(nang, nrad), r1, e1)\n\n nk = np.ceil(mx/step)\n kgrid = np.linspace(-nk, nk, 
2*nk)*step\n xgrid, ygrid = np.meshgrid(kgrid, kgrid) # Kernel is square\n if pixAng != 0:\n xgrid, ygrid = rotate_points(xgrid, ygrid, pixAng)\n\n # Compute kernel with equation (A6) of Cappellari (2008).\n # Normaliztion is irrelevant here as it cancels out.\n #\n kernel = np.zeros_like(xgrid)\n dx = pixSize/2\n sp = np.sqrt(2)*sigmaPsf\n for j in range(len(sigmaPsf)):\n kernel += normPsf[j] \\\n * (special.erf((dx-xgrid)/sp[j]) + special.erf((dx+xgrid)/sp[j])) \\\n * (special.erf((dx-ygrid)/sp[j]) + special.erf((dx+ygrid)/sp[j]))\n kernel /= np.sum(kernel)\n\n # Seeing and aperture convolution with equation (A3)\n #\n muCar = signal.fftconvolve(wm2Car, kernel, mode='same') \\\n / signal.fftconvolve(mgeCar, kernel, mode='same')\n\n # Interpolate convolved image at observed apertures.\n # Aperture integration was already included in the kernel.\n #\n mu = bilinear_interpolate(x1, y1, muCar, x, y)\n\n else: # No PSF convolution\n\n muPol = wm2Pol/mgePol\n\n if nrad*nang > x.size: # Just returns values\n mu = muPol\n else: # Interpolate values\n r1 = 0.5*np.log(x**2 + (y/qmed)**2) # Log elliptical radius of input (x,y)\n e1 = np.arctan2(np.abs(y/qmed), np.abs(x)) # Eccentric anomaly of input (x,y)\n mu = bilinear_interpolate(logRad, ang, muPol.reshape(nang, nrad), r1, e1)\n\n return mu", "def gauss_kernel(radius, n_sigmas=8):\n sizex = int(n_sigmas * radius)\n sizey = int(n_sigmas * radius)\n radius = float(radius)\n xc = 0.5 * sizex\n yc = 0.5 * sizey\n y, x = np.mgrid[0:sizey - 1, 0:sizex - 1]\n x = x - xc\n y = y - yc\n x = x / radius\n y = y / radius\n g = np.exp(-0.5 * (x ** 2 + y ** 2))\n return g / (2 * np.pi * radius ** 2) # g.sum()", "def write_kernel(w, k):\n w.writeln(\"void {k}(const Image<int>& in, Image<int>& out\".format(k=k.name))\n # write the tap signal in the function argument list\n for tapName in k.rtapNames:\n #tapType = k.edges[tapName].dtype\n #tapCType = dtypeMap[tapType]\n tapCType = getCType(k.edges[tapName])\n for indices in expand_range(k.edges[tapName].dim):\n w.writeln(\"\\t, {type} {sig}\".format(type=tapCType, sig=mangle((tapName, indices))))\n w.writeln(\")\")\n w.writeln(\"{\")\n w.indent()\n # TODO: insert size error checking into C code here\n\n w.writeln(\"for(int y = 0; y < in.height(); y++){\")\n w.indent()\n w.writeln(\"for(int x = 0; x < in.width(); x++){\")\n w.indent()\n\n \n # Grab the register declaration for the partial-pixel output and blow it into\n # the complete list of input registers\n startName = k.ppoutName\n #startType = k.edges[startName].dtype\n #startCType = dtypeMap[startType]\n startCType = getCType(k.edges[startName])\n for indices in expand_range(k.edges[startName].dim):\n # HACK: work with multi-channel or single-channel images\n z_idx = 0\n if len(indices) == 3:\n z_idx = indices[2]\n\n w.writeln(\"{type} {reg} = in(x+{xoff}, y+{yoff}, {z});\".format(\n type=startCType,\n reg=mangle((startName, indices)),\n xoff=(indices[0]-k.centroid[0]), \n yoff=(indices[1]-k.centroid[1]), z=z_idx))\n \n # Set up the constants\n for const in k.constants:\n # TODO: be careful here, because we need to be consistent with naming/indexing\n # TODO: handle int/float; infer datatype in parser\n w.writeln(\"const float {reg} = {val};\".format(reg=mangle((const[0], [0])), val=const[1]))\n \n w.writeln(\"\")\n\n\n #Special Register Examples for Reduce:\n #fix_17_0 pixel_out_pos[1:0] # Location of Reduce pixel in output image\n #fix_17_0 centroid_pos[1:0] # Location of Centroid in input image\n if \"centroid_pos\" in k.specialRegs:\n w.writeln(\"int 
centroid_pos_0 = x;\")\n w.writeln(\"int centroid_pos_1 = y;\")\n\n if \"pixel_out_pos\" in k.specialRegs:\n w.writeln(\"int pixel_out_pos_0 = x;\")\n w.writeln(\"int pixel_out_pos_1 = y;\")\n \n # Create a list of (name, index) tuples representing the valid (i.e., evaluated) signal\n validRegs = [(startName, i) for i in expand_range(k.edges[startName].dim)]\n validRegs += [(tapName, i) for tapName in k.rtapNames \n for i in expand_range(k.edges[tapName].dim)]\n validRegs += [(regName, i) for regName in k.specialRegs \n for i in expand_range(k.edges[regName].dim)]\n validRegs += [(c[0], [0]) for c in k.constants]\n \n # Make a copy of the list of operations which we can remove stuff from\n unprocessed = dict(k.ops)\n \n # Process all the operations\n while len(unprocessed) > 0:\n progress = False\n for opKey in unprocessed:\n op = k.ops[opKey]\n # Find an operation that can be evaluated\n if opOk(op, validRegs):\n #dtype = k.edges[op.result[0]].dtype\n #dtype = dtypeMap[dtype] # Look up the C-equivalent for this type\n dtype = getCType(k.edges[op.result[0]])\n # TODO: include integer/fraction width\n \n # TODO: error checking that we have the right number of operands - this should be done in the parser, actually\n # Evaluate it\n if op.name in ['max', 'min']:\n write_complex_op(w, op, dtype)\n elif op.name == \"sum\": \n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=str.join(' + ', mangle(op.operands))))\n elif op.name == \"mv\":\n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=mangle(op.operands[0])))\n elif op.name == \"add\":\n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=str.join(' + ', mangle(op.operands))))\n elif op.name == \"sub\":\n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=str.join(' - ', mangle(op.operands))))\n elif op.name == \"mult\":\n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=str.join(' * ', mangle(op.operands))))\n elif op.name == \"div\":\n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=str.join(' / ', mangle(op.operands))))\n\n elif op.name == \"lshift\":\n w.writeln(\"{dtype} {dst} = {op1} << {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"rshift\":\n w.writeln(\"{dtype} {dst} = {op1} >> {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"and\":\n w.writeln(\"{dtype} {dst} = {op1} & {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"or\":\n w.writeln(\"{dtype} {dst} = {op1} | {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"ne\":\n w.writeln(\"{dtype} {dst} = {op1} != {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"eq\":\n w.writeln(\"{dtype} {dst} = {op1} == {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"lt\":\n w.writeln(\"{dtype} {dst} = {op1} < {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"lte\":\n w.writeln(\"{dtype} {dst} = {op1} <= {op2};\".format(dtype=dtype, 
dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"gt\":\n w.writeln(\"{dtype} {dst} = {op1} > {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"gte\":\n w.writeln(\"{dtype} {dst} = {op1} >= {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"not\":\n w.writeln(\"{dtype} {dst} = !{src};\".format(dtype=dtype, dst=mangle(op.result), src=mangle(op.operands[0])))\n elif op.name == \"abs\":\n w.writeln(\"{dtype} {dst} = ({src} >= 0) ? {src} : (-{src});\".format(dtype=dtype, dst=mangle(op.result), src=mangle(op.operands[0])))\n elif op.name == \"inv\":\n w.writeln(\"{dtype} {dst} = -{src};\".format(dtype=dtype, dst=mangle(op.result), src=mangle(op.operands[0])))\n\n elif op.name == \"mux\":\n w.writeln(\"{dtype} {dst} = {cond} ? {op1} : {op2};\".format(dtype=dtype, dst=mangle(op.result), \\\n cond=mangle(op.operands[0]), op1=mangle(op.operands[1]), op2=mangle(op.operands[2])))\n else:\n print \"Unhandled operator \" + opKey\n \n validRegs.append(op.result)\n # Remove it from the list\n unprocessed.pop(opKey)\n progress = True\n break # We changed the list, so we gotta start over\n \n # If we went through the whole list without finding any ops to evaluate,\n # something is wrong and we need to give up.\n if progress is False:\n print \"Failed to evaluate some ops!\"\n for opKey in unprocessed:\n print \"\\t %s %s\" % (unprocessed[opKey].name, unprocessed[opKey].result)\n break\n \n for indices in expand_range(k.edges[k.sink].dim):\n #writeln('printf(\"result: %f\\\\n\", {reg});'.format(reg=mangle((k.sink, indices))))\n # TODO: make this handle depths other than 3\n w.writeln('out(x,y,{z}) = {reg};'.format(z=indices[0], reg=mangle((k.sink, indices))))\n\n w.unindent()\n w.writeln(\"}\")\n w.unindent()\n w.writeln(\"}\")\n w.unindent()\n w.writeln(\"} // END %s\" % k.name)\n w.writeln(\"\\n\")", "def create_low_pass_frequency_kernel(im, radius):\n kernel = create_high_pass_frequency_kernel(im, radius)\n kernel = 1 - kernel\n return kernel", "def create_low_pass_frequency_kernel(im, radius):\n kernel = create_high_pass_frequency_kernel(im, radius)\n kernel = 1 - kernel\n return kernel", "def inp_kernel(r, ktype):\n \n if ktype == 'uniform':\n \n if r < 1.:\n return 1./((4./3.)*pi)\n else:\n return 0.\n \n elif ktype == 'sph-anarchy':\n \n if r <= 1.: return (21./(2.*pi)) * ((1. - r)*(1. - r)*(1. - r)*(1. - r)*(1. + 4.*r)) \n else: return 0. \n \n elif ktype == 'gadget-2':\n \n if r < 0.5: return (8./pi) * (1. - 6*(r*r) + 6*(r*r*r))\n elif r < 1.: return (8./pi) * 2 * ((1. - r)*(1. - r)*(1. - r))\n else: return 0.\n \n elif ktype == 'cubic':\n \n if r < 0.5: return (2.546479089470 + 15.278874536822 * (r - 1.0) * r * r)\n elif r < 1: return 5.092958178941 * (1.0 - r) * (1.0 - r) * (1.0 - r)\n else: return 0\n \n elif ktype == 'quintic':\n \n if r < 0.333333333: return 27.0*(6.4457752*r*r*r*r*(1.0-r) -1.4323945*r*r +0.17507044)\n elif r < 0.666666667: return 27.0*(3.2228876*r*r*r*r*(r-3.0) +10.7429587*r*r*r -5.01338071*r*r +0.5968310366*r +0.1352817016)\n elif r < 1: return 27.0*0.64457752*(-r*r*r*r*r +5.0*r*r*r*r -10.0*r*r*r +10.0*r*r -5.0*r +1.0)\n else: return 0\n \n else:\n \n print (\"Doesn't recognize the kernel. 
Input your own kernel in `inp_kernel`\")\n exit()", "def kernel_inverse(rho):\n egval, egvec = LA.eigh(rho)\n\n if np.any(egval < -FLOAT_PRECISION):\n raise ValueError('Negative eigenvalues of given density matrix')\n\n pos_egval_idx = np.where(egval > FLOAT_PRECISION)[0]\n zero_egval_idx = np.where(egval < FLOAT_PRECISION)[0]\n\n egval[pos_egval_idx] = 0\n egval[zero_egval_idx] = 1\n\n rho_inv = egvec @ np.diagflat(egval) @ LA.inv(egvec)\n\n return rho_inv", "def compute_gradient_kernel_respect_to_noise(n):\n\n return np.identity(n)", "def nfw(self, k, m, z):\n RS, rhoS, c = self.rS_rhoS_c(m, z)\n #\n result = np.sin(k * RS) * ( Si((1+c) * k * RS) - Si(k * RS) )\n result += - np.sin(c * k * RS) / ((1+c) * k * RS)\n result += np.cos(k * RS) * ( Ci((1+c) * k * RS) - Ci(k * RS) )\n result /= (np.log(1+c) - c/(1+c))\n return result", "def nfw(self, k, m, z):\n RS, rhoS, c = self.rS_rhoS_c(m, z)\n #\n result = np.sin(k * RS) * ( Si((1+c) * k * RS) - Si(k * RS) )\n result += - np.sin(c * k * RS) / ((1+c) * k * RS)\n result += np.cos(k * RS) * ( Ci((1+c) * k * RS) - Ci(k * RS) )\n result /= (np.log(1+c) - c/(1+c))\n return result", "def imwofz_nonvector(x, y):\n ncut=27\n xy=x*y \n xyp=2.0*xy/jnp.pi \n exx=jnp.exp(-x*x) \n f=-exx*erfcx(y)*jnp.sin(2.0*xy)+x/jnp.pi*exx*jnp.sinc(xyp) \n n=jnp.arange(1,ncut+1) \n n2=n*n \n vec0=0.5*n/(0.25*n2+ y*y) \n vec1=jnp.exp(-(0.25*n2+x*x)) \n vec4=jnp.exp(-(0.5*n+x)*(0.5*n+x)) \n vec5=jnp.exp(-(0.5*n-x)*(0.5*n-x)) \n Sigma1=jnp.dot(vec0,vec1)\n Sigma4=jnp.dot(vec0,vec4)\n Sigma5=jnp.dot(vec0,vec5)\n f = f + 1.0/jnp.pi*(y*jnp.sin(2.0*xy)*Sigma1 + 0.5*(Sigma5-Sigma4))\n \n return f", "def gaussian_filter(img,f=5,K=1,var=1):\n i_x, i_y = np.shape(img) # image size\n radi = f//2 # window radius\n\n # create gaussian kernel\n def gaussian_kernel(f,K,var):\n \n # create coordinate information \n if f//2 == 0:\n x = np.linspace(-radi,radi,f+1)\n y = np.linspace(-radi,radi,f+1)\n x = np.delete(x, radi)\n y = np.delete(y, radi)\n else:\n x = np.linspace(-radi,radi,f)\n y = np.linspace(-radi,radi,f)\n\n m_x, m_y = np.meshgrid(x,y) # create coordinate\n r_gauss = m_x**2 + m_y**2 # distance to origin\n gauss = K*(np.exp(-r_gauss/(2*(var**2)))) # create kernel\n return gauss/gauss.sum()\n \n #mirror padding\n def mir_padding(img,f):\n img_p = np.zeros((i_x+2*radi,i_y+2*radi)) #create padding image\n img_p[radi:i_x+radi,radi:i_y+radi] = img #throw original image to padding image\n img_p[0:radi,radi:i_y+radi] = img[radi-1::-1,:] # padding top rows\n img_p[-radi::1,radi:i_y+radi] = img[-1:-radi-1:-1,:] # padding bottom rows\n img_p[radi:i_x+radi,0:radi] = img[:,radi-1::-1] # padding left column\n img_p[radi:i_x+radi,-radi::1] = img[:,-1:-radi-1:-1] # padding right column\n for i in range(f):\n img_p[0:radi,i] = img[radi-1-i,radi-1::-1] # padding upper-left corner\n img_p[0:radi,-i] = img[radi-1-i,-radi::1] # padding upper-righ corner\n img_p[-1:-radi-1:-1,i] = img[-radi+i,radi-1::-1] # padding lower-left corner\n img_p[-1:-radi-1:-1,-i] = img[-radi+i,-radi::1] # padding lower-right corner\n return img_p\n\n img_p = mir_padding(img,f) # create padding image\n g_kernel = gaussian_kernel(f,K,var) # create gaussian kernel\n\n #seperate kernel\n E = g_kernel[0,0]\n c = g_kernel[:,0]\n wT = np.reshape(g_kernel[0,:]/E,(f,1))\n\n gauss_image = np.zeros([i_x,i_y]) # create gauss image\n temp_image = np.zeros([i_x,i_y]) # create temp image for two 1D convolution\n old_c_sum = c.sum() # calculate sum of c before modification\n\n # if elements of kernel are located within area of 
padding, substitute value with 0\n # calculate new value base on ratio between sum before and after modification\n for j in range(i_y):\n y_bound = i_y - j\n mod_c = c.copy()\n if j < radi:\n mod_c[0:radi-j] = 0 \n new_c_sum = mod_c.sum()\n mod_c = mod_c*old_c_sum/new_c_sum \n if j > i_y - radi - 1:\n mod_c[-1:-radi+y_bound-1:-1] = 0 \n new_c_sum = mod_c.sum()\n mod_c = mod_c*old_c_sum/new_c_sum \n for i in range(i_x):\n temp_image[i,j] = np.sum(img_p[i+radi,j:j+f]*mod_c)\n\n temp_image = mir_padding(temp_image,f) # create padding temp image for next 1D convolution\n old_wT_sum = wT.sum() # calculate sum of wT before modification\n\n # if elements of kernel are located within area of padding, substitute value with 0\n # calculate new value base on ratio between sum before and after modification\n for i in range(i_x):\n x_bound = i_x - i\n mod_wT = wT.copy()\n if i < radi:\n mod_wT[0:radi-i] = 0 \n new_wT_sum = mod_wT.sum()\n mod_wT = mod_wT*old_wT_sum/new_wT_sum \n if i > i_x - radi - 1:\n mod_wT[-1:-radi+x_bound-1:-1] = 0 \n new_wT_sum = mod_wT.sum()\n mod_wT = mod_wT*old_wT_sum/new_wT_sum \n for j in range(i_y):\n gauss_image[i,j] = np.sum(temp_image[i:i+f,j+radi]*mod_wT.T)\n\n return gauss_image", "def test_fixedkernel(self):\r\n X = np.random.rand(30, 4)\r\n K = np.dot(X, X.T)\r\n kernel = GPy.kern.fixed(4, K)\r\n kern = GPy.kern.poly(5, degree=4)\r\n self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose))", "def create_filter_bank():\r\n kernels = []\r\n for theta in range(0, 2):\r\n theta = theta / 2. * np.pi\r\n for sigma in (3, 5):\r\n for frequency in (0.10, 0.25):\r\n kernel = np.real(gabor_kernel(frequency, theta=theta,\r\n sigma_x=sigma, sigma_y=sigma))\r\n kernels.append(kernel)\r\n print(len(kernels))\r\n return kernels", "def focus_field_beam(shape = (128,128,128),\n units = (0.1,0.1,0.1),\n lam =.5, NA = .6, n0 = 1.,\n return_all_fields = False,\n n_integration_steps = 200):\n\n\n p = OCLProgram(absPath(\"kernels/psf_debye.cl\"),\n build_options = [\"-I\",absPath(\"kernels\"),\"-D\",\"INT_STEPS=%s\"%n_integration_steps])\n\n if np.isscalar(NA):\n NA = [0.,NA]\n \n Nx0, Ny0, Nz0 = shape\n dx, dy, dz = units\n\n #FIXME: the loop below does not yet work for odd inputs\n if not Nx0%2+Ny0%2+Nz0%2==0:\n raise NotImplementedError(\"odd shapes not supported yet\")\n\n\n alphas = np.arcsin(np.array(NA)/n0)\n assert len(alphas)%2 ==0\n\n # as we assume the psf to be symmetric, we just have to calculate each octant\n Nx = Nx0//2+1\n Ny = Ny0//2+1\n Nz = Nz0//2+1\n\n u_g = OCLArray.empty((Nz,Ny,Nx),np.float32)\n ex_g = OCLArray.empty(u_g.shape,np.complex64)\n ey_g = OCLArray.empty(u_g.shape,np.complex64)\n ez_g = OCLArray.empty(u_g.shape,np.complex64)\n\n alpha_g = OCLArray.from_array(alphas.astype(np.float32))\n\n \n p.run_kernel(\"debye_wolf\",u_g.shape[::-1],None,\n ex_g.data,ey_g.data,ez_g.data, u_g.data,\n np.float32(1.),np.float32(0.),\n np.float32(0.),np.float32(dx*(Nx-1.)),\n np.float32(0.),np.float32(dy*(Ny-1.)),\n np.float32(0.),np.float32(dz*(Nz-1.)),\n np.float32(lam), np.float32(n0),\n alpha_g.data, np.int32(len(alphas)))\n\n u = u_g.get()\n ex = ex_g.get()\n ey = ey_g.get()\n ez = ez_g.get()\n\n u_all = np.empty((Nz0,Ny0,Nx0),np.float32)\n ex_all = np.empty((Nz0,Ny0,Nx0),np.complex64)\n ey_all = np.empty((Nz0,Ny0,Nx0),np.complex64)\n ez_all = np.empty((Nz0,Ny0,Nx0),np.complex64)\n\n sx = [slice(0,Nx),slice(Nx,Nx0)]\n sy = [slice(0,Ny),slice(Ny,Ny0)]\n sz = [slice(0,Nz),slice(Nz,Nz0)]\n\n\n\n # spreading the calculated octant to the full volume\n for i,j,k in 
itertools.product([0,1],[0,1],[0,1]):\n\n # i, j, k = 0 indicates the + octant\n\n u_all[sz[1-i],sy[1-j],sx[1-k]] = u[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n if i ==0:\n ex_all[sz[1-i],sy[1-j],sx[1-k]] = ex[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n ey_all[sz[1-i],sy[1-j],sx[1-k]] = ey[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n ez_all[sz[1-i],sy[1-j],sx[1-k]] = ez[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n\n else:\n ex_all[sz[1-i],sy[1-j],sx[1-k]] = np.conjugate(ex[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k])\n ey_all[sz[1-i],sy[1-j],sx[1-k]] = np.conjugate(ey[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k])\n ez_all[sz[1-i],sy[1-j],sx[1-k]] = np.conjugate(ez[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k])\n\n if return_all_fields:\n return u_all, ex_all, ey_all, ez_all\n else:\n return u_all", "def rewofz_nonvector(x,y):\n ncut=27\n xy=x*y\n xyp=xy/jnp.pi\n exx=jnp.exp(-x*x)\n f=exx*erfcx(y)*jnp.cos(2.0*xy)+x*jnp.sin(xy)/jnp.pi*exx*jnp.sinc(xyp)\n n=jnp.arange(1,ncut+1)\n n2=n*n\n vec0=1.0/(0.25*n2+ y*y)\n vec1=jnp.exp(-(0.25*n2+x*x))\n vec2=jnp.exp(-(0.5*n+x)*(0.5*n+x))\n vec3=jnp.exp(-(0.5*n-x)*(0.5*n-x))\n Sigma1=jnp.dot(vec0,vec1)\n Sigma2=jnp.dot(vec0,vec2)\n Sigma3=jnp.dot(vec0,vec3)\n f = f + 1.0/jnp.pi*(-y*jnp.cos(2.0*xy)*Sigma1 + 0.5*y*Sigma2 + 0.5*y*Sigma3)\n return f", "def gcheckerboard(kernelen=64, nsig=32):\n c = np.array([[-1, 1], [1, -1]])\n intsize = int(np.ceil(kernelen/2))\n return np.kron(c, np.ones([intsize, intsize])) * gkern(kernelen, nsig)", "def kernel(self, modulus=None):\n M = self.matrix(modulus=modulus)\n if modulus is None:\n M = M.convert_to(QQ)\n # Note: Even when working over a finite field, what we want here is\n # the pullback into the integers, so in this case the conversion to ZZ\n # below is appropriate. When working over ZZ, the kernel should be a\n # ZZ-submodule, so, while the conversion to QQ above was required in\n # order for the nullspace calculation to work, conversion back to ZZ\n # afterward should always work.\n # TODO:\n # Watch <https://github.com/sympy/sympy/issues/21834>, which calls\n # for fraction-free algorithms. 
If this is implemented, we can skip\n # the conversion to `QQ` above.\n K = M.nullspace().convert_to(ZZ).transpose()\n return self.domain.submodule_from_matrix(K)", "def fdm_2d(N,L,x,y,h,k):\n\n # Create the Laplacian as a 1d sparse matrix using central difference\n ones = np.ones(N)\n diagvalues = np.array([ones,-2*ones,ones])\n offsets = np.array([-1,0,1])\n lap1d = sps.dia_matrix((diagvalues,offsets), shape=(N,N))/h**2\n \n # Represent 2d coordinates as kronecker sum\n lap = sps.kron(lap1d,sps.diags(np.ones(N))) + \\\n sps.kron(sps.diags(np.ones(N)),lap1d)\n \n # potential terms\n pot_x = np.repeat(x**2,N)\n pot_y = np.tile(y**2,N)\n\n # The whole Hamiltonian in matrix form\n A = (-1*lap + sps.diags(pot_x) + sps.diags(pot_y))/2\n\n # Calculate the k smallest eigenvalues and corresponding eigenvectors\n E, psi = eigsh(A,k=k,which='SM')\n\n\n # Perturbated potential\n a = 25\n pot_new = pot_x + pot_y + gauss_pert(N,a).flatten()\n\n # Plot the new potential\n X,Y = np.meshgrid(x,y)\n fig = plt.figure()\n ax = fig.add_subplot(1,2,1,projection='3d')\n ax.plot_surface(X, Y, pot_new.reshape((N,N)), cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n ax = fig.add_subplot(1,2,2)\n fig.suptitle(r'Potential with a Gaussian perturbation')\n ax.imshow(pot_new.reshape(N,N),extent=[-L/2,L/2,-L/2,L/2])\n plt.savefig(os.path.join(path,'perturbated_potential.png'))\n\n # The perturbated Hamiltonian in matrix form\n A = (-1*lap + sps.diags(pot_new))/2\n\n # Calculate the k smallest eigenvalues and corresponding eigenvector\n # Of the perturbated system\n E_p, psi_p = eigsh(A,k=k,which='SM')\n\n return E,psi,E_p,psi_p", "def DisLayerSN_d(ndf, k):\n d_in = 2**k \n d_out = 2**(k+1)\n\n out = nn.Sequential(nn.utils.spectral_norm(\n nn.Conv2d(ndf*d_in, ndf*d_out, kernel_size, stride=stride, padding=padding, bias=False)), \n nn.Dropout2d(),\n nn.BatchNorm2d(ndf * d_out), \n nn.LeakyReLU(0.2, inplace=True) )\n return out", "def get_func(k_center,enk,I,gamma,gamma_k):\n\n def lorentzian_k(k):\n return 1./np.pi * gamma_k / ( (k-k_center)**2 + gamma_k**2)\n\n def lorentzian(k,omega):\n return I * gamma / ( (omega-enk)**2 + gamma**2) * lorentzian_k(k)\n\n return lorentzian", "def shrink_kernel(self, kernel, up_scale):\n up_scale = torch.tensor(up_scale).float()\n # boundary padding based on the scaling law\n pad_in = (torch.ceil(up_scale**2).int())*((kernel.shape[2]-1)//2)\n pad_h = (torch.ceil(up_scale).int())*((kernel.shape[3]-1)//2)\n pad_w = (torch.ceil(up_scale).int())*((kernel.shape[4]-1)//2)\n padded_kernel = F.pad(kernel, (pad_w, pad_w, pad_h, pad_h, pad_in, pad_in))\n delta = up_scale%1\n \n if delta == 0:\n shrink_factor = 1\n else:\n # shrink_factor for coordinates.\n shrink_factor = (((kernel.shape[4]-1))/(padded_kernel.shape[-1]-1)*(up_scale+1))\n \n # Adjustment to deal with weird filtering on the grid sample function.\n shrink_factor = 1.5*(shrink_factor-0.5)**3 + 0.57 \n\n grid = torch.meshgrid(torch.linspace(-1, 1, kernel.shape[2])*(shrink_factor**2),\n torch.linspace(-1, 1, kernel.shape[3])*shrink_factor, \n torch.linspace(-1, 1, kernel.shape[4])*shrink_factor)\n\n grid = torch.cat([grid[2].unsqueeze(0).unsqueeze(-1), \n grid[1].unsqueeze(0).unsqueeze(-1), \n grid[0].unsqueeze(0).unsqueeze(-1)], dim = -1).repeat(kernel.shape[0],1,1,1,1)\n\n new_kernel = F.grid_sample(padded_kernel, grid.to(device))\n if kernel.shape[-1] - 2*up_scale > 0:\n new_kernel = new_kernel * (kernel.shape[-1]**2/((kernel.shape[-1] - 2*up_scale)**2 + 0.01))\n return new_kernel", "def delta(N):\n assert assert_odd(N) # Make 
sure kernel is odd\n X = np.zeros((N,N)) # Square matrix with all 0s\n middle = int(N/2) # Get the middle cell\n X[middle, middle] = 1\n return X", "def ogfft2(x, N):\n x_p = brc(x)\n PI = np.pi\n for ii in np.arange(1,int(np.log2(N)) + 1):\n M = int(2**ii)\n w_M = np.exp(1j*((2*PI)/M))\n for kk in np.arange(0,N,M):\n w = 1\n m = int(M/2)\n for jj in np.arange(m):\n t = w*x_p[kk + jj + m]\n u = x_p[kk + jj]\n x_p[kk + jj] = u + t\n x_p[kk + jj + m] = u - t\n w = w*w_M\n return x_p", "def v_fermi(n_e):\n return sqrt(2*E_fermi(n_e) * k_b / m_star)", "def nitidez(img):\n kernel = np.array([\n [0, -1, 0],\n [-1, 5, -1],\n [0, -1, 0]\n ])\n if img.modo == \"L\":\n return convoluir(img, kernel)\n canais = [\n filtrar_canal(img, \"R\"), filtrar_canal(img, \"G\"), filtrar_canal(img, \"B\") \n ]\n canais = [convoluir(i, kernel) for i in canais]\n return reunir_canais(*canais)", "def KFilt(sample,fs=25):\n\t#kalman filter inputs\n \n # Dimensions of parameters:\n # 'transition_matrices': 2,\n # 'transition_offsets': 1,\n # 'observation_matrices': 2,\n # 'observation_offsets': 1,\n # 'transition_covariance': 2,\n # 'observation_covariance': 2,\n # 'initial_state_mean': 1,\n # 'initial_state_covariance': 2,\n \n n_timesteps = len(sample)\n trans_mat = []\n\n\t#mask missing values\n observations = np.ma.array(sample,mask=np.zeros(sample.shape))\n missing_loc = np.where(np.isnan(sample))\n observations[missing_loc[0][:],missing_loc[1][:]] = np.ma.masked\n\t\n\t#Import Kalman filter, inerpolate missing points and get 2nd, 3rd orde kinematics\n dt = 1./25\t#Length of each frame (should be iether 1/25 or 1/30)\t\n n_timesteps = len(sample)\n \n observation_matrix = np.array([[1,0,0,0],\n [0,1,0,0]])#np.eye(4) \n t = np.linspace(0,len(observations)*dt,len(observations))\n q = np.cov(observations.T[:2,:400])\n qdot = np.cov(np.diff(observations.T[:2,:400]))#np.cov(observations[:1,:400])\n\n h=(t[-1]-t[0])/t.shape[0]\n A=np.array([[1,0,h,.5*h**2], \n [0,1,0,h], \n [0,0,1,0],\n [0,0,0,1]]) \n\n init_mean = [sample[0],0,0] #initial mean should be close to the first point, esp if first point is human-picked and tracking starts at the beginning of a video\n observation_covariance = q*500 #ADJUST THIS TO CHANGE SMOOTHNESS OF FILTER\n init_cov = np.eye(4)*.001#*0.0026\n transition_matrix = A\n transition_covariance = np.array([[q[0,0],q[0,1],0,0],\n [q[1,0],q[1,1],0,0],\n [0,0,qdot[0,0],qdot[0,1]],\n [0,0,qdot[1,0],qdot[1,1]]])\n\n kf = KalmanFilter(transition_matrix, observation_matrix,transition_covariance,observation_covariance,n_dim_obs=2)\n\n kf = kf.em(observations,n_iter=1,em_vars=['transition_covariance','transition_matrix','observation_covariance'])\n\n #pdb.set_trace()\n \n global trans_mat, trans_cov, init_cond\n x_filt = kf.filter(observations[0])[0]#observations.T[0])[0]\n kf_means = kf.smooth(observations[0])[0]\n\t\n return kf_means,x_filt #np.column_stack((color_x[:,0],color_y[:,0],color_x[:,1],color_y[:,1])),frames", "def evaluate(inshp, kshp, (dx, dy)=(1, 1), nkern=1, mode='valid', ws=True):\r\n N = numpy\r\n\r\n # inshp contains either 2 entries (height,width) or 3 (nfeatures,h,w)\r\n # in the first case, default nfeatures to 1\r\n if N.size(inshp) == 2:\r\n inshp = (1,) + inshp\r\n\r\n inshp = N.array(inshp)\r\n kshp = N.array(kshp)\r\n ksize = N.prod(kshp)\r\n\r\n kern = ksize - 1 - N.arange(ksize)\r\n\r\n # size of output image if doing proper convolution\r\n # (mode='full',dx=dy=0) outshp is the actual output shape\r\n # given the parameters\r\n fulloutshp = inshp[1:] + kshp - 1\r\n if mode 
== 'valid':\r\n s = -1\r\n else:\r\n s = 1\r\n outshp = N.int64(N.ceil((inshp[1:] + s * kshp - s * 1) \\\r\n / N.array([dy, dx], dtype='float')))\r\n if any(outshp <= 0):\r\n err = 'Invalid kernel', kshp, 'and/or step size', (dx, dy),\\\r\n 'for given input shape', inshp\r\n raise ValueError(err)\r\n\r\n outsize = N.prod(outshp)\r\n insize = N.prod(inshp)\r\n\r\n # range of output units over which to iterate\r\n if mode == 'valid':\r\n lbound = N.array([kshp[0] - 1, kshp[1] - 1])\r\n ubound = lbound + (inshp[1:] - kshp + 1)\r\n else:\r\n lbound = N.zeros(2)\r\n ubound = fulloutshp\r\n\r\n # coordinates of image in \"fulloutshp\" coordinates\r\n topleft = N.array([kshp[0] - 1, kshp[1] - 1])\r\n # bound when counting the receptive field\r\n botright = topleft + inshp[1:]\r\n\r\n # sparse matrix specifics...\r\n if ws:\r\n spmatshp = (outsize * N.prod(kshp) * inshp[0], insize)\r\n else:\r\n spmatshp = (nkern * outsize, insize)\r\n spmat = scipy_sparse.lil_matrix(spmatshp)\r\n\r\n # loop over output image pixels\r\n z, zz = 0, 0\r\n\r\n # incremented every time we write something to the sparse\r\n # matrix this is used to track the ordering of filter tap\r\n # coefficient in sparse column ordering\r\n tapi, ntaps = 0, 0\r\n\r\n # Note: looping over the number of kernels could've been done\r\n # more efficiently as the last step (when writing to\r\n # spmat). However, this messes up the ordering of the column\r\n # values (order in which you write the values determines how\r\n # the vectorized data will get used later one)\r\n\r\n for fmapi in xrange(inshp[0]): # loop over input features\r\n # loop over number of kernels (nkern=1 for weight sharing)\r\n for n in xrange(nkern):\r\n\r\n # FOR EACH OUTPUT PIXEL...\r\n # loop over output image height\r\n for oy in N.arange(lbound[0], ubound[0], dy):\r\n # loop over output image width\r\n for ox in N.arange(lbound[1], ubound[1], dx):\r\n\r\n # kern[l] is filter value to apply at (oj,oi)\r\n # for (iy,ix)\r\n l = 0\r\n\r\n # ... ITERATE OVER INPUT UNITS IN RECEPTIVE FIELD\r\n for ky in oy + N.arange(kshp[0]):\r\n for kx in ox + N.arange(kshp[1]):\r\n\r\n # verify if we are still within image\r\n # boundaries. Equivalent to\r\n # zero-padding of the input image\r\n if (all((ky, kx) >= topleft) and\r\n all((ky, kx) < botright)):\r\n\r\n # convert to \"valid\" input space\r\n # coords used to determine column\r\n # index to write to in sparse mat\r\n iy, ix = N.array((ky, kx)) - topleft\r\n # determine raster-index of input pixel...\r\n\r\n # taking into account multiple\r\n # input features\r\n col = iy * inshp[2] + ix + \\\r\n fmapi * N.prod(inshp[1:])\r\n\r\n # convert oy,ox values to output\r\n # space coordinates\r\n if mode == 'full':\r\n (y, x) = (oy, ox)\r\n else:\r\n (y, x) = (oy, ox) - topleft\r\n # taking into account step size\r\n (y, x) = N.array([y, x]) / (dy, dx)\r\n\r\n # convert to row index of sparse matrix\r\n if ws:\r\n row = ((y * outshp[1] + x) *\r\n inshp[0] * ksize + l + fmapi *\r\n ksize)\r\n else:\r\n row = y * outshp[1] + x\r\n\r\n # Store something at that location\r\n # in sparse matrix. The written\r\n # value is only useful for the\r\n # sparse case. It will determine\r\n # the way kernel taps are mapped\r\n # onto the sparse columns (idea of\r\n # kernel map)\r\n # n*... 
only for sparse\r\n spmat[row + n * outsize, col] = tapi + 1\r\n\r\n # total number of active taps\r\n # (used for kmap)\r\n ntaps += 1\r\n\r\n # absolute tap index (total number of taps)\r\n tapi += 1\r\n # move on to next filter tap l=(l+1)%ksize\r\n l += 1\r\n\r\n if spmat.format != 'csc':\r\n spmat = spmat.tocsc().sorted_indices()\r\n else:\r\n # BUG ALERT: scipy0.6 has bug where data and indices are written in\r\n # reverse column ordering.\r\n # Explicit call to sorted_indices removes this problem.\r\n spmat = spmat.sorted_indices()\r\n\r\n if ws:\r\n kmap = None\r\n else:\r\n kmap = N.zeros(ntaps, dtype='int')\r\n k = 0\r\n #print 'TEMPORARY BUGFIX: REMOVE !!!'\r\n for j in xrange(spmat.shape[1]):\r\n for i_idx in xrange(spmat.indptr[j], spmat.indptr[j + 1]):\r\n if spmat.data[i_idx] != 0:\r\n # this is == spmat[i,j] - 1\r\n kmap[k] = spmat.data[i_idx] - 1\r\n k += 1\r\n\r\n # when in valid mode, it is more efficient to store in sparse row\r\n # TODO: need to implement structured dot for csr matrix\r\n assert spmat.format == 'csc'\r\n sptype = 'csc'\r\n #sptype = 'csr' if mode=='valid' else 'csc'\r\n if 0 and mode == 'valid':\r\n spmat = spmat.tocsr()\r\n\r\n rval = (spmat.indices[:spmat.size],\r\n spmat.indptr, spmatshp, sptype, outshp)\r\n if kmap is not None:\r\n rval += (kmap,)\r\n\r\n return rval", "def get_fc_inv(fc):\n return scipy.linalg.pinvh(fc.T @ fc) @ fc.T", "def rb_nfw(m200,c,z):\n\n #Setting up cosmology\n rho0=1.4876862e+11;\n omegam=0.238000;\n msun=1.98892e+33;\n delta_vir=200.;\n G=6.6730003e-08;\n kmpsToCmps = 1.0*10.**(5.);\n Rvir=200.;\n kpc2cm=3.086*10.**(21);\n \n deltac = (delta_vir/3.)*( (c**3.)/( np.log(1.+c) - (c / (1.+c))));\n rho_crit =rho0*omegam*(1.+z)**3.;\n r200 =(m200/delta_vir / rho_crit / (4.*np.pi/3.) )**0.33333 * 1000. ;\n v200 = ((6.67e-8 * m200 * msun / (r200* 3.086*10.**(21.)) )**0.5)/1e5 ;\n \n r =np.linspace(1.,3.*r200,500); # kpc\n rs = r200 / c; \n ss=(((r/rs)*(1.+(r/rs))**2.)*1000.**3);\n rho = (rho_crit * deltac)/(ss); \n M_r = 4.*np.pi* integrate.cumtrapz((r**2)*rho, r,initial=0.)\n \n x = r/r200 ;\n tab=1./x*(np.log(1.+c*x)-c*x/(1.+c*x))/(np.log(1.+c)-c/(1.+c));\n vcirc = v200*(tab)**0.5 ;\n maxvcirc = np.max(vcirc) ;\n q=np.where((vcirc == np.max(vcirc)));\n maxvcircr = r[q];\n \n \n # Now compute V_Esc as per nfw.pro Binney & Tremaine equation 2.31\n Phi_new = r * 0.0;\n vesc = r * 0.0 ;\n for ir in range(2,len(r)-4):\n term1 = (np.trapz(rho[0:ir]*(r[0:ir]**2.),x=r[0:ir])/(r[ir]))* msun; \n term2 = np.trapz(rho[ir:len(r)]*r[ir:len(r)],x=r[ir:len(r)])*msun; \n Phi_new[ir] = -4. *np.pi*6.67e-8*(term1 + term2)/3.086e21 ;\n vesc[ir] = ((2. 
* np.abs(Phi_new[ir]))**0.5) / 1e5 ; # See Binney & Tremaine (2-22) \n \n\n # Chage Units to do velocity dispersion calculations\n rcm=r*kpc2cm;\n\n #M_r in gram\n M_r_gram=M_r*msun;\n\n Phi=G*integrate.cumtrapz((M_r_gram/rcm**(2)),rcm,initial=0);\n \n Phi=Phi*(1./((1e5)**2.));#%km^2/s^2\n Phi_out=np.max(Phi);\n\n k=0.41;\n a=0.29;\n\n sig = np.sqrt(a *(( Phi/Phi_out)**(k))*(Phi_out -Phi));\n \n nfw={}\n qqqt=np.where((vesc==0.))\n vesc[qqqt]=1e-99\n\n nfw[\"m200\"]=m200;\n nfw[\"c\"]=c;\n nfw[\"r200\"]=r200;\n nfw[\"v200\"]=v200;\n nfw[\"maxvcirc\"]=maxvcirc;\n nfw[\"maxvcircr\"]=maxvcircr;\n nfw[\"r\"]=r;\n nfw[\"rho\"]=rho;\n nfw[\"vcirc\"]=vcirc;\n nfw[\"M_r\"]=M_r;\n nfw[\"sig_v\"]=sig;\n nfw[\"vesc\"]=vesc;\n \n return nfw", "def _compute_R1_from_kernel(n, m, kernel):\r\n\r\n R1 = 0\r\n ind_vec = np.arange(m)\r\n for l in range(n):\r\n ind_vec.shape = (1,)*l + (m,) + (1,)*(n-l-1)\r\n R1 += np.sum((2*ind_vec+1) * kernel**2)\r\n\r\n return R1", "def weak_lensing_kernel(cosmo, pzs, z, ell):\n z = np.atleast_1d(z)\n zmax = max([pz.zmax for pz in pzs])\n # Retrieve comoving distance corresponding to z\n chi = bkgrd.radial_comoving_distance(cosmo, z2a(z))\n\n # Extract the indices of pzs that can be treated as extended distributions,\n # and the ones that need to be treated as delta functions.\n pzs_extended_idx = [\n i for i, pz in enumerate(pzs) if not isinstance(pz, rds.delta_nz)\n ]\n pzs_delta_idx = [i for i, pz in enumerate(pzs) if isinstance(pz, rds.delta_nz)]\n # Here we define a permutation that would put all extended pzs at the begining of the list\n perm = pzs_extended_idx + pzs_delta_idx\n # Compute inverse permutation\n inv = np.argsort(np.array(perm, dtype=np.int32))\n\n # Process extended distributions, if any\n radial_kernels = []\n if len(pzs_extended_idx) > 0:\n\n @vmap\n def integrand(z_prime):\n chi_prime = bkgrd.radial_comoving_distance(cosmo, z2a(z_prime))\n # Stack the dndz of all redshift bins\n dndz = np.stack([pzs[i](z_prime) for i in pzs_extended_idx], axis=0)\n return dndz * np.clip(chi_prime - chi, 0) / np.clip(chi_prime, 1.0)\n\n radial_kernels.append(simps(integrand, z, zmax, 256) * (1.0 + z) * chi)\n # Process single plane redshifts if any\n if len(pzs_delta_idx) > 0:\n\n @vmap\n def integrand_single(z_prime):\n chi_prime = bkgrd.radial_comoving_distance(cosmo, z2a(z_prime))\n return np.clip(chi_prime - chi, 0) / np.clip(chi_prime, 1.0)\n\n radial_kernels.append(\n integrand_single(np.array([pzs[i].params[0] for i in pzs_delta_idx]))\n * (1.0 + z)\n * chi\n )\n # Fusing the results together\n radial_kernel = np.concatenate(radial_kernels, axis=0)\n # And perfoming inverse permutation to put all the indices where they should be\n radial_kernel = radial_kernel[inv]\n\n # Constant term\n constant_factor = 3.0 * const.H0 ** 2 * cosmo.Omega_m / 2.0 / const.c\n # Ell dependent factor\n ell_factor = np.sqrt((ell - 1) * (ell) * (ell + 1) * (ell + 2)) / (ell + 0.5) ** 2\n return constant_factor * ell_factor * radial_kernel", "def make_hanning_kernel_tensor_1d_no_depthwise(n_channels, downsample=2, length_of_window=8, make_plots=False, normalize=False, sqrt_window=True):\n hanning_kernel = make_hanning_kernel_1d(downsample=downsample,length_of_window=length_of_window,make_plots=make_plots, normalize=normalize, sqrt_window=sqrt_window).astype(np.float32)\n hanning_kernel_expanded = np.expand_dims(hanning_kernel,0) * np.expand_dims(np.eye(n_channels),3).astype(np.float32) # [n_channels, n_channels, filter_width]\n hanning_tensor = tf.constant(hanning_kernel_expanded) # 
[length_of_window, num_channels, num_channels]\n hanning_tensor = tf.transpose(hanning_tensor, [2, 0, 1])\n return hanning_tensor", "def transform(self,G):\n\n n = len(self.G_train_)\n nt = len(G)\n #Ks = sp.zeros((n,1))\n kernel_matrix = sp.zeros((nt,n))\n \n# for j in range(n):\n# Ks[j] = sp.sqrt(aGMKernel(self.G_train_[j],self.G_train_[j],self.alpha,self.gamma))\n# \n# for i in range(nt):\n# Kts = sp.sqrt(aGMKernel(G[i],G[i],self.alpha,self.gamma))\n# for j in range(n):\n# kernel_matrix[i,j] = aGMKernel(G[i],self.G_train_[j],self.alpha,self.gamma)/Kts/Ks[j]\n \n for i in range (nt):\n for j in range(n):\n kernel_matrix[i,j] = aGMKernel(G[i],self.G_train_[j],self.alpha, self.gamma)\n \n \n return kernel_matrix", "def SE(H, W):\n\n no_real, N, N, K, M = H.shape\n all_powers = np.swapaxes(np.swapaxes(H, 0, 1) @ hermitian(W), 0, 1)\n all_powers = np.abs(all_powers) ** 2\n\n\n\n # (no_real, N, N, K, K)\n # (no_real, n_t, n, k, k_neighbor)\n # the power coming from BS n_t to User k in BS n, using the\n # precoding of BS n_t to user k_neighbor in BS n1\n\n\n p_sig = np.zeros((no_real, N, K))\n p_int = np.zeros((no_real, N, K, N))\n sinr = np.zeros_like(p_sig)\n\n\n for r in range(no_real):\n for n in range(N):\n for k in range(K):\n p_sig[r, n, k] = all_powers[r, n, n, k, k]\n for n_t in range(N):\n p_int[r, n, k, n_t] = all_powers[r, n_t, n, k].sum()\n if n_t == n:\n p_int[r, n, k, n_t] -= p_sig[r,n,k]\n sinr = p_sig / ((p_int).sum(axis=-1) + 1)\n return np.log2(1 + sinr), p_sig, p_int", "def _calc_kernel(self,\n freq_1: float,\n time_1: float,\n freq_2: float,\n time_2: float,\n dagg: tuple\n ) -> Tuple[ndarray, ndarray]:\n dt = self._process_tensor.dt\n #pieces of kernel consist of some combination of phases and\n #Bose-Einstein factors\n n_1, n_2 = 0, 0\n if self._temp > 0:\n n_1 += np.exp(-freq_1/self._temp) / (1 - np.exp(-freq_1/self._temp))\n n_2 += np.exp(-freq_2/self._temp) / (1 - np.exp(-freq_2/self._temp))\n\n ker_dim = int(np.round(time_2 / dt))\n # calculate index corresponding to t_1\n switch = int(np.round(time_1 / dt))\n re_kernel = np.zeros((ker_dim, ker_dim), dtype = NpDtype)\n im_kernel = np.zeros((ker_dim, ker_dim), dtype = NpDtype)\n\n tpp_index, tp_index = np.meshgrid(\n np.arange(ker_dim), np.arange(ker_dim),\n indexing='ij') #array of indices for each array element\n regions = {\n 'a': (slice(switch), slice(switch)), #(0->t_1, 0->t_1)\n 'b': (slice(switch), slice(switch, None)), #(0->t_1, t_1->t)\n 'c': (slice(switch, None), slice(switch, None))} #(t_1->t, t_1->t)\n\n def phase(region, swap_ts = False):\n tk = tp_index[regions[region]]\n tkp = tpp_index[regions[region]]\n if tk.size == 0 or tkp.size == 0:\n return 0\n a = -1j * ((2*dagg[0] - 1)) * freq_2\n b = -1j * ((2*dagg[1] - 1)) * freq_1\n if swap_ts:\n a, b = b, a\n if region in ('a','c'):\n ph = np.triu(\n np.exp(a * (tk+1)*dt + b * (tkp+1)*dt) / (a * b), k = 1)\n ph -= np.triu(\n np.exp(a * (tk+1)*dt + b * tkp*dt) / (a * b), k = 1)\n ph -= np.triu(\n np.exp(a * tk*dt + b * (tkp+1)*dt) / (a * b), k = 1)\n ph += np.triu(\n np.exp(a * tk*dt + b * tkp*dt) / (a * b), k = 1)\n sel = np.diag(tk)\n di = -np.exp((a * (sel + 1) + b * sel) * dt) / (a * b)\n if a + b != 0:\n di += np.exp((a + b) * (sel + 1) * dt) / (b * (a+b))\n di += np.exp((a + b) * sel * dt) / (a * (a+b))\n else:\n di += (1 + a * sel * dt + b * (sel + 1) * dt) / (a * b)\n ph += np.diag(di)\n else:\n ph = np.exp(a * (tk+1)*dt + b * (tkp+1)*dt) / (a * b)\n ph -= np.exp(a * (tk+1)*dt + b * tkp*dt) / (a * b)\n ph -= np.exp(a * tk*dt + b * (tkp+1)*dt) / (a * 
b)\n ph += np.exp(a * tk*dt + b * tkp*dt) / (a * b)\n return ph\n\n\n if dagg == (0, 1):\n re_kernel[regions['a']] = phase('a') + phase('a', 1)\n\n re_kernel[regions['b']] = phase('b')\n\n im_kernel[regions['a']] = ((2*n_1 + 1) * phase('a') -\n (2*n_2 + 1) * phase('a', 1))\n\n im_kernel[regions['b']] = (2*n_1 + 1) * phase('b')\n\n im_kernel[regions['c']] = -2 * (n_1 + 1) * phase('c')\n\n elif dagg == (1, 0):\n re_kernel[regions['a']] = phase('a') + phase('a', 1)\n\n re_kernel[regions['b']] = phase('b')\n\n im_kernel[regions['a']] = ((2*n_1 + 1) * phase('a') -\n (2*n_2 + 1) * phase('a', 1))\n\n im_kernel[regions['b']] = (2*n_1 + 1) * phase('b')\n\n im_kernel[regions['c']] = 2 * n_1 * phase('c')\n\n elif dagg == (1, 1):\n re_kernel[regions['a']] = -(phase('a') + phase('a', 1))\n\n re_kernel[regions['b']] = -phase('b')\n\n im_kernel[regions['a']] = ((2*n_1 + 1) * phase('a') +\n (2*n_2 + 1) * phase('a', 1))\n\n im_kernel[regions['b']] = (2*n_1 + 1) * phase('b')\n\n im_kernel[regions['c']] = 2 * (n_1 + 1) * phase('c')\n\n elif dagg == (0, 0):\n re_kernel[regions['a']] = -(phase('a') + phase('a', 1))\n\n re_kernel[regions['b']] = -phase('b')\n\n im_kernel[regions['a']] = -((2*n_2 + 1) * phase('a', 1) +\n (2*n_1 + 1) * phase('a'))\n\n im_kernel[regions['b']] = -(2*n_1 + 1) * phase('b')\n\n im_kernel[regions['c']] = -2 * n_1 * phase('c')\n\n re_kernel = np.triu(re_kernel) #only keep triangular region\n im_kernel = np.triu(im_kernel)\n return re_kernel, im_kernel", "def zfr(x: Tensor, fs: int, N: int = 150, R: int = 3, fc: float = 70.) -> Tensor:\n for _ in range(R):\n x = hann_sinc_high_pass(x, N, fs, fc)\n x = x.cumsum(dim=-1)\n x = hann_sinc_high_pass(x, N, fs, fc)\n return x", "def integrate_idemix_kernel(state):\n vs = state.variables\n settings = state.settings\n\n a_tri, b_tri, c_tri, d_tri, delta = (allocate(state.dimensions, (\"xt\", \"yt\", \"zt\"))[2:-2, 2:-2] for _ in range(5))\n forc = allocate(state.dimensions, (\"xt\", \"yt\", \"zt\"))\n maxE_iw = allocate(state.dimensions, (\"xt\", \"yt\", \"zt\"))\n\n \"\"\"\n forcing by EKE dissipation\n \"\"\"\n if settings.enable_eke:\n forc = vs.eke_diss_iw\n\n else: # shortcut without EKE model\n forc = vs.K_diss_gm + vs.K_diss_h - vs.P_diss_skew\n\n if settings.enable_store_cabbeling_heat:\n forc += -vs.P_diss_hmix - vs.P_diss_iso\n\n if settings.enable_eke and (settings.enable_eke_diss_bottom or settings.enable_eke_diss_surfbot):\n \"\"\"\n vertically integrate EKE dissipation and inject at bottom and/or surface\n \"\"\"\n a_loc = npx.sum(vs.dzw[npx.newaxis, npx.newaxis, :-1] * forc[:, :, :-1] * vs.maskW[:, :, :-1], axis=2)\n a_loc += 0.5 * forc[:, :, -1] * vs.maskW[:, :, -1] * vs.dzw[-1]\n\n forc = update(forc, at[...], 0.0)\n\n ks = npx.maximum(0, vs.kbot[2:-2, 2:-2] - 1)\n mask = ks[:, :, npx.newaxis] == npx.arange(settings.nz)[npx.newaxis, npx.newaxis, :]\n if settings.enable_eke_diss_bottom:\n forc = update(\n forc,\n at[2:-2, 2:-2, :],\n npx.where(\n mask, a_loc[2:-2, 2:-2, npx.newaxis] / vs.dzw[npx.newaxis, npx.newaxis, :], forc[2:-2, 2:-2, :]\n ),\n )\n else:\n forc = update(\n forc,\n at[2:-2, 2:-2, :],\n npx.where(\n mask,\n settings.eke_diss_surfbot_frac\n * a_loc[2:-2, 2:-2, npx.newaxis]\n / vs.dzw[npx.newaxis, npx.newaxis, :],\n forc[2:-2, 2:-2, :],\n ),\n )\n forc = update(\n forc,\n at[2:-2, 2:-2, -1],\n (1.0 - settings.eke_diss_surfbot_frac) * a_loc[2:-2, 2:-2] / (0.5 * vs.dzw[-1]),\n )\n\n \"\"\"\n forcing by bottom friction\n \"\"\"\n if not settings.enable_store_bottom_friction_tke:\n forc = forc + vs.K_diss_bot\n\n 
\"\"\"\n prevent negative dissipation of IW energy\n \"\"\"\n maxE_iw = npx.maximum(0.0, vs.E_iw[:, :, :, vs.tau])\n\n \"\"\"\n vertical diffusion and dissipation is solved implicitly\n \"\"\"\n _, water_mask, edge_mask = utilities.create_water_masks(vs.kbot[2:-2, 2:-2], settings.nz)\n\n delta = update(\n delta,\n at[:, :, :-1],\n settings.dt_tracer\n * settings.tau_v\n / vs.dzt[npx.newaxis, npx.newaxis, 1:]\n * 0.5\n * (vs.c0[2:-2, 2:-2, :-1] + vs.c0[2:-2, 2:-2, 1:]),\n )\n delta = update(delta, at[:, :, -1], 0.0)\n a_tri = update(\n a_tri, at[:, :, 1:-1], -delta[:, :, :-2] * vs.c0[2:-2, 2:-2, :-2] / vs.dzw[npx.newaxis, npx.newaxis, 1:-1]\n )\n a_tri = update(a_tri, at[:, :, -1], -delta[:, :, -2] / (0.5 * vs.dzw[-1:]) * vs.c0[2:-2, 2:-2, -2])\n b_tri = update(\n b_tri,\n at[:, :, 1:-1],\n 1\n + delta[:, :, 1:-1] * vs.c0[2:-2, 2:-2, 1:-1] / vs.dzw[npx.newaxis, npx.newaxis, 1:-1]\n + delta[:, :, :-2] * vs.c0[2:-2, 2:-2, 1:-1] / vs.dzw[npx.newaxis, npx.newaxis, 1:-1]\n + settings.dt_tracer * vs.alpha_c[2:-2, 2:-2, 1:-1] * maxE_iw[2:-2, 2:-2, 1:-1],\n )\n b_tri = update(\n b_tri,\n at[:, :, -1],\n 1\n + delta[:, :, -2] / (0.5 * vs.dzw[-1:]) * vs.c0[2:-2, 2:-2, -1]\n + settings.dt_tracer * vs.alpha_c[2:-2, 2:-2, -1] * maxE_iw[2:-2, 2:-2, -1],\n )\n b_tri_edge = (\n 1\n + delta / vs.dzw * vs.c0[2:-2, 2:-2, :]\n + settings.dt_tracer * vs.alpha_c[2:-2, 2:-2, :] * maxE_iw[2:-2, 2:-2, :]\n )\n c_tri = update(\n c_tri, at[:, :, :-1], -delta[:, :, :-1] / vs.dzw[npx.newaxis, npx.newaxis, :-1] * vs.c0[2:-2, 2:-2, 1:]\n )\n d_tri = update(d_tri, at[...], vs.E_iw[2:-2, 2:-2, :, vs.tau] + settings.dt_tracer * forc[2:-2, 2:-2, :])\n d_tri_edge = (\n d_tri + settings.dt_tracer * vs.forc_iw_bottom[2:-2, 2:-2, npx.newaxis] / vs.dzw[npx.newaxis, npx.newaxis, :]\n )\n d_tri = update_add(d_tri, at[:, :, -1], settings.dt_tracer * vs.forc_iw_surface[2:-2, 2:-2] / (0.5 * vs.dzw[-1:]))\n\n sol = utilities.solve_implicit(\n a_tri, b_tri, c_tri, d_tri, water_mask, b_edge=b_tri_edge, d_edge=d_tri_edge, edge_mask=edge_mask\n )\n vs.E_iw = update(vs.E_iw, at[2:-2, 2:-2, :, vs.taup1], npx.where(water_mask, sol, vs.E_iw[2:-2, 2:-2, :, vs.taup1]))\n\n \"\"\"\n store IW dissipation\n \"\"\"\n vs.iw_diss = vs.alpha_c * maxE_iw * vs.E_iw[..., vs.taup1]\n\n \"\"\"\n add tendency due to lateral diffusion\n \"\"\"\n flux_east = allocate(state.dimensions, (\"xt\", \"yt\", \"zt\"))\n flux_north = allocate(state.dimensions, (\"xt\", \"yt\", \"zt\"))\n flux_top = allocate(state.dimensions, (\"xt\", \"yt\", \"zt\"))\n\n if settings.enable_idemix_hor_diffusion:\n flux_east = update(\n flux_east,\n at[:-1, :, :],\n settings.tau_h\n * 0.5\n * (vs.v0[1:, :, :] + vs.v0[:-1, :, :])\n * (vs.v0[1:, :, :] * vs.E_iw[1:, :, :, vs.tau] - vs.v0[:-1, :, :] * vs.E_iw[:-1, :, :, vs.tau])\n / (vs.cost[npx.newaxis, :, npx.newaxis] * vs.dxu[:-1, npx.newaxis, npx.newaxis])\n * vs.maskU[:-1, :, :],\n )\n\n flux_north = update(\n flux_north,\n at[:, :-1, :],\n settings.tau_h\n * 0.5\n * (vs.v0[:, 1:, :] + vs.v0[:, :-1, :])\n * (vs.v0[:, 1:, :] * vs.E_iw[:, 1:, :, vs.tau] - vs.v0[:, :-1, :] * vs.E_iw[:, :-1, :, vs.tau])\n / vs.dyu[npx.newaxis, :-1, npx.newaxis]\n * vs.maskV[:, :-1, :]\n * vs.cosu[npx.newaxis, :-1, npx.newaxis],\n )\n flux_north = update(flux_north, at[:, -1, :], 0.0)\n vs.E_iw = update_add(\n vs.E_iw,\n at[2:-2, 2:-2, :, vs.taup1],\n settings.dt_tracer\n * vs.maskW[2:-2, 2:-2, :]\n * (\n (flux_east[2:-2, 2:-2, :] - flux_east[1:-3, 2:-2, :])\n / (vs.cost[npx.newaxis, 2:-2, npx.newaxis] * vs.dxt[2:-2, npx.newaxis, npx.newaxis])\n + 
(flux_north[2:-2, 2:-2, :] - flux_north[2:-2, 1:-3, :])\n / (vs.cost[npx.newaxis, 2:-2, npx.newaxis] * vs.dyt[npx.newaxis, 2:-2, npx.newaxis])\n ),\n )\n\n \"\"\"\n add tendency due to advection\n \"\"\"\n if settings.enable_idemix_superbee_advection:\n flux_east, flux_north, flux_top = advection.adv_flux_superbee_wgrid(state, vs.E_iw[:, :, :, vs.tau])\n\n if settings.enable_idemix_upwind_advection:\n flux_east, flux_north, flux_top = advection.adv_flux_upwind_wgrid(state, vs.E_iw[:, :, :, vs.tau])\n\n if settings.enable_idemix_superbee_advection or settings.enable_idemix_upwind_advection:\n vs.dE_iw = update(\n vs.dE_iw,\n at[2:-2, 2:-2, :, vs.tau],\n vs.maskW[2:-2, 2:-2, :]\n * (\n -(flux_east[2:-2, 2:-2, :] - flux_east[1:-3, 2:-2, :])\n / (vs.cost[npx.newaxis, 2:-2, npx.newaxis] * vs.dxt[2:-2, npx.newaxis, npx.newaxis])\n - (flux_north[2:-2, 2:-2, :] - flux_north[2:-2, 1:-3, :])\n / (vs.cost[npx.newaxis, 2:-2, npx.newaxis] * vs.dyt[npx.newaxis, 2:-2, npx.newaxis])\n ),\n )\n vs.dE_iw = update_add(vs.dE_iw, at[:, :, 0, vs.tau], -flux_top[:, :, 0] / vs.dzw[0:1])\n vs.dE_iw = update_add(\n vs.dE_iw,\n at[:, :, 1:-1, vs.tau],\n -(flux_top[:, :, 1:-1] - flux_top[:, :, :-2]) / vs.dzw[npx.newaxis, npx.newaxis, 1:-1],\n )\n vs.dE_iw = update_add(\n vs.dE_iw, at[:, :, -1, vs.tau], -(flux_top[:, :, -1] - flux_top[:, :, -2]) / (0.5 * vs.dzw[-1:])\n )\n\n \"\"\"\n Adam Bashforth time stepping\n \"\"\"\n vs.E_iw = update_add(\n vs.E_iw,\n at[:, :, :, vs.taup1],\n settings.dt_tracer\n * (\n (1.5 + settings.AB_eps) * vs.dE_iw[:, :, :, vs.tau]\n - (0.5 + settings.AB_eps) * vs.dE_iw[:, :, :, vs.taum1]\n ),\n )\n\n return KernelOutput(E_iw=vs.E_iw, dE_iw=vs.dE_iw, iw_diss=vs.iw_diss)", "def _compute_R2_from_kernel(n, m, kernel):\r\n\r\n R2 = 0\r\n ind_vec = np.arange(m)\r\n for l in range(n):\r\n ind_vec.shape = (1,)*l + (m,) + (1,)*(n-l-1)\r\n _idx1 = (slice(None),)*l + (slice(1, None),) + (slice(None),)*(n-l-1)\r\n _idx2 = (slice(None),)*l + (slice(m-1),) + (slice(None),)*(n-l-1)\r\n R2 += 2 * np.sum(ind_vec[_idx1] * kernel[_idx1] * kernel[_idx2])\r\n\r\n return R2", "def process_kernels(kernels):\n kernels = np.where(kernels == 32767, np.nan, kernels/1000.)\n return kernels", "def invredc(A, B, C, D, y, v):\n # Description to help the user\n\n # calculate the number of samples of the output\n N = np.shape(y)[1] # the number of samples is the number of columns of y\n\n # calculate system's dimensions: number of states, number of inputs and number of outputs\n n = A.shape[0] # number of states\n # m=B.shape[1] # number of inputs, maybe it's not necessary\n p = C.shape[0] # number of outputs\n\n # A. 
Output Basis Change\n # here the output basis change and its important quantities and matrices are calculated\n\n # rank of the feedforward matrix:\n r = np.linalg.matrix_rank(D)\n\n # to calculate the S1 matrix, we have partitioned the matrix into [S1a;S2a]\n # firstly, we obtain S1a\n # since D0 must possess full row rank (rank(D0)=r), a simple way to do that is to use the scipy.linalg.orth function\n D0 = (scilin.orth(D.transpose())).transpose()\n # calculating S1a as a solution of the problem S1a*D=D0 using the pseudoinverse (Moore-Penrose inverse):\n S1at = scilin.pinv(D.transpose()) @ D0.transpose()\n S1a = S1at.transpose()\n # S1b is the null space (kernel) of D from the left\n S1b = (scilin.null_space(D.transpose())).transpose()\n # assembling the S1 matrix\n S1 = np.concatenate((S1a, S1b), axis=0) # axis=0 concatenate vertically (row wise)\n\n # the C2 matrix is obtained by a partition of S1*C, which can by also obtained with the use of S1b\n # calculating C2\n C2 = S1b @ C\n # rank of C2\n q = np.linalg.matrix_rank(C2)\n\n # calculating the matrix S2, which is very similar to S1, and it is also partitioned as S2=[S2a;S2b]\n # since C2bar has to possess full row rank (rank(C2)=q)\n C2tilde = (scilin.orth(C2.transpose())).transpose()\n # calculating S2a as a solution of the problem S2a*C2=C2bar using the pseudoinverse (Moore-Penrose inverse):\n S2at = scilin.pinv(C2.transpose()) @ C2tilde.transpose()\n S2a = S2at.transpose()\n # S2b is the null space (kernel) of C2 from the left\n S2b = (scilin.null_space(C2.transpose())).transpose()\n # assembling the S2 matrix\n S2 = np.concatenate((S2a, S2b), axis=0) # axis=0 concatenate vertically (row wise)\n\n # now that we have S1 and S2, we can assemble the S matrix\n # we defined the notation: S=Sa*S1, where Sa is partitioned as Sa=[I 0;0 S2]=[Sa1 Sa2]\n # partitions of Sa\n Sa11 = np.identity(r)\n Sa12 = np.zeros((r, p - r))\n Sa21 = np.zeros((p - r, r))\n Sa22 = S2\n # assembling the columns of Sa, Sa=[Sa1 Sa2]\n Sa1 = np.concatenate((Sa11, Sa21), axis=0) # concatenate vertically (row wise)\n Sa2 = np.concatenate((Sa12, Sa22), axis=0) # concatenate vertically (row wise)\n # finally, assembling the matrix Sa:\n Sa = np.concatenate((Sa1, Sa2), axis=1) # concatenate horizontally (column wise)\n # obtaining the S matrix by the multiplication\n S = Sa @ S1\n\n # doing the transformation of the output ytilde=Sy\n ytilde = S @ y\n # we'll not partition the output yet, first, we'll do the State-Space Basis Change\n\n # B. State-Space Basis Change\n # in this section we'll do the state-space basis change of the system\n\n # the first step is the calculation of the transformation matrix, as defined in the paper\n # we'll call T^{-1} as M, so C2tilde*M=[0 I]. And we'll partition M as M=[M1 M2]. C2tilde*M=[C2tilde*M1 C2tilde*M2]\n # since rank(C2tilde)=q, nullity(C2tilde)=n-q\n # M1 can be defined as a basis of the null space of C2tilde\n M1 = scilin.null_space(C2tilde)\n # and M2 is the solution of the equation C2tilde*M2=I. 
To calculate this solution, we'll use the pseudoinverse again\n M2 = scilin.pinv(C2tilde)\n # now, we assemble the M matrix with the concatenate function\n M = np.concatenate((M1, M2), axis=1) # concatenate horizontally (column wise)\n # finally, we calculate the T matrix by inverting M\n T = np.linalg.inv(M)\n\n # now, we proceed to the transformation of the state-space matrices\n # transformation of the system's dynamic matrix\n Atilde = T @ A @ M\n # transformation of the system's input matrix\n Btilde = T @ B\n # transformation of the system's output matrix\n Ctilde = C @ M\n # transformation of the system's feedforward matrix (it's the same)\n # Dtilde=D # actually, this step is not necessary\n # transformation of the additional system input v\n vtilde = T @ v\n\n # in the next step, we need to partition the new system's matrices and outputs\n\n # partition of the outputs\n # y1 has r lines and N columns\n y1 = ytilde[0:r, :]\n # y2 has q lines and N columns, and it starts at the r+1 line (which in python is the r line since the vector index starts at 0)\n y2 = ytilde[r : r + q, :]\n # y3 is irrelevant, then, it will be neglected\n\n # partitioning the system matrices\n # firstly, the system's dynamic matrix Atilde\n A11 = Atilde[0 : n - q, 0 : n - q]\n A12 = Atilde[0 : n - q, n - q : n]\n A21 = Atilde[n - q : n, 0 : n - q]\n A22 = Atilde[n - q : n, n - q : n]\n # the system's input matrix Btilde\n B1 = Btilde[0 : n - q, :]\n B2 = Btilde[n - q : n, :]\n # the system's output matrix Ctilde\n C11 = Ctilde[0:r, 0 : n - q]\n C12 = Ctilde[0:r, n - q : n]\n\n # partition the additional input vtilde\n v1 = vtilde[0 : n - q, :]\n v2 = vtilde[n - q : n, :]\n\n # C. Reduction of State-Space Dimension\n # now, we'll do the reduction of the state-space system\n\n # following the equations in the paper\n # calculating y1hat\n y1hat = y1 - C12 @ y2\n # we have to discard the last sample to make the dimensions of y1hat and y2hat match\n y1hat = y1hat[:, 0 : N - 1]\n\n # calculating y2hat\n # preallocating variables before the loop\n y2hat = np.zeros((q, N - 1))\n # running the loop\n for k in range(\n 0, N - 1\n ): # the loop has to run N-1 times, from 0 to N-2, because of y2[k+1] on the equation\n y2hat[:, k] = y2[:, k + 1] - A22 @ y2[:, k] - v2[:, k]\n\n # assembling the reduced system's output vector\n yhat = np.concatenate((y1hat, y2hat), axis=0)\n\n # calculating the additional input vhat\n vhat = v1 + A12 @ y2\n # discarding the last sample\n vhat = vhat[:, 0 : N - 1]\n\n # now, we'll assemble the reduced state-space system\n # reduced system's dynamic matrix\n Ahat = A11\n # reduced system's input matrix\n Bhat = B1\n # reduced system's output matrix\n Chat = np.concatenate((C11, A21), axis=0) # concatenate vertically (row wise)\n # reduced system's feedforward matrix\n Dhat = np.concatenate((D0, B2), axis=0) # concatenate vertically (row wise)\n # calculating rhat, the new rank of the feedforward matrix Dhat (an important quantity of the algorithm)\n rhat = np.linalg.matrix_rank(Dhat)\n\n # calculating the new dimension of the reduced system\n # reduced system's state vector dimension\n nhat = n - q\n # reduced system's output vector dimension\n phat = r + q\n\n return Ahat, Bhat, Chat, Dhat, yhat, vhat, nhat, phat, rhat", "def Poisson(ni, f, x, v, n):\n ne = DECSKS.lib.density.single_integration(f[n,:,:], of = x, wrt = v)\n\n xi = np.zeros(x.N)\n E_hat = np.zeros(x.N, dtype = complex) # container\n\n\n # define wave indices\n for r in range(x.N):\n if r <= x.N/2 :\n xi[r] = 2*np.pi*r / 
x.L\n else:\n xi[r] = 2*np.pi*(r - x.N) / x.L\n\n # total charge density, n(x), len(n) = Nx\n\n n = ni - ne\n\n N = np.fft.fft(n)\n A = max(N)\n eps = 2.0e-15\n xi_min = A*eps\n for r in range(x.N):\n if np.abs(N[r]) < xi_min:\n N[r] = 0\n\n # E_hat[0] = 0 from periodic BCs, i.e. because N[0] = 0, quasineutrality\n # equivalently, E_hat[0] is the DC field, of which there is none in\n # a quasineutral system of charged particles, only flucutations are present\n # E_hat[0] = 0 already from E_hat vector initialization\n\n for each in range(1,len(xi)):\n\n E_hat[each] = 1 / (1j*xi[each]) * N[each] # Electric field in Fourier space\n\n\n E = np.real(np.fft.ifft(E_hat)) # Electric field in configurational space\n\n\n return E", "def gkern2(kernlen=21, nsig=3):\n # create nxn zeros\n inp = np.zeros((kernlen, kernlen))\n # set element at the middle to one, a dirac delta\n inp[kernlen//2, kernlen//2] = 1\n # gaussian-smooth the dirac, resulting in a gaussian filter mask\n kernel = scipy.ndimage.filters.gaussian_filter(inp, nsig)\n\n return kernel", "def edge_kernel(isotropic):\n if isotropic:\n edge_kernel = - 1.0 * np.ones([3, 3, 3], np.float64)\n edge_kernel[1, 1, 1] = 26.0\n else:\n edge_kernel = - 1.0 * np.ones([1, 3, 3], np.float64)\n edge_kernel[0, 1, 1] = 8\n return edge_kernel", "def _kernel_F2(matrix_in) -> List[np.ndarray]: # pylint: disable=invalid-name\n size = matrix_in.shape\n kernel = []\n matrix_in_id = np.vstack((matrix_in, np.identity(size[1])))\n matrix_in_id_ech = (_row_echelon_F2(matrix_in_id.transpose())).transpose()\n\n for col in range(size[1]):\n if np.array_equal(\n matrix_in_id_ech[0 : size[0], col], np.zeros(size[0])\n ) and not np.array_equal(matrix_in_id_ech[size[0] :, col], np.zeros(size[1])):\n kernel.append(matrix_in_id_ech[size[0] :, col])\n\n return kernel", "def fftkernel(x, w):\n L = len(x)\n Lmax = L + 3 * w\n n = nextpow2(Lmax)\n X = np.fft.fft(x, n)\n f = np.arange(0, n, 1.0) / n\n f = np.concatenate((-f[:int(n / 2)], f[int(n / 2):0:-1]))\n K = np.exp(-0.5 * (w * 2 * np.pi * f) ** 2)\n y = np.fft.ifft(X * K, n)\n y = y[:L].copy()\n return y", "def gpuSIRT(tomo,angles,center,input_params):\n print('Starting GPU SIRT recon')\n #allocate space for final answer \n af.set_device(input_params['gpu_device']) #Set the device number for gpu based code\n #Change tomopy format\n new_tomo=np.transpose(tomo,(1,2,0)) #slice, columns, angles\n im_size = new_tomo.shape[1]\n num_slice = new_tomo.shape[0]\n num_angles=new_tomo.shape[2]\n pad_size=np.int16(im_size*input_params['oversamp_factor'])\n# nufft_scaling = (np.pi/pad_size)**2\n num_iter = input_params['num_iter']\n #Initialize structures for NUFFT\n sino={}\n geom={}\n sino['Ns'] = pad_size#Sinogram size after padding\n sino['Ns_orig'] = im_size #size of original sinogram\n sino['center'] = center + (sino['Ns']/2 - sino['Ns_orig']/2) #for padded sinogram\n sino['angles'] = angles\n \n #Initialize NUFFT parameters\n nufft_params = init_nufft_params(sino,geom)\n temp_y = afnp.zeros((sino['Ns'],num_angles),dtype=afnp.complex64)\n temp_x = afnp.zeros((sino['Ns'],sino['Ns']),dtype=afnp.complex64)\n x_recon = afnp.zeros((num_slice/2,sino['Ns_orig'],sino['Ns_orig']),dtype=afnp.complex64) \n pad_idx = slice(sino['Ns']/2-sino['Ns_orig']/2,sino['Ns']/2+sino['Ns_orig']/2)\n\n #allocate output array\n rec_sirt_final=np.zeros((num_slice,sino['Ns_orig'],sino['Ns_orig']),dtype=np.float32)\n\n #Pre-compute diagonal scaling matrices ; one the same size as the image and the other the same as data\n #initialize an image of all ones\n 
x_ones= afnp.ones((sino['Ns_orig'],sino['Ns_orig']),dtype=afnp.complex64)\n temp_x[pad_idx,pad_idx]=x_ones\n temp_proj=forward_project(temp_x,nufft_params) #*(sino['Ns']*afnp.pi/2)\n R = 1/afnp.abs(temp_proj)\n R[afnp.isnan(R)]=0\n R[afnp.isinf(R)]=0\n R=afnp.array(R,dtype=afnp.complex64)\n \n #Initialize a sinogram of all ones\n y_ones=afnp.ones((sino['Ns_orig'],num_angles),dtype=afnp.complex64)\n temp_y[pad_idx]=y_ones\n temp_backproj=back_project(temp_y,nufft_params) #*nufft_scaling/2\n C = 1/(afnp.abs(temp_backproj))\n C[afnp.isnan(C)]=0\n C[afnp.isinf(C)]=0\n C=afnp.array(C,dtype=afnp.complex64)\n \n #Move all data to GPU\n slice_1=slice(0,num_slice,2)\n slice_2=slice(1,num_slice,2)\n gdata=afnp.array(new_tomo[slice_1]+1j*new_tomo[slice_2],dtype=afnp.complex64)\n \n #loop over all slices\n for i in range(num_slice/2):\n for iter_num in range(num_iter):\n #filtered back-projection\n temp_x[pad_idx,pad_idx]=x_recon[i]\n Ax = forward_project(temp_x,nufft_params)\n temp_y[pad_idx]=gdata[i]\n x_recon[i] = x_recon[i]+(C*back_project(R*(temp_y-Ax),nufft_params))[pad_idx,pad_idx] #nufft_scaling\n\n #Move to CPU\n #Rescale result to match tomopy\n rec_sirt=np.array(x_recon,dtype=np.complex64)\n rec_sirt_final[slice_1]=np.array(rec_sirt.real,dtype=np.float32)\n rec_sirt_final[slice_2]=np.array(rec_sirt.imag,dtype=np.float32)\n return rec_sirt_final", "def calculate_renyi(reduced_density_matrix_evals, n=2):\n result = 0.0\n if n == 1:\n\tresult = calculate_entropy(reduced_density_matrix_evals)\n else:\n result = log(sum(power(reduced_density_matrix_evals, n)))\n\tresult /= (1.0-n)\n return result", "def U(n,gamma,A,y):\n return (2*A*gamma*math.sin(math.pi*n*y/A) - 2*math.pi*n*math.cos (math.pi*n*y/A))/(A**2*gamma**2 + math.pi**2 * n**2)", "def __filter_to_bc_matrix(self, filter_mat, n):\n\n k = filter_mat.shape[1]\n\n # When an (n,n) image is convoled with a (k,k) filter, the reesulting matrix\n # has dimension (n - k + 1) assuming a stride of 1. And the total number of\n # pixels in the input image is n ** 2 (assuming the image is square)\n output_im_size, num_img_pixels = n - k + 1, n ** 2\n\n # number of rows in the output matrix is (n - k + 1) ** 2\n num_rows = output_im_size ** 2\n\n circ_mats = []\n\n # loop over the rows of the filter matrix\n for row_idx in range(k):\n\n # get the current row of the input filter matrix\n row = filter_mat[row_idx, :]\n\n # create a vector [x_1 0 0 ... 0 x_{k**2} x_{k**2-1} ... 
x_2] where\n # x_1, ..., x_{k**2} are the values of the flattened kernel matrix\n circ_vector = np.concatenate((\n np.array([row[0]]),\n np.zeros(n - k),\n np.flip(row[1:])\n ), axis=0)\n\n # create a circulant matrix with this vector\n row_circ = circulant(circ_vector)\n\n # remove rows that convolve the bottom row with the top row\n top_row_circ = row_circ[0:output_im_size, :]\n\n # add this matrix to the list of circulant matrices\n circ_mats.append(top_row_circ)\n\n # add n n - k matrices of zeros to the list (as long as n > k)\n for i in range(n - k):\n circ_mats.append(np.zeros_like(circ_mats[0]))\n\n # initialize V to be a ((n - k + 1) ** 2, n ** 2) matrix of zeros\n V = np.zeros((num_rows, num_img_pixels))\n\n # loop over the blocked rows of V - each block has (n - k + 1) rows\n # and there are (n - k + 1) total blocks\n for i in range(output_im_size):\n\n # concatenate all of the block matrices in circ_mats\n row = np.concatenate(tuple(circ_mats), axis=1)\n\n # broadcast this block row into V\n V[i * output_im_size:(i+1) * output_im_size, :] = row\n\n # move the last block matrix of circ_mats to the front for the next iteration\n circ_mats.insert(0, circ_mats.pop())\n\n return V", "def Char_Gate(NV,res ,B_field=400):\n\n\n #data = np.loadtxt(\"NV_Sim_8.dat\") #Placeholder data to test the script\n #NV = np.vstack((data[:,3],data[:,4]))\n #physical constants\n gamma_c = 1.071e3 #g-factor for C13 in Hz/G\n #Model parameters\n omega_larmor = 2*np.pi*gamma_c*B_field\n tau_larmor = 2*np.pi/omega_larmor\n tau = res[0]\n n_pulses = int(res[1]*2) #So that we do a pi -pulse\n\n Ix = 0.5 * np.array([[0,1],[1,0]])\n Iz = 0.5* np.array([[1,0],[0,-1]])\n H0 = (omega_larmor)*Iz\n exH0 =linalg.expm(-1j*H0*tau)\n\n\n M = np.zeros(np.shape(NV)[0])\n for idC in range(np.shape(NV)[0]):\n A= 2*np.pi*NV[idC,0]\n B= 2*np.pi*NV[idC,1] #Converts to radial frequency in Hz/G\n H1 = (A+omega_larmor) *Iz +B*Ix\n exH1 = linalg.expm(-1j*H1*tau)\n V0 = exH0.dot(exH1.dot(exH1.dot(exH0)))\n V1 = exH1.dot(exH0.dot(exH0.dot(exH1)))\n n0 = Calc_axis(V0)\n n1 =Calc_axis(V1)\n phi = np.real(2*np.arccos(np.trace(V0)/2))\n M[idC] = 1 - (1-np.dot(n0,n1))*np.sin(n_pulses * phi /2 )**2\n\n Signal = -M.prod()\n F = (1-(Signal+1)/2)\n return F", "def _sigmainf(N, h, m, dW, Km0, Pm0):\n M = m*(m-1)/2\n Im = broadcast_to(np.eye(m), (N, m, m))\n IM = broadcast_to(np.eye(M), (N, M, M))\n Ims0 = np.eye(m**2)\n factor1 = broadcast_to((2.0/h)*np.dot(Km0, Ims0 - Pm0), (N, M, m**2))\n factor2 = _kp2(Im, _dot(dW, _t(dW)))\n factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))\n return 2*IM + _dot(_dot(factor1, factor2), factor3)", "def test_10_kernels(self):\n ra0, dec0 = CRVAL\n res = 0.01 * DEG\n\n # Test zenithal -- (ra0, dec0) is the reference point.\n for proj in ['TAN', 'ZEA']:\n wcsk = coords.get_wcs_kernel(proj, ra0, dec0, res)\n msg = f'Check crpix for {proj}'\n self.assertAlmostEqual(wcsk.wcs.crpix[0], 1, delta=TOL_RAD, msg=msg)\n self.assertAlmostEqual(wcsk.wcs.crpix[1], 1, delta=TOL_RAD, msg=msg)\n\n # Test cylindrical -- pixell puts the crval[1] on the equator\n # and dec0 is used for the conformal latitude.\n for proj in ['CAR', 'CEA']:\n wcsk = coords.get_wcs_kernel(proj, ra0, dec0, res)\n msg = f'Check crpix for {proj}'\n self.assertAlmostEqual(wcsk.wcs.crpix[0], 1, delta=TOL_RAD, msg=msg)\n self.assertNotAlmostEqual(wcsk.wcs.crpix[1], 1, delta=TOL_RAD, msg=msg)\n\n # This is going to break.\n fp = FP(xi =[0., -0.01*DEG],\n eta=[0., -0.01*DEG])\n sight = get_sightline()\n tod = 
core.AxisManager(core.LabelAxis('dets', ['a']))\n fp = coords.get_footprint(tod, wcs_kernel=wcsk, focal_plane=fp, sight=sight)", "def fun_no_cut(self, reg_x_len, n_size, block_index, n_loop):\n data_input_ub = self.tik_instance.Tensor(self.dtype_x,\n self.shape_v,\n name=\"data_input_ub\",\n scope=tik.scope_ubuf)\n input_indices_ub = self.tik_instance.Tensor(self.dtype_indices, (8,),\n name=\"input_indices_ub\",\n scope=tik.scope_ubuf)\n self.tik_instance.data_move(input_indices_ub[0],\n self.input_indices_gm[0], 0, 1, 1, 0, 0)\n reg_start = self.tik_instance.Scalar(dtype=\"int32\")\n reg_start.set_as(input_indices_ub[0])\n reg_burst = self.tik_instance.Scalar(dtype=\"int32\")\n if self.dtype_x in (\"float32\", \"int32\"):\n reg_burst.set_as(reg_x_len // 8)\n else:\n reg_burst.set_as(reg_x_len // 16)\n\n with self.tik_instance.for_range(0, n_loop) as n_index:\n with self.tik_instance.if_scope(\n block_index * n_size + n_index != reg_start):\n self.tik_instance.data_move(\n data_input_ub[0],\n self.input_x_gm[(block_index * n_size + n_index) *\n reg_x_len], 0, 1, reg_burst, 0, 0)\n with self.tik_instance.else_scope():\n self.tik_instance.data_move(data_input_ub[0],\n self.input_v_gm[0], 0, 1, reg_burst,\n 0, 0)\n self.tik_instance.data_move(\n self.output_y_gm[(block_index * n_size + n_index) * reg_x_len],\n data_input_ub[0], 0, 1, reg_burst, 0, 0)", "def gkern1(kernlen=21, nsig=3):\n interval = (2*nsig+1.)/(kernlen)\n x = np.linspace(-nsig-interval/2., nsig+interval/2., kernlen+1) \n kern1d = np.diff(scipy.stats.norm.cdf(x))\n kernel_raw = np.sqrt(np.outer(kern1d, kern1d))\n kernel = kernel_raw/kernel_raw.sum()\n \n return kernel", "def E_fermi(n_e):\n return n_e / nu0 # in K", "def angular1(brdf_settings):\n # const\n scaleconst = 2*np.pi/366\n\n locals().update(brdf_settings)\n\n def scale(x, a=5, b=10, xmin=-1, xmax=1):\n \"\"\"\n rescale the sin\n a new min\n b = new max\n xmin = min of x\n xmax = max of x\n \"\"\"\n return (b - a)*(x - xmin)/(xmax - xmin) + a\n\n t = np.linspace(0, 2*np.pi, 366)\n\n\n noise = np.random.normal(0, 2*np.pi/100.0, size=366)\n\n szaMAX = 60\n szaMIN = 10\n sza_off = 0.5*np.pi # in pi\n\n sza_t = np.sin(noise + t + sza_off)\n SZA = scale(sza_t, a=szaMIN, b=szaMAX)\n\n\n # noisy it a bit?\n\n \"\"\"\n vza cycle\n \"\"\"\n vzaMAX = 45\n vzaMIN = 0\n vza_cycle = 6 # in days\n\n vza_t = np.sin(noise + t/(vza_cycle/366.0))\n VZA = scale(vza_t, a=vzaMIN, b=vzaMAX)\n\n \"\"\"\n raa cycle\n \"\"\"\n raaMAX = 360\n raaMIN = 0\n raa_cycle = 32 # in days\n\n raa_t = np.sin(t/(raa_cycle/366.0))\n RAA = scale(noise + vza_t, a=raaMAX, b=raaMIN)\n\n\n \"\"\"\n only need to return kernels really\n \"\"\"\n kerns = Kernels(VZA, SZA, RAA,\n LiType='Sparse', doIntegrals=False,\n normalise=True, RecipFlag=True, RossHS=False, MODISSPARSE=True,\n RossType='Thick',nbar=0.0)\n return kerns, VZA, SZA, RAA", "def kernel(self, cosmo, z, ell):\n z = np.atleast_1d(z)\n # Extract parameters\n pzs, m = self.params[:2]\n kernel = weak_lensing_kernel(cosmo, pzs, z, ell)\n # If IA is enabled, we add the IA kernel\n if self.config[\"ia_enabled\"]:\n bias = self.params[2]\n kernel += nla_kernel(cosmo, pzs, bias, z, ell)\n # Applies measurement systematics\n if isinstance(m, list):\n m = np.expand_dims(np.stack([mi for mi in m], axis=0), 1)\n kernel *= 1.0 + m\n return kernel", "def F_trans(self):\n rho_H1 = self.edp_par['rho_H1'].value\n Z_H1 = self.edp_par['Z_H1'].value\n sigma_H1 = self.edp_par['sigma_H1'].value\n rho_M = self.edp_par['rho_M'].value\n sigma_M = 
self.edp_par['sigma_M'].value\n psi = self.edp_par['psi'].value \n common_scale = self.edp_par['common_scale'].value\n \n # Calculate the intermediate variables\n alpha = self.qz*cos(psi) - self.qx*sin(psi)\n Z_CH2 = Z_H1 - sigma_H1\n Z_W = Z_H1 + sigma_H1\n DeltaZ_H = Z_W - Z_CH2\n \n # Calculate the Gaussian part \n FG = -rho_M*sigma_M * exp(-0.5*(alpha*sigma_M)**2)\n FG += 2*rho_H1*sigma_H1 * cos(alpha*Z_H1) * exp(-0.5*(alpha*sigma_H1)**2)\n FG *= np.sqrt(2*pi)\n \n # Calculate the strip part\n FS = -2 * sin(alpha*Z_CH2) / alpha\n \n # Calculate the bridging part\n FB = 1 / (alpha + pi/DeltaZ_H)\n FB += 1 / (alpha - pi/DeltaZ_H)\n FB *= sin(alpha*Z_W) + sin(alpha*Z_CH2)\n FB *= 0.5\n FB -= (sin(alpha*Z_W)-sin(alpha*Z_CH2)) / alpha\n \n return common_scale * (FG + FS + FB)", "def gpuGridrec(tomo,angles,center,input_params):\n\n print('Starting GPU NUFFT recon')\n #allocate space for final answer \n af.set_device(input_params['gpu_device']) #Set the device number for gpu based code\n #Change tomopy format\n new_tomo=np.transpose(tomo,(1,2,0)) #slice, columns, angles\n im_size = new_tomo.shape[1]\n num_slice = new_tomo.shape[0]\n num_angles=new_tomo.shape[2]\n pad_size=np.int16(im_size*input_params['oversamp_factor'])\n# nufft_scaling = (np.pi/pad_size)**2\n #Initialize structures for NUFFT\n sino={}\n geom={}\n sino['Ns'] = pad_size#Sinogram size after padding\n sino['Ns_orig'] = im_size #size of original sinogram\n sino['center'] = center + (sino['Ns']/2 - sino['Ns_orig']/2) #for padded sinogram\n sino['angles'] = angles\n sino['filter'] = input_params['fbp_filter_param'] #Paramter to control strength of FBP filter normalized to [0,1]\n\n #Initialize NUFFT parameters\n nufft_params = init_nufft_params(sino,geom)\n rec_nufft = afnp.zeros((num_slice/2,sino['Ns_orig'],sino['Ns_orig']),dtype=afnp.complex64)\n Ax = afnp.zeros((sino['Ns'],num_angles),dtype=afnp.complex64)\n pad_idx = slice(sino['Ns']/2-sino['Ns_orig']/2,sino['Ns']/2+sino['Ns_orig']/2)\n rec_nufft_final=np.zeros((num_slice,sino['Ns_orig'],sino['Ns_orig']),dtype=np.float32)\n \n #Move all data to GPU\n slice_1=slice(0,num_slice,2)\n slice_2=slice(1,num_slice,2)\n gdata=afnp.array(new_tomo[slice_1]+1j*new_tomo[slice_2],dtype=afnp.complex64)\n x_recon = afnp.zeros((sino['Ns'],sino['Ns']),dtype=afnp.complex64)\n #loop over all slices\n for i in range(0,num_slice/2):\n Ax[pad_idx,:]=gdata[i]\n #filtered back-projection \n rec_nufft[i] = (back_project(Ax,nufft_params))[pad_idx,pad_idx]\n\n\n #Move to CPU\n #Rescale result to match tomopy\n rec_nufft=np.array(rec_nufft,dtype=np.complex64) #*nufft_scaling\n rec_nufft_final[slice_1]=np.array(rec_nufft.real,dtype=np.float32)\n rec_nufft_final[slice_2]=np.array(rec_nufft.imag,dtype=np.float32)\n return rec_nufft_final", "def fgyro_Nash(xy, xy0, NL, theta, OmK, Omg, Mm, NP, nn):\n xgrav = Omg * (xy[:, 1] - xy0[:, 1])\n vecx = np.array([[OmK[i, j] * 0.5 * (xy[i, 1] - xy0[i, 1] - xy[NL[i, j], 1] + xy0[NL[i, j], 1] +\n np.cos(2 * theta[i, j]) * (\n -xy[i, 1] + xy0[i, 1] + xy[NL[i, j], 1] - xy0[NL[i, j], 1]) +\n np.sin(2 * theta[i, j]) * (\n xy[i, 0] - xy0[i, 0] - xy[NL[i, j], 0] + xy0[NL[i, j], 0]))\n for j in range(nn)] for i in range(NP)])\n\n ygrav = -Omg * (xy[:, 0] - xy0[:, 0])\n vecy = np.array([[-OmK[i, j] * 0.5 * (xy[i, 0] - xy0[i, 0] - xy[NL[i, j], 0] + xy0[NL[i, j], 0] +\n np.cos(2 * theta[i, j]) * (\n xy[i, 0] - xy0[i, 0] - xy[NL[i, j], 0] + xy0[NL[i, j], 0]) +\n np.sin(2 * theta[i, j]) * (\n xy[i, 1] - xy0[i, 1] - xy[NL[i, j], 1] + xy0[NL[i, j], 1]))\n for j in range(nn)] for i in 
range(NP)])\n\n dx = np.sum(vecx, axis=-1)\n dy = np.sum(vecy, axis=-1)\n ftx = np.hstack(((dx + xgrav).reshape(NP, 1), (dy + ygrav).reshape(NP, 1)))\n return ftx", "def decalage_haut_image(k,fichier_in,fichier_out):\n\n M = pgm_vers_matrice(fichier_in)\n C = [[0,0,0],[0,0,0],[0,1,0]]\n for __ in range(k):\n M = convolution_entiere(C,M)\n matrice_vers_pgm(M,fichier_out)\n return" ]
[ "0.7213641", "0.63983333", "0.61867654", "0.61385816", "0.61052585", "0.60678124", "0.6047397", "0.59877527", "0.598379", "0.59239495", "0.584012", "0.5723425", "0.57071584", "0.5696312", "0.55959344", "0.556116", "0.5512384", "0.5488459", "0.5458828", "0.5453336", "0.543654", "0.5372929", "0.53425217", "0.5331985", "0.53317183", "0.5323391", "0.5307182", "0.5300855", "0.52993053", "0.5298185", "0.52957135", "0.5289682", "0.5282457", "0.5277689", "0.5276658", "0.52300084", "0.52075166", "0.5196543", "0.5192771", "0.5186693", "0.5182217", "0.5179946", "0.5179946", "0.51747704", "0.51733077", "0.51719624", "0.51699185", "0.51699185", "0.516963", "0.5161396", "0.51509196", "0.5140775", "0.5138707", "0.5135967", "0.5087184", "0.508458", "0.50753486", "0.5071152", "0.5069956", "0.50606793", "0.5046429", "0.50426096", "0.5018584", "0.5002792", "0.49993837", "0.4985533", "0.4977755", "0.49736023", "0.4968082", "0.496618", "0.49619836", "0.49600446", "0.49598753", "0.49571708", "0.4948504", "0.4943493", "0.4940542", "0.4935103", "0.4927355", "0.4926263", "0.49251878", "0.4916516", "0.49137503", "0.49074373", "0.49047744", "0.49019915", "0.48972526", "0.48948836", "0.4894262", "0.4892439", "0.4887822", "0.4885747", "0.48844063", "0.48833847", "0.48833382", "0.48818752", "0.48798326", "0.4878613", "0.4875236", "0.48656762" ]
0.7255298
0
Reproducing kernel. Calculate the inverse Funk-Radon transform and inverse spherical Laplacian of the reproducing kernel for the even-degree subspace of spherical harmonics of maximum degree N, i.e., calculate H(\mu) = \Delta^{-1} G^{-1} K_e(\mu), where \Delta is the spherical Laplacian and G is the Funk-Radon transform. The calculation is done in spectral space.
def inv_funk_radon_even_kernel(mu, N):
    # Check that -1 <= mu <= 1
    mu = np.clip(mu, -1, 1)

    # Need Legendre polynomials
    legPolys = legp(mu, N)
    p_at_zero = legp(0, N)

    coefs_num = 2*np.arange(0, N+1) + 1
    coefs_den = np.arange(2, N+1, 2) * (np.arange(2, N+1, 2) + 1)

    ker = coefs_num[2::2]*legPolys[2::2] / (p_at_zero[2::2] * coefs_den)

    return ker.sum() / (8.0*np.pi*np.pi)
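For reference, this is the closed-form sum the function evaluates, read directly off the code above (even degrees l = 2, 4, ..., N; P_l are the Legendre polynomial values returned by legp):

```latex
H(\mu) \;=\; \frac{1}{8\pi^{2}}
\sum_{\substack{l=2 \\ l\ \mathrm{even}}}^{N}
\frac{2l+1}{P_{l}(0)\, l(l+1)}\, P_{l}(\mu)
```

Dividing each term by l(l+1) and by P_l(0) is what applies the inverse spherical Laplacian and the inverse Funk-Radon transform degree by degree in spectral space, as the query describes.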
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inv_funk_radon_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n p_at_zero = legp(0, N)\n coefs = 2*np.arange(0, N+1, 2) + 1\n ker = coefs*legPolys[::2]/p_at_zero[::2]\n return ker.sum() / (8*np.pi)", "def even_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n \n\n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs[0::2]*legPolys[0::2] \n\n return ker.sum() / (4.0*np.pi)", "def kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n \n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs*legPolys \n\n return ker.sum() / (4.0*np.pi)", "def even_kernel_der(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n #Derivatives of Legendre polynomials\n DlegPolys = legp_der(mu, N)\n \n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs[0::2]*DlegPolys[0::2] \n\n return ker.sum() / (4.0*np.pi)", "def f(k):\n return k * k * k * k * pk(k, suppression) * spherical_jn(2, k * r)", "def f(k):\n return k * k * k * k * pk(k, suppression) * spherical_jn(0, k * r)", "def f(k):\n return k * k * k * pk(k, suppression) * spherical_jn(1, k * r)", "def f(k):\n return k * k * pk(k, suppression) * spherical_jn(0, k * r)", "def init_stitch(N):\n\tif N is None:\n\t\tN = np.int32(HII_DIM) #prepare for stitching\n\tMETA_GRID_SIZE = DIM/N\n\tM = np.int32(HII_DIM/META_GRID_SIZE)\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN/DIM)\n\tprint 'scale', scale\n\tHII_scale = np.float32(BOX_LEN/HII_DIM)\n\tshape = (DIM,DIM,N)\n\tstitch_grid_size = (DIM/(block_size[0]),\n\t\t\t\t\t\tDIM/(block_size[0]),\n\t\t\t\t\t\tN/(block_size[0]))\n\tHII_stitch_grid_size = (HII_DIM/(block_size[0]),\n\t\t\t\t\t\tHII_DIM/(block_size[0]),\n\t\t\t\t\t\tM/(block_size[0]))\n\t#ratio of large box to small size\n\tkernel_source = open(cmd_folder+\"/initialize_stitch.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'DIM': DIM, \n\t\t'VOLUME': VOLUME,\n\t\t'META_BLOCKDIM': N\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_stitch = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\tplan2d = Plan((np.int64(DIM), np.int64(DIM)), dtype=np.complex64)\n\tplan1d = Plan((np.int64(DIM)), dtype=np.complex64)\n\tprint \"init pspec\"\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\t#hbox_large = pyfftw.empty_aligned((DIM, DIM, DIM), dtype='complex64')\n\thbox_large = np.zeros((DIM, DIM, DIM), dtype=np.complex64)\n\t#hbox_small = np.zeros(HII_shape, dtype=np.float32)\n\t#hbox_large = n\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\n\t# Set up pinned memory for transfer\n\t#largebox_hs = cuda.aligned_empty(shape=shape, dtype=np.float32, alignment=resource.getpagesize())\n\tlargebox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.float32)\n\tlargecbox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.complex64)\n\n\tlargebox_d = gpuarray.zeros(shape, 
dtype=np.float32)\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tprint \"init boxes\"\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t# MRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=meta_x*N**3)\n\t\tinit_stitch(largebox_d, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tinit_stitch(largebox_d_imag, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largecbox_pin.copy()\n\t#if want to get velocity need to use this\n\tif True:\n\t\tprint \"saving kbox\"\n\t\tnp.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\n\tprint \"Executing FFT on device\"\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint hbox_large.dtype\n\tprint \"Finished FFT on device\"\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\t\n\tif True:\n\t\tprint \"loading kbox\"\n\t\thbox_large = np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\t#cuda.memcpy_htod_async(largebox_d, largebox_pin)\n\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tHII_filter(largebox_d, DIM, np.int32(meta_z), ZERO, smoothR, block=block_size, grid=stitch_grid_size);\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largebox_d.get_async()\n\t#import IPython; IPython.embed()\n\tprint \"Executing FFT on host\"\n\t#hbox_large = hifft(hbox_large).astype(np.complex64).real\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint \"Finished FFT on host\"\n\t#import IPython; IPython.embed()\n\n\t# for meta_x in xrange(META_GRID_SIZE):\n\t# \tfor meta_y in xrange(META_GRID_SIZE):\n\t# \t\tfor meta_z in xrange(META_GRID_SIZE):\n\t# \t\t\tlargebox_d = gpuarray.to_gpu(hbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N])\n\t# \t\t\tHII_filter(largebox_d, N, np.int32(meta_x), np.int32(meta_y), np.int32(meta_z), ZERO, smoothR, block=block_size, grid=grid_size);\n\t# \t\t\thbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N] = largebox_d.get()\n\t#plan = Plan(shape, dtype=np.complex64)\n\t#plan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\n\n\t# This saves a large resolution deltax\n\n\t\n\tprint \"downsampling\"\n\tsmallbox_d = gpuarray.zeros((HII_DIM,HII_DIM,M), dtype=np.float32)\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\t#largebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tlargebox_d /= scale**3 #\n\t\tsubsample_kernel(largebox_d, smallbox_d, DIM, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size) #subsample in real space\n\t\thbox_small[:, :, meta_z*M:(meta_z+1)*M] = 
smallbox_d.get_async()\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), hbox_small)\n\t#import IPython; IPython.embed()\n\n\n\t# To get velocities: reload the k-space box\n\thbox_large = np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\thvbox_large = np.zeros((DIM, DIM, DIM), dtype=np.float32)\n\thvbox_small = np.zeros(HII_shape, dtype=np.float32)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,N), dtype=np.complex64)\n\tsmallvbox_d = gpuarray.zeros((HII_DIM, HII_DIM, M), dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\t\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(meta_z), np.int32(num), block=block_size, grid=stitch_grid_size)\n\t\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=stitch_grid_size)\n\t\t\tprint hvbox_large.shape, largevbox_d.shape\n\t\t\thvbox_large[:, :, meta_z*N:(meta_z+1)*N] = largevbox_d.get_async()\n\t\thvbox_large = fft_stitch(N, plan2d, plan1d, hvbox_large, largevbox_d).real\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargevbox_d = gpuarray.to_gpu_async(hvbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\tsubsample_kernel(largevbox_d.real, smallvbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size)\n\t\t\thvbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallvbox_d.get_async()\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallvbox_d.get())\n\n\treturn", "def kernel_factory(s, m1, m2):\r\n m_max = max(m1, m2)\r\n A = np.zeros([s, m_max, m_max], dtype=complex)\r\n symmetry = random.choice([2, 3, 4, 6])\r\n half_sym = np.floor(symmetry / 2).astype('int')\r\n lowest_k = 0.5\r\n highest_k = 3\r\n k = np.zeros([s, symmetry])\r\n for level in range(s):\r\n k[level, :] = np.random.uniform(lowest_k, highest_k, symmetry)\r\n\r\n x, y = np.meshgrid(np.linspace(-1, 1, m_max), np.linspace(-1, 1, m_max))\r\n # dist = np.sqrt(x * x + y * y)\r\n # theta = np.arctan(x / y)\r\n arb_angle = np.random.uniform(0, 2 * np.pi)\r\n for direction in range(symmetry):\r\n ang = direction * 180 / symmetry\r\n ang = arb_angle + ang * np.pi / 180\r\n r = (x * np.cos(ang) + np.sin(ang) * y)\r\n phi = np.random.uniform(0, 2 * np.pi)\r\n for i in range(s):\r\n A[i, :, :] += np.cos(2 * np.pi * k[i, direction % half_sym] * r)\r\n\r\n # Adding normal decay\r\n sigma = np.random.uniform(0.3, 0.6)\r\n decay = gaussian_window(m_max, m_max, sigma)\r\n A = np.multiply(np.abs(A), decay)\r\n # Normalizing:\r\n A = sphere_norm_by_layer(A)\r\n return A", "def fdm_2d(N,L,x,y,h,k):\n\n # Create the Laplacian as a 1d sparse matrix using central difference\n ones = np.ones(N)\n diagvalues = np.array([ones,-2*ones,ones])\n offsets = np.array([-1,0,1])\n lap1d = sps.dia_matrix((diagvalues,offsets), shape=(N,N))/h**2\n \n # Represent 2d coordinates as kronecker sum\n lap = sps.kron(lap1d,sps.diags(np.ones(N))) + \\\n sps.kron(sps.diags(np.ones(N)),lap1d)\n \n # potential terms\n pot_x = np.repeat(x**2,N)\n pot_y = np.tile(y**2,N)\n\n # The whole Hamiltonian in matrix form\n A = (-1*lap + sps.diags(pot_x) + sps.diags(pot_y))/2\n\n # Calculate the k smallest eigenvalues and corresponding eigenvectors\n E, psi = eigsh(A,k=k,which='SM')\n\n\n # 
Perturbated potential\n a = 25\n pot_new = pot_x + pot_y + gauss_pert(N,a).flatten()\n\n # Plot the new potential\n X,Y = np.meshgrid(x,y)\n fig = plt.figure()\n ax = fig.add_subplot(1,2,1,projection='3d')\n ax.plot_surface(X, Y, pot_new.reshape((N,N)), cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n ax = fig.add_subplot(1,2,2)\n fig.suptitle(r'Potential with a Gaussian perturbation')\n ax.imshow(pot_new.reshape(N,N),extent=[-L/2,L/2,-L/2,L/2])\n plt.savefig(os.path.join(path,'perturbated_potential.png'))\n\n # The perturbated Hamiltonian in matrix form\n A = (-1*lap + sps.diags(pot_new))/2\n\n # Calculate the k smallest eigenvalues and corresponding eigenvector\n # Of the perturbated system\n E_p, psi_p = eigsh(A,k=k,which='SM')\n\n return E,psi,E_p,psi_p", "def ghosal_edge_v2(img,Ks,kmin=0,kmax=1000,lmax=0.5,phimin=1,thresholding=True,debug=False,mirror=False):\n\t# gather image properties before its altered\n\tni,nj = np.shape(img)\n\t# Ks must be odd\n\tif Ks%2 != 1:\n\t\tprint(\"Ks must be odd! Continuing with Ks = Ks-1\")\n\t\tKs = Ks-1\n\t# define the rectangular kernels\n\t#Vc00 = np.zeros((Ks,Ks),dtype=complex) # not needed\n\tVc11 = np.zeros((Ks,Ks),dtype=complex)\n\tVc20 = np.zeros((Ks,Ks),dtype=complex)\n\tofs = 1 *(1-1/Ks) # offset for centering kernel around 0,0\n\tfor i in range(Ks):\n\t\tfor j in range(Ks):\n\t\t\tKx = 2*j/Ks-ofs # limits of integration between -1 and 1\n\t\t\tKy = 2*i/Ks-ofs\n\t\t\tif Kx**2+Ky**2 <= 1: # only a circle\n\t\t\t\t#Vc00[i,j] = 1 # the conjugate of V00 # not needed\n\t\t\t\tVc11[i,j] = Kx-Ky*1j # ...\n\t\t\t\tVc20[i,j] = 2*Kx**2+2*Ky**2-1\n\t# mirror the edges to avoid edge effects from convolution\n\tif mirror:\n\t\tthick = int((Ks-1)/2)\n\t\timg = np.concatenate((img[:,(thick-1)::-1],img,img[:,:-(thick+1):-1]),1)\n\t\timg = np.concatenate((img[(thick-1)::-1,:],img,img[:-(thick+1):-1,:]),0)\n\t\tmode = \"valid\"\n\telse:\n\t\tmode = \"same\"\n\t\n\t# do the convolution with the images to get the zernike moments\n\tAnorm = lambda n : (n+1)/np.pi\t# a normalization value\n\t#A00 = scig.convolve2d(img,Vc00,mode='same') # not needed\n\tA11 = Anorm(1)*scig.oaconvolve(img,Vc11,mode=mode)\n\tA20 = Anorm(2)*scig.oaconvolve(img,Vc20,mode=mode)\n\n\tphi = np.arctan(np.imag(A11)/zero_to_small(np.real(A11)))\n\tAl11 = np.real(A11)*np.cos(phi)+np.imag(A11)*np.sin(phi)\n\tl = np.real(A20)/Al11 # A20 has no imaginary component so A20 = A'20\n\tl = np.minimum(l,1-SMALL) # chop off those that go beyond the kernel boundaries\n\tl = np.maximum(l,-1+SMALL)\n\tk = abs(3*Al11/(2*(1-l**2)**(3/2))) \n\t\n\tif thresholding==True:\n\t\t# conditions\n\t\tphi_c = abs(phi)>phimin\n\t\tl_c = abs(l)<lmax\n\t\tk_c = (k<kmax) & (k>kmin)\n\t\tvalid = phi_c & (k_c & l_c)\n\telif thresholding==False:\n\t\tvalid = np.ones_like(k)\n\t# define a grid of pixel positions\n\ti,j = np.meshgrid(np.arange(nj),np.arange(ni))\n\t\n\t# get a list of the valid relevant parameters \n\ti = i[valid]\n\tj = j[valid]\n\t#\tk = k[valid] # not necessary\n\tl = l[valid]\n\tphi = phi[valid]\n\t\n\t# convert to the subpixel position\n\ti_s = i+l*Ks/2*np.cos(phi)\n\tj_s = j+l*Ks/2*np.sin(phi)\n\t\n\t# put all detected points in a vector of (x,y) values\n\tedg = np.squeeze((j_s,i_s)).transpose()\n\torg = np.squeeze((j,i)).transpose()\n\tif debug==True:\n\t\treturn edg, org, k, l, phi\n\telse:\n\t\treturn edg, org", "def calc_ked_WFI(self):\n\n #Initialize kinetic energy density\n self.ked_WFI = np.zeros( (self.grid.Nelem, 1))\n\n #Figure out the number of occupied orbitals\n if self.m == 0:\n if 
self.pol == 1:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n else:\n Nocc = np.floor(self.N)\n nu = self.N - Nocc\n\n else:\n #m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n Nocc = np.floor(self.N / 4)\n nu = self.N / 4 - Nocc\n else:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n\n #Construct density\n for i in range(int(Nocc)):\n # print(\"phi from pssolver\", self.phi)\n # print(\"phi subset\", self.phi[:,i])\n # print(\"integrate returns\", self.grid.integrate( self.phi[:,i]**2 )**0.5)\n\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:,i]**2 )**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += (phi_norm * (self.H0 @ phi_norm)) / self.grid.w[:, None]\n\n #If we are doing fractional robitals and are non-integer\n if self.FRACTIONAL is True and nu != 0:\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:, Nocc+1]**2)**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += nu * ( phi_norm * (self.H0 @ phi_norm) ) / self.grid.w[:, None]\n\n #Scale densities appropriately\n if self.m == 0:\n if self.pol == 1: #Unpolarized electrons\n self.ked_WFI = 2 * self.ked_WFI\n\n else: # m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n self.ked_WFI = 4 * self.ked_WFI\n else:\n self.ked_WFI = 2 * self.ked_WFI", "def filter_wrapped_phase(image, k):\n ny, nx = image.shape\n assert(ny == nx) ## assert a square image for simplicity\n if (k%2 == 0):\n print(\"k has to be an integer!\")\n return\n N = nx\n i, j = np.arange(N), np.arange(N)\n ii, jj = np.meshgrid(i, j)\n filt_psi = np.zeros((N,N))\n\n inside = (jj[k/2:N-(k/2), k/2:N-(k/2)].flatten(), ii[k/2:N-(k/2), k/2:N-(k/2)].flatten())\n krange = np.linspace(-1 * (k/2), (k/2), k, dtype = 'int64') ## amount of added spaces, if k = 5, it ranges from -2 to 2\n krange_tile = np.tile(krange * N, (k, 1)).T ## tile them to make a (k/2)**2 matrix, containing for instance -2N, -N, 0, N, 2N for k=5\n k_tile = np.tile(krange, (k, 1)) ## tile to add to krange_tile\n coords_add = (krange_tile + k_tile).flatten() ## all coordinates, in a (k/2)**2 matrix, from -2N - 2: -2N + 2, -N-2 : -N+2 , -2 : 2, N -2 : N +2, 2N -2 : 2N +2\n inside = np.ravel_multi_index(inside, (N, N))\n coords_add = np.tile(coords_add, (len(inside), 1)) ## stack all differences to add to inside\n inside_tile = np.tile(inside, (coords_add.shape[1],1)).T ## stack all inside to add to differences\n all_coords = inside_tile + coords_add### a matrix of len(inside) x (k/2)**2 with all coordinates in a k x k square around a certain coordinate\n unrav_coords = np.unravel_index(all_coords, (N, N)) ## unraveled coordinates of all coordinates\n sum_sin_psi = np.sum(np.sin(image[unrav_coords]), axis = 1) ## sum over a sin (psi) over a k x k square\n sum_cos_psi = np.sum(np.cos(image[unrav_coords]), axis = 1) ## sum over a cos (psi) over a k x k square\n psi_app = np.arctan2(sum_sin_psi, sum_cos_psi)\n filt_psi[np.unravel_index(inside, (N, N))] = psi_app \n\n #### top layers\n for i in range(k/2):\n ## for indices directly above the \"inside square\"\n top = (jj[i, k/2:N-(k/2)].flatten(), ii[i, k/2: N - (k/2)].flatten())\n coords_add = (krange_tile + k_tile)[(k/2)-i:, :].flatten()\n top = np.ravel_multi_index(top, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n top_tile = np.tile(top, (coords_add.shape[1],1)).T\n top_coords = top_tile + coords_add\n unrav_coords = np.unravel_index(top_coords, (N, N))\n sum_sin_top = 
np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_top = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_top = np.arctan2(sum_sin_top, sum_cos_top)\n filt_psi[np.unravel_index(top, (N, N))] = psi_top\n\n ## indices directly below the \"inside square\"\n bot = (jj[N- 1 - i, k/2:N-(k/2)].flatten(), ii[N-1-i, k/2: N - (k/2)].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:(k/2) + 1 + i, :].flatten()\n bot = np.ravel_multi_index(bot, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n bot_tile = np.tile(bot, (coords_add.shape[1],1)).T\n bot_coords = bot_tile + coords_add\n unrav_coords = np.unravel_index(bot_coords, (N, N))\n sum_sin_bot = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_bot = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_bot = np.arctan2(sum_sin_bot, sum_cos_bot)\n filt_psi[np.unravel_index(bot, (N, N))] = psi_bot\n\n ## indices directly left of the \"inside square\"\n left = (jj[k/2:N-(k/2), i].flatten(), ii[k/2:N-(k/2), i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, (k/2)-i:].flatten()\n left = np.ravel_multi_index(left, (N, N))\n coords_add = np.tile(coords_add, (len(left), 1))\n left_tile = np.tile(left, (coords_add.shape[1],1)).T\n left_coords = left_tile + coords_add\n unrav_coords = np.unravel_index(left_coords, (N, N))\n sum_sin_left = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_left = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_left = np.arctan2(sum_sin_left, sum_cos_left)\n filt_psi[np.unravel_index(left, (N, N))] = psi_left\n\n ## indices directly left of the \"inside square\"\n right = (jj[k/2:N-(k/2), N - 1 - i].flatten(), ii[k/2:N-(k/2), N - 1 - i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, :(k/2)+1+i].flatten()\n right = np.ravel_multi_index(right, (N, N))\n coords_add = np.tile(coords_add, (len(right), 1))\n right_tile = np.tile(right, (coords_add.shape[1],1)).T\n right_coords = right_tile + coords_add\n unrav_coords = np.unravel_index(right_coords, (N, N))\n sum_sin_right = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_right = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_right = np.arctan2(sum_sin_right, sum_cos_right)\n filt_psi[np.unravel_index(right, (N, N))] = psi_right\n \n ## calculate boundaries diagonals\n left_t, right_t, left_b, right_b = (i, i), (i, -1 -i), (-1 - i, i), (-1 - i, -1 - i) \n left_t, right_t, left_b, right_b = (jj[left_t], ii[left_t]), (jj[right_t], ii[right_t]), (jj[left_b], ii[left_b]), (jj[right_b], ii[right_b])\n left_t, right_t, left_b, right_b = np.ravel_multi_index(left_t, (N, N)), np.ravel_multi_index(right_t, (N, N)), np.ravel_multi_index(left_b, (N, N)), np.ravel_multi_index(right_b, (N, N))\n coord_mat = krange_tile + k_tile\n coords_add_lt, coords_add_rt, coords_add_lb, coords_add_rb = coord_mat[(k/2)-i:, (k/2)-i:].flatten(), coord_mat[(k/2)-i:, :(k/2)+1+i].flatten(), coord_mat[:(k/2)+i+1, (k/2)-i:].flatten(), coord_mat[:(k/2)+i+1, :(k/2)+i+1].flatten()\n coords_add_tot = np.vstack((coords_add_lt, coords_add_rt, coords_add_lb, coords_add_rb))\n lt_tile, rt_tile, lb_tile, rb_tile = np.tile(left_t, (coords_add_lt.shape[0],1)).T, np.tile(right_t, (coords_add_lt.shape[0],1)).T, np.tile(left_b, (coords_add_lt.shape[0],1)).T, np.tile(right_b, (coords_add_lt.shape[0],1)).T\n coords_tile_tot = np.squeeze(np.stack((lt_tile, rt_tile, lb_tile, rb_tile)))\n coords_tot = coords_add_tot + coords_tile_tot\n unrav_coords = 
np.unravel_index(coords_tot, (N, N))\n sum_sin_diag = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_diag = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_diag = np.arctan(sum_sin_diag, sum_cos_diag)\n filt_psi[np.unravel_index(np.stack((left_t, right_t, left_b, right_b)), (N, N))] = psi_diag\n\n return filt_psi", "def aGMKernel(Ni,Nj,alpha,gamma):\n \n #Dimension of data\n d = Ni.mu.size\n I = sp.eye(d)\n\n ##Normalisation\n deltaMean = (Ni.mu-Nj.mu).reshape(d,)\n SigmaSum = alpha * (Ni.Sigma+Nj.Sigma) + I/gamma\n Kij = (linalg.det(2*gamma*alpha * Ni.Sigma + I) * linalg.det(2*gamma*alpha * Nj.Sigma + I))**0.25\n Kij *= sp.exp(-0.5*sp.dot(deltaMean.T,linalg.solve(SigmaSum,deltaMean)))\n Kij /= sp.sqrt(linalg.det(SigmaSum*gamma)) \n \n return Kij", "def moffat_kernel(n_fwhm,beta,r_s):\n\n x_length = int(n_rs * r_s + 0.5) #Add 0.5 to approximate to nearest integer\n y_length = x_length\n \n\n x, y = mgrid[-x_length:x_length+1, -y_length:y_length+1]\n\t\n m = 1. /((1+(x**2+y**2)/r_s**2)**beta)\n\t\t\n\n return m / m.sum()", "def disp_surf_calc(kc_x_max, kc_z_max, m_i, wp_e):\n\n # Make vectors of the wave numbers\n kc_z = np.linspace(1e-6, kc_z_max, 35)\n kc_x = np.linspace(1e-6, kc_x_max, 35)\n\n # Turn those vectors into matrices\n kc_x_mat, kc_z_mat = np.meshgrid(kc_x, kc_z)\n\n # Find some of the numbers that appear later in the calculations\n kc_ = np.sqrt(kc_x_mat ** 2 + kc_z_mat ** 2) # Absolute value of k\n theta_ = np.arctan2(kc_x_mat, kc_z_mat) # The angle between k and B\n wc_i = 1 / m_i # The ion gyro frequency\n wp_i = wp_e / np.sqrt(m_i) # The ion plasma frequency\n wp_ = np.sqrt(wp_e ** 2 + wp_i ** 2) # The total plasma frequency\n\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # For every k_perp and k_par, turn the dispersion relation into a\n # polynomial equation and solve it.\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # The polynomial coefficients are calculated\n pol_koeff_8 = -2 * kc_ ** 2\n pol_koeff_8 -= (1 + wc_i ** 2 + 3 * wp_ ** 2) * np.ones(kc_.shape)\n pol_koeff_6 = (2 * kc_ ** 2 + wp_ ** 2) * (1 + wc_i ** 2 + 2 * wp_ ** 2)\n pol_koeff_6 += kc_ ** 4 + (wp_ ** 2 + wc_i) ** 2\n pol_koeff_4 = -kc_ ** 4 * (1 + wc_i ** 2 + wp_ ** 2)\n pol_koeff_4 -= 2 * kc_ ** 2 * (wp_ ** 2 + wc_i) ** 2\n pol_koeff_4 -= (kc_ * wp_) ** 2 * (1 + wc_i ** 2 - wc_i) * (\n 1 + np.cos(theta_) ** 2)\n pol_koeff_4 -= wp_ ** 2 * (wp_ ** 2 + wc_i) ** 2\n pol_koeff_2 = kc_ ** 4 * (wp_ ** 2 * (1 + wc_i ** 2 - wc_i) * np.cos(\n theta_) ** 2 + wc_i * (wp_ ** 2 + wc_i))\n pol_koeff_2 += kc_ ** 2 * wp_ ** 2 * wc_i * (wp_ ** 2 + wc_i) * (\n 1 + np.cos(theta_) ** 2)\n pol_koeff_0 = -kc_ ** 4 * wc_i ** 2 * wp_ ** 2 * np.cos(theta_) ** 2\n\n w_final = np.zeros((10, len(kc_z), len(kc_x)))\n\n # For each k, solve the equation\n for k_z, k_x in itertools.product(range(len(kc_z)), range(len(kc_x))):\n disp_polynomial = [1, 0, pol_koeff_8[k_z, k_x], 0,\n pol_koeff_6[k_z, k_x], 0, pol_koeff_4[k_z, k_x],\n 0, pol_koeff_2[k_z, k_x], 0, pol_koeff_0[k_z, k_x]]\n # theoretically should be real (A. Tjulin)\n w_temp = np.real(np.roots(disp_polynomial))\n # We need to sort the answers to get nice surfaces.\n w_final[:, k_z, k_x] = np.sort(w_temp)\n\n n2_ = kc_ ** 2 / w_final ** 2\n v_ph_c = np.sqrt(1. 
/ n2_)\n va_c = 1 / (wp_e * np.sqrt(m_i))\n v_ph_va = v_ph_c / va_c\n\n diel_tensor = _calc_diel(kc_, w_final, theta_, wp_e, wp_i, wc_i)\n\n e_x, e_y, e_z, e_per, e_tot, e_pol = _calc_e(diel_tensor)\n e_par = (kc_x_mat * e_x + kc_z_mat * e_z) / kc_\n\n b_x, b_y, b_z, b_par, b_per, b_pol, b_tot = _calc_b(kc_x_mat, kc_z_mat,\n w_final, e_x, e_y, e_z)\n\n dk_x, dk_z = [kc_x_mat[1], kc_z_mat[1]]\n dw_x, dw_z = [np.zeros(w_final.shape) for _ in range(2)]\n dw_x[:, :, 1:] = np.diff(w_final, axis=2)\n dw_z[:, 1:, :] = np.diff(w_final, axis=1)\n v_x, v_z = [dw_ / dk for dw_, dk in zip([dw_x, dw_z], [dk_x, dk_z])]\n\n s_par, s_tot = _calc_s(e_x, e_y, e_z, b_x, b_y, b_z)\n\n # Compute ion and electron velocities\n v_ex, v_ey, v_ez, v_ix, v_iy, v_iz = _calc_vei(m_i, wc_i, w_final,\n e_x, e_y, e_z)\n\n # Ratio of parallel and perpendicular to B speed\n vepar_perp = v_ez * np.conj(v_ez)\n vepar_perp /= (v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey))\n vipar_perp = v_iz * np.conj(v_iz)\n vipar_perp /= (v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy))\n\n # Total particle speeds\n v_e2 = v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey) + v_ez * np.conj(v_ez)\n v_i2 = v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy) + v_iz * np.conj(v_iz)\n\n # Ion and electron energies\n m_e = -1\n en_e = 0.5 * m_e * v_e2\n en_i = 0.5 * m_i * v_i2\n\n # Ratio of particle and field energy densities\n ratio_part_field = _calc_part2fields(wp_e, en_e, en_i, e_tot, b_tot)\n\n # Continuity equation\n dn_e_n, dn_i_n, dne_dni = _calc_continuity(kc_x_mat, kc_z_mat, w_final,\n v_ex, v_ez, v_ix, v_iz)\n\n dn_e_n_db_b = dn_e_n / b_tot\n dn_i_n_db_b = dn_i_n / b_tot\n\n dn_e_n_dbpar_b = dn_e_n / b_par\n dn_i_n_dbpar_b = dn_i_n / b_par\n\n dn_e = dn_e_n * wp_e ** 2\n k_dot_e = e_x * kc_x_mat + e_z * kc_z_mat\n k_dot_e = np.sqrt(k_dot_e * np.conj(k_dot_e))\n\n # Build output dict\n extra_param = {\"Degree of electromagnetism\": np.log10(b_tot / e_tot),\n \"Degree of longitudinality\": np.abs(e_par) / e_tot,\n \"Degree of parallelity E\": e_z / e_tot,\n \"Degree of parallelity B\": np.sqrt(\n b_z * np.conj(b_z)) / b_tot,\n \"Ellipticity E\": e_pol, \"Ellipticity B\": b_pol,\n \"E_part/E_field\": np.log10(ratio_part_field),\n \"v_g\": np.sqrt(v_x ** 2 + v_z ** 2),\n \"v_ph/v_a\": np.log10(v_ph_va),\n \"E_e/E_i\": np.log10(en_e / en_i),\n \"v_e/v_i\": np.log10(np.sqrt(v_e2 / v_i2)),\n \"v_epara/v_eperp\": np.log10(vepar_perp),\n \"v_ipara/v_iperp\": np.log10(vipar_perp),\n \"dn_e/dn_i\": np.log10(dne_dni),\n \"(dn_e/n)/ (dB/B)\": np.log10(dn_e_n_db_b),\n \"(dn_i/n)/(dB/B)\": np.log10(dn_i_n_db_b),\n \"(dn_i/n)/(dBpar/B)\": np.log10(dn_i_n_dbpar_b),\n \"(dn_e/n)/(dB/B)\": np.log10(dn_e / k_dot_e),\n \"(dn_e/n)/(dBpar /B)\": np.log10(dn_e_n_dbpar_b),\n \" Spar/Stot\": s_par / s_tot}\n\n for k, v in zip(extra_param.keys(), extra_param.values()):\n extra_param[k] = np.transpose(np.real(v), [0, 2, 1])\n\n kx_ = np.transpose(kc_x_mat)\n kz_ = np.transpose(kc_z_mat)\n wf_ = np.transpose(w_final, [0, 2, 1])\n\n return kx_, kz_, wf_, extra_param", "def init():\n\tN = np.int32(DIM) #prepare for stitching\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN)/DIM\n\tHII_scale = np.float32(BOX_LEN)/HII_DIM\n\tshape = (N,N,N)\n\t\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\n\tkernel_source = open(cmd_folder+\"/initialize.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'VOLUME': VOLUME,\n\t\t'DIM': DIM\n\t}\n\tmain_module = 
nvcc.SourceModule(kernel_code)\n\tinit_kernel = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tadj_complex_conj = main_module.get_function(\"adj_complex_conj\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d, np.int32(DIM), block=block_size, grid=grid_size)\n\n\t#import IPython; IPython.embed()\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d_imag, np.int32(DIM), block=block_size, grid=grid_size)\n\n\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\n\t#adj_complex_conj(largebox_d, DIM, block=block_size, grid=grid_size)\n\tlargebox = largebox_d.get()\n\t#np.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox)\n\n\t#save real space box before smoothing\n\tplan = Plan(shape, dtype=np.complex64)\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox_d.real.get_async())\n\n\t#save real space box after smoothing and subsampling\n\t# host largebox is still in k space, no need to reload from disk\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tHII_filter(largebox_d, N, ZERO, smoothR, block=block_size, grid=grid_size);\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tsubsample_kernel(largebox_d.real, smallbox_d, N, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_grid_size) #subsample in real space\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), smallbox_d.get_async())\n\n\t# reload the k-space box for velocity boxes\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\t\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,DIM), dtype=np.complex64)\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(num), block=block_size, grid=grid_size)\n\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=grid_size)\n\t\tplan.execute(largevbox_d, inverse=True)\n\t\tlargevbox_d /= scale**3\n\t\t#import IPython; IPython.embed()\n\t\tsubsample_kernel(largevbox_d.real, smallbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_grid_size)\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallbox_d.get())\n\n\treturn", "def ghosal_edge(img,Ks,thr=1,thrmax=0.995,lmin = 0.5,phimin=1.4,thresholding=True, debug=False):\n\ttotaltime = time.time()\n\tkerneltime = time.time()\n\t# Ks must be odd\n\tif Ks%2 != 1:\n\t\tprint(\"Ks must be odd! 
Continuing with Ks = Ks-1\")\n\t\tKs = Ks-1\n\t# define the rectangular kernels\n\t#Vc00 = np.zeros((Ks,Ks),dtype=complex)\n\tVc11 = np.zeros((Ks,Ks),dtype=complex)\n\tVc20 = np.zeros((Ks,Ks),dtype=complex)\n\tofs = 1 *(1-1/Ks) # offset for centering kernel around 0,0\n\tfor i in range(Ks):\n\t\tfor j in range(Ks):\n\t\t\tKx = 2*j/Ks-ofs # limits of integration between -1 and 1\n\t\t\tKy = 2*i/Ks-ofs\n\t\t\tif Kx**2+Ky**2 <= 1: # only a circle\n\t\t\t\t#Vc00[i,j] = 1 # the conjugate of V00\n\t\t\t\tVc11[i,j] = Kx-Ky*1j # ...\n\t\t\t\tVc20[i,j] = 2*Kx**2+2*Ky**2-1\n\tkerneltime = time.time() - kerneltime\n\t\n\t# Kernel Plots\n\t#\tVCplot = Vc00\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real w K Vc00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag w K Vc00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = Vc11\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real w K Vc11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag w K Vc11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = Vc20\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real w K Vc20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag w K Vc20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t\n\t# do the convolution with the images to get the zernike moments\n\tAnorm = lambda n : (n+1)/np.pi\t# a normalization value\n\tconvolvetime = time.time()\n\t#A00 = scig.convolve2d(img,Vc00,mode='same')\n\t#\tA11 = Anorm(1)*scig.convolve2d(img,Vc11,mode='same')\n\t#\tA20 = Anorm(2)*scig.convolve2d(img,Vc20,mode='same')\n\tA11 = Anorm(1)*scig.oaconvolve(img,Vc11,mode='same')\n\tA20 = Anorm(2)*scig.oaconvolve(img,Vc20,mode='same')\n\tconvolvetime = time.time() - convolvetime\n\t# Plot Zernike moments\n\t#\tVCplot = A00\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = A11\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = A20\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t\n\tparamstime = time.time()\n\t# calculate the edge paramters\n\t#\ttanphi = np.imag(A11)/np.real(A11)\n\t#\tphi = np.arctan(tanphi)\n\t#\tcosphi = np.cos(phi)\n\t#\tsinphi = cosphi*tanphi\n\t#\tAl11 = np.real(A11)*cosphi+np.imag(A11)*sinphi\n\t\n\tphi = np.arctan(np.imag(A11)/np.real(A11))\n\tAl11 = np.real(A11)*np.cos(phi)+np.imag(A11)*np.sin(phi)\n\t\n\t#\tAl11 = A11*np.exp(-phi*1j)\n\tl = A20/Al11 # A20 has no imaginary component so A20 = A'20\n\n\tk = 3*Al11/(2*(1-l**2)**(3/2))\n\tparamstime = time.time() - paramstime\n\t\n\t# Plot edge paramters\n\t#\tVCplot = phi\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real phi\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag phi\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = Al11\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A\\'11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag 
A\\'11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = l\n\t#\tplt.pcolormesh(np.real(VCplot))#,vmin=-5,vmax=5\n\t#\tplt.title(\"real l\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot)) # ,vmin=-5,vmax=5\n\t#\tplt.title(\"imag l\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = k\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real k\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag k\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t\n\t\n\ttreattime = time.time()\n\tif thresholding==True:\n\t\t# do the thresholding\n\t\tif (thrmax<0)&(thr>0):\n\t\t\tknorm = np.sort(k.flatten())[[int(thr*np.size(k)),int(thrmax*np.size(k))]]\n\t\t\tidx = (abs(l)<lmin)&(abs(phi)>phimin)&(abs(k)>knorm[0])\n\t\telif thrmax>0:\n\t\t\tknorm = np.sort(k.flatten())[[int(thr*np.size(k)),int(thrmax*np.size(k))]]\n\t\t\tidx = (abs(l)<lmin)&(abs(phi)>phimin)&(abs(k)>knorm[0])&(abs(k)<knorm[1])\n\t\telif thr<0:\n\t\t\tidx = (abs(l)<lmin)&(abs(phi)>phimin)\n\t\t\tknorm = np.sort(k[idx].flatten())[int(thr)]\n\t\t\tidx = idx&(abs(k)>abs(knorm))\n\t\tne = np.sum(idx)\n\telif thresholding==False:\n\t\traise ValueError(\"this option is not still uncer development\")\n\t\t# no thresholding\n\t\tidx = np.ones(np.shape(l),dtype=bool)\n\t\tne =np.sum(idx)\n\telse:\n\t\traise ValueError(\"thresholding should be boolean\")\n\t\n\t# put all detected points in a vector of (x,y) values\n\tedg = np.zeros((ne,2))\n\torg = np.zeros((ne,2))\n\tnx,ny = np.shape(img)\n\te = 0\n\tfor i in range(nx):\n\t\tfor j in range(ny):\n\t\t\tif idx[i,j]:\n\t\t\t\tedg[e]=np.array([i,j]) + l[i,j]*Ks/2*np.array(\n\t\t\t\t\t[np.sin(phi[i,j]),-np.cos(phi[i,j])])\n\t\t\t\torg[e]=np.array([i,j])\n\t\t\t\te +=1\n\ttreattime = time.time() - treattime\n\ttotaltime = time.time() - totaltime\n\tprint(\"total %0.5f\tconvolution %0.5f\tthresholding %0.5f\tparamters %0.5f\tkernel %0.5f\"%(totaltime,convolvetime,treattime,paramstime,kerneltime))\n\t\n\tif debug==True:\n\t\treturn edg, org, k, l, phi\n\telse:\n\t\treturn edg, org", "def test():\n\n S = \"cells interlinked within cells interlinked\"\n T = \"within one stem and dreadfully distinct\"\n\n n = 2\n\n res = kernel(S, T, n)\n\n print(res)\n print('k(car, car, 1) = ', kernel('car', 'car', 1),\n 'should be 3*lambda^2 = .75')\n print('k(car, car, 2) = ', kernel('car', 'car', 2),\n ' should be lambda^6 + 2*lambda^4 = 0.140625')\n print('k(car, car, 3) = ', kernel('car', 'car', 3),\n 'should be lambda^6 = 0.0156')\n\n print('normkernel(cat, car, 1) = ', normkernel('cat', 'car', 1),\n 'should be 2/3')\n print('kernel(cat, car, 2) = ', kernel('cat', 'car', 2),\n 'should be lambda^4 = 0.0625')\n print('normkernel(cat, car, 2) = ', normkernel('cat', 'car', 2),\n 'should be 1/(2+lambda^2) = 0.44444')\n\n print(\n kernel(\"AxxxxxxxxxB\", \"AyB\", 2),\n 'should be =0.5^14 = 0.00006103515625')\n print(\n kernel(\"AxxxxxxxxxB\", \"AxxxxxxxxxB\", 2),\n 'should be 12.761724710464478')\n\n print(kernel(\"ab\", \"axb\", 2), 'should be =0.5^5 = 0.03125')\n print(kernel(\"ab\", \"abb\", 2), 'should be 0.5^5 + 0.5^4 = 0.09375')\n print(normkernel(\"ab\", \"ab\", 2), 'should be 1')\n print(normkernel(\"AxxxxxxxxxB\", \"AxxxxxxxxxB\", 2), 'should be 1')\n\n kss = [0.580, 0.580, 0.478, 0.439, 0.406, 0.370]\n for x in range(1, 7):\n print(x,\n normkernel(\"science is organized knowledge\",\n \"wisdom is organized life\", x), 'should be',\n kss[x - 1])", "def kramers_kronig_hs(deltaE, I_EELS,\n N_ZLP=None,\n iterations=1,\n n=None,\n 
t=None,\n delta=0.5,\n full_output=True, prints = np.array([]), correct_S_s = False):\n output = {}\n # Constants and units\n me = 511.06\n\n e0 = 200 # keV\n beta =30 #mrad\n\n eaxis = deltaE[deltaE>0] #axis.axis.copy()\n ddeltaE = (np.max(deltaE) - np.min(deltaE))/(len(deltaE - 1))\n S_E = I_EELS[deltaE>0]\n y = I_EELS[deltaE>0]\n l = len(eaxis)\n i0 = N_ZLP\n \n # Kinetic definitions\n ke = e0 * (1 + e0 / 2. / me) / (1 + e0 / me) ** 2\n tgt = e0 * (2 * me + e0) / (me + e0)\n rk0 = 2590 * (1 + e0 / me) * np.sqrt(2 * ke / me)\n\n for io in range(iterations):\n # Calculation of the ELF by normalization of the SSD\n # We start by the \"angular corrections\"\n Im = y / (np.log(1 + (beta * tgt / eaxis) ** 2)) / ddeltaE#axis.scale\n if n is None and t is None:\n raise ValueError(\"The thickness and the refractive index are \"\n \"not defined. Please provide one of them.\")\n elif n is not None and t is not None:\n raise ValueError(\"Please provide the refractive index OR the \"\n \"thickness information, not both\")\n elif n is not None:\n # normalize using the refractive index.\n K = np.sum(Im/eaxis)*ddeltaE \n K = (K / (np.pi / 2) / (1 - 1. / n ** 2))\n te = (332.5 * K * ke / i0)\n if full_output is True:\n output['thickness'] = te\n elif t is not None:\n if N_ZLP is None:\n raise ValueError(\"The ZLP must be provided when the \"\n \"thickness is used for normalization.\")\n # normalize using the thickness\n K = t * i0 / (332.5 * ke)\n te = t\n Im = Im / K\n\n # Kramers Kronig Transform:\n # We calculate KKT(Im(-1/epsilon))=1+Re(1/epsilon) with FFT\n # Follows: D W Johnson 1975 J. Phys. A: Math. Gen. 8 490\n # Use an optimal FFT size to speed up the calculation, and\n # make it double the closest upper value to workaround the\n # wrap-around problem.\n esize = next_fast_len(2*l) #2**math.floor(math.log2(l)+1)*4\n q = -2 * np.fft.fft(Im, esize).imag / esize\n\n q[:l] *= -1\n q = np.fft.fft(q)\n # Final touch, we have Re(1/eps)\n Re = q[:l].real + 1\n # Egerton does this to correct the wrap-around problem, but in our\n # case this is not necessary because we compute the fft on an\n # extended and padded spectrum to avoid this problem.\n # Re=real(q)\n # Tail correction\n # vm=Re[axis.size-1]\n # Re[:(axis.size-1)]=Re[:(axis.size-1)]+1-(0.5*vm*((axis.size-1) /\n # (axis.size*2-arange(0,axis.size-1)))**2)\n # Re[axis.size:]=1+(0.5*vm*((axis.size-1) /\n # (axis.size+arange(0,axis.size)))**2)\n\n # Epsilon appears:\n # We calculate the real and imaginary parts of the CDF\n e1 = Re / (Re ** 2 + Im ** 2)\n e2 = Im / (Re ** 2 + Im ** 2)\n\n if iterations > 0 and N_ZLP is not None:\n # Surface losses correction:\n # Calculates the surface ELF from a vaccumm border effect\n # A simulated surface plasmon is subtracted from the ELF\n Srfelf = 4 * e2 / ((e1 + 1) ** 2 + e2 ** 2) - Im\n adep = (tgt / (eaxis + delta) *\n np.arctan(beta * tgt / eaxis) -\n beta / 1000. /\n (beta ** 2 + eaxis ** 2. 
/ tgt ** 2))\n Srfint = 2000 * K * adep * Srfelf / rk0 / te * ddeltaE #axis.scale\n if correct_S_s == True:\n print(\"correcting S_s\")\n Srfint[Srfint<0] = 0\n Srfint[Srfint>S_E] = S_E[Srfint>S_E]\n y = S_E - Srfint\n _logger.debug('Iteration number: %d / %d', io + 1, iterations)\n if iterations == io + 1 and full_output is True:\n output['S_s'] = Srfint\n del Srfint\n\n eps = (e1 + e2 * 1j)\n del y\n del I_EELS\n if 'thickness' in output:\n # As above,prevent errors if the signal is a single spectrum\n output['thickness'] = te\n if full_output is False:\n return eps\n else:\n return eps, output", "def KFilt(sample,fs=25):\n\t#kalman filter inputs\n \n # Dimensions of parameters:\n # 'transition_matrices': 2,\n # 'transition_offsets': 1,\n # 'observation_matrices': 2,\n # 'observation_offsets': 1,\n # 'transition_covariance': 2,\n # 'observation_covariance': 2,\n # 'initial_state_mean': 1,\n # 'initial_state_covariance': 2,\n \n n_timesteps = len(sample)\n trans_mat = []\n\n\t#mask missing values\n observations = np.ma.array(sample,mask=np.zeros(sample.shape))\n missing_loc = np.where(np.isnan(sample))\n observations[missing_loc[0][:],missing_loc[1][:]] = np.ma.masked\n\t\n\t#Import Kalman filter, inerpolate missing points and get 2nd, 3rd orde kinematics\n dt = 1./25\t#Length of each frame (should be iether 1/25 or 1/30)\t\n n_timesteps = len(sample)\n \n observation_matrix = np.array([[1,0,0,0],\n [0,1,0,0]])#np.eye(4) \n t = np.linspace(0,len(observations)*dt,len(observations))\n q = np.cov(observations.T[:2,:400])\n qdot = np.cov(np.diff(observations.T[:2,:400]))#np.cov(observations[:1,:400])\n\n h=(t[-1]-t[0])/t.shape[0]\n A=np.array([[1,0,h,.5*h**2], \n [0,1,0,h], \n [0,0,1,0],\n [0,0,0,1]]) \n\n init_mean = [sample[0],0,0] #initial mean should be close to the first point, esp if first point is human-picked and tracking starts at the beginning of a video\n observation_covariance = q*500 #ADJUST THIS TO CHANGE SMOOTHNESS OF FILTER\n init_cov = np.eye(4)*.001#*0.0026\n transition_matrix = A\n transition_covariance = np.array([[q[0,0],q[0,1],0,0],\n [q[1,0],q[1,1],0,0],\n [0,0,qdot[0,0],qdot[0,1]],\n [0,0,qdot[1,0],qdot[1,1]]])\n\n kf = KalmanFilter(transition_matrix, observation_matrix,transition_covariance,observation_covariance,n_dim_obs=2)\n\n kf = kf.em(observations,n_iter=1,em_vars=['transition_covariance','transition_matrix','observation_covariance'])\n\n #pdb.set_trace()\n \n global trans_mat, trans_cov, init_cond\n x_filt = kf.filter(observations[0])[0]#observations.T[0])[0]\n kf_means = kf.smooth(observations[0])[0]\n\t\n return kf_means,x_filt #np.column_stack((color_x[:,0],color_y[:,0],color_x[:,1],color_y[:,1])),frames", "def get_func(k_center,enk,I,gamma,gamma_k):\n\n def lorentzian_k(k):\n return 1./np.pi * gamma_k / ( (k-k_center)**2 + gamma_k**2)\n\n def lorentzian(k,omega):\n return I * gamma / ( (omega-enk)**2 + gamma**2) * lorentzian_k(k)\n\n return lorentzian", "def kramers_kronig_hs(self, I_EELS,\n N_ZLP=None,\n iterations=1,\n n=None,\n t=None,\n delta=0.5, correct_S_s=False):\n output = {}\n # Constants and units\n me = 511.06\n\n\n e0 = self.e0\n beta = self.beta\n\n eaxis = self.deltaE[self.deltaE > 0] # axis.axis.copy()\n S_E = I_EELS[self.deltaE > 0]\n y = I_EELS[self.deltaE > 0]\n l = len(eaxis)\n i0 = N_ZLP\n\n # Kinetic definitions\n ke = e0 * (1 + e0 / 2. 
/ me) / (1 + e0 / me) ** 2 #m0 v**2\n tgt = e0 * (2 * me + e0) / (me + e0)\n rk0 = 2590 * (1 + e0 / me) * np.sqrt(2 * ke / me) #me c**2 / (hbar c) gamma sqrt(2Ekin /(me c**2))\n \n for io in range(iterations):\n # Calculation of the ELF by normalization of the SSD\n # We start by the \"angular corrections\"\n Im = y / (np.log(1 + (beta * tgt / eaxis) ** 2)) / self.ddeltaE # axis.scale\n if n is None and t is None:\n raise ValueError(\"The thickness and the refractive index are \"\n \"not defined. Please provide one of them.\")\n elif n is not None and t is not None:\n raise ValueError(\"Please provide the refractive index OR the \"\n \"thickness information, not both\")\n elif n is not None:\n # normalize using the refractive index.\n K = np.sum(Im / eaxis) * self.ddeltaE\n K = K / (np.pi / 2) / (1 - 1. / n ** 2)\n te = (332.5 * K * ke / i0)\n \n Im = Im / K\n\n # Kramers Kronig Transform:\n # We calculate KKT(Im(-1/epsilon))=1+Re(1/epsilon) with FFT\n # Follows: D W Johnson 1975 J. Phys. A: Math. Gen. 8 490\n # Use an optimal FFT size to speed up the calculation, and\n # make it double the closest upper value to workaround the\n # wrap-around problem.\n esize = next_fast_len(2 * l) # 2**math.floor(math.log2(l)+1)*4\n q = -2 * np.fft.fft(Im, esize).imag / esize #TODO : min twee?????\n\n q[:l] *= -1\n q = np.fft.fft(q)\n # Final touch, we have Re(1/eps)\n Re = q[:l].real + 1 #TODO: plus 1???\n # Egerton does this to correct the wrap-around problem, but in our\n # case this is not necessary because we compute the fft on an\n # extended and padded spectrum to avoid this problem.\n # Re=real(q)\n # Tail correction\n # vm=Re[axis.size-1]\n # Re[:(axis.size-1)]=Re[:(axis.size-1)]+1-(0.5*vm*((axis.size-1) /\n # (axis.size*2-arange(0,axis.size-1)))**2)\n # Re[axis.size:]=1+(0.5*vm*((axis.size-1) /\n # (axis.size+arange(0,axis.size)))**2)\n\n # Epsilon appears:\n # We calculate the real and imaginary parts of the CDF\n e1 = Re / (Re ** 2 + Im ** 2)\n e2 = Im / (Re ** 2 + Im ** 2)\n\n if iterations > 0 and N_ZLP is not None: #TODO: loop weghalen.\n # Surface losses correction:\n # Calculates the surface ELF from a vaccumm border effect\n # A simulated surface plasmon is subtracted from the ELF\n Srfelf = 4 * e2 / ((e1 + 1) ** 2 + e2 ** 2) - Im\n adep = (tgt / (eaxis + delta) *\n np.arctan(beta * tgt / eaxis) -\n beta / 1000. /\n (beta ** 2 + eaxis ** 2. 
/ tgt ** 2))\n Srfint = 2000 * K * adep * Srfelf / rk0 / te * self.ddeltaE # axis.scale\n if correct_S_s == True:\n print(\"correcting S_s\")\n Srfint[Srfint < 0] = 0\n Srfint[Srfint > S_E] = S_E[Srfint > S_E]\n y = S_E - Srfint\n _logger.debug('Iteration number: %d / %d', io + 1, iterations)\n\n eps = (e1 + e2 * 1j)\n del y\n del I_EELS\n if 'thickness' in output:\n # As above,prevent errors if the signal is a single spectrum\n output['thickness'] = te\n\n return eps, te, Srfint", "def weiner_tf(H, K):\r\n\r\n W = (1 / H) * ((np.conjugate(H) * H) / ((np.conjugate(H) * H) + K))\r\n return W", "def folded_voigt_kernel_logst(k,log_nstbeta,log_ngammaL,dLarray):\n\n beta=jnp.exp(log_nstbeta)\n gammaL=jnp.exp(log_ngammaL)\n def ffold(val,dL):\n val=val+jnp.exp(-2.0*((jnp.pi*beta*(k[:,None]+dL))**2 \\\n + jnp.pi*gammaL[None,:]*(k[:,None]+dL)))\n val=val+jnp.exp(-2.0*((jnp.pi*beta*(k[:,None]-dL))**2 \\\n + jnp.pi*gammaL[None,:]*(dL-k[:,None])))\n null=0.0\n return val, null\n val=jnp.exp(-2.0*((jnp.pi*beta*k[:,None])**2 + jnp.pi*gammaL[None,:]*k[:,None]))\n \n val,nullstack=scan(ffold, val, dLarray)\n \n return val", "def eg3(N_train=1000, N_test=500, depend_ratio_train=0.8, depend_ratio_test=0.2, feature_num=10, stable_ratio=0.4):\n\n def eg3_kernel(n, p, stable_ratio=0.4, depend_ratio=0.8):\n p_stable = int(p * stable_ratio)\n p_noise = p - p_stable\n stable_feature = np.random.randn(n, p_stable)\n noise_feature_dependent = np.zeros([n, p_noise])\n noise_feature_independent = np.random.randn(n, p_noise)\n for i in range(p_noise):\n noise_feature_dependent[:, i] = stable_feature[:, i % p_stable] + stable_feature[:,\n (i + 1) % p_stable] + 2 * np.random.randn(\n n) # still need noise\n noise_depend_label = np.random.uniform(0, 1, n).reshape(-1, 1)\n noise_depend_label = np.concatenate([noise_depend_label] * p_noise, axis=1)\n noise_feature = np.where(noise_depend_label < depend_ratio, noise_feature_dependent, noise_feature_independent)\n\n b = np.zeros([p_stable, 1])\n linear_len = int(p_stable / 2)\n\n for i in range(linear_len): # linear part\n b[i, 0] = (-1) ** i * (i % 3 + 1) * p / 3\n for i in range(linear_len, b.shape[0]): # nonlinear part\n b[i, 0] = p / 2\n\n linear_part = np.matmul(stable_feature[:, :linear_len], b[:linear_len, 0])\n nolinear_part = np.zeros([n, 1])\n for i in range(linear_len, b.shape[0]):\n temp = stable_feature[:, i % p_stable] * stable_feature[:, (i + 1) % p_stable] * b[i, 0]\n temp = temp.reshape(-1, 1)\n nolinear_part += temp\n\n Y = linear_part.reshape(-1, 1) + nolinear_part + np.random.randn(n, 1)\n\n data = {}\n data['stable'] = stable_feature\n data['noise'] = noise_feature\n data['Y'] = Y\n data['params'] = b\n data['kernel'] = 'eg3'\n return data\n\n data_train = eg3_kernel(n=N_train, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_train)\n data_test = eg3_kernel(n=N_test, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_test)\n return data_train, data_test", "def gaussian_filter(img,f=5,K=1,var=1):\n i_x, i_y = np.shape(img) # image size\n radi = f//2 # window radius\n\n # create gaussian kernel\n def gaussian_kernel(f,K,var):\n \n # create coordinate information \n if f//2 == 0:\n x = np.linspace(-radi,radi,f+1)\n y = np.linspace(-radi,radi,f+1)\n x = np.delete(x, radi)\n y = np.delete(y, radi)\n else:\n x = np.linspace(-radi,radi,f)\n y = np.linspace(-radi,radi,f)\n\n m_x, m_y = np.meshgrid(x,y) # create coordinate\n r_gauss = m_x**2 + m_y**2 # distance to origin\n gauss = K*(np.exp(-r_gauss/(2*(var**2)))) # create 
kernel\n return gauss/gauss.sum()\n \n #mirror padding\n def mir_padding(img,f):\n img_p = np.zeros((i_x+2*radi,i_y+2*radi)) #create padding image\n img_p[radi:i_x+radi,radi:i_y+radi] = img #throw original image to padding image\n img_p[0:radi,radi:i_y+radi] = img[radi-1::-1,:] # padding top rows\n img_p[-radi::1,radi:i_y+radi] = img[-1:-radi-1:-1,:] # padding bottom rows\n img_p[radi:i_x+radi,0:radi] = img[:,radi-1::-1] # padding left column\n img_p[radi:i_x+radi,-radi::1] = img[:,-1:-radi-1:-1] # padding right column\n for i in range(f):\n img_p[0:radi,i] = img[radi-1-i,radi-1::-1] # padding upper-left corner\n img_p[0:radi,-i] = img[radi-1-i,-radi::1] # padding upper-righ corner\n img_p[-1:-radi-1:-1,i] = img[-radi+i,radi-1::-1] # padding lower-left corner\n img_p[-1:-radi-1:-1,-i] = img[-radi+i,-radi::1] # padding lower-right corner\n return img_p\n\n img_p = mir_padding(img,f) # create padding image\n g_kernel = gaussian_kernel(f,K,var) # create gaussian kernel\n\n #seperate kernel\n E = g_kernel[0,0]\n c = g_kernel[:,0]\n wT = np.reshape(g_kernel[0,:]/E,(f,1))\n\n gauss_image = np.zeros([i_x,i_y]) # create gauss image\n temp_image = np.zeros([i_x,i_y]) # create temp image for two 1D convolution\n old_c_sum = c.sum() # calculate sum of c before modification\n\n # if elements of kernel are located within area of padding, substitute value with 0\n # calculate new value base on ratio between sum before and after modification\n for j in range(i_y):\n y_bound = i_y - j\n mod_c = c.copy()\n if j < radi:\n mod_c[0:radi-j] = 0 \n new_c_sum = mod_c.sum()\n mod_c = mod_c*old_c_sum/new_c_sum \n if j > i_y - radi - 1:\n mod_c[-1:-radi+y_bound-1:-1] = 0 \n new_c_sum = mod_c.sum()\n mod_c = mod_c*old_c_sum/new_c_sum \n for i in range(i_x):\n temp_image[i,j] = np.sum(img_p[i+radi,j:j+f]*mod_c)\n\n temp_image = mir_padding(temp_image,f) # create padding temp image for next 1D convolution\n old_wT_sum = wT.sum() # calculate sum of wT before modification\n\n # if elements of kernel are located within area of padding, substitute value with 0\n # calculate new value base on ratio between sum before and after modification\n for i in range(i_x):\n x_bound = i_x - i\n mod_wT = wT.copy()\n if i < radi:\n mod_wT[0:radi-i] = 0 \n new_wT_sum = mod_wT.sum()\n mod_wT = mod_wT*old_wT_sum/new_wT_sum \n if i > i_x - radi - 1:\n mod_wT[-1:-radi+x_bound-1:-1] = 0 \n new_wT_sum = mod_wT.sum()\n mod_wT = mod_wT*old_wT_sum/new_wT_sum \n for j in range(i_y):\n gauss_image[i,j] = np.sum(temp_image[i:i+f,j+radi]*mod_wT.T)\n\n return gauss_image", "def rasm_mode(self, K, MAX_ITER=40):\r\n #old_Ki_f = np.zeros((self.N, 1))\r\n\r\n #Start f's at zero originally of if we have gone off track, try restarting\r\n if self.old_Ki_f is None or self.bad_fhat:\r\n old_Ki_f = np.random.rand(self.N, 1)/50.0\r\n #old_Ki_f = self.Y\r\n f = np.dot(K, old_Ki_f)\r\n else:\r\n #Start at the old best point\r\n old_Ki_f = self.old_Ki_f.copy()\r\n f = self.f_hat.copy()\r\n\r\n new_obj = -np.inf\r\n old_obj = np.inf\r\n\r\n def obj(Ki_f, f):\r\n return -0.5*np.dot(Ki_f.T, f) + self.noise_model.logpdf(f, self.data, extra_data=self.extra_data)\r\n\r\n difference = np.inf\r\n epsilon = 1e-7\r\n #step_size = 1\r\n #rs = 0\r\n i = 0\r\n\r\n while difference > epsilon and i < MAX_ITER:\r\n W = -self.noise_model.d2logpdf_df2(f, self.data, extra_data=self.extra_data)\r\n\r\n W_f = W*f\r\n grad = self.noise_model.dlogpdf_df(f, self.data, extra_data=self.extra_data)\r\n\r\n b = W_f + grad\r\n W12BiW12Kb, _ = self._compute_B_statistics(K, W.copy(), np.dot(K, 
b))\r\n\r\n #Work out the DIRECTION that we want to move in, but don't choose the stepsize yet\r\n full_step_Ki_f = b - W12BiW12Kb\r\n dKi_f = full_step_Ki_f - old_Ki_f\r\n\r\n f_old = f.copy()\r\n def inner_obj(step_size, old_Ki_f, dKi_f, K):\r\n Ki_f = old_Ki_f + step_size*dKi_f\r\n f = np.dot(K, Ki_f)\r\n # This is nasty, need to set something within an optimization though\r\n self.tmp_Ki_f = Ki_f.copy()\r\n self.tmp_f = f.copy()\r\n return -obj(Ki_f, f)\r\n\r\n i_o = partial_func(inner_obj, old_Ki_f=old_Ki_f, dKi_f=dKi_f, K=K)\r\n #Find the stepsize that minimizes the objective function using a brent line search\r\n #The tolerance and maxiter matter for speed! Seems to be best to keep them low and make more full\r\n #steps than get this exact then make a step, if B was bigger it might be the other way around though\r\n #new_obj = sp.optimize.minimize_scalar(i_o, method='brent', tol=1e-4, options={'maxiter':5}).fun\r\n new_obj = sp.optimize.brent(i_o, tol=1e-4, maxiter=10)\r\n f = self.tmp_f.copy()\r\n Ki_f = self.tmp_Ki_f.copy()\r\n\r\n #Optimize without linesearch\r\n #f_old = f.copy()\r\n #update_passed = False\r\n #while not update_passed:\r\n #Ki_f = old_Ki_f + step_size*dKi_f\r\n #f = np.dot(K, Ki_f)\r\n\r\n #old_obj = new_obj\r\n #new_obj = obj(Ki_f, f)\r\n #difference = new_obj - old_obj\r\n ##print \"difference: \",difference\r\n #if difference < 0:\r\n ##print \"Objective function rose\", np.float(difference)\r\n ##If the objective function isn't rising, restart optimization\r\n #step_size *= 0.8\r\n ##print \"Reducing step-size to {ss:.3} and restarting optimization\".format(ss=step_size)\r\n ##objective function isn't increasing, try reducing step size\r\n #f = f_old.copy() #it's actually faster not to go back to old location and just zigzag across the mode\r\n #old_obj = new_obj\r\n #rs += 1\r\n #else:\r\n #update_passed = True\r\n\r\n #old_Ki_f = self.Ki_f.copy()\r\n\r\n #difference = abs(new_obj - old_obj)\r\n #old_obj = new_obj.copy()\r\n difference = np.abs(np.sum(f - f_old)) + np.abs(np.sum(Ki_f - old_Ki_f))\r\n #difference = np.abs(np.sum(Ki_f - old_Ki_f))/np.float(self.N)\r\n old_Ki_f = Ki_f.copy()\r\n i += 1\r\n\r\n self.old_Ki_f = old_Ki_f.copy()\r\n\r\n #Warn of bad fits\r\n if difference > epsilon:\r\n self.bad_fhat = True\r\n warnings.warn(\"Not perfect f_hat fit difference: {}\".format(difference))\r\n elif self.bad_fhat:\r\n self.bad_fhat = False\r\n warnings.warn(\"f_hat now perfect again\")\r\n\r\n self.Ki_f = Ki_f\r\n return f", "def SE(H, W):\n\n no_real, N, N, K, M = H.shape\n all_powers = np.swapaxes(np.swapaxes(H, 0, 1) @ hermitian(W), 0, 1)\n all_powers = np.abs(all_powers) ** 2\n\n\n\n # (no_real, N, N, K, K)\n # (no_real, n_t, n, k, k_neighbor)\n # the power coming from BS n_t to User k in BS n, using the\n # precoding of BS n_t to user k_neighbor in BS n1\n\n\n p_sig = np.zeros((no_real, N, K))\n p_int = np.zeros((no_real, N, K, N))\n sinr = np.zeros_like(p_sig)\n\n\n for r in range(no_real):\n for n in range(N):\n for k in range(K):\n p_sig[r, n, k] = all_powers[r, n, n, k, k]\n for n_t in range(N):\n p_int[r, n, k, n_t] = all_powers[r, n_t, n, k].sum()\n if n_t == n:\n p_int[r, n, k, n_t] -= p_sig[r,n,k]\n sinr = p_sig / ((p_int).sum(axis=-1) + 1)\n return np.log2(1 + sinr), p_sig, p_int", "def k_h(self):\n # Convert `self.gamma` to a regular length scale.\n gamma_scale = B.sqrt(1 / (2 * self.gamma))\n k_h = EQ().stretch(gamma_scale) # Kernel of filter before window\n k_h *= lambda t: B.exp(-self.alpha * t**2) # Window\n if self.causal:\n k_h *= 
lambda t: B.cast(self.dtype, t >= 0) # Causality constraint\n return k_h", "def gauss_kernel(n_fwhm,sigma):\n\n x_length = int(n_fwhm * sigma + 0.5) #Add 0.5 to approximate to nearest integer\n y_length = x_length\n \n \n x, y = mgrid[-x_length:x_length+1, -y_length:y_length+1]\n g = numpy.exp(-(x**2/(2*(float(sigma)**2))+y**2/(2*(float(sigma)**2))))\n return g / g.sum()", "def GenLayerSN(ngf, k):\n d_in = 2**k \n d_out = 2**(k-1)\n out = nn.Sequential( nn.utils.spectral_norm(\n nn.ConvTranspose2d(ngf * d_in, ngf * d_out, kernel_size, stride, padding, bias=False)),\n nn.BatchNorm2d(ngf * d_out),\n nn.ReLU(True) )\n return out", "def inp_kernel(r, ktype):\n \n if ktype == 'uniform':\n \n if r < 1.:\n return 1./((4./3.)*pi)\n else:\n return 0.\n \n elif ktype == 'sph-anarchy':\n \n if r <= 1.: return (21./(2.*pi)) * ((1. - r)*(1. - r)*(1. - r)*(1. - r)*(1. + 4.*r)) \n else: return 0. \n \n elif ktype == 'gadget-2':\n \n if r < 0.5: return (8./pi) * (1. - 6*(r*r) + 6*(r*r*r))\n elif r < 1.: return (8./pi) * 2 * ((1. - r)*(1. - r)*(1. - r))\n else: return 0.\n \n elif ktype == 'cubic':\n \n if r < 0.5: return (2.546479089470 + 15.278874536822 * (r - 1.0) * r * r)\n elif r < 1: return 5.092958178941 * (1.0 - r) * (1.0 - r) * (1.0 - r)\n else: return 0\n \n elif ktype == 'quintic':\n \n if r < 0.333333333: return 27.0*(6.4457752*r*r*r*r*(1.0-r) -1.4323945*r*r +0.17507044)\n elif r < 0.666666667: return 27.0*(3.2228876*r*r*r*r*(r-3.0) +10.7429587*r*r*r -5.01338071*r*r +0.5968310366*r +0.1352817016)\n elif r < 1: return 27.0*0.64457752*(-r*r*r*r*r +5.0*r*r*r*r -10.0*r*r*r +10.0*r*r -5.0*r +1.0)\n else: return 0\n \n else:\n \n print (\"Doesn't recognize the kernel. Input your own kernel in `inp_kernel`\")\n exit()", "def EisensteinHu_nowiggle_Pk(self,\n z = 0.,\n k = np.logspace(-4., 2., 1001),\n sigma_8 = 0.83):\n\n om_m = self.omega_cdm+self.omega_b\n om_b = self.omega_b\n ns = self.ns\n h = self.h\n theta = self.T_cmb/2.7\n \n #if self.w0 != -1. or self.wa != 0.:\n # warnings.warn(\"nw_Pk is not able to reproduce non-static dark energy with w0 != -1. The dark enerdy parameters will be set to w0 = -1, wa = 0\")\n if self.Omega_K != 0.:\n #warnings.warn(\"EisensteinHu_Pk is not able to reproduce non-flat FRW metric! 
The Omega_K parameter will be transferred to Omega_lambda such that Omega_lambda -> (Omega_lambda + Omega_K)\")\n om_m -= self.Omega_K\n\n kEH = k*h\n s = 44.5*np.log(9.83/om_m)/np.sqrt(1+10*(om_b)**0.75)\n Gamma = om_m/h\n AG = 1 - 0.328*np.log(431*om_m)*om_b/om_m + 0.38*np.log(22.3*om_m)*(om_b/om_m)**2\n Gamma = Gamma*(AG+(1-AG)/(1+(0.43*kEH*s)**4))\n q = kEH * theta**2/Gamma/h\n L0 = np.log(2*np.e + 1.8*q)\n C0 = 14.2 + 731/(1 + 62.5*q)\n T0 = L0/(L0 + C0*q**2)\n PEH = (kEH*h)**ns*T0**2\n\n norm = sigma_8/self.compute_sigma_8(k = k, pk = PEH)\n Pk = np.expand_dims(PEH,0)*np.expand_dims(norm**2.*self.growth_factor_scale_independent(z)**2.,1)\n\n return k, Pk", "def nd_kernel(n):\n n = int(n)\n total_size = 3**n\n mid_point = int((3**n - 1)/2)\n kern = np.zeros(total_size, dtype=bool)\n for i in range(n):\n kern[mid_point-3**i] = True\n kern[mid_point+3**i] = True\n new_shape = 3*np.ones(n, dtype=int) \n unnormed_kern = kern.reshape(new_shape)\n return unnormed_kern/unnormed_kern.sum()", "def focus_field_beam(shape = (128,128,128),\n units = (0.1,0.1,0.1),\n lam =.5, NA = .6, n0 = 1.,\n return_all_fields = False,\n n_integration_steps = 200):\n\n\n p = OCLProgram(absPath(\"kernels/psf_debye.cl\"),\n build_options = [\"-I\",absPath(\"kernels\"),\"-D\",\"INT_STEPS=%s\"%n_integration_steps])\n\n if np.isscalar(NA):\n NA = [0.,NA]\n \n Nx0, Ny0, Nz0 = shape\n dx, dy, dz = units\n\n #FIXME: the loop below does not yet work for odd inputs\n if not Nx0%2+Ny0%2+Nz0%2==0:\n raise NotImplementedError(\"odd shapes not supported yet\")\n\n\n alphas = np.arcsin(np.array(NA)/n0)\n assert len(alphas)%2 ==0\n\n # as we assume the psf to be symmetric, we just have to calculate each octant\n Nx = Nx0//2+1\n Ny = Ny0//2+1\n Nz = Nz0//2+1\n\n u_g = OCLArray.empty((Nz,Ny,Nx),np.float32)\n ex_g = OCLArray.empty(u_g.shape,np.complex64)\n ey_g = OCLArray.empty(u_g.shape,np.complex64)\n ez_g = OCLArray.empty(u_g.shape,np.complex64)\n\n alpha_g = OCLArray.from_array(alphas.astype(np.float32))\n\n \n p.run_kernel(\"debye_wolf\",u_g.shape[::-1],None,\n ex_g.data,ey_g.data,ez_g.data, u_g.data,\n np.float32(1.),np.float32(0.),\n np.float32(0.),np.float32(dx*(Nx-1.)),\n np.float32(0.),np.float32(dy*(Ny-1.)),\n np.float32(0.),np.float32(dz*(Nz-1.)),\n np.float32(lam), np.float32(n0),\n alpha_g.data, np.int32(len(alphas)))\n\n u = u_g.get()\n ex = ex_g.get()\n ey = ey_g.get()\n ez = ez_g.get()\n\n u_all = np.empty((Nz0,Ny0,Nx0),np.float32)\n ex_all = np.empty((Nz0,Ny0,Nx0),np.complex64)\n ey_all = np.empty((Nz0,Ny0,Nx0),np.complex64)\n ez_all = np.empty((Nz0,Ny0,Nx0),np.complex64)\n\n sx = [slice(0,Nx),slice(Nx,Nx0)]\n sy = [slice(0,Ny),slice(Ny,Ny0)]\n sz = [slice(0,Nz),slice(Nz,Nz0)]\n\n\n\n # spreading the calculated octant to the full volume\n for i,j,k in itertools.product([0,1],[0,1],[0,1]):\n\n # i, j, k = 0 indicates the + octant\n\n u_all[sz[1-i],sy[1-j],sx[1-k]] = u[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n if i ==0:\n ex_all[sz[1-i],sy[1-j],sx[1-k]] = ex[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n ey_all[sz[1-i],sy[1-j],sx[1-k]] = ey[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n ez_all[sz[1-i],sy[1-j],sx[1-k]] = ez[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n\n else:\n ex_all[sz[1-i],sy[1-j],sx[1-k]] = np.conjugate(ex[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k])\n ey_all[sz[1-i],sy[1-j],sx[1-k]] = np.conjugate(ey[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k])\n 
ez_all[sz[1-i],sy[1-j],sx[1-k]] = np.conjugate(ez[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k])\n\n if return_all_fields:\n return u_all, ex_all, ey_all, ez_all\n else:\n return u_all", "def tune_ghosal(fimg,K_s=5,k_m=None,N_k=5000,l_max=0.5,phi_min=1.45,outlier_sigma=2,blur=10):\n\t# open\n\timg = read_image(fimg)\n\tcamera = fimg[-10]\n\t\n\t# ensure N_k is even\n\tif N_k%2!=0:\n\t\tN_k+=1\n\t\tprint(\"N_k=%0.5f\"%N_k)\n\t\n\t# blur image\n\timg = blurd_image(img,order=1,strength=blur,speed='fast')\n\t\n\t# calculate k,l and phi for the whole image\n\t_,_,k,l,phi = ghosal_edge_v2(img,Ks=K_s,debug=True)\n\t\n\t# find mean k\n\tif k_m == None:\n\t\tij_m = select_points(img,n=1,prompt=\"choose a characteristic edge location\")\n\t\tij_m = np.array(ij_m[0],dtype=int)\n\t\tk_m = k[ij_m[1],ij_m[0]]\n\t\tprint(\"k_m=\",k_m)\n\t\n\t# find k limits\n\tk_sort = np.sort(k.flatten())\n\tk_sort[np.isnan(k_sort)]=0 # get rid of nans for argmin\n\t#print(np.shape(k_sort))\n\t#i_km = np.where(k_sort==k_m)[0][0]\n\ti_km = (np.abs(k_sort - k_m)).argmin()\n\ti_kmin = int(i_km-N_k/2)\n\ti_kmax = int(i_km+N_k/2)\n\tk_min = k_sort[i_kmin]\n\tk_max = k_sort[i_kmax]\n\tedg,org = ghosal_edge_v2(img,K_s,kmax=k_max,kmin=k_min,lmax=l_max,phimin=phi_min)\n\t\n\t# outlier removal\n\tpts,sig = line_fit(edg[:,1],edg[:,0])\n\tptsy = np.mean(pts[:,1])\n\taccepted = ((edg[:,0]<(ptsy+outlier_sigma*sig)) & (edg[:,0]>(ptsy-outlier_sigma*sig)))\n\tedga = edg[accepted,:]\n\t\n\t# plotting\n\tvectors = edg-org # show the vectors also\n\tplt.imshow(img)\n\tplt.quiver(org[:,1],org[:,0],vectors[:,1],vectors[:,0],angles='xy',\n\t\tscale_units='xy', scale=1, color = 'orange')\n\tplt.scatter(edga[:,1],edga[:,0],c='blue',marker='.')\n\tplt.title(\"Validated Points\")\n\tplt.show()\n\t\n\t# save\n\tprint(\"Paramters:\\n Camera %s:\tK_s=%0.5f\tk_min=%0.5f\tk_max=%0.5f\tl_max=%0.5f\tphi_min=%0.5f outlier_sigma=%0.5f\tblur=%0.5f\"\n\t\t%(camera,K_s,k_min,k_max,l_max,phi_min,outlier_sigma, blur))\n\tsave = input(\"save current parameters?(y/n)\t\")\n\tif save==\"y\":\n\t\tsavename = fname_dir(fimg)\n\t\tsavename = savename+os.sep+camera+\"_ghosal_edge_parameters.txt\"\n\t\tsavematrix = np.squeeze([K_s,k_min, k_max, l_max, phi_min, outlier_sigma, blur])\n\t\tnp.savetxt(savename,savematrix,delimiter=\"\t\")\n\t\tprint(\"saved\\n\",savematrix)", "def kernel_kmer(X, Y, k=3):\n x_kmer, y_kmer = kmer(X, Y, k)\n\n sim = 0\n for a in x_kmer:\n for b in y_kmer:\n sim += GXY(a, b)\n\n return sim", "def create_low_pass_frequency_kernel(im, radius):\n kernel = create_high_pass_frequency_kernel(im, radius)\n kernel = 1 - kernel\n return kernel", "def create_low_pass_frequency_kernel(im, radius):\n kernel = create_high_pass_frequency_kernel(im, radius)\n kernel = 1 - kernel\n return kernel", "def bilinear_interpolation_kernel(in_channels, out_channels, ksize):\n\n factor = (ksize + 1) / 2\n if ksize % 2 == 1:\n center = factor - 1\n else:\n center = factor - 0.5\n og = np.ogrid[:ksize, :ksize]\n k = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)\n \n W = np.zeros((in_channels, out_channels, ksize, ksize)).astype(np.float32)\n W[range(in_channels), range(out_channels), :, :] = k\n return W", "def weak_lensing_kernel(cosmo, pzs, z, ell):\n z = np.atleast_1d(z)\n zmax = max([pz.zmax for pz in pzs])\n # Retrieve comoving distance corresponding to z\n chi = bkgrd.radial_comoving_distance(cosmo, z2a(z))\n\n # Extract the indices of pzs that can be treated as extended distributions,\n # and the ones that need to 
be treated as delta functions.\n pzs_extended_idx = [\n i for i, pz in enumerate(pzs) if not isinstance(pz, rds.delta_nz)\n ]\n pzs_delta_idx = [i for i, pz in enumerate(pzs) if isinstance(pz, rds.delta_nz)]\n # Here we define a permutation that would put all extended pzs at the begining of the list\n perm = pzs_extended_idx + pzs_delta_idx\n # Compute inverse permutation\n inv = np.argsort(np.array(perm, dtype=np.int32))\n\n # Process extended distributions, if any\n radial_kernels = []\n if len(pzs_extended_idx) > 0:\n\n @vmap\n def integrand(z_prime):\n chi_prime = bkgrd.radial_comoving_distance(cosmo, z2a(z_prime))\n # Stack the dndz of all redshift bins\n dndz = np.stack([pzs[i](z_prime) for i in pzs_extended_idx], axis=0)\n return dndz * np.clip(chi_prime - chi, 0) / np.clip(chi_prime, 1.0)\n\n radial_kernels.append(simps(integrand, z, zmax, 256) * (1.0 + z) * chi)\n # Process single plane redshifts if any\n if len(pzs_delta_idx) > 0:\n\n @vmap\n def integrand_single(z_prime):\n chi_prime = bkgrd.radial_comoving_distance(cosmo, z2a(z_prime))\n return np.clip(chi_prime - chi, 0) / np.clip(chi_prime, 1.0)\n\n radial_kernels.append(\n integrand_single(np.array([pzs[i].params[0] for i in pzs_delta_idx]))\n * (1.0 + z)\n * chi\n )\n # Fusing the results together\n radial_kernel = np.concatenate(radial_kernels, axis=0)\n # And perfoming inverse permutation to put all the indices where they should be\n radial_kernel = radial_kernel[inv]\n\n # Constant term\n constant_factor = 3.0 * const.H0 ** 2 * cosmo.Omega_m / 2.0 / const.c\n # Ell dependent factor\n ell_factor = np.sqrt((ell - 1) * (ell) * (ell + 1) * (ell + 2)) / (ell + 0.5) ** 2\n return constant_factor * ell_factor * radial_kernel", "def _kernel(r: float, h: float) -> float:\n sigma_2 = 10 / (7 * np.pi * h * h)\n q = abs(r / h)\n\n if q <= 1.0:\n q2 = q * q\n W = 1.0 - 1.5 * q2 * (1.0 - 0.5 * q)\n W *= sigma_2\n elif q <= 2.0:\n two_minus_q = 2 - q\n two_minus_q_c = np.power(two_minus_q, 3)\n W = 0.25 * two_minus_q_c\n W *= sigma_2\n else:\n W = 0\n\n return W", "def frame3dlin_KeMe(E,G,Kv1,Kv2,A1,A2,Iy1,Iy2,Iz1,Iz2,L,me1,me2,R=None):\n # --- Stifness matrix\n ke = np.array([\n [((A2+A1)*E)/(2*L) , 0 , 0 , 0 , 0 , 0 , -((A2+A1)*E)/(2*L) , 0 , 0 , 0 , 0 , 0] , \n [0 , ((6*Iz2+6*Iz1)*E)/L**3 , 0 , 0 , 0 , ((2*Iz2+4*Iz1)*E)/L**2 , 0 , -((6*Iz2+6*Iz1)*E)/L**3 , 0 , 0 , 0 , ((4*Iz2+2*Iz1)*E)/L**2] , \n [0 , 0 , ((6*Iy2+6*Iy1)*E)/L**3 , 0 , -((2*Iy2+4*Iy1)*E)/L**2 , 0 , 0 , 0 , -((6*Iy2+6*Iy1)*E)/L**3 , 0 , -((4*Iy2+2*Iy1)*E)/L**2 , 0] , \n [0 , 0 , 0 , ((Kv2+Kv1)*G)/(2*L) , 0 , 0 , 0 , 0 , 0 , -((Kv2+Kv1)*G)/(2*L) , 0 , 0] , \n [0 , 0 , -((2*Iy2+4*Iy1)*E)/L**2 , 0 , ((Iy2+3*Iy1)*E)/L , 0 , 0 , 0 , ((2*Iy2+4*Iy1)*E)/L**2 , 0 , ((Iy2+Iy1)*E)/L , 0] , \n [0 , ((2*Iz2+4*Iz1)*E)/L**2 , 0 , 0 , 0 , ((Iz2+3*Iz1)*E)/L , 0 , -((2*Iz2+4*Iz1)*E)/L**2 , 0 , 0 , 0 , ((Iz2+Iz1)*E)/L] , \n [-((A2+A1)*E)/(2*L) , 0 , 0 , 0 , 0 , 0 , ((A2+A1)*E)/(2*L) , 0 , 0 , 0 , 0 , 0] , \n [0 , -((6*Iz2+6*Iz1)*E)/L**3 , 0 , 0 , 0 , -((2*Iz2+4*Iz1)*E)/L**2 , 0 , ((6*Iz2+6*Iz1)*E)/L**3 , 0 , 0 , 0 , -((4*Iz2+2*Iz1)*E)/L**2] , \n [0 , 0 , -((6*Iy2+6*Iy1)*E)/L**3 , 0 , ((2*Iy2+4*Iy1)*E)/L**2 , 0 , 0 , 0 , ((6*Iy2+6*Iy1)*E)/L**3 , 0 , ((4*Iy2+2*Iy1)*E)/L**2 , 0] , \n [0 , 0 , 0 , -((Kv2+Kv1)*G)/(2*L) , 0 , 0 , 0 , 0 , 0 , ((Kv2+Kv1)*G)/(2*L) , 0 , 0] , \n [0 , 0 , -((4*Iy2+2*Iy1)*E)/L**2 , 0 , ((Iy2+Iy1)*E)/L , 0 , 0 , 0 , ((4*Iy2+2*Iy1)*E)/L**2 , 0 , ((3*Iy2+Iy1)*E)/L , 0] , \n [0 , ((4*Iz2+2*Iz1)*E)/L**2 , 0 , 0 , 0 , ((Iz2+Iz1)*E)/L , 0 , -((4*Iz2+2*Iz1)*E)/L**2 , 0 , 0 
, 0 , ((3*Iz2+Iz1)*E)/L]\n ])\n # --- Mass matrix\n me = np.array([\n [(me2+3*me1)/12 , 0 , 0 , 0 , 0 , 0 , (me2+me1)/12 , 0 , 0 , 0 , 0 , 0] , \n [0 , (3*me2+10*me1)/35 , 0 , 0 , 0 , (7*L*me2+15*L*me1)/420 , 0 , (9*me2+9*me1)/140 , 0 , 0 , 0 , -(6*L*me2+7*L*me1)/420] , \n [0 , 0 , (3*me2+10*me1)/35 , 0 , -(7*L*me2+15*L*me1)/420 , 0 , 0 , 0 , (9*me2+9*me1)/140 , 0 , (6*L*me2+7*L*me1)/420 , 0] , \n [0 , 0 , 0 , (me2+3*me1)/12 , 0 , 0 , 0 , 0 , 0 , (me2+me1)/12 , 0 , 0] , \n [0 , 0 , -(7*L*me2+15*L*me1)/420 , 0 , (3*L**2*me2+5*L**2*me1)/840 , 0 , 0 , 0 , -(7*L*me2+6*L*me1)/420 , 0 , -(L**2*me2+L**2*me1)/280 , 0] , \n [0 , (7*L*me2+15*L*me1)/420 , 0 , 0 , 0 , (3*L**2*me2+5*L**2*me1)/840 , 0 , (7*L*me2+6*L*me1)/420 , 0 , 0 , 0 , -(L**2*me2+L**2*me1)/280] , \n [(me2+me1)/12 , 0 , 0 , 0 , 0 , 0 , (3*me2+me1)/12 , 0 , 0 , 0 , 0 , 0] , \n [0 , (9*me2+9*me1)/140 , 0 , 0 , 0 , (7*L*me2+6*L*me1)/420 , 0 , (10*me2+3*me1)/35 , 0 , 0 , 0 , -(15*L*me2+7*L*me1)/420] , \n [0 , 0 , (9*me2+9*me1)/140 , 0 , -(7*L*me2+6*L*me1)/420 , 0 , 0 , 0 , (10*me2+3*me1)/35 , 0 , (15*L*me2+7*L*me1)/420 , 0] , \n [0 , 0 , 0 , (me2+me1)/12 , 0 , 0 , 0 , 0 , 0 , (3*me2+me1)/12 , 0 , 0] , \n [0 , 0 , (6*L*me2+7*L*me1)/420 , 0 , -(L**2*me2+L**2*me1)/280 , 0 , 0 , 0 , (15*L*me2+7*L*me1)/420 , 0 , (5*L**2*me2+3*L**2*me1)/840 , 0] , \n [0 , -(6*L*me2+7*L*me1)/420 , 0 , 0 , 0 , -(L**2*me2+L**2*me1)/280 , 0 , -(15*L*me2+7*L*me1)/420 , 0 , 0 , 0 , (5*L**2*me2+3*L**2*me1)/840]\n ])\n\n if (R is not None):\n RR = scipy.linalg.block_diag(R,R,R,R)\n me = np.transpose(RR).dot(me.dot(RR))\n ke = np.transpose(RR).dot(ke.dot(RR))\n\n return ke, me", "def gauss_ker(k, sig):\n\tx = np.linspace(-(k//2), (k//2), k)\n\tgx, gy = np.meshgrid(x, x)\n\tkernel = np.exp(-1*(gx**2 + gy**2)/(2*(sig**2)))\n\treturn kernel", "def kernel_gaussiano(image: np.ndarray, sigma: float, kind: str = 'low') -> np.ndarray:\n U, V = fourier_meshgrid(image)\n D = fourier_distance(U, V)\n H = np.exp( (-1.0 * D) / (2.0 * sigma**2) )\n \n if kind == 'high' or kind == 'highpass':\n H = 1.0 - H\n \n return H", "def kernal_mus(n_kernels):\n l_mu = [1]\n if n_kernels == 1:\n return l_mu\n\n bin_size = 2.0 / (n_kernels - 1) # score range from [-1, 1]\n l_mu.append(1 - bin_size / 2) # mu: middle of the bin\n for i in range(1, n_kernels - 1):\n l_mu.append(l_mu[i] - bin_size)\n print(l_mu)\n return l_mu", "def Heston_fft(self,alpha,n,B,K):\r\n bt = time.time()\r\n r = self.r\r\n T = self.T\r\n S0 = self.S0\r\n N = 2**n\r\n Eta = B / N\r\n Lambda_Eta = 2 * math.pi / N\r\n Lambda = Lambda_Eta / Eta\r\n \r\n J = np.arange(1,N+1,dtype = complex)\r\n vj = (J-1) * Eta\r\n m = np.arange(1,N+1,dtype = complex)\r\n Beta = np.log(S0) - Lambda * N / 2\r\n km = Beta + (m-1) * Lambda\r\n \r\n ii = complex(0,1)\r\n \r\n Psi_vj = np.zeros(len(J),dtype = complex)\r\n \r\n for zz in range(0,N):\r\n u = vj[zz] - (alpha + 1) * ii\r\n numer = self.Heston_cf(u)\r\n denom = (alpha + vj[zz] * ii) * (alpha + 1 + vj[zz] * ii)\r\n \r\n Psi_vj [zz] = numer / denom\r\n \r\n # Compute FTT\r\n xx = (Eta/2) * Psi_vj * np.exp(-ii * Beta * vj) * (2 - self.dirac(J-1))\r\n zz = np.fft.fft(xx)\r\n \r\n # Option price\r\n Mul = np.exp(-alpha * np.array(km)) / np.pi\r\n zz2 = Mul * np.array(zz).real\r\n k_List = list(Beta + (np.cumsum(np.ones((N, 1))) - 1) * Lambda)\r\n Kt = np.exp(np.array(k_List))\r\n \r\n Kz = []\r\n Z = []\r\n for i in range(len(Kt)):\r\n if( Kt[i]>1e-16 )&(Kt[i] < 1e16)& ( Kt[i] != float(\"inf\"))&( Kt[i] != float(\"-inf\")) &( zz2[i] != float(\"inf\"))&(zz2[i] != float(\"-inf\")) & (zz2[i] is 
not float(\"nan\")):\r\n Kz += [Kt[i]]\r\n Z += [zz2[i]]\r\n tck = interpolate.splrep(Kz , np.real(Z))\r\n price = np.exp(-r*T)*interpolate.splev(K, tck).real\r\n et = time.time()\r\n \r\n runt = et-bt\r\n\r\n return(price,runt)", "def K(p, E):\n R_loss, E_loss = p\n K_ = (8.0/pi**2)*R_loss*E_loss**2 * E / ((2.0*E_loss/pi)**2 + E**2)**2\n \"\"\"convolution kernel for Tougaard background\"\"\"\n #B, C = p\n #K_ = B * E / (C + E**2)**2\n K_ = K_*(K_>0)\n return K_", "def DisLayerSN(ndf, k):\n d_in = 2**k \n d_out = 2**(k+1)\n\n out = nn.Sequential(nn.utils.spectral_norm(\n nn.Conv2d(ndf*d_in, ndf*d_out, kernel_size, stride=stride, padding=padding, bias=False)), \n nn.BatchNorm2d(ndf * d_out), \n nn.LeakyReLU(0.2, inplace=True) )\n return out", "def test_uv_degrid_gaussian_kernel():\n\n layout = read_layout(layout_path=f\"{test_data}/test_mwa.txt\")\n xyz = enh_xyz(layout=layout, latitude=mwa_geo.latitude.radians)\n uvw = xyz_uvw(xyz=xyz, freq=freq, dec0=mwa_geo.latitude.radians, ha0=0)\n uv = uv_degrid(\n max_lambda=1400, nside=20, uvw=uvw, sigma=3, kersize=21, kernel=\"gaussian\"\n )\n\n assert uv.shape == (20, 20)\n assert uv[0, 0] == 1.295932713086053e-05", "def gkern1(kernlen=21, nsig=3):\n interval = (2*nsig+1.)/(kernlen)\n x = np.linspace(-nsig-interval/2., nsig+interval/2., kernlen+1) \n kern1d = np.diff(scipy.stats.norm.cdf(x))\n kernel_raw = np.sqrt(np.outer(kern1d, kern1d))\n kernel = kernel_raw/kernel_raw.sum()\n \n return kernel", "def jackknife_errors_CLF(pos,Phi,Ndivs,Lbox,M,L_bins,dL,Mhost_min,Mhost_max,Mhost):\n\n n_subBox = Ndivs*Ndivs*Ndivs # The number of sub volumes for the Jackknife resampling\n V_subBox = Vbox - Vbox/n_subBox # The volume of a Jackknife sample\n N = len(pos) \n delta = Lbox/Ndivs\n \n # Indices for the galaxies positions\n index = np.asarray([floor(pos[i,0]/delta) + (floor(pos[i,1]/delta)*Ndivs) + (floor(pos[i,2]/delta)*Ndivs*Ndivs) + 1 for i in range(N)]) # index for the position of particle2\n M_sub_sample = [] # keeps the absolute magnitude for the sub-samples\n Mhost_sub_sample = [] # keeps the halo mass for the sub-samples\n CLF_all = [] # keeps the values of the CLF for the full sample and for each of the sub-samples\n CLF_all.append(Phi)\n for k in range(1,n_subBox+1): # run over the sub-samples\n for i in range(0,N): # runs over all the points (galaxies)\n if (index[i] != k): # the point is inside the sub-box\n M_sub_sample.append(M[i]) # then add to sub-box list\n Mhost_sub_sample.append(Mhost[i])\n CLF_sub,L_bins = CLF(M_sub_sample,L_bins,dL,Mhost_min,Mhost_max,Mhost_sub_sample)\n CLF_all.append(CLF_sub)\n M_sub_sample = []\n Mhost_sub_sample = []\n\n\tn_subBox = float(n_subBox)\n full = np.asarray(CLF_all[0]) # the CLF for the full sample\n sub_samples = np.asarray(CLF_all[1:]) # the CLF for the Jackknife sub-samples\n after_subtraction = sub_samples - np.mean(sub_samples,axis=0)\n squared = after_subtraction**2\n error2 = ((n_subBox-1)/n_subBox)*squared.sum(axis=0)\n errors = error2**0.5\n return errors", "def I1(k, horn_width, hplane_effective_length, theta, phi):\n # Calculate the x-component of the wavenumber primed\n kx_p = k * sin(theta) * cos(phi) + pi / horn_width\n kx_m = k * sin(theta) * cos(phi) - pi / horn_width\n\n # Calculate the arguments of the Fresnel integrals\n t1_p = sqrt(1.0 / (pi * k * hplane_effective_length)) * (-k * horn_width / 2.0 - kx_p * hplane_effective_length)\n t2_p = sqrt(1.0 / (pi * k * hplane_effective_length)) * ( k * horn_width / 2.0 - kx_p * hplane_effective_length)\n\n t1_m = sqrt(1.0 / (pi * k * 
hplane_effective_length)) * (-k * horn_width / 2.0 - kx_m * hplane_effective_length)\n t2_m = sqrt(1.0 / (pi * k * hplane_effective_length)) * ( k * horn_width / 2.0 - kx_m * hplane_effective_length)\n\n # Calculate the Fresnel integrals\n s1p, c1p = fresnel(t1_p)\n s2p, c2p = fresnel(t2_p)\n\n s1m, c1m = fresnel(t1_m)\n s2m, c2m = fresnel(t2_m)\n\n # Build the terms from the Fresnel integrals\n fresnel_term1 = (c2p - c1p) + 1j * (s1p - s2p)\n fresnel_term2 = (c2m - c1m) + 1j * (s1m - s2m)\n\n # Calculate the phase terms\n phase_term1 = exp(1j * kx_p ** 2 * hplane_effective_length / (2.0 * k))\n phase_term2 = exp(1j * kx_m ** 2 * hplane_effective_length / (2.0 * k))\n\n return 0.5 * sqrt(pi * hplane_effective_length / k) * (phase_term1 * fresnel_term1 + phase_term2 * fresnel_term2)", "def calc_k(self):\n\t\n\tself.k = -np.array([self.sth*self.cphi, self.sth*self.sphi, self.cth])\n\n\treturn", "def get_kernel(X1, X2, charges1, charges2, sigma=1, mode=\"local\"):\n\n if len(X1.shape) > 2:\n\n K = get_atomic_local_kernel(X1, X2, charges1, charges2, sigma)\n\n else:\n\n K = laplacian_kernel(X2, X1, sigma)\n\n return K", "def write_kernel(w, k):\n w.writeln(\"void {k}(const Image<int>& in, Image<int>& out\".format(k=k.name))\n # write the tap signal in the function argument list\n for tapName in k.rtapNames:\n #tapType = k.edges[tapName].dtype\n #tapCType = dtypeMap[tapType]\n tapCType = getCType(k.edges[tapName])\n for indices in expand_range(k.edges[tapName].dim):\n w.writeln(\"\\t, {type} {sig}\".format(type=tapCType, sig=mangle((tapName, indices))))\n w.writeln(\")\")\n w.writeln(\"{\")\n w.indent()\n # TODO: insert size error checking into C code here\n\n w.writeln(\"for(int y = 0; y < in.height(); y++){\")\n w.indent()\n w.writeln(\"for(int x = 0; x < in.width(); x++){\")\n w.indent()\n\n \n # Grab the register declaration for the partial-pixel output and blow it into\n # the complete list of input registers\n startName = k.ppoutName\n #startType = k.edges[startName].dtype\n #startCType = dtypeMap[startType]\n startCType = getCType(k.edges[startName])\n for indices in expand_range(k.edges[startName].dim):\n # HACK: work with multi-channel or single-channel images\n z_idx = 0\n if len(indices) == 3:\n z_idx = indices[2]\n\n w.writeln(\"{type} {reg} = in(x+{xoff}, y+{yoff}, {z});\".format(\n type=startCType,\n reg=mangle((startName, indices)),\n xoff=(indices[0]-k.centroid[0]), \n yoff=(indices[1]-k.centroid[1]), z=z_idx))\n \n # Set up the constants\n for const in k.constants:\n # TODO: be careful here, because we need to be consistent with naming/indexing\n # TODO: handle int/float; infer datatype in parser\n w.writeln(\"const float {reg} = {val};\".format(reg=mangle((const[0], [0])), val=const[1]))\n \n w.writeln(\"\")\n\n\n #Special Register Examples for Reduce:\n #fix_17_0 pixel_out_pos[1:0] # Location of Reduce pixel in output image\n #fix_17_0 centroid_pos[1:0] # Location of Centroid in input image\n if \"centroid_pos\" in k.specialRegs:\n w.writeln(\"int centroid_pos_0 = x;\")\n w.writeln(\"int centroid_pos_1 = y;\")\n\n if \"pixel_out_pos\" in k.specialRegs:\n w.writeln(\"int pixel_out_pos_0 = x;\")\n w.writeln(\"int pixel_out_pos_1 = y;\")\n \n # Create a list of (name, index) tuples representing the valid (i.e., evaluated) signal\n validRegs = [(startName, i) for i in expand_range(k.edges[startName].dim)]\n validRegs += [(tapName, i) for tapName in k.rtapNames \n for i in expand_range(k.edges[tapName].dim)]\n validRegs += [(regName, i) for regName in k.specialRegs \n for i in 
expand_range(k.edges[regName].dim)]\n validRegs += [(c[0], [0]) for c in k.constants]\n \n # Make a copy of the list of operations which we can remove stuff from\n unprocessed = dict(k.ops)\n \n # Process all the operations\n while len(unprocessed) > 0:\n progress = False\n for opKey in unprocessed:\n op = k.ops[opKey]\n # Find an operation that can be evaluated\n if opOk(op, validRegs):\n #dtype = k.edges[op.result[0]].dtype\n #dtype = dtypeMap[dtype] # Look up the C-equivalent for this type\n dtype = getCType(k.edges[op.result[0]])\n # TODO: include integer/fraction width\n \n # TODO: error checking that we have the right number of operands - this should be done in the parser, actually\n # Evaluate it\n if op.name in ['max', 'min']:\n write_complex_op(w, op, dtype)\n elif op.name == \"sum\": \n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=str.join(' + ', mangle(op.operands))))\n elif op.name == \"mv\":\n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=mangle(op.operands[0])))\n elif op.name == \"add\":\n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=str.join(' + ', mangle(op.operands))))\n elif op.name == \"sub\":\n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=str.join(' - ', mangle(op.operands))))\n elif op.name == \"mult\":\n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=str.join(' * ', mangle(op.operands))))\n elif op.name == \"div\":\n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=str.join(' / ', mangle(op.operands))))\n\n elif op.name == \"lshift\":\n w.writeln(\"{dtype} {dst} = {op1} << {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"rshift\":\n w.writeln(\"{dtype} {dst} = {op1} >> {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"and\":\n w.writeln(\"{dtype} {dst} = {op1} & {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"or\":\n w.writeln(\"{dtype} {dst} = {op1} | {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"ne\":\n w.writeln(\"{dtype} {dst} = {op1} != {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"eq\":\n w.writeln(\"{dtype} {dst} = {op1} == {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"lt\":\n w.writeln(\"{dtype} {dst} = {op1} < {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"lte\":\n w.writeln(\"{dtype} {dst} = {op1} <= {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"gt\":\n w.writeln(\"{dtype} {dst} = {op1} > {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"gte\":\n w.writeln(\"{dtype} {dst} = {op1} >= {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"not\":\n w.writeln(\"{dtype} {dst} = !{src};\".format(dtype=dtype, dst=mangle(op.result), 
src=mangle(op.operands[0])))\n elif op.name == \"abs\":\n w.writeln(\"{dtype} {dst} = ({src} >= 0) ? {src} : (-{src});\".format(dtype=dtype, dst=mangle(op.result), src=mangle(op.operands[0])))\n elif op.name == \"inv\":\n w.writeln(\"{dtype} {dst} = -{src};\".format(dtype=dtype, dst=mangle(op.result), src=mangle(op.operands[0])))\n\n elif op.name == \"mux\":\n w.writeln(\"{dtype} {dst} = {cond} ? {op1} : {op2};\".format(dtype=dtype, dst=mangle(op.result), \\\n cond=mangle(op.operands[0]), op1=mangle(op.operands[1]), op2=mangle(op.operands[2])))\n else:\n print \"Unhandled operator \" + opKey\n \n validRegs.append(op.result)\n # Remove it from the list\n unprocessed.pop(opKey)\n progress = True\n break # We changed the list, so we gotta start over\n \n # If we went through the whole list without finding any ops to evaluate,\n # something is wrong and we need to give up.\n if progress is False:\n print \"Failed to evaluate some ops!\"\n for opKey in unprocessed:\n print \"\\t %s %s\" % (unprocessed[opKey].name, unprocessed[opKey].result)\n break\n \n for indices in expand_range(k.edges[k.sink].dim):\n #writeln('printf(\"result: %f\\\\n\", {reg});'.format(reg=mangle((k.sink, indices))))\n # TODO: make this handle depths other than 3\n w.writeln('out(x,y,{z}) = {reg};'.format(z=indices[0], reg=mangle((k.sink, indices))))\n\n w.unindent()\n w.writeln(\"}\")\n w.unindent()\n w.writeln(\"}\")\n w.unindent()\n w.writeln(\"} // END %s\" % k.name)\n w.writeln(\"\\n\")", "def create_filter_bank():\r\n kernels = []\r\n for theta in range(0, 2):\r\n theta = theta / 2. * np.pi\r\n for sigma in (3, 5):\r\n for frequency in (0.10, 0.25):\r\n kernel = np.real(gabor_kernel(frequency, theta=theta,\r\n sigma_x=sigma, sigma_y=sigma))\r\n kernels.append(kernel)\r\n print(len(kernels))\r\n return kernels", "def _calc_kernel(self,\n freq_1: float,\n time_1: float,\n freq_2: float,\n time_2: float,\n dagg: tuple\n ) -> Tuple[ndarray, ndarray]:\n dt = self._process_tensor.dt\n #pieces of kernel consist of some combination of phases and\n #Bose-Einstein factors\n n_1, n_2 = 0, 0\n if self._temp > 0:\n n_1 += np.exp(-freq_1/self._temp) / (1 - np.exp(-freq_1/self._temp))\n n_2 += np.exp(-freq_2/self._temp) / (1 - np.exp(-freq_2/self._temp))\n\n ker_dim = int(np.round(time_2 / dt))\n # calculate index corresponding to t_1\n switch = int(np.round(time_1 / dt))\n re_kernel = np.zeros((ker_dim, ker_dim), dtype = NpDtype)\n im_kernel = np.zeros((ker_dim, ker_dim), dtype = NpDtype)\n\n tpp_index, tp_index = np.meshgrid(\n np.arange(ker_dim), np.arange(ker_dim),\n indexing='ij') #array of indices for each array element\n regions = {\n 'a': (slice(switch), slice(switch)), #(0->t_1, 0->t_1)\n 'b': (slice(switch), slice(switch, None)), #(0->t_1, t_1->t)\n 'c': (slice(switch, None), slice(switch, None))} #(t_1->t, t_1->t)\n\n def phase(region, swap_ts = False):\n tk = tp_index[regions[region]]\n tkp = tpp_index[regions[region]]\n if tk.size == 0 or tkp.size == 0:\n return 0\n a = -1j * ((2*dagg[0] - 1)) * freq_2\n b = -1j * ((2*dagg[1] - 1)) * freq_1\n if swap_ts:\n a, b = b, a\n if region in ('a','c'):\n ph = np.triu(\n np.exp(a * (tk+1)*dt + b * (tkp+1)*dt) / (a * b), k = 1)\n ph -= np.triu(\n np.exp(a * (tk+1)*dt + b * tkp*dt) / (a * b), k = 1)\n ph -= np.triu(\n np.exp(a * tk*dt + b * (tkp+1)*dt) / (a * b), k = 1)\n ph += np.triu(\n np.exp(a * tk*dt + b * tkp*dt) / (a * b), k = 1)\n sel = np.diag(tk)\n di = -np.exp((a * (sel + 1) + b * sel) * dt) / (a * b)\n if a + b != 0:\n di += np.exp((a + b) * (sel + 1) * dt) / (b * 
(a+b))\n di += np.exp((a + b) * sel * dt) / (a * (a+b))\n else:\n di += (1 + a * sel * dt + b * (sel + 1) * dt) / (a * b)\n ph += np.diag(di)\n else:\n ph = np.exp(a * (tk+1)*dt + b * (tkp+1)*dt) / (a * b)\n ph -= np.exp(a * (tk+1)*dt + b * tkp*dt) / (a * b)\n ph -= np.exp(a * tk*dt + b * (tkp+1)*dt) / (a * b)\n ph += np.exp(a * tk*dt + b * tkp*dt) / (a * b)\n return ph\n\n\n if dagg == (0, 1):\n re_kernel[regions['a']] = phase('a') + phase('a', 1)\n\n re_kernel[regions['b']] = phase('b')\n\n im_kernel[regions['a']] = ((2*n_1 + 1) * phase('a') -\n (2*n_2 + 1) * phase('a', 1))\n\n im_kernel[regions['b']] = (2*n_1 + 1) * phase('b')\n\n im_kernel[regions['c']] = -2 * (n_1 + 1) * phase('c')\n\n elif dagg == (1, 0):\n re_kernel[regions['a']] = phase('a') + phase('a', 1)\n\n re_kernel[regions['b']] = phase('b')\n\n im_kernel[regions['a']] = ((2*n_1 + 1) * phase('a') -\n (2*n_2 + 1) * phase('a', 1))\n\n im_kernel[regions['b']] = (2*n_1 + 1) * phase('b')\n\n im_kernel[regions['c']] = 2 * n_1 * phase('c')\n\n elif dagg == (1, 1):\n re_kernel[regions['a']] = -(phase('a') + phase('a', 1))\n\n re_kernel[regions['b']] = -phase('b')\n\n im_kernel[regions['a']] = ((2*n_1 + 1) * phase('a') +\n (2*n_2 + 1) * phase('a', 1))\n\n im_kernel[regions['b']] = (2*n_1 + 1) * phase('b')\n\n im_kernel[regions['c']] = 2 * (n_1 + 1) * phase('c')\n\n elif dagg == (0, 0):\n re_kernel[regions['a']] = -(phase('a') + phase('a', 1))\n\n re_kernel[regions['b']] = -phase('b')\n\n im_kernel[regions['a']] = -((2*n_2 + 1) * phase('a', 1) +\n (2*n_1 + 1) * phase('a'))\n\n im_kernel[regions['b']] = -(2*n_1 + 1) * phase('b')\n\n im_kernel[regions['c']] = -2 * n_1 * phase('c')\n\n re_kernel = np.triu(re_kernel) #only keep triangular region\n im_kernel = np.triu(im_kernel)\n return re_kernel, im_kernel", "def tuto_kernel_overview(optimize=True, plot=True):\r\n ker1 = GPy.kern.rbf(1) # Equivalent to ker1 = GPy.kern.rbf(input_dim=1, variance=1., lengthscale=1.)\r\n ker2 = GPy.kern.rbf(input_dim=1, variance = .75, lengthscale=2.)\r\n ker3 = GPy.kern.rbf(1, .5, .5)\r\n\r\n print ker2\r\n\r\n if plot:\r\n ker1.plot()\r\n ker2.plot()\r\n ker3.plot()\r\n\r\n k1 = GPy.kern.rbf(1,1.,2.)\r\n k2 = GPy.kern.Matern32(1, 0.5, 0.2)\r\n\r\n # Product of kernels\r\n k_prod = k1.prod(k2) # By default, tensor=False\r\n k_prodtens = k1.prod(k2,tensor=True)\r\n\r\n # Sum of kernels\r\n k_add = k1.add(k2) # By default, tensor=False\r\n k_addtens = k1.add(k2,tensor=True)\r\n\r\n k1 = GPy.kern.rbf(1,1.,2)\r\n k2 = GPy.kern.periodic_Matern52(1,variance=1e3, lengthscale=1, period = 1.5, lower=-5., upper = 5)\r\n\r\n k = k1 * k2 # equivalent to k = k1.prod(k2)\r\n print k\r\n\r\n # Simulate sample paths\r\n X = np.linspace(-5,5,501)[:,None]\r\n Y = np.random.multivariate_normal(np.zeros(501),k.K(X),1)\r\n\r\n k1 = GPy.kern.rbf(1)\r\n k2 = GPy.kern.Matern32(1)\r\n k3 = GPy.kern.white(1)\r\n\r\n k = k1 + k2 + k3\r\n print k\r\n\r\n k.constrain_positive('.*var')\r\n k.constrain_fixed(np.array([1]),1.75)\r\n k.tie_params('.*len')\r\n k.unconstrain('white')\r\n k.constrain_bounded('white',lower=1e-5,upper=.5)\r\n print k\r\n\r\n k_cst = GPy.kern.bias(1,variance=1.)\r\n k_mat = GPy.kern.Matern52(1,variance=1., lengthscale=3)\r\n Kanova = (k_cst + k_mat).prod(k_cst + k_mat,tensor=True)\r\n print Kanova\r\n\r\n # sample inputs and outputs\r\n X = np.random.uniform(-3.,3.,(40,2))\r\n Y = 0.5*X[:,:1] + 0.5*X[:,1:] + 2*np.sin(X[:,:1]) * np.sin(X[:,1:])\r\n\r\n # Create GP regression model\r\n m = GPy.models.GPRegression(X, Y, Kanova)\r\n\r\n if plot:\r\n fig = 
pb.figure(figsize=(5,5))\r\n ax = fig.add_subplot(111)\r\n m.plot(ax=ax)\r\n\r\n pb.figure(figsize=(20,3))\r\n pb.subplots_adjust(wspace=0.5)\r\n axs = pb.subplot(1,5,1)\r\n m.plot(ax=axs)\r\n pb.subplot(1,5,2)\r\n pb.ylabel(\"= \",rotation='horizontal',fontsize='30')\r\n axs = pb.subplot(1,5,3)\r\n m.plot(ax=axs, which_parts=[False,True,False,False])\r\n pb.ylabel(\"cst +\",rotation='horizontal',fontsize='30')\r\n axs = pb.subplot(1,5,4)\r\n m.plot(ax=axs, which_parts=[False,False,True,False])\r\n pb.ylabel(\"+ \",rotation='horizontal',fontsize='30')\r\n axs = pb.subplot(1,5,5)\r\n pb.ylabel(\"+ \",rotation='horizontal',fontsize='30')\r\n m.plot(ax=axs, which_parts=[False,False,False,True])\r\n\r\n return(m)", "def saff_kuijlaars(N):\n k = np.arange(N)\n h = -1 + 2.0 * k / (N - 1)\n theta = np.arccos(h)\n phi = np.zeros_like(h)\n for i in range(1, N - 1):\n phi[i] = (phi[i - 1] + 3.6 / np.sqrt(N * (1 - h[i]**2))) % (2.0 * np.pi)\n\n return sph2car(np.ones_like(theta), theta, phi)", "def make_mol_kernel(drugs):\n\n dict_drug = drugs.dict_drug\n dict_ind2mol = drugs.dict_ind2mol\n\n # get the ECFP fingerprints\n nb_mol = drugs.nb\n X_fingerprint = np.zeros((nb_mol, 1024), dtype=np.int32)\n list_fingerprint = []\n # for i in list(dict_ind2mol.keys()):\n for i in range(nb_mol):\n dbid = dict_ind2mol[i]\n m = Chem.MolFromSmiles(dict_drug[dbid])\n list_fingerprint.append(AllChem.GetMorganFingerprint(m, 2))\n arr = np.zeros((1,))\n DataStructs.ConvertToNumpyArray(\n AllChem.GetMorganFingerprintAsBitVect(m, \n 2, \n nBits=1024), \n arr)\n X_fingerprint[i, :] = arr\n\n # get the Tanimoto Similarity Matrix\n K = np.zeros((len(list_fingerprint), len(list_fingerprint)))\n for i in range(len(list_fingerprint)):\n for j in range(i, len(list_fingerprint)):\n K[i, j] = DataStructs.TanimotoSimilarity(list_fingerprint[i], \n list_fingerprint[j])\n K[j, i] = K[i, j]\n\n return X_fingerprint, K", "def my_kernel(X, Y):\n S = 0.84 # parameter from rhos\n\n if dset == 1:\n gamma = 0.0005\n else:\n gamma = 0.00087 # maximise variance of kernel matrix\n if np.array_equal(X, Y):\n N = X.shape[0]\n M = (1 - S) * np.ones((N, N)) + S * np.eye(N)\n else:\n M = 1\n\n pairwise_sq_dists = cdist(X, Y, 'sqeuclidean')\n K = exp(-gamma * pairwise_sq_dists) * M\n return K", "def EisensteinHu_Pk(self,\n z = 0.,\n k = np.logspace(-4., 2., 1001),\n sigma_8 = 0.83):\n\n om_m = self.Omega_m\n om_b = self.Omega_b\n n_tld = self.ns - 1.\n h = self.h\n theta = self.T_cmb/2.7\n \n if np.sum(self.M_nu) != 0.:\n warnings.warn(\"EisensteinHu_Pk is not able to reproduce massive neutrinos as it uses the Eisenstein & Hu approximation (1998) for the linear power spectrum. The Omega_nu parameter will be transferred to Omega_lambda such that Omega_lambda -> (Omega_lambda + Omega_nu)\")\n om_m -= np.sum(self.Omega_nu)\n if self.w0 != -1. or self.wa != 0.:\n warnings.warn(\"nw_Pk is not able to reproduce non-static dark energy with w0 != -1. The dark enerdy parameters will be set to w0 = -1, wa = 0\")\n if self.Omega_K != 0.:\n warnings.warn(\"EisensteinHu_Pk is not able to reproduce non-flat FRW metric! 
The Omega_K parameter will be transferred to Omega_lambda such that Omega_lambda -> (Omega_lambda + Omega_K)\")\n om_m -= self.Omega_K\n\n rk = k*h\n e = np.exp(1.)\n\n # Recombination and equality\n thet = 2.728/2.7\n b1 = 0.313*(om_m*h*h)**(-0.419)*(1+0.607*(om_m*h*h)**0.674)\n b2 = 0.238*(om_m*h*h)**0.223\n zd = 1291.*(1+b1*(om_b*h*h)**b2)*(om_m*h*h)**0.251/(1.+0.659*(om_m*h*h)**0.828)\n ze = 2.50e4*om_m*h*h/thet**4.\n rd = 31500.*om_b*h*h/thet**4./zd\n re = 31500.*om_b*h*h/thet**4./ze\n rke = 7.46e-2*om_m*h*h/thet**2.\n s = (2./3./rke)*np.sqrt(6./re)*np.log((np.sqrt(1.+rd)+np.sqrt(rd+re))/(1+np.sqrt(re)))\n rks = 1.6*( (om_b*h*h)**0.52 ) * ( (om_m*h*h)**0.73 ) * (1.+(10.4*om_m*h*h)**(-0.95))\n q = rk/13.41/rke\n y = (1.+ze)/(1.+zd)\n g = y*(-6.*np.sqrt(1+y)+(2.+3.*y)*np.log((np.sqrt(1.+y)+1.)/(np.sqrt(1.+y)-1.)))\n\n # Master function\n ab = g*2.07*rke*s/(1.+rd)**(0.75)\n a1 = (46.9*om_m*h*h)**0.670*(1+(32.1*om_m*h*h)**(-0.532))\n a2 = (12.0*om_m*h*h)**0.424*(1+(45.0*om_m*h*h)**(-0.582))\n ac = (a1**(-om_b/om_m)) * (a2**(-(om_b/om_m)**3.))\n B1 = 0.944/(1+(458.*om_m*h*h)**(-0.708))\n B2 = (0.395*om_m*h*h)**(-0.0266)\n bc = 1./(1.+B1*((1.-om_b/om_m)**B2-1.))\n\n # CDM transfer function\n f = 1./(1.+(rk*s/5.4)**4.)\n c1 = 14.2 + 386./(1.+69.9*q**1.08)\n c2 = 14.2/ac + 386./(1.+69.9*q**1.08)\n tc = f*np.log(e+1.8*bc*q)/(np.log(e+1.8*bc*q)+c1*q*q) +(1.-f)*np.log(e+1.8*bc*q)/(np.log(e+1.8*bc*q)+c2*q*q)\n \n # Baryon transfer function\n bb = 0.5+(om_b/om_m) + (3.-2.*om_b/om_m)*np.sqrt((17.2*om_m*h*h)**2.+1.)\n bn = 8.41*(om_m*h*h)**0.435\n ss = s/(1.+(bn/rk/s)**3.)**(1./3.)\n tb = np.log(e+1.8*q)/(np.log(e+1.8*q)+c1*q*q)/(1+(rk*s/5.2)**2.)\n fac = np.exp(-(rk/rks)**1.4)\n tb = (tb+ab*fac/(1.+(bb/rk/s)**3.))*np.sin(rk*ss)/rk/ss\n\n # Total transfer function\n T = (om_b/om_m)*tb+(1-om_b/om_m)*tc\n\n # Power spectrum and normalization\n #delta_H = 1.94e-5*om_m**(-0.785-0.05*np.log(om_m))*np.exp(-0.95*n_tld-0.169*n_tld**2.)\n #power_tmp = delta_H**2.*(const.c*rk/self.H0)**(3.+self.ns)/rk**3.*(2.*np.pi**2.)*T**2.\n power_tmp = k**self.ns*(2.*np.pi**2.)*T**2.\n norm = sigma_8/self.compute_sigma_8(k = k, pk = power_tmp)\n power_tmp *= norm**(2.)\n \n # Different redshifts\n nz = len(np.atleast_1d(z))\n if nz == 1:\n z = np.array([z])\n nk = len(np.atleast_1d(k))\n Pk = np.zeros((nz,nk))\n for i in range(nz):\n Pk[i] = power_tmp*(self.growth_factor_scale_independent(z[i])/self.growth_factor_scale_independent(0.))**2.\n\n return k, Pk", "def _sigmainf(N, h, m, dW, Km0, Pm0):\n M = m*(m-1)/2\n Im = broadcast_to(np.eye(m), (N, m, m))\n IM = broadcast_to(np.eye(M), (N, M, M))\n Ims0 = np.eye(m**2)\n factor1 = broadcast_to((2.0/h)*np.dot(Km0, Ims0 - Pm0), (N, M, m**2))\n factor2 = _kp2(Im, _dot(dW, _t(dW)))\n factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))\n return 2*IM + _dot(_dot(factor1, factor2), factor3)", "def kf_update(X, P, Y, H, R):\n\n IM = dot(H, X)\n IS = R + dot(H, dot(P, H.T))\n K = dot(P, dot(H.T, inv(IS)))\n X = X + dot(K, (Y-IM))\n P = P - dot(K, dot(IS, K.T))\n LH = gauss_pdf(Y, IM, IS)\n return (X,P,K,IM,IS,LH)", "def gcheckerboard(kernelen=64, nsig=32):\n c = np.array([[-1, 1], [1, -1]])\n intsize = int(np.ceil(kernelen/2))\n return np.kron(c, np.ones([intsize, intsize])) * gkern(kernelen, nsig)", "def test_fixedkernel(self):\r\n X = np.random.rand(30, 4)\r\n K = np.dot(X, X.T)\r\n kernel = GPy.kern.fixed(4, K)\r\n kern = GPy.kern.poly(5, degree=4)\r\n self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose))", "def dilate_kernel(self, kernel, dilation):\n if dilation 
== 0:\n return kernel \n # inside padding based on the scaling law\n dilation = torch.tensor(dilation).float()\n delta = dilation%1\n\n d_in = torch.ceil(dilation**2).int()\n new_in = kernel.shape[2] + (kernel.shape[2]-1)*d_in\n\n d_h = torch.ceil(dilation).int()\n new_h = kernel.shape[3] + (kernel.shape[3]-1)*d_h\n\n d_w = torch.ceil(dilation).int()\n new_w = kernel.shape[4] + (kernel.shape[4]-1)*d_h\n\n new_kernel = torch.zeros(kernel.shape[0], kernel.shape[1], new_in, new_h, new_w)\n new_kernel[:,:,::(d_in+1),::(d_h+1), ::(d_w+1)] = kernel\n dilate_factor = 1\n \n new_kernel = F.pad(new_kernel, ((kernel.shape[4]-1)//2, (kernel.shape[4]-1)//2)*3)\n\n dilate_factor = (new_kernel.shape[-1] - 1 - (kernel.shape[4]-1)*(delta))/(new_kernel.shape[-1] - 1) \n\n grid = torch.meshgrid(torch.linspace(-1, 1, new_in)*(dilate_factor**2), \n torch.linspace(-1, 1, new_h)*dilate_factor, \n torch.linspace(-1, 1, new_w)*dilate_factor)\n\n grid = torch.cat([grid[2].unsqueeze(0).unsqueeze(-1), \n grid[1].unsqueeze(0).unsqueeze(-1), \n grid[0].unsqueeze(0).unsqueeze(-1)], dim = -1).repeat(kernel.shape[0],1,1,1,1)\n\n new_kernel = F.grid_sample(new_kernel, grid) \n \n return new_kernel[:,:,-kernel.shape[2]:]", "def spec_helm_decomp(k,Cu,Cv,GM=False):\n dk = k[1]-k[0]\n s = np.log(k)\n\n Fphi = np.zeros_like(Cu)\n Fpsi = np.zeros_like(Cu)\n Cphi = np.zeros_like(Cu)\n Cpsi = np.zeros_like(Cu)\n\n # assume GM for decomposing into wave and vortex\n if GM:\n gm = np.load(\"/Users/crocha/Projects/dp_spectra/GM/gm_omega_star.npz\")\n f2omg2 = gm['rgm']\n ks = gm['k']*1.e3\n\n for i in range(s.size-1):\n\n ds = np.diff(s[i:])\n\n sh = sinh(s[i]-s[i:])\n ch = cosh(s[i]-s[i:])\n\n # the function to integrate\n Fp = Cu[i:]*sh + Cv[i:]*ch\n Fs = Cv[i:]*sh + Cu[i:]*ch\n\n # integrate using Simpson's rule\n Fpsi[i] = integrate.simps(Fs,s[i:])\n Fphi[i] = integrate.simps(Fp,s[i:])\n\n # zero out unphysical values\n Fpsi[Fpsi < 0.] = 0.\n Fphi[Fphi < 0.] 
= 0.\n\n # compute rotational and divergent components\n Cpsi = Fpsi - Fphi + Cu\n Cphi = Fphi - Fpsi + Cv\n\n if GM:\n\n f2omg2i = np.interp(k,ks,f2omg2)\n\n Cv_w = f2omg2i*Fphi - Fpsi + Cv\n Cv_v = Cv - Cv_w\n \n kdkromg = diff_central(ks, f2omg2)\n kdkromg = np.interp(k,ks[1:-1],kdkromg)\n\n dFphi = diff_central(k, Fphi)\n #dFphi = np.gradient(Fphi,k)\n dFphi = np.interp(k,k[1:-1],dFphi.real)\n E_w = Fphi - k*dFphi\n\n Cu_w = -k*kdkromg*Fphi + f2omg2i*(-Fpsi+Cv) + Fphi\n Cu_v = Cu - Cu_w\n\n Cb_w = E_w - (Cu_w + Cv_w)/2.\n\n return Cpsi,Cphi, Cu_w,Cv_w, Cu_v,Cv_v, E_w, Cb_w\n\n else:\n return Cpsi,Cphi", "def test_10_kernels(self):\n ra0, dec0 = CRVAL\n res = 0.01 * DEG\n\n # Test zenithal -- (ra0, dec0) is the reference point.\n for proj in ['TAN', 'ZEA']:\n wcsk = coords.get_wcs_kernel(proj, ra0, dec0, res)\n msg = f'Check crpix for {proj}'\n self.assertAlmostEqual(wcsk.wcs.crpix[0], 1, delta=TOL_RAD, msg=msg)\n self.assertAlmostEqual(wcsk.wcs.crpix[1], 1, delta=TOL_RAD, msg=msg)\n\n # Test cylindrical -- pixell puts the crval[1] on the equator\n # and dec0 is used for the conformal latitude.\n for proj in ['CAR', 'CEA']:\n wcsk = coords.get_wcs_kernel(proj, ra0, dec0, res)\n msg = f'Check crpix for {proj}'\n self.assertAlmostEqual(wcsk.wcs.crpix[0], 1, delta=TOL_RAD, msg=msg)\n self.assertNotAlmostEqual(wcsk.wcs.crpix[1], 1, delta=TOL_RAD, msg=msg)\n\n # This is going to break.\n fp = FP(xi =[0., -0.01*DEG],\n eta=[0., -0.01*DEG])\n sight = get_sightline()\n tod = core.AxisManager(core.LabelAxis('dets', ['a']))\n fp = coords.get_footprint(tod, wcs_kernel=wcsk, focal_plane=fp, sight=sight)", "def kernel(self, modulus=None):\n M = self.matrix(modulus=modulus)\n if modulus is None:\n M = M.convert_to(QQ)\n # Note: Even when working over a finite field, what we want here is\n # the pullback into the integers, so in this case the conversion to ZZ\n # below is appropriate. When working over ZZ, the kernel should be a\n # ZZ-submodule, so, while the conversion to QQ above was required in\n # order for the nullspace calculation to work, conversion back to ZZ\n # afterward should always work.\n # TODO:\n # Watch <https://github.com/sympy/sympy/issues/21834>, which calls\n # for fraction-free algorithms. 
If this is implemented, we can skip\n # the conversion to `QQ` above.\n K = M.nullspace().convert_to(ZZ).transpose()\n return self.domain.submodule_from_matrix(K)", "def transform(self,G):\n\n n = len(self.G_train_)\n nt = len(G)\n #Ks = sp.zeros((n,1))\n kernel_matrix = sp.zeros((nt,n))\n \n# for j in range(n):\n# Ks[j] = sp.sqrt(aGMKernel(self.G_train_[j],self.G_train_[j],self.alpha,self.gamma))\n# \n# for i in range(nt):\n# Kts = sp.sqrt(aGMKernel(G[i],G[i],self.alpha,self.gamma))\n# for j in range(n):\n# kernel_matrix[i,j] = aGMKernel(G[i],self.G_train_[j],self.alpha,self.gamma)/Kts/Ks[j]\n \n for i in range (nt):\n for j in range(n):\n kernel_matrix[i,j] = aGMKernel(G[i],self.G_train_[j],self.alpha, self.gamma)\n \n \n return kernel_matrix", "def Ising_1D(N,h):\n sigma_x = np.array([[0,1],[1,0]])\n sigma_z = np.kron(np.array([[1,0],[0,-1]]), np.array([[1,0],[0,-1]]))\n H = np.zeros((2**N,2**N))\n\n # self-interaction\n for i in range(1,N+1): #va da 1 a N\n if (i==1):\n H += np.kron(sigma_x, np.identity(2**(N-1)))\n elif(i!=1 and i!=N):\n H += np.kron(np.identity(2**(i-1)), np.kron(sigma_x, np.identity(2**(N-i))))\n elif(i==N):\n H += np.kron(np.identity(2**(N-1)),sigma_x)\n\n # interaction\n H_tmp = np.zeros((2**N,2**N))\n for i in range(1, N):\n if(i==1):\n H_tmp += np.kron(sigma_z, np.identity(2**(N-2)))\n elif(i!=1 and i!=N-1):\n tmp=np.kron(sigma_z,np.identity(2**(N-i-1))) #dx\n H_tmp += np.kron(np.identity(2**(i-1)), tmp) #sx\n elif(i==N-1):\n H_tmp += np.kron(np.identity(2**(N-2)), sigma_z)\n\n H = -(h*H + H_tmp)\n\n return H", "def get_kernels(self, window_lmax=None):\n\n if window_lmax is None:\n window_lmax = self.lmax\n\n save_name = \"kernels\"\n save_attrs = [\"kern\", \"pkern\", \"mkern\", \"xkern\", \"window_lmax\"]\n ret = self.load_data(\n save_name,\n \"kernels\",\n fields=save_attrs,\n to_attrs=True,\n shape=self.kern_shape,\n shape_ref=\"kern\",\n value_ref={\"window_lmax\": window_lmax},\n )\n if ret is not None:\n return ret\n\n kern = OrderedDict()\n if self.pol:\n pkern = OrderedDict()\n mkern = OrderedDict()\n xkern = OrderedDict()\n else:\n pkern = None\n mkern = None\n xkern = None\n\n lmax = self.lmax\n pol = self.pol\n wls = self.wls\n\n all_ells = np.arange(2 * lmax + 1)\n for xname in self.map_pairs:\n kern[xname] = np.zeros((lmax + 1, 2 * lmax + 1))\n if pol:\n pkern[xname] = np.zeros((lmax + 1, 2 * lmax + 1))\n mkern[xname] = np.zeros((lmax + 1, 2 * lmax + 1))\n xkern[xname] = np.zeros((lmax + 1, 2 * lmax + 1))\n\n for l in all_ells[2 : lmax + 1]:\n if np.mod(l, 50) == 0:\n self.log(\"Computing kernels for ell {}/{}\".format(l, lmax), \"debug\")\n l2 = np.min([2 * lmax + 1, l + lmax + 1])\n # populate upper triangle\n for ll in all_ells[l:l2]:\n j0, j0_lmin, j0_lmax = xft.wigner3j(l, 0, ll, 0)\n if pol:\n j2, j2_lmin, j2_lmax = xft.wigner3j(l, 2, ll, -2)\n\n # only go up to window lmax\n j0_lmax = np.minimum(j0_lmax, window_lmax)\n\n # computed as in https://arxiv.org/abs/1909.09375\n # equations 128 - 136\n l3 = np.arange(j0_lmin, j0_lmax + 1)\n dl3 = 2.0 * l3 + 1.0\n vk = j0[l3] ** 2 * dl3\n if pol:\n sign = ((-1.0) ** (l + ll + l3)).astype(int)\n v = j2[l3] ** 2 * dl3\n vp = v * (1.0 + sign) / 2.0\n vm = v * (1.0 - sign) / 2.0\n vx = j2[l3] * j0[l3] * dl3\n for xname in self.map_pairs:\n wls1 = wls[xname][:, l3]\n kern[xname][l, ll] += (vk * wls1[0]).sum(axis=-1)\n if pol:\n pkern[xname][l, ll] += (vp * wls1[1]).sum(axis=-1)\n mkern[xname][l, ll] += (vm * wls1[1]).sum(axis=-1)\n xkern[xname][l, ll] += (vx * wls1[2]).sum(axis=-1)\n\n # apply symmetry 
relation\n for l in all_ells[2 : lmax + 1]:\n ll = np.arange(2 * lmax + 1)\n dll = (2.0 * ll + 1.0) / 4.0 / np.pi\n sll = slice(l, lmax + 1)\n for xname in self.map_pairs:\n # populate lower triangle (wigners are symmetric in l and ll)\n kern[xname][sll, l] = kern[xname][l, sll]\n if pol:\n pkern[xname][sll, l] = pkern[xname][l, sll]\n mkern[xname][sll, l] = mkern[xname][l, sll]\n xkern[xname][sll, l] = xkern[xname][l, sll]\n # apply ell scaling along the axis that we bin over\n kern[xname][l, :] *= dll\n if pol:\n pkern[xname][l, :] *= dll\n mkern[xname][l, :] *= dll\n xkern[xname][l, :] *= dll\n\n # save and return\n self.kern = kern\n self.pkern = pkern\n self.mkern = mkern\n self.xkern = xkern\n self.window_lmax = window_lmax\n\n return self.save_data(save_name, from_attrs=save_attrs)", "def kernel(self):\n\n # Create a blank kernel the appropriate size\n kernel = np.zeros((self.n_rows, self.n_cols), dtype=np.int)\n\n # Iterate through the offsets, turning on the correct pixels\n for offset in self.offsets:\n row, col = offset\n if np.all(offset == self.index):\n kernel[row, col] = 2\n else:\n kernel[row, col] = 1\n\n # Ensure that the index pixel is not zero for footprints where the\n # index pixel is not part of the footprint\n if kernel[self.index[0], self.index[1]] == 0:\n kernel[self.index[0], self.index[1]] = 3\n return kernel", "def optical_kernels(K, J, Lxy, fmax, num):\n\n df = 1/Lxy\n\n # Compute TCC\n TCC, f = trans_cross_coeff(K, J, df, fmax)\n \n # Convert to 2D matrix\n N = f.size\n TCCm = np.reshape(np.reshape(TCC, (N, N, -1)), (N**2, -1))\n\n # Compute largest eigenvalues and eigenvectors\n w, v = scipy.linalg.eigh(TCCm, eigvals=(N**2 - num, N**2 - 1))\n w = np.flip(w, 0)\n v = np.flip(v, 1)\n\n # Get real-space optical kernels\n phi = []\n for i in range(num):\n Phi = np.fft.ifftshift(np.reshape(v[:,i], [N, N]))\n #phi.append(np.fft.fftshift(np.fft.ifft2(Phi)))\n phi.append(np.fft.ifft2(Phi))\n \n return w, phi", "def formK(x, y, kernel, cl):\n\n if kernel == 'se':\n k = lambda x,y: np.exp(-np.sum((x-y)**2)/2/cl**2)\n else:\n raise('Kernel %s not implemented' %(kernel))\n\n # form kernel matrix\n K = np.zeros((x.shape[0], y.shape[0]))\n for i in range(len(x)):\n for j in range(len(y)):\n K[i,j] = k(x[i], y[j])\n\n return K", "def decalage_haut_image(k,fichier_in,fichier_out):\n\n M = pgm_vers_matrice(fichier_in)\n C = [[0,0,0],[0,0,0],[0,1,0]]\n for __ in range(k):\n M = convolution_entiere(C,M)\n matrice_vers_pgm(M,fichier_out)\n return", "def smooth_kernel_fp(z, z_star, h, gamma=2):\n\n # compute probabilities\n p = np.exp(-(np.abs(z-z_star)/h)**gamma)\n # rescale\n p = p / np.sum(p)\n return np.squeeze(p)", "def DL_SE(channel, precoding, power=100, loop=True):\n H, V = channel, precoding\n W = complex_normalize(V, -1)\n\n no_real, L, K, M = H.shape[0], H.shape[\n 1], H.shape[3], H.shape[4]\n intercell_intf = np.zeros((L, K))\n intracell_intf = np.zeros((no_real, L, K))\n sig = np.zeros((no_real, L, K))\n if loop:\n for n in range(no_real):\n for l in range(L):\n H_l2all = H[n, l] # L x K x M, the CSI between BS l and all\n # users\n for k in range(K):\n # for all users in cell l\n w = W[n, l, k] # M x 1\n sig[n, l, k] = (np.abs(w.conj().T @ H[n, l, l, k])) ** 2\n intracell_intf = H_l2all[l].conj() @ w # [K,]\n inner_all = H_l2all.conj() @ w # [L, K]\n intercell_intf = (np.abs(inner_all)) ** 2\n intercell_intf[l] -= (np.abs(intracell_intf)) ** 2\n intracell_intf = (np.abs(intracell_intf)) ** 2\n intracell_intf[n, l, k] -= sig[n, l, k]\n else:\n for n in 
range(no_real):\n for l in range(L):\n H_l = H[n, l] # (L, K, M)\n for k in range(K):\n w_l = W[n, l] # (K, M)\n H_llk = H_l[l, k] # (M, ) the channel b/w l-th BS to user k\n p_l = np.abs(np.dot(w_l.conj(), H_llk)) ** 2\n sig[n, l, k] = p_l[k]\n intracell_intf[n, l, k] = p_l.sum() - p_l[k]\n if L > 1:\n idx_othercell = list(range(L))\n idx_othercell.remove(l)\n H_intercell = H[n, idx_othercell, l:l+1, k] # (L-1, 1, M) CSI,\n # other cells\n # to\n # this user k\n w_intercell = W[n, idx_othercell] #(L-1, K, M) other cell's precoding vec\n p_inter = np.abs(w_intercell @ (H_intercell.swapaxes(\n -1, -2))) ** 2\n intercell_intf[l,k] += p_inter.sum() / no_real\n # assert np.allclose(p_sig, np.abs(w_l[k].conj() @ H_llk)\n # ** 2)\n int_noise = power * intercell_intf + power * intracell_intf + 1\n sinr = (power * sig / int_noise)\n dl_se = np.log2(1+sinr).mean(axis=0)\n\n return dl_se, sig, intracell_intf, intercell_intf", "def gkern(kernlen=21, nsig=3):\n\n interval = (2*nsig+1.)/(kernlen)\n x = np.linspace(-nsig-interval/2., nsig+interval/2., kernlen+1)\n kern1d = np.diff(st.norm.cdf(x))\n kernel_raw = np.sqrt(np.outer(kern1d, kern1d))\n kernel = kernel_raw/np.max(kernel_raw)#.sum()\n return kernel", "def hgmwithfilter_evaluation(input_generator,branches,nlfuntion,iden_method,Plot,reference=None):\n input_signal = input_generator.GetOutput()\n # filter_spec_tofind = nlsp.create_bpfilter([2000,8000,30000],input_signal)\n filter_spec_tofind = nlsp.log_bpfilter(branches=branches,input=input_signal)\n # filter_spec_tofind = [i for i in reversed(filter_spec_tofind)]\n length_kernel = len(filter_spec_tofind[0])\n # filter_spec_tofind = nlsp.log_chebyfilter(branches=branches,input=input_signal)\n ref_nlsystem = nlsp.HammersteinGroupModel_up(input_signal=input_signal,\n nonlinear_functions=nlsp.nl_branches(nlfuntion,branches),\n filter_irs=filter_spec_tofind,\n max_harmonics=range(1,branches+1))\n found_filter_spec, nl_functions = iden_method(input_generator,ref_nlsystem.GetOutput(),branches)\n found_filter_spec = nlsp.change_length_filterkernels(found_filter_spec,length=length_kernel)\n iden_nlsystem = nlsp.HammersteinGroupModel_up(input_signal=input_signal,\n nonlinear_functions=nl_functions,\n filter_irs=found_filter_spec,\n max_harmonics=range(1,branches+1))\n # nlsp.filterkernel_evaluation_plot(filter_spec_tofind,found_filter_spec)\n # nlsp.filterkernel_evaluation_sum(filter_spec_tofind,found_filter_spec)\n if reference is not None:\n reference = nlsp.change_length_signal(reference,length=len(input_signal))\n ref_nlsystem.SetInput(reference)\n iden_nlsystem.SetInput(reference)\n if Plot is True:\n plot.relabelandplot(ref_nlsystem.GetOutput(),\"Reference Output\",show=False)\n plot.relabelandplot(iden_nlsystem.GetOutput(),\"Identified Output\",show=True)\n # nlsp.plot_array([sumpf.modules.FourierTransform(s).GetSpectrum() for s in filter_spec_tofind],label_array=[\"reference%d\" %i for i in range(len(filter_spec_tofind))],Show=False)\n # nlsp.plot_array([sumpf.modules.FourierTransform(s).GetSpectrum() for s in found_filter_spec],label_array=[\"identified%d\" %i for i in range(len(found_filter_spec))],Show=True)\n print \"SNR between Reference and Identified output without overlapping filters: %r\" %nlsp.snr(ref_nlsystem.GetOutput(),\n iden_nlsystem.GetOutput())\n sumpf.modules.SignalFile(filename=\"C:/Users/diplomand.8/Desktop/linearHGM_explannation/cheby/noise/input\", signal=reference,format=sumpf.modules.SignalFile.WAV_FLOAT)\n 
sumpf.modules.SignalFile(filename=\"C:/Users/diplomand.8/Desktop/linearHGM_explannation/cheby/noise/%s\" %iden_method.__name__,signal=iden_nlsystem.GetOutput(),format=sumpf.modules.SignalFile.WAV_FLOAT)\n sumpf.modules.SignalFile(filename=\"C:/Users/diplomand.8/Desktop/linearHGM_explannation/cheby/noise/reference\",signal=ref_nlsystem.GetOutput(),format=sumpf.modules.SignalFile.WAV_FLOAT)", "def gkern2(kernlen=21, nsig=3):\n # create nxn zeros\n inp = np.zeros((kernlen, kernlen))\n # set element at the middle to one, a dirac delta\n inp[kernlen//2, kernlen//2] = 1\n # gaussian-smooth the dirac, resulting in a gaussian filter mask\n kernel = scipy.ndimage.filters.gaussian_filter(inp, nsig)\n\n return kernel", "def compute_graphlet_kernel(graphs):\n start_time = time.time()\n\n N = len(graphs)\n\n phi = np.zeros((N, 2))\n\n ind = 0\n for G in graphs:\n for node1 in G.nodes():\n for node2 in G.neighbors(node1):\n for node3 in G.neighbors(node2):\n if node1 != node3:\n if node3 in G.neighbors(node1):\n increment = 1.0 / 2.0\n phi[ind, 0] += increment\n else:\n increment = 1.0 / 6.0\n phi[ind, 1] += increment\n\n ind += 1\n\n K = np.dot(phi, phi.T)\n end_time = time.time()\n print \"Total time for Graphlet kernel: \", (end_time - start_time)\n\n return K", "def cs4243_filter(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n ###Your code here####\n # pad image to handle border pixels\n pad_height = (int)((Hk - 1)/2)\n pad_width = (int)((Wk - 1)/2)\n image_pad = pad_zeros(image, pad_height, pad_width)\n\n # Flip the kernel horizontal and vertical\n kernel = cs4243_rotate180(kernel)\n \n # compute effective output size, assume stride=1\n out_height = 1 + Hi - Hk + 2*pad_height\n out_width = 1 + Wi - Wk + 2*pad_width\n \n # get initial nodes of receptive fields\n recep_fields_h = [i for i in range(out_height)]\n recep_fields_w = [i for i in range(out_width)]\n \n for i in recep_fields_h:\n for j in recep_fields_w: \n # get receptive area\n recep_area = image_pad[i:i+Hk, j:j+Wk] \n\n # multiply recep_area with kernel\n conv_sum = 0.0\n for y in range(Hk):\n for x in range(Wk): \n conv_sum += kernel[y][x] * recep_area[y][x]\n filtered_image[i, j] = conv_sum\n ###\n\n return filtered_image", "def gkern(kernlen=21, nsig=3):\n interval = (2 * nsig + 1.) / (kernlen)\n x = np.linspace(-nsig - interval / 2., nsig + interval / 2., kernlen + 1)\n kern1d = np.diff(st.norm.cdf(x))\n kernel_raw = np.sqrt(np.outer(kern1d, kern1d))\n kernel = kernel_raw / kernel_raw.sum()\n return kernel;", "def eval_K_chol(self, S, sigma_n, sigma_f):\n K = self.eval_K(S)\n K += sigma_n * np.eye(K.shape[0])\n K_chol = jitchol(K)\n return K_chol", "def MaternKernel(X, Y=None, gamma = None, p = 0):\n assert(p == int(p))\n\n X, Y = check_pairwise_arrays(X, Y)\n if gamma is None:\n gamma = 1.0 / X.shape[1]\n\n r = manhattan_distances(X, Y)\n if p == 0:\n K = -gamma * r\n np.exp(K, K) # exponentiate K in-place\n if p == 1:\n K = -gamma * r * math.sqrt(3)\n np.exp(K, K) # exponentiate K in-place\n K *= (1+gamma * r * math.sqrt(3))\n if p == 1:\n K = -gamma * r * math.sqrt(5)\n np.exp(K, K) # exponentiate K in-place\n K *= (1+gamma * r * math.sqrt(5) + 5./3. 
* (r*gamma)**2)\n return K", "def normal_modes_gHST(R, NL, KL, params, dispersion=[], spin_dir=[], sublattice_labels=[], b='hang', spring='auto',\n pin='auto'):\n try:\n NP, NN = np.shape(NL)\n except:\n '''There is only one particle.'''\n NP = 1\n NN = 0\n\n M1 = np.zeros((2 * NP, 2 * NP))\n M2 = np.zeros((2 * NP, 2 * NP))\n if spring == 'auto':\n spring = params['k'] * params['l'] ** 2 / (params['I3'] * np.abs(params['w3']))\n # If there is more than one particle, and if the speeds vary from particle to particle,\n # then make spring the same length as a dynamical matrix column\n if len(spring) > 0:\n if (abs(spring - spring[0]) > 1e-9).any():\n # The rotation rates vary from particle to particle, so reshape\n spring_new = np.zeros_like(spring)\n dmyi = 0 # a new index ('dummy i')\n for ii in range(NP):\n # Since 2 dof for position of pivot of gHST, double the size\n spring_new[dmyi] = spring[ii]\n spring_new[dmyi + 1] = spring[ii]\n dmyi += 2\n else:\n # the elements are all identical, so just keep the first one\n spring = spring[0]\n\n if pin == 'auto':\n gn = params['Mm'] * params['g']\n pin = params['l'] * gn / (params['I3'] * np.abs(params['w3']))\n # If there is more than one particle, and if the speeds vary from particle to particle,\n # then make pin the same length as a dynamical matrix column\n if len(pin) > 0:\n if (abs(pin - pin[0]) > 1e-9).any():\n # The rotation rates vary from particle to particle, so reshape\n pin_new = np.zeros_like(pin)\n dmyi = 0 # a new index ('dummy i')\n for ii in range(NP):\n # Since 2 dof for position of pivot of gHST, double the size\n pin_new[dmyi] = pin[ii]\n pin_new[dmyi + 1] = pin[ii]\n dmyi += 2\n else:\n # the elements are all identical, so just keep the first one\n pin = pin[0]\n\n m2_shape = np.shape(M2)\n\n if b == 'hang':\n b = np.zeros(NP)\n elif b == 'stand':\n b = np.ones(NP)\n\n if spin_dir == []:\n '''Assume antialigned with a, aligned with body axis 3'''\n spin_dir = np.ones(NP)\n\n print 'Constructing dynamical matrix...'\n for i in range(NP):\n for nn in range(NN):\n\n ni = NL[i, nn] # the number of the gyroscope i is connected to (particle j)\n k = KL[i, nn] # true connection?\n\n if len(dispersion) > 1:\n disp = 1. / (1. + dispersion[i])\n else:\n disp = 1.\n\n diffx = R[ni, 0] - R[i, 0]\n diffy = R[ni, 1] - R[i, 1]\n alphaij = 0.\n\n rij_mag = np.sqrt(diffx ** 2 + diffy ** 2)\n\n if k != 0:\n alphaij = np.arctan2(diffy, diffx)\n\n # for periodic systems, KL is -1 for particles on opposing boundaries\n if KL[i, nn] == -1:\n alphaij = (np.pi + alphaij) % (2 * pi)\n\n # What is this for?\n if KL[i, nn] == -2: # will only happen on first or last gyro in a line\n if i == 0 or i == (NP - 1):\n print i, '--> NL=-2 for this particle'\n yy = np.where(KL[i] == 1)\n dx = R[NL[i, yy], 0] - R[NL[i, yy], 0]\n dy = R[NL[i, yy], 1] - R[NL[i, yy], 1]\n al = (np.arctan2(dy, dx)) % (2 * pi)\n alphaij = np.pi - al\n if i == 1:\n alphaij = np.pi - ((90 / 2) * np.pi / 180.)\n else:\n alphaij = - ((90 / 2) * np.pi / 180.)\n\n Cos = np.cos(alphaij)\n Sin = np.sin(alphaij)\n\n if abs(Cos) < 10E-8:\n Cos = 0.0\n\n if abs(Sin) < 10E-8:\n Sin = 0\n\n Cos2 = Cos ** 2\n Sin2 = Sin ** 2\n CosSin = Cos * Sin\n\n # -1 for aligned with a, 1 for aligned with 3.\n # dir factor :== 1/(-1)^c = (-1)^c\n dir_factor = spin_dir[i]\n\n if len(sublattice_labels) > 0:\n if sublattice_labels[i] == 1:\n extra_factor = 1. 
* del_A_B\n # print self.del_A_B\n elif sublattice_labels[i] == 0:\n extra_factor = 1.\n else:\n extra_factor = 1.\n else:\n extra_factor = 1.\n\n M1[2 * i, 2 * i] += -disp * k * CosSin * ((-1) ** b[i]) * dir_factor # dxi - dxi\n M1[2 * i, 2 * i + 1] += -disp * k * Sin2 * ((-1) ** b[i]) * dir_factor # dxi - dyi\n M1[2 * i, 2 * ni] += disp * k * CosSin * ((-1) ** b[i]) * dir_factor # dxi - dxj\n M1[2 * i, 2 * ni + 1] += disp * k * Sin2 * ((-1) ** b[i]) * dir_factor # dxi - dyj\n\n # (y components)\n M1[2 * i + 1, 2 * i] += disp * k * Cos2 * ((-1) ** b[i]) * dir_factor # dyi - dxi\n M1[2 * i + 1, 2 * i + 1] += disp * k * CosSin * ((-1) ** b[i]) * dir_factor # dyi - dyi\n M1[2 * i + 1, 2 * ni] += -disp * k * Cos2 * ((-1) ** b[i]) * dir_factor # dyi - dxj\n M1[2 * i + 1, 2 * ni + 1] += -disp * k * CosSin * ((-1) ** b[i]) * dir_factor # dyi - dyj\n\n # if i==0:\n # print '\\n --- \\n added M1[2*i+1, 2*i] = ',disp*k*Cos2 *((-1)**b[i]) *dir_factor\n # print 'dir_factor = ', dir_factor\n # print 'k = ', k\n # print 'else =', ((-1)**b[i]) *dir_factor\n\n # pinning/gravitational matrix\n M2[2 * i, 2 * i + 1] = (1.) * disp * dir_factor * extra_factor\n M2[2 * i + 1, 2 * i] = -(1.) * disp * dir_factor * extra_factor\n\n # self.pin_array.append(2*pi*1*extra_factor)\n # Assumes:\n # (-1)**c adot = - spring* (-1)**b SUM{ z x nij*(nij.(dri-drj)) } + pin\n matrix = - (-spring * M1 + pin * M2)\n\n return matrix", "def gauss_kernel(radius, n_sigmas=8):\n sizex = int(n_sigmas * radius)\n sizey = int(n_sigmas * radius)\n radius = float(radius)\n xc = 0.5 * sizex\n yc = 0.5 * sizey\n y, x = np.mgrid[0:sizey - 1, 0:sizex - 1]\n x = x - xc\n y = y - yc\n x = x / radius\n y = y / radius\n g = np.exp(-0.5 * (x ** 2 + y ** 2))\n return g / (2 * np.pi * radius ** 2) # g.sum()", "def exclusive_lasso4(X, n_clusters, gamma=0.5):\n MAX_ITERS = 100\n n_dims, n_samples = X.shape\n\n # Initialize indicaotr matrix F\n F = np.zeros((n_samples, n_clusters), dtype=np.int8)\n for i in xrange(n_samples):\n F[i, randint(0, n_clusters-1)] = 1\n\n conv = False\n iteration = 0\n while iteration < MAX_ITERS and not conv:\n conv = True\n # Calculate H = XF(F^TF)^-1\n H = X.dot(sp.linalg.pinv(F.T))\n \n base_sum = ((X-H.dot(F.T))**2).sum(axis=0)\n F_counts = F.sum(axis=0)\n\n # Caculate F\n # For each row (sample)\n for i in xrange(n_samples):\n # Get current indicator\n curr_ind = F[i].nonzero()[0][0]\n F_counts[curr_ind] = F_counts[curr_ind] - 1\n\n results = []\n for j in xrange(n_clusters):\n F_counts[j] = F_counts[j] + 1\n tr = (F_counts**2).sum()\n res = ((X[:, i]-H[:, j])**2).sum() - base_sum[i] + gamma * tr\n results.append(res)\n F_counts[j] = F_counts[j] - 1\n new_ind = np.argmin(results)\n F[i, curr_ind] = 0\n F[i, new_ind] = 1\n F_counts[new_ind] = F_counts[new_ind] + 1\n # We want to find the indicator column that minimizes\n # X-HF^T + gamma * Tr(F^T11^TF)\n #\n # The trace equals sum of squared samples in each cluster\n \n if (curr_ind != new_ind):\n conv = False\n iteration = iteration + 1\n print iteration\n return map(lambda x: x.index(1), F.tolist())", "def u(self, k, m, z):\n result = self.nfw(k, m, z) * m / self.U.rho_m(z)\n # FOG\n #sigma2 = self.U.sigma2DispFog(m, z)\n #result *= np.exp(- 0.5 * sigma2 * k**2 * mu**2)\n return result", "def kernel(r, h, deriv):\n return {\n '0': h**-1 / np.sqrt(np.pi) * np.exp(-r**2/h**2),\n '1': h**-3 / np.sqrt(np.pi) * np.exp(-r**2/h**2) * (-2*r),\n '2': h**-5 / np.sqrt(np.pi) * np.exp(-r**2/h**2) * ( 4*r**2 - 2*h**2),\n '3': h**-7 / np.sqrt(np.pi) * np.exp(-r**2/h**2) * (-8*r**3 
+ 12*h**2*r)\n }[deriv]", "def nfw(self, k, m, z):\n RS, rhoS, c = self.rS_rhoS_c(m, z)\n #\n result = np.sin(k * RS) * ( Si((1+c) * k * RS) - Si(k * RS) )\n result += - np.sin(c * k * RS) / ((1+c) * k * RS)\n result += np.cos(k * RS) * ( Ci((1+c) * k * RS) - Ci(k * RS) )\n result /= (np.log(1+c) - c/(1+c))\n return result", "def nfw(self, k, m, z):\n RS, rhoS, c = self.rS_rhoS_c(m, z)\n #\n result = np.sin(k * RS) * ( Si((1+c) * k * RS) - Si(k * RS) )\n result += - np.sin(c * k * RS) / ((1+c) * k * RS)\n result += np.cos(k * RS) * ( Ci((1+c) * k * RS) - Ci(k * RS) )\n result /= (np.log(1+c) - c/(1+c))\n return result", "def calc_K_tilda(kernel: Type[Kern], X_train: np.array, X_m: np.array):\n Knn = kernel.K(X_train, X_train)\n Knm = kernel.K(X_train, X_m)\n Kmn = kernel.K(X_m, X_train)\n Kmm = kernel.K(X_m, X_m)\n temp = np.dot(np.dot(Knm, np.linalg.inv(Kmm)), Kmn)\n K_tilda = np.subtract(Knn, temp)\n return K_tilda", "def integrate_idemix_kernel(state):\n vs = state.variables\n settings = state.settings\n\n a_tri, b_tri, c_tri, d_tri, delta = (allocate(state.dimensions, (\"xt\", \"yt\", \"zt\"))[2:-2, 2:-2] for _ in range(5))\n forc = allocate(state.dimensions, (\"xt\", \"yt\", \"zt\"))\n maxE_iw = allocate(state.dimensions, (\"xt\", \"yt\", \"zt\"))\n\n \"\"\"\n forcing by EKE dissipation\n \"\"\"\n if settings.enable_eke:\n forc = vs.eke_diss_iw\n\n else: # shortcut without EKE model\n forc = vs.K_diss_gm + vs.K_diss_h - vs.P_diss_skew\n\n if settings.enable_store_cabbeling_heat:\n forc += -vs.P_diss_hmix - vs.P_diss_iso\n\n if settings.enable_eke and (settings.enable_eke_diss_bottom or settings.enable_eke_diss_surfbot):\n \"\"\"\n vertically integrate EKE dissipation and inject at bottom and/or surface\n \"\"\"\n a_loc = npx.sum(vs.dzw[npx.newaxis, npx.newaxis, :-1] * forc[:, :, :-1] * vs.maskW[:, :, :-1], axis=2)\n a_loc += 0.5 * forc[:, :, -1] * vs.maskW[:, :, -1] * vs.dzw[-1]\n\n forc = update(forc, at[...], 0.0)\n\n ks = npx.maximum(0, vs.kbot[2:-2, 2:-2] - 1)\n mask = ks[:, :, npx.newaxis] == npx.arange(settings.nz)[npx.newaxis, npx.newaxis, :]\n if settings.enable_eke_diss_bottom:\n forc = update(\n forc,\n at[2:-2, 2:-2, :],\n npx.where(\n mask, a_loc[2:-2, 2:-2, npx.newaxis] / vs.dzw[npx.newaxis, npx.newaxis, :], forc[2:-2, 2:-2, :]\n ),\n )\n else:\n forc = update(\n forc,\n at[2:-2, 2:-2, :],\n npx.where(\n mask,\n settings.eke_diss_surfbot_frac\n * a_loc[2:-2, 2:-2, npx.newaxis]\n / vs.dzw[npx.newaxis, npx.newaxis, :],\n forc[2:-2, 2:-2, :],\n ),\n )\n forc = update(\n forc,\n at[2:-2, 2:-2, -1],\n (1.0 - settings.eke_diss_surfbot_frac) * a_loc[2:-2, 2:-2] / (0.5 * vs.dzw[-1]),\n )\n\n \"\"\"\n forcing by bottom friction\n \"\"\"\n if not settings.enable_store_bottom_friction_tke:\n forc = forc + vs.K_diss_bot\n\n \"\"\"\n prevent negative dissipation of IW energy\n \"\"\"\n maxE_iw = npx.maximum(0.0, vs.E_iw[:, :, :, vs.tau])\n\n \"\"\"\n vertical diffusion and dissipation is solved implicitly\n \"\"\"\n _, water_mask, edge_mask = utilities.create_water_masks(vs.kbot[2:-2, 2:-2], settings.nz)\n\n delta = update(\n delta,\n at[:, :, :-1],\n settings.dt_tracer\n * settings.tau_v\n / vs.dzt[npx.newaxis, npx.newaxis, 1:]\n * 0.5\n * (vs.c0[2:-2, 2:-2, :-1] + vs.c0[2:-2, 2:-2, 1:]),\n )\n delta = update(delta, at[:, :, -1], 0.0)\n a_tri = update(\n a_tri, at[:, :, 1:-1], -delta[:, :, :-2] * vs.c0[2:-2, 2:-2, :-2] / vs.dzw[npx.newaxis, npx.newaxis, 1:-1]\n )\n a_tri = update(a_tri, at[:, :, -1], -delta[:, :, -2] / (0.5 * vs.dzw[-1:]) * vs.c0[2:-2, 2:-2, -2])\n b_tri = update(\n 
b_tri,\n at[:, :, 1:-1],\n 1\n + delta[:, :, 1:-1] * vs.c0[2:-2, 2:-2, 1:-1] / vs.dzw[npx.newaxis, npx.newaxis, 1:-1]\n + delta[:, :, :-2] * vs.c0[2:-2, 2:-2, 1:-1] / vs.dzw[npx.newaxis, npx.newaxis, 1:-1]\n + settings.dt_tracer * vs.alpha_c[2:-2, 2:-2, 1:-1] * maxE_iw[2:-2, 2:-2, 1:-1],\n )\n b_tri = update(\n b_tri,\n at[:, :, -1],\n 1\n + delta[:, :, -2] / (0.5 * vs.dzw[-1:]) * vs.c0[2:-2, 2:-2, -1]\n + settings.dt_tracer * vs.alpha_c[2:-2, 2:-2, -1] * maxE_iw[2:-2, 2:-2, -1],\n )\n b_tri_edge = (\n 1\n + delta / vs.dzw * vs.c0[2:-2, 2:-2, :]\n + settings.dt_tracer * vs.alpha_c[2:-2, 2:-2, :] * maxE_iw[2:-2, 2:-2, :]\n )\n c_tri = update(\n c_tri, at[:, :, :-1], -delta[:, :, :-1] / vs.dzw[npx.newaxis, npx.newaxis, :-1] * vs.c0[2:-2, 2:-2, 1:]\n )\n d_tri = update(d_tri, at[...], vs.E_iw[2:-2, 2:-2, :, vs.tau] + settings.dt_tracer * forc[2:-2, 2:-2, :])\n d_tri_edge = (\n d_tri + settings.dt_tracer * vs.forc_iw_bottom[2:-2, 2:-2, npx.newaxis] / vs.dzw[npx.newaxis, npx.newaxis, :]\n )\n d_tri = update_add(d_tri, at[:, :, -1], settings.dt_tracer * vs.forc_iw_surface[2:-2, 2:-2] / (0.5 * vs.dzw[-1:]))\n\n sol = utilities.solve_implicit(\n a_tri, b_tri, c_tri, d_tri, water_mask, b_edge=b_tri_edge, d_edge=d_tri_edge, edge_mask=edge_mask\n )\n vs.E_iw = update(vs.E_iw, at[2:-2, 2:-2, :, vs.taup1], npx.where(water_mask, sol, vs.E_iw[2:-2, 2:-2, :, vs.taup1]))\n\n \"\"\"\n store IW dissipation\n \"\"\"\n vs.iw_diss = vs.alpha_c * maxE_iw * vs.E_iw[..., vs.taup1]\n\n \"\"\"\n add tendency due to lateral diffusion\n \"\"\"\n flux_east = allocate(state.dimensions, (\"xt\", \"yt\", \"zt\"))\n flux_north = allocate(state.dimensions, (\"xt\", \"yt\", \"zt\"))\n flux_top = allocate(state.dimensions, (\"xt\", \"yt\", \"zt\"))\n\n if settings.enable_idemix_hor_diffusion:\n flux_east = update(\n flux_east,\n at[:-1, :, :],\n settings.tau_h\n * 0.5\n * (vs.v0[1:, :, :] + vs.v0[:-1, :, :])\n * (vs.v0[1:, :, :] * vs.E_iw[1:, :, :, vs.tau] - vs.v0[:-1, :, :] * vs.E_iw[:-1, :, :, vs.tau])\n / (vs.cost[npx.newaxis, :, npx.newaxis] * vs.dxu[:-1, npx.newaxis, npx.newaxis])\n * vs.maskU[:-1, :, :],\n )\n\n flux_north = update(\n flux_north,\n at[:, :-1, :],\n settings.tau_h\n * 0.5\n * (vs.v0[:, 1:, :] + vs.v0[:, :-1, :])\n * (vs.v0[:, 1:, :] * vs.E_iw[:, 1:, :, vs.tau] - vs.v0[:, :-1, :] * vs.E_iw[:, :-1, :, vs.tau])\n / vs.dyu[npx.newaxis, :-1, npx.newaxis]\n * vs.maskV[:, :-1, :]\n * vs.cosu[npx.newaxis, :-1, npx.newaxis],\n )\n flux_north = update(flux_north, at[:, -1, :], 0.0)\n vs.E_iw = update_add(\n vs.E_iw,\n at[2:-2, 2:-2, :, vs.taup1],\n settings.dt_tracer\n * vs.maskW[2:-2, 2:-2, :]\n * (\n (flux_east[2:-2, 2:-2, :] - flux_east[1:-3, 2:-2, :])\n / (vs.cost[npx.newaxis, 2:-2, npx.newaxis] * vs.dxt[2:-2, npx.newaxis, npx.newaxis])\n + (flux_north[2:-2, 2:-2, :] - flux_north[2:-2, 1:-3, :])\n / (vs.cost[npx.newaxis, 2:-2, npx.newaxis] * vs.dyt[npx.newaxis, 2:-2, npx.newaxis])\n ),\n )\n\n \"\"\"\n add tendency due to advection\n \"\"\"\n if settings.enable_idemix_superbee_advection:\n flux_east, flux_north, flux_top = advection.adv_flux_superbee_wgrid(state, vs.E_iw[:, :, :, vs.tau])\n\n if settings.enable_idemix_upwind_advection:\n flux_east, flux_north, flux_top = advection.adv_flux_upwind_wgrid(state, vs.E_iw[:, :, :, vs.tau])\n\n if settings.enable_idemix_superbee_advection or settings.enable_idemix_upwind_advection:\n vs.dE_iw = update(\n vs.dE_iw,\n at[2:-2, 2:-2, :, vs.tau],\n vs.maskW[2:-2, 2:-2, :]\n * (\n -(flux_east[2:-2, 2:-2, :] - flux_east[1:-3, 2:-2, :])\n / (vs.cost[npx.newaxis, 2:-2, 
npx.newaxis] * vs.dxt[2:-2, npx.newaxis, npx.newaxis])\n - (flux_north[2:-2, 2:-2, :] - flux_north[2:-2, 1:-3, :])\n / (vs.cost[npx.newaxis, 2:-2, npx.newaxis] * vs.dyt[npx.newaxis, 2:-2, npx.newaxis])\n ),\n )\n vs.dE_iw = update_add(vs.dE_iw, at[:, :, 0, vs.tau], -flux_top[:, :, 0] / vs.dzw[0:1])\n vs.dE_iw = update_add(\n vs.dE_iw,\n at[:, :, 1:-1, vs.tau],\n -(flux_top[:, :, 1:-1] - flux_top[:, :, :-2]) / vs.dzw[npx.newaxis, npx.newaxis, 1:-1],\n )\n vs.dE_iw = update_add(\n vs.dE_iw, at[:, :, -1, vs.tau], -(flux_top[:, :, -1] - flux_top[:, :, -2]) / (0.5 * vs.dzw[-1:])\n )\n\n \"\"\"\n Adam Bashforth time stepping\n \"\"\"\n vs.E_iw = update_add(\n vs.E_iw,\n at[:, :, :, vs.taup1],\n settings.dt_tracer\n * (\n (1.5 + settings.AB_eps) * vs.dE_iw[:, :, :, vs.tau]\n - (0.5 + settings.AB_eps) * vs.dE_iw[:, :, :, vs.taum1]\n ),\n )\n\n return KernelOutput(E_iw=vs.E_iw, dE_iw=vs.dE_iw, iw_diss=vs.iw_diss)", "def k_HF(T, n, h=1e-3):\n mu_p, _ = physics_solver_mu(n * (1 + h), T)\n mu_m, _ = physics_solver_mu(n * (1 - h), T)\n dn_dmu = 2 * h / (mu_p - mu_m) # second order diff\n return dn_dmu / n" ]
[ "0.739296", "0.6562701", "0.65557134", "0.65486634", "0.63957083", "0.6381057", "0.6323597", "0.62995636", "0.6100595", "0.59519017", "0.59446114", "0.59294933", "0.59197277", "0.5910358", "0.5873097", "0.58403426", "0.5745301", "0.5744658", "0.5742014", "0.57419306", "0.5586465", "0.5575102", "0.55693614", "0.5565277", "0.554511", "0.55450875", "0.5477988", "0.54771334", "0.54395854", "0.54345185", "0.54320437", "0.54231226", "0.54215246", "0.5410336", "0.5406086", "0.5405302", "0.53770626", "0.5376996", "0.5371421", "0.53538513", "0.53538513", "0.53432447", "0.5341786", "0.53397214", "0.53390974", "0.5338149", "0.5336114", "0.5329844", "0.5324788", "0.5319931", "0.5311461", "0.5308892", "0.53071505", "0.5295234", "0.528978", "0.5288506", "0.52845293", "0.52824986", "0.52795875", "0.525514", "0.52444184", "0.5243933", "0.523949", "0.523904", "0.52378494", "0.52363217", "0.52325803", "0.52288425", "0.5218044", "0.52142555", "0.5212628", "0.52117234", "0.5207555", "0.52071637", "0.5204991", "0.51933557", "0.5176573", "0.51731825", "0.51659083", "0.5165424", "0.51633626", "0.5161685", "0.51593584", "0.5154478", "0.51531446", "0.5150926", "0.5141433", "0.5138469", "0.5117198", "0.5110873", "0.51088643", "0.5107291", "0.51067567", "0.5104431", "0.50925", "0.5087257", "0.5087257", "0.5085675", "0.5085016", "0.5081219" ]
0.7455563
0
Reproducing kernel. Calculate the reproducing kernel for the even subspace of spherical harmonics of maximum degree N.
def even_kernel(mu, N):
    # Check that -1 <= mu <= 1
    mu = np.clip(mu, -1, 1)

    # Need Legendre polynomials
    legPolys = legp(mu, N)

    coefs = 2*np.arange(0, N+1) + 1

    ker = coefs[0::2]*legPolys[0::2]

    return ker.sum() / (4.0*np.pi)
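For readers who want to run the document snippet above outside its original codebase, here is a minimal, self-contained sketch. It assumes `legp(mu, N)` returns the Legendre polynomial values P_0(mu)..P_N(mu) (that helper is not defined in this row), so `scipy.special.eval_legendre` stands in for it; the demo function name is hypothetical and only mirrors the logic shown above.

```python
import numpy as np
from scipy.special import eval_legendre

def legp(mu, N):
    # Stand-in for the original legp helper (assumption):
    # values of P_n(mu) for n = 0..N.
    return eval_legendre(np.arange(N + 1), mu)

def even_kernel_demo(mu, N):
    # Clip to the valid range of cos(angle)
    mu = np.clip(mu, -1, 1)
    legPolys = legp(mu, N)
    coefs = 2 * np.arange(0, N + 1) + 1
    # Keep only even-degree terms: the even subspace of spherical harmonics
    ker = coefs[0::2] * legPolys[0::2]
    return ker.sum() / (4.0 * np.pi)

# Kernel value at cos(angle) = 0.3 for maximum degree N = 8
print(even_kernel_demo(0.3, 8))
```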
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def even_kernel_der(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n #Derivatives of Legendre polynomials\n DlegPolys = legp_der(mu, N)\n \n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs[0::2]*DlegPolys[0::2] \n\n return ker.sum() / (4.0*np.pi)", "def inv_funk_radon_even_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n p_at_zero = legp(0, N)\n\n coefs_num = 2*np.arange(0, N+1) + 1\n coefs_den = np.arange(2,N+1,2) * (np.arange(2,N+1,2) + 1)\n\n ker = coefs_num[2::2]*legPolys[2::2] / (p_at_zero[2::2] * coefs_den)\n\n return ker.sum() / (8.0*np.pi*np.pi)", "def init_stitch(N):\n\tif N is None:\n\t\tN = np.int32(HII_DIM) #prepare for stitching\n\tMETA_GRID_SIZE = DIM/N\n\tM = np.int32(HII_DIM/META_GRID_SIZE)\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN/DIM)\n\tprint 'scale', scale\n\tHII_scale = np.float32(BOX_LEN/HII_DIM)\n\tshape = (DIM,DIM,N)\n\tstitch_grid_size = (DIM/(block_size[0]),\n\t\t\t\t\t\tDIM/(block_size[0]),\n\t\t\t\t\t\tN/(block_size[0]))\n\tHII_stitch_grid_size = (HII_DIM/(block_size[0]),\n\t\t\t\t\t\tHII_DIM/(block_size[0]),\n\t\t\t\t\t\tM/(block_size[0]))\n\t#ratio of large box to small size\n\tkernel_source = open(cmd_folder+\"/initialize_stitch.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'DIM': DIM, \n\t\t'VOLUME': VOLUME,\n\t\t'META_BLOCKDIM': N\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_stitch = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\tplan2d = Plan((np.int64(DIM), np.int64(DIM)), dtype=np.complex64)\n\tplan1d = Plan((np.int64(DIM)), dtype=np.complex64)\n\tprint \"init pspec\"\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\t#hbox_large = pyfftw.empty_aligned((DIM, DIM, DIM), dtype='complex64')\n\thbox_large = np.zeros((DIM, DIM, DIM), dtype=np.complex64)\n\t#hbox_small = np.zeros(HII_shape, dtype=np.float32)\n\t#hbox_large = n\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\n\t# Set up pinned memory for transfer\n\t#largebox_hs = cuda.aligned_empty(shape=shape, dtype=np.float32, alignment=resource.getpagesize())\n\tlargebox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.float32)\n\tlargecbox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.complex64)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tprint \"init boxes\"\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t# MRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=meta_x*N**3)\n\t\tinit_stitch(largebox_d, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tinit_stitch(largebox_d_imag, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\thbox_large[:, :, 
meta_z*N:(meta_z+1)*N] = largecbox_pin.copy()\n\t#if want to get velocity need to use this\n\tif True:\n\t\tprint \"saving kbox\"\n\t\tnp.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\n\tprint \"Executing FFT on device\"\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint hbox_large.dtype\n\tprint \"Finished FFT on device\"\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\t\n\tif True:\n\t\tprint \"loading kbox\"\n\t\thbox_large = np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\t#cuda.memcpy_htod_async(largebox_d, largebox_pin)\n\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tHII_filter(largebox_d, DIM, np.int32(meta_z), ZERO, smoothR, block=block_size, grid=stitch_grid_size);\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largebox_d.get_async()\n\t#import IPython; IPython.embed()\n\tprint \"Executing FFT on host\"\n\t#hbox_large = hifft(hbox_large).astype(np.complex64).real\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint \"Finished FFT on host\"\n\t#import IPython; IPython.embed()\n\n\t# for meta_x in xrange(META_GRID_SIZE):\n\t# \tfor meta_y in xrange(META_GRID_SIZE):\n\t# \t\tfor meta_z in xrange(META_GRID_SIZE):\n\t# \t\t\tlargebox_d = gpuarray.to_gpu(hbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N])\n\t# \t\t\tHII_filter(largebox_d, N, np.int32(meta_x), np.int32(meta_y), np.int32(meta_z), ZERO, smoothR, block=block_size, grid=grid_size);\n\t# \t\t\thbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N] = largebox_d.get()\n\t#plan = Plan(shape, dtype=np.complex64)\n\t#plan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\n\n\t# This saves a large resolution deltax\n\n\t\n\tprint \"downsampling\"\n\tsmallbox_d = gpuarray.zeros((HII_DIM,HII_DIM,M), dtype=np.float32)\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\t#largebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tlargebox_d /= scale**3 #\n\t\tsubsample_kernel(largebox_d, smallbox_d, DIM, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size) #subsample in real space\n\t\thbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallbox_d.get_async()\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), hbox_small)\n\t#import IPython; IPython.embed()\n\n\n\t# To get velocities: reload the k-space box\n\thbox_large = np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\thvbox_large = np.zeros((DIM, DIM, DIM), dtype=np.float32)\n\thvbox_small = np.zeros(HII_shape, dtype=np.float32)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,N), dtype=np.complex64)\n\tsmallvbox_d = gpuarray.zeros((HII_DIM, HII_DIM, M), dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tfor meta_z in 
xrange(META_GRID_SIZE):\n\t\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\t\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(meta_z), np.int32(num), block=block_size, grid=stitch_grid_size)\n\t\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=stitch_grid_size)\n\t\t\tprint hvbox_large.shape, largevbox_d.shape\n\t\t\thvbox_large[:, :, meta_z*N:(meta_z+1)*N] = largevbox_d.get_async()\n\t\thvbox_large = fft_stitch(N, plan2d, plan1d, hvbox_large, largevbox_d).real\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargevbox_d = gpuarray.to_gpu_async(hvbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\tsubsample_kernel(largevbox_d.real, smallvbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size)\n\t\t\thvbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallvbox_d.get_async()\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallvbox_d.get())\n\n\treturn", "def kernel_factory(s, m1, m2):\r\n m_max = max(m1, m2)\r\n A = np.zeros([s, m_max, m_max], dtype=complex)\r\n symmetry = random.choice([2, 3, 4, 6])\r\n half_sym = np.floor(symmetry / 2).astype('int')\r\n lowest_k = 0.5\r\n highest_k = 3\r\n k = np.zeros([s, symmetry])\r\n for level in range(s):\r\n k[level, :] = np.random.uniform(lowest_k, highest_k, symmetry)\r\n\r\n x, y = np.meshgrid(np.linspace(-1, 1, m_max), np.linspace(-1, 1, m_max))\r\n # dist = np.sqrt(x * x + y * y)\r\n # theta = np.arctan(x / y)\r\n arb_angle = np.random.uniform(0, 2 * np.pi)\r\n for direction in range(symmetry):\r\n ang = direction * 180 / symmetry\r\n ang = arb_angle + ang * np.pi / 180\r\n r = (x * np.cos(ang) + np.sin(ang) * y)\r\n phi = np.random.uniform(0, 2 * np.pi)\r\n for i in range(s):\r\n A[i, :, :] += np.cos(2 * np.pi * k[i, direction % half_sym] * r)\r\n\r\n # Adding normal decay\r\n sigma = np.random.uniform(0.3, 0.6)\r\n decay = gaussian_window(m_max, m_max, sigma)\r\n A = np.multiply(np.abs(A), decay)\r\n # Normalizing:\r\n A = sphere_norm_by_layer(A)\r\n return A", "def nd_kernel(n):\n n = int(n)\n total_size = 3**n\n mid_point = int((3**n - 1)/2)\n kern = np.zeros(total_size, dtype=bool)\n for i in range(n):\n kern[mid_point-3**i] = True\n kern[mid_point+3**i] = True\n new_shape = 3*np.ones(n, dtype=int) \n unnormed_kern = kern.reshape(new_shape)\n return unnormed_kern/unnormed_kern.sum()", "def kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n \n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs*legPolys \n\n return ker.sum() / (4.0*np.pi)", "def inv_funk_radon_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n p_at_zero = legp(0, N)\n coefs = 2*np.arange(0, N+1, 2) + 1\n ker = coefs*legPolys[::2]/p_at_zero[::2]\n return ker.sum() / (8*np.pi)", "def moffat_kernel(n_fwhm,beta,r_s):\n\n x_length = int(n_rs * r_s + 0.5) #Add 0.5 to approximate to nearest integer\n y_length = x_length\n \n\n x, y = mgrid[-x_length:x_length+1, -y_length:y_length+1]\n\t\n m = 1. 
/((1+(x**2+y**2)/r_s**2)**beta)\n\t\t\n\n return m / m.sum()", "def _kernel(r: float, h: float) -> float:\n sigma_2 = 10 / (7 * np.pi * h * h)\n q = abs(r / h)\n\n if q <= 1.0:\n q2 = q * q\n W = 1.0 - 1.5 * q2 * (1.0 - 0.5 * q)\n W *= sigma_2\n elif q <= 2.0:\n two_minus_q = 2 - q\n two_minus_q_c = np.power(two_minus_q, 3)\n W = 0.25 * two_minus_q_c\n W *= sigma_2\n else:\n W = 0\n\n return W", "def f(k):\n return k * k * k * k * pk(k, suppression) * spherical_jn(2, k * r)", "def init():\n\tN = np.int32(DIM) #prepare for stitching\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN)/DIM\n\tHII_scale = np.float32(BOX_LEN)/HII_DIM\n\tshape = (N,N,N)\n\t\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\n\tkernel_source = open(cmd_folder+\"/initialize.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'VOLUME': VOLUME,\n\t\t'DIM': DIM\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_kernel = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tadj_complex_conj = main_module.get_function(\"adj_complex_conj\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d, np.int32(DIM), block=block_size, grid=grid_size)\n\n\t#import IPython; IPython.embed()\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d_imag, np.int32(DIM), block=block_size, grid=grid_size)\n\n\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\n\t#adj_complex_conj(largebox_d, DIM, block=block_size, grid=grid_size)\n\tlargebox = largebox_d.get()\n\t#np.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox)\n\n\t#save real space box before smoothing\n\tplan = Plan(shape, dtype=np.complex64)\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox_d.real.get_async())\n\n\t#save real space box after smoothing and subsampling\n\t# host largebox is still in k space, no need to reload from disk\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tHII_filter(largebox_d, N, ZERO, smoothR, block=block_size, grid=grid_size);\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tsubsample_kernel(largebox_d.real, smallbox_d, N, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_grid_size) #subsample in real space\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), smallbox_d.get_async())\n\n\t# reload the k-space box for velocity boxes\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\t\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = 
gpuarray.zeros((DIM,DIM,DIM), dtype=np.complex64)\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(num), block=block_size, grid=grid_size)\n\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=grid_size)\n\t\tplan.execute(largevbox_d, inverse=True)\n\t\tlargevbox_d /= scale**3\n\t\t#import IPython; IPython.embed()\n\t\tsubsample_kernel(largevbox_d.real, smallbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_grid_size)\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallbox_d.get())\n\n\treturn", "def f(k):\n return k * k * k * k * pk(k, suppression) * spherical_jn(0, k * r)", "def test():\n\n S = \"cells interlinked within cells interlinked\"\n T = \"within one stem and dreadfully distinct\"\n\n n = 2\n\n res = kernel(S, T, n)\n\n print(res)\n print('k(car, car, 1) = ', kernel('car', 'car', 1),\n 'should be 3*lambda^2 = .75')\n print('k(car, car, 2) = ', kernel('car', 'car', 2),\n ' should be lambda^6 + 2*lambda^4 = 0.140625')\n print('k(car, car, 3) = ', kernel('car', 'car', 3),\n 'should be lambda^6 = 0.0156')\n\n print('normkernel(cat, car, 1) = ', normkernel('cat', 'car', 1),\n 'should be 2/3')\n print('kernel(cat, car, 2) = ', kernel('cat', 'car', 2),\n 'should be lambda^4 = 0.0625')\n print('normkernel(cat, car, 2) = ', normkernel('cat', 'car', 2),\n 'should be 1/(2+lambda^2) = 0.44444')\n\n print(\n kernel(\"AxxxxxxxxxB\", \"AyB\", 2),\n 'should be =0.5^14 = 0.00006103515625')\n print(\n kernel(\"AxxxxxxxxxB\", \"AxxxxxxxxxB\", 2),\n 'should be 12.761724710464478')\n\n print(kernel(\"ab\", \"axb\", 2), 'should be =0.5^5 = 0.03125')\n print(kernel(\"ab\", \"abb\", 2), 'should be 0.5^5 + 0.5^4 = 0.09375')\n print(normkernel(\"ab\", \"ab\", 2), 'should be 1')\n print(normkernel(\"AxxxxxxxxxB\", \"AxxxxxxxxxB\", 2), 'should be 1')\n\n kss = [0.580, 0.580, 0.478, 0.439, 0.406, 0.370]\n for x in range(1, 7):\n print(x,\n normkernel(\"science is organized knowledge\",\n \"wisdom is organized life\", x), 'should be',\n kss[x - 1])", "def f(k):\n return k * k * k * pk(k, suppression) * spherical_jn(1, k * r)", "def f(k):\n return k * k * pk(k, suppression) * spherical_jn(0, k * r)", "def filter_wrapped_phase(image, k):\n ny, nx = image.shape\n assert(ny == nx) ## assert a square image for simplicity\n if (k%2 == 0):\n print(\"k has to be an integer!\")\n return\n N = nx\n i, j = np.arange(N), np.arange(N)\n ii, jj = np.meshgrid(i, j)\n filt_psi = np.zeros((N,N))\n\n inside = (jj[k/2:N-(k/2), k/2:N-(k/2)].flatten(), ii[k/2:N-(k/2), k/2:N-(k/2)].flatten())\n krange = np.linspace(-1 * (k/2), (k/2), k, dtype = 'int64') ## amount of added spaces, if k = 5, it ranges from -2 to 2\n krange_tile = np.tile(krange * N, (k, 1)).T ## tile them to make a (k/2)**2 matrix, containing for instance -2N, -N, 0, N, 2N for k=5\n k_tile = np.tile(krange, (k, 1)) ## tile to add to krange_tile\n coords_add = (krange_tile + k_tile).flatten() ## all coordinates, in a (k/2)**2 matrix, from -2N - 2: -2N + 2, -N-2 : -N+2 , -2 : 2, N -2 : N +2, 2N -2 : 2N +2\n inside = np.ravel_multi_index(inside, (N, N))\n coords_add = np.tile(coords_add, (len(inside), 1)) ## stack all differences to add to inside\n inside_tile = np.tile(inside, (coords_add.shape[1],1)).T ## stack all inside to add to differences\n all_coords = inside_tile + coords_add### a matrix of len(inside) x (k/2)**2 with all coordinates in a k x k square around a 
certain coordinate\n unrav_coords = np.unravel_index(all_coords, (N, N)) ## unraveled coordinates of all coordinates\n sum_sin_psi = np.sum(np.sin(image[unrav_coords]), axis = 1) ## sum over a sin (psi) over a k x k square\n sum_cos_psi = np.sum(np.cos(image[unrav_coords]), axis = 1) ## sum over a cos (psi) over a k x k square\n psi_app = np.arctan2(sum_sin_psi, sum_cos_psi)\n filt_psi[np.unravel_index(inside, (N, N))] = psi_app \n\n #### top layers\n for i in range(k/2):\n ## for indices directly above the \"inside square\"\n top = (jj[i, k/2:N-(k/2)].flatten(), ii[i, k/2: N - (k/2)].flatten())\n coords_add = (krange_tile + k_tile)[(k/2)-i:, :].flatten()\n top = np.ravel_multi_index(top, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n top_tile = np.tile(top, (coords_add.shape[1],1)).T\n top_coords = top_tile + coords_add\n unrav_coords = np.unravel_index(top_coords, (N, N))\n sum_sin_top = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_top = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_top = np.arctan2(sum_sin_top, sum_cos_top)\n filt_psi[np.unravel_index(top, (N, N))] = psi_top\n\n ## indices directly below the \"inside square\"\n bot = (jj[N- 1 - i, k/2:N-(k/2)].flatten(), ii[N-1-i, k/2: N - (k/2)].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:(k/2) + 1 + i, :].flatten()\n bot = np.ravel_multi_index(bot, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n bot_tile = np.tile(bot, (coords_add.shape[1],1)).T\n bot_coords = bot_tile + coords_add\n unrav_coords = np.unravel_index(bot_coords, (N, N))\n sum_sin_bot = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_bot = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_bot = np.arctan2(sum_sin_bot, sum_cos_bot)\n filt_psi[np.unravel_index(bot, (N, N))] = psi_bot\n\n ## indices directly left of the \"inside square\"\n left = (jj[k/2:N-(k/2), i].flatten(), ii[k/2:N-(k/2), i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, (k/2)-i:].flatten()\n left = np.ravel_multi_index(left, (N, N))\n coords_add = np.tile(coords_add, (len(left), 1))\n left_tile = np.tile(left, (coords_add.shape[1],1)).T\n left_coords = left_tile + coords_add\n unrav_coords = np.unravel_index(left_coords, (N, N))\n sum_sin_left = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_left = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_left = np.arctan2(sum_sin_left, sum_cos_left)\n filt_psi[np.unravel_index(left, (N, N))] = psi_left\n\n ## indices directly left of the \"inside square\"\n right = (jj[k/2:N-(k/2), N - 1 - i].flatten(), ii[k/2:N-(k/2), N - 1 - i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, :(k/2)+1+i].flatten()\n right = np.ravel_multi_index(right, (N, N))\n coords_add = np.tile(coords_add, (len(right), 1))\n right_tile = np.tile(right, (coords_add.shape[1],1)).T\n right_coords = right_tile + coords_add\n unrav_coords = np.unravel_index(right_coords, (N, N))\n sum_sin_right = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_right = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_right = np.arctan2(sum_sin_right, sum_cos_right)\n filt_psi[np.unravel_index(right, (N, N))] = psi_right\n \n ## calculate boundaries diagonals\n left_t, right_t, left_b, right_b = (i, i), (i, -1 -i), (-1 - i, i), (-1 - i, -1 - i) \n left_t, right_t, left_b, right_b = (jj[left_t], ii[left_t]), (jj[right_t], ii[right_t]), (jj[left_b], ii[left_b]), (jj[right_b], ii[right_b])\n left_t, 
right_t, left_b, right_b = np.ravel_multi_index(left_t, (N, N)), np.ravel_multi_index(right_t, (N, N)), np.ravel_multi_index(left_b, (N, N)), np.ravel_multi_index(right_b, (N, N))\n coord_mat = krange_tile + k_tile\n coords_add_lt, coords_add_rt, coords_add_lb, coords_add_rb = coord_mat[(k/2)-i:, (k/2)-i:].flatten(), coord_mat[(k/2)-i:, :(k/2)+1+i].flatten(), coord_mat[:(k/2)+i+1, (k/2)-i:].flatten(), coord_mat[:(k/2)+i+1, :(k/2)+i+1].flatten()\n coords_add_tot = np.vstack((coords_add_lt, coords_add_rt, coords_add_lb, coords_add_rb))\n lt_tile, rt_tile, lb_tile, rb_tile = np.tile(left_t, (coords_add_lt.shape[0],1)).T, np.tile(right_t, (coords_add_lt.shape[0],1)).T, np.tile(left_b, (coords_add_lt.shape[0],1)).T, np.tile(right_b, (coords_add_lt.shape[0],1)).T\n coords_tile_tot = np.squeeze(np.stack((lt_tile, rt_tile, lb_tile, rb_tile)))\n coords_tot = coords_add_tot + coords_tile_tot\n unrav_coords = np.unravel_index(coords_tot, (N, N))\n sum_sin_diag = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_diag = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_diag = np.arctan(sum_sin_diag, sum_cos_diag)\n filt_psi[np.unravel_index(np.stack((left_t, right_t, left_b, right_b)), (N, N))] = psi_diag\n\n return filt_psi", "def kernel(n):\r\n return [(k, n - abs(k)) for k in range(-n, n + 1)]", "def eg3(N_train=1000, N_test=500, depend_ratio_train=0.8, depend_ratio_test=0.2, feature_num=10, stable_ratio=0.4):\n\n def eg3_kernel(n, p, stable_ratio=0.4, depend_ratio=0.8):\n p_stable = int(p * stable_ratio)\n p_noise = p - p_stable\n stable_feature = np.random.randn(n, p_stable)\n noise_feature_dependent = np.zeros([n, p_noise])\n noise_feature_independent = np.random.randn(n, p_noise)\n for i in range(p_noise):\n noise_feature_dependent[:, i] = stable_feature[:, i % p_stable] + stable_feature[:,\n (i + 1) % p_stable] + 2 * np.random.randn(\n n) # still need noise\n noise_depend_label = np.random.uniform(0, 1, n).reshape(-1, 1)\n noise_depend_label = np.concatenate([noise_depend_label] * p_noise, axis=1)\n noise_feature = np.where(noise_depend_label < depend_ratio, noise_feature_dependent, noise_feature_independent)\n\n b = np.zeros([p_stable, 1])\n linear_len = int(p_stable / 2)\n\n for i in range(linear_len): # linear part\n b[i, 0] = (-1) ** i * (i % 3 + 1) * p / 3\n for i in range(linear_len, b.shape[0]): # nonlinear part\n b[i, 0] = p / 2\n\n linear_part = np.matmul(stable_feature[:, :linear_len], b[:linear_len, 0])\n nolinear_part = np.zeros([n, 1])\n for i in range(linear_len, b.shape[0]):\n temp = stable_feature[:, i % p_stable] * stable_feature[:, (i + 1) % p_stable] * b[i, 0]\n temp = temp.reshape(-1, 1)\n nolinear_part += temp\n\n Y = linear_part.reshape(-1, 1) + nolinear_part + np.random.randn(n, 1)\n\n data = {}\n data['stable'] = stable_feature\n data['noise'] = noise_feature\n data['Y'] = Y\n data['params'] = b\n data['kernel'] = 'eg3'\n return data\n\n data_train = eg3_kernel(n=N_train, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_train)\n data_test = eg3_kernel(n=N_test, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_test)\n return data_train, data_test", "def ghosal_edge(img,Ks,thr=1,thrmax=0.995,lmin = 0.5,phimin=1.4,thresholding=True, debug=False):\n\ttotaltime = time.time()\n\tkerneltime = time.time()\n\t# Ks must be odd\n\tif Ks%2 != 1:\n\t\tprint(\"Ks must be odd! 
Continuing with Ks = Ks-1\")\n\t\tKs = Ks-1\n\t# define the rectangular kernels\n\t#Vc00 = np.zeros((Ks,Ks),dtype=complex)\n\tVc11 = np.zeros((Ks,Ks),dtype=complex)\n\tVc20 = np.zeros((Ks,Ks),dtype=complex)\n\tofs = 1 *(1-1/Ks) # offset for centering kernel around 0,0\n\tfor i in range(Ks):\n\t\tfor j in range(Ks):\n\t\t\tKx = 2*j/Ks-ofs # limits of integration between -1 and 1\n\t\t\tKy = 2*i/Ks-ofs\n\t\t\tif Kx**2+Ky**2 <= 1: # only a circle\n\t\t\t\t#Vc00[i,j] = 1 # the conjugate of V00\n\t\t\t\tVc11[i,j] = Kx-Ky*1j # ...\n\t\t\t\tVc20[i,j] = 2*Kx**2+2*Ky**2-1\n\tkerneltime = time.time() - kerneltime\n\t\n\t# Kernel Plots\n\t#\tVCplot = Vc00\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real w K Vc00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag w K Vc00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = Vc11\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real w K Vc11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag w K Vc11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = Vc20\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real w K Vc20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag w K Vc20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t\n\t# do the convolution with the images to get the zernike moments\n\tAnorm = lambda n : (n+1)/np.pi\t# a normalization value\n\tconvolvetime = time.time()\n\t#A00 = scig.convolve2d(img,Vc00,mode='same')\n\t#\tA11 = Anorm(1)*scig.convolve2d(img,Vc11,mode='same')\n\t#\tA20 = Anorm(2)*scig.convolve2d(img,Vc20,mode='same')\n\tA11 = Anorm(1)*scig.oaconvolve(img,Vc11,mode='same')\n\tA20 = Anorm(2)*scig.oaconvolve(img,Vc20,mode='same')\n\tconvolvetime = time.time() - convolvetime\n\t# Plot Zernike moments\n\t#\tVCplot = A00\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = A11\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = A20\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t\n\tparamstime = time.time()\n\t# calculate the edge paramters\n\t#\ttanphi = np.imag(A11)/np.real(A11)\n\t#\tphi = np.arctan(tanphi)\n\t#\tcosphi = np.cos(phi)\n\t#\tsinphi = cosphi*tanphi\n\t#\tAl11 = np.real(A11)*cosphi+np.imag(A11)*sinphi\n\t\n\tphi = np.arctan(np.imag(A11)/np.real(A11))\n\tAl11 = np.real(A11)*np.cos(phi)+np.imag(A11)*np.sin(phi)\n\t\n\t#\tAl11 = A11*np.exp(-phi*1j)\n\tl = A20/Al11 # A20 has no imaginary component so A20 = A'20\n\n\tk = 3*Al11/(2*(1-l**2)**(3/2))\n\tparamstime = time.time() - paramstime\n\t\n\t# Plot edge paramters\n\t#\tVCplot = phi\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real phi\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag phi\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = Al11\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A\\'11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag 
A\\'11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = l\n\t#\tplt.pcolormesh(np.real(VCplot))#,vmin=-5,vmax=5\n\t#\tplt.title(\"real l\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot)) # ,vmin=-5,vmax=5\n\t#\tplt.title(\"imag l\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = k\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real k\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag k\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t\n\t\n\ttreattime = time.time()\n\tif thresholding==True:\n\t\t# do the thresholding\n\t\tif (thrmax<0)&(thr>0):\n\t\t\tknorm = np.sort(k.flatten())[[int(thr*np.size(k)),int(thrmax*np.size(k))]]\n\t\t\tidx = (abs(l)<lmin)&(abs(phi)>phimin)&(abs(k)>knorm[0])\n\t\telif thrmax>0:\n\t\t\tknorm = np.sort(k.flatten())[[int(thr*np.size(k)),int(thrmax*np.size(k))]]\n\t\t\tidx = (abs(l)<lmin)&(abs(phi)>phimin)&(abs(k)>knorm[0])&(abs(k)<knorm[1])\n\t\telif thr<0:\n\t\t\tidx = (abs(l)<lmin)&(abs(phi)>phimin)\n\t\t\tknorm = np.sort(k[idx].flatten())[int(thr)]\n\t\t\tidx = idx&(abs(k)>abs(knorm))\n\t\tne = np.sum(idx)\n\telif thresholding==False:\n\t\traise ValueError(\"this option is not still uncer development\")\n\t\t# no thresholding\n\t\tidx = np.ones(np.shape(l),dtype=bool)\n\t\tne =np.sum(idx)\n\telse:\n\t\traise ValueError(\"thresholding should be boolean\")\n\t\n\t# put all detected points in a vector of (x,y) values\n\tedg = np.zeros((ne,2))\n\torg = np.zeros((ne,2))\n\tnx,ny = np.shape(img)\n\te = 0\n\tfor i in range(nx):\n\t\tfor j in range(ny):\n\t\t\tif idx[i,j]:\n\t\t\t\tedg[e]=np.array([i,j]) + l[i,j]*Ks/2*np.array(\n\t\t\t\t\t[np.sin(phi[i,j]),-np.cos(phi[i,j])])\n\t\t\t\torg[e]=np.array([i,j])\n\t\t\t\te +=1\n\ttreattime = time.time() - treattime\n\ttotaltime = time.time() - totaltime\n\tprint(\"total %0.5f\tconvolution %0.5f\tthresholding %0.5f\tparamters %0.5f\tkernel %0.5f\"%(totaltime,convolvetime,treattime,paramstime,kerneltime))\n\t\n\tif debug==True:\n\t\treturn edg, org, k, l, phi\n\telse:\n\t\treturn edg, org", "def compute_gradient_kernel_respect_to_noise(n):\n\n return np.identity(n)", "def ghosal_edge_v2(img,Ks,kmin=0,kmax=1000,lmax=0.5,phimin=1,thresholding=True,debug=False,mirror=False):\n\t# gather image properties before its altered\n\tni,nj = np.shape(img)\n\t# Ks must be odd\n\tif Ks%2 != 1:\n\t\tprint(\"Ks must be odd! 
Continuing with Ks = Ks-1\")\n\t\tKs = Ks-1\n\t# define the rectangular kernels\n\t#Vc00 = np.zeros((Ks,Ks),dtype=complex) # not needed\n\tVc11 = np.zeros((Ks,Ks),dtype=complex)\n\tVc20 = np.zeros((Ks,Ks),dtype=complex)\n\tofs = 1 *(1-1/Ks) # offset for centering kernel around 0,0\n\tfor i in range(Ks):\n\t\tfor j in range(Ks):\n\t\t\tKx = 2*j/Ks-ofs # limits of integration between -1 and 1\n\t\t\tKy = 2*i/Ks-ofs\n\t\t\tif Kx**2+Ky**2 <= 1: # only a circle\n\t\t\t\t#Vc00[i,j] = 1 # the conjugate of V00 # not needed\n\t\t\t\tVc11[i,j] = Kx-Ky*1j # ...\n\t\t\t\tVc20[i,j] = 2*Kx**2+2*Ky**2-1\n\t# mirror the edges to avoid edge effects from convolution\n\tif mirror:\n\t\tthick = int((Ks-1)/2)\n\t\timg = np.concatenate((img[:,(thick-1)::-1],img,img[:,:-(thick+1):-1]),1)\n\t\timg = np.concatenate((img[(thick-1)::-1,:],img,img[:-(thick+1):-1,:]),0)\n\t\tmode = \"valid\"\n\telse:\n\t\tmode = \"same\"\n\t\n\t# do the convolution with the images to get the zernike moments\n\tAnorm = lambda n : (n+1)/np.pi\t# a normalization value\n\t#A00 = scig.convolve2d(img,Vc00,mode='same') # not needed\n\tA11 = Anorm(1)*scig.oaconvolve(img,Vc11,mode=mode)\n\tA20 = Anorm(2)*scig.oaconvolve(img,Vc20,mode=mode)\n\n\tphi = np.arctan(np.imag(A11)/zero_to_small(np.real(A11)))\n\tAl11 = np.real(A11)*np.cos(phi)+np.imag(A11)*np.sin(phi)\n\tl = np.real(A20)/Al11 # A20 has no imaginary component so A20 = A'20\n\tl = np.minimum(l,1-SMALL) # chop off those that go beyond the kernel boundaries\n\tl = np.maximum(l,-1+SMALL)\n\tk = abs(3*Al11/(2*(1-l**2)**(3/2))) \n\t\n\tif thresholding==True:\n\t\t# conditions\n\t\tphi_c = abs(phi)>phimin\n\t\tl_c = abs(l)<lmax\n\t\tk_c = (k<kmax) & (k>kmin)\n\t\tvalid = phi_c & (k_c & l_c)\n\telif thresholding==False:\n\t\tvalid = np.ones_like(k)\n\t# define a grid of pixel positions\n\ti,j = np.meshgrid(np.arange(nj),np.arange(ni))\n\t\n\t# get a list of the valid relevant parameters \n\ti = i[valid]\n\tj = j[valid]\n\t#\tk = k[valid] # not necessary\n\tl = l[valid]\n\tphi = phi[valid]\n\t\n\t# convert to the subpixel position\n\ti_s = i+l*Ks/2*np.cos(phi)\n\tj_s = j+l*Ks/2*np.sin(phi)\n\t\n\t# put all detected points in a vector of (x,y) values\n\tedg = np.squeeze((j_s,i_s)).transpose()\n\torg = np.squeeze((j,i)).transpose()\n\tif debug==True:\n\t\treturn edg, org, k, l, phi\n\telse:\n\t\treturn edg, org", "def SE(H, W):\n\n no_real, N, N, K, M = H.shape\n all_powers = np.swapaxes(np.swapaxes(H, 0, 1) @ hermitian(W), 0, 1)\n all_powers = np.abs(all_powers) ** 2\n\n\n\n # (no_real, N, N, K, K)\n # (no_real, n_t, n, k, k_neighbor)\n # the power coming from BS n_t to User k in BS n, using the\n # precoding of BS n_t to user k_neighbor in BS n1\n\n\n p_sig = np.zeros((no_real, N, K))\n p_int = np.zeros((no_real, N, K, N))\n sinr = np.zeros_like(p_sig)\n\n\n for r in range(no_real):\n for n in range(N):\n for k in range(K):\n p_sig[r, n, k] = all_powers[r, n, n, k, k]\n for n_t in range(N):\n p_int[r, n, k, n_t] = all_powers[r, n_t, n, k].sum()\n if n_t == n:\n p_int[r, n, k, n_t] -= p_sig[r,n,k]\n sinr = p_sig / ((p_int).sum(axis=-1) + 1)\n return np.log2(1 + sinr), p_sig, p_int", "def gauss_kernel(n_fwhm,sigma):\n\n x_length = int(n_fwhm * sigma + 0.5) #Add 0.5 to approximate to nearest integer\n y_length = x_length\n \n \n x, y = mgrid[-x_length:x_length+1, -y_length:y_length+1]\n g = numpy.exp(-(x**2/(2*(float(sigma)**2))+y**2/(2*(float(sigma)**2))))\n return g / g.sum()", "def weak_lensing_kernel(cosmo, pzs, z, ell):\n z = np.atleast_1d(z)\n zmax = max([pz.zmax for pz in pzs])\n # Retrieve 
comoving distance corresponding to z\n chi = bkgrd.radial_comoving_distance(cosmo, z2a(z))\n\n # Extract the indices of pzs that can be treated as extended distributions,\n # and the ones that need to be treated as delta functions.\n pzs_extended_idx = [\n i for i, pz in enumerate(pzs) if not isinstance(pz, rds.delta_nz)\n ]\n pzs_delta_idx = [i for i, pz in enumerate(pzs) if isinstance(pz, rds.delta_nz)]\n # Here we define a permutation that would put all extended pzs at the begining of the list\n perm = pzs_extended_idx + pzs_delta_idx\n # Compute inverse permutation\n inv = np.argsort(np.array(perm, dtype=np.int32))\n\n # Process extended distributions, if any\n radial_kernels = []\n if len(pzs_extended_idx) > 0:\n\n @vmap\n def integrand(z_prime):\n chi_prime = bkgrd.radial_comoving_distance(cosmo, z2a(z_prime))\n # Stack the dndz of all redshift bins\n dndz = np.stack([pzs[i](z_prime) for i in pzs_extended_idx], axis=0)\n return dndz * np.clip(chi_prime - chi, 0) / np.clip(chi_prime, 1.0)\n\n radial_kernels.append(simps(integrand, z, zmax, 256) * (1.0 + z) * chi)\n # Process single plane redshifts if any\n if len(pzs_delta_idx) > 0:\n\n @vmap\n def integrand_single(z_prime):\n chi_prime = bkgrd.radial_comoving_distance(cosmo, z2a(z_prime))\n return np.clip(chi_prime - chi, 0) / np.clip(chi_prime, 1.0)\n\n radial_kernels.append(\n integrand_single(np.array([pzs[i].params[0] for i in pzs_delta_idx]))\n * (1.0 + z)\n * chi\n )\n # Fusing the results together\n radial_kernel = np.concatenate(radial_kernels, axis=0)\n # And perfoming inverse permutation to put all the indices where they should be\n radial_kernel = radial_kernel[inv]\n\n # Constant term\n constant_factor = 3.0 * const.H0 ** 2 * cosmo.Omega_m / 2.0 / const.c\n # Ell dependent factor\n ell_factor = np.sqrt((ell - 1) * (ell) * (ell + 1) * (ell + 2)) / (ell + 0.5) ** 2\n return constant_factor * ell_factor * radial_kernel", "def _calc_kernel(self,\n freq_1: float,\n time_1: float,\n freq_2: float,\n time_2: float,\n dagg: tuple\n ) -> Tuple[ndarray, ndarray]:\n dt = self._process_tensor.dt\n #pieces of kernel consist of some combination of phases and\n #Bose-Einstein factors\n n_1, n_2 = 0, 0\n if self._temp > 0:\n n_1 += np.exp(-freq_1/self._temp) / (1 - np.exp(-freq_1/self._temp))\n n_2 += np.exp(-freq_2/self._temp) / (1 - np.exp(-freq_2/self._temp))\n\n ker_dim = int(np.round(time_2 / dt))\n # calculate index corresponding to t_1\n switch = int(np.round(time_1 / dt))\n re_kernel = np.zeros((ker_dim, ker_dim), dtype = NpDtype)\n im_kernel = np.zeros((ker_dim, ker_dim), dtype = NpDtype)\n\n tpp_index, tp_index = np.meshgrid(\n np.arange(ker_dim), np.arange(ker_dim),\n indexing='ij') #array of indices for each array element\n regions = {\n 'a': (slice(switch), slice(switch)), #(0->t_1, 0->t_1)\n 'b': (slice(switch), slice(switch, None)), #(0->t_1, t_1->t)\n 'c': (slice(switch, None), slice(switch, None))} #(t_1->t, t_1->t)\n\n def phase(region, swap_ts = False):\n tk = tp_index[regions[region]]\n tkp = tpp_index[regions[region]]\n if tk.size == 0 or tkp.size == 0:\n return 0\n a = -1j * ((2*dagg[0] - 1)) * freq_2\n b = -1j * ((2*dagg[1] - 1)) * freq_1\n if swap_ts:\n a, b = b, a\n if region in ('a','c'):\n ph = np.triu(\n np.exp(a * (tk+1)*dt + b * (tkp+1)*dt) / (a * b), k = 1)\n ph -= np.triu(\n np.exp(a * (tk+1)*dt + b * tkp*dt) / (a * b), k = 1)\n ph -= np.triu(\n np.exp(a * tk*dt + b * (tkp+1)*dt) / (a * b), k = 1)\n ph += np.triu(\n np.exp(a * tk*dt + b * tkp*dt) / (a * b), k = 1)\n sel = np.diag(tk)\n di = -np.exp((a * 
(sel + 1) + b * sel) * dt) / (a * b)\n if a + b != 0:\n di += np.exp((a + b) * (sel + 1) * dt) / (b * (a+b))\n di += np.exp((a + b) * sel * dt) / (a * (a+b))\n else:\n di += (1 + a * sel * dt + b * (sel + 1) * dt) / (a * b)\n ph += np.diag(di)\n else:\n ph = np.exp(a * (tk+1)*dt + b * (tkp+1)*dt) / (a * b)\n ph -= np.exp(a * (tk+1)*dt + b * tkp*dt) / (a * b)\n ph -= np.exp(a * tk*dt + b * (tkp+1)*dt) / (a * b)\n ph += np.exp(a * tk*dt + b * tkp*dt) / (a * b)\n return ph\n\n\n if dagg == (0, 1):\n re_kernel[regions['a']] = phase('a') + phase('a', 1)\n\n re_kernel[regions['b']] = phase('b')\n\n im_kernel[regions['a']] = ((2*n_1 + 1) * phase('a') -\n (2*n_2 + 1) * phase('a', 1))\n\n im_kernel[regions['b']] = (2*n_1 + 1) * phase('b')\n\n im_kernel[regions['c']] = -2 * (n_1 + 1) * phase('c')\n\n elif dagg == (1, 0):\n re_kernel[regions['a']] = phase('a') + phase('a', 1)\n\n re_kernel[regions['b']] = phase('b')\n\n im_kernel[regions['a']] = ((2*n_1 + 1) * phase('a') -\n (2*n_2 + 1) * phase('a', 1))\n\n im_kernel[regions['b']] = (2*n_1 + 1) * phase('b')\n\n im_kernel[regions['c']] = 2 * n_1 * phase('c')\n\n elif dagg == (1, 1):\n re_kernel[regions['a']] = -(phase('a') + phase('a', 1))\n\n re_kernel[regions['b']] = -phase('b')\n\n im_kernel[regions['a']] = ((2*n_1 + 1) * phase('a') +\n (2*n_2 + 1) * phase('a', 1))\n\n im_kernel[regions['b']] = (2*n_1 + 1) * phase('b')\n\n im_kernel[regions['c']] = 2 * (n_1 + 1) * phase('c')\n\n elif dagg == (0, 0):\n re_kernel[regions['a']] = -(phase('a') + phase('a', 1))\n\n re_kernel[regions['b']] = -phase('b')\n\n im_kernel[regions['a']] = -((2*n_2 + 1) * phase('a', 1) +\n (2*n_1 + 1) * phase('a'))\n\n im_kernel[regions['b']] = -(2*n_1 + 1) * phase('b')\n\n im_kernel[regions['c']] = -2 * n_1 * phase('c')\n\n re_kernel = np.triu(re_kernel) #only keep triangular region\n im_kernel = np.triu(im_kernel)\n return re_kernel, im_kernel", "def calc_ked_WFI(self):\n\n #Initialize kinetic energy density\n self.ked_WFI = np.zeros( (self.grid.Nelem, 1))\n\n #Figure out the number of occupied orbitals\n if self.m == 0:\n if self.pol == 1:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n else:\n Nocc = np.floor(self.N)\n nu = self.N - Nocc\n\n else:\n #m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n Nocc = np.floor(self.N / 4)\n nu = self.N / 4 - Nocc\n else:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n\n #Construct density\n for i in range(int(Nocc)):\n # print(\"phi from pssolver\", self.phi)\n # print(\"phi subset\", self.phi[:,i])\n # print(\"integrate returns\", self.grid.integrate( self.phi[:,i]**2 )**0.5)\n\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:,i]**2 )**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += (phi_norm * (self.H0 @ phi_norm)) / self.grid.w[:, None]\n\n #If we are doing fractional robitals and are non-integer\n if self.FRACTIONAL is True and nu != 0:\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:, Nocc+1]**2)**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += nu * ( phi_norm * (self.H0 @ phi_norm) ) / self.grid.w[:, None]\n\n #Scale densities appropriately\n if self.m == 0:\n if self.pol == 1: #Unpolarized electrons\n self.ked_WFI = 2 * self.ked_WFI\n\n else: # m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n self.ked_WFI = 4 * self.ked_WFI\n else:\n self.ked_WFI = 2 * self.ked_WFI", "def normkernel(S, T, n):\n\n k1 = kernel(S, S, n)\n k2 = kernel(T, T, n)\n res = kernel(S, T, n) / 
sqrt(k1 * k2)\n\n return res", "def delta(N):\n assert assert_odd(N) # Make sure kernel is odd\n X = np.zeros((N,N)) # Square matrix with all 0s\n middle = int(N/2) # Get the middle cell\n X[middle, middle] = 1\n return X", "def Pkernel(x):\n\n m = (x < 0.) & (x >= 1.)\n x[x < 0.] = np.zeros(np.sum(x < 0.))\n x[x >= 1.] = np.zeros(np.sum(x >= 1.))\n x = np.sqrt(x)\n\n result = np.log(2.) * np.log(2.) - np.pi *np.pi / 6. \\\n + 2. * spence(0.5 + 0.5 * x) - (x + x*x*x) / (1. - x*x) \\\n + (np.log(1. + x) - 2. * np.log(2.)) * np.log(1. - x) \\\n + 0.5 * (np.log(1. - x) * np.log(1. - x) - np.log(1. + x) * np.log(1. + x)) \\\n + 0.5 * (1. + x*x*x*x) / (1. - x*x) * (np.log(1. + x) - np.log(1. - x))\n result[x <= 0.] = np.zeros(np.sum(x <= 0.))\n result[x >= 1.] = np.zeros(np.sum(x >= 1.))\n return result", "def kernel(self, modulus=None):\n M = self.matrix(modulus=modulus)\n if modulus is None:\n M = M.convert_to(QQ)\n # Note: Even when working over a finite field, what we want here is\n # the pullback into the integers, so in this case the conversion to ZZ\n # below is appropriate. When working over ZZ, the kernel should be a\n # ZZ-submodule, so, while the conversion to QQ above was required in\n # order for the nullspace calculation to work, conversion back to ZZ\n # afterward should always work.\n # TODO:\n # Watch <https://github.com/sympy/sympy/issues/21834>, which calls\n # for fraction-free algorithms. If this is implemented, we can skip\n # the conversion to `QQ` above.\n K = M.nullspace().convert_to(ZZ).transpose()\n return self.domain.submodule_from_matrix(K)", "def edge_kernel(isotropic):\n if isotropic:\n edge_kernel = - 1.0 * np.ones([3, 3, 3], np.float64)\n edge_kernel[1, 1, 1] = 26.0\n else:\n edge_kernel = - 1.0 * np.ones([1, 3, 3], np.float64)\n edge_kernel[0, 1, 1] = 8\n return edge_kernel", "def periodic_kernel(rmax, kernel, pos, wts, log=null_log):\n if rmax>=0.5:\n raise Exception('Cannot have rmax greater than half the box size, could get periodic images')\n\n num_pts = len(pos)\n pos = array(pos)\n wts = array(wts)\n\n print('Finding optimal shift',file=log)\n pos = shift_pos_optimally(pos, rmax, log)\n print('Padding the unit cube', file=log)\n pad_idx, pad_pos = pad_unitcube(pos, rmax)\n\n print('Inserted {:,} ghost particles for periodicity'.format(len(pad_idx)),file=log)\n new_pts = concatenate((pos, pad_pos), axis=0)\n\n if sum(wts.shape)<=1:\n new_wts = empty(len(new_pts), dtype=wts.dtype)\n new_wts[:] = wts\n else:\n new_wts = concatenate((wts, wts[pad_idx]))\n\n # Scale everything to be in the new box\n scale_fac = 1.0 / (1+2*rmax) \n new_pts += rmax\n new_pts *= scale_fac\n\n pairs, sort_idx, pos, wts, accel = radial_kernel_evaluate(rmax*scale_fac, kernel, new_pts, new_wts, log=log, sort_data=True)\n\n # unsort only the real points\n unsort = empty_like(sort_idx)\n unsort[sort_idx] = arange(len(new_pts))\n unsort = unsort[:num_pts]\n\n accel = accel[unsort]\n\n # undo the scale factor (remember dx's were all shortened)\n accel *= 1.0/scale_fac\n\n return pairs, accel", "def kernel(self):\n\n # Create a blank kernel the appropriate size\n kernel = np.zeros((self.n_rows, self.n_cols), dtype=np.int)\n\n # Iterate through the offsets, turning on the correct pixels\n for offset in self.offsets:\n row, col = offset\n if np.all(offset == self.index):\n kernel[row, col] = 2\n else:\n kernel[row, col] = 1\n\n # Ensure that the index pixel is not zero for footprints where the\n # index pixel is not part of the footprint\n if kernel[self.index[0], self.index[1]] == 0:\n 
kernel[self.index[0], self.index[1]] = 3\n return kernel", "def bilinear_interpolation_kernel(in_channels, out_channels, ksize):\n\n factor = (ksize + 1) / 2\n if ksize % 2 == 1:\n center = factor - 1\n else:\n center = factor - 0.5\n og = np.ogrid[:ksize, :ksize]\n k = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)\n \n W = np.zeros((in_channels, out_channels, ksize, ksize)).astype(np.float32)\n W[range(in_channels), range(out_channels), :, :] = k\n return W", "def convolve2d(img, kernel):\n #Flip the kernel\n kernel = utils.flip2d(kernel) \n #print(len(kernel))\n \n c = copy.deepcopy(img)\n \n #print(len(c))\n #Padd the image\n pad = int((len(kernel)-1)/2)\n\n\n padded_img = utils.zero_pad(img,pad,pad)\n #print(len(padded_img), len(padded_img[0]))\n #print(len(kernel))\n #print(len(img)**2)\n og_img=[]\n#c = copy.deepcopy(img)\n j=0\n offset = 0\n for m in range(len(img) * len(img[0])): # size of kernel x kernel\n x = []\n \n for i in range(len(kernel)): #3 is kernel size\n #print(i,j)\n x.append(padded_img[i+offset][j:j+len(kernel)])\n #print((x))\n sum = 0\n for k in range(len(kernel)):\n for l in range(len(kernel[0])):\n sum+= x[k][l] * kernel[k][l]\n #print(i,j)\n #print(sum)\n og_img.append(sum) \n j+=1\n if (j == len(img[0])):\n j = 0\n offset+= 1\n \n #print(len(img), len(img[0]))\n final_img = []\n for i in range(0,(len(img)*len(img[0])),len(img[0])):\n final_img.append(og_img[i:i+len(img[0])])\n #print(len(final_img)), len(final_img[0])\n return final_img\n\n # TODO: implement this function.", "def gkern2(kernlen=21, nsig=3):\n # create nxn zeros\n inp = np.zeros((kernlen, kernlen))\n # set element at the middle to one, a dirac delta\n inp[kernlen//2, kernlen//2] = 1\n # gaussian-smooth the dirac, resulting in a gaussian filter mask\n kernel = scipy.ndimage.filters.gaussian_filter(inp, nsig)\n\n return kernel", "def gkern1(kernlen=21, nsig=3):\n interval = (2*nsig+1.)/(kernlen)\n x = np.linspace(-nsig-interval/2., nsig+interval/2., kernlen+1) \n kern1d = np.diff(scipy.stats.norm.cdf(x))\n kernel_raw = np.sqrt(np.outer(kern1d, kern1d))\n kernel = kernel_raw/kernel_raw.sum()\n \n return kernel", "def cs4243_filter(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n ###Your code here####\n # pad image to handle border pixels\n pad_height = (int)((Hk - 1)/2)\n pad_width = (int)((Wk - 1)/2)\n image_pad = pad_zeros(image, pad_height, pad_width)\n\n # Flip the kernel horizontal and vertical\n kernel = cs4243_rotate180(kernel)\n \n # compute effective output size, assume stride=1\n out_height = 1 + Hi - Hk + 2*pad_height\n out_width = 1 + Wi - Wk + 2*pad_width\n \n # get initial nodes of receptive fields\n recep_fields_h = [i for i in range(out_height)]\n recep_fields_w = [i for i in range(out_width)]\n \n for i in recep_fields_h:\n for j in recep_fields_w: \n # get receptive area\n recep_area = image_pad[i:i+Hk, j:j+Wk] \n\n # multiply recep_area with kernel\n conv_sum = 0.0\n for y in range(Hk):\n for x in range(Wk): \n conv_sum += kernel[y][x] * recep_area[y][x]\n filtered_image[i, j] = conv_sum\n ###\n\n return filtered_image", "def sbil_kernel(delta, obs_stats, t, ar, s, kernel='Gaussian'):\n #np.random.shuffle(delta)\n print(delta)\n sbil_kernel_estimate = []\n obs_stats = obs_stats[delta > 0]\n\n sim_theta = [select.generate_theta_sv(ar) for i in range(s)]\n sim_theta = np.matrix(sim_theta).T\n\n # Generate out sample of time series.\n sim_y = [sim.sim_sv(t, sim_theta[0, i], sim_theta[1, i], 
sim_theta[2, i],\n sim_theta[3, i], 1) for i in range(s)]\n \n # Generate out sample statistics.\n sim_stats = [sum_stat.sv_stats(delta, sim_y[i]) for i\n in range(s)]\n\n sim_theta_mean = sum(sim_theta.T)/s\n\n # Compute sample variance.\n u = sum([np.square(sim_theta[:, i] - sim_theta_mean.T)\n for i in range(s)])/s\n\n # Standardize parameter vectors.\n sim_theta = np.hstack([(sim_theta[:, i] - sim_theta_mean.T)/np.sqrt(u)\n for i in range(s)])\n\n global theta_sigma\n global theta_mean\n theta_sigma = np.sqrt(u)\n theta_mean = sim_theta_mean\n\n # Standardize observed statistics.\n obs_stats = (obs_stats - np.mean(sim_stats, 0))/np.std(sim_stats, 0)\n\n # Compute sample mean.\n sim_stats_mean = sum(sim_stats)/s\n\n # Compute sample variance.\n u = sum([np.square(sim_stats[i]-sim_stats_mean) for i in range(s)])/s\n\n # Standardize simulated statistics.\n sim_stats = [(sim_stats[i] - sim_stats_mean)/np.sqrt(u) for i in range(s)]\n\n # Identify k nearest neighbors.\n norms = [np.linalg.norm(obs_stats-sim_stats[i]) for i in range(s)]\n closest_index = np.argsort(norms)\n closest_thetas = [sim_theta[:, i] for i in closest_index[0:round(s*0.03)]]\n\n # Compute k-nn estimate.\n estimate_standard = (sum(closest_thetas)/len(closest_thetas))\n\n estimate = np.array(estimate_standard.T)*np.array(\n theta_sigma.T) + np.array(theta_mean)\n\n return estimate", "def my_kernel(X, Y):\n S = 0.84 # parameter from rhos\n\n if dset == 1:\n gamma = 0.0005\n else:\n gamma = 0.00087 # maximise variance of kernel matrix\n if np.array_equal(X, Y):\n N = X.shape[0]\n M = (1 - S) * np.ones((N, N)) + S * np.eye(N)\n else:\n M = 1\n\n pairwise_sq_dists = cdist(X, Y, 'sqeuclidean')\n K = exp(-gamma * pairwise_sq_dists) * M\n return K", "def _compute_R2_from_kernel(n, m, kernel):\r\n\r\n R2 = 0\r\n ind_vec = np.arange(m)\r\n for l in range(n):\r\n ind_vec.shape = (1,)*l + (m,) + (1,)*(n-l-1)\r\n _idx1 = (slice(None),)*l + (slice(1, None),) + (slice(None),)*(n-l-1)\r\n _idx2 = (slice(None),)*l + (slice(m-1),) + (slice(None),)*(n-l-1)\r\n R2 += 2 * np.sum(ind_vec[_idx1] * kernel[_idx1] * kernel[_idx2])\r\n\r\n return R2", "def gauss_kernel(radius, n_sigmas=8):\n sizex = int(n_sigmas * radius)\n sizey = int(n_sigmas * radius)\n radius = float(radius)\n xc = 0.5 * sizex\n yc = 0.5 * sizey\n y, x = np.mgrid[0:sizey - 1, 0:sizex - 1]\n x = x - xc\n y = y - yc\n x = x / radius\n y = y / radius\n g = np.exp(-0.5 * (x ** 2 + y ** 2))\n return g / (2 * np.pi * radius ** 2) # g.sum()", "def sub_kernel(kernel, dim1, dim2):\n\n sub_kernel = kernel[dim1[0]:dim1[1],dim2[0]:dim2[1]]\n return sub_kernel", "def gaussian_filter(img,f=5,K=1,var=1):\n i_x, i_y = np.shape(img) # image size\n radi = f//2 # window radius\n\n # create gaussian kernel\n def gaussian_kernel(f,K,var):\n \n # create coordinate information \n if f//2 == 0:\n x = np.linspace(-radi,radi,f+1)\n y = np.linspace(-radi,radi,f+1)\n x = np.delete(x, radi)\n y = np.delete(y, radi)\n else:\n x = np.linspace(-radi,radi,f)\n y = np.linspace(-radi,radi,f)\n\n m_x, m_y = np.meshgrid(x,y) # create coordinate\n r_gauss = m_x**2 + m_y**2 # distance to origin\n gauss = K*(np.exp(-r_gauss/(2*(var**2)))) # create kernel\n return gauss/gauss.sum()\n \n #mirror padding\n def mir_padding(img,f):\n img_p = np.zeros((i_x+2*radi,i_y+2*radi)) #create padding image\n img_p[radi:i_x+radi,radi:i_y+radi] = img #throw original image to padding image\n img_p[0:radi,radi:i_y+radi] = img[radi-1::-1,:] # padding top rows\n img_p[-radi::1,radi:i_y+radi] = img[-1:-radi-1:-1,:] # padding bottom 
rows\n img_p[radi:i_x+radi,0:radi] = img[:,radi-1::-1] # padding left column\n img_p[radi:i_x+radi,-radi::1] = img[:,-1:-radi-1:-1] # padding right column\n for i in range(f):\n img_p[0:radi,i] = img[radi-1-i,radi-1::-1] # padding upper-left corner\n img_p[0:radi,-i] = img[radi-1-i,-radi::1] # padding upper-righ corner\n img_p[-1:-radi-1:-1,i] = img[-radi+i,radi-1::-1] # padding lower-left corner\n img_p[-1:-radi-1:-1,-i] = img[-radi+i,-radi::1] # padding lower-right corner\n return img_p\n\n img_p = mir_padding(img,f) # create padding image\n g_kernel = gaussian_kernel(f,K,var) # create gaussian kernel\n\n #seperate kernel\n E = g_kernel[0,0]\n c = g_kernel[:,0]\n wT = np.reshape(g_kernel[0,:]/E,(f,1))\n\n gauss_image = np.zeros([i_x,i_y]) # create gauss image\n temp_image = np.zeros([i_x,i_y]) # create temp image for two 1D convolution\n old_c_sum = c.sum() # calculate sum of c before modification\n\n # if elements of kernel are located within area of padding, substitute value with 0\n # calculate new value base on ratio between sum before and after modification\n for j in range(i_y):\n y_bound = i_y - j\n mod_c = c.copy()\n if j < radi:\n mod_c[0:radi-j] = 0 \n new_c_sum = mod_c.sum()\n mod_c = mod_c*old_c_sum/new_c_sum \n if j > i_y - radi - 1:\n mod_c[-1:-radi+y_bound-1:-1] = 0 \n new_c_sum = mod_c.sum()\n mod_c = mod_c*old_c_sum/new_c_sum \n for i in range(i_x):\n temp_image[i,j] = np.sum(img_p[i+radi,j:j+f]*mod_c)\n\n temp_image = mir_padding(temp_image,f) # create padding temp image for next 1D convolution\n old_wT_sum = wT.sum() # calculate sum of wT before modification\n\n # if elements of kernel are located within area of padding, substitute value with 0\n # calculate new value base on ratio between sum before and after modification\n for i in range(i_x):\n x_bound = i_x - i\n mod_wT = wT.copy()\n if i < radi:\n mod_wT[0:radi-i] = 0 \n new_wT_sum = mod_wT.sum()\n mod_wT = mod_wT*old_wT_sum/new_wT_sum \n if i > i_x - radi - 1:\n mod_wT[-1:-radi+x_bound-1:-1] = 0 \n new_wT_sum = mod_wT.sum()\n mod_wT = mod_wT*old_wT_sum/new_wT_sum \n for j in range(i_y):\n gauss_image[i,j] = np.sum(temp_image[i:i+f,j+radi]*mod_wT.T)\n\n return gauss_image", "def get_blur_kernel(n):\n return [1/n**2] * n**2", "def makeGaussianKernel(sigma: float) -> np.ndarray:\n\n # Your code here.\n kernel_size = 8*sigma+1\n kernel = np.zeros([kernel_size,kernel_size], dtype=float)\n center = kernel_size//2\n \n \n s = 2*(sigma**2)\n sum_val = 0\n for i in range(0,kernel_size):\n for j in range(0,kernel_size):\n x = i-center\n y = j-center\n kernel[i,j] = np.exp(-(x**2+y**2) / s)\n sum_val += kernel[i,j]\n #/(np.pi * s)\n sum_val = 1/sum_val\n print(\"here is the kernel\", kernel*sum_val)\n return kernel*sum_val", "def focus_field_beam(shape = (128,128,128),\n units = (0.1,0.1,0.1),\n lam =.5, NA = .6, n0 = 1.,\n return_all_fields = False,\n n_integration_steps = 200):\n\n\n p = OCLProgram(absPath(\"kernels/psf_debye.cl\"),\n build_options = [\"-I\",absPath(\"kernels\"),\"-D\",\"INT_STEPS=%s\"%n_integration_steps])\n\n if np.isscalar(NA):\n NA = [0.,NA]\n \n Nx0, Ny0, Nz0 = shape\n dx, dy, dz = units\n\n #FIXME: the loop below does not yet work for odd inputs\n if not Nx0%2+Ny0%2+Nz0%2==0:\n raise NotImplementedError(\"odd shapes not supported yet\")\n\n\n alphas = np.arcsin(np.array(NA)/n0)\n assert len(alphas)%2 ==0\n\n # as we assume the psf to be symmetric, we just have to calculate each octant\n Nx = Nx0//2+1\n Ny = Ny0//2+1\n Nz = Nz0//2+1\n\n u_g = OCLArray.empty((Nz,Ny,Nx),np.float32)\n ex_g = 
OCLArray.empty(u_g.shape,np.complex64)\n ey_g = OCLArray.empty(u_g.shape,np.complex64)\n ez_g = OCLArray.empty(u_g.shape,np.complex64)\n\n alpha_g = OCLArray.from_array(alphas.astype(np.float32))\n\n \n p.run_kernel(\"debye_wolf\",u_g.shape[::-1],None,\n ex_g.data,ey_g.data,ez_g.data, u_g.data,\n np.float32(1.),np.float32(0.),\n np.float32(0.),np.float32(dx*(Nx-1.)),\n np.float32(0.),np.float32(dy*(Ny-1.)),\n np.float32(0.),np.float32(dz*(Nz-1.)),\n np.float32(lam), np.float32(n0),\n alpha_g.data, np.int32(len(alphas)))\n\n u = u_g.get()\n ex = ex_g.get()\n ey = ey_g.get()\n ez = ez_g.get()\n\n u_all = np.empty((Nz0,Ny0,Nx0),np.float32)\n ex_all = np.empty((Nz0,Ny0,Nx0),np.complex64)\n ey_all = np.empty((Nz0,Ny0,Nx0),np.complex64)\n ez_all = np.empty((Nz0,Ny0,Nx0),np.complex64)\n\n sx = [slice(0,Nx),slice(Nx,Nx0)]\n sy = [slice(0,Ny),slice(Ny,Ny0)]\n sz = [slice(0,Nz),slice(Nz,Nz0)]\n\n\n\n # spreading the calculated octant to the full volume\n for i,j,k in itertools.product([0,1],[0,1],[0,1]):\n\n # i, j, k = 0 indicates the + octant\n\n u_all[sz[1-i],sy[1-j],sx[1-k]] = u[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n if i ==0:\n ex_all[sz[1-i],sy[1-j],sx[1-k]] = ex[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n ey_all[sz[1-i],sy[1-j],sx[1-k]] = ey[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n ez_all[sz[1-i],sy[1-j],sx[1-k]] = ez[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n\n else:\n ex_all[sz[1-i],sy[1-j],sx[1-k]] = np.conjugate(ex[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k])\n ey_all[sz[1-i],sy[1-j],sx[1-k]] = np.conjugate(ey[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k])\n ez_all[sz[1-i],sy[1-j],sx[1-k]] = np.conjugate(ez[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k])\n\n if return_all_fields:\n return u_all, ex_all, ey_all, ez_all\n else:\n return u_all", "def disp_surf_calc(kc_x_max, kc_z_max, m_i, wp_e):\n\n # Make vectors of the wave numbers\n kc_z = np.linspace(1e-6, kc_z_max, 35)\n kc_x = np.linspace(1e-6, kc_x_max, 35)\n\n # Turn those vectors into matrices\n kc_x_mat, kc_z_mat = np.meshgrid(kc_x, kc_z)\n\n # Find some of the numbers that appear later in the calculations\n kc_ = np.sqrt(kc_x_mat ** 2 + kc_z_mat ** 2) # Absolute value of k\n theta_ = np.arctan2(kc_x_mat, kc_z_mat) # The angle between k and B\n wc_i = 1 / m_i # The ion gyro frequency\n wp_i = wp_e / np.sqrt(m_i) # The ion plasma frequency\n wp_ = np.sqrt(wp_e ** 2 + wp_i ** 2) # The total plasma frequency\n\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # For every k_perp and k_par, turn the dispersion relation into a\n # polynomial equation and solve it.\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # The polynomial coefficients are calculated\n pol_koeff_8 = -2 * kc_ ** 2\n pol_koeff_8 -= (1 + wc_i ** 2 + 3 * wp_ ** 2) * np.ones(kc_.shape)\n pol_koeff_6 = (2 * kc_ ** 2 + wp_ ** 2) * (1 + wc_i ** 2 + 2 * wp_ ** 2)\n pol_koeff_6 += kc_ ** 4 + (wp_ ** 2 + wc_i) ** 2\n pol_koeff_4 = -kc_ ** 4 * (1 + wc_i ** 2 + wp_ ** 2)\n pol_koeff_4 -= 2 * kc_ ** 2 * (wp_ ** 2 + wc_i) ** 2\n pol_koeff_4 -= (kc_ * wp_) ** 2 * (1 + wc_i ** 2 - wc_i) * (\n 1 + np.cos(theta_) ** 2)\n pol_koeff_4 -= wp_ ** 2 * (wp_ ** 2 + wc_i) ** 2\n pol_koeff_2 = kc_ ** 4 * (wp_ ** 2 * (1 + wc_i ** 2 - wc_i) * np.cos(\n theta_) ** 2 + wc_i * (wp_ ** 2 + wc_i))\n pol_koeff_2 += kc_ ** 2 * wp_ ** 2 * wc_i * (wp_ ** 2 + wc_i) * (\n 1 + np.cos(theta_) 
** 2)\n pol_koeff_0 = -kc_ ** 4 * wc_i ** 2 * wp_ ** 2 * np.cos(theta_) ** 2\n\n w_final = np.zeros((10, len(kc_z), len(kc_x)))\n\n # For each k, solve the equation\n for k_z, k_x in itertools.product(range(len(kc_z)), range(len(kc_x))):\n disp_polynomial = [1, 0, pol_koeff_8[k_z, k_x], 0,\n pol_koeff_6[k_z, k_x], 0, pol_koeff_4[k_z, k_x],\n 0, pol_koeff_2[k_z, k_x], 0, pol_koeff_0[k_z, k_x]]\n # theoretically should be real (A. Tjulin)\n w_temp = np.real(np.roots(disp_polynomial))\n # We need to sort the answers to get nice surfaces.\n w_final[:, k_z, k_x] = np.sort(w_temp)\n\n n2_ = kc_ ** 2 / w_final ** 2\n v_ph_c = np.sqrt(1. / n2_)\n va_c = 1 / (wp_e * np.sqrt(m_i))\n v_ph_va = v_ph_c / va_c\n\n diel_tensor = _calc_diel(kc_, w_final, theta_, wp_e, wp_i, wc_i)\n\n e_x, e_y, e_z, e_per, e_tot, e_pol = _calc_e(diel_tensor)\n e_par = (kc_x_mat * e_x + kc_z_mat * e_z) / kc_\n\n b_x, b_y, b_z, b_par, b_per, b_pol, b_tot = _calc_b(kc_x_mat, kc_z_mat,\n w_final, e_x, e_y, e_z)\n\n dk_x, dk_z = [kc_x_mat[1], kc_z_mat[1]]\n dw_x, dw_z = [np.zeros(w_final.shape) for _ in range(2)]\n dw_x[:, :, 1:] = np.diff(w_final, axis=2)\n dw_z[:, 1:, :] = np.diff(w_final, axis=1)\n v_x, v_z = [dw_ / dk for dw_, dk in zip([dw_x, dw_z], [dk_x, dk_z])]\n\n s_par, s_tot = _calc_s(e_x, e_y, e_z, b_x, b_y, b_z)\n\n # Compute ion and electron velocities\n v_ex, v_ey, v_ez, v_ix, v_iy, v_iz = _calc_vei(m_i, wc_i, w_final,\n e_x, e_y, e_z)\n\n # Ratio of parallel and perpendicular to B speed\n vepar_perp = v_ez * np.conj(v_ez)\n vepar_perp /= (v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey))\n vipar_perp = v_iz * np.conj(v_iz)\n vipar_perp /= (v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy))\n\n # Total particle speeds\n v_e2 = v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey) + v_ez * np.conj(v_ez)\n v_i2 = v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy) + v_iz * np.conj(v_iz)\n\n # Ion and electron energies\n m_e = -1\n en_e = 0.5 * m_e * v_e2\n en_i = 0.5 * m_i * v_i2\n\n # Ratio of particle and field energy densities\n ratio_part_field = _calc_part2fields(wp_e, en_e, en_i, e_tot, b_tot)\n\n # Continuity equation\n dn_e_n, dn_i_n, dne_dni = _calc_continuity(kc_x_mat, kc_z_mat, w_final,\n v_ex, v_ez, v_ix, v_iz)\n\n dn_e_n_db_b = dn_e_n / b_tot\n dn_i_n_db_b = dn_i_n / b_tot\n\n dn_e_n_dbpar_b = dn_e_n / b_par\n dn_i_n_dbpar_b = dn_i_n / b_par\n\n dn_e = dn_e_n * wp_e ** 2\n k_dot_e = e_x * kc_x_mat + e_z * kc_z_mat\n k_dot_e = np.sqrt(k_dot_e * np.conj(k_dot_e))\n\n # Build output dict\n extra_param = {\"Degree of electromagnetism\": np.log10(b_tot / e_tot),\n \"Degree of longitudinality\": np.abs(e_par) / e_tot,\n \"Degree of parallelity E\": e_z / e_tot,\n \"Degree of parallelity B\": np.sqrt(\n b_z * np.conj(b_z)) / b_tot,\n \"Ellipticity E\": e_pol, \"Ellipticity B\": b_pol,\n \"E_part/E_field\": np.log10(ratio_part_field),\n \"v_g\": np.sqrt(v_x ** 2 + v_z ** 2),\n \"v_ph/v_a\": np.log10(v_ph_va),\n \"E_e/E_i\": np.log10(en_e / en_i),\n \"v_e/v_i\": np.log10(np.sqrt(v_e2 / v_i2)),\n \"v_epara/v_eperp\": np.log10(vepar_perp),\n \"v_ipara/v_iperp\": np.log10(vipar_perp),\n \"dn_e/dn_i\": np.log10(dne_dni),\n \"(dn_e/n)/ (dB/B)\": np.log10(dn_e_n_db_b),\n \"(dn_i/n)/(dB/B)\": np.log10(dn_i_n_db_b),\n \"(dn_i/n)/(dBpar/B)\": np.log10(dn_i_n_dbpar_b),\n \"(dn_e/n)/(dB/B)\": np.log10(dn_e / k_dot_e),\n \"(dn_e/n)/(dBpar /B)\": np.log10(dn_e_n_dbpar_b),\n \" Spar/Stot\": s_par / s_tot}\n\n for k, v in zip(extra_param.keys(), extra_param.values()):\n extra_param[k] = np.transpose(np.real(v), [0, 2, 1])\n\n kx_ = 
np.transpose(kc_x_mat)\n kz_ = np.transpose(kc_z_mat)\n wf_ = np.transpose(w_final, [0, 2, 1])\n\n return kx_, kz_, wf_, extra_param", "def _compute_R1_from_kernel(n, m, kernel):\r\n\r\n R1 = 0\r\n ind_vec = np.arange(m)\r\n for l in range(n):\r\n ind_vec.shape = (1,)*l + (m,) + (1,)*(n-l-1)\r\n R1 += np.sum((2*ind_vec+1) * kernel**2)\r\n\r\n return R1", "def sobel(kernel_size: int = 3) -> Tensor:\n assert kernel_size % 2 == 1\n s = kernel_size // 2\n k = torch.linspace(-s, s, kernel_size)\n kernel = torch.stack([k] * kernel_size)\n k[k == 0] = 1e-7\n div = torch.stack([k] * kernel_size)\n return kernel / (div ** 2 + div.T ** 2)", "def dpp_sw(kernel_matrix, window_size=3, max_length=14, epsilon=1E-10):\r\n item_size = kernel_matrix.shape[0]\r\n v = np.zeros((max_length, max_length))\r\n cis = np.zeros((max_length, item_size))\r\n di2s = np.copy(np.diag(kernel_matrix))\r\n selected_items = list()\r\n selected_item = np.argmax(di2s)\r\n selected_items.append(selected_item)\r\n window_left_index = 0\r\n while len(selected_items) < max_length:\r\n k = len(selected_items) - 1\r\n ci_optimal = cis[window_left_index:k, selected_item]\r\n di_optimal = math.sqrt(di2s[selected_item])\r\n v[k, window_left_index:k] = ci_optimal\r\n v[k, k] = di_optimal\r\n elements = kernel_matrix[selected_item, :]\r\n eis = (elements - np.dot(ci_optimal, cis[window_left_index:k, :])) / di_optimal\r\n cis[k, :] = eis\r\n di2s -= np.square(eis)\r\n if len(selected_items) >= window_size:\r\n window_left_index += 1\r\n for ind in range(window_left_index, k + 1):\r\n t = math.sqrt(v[ind, ind] ** 2 + v[ind, window_left_index - 1] ** 2)\r\n c = t / v[ind, ind]\r\n s = v[ind, window_left_index - 1] / v[ind, ind]\r\n v[ind, ind] = t\r\n v[ind + 1:k + 1, ind] += s * v[ind + 1:k + 1, window_left_index - 1]\r\n v[ind + 1:k + 1, ind] /= c\r\n v[ind + 1:k + 1, window_left_index - 1] *= c\r\n v[ind + 1:k + 1, window_left_index - 1] -= s * v[ind + 1:k + 1, ind]\r\n cis[ind, :] += s * cis[window_left_index - 1, :]\r\n cis[ind, :] /= c\r\n cis[window_left_index - 1, :] *= c\r\n cis[window_left_index - 1, :] -= s * cis[ind, :]\r\n di2s += np.square(cis[window_left_index - 1, :])\r\n di2s[selected_item] = -np.inf\r\n selected_item = np.argmax(di2s)\r\n if di2s[selected_item] < epsilon:\r\n break\r\n selected_items.append(selected_item)\r\n return selected_items", "def kernel(self, cosmo, z, ell):\n z = np.atleast_1d(z)\n # Extract parameters\n pzs, m = self.params[:2]\n kernel = weak_lensing_kernel(cosmo, pzs, z, ell)\n # If IA is enabled, we add the IA kernel\n if self.config[\"ia_enabled\"]:\n bias = self.params[2]\n kernel += nla_kernel(cosmo, pzs, bias, z, ell)\n # Applies measurement systematics\n if isinstance(m, list):\n m = np.expand_dims(np.stack([mi for mi in m], axis=0), 1)\n kernel *= 1.0 + m\n return kernel", "def calc_hypersphere_volume(r: float, n: int) -> float:\n return (math.pi ** (n / 2) * r ** n) / gamma((n / 2) + 1)", "def aGMKernel(Ni,Nj,alpha,gamma):\n \n #Dimension of data\n d = Ni.mu.size\n I = sp.eye(d)\n\n ##Normalisation\n deltaMean = (Ni.mu-Nj.mu).reshape(d,)\n SigmaSum = alpha * (Ni.Sigma+Nj.Sigma) + I/gamma\n Kij = (linalg.det(2*gamma*alpha * Ni.Sigma + I) * linalg.det(2*gamma*alpha * Nj.Sigma + I))**0.25\n Kij *= sp.exp(-0.5*sp.dot(deltaMean.T,linalg.solve(SigmaSum,deltaMean)))\n Kij /= sp.sqrt(linalg.det(SigmaSum*gamma)) \n \n return Kij", "def gaus_kernel_calc(kernel_size):\n base_gaus_binom = np.array([[1], [1]])\n kernel = base_gaus_binom\n\n if kernel_size == 1:\n # If the kernel size is 1 we need a 
2d array that keeps the image the same.\n kernel = np.array([[1]])\n kernel = scipy.signal.convolve2d(kernel, kernel.transpose())\n return kernel\n\n for i in range(kernel_size - 2):\n kernel = scipy.signal.convolve2d(kernel, base_gaus_binom)\n\n kernel = scipy.signal.convolve2d(kernel, kernel.transpose())\n return kernel/kernel.sum()", "def _create_kernel(sm_times, sm_freqs, kernel='hanning'):\n # frequency dependent kernels\n if isinstance(sm_times, (np.ndarray, list, tuple)):\n sm_freqs = 1 # force 1hz smoothing\n kernels = [_create_kernel(\n sm, sm_freqs, kernel=kernel) for sm in sm_times]\n return kernels\n\n # frequency independent kernels\n if kernel == 'square':\n return np.full((sm_freqs, sm_times), 1. / (sm_times * sm_freqs))\n elif kernel == 'hanning':\n hann_t, hann_f = np.hanning(sm_times), np.hanning(sm_freqs)\n hann = hann_f.reshape(-1, 1) * hann_t.reshape(1, -1)\n return hann / np.sum(hann)\n else:\n raise ValueError(f\"No kernel {kernel}\")", "def rho2(x,m_ind):\n \n f = 0.0\n for k_ind in range(cfg.nomax):\n f -= concave_piece(x,k_ind,m_ind) \n\n return f", "def create_filter_bank():\r\n kernels = []\r\n for theta in range(0, 2):\r\n theta = theta / 2. * np.pi\r\n for sigma in (3, 5):\r\n for frequency in (0.10, 0.25):\r\n kernel = np.real(gabor_kernel(frequency, theta=theta,\r\n sigma_x=sigma, sigma_y=sigma))\r\n kernels.append(kernel)\r\n print(len(kernels))\r\n return kernels", "def gkern(kernlen=21, nsig=3):\n\n interval = (2*nsig+1.)/(kernlen)\n x = np.linspace(-nsig-interval/2., nsig+interval/2., kernlen+1)\n kern1d = np.diff(st.norm.cdf(x))\n kernel_raw = np.sqrt(np.outer(kern1d, kern1d))\n kernel = kernel_raw/np.max(kernel_raw)#.sum()\n return kernel", "def fdm_2d(N,L,x,y,h,k):\n\n # Create the Laplacian as a 1d sparse matrix using central difference\n ones = np.ones(N)\n diagvalues = np.array([ones,-2*ones,ones])\n offsets = np.array([-1,0,1])\n lap1d = sps.dia_matrix((diagvalues,offsets), shape=(N,N))/h**2\n \n # Represent 2d coordinates as kronecker sum\n lap = sps.kron(lap1d,sps.diags(np.ones(N))) + \\\n sps.kron(sps.diags(np.ones(N)),lap1d)\n \n # potential terms\n pot_x = np.repeat(x**2,N)\n pot_y = np.tile(y**2,N)\n\n # The whole Hamiltonian in matrix form\n A = (-1*lap + sps.diags(pot_x) + sps.diags(pot_y))/2\n\n # Calculate the k smallest eigenvalues and corresponding eigenvectors\n E, psi = eigsh(A,k=k,which='SM')\n\n\n # Perturbated potential\n a = 25\n pot_new = pot_x + pot_y + gauss_pert(N,a).flatten()\n\n # Plot the new potential\n X,Y = np.meshgrid(x,y)\n fig = plt.figure()\n ax = fig.add_subplot(1,2,1,projection='3d')\n ax.plot_surface(X, Y, pot_new.reshape((N,N)), cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n ax = fig.add_subplot(1,2,2)\n fig.suptitle(r'Potential with a Gaussian perturbation')\n ax.imshow(pot_new.reshape(N,N),extent=[-L/2,L/2,-L/2,L/2])\n plt.savefig(os.path.join(path,'perturbated_potential.png'))\n\n # The perturbated Hamiltonian in matrix form\n A = (-1*lap + sps.diags(pot_new))/2\n\n # Calculate the k smallest eigenvalues and corresponding eigenvector\n # Of the perturbated system\n E_p, psi_p = eigsh(A,k=k,which='SM')\n\n return E,psi,E_p,psi_p", "def cs4243_filter_fast(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n ###Your code here####\n \n # pad image to handle border pixels\n pad_height = (int)((Hk - 1)/2)\n pad_width = (int)((Wk - 1)/2)\n image_pad = pad_zeros(image, pad_height, pad_width)\n\n # Flip the kernel horizontal and vertical\n kernel = 
cs4243_rotate180(kernel)\n \n # compute effective output size, assume stride=1\n out_height = 1 + Hi - Hk + 2*pad_height\n out_width = 1 + Wi - Wk + 2*pad_width\n \n # get initial nodes of receptive fields\n recep_fields_h = [i for i in range(out_height)]\n recep_fields_w = [i for i in range(out_width)]\n \n for i in recep_fields_h:\n for j in recep_fields_w: \n # get receptive area\n recep_area = image_pad[i:i+Hk, j:j+Wk] \n filtered_image[i, j] = np.multiply(kernel, recep_area).sum()\n ###\n\n return filtered_image", "def shrink_kernel(self, kernel, up_scale):\n up_scale = torch.tensor(up_scale).float()\n # boundary padding based on the scaling law\n pad_in = (torch.ceil(up_scale**2).int())*((kernel.shape[2]-1)//2)\n pad_h = (torch.ceil(up_scale).int())*((kernel.shape[3]-1)//2)\n pad_w = (torch.ceil(up_scale).int())*((kernel.shape[4]-1)//2)\n padded_kernel = F.pad(kernel, (pad_w, pad_w, pad_h, pad_h, pad_in, pad_in))\n delta = up_scale%1\n \n if delta == 0:\n shrink_factor = 1\n else:\n # shrink_factor for coordinates.\n shrink_factor = (((kernel.shape[4]-1))/(padded_kernel.shape[-1]-1)*(up_scale+1))\n \n # Adjustment to deal with weird filtering on the grid sample function.\n shrink_factor = 1.5*(shrink_factor-0.5)**3 + 0.57 \n\n grid = torch.meshgrid(torch.linspace(-1, 1, kernel.shape[2])*(shrink_factor**2),\n torch.linspace(-1, 1, kernel.shape[3])*shrink_factor, \n torch.linspace(-1, 1, kernel.shape[4])*shrink_factor)\n\n grid = torch.cat([grid[2].unsqueeze(0).unsqueeze(-1), \n grid[1].unsqueeze(0).unsqueeze(-1), \n grid[0].unsqueeze(0).unsqueeze(-1)], dim = -1).repeat(kernel.shape[0],1,1,1,1)\n\n new_kernel = F.grid_sample(padded_kernel, grid.to(device))\n if kernel.shape[-1] - 2*up_scale > 0:\n new_kernel = new_kernel * (kernel.shape[-1]**2/((kernel.shape[-1] - 2*up_scale)**2 + 0.01))\n return new_kernel", "def phase_method(input_sig, output_by_phase, N, **kwargs):\n\n def required_nb_data_func(list_nb_coeff):\n \"\"\"Compute the minimum number of data required.\"\"\"\n return max(list_nb_coeff)\n\n def core_func(phi_by_term, out_by_phase, solver, sizes=[], cast_mode=''):\n \"\"\"Core computation of the identification.\"\"\"\n\n L = out_by_phase.shape[1]\n L = 2*L if cast_mode == 'real-imag' else L\n kernels = dict()\n _phi_by_term = _cast_complex2real(phi_by_term, cast_mode)\n\n for is_odd in [False, True]:\n curr_phases = range(2-is_odd, N+1, 2)\n curr_y = np.concatenate([_complex2real(out_by_phase[p],\n cast_mode=cast_mode)\n for p in curr_phases], axis=0)\n\n curr_phi = np.bmat(\n [[_phi_by_term.get((p+2*k, k), np.zeros((L, sizes[p+2*k-1]))) *\n binomial(p+2*k, k) for k in range(1-(p+1)//2, 1+(N-p)//2)]\n for p in curr_phases])\n\n if not is_odd:\n curr_y = np.concatenate((np.real(out_by_phase[0]), curr_y),\n axis=0)\n n_even = range(2, N+1, 2)\n temp = np.concatenate([_phi_by_term[n, n//2] *\n binomial(n, n//2) for n in n_even],\n axis=1)\n curr_phi = np.concatenate((np.real(temp), curr_phi), axis=0)\n\n curr_f = _solver(curr_phi, curr_y, solver)\n\n index = 0\n for n in range(1 if is_odd else 2, N+1, 2):\n nb_term = sizes[n-1]\n kernels[n] = curr_f[index:index+nb_term]\n index += nb_term\n\n return kernels\n\n return _identification(input_sig, output_by_phase, N,\n required_nb_data_func, core_func, 'term', **kwargs)", "def gkern(kernlen=21, nsig=3):\n interval = (2 * nsig + 1.) 
/ (kernlen)\n x = np.linspace(-nsig - interval / 2., nsig + interval / 2., kernlen + 1)\n kern1d = np.diff(st.norm.cdf(x))\n kernel_raw = np.sqrt(np.outer(kern1d, kern1d))\n kernel = kernel_raw / kernel_raw.sum()\n return kernel;", "def get_kernel(kernel_size, blur=1 / 20, halo=.001):\n\n # generate x and y grids\n x, y = np.mgrid[0:kernel_size * 2 + 1, 0:kernel_size * 2 + 1]\n\n center = kernel_size + 1 # center pixel\n r = np.sqrt((x - center) ** 2 + (y - center) ** 2) # distance from center\n\n # now compute the kernel. This function is a bit arbitrary.\n # adjust this to get the effect you want.\n kernel = np.exp(-r / kernel_size / blur) + (1 - r / r[center, 0]).clip(0) * halo\n return kernel", "def dilate_kernel(self, kernel, dilation):\n if dilation == 0:\n return kernel \n # inside padding based on the scaling law\n dilation = torch.tensor(dilation).float()\n delta = dilation%1\n\n d_in = torch.ceil(dilation**2).int()\n new_in = kernel.shape[2] + (kernel.shape[2]-1)*d_in\n\n d_h = torch.ceil(dilation).int()\n new_h = kernel.shape[3] + (kernel.shape[3]-1)*d_h\n\n d_w = torch.ceil(dilation).int()\n new_w = kernel.shape[4] + (kernel.shape[4]-1)*d_h\n\n new_kernel = torch.zeros(kernel.shape[0], kernel.shape[1], new_in, new_h, new_w)\n new_kernel[:,:,::(d_in+1),::(d_h+1), ::(d_w+1)] = kernel\n dilate_factor = 1\n \n new_kernel = F.pad(new_kernel, ((kernel.shape[4]-1)//2, (kernel.shape[4]-1)//2)*3)\n\n dilate_factor = (new_kernel.shape[-1] - 1 - (kernel.shape[4]-1)*(delta))/(new_kernel.shape[-1] - 1) \n\n grid = torch.meshgrid(torch.linspace(-1, 1, new_in)*(dilate_factor**2), \n torch.linspace(-1, 1, new_h)*dilate_factor, \n torch.linspace(-1, 1, new_w)*dilate_factor)\n\n grid = torch.cat([grid[2].unsqueeze(0).unsqueeze(-1), \n grid[1].unsqueeze(0).unsqueeze(-1), \n grid[0].unsqueeze(0).unsqueeze(-1)], dim = -1).repeat(kernel.shape[0],1,1,1,1)\n\n new_kernel = F.grid_sample(new_kernel, grid) \n \n return new_kernel[:,:,-kernel.shape[2]:]", "def cs4243_filter_faster(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n ###Your code here####\n \n # pad image to handle border pixels\n pad_height = (int)((Hk - 1)/2)\n pad_width = (int)((Wk - 1)/2)\n image_pad = pad_zeros(image, pad_height, pad_width)\n \n # compute effective output size, assume stride=1\n out_height = 1 + Hi - Hk + 2*pad_height\n out_width = 1 + Wi - Wk + 2*pad_width\n \n # get initial nodes of receptive fields\n recep_fields_h = [i for i in range(out_height)]\n recep_fields_w = [i for i in range(out_width)]\n \n # extract receptive area into matrix of shape (Hi*Wi, Hk*Wk)\n recep_areas = []\n for i in recep_fields_h:\n for j in recep_fields_w:\n recep_areas.append(image_pad[i: i+Hk, j: j+Wk].reshape(-1))\n out = np.stack(recep_areas)\n \n # Flip the kernel horizontal and vertical\n kernel = cs4243_rotate180(kernel).reshape(Hk*Wk, 1)\n \n # dot product kernel and receptive areas\n filtered_image = np.dot(out, kernel).reshape(Hi, Wi)\n \n ###\n\n return filtered_image", "def test_fixedkernel(self):\r\n X = np.random.rand(30, 4)\r\n K = np.dot(X, X.T)\r\n kernel = GPy.kern.fixed(4, K)\r\n kern = GPy.kern.poly(5, degree=4)\r\n self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose))", "def even_pODF(omega, qpoints, c, N):\n\n n,m = qpoints.shape\n\n sum = 0.0\n for i in range(n):\n mu = np.dot(omega,qpoints[i,:])\n mu = np.clip(mu, -1.0, 1.0)\n\n sum += c[i]*even_kernel(mu, N)\n \n\n return sum", "def _kernel(self, x, y, t):\n return (self.C / (2 * np.pi * self.sigma_x * 
self.sigma_y * t)) * \\\n tf.exp(- self.beta * t - (tf.square(x)/tf.square(self.sigma_x) + tf.square(y)/tf.square(self.sigma_y)) / (2*t))", "def kernel(r, h, deriv):\n return {\n '0': h**-1 / np.sqrt(np.pi) * np.exp(-r**2/h**2),\n '1': h**-3 / np.sqrt(np.pi) * np.exp(-r**2/h**2) * (-2*r),\n '2': h**-5 / np.sqrt(np.pi) * np.exp(-r**2/h**2) * ( 4*r**2 - 2*h**2),\n '3': h**-7 / np.sqrt(np.pi) * np.exp(-r**2/h**2) * (-8*r**3 + 12*h**2*r)\n }[deriv]", "def gaussian_kernel(N, mu, sigma):\n # Asserting N is odd and sigma is number\n assert assert_odd(N)\n \n # Create the normal here (with ID covariance) \n normal = multivariate_normal(mean=mu, cov=sigma*np.identity(2))\n \n # Create the position matries (x_1,x_2 in 2D)\n X_1 = np.ones((N,N))*np.arange(N) # x_1 pos\n X_2 = X_1.T #x_2 pos, just transpose the above\n \n # Shift the positions so center is at middle\n s = np.floor(N/2) #shift value\n X_1, X_2 = X_1-s, X_2-s # shifted matrices\n \n # Create holder matrix\n X = np.zeros((N,N)) # Below we have the iterator \n for (i,j) in [(i,j) for i in range(N) for j in range(N)]:\n X[i,j] = normal.pdf([X_1[i,j], X_2[i,j]]) # Normal values\n \n # Finally just return the normalized kernel\n return X*(1/np.sum(X))", "def eg2(r_train, r_test, N_train=1000, N_test=500):\n\n def eg2_kernel(r, N):\n X1 = np.random.randn(N)\n X2_1 = np.exp(X1) + 0.1 * np.random.randn(N) # add noise or not?\n X2_2 = np.random.randn(N)\n X2_prob = np.random.uniform(0, 1, N)\n X2 = np.where(X2_prob < r, X2_1, X2_2)\n X3 = np.random.randn(N)\n X4 = np.random.randn(N)\n Y = 210 + 27.4 * X1 + 13.7 * X3 + 13.7 * X4 + np.random.randn(N)\n\n data = {}\n data['X1'] = X1\n data['X2'] = X2\n data['X3'] = X3\n data['X4'] = X4\n data['Y'] = Y\n return data\n\n data_train = eg2_kernel(r_train, N_train)\n data_test = eg2_kernel(r_test, N_test)\n\n return data_train, data_test", "def Ising_1D(N,h):\n sigma_x = np.array([[0,1],[1,0]])\n sigma_z = np.kron(np.array([[1,0],[0,-1]]), np.array([[1,0],[0,-1]]))\n H = np.zeros((2**N,2**N))\n\n # self-interaction\n for i in range(1,N+1): #va da 1 a N\n if (i==1):\n H += np.kron(sigma_x, np.identity(2**(N-1)))\n elif(i!=1 and i!=N):\n H += np.kron(np.identity(2**(i-1)), np.kron(sigma_x, np.identity(2**(N-i))))\n elif(i==N):\n H += np.kron(np.identity(2**(N-1)),sigma_x)\n\n # interaction\n H_tmp = np.zeros((2**N,2**N))\n for i in range(1, N):\n if(i==1):\n H_tmp += np.kron(sigma_z, np.identity(2**(N-2)))\n elif(i!=1 and i!=N-1):\n tmp=np.kron(sigma_z,np.identity(2**(N-i-1))) #dx\n H_tmp += np.kron(np.identity(2**(i-1)), tmp) #sx\n elif(i==N-1):\n H_tmp += np.kron(np.identity(2**(N-2)), sigma_z)\n\n H = -(h*H + H_tmp)\n\n return H", "def gaussian_kernel(size, sigma): \n \n kernel = np.zeros((size, size))\n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n k = (size - 1) / 2\n sigma_sq = sigma ** 2\n pi_sigma = 1/(2 * np.pi * sigma_sq)\n for i in range(size):\n for j in range(size):\n kernel[i, j] = pi_sigma * np.exp(-0.5 * ((i-k)**2 + (j-k)**2) / (sigma_sq))\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return kernel", "def smooth_kernel_fp(z, z_star, h, gamma=2):\n\n # compute probabilities\n p = np.exp(-(np.abs(z-z_star)/h)**gamma)\n # rescale\n p = p / np.sum(p)\n return np.squeeze(p)", "def shift_kernel(kernel, shape, centre):\n h, w = kernel.shape\n assert(h % 2 == 1)\n assert(w % 2 == 1)\n half_h = np.floor(h/2)\n half_w = np.floor(w/2)\n \n result = np.zeros((shape[0]+2*half_h, 
shape[1]+2*half_w)) #zero pad to simplify edge handling \n\n ind_h = centre[0] + np.arange(0, 2*half_h+1, dtype='int') \n ind_w = centre[1] + np.arange(0, 2*half_w+1, dtype='int')\n result[ind_h[:,np.newaxis], ind_w] = kernel\n result = result[half_h:-half_h,half_w:-half_w]\n return result", "def sh( values ):\n # ECMWF normalizes the spherical harmonic coeffs differently than NCEP.\n # (m=0,n=0 is global mean, instead of sqrt(2)/2 times global mean)\n fld = 2.*values/np.sqrt(2.)\n \n #------SPLITTING IT UP IN AN IMAGARY AND REAL PART--------\n fldr = fld[ 0::2 ] #annenhver verdi fra 0\n fldi = fld[ 1::2 ] #annenhver verdi fra 1\n fldn = np.zeros( fldr.shape, 'F' ) #blir halvparten sรฅ stor som orginale fld\n fldn.real = fldr #legges da til i fldn vectoren\n fldn.imag = fldi\n #----------------------------------------------------------\n \n nlons = 360 #Have a feeling it probably is number of values like grid val\n nlats = 1280 #web sais it shourld be 180.. wellwell, seems to work\n s = spharm.Spharmt( nlons, nlats ) \n \n data = s.spectogrd( fldn ) #Hvis nlats = 180, sรฅ feiler denne delen pga hvordan formelen fungerer..\n \n lons = ( 360./nlons ) * np.arange( nlons )\n lats = 90.-( 180./( nlats - 1 ) ) * np.arange( nlats )\n lons, lats = np.meshgrid( lons, lats )\n \n #stack grids side-by-side (in longitiudinal direction), so\n # any range of longitudes (between -360 and 360) may be plotted on a world map.\n lons = np.concatenate(( lons - 360, lons ), 1 )\n lats = np.concatenate(( lats, lats ), 1 )\n data = np.concatenate(( data, data ), 1 )\n \n return lats, lons, data", "def kernel_kmer(X, Y, k=3):\n x_kmer, y_kmer = kmer(X, Y, k)\n\n sim = 0\n for a in x_kmer:\n for b in y_kmer:\n sim += GXY(a, b)\n\n return sim", "def nsphere_volume(n, r):\n return math.pi ** (n / 2) * (r ** n) / gamma(n / 2 + 1)", "def gaussian_kernel(size, sigma):\n\n kernel = np.zeros((size, size))\n\n ### YOUR CODE HERE\n k = (size-1)/2\n factor = 1/(2*np.pi*sigma**2)\n for i in range(size):\n for j in range(size):\n exponent = -((i-k)**2 +(j-k)**2)/(2*sigma**2)\n kernel[i,j] = factor*np.exp(exponent)\n ### END YOUR CODE\n\n return kernel", "def euclid_ccl(Omega_M):\n \n # Parameters from https://arxiv.org/pdf/1903.01473.pdf\n Omega_b_fraction = 0.15653724 # fraction of Omega_M\n \n sigma8 = 0.811\n Omega_b = Omega_b_fraction * Omega_M\n Omega_c = (1 - Omega_b_fraction) * Omega_M \n h = 0.674\n ns = 0.965\n w0 = -1.03\n\n cosmo_fid = ccl.Cosmology(Omega_c=Omega_c, Omega_b=Omega_b, h=0.674\n , sigma8=sigma8, n_s=ns, w0=w0)#, transfer_function='emulator', matter_power_spectrum='emu')\n\n dNdzs = np.zeros((nbins, z.size))\n shears = []\n \n for i in range(nbins):\n # edges of 1 equal width redshift bins, between 0 and 2\n zmin, zmax = i*(2./nbins), (i+1)*(2./nbins)\n # generate dNdz per bin\n dNdzs[i,:] = ccl.dNdz_tomog(z=z, zmin=zmin, zmax=zmax, pz_func=pz\n , dNdz_func = dNdz_true)\n # calculate the shear per bin\n gal_shapes = ccl.WeakLensingTracer(cosmo_fid, dndz=(z, dNdzs[i,:]))\n shears.append(gal_shapes)\n \n # calculate nbin*(nbin+1)/2 = 1 spectra from the shears\n Cls = []\n for i in range(nbins):\n for j in range(0,i+1):\n Cls.append(ccl.angular_cl(cosmo_fid, shears[i], shears[j], ells))\n \n return np.array(Cls), dNdzs", "def _kernel(self, bw, X, x):\n return (1.0 / np.sqrt(2 * np.pi) / bw) * np.exp(\n -((X - x) ** 2) / (bw ** 2 * 2.0)\n )", "def inp_kernel(r, ktype):\n \n if ktype == 'uniform':\n \n if r < 1.:\n return 1./((4./3.)*pi)\n else:\n return 0.\n \n elif ktype == 'sph-anarchy':\n \n if r <= 1.: 
return (21./(2.*pi)) * ((1. - r)*(1. - r)*(1. - r)*(1. - r)*(1. + 4.*r)) \n else: return 0. \n \n elif ktype == 'gadget-2':\n \n if r < 0.5: return (8./pi) * (1. - 6*(r*r) + 6*(r*r*r))\n elif r < 1.: return (8./pi) * 2 * ((1. - r)*(1. - r)*(1. - r))\n else: return 0.\n \n elif ktype == 'cubic':\n \n if r < 0.5: return (2.546479089470 + 15.278874536822 * (r - 1.0) * r * r)\n elif r < 1: return 5.092958178941 * (1.0 - r) * (1.0 - r) * (1.0 - r)\n else: return 0\n \n elif ktype == 'quintic':\n \n if r < 0.333333333: return 27.0*(6.4457752*r*r*r*r*(1.0-r) -1.4323945*r*r +0.17507044)\n elif r < 0.666666667: return 27.0*(3.2228876*r*r*r*r*(r-3.0) +10.7429587*r*r*r -5.01338071*r*r +0.5968310366*r +0.1352817016)\n elif r < 1: return 27.0*0.64457752*(-r*r*r*r*r +5.0*r*r*r*r -10.0*r*r*r +10.0*r*r -5.0*r +1.0)\n else: return 0\n \n else:\n \n print (\"Doesn't recognize the kernel. Input your own kernel in `inp_kernel`\")\n exit()", "def test_uv_degrid_gaussian_kernel():\n\n layout = read_layout(layout_path=f\"{test_data}/test_mwa.txt\")\n xyz = enh_xyz(layout=layout, latitude=mwa_geo.latitude.radians)\n uvw = xyz_uvw(xyz=xyz, freq=freq, dec0=mwa_geo.latitude.radians, ha0=0)\n uv = uv_degrid(\n max_lambda=1400, nside=20, uvw=uvw, sigma=3, kersize=21, kernel=\"gaussian\"\n )\n\n assert uv.shape == (20, 20)\n assert uv[0, 0] == 1.295932713086053e-05", "def kernel_gaussiano(image: np.ndarray, sigma: float, kind: str = 'low') -> np.ndarray:\n U, V = fourier_meshgrid(image)\n D = fourier_distance(U, V)\n H = np.exp( (-1.0 * D) / (2.0 * sigma**2) )\n \n if kind == 'high' or kind == 'highpass':\n H = 1.0 - H\n \n return H", "def perfect_sweep(N):\n\n m = np.arange(0, np.ceil(N / 2 + 1))\n P_half = np.exp(-1j * 2 * np.pi / N * m ** 2)\n return np.real(np.fft.irfft(P_half, n=N))", "def kernal_mus(n_kernels):\n l_mu = [1]\n if n_kernels == 1:\n return l_mu\n\n bin_size = 2.0 / (n_kernels - 1) # score range from [-1, 1]\n l_mu.append(1 - bin_size / 2) # mu: middle of the bin\n for i in range(1, n_kernels - 1):\n l_mu.append(l_mu[i] - bin_size)\n print(l_mu)\n return l_mu", "def test_10_kernels(self):\n ra0, dec0 = CRVAL\n res = 0.01 * DEG\n\n # Test zenithal -- (ra0, dec0) is the reference point.\n for proj in ['TAN', 'ZEA']:\n wcsk = coords.get_wcs_kernel(proj, ra0, dec0, res)\n msg = f'Check crpix for {proj}'\n self.assertAlmostEqual(wcsk.wcs.crpix[0], 1, delta=TOL_RAD, msg=msg)\n self.assertAlmostEqual(wcsk.wcs.crpix[1], 1, delta=TOL_RAD, msg=msg)\n\n # Test cylindrical -- pixell puts the crval[1] on the equator\n # and dec0 is used for the conformal latitude.\n for proj in ['CAR', 'CEA']:\n wcsk = coords.get_wcs_kernel(proj, ra0, dec0, res)\n msg = f'Check crpix for {proj}'\n self.assertAlmostEqual(wcsk.wcs.crpix[0], 1, delta=TOL_RAD, msg=msg)\n self.assertNotAlmostEqual(wcsk.wcs.crpix[1], 1, delta=TOL_RAD, msg=msg)\n\n # This is going to break.\n fp = FP(xi =[0., -0.01*DEG],\n eta=[0., -0.01*DEG])\n sight = get_sightline()\n tod = core.AxisManager(core.LabelAxis('dets', ['a']))\n fp = coords.get_footprint(tod, wcs_kernel=wcsk, focal_plane=fp, sight=sight)", "def N_out(K,P,S,N_in):\n return (int((N_in+2*P-K)/S)+1)", "def get_kernel(ktype):\n \n kernel = np.zeros(kernsize + 1)\n this_kern = partial(inp_kernel, ktype=ktype)\n\n bins = np.arange(0, 1., 1./kernsize)\n bins = np.append(bins, 1.)\n\n for ii in range(kernsize):\n\n y, yerr = integrate.quad(integral_func(this_kern, bins[ii]), 0, np.sqrt(1.-bins[ii]**2))\n kernel[ii] = y * 2.\n \n return kernel", "def kernel(self):\n V = self.matrix().kernel()\n D = 
self.domain()\n if not D.is_ambient():\n # Transform V to ambient space\n # This is a matrix multiply: we take the linear combinations of the basis for\n # D given by the elements of the basis for V.\n B = V.basis_matrix() * D.basis_matrix()\n V = B.row_module(D.base_ring())\n return self.domain().submodule(V, check=False)", "def convolve_spikes_2d(spikes_a,spikes_b,kernel_a,kernel_b):\n output = np.zeros((spikes_a.shape[0]+kernel_a.shape[0],kernel_a.shape[1]*kernel_b.shape[1]))\n for k_i in range(kernel_a.shape[1]):\n for k_j in range(kernel_b.shape[1]):\n mat = np.zeros((kernel_a.shape[0],kernel_b.shape[0]))\n #for l_1 in range(kernel_a.shape[0]):\n # for l_2 in range(kernel_b.shape[0]):\n # mat[l_1,l_2] = kernel_a[l_1,k_i] * kernel_b[l_2,k_j]\n for i in np.where(spikes_a)[0]:\n for j in np.where(spikes_b[(i+1):(i+1+kernel_b.shape[0])])[0]:\n if j < kernel_b.shape[0]:\n output[(i+j):(i+j+kernel_a.shape[0]),k_i * kernel_b.shape[1] + k_j] = output[(i+j):(i+j+kernel_a.shape[0]),k_i * kernel_b.shape[1] + k_j] + kernel_a[:,k_i] * kernel_b[j,k_j] #mat[:,j-i]\n return output[:spikes_a.shape[0],:]", "def _kernel(self, point, observation, bandwidth):\n denom = bandwidth * ((2*math.pi)**.5) \n num = math.exp(-0.5 * ((point-observation)/bandwidth)**2)\n return num/denom", "def process_kernels(kernels):\n kernels = np.where(kernels == 32767, np.nan, kernels/1000.)\n return kernels", "def Kernel(x, y):\n\n Result = (np.dot(x_train[x, :], x_train[y, :])+1)**5 # Polynomial\n #Result = (np.dot(x_train[x, :], x_train[y, :])+1) # Linear\n #Gaussian\n \"\"\"\n sigma = 1\n if np.ndim(x_train[x, :]) == 1 and np.ndim(x_train[y, :]) == 1:\n Result = np.exp(- (np.linalg.norm(x_train[x, :] - x_train[y, :], 2)) ** 2 / (2 * sigma ** 2))\n elif (np.ndim(x_train[x, :]) > 1 and np.ndim(x_train[y, :]) == 1) or (np.ndim(x_train[x, :]) == 1 and np.ndim(x_train[y, :]) > 1):\n Result = np.exp(- (np.linalg.norm(x_train[x, :] - x_train[y, :], 2, axis=1) ** 2) / (2 * sigma ** 2))\n elif np.ndim(x_train[x, :]) > 1 and np.ndim(x_train[y, :]) > 1:\n Result = np.exp(- (np.linalg.norm(x[:, np.newaxis] - y[np.newaxis, :], 2, axis=2) ** 2) / (2 * sigma ** 2))\n \"\"\"\n return Result", "def even_pODF_opt(angles,*args): # qpoints, c, N):\n\n qpoints = args[0]\n c = args[1]\n N = args[2]\n\n n,m = qpoints.shape\n\n theta,phi = angles[0], angles[1]\n omega = np.array([np.sin(theta)*np.cos(phi),np.sin(theta)*np.sin(phi),np.cos(theta)])\n\n sum = 0.0\n for i in range(n):\n mu = np.dot(omega,qpoints[i,:])\n mu = np.clip(mu, -1.0, 1.0)\n\n sum += c[i]*even_kernel(mu, N)\n \n\n return -(N+1)**2 * sum", "def _vrms2(x, y, inc_deg,\n surf_lum, sigma_lum, qobs_lum,\n surf_pot, sigma_pot, qobs_pot,\n beta, tensor, sigmaPsf, normPsf,\n pixSize, pixAng, step, nrad, nang):\n # Axisymmetric deprojection of both luminous and total mass.\n # See equation (12)-(14) of Cappellari (2008)\n #\n inc = np.radians(inc_deg)\n\n qintr_lum = qobs_lum**2 - np.cos(inc)**2\n if np.any(qintr_lum <= 0):\n raise RuntimeError('Inclination too low q < 0')\n qintr_lum = np.sqrt(qintr_lum)/np.sin(inc)\n if np.any(qintr_lum < 0.05):\n raise RuntimeError('q < 0.05 components')\n dens_lum = surf_lum*qobs_lum / (sigma_lum*qintr_lum*np.sqrt(2*np.pi))\n\n qintr_pot = qobs_pot**2 - np.cos(inc)**2\n if np.any(qintr_pot <= 0):\n raise RuntimeError('Inclination too low q < 0')\n qintr_pot = np.sqrt(qintr_pot)/np.sin(inc)\n if np.any(qintr_pot < 0.05):\n raise RuntimeError('q < 0.05 components')\n dens_pot = surf_pot*qobs_pot / (sigma_pot*qintr_pot*np.sqrt(2*np.pi))\n\n # Define 
parameters of polar grid for interpolation\n #\n w = sigma_lum < np.max(np.abs(x)) # Characteristic MGE axial ratio in observed range\n\n if w.sum() < 3:\n qmed = np.median(qobs_lum)\n else:\n qmed = np.median(qobs_lum[w])\n\n rell = np.sqrt(x**2 + (y/qmed)**2) # Elliptical radius of input (x, y)\n\n psfConvolution = (np.max(sigmaPsf) > 0) and (pixSize > 0)\n\n # Kernel step is 1/4 of largest value between sigma(min) and 1/2 pixel side.\n # Kernel half size is the sum of 3*sigma(max) and 1/2 pixel diagonal.\n #\n if (nrad*nang > x.size) and (not psfConvolution): # Just calculate values\n\n xPol = x\n yPol = y\n\n else: # Interpolate values on polar grid\n\n if psfConvolution: # PSF convolution\n if step == 0:\n step = max(pixSize/2., np.min(sigmaPsf))/4.\n mx = 3*np.max(sigmaPsf) + pixSize/np.sqrt(2)\n else: # No convolution\n step = np.min(rell.clip(1)) # Minimum radius of 1pc\n mx = 0\n\n # Make linear grid in log of elliptical radius RAD and eccentric anomaly ANG\n # See Appendix A\n #\n rmax = np.max(rell) + mx # Major axis of ellipse containing all data + convolution\n logRad = np.linspace(np.log(step), np.log(rmax), nrad) # Linear grid in np.log(rell)\n ang = np.linspace(0, np.pi/2, nang) # Linear grid in eccentric anomaly\n radGrid, angGrid = map(np.ravel, np.meshgrid(np.exp(logRad), ang))\n xPol = radGrid*np.cos(angGrid)\n yPol = radGrid*np.sin(angGrid) * qmed\n\n # The model Vrms computation is only performed on the polar grid\n # which is then used to interpolate the values at any other location\n #\n wm2Pol = np.empty_like(xPol)\n mgePol = np.empty_like(xPol)\n for j in range(xPol.size):\n wm2Pol[j] = quadva(_integrand, [0., 1.],\n args=(dens_lum, sigma_lum, qintr_lum,\n dens_pot, sigma_pot, qintr_pot,\n xPol[j], yPol[j], inc, beta, tensor))[0]\n mgePol[j] = np.sum(surf_lum * np.exp(-0.5/sigma_lum**2 *\n (xPol[j]**2 + (yPol[j]/qobs_lum)**2)))\n\n\n if psfConvolution: # PSF convolution\n\n nx = np.ceil(rmax/step)\n ny = np.ceil(rmax*qmed/step)\n x1 = np.linspace(-nx, nx, 2*nx)*step\n y1 = np.linspace(-ny, ny, 2*ny)*step\n xCar, yCar = np.meshgrid(x1, y1) # Cartesian grid for convolution\n\n # Interpolate MGE model and Vrms over cartesian grid\n #\n r1 = 0.5*np.log(xCar**2 + (yCar/qmed)**2) # Log elliptical radius of cartesian grid\n e1 = np.arctan2(np.abs(yCar/qmed), np.abs(xCar)) # Eccentric anomaly of cartesian grid\n\n wm2Car = bilinear_interpolate(logRad, ang, wm2Pol.reshape(nang, nrad), r1, e1)\n mgeCar = bilinear_interpolate(logRad, ang, mgePol.reshape(nang, nrad), r1, e1)\n\n nk = np.ceil(mx/step)\n kgrid = np.linspace(-nk, nk, 2*nk)*step\n xgrid, ygrid = np.meshgrid(kgrid, kgrid) # Kernel is square\n if pixAng != 0:\n xgrid, ygrid = rotate_points(xgrid, ygrid, pixAng)\n\n # Compute kernel with equation (A6) of Cappellari (2008).\n # Normaliztion is irrelevant here as it cancels out.\n #\n kernel = np.zeros_like(xgrid)\n dx = pixSize/2\n sp = np.sqrt(2)*sigmaPsf\n for j in range(len(sigmaPsf)):\n kernel += normPsf[j] \\\n * (special.erf((dx-xgrid)/sp[j]) + special.erf((dx+xgrid)/sp[j])) \\\n * (special.erf((dx-ygrid)/sp[j]) + special.erf((dx+ygrid)/sp[j]))\n kernel /= np.sum(kernel)\n\n # Seeing and aperture convolution with equation (A3)\n #\n muCar = signal.fftconvolve(wm2Car, kernel, mode='same') \\\n / signal.fftconvolve(mgeCar, kernel, mode='same')\n\n # Interpolate convolved image at observed apertures.\n # Aperture integration was already included in the kernel.\n #\n mu = bilinear_interpolate(x1, y1, muCar, x, y)\n\n else: # No PSF convolution\n\n muPol = 
wm2Pol/mgePol\n\n if nrad*nang > x.size: # Just returns values\n mu = muPol\n else: # Interpolate values\n r1 = 0.5*np.log(x**2 + (y/qmed)**2) # Log elliptical radius of input (x,y)\n e1 = np.arctan2(np.abs(y/qmed), np.abs(x)) # Eccentric anomaly of input (x,y)\n mu = bilinear_interpolate(logRad, ang, muPol.reshape(nang, nrad), r1, e1)\n\n return mu", "def calculate_S(func, a, b, N):\n # Trapezoid width\n h = (b - a)/N\n\n # Every even slice\n new_part = func(a) + func(b)\n for i in range(2, N, 2):\n new_part += 2 * func(a + i*h) \n \n return 1/3. * new_part", "def __set_kernels(self):\n self.clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))" ]
[ "0.6827804", "0.67321193", "0.67197824", "0.65318835", "0.6531037", "0.6433048", "0.62198204", "0.6084965", "0.60187733", "0.60123444", "0.59886587", "0.59792995", "0.5959411", "0.59217125", "0.5896507", "0.58899844", "0.5817358", "0.5794526", "0.5773375", "0.5705847", "0.5680283", "0.5675113", "0.5667541", "0.56564933", "0.56430817", "0.5638678", "0.56132454", "0.5592674", "0.55905527", "0.5588033", "0.557899", "0.55324495", "0.5528393", "0.54906994", "0.548902", "0.54788226", "0.5471408", "0.5468485", "0.54673314", "0.54666907", "0.5453019", "0.54477334", "0.54321796", "0.5421711", "0.5418185", "0.54151195", "0.54063004", "0.5396226", "0.53911376", "0.5388393", "0.537939", "0.5373231", "0.5363338", "0.53618705", "0.535909", "0.5357719", "0.53441465", "0.53415436", "0.5326697", "0.5312546", "0.53099126", "0.53082824", "0.5306456", "0.53027517", "0.53021896", "0.529884", "0.52980405", "0.52974826", "0.5293087", "0.5291252", "0.5290944", "0.5288471", "0.528376", "0.5275613", "0.527022", "0.5268308", "0.5261392", "0.5259535", "0.5249261", "0.5233151", "0.5230952", "0.5219942", "0.52189755", "0.5215008", "0.5213539", "0.5208751", "0.5201092", "0.5193708", "0.5191088", "0.5190211", "0.51843834", "0.5183996", "0.51776654", "0.5171981", "0.516973", "0.5165208", "0.5165162", "0.516237", "0.5158324", "0.51577294" ]
0.7083843
0
Derivative of reproducing kernel on even subspaces of maximum degree N.
def even_kernel_der(mu, N):

    # Check that -1 <= mu <= 1
    mu = np.clip(mu, -1, 1)

    #Derivatives of Legendre polynomials
    DlegPolys = legp_der(mu, N)

    coefs = 2*np.arange(0, N+1) + 1

    ker = coefs[0::2]*DlegPolys[0::2]

    return ker.sum() / (4.0*np.pi)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def even_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n \n\n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs[0::2]*legPolys[0::2] \n\n return ker.sum() / (4.0*np.pi)", "def compute_gradient_kernel_respect_to_noise(n):\n\n return np.identity(n)", "def DDG(self, n, e, r, f):\n pre = (-e[:, None] + np.divide.outer((n - 1), r))**2\n pre -= np.divide.outer((n - 1), r**2)\n return pre*f", "def inv_funk_radon_even_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n p_at_zero = legp(0, N)\n\n coefs_num = 2*np.arange(0, N+1) + 1\n coefs_den = np.arange(2,N+1,2) * (np.arange(2,N+1,2) + 1)\n\n ker = coefs_num[2::2]*legPolys[2::2] / (p_at_zero[2::2] * coefs_den)\n\n return ker.sum() / (8.0*np.pi*np.pi)", "def delta(N):\n assert assert_odd(N) # Make sure kernel is odd\n X = np.zeros((N,N)) # Square matrix with all 0s\n middle = int(N/2) # Get the middle cell\n X[middle, middle] = 1\n return X", "def nd_kernel(n):\n n = int(n)\n total_size = 3**n\n mid_point = int((3**n - 1)/2)\n kern = np.zeros(total_size, dtype=bool)\n for i in range(n):\n kern[mid_point-3**i] = True\n kern[mid_point+3**i] = True\n new_shape = 3*np.ones(n, dtype=int) \n unnormed_kern = kern.reshape(new_shape)\n return unnormed_kern/unnormed_kern.sum()", "def DG(self, n, e, r, f):\n\n pre = -e[:, None] + np.divide.outer((n - 1), r)\n return pre*f", "def nth_derivative(f, x, n):\n h = 10e-2\n out_h = 1/(h**n)\n out = 0\n for k in range(0, n+1):\n out += (-1)**(k+n)*choose(n,k)*f(x +k*h)\n return out_h*out", "def kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n \n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs*legPolys \n\n return ker.sum() / (4.0*np.pi)", "def kernel(n):\r\n return [(k, n - abs(k)) for k in range(-n, n + 1)]", "def ddh_per_dim(f, dim):\n diff_ops = [\n lambda f: self._kernel_op.apply_kernel_op_x(f, 'kddx'),\n lambda f: self._kernel_op.apply_kernel_op_y(f, 'kddy'),\n lambda f: self._kernel_op.apply_kernel_op_z(f, 'kddz', 'kddzsh'),\n ]\n return tf.nest.map_structure(lambda diff: diff / grid_spacing[dim]**2,\n diff_ops[dim](f))", "def sub_kernel(kernel, dim1, dim2):\n\n sub_kernel = kernel[dim1[0]:dim1[1],dim2[0]:dim2[1]]\n return sub_kernel", "def dilate_kernel(self, kernel, dilation):\n if dilation == 0:\n return kernel \n # inside padding based on the scaling law\n dilation = torch.tensor(dilation).float()\n delta = dilation%1\n\n d_in = torch.ceil(dilation**2).int()\n new_in = kernel.shape[2] + (kernel.shape[2]-1)*d_in\n\n d_h = torch.ceil(dilation).int()\n new_h = kernel.shape[3] + (kernel.shape[3]-1)*d_h\n\n d_w = torch.ceil(dilation).int()\n new_w = kernel.shape[4] + (kernel.shape[4]-1)*d_h\n\n new_kernel = torch.zeros(kernel.shape[0], kernel.shape[1], new_in, new_h, new_w)\n new_kernel[:,:,::(d_in+1),::(d_h+1), ::(d_w+1)] = kernel\n dilate_factor = 1\n \n new_kernel = F.pad(new_kernel, ((kernel.shape[4]-1)//2, (kernel.shape[4]-1)//2)*3)\n\n dilate_factor = (new_kernel.shape[-1] - 1 - (kernel.shape[4]-1)*(delta))/(new_kernel.shape[-1] - 1) \n\n grid = torch.meshgrid(torch.linspace(-1, 1, new_in)*(dilate_factor**2), \n torch.linspace(-1, 1, new_h)*dilate_factor, \n torch.linspace(-1, 1, new_w)*dilate_factor)\n\n grid = torch.cat([grid[2].unsqueeze(0).unsqueeze(-1), \n grid[1].unsqueeze(0).unsqueeze(-1), \n grid[0].unsqueeze(0).unsqueeze(-1)], dim = 
-1).repeat(kernel.shape[0],1,1,1,1)\n\n new_kernel = F.grid_sample(new_kernel, grid) \n \n return new_kernel[:,:,-kernel.shape[2]:]", "def d(i):\n if i==0:\n return 0\n elif (i%2)==0:\n return g(i-1) % N\n else:\n return g(i) % N", "def eg3(N_train=1000, N_test=500, depend_ratio_train=0.8, depend_ratio_test=0.2, feature_num=10, stable_ratio=0.4):\n\n def eg3_kernel(n, p, stable_ratio=0.4, depend_ratio=0.8):\n p_stable = int(p * stable_ratio)\n p_noise = p - p_stable\n stable_feature = np.random.randn(n, p_stable)\n noise_feature_dependent = np.zeros([n, p_noise])\n noise_feature_independent = np.random.randn(n, p_noise)\n for i in range(p_noise):\n noise_feature_dependent[:, i] = stable_feature[:, i % p_stable] + stable_feature[:,\n (i + 1) % p_stable] + 2 * np.random.randn(\n n) # still need noise\n noise_depend_label = np.random.uniform(0, 1, n).reshape(-1, 1)\n noise_depend_label = np.concatenate([noise_depend_label] * p_noise, axis=1)\n noise_feature = np.where(noise_depend_label < depend_ratio, noise_feature_dependent, noise_feature_independent)\n\n b = np.zeros([p_stable, 1])\n linear_len = int(p_stable / 2)\n\n for i in range(linear_len): # linear part\n b[i, 0] = (-1) ** i * (i % 3 + 1) * p / 3\n for i in range(linear_len, b.shape[0]): # nonlinear part\n b[i, 0] = p / 2\n\n linear_part = np.matmul(stable_feature[:, :linear_len], b[:linear_len, 0])\n nolinear_part = np.zeros([n, 1])\n for i in range(linear_len, b.shape[0]):\n temp = stable_feature[:, i % p_stable] * stable_feature[:, (i + 1) % p_stable] * b[i, 0]\n temp = temp.reshape(-1, 1)\n nolinear_part += temp\n\n Y = linear_part.reshape(-1, 1) + nolinear_part + np.random.randn(n, 1)\n\n data = {}\n data['stable'] = stable_feature\n data['noise'] = noise_feature\n data['Y'] = Y\n data['params'] = b\n data['kernel'] = 'eg3'\n return data\n\n data_train = eg3_kernel(n=N_train, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_train)\n data_test = eg3_kernel(n=N_test, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_test)\n return data_train, data_test", "def perfect_sweep(N):\n\n m = np.arange(0, np.ceil(N / 2 + 1))\n P_half = np.exp(-1j * 2 * np.pi / N * m ** 2)\n return np.real(np.fft.irfft(P_half, n=N))", "def evolve_system(self, x, n, k, gamma):\n temp = tf.pow(k, n)/(tf.pow(x, n)+tf.pow(k,n))\n # dxdt = tf.manip.roll(temp, shift = -1, axis = 1) - gamma*x # v1.6+\n dxdt = tf.concat([ tf.reshape(temp[:, -1], [-1, 1]),\n temp[:,:-1]], axis=1) - gamma*x # v1.5\n dxdt = tf.convert_to_tensor(dxdt, dtype = tf.float32, name = \"dxdt\")\n return dxdt", "def _eunn_loop(state, capacity, diag_vec_list, off_vec_list, diag, fft):\n i = 0\n def layer_tunable(x, i):\n\n diag_vec = diag_vec_list.read(i)\n off_vec = off_vec_list.read(i)\n\n diag = tf.multiply(x, diag_vec)\n off = tf.multiply(x, off_vec)\n\n def even_input(off, size):\n\n def even_s(off, size):\n off = tf.reshape(off, [-1, size//2, 2])\n off = tf.reshape(tf.reverse(off, [2]), [-1, size])\n return off\n\n def odd_s(off, size):\n off, helper = tf.split(off, [size-1, 1], 1)\n size -= 1\n off = even_s(off, size)\n off = tf.concat([off, helper], 1)\n return off\n\n off = tf.cond(tf.equal(tf.mod(size, 2), 0), lambda: even_s(off, size), lambda: odd_s(off, size))\n return off\n\n def odd_input(off, size):\n helper, off = tf.split(off, [1, size-1], 1)\n size -= 1\n off = even_input(off, size)\n off = tf.concat([helper, off], 1)\n return off\n\n size = int(off.get_shape()[1])\n off = tf.cond(tf.equal(tf.mod(i, 2), 0), lambda: even_input(off, size), 
lambda: odd_input(off, size))\n\n layer_output = diag + off\n i += 1\n\n return layer_output, i\n\n def layer_fft(state, i):\n\n diag_vec = diag_vec_list.read(i)\n off_vec = off_vec_list.read(i)\n diag = tf.multiply(state, diag_vec)\n off = tf.multiply(state, off_vec)\n\n hidden_size = int(off.get_shape()[1])\n # size = 2**i\n dist = capacity - i\n normal_size = (hidden_size // (2**dist)) * (2**(dist-1))\n normal_size *= 2\n extra_size = tf.maximum(0, (hidden_size % (2**dist)) - (2**(dist-1)))\n hidden_size -= normal_size\n\n def modify(off_normal, dist, normal_size):\n off_normal = tf.reshape(tf.reverse(tf.reshape(off_normal, [-1, normal_size//(2**dist), 2, (2**(dist-1))]), [2]), [-1, normal_size])\n return off_normal\n\n def do_nothing(off_normal):\n return off_normal\n\n off_normal, off_extra = tf.split(off, [normal_size, hidden_size], 1)\n off_normal = tf.cond(tf.equal(normal_size, 0), lambda: do_nothing(off_normal), lambda: modify(off_normal, dist, normal_size))\n helper1, helper2 = tf.split(off_extra, [hidden_size-extra_size, extra_size], 1)\n off_extra = tf.concat([helper2, helper1], 1)\n off = tf.concat([off_normal, off_extra], 1)\n\n layer_output = diag + off\n i += 1\n\n return layer_output, i\n\n if fft:\n layer_function = layer_fft\n else:\n layer_function = layer_tunable\n output, _ = tf.while_loop(lambda state, i: tf.less(i, capacity), layer_function, [state, i])\n\n if not diag is None:\n output = tf.multiply(output, diag)\n\n\n return output", "def dpp_sw(kernel_matrix, window_size=3, max_length=14, epsilon=1E-10):\r\n item_size = kernel_matrix.shape[0]\r\n v = np.zeros((max_length, max_length))\r\n cis = np.zeros((max_length, item_size))\r\n di2s = np.copy(np.diag(kernel_matrix))\r\n selected_items = list()\r\n selected_item = np.argmax(di2s)\r\n selected_items.append(selected_item)\r\n window_left_index = 0\r\n while len(selected_items) < max_length:\r\n k = len(selected_items) - 1\r\n ci_optimal = cis[window_left_index:k, selected_item]\r\n di_optimal = math.sqrt(di2s[selected_item])\r\n v[k, window_left_index:k] = ci_optimal\r\n v[k, k] = di_optimal\r\n elements = kernel_matrix[selected_item, :]\r\n eis = (elements - np.dot(ci_optimal, cis[window_left_index:k, :])) / di_optimal\r\n cis[k, :] = eis\r\n di2s -= np.square(eis)\r\n if len(selected_items) >= window_size:\r\n window_left_index += 1\r\n for ind in range(window_left_index, k + 1):\r\n t = math.sqrt(v[ind, ind] ** 2 + v[ind, window_left_index - 1] ** 2)\r\n c = t / v[ind, ind]\r\n s = v[ind, window_left_index - 1] / v[ind, ind]\r\n v[ind, ind] = t\r\n v[ind + 1:k + 1, ind] += s * v[ind + 1:k + 1, window_left_index - 1]\r\n v[ind + 1:k + 1, ind] /= c\r\n v[ind + 1:k + 1, window_left_index - 1] *= c\r\n v[ind + 1:k + 1, window_left_index - 1] -= s * v[ind + 1:k + 1, ind]\r\n cis[ind, :] += s * cis[window_left_index - 1, :]\r\n cis[ind, :] /= c\r\n cis[window_left_index - 1, :] *= c\r\n cis[window_left_index - 1, :] -= s * cis[ind, :]\r\n di2s += np.square(cis[window_left_index - 1, :])\r\n di2s[selected_item] = -np.inf\r\n selected_item = np.argmax(di2s)\r\n if di2s[selected_item] < epsilon:\r\n break\r\n selected_items.append(selected_item)\r\n return selected_items", "def rk4_sde(self, x, rv_n):\n a21 = 2.71644396264860\n a31 = - 6.95653259006152\n a32 = 0.78313689457981\n a41 = 0.0\n a42 = 0.48257353309214\n a43 = 0.26171080165848\n a51 = 0.47012396888046\n a52 = 0.36597075368373\n a53 = 0.08906615686702\n a54 = 0.07483912056879\n\n q1 = 2.12709852335625\n q2 = 2.73245878238737\n q3 = 11.22760917474960\n q4 = 
13.36199560336697\n\n n = self.mp.params[0]; k = self.mp.params[1];\n gamma = self.mp.params[2]; dt = self.mp.params[3];\n\n if x.get_shape()[1] > 1:\n evolve_fun = self.evolve_system\n else:\n evolve_fun = self.evolve\n\n x1 = x\n k1 = dt * evolve_fun(x1, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x2 = x1 + a21 * k1\n k2 = dt * evolve_fun(x2, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x3 = x1 + a31 * k1 + a32 * k2\n k3 = dt * evolve_fun(x3, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x4 = x1 + a41 * k1 + a42 * k2\n k4 = dt * evolve_fun(x4, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x_new = x1 + a51 * k1 + a52 * k2 + a53 * k3 + a54 * k4\n\n return tf.cast(x_new, tf.float32)", "def inv_funk_radon_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n p_at_zero = legp(0, N)\n coefs = 2*np.arange(0, N+1, 2) + 1\n ker = coefs*legPolys[::2]/p_at_zero[::2]\n return ker.sum() / (8*np.pi)", "def get_derivative(self, model, params, n):\n params1 = np.array(params)\n params2 = np.array(params)\n\n params1[n] += self.eps\n params2[n] -= self.eps\n\n res1 = model.run(params1)\n res2 = model.run(params2)\n\n d = (res1 - res2) / (2 * self.eps)\n\n return d.ravel()", "def grad_n(f: FlowFieldVal, dim: int, h: float) -> FlowFieldVal:\n if dim == 0:\n df = kernel_op.apply_kernel_op_x(f, 'kDx')\n elif dim == 1:\n df = kernel_op.apply_kernel_op_y(f, 'kDy')\n elif dim == 2:\n df = kernel_op.apply_kernel_op_z(f, 'kDz', 'kDzsh')\n else:\n raise ValueError('Unsupport dimension: {}'.format(dim))\n\n return [df_i / (2.0 * h) for df_i in df]", "def evolve(self, x, n, k, gamma):\n dxdt = tf.pow(x, n)/(tf.pow(x, n)+tf.pow(k,n)) - gamma*x\n return dxdt", "def softmax_derivative(x):\n der = derivative(softmax,x,dx=1e-9)\n return der", "def backward(g, N, K):\n\tb = np.zeros((N,K))\n\tfor t in reversed(xrange(0,N-1)):\n\t\tby = b[t+1,:]\n\t\tfor yp in xrange(K):\n\t\t\tb[t,yp] = misc.logsumexp(by + g[t,yp,:])\n\treturn b", "def ogfft2(x, N):\n x_p = brc(x)\n PI = np.pi\n for ii in np.arange(1,int(np.log2(N)) + 1):\n M = int(2**ii)\n w_M = np.exp(1j*((2*PI)/M))\n for kk in np.arange(0,N,M):\n w = 1\n m = int(M/2)\n for jj in np.arange(m):\n t = w*x_p[kk + jj + m]\n u = x_p[kk + jj]\n x_p[kk + jj] = u + t\n x_p[kk + jj + m] = u - t\n w = w*w_M\n return x_p", "def F_std(d, N):\n # memoize specht() and weyl() results (but only for current call)\n specht_mem, weyl_mem = memoize(specht), memoize(weyl)\n\n return sum(\n d ** (-N - 2)\n * sum(sqrt(specht_mem(mu) * weyl_mem(d, mu)) for mu in box_added(alpha, d)) ** 2\n for alpha in Partitions(n=N - 1, max_length=d)\n )", "def eg4(N_train=1000, N_test=500, depend_ratio_train=0.8, depend_ratio_test=0.2, feature_num=10, stable_ratio=0.4):\n\n def eg4_kernel(n, p, stable_ratio=0.4, depend_ratio=0.8):\n p_stable = int(p * stable_ratio)\n p_noise = p - p_stable\n noise_feature = np.random.randn(n, p_noise)\n stable_feature_dependent = np.zeros([n, p_stable])\n stable_feature_independent = np.random.randn(n, p_stable)\n for i in range(p_stable):\n stable_feature_dependent[:, i] = noise_feature[:, i % p_noise] + noise_feature[:,\n (i + 1) % p_noise] + 2 * np.random.randn(\n n) # still need noise\n stable_depend_label = np.random.uniform(0, 1, n).reshape(-1, 1)\n stable_depend_label = np.concatenate([stable_depend_label] * p_stable, axis=1)\n stable_feature = np.where(stable_depend_label < depend_ratio, stable_feature_dependent,\n stable_feature_independent)\n\n b = np.zeros([p_stable, 1])\n linear_len = int(p_stable / 
2)\n\n for i in range(linear_len): # linear part\n b[i, 0] = (-1) ** i * (i % 3 + 1) * p / 3\n for i in range(linear_len, b.shape[0]): # nonlinear part\n b[i, 0] = p / 2\n\n Y = np.matmul(stable_feature, b) + np.random.randn(n, 1)\n\n data = {}\n data['stable'] = stable_feature\n data['noise'] = noise_feature\n data['Y'] = Y\n data['params'] = b\n data['kernel'] = 'eg4'\n return data\n\n data_train = eg4_kernel(n=N_train, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_train)\n data_test = eg4_kernel(n=N_test, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_test)\n return data_train, data_test", "def derivative_ex(dirichl_space, neumann_space, ep_in, ep_ex, kappa, operator_assembler):\n phi_id = sparse.identity(dirichl_space, dirichl_space, dirichl_space)\n dph_id = sparse.identity(neumann_space, neumann_space, neumann_space)\n ep = ep_ex/ep_in\n\n dF = laplace.double_layer(dirichl_space, dirichl_space, dirichl_space, assembler=operator_assembler)\n dP = modified_helmholtz.double_layer(dirichl_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler)\n B = 1/ep * dF - dP\n\n F = laplace.single_layer(neumann_space, dirichl_space, dirichl_space, assembler=operator_assembler)\n P = modified_helmholtz.single_layer(neumann_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler)\n A = F - P\n\n ddF = laplace.hypersingular(dirichl_space, neumann_space, neumann_space, assembler=operator_assembler)\n ddP = modified_helmholtz.hypersingular(dirichl_space, neumann_space, neumann_space, kappa, assembler=operator_assembler)\n D = 1/ep * (ddP - ddF)\n\n dF0 = laplace.adjoint_double_layer(neumann_space, neumann_space, neumann_space, assembler=operator_assembler)\n dP0 = modified_helmholtz.adjoint_double_layer(neumann_space, neumann_space, neumann_space, kappa, assembler=operator_assembler)\n C = dF0 - 1.0/ep*dP0\n\n A_sys = bempp.api.BlockedOperator(2, 2)\n A_sys[0, 0] = (0.5*(1.0 + (1.0/ep))*phi_id) + B\n A_sys[0, 1] = -A\n A_sys[1, 0] = D\n A_sys[1, 1] = (0.5*(1.0 + (1.0/ep))*dph_id) - C\n\n return A_sys", "def test_fixedkernel(self):\r\n X = np.random.rand(30, 4)\r\n K = np.dot(X, X.T)\r\n kernel = GPy.kern.fixed(4, K)\r\n kern = GPy.kern.poly(5, degree=4)\r\n self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose))", "def DisLayerSN_d(ndf, k):\n d_in = 2**k \n d_out = 2**(k+1)\n\n out = nn.Sequential(nn.utils.spectral_norm(\n nn.Conv2d(ndf*d_in, ndf*d_out, kernel_size, stride=stride, padding=padding, bias=False)), \n nn.Dropout2d(),\n nn.BatchNorm2d(ndf * d_out), \n nn.LeakyReLU(0.2, inplace=True) )\n return out", "def truncated_svd(A,k=None):", "def get_derivative(self,var,g=None):\n if (g==None):g=self.g\n A=np.zeros([self.n+1,self.n])\n B=np.zeros([self.n+1])\n for i in range(self.n):\n B[i]=self.gamma*2.*g*self.N*(self.n-self.N)+np.sum([self.XXZ.Z(k,i)*(var[k]-var[i]) for k in range(self.n) if k!=i])\n A[self.n][i]=1\n for j in range(self.n):\n if(i==j): A[i][j]=2.*var[i]+2.+g*np.sum([self.XXZ.Z(k,i) for k in range(self.n) if k!=i])\n else: A[i][j]=-g*self.XXZ.Z(j,i)\n Ainv=np.linalg.pinv(A)\n der=np.dot(Ainv,B)\n return der", "def keepsize(nx, ny, noise, depth, activation='relu', n_filters=64, l2_reg=1e-7):\n\n def residual(inputs, n_filters):\n x = ReflectionPadding2D()(inputs)\n x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)\n x = BatchNormalization()(x)\n x = Activation(activation)(x)\n x = ReflectionPadding2D()(x)\n x = Conv2D(n_filters, (3, 3), 
padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)\n x = BatchNormalization()(x)\n x = add([x, inputs])\n\n return x\n\n inputs = Input(shape=(nx, ny, 1))\n x = GaussianNoise(noise)(inputs)\n\n x = ReflectionPadding2D()(x)\n x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)\n x0 = Activation(activation)(x)\n\n x = residual(x0, n_filters)\n\n for i in range(depth-1):\n x = residual(x, n_filters)\n\n x = ReflectionPadding2D()(x)\n x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)\n x = BatchNormalization()(x)\n x = add([x, x0])\n\n# Upsampling for superresolution\n x = UpSampling2D()(x)\n x = ReflectionPadding2D()(x)\n x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)\n x = Activation(activation)(x)\n\n final = Conv2D(1, (1, 1), padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)\n\n return Model(inputs=inputs, outputs=final)", "def softmax_derivative(Z):\n\treturn None", "def get_blur_kernel(n):\n return [1/n**2] * n**2", "def ddx(a):\n\t# avoid corner effects\n\tthick = 2\n\ta = np.concatenate((a[(thick-1)::-1],a,a[:-(thick+1):-1]))\n\tmode=\"same\"\n\tmode = \"valid\"\n\t\n\tK = np.array([-0.5,0,0.5])\n\tda = scig.convolve(a,K,mode=mode)\n\treturn da", "def den_evolve(self, delt, txp, src):\n self.ne += (-txp.dfluxe + src.se)*delt\n self.ni += (-txp.dfluxi + src.si)*delt", "def gauss_derivatives(im, n, ny=None):\n\n gx, gy = gauss_derivative_kernels(n, size_y=ny)\n\n imx = signal.convolve(im, gx, mode='same')\n imy = signal.convolve(im, gy, mode='same')\n\n return imx, imy", "def doubling_map_fourier(k_max: int) -> Callable[[V], V]:\n k = dual_group(k_max)\n\n def g(y: V) -> V:\n z = np.zeros_like(y)\n z[k % 2 == 0] = y[np.abs(k) <= k_max // 2]\n return z\n return g", "def get_DOGs(inner_sigma, x, shape):\n DOG = make_DOG(inner_sigma, x)\n result = np.zeros((shape[0]*shape[1], x.size**2))\n for i in range(shape[0]): \n for j in range(shape[1]): \n k = shift_kernel(DOG, shape, (i,j))\n result[i+shape[0]*j,:] = k.flatten()\n \n return result", "def peirce_dev(N: int, n: int = 1, m: int = 1) -> float:\n # Assign floats to input variables:\n N = float(N)\n n = float(n)\n m = float(m)\n\n # Check number of observations:\n if N > 1:\n # Calculate Q (Nth root of Gould's equation B):\n Q = (n ** (n / N) * (N - n) ** ((N - n) / N)) / N\n #\n # Initialize R values (as floats)\n r_new = 1.0\n r_old = 0.0 # <- Necessary to prompt while loop\n #\n # Start iteration to converge on R:\n while abs(r_new - r_old) > (N * 2.0e-16):\n # Calculate Lamda\n # (1/(N-n)th root of Gould's equation A'):\n ldiv = r_new ** n\n if ldiv == 0:\n ldiv = 1.0e-6\n Lamda = ((Q ** N) / (ldiv)) ** (1.0 / (N - n))\n # Calculate x-squared (Gould's equation C):\n x2 = 1.0 + (N - m - n) / n * (1.0 - Lamda ** 2.0)\n # If x2 goes negative, return 0:\n if x2 < 0:\n x2 = 0.0\n r_old = r_new\n else:\n # Use x-squared to update R (Gould's equation D):\n r_old = r_new\n r_new = np.exp((x2 - 1) / 2.0) * scipy.special.erfc(\n np.sqrt(x2) / np.sqrt(2.0)\n )\n else:\n x2 = 0.0\n return x2", "def get_deprojecter(self,layer,n):\n K = layer.kernel_size\n P = layer.padding\n S = layer.stride\n if (isinstance(layer,nn.MaxPool2d)):\n return (lambda slices:(\n Projector.get_slic(slices[0],S,P,K,n),\n Projector.get_slic(slices[1],S,P,K,n)\n ),\n Projector.N_out(K,P,S,n)) #TODO: dont assume square 
image\n else:\n return (lambda slices:(\n Projector.get_slic(slices[0],S[0],P[0],K[0],n),\n Projector.get_slic(slices[1],S[1],P[1],K[1],n)\n ),\n Projector.N_out(K[0],P[0],S[0],n)) #TODO: dont assume square image", "def even_pODF(omega, qpoints, c, N):\n\n n,m = qpoints.shape\n\n sum = 0.0\n for i in range(n):\n mu = np.dot(omega,qpoints[i,:])\n mu = np.clip(mu, -1.0, 1.0)\n\n sum += c[i]*even_kernel(mu, N)\n \n\n return sum", "def kernel_diff2_tr(self, x, kernel_res):\n x = np.atleast_2d(x)\n\n n = x.shape[0]\n d = x.shape[1]\n\n kxx, k_xx, k_x_x = kernel_res\n\n assert_shape(kxx, (n, n))\n assert_shape(k_xx, (n, n, d))\n assert_shape(k_x_x, (n, n, d))\n\n k_xx_tr = np.sum(k_xx, axis=-1)\n k_x_x_tr = np.sum(k_x_x, axis=-1)\n\n res = kxx*d - k_xx_tr - k_xx_tr.T + k_x_x_tr # (n, n)\n\n return res", "def magma_dnrm2(n, dx, incx, queue):\n\n return _libmagma.magma_dnrm2(n, int(dx), incx, queue)", "def _denoise_tv_chambolle_nd(image, weight=0.1, eps=2.e-4, n_iter_max=200,\n xp=None):\n\n ndim = image.ndim\n p = xp.zeros((image.ndim, ) + image.shape, dtype=image.dtype)\n g = xp.zeros_like(p)\n d = xp.zeros_like(image)\n i = 0\n slices_g = [slice(None), ] * (ndim + 1)\n slices_d = [slice(None), ] * ndim\n slices_p = [slice(None), ] * (ndim + 1)\n while i < n_iter_max:\n if i > 0:\n # d will be the (negative) divergence of p\n d = -p.sum(0)\n for ax in range(ndim):\n slices_d[ax] = slice(1, None)\n slices_p[ax+1] = slice(0, -1)\n slices_p[0] = ax\n d[tuple(slices_d)] += p[tuple(slices_p)]\n slices_d[ax] = slice(None)\n slices_p[ax+1] = slice(None)\n out = image + d\n E = (d * d).sum()\n else:\n out = image\n E = 0.\n\n # g stores the gradients of out along each axis\n # e.g. g[0] is the first order finite difference along axis 0\n for ax in range(ndim):\n slices_g[ax+1] = slice(0, -1)\n slices_g[0] = ax\n if xp == np:\n g[tuple(slices_g)] = xp.diff(out, axis=ax)\n else:\n g[tuple(slices_g)] = diff(out, axis=ax)\n slices_g[ax+1] = slice(None)\n\n norm = (g * g).sum(axis=0, keepdims=True)\n xp.sqrt(norm, out=norm)\n E += weight * norm.sum()\n tau = 1. 
/ (2.*ndim)\n norm *= tau / weight\n norm += 1.\n p -= tau * g\n p /= norm\n E /= float(image.size)\n if i == 0:\n E_init = E\n E_previous = E\n else:\n if abs(E_previous - E) < eps * E_init:\n break\n else:\n E_previous = E\n i += 1\n return out", "def reduced_grad(self, n):\n\n pol = n.shape[1]\n\n if pol == 1:\n s = self.sigma(n) ** (0.5) / (2 * (3*np.pi**2)**(1/3) @ n** (4/3) )\n\n if pol == 2:\n s = np.zeros_like(n)\n s[:, 0] = self.sigma(2 * n[:,0])**(0.5) / (2 * (3*np.pi**2)**(1/3) @ (2 * n[:,0])**(4/3))\n s[:, 0] = self.sigma(2 * n[:,1])**(0.5) / (2 * (3*np.pi**2)**(1/3) @ (2 * n[:,1])**(4/3))\n\n #WARNING: S might be zero, in that case, that particular cell should be turned to zero#\n\n return s", "def f(i):\n return e(2**N-1-i) ^ 2**(N-1)", "def erosion2d(value, kernel, strides, rates, padding, name=None):\n with ops.name_scope(name, \"erosion2d\", [value, kernel]) as name:\n # Reduce erosion to dilation by duality.\n return math_ops.negative(\n gen_nn_ops.dilation2d(\n input=math_ops.negative(value),\n filter=array_ops.reverse_v2(kernel, [0, 1]),\n strides=strides,\n rates=rates,\n padding=padding,\n name=name))", "def euler_sde(self, x, rv_n):\n n = self.mp.params[0]; k = self.mp.params[1];\n gamma = self.mp.params[2]; dt = self.mp.params[3];\n\n if x.get_shape()[1] > 1:\n evolve_fun = self.evolve_system\n else:\n evolve_fun = self.evolve\n\n dx = dt * self.evolve(x, n, k, gamma)\n x = x + dx + tf.sqrt(dt)*x*rv_n\n return tf.cast(x, tf.float32)", "def test():\n\n S = \"cells interlinked within cells interlinked\"\n T = \"within one stem and dreadfully distinct\"\n\n n = 2\n\n res = kernel(S, T, n)\n\n print(res)\n print('k(car, car, 1) = ', kernel('car', 'car', 1),\n 'should be 3*lambda^2 = .75')\n print('k(car, car, 2) = ', kernel('car', 'car', 2),\n ' should be lambda^6 + 2*lambda^4 = 0.140625')\n print('k(car, car, 3) = ', kernel('car', 'car', 3),\n 'should be lambda^6 = 0.0156')\n\n print('normkernel(cat, car, 1) = ', normkernel('cat', 'car', 1),\n 'should be 2/3')\n print('kernel(cat, car, 2) = ', kernel('cat', 'car', 2),\n 'should be lambda^4 = 0.0625')\n print('normkernel(cat, car, 2) = ', normkernel('cat', 'car', 2),\n 'should be 1/(2+lambda^2) = 0.44444')\n\n print(\n kernel(\"AxxxxxxxxxB\", \"AyB\", 2),\n 'should be =0.5^14 = 0.00006103515625')\n print(\n kernel(\"AxxxxxxxxxB\", \"AxxxxxxxxxB\", 2),\n 'should be 12.761724710464478')\n\n print(kernel(\"ab\", \"axb\", 2), 'should be =0.5^5 = 0.03125')\n print(kernel(\"ab\", \"abb\", 2), 'should be 0.5^5 + 0.5^4 = 0.09375')\n print(normkernel(\"ab\", \"ab\", 2), 'should be 1')\n print(normkernel(\"AxxxxxxxxxB\", \"AxxxxxxxxxB\", 2), 'should be 1')\n\n kss = [0.580, 0.580, 0.478, 0.439, 0.406, 0.370]\n for x in range(1, 7):\n print(x,\n normkernel(\"science is organized knowledge\",\n \"wisdom is organized life\", x), 'should be',\n kss[x - 1])", "def gaussian_1yDerivative_kernel(windowX, windowY, sigma):\n # See [http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/MARBLE/low/edges/canny.htm]\n X, Y = createKernalWindowRanges(windowX, windowY, increment)\n \n g_dy_kernel = gaussianFirstDerivative(Y, 0, sigma) * gaussianNormalised(X, 0, sigma)\n gSum = np.sum(np.abs(g_dy_kernel))\n \n if gSum == 0:\n print \"Warning dy_g_kernel:: Not normalising by sum of values, as sum = \" + str(gSum)\n return (g_dy_kernel)\n else:\n return (g_dy_kernel / gSum)", "def iterate4(x, omega=1, N=Mynum):\n omega = 1\n n = len(x)\n h = 1.0 / (N - 1.)\n A = redblackA(N)\n b = redblackb(N)\n \n m = (n-1)/2\n l = (n-1)\n \n for i in 
range(0,n):\n xsum=0\n for j in range(0,n):\n xsum = xsum + A[i,j]*x[j] \n xsum = xsum - A[i,i]*x[i] \n x[i] = omega * (b[i] - xsum) / A[i,i] + (1-omega)*x[i]\n \n return x", "def convolve2d(img, kernel):\n #Flip the kernel\n kernel = utils.flip2d(kernel) \n #print(len(kernel))\n \n c = copy.deepcopy(img)\n \n #print(len(c))\n #Padd the image\n pad = int((len(kernel)-1)/2)\n\n\n padded_img = utils.zero_pad(img,pad,pad)\n #print(len(padded_img), len(padded_img[0]))\n #print(len(kernel))\n #print(len(img)**2)\n og_img=[]\n#c = copy.deepcopy(img)\n j=0\n offset = 0\n for m in range(len(img) * len(img[0])): # size of kernel x kernel\n x = []\n \n for i in range(len(kernel)): #3 is kernel size\n #print(i,j)\n x.append(padded_img[i+offset][j:j+len(kernel)])\n #print((x))\n sum = 0\n for k in range(len(kernel)):\n for l in range(len(kernel[0])):\n sum+= x[k][l] * kernel[k][l]\n #print(i,j)\n #print(sum)\n og_img.append(sum) \n j+=1\n if (j == len(img[0])):\n j = 0\n offset+= 1\n \n #print(len(img), len(img[0]))\n final_img = []\n for i in range(0,(len(img)*len(img[0])),len(img[0])):\n final_img.append(og_img[i:i+len(img[0])])\n #print(len(final_img)), len(final_img[0])\n return final_img\n\n # TODO: implement this function.", "def tf_deconv2d_infer(node):\n output_shape = np.array(node.in_node(0).value)\n kernel_shape = node.in_node(1).shape\n if output_shape is None or kernel_shape is None or node.spatial_dims is None or node.stride is None:\n return\n spatial_dims = node.spatial_dims\n output_spatial = np.array(output_shape[spatial_dims])\n stride_spatial = np.array(node.stride[spatial_dims])\n kernel_spatial = np.array(kernel_shape[0:len(spatial_dims)]) # kernel spatial dims go first\n node.pad_spatial_shape, input_spatial_for_check = tf_window_op_pad_infer(\n output_spatial, kernel_spatial, stride_spatial, node.auto_pad)\n\n assert all(input_spatial_for_check == node.in_node(2).shape[spatial_dims])\n\n pad = np.zeros((len(output_shape), 2), dtype=np.int64)\n pad[spatial_dims] = node.pad_spatial_shape\n node.pad = pad\n\n node.output_shape = output_shape\n node.out_node().shape = output_shape\n\n mark_input_bins(node, ['weights'], 1)\n assign_dims_to_weights(node.in_node(1), [0, 1], [3], [2], 4)\n\n # cut shape input at port 0, it is already consumed\n node.graph.remove_edge(node.in_node(0).id, node.id)\n\n # reconnect input tensor from port 2 to port 0\n node.in_edge(2)['in'] = 0\n\n # OK, now we are sure this is a supported Deconvolution layer\n node.type = 'Deconvolution'\n node.op = 'Deconv2D'", "def closing2d(value, kernel, stride=1, padding=\"SAME\"):\n strides = [1, stride, stride, 1]\n rates = [1, 1, 1, 1]\n out = tf.nn.dilation2d(value, kernel, strides, rates, padding)\n out = tf.nn.erosion2d(out, kernel, strides, rates, padding)\n return out", "def Kernel(x, y):\n\n Result = (np.dot(x_train[x, :], x_train[y, :])+1)**5 # Polynomial\n #Result = (np.dot(x_train[x, :], x_train[y, :])+1) # Linear\n #Gaussian\n \"\"\"\n sigma = 1\n if np.ndim(x_train[x, :]) == 1 and np.ndim(x_train[y, :]) == 1:\n Result = np.exp(- (np.linalg.norm(x_train[x, :] - x_train[y, :], 2)) ** 2 / (2 * sigma ** 2))\n elif (np.ndim(x_train[x, :]) > 1 and np.ndim(x_train[y, :]) == 1) or (np.ndim(x_train[x, :]) == 1 and np.ndim(x_train[y, :]) > 1):\n Result = np.exp(- (np.linalg.norm(x_train[x, :] - x_train[y, :], 2, axis=1) ** 2) / (2 * sigma ** 2))\n elif np.ndim(x_train[x, :]) > 1 and np.ndim(x_train[y, :]) > 1:\n Result = np.exp(- (np.linalg.norm(x[:, np.newaxis] - y[np.newaxis, :], 2, axis=2) ** 2) / (2 * sigma ** 
2))\n \"\"\"\n return Result", "def _partial_trace_dense(p, dims, keep):\n if isinstance(keep, Integral):\n keep = (keep,)\n if isvec(p): # p = psi\n p = np.asarray(p).reshape(dims)\n lose = ind_complement(keep, len(dims))\n p = np.tensordot(p, p.conj(), (lose, lose))\n d = int(p.size**0.5)\n return p.reshape((d, d))\n else:\n p = np.asarray(p).reshape((*dims, *dims))\n total_dims = len(dims)\n lose = ind_complement(keep, total_dims)\n lose2 = tuple(ind + total_dims for ind in lose)\n p = itrace(p, (lose, lose2))\n d = int(p.size**0.5)\n return p.reshape((d, d))", "def ddx(n, dx, f):\n fx = np.zeros(n)\n for j in range(n):\n fx[j] = (f[get_index(j+1, n)]-f[get_index(j-1, n)])/(2*dx)\n return fx", "def backward_D(self):\n base_function._unfreeze(self.net_D)\n #print(self.input_P2.shape, self.img_gen.shape)\n self.loss_dis_img_gen = self.backward_D_basic(self.net_D, self.input_P2, self.img_gen)", "def Derivate2D(xdata, zdata, k=3, sigma=None, s=None, n=1):\r\n der = np.zeros_like(zdata)\r\n for u, i in enumerate(zdata):\r\n der[u] = Derivate(xdata, i, k=k, sigma=sigma, s=s, n=n)\r\n return der", "def drag_der(t, params):\n t_final = tf.cast(params['t_final'].get_value(), dtype=tf.float64)\n sigma = tf.cast(params['sigma'].get_value(), dtype=tf.float64)\n norm = (tf.sqrt(2 * np.pi * sigma ** 2)\n * tf.math.erf(t_final / (np.sqrt(8) * sigma))\n - t_final * tf.exp(-t_final ** 2 / (8 * sigma ** 2)))\n offset = tf.exp(-t_final ** 2 / (8 * sigma ** 2))\n der = - 2 * (tf.exp(-(t - t_final / 2) ** 2 / (2 * sigma ** 2)) - offset) \\\n * (np.exp(-(t - t_final / 2) ** 2 / (2 * sigma ** 2))) \\\n * (t - t_final / 2) / sigma ** 2 / norm\n return der", "def L1Uv2(A, d):\n n = shape(A)[0]\n for k in range(1,n):\n km = array([0, k - d]).max() # First index of r we need to update\n for r in range(km, k - 1):\n A[k, r] /= A[r, r]\n uk = array([k, r + d + 1]).min() # last index not included\n A[k, (r + 1):uk] -= A[r, (r + 1):uk]*A[k, r]\n A[k, k - 1] /= A[k - 1,k - 1] \n for r in range(km, k):\n uk = array([k + 1, r + d + 1]).min() # last index not included\n A[(r + 1):uk, k] -= A[(r + 1):uk, r]*A[r, k]", "def fdm_2d(N,L,x,y,h,k):\n\n # Create the Laplacian as a 1d sparse matrix using central difference\n ones = np.ones(N)\n diagvalues = np.array([ones,-2*ones,ones])\n offsets = np.array([-1,0,1])\n lap1d = sps.dia_matrix((diagvalues,offsets), shape=(N,N))/h**2\n \n # Represent 2d coordinates as kronecker sum\n lap = sps.kron(lap1d,sps.diags(np.ones(N))) + \\\n sps.kron(sps.diags(np.ones(N)),lap1d)\n \n # potential terms\n pot_x = np.repeat(x**2,N)\n pot_y = np.tile(y**2,N)\n\n # The whole Hamiltonian in matrix form\n A = (-1*lap + sps.diags(pot_x) + sps.diags(pot_y))/2\n\n # Calculate the k smallest eigenvalues and corresponding eigenvectors\n E, psi = eigsh(A,k=k,which='SM')\n\n\n # Perturbated potential\n a = 25\n pot_new = pot_x + pot_y + gauss_pert(N,a).flatten()\n\n # Plot the new potential\n X,Y = np.meshgrid(x,y)\n fig = plt.figure()\n ax = fig.add_subplot(1,2,1,projection='3d')\n ax.plot_surface(X, Y, pot_new.reshape((N,N)), cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n ax = fig.add_subplot(1,2,2)\n fig.suptitle(r'Potential with a Gaussian perturbation')\n ax.imshow(pot_new.reshape(N,N),extent=[-L/2,L/2,-L/2,L/2])\n plt.savefig(os.path.join(path,'perturbated_potential.png'))\n\n # The perturbated Hamiltonian in matrix form\n A = (-1*lap + sps.diags(pot_new))/2\n\n # Calculate the k smallest eigenvalues and corresponding eigenvector\n # Of the perturbated system\n E_p, psi_p = 
eigsh(A,k=k,which='SM')\n\n return E,psi,E_p,psi_p", "def gradient_descent_mse_gp(kernel_fn,\n x_train,\n y_train,\n x_test,\n get,\n diag_reg=0.0,\n compute_cov=False):\n if get is None:\n get = ('nngp', 'ntk')\n if isinstance(get, str):\n # NOTE: This seems like an ugly solution that involves an extra\n # indirection. It might be nice to clean it up.\n return lambda t: gradient_descent_mse_gp(\n kernel_fn,\n x_train,\n y_train,\n x_test,\n diag_reg=diag_reg,\n get=(get,),\n compute_cov=compute_cov)(t)[0]\n\n _, get = canonicalize_get(get)\n\n normalization = y_train.size\n op_fn = _make_inv_expm1_fn(normalization)\n\n eigenspace = {}\n\n kdd, ktd, ktt = _get_matrices(kernel_fn, x_train, x_test, get, compute_cov)\n gp_inference_mat = (_gp_inference_mat_jit_cpu if _is_on_cpu(kdd) else\n _gp_inference_mat_jit)\n\n @_jit_cpu(kdd)\n def predict(t=None):\n \"\"\"`t=None` is equivalent to infinite time and calls `gp_inference`.\"\"\"\n if t is None:\n return gp_inference_mat(kdd, ktd, ktt, y_train, get, diag_reg)\n\n if not eigenspace:\n for g in get:\n k = kdd.nngp if g == 'nngp' else kdd.ntk\n k_dd_plus_reg = _add_diagonal_regularizer(k, diag_reg)\n eigenspace[g] = _eigh(k_dd_plus_reg)\n\n out = {}\n\n if 'nngp' in get:\n evals, evecs = eigenspace['nngp']\n op_evals = -op_fn(evals, t)\n pred_mean = _mean_prediction_einsum(evecs, op_evals, ktd.nngp, y_train)\n if compute_cov:\n op_evals_x2 = -op_fn(evals, 2 * t)\n pred_cov = ktt - np.einsum(\n 'mj,ji,i,ki,lk->ml',\n ktd.nngp,\n evecs,\n op_evals_x2,\n evecs,\n ktd.nngp,\n optimize=True)\n\n out['nngp'] = Gaussian(pred_mean, pred_cov) if compute_cov else pred_mean\n\n if 'ntk' in get:\n evals, evecs = eigenspace['ntk']\n op_evals = -op_fn(evals, t)\n pred_mean = _mean_prediction_einsum(evecs, op_evals, ktd.ntk, y_train)\n if compute_cov:\n # inline the covariance calculation with einsum.\n term_1 = np.einsum(\n 'mi,i,ki,lk->ml', evecs, op_evals, evecs, ktd.ntk, optimize=True)\n pred_cov = np.einsum(\n 'ji,jk,kl->il', term_1, kdd.nngp, term_1, optimize=True)\n term_2 = np.einsum(\n 'mj,ji,i,ki,lk->ml',\n ktd.ntk,\n evecs,\n op_evals,\n evecs,\n ktd.nngp,\n optimize=True)\n term_2 += np.transpose(term_2)\n pred_cov += (-term_2 + ktt)\n\n out['ntk'] = Gaussian(pred_mean, pred_cov) if compute_cov else pred_mean\n\n returntype = named_tuple_factory('Gaussians', get)\n return returntype(*tuple(out[g] for g in get))\n\n return predict", "def deriv(func: Callable[[ndarrray], ndarray],\n input_: ndarray,\n delta: float = 0.001) -> ndarray:\n return (func(input_ + delta) - func(input_ - delta)) / (2*delta)", "def integrate_idemix_kernel(state):\n vs = state.variables\n settings = state.settings\n\n a_tri, b_tri, c_tri, d_tri, delta = (allocate(state.dimensions, (\"xt\", \"yt\", \"zt\"))[2:-2, 2:-2] for _ in range(5))\n forc = allocate(state.dimensions, (\"xt\", \"yt\", \"zt\"))\n maxE_iw = allocate(state.dimensions, (\"xt\", \"yt\", \"zt\"))\n\n \"\"\"\n forcing by EKE dissipation\n \"\"\"\n if settings.enable_eke:\n forc = vs.eke_diss_iw\n\n else: # shortcut without EKE model\n forc = vs.K_diss_gm + vs.K_diss_h - vs.P_diss_skew\n\n if settings.enable_store_cabbeling_heat:\n forc += -vs.P_diss_hmix - vs.P_diss_iso\n\n if settings.enable_eke and (settings.enable_eke_diss_bottom or settings.enable_eke_diss_surfbot):\n \"\"\"\n vertically integrate EKE dissipation and inject at bottom and/or surface\n \"\"\"\n a_loc = npx.sum(vs.dzw[npx.newaxis, npx.newaxis, :-1] * forc[:, :, :-1] * vs.maskW[:, :, :-1], axis=2)\n a_loc += 0.5 * forc[:, :, -1] * vs.maskW[:, 
:, -1] * vs.dzw[-1]\n\n forc = update(forc, at[...], 0.0)\n\n ks = npx.maximum(0, vs.kbot[2:-2, 2:-2] - 1)\n mask = ks[:, :, npx.newaxis] == npx.arange(settings.nz)[npx.newaxis, npx.newaxis, :]\n if settings.enable_eke_diss_bottom:\n forc = update(\n forc,\n at[2:-2, 2:-2, :],\n npx.where(\n mask, a_loc[2:-2, 2:-2, npx.newaxis] / vs.dzw[npx.newaxis, npx.newaxis, :], forc[2:-2, 2:-2, :]\n ),\n )\n else:\n forc = update(\n forc,\n at[2:-2, 2:-2, :],\n npx.where(\n mask,\n settings.eke_diss_surfbot_frac\n * a_loc[2:-2, 2:-2, npx.newaxis]\n / vs.dzw[npx.newaxis, npx.newaxis, :],\n forc[2:-2, 2:-2, :],\n ),\n )\n forc = update(\n forc,\n at[2:-2, 2:-2, -1],\n (1.0 - settings.eke_diss_surfbot_frac) * a_loc[2:-2, 2:-2] / (0.5 * vs.dzw[-1]),\n )\n\n \"\"\"\n forcing by bottom friction\n \"\"\"\n if not settings.enable_store_bottom_friction_tke:\n forc = forc + vs.K_diss_bot\n\n \"\"\"\n prevent negative dissipation of IW energy\n \"\"\"\n maxE_iw = npx.maximum(0.0, vs.E_iw[:, :, :, vs.tau])\n\n \"\"\"\n vertical diffusion and dissipation is solved implicitly\n \"\"\"\n _, water_mask, edge_mask = utilities.create_water_masks(vs.kbot[2:-2, 2:-2], settings.nz)\n\n delta = update(\n delta,\n at[:, :, :-1],\n settings.dt_tracer\n * settings.tau_v\n / vs.dzt[npx.newaxis, npx.newaxis, 1:]\n * 0.5\n * (vs.c0[2:-2, 2:-2, :-1] + vs.c0[2:-2, 2:-2, 1:]),\n )\n delta = update(delta, at[:, :, -1], 0.0)\n a_tri = update(\n a_tri, at[:, :, 1:-1], -delta[:, :, :-2] * vs.c0[2:-2, 2:-2, :-2] / vs.dzw[npx.newaxis, npx.newaxis, 1:-1]\n )\n a_tri = update(a_tri, at[:, :, -1], -delta[:, :, -2] / (0.5 * vs.dzw[-1:]) * vs.c0[2:-2, 2:-2, -2])\n b_tri = update(\n b_tri,\n at[:, :, 1:-1],\n 1\n + delta[:, :, 1:-1] * vs.c0[2:-2, 2:-2, 1:-1] / vs.dzw[npx.newaxis, npx.newaxis, 1:-1]\n + delta[:, :, :-2] * vs.c0[2:-2, 2:-2, 1:-1] / vs.dzw[npx.newaxis, npx.newaxis, 1:-1]\n + settings.dt_tracer * vs.alpha_c[2:-2, 2:-2, 1:-1] * maxE_iw[2:-2, 2:-2, 1:-1],\n )\n b_tri = update(\n b_tri,\n at[:, :, -1],\n 1\n + delta[:, :, -2] / (0.5 * vs.dzw[-1:]) * vs.c0[2:-2, 2:-2, -1]\n + settings.dt_tracer * vs.alpha_c[2:-2, 2:-2, -1] * maxE_iw[2:-2, 2:-2, -1],\n )\n b_tri_edge = (\n 1\n + delta / vs.dzw * vs.c0[2:-2, 2:-2, :]\n + settings.dt_tracer * vs.alpha_c[2:-2, 2:-2, :] * maxE_iw[2:-2, 2:-2, :]\n )\n c_tri = update(\n c_tri, at[:, :, :-1], -delta[:, :, :-1] / vs.dzw[npx.newaxis, npx.newaxis, :-1] * vs.c0[2:-2, 2:-2, 1:]\n )\n d_tri = update(d_tri, at[...], vs.E_iw[2:-2, 2:-2, :, vs.tau] + settings.dt_tracer * forc[2:-2, 2:-2, :])\n d_tri_edge = (\n d_tri + settings.dt_tracer * vs.forc_iw_bottom[2:-2, 2:-2, npx.newaxis] / vs.dzw[npx.newaxis, npx.newaxis, :]\n )\n d_tri = update_add(d_tri, at[:, :, -1], settings.dt_tracer * vs.forc_iw_surface[2:-2, 2:-2] / (0.5 * vs.dzw[-1:]))\n\n sol = utilities.solve_implicit(\n a_tri, b_tri, c_tri, d_tri, water_mask, b_edge=b_tri_edge, d_edge=d_tri_edge, edge_mask=edge_mask\n )\n vs.E_iw = update(vs.E_iw, at[2:-2, 2:-2, :, vs.taup1], npx.where(water_mask, sol, vs.E_iw[2:-2, 2:-2, :, vs.taup1]))\n\n \"\"\"\n store IW dissipation\n \"\"\"\n vs.iw_diss = vs.alpha_c * maxE_iw * vs.E_iw[..., vs.taup1]\n\n \"\"\"\n add tendency due to lateral diffusion\n \"\"\"\n flux_east = allocate(state.dimensions, (\"xt\", \"yt\", \"zt\"))\n flux_north = allocate(state.dimensions, (\"xt\", \"yt\", \"zt\"))\n flux_top = allocate(state.dimensions, (\"xt\", \"yt\", \"zt\"))\n\n if settings.enable_idemix_hor_diffusion:\n flux_east = update(\n flux_east,\n at[:-1, :, :],\n settings.tau_h\n * 0.5\n * (vs.v0[1:, :, :] + vs.v0[:-1, 
:, :])\n * (vs.v0[1:, :, :] * vs.E_iw[1:, :, :, vs.tau] - vs.v0[:-1, :, :] * vs.E_iw[:-1, :, :, vs.tau])\n / (vs.cost[npx.newaxis, :, npx.newaxis] * vs.dxu[:-1, npx.newaxis, npx.newaxis])\n * vs.maskU[:-1, :, :],\n )\n\n flux_north = update(\n flux_north,\n at[:, :-1, :],\n settings.tau_h\n * 0.5\n * (vs.v0[:, 1:, :] + vs.v0[:, :-1, :])\n * (vs.v0[:, 1:, :] * vs.E_iw[:, 1:, :, vs.tau] - vs.v0[:, :-1, :] * vs.E_iw[:, :-1, :, vs.tau])\n / vs.dyu[npx.newaxis, :-1, npx.newaxis]\n * vs.maskV[:, :-1, :]\n * vs.cosu[npx.newaxis, :-1, npx.newaxis],\n )\n flux_north = update(flux_north, at[:, -1, :], 0.0)\n vs.E_iw = update_add(\n vs.E_iw,\n at[2:-2, 2:-2, :, vs.taup1],\n settings.dt_tracer\n * vs.maskW[2:-2, 2:-2, :]\n * (\n (flux_east[2:-2, 2:-2, :] - flux_east[1:-3, 2:-2, :])\n / (vs.cost[npx.newaxis, 2:-2, npx.newaxis] * vs.dxt[2:-2, npx.newaxis, npx.newaxis])\n + (flux_north[2:-2, 2:-2, :] - flux_north[2:-2, 1:-3, :])\n / (vs.cost[npx.newaxis, 2:-2, npx.newaxis] * vs.dyt[npx.newaxis, 2:-2, npx.newaxis])\n ),\n )\n\n \"\"\"\n add tendency due to advection\n \"\"\"\n if settings.enable_idemix_superbee_advection:\n flux_east, flux_north, flux_top = advection.adv_flux_superbee_wgrid(state, vs.E_iw[:, :, :, vs.tau])\n\n if settings.enable_idemix_upwind_advection:\n flux_east, flux_north, flux_top = advection.adv_flux_upwind_wgrid(state, vs.E_iw[:, :, :, vs.tau])\n\n if settings.enable_idemix_superbee_advection or settings.enable_idemix_upwind_advection:\n vs.dE_iw = update(\n vs.dE_iw,\n at[2:-2, 2:-2, :, vs.tau],\n vs.maskW[2:-2, 2:-2, :]\n * (\n -(flux_east[2:-2, 2:-2, :] - flux_east[1:-3, 2:-2, :])\n / (vs.cost[npx.newaxis, 2:-2, npx.newaxis] * vs.dxt[2:-2, npx.newaxis, npx.newaxis])\n - (flux_north[2:-2, 2:-2, :] - flux_north[2:-2, 1:-3, :])\n / (vs.cost[npx.newaxis, 2:-2, npx.newaxis] * vs.dyt[npx.newaxis, 2:-2, npx.newaxis])\n ),\n )\n vs.dE_iw = update_add(vs.dE_iw, at[:, :, 0, vs.tau], -flux_top[:, :, 0] / vs.dzw[0:1])\n vs.dE_iw = update_add(\n vs.dE_iw,\n at[:, :, 1:-1, vs.tau],\n -(flux_top[:, :, 1:-1] - flux_top[:, :, :-2]) / vs.dzw[npx.newaxis, npx.newaxis, 1:-1],\n )\n vs.dE_iw = update_add(\n vs.dE_iw, at[:, :, -1, vs.tau], -(flux_top[:, :, -1] - flux_top[:, :, -2]) / (0.5 * vs.dzw[-1:])\n )\n\n \"\"\"\n Adam Bashforth time stepping\n \"\"\"\n vs.E_iw = update_add(\n vs.E_iw,\n at[:, :, :, vs.taup1],\n settings.dt_tracer\n * (\n (1.5 + settings.AB_eps) * vs.dE_iw[:, :, :, vs.tau]\n - (0.5 + settings.AB_eps) * vs.dE_iw[:, :, :, vs.taum1]\n ),\n )\n\n return KernelOutput(E_iw=vs.E_iw, dE_iw=vs.dE_iw, iw_diss=vs.iw_diss)", "def f() -> None:\n x = torch.rand(7, 3).to(self.device)\n kernel = ScaleKernel(MaternKernel())\n allocator = GreedyVarianceReduction()\n allocator.allocate_inducing_points(x, kernel, 4, x.shape[:-2])", "def integrate_monte_carlo_nd(f, dim, limit, N=1000000):\n I, sum = 1/N, 0\n for n in range(dim):\n I *= (limit[n][1] - limit[n][0])\n\n for k in range(N):\n x = []\n for n in range(dim):\n x += [limit[n][0] + (limit[n][1] - limit[n][0])*rnd.random()]\n\n sum += f(x)\n return I*sum", "def disparitySSD(img_l: np.ndarray, img_r: np.ndarray, disp_range: (int, int), k_size: int) -> np.ndarray:\r\n kernel_half = int ((k_size*2 + 1) //2)\r\n w , h = img_r.shape\r\n # the depth of the image\r\n depth = np.zeros((w , h))\r\n for y in range (kernel_half, (w - kernel_half)): # iterate through the rows\r\n for x in range(kernel_half, (h - kernel_half)): # iterate through the columns\r\n best_offset = 0\r\n pixel = 0\r\n prev_ssd = 654354\r\n for offset in range(disp_range[0], 
disp_range[1]): # check the kernel which is exit in this range\r\n ssd = 0\r\n for v in range(-kernel_half, kernel_half):\r\n for u in range(-kernel_half , kernel_half):\r\n # calculate the difference between the left and right kernel and then make the disp point to be\r\n # the the offset with the minimum SSD (Sum of square difference)\r\n # arg_min =>(I_left(x , y) - I_right (x + v, y +u))^2\r\n ssd += (img_r [y+v, x+u] - img_l[(y + v), (x + u) - offset])**2\r\n if ssd < prev_ssd:\r\n prev_ssd = ssd\r\n best_offset = offset\r\n\r\n depth[y, x] = best_offset\r\n\r\n print(depth)\r\n\r\n return depth\r\n pass", "def exterior(n):\n\n def inner(x):\n return n * x\n\n return inner", "def _relax_convolve(self, n=1):\n\n self.level.mid.reshape(-1)[:] = \\\n - self.stencil.eval_convolve(self.lvl_view) \\\n * self.omega / self.center_value", "def grad_per_dim(f, dim):\n grad_ops = (\n lambda f: self._kernel_op.apply_kernel_op_x(f, 'kDx'),\n lambda f: self._kernel_op.apply_kernel_op_y(f, 'kDy'),\n lambda f: self._kernel_op.apply_kernel_op_z(f, 'kDz', 'kDzsh'),\n )\n return tf.nest.map_structure(\n lambda grad: grad / (2.0 * grid_spacing[dim]), grad_ops[dim](f))", "def disc_2d(self):\n for i in range(0, self.nt):\n pd = self.p.copy()\n\n self.p[1: -1, 1: -1] = (((pd[1: -1, 2:] + pd[1: -1, :-2]) * self.dy**2 +\n (pd[2:, 1: -1] + pd[:-2, 1: -1]) * self.dx**2 -\n self.b[1: -1, 1: -1] * self.dx**2 * self.dy**2) /\n (2 * (self.dx**2 + self.dy**2)))\n\n self.p[0, :] = 0\n self.p[self.grid_points_y-1, :] = 0\n self.p[:, 0] = 0\n self.p[:, self.grid_points_x-1] = 0", "def relax(self,n=1):\n # print(\"putin\", self.level.rhs.reshape(-1)[:])\n # print(\"getout\", self.solver(self.level.rhs.reshape(-1)))\n for i in range(n):\n self.level.mid[:] += (self.solver(self.level.rhs.reshape(-1)) -\n self.solver(self.stencil.eval_convolve(\n self.level.evaluable_view(self.stencil)).reshape(-1))).reshape(self.level.mid.shape)", "def difference_of_gauss_kernel(radius, scale_step, n_sigmas=8):\n sizex = int(n_sigmas * scale_step * radius)\n sizey = int(n_sigmas * scale_step * radius)\n radius = float(radius)\n xc = 0.5 * sizex\n yc = 0.5 * sizey\n y, x = np.mgrid[0:sizey - 1, 0:sizex - 1]\n x = x - xc\n y = y - yc\n x1 = x / radius\n y1 = y / radius\n g1 = np.exp(-0.5 * (x1 ** 2 + y1 ** 2))\n g1 = g1 / (2 * np.pi * radius ** 2) # g1.sum()\n x1 = x1 / scale_step\n y1 = y1 / scale_step\n g2 = np.exp(-0.5 * (x1 ** 2 + y1 ** 2))\n g2 = g2 / (2 * np.pi * radius ** 2 * scale_step ** 2) # g2.sum()\n return g1 - g2", "def add_dcdisorder(self):\n # Add gaussian noise to pinning energies\n if self.lp['V0_pin_gauss'] > 0:\n self.Omg += self.lp['V0_pin_gauss']*np.random.randn(len(self.xy_inner))\n raise RuntimeError('Adding gaussian disorder: are you sure you want to proceed?')\n if self.lp['V0_spring_gauss'] > 0:\n print 'This is not done correctly here'\n self.OmK += self.lp['V0_spring_gauss'] * np.random.randn(np.shape(self.lattice.KL)[0],\n np.shape(self.lattice.KL)[1])\n sys.exit()\n\n if self.lp['V0_pin_flat'] > 0 or self.lp['V0_spring_flat'] > 0:\n # Note that we multiply by two so that V0_pin_flat is the HALF width of the distribution\n flat_disorder = (np.random.rand(len(self.xy_inner))) * 2 - 1.0\n\n if self.lp['V0_pin_flat'] > 0:\n self.Omg += self.lp['V0_pin_flat'] * flat_disorder\n if self.lp['Omg'] < 0:\n self.Omg[self.Omg > 0] = 0.\n elif self.lp['Omg'] > 0:\n self.Omg[self.Omg < 0] = 0.\n\n print('magnetic_gyro_lattice_class.py: V0_pin_flat=', self.lp['V0_pin_flat'])\n print(self.Omg)\n\n if self.lp['V0_spring_flat'] 
> 0:\n to_add = self.lp['V0_spring_flat'] * flat_disorder[:, np.newaxis] * np.ones_like(self.OmK)\n self.OmK[np.abs(self.OmK) > 0] += to_add[np.abs(self.OmK) > 0]\n if self.lp['Omk'] < 0:\n self.OmK[self.OmK > 0] = 0.\n elif self.lp['Omk'] > 0:\n self.OmK[self.OmK < 0] = 0.\n\n print('magnetic_gyro_lattice_class.py: OmK after flat disorder= ')\n print(self.OmK)\n # sys.exit()", "def neldermead(func, x0s,\n ftol=1e-2, maxfev=500):\n\n fcalls = 0\n x0s = np.asarray(x0s)\n M, N = x0s.shape\n if M!=N+1:\n raise ValueError(\"x0s must be N+1 points of dimension N\")\n\n rho = 1\n chi = 2\n psi = 0.5\n sigma = 0.5\n one2np1 = list(range(1, N + 1))\n\n sim = np.zeros((N + 1, N), dtype=x0s.dtype)\n fsim = np.zeros((N + 1,), float)\n for i in range(N+1):\n sim[i] = x0s[i]\n fsim[i] = func(sim[i])\n fcalls += 1\n\n # sort so sim[0,:] has the lowest function value\n ind = np.argsort(fsim)\n fsim = np.take(fsim, ind, 0)\n sim = np.take(sim, ind, 0)\n\n while (fcalls < maxfev):\n if np.max(np.abs(fsim[0] - fsim[1:])) <= ftol:\n break\n\n xbar = np.add.reduce(sim[:-1], 0) / N\n xr = (1 + rho)*xbar - rho*sim[-1]\n fxr = func(xr)\n fcalls += 1\n doshrink = 0\n\n if fxr < fsim[0]:\n xe = (1 + rho*chi)*xbar - rho*chi*sim[-1]\n fxe = func(xe)\n fcalls += 1\n\n if fxe < fxr:\n sim[-1] = xe\n fsim[-1] = fxe\n else:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fsim[0] <= fxr\n if fxr < fsim[-2]:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fxr >= fsim[-2]\n # Perform contraction\n if fxr < fsim[-1]:\n xc = (1 + psi*rho)*xbar - psi*rho*sim[-1]\n fxc = func(xc)\n fcalls += 1\n\n if fxc <= fxr:\n sim[-1] = xc\n fsim[-1] = fxc\n else:\n doshrink = 1\n else:\n # Perform an inside contraction\n xcc = (1 - psi)*xbar + psi*sim[-1]\n fxcc = func(xcc)\n fcalls += 1\n\n if fxcc < fsim[-1]:\n sim[-1] = xcc\n fsim[-1] = fxcc\n else:\n doshrink = 1\n\n if doshrink:\n for j in one2np1:\n sim[j] = sim[0] + sigma*(sim[j] - sim[0])\n fsim[j] = func(sim[j])\n fcalls += 1\n\n ind = np.argsort(fsim)\n sim = np.take(sim, ind, 0)\n fsim = np.take(fsim, ind, 0)\n\n x = sim[0]\n fval = fsim[0]\n return x, fval", "def dV(X):\n return -4 * a * np.power(X, 3) + 2 * b * X", "def _special_diop_DN(D, N):\n\n # The following assertion was removed for efficiency, with the understanding\n # that this method is not called directly. 
The parent method, `diop_DN`\n # is responsible for performing the appropriate checks.\n #\n # assert (1 < N**2 < D) and (not integer_nthroot(D, 2)[1])\n\n sqrt_D = sqrt(D)\n F = [(N, 1)]\n f = 2\n while True:\n f2 = f**2\n if f2 > abs(N):\n break\n n, r = divmod(N, f2)\n if r == 0:\n F.append((n, f))\n f += 1\n\n P = 0\n Q = 1\n G0, G1 = 0, 1\n B0, B1 = 1, 0\n\n solutions = []\n\n i = 0\n while True:\n a = floor((P + sqrt_D) / Q)\n P = a*Q - P\n Q = (D - P**2) // Q\n G2 = a*G1 + G0\n B2 = a*B1 + B0\n\n for n, f in F:\n if G2**2 - D*B2**2 == n:\n solutions.append((f*G2, f*B2))\n\n i += 1\n if Q == 1 and i % 2 == 0:\n break\n\n G0, G1 = G1, G2\n B0, B1 = B1, B2\n\n return solutions", "def fourth_order_derivative(arr: np.ndarray, dim=0, isglobal=True):\n # assert isinstance(arr, np.ndarray), 'Input must be numpy array'\n output = np.zeros_like(arr)\n\n if dim == 0:\n ysize = np.shape(arr)[0]\n for lat_idx in range(2, np.shape(arr)[0] - 2):\n for lon_idx in range(np.shape(arr)[1]):\n output[lat_idx, lon_idx] = (4 / 3) * (arr[(lat_idx + 1), lon_idx] -\n arr[(lat_idx - 1), lon_idx]) / 2 \\\n - (1 / 3) * (arr[(lat_idx + 2), lon_idx] -\n arr[(lat_idx - 2), lon_idx]) / 4\n\n # First order uncentered derivative for points close to the poles\n for lat_idx in [0, 1]:\n for lon_idx in range(np.shape(arr)[1]):\n output[lat_idx, lon_idx] = (arr[(lat_idx + 1), lon_idx] -\n arr[lat_idx, lon_idx]) / 2\n for lat_idx in [-1, -2]:\n for lon_idx in range(np.shape(arr)[1]):\n output[lat_idx, lon_idx] = (arr[lat_idx, lon_idx] -\n arr[lat_idx - 1, lon_idx]) / 2\n elif dim == 1:\n xsize = np.shape(arr)[1]\n if isglobal:\n for lat_idx in range(np.shape(arr)[0]):\n\n for lon_idx in range(np.shape(arr)[1]):\n\n output[lat_idx, lon_idx] = (4 / 3) * (arr[lat_idx, (lon_idx + 1) % xsize] -\n arr[lat_idx, (lon_idx - 1) % xsize]) / 2 \\\n - (1 / 3) * (arr[lat_idx, (lon_idx + 2) % xsize] -\n arr[lat_idx, (lon_idx - 2) % xsize]) / 4\n else:\n for lat_idx in range(np.shape(arr)[0]):\n for lon_idx in range(2, np.shape(arr)[1] - 2):\n output[lat_idx, lon_idx] = (4 / 3) * (arr[lat_idx, (lon_idx + 1)] -\n arr[lat_idx, (lon_idx - 1)]) / 2 \\\n - (1 / 3) * (arr[lat_idx, (lon_idx + 2)] -\n arr[lat_idx, (lon_idx - 2)]) / 4\n # First order uncentered derivative for points close to the bondaries\n for lon_idx in [0, 1]:\n for lat_idx in range(np.shape(arr)[0]):\n output[lat_idx, lon_idx] = (arr[lat_idx, lon_idx+1] -\n arr[lat_idx, lon_idx]) / 2\n for lon_idx in [-1, -2]:\n for lat_idx in range(np.shape(arr)[0]):\n output[lat_idx, lon_idx] = (arr[lat_idx, lon_idx] -\n arr[lat_idx, lon_idx-1]) / 2\n return output", "def gkern2d(kernlen=21, nsig=3):\n x = np.linspace(-nsig, nsig, kernlen+1)\n kern1d = np.diff(st.norm.cdf(x))\n kern2d = np.outer(kern1d, kern1d)\n return kern2d/kern2d.max()", "def gradient_nD(stack):\n # Convert for 64-bit to avoid large number problems in squares.\n stack = np.copy(stack)\n stack = stack.astype(np.float64)\n sumsq = ndi.filters.sobel(stack, axis=0) ** 2\n for d in range(1, stack.ndim):\n sumsq = sumsq + (ndi.filters.sobel(stack, axis=d) ** 2)\n gradient = np.sqrt(sumsq)\n return gradient", "def f2d(t, Ntot0, float_params, int_params, sigmastep):\n \n # unpack parameters\n Nbar, Nstar, sigma0, deprate, DoverdeltaX2 = float_params \n nx, ny = int_params\n\n # unpack current values of y\n Fliq0 = Nbar - Nstar * np.sin(2*np.pi*(Ntot0))\n \n # Deposition\n delta = (Fliq0 - (Nbar - Nstar))/(2*Nstar)\n sigD = (sigmastep - delta * sigma0)/(1+delta*sigma0)\n depsurf = deprate * sigD\n\n dNtot_dt = depsurf\n\n # 
Diffusion\n dy = np.reshape(np.ascontiguousarray(diffuse_2d(t, np.reshape(np.ascontiguousarray(Fliq0),nx*ny), DoverdeltaX2, np.array((nx,ny)))), (nx,ny))\n # Combined\n dNtot_dt += dy\n\n # Package for output\n derivs = dNtot_dt.flatten() \n return derivs", "def weak_lensing_kernel(cosmo, pzs, z, ell):\n z = np.atleast_1d(z)\n zmax = max([pz.zmax for pz in pzs])\n # Retrieve comoving distance corresponding to z\n chi = bkgrd.radial_comoving_distance(cosmo, z2a(z))\n\n # Extract the indices of pzs that can be treated as extended distributions,\n # and the ones that need to be treated as delta functions.\n pzs_extended_idx = [\n i for i, pz in enumerate(pzs) if not isinstance(pz, rds.delta_nz)\n ]\n pzs_delta_idx = [i for i, pz in enumerate(pzs) if isinstance(pz, rds.delta_nz)]\n # Here we define a permutation that would put all extended pzs at the begining of the list\n perm = pzs_extended_idx + pzs_delta_idx\n # Compute inverse permutation\n inv = np.argsort(np.array(perm, dtype=np.int32))\n\n # Process extended distributions, if any\n radial_kernels = []\n if len(pzs_extended_idx) > 0:\n\n @vmap\n def integrand(z_prime):\n chi_prime = bkgrd.radial_comoving_distance(cosmo, z2a(z_prime))\n # Stack the dndz of all redshift bins\n dndz = np.stack([pzs[i](z_prime) for i in pzs_extended_idx], axis=0)\n return dndz * np.clip(chi_prime - chi, 0) / np.clip(chi_prime, 1.0)\n\n radial_kernels.append(simps(integrand, z, zmax, 256) * (1.0 + z) * chi)\n # Process single plane redshifts if any\n if len(pzs_delta_idx) > 0:\n\n @vmap\n def integrand_single(z_prime):\n chi_prime = bkgrd.radial_comoving_distance(cosmo, z2a(z_prime))\n return np.clip(chi_prime - chi, 0) / np.clip(chi_prime, 1.0)\n\n radial_kernels.append(\n integrand_single(np.array([pzs[i].params[0] for i in pzs_delta_idx]))\n * (1.0 + z)\n * chi\n )\n # Fusing the results together\n radial_kernel = np.concatenate(radial_kernels, axis=0)\n # And perfoming inverse permutation to put all the indices where they should be\n radial_kernel = radial_kernel[inv]\n\n # Constant term\n constant_factor = 3.0 * const.H0 ** 2 * cosmo.Omega_m / 2.0 / const.c\n # Ell dependent factor\n ell_factor = np.sqrt((ell - 1) * (ell) * (ell + 1) * (ell + 2)) / (ell + 0.5) ** 2\n return constant_factor * ell_factor * radial_kernel", "def _partial_derivative_f2(self, f1, f2, m_star, n):\r\n if f1 > 0 and f2 > 0:\r\n a_0 = self._calculate_a_0(f1, f2, n)\r\n term1 = (f1 ** 2) * (1 - a_0 ** m_star)\r\n term2 = 2 * (f2 ** 2)\r\n term3 = (m_star * f1) * (a_0 ** (m_star - 1))\r\n term4 = n * f2\r\n return 1 - (term1 / term2) + (term3 / term4)\r\n else:\r\n a_1 = self._calculate_a_1(f1, f2, n)\r\n term1 = (m_star * f1) * a_1 ** (m_star - 1)\r\n term2 = n * (f2 + 1)\r\n term3 = (f1 * (f1 - 1)) * (1 - a_1 ** m_star)\r\n term4 = 2 * (f2 + 1) ** 2\r\n return 1 + (term1 / term2) - (term3 / term4)", "def deconvolve(num, den, n=None):\n num = np.atleast_1d(num)\n den = np.atleast_1d(den)\n N = len(num)\n D = len(den)\n if D > N and n is None:\n quot = []\n rem = num\n else:\n if n is None:\n n = N - D + 1\n input = np.zeros(n, float)\n input[0] = 1\n quot = signal.lfilter(num, den, input)\n num_approx = signal.convolve(den, quot, mode=\"full\")\n if len(num) < len(num_approx): # 1d only ?\n num = np.concatenate((num, np.zeros(len(num_approx) - len(num))))\n rem = num - num_approx\n return quot, rem", "def DisLayerSN(ndf, k):\n d_in = 2**k \n d_out = 2**(k+1)\n\n out = nn.Sequential(nn.utils.spectral_norm(\n nn.Conv2d(ndf*d_in, ndf*d_out, kernel_size, stride=stride, 
padding=padding, bias=False)), \n nn.BatchNorm2d(ndf * d_out), \n nn.LeakyReLU(0.2, inplace=True) )\n return out", "def rho2(x,m_ind):\n \n f = 0.0\n for k_ind in range(cfg.nomax):\n f -= concave_piece(x,k_ind,m_ind) \n\n return f", "def wood_drum_env(N, sr):\n ## TODO: Fill this in\n return np.zeros(N)", "def lie_derivative(h, f, x, n):\n if n == 0:\n return h\n elif n == 1:\n return h.jacobian(x) * f\n else:\n return lie_derivative(lie_derivative(h, f, x, 1), f, x, n - 1)", "def kernel(r, h, deriv):\n return {\n '0': h**-1 / np.sqrt(np.pi) * np.exp(-r**2/h**2),\n '1': h**-3 / np.sqrt(np.pi) * np.exp(-r**2/h**2) * (-2*r),\n '2': h**-5 / np.sqrt(np.pi) * np.exp(-r**2/h**2) * ( 4*r**2 - 2*h**2),\n '3': h**-7 / np.sqrt(np.pi) * np.exp(-r**2/h**2) * (-8*r**3 + 12*h**2*r)\n }[deriv]", "def davidson(mult_by_A, N, neig, x0=None, Adiag=None, verbose=logger.INFO):\n\n if isinstance(verbose, logger.Logger):\n log = verbose\n else:\n import sys\n log = logger.Logger(sys.stdout, verbose)\n\n cput1 = (logger.process_clock(), logger.perf_counter())\n\n Mmin = min(neig,N)\n Mmax = min(N,2000)\n tol = 1e-6\n\n #Adiagcheck = np.zeros(N,np.complex128)\n #for i in range(N):\n # test = np.zeros(N,np.complex128)\n # test[i] = 1.0\n # Adiagcheck[i] = mult_by_A(test)[i]\n #print \"Analytical Adiag == numerical Adiag?\", np.allclose(Adiag,Adiagcheck)\n\n if Adiag is None:\n Adiag = np.zeros(N,np.complex128)\n for i in range(N):\n test = np.zeros(N,np.complex128)\n test[i] = 1.0\n Adiag[i] = mult_by_A(test)[i]\n\n xi = np.zeros(N,np.complex128)\n\n lamda_k_old = 0\n lamda_k = 0\n target = 0\n conv = False\n if x0 is not None:\n assert x0.shape == (N, Mmin)\n b = x0.copy()\n\n Ab = np.zeros((N,Mmin),np.complex128)\n for m in range(Mmin):\n Ab[:,m] = mult_by_A(b[:,m])\n\n for istep,M in enumerate(range(Mmin,Mmax+1)):\n if M == Mmin:\n # Set of M unit vectors from lowest Adiag (NxM)\n b = np.zeros((N,M))\n idx = Adiag.argsort()\n for m,i in zip(range(M),idx):\n b[i,m] = 1.0\n ## Add random noise and orthogonalize\n #for m in range(M):\n # b[:,m] += 0.01*np.random.random(N)\n # b[:,m] /= np.linalg.norm(b[:,m])\n # b,R = np.linalg.qr(b)\n\n Ab = np.zeros((N,M),np.complex128)\n for m in range(M):\n Ab[:,m] = mult_by_A(b[:,m])\n else:\n Ab = np.column_stack( (Ab,mult_by_A(b[:,M-1])) )\n\n Atilde = np.dot(b.conj().transpose(),Ab)\n lamda, alpha = diagonalize_asymm(Atilde)\n lamda_k_old, lamda_k = lamda_k, lamda[target]\n alpha_k = alpha[:,target]\n\n if M == Mmax:\n break\n\n q = np.dot( Ab-lamda_k*b, alpha_k )\n log.info('davidson istep = %d root = %d E = %.15g dE = %.9g residual = %.6g',\n istep, target, lamda_k.real, (lamda_k - lamda_k_old).real, np.linalg.norm(q))\n cput1 = log.timer('davidson iter', *cput1)\n if np.linalg.norm(q) < tol:\n if target == neig-1:\n conv = True\n break\n else:\n target += 1\n #for i in range(N):\n #eps = 0.\n #if np.allclose(lamda_k,Adiag[i]):\n # eps = 1e-10\n #xi[i] = q[i]/(lamda_k-Adiag[i]+eps)\n eps = 1e-10\n xi = q/(lamda_k-Adiag+eps)\n\n # orthonormalize xi wrt b\n bxi,R = np.linalg.qr(np.column_stack((b,xi)))\n if FOUND_MPI4PY: # Ensure all processes search in same direction\n bxi = MPI_COMM.bcast(bxi)\n\n # append orthonormalized xi to b\n b = np.column_stack((b,bxi[:,-1]))\n\n #if M > Mmin and M == Mmax:\n # print(\"WARNING: Davidson algorithm reached max basis size \"\n # \"M = %d without converging.\"%(M))\n\n # Express alpha in original basis\n evecs = np.dot(b,alpha) # b is N x M, alpha is M x M\n return conv, lamda[:neig], evecs[:,:neig], istep", "def kernel_diff(self, x, 
kernel_res, arg):\n x = np.atleast_2d(x)\n n, d = x.shape\n\n kxx, k_xx, k_x_x = kernel_res\n\n assert_shape(kxx, (n, n))\n assert_shape(k_xx, (n, n, d))\n assert_shape(k_x_x, (n, n, d))\n\n if arg == 0:\n res = kxx[:, :, np.newaxis] - k_xx\n\n elif arg == 1:\n res = kxx[:, :, np.newaxis] - k_xx.swapaxes(0, 1)\n\n else:\n raise ValueError(\"arg = %d not recognized!\" % arg)\n\n return res", "def auxmaxrho2(x,m_ind):\n \n f = 0.0\n for k_ind in range(cfg.nomax):\n f -= auxmax_cc_piece(x,k_ind,m_ind) \n\n return f", "def dndt2(jx, iy, h, n, u, v, dx, dy) :\n p5 = np.float32(0.5)\n depth_jm0im0 = h[jx, iy ]+n[jx, iy]\n depth_jp1im0 = h[jx+1,iy] +n[jx+1,iy]\n depth_jm1im0 = h[jx-1,iy] +n[jx-1,iy]\n depth_jm0ip1 = h[jx, iy+1]+n[jx, iy+1]\n depth_jm0im1 = h[jx, iy-1]+n[jx, iy-1]\n \n hx_jp1 = u[jx+1,iy]*(depth_jm0im0 + depth_jp1im0)*p5\n hx_jm0 = u[jx, iy]*(depth_jm1im0 + depth_jm0im0)*p5\n \n \n hy_ip1 = v[jx,iy+1]*(depth_jm0im0 + depth_jm0ip1)*p5\n hy_im0 = v[jx,iy ]*(depth_jm0im1 + depth_jm0im0)*p5\n \n # assume u and v are zero on edge\n dhx = (hx_jp1-hx_jm0)/dx#[jx,iy]\n dhy = (hy_ip1-hy_im0)/dy#[jx,iy]\n\n \n return ( -dhx-dhy )", "def p_EPR(d, N):\n return sum(\n d ** -N * (weyl(d, alpha) * specht(alpha) * N) / (alpha[0] + d)\n for alpha in Partitions(n=N - 1, max_length=d)\n )", "def pad_periodic_2d(\n tensor: tf.Tensor,\n kernel_size: Sequence[int],\n shifts: Sequence[int] = (0, 0),\n) -> tf.Tensor:\n if len(tensor.shape) != 4:\n raise ValueError('tensor has wrong number of dimensions: {}'.format(tensor))\n\n paddings = paddings_for_conv2d(kernel_size, shifts)\n result = pad_periodic(tensor, paddings)\n return result" ]
[ "0.63502026", "0.60510343", "0.5926386", "0.59101856", "0.5898114", "0.56899774", "0.56315917", "0.5616251", "0.558586", "0.55735755", "0.55652493", "0.55386996", "0.5501476", "0.54612076", "0.54292554", "0.54179746", "0.54039854", "0.5398605", "0.5376709", "0.53656363", "0.5358466", "0.53420204", "0.53278977", "0.53239286", "0.5309184", "0.5269692", "0.52427894", "0.5233143", "0.52239406", "0.5203497", "0.5191658", "0.5175113", "0.51484084", "0.5147682", "0.51442325", "0.5140306", "0.5131266", "0.51310915", "0.5122376", "0.5121363", "0.51186824", "0.5117988", "0.5112367", "0.5090568", "0.5077127", "0.50673455", "0.50593543", "0.505724", "0.505322", "0.50488275", "0.50471026", "0.50456345", "0.503287", "0.50271255", "0.5017941", "0.50128764", "0.50068885", "0.5001978", "0.5000744", "0.49960268", "0.49897835", "0.49812666", "0.49759048", "0.4971207", "0.49657452", "0.49538842", "0.49528524", "0.49495098", "0.4948463", "0.4945481", "0.49428713", "0.49423367", "0.4939415", "0.4935674", "0.49349385", "0.4931858", "0.49317786", "0.49310607", "0.49227884", "0.49223697", "0.4922326", "0.4919707", "0.49177936", "0.49139753", "0.49119192", "0.49035072", "0.49029818", "0.49025232", "0.49020737", "0.48986354", "0.48961732", "0.48901027", "0.4889877", "0.48888072", "0.4888116", "0.48876274", "0.48856178", "0.48834988", "0.48757043", "0.48752606" ]
0.66818523
0
Reproducing kernel. Calculate the reproducing kernel for the subspace of spherical harmonics of maximum degree N.
def kernel(mu, N):
    # Check that -1 <= mu <= 1
    mu = np.clip(mu, -1, 1)

    # Need Legendre polynomials
    legPolys = legp(mu, N)

    coefs = 2*np.arange(0, N+1) + 1

    ker = coefs*legPolys

    return ker.sum() / (4.0*np.pi)
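For context on the record above: the document evaluates the reproducing kernel of the subspace of spherical harmonics of degree at most N, which by the addition theorem reduces to K_N(mu) = (1/(4*pi)) * sum_{l=0}^{N} (2l+1) * P_l(mu), where P_l are the Legendre polynomials and mu is the cosine of the angle between two points on the sphere. The `legp` helper it calls is not defined in this record; the sketch below is a hypothetical, self-contained equivalent that assumes scipy's `eval_legendre` in its place.

import numpy as np
from scipy.special import eval_legendre  # assumed stand-in for the undefined legp helper

def kernel_sketch(mu, N):
    # K_N(mu) = (1/(4*pi)) * sum_{l=0}^{N} (2l+1) * P_l(mu)
    mu = np.clip(mu, -1, 1)              # mu = cos(angle) must lie in [-1, 1]
    degrees = np.arange(N + 1)
    ker = (2 * degrees + 1) * eval_legendre(degrees, mu)
    return ker.sum() / (4.0 * np.pi)

# Sanity check: P_l(1) = 1 for every l, so K_N(1) = (N + 1)**2 / (4*pi)
assert np.isclose(kernel_sketch(1.0, 5), 36 / (4.0 * np.pi))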
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_stitch(N):\n\tif N is None:\n\t\tN = np.int32(HII_DIM) #prepare for stitching\n\tMETA_GRID_SIZE = DIM/N\n\tM = np.int32(HII_DIM/META_GRID_SIZE)\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN/DIM)\n\tprint 'scale', scale\n\tHII_scale = np.float32(BOX_LEN/HII_DIM)\n\tshape = (DIM,DIM,N)\n\tstitch_grid_size = (DIM/(block_size[0]),\n\t\t\t\t\t\tDIM/(block_size[0]),\n\t\t\t\t\t\tN/(block_size[0]))\n\tHII_stitch_grid_size = (HII_DIM/(block_size[0]),\n\t\t\t\t\t\tHII_DIM/(block_size[0]),\n\t\t\t\t\t\tM/(block_size[0]))\n\t#ratio of large box to small size\n\tkernel_source = open(cmd_folder+\"/initialize_stitch.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'DIM': DIM, \n\t\t'VOLUME': VOLUME,\n\t\t'META_BLOCKDIM': N\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_stitch = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\tplan2d = Plan((np.int64(DIM), np.int64(DIM)), dtype=np.complex64)\n\tplan1d = Plan((np.int64(DIM)), dtype=np.complex64)\n\tprint \"init pspec\"\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\t#hbox_large = pyfftw.empty_aligned((DIM, DIM, DIM), dtype='complex64')\n\thbox_large = np.zeros((DIM, DIM, DIM), dtype=np.complex64)\n\t#hbox_small = np.zeros(HII_shape, dtype=np.float32)\n\t#hbox_large = n\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\n\t# Set up pinned memory for transfer\n\t#largebox_hs = cuda.aligned_empty(shape=shape, dtype=np.float32, alignment=resource.getpagesize())\n\tlargebox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.float32)\n\tlargecbox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.complex64)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tprint \"init boxes\"\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t# MRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=meta_x*N**3)\n\t\tinit_stitch(largebox_d, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tinit_stitch(largebox_d_imag, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largecbox_pin.copy()\n\t#if want to get velocity need to use this\n\tif True:\n\t\tprint \"saving kbox\"\n\t\tnp.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\n\tprint \"Executing FFT on device\"\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint hbox_large.dtype\n\tprint \"Finished FFT on device\"\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\t\n\tif True:\n\t\tprint \"loading kbox\"\n\t\thbox_large = 
np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\t#cuda.memcpy_htod_async(largebox_d, largebox_pin)\n\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tHII_filter(largebox_d, DIM, np.int32(meta_z), ZERO, smoothR, block=block_size, grid=stitch_grid_size);\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largebox_d.get_async()\n\t#import IPython; IPython.embed()\n\tprint \"Executing FFT on host\"\n\t#hbox_large = hifft(hbox_large).astype(np.complex64).real\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint \"Finished FFT on host\"\n\t#import IPython; IPython.embed()\n\n\t# for meta_x in xrange(META_GRID_SIZE):\n\t# \tfor meta_y in xrange(META_GRID_SIZE):\n\t# \t\tfor meta_z in xrange(META_GRID_SIZE):\n\t# \t\t\tlargebox_d = gpuarray.to_gpu(hbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N])\n\t# \t\t\tHII_filter(largebox_d, N, np.int32(meta_x), np.int32(meta_y), np.int32(meta_z), ZERO, smoothR, block=block_size, grid=grid_size);\n\t# \t\t\thbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N] = largebox_d.get()\n\t#plan = Plan(shape, dtype=np.complex64)\n\t#plan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\n\n\t# This saves a large resolution deltax\n\n\t\n\tprint \"downsampling\"\n\tsmallbox_d = gpuarray.zeros((HII_DIM,HII_DIM,M), dtype=np.float32)\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\t#largebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tlargebox_d /= scale**3 #\n\t\tsubsample_kernel(largebox_d, smallbox_d, DIM, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size) #subsample in real space\n\t\thbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallbox_d.get_async()\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), hbox_small)\n\t#import IPython; IPython.embed()\n\n\n\t# To get velocities: reload the k-space box\n\thbox_large = np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\thvbox_large = np.zeros((DIM, DIM, DIM), dtype=np.float32)\n\thvbox_small = np.zeros(HII_shape, dtype=np.float32)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,N), dtype=np.complex64)\n\tsmallvbox_d = gpuarray.zeros((HII_DIM, HII_DIM, M), dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\t\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(meta_z), np.int32(num), block=block_size, grid=stitch_grid_size)\n\t\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=stitch_grid_size)\n\t\t\tprint hvbox_large.shape, largevbox_d.shape\n\t\t\thvbox_large[:, :, meta_z*N:(meta_z+1)*N] = largevbox_d.get_async()\n\t\thvbox_large = fft_stitch(N, plan2d, plan1d, hvbox_large, largevbox_d).real\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargevbox_d = 
gpuarray.to_gpu_async(hvbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\tsubsample_kernel(largevbox_d.real, smallvbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size)\n\t\t\thvbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallvbox_d.get_async()\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallvbox_d.get())\n\n\treturn", "def even_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n \n\n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs[0::2]*legPolys[0::2] \n\n return ker.sum() / (4.0*np.pi)", "def kernel_factory(s, m1, m2):\r\n m_max = max(m1, m2)\r\n A = np.zeros([s, m_max, m_max], dtype=complex)\r\n symmetry = random.choice([2, 3, 4, 6])\r\n half_sym = np.floor(symmetry / 2).astype('int')\r\n lowest_k = 0.5\r\n highest_k = 3\r\n k = np.zeros([s, symmetry])\r\n for level in range(s):\r\n k[level, :] = np.random.uniform(lowest_k, highest_k, symmetry)\r\n\r\n x, y = np.meshgrid(np.linspace(-1, 1, m_max), np.linspace(-1, 1, m_max))\r\n # dist = np.sqrt(x * x + y * y)\r\n # theta = np.arctan(x / y)\r\n arb_angle = np.random.uniform(0, 2 * np.pi)\r\n for direction in range(symmetry):\r\n ang = direction * 180 / symmetry\r\n ang = arb_angle + ang * np.pi / 180\r\n r = (x * np.cos(ang) + np.sin(ang) * y)\r\n phi = np.random.uniform(0, 2 * np.pi)\r\n for i in range(s):\r\n A[i, :, :] += np.cos(2 * np.pi * k[i, direction % half_sym] * r)\r\n\r\n # Adding normal decay\r\n sigma = np.random.uniform(0.3, 0.6)\r\n decay = gaussian_window(m_max, m_max, sigma)\r\n A = np.multiply(np.abs(A), decay)\r\n # Normalizing:\r\n A = sphere_norm_by_layer(A)\r\n return A", "def even_kernel_der(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n #Derivatives of Legendre polynomials\n DlegPolys = legp_der(mu, N)\n \n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs[0::2]*DlegPolys[0::2] \n\n return ker.sum() / (4.0*np.pi)", "def nd_kernel(n):\n n = int(n)\n total_size = 3**n\n mid_point = int((3**n - 1)/2)\n kern = np.zeros(total_size, dtype=bool)\n for i in range(n):\n kern[mid_point-3**i] = True\n kern[mid_point+3**i] = True\n new_shape = 3*np.ones(n, dtype=int) \n unnormed_kern = kern.reshape(new_shape)\n return unnormed_kern/unnormed_kern.sum()", "def inv_funk_radon_even_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n p_at_zero = legp(0, N)\n\n coefs_num = 2*np.arange(0, N+1) + 1\n coefs_den = np.arange(2,N+1,2) * (np.arange(2,N+1,2) + 1)\n\n ker = coefs_num[2::2]*legPolys[2::2] / (p_at_zero[2::2] * coefs_den)\n\n return ker.sum() / (8.0*np.pi*np.pi)", "def init():\n\tN = np.int32(DIM) #prepare for stitching\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN)/DIM\n\tHII_scale = np.float32(BOX_LEN)/HII_DIM\n\tshape = (N,N,N)\n\t\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\n\tkernel_source = open(cmd_folder+\"/initialize.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'VOLUME': VOLUME,\n\t\t'DIM': DIM\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_kernel = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tadj_complex_conj = main_module.get_function(\"adj_complex_conj\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = 
main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d, np.int32(DIM), block=block_size, grid=grid_size)\n\n\t#import IPython; IPython.embed()\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d_imag, np.int32(DIM), block=block_size, grid=grid_size)\n\n\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\n\t#adj_complex_conj(largebox_d, DIM, block=block_size, grid=grid_size)\n\tlargebox = largebox_d.get()\n\t#np.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox)\n\n\t#save real space box before smoothing\n\tplan = Plan(shape, dtype=np.complex64)\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox_d.real.get_async())\n\n\t#save real space box after smoothing and subsampling\n\t# host largebox is still in k space, no need to reload from disk\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tHII_filter(largebox_d, N, ZERO, smoothR, block=block_size, grid=grid_size);\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tsubsample_kernel(largebox_d.real, smallbox_d, N, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_grid_size) #subsample in real space\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), smallbox_d.get_async())\n\n\t# reload the k-space box for velocity boxes\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\t\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,DIM), dtype=np.complex64)\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(num), block=block_size, grid=grid_size)\n\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=grid_size)\n\t\tplan.execute(largevbox_d, inverse=True)\n\t\tlargevbox_d /= scale**3\n\t\t#import IPython; IPython.embed()\n\t\tsubsample_kernel(largevbox_d.real, smallbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_grid_size)\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallbox_d.get())\n\n\treturn", "def inv_funk_radon_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n p_at_zero = legp(0, N)\n coefs = 2*np.arange(0, N+1, 2) + 1\n ker = coefs*legPolys[::2]/p_at_zero[::2]\n return ker.sum() / (8*np.pi)", "def f(k):\n return k * k * k * k * pk(k, suppression) * spherical_jn(2, k * r)", "def f(k):\n return k * k * k * k * pk(k, suppression) * spherical_jn(0, k * r)", "def moffat_kernel(n_fwhm,beta,r_s):\n\n x_length = int(n_rs * r_s + 0.5) #Add 0.5 to approximate to nearest integer\n y_length = 
x_length\n \n\n x, y = mgrid[-x_length:x_length+1, -y_length:y_length+1]\n\t\n m = 1. /((1+(x**2+y**2)/r_s**2)**beta)\n\t\t\n\n return m / m.sum()", "def f(k):\n return k * k * k * pk(k, suppression) * spherical_jn(1, k * r)", "def f(k):\n return k * k * pk(k, suppression) * spherical_jn(0, k * r)", "def _kernel(r: float, h: float) -> float:\n sigma_2 = 10 / (7 * np.pi * h * h)\n q = abs(r / h)\n\n if q <= 1.0:\n q2 = q * q\n W = 1.0 - 1.5 * q2 * (1.0 - 0.5 * q)\n W *= sigma_2\n elif q <= 2.0:\n two_minus_q = 2 - q\n two_minus_q_c = np.power(two_minus_q, 3)\n W = 0.25 * two_minus_q_c\n W *= sigma_2\n else:\n W = 0\n\n return W", "def filter_wrapped_phase(image, k):\n ny, nx = image.shape\n assert(ny == nx) ## assert a square image for simplicity\n if (k%2 == 0):\n print(\"k has to be an integer!\")\n return\n N = nx\n i, j = np.arange(N), np.arange(N)\n ii, jj = np.meshgrid(i, j)\n filt_psi = np.zeros((N,N))\n\n inside = (jj[k/2:N-(k/2), k/2:N-(k/2)].flatten(), ii[k/2:N-(k/2), k/2:N-(k/2)].flatten())\n krange = np.linspace(-1 * (k/2), (k/2), k, dtype = 'int64') ## amount of added spaces, if k = 5, it ranges from -2 to 2\n krange_tile = np.tile(krange * N, (k, 1)).T ## tile them to make a (k/2)**2 matrix, containing for instance -2N, -N, 0, N, 2N for k=5\n k_tile = np.tile(krange, (k, 1)) ## tile to add to krange_tile\n coords_add = (krange_tile + k_tile).flatten() ## all coordinates, in a (k/2)**2 matrix, from -2N - 2: -2N + 2, -N-2 : -N+2 , -2 : 2, N -2 : N +2, 2N -2 : 2N +2\n inside = np.ravel_multi_index(inside, (N, N))\n coords_add = np.tile(coords_add, (len(inside), 1)) ## stack all differences to add to inside\n inside_tile = np.tile(inside, (coords_add.shape[1],1)).T ## stack all inside to add to differences\n all_coords = inside_tile + coords_add### a matrix of len(inside) x (k/2)**2 with all coordinates in a k x k square around a certain coordinate\n unrav_coords = np.unravel_index(all_coords, (N, N)) ## unraveled coordinates of all coordinates\n sum_sin_psi = np.sum(np.sin(image[unrav_coords]), axis = 1) ## sum over a sin (psi) over a k x k square\n sum_cos_psi = np.sum(np.cos(image[unrav_coords]), axis = 1) ## sum over a cos (psi) over a k x k square\n psi_app = np.arctan2(sum_sin_psi, sum_cos_psi)\n filt_psi[np.unravel_index(inside, (N, N))] = psi_app \n\n #### top layers\n for i in range(k/2):\n ## for indices directly above the \"inside square\"\n top = (jj[i, k/2:N-(k/2)].flatten(), ii[i, k/2: N - (k/2)].flatten())\n coords_add = (krange_tile + k_tile)[(k/2)-i:, :].flatten()\n top = np.ravel_multi_index(top, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n top_tile = np.tile(top, (coords_add.shape[1],1)).T\n top_coords = top_tile + coords_add\n unrav_coords = np.unravel_index(top_coords, (N, N))\n sum_sin_top = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_top = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_top = np.arctan2(sum_sin_top, sum_cos_top)\n filt_psi[np.unravel_index(top, (N, N))] = psi_top\n\n ## indices directly below the \"inside square\"\n bot = (jj[N- 1 - i, k/2:N-(k/2)].flatten(), ii[N-1-i, k/2: N - (k/2)].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:(k/2) + 1 + i, :].flatten()\n bot = np.ravel_multi_index(bot, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n bot_tile = np.tile(bot, (coords_add.shape[1],1)).T\n bot_coords = bot_tile + coords_add\n unrav_coords = np.unravel_index(bot_coords, (N, N))\n sum_sin_bot = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_bot = 
np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_bot = np.arctan2(sum_sin_bot, sum_cos_bot)\n filt_psi[np.unravel_index(bot, (N, N))] = psi_bot\n\n ## indices directly left of the \"inside square\"\n left = (jj[k/2:N-(k/2), i].flatten(), ii[k/2:N-(k/2), i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, (k/2)-i:].flatten()\n left = np.ravel_multi_index(left, (N, N))\n coords_add = np.tile(coords_add, (len(left), 1))\n left_tile = np.tile(left, (coords_add.shape[1],1)).T\n left_coords = left_tile + coords_add\n unrav_coords = np.unravel_index(left_coords, (N, N))\n sum_sin_left = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_left = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_left = np.arctan2(sum_sin_left, sum_cos_left)\n filt_psi[np.unravel_index(left, (N, N))] = psi_left\n\n ## indices directly left of the \"inside square\"\n right = (jj[k/2:N-(k/2), N - 1 - i].flatten(), ii[k/2:N-(k/2), N - 1 - i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, :(k/2)+1+i].flatten()\n right = np.ravel_multi_index(right, (N, N))\n coords_add = np.tile(coords_add, (len(right), 1))\n right_tile = np.tile(right, (coords_add.shape[1],1)).T\n right_coords = right_tile + coords_add\n unrav_coords = np.unravel_index(right_coords, (N, N))\n sum_sin_right = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_right = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_right = np.arctan2(sum_sin_right, sum_cos_right)\n filt_psi[np.unravel_index(right, (N, N))] = psi_right\n \n ## calculate boundaries diagonals\n left_t, right_t, left_b, right_b = (i, i), (i, -1 -i), (-1 - i, i), (-1 - i, -1 - i) \n left_t, right_t, left_b, right_b = (jj[left_t], ii[left_t]), (jj[right_t], ii[right_t]), (jj[left_b], ii[left_b]), (jj[right_b], ii[right_b])\n left_t, right_t, left_b, right_b = np.ravel_multi_index(left_t, (N, N)), np.ravel_multi_index(right_t, (N, N)), np.ravel_multi_index(left_b, (N, N)), np.ravel_multi_index(right_b, (N, N))\n coord_mat = krange_tile + k_tile\n coords_add_lt, coords_add_rt, coords_add_lb, coords_add_rb = coord_mat[(k/2)-i:, (k/2)-i:].flatten(), coord_mat[(k/2)-i:, :(k/2)+1+i].flatten(), coord_mat[:(k/2)+i+1, (k/2)-i:].flatten(), coord_mat[:(k/2)+i+1, :(k/2)+i+1].flatten()\n coords_add_tot = np.vstack((coords_add_lt, coords_add_rt, coords_add_lb, coords_add_rb))\n lt_tile, rt_tile, lb_tile, rb_tile = np.tile(left_t, (coords_add_lt.shape[0],1)).T, np.tile(right_t, (coords_add_lt.shape[0],1)).T, np.tile(left_b, (coords_add_lt.shape[0],1)).T, np.tile(right_b, (coords_add_lt.shape[0],1)).T\n coords_tile_tot = np.squeeze(np.stack((lt_tile, rt_tile, lb_tile, rb_tile)))\n coords_tot = coords_add_tot + coords_tile_tot\n unrav_coords = np.unravel_index(coords_tot, (N, N))\n sum_sin_diag = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_diag = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_diag = np.arctan(sum_sin_diag, sum_cos_diag)\n filt_psi[np.unravel_index(np.stack((left_t, right_t, left_b, right_b)), (N, N))] = psi_diag\n\n return filt_psi", "def test():\n\n S = \"cells interlinked within cells interlinked\"\n T = \"within one stem and dreadfully distinct\"\n\n n = 2\n\n res = kernel(S, T, n)\n\n print(res)\n print('k(car, car, 1) = ', kernel('car', 'car', 1),\n 'should be 3*lambda^2 = .75')\n print('k(car, car, 2) = ', kernel('car', 'car', 2),\n ' should be lambda^6 + 2*lambda^4 = 0.140625')\n print('k(car, car, 3) = ', kernel('car', 'car', 3),\n 'should be lambda^6 = 
0.0156')\n\n print('normkernel(cat, car, 1) = ', normkernel('cat', 'car', 1),\n 'should be 2/3')\n print('kernel(cat, car, 2) = ', kernel('cat', 'car', 2),\n 'should be lambda^4 = 0.0625')\n print('normkernel(cat, car, 2) = ', normkernel('cat', 'car', 2),\n 'should be 1/(2+lambda^2) = 0.44444')\n\n print(\n kernel(\"AxxxxxxxxxB\", \"AyB\", 2),\n 'should be =0.5^14 = 0.00006103515625')\n print(\n kernel(\"AxxxxxxxxxB\", \"AxxxxxxxxxB\", 2),\n 'should be 12.761724710464478')\n\n print(kernel(\"ab\", \"axb\", 2), 'should be =0.5^5 = 0.03125')\n print(kernel(\"ab\", \"abb\", 2), 'should be 0.5^5 + 0.5^4 = 0.09375')\n print(normkernel(\"ab\", \"ab\", 2), 'should be 1')\n print(normkernel(\"AxxxxxxxxxB\", \"AxxxxxxxxxB\", 2), 'should be 1')\n\n kss = [0.580, 0.580, 0.478, 0.439, 0.406, 0.370]\n for x in range(1, 7):\n print(x,\n normkernel(\"science is organized knowledge\",\n \"wisdom is organized life\", x), 'should be',\n kss[x - 1])", "def eg3(N_train=1000, N_test=500, depend_ratio_train=0.8, depend_ratio_test=0.2, feature_num=10, stable_ratio=0.4):\n\n def eg3_kernel(n, p, stable_ratio=0.4, depend_ratio=0.8):\n p_stable = int(p * stable_ratio)\n p_noise = p - p_stable\n stable_feature = np.random.randn(n, p_stable)\n noise_feature_dependent = np.zeros([n, p_noise])\n noise_feature_independent = np.random.randn(n, p_noise)\n for i in range(p_noise):\n noise_feature_dependent[:, i] = stable_feature[:, i % p_stable] + stable_feature[:,\n (i + 1) % p_stable] + 2 * np.random.randn(\n n) # still need noise\n noise_depend_label = np.random.uniform(0, 1, n).reshape(-1, 1)\n noise_depend_label = np.concatenate([noise_depend_label] * p_noise, axis=1)\n noise_feature = np.where(noise_depend_label < depend_ratio, noise_feature_dependent, noise_feature_independent)\n\n b = np.zeros([p_stable, 1])\n linear_len = int(p_stable / 2)\n\n for i in range(linear_len): # linear part\n b[i, 0] = (-1) ** i * (i % 3 + 1) * p / 3\n for i in range(linear_len, b.shape[0]): # nonlinear part\n b[i, 0] = p / 2\n\n linear_part = np.matmul(stable_feature[:, :linear_len], b[:linear_len, 0])\n nolinear_part = np.zeros([n, 1])\n for i in range(linear_len, b.shape[0]):\n temp = stable_feature[:, i % p_stable] * stable_feature[:, (i + 1) % p_stable] * b[i, 0]\n temp = temp.reshape(-1, 1)\n nolinear_part += temp\n\n Y = linear_part.reshape(-1, 1) + nolinear_part + np.random.randn(n, 1)\n\n data = {}\n data['stable'] = stable_feature\n data['noise'] = noise_feature\n data['Y'] = Y\n data['params'] = b\n data['kernel'] = 'eg3'\n return data\n\n data_train = eg3_kernel(n=N_train, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_train)\n data_test = eg3_kernel(n=N_test, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_test)\n return data_train, data_test", "def ghosal_edge(img,Ks,thr=1,thrmax=0.995,lmin = 0.5,phimin=1.4,thresholding=True, debug=False):\n\ttotaltime = time.time()\n\tkerneltime = time.time()\n\t# Ks must be odd\n\tif Ks%2 != 1:\n\t\tprint(\"Ks must be odd! 
Continuing with Ks = Ks-1\")\n\t\tKs = Ks-1\n\t# define the rectangular kernels\n\t#Vc00 = np.zeros((Ks,Ks),dtype=complex)\n\tVc11 = np.zeros((Ks,Ks),dtype=complex)\n\tVc20 = np.zeros((Ks,Ks),dtype=complex)\n\tofs = 1 *(1-1/Ks) # offset for centering kernel around 0,0\n\tfor i in range(Ks):\n\t\tfor j in range(Ks):\n\t\t\tKx = 2*j/Ks-ofs # limits of integration between -1 and 1\n\t\t\tKy = 2*i/Ks-ofs\n\t\t\tif Kx**2+Ky**2 <= 1: # only a circle\n\t\t\t\t#Vc00[i,j] = 1 # the conjugate of V00\n\t\t\t\tVc11[i,j] = Kx-Ky*1j # ...\n\t\t\t\tVc20[i,j] = 2*Kx**2+2*Ky**2-1\n\tkerneltime = time.time() - kerneltime\n\t\n\t# Kernel Plots\n\t#\tVCplot = Vc00\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real w K Vc00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag w K Vc00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = Vc11\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real w K Vc11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag w K Vc11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = Vc20\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real w K Vc20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag w K Vc20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t\n\t# do the convolution with the images to get the zernike moments\n\tAnorm = lambda n : (n+1)/np.pi\t# a normalization value\n\tconvolvetime = time.time()\n\t#A00 = scig.convolve2d(img,Vc00,mode='same')\n\t#\tA11 = Anorm(1)*scig.convolve2d(img,Vc11,mode='same')\n\t#\tA20 = Anorm(2)*scig.convolve2d(img,Vc20,mode='same')\n\tA11 = Anorm(1)*scig.oaconvolve(img,Vc11,mode='same')\n\tA20 = Anorm(2)*scig.oaconvolve(img,Vc20,mode='same')\n\tconvolvetime = time.time() - convolvetime\n\t# Plot Zernike moments\n\t#\tVCplot = A00\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = A11\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = A20\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t\n\tparamstime = time.time()\n\t# calculate the edge paramters\n\t#\ttanphi = np.imag(A11)/np.real(A11)\n\t#\tphi = np.arctan(tanphi)\n\t#\tcosphi = np.cos(phi)\n\t#\tsinphi = cosphi*tanphi\n\t#\tAl11 = np.real(A11)*cosphi+np.imag(A11)*sinphi\n\t\n\tphi = np.arctan(np.imag(A11)/np.real(A11))\n\tAl11 = np.real(A11)*np.cos(phi)+np.imag(A11)*np.sin(phi)\n\t\n\t#\tAl11 = A11*np.exp(-phi*1j)\n\tl = A20/Al11 # A20 has no imaginary component so A20 = A'20\n\n\tk = 3*Al11/(2*(1-l**2)**(3/2))\n\tparamstime = time.time() - paramstime\n\t\n\t# Plot edge paramters\n\t#\tVCplot = phi\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real phi\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag phi\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = Al11\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A\\'11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag 
A\\'11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = l\n\t#\tplt.pcolormesh(np.real(VCplot))#,vmin=-5,vmax=5\n\t#\tplt.title(\"real l\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot)) # ,vmin=-5,vmax=5\n\t#\tplt.title(\"imag l\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = k\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real k\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag k\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t\n\t\n\ttreattime = time.time()\n\tif thresholding==True:\n\t\t# do the thresholding\n\t\tif (thrmax<0)&(thr>0):\n\t\t\tknorm = np.sort(k.flatten())[[int(thr*np.size(k)),int(thrmax*np.size(k))]]\n\t\t\tidx = (abs(l)<lmin)&(abs(phi)>phimin)&(abs(k)>knorm[0])\n\t\telif thrmax>0:\n\t\t\tknorm = np.sort(k.flatten())[[int(thr*np.size(k)),int(thrmax*np.size(k))]]\n\t\t\tidx = (abs(l)<lmin)&(abs(phi)>phimin)&(abs(k)>knorm[0])&(abs(k)<knorm[1])\n\t\telif thr<0:\n\t\t\tidx = (abs(l)<lmin)&(abs(phi)>phimin)\n\t\t\tknorm = np.sort(k[idx].flatten())[int(thr)]\n\t\t\tidx = idx&(abs(k)>abs(knorm))\n\t\tne = np.sum(idx)\n\telif thresholding==False:\n\t\traise ValueError(\"this option is not still uncer development\")\n\t\t# no thresholding\n\t\tidx = np.ones(np.shape(l),dtype=bool)\n\t\tne =np.sum(idx)\n\telse:\n\t\traise ValueError(\"thresholding should be boolean\")\n\t\n\t# put all detected points in a vector of (x,y) values\n\tedg = np.zeros((ne,2))\n\torg = np.zeros((ne,2))\n\tnx,ny = np.shape(img)\n\te = 0\n\tfor i in range(nx):\n\t\tfor j in range(ny):\n\t\t\tif idx[i,j]:\n\t\t\t\tedg[e]=np.array([i,j]) + l[i,j]*Ks/2*np.array(\n\t\t\t\t\t[np.sin(phi[i,j]),-np.cos(phi[i,j])])\n\t\t\t\torg[e]=np.array([i,j])\n\t\t\t\te +=1\n\ttreattime = time.time() - treattime\n\ttotaltime = time.time() - totaltime\n\tprint(\"total %0.5f\tconvolution %0.5f\tthresholding %0.5f\tparamters %0.5f\tkernel %0.5f\"%(totaltime,convolvetime,treattime,paramstime,kerneltime))\n\t\n\tif debug==True:\n\t\treturn edg, org, k, l, phi\n\telse:\n\t\treturn edg, org", "def kernel(n):\r\n return [(k, n - abs(k)) for k in range(-n, n + 1)]", "def kernel(self, modulus=None):\n M = self.matrix(modulus=modulus)\n if modulus is None:\n M = M.convert_to(QQ)\n # Note: Even when working over a finite field, what we want here is\n # the pullback into the integers, so in this case the conversion to ZZ\n # below is appropriate. When working over ZZ, the kernel should be a\n # ZZ-submodule, so, while the conversion to QQ above was required in\n # order for the nullspace calculation to work, conversion back to ZZ\n # afterward should always work.\n # TODO:\n # Watch <https://github.com/sympy/sympy/issues/21834>, which calls\n # for fraction-free algorithms. 
If this is implemented, we can skip\n # the conversion to `QQ` above.\n K = M.nullspace().convert_to(ZZ).transpose()\n return self.domain.submodule_from_matrix(K)", "def gauss_kernel(n_fwhm,sigma):\n\n x_length = int(n_fwhm * sigma + 0.5) #Add 0.5 to approximate to nearest integer\n y_length = x_length\n \n \n x, y = mgrid[-x_length:x_length+1, -y_length:y_length+1]\n g = numpy.exp(-(x**2/(2*(float(sigma)**2))+y**2/(2*(float(sigma)**2))))\n return g / g.sum()", "def sub_kernel(kernel, dim1, dim2):\n\n sub_kernel = kernel[dim1[0]:dim1[1],dim2[0]:dim2[1]]\n return sub_kernel", "def calc_ked_WFI(self):\n\n #Initialize kinetic energy density\n self.ked_WFI = np.zeros( (self.grid.Nelem, 1))\n\n #Figure out the number of occupied orbitals\n if self.m == 0:\n if self.pol == 1:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n else:\n Nocc = np.floor(self.N)\n nu = self.N - Nocc\n\n else:\n #m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n Nocc = np.floor(self.N / 4)\n nu = self.N / 4 - Nocc\n else:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n\n #Construct density\n for i in range(int(Nocc)):\n # print(\"phi from pssolver\", self.phi)\n # print(\"phi subset\", self.phi[:,i])\n # print(\"integrate returns\", self.grid.integrate( self.phi[:,i]**2 )**0.5)\n\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:,i]**2 )**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += (phi_norm * (self.H0 @ phi_norm)) / self.grid.w[:, None]\n\n #If we are doing fractional robitals and are non-integer\n if self.FRACTIONAL is True and nu != 0:\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:, Nocc+1]**2)**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += nu * ( phi_norm * (self.H0 @ phi_norm) ) / self.grid.w[:, None]\n\n #Scale densities appropriately\n if self.m == 0:\n if self.pol == 1: #Unpolarized electrons\n self.ked_WFI = 2 * self.ked_WFI\n\n else: # m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n self.ked_WFI = 4 * self.ked_WFI\n else:\n self.ked_WFI = 2 * self.ked_WFI", "def normkernel(S, T, n):\n\n k1 = kernel(S, S, n)\n k2 = kernel(T, T, n)\n res = kernel(S, T, n) / sqrt(k1 * k2)\n\n return res", "def ghosal_edge_v2(img,Ks,kmin=0,kmax=1000,lmax=0.5,phimin=1,thresholding=True,debug=False,mirror=False):\n\t# gather image properties before its altered\n\tni,nj = np.shape(img)\n\t# Ks must be odd\n\tif Ks%2 != 1:\n\t\tprint(\"Ks must be odd! 
Continuing with Ks = Ks-1\")\n\t\tKs = Ks-1\n\t# define the rectangular kernels\n\t#Vc00 = np.zeros((Ks,Ks),dtype=complex) # not needed\n\tVc11 = np.zeros((Ks,Ks),dtype=complex)\n\tVc20 = np.zeros((Ks,Ks),dtype=complex)\n\tofs = 1 *(1-1/Ks) # offset for centering kernel around 0,0\n\tfor i in range(Ks):\n\t\tfor j in range(Ks):\n\t\t\tKx = 2*j/Ks-ofs # limits of integration between -1 and 1\n\t\t\tKy = 2*i/Ks-ofs\n\t\t\tif Kx**2+Ky**2 <= 1: # only a circle\n\t\t\t\t#Vc00[i,j] = 1 # the conjugate of V00 # not needed\n\t\t\t\tVc11[i,j] = Kx-Ky*1j # ...\n\t\t\t\tVc20[i,j] = 2*Kx**2+2*Ky**2-1\n\t# mirror the edges to avoid edge effects from convolution\n\tif mirror:\n\t\tthick = int((Ks-1)/2)\n\t\timg = np.concatenate((img[:,(thick-1)::-1],img,img[:,:-(thick+1):-1]),1)\n\t\timg = np.concatenate((img[(thick-1)::-1,:],img,img[:-(thick+1):-1,:]),0)\n\t\tmode = \"valid\"\n\telse:\n\t\tmode = \"same\"\n\t\n\t# do the convolution with the images to get the zernike moments\n\tAnorm = lambda n : (n+1)/np.pi\t# a normalization value\n\t#A00 = scig.convolve2d(img,Vc00,mode='same') # not needed\n\tA11 = Anorm(1)*scig.oaconvolve(img,Vc11,mode=mode)\n\tA20 = Anorm(2)*scig.oaconvolve(img,Vc20,mode=mode)\n\n\tphi = np.arctan(np.imag(A11)/zero_to_small(np.real(A11)))\n\tAl11 = np.real(A11)*np.cos(phi)+np.imag(A11)*np.sin(phi)\n\tl = np.real(A20)/Al11 # A20 has no imaginary component so A20 = A'20\n\tl = np.minimum(l,1-SMALL) # chop off those that go beyond the kernel boundaries\n\tl = np.maximum(l,-1+SMALL)\n\tk = abs(3*Al11/(2*(1-l**2)**(3/2))) \n\t\n\tif thresholding==True:\n\t\t# conditions\n\t\tphi_c = abs(phi)>phimin\n\t\tl_c = abs(l)<lmax\n\t\tk_c = (k<kmax) & (k>kmin)\n\t\tvalid = phi_c & (k_c & l_c)\n\telif thresholding==False:\n\t\tvalid = np.ones_like(k)\n\t# define a grid of pixel positions\n\ti,j = np.meshgrid(np.arange(nj),np.arange(ni))\n\t\n\t# get a list of the valid relevant parameters \n\ti = i[valid]\n\tj = j[valid]\n\t#\tk = k[valid] # not necessary\n\tl = l[valid]\n\tphi = phi[valid]\n\t\n\t# convert to the subpixel position\n\ti_s = i+l*Ks/2*np.cos(phi)\n\tj_s = j+l*Ks/2*np.sin(phi)\n\t\n\t# put all detected points in a vector of (x,y) values\n\tedg = np.squeeze((j_s,i_s)).transpose()\n\torg = np.squeeze((j,i)).transpose()\n\tif debug==True:\n\t\treturn edg, org, k, l, phi\n\telse:\n\t\treturn edg, org", "def _calc_kernel(self,\n freq_1: float,\n time_1: float,\n freq_2: float,\n time_2: float,\n dagg: tuple\n ) -> Tuple[ndarray, ndarray]:\n dt = self._process_tensor.dt\n #pieces of kernel consist of some combination of phases and\n #Bose-Einstein factors\n n_1, n_2 = 0, 0\n if self._temp > 0:\n n_1 += np.exp(-freq_1/self._temp) / (1 - np.exp(-freq_1/self._temp))\n n_2 += np.exp(-freq_2/self._temp) / (1 - np.exp(-freq_2/self._temp))\n\n ker_dim = int(np.round(time_2 / dt))\n # calculate index corresponding to t_1\n switch = int(np.round(time_1 / dt))\n re_kernel = np.zeros((ker_dim, ker_dim), dtype = NpDtype)\n im_kernel = np.zeros((ker_dim, ker_dim), dtype = NpDtype)\n\n tpp_index, tp_index = np.meshgrid(\n np.arange(ker_dim), np.arange(ker_dim),\n indexing='ij') #array of indices for each array element\n regions = {\n 'a': (slice(switch), slice(switch)), #(0->t_1, 0->t_1)\n 'b': (slice(switch), slice(switch, None)), #(0->t_1, t_1->t)\n 'c': (slice(switch, None), slice(switch, None))} #(t_1->t, t_1->t)\n\n def phase(region, swap_ts = False):\n tk = tp_index[regions[region]]\n tkp = tpp_index[regions[region]]\n if tk.size == 0 or tkp.size == 0:\n return 0\n a = -1j * ((2*dagg[0] - 1)) * 
freq_2\n b = -1j * ((2*dagg[1] - 1)) * freq_1\n if swap_ts:\n a, b = b, a\n if region in ('a','c'):\n ph = np.triu(\n np.exp(a * (tk+1)*dt + b * (tkp+1)*dt) / (a * b), k = 1)\n ph -= np.triu(\n np.exp(a * (tk+1)*dt + b * tkp*dt) / (a * b), k = 1)\n ph -= np.triu(\n np.exp(a * tk*dt + b * (tkp+1)*dt) / (a * b), k = 1)\n ph += np.triu(\n np.exp(a * tk*dt + b * tkp*dt) / (a * b), k = 1)\n sel = np.diag(tk)\n di = -np.exp((a * (sel + 1) + b * sel) * dt) / (a * b)\n if a + b != 0:\n di += np.exp((a + b) * (sel + 1) * dt) / (b * (a+b))\n di += np.exp((a + b) * sel * dt) / (a * (a+b))\n else:\n di += (1 + a * sel * dt + b * (sel + 1) * dt) / (a * b)\n ph += np.diag(di)\n else:\n ph = np.exp(a * (tk+1)*dt + b * (tkp+1)*dt) / (a * b)\n ph -= np.exp(a * (tk+1)*dt + b * tkp*dt) / (a * b)\n ph -= np.exp(a * tk*dt + b * (tkp+1)*dt) / (a * b)\n ph += np.exp(a * tk*dt + b * tkp*dt) / (a * b)\n return ph\n\n\n if dagg == (0, 1):\n re_kernel[regions['a']] = phase('a') + phase('a', 1)\n\n re_kernel[regions['b']] = phase('b')\n\n im_kernel[regions['a']] = ((2*n_1 + 1) * phase('a') -\n (2*n_2 + 1) * phase('a', 1))\n\n im_kernel[regions['b']] = (2*n_1 + 1) * phase('b')\n\n im_kernel[regions['c']] = -2 * (n_1 + 1) * phase('c')\n\n elif dagg == (1, 0):\n re_kernel[regions['a']] = phase('a') + phase('a', 1)\n\n re_kernel[regions['b']] = phase('b')\n\n im_kernel[regions['a']] = ((2*n_1 + 1) * phase('a') -\n (2*n_2 + 1) * phase('a', 1))\n\n im_kernel[regions['b']] = (2*n_1 + 1) * phase('b')\n\n im_kernel[regions['c']] = 2 * n_1 * phase('c')\n\n elif dagg == (1, 1):\n re_kernel[regions['a']] = -(phase('a') + phase('a', 1))\n\n re_kernel[regions['b']] = -phase('b')\n\n im_kernel[regions['a']] = ((2*n_1 + 1) * phase('a') +\n (2*n_2 + 1) * phase('a', 1))\n\n im_kernel[regions['b']] = (2*n_1 + 1) * phase('b')\n\n im_kernel[regions['c']] = 2 * (n_1 + 1) * phase('c')\n\n elif dagg == (0, 0):\n re_kernel[regions['a']] = -(phase('a') + phase('a', 1))\n\n re_kernel[regions['b']] = -phase('b')\n\n im_kernel[regions['a']] = -((2*n_2 + 1) * phase('a', 1) +\n (2*n_1 + 1) * phase('a'))\n\n im_kernel[regions['b']] = -(2*n_1 + 1) * phase('b')\n\n im_kernel[regions['c']] = -2 * n_1 * phase('c')\n\n re_kernel = np.triu(re_kernel) #only keep triangular region\n im_kernel = np.triu(im_kernel)\n return re_kernel, im_kernel", "def SE(H, W):\n\n no_real, N, N, K, M = H.shape\n all_powers = np.swapaxes(np.swapaxes(H, 0, 1) @ hermitian(W), 0, 1)\n all_powers = np.abs(all_powers) ** 2\n\n\n\n # (no_real, N, N, K, K)\n # (no_real, n_t, n, k, k_neighbor)\n # the power coming from BS n_t to User k in BS n, using the\n # precoding of BS n_t to user k_neighbor in BS n1\n\n\n p_sig = np.zeros((no_real, N, K))\n p_int = np.zeros((no_real, N, K, N))\n sinr = np.zeros_like(p_sig)\n\n\n for r in range(no_real):\n for n in range(N):\n for k in range(K):\n p_sig[r, n, k] = all_powers[r, n, n, k, k]\n for n_t in range(N):\n p_int[r, n, k, n_t] = all_powers[r, n_t, n, k].sum()\n if n_t == n:\n p_int[r, n, k, n_t] -= p_sig[r,n,k]\n sinr = p_sig / ((p_int).sum(axis=-1) + 1)\n return np.log2(1 + sinr), p_sig, p_int", "def gkern1(kernlen=21, nsig=3):\n interval = (2*nsig+1.)/(kernlen)\n x = np.linspace(-nsig-interval/2., nsig+interval/2., kernlen+1) \n kern1d = np.diff(scipy.stats.norm.cdf(x))\n kernel_raw = np.sqrt(np.outer(kern1d, kern1d))\n kernel = kernel_raw/kernel_raw.sum()\n \n return kernel", "def compute_gradient_kernel_respect_to_noise(n):\n\n return np.identity(n)", "def disp_surf_calc(kc_x_max, kc_z_max, m_i, wp_e):\n\n # Make vectors of the wave 
numbers\n kc_z = np.linspace(1e-6, kc_z_max, 35)\n kc_x = np.linspace(1e-6, kc_x_max, 35)\n\n # Turn those vectors into matrices\n kc_x_mat, kc_z_mat = np.meshgrid(kc_x, kc_z)\n\n # Find some of the numbers that appear later in the calculations\n kc_ = np.sqrt(kc_x_mat ** 2 + kc_z_mat ** 2) # Absolute value of k\n theta_ = np.arctan2(kc_x_mat, kc_z_mat) # The angle between k and B\n wc_i = 1 / m_i # The ion gyro frequency\n wp_i = wp_e / np.sqrt(m_i) # The ion plasma frequency\n wp_ = np.sqrt(wp_e ** 2 + wp_i ** 2) # The total plasma frequency\n\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # For every k_perp and k_par, turn the dispersion relation into a\n # polynomial equation and solve it.\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # The polynomial coefficients are calculated\n pol_koeff_8 = -2 * kc_ ** 2\n pol_koeff_8 -= (1 + wc_i ** 2 + 3 * wp_ ** 2) * np.ones(kc_.shape)\n pol_koeff_6 = (2 * kc_ ** 2 + wp_ ** 2) * (1 + wc_i ** 2 + 2 * wp_ ** 2)\n pol_koeff_6 += kc_ ** 4 + (wp_ ** 2 + wc_i) ** 2\n pol_koeff_4 = -kc_ ** 4 * (1 + wc_i ** 2 + wp_ ** 2)\n pol_koeff_4 -= 2 * kc_ ** 2 * (wp_ ** 2 + wc_i) ** 2\n pol_koeff_4 -= (kc_ * wp_) ** 2 * (1 + wc_i ** 2 - wc_i) * (\n 1 + np.cos(theta_) ** 2)\n pol_koeff_4 -= wp_ ** 2 * (wp_ ** 2 + wc_i) ** 2\n pol_koeff_2 = kc_ ** 4 * (wp_ ** 2 * (1 + wc_i ** 2 - wc_i) * np.cos(\n theta_) ** 2 + wc_i * (wp_ ** 2 + wc_i))\n pol_koeff_2 += kc_ ** 2 * wp_ ** 2 * wc_i * (wp_ ** 2 + wc_i) * (\n 1 + np.cos(theta_) ** 2)\n pol_koeff_0 = -kc_ ** 4 * wc_i ** 2 * wp_ ** 2 * np.cos(theta_) ** 2\n\n w_final = np.zeros((10, len(kc_z), len(kc_x)))\n\n # For each k, solve the equation\n for k_z, k_x in itertools.product(range(len(kc_z)), range(len(kc_x))):\n disp_polynomial = [1, 0, pol_koeff_8[k_z, k_x], 0,\n pol_koeff_6[k_z, k_x], 0, pol_koeff_4[k_z, k_x],\n 0, pol_koeff_2[k_z, k_x], 0, pol_koeff_0[k_z, k_x]]\n # theoretically should be real (A. Tjulin)\n w_temp = np.real(np.roots(disp_polynomial))\n # We need to sort the answers to get nice surfaces.\n w_final[:, k_z, k_x] = np.sort(w_temp)\n\n n2_ = kc_ ** 2 / w_final ** 2\n v_ph_c = np.sqrt(1. 
/ n2_)\n va_c = 1 / (wp_e * np.sqrt(m_i))\n v_ph_va = v_ph_c / va_c\n\n diel_tensor = _calc_diel(kc_, w_final, theta_, wp_e, wp_i, wc_i)\n\n e_x, e_y, e_z, e_per, e_tot, e_pol = _calc_e(diel_tensor)\n e_par = (kc_x_mat * e_x + kc_z_mat * e_z) / kc_\n\n b_x, b_y, b_z, b_par, b_per, b_pol, b_tot = _calc_b(kc_x_mat, kc_z_mat,\n w_final, e_x, e_y, e_z)\n\n dk_x, dk_z = [kc_x_mat[1], kc_z_mat[1]]\n dw_x, dw_z = [np.zeros(w_final.shape) for _ in range(2)]\n dw_x[:, :, 1:] = np.diff(w_final, axis=2)\n dw_z[:, 1:, :] = np.diff(w_final, axis=1)\n v_x, v_z = [dw_ / dk for dw_, dk in zip([dw_x, dw_z], [dk_x, dk_z])]\n\n s_par, s_tot = _calc_s(e_x, e_y, e_z, b_x, b_y, b_z)\n\n # Compute ion and electron velocities\n v_ex, v_ey, v_ez, v_ix, v_iy, v_iz = _calc_vei(m_i, wc_i, w_final,\n e_x, e_y, e_z)\n\n # Ratio of parallel and perpendicular to B speed\n vepar_perp = v_ez * np.conj(v_ez)\n vepar_perp /= (v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey))\n vipar_perp = v_iz * np.conj(v_iz)\n vipar_perp /= (v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy))\n\n # Total particle speeds\n v_e2 = v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey) + v_ez * np.conj(v_ez)\n v_i2 = v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy) + v_iz * np.conj(v_iz)\n\n # Ion and electron energies\n m_e = -1\n en_e = 0.5 * m_e * v_e2\n en_i = 0.5 * m_i * v_i2\n\n # Ratio of particle and field energy densities\n ratio_part_field = _calc_part2fields(wp_e, en_e, en_i, e_tot, b_tot)\n\n # Continuity equation\n dn_e_n, dn_i_n, dne_dni = _calc_continuity(kc_x_mat, kc_z_mat, w_final,\n v_ex, v_ez, v_ix, v_iz)\n\n dn_e_n_db_b = dn_e_n / b_tot\n dn_i_n_db_b = dn_i_n / b_tot\n\n dn_e_n_dbpar_b = dn_e_n / b_par\n dn_i_n_dbpar_b = dn_i_n / b_par\n\n dn_e = dn_e_n * wp_e ** 2\n k_dot_e = e_x * kc_x_mat + e_z * kc_z_mat\n k_dot_e = np.sqrt(k_dot_e * np.conj(k_dot_e))\n\n # Build output dict\n extra_param = {\"Degree of electromagnetism\": np.log10(b_tot / e_tot),\n \"Degree of longitudinality\": np.abs(e_par) / e_tot,\n \"Degree of parallelity E\": e_z / e_tot,\n \"Degree of parallelity B\": np.sqrt(\n b_z * np.conj(b_z)) / b_tot,\n \"Ellipticity E\": e_pol, \"Ellipticity B\": b_pol,\n \"E_part/E_field\": np.log10(ratio_part_field),\n \"v_g\": np.sqrt(v_x ** 2 + v_z ** 2),\n \"v_ph/v_a\": np.log10(v_ph_va),\n \"E_e/E_i\": np.log10(en_e / en_i),\n \"v_e/v_i\": np.log10(np.sqrt(v_e2 / v_i2)),\n \"v_epara/v_eperp\": np.log10(vepar_perp),\n \"v_ipara/v_iperp\": np.log10(vipar_perp),\n \"dn_e/dn_i\": np.log10(dne_dni),\n \"(dn_e/n)/ (dB/B)\": np.log10(dn_e_n_db_b),\n \"(dn_i/n)/(dB/B)\": np.log10(dn_i_n_db_b),\n \"(dn_i/n)/(dBpar/B)\": np.log10(dn_i_n_dbpar_b),\n \"(dn_e/n)/(dB/B)\": np.log10(dn_e / k_dot_e),\n \"(dn_e/n)/(dBpar /B)\": np.log10(dn_e_n_dbpar_b),\n \" Spar/Stot\": s_par / s_tot}\n\n for k, v in zip(extra_param.keys(), extra_param.values()):\n extra_param[k] = np.transpose(np.real(v), [0, 2, 1])\n\n kx_ = np.transpose(kc_x_mat)\n kz_ = np.transpose(kc_z_mat)\n wf_ = np.transpose(w_final, [0, 2, 1])\n\n return kx_, kz_, wf_, extra_param", "def gauss_kernel(radius, n_sigmas=8):\n sizex = int(n_sigmas * radius)\n sizey = int(n_sigmas * radius)\n radius = float(radius)\n xc = 0.5 * sizex\n yc = 0.5 * sizey\n y, x = np.mgrid[0:sizey - 1, 0:sizex - 1]\n x = x - xc\n y = y - yc\n x = x / radius\n y = y / radius\n g = np.exp(-0.5 * (x ** 2 + y ** 2))\n return g / (2 * np.pi * radius ** 2) # g.sum()", "def kernel(self):\n\n # Create a blank kernel the appropriate size\n kernel = np.zeros((self.n_rows, self.n_cols), dtype=np.int)\n\n # Iterate through the 
offsets, turning on the correct pixels\n for offset in self.offsets:\n row, col = offset\n if np.all(offset == self.index):\n kernel[row, col] = 2\n else:\n kernel[row, col] = 1\n\n # Ensure that the index pixel is not zero for footprints where the\n # index pixel is not part of the footprint\n if kernel[self.index[0], self.index[1]] == 0:\n kernel[self.index[0], self.index[1]] = 3\n return kernel", "def bilinear_interpolation_kernel(in_channels, out_channels, ksize):\n\n factor = (ksize + 1) / 2\n if ksize % 2 == 1:\n center = factor - 1\n else:\n center = factor - 0.5\n og = np.ogrid[:ksize, :ksize]\n k = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)\n \n W = np.zeros((in_channels, out_channels, ksize, ksize)).astype(np.float32)\n W[range(in_channels), range(out_channels), :, :] = k\n return W", "def aGMKernel(Ni,Nj,alpha,gamma):\n \n #Dimension of data\n d = Ni.mu.size\n I = sp.eye(d)\n\n ##Normalisation\n deltaMean = (Ni.mu-Nj.mu).reshape(d,)\n SigmaSum = alpha * (Ni.Sigma+Nj.Sigma) + I/gamma\n Kij = (linalg.det(2*gamma*alpha * Ni.Sigma + I) * linalg.det(2*gamma*alpha * Nj.Sigma + I))**0.25\n Kij *= sp.exp(-0.5*sp.dot(deltaMean.T,linalg.solve(SigmaSum,deltaMean)))\n Kij /= sp.sqrt(linalg.det(SigmaSum*gamma)) \n \n return Kij", "def shrink_kernel(self, kernel, up_scale):\n up_scale = torch.tensor(up_scale).float()\n # boundary padding based on the scaling law\n pad_in = (torch.ceil(up_scale**2).int())*((kernel.shape[2]-1)//2)\n pad_h = (torch.ceil(up_scale).int())*((kernel.shape[3]-1)//2)\n pad_w = (torch.ceil(up_scale).int())*((kernel.shape[4]-1)//2)\n padded_kernel = F.pad(kernel, (pad_w, pad_w, pad_h, pad_h, pad_in, pad_in))\n delta = up_scale%1\n \n if delta == 0:\n shrink_factor = 1\n else:\n # shrink_factor for coordinates.\n shrink_factor = (((kernel.shape[4]-1))/(padded_kernel.shape[-1]-1)*(up_scale+1))\n \n # Adjustment to deal with weird filtering on the grid sample function.\n shrink_factor = 1.5*(shrink_factor-0.5)**3 + 0.57 \n\n grid = torch.meshgrid(torch.linspace(-1, 1, kernel.shape[2])*(shrink_factor**2),\n torch.linspace(-1, 1, kernel.shape[3])*shrink_factor, \n torch.linspace(-1, 1, kernel.shape[4])*shrink_factor)\n\n grid = torch.cat([grid[2].unsqueeze(0).unsqueeze(-1), \n grid[1].unsqueeze(0).unsqueeze(-1), \n grid[0].unsqueeze(0).unsqueeze(-1)], dim = -1).repeat(kernel.shape[0],1,1,1,1)\n\n new_kernel = F.grid_sample(padded_kernel, grid.to(device))\n if kernel.shape[-1] - 2*up_scale > 0:\n new_kernel = new_kernel * (kernel.shape[-1]**2/((kernel.shape[-1] - 2*up_scale)**2 + 0.01))\n return new_kernel", "def delta(N):\n assert assert_odd(N) # Make sure kernel is odd\n X = np.zeros((N,N)) # Square matrix with all 0s\n middle = int(N/2) # Get the middle cell\n X[middle, middle] = 1\n return X", "def sbil_kernel(delta, obs_stats, t, ar, s, kernel='Gaussian'):\n #np.random.shuffle(delta)\n print(delta)\n sbil_kernel_estimate = []\n obs_stats = obs_stats[delta > 0]\n\n sim_theta = [select.generate_theta_sv(ar) for i in range(s)]\n sim_theta = np.matrix(sim_theta).T\n\n # Generate out sample of time series.\n sim_y = [sim.sim_sv(t, sim_theta[0, i], sim_theta[1, i], sim_theta[2, i],\n sim_theta[3, i], 1) for i in range(s)]\n \n # Generate out sample statistics.\n sim_stats = [sum_stat.sv_stats(delta, sim_y[i]) for i\n in range(s)]\n\n sim_theta_mean = sum(sim_theta.T)/s\n\n # Compute sample variance.\n u = sum([np.square(sim_theta[:, i] - sim_theta_mean.T)\n for i in range(s)])/s\n\n # Standardize parameter vectors.\n sim_theta = 
np.hstack([(sim_theta[:, i] - sim_theta_mean.T)/np.sqrt(u)\n for i in range(s)])\n\n global theta_sigma\n global theta_mean\n theta_sigma = np.sqrt(u)\n theta_mean = sim_theta_mean\n\n # Standardize observed statistics.\n obs_stats = (obs_stats - np.mean(sim_stats, 0))/np.std(sim_stats, 0)\n\n # Compute sample mean.\n sim_stats_mean = sum(sim_stats)/s\n\n # Compute sample variance.\n u = sum([np.square(sim_stats[i]-sim_stats_mean) for i in range(s)])/s\n\n # Standardize simulated statistics.\n sim_stats = [(sim_stats[i] - sim_stats_mean)/np.sqrt(u) for i in range(s)]\n\n # Identify k nearest neighbors.\n norms = [np.linalg.norm(obs_stats-sim_stats[i]) for i in range(s)]\n closest_index = np.argsort(norms)\n closest_thetas = [sim_theta[:, i] for i in closest_index[0:round(s*0.03)]]\n\n # Compute k-nn estimate.\n estimate_standard = (sum(closest_thetas)/len(closest_thetas))\n\n estimate = np.array(estimate_standard.T)*np.array(\n theta_sigma.T) + np.array(theta_mean)\n\n return estimate", "def makeGaussianKernel(sigma: float) -> np.ndarray:\n\n # Your code here.\n kernel_size = 8*sigma+1\n kernel = np.zeros([kernel_size,kernel_size], dtype=float)\n center = kernel_size//2\n \n \n s = 2*(sigma**2)\n sum_val = 0\n for i in range(0,kernel_size):\n for j in range(0,kernel_size):\n x = i-center\n y = j-center\n kernel[i,j] = np.exp(-(x**2+y**2) / s)\n sum_val += kernel[i,j]\n #/(np.pi * s)\n sum_val = 1/sum_val\n print(\"here is the kernel\", kernel*sum_val)\n return kernel*sum_val", "def Pkernel(x):\n\n m = (x < 0.) & (x >= 1.)\n x[x < 0.] = np.zeros(np.sum(x < 0.))\n x[x >= 1.] = np.zeros(np.sum(x >= 1.))\n x = np.sqrt(x)\n\n result = np.log(2.) * np.log(2.) - np.pi *np.pi / 6. \\\n + 2. * spence(0.5 + 0.5 * x) - (x + x*x*x) / (1. - x*x) \\\n + (np.log(1. + x) - 2. * np.log(2.)) * np.log(1. - x) \\\n + 0.5 * (np.log(1. - x) * np.log(1. - x) - np.log(1. + x) * np.log(1. + x)) \\\n + 0.5 * (1. + x*x*x*x) / (1. - x*x) * (np.log(1. + x) - np.log(1. - x))\n result[x <= 0.] = np.zeros(np.sum(x <= 0.))\n result[x >= 1.] 
= np.zeros(np.sum(x >= 1.))\n return result", "def calc_hypersphere_volume(r: float, n: int) -> float:\n return (math.pi ** (n / 2) * r ** n) / gamma((n / 2) + 1)", "def gkern(kernlen=21, nsig=3):\n\n interval = (2*nsig+1.)/(kernlen)\n x = np.linspace(-nsig-interval/2., nsig+interval/2., kernlen+1)\n kern1d = np.diff(st.norm.cdf(x))\n kernel_raw = np.sqrt(np.outer(kern1d, kern1d))\n kernel = kernel_raw/np.max(kernel_raw)#.sum()\n return kernel", "def edge_kernel(isotropic):\n if isotropic:\n edge_kernel = - 1.0 * np.ones([3, 3, 3], np.float64)\n edge_kernel[1, 1, 1] = 26.0\n else:\n edge_kernel = - 1.0 * np.ones([1, 3, 3], np.float64)\n edge_kernel[0, 1, 1] = 8\n return edge_kernel", "def weak_lensing_kernel(cosmo, pzs, z, ell):\n z = np.atleast_1d(z)\n zmax = max([pz.zmax for pz in pzs])\n # Retrieve comoving distance corresponding to z\n chi = bkgrd.radial_comoving_distance(cosmo, z2a(z))\n\n # Extract the indices of pzs that can be treated as extended distributions,\n # and the ones that need to be treated as delta functions.\n pzs_extended_idx = [\n i for i, pz in enumerate(pzs) if not isinstance(pz, rds.delta_nz)\n ]\n pzs_delta_idx = [i for i, pz in enumerate(pzs) if isinstance(pz, rds.delta_nz)]\n # Here we define a permutation that would put all extended pzs at the begining of the list\n perm = pzs_extended_idx + pzs_delta_idx\n # Compute inverse permutation\n inv = np.argsort(np.array(perm, dtype=np.int32))\n\n # Process extended distributions, if any\n radial_kernels = []\n if len(pzs_extended_idx) > 0:\n\n @vmap\n def integrand(z_prime):\n chi_prime = bkgrd.radial_comoving_distance(cosmo, z2a(z_prime))\n # Stack the dndz of all redshift bins\n dndz = np.stack([pzs[i](z_prime) for i in pzs_extended_idx], axis=0)\n return dndz * np.clip(chi_prime - chi, 0) / np.clip(chi_prime, 1.0)\n\n radial_kernels.append(simps(integrand, z, zmax, 256) * (1.0 + z) * chi)\n # Process single plane redshifts if any\n if len(pzs_delta_idx) > 0:\n\n @vmap\n def integrand_single(z_prime):\n chi_prime = bkgrd.radial_comoving_distance(cosmo, z2a(z_prime))\n return np.clip(chi_prime - chi, 0) / np.clip(chi_prime, 1.0)\n\n radial_kernels.append(\n integrand_single(np.array([pzs[i].params[0] for i in pzs_delta_idx]))\n * (1.0 + z)\n * chi\n )\n # Fusing the results together\n radial_kernel = np.concatenate(radial_kernels, axis=0)\n # And perfoming inverse permutation to put all the indices where they should be\n radial_kernel = radial_kernel[inv]\n\n # Constant term\n constant_factor = 3.0 * const.H0 ** 2 * cosmo.Omega_m / 2.0 / const.c\n # Ell dependent factor\n ell_factor = np.sqrt((ell - 1) * (ell) * (ell + 1) * (ell + 2)) / (ell + 0.5) ** 2\n return constant_factor * ell_factor * radial_kernel", "def periodic_kernel(rmax, kernel, pos, wts, log=null_log):\n if rmax>=0.5:\n raise Exception('Cannot have rmax greater than half the box size, could get periodic images')\n\n num_pts = len(pos)\n pos = array(pos)\n wts = array(wts)\n\n print('Finding optimal shift',file=log)\n pos = shift_pos_optimally(pos, rmax, log)\n print('Padding the unit cube', file=log)\n pad_idx, pad_pos = pad_unitcube(pos, rmax)\n\n print('Inserted {:,} ghost particles for periodicity'.format(len(pad_idx)),file=log)\n new_pts = concatenate((pos, pad_pos), axis=0)\n\n if sum(wts.shape)<=1:\n new_wts = empty(len(new_pts), dtype=wts.dtype)\n new_wts[:] = wts\n else:\n new_wts = concatenate((wts, wts[pad_idx]))\n\n # Scale everything to be in the new box\n scale_fac = 1.0 / (1+2*rmax) \n new_pts += rmax\n new_pts *= scale_fac\n\n pairs, 
sort_idx, pos, wts, accel = radial_kernel_evaluate(rmax*scale_fac, kernel, new_pts, new_wts, log=log, sort_data=True)\n\n # unsort only the real points\n unsort = empty_like(sort_idx)\n unsort[sort_idx] = arange(len(new_pts))\n unsort = unsort[:num_pts]\n\n accel = accel[unsort]\n\n # undo the scale factor (remember dx's were all shortened)\n accel *= 1.0/scale_fac\n\n return pairs, accel", "def gkern2(kernlen=21, nsig=3):\n # create nxn zeros\n inp = np.zeros((kernlen, kernlen))\n # set element at the middle to one, a dirac delta\n inp[kernlen//2, kernlen//2] = 1\n # gaussian-smooth the dirac, resulting in a gaussian filter mask\n kernel = scipy.ndimage.filters.gaussian_filter(inp, nsig)\n\n return kernel", "def rho2(x,m_ind):\n \n f = 0.0\n for k_ind in range(cfg.nomax):\n f -= concave_piece(x,k_ind,m_ind) \n\n return f", "def gkern(kernlen=21, nsig=3):\n interval = (2 * nsig + 1.) / (kernlen)\n x = np.linspace(-nsig - interval / 2., nsig + interval / 2., kernlen + 1)\n kern1d = np.diff(st.norm.cdf(x))\n kernel_raw = np.sqrt(np.outer(kern1d, kern1d))\n kernel = kernel_raw / kernel_raw.sum()\n return kernel;", "def kernel(self):\n V = self.matrix().kernel()\n D = self.domain()\n if not D.is_ambient():\n # Transform V to ambient space\n # This is a matrix multiply: we take the linear combinations of the basis for\n # D given by the elements of the basis for V.\n B = V.basis_matrix() * D.basis_matrix()\n V = B.row_module(D.base_ring())\n return self.domain().submodule(V, check=False)", "def create_filter_bank():\r\n kernels = []\r\n for theta in range(0, 2):\r\n theta = theta / 2. * np.pi\r\n for sigma in (3, 5):\r\n for frequency in (0.10, 0.25):\r\n kernel = np.real(gabor_kernel(frequency, theta=theta,\r\n sigma_x=sigma, sigma_y=sigma))\r\n kernels.append(kernel)\r\n print(len(kernels))\r\n return kernels", "def sh( values ):\n # ECMWF normalizes the spherical harmonic coeffs differently than NCEP.\n # (m=0,n=0 is global mean, instead of sqrt(2)/2 times global mean)\n fld = 2.*values/np.sqrt(2.)\n \n #------SPLITTING IT UP IN AN IMAGARY AND REAL PART--------\n fldr = fld[ 0::2 ] #annenhver verdi fra 0\n fldi = fld[ 1::2 ] #annenhver verdi fra 1\n fldn = np.zeros( fldr.shape, 'F' ) #blir halvparten sรฅ stor som orginale fld\n fldn.real = fldr #legges da til i fldn vectoren\n fldn.imag = fldi\n #----------------------------------------------------------\n \n nlons = 360 #Have a feeling it probably is number of values like grid val\n nlats = 1280 #web sais it shourld be 180.. 
wellwell, seems to work\n s = spharm.Spharmt( nlons, nlats ) \n \n data = s.spectogrd( fldn ) #Hvis nlats = 180, sรฅ feiler denne delen pga hvordan formelen fungerer..\n \n lons = ( 360./nlons ) * np.arange( nlons )\n lats = 90.-( 180./( nlats - 1 ) ) * np.arange( nlats )\n lons, lats = np.meshgrid( lons, lats )\n \n #stack grids side-by-side (in longitiudinal direction), so\n # any range of longitudes (between -360 and 360) may be plotted on a world map.\n lons = np.concatenate(( lons - 360, lons ), 1 )\n lats = np.concatenate(( lats, lats ), 1 )\n data = np.concatenate(( data, data ), 1 )\n \n return lats, lons, data", "def gaus_kernel_calc(kernel_size):\n base_gaus_binom = np.array([[1], [1]])\n kernel = base_gaus_binom\n\n if kernel_size == 1:\n # If the kernel size is 1 we need a 2d array that keeps the image the same.\n kernel = np.array([[1]])\n kernel = scipy.signal.convolve2d(kernel, kernel.transpose())\n return kernel\n\n for i in range(kernel_size - 2):\n kernel = scipy.signal.convolve2d(kernel, base_gaus_binom)\n\n kernel = scipy.signal.convolve2d(kernel, kernel.transpose())\n return kernel/kernel.sum()", "def _compute_R1_from_kernel(n, m, kernel):\r\n\r\n R1 = 0\r\n ind_vec = np.arange(m)\r\n for l in range(n):\r\n ind_vec.shape = (1,)*l + (m,) + (1,)*(n-l-1)\r\n R1 += np.sum((2*ind_vec+1) * kernel**2)\r\n\r\n return R1", "def my_kernel(X, Y):\n S = 0.84 # parameter from rhos\n\n if dset == 1:\n gamma = 0.0005\n else:\n gamma = 0.00087 # maximise variance of kernel matrix\n if np.array_equal(X, Y):\n N = X.shape[0]\n M = (1 - S) * np.ones((N, N)) + S * np.eye(N)\n else:\n M = 1\n\n pairwise_sq_dists = cdist(X, Y, 'sqeuclidean')\n K = exp(-gamma * pairwise_sq_dists) * M\n return K", "def _kernel(self, x, y, t):\n return (self.C / (2 * np.pi * self.sigma_x * self.sigma_y * t)) * \\\n tf.exp(- self.beta * t - (tf.square(x)/tf.square(self.sigma_x) + tf.square(y)/tf.square(self.sigma_y)) / (2*t))", "def phase_method(input_sig, output_by_phase, N, **kwargs):\n\n def required_nb_data_func(list_nb_coeff):\n \"\"\"Compute the minimum number of data required.\"\"\"\n return max(list_nb_coeff)\n\n def core_func(phi_by_term, out_by_phase, solver, sizes=[], cast_mode=''):\n \"\"\"Core computation of the identification.\"\"\"\n\n L = out_by_phase.shape[1]\n L = 2*L if cast_mode == 'real-imag' else L\n kernels = dict()\n _phi_by_term = _cast_complex2real(phi_by_term, cast_mode)\n\n for is_odd in [False, True]:\n curr_phases = range(2-is_odd, N+1, 2)\n curr_y = np.concatenate([_complex2real(out_by_phase[p],\n cast_mode=cast_mode)\n for p in curr_phases], axis=0)\n\n curr_phi = np.bmat(\n [[_phi_by_term.get((p+2*k, k), np.zeros((L, sizes[p+2*k-1]))) *\n binomial(p+2*k, k) for k in range(1-(p+1)//2, 1+(N-p)//2)]\n for p in curr_phases])\n\n if not is_odd:\n curr_y = np.concatenate((np.real(out_by_phase[0]), curr_y),\n axis=0)\n n_even = range(2, N+1, 2)\n temp = np.concatenate([_phi_by_term[n, n//2] *\n binomial(n, n//2) for n in n_even],\n axis=1)\n curr_phi = np.concatenate((np.real(temp), curr_phi), axis=0)\n\n curr_f = _solver(curr_phi, curr_y, solver)\n\n index = 0\n for n in range(1 if is_odd else 2, N+1, 2):\n nb_term = sizes[n-1]\n kernels[n] = curr_f[index:index+nb_term]\n index += nb_term\n\n return kernels\n\n return _identification(input_sig, output_by_phase, N,\n required_nb_data_func, core_func, 'term', **kwargs)", "def pure_gabor():\n \n dots = pickle.load(open(\"/Users/bptripp/code/nengo-FPGA/v1/dot-images-coh1-2000ms-s02.p\", \"rb\" ), encoding='latin1') \n x = np.arange(-40, 
41, 1)\n gaborx, gabory = make_gabors(x)\n centres = np.array([[200,200]])\n \n nf = dots.shape[2]\n nrf = centres.shape[0] # number of receptive fields\n ng = gaborx.shape[1] # number of gabors per receptive field\n \n # offsets (from RF centres) of subimages to multiply with kernels\n vw = int(np.floor(gabory.size/2))\n v_offsets = np.arange(-vw, vw+1)\n hw = int(np.floor(gaborx.shape[0]/2))\n h_offsets = np.arange(-hw, hw+1)\n \n result = np.zeros((nrf, ng, nf))\n for i in range(dots.shape[2]): \n for j in range(nrf): \n v_indices = v_offsets + centres[j,0]\n h_indices = h_offsets + centres[j,1]\n region = dots[v_indices[:,np.newaxis],h_indices,i]\n for k in range(ng): \n gabor = np.outer(gabory, gaborx[:,k])\n result[j,k,i] = np.sum(gabor * region)\n return result", "def test_uv_degrid_gaussian_kernel():\n\n layout = read_layout(layout_path=f\"{test_data}/test_mwa.txt\")\n xyz = enh_xyz(layout=layout, latitude=mwa_geo.latitude.radians)\n uvw = xyz_uvw(xyz=xyz, freq=freq, dec0=mwa_geo.latitude.radians, ha0=0)\n uv = uv_degrid(\n max_lambda=1400, nside=20, uvw=uvw, sigma=3, kersize=21, kernel=\"gaussian\"\n )\n\n assert uv.shape == (20, 20)\n assert uv[0, 0] == 1.295932713086053e-05", "def _vrms2(x, y, inc_deg,\n surf_lum, sigma_lum, qobs_lum,\n surf_pot, sigma_pot, qobs_pot,\n beta, tensor, sigmaPsf, normPsf,\n pixSize, pixAng, step, nrad, nang):\n # Axisymmetric deprojection of both luminous and total mass.\n # See equation (12)-(14) of Cappellari (2008)\n #\n inc = np.radians(inc_deg)\n\n qintr_lum = qobs_lum**2 - np.cos(inc)**2\n if np.any(qintr_lum <= 0):\n raise RuntimeError('Inclination too low q < 0')\n qintr_lum = np.sqrt(qintr_lum)/np.sin(inc)\n if np.any(qintr_lum < 0.05):\n raise RuntimeError('q < 0.05 components')\n dens_lum = surf_lum*qobs_lum / (sigma_lum*qintr_lum*np.sqrt(2*np.pi))\n\n qintr_pot = qobs_pot**2 - np.cos(inc)**2\n if np.any(qintr_pot <= 0):\n raise RuntimeError('Inclination too low q < 0')\n qintr_pot = np.sqrt(qintr_pot)/np.sin(inc)\n if np.any(qintr_pot < 0.05):\n raise RuntimeError('q < 0.05 components')\n dens_pot = surf_pot*qobs_pot / (sigma_pot*qintr_pot*np.sqrt(2*np.pi))\n\n # Define parameters of polar grid for interpolation\n #\n w = sigma_lum < np.max(np.abs(x)) # Characteristic MGE axial ratio in observed range\n\n if w.sum() < 3:\n qmed = np.median(qobs_lum)\n else:\n qmed = np.median(qobs_lum[w])\n\n rell = np.sqrt(x**2 + (y/qmed)**2) # Elliptical radius of input (x, y)\n\n psfConvolution = (np.max(sigmaPsf) > 0) and (pixSize > 0)\n\n # Kernel step is 1/4 of largest value between sigma(min) and 1/2 pixel side.\n # Kernel half size is the sum of 3*sigma(max) and 1/2 pixel diagonal.\n #\n if (nrad*nang > x.size) and (not psfConvolution): # Just calculate values\n\n xPol = x\n yPol = y\n\n else: # Interpolate values on polar grid\n\n if psfConvolution: # PSF convolution\n if step == 0:\n step = max(pixSize/2., np.min(sigmaPsf))/4.\n mx = 3*np.max(sigmaPsf) + pixSize/np.sqrt(2)\n else: # No convolution\n step = np.min(rell.clip(1)) # Minimum radius of 1pc\n mx = 0\n\n # Make linear grid in log of elliptical radius RAD and eccentric anomaly ANG\n # See Appendix A\n #\n rmax = np.max(rell) + mx # Major axis of ellipse containing all data + convolution\n logRad = np.linspace(np.log(step), np.log(rmax), nrad) # Linear grid in np.log(rell)\n ang = np.linspace(0, np.pi/2, nang) # Linear grid in eccentric anomaly\n radGrid, angGrid = map(np.ravel, np.meshgrid(np.exp(logRad), ang))\n xPol = radGrid*np.cos(angGrid)\n yPol = radGrid*np.sin(angGrid) * qmed\n\n 
# The model Vrms computation is only performed on the polar grid\n # which is then used to interpolate the values at any other location\n #\n wm2Pol = np.empty_like(xPol)\n mgePol = np.empty_like(xPol)\n for j in range(xPol.size):\n wm2Pol[j] = quadva(_integrand, [0., 1.],\n args=(dens_lum, sigma_lum, qintr_lum,\n dens_pot, sigma_pot, qintr_pot,\n xPol[j], yPol[j], inc, beta, tensor))[0]\n mgePol[j] = np.sum(surf_lum * np.exp(-0.5/sigma_lum**2 *\n (xPol[j]**2 + (yPol[j]/qobs_lum)**2)))\n\n\n if psfConvolution: # PSF convolution\n\n nx = np.ceil(rmax/step)\n ny = np.ceil(rmax*qmed/step)\n x1 = np.linspace(-nx, nx, 2*nx)*step\n y1 = np.linspace(-ny, ny, 2*ny)*step\n xCar, yCar = np.meshgrid(x1, y1) # Cartesian grid for convolution\n\n # Interpolate MGE model and Vrms over cartesian grid\n #\n r1 = 0.5*np.log(xCar**2 + (yCar/qmed)**2) # Log elliptical radius of cartesian grid\n e1 = np.arctan2(np.abs(yCar/qmed), np.abs(xCar)) # Eccentric anomaly of cartesian grid\n\n wm2Car = bilinear_interpolate(logRad, ang, wm2Pol.reshape(nang, nrad), r1, e1)\n mgeCar = bilinear_interpolate(logRad, ang, mgePol.reshape(nang, nrad), r1, e1)\n\n nk = np.ceil(mx/step)\n kgrid = np.linspace(-nk, nk, 2*nk)*step\n xgrid, ygrid = np.meshgrid(kgrid, kgrid) # Kernel is square\n if pixAng != 0:\n xgrid, ygrid = rotate_points(xgrid, ygrid, pixAng)\n\n # Compute kernel with equation (A6) of Cappellari (2008).\n # Normaliztion is irrelevant here as it cancels out.\n #\n kernel = np.zeros_like(xgrid)\n dx = pixSize/2\n sp = np.sqrt(2)*sigmaPsf\n for j in range(len(sigmaPsf)):\n kernel += normPsf[j] \\\n * (special.erf((dx-xgrid)/sp[j]) + special.erf((dx+xgrid)/sp[j])) \\\n * (special.erf((dx-ygrid)/sp[j]) + special.erf((dx+ygrid)/sp[j]))\n kernel /= np.sum(kernel)\n\n # Seeing and aperture convolution with equation (A3)\n #\n muCar = signal.fftconvolve(wm2Car, kernel, mode='same') \\\n / signal.fftconvolve(mgeCar, kernel, mode='same')\n\n # Interpolate convolved image at observed apertures.\n # Aperture integration was already included in the kernel.\n #\n mu = bilinear_interpolate(x1, y1, muCar, x, y)\n\n else: # No PSF convolution\n\n muPol = wm2Pol/mgePol\n\n if nrad*nang > x.size: # Just returns values\n mu = muPol\n else: # Interpolate values\n r1 = 0.5*np.log(x**2 + (y/qmed)**2) # Log elliptical radius of input (x,y)\n e1 = np.arctan2(np.abs(y/qmed), np.abs(x)) # Eccentric anomaly of input (x,y)\n mu = bilinear_interpolate(logRad, ang, muPol.reshape(nang, nrad), r1, e1)\n\n return mu", "def kernel(self, cosmo, z, ell):\n z = np.atleast_1d(z)\n # Extract parameters\n pzs, m = self.params[:2]\n kernel = weak_lensing_kernel(cosmo, pzs, z, ell)\n # If IA is enabled, we add the IA kernel\n if self.config[\"ia_enabled\"]:\n bias = self.params[2]\n kernel += nla_kernel(cosmo, pzs, bias, z, ell)\n # Applies measurement systematics\n if isinstance(m, list):\n m = np.expand_dims(np.stack([mi for mi in m], axis=0), 1)\n kernel *= 1.0 + m\n return kernel", "def fdm_2d(N,L,x,y,h,k):\n\n # Create the Laplacian as a 1d sparse matrix using central difference\n ones = np.ones(N)\n diagvalues = np.array([ones,-2*ones,ones])\n offsets = np.array([-1,0,1])\n lap1d = sps.dia_matrix((diagvalues,offsets), shape=(N,N))/h**2\n \n # Represent 2d coordinates as kronecker sum\n lap = sps.kron(lap1d,sps.diags(np.ones(N))) + \\\n sps.kron(sps.diags(np.ones(N)),lap1d)\n \n # potential terms\n pot_x = np.repeat(x**2,N)\n pot_y = np.tile(y**2,N)\n\n # The whole Hamiltonian in matrix form\n A = (-1*lap + sps.diags(pot_x) + sps.diags(pot_y))/2\n\n # 
Calculate the k smallest eigenvalues and corresponding eigenvectors\n E, psi = eigsh(A,k=k,which='SM')\n\n\n # Perturbated potential\n a = 25\n pot_new = pot_x + pot_y + gauss_pert(N,a).flatten()\n\n # Plot the new potential\n X,Y = np.meshgrid(x,y)\n fig = plt.figure()\n ax = fig.add_subplot(1,2,1,projection='3d')\n ax.plot_surface(X, Y, pot_new.reshape((N,N)), cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n ax = fig.add_subplot(1,2,2)\n fig.suptitle(r'Potential with a Gaussian perturbation')\n ax.imshow(pot_new.reshape(N,N),extent=[-L/2,L/2,-L/2,L/2])\n plt.savefig(os.path.join(path,'perturbated_potential.png'))\n\n # The perturbated Hamiltonian in matrix form\n A = (-1*lap + sps.diags(pot_new))/2\n\n # Calculate the k smallest eigenvalues and corresponding eigenvector\n # Of the perturbated system\n E_p, psi_p = eigsh(A,k=k,which='SM')\n\n return E,psi,E_p,psi_p", "def gaussian_filter(img,f=5,K=1,var=1):\n i_x, i_y = np.shape(img) # image size\n radi = f//2 # window radius\n\n # create gaussian kernel\n def gaussian_kernel(f,K,var):\n \n # create coordinate information \n if f//2 == 0:\n x = np.linspace(-radi,radi,f+1)\n y = np.linspace(-radi,radi,f+1)\n x = np.delete(x, radi)\n y = np.delete(y, radi)\n else:\n x = np.linspace(-radi,radi,f)\n y = np.linspace(-radi,radi,f)\n\n m_x, m_y = np.meshgrid(x,y) # create coordinate\n r_gauss = m_x**2 + m_y**2 # distance to origin\n gauss = K*(np.exp(-r_gauss/(2*(var**2)))) # create kernel\n return gauss/gauss.sum()\n \n #mirror padding\n def mir_padding(img,f):\n img_p = np.zeros((i_x+2*radi,i_y+2*radi)) #create padding image\n img_p[radi:i_x+radi,radi:i_y+radi] = img #throw original image to padding image\n img_p[0:radi,radi:i_y+radi] = img[radi-1::-1,:] # padding top rows\n img_p[-radi::1,radi:i_y+radi] = img[-1:-radi-1:-1,:] # padding bottom rows\n img_p[radi:i_x+radi,0:radi] = img[:,radi-1::-1] # padding left column\n img_p[radi:i_x+radi,-radi::1] = img[:,-1:-radi-1:-1] # padding right column\n for i in range(f):\n img_p[0:radi,i] = img[radi-1-i,radi-1::-1] # padding upper-left corner\n img_p[0:radi,-i] = img[radi-1-i,-radi::1] # padding upper-righ corner\n img_p[-1:-radi-1:-1,i] = img[-radi+i,radi-1::-1] # padding lower-left corner\n img_p[-1:-radi-1:-1,-i] = img[-radi+i,-radi::1] # padding lower-right corner\n return img_p\n\n img_p = mir_padding(img,f) # create padding image\n g_kernel = gaussian_kernel(f,K,var) # create gaussian kernel\n\n #seperate kernel\n E = g_kernel[0,0]\n c = g_kernel[:,0]\n wT = np.reshape(g_kernel[0,:]/E,(f,1))\n\n gauss_image = np.zeros([i_x,i_y]) # create gauss image\n temp_image = np.zeros([i_x,i_y]) # create temp image for two 1D convolution\n old_c_sum = c.sum() # calculate sum of c before modification\n\n # if elements of kernel are located within area of padding, substitute value with 0\n # calculate new value base on ratio between sum before and after modification\n for j in range(i_y):\n y_bound = i_y - j\n mod_c = c.copy()\n if j < radi:\n mod_c[0:radi-j] = 0 \n new_c_sum = mod_c.sum()\n mod_c = mod_c*old_c_sum/new_c_sum \n if j > i_y - radi - 1:\n mod_c[-1:-radi+y_bound-1:-1] = 0 \n new_c_sum = mod_c.sum()\n mod_c = mod_c*old_c_sum/new_c_sum \n for i in range(i_x):\n temp_image[i,j] = np.sum(img_p[i+radi,j:j+f]*mod_c)\n\n temp_image = mir_padding(temp_image,f) # create padding temp image for next 1D convolution\n old_wT_sum = wT.sum() # calculate sum of wT before modification\n\n # if elements of kernel are located within area of padding, substitute value with 0\n # calculate new value base on 
ratio between sum before and after modification\n for i in range(i_x):\n x_bound = i_x - i\n mod_wT = wT.copy()\n if i < radi:\n mod_wT[0:radi-i] = 0 \n new_wT_sum = mod_wT.sum()\n mod_wT = mod_wT*old_wT_sum/new_wT_sum \n if i > i_x - radi - 1:\n mod_wT[-1:-radi+x_bound-1:-1] = 0 \n new_wT_sum = mod_wT.sum()\n mod_wT = mod_wT*old_wT_sum/new_wT_sum \n for j in range(i_y):\n gauss_image[i,j] = np.sum(temp_image[i:i+f,j+radi]*mod_wT.T)\n\n return gauss_image", "def gaussian_kernel(N, mu, sigma):\n # Asserting N is odd and sigma is number\n assert assert_odd(N)\n \n # Create the normal here (with ID covariance) \n normal = multivariate_normal(mean=mu, cov=sigma*np.identity(2))\n \n # Create the position matries (x_1,x_2 in 2D)\n X_1 = np.ones((N,N))*np.arange(N) # x_1 pos\n X_2 = X_1.T #x_2 pos, just transpose the above\n \n # Shift the positions so center is at middle\n s = np.floor(N/2) #shift value\n X_1, X_2 = X_1-s, X_2-s # shifted matrices\n \n # Create holder matrix\n X = np.zeros((N,N)) # Below we have the iterator \n for (i,j) in [(i,j) for i in range(N) for j in range(N)]:\n X[i,j] = normal.pdf([X_1[i,j], X_2[i,j]]) # Normal values\n \n # Finally just return the normalized kernel\n return X*(1/np.sum(X))", "def get_blur_kernel(n):\n return [1/n**2] * n**2", "def nsphere_volume(n, r):\n return math.pi ** (n / 2) * (r ** n) / gamma(n / 2 + 1)", "def _compute_R2_from_kernel(n, m, kernel):\r\n\r\n R2 = 0\r\n ind_vec = np.arange(m)\r\n for l in range(n):\r\n ind_vec.shape = (1,)*l + (m,) + (1,)*(n-l-1)\r\n _idx1 = (slice(None),)*l + (slice(1, None),) + (slice(None),)*(n-l-1)\r\n _idx2 = (slice(None),)*l + (slice(m-1),) + (slice(None),)*(n-l-1)\r\n R2 += 2 * np.sum(ind_vec[_idx1] * kernel[_idx1] * kernel[_idx2])\r\n\r\n return R2", "def dpp_sw(kernel_matrix, window_size=3, max_length=14, epsilon=1E-10):\r\n item_size = kernel_matrix.shape[0]\r\n v = np.zeros((max_length, max_length))\r\n cis = np.zeros((max_length, item_size))\r\n di2s = np.copy(np.diag(kernel_matrix))\r\n selected_items = list()\r\n selected_item = np.argmax(di2s)\r\n selected_items.append(selected_item)\r\n window_left_index = 0\r\n while len(selected_items) < max_length:\r\n k = len(selected_items) - 1\r\n ci_optimal = cis[window_left_index:k, selected_item]\r\n di_optimal = math.sqrt(di2s[selected_item])\r\n v[k, window_left_index:k] = ci_optimal\r\n v[k, k] = di_optimal\r\n elements = kernel_matrix[selected_item, :]\r\n eis = (elements - np.dot(ci_optimal, cis[window_left_index:k, :])) / di_optimal\r\n cis[k, :] = eis\r\n di2s -= np.square(eis)\r\n if len(selected_items) >= window_size:\r\n window_left_index += 1\r\n for ind in range(window_left_index, k + 1):\r\n t = math.sqrt(v[ind, ind] ** 2 + v[ind, window_left_index - 1] ** 2)\r\n c = t / v[ind, ind]\r\n s = v[ind, window_left_index - 1] / v[ind, ind]\r\n v[ind, ind] = t\r\n v[ind + 1:k + 1, ind] += s * v[ind + 1:k + 1, window_left_index - 1]\r\n v[ind + 1:k + 1, ind] /= c\r\n v[ind + 1:k + 1, window_left_index - 1] *= c\r\n v[ind + 1:k + 1, window_left_index - 1] -= s * v[ind + 1:k + 1, ind]\r\n cis[ind, :] += s * cis[window_left_index - 1, :]\r\n cis[ind, :] /= c\r\n cis[window_left_index - 1, :] *= c\r\n cis[window_left_index - 1, :] -= s * cis[ind, :]\r\n di2s += np.square(cis[window_left_index - 1, :])\r\n di2s[selected_item] = -np.inf\r\n selected_item = np.argmax(di2s)\r\n if di2s[selected_item] < epsilon:\r\n break\r\n selected_items.append(selected_item)\r\n return selected_items", "def cs4243_filter(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = 
kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n ###Your code here####\n # pad image to handle border pixels\n pad_height = (int)((Hk - 1)/2)\n pad_width = (int)((Wk - 1)/2)\n image_pad = pad_zeros(image, pad_height, pad_width)\n\n # Flip the kernel horizontal and vertical\n kernel = cs4243_rotate180(kernel)\n \n # compute effective output size, assume stride=1\n out_height = 1 + Hi - Hk + 2*pad_height\n out_width = 1 + Wi - Wk + 2*pad_width\n \n # get initial nodes of receptive fields\n recep_fields_h = [i for i in range(out_height)]\n recep_fields_w = [i for i in range(out_width)]\n \n for i in recep_fields_h:\n for j in recep_fields_w: \n # get receptive area\n recep_area = image_pad[i:i+Hk, j:j+Wk] \n\n # multiply recep_area with kernel\n conv_sum = 0.0\n for y in range(Hk):\n for x in range(Wk): \n conv_sum += kernel[y][x] * recep_area[y][x]\n filtered_image[i, j] = conv_sum\n ###\n\n return filtered_image", "def convolve2d(img, kernel):\n #Flip the kernel\n kernel = utils.flip2d(kernel) \n #print(len(kernel))\n \n c = copy.deepcopy(img)\n \n #print(len(c))\n #Padd the image\n pad = int((len(kernel)-1)/2)\n\n\n padded_img = utils.zero_pad(img,pad,pad)\n #print(len(padded_img), len(padded_img[0]))\n #print(len(kernel))\n #print(len(img)**2)\n og_img=[]\n#c = copy.deepcopy(img)\n j=0\n offset = 0\n for m in range(len(img) * len(img[0])): # size of kernel x kernel\n x = []\n \n for i in range(len(kernel)): #3 is kernel size\n #print(i,j)\n x.append(padded_img[i+offset][j:j+len(kernel)])\n #print((x))\n sum = 0\n for k in range(len(kernel)):\n for l in range(len(kernel[0])):\n sum+= x[k][l] * kernel[k][l]\n #print(i,j)\n #print(sum)\n og_img.append(sum) \n j+=1\n if (j == len(img[0])):\n j = 0\n offset+= 1\n \n #print(len(img), len(img[0]))\n final_img = []\n for i in range(0,(len(img)*len(img[0])),len(img[0])):\n final_img.append(og_img[i:i+len(img[0])])\n #print(len(final_img)), len(final_img[0])\n return final_img\n\n # TODO: implement this function.", "def sobel(kernel_size: int = 3) -> Tensor:\n assert kernel_size % 2 == 1\n s = kernel_size // 2\n k = torch.linspace(-s, s, kernel_size)\n kernel = torch.stack([k] * kernel_size)\n k[k == 0] = 1e-7\n div = torch.stack([k] * kernel_size)\n return kernel / (div ** 2 + div.T ** 2)", "def euclid_ccl(Omega_M):\n \n # Parameters from https://arxiv.org/pdf/1903.01473.pdf\n Omega_b_fraction = 0.15653724 # fraction of Omega_M\n \n sigma8 = 0.811\n Omega_b = Omega_b_fraction * Omega_M\n Omega_c = (1 - Omega_b_fraction) * Omega_M \n h = 0.674\n ns = 0.965\n w0 = -1.03\n\n cosmo_fid = ccl.Cosmology(Omega_c=Omega_c, Omega_b=Omega_b, h=0.674\n , sigma8=sigma8, n_s=ns, w0=w0)#, transfer_function='emulator', matter_power_spectrum='emu')\n\n dNdzs = np.zeros((nbins, z.size))\n shears = []\n \n for i in range(nbins):\n # edges of 1 equal width redshift bins, between 0 and 2\n zmin, zmax = i*(2./nbins), (i+1)*(2./nbins)\n # generate dNdz per bin\n dNdzs[i,:] = ccl.dNdz_tomog(z=z, zmin=zmin, zmax=zmax, pz_func=pz\n , dNdz_func = dNdz_true)\n # calculate the shear per bin\n gal_shapes = ccl.WeakLensingTracer(cosmo_fid, dndz=(z, dNdzs[i,:]))\n shears.append(gal_shapes)\n \n # calculate nbin*(nbin+1)/2 = 1 spectra from the shears\n Cls = []\n for i in range(nbins):\n for j in range(0,i+1):\n Cls.append(ccl.angular_cl(cosmo_fid, shears[i], shears[j], ells))\n \n return np.array(Cls), dNdzs", "def focus_field_beam(shape = (128,128,128),\n units = (0.1,0.1,0.1),\n lam =.5, NA = .6, n0 = 1.,\n return_all_fields = False,\n n_integration_steps = 200):\n\n\n p 
= OCLProgram(absPath(\"kernels/psf_debye.cl\"),\n build_options = [\"-I\",absPath(\"kernels\"),\"-D\",\"INT_STEPS=%s\"%n_integration_steps])\n\n if np.isscalar(NA):\n NA = [0.,NA]\n \n Nx0, Ny0, Nz0 = shape\n dx, dy, dz = units\n\n #FIXME: the loop below does not yet work for odd inputs\n if not Nx0%2+Ny0%2+Nz0%2==0:\n raise NotImplementedError(\"odd shapes not supported yet\")\n\n\n alphas = np.arcsin(np.array(NA)/n0)\n assert len(alphas)%2 ==0\n\n # as we assume the psf to be symmetric, we just have to calculate each octant\n Nx = Nx0//2+1\n Ny = Ny0//2+1\n Nz = Nz0//2+1\n\n u_g = OCLArray.empty((Nz,Ny,Nx),np.float32)\n ex_g = OCLArray.empty(u_g.shape,np.complex64)\n ey_g = OCLArray.empty(u_g.shape,np.complex64)\n ez_g = OCLArray.empty(u_g.shape,np.complex64)\n\n alpha_g = OCLArray.from_array(alphas.astype(np.float32))\n\n \n p.run_kernel(\"debye_wolf\",u_g.shape[::-1],None,\n ex_g.data,ey_g.data,ez_g.data, u_g.data,\n np.float32(1.),np.float32(0.),\n np.float32(0.),np.float32(dx*(Nx-1.)),\n np.float32(0.),np.float32(dy*(Ny-1.)),\n np.float32(0.),np.float32(dz*(Nz-1.)),\n np.float32(lam), np.float32(n0),\n alpha_g.data, np.int32(len(alphas)))\n\n u = u_g.get()\n ex = ex_g.get()\n ey = ey_g.get()\n ez = ez_g.get()\n\n u_all = np.empty((Nz0,Ny0,Nx0),np.float32)\n ex_all = np.empty((Nz0,Ny0,Nx0),np.complex64)\n ey_all = np.empty((Nz0,Ny0,Nx0),np.complex64)\n ez_all = np.empty((Nz0,Ny0,Nx0),np.complex64)\n\n sx = [slice(0,Nx),slice(Nx,Nx0)]\n sy = [slice(0,Ny),slice(Ny,Ny0)]\n sz = [slice(0,Nz),slice(Nz,Nz0)]\n\n\n\n # spreading the calculated octant to the full volume\n for i,j,k in itertools.product([0,1],[0,1],[0,1]):\n\n # i, j, k = 0 indicates the + octant\n\n u_all[sz[1-i],sy[1-j],sx[1-k]] = u[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n if i ==0:\n ex_all[sz[1-i],sy[1-j],sx[1-k]] = ex[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n ey_all[sz[1-i],sy[1-j],sx[1-k]] = ey[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n ez_all[sz[1-i],sy[1-j],sx[1-k]] = ez[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n\n else:\n ex_all[sz[1-i],sy[1-j],sx[1-k]] = np.conjugate(ex[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k])\n ey_all[sz[1-i],sy[1-j],sx[1-k]] = np.conjugate(ey[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k])\n ez_all[sz[1-i],sy[1-j],sx[1-k]] = np.conjugate(ez[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k])\n\n if return_all_fields:\n return u_all, ex_all, ey_all, ez_all\n else:\n return u_all", "def gaussian_kernel(size, sigma): \n \n kernel = np.zeros((size, size))\n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n k = (size - 1) / 2\n sigma_sq = sigma ** 2\n pi_sigma = 1/(2 * np.pi * sigma_sq)\n for i in range(size):\n for j in range(size):\n kernel[i, j] = pi_sigma * np.exp(-0.5 * ((i-k)**2 + (j-k)**2) / (sigma_sq))\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return kernel", "def __set_kernels(self):\n self.clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))", "def get_kernel(kernel_size, blur=1 / 20, halo=.001):\n\n # generate x and y grids\n x, y = np.mgrid[0:kernel_size * 2 + 1, 0:kernel_size * 2 + 1]\n\n center = kernel_size + 1 # center pixel\n r = np.sqrt((x - center) ** 2 + (y - center) ** 2) # distance from center\n\n # now compute the kernel. 
This function is a bit arbitrary.\n # adjust this to get the effect you want.\n kernel = np.exp(-r / kernel_size / blur) + (1 - r / r[center, 0]).clip(0) * halo\n return kernel", "def jackknife_errors_CLF(pos,Phi,Ndivs,Lbox,M,L_bins,dL,Mhost_min,Mhost_max,Mhost):\n\n n_subBox = Ndivs*Ndivs*Ndivs # The number of sub volumes for the Jackknife resampling\n V_subBox = Vbox - Vbox/n_subBox # The volume of a Jackknife sample\n N = len(pos) \n delta = Lbox/Ndivs\n \n # Indices for the galaxies positions\n index = np.asarray([floor(pos[i,0]/delta) + (floor(pos[i,1]/delta)*Ndivs) + (floor(pos[i,2]/delta)*Ndivs*Ndivs) + 1 for i in range(N)]) # index for the position of particle2\n M_sub_sample = [] # keeps the absolute magnitude for the sub-samples\n Mhost_sub_sample = [] # keeps the halo mass for the sub-samples\n CLF_all = [] # keeps the values of the CLF for the full sample and for each of the sub-samples\n CLF_all.append(Phi)\n for k in range(1,n_subBox+1): # run over the sub-samples\n for i in range(0,N): # runs over all the points (galaxies)\n if (index[i] != k): # the point is inside the sub-box\n M_sub_sample.append(M[i]) # then add to sub-box list\n Mhost_sub_sample.append(Mhost[i])\n CLF_sub,L_bins = CLF(M_sub_sample,L_bins,dL,Mhost_min,Mhost_max,Mhost_sub_sample)\n CLF_all.append(CLF_sub)\n M_sub_sample = []\n Mhost_sub_sample = []\n\n\tn_subBox = float(n_subBox)\n full = np.asarray(CLF_all[0]) # the CLF for the full sample\n sub_samples = np.asarray(CLF_all[1:]) # the CLF for the Jackknife sub-samples\n after_subtraction = sub_samples - np.mean(sub_samples,axis=0)\n squared = after_subtraction**2\n error2 = ((n_subBox-1)/n_subBox)*squared.sum(axis=0)\n errors = error2**0.5\n return errors", "def _create_kernel(sm_times, sm_freqs, kernel='hanning'):\n # frequency dependent kernels\n if isinstance(sm_times, (np.ndarray, list, tuple)):\n sm_freqs = 1 # force 1hz smoothing\n kernels = [_create_kernel(\n sm, sm_freqs, kernel=kernel) for sm in sm_times]\n return kernels\n\n # frequency independent kernels\n if kernel == 'square':\n return np.full((sm_freqs, sm_times), 1. / (sm_times * sm_freqs))\n elif kernel == 'hanning':\n hann_t, hann_f = np.hanning(sm_times), np.hanning(sm_freqs)\n hann = hann_f.reshape(-1, 1) * hann_t.reshape(1, -1)\n return hann / np.sum(hann)\n else:\n raise ValueError(f\"No kernel {kernel}\")", "def gaussian_kernel(size, sigma):\n\n kernel = np.zeros((size, size))\n\n ### YOUR CODE HERE\n k = (size-1)/2\n factor = 1/(2*np.pi*sigma**2)\n for i in range(size):\n for j in range(size):\n exponent = -((i-k)**2 +(j-k)**2)/(2*sigma**2)\n kernel[i,j] = factor*np.exp(exponent)\n ### END YOUR CODE\n\n return kernel", "def Ising_1D(N,h):\n sigma_x = np.array([[0,1],[1,0]])\n sigma_z = np.kron(np.array([[1,0],[0,-1]]), np.array([[1,0],[0,-1]]))\n H = np.zeros((2**N,2**N))\n\n # self-interaction\n for i in range(1,N+1): #va da 1 a N\n if (i==1):\n H += np.kron(sigma_x, np.identity(2**(N-1)))\n elif(i!=1 and i!=N):\n H += np.kron(np.identity(2**(i-1)), np.kron(sigma_x, np.identity(2**(N-i))))\n elif(i==N):\n H += np.kron(np.identity(2**(N-1)),sigma_x)\n\n # interaction\n H_tmp = np.zeros((2**N,2**N))\n for i in range(1, N):\n if(i==1):\n H_tmp += np.kron(sigma_z, np.identity(2**(N-2)))\n elif(i!=1 and i!=N-1):\n tmp=np.kron(sigma_z,np.identity(2**(N-i-1))) #dx\n H_tmp += np.kron(np.identity(2**(i-1)), tmp) #sx\n elif(i==N-1):\n H_tmp += np.kron(np.identity(2**(N-2)), sigma_z)\n\n H = -(h*H + H_tmp)\n\n return H", "def gkern(kernlen=21, nsig=3.5):\n\n interval = (2 * nsig + 1.) 
/ (kernlen)\n x = np.linspace(-nsig - interval / 2., nsig + interval / 2., kernlen + 1)\n kern1d = np.diff(st.norm.cdf(x))\n kernel_raw = np.sqrt(np.outer(kern1d, kern1d))\n kernel = kernel_raw / kernel_raw.sum()\n return kernel", "def kernal_mus(n_kernels):\n l_mu = [1]\n if n_kernels == 1:\n return l_mu\n\n bin_size = 2.0 / (n_kernels - 1) # score range from [-1, 1]\n l_mu.append(1 - bin_size / 2) # mu: middle of the bin\n for i in range(1, n_kernels - 1):\n l_mu.append(l_mu[i] - bin_size)\n print(l_mu)\n return l_mu", "def inp_kernel(r, ktype):\n \n if ktype == 'uniform':\n \n if r < 1.:\n return 1./((4./3.)*pi)\n else:\n return 0.\n \n elif ktype == 'sph-anarchy':\n \n if r <= 1.: return (21./(2.*pi)) * ((1. - r)*(1. - r)*(1. - r)*(1. - r)*(1. + 4.*r)) \n else: return 0. \n \n elif ktype == 'gadget-2':\n \n if r < 0.5: return (8./pi) * (1. - 6*(r*r) + 6*(r*r*r))\n elif r < 1.: return (8./pi) * 2 * ((1. - r)*(1. - r)*(1. - r))\n else: return 0.\n \n elif ktype == 'cubic':\n \n if r < 0.5: return (2.546479089470 + 15.278874536822 * (r - 1.0) * r * r)\n elif r < 1: return 5.092958178941 * (1.0 - r) * (1.0 - r) * (1.0 - r)\n else: return 0\n \n elif ktype == 'quintic':\n \n if r < 0.333333333: return 27.0*(6.4457752*r*r*r*r*(1.0-r) -1.4323945*r*r +0.17507044)\n elif r < 0.666666667: return 27.0*(3.2228876*r*r*r*r*(r-3.0) +10.7429587*r*r*r -5.01338071*r*r +0.5968310366*r +0.1352817016)\n elif r < 1: return 27.0*0.64457752*(-r*r*r*r*r +5.0*r*r*r*r -10.0*r*r*r +10.0*r*r -5.0*r +1.0)\n else: return 0\n \n else:\n \n print (\"Doesn't recognize the kernel. Input your own kernel in `inp_kernel`\")\n exit()", "def kernel(r, h, deriv):\n return {\n '0': h**-1 / np.sqrt(np.pi) * np.exp(-r**2/h**2),\n '1': h**-3 / np.sqrt(np.pi) * np.exp(-r**2/h**2) * (-2*r),\n '2': h**-5 / np.sqrt(np.pi) * np.exp(-r**2/h**2) * ( 4*r**2 - 2*h**2),\n '3': h**-7 / np.sqrt(np.pi) * np.exp(-r**2/h**2) * (-8*r**3 + 12*h**2*r)\n }[deriv]", "def get_kernel(ktype):\n \n kernel = np.zeros(kernsize + 1)\n this_kern = partial(inp_kernel, ktype=ktype)\n\n bins = np.arange(0, 1., 1./kernsize)\n bins = np.append(bins, 1.)\n\n for ii in range(kernsize):\n\n y, yerr = integrate.quad(integral_func(this_kern, bins[ii]), 0, np.sqrt(1.-bins[ii]**2))\n kernel[ii] = y * 2.\n \n return kernel", "def get_gaussian(nsig=1.5, kernlen=13):\n\n interval = (2*nsig+1.)/(kernlen)\n x = np.linspace(-nsig-interval/2., nsig+interval/2., kernlen+1)\n kern1d = np.diff(st.norm.cdf(x))\n kernel_raw = np.sqrt(np.outer(kern1d, kern1d))\n kernel = kernel_raw/kernel_raw.sum()\n return theano.shared(kernel.astype(\"float32\"), borrow=True)", "def shift_kernel(kernel, shape, centre):\n h, w = kernel.shape\n assert(h % 2 == 1)\n assert(w % 2 == 1)\n half_h = np.floor(h/2)\n half_w = np.floor(w/2)\n \n result = np.zeros((shape[0]+2*half_h, shape[1]+2*half_w)) #zero pad to simplify edge handling \n\n ind_h = centre[0] + np.arange(0, 2*half_h+1, dtype='int') \n ind_w = centre[1] + np.arange(0, 2*half_w+1, dtype='int')\n result[ind_h[:,np.newaxis], ind_w] = kernel\n result = result[half_h:-half_h,half_w:-half_w]\n return result", "def N_out(K,P,S,N_in):\n return (int((N_in+2*P-K)/S)+1)", "def test_fixedkernel(self):\r\n X = np.random.rand(30, 4)\r\n K = np.dot(X, X.T)\r\n kernel = GPy.kern.fixed(4, K)\r\n kern = GPy.kern.poly(5, degree=4)\r\n self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose))", "def calc_hypercube_volume(r: float, n: int) -> float:\n return (r * 2) ** n", "def make_hanning_kernel_tensor_1d_no_depthwise(n_channels, downsample=2, 
length_of_window=8, make_plots=False, normalize=False, sqrt_window=True):\n hanning_kernel = make_hanning_kernel_1d(downsample=downsample,length_of_window=length_of_window,make_plots=make_plots, normalize=normalize, sqrt_window=sqrt_window).astype(np.float32)\n hanning_kernel_expanded = np.expand_dims(hanning_kernel,0) * np.expand_dims(np.eye(n_channels),3).astype(np.float32) # [n_channels, n_channels, filter_width]\n hanning_tensor = tf.constant(hanning_kernel_expanded) # [length_of_window, num_channels, num_channels]\n hanning_tensor = tf.transpose(hanning_tensor, [2, 0, 1])\n return hanning_tensor", "def dilate_kernel(self, kernel, dilation):\n if dilation == 0:\n return kernel \n # inside padding based on the scaling law\n dilation = torch.tensor(dilation).float()\n delta = dilation%1\n\n d_in = torch.ceil(dilation**2).int()\n new_in = kernel.shape[2] + (kernel.shape[2]-1)*d_in\n\n d_h = torch.ceil(dilation).int()\n new_h = kernel.shape[3] + (kernel.shape[3]-1)*d_h\n\n d_w = torch.ceil(dilation).int()\n new_w = kernel.shape[4] + (kernel.shape[4]-1)*d_h\n\n new_kernel = torch.zeros(kernel.shape[0], kernel.shape[1], new_in, new_h, new_w)\n new_kernel[:,:,::(d_in+1),::(d_h+1), ::(d_w+1)] = kernel\n dilate_factor = 1\n \n new_kernel = F.pad(new_kernel, ((kernel.shape[4]-1)//2, (kernel.shape[4]-1)//2)*3)\n\n dilate_factor = (new_kernel.shape[-1] - 1 - (kernel.shape[4]-1)*(delta))/(new_kernel.shape[-1] - 1) \n\n grid = torch.meshgrid(torch.linspace(-1, 1, new_in)*(dilate_factor**2), \n torch.linspace(-1, 1, new_h)*dilate_factor, \n torch.linspace(-1, 1, new_w)*dilate_factor)\n\n grid = torch.cat([grid[2].unsqueeze(0).unsqueeze(-1), \n grid[1].unsqueeze(0).unsqueeze(-1), \n grid[0].unsqueeze(0).unsqueeze(-1)], dim = -1).repeat(kernel.shape[0],1,1,1,1)\n\n new_kernel = F.grid_sample(new_kernel, grid) \n \n return new_kernel[:,:,-kernel.shape[2]:]", "def csr2d_kick_calc(\n z_b,\n x_b,\n weight,\n *,\n gamma=None,\n rho=None,\n nz=100,\n nx=100,\n xlim=None,\n zlim=None,\n reuse_psi_grids=False,\n psi_s_grid_old=None,\n psi_x_grid_old=None,\n map_f=map,\n species=\"electron\",\n imethod='map_coordinates',\n debug=False,\n):\n assert species == \"electron\", \"TODO: support species {species}\"\n # assert np.sign(rho) == 1, 'TODO: negative rho'\n\n # Grid setup\n if zlim:\n zmin = zlim[0]\n zmax = zlim[1]\n else:\n zmin = z_b.min()\n zmax = z_b.max()\n\n if xlim:\n xmin = xlim[0]\n xmax = xlim[1]\n else:\n xmin = x_b.min()\n xmax = x_b.max()\n\n dz = (zmax - zmin) / (nz - 1)\n dx = (xmax - xmin) / (nx - 1)\n\n # Charge deposition\n t1 = time.time()\n charge_grid = histogram_cic_2d(z_b, x_b, weight, nz, zmin, zmax, nx, xmin, xmax)\n\n if debug:\n t2 = time.time()\n print(\"Depositing particles takes:\", t2 - t1, \"s\")\n\n # Normalize the grid so its integral is unity\n norm = np.sum(charge_grid) * dz * dx\n lambda_grid = charge_grid / norm\n\n # Apply savgol filter\n lambda_grid_filtered = np.array([savgol_filter(lambda_grid[:, i], 13, 2) for i in np.arange(nx)]).T\n\n # Differentiation in z\n lambda_grid_filtered_prime = central_difference_z(lambda_grid_filtered, nz, nx, dz, order=1)\n\n # Grid axis vectors\n zvec = np.linspace(zmin, zmax, nz)\n xvec = np.linspace(xmin, xmax, nx)\n\n beta = np.sqrt(1 - 1 / gamma ** 2)\n\n t3 = time.time()\n\n if reuse_psi_grids == True:\n psi_s_grid = psi_s_grid_old\n psi_x_grid = psi_x_grid_old\n\n else:\n # Creating the potential grids \n psi_s_grid, psi_x_grid, zvec2, xvec2 = green_meshes(nz, nx, dz, dx, rho=rho, beta=beta) \n \n if debug:\n t4 = time.time()\n 
print(\"Computing potential grids take:\", t4 - t3, \"s\")\n\n # Compute the wake via 2d convolution\n conv_s, conv_x = fftconvolve2(lambda_grid_filtered_prime, psi_s_grid, psi_x_grid)\n\n if debug:\n t5 = time.time()\n print(\"Convolution takes:\", t5 - t4, \"s\")\n\n Ws_grid = (beta ** 2 / abs(rho)) * (conv_s) * (dz * dx)\n Wx_grid = (beta ** 2 / abs(rho)) * (conv_x) * (dz * dx)\n\n # Calculate the kicks at the particle locations\n \n # Overall factor\n Nb = np.sum(weight) / e_charge\n kick_factor = r_e * Nb / gamma # m\n \n # Interpolate Ws and Wx everywhere within the grid\n if imethod == 'spline':\n # RectBivariateSpline method\n Ws_interp = RectBivariateSpline(zvec, xvec, Ws_grid)\n Wx_interp = RectBivariateSpline(zvec, xvec, Wx_grid)\n delta_kick = kick_factor * Ws_interp.ev(z_b, x_b)\n xp_kick = kick_factor * Wx_interp.ev(z_b, x_b)\n elif imethod == 'map_coordinates':\n # map_coordinates method. Should match above fairly well. order=1 is even faster.\n zcoord = (z_b-zmin)/dz\n xcoord = (x_b-xmin)/dx\n delta_kick = kick_factor * map_coordinates(Ws_grid, np.array([zcoord, xcoord]), order=2)\n xp_kick = kick_factor * map_coordinates(Wx_grid, np.array([zcoord, xcoord]), order=2) \n else:\n raise ValueError(f'Unknown interpolation method: {imethod}')\n \n if debug:\n t6 = time.time()\n print(f'Interpolation with {imethod} takes:', t6 - t5, \"s\") \n\n\n result = {\"ddelta_ds\": delta_kick, \"dxp_ds\": xp_kick}\n\n if debug:\n timing = np.array([t2-t1, t4-t3, t5-t4, t6-t5])\n result.update(\n {\n \"zvec\": zvec,\n \"xvec\": xvec,\n \"zvec2\": zvec2,\n \"xvec2\": xvec2,\n \"Ws_grid\": Ws_grid,\n \"Wx_grid\": Wx_grid,\n \"psi_s_grid\": psi_s_grid,\n \"psi_x_grid\": psi_x_grid,\n \"charge_grid\": charge_grid,\n \"lambda_grid_filtered_prime\": lambda_grid_filtered_prime,\n \"timing\": timing\n }\n )\n\n return result", "def build_spmatrix(om, numpoints, im_size, grid_size, n_shift, order, alpha):\n spmat = -1\n\n ndims = om.shape[0]\n klength = om.shape[1]\n\n # calculate interpolation coefficients using kb kernel\n def interp_coeff(om, npts, grdsz, alpha, order):\n gam = 2 * np.pi / grdsz\n interp_dist = om / gam - np.floor(om / gam - npts / 2)\n Jvec = np.reshape(np.array(range(1, npts + 1)), (1, npts))\n kern_in = -1 * Jvec + np.expand_dims(interp_dist, 1)\n\n cur_coeff = np.zeros(shape=kern_in.shape, dtype=np.complex)\n indices = abs(kern_in) < npts / 2\n bess_arg = np.sqrt(1 - (kern_in[indices] / (npts / 2))**2)\n denom = special.iv(order, alpha)\n cur_coeff[indices] = special.iv(order, alpha * bess_arg) / denom\n cur_coeff = np.real(cur_coeff)\n\n return cur_coeff, kern_in\n\n full_coef = []\n kd = []\n for i in range(ndims):\n N = im_size[i]\n J = numpoints[i]\n K = grid_size[i]\n\n # get the interpolation coefficients\n coef, kern_in = interp_coeff(om[i, :], J, K, alpha[i], order[i])\n\n gam = 2 * np.pi / K\n phase_scale = 1j * gam * (N - 1) / 2\n\n phase = np.exp(phase_scale * kern_in)\n full_coef.append(phase * coef)\n\n # nufft_offset\n koff = np.expand_dims(np.floor(om[i, :] / gam - J / 2), 1)\n Jvec = np.reshape(np.array(range(1, J + 1)), (1, J))\n kd.append(np.mod(Jvec + koff, K) + 1)\n\n for i in range(len(kd)):\n kd[i] = (kd[i] - 1) * np.prod(grid_size[i + 1:])\n\n # build the sparse matrix\n kk = kd[0]\n spmat_coef = full_coef[0]\n for i in range(1, ndims):\n Jprod = np.prod(numpoints[:i + 1])\n # block outer sum\n kk = np.reshape(\n np.expand_dims(kk, 1) + np.expand_dims(kd[i], 2),\n (klength, Jprod)\n )\n # block outer prod\n spmat_coef = np.reshape(\n 
np.expand_dims(spmat_coef, 1) *\n np.expand_dims(full_coef[i], 2),\n (klength, Jprod)\n )\n\n # build in fftshift\n phase = np.exp(1j * np.dot(np.transpose(om),\n np.expand_dims(n_shift, 1)))\n spmat_coef = np.conj(spmat_coef) * phase\n\n # get coordinates in sparse matrix\n trajind = np.expand_dims(np.array(range(klength)), 1)\n trajind = np.repeat(trajind, np.prod(numpoints), axis=1)\n\n # build the sparse matrix\n spmat = coo_matrix((\n spmat_coef.flatten(),\n (trajind.flatten(), kk.flatten())),\n shape=(klength, np.prod(grid_size))\n )\n\n return spmat", "def kernel_kmer(X, Y, k=3):\n x_kmer, y_kmer = kmer(X, Y, k)\n\n sim = 0\n for a in x_kmer:\n for b in y_kmer:\n sim += GXY(a, b)\n\n return sim", "def calculate_S(func, a, b, N):\n # Trapezoid width\n h = (b - a)/N\n\n # Every even slice\n new_part = func(a) + func(b)\n for i in range(2, N, 2):\n new_part += 2 * func(a + i*h) \n \n return 1/3. * new_part", "def kernel(self, cosmo, z, ell):\n z = np.atleast_1d(z)\n # Extract parameters\n pzs, bias = self.params\n # Retrieve density kernel\n kernel = density_kernel(cosmo, pzs, bias, z, ell)\n return kernel", "def _kernel(self, bw, X, x):\n return (1.0 / np.sqrt(2 * np.pi) / bw) * np.exp(\n -((X - x) ** 2) / (bw ** 2 * 2.0)\n )", "def radial_BH_octree_kernel_evaluate(rmax, kernel, pts, wts, theta, log=null_log, sort_data=False, bucket_size=11, force_ngrid=None):\n\n if force_ngrid is None:\n ngrid = max(int(1.0/rmax), 1)\n\n # Avoid nasty hashing problems, make sure ngrid&3 == 3\n if ngrid&3!=3 and ngrid >=3:\n ngrid = (ngrid//4)*4 -1 \n else:\n if force_ngrid*rmax>1.0:\n raise Exception('ngrid=%d has cells smaller than rmax=%.7f'%(force_ngrid,rmax))\n ngrid = force_ngrid\n\n print('Using grid of size {:,}^3 bins, building octree down to buckets of size {:,}.'.format(ngrid, bucket_size), file=log)\n tree, sort_idx = build_octrees(pts, bucket_size, ngrid, wts, log)\n print('Initialising kernel', file=log) \n lattice_setup_kernel(rmax, kernel, log)\n print('BH kernel calculation on {:,} pts'.format(len(pts)),file=log)\n t0 = time()\n n_kernels, accel = bh_tree_walk(tree, ngrid, theta, tree.xyzw, log=log)\n dt = time() - t0\n print('Total kernels {:,} for {:,} pts at'.format(n_kernels, len(pts)),\n MU.OKBLUE+'{:,} pts/sec'.format(int(len(pts)/dt))+MU.ENDC, file=log)\n\n\n if sort_data:\n # return the sort index along with sorted positions and masses, and corresponding accelerations.\n # If you want to unsort you need to do it yourself\n return n_kernels, sort_idx, accel\n\n # indices for 'un'-sorting\n unsort = empty_like(sort_idx)\n unsort[sort_idx] = arange(len(pts), dtype=np.int32)\n\n return n_kernels, accel[unsort]", "def gridded_1d(q, n=300):\n theta = np.linspace(THETA_LOW, THETA_HIGH, n)\n Zq = kernel_1d(q=q, theta=theta)\n Zq *= abs(sin(theta))\n dx = theta[1]-theta[0]\n print(\"rect-%d\"%n, np.sum(Zq)*dx*SCALE)\n print(\"trapz-%d\"%n, np.trapz(Zq, dx=dx)*SCALE)\n print(\"simpson-%d\"%n, simps(Zq, dx=dx)*SCALE)\n print(\"romb-%d\"%n, romb(Zq, dx=dx)*SCALE)", "def main():\n\n # Create an empty array to hold our points.\n n = gpuarray.zeros(shape=(x, y, z),\n dtype=gpuarray.vec.float3)\n\n # Populate the array with randomized points from the search space.\n for k in range(z):\n for j in range(y):\n for i in range(x):\n n[i, j, k] = gpuarray.vec.make_float3(random.uniform(-width, width),\n random.uniform(-height, height),\n random.uniform(-depth, depth))\n\n # Declare our elementwise CUDA kernel.\n mod = Elementwise(\n arguments=\"float3 pt, float3 *ns, float *rs\",\n 
operation=\"rs[i] = sqrt(pow(pt.x-ns[i].x,2)+pow(pt.y-ns[i].y,2)+pow(pt.z-ns[i].z,2))\",\n name=\"euclidean_distance\",\n preamble=\"#include <math.h>\"\n )\n\n # Declare an empty results array.\n r = gpuarray.zeros(shape=(50, 50, 2), dtype=numpy.float32)\n start = cuda.Event()\n end = cuda.Event()\n start.record()\n # Call the kernel with a randomize point from the search space.\n mod(gpuarray.vec.make_float3(random.uniform(-width, width),\n random.uniform(-height, height),\n random.uniform(-width, width)), n, r)\n end.record()\n end.synchronize()\n print((start.time_till(end)))\n print(r)", "def disk(radius, num_kernels):\n k = tf.convert_to_tensor(morphology.disk(radius))\n k = tf.stack([k] * num_kernels, axis=-1)\n k = tf.to_int32(k)\n return k" ]
[ "0.6673899", "0.66569775", "0.6480273", "0.6411122", "0.62793094", "0.626478", "0.60912395", "0.6079251", "0.5977385", "0.5952887", "0.5948711", "0.59083223", "0.58799875", "0.58414066", "0.58391666", "0.5796582", "0.56330234", "0.5628569", "0.5627499", "0.55669326", "0.5563375", "0.5563371", "0.5557263", "0.5527974", "0.55223745", "0.55171114", "0.5512605", "0.5511828", "0.54789287", "0.54692817", "0.5449899", "0.5444194", "0.54383576", "0.54245526", "0.5412727", "0.5405591", "0.54012364", "0.539913", "0.538493", "0.53833866", "0.53589326", "0.5358881", "0.5356352", "0.5353812", "0.53496456", "0.5346607", "0.532615", "0.5322354", "0.53125066", "0.53109425", "0.53086555", "0.5303566", "0.5295119", "0.52854025", "0.52650756", "0.52609026", "0.5248451", "0.5243546", "0.523938", "0.52380615", "0.52378327", "0.52307385", "0.5229959", "0.5229016", "0.52221847", "0.5219457", "0.5214885", "0.5214789", "0.52129716", "0.520301", "0.5195946", "0.51952684", "0.51925063", "0.5177213", "0.51763546", "0.51759666", "0.517347", "0.5171172", "0.51645947", "0.5152344", "0.5151597", "0.5147039", "0.5145381", "0.51328975", "0.51141334", "0.51113987", "0.5108657", "0.51058424", "0.51048917", "0.50938857", "0.50936353", "0.5089502", "0.50871295", "0.50861263", "0.5079005", "0.5074092", "0.50658965", "0.5057179", "0.5053719", "0.5052414" ]
0.6426333
3
Given the coefficients, evaluate the model at a specific direction omega
def even_pODF(omega, qpoints, c, N):
    n, m = qpoints.shape
    sum = 0.0
    for i in range(n):
        mu = np.dot(omega, qpoints[i, :])
        mu = np.clip(mu, -1.0, 1.0)
        sum += c[i] * even_kernel(mu, N)
    return sum
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def omega(self):\n return self._data.train_X @ self._thetas", "def solve_model():\n from scipy.integrate import ode\n # Initialise constants and state variables\n (init_states, constants) = initConsts()\n\n # Set timespan to solve over\n voi = linspace(0, 100, 5000)\n\n # Construct ODE object to solve\n r = ode(computeRates)\n r.set_integrator('vode', method='bdf', atol=1e-06, rtol=1e-06, max_step=1)\n r.set_initial_value(init_states, voi[0])\n r.set_f_params(constants)\n\n # Solve model\n states = array([[0.0] * len(voi)] * sizeStates)\n states[:,0] = init_states\n for (i,t) in enumerate(voi[1:]):\n if r.successful():\n r.integrate(t)\n states[:,i+1] = r.y\n else:\n break\n\n # Compute algebraic variables\n algebraic = computeAlgebraic(constants, states, voi)\n return (voi, states, algebraic)", "def determine_in_plane_angle(self, qxy, qz=0.0, theta_incident=0.0):\n \n k = self.get_k()\n if theta_incident==None:\n # Use internal value\n theta_incident = self.theta_incident\n theta_incident_rad = np.radians(theta_incident)\n \n from scipy.optimize import fsolve\n \n def equations(p, qxy=qxy, qz=qz, theta_incident=theta_incident, k=k):\n \n # The variable we are fitting for\n omega_rad, = p\n \n # Non-fit values: qxy, qz, k, theta_incident, k\n \n return ( (qxy*cos(omega_rad))**2 + (qxy*sin(omega_rad)+k*cos(theta_incident_rad))**2 + (qz-k*sin(theta_incident_rad))**2 - k**2 )\n\n \n omega_rad, = fsolve(equations, ( np.radians(5.0) ) )\n #print( 'omega_rad = %.2f (err = %.4f)' % ( omega_rad, equations((omega_rad, )) ) )\n \n omega = abs( np.degrees(omega_rad) )\n #print( 'omega = %.2f (err = %.4f)' % ( omega, equations((omega_rad, )) ) )\n \n \n return omega", "def _compute_omega_dot(self, x, omega_cmd):\r\n phi = x[PHI_IDX]\r\n psi = x[PSI_IDX]\r\n\r\n beta_dot = x[BETA_DOT_IDX]\r\n phi_dot = x[PHI_DOT_IDX]\r\n psi_dot = x[PSI_DOT_IDX]\r\n\r\n A = np.zeros([3, 3])\r\n b = np.zeros(3)\r\n\r\n # auto-generated symbolic expressions\r\n x0 = self.p.r2**2\r\n x1 = cos(phi)\r\n x2 = self.p.l * self.p.m3 * x1\r\n x3 = self.p.r2 * x2\r\n x4 = self.p.theta1 / self.p.r1**2\r\n x5 = sin(phi)\r\n x6 = self.p.l**2 * self.p.m3\r\n x7 = self.p.l * self.p.m3 * x5\r\n x8 = sin(psi)\r\n x9 = self.p.r1 + self.p.r2\r\n x10 = x8 * x9\r\n x11 = cos(psi)\r\n x12 = -self.p.r1 - self.p.r2\r\n x13 = -x11 * x9 + x12\r\n x14 = -x10 * x7 + x13 * x2\r\n x15 = self.p.r2 * x9\r\n x16 = self.p.r2 * x13\r\n x17 = self.p.m1 * self.p.r2 * x12 + self.p.m2 * x16 + self.p.m3 * x16 - x15 * x4\r\n x18 = x9**2\r\n x19 = x18 * x8**2\r\n x20 = x13**2\r\n x21 = psi_dot**2\r\n x22 = phi_dot**2\r\n x23 = x21 * x8 * x9\r\n x24 = self.p.m3 * x23 - x22 * x7\r\n x25 = x11 * x21 * x9\r\n x26 = self.p.g * self.p.m3 - self.p.m3 * x25 + x2 * x22\r\n A[0, 0] = self.p.m1 * x0 + self.p.m2 * x0 + self.p.m3 * x0 + self.p.theta2 + x0 * x4 + x3\r\n A[0, 1] = self.p.theta3 + x1**2 * x6 + x3 + x5**2 * x6\r\n A[0, 2] = x14 + x17\r\n A[1, 0] = -1\r\n A[1, 1] = 1\r\n A[1, 2] = 0\r\n A[2, 0] = x17\r\n A[2, 1] = x14\r\n A[2, 2] = self.p.m1 * x12**2 + self.p.m2 * x19 + self.p.m2 * \\\r\n x20 + self.p.m3 * x19 + self.p.m3 * x20 + x18 * x4\r\n b[0] = -self.p.l * x1 * x24 - self.p.l * x26 * \\\r\n x5 - self.p.m2 * x15 * x21 * x8 - self.p.r2 * x24\r\n b[1] = (beta_dot + omega_cmd - phi_dot) / self.p.tau\r\n b[2] = -self.p.m2 * x13 * x23 + x10 * x26 + x10 * \\\r\n (self.p.g * self.p.m2 - self.p.m2 * x25) - x13 * x24\r\n\r\n return np.linalg.solve(A, b)", "def E(z, omega_m, omega_l):\n return 1 / np.sqrt(omega_m * (1 + z) ** 3 + omega_l)", "def omega(self):\n 
self.cosineSequences()", "def omega(x):\n # if ramp_start_time - ramp_constant_time <= x <= end_ramp_end_time + ramp_constant_time:\n # f_t0 = smooth_factor * (x - ramp_start_time)\n # f_t1 = smooth_factor * (x - i_ramp_end_time)\n # if ramp_mode == 'with_end_acc':\n # f_t2 = smooth_factor * (x - steady_end_time)\n # f_t3 = smooth_factor * (x - end_ramp_end_time)\n # elif ramp_mode == 'no_end_acc':\n # f_t2 = smooth_factor * ramp_start_time\n # f_t3 = smooth_factor * i_ramp_end_time\n\n # omegax = (ramp_stage_acceleration / 2) / smooth_factor * (\n # logcosh(f_t0) - logcosh(f_t1) + logcosh(f_t3) - logcosh(f_t2))\n # else:\n # if bstroke == 'yes' and x <= 2 * (end_ramp_end_time +\n # ramp_constant_time):\n # x -= end_ramp_end_time + ramp_constant_time\n # f_t0 = smooth_factor * (x - ramp_start_time)\n # f_t1 = smooth_factor * (x - i_ramp_end_time)\n # if ramp_mode == 'with_end_acc':\n # f_t2 = smooth_factor * (x - steady_end_time)\n # f_t3 = smooth_factor * (x - end_ramp_end_time)\n # elif ramp_mode == 'no_end_acc':\n # f_t2 = smooth_factor * ramp_start_time\n # f_t3 = smooth_factor * i_ramp_end_time\n\n # omegax = -(ramp_stage_acceleration / 2) / smooth_factor * (\n # logcosh(f_t0) - logcosh(f_t1) + logcosh(f_t3) -\n # logcosh(f_t2))\n # else:\n # omegax = 0\n\n if bstroke == 'no':\n f_t0 = smooth_factor * (x - ramp_start_time)\n f_t1 = smooth_factor * (x - i_ramp_end_time)\n if ramp_mode == 'with_end_acc':\n f_t2 = smooth_factor * (x - steady_end_time)\n f_t3 = smooth_factor * (x - end_ramp_end_time)\n elif ramp_mode == 'no_end_acc':\n f_t2 = smooth_factor * ramp_start_time\n f_t3 = smooth_factor * i_ramp_end_time\n\n omegax = (ramp_stage_acceleration / 2) / smooth_factor * (\n logcosh(f_t0) - logcosh(f_t1) + logcosh(f_t3) - logcosh(f_t2))\n\n else:\n if x <= end_ramp_end_time + ramp_constant_time:\n f_t0 = smooth_factor * (x - ramp_start_time)\n f_t1 = smooth_factor * (x - i_ramp_end_time)\n if ramp_mode == 'with_end_acc':\n f_t2 = smooth_factor * (x - steady_end_time)\n f_t3 = smooth_factor * (x - end_ramp_end_time)\n elif ramp_mode == 'no_end_acc':\n f_t2 = smooth_factor * ramp_start_time\n f_t3 = smooth_factor * i_ramp_end_time\n\n omegax = (ramp_stage_acceleration /\n 2) / smooth_factor * (logcosh(f_t0) - logcosh(f_t1) +\n logcosh(f_t3) - logcosh(f_t2))\n\n else:\n x -= end_ramp_end_time + ramp_constant_time\n f_t0 = smooth_factor * (x - ramp_start_time)\n f_t1 = smooth_factor * (x - i_ramp_end_time)\n if ramp_mode == 'with_end_acc':\n f_t2 = smooth_factor * (x - steady_end_time)\n f_t3 = smooth_factor * (x - end_ramp_end_time)\n elif ramp_mode == 'no_end_acc':\n f_t2 = smooth_factor * ramp_start_time\n f_t3 = smooth_factor * i_ramp_end_time\n\n omegax = -(ramp_stage_acceleration / 2) / smooth_factor * (\n logcosh(f_t0) - logcosh(f_t1) + logcosh(f_t3) -\n logcosh(f_t2))\n\n return omegax", "def f(z):\n omega_m = 0.308\n omega_de = 0.692\n #omega = omega_m*(1+z)**3\n #return omega**0.6 + omega_de/70*(1+omega/2) # Dodelson approx\n\n omega = omega_m*(1+z)**3*H(0)**2/H(z)**2\n omega_de = omega_de*H(0)**2/H(z)**2\n return omega**(4/7) + omega_de/70*(1+omega/2) # Dodelson approx\n #return 5*omega/(2*(omega**(4/7) - omega_de + (1 + omega/2)*(1 + omega_de/70)))\n #return omega**0.55", "def run(self, diffusion_coefficients):\n mat = self.buildmatrix(diffusion_coefficients)\n\n rhs = np.zeros(self.size)\n rhs[0] = -(diffusion_coefficients[0] + diffusion_coefficients[1]) * self.phi0\n\n if self.verbose > 0:\n print(\"System of equations:\")\n for i in range(mat.shape[0]):\n row = 
[\"{0:3g}*x{1}\".format(mat[i, j], j + 1) for j in range(mat.shape[1])]\n if self.verbose > 0:\n print(\"[{0}] = [{1:3g}]\".format(\" + \".join(row), rhs[i]))\n\n if parameters.solver == 'jacobi':\n x = self.jacobi_solver(mat, rhs)\n elif parameters.solver == 'gauss-seidel':\n x = self.gauss_seidel_solver(mat, rhs)\n elif parameters.solver == 'tridiag':\n x = self.tridiag_solver(mat, rhs)\n else:\n sys.exit('Unknown solver')\n\n if self.verbose > 1:\n print(\"Solution: {0}\".format(x))\n error = np.dot(mat, x) - rhs\n if self.verbose > 1:\n print(\"Error: {0}\".format(error))\n x = np.insert(x, 0, self.phi0)\n x = np.append(x, 0)\n return x", "def evaluate(self, x, y, x0, y0, order):\n try:\n iorder = self._order_mapping[int(order.flatten()[0])]\n except AttributeError:\n iorder = self._order_mapping[order]\n except KeyError:\n raise ValueError(\"Specified order is not available\")\n\n # The next two lines are to get around the fact that\n # modeling.standard_broadcasting=False does not work.\n #x00 = x0.flatten()[0]\n #y00 = y0.flatten()[0]\n\n t = np.linspace(0, 1, 10) #sample t\n xmodel = self.xmodels[iorder]\n ymodel = self.ymodels[iorder]\n lmodel = self.lmodels[iorder]\n\n dx = xmodel.evaluate(x0, y0, t)\n dy = ymodel.evaluate(x0, y0, t)\n\n if self.theta != 0.0:\n rotate = Rotation2D(self.theta)\n dx, dy = rotate(dx, dy)\n\n so = np.argsort(dx)\n tab = Tabular1D(dx[so], t[so], bounds_error=False, fill_value=None)\n\n dxr = astmath.SubtractUfunc()\n wavelength = dxr | tab | lmodel\n model = Mapping((2, 3, 0, 2, 4)) | Const1D(x0) & Const1D(y0) & wavelength & Const1D(order)\n return model(x, y, x0, y0, order)", "def omega_i(self, omega, i):\n\n delta, epsilon = self.coefficients(self.pose_a[self.links[i][0]], \n self.pose_a[self.links[i][1]], \n self.pose_b[self.links[i][0]], \n self.pose_b[self.links[i][1]])\n return np.arccos(delta*np.cos(omega) + epsilon*np.sin(omega))", "def omega_c(B, m=m_star, q=q_e):\n return q * B / m # in 1/s", "def rhs(t, Y, q, omega_d, b):\n f = np.zeros_like(Y)\n\n f[0] = Y[1]\n f[1] = -q*Y[1] - np.sin(Y[0]) + b*np.cos(omega_d*t)\n\n return f", "def function(self, omega):\n \n function = 0*omega\n \n for i in range(len(self.links)):\n function += self.omega_i(omega, i)*self.weight\n \n return function", "def evalpotential( self, X, Y, Z):\n EVAL = np.zeros_like(X) \n for b in self.beams:\n EVAL += b(X,Y,Z)\n return EVAL* self.unitfactor", "def evaluate(self, radius, mtot, m0, alpha1, alpha2):\n model = mtot + m0 * (1 - np.exp(-alpha1*(radius/self.r0)**(-alpha2)))\n return model", "def _model_dynamics(self):\n if self.acc_term:\n rne = np.ndarray(self.sim.model.nv)\n functions.mj_rne(self.sim.model, self.sim.data, True, rne)\n return rne[self.arm_index]\n else:\n return self.sim.data.qfrc_bias[self.arm_index] # stored, no need for computation", "def ne_fwd_iter(self, q_dot, q_ddot, omega_im1, alpha_im1, acc_e_im1):\n # # Calculate CM angular velocity (WCS)\n # self.omega = omega_im1 + q_dot * self.z_gl\n # # Calculate CM angular acceleration (WCS)\n # self.alpha = alpha_im1 + self.z_gl * q_ddot +\\\n # X(self.omega, self.z_gl) * q_dot\n # # Calculate CM linear acc (WCS)\n # self.acc = acc_e_im1 + X(self.alpha, -self.r_hc) +\\\n # X(self.omega, X(self.omega, -self.r_hc))\n # # Calculate body end (flange2) linear acc (WCS)\n # self.acc_e = acc_e_im1 + X(self.alpha, -self.r_ht) +\\\n # X(self.omega, X(self.omega, -self.r_ht))\n self.omega, self.alpha, self.acc, self.acc_e = fast_fwd_ne(\n self.z_gl, self.r_hc, self.r_ht,\n q_dot, q_ddot, omega_im1, alpha_im1, 
acc_e_im1\n )", "def evaluate(self,coeffs,evalpts):\n a1,a2,a3,A0,E0,G0,n = coeffs\n x = asarray(evalpts) #XXX: requires a numpy.array\n return (a1 + a2*x + a3*x*x + A0 * ( G0/(2*pi) )/( (x-E0)*(x-E0)+(G0/2)*(G0/2) ))/n", "def __call__(self,r):\n return self._n0 * np.power(r / self._r0, self._beta)", "def _compute_aero_torque(self, curr_date, omega):\n if self._to_add[3]:\n # assuming constant atmosphere condition over spacecraft\n # error is of order of 10^-17\n rho = self.AtmoModel.getDensity(curr_date, self.satPos_i, self.in_frame)\n vAtm_i = self.AtmoModel.getVelocity(curr_date, self.satPos_i, self.in_frame)\n\n satVel = self.inertial2Sat.applyTo(self.satVel_i)\n vAtm = self.inertial2Sat.applyTo(vAtm_i)\n\n dragCoeff = self.meshDA['Cd']\n liftRatio = 0.0 # no lift considered\n\n CoM = self.meshDA['CoM_np']\n normal = self.meshDA['Normal_np']\n area = np.asarray(self.meshDA['Area'])\n satVel = np.array([satVel.x, satVel.y, satVel.z])\n vAtm = np.array([vAtm.x, vAtm.y, vAtm.z])\n\n relativeVelocity = vAtm - (satVel + (np.cross(omega, CoM)))\n vNorm = np.linalg.norm(relativeVelocity, axis=1)\n vDir = np.reciprocal(vNorm[:, None]) * relativeVelocity\n\n dot = np.einsum('ij,ij->i', normal, vDir)\n\n dotCondition = dot < 0\n dot = dot[dotCondition]\n if dot.size > 0:\n vDir = vDir[dotCondition]\n vNorm = vNorm[dotCondition]\n normal = normal[dotCondition]\n area = area[dotCondition]\n CoM = CoM[dotCondition]\n\n coeff = 0.5 * rho * dragCoeff * (vNorm**2)\n oMr = 1.0 - liftRatio\n f = (coeff * area * dot)[:, None]\n\n aT = np.sum(np.cross(CoM, oMr * np.absolute(f) * vDir + 2 * liftRatio * f * normal), axis=0)\n\n self._aTorque = Vector3D(float(aT[0]), float(aT[1]), float(aT[2]))\n\n else:\n self._aTorque = Vector3D.ZERO", "def _like4(init_par, alpha, delta, plx_obs, mualpha_obs, mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoef, i):\r\n\t\r\n\tplx_mod, v, sigma_v = init_par[i], init_par[-4:-1], init_par[-1] \r\n\tp, q, r = normalTriad(alpha, delta)\r\n\tmualpha_mod = np.dot(np.transpose(p),v)*plx_mod/_A ### [mas/yr]\r\n\tmudelta_mod = np.dot(np.transpose(q),v)*plx_mod/_A ### [mas/yr]\r\n\t### Add the model vector for the radial velocities:\r\n\tvrad_mod = np.dot(np.transpose(r),v) ### [km/s]\r\n \t\r\n\tsigma_plx, sigma_mualpha, sigma_mudelta = np.transpose(sigma_obs)\r\n\tC = np.zeros((4,4),dtype=np.float64) ### This is a 4x4 matrix \r\n\t### Diagonal terms:\r\n\tC[0,0],C[1,1],C[2,2] = sigma_plx**2.,sigma_mualpha**2., sigma_mudelta**2.\r\n\tC[3,3] = sigma_vrad**2.\r\n\t\r\n\tr_plx_muRa, r_plx_muDec, r_muRa_muDec = ccoef[0], ccoef[1], ccoef[2] \r\n \r\n\t### Correlation terms:\r\n\tC[0,1], C[0,2] = r_plx_muRa*sigma_plx*sigma_mualpha, r_plx_muDec*sigma_plx*sigma_mudelta\r\n\tC[1,0], C[1,2] = r_plx_muRa*sigma_plx*sigma_mualpha, r_muRa_muDec*sigma_mualpha*sigma_mudelta\r\n\tC[2,0], C[2,1] = r_plx_muDec*sigma_plx*sigma_mudelta, r_muRa_muDec*sigma_mualpha*sigma_mudelta\r\n\r\n\tE = np.zeros((4,4),dtype=np.float64) ### 4x4 matrix \r\n\tE[1,1],E[2,2] = (sigma_v**2.)*(plx_mod/_A)**2., (sigma_v**2.)*(plx_mod/_A)**2. 
### [mas/yr]\r\n\tE[3,3] = sigma_v**2.\t\t\t\t\t\t\t\t ### [km/s]\r\n\r\n\t\r\n\tD = np.add(E,C)\r\n\tdetD = det(D) \r\n\tinvD = inv(D)\r\n\t\t\r\n\ta_c = np.array([plx_obs - plx_mod, mualpha_obs - mualpha_mod, mudelta_obs-mudelta_mod, vrad_obs - vrad_mod])\r\n\tg_func = row_matrix_col_4d(a_c, a_c, invD) \r\n\t\r\n\t\r\n\treturn detD, g_func", "def fit_model(self, phi, omega, sigma_eta, beta=0):\r\n # Initialize at the initial values parsed to the class\r\n par_ini = [phi, omega, sigma_eta, beta]\r\n # Approximate the jabocian for more efficient minimization\r\n Lprime = lambda x: approx_fprime(x, self.__llik_fun__, 0.01)\r\n if self.method == 'iterateRegression':\r\n # Depending on whether we include the regression coefficient, use other optimizer\r\n est = minimize(self.__llik_fun__, x0=par_ini,\r\n options=self.options,\r\n method='Newton-CG', jac=Lprime)\r\n else:\r\n est = minimize(self.__llik_fun__, x0=par_ini,\r\n options=self.options,\r\n method='BFGS')\r\n # Return optimal parameters\r\n return est.x", "def model(theta, x):\n\tw, b = theta\n\treturn w * x + b", "def equation(self):\n mat = np.zeros((self.nunknowns, self.model.neq))\n rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n rhs[0:self.nlayers - 1] = 0.0\n rhs[self.nlayers - 1] = self.Qc\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n head = e.potinflayers(self.xc, self.yc, self.layers) / self.aq.Tcol[self.layers, :]\n mat[0:self.nlayers - 1, ieq:ieq + e.nunknowns] = head[:-1] - head[1:]\n if e == self:\n for i in range(self.nlayers - 1):\n mat[i, ieq + i] -= self.resfac[i]\n mat[i, ieq + i + 1] += self.resfac[i + 1]\n mat[self.nlayers - 1, ieq:ieq + self.nlayers] = 1.0\n ieq += e.nunknowns\n else:\n head = e.potentiallayers(self.xc, self.yc, self.layers) / self.aq.T[self.layers]\n rhs[0:self.nlayers - 1] -= head[:-1] - head[1:]\n return mat, rhs", "def wheel_vel(vx, vy, omega):\n a = np.array([ np.pi/4, 3*np.pi/4, 5*np.pi/4, 7*np.pi/4 ]) # Alpha\n b = np.array([ np.pi/4, -np.pi/4, -3*np.pi/4, 3*np.pi/4 ]) # Beta\n g = np.array([ np.pi/4, -np.pi/4, -np.pi/4, np.pi/4 ]) # Gamma\n L = np.array([ np.sqrt(2), np.sqrt(2), np.sqrt(2), np.sqrt(2) ]) # Distance to wheel\n R = 0.05\n \n a = np.pi / 4 # Alpha\n b = np.pi / 4 # Beta\n g = np.pi / 4 # Gamma\n L = np.sqrt(2)\n R = 0.05\n \n return (-vx - vy * np.tan(a + b + g) - L * omega * np.sin(b + g) / np.cos(a + b + g)) / (R * np.sin(g) / np.cos(a + b + g))", "def compute_contact_forces(self, x=None, omega_cmd=None):\r\n if x is None:\r\n x = self.x\r\n if omega_cmd is None:\r\n print('Warning: no omega_cmd specified for contact force calculation; default to 0')\r\n omega_cmd = 0\r\n\r\n phi = x[PHI_IDX]\r\n psi = x[PSI_IDX]\r\n\r\n phi_dot = x[PHI_DOT_IDX]\r\n psi_dot = x[PSI_DOT_IDX]\r\n\r\n x_ddot = self._compute_omega_dot(x, omega_cmd)\r\n\r\n beta_dd = x_ddot[BETA_IDX]\r\n phi_dd = x_ddot[PHI_IDX]\r\n psi_dd = x_ddot[PSI_IDX]\r\n\r\n x0 = beta_dd * self.p.r2\r\n x1 = -self.p.r1 - self.p.r2\r\n x2 = psi_dot**2\r\n x3 = self.p.m2 * x2\r\n x4 = sin(psi)\r\n x5 = self.p.r1 + self.p.r2\r\n x6 = x4 * x5\r\n x7 = x5 * cos(psi)\r\n x8 = psi_dd * (x1 - x7)\r\n x9 = cos(phi)\r\n x10 = phi_dd * self.p.l * self.p.m3\r\n x11 = sin(phi)\r\n x12 = phi_dot**2 * self.p.l * self.p.m3\r\n x13 = self.p.m3 * x2\r\n x14 = self.p.m3 * x0 + self.p.m3 * x8 + x10 * x9 - x11 * x12 + x13 * x6\r\n x15 = self.p.m2 * x0 + self.p.m2 * x8 + x14 + x3 * x6\r\n x16 = psi_dd * x4 * x5\r\n x17 = self.p.g * self.p.m3 - self.p.m3 * x16 + x10 * x11 + x12 * x9 - x13 * x7\r\n x18 
= self.p.g * self.p.m2 - self.p.m2 * x16 + x17 - x3 * x7\r\n\r\n F1 = np.zeros(2)\r\n F12 = np.zeros(2)\r\n F23 = np.zeros(2)\r\n\r\n F1[0] = psi_dd * self.p.m1 * x1 + self.p.m1 * x0 + x15\r\n F1[1] = self.p.g * self.p.m1 + x18\r\n F12[0] = x15\r\n F12[1] = x18\r\n F23[0] = x14\r\n F23[1] = x17\r\n\r\n return [F1, F12, F23]", "def __control_unitary(self, unitary):\n # Unpack the matrix elementwise, corresponding to the matrix\n # [ a, b ]\n # [ c, d ]\n [a, b], [c, d] = unitary\n\n # Solve for alpha, beta, gamma, delta as per the above equations\n alpha = np.log(a*d - b*c) / 2j\n gamma = np.arccos((a*d - b*c) / (a*d + b*c))\n\n # If there are zeroes then we have multiple solutions.\n # In that case, set beta to 0 then solve for delta\n if c*d == 0 and a*b*np.sin(gamma) == 0:\n beta = 0\n delta = np.log(d / (a * np.cos(gamma/2)**2)) / 1j\n else:\n beta = np.log(-4*c*d / (a*b * (np.sin(gamma)**2))) / 2j\n delta = np.log(-b*d/(a*c*(np.tan(gamma/2)**2))) / 2j\n\n # Construct the matrices as per above equations\n mat_A = ROTZ(beta).matrix @ ROTY(gamma/2).matrix\n mat_B = ROTY(-gamma/2).matrix @ ROTZ(-(delta + beta)/2).matrix\n mat_C = ROTZ((delta - beta)/2).matrix\n mat_0 = np.array(\n [\n [1, 0],\n [0, np.exp(1j * alpha)]\n ]\n ) \n return np.real([alpha, beta, gamma, delta]), mat_A, mat_B, mat_C", "def f(self,y,psi):\r\n\r\n #1. check that number of params is consistent\r\n assert psi.shape[0] == self.n_terms, 'inconsistent parameter dimensions'\r\n assert psi.shape[1] == 3, 'inconsistent parameter dimensions'\r\n\r\n #2. exponentiate the a and b (positive!)\r\n mpsi = psi.copy()\r\n\r\n #3. transform data\r\n z = y.copy()\r\n for i in range(len(mpsi)):\r\n a,b,c = mpsi[i]\r\n z += a*np.tanh(b*(y+c))\r\n return z", "def dynamics(self,eta,nu,u_actual,u_control,sampleTime): \n \n # Current velocities\n u_c = self.V_c * math.cos(self.beta_c - eta[5]) # current surge velocity\n v_c = self.V_c * math.sin(self.beta_c - eta[5]) # current sway velocity \n \n nu_c = np.array([u_c,v_c,0,0,0,0],float) # current velocity vector\n nu_r = nu - nu_c # relative velocity vector\n \n U_r = math.sqrt( nu_r[0]**2 + nu_r[1]**2 ) # relative speed\n \n # Rudder command and actual rudder angle\n delta_c = u_control[0]\n delta = u_actual[0]\n \n # Rudder forces and moment (Fossen 2021, Chapter 9.5.1)\n b = 0.7 * self.T # rudder height\n AR = b**2 / self.Lambda # aspect ratio: Lamdba = b**2/AR \n CN = 6.13 * self.Lambda / ( self.Lambda + 2.25 ) # normal coefficient\n t_R = 1 - 0.28 * self.Cb - 0.55\n a_H = 0.4\n x_R = -0.45 * self.L\n x_H = -1.0 * self.L\n\n Xdd = -0.5 * ( 1 - t_R ) * self.rho * U_r**2 * AR * CN\n Yd = -0.25 * ( 1 + a_H ) * self.rho * U_r**2 * AR * CN \n Nd = -0.25 * ( x_R + a_H * x_H ) * self.rho * U_r**2 * AR * CN \n \n # Control forces and moment\n delta_R = -delta # physical rudder angle (rad)\n T = self.tau_X # thrust (N)\n t_deduction = 0.1 # thrust deduction number\n tau1 = ( 1 - t_deduction ) * T - Xdd * math.sin( delta_R )**2 \n tau2 = -Yd * math.sin( 2 * delta_R ) \n tau6 = -Nd * math.sin( 2 * delta_R ) \n tau = np.array( [ tau1, tau2, tau6 ],float) \n \n # Linear maneuvering model\n T_surge = self.L # approx. time constant in surge (s)\n xg = 0 # approx. 
x-coordinate, CG (m) \n \n # 3-DOF ship model\n [M,N] = clarke83(U_r,self.L, self.B, self.T,self.Cb,self.R66,xg,T_surge)\n Minv = np.linalg.inv(M)\n nu3 = np.array( [ nu_r[0], nu_r[1], nu_r[5] ]) \n nu3_dot = np.matmul( Minv, tau - np.matmul(N,nu3) ) \n \n # 6-DOF ship model\n nu_dot = np.array( [ nu3_dot[0],nu3_dot[1],0,0,0,nu3_dot[2] ]) \n\n # Rudder angle saturation\n if ( abs(delta) >= self.deltaMax * math.pi / 180 ):\n delta = np.sign(delta) * self.deltaMax * math.pi / 180\n \n # Rudder dynamics\n delta_dot = (delta_c - delta) / self.T_delta \n\n # Forward Euler integration [k+1]\n nu = nu + sampleTime * nu_dot\n delta = delta + sampleTime * delta_dot\n\n u_actual = np.array([delta],float) \n\n return nu, u_actual", "def omega(self, forceCalculate=False, verbose=0):\n\n if self._omegaExists and not forceCalculate:\n return self._omega\n\n self.incidentWavefunction.gaugeField(verbose=verbose)\n self.targetWavefunction.adjointWilsonLine(verbose=verbose)\n\n if verbose > 0:\n print(f'Calculating {type(self).__name__} omega' + '.'*10, end='')\n\n self._omega = _calculateOmegaOpt(self.N, self.gluonDOF, self.delta, self.incidentWavefunction.gaugeField(), self.targetWavefunction.adjointWilsonLine())\n\n self._omegaExists = True\n\n if verbose > 0:\n print('finished!')\n\n return self._omega", "def input_equation(self, eq: str) -> None:\n if self.xmin >= self.xmax:\n raise Exception('Minimum > Maximum')\n\n increment = (self.xmax - self.xmin) / self.precision\n self.dependant = []\n\n x = self.xmin\n while x <= self.xmax:\n try:\n y = eval(eq)\n except ZeroDivisionError:\n print(f'Division by zero, x = {x}')\n x += increment\n except SyntaxError:\n print(f'Invalid equation: {eq}')\n x += increment\n except ValueError:\n print(f'Math domain error, {eq}: x = {x}')\n x += increment\n except TypeError:\n print('Can\\'t convert complex to float')\n x += increment\n else:\n self.dependant.append((x, y))\n x += increment\n self.equation = eq", "def application_test():\n # Choice of nonlinear coefficient\n m = 2\n\n def q(u):\n return (1+u)**m\n\n def Dq(u):\n return m*(1+u)**(m-1)\n\n usage = 'manual|automatic Krylov|direct degree nx ny nz'\n try:\n import sys\n J_comp = sys.argv[1]\n linear_solver = sys.argv[2]\n degree = int(sys.argv[3])\n divisions = [int(arg) for arg in sys.argv[4:]]\n except:\n print('Usage: %s' % sys.argv[0], usage)\n sys.exit(0)\n\n u = solver(q, Dq, f, divisions, degree,\n 'pde_Newton', J_comp, linear_solver)\n\n # Find max error\n u_exact = Expression(\n 'pow((pow(2, m+1)-1)*x[0] + 1, 1.0/(m+1)) - 1', m=m)\n u_e = interpolate(u_exact, u.function_space())\n import numpy as np\n error = np.abs(u_e.vector().array() -\n u.vector().array()).max()\n print('error: %.2E' % error)", "def update_model_parameters(phi, T, nz, coord, SWVD, form=\"Calonne\"):\r\n D_eff = np.ones(nz)\r\n\r\n if form == \"Hansen\": # Hansen and Foslien (2015)\r\n D_eff = phi * (1 - phi) * D0 + D0\r\n elif form == \"Calonne\": # Calonne et al. (2014)\r\n x = 2 / 3 - phi\r\n b = np.heaviside(x, 1)\r\n D_eff = D0 * (1 - 3 / 2 * phi) * b\r\n else:\r\n print(\"requested method not available, check input\")\r\n\r\n ## effective thermal conductivity W/m/K\r\n k_eff = np.ones(nz)\r\n\r\n if form == \"Hansen\": # Hansen and Foslien (2015)\r\n k_eff = phi * ((1 - phi) * k_a + phi * k_i) + k_a\r\n elif form == \"Calonne\": # Calonne et al. 
(2011)\r\n k_eff = ka0 + ka1 * (rho_i * phi) + ka2 * (rho_i * phi) ** 2\r\n else:\r\n print(\"requested method not available, check input\")\r\n\r\n ## effective heat capacity - similar forumla in Hansen and Foslien (2015) and Lรถwe et al. (2019)\r\n rhoC_eff = np.zeros(nz)\r\n rhoC_eff = phi * rho_i * C_i + (np.ones(nz) - phi) * rho_a * C_a\r\n\r\n ## Water Vapor density rho_v and its derivative rho_v_dT:\r\n [rho_v, rho_v_dT] = sat_vap_dens(nz, T, SWVD)\r\n\r\n return D_eff, k_eff, rhoC_eff, rho_v, rho_v_dT", "def integrate(equ):\n if \"x\" in equ:\n return polynomial_equation(equ)\n else:\n return constant_equation(equ)", "def work_dos():\n #potential = 2x**2+x**2y+y**2\n x1,y1 = (2, -3)\n x2,y2 = (-1, 2)\n p1 = (2*(x1**2)) + ((x1**2)*y1) + (y1**2)\n p2 = (2*(x2**2)) + ((x2**2)*y2) + (y2**2)\n sol = p1 - p2\n sol = abs(sol)\n print(f'The vector field F=(4x+2xy,x2+2y) \\n'\n 'along the curve C parametrized by r(t)=(3tโˆ’1,โˆ’5t+2) \\n '\n f'for 0 โ‰ค t โ‰ค 1 is: {sol}')", "def run(self, **kwargs):\r\n\r\n # Get the kwargs.\r\n cases = kwargs['case']\r\n if cases == 'all':\r\n cases = scr.pfile.case.keys()\r\n elif type(cases) is not list:\r\n cases = [cases]\r\n if 'rbm' in kwargs.keys():\r\n if kwargs['rbm'].lower() == 'yes':\r\n rbm = 1\r\n else:\r\n rbm = 0\r\n else:\r\n rbm = 0\r\n\r\n # Run all the requested cases.\r\n for c in cases:\r\n # Create the current case dictionary key.\r\n if c not in self.time.keys():\r\n self.time[c] = []\r\n if c not in self.u.keys():\r\n self.u[c] = []\r\n if c not in self.eta.keys():\r\n self.eta[c] = []\r\n\r\n # Determine the modal force vector.\r\n p_modal = modal_p(self.pfile.case[c], self.phi)\r\n\r\n # Determine the time parameters in the forcing function.\r\n grid = self.pfile.case[c]['grids'][0]\r\n self.time[c] = self.pfile.case[c][grid][:, 0]\r\n dt = self.pfile.case[c]['dt']\r\n\r\n # Add 100 seconds at the end of the forcing function for ring down.\r\n add_time = [(20, 0.01), (80, 0.5)]\r\n for at in add_time:\r\n new_time = np.arange(self.time[c][-1] + dt, self.time[c][-1] + at[0], at[1])\r\n self.time[c] = np.append(self.time[c], new_time)\r\n new_p_modal = np.zeros([self.phi.num_modes, new_time.size])\r\n p_modal = np.append(p_modal, new_p_modal, axis=1)\r\n\r\n # Integrate the modal EOM using Reccurence Formulas:\r\n # etadd + 2 * zeta omn * etad + omn**2 * eta = P\r\n eta0 = np.zeros_like(p_modal)\r\n etad0 = np.zeros_like(p_modal)\r\n [self.eta[c], etad] = rf_mdof(self.time[c], p_modal, self.eig.eigenvalues,\r\n np.multiply(2 * np.pi, self.eig.frequency), self.zeta,\r\n eta0, etad0)\r\n\r\n # Remove rigid body modes unless requested not to.\r\n if rbm == 0:\r\n self.eta[c][0:6, :] = 0.0\r\n\r\n # Recover the desired responses with superposition of modes using the LTM\r\n self.u[c] = self.ltm.dtm @ self.eta[c]\r\n\r\n # Perform the required RSS set out in the HWLIST.\r\n self.rss(c)", "def Y_force(omega_B, V_B, m):\n t = Symbol(\"t\")\n return m * (diff(V_B[1], t) + omega_B[2] * V_B[0] - omega_B[0] * V_B[2])", "def get_omega(\n inequalities: List[Relational], n_bits: int, p: int = 10, as_numeric: bool = False\n) -> Matrix:\n n_vars = len(DEPENDENTS) + len(inequalities)\n q = qe.get_bit_map(n_vars=n_vars, n_bits=n_bits)\n a, b = qe.constraints_to_matrix(\n inequalities, dependents=DEPENDENTS, as_numeric=as_numeric\n )\n omega = -p * qe.get_constrained_matrix(q, a, b, as_numeric=as_numeric)\n nx = len(DEPENDENTS) * n_bits\n omega[:nx, :nx] += get_omega_0(n_bits, as_numeric=as_numeric)\n return omega", "def optim_func(params, model):\n 
if model.model == 'ARD':\n model.alpha, model.beta = params\n lik = model.pruning_algorithm()\n\n else:\n model.alpha = params[0]\n lik = model.pruning_algorithm()\n \n return -lik", "def cmdVelCallback(self, req):\n x = req.linear.x # m/s\n th = req.angular.z # rad/s\n\n if x == 0:\n # Turn in place\n right = th * self.wheel_track * self.gear_reduction / 2.0\n left = -right\n elif th == 0: \n # Pure forward/backward motion\n left = right = x\n else:\n # Rotation about a point in space\n left = x - th * self.wheel_track * self.gear_reduction / 2.0\n right = x + th * self.wheel_track * self.gear_reduction / 2.0\n\n # Set motor speeds in meters per second.\n self.mySerializer.mogo_m_per_s([1, 2], [left, right])", "def asymptotic_Ylm(continuum_orbs, energy, rmax=2500.0, npts_r=300, lebedev_order=65):\n # wave number for kinetic energy E=1/2 k^2\n k = np.sqrt(2*energy)\n # wavelength\n wavelength = 2.0 * np.pi / k\n \n # radial grid\n # sample points and weights for Gauss-Legendre quadrature on the interval [-1,1]\n leggauss_pts, leggauss_weights = legendre.leggauss(npts_r)\n # For Gaussian quadrature the integral [-1,1] has to be changed into [rmax,rmax+2pi/k].\n # new endpoints of interval\n a = rmax\n b = rmax+wavelength\n # transformed sampling points and weights\n r = 0.5*(b-a)*leggauss_pts + 0.5*(a+b)\n dr = 0.5*(b-a)*leggauss_weights\n # Lebedev grid for spherical wave expansion\n th,ph, weights_angular = get_lebedev_grid(lebedev_order)\n # The lebedev quadrature rule is\n # /\n # I[f] = | dOmega f(Omega) = 4 pi sum weights_angular[i] * f(th[i],ph[i])\n # / i\n # For convencience we multiply the factor of 4*pi into the weights\n weights_angular *= 4*np.pi\n\n # evaluate spherical harmonics for L=0,1,2,3 M=-L,..,L on angular Lebedev grid\n Lmax = 3\n Ys,LMs = spherical_harmonics_vec(th,ph, Lmax)\n Ys = np.array(Ys)\n # cartesian coordinates of radial and angular grids\n x = outerN(r, np.sin(th)*np.cos(ph))\n y = outerN(r, np.sin(th)*np.sin(ph))\n z = outerN(r, np.cos(th))\n \n # differential for r-integration\n r2dr = r**2*dr\n # volume element r^2 dr dOmega\n dV = outerN(r2dr, weights_angular)\n # add r-axis to angular weights\n dOmega = outerN(np.ones(npts_r), weights_angular)\n \n # nc: number of continuum wavefunctions\n nc = len(continuum_orbs)\n # number of angular components (L,M) for Lmax=2\n LMdim = Lmax*(Lmax+2)+1\n assert LMdim == len(LMs)\n # set up array for expansion coefficients\n Cs = np.zeros((LMdim,nc))\n \n for a in range(0, nc): # loop over continuum orbitals\n wfn_a = continuum_orbs[a].amp(x,y,z)\n # normalization constant\n nrm2 = np.sum(dV * abs(wfn_a)**2)\n wfn_a /= np.sqrt(nrm2)\n\n for iLM, (l,m) in enumerate(LMs):\n # add r-axis to spherical harmonics Y_(l,m)(r,th,ph) = Y_(l,m)(th,ph)\n Ylm = outerN(np.ones(npts_r), Ys[iLM])\n # radial wavefunction of continuum orbital belonging to L,M channel\n # a /pi /2pi * \n # R (r) = | sin(th) dth | dph Y (th,ph) wfn (r,th,ph)\n # L,M /0 /0 L,M a\n #\n # a\n # wfn_a = sum_(L,M) R (r) Y (th,ph)\n # L,M L,M\n \n # integrate over angles\n Rlm = np.sum(dOmega * Ylm.conjugate() * wfn_a, axis=1)\n # integrate over r\n # a /rmax+k/2pi 2 | a |2\n # C = | r dr | R | 1 / (4 pi)\n # L,M /rmax | L,M | \n Cs[iLM,a] = np.sum(r2dr * abs(Rlm)**2)\n\n print \" Asymptotic Decomposition Y_(l,m)\"\n print \" ================================\"\n row_labels = [\"orb. 
%2.1d\" % a for a in range(0, nc)]\n col_labels = [\"Y(%d,%+d)\" % (l,m) for (l,m) in LMs]\n txt = annotated_matrix(Cs.transpose(), row_labels, col_labels)\n print txt\n \n return Cs,LMs", "def find_coefficients(self):\n self.make_matrix()\n self.coeffs = np.linalg.solve(self.global_matrix,self.global_vector)\n self.coeffs = np.append(self.coeffs, self.D) #Initial condition", "def integrator_model(self):\n\n xd, xa, u, ODEeq, Aeq, states, algebraics, inputs = self.DAE_model()\n VV = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])\n solver = rootfinder('solver', 'newton', VV)\n\n # model = functools.partial(solver, np.zeros(np.shape(xa)))\n return solver", "def velocity_field(xt,yt,x0,y0,Vinf,dia,rot,chord,B,param=None,veltype='all',integration='simp',m=220,n=200):\n rad = dia/2.\n tsr = rad*fabs(rot)/Vinf\n solidity = (chord*B)/rad\n\n # Translating the turbine position\n x0t = x0 - xt\n y0t = y0 - yt\n\n coef0,coef1,coef2,coef3,coef4,coef5,coef6,coef7,coef8,coef9 = coef_val()\n\n # Calculating EMG distribution parameters (based on polynomial surface fitting)\n if param is None:\n loc1 = _parameterval(tsr,solidity,coef0)\n loc2 = _parameterval(tsr,solidity,coef1)\n loc3 = _parameterval(tsr,solidity,coef2)\n spr1 = _parameterval(tsr,solidity,coef3)\n spr2 = _parameterval(tsr,solidity,coef4)\n skw1 = _parameterval(tsr,solidity,coef5)\n skw2 = _parameterval(tsr,solidity,coef6)\n scl1 = _parameterval(tsr,solidity,coef7)\n scl2 = _parameterval(tsr,solidity,coef8)\n scl3 = _parameterval(tsr,solidity,coef9)\n\n else:\n # Reading in EMG distribution parameters\n loc1 = param[0]\n loc2 = param[1]\n loc3 = param[2]\n spr1 = param[3]\n spr2 = param[4]\n skw1 = param[5]\n skw2 = param[6]\n scl1 = param[7]\n scl2 = param[8]\n scl3 = param[9]\n\n ###################################\n if veltype == 'vort':\n # VORTICITY CALCULATION (NO INTEGRATION)\n if x0t < 0.:\n vel = 0.\n else:\n vel = _vawtwake.vorticitystrength(x0t,y0t,dia,loc1,loc2,loc3,spr1,spr2,skw1,skw2,scl1,scl2,scl3)/rot\n ###################################\n else:\n # Integration of the vorticity profile to calculate velocity\n if integration == 'simp':\n # SIMPSON'S RULE INTEGRATION (must use polynomial surface coefficients from VAWTPolySurfaceCoef.csv)\n inte = 1 # Simpson's Rule\n # inte = 2 # Trapezoidal Rule (optional ability of the code-- faster but less accurate)\n\n if param is not None:\n print \"**** Using polynomial surface coefficients from VAWTPolySurfaceCoef.csv for Simpson's rule integration ****\"\n\n vel_xs,vel_ys = _vawtwake.vel_field(xt,yt,x0,y0,dia,rot,chord,B,Vinf,coef0,coef1,coef2,coef3,coef4,coef5,coef6,coef7,coef8,coef9,m,n,inte)\n\n if veltype == 'all':\n vel = sqrt((vel_xs*Vinf + Vinf)**2 + (vel_ys*Vinf)**2)/Vinf\n elif veltype == 'x':\n vel = (vel_xs*Vinf + Vinf)/Vinf\n elif veltype == 'y':\n vel = vel_ys\n elif veltype == 'ind':\n vel = np.array([vel_xs,vel_ys])\n ###################################\n elif integration == 'gskr':\n # 21-POINT GAUSS-KRONROD RULE QUADRATURE INTEGRATION\n xbound = (scl3+5.)*dia\n argval = (x0t,y0t,dia,loc1,loc2,loc3,spr1,spr2,skw1,skw2,scl1,scl2,scl3)\n if veltype == 'all' or veltype == 'x' or veltype == 'ind':\n vel_x = _dblquad(_vawtwake.integrandx,0.,xbound,lambda x: -1.*dia,lambda x: 1.*dia,args=argval)\n vel_xs = (vel_x[0]*fabs(rot))/(2.*pi)\n if veltype == 'all' or veltype == 'y' or veltype == 'ind':\n vel_y = _dblquad(_vawtwake.integrandy,0.,xbound,lambda x: -1.*dia,lambda x: 1.*dia,args=argval)\n vel_ys = (vel_y[0]*fabs(rot))/(2.*pi)\n\n if veltype == 'all':\n 
vel = sqrt((vel_xs + Vinf)**2 + (vel_ys)**2)/Vinf\n elif veltype == 'x':\n vel = (vel_xs + Vinf)/Vinf\n elif veltype == 'y':\n vel = vel_ys/Vinf\n elif veltype == 'ind':\n vel = np.array([vel_xs,vel_ys])/Vinf\n ###################################\n\n return vel", "def build_rhs():\n\n def div(\n coeff_rho,\n momentum_x,\n momentum_y,\n momentum_z,\n ):\n \"\"\"Computes the divergence of the velocity field.\"\"\"\n # Compute the fourth order derivative of the pressure for the face\n # velocity correction.\n p_corr = (\n states['p']\n if self._params.enable_rhie_chow_correction else states['dp'])\n d4p_dx4 = self._kernel_op.apply_kernel_op_x(p_corr, 'k4d2x')\n d4p_dy4 = self._kernel_op.apply_kernel_op_y(p_corr, 'k4d2y')\n d4p_dz4 = self._kernel_op.apply_kernel_op_z(p_corr, 'k4d2z',\n 'k4d2zsh')\n\n # Compute velocity gradient based on interpolated values on cell faces.\n coeff_x = dt / (4. * coeff_rho * dx**2)\n du = self._kernel_op.apply_kernel_op_x(momentum_x, 'kDx')\n du_dx = [\n du_i / (2. * dx) + coeff_x * d4p_dx4_i\n for du_i, d4p_dx4_i in zip(du, d4p_dx4)\n ]\n\n coeff_y = dt / (4. * coeff_rho * dy**2)\n dv = self._kernel_op.apply_kernel_op_y(momentum_y, 'kDy')\n dv_dy = [\n dv_i / (2. * dy) + coeff_y * d4p_dy4_i\n for dv_i, d4p_dy4_i in zip(dv, d4p_dy4)\n ]\n\n coeff_z = dt / (4. * coeff_rho * dz**2)\n dw = self._kernel_op.apply_kernel_op_z(momentum_z, 'kDz', 'kDzsh')\n dw_dz = [\n dw_i / (2. * dz) + coeff_z * d4p_dz4_i\n for dw_i, d4p_dz4_i in zip(dw, d4p_dz4)\n ]\n\n return [\n du_dx_i + dv_dy_i + dw_dz_i\n for du_dx_i, dv_dy_i, dw_dz_i in zip(du_dx, dv_dy, dw_dz)\n ]\n\n def add_factor(\n v,\n factor,\n ):\n return [factor * v_i for v_i in v]\n\n b_terms = {\n _B_TERM_SOURCE_RHO: add_factor(src_rho, inv_dt),\n }\n if isinstance(rho_info, ConstantDensityInfo):\n b_terms.update({\n _B_TERM_DIV:\n add_factor(\n div(rho_info.rho, states['u'], states['v'], states['w']),\n inv_dt * rho_info.rho),\n _B_TERM_DRHO_DT: [\n tf.zeros_like(src_rho_i) for src_rho_i in src_rho\n ],\n })\n\n elif isinstance(rho_info, VariableDensityInfo):\n b_terms.update({\n _B_TERM_DIV:\n add_factor(\n div(1.0, states['rho_u'], states['rho_v'], states['rho_w']),\n inv_dt),\n _B_TERM_DRHO_DT:\n add_factor(rho_info.drho_dt, inv_dt),\n })\n\n else:\n raise ValueError('`rho_info` has to be either `ConstantDensityInfo` or '\n '`VariableDensityInfo`.')\n\n # pylint: disable=g-complex-comprehension\n return [(div_i + drho_dt_i - src_rho_i)\n for div_i, drho_dt_i, src_rho_i in zip(\n b_terms[_B_TERM_DIV],\n b_terms[_B_TERM_DRHO_DT],\n b_terms[_B_TERM_SOURCE_RHO],\n )], b_terms\n # pylint: enable=g-complex-comprehension", "def _compute_aero_torque(self, curr_date, omega):\n if self._to_add[3]:\n # assuming constant atmosphere condition over spacecraft\n # error is of order of 10^-17\n rho = self.AtmoModel.getDensity(curr_date, self.satPos_i, self.in_frame)\n vAtm_i = self.AtmoModel.getVelocity(curr_date, self.satPos_i, self.in_frame)\n\n satVel = self.inertial2Sat.applyTo(self.satVel_i)\n vAtm = self.inertial2Sat.applyTo(vAtm_i)\n\n self._aTorque = Vector3D.ZERO\n\n dragCoeff = self.meshDA['Cd']\n liftRatio = 0.0 # no lift considered\n\n iterator = itertools.izip(self.meshDA['CoM'],\n self.meshDA['Normal'],\n self.meshDA['Area'])\n\n for CoM, Normal, Area in iterator:\n CoMVelocity = satVel.add(self.V3_cross(omega, CoM))\n relativeVelocity = vAtm.subtract(CoMVelocity)\n\n vNorm2 = relativeVelocity.getNormSq()\n vNorm = sqrt(vNorm2)\n vDir = relativeVelocity.scalarMultiply(1.0 / vNorm)\n\n dot = self.V3_dot(Normal, 
vDir)\n if (dot < 0):\n coeff = 0.5 * rho * dragCoeff * vNorm2\n oMr = 1.0 - liftRatio\n # dA intercepts the incoming flux\n f = coeff * Area * dot\n force = Vector3D(float(oMr * abs(f)), vDir,\n float(liftRatio * f * 2), Normal)\n self._aTorque = self._aTorque.add(self.V3_cross(CoM, force))\n\n else:\n self._aTorque = Vector3D.ZERO", "def interacting(double[:] v, redshifts, in_terms, double H0):\n cdef double t = v[0]\n cdef double a = v[1]\n cdef double ombar_m = v[2]\n cdef double ombar_de = v[3]\n cdef double z = v[4]\n cdef double dl = v[5]\n cdef double gamma = in_terms[0]\n\n cdef double Hz = H0 * (ombar_m + ombar_de)**(0.5)\n\n# if ombar_m < 0 or ombar_de < 0 or math.isnan(Hz):\n# print('interacting')\n# print('z = %s, Hz = %s, gamma = %s, ombar_m = %s, ombar_de = %s'\n# %(z, Hz, gamma, ombar_m, ombar_de))\n\n if math.isnan(Hz):\n print('interacting')\n print('z = %s, Hz = %s, gamma = %s, ombar_m = %s, ombar_de = %s'\n %(z, Hz, gamma, ombar_m, ombar_de))\n\n cdef double irate = gamma/(1.0+z)/Hz\n\n cdef double dtdz = -1.0/((1.0+z) * Hz)\n cdef double dadz = -(1.0+z)**(-2.0)\n cdef double domdz = 3.0*ombar_m /(1.0+z) - irate\n cdef double ddldz = 1.0/Hz\n\n # first derivatives of functions I want to find:\n f = [dtdz,# dt/dz (= f.d wrt z of time)\n dadz,# d(a)/dz (= f.d wrt z of scale factor)\n domdz,# d(ombar_m)/dz (= f.d wrt z of density_m(t) / crit density(t0))\n irate,# d(ombar_de)/dz (= f.d wrt z of density_de(t) / crit density(t0))\n 1.0,# d(z)/dz (= f.d wrt z of redshift)\n ddldz]# d(dl)/dz (= f.d wrt z of luminosty distance) # H + Hdz*(1+z)\n\n return f", "def Z_force(omega_B, V_B, m):\n t = Symbol(\"t\")\n return m * (diff(V_B[2], t) + omega_B[0] * V_B[1] - omega_B[1] * V_B[0])", "def cost(phi, theta, omega, ket):\n evolved = jnp.dot(rot(phi, theta, omega), ket)\n return fidelity(evolved, basis(2, 0))[0][0]", "def omega(self, i, rosetta_definitions = True):\n res = self.all_residues[i]\n\n try:\n n = res['N'].get_vector()\n ca = res['CA'].get_vector()\n c = res['C'].get_vector()\n\n\n\n if rosetta_definitions and i < len(self.all_residues) -1 and self.connected_to_next(i):\n res_plus_one = self.all_residues[i + 1]\n next_n = res_plus_one['N'].get_vector()\n next_ca = res_plus_one['CA'].get_vector()\n omega = calc_dihedral(ca, c, next_n, next_ca)\n return omega\n\n elif not rosetta_definitions and i > 1 and self.connected_to_previous(i):\n res_minus_one = self.all_residues[i - 1]\n pre_c = res_minus_one['C'].get_vector()\n pre_ca = res_minus_one['CA'].get_vector()\n omega = calc_dihedral(pre_ca, pre_c, n, ca)\n return omega\n else:\n return 0.0\n\n except BaseException:\n print \"Could not get omega for \"+repr(i)\n raise LookupError", "def __call__(self, params):\n # Construct model for given set of parameters\n mod = self.model(params)\n\n # Input into equation (11) from Anderson (1990)\n # But we want log-likelihood not negative log-likelihood (in MCMC)\n # and so we add the -1.0\n like = np.sum(np.log(mod) + (self.power / mod))\n return -1.0*like", "def coefA(x0,y0,x1,y1):\n return -(y1-y0)/(x1-x0)", "def X_force(omega_B, V_B, m):\n t = Symbol(\"t\")\n return m * (diff(V_B[0], t) + omega_B[1] * V_B[2] - omega_B[2] * V_B[1])", "def el2rv(mu,a,e,i,capom,om,f):\n\n prec = 1.0e-13 #user can change this if more precision needed (just runs slower)\n\n #compute the unit vector\n u = om + f\n xhat = np.cos(u)*np.cos(capom) - np.cos(i)*np.sin(capom)*np.sin(u)\n yhat = np.cos(u)*np.sin(capom) + np.cos(i)*np.cos(capom)*np.sin(u)\n zhat = np.sin(i)*np.sin(u)\n\n #compute the angular 
momentum vector (unit vector)\n hx = np.sin(capom)*np.sin(i)\n hy = -np.cos(capom)*np.sin(i)\n hz = np.cos(i)\n\n #assuming not parabolic, here the magnitudes of the vectors\n r = a * (1.0 - e*e) / (1.0 + e*np.cos(f))\n h = ( mu*a*(1.0 - e*e) )**0.5\n\n #position vectors\n x = r * xhat\n y = r * yhat\n z = r * zhat\n\n #compute components of vector theta hat\n thx = hy * zhat - hz * yhat\n thy = hz * xhat - hx * zhat\n thz = hx * yhat - hy * xhat\n\n #obtain the velocity vector's components and calculate v\n thdot = h/(r*r)\n rdot = e*mu*np.sin(f)/h\n\n vx = r * thdot * thx + rdot * xhat\n vy = r * thdot * thy + rdot * yhat\n vz = r * thdot * thz + rdot * zhat\n\n return x,y,z", "def runComponent(comp, pot):\n if comp.has_key('trajsize'):\n result = agama.orbit(potential=pot, ic=comp['ic'], time=comp['inttime'], \\\n targets=comp['targets'], trajsize=comp['trajsize'])\n traj = result[-1]\n else:\n result = agama.orbit(potential=pot, ic=comp['ic'], time=comp['inttime'], targets=comp['targets'])\n if type(result) == numpy.array: result = (result,)\n # targets[0] is density, targets[1], if provided, is kinematics\n matrix = list()\n rhs = list()\n rpenl = list()\n matrix.append(result[0].T)\n rhs. append(comp['targets'][0].values())\n mass = rhs[0][-1] # the last constraint is the total mass\n avgrhs = mass/len(rhs[0]) # typical constraint magnitude\n rpenl. append(numpy.ones_like(rhs[0]) / avgrhs)\n if len(comp['targets']) == 2 and comp.has_key('beta'):\n numrow = len(comp['targets'][1]) / 2\n matrix.append(result[1].T[0:numrow] * 2*(1-comp['beta']) - result[1].T[numrow:2*numrow])\n rhs. append(numpy.zeros(numrow))\n rpenl. append(numpy.ones(numrow) * 10.)\n avgweight = mass / len(comp['ic'])\n xpenq = numpy.ones(len(comp['ic'])) / avgweight**2 / len(comp['ic']) * 0.1\n weights = agama.optsolve(matrix=matrix, rhs=rhs, rpenl=rpenl, xpenq=xpenq )\n\n # check for any outstanding constraints\n for t in range(len(matrix)):\n delta = matrix[t].dot(weights) - rhs[t]\n norm = 1e-4 * abs(comp['targets'][t].values()) + 1e-8\n for c, d in enumerate(delta):\n if abs(d) > norm[c]:\n print \"Constraint\",t,\" #\",c,\"not satisfied:\", comp['targets'][t][c], d\n print \"Entropy:\", -sum(weights * numpy.log(weights+1e-100)) / mass + numpy.log(avgweight), \\\n \" # of useful orbits:\", len(numpy.where(weights >= avgweight)[0]), \"/\", len(comp['ic'])\n\n # create an N-body model if needed\n if comp.has_key('nbody'):\n status,particles = agama.sampleOrbitLibrary(comp['nbody'], traj, weights)\n if not status:\n indices,trajsizes = particles\n print \"reintegrating\",len(indices),\"orbits; max # of sampling points is\", max(trajsizes)\n traj[indices] = agama.orbit(potential=pot, ic=comp['ic'][indices], \\\n time=comp['inttime'][indices], trajsize=trajsizes)\n status,particles = agama.sampleOrbitLibrary(comp['nbody'], traj, weights)\n if not status: print \"Failed to produce output N-body model\"\n comp['nbodymodel'] = particles\n\n # output\n comp['weights'] = weights\n comp['densitydata'] = result[0]\n if len(matrix) == 2: comp['kinemdata'] = result[1]\n if comp.has_key('trajsize'): comp['traj'] = traj\n return comp", "def forward_theta(self):\n SW = self.simplesphere.sphere_wrapper\n for dm, m in enumerate(self.simplesphere.local_m):\n m_data = [f.data[dm] for f in self.component_fields]\n # Unpack for rank 0 to counteract shortcut bug in sphere_wrapper\n if self.rank == 0:\n m_data, = m_data\n self.coeffs[dm] = SW.forward(m, self.rank, m_data)", "def orderparameter(x, Tc=100, beta=0.5, amp=1):\n # op = 
amp*np.real(np.power(np.complex(Tc-x),beta))\n op = amp * np.power(Tc - x, beta)\n op[np.isnan(op)] = 0.0\n return op", "def omega(self):\n return self._omega", "def fit_function(x, omega):\n return pylab.sin(omega*x)", "def _model(self, t, theta, period, tmpid):\n template = self.templates[tmpid]\n phase = (t / period - theta[2]) % 1\n return theta[0] + theta[1] * template(phase)", "def A(param):\n return (param.delta + param.nu + param.mu0) * (param.beta - param.nu)", "def beta_model(r, s0, rc, beta, c):\n return s0 * np.power((1.0+(r/rc)**2), 0.5-3*beta) + c", "def calc(self):\n\n # the following if query ensures that volume- and interaction-terms\n # are only calculated if tau > 0.\n # (to avoid nan-values from invalid function-evaluations)\n\n if self.V.tau.shape == (1,):\n Isurf = self.surface()\n # differentiation for non-existing canopy, as otherwise NAN values\n if self.V.tau > 0.:\n Ivol = self.volume()\n if self.int_Q is True:\n Iint = self.interaction()\n else:\n Iint = np.array([0.])\n else:\n Ivol = np.array([0.])\n Iint = np.array([0.])\n else:\n # calculate surface-term (valid for any tau-value)\n Isurf = self.surface()\n\n # store initial parameter-values\n old_t_0 = self.t_0\n old_p_0 = self.p_0\n old_t_ex = self.t_ex\n old_p_ex = self.p_ex\n\n old_tau = self.V._get_tau()\n old_omega = self.V._get_omega()\n old_NN = self.SRF._get_NormBRDF()\n\n # set mask for tau > 0.\n mask = old_tau > 0.\n valid_index = np.where(mask)\n inval_index = np.where(~mask)\n\n # set parameter-values to valid values for calculation\n self.t_0 = old_t_0[valid_index[0]]\n self.p_0 = old_p_0[valid_index[0]]\n self.t_ex = old_t_ex[valid_index[0]]\n self.p_ex = old_p_ex[valid_index[0]]\n\n # squeezing the arrays is necessary since the setter-function for\n # tau, omega and NormBRDF automatically adds an axis to the arrays!\n self.V.tau = np.squeeze(old_tau[valid_index[0]])\n if np.array(self.V.omega).size != 1:\n self.V.omega = np.squeeze(old_omega[valid_index[0]])\n if np.array(self.SRF.NormBRDF).size != 1:\n self.SRF.NormBRDF = np.squeeze(old_NN[valid_index[0]])\n\n # calculate volume and interaction term where tau-values are valid\n _Ivol = self.volume()\n if self.int_Q is True:\n _Iint = self.interaction()\n else:\n _Iint = np.full_like(self.t_0, 0.)\n\n # reset parameter values to old values\n self.t_0 = old_t_0\n self.p_0 = old_p_0\n self.t_ex = old_t_ex\n self.p_ex = old_p_ex\n\n # squeezing the arrays is necessary since the setter-function for\n # tau, omega and NormBRDF automatically add an axis to the arrays!\n self.V.tau = np.squeeze(old_tau)\n self.V.omega = np.squeeze(old_omega)\n self.SRF.NormBRDF = np.squeeze(old_NN)\n\n # combine calculated volume-contributions for valid tau-values\n # with zero-arrays for invalid tau-values\n Ivol = np.ones_like(self.t_0)\n Ivol[valid_index[0]] = _Ivol\n Ivol[inval_index[0]] = np.ones_like(Ivol[inval_index[0]]) * 0.\n\n # combine calculated interaction-contributions for valid tau-values\n # with zero-arrays for invalid tau-values\n if self.int_Q is True:\n Iint = np.ones_like(self.t_0)\n Iint[valid_index[0]] = _Iint\n Iint[inval_index[0]] = np.ones_like(Iint[inval_index[0]]) * 0.\n else:\n Iint = np.full_like(self.t_0, 0.)\n\n return Isurf + Ivol + Iint, Isurf, Ivol, Iint", "def solver_bc(\n kappa, f, # Coefficients in the PDE\n boundary_conditions, # Dict of boundary conditions\n Nx, Ny, # Cell division of the domain\n degree=1, # Polynomial degree\n subdomains=[], # List of SubDomain objects in domain\n linear_solver='Krylov', # Alt: 'direct'\n 
abs_tol=1E-5, # Absolute tolerance in Krylov solver\n rel_tol=1E-3, # Relative tolerance in Krylov solver\n max_iter=1000, # Max no of iterations in Krylov solver\n log_level=PROGRESS, # Amount of solver output\n dump_parameters=False, # Write out parameter database?\n debug=False,\n ):\n # Create mesh and define function space\n mesh = UnitSquareMesh(Nx, Ny)\n V = FunctionSpace(mesh, 'P', degree)\n\n tol = 1E-14\n\n # Subdomains in the domain?\n import numpy as np\n if subdomains:\n # subdomains is list of SubDomain objects,\n # p is array of corresponding constant values of p\n # in each subdomain\n if not isinstance(kappa, (list, tuple, np.ndarray)):\n raise TypeError(\n 'kappa must be array if we have sudomains, not %s'\n % type(kappa))\n materials = CellFunction('size_t', mesh)\n materials.set_all(0) # \"the rest\"\n for m, subdomain in enumerate(subdomains[1:], 1):\n subdomain.mark(materials, m)\n\n kappa_values = kappa\n V0 = FunctionSpace(mesh, 'DG', 0)\n kappa = Function(V0)\n help = np.asarray(materials.array(), dtype=np.int32)\n kappa.vector()[:] = np.choose(help, kappa_values)\n else:\n if not isinstance(kappa, (Expression, Constant)):\n raise TypeError(\n 'kappa is type %s, must be Expression or Constant'\n % type(kappa))\n\n # Boundary subdomains\n class BoundaryX0(SubDomain):\n def inside(self, x, on_boundary):\n return on_boundary and abs(x[0]) < tol\n\n class BoundaryX1(SubDomain):\n def inside(self, x, on_boundary):\n return on_boundary and abs(x[0] - 1) < tol\n\n class BoundaryY0(SubDomain):\n def inside(self, x, on_boundary):\n return on_boundary and abs(x[1]) < tol\n\n class BoundaryY1(SubDomain):\n def inside(self, x, on_boundary):\n return on_boundary and abs(x[1] - 1) < tol\n\n # Mark boundaries\n boundary_markers = FacetFunction('size_t', mesh)\n boundary_markers.set_all(9999)\n bx0 = BoundaryX0()\n bx1 = BoundaryX1()\n by0 = BoundaryY0()\n by1 = BoundaryY1()\n bx0.mark(boundary_markers, 0)\n bx1.mark(boundary_markers, 1)\n by0.mark(boundary_markers, 2)\n by1.mark(boundary_markers, 3)\n\n # Redefine boundary integration measure\n ds = Measure('ds', domain=mesh, subdomain_data=boundary_markers)\n\n # Collect Dirichlet conditions\n bcs = []\n for i in boundary_conditions:\n if 'Dirichlet' in boundary_conditions[i]:\n bc = DirichletBC(V, boundary_conditions[i]['Dirichlet'],\n boundary_markers, i)\n bcs.append(bc)\n\n if debug:\n # Print all vertices that belong to the boundary parts\n for x in mesh.coordinates():\n if bx0.inside(x, True): print('%s is on x = 0' % x)\n if bx1.inside(x, True): print('%s is on x = 1' % x)\n if by0.inside(x, True): print('%s is on y = 0' % x)\n if by1.inside(x, True): print('%s is on y = 1' % x)\n\n # Print the Dirichlet conditions\n print('Number of Dirichlet conditions:', len(bcs))\n if V.ufl_element().degree() == 1: # P1 elements\n d2v = dof_to_vertex_map(V)\n coor = mesh.coordinates()\n for i, bc in enumerate(bcs):\n print('Dirichlet condition %d' % i)\n boundary_values = bc.get_boundary_values()\n for dof in boundary_values:\n print(' dof %2d: u=%g' % (dof, boundary_values[dof]))\n if V.ufl_element().degree() == 1:\n print(' at point %s' %\n (str(tuple(coor[d2v[dof]].tolist()))))\n\n # Define trial and test functions\n u = TrialFunction(V)\n v = TestFunction(V)\n\n # Collect Neumann integrals\n integrals_N = []\n for i in boundary_conditions:\n if 'Neumann' in boundary_conditions[i]:\n if boundary_conditions[i]['Neumann'] != 0:\n g = boundary_conditions[i]['Neumann']\n integrals_N.append(g*v*ds(i))\n\n # Collect Robin integrals\n 
integrals_R_a = []\n integrals_R_L = []\n for i in boundary_conditions:\n if 'Robin' in boundary_conditions[i]:\n r, s = boundary_conditions[i]['Robin']\n integrals_R_a.append(r*u*v*ds(i))\n integrals_R_L.append(r*s*v*ds(i))\n\n # Simpler Robin integrals\n integrals_R = []\n for i in boundary_conditions:\n if 'Robin' in boundary_conditions[i]:\n r, s = boundary_conditions[i]['Robin']\n integrals_R.append(r*(u - s)*v*ds(n))\n\n # Define variational problem, solver_bc\n a = kappa*dot(grad(u), grad(v))*dx + sum(integrals_R_a)\n L = f*v*dx - sum(integrals_N) + sum(integrals_R_L)\n\n # Simpler variational formulation\n F = kappa*dot(grad(u), grad(v))*dx + \\\n sum(integrals_R) - f*v*dx + sum(integrals_N)\n a, L = lhs(F), rhs(F)\n\n # Compute solution\n u = Function(V)\n\n if linear_solver == 'Krylov':\n prm = parameters['krylov_solver'] # short form\n prm['absolute_tolerance'] = abs_tol\n prm['relative_tolerance'] = rel_tol\n prm['maximum_iterations'] = max_iter\n print(parameters['linear_algebra_backend'])\n set_log_level(log_level)\n if dump_parameters:\n info(parameters, True)\n solver_parameters = {'linear_solver': 'gmres',\n 'preconditioner': 'ilu'}\n else:\n solver_parameters = {'linear_solver': 'lu'}\n\n solve(a == L, u, bcs, solver_parameters=solver_parameters)\n return u, kappa # Note: kappa may be modified (Function on V0)", "def integrator_model(self):\n\n xd, xa, u, uncertainty, ODEeq, Aeq, u_min, u_max, states, algebraics, inputs, nd, na, nu, nmp, modparval \\\n = self.DAE_system()\n ODEeq_ = vertcat(*ODEeq)\n\n self.ODEeq = Function('f', [xd, u], [vertcat(*ODEeq)], ['x0', 'p'], ['xdot'])\n\n dae = {'x': vertcat(xd), 'z': vertcat(xa), 'p': vertcat(u),\n 'ode': vertcat(*ODEeq), 'alg': vertcat(*Aeq)}\n opts = {'tf': self.tf / self.nk} # interval length\n F = integrator('F', 'idas', dae, opts)\n # model = functools.partial(solver, np.zeros(np.shape(xa)))\n return F", "def _compute_solar_torque(self):\n pass", "def main():\n prior = np.asarray([2.0, 4e-6, 1e-4])\n priorCov = np.diag(np.asarray([5.0, 1e-2, 1.0]))\n independentVariable = np.arange(0.0, 5.0e-5, 1e-7)\n \n truth = np.asarray([2.5, 1.2e-5, 4e-4])\n observation = (truth[0] * np.exp(-independentVariable/(truth[1])) + \n truth[2] + np.random.normal(0.0, 0.1, independentVariable.shape))\n observationError = (0.1 * np.sqrt(truth[0] * \n np.exp(-independentVariable/(truth[1]))) + math.sqrt(truth[2]))\n \n model = decay(prior=prior, priorCov=priorCov, \n otherModelParam=None, \n parameterNames=[\"a\", \"tau\", \"offset\"], \n verbose=2,\n observation=observation, \n observationError=observationError,\n independentVariable=independentVariable)\n \n oe = OE(model=model, maxiter=8)\n \n oe()\n \n# print(oe)\n# print(model)\n# \n# model.plot()\n \n return oe, model", "def f(self,y,psi):\r\n\r\n #1. check that number of params is consistent\r\n # assert psi.shape[0] == self.n_terms, 'inconsistent parameter dimensions'\r\n # assert psi.shape[1] == 4, 'inconsistent parameter dimensions'\r\n mpsi = psi.copy()\r\n d = psi[-1]\r\n mpsi = mpsi[:self.num_parameters-1].reshape(self.n_terms, 3)\r\n\r\n #3. 
transform data\r\n z = d*y.copy()\r\n for i in range(len(mpsi)):\r\n a,b,c = mpsi[i]\r\n z += a*np.tanh(b*(y+c))\r\n return z", "def _get_omega(self, vehicle_id):\n pos = self.positions[vehicle_id]\n omega = self.frenets[vehicle_id].get_omega(\n pos[0], pos[1], pos[2], pos[3])\n\n return omega", "def overheadmodel(J=None, m=None, mt=None, r=None, gravity=9.81,\n counterg=False):\n\n mmat = np.diag([mt, J, m, m])\n\n # state: x = [s, beta, xd, zd].T\n\n amat = np.zeros((4, 4))\n bmat = np.array([[1., 0], [0, 1.], [0, 0], [0, 0]])\n cmat = np.array([[0, 0, 1, 0], [0, 0, 0, 1]])\n\n if counterg:\n rhs = np.array([[0, -m*gravity*r, 0, m*gravity]]).T\n else:\n rhs = np.array([[0, 0, 0, m*gravity]]).T\n\n def holoc(x=None):\n return (x[2] - x[0])**2 + x[3]**2 - (r*x[1])**2\n\n def holojaco(x):\n return 2*np.array([[-(x[2]-x[0]), -r**2*x[1], x[2]-x[0], x[3]]]).\\\n reshape((1, x.size))\n\n def holohess(x):\n return 2*np.array([[1, 0, -1, 0],\n [0, -r**2, 0, 0],\n [-1, 0, 1, 0],\n [0, 0, 0, 1]])\n\n ovhdcrn = dict(mmat=mmat, amat=amat, bmat=bmat, cmat=cmat,\n rhs=rhs, holoc=holoc, holojaco=holojaco, holohess=holohess)\n return ovhdcrn", "def solver(self, alpha):\n if alpha == 0: # skip divided by 0 error\n return [0], [0] # r and phi=0\n\n if alpha == 180:\n return [self.D], [0] # if angle= pi then, tan(pi)=0 so 1/tan=1/0\n\n # initial value for position and angular speed\n y0 = [1/self.D, 1/(self.D*math.tan(math.radians(alpha)))]\n sol = solve_ivp(fun=self._diff_eq, t_span=[0, 10*pi], y0=y0, method='Radau', events=[self._eventRs]) #, self._eventR])#,t_eval=np.linspace(0, t_max, 10000)) #dense_output=False\n\n if sol.t[-1] == 10*pi:\n raise StopIteration(\"solver error, alpha reached computation limit (loop number)\")\n\n phi = np.array(sol.t)\n r = np.abs(1/sol.y[0, :]) # must use this because solver can't be stop before infinity because negative\n\n return r, phi", "def calculator(**pars):\n # paying for parameter conversion each time to keep life simple, if not fast\n pars = revert_pars(model_info, pars)\n for k, v in pars.items():\n parts = k.split('.') # polydispersity components\n if len(parts) == 2:\n model.dispersion[parts[0]][parts[1]] = v\n else:\n model.setParam(k, v)\n return theory()", "def A_calc(self, x, y, theta, v, omega, dt):\n # Initialize 5x5 A matrix\n A = np.zeros((5,5))\n A[0,0] = 1\n A[1,1] = 1\n A[2,2] = 1\n A[3,3] = 1\n A[4,4] = 1\n \n A[0,2] = -1 * v * np.sin(theta) * dt\n A[0,3] = np.cos(theta) * dt\n A[1,2] = v * np.cos(theta) * dt\n A[1,3] = np.sin(theta) * dt\n A[2,4] = dt\n \n return(A)", "def obtain_training_parameters(para, x, y, alg = 'LR'):\n \n \n global omega\n \n # Iterate to find the optimal parameters\n if alg == 'LR': # logistic regression\n omega = np.zeros((3, 1))\n alpha = para.step_size # step size\n for i in range(para.iteration):\n grad = np.zeros((3, 1))\n for i in range(len(x[:, 0])):\n grad += np.reshape(x[i, :], (3, 1)) * (-y[i] + 1 / (1 + np.exp(-np.dot(x[i, :], omega))))\n omega -= alpha * grad \n \n elif alg == 'GNB': # Gaussian Naive Bayes\n # get counts for each class\n itszero = 0\n itsone = 0\n for i in range(len(y)):\n if y[i] == 1:\n itsone += 1\n else:\n itszero += 1\n \n # probability of see y\n theta0 = itszero / len(y)\n theta1 = 1 - theta0\n \n # mean of omega\n mew00 = 0\n mew01 = 0\n mew02 = 0\n mew10 = 0\n mew11 = 0\n mew12 = 0\n for i in range(len(y)):\n if y[i] == 0:\n mew00 += x[i, 0] / itszero\n mew01 += x[i, 1] / itszero\n mew02 += x[i, 2] / itszero\n else:\n mew10 += x[i, 0] / itsone\n mew11 += x[i, 1] / itsone\n mew12 += 
x[i, 2] / itsone\n \n # variance of omega \n sigma00 = 0\n sigma01 = 0\n sigma02 = 0\n sigma10 = 0\n sigma11 = 0\n sigma12 = 0\n for i in range(len(y)):\n if y[i] == 0:\n sigma00 += (x[i, 0] - mew00)**2 / itszero\n sigma01 += (x[i, 1] - mew01)**2 / itszero\n sigma02 += (x[i, 2] - mew02)**2 / itszero\n else:\n sigma10 += (x[i, 0] - mew10)**2 / itsone\n sigma11 += (x[i, 1] - mew11)**2 / itsone\n sigma12 += (x[i, 2] - mew12)**2 / itsone\n \n # store these parameters into the name \"omage\"\n omega = [theta0, theta1, mew00, mew01, mew02, mew10, mew11, mew12,\n sigma00, sigma01, sigma02, sigma10, sigma11, sigma12] \n \n else: # Gaussian Mixture\n pass\n \n return omega", "def f_theta_omega(angles, times):\n # gravity = 9.81\n # arm_length = 0.1 \n\n # omega = angles[0]\n # theta = angles[1]\n # ftheta = omega\n # fomega = -(gravity/arm_length) * np.sin(theta)\n pass", "def make_position_model_r2bc_math(traj_size = 731):\n # Create input layers\n t = keras.Input(shape=(traj_size), name='t')\n r0 = keras.Input(shape=(1,), name='r0')\n theta0 = keras.Input(shape=(1,), name='theta0')\n omega0 = keras.Input(shape=(1,), name='omega0')\n # The combined input layers\n inputs = [t, r0, theta0, omega0]\n \n # Reshape t to (batch_size, traj_size, 1)\n t_vec = keras.layers.Reshape(target_shape=(traj_size, 1), name='t_vec')(t)\n \n # Repeat r, theta0 and omega to be vectors of shape (batch_size, traj_size)\n r = keras.layers.RepeatVector(n=traj_size, name='r')(r0)\n theta0 = keras.layers.RepeatVector(n=traj_size, name='theta0_vec')(theta0)\n omega = keras.layers.RepeatVector(n=traj_size, name='omega_vec')(omega0)\n\n # Check shapes\n batch_size = t.shape[0]\n tf.debugging.assert_shapes(shapes={\n t_vec: (batch_size, traj_size, 1),\n r: (batch_size, traj_size, 1),\n theta0: (batch_size, traj_size, 1),\n omega: (batch_size, traj_size, 1)\n }, message='make_position_model_r2bc_math / inputs')\n \n # The angle theta at time t\n # theta = omega * t + theta0\n omega_t = keras.layers.multiply(inputs=[omega, t_vec], name='omega_t')\n theta = keras.layers.add(inputs=[omega_t, theta0], name='theta')\n\n # Cosine and sine of theta\n cos_theta = keras.layers.Activation(activation=tf.cos, name='cos_theta')(theta)\n sin_theta = keras.layers.Activation(activation=tf.sin, name='sin_theta')(theta)\n\n # Compute qx and qy from r, theta\n qx = keras.layers.multiply(inputs=[r, cos_theta], name='qx')\n qy = keras.layers.multiply(inputs=[r, sin_theta], name='qy')\n \n # Check shapes\n tf.debugging.assert_shapes(shapes={\n omega_t: (batch_size, traj_size, 1),\n theta: (batch_size, traj_size, 1),\n cos_theta: (batch_size, traj_size, 1),\n sin_theta: (batch_size, traj_size, 1),\n qx: (batch_size, traj_size, 1),\n qy: (batch_size, traj_size, 1),\n }, message='make_position_model_r2bc_math / outputs')\n \n # Wrap this into a model\n outputs = [qx, qy]\n model = keras.Model(inputs=inputs, outputs=outputs, name='model_r2bc_math')\n return model", "def omega_cyclotron(q, B, mass):\n return q * cgs.e * B / (mass * cgs.c)", "def test_coefficients_torch_interface(self):\n import torch\n\n qnode = qml.QNode(self.circuit, self.dev)\n\n weights = torch.tensor([0.5, 0.2])\n\n obtained_result = coefficients(partial(qnode, weights), 2, 1)\n\n assert np.allclose(obtained_result, self.expected_result)", "def _model(self, X, coef, intercept):\n X_copy = (\n X[:, None]\n if self.metric.default_point_type == \"vector\"\n else X[:, None, None]\n )\n return self.metric.exp(X_copy * coef[None], intercept)", "def electrical_ode(self, state, 
u_sr_alphabeta, omega, *args):\n return np.matmul(self._model_constants, np.array([\n # omega, i_alpha, i_beta, psi_ralpha, psi_rbeta, omega * psi_ralpha, omega * psi_rbeta, u_salpha, u_sbeta, u_ralpha, u_rbeta,\n omega,\n state[self.I_SALPHA_IDX],\n state[self.I_SBETA_IDX],\n state[self.PSI_RALPHA_IDX],\n state[self.PSI_RBETA_IDX],\n omega * state[self.PSI_RALPHA_IDX],\n omega * state[self.PSI_RBETA_IDX],\n u_sr_alphabeta[0, 0],\n u_sr_alphabeta[0, 1],\n u_sr_alphabeta[1, 0],\n u_sr_alphabeta[1, 1],\n ]))", "def _calculateOmegaOpt(N, gluonDOF, delta, incidentGaugeField, targetAdjointWilsonLine):\n\n # 2,2 is for the 2 dimensions, x and y\n omega = np.zeros((N, N, 2, 2, gluonDOF), dtype='complex') # 2 is for two dimensions, x and y\n\n derivs = [_x_deriv, _y_deriv]\n\n for i in range(N):\n for j in range(N):\n for k in range(gluonDOF):\n for l in range(2): # 2 is number of dimensions\n for n in range(2): # 2 is number of dimensions\n omega[i,j,l,n,k] = np.sum(np.array([derivs[l](incidentGaugeField[:,:,m], i, j, N, delta) * derivs[n](targetAdjointWilsonLine[:,:,k,m], i, j, N, delta) for m in range(gluonDOF)]))\n\n return omega", "def exe(self, func_mox):\n ## combine several dict of parameters\n cond = dict(self.cond_ex, **self.cond_cal, **self.const_model, **self.funclist_cea, **self.plot_param)\n ## set several constant, function and variables before calculation\n N = self.cond_ex[\"N\"]\n func_cstr = cond[\"func_CSTAR\"]\n cond[\"time\"], cond[\"x\"], r_tmp, rdot_tmp, rdotn_tmp = mod_shape.initialize_calvalue(**cond)\n self.x = cond[\"x\"]\n val = {}\n ## Following iteration part is the main sectioin of this simulation program.\n for t in tqdm(cond[\"time\"]):\n ## update each value at the follwoing lines\n self.t_history = np.append(self.t_history, t)\n mox = func_mox(t)\n self.mox_history = np.append(self.mox_history, mox)\n if t == 0:\n Pc = cond[\"Pci\"]\n else:\n Pc = Pc_new\n val[\"Pc\"] = Pc\n self.Pc_history = np.append(self.Pc_history, Pc)\n Vox = mod_shape.func_Vox(mox, Pc, **cond)\n val[\"Vox\"] = Vox\n self.Vox_history = np.append(self.Vox_history, Vox)\n Vf = mod_shape.func_Vf(Vox, Pc, **cond)\n self.Vf_history = np.append(self.Vf_history, Vf)\n if t != 0:\n r_tmp = r_new_tmp\n rdot_tmp = rdot_new_tmp\n rdotn_tmp = rdotn_new_tmp\n ## reshape and eliminate the unneccesary part of regression shape.\n r, rdot, rdotn = mod_shape.func_rcut(r_tmp, rdot_tmp, rdotn_tmp, self.t_history, self.Vf_history, **cond)\n self.r_history = np.vstack((self.r_history, r))\n self.rdot_history = np.vstack((self.rdot_history, rdot))\n self.rdotn_history = np.vstack((self.rdotn_history, rdotn))\n ## calculate the others parameter at the following lines\n if cond[\"Vf_mode\"]:\n mf = N *mod_shape.func_mf(r[~np.isnan(r)].size-1, r[~np.isnan(r)], rdot[~np.isnan(rdot)], Vf=Vf, **cond)\n else:\n mf = N *mod_shape.func_mf(r[~np.isnan(r)].size-1, r[~np.isnan(r)], rdot[~np.isnan(rdot)], Vf=Vf, **cond)\n self.mf_history = np.append(self.mf_history, mf)\n if mf<=0.0:\n of = np.nan\n cstr_ex = Pc*np.pi*np.power(cond[\"Dt\"], 2)/(4*mox)\n else:\n of = mox/mf\n cstr_ex = cond[\"eta\"]*func_cstr(of, Pc)\n self.of_history = np.append(self.of_history, of)\n self.cstr_history = np.append(self.cstr_history, cstr_ex)\n ## calculate the next time step values at the following lines\n val[\"r\"] = r_tmp\n val[\"rdot\"] = rdot_tmp\n val[\"rdotn\"] = rdotn_tmp\n Pc_new = mod_response.exe_EULER(t, mf, Pc, func_mox, self.t_history, self.Vf_history, **cond)\n r_new_tmp, rdot_new_tmp, rdotn_new_tmp = mod_shape.exe(val, 
**cond)\n ## CFL [-] Courant number, which must be less than unity \n self.cond_cal[\"CFL\"] = np.abs(self.Vf_history.max()*self.cond_cal[\"dt\"]/self.cond_cal[\"dx\"])", "def eval(self, theta, phi):\n required_fields = [\"coeff\", \"order\"]\n for field in required_fields:\n if field not in self.linear_fit.keys():\n raise ValueError(\"It looks like \"\n \"interface_energy_poly_expansion \"\n \"has not been called. Call that function \"\n \"first.\")\n\n res = self.linear_fit[\"coeff\"][0]\n loop = zip(self.linear_fit[\"order\"][1:],\n self.linear_fit[\"coeff\"][1:].tolist())\n n = [np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi),\n np.cos(theta)]\n for order, coeff in loop:\n x = self._get_x_value(n, order)\n res += coeff*x\n return res", "def run(self, dist_theta, dist_phi, dist_psi):\r\n\t\t#Extract all the system information\r\n\t\tev = dynamics.sDynamics(self.w, self.ev, self.t)\r\n\t\tx = ev[0]\r\n\t\ty = ev[1]\r\n\t\tz = ev[2]\r\n\t\txD = ev[3]\r\n\t\tyD = ev[4]\r\n\t\tzD = ev[5]\r\n\t\txDD = ev[6]\r\n\t\tyDD = ev[7]\r\n\t\tzDD = ev[8]\r\n\t\ttheta = ev[9]\r\n\t\tphi = ev[10]\r\n\t\tpsi = ev[11]\r\n\t\tthetaD = ev[12] + dist_theta\r\n\t\tphiD = ev[13] + dist_phi\r\n\t\tpsiD = ev[14] + dist_psi\r\n\t\tthetaDD = ev[15]\r\n\t\tphiDD =ev[16]\r\n\t\tpsiDD = ev[17]\r\n\t\t\r\n\t\t#Apply controllers\r\n\t\tcontrol1 = self.altitudeController.iterate(self.target_z, z, zD, zDD)\r\n\t\tcontrol2 = self.phiController.iterate(self.target_phi, phi, phiD)\r\n\t\tcontrol3 = self.thetaController.iterate(self.target_theta, theta, thetaD)\r\n\t\tcontrol4 = self.psiController.iterate(self.target_psi, psi, psiD)\r\n\r\n\t\tthr1 = control1\r\n\t\tthr2 = control2\r\n\t\tthr3 = control3\r\n\t\tthr4 = control4\r\n\t\t\r\n\t\t#Compute the throttle for each motor\r\n\t\tw1=thr1-thr3-thr4\r\n\t\tw2=thr1-thr2+thr4\r\n\t\tw3=thr1+thr3-thr4\r\n\t\tw4=thr1+thr2+thr4\r\n\r\n\t\t#Redefine throttle and state vectors\r\n\t\tself.w = [w1,w2,w3,w4]\r\n\t\tself.ev = [x, y, z, xD, yD, zD, xDD, yDD, zDD, theta, phi, psi, thetaD, phiD, psiD, thetaDD, phiDD, psiDD]\r\n\r\n\t\treturn [x, y, z, theta, phi, psi]", "def div(\n coeff_rho,\n momentum_x,\n momentum_y,\n momentum_z,\n ):\n # Compute the fourth order derivative of the pressure for the face\n # velocity correction.\n p_corr = (\n states['p']\n if self._params.enable_rhie_chow_correction else states['dp'])\n d4p_dx4 = self._kernel_op.apply_kernel_op_x(p_corr, 'k4d2x')\n d4p_dy4 = self._kernel_op.apply_kernel_op_y(p_corr, 'k4d2y')\n d4p_dz4 = self._kernel_op.apply_kernel_op_z(p_corr, 'k4d2z',\n 'k4d2zsh')\n\n # Compute velocity gradient based on interpolated values on cell faces.\n coeff_x = dt / (4. * coeff_rho * dx**2)\n du = self._kernel_op.apply_kernel_op_x(momentum_x, 'kDx')\n du_dx = [\n du_i / (2. * dx) + coeff_x * d4p_dx4_i\n for du_i, d4p_dx4_i in zip(du, d4p_dx4)\n ]\n\n coeff_y = dt / (4. * coeff_rho * dy**2)\n dv = self._kernel_op.apply_kernel_op_y(momentum_y, 'kDy')\n dv_dy = [\n dv_i / (2. * dy) + coeff_y * d4p_dy4_i\n for dv_i, d4p_dy4_i in zip(dv, d4p_dy4)\n ]\n\n coeff_z = dt / (4. * coeff_rho * dz**2)\n dw = self._kernel_op.apply_kernel_op_z(momentum_z, 'kDz', 'kDzsh')\n dw_dz = [\n dw_i / (2. 
* dz) + coeff_z * d4p_dz4_i\n for dw_i, d4p_dz4_i in zip(dw, d4p_dz4)\n ]\n\n return [\n du_dx_i + dv_dy_i + dw_dz_i\n for du_dx_i, dv_dy_i, dw_dz_i in zip(du_dx, dv_dy, dw_dz)\n ]", "def jacobian_moebius(theta, w, r, inverse=False):\n \n if inverse:\n # undo the phase translation, which ensures fixpoint property\n phi = get_rotation_back_angle(w, r)\n theta = (theta + phi) % (2 * np.pi)\n \n z = T_1(theta, r)\n\n # (B1, B2, .., 1, 2)\n dT2_dh = pder_T_2(h(z, w = w if not inverse else - w, r=r))\n\n # (B1, B2, .., 2, 2)\n dh_dT1 = pder_h(z, w = w if not inverse else - w, r=r) \n\n # (B1, B2, .., 2, 1)\n dT1_dtheta = pder_T_1(theta,r)\n\n # (B1, B2, .., BN)\n return dT2_dh.matmul(dh_dT1).matmul(dT1_dtheta).squeeze(-1).squeeze(-1)", "def run(self, data=None):\n\n if data:\n cstep, (crd, vel, pbc) = data\n else:\n crd, vel = self.__crd, self.__vel\n\n logger.debug(' calculate dynamics ...')\n\n masses = self.__tpl.get_atom_info()['masses']\n force = self.cal_force(crd)\n params = self.__setting.dynamics\n\n results = self.integrate(crd, force, vel, masses, params)\n\n return results", "def Force_on_aircraft_in_body_reference_frame(m, V_B, V_dot_B, omega_B):\n return m * (V_dot_B + omega_B.cross(V_B))", "def calculateCoefficientsTrainExp(np.ndarray[double, ndim=2, mode=\"c\"] x_logs not None, np.ndarray[double, ndim=2, mode=\"c\"] derivatives not None, np.ndarray[double, ndim=1] x_log_eigenvals not None, np.ndarray[double, ndim=2, mode=\"c\"] coefficients not None):\n cdef int n, dd, d\n\n n, dd = x_logs.shape[0], x_logs.shape[1]\n d = np.sqrt(dd)\n \n\n out = c_calculateCoefficientsTrainExp (&x_logs[0,0], &derivatives[0,0], &x_log_eigenvals[0], &coefficients[0,0], n, dd, d)\n\n return out", "def motors_update(t, x, u, params={}):\n tm = params['motor']['tm'] # Motor torque constant\n cr = params['motor']['cr'] # Motor speed constant\n wb = params['motor']['wb'] # Motor base speed\n\n u = np.clip(u / 199999, 0, 1)\n accel = [(cr * throttle + wb - speed) / tm for throttle, speed in zip(u, x)]\n\n return accel", "def test_rhs(self):\n\n _, _, generator = setup_lmde_frames_and_generator(self.basic_model, solver_frame=self.X)\n\n t = 13.1231\n y = np.eye(2, dtype=complex)\n\n output = generator(t, y, in_frame_basis=True).data\n\n X = np.array(self.X.data)\n X_diag, U = np.linalg.eigh(X)\n Uadj = U.conj().transpose()\n gen = (\n -1j\n * 2\n * np.pi\n * (self.w * np.array(self.Z.data) / 2 + self.r * np.cos(2 * np.pi * self.w * t) * X / 2)\n )\n expected = (\n Uadj @ expm(1j * t * X) @ gen @ expm(-1j * t * X) @ U + 1j * np.diag(X_diag)\n ) @ y\n\n self.assertTrue(np.allclose(expected, output))", "def calculate(x, y, z, vx, vy, vz, dt, m, g, B2, S0, omega):\n t = 0.0\n # Establish lists with initial position and velocity components and time.\n x_list = [x]\n y_list = [y]\n z_list = [z]\n vx_list = [vx]\n vy_list = [vy]\n vz_list = [vz]\n t_list = [t]\n\n # Set up visual elements.\n mound = visual.box(pos=(0,0,0), length=0.1, width=0.5, height=0.03, color=visual.color.white)\n plate = visual.box(pos=(18,0,0), length=0.5, width=0.5, height=0.03, color=visual.color.white)\n ball = visual.sphere(pos=(x,y,z), radius=0.05, color=visual.color.white)\n ball.trail = visual.curve(color=ball.color)\n\n while y >= 0.0:\n visual.rate(100) # Limit to no more than 100 iterations per second.\n t, x, y, z, vx, vy, vz = do_time_step(t, dt, x, y, z, vx, vy, vz, m, B2, g, S0, omega)\n x_list.append(x)\n y_list.append(y)\n z_list.append(z)\n vx_list.append(vx)\n vy_list.append(vy)\n vz_list.append(vz)\n t_list.append(t)\n 
ball.pos = (x,y,z)\n ball.trail.append(pos=ball.pos)\n\n return t_list, x_list, y_list, z_list, vx_list, vy_list, vz_list", "def R_term(\n enst, # enstrophy field\n omega1, # vorticity-1 component\n omega2, # vorticity-2 component\n omega3, # vorticity-3 component\n s11, # strain rate-11 component\n s12, # strain rate-12 component\n s13, # strain rate-13 component\n s22, # strain rate-22 component\n s23, # strain rate-23 component\n s33, # strain rate-33 component\n diff = False): # differentiation flag\n #---------------------------------------------------------------------#\n # Defining domain variables #\n #---------------------------------------------------------------------#\n pi = np.pi # pi\n dx = (2.0*pi)/64.0 # spatial step\n nu = 0.000185 # default viscosity\n #---------------------------------------------------------------------#\n # Spectral differentiation variables #\n #---------------------------------------------------------------------#\n dim = 64\n kspec = np.fft.fftfreq(dim) * dim\n Kfield = np.array(np.meshgrid(kspec, kspec, kspec, indexing='ij'))\n #---------------------------------------------------------------------#\n # Spectral differentiation variables #\n #---------------------------------------------------------------------#\n term1 = np.zeros((dim, dim, dim))\n term2 = np.zeros((dim, dim, dim))\n term3 = np.zeros((dim, dim, dim))\n #---------------------------------------------------------------------#\n # Numerator (numpy gradient tool) #\n #---------------------------------------------------------------------#\n if diff is not False:\n term1 = np.gradient(enst,dx, edge_order=2)[0]\n term2 = np.gradient(enst,dx, edge_order=2)[1]\n term3 = np.gradient(enst,dx, edge_order=2)[2]\n #---------------------------------------------------------------------#\n # Numerator (spectral differentiation) #\n #---------------------------------------------------------------------#\n else:\n term1 = 0.5*np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(enst) +\\\n 1j*Kfield[0]*np.fft.fftn(enst)).real\n term2 = 0.5*np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(enst) +\\\n 1j*Kfield[1]*np.fft.fftn(enst)).real\n term3 = 0.5*np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(enst) +\\\n 1j*Kfield[2]*np.fft.fftn(enst)).real\n #---------------------------------------------------------------------#\n # Numerator #\n #---------------------------------------------------------------------#\n num = nu*(term1**2.0+ term2**2.0 + term3**2.0)\n #---------------------------------------------------------------------#\n # Denominator #\n #---------------------------------------------------------------------#\n den = omega1*s11*omega1 + omega1*s12*omega2 + omega1*s13*omega3 +\\\n omega2*s12*omega1 + omega2*s22*omega2 + omega2*s23*omega3+\\\n omega3*s13*omega1 + omega3*s23*omega2 + omega3*s33*omega3\n #---------------------------------------------------------------------#\n # R calculation #\n #---------------------------------------------------------------------#\n R = num/den\n\n return R", "def evaluate(self, *args, **kwargs):\n return self.constant_velocity", "def rhs_vaccination(t, y, beta_s, beta_a, epsilon,\n delta_e, delta_v, p, q,\n alpha_a, alpha_t, alpha_s,\n mu, mu_s, mu_a,\n lambda_v, lambda_t):\n s, e, i_s, i_a, r, d, v, treat = y\n #\n n_bar = s + e + i_s + i_a + r + v + treat\n force_infection = (beta_s * i_s + beta_a * i_a) / n_bar\n rhs_s = mu * n_bar - force_infection * s - (mu + lambda_v) * s + delta_v * v\n rhs_e = force_infection * (epsilon * v + s) - (mu + delta_e) * e\n rhs_i_s = p * delta_e * e - (mu + 
mu_s + alpha_s + lambda_t) * i_s - (1.0 - q) * alpha_t * treat\n rhs_i_a = (1 - p) * delta_e * e - (mu + mu_a + alpha_a) * i_a\n rhs_r = alpha_s * i_s + alpha_a * i_a + q * alpha_t * treat - mu * r\n rhs_d = mu_s * i_s + mu_a * i_a\n rhs_v = lambda_v * s - epsilon * force_infection * v - (mu + delta_v) * v\n rhs_treat = lambda_t * i_s - (mu + alpha_t) * treat\n rhs = np.array([rhs_s, rhs_e, rhs_i_s, rhs_i_a, rhs_r, rhs_d, rhs_v, rhs_treat])\n return rhs", "def f(r,t):\r\n x = r[0]\r\n y = r[2]\r\n z = r[4]\r\n vx = r[1]\r\n vy = r[3]\r\n vz = r[5]\r\n velocity = np.sqrt(vx**2+vy**2+vz**2)\r\n #if np.abs(z)>eps:\r\n velocity = np.sqrt((vx+c*radius*wy)**2+(vy-c*radius*wx)**2+(-e*vz)**2)\r\n \r\n # equations for a cricket ball in motion\r\n return np.array([vx, (-k_d*velocity*vx+k_l*(wy*vz-wz*vy)),\r\n vy, (-k_d*velocity*vy+k_l*(wz*vx-wx*vz)),\r\n vz,(-k_d*velocity*vz+k_l*(wz*vy-wy*vx)-g)], float)", "def calculateElementCoefficients(self):\n #\n #get u,grad(u), and grad(u)Xgrad(w) at the quadrature points\n #\n for cj in range(self.nc):\n self.u[cj].getValues(self.q[('v',cj)],\n self.q[('u',cj)])\n if self.q.has_key(('grad(u)',cj)):\n self.u[cj].getGradientValues(self.q[('grad(v)',cj)],\n self.q[('grad(u)',cj)])\n #\n #get functions of (t,x,u) at the quadrature points\n #\n self.coefficients.evaluate(self.timeIntegration.t,self.q)\n log(\"Coefficients on element\",level=10,data=self.q)\n #\n # time integration is handled directly in ELLAM weak approximation, don't have a hook for\n # doing that via a time integration object (could if it were a direct Lagrange Galerkin formulation I believe)\n # however, need to set time integration's m_tmp if use that anywhere\n #if self.timeTerm:\n # self.timeIntegration.calculateElementCoefficients(self.q)\n\n #todo eventually can add nonlinear potential here\n\n #cek and mwf need to go through this section to clean up, some of next two blocks could go to calcQuad\n #\n #todo need non-diagonal dependence?\n for ci in range(self.nc):\n cfemIntegrals.calculateCFLADR(self.elementEffectiveDiametersArray,\n self.q[('dm',ci,ci)],\n self.q[('df',ci,ci)],#could just be velocity\n self.q[('cfl',ci)])", "def compute_magnetic_field(self, coords, params={}, basis=\"rpz\"):", "def rhoFit3d(self, x, m, z):\n result = 1. + (x/self.xc)**self.alpha(m, z)\n result **= -(self.beta(m,z)+self.gamma) / self.alpha(m,z)\n result *= (x/self.xc)**(self.gamma)\n result *= self.rho0(m, z)\n return result", "def buildObjective(self):\r\n\r\n # self.z_prior might be the modified version\r\n self.L_elbo = T.mean(self.reconst + self.conditional_prior + self.w_prior + self.z_prior)\r\n\r\n self.L_elbo_modif = T.mean(self.reconst + self.conditional_prior + self.w_prior_modif + self.z_prior_modif)\r\n\r\n #---Getting model parameter---#\r\n cg = ComputationGraph(self.L_elbo)\r\n #self.phi_theta is the list of all the parameters in q and p.\r\n self.params = VariableFilter(roles=[PARAMETER])(cg.variables)" ]
[ "0.5641719", "0.5596931", "0.5473129", "0.54537827", "0.5378322", "0.53513557", "0.528142", "0.5268483", "0.5266664", "0.5229376", "0.521022", "0.5209983", "0.52085", "0.51614934", "0.5149023", "0.50841874", "0.5070143", "0.50686336", "0.5039311", "0.5032286", "0.5028618", "0.5025035", "0.50055766", "0.49869925", "0.49868786", "0.49810827", "0.49799502", "0.49786997", "0.49658784", "0.49657422", "0.49593568", "0.4956974", "0.49529758", "0.4950648", "0.4938303", "0.4933853", "0.49312252", "0.492462", "0.4924438", "0.4921979", "0.49219167", "0.49208018", "0.49200153", "0.49160713", "0.49135587", "0.49086365", "0.48922744", "0.48921263", "0.488775", "0.4881112", "0.48766598", "0.48592457", "0.48525858", "0.48523766", "0.4852054", "0.48476633", "0.4845515", "0.48242754", "0.48209932", "0.4810444", "0.4804269", "0.47967562", "0.47945818", "0.47819903", "0.4776029", "0.4775499", "0.47607636", "0.47590154", "0.47569093", "0.4738734", "0.47369644", "0.47360447", "0.47346747", "0.47327438", "0.47241366", "0.472273", "0.47139508", "0.4710283", "0.47041017", "0.47003627", "0.46992537", "0.4699214", "0.4694288", "0.4693612", "0.4691017", "0.4689676", "0.46889517", "0.4687539", "0.46844667", "0.46800163", "0.46726444", "0.4670241", "0.46677756", "0.4666862", "0.46623352", "0.4661133", "0.46606636", "0.46595743", "0.4657616", "0.4653044", "0.4647322" ]
0.0
-1
Given the coefficients, evaluate the model at a specific direction (theta, phi)
def even_pODF_opt(angles,*args): # qpoints, c, N): qpoints = args[0] c = args[1] N = args[2] n,m = qpoints.shape theta,phi = angles[0], angles[1] omega = np.array([np.sin(theta)*np.cos(phi),np.sin(theta)*np.sin(phi),np.cos(theta)]) sum = 0.0 for i in range(n): mu = np.dot(omega,qpoints[i,:]) mu = np.clip(mu, -1.0, 1.0) sum += c[i]*even_kernel(mu, N) return -(N+1)**2 * sum
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eval(self, theta, phi):\n required_fields = [\"coeff\", \"order\"]\n for field in required_fields:\n if field not in self.linear_fit.keys():\n raise ValueError(\"It looks like \"\n \"interface_energy_poly_expansion \"\n \"has not been called. Call that function \"\n \"first.\")\n\n res = self.linear_fit[\"coeff\"][0]\n loop = zip(self.linear_fit[\"order\"][1:],\n self.linear_fit[\"coeff\"][1:].tolist())\n n = [np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi),\n np.cos(theta)]\n for order, coeff in loop:\n x = self._get_x_value(n, order)\n res += coeff*x\n return res", "def evaluate(self, x, y, x0, y0, order):\n try:\n iorder = self._order_mapping[int(order.flatten()[0])]\n except AttributeError:\n iorder = self._order_mapping[order]\n except KeyError:\n raise ValueError(\"Specified order is not available\")\n\n # The next two lines are to get around the fact that\n # modeling.standard_broadcasting=False does not work.\n #x00 = x0.flatten()[0]\n #y00 = y0.flatten()[0]\n\n t = np.linspace(0, 1, 10) #sample t\n xmodel = self.xmodels[iorder]\n ymodel = self.ymodels[iorder]\n lmodel = self.lmodels[iorder]\n\n dx = xmodel.evaluate(x0, y0, t)\n dy = ymodel.evaluate(x0, y0, t)\n\n if self.theta != 0.0:\n rotate = Rotation2D(self.theta)\n dx, dy = rotate(dx, dy)\n\n so = np.argsort(dx)\n tab = Tabular1D(dx[so], t[so], bounds_error=False, fill_value=None)\n\n dxr = astmath.SubtractUfunc()\n wavelength = dxr | tab | lmodel\n model = Mapping((2, 3, 0, 2, 4)) | Const1D(x0) & Const1D(y0) & wavelength & Const1D(order)\n return model(x, y, x0, y0, order)", "def model(theta, x):\n\tw, b = theta\n\treturn w * x + b", "def evaluate(self,coeffs,evalpts):\n a1,a2,a3,A0,E0,G0,n = coeffs\n x = asarray(evalpts) #XXX: requires a numpy.array\n return (a1 + a2*x + a3*x*x + A0 * ( G0/(2*pi) )/( (x-E0)*(x-E0)+(G0/2)*(G0/2) ))/n", "def find_coefficients(self):\n self.make_matrix()\n self.coeffs = np.linalg.solve(self.global_matrix,self.global_vector)\n self.coeffs = np.append(self.coeffs, self.D) #Initial condition", "def fit(self, X, y):\n assert X.shape[0] == y.shape[0]\n\n intercept = np.ones((X.shape[0], 1))\n X = np.concatenate((X, intercept), axis=1)\n\n self.theta = np.random.randn(X.shape[1], 1)\n theta_updated = True\n while theta_updated:\n theta_updated = False\n for i in range(X.shape[0]):\n x, yy = X[i, :], y[i]\n # if label yy and sign of decision function do not agree\n # sample x on wrong side of the hyperplane\n if yy * (np.dot(self.theta.T, x.reshape(-1, 1))) <= 0:\n # -y * theta.T * x / d theta = -y*x\n # parameter := parameter - alpha * gradient\n parameter_gradient = -yy * x.reshape(-1, 1)\n self.theta = self.theta - parameter_gradient\n theta_updated = True", "def forward_theta(self):\n SW = self.simplesphere.sphere_wrapper\n for dm, m in enumerate(self.simplesphere.local_m):\n m_data = [f.data[dm] for f in self.component_fields]\n # Unpack for rank 0 to counteract shortcut bug in sphere_wrapper\n if self.rank == 0:\n m_data, = m_data\n self.coeffs[dm] = SW.forward(m, self.rank, m_data)", "def _model(self, t, theta, period, tmpid):\n template = self.templates[tmpid]\n phase = (t / period - theta[2]) % 1\n return theta[0] + theta[1] * template(phase)", "def solver(self, theta):\n\n m = len(self.x)\n n = len(self.t[0])\n h = self.step_x\n k = self.step_t\n lamb = k / (h * h)\n w = np.zeros(m + 1)\n l = np.zeros(m + 1)\n u = np.zeros(m + 1)\n\n uK = np.zeros(self.x.shape)\n print('comecei para k= ({},{})'.format(theta[0], theta[1]))\n startTime = time.clock()\n if self.first:\n self.uR = 
np.zeros(self.x.shape)\n self.first = False\n error = 0\n errorCurve = np.zeros(self.x.shape)\n z = np.zeros(m + 1)\n w[m] = 0 # following the initial condition u(0,t) = u(l,t) = 0. If needed, change this.\n for i in range(1, m - 1):\n w[i] = self.g(i * h)\n\n l[1] = 1 + lamb\n u[1] = -lamb / (2 * l[1])\n for i in range(2, m - 1):\n l[i] = 1 + lamb + lamb * u[i - 1] / 2\n u[i] = -lamb / (2 * l[i])\n\n l[m - 1] = 1 + lamb + lamb * u[m - 2] / 2\n for j in range(1, n + 1):\n t = j * k # current t\n z[1] = ((1 - lamb) * w[1] + lamb / 2 * w[2] + self.f(t, theta)) / l[1]\n for i in range(2, m):\n z[i] = ((1 - lamb) * w[i] + lamb / 2 * (w[i + 1] + w[i - 1] + z[i - 1]) + self.f(t, theta)) / l[i]\n w[m - 1] = z[m - 1]\n for i in range(m - 2, 0, -1):\n w[i] = z[i] - u[i] * w[i + 1]\n\n for i in range(0, m + 1):\n x = i * h\n # print(x, w[i])\n # print('oi')\n uK[i - 1, j - 1] = w[i]\n self.t[i - 1, j - 1] = t\n self.x[i - 1, j - 1] = x\n error += pow(w[i] - self.uR[i - 1, j - 1], 2) / uK.size\n errorCurve[i - 1, j - 1] = (pow(w[i] - self.uR[i - 1, j - 1], 2)) / uK.size\n print('acabei para k= ({},{}) em {} segundos'.format(theta[0], theta[1], time.clock() - startTime))\n return (uK, error, errorCurve, theta)", "def control(self, phi=0., theta=0., psi=0., thrust=0.):\n # compute current state\n acceleration, angles, position, _ = self.get_odometry()\n roll_angle = angles[0] + self.roll_correction\n pitch_angle = angles[1]\n yaw_angle = angles[2]\n altitude_position = position[1]\n # update target values\n self.target_yaw += psi\n self.target_yaw = pi_clip(self.target_yaw)\n self.target_altitude += thrust\n\n # Compute ouput values\n # Roll phi angle\n self.rollPID.setpoint = phi\n roll = (self.rollPID(roll_angle, dt=self.deltaT) * -1\n + acceleration[0])\n\n # Pitch theta angle\n self.pitchPID.setpoint = theta * -1 # positive angle to front\n pitch = (self.pitchPID(pitch_angle, dt=self.deltaT) * -1\n - acceleration[1])\n\n # Yaw psi angle\n self.yawPID.setpoint = self.target_yaw\n yaw = (self.yawPID(yaw_angle, dt=self.deltaT) * -1\n + acceleration[2])\n\n # Vertical thrust\n self.vertPID.setpoint = self.target_altitude\n altitude = self.vertPID(altitude_position, dt=self.deltaT)\n\n # update time\n self.time_counter += self.deltaT\n # leds\n self.blink_leds()\n # camera\n self.gimbal_stabilize(acceleration)\n\n # Actuate the motors taking into consideration all the computed inputs.\n fl_motor = self.lift_thrust + altitude - roll - pitch + yaw # front L\n fr_motor = self.lift_thrust + altitude + roll - pitch - yaw # front R\n rl_motor = self.lift_thrust + altitude - roll + pitch - yaw # rear L\n rr_motor = self.lift_thrust + altitude + roll + pitch + yaw # rear R\n\n # CounterClockWise motor propellers\n fr_motor *= -1 # CCW\n rl_motor *= -1 # CCW\n\n # actuate over the motors\n if not np.isnan(fl_motor):\n self.motors[0].setVelocity(fl_motor)\n self.motors[1].setVelocity(fr_motor)\n self.motors[2].setVelocity(rl_motor)\n self.motors[3].setVelocity(rr_motor)", "def evaluate(self, x, y, wavelength, order):\n if wavelength < 0:\n raise ValueError(\"Wavelength should be greater than zero\")\n\n try:\n iorder = self._order_mapping[int(order.flatten()[0])]\n except AttributeError:\n iorder = self._order_mapping[order]\n except KeyError:\n raise ValueError(\"Specified order is not available\")\n\n t = self.lmodels[iorder](wavelength)\n xmodel = self.xmodels[iorder]\n ymodel = self.ymodels[iorder]\n\n dx = xmodel.evaluate(x, y, t)\n dy = ymodel.evaluate(x, y, t)\n\n ## rotate by theta\n if self.theta != 
0.0:\n rotate = Rotation2D(self.theta)\n dx, dy = rotate(dx, dy)\n\n return (x+dx, y+dy, x, y, order)", "def run(self, diffusion_coefficients):\n mat = self.buildmatrix(diffusion_coefficients)\n\n rhs = np.zeros(self.size)\n rhs[0] = -(diffusion_coefficients[0] + diffusion_coefficients[1]) * self.phi0\n\n if self.verbose > 0:\n print(\"System of equations:\")\n for i in range(mat.shape[0]):\n row = [\"{0:3g}*x{1}\".format(mat[i, j], j + 1) for j in range(mat.shape[1])]\n if self.verbose > 0:\n print(\"[{0}] = [{1:3g}]\".format(\" + \".join(row), rhs[i]))\n\n if parameters.solver == 'jacobi':\n x = self.jacobi_solver(mat, rhs)\n elif parameters.solver == 'gauss-seidel':\n x = self.gauss_seidel_solver(mat, rhs)\n elif parameters.solver == 'tridiag':\n x = self.tridiag_solver(mat, rhs)\n else:\n sys.exit('Unknown solver')\n\n if self.verbose > 1:\n print(\"Solution: {0}\".format(x))\n error = np.dot(mat, x) - rhs\n if self.verbose > 1:\n print(\"Error: {0}\".format(error))\n x = np.insert(x, 0, self.phi0)\n x = np.append(x, 0)\n return x", "def compute(self, solver=\"cbc\", **kwargs):\n self.table2es()\n logging.info(\"Creating the linear model...\")\n model = solph.Model(self.es)\n logging.info(\"Done. Optimise the model.\")\n self.solve(model, solver=solver, **kwargs)", "def phi(t, *args):\n # Unpacking data\n mu_1, pi_mu_2, distance, affine_transfo = args\n A, b = get_Ab(t)\n N = len(mu_1)\n assert len(mu_1) == len(pi_mu_2)\n # Computing value of objective function\n r = 0.\n for i in np.arange(N):\n r += distance(affine_transfo(A, b, mu_1[i]), pi_mu_2[i]) ** 2\n return r", "def f(self,y,psi):\r\n\r\n #1. check that number of params is consistent\r\n assert psi.shape[0] == self.n_terms, 'inconsistent parameter dimensions'\r\n assert psi.shape[1] == 3, 'inconsistent parameter dimensions'\r\n\r\n #2. exponentiate the a and b (positive!)\r\n mpsi = psi.copy()\r\n\r\n #3. 
transform data\r\n z = y.copy()\r\n for i in range(len(mpsi)):\r\n a,b,c = mpsi[i]\r\n z += a*np.tanh(b*(y+c))\r\n return z", "def run(self, dist_theta, dist_phi, dist_psi):\r\n\t\t#Extract all the system information\r\n\t\tev = dynamics.sDynamics(self.w, self.ev, self.t)\r\n\t\tx = ev[0]\r\n\t\ty = ev[1]\r\n\t\tz = ev[2]\r\n\t\txD = ev[3]\r\n\t\tyD = ev[4]\r\n\t\tzD = ev[5]\r\n\t\txDD = ev[6]\r\n\t\tyDD = ev[7]\r\n\t\tzDD = ev[8]\r\n\t\ttheta = ev[9]\r\n\t\tphi = ev[10]\r\n\t\tpsi = ev[11]\r\n\t\tthetaD = ev[12] + dist_theta\r\n\t\tphiD = ev[13] + dist_phi\r\n\t\tpsiD = ev[14] + dist_psi\r\n\t\tthetaDD = ev[15]\r\n\t\tphiDD =ev[16]\r\n\t\tpsiDD = ev[17]\r\n\t\t\r\n\t\t#Apply controllers\r\n\t\tcontrol1 = self.altitudeController.iterate(self.target_z, z, zD, zDD)\r\n\t\tcontrol2 = self.phiController.iterate(self.target_phi, phi, phiD)\r\n\t\tcontrol3 = self.thetaController.iterate(self.target_theta, theta, thetaD)\r\n\t\tcontrol4 = self.psiController.iterate(self.target_psi, psi, psiD)\r\n\r\n\t\tthr1 = control1\r\n\t\tthr2 = control2\r\n\t\tthr3 = control3\r\n\t\tthr4 = control4\r\n\t\t\r\n\t\t#Compute the throttle for each motor\r\n\t\tw1=thr1-thr3-thr4\r\n\t\tw2=thr1-thr2+thr4\r\n\t\tw3=thr1+thr3-thr4\r\n\t\tw4=thr1+thr2+thr4\r\n\r\n\t\t#Redefine throttle and state vectors\r\n\t\tself.w = [w1,w2,w3,w4]\r\n\t\tself.ev = [x, y, z, xD, yD, zD, xDD, yDD, zDD, theta, phi, psi, thetaD, phiD, psiD, thetaDD, phiDD, psiDD]\r\n\r\n\t\treturn [x, y, z, theta, phi, psi]", "def forward_model(self, x, theta):\n # unpack the model parameters\n A1, w1, A2, w2, bg = theta\n # evaluate the peaks\n peak_1 = A1 / ((1 + ((x - self.c1)/w1)**2)*(pi*w1))\n peak_2 = A2 / ((1 + ((x - self.c2)/w2)**2)*(pi*w2))\n # return the prediction of the data\n return peak_1 + peak_2 + bg", "def fit(self, X, y):\n\n n, m = X.shape[0], X.shape[1]\n\n # theta is (nx1) (one theta per dimension)\n self.theta = np.random.uniform(-10, 10, (n, 1))\n\n for i in range(self.epochs):\n # Get predictions\n y_pred = self.predict(X)\n\n # calculate cost\n # cost = ...\n cost = self._cost_function(y_pred, y, m)\n \n\n # gradient is an (n) x 1 array, it refers to the derivate per theta\n gradient = self._cost_function_derivative(y_pred, y, X, m)\n\n # delta/update rule\n self.theta = gradient\n\n self.costs.append(cost)\n pass\n\n print(\"Final theta is {} (cost: {})\".format(self.theta.T, cost))", "def train(self, **kwargs):\n self.solver.train(**kwargs)", "def fit_model(self, phi, omega, sigma_eta, beta=0):\r\n # Initialize at the initial values parsed to the class\r\n par_ini = [phi, omega, sigma_eta, beta]\r\n # Approximate the jabocian for more efficient minimization\r\n Lprime = lambda x: approx_fprime(x, self.__llik_fun__, 0.01)\r\n if self.method == 'iterateRegression':\r\n # Depending on whether we include the regression coefficient, use other optimizer\r\n est = minimize(self.__llik_fun__, x0=par_ini,\r\n options=self.options,\r\n method='Newton-CG', jac=Lprime)\r\n else:\r\n est = minimize(self.__llik_fun__, x0=par_ini,\r\n options=self.options,\r\n method='BFGS')\r\n # Return optimal parameters\r\n return est.x", "def run(self,*args):\n \n for i in xrange(self.K):\n theta = [self.q1theta[n][i] for n in self.q1theta.dtype.names]\n r = self.po.apply_async(self.model, theta)\n self.phi[i]= r.get()[-1]#self.model(*theta)[-1] #phi is the last point in the simulation\n\n self.done_running = True", "def linear_model(inp, w1, b1):\n y = inp @ w1 + b1\n centered_y = y - y.mean()\n return centered_y.sum()", "def _model(x, p):\n y_hat = 
0\n for i, pi in enumerate(reversed(p)):\n y_hat += x**i * pi\n return y_hat", "def train_normal_equation(self):\n\t\tself.theta = np.dot(np.dot(np.linalg.pinv(np.dot(np.transpose(self.X), self.X)), np.transpose(self.X)), self.y)", "def fun(params, slope, data):\n x, y_true = data\n return y_true - model_fun(params, slope, x)", "def test_coefficients_torch_interface(self):\n import torch\n\n qnode = qml.QNode(self.circuit, self.dev)\n\n weights = torch.tensor([0.5, 0.2])\n\n obtained_result = coefficients(partial(qnode, weights), 2, 1)\n\n assert np.allclose(obtained_result, self.expected_result)", "def hexapodZernikeLinearModel():\n Tfile='/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_training.cp'\n b=p.load(open(Tfile))\n nobs = len(b)\n x = b[:,0]\n y = b[:,1]\n z = b[:,2]\n theta = b[:,3]\n phi = b[:,4]\n fwhm = b[:,5]\n e1 = b[:,6]\n e2 = b[:,7]\n thetax = theta*np.cos(np.deg2rad(phi))\n thetay = theta*np.sin(np.deg2rad(phi))\n \n M22realTrefoil2 = b[:,37] # for x decenter\n M22imagTrefoil1 = b[:,54] \n M22TrefoilXshift = 0.5*(M22realTrefoil2+M22imagTrefoil1)\n\n M22realTrefoil1 = b[:,34] # for y decenter\n M22imagTrefoil2 = b[:,57] \n M22TrefoilYshift = 0.5*(M22realTrefoil1 - M22imagTrefoil2)\n\n M20defocus = b[:,12] # for defocus\n\n M22realComa2 = b[:,36] # for x-tilt\n M22imagComa1 = b[:,55]\n M22ComaXtilt = 0.5*(M22realComa2+M22imagComa1)\n\n M22realComa1 = b[:,35] # for y-tilt\n M22imagComa2 = b[:,56]\n M22ComaYtilt = 0.5*(M22realComa1 - M22imagComa2)\n \n pl.figure(figsize=(21,12))\n pl.subplot(2,3,1)\n t=bp.bin_scatter(M22TrefoilXshift,x,nbins=20,fmt='bo',scatter=True)\n res = linefit(M22TrefoilXshift,x)\n pl.plot(M22TrefoilXshift,M22TrefoilXshift*res[1]+res[0],'r,')\n pl.ylabel('x-decenter')\n pl.xlabel('(M22realTrefoil2+M22imagTrefoil1)/2')\n pl.title('slope: '+str(round(res[1],4))+' Intercept: '+str(round(res[0],4)))\n pl.subplot(2,3,2)\n t=bp.bin_scatter(M22TrefoilYshift,y,nbins=20,fmt='bo',scatter=True)\n res = linefit(M22TrefoilYshift,y)\n pl.plot(M22TrefoilYshift,M22TrefoilYshift*res[1]+res[0],'r,')\n pl.ylabel('y-decenter')\n pl.xlabel('(M22realTrefoil1 - M22imagTrefoil2)/2')\n pl.title('slope: '+str(round(res[1],4))+' Intercept: '+str(round(res[0],4)))\n pl.subplot(2,3,3)\n t=bp.bin_scatter(M20defocus,z,nbins=20,fmt='bo',scatter=True)\n res = linefit(M20defocus,z)\n pl.plot(M20defocus,M20defocus*res[1]+res[0],'r,')\n pl.ylabel('z-defocus')\n pl.xlabel('M20defocus')\n pl.title('slope: '+str(round(res[1],4))+' Intercept: '+str(round(res[0],4)))\n pl.subplot(2,3,4)\n t=bp.bin_scatter(M22ComaXtilt,thetax,nbins=20,fmt='bo',scatter=True)\n res = linefit(M22ComaXtilt,thetax)\n pl.plot(M22ComaXtilt,M22ComaXtilt*res[1]+res[0],'r,')\n pl.ylabel('x-tilt')\n pl.xlabel('(M22realComa2+M22imagComa1)/2')\n pl.title('slope: '+str(round(res[1],4))+' Intercept: '+str(round(res[0],4)))\n pl.subplot(2,3,5)\n t=bp.bin_scatter(M22ComaYtilt,thetay,nbins=20,fmt='bo',scatter=True)\n res = linefit(M22ComaYtilt,thetay)\n pl.plot(M22ComaYtilt,M22ComaYtilt*res[1]+res[0],'r,')\n pl.ylabel('y-tilt')\n pl.xlabel('(M22realComa1 - M22imagComa2)/2')\n pl.title('slope: '+str(round(res[1],4))+' Intercept: '+str(round(res[0],4)))\n\n pl.close()", "def fit(self,X,y):\n X = np.c_[np.ones(X.shape[0]),X]\n all_parameters = np.linalg.inv(X.T@X)@X.T@y\n self.intercept = all_parameters[0]\n self.theta = all_parameters[1:]", "def f(self,y,psi):\r\n\r\n #1. 
check that number of params is consistent\r\n # assert psi.shape[0] == self.n_terms, 'inconsistent parameter dimensions'\r\n # assert psi.shape[1] == 4, 'inconsistent parameter dimensions'\r\n mpsi = psi.copy()\r\n d = psi[-1]\r\n mpsi = mpsi[:self.num_parameters-1].reshape(self.n_terms, 3)\r\n\r\n #3. transform data\r\n z = d*y.copy()\r\n for i in range(len(mpsi)):\r\n a,b,c = mpsi[i]\r\n z += a*np.tanh(b*(y+c))\r\n return z", "def evaluate_model(args,model,data_loader):\n model.eval()\n with torch.no_grad():\n for data,_ in data_loader:\n\n data,targets,angles = rotate_tensor(\n data.numpy(),\n args.init_rot_range,\n -args.eval_rotation_range,\n +args.eval_rotation_range)\n\n\n data = torch.from_numpy(data)\n targets = torch.from_numpy(targets)\n angles = torch.from_numpy(angles)\n angles = angles.view(angles.size(0), 1)\n\n # Forward pass\n output, identity_vectors, eucleidian_vectors= model(data, targets,angles*np.pi/180) \n # Get triplet loss\n losses=triple_loss(args,targets,output, identity_vectors, eucleidian_vectors)\n break\n\n return losses[0].item()", "def coefA(x0,y0,x1,y1):\n return -(y1-y0)/(x1-x0)", "def coefC(x0,y0,x1,y1):\n return (x1*y0-x0*y1)/(x1-x0)", "def ransac_method(commands):\n\n ransac = linear_model.RANSACRegressor()\n \n # We need to fing the corresponding commands (y-axis) for function\n angles_x = []\n commands_y = []\n \n for cmd, angle in commands.items():\n commands_y.append([cmd])\n angles_x.append([angle])\n \n ransac.fit(angles_x, commands_y)\n b = ransac.estimator_.intercept_\n m = ransac.estimator_.coef_\n\n return m, b", "def jacobian(theta, event, parameters_to_fit):\n for (key, val) in enumerate(parameters_to_fit):\n setattr(event.model.parameters, val, theta[key])\n return event.chi2_gradient(parameters_to_fit)", "def _solve(self):\n B = sp.linalg.solve(self._XtX, self._XtY, assume_a='pos', overwrite_a=False, overwrite_b=False)\n self.coef_ = B[1:]\n self.intercept_ = B[0]\n self.is_fitted_ = True", "def test_rhs(self):\n\n _, _, generator = setup_lmde_frames_and_generator(self.basic_model, solver_frame=self.X)\n\n t = 13.1231\n y = np.eye(2, dtype=complex)\n\n output = generator(t, y, in_frame_basis=True).data\n\n X = np.array(self.X.data)\n X_diag, U = np.linalg.eigh(X)\n Uadj = U.conj().transpose()\n gen = (\n -1j\n * 2\n * np.pi\n * (self.w * np.array(self.Z.data) / 2 + self.r * np.cos(2 * np.pi * self.w * t) * X / 2)\n )\n expected = (\n Uadj @ expm(1j * t * X) @ gen @ expm(-1j * t * X) @ U + 1j * np.diag(X_diag)\n ) @ y\n\n self.assertTrue(np.allclose(expected, output))", "def __call__(self, parameters):\n [f, df] = self.linear_model(parameters)\n return np.concatenate((f-self.data_f, df-self.data_df))", "def solve_traj(self, init_joint, final_joint, potential_goal_states, coeffs={}, object_pos=[0, 0.2, 0.83]):\n self.scene.robot.SetDOFValues(init_joint, self.scene.manipulator.GetArmIndices())\n\n _, default_traj = self.get_default_traj(\n init_joint, final_joint, self.n_pred_timesteps, potential_goal_states)\n self.scene.robot.SetDOFValues(init_joint, self.scene.manipulator.GetArmIndices())\n\n request = req_util.create_empty_request(\n self.n_pred_timesteps, final_joint, self.scene.manipulator_name,potential_goal_states)\n if \"distance\" in coeffs:\n req_util.add_distance_cost(request, self.complete_pred_traj_means_expanded,\n self.complete_pred_traj_vars_expanded, coeffs[\"distance\"], self.n_human_joints, self.scene.all_links)\n if \"distanceBaseline\" in coeffs:\n req_util.add_distance_baseline_cost(request, self.head_pos, 
self.torso_pos, self.feet_pos, self.scene.all_links, self.n_pred_timesteps, coeffs[\"distanceBaseline\"])\n \n if \"visibilityBaseline\" in coeffs:\n req_util.add_visibility_baseline_cost(request, self.head_pos, object_pos, self.scene.eef_link_name, self.n_pred_timesteps, coeffs[\"visibilityBaseline\"])\n\n if \"legibilityBaseline\" in coeffs:\n req_util.add_legibility_baseline_cost(\n request, coeffs[\"legibilityBaseline\"], self.scene.eef_link_name)\n if \"collision\" in coeffs:\n req_util.add_collision_cost(\n request, coeffs[\"collision\"][\"cost\"], coeffs[\"collision\"][\"dist_pen\"])\n if \"nominal\" in coeffs:\n req_util.add_optimal_trajectory_cost(\n request, default_traj, self.scene.eef_link_name, self.n_pred_timesteps, coeffs[\"nominal\"])\n if \"regularize\" in coeffs:\n req_util.add_regularize_cost(\n request, coeffs[\"regularize\"], self.scene.eef_link_name)\n if \"smoothing\" in coeffs:\n req_util.add_smoothing_cost(\n request, coeffs[\"smoothing\"][\"cost\"], coeffs[\"smoothing\"][\"type\"])\n if \"velocity\" in coeffs:\n req_util.add_velocity_cost(request, self.complete_pred_traj_means_expanded,\n self.complete_pred_traj_vars_expanded, coeffs[\"velocity\"], self.n_human_joints, self.scene.all_links)\n if \"visibility\" in coeffs:\n head_pred_traj_mean, head_pred_traj_var = traj_utils.create_human_head_means_vars(\n self.complete_pred_traj_means_expanded, self.complete_pred_traj_vars_expanded)\n req_util.add_visibility_cost(request, head_pred_traj_mean, head_pred_traj_var,\n coeffs[\"visibility\"], object_pos, self.scene.eef_link_name)\n if \"legibility\" in coeffs:\n req_util.add_legibility_cost(\n request, coeffs[\"legibility\"], self.scene.eef_link_name)\n \n if \"joint_vel\" in coeffs:\n req_util.add_joint_vel_cost(request, coeffs[\"joint_vel\"])\n\n result = self.optimize_problem(request)\n eef_traj = self.scene.follow_trajectory(np.array(result.GetTraj()))\n return result, eef_traj", "def update_model_parameters(phi, T, nz, coord, SWVD, form=\"Calonne\"):\r\n D_eff = np.ones(nz)\r\n\r\n if form == \"Hansen\": # Hansen and Foslien (2015)\r\n D_eff = phi * (1 - phi) * D0 + D0\r\n elif form == \"Calonne\": # Calonne et al. (2014)\r\n x = 2 / 3 - phi\r\n b = np.heaviside(x, 1)\r\n D_eff = D0 * (1 - 3 / 2 * phi) * b\r\n else:\r\n print(\"requested method not available, check input\")\r\n\r\n ## effective thermal conductivity W/m/K\r\n k_eff = np.ones(nz)\r\n\r\n if form == \"Hansen\": # Hansen and Foslien (2015)\r\n k_eff = phi * ((1 - phi) * k_a + phi * k_i) + k_a\r\n elif form == \"Calonne\": # Calonne et al. (2011)\r\n k_eff = ka0 + ka1 * (rho_i * phi) + ka2 * (rho_i * phi) ** 2\r\n else:\r\n print(\"requested method not available, check input\")\r\n\r\n ## effective heat capacity - similar forumla in Hansen and Foslien (2015) and Lรถwe et al. 
(2019)\r\n rhoC_eff = np.zeros(nz)\r\n rhoC_eff = phi * rho_i * C_i + (np.ones(nz) - phi) * rho_a * C_a\r\n\r\n ## Water Vapor density rho_v and its derivative rho_v_dT:\r\n [rho_v, rho_v_dT] = sat_vap_dens(nz, T, SWVD)\r\n\r\n return D_eff, k_eff, rhoC_eff, rho_v, rho_v_dT", "def evalpotential( self, X, Y, Z):\n EVAL = np.zeros_like(X) \n for b in self.beams:\n EVAL += b(X,Y,Z)\n return EVAL* self.unitfactor", "def operator(self, params: Tensor) -> Tensor:\n theta, phi = params\n # calculate entries\n a: Tensor = exp(1j * phi) * cos(theta / 2)\n b: Tensor = sin(theta / 2)\n c: Tensor = -b\n d: Tensor = exp(-1j * phi) * cos(theta / 2)\n # construct the rows of the rotation matrix\n r1: Tensor = cat((a.view(1), b.view(1)))\n r2: Tensor = cat((c.view(1), d.view(1)))\n # build and return the rotation matrix\n rot: Tensor = cat((r1, r2)).view(2, 2)\n return rot", "def exec(self):\n try:\n xyz = [self.X_0, self.Y_0, self.Z_0]\n for _ in range(self.STEP):\n k_0 = self.__lorenz(xyz)\n k_1 = self.__lorenz([\n x + k * self.DT / 2 for x, k in zip(xyz, k_0)\n ])\n k_2 = self.__lorenz([\n x + k * self.DT / 2 for x, k in zip(xyz, k_1)\n ])\n k_3 = self.__lorenz([\n x + k * self.DT for x, k in zip(xyz, k_2)\n ])\n for i in range(3):\n xyz[i] += (k_0[i] + 2 * k_1[i] + 2 * k_2[i] + k_3[i]) \\\n * self.DT / 6.0\n self.res[i].append(xyz[i])\n self.__plot()\n except Exception as e:\n raise", "def run(self, X, Y, model):\n\n p0 = X.iloc[0] # read in the input info\n params = lmfit.Parameters() # empty parameter class\n success = True # check for success\n\n if model == 'Medlyn':\n min, max = self.param_space('g1')\n params.add('g1', p0.g1, min=min, max=max)\n min, max = self.param_space('sref')\n params.add('sref', p0.sref, min=min, max=max)\n\n if model == 'Eller':\n min, max = self.param_space('kmax')\n params.add('kmaxS1', p0.kmaxS1, min=min, max=max)\n\n if (model == 'ProfitMax') or (model == 'ProfitMax2'):\n min, max = self.param_space('kmax')\n params.add('kmax', p0.kmax, min=min, max=max)\n\n # the following models all require the Sperry kmax as an input!\n if model == 'Tuzet':\n min, max = self.param_space('g1')\n params.add('g1T', p0.g1T, min=min, max=max)\n\n if 'Tleaf' in X.columns: # vary g1 and kmax\n min, max = self.param_space('kmax')\n params.add('kmaxT', p0.kmax, min=min, max=max)\n\n else: # vary g1 and Pref, sref fixed\n min, max = self.param_space('PrefT', P50=p0.P50, P88=p0.P88)\n\n if any(X['Ps_pd'] > p0.PrefT):\n params.add('PrefT', p0.PrefT, min=min, max=max)\n\n else:\n params.add('PrefT', -p0.P88, min=min, max=max)\n\n if model == 'WUE-LWP':\n min, max = self.param_space('Lambda')\n params.add('Lambda', p0.Lambda, min=min, max=max)\n\n if model == 'CGain':\n min, max = self.param_space('Kappa')\n params.add('Kappa', p0.Kappa, min=min, max=max)\n\n if model == 'CMax':\n min, max = self.param_space('Alpha')\n params.add('Alpha', p0.Alpha, min=min, max=max)\n min, max = self.param_space('Beta')\n params.add('Beta', p0.Beta, min=min, max=max)\n\n if model == 'SOX-OPT':\n min, max = self.param_space('kmax')\n params.add('kmaxS2', p0.kmaxS2, min=min, max=max)\n\n if model == 'LeastCost':\n min, max = self.param_space('kmax')\n params.add('kmaxLC', p0.kmaxLC, min=min, max=max)\n min, max = self.param_space('Eta')\n params.add('Eta', p0.Eta, min=min, max=max)\n\n if model == 'CAP':\n min, max = self.param_space('krl')\n params.add('krlC', p0.krlC, min=min, max=max)\n min, max = self.param_space('Pcrit', P50=p0.P50, P88=p0.P88)\n\n if any(X['Ps_pd'] > p0.PcritC):\n params.add('PcritC', 
p0.PcritC, min=min, max=max)\n\n else:\n params.add('PcritC', -p0.P88, min=min, max=max)\n\n if model == 'MES':\n min, max = self.param_space('krl')\n params.add('krlM', p0.krlM, min=min, max=max)\n min, max = self.param_space('Pcrit', P50=p0.P50, P88=p0.P88)\n\n if any(X['Ps_pd'] > p0.PcritM):\n params.add('PcritM', p0.PcritM, min=min, max=max)\n\n else:\n params.add('PcritM', -p0.P88, min=min, max=max)\n\n if not os.path.isdir(self.opath): # create output dir\n os.makedirs(self.opath)\n\n # run the minimizer\n if self.method == 'emcee':\n out = lmfit.minimize(fres, params, args=(model, X, Y,\n self.inf_gb,),\n method=self.method, steps=self.steps,\n nwalkers=self.nchains, burn=self.burn,\n thin=self.thin, is_weighted=False,\n progress=False, nan_policy='omit')\n\n else:\n out = lmfit.minimize(fres, params, args=(model, X, Y,\n self.inf_gb,),\n method=self.method, nan_policy='omit')\n\n for param in out.params.values():\n\n if np.isclose(param.value, param.init_value):\n params[param.name] = lmfit.Parameter(name=param.name,\n value=1.5 *\n param.init_value)\n out = lmfit.minimize(fres, params,\n args=(model, X, Y, self.inf_gb,),\n method=self.method,\n nan_policy='omit')\n\n if not os.path.isfile(os.path.join(self.opath, '%s.txt' % (model))):\n txt = open(os.path.join(self.opath, '%s.txt' % (model)), 'w+')\n\n else: # append to existing file\n txt = open(os.path.join(self.opath, '%s.txt' % (model)), 'a+')\n\n txt.write('\\n')\n txt.write(lmfit.fit_report(out))\n\n if not success:\n txt.write('\\n## Warning: had to fix first parameter value')\n\n txt.write('\\n')\n txt.close() # close text file\n\n return out.params.valuesdict()", "def evaluate(self, parameters):\n [f, df] = self.linear_model(parameters)\n return OrderedDict([('f', f-self.data_f), ('df', df-self.data_df)])", "def solve(self, regparam):\n self.regparam = regparam\n \n #Some counters for bookkeeping\n self.stepcounter = 0\n self.flipcounter = 0\n self.nochangecounter = 0\n \n #Cached results\n self.evals = np.multiply(self.svals, self.svals)\n self.newevals = 1. 
/ (self.evals + self.regparam)\n newevalslamtilde = np.multiply(self.evals, self.newevals)\n self.D = np.sqrt(newevalslamtilde)\n #self.D = -newevalslamtilde\n \n self.VTY = self.svecs.T * self.Y\n DVTY = np.multiply(self.D.T, self.svecs.T * self.Y)\n \n #Using lists in order to avoid unnecessary matrix slicings\n self.DVTY_list = []\n self.YTVDDVTY_list = []\n self.classFitnessList = []\n for i in range(self.labelcount):\n DVTY_i = DVTY[:,i]\n self.DVTY_list.append(DVTY_i)\n YTVDDVTY_i = DVTY_i.T * DVTY_i\n self.YTVDDVTY_list.append(YTVDDVTY_i)\n fitness_i = self.size - DVTY_i.T * DVTY_i\n self.classFitnessList.append(fitness_i)\n \n self.Dsvecs_list = []\n self.svecsDDsvecs_list = []\n for i in range(self.size):\n Dsvec = np.multiply(self.D.T, self.svecs[i].T)\n self.Dsvecs_list.append(Dsvec)\n self.svecsDDsvecs_list.append(Dsvec.T*Dsvec)\n \n self.updateA()\n \n \n converged = False\n print(self.classcounts.T)\n if self.callbackfun is not None:\n self.callbackfun.callback(self)\n while True:\n \n converged = self.roundRobin()\n print(self.classcounts.T)\n if self.callbackfun is not None:\n self.callbackfun.callback(self)\n if converged: break\n \n if self.oneclass:\n self.Y = self.Y[:, 0]\n self.A = self.A[:, 0]\n self.results['predicted_clusters_for_training_data'] = self.Y\n self.predictor = self.svdad.createModel(self)", "def c_coefficients(x1,x2,x3,y1,y2,y3,initial_slope,final_slope):\n\tC = c_matrix(x1,x2,x3)\n\ty = y_vector(x1,x2,x3,y1,y2,y3,initial_slope,final_slope)\n\tCCoefficients = np.dot(inv(C),y)\n\treturn(CCoefficients)", "def _set_coefficients(self, user_defined_coefficients=None):\n # Check to ensure that if there any NaNs, a different basis must be used and solver must be changed\n # to least squares!\n if user_defined_coefficients is not None:\n self.coefficients = user_defined_coefficients\n return\n indices_with_nans = np.argwhere(np.isnan(self._model_evaluations))[:,0]\n if len(indices_with_nans) != 0:\n print('WARNING: One or more of your model evaluations have resulted in an NaN. We found '+str(len(indices_with_nans))+' NaNs out of '+str(len(self._model_evaluations))+'.')\n print('The code will now use a least-squares technique that will ignore input-output pairs of your model that have NaNs. 
This will likely compromise computed statistics.')\n self.inputs = np.delete(self._quadrature_points, indices_with_nans, axis=0)\n self.outputs = np.delete(self._model_evaluations, indices_with_nans, axis=0)\n self.subsampling_algorithm_name = None\n number_of_basis_to_prune_down = self.basis.cardinality - len(self.outputs)\n if number_of_basis_to_prune_down > 0:\n self.basis.prune(number_of_basis_to_prune_down + self.dimensions) # To make it an over-determined system!\n self.method = 'least-squares'\n self.mesh = 'user-defined'\n self._set_solver()\n self._set_points_and_weights()\n self.set_model()\n if self.mesh == 'sparse-grid':\n counter = 0\n multi_index = []\n coefficients = np.empty([1])\n multindices = np.empty([1, self.dimensions])\n for tensor in self.quadrature.list:\n P = self.get_poly(tensor.points, tensor.basis.elements)\n W = np.diag(np.sqrt(tensor.weights))\n A = np.dot(W , P.T)\n _, _ , counts = np.unique( np.vstack( [tensor.points, self._quadrature_points]), axis=0, return_index=True, return_counts=True)\n indices = [i for i in range(0, len(counts)) if counts[i] == 2]\n b = np.dot(W , self._model_evaluations[indices])\n del counts, indices\n coefficients_i = self.solver.get_coefficients(A, b) * self.quadrature.sparse_weights[counter]\n multindices_i = tensor.basis.elements\n coefficients = np.vstack([coefficients_i, coefficients])\n multindices = np.vstack([multindices_i, multindices])\n counter = counter + 1\n multindices = np.delete(multindices, multindices.shape[0]-1, 0)\n coefficients = np.delete(coefficients, coefficients.shape[0]-1)\n unique_indices, indices , counts = np.unique(multindices, axis=0, return_index=True, return_counts=True)\n coefficients_final = np.zeros((unique_indices.shape[0], 1))\n for i in range(0, unique_indices.shape[0]):\n for j in range(0, multindices.shape[0]):\n if np.array_equiv( unique_indices[i,:] , multindices[j,:]):\n coefficients_final[i] = coefficients_final[i] + coefficients[j]\n self.coefficients = coefficients_final\n self.basis.elements = unique_indices\n else:\n P = self.get_poly(self._quadrature_points)\n W = np.diag(np.sqrt(self._quadrature_weights))\n A = np.dot(W , P.T)\n b = np.dot(W , self._model_evaluations)\n if self.gradient_flag:\n # Now, we can reduce the number of rows!\n dP = self.get_poly_grad(self._quadrature_points)\n C = cell2matrix(dP, W)\n G = np.vstack([A, C])\n r = np.linalg.matrix_rank(G)\n m, n = A. 
shape\n print('Gradient computation: The rank of the stacked matrix is '+str(r)+'.')\n print('The number of unknown basis terms is '+str(n))\n if n > r:\n print('WARNING: Please increase the number of samples; one way to do this would be to increase the sampling-ratio.')\n self.coefficients = self.solver.get_coefficients(A, b, C, self._gradient_evaluations)\n else:\n self.coefficients = self.solver.get_coefficients(A, b)", "def estimate(self) -> None:\n\n if self.frame_1_rotations is None:\n raise ValueError('frame_1_rotations must be set before a call to estimate')\n if self.frame_2_rotations is None:\n raise ValueError('frame_2_rotations must be set before a call to estimate')\n if self.temperatures is None:\n raise ValueError('temperatures must be set before a call to estimate')\n\n relative_euler_angles = []\n\n # get the independent euler angles\n for f1, f2 in zip(self.frame_1_rotations, self.frame_2_rotations):\n\n relative_euler_angles.append(list(quaternion_to_euler(f2*f1.inv(), order=self.order)))\n\n # make the coefficient matrix\n coef_mat = np.vstack([np.ones(len(self.temperatures)), self.temperatures]).T\n\n # solve for the solution\n solution = np.linalg.lstsq(coef_mat, relative_euler_angles)[0]\n\n # store the solution\n self.angle_m_offset = solution[0, 0]\n self.angle_m_slope = solution[1, 0]\n self.angle_n_offset = solution[0, 1]\n self.angle_n_slope = solution[1, 1]\n self.angle_p_offset = solution[0, 2]\n self.angle_p_slope = solution[1, 2]", "def run(self):\n is_spsa = True\n is_steep_descent = False\n is_rprop = False\n\n k = 0\n theta = self.theta0\n\n while True:\n k = k + 1\n\n self.iter = k\n print(f'starting iter {k} ...')\n\n if self.constraints is not None:\n theta = self.constraints(theta)\n\n print('current param:')\n for name, value in utils.true_param(theta).items():\n print(f' {name}: {value[\"value\"]}')\n\n c_k = self.c / (k ** self.gamma)\n a_k = self.a / ((k + self.A) ** self.alpha)\n\n # print(f' ck: {c_k:0.5f}')\n # print(f' ak: {a_k:0.5f}')\n\n # Run the engine match here to get the gradient\n print('Run engine match ...')\n gradient = self.approximate_gradient(theta, c_k, k)\n\n # For SPSA we update with a small step (theta = theta - a_k * gradient)\n if is_spsa:\n theta = utils.linear_combinaison(1.0, theta, -a_k, gradient)\n logging.info(f'{__file__} > theta from spsa: {theta}')\n # print(f'new param after application of gradient:')\n # for n, v in theta.items():\n # print(f' {n}: {int(v[\"value\"] * v[\"factor\"])}')\n\n # For steepest descent we update via a constant small step in the gradient direction\n elif is_steep_descent:\n mu = -0.01 / max(1.0, utils.norm2(gradient))\n theta = utils.linear_combinaison(1.0, theta, mu, gradient)\n\n # For RPROP, we update with information about the sign of the gradients\n elif is_rprop:\n theta = utils.linear_combinaison(1.0, theta, -0.01, self.rprop(theta, gradient))\n\n # Apply parameter limits\n theta = utils.apply_limits(theta)\n logging.info(f'{__file__} > theta with limits: {theta}')\n # print(f'new param after application of limits:')\n # for n, v in theta.items():\n # print(f' {n}: {int(v[\"value\"] * v[\"factor\"])}')\n\n # We then move to the point which gives the best average of goal\n (avg_goal, avg_theta) = self.average_best_evals(30)\n logging.info(f'{__file__} > avg_theta from average_best_evals: {avg_theta}')\n\n theta = utils.linear_combinaison(0.98, theta, 0.02, avg_theta)\n logging.info(f'{__file__} > theta with avg_theta: {theta}')\n # print(f'new param after application of best 
average param:')\n # for n, v in theta.items():\n # print(f' {n}: {int(v[\"value\"] * v[\"factor\"])}')\n\n # Apply parameter limits\n theta = utils.apply_limits(theta) # This is the best param.\n logging.info(f'{__file__} > best param: {theta}')\n # print(f'new param after application of limits:')\n # for n, v in theta.items():\n # print(f' {n}: {int(v[\"value\"] * v[\"factor\"])}')\n\n # Log best param values\n for kv, vv in theta.items():\n logging.info(f'<best> iter: {k}, param: {kv}, value: {int(vv[\"value\"]*vv[\"factor\"])}')\n print('best param:')\n for n, v in theta.items():\n print(f' {n}: {int(v[\"value\"] * v[\"factor\"])}')\n\n mean_all_goal, _ = self.average_evaluations(30)\n print(f'mean all goal: {mean_all_goal}')\n\n mean_best_goal, _ = self.average_best_evals(30)\n print(f'mean best goal: {mean_best_goal}')\n\n # Save data in csv for plotting.\n plot_data = {}\n plot_data.update({'iter': k})\n plot_data.update({'meanbestgoal': mean_best_goal})\n plot_data.update({'meanallgoal': mean_all_goal})\n plot_theta = utils.true_param(theta)\n for name, value in plot_theta.items():\n plot_data.update({name: value[\"value\"]})\n\n with open(self.plot_data_file, 'a') as f:\n cnt = 0\n for name, value in plot_data.items():\n cnt += 1\n if cnt == len(plot_data):\n f.write(f'{value}\\n')\n else:\n f.write(f'{value},')\n\n print(f'done iter {k} / {self.max_iter}')\n logging.info(f'{__file__} > done iter {k} / {self.max_iter}')\n print('=========================================')\n\n # Stopping rule 1: Average goal and iteration meet the\n # stop_all_mean_goal and stop_min_iter criteria.\n if k >= self.stop_min_iter and mean_all_goal <= self.stop_all_mean_goal:\n print('Stop opimization due to good average all goal!')\n break\n\n # Stopping rule 2: Average best goal and iteration meet the\n # stop_best_mean_goal and stop_min_iter criteria.\n if k >= self.stop_min_iter and mean_best_goal <= self.stop_best_mean_goal:\n print('Stop opimization due to good average best goal!')\n break\n\n # Stopping rule 3: Max iteration is reached.\n if k >= self.max_iter:\n print('Stop opimization due to max iteration!')\n break\n\n return utils.true_param(theta)", "def calculateCoefficientsTrainExp(np.ndarray[double, ndim=2, mode=\"c\"] x_logs not None, np.ndarray[double, ndim=2, mode=\"c\"] derivatives not None, np.ndarray[double, ndim=1] x_log_eigenvals not None, np.ndarray[double, ndim=2, mode=\"c\"] coefficients not None):\n cdef int n, dd, d\n\n n, dd = x_logs.shape[0], x_logs.shape[1]\n d = np.sqrt(dd)\n \n\n out = c_calculateCoefficientsTrainExp (&x_logs[0,0], &derivatives[0,0], &x_log_eigenvals[0], &coefficients[0,0], n, dd, d)\n\n return out", "def evaluate(Agent, rules, wrappers, params_path=None, steps=1024):\n\n score = 0.0\n\n agent = Agent()\n\n if params_path is not None:\n agent.load_state_dict(params_path)\n\n env = CARLE()\n\n\n for wrapper in wrappers:\n env = wrapper[0](env)\n env.reward_scale = wrapper[1]\n\n env.batch_size = steps*len(rules)\n\n if wrapper[2] is not None:\n env.load_state_dict(torch.load(wrapper[2]))\n\n env.eval()\n\n #env.set_no_grad()\n\n total_steps = 0\n score_trace = []\n for ruleset in rules:\n\n env.inner_env.birth = ruleset[0]\n env.inner_env.survive = ruleset[0]\n\n obs = env.reset()\n\n for step in range(steps):\n\n action = agent(obs)\n\n obs, reward, done, info = env.step(action)\n\n score += reward.detach().sum().cpu().numpy()\n score_trace.append(reward.detach().sum().cpu().numpy())\n\n total_steps += 1\n\n print(\"cumulative score = {:.3e} at total 
steps = {}, rulset = {}\".format(\\\n score, total_steps, ruleset))\n\n score /= total_steps\n \n return score, score_trace", "def forward(alphaIn, phi_x, y):\n alphaPhi_X = robot.Distribution()\n for x, alphaX in alphaIn.items():\n yProb = phi_x[x]\n tmpProd = yProb * alphaX\n if tmpProd > 0:\n alphaPhi_X[x] = tmpProd\n\n # compute alpha out\n alphaOut = robot.Distribution()\n for x, alphaPhi in alphaPhi_X.items():\n x2Poss = transition_model(x)\n # multiply and add x2Poss to o/p\n for x2Key, x2pVal in x2Poss.items():\n alphaOut[x2Key] += x2pVal*alphaPhi\n #print(alphaOut)\n return alphaOut", "def fit(self, x, y): \n # *** START CODE HERE ***\n y = y.reshape(y.shape[0], 1)\n y_0 = (1 - y).reshape(y.shape)\n m = y.shape[0]\n m_0 = np.asscalar(np.sum(y_0))\n m_1 = np.asscalar(np.sum(y))\n # Find phi, mu_0, mu_1, and sigma\n phi = np.sum(y) / m\n mu_0 = (np.sum(np.multiply(y_0, x), axis = 0, keepdims = True) / m_0) #.reshape(y.shape)\n mu_1 = np.sum(np.multiply(y, x), axis = 0, keepdims=True) / m_1\n sigma = getsigma(x, mu_0, mu_1, m, y, y_0)\n # Write theta in terms of the parameters\n sigma_inv = np.linalg.inv(sigma)\n log_phi = np.log(np.exp(-1 * np.log(phi)) - 1)\n theta_0 = (np.dot(np.dot(mu_0, sigma_inv), mu_0.T) - np.dot(np.dot(mu_1, sigma_inv), mu_1.T)) / 2 - log_phi\n self.theta = np.concatenate((theta_0, np.dot(sigma_inv, (mu_1 - mu_0).T)))\n # Compute cost\n x_0 = np.zeros((x.shape[0], 1)) + 1\n x_train = np.concatenate((x_0.T, x.T))\n h_theta = sigmoid(np.dot(self.theta.T, x_train)).T\n cost = - np.sum(np.dot(y.T, np.log(h_theta - (h_theta - 0.5) * self.eps)) + (np.dot(y_0.T, np.log(1 - h_theta + (h_theta - 0.5) * self.eps)))) / m\n if self.verbose:\n print(\"Cost: \" + str(cost))\n # *** END CODE HERE ***", "def cost(self, X, y) :\n ### ========== TODO : START ========== ###\n # part d: compute J(theta)\n #we know for linear/polynomial regression, the cost is the square of the errors\n X = self.generate_polynomial_features(X)\n y_pred_vector = np.dot(X, self.coef_)\n cost = np.dot((y-y_pred_vector).transpose(),(y-y_pred_vector))#write in the matrix form\n ### ========== TODO : END ========== ###\n return cost", "def run_model(hyperparams, iteration): \n # Fixed random state\n rand_state = np.random.RandomState(1).get_state()\n np.random.set_state(rand_state)\n seed = np.random.randint(1, 2**31 - 1)\n tf.set_random_seed(seed)\n random.seed(seed)\n\n\n env = gym.make('CartPole-v1')\n env = DummyVecEnv([lambda: env])\n\n # Get all the current hyperparameter values\n hyperparams['timesteps_per_batch'] = hyperparams['timesteps_per_batch']\n for parameter_name in ['vf_stepsize', 'max_kl', 'gamma', 'lam']:\n hyperparams[parameter_name] = float(hyperparams[parameter_name])\n\n # Initialize model\n model = TRPO(MlpPolicy, env, \n verbose=1,\n timesteps_per_batch=hyperparams['timesteps_per_batch'],\n vf_stepsize=hyperparams['vf_stepsize'],\n max_kl=hyperparams['max_kl'],\n gamma=hyperparams['gamma'],\n lam=hyperparams['lam']\n )\n\n model.learn(total_timesteps=10000)\n model.save(\"trpo_cartpole_\" + str(iteration))\n \n result = evaluate(env, model)\n return result", "def calculate_coefficients(self, start, end):\n A = np.array([\n [self.deltaT**3, self.deltaT**4, self.deltaT**5],\n [3 * self.deltaT**2, 4 * self.deltaT**3, 5 * self.deltaT**4],\n [6 * self.deltaT, 12 * self.deltaT**2, 20 * self.deltaT**3],\n ])\n\n a_0, a_1, a_2 = start[0], start[1], start[2] / 2.0\n c_0 = a_0 + a_1 * self.deltaT + a_2 * self.deltaT**2\n c_1 = a_1 + 2 * a_2 * self.deltaT\n c_2 = 2 * a_2\n\n B = 
np.array([\n end[0] - c_0,\n end[1] - c_1,\n end[2] - c_2\n ])\n\n a_3_4_5 = np.linalg.solve(A, B)\n coeff = np.concatenate((np.array([a_0, a_1, a_2]), a_3_4_5))\n\n return coeff", "def pythagorean_equation(\n outputs, inputs, parameterization, return_diagnostics=False\n):\n batched = len(inputs.size()) > 1\n jac = jacobian(\n outputs, inputs, batched=batched, create_graph=True, allow_unused=False\n )\n\n frequency = (2 * np.pi * parameterization[..., 1]).view(outputs.size())\n # r$ (f * y)^2 + (y')^2 = 1$\n lhs = (frequency * outputs) ** 2 + jac.view(outputs.size()) ** 2\n rhs = lhs.new_ones(lhs.size()) * (frequency ** 2)\n if return_diagnostics:\n return lhs - rhs, (lhs, rhs, jac)\n else:\n return lhs - rhs", "def model_quadratic(train_x, train_y, test_x):\n train_x = train_x.rename('x', axis=1)\n train_x = sm.add_constant(train_x)\n train_df = train_x.copy()\n train_df['y'] = train_y\n model_fit = sm.formula.ols('y ~ np.power(x, 2) + x + const', data=train_df).fit()\n model_info = {'model': 'quadratic', 'R2': model_fit.rsquared, 'f_pvalue': model_fit.f_pvalue,\n 'const': model_fit.params.const}\n test_x = test_x.rename('x')\n test_x = sm.add_constant(test_x)\n predictions = model_fit.predict(test_x)\n return predictions, model_info", "def fit(self, X, Y):\n\n def one_vs_all(house): return [1 if y == house else 0 for y in Y]\n\n self.theta = []\n X = np.insert(X, 0, 1, axis=1)\n m = Courses.m\n theta_nb = len(X[0])\n\n for house in range(4):\n y_ova = one_vs_all(house)\n theta = np.zeros(theta_nb)\n for _ in range(self.iterations):\n theta -= self.gradient_descent(X, theta, y_ova, m)\n self.theta.append(theta)", "def application_test():\n # Choice of nonlinear coefficient\n m = 2\n\n def q(u):\n return (1+u)**m\n\n def Dq(u):\n return m*(1+u)**(m-1)\n\n usage = 'manual|automatic Krylov|direct degree nx ny nz'\n try:\n import sys\n J_comp = sys.argv[1]\n linear_solver = sys.argv[2]\n degree = int(sys.argv[3])\n divisions = [int(arg) for arg in sys.argv[4:]]\n except:\n print('Usage: %s' % sys.argv[0], usage)\n sys.exit(0)\n\n u = solver(q, Dq, f, divisions, degree,\n 'pde_Newton', J_comp, linear_solver)\n\n # Find max error\n u_exact = Expression(\n 'pow((pow(2, m+1)-1)*x[0] + 1, 1.0/(m+1)) - 1', m=m)\n u_e = interpolate(u_exact, u.function_space())\n import numpy as np\n error = np.abs(u_e.vector().array() -\n u.vector().array()).max()\n print('error: %.2E' % error)", "def model(phi_hat, Dhat):\r\n # spatial nodes\r\n ngrid = 10\r\n end = -1\r\n\r\n M, MT = get_mmt()\r\n\r\n A = MT@np.diag(Dhat)@M\r\n A = A[1:ngrid-1]\r\n\r\n # differential equations\r\n\r\n # first node\r\n m.Equation(phi_hat(0).dt() == 0)\r\n\r\n # interior nodes\r\n int_value = -A@phi_hat # value at interior nodes\r\n m.Equations(phi_hat(i).dt() == int_value(i) for i in range(0, ngrid))\r\n\r\n # terminal node\r\n m.Equation(phi_hat(ngrid).dt() == Dhat[end]*2*(phi_hat(end-1) - phi_hat(end)))", "def evaluate(self, w, X, y):\n # help avoid mistakes (as described in the assignment) by\n # potentially reshaping our arguments\n w = ensure_1d(w)\n y = ensure_1d(y)\n\n # Prediction is linear combination\n y_hat = X @ w\n # Residual is difference between ground truth and prediction\n # (\"what's left\" after your predicition)\n residuals = y - y_hat\n # Squared residuals gives us the objective function value\n f = 0.5 * np.sum(residuals ** 2)\n\n # Analytical gradient, written in mathematical form first\n # and then translated into Python.\n # The parentheses on the first term are just a small optimization:\n # this way, we 
do two matrix-vector multipliciations,\n # instead of a (more expensive) matrix-matrix mult and a matrix-vector\n g = X.T @ (X @ w) - X.T @ y\n return f, g", "def solve(self, theta: float = 1e-6) -> Tuple[np.ndarray, np.ndarray]:\n self.mdp.ensure_compiled()\n self.theta = theta\n return self._policy_improvement()", "def wolfe_linear_search(\n phi, d_phi,\n alpha_init,\n c1 = 0.5,\n c2 = 0.7,\n phi_0 = None, d_phi_0 = None,\n alpha_max = None,\n \n bracketing_augment_method = lambda alpha, **kwargs: 2 * alpha,\n \n\n max_noof_bracketing_steps = 10,\n max_noof_zooming_steps = 10,\n \n printing = True, ax = None,\n plotting_params = dict()\n):\n\n if phi_0 is None:\n phi_0 = phi(0)\n \n if d_phi_0 is None:\n d_phi_0 = d_phi(0)\n \n\n ## initialize plotting\n plotting = ax is not None\n if plotting:\n phi = Rememberer(phi)\n pl_a_max = plotting_params.get(\"alpha_max\", 5 * alpha_init)\n \n if plotting_params.get(\"plot_fun\", False):\n ## we evaluate `phi` on a grid \n ## so the calculated values will be written into `phi.history`.\n for x in np.linspace(0, pl_a_max, plotting_params.get(\"n\", 10)):\n phi(x)\n else:\n phi.history = phi.history.append({\"x\": 0, \"f\": phi_0}, ignore_index = True)\n \n fun_plot = pl.plot(phi.history.x, phi.history.f)[0]\n \n \n # plot c1, c2 lines\n ax.autoscale(False)\n pl.plot([0, pl_a_max],[phi_0, phi_0 + c1 * d_phi_0 * pl_a_max],)\n pl.plot([0, pl_a_max],[phi_0, phi_0 + c2 * d_phi_0 * pl_a_max],)\n ax.autoscale(True)\n\n ## prepare the dots depicting current state of our algorithm\n dots = ax.scatter(0, phi_0)\n\n def update_plot(new_dots = []):\n if plotting:\n dots.set_offsets(new_dots)\n \n vvv = phi.history.sort_values(\"x\")\n fun_plot.set_xdata(vvv.x)\n fun_plot.set_ydata(vvv.f)\n \n # recompute the ax.dataLim\n ax.relim()\n # update ax.viewLim using the new dataLim\n ax.autoscale_view()\n \n # redraw\n ax.figure.canvas.draw()\n \n if \"sleep_time\" in plotting_params:\n sleep(plotting_params[\"sleep_time\"])\n \n\n ## Initialize bracketing phase\n a_prev = 0\n phi_a_prev = phi_0\n\n a = alpha_init\n phi_a = phi(a)\n\n \n if plotting:\n # update plot\n update_plot([[a, phi_a], [a_prev, phi_a_prev]])\n \n\n ###################\n ## Bracketing phase\n\n for i in range(max_noof_bracketing_steps):\n print(\"New bracketing step with a_prev={}; a={}\".format(a_prev, a))\n # If the 'sufficient descent' fails or we are already increasing (from `a_prev`)\n # then this means that we have increased `a` far enough and we can start zooming.\n if (phi_a > phi_0 + c1 * a * d_phi_0) or (phi_a >= phi_a_prev):\n a_lo, a_hi = a_prev, a\n phi_a_lo, phi_a_hi = phi_a_prev, phi_a\n\n if printing:\n if (phi_a > phi_0 + c1 * a * d_phi_0):\n print(\"Sufficient descent fails.\")\n if (phi_a >= phi_a_prev):\n print(\"phi_a >= phi_a_prev\")\n print(\"You can start zooming with a_lo={}, a_hi={}\".format(a_lo, a_hi))\n break\n\n d_phi_a = d_phi(a)\n\n # If 'curvature condition' is satisfied then we are done since we already know \n # that a satisfies the 'sufficient descent'.\n if np.abs(d_phi_a) <= c2 * np.abs(d_phi_0):\n if printing:\n print(\"You are done and you can use alpha ={}\".format(a))\n if plotting:\n ## plot the final alpha\n ax.scatter(a, phi_a, s = 40, c = \"r\")\n ax.figure.canvas.draw()\n\n return a\n\n # If `phi` is increasing in `a`, we can start zooming.\n if(d_phi_a >= 0):\n a_lo, a_hi = a, a_prev\n phi_a_lo, phi_a_hi = phi_a, phi_a_prev\n if printing:\n print(\"d_phi_a >= 0;\")\n print(\"You can start zooming with a_lo={}, a_hi={}\".format(a_lo, a_hi))\n 
break\n\n ## If none of the previous happens, we must start everything with larger `a`:\n # choose new a in (a, alpha_max):\n if a == alpha_max:\n raise WolfeLineSearchException(\"We reached a == alpha_max in bracketing phase. Can't augment it any more.\")\n \n a_new = bracketing_augment_method(alpha = a)\n \n if alpha_max is not None:\n a_new = min(a_new, alpha_max)\n\n a_prev, phi_a_prev = a, phi_a\n a, phi_a = a_new, phi(a_new)\n\n if printing:\n print(\"Our a={} seems not to be large enough. Try with larger a ={}\".format(a_prev, a))\n if plotting:\n update_plot([[a, phi_a], [a_prev, phi_a_prev]])\n \n else:\n raise WolfeLineSearchException(\"Too many steps in bracketing phase.\")\n \n \n\n ################\n ## Zooming phase\n # In the whole process:\n # a) the interval bounded by `a_lo`, `a_hi` contains step-lengths that satisfy Wolfe.\n # (Warning: `a_hi` can be smaller than `a_lo`.)\n # b) `a_lo` satisfies the 'sufficient decrease' and\n # `phi(a_lo) = min {phi(a) that we calculated so far and a satisfied sufficient decrease}`.\n # c) in particular `a_hi` either does not satisfy 'sufficient decrease' or \n # it does and in that case it phi(a_hi) > phi(a_lo)\n # d) $\\phi'(a_{lo})\\cdot(a_{hi} - a_{lo})$ \n # This means that phi is decreasing in `a_lo` in the direction towards `a_hi`.\n\n\n \n for j in range(max_noof_zooming_steps):\n\n print(\"New zooming step with a_lo = {}; a_hi = {}\".format(a_lo, a_hi))\n # update plot\n if plotting:\n update_plot([[a_lo, phi_a_lo], [a_hi, phi_a_hi]])\n \n\n # choose `a` between `a_lo` and `a_hi`\n a = (a_lo + a_hi) / 2\n phi_a = phi(a)\n\n # If 'sufficient decrease' at `a` fails, or we get larger value than at `a_lo`\n # then we shrink the interval: keep `a_lo` and take `a_hi = a`.\n if (phi_a > phi_0 + c1 * a * d_phi_0) or (phi_a >= phi_a_lo):\n a_hi = a\n phi_a_hi = phi_a\n else:\n # Now we know that 'sufficient decrease' holds at `a`.\n # Check the 'curvature condition':\n d_phi_a = d_phi(a) \n if abs(d_phi_a) <= c2 * abs(d_phi_0):\n print(\"You are done and you can use alpha ={}\".format(a))\n break\n\n # If we got here, we know that 'curvature condition' failed so we must \n # update our interval. 
\n # We also know that `a` satisfies the 'sufficient decrease' and `phi_a < phi_lo'.\n # Thus (according to (b)) in the next step, `a` must become the new `a_lo`.\n # Therefore new `a_lo, a_hi` will be either `a, a_hi` or `a, a_lo`.\n # We choose so that\n # phi will be decreasing at `a_lo` in the direction towards `a_hi`.\n if d_phi_a * (a_hi - a_lo) >= 0:\n a_hi = a_lo\n phi_a_hi = phi_a_lo\n\n a_lo = a\n phi_a_lo = phi_a\n else:\n raise WolfeLineSearchException(\"Too many steps in zooming phase.\")\n \n \n if plotting:\n ## plot the final alpha\n ax.scatter(a, phi_a, s = 40, c = \"r\")\n ax.figure.canvas.draw()\n\n return a", "def cost(phi, theta, omega, ket):\n evolved = jnp.dot(rot(phi, theta, omega), ket)\n return fidelity(evolved, basis(2, 0))[0][0]", "def hexapodZernikeMultiLinearModel_hexapodcoordinate():\n Tfile='/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_training.cp'\n Vfile = '/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_validate.cp'\n b=p.load(open(Tfile))\n vb=p.load(open(Vfile))\n nobs = len(b)\n x = b[:,0]\n y = b[:,1]\n z = b[:,2]\n theta = b[:,3]\n phi = b[:,4]\n fwhm = b[:,5]\n e1 = b[:,6]\n e2 = b[:,7]\n thetax = theta*np.cos(np.deg2rad(phi))\n thetay = theta*np.sin(np.deg2rad(phi))\n xh = x*1000 # convert to hexapod coordinate\n yh = -y*1000\n zh = -z*1000\n xtilth = - thetay\n ytilth = - thetax\n dataX = b[:,8:68]\n coeff_xh = sm.WLS(xh,dataX).fit().params\n coeff_yh = sm.WLS(yh,dataX).fit().params\n coeff_zh = sm.WLS(zh,dataX).fit().params\n coeff_xtilth = sm.WLS(xtilth,dataX).fit().params\n coeff_ytilth = sm.WLS(ytilth,dataX).fit().params\n coeff = np.array([coeff_xh,coeff_yh,coeff_zh,coeff_xtilth,coeff_ytilth])\n vx = vb[:,0]\n vy = vb[:,1]\n vz = vb[:,2]\n vtheta = vb[:,3]\n vphi = vb[:,4]\n vfwhm = vb[:,5]\n ve1 = vb[:,6]\n ve2 = vb[:,7]\n vthetax = vtheta*np.cos(np.deg2rad(vphi))\n vthetay = vtheta*np.sin(np.deg2rad(vphi))\n vxh = vx*1000 # convert to hexapod coordinate\n vyh = -vy*1000\n vzh = -vz*1000\n vxtilth = - vthetay\n vytilth = - vthetax\n vdataX = vb[:,8:68]\n fit = np.dot(vdataX,coeff.T)\n bp.bin_scatter(vxh,fit[:,0],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vyh,fit[:,1],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vzh,fit[:,2],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vxtilth,fit[:,3],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vytilth,fit[:,4],nbins=20,fmt='bo',scatter=True)", "def cost_function(param, Y, R, n_features):\r\n # theta (user, feature), (943, 10): user preference\r\n # X (movie, feature), (1682, 10): movie features\r\n n_movie, n_user = Y.shape\r\n X, theta = deserialize(param, n_movie, n_user, n_features)\r\n\r\n inner = np.multiply(X @ theta.T - Y, R)\r\n\r\n return np.power(inner, 2).sum() / 2", "def theta():\n pass", "def main():\n ###################\n ## Dataset ##\n ###################\n # (X, y) m = 100, n = 1\n _data_x, data_y = make_regression(n_samples=100, n_features=1, noise=10)\n\n # show the dataset\n plt.subplot(2, 2, 1)\n plt.title(\"dataset\")\n plt.scatter(_data_x, data_y)\n\n # Transform the dataset into matrices.\n # That is used for writing the equations in the matrix form.\n data_x = np.hstack((_data_x, np.ones(_data_x.shape)))\n data_y = data_y.reshape(data_y.shape[0], 1)\n\n #################\n ## Model ##\n #################\n # initial parameters\n init_params = np.random.randn(2, 1)\n\n # initial model\n init_model = model(data_x, init_params)\n\n # plot initial model\n plt.subplot(2, 2, 
2)\n plt.title(\"initial model\")\n plt.scatter(_data_x, data_y)\n plt.plot(_data_x, init_model, c='g')\n\n #########################\n ## cost function ##\n #########################\n # show cost function for initial parameters\n print(cost_function(data_x, data_y, init_params))\n\n ####################\n ## training ##\n ####################\n # learning rate\n learn_rate = 0.005\n # number of iterations\n number_iterations = 1_000\n\n # final parameters for our model\n final_params, cost_tracking = gradient_descent(\n data_x, data_y, init_params, learn_rate, number_iterations)\n\n # final model\n final_model = model(data_x, final_params)\n\n # show cost function for final parameters\n print(cost_function(data_x, data_y, final_params))\n\n # plot final model\n plt.subplot(2, 2, 3)\n plt.title(\"final model\")\n plt.scatter(_data_x, data_y)\n plt.plot(_data_x, final_model, c='r')\n\n ##########################\n ## learning curve ##\n ##########################\n # plot Cost history\n plt.subplot(2, 2, 4)\n plt.title(\"cost tracking\")\n plt.plot(range(number_iterations), cost_tracking)\n\n ########################################\n ## Coefficient of determination ##\n ########################################\n print(coefficient_determination(data_y, final_model))\n\n plt.show()", "def predict(self, X) :\n if self.coef_ is None :\n raise Exception(\"Model not initialized. Perform a fit first.\")\n\n X = self.generate_polynomial_features(X) # map features\n\n ### ========== TODO : START ========== ###\n # part c: predict y\n # for this we first get the single value of feature vector, then X in the transposed form and then we have to multiply by Theta\n\n y = np.dot(X, self.coef_)#coef is the coef matrix\n ### ========== TODO : END ========== ###\n\n\n return y", "def evaluate_model(fn_string, df, features,\n coefficients=None,\n target=None,\n fit_intercept=False):\n features = list(set(df.columns).intersection(features))\n array = df[features].to_numpy()\n func = process_fn(fn_string, features)\n n_samples = len(df)\n predictions = func(array.T)\n if coefficients is None:\n if target is None:\n target = df.columns[0]\n target_values = df[target]\n coefficients = lsq_coefficients(predictions, target_values,\n fit_intercept=fit_intercept)\n slope, intercept = coefficients\n else:\n slope, intercept = coefficients\n predictions = np.add(np.multiply(predictions, slope), intercept)\n return predictions, coefficients", "def _extract_coefficients(self, expr):\n\n theta_s = sp.Symbol('theta_s')\n\n N_fn = self.SRF.ncoefs + self.V.ncoefs - 1\n\n fn = []\n\n # find f_0 coefficient\n repl0 = dict([[sp.cos(theta_s), 0]])\n fn = fn + [expr.xreplace(repl0)]\n\n # find f_1 coefficient\n repl1 = dict([[sp.cos(theta_s)**i, 0] for i in list(range(N_fn, 0, -1))\n if i != 1] + [[sp.cos(theta_s), 1]])\n fn = fn + [expr.xreplace(repl1) - fn[0]]\n\n for n in np.arange(2, N_fn, dtype=int):\n repln = dict([[sp.cos(theta_s)**int(n), 1]])\n fn = fn + [(expr.xreplace(repln)).xreplace(repl0) - fn[0]]\n\n# # alternative way of extracting the coefficients:\n# theta_s = sp.Symbol('theta_s')\n# # collect terms with equal powers of cos(theta_s)\n# expr_sort = sp.collect(expr, sp.cos(theta_s), evaluate=False)\n#\n# # convert generated dictionary to list of coefficients\n# # the use of .get() is necessary for getting the dict-values since\n# # otherwise coefficients that are actually 0. 
would not appear\n# # in the list of fn-coefficients\n#\n# fn = [expr_sort.get(sp.cos(theta_s) ** n, 0.)\n# for n in range(self.SRF.ncoefs + self.V.ncoefs - 1)]\n\n return fn", "def evaluate_model(model, model_name, X_train, Y_train, X_test, ground_truth):\n\tprint(\"\t\tModel [\" + model_name + \"]\")\n\tmodel.fit(X_train, Y_train)\n\tY_pred = model.predict(X_test).astype(int)\n\tregression = np.sqrt(metrics.mean_squared_error(ground_truth, Y_pred))\n\treturn regression", "def forward(ctx,gamma_mu,xtilde,mode_training=True):\n # Device CPU/GPU\n # if device == \"cuda\":\n # dtype = torch.cuda.FloatTensor\n # else :\n dtype = torch.FloatTensor\n #initialize variables\n n,_,nx = xtilde.size()\n x1,x2,x3 = torch.zeros(n,1,1).type(dtype),torch.zeros(n,1,1).type(dtype),torch.zeros(n,1,1).type(dtype) \n crit,crit_compare = torch.zeros(n,1,1).type(dtype),torch.zeros(n,1,1).type(dtype)\n sol = torch.zeros(n,1,nx).type(dtype)\n kappa = torch.zeros(n,1,1).type(dtype)\n xmin = 0\n xmax = 1\n u = 1/nx**2*torch.linspace(1,nx,nx)\n norm_u = torch.norm(u)**2\n uTx = torch.matmul(xtilde,u).view(n,1,1)\n torch_u = u.view(1,1,-1)+torch.zeros(n,1,nx).type(dtype)#broadcast\n torch_one = torch.ones(n,1,1).type(dtype)\n #set coefficients\n a = -(xmin+xmax+uTx)\n b = xmin*xmax + uTx*(xmin+xmax) - 2*gamma_mu*norm_u\n c = gamma_mu*(xmin+xmax)*norm_u - uTx*xmin*xmax\n p = b - (a**2)/3\n q = c - a*b/3 + 2*(a**3)/27\n delta = (p/3)**3 + (q/2)**2\n #three cases depending on the sign of delta\n #########################################################################\n #when delta is positive\n ind = delta>0\n z1 = -q[ind]/2\n z2 = torch.sqrt(delta[ind])\n w = (z1+z2).sign() * torch.pow((z1+z2).abs(),1/3)\n v = (z1-z2).sign() * torch.pow((z1-z2).abs(),1/3) \n x1[ind] = w + v \n x2[ind] = -(w + v)/2 ; #real part of the complex solution\n x3[ind] = -(w + v)/2 ; #real part of the complex solution\n #########################################################################\n #when delta is 0\n ind = delta==0\n x1[ind] = 3 *q[ind] / p[ind]\n x2[ind] = -1.5 * q[ind] / p[ind]\n x3[ind] = -1.5 * q[ind] / p[ind]\n #########################################################################\n #when delta is negative\n ind = delta<0\n cos = (-q[ind]/2) * ((27 / torch.pow(p[ind],3)).abs()).sqrt() \n cos[cos<-1] = 0*cos[cos<-1]-1\n cos[cos>1] = 0*cos[cos>1]+1\n phi = torch.acos(cos)\n tau = 2 * ((p[ind]/3).abs()).sqrt() \n x1[ind] = tau * torch.cos(phi/3) \n x2[ind] = -tau * torch.cos((phi + np.pi)/3)\n x3[ind] = -tau * torch.cos((phi - np.pi)/3)\n #########################################################################\n x1 = x1-a/3\n x2 = x2-a/3\n x3 = x3-a/3\n # when gamma_mu is very small there might be some numerical instabilities\n # in case there are nan values, we set the corresponding pixels equal to 2*xmax\n # these values will be replaced by valid values at least once\n if (x1!=x1).any():\n x1[x1!=x1]=2*xmax\n if (x2!=x2).any():\n x2[x2!=x2]=2*xmax\n if (x3!=x3).any():\n x3[x3!=x3]=2*xmax\n #########################################################################\n #\n sol = xtilde + (x1 - uTx)/norm_u*torch_u\n kappa = x1\n #########################################################################\n #take x1\n p1 = sol\n uTp1 = torch.matmul(p1,u).view(n,1,1)\n ind = (uTp1>xmin)&(uTp1<xmax)\n crit[~ind] = np.inf\n crit[ind] = -(torch.log(uTp1[ind]-xmin)+torch.log(xmax-uTp1[ind]))\n crit = 0.5*torch.norm(p1-xtilde,dim=2).view(n,1,1)**2+gamma_mu*crit \n 
#########################################################################\n #test x2\n p2 = xtilde + (x2 - uTx)/norm_u*torch_u\n uTp2 = torch.matmul(p2,u).view(n,1,1) \n ind = (uTp2 >xmin)&(uTp2 <xmax)\n crit_compare[~ind] = np.inf\n crit_compare[ind] = -(torch.log(uTp2[ind]-xmin)+torch.log(xmax-uTp2[ind]))\n crit_compare = 0.5*torch.norm(p2-xtilde,dim=2).view(n,1,1)**2+gamma_mu*crit_compare\n # Select solution between p1 and p2\n ind = (crit_compare<=crit) + torch.zeros(n,1,nx)#broadcasting\n ind = ind>0\n sol[ind] = p2[ind]\n kappa[crit_compare<=crit]= x2[crit_compare<=crit]\n crit[crit_compare<=crit] = crit_compare[crit_compare<=crit]\n #########################################################################\n #test x3\n p3 = xtilde + (x3 - uTx)/norm_u*torch_u\n uTp3 = torch.matmul(p3,u).view(n,1,1)\n ind = (uTp3>xmin)&(uTp3<xmax)\n crit_compare[~ind] = np.inf\n crit_compare[ind] = -(torch.log(uTp3[ind]-xmin)+torch.log(xmax-uTp3[ind]))\n crit_compare = 0.5*torch.norm(p3-xtilde,dim=2).view(n,1,1)**2+gamma_mu*crit_compare\n # Select solution between p3 and (p2,p1)\n ind = (crit_compare<=crit)+ torch.zeros(n,1,nx)#broadcasting\n ind = ind>0\n sol[ind] = p3[ind]\n kappa[crit_compare<=crit]= x3[crit_compare<=crit]\n crit[crit_compare<=crit] = crit_compare[crit_compare<=crit]\n #########################################################################\n #test xmin+1e-10\n crit_compare = 0.5*torch.norm(xtilde-xmin-1e-10,dim=2).view(n,1,1)**2-gamma_mu*(\n torch.log(1e-10*torch_one)+torch.log((xmax-xmin-1e-10)*torch_one))\n ind = (crit_compare<=crit)+ torch.zeros(n,1,nx)#broadcasting \n ind = ind>0 \n sol[ind] = 0*sol[ind]+(xmin+1e-10)\n kappa[crit_compare<=crit]= uTx\n crit[crit_compare<=crit] = crit_compare[crit_compare<=crit]\n #########################################################################\n #test xmax-1e-10\n crit_compare = 0.5*torch.norm(xtilde-xmax+1e-10,dim=2).view(n,1,1)**2-gamma_mu*(\n torch.log(1e-10*torch_one)+torch.log((xmax-xmin-1e-10)*torch_one))\n ind = (crit_compare<=crit)+ torch.zeros(n,1,nx)#broadcasting\n ind = ind>0\n sol[ind] = 0*sol[ind]+(xmax-1e-10)\n kappa[crit_compare<=crit]= uTx[crit_compare<=crit]\n crit[crit_compare<=crit] = crit_compare[crit_compare<=crit]\n #########################################################################\n # when gamma_mu is very small and uTx is very close to one of the bounds,\n # the solution of the cubic equation is not very well estimated -> test xtilde\n #denom = (sol-xmin)*(sol-xmax)-2*gamma_mu -(sol-xtilde)*(xmin+xmax-2*sol)\n uTx_ok = (uTx>xmin)&(uTx<xmax)\n crit_compare[~uTx_ok] = np.inf\n crit_compare[uTx_ok] = -(torch.log(xmax-uTx[uTx_ok])+torch.log(uTx[uTx_ok]-xmin))\n crit_compare = gamma_mu*crit_compare\n ind = (crit_compare<crit)+ torch.zeros(n,1,nx)#broadcasting\n ind = ind>0\n sol[ind] = xtilde[ind]\n kappa[crit_compare<=crit]= uTx[crit_compare<=crit]\n \n if mode_training==True:\n ctx.save_for_backward(gamma_mu,kappa,uTx,sol)\n return sol", "def evaluate(self, radius, mtot, m0, alpha1, alpha2):\n model = mtot + m0 * (1 - np.exp(-alpha1*(radius/self.r0)**(-alpha2)))\n return model", "def test_coefficients_one_param_circuits(\n self, circuit, degree, expected_coeffs, use_broadcasting\n ):\n coeffs = coefficients(circuit, circuit.n_inputs, degree, use_broadcasting=use_broadcasting)\n assert np.allclose(coeffs, expected_coeffs)", "def calculator(**pars):\n # paying for parameter conversion each time to keep life simple, if not fast\n pars = revert_pars(model_info, pars)\n for k, v in pars.items():\n parts = 
k.split('.') # polydispersity components\n if len(parts) == 2:\n model.dispersion[parts[0]][parts[1]] = v\n else:\n model.setParam(k, v)\n return theory()", "def forward(self, state):\n\n _, _, theta, dtheta = (\n state[:, 0], state[:, 1], state[:, 2], state[:, 3])\n\n # predict a change in force\n # we only use relevant information to simplify the problem\n controller_input = torch.stack([\n torch.cos(theta),\n torch.sin(theta),\n dtheta\n ]).T.to(device)\n force = self.f(controller_input)[:, 0]\n\n # observe change in system\n du = self.cartpole(state, force)\n\n return du", "def evaluate(self):\n RV = -self.predict()\n RV += self.Ystar()\n return RV", "def __call__(self, params):\n # Construct model for given set of parameters\n mod = self.model(params)\n\n # Input into equation (11) from Anderson (1990)\n # But we want log-likelihood not negative log-likelihood (in MCMC)\n # and so we add the -1.0\n like = np.sum(np.log(mod) + (self.power / mod))\n return -1.0*like", "def remove_observation(X, y, mu, k, theta_nz, nz, K, verbose=False):\n \n if verbose: sss=0#print '\\ncompute path between t=1 and t=0' \n \n n, m = X.shape\n psi = np.atleast_2d(X[k,:]).T\n yb = y[k]\n \n X_nz = np.atleast_2d(X[:, nz])\n v1 = np.sign(theta_nz)\n b = np.dot(X.T, y)\n \n nbr = 0\n t = 1\n trans_type = -1\n trans_sign = 0\n trans_ind = -1\n if verbose: sss=0#print 'initial active features =', nz\n \n while t > 0:\n \n # update various parameters\n theta_nz = np.dot(K, b[nz] - mu*v1)\n eb = np.dot(psi[nz].T, theta_nz) - yb\n err = np.dot(X_nz, theta_nz) - y\n u = np.dot(K, psi[nz])\n alpha = np.dot(psi[nz].T, u)\n \n # find the breakpoints where coefficients become zero\n tmp = 1 + (eb * u / theta_nz - alpha)**(-1)\n tmp[tmp < 0] = 0\n t_0 = tmp**.5\n \n # find the breakpoints where new coefficients become active\n z = np.setdiff1d(np.arange(m), nz)\n X_z = np.atleast_2d(X[:, z])\n v = np.dot(np.dot(X_z.T, X_nz), u)\n Xe = np.dot(X_z.T, err)\n \n tmp = 1 + (eb*(psi[z] - v)/(-mu - Xe) - alpha)**(-1)\n tmp[tmp < 0] = 0\n t_1 = tmp**.5\n tmp = 1 + (eb*(psi[z] - v)/(mu - Xe) - alpha)**(-1)\n tmp[tmp < 0] = 0\n t_m1 = tmp**.5\n \n if trans_type > 0: t_0[-1] = 0\n t_0[t_0 >= t] = 0\n if len(t_0) > 0: \n t_0_argmax = t_0.argmax()\n t_0_max = t_0[t_0_argmax][0]\n else:\n t_0_max = 0\n if trans_type == 0:\n if trans_sign == 1: t_1[np.where(z == trans_ind)[0]] = 0\n else: t_m1[np.were(z == trans_ind)[0]] = 0\n t_1[t_1 >= t] = 0\n if len(t_1) > 0: \n t_1_argmax = t_1.argmax()\n t_1_max = t_1[t_1_argmax][0]\n else:\n t_1_min = 0\n t_m1[t_m1 >= t] = 0\n if len(t_m1) > 0: \n t_m1_argmax = t_m1.argmax()\n t_m1_max = t_m1[t_m1_argmax][0]\n else:\n t_m1_max = 0\n \n # compute the breakpoint\n t_br_all = np.array([t_0_max, t_1_max, t_m1_max])\n trans_type = t_br_all.argmax()\n t_br = t_br_all[trans_type]\n \n if t_br > 0:\n \n nbr += 1\n t = t_br\n \n if trans_type == 0: # an element of theta(t) goes to zero\n trans_ind = nz[t_0_argmax]\n trans_sign = v1[t_0_argmax]\n if verbose: sss=0#print 'transition point :: t = %.4f :: feature %d is inactive'%(t, trans_ind)\n nzind = range(len(nz))\n nzind=np.delete(nzind,np.where(nzind==nz.index(trans_ind)))#nzind.remove(nz.index(trans_ind))\n v1 = v1[nzind]\n nz=np.delete(nz,np.where(nz==trans_ind))#nz.remove(trans_ind)\n X_nz = X[:, nz]\n K = invupdatered(K, t_0_argmax)\n else: # new active element\n if trans_type == 1: # it is positive\n trans_ind = z[t_1_argmax]\n if verbose: sss=0#print 'transition point :: t = %.4f :: feature %d is positive'%(t, trans_ind)\n nz.append(trans_ind)\n 
v1 = np.vstack([v1, 1])\n else: # it is negative\n trans_ind = z[t_m1_argmax]\n if verbose: sss=0#print 'transition point :: t = %.4f :: feature %d is negative'%(t, trans_ind)\n nz.append(trans_ind)\n v1 = np.vstack([v1, -1])\n X_new = np.atleast_2d(X[:, trans_ind]).T\n K = invupdateapp(K, np.dot(X_nz.T, X_new), np.dot(X_new.T, X_nz), \n np.dot(X_new.T, X_new))\n X_nz = X[:, nz]\n \n else: # compute solution at mu1\n \n if verbose: sss=0#print 'compute solution at t = 0'\n t = 0\n theta_nz += eb * u / (1 - alpha)\n \n return theta_nz, nz, K, nbr", "def pcoef(\n xte,yte,rle,\n x_cre,y_cre,d2ydx2_cre,th_cre,\n surface):\n\n # Initialize coefficients\n coef = np.zeros(6)\n\n # 1st coefficient depends on surface (pressure or suction)\n if surface.startswith('p'):\n coef[0] = -sqrt(2*rle)\n else:\n coef[0] = sqrt(2*rle)\n \n # Form system of equations\n A = np.array([\n [xte**1.5, xte**2.5, xte**3.5, xte**4.5, xte**5.5],\n [x_cre**1.5, x_cre**2.5, x_cre**3.5, x_cre**4.5, \n x_cre**5.5],\n [1.5*sqrt(xte), 2.5*xte**1.5, 3.5*xte**2.5, \n 4.5*xte**3.5, 5.5*xte**4.5],\n [1.5*sqrt(x_cre), 2.5*x_cre**1.5, 3.5*x_cre**2.5, \n 4.5*x_cre**3.5, 5.5*x_cre**4.5],\n [0.75*(1/sqrt(x_cre)), 3.75*sqrt(x_cre), 8.75*x_cre**1.5, \n 15.75*x_cre**2.5, 24.75*x_cre**3.5]\n ]) \n\n B = np.array([\n [yte - coef[0]*sqrt(xte)],\n [y_cre - coef[0]*sqrt(x_cre)],\n [tan(th_cre*pi/180) - 0.5*coef[0]*(1/sqrt(xte))],\n [-0.5*coef[0]*(1/sqrt(x_cre))],\n [d2ydx2_cre + 0.25*coef[0]*x_cre**(-1.5)]\n ])\n \n # Solve system of linear equations\n try:\n X = np.linalg.solve(A,B)\n except:\n X = np.linalg.solve(A+(1e-12*np.eye(5)),B)\n\n\n # Gather all coefficients\n coef[1:6] = X[0:5,0]\n\n # Return coefficients\n return coef", "def model_fun(params, slope, x):\n w = params['w']\n t0 = params['t0']\n offset = params['offset']\n return offset + slope * jax.nn.sigmoid(jnp.dot(x, w) - t0)", "def test_coefficients_tf_interface(self):\n import tensorflow as tf\n\n qnode = qml.QNode(self.circuit, self.dev)\n\n weights = tf.Variable([0.5, 0.2])\n\n obtained_result = coefficients(partial(qnode, weights), 2, 1)\n\n assert np.allclose(obtained_result, self.expected_result)", "def find_results(data,weight_matrix,params):\r\n \r\n data = data.astype(np.float32)\r\n weight_matrix = weight_matrix.astype(np.float32)\r\n \r\n rank = params['rank']\r\n lamb = params['lambda']\r\n lr = params['lr']\r\n hidden_pairs = params['hidden_pairs']\r\n cost_functions.lamb = lamb\r\n\r\n f = cost_functions.frobenius \r\n V_masked = create_mask(data,hidden_pairs)\r\n bool_mask = V_masked.notnull().values\r\n tf_mask = tf.Variable(bool_mask)\r\n \r\n V = tf.constant(V_masked.values)\r\n laplacian_matrix = laplacian(weight_matrix).astype(np.float32)\r\n W, H = init_W_H(V.shape, rank=rank)\r\n WH = tf.matmul(W, H)\r\n L = tf.constant(laplacian_matrix)\r\n WTLW = tf.matmul(tf.matmul(tf.transpose(W), L), W)\r\n\r\n cost = f(V, tf_mask, WH, WTLW)\r\n train_step = tf.train.ProximalGradientDescentOptimizer(lr).minimize(cost)\r\n init = tf.global_variables_initializer()\r\n clip = get_clip(W, H)\r\n\r\n sess = tf.Session()\r\n sess.run(init)\r\n\r\n previous_cost = sess.run(cost)\r\n sess.run(train_step)\r\n sess.run(clip)\r\n initial_difference = previous_cost - sess.run(cost)\r\n\r\n matrix_errors = []\r\n graph_errors = []\r\n imputation_error = []\r\n\r\n learnt_W = sess.run(W).astype(np.float32)\r\n learnt_H = sess.run(H).astype(np.float32)\r\n imputation_norm = np.linalg.norm((data - learnt_W.dot(learnt_H))[~bool_mask])\r\n \r\n i = 0\r\n while 
np.isfinite(sess.run(cost)) and previous_cost-sess.run(cost) > TARGET_DIFFERENCE * initial_difference and i<=max_iterations:\r\n previous_cost = sess.run(cost)\r\n sess.run(train_step)\r\n sess.run(clip)\r\n matrix_errors.append(sess.run(cost_functions.matrix_cost))\r\n graph_errors.append(sess.run(cost_functions.graph_cost))\r\n i+=1\r\n\r\n learnt_W = sess.run(W).astype(np.float32)\r\n learnt_H = sess.run(H).astype(np.float32)\r\n\r\n imputation_norm = np.linalg.norm((data - learnt_W.dot(learnt_H))[~bool_mask])\r\n imputation_error.append(imputation_norm)\r\n\r\n return {'imputation_error':imputation_norm,'W':sess.run(W),'H':sess.run(H),\r\n 'graph_error':graph_errors,'matrix_error':matrix_errors,'imputation_error_list':imputation_error}", "def compute_vel(self, state, goal):\n\n \"\"\"\n Unicycle model control law:\n [v;w] = [kp 0 0; 0 ka kb]*[p;a;b]\n v = commanded linear velocity of robot\n w = commanded rotational velcoity of robot\n kp = gain parameter where kp > 0\n ka = gain parameter where ka - kp > 0\n kb = gain parameter where kb < 0\n p = distance from robot to goal\n a = angle between current robot heading and heading to goal\n b = error between current heading to goal and target end heading\n \"\"\"\n \n #print('state,goal,v,w')\n #print(state)\n #print(goal)\n\n xr = state[0][0] # m in world frame\n yr = state[1][0] # m in world frame\n thetar = state[2][0] #rads\n\n xg = goal[0] # m in world frame\n yg = goal[1] # m in world frame\n\n dy = yg - yr\n dx = xg - xr\n\n #print('')\n #print(state)\n #print(goal)\n \n # Calculate a\n a = -1*thetar + math.atan2(dy,dx)\n\n #print(a)\n\n if a > math.pi:\n a = a - 2*math.pi\n\n if a < -1*math.pi:\n a = a + 2*math.pi\n\n #print(a)\n\n # Set omega according to control law\n omega = self.ka*a\n if math.fabs(omega) > self.MAX_OMEGA:\n if omega > 0:\n omega = self.MAX_OMEGA\n else:\n omega = -1*self.MAX_OMEGA\n\n # Calculate P\n p = math.sqrt(dy*dy + dx*dx)\n\n # Set v \n v = self.kp*p\n if v > self.MAX_SPEED:\n v = self.MAX_SPEED\n\n # set the done value\n done = (p <= self.done_distance)\n\n #print(v)\n #print(omega)\n\n out_tuple = (v, omega, done)\n \n return out_tuple", "def _train_step(\n *,\n compute_phi,\n compute_psi,\n params,\n optimizer,\n optimizer_state,\n key,\n method,\n oracle_states,\n lissa_kappa,\n main_batch_size,\n covariance_batch_size,\n weight_batch_size,\n d,\n num_tasks,\n compute_feature_norm_on_oracle_states,\n sample_states,\n use_tabular_gradient = True,\n):\n source_states_key, task_key, key = jax.random.split(key, num=3)\n source_states, key = sample_states(source_states_key, main_batch_size)\n task = jax.random.choice(task_key, num_tasks, (1,))\n\n gradient, key = compute_gradient(\n source_states=source_states,\n task=task,\n compute_phi=compute_phi,\n compute_psi=compute_psi,\n params=params,\n key=key,\n method=method,\n oracle_states=oracle_states,\n lissa_kappa=lissa_kappa,\n main_batch_size=main_batch_size,\n covariance_batch_size=covariance_batch_size,\n weight_batch_size=weight_batch_size,\n d=d,\n compute_feature_norm_on_oracle_states=compute_feature_norm_on_oracle_states,\n sample_states=sample_states,\n use_tabular_gradient=use_tabular_gradient,\n )\n\n updates, optimizer_state = optimizer.update(gradient, optimizer_state)\n params = optax.apply_updates(params, updates)\n\n return { # pytype: disable=bad-return-type # numpy-scalars\n 'params': params,\n 'key': key,\n 'optimizer_state': optimizer_state,\n }", "def gen_phi_wrt_yt(z_div_L, phiw, fcn_D, vw_div_vw0, y_div_R_arr, phi_arr, 
cond_GT):\n phi_b = cond_GT['phi_bulk']\n ed = cond_GT['epsilon_d']\n \n phi_arr[0] = phiw\n int_INV_D_pre = 0.\n Ny = size(y_div_R_arr)\n for i in range(1, Ny):\n y2 = y_div_R_arr[i]; y1 = y_div_R_arr[i-1]\n dy = y2 - y1 \n yh = y1 + dy/2. # for RK4 method \n \n\n phi_1 = phi_arr[i-1]\n k1 = dy * cal_f_RK(y1, 0., phi_1, 0., int_INV_D_pre, vw_div_vw0, fcn_D, cond_GT)\n k2 = dy * cal_f_RK(y1, dy/2., phi_1, k1/2., int_INV_D_pre, vw_div_vw0, fcn_D, cond_GT)\n k3 = dy * cal_f_RK(y1, dy/2., phi_1, k2/2., int_INV_D_pre, vw_div_vw0, fcn_D, cond_GT)\n k4 = dy * cal_f_RK(y1, dy, phi_1, k3, int_INV_D_pre, vw_div_vw0, fcn_D, cond_GT)\n phi_2 = phi_1 + (1./6.)*(k1 + 2.*k2 + 2.*k3 + k4)\n phi_arr[i] = phi_2\n\n int_INV_D_pre += (dy/2.) * (1./fcn_D(phi_2, cond_GT) + 1./fcn_D(phi_1, cond_GT))\n\n return 0", "def fit_modelopts(xvalues, yvalues, d_sample, r1_func, nefv, fit_method):\n params1 = Parameters()\n params1.add('ds', value=d_sample, vary=False)\n params1.add('thetaS', value=0.00001, min=0, max=d_sample)\n params1.add('f', value=1000, min=3, max=300000)\n ## originally max was 1\n params1.add('phiS', value=0.00005, min=0, max=1)\n params1.add('w', value=2.0/3.0, vary=False)\n params1.add('a', value=4.0/3.0, vary=False)\n ##originally thetaP, phiP had no minima\n params1.add('thetaP', expr='(ds*(1 + phiS*w*f + a*thetaS)-thetaS)/ \\\n ((1 - a*ds)*(phiS*w*f + a*thetaS)-(a*ds))')\n params1.add('phiP', expr='phiS*thetaP/thetaS')\n params1.add('c', expr='w*phiS*f/(1+w*phiS*f+thetaS*a)')\n params1.add('dp', expr='thetaP/(1+a*thetaP)')\n params1.add('dc', expr='thetaS/(1+a*thetaS)')\n result = minimize(fcn2min, params1, args=(xvalues, yvalues, r1_func), method=fit_method, max_nfev=nefv)\n return result", "def make_model(self, incl, psi, PA=0.0, get_2d=True, int_kwargs={}, vel_kwargs={}, lw_kwargs=None):\n if PA: x_plane, y_plane = Rosenfeld2d._rotate_sky_plane(self.grid.XYZ[0], self.grid.XYZ[1], -PA)\n else: x_plane, y_plane = self.grid.XYZ[:2]\n\n cos_incl = np.cos(incl)\n sin_incl = np.sin(incl)\n y_plane_cos_incl = y_plane/cos_incl\n\n #**********************\n #ROSENFELD COEFFICIENTS\n fac = -2*np.sin(psi)**2\n A = np.cos(2*incl) + np.cos(2*psi)\n B = fac * 2*(sin_incl/cos_incl) * y_plane\n C = fac * (x_plane**2 + (y_plane_cos_incl)**2)\n t = self._get_t(A,B,C).T\n\n #****************************\n #ROSENFELD CONVERSION X<-->X'\n x_true_near = x_plane\n y_true_near = y_plane_cos_incl + t[1]*sin_incl\n \n x_true_far = x_plane\n y_true_far = y_plane_cos_incl + t[0]*sin_incl\n \n #np.hypot 2x faster than np.linalg.norm([x,y], axis=0)\n R_true_near = np.hypot(x_true_near, y_true_near) \n R_true_far = np.hypot(x_true_far, y_true_far)\n\n z_true_near = t[1] * cos_incl\n z_true_far = t[0] * cos_incl \n\n phi_true_near = np.arctan2(y_true_near, x_true_near) \n phi_true_far = np.arctan2(y_true_far, x_true_far) \n\n #****************************\n \n grid_true = {'near': [x_true_near, y_true_near, z_true_near, R_true_near, phi_true_near], \n 'far': [x_true_far, y_true_far, z_true_far, R_true_far, phi_true_far]}\n\n #*******************************\n #COMPUTE PROPERTIES ON TRUE GRID\n avai_kwargs = [vel_kwargs, int_kwargs, lw_kwargs]\n avai_funcs = [self.velocity_func, self.intensity_func, self.linewidth_func]\n true_kwargs = [isinstance(kwarg, dict) for kwarg in avai_kwargs]\n prop_kwargs = [kwarg for i, kwarg in enumerate(avai_kwargs) if true_kwargs[i]]\n prop_funcs = [func for i, func in enumerate(avai_funcs) if true_kwargs[i]]\n props = self._compute_prop(grid_true, prop_funcs, prop_kwargs)\n #Positive vel 
is positive along z, i.e. pointing to the observer, for that reason imposed a (-) factor to convert to the standard convention: (+) receding \n if true_kwargs[0]:\n ang_fac_near = -sin_incl * np.cos(phi_true_near)\n ang_fac_far = -sin_incl * np.cos(phi_true_far)\n props[0]['near'] *= ang_fac_near \n props[0]['far'] *= ang_fac_far\n \n #*************************************\n\n return [{side: prop[side].reshape(self.grid.Nodes[:2]) for side in ['near', 'far']} for prop in props]", "def test_formula():\n config = {\"samples\": {\"x1\": onp.ones((2, 10)), \"x2\": onp.ones((2, 10))}}\n\n class Model(Poisson):\n dv = \"y\"\n features = dict(\n x1=dict(transformer=1, prior=dist.Normal(0, 1)),\n x2=dict(transformer=2, prior=dist.Normal(0, 1)),\n )\n\n model = Model.from_dict(config)\n formula = model.formula\n expected = \"y = exp(\\n x1 * 1.00000(+-0.00000)\\n + x2 * 1.00000(+-0.00000)\\n)\"\n assert formula == expected", "def eval(self, theta, force=False):\n \n self.update_A_b(theta, force)\n \n if self.b.ndim != 2:\n raise ValueError(\"self.b.ndim not equal to 2.\")\n \n n,p = self.b.shape\n \n #x = numpy.zeros_like(self.b)\n #for k in range(p):\n # x[:,k] = self.solver.backsolve(self.b[:,k], transp='N')\n #return x\n \n # Using the multiple-r.h.s capability of solver.backsolve\n return self.solver.backsolve(self.b)", "def LinealizeCarModel(xb, u, dt, lr):\n\n x = xb[0]\n y = xb[1]\n v = xb[2]\n theta = xb[3]\n\n a = u[0]\n beta = u[1]\n\n t1 = -dt * v * sin(theta + beta)\n t2 = dt * v * cos(theta + beta)\n\n A = np.eye(xb.shape[0])\n A[0, 2] = dt * cos(theta + beta)\n A[1, 2] = dt * sin(theta + beta)\n A[3, 2] = dt * sin(beta) / lr\n A[0, 3] = t1\n A[1, 3] = t2\n\n B = np.zeros((xb.shape[0], u.shape[0]))\n B[2, 0] = dt\n B[0, 1] = t1\n B[1, 1] = t2\n B[3, 1] = dt * v * cos(beta) / lr\n\n tm = np.zeros((4, 1))\n tm[0, 0] = v * cos(theta + beta) * dt\n tm[1, 0] = v * sin(theta + beta) * dt\n tm[2, 0] = a * dt\n tm[3, 0] = v / lr * sin(beta) * dt\n C = xb + tm\n C = C - A * xb - B * u\n\n return A, B, C", "def predict(self, params, exog=None, *args, **kwargs):\n raise NotImplementedError # pragma: no cover", "def run():\n\n df = read_input() # the parameters\n df = add_time_period(df) # a feature\n df = is_holiday(df) # a feature\n df = scale_continous(df) # continous feature transformation\n df = encode_dummy(df) # categorical feature transformation\n df = order_columns(df) # ordering model inputs\n model = load_model() # the multiple linear regression model\n prediction = int(model.predict(df)) # form a prediction\n return prediction # return the prediction", "def V_phi(self, x):\n\n x = self.featureExtractor.getFeatures(x)\n\n x = torch.tensor(x).float()\n\n x = F.relu(self.linear(x))\n\n v = self.linear_v(x)\n\n return v", "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n 
self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. 
+ 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, 
a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def add_observation(X, y, mu, theta_nz, nz, K, verbose=False, showpath=False, fignum=2):\n \n if verbose: sss=0#print '\\ncompute path between t=0 and t=1'\n \n n, m = X.shape\n X_nz = np.atleast_2d(X[:, nz])\n v1 = np.sign(theta_nz)\n psi = np.atleast_2d(X[-1,:]).T\n b = np.dot(X.T, y)\n \n # update K to take into account added row\n K -= (1 / (1 + np.dot(psi[nz].T, np.dot(K, psi[nz])))) * np.dot(K, np.dot(psi[nz], np.dot(psi[nz].T, K)))\n \n nbr = 0\n t = 0\n trans_type = -1\n trans_sign = 0\n trans_ind = -1\n if verbose: sss=0#print 'initial active features =', nz\n if showpath:\n import matplotlib.pyplot as plt\n pth = np.linspace(0, 1, 100)\n thetapth = np.zeros((m, 100))\n fig = plt.figure(fignum)\n plt.clf()\n allbr = []\n \n while t < 1:\n \n # update various parameters\n theta_nz = np.dot(K, b[nz] - mu*v1)\n eb = np.dot(psi[nz].T, theta_nz) - y[-1]\n err = np.dot(X_nz, theta_nz) - y\n u = np.dot(K, psi[nz])\n alpha = np.dot(psi[nz].T, u)\n if len(nz) == 0: # because of numpy bug\n alpha = 0.\n eb = -y[-1]\n \n # find the breakpoints where coefficients become zero\n tmp = 1 + (eb * u / theta_nz - alpha)**(-1)\n tmp[tmp < 0] = 1\n t_0 = tmp**.5\n \n # find the breakpoints where new coefficients become active\n z = np.setdiff1d(np.arange(m), nz)\n X_z = np.atleast_2d(X[:, z])\n v = np.dot(np.dot(X_z.T, X_nz), u)\n Xe = np.dot(X_z.T, err)\n \n tmp = 1 + (eb*(psi[z] - v)/(-mu - Xe) - alpha)**(-1)\n tmp[tmp < 0] = 1\n t_1 = tmp**.5\n tmp = 1 + (eb*(psi[z] - v)/(mu - Xe) - alpha)**(-1)\n tmp[tmp < 0] = 1\n t_m1 = tmp**.5\n \n if trans_type > 0: t_0[-1] = 1\n t_0[t_0 <= t] = 1\n if len(t_0) > 0: \n t_0_argmin = t_0.argmin()\n t_0_min = t_0[t_0_argmin][0]\n else:\n t_0_min = 1\n if trans_type == 0:\n if trans_sign == 1: t_1[np.where(z == trans_ind)[0]] = 1\n else: t_m1[np.where(z == trans_ind)[0]] = 1\n 
t_1[t_1 <= t] = 1\n if len(t_1) > 0: \n t_1_argmin = t_1.argmin()\n t_1_min = t_1[t_1_argmin][0]\n else:\n t_1_min = 1\n t_m1[t_m1 <= t] = 1\n if len(t_m1) > 0: \n t_m1_argmin = t_m1.argmin()\n t_m1_min = t_m1[t_m1_argmin][0]\n else:\n t_m1_min = 1\n \n # compute the breakpoint\n t_br_all = np.array([t_0_min, t_1_min, t_m1_min])\n trans_type = t_br_all.argmin()\n t_br = t_br_all[trans_type]\n \n if t_br < 1:\n \n if showpath:\n if len(nz) > 0:\n inds = np.intersect1d(np.where(pth >= t)[0], np.where(pth < t_br)[0]) \n thetapth[np.ix_(nz, inds)] = np.tile(theta_nz, (1, len(inds))) - np.tile(u, (1, len(inds))) * \\\n np.tile(eb * (pth[inds]**2 - 1) / (1 + alpha*(pth[inds]**2 - 1)), (len(nz), 1))\n allbr.append(t_br)\n \n nbr += 1\n t = t_br\n \n if trans_type == 0: # an element of theta(t) goes to zero\n trans_ind = nz[t_0_argmin]\n trans_sign = v1[t_0_argmin]\n if verbose: sss=0#print 'transition point :: t = %.4f :: feature %d is inactive'%(t, trans_ind)\n\n \n nzind = range(len(nz))\n index=np.where(nz==trans_ind)[0][0]\n nzind=np.delete(nzind,np.where(nzind==index))#nzind.remove(index)\n v1 = v1[nzind]\n nz=np.delete(nz,np.where(nz==trans_ind)) \n \n \n \n X_nz = X[:, nz]\n K = invupdatered(K, t_0_argmin)\n else: # new active element\n if trans_type == 1: # it is positive\n trans_ind = z[t_1_argmin]\n if verbose: sss=0#print 'transition point :: t = %.4f :: feature %d is positive'%(t, trans_ind)\n nz=np.append(nz,trans_ind)\n #nz.append(trans_ind)\n v1 = np.vstack([v1, 1])\n else: # it is negative\n trans_ind = z[t_m1_argmin]\n if verbose: sss=0#print 'transition point :: t = %.4f :: feature %d is negative'%(t, trans_ind)\n nz=np.append(nz,trans_ind)\n #nz.append(trans_ind)\n v1 = np.vstack([v1, -1])\n X_new = np.atleast_2d(X[:, trans_ind]).T\n K = invupdateapp(K, np.dot(X_nz.T, X_new), np.dot(X_new.T, X_nz), \n np.dot(X_new.T, X_new))\n if len(nz) == 1: K = 1 / np.dot(X_new.T, X_new) # because of numpy bug\n X_nz = X[:, nz]\n \n else: # compute solution at mu1\n \n if verbose: sss=0#print 'compute solution at t = 1'\n if showpath and len(nz) > 0:\n inds = np.intersect1d(np.where(pth >= t)[0], np.where(pth <= 1)[0])\n thetapth[np.ix_(nz, inds)] = np.tile(theta_nz, (1, len(inds))) - np.tile(u, (1, len(inds))) * \\\n np.tile(eb * (pth[inds]**2 - 1) / (1 + alpha*(pth[inds]**2 - 1)), (len(nz), 1))\n t = 1\n \n if showpath:\n fig = plt.figure(fignum)\n leg = []\n for i in range(m):\n plt.plot(pth, thetapth[i, :])\n leg.append(r'$\\theta_%d(t)$'%(i+1))\n plt.plot(pth, np.zeros(len(pth),), 'k')\n plt.xlabel(r'$t$', fontsize=16)\n plt.title(r'Step 2: homotopy in $t$', fontsize=16)\n plt.legend(leg, loc='best')\n plt.plot(allbr, np.zeros(nbr), 'ko')\n plt.show()\n \n return theta_nz, nz, K, nbr", "def solve_model():\n from scipy.integrate import ode\n # Initialise constants and state variables\n (init_states, constants) = initConsts()\n\n # Set timespan to solve over\n voi = linspace(0, 100, 5000)\n\n # Construct ODE object to solve\n r = ode(computeRates)\n r.set_integrator('vode', method='bdf', atol=1e-06, rtol=1e-06, max_step=1)\n r.set_initial_value(init_states, voi[0])\n r.set_f_params(constants)\n\n # Solve model\n states = array([[0.0] * len(voi)] * sizeStates)\n states[:,0] = init_states\n for (i,t) in enumerate(voi[1:]):\n if r.successful():\n r.integrate(t)\n states[:,i+1] = r.y\n else:\n break\n\n # Compute algebraic variables\n algebraic = computeAlgebraic(constants, states, voi)\n return (voi, states, algebraic)", "def fit(self, Y, delta, model='C-mix'):\n verbose = self.verbose\n 
max_iter = self.max_iter\n print_every = self.print_every\n tol = self.tol\n self._start_solve()\n\n # Split at random the sample with probability 0.5\n n_samples = Y.shape[0]\n pi = 0.5\n Z = np.random.binomial(1, pi, size=n_samples)\n p1 = 1. / np.mean(Y[(delta == 1) + (Z == 1)])\n p0 = 1. / np.mean(Y[(delta == 1) + (Z == 0)])\n if p0 > p1:\n tmp = p0\n p0 = p1\n p1 = tmp\n\n if model == 'CURE':\n p0 = 0\n pc = 1. / np.mean(Y[delta == 0])\n\n log_lik = self.log_lik(Y, delta, np.array([p0, p1, pc, pi]))\n obj = -log_lik\n rel_obj = 1.\n self.history.update(n_iter=0, obj=obj, rel_obj=rel_obj)\n if verbose:\n self.history.print_history()\n\n for n_iter in range(max_iter):\n if n_iter % print_every == 0:\n self.history.update(n_iter=n_iter, obj=obj,\n rel_obj=rel_obj)\n if verbose:\n self.history.print_history()\n # E-Step\n a = ((1. - p1) ** (Y - 1.) * p1) ** delta * ((1. - p1) ** Y) ** (\n 1. - delta) * (1. - pi)\n b = ((1. - p0) ** (Y - 1.) * p0) ** delta * ((1. - p0) ** Y) ** (\n 1. - delta) * pi\n q = a / (a + b)\n # M-Step\n if model == 'C-mix':\n p0 = ((1. - q) * delta).mean() / ((1. - q) * Y).mean()\n p1 = (delta * q).mean() / (q * Y).mean()\n pi = 1. - np.mean(q)\n prev_obj = obj\n log_lik = self.log_lik(Y, delta, np.array([p0, p1, pc, pi]))\n obj = -log_lik\n rel_obj = abs(obj - prev_obj) / abs(prev_obj)\n if (n_iter > max_iter) or (rel_obj < tol):\n break\n\n n_iter += 1\n self.history.update(n_iter=n_iter, obj=obj, rel_obj=rel_obj)\n if verbose:\n self.history.print_history()\n self._end_solve()\n self.p0 = p0\n self.p1 = p1\n self.pc = pc\n self.pi = pi\n self.coeffs[:] = np.array([p0, p1, pc, pi])", "def iterateRegression(self, plot=True, estimate=False, init_params=None):\r\n # Create empty arrays to store values\r\n F = np.zeros(len(self.y))\r\n a_b = np.zeros((2,len(self.y))) # alphas and betas\r\n v = np.zeros(len(self.y))\r\n P = np.zeros((len(self.y),2,2))\r\n # Initialize at the initial values parsed to the class\r\n if estimate == True:\r\n self.T = np.array([[init_params[0],0],[0,1]])\r\n self.c = np.vstack(([init_params[1],0]))\r\n self.R = np.vstack(([init_params[2]],[0]))\r\n self.a_start = np.vstack(([self.alpha_mean], [init_params[3]]))\r\n P[0,:,:] = self.P_start\r\n a_b[:,0:1] = self.a_start\r\n # Iterate\r\n for t in range(0, len(self.y) - 1):\r\n # Slightly different updating equations for KF since we now have regression coefficient\r\n v[t] = self.y[t] - np.dot(self.Z[:,t:t+1].T,a_b[:,t]) - self.d\r\n F[t] = np.dot(np.dot(self.Z[:,t:t+1].T, P[t]),self.Z[:,t:t+1]) + self.H\r\n a_t = a_b[:,t:t+1] + np.dot(P[t],self.Z[:,t:t+1] / F[t]) * v[t]\r\n a_b[:,t + 1:t+2] = np.dot(self.T, a_t) + self.c\r\n P_t = P[t] - np.dot((np.dot(P[t],self.Z[:,t:t+1]) / F[t]),np.dot(self.Z[:,t:t+1].T, P[t]))\r\n P[t + 1,:,:] = np.dot(np.dot(self.T, P_t),self.T.transpose()) + np.dot(self.R * self.Q,self.R.transpose())\r\n F[-1] = np.dot(np.dot(self.Z[:,-1:].T, P[-1]),self.Z[:,-1:]) + self.H\r\n v[-1] = self.y[-1] - a_b[0,-1:]\r\n # Obtain std error of prediction form variance\r\n std = np.sqrt((P[:,0,0] * self.H) / (P[:,0,0] + self.H))\r\n return a_b, std, P, v, F" ]
[ "0.60838443", "0.5885254", "0.5861287", "0.5416153", "0.52906", "0.5278243", "0.52711785", "0.518602", "0.5174683", "0.51556545", "0.51364434", "0.51288843", "0.5110274", "0.5109226", "0.5090163", "0.5054213", "0.5048513", "0.50369865", "0.5005936", "0.49957308", "0.4991553", "0.49838448", "0.4982643", "0.49676222", "0.49631804", "0.49600056", "0.49586987", "0.4936876", "0.4934503", "0.49343213", "0.49202204", "0.49048644", "0.4899695", "0.48904032", "0.4888713", "0.48865688", "0.48834363", "0.48763543", "0.48708144", "0.4864828", "0.48637417", "0.48636007", "0.48623723", "0.48616165", "0.4837243", "0.48220304", "0.48180395", "0.4816326", "0.48105806", "0.4806758", "0.48031217", "0.47851545", "0.47832578", "0.47798267", "0.47727543", "0.47711527", "0.47711155", "0.4770916", "0.4764014", "0.47598177", "0.47585326", "0.47537023", "0.47531205", "0.47491145", "0.47467765", "0.4743654", "0.47426254", "0.47420627", "0.47278753", "0.4723036", "0.47225302", "0.4720277", "0.47193414", "0.47125617", "0.47124842", "0.47119918", "0.47113732", "0.4702666", "0.470169", "0.46987072", "0.46969214", "0.4694641", "0.46945053", "0.4693035", "0.46875405", "0.46864355", "0.46863133", "0.46843287", "0.46824956", "0.46741262", "0.46706834", "0.46700764", "0.46689254", "0.46677688", "0.46649107", "0.46627268", "0.46562892", "0.4654896", "0.46484956", "0.46441472", "0.46427402" ]
0.0
-1
Given the coefficients, evaluate the gradient of the model at a specific direction (theta, phi); returns a 2x1 gradient
def even_pODF_opt_grad(angles, *args):
    qpoints = args[0]
    c = args[1]
    N = args[2]

    n, m = qpoints.shape

    theta, phi = angles[0], angles[1]
    omega = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi), np.cos(theta)])

    # Partial in theta direction
    sum = 0.0
    for i in range(n):
        mu = np.dot(omega, qpoints[i, :])
        mu = np.clip(mu, -1.0, 1.0)
        r_i, theta_i, phi_i = car2sph(qpoints[i, 0], qpoints[i, 1], qpoints[i, 2])
        sum += c[i]*even_kernel_der(mu, N)*(-np.cos(theta_i)*np.cos(theta) + np.cos(phi - phi_i)*np.cos(theta)*np.sin(theta_i))
    p_theta = sum

    # Partial in phi direction
    sum = 0.0
    for i in range(n):
        mu = np.dot(omega, qpoints[i, :])
        mu = np.clip(mu, -1.0, 1.0)
        r_i, theta_i, phi_i = car2sph(qpoints[i, 0], qpoints[i, 1], qpoints[i, 2])
        sum += c[i]*even_kernel_der(mu, N)*(-np.sin(phi - phi_i)*np.sin(theta)*np.sin(theta_i))
    p_phi = sum

    return -(N + 1)**2 * np.array([p_theta, p_phi])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gradient(theta, x, y):\n m = len(y)\n n = len(theta)\n z = theta.dot(x.T)\n grad = np.zeros(n)\n for i in xrange(m):\n grad += (g(z[i]) - y[i]) * x[i]\n return 1. / m * grad", "def compute_gradient(theta, X, y):\n m = X.shape[0]\n grad_theta = np.dot(X.transpose(), (np.dot(X, theta) - y)) / m\n #print theta, grad_theta, objective_function(theta, X, y)\n return grad_theta", "def gradient(theta, X, y, learning_rate):\n m = len(y)\n\n theta = theta.reshape((-1,1))\n grad = np.zeros(theta.shape)\n h = sigmoid(np.dot(X, theta)) \n \n grad = np.dot((h-y).T, X)/m\n grad = grad.T\n grad[1:] += (learning_rate/m)*theta[1:]\n return grad", "def gradientFunctionReg(theta, X, y, Lambda):\n m = len(y) # number of training examples\n grad = np.zeros(theta.shape[0])\n theta = np.transpose(theta)\n sum_1 = 0\n X = X.values\n y = y.values\n #calcuate the theta_0 \n# ====================== YOUR CODE HERE ======================\n# Instructions: Compute the gradient of a particular choice of theta.\n# Compute the partial derivatives and set grad to the partial\n# derivatives of the cost w.r.t. each parameter in theta\n for i in range(theta.shape[0]):\n if i == 0:\n for j in range(m):\n sum_1 += (sigmoid(np.dot(X[j],theta)) - y[j]) * X[j,i]\n else:\n for j in range(m):\n sum_1 += (sigmoid(np.dot(X[j],theta)) - y[j]) * X[j,i] + Lambda*theta[i]\n grad[i] = sum_1/m\n sum_1 = 0\n\n# =============================================================\n\n return grad", "def get_gradient(phi, pred, t, dot_product, weight, reg= 1, regression= \"logistic\"):\n if regression == \"logistic\":\n gradient = np.matmul(phi.T, pred - t)\n elif regression == \"probit\":\n R = np.eye(pred.shape[0])\n for i in range(pred.shape[0]):\n y_n = pred[i,0]\n dotp = dot_product[i, 0]\n pdf = norm.pdf(dotp)\n R[i,i] = pdf/(y_n*(1-y_n) + TOLERANCE)\n gradient = np.matmul(np.matmul(phi.T, R), pred-t)\n elif regression == \"multiclass\":\n gradient = np.matmul(phi.T, pred - t)\n\n # Add regularization\n gradient += weight/ reg\n return gradient", "def gradientFunction(theta, X, y):\n y = y[:, 0]\n m = y.shape # number of training samples\n grad = X.T.dot(sigmoid(theta.dot(X.T))-1*y)\n grad /= m\n return grad", "def calc_grad(X, Y, theta):\n m, n = X.shape\n\n margins = Y * X.dot(theta)\n probs = 1. / (1 + np.exp(margins))\n grad = -(1./m) * (X.T.dot(probs * Y))\n\n return grad", "def gradient_calculation(self, coefficients, x_values, y_values):\n gradient_coeffs = np.array([0]*len(coefficients))\n\n for xi in range(len(x_values)):\n x = x_values[xi]\n power_array = np.power(\n np.array([x]*len(coefficients)), np.array(range(len(coefficients))))\n\n diff = (2/len(x_values))*(self.f(x, coefficients) - y_values[xi])\n gradient_coeffs = gradient_coeffs + np.multiply(diff, power_array)\n\n return gradient_coeffs", "def gradient_function(theta, X, y):\n\n grad = None\n #######################################################################\n # TODO: #\n # Compute the gradient for a particular choice of theta. #\n # Compute the partial derivatives and set grad to the partial #\n # derivatives of the cost w.r.t. 
each parameter in theta #\n # #\n #######################################################################\n \n theta = theta[:, np.newaxis]\n \n thetatrans = theta.T\n Xtrans = X.T\n \n MulThetaX = np.dot(thetatrans, Xtrans)\n \n h = sigmoid(MulThetaX)\n \n grad = (y - h) * Xtrans\n\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n return grad", "def gradient_descent(self, X, theta, Y, m):\n\n Z = X.dot(theta)\n H = Predict.g(Z)\n gradient = np.dot(X.T, (H - Y)) / m\n return self.alpha * gradient", "def gradientFunctionReg(theta, X, y, Lambda): \n y = np.squeeze(y)\n m = y.shape # number of training samples\n grad = X.T.dot(sigmoid(theta.dot(X.T))-1*y)\n grad[1:] = grad[1:] + Lambda*theta[1:]\n grad /= m\n\n return grad", "def gradient(self, theta):\n pass", "def gradient(self, theta):\n pass", "def lr_cost_function_grad(theta: np.ndarray, X: np.ndarray, y: np.ndarray, l: float) -> np.ndarray:\n # Initialize some useful values\n m = len(y) # number of training examples\n\n # You need to return the following variable correctly.\n grad = np.zeros(theta.shape)\n\n # ====================== YOUR CODE HERE ======================\n # Instructions: Compute the partial derivatives and set grad to the partial\n # derivatives of the cost w.r.t. each parameter in theta.\n\n # =============================================================\n return grad", "def gradient(self, X, V, W, Y):\n one, d_plus_one = X.shape\n K, H_plus_one = W.shape\n d = d_plus_one - 1\n H = H_plus_one - 1\n\n Z, Yhat = self.forward(X, V, W)\n assert one == 1\n x = X\n y = Y\n z = Z.ravel()\n yhat = Yhat.ravel()\n\n # Update W\n # grad__L__yhat = (yhat - y) / np.clip(yhat * (1 - yhat), EPSILON, inf)\n # grad__L__z[:] = 0.0\n # for k in range(K):\n # grad__yhat_k__W_k = z * yhat[k] * (1 - yhat[k])\n # # Last element corresponds to constant offset 1 appended to z\n # # vector; it does not change / has no derivative.\n # grad__yhat_k__z = W[k, :-1] * yhat[k] * (1 - yhat[k])\n # grad__L__z += grad__L__yhat[k] * grad__yhat_k__z\n # W[k, :] -= self.learning_rate * grad__L__yhat[k] * grad__yhat_k__W_k\n grad__L__z = (W.T * (yhat - y)).sum(axis=1)\n zz = z.reshape((1, H + 1)).repeat(K, 0)\n grad__L__W = diag(yhat - y) @ zz\n\n # Update V\n # for h in range(H):\n # grad__z_h__V_h = x * (1 - z[h] ** 2)\n # grad__L__V_h = grad__L__z[h] * grad__z_h__V_h\n # V[h, :] -= self.learning_rate * grad__L__V_h\n xx = x.reshape((1, d + 1)).repeat(H + 1, 0)\n grad__L__V = diag((1 - z ** 2) * grad__L__z) @ xx\n\n return grad__L__V, grad__L__W", "def gradient_descent(x, y, theta=[[0], [0]]):\n m = y.size\n j_history = []\n for i in range(ITERATIONS):\n h = x.dot(theta)\n theta = theta - (ALPHA / m) * (x.T.dot(h - y))\n j_history.append(compute_cost(x, y, theta))\n return theta, j_history", "def approx_grad(theta, X, y):\n grad_a = np.array([(cost(theta + e, X, y) - cost(theta - e, X, y)) / (2 * 1e-5)\n for e in np.identity(len(theta)) * 1e-5])\n return grad_a", "def calculate_gradient(y, tx, w): \n return tx.T@(sigmoid(tx@w)-y)", "def logit_cost_grad(self, theta, X, y):\n\n grad = np.zeros(len(theta))\n\n ### YOUR CODE HERE\n sig = utils.sigmoid(theta)\n # sig = np.subtract(sig, y)\n sig = sig - y\n grad = np.dot(X.T, sig) + 2 * self.params['lamb'] * self.regularizer[1](self.weights)\n ### END YOUR CODE\n\n return grad", "def calculate_model_gradient(logger, model_1, model_2):\n model_1_parameters = 
list(dict(model_1.state_dict()))\n model_2_parameters = list(dict(model_2.state_dict()))\n\n return calculate_parameter_gradients(logger, model_1_parameters, model_2_parameters)", "def apply_gradient(params: torch.Tensor, grads: torch.Tensor, lr: float) -> torch.Tensor:\n params_prime = params + lr * grads\n return params_prime", "def compute_loss_gradient(theta_vector, *args):\n\n psi = args[0] # feed psi as a parameter\n circ_depth = args[1]\n num_qbits = args[2]\n theta = np.reshape(theta_vector, (circ_depth, num_qbits)) # reshapes the flat theta vector\n fidelity = get_fidelity(theta, psi)\n\n # the derivative of the loss wrt fidelity\n dl_df = -0.5 * fidelity ** (-0.5)\n\n df_dtheta = [] # a list of partial derivatives of the fidelity wrt the theta parameters\n\n for index in range(len(theta_vector)):\n layer_index = index // num_qbits\n qbit_index = index % num_qbits\n\n theta_plus = np.copy(theta)\n theta_plus[layer_index][qbit_index] += np.pi / 2 # added pi/2 to the ith theta parameter\n\n theta_minus = np.copy(theta)\n theta_minus[layer_index][qbit_index] -= np.pi / 2 # subtracted pi/2 to the ith theta parameter\n\n df_dtheta_i = 0.5 * (get_fidelity(theta_plus, psi) - get_fidelity(theta_minus, psi)) # ith derivative\n df_dtheta.append(df_dtheta_i)\n\n df_dtheta = np.array(df_dtheta)\n dl_dtheta = dl_df * df_dtheta # chain rule to get partial derivative of loss wrt theta parameters\n\n return dl_dtheta", "def calculate_gradient(y, tx, w):\n return tx.T.dot(sigmoid(tx.dot(w))-np.reshape(y,(len(y),1)))", "def gradient(x, y, theta):\n if x.ndim == 1:\n x = x[:, np.newaxis]\n if y.ndim == 2 and y.shape[1] == 1:\n y = y.flatten()\n if theta.ndim == 2 and theta.shape[1] == 1:\n theta = theta.flatten()\n\n if (x.size == 0 or y.size == 0 or theta.size == 0\n or x.ndim != 2 or y.ndim != 1 or theta.ndim != 1\n or x.shape[0] != y.shape[0] or x.shape[1] + 1 != theta.shape[0]):\n return None\n\n x_padded = np.c_[np.ones(x.shape[0]), x]\n\n return x_padded.T.dot(x_padded.dot(theta) - y) / y.shape[0]", "def calc_gradient(self, W, X, y, reg):\n\n N = X.shape[0]\n grad_W = np.zeros_like(W)\n I = np.ones((1,10))\n score = np.dot(X, W) # (N, C)\n out = np.exp(score-np.dot(np.max(score, axis=1, keepdims=True ),I))\n #print(\"out\", out)\n out /= np.sum(out, axis=1, keepdims=True) # (N, C)\n \n dout = np.copy(out) # (N, C)\n dout[np.arange(N), y] -= 1\n grad_W = np.dot(X.T, dout) # (D, C)\n grad_W /= N\n #grad_W += reg * W\n \n return grad_W", "def gradientDescent(self,X, y, theta): \n # number of instances\n m = len(y)\n J_history = np.zeros((self.NUM_ITERS,1))\n for i in range(self.NUM_ITERS):\n h = self.sigmoid(X@theta)\n grad = 1 / m * X.T @ (h - y)\n theta = theta - self.ALPHA * grad \n J_history[i] = self.costFunction(theta, X, y)\n \n \n return theta, J_history", "def check_gradient(self, x, y):\n x = x.transpose()\n y = y.transpose()\n layers_copy = deepcopy(self.layers)\n epsilon = 10 ** -4\n a, layer = self.forward_propagation(x)\n delta = self.calculate_delta(a, y, layer)\n self.backpropagation(delta=delta, theta=layer.theta)\n previous_layer_output = x\n for layer in self.layers:\n theta_copy = deepcopy(layer.theta)\n real_theta_size = theta_copy.shape\n delta = layer.delta\n dc_dtheta = np.outer(previous_layer_output, delta).transpose()\n previous_layer_output = layer.a\n R, C = theta_copy.shape\n for i in range(R):\n for j in range(C):\n theta_plus = deepcopy(theta_copy)\n theta_plus[i, j] += epsilon\n layer.theta = theta_plus\n a_plus, l_plus = self.forward_propagation(x)\n err_plus = 
self.calculate_loss(a_plus, y)\n theta_minus = deepcopy(theta_copy)\n theta_minus[i, j] -= epsilon\n layer.theta = theta_minus\n a_minus, l_minus = self.forward_propagation(x)\n err_minus = self.calculate_loss(a_minus, y)\n limit = (err_plus - err_minus)/(2*epsilon)\n grad_diff = abs(dc_dtheta[i,j] - limit)\n assert grad_diff < 10 ** -6, f\"Diff {grad_diff} is too big.\"\n layer.theta = theta_copy", "def gradient_descent(features, values, theta, alpha, num_iterations):\r\n\r\n m = len(values)\r\n cost_history = []\r\n\r\n for i in range (num_iterations):\r\n \r\n h = numpy.dot(features, theta)\r\n \r\n theta = theta - alpha / m * numpy.dot((h-values),features)\r\n \r\n cost = compute_cost(features, values, theta)\r\n \r\n cost_history.append(cost)\r\n\r\n return theta, pandas.Series(cost_history) # leave this line for the grader\r", "def check_gradients(model, X, Y, eps=1e-5):\n\n # Import methods from the model\n layers = model.layers\n regularizer = model.regularizer\n propagate_forward = model.propagate_forward\n compute_cost = model.compute_cost\n propagate_backward = model.propagate_backward\n\n # Dirty regularizers such as dropout may yield errors\n assert(regularizer is None)\n for layer in layers:\n assert(not isinstance(layer, Dropout))\n assert(not isinstance(layer, BatchNorm))\n\n # Get params currently stored in the layers (for reset)\n params = roll_params(layers, 'params')\n grads = roll_params(layers, 'grads')\n\n # Perform one iteration on X and Y to compute and store new gradients\n out = propagate_forward(X)\n propagate_backward(out, Y)\n\n # Extract new gradients and roll them into a vector\n param_theta = roll_params(layers, 'params')\n grad_theta = roll_params(layers, 'grads')\n\n # Initialize vector of the same shape for approximated gradients\n num_params = len(param_theta)\n grad_approx = np.zeros(num_params)\n\n # Repeat for each number in the vector\n for i in range(num_params):\n # Use two-sided Taylor approximation which is 2x more precise than one-sided\n # Add epsilon to the number\n # Note: Epsilon higher than 1e-5 likely to produce numeric instability\n theta_plus = np.copy(param_theta)\n theta_plus[i] = theta_plus[i] + eps\n # Calculate new cost\n unroll_params(theta_plus, layers, 'params')\n out_plus = propagate_forward(X, predict=True)\n cost_plus = compute_cost(out_plus, Y)\n\n # Subtract epsilon from the number\n theta_minus = np.copy(param_theta)\n theta_minus[i] = theta_minus[i] - eps\n # Calculate new cost\n unroll_params(theta_minus, layers, 'params')\n out_minus = propagate_forward(X, predict=True)\n cost_minus = compute_cost(out_minus, Y)\n\n # Approximate the gradient, error is eps^2\n grad_approx[i] = (cost_plus - cost_minus) / (2 * eps)\n\n # Reset model params\n unroll_params(params, layers, 'params')\n unroll_params(grads, layers, 'grads')\n\n # Compute relative error\n relative_error = calculate_diff(grad_theta, grad_approx)\n\n return relative_error", "def calculate_gradient(y, tx, w):\n\n\tret = tx.T.dot(sigmoid(np.dot(tx, w)) - y)\n\treturn ret", "def learning_by_gradient_descent(y, tx, w, gamma):\n loss = calculate_loss(y,tx,w)\n grad = calculate_gradient(y,tx,w)\n w_new = w - gamma*grad\n #grad is for debugging purpose\n return loss, w_new,grad", "def gradient_descent(features, values, theta, alpha, num_iterations):\n m = len(values)\n cost_history = []\n\n for i in range(num_iterations):\n predicted_values = np.dot(features, theta)\n delta = alpha / m * np.dot((predicted_values - values), features)\n theta = theta - delta\n cost = 
compute_cost(features, values, theta)\n cost_history.append(cost)\n return theta, pandas.Series(cost_history)", "def gradient_descent(features, values, theta, alpha, num_iterations):\r\n \r\n m = len(values)\r\n cost_history = []\r\n\r\n for i in range(num_iterations):\r\n # your code here\r\n cost = compute_cost(features, values, theta)/(2.0*m)\r\n cost_history.append([cost])\r\n \r\n error = features.dot(theta) - values\r\n error = np.reshape(error,(error.shape[0], 1))\r\n errorWeighted = features*error\r\n errorSum = (np.sum(errorWeighted,0))/(m*1.0)\r\n theta = theta - alpha*errorSum \r\n \r\n return theta, pandas.Series(cost_history)", "def gradient(self, theta):\n a = -(6 * self.scale ** 2)\n b = 3 * self.scale ** 2 + np.exp(2 * theta)\n b *= np.log(3 * self.scale ** 2 * np.exp(-2 * theta) + 1)\n return a / b", "def _calc_gradients(self, X, y, y_hat):\n # calculate gradient of weight and bias\n grad_b = 2 * np.mean(y_hat - y)\n grad_W = 2 * np.mean(np.matmul((y_hat - y), X))\n return grad_W, grad_b", "def gradient(theta, X, y, Lambda=0.0):\n m = X.shape[0] # number of samples\n\n h = hypothesis(theta, X)\n\n if Lambda:\n g_0 = (1/m)*(X.T@(h - y))[0]\n g_1 = (1/m)*(X.T@(h - y))[1:] + (Lambda/m)*theta[1:] # skip theta-0\n \n return np.append(g_0, g_1)\n else:\n return (1/m)*(X.T@(h - y))", "def gradient_descent(\n self,\n coeffs, \n x_values, y_values):\n old_loss = self.old_loss\n mse = self.loss\n\n for i in range(self.steps):\n new_loss = self.loss_mse(coeffs, x_values, y_values)\n mse = np.append(mse, new_loss)\n if abs(new_loss - old_loss) <= self.early_stop:\n print(f\"Early cut off, difference of losses between steps is less that {self.early_stop}.\")\n break\n old_loss = new_loss\n\n coeffs = coeffs - (self.learning_rate)*self.gradient_calculation(coeffs, x_values, y_values)\n\n mse = np.append(mse, self.loss_mse(coeffs, x_values, y_values))\n self.coefficients = coeffs\n self.loss = mse", "def gradient(self):\n result = np.zeros(len(self.variables))\n result[self.bivariateGradInd] = (self.shape-1)/self.variable - self.rate\n return result", "def gradient(data_x, data_y, parameters):\n return data_x.T @ (data_x @ parameters - data_y) / data_x.shape[0]", "def learning_by_gradient_descent(y, tx, w, gamma):\n loss = calculate_loss(y,tx,w)\n grad = calculate_gradient(y,tx,w)\n w = w-gamma*grad\n return w, loss", "def gradientdescent(cost_func, theta, args=(), delta_func = 0):\n step = 1\n old_cost = 0\n while True:\n theta_old = theta.copy()\n cost = cost_func(theta, *args)\n delta = delta_func(theta, *args)\n theta = theta - step * delta\n if cost > old_cost and old_cost != 0:\n step = step*0.7\n if np.allclose(theta_old, theta):\n break\n old_cost = cost\n return theta", "def gradient_descent(features, values, theta, alpha, num_iterations):\n \n # number of points\n npoints = len(values)\n \n # intialize cost history\n cost_history = []\n \n # num_interations iterations\n for iiter in range(num_iterations):\n \n # compute and store cost\n cost = compute_cost(features, values, theta)\n cost_history.append(cost)\n \n # update values of theta\n values_predicted = np.dot(features, theta)\n theta = theta + (alpha/npoints)*(np.dot(values - values_predicted,features))\n \n return theta, pandas.Series(cost_history)", "def compute_gradient(self, X, y, weights):\n sigmoid = self.sigmoid(np.dot(X, weights))\n return np.dot(X.T, y - sigmoid)", "def gradient_descent(data_x, data_y, parameters, learn_rate, nb_iterations):\n\n # Cost history\n cost_tracking = np.zeros(nb_iterations)\n\n for _i 
in range(nb_iterations):\n parameters -= learn_rate * gradient(data_x, data_y, parameters)\n # recording the cost for each iteration\n cost_tracking[_i] = cost_function(data_x, data_y, parameters)\n\n return parameters, cost_tracking", "def gradient_descent(X, y, theta, alpha, total_iterations, hypothesis):\n len_theta = len(theta)\n m = len(y)\n one_over_m = (1.0 / float(m))\n\n for _ in range(0, total_iterations):\n temp_theta = numpy.zeros(len_theta)\n\n X_by_theta_minus_y = numpy.subtract(hypothesis(numpy.matrix(theta), X), y)\n\n for j in range(0, len_theta):\n jth_column_of_X = X[:,j]\n derivative_j = one_over_m * numpy.multiply(X_by_theta_minus_y, jth_column_of_X).sum()\n temp_theta[j] = theta[j] - alpha*derivative_j\n\n theta = temp_theta\n\n return numpy.matrix(theta)", "def calculate_gradients(self, X, Y):\n Z1 = np.matmul(self.weights[0], X) + self.biases[0] #(30, m)\n A1 = sigmoid(Z1) #(30, m)\n Z2 = np.matmul(self.weights[1], A1) + self.biases[1] #(10, m)\n A2 = sigmoid(Z2) #(10, m)\n # number of examples\n m = X.shape[1]\n dZ2 = A2 - Y #(784, m)\n dW2 = (1 / m) * np.matmul(dZ2, A1.T) #(10, 30)\n db2 = (1 / m) * np.sum(dZ2, axis = 1, keepdims = True) #(10, 1)\n dZ1 = np.multiply(np.matmul(self.weights[1].T, dZ2), sigmoid_deri(Z1)) #(30, m)\n dW1 = (1 / m) * np.matmul(dZ1, X.T) #(30, 784)\n db1 = (1 / m) * np.sum(dZ1, axis = 1, keepdims = True) #(30, 1)\n \n grads = {\"dW1\":dW1, \"db1\":db1, \"dW2\":dW2, \"db2\":db2} \n return grads", "def _evaluate_gradient(self, **variables):\n pass", "def _gradient_descent(self, X, y, epochs, learning_rate, batch_size):\n num_feats = X.shape[1]\n num_samples = X.shape[0]\n\n y = y.reshape(num_samples, 1)\n W = np.random.rand(num_feats, 1)\n training_loss_epochs = []\n\n for ix in range(epochs):\n shuffled_ix = (np.arange(0, len(X)))\n np.random.shuffle(shuffled_ix)\n X = X[shuffled_ix, :]\n y = y[shuffled_ix, :]\n\n for batch_ix in np.arange(0, X.shape[0], batch_size):\n dW = self._compute_gradient(W, X[batch_ix:batch_ix + batch_size], y[batch_ix:batch_ix + batch_size])\n W -= learning_rate * dW\n\n if ix % 10 == 0:\n y_pred = np.dot(X, W)\n training_loss = self.mse(y, y_pred)\n print('epoch {0} : training loss {1}'.format(ix, training_loss))\n training_loss_epochs.append(training_loss[0])\n\n self.weights = W\n self.training_loss = training_loss_epochs\n return None", "def gradient(self, inputs):\n raise NotImplementedError", "def compute_gradient(self, function, arguments):", "def compute_grad(W, x, y, loss_c, config):\n\n # Lazy import of propper model\n if config.model_type == \"linear_svm\":\n from utils.linear_svm import model_grad\n elif config.model_type == \"logistic_regression\":\n from utils.logistic_regression import model_grad\n else:\n raise ValueError(\"Wrong model type {}\".format(\n config.model_type))\n\n dW, db = model_grad(loss_c, x, y)\n dW += config.reg_lambda * l2_grad(W)\n\n return dW, db", "def approx_grad_reg(theta, X, y, _lambda):\n grad_a = np.array([(cost_reg(theta + e, X, y, _lambda) - cost_reg(theta - e, X, y, _lambda)) / (2 * 1e-5)\n for e in np.identity(len(theta)) * 1e-5])\n return grad_a", "def gradient(self):\n functional = self\n\n if self.exponent == 1:\n class L1Gradient(Operator):\n\n \"\"\"The gradient operator of this functional.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize a new instance.\"\"\"\n super().__init__(functional.domain, functional.domain,\n linear=False)\n\n def _call(self, x):\n \"\"\"Apply the gradient operator to the given point.\"\"\"\n return x.ufuncs.sign()\n\n return 
L1Gradient()\n elif self.exponent == 2:\n class L2Gradient(Operator):\n\n \"\"\"The gradient operator of this functional.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize a new instance.\"\"\"\n super().__init__(functional.domain, functional.domain,\n linear=False)\n\n def _call(self, x):\n \"\"\"Apply the gradient operator to the given point.\n\n The gradient is not defined in 0.\n \"\"\"\n norm_of_x = x.norm()\n if norm_of_x == 0:\n return self.domain.zero()\n else:\n return x / norm_of_x\n\n return L2Gradient()\n else:\n raise NotImplementedError('`gradient` only implemented for p=1 or '\n 'p=2')", "def run_gradient_descent_iteration(X, Y, theta, alpha, lambda_factor, temp_parameter):\n delta = sparse.coo_matrix(theta.shape).toarray()\n\n h = compute_probabilities(X, theta, temp_parameter)\n\n for j in range(delta.shape[0]):\n y = Y\n y = np.where(y != j, 0, 1)\n p = y - h[j]\n\n x = X.T * p\n x = x.T\n x = x.sum(axis=0)\n\n grad = -x / (temp_parameter * X.shape[0]) + lambda_factor * theta[j]\n\n delta[j] += grad\n\n theta -= alpha * delta\n\n return theta", "def scl_grad(X, phi):\r\n shape = list(phi.shape)\r\n shape.insert(0,3)\r\n grad = _np.zeros(shape,dtype=phi.dtype)\r\n\r\n polynomial = _interp(X[0], phi, axis=0)\r\n grad[0,...] = polynomial(X[0], nu=1)\r\n\r\n polynomial = _interp(X[1], phi, axis=1)\r\n grad[1,...] = polynomial(X[1], nu=1)\r\n\r\n polynomial = _interp(X[2], phi, axis=2)\r\n grad[2,...] = polynomial(X[2], nu=1)\r\n\r\n return grad", "def run_gradient_descent(data,theta,alpha,num_iters):\n population = data[:,0]\n prices = data[:,1]\n x = ones(shape=(len(population),2)) #add ones for theta0 \n x[:,1] = population\n x = transpose(x)\n error_history = zeros(shape=(num_iters,1))\n \n for i in range(num_iters):\n predictions = theta.dot(x)\n errors_x1 = (predictions - prices) * x[0,:]\n errors_x2 = (predictions - prices) * x[1,:]\n theta[0][0] = theta[0][0] - alpha*(1.0/len(population))*errors_x1.sum()\n theta[0][1] = theta[0][1] - alpha*(1.0/len(population))*errors_x2.sum()\n error_history[i,0] = calculate_cost(theta,data)\n \n return theta, error_history", "def _grad(V):\n dv = diag(V)\n weights, A, _, AinvB = _weights(dv)\n Ey = (weights.T.dot(Y_control) - Y_treated).getA()\n dGamma0_dV_term2 = zeros(K)\n #dPI_dV = zeros((N0, N1)) # stupid notation: PI = W.T\n #Ai = A.I\n for k in range(K):\n if verbose: # for large sample sizes, linalg.solve is a huge bottle neck,\n print(\"Calculating gradient, linalg.solve() call %s of %s\" % (k ,K,))\n #dPI_dV.fill(0) # faster than re-allocating the memory each loop.\n dA = dA_dV_ki[k]\n dB = dB_dV_ki[k]\n dPI_dV = linalg.solve(A,(dB - dA.dot(AinvB))) \n #dPI_dV = Ai.dot(dB - dA.dot(AinvB))\n dGamma0_dV_term2[k] = np.einsum(\"ij,kj,ki->\",Ey, Y_control, dPI_dV) # (Ey * Y_control.T.dot(dPI_dV).T.getA()).sum()\n return LAMBDA + 2 * dGamma0_dV_term2", "def costFunction(theta,X,y):\n m = X.shape[0]\n J = 0\n h = sigmoid (np.dot(X,theta))\n \n J = (1/m)* ((-np.dot(y.T,(np.log(h)))) - np.dot((1 - y).T,(np.log(1-h))))\n \n #grad = (1/m) * np.dot(X.T,(h-y))\n grad = (1/m) * np.dot((h.T - y), X).T\n \n return J, grad", "def get_gradient(self):\n # Fast path\n if self._gradient is not None:\n return self._gradient\n # Flatten (make if necessary)\n gradient = tools.flatten(tools.grads_of(self._model.parameters()))\n self._gradient = gradient\n return gradient", "def gradient_model (self, x, initial_weights = None, \\\n step_size = 5.0e-6, tol = 2.5e+7, n_iters = 501, l2 = 0):\n # setup initial intercept, slope, iter number and rss\n if 
initial_weights is None:\n weights = self.initial_weight\n else:\n weights = initial_weights\n # Compute indicator value for (y_i = +1)\n indicators = np.array([int (i) for i in (self.train_output_y==1)])\n for itr in range(n_iters):\n # Predict P(y_i = +1|x_1,w) using your predict_probability() function\n _, pred_probs = self.predict_probability(self.train_feature_x, weights)\n \n # Compute the errors as indicator - predictions\n errors = indicators - pred_probs\n\n #Update the weights:\n derivative = self.feature_derivative(errors, weights, l2)\n weights = weights + derivative * (step_size) \n \n #check if converged\n #todo\n \"\"\"\n # Checking whether log likelihood is increasing\n if itr <= 15 or (itr <= 100 and itr % 10 == 0) or (itr <= 1000 and itr % 100 == 0) \\\n or (itr <= 10000 and itr % 1000 == 0) or itr % 10000 == 0:\n lp = self.compute_log_likelihood(indicators,weights)\n print 'iteration %*d: log likelihood of observed labels = %.8f' % \\\n (int(np.ceil(np.log10(n_iters))), itr, lp)\n \"\"\"\n \n #check weights\n #print \"\\n\"\n #print \"The weights for features: \", weights\n #final prediction\n preds = self.prediction(x, weights)\n return preds, weights", "def least_squares_gradient(y, tx, w): \n e = y - tx.dot(w)\n grad = -tx.T.dot(e) / len(e)\n return grad, e", "def gradient(poly):\n return differential(\n poly, chaospy.poly.collection.basis(1, 1, poly.dim, sort=\"GR\"))", "def compute_gradient(y, tx, w):\n\tN = y.shape[0]\n\te = y - np.dot(tx, w)\n\n\tgradLw = -1/N * np.dot(tx.T, e)\n\treturn gradLw", "def simple_gradient(x, y, theta):\n if x.shape[0] * y.shape[0] * theta.shape[0] == 0:\n return None\n if x.shape[0] != y.shape[0] or theta.shape[0] != 2:\n return None\n\n x = add_intercept(x)\n\n result = [\n forumla(x, y, theta, 0),\n forumla(x, y, theta, 1)\n ]\n return result", "def compute_gradient(self): # TODO: try to change to square loss since it's hessian is easier to obtain\n A = np.dot(self.X, self.w)\n m = self.t.shape[0]\n C = -1 * self.t * (1 / (1 + np.exp(A * self.t)))\n return (1 / m) * np.dot(self.X.T, C)", "def gradient_descend(self, X, y, state_generators, operator_programs=None,\r\n qvm=None):\r\n history_theta, history_loss, history_grad = [], [], []\r\n coeff, theta = 1.0, self.initial_theta\r\n \r\n prog_input_gen = state_generators['input']\r\n prog_output_gen = state_generators['output']\r\n prog_output_grad = state_generators['grad']\r\n \r\n n_samples = len(X)\r\n n_theta = len(theta)\r\n \r\n if qvm is None:\r\n self.qvm = api.QVMConnection()\r\n else:\r\n self.qvm = qvm\r\n \r\n # Check operators\r\n if not isinstance(operator_programs, list):\r\n operator_programs = [operator_programs]\r\n n_operators = len(operator_programs)\r\n \r\n # Check batch size\r\n if self.batch_size is None:\r\n self.batch_size = n_samples\r\n self.batch_size = min(self.batch_size, n_samples)\r\n \r\n # Loop over epochs\r\n for e in range(self.epochs): \r\n \r\n # Loop over batches\r\n batches = self.generate_batches(X, y, self.batch_size)\r\n n_batches = len(batches)\r\n for i, batch in enumerate(batches):\r\n \r\n batch_X, batch_y = batch\r\n n_samples_in_batch = len(batch_X)\r\n \r\n # Predictions\r\n batch_y_pred = np.zeros((n_samples_in_batch, n_operators))\r\n for k in range(n_samples_in_batch):\r\n prog = prog_input_gen(batch_X[k,:])\r\n prog += prog_output_gen(theta)\r\n batch_y_pred[k,:] = coeff * np.array(qvm.expectation(prog, operator_programs))\r\n if self.loss == self.loss_entropy:\r\n batch_y_pred[k,:] = np.exp(batch_y_pred[k,:]) / 
np.sum(np.exp(batch_y_pred[k,:]))\r\n \r\n # Comput loss\r\n loss_value = self._compute_loss(batch_y, batch_y_pred)\r\n \r\n # Display status\r\n if self.verbose:\r\n print('Epoch: {}/{} ::: Batch: {}/{} ::: Loss: {:.5f}'.format(e+1, self.epochs, i+1, n_batches, loss_value)) \r\n \r\n # Gradient\r\n if not (e == self.epochs - 1 and i == n_batches - 1):\r\n grad = np.zeros((n_samples_in_batch, n_operators, n_theta))\r\n for k in range(n_samples_in_batch):\r\n \r\n # Define input state \r\n prog_input = prog_input_gen(batch_X[k,:])\r\n \r\n # Caclulate gradient for each theta_j\r\n for j in range(n_theta):\r\n \r\n # Gradient +/- \r\n for sign in [1,-1]:\r\n grad_sign = np.zeros(n_operators)\r\n grad_progs = prog_output_grad(theta, j, sign)\r\n # Generally, the gradient programs could return\r\n # a program or list of programs (in case the \r\n # gradient +/- is the sum of expectations)\r\n if not isinstance(grad_progs, list):\r\n grad_progs = [grad_progs]\r\n for grad_prog in grad_progs:\r\n prog = prog_input\r\n prog += grad_prog\r\n # B_j +/- expectation\r\n grad_sign += np.array(qvm.expectation(prog, operator_programs))\r\n # Gradient = (B_j+ - B_j-) / 2\r\n grad[k, :, j] += sign / 2.0 * grad_sign\r\n \r\n # Gradient update\r\n grad_full = self._compute_grad_full(batch_y, batch_y_pred, grad)\r\n if self.loss == self.loss_mse:\r\n grad_full_coeff = -2.0 * np.mean((batch_y - batch_y_pred) * batch_y_pred)\r\n \r\n # Update theta\r\n theta -= self.learning_rate * grad_full\r\n if self.loss == self.loss_mse:\r\n coeff -= self.learning_rate * grad_full_coeff\r\n \r\n # Append to history\r\n history_loss.append(loss_value)\r\n history_theta.append(theta)\r\n history_grad.append(grad)\r\n \r\n # Prepare results\r\n results = OptResults()\r\n results.theta, results.coeff = theta, coeff\r\n results.loss = loss_value\r\n results.history_loss = history_loss\r\n results.history_theta = history_theta\r\n results.history_grad = history_grad\r\n \r\n return results", "def compute_gradient(self):\n A = np.dot(self.X, self.w)\n m = self.t.shape[0]\n C = -1 * self.t * (1 / (1 + np.exp(A * self.t)))\n return (1 / m) * np.dot(self.X.T, C)", "def compute_reg_gradient(self):\n A = np.dot(self.X, self.w)\n m = self.t.shape[0]\n C = -1 * self.t * (1 / (1 + np.exp(A * self.t)))\n return (1 / m) * np.dot(self.X.T, C) + self.lambda_reg * self.w # add regularization term", "def gradientDescent(X, y, theta, alpha, num_iters):\n\n # Initialize some useful values\n J_history = []\n m = y.size # number of training examples\n\n for i in range(num_iters):\n # ====================== YOUR CODE HERE ======================\n # Instructions: Perform a single gradient step on the parameter vector\n # theta.\n #\n # Hint: While debugging, it can be useful to print out the values\n # of the cost function (computeCost) and gradient here.\n #\n # Calculate the gradient step according to the equation for theta1:\n g_step1 = (alpha / m * np.sum( (np.dot(X,theta) - y) * X[:,1]) )\n # Gradient step for theta knot:\n g_step0 = (alpha / m * np.sum( (np.dot(X,theta) - y) ) )\n \n #update theta\n theta[0] = (theta[0] - g_step0)\n theta[1] = (theta[1] - g_step1)\n \n #print([theta , g_step1, g_step0])\n\n # ============================================================\n\n # Save the cost J in every iteration\n J_history.append(computeCost(X, y, theta))\n\n return theta, J_history", "def compute_cost_gradient2(x, y0, W, V, U, b0, b1, b2):\n # compute cost\n A1 = x @ W + b0\n A2 = x @ V + b1\n z0 = sigmoid(A1)\n z1 = sigmoid(A2)\n z = 
np.array([z0, z1]).T\n A3 = z @ U + b2\n y = sigmoid(A3)\n if y0 is None:\n return y\n cost = np.sum((y - y0) ** 2)\n # compute gradient\n dy = 2 * (y - y0)\n dA3 = dy * (y * (1 - y))\n dz0 = dA3 * U[0]\n dz1 = dA3 * U[1]\n dA1 = dz0 * (z0 * (1 - z0))\n dA2 = dz1 * (z1 * (1 - z1))\n dW = x.T @ dA1\n dV = x.T @ dA2\n dU = z.T @ dA3\n db0 = np.sum(dA1)\n db1 = np.sum(dA2)\n db2 = np.sum(dA3)\n return cost, dW, dV, dU, db0, db1, db2", "def gradient_bias(X, Y, model):\n W = model['weight']\n b = model['bias']\n weight_decay = model['weight_decay']\n\n # YOUR CODE HERE\n # Write the gradient with respect to the bias\n # The following line is just a placeholder\n return np.subtract(np.dot(np.transpose(predict(X, model)), np.ones(len(Y))), np.dot(np.transpose(len(Y)), np.ones(10))) #np.zeros(Y.shape[1])", "def computeGradient(self, X, y, w):\n n = len(X)\n if self.loss == 'linear':\n gradient = -2 * np.dot(X.T, (y - X.dot(w)))\n elif self.loss == 'logistic':\n g = self.logistic(X, w)\n gradient = -2 * np.dot(X.T, (y - g) * g * (1 - g))\n elif self.loss == 'perceptron':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n index = ((np.dot(X, w) >= 0).astype(int) != y)\n usedX = X[index[:, 0]]\n usedY = newY[index[:, 0]]\n gradient = -np.dot(usedX.T, usedY)\n elif self.loss == 'svm':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n index = (np.dot(X, w) * newY < 1)\n usedX = X[index[:, 0]]\n usedY = newY[index[:, 0]]\n gradient = 2 * w - self.C * np.dot(usedX.T, usedY)\n gradient[0] = gradient[0] + 2 * w[0]\n\n return gradient", "def compute_gradient(self, grad=None):\n if grad is None:\n grad = backend.ones_like(self.output_value)\n x, y = [node.output_value for node in self.input_nodes]\n\n dx = backend.dot(grad, backend.transpose(y))\n dy = backend.dot(backend.transpose(x), grad)\n\n return [dx, dy]", "def gradient_descent(X, Y, epsilon=1e-6, l=1, step_size=1e-4, max_steps=1000):\n beta = np.zeros(X.shape[1])\n for s in range(max_steps):\n # TODO: Implement iterations.\n pass\n return beta", "def _loss_gradient(x0, x1, b, w, lam, weights=None):\n nvars = len(w)\n\n # initialize + regularization term\n loss = 0.5 * lam * np.sum(w ** 2)\n gradient = np.zeros(nvars + 1) # first position is b\n gradient[1:] = lam * w\n\n # we need prediction for x\n pred_x_0_1 = [LogisticRegression._sigmoid(x0, b, w), LogisticRegression._sigmoid(x1, b, w)]\n\n # the log likelihood\n log_like_x_0_1 = [np.log(1.0 - pred_x_0_1[0]),\n np.log(pred_x_0_1[1])]\n\n # also need the error for gradient.\n error = [pred_x_0_1[0],\n pred_x_0_1[1] - 1]\n\n if weights is None:\n loss += -np.sum(log_like_x_0_1[1]) - np.sum(log_like_x_0_1[0])\n gradient[0] += np.sum(error[0]) + np.sum(error[1]) # * 1 for bias term \n for k in range(nvars):\n gradient[k + 1] += np.sum(error[0] * x0[:, k]) + np.sum(error[1] * x1[:, k])\n else:\n loss += -np.sum(weights[1] * log_like_x_0_1[1]) - np.sum(weights[0] * log_like_x_0_1[0])\n gradient[0] += np.sum(error[0] * weights[0]) + np.sum(error[1] * weights[1])\n for k in range(nvars):\n gradient[k + 1] += ( np.sum(weights[0] * error[0] * x0[:, k]) +\n np.sum(weights[1] * error[1] * x1[:, k]) )\n return loss, gradient", "def backward_gradient(\n self, input: np.ndarray, head_gradients: Dict[str, np.ndarray]\n ) -> np.ndarray:\n raise NotImplementedError", "def gradient_weight(X, Y, model):\n W = model['weight']\n b = model['bias']\n weight_decay = model['weight_decay']\n\n # YOUR CODE HERE\n # Write the gradient with respect to the weights.\n return 
np.add(np.subtract(np.dot(np.transpose(predict(X, model)), X), np.dot(np.transpose(Y), X)), 2 * LAMBDA * np.transpose(model['weight'])) #np.zeros((X.shape[1], Y.shape[1]))", "def backward(self, gradient):\n raise NotImplementedError()", "def gradient_step(self):\n n = 3 #Granularity of line search\n grad = self.gradient()\n #grad = grad/np.linalg.norm(grad, 2)\n W = project(self.W[-1] + grad)\n A = np.linspace(0., 1., n+2)[1:-1]\n Objective = map(self, [(1. - a)*self.W[-1] + a*W for a in A])\n a = A[np.argmax(Objective)]\n W = (1. - a)*self.W[-1] + a*W\n obj = np.max(Objective)\n self.objective.append(obj)\n self.W.append(W)\n self.iterations += 1", "def gradient(self):\n functional = self\n\n class KLGradient(Operator):\n\n \"\"\"The gradient operator of this functional.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize a new instance.\"\"\"\n super().__init__(functional.domain, functional.domain,\n linear=False)\n\n def _call(self, x):\n \"\"\"Apply the gradient operator to the given point.\n The gradient is not defined in points where one or more\n components are non-positive.\n \"\"\"\n if functional.prior is None:\n return (-1.0) / x + 1\n else:\n return (-functional.prior) / x + 1\n\n return KLGradient()", "def gradient_descent(X, Y, iterations, alpha, l = 0):\n \n # initialize B0, B1, ..., Bp\n betas = np.array([0.0]*(len(X[0])+1))\n \n # initialize list of cost vs iterations; should see a gradual descent\n costs = np.array([0.0]*iterations)\n \n # number of observations\n m = len(X)\n \n for i in range(iterations):\n sumterms = 1.0/m * ([estimation(xvec,betas) for xvec in X] - Y)\n errors = np.array([0.0]*len(betas))\n errors[0] = sum(sumterms) # error term for B0 has no multiplier\n for k in range(1,len(betas)):\n errors[k] = np.dot(sumterms, [row[k-1] for row in X]) + l/m*betas[k]\n \n betas = betas - alpha * errors\n costs[i] = cost(X, Y, betas, l)\n \n return betas, costs", "def backward(self, gradient: Tensor) -> Tensor:\n self.b_grad = np.sum(gradient, axis=0)\n self.w_grad = self.inputs.T @ gradient\n return gradient @ self.w.T", "def gradient_step(Y, X, lmbd, L, current_W):\n XT = X.T\n gradient = np.dot(XT.dot(X),current_W) - XT.dot(Y)\n C = current_W - (1/L) * gradient\n U, s, V = np.linalg.svd(C)\n\n s = s - lmbd/L\n final_s = np.maximum(s,0)\n U_dim = U.shape[1]\n V_dim = V.shape[0]\n S = np.zeros((U_dim, V_dim))\n S_rank = min(U_dim, V_dim)\n S[0:S_rank,0:S_rank] = np.diag(final_s)\n\n output = np.dot(U, np.dot(S, V))\n\n return output", "def curves_gradient(theta, m_, LAMBDA=0):\n # X0: column of ones\n theta0_gradient = (1 / m_) * sum((curves_hypothesis(theta, m_) - y[:m_].ravel()) * np.ones(m_))\n # X1: data given as X\n theta1_gradient = ((1 / m_) * sum((curves_hypothesis(theta, m_) - y[:m_].ravel()) * X[:m_].ravel())) \\\n + (LAMBDA / m_) * theta[1]\n return np.array([theta0_gradient, theta1_gradient])", "def gradient(img):\n G = np.zeros(img.shape)\n theta = np.zeros(img.shape)\n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n Gx = partial_x(img)\n Gy = partial_y(img)\n G = np.sqrt(np.square(Gx) + np.square(Gy))\n theta = np.degrees(np.arctan2(Gy, Gx)) % 360\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return G, theta", "def gradient(C, w, b):\n return list(partial_deriv(C, b)) + list(partial_deriv(C, wi) for wi in w)", "def get_gradient(self, y, x, weight):\n y = np.reshape(y, (len(y),))\n return np.dot(x.T, sigmoid(np.dot(x, weight)) - y) 
\\\n + self.regularizer.get_gradient(weight)", "def compute_gradient(c, x, y):\n\n vectors = np.array([[0, 1], [0, -1], [1, 0], [-1, 0]])\n rows, cols = c.shape\n\n result = np.empty_like(x)\n\n for i in nb.prange(rows):\n for j in nb.prange(cols):\n c_remainder = c[i, j] % 4\n gradient_co = vectors[c_remainder]\n result[i, j] = gradient_co[0] * x[i, j] + gradient_co[1] * y[i, j]\n\n return result", "def gradient_descent(x0,df,rate=0.1,max_iters=1000,min_step=1e-6,max_step=1e5,\n projection=None,trajectory=False,step_history=False,f=None,\n cost_history=False,feedback=False,plot_history=False):\n if feedback is True:\n print(\"gd.gradient_descent():\")\n if f is not None:\n assert callable(f)\n fx0 = f(x0)\n if feedback is True:\n print(f\" initial cost = {fx0:.2e}\")\n if projection is not None:\n assert callable(projection)\n project = True\n else:\n project = False\n if trajectory is True:\n xx = [x0.copy()]\n if step_history is True:\n steps = []\n if cost_history is True:\n assert callable(f)\n fx = [fx0]\n\n x = x0.copy()\n for i in range(max_iters):\n dx = -rate*df(x)\n if project is True:\n x0 = x.copy()\n x = projection(x0+dx)\n dx = x-x0\n else:\n x += dx\n if trajectory is True:\n xx.append(x.copy())\n if cost_history is True:\n fx += [f(x)]\n step_size = np.linalg.norm(dx)\n if step_history is True:\n steps += [step_size]\n if step_size < min_step or step_size > max_step:\n break\n\n results = dict()\n results['output'] = x\n if trajectory is True:\n results['trajectory'] = xx\n if cost_history is True:\n results['cost_history'] = fx\n if step_history is True:\n results['step_history'] = steps\n if plot_history is True:\n assert step_history is True or cost_history is True\n plt.figure()\n if step_history is True:\n plt.semilogy(steps,label='step size')\n if cost_history is True:\n plt.semilogy(fx,label='cost')\n plt.xlabel('iteration number')\n plt.title('Gradient Descent')\n plt.legend()\n results['figure'] = plt\n plt.show(block=False)\n \n if feedback is True:\n if f is not None:\n print(f\" final cost = {f(x):.2e}\")\n \n return results", "def compute_gradient_logreg(y, tx, w):\n assert len(set(y).difference({0., 1.})) == 0, \"Class labels must be encoded as {0, 1}\"\n\n s = sigmoid(tx.dot(w)) - y\n grad = tx.T.dot(s)\n\n return grad", "def EvaluateGradient(self, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def l2_reg_gradient_descent(Y, weights, cache, alpha, lambtha, L):\n m = Y.shape[1]\n for i in range(L - 1, -1, -1):\n curr = str(i + 1)\n prev = str(i)\n if i == L - 1:\n dz = cache['A' + str(L)] - Y\n else:\n dz = da * (1 - cache['A' + curr] ** 2)\n Ap = cache['A' + prev]\n dw = (np.matmul(dz, Ap.T) + lambtha * weights['W' + curr]) / m\n db = np.sum(dz, axis=1, keepdims=True) / m\n da = np.matmul(weights['W' + curr].T, dz)\n weights['W' + curr] = weights['W' + curr] - alpha * dw\n weights['b' + curr] = weights['b' + curr] - alpha * db", "def gradient(self, x, Y):\n if self.is_sparse:\n x = x.todense()\n Y = Y.todense()\n assert(len(shape(x))==1)\n assert(len(shape(Y))==2)\n assert(len(x)==shape(Y)[1])\n \n x_2d=reshape(x, (1, len(x)))\n k = self.kernel(x_2d, Y)\n differences = Y - x\n G = (1.0 / self.width ** 2) * (k.T * differences)\n return G", "def calc_gradient(self):\n \n self.setup()\n\n # Create our 2D dictionary the first time we execute.\n if not self.gradient:\n for name in self.param_names:\n self.gradient[name] = {}\n \n # Pull initial state and stepsizes from driver's parameters\n base_param = OrderedDict()\n stepsize = {}\n for key, 
item in self._parent.get_parameters().iteritems():\n base_param[key] = item.evaluate()\n \n if item.fd_step:\n stepsize[key] = item.fd_step\n else:\n stepsize[key] = self.default_stepsize\n\n # For Forward or Backward diff, we want to save the baseline\n # objective and constraints. These are also needed for the\n # on-diagonal Hessian terms, so we will save them in the class\n # later.\n base_data = self._run_point(base_param)\n \n # Set up problem based on Finite Difference type\n if self.form == 'central':\n deltas = [1, -1]\n func = diff_1st_central\n elif self.form == 'forward':\n deltas = [1, 0]\n func = diff_1st_fwrdbwrd\n else:\n deltas = [0, -1]\n func = diff_1st_fwrdbwrd\n\n self.gradient_case = OrderedDict()\n\n # Assemble input data\n for param in self.param_names:\n \n pcase = []\n for j_step, delta in enumerate(deltas):\n \n case = base_param.copy()\n case[param] += delta*stepsize[param]\n pcase.append({ 'param': case })\n \n self.gradient_case[param] = pcase\n \n # Run all \"cases\".\n # TODO - Integrate OpenMDAO's concurrent processing capability once it\n # is formalized. This operation is inherently paralellizable.\n for key, case in self.gradient_case.iteritems():\n for ipcase, pcase in enumerate(case):\n if deltas[ipcase]:\n pcase['data'] = self._run_point(pcase['param'])\n else:\n pcase['data'] = base_data\n \n \n # Calculate gradients\n for key, case in self.gradient_case.iteritems():\n \n eps = stepsize[key]\n \n for name in list(self.objective_names + \\\n self.eqconst_names + \\\n self.ineqconst_names):\n self.gradient[key][name] = \\\n func(case[0]['data'][name],\n case[1]['data'][name], eps)\n\n # Save these for Hessian calculation\n self.base_param = base_param\n self.base_data = base_data", "def compute_gradient(y, tx, w, method=\"mse\"):\n err = y - tx.dot(w)\n if method.lower() == \"mse\":\n grad = -tx.T.dot(err) / len(err)\n elif method.lower() == \"mae\":\n sign = (err < 0)*(-1)+(err >= 0)*1\n sign = np.reshape(sign, (-1, 1))\n grad = (np.sum(tx*sign, axis=0)*(-1)/len(err))[:, np.newaxis]\n else:\n return NotImplementedError\n return grad", "def gradient(self):\n functional = self\n\n class KLCCGradient(Operator):\n\n \"\"\"The gradient operator of this functional.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize a new instance.\"\"\"\n super().__init__(functional.domain, functional.domain,\n linear=False)\n\n def _call(self, x):\n \"\"\"Apply the gradient operator to the given point.\n\n The gradient is not defined in points where one or more\n components are larger than or equal to one.\n \"\"\"\n if functional.prior is None:\n return 1.0 / (1 - x)\n else:\n return functional.prior / (1 - x)\n\n return KLCCGradient()", "def gradient(self, theta):\n return (1 / (self.sigma * np.sqrt(2 * np.pi))) * (\n -theta / (self.sigma ** 2) * np.exp(-(theta ** 2) / (2 * self.sigma ** 2))\n )", "def gradient(self,x=None,y=None,save=True):\n\n\t\tif (x is not None) and (y is not None):\n\n\t\t\tassert x.shape==y.shape,\"x and y must have the same shape!\"\n\n\t\t\t#x coordinates\n\t\t\tif type(x)==u.quantity.Quantity:\n\t\t\t\n\t\t\t\tassert x.unit.physical_type==self.side_angle.unit.physical_type\n\t\t\t\tj = np.mod(((x / self.resolution).decompose().value).astype(np.int32),self.data.shape[1])\n\n\t\t\telse:\n\n\t\t\t\tj = np.mod((x / self.resolution.to(u.rad).value).astype(np.int32),self.data.shape[1])\t\n\n\t\t\t#y coordinates\n\t\t\tif type(y)==u.quantity.Quantity:\n\t\t\t\n\t\t\t\tassert y.unit.physical_type==self.side_angle.unit.physical_type\n\t\t\t\ti = 
np.mod(((y / self.resolution).decompose().value).astype(np.int32),self.data.shape[0])\n\n\t\t\telse:\n\n\t\t\t\ti = np.mod((y / self.resolution.to(u.rad).value).astype(np.int32),self.data.shape[0])\n\n\t\telse:\n\t\t\ti = None\n\t\t\tj = None\n\t\t\n\t\t#Call the C backend\n\t\tgradient_x,gradient_y = _topology.gradient(self.data,j,i)\n\n\t\t#Return the gradients\n\t\tif (x is not None) and (y is not None):\n\n\t\t\treturn gradient_x.reshape(x.shape),gradient_y.reshape(x.shape)\n\n\t\telse:\n\t\t\n\t\t\tif save:\n\t\t\t\tself.gradient_x = gradient_x\n\t\t\t\tself.gradient_y = gradient_y\n\t\t\n\t\t\treturn gradient_x,gradient_y", "def gradient(self, x):\n u = np.asarray([x[0]])\n C = self.C_func(u)\n dC = self.dC_func(u, order=1)\n P = self.P\n numerator = np.sum((C - P) * dC, axis=0)\n denominator = np.sum(np.sum((C - P) ** 2, axis=0) ** (1 / 2))\n if np.abs(denominator) > 0:\n gradient = numerator/denominator\n else:\n gradient = np.asarray(0)[np.newaxis]\n return gradient", "def gradient(series, **options):\n x = series.index\n y = series.values\n\n a = np.gradient(y, x, **options)\n return series.__class__(a, series.index)", "def gradient(self, theta):\n return np.zeros([theta.shape[0]])" ]
[ "0.73058945", "0.7201665", "0.7106998", "0.70931894", "0.70859104", "0.70620006", "0.7045631", "0.6992574", "0.6935396", "0.67486787", "0.67435336", "0.67086285", "0.67086285", "0.6706915", "0.67043006", "0.6637536", "0.6625402", "0.6624418", "0.6577811", "0.65765494", "0.65734255", "0.6572734", "0.65217596", "0.6521416", "0.6520855", "0.6500086", "0.64775133", "0.64682776", "0.644909", "0.644853", "0.64437807", "0.64411217", "0.6433613", "0.6418188", "0.63980496", "0.6395193", "0.6387407", "0.6383033", "0.6372345", "0.6368244", "0.63659585", "0.6362896", "0.636274", "0.63549143", "0.6352596", "0.63480216", "0.6347323", "0.63442755", "0.63420695", "0.6341872", "0.63220716", "0.631348", "0.6306848", "0.63021266", "0.6293564", "0.6288158", "0.627926", "0.6263364", "0.6253316", "0.6251071", "0.6248145", "0.62454414", "0.6245411", "0.6236361", "0.6224641", "0.6219898", "0.6219254", "0.62150264", "0.6205408", "0.6202563", "0.6194188", "0.61845934", "0.61838907", "0.6182986", "0.6179899", "0.6179741", "0.6178906", "0.61709815", "0.617031", "0.6157307", "0.6151675", "0.6151252", "0.61483294", "0.61381745", "0.6134565", "0.61343014", "0.61269975", "0.612324", "0.61199945", "0.61196333", "0.6119614", "0.6117501", "0.6109179", "0.61081743", "0.6108105", "0.61055404", "0.60998964", "0.60949224", "0.6092434", "0.6089852", "0.6077114" ]
0.0
-1
Returns truncated iterated logarithm y = log( log(x) ) where if x<delta, x = delta and if 1-delta < x, x = 1-delta.
def exp_integral(x): gamma = 0.577215665 return (-gamma - expn(x,1) - np.log(x))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ilog(x,delta):\n if(delta < x and x < 1.0 - delta):\n return np.log( -np.log(x) )\n elif(x < delta):\n return np.log( -np.log(delta) )\n else: \n return np.log( -np.log(1.0 - delta) )", "def logit(x: torch.Tensor, eps=1e-5) -> torch.Tensor:\n x = torch.clamp(x, eps, 1.0 - eps)\n return torch.log(x / (1.0 - x))", "def safelog(x):\n #return np.log(x)\n return np.log(np.clip(x,floor,np.inf))", "def diff_log(x):\n \n return np.diff(np.log(x)),np.log(x)[0]", "def diff_log(x):\n\n return np.diff(np.log(x)),np.log(x)[0]", "def log_transform(x, epsilon = 1e-4):\n if x.min() < 0: epsilon += np.abs(x.min())\n return (x.fillna(0).astype(float) + epsilon).apply(np.log)", "def log(amount, start, stop, truncated, sequence):\n ratio = 10 ** (len(str(start)) + 1)\n for x in range(start, amount):\n # y = abs(round(math.log(x, 1)))\n y = abs(round(math.log1p(x) * ratio * 5))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence", "def logarithm(x, eps=10e-5):\n if abs(x) >= 1:\n return float('Nan')\n\n pre_x = x\n tmp = x ** 2\n sign = -1\n i = 2\n res_x = pre_x + sign * tmp / i\n\n while abs(res_x - pre_x) > eps:\n sign = -sign\n i += 1\n tmp *= x\n pre_x = res_x\n res_x += sign * tmp / i\n\n return res_x", "def log(self, x, base=2):\n if x == 0:\n return 0\n return math.log(x, base)", "def logaddexp(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n return torch.max(x, y) + torch.log(1 + torch.exp(-torch.abs(y - x)))", "def lognormalize(x, temp = 1):\n if type(x) is list: x = np.array(x)\n\n x = x - np.max(x)\n # anneal\n xp = np.power(np.exp(x), temp)\n return xp / xp.sum()", "def ln(x):\n return log(x, const.e)", "def log(x, base=math.e):\n return 0.0", "def log10_inplace(a):", "def safe_log(x):\n safe_x = jnp.where(x > 0.0, x, jnp.ones_like(x))\n return jnp.where(x > 0.0, jnp.log(safe_x), jnp.zeros_like(x))", "def logaddexp(X, Y):\n XY_max = T.maximum(X, Y)\n XY_min = T.minimum(X, Y)\n return XY_max + T.log1p(T.exp(XY_min - XY_max))", "def my_log(num):\n\n if num == 0.0:\n return -9999999999\n return math.log(num)", "def smart_log(self, value: float) -> float:\n if value > 0:\n return math.log(value, self.log_scale)\n elif value == 0:\n return 0\n elif value < 0:\n return -(math.log(abs(value), self.log_scale))", "def log(self, base):\n\n\t\tvalues = map(lambda x: x > 0, self.val)\n\t\tif not all(values):\n\t\t\traise ValueError(\"Non-positive number encountered in log.\")\n\t\telse:\n\t\t\tval = np.array([np.math.log(v, base) for v in self.val])\n\t\t\tif len(self.der.shape):\n\t\t\t\tto_multiply = 1 / np.multiply(np.log(base), self.val)\n\t\t\t\tto_multiply = np.expand_dims(to_multiply, 1) if len(self.der.shape) > len(to_multiply.shape) else to_multiply\n\t\t\t\tder = np.multiply(to_multiply, self.der)\n\t\t\telse:\n\t\t\t\tder = None\n\t\treturn Var(val, der)", "def lg(x: Union[int, float]) -> float:\n res = 0.0\n try:\n res = log(x, 2)\n except ValueError:\n pass\n return res", "def log10(x):\n return 0.0", "def _signed_log(x, base):\n return numpy.sign(x) * numpy.log10(numpy.abs(x)) / numpy.log10(base)", "def log2(x: float) -> float:\n return math.log2(x) if x > 0 else 0", "def log_inplace(a):", "def _loglike(self, y, f):\n ll = y * tf.log(pos(f)) + (1 - y) * tf.log(pos(1 - f))\n return ll", "def logit_link(x):\n\n return 1 / (1 + math.exp(-0.05 * x))\n # return 1 / (1 + math.exp(-0.01 * x))", "def logtrapz(logy, x=None, dx=1.0):\n n_intvls = logy.shape[0]-1\n loghalf = log(.5)\n if x is not None:\n 
logdel = x[1:] - x[0:-1]\n else:\n logdel = ones(n_intvls)*dx\n logdel = log(logdel)\n lo = logy[0] + loghalf + logdel[0]\n hi = logy[-1] + loghalf + logdel[-1]\n lsum = logaddexp(lo, hi)\n for i in xrange(1,n_intvls):\n lsum = logaddexp(lsum, logy[i] + logdel[i])\n return lsum", "def log_prior(x):\n logp = (-0.5 * x.pow(2) - torch.tensor(2 * math.pi).sqrt().log()).sum(dim=1)\n return logp", "def _logsumexp(x):\n # Search maximum.\n max_x = None\n length = len(x)\n for i in range(length):\n if max_x is None or x[i] > max_x:\n max_x = x[i]\n\n # Calculate sum of exponential differences.\n sum_exp = 0\n for i in range(length):\n diff = x[i] - max_x\n sum_exp += np.exp(diff)\n\n log_sum_exp = max_x + np.log(sum_exp)\n\n return log_sum_exp", "def log2_inplace(a):", "def log(base, real):\n return math.log(real, base)", "def log(tensor, base=np.e):\n if base == np.e:\n return _elementary_op(tensor, np.log, lambda x: 1 / x)\n return log(tensor) / log(base)", "def log_sum_exp(x):\n log_reduce_sum = P.ReduceSum()\n log = P.Log()\n exp = P.Exp()\n x_max = max(x.data)\n return log(log_reduce_sum(exp(x - x_max), 1)) + x_max", "def log1p(x):\n return 0.0", "def log10(a):", "def _log_add(*values):\n x = max(values)\n if x > -np.inf:\n sum_diffs = 0\n for value in values:\n sum_diffs += 2 ** (value - x)\n return x + np.log2(sum_diffs)\n else:\n return x", "def wrap_log(to_wrap):\r\n with np.errstate(divide='ignore'):\r\n result = np.log(to_wrap)\r\n return result", "def normalize_log_likelihoods(X):\n h, w = np.shape(X)\n return X - np.tile(logsumexp(X, axis=0), (h, 1))\n # return X - np.matlib.repmat(logsumexp(X, axis=0), h, 1)", "def logbasechange(a,b):\n return np.log(b)/np.log(a)", "def log(x, eps=1e-7, name=None):\n return tf.log(x + eps, name=name)", "def log1mexp(x: torch.Tensor) -> torch.Tensor:\n mask = (x < _log05).to(x.dtype)\n impl1 = torch.log1p(-torch.exp(x))\n impl2 = torch.log(-torch.expm1(x))\n return impl1 * mask + impl2 * (1 - mask)", "def log_shift(data):\n result = [np.log(1 + np.abs(d.copy())) for d in data]\n return result", "def linlogspace(start, stop, base=0.5, **kwargs) -> np.ndarray:\n p = (1 - np.logspace(0, 1, base=base, **kwargs)) * (1 / (1 - base))\n return (1 - p) * start + p * stop", "def log2(x):\n return math.log(x) / math.log(2)", "def started_log(X,params):\n \n offset = params['offset']\n if not is_number(params['offset']):\n offset=1.\n X = np.log(X+offset)\n return X", "def log_prob_from_logits(x):\n axis = len(x.shape) - 1\n m = x.max(dim=axis, keepdim=True)[0]\n return x - m - torch.log(torch.exp(x - m).sum(dim=axis, keepdim=True))", "def log_add(x, y):\n maximum = np.maximum(x,y)\n minimum = np.minimum(x,y)\n if(np.abs(maximum - minimum) > 30):\n # the difference is too small, return the just the maximum\n return maximum\n return maximum + np.log1p(np.exp(minimum - maximum))", "def log_sum_exp(x):\n x_max = x.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max", "def log2(x):\n raise NotImplementedError", "def log_norm(log_x):\n c = np.max(log_x)\n\n if np.isinf(c):\n return c\n\n sum_exp = 0\n\n for x in log_x:\n sum_exp += np.exp(x - c)\n\n log_sum_exp = np.log(sum_exp)\n\n log_Z = log_sum_exp + c\n\n return log_Z", "def log_poisson(k, l):\n return k*np.log(l) -l - gammaln(k+1)", "def log_sum_exp(x):\n x_max = x.data.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max", "def log_sum_exp(x):\n x_max = x.data.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max", "def 
unifLogOne(self, x=np.array([]), low=0., hi=100.):\n \n const = 1.0/low**2. - 1.0/hi**2.\n lnPrior = np.log(const) - np.log(x)\n bOut = (x <= low) | (x > hi)\n lnPrior[bOut] = -np.inf\n\n return lnPrior", "def trunc_gumbel(logits, truncation):\n gumbels = np.random.gumbel(np.zeros_like(logits)) + logits\n return -np.log(np.exp(-gumbels) + np.exp(-truncation))", "def logarithmic():\n return Equivalency(\n [(dimensionless_unscaled, function_units.dex, np.log10, lambda x: 10.0**x)],\n \"logarithmic\",\n )", "def log10(tensor):\n return log(tensor, base=10)", "def dlogdp(self):\n return np.log10(self.bins[:, -1]) - np.log10(self.bins[:, 0])", "def log_likelihood(self, x):\n return self.log_likelihood_exp(x) + self.log_prior_nuisance_parameters(x)", "def logspace_from_lin(start, stop, num=50):\n unit = unit_of(start)\n start_ = np.log2(to_unitless(start, unit))\n stop_ = np.log2(to_unitless(stop, unit))\n return np.exp2(np.linspace(start_, stop_, num)) * unit", "def logp(self, x):\n pass", "def Log(num):\n return math.log(float(num))", "def log_trans(vec):\n m = vec[vec != 0]\n c = int(np.log(min(m)))\n d = 10 ^ c\n return np.log(vec + d) - c", "def _log_add(logx: float, logy: float) -> float:\n a, b = min(logx, logy), max(logx, logy)\n if a == -np.inf: # adding 0\n return b\n # Use exp(a) + exp(b) = (exp(a - b) + 1) * exp(b)\n return math.log1p(math.exp(a - b)) + b # log1p(x) = log(x + 1)", "def _call(self, x):\n if self.prior is None:\n tmp = ((x - 1 - np.log(x)).inner(self.domain.one()))\n else:\n # This is the old line from odl version 0.6.0.\n # tmp = ((x - self.prior + self.prior * np.log(self.prior / x))\n tmp = ((x - self.prior + self.prior * np.log((self.prior + 1e-12) / x))\n .inner(self.domain.one()))\n if np.isnan(tmp):\n # In this case, some element was less than or equal to zero\n return np.inf\n else:\n return tmp", "def log_target(self, x):\n return self.log_likelihood_exp(x) + self.log_prior_parameters(x) + self.log_prior_wilson_coeffs(x)", "def weight_log(val):\n return val * math.log(val)", "def _log_sub(logx: float, logy: float) -> float:\n if logx < logy:\n raise ValueError(\"The result of subtraction must be non-negative.\")\n if logy == -np.inf: # subtracting 0\n return logx\n if logx == logy:\n return -np.inf # 0 is represented as -np.inf in the log space.\n\n try:\n # Use exp(x) - exp(y) = (exp(x - y) - 1) * exp(y).\n return math.log(math.expm1(logx - logy)) + logy # expm1(x) = exp(x) - 1\n except OverflowError:\n return logx", "def logsumexp_trick(sum_term):\n max_term = np.max(sum_term)\n return max_term + np.log(np.sum(np.exp(sum_term-max_term)))", "def log_cust(x):\n if type(x) != str:\n if x < 0:\n return 0\n elif x == 0:\n return 0\n elif x > 0:\n return np.log(x)", "def log_prob_from_logits(x):\n axis = len(x.get_shape()) - 1\n m = tf.reduce_max(x, axis, keep_dims=True)\n return x - m - tf.log(tf.reduce_sum(tf.exp(x - m), axis, keep_dims=True))", "def _loglike(self, y, f):\n ll = -0.5 * (tf.log(2 * self.variance * np.pi) +\n (y - f)**2 / self.variance)\n return ll", "def log_prob_from_logits(x):\n axis = len(x.get_shape())-1\n m = tf.reduce_max(x, axis, keep_dims=True)\n return x - m - tf.log(tf.reduce_sum(tf.exp(x-m), axis, keep_dims=True))", "def log_i0(x):\n return pt.switch(\n pt.lt(x, 5),\n pt.log1p(\n x**2.0 / 4.0\n + x**4.0 / 64.0\n + x**6.0 / 2304.0\n + x**8.0 / 147456.0\n + x**10.0 / 14745600.0\n + x**12.0 / 2123366400.0\n ),\n x\n - 0.5 * pt.log(2.0 * np.pi * x)\n + pt.log1p(\n 1.0 / (8.0 * x)\n + 9.0 / (128.0 * x**2.0)\n + 225.0 / (3072.0 * x**3.0)\n + 
11025.0 / (98304.0 * x**4.0)\n ),\n )", "def logpowerlaw(x, p=default()):\n xtr, ytr, gradtr = logcontinuity(p)\n power = p[3]\n x0 = xtr - power/gradtr\n b = ytr - power*np.log(xtr-x0)\n return b + power*np.log(x-x0)", "def get_log(p):\n if p==0:\n return 0.\n return p*np.log2(p)", "def _loglike(self, y, f):\n bincoef = tf.lgamma(self.n + 1) - tf.lgamma(y + 1) \\\n - tf.lgamma(self.n - y + 1)\n ll = bincoef + y * tf.log(pos(f)) + (self.n - y) * tf.log(pos(1 - f))\n return ll", "def my_loglike(theta, x, data, sigma):\n\n model = my_model(theta, x)\n\n return -0.5*len(x)*np.log(2*math.pi*sigma**2) - (0.5/sigma**2) * np.sum((data-model)**2)", "def addlogs(a,b):\n \n if a>b:\n return a + np.log(1+np.exp(b-a))\n else:\n return b + np.log(1+np.exp(a-b))", "def mpf_log(x, prec, rnd=round_fast):\n sign, man, exp, bc = x\n #------------------------------------------------------------------\n # Handle special values\n if not man:\n if x == fzero: return fninf\n if x == finf: return finf\n if x == fnan: return fnan\n if sign:\n raise ComplexResult(\"logarithm of a negative number\")\n wp = prec + 20\n #------------------------------------------------------------------\n # Handle log(2^n) = log(n)*2.\n # Here we catch the only possible exact value, log(1) = 0\n if man == 1:\n if not exp:\n return fzero\n return from_man_exp(exp*ln2_fixed(wp), -wp, prec, rnd)\n mag = exp+bc\n abs_mag = abs(mag)\n #------------------------------------------------------------------\n # Handle x = 1+eps, where log(x) ~ x. We need to check for\n # cancellation when moving to fixed-point math and compensate\n # by increasing the precision. Note that abs_mag in (0, 1) <=>\n # 0.5 < x < 2 and x != 1\n if abs_mag <= 1:\n # Calculate t = x-1 to measure distance from 1 in bits\n tsign = 1-abs_mag\n if tsign:\n tman = (MPZ_ONE<<bc) - man\n else:\n tman = man - (MPZ_ONE<<(bc-1))\n tbc = bitcount(tman)\n cancellation = bc - tbc\n if cancellation > wp:\n t = normalize(tsign, tman, abs_mag-bc, tbc, tbc, 'n')\n return mpf_perturb(t, tsign, prec, rnd)\n else:\n wp += cancellation\n # TODO: if close enough to 1, we could use Taylor series\n # even in the AGM precision range, since the Taylor series\n # converges rapidly\n #------------------------------------------------------------------\n # Another special case:\n # n*log(2) is a good enough approximation\n if abs_mag > 10000:\n if bitcount(abs_mag) > wp:\n return from_man_exp(exp*ln2_fixed(wp), -wp, prec, rnd)\n #------------------------------------------------------------------\n # General case.\n # Perform argument reduction using log(x) = log(x*2^n) - n*log(2):\n # If we are in the Taylor precision range, choose magnitude 0 or 1.\n # If we are in the AGM precision range, choose magnitude -m for\n # some large m; benchmarking on one machine showed m = prec/20 to be\n # optimal between 1000 and 100,000 digits.\n if wp <= LOG_TAYLOR_PREC:\n m = log_taylor_cached(lshift(man, wp-bc), wp)\n if mag:\n m += mag*ln2_fixed(wp)\n else:\n optimal_mag = -wp//LOG_AGM_MAG_PREC_RATIO\n n = optimal_mag - mag\n x = mpf_shift(x, n)\n wp += (-optimal_mag)\n m = -log_agm(to_fixed(x, wp), wp)\n m -= n*ln2_fixed(wp)\n return from_man_exp(m, -wp, prec, rnd)", "def log_sum_exp(x, dim=0):\n max_x = torch.max(x, dim)[0]\n new_x = x - max_x.unsqueeze(dim).expand_as(x)\n return max_x + (new_x.exp().sum(dim)).log()", "def log1p(x):\n return Log1p().apply((x,))[0]", "def _calculate_ll(self, x):\n observation_log_probs = self._observation_log_probs(x, mask=None)\n forward_log_probs = 
self._forward(observation_log_probs)\n log_likelihood = logsumexp(\n forward_log_probs[forward_log_probs.shape[0] - 1, :].numpy())\n return log_likelihood", "def logrels(rets):\n return np.log(rets + 1)", "def poisson_log_likelihood(x, log_rate):\n return x * log_rate - np.exp(log_rate) - lax.lgamma(x + 1.0)", "def _get_log_energy(strided_input, epsilon, energy_floor):\n log_energy = torch.max(strided_input.pow(2).sum(1), epsilon).log() # size (m)\n if energy_floor == 0.0:\n return log_energy\n else:\n return torch.max(log_energy,\n torch.tensor(math.log(energy_floor), dtype=torch.get_default_dtype()))", "def _log_util(chips: float,\n bet_size: float,\n payout: float) -> float:\n if chips <= 0 or chips + payout*bet_size <= 0:\n return MIN_REWARD\n return max(math.log(1.0 + chips + payout*bet_size) - math.log(1.0 + chips),\n MIN_REWARD)", "def logadd(logx, logy):\n\n if logy > logx:\n logx, logy = logy, logx\n\n if logx == -float(\"inf\"):\n return logx\n\n diff = logy - logx\n if diff < -53: # does not make a difference at least in python 2.7.6\n return logx\n\n return logx + log2(1.0 + 2**diff)", "def log_likelihood_exp(self, x):\n predictions = self.get_predictions(x)\n ll = 0.\n for measurement in self.get_measurements:\n m_obj = flavio.Measurement[measurement]\n m_obs = m_obj.all_parameters\n exclude_observables = set(m_obs) - set(self.observables)\n prob_dict = m_obj.get_logprobability_all(predictions, exclude_parameters=exclude_observables)\n ll += sum(prob_dict.values())\n return ll", "def _convert_normlogprice(self, series):\n try:\n return np.log(series.div(series[0]))\n except:\n raise TypeError('ERROR: Could not transform prices to log function. Check price history data.')", "def log2(a):", "def _log_posterior_x(self, X):\r\n LL = self.log_likelihood(X=X)\r\n LP = self._log_prior_x(X)\r\n return LL + LP", "def log_prob_easier(self, x):\n normalization_const = -0.5 * tf.math.log(2 * np.pi) - tf.math.log(self.stddev)\n sq_term = - 0.5 * ((x - self.mean) / self.stddev) ** 2\n l_prob = tf.math.reduce_sum(normalization_const + sq_term, axis=1)\n return l_prob", "def log(x, err=defaultError):\n if (defaultError >= err) or isinstance(err, AbsoluteError):\n if isinstance(err, RelativeError):\n _err = real.RelativeError(0, err.relativeerrorrange, 2)\n elif isinstance(err, AbsoluteError):\n _err = real.AbsoluteError(0, err.absoluteerrorrange, 2)\n if x in real.theRealField:\n x = +x\n if x > 0:\n return real.log(x, err=_err)\n elif x < 0:\n return Complex(real.log(abs(x), _err), real.pi(_err))\n return Complex(real.log(abs(x), err=_err), real.atan2(x.imag, x.real, _err))\n else:\n return Complex(cmath.log(complex(x.real, x.imag)))", "def log1p_exp(x):\n x_ = x * x.ge(0).to(torch.float32)\n res = x_ + torch.log1p(torch.exp(-torch.abs(x)))\n return res", "def nloglikeobs(self, params):\n #print len(params),\n beta = params[:-2]\n df = params[-2]\n scale = params[-1]\n loc = np.dot(self.exog, beta)\n endog = self.endog\n x = (endog - loc)/scale\n #next part is stats.t._logpdf\n lPx = sps_gamln((df+1)/2) - sps_gamln(df/2.)\n lPx -= 0.5*np_log(df*np_pi) + (df+1)/2.*np_log(1+(x**2)/df)\n lPx -= np_log(scale) # correction for scale\n return -lPx", "def log_sum_exp(self, x):\n b = numpy.max(x[(x<sys.maxsize)]) # ignore inf values\n\n s = b + numpy.log(numpy.sum(numpy.exp(x-b)))\n\n return s", "def log2_python(x):\n\n if (x == 0):\n return -float(\"inf\")\n\n return x.bit_length() - 1", "def logtomo(self, psi):\n return -1j / self.wavenumber() * self.mlog(psi) / self.voxelsize", "def 
log10(data):\n return _make.log10(data)", "def __convert_to_log(self):\n for i in range(self.nStates):\n if self.pi[i]>0:\n self.pi[i]=log(self.pi[i])\n else:\n self.pi[i]=float('-inf')\n for j in range(self.nStates):\n if self.t[i][j]>0:\n self.t[i][j]=log(self.t[i][j])\n else:\n self.t[i][j]=float('-inf')\n for j in range(self.nObs):\n if self.e[i][j]>0:\n self.e[i][j]=log(self.e[i][j])\n else:\n self.e[i][j]=float('-inf')\n self.logdomain=True" ]
[ "0.8148231", "0.7274531", "0.7241603", "0.7214562", "0.7204403", "0.7167102", "0.71177113", "0.6768097", "0.669359", "0.6662506", "0.66573983", "0.66425556", "0.66316724", "0.66110545", "0.6557691", "0.6542005", "0.6526384", "0.6520332", "0.6498638", "0.6453334", "0.6449421", "0.6434719", "0.6430832", "0.64077026", "0.6386738", "0.635827", "0.635643", "0.63361716", "0.6333567", "0.6330683", "0.6304013", "0.6294575", "0.6284556", "0.6269603", "0.6267071", "0.6251614", "0.6247318", "0.62352294", "0.6228356", "0.6223079", "0.62221414", "0.62195015", "0.6214859", "0.6213524", "0.6211597", "0.6211515", "0.6183768", "0.61830586", "0.6170116", "0.6158252", "0.612324", "0.61180997", "0.61180997", "0.6112702", "0.6102069", "0.6098807", "0.60960054", "0.60910416", "0.6089245", "0.6084294", "0.60739446", "0.60730547", "0.60690427", "0.6063732", "0.60542005", "0.6053674", "0.60532", "0.60530865", "0.60508484", "0.60427654", "0.6029681", "0.6027868", "0.6018275", "0.6012872", "0.60109174", "0.59937114", "0.59768736", "0.59759796", "0.5965413", "0.59545267", "0.59461015", "0.5937944", "0.59355706", "0.5935002", "0.59346116", "0.5916596", "0.5915761", "0.5914281", "0.59130234", "0.59122056", "0.59075963", "0.5904533", "0.58937854", "0.5889349", "0.58891493", "0.58885264", "0.5887892", "0.58878386", "0.5885412", "0.5882827", "0.58817595" ]
0.0
-1
Returns truncated iterated logarithm y = log( log(x) ) where if x<delta, x = delta and if 1-delta < x, x = 1-delta.
def ilog(x,delta): if(delta < x and x < 1.0 - delta): return np.log( -np.log(x) ) elif(x < delta): return np.log( -np.log(delta) ) else: return np.log( -np.log(1.0 - delta) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logit(x: torch.Tensor, eps=1e-5) -> torch.Tensor:\n x = torch.clamp(x, eps, 1.0 - eps)\n return torch.log(x / (1.0 - x))", "def safelog(x):\n #return np.log(x)\n return np.log(np.clip(x,floor,np.inf))", "def diff_log(x):\n \n return np.diff(np.log(x)),np.log(x)[0]", "def diff_log(x):\n\n return np.diff(np.log(x)),np.log(x)[0]", "def log_transform(x, epsilon = 1e-4):\n if x.min() < 0: epsilon += np.abs(x.min())\n return (x.fillna(0).astype(float) + epsilon).apply(np.log)", "def log(amount, start, stop, truncated, sequence):\n ratio = 10 ** (len(str(start)) + 1)\n for x in range(start, amount):\n # y = abs(round(math.log(x, 1)))\n y = abs(round(math.log1p(x) * ratio * 5))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence", "def logarithm(x, eps=10e-5):\n if abs(x) >= 1:\n return float('Nan')\n\n pre_x = x\n tmp = x ** 2\n sign = -1\n i = 2\n res_x = pre_x + sign * tmp / i\n\n while abs(res_x - pre_x) > eps:\n sign = -sign\n i += 1\n tmp *= x\n pre_x = res_x\n res_x += sign * tmp / i\n\n return res_x", "def log(self, x, base=2):\n if x == 0:\n return 0\n return math.log(x, base)", "def logaddexp(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n return torch.max(x, y) + torch.log(1 + torch.exp(-torch.abs(y - x)))", "def lognormalize(x, temp = 1):\n if type(x) is list: x = np.array(x)\n\n x = x - np.max(x)\n # anneal\n xp = np.power(np.exp(x), temp)\n return xp / xp.sum()", "def ln(x):\n return log(x, const.e)", "def log(x, base=math.e):\n return 0.0", "def log10_inplace(a):", "def safe_log(x):\n safe_x = jnp.where(x > 0.0, x, jnp.ones_like(x))\n return jnp.where(x > 0.0, jnp.log(safe_x), jnp.zeros_like(x))", "def logaddexp(X, Y):\n XY_max = T.maximum(X, Y)\n XY_min = T.minimum(X, Y)\n return XY_max + T.log1p(T.exp(XY_min - XY_max))", "def my_log(num):\n\n if num == 0.0:\n return -9999999999\n return math.log(num)", "def smart_log(self, value: float) -> float:\n if value > 0:\n return math.log(value, self.log_scale)\n elif value == 0:\n return 0\n elif value < 0:\n return -(math.log(abs(value), self.log_scale))", "def log(self, base):\n\n\t\tvalues = map(lambda x: x > 0, self.val)\n\t\tif not all(values):\n\t\t\traise ValueError(\"Non-positive number encountered in log.\")\n\t\telse:\n\t\t\tval = np.array([np.math.log(v, base) for v in self.val])\n\t\t\tif len(self.der.shape):\n\t\t\t\tto_multiply = 1 / np.multiply(np.log(base), self.val)\n\t\t\t\tto_multiply = np.expand_dims(to_multiply, 1) if len(self.der.shape) > len(to_multiply.shape) else to_multiply\n\t\t\t\tder = np.multiply(to_multiply, self.der)\n\t\t\telse:\n\t\t\t\tder = None\n\t\treturn Var(val, der)", "def lg(x: Union[int, float]) -> float:\n res = 0.0\n try:\n res = log(x, 2)\n except ValueError:\n pass\n return res", "def log10(x):\n return 0.0", "def _signed_log(x, base):\n return numpy.sign(x) * numpy.log10(numpy.abs(x)) / numpy.log10(base)", "def log2(x: float) -> float:\n return math.log2(x) if x > 0 else 0", "def log_inplace(a):", "def _loglike(self, y, f):\n ll = y * tf.log(pos(f)) + (1 - y) * tf.log(pos(1 - f))\n return ll", "def logit_link(x):\n\n return 1 / (1 + math.exp(-0.05 * x))\n # return 1 / (1 + math.exp(-0.01 * x))", "def logtrapz(logy, x=None, dx=1.0):\n n_intvls = logy.shape[0]-1\n loghalf = log(.5)\n if x is not None:\n logdel = x[1:] - x[0:-1]\n else:\n logdel = ones(n_intvls)*dx\n logdel = log(logdel)\n lo = logy[0] + loghalf + logdel[0]\n hi = logy[-1] + loghalf + logdel[-1]\n lsum = logaddexp(lo, hi)\n 
for i in xrange(1,n_intvls):\n lsum = logaddexp(lsum, logy[i] + logdel[i])\n return lsum", "def log_prior(x):\n logp = (-0.5 * x.pow(2) - torch.tensor(2 * math.pi).sqrt().log()).sum(dim=1)\n return logp", "def _logsumexp(x):\n # Search maximum.\n max_x = None\n length = len(x)\n for i in range(length):\n if max_x is None or x[i] > max_x:\n max_x = x[i]\n\n # Calculate sum of exponential differences.\n sum_exp = 0\n for i in range(length):\n diff = x[i] - max_x\n sum_exp += np.exp(diff)\n\n log_sum_exp = max_x + np.log(sum_exp)\n\n return log_sum_exp", "def log2_inplace(a):", "def log(base, real):\n return math.log(real, base)", "def log(tensor, base=np.e):\n if base == np.e:\n return _elementary_op(tensor, np.log, lambda x: 1 / x)\n return log(tensor) / log(base)", "def log_sum_exp(x):\n log_reduce_sum = P.ReduceSum()\n log = P.Log()\n exp = P.Exp()\n x_max = max(x.data)\n return log(log_reduce_sum(exp(x - x_max), 1)) + x_max", "def log1p(x):\n return 0.0", "def log10(a):", "def _log_add(*values):\n x = max(values)\n if x > -np.inf:\n sum_diffs = 0\n for value in values:\n sum_diffs += 2 ** (value - x)\n return x + np.log2(sum_diffs)\n else:\n return x", "def wrap_log(to_wrap):\r\n with np.errstate(divide='ignore'):\r\n result = np.log(to_wrap)\r\n return result", "def normalize_log_likelihoods(X):\n h, w = np.shape(X)\n return X - np.tile(logsumexp(X, axis=0), (h, 1))\n # return X - np.matlib.repmat(logsumexp(X, axis=0), h, 1)", "def logbasechange(a,b):\n return np.log(b)/np.log(a)", "def log(x, eps=1e-7, name=None):\n return tf.log(x + eps, name=name)", "def log1mexp(x: torch.Tensor) -> torch.Tensor:\n mask = (x < _log05).to(x.dtype)\n impl1 = torch.log1p(-torch.exp(x))\n impl2 = torch.log(-torch.expm1(x))\n return impl1 * mask + impl2 * (1 - mask)", "def log_shift(data):\n result = [np.log(1 + np.abs(d.copy())) for d in data]\n return result", "def linlogspace(start, stop, base=0.5, **kwargs) -> np.ndarray:\n p = (1 - np.logspace(0, 1, base=base, **kwargs)) * (1 / (1 - base))\n return (1 - p) * start + p * stop", "def log2(x):\n return math.log(x) / math.log(2)", "def log_prob_from_logits(x):\n axis = len(x.shape) - 1\n m = x.max(dim=axis, keepdim=True)[0]\n return x - m - torch.log(torch.exp(x - m).sum(dim=axis, keepdim=True))", "def started_log(X,params):\n \n offset = params['offset']\n if not is_number(params['offset']):\n offset=1.\n X = np.log(X+offset)\n return X", "def log_add(x, y):\n maximum = np.maximum(x,y)\n minimum = np.minimum(x,y)\n if(np.abs(maximum - minimum) > 30):\n # the difference is too small, return the just the maximum\n return maximum\n return maximum + np.log1p(np.exp(minimum - maximum))", "def log_sum_exp(x):\n x_max = x.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max", "def log2(x):\n raise NotImplementedError", "def log_norm(log_x):\n c = np.max(log_x)\n\n if np.isinf(c):\n return c\n\n sum_exp = 0\n\n for x in log_x:\n sum_exp += np.exp(x - c)\n\n log_sum_exp = np.log(sum_exp)\n\n log_Z = log_sum_exp + c\n\n return log_Z", "def log_poisson(k, l):\n return k*np.log(l) -l - gammaln(k+1)", "def log_sum_exp(x):\n x_max = x.data.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max", "def log_sum_exp(x):\n x_max = x.data.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max", "def unifLogOne(self, x=np.array([]), low=0., hi=100.):\n \n const = 1.0/low**2. 
- 1.0/hi**2.\n lnPrior = np.log(const) - np.log(x)\n bOut = (x <= low) | (x > hi)\n lnPrior[bOut] = -np.inf\n\n return lnPrior", "def trunc_gumbel(logits, truncation):\n gumbels = np.random.gumbel(np.zeros_like(logits)) + logits\n return -np.log(np.exp(-gumbels) + np.exp(-truncation))", "def logarithmic():\n return Equivalency(\n [(dimensionless_unscaled, function_units.dex, np.log10, lambda x: 10.0**x)],\n \"logarithmic\",\n )", "def log10(tensor):\n return log(tensor, base=10)", "def dlogdp(self):\n return np.log10(self.bins[:, -1]) - np.log10(self.bins[:, 0])", "def log_likelihood(self, x):\n return self.log_likelihood_exp(x) + self.log_prior_nuisance_parameters(x)", "def logspace_from_lin(start, stop, num=50):\n unit = unit_of(start)\n start_ = np.log2(to_unitless(start, unit))\n stop_ = np.log2(to_unitless(stop, unit))\n return np.exp2(np.linspace(start_, stop_, num)) * unit", "def logp(self, x):\n pass", "def Log(num):\n return math.log(float(num))", "def log_trans(vec):\n m = vec[vec != 0]\n c = int(np.log(min(m)))\n d = 10 ^ c\n return np.log(vec + d) - c", "def _log_add(logx: float, logy: float) -> float:\n a, b = min(logx, logy), max(logx, logy)\n if a == -np.inf: # adding 0\n return b\n # Use exp(a) + exp(b) = (exp(a - b) + 1) * exp(b)\n return math.log1p(math.exp(a - b)) + b # log1p(x) = log(x + 1)", "def _call(self, x):\n if self.prior is None:\n tmp = ((x - 1 - np.log(x)).inner(self.domain.one()))\n else:\n # This is the old line from odl version 0.6.0.\n # tmp = ((x - self.prior + self.prior * np.log(self.prior / x))\n tmp = ((x - self.prior + self.prior * np.log((self.prior + 1e-12) / x))\n .inner(self.domain.one()))\n if np.isnan(tmp):\n # In this case, some element was less than or equal to zero\n return np.inf\n else:\n return tmp", "def log_target(self, x):\n return self.log_likelihood_exp(x) + self.log_prior_parameters(x) + self.log_prior_wilson_coeffs(x)", "def weight_log(val):\n return val * math.log(val)", "def _log_sub(logx: float, logy: float) -> float:\n if logx < logy:\n raise ValueError(\"The result of subtraction must be non-negative.\")\n if logy == -np.inf: # subtracting 0\n return logx\n if logx == logy:\n return -np.inf # 0 is represented as -np.inf in the log space.\n\n try:\n # Use exp(x) - exp(y) = (exp(x - y) - 1) * exp(y).\n return math.log(math.expm1(logx - logy)) + logy # expm1(x) = exp(x) - 1\n except OverflowError:\n return logx", "def logsumexp_trick(sum_term):\n max_term = np.max(sum_term)\n return max_term + np.log(np.sum(np.exp(sum_term-max_term)))", "def log_cust(x):\n if type(x) != str:\n if x < 0:\n return 0\n elif x == 0:\n return 0\n elif x > 0:\n return np.log(x)", "def log_prob_from_logits(x):\n axis = len(x.get_shape()) - 1\n m = tf.reduce_max(x, axis, keep_dims=True)\n return x - m - tf.log(tf.reduce_sum(tf.exp(x - m), axis, keep_dims=True))", "def _loglike(self, y, f):\n ll = -0.5 * (tf.log(2 * self.variance * np.pi) +\n (y - f)**2 / self.variance)\n return ll", "def log_prob_from_logits(x):\n axis = len(x.get_shape())-1\n m = tf.reduce_max(x, axis, keep_dims=True)\n return x - m - tf.log(tf.reduce_sum(tf.exp(x-m), axis, keep_dims=True))", "def log_i0(x):\n return pt.switch(\n pt.lt(x, 5),\n pt.log1p(\n x**2.0 / 4.0\n + x**4.0 / 64.0\n + x**6.0 / 2304.0\n + x**8.0 / 147456.0\n + x**10.0 / 14745600.0\n + x**12.0 / 2123366400.0\n ),\n x\n - 0.5 * pt.log(2.0 * np.pi * x)\n + pt.log1p(\n 1.0 / (8.0 * x)\n + 9.0 / (128.0 * x**2.0)\n + 225.0 / (3072.0 * x**3.0)\n + 11025.0 / (98304.0 * x**4.0)\n ),\n )", "def logpowerlaw(x, p=default()):\n 
xtr, ytr, gradtr = logcontinuity(p)\n power = p[3]\n x0 = xtr - power/gradtr\n b = ytr - power*np.log(xtr-x0)\n return b + power*np.log(x-x0)", "def get_log(p):\n if p==0:\n return 0.\n return p*np.log2(p)", "def _loglike(self, y, f):\n bincoef = tf.lgamma(self.n + 1) - tf.lgamma(y + 1) \\\n - tf.lgamma(self.n - y + 1)\n ll = bincoef + y * tf.log(pos(f)) + (self.n - y) * tf.log(pos(1 - f))\n return ll", "def my_loglike(theta, x, data, sigma):\n\n model = my_model(theta, x)\n\n return -0.5*len(x)*np.log(2*math.pi*sigma**2) - (0.5/sigma**2) * np.sum((data-model)**2)", "def addlogs(a,b):\n \n if a>b:\n return a + np.log(1+np.exp(b-a))\n else:\n return b + np.log(1+np.exp(a-b))", "def mpf_log(x, prec, rnd=round_fast):\n sign, man, exp, bc = x\n #------------------------------------------------------------------\n # Handle special values\n if not man:\n if x == fzero: return fninf\n if x == finf: return finf\n if x == fnan: return fnan\n if sign:\n raise ComplexResult(\"logarithm of a negative number\")\n wp = prec + 20\n #------------------------------------------------------------------\n # Handle log(2^n) = log(n)*2.\n # Here we catch the only possible exact value, log(1) = 0\n if man == 1:\n if not exp:\n return fzero\n return from_man_exp(exp*ln2_fixed(wp), -wp, prec, rnd)\n mag = exp+bc\n abs_mag = abs(mag)\n #------------------------------------------------------------------\n # Handle x = 1+eps, where log(x) ~ x. We need to check for\n # cancellation when moving to fixed-point math and compensate\n # by increasing the precision. Note that abs_mag in (0, 1) <=>\n # 0.5 < x < 2 and x != 1\n if abs_mag <= 1:\n # Calculate t = x-1 to measure distance from 1 in bits\n tsign = 1-abs_mag\n if tsign:\n tman = (MPZ_ONE<<bc) - man\n else:\n tman = man - (MPZ_ONE<<(bc-1))\n tbc = bitcount(tman)\n cancellation = bc - tbc\n if cancellation > wp:\n t = normalize(tsign, tman, abs_mag-bc, tbc, tbc, 'n')\n return mpf_perturb(t, tsign, prec, rnd)\n else:\n wp += cancellation\n # TODO: if close enough to 1, we could use Taylor series\n # even in the AGM precision range, since the Taylor series\n # converges rapidly\n #------------------------------------------------------------------\n # Another special case:\n # n*log(2) is a good enough approximation\n if abs_mag > 10000:\n if bitcount(abs_mag) > wp:\n return from_man_exp(exp*ln2_fixed(wp), -wp, prec, rnd)\n #------------------------------------------------------------------\n # General case.\n # Perform argument reduction using log(x) = log(x*2^n) - n*log(2):\n # If we are in the Taylor precision range, choose magnitude 0 or 1.\n # If we are in the AGM precision range, choose magnitude -m for\n # some large m; benchmarking on one machine showed m = prec/20 to be\n # optimal between 1000 and 100,000 digits.\n if wp <= LOG_TAYLOR_PREC:\n m = log_taylor_cached(lshift(man, wp-bc), wp)\n if mag:\n m += mag*ln2_fixed(wp)\n else:\n optimal_mag = -wp//LOG_AGM_MAG_PREC_RATIO\n n = optimal_mag - mag\n x = mpf_shift(x, n)\n wp += (-optimal_mag)\n m = -log_agm(to_fixed(x, wp), wp)\n m -= n*ln2_fixed(wp)\n return from_man_exp(m, -wp, prec, rnd)", "def log_sum_exp(x, dim=0):\n max_x = torch.max(x, dim)[0]\n new_x = x - max_x.unsqueeze(dim).expand_as(x)\n return max_x + (new_x.exp().sum(dim)).log()", "def log1p(x):\n return Log1p().apply((x,))[0]", "def _calculate_ll(self, x):\n observation_log_probs = self._observation_log_probs(x, mask=None)\n forward_log_probs = self._forward(observation_log_probs)\n log_likelihood = logsumexp(\n 
forward_log_probs[forward_log_probs.shape[0] - 1, :].numpy())\n return log_likelihood", "def logrels(rets):\n return np.log(rets + 1)", "def poisson_log_likelihood(x, log_rate):\n return x * log_rate - np.exp(log_rate) - lax.lgamma(x + 1.0)", "def _get_log_energy(strided_input, epsilon, energy_floor):\n log_energy = torch.max(strided_input.pow(2).sum(1), epsilon).log() # size (m)\n if energy_floor == 0.0:\n return log_energy\n else:\n return torch.max(log_energy,\n torch.tensor(math.log(energy_floor), dtype=torch.get_default_dtype()))", "def _log_util(chips: float,\n bet_size: float,\n payout: float) -> float:\n if chips <= 0 or chips + payout*bet_size <= 0:\n return MIN_REWARD\n return max(math.log(1.0 + chips + payout*bet_size) - math.log(1.0 + chips),\n MIN_REWARD)", "def logadd(logx, logy):\n\n if logy > logx:\n logx, logy = logy, logx\n\n if logx == -float(\"inf\"):\n return logx\n\n diff = logy - logx\n if diff < -53: # does not make a difference at least in python 2.7.6\n return logx\n\n return logx + log2(1.0 + 2**diff)", "def log_likelihood_exp(self, x):\n predictions = self.get_predictions(x)\n ll = 0.\n for measurement in self.get_measurements:\n m_obj = flavio.Measurement[measurement]\n m_obs = m_obj.all_parameters\n exclude_observables = set(m_obs) - set(self.observables)\n prob_dict = m_obj.get_logprobability_all(predictions, exclude_parameters=exclude_observables)\n ll += sum(prob_dict.values())\n return ll", "def _convert_normlogprice(self, series):\n try:\n return np.log(series.div(series[0]))\n except:\n raise TypeError('ERROR: Could not transform prices to log function. Check price history data.')", "def log2(a):", "def _log_posterior_x(self, X):\r\n LL = self.log_likelihood(X=X)\r\n LP = self._log_prior_x(X)\r\n return LL + LP", "def log_prob_easier(self, x):\n normalization_const = -0.5 * tf.math.log(2 * np.pi) - tf.math.log(self.stddev)\n sq_term = - 0.5 * ((x - self.mean) / self.stddev) ** 2\n l_prob = tf.math.reduce_sum(normalization_const + sq_term, axis=1)\n return l_prob", "def log2_python(x):\n\n if (x == 0):\n return -float(\"inf\")\n\n return x.bit_length() - 1", "def log1p_exp(x):\n x_ = x * x.ge(0).to(torch.float32)\n res = x_ + torch.log1p(torch.exp(-torch.abs(x)))\n return res", "def nloglikeobs(self, params):\n #print len(params),\n beta = params[:-2]\n df = params[-2]\n scale = params[-1]\n loc = np.dot(self.exog, beta)\n endog = self.endog\n x = (endog - loc)/scale\n #next part is stats.t._logpdf\n lPx = sps_gamln((df+1)/2) - sps_gamln(df/2.)\n lPx -= 0.5*np_log(df*np_pi) + (df+1)/2.*np_log(1+(x**2)/df)\n lPx -= np_log(scale) # correction for scale\n return -lPx", "def log_sum_exp(self, x):\n b = numpy.max(x[(x<sys.maxsize)]) # ignore inf values\n\n s = b + numpy.log(numpy.sum(numpy.exp(x-b)))\n\n return s", "def log(x, err=defaultError):\n if (defaultError >= err) or isinstance(err, AbsoluteError):\n if isinstance(err, RelativeError):\n _err = real.RelativeError(0, err.relativeerrorrange, 2)\n elif isinstance(err, AbsoluteError):\n _err = real.AbsoluteError(0, err.absoluteerrorrange, 2)\n if x in real.theRealField:\n x = +x\n if x > 0:\n return real.log(x, err=_err)\n elif x < 0:\n return Complex(real.log(abs(x), _err), real.pi(_err))\n return Complex(real.log(abs(x), err=_err), real.atan2(x.imag, x.real, _err))\n else:\n return Complex(cmath.log(complex(x.real, x.imag)))", "def logtomo(self, psi):\n return -1j / self.wavenumber() * self.mlog(psi) / self.voxelsize", "def log10(data):\n return _make.log10(data)", "def __convert_to_log(self):\n for i in 
range(self.nStates):\n if self.pi[i]>0:\n self.pi[i]=log(self.pi[i])\n else:\n self.pi[i]=float('-inf')\n for j in range(self.nStates):\n if self.t[i][j]>0:\n self.t[i][j]=log(self.t[i][j])\n else:\n self.t[i][j]=float('-inf')\n for j in range(self.nObs):\n if self.e[i][j]>0:\n self.e[i][j]=log(self.e[i][j])\n else:\n self.e[i][j]=float('-inf')\n self.logdomain=True" ]
[ "0.72744215", "0.7240807", "0.7214393", "0.72042274", "0.716658", "0.711701", "0.6767486", "0.6693635", "0.6662013", "0.6657874", "0.66419184", "0.66316617", "0.66109407", "0.6556733", "0.6541151", "0.65251803", "0.6519674", "0.6498009", "0.645293", "0.6449499", "0.64343315", "0.64312285", "0.64077866", "0.6386402", "0.63583314", "0.63548434", "0.63365805", "0.63337666", "0.63309705", "0.6303123", "0.6294047", "0.6284663", "0.62713015", "0.6267282", "0.6250981", "0.62455416", "0.6235491", "0.62289655", "0.62223154", "0.62219256", "0.6218812", "0.621392", "0.6213806", "0.6212332", "0.62112993", "0.61833274", "0.6183046", "0.6170297", "0.6158339", "0.6123104", "0.6118103", "0.6118103", "0.6113181", "0.6102258", "0.6097456", "0.6095487", "0.6093019", "0.6089082", "0.60824823", "0.60750747", "0.6072256", "0.60695046", "0.6063484", "0.6055214", "0.6053372", "0.6053166", "0.6052913", "0.6050826", "0.60422087", "0.60304254", "0.602744", "0.601904", "0.60124516", "0.6010478", "0.5993571", "0.5976852", "0.59751517", "0.5965208", "0.595413", "0.59458995", "0.5938453", "0.5936067", "0.59348637", "0.5934038", "0.59169775", "0.59169036", "0.591354", "0.59132487", "0.59114605", "0.5908239", "0.5905125", "0.5894909", "0.5889119", "0.58891016", "0.58886844", "0.58884346", "0.58875996", "0.5885607", "0.5881963", "0.5881738" ]
0.8147607
0
Create a 3D rotation matrix for rotation about the x-axis. (1 0 0 ) R(theta) = (0 cos(x) sin(x)) (0 -sin(x) cos(x))
def rotation3Dx(theta): rmat = np.zeros((3,3)) rmat[0,0], rmat[0,1], rmat[0,2] = 1.0, 0.0, 0.0 rmat[1,0], rmat[1,1], rmat[1,2] = 0.0, np.cos(theta), np.sin(theta) rmat[2,0], rmat[2,1], rmat[2,2] = 0.0, -np.sin(theta), np.cos(theta) return rmat
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matrix_rotate_3d_x(deg: float) -> np.matrix:\n from numpy import cos, sin, pi\n rad_x = -deg * pi/180\n c_x = cos(rad_x)\n s_x = sin(rad_x)\n return np.matrix([[1, 0, 0], [0, c_x, -s_x], [0, s_x, c_x]])", "def rotation3D_x(angle: float) -> np.array:\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[1.0, 0.0, 0.0], [0.0, c, -s], [0.0, s, c]])", "def rotation_matrix3(angle_x=0, angle_y=0, angle_z=0):\n if angle_x != 0:\n c, s = cos(angle_x), sin(angle_x)\n r = np.array([[1, 0, 0], [0, c, -s], [0, s, c]])\n else:\n r = np.identity(3)\n\n if angle_y != 0:\n c, s = cos(angle_y), sin(angle_y)\n r = r.dot(np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]]))\n\n if angle_z != 0:\n c, s = cos(angle_z), sin(angle_z)\n r = r.dot(np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]]))\n\n return r", "def rotation3Dz(theta):\n rmat = np.zeros((3,3))\n rmat[0,0] = rmat[1,1] = np.cos(theta)\n rmat[0,1] = np.sin(theta)\n rmat[1,0] = -rmat[0,1]\n rmat[2,2] = 1\n return rmat", "def rotation_matrix_3x3_axis(angle, axis):\n assert axis.lower() in ['x','y','z']\n assert -180.0 <= angle <= 180.0\n angle_r = angle * (np.pi / 180.0)\n sa = np.sin(angle_r)\n ca = np.cos(angle_r)\n\n if axis == 'x':\n R = np.array([ [1, 0, 0],\n [0, ca, -sa],\n [0, sa, ca],\n ])\n elif axis == 'y':\n R = np.array([ [ca, 0, sa],\n [0, 1, 0],\n [-sa, 0, ca],\n ])\n elif axis == 'z':\n R = np.array([ [ca, -sa, 0],\n [sa, ca, 0],\n [0, 0, 1],\n ])\n return R", "def rotation_matrix3(axis, theta):\n R = np.eye(3)\n c = math.cos(theta)\n s = math.sin(theta)\n a1 = (axis + 1) % 3\n a2 = (axis + 2) % 3\n R[a1, a1] = c\n R[a1, a2] = -s\n R[a2, a1] = s\n R[a2, a2] = c\n return np.matrix(R)", "def rotation_mat(self) -> np.ndarray:\n rot = np.zeros((3, 3))\n\n txx = 2 * self.x * self.x\n tyy = 2 * self.y * self.y\n tzz = 2 * self.z * self.z\n twx = 2 * self.w * self.x\n twy = 2 * self.w * self.y\n twz = 2 * self.w * self.z\n txy = 2 * self.x * self.y\n txz = 2 * self.x * self.z\n tyz = 2 * self.y * self.z\n\n rot[0, 0] = 1. - tyy - tzz\n rot[0, 1] = txy - twz\n rot[0, 2] = txz + twy\n rot[1, 0] = txy + twz\n rot[1, 1] = 1. - txx - tzz\n rot[1, 2] = tyz - twx\n rot[2, 0] = txz - twy\n rot[2, 1] = tyz + twx\n rot[2, 2] = 1. 
- txx - tyy\n\n return rot", "def rotate3(x, angle_x=0, angle_y=0, angle_z=0, origin=(0, 0, 0)):\n origin = np.asarray(origin)\n x = np.asarray(x) - origin\n r = rotation_matrix3(angle_x, angle_y, angle_z)\n return x.dot(r.T) + origin", "def rotate(self, x=0, y=0, z=0):\n\t\tquaternion = R.from_euler('xyz', [x, y, z], degrees=True)\n\t\trotation_matrix = np.array(quaternion.as_matrix())\n\t\trotation_matrix = np.pad(rotation_matrix, [(0, 1), (0, 1)], mode='constant')\n\t\trotation_matrix[3,3] = 1\n\n\t\tself.matrix = np.matmul(self.matrix, rotation_matrix)", "def create_rotation_matrix_3d(angles) -> np.array:\n\n mat1 = np.array([[1., 0., 0.],\n [0., math.cos(angles[0]), math.sin(angles[0])],\n [0., -math.sin(angles[0]), math.cos(angles[0])]],\n dtype='float')\n\n mat2 = np.array([[math.cos(angles[1]), 0., -math.sin(angles[1])],\n [0., 1., 0.],\n [math.sin(angles[1]), 0., math.cos(angles[1])]],\n dtype='float')\n\n mat3 = np.array([[math.cos(angles[2]), math.sin(angles[2]), 0.],\n [-math.sin(angles[2]), math.cos(angles[2]), 0.],\n [0., 0., 1.]],\n dtype='float')\n\n mat = (mat1 @ mat2) @ mat3\n return mat", "def matrix_rotate_3d_z(deg: float) -> np.matrix:\n from numpy import cos, sin, pi\n rad_z = -deg * pi/180\n c_z = cos(rad_z)\n s_z = sin(rad_z)\n return np.matrix([[c_z, -s_z, 0], [s_z, c_z, 0], [0, 0, 1]])", "def get_3drotation_matrix(axis, angle):\n angle = angle #*-1\n norm = np.linalg.norm(np.array(axis))\n if norm > 0:\n axis /= norm\n ax, ay, az = axis[0], axis[1], axis[2]\n cos, sin = np.cos(angle), np.sin(angle)\n rotmat = np.array([[cos + ax * ax * (1 - cos), ax * ay * (1 - cos) - az * sin, ax * az * (1 - cos) + ay * sin],\n [ay * ax * (1 - cos) + az * sin, cos + ay * ay * (1 - cos), ay * az * (1 - cos) - ax * sin],\n [az * ax * (1 - cos) - ay * sin, az * ay * (1 - cos) + ax * sin, cos + az * az * (1 - cos)]])\n return rotmat", "def x_rotmat(theta):\n cos_t = np.cos(theta)\n sin_t = np.sin(theta)\n return np.array([[1, 0, 0],\n [0, cos_t, -sin_t],\n [0, sin_t, cos_t]])", "def rotation_matrix( axis, angle ):\n\n # Trig factors.\n ca = cos(angle)\n sa = sin(angle)\n C = 1 - ca\n\n # Depack the axis.\n x, y, z = tuple( axis )\n\n # Multiplications (to remove duplicate calculations).\n xs = x*sa\n ys = y*sa\n zs = z*sa\n xC = x*C\n yC = y*C\n zC = z*C\n xyC = x*yC\n yzC = y*zC\n zxC = z*xC\n\n # Update the rotation matrix.\n matrix \t = np.zeros( (3,3) )\n matrix[0, 0] = x*xC + ca\n matrix[0, 1] = xyC - zs\n matrix[0, 2] = zxC + ys\n matrix[1, 0] = xyC + zs\n matrix[1, 1] = y*yC + ca\n matrix[1, 2] = yzC - xs\n matrix[2, 0] = zxC - ys\n matrix[2, 1] = yzC + xs\n matrix[2, 2] = z*zC + ca\n return matrix", "def rotation3D_z(angle: float) -> np.array:\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])", "def rotation(self, angle, axis):\r\n\r\n sqr_a = axis.x*axis.x\r\n sqr_b = axis.y*axis.y\r\n sqr_c = axis.z*axis.z\r\n len2 = sqr_a+sqr_b+sqr_c\r\n\r\n k2 = math.cos(angle)\r\n k1 = (1.0-k2)/len2\r\n k3 = math.sin(angle)/math.sqrt(len2)\r\n k1ab = k1*axis.x*axis.y\r\n k1ac = k1*axis.x*axis.z\r\n k1bc = k1*axis.y*axis.z\r\n k3a = k3*axis.x\r\n k3b = k3*axis.y\r\n k3c = k3*axis.z\r\n\r\n return mat4( k1*sqr_a+k2, k1ab-k3c, k1ac+k3b, 0.0,\r\n k1ab+k3c, k1*sqr_b+k2, k1bc-k3a, 0.0,\r\n k1ac-k3b, k1bc+k3a, k1*sqr_c+k2, 0.0,\r\n 0.0, 0.0, 0.0, 1.0)", "def generate_rotation_matrix(x_angle, y_angle, z_angle):\n return np.array([\n [1, 0, 0],\n [0, np.cos(x_angle), -np.sin(x_angle)],\n [0, np.sin(x_angle), np.cos(x_angle)],\n ]).dot([\n 
[np.cos(y_angle), 0, np.sin(y_angle)],\n [0, 1, 0],\n [-np.sin(y_angle), 0, np.cos(y_angle)],\n ]).dot([\n [np.cos(z_angle), -np.sin(z_angle), 0],\n [np.sin(z_angle), np.cos(z_angle), 0],\n [0, 0, 1],\n ]).tolist()", "def random_rotation_matrix():\n\n x = np.random.uniform(size=3)\n theta = x[0]*2*math.pi\n phi = x[1]*2*math.pi\n z = x[2]*2\n\n r = math.sqrt(z)\n vx = math.sin(phi)*r\n vy = math.cos(phi)*r\n vz = math.sqrt(2.0-z)\n\n st = math.sin(theta)\n ct = math.cos(theta)\n\n sx = vx*ct-vy*st\n sy = vx*st+vy*ct\n\n return np.array([[vx*sx-ct, vx*sy-st, vx*vz],\n [vy*sx+st, vy*sy-ct, vy*vz],\n [vz*sx,vz*sy,1.0-z]])", "def RotationMatrix(theta, x, y, z, point=None):\n\treturn mach.rotation_matrix(theta, [x, y, z])", "def zx_rotation(vector,theta):\r\n R = np.array([[np.cos(theta),0,np.sin(theta)],\r\n [0,1,0],\r\n [-np.sin(theta),0,np.cos(theta)]\r\n ])\r\n return np.dot(R,vector)", "def _rot(axis, angle):\n if axis == 1:\n return Matrix([[1, 0, 0],\n [0, cos(angle), -sin(angle)],\n [0, sin(angle), cos(angle)]])\n elif axis == 2:\n return Matrix([[cos(angle), 0, sin(angle)],\n [0, 1, 0],\n [-sin(angle), 0, cos(angle)]])\n elif axis == 3:\n return Matrix([[cos(angle), -sin(angle), 0],\n [sin(angle), cos(angle), 0],\n [0, 0, 1]])", "def rotation_matrix_xyz(axis, angle, angle_dim):\n assert angle_dim is \"deg\" or angle_dim is \"rad\"\n assert axis is \"x\" or axis is \"y\" or axis is \"z\"\n x = 0\n y = 0\n z = 0\n\n if angle_dim is \"deg\":\n a = np.deg2rad(angle)\n else:\n a = angle\n\n if axis is \"x\":\n x = 1\n y = 0\n z = 0\n if axis is \"y\":\n x = 0\n y = 1\n z = 0\n if axis is \"z\":\n x = 0\n y = 0\n z = 1\n\n s = np.sin(a)\n c = np.cos(a)\n rotation_matrix = np.array([[c + x ** 2 * (1 - c), x * y * (1 - c) - z * s, x * z * (1 - c) + y * s],\n [y * x * (1 - c) + z * s, c + y ** 2 * (1 - c), y * z * (1 - c) - x * s],\n [z * x * (1 - c) - y * s, z * y * (1 - c) + x * s, c + z ** 2 * (1 - c)]])\n\n return rotation_matrix", "def rotationMatrix(self):\n\n R = Compute3DRotationMatrix(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],\n self.exteriorOrientationParameters[5])\n\n return R", "def rotation_matrix(angle, axis):\n about_z = rotation_about_z(angle)\n z_to_axis = z_to_vector(axis)\n axis_to_z = np.linalg.inv(z_to_axis)\n return reduce(np.dot, [z_to_axis, about_z, axis_to_z])", "def RotationX(theta):\n\n return Rotation([1., 0., 0.], theta)", "def rot_x(theta):\n theta_rad = np.radians(theta)\n rotation_matrix = [[1, 0, 0],\n [0, np.cos(theta_rad), -np.sin(theta_rad)],\n [0, np.sin(theta_rad), np.cos(theta_rad)]]\n return np.matrix(rotation_matrix)", "def rotateAlignXform(self):\n r = np.concatenate(([self.x], [self.y], [self.z]), 0)\n r = np.concatenate((r, np.array([[0,0,0]])), 0)\n r = np.concatenate((r, np.array([0,0,0,1]).reshape(-1,1)), 1)\n return r", "def rotation3Dy(theta):\n rmat = np.zeros((3,3))\n rmat[0,0], rmat[0,1], rmat[0,2] = np.cos(theta), 0.0, -np.sin(theta)\n rmat[1,0], rmat[1,1], rmat[1,2] = 0.0, 1.0, 0.0\n rmat[2,0], rmat[2,1], rmat[2,2] = np.sin(theta), 0.0, np.cos(theta)\n\n return rmat", "def rotateX(self, angle):\r\n rad = angle * math.pi / 180\r\n cosa = math.cos(rad)\r\n sina = math.sin(rad)\r\n y = self.y * cosa - self.z * sina\r\n z = self.y * sina + self.z * cosa\r\n return Point3D(self.x, y, z)", "def so3_matrix_generator(axis, theta):\n theta = np.asarray(theta)\n\n theta = theta[:, None, None]\n x, y, z = axis.T\n zero = np.zeros_like(x)\n k = np.stack([zero, -z, y, z, zero, -x, -y, x, zero], 1).reshape((-1, 3, 3))\n 
rot = np.eye(3)[None] + np.sin(theta) * k + (1 - np.cos(theta)) * k @ k\n\n return rot", "def z_rotmat(theta):\n cos_t = np.cos(theta)\n sin_t = np.sin(theta)\n return np.array([[cos_t, -sin_t, 0],\n [sin_t, cos_t, 0],\n [0, 0, 1]])", "def rotateX(self, angle):\n rad = angle * math.pi / 180\n cosa = math.cos(rad)\n sina = math.sin(rad)\n y = self.y * cosa - self.z * sina\n z = self.y * sina + self.z * cosa\n return Point3D(self.x, y, z)", "def rotateX(self, angle):\n rad = angle * math.pi / 180\n cosa = math.cos(rad)\n sina = math.sin(rad)\n y = self.y * cosa - self.z * sina\n z = self.y * sina + self.z * cosa\n return Point3D(self.x, y, z)", "def rotateX(self, angle):\n rad = angle * math.pi / 180\n cosa = math.cos(rad)\n sina = math.sin(rad)\n y = self.y * cosa - self.z * sina\n z = self.y * sina + self.z * cosa\n return Point3D(self.x, y, z)", "def rotateX(self, angle):\n rad = angle * math.pi / 180\n cosa = math.cos(rad)\n sina = math.sin(rad)\n y = self.y * cosa - self.z * sina\n z = self.y * sina + self.z * cosa\n return Point3D(self.x, y, z)", "def quaternion2rot3d(quat):\n q01 = quat[0] * quat[1]\n q02 = quat[0] * quat[2]\n q03 = quat[0] * quat[3]\n q11 = quat[1] * quat[1]\n q12 = quat[1] * quat[2]\n q13 = quat[1] * quat[3]\n q22 = quat[2] * quat[2]\n q23 = quat[2] * quat[3]\n q33 = quat[3] * quat[3]\n\n # Obtain the rotation matrix\n rotation = np.zeros((3, 3))\n rotation[0, 0] = (1. - 2. * (q22 + q33))\n rotation[0, 1] = 2. * (q12 - q03)\n rotation[0, 2] = 2. * (q13 + q02)\n rotation[1, 0] = 2. * (q12 + q03)\n rotation[1, 1] = (1. - 2. * (q11 + q33))\n rotation[1, 2] = 2. * (q23 - q01)\n rotation[2, 0] = 2. * (q13 - q02)\n rotation[2, 1] = 2. * (q23 + q01)\n rotation[2, 2] = (1. - 2. * (q11 + q22))\n\n return rotation", "def transform3D(x: float, y: float, z: float, R: np.array) -> np.array:\n T = np.zeros((4, 4))\n T[:3, :3] = R\n T[:, 3] = [x, y, z, 1.0]\n\n return T", "def rotation_matrix(axis,theta):\n\taxis = np.asarray(axis)\n\ttheta = np.asarray(theta)\n\tif np.all(axis==0): return np.identity(3) \n\taxis = axis/np.sqrt(np.dot(axis,axis))\n\ta = np.cos(theta/2)\n\tb, c, d = -axis*np.sin(theta/2)\n\taa, bb, cc, dd = a*a, b*b, c*c, d*d\n\tbc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d\n\treturn np.array([[aa+bb-cc-dd,2*(bc+ad),2*(bd-ac)],[2*(bc-ad),aa+cc-bb-dd,2*(cd+ab)],\n\t\t[2*(bd+ac),2*(cd-ab),aa+dd-bb-cc]])", "def rotateX(self, angle):\n rad = math.radians(angle)\n cosa = math.cos(rad)\n sina = math.sin(rad)\n y = self.y * cosa - self.z * sina\n z = self.y * sina + self.z * cosa\n return Point3D(self.x, y, z)", "def axisAnglesToRotMat(xrot, yrot, zrot):\n\n xmat = np.eye(3)\n ymat = np.eye(3)\n zmat = np.eye(3)\n\n xmat[1, 1] = np.cos(xrot)\n xmat[1, 2] = -np.sin(xrot)\n xmat[2, 1] = np.sin(xrot)\n xmat[2, 2] = np.cos(xrot)\n\n ymat[0, 0] = np.cos(yrot)\n ymat[0, 2] = np.sin(yrot)\n ymat[2, 0] = -np.sin(yrot)\n ymat[2, 2] = np.cos(yrot)\n\n zmat[0, 0] = np.cos(zrot)\n zmat[0, 1] = -np.sin(zrot)\n zmat[1, 0] = np.sin(zrot)\n zmat[1, 1] = np.cos(zrot)\n\n return concat(zmat, ymat, xmat)", "def axisAnglesToRotMat(xrot, yrot, zrot):\n\n xmat = np.eye(3)\n ymat = np.eye(3)\n zmat = np.eye(3)\n\n xmat[1, 1] = np.cos(xrot)\n xmat[1, 2] = -np.sin(xrot)\n xmat[2, 1] = np.sin(xrot)\n xmat[2, 2] = np.cos(xrot)\n\n ymat[0, 0] = np.cos(yrot)\n ymat[0, 2] = np.sin(yrot)\n ymat[2, 0] = -np.sin(yrot)\n ymat[2, 2] = np.cos(yrot)\n\n zmat[0, 0] = np.cos(zrot)\n zmat[0, 1] = -np.sin(zrot)\n zmat[1, 0] = np.sin(zrot)\n zmat[1, 1] = np.cos(zrot)\n\n return concat(zmat, ymat, xmat)", 
"def rotateXMatrix(self, radians):\n\n c = np.cos(radians)\n s = np.sin(radians)\n return np.array([[1, 0, 0, 0],\n [0, c,-s, 0],\n [0, s, c, 0],\n [0, 0, 0, 1]])", "def rotation_matrix(self):\n return self.affine_matrix[0:3][:, 0:3]", "def rotation_matrix(rx, ry, rz):\n # Convert from degrees to radians.\n rx = np.pi * rx / 180\n ry = np.pi * ry / 180\n rz = np.pi * rz / 180\n\n # Pre-compute sine and cosine of angles.\n cx, cy, cz = np.cos([rx, ry, rz])\n sx, sy, sz = np.sin([rx, ry, rz])\n\n # Set up euler rotations.\n Rx = np.array([[1, 0, 0, 0],\n [0, cx, -sx, 0],\n [0, sx, cx, 0],\n [0, 0, 0, 1]])\n\n Ry = np.array([[cy, 0, sy, 0],\n [0, 1, 0, 0],\n [-sy, 0, cy, 0],\n [0, 0, 0, 1]])\n\n Rz = np.array([[cz, -sz, 0, 0],\n [sz, cz, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n\n return Rz.dot(Ry.dot(Rx))", "def rotateX(self, angle):\n\t\trad = angle * math.pi / 180\n\t\tcosa = math.cos(rad)\n\t\tsina = math.sin(rad)\n\t\ty = self.y * cosa - self.z * sina\n\t\tz = self.y * sina + self.z * cosa\n\t\treturn Point3D(self.x, y, z)", "def rotation(x,y,z):\r\n phi = np.arctan(z/sqrt(x**2+y**2))\r\n lamb = np.arctan2(y,x)\r\n G = np.array([[-sin(lamb), cos(lamb), 0],\r\n [-sin(phi) * cos(lamb), -sin(phi) * sin(lamb), cos(phi)],\r\n [cos(phi) * cos(lamb), cos(phi) * sin(lamb), sin(phi)]])\r\n return (G)", "def rotate_z(self, angle):\n angle *= np.pi / 180\n return self.transform(np.matrix([[np.cos(angle), -np.sin(angle), 0],\n [np.sin(angle), np.cos(angle), 0],\n [0, 0, 1]]))", "def rotation_matrix(rotate):\n tx, ty, tz = rotate\n Rx = np.array([[1, 0, 0], [0, np.cos(tx), -np.sin(tx)], [0, np.sin(tx), np.cos(tx)]])\n Ry = np.array([[np.cos(ty), 0, -np.sin(ty)], [0, 1, 0], [np.sin(ty), 0, np.cos(ty)]])\n Rz = np.array([[np.cos(tz), -np.sin(tz), 0], [np.sin(tz), np.cos(tz), 0], [0, 0, 1]])\n return np.dot(Rx, np.dot(Ry, Rz))", "def rotate_3D(atom, source_atom):\n from lauescript.cryst.match import get_transform\n\n lst2 = [np.array([0, 0, 0]), source_atom.orientation[0], source_atom.orientation[1]]\n lst1 = [np.array([0, 0, 0]), atom.orientation[0], atom.orientation[1]]\n\n matrix = get_transform(lst1, lst2, matrix=True)\n\n adp = source_atom.adp['cart_int']\n\n atom.adp['cart_int'] = rotate_adp(adp, matrix)", "def rot_z(theta):\n theta_rad = np.radians(theta)\n rotation_matrix = [[np.cos(theta_rad), -np.sin(theta_rad), 0],\n [np.sin(theta_rad), np.cos(theta_rad), 0],\n [0, 0, 1]]\n return np.matrix(rotation_matrix)", "def _rotationMatrix(self, n_dim, theta):\n i = np.identity(n_dim)\n c, s = np.cos(theta)*i, np.sin(theta)*i\n rotation = np.bmat([[c, s], [-s, c]])\n return rotation", "def angle_axis_to_rot3d(axis, theta):\n if isinstance(axis, string_types):\n axis = axis.lower()\n if axis == 'x':\n axis = np.array([1., 0., 0.])\n elif axis == 'y':\n axis = np.array([0., 1., 0.])\n elif axis == 'z':\n axis = np.array([0., 0., 1.])\n else:\n raise ValueError(\"Axis should be 'x', 'y', 'z' or a 3D vector.\")\n elif len(axis) != 3:\n raise ValueError(\"Axis should be 'x', 'y', 'z' or a 3D vector.\")\n axis = axis.astype(float)\n axis /= np.linalg.norm(axis)\n a = axis[0]\n b = axis[1]\n c = axis[2]\n cos_theta = np.cos(theta)\n bracket = 1 - cos_theta\n a_bracket = a * bracket\n b_bracket = b * bracket\n c_bracket = c * bracket\n sin_theta = np.sin(theta)\n a_sin_theta = a * sin_theta\n b_sin_theta = b * sin_theta\n c_sin_theta = c * sin_theta\n rot3d = np.array(\n [[a * a_bracket + cos_theta, a * b_bracket - c_sin_theta, a * c_bracket + b_sin_theta],\n [b * a_bracket + c_sin_theta, b * b_bracket + 
cos_theta, b * c_bracket - a_sin_theta],\n [c * a_bracket - b_sin_theta, c * b_bracket + a_sin_theta, c * c_bracket + cos_theta]])\n return rot3d", "def rotmat(axis, angle):\n mat = np.eye(3)\n if angle is None or np.isclose(angle, 0.0):\n return mat\n cang = np.cos(angle*radians)\n sang = np.sin(angle*radians)\n if axis == 1:\n mat = np.array(((1, 0, 0), (0, cang, -sang), (0, sang, cang)))\n elif axis == 2:\n mat = np.array(((cang, 0, sang), (0, 1, 0), (-sang, 0, cang)))\n else:\n mat = np.array(((cang, -sang, 0), (sang, cang, 0), (0, 0, 1)))\n return np.matrix(mat)", "def getRotationMatrix(x, y, z, angle):\n # impossible to have a rotational matrix around (0, 0 ,0)\n if x == 0 and y == 0 and z == 0:\n raise Exception(\"Cannot have a rotation matrix around (0, 0, 0)\")\n\n # normalize vector\n vec = MatrixExtended([x, y, z])\n length = np.linalg.norm(vec)\n x /= length\n y /= length\n z /= length\n\n # some shortcuts for readability\n xx = x * x\n yy = y * y\n zz = z * z\n C = math.cos\n S = math.sin\n\n # calculate matrix elements\n e11 = xx + (1 - xx) * C(angle)\n e12 = x * y * (1 - C(angle)) - z * S(angle)\n e13 = x * z * (1 - C(angle)) + y * S(angle)\n e21 = x * y * (1 - C(angle)) + z * S(angle)\n e22 = yy + (1 - yy) * C(angle)\n e23 = y * z * (1 - C(angle)) - x * S(angle)\n e31 = x * z * (1 - C(angle)) - y * S(angle)\n e32 = y * z * (1 - C(angle)) + x * S(angle)\n e33 = zz + (1 - zz) * C(angle)\n\n return MatrixExtended([\n [e11, e12, e13, 0],\n [e21, e22, e23, 0],\n [e31, e32, e33, 0],\n [0, 0, 0, 1]])", "def rotation3(size=None): # noqa\n if size is None:\n size = ()\n else:\n try:\n size = tuple(size)\n except TypeError:\n size = (size,)\n theta, phi, z = 2. * random((3, 1) + size)\n theta *= pi # Initial rotation angle about z-axis.\n phi *= pi # Angle in xy plane for tilt of z-axis.\n # Magnitude of tilt is random variable z.\n r = sqrt(z)\n v = concatenate((r*sin(phi), r*cos(phi), sqrt(2.-z)))\n st, ct = sin(theta), cos(theta)\n s = concatenate((v[0]*ct - v[1]*st, v[0]*st + v[1]*ct))\n m = v[:, newaxis].repeat(3, axis=1)\n m[:, :2] *= s\n m[0, :2] -= concatenate((ct, st))\n m[1, :2] += concatenate((st, -ct))\n m[:2, 2] *= v[2]\n m[2, 2] = 1. - z # Equals v[2]*v[2] - 1.\n if m.ndim > 2:\n m = transpose(m, roll(range(m.ndim), -2)).copy()\n return m", "def rotate_z(angle):\n log.dev(\"lib.mathp.rotate_z is deprecated. 
Use lib.rotation.R3 instead.\")\n\n cosA = np.cos(angle)\n sinA = np.sin(angle)\n R = np.array([[cosA, sinA, 0], [-sinA, cosA, 0], [0, 0, 1]])\n return R", "def Rpz(angle=0, units='deg'):\n\n if(units=='deg'):\n angle = angle*pi/180\n\n C = np.cos(angle)\n S = np.sin(angle)\n\n M = np.identity(3)\n\n M[0,0] = +C\n M[0,1] = -S\n M[1,0] = +S\n M[1,1] = +C\n\n return M", "def rotation_matrix(axis, theta):\n axis = np.asarray(axis)\n axis = axis / math.sqrt(np.dot(axis, axis))\n a = math.cos(theta / 2.0)\n b, c, d = -axis * math.sin(theta / 2.0)\n aa, bb, cc, dd = a * a, b * b, c * c, d * d\n bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d\n\n R = np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],\n [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],\n [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])\n\n T = np.identity(4)\n T[:3, :3] = R\n return T", "def quaternion_from_axis_angle(x, y, z, theta):\n if x == y == z == 0:\n return np.array([1, 0, 0, 0])\n axis = np.array([x, y, z])\n axis /= np.linalg.norm(axis)\n return rowan.from_axis_angle(axis, theta)", "def rotate(self,X):\n alpha = random.rand() * 2*pi\n\n beta = np.arccos(1.0-2*random.rand())\n psi = random.rand() * 2*pi\n\n R = Rotator.rotation_matrix(alpha,beta,psi)\n return np.dot(R,X)", "def rotate_x(self, angle):\n angle *= np.pi / 180\n return self.transform(np.matrix([[1, 0, 0],\n [0, np.cos(angle), -np.sin(angle)],\n [0, np.sin(angle), np.cos(angle)]]))", "def rotateXYZ(self, angle):\n return self.transform(Matrix3(\n Vec3(1, 0, 0),\n Vec3(0, math.cos(angle), -math.sin(angle)),\n Vec3(0, math.sin(angle), math.cos(angle)),\n ) * Matrix3(\n Vec3(math.cos(angle), 0, math.sin(angle)),\n Vec3(0, 1, 0),\n Vec3(-math.sin(angle), 0, math.cos(angle)),\n ) * Matrix3(\n Vec3(math.cos(angle), -math.sin(angle), 0),\n Vec3(math.sin(angle), math.cos(angle), 0),\n Vec3(0, 0, 1),\n ))", "def rot_x(angle):\n sangle = math.sin(angle)\n cangle = math.cos(angle)\n rx = np.array([[1.0, 0.0, 0.0],\n [0.0, cangle, sangle],\n [0.0, -sangle, cangle]])\n return rx", "def rotate_x(angle):\n log.dev(\"lib.mathp.rotate_x is deprecated. 
Use lib.rotation.R1 instead.\")\n\n cosA = np.cos(angle)\n sinA = np.sin(angle)\n R = np.array([[1, 0, 0], [0, cosA, sinA], [0, -sinA, cosA]])\n return R", "def rotation_to_transformation_matrix(R):\n R = Matrix(R)\n T = R.col_insert(3, Matrix([0., 0., 0.]))\n T = T.row_insert(3, Matrix([[0., 0., 0., 1.]]))\n return T", "def RotationZ(theta):\n\n return Rotation([0., 0., 1.], theta)", "def matrix_rotate_3d_y(deg: float) -> np.matrix:\n from numpy import cos, sin, pi\n rad_y = -deg * pi/180\n c_y = cos(rad_y)\n s_y = sin(rad_y)\n return np.matrix([[c_y, 0, s_y], [0, 1, 0], [-s_y, 0, c_y]])", "def rotateZ(self, angle):\r\n rad = angle * math.pi / 180\r\n cosa = math.cos(rad)\r\n sina = math.sin(rad)\r\n x = self.x * cosa - self.y * sina\r\n y = self.x * sina + self.y * cosa\r\n return Point3D(x, y, self.z)", "def matrix_translate_3d(tx: float, ty: float, tz: float) -> np.matrix:\n return np.matrix([[1, 0, 0, tx], [0, 1, 0, ty], [0, 0, 1, tz], [0, 0, 0, 1]])", "def rotate(self,X):\n alpha = random.rand() * 2*pi\n R = Rotator.rotation_matrix(alpha,0.0,0.0)\n return np.dot(R,X)", "def rotateZ(self, angle):\n rad = angle * math.pi / 180\n cosa = math.cos(rad)\n sina = math.sin(rad)\n x = self.x * cosa - self.y * sina\n y = self.x * sina + self.y * cosa\n return Point3D(x, y, self.z)", "def rotateZ(self, angle):\n rad = angle * math.pi / 180\n cosa = math.cos(rad)\n sina = math.sin(rad)\n x = self.x * cosa - self.y * sina\n y = self.x * sina + self.y * cosa\n return Point3D(x, y, self.z)", "def rotateZ(self, angle):\n rad = angle * math.pi / 180\n cosa = math.cos(rad)\n sina = math.sin(rad)\n x = self.x * cosa - self.y * sina\n y = self.x * sina + self.y * cosa\n return Point3D(x, y, self.z)", "def rotateZ(self, angle):\n rad = angle * math.pi / 180\n cosa = math.cos(rad)\n sina = math.sin(rad)\n x = self.x * cosa - self.y * sina\n y = self.x * sina + self.y * cosa\n return Point3D(x, y, self.z)", "def _rotation_matrix(theta):\n c, s = np.cos(theta), np.sin(theta)\n return np.array(((c, -s), (s, c)))", "def rotation(self, x, omega):\n \n x0, y0 = x.T[0], x.T[1]\n c, s = np.cos(omega), np.sin(omega)\n x1 = c*x0 - s*y0\n y1 = s*x0 + c*y0\n x_1 = np.array([x1, y1])\n return x_1", "def rotate_3D(image, angle, axes=(1, 2)):\n rotated_image = scipy.ndimage.interpolation.rotate(\n image, angle, axes, reshape=False)\n return rotated_image", "def rotateZ(self, angle):\n rad = math.radians(angle)\n cosa = math.cos(rad)\n sina = math.sin(rad)\n x = self.x * cosa - self.y * sina\n y = self.x * sina + self.y * cosa\n return Point3D(x, y, self.z)", "def axis_angle_matrix3(unit, theta):\n x, y, z = unit\n c = math.cos(theta)\n s = math.sin(theta)\n C = 1 - c\n return np.matrix([\n [x * x * C + c, x * y * C - z * s, x * z * C + y * s],\n [y * x * C + z * s, y * y * C + c, y * z * C - x * s],\n [z * x * C - y * s, z * y * C + x * s, z * z * C + c],\n ])", "def transform3D_rpy(\n x: float, y: float, z: float, roll: float, pitch: float, yaw: float\n) -> np.array:\n sr = np.sin(roll)\n cr = np.cos(roll)\n sp = np.sin(pitch)\n cp = np.cos(pitch)\n sy = np.sin(yaw)\n cy = np.cos(yaw)\n return np.array(\n [\n [cy * cp, cy * sp * sr - sy * cr, cy * sp * cr + sy * sr, x],\n [sy * cp, sy * sp * sr + cy * cr, sy * sp * cr - cy * sr, y],\n [-sp, cp * sr, cp * cr, z],\n [0, 0, 0, 1.0],\n ]\n )", "def apply_rotation_z(self, theta=0.0 ):\n \n theta = radians(theta)\n new_rotation_matrix = [[ +cos(theta) , -sin(theta) , 0 ],\n [ +sin(theta) , +cos(theta) , 0 ],\n [ 0 , 0 , 1 ]] \n \n self.rotation_matrix_exp = np.dot( 
new_rotation_matrix, self.rotation_matrix_exp )", "def rotateZ(self, angle):\n\t\trad = angle * math.pi / 180\n\t\tcosa = math.cos(rad)\n\t\tsina = math.sin(rad)\n\t\tx = self.x * cosa - self.y * sina\n\t\ty = self.x * sina + self.y * cosa\n\t\treturn Point3D(x, y, self.z)", "def rotation_matrix(self, rotation, rotation_order=\"zyx\"):\n x = math.radians(rotation[0])\n y = math.radians(rotation[1])\n z = math.radians(rotation[2])\n\n cos = math.cos\n sin = math.sin\n if rotation_order == 'zyx':\n index_0 = cos(y) * cos(z)\n index_1 = cos(z) * sin(x) * sin(y) - cos(x) * sin(z)\n index_2 = cos(x) * cos(z) * sin(y) + sin(x) * sin(z)\n\n index_3 = cos(y) * sin(z)\n index_4 = cos(x) * cos(z) + sin(x) * sin(y) * sin(z)\n index_5 = -cos(z) * sin(x) + cos(x) * sin(y) * sin(z)\n\n index_6 = -sin(y)\n index_7 = -cos(y) * sin(x)\n index_8 = cos(x) * cos(y)\n elif rotation_order == 'xyz':\n index_0 = cos(y) * cos(z)\n index_1 = -cos(z) * sin(z)\n index_2 = sin(y)\n\n index_3 = cos(x) * sin(z) + sin(x) * sin(y) * cos(z)\n index_4 = cos(x) * cos(z) - sin(x) * sin(y) * sin(z)\n index_5 = -sin(x) * cos(y)\n\n index_6 = sin(x) * sin(z) - cos(x) * sin(y) * cos(z)\n index_7 = sin(x) * cos(z) + cos(x) * sin(y) * sin(z)\n index_8 = cos(x) * cos(y)\n\n rot_mat = ((index_0, index_1, index_2),\n (index_3, index_4, index_5),\n (index_6, index_7, index_8))\n\n return rot_mat", "def rotate(x: torch.Tensor, angle: int) -> torch.Tensor:\n # B C H W\n h_dim = 2\n w_dim = 3\n\n if angle == 0:\n return x\n elif angle == 90:\n return x.flip(w_dim).transpose(h_dim, w_dim)\n elif angle == 180:\n return x.flip(w_dim).flip(h_dim)\n elif angle == 270:\n return x.flip(h_dim).transpose(h_dim, w_dim)\n else:\n raise NotImplementedError(\"Must be rotation divisible by 90 degrees\")", "def axis2rotmat(axis):\n return quat2rotmat(axis2quat(axis))", "def rotation_matrix(yaw, pitch) -> TransformationMatrixType:\n return rotation_matrix_yx(math.radians(yaw + 180), math.radians(pitch))", "def rotation_matrix(theta=0, phi=0, psi=0, units='deg'):\n\n rpy = Rpy(theta,units)\n rmx = Rmx(phi, units)\n rpz = Rpz(psi, units)\n\n return np.matmul(rpy, np.matmul(rmx, rpz))", "def xform_Z_rot( self , thetaZrad ):\r\n self.xform_homog( homogeneous_Z( thetaZrad , [ 0 , 0 , 0 ] ) )", "def angleAxis2rot3D(axis, theta):\n if len(axis) is not 3:\n raise ValueError('Number of axis element must be 3!')\n axis = axis.astype(float)\n axis /= np.linalg.norm(axis)\n a = axis[0]\n b = axis[1]\n c = axis[2]\n cosTheta = np.cos(theta)\n bracket = 1 - cosTheta\n aBracket = a * bracket\n bBracket = b * bracket\n cBracket = c * bracket\n sinTheta = np.sin(theta)\n aSinTheta = a * sinTheta\n bSinTheta = b * sinTheta\n cSinTheta = c * sinTheta\n rot3D = np.array([[a*aBracket+cosTheta, a*bBracket-cSinTheta, a*cBracket+bSinTheta],\n [b*aBracket+cSinTheta, b*bBracket+cosTheta, b*cBracket-aSinTheta],\n [c*aBracket-bSinTheta, c*bBracket+aSinTheta, c*cBracket+cosTheta]])\n return rot3D", "def homog_rot_mtx(angle_rads: float, axis: str) -> numpy.array:\n cosang = numpy.cos(angle_rads)\n sinang = numpy.sin(angle_rads)\n\n if \"z\" == axis:\n return numpy.array(\n (\n (cosang, -sinang, 0, 0),\n (sinang, cosang, 0, 0),\n (0, 0, 1, 0),\n (0, 0, 0, 1),\n ),\n dtype=numpy.float64,\n )\n elif \"y\" == axis:\n return numpy.array(\n (\n (cosang, 0, sinang, 0),\n (0, 1, 0, 0),\n (-sinang, 0, cosang, 0),\n (0, 0, 0, 1),\n ),\n dtype=numpy.float64,\n )\n else:\n return numpy.array(\n (\n (1, 0, 0, 0),\n (0, cosang, -sinang, 0),\n (0, sinang, cosang, 0),\n (0, 0, 0, 1),\n ),\n 
dtype=numpy.float64,\n )", "def rot_z(angle):\n sangle = math.sin(angle)\n cangle = math.cos(angle)\n rz = np.array([[cangle, sangle, 0.0],\n [-sangle, cangle, 0.0],\n [0.0, 0.0, 1.0]])\n return rz", "def rotation_matrix(theta, axis=None):\n if axis is None:\n axis = [0, 0, 1]\n axis = np.asarray(axis)\n axis = axis/math.sqrt(np.dot(axis, axis))\n a = math.cos(theta/2.0)\n b, c, d = -axis*math.sin(theta/2.0)\n aa, bb, cc, dd = a*a, b*b, c*c, d*d\n bc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d\n return np.array([[aa+bb-cc-dd, 2*(bc+ad), 2*(bd-ac)],\n [2*(bc-ad), aa+cc-bb-dd, 2*(cd+ab)],\n [2*(bd+ac), 2*(cd-ab), aa+dd-bb-cc]])", "def rotation(x1, z1, x2, z2):\n e1 = np.zeros(shape=(3, 3))\n e2 = np.zeros(shape=(3, 3))\n e1[0, :] = x1 / np.linalg.norm(x1)\n e1[2, :] = z1 / np.linalg.norm(z1)\n e1[1, :] = np.cross(e1[2, :], e1[0, :])\n e2[0, :] = x2 / np.linalg.norm(x2)\n e2[2, :] = z2 / np.linalg.norm(z2)\n e2[1, :] = np.cross(e2[2, :], e2[0, :])\n R = np.zeros(shape=(3, 3))\n for i in range(3):\n for j in range(3):\n R[i, j] = np.dot(e1[i, :], e2[j, :])\n R = np.transpose(R)\n return R", "def rotateX(self, angle):\r\n if angle:\r\n c = cos(radians(angle))\r\n s = sin(radians(angle))\r\n self.mtrx = dot([[1, 0, 0, 0],\r\n [0, c, s, 0],\r\n [0, -s, c, 0],\r\n [0, 0, 0, 1]], self.mtrx)\r\n self.rtn[0] = angle\r\n self.was_moved = True", "def test_x_y_and_z_rot(self):\n\n axis = Vec3(4, 5, 6)\n # Create a Matrix representing a rotation.\n mat = Matrix44.from_axis_angle_deg(axis, 45.0)\n # Use from_matrix44()\n quat = Quat.from_matrix44(mat)\n\n # Ensure it matches the expected quaternion.\n expected_quat = Quat.from_axis_angle_deg(axis, 45.0)\n self.assertAlmostEqual(quat.x, expected_quat.x)\n self.assertAlmostEqual(quat.y, expected_quat.y)\n self.assertAlmostEqual(quat.z, expected_quat.z)\n self.assertAlmostEqual(quat.w, expected_quat.w)", "def rotation3D_rpy(roll: float, pitch: float, yaw: float) -> np.array:\n\n sr = np.sin(roll)\n cr = np.cos(roll)\n sp = np.sin(pitch)\n cp = np.cos(pitch)\n sy = np.sin(yaw)\n cy = np.cos(yaw)\n return np.array(\n [\n [cy * cp, cy * sp * sr - sy * cr, cy * sp * cr + sy * sr],\n [sy * cp, sy * sp * sr + cy * cr, sy * sp * cr - cy * sr],\n [-sp, cp * sr, cp * cr],\n ]\n )", "def R_axis_angle(axis, angle):\n\n # Trig factors.\n ca = math.cos(angle)\n sa = math.sin(angle)\n C = 1 - ca\n\n # Depack the axis.\n x, y, z = axis\n\n # Multiplications (to remove duplicate calculations).\n xs = x * sa\n ys = y * sa\n zs = z * sa\n xC = x * C\n yC = y * C\n zC = z * C\n xyC = x * yC\n yzC = y * zC\n zxC = z * xC\n\n # Update the rotation matrix.\n matrix = np.zeros((3, 3))\n matrix[0, 0] = x * xC + ca\n matrix[0, 1] = xyC - zs\n matrix[0, 2] = zxC + ys\n matrix[1, 0] = xyC + zs\n matrix[1, 1] = y * yC + ca\n matrix[1, 2] = yzC - xs\n matrix[2, 0] = zxC - ys\n matrix[2, 1] = yzC + xs\n matrix[2, 2] = z * zC + ca\n return matrix", "def AffineRz(theta, units='deg'):\n if units == 'deg':\n theta = np.deg2rad(theta)\n\n return np.mat([[np.cos(theta), -np.sin(theta), 0, 0],\n [np.sin(theta), np.cos(theta), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]\n ],\n dtype='f8'\n )", "def make_sample_rot_matrix(self, angles):\n (phi, chi, omega) = angles[0:3]\n return numpy_utils.rotation_matrix(phi, chi, omega)", "def rotation_matrices_from_angles(angles):\n\n angles = np.atleast_1d(angles)\n npts = len(angles)\n\n sina = np.sin(angles)\n cosa = np.cos(angles)\n\n R = np.zeros((npts, 2, 2))\n R[:, 0, 0] = cosa\n R[:, 1, 1] = cosa\n\n R[:, 0, 1] = -sina\n R[:, 1, 0] = sina\n\n return 
R" ]
[ "0.80429703", "0.77990216", "0.77074045", "0.76701725", "0.7432882", "0.7401681", "0.73251915", "0.7267913", "0.71938413", "0.71556383", "0.70411855", "0.7036028", "0.70312065", "0.7028589", "0.7012928", "0.69417447", "0.692393", "0.68872285", "0.6844814", "0.6838681", "0.68080956", "0.6798901", "0.67988205", "0.6758144", "0.6729803", "0.67192096", "0.66868836", "0.66717553", "0.6632204", "0.6627981", "0.6621583", "0.6607602", "0.6607602", "0.6607602", "0.6607602", "0.6590661", "0.6579802", "0.6562416", "0.65366614", "0.65364885", "0.65364885", "0.6529475", "0.65163505", "0.65141755", "0.65138775", "0.65131706", "0.65046525", "0.6498061", "0.6479598", "0.64739305", "0.6449672", "0.6441815", "0.64396137", "0.6438786", "0.64382684", "0.6433405", "0.64286506", "0.63959485", "0.6386619", "0.63805944", "0.63755715", "0.6369848", "0.6362034", "0.6355442", "0.6347457", "0.6343577", "0.63430387", "0.6323661", "0.63129026", "0.6306528", "0.6306513", "0.6306513", "0.6306513", "0.6306513", "0.62646574", "0.62597996", "0.6255736", "0.6244119", "0.6237839", "0.62331796", "0.62285954", "0.6226805", "0.6184329", "0.61771965", "0.6155163", "0.6154938", "0.61499053", "0.61482", "0.6147654", "0.6140533", "0.6135383", "0.61346006", "0.6128606", "0.6124536", "0.6111494", "0.6107927", "0.6102341", "0.6098281", "0.6098092", "0.60932976" ]
0.78315115
1
Create a 3D rotation matrix for rotation about the y-axis.

                ( cos(x)  0  -sin(x) )
    R(theta) =  (    0    1     0    )
                ( sin(x)  0   cos(x) )
def rotation3Dy(theta):
    rmat = np.zeros((3,3))
    rmat[0,0], rmat[0,1], rmat[0,2] = np.cos(theta), 0.0, -np.sin(theta)
    rmat[1,0], rmat[1,1], rmat[1,2] = 0.0, 1.0, 0.0
    rmat[2,0], rmat[2,1], rmat[2,2] = np.sin(theta), 0.0, np.cos(theta)
    return rmat
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
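A quick way to sanity-check the rotation3Dy document above is to build the same matrix for one concrete angle and confirm the properties every proper rotation matrix must satisfy (orthogonality and unit determinant). The sketch below is illustrative only: it assumes NumPy imported as np, picks theta = pi/2 purely as an example value, and writes the entries out directly in the layout the function fills in.

    import numpy as np

    theta = np.pi / 2
    R = np.array([[np.cos(theta), 0.0, -np.sin(theta)],
                  [0.0,           1.0,  0.0          ],
                  [np.sin(theta), 0.0,  np.cos(theta)]])   # same entries rotation3Dy assigns
    assert np.allclose(R @ R.T, np.eye(3))                 # orthogonal: R R^T = I
    assert np.isclose(np.linalg.det(R), 1.0)               # proper rotation: det(R) = 1
    assert np.allclose(R @ np.array([1.0, 0.0, 0.0]),
                       [0.0, 0.0, 1.0])                    # +x is carried onto +z under this sign convention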
[ "def matrix_rotate_3d_y(deg: float) -> np.matrix:\n from numpy import cos, sin, pi\n rad_y = -deg * pi/180\n c_y = cos(rad_y)\n s_y = sin(rad_y)\n return np.matrix([[c_y, 0, s_y], [0, 1, 0], [-s_y, 0, c_y]])", "def rotation3Dz(theta):\n rmat = np.zeros((3,3))\n rmat[0,0] = rmat[1,1] = np.cos(theta)\n rmat[0,1] = np.sin(theta)\n rmat[1,0] = -rmat[0,1]\n rmat[2,2] = 1\n return rmat", "def rotation3D_y(angle: float) -> np.array:\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c, 0.0, s], [0.0, 1.0, 0.0], [-s, 0.0, c]])", "def rotation_matrix3(axis, theta):\n R = np.eye(3)\n c = math.cos(theta)\n s = math.sin(theta)\n a1 = (axis + 1) % 3\n a2 = (axis + 2) % 3\n R[a1, a1] = c\n R[a1, a2] = -s\n R[a2, a1] = s\n R[a2, a2] = c\n return np.matrix(R)", "def z_rotmat(theta):\n cos_t = np.cos(theta)\n sin_t = np.sin(theta)\n return np.array([[cos_t, -sin_t, 0],\n [sin_t, cos_t, 0],\n [0, 0, 1]])", "def rotation3Dx(theta):\n rmat = np.zeros((3,3))\n rmat[0,0], rmat[0,1], rmat[0,2] = 1.0, 0.0, 0.0\n rmat[1,0], rmat[1,1], rmat[1,2] = 0.0, np.cos(theta), np.sin(theta)\n rmat[2,0], rmat[2,1], rmat[2,2] = 0.0, -np.sin(theta), np.cos(theta)\n \n return rmat", "def rotation_mat(self) -> np.ndarray:\n rot = np.zeros((3, 3))\n\n txx = 2 * self.x * self.x\n tyy = 2 * self.y * self.y\n tzz = 2 * self.z * self.z\n twx = 2 * self.w * self.x\n twy = 2 * self.w * self.y\n twz = 2 * self.w * self.z\n txy = 2 * self.x * self.y\n txz = 2 * self.x * self.z\n tyz = 2 * self.y * self.z\n\n rot[0, 0] = 1. - tyy - tzz\n rot[0, 1] = txy - twz\n rot[0, 2] = txz + twy\n rot[1, 0] = txy + twz\n rot[1, 1] = 1. - txx - tzz\n rot[1, 2] = tyz - twx\n rot[2, 0] = txz - twy\n rot[2, 1] = tyz + twx\n rot[2, 2] = 1. - txx - tyy\n\n return rot", "def matrix_rotate_3d_z(deg: float) -> np.matrix:\n from numpy import cos, sin, pi\n rad_z = -deg * pi/180\n c_z = cos(rad_z)\n s_z = sin(rad_z)\n return np.matrix([[c_z, -s_z, 0], [s_z, c_z, 0], [0, 0, 1]])", "def rotation_matrix3(angle_x=0, angle_y=0, angle_z=0):\n if angle_x != 0:\n c, s = cos(angle_x), sin(angle_x)\n r = np.array([[1, 0, 0], [0, c, -s], [0, s, c]])\n else:\n r = np.identity(3)\n\n if angle_y != 0:\n c, s = cos(angle_y), sin(angle_y)\n r = r.dot(np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]]))\n\n if angle_z != 0:\n c, s = cos(angle_z), sin(angle_z)\n r = r.dot(np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]]))\n\n return r", "def yz_rotation(vector,theta):\r\n R = np.array([[1,0,0],\r\n [0, np.cos(theta),-np.sin(theta)],\r\n [0, np.sin(theta), np.cos(theta)]\r\n ])\r\n return np.dot(R,vector)", "def rotation_matrix_3x3_axis(angle, axis):\n assert axis.lower() in ['x','y','z']\n assert -180.0 <= angle <= 180.0\n angle_r = angle * (np.pi / 180.0)\n sa = np.sin(angle_r)\n ca = np.cos(angle_r)\n\n if axis == 'x':\n R = np.array([ [1, 0, 0],\n [0, ca, -sa],\n [0, sa, ca],\n ])\n elif axis == 'y':\n R = np.array([ [ca, 0, sa],\n [0, 1, 0],\n [-sa, 0, ca],\n ])\n elif axis == 'z':\n R = np.array([ [ca, -sa, 0],\n [sa, ca, 0],\n [0, 0, 1],\n ])\n return R", "def rotation_matrix(rx, ry, rz):\n # Convert from degrees to radians.\n rx = np.pi * rx / 180\n ry = np.pi * ry / 180\n rz = np.pi * rz / 180\n\n # Pre-compute sine and cosine of angles.\n cx, cy, cz = np.cos([rx, ry, rz])\n sx, sy, sz = np.sin([rx, ry, rz])\n\n # Set up euler rotations.\n Rx = np.array([[1, 0, 0, 0],\n [0, cx, -sx, 0],\n [0, sx, cx, 0],\n [0, 0, 0, 1]])\n\n Ry = np.array([[cy, 0, sy, 0],\n [0, 1, 0, 0],\n [-sy, 0, cy, 0],\n [0, 0, 0, 1]])\n\n Rz = np.array([[cz, -sz, 0, 0],\n [sz, cz, 0, 0],\n [0, 0, 1, 
0],\n [0, 0, 0, 1]])\n\n return Rz.dot(Ry.dot(Rx))", "def rot_z(theta):\n theta_rad = np.radians(theta)\n rotation_matrix = [[np.cos(theta_rad), -np.sin(theta_rad), 0],\n [np.sin(theta_rad), np.cos(theta_rad), 0],\n [0, 0, 1]]\n return np.matrix(rotation_matrix)", "def random_rotation_matrix():\n\n x = np.random.uniform(size=3)\n theta = x[0]*2*math.pi\n phi = x[1]*2*math.pi\n z = x[2]*2\n\n r = math.sqrt(z)\n vx = math.sin(phi)*r\n vy = math.cos(phi)*r\n vz = math.sqrt(2.0-z)\n\n st = math.sin(theta)\n ct = math.cos(theta)\n\n sx = vx*ct-vy*st\n sy = vx*st+vy*ct\n\n return np.array([[vx*sx-ct, vx*sy-st, vx*vz],\n [vy*sx+st, vy*sy-ct, vy*vz],\n [vz*sx,vz*sy,1.0-z]])", "def y_rotmat(theta):\n cos_t = np.cos(theta)\n sin_t = np.sin(theta)\n return np.array([[cos_t, 0, sin_t],\n [0, 1, 0],\n [-sin_t, 0, cos_t]])", "def _rotationMatrix(self, n_dim, theta):\n i = np.identity(n_dim)\n c, s = np.cos(theta)*i, np.sin(theta)*i\n rotation = np.bmat([[c, s], [-s, c]])\n return rotation", "def rotation(x,y,z):\r\n phi = np.arctan(z/sqrt(x**2+y**2))\r\n lamb = np.arctan2(y,x)\r\n G = np.array([[-sin(lamb), cos(lamb), 0],\r\n [-sin(phi) * cos(lamb), -sin(phi) * sin(lamb), cos(phi)],\r\n [cos(phi) * cos(lamb), cos(phi) * sin(lamb), sin(phi)]])\r\n return (G)", "def rotationMatrix(self):\n\n R = Compute3DRotationMatrix(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],\n self.exteriorOrientationParameters[5])\n\n return R", "def rotation3D_z(angle: float) -> np.array:\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])", "def rotationMatrix_RzRyRz(self):\n\n R = Compute3DRotationMatrix_RzRyRz(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],\n self.exteriorOrientationParameters[5])\n\n return R", "def generate_rotation_matrix(x_angle, y_angle, z_angle):\n return np.array([\n [1, 0, 0],\n [0, np.cos(x_angle), -np.sin(x_angle)],\n [0, np.sin(x_angle), np.cos(x_angle)],\n ]).dot([\n [np.cos(y_angle), 0, np.sin(y_angle)],\n [0, 1, 0],\n [-np.sin(y_angle), 0, np.cos(y_angle)],\n ]).dot([\n [np.cos(z_angle), -np.sin(z_angle), 0],\n [np.sin(z_angle), np.cos(z_angle), 0],\n [0, 0, 1],\n ]).tolist()", "def RotationMatrix(theta, x, y, z, point=None):\n\treturn mach.rotation_matrix(theta, [x, y, z])", "def matrix_rotate_3d_x(deg: float) -> np.matrix:\n from numpy import cos, sin, pi\n rad_x = -deg * pi/180\n c_x = cos(rad_x)\n s_x = sin(rad_x)\n return np.matrix([[1, 0, 0], [0, c_x, -s_x], [0, s_x, c_x]])", "def rotation_matrix(rotate):\n tx, ty, tz = rotate\n Rx = np.array([[1, 0, 0], [0, np.cos(tx), -np.sin(tx)], [0, np.sin(tx), np.cos(tx)]])\n Ry = np.array([[np.cos(ty), 0, -np.sin(ty)], [0, 1, 0], [np.sin(ty), 0, np.cos(ty)]])\n Rz = np.array([[np.cos(tz), -np.sin(tz), 0], [np.sin(tz), np.cos(tz), 0], [0, 0, 1]])\n return np.dot(Rx, np.dot(Ry, Rz))", "def RotationZ(theta):\n\n return Rotation([0., 0., 1.], theta)", "def rotate_z(angle):\n log.dev(\"lib.mathp.rotate_z is deprecated. 
Use lib.rotation.R3 instead.\")\n\n cosA = np.cos(angle)\n sinA = np.sin(angle)\n R = np.array([[cosA, sinA, 0], [-sinA, cosA, 0], [0, 0, 1]])\n return R", "def Rz(theta, units='deg'):\n if units == 'deg':\n theta = np.deg2rad(theta)\n\n return np.mat([[np.cos(theta), -np.sin(theta), 0],\n [np.sin(theta), np.cos(theta), 0],\n [0, 0, 1]\n ],\n dtype='f8'\n )", "def axisAnglesToRotMat(xrot, yrot, zrot):\n\n xmat = np.eye(3)\n ymat = np.eye(3)\n zmat = np.eye(3)\n\n xmat[1, 1] = np.cos(xrot)\n xmat[1, 2] = -np.sin(xrot)\n xmat[2, 1] = np.sin(xrot)\n xmat[2, 2] = np.cos(xrot)\n\n ymat[0, 0] = np.cos(yrot)\n ymat[0, 2] = np.sin(yrot)\n ymat[2, 0] = -np.sin(yrot)\n ymat[2, 2] = np.cos(yrot)\n\n zmat[0, 0] = np.cos(zrot)\n zmat[0, 1] = -np.sin(zrot)\n zmat[1, 0] = np.sin(zrot)\n zmat[1, 1] = np.cos(zrot)\n\n return concat(zmat, ymat, xmat)", "def axisAnglesToRotMat(xrot, yrot, zrot):\n\n xmat = np.eye(3)\n ymat = np.eye(3)\n zmat = np.eye(3)\n\n xmat[1, 1] = np.cos(xrot)\n xmat[1, 2] = -np.sin(xrot)\n xmat[2, 1] = np.sin(xrot)\n xmat[2, 2] = np.cos(xrot)\n\n ymat[0, 0] = np.cos(yrot)\n ymat[0, 2] = np.sin(yrot)\n ymat[2, 0] = -np.sin(yrot)\n ymat[2, 2] = np.cos(yrot)\n\n zmat[0, 0] = np.cos(zrot)\n zmat[0, 1] = -np.sin(zrot)\n zmat[1, 0] = np.sin(zrot)\n zmat[1, 1] = np.cos(zrot)\n\n return concat(zmat, ymat, xmat)", "def _rotation_matrix(theta):\n c, s = np.cos(theta), np.sin(theta)\n return np.array(((c, -s), (s, c)))", "def create_rotation_matrix_3d(angles) -> np.array:\n\n mat1 = np.array([[1., 0., 0.],\n [0., math.cos(angles[0]), math.sin(angles[0])],\n [0., -math.sin(angles[0]), math.cos(angles[0])]],\n dtype='float')\n\n mat2 = np.array([[math.cos(angles[1]), 0., -math.sin(angles[1])],\n [0., 1., 0.],\n [math.sin(angles[1]), 0., math.cos(angles[1])]],\n dtype='float')\n\n mat3 = np.array([[math.cos(angles[2]), math.sin(angles[2]), 0.],\n [-math.sin(angles[2]), math.cos(angles[2]), 0.],\n [0., 0., 1.]],\n dtype='float')\n\n mat = (mat1 @ mat2) @ mat3\n return mat", "def _rot(axis, angle):\n if axis == 1:\n return Matrix([[1, 0, 0],\n [0, cos(angle), -sin(angle)],\n [0, sin(angle), cos(angle)]])\n elif axis == 2:\n return Matrix([[cos(angle), 0, sin(angle)],\n [0, 1, 0],\n [-sin(angle), 0, cos(angle)]])\n elif axis == 3:\n return Matrix([[cos(angle), -sin(angle), 0],\n [sin(angle), cos(angle), 0],\n [0, 0, 1]])", "def AffineRz(theta, units='deg'):\n if units == 'deg':\n theta = np.deg2rad(theta)\n\n return np.mat([[np.cos(theta), -np.sin(theta), 0, 0],\n [np.sin(theta), np.cos(theta), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]\n ],\n dtype='f8'\n )", "def multi_rot_Y(angle_rads: numpy.ndarray) -> numpy.ndarray:\n ry = numpy.empty((angle_rads.shape[0], 4, 4))\n ry[...] 
= numpy.identity(4)\n ry[:, 0, 0] = ry[:, 2, 2] = numpy.cos(angle_rads)\n ry[:, 0, 2] = numpy.sin(angle_rads)\n ry[:, 2, 0] = -ry[:, 0, 2]\n\n return ry", "def rot_z(angle):\n sangle = math.sin(angle)\n cangle = math.cos(angle)\n rz = np.array([[cangle, sangle, 0.0],\n [-sangle, cangle, 0.0],\n [0.0, 0.0, 1.0]])\n return rz", "def rotation_matrix(yaw, pitch) -> TransformationMatrixType:\n return rotation_matrix_yx(math.radians(yaw + 180), math.radians(pitch))", "def rotation_matrix(angle, axis):\n about_z = rotation_about_z(angle)\n z_to_axis = z_to_vector(axis)\n axis_to_z = np.linalg.inv(z_to_axis)\n return reduce(np.dot, [z_to_axis, about_z, axis_to_z])", "def rotation_matrix( axis, angle ):\n\n # Trig factors.\n ca = cos(angle)\n sa = sin(angle)\n C = 1 - ca\n\n # Depack the axis.\n x, y, z = tuple( axis )\n\n # Multiplications (to remove duplicate calculations).\n xs = x*sa\n ys = y*sa\n zs = z*sa\n xC = x*C\n yC = y*C\n zC = z*C\n xyC = x*yC\n yzC = y*zC\n zxC = z*xC\n\n # Update the rotation matrix.\n matrix \t = np.zeros( (3,3) )\n matrix[0, 0] = x*xC + ca\n matrix[0, 1] = xyC - zs\n matrix[0, 2] = zxC + ys\n matrix[1, 0] = xyC + zs\n matrix[1, 1] = y*yC + ca\n matrix[1, 2] = yzC - xs\n matrix[2, 0] = zxC - ys\n matrix[2, 1] = yzC + xs\n matrix[2, 2] = z*zC + ca\n return matrix", "def get_rotation_matrix_2D(transform):\n yaw = np.deg2rad(transform.rotation.yaw)\n cy = np.cos(yaw)\n sy = np.sin(yaw)\n\n rotation_matrix_2D = np.array([[cy, -sy],\n [sy, cy]])\n return rotation_matrix_2D", "def rotation_matrix(axis, theta):\n axis = np.asarray(axis)\n axis = axis / math.sqrt(np.dot(axis, axis))\n a = math.cos(theta / 2.0)\n b, c, d = -axis * math.sin(theta / 2.0)\n aa, bb, cc, dd = a * a, b * b, c * c, d * d\n bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d\n\n R = np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],\n [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],\n [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])\n\n T = np.identity(4)\n T[:3, :3] = R\n return T", "def homog_rot_mtx(angle_rads: float, axis: str) -> numpy.array:\n cosang = numpy.cos(angle_rads)\n sinang = numpy.sin(angle_rads)\n\n if \"z\" == axis:\n return numpy.array(\n (\n (cosang, -sinang, 0, 0),\n (sinang, cosang, 0, 0),\n (0, 0, 1, 0),\n (0, 0, 0, 1),\n ),\n dtype=numpy.float64,\n )\n elif \"y\" == axis:\n return numpy.array(\n (\n (cosang, 0, sinang, 0),\n (0, 1, 0, 0),\n (-sinang, 0, cosang, 0),\n (0, 0, 0, 1),\n ),\n dtype=numpy.float64,\n )\n else:\n return numpy.array(\n (\n (1, 0, 0, 0),\n (0, cosang, -sinang, 0),\n (0, sinang, cosang, 0),\n (0, 0, 0, 1),\n ),\n dtype=numpy.float64,\n )", "def get_3drotation_matrix(axis, angle):\n angle = angle #*-1\n norm = np.linalg.norm(np.array(axis))\n if norm > 0:\n axis /= norm\n ax, ay, az = axis[0], axis[1], axis[2]\n cos, sin = np.cos(angle), np.sin(angle)\n rotmat = np.array([[cos + ax * ax * (1 - cos), ax * ay * (1 - cos) - az * sin, ax * az * (1 - cos) + ay * sin],\n [ay * ax * (1 - cos) + az * sin, cos + ay * ay * (1 - cos), ay * az * (1 - cos) - ax * sin],\n [az * ax * (1 - cos) - ay * sin, az * ay * (1 - cos) + ax * sin, cos + az * az * (1 - cos)]])\n return rotmat", "def rotation3D_rpy(roll: float, pitch: float, yaw: float) -> np.array:\n\n sr = np.sin(roll)\n cr = np.cos(roll)\n sp = np.sin(pitch)\n cp = np.cos(pitch)\n sy = np.sin(yaw)\n cy = np.cos(yaw)\n return np.array(\n [\n [cy * cp, cy * sp * sr - sy * cr, cy * sp * cr + sy * sr],\n [sy * cp, sy * sp * sr + cy * cr, sy * sp * cr - cy * sr],\n [-sp, cp * sr, cp * 
cr],\n ]\n )", "def rotation_matrix(phi):\n return np.asmatrix([\n [np.cos(phi), -np.sin(phi), 0],\n [np.sin(phi), np.cos(phi), 0],\n [0, 0, 1]\n ])", "def RotationY(theta):\n\n return Rotation([0., 1., 0.], theta)", "def zx_rotation(vector,theta):\r\n R = np.array([[np.cos(theta),0,np.sin(theta)],\r\n [0,1,0],\r\n [-np.sin(theta),0,np.cos(theta)]\r\n ])\r\n return np.dot(R,vector)", "def rotation_matrix(axis,theta):\n\taxis = np.asarray(axis)\n\ttheta = np.asarray(theta)\n\tif np.all(axis==0): return np.identity(3) \n\taxis = axis/np.sqrt(np.dot(axis,axis))\n\ta = np.cos(theta/2)\n\tb, c, d = -axis*np.sin(theta/2)\n\taa, bb, cc, dd = a*a, b*b, c*c, d*d\n\tbc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d\n\treturn np.array([[aa+bb-cc-dd,2*(bc+ad),2*(bd-ac)],[2*(bc-ad),aa+cc-bb-dd,2*(cd+ab)],\n\t\t[2*(bd+ac),2*(cd-ab),aa+dd-bb-cc]])", "def rot_y(theta):\n theta_rad = np.radians(theta)\n rotation_matrix = [[np.cos(theta_rad), 0, np.sin(theta_rad)],\n [0, 1, 0],\n [-np.sin(theta_rad), 0, np.cos(theta_rad)]]\n return np.matrix(rotation_matrix)", "def rot_y(angle):\n sangle = math.sin(angle)\n cangle = math.cos(angle)\n ry = np.array([[cangle, 0.0, -sangle],\n [0.0, 1.0, 0.0],\n [sangle, 0.0, cangle]])\n return ry", "def getEllipsYZRotMatrix(a1, a2):\n adir = a2 - a1\n amid = a1 + 0.5 * adir\n kath = np.sqrt((adir[0] * adir[0] + adir[1] * adir[1]) / 4.0)\n octantA2 = octant(a2)\n theta = np.arctan( abs( (adir[2]/2) / kath) )\n #[1, 4, 6, 7 ] => left rotation\n #[2, 3, 5, 8 ] => right rotation\n if octantA2 in [2, 3, 5, 8]: \n theta = -theta \n print \"theta =\" , np.rad2deg(theta)\n RotY = np.matrix( [ [ np.cos(theta), 0.0, np.sin(theta) ],\n [ 0.0 , 1.0, 0.0 ],\n [ -np.sin(theta), 0.0, np.cos(theta) ]\n ]) \n \n psi = np.arctan( abs( adir[1] / adir[0] ) )\n #[2, 4, 6, 8 ] => left rotation\n #[1, 3, 5, 7 ] => right rotation\n if octantA2 in [1, 3, 5, 7]:\n psi = -psi\n print \"psi =\" , np.rad2deg(psi)\n RotZ = np.matrix( [ [ np.cos(psi), -np.sin(psi), 0.0 ],\n [ np.sin(psi), np.cos(psi), 0.0 ],\n [ 0.0 , 0.0 , 1.0 ]\n ])\n return np.asarray( RotY * RotZ )", "def rotation_matrix(theta=0, phi=0, psi=0, units='deg'):\n\n rpy = Rpy(theta,units)\n rmx = Rmx(phi, units)\n rpz = Rpz(psi, units)\n\n return np.matmul(rpy, np.matmul(rmx, rpz))", "def apply_rotation_z(self, theta=0.0 ):\n \n theta = radians(theta)\n new_rotation_matrix = [[ +cos(theta) , -sin(theta) , 0 ],\n [ +sin(theta) , +cos(theta) , 0 ],\n [ 0 , 0 , 1 ]] \n \n self.rotation_matrix_exp = np.dot( new_rotation_matrix, self.rotation_matrix_exp )", "def rotation3(size=None): # noqa\n if size is None:\n size = ()\n else:\n try:\n size = tuple(size)\n except TypeError:\n size = (size,)\n theta, phi, z = 2. * random((3, 1) + size)\n theta *= pi # Initial rotation angle about z-axis.\n phi *= pi # Angle in xy plane for tilt of z-axis.\n # Magnitude of tilt is random variable z.\n r = sqrt(z)\n v = concatenate((r*sin(phi), r*cos(phi), sqrt(2.-z)))\n st, ct = sin(theta), cos(theta)\n s = concatenate((v[0]*ct - v[1]*st, v[0]*st + v[1]*ct))\n m = v[:, newaxis].repeat(3, axis=1)\n m[:, :2] *= s\n m[0, :2] -= concatenate((ct, st))\n m[1, :2] += concatenate((st, -ct))\n m[:2, 2] *= v[2]\n m[2, 2] = 1. 
- z # Equals v[2]*v[2] - 1.\n if m.ndim > 2:\n m = transpose(m, roll(range(m.ndim), -2)).copy()\n return m", "def euler_to_rot3d(psi, theta, phi):\n rphi = np.array([[np.cos(phi), -np.sin(phi), 0],\n [np.sin(phi), np.cos(phi), 0],\n [0, 0, 1]])\n rtheta = np.array([[np.cos(theta), 0, np.sin(theta)],\n [0, 1, 0],\n [-np.sin(theta), 0, np.cos(theta)]])\n rpsi = np.array([[np.cos(psi), -np.sin(psi), 0],\n [np.sin(psi), np.cos(psi), 0],\n [0, 0, 1]])\n return np.dot(rpsi, np.dot(rtheta, rphi))", "def so3_matrix_generator(axis, theta):\n theta = np.asarray(theta)\n\n theta = theta[:, None, None]\n x, y, z = axis.T\n zero = np.zeros_like(x)\n k = np.stack([zero, -z, y, z, zero, -x, -y, x, zero], 1).reshape((-1, 3, 3))\n rot = np.eye(3)[None] + np.sin(theta) * k + (1 - np.cos(theta)) * k @ k\n\n return rot", "def multi_rot_Z(angle_rads: numpy.ndarray) -> numpy.ndarray:\n rz = numpy.empty((angle_rads.shape[0], 4, 4))\n rz[...] = numpy.identity(4)\n rz[:, 0, 0] = rz[:, 1, 1] = numpy.cos(angle_rads)\n rz[:, 1, 0] = numpy.sin(angle_rads)\n rz[:, 0, 1] = -rz[:, 1, 0]\n return rz", "def Rpz(angle=0, units='deg'):\n\n if(units=='deg'):\n angle = angle*pi/180\n\n C = np.cos(angle)\n S = np.sin(angle)\n\n M = np.identity(3)\n\n M[0,0] = +C\n M[0,1] = -S\n M[1,0] = +S\n M[1,1] = +C\n\n return M", "def rotZ(theta, mode = 'radians'):\n\n\tif mode != 'radians' and mode != 'degrees':\n\t\traise ValueError('Mode should either be ``radians`` or ``degrees``.')\n\tif mode == 'degrees':\n\t\ttheta = np.deg2rad(theta)\n\treturn np.matrix([[np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], \\\n\t\t[0., 0., 1.]])", "def apply_rotation_y(self, phi=0.0 ):\n \n phi = radians(phi)\n new_rotation_matrix = [[ +cos(phi) , 0 , +sin(phi) ],\n [ 0 , 1 , 0 ],\n [ -sin(phi) , 0 , +cos(phi) ]] \n \n self.rotation_matrix_exp = np.dot( new_rotation_matrix, self.rotation_matrix_exp )", "def givens_rotation_matrix(i, j, theta, N):\n R = np.identity(N)\n c = np.cos(theta)\n s = np.sin(theta)\n R[i, i] = c\n R[j, j] = c\n R[i, j] = -s\n R[j, i] = s\n return R", "def rotation(self, angle, axis):\r\n\r\n sqr_a = axis.x*axis.x\r\n sqr_b = axis.y*axis.y\r\n sqr_c = axis.z*axis.z\r\n len2 = sqr_a+sqr_b+sqr_c\r\n\r\n k2 = math.cos(angle)\r\n k1 = (1.0-k2)/len2\r\n k3 = math.sin(angle)/math.sqrt(len2)\r\n k1ab = k1*axis.x*axis.y\r\n k1ac = k1*axis.x*axis.z\r\n k1bc = k1*axis.y*axis.z\r\n k3a = k3*axis.x\r\n k3b = k3*axis.y\r\n k3c = k3*axis.z\r\n\r\n return mat4( k1*sqr_a+k2, k1ab-k3c, k1ac+k3b, 0.0,\r\n k1ab+k3c, k1*sqr_b+k2, k1bc-k3a, 0.0,\r\n k1ac-k3b, k1bc+k3a, k1*sqr_c+k2, 0.0,\r\n 0.0, 0.0, 0.0, 1.0)", "def rotation_matrix(self, rotation, rotation_order=\"zyx\"):\n x = math.radians(rotation[0])\n y = math.radians(rotation[1])\n z = math.radians(rotation[2])\n\n cos = math.cos\n sin = math.sin\n if rotation_order == 'zyx':\n index_0 = cos(y) * cos(z)\n index_1 = cos(z) * sin(x) * sin(y) - cos(x) * sin(z)\n index_2 = cos(x) * cos(z) * sin(y) + sin(x) * sin(z)\n\n index_3 = cos(y) * sin(z)\n index_4 = cos(x) * cos(z) + sin(x) * sin(y) * sin(z)\n index_5 = -cos(z) * sin(x) + cos(x) * sin(y) * sin(z)\n\n index_6 = -sin(y)\n index_7 = -cos(y) * sin(x)\n index_8 = cos(x) * cos(y)\n elif rotation_order == 'xyz':\n index_0 = cos(y) * cos(z)\n index_1 = -cos(z) * sin(z)\n index_2 = sin(y)\n\n index_3 = cos(x) * sin(z) + sin(x) * sin(y) * cos(z)\n index_4 = cos(x) * cos(z) - sin(x) * sin(y) * sin(z)\n index_5 = -sin(x) * cos(y)\n\n index_6 = sin(x) * sin(z) - cos(x) * sin(y) * cos(z)\n index_7 = sin(x) * cos(z) + cos(x) * sin(y) 
* sin(z)\n index_8 = cos(x) * cos(y)\n\n rot_mat = ((index_0, index_1, index_2),\n (index_3, index_4, index_5),\n (index_6, index_7, index_8))\n\n return rot_mat", "def axis_angle_matrix3(unit, theta):\n x, y, z = unit\n c = math.cos(theta)\n s = math.sin(theta)\n C = 1 - c\n return np.matrix([\n [x * x * C + c, x * y * C - z * s, x * z * C + y * s],\n [y * x * C + z * s, y * y * C + c, y * z * C - x * s],\n [z * x * C - y * s, z * y * C + x * s, z * z * C + c],\n ])", "def _rmatrix(theta):\n r = np.zeros((4, 4), np.complex128)\n\n cos_term = np.cos(theta / 2.0) * complex(1, 0)\n sin_term = np.sin(theta / 2.0) * complex(1, 0)\n\n r[0, 0] = cos_term\n r[1, 1] = cos_term\n\n r[0, 2] = sin_term\n r[1, 3] = sin_term\n\n r[2, 0] = -sin_term\n r[3, 1] = -sin_term\n\n r[2, 2] = cos_term\n r[3, 3] = cos_term\n\n return r", "def rotate_z(self, angle):\n angle *= np.pi / 180\n return self.transform(np.matrix([[np.cos(angle), -np.sin(angle), 0],\n [np.sin(angle), np.cos(angle), 0],\n [0, 0, 1]]))", "def rotmat(axis, angle):\n mat = np.eye(3)\n if angle is None or np.isclose(angle, 0.0):\n return mat\n cang = np.cos(angle*radians)\n sang = np.sin(angle*radians)\n if axis == 1:\n mat = np.array(((1, 0, 0), (0, cang, -sang), (0, sang, cang)))\n elif axis == 2:\n mat = np.array(((cang, 0, sang), (0, 1, 0), (-sang, 0, cang)))\n else:\n mat = np.array(((cang, -sang, 0), (sang, cang, 0), (0, 0, 1)))\n return np.matrix(mat)", "def euler2rot3D(psi, theta, phi):\n Rphi = np.array([[np.cos(phi), np.sin(phi), 0],\n [-np.sin(phi), np.cos(phi), 0],\n [0, 0, 1]])\n Rtheta = np.array([[np.cos(theta), 0, -np.sin(theta)],\n [0, 1, 0],\n [np.sin(theta), 0, np.cos(theta)]])\n Rpsi = np.array([[np.cos(psi), np.sin(psi), 0],\n [-np.sin(psi), np.cos(psi), 0],\n [0, 0, 1]])\n return np.dot(Rpsi, np.dot(Rtheta, Rphi))", "def rotz(t):\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])", "def transform3D_rpy(\n x: float, y: float, z: float, roll: float, pitch: float, yaw: float\n) -> np.array:\n sr = np.sin(roll)\n cr = np.cos(roll)\n sp = np.sin(pitch)\n cp = np.cos(pitch)\n sy = np.sin(yaw)\n cy = np.cos(yaw)\n return np.array(\n [\n [cy * cp, cy * sp * sr - sy * cr, cy * sp * cr + sy * sr, x],\n [sy * cp, sy * sp * sr + cy * cr, sy * sp * cr - cy * sr, y],\n [-sp, cp * sr, cp * cr, z],\n [0, 0, 0, 1.0],\n ]\n )", "def quaternion2rot3d(quat):\n q01 = quat[0] * quat[1]\n q02 = quat[0] * quat[2]\n q03 = quat[0] * quat[3]\n q11 = quat[1] * quat[1]\n q12 = quat[1] * quat[2]\n q13 = quat[1] * quat[3]\n q22 = quat[2] * quat[2]\n q23 = quat[2] * quat[3]\n q33 = quat[3] * quat[3]\n\n # Obtain the rotation matrix\n rotation = np.zeros((3, 3))\n rotation[0, 0] = (1. - 2. * (q22 + q33))\n rotation[0, 1] = 2. * (q12 - q03)\n rotation[0, 2] = 2. * (q13 + q02)\n rotation[1, 0] = 2. * (q12 + q03)\n rotation[1, 1] = (1. - 2. * (q11 + q33))\n rotation[1, 2] = 2. * (q23 - q01)\n rotation[2, 0] = 2. * (q13 - q02)\n rotation[2, 1] = 2. * (q23 + q01)\n rotation[2, 2] = (1. - 2. 
* (q11 + q22))\n\n return rotation", "def matrix_translate_3d(tx: float, ty: float, tz: float) -> np.matrix:\n return np.matrix([[1, 0, 0, tx], [0, 1, 0, ty], [0, 0, 1, tz], [0, 0, 0, 1]])", "def rotation_to_transformation_matrix(R):\n R = Matrix(R)\n T = R.col_insert(3, Matrix([0., 0., 0.]))\n T = T.row_insert(3, Matrix([[0., 0., 0., 1.]]))\n return T", "def rotation_matrix(theta):\n return np.array([\n [np.cos(theta), -np.sin(theta)],\n [np.sin(theta), np.cos(theta)]\n ]);", "def angleAxis2rot3D(axis, theta):\n if len(axis) is not 3:\n raise ValueError('Number of axis element must be 3!')\n axis = axis.astype(float)\n axis /= np.linalg.norm(axis)\n a = axis[0]\n b = axis[1]\n c = axis[2]\n cosTheta = np.cos(theta)\n bracket = 1 - cosTheta\n aBracket = a * bracket\n bBracket = b * bracket\n cBracket = c * bracket\n sinTheta = np.sin(theta)\n aSinTheta = a * sinTheta\n bSinTheta = b * sinTheta\n cSinTheta = c * sinTheta\n rot3D = np.array([[a*aBracket+cosTheta, a*bBracket-cSinTheta, a*cBracket+bSinTheta],\n [b*aBracket+cSinTheta, b*bBracket+cosTheta, b*cBracket-aSinTheta],\n [c*aBracket-bSinTheta, c*bBracket+aSinTheta, c*cBracket+cosTheta]])\n return rot3D", "def rotz(t):\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[c, -s, 0],\n [s, c, 0],\n [0, 0, 1]])", "def rotz(t):\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[c, -s, 0],\n [s, c, 0],\n [0, 0, 1]])", "def rotz(t):\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[c, -s, 0],\n [s, c, 0],\n [0, 0, 1]])", "def rotz(t):\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[c, -s, 0],\n [s, c, 0],\n [0, 0, 1]])", "def eulerAnglesToRotationMatrix(theta):\n\n R_x = np.array([[1, 0, 0 ],\n [0, np.cos(theta[0]), -np.sin(theta[0]) ],\n [0, np.sin(theta[0]), np.cos(theta[0]) ]\n ])\n R_y = np.array([[np.cos(theta[1]), 0, np.sin(theta[1]) ],\n [0, 1, 0 ],\n [-np.sin(theta[1]), 0, np.cos(theta[1]) ]\n ])\n R_z = np.array([[np.cos(theta[2]), -np.sin(theta[2]), 0],\n [np.sin(theta[2]), np.cos(theta[2]), 0],\n [0, 0, 1]\n ])\n R = np.dot(R_z, np.dot( R_y, R_x ))\n return R", "def Rz(q):\n sin_q, cos_q = sin(q), cos(q)\n return numpy.matrix([\n [cos_q, sin_q, 0, 0],\n [-sin_q, cos_q, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ])", "def rotation_matrix2(angle):\n c, s = cos(angle), sin(angle)\n return np.array([[c, -s], [s, c]])", "def rotate(self, x=0, y=0, z=0):\n\t\tquaternion = R.from_euler('xyz', [x, y, z], degrees=True)\n\t\trotation_matrix = np.array(quaternion.as_matrix())\n\t\trotation_matrix = np.pad(rotation_matrix, [(0, 1), (0, 1)], mode='constant')\n\t\trotation_matrix[3,3] = 1\n\n\t\tself.matrix = np.matmul(self.matrix, rotation_matrix)", "def rotation_matrix(self):\n return self.affine_matrix[0:3][:, 0:3]", "def rotation(X, Y, C, S) :\n Xrot = X*C + Y*S \n Yrot = Y*C - X*S \n return Xrot, Yrot", "def rotate_y(angle):\n log.dev(\"lib.mathp.rotate_y is deprecated. 
Use lib.rotation.R2 instead.\")\n\n cosA = np.cos(angle)\n sinA = np.sin(angle)\n R = np.array([[cosA, 0, -sinA], [0, 1, 0], [sinA, 0, cosA]])\n return R", "def RZ(rotRadian: float):\n return np.array([\n [np.cos(rotRadian), -np.sin(rotRadian), 0],\n [np.sin(rotRadian), np.cos(rotRadian), 0],\n [0, 0, 1]\n ])", "def rotY(theta, mode = 'radians'):\n\n\tif mode != 'radians' and mode != 'degrees':\n\t\traise ValueError('Mode should either be ``radians`` or ``degrees``.')\n\tif mode == 'degrees':\n\t\ttheta = np.deg2rad(theta)\n\treturn np.matrix([[np.cos(theta), 0., np.sin(theta)], [0, 1, 0], \\\n\t\t[-np.sin(theta), 0, np.cos(theta)]])", "def axis2rotmat(axis):\n return quat2rotmat(axis2quat(axis))", "def operator(self, params: Tensor) -> Tensor:\n theta, phi = params\n # calculate entries\n a: Tensor = exp(1j * phi) * cos(theta / 2)\n b: Tensor = sin(theta / 2)\n c: Tensor = -b\n d: Tensor = exp(-1j * phi) * cos(theta / 2)\n # construct the rows of the rotation matrix\n r1: Tensor = cat((a.view(1), b.view(1)))\n r2: Tensor = cat((c.view(1), d.view(1)))\n # build and return the rotation matrix\n rot: Tensor = cat((r1, r2)).view(2, 2)\n return rot", "def transform3D(x: float, y: float, z: float, R: np.array) -> np.array:\n T = np.zeros((4, 4))\n T[:3, :3] = R\n T[:, 3] = [x, y, z, 1.0]\n\n return T", "def create_azimuthal_polarization(dim, rotation):\n theta_array = np.zeros((dim, dim))\n\n for i in range(np.size(theta_array, 0)):\n for j in range(np.size(theta_array, 1)):\n x = -dim / 2 + i\n y = -dim / 2 + j\n # perform roation\n th = math.pi*rotation/180.0\n x = np.cos(th)*x - np.sin(th)*y\n y = np.sin(th)*x + np.cos(th)*y\n\n rot = math.atan2(x, y) + math.pi/2\n # factor = (rot % (2*math.pi))\n theta_array[i][j] = (rot % (2 * math.pi))\n return theta_array", "def rotation_axis_matrix(phi: numbers.Real, axis: int):\n\n if axis == 0:\n return [[1, 0, 0, 0],\n [0, cos(phi), sin(phi), 0],\n [0, sin(phi), cos(phi), 0],\n [0, 0, 0, 1]]\n elif axis == 1:\n return [[cos(phi), 0, sin(phi), 0],\n [0, 1, 0, 0],\n [-sin(phi), 0, cos(phi), 0],\n [0, 0, 0, 1]]\n elif axis == 2:\n return [[cos(phi), -sin(phi), 0, 0],\n [sin(phi), cos(phi), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]]\n else:\n raise ValueError(\"only 3d space coordinates as homogeneous vectors are supported\")", "def rotation_matrix_2d(angle):\n psi = Angle(angle).rad\n return np.array([[cos(psi), -sin(psi)],\n [sin(psi), cos(psi)]])", "def rotation_matrix(theta, axis=None):\n if axis is None:\n axis = [0, 0, 1]\n axis = np.asarray(axis)\n axis = axis/math.sqrt(np.dot(axis, axis))\n a = math.cos(theta/2.0)\n b, c, d = -axis*math.sin(theta/2.0)\n aa, bb, cc, dd = a*a, b*b, c*c, d*d\n bc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d\n return np.array([[aa+bb-cc-dd, 2*(bc+ad), 2*(bd-ac)],\n [2*(bc-ad), aa+cc-bb-dd, 2*(cd+ab)],\n [2*(bd+ac), 2*(cd-ab), aa+dd-bb-cc]])", "def angle_axis_to_rot3d(axis, theta):\n if isinstance(axis, string_types):\n axis = axis.lower()\n if axis == 'x':\n axis = np.array([1., 0., 0.])\n elif axis == 'y':\n axis = np.array([0., 1., 0.])\n elif axis == 'z':\n axis = np.array([0., 0., 1.])\n else:\n raise ValueError(\"Axis should be 'x', 'y', 'z' or a 3D vector.\")\n elif len(axis) != 3:\n raise ValueError(\"Axis should be 'x', 'y', 'z' or a 3D vector.\")\n axis = axis.astype(float)\n axis /= np.linalg.norm(axis)\n a = axis[0]\n b = axis[1]\n c = axis[2]\n cos_theta = np.cos(theta)\n bracket = 1 - cos_theta\n a_bracket = a * bracket\n b_bracket = b * bracket\n c_bracket = c * bracket\n sin_theta = np.sin(theta)\n 
a_sin_theta = a * sin_theta\n b_sin_theta = b * sin_theta\n c_sin_theta = c * sin_theta\n rot3d = np.array(\n [[a * a_bracket + cos_theta, a * b_bracket - c_sin_theta, a * c_bracket + b_sin_theta],\n [b * a_bracket + c_sin_theta, b * b_bracket + cos_theta, b * c_bracket - a_sin_theta],\n [c * a_bracket - b_sin_theta, c * b_bracket + a_sin_theta, c * c_bracket + cos_theta]])\n return rot3d", "def getRotationMatrix(x, y, z, angle):\n # impossible to have a rotational matrix around (0, 0 ,0)\n if x == 0 and y == 0 and z == 0:\n raise Exception(\"Cannot have a rotation matrix around (0, 0, 0)\")\n\n # normalize vector\n vec = MatrixExtended([x, y, z])\n length = np.linalg.norm(vec)\n x /= length\n y /= length\n z /= length\n\n # some shortcuts for readability\n xx = x * x\n yy = y * y\n zz = z * z\n C = math.cos\n S = math.sin\n\n # calculate matrix elements\n e11 = xx + (1 - xx) * C(angle)\n e12 = x * y * (1 - C(angle)) - z * S(angle)\n e13 = x * z * (1 - C(angle)) + y * S(angle)\n e21 = x * y * (1 - C(angle)) + z * S(angle)\n e22 = yy + (1 - yy) * C(angle)\n e23 = y * z * (1 - C(angle)) - x * S(angle)\n e31 = x * z * (1 - C(angle)) - y * S(angle)\n e32 = y * z * (1 - C(angle)) + x * S(angle)\n e33 = zz + (1 - zz) * C(angle)\n\n return MatrixExtended([\n [e11, e12, e13, 0],\n [e21, e22, e23, 0],\n [e31, e32, e33, 0],\n [0, 0, 0, 1]])", "def rotation_matrix(dt, omega):\n R = np.array([\n [np.cos(omega * dt), -np.sin(omega * dt)],\n [np.sin(omega * dt), np.cos(omega * dt)]\n ])\n return R", "def rot(theta):\n cos = np.cos(theta)\n sin = np.sin(theta)\n return( np.array( [[cos, sin], [-sin, cos]] ) )", "def rotation(x1, z1, x2, z2):\n e1 = np.zeros(shape=(3, 3))\n e2 = np.zeros(shape=(3, 3))\n e1[0, :] = x1 / np.linalg.norm(x1)\n e1[2, :] = z1 / np.linalg.norm(z1)\n e1[1, :] = np.cross(e1[2, :], e1[0, :])\n e2[0, :] = x2 / np.linalg.norm(x2)\n e2[2, :] = z2 / np.linalg.norm(z2)\n e2[1, :] = np.cross(e2[2, :], e2[0, :])\n R = np.zeros(shape=(3, 3))\n for i in range(3):\n for j in range(3):\n R[i, j] = np.dot(e1[i, :], e2[j, :])\n R = np.transpose(R)\n return R", "def rotation(theta, axis):\n axis = np.asarray(axis)\n axis = axis/math.sqrt(np.dot(axis, axis))\n a = math.cos(theta/2.0)\n b, c, d = -axis*math.sin(theta/2.0)\n aa, bb, cc, dd = a*a, b*b, c*c, d*d\n bc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d\n return np.array([[aa+bb-cc-dd, 2*(bc+ad), 2*(bd-ac)],\n [2*(bc-ad), aa+cc-bb-dd, 2*(cd+ab)],\n [2*(bd+ac), 2*(cd-ab), aa+dd-bb-cc]])" ]
[ "0.7846392", "0.7652564", "0.75773746", "0.72624236", "0.72098386", "0.71632046", "0.711217", "0.707963", "0.7057155", "0.70560473", "0.701651", "0.6999073", "0.6945559", "0.69309556", "0.68679565", "0.6837997", "0.68178195", "0.6785411", "0.6732221", "0.67160934", "0.6714742", "0.6696784", "0.6681741", "0.66815233", "0.66807014", "0.666236", "0.6657872", "0.66576976", "0.66576976", "0.6656944", "0.66481817", "0.66446054", "0.66067916", "0.66060257", "0.6592972", "0.6589696", "0.6566252", "0.6554498", "0.65497434", "0.6539326", "0.6538688", "0.65219575", "0.6485697", "0.6468184", "0.64640355", "0.6462954", "0.64565533", "0.64396554", "0.64178056", "0.6412968", "0.63976395", "0.6396702", "0.6373472", "0.6351478", "0.63486594", "0.6347959", "0.6346984", "0.6346864", "0.6346558", "0.6337408", "0.63310623", "0.6328166", "0.63136256", "0.63036793", "0.6302877", "0.6300622", "0.62978137", "0.62894213", "0.62893575", "0.62887836", "0.6268062", "0.6267911", "0.62577003", "0.62521553", "0.6239102", "0.6239102", "0.6239102", "0.6239102", "0.6229592", "0.62229794", "0.6211928", "0.6207859", "0.6204693", "0.62043893", "0.61914545", "0.6181906", "0.6181542", "0.6177753", "0.6173475", "0.616745", "0.616674", "0.616616", "0.61622995", "0.6161987", "0.6161947", "0.61507314", "0.61493343", "0.61472535", "0.6144788", "0.61416614" ]
0.751571
3
Create a 3D rotation matrix for rotation about the z-axis.
             ( cos(x)  sin(x)  0)
R(theta) =   (-sin(x)  cos(x)  0)
             (   0       0     1)
def rotation3Dz(theta):
    rmat = np.zeros((3,3))
    rmat[0,0] = rmat[1,1] = np.cos(theta)
    rmat[0,1] = np.sin(theta)
    rmat[1,0] = -rmat[0,1]
    rmat[2,2] = 1
    return rmat
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matrix_rotate_3d_z(deg: float) -> np.matrix:\n from numpy import cos, sin, pi\n rad_z = -deg * pi/180\n c_z = cos(rad_z)\n s_z = sin(rad_z)\n return np.matrix([[c_z, -s_z, 0], [s_z, c_z, 0], [0, 0, 1]])", "def rotation3D_z(angle: float) -> np.array:\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])", "def rotation_matrix3(angle_x=0, angle_y=0, angle_z=0):\n if angle_x != 0:\n c, s = cos(angle_x), sin(angle_x)\n r = np.array([[1, 0, 0], [0, c, -s], [0, s, c]])\n else:\n r = np.identity(3)\n\n if angle_y != 0:\n c, s = cos(angle_y), sin(angle_y)\n r = r.dot(np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]]))\n\n if angle_z != 0:\n c, s = cos(angle_z), sin(angle_z)\n r = r.dot(np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]]))\n\n return r", "def z_rotmat(theta):\n cos_t = np.cos(theta)\n sin_t = np.sin(theta)\n return np.array([[cos_t, -sin_t, 0],\n [sin_t, cos_t, 0],\n [0, 0, 1]])", "def rot_z(theta):\n theta_rad = np.radians(theta)\n rotation_matrix = [[np.cos(theta_rad), -np.sin(theta_rad), 0],\n [np.sin(theta_rad), np.cos(theta_rad), 0],\n [0, 0, 1]]\n return np.matrix(rotation_matrix)", "def rotation_mat(self) -> np.ndarray:\n rot = np.zeros((3, 3))\n\n txx = 2 * self.x * self.x\n tyy = 2 * self.y * self.y\n tzz = 2 * self.z * self.z\n twx = 2 * self.w * self.x\n twy = 2 * self.w * self.y\n twz = 2 * self.w * self.z\n txy = 2 * self.x * self.y\n txz = 2 * self.x * self.z\n tyz = 2 * self.y * self.z\n\n rot[0, 0] = 1. - tyy - tzz\n rot[0, 1] = txy - twz\n rot[0, 2] = txz + twy\n rot[1, 0] = txy + twz\n rot[1, 1] = 1. - txx - tzz\n rot[1, 2] = tyz - twx\n rot[2, 0] = txz - twy\n rot[2, 1] = tyz + twx\n rot[2, 2] = 1. - txx - tyy\n\n return rot", "def rotate_z(angle):\n log.dev(\"lib.mathp.rotate_z is deprecated. 
Use lib.rotation.R3 instead.\")\n\n cosA = np.cos(angle)\n sinA = np.sin(angle)\n R = np.array([[cosA, sinA, 0], [-sinA, cosA, 0], [0, 0, 1]])\n return R", "def rot_z(angle):\n sangle = math.sin(angle)\n cangle = math.cos(angle)\n rz = np.array([[cangle, sangle, 0.0],\n [-sangle, cangle, 0.0],\n [0.0, 0.0, 1.0]])\n return rz", "def create_rotation_matrix_3d(angles) -> np.array:\n\n mat1 = np.array([[1., 0., 0.],\n [0., math.cos(angles[0]), math.sin(angles[0])],\n [0., -math.sin(angles[0]), math.cos(angles[0])]],\n dtype='float')\n\n mat2 = np.array([[math.cos(angles[1]), 0., -math.sin(angles[1])],\n [0., 1., 0.],\n [math.sin(angles[1]), 0., math.cos(angles[1])]],\n dtype='float')\n\n mat3 = np.array([[math.cos(angles[2]), math.sin(angles[2]), 0.],\n [-math.sin(angles[2]), math.cos(angles[2]), 0.],\n [0., 0., 1.]],\n dtype='float')\n\n mat = (mat1 @ mat2) @ mat3\n return mat", "def rotation3Dx(theta):\n rmat = np.zeros((3,3))\n rmat[0,0], rmat[0,1], rmat[0,2] = 1.0, 0.0, 0.0\n rmat[1,0], rmat[1,1], rmat[1,2] = 0.0, np.cos(theta), np.sin(theta)\n rmat[2,0], rmat[2,1], rmat[2,2] = 0.0, -np.sin(theta), np.cos(theta)\n \n return rmat", "def matrix_rotate_3d_x(deg: float) -> np.matrix:\n from numpy import cos, sin, pi\n rad_x = -deg * pi/180\n c_x = cos(rad_x)\n s_x = sin(rad_x)\n return np.matrix([[1, 0, 0], [0, c_x, -s_x], [0, s_x, c_x]])", "def random_rotation_matrix():\n\n x = np.random.uniform(size=3)\n theta = x[0]*2*math.pi\n phi = x[1]*2*math.pi\n z = x[2]*2\n\n r = math.sqrt(z)\n vx = math.sin(phi)*r\n vy = math.cos(phi)*r\n vz = math.sqrt(2.0-z)\n\n st = math.sin(theta)\n ct = math.cos(theta)\n\n sx = vx*ct-vy*st\n sy = vx*st+vy*ct\n\n return np.array([[vx*sx-ct, vx*sy-st, vx*vz],\n [vy*sx+st, vy*sy-ct, vy*vz],\n [vz*sx,vz*sy,1.0-z]])", "def rotate_z(self, angle):\n angle *= np.pi / 180\n return self.transform(np.matrix([[np.cos(angle), -np.sin(angle), 0],\n [np.sin(angle), np.cos(angle), 0],\n [0, 0, 1]]))", "def generate_rotation_matrix(x_angle, y_angle, z_angle):\n return np.array([\n [1, 0, 0],\n [0, np.cos(x_angle), -np.sin(x_angle)],\n [0, np.sin(x_angle), np.cos(x_angle)],\n ]).dot([\n [np.cos(y_angle), 0, np.sin(y_angle)],\n [0, 1, 0],\n [-np.sin(y_angle), 0, np.cos(y_angle)],\n ]).dot([\n [np.cos(z_angle), -np.sin(z_angle), 0],\n [np.sin(z_angle), np.cos(z_angle), 0],\n [0, 0, 1],\n ]).tolist()", "def rotation_matrix3(axis, theta):\n R = np.eye(3)\n c = math.cos(theta)\n s = math.sin(theta)\n a1 = (axis + 1) % 3\n a2 = (axis + 2) % 3\n R[a1, a1] = c\n R[a1, a2] = -s\n R[a2, a1] = s\n R[a2, a2] = c\n return np.matrix(R)", "def rotation3(size=None): # noqa\n if size is None:\n size = ()\n else:\n try:\n size = tuple(size)\n except TypeError:\n size = (size,)\n theta, phi, z = 2. * random((3, 1) + size)\n theta *= pi # Initial rotation angle about z-axis.\n phi *= pi # Angle in xy plane for tilt of z-axis.\n # Magnitude of tilt is random variable z.\n r = sqrt(z)\n v = concatenate((r*sin(phi), r*cos(phi), sqrt(2.-z)))\n st, ct = sin(theta), cos(theta)\n s = concatenate((v[0]*ct - v[1]*st, v[0]*st + v[1]*ct))\n m = v[:, newaxis].repeat(3, axis=1)\n m[:, :2] *= s\n m[0, :2] -= concatenate((ct, st))\n m[1, :2] += concatenate((st, -ct))\n m[:2, 2] *= v[2]\n m[2, 2] = 1. 
- z # Equals v[2]*v[2] - 1.\n if m.ndim > 2:\n m = transpose(m, roll(range(m.ndim), -2)).copy()\n return m", "def transform3D(x: float, y: float, z: float, R: np.array) -> np.array:\n T = np.zeros((4, 4))\n T[:3, :3] = R\n T[:, 3] = [x, y, z, 1.0]\n\n return T", "def rotation_matrix_3x3_axis(angle, axis):\n assert axis.lower() in ['x','y','z']\n assert -180.0 <= angle <= 180.0\n angle_r = angle * (np.pi / 180.0)\n sa = np.sin(angle_r)\n ca = np.cos(angle_r)\n\n if axis == 'x':\n R = np.array([ [1, 0, 0],\n [0, ca, -sa],\n [0, sa, ca],\n ])\n elif axis == 'y':\n R = np.array([ [ca, 0, sa],\n [0, 1, 0],\n [-sa, 0, ca],\n ])\n elif axis == 'z':\n R = np.array([ [ca, -sa, 0],\n [sa, ca, 0],\n [0, 0, 1],\n ])\n return R", "def RotationMatrix(theta, x, y, z, point=None):\n\treturn mach.rotation_matrix(theta, [x, y, z])", "def rotation_matrix(rx, ry, rz):\n # Convert from degrees to radians.\n rx = np.pi * rx / 180\n ry = np.pi * ry / 180\n rz = np.pi * rz / 180\n\n # Pre-compute sine and cosine of angles.\n cx, cy, cz = np.cos([rx, ry, rz])\n sx, sy, sz = np.sin([rx, ry, rz])\n\n # Set up euler rotations.\n Rx = np.array([[1, 0, 0, 0],\n [0, cx, -sx, 0],\n [0, sx, cx, 0],\n [0, 0, 0, 1]])\n\n Ry = np.array([[cy, 0, sy, 0],\n [0, 1, 0, 0],\n [-sy, 0, cy, 0],\n [0, 0, 0, 1]])\n\n Rz = np.array([[cz, -sz, 0, 0],\n [sz, cz, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n\n return Rz.dot(Ry.dot(Rx))", "def rotateZ(self, angle):\r\n rad = angle * math.pi / 180\r\n cosa = math.cos(rad)\r\n sina = math.sin(rad)\r\n x = self.x * cosa - self.y * sina\r\n y = self.x * sina + self.y * cosa\r\n return Point3D(x, y, self.z)", "def rotateZ(self, angle):\n rad = angle * math.pi / 180\n cosa = math.cos(rad)\n sina = math.sin(rad)\n x = self.x * cosa - self.y * sina\n y = self.x * sina + self.y * cosa\n return Point3D(x, y, self.z)", "def rotateZ(self, angle):\n rad = angle * math.pi / 180\n cosa = math.cos(rad)\n sina = math.sin(rad)\n x = self.x * cosa - self.y * sina\n y = self.x * sina + self.y * cosa\n return Point3D(x, y, self.z)", "def rotateZ(self, angle):\n rad = angle * math.pi / 180\n cosa = math.cos(rad)\n sina = math.sin(rad)\n x = self.x * cosa - self.y * sina\n y = self.x * sina + self.y * cosa\n return Point3D(x, y, self.z)", "def rotateZ(self, angle):\n rad = angle * math.pi / 180\n cosa = math.cos(rad)\n sina = math.sin(rad)\n x = self.x * cosa - self.y * sina\n y = self.x * sina + self.y * cosa\n return Point3D(x, y, self.z)", "def rotateZ(self, angle):\n rad = math.radians(angle)\n cosa = math.cos(rad)\n sina = math.sin(rad)\n x = self.x * cosa - self.y * sina\n y = self.x * sina + self.y * cosa\n return Point3D(x, y, self.z)", "def RotationZ(theta):\n\n return Rotation([0., 0., 1.], theta)", "def rotation(x,y,z):\r\n phi = np.arctan(z/sqrt(x**2+y**2))\r\n lamb = np.arctan2(y,x)\r\n G = np.array([[-sin(lamb), cos(lamb), 0],\r\n [-sin(phi) * cos(lamb), -sin(phi) * sin(lamb), cos(phi)],\r\n [cos(phi) * cos(lamb), cos(phi) * sin(lamb), sin(phi)]])\r\n return (G)", "def get_3drotation_matrix(axis, angle):\n angle = angle #*-1\n norm = np.linalg.norm(np.array(axis))\n if norm > 0:\n axis /= norm\n ax, ay, az = axis[0], axis[1], axis[2]\n cos, sin = np.cos(angle), np.sin(angle)\n rotmat = np.array([[cos + ax * ax * (1 - cos), ax * ay * (1 - cos) - az * sin, ax * az * (1 - cos) + ay * sin],\n [ay * ax * (1 - cos) + az * sin, cos + ay * ay * (1 - cos), ay * az * (1 - cos) - ax * sin],\n [az * ax * (1 - cos) - ay * sin, az * ay * (1 - cos) + ax * sin, cos + az * az * (1 - cos)]])\n return rotmat", "def 
rotationMatrix_RzRyRz(self):\n\n R = Compute3DRotationMatrix_RzRyRz(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],\n self.exteriorOrientationParameters[5])\n\n return R", "def rotateZ(self, angle):\n\t\trad = angle * math.pi / 180\n\t\tcosa = math.cos(rad)\n\t\tsina = math.sin(rad)\n\t\tx = self.x * cosa - self.y * sina\n\t\ty = self.x * sina + self.y * cosa\n\t\treturn Point3D(x, y, self.z)", "def quaternion2rot3d(quat):\n q01 = quat[0] * quat[1]\n q02 = quat[0] * quat[2]\n q03 = quat[0] * quat[3]\n q11 = quat[1] * quat[1]\n q12 = quat[1] * quat[2]\n q13 = quat[1] * quat[3]\n q22 = quat[2] * quat[2]\n q23 = quat[2] * quat[3]\n q33 = quat[3] * quat[3]\n\n # Obtain the rotation matrix\n rotation = np.zeros((3, 3))\n rotation[0, 0] = (1. - 2. * (q22 + q33))\n rotation[0, 1] = 2. * (q12 - q03)\n rotation[0, 2] = 2. * (q13 + q02)\n rotation[1, 0] = 2. * (q12 + q03)\n rotation[1, 1] = (1. - 2. * (q11 + q33))\n rotation[1, 2] = 2. * (q23 - q01)\n rotation[2, 0] = 2. * (q13 - q02)\n rotation[2, 1] = 2. * (q23 + q01)\n rotation[2, 2] = (1. - 2. * (q11 + q22))\n\n return rotation", "def zx_rotation(vector,theta):\r\n R = np.array([[np.cos(theta),0,np.sin(theta)],\r\n [0,1,0],\r\n [-np.sin(theta),0,np.cos(theta)]\r\n ])\r\n return np.dot(R,vector)", "def multi_rot_Z(angle_rads: numpy.ndarray) -> numpy.ndarray:\n rz = numpy.empty((angle_rads.shape[0], 4, 4))\n rz[...] = numpy.identity(4)\n rz[:, 0, 0] = rz[:, 1, 1] = numpy.cos(angle_rads)\n rz[:, 1, 0] = numpy.sin(angle_rads)\n rz[:, 0, 1] = -rz[:, 1, 0]\n return rz", "def rotation_matrix(angle, axis):\n about_z = rotation_about_z(angle)\n z_to_axis = z_to_vector(axis)\n axis_to_z = np.linalg.inv(z_to_axis)\n return reduce(np.dot, [z_to_axis, about_z, axis_to_z])", "def RZ(rotRadian: float):\n return np.array([\n [np.cos(rotRadian), -np.sin(rotRadian), 0],\n [np.sin(rotRadian), np.cos(rotRadian), 0],\n [0, 0, 1]\n ])", "def Rz(theta, units='deg'):\n if units == 'deg':\n theta = np.deg2rad(theta)\n\n return np.mat([[np.cos(theta), -np.sin(theta), 0],\n [np.sin(theta), np.cos(theta), 0],\n [0, 0, 1]\n ],\n dtype='f8'\n )", "def rotz(t):\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])", "def rotation_matrix(xangle, yangle, zangle, order='zxy', degrees=False):\r\n if degrees:\r\n xangle = math.radians(xangle)\r\n yangle = math.radians(yangle)\r\n zangle = math.radians(zangle)\r\n\r\n # Here we assume we rotate z, then x then y.\r\n c1 = math.cos(xangle) # The x angle\r\n c2 = math.cos(yangle) # The y angle\r\n c3 = math.cos(zangle) # the z angle\r\n s1 = math.sin(xangle)\r\n s2 = math.sin(yangle)\r\n s3 = math.sin(zangle)\r\n\r\n # see http://en.wikipedia.org/wiki/Rotation_matrix for\r\n # additional info.\r\n\r\n if order=='zxy':\r\n rot_mat = np.array([[c2*c3-s1*s2*s3, c2*s3+s1*s2*c3, -s2*c1],[-c1*s3, c1*c3, s1],[s2*c3+c2*s1*s3, s2*s3-c2*s1*c3, c2*c1]])\r\n else:\r\n rot_mat = np.eye(3)\r\n for i in range(len(order)):\r\n if order[i]=='x':\r\n rot_mat = np.dot(np.array([[1, 0, 0], [0, c1, s1], [0, -s1, c1]]),rot_mat)\r\n elif order[i] == 'y':\r\n rot_mat = np.dot(np.array([[c2, 0, -s2], [0, 1, 0], [s2, 0, c2]]),rot_mat)\r\n elif order[i] == 'z':\r\n rot_mat = np.dot(np.array([[c3, s3, 0], [-s3, c3, 0], [0, 0, 1]]),rot_mat)\r\n\r\n return rot_mat", "def rotationMatrix(self):\n\n R = Compute3DRotationMatrix(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],\n self.exteriorOrientationParameters[5])\n\n return R", "def rotz(t):\n c = np.cos(t)\n s = np.sin(t)\n 
return np.array([[c, -s, 0],\n [s, c, 0],\n [0, 0, 1]])", "def rotz(t):\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[c, -s, 0],\n [s, c, 0],\n [0, 0, 1]])", "def rotz(t):\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[c, -s, 0],\n [s, c, 0],\n [0, 0, 1]])", "def rotz(t):\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[c, -s, 0],\n [s, c, 0],\n [0, 0, 1]])", "def axisAnglesToRotMat(xrot, yrot, zrot):\n\n xmat = np.eye(3)\n ymat = np.eye(3)\n zmat = np.eye(3)\n\n xmat[1, 1] = np.cos(xrot)\n xmat[1, 2] = -np.sin(xrot)\n xmat[2, 1] = np.sin(xrot)\n xmat[2, 2] = np.cos(xrot)\n\n ymat[0, 0] = np.cos(yrot)\n ymat[0, 2] = np.sin(yrot)\n ymat[2, 0] = -np.sin(yrot)\n ymat[2, 2] = np.cos(yrot)\n\n zmat[0, 0] = np.cos(zrot)\n zmat[0, 1] = -np.sin(zrot)\n zmat[1, 0] = np.sin(zrot)\n zmat[1, 1] = np.cos(zrot)\n\n return concat(zmat, ymat, xmat)", "def axisAnglesToRotMat(xrot, yrot, zrot):\n\n xmat = np.eye(3)\n ymat = np.eye(3)\n zmat = np.eye(3)\n\n xmat[1, 1] = np.cos(xrot)\n xmat[1, 2] = -np.sin(xrot)\n xmat[2, 1] = np.sin(xrot)\n xmat[2, 2] = np.cos(xrot)\n\n ymat[0, 0] = np.cos(yrot)\n ymat[0, 2] = np.sin(yrot)\n ymat[2, 0] = -np.sin(yrot)\n ymat[2, 2] = np.cos(yrot)\n\n zmat[0, 0] = np.cos(zrot)\n zmat[0, 1] = -np.sin(zrot)\n zmat[1, 0] = np.sin(zrot)\n zmat[1, 1] = np.cos(zrot)\n\n return concat(zmat, ymat, xmat)", "def rotation3Dy(theta):\n rmat = np.zeros((3,3))\n rmat[0,0], rmat[0,1], rmat[0,2] = np.cos(theta), 0.0, -np.sin(theta)\n rmat[1,0], rmat[1,1], rmat[1,2] = 0.0, 1.0, 0.0\n rmat[2,0], rmat[2,1], rmat[2,2] = np.sin(theta), 0.0, np.cos(theta)\n\n return rmat", "def rotate(self, x=0, y=0, z=0):\n\t\tquaternion = R.from_euler('xyz', [x, y, z], degrees=True)\n\t\trotation_matrix = np.array(quaternion.as_matrix())\n\t\trotation_matrix = np.pad(rotation_matrix, [(0, 1), (0, 1)], mode='constant')\n\t\trotation_matrix[3,3] = 1\n\n\t\tself.matrix = np.matmul(self.matrix, rotation_matrix)", "def apply_rotation_z(self, theta=0.0 ):\n \n theta = radians(theta)\n new_rotation_matrix = [[ +cos(theta) , -sin(theta) , 0 ],\n [ +sin(theta) , +cos(theta) , 0 ],\n [ 0 , 0 , 1 ]] \n \n self.rotation_matrix_exp = np.dot( new_rotation_matrix, self.rotation_matrix_exp )", "def rotation(x1, z1, x2, z2):\n e1 = np.zeros(shape=(3, 3))\n e2 = np.zeros(shape=(3, 3))\n e1[0, :] = x1 / np.linalg.norm(x1)\n e1[2, :] = z1 / np.linalg.norm(z1)\n e1[1, :] = np.cross(e1[2, :], e1[0, :])\n e2[0, :] = x2 / np.linalg.norm(x2)\n e2[2, :] = z2 / np.linalg.norm(z2)\n e2[1, :] = np.cross(e2[2, :], e2[0, :])\n R = np.zeros(shape=(3, 3))\n for i in range(3):\n for j in range(3):\n R[i, j] = np.dot(e1[i, :], e2[j, :])\n R = np.transpose(R)\n return R", "def Rpz(angle=0, units='deg'):\n\n if(units=='deg'):\n angle = angle*pi/180\n\n C = np.cos(angle)\n S = np.sin(angle)\n\n M = np.identity(3)\n\n M[0,0] = +C\n M[0,1] = -S\n M[1,0] = +S\n M[1,1] = +C\n\n return M", "def Rz(q):\n sin_q, cos_q = sin(q), cos(q)\n return numpy.matrix([\n [cos_q, sin_q, 0, 0],\n [-sin_q, cos_q, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ])", "def rotZ(theta, mode = 'radians'):\n\n\tif mode != 'radians' and mode != 'degrees':\n\t\traise ValueError('Mode should either be ``radians`` or ``degrees``.')\n\tif mode == 'degrees':\n\t\ttheta = np.deg2rad(theta)\n\treturn np.matrix([[np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], \\\n\t\t[0., 0., 1.]])", "def rotation_matrix(rotate):\n tx, ty, tz = rotate\n Rx = np.array([[1, 0, 0], [0, np.cos(tx), -np.sin(tx)], [0, np.sin(tx), np.cos(tx)]])\n Ry = np.array([[np.cos(ty), 0, 
-np.sin(ty)], [0, 1, 0], [np.sin(ty), 0, np.cos(ty)]])\n Rz = np.array([[np.cos(tz), -np.sin(tz), 0], [np.sin(tz), np.cos(tz), 0], [0, 0, 1]])\n return np.dot(Rx, np.dot(Ry, Rz))", "def matrix_translate_3d(tx: float, ty: float, tz: float) -> np.matrix:\n return np.matrix([[1, 0, 0, tx], [0, 1, 0, ty], [0, 0, 1, tz], [0, 0, 0, 1]])", "def rotate3(x, angle_x=0, angle_y=0, angle_z=0, origin=(0, 0, 0)):\n origin = np.asarray(origin)\n x = np.asarray(x) - origin\n r = rotation_matrix3(angle_x, angle_y, angle_z)\n return x.dot(r.T) + origin", "def getRotationMatrix(x, y, z, angle):\n # impossible to have a rotational matrix around (0, 0 ,0)\n if x == 0 and y == 0 and z == 0:\n raise Exception(\"Cannot have a rotation matrix around (0, 0, 0)\")\n\n # normalize vector\n vec = MatrixExtended([x, y, z])\n length = np.linalg.norm(vec)\n x /= length\n y /= length\n z /= length\n\n # some shortcuts for readability\n xx = x * x\n yy = y * y\n zz = z * z\n C = math.cos\n S = math.sin\n\n # calculate matrix elements\n e11 = xx + (1 - xx) * C(angle)\n e12 = x * y * (1 - C(angle)) - z * S(angle)\n e13 = x * z * (1 - C(angle)) + y * S(angle)\n e21 = x * y * (1 - C(angle)) + z * S(angle)\n e22 = yy + (1 - yy) * C(angle)\n e23 = y * z * (1 - C(angle)) - x * S(angle)\n e31 = x * z * (1 - C(angle)) - y * S(angle)\n e32 = y * z * (1 - C(angle)) + x * S(angle)\n e33 = zz + (1 - zz) * C(angle)\n\n return MatrixExtended([\n [e11, e12, e13, 0],\n [e21, e22, e23, 0],\n [e31, e32, e33, 0],\n [0, 0, 0, 1]])", "def AffineRz(theta, units='deg'):\n if units == 'deg':\n theta = np.deg2rad(theta)\n\n return np.mat([[np.cos(theta), -np.sin(theta), 0, 0],\n [np.sin(theta), np.cos(theta), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]\n ],\n dtype='f8'\n )", "def rotation_matrix( axis, angle ):\n\n # Trig factors.\n ca = cos(angle)\n sa = sin(angle)\n C = 1 - ca\n\n # Depack the axis.\n x, y, z = tuple( axis )\n\n # Multiplications (to remove duplicate calculations).\n xs = x*sa\n ys = y*sa\n zs = z*sa\n xC = x*C\n yC = y*C\n zC = z*C\n xyC = x*yC\n yzC = y*zC\n zxC = z*xC\n\n # Update the rotation matrix.\n matrix \t = np.zeros( (3,3) )\n matrix[0, 0] = x*xC + ca\n matrix[0, 1] = xyC - zs\n matrix[0, 2] = zxC + ys\n matrix[1, 0] = xyC + zs\n matrix[1, 1] = y*yC + ca\n matrix[1, 2] = yzC - xs\n matrix[2, 0] = zxC - ys\n matrix[2, 1] = yzC + xs\n matrix[2, 2] = z*zC + ca\n return matrix", "def matrix_rotate_3d_y(deg: float) -> np.matrix:\n from numpy import cos, sin, pi\n rad_y = -deg * pi/180\n c_y = cos(rad_y)\n s_y = sin(rad_y)\n return np.matrix([[c_y, 0, s_y], [0, 1, 0], [-s_y, 0, c_y]])", "def xform_Z_rot( self , thetaZrad ):\r\n self.xform_homog( homogeneous_Z( thetaZrad , [ 0 , 0 , 0 ] ) )", "def rotateZMatrix(self, radians):\n\n c = np.cos(radians)\n s = np.sin(radians)\n return np.array([[c,-s, 0, 0],\n [s, c, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])", "def rotation_matrix(self, rotation, rotation_order=\"zyx\"):\n x = math.radians(rotation[0])\n y = math.radians(rotation[1])\n z = math.radians(rotation[2])\n\n cos = math.cos\n sin = math.sin\n if rotation_order == 'zyx':\n index_0 = cos(y) * cos(z)\n index_1 = cos(z) * sin(x) * sin(y) - cos(x) * sin(z)\n index_2 = cos(x) * cos(z) * sin(y) + sin(x) * sin(z)\n\n index_3 = cos(y) * sin(z)\n index_4 = cos(x) * cos(z) + sin(x) * sin(y) * sin(z)\n index_5 = -cos(z) * sin(x) + cos(x) * sin(y) * sin(z)\n\n index_6 = -sin(y)\n index_7 = -cos(y) * sin(x)\n index_8 = cos(x) * cos(y)\n elif rotation_order == 'xyz':\n index_0 = cos(y) * cos(z)\n index_1 = -cos(z) * sin(z)\n index_2 = 
sin(y)\n\n index_3 = cos(x) * sin(z) + sin(x) * sin(y) * cos(z)\n index_4 = cos(x) * cos(z) - sin(x) * sin(y) * sin(z)\n index_5 = -sin(x) * cos(y)\n\n index_6 = sin(x) * sin(z) - cos(x) * sin(y) * cos(z)\n index_7 = sin(x) * cos(z) + cos(x) * sin(y) * sin(z)\n index_8 = cos(x) * cos(y)\n\n rot_mat = ((index_0, index_1, index_2),\n (index_3, index_4, index_5),\n (index_6, index_7, index_8))\n\n return rot_mat", "def test_z_rot(self):\n\n # Create a Matrix representing 90 deg z rot.\n mat = Matrix44.from_rot_z(90)\n # Use from_matrix44()\n quat = Quat.from_matrix44(mat)\n\n # Ensure the quat matches a 90 degree x rotation.\n expected = Quat.from_axis_angle_deg(Vec3(0, 0, 1), 90)\n AssertQuatAlmostEqual(quat, expected, self)", "def transform3D_rpy(\n x: float, y: float, z: float, roll: float, pitch: float, yaw: float\n) -> np.array:\n sr = np.sin(roll)\n cr = np.cos(roll)\n sp = np.sin(pitch)\n cp = np.cos(pitch)\n sy = np.sin(yaw)\n cy = np.cos(yaw)\n return np.array(\n [\n [cy * cp, cy * sp * sr - sy * cr, cy * sp * cr + sy * sr, x],\n [sy * cp, sy * sp * sr + cy * cr, sy * sp * cr - cy * sr, y],\n [-sp, cp * sr, cp * cr, z],\n [0, 0, 0, 1.0],\n ]\n )", "def rotation3D_rpy(roll: float, pitch: float, yaw: float) -> np.array:\n\n sr = np.sin(roll)\n cr = np.cos(roll)\n sp = np.sin(pitch)\n cp = np.cos(pitch)\n sy = np.sin(yaw)\n cy = np.cos(yaw)\n return np.array(\n [\n [cy * cp, cy * sp * sr - sy * cr, cy * sp * cr + sy * sr],\n [sy * cp, sy * sp * sr + cy * cr, sy * sp * cr - cy * sr],\n [-sp, cp * sr, cp * cr],\n ]\n )", "def rotation3D_x(angle: float) -> np.array:\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[1.0, 0.0, 0.0], [0.0, c, -s], [0.0, s, c]])", "def quaternion2rot3D(quaternion):\n theta, axis = quaternion2AngleAxis(quaternion)\n return angleAxis2rot3D(axis, theta)", "def rotation_matrix_xyz(axis, angle, angle_dim):\n assert angle_dim is \"deg\" or angle_dim is \"rad\"\n assert axis is \"x\" or axis is \"y\" or axis is \"z\"\n x = 0\n y = 0\n z = 0\n\n if angle_dim is \"deg\":\n a = np.deg2rad(angle)\n else:\n a = angle\n\n if axis is \"x\":\n x = 1\n y = 0\n z = 0\n if axis is \"y\":\n x = 0\n y = 1\n z = 0\n if axis is \"z\":\n x = 0\n y = 0\n z = 1\n\n s = np.sin(a)\n c = np.cos(a)\n rotation_matrix = np.array([[c + x ** 2 * (1 - c), x * y * (1 - c) - z * s, x * z * (1 - c) + y * s],\n [y * x * (1 - c) + z * s, c + y ** 2 * (1 - c), y * z * (1 - c) - x * s],\n [z * x * (1 - c) - y * s, z * y * (1 - c) + x * s, c + z ** 2 * (1 - c)]])\n\n return rotation_matrix", "def vrrotvec2mat(r):\n s = np.sin(r[3])\n c = np.cos(r[3])\n t = 1 - c\n \n n = normalize(r[0:3])\n \n x = n[0]\n y = n[1]\n z = n[2]\n \n m = np.array(\n [[t*x*x + c, t*x*y - s*z, t*x*z + s*y],\n [t*x*y + s*z, t*y*y + c, t*y*z - s*x],\n [t*x*z - s*y, t*y*z + s*x, t*z*z + c]]\n )\n return m", "def euler_to_rot3d(psi, theta, phi):\n rphi = np.array([[np.cos(phi), -np.sin(phi), 0],\n [np.sin(phi), np.cos(phi), 0],\n [0, 0, 1]])\n rtheta = np.array([[np.cos(theta), 0, np.sin(theta)],\n [0, 1, 0],\n [-np.sin(theta), 0, np.cos(theta)]])\n rpsi = np.array([[np.cos(psi), -np.sin(psi), 0],\n [np.sin(psi), np.cos(psi), 0],\n [0, 0, 1]])\n return np.dot(rpsi, np.dot(rtheta, rphi))", "def rotation_matrix(dx, dy, dz, roll=0):\n l = (dx**2 + dy**2 + dz**2)**0.5\n rx, ry, rz = dx / l, dy / l, dz / l\n c, s = cos(-roll), sin(-roll)\n\n if rx == 0 and rz == 0:\n r = [[0, ry, 0], [-ry*c, 0, s], [ry*s, 0, c]]\n else:\n rxz = (rx**2 + rz**2)**0.5\n r = [[rx, ry, rz],\n [(-rx*ry*c - rz*s)/rxz, rxz*c, (-ry*rz*c + 
rx*s)/rxz],\n [(rx*ry*s - rz*c)/rxz, -rxz*s, (ry*rz*s + rx*c)/rxz]]\n\n return np.array(r, dtype='float')", "def _rotationMatrix(self, n_dim, theta):\n i = np.identity(n_dim)\n c, s = np.cos(theta)*i, np.sin(theta)*i\n rotation = np.bmat([[c, s], [-s, c]])\n return rotation", "def _r_z(angle: tf.Tensor) -> tf.Tensor:\n zero = tf.constant(0, dtype=tf.float64)\n exponent = tf.complex(zero, angle)\n exp = tf.exp(exponent)\n zero_complex = tf.complex(zero, zero)\n one_complex = tf.complex(tf.constant(1, dtype=tf.float64), zero)\n rz = tf.stack([[one_complex, zero_complex], [zero_complex, exp]])\n\n return rz", "def rotate_z(self, angle: float):\n self.vertices = list(\n Matrix44.z_rotate(angle).transform_vertices(self.vertices)\n )\n return self", "def rotation_to_transformation_matrix(R):\n R = Matrix(R)\n T = R.col_insert(3, Matrix([0., 0., 0.]))\n T = T.row_insert(3, Matrix([[0., 0., 0., 1.]]))\n return T", "def rotate_3D(atom, source_atom):\n from lauescript.cryst.match import get_transform\n\n lst2 = [np.array([0, 0, 0]), source_atom.orientation[0], source_atom.orientation[1]]\n lst1 = [np.array([0, 0, 0]), atom.orientation[0], atom.orientation[1]]\n\n matrix = get_transform(lst1, lst2, matrix=True)\n\n adp = source_atom.adp['cart_int']\n\n atom.adp['cart_int'] = rotate_adp(adp, matrix)", "def random_rotation_matrix(strength=None, dtype=None):\n if strength is None:\n strength = 1.0\n\n if dtype is None:\n dtype = np.float32\n\n x = np.random.rand(3)\n theta = x[0] * 2 * np.pi * strength\n phi = x[1] * 2 * np.pi\n z = x[2] * strength\n\n r = np.sqrt(z)\n V = np.array([np.sin(phi) * r, np.cos(phi) * r, np.sqrt(2.0 - z)])\n\n st = np.sin(theta)\n ct = np.cos(theta)\n\n Rz = np.array([[ct, st, 0], [-st, ct, 0], [0, 0, 1]])\n\n rand_R = (np.outer(V, V) - np.eye(3)).dot(Rz)\n return rand_R.astype(dtype)", "def build_rotation_matrix(azim, plng, rake):\n # pylint: disable=bad-whitespace\n azim, plng, rake = radians(azim), radians(plng), radians(rake)\n\n R1 = np.array((( cos(rake), 0., sin(rake)),\n ( 0., 1., 0. ),\n (-sin(rake), 0., cos(rake))))\n\n R2 = np.array((( 1., 0., 0. ),\n ( 0., cos(plng), sin(plng)),\n ( 0., -sin(plng), cos(plng))))\n\n R3 = np.array((( cos(azim), sin(azim), 0. ),\n (-sin(azim), cos(azim), 0. ),\n ( 0., 0., 1. 
)))\n\n return R3.dot(R2).dot(R1)", "def from_rotation_mat(rot: np.ndarray) -> Quaternion:\n if rot.shape != (3, 3):\n raise TypeError('input rot should be a 3x3 matrix')\n\n t = rot.trace()\n if t > 0:\n t = np.sqrt(t + 1.0)\n w = 0.5 * t\n t = 0.5 / t\n x = (rot[2, 1] - rot[1, 2]) * t\n y = (rot[0, 2] - rot[2, 0]) * t\n z = (rot[1, 0] - rot[0, 1]) * t\n return Quaternion(w, np.array([x, y, z]))\n else:\n i = 0\n if rot[1, 1] > rot[0, 0]:\n i = 1\n if rot[2, 2] > rot[i, i]:\n i = 2\n j = (i + 1) % 3\n k = (j + 1) % 3\n\n data = np.zeros(4) # quaternion item [x, y, z, w]\n t = np.sqrt(rot[i, i] - rot[j, j] - rot[k, k] + 1.0)\n data[i] = 0.5 * t\n t = 0.5 / t\n data[-1] = (rot[k, j] - rot[j, k]) * t # w\n data[j] = (rot[j, i] + rot[i, j]) * t\n data[k] = (rot[k, i] + rot[i, k]) * t\n return Quaternion(data[-1], data[:3])", "def rotateZ(self, point):\n\n rad = self.offset_rotation.z * math.pi / 180\n\n cosa = math.cos(rad)\n sina = math.sin(rad)\n\n x = point.x * cosa - point.y * sina\n y = point.x * sina + point.y * cosa\n\n return OpenMaya.MVector(x, y, point.z)", "def _rot(axis, angle):\n if axis == 1:\n return Matrix([[1, 0, 0],\n [0, cos(angle), -sin(angle)],\n [0, sin(angle), cos(angle)]])\n elif axis == 2:\n return Matrix([[cos(angle), 0, sin(angle)],\n [0, 1, 0],\n [-sin(angle), 0, cos(angle)]])\n elif axis == 3:\n return Matrix([[cos(angle), -sin(angle), 0],\n [sin(angle), cos(angle), 0],\n [0, 0, 1]])", "def Rot_to_quaternion(r: array):\n\n # Compute the trace of the rotation matrix\n tr = r[0, 0] + r[1, 1] + r[2, 2]\n\n if tr > 0:\n S = sqrt(tr + 1.0) * 2\n qw = 0.25 * S\n qx = (r[2, 1] - r[1, 2]) / S\n qy = (r[0, 2] - r[2, 0]) / S\n qz = (r[1, 0] - r[0, 1]) / S\n elif (r[0, 0] > r[1, 1]) and (r[0, 0] > r[2, 2]):\n S = sqrt(1.0 + r[0, 0] - r[1, 1] - r[2, 2]) * 2\n qw = (r[2, 1] - r[1, 2]) / S\n qx = 0.25 * S\n qy = (r[0, 1] + r[1, 0]) / S\n qz = (r[0, 2] + r[2, 0]) / S\n elif r[1, 1] > r[2, 2]:\n S = sqrt(1.0 + r[1, 1] - r[0, 0] - r[2, 2]) * 2\n qw = (r[0, 2] - r[2, 0]) / S\n qx = (r[0, 1] + r[1, 0]) / S\n qy = 0.25 * S\n qz = (r[1, 2] + r[2, 1]) / S\n else:\n S = sqrt(1.0 + r[2, 2] - r[0, 0] - r[1, 1]) * 2\n qw = (r[1, 0] - r[0, 1]) / S\n qx = (r[0, 2] + r[2, 0]) / S\n qy = (r[1, 2] + r[2, 1]) / S\n qz = 0.25 * S\n\n q = array([qw, qx, qy, qz])\n q = q * sign(qw)\n\n return q", "def rotation_matrix(self):\n return self.affine_matrix[0:3][:, 0:3]", "def rotation_matrix(theta=0, phi=0, psi=0, units='deg'):\n\n rpy = Rpy(theta,units)\n rmx = Rmx(phi, units)\n rpz = Rpz(psi, units)\n\n return np.matmul(rpy, np.matmul(rmx, rpz))", "def z_rotation(self):\n before = ('R', 'r', 'U', 'u', 'L', 'l', 'D', 'd', 'M', 'E', 'x', 'y')\n after = ('U', 'u', 'L', 'l', 'D', 'd', 'R', 'r', 'E', 'M\\'', \"y\", \"x'\")\n solve = self.solve_helper.maketrans(dict(zip(before, after)))\n solve_trans = self.solve_helper.translate(solve)\n solve_trans = solve_trans.replace(\"\\'\\'\", \"\")\n self.solve_helper = solve_trans", "def rotate_3D(image, angle, axes=(1, 2)):\n rotated_image = scipy.ndimage.interpolation.rotate(\n image, angle, axes, reshape=False)\n return rotated_image", "def rotateZ(self, *args, **kwargs):\n ...", "def rotateZ(self, theta):\n rot_m = numpy.array([[numpy.sin(theta), numpy.cos(theta)], \\\n [numpy.cos(theta), -numpy.sin(theta)]])\n for sec in self.all:\n for i in range(int(nrn.n3d())):\n xy = numpy.dot([nrn.x3d(i), nrn.y3d(i)], rot_m)\n nrn.pt3dchange(i, float(xy[0]), float(xy[1]), nrn.z3d(i), \\\n nrn.diam3d(i))", "def angle_axis_to_rot3d(axis, theta):\n if isinstance(axis, 
string_types):\n axis = axis.lower()\n if axis == 'x':\n axis = np.array([1., 0., 0.])\n elif axis == 'y':\n axis = np.array([0., 1., 0.])\n elif axis == 'z':\n axis = np.array([0., 0., 1.])\n else:\n raise ValueError(\"Axis should be 'x', 'y', 'z' or a 3D vector.\")\n elif len(axis) != 3:\n raise ValueError(\"Axis should be 'x', 'y', 'z' or a 3D vector.\")\n axis = axis.astype(float)\n axis /= np.linalg.norm(axis)\n a = axis[0]\n b = axis[1]\n c = axis[2]\n cos_theta = np.cos(theta)\n bracket = 1 - cos_theta\n a_bracket = a * bracket\n b_bracket = b * bracket\n c_bracket = c * bracket\n sin_theta = np.sin(theta)\n a_sin_theta = a * sin_theta\n b_sin_theta = b * sin_theta\n c_sin_theta = c * sin_theta\n rot3d = np.array(\n [[a * a_bracket + cos_theta, a * b_bracket - c_sin_theta, a * c_bracket + b_sin_theta],\n [b * a_bracket + c_sin_theta, b * b_bracket + cos_theta, b * c_bracket - a_sin_theta],\n [c * a_bracket - b_sin_theta, c * b_bracket + a_sin_theta, c * c_bracket + cos_theta]])\n return rot3d", "def rotateZ(self, angle):\r\n if angle:\r\n c = cos(radians(angle))\r\n s = sin(radians(angle))\r\n self.mtrx = dot([[c, s, 0, 0],\r\n [-s, c, 0, 0],\r\n [0, 0, 1, 0],\r\n [0, 0, 0, 1]],\r\n self.mtrx)\r\n self.rtn[2] = angle\r\n self.was_moved = True", "def quaternion_from_axis_angle(x, y, z, theta):\n if x == y == z == 0:\n return np.array([1, 0, 0, 0])\n axis = np.array([x, y, z])\n axis /= np.linalg.norm(axis)\n return rowan.from_axis_angle(axis, theta)", "def quaternion_to_rotation_matrix(q0, q1, q2, q3) -> np:\n\n # First row of the rotation matrix\n r00 = 2 * (q0 * q0 + q1 * q1) - 1\n r01 = 2 * (q1 * q2 - q0 * q3)\n r02 = 2 * (q1 * q3 + q0 * q2)\n\n # Second row of the rotation matrix\n r10 = 2 * (q1 * q2 + q0 * q3)\n r11 = 2 * (q0 * q0 + q2 * q2) - 1\n r12 = 2 * (q2 * q3 - q0 * q1)\n\n # Third row of the rotation matrix\n r20 = 2 * (q1 * q3 - q0 * q2)\n r21 = 2 * (q2 * q3 + q0 * q1)\n r22 = 2 * (q0 * q0 + q3 * q3) - 1\n\n # 3x3 rotation matrix\n rot_matrix = np.array([[r00, r01, r02],\n [r10, r11, r12],\n [r20, r21, r22]])\n\n return rot_matrix", "def rotation(self, angle, axis):\r\n\r\n sqr_a = axis.x*axis.x\r\n sqr_b = axis.y*axis.y\r\n sqr_c = axis.z*axis.z\r\n len2 = sqr_a+sqr_b+sqr_c\r\n\r\n k2 = math.cos(angle)\r\n k1 = (1.0-k2)/len2\r\n k3 = math.sin(angle)/math.sqrt(len2)\r\n k1ab = k1*axis.x*axis.y\r\n k1ac = k1*axis.x*axis.z\r\n k1bc = k1*axis.y*axis.z\r\n k3a = k3*axis.x\r\n k3b = k3*axis.y\r\n k3c = k3*axis.z\r\n\r\n return mat4( k1*sqr_a+k2, k1ab-k3c, k1ac+k3b, 0.0,\r\n k1ab+k3c, k1*sqr_b+k2, k1bc-k3a, 0.0,\r\n k1ac-k3b, k1bc+k3a, k1*sqr_c+k2, 0.0,\r\n 0.0, 0.0, 0.0, 1.0)", "def rotatematrix(m, x, y ,z):\r\n for i in xrange(x):\r\n m = rotatem_x(m)\r\n for i in xrange(y):\r\n m = rotatem_y(m)\r\n for i in xrange(z):\r\n m = rotatem_z(m)\r\n return m", "def test_rotate_around_v3_z_axis(self):\n from pedemath.vec3 import rotate_around_vector_v3\n\n vec_a = Vec3(3, 4, 5)\n vec_b = Vec3(0, 0, 1)\n\n result = rotate_around_vector_v3(vec_a, math.pi, vec_b)\n expected = Vec3(-3, -4, 5)\n\n self.assertAlmostEqual(result.x, expected.x)\n self.assertAlmostEqual(result.y, expected.y)\n self.assertAlmostEqual(result.z, expected.z)", "def _rotation_matrix(theta):\n c, s = np.cos(theta), np.sin(theta)\n return np.array(((c, -s), (s, c)))", "def axis_angle_matrix3(unit, theta):\n x, y, z = unit\n c = math.cos(theta)\n s = math.sin(theta)\n C = 1 - c\n return np.matrix([\n [x * x * C + c, x * y * C - z * s, x * z * C + y * s],\n [y * x * C + z * s, y * y * C + c, y * z * C - x * s],\n 
[z * x * C - y * s, z * y * C + x * s, z * z * C + c],\n ])", "def test_x_y_and_z_rot(self):\n\n axis = Vec3(4, 5, 6)\n # Create a Matrix representing a rotation.\n mat = Matrix44.from_axis_angle_deg(axis, 45.0)\n # Use from_matrix44()\n quat = Quat.from_matrix44(mat)\n\n # Ensure it matches the expected quaternion.\n expected_quat = Quat.from_axis_angle_deg(axis, 45.0)\n self.assertAlmostEqual(quat.x, expected_quat.x)\n self.assertAlmostEqual(quat.y, expected_quat.y)\n self.assertAlmostEqual(quat.z, expected_quat.z)\n self.assertAlmostEqual(quat.w, expected_quat.w)", "def computeOrientation3D(object, P):\n\n # compute rotational matrix around yaw axis\n R = [[np.cos(object.ry), 0, np.sin(object.ry)],\n [0, 1, 0],\n [-np.sin(object.ry), 0, np.cos(object.ry)]]\n\n # orientation in object coordinate system\n orientation_3D = [[0.0, object.l],\n [0.0, 0.0],\n [0.0, 0.0]]\n\n # rotate and translate in camera coordinate system, project in image\n orientation_3D = R * orientation_3D\n orientation_3D[0, :] += object.t[0]\n orientation_3D[1, :] += object.t[1]\n orientation_3D[2, :] += object.t[2]\n\n # vector behind image plane?\n if any(orientation_3D[2, :] < 0.1):\n orientation_2D = []\n else:\n # project orientation into the image plane\n orientation_2D = projectToImage(orientation_3D, P)\n return orientation_2D" ]
[ "0.8186974", "0.802003", "0.7951418", "0.76159006", "0.75433534", "0.74739504", "0.7473712", "0.7445926", "0.7439543", "0.74234265", "0.736115", "0.7298137", "0.7291585", "0.72050965", "0.7194949", "0.7179259", "0.71736616", "0.71396255", "0.71220154", "0.71090776", "0.7098334", "0.70957506", "0.70957506", "0.70957506", "0.70957506", "0.7043044", "0.7041205", "0.70372653", "0.7017589", "0.7012499", "0.70114833", "0.6994724", "0.6955733", "0.69406945", "0.69359493", "0.69340175", "0.69178396", "0.6897936", "0.68899876", "0.68771327", "0.68705326", "0.68705326", "0.68705326", "0.68705326", "0.68597054", "0.68597054", "0.6858706", "0.6840831", "0.6821787", "0.6818175", "0.68052584", "0.6794145", "0.6784886", "0.6754727", "0.67538106", "0.6749553", "0.67403483", "0.6730162", "0.6725085", "0.6722591", "0.6699759", "0.6675953", "0.6664133", "0.6623622", "0.6615952", "0.65964437", "0.6581054", "0.65673494", "0.65635705", "0.654686", "0.654056", "0.64408416", "0.6429457", "0.64262134", "0.6397289", "0.6394158", "0.63919854", "0.6378807", "0.634105", "0.6325449", "0.63147277", "0.6311453", "0.6306528", "0.63040507", "0.62980586", "0.6296198", "0.6279878", "0.6273818", "0.6269175", "0.6262145", "0.6246256", "0.6240363", "0.622621", "0.62083226", "0.6193611", "0.617254", "0.6166948", "0.6166076", "0.61423105", "0.6140471" ]
0.84704345
0
Return angle between two vectors in R^3, in radians
def angle(x, y, deg=False):
    rad_angle = np.arccos(np.dot(x, y) / (norm(x)*norm(y)))
    if deg:
        return rad_angle*(180.0/np.pi)
    else:
        return rad_angle
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def angle(v1: Vector, v2: Vector) -> float:\n return math.degrees(math.acos((v1 * v2) / (v1.length() * v2.length())))", "def vec_angle_rad(v1,v2):\r\n \r\n c = np.dot(v1,v2)/(vector_len(v2)* vector_len(v2))\r\n return math.acos(c)", "def angle_between_vectors(a, b):\n return math.acos(dot_product(a, b) / (length(a) * length(b)))", "def angle(vec1, vec2):\n\n return math.acos(dotproduct(vec1, vec2) / (length(vec1) * length(vec2)))", "def angle(self, v1, v2):\r\n cosang = np.dot(v1, v2)\r\n sinang = np.linalg.norm(np.cross(v1, v2))\r\n return np.arctan2(sinang, cosang)", "def compute_angle(v1, v2):\n cosang = np.dot(v1, v2)\n sinang = la.norm(np.cross(v1, v2))\n angle = np.arctan2(sinang, cosang)\n return angle", "def angle(v1, v2):\n return acos(np.clip(v1.dot(v2) / (length(v1) * length(v2)), -1.0, 1.0))", "def angleBetweenVectors(v1, v2):\n v2Size = vectorLength(v2)\n if not v2Size:\n theta = 0.0\n else:\n theta = math.acos(dotProduct(v1, v2) / v2Size)\n return theta", "def angle(*args):\n if len(args) < 1:\n return 0.0\n elif len(args) == 1:\n return np.arctan2(args[0][1], args[0][0])\n else:\n v1 = args[0].flatten()\n v2 = args[1].flatten()\n return np.arccos(np.dot(v1, v2) / (norm(v1) * norm(v2)))", "def angle_between(v1, v2):\n v1_u = unit_vector(v1)\n v2_u = unit_vector(v2)\n\n #takes out if vectors are 1 or -1 (basically if they're the same direction)\n angle = math.degrees(np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)))\n return angle", "def angle(a,b):\n return acos(np.dot(a,b)/np.linalg.norm(a)/np.linalg.norm(b))", "def calc_angle(v1, v2, v3):\n v1 = v1 - v2\n v3 = v3 - v2\n return v1.angle(v3)", "def angle_between_vectors(vec1, vec2):\n vec = vec1 - vec2\n vec = vec.perpendicular()\n return vec.angle", "def get_angle(v1, v2):\n return np.arccos(np.dot(v1, v2))", "def angle_between(v1, v2):\n v1_u = unit_vector(v1)\n v2_u = unit_vector(v2)\n return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))", "def getAngle(p1, p2, p3):\n\tv1 = p1 - p2\n\tv2 = p3 - p2\n\tmag = la.norm(v1) * la.norm(v2)\n\tc = np.dot(v1, v2) / mag\n\tcross = np.cross(v1,v2)\n\ts = la.norm(cross)/mag\n\tatang = math.atan2(s,c)\n\tang = atang * 180 / math.pi\n\treturn ang", "def angle_between_vectors(vector1,vector2):\n value = np.sum(np.multiply(vector1, vector2)) / (np.linalg.norm(vector1) * np.linalg.norm(vector2))\n if (value<-1) | (value>1):\n value = np.sign(value)\n angle = np.arccos(value)\n return angle", "def vector_angle(v1, v2):\n cos_theta = np.dot(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2)\n # Clip ensures that cos_theta is within -1 to 1 by rounding say -1.000001 to -1 to fix numerical issues\n angle = np.arccos(np.clip(cos_theta, -1, 1))\n\n return angle", "def compute_angle_v2v(v1, v2, v3=None):\n\n alpha = math.acos(dot_product(v1, v2) / (vlength(v1)*vlength(v2)))\n if v3 is not None:\n cross = cross_product(v2, v1)\n if dot_product(cross,v3) > 0.0:\n return 2*math.pi-alpha\n\n return alpha", "def angle_between(v1, v2):\n return np.arccos(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))", "def angle_to( self, vector3 ):\n # make sure neither vector is zero-length\n sm = self.magnitude\n vm = vector3.magnitude\n if abs(sm) < self.EPSILON or abs(vm) < self.EPSILON:\n raise ZeroDivisionError(\n \"can't calculate angle between zero-length vectors!\" )\n \n # calculation will fail if vectors have same heading\n # catch error and return zero\n try:\n return math.degrees( math.acos(self.dot(vector3) / (sm * vm)) )\n except ValueError:\n # test whether direction is same or 
opposite\n if Vector3( self ).add( vector3 ).magnitude < sm:\n return 180.0\n return 0.0", "def angle( nt1, nt2, nt3 ):\n if vector(nt1, nt2) == [0,0]:\n print(\"nt1\", nt1.seqpos, \" at \", nt1.x, nt1.y, \" is at the same position as nt2\", nt2.seqpos)\n if vector(nt2, nt3) == [0,0]:\n print(\"nt2\", nt2.seqpos, \" at \", nt2.x, nt2.y, \" is at the same position as nt3\", nt3.seqpos)\n #print(vector(nt1, nt2), vector(nt2, nt3))\n if vectors_close(vector(nt1, nt2), vector(nt2, nt3)):\n # These vectors are identical and that is messing with the ability to call two things parallel?\n return 180.0\n return 180.0 - math.degrees(math.acos(dot(vector(nt1, nt2), vector(nt2, nt3)) / (mod(vector(nt1, nt2)) * mod(vector(nt2, nt3)))))", "def AngleBetween(a, b):\n r = a.Length() * b.Length()\n if r < 1.0e-8:\n return BadVectorError()\n dot = (a.x*b.x + a.y*b.y + a.z*b.z) / r\n if dot <= -1.0:\n return 180.0\n if dot >= +1.0:\n return 0.0\n return math.degrees(math.acos(dot))", "def angle(vec1, vec2):\n assert vec1.shape == vec2.shape\n \n cos_vec = np.inner(vec1, vec2)/(np.linalg.norm(vec1)*np.linalg.norm(vec2))\n angle = math.acos(cos_vec)\n in_deg = math.degrees(angle)\n if in_deg >= 90:\n return (180-in_deg)\n return in_deg", "def angle(first, other=FreeCAD.Vector(1,0,0)):\n if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):\n return math.acos(dotproduct(normalized(first),normalized(other)))", "def angle_between(a, b):\n from math import acos\n return acos( dot_product(a, b) / (magnitude(a) * magnitude(b)) )", "def angleBetween(v1, v2):\n v1_u = unitVector(v1)\n v2_u = unitVector(v2)\n return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))", "def cal_angle_between_two_vectors(vec_1, vec_2):\n unit_vec_1 = vec_1 / np.linalg.norm(vec_1)\n unit_vec_2 = vec_2 / np.linalg.norm(vec_2)\n dot_product = np.dot(unit_vec_1, unit_vec_2)\n \n return np.arccos(dot_product) / np.pi * 180", "def angle_between(v1: Vec2, v2: Vec2):\n v = dir_vector(v1, v2)\n a = atan2(v.y, v.x)\n if a < 0:\n a = 2 * pi + a\n return a", "def angle(v1, v2, acute=True):\n angle = np.arccos(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))\n if acute == True:\n return angle\n else:\n return 2 * np.pi - angle", "def getAngle(v1,v2,prec=1E-6):\n \n return(math.acos((np.dot(v1,v2))/np.linalg.norm(v1)/np.linalg.norm(v2)))", "def vec_angle_deg(v1,v2):\r\n \r\n return math.degrees(vec_angle_rad(v1,v2))", "def vector_angle_finder(vect_1, vect_2):\n theta = np.arccos(np.dot(vect_1, vect_2) / (magnitude_vect(vect_1) * magnitude_vect(vect_2)))\n angle = theta * 180 / math.pi\n return angle", "def calculate_vector_angle(vector_1, vector_2):\n dot = dot_product(vector_1, vector_2)\n cos_angle = float(dot / (two_norm(vector_1) * two_norm(vector_2)))\n # Buffer for floating point errors\n if 1.2 > cos_angle > 1:\n cos_angle = 1\n elif -1.2 < cos_angle < -1:\n cos_angle = -1\n elif -1.2 > cos_angle or 1.2 < cos_angle:\n raise KeypointError(\"Ratio for angle is outside of the domain.\")\n if cos_angle > 0:\n multiplier = 1\n else:\n multiplier = -1\n angle_of_interest = (180 - math.degrees(math.acos(cos_angle))) * multiplier\n return angle_of_interest", "def angle_vecs(vec1,vec2):\n angle=np.arccos(np.dot(vec1,vec2)/(np.linalg.norm(vec1)*np.linalg.norm(vec2)))\n return angle", "def find_angle(p1, p2, p3):\n\n BAx = p1[0] - p2[0]\n BAy = p1[1] - p2[1]\n\n BCx = p3[0] - p2[0]\n BCy = p3[1] - p2[1]\n\n a = [BAx, BAy]\n b = [BCx, BCy]\n a_mag = np.linalg.norm(a)\n b_mag = np.linalg.norm(b)\n\n theta = np.arccos(np.dot(a, 
b) / (a_mag * b_mag))\n\n return math.degrees(theta)", "def angle_between_vectors(self, u, v):\n vec1_unit = self.get_unit_vector(u)\n vec2_unit = self.get_unit_vector(v)\n return np.arccos(np.clip(np.dot(vec1_unit, vec2_unit), -1.0, 1.0)) * (180/math.pi)", "def _angle(self, a, b, c):\n divid = (a ** 2 + b ** 2 - c ** 2)\n divis = (2 * a * b)\n if (divis) > 0:\n result = float(divid) / divis\n if result <= 1.0 and result >= -1.0:\n return acos(result)\n return 0\n else:\n return 0", "def angle_btw(v1, v2):\n cos_ang = np.dot(v1, v2)\n sin_ang = np.linalg.norm(np.cross(v1, v2))\n return np.arctan2(sin_ang, cos_ang) * 180 / math.pi", "def calcul_angle_vector(vec1, vec2):\n \n try:\n div=(vec1[0]*vec2[0]+vec1[1]*vec2[1]+vec1[2]*vec2[2])/(distance(vec1,[0,0,0])*distance(vec2,[0,0,0]))\n if div>1:\n div=1\n if div<-1:\n div=-1\n #KC#CG# tranlation to degrees\n angle=180/math.pi*math.acos(div)\n except:\n print vec1\n print vec2\n print (vec1[0]*vec2[0]+vec1[1]*vec2[1]+vec1[2]*vec2[2])/(distance(vec1,[0,0,0])*distance(vec2,[0,0,0]))\n return angle", "def angle(self, vector):\n\n return (math.degrees(math.acos((self.dot(vector) / (self.magnitude() *\n vector.magnitude())))))", "def angle(o1,o2):\n\n o1 = np.array(o1)\n o2 = np.array(o2)\n\n o1a = o1[0:3]\n o1b = o1[3:6]\n \n o2a = o2[0:3]\n o2b = o2[3:6]\n\n norm_a = np.linalg.norm(o1a) * np.linalg.norm(o2a)\n norm_b = np.linalg.norm(o1b) * np.linalg.norm(o2b)\n\n dot_a = np.dot(o1a,o2a) / norm_a\n dot_b = np.dot(o1b,o2b) / norm_b\n \n if dot_a > 1.0 and dot_a - 1.0 <= np.finfo(dot_a.dtype).eps:\n dot_a = 1.0\n \n if dot_b > 1.0 and dot_b - 1.0 <= np.finfo(dot_b.dtype).eps:\n dot_b = 1.0\n\n angle_a = np.arccos(dot_a) * (180.0 / np.pi)\n angle_b = np.arccos(dot_b) * (180.0 / np.pi)\n\n return (angle_a, angle_b)", "def angle(p1, p2):\n return dot(p1, p2)", "def angle_between_vectors(u, v):\r\n mag_u = math.sqrt(u[0]**2 + u[1]**2 + u[2]**2)\r\n mag_v = math.sqrt(v[0]**2 + v[1]**2 + v[2]**2)\r\n dot_prod = u[0] * v[0] + u[1] * v[1] + u[2] * v[2]\r\n return math.acos(dot_prod/(mag_u*mag_v))", "def vector_angle(v):\n assert len(v) == 2\n x, y = v\n return np.arctan2(y, x)", "def angle_between(v1, v2):\n v = np.array(v1)\n w = np.array(v2)\n\n norm_v = norm(v)\n norm_w = norm(w)\n\n cos_angle = np.around(np.dot(v, w) / norm_v / norm_w, PRECISION)\n\n if not -1 <= cos_angle <= 1:\n return None\n else:\n return np.around(np.arccos(cos_angle) * 360 / 2 / np.pi, PRECISION)", "def get_angle_between_vectors(self, A, B):\n\t\tdot_prod = A[0]*B[0] + A[1]*B[1]\n\t\tlen_A = math.sqrt(A[0]**2 + A[1]**2)\n\t\tlen_B = math.sqrt(B[0]**2 + B[1]**2)\n\n\t\treturn math.acos(dot_prod / (len_A + len_B))", "def angle_between_vectors_degrees(u, v):\n a = np.dot(u, v)\n b = np.linalg.norm(u)\n c = np.linalg.norm(v)\n d = a / (b* c)\n if d > 1:\n d = 1\n if d < -1:\n d = -1\n e = acos(d)\n f = np.degrees(e)\n return f", "def angle_between(v2, v1):\n v1_u = unit_vector(v1)\n v2_u = unit_vector(v2)\n result = np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))\n if np.isnan(result):\n if abs(v1_u + v2_u) < .5 * (abs(v1_u) + abs(v2_u)):\n return np.pi\n else:\n return 0.0\n if Left( [v2[1],v2[3]], [0,0], [v1[1],v1[3]] ):\n return 2*np.pi - result\n return result", "def angle_between_vectors_degrees(u, v):\n return np.degrees(\n math.acos(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))))", "def angle_between_vectors(vect_ref, vect):\n\n c = np.dot(vect_ref.T, vect) / (np.linalg.norm(vect_ref) * np.linalg.norm(vect))\n angle = np.arccos(np.clip(c, -1, 1))\n\n return angle", 
"def angle_between_vectors(x, y):\n first_step = abs(x[0] * y[0] + x[1] * y[1] + x[2] * y[2]) / (\n np.sqrt(x[0]**2 + x[1]**2 + x[2]**2) *\n np.sqrt(y[0]**2 + y[1]**2 + y[2]**2))\n second_step = np.arccos(first_step)\n return (second_step)", "def get_angle(a, b, c):\n\n ba = a - b\n cb = c - b\n\n ba_mod = mod(ba)\n cb_mod = mod(cb)\n val = dot(ba, cb) / (ba_mod * cb_mod)\n # better fix?\n if val > 1:\n val = 1\n elif val < -1:\n val = -1\n\n return np.arccos(val)", "def get_angle(p1, p2):\n return math.atan2(p2[1] - p1[1], p2[0] - p1[0])", "def get_angle(pt1,pt2,pt3):\r\n a = float(get_distance(pt1,pt2))\r\n b = float(get_distance(pt2,pt3))\r\n c = float(get_distance(pt1,pt3))\r\n angle = np.arccos((a**2 + b**2 - c**2)/(2*a*b)) # Law of Cosines \r\n \r\n return angle", "def angle_between(i1, j1, i2, j2):\n\n dot_product = i1 * i2 + j1 * j2\n magnitude1 = np.sqrt(i1 ** 2 + j1 ** 2)\n magnitude2 = np.sqrt(i2 ** 2 + j2 ** 2)\n\n theta = np.arccos(dot_product / (magnitude1 * magnitude2))\n\n return np.rad2deg(theta).round(3)", "def _angle(*vectors):\n if len(vectors) == 1:\n return DubinsUAV2D._sawtooth(np.arctan2(vectors[0][1], vectors[0][0]))\n elif len(vectors) == 2:\n return DubinsUAV2D._sawtooth(np.arctan2(vectors[1][1], vectors[1][0]) - np.arctan2(vectors[0][1], vectors[0][0]))\n else:\n raise AttributeError()", "def angle_between(vec1, vec2, radian=True):\n cos = np.dot(vec1, vec2) / np.linalg.norm(vec1) / np.linalg.norm(vec2)\n angle = np.arccos(np.clip(cos, -1, 1))\n if not radian:\n angle = angle / np.pi * 180\n return angle", "def get_vec_angle(vec1: List, vec2: List) -> Union[float, None]:\n if np.linalg.norm(np.array(vec1)) == 0 or np.linalg.norm(np.array(vec2)) == 0:\n warnings.warn(\"Do not input 0 vector\")\n return\n\n diff_degree = np.dot(np.array(vec1), np.array(vec2))\n diff_degree /= np.linalg.norm(np.array(vec1))\n diff_degree /= np.linalg.norm(np.array(vec2))\n diff_degree = np.clip(diff_degree, -1, 1)\n diff_degree = np.arccos(diff_degree) * 180 / np.pi\n return diff_degree", "def get_angle(v1,v2) :\n\n if (np.linalg.norm(v1)*np.linalg.norm(v2)) != 0 : \n cosangle = np.dot(v1,v2)/(np.linalg.norm(v1)*np.linalg.norm(v2))\n cosangle = np.maximum(-1,np.minimum(1, cosangle))\n angle = np.arccos(cosangle) \n if np.cross(v1,v2) < 0 :\n angle = 2*np.pi - angle \n return angle\n return None", "def test_vectors_angle(self):\n\n # Example 1.3\n vector_p = np.array([1.0, 2.0, 0.0])\n vector_q = np.array([3.0, 1.0, 1.0])\n crystal = crystal_system.Tetragonal(0.5, 1.0)\n angle_ref_deg = 53.300774799510123\n\n angle_rad = vector.angle_rad(crystal, vector_p, vector_q)\n angle_deg = np.degrees(angle_rad)\n self.assertAlmostEqual(angle_ref_deg, angle_deg, 6)\n\n angle_rad = vector.angle_rad(crystal, vector_q, vector_p)\n angle_deg = np.degrees(angle_rad)\n self.assertAlmostEqual(angle_ref_deg, angle_deg, 6)\n\n #self.fail(\"Test if the testcase is working.\")", "def angle_2D(v):\n len_v=(v[0]**2+v[1]**2)**(0.5)\n if len_v==0:\n return 0\n ret = math.acos(v[0]/len_v)\n if v[1]<0:\n ret=6.283185307179586-ret\n return ret", "def compute_angle_in_rad(location1, location2):\n return np.arctan2(location1[0] - location2[0], location1[1] - location2[1])", "def angle(pt_a, pt_b):\n x1, y1 = pt_a\n x2, y2 = pt_b\n return atan2(y2-y1, x2-x1)", "def calculate_angle(asteroid_1: Asteroid, asteroid_2: Asteroid) -> float:\n dy = asteroid_2.y - asteroid_1.y\n dx = asteroid_2.x - asteroid_1.x\n return math.atan2(dy, dx) * 180.0 / math.pi", "def angle(p1, p2):\n x_dist = p2[0] - p1[0]\n y_dist = p2[1] - 
p1[1]\n return math.atan2(-y_dist, x_dist) % (2 * math.pi)", "def angle(self):\n self._normalise()\n norm = np.linalg.norm(self.vector)\n return self._wrap_angle(2.0 * atan2(norm,self.scalar))", "def angle(self):\n return arccos(dot((self.a - self.o) / self.r, (self.b - self.o) / self.r))", "def angle(self, vec2):\n if type(vec2) != Vector:\n raise TypeError(\"Not a vector\")\n\n from math import acos\n return acos(self.dot(vec2) / (self.magnitude() * vec2.magnitude()))", "def getangle(p1, p2):\n\treturn atan2( p2[1]-p1[1], p2[0]-p1[0] )", "def angle_between(x1: float, y1: float, x2: float, y2: float) -> float:\n dx = x2 - x1\n dy = y2 - y1\n\n # We return negative because pyglet and math treat rotation differently\n return -math.atan2(dy, dx)", "def get_angle_rad_between_joints(joint_a: Joint2D, joint_b: Joint2D) -> float:\n return math.atan2(joint_a.y - joint_b.y, joint_a.x - joint_b.x)", "def _angle(u, v, w, d='+'):\n vu = np.arctan2(u[1] - v[1], u[0] - v[0])\n vw = np.arctan2(w[1] - v[1], w[0] - v[0])\n phi = vw - vu\n if phi < 0:\n phi += 2 * np.pi\n if d == '-':\n phi = 2 * np.pi - phi\n return np.round(phi, 6)", "def get_angle(vert1, vert2):\n x_axis = np.array([1, 0])\n input_axis = vert2 - vert1\n input_axis = input_axis / np.linalg.norm(input_axis)\n return math.degrees(np.arccos(np.dot(x_axis, input_axis)))", "def _get_angle(point1, point2):\n ydelta = point2[0] - point1[0]\n xdelta = point2[1] - point1[1]\n if xdelta == 0:\n hypot = np.sqrt(xdelta ** 2 + ydelta ** 2)\n theta = np.arcsin(ydelta / hypot)\n elif ydelta == 0:\n hypot = np.sqrt(xdelta ** 2 + ydelta ** 2)\n theta = np.arccos(xdelta / hypot)\n else:\n theta = np.arctan(ydelta / xdelta)\n return theta", "def getAngle(A, B, C):\n if A * B == 0:\n return 180\n else:\n return degrees(acos((A * A + B * B - C * C)/(2.0 * A * B)))", "def get_angle(a, b, c):\n\n # Law of cosines:\n # C = acos((a^2 + b^2 - c^2) / (2ab))\n return math.acos((a * a + b * b - c * c) / (2 * a * b))", "def compute_angle(a: [float], b: [float], c: [float]) -> float:\n ba = a - b\n bc = c - b\n\n cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))\n angle = np.arccos(np.abs(cosine_angle))\n\n return angle", "def angle(self, other):\n return acosd(np.clip(self.uv().dot(other.uv()), -1, 1))", "def angle(v,w):\n cosx = dot_product(v,w) / (length(v) * length(w))\n #det = determinant(A,B)\n rad = math.acos(cosx) # in radians\n return rad\n #return rad*180/math.pi # returns degrees", "def test_vectors_angle2(self):\n\n # Example 1.4\n vector_p = np.array([1.0, 2.0, 0.0])\n vector_q = np.array([3.0, 1.0, 1.0])\n crystal = crystal_system.Tetragonal(0.5, 1.0)\n angle_ref_deg = 53.300774799510123\n\n angle_rad = vector.angle2_rad(crystal, vector_p, vector_q)\n angle_deg = np.degrees(angle_rad)\n self.assertAlmostEqual(angle_ref_deg, angle_deg, 6)\n\n angle_rad = vector.angle2_rad(crystal, vector_q, vector_p)\n angle_deg = np.degrees(angle_rad)\n self.assertAlmostEqual(angle_ref_deg, angle_deg, 6)\n\n #self.fail(\"Test if the testcase is working.\")", "def angle_between_ll(ll1, ll2, ll3):\n return angle_between_points(ll2xy(*ll1)[0:2], ll2xy(*ll2)[0:2], ll2xy(*ll3)[0:2])", "def calc_angle(atom_R, env_R_arr1, env_R_arr2):\n assert atom_R not in env_R_arr1\n assert atom_R not in env_R_arr2\n atom_env1 = env_R_arr1 - atom_R # (num_env1, 3)\n atom_env2 = env_R_arr2 - atom_R # (num_env2, 3))\n norm1 = np.linalg.norm(atom_env1, axis=1)\n norm2 = np.linalg.norm(atom_env2, axis=1)\n cosine = atom_env1.dot(atom_env2.T) / np.outer(norm1, norm2)\n 
cosine[cosine > 1.0] = 1.0\n return np.arccos(cosine)", "def get_radians(g1,g2):\n unit_vector_1 = g1 / np.linalg.norm(g1) if np.linalg.norm(g1) != 0 else 0\n unit_vector_2 = g2 / np.linalg.norm(g2) if np.linalg.norm(g2) != 0 else 0\n dot_product = np.dot(unit_vector_1, unit_vector_2)\n radians = np.arccos(dot_product)\n return radians", "def get_vector(a, b):\n dx = float(b[0] - a[0])\n dy = float(b[1] - a[1])\n\n distance = math.sqrt(dx ** 2 + dy ** 2)\n\n if dy > 0:\n angle = math.degrees(math.atan(-dx / dy))\n elif dy == 0:\n if dx < 0:\n angle = 90.0\n elif dx > 0:\n angle = -90.0\n else:\n angle = 0.0\n else:\n if dx < 0:\n angle = 180 - math.degrees(math.atan(dx / dy))\n elif dx > 0:\n angle = -180 - math.degrees(math.atan(dx / dy))\n else:\n angle = 180.0\n\n return distance, angle", "def angle(self, other):\n return acosd(self.normalized().dot(other.normalized()))", "def angle(a: Point, b: Point) -> int:\n ang = math.degrees(math.atan2(b.y - a.y, b.x - a.x)) + 90\n return ang + 360 if ang < 0 else ang", "def py_ang(v1, v2):\n cosang = np.dot(v1, v2)\n sinang = la.norm(np.cross(v1, v2))\n return np.arctan2(sinang, cosang)", "def angle(v1,v2, deg = False):\n # v1.v2 = ||v1||||v2|| cos(angle) => angle = arcos(v1.v2/||v1||||v2||)\n # see more: http://www.wikihow.com/Find-the-Angle-Between-Two-Vectors\n # tested with http://codereview.stackexchange.com/a/54413\n if deg: return np.rad2deg(np.arccos(old_div(np.dot(v1,v2),(anorm(v1)*anorm(v2))))) # *180.0/np.pi\n return np.arccos(old_div(np.dot(v1,v2),(anorm(v1)*anorm(v2))))", "def calcul_angle(point1, point2, point3):\n \n x1,y1,z1=point1\n x2,y2,z2=point2\n x3,y3,z3=point3\n \n vec1=[x1-x2, y1-y2, z1-z2]\n vec2=[x3-x2, y3-y2, z3-z2]\n\n return calcul_angle_vector(vec1, vec2)", "def angle(p1, p2):\n dx = p2[0] - p1[0]\n dy = p2[1] - p1[1]\n if dx == 0:\n if dy == 0:\n return 0\n return 90\n alpha = math.atan(dy / dx) * 180 / math.pi\n if alpha < 0:\n alpha = 180 - alpha\n return alpha", "def angle_difference(ฮธ1, ฮธ2):\n ordinary_diff = (ฮธ2 - ฮธ1) % np.pi\n return (np.pi / 2) - np.abs(ordinary_diff - (np.pi / 2))", "def angle(self, factor):\n n1 = self.getNormalizedVector()\n n2 = factor.getNormalizedVector()\n\n # Determine angle between the two vectors.\n cos_angle = n1.scalarProduct(n2)\n angle = np.arccos(cos_angle)\n # Edoardo: numpy.arccos() always returns an angle in radians in [0, pi].\n\n # Mark's version:\n # By convention always return the smaller angle.\n # while angle > 2.0 * np.pi:\n # angle -= 2.0 * np.pi\n\n # if angle > np.pi:\n # angle = 2.0 * np.pi - angle\n\n return angle", "def get_exact_angle(pt1, pt2):\n dx, dy = pt2[0]-pt1[0], pt2[1]-pt1[1]\n return math.atan2(dy,dx)", "def angle(p0, p1, prv_ang=0):\r\n ang = math.atan2(p0[1] - p1[1], p0[0] - p1[0])\r\n a0 = (ang - prv_ang)\r\n a0 = a0 % (PI * 2) - PI\r\n return a0", "def angle_diff(a1, a2):\n a = a1 - a2\n if abs(a) > 180:\n return np.sign(a)*360 - a\n else:\n return a", "def py_ang(self,v1, v2):\n cosang = np.dot(v1, v2)\n sinang = la.norm(np.cross(v1, v2))\n return np.arctan2(sinang, cosang)", "def addVectors(r1, r2):\n \"\"\" [0] = angle, [1] = lenght \"\"\"\n x = (math.sin(r1[0]) * r1[1]) + (math.sin(r2[0]) * r2[1])\n y = (math.cos(r1[0]) * r1[1]) + (math.cos(r2[0]) * r2[1])\n \n angle = 0.5 * math.pi - math.atan2(y, x)\n length = math.hypot(x, y)\n\n return (angle, length)", "def get_intersect_angle(self, p0, p1, p2):\n u, v = p1-p0, p2-p0\n costheta = u.dot(v) / math.sqrt(u.dot(u) * v.dot(v))\n return math.degrees(math.acos(costheta))", "def angle(l, m, 
n):\n q = round(m ** 2 + n ** 2 - l ** 2, 2)\n r = round(2 * m * n, 2)\n return math.acos(q / r)", "def calculate_angle(start: tuple, end: tuple):\n radians = -math.atan2(end[0] - start[0], end[1] - start[1])\n return math.degrees(radians) % 360" ]
[ "0.84158295", "0.8327667", "0.82317805", "0.816077", "0.8124094", "0.8080465", "0.8073348", "0.80499214", "0.791963", "0.7917364", "0.78978354", "0.78865665", "0.7873176", "0.7855662", "0.7855398", "0.7853263", "0.7845532", "0.7814118", "0.7813398", "0.778545", "0.7780554", "0.7772393", "0.7769316", "0.77676886", "0.775649", "0.771552", "0.7697259", "0.76880145", "0.766387", "0.7650838", "0.7641763", "0.76177347", "0.76161575", "0.7603833", "0.76037264", "0.7590118", "0.7571487", "0.75633657", "0.7547546", "0.75428915", "0.7514203", "0.7512505", "0.7509353", "0.7497432", "0.74759096", "0.7472227", "0.7444574", "0.7444017", "0.7424131", "0.74238515", "0.74225825", "0.73774314", "0.73574966", "0.7356699", "0.73530006", "0.73436284", "0.73425555", "0.73307174", "0.7327405", "0.7312936", "0.72820634", "0.7274318", "0.7269661", "0.7251229", "0.7249034", "0.724089", "0.72348595", "0.72172314", "0.7214055", "0.7207519", "0.7197259", "0.71956354", "0.71932346", "0.7192433", "0.7191406", "0.7156319", "0.7128191", "0.71113265", "0.7099764", "0.7091059", "0.7090092", "0.70861614", "0.70724046", "0.70707226", "0.7060118", "0.7053714", "0.70530367", "0.7027836", "0.70258236", "0.7023089", "0.70219874", "0.7020485", "0.70110387", "0.7009842", "0.70080763", "0.6988435", "0.6986896", "0.69638497", "0.6962801", "0.69305605", "0.69192934" ]
0.0
-1
Compute the geodesic distance on the sphere for two points. The points are assumed to lie on the surface of the same sphere.
def spherical_distances(x, y):
    # Compute the norms of all points, we do NOT check they actually all lie on
    # the same sphere (that's the caller's responsibility).
    xn = np.sqrt((x**2).sum(axis=1))
    yn = np.sqrt((y**2).sum(axis=1))
    ang_cos = np.dot(x, y.T)/(xn[:, None]*yn[None, :])
    # Protect against numerical noise giving us cosine values outside the -1,1
    # range, where arccos would return nans.
    ang_cos = np.clip(ang_cos, -1, 1)
    return xn[:, None]*np.arccos(ang_cos)
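For illustration, a minimal usage sketch of the function above; it assumes numpy is imported as np and that the rows of x are points sampled on a common (unit) sphere, neither of which is stated by the dataset row itself:

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(5, 3))
x /= np.linalg.norm(x, axis=1, keepdims=True)  # project the sample points onto the unit sphere

d = spherical_distances(x, x)
print(d.shape)                       # (5, 5) matrix of pairwise geodesic distances
print(np.allclose(np.diag(d), 0.0))  # each point is at distance 0 from itself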
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def distance_sphere(self, other):\n if not self.crs == getattr(other, \"crs\", \"EPSG:4326\") == \"EPSG:4326\":\n raise ValueError(\"Only can calculate spherical distance with 'EPSG:4326' crs.\")\n return _binary_op(arctern.ST_DistanceSphere, self, other)", "def distance_on_sphere(lat1, long1, lat2, long2):\n degrees_to_radians = math.pi/180.0\n \n # phi = 90 - latitude\n phi1 = (90.0 - float(lat1))*degrees_to_radians\n phi2 = (90.0 - float(lat2))*degrees_to_radians\n \n # theta = longitude\n theta1 = float(long1)*degrees_to_radians\n theta2 = float(long2)*degrees_to_radians\n \n # Compute spherical distance from spherical coordinates.\n \n # For two locations in spherical coordinates\n # (1, theta, phi) and (1, theta', phi')\n # cosine( arc length ) =\n # sin phi sin phi' cos(theta-theta') + cos phi cos phi'\n # distance = rho * arc length\n \n cos = (math.sin(phi1)*math.sin(phi2)*math.cos(theta1 - theta2) +\n math.cos(phi1)*math.cos(phi2))\n arc = math.acos( cos )\n \n # Remember to multiply arc by the radius of the earth\n # in your favorite set of units to get length.\n return round(arc * 6373 / 10 * 60)", "def spherical_distance(lat1, lon1, lat2, lon2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n km = 6373 * c\n km = '%d' % km\n return float(km)", "def calculateDistanceBetweenPoints(lat1,lon1,lat2,lon2):\n\treturn Geodesic.WGS84.Inverse(lat1,lon1, lat2, lon2)['s12']", "def geodesic_distance(coord1, coord2):\n # convert coordinates to radians\n s = math.pi * np.squeeze(np.array(coord1)) / 180\n f = math.pi * np.squeeze(np.array(coord2)) / 180\n\n delta = (f - s)/2\n t = math.cos(f[0]) * math.cos(s[0]) * math.sin(delta[1])**2 + math.sin(delta[0])**2\n\n return earth_radius() * 2 * math.atan2(t**(1/2),(1-t)**(1/2))", "def distance_from_sphere(self, points, params, sqrt=False):\n center, radius = params\n center = center.reshape((1, 3))\n distance = (torch.norm(points - center, p=2, dim=1) - radius) ** 2\n if sqrt:\n distance = guard_sqrt(distance)\n\n if self.reduce:\n distance = torch.mean(distance)\n return distance", "def calculate_distance(point1, point2):\n import math\n\n def convert_to_radians(degrees):\n return degrees * math.pi / 180\n\n radius_earth = 6.371E3 # km\n phi1 = convert_to_radians(point1[0])\n phi2 = convert_to_radians(point2[0])\n delta_phi = convert_to_radians(point1[0] - point2[0])\n delta_lam = convert_to_radians(point1[1] - point2[1])\n\n\n a = math.sin(0.5 * delta_phi)**2 + math.cos(phi1) * math.cos(phi2) * math.sin(0.5 * delta_lam)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n return radius_earth * c / 1.60934 # convert km to miles", "def distance(lat1, lon1, lat2, lon2):\r\n radius = 6373 * 1000\r\n dlon = lon2 - lon1\r\n dlat = lat2 - lat1\r\n a = (math.sin(dlat/2))**2 + math.cos(lat1) * math.cos(lat2) * (math.sin(dlon/2))**2\r\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\r\n return radius * c", "def distance(lat1, lon1, lat2, lon2):\r\n earth_radius=3959.0 #miles\r\n if lat1==lat2 and lon1==lon2:\r\n dst=0\r\n else:\r\n dst = acos(\r\n (sin(radians(lat1)) * sin(radians(lat2))) +\r\n (cos(radians(lat1)) * cos(radians(lat2)) * cos(radians(lon1) - radians(lon2)))\r\n ) * earth_radius\r\n return dst", "def calculate_distance(point1, point2):\n import math\n\n def convert_to_radians(degrees):\n return degrees * math.pi / 180\n\n 
radius_earth = 6.371E3 # km\n phi1 = convert_to_radians(point1[0])\n phi2 = convert_to_radians(point2[0])\n\n delta_phi = convert_to_radians(point1[0] - point2[0])\n delta_lam = convert_to_radians(point1[1] - point2[1])\n\n a = math.sin(0.5 * delta_phi)**2 + math.cos(phi1) * math.cos(phi2) * math.sin(0.5 * delta_lam)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n return radius_earth * c / 1.60934 # convert km to miles", "def gpx_distance(lat1, lon1, lat2, lon2):\n theta = lon1 - lon2\n rads = sin(radians(lat1)) * sin(radians(lat2)) + cos(radians(lat1)) * cos(radians(lat2)) * cos(radians(theta))\n\n # make sure rads is [-1, 1]\n rads = 1 if rads > 1 else rads\n rads = -1 if rads < -1 else rads\n\n rads = acos(rads)\n\n # multiply by radius of the earth to get distance\n return rads * 6367", "def dist_between_spheres(r1, r2, Y, C):\n h = C - Y\n \n d1 = np.sqrt(r1**2 - h**2)\n d2 = np.sqrt(r2**2 - h**2)\n\n dist = r1 - d1 + r2 - d2\n \n return dist", "def get_spherical_distance(lat1,lat2,long1,long2):\n lat1,lat2,long1,long2= float(lat1),float(lat2),float(long1),float(long2)\n q=radians(lat2-lat1)\n r=radians(long2-long1)\n lat2r=radians(lat2)\n lat1r=radians(lat1)\n a=sin(q/2)*sin(q/2)+cos(lat1r)*cos(lat2r)*sin(r/2)*sin(r/2)\n c=2*atan2(sqrt(a),sqrt(1-a))\n R=6371*1000\n d=R*c\n return d", "def calculate_distance(point1, point2):\n\n def convert_to_radians(degrees):\n return degrees * math.pi / 180\n\n radius_earth = 6.371E3 # km\n phi1 = convert_to_radians(point1[0])\n phi2 = convert_to_radians(point2[0])\n delta_phi = convert_to_radians(point1[0] - point2[0])\n delta_lam = convert_to_radians(point1[1] - point2[1])\n\n a = math.sin(0.5 * delta_phi)**2 + math.cos(phi1) * math.cos(phi2) * math.sin(0.5 * delta_lam)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n return radius_earth * c / 1.60934 # convert km to miles", "def distance_to(self, other):\n if type(other) == GeoPoint:\n other = other.to_cartesian()\n d0 = self.x - other.x\n d1 = self.y - other.y\n d2 = self.z - other.z\n\n return math.sqrt(d0 * d0 + d1 * d1 + d2 * d2)", "def getDistance(point1, point2):\n\n \"\"\"Convert in radians\"\"\"\n lat1 = radians(point1.getLatitude())\n lon1 = radians(point1.getLongitude())\n lat2 = radians(point2.getLatitude())\n lon2 = radians(point2.getLongitude())\n d_lon = lon2 - lon1\n d_lat = lat2 - lat1\n\n \"\"\"Approximate radius of earth in km\"\"\"\n R = 6373.0\n\n \"\"\"Apply the formula\"\"\"\n a = sin(d_lat / 2)**2 + cos(lat1) * cos(lat2) * sin(d_lon / 2)**2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n\n \"\"\"Get the distance between point1 and point2\"\"\"\n distance = R * c\n\n return distance", "def distance(latitude_1: float, longitude_1: float, latitude_2: float, longitude_2: float) -> float:\n lat1, lon1, lat2, lon2 = map(radians, (latitude_1, longitude_1, latitude_2, longitude_2))\n return (\n 2\n * EARTH_RADIUS\n * asin(\n sqrt(\n sin((lat2 - lat1) / 2) ** 2 + cos(lat1) * cos(lat2) * (sin((lon2 - lon1) / 2) ** 2)\n )\n )\n )", "def get_spherical_distance(lat1,lat2,long1,long2):\n q=radians(lat2-lat1)\n r=radians(long2-long1)\n lat2r=radians(lat2)\n lat1r=radians(lat1)\n a=sin(q/2)*sin(q/2)+cos(lat1r)*cos(lat2r)*sin(r/2)*sin(r/2)\n c=2*atan2(sqrt(a),sqrt(1-a))\n R=6371*1000\n d=R*c\n return d", "def geodesicDistance(A, B = geolocate(\"Colosseo\")):\n # colosseo = (41.890183, 12.492369)\n return geopy.distance.vincenty(A, B).meters", "def distance(lat1, lon1, lat2, lon2):\n lon1, lat1 = math.radians(lon1), math.radians(lat1)\n lon2, lat2 = math.radians(lon2), 
math.radians(lat2)\n a = (math.sin((lat2 - lat1) / 2) ** 2 +\n math.cos(lat1) * math.cos(lat2) * math.sin((lon2 - lon1) / 2) ** 2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = 6371000 * c\n\n return d", "def distance(self, coord1, coord2):\n sinsin_lat = coord1.lat.sin() * coord2.lat.sin()\n coscos_lat = coord1.lat.cos() * coord2.lat.cos()\n cos_deltalong = coord1.delta_long(coord2).cos()\n\n angle = AngleDeg().acos(sinsin_lat + coscos_lat * cos_deltalong)\n\n return angle.dist_from_radius(EARTH_RADIUS)", "def get_distance(first: Point, second: Point) -> Float:\n\n return sqrt(\n (second.x - first.x) ** 2\n +\n (second.y - first.y) ** 2\n )", "def geo_distance(lat1,lon1,lat2,lon2):\n \n # radius of earth in km\n R=6373.0\n\n # pi\n pi=math.pi\n\n lat1=math.radians(lat1)\n lat2=math.radians(lat2)\n lon1=math.radians(lon1)\n lon2=math.radians(lon2)\n\n dlon=lon2 - lon1\n dlat=lat2 - lat1\n\n a=sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n c=2 * atan2(sqrt(a), sqrt(1 - a))\n\n distance=R * c\n\n tc1=atan2(sin(lon2-lon1)*cos(lat2),\n cos(lat1)*sin(lat2)-sin(lat1)*cos(lat2)*cos(lon2-lon1))\n\n tc1=tc1 % (2*pi)\n\n bearing=math.degrees(tc1)\n\n return [distance,bearing]", "def get_distance(lat1, lon1, lat2, lon2) -> float:\n # Earth radius in meters\n radius = 6371000\n\n # Degress to radian\n lat1, lon1, lat2, lon2 = map(np.deg2rad, [lat1, lon1, lat2, lon2])\n\n # Deltas\n dlat = lat2 - lat1\n dlon = lon2 - lon1\n\n # Calculate distance\n arch = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2) ** 2\n arch_sin = 2 * np.arcsin(np.sqrt(arch))\n\n return radius * arch_sin", "def distance(self, coord1, coord2):\n return (abs(coord1.x - coord2.x) + abs(coord1.y - coord2.y) + abs(coord1.z - coord2.z))//2", "def get_distance(lat1, lon1, lat2, lon2):\n phi1 = math.radians(lat1)\n phi2 = math.radians(lat2)\n d_phi = math.radians(lat2 - lat1)\n d_lam = math.radians(lon2 - lon1)\n a = math.sin(d_phi/2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(d_lam/2)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\n return 6371000 * c", "def get_distance(p1, p2):\n\n deg_rad = math.pi / 180\n\n dphi = p1[1] - p2[1]\n phim = 0.5 * (p1[1] + p2[1])\n dlam = p1[0] - p2[0]\n\n k1 = (111.13209 - 0.56605 * math.cos(2 * phim * deg_rad) + 0.00120 * \n math.cos(4 * phim * deg_rad))\n k2 = (111.41513 * math.cos(phim * deg_rad) - 0.09455 * \n math.cos(3 *phim * deg_rad) + 0.0012 * math.cos(5 * phim * deg_rad))\n\n return numpy.sqrt(k1**2 * dphi**2 + k2**2 * dlam**2)", "def distance(a: Point, b: Point) -> float:\n return math.sqrt(math.pow(b.x - a.x, 2) + math.pow(b.y - a.y, 2))", "def distance_between(lat_1, lon_1, lat_2, lon_2):\n lat_1, lon_1 = math.radians(lat_1), math.radians(lon_1)\n lat_2, lon_2 = math.radians(lat_2), math.radians(lon_2)\n theta = lon_1 - lon_2\n dist = math.sin(lat_1)*math.sin(lat_2) + math.cos(lat_1)*math.cos(lat_2)*math.cos(theta)\n dist = math.acos(dist)\n dist = math.degrees(dist)\n dist = dist * 69.06 # 69.09 = circumference of earth in miles / 360 degrees\n return dist", "def dist_sf( lon1, lon2, lat1, lat2 ):\n\n subfalla_i = (lon1, lat1)\n subfalla_j = (lon2, lat2)\n distancia = distance.distance( subfalla_i, subfalla_j ).meters\n\n return distancia", "def distance(point1, point2):\n return math.sqrt(math.pow((point1[0] - point2[0]), 2) +\n math.pow(point1[1] - point2[1], 2))", "def distance(point_1=(0, 0), point_2=(0, 0)):\n return math.sqrt(\n (point_1[0] - point_2[0]) ** 2 +\n (point_1[1] - point_2[1]) ** 2)", "def 
earth_distance(lat1: float, lon1: float, lat2: float, lon2: float)\\\n -> float:\n # R = 6373.0 # earth radius in km\n R = 3963.0 # earth radius in miles\n lat1 = radians(lat1)\n lon1 = radians(lon1)\n lat2 = radians(lat2)\n lon2 = radians(lon2)\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n distance = R * c\n return distance", "def distance(coord1, coord2):\n \n return sqrt((coord1[0]-coord2[0])**2+\n (coord1[1]-coord2[1])**2+\n (coord1[2]-coord2[2])**2)", "def distance_to(self, point1, point2):\n delta_x = self.x_points[point1] - self.x_points[point2]\n delta_y = self.y_points[point1] - self.y_points[point2]\n return math.sqrt(delta_x * delta_x + delta_y * delta_y)", "def point_to_point_distance(p1:Point, p2: Point) -> float:\n return round(geopy.distance.distance((p1.y, p1.x), (p2.y, p2.x)).km,2)", "def distance(p1, p2):\n\n return sqrt(((p2[0] - p1[0])**2) + ((p2[1] - p1[1])**2))", "def distance_between_coordinates(lat1, long1, lat2, long2):\n # Reference: To calulate Great Circle Distance - https://en.wikipedia.org/wiki/Great-circle_distance\n\n lat1 = float(lat1)\n lat2 = float(lat2)\n long1 = float(long1)\n long2 = float(long2)\n\n # converting degrees to radians\n lat1 = degree_to_radians(lat1)\n long1 = degree_to_radians(long1)\n lat2 = degree_to_radians(lat2)\n long2 = degree_to_radians(long2)\n\n # delta between longitudes\n delta_long = abs(long1 - long2)\n\n # central angle between point 1 and point 2\n central_angle = acos( sin(lat1)\n * sin(lat2)\n + cos(lat1)\n * cos(lat2)\n * cos(delta_long))\n\n\n return EARTH_RADIUS * central_angle", "def distance(d1, d2):\n projection_onto_plane = d2 - projection(d1, d2)\n dist = np.linalg.norm(projection_onto_plane)\n\n return dist", "def distance_between_points(p1,p2):\n return math.sqrt((p2.x-p1.x)**2+(p2.y-p1.y)**2)", "def distance(self, point_1=(0, 0), point_2=(0, 0)):\n\t\treturn math.sqrt((point_1[0]-point_2[0])**2+(point_1[1]-point_2[1])**2)", "def compute_dist(p_1, p_2):\n return sqrt((p_2[0] - p_1[0])**2 + (p_2[1] - p_1[1])**2 +\n (p_2[2] - p_1[2])**2)", "def distance_between_points(a: Point, b: Point) -> float:\n return math.sqrt((a.x - b.x)**2 + (a.y - b.y)**2)", "def distance_coordinates(lat1: Decimal, lon1: Decimal, lat2: Decimal, lon2: Decimal) -> Decimal:\n lat1 = math.radians(lat1)\n lon1 = math.radians(lon1)\n lat2 = math.radians(lat2)\n lon2 = math.radians(lon2)\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n distance = Decimal(R * c)\n\n return distance", "def distance_to(self, other):\n # Radius of earth in km\n R = 6373.0\n\n lat1r = radians(self.lat)\n lon1r = radians(self.lon)\n lat2r = radians(other.lat)\n lon2r = radians(other.lon)\n\n dlon = lon2r - lon1r\n dlat = lat2r - lat1r\n\n a = sin(dlat / 2)**2 + cos(lat1r) * cos(lat2r) * sin(dlon / 2)**2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n\n distance = R * c\n\n return distance", "def distance(p1, p2):\n return sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2)", "def distance(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n km = 6367 * c\n return km", "def minkowski_distance(point1, point2):\n md = 
0\n for p1,p2 in zip(point1,point2):\n md += abs((p1-p2)**3)\n md = md**(1/3)\n return md\n raise NotImplementedError", "def __get_distance(point1: np.ndarray, point2: np.ndarray) -> float:\n return np.sqrt(np.sum(np.square(point1 - point2)))", "def calculate_point_distance(p1, p2):\n\n return math.sqrt(math.pow(p1[0]-p2[0],2) + math.pow(p1[1]-p2[1],2))", "def calculate_distance(self, other):\n return math.sqrt((self.center[0] - other.center[0]) ** 2 + (self.center[1] - other.center[1]) ** 2)", "def distance(x1, y1, z1, x2, y2, z2):\n return math.sqrt((x1-x2)**2+(y1-y2)**2+(z1-z2)**2)", "def distance(coords1, coords2):\n dx = coords1.x - coords2.x\n dy = coords1.y - coords2.y\n return math.sqrt(dx * dx + dy * dy)", "def dist(p1,p2):\n\n return sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)", "def calc_dist(c1: Coordinates, c2: Coordinates = None) -> float:\n\t\n\t# Get distances for each dimension in a common unit, meters.\n\tlat_dist = (c1.lat - c2.lat) * LAT_RATIO\n\tlong_dist = (c1.lon - c2.lon) * LONG_RATIO\n\treturn math.sqrt(lat_dist**2 + long_dist**2)", "def distance_from_cylinder(self, points, params, sqrt=False):\n # axis: 3 x 1, center: 1 x 3\n axis, center, radius = params\n center = center.reshape((1, 3))\n axis = axis.reshape((3, 1))\n\n v = points - center\n prj = (v @ axis) ** 2\n\n # this is going negative at some point! fix it. Numerical issues.\n # voilating pythagoras\n dist_from_surface = torch.sum(v * v, 1) - prj[:, 0]\n dist_from_surface = torch.clamp(dist_from_surface, min=1e-5)\n\n distance = torch.sqrt(dist_from_surface) - radius\n # distance.register_hook(self.print_norm)\n distance = distance ** 2\n\n if sqrt:\n distance = guard_sqrt(distance)\n\n if torch.sum(torch.isnan(distance)):\n import ipdb;\n ipdb.set_trace()\n if self.reduce:\n distance = torch.mean(distance)\n\n return distance", "def spherical_distance(coord_pair, radius=MEAN_EARTH_RADIUS_M):\n\n return spherical_distance_haversine(np.array([coord_pair]), radius)[0]", "def compute_distance(point_1, point_2):\n x1, y1, x2, y2 = point_1[0], point_1[1], point_2[0], point_2[1]\n distance = np.sqrt((x2-x1)**2 + (y2-y1)**2)\n\n return distance", "def sphere_isclose(c1, c2, *args, **kwargs):\n return np.isclose(c1.radius, c2.radius, *args, **kwargs) and np.allclose(\n c1.center, c2.center, *args, **kwargs\n )", "def distance(p1,p2):\n return ((p1.x - p2.x)**2 + (p1.y - p2.y)**2)**0.5", "def distancia(self, other):\n return ((self.x-other.x)**2 + (self.y-other.y)**2 + (self.z-other.z)**2) ** (1 / 2)", "def distance(self, point1, point2):\n\n\t\tprint \"Inside Distance!-----\"\n\t\tdist = math.pow(point1[0] - point2[0], 2) + math.pow(point1[1] - point2[1], 2);\n\t\treturn dist", "def distanceKmTo(self, other):\n lon1 = math.radians(self.longitude)\n lon2 = math.radians(other.longitude)\n dlon = lon2 - lon1\n lat1 = math.radians(self.latitude)\n lat2 = math.radians(other.latitude)\n dlat = lat2 - lat1\n\n \n a = math.sin(dlat / 2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n\n return Point.R * c", "def dist(self, other):\n return math.sqrt((self.x - other.x)**2 +\n (self.y - other.y)**2 +\n (self.z - other.z)**2)", "def Distance_orthonormique(lon1, lat1, lon2, lat2):\r\n \r\n #Convert position in radians\r\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\r\n #rvmT = Earth radius [km]\r\n rvmT = 6371 \r\n #Project the position on\r\n a = sin((lat2 - lat1)/2)**2 + cos(lat1) * cos(lat2) * sin((lon2 - lon1)/2)**2\r\n c = 2 * 
asin(sqrt(a)) \r\n \r\n d = c * rvmT\r\n return d", "def distance(point0, point1):\n if point0 is None or point1 is None:\n return None\n diff = np.subtract(point0, point1)\n return np.sqrt(diff[0] ** 2 + diff[1] ** 2)", "def dist_between_gps_points(self, pointA, pointB):\n\n # radius of earth (m)\n r_earth = 6371e3\n\n # extract lat and long coordinates\n lat1 = pointA[0]\n lon1 = pointA[1]\n lat2 = pointB[0]\n lon2 = pointB[1]\n\n dlat = lat2 - lat1 # change in latitude\n dlon = lon2 - lon1 # change in longitude\n\n dx = r_earth * dlon * cos((lat1+lat2)/2)\n dy = r_earth * dlat\n\n dist = sqrt(square(dx)+square(dy)) # straight line approximation\n\n return dist", "def distance(p1, p2):\n return math.hypot(p1.x-p2.x, p1.y-p2.y)", "def _get_dist(self, p1, p2): \r\n\r\n distance = np.sqrt(\r\n (p1[0] - p2[0]) ** 2 +\r\n (p1[1] - p2[1]) ** 2 +\r\n (p1[2] - p2[2]) ** 2)\r\n\r\n return distance", "def distance_gps(point1, point2):\n return haversine_distance(point1.get_latitude(), point1.get_longitude(),\n point2.get_latitude(), point2.get_longitude())", "def distance(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n c = 2 * asin(sqrt(a))\n m = 6367 * c * 1000\n return m", "def dist(pnt1, pnt2):\n return ((pnt2[0] - pnt1[0])**2 + (pnt2[1] - pnt1[1])**2 + (pnt2[2] - pnt1[2])**2)**0.5", "def get_distance(point1, point2):\n a = (point1['x'] - point2['x']) ** 2\n b = (point1['y'] - point2['y']) ** 2\n return (a + b) ** (1.0 / 2)", "def distance(point1, point2):\n x1, y1 = point1[0], point1[1]\n x2, y2 = point2[0], point2[1]\n\n dx = x1 - x2\n dy = y1 - y2\n\n return math.sqrt(dx * dx + dy * dy)", "def distance(point1, point2):\n x1, y1 = point1[0], point1[1]\n x2, y2 = point2[0], point2[1]\n\n dx = x1 - x2\n dy = y1 - y2\n\n return math.sqrt(dx * dx + dy * dy)", "def distance(p1, p2):\n\treturn sqrt((p1[1]-p2[1])**2 + (p1[0]-p2[0])**2)", "def euclidean_distance(point1, point2):\n\n return math.sqrt(sum([(x - y) ** 2 for x, y in zip(point1, point2)]))", "def distance(pt1, pt2):\n\tx1, y1 = pt1\n\tx2, y2 = pt2\n\tx = x2 - x1\n\ty = y2 - y1\n\ts = x**2 + y**2\n\treturn np.sqrt(s)", "def distance_between_two_points(p1, p2):\n return math.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)", "def distance(XYZ1=np.array([0, 0, 0], dtype='float32'),\n XYZ2=np.array([1, 1, 1], dtype='float32')):\n a=XYZ2-XYZ1\n b=a**2\n c=b.sum()\n return np.sqrt(c)", "def distance(point1, point2):\n return ((point1[0] - point2[0])**2 + (point1[1] - point2[1])**2) ** 0.5", "def dist_sf_alt( lon1, lon2, lat1, lat2 ):\n\n dist = Geodesic.WGS84.Inverse( lat1, lon1, lat2, lon2 )[ \"s12\" ]\n \n return dist", "def get_dist(pose1, pose2):\n return math.sqrt((pose1.position.x - pose2.position.x)**2 + (pose1.position.y - pose2.position.y)**2 + (pose1.position.z - pose2.position.z)**2)", "def distance(a, b):\n return vincenty((float(a.longitude), float(a.latitude)),\n (float(b.longitude), float(b.latitude))).km", "def euclidean_dist(ss1, ss2):\n lat1, lon1 = ss1.centroid\n lat2, lon2 = ss2.centroid\n\n return sqrt((lat1 - lat2)**2 + (lon1 - lon2)**2)", "def distance(p1, p2):\n return math.sqrt((math.pow((p2[0] - p1[0]), 2) + math.pow((p2[1] - p1[1]), 2)))", "def getDistance(point1,point2):\n dx = point2[0]-point1[0]\n dy = point2[1]-point1[1]\n return math.sqrt(dy*dy + dx*dx)", "def dist(a: Point, b: Point):\n return (a.x - b.x) 
** 2 + (a.y - b.y) ** 2", "def coord_distance(lat1, lon1, lat2, lon2):\n\tlon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n\tdlon = lon2 - lon1\n\tdlat = lat2 - lat1\n\ta = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2\n\tc = 2 * math.asin(math.sqrt(a))\n\tkm = 6367 * c \n\treturn km", "def get_distance(point_a, point_b):\n \n return np.sqrt(np.sum((point_a - point_b) ** 2, 1))", "def distance(gps1, gps2):\n return haversine(gps1.lng, gps1.lat, gps2.lng, gps2.lat)", "def GetPointToPointDistance(self, point1, point2):\n return math.sqrt(vtk.vtkMath.Distance2BetweenPoints(point1, point2))", "def distance_to(self, p1):\n # due to rounding errors the actual function can return non-zero distance for the same point!\n if self.round() == p1.round():\n return 0\n from_theta = float(self.lat) / 360.0 * 2.0 * math.pi\n from_landa = float(self.long) / 360.0 * 2.0 * math.pi\n to_theta = float(p1.lat) / 360.0 * 2.0 * math.pi\n to_landa = float(p1.long) / 360.0 * 2.0 * math.pi\n tmp = math.sin(from_theta) * math.sin(to_theta) + math.cos(from_theta) * math.cos(to_theta) * math.cos(\n to_landa - from_landa)\n # if I don't round the number, ValueError: math domain error may occur\n return math.acos(round(tmp, 15)) * R_EARTH", "def dist_vincenty(lat1, lon1, lat2, lon2, iterations=20):\r\n if lat1 < -90 or lat1 > 90 or lat2 < -90 or lat2 > 90 or lon1 < -180 or lon1 > 180 or lon2 < -180 or lon2 > 180:\r\n raise ValueError(\r\n \"Latitude values shoulds range from (-90,90) and longitude from (-180,180) but one of the input values is out of bounds. Latitude_1: %f, Logitude_1: %f, Latitude_2: %f, Logitude_2: %f\" %\r\n (lat1, lon1, lat2, lon2))\r\n\r\n major, minor, f = 6378137, 6356752.314245, 1 / 298.257223563\r\n\r\n lat1, lng1, lat2, lng2 = radians(\r\n lat1), radians(lon1), radians(lat2), radians(lon2)\r\n delta_lng = lng2 - lng1\r\n reduced_lat1, reduced_lat2 = atan(\r\n (1 - f) * tan(lat1)), atan((1 - f) * tan(lat2))\r\n\r\n sin_reduced1, cos_reduced1 = sin(reduced_lat1), cos(reduced_lat1)\r\n sin_reduced2, cos_reduced2 = sin(reduced_lat2), cos(reduced_lat2)\r\n\r\n lambda_lng = delta_lng\r\n lambda_prime = 2 * pi\r\n while abs(lambda_lng - lambda_prime) > 10e-12 and iterations > 0:\r\n sin_lambda_lng, cos_lambda_lng = sin(lambda_lng), cos(lambda_lng)\r\n\r\n sin_sigma = sqrt(\r\n (cos_reduced2 * sin_lambda_lng) ** 2 +\r\n (cos_reduced1 * sin_reduced2 -\r\n sin_reduced1 * cos_reduced2 * cos_lambda_lng) ** 2\r\n )\r\n if sin_sigma == 0:\r\n return 0 # Coincident points\r\n\r\n cos_sigma = (\r\n sin_reduced1 * sin_reduced2 +\r\n cos_reduced1 * cos_reduced2 * cos_lambda_lng\r\n )\r\n sigma = atan2(sin_sigma, cos_sigma)\r\n\r\n sin_alpha = (cos_reduced1 * cos_reduced2 * sin_lambda_lng / sin_sigma)\r\n cos_sq_alpha = 1 - sin_alpha ** 2\r\n\r\n if cos_sq_alpha != 0:\r\n cos2_sigma_m = cos_sigma - 2 * \\\r\n (sin_reduced1 * sin_reduced2 / cos_sq_alpha)\r\n else:\r\n cos2_sigma_m = 0.0 # Equatorial line\r\n\r\n C = f / 16. * cos_sq_alpha * (4 + f * (4 - 3 * cos_sq_alpha))\r\n\r\n lambda_prime = lambda_lng\r\n lambda_lng = (\r\n delta_lng + (1 - C) * f * sin_alpha * (\r\n sigma + C * sin_sigma * (\r\n cos2_sigma_m + C * cos_sigma * (-1 + 2 * cos2_sigma_m ** 2)\r\n )\r\n )\r\n )\r\n iterations -= 1\r\n\r\n if iterations == 0:\r\n raise ValueError(\"Vincenty formula failed to converge!\")\r\n\r\n u_sq = cos_sq_alpha * (major ** 2 - minor ** 2) / minor ** 2\r\n A = 1 + u_sq / 16384. * (4096 + u_sq * (-768 + u_sq * (320 - 175 * u_sq)))\r\n B = u_sq / 1024. 
* (256 + u_sq * (-128 + u_sq * (74 - 47 * u_sq)))\r\n delta_sigma = B * sin_sigma * (\r\n cos2_sigma_m + B / 4. * (cos_sigma * (-1 + 2 * cos2_sigma_m ** 2) -\r\n B / 6. * cos2_sigma_m * (-3 + 4 * sin_sigma ** 2) *\r\n (-3 + 4 * cos2_sigma_m ** 2))\r\n )\r\n s = minor * A * (sigma - delta_sigma)\r\n\r\n return round(s, 3) # round to 1mm precision\r", "def distance_between(point_one, point_two):\n sum = 0\n for d1,d2 in zip(point_one,point_two):\n sum += math.pow(float(d1) - float(d2), 2)\n\n return math.sqrt(sum)", "def dist(pos1, pos2):\n a, b = pos1\n c, d = pos2\n \n return sqrt((a-c)**2 + (b-d)**2)", "def getDistanceBetweenTwoPoints(self, one, two):\n dx = one.x - two.x\n dy = one.y - two.y\n return math.sqrt(dx * dx + dy * dy)", "def calculate_distance(point1, point2):\n # modify points to be a [x,y,z] numpy array\n np_point_1 = convert_point_type(point1)\n np_point_2 = convert_point_type(point2)\n\n distance = ((np_point_1[0] - np_point_2[0])**2 + (np_point_1[1] - np_point_2[1])**2)**0.5\n\n return distance", "def dist(lat1, lon1, lat2, lon2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r", "def coord_distance(lat1, lon1, lat2, lon2):\n lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2\n km = 2 * 6367 * math.asin(math.sqrt(a))\n mi = 0.621371 * km\n return mi" ]
[ "0.7638325", "0.70502967", "0.6934199", "0.6927222", "0.6903243", "0.6792316", "0.6787839", "0.6785977", "0.6757207", "0.6749283", "0.6746814", "0.6737798", "0.6729415", "0.6711453", "0.66890496", "0.66774637", "0.664188", "0.6629779", "0.6612115", "0.66114235", "0.66067094", "0.65922534", "0.65896803", "0.65835136", "0.65729874", "0.65598667", "0.6552568", "0.6549336", "0.65461516", "0.6543318", "0.65393955", "0.6522427", "0.64987874", "0.6495", "0.64896613", "0.64824605", "0.647885", "0.64680374", "0.6461263", "0.64575917", "0.64539504", "0.64500874", "0.64435095", "0.64074194", "0.64044833", "0.63970554", "0.6392891", "0.6387769", "0.63877493", "0.63853294", "0.6384836", "0.6383158", "0.63731027", "0.6364114", "0.6362125", "0.636061", "0.6360068", "0.6359201", "0.6352939", "0.6346948", "0.6344932", "0.633581", "0.63269603", "0.63239557", "0.63219047", "0.63208926", "0.6320833", "0.632015", "0.6319319", "0.63178056", "0.63144577", "0.63140625", "0.63127047", "0.6312703", "0.6312703", "0.6311962", "0.63057345", "0.6298885", "0.62980354", "0.62971294", "0.6294636", "0.62892807", "0.62821174", "0.6272846", "0.6271524", "0.6266564", "0.62625253", "0.6261918", "0.62487066", "0.6245512", "0.62444615", "0.62442327", "0.62431467", "0.62426037", "0.62347084", "0.6229329", "0.62290573", "0.62277615", "0.6226769", "0.6223316" ]
0.7146757
1
Estimate the bandwidth, i.e. the radius to use with an RBF kernel in the MeanShift algorithm
def estimate_bandwidth(X, quantile=0.3):
    distances = spherical_distances(X, X)
    distances = np.triu(distances, 1)
    distances_sorted = np.sort(distances[distances > 0])
    # np.floor returns a float, which cannot be used as an array index; cast to int.
    bandwidth = distances_sorted[int(np.floor(quantile * len(distances_sorted)))]
    return bandwidth
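For illustration only, a hedged usage sketch: it assumes numpy as np, the spherical_distances helper defined above, and scikit-learn's MeanShift. Note that sklearn's MeanShift clusters with Euclidean distances internally, so handing it a spherical radius is a simplification for demonstration, not the dataset's own recipe:

import numpy as np
from sklearn.cluster import MeanShift

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 3))
X /= np.linalg.norm(X, axis=1, keepdims=True)  # sample points on the unit sphere

bw = estimate_bandwidth(X, quantile=0.3)        # radius to use as the RBF kernel bandwidth
labels = MeanShift(bandwidth=bw).fit_predict(X)
print("bandwidth:", bw, "clusters:", len(np.unique(labels)))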
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _kernel(self, bw, X, x):\n return (1.0 / np.sqrt(2 * np.pi) / bw) * np.exp(\n -((X - x) ** 2) / (bw ** 2 * 2.0)\n )", "def R(self):\n\t\treturn (arange(self.rbins) + 0.5) * (self.cbins - 0.5) / self.rbins", "def get_bollinger_bands(rm, rstd):\n \n upper_band=rm+2*rstd\n lower_band=rm-2*rstd\n return upper_band, lower_band", "def _kernel(self, point, observation, bandwidth):\n denom = bandwidth * ((2*math.pi)**.5) \n num = math.exp(-0.5 * ((point-observation)/bandwidth)**2)\n return num/denom", "def question_18():\n rbf = RadialBiasFunction()\n wins = 0.0\n for i in range(100):\n rbf.fit(1.5, 9)\n rbf_error = rbf.error()\n if rbf_error == 0:\n wins += 1\n rbf.resample()\n return wins / 100", "def _kernel(r: float, h: float) -> float:\n sigma_2 = 10 / (7 * np.pi * h * h)\n q = abs(r / h)\n\n if q <= 1.0:\n q2 = q * q\n W = 1.0 - 1.5 * q2 * (1.0 - 0.5 * q)\n W *= sigma_2\n elif q <= 2.0:\n two_minus_q = 2 - q\n two_minus_q_c = np.power(two_minus_q, 3)\n W = 0.25 * two_minus_q_c\n W *= sigma_2\n else:\n W = 0\n\n return W", "def get_radius(self):\r\n return 1", "def add_bollinger_bands(self, rstd):\n self.data['upper_band'] = self.data['rolling_mean'] + 2 * rstd\n self.data['lower_band'] = self.data['rolling_mean'] - 2 * rstd", "def _radial_basis(self):\n errexp = 10\n cutbasis = self.rcut + self.sigma*np.sqrt(2.*errexp*np.log(10.))\n spacebasis = cutbasis/self.nmax\n rbasis = np.zeros(self.nmax)\n rbasis[0] = 1.\n for i in range(1, self.nmax):\n rbasis[i] = rbasis[i-1] + spacebasis\n return rbasis", "def band_penalty(self):\n fc_ix = np.argmin(np.abs(self.f - self.fc)) # Index to frequency array closes to center frequency\n # Number of indexes on each side of center frequency, not extending outside, only up to 10 kHz\n n = min(fc_ix, self.ix10k - fc_ix)\n if n == 0:\n return 0.0\n return np.mean(np.square(self.fr[fc_ix - n:fc_ix] - self.fr[fc_ix + n - 1:fc_ix - 1:-1]))", "def f_2(center):\r\n Ri = calc_R(center)\r\n return Ri - Ri.mean()", "def get_binary_rf_area(self):\n\n if self.thr is None:\n raise LookupError('To th area, the receptive field should be thresholded!!')\n\n alt_step = abs(np.mean(np.diff(self.altPos).astype(np.float)))\n azi_step = abs(np.mean(np.diff(self.aziPos).astype(np.float)))\n\n return len(self.weights) * alt_step * azi_step", "def ComputeNrb(self):\r\n pass", "def get_radius(self):", "def rbf(input_dim,variance=1., lengthscale=None,ARD=False):\r\n part = parts.rbf.RBF(input_dim,variance,lengthscale,ARD)\r\n return kern(input_dim, [part])", "def calc_rsi(image):\n\n # roll axes to conventional row,col,depth\n img = np.rollaxis(image, 0, 3)\n\n # bands: Coastal(0), Blue(1), Green(2), Yellow(3), Red(4), Red-edge(5), NIR1(6), NIR2(7)) Multispectral\n COAST = img[:, :, 0]\n B = img[:, :, 1]\n G = img[:, :, 2]\n Y = img[:, :, 3]\n R = img[:, :, 4]\n RE = img[:, :, 5]\n NIR1 = img[:, :, 6]\n NIR2 = img[:, :, 7]\n\n arvi = old_div((NIR1 - (R - (B - R))), (NIR1 + (R - (B - R))))\n dd = (2 * NIR1 - R) - (G - B)\n gi2 = (B * -0.2848 + G * -0.2434 + R * -0.5436 + NIR1 * 0.7243 + NIR2 * 0.0840) * 5\n gndvi = old_div((NIR1 - G), (NIR1 + G))\n ndre = old_div((NIR1 - RE), (NIR1 + RE))\n ndvi = old_div((NIR1 - R), (NIR1 + R))\n ndvi35 = old_div((G - R), (G + R))\n ndvi84 = old_div((NIR2 - Y), (NIR2 + Y))\n nirry = old_div((NIR1), (R + Y))\n normnir = old_div(NIR1, (NIR1 + R + G))\n psri = old_div((R - B), RE)\n rey = old_div((RE - Y), (RE + Y))\n rvi = old_div(NIR1, R)\n sa = old_div(((Y + R) * 0.35), 2) + old_div((0.7 * (NIR1 + NIR2)), 2) - 0.69\n vi1 = old_div((10000 * 
NIR1), (RE) ** 2)\n vire = old_div(NIR1, RE)\n br = (old_div(R, B)) * (old_div(G, B)) * (old_div(RE, B)) * (old_div(NIR1, B))\n gr = old_div(G, R)\n rr = (old_div(NIR1, R)) * (old_div(G, R)) * (old_div(NIR1, RE))\n\n ###Built-Up indices\n wvbi = old_div((COAST - RE), (COAST + RE))\n wvnhfd = old_div((RE - COAST), (RE + COAST))\n\n ###SIs\n evi = old_div((2.5 * (NIR2 - R)), (NIR2 + 6 * R - 7.5 * B + 1))\n L = 0.5 # some coefficient for Soil Adjusted Vegetation Index (SAVI) DO NOT INCLUDE IN FEATURES\n savi = old_div(((1 + L) * (NIR2 - R)), (NIR2 + R + L))\n msavi = old_div((2 * NIR2 + 1 - ((2 * NIR2 + 1) ** 2 - 8 * (NIR2 - R)) ** 0.5), 2)\n bai = old_div(1.0, ((0.1 + R) ** 2 + 0.06 + NIR2))\n rgi = old_div(R, G)\n bri = old_div(B, R)\n\n rsi = np.stack(\n [arvi, dd, gi2, gndvi, ndre, ndvi, ndvi35, ndvi84, nirry, normnir, psri, rey, rvi, sa, vi1, vire, br, gr, rr,\n wvbi, wvnhfd, evi, savi, msavi, bai, rgi, bri],\n axis=2)\n\n return rsi", "def band_penalty(self):\n fc_ix = np.argmin(np.abs(self.f - self.fc)) # Index to frequency array closes to center frequency\n # Number of indexes on each side of center frequency, not extending outside, only up to 10 kHz\n n = min(fc_ix, self.ix10k - fc_ix)\n if n == 0:\n return 0.0\n return np.mean(np.square(self.fr[fc_ix - n:fc_ix] - (self.gain - self.fr[fc_ix + n - 1:fc_ix - 1:-1])))", "def dcsrbf(r):\n return 3*(num.power(num.maximum(0, 1-r), 3) - num.power(num.maximum(0, 1-r),2)*(3*r+1))", "def get_bollinger_bonds(rm, rstd):\r\n upper_band = rm + rstd * 2\r\n lower_band = rm - rstd * 2\r\n return upper_band, lower_band", "def gauss_kernel(radius, n_sigmas=8):\n sizex = int(n_sigmas * radius)\n sizey = int(n_sigmas * radius)\n radius = float(radius)\n xc = 0.5 * sizex\n yc = 0.5 * sizey\n y, x = np.mgrid[0:sizey - 1, 0:sizex - 1]\n x = x - xc\n y = y - yc\n x = x / radius\n y = y / radius\n g = np.exp(-0.5 * (x ** 2 + y ** 2))\n return g / (2 * np.pi * radius ** 2) # g.sum()", "def beta_r(r):\n return 0.", "def radius(x) :\r\n return Feature(x, \"radius\")", "def moffat_kernel(n_fwhm,beta,r_s):\n\n x_length = int(n_rs * r_s + 0.5) #Add 0.5 to approximate to nearest integer\n y_length = x_length\n \n\n x, y = mgrid[-x_length:x_length+1, -y_length:y_length+1]\n\t\n m = 1. 
/((1+(x**2+y**2)/r_s**2)**beta)\n\t\t\n\n return m / m.sum()", "def rbf(self, dists):\r\n # Compute the RBF kernel, broadcasting appropriately.\r\n scales = self.sigma_fn(self.sigma)[None, None, None, :]\r\n a, b, c = dists.shape\r\n return torch.exp(-0.5 * dists.view(a, b, c, -1) / scales ** 2)", "def rbf(self, dists):\r\n # Compute the RBF kernel, broadcasting appropriately.\r\n scales = self.sigma_fn(self.sigma)[None, None, None, :]\r\n a, b, c = dists.shape\r\n return torch.exp(-0.5 * dists.view(a, b, c, -1) / scales ** 2)", "def get_blur_kernel(n):\n return [1/n**2] * n**2", "def get_bollinger_bands(rm, rstd, deviation=2):\n upper_band = rm + rstd * deviation\n lower_band = rm - rstd * deviation\n return upper_band, lower_band", "def kernel_rbf(x, y,gamma):\r\n return np.exp(- gamma * np.linalg.norm(x- y)**2)", "def get_bollinger_bands(rm, rstd, degrees):\n\tupper_band = rm + rstd * degrees\n\tlower_band = rm - rstd * degrees\n\treturn upper_band, lower_band", "def get_wl_band(radar_frequency):\n return 0 if (30 < radar_frequency < 40) else 1", "def z_r(b):\n return b/1e-2 + 0.5", "def bilateral(filename,input_image, sigma_spatial, sigma_intensity):\n\t# make a simple Gaussian function taking the squared radius\n\tgaussian = lambda r2, sigma: np.exp(-0.5*r2/sigma**2 )\n\t#print(input_image.shape)\n\tinput_image = cv2.cvtColor(input_image,cv2.COLOR_BGR2RGB)\n\n\t# define the window width to be the 2 time the spatial std. dev. to\n\t# be sure that most of the spatial kernel is actually captured\n\twin_width = int(3*sigma_spatial +1)\n\twgt_sum = np.zeros_like(input_image).astype(np.float64)\n\tresult = np.zeros_like(input_image).astype(np.float64)\n\tout= np.zeros_like(input_image).astype(np.float64)\n\t\n\tfor i in tqdm(range(input_image.shape[-1]),desc=\"Going through color channels\"):\n\t\tnorm_image = normalize(input_image[:,:,i])\n\t\tfor shft_x in range(-win_width,win_width+1):\n\t\t\tfor shft_y in range(-win_width,win_width+1):\n\t\t\t\t# compute the spatial contribution\n\t\t\t\tspatial = gaussian(shft_x**2+shft_y**2, sigma_spatial )\n\t\n\t\t\t\t# shift by the offsets to get image window\n\t\t\t\twindow = np.roll(norm_image, [shft_y, shft_x], axis=[0,1])\n\t\n\t\t\t\t# compute the intensity contribution\n\t\t\t\tcombined_filter = spatial*gaussian( (window-norm_image)**2, sigma_intensity )\n\t\n\t\t\t\t# result stores the mult. 
between combined filter and image window\n\t\t\t\tresult[:,:,i] += window*combined_filter\n\t\t\t\twgt_sum[:,:,i] += combined_filter\n\tout = normalize(result/wgt_sum)\n\n\t# normalize the result and return\n\tplt.imsave(\"outputImages/Bilateral_\"+filename+\"_\"+str(sigma_spatial)+\"_\"+ str(sigma_intensity) + \".png\" ,out,dpi=600)\n\treturn out", "def estimate_r(y, Fs, freq_cutoff, plot_freqz=False):\n Fs_nq = Fs / 2 # Nyquist frequency\n wp = np.min([Fs_nq - 10, freq_cutoff]) # passband edge frequency in Hz\n ws = wp - 2 # stopband edge frequency in Hz\n N, Wn = signal.buttord(wp=wp, ws=ws, gpass=1, gstop=50, fs=Fs) # select filter order\n sos = signal.butter(N, Wn, btype=\"high\", output='sos', fs=Fs) # highpass filter\n\n # Visualize the filter frequency response\n if plot_freqz:\n import matplotlib.pyplot as plt\n w, h = signal.sosfreqz(sos, fs=Fs)\n plt.figure()\n plt.plot(w, 20 * np.log10(np.maximum(np.abs(h), 1e-3)))\n plt.title('Butterworth filter frequency response')\n plt.xlabel('Frequency [Hz]')\n plt.ylabel('Amplitude [dB]')\n plt.margins(0, 0.1)\n plt.grid(which='both', axis='both')\n plt.axvline(Fs_nq, color='green')\n plt.show()\n\n # Apply the high pass filter and estimate observation noise covariance\n y_filt = signal.sosfiltfilt(sos, y)\n R = np.cov(y_filt) * Fs_nq / (Fs_nq - wp) # scale up based on wp\n\n return R", "def question_17():\n rbf = RadialBiasFunction()\n for i in range(20):\n rbf.fit(1.5, 9)\n error_in_lo = rbf.error()\n error_out_lo = rbf.error(in_sample=False)\n rbf.fit(2.0, 9)\n error_in_hi = rbf.error()\n error_out_hi = rbf.error(in_sample=False)\n print(\"Error (in/out) for gamma = 1.5: {0} / {1}\\nError (in/out) for gamma = 2.0: {2} / {3}\\n\"\n .format(error_in_lo, error_out_lo, error_in_hi, error_out_hi))\n rbf.resample()", "def wR(r, rc):\n nr = norm_numba(r)\n return (1 - nr / rc) if nr / rc < 1.0 else 0.0", "def mean_radius(self):\n return (self.semimajor_axis + self.semimedium_axis + self.semiminor_axis) / 3", "def estimate_bandpass(data):\n \n est = filter(data, params.st_bp_window_f, axis=0)\n est = filter(est, params.st_bp_window_t, axis=1)\n \n return est", "def get_bprop_rsqrt(self):\n\n def bprop(x, out, dout):\n grad = F.fill(F.dtype(x), F.shape(x), -0.5) / (F.sqrt(x)*x)\n dx = dout * grad\n return (dx,)\n return bprop", "def create_low_pass_frequency_kernel(im, radius):\n kernel = create_high_pass_frequency_kernel(im, radius)\n kernel = 1 - kernel\n return kernel", "def create_low_pass_frequency_kernel(im, radius):\n kernel = create_high_pass_frequency_kernel(im, radius)\n kernel = 1 - kernel\n return kernel", "def _calc_r2(self):\n sse = np.sum((self.data.y - self.predict(self.data.x))**2)\n sst = np.sum((self.data.y - self.data.y.mean())**2)\n return (1. 
- sse/sst)", "def radial_data(data,annulus_width=1,working_mask=None,x=None,y=None,rmax=None):\n \n# 2010-03-10 19:22 IJC: Ported to python from Matlab\n# 2005/12/19 Added 'working_region' option (IJC)\n# 2005/12/15 Switched order of outputs (IJC)\n# 2005/12/12 IJC: Removed decifact, changed name, wrote comments.\n# 2005/11/04 by Ian Crossfield at the Jet Propulsion Laboratory\n \n import numpy as np\n\n class radialDat:\n \"\"\"Empty object container.\n \"\"\"\n def __init__(self): \n\t self.q75 = None\n\t self.q25 = None\n self.mean = None\n self.std = None\n self.median = None\n self.numel = None\n self.max = None\n self.min = None\n self.r = None\n\t self.fractionAbove = None\n\t self.fractionAboveidx = None\n #---------------------\n # Set up input parameters\n #---------------------\n data = np.array(data)\n \n #if working_mask==None:\n if working_mask is None:\n working_mask = np.ones(data.shape,bool)\n \n npix, npiy = data.shape\n if x is None or y is None:\n x1 = np.arange(-npix/2.,npix/2.)\n y1 = np.arange(-npiy/2.,npiy/2.)\n x,y = np.meshgrid(y1,x1)\n\n r = abs(x+1j*y)\n\n if rmax is None:\n rmax = r[working_mask].max()\n\n #---------------------\n # Prepare the data container\n #---------------------\n dr = np.abs([x[0,0] - x[0,1]]) * annulus_width\n radial = np.arange(rmax/dr)*dr + dr/2.\n nrad = len(radial)\n radialdata = radialDat()\n radialdata.q25 = np.zeros(nrad)\n radialdata.q75 = np.zeros(nrad)\n radialdata.mean = np.zeros(nrad)\n radialdata.std = np.zeros(nrad)\n radialdata.median = np.zeros(nrad)\n radialdata.numel = np.zeros(nrad)\n radialdata.max = np.zeros(nrad)\n radialdata.min = np.zeros(nrad)\n radialdata.r = radial\n radialdata.fractionAboveidx = []\n radialdata.fractionAbove = np.zeros(nrad)\n \n #---------------------\n # Loop through the bins\n #---------------------\n for irad in range(nrad): #= 1:numel(radial)\n minrad = irad*dr\n maxrad = minrad + dr\n thisindex = (r>=minrad) * (r<maxrad) * working_mask\n if not thisindex.ravel().any():\n\tradialdata.q25[irad] = np.nan\n\tradialdata.q75[irad] = np.nan\n radialdata.mean[irad] = np.nan\n radialdata.std[irad] = np.nan\n radialdata.median[irad] = np.nan\n radialdata.numel[irad] = np.nan\n radialdata.max[irad] = np.nan\n radialdata.min[irad] = np.nan\n\tradialdata.fractionAbove[irad] = np.nan\n else:\n datanow = data[thisindex]\n\tidx = np.isinf(datanow)\n\tidx = [not i for i in idx]\n if len(idx) > 0:\n\t\tdatanow = datanow[idx]\n\t\t#print 'DELETE ALL THE THINGS!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'\n\tif len(datanow) != 0:\n\t\tradialdata.q25[irad] = np.percentile(datanow,25)\n\t\tradialdata.q75[irad] = np.percentile(datanow,75)\n \tradialdata.mean[irad] = datanow.mean()\n \tradialdata.std[irad] = datanow.std()\n \tradialdata.median[irad] = np.median(datanow)\n \tradialdata.numel[irad] = datanow.size\n \tradialdata.max[irad] = datanow.max()\n \tradialdata.min[irad] = datanow.min()\n \t\tradialdata.fractionAbove[irad] = (len(np.where(datanow > datanow.mean())[0])/float(len(datanow)))\n \telse:\n\t\t#print 'ALL ZEROS 000000000000000000000000000000000000000000000000000000'\n\t\tradialdata.q25[irad] = np.nan\n \tradialdata.q75[irad] = np.nan\n \tradialdata.mean[irad] = np.nan\n \t\tradialdata.std[irad] = np.nan\n \tradialdata.median[irad] = np.nan\n \tradialdata.numel[irad] = np.nan\n \tradialdata.max[irad] = np.nan\n \tradialdata.min[irad] = np.nan\n \tradialdata.fractionAbove[irad] = np.nan\n #---------------------\n # Return with data\n #---------------------\n \n return radialdata", "def 
get_average_end_radius(self):\n total_length = len(self.pixel_list)\n\n if not total_length:\n return 0\n elif total_length < 5:\n total_radius = 0\n for i in range(total_length):\n total_radius += self.pixel_list[i].radius\n return total_radius/total_length\n else:\n total_radius = 0\n for i in range(total_length-5, total_length):\n total_radius += self.pixel_list[i].radius\n return total_radius/5", "def question_16():\n rbf = RadialBiasFunction()\n for i in range(10):\n rbf.fit(1.5, 9)\n error_in_9 = rbf.error()\n error_out_9 = rbf.error(in_sample=False)\n rbf.fit(1.5, 12)\n error_in_12 = rbf.error()\n error_out_12 = rbf.error(in_sample=False)\n print(\"Error (in/out) for 9 clusters: {0} / {1}\\nError (in/out) for 12 clusters: {2} / {3}\\n\"\n .format(error_in_9, error_out_9,error_in_12, error_out_12))\n rbf.resample()", "def beta_r(r):\n return 1.", "def ComputeRegenerativeBraking(self):\r\n pass", "def difference_of_gauss_kernel(radius, scale_step, n_sigmas=8):\n sizex = int(n_sigmas * scale_step * radius)\n sizey = int(n_sigmas * scale_step * radius)\n radius = float(radius)\n xc = 0.5 * sizex\n yc = 0.5 * sizey\n y, x = np.mgrid[0:sizey - 1, 0:sizex - 1]\n x = x - xc\n y = y - yc\n x1 = x / radius\n y1 = y / radius\n g1 = np.exp(-0.5 * (x1 ** 2 + y1 ** 2))\n g1 = g1 / (2 * np.pi * radius ** 2) # g1.sum()\n x1 = x1 / scale_step\n y1 = y1 / scale_step\n g2 = np.exp(-0.5 * (x1 ** 2 + y1 ** 2))\n g2 = g2 / (2 * np.pi * radius ** 2 * scale_step ** 2) # g2.sum()\n return g1 - g2", "def __weights(self):\n r, c = np.mgrid[:self.size, :self.size] + 0.5\n rad = np.sqrt((r - self.size/2)**2 + (c - self.size/2)**2)\n img = np.zeros((self.size, self.size))\n rmin = np.sqrt(2) * 0.5 * self.damp * rad.max()\n rmax = np.sqrt(2) * 0.5 * rad.max()\n zone = np.logical_and(rad > rmin, rad < rmax)\n img[rad < rmin] = 1.0\n img[rad > rmax] = 0.0\n img[zone] = (rmax - rad[zone]) / (rmax - rmin)\n return img", "def _calc_neighborhood_func(self, curr_it: int, mode: str) -> float:\n return decreasing_rate(\n self.radius_max_,\n self.radius_min_,\n iteration_max=self.max_iterations_,\n iteration=curr_it,\n mode=mode,\n )", "def csrbf(r):\n return num.power((num.maximum(0, 1-r)), 3)*(3*r+1)", "def blur(I, r):\n ones = np.ones_like(I, dtype=np.float32)\n N = box_filter(ones, r)\n ret = box_filter(I, r)\n return ret / N", "def n_band(self):\n pass", "def bands(self) -> int:\n ...", "def _c_numeric(self, rij):\n radial_fun = np.zeros((self.lmax+1, self.nmax))\n radial_fun[0,1] = 1.0\n\n #Get local references to these variables so that we don't need `self`\n #all over in the overbasis calculation below.\n alpha = self.alpha\n rb = self.rb \n for n in range(1, self.nmax+1):\n argbess = 2*alpha*rb[n-1]*rij\n ep = np.exp(-alpha*(rij + rb[n-1])**2)\n em = np.exp(-alpha*(rij - rb[n-1])**2)\n #In the loops below, msb prefix refers to modified spherical bessel.\n for l in range(self.lmax+1):\n if l == 0:\n if argbess == 0.0:\n msb_fi_ki_l = np.exp(-alpha*(rb[n-1]**2 + rij**2))\n else:\n #msb_fi_ki_lm = cosh(arg_bess)/arg_bess\n #msb_fi_ki_l = sinh(arg_bess)/arg_bess\n msb_fi_ki_lm = 0.5 * (em + ep) / argbess\n msb_fi_ki_l = 0.5 * (em - ep) / argbess\n else:\n if argbess == 0.0:\n msb_fi_ki_l = 0.0\n else:\n msb_fi_ki_lmm = msb_fi_ki_lm\n msb_fi_ki_lm = msb_fi_ki_l\n msb_fi_ki_l = msb_fi_ki_lmm-(2*l-1)*msb_fi_ki_lm/argbess\n\n radial_fun[l,n-1] = msb_fi_ki_l #* rb[n-1]\n fc = fcut(rij, self.rcut, self.trans_width)\n return np.dot(radial_fun, self.transformbasis)*fc", "def 
radialApproxEffect(hubdist1,hubdist2,width,length):\n #Grating coordinates\n x,y = np.meshgrid(np.linspace(-width,width,1000),\\\n np.linspace(-length,length,1000))\n y1 = y + hubdist1\n y2 = y + hubdist2\n\n #Convert to period and yaw angle\n period1 = np.sqrt(x**2+y1**2)/hubdist1*160. #nm\n period2 = np.sqrt(x**2+y2**2)/hubdist2*160. #nm\n yaw = blazeYaw(1.5*np.pi/180,2.4,3,160.)\n yaw1 = np.pi/2 - np.arctan(x/y1) + yaw\n yaw2 = np.pi/2 - np.arctan(x/y2) + yaw\n\n #Determine alpha and beta\n beta0,alpha0 = litBetaAlpha(1.5*np.pi/180,2.4,3,160.)\n alpha1 = alpha0 + 3*2.4/period1*np.sin(yaw1)\n alpha2 = alpha0 + 3*2.4/period2*np.sin(yaw2)\n beta1 = beta0 + (3*2.4/period1)*np.cos(yaw1)\n beta2 = beta0 + (3*2.4/period2)*np.cos(yaw2)\n\n #Determine spot shifts\n x1 = hubdist2*(alpha1/beta1)\n x2 = hubdist2*(alpha2/beta2)\n \n\n pdb.set_trace()\n \n return x1,x2", "def getB(self):\n return ((self.bPlusbStar() / self.nPos) + (self.bMinusbStar / self.nNeg)) / 2", "def boringInterlude (radiusIn):\n\n\n import math\n volIn = (4/3) * math.pi * (radiusIn ** 3)\n vol = volIn/ 1728\n return vol", "def rbf_classify(self, point):\n sum = self.b\n for i, center in enumerate(self.centers):\n sum += self.g[i] * np.exp(-self.gamma * distance.euclidean(center, point) ** 2)\n if sum > 0:\n return 1.0\n else:\n return -1.0", "def get_radius(self):\n return self.R", "def get_b(self):\n return ((self.b_plus_bstar / self.n_pos) + (self.b_minus_bstar / self.n_neg)) / 2", "def nse_bound(self) -> float:\n nse_ = self.nse()\n nse_c2m_ = nse_ / (2 - nse_)\n\n return nse_c2m_", "def get_bias(self):", "def b_vs_r(r, t=.97):\n min_b = (np.log(1-t))/(np.log(1-.95**r))\n return min_b", "def kge_bound(self) -> float:\n kge_ = self.kge(return_all=True)[0, :]\n kge_c2m_ = kge_ / (2 - kge_)\n\n return float(kge_c2m_)", "def calc_kernel_influence(src_lon, src_lat, target_lon, target_lat, d):\n Cd = d / (2 * np.pi)\n kernel_exponent = 1.5\n delta_lon = src_lon - target_lon\n delta_lat = src_lat - target_lat\n distance = np.sqrt(np.power(delta_lon, 2) + np.power(delta_lat, 2))\n Kr = Cd / np.power(distance + d, kernel_exponent)\n return Kr", "def get_radius(self):\n return self.r", "def bw_ratio(self):\r\n bw = self.bwstats.mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/(1024.*bw)", "def _compute_bn(self, lvl):\n bn = [0] # number of samples crossing the left/right boundary\n for n in range(lvl):\n # 1. down-sampling of N samples by the factor scl gives (N-1)//scl + 1 samples\n # 2. 
bn[-1]+M-1 is the number of samples acrossing the left/right boundary, with M being the number of freqeuncies\n # => hence after the downsampling the number of boundary crossing samples is:\n bn.append((bn[-1]+self.nfreq-2)//self.scaling+1)\n bn.append(bn[-1]) # repeat the value of the coarsest scale for the approximation coefficient\n return bn[1:][::-1]", "def gauss_kernel(n_fwhm,sigma):\n\n x_length = int(n_fwhm * sigma + 0.5) #Add 0.5 to approximate to nearest integer\n y_length = x_length\n \n \n x, y = mgrid[-x_length:x_length+1, -y_length:y_length+1]\n g = numpy.exp(-(x**2/(2*(float(sigma)**2))+y**2/(2*(float(sigma)**2))))\n return g / g.sum()", "def effective_radius(self, n):\n\n er2 = 5.0 * self.sa / n\n er = np.sqrt(er2)\n\n return er", "def _get_R(self, net_r_amp):\n return np.abs(net_r_amp)**2", "def rbf_kernel(x_1, x_2, l):\n\n\tassert l > 0, \"The hyperparameter l must be > 0\"\n\tdist = euclidean_distances(x_1.reshape(-1,1), x_2.reshape(-1,1))\n\treturn np.exp(dist**2 / -(2*l**2))", "def bisection(self,\n low_bound: float,\n up_bound: float,\n epsilon: float,\n n: int,\n Rmn: dict,\n ytm: dict) -> float:\n for i in range(100):\n y = (low_bound + up_bound) / 2\n if abs(Bootstrapping.f(self,y, n, Rmn, ytm)) < epsilon:\n ym = ytm[list(ytm.keys())[-1]]\n m = list(ytm.keys())[-1]\n for j in range(list(ytm.keys())[-1] + 1, n + 1):\n ytm[j] = ym + (y - ym) * (j - m) / (n - m)\n return ytm\n else:\n if Bootstrapping.f(self,y, n, Rmn, ytm) * Bootstrapping.f(self,up_bound, n, Rmn, ytm) < 0:\n low_bound = y\n elif Bootstrapping.f(self,y, n, Rmn, ytm) * Bootstrapping.f(self,low_bound, n, Rmn, ytm) < 0:\n up_bound = y", "def brate(self):\n try:\n return self.pos / self.runtime\n except ZeroDivisionError:\n return 0", "def FDR_cut(grz):\n\tg,r,z=grz; yrz = (r-z); xgr = (g-r)\n\tibool = (r<23.4) & (yrz>.3) & (yrz<1.6) & (xgr < (1.15*yrz)-0.15) & (xgr < (1.6-1.2*yrz))\n\treturn ibool", "def bc(self, rng=None):\n return self.bcR(rng)", "def get_average_start_radius(self):\n total_length = len(self.pixel_list)\n\n if not total_length:\n return 0\n elif total_length < 5:\n total_radius = 0\n for i in range(total_length):\n total_radius += self.pixel_list[i].radius\n return total_radius/total_length\n else:\n total_radius = 0\n for i in range(5):\n total_radius += self.pixel_list[i].radius\n return total_radius/5", "def get_receptive_field_radius(self):\n raise NotImplementedError()", "def calculate_WR():\n C = df['rating_avg'].mean()\n m = df['rating_count'].quantile(0.9)\n v = df['rating_count']\n R = df['rating_avg']\n return (v/(v+m) * R) + (m/(m+v) * C)", "def radial_BH_octree_kernel_evaluate(rmax, kernel, pts, wts, theta, log=null_log, sort_data=False, bucket_size=11, force_ngrid=None):\n\n if force_ngrid is None:\n ngrid = max(int(1.0/rmax), 1)\n\n # Avoid nasty hashing problems, make sure ngrid&3 == 3\n if ngrid&3!=3 and ngrid >=3:\n ngrid = (ngrid//4)*4 -1 \n else:\n if force_ngrid*rmax>1.0:\n raise Exception('ngrid=%d has cells smaller than rmax=%.7f'%(force_ngrid,rmax))\n ngrid = force_ngrid\n\n print('Using grid of size {:,}^3 bins, building octree down to buckets of size {:,}.'.format(ngrid, bucket_size), file=log)\n tree, sort_idx = build_octrees(pts, bucket_size, ngrid, wts, log)\n print('Initialising kernel', file=log) \n lattice_setup_kernel(rmax, kernel, log)\n print('BH kernel calculation on {:,} pts'.format(len(pts)),file=log)\n t0 = time()\n n_kernels, accel = bh_tree_walk(tree, ngrid, theta, tree.xyzw, log=log)\n dt = time() - t0\n print('Total kernels {:,} for {:,} pts 
at'.format(n_kernels, len(pts)),\n MU.OKBLUE+'{:,} pts/sec'.format(int(len(pts)/dt))+MU.ENDC, file=log)\n\n\n if sort_data:\n # return the sort index along with sorted positions and masses, and corresponding accelerations.\n # If you want to unsort you need to do it yourself\n return n_kernels, sort_idx, accel\n\n # indices for 'un'-sorting\n unsort = empty_like(sort_idx)\n unsort[sort_idx] = arange(len(pts), dtype=np.int32)\n\n return n_kernels, accel[unsort]", "def test_BCGPLVM_rbf_bias_white_kern_2D(self):\n N, input_dim, D = 50, 1, 2\n X = np.random.rand(N, input_dim)\n k = GPy.kern.RBF(input_dim, 0.5, 0.9 * np.ones((1,))) + GPy.kern.Bias(input_dim, 0.1) + GPy.kern.White(input_dim, 0.05)\n K = k.K(X)\n Y = np.random.multivariate_normal(np.zeros(N), K, input_dim).T\n m = GPy.models.BCGPLVM(Y, input_dim, kernel=k)\n self.assertTrue(m.checkgrad())", "def _run_rbf_interpolation(out_dir, layer, bounds, function, smooth):\n # if running scipy methods prepend root dir to out path\n out_dir = OPJ(path_root, out_dir)\n if not os.path.isdir(out_dir):\n print(\n os.path.abspath(out_dir),\n ' does not exist, creating directory.\\n'\n )\n Path(out_dir).mkdir(parents=True, exist_ok=True)\n\n out_file = OPJ(\n out_dir, \n '{time_agg}.tiff'.format(time_agg=layer)\n )\n print(\n '\\nInterpolating {g} point bias ratios for: {t}\\n'.\\\n format(g=grid_var, t=layer),\n 'Using the \"{}\" method\\n'.format(function),\n 'Resolution (pixel size) of output raster: {} degrees'.format(res)\n )\n print( \n 'GeoTIFF raster will be saved to: \\n',\n os.path.abspath(out_file)\n )\n\n\n # get grid extent based on station locations in CSV\n if not bounds:\n bounds = get_subgrid_bounds(in_path, buffer=buffer, grid_res=CS) \n lon_min, lon_max, lat_min, lat_max = bounds\n # fix any minor adjustments to make raster fit gridMET fishnet extent\n # if scale_factor=1 make sure raster pixels align exactly w/gridcells\n # raster extent may exceed fishnet grid to fill gaps for zonal stats\n if scale_factor:\n nxcells = abs(lon_min-lon_max) / (CS*scale_factor)\n nycells = abs(lat_min-lat_max) / (CS*scale_factor)\n remainder_x = int(nxcells) - nxcells\n remainder_y = int(nycells) - nycells\n if abs(remainder_x) > CS:\n remainder_x -= CS * (remainder_x / CS) \n if abs(remainder_y) > CS:\n remainder_y -= CS * (remainder_y / CS)\n lon_min -= remainder_x\n lon_max += CS\n lat_min -= remainder_y\n lat_min -= CS\n \n # check if layer is in summary CSV \n existing_layers = pd.read_csv(in_path).columns\n if not layer in existing_layers:\n print('Column {} does not exist in input CSV:\\n {}'.format(\n layer, in_path),\n '\\nSkipping interpolation.'\n )\n return\n \n # get point station data from summary CSV\n in_df = pd.read_csv(in_path, na_values=[-999])\n lon_pts, lat_pts = in_df.STATION_LON.values, in_df.STATION_LAT.values\n values = in_df[layer].values\n \n # mask out stations with missing data\n if in_df[layer].isnull().sum() > 0:\n mask = in_df[layer].notnull()\n n_missing = in_df[layer].isna().sum()\n # if one point or less data points exists exit\n if len(mask) == n_missing or len(values) - n_missing == 1:\n print('Missing sufficient point data for variable: {} {}'.\\\n format(grid_var, layer),\n '\\nNeed at least two stations with data, skipping.')\n return\n print('Warning:\\n',\n 'Data missing for {} of {} stations for variable: {} {}'.\\\n format(n_missing, len(values), grid_var, layer),\n '\\nproceeding with interpolation.')\n # get locations where ratio is not nan\n values = values[mask]\n lon_pts = lon_pts[mask]\n lat_pts = 
lat_pts[mask]\n\n nx_cells = int(np.round(np.abs((lon_min - lon_max) / CS)))\n ny_cells = int(np.round(np.abs((lat_min - lat_max) / CS)))\n # rbf requires uniform grid (n X n) so \n # extend short dimension and clip later \n nx_cells_out = copy.copy(nx_cells)\n ny_cells_out = copy.copy(ny_cells)\n # gdal requires \"upper left\" corner coordinates\n lat_max_out = copy.copy(lat_max)\n lon_max_out = copy.copy(lon_max)\n # extend short dimension to make square grid\n if not nx_cells == ny_cells:\n diff = np.abs(nx_cells - ny_cells)\n if nx_cells > ny_cells:\n lat_max += diff * CS\n ny_cells += diff\n else:\n lon_max += diff * CS\n nx_cells += diff\n\n if scale_factor == 1:\n # make finer/coarse grid by scale factor\n lons = np.linspace(lon_min, lon_max, \n int(np.round(nx_cells/scale_factor))-1)\n lats = np.linspace(lat_min, lat_max, \n int(np.round(ny_cells/scale_factor))-1)\n # extent for original created by spatial.build_subgrid\n # add one to make sure raster covers full extent\n lons_out = np.linspace(lon_min, lon_max_out, \n int(np.round(nx_cells_out/scale_factor))-1)\n lats_out = np.linspace(lat_min, lat_max_out, \n int(np.round(ny_cells_out/scale_factor))-1)\n\n else:\n # add one extra cell to cover grid buffer extent for upscaling\n # raster extent always >= grid buffer\n lons = np.linspace(lon_min, lon_max, \n int(np.round(nx_cells/scale_factor)))\n lats = np.linspace(lat_min, lat_max, \n int(np.round(ny_cells/scale_factor)))\n lons_out = np.linspace(lon_min, lon_max_out, \n int(np.round(nx_cells_out/scale_factor)))\n lats_out = np.linspace(lat_min, lat_max_out, \n int(np.round(ny_cells_out/scale_factor)))\n\n # if function was 'linear_rbf' \n function = function.replace('_rbf', '')\n # make sampling square grid\n XI, YI = np.meshgrid(lons, lats)\n # apply rbf interpolation\n rbf = Rbf(lon_pts, lat_pts, values, function=function, smooth=smooth)\n ZI = rbf(XI, YI)\n # clip to original extent, rbf array flips axes, and row order... \n ZI_out = ZI[0:len(lats_out),0:len(lons_out)]\n ZI_out = np.flip(ZI_out,axis=0)\n\n #### save scipy interpolated data as raster \n pixel_size = CS * scale_factor\n # number of pixels in each direction\n x_size = len(lons_out)\n y_size = len(lats_out)\n # set geotransform info\n gt = [\n lon_min,\n pixel_size,\n 0,\n lat_max_out,\n 0,\n -pixel_size\n ]\n # make geotiff raster\n driver = gdal.GetDriverByName('GTiff')\n ds = driver.Create(\n out_file,\n x_size, \n y_size, \n 1, \n gdal.GDT_Float32, \n )\n # set projection geographic lat/lon WGS 84\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(4326)\n ds.SetProjection(srs.ExportToWkt())\n # assign spatial dimensions \n ds.SetGeoTransform(gt)\n outband = ds.GetRasterBand(1)\n # save rbf interpolated array as geotiff raster close\n outband.WriteArray(ZI_out)\n ds = None\n\n # calc residuals add to shapefile and in_path CSV, move shape to out_dir\n # only residuals for bias ratios, i.e. not for std dev, etc\n if layer in InterpGdal.default_layers:\n calc_pt_error(in_path, out_dir, layer, grid_var, \n grid_id_name=grid_id_name\n )\n # calculate zonal statistics save means for each gridMET cell\n if z_stats:\n zonal_stats(in_path, out_file, grid_id_name=grid_id_name)\n \n # plot layer's interpolated residuals as bar plot witheach Wx station \n # only produce residual plots for bias ratios, i.e. 
not for std dev, etc\n if res_plot and layer in InterpGdal.default_layers:\n layer = InterpGdal.var_residual_names.get(\n layer, \n layer.replace('mean','res')\n )\n y_label = 'residual (interpolated minus station value)'\n title = 'layer: {} algorithm: {} (RBF) resolution: {} deg.'.format(\n layer, function ,res)\n res_plot_dir = Path(out_dir)/'residual_plots'\n subtitle='parameters: smooth={}'.format(smooth)\n source_file = Path(out_dir)/Path(in_path).name\n\n station_bar_plot(source_file, layer, out_dir=res_plot_dir, \n y_label=y_label, title=title, subtitle=subtitle)", "def bv_to_radius(b_minus_v):\n # Boyajian 2012\n X = b_minus_v\n a0 = 0.3830\n a1 = 0.9907\n a2 = -0.6038\n Y = 0\n # Ignore metallicity\n a3 = 0\n a4 = 0\n a5 = 0\n return (a0 + a1 * X + a2 * X ** 2 + a3 * X * Y +\n a4 * Y + a5 * Y ** 2) * R_sun", "def r2(self) -> float:\n zx = (self.true - np.mean(self.true)) / np.std(self.true, ddof=1)\n zy = (self.predicted - np.mean(self.predicted)) / np.std(self.predicted, ddof=1)\n r = np.sum(zx * zy) / (len(self.true) - 1)\n return float(r ** 2)", "def test_10_kernels(self):\n ra0, dec0 = CRVAL\n res = 0.01 * DEG\n\n # Test zenithal -- (ra0, dec0) is the reference point.\n for proj in ['TAN', 'ZEA']:\n wcsk = coords.get_wcs_kernel(proj, ra0, dec0, res)\n msg = f'Check crpix for {proj}'\n self.assertAlmostEqual(wcsk.wcs.crpix[0], 1, delta=TOL_RAD, msg=msg)\n self.assertAlmostEqual(wcsk.wcs.crpix[1], 1, delta=TOL_RAD, msg=msg)\n\n # Test cylindrical -- pixell puts the crval[1] on the equator\n # and dec0 is used for the conformal latitude.\n for proj in ['CAR', 'CEA']:\n wcsk = coords.get_wcs_kernel(proj, ra0, dec0, res)\n msg = f'Check crpix for {proj}'\n self.assertAlmostEqual(wcsk.wcs.crpix[0], 1, delta=TOL_RAD, msg=msg)\n self.assertNotAlmostEqual(wcsk.wcs.crpix[1], 1, delta=TOL_RAD, msg=msg)\n\n # This is going to break.\n fp = FP(xi =[0., -0.01*DEG],\n eta=[0., -0.01*DEG])\n sight = get_sightline()\n tod = core.AxisManager(core.LabelAxis('dets', ['a']))\n fp = coords.get_footprint(tod, wcs_kernel=wcsk, focal_plane=fp, sight=sight)", "def getMagBoundary(self):\n\n # Get the boundary of magnitude based on the filter\n lowMagnitude = nan\n highMagnitude = nan\n if (self.filter == self.FilterU):\n lowMagnitude = 7.94\n highMagnitude = 14.80\n\n elif (self.filter == self.FilterG):\n lowMagnitude = 9.74\n highMagnitude = 16.17\n\n elif (self.filter == self.FilterR):\n lowMagnitude = 9.56\n highMagnitude = 15.73\n\n elif (self.filter == self.FilterI):\n lowMagnitude = 9.22\n highMagnitude = 15.26\n\n elif (self.filter == self.FilterZ):\n lowMagnitude = 8.83\n highMagnitude = 14.68\n \n elif (self.filter == self.FilterY):\n lowMagnitude = 8.02\n highMagnitude = 13.76\n\n return lowMagnitude, highMagnitude", "def radprojsim_bin(B0, B1, n_samples=1000000, n_iter=1, dz=None,\n gaussian_sigma=None):\n global r_edges \n global n_bins\n\n result = np.zeros(n_bins, dtype=np.float64)\n\n for iter_idx in range(n_iter):\n\n # Simulate the radial displacements using inverse CDF sampling.\n # These are selected with uniform probability from a thin spherical\n # shell\n r = np.cbrt(B0**3 + (B1**3 - B0**3) * np.random.random(size=n_samples))\n\n # Simulate angular displacements by sampling on the surface of \n # the unit sphere\n a = np.random.normal(size=(n_samples, 3))\n a = (a.T / np.sqrt((a**2).sum(axis=1))).T \n\n # Combine radial and angular parts \n a = (a.T * r).T \n\n # If desired, simulate a finite range of observation in z\n if not dz is None:\n hz = dz * 0.5\n\n # Uniform probability 
of detection in *z*\n if gaussian_sigma is None:\n a[:,0] = a[:,0] + np.random.uniform(-hz, hz, size=n_samples)\n a = a[np.abs(a[:,0])<=hz, :]\n\n # Gaussian probability of detection in *z*\n else:\n start = np.random.normal(scale=gaussian_sigma, size=n_samples)\n outside = np.abs(start) > hz\n while outside.any():\n start[outside] = np.random.normal(\n scale=gaussian_sigma, size=outside.sum())\n outside = np.abs(start) > hz\n a[:,0] = a[:,0] + start\n\n # Take the XY displacements \n r = np.sqrt((a[:,1:]**2).sum(axis=1))\n H = np.histogram(r, bins=r_edges)[0].astype(np.float64)\n result += H \n\n result /= (n_iter * n_samples)\n return result", "def set_bandwidth(ops, kernel):\n \n if kernel == 'epanechnikov':\n bw_constant = 2.2\n else:\n bw_constant = 1\n\n n = np.shape(ops[0].traj)[0]\n bandwidth = bw_constant*n**(-1/6)\n\n print('Selected bandwidth: ' + str(bandwidth)+ '\\n')\n\n return bandwidth", "def mb_r(self) -> float:\n # Calculate metric\n n = self.predicted.size\n tot = 0.0\n for i in range(n):\n tot = tot + np.sum(np.abs(self.predicted - self.true[i]))\n mae_val = np.sum(np.abs(self.predicted - self.true)) / n\n mb = 1 - ((n ** 2) * mae_val / tot)\n\n return float(mb)", "def ramp_kernel_real(cutoff, length):\n pos = np.arange(-length, length, 1)\n return cutoff ** 2.0 * (2.0 * np.sinc(2 * pos * cutoff) - np.sinc(pos * cutoff) ** 2.0)", "def dwalls(r,param):\r\n V = param[0]\r\n sig = param[1]\r\n L = param[2]\r\n\r\n a = 1/sig\r\n\r\n x0 = L/2.\r\n y0 = 0.\r\n V0 = 10000*V\r\n Rx = 0.01*L\r\n Ry = 0.6*L\r\n\r\n x = r[0] - x0*np.sign(r[0])\r\n y = r[1] - y0*np.sign(r[1])\r\n\r\n\r\n px = np.sqrt(x**2)\r\n py = np.sqrt(y**2)\r\n try:\r\n f1 = -V0*((np.sign(x)*np.exp((px-Rx)/a))/(a*(np.exp((px-Rx)/a)+1)**2))*(1/(1 + np.exp((py-Ry)/a)))\r\n\r\n x0 = 0.\r\n y0 = L/2.\r\n V0 = 10000*V\r\n Rx = 0.6*L\r\n Ry = 0.01*L\r\n\r\n x = r[0] - x0*np.sign(r[0])\r\n y = r[1] - y0*np.sign(r[1])\r\n px = np.sqrt(x**2)\r\n py = np.sqrt(y**2)\r\n\r\n f2 = -V0*((np.sign(x)*np.exp((Rx+px)/a))/(a*(np.exp(Rx/a)+np.exp(px/a))**2))*(1/(1 + np.exp((py-Ry)/a)))\r\n except RuntimeWarning:\r\n f1 = 0.\r\n f2 = 0.\r\n except FloatingPointError:\r\n f1 = 0.\r\n f2 = 0.\r\n f = f1 + f2\r\n return f", "def f_2b(c):\n Ri = calc_R(*c)\n return Ri - Ri.mean()", "def __guassian_kernel(x, sigma=200):\n return (1 / (sqrt(2.*pi) * sigma)) * exp(-x ** 2 / (2.*sigma**2))", "def phs(x, y, rbfParam) :\n return (x**2 + y**2) ** (rbfParam/2)", "def estimate_radius(self):\n red = self.T[:,:,0] # empirically, the most reliable channel\n\n eye_radius = red.sum(axis=1).max() / 2\n return eye_radius", "def sigma_nfw_rmax(R, r_max, m_x, r_x, c_x, **kwargs):\n r_s = r_x / c_x\n rho_s = (\n m_x / (4 * np.pi * r_x**3) * c_x**3\n / (np.log(1 + c_x) - c_x / (1 + c_x))\n )\n\n prefactor = 2 * r_s * rho_s\n\n if R == r_s:\n sigma = (\n prefactor * (r_max - r_s) * (2 * r_s + r_max)\n / (3 * (r_s + r_max) * (r_max**2 - r_s**2)**0.5))\n\n elif R < r_s:\n x = R / r_s\n prefactor *= 1. / (1 - x**2)**1.5\n sigma = (\n prefactor * (\n np.log(\n x * (r_s + r_max) / (\n r_max + r_s * x**2 - ((1 - x**2) * (r_max**2 - r_s**2 * x**2))**0.5\n )\n ) - ((1 - x**2) * (r_max**2 - r_s**2 * x**2))**0.5 / (r_s + r_max)\n )\n )\n\n elif R > r_s and R < r_max:\n x = R / r_s\n prefactor *= 1. 
/ (x**2 - 1)**1.5\n sigma = (\n prefactor * (\n -np.arctan(\n ((r_max**2 - r_s**2 * x**2) * (x**2 - 1))**0.5\n / (r_max + r_s * x**2)\n )\n + ((x**2 - 1) * (r_max**2 - r_s**2 * x**2))**0.5\n / (r_s + r_max)\n )\n )\n\n else:\n sigma = 0.\n\n return sigma", "def _calculate_filter_parameters(self):\n dt = 1.0 / self._fs\n nl_b_wq = 180.0\n nl_b_wp = 0.14\n nlin_bw = nl_b_wp * self._cf + nl_b_wq\n nlin_phi = 2.0 * numpy.pi * nlin_bw * dt\n nlin_theta = 2.0 * numpy.pi * self._cf * dt\n nlin_cos_theta = numpy.cos(nlin_theta)\n nlin_sin_theta = numpy.sin(nlin_theta)\n nlin_alpha = -numpy.exp(-nlin_phi) * nlin_cos_theta\n nlin_a1 = 2.0 * nlin_alpha\n nlin_a2 = numpy.exp(-2.0 * nlin_phi)\n nlin_z1 = complex(\n (1.0 + nlin_alpha * nlin_cos_theta), -\n (nlin_alpha * nlin_sin_theta))\n nlin_z2 = complex(\n (1.0 + nlin_a1 * nlin_cos_theta), -\n (nlin_a1 * nlin_sin_theta))\n nlin_z3 = complex(\n (nlin_a2 * numpy.cos(2.0 * nlin_theta)), -\n (nlin_a2 * numpy.sin(2.0 * nlin_theta)))\n nlin_tf = (nlin_z2 + nlin_z3) / nlin_z1\n nlin_b0 = abs(nlin_tf)\n nlin_b1 = nlin_alpha * nlin_b0\n\n lin_b_wq = 235.0\n lin_b_wp = 0.2\n lin_bw = lin_b_wp * self._cf + lin_b_wq\n lin_phi = 2.0 * numpy.pi * lin_bw * dt\n lin_c_fp = 0.62\n lin_c_fq = 266.0\n lin_cf = lin_c_fp * self._cf + lin_c_fq\n lin_theta = 2.0 * numpy.pi * lin_cf * dt\n lin_cos_theta = numpy.cos(lin_theta)\n lin_sin_theta = numpy.sin(lin_theta)\n lin_alpha = -numpy.exp(-lin_phi) * lin_cos_theta\n lin_a1 = 2.0 * lin_alpha\n lin_a2 = numpy.exp(-2.0 * lin_phi)\n lin_z1 = complex(\n (1.0 + lin_alpha * lin_cos_theta), -\n (lin_alpha * lin_sin_theta))\n lin_z2 = complex(\n (1.0 + lin_a1 * lin_cos_theta), -\n (lin_a1 * lin_sin_theta))\n lin_z3 = complex(\n (lin_a2 * numpy.cos(2.0 * lin_theta)), -\n (lin_a2 * numpy.sin(2.0 * lin_theta)))\n lin_tf = (lin_z2 + lin_z3) / lin_z1\n lin_b0 = abs(lin_tf)\n lin_b1 = lin_alpha * lin_b0\n\n return [lin_a1, lin_a2, lin_b0, lin_b1, nlin_a1, nlin_a2, nlin_b0,\n nlin_b1]", "def computeBound(A, omega, Q, omega2, Q2, k):\n A = A.todense()\n M = Q2.T.dot(A).dot(Q2)\n R = A.dot(Q2) - Q2.dot(M)\n \n normR = numpy.linalg.norm(R) \n lmbda, U = numpy.linalg.eigh(M)\n L2 = omega[k:]\n\n delta = float(\"inf\")\n \n for i in lmbda: \n for j in L2: \n if abs(i-j) < delta: \n delta = abs(i-j) \n \n #print(lmbda, L2)\n print(\"normR=\" + str(normR), \"delta=\"+ str(delta), \"bound=\"+str(normR/delta))\n \n return normR/delta", "def condensate_belowdew(Rs, Rv, Rsi, Rvi, Bo, Bg, Np, Gp):\n Btg = ((Bg * (1 - (Rs * Rvi))) + (Bo * (Rvi - Rv))) / (1 - (Rv * Rs)) # in RB/STB\n Bto = ((Bo * (1 - (Rv * Rsi))) + (Bg * (Rsi - Rs))) / (1 - (Rv * Rs)) # in RB/scf\n\n Gi = 0\n F = (Np * ((Bo - (Rs * Bg)) / (1 - (Rv * Rs)))) + ((Gp - Gi) * ((Bg - (Rv * Bo)) / (1 - (Rv * Rs))))\n Eg = Btg - Bg[0]\n return(F, Eg)", "def inp_kernel(r, ktype):\n \n if ktype == 'uniform':\n \n if r < 1.:\n return 1./((4./3.)*pi)\n else:\n return 0.\n \n elif ktype == 'sph-anarchy':\n \n if r <= 1.: return (21./(2.*pi)) * ((1. - r)*(1. - r)*(1. - r)*(1. - r)*(1. + 4.*r)) \n else: return 0. \n \n elif ktype == 'gadget-2':\n \n if r < 0.5: return (8./pi) * (1. - 6*(r*r) + 6*(r*r*r))\n elif r < 1.: return (8./pi) * 2 * ((1. - r)*(1. - r)*(1. 
- r))\n else: return 0.\n \n elif ktype == 'cubic':\n \n if r < 0.5: return (2.546479089470 + 15.278874536822 * (r - 1.0) * r * r)\n elif r < 1: return 5.092958178941 * (1.0 - r) * (1.0 - r) * (1.0 - r)\n else: return 0\n \n elif ktype == 'quintic':\n \n if r < 0.333333333: return 27.0*(6.4457752*r*r*r*r*(1.0-r) -1.4323945*r*r +0.17507044)\n elif r < 0.666666667: return 27.0*(3.2228876*r*r*r*r*(r-3.0) +10.7429587*r*r*r -5.01338071*r*r +0.5968310366*r +0.1352817016)\n elif r < 1: return 27.0*0.64457752*(-r*r*r*r*r +5.0*r*r*r*r -10.0*r*r*r +10.0*r*r -5.0*r +1.0)\n else: return 0\n \n else:\n \n print (\"Doesn't recognize the kernel. Input your own kernel in `inp_kernel`\")\n exit()", "def srf(self, step=99e6):\n if self.sideband == 0:\n centres = [self.centre_frequency]\n else:\n centres = [self.centre_frequency-self.sideband,\n self.centre_frequency+self.sideband]\n if self.shape == \"rectangular\":\n edges = [(float(c-self.width), float(c+self.width)) \n for c in centres]\n pp = [numpy.arange(e[0], e[1]+step/2, step) for e in edges]\n f_grid = numpy.concatenate(pp)\n y = numpy.ones(f_grid.size)/f_grid.size\n return (f_grid, y)\n else:\n raise NotImplementedError(\"Unknown shape: {}\".format(self.shape))" ]
[ "0.6161731", "0.61592114", "0.6056328", "0.59708244", "0.5914149", "0.5861118", "0.58004457", "0.5719034", "0.57188016", "0.57011205", "0.5696227", "0.5671052", "0.56710136", "0.5659563", "0.56463724", "0.56430066", "0.5637317", "0.5626428", "0.56258416", "0.5621374", "0.5596587", "0.5590795", "0.5575506", "0.5569796", "0.5569796", "0.5567527", "0.5556907", "0.55548733", "0.55339456", "0.5527909", "0.55263346", "0.55195576", "0.54822755", "0.5481786", "0.5455705", "0.54520917", "0.5449945", "0.5441069", "0.5429834", "0.5429834", "0.5417991", "0.54134256", "0.5404497", "0.5381681", "0.5381659", "0.5380651", "0.53779596", "0.5368898", "0.53649604", "0.536299", "0.5350285", "0.5348545", "0.5340073", "0.53336596", "0.5332025", "0.5325254", "0.5321963", "0.5321434", "0.5313708", "0.53065354", "0.5306279", "0.52995306", "0.5293815", "0.5286751", "0.5274815", "0.52737314", "0.5270141", "0.5260198", "0.5254262", "0.5250361", "0.5245169", "0.5237647", "0.5233315", "0.5227032", "0.52059466", "0.5201224", "0.5196573", "0.5194364", "0.51941967", "0.5180795", "0.517516", "0.5174208", "0.5171944", "0.5170662", "0.5167977", "0.5164461", "0.5162565", "0.5160046", "0.51583", "0.5152393", "0.51468354", "0.5143543", "0.5142134", "0.5141713", "0.5139554", "0.51380074", "0.51370275", "0.5135731", "0.51336443", "0.5130955", "0.5129282" ]
0.0
-1
Perform MeanShift Clustering of data using a flat kernel
def mean_shift(X, bandwidth=None):
    if bandwidth is None:
        bandwidth = estimate_bandwidth(X)
    n_points, n_features = X.shape
    n_clusters = 0
    bandwidth_squared = bandwidth**2
    points_idx_init = np.arange(n_points)
    stop_thresh = 1e-3*bandwidth  # when mean has converged
    cluster_centers = []  # center of clusters

    # track if a points been seen already
    been_visited_flag = np.zeros(n_points, dtype=np.bool)

    # number of points to possibly use as initilization points
    n_points_init = n_points

    # used to resolve conflicts on cluster membership
    cluster_votes = []

    random_state = np.random.RandomState(0)

    while n_points_init:
        # pick a random seed point
        tmp_index = random_state.randint(n_points_init)

        # use this point as start of mean
        start_idx = points_idx_init[tmp_index]
        my_mean = X[start_idx, :]  # intilize mean to this points location

        # points that will get added to this cluster
        my_members = np.zeros(n_points, dtype=np.bool)

        # used to resolve conflicts on cluster membership
        this_cluster_votes = np.zeros(n_points, dtype=np.uint16)

        while True:  # loop until convergence
            # dist squared from mean to all points still active
            # FIXME - this needs to be converted to spherical distances.
            sqrt_dist_to_all = np.sum((my_mean - X)**2, axis=1)

            # points within bandwidth
            in_idx = sqrt_dist_to_all < bandwidth_squared

            # add a vote for all the in points belonging to this cluster
            this_cluster_votes[in_idx] += 1

            my_old_mean = my_mean  # save the old mean
            my_mean = np.mean(X[in_idx, :], axis=0)  # compute the new mean

            # add any point within bandwidth to the cluster
            my_members = np.logical_or(my_members, in_idx)

            # mark that these points have been visited
            been_visited_flag[my_members] = True

            if np.linalg.norm(my_mean-my_old_mean) < stop_thresh:
                # check for merge possibilities
                merge_with = -1
                for c in range(n_clusters):
                    # distance from possible new clust max to old clust max
                    dist_to_other = np.linalg.norm(my_mean - cluster_centers[c])
                    # if its within bandwidth/2 merge new and old
                    if dist_to_other < bandwidth/2:
                        merge_with = c
                        break

                if merge_with >= 0:  # something to merge
                    # record the max as the mean of the two merged
                    # (I know biased twoards new ones)
                    cluster_centers[merge_with] = 0.5 * (my_mean + cluster_centers[merge_with])

                    # add these votes to the merged cluster
                    cluster_votes[merge_with] += this_cluster_votes
                else:  # its a new cluster
                    n_clusters += 1  # increment clusters
                    cluster_centers.append(my_mean)  # record the mean
                    cluster_votes.append(this_cluster_votes)

                break

        # we can initialize with any of the points not yet visited
        points_idx_init = np.where(been_visited_flag == False)[0]
        n_points_init = points_idx_init.size  # number of active points in set

    # a point belongs to the cluster with the most votes
    labels = np.argmax(cluster_votes, axis=0)

    return cluster_centers, labels
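For illustration only, separate from the dataset row itself: a minimal usage sketch of the mean_shift routine above. It assumes NumPy is imported as np and a NumPy version that still provides the np.bool alias used in the listing; the toy blob data and the bandwidth value of 2.0 are invented for the example, and the bandwidth is passed explicitly so the estimate_bandwidth helper referenced in the listing is not needed.

import numpy as np

# Toy example data (hypothetical): three well-separated 2D blobs.
rng = np.random.RandomState(42)
X = np.vstack([
    rng.normal(loc=(0.0, 0.0), scale=0.5, size=(100, 2)),
    rng.normal(loc=(5.0, 5.0), scale=0.5, size=(100, 2)),
    rng.normal(loc=(0.0, 5.0), scale=0.5, size=(100, 2)),
])

# Bandwidth chosen by hand for this sketch, so no estimate_bandwidth helper is required.
centers, labels = mean_shift(X, bandwidth=2.0)

print("found", len(centers), "clusters")
for k, center in enumerate(centers):
    print("cluster", k, "center =", np.round(center, 2), "size =", int(np.sum(labels == k)))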
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cluster_meanshift(self, data, bandwidth=None, bin_seeding=False):\n if bandwidth is None:\n bandwidth = cl.estimate_bandwidth(data)\n\n ms = cl.MeanShift(bandwidth=bandwidth, bin_seeding=bin_seeding)\n ms.fit(data)\n\n labels = ms.labels_\n\n return labels, [np.nan]", "def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,min_bin_freq=1, cluster_all=True, max_iter=300,\n n_jobs=None):\n print(get_bin_seeds(X, bin_seeding))\n # find the points within the sphere\n nbrs = NearestNeighbors(radius=bandwidth, n_jobs=1).fit(X)\n ##########################################parallel computing############################\n center_intensity_dict = {}\n all_res = Parallel(n_jobs=n_jobs)(\n delayed(_mean_shift_single_seed)\n (seed, X, nbrs, max_iter) for seed in seeds)#\n ##########################################parallel computing############################\n\n return cluster_centers, labels", "def kmeans_cluster(X_train_input, n_clusters=100):\r\n from sklearn.cluster import MiniBatchKMeans\r\n image_descriptors = []\r\n [image_descriptors.extend(ORB_feature_extractor(img)) for img in X_train_input]\r\n image_descriptors = np.array(image_descriptors) \r\n \r\n kmeans_model = MiniBatchKMeans(n_clusters=n_clusters, init_size=5*n_clusters,\r\n random_state=34, batch_size=128).fit(image_descriptors)\r\n \r\n return kmeans_model", "def k_clusters(old_ops, max_outputs, mut):\n \n # DM construction\n matrix = starting_centroids(old_ops, max_outputs, mut)\n\n\n # Clustering\n seed = []\n for i in matrix.OPs:\n seed.append(i)\n centroids = cluster(old_ops, seed, mut)\n disto = distortion(centroids, old_ops, mut)\n\n return centroids, disto", "def train_KMean(data: np.array, labels: np.array, n_clusters: int)->None:\n n_examples = np.size(data, 0)\n n_features = np.size(data, 1)\n\n # Scale the data so that Euclidian distance makes sense\n means = np.mean(data, axis = 0)\n stddevs = np.std(data, axis = 0, ddof = 1)\n\n #print(means)\n #print(stddevs)\n\n data_scaled = np.zeros((n_examples, n_features))\n\n for i in range(n_features):\n data_scaled[:, i] = (data[:,i] - means[i]) / stddevs[i]\n\n study_correlation(data_scaled)\n\n # Initialize the centroids\n idx = np.random.randint(n_examples, size = n_clusters)\n centroids = data_scaled[idx, :]\n\n counter = 0\n\n while True:\n\n distances = np.array([[np.sqrt(np.sum(np.square(example-centroid))) for centroid in centroids] for example in data_scaled])\n centroid_idx = np.argmin(distances, axis = 1)\n old_centroids = centroids\n centroids = update_centroids(data_scaled, centroid_idx, n_examples)\n #displacement = get_displacement(old_centroids, centroids)\n displacement = np.linalg.norm(np.array([old - new for old, new in zip(old_centroids, centroids)]))\n\n #assert np.linalg.norm(np.array([old - new for old, new in zip([1, 2, 3, 4], [5, 6, 7, 8])])) == 8\n\n if counter == 0:\n# print(\"Initial displacement = {}\".format(displacement))\n initial_displacement = displacement\n\n counter += 1\n\n if displacement < (initial_displacement / 10000): break\n\n #print(\"Total number of loops before ending : {}\".format(counter))\n converted_predictions = convert_predictions(centroid_idx)\n accuracy = np.mean([p == l for p, l in zip(converted_predictions, labels)])\n print(\"Accuracy = {}\".format(accuracy))\n\n pass", "def naive_sharding(self, ds, k):\n\n n = np.shape(ds)[1]\n m = np.shape(ds)[0]\n centroids = np.mat(np.zeros((k,n)))\n\n # Sum all elements of each row, add as col to original dataset, sort\n composite = np.mat(np.sum(ds, axis=1))\n ds = 
np.append(composite.T, ds, axis=1)\n ds.sort(axis=0)\n\n # Step value for dataset sharding\n step = floor(m/k)\n\n # Vectorize mean ufunc for numpy array\n vfunc = np.vectorize(self._get_mean)\n\n # Divide matrix rows equally by k-1 (so that there are k matrix shards)\n # Sum columns of shards, get means; these column means are centroids\n for j in range(k):\n if j == k-1:\n centroids[j:] = vfunc(np.sum(ds[j*step:,1:], axis=0), step)\n else:\n centroids[j:] = vfunc(np.sum(ds[j*step:(j+1)*step,1:], axis=0), step)\n\n return centroids", "def update_mean(img: np.ndarray, clustermask: np.ndarray):\n\n for k in range(numclusters):\n current_cluster_centers[k, 0, :] = np.mean(img[clustermask==k], axis=0)", "def KmeansSegmentation(image, kernel_sigma, N_classes, N_iter = 1, tol = 10e-6): \n\n if kernel_sigma >= 1:\n image = Denoising(image, kernel_sigma);\n \n nr, nc = image.shape;\n image_vec = image.reshape(nr * nc, 1);\n mask_pos = image_vec > 0;\n X = image_vec[mask_pos].reshape(mask_pos.sum(), 1);\n kmeans = KMeans(n_clusters = N_classes, random_state=0, max_iter = N_iter, tol = tol).fit(X);\n labels = kmeans.labels_; \n \n mask = np.zeros((nr * nc, 1)); \n mask[mask_pos] = labels;\n mask = mask.reshape(nr, nc);\n \n return mask;", "def kmeans(img):\n max_iter = 10\n max_change_rate = 0.02\n dist = sys.float_info.max\n\n clustermask = np.zeros((h1, w1, 1), np.uint8)\n result = np.zeros((h1, w1, 3), np.uint8)\n\n # initializes each pixel to a cluster\n # iterate for a given number of iterations or if rate of change is\n # very small\n initialize(img)\n i = 0\n while i < max_iter and dist > max_change_rate:\n assign_to_current_mean(img, result, clustermask)\n clustermask = update_mean(img, clustermask)\n i += 1\n refill_real(img, result, clustermask, cluster_colors)\n return result", "def cluster_index_2(X):\n \n global_mean = X.mean(axis=0)\n\n sum_squared_distances = (((X - global_mean)**2).sum(axis = 1)).sum()\n #Sum of squared distances of each sample from the global mean\n \n centroids, labels, inertia = k_means(X, 2)\n\n ci = inertia / sum_squared_distances\n\n return ci , labels", "def kmean(encoder,tsne,true_data,true_label):\n enc_output = encoder.predict(true_data)\n kmean = KMeansClustering()\n kmean.fit(enc_output)\n pred = kmean.predict(enc_output)\n accuracy(true_label,pred)\n confusion_matrix(true_label,pred, save_name = \"confusion_matrix_kmean.png\")\n tsne.tsne_plot(true_data,pred,save_data_dir =\"kmean\",save_name=\"kmean\")", "def cluster_mean_shift(vectors):\n ms = MeanShift()\n ms.fit(vectors)\n\n labels = ms.labels_\n labels_unique = np.unique(labels)\n n_clusters = len(labels_unique)\n\n print(\"Discovered {} clusters\".format(n_clusters))\n print(labels)\n\n return labels, n_clusters", "def cluster_feature(feature_mat, k):\n whitened = whiten(feature_mat.transpose())\n centroid, distortion = kmeans(whitened, k)\n\n return centroid, distortion", "def KMeans(dataTable, k, epsilon=0.00001, srcDims = 1000000000000000, iters=20, normData = False):\n #load up the configuration\n kmOptions = KMeansConfig(dataTable,k,epsilon,srcDims)\n \n \n #load and format the table for use.\n data = loadMatrix(dataTable)[:,:kmOptions['sourceDims']]\n \n #check if we should normalise the data (this is really quick and dirty, replace it with something better)\n if normData:\n dmax = amax(data)\n dmin = amin(data)\n data = (data-dmin)/(dmax-dmin+0.00000001)\n \n \n #make our starting point solutions from the dataset\n solutions = [array(random.sample(data,k)) for i in xrange(iters)]\n \n 
#chunk solutions if necessary\n for i in xrange(len(solutions)):\n sol = []\n while len(solutions[i]) > kmOptions['chunkSize']:\n sol.append(solutions[i][:kmOptions['chunkSize']])\n solutions[i] = solutions[i][kmOptions['chunkSize']:]\n sol.append(solutions[i])\n solutions[i] = sol\n \n #create our chunked problem data\n dataChunks = []\n while len(data) > kmOptions['chunkSize']:\n dataChunks.append(data[:kmOptions['chunkSize']])\n data = data[kmOptions['chunkSize']:]\n dataChunks.append(data)\n kNorm = (len(dataChunks)-1)+len(dataChunks[-1])/float(len(dataChunks[0]))\n \n #create the CUDA kernels\n program = SourceModule(open(KernelLocation+\"KMEANS_LABEL.nvcc\").read())\n prg = program.get_function(\"KMEANS_LABEL\")\n program = SourceModule(open(KernelLocation+\"KMEANS_UPDATE.nvcc\").read())\n prg2 = program.get_function(\"KMEANS_UPDATE\")\n t0 = time.time()\n \n #store the resultant performance of each solution here\n results = []\n finalSols = []\n \n #make GPU allocations and support variables\n total = 0.\n dists = [numpy.zeros(kmOptions['chunkSize']).astype(numpy.float32)+10000000000000000. for i in xrange(len(dataChunks))] #this is used as an intermediate step\n labels = [numpy.zeros(kmOptions['chunkSize']).astype(numpy.uint32) for i in xrange(len(dataChunks))] #this is used as an intermediate step\n data_gpu = drv.mem_alloc(dataChunks[0].nbytes)\n k_gpu = drv.mem_alloc(solutions[0][0].nbytes)\n labels_gpu = drv.mem_alloc(labels[0].nbytes)\n dists_gpu = drv.mem_alloc(dists[0].nbytes)\n \n #calculate KMeans\n for sol in solutions:\n t0 = time.time()\n for i in xrange(10000):\n #Step 1: find all the closest labels\n for i in xrange(len(sol)):\n #copy in blank distances, labels, and the label coordinates\n drv.memcpy_htod(k_gpu, sol[i])\n for j in xrange(len(dataChunks)):\n drv.memcpy_htod(data_gpu, dataChunks[j])\n drv.memcpy_htod(labels_gpu, labels[j])\n drv.memcpy_htod(dists_gpu, dists[j])\n prg(k_gpu,\n data_gpu,\n kmOptions[\"dimensions\"],\n labels_gpu,\n dists_gpu,\n kmOptions['k'],\n kmOptions['dataSize'],\n kmOptions['chunkSize'],\n numpy.int64(i*kmOptions['chunkSize']), #k offset\n numpy.int64(j*kmOptions['chunkSize']), #data offset\n kmOptions['maxThreads'],\n block=kmOptions['block'],\n grid=kmOptions['grid'])\n drv.memcpy_dtoh(labels[i], labels_gpu)\n #Step 2: find the new averages\n old_sol = [s.copy() for s in sol]\n for i in xrange(len(sol)):\n #load up a blank set of k matrices\n drv.memcpy_htod(k_gpu, sol[i]*0.)\n for j in xrange(len(dataChunks)):\n drv.memcpy_htod(data_gpu, dataChunks[j])\n drv.memcpy_htod(labels_gpu, labels[j])\n prg2(k_gpu,\n data_gpu,\n kmOptions[\"dimensions\"],\n labels_gpu,\n kmOptions['k'],\n kmOptions['dataSize'],\n kmOptions['chunkSize'],\n numpy.int64(i*kmOptions['chunkSize']), #label offset\n numpy.int64(j*kmOptions['chunkSize']), #data offset\n kmOptions['maxThreads'],\n block=kmOptions['block'],\n grid=kmOptions['grid'])\n drv.memcpy_dtoh(sol[i], k_gpu)\n sol[i] /= kNorm #final normalisation\n #Step 3: check that the update distance is larger than epsilon\n total = 0.\n for j in xrange(len(sol)):\n tmp = sol[j]-old_sol[j]\n tmp = tmp*tmp\n total += sum([sum(t**0.5) for t in tmp])\n if total/kmOptions['dataSize'] < kmOptions['eps']:\n break\n print \"solution done in \",time.time()-t0\n results.append((total,len(results)))\n finalSols.append(numpy.concatenate(sol)[:kmOptions['dataSize']])\n results.sort()\n return finalSols[results[0][1]]", "def SSE_clust(data, transpose=True, do_mask=False): \n mask = 
np.loadtxt('/net/tarea/scratch/Rafael/phd/apogee/python/comb_SkyTel_mask.dat')\n masked = np.where(mask == 1)[0]\n SSE_CLUST = np.zeros(data['nc'])\n if (transpose): sclusters = data['sclusters'].transpose()\n else: sclusters = data['sclusters']\n for NC in range(data['nc']):\n if (do_mask == True): SSE_CLUST[NC] = np.nansum(sclusters[NC][masked]**2)\n else: SSE_CLUST[NC] = np.nansum(sclusters[NC]**2)\n return SSE_CLUST", "def cluster(X=None, datalabels=None, nc=2):\n from sklearn.cluster import KMeans\n from sklearn.cluster import AffinityPropagation\n\n C = KMeans(n_clusters=nc,n_init=10,init='random')\n C.fit(X[:,:1])\n\n #C = AffinityPropagation(preference=-80,damping=0.5).fit(X)\n #cluster_centers_indices = C.cluster_centers_indices_\n\n clust = {}\n for (i, label) in enumerate(C.labels_):\n key = C.cluster_centers_[label][0]\n #print label,key, datalabels[i],X[i][1]\n if not clust.has_key(key):\n clust[key]=[]\n clust[key].append(datalabels[i])\n #print clust\n return C, clust", "def cross_cluster_timeseries(data1, data2, roi_mask_nparray, n_clusters, similarity_metric, affinity_threshold, cluster_method = 'ward'):\n \n \n \n import scipy as sp\n import time\n import sklearn as sk\n from sklearn import cluster, datasets, preprocessing\n from sklearn.cluster import FeatureAgglomeration\n from sklearn.feature_extraction import image\n\n \n \n print(\"Calculating Cross-clustering\")\n print(\"Calculating pairwise distances between areas\")\n \n dist_btwn_data_1_2 = np.array(sp.spatial.distance.cdist(data1.T, data2.T, metric = similarity_metric))\n sim_btwn_data_1_2=1-dist_btwn_data_1_2\n sim_btwn_data_1_2[np.isnan(sim_btwn_data_1_2)]=0\n sim_btwn_data_1_2[sim_btwn_data_1_2<affinity_threshold]=0\n\n print(\"Calculating pairwise distances between voxels in ROI 1 \")\n dist_of_1 = sp.spatial.distance.pdist(sim_btwn_data_1_2, metric = 'euclidean')\n dist_matrix = sp.spatial.distance.squareform(dist_of_1)\n sim_matrix=1-sk.preprocessing.normalize(dist_matrix, norm='max')\n sim_matrix[sim_matrix<affinity_threshold]=0\n\n\n if cluster_method == 'ward':\n # ## BEGIN WARD CLUSTERING CODE \n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n if roi_mask_nparray!='empty':\n #import pdb; pdb.set_trace()\n shape = roi_mask_nparray.shape\n connectivity = image.grid_to_graph(n_x=shape[0], n_y=shape[1],\n n_z=shape[2], mask=roi_mask_nparray)\n \n ward = FeatureAgglomeration(n_clusters=n_clusters, connectivity=connectivity,\n linkage='ward')\n ward.fit(sim_matrix)\n y_pred = ward.labels_.astype(np.int)\n else:\n print(\"Calculating Hierarchical Cross-clustering\")\n ward = FeatureAgglomeration(n_clusters=n_clusters, affinity='euclidean', linkage='ward') \n ward.fit(sim_matrix)\n y_pred = ward.labels_.astype(np.int)\n \n # # END WARD CLUSTERING CODE \n else:\n \n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n #cluster_method== 'spectral':\n #Spectral method\n spectral = cluster.SpectralClustering(n_clusters, eigen_solver='arpack', random_state = 5, affinity=\"precomputed\", assign_labels='discretize') \n spectral.fit(sim_matrix)\n y_pred = spectral.labels_.astype(np.int) \n\n# \n # BEGIN SPECTRAL CLUSTERING CODE \n \n # END SPECTRAL CLUSTERING CODE \n\n\n\n# sim_matrix[np.isnan((sim_matrix))]=0\n# sim_matrix[sim_matrix<0]=0\n# sim_matrix[sim_matrix>1]=1\n\n ## 
BEGIN WARD CLUSTERING CODE \n# print(\"Calculating Hierarchical Cross-clustering\")\n# ward = FeatureAgglomeration(n_clusters=n_clusters, affinity='euclidean', linkage='ward') \n# ward.fit(sim_matrix)\n# y_pred = ward.labels_.astype(np.int)\n# \n ## END WARD CLUSTERING CODE \n \n# # BEGIN SPECTRAL CLUSTERING CODE \n# spectral = cluster.SpectralClustering(n_clusters, eigen_solver='arpack', random_state = 5, affinity=\"precomputed\", assign_labels='discretize') \n# spectral.fit(sim_matrix)\n# y_pred = spectral.labels_.astype(np.int)\n# # END SPECTRAL CLUSTERING CODE \n \n return y_pred", "def run_kmeans(x, nmb_clusters, verbose=False):\n n_data, d = x.shape\n\n # faiss implementation of k-means\n clus = faiss.Clustering(d, nmb_clusters)\n clus.niter = 10\n clus.max_points_per_centroid = 10000000\n res = faiss.StandardGpuResources()\n flat_config = faiss.GpuIndexFlatConfig()\n flat_config.useFloat16 = False\n flat_config.device = 0\n index = faiss.GpuIndexFlatL2(res, d, flat_config)\n\n # perform the training\n clus.train(x, index)\n _, I = index.search(x, 1)\n \n stats = clus.iteration_stats\n losses = np.array([\n stats.at(i).obj for i in range(stats.size())\n ])\n if verbose:\n print('k-means loss evolution: {0}'.format(losses))\n\n return [int(n[0]) for n in I], losses[-1]", "def recluster(cluster, min_size, guard, func):\r\n if cluster.get_length() == 0:\r\n return\r\n if cluster.get_length() <= min_size:\r\n return cluster\r\n sim = func(cluster.get_tweets())\r\n if sim < guard:\r\n kmeans = TweetKMeans(2)\r\n kmeans.set_data(cluster.get_tweets())\r\n return kmeans.start_algorithm()\r\n return cluster", "def cluster_center_update_dataset(spn, dataset):\n\n if isinstance(spn, Categorical):\n\n insert_into_categorical_leaf(spn, np.array([dataset]), np.array([1.0]))\n\n return spn\n elif isinstance(spn, IdentityNumericLeaf):\n\n insert_into_identity_numeric_leaf(spn, np.array([dataset]), np.array([1.0]))\n\n return spn\n elif isinstance(spn, Sum):\n cc = spn.cluster_centers\n\n node_idx = 0\n\n min_dist = np.inf\n min_idx = -1\n for n in spn.children:\n # distance calculation between the dataset and the different clusters\n # (there exist a much faster version on scipy)\n # this? 
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise_distances.html\n #\n proj = projection(dataset, n.scope)\n dist = distance.euclidean(cc[node_idx], proj)\n if dist < min_dist:\n min_dist = dist\n min_idx = node_idx\n\n node_idx += 1\n assert min_idx > -1\n assert min_idx < len(spn.children)\n adapt_weights(spn, min_idx)\n cluster_center_update_dataset(spn.children[min_idx], dataset)\n elif isinstance(spn, Product):\n\n for n in spn.children:\n cluster_center_update_dataset(n, dataset)\n else:\n raise Exception(\"Invalid node type \" + str(type(spn)))\n spn.cardinality += 1", "def Client_K_Means_Orch(log,\n\t\t\t\t train_data,\n\t\t\t\t orig_data,\n\t\t\t\t cluster_range,\n\t\t\t\t silhouette_analysis = False,\n\t\t\t\t silhouette_cluster_range = range(0,0),\n\t\t\t\t train_col_names = None, \n\t\t\t\t x_feature_index = 0,\n\t\t\t\t y_feature_index = 1,\n\t\t\t\t viz = False,\n\t\t\t\t show = False,\n\t\t\t\t viz_name = \"\",\n\t\t\t\t test_name = \"\"):\n\n\n\t\n\tprint(\"\\nData\\n\")\n\t#Strip and replace off any spaces\n\ttest_name = test_name.strip().replace(\" \",\"_\")\n\n\t#Initialize customer segmentation test\n\ttest = CustomerSegmentation(Method = KMeans(), \n\t\t\t\t\t\t\t\tdata = train_data,\n\t\t\t\t\t\t\t\torig_data = orig_data,\n\t\t\t\t\t\t\t\tlog = log, \n\t\t\t\t\t\t\t\ttest_name = test_name)\n\n\t# Set train data and class labels\n\ttest.Preprocess.set_train_data(train_data)\n\n\tprint(\"\\nPCA\\n\")\n\t# Conduct PCA, fit and transformation\n\ttest.Preprocess.PCA_fit(viz = viz, viz_name = viz_name, show = show)\n\ttest.Preprocess.PCA_transform()\n\n\tprint(\"\\nElbow Chart Analysis\\n\")\n\t#Conduct elbow chart analysis\n\ttest.SegMethod.elbow_chart_test(cluster_range, viz = viz,show = show, viz_name = viz_name, profile = True)\n\n\tif silhouette_analysis:\n\t\tprint(\"\\nSilhouette Analysis\\n\")\n\t\t#Conduct Silhouette analysis\n\t\ttest.Preprocess.silhouette_analysis(silhouette_cluster_range, viz = viz, viz_name = viz_name, show = show)\n\n\tprint(\"\\nLog Saving\\n\")\n\t#Save Preprocess and Method logs\n\ttest.Preprocess.PreprocessLog.savePreprocessLog()\n\ttest.SegMethod.MethodLog.saveMethodLog()\n\n\t#Add final masterlog record\n\tlog.addMasterLogRecord(test)", "def image_segmentain(img_flat):\n\tkmeans = KMeans(n_clusters = n_CLUSTERS, random_state = 0).\\\n\t\t\t\t\t\t\t\t\t\t\tfit(img_flat.reshape(-1,1)) \n\t\"\"\"Kmeans lables had issue with masking so center of each cluster\n\tis assigned for corresponding labels\"\"\"\n\n\tkmeans_centers = kmeans.cluster_centers_[kmeans.labels_]\n\n\treturn kmeans_centers.flatten()", "def cluster(n: int, model: str) -> NDArray[int]:\n return MiniBatchKMeans(n).fit_predict(XS[model])", "def kmeans_cluster(\n cn,\n min_k=2,\n max_k=100,\n ):\n\n X = cn.T.values\n ks = range(min_k, max_k + 1)\n\n logging.info(f'trying with max k={max_k}')\n\n kmeans = []\n bics = []\n for k in ks:\n logging.info(f'trying with k={k}')\n model = sklearn.cluster.KMeans(n_clusters=k, init=\"k-means++\").fit(X)\n bic = compute_bic(model, X)\n kmeans.append(model)\n bics.append(bic)\n\n opt_k = np.array(bics).argmax()\n logging.info(f'selected k={opt_k}')\n\n model = kmeans[opt_k]\n\n embedding = umap.UMAP(\n n_neighbors=15,\n min_dist=0.1,\n n_components=2,\n random_state=42,\n metric='euclidean',\n ).fit_transform(cn.fillna(0).values.T)\n\n clusters = pd.DataFrame({\n 'cell_id': cn.columns, 'cluster_id': model.labels_,\n 'umap1': embedding[:, 0], 'umap2': embedding[:, 1]\n })\n\n return clusters", "def 
kmean(X,initial_centroids,max_iters):\n m = np.size(X,0)\n K = np.size(initial_centroids,0)\n centroids = initial_centroids\n idx = np.zeros((m,1))\n for i in range(1,max_iters):\n idx = nearest_cluster(X,centroids)\n centroids = update_centroids(X,idx,K)\n return centroids,idx", "def disaggregate_by_cluster(self):\n # wt = np.zeros((1, self.ds.shape[1]))\n # total = np.zeros((self.n_ahead, self.ds.shape[1]))\n \n agg_cluster_ds = np.zeros((self.n_ahead+1, self.n_clusters))\n agg_cluster_ds[0] = self.ds_agg_by_c[-1]\n agg_cluster_ds[1:] = self.ds_c_for\n cluster_perc_change = np.diff(agg_cluster_ds, axis = 0) / agg_cluster_ds[:-1]\n\n cluster_scaling_vector = np.zeros((2, self.ds.shape[1]))\n\n # break down proportionally -> don't work well\n # for c in range(self.n_clusters):\n # c_m = self.ds.iloc[-self.cluster_n_period:, np.where(self.ds_c == c)[0]]\n # c_sum = sum(c_m)\n # indiv_sum = np.sum(c_m, axis = 0)\n # wt[:,np.where(self.ds_c == c)[0]] = (indiv_sum/c_sum)\n # total[:,np.where(self.ds_c == c)[0]] = np.reshape(\n # np.repeat(self.ds_c_for[:,c], c_m.shape[1]), (self.n_ahead, c_m.shape[1]))\n \n # multiply by the perc change\n \n for i in range(self.ds_c.shape[0]):\n cluster_scaling_vector[:,i] = cluster_perc_change[:,self.ds_c[i]]\n cluster_scaling_vector = cluster_scaling_vector+1\n cluster_scaling_vector = np.array(cluster_scaling_vector)\n \n self.ds_for = self.ds.copy()\n\n for yr in range(self.n_ahead)[::-1]:\n # forecast on foretasted number\n yr_ind = self.ds_for.index[-(yr+1)]\n self.ds_for.ix[yr_ind] = self.ds_for.iloc[-(yr+2),:].values * cluster_scaling_vector[-(yr+1)]\n\n # self.ds_for.iloc[-(self.n_ahead):,:] = self.ds_for.iloc[-(self.n_ahead+1):-1,:].values * np.array(cluster_scaling_vector)\n\n # if negative -> 0\n self.ds_for[self.ds_for < 0] = 0", "def cluster_timeseries(X, roi_mask_nparray, n_clusters, similarity_metric, affinity_threshold, cluster_method = 'ward'):\n \n import sklearn as sk\n from sklearn import cluster, datasets, preprocessing\n import scipy as sp\n import time \n from sklearn.cluster import FeatureAgglomeration\n from sklearn.feature_extraction import image\n\n \n print('Beginning Calculating pairwise distances between voxels')\n \n X = np.array(X)\n X_dist = sp.spatial.distance.pdist(X.T, metric = similarity_metric)\n \n temp=X_dist\n temp[np.isnan(temp)]=0\n tempmax=temp.max()\n \n X_dist = sp.spatial.distance.squareform(X_dist)\n X_dist[np.isnan(X_dist)]=tempmax\n #import pdb;pdb.set_trace()\n sim_matrix=1-sk.preprocessing.normalize(X_dist, norm='max')\n sim_matrix[sim_matrix<affinity_threshold]=0\n #import pdb;pdb.set_trace()\n if cluster_method == 'ward':\n # ## BEGIN WARD CLUSTERING CODE \n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n if roi_mask_nparray!='empty':\n #import pdb; pdb.set_trace()\n shape = roi_mask_nparray.shape\n connectivity = image.grid_to_graph(n_x=shape[0], n_y=shape[1],\n n_z=shape[2], mask=roi_mask_nparray)\n \n ward = FeatureAgglomeration(n_clusters=n_clusters, connectivity=connectivity,\n linkage='ward')\n ward.fit(sim_matrix)\n y_pred = ward.labels_.astype(np.int)\n else:\n print(\"Calculating Hierarchical Clustering\")\n ward = FeatureAgglomeration(n_clusters=n_clusters, affinity='euclidean', linkage='ward') \n ward.fit(sim_matrix)\n y_pred = ward.labels_.astype(np.int)\n \n# # END WARD CLUSTERING CODE \n else:\n \n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n 
print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n #cluster_method== 'spectral':\n #Spectral method\n spectral = cluster.SpectralClustering(n_clusters, eigen_solver='arpack', random_state = 5, affinity=\"precomputed\", assign_labels='discretize') \n spectral.fit(sim_matrix)\n y_pred = spectral.labels_.astype(np.int) \n\n# \n # BEGIN SPECTRAL CLUSTERING CODE \n \n # END SPECTRAL CLUSTERING CODE \n\n\n\n return y_pred", "def cluster(self):\n center_index = np.random.choice(range(100), self.K, replace=False)\n self.centers = np.array([self.X[i] for i in center_index])\n self.cluster_sizes = np.zeros(self.K)\n member_of = np.zeros(100, dtype=int)\n min_dist = np.array([distance.euclidean(self.centers[0], point) for point in self.X])\n self.cluster_sizes[0] = 100\n flag = True\n while flag:\n flag = False\n for i, point in enumerate(self.X):\n for j, center in enumerate(self.centers):\n if member_of[i] != j:\n dist = distance.euclidean(point, center)\n if dist < min_dist[i]:\n flag = True\n current = member_of[i]\n self.cluster_sizes[current] -= 1\n self.cluster_sizes[j] += 1\n member_of[i] = j\n min_dist[i] = dist\n if np.count_nonzero(self.cluster_sizes) != self.K:\n return self.cluster()\n self.centers = np.zeros((self.K, 2), dtype='d')\n for i, point in enumerate(self.X):\n center = member_of[i]\n self.centers[center] += point\n for i, center in enumerate(self.centers):\n center /= self.cluster_sizes[i]", "def kmeans(X, skin_mask, k=3):\n _N_ATTEMPTS = 20\n _MAX_ITER = 300\n _TOL = 0.0001\n\n km = KMeans(\n n_clusters=k, n_init=_N_ATTEMPTS, max_iter=_MAX_ITER, tol=_TOL,\n n_jobs=-1\n )\n\n # Standardise data to [-1, 1]\n X_scaler = preprocessing.StandardScaler()\n if X.ndim > 1:\n X = np.float32(X.reshape(-1, X.shape[1]))\n X_scaled = X_scaler.fit_transform(X)\n\n labels = km.fit_predict(X_scaled)\n else:\n X_scaled = X_scaler.fit_transform(X.reshape(-1, 1))\n labels = km.fit_predict(X_scaled)\n\n centre = km.cluster_centers_\n\n largest_centre_idx = np.argmax(centre)\n for i in range(0, len(labels)):\n if labels[i] == largest_centre_idx:\n labels[i] = 1\n else:\n labels[i] = 0\n\n _lm = np.copy(skin_mask)\n _lm[_lm == 0] = 0\n _lm[_lm == 1] = labels\n lesion_mask = _lm\n\n return lesion_mask", "def cluster_spatial_positioning(data):\n \n n_clusters = len(set(data['clusters'])-{-1}) # since -1 element denotes noice\n if n_clusters <2:\n #Setting cluster angluar features to default\n cdist=[Cluster_Relative_Distances()]\n cdist = pd.DataFrame([o.__dict__ for o in cdist])\n\n elif n_clusters >=2:\n # Here we implement two approaches for measuring distances between clustes:\n # (1) border-boder distances and (2) centroid-centroid distances. \n # We compute dispersion measures for the distances obtained. 
\n \n d = dict(tuple(data.groupby('clusters')))\n d.pop(-1, None)\n\n min_dist_between_clusters=np.row_stack([[np.amin(ss.distance_matrix(np.column_stack([d[i]['X'].array,d[i]['Y'].array]), \n np.column_stack([d[j]['X'].array,d[j]['Y'].array]))) for j in d.keys()] for i in d.keys()])\n min_dist_between_clusters=np.delete(list(set(np.frombuffer(min_dist_between_clusters))) ,0)\n\n cen_dist_between_clusters=ss.distance_matrix(np.row_stack([(np.mean(d[i]['X'].array),np.mean(d[i]['Y'].array)) for i in d.keys()]),\n np.row_stack([(np.mean(d[i]['X'].array),np.mean(d[i]['Y'].array)) for i in d.keys()]))\n cen_dist_between_clusters=np.delete(list(set(np.frombuffer(cen_dist_between_clusters))) ,0)\n\n (avg_bor_bor_dist_cluster,min_bor_bor_dist_cluster,max_bor_bor_dist_cluster,\n std_bor_bor_dist_cluster,CV_bor_bor_dist_cluster,CD_bor_bor_dist_cluster,\n IQR_bor_bor_dist_cluster,Quartile_CD_bor_bor_dist_cluster)= distribution_statistics(min_dist_between_clusters)\n\n (avg_cen_cen_dist_cluster,min_cen_cen_dist_cluster,max_cen_cen_dist_cluster,\n std_cen_cen_dist_cluster,CV_cen_cen_dist_cluster,CD_cen_cen_dist_cluster,\n IQR_cen_cen_dist_cluster,Quartile_CD_cen_cen_dist_cluster)= distribution_statistics(cen_dist_between_clusters)\n\n cdist = [Cluster_Relative_Distances([avg_bor_bor_dist_cluster,min_bor_bor_dist_cluster,max_bor_bor_dist_cluster,\n std_bor_bor_dist_cluster,CV_bor_bor_dist_cluster,CD_bor_bor_dist_cluster,\n IQR_bor_bor_dist_cluster,Quartile_CD_bor_bor_dist_cluster,\n avg_cen_cen_dist_cluster,min_cen_cen_dist_cluster,max_cen_cen_dist_cluster,\n std_cen_cen_dist_cluster,CV_cen_cen_dist_cluster,CD_cen_cen_dist_cluster,\n IQR_cen_cen_dist_cluster,Quartile_CD_cen_cen_dist_cluster])]\n \n cdist = pd.DataFrame([o.__dict__ for o in cdist])\n\n \n return cdist", "def meanshift_cluster_occurrences(X_data, quantile):\n try:\n bandwidth = sklearn.cluster.estimate_bandwidth(\n X_data, quantile=quantile, n_samples=500\n )\n assert bandwidth != 0, '[occur] bandwidth is 0. 
Cannot cluster'\n # bandwidth is with respect to the RBF used in clustering\n # ms = sklearn.cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True, cluster_all=True)\n ms = sklearn.cluster.MeanShift(\n bandwidth=bandwidth, bin_seeding=True, cluster_all=False\n )\n ms.fit(X_data)\n label_arr = ms.labels_\n\n unique_labels = np.unique(label_arr)\n max_label = max(0, unique_labels.max())\n num_orphans = (label_arr == -1).sum()\n label_arr[label_arr == -1] = np.arange(max_label + 1, max_label + 1 + num_orphans)\n except Exception as ex:\n ut.printex(\n ex,\n 'error computing meanshift',\n key_list=['X_data', 'quantile'],\n iswarning=True,\n )\n # Fallback to all from same occurrence\n label_arr = np.zeros(X_data.size)\n return label_arr", "def test_sparse():\n\n rng = np.random.RandomState(0)\n\n X = rng.rand(20, 2)\n X[X < 0.8] = 0\n X_csr = sp.csr_matrix(X)\n\n bisect_means = BisectingKMeans(n_clusters=3, random_state=0)\n\n bisect_means.fit(X_csr)\n sparse_centers = bisect_means.cluster_centers_\n\n bisect_means.fit(X)\n normal_centers = bisect_means.cluster_centers_\n\n # Check if results is the same for dense and sparse data\n assert_allclose(normal_centers, sparse_centers, atol=1e-8)", "def cluster(ops, seeds, mut):\n\n old_centers = []\n centers = copy.deepcopy(seeds)\n\n while (set(centers) != set(old_centers)):\n\n old_centers = copy.deepcopy(centers)\n centers = []\n groups = grouping(old_centers, ops, mut)\n\n for i in range(len(groups)):\n result = group_evaluation(groups[i], mut)\n centers.append(result)\n\n return centers", "def kmeans(img, k):\n # Randomly pick k pixels as initial cluster \"means\"\n # Random indices are picked without replacement; to avoid duplicate means\n n = len(img) \n rand_ind = np.random.choice(n, size=k, replace=False) \n means = img[rand_ind, :].astype(np.float32) \n\n print \"Using Kmeans..\"\n return kmeans_driver(img, means)", "def _cmeans0_kth(data, u_old, c, m, *para):\n\tk = para[0]\n\n\t# Normalizing, then eliminating any potential zero values.\n\tu_old /= np.ones((c, 1)).dot(np.atleast_2d(u_old.sum(axis=0)))\n\tu_old = np.fmax(u_old, np.finfo(np.float64).eps)\n\n\tum = u_old ** m\n\n\t# remain the belonging rate >= the k-th max location of each cluster in um_c\n\tfilter_k = lambda row:row < sorted(row, reverse=True)[k-1]\n\tfail_indices = np.apply_along_axis(filter_k, axis=1, arr=u_old)\n\tum[fail_indices] = 0\n\n\t# Calculate cluster centers\n\t# data1:2861,2; um:30,2861\n\tdata = data.T\n\tcntr = um.dot(data) / (np.ones((data.shape[1],1)).dot(np.atleast_2d(um.sum(axis=1))).T)\n\td = cdistance.get_center_distance(data, cntr)\n\t\n\td = np.fmax(d, np.finfo(np.float64).eps)\n\tjm = (um * d ** 2).sum()\n\n\tu = d ** (- 2. 
/ (m - 1))\n\tu /= np.ones((c, 1)).dot(np.atleast_2d(u.sum(axis=0)))\n\treturn cntr, u, jm, d", "def k_means (X, K):\n K_clusters = initialize_centroids(X, K)\n m = X.shape[0]\n dif = 1\n while (dif > 10**(-7)): # we stop when the centroids almost don't move\n groups = np.empty(m)\n K_clusters_old = K_clusters\n #cluster assignment step\n for i in range(m):\n groups[i] = np.argmin(compute_distance(X[i,:],K_clusters))\n #centroids update step\n for k in range(K):\n K_clusters[k,:] = np.mean(X[groups==k,:],axis=0)\n dif = np.linalg.norm(K_clusters-K_clusters_old, 2) / (np.linalg.norm(K_clusters, 2) + np.linalg.norm(K_clusters_old, 2))\n return groups.astype(int), K_clusters", "def cluster_kmeans(df, k):\r\n # Sample fron the original df\r\n sample_df=df.sample(n = k)\r\n obs, attr= df.shape\r\n # Make copies \r\n copy_df=df.copy()\r\n flag=0\r\n sse_old=0\r\n while (flag==0): \r\n sse=0\r\n Labels=[]\r\n for i in range(0, obs):\r\n dist= []\r\n for j in range(0,k):\r\n #Calculate Eucledian distance\r\n diff=list((df.iloc[i,:]-sample_df.iloc[j,:])**2)\r\n eu_dist=(sum(diff))**(1/attr)\r\n dist.append(eu_dist) \r\n #Add Labels to the observations based on the variable they are close to\r\n label=(dist.index(min(dist)))\r\n Labels.append(label)\r\n # Calculate SSE\r\n sse=sse+((min(dist) )**2)\r\n sse=sse**(1/2)\r\n copy_df['labels']=Labels\r\n # Stopping criteria is change in SSE should be 2 %\r\n if (sse_old !=0):\r\n if(abs(sse_old-sse)/sse_old<=0.05):\r\n flag=1 \r\n return_df=copy_df['labels'].to_frame()\r\n return (return_df, sse)\r\n else:\r\n sse_old=sse\r\n #Empty the sample df\r\n sample_df.drop(sample_df.index, inplace=True)\r\n # Now pick random values from each label and add it to the sample df\r\n for val in range(0,k):\r\n #Create new sample df\r\n sample_df = pd.concat([sample_df, copy_df[copy_df['labels']==val].iloc[:,0:attr].sample(n=1)])\r\n else:\r\n sse_old=sse\r\n #Empty the sample df\r\n sample_df.drop(sample_df.index, inplace=True)\r\n for val in range(0,k):\r\n #Create new sample df \r\n sample_df = pd.concat([sample_df, copy_df[copy_df['labels']==val].iloc[:,0:attr].sample(n=1)])", "def update_mean(img, clustermask):\n flat = img.flatten()\n flat.reshape((int(flat.shape[0] / 3), 3))\n w, h, _ = clustermask.shape\n cluster_assignees={}\n for cid,_ in enumerate(current_cluster_centers):\n cluster_assignees[cid] = []\n for x in range(w):\n for y in range(h):\n cid = clustermask[x, y][0]\n cluster_assignees[cid].append(img[x,y])\n for cid, pixels in cluster_assignees.items():\n current_cluster_centers[cid] = np.mean(np.array(pixels),axis=0)\n return clustermask", "def call(self, inputs, **kwargs):\n inputs_expanded = K.expand_dims(inputs, axis=1)\n #print('inputs_expanded', inputs_expanded.shape)\n #print('robust_mean', self.robust_mean.shape)\n\n x_minus_mu = inputs_expanded - self.robust_mean\n #print('x_minus_mu', x_minus_mu.shape)\n\n left_term = list()\n for i in range(self.n_clusters):\n x = x_minus_mu[:, i, :]\n left = K.dot(x, self.inv_covmat[i])\n left = K.expand_dims(left, axis=1)\n if len(left_term) == 0:\n left_term = left\n else:\n #left_term = K.stack([left_term, left], axis=1)\n left_term = K.concatenate([left_term, left], axis=1)\n\n #print('inv_covmat', self.inv_covmat.shape)\n #print('left_term', left_term.shape)\n\n left_term_T = K.permute_dimensions(left_term, (1, 0, 2))\n x_minus_mu_T = K.permute_dimensions(x_minus_mu, (1, 0, 2))\n #print('x_minus_mu_T', x_minus_mu_T.shape)\n #print('left_term_T', left_term_T.shape)\n\n mahal = K.batch_dot(left_term_T, 
x_minus_mu_T, axes=[2, 2])\n #print('mahal', mahal.shape)\n\n mahal_diagonal = list()\n for i in range(self.n_clusters):\n m = mahal[i, :, :]\n diagonal = tf.linalg.tensor_diag_part(m)\n diagonal = K.expand_dims(diagonal, axis=1)\n\n if len(mahal_diagonal) == 0:\n mahal_diagonal = diagonal\n else:\n #mahal_diagonal = K.stack([mahal_diagonal, diagonal], axis=1)\n mahal_diagonal = K.concatenate(\n [mahal_diagonal, diagonal], axis=1)\n\n #print('mahal_diagonal', mahal_diagonal.shape)\n\n md = K.sqrt(mahal_diagonal)\n #print('md', md.shape)\n\n divide_alpha = md / self.alpha\n\n # the numnerator in q_แป‹j formular in the paper\n numerator = 1.0 / (1.0 + divide_alpha)\n numerator **= (self.alpha + 1.0) / 2.0\n\n denominator = K.sum(numerator, axis=1)\n\n quiu = K.transpose(numerator) / denominator\n quiu = K.transpose(quiu)\n\n #print('quiu', quiu.shape)\n\n return quiu", "def Demo_K_Means_Orch(log,\n\t\t\t\t train_data,\n\t\t\t\t class_label,\n\t\t\t\t cluster_range,\n\t\t\t\t silhouette_cluster_range,\n\t\t\t\t train_col_names = None, \n\t\t\t\t x_feature_index = 0,\n\t\t\t\t y_feature_index = 1,\n\t\t\t\t viz = False,\n\t\t\t\t show = False,\n\t\t\t\t viz_name = \"\",\n\t\t\t\t test_name = \"\"):\n\n\t\t\n\t#Strip and replace off any spaces\n\ttest_name = test_name.strip().replace(\" \",\"_\")\n\n\t#Initialize customer segmentation test\n\ttest = CustomerSegmentation(Method = KMeans(), \n\t\t\t\t\t\t\t\tdata = train_data,\n\t\t\t\t\t\t\t\tlog = log, \n\t\t\t\t\t\t\t\ttest_name = test_name)\n\n\n\t# Set train data and class labels\n\ttest.Preprocess.set_train_data(train_data, \n\t\t\t\t\t\t\t\t col_names = train_col_names)\n\ttest.Preprocess.set_class_label(class_label)\n\n\t# Conduct PCA, fit and transformation\n\ttest.Preprocess.PCA_fit(viz = viz, viz_name = viz_name, show = show)\n\ttest.Preprocess.PCA_transform()\n\n\n\tif viz:\n\t\t#Create cluster plot visualization if requested\n\t\tcluster_plot = cluster_viz(test.train_data, test.class_label, x_feature_index = x_feature_index, y_feature_index = y_feature_index)\n\t\t\n\t\t#Show the plot at runtime if requested\n\t\tif show:\n\t\t\tcluster_plot.show()\n\n\t\t#Save the image\n\t\ttest.Log.saveImage(cluster_plot, \"cluster_plot\", test.viz_folder_name)\n\n\t#Conduct elbow chart analysis\n\ttest.SegMethod.elbow_chart_test(cluster_range, viz = viz,show = show, viz_name = viz_name, profile = True)\n\n\t#Conduct Silhouette analysis\n\t#test.Preprocess.silhouette_analysis(silhouette_cluster_range, viz = viz, viz_name = viz_name, show = show)\n\n\t#Save Preprocess and Method logs\n\ttest.Preprocess.PreprocessLog.savePreprocessLog()\n\ttest.SegMethod.MethodLog.saveMethodLog()\n\n\t#Add final masterlog record\n\tlog.addMasterLogRecord(test)", "def start_algorithm(self):\r\n vectors = self.vectorize_data()\r\n kmeans = KMeans(init='k-means++', n_clusters=self.cluster_amount, n_init=10)\r\n kmeans.fit(vectors)\r\n return self.cluster_tweet(kmeans.labels_)", "def cluster(self):\r\n\t\tself.clusterer.fit(self.koopman_feature_array)\r\n\t\tself.labels = self.clusterer.labels_\r\n\t\tfor j in range(max(self.labels)+1):\r\n\t\t\tself.koop_cluster_list.append([self.koop_list[i] for i in range(len(self.labels)) if self.labels[i] == j])\r\n\t\t\tself.koop_cluster_memb_prob_list.append([self.clusterer.probabilities_[i] for i in range(len(self.labels)) if self.labels[i] == j])", "def assign_to_current_mean(img: np.ndarray, clustermask: np.ndarray) -> float:\n\n rows, cols = img.shape[:2]\n distances = np.zeros((numclusters, 1))\n overall_dist = 0\n\n for i 
in range(rows):\n for j in range(cols):\n distances = distance(img[i, j, :]) # returned shape: (numclusters, 1)\n \n k = np.argmin(distances) # closest cluster\n clustermask.itemset((i, j), k) # update cluster mask\n overall_dist += distances[k, 0] # sum distance\n\n return overall_dist", "def kmeans(img: np.ndarray, max_iter: int = 10, max_change_rate: float = 0.02) -> np.ndarray:\n\n dist = sys.float_info.max\n\n h, w = image.shape[:2]\n clustermask = np.zeros((h, w), np.uint8)\n result = np.zeros((h, w, 3), np.uint8)\n\n # initializes cluster centers\n #initialize_pos(img)\n initialize_pp(img)\n overall_dist = assign_to_current_mean(img, clustermask)\n\n # iterate for a given number of iterations or \n # if a rate of change is very small\n for step in range(max_iter):\n change = abs(overall_dist-dist) / dist\n if (change < max_change_rate):\n break\n\n dist = overall_dist\n update_mean(img, clustermask)\n overall_dist = assign_to_current_mean(img, clustermask)\n\n # update result\n for i in range(h):\n for j in range(w):\n k = clustermask.item((i, j))\n result[i, j, :] = np.array(cluster_colors[k], dtype=np.uint8)\n\n print(\"Total within cluster distance: \", round(overall_dist, 2))\n\n return result", "def integrated_clustering(t_all,y_all,num_of_days=500,period = 1440,trim=10,min_n_clusters = 4, max_n_clusters=10,hierarchical=0):\n\n\n\n all_seg_april = initial_disaggregate(t_all,y_all,num_of_days,period = period)\n \n ''' '''\n all_seg_april_normalized = [np.array(x[0])-np.mean(x[1]) for x in all_seg_april if len(x[1])==3]\n \n ''' filter the empty segments'''\n all_seg_april_normalized = [x for x in all_seg_april_normalized if len(x)>0]\n \n ''' clustering in different ranges will probably have a better result'''\n if hierarchical == 0:\n pass\n elif hierarchical ==1:\n all_seg_april_normalized = [x for x in all_seg_april_normalized if x.mean()>1000]\n else:\n all_seg_april_normalized = [x for x in all_seg_april_normalized if x.mean()<1000]\n \n ''' filter out the positive segments'''\n all_positive_seg_april_normalized = [x for x in all_seg_april_normalized if x.min()>0]\n \n \n all_seg_april_normalized_trim50 = extract_first_n(all_positive_seg_april_normalized, trim)\n cluster_average = []\n \n # find optimal clustering number using silhouette score\n \n optimal_dict = {}\n \n for n_clusters in range(min_n_clusters,max_n_clusters):\n \n y_pred = KMeans(n_clusters=n_clusters).fit_predict(all_seg_april_normalized_trim50)\n\n cluster_average = []\n for i_cluster in range(n_clusters):\n cluster_average.append(\n np.mean([np.mean(x) for i, x in enumerate(all_seg_april_normalized_trim50) if y_pred[i]==i_cluster])\n ) \n\n # sihouette score\n cluster_labels = y_pred\n sample_silhouette_values = silhouette_samples(all_seg_april_normalized_trim50, cluster_labels)\n \n silhouette_avg = silhouette_score(pd.DataFrame(all_seg_april_normalized_trim50), cluster_labels)\n\n optimal_dict[n_clusters] = silhouette_avg +(sample_silhouette_values.min()+sample_silhouette_values.max())/2\n \n # n_clusters will give us the optimal number of clusters\n n_clusters = max(optimal_dict.iteritems(), key=operator.itemgetter(1))[0]\n\n #print n_clusters\n \n y_pred = KMeans(n_clusters=n_clusters).fit_predict(all_seg_april_normalized_trim50)\n\n cluster_average = []\n \n for i_cluster in range(n_clusters):\n cluster_average.append(\n np.mean([np.mean(x) for i, x in enumerate(all_seg_april_normalized_trim50) if y_pred[i]==i_cluster])\n ) \n cluster_average_rank = np.argsort(cluster_average)[::-1]\n rank_map = 
{cluster_average_rank[i_cluster]:i_cluster for i_cluster in range(n_clusters)} # old index:new index\n\n y_pred_old = y_pred\n y_pred = [rank_map[x] for x in y_pred]\n all_seg_per_cluster = [[] for i in range(n_clusters) ]\n for i_seg in range(len(all_seg_april_normalized_trim50)):\n all_seg_per_cluster[y_pred[i_seg]].append(all_seg_april_normalized_trim50[i_seg])\n \n cluster_mean = [[] for i in range(n_clusters) ]\n cluster_std = [[] for i in range(n_clusters) ]\n for i_cluster in range(n_clusters):\n cluster_mean[ i_cluster ] = np.mean(np.array(all_seg_per_cluster[i_cluster]), axis=0)\n cluster_std[ i_cluster ] = np.std(np.array(all_seg_per_cluster[i_cluster]), axis=0)\n \n \n \n \n #cluster_mean_2 = cluster_mean[5:6]\n \n return cluster_mean,cluster_std,n_clusters,all_seg_per_cluster", "def UI_KMeans_Orch(\n\t\t\t\t train_data,\n\t\t\t\t orig_data,\n\t\t\t\t cluster_range,\n\t\t\t\t silhouette_analysis = False,\n\t\t\t\t silhouette_cluster_range = range(0,0),\n\t\t\t\t train_col_names = None, \n\t\t\t\t x_feature_index = 0,\n\t\t\t\t y_feature_index = 1,\n\t\t\t\t viz = True,\n\t\t\t\t show = False,\n\t\t\t\t viz_name = \"\",\n\t\t\t\t test_name = \"\"):\n\n\t#Make directory on the users desktop\n\tsegmentation_folder_name = \"Customer-Segmentation-Test\" + str(dt.datetime.now().strftime(\"_%Y-%m-%d_%H.%M.%S\"))\n\tos.makedirs(str(Path.home()) + \"\\\\Desktop\\\\\" + segmentation_folder_name)\n\n\t#Make the log\n\tlog = Log(\"Master-Log\", \"Preprocess-Log\", \"SegMethod-Log\", directory = str(Path.home()) + \"\\\\Desktop\\\\\" + segmentation_folder_name)\n\t\n\tprint(\"\\nData\\n\")\n\t#Strip and replace off any spaces\n\ttest_name = test_name.strip().replace(\" \",\"_\")\n\n\t#Initialize customer segmentation test\n\ttest = CustomerSegmentation(Method = KMeans(), \n\t\t\t\t\t\t\t\tdata = train_data,\n\t\t\t\t\t\t\t\torig_data = orig_data,\n\t\t\t\t\t\t\t\tlog = log, \n\t\t\t\t\t\t\t\ttest_name = test_name)\n\n\t# Set train data and class labels\n\ttest.Preprocess.set_train_data(train_data)\n\n\tprint(\"\\nPCA\\n\")\n\t# Conduct PCA, fit and transformation\n\ttest.Preprocess.PCA_fit(viz = viz, viz_name = viz_name, show = show)\n\ttest.Preprocess.PCA_transform()\n\n\tprint(\"\\nElbow Chart Analysis\\n\")\n\t#Conduct elbow chart analysis\n\ttest.SegMethod.elbow_chart_test(cluster_range, viz = viz,show = show, viz_name = viz_name, profile = True)\n\n\tif silhouette_analysis:\n\t\tprint(\"\\nSilhouette Analysis\\n\")\n\t\t#Conduct Silhouette analysis\n\t\ttest.Preprocess.silhouette_analysis(silhouette_cluster_range, viz = viz, viz_name = viz_name, show = show)\n\n\tprint(\"\\nLog Saving\\n\")\n\t#Save Preprocess and Method logs\n\ttest.Preprocess.PreprocessLog.savePreprocessLog()\n\ttest.SegMethod.MethodLog.saveMethodLog()\n\n\t#Add final masterlog record\n\tlog.addMasterLogRecord(test)\n\tlog.saveMasterLog()", "def assign_to_current_mean(img, result, clustermask):\n overall_dist = 0\n w, h, _ = img.shape\n for x in range(w):\n for y in range(h):\n ipixel = img[x, y]\n dists = {}\n for i, c in enumerate(current_cluster_centers):\n dists[i] = distance(ipixel, c)\n cid, dist = min(dists.items(), key=lambda d: d[1])\n clustermask[x, y] = cid\n result[x, y] = current_cluster_centers[cid]\n overall_dist += dist\n return overall_dist", "def cluster_rgbxy(im,k, lambda_1 = 0.5, lambda_2 = 0.5):\n print(\"Starting RGBXY K-Means algorithm...\")\n # convert BGR to RGB\n # im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n h, w, d = np.shape(im)\n im_vector = im.reshape((-1, 3))\n arr = [[[j, i] for i in 
range(w)] for j in range(h)]\n arr = np.asarray(arr)\n im_vector_xy = arr.reshape((-1, 2))\n # cluster_centers ~ initialization\n\n # pick random x and y and choose their rgb value\n np.random.seed(100)\n cluster_centers_xy = [np.random.choice(h, k), np.random.choice(w, k)]\n cluster_centers_xy = np.asarray(cluster_centers_xy)\n cluster_centers_xy.reshape((k, -1))\n cluster_centers_xy = cluster_centers_xy.transpose()\n # cluster_centers_rgb = [[im[cluster[0]][cluster[1]] for cluster in cluster_centers_xy]]\n cluster_centers_rgb = []\n for cluster in cluster_centers_xy:\n cluster_centers_rgb.append(im[cluster[0]][cluster[1]])\n cluster_centers_rgb = np.asarray(cluster_centers_rgb)\n # cluster_centers_rgb = im[cluster_centers_xy[:, 0], cluster_centers_xy[:, 1]]\n # cluster_centers = (np.random.choice(255, k*3)).reshape((-1,3))\n\n # dist = np.linalg.norm( np.tile(im_vector[0] , (k,1)) - cluster_centers, axis=1)\n # dist = np.linalg.norm( np.repeat(im_vector, repeats=k, axis=0) - np.tile(cluster_centers , (len(im_vector),1)), axis=1 )\n # dist = dist.reshape(len(dist),-1)\n\n im_vec_dist = np.repeat(im_vector, repeats=k, axis=0)\n im_vec_dist = np.array(im_vec_dist, dtype=np.int16)\n im_vec_dist_xy = np.repeat(im_vector_xy, repeats=k, axis=0)\n im_vec_dist_xy = np.array(im_vec_dist_xy, dtype=np.int16)\n distance_moved = float('inf')\n # convergence threshold\n thresh = 20\n if (k >= 10):\n thresh = 20\n if (k >= 25):\n thresh = 45 # 50\n if (k >= 50):\n thresh = 85 # 90\n if (k >= 100):\n thresh = 300\n if (k >= 250):\n thresh = 430\n final_cluster_points = 0\n\n while (distance_moved > thresh):\n\n # initialize empty cluster bins\n cluster_points = []\n for i in range(k):\n cluster_points.append([])\n\n dist_rgb = np.linalg.norm(im_vec_dist - np.tile(cluster_centers_rgb, (len(im_vector), 1)), axis=1)\n dist_xy = np.linalg.norm(im_vec_dist_xy - np.tile(cluster_centers_xy, (len(im_vector_xy), 1)), axis=1)\n\n dist = lambda_1 * dist_rgb + lambda_2 * dist_xy\n\n # allocate cluster center to each pixel\n it = 0\n for i in range(0, len(dist), k):\n current_dist = dist[i: i + k]\n min_value = min(current_dist)\n min_position = np.argmin(current_dist)\n # segregate the points in k bins\n # append point index\n cluster_points[min_position].append(it)\n it += 1\n\n # recompute cluster centers\n distance_moved = 0\n for i in range(k):\n cp = cluster_points[i]\n # get points\n a = im_vector[cp]\n b = im_vector_xy[cp]\n if (len(a)):\n new_centers = a.mean(axis=0)\n distance_moved += np.linalg.norm(cluster_centers_rgb[i] - new_centers)\n # update new cluster centers\n cluster_centers_rgb[i] = new_centers\n if (len(b)):\n new_centers = b.mean(axis=0)\n distance_moved += np.linalg.norm(cluster_centers_xy[i] - new_centers)\n # update new cluster centers\n cluster_centers_xy[i] = new_centers\n\n # for cp in cluster_points:\n # print(np.shape(cp))\n\n final_cluster_points = cluster_points\n\n print(distance_moved)\n\n index_vector = np.empty([len(im_vector), 1])\n # cp is the point instances\n for i in range(k):\n cp = final_cluster_points[i]\n # print(np.shape(cp))\n index_vector[cp] = i\n segmap = index_vector.reshape((h, w))\n # print(segmap)\n # print(np.shape(segmap))\n # at every location, put the corresponding cluster number\n # segmap = np.empty()\n # segmap is nXm. 
Each value in the 2D array is the cluster assigned to that pixel\n return segmap", "def train(self, data):\n\t\tepsilon = self.epsilon\n\t\ttempDist = 1.0\n\t\tk = self.k\n\t\tcenters = data.rdd.takeSample(False, k, 1)\n\t\ti = 0 \n\t\twhile tempDist > epsilon or self.maxNoOfIteration > i:\n\t\t\ti+=1\t\t\t\n\t\t\tclosest = data.map(lambda p: (closestCluster(p, centers), (np.array(p), 1)))\n \t\t\tpointStats = closest.reduceByKey(lambda x, y: (x[0] + y[0], x[1] + y[1]))\n \t\tnewPoints = pointStats.map(lambda x: (x[0], x[1][0] / float(x[1][1]))).collect()\n \t\ttempDist = sum(np.sum((centers[index] - p) ** 2) for (index, p) in newPoints)\n \t\tfor (ind, p) in newPoints:\n\t\t\t\tcenters[ind] = p\n\t\tself.centers = centers\n\t\treturn self.centers", "def cluster_matrix_average(M, cluster_assignments):\n\n# #TODO FIGURE OUT TEST FOR THIS FUNCTION\n# \n# ## from individual_group_clustered_maps(indiv_stability_list, clusters_G, roi_mask_file)\n# \n# indiv_stability_set = np.asarray([np.load(ism_file) for ism_file in indiv_stability_list])\n# #\n# \n# cluster_voxel_scores = np.zeros((nClusters, nSubjects, nVoxels))\n# for i in range(nSubjects):\n# cluster_voxel_scores[:,i] = utils.cluster_matrix_average(indiv_stability_set[i], clusters_G)\n# ##\n# \n \n\n if np.any(np.isnan(M)):\n #np.save('bad_M.npz', M)\n raise ValueError('M matrix has a nan value')\n\n cluster_ids = np.unique(cluster_assignments)\n vox_cluster_label = np.zeros((cluster_ids.shape[0], cluster_assignments.shape[0]), dtype='float64')\n s_idx = 0\n K_mask=np.zeros(M.shape)\n for cluster_id in cluster_ids:\n #import pdb;pdb.set_trace()\n vox_cluster_label[s_idx, :] = M[:,cluster_assignments == cluster_id].mean(1)\n \n \n \n k = (cluster_assignments == cluster_id)[:, np.newaxis]\n k=k*1\n print('Cluster %i size: %i' % (cluster_id, k.sum()))\n K = np.dot(k,k.T)\n K[np.diag_indices_from(K)] = False\n Ktemp=K*1\n K_mask=K_mask+Ktemp\n #import pdb;pdb.set_trace()\n if K.sum() == 0: # Voxel with its own cluster\n #import pdb; pdb.set_trace()\n vox_cluster_label[k[:,0]] = 0.0\n s_idx += 1\n else:\n Kbool=K.astype(bool)\n vox_cluster_label[s_idx,k[:,0].T] = M[Kbool].mean()\n s_idx += 1\n #import pdb; pdb.set_trace()\n return vox_cluster_label, K_mask", "def shift(self):\n \"\"\"\n shift cluster randomly within bounds of im\n \"\"\"\n r = self.std\n mid = self.mid_pixel #center pixel index of 384x384 image\n delta = self.im_size - self.mid_pixel - r - 10\n \n x = np.random.randint(low=-1*delta,high=delta,size=1)[0]\n y = np.random.randint(low=-1*delta,high=delta,size=1)[0]\n\n self.x += x\n self.y += y\n im_shift = np.roll(self.im,shift=y,axis=0)\n self.im = np.roll(im_shift,shift=x,axis=1)\n \n return", "def KMeansCluster(matrix):\n\n # Possibly need to scale the data first\n data = scale(matrix)\n\n # Approximate the number of clusters using c = root(n/2)\n # num_clusters = int(sqrt(len(matrix) / 2))\n num_clusters = 5\n number_init = 10 # Default\n number_iter = 300\n num_cpus = 2\n\n print \"===================\"\n print \"Training KMeans with (num_clusters, num_init, num_iters, num_cpus)\"\n print num_clusters, number_init, number_iter, num_cpus\n\n # estimator = KMeans(init='k-means++', n_clusters = num_clusters, n_init = number_init)\n # estimator.fit(data)\n # clusters = k_means(data, n_clusters = num_clusters, max_iter=number_iter, n_init = number_iter, \n # init='k-means++', n_jobs = num_cpus)\n clusters = k_means(data, n_clusters = num_clusters, max_iter=number_iter, n_init = number_iter, n_jobs = num_cpus)\n\n\n return 
clusters", "def findClusters(data):\n\tcentroids = data[0, None]\n\tmin_R = 0.1\n\n\tfor _ in range(8):\n\t\tdists = np.linalg.norm(data[:, None, :] - centroids[None, :, :], axis=-1)\n\t\tpotentials = (1 / dists).sum(axis=1)\n\n\t\tnew_c_idx = np.argmin(potentials)\n\n\t\tif np.min(dists[new_c_idx]) < min_R:\n\t\t\t# if this is close to an existing centroid, stop finding centroids\n\t\t\tbreak\n\n\t\tcentroids = np.concatenate([centroids, data[new_c_idx, None]], axis=0)\n\n\tax.scatter(*centroids.T, color='tab:orange')\n\n\t# run a single k-means to find the centroid of each cluster\n\tk = centroids.shape[0]\n\tdists = np.linalg.norm(data[:, None, :] - centroids[None, :, :], axis=-1)\n\tclosest_centroid = np.argmin(dists, axis=-1)\n\n\tfor n in range(k):\n\t\tnew_centroid = data[closest_centroid == n].mean(axis=0)\n\t\tcentroids[n] = new_centroid\n\tprint(centroids)\n\tax.scatter(*centroids.T, color='tab:blue')", "def fit(self, data): \n \"\"\"\n Takes a 2D numpy array of shape=(n, 2), where n is the number of samples in the dataset.\n \n Returns a numpy array that has shape=(n,) that contains the cluster of each data sample,\n where the third channel is used to indicate the cluster of each point in the data set.\n \"\"\"\n old_clusters = self.random_assignment_of_clusters(data)\n while True:\n centroids = self.calculate_centroids(data, old_clusters)\n new_clusters = self.assign_clusters(data, centroids)\n if (new_clusters==old_clusters).all():\n break\n old_clusters=new_clusters \n return new_clusters, centroids", "def nlm_fast(data,FS,BS,sigma,dev = None, proc = None):\n\n if dev is None:\n dev = imgtools.__DEFAULT_OPENCL_DEVICE__\n\n if dev is None:\n raise ValueError(\"no OpenCLDevice found...\")\n\n if proc is None:\n proc = OCLProcessor(dev,absPath(\"kernels/nlm_fast.cl\"),options=\"-D FS=%i -D BS=%i\"%(FS,BS))\n\n img = dev.createImage_like(data)\n\n distImg = dev.createImage_like(data)\n\n distImg = dev.createImage_like(data, mem_flags = \"READ_WRITE\")\n tmpImg = dev.createImage_like(data, mem_flags = \"READ_WRITE\")\n tmpImg2 = dev.createImage_like(data, mem_flags = \"READ_WRITE\")\n\n accBuf = dev.createBuffer(data.size,\n mem_flags = cl.mem_flags.READ_WRITE,\n dtype = np.float32)\n\n weightBuf = dev.createBuffer(data.size,\n mem_flags = cl.mem_flags.READ_WRITE,\n dtype = np.float32)\n\n\n dev.writeImage(img,data);\n dev.writeBuffer(weightBuf,np.zeros_like(data,dtype=np.float32));\n\n for dx in range(BS+1):\n for dy in range(-BS,BS+1):\n proc.runKernel(\"dist\",img.shape,None,img,tmpImg,np.int32(dx),np.int32(dy))\n proc.runKernel(\"convolve\",img.shape,None,tmpImg,tmpImg2,np.int32(1))\n proc.runKernel(\"convolve\",img.shape,None,tmpImg2,distImg,np.int32(2))\n\n proc.runKernel(\"computePlus\",img.shape,None,img,distImg,accBuf,weightBuf,\n np.int32(img.shape[0]),np.int32(img.shape[1]),\n np.int32(dx),np.int32(dy),np.float32(sigma))\n\n if any([dx,dy]):\n proc.runKernel(\"computeMinus\",img.shape,None,img,distImg,accBuf,weightBuf,\n np.int32(img.shape[0]),np.int32(img.shape[1]),\n np.int32(dx),np.int32(dy),np.float32(sigma))\n\n acc = dev.readBuffer(accBuf,dtype=np.float32).reshape(data.shape)\n weights = dev.readBuffer(weightBuf,dtype=np.float32).reshape(data.shape)\n\n return acc/weights", "def kmeans(X, k, iterations=1000):\n\n # Initialize the cluster centroids (C <- centroid \"means\")\n C = initialize(X, k)\n\n if C is None:\n return None, None\n if not isinstance(iterations, int) or iterations <= 0:\n return None, None\n\n # n: number of dada points\n # d: dimension of each 
data point\n n, d = X.shape\n\n # # Initialize the cost/distortion function;\n # # defined as J = sum/n(sum/k(r(ij)*||x(i) - c(j)||**2))\n # J = np.inf\n\n # Iterate over iterations\n for iteration in range(iterations):\n # print(\"iteration:\", iteration)\n\n # Maintain a deep copy of C\n # C_prev = np.array([x for x in C])\n # Another alternative (removes for loop):\n C_prev = np.copy(C)\n\n # OPTION 1: FOR LOOPS\n\n # Initialize the array of pairwise data point-centroid\n # distances with zeros\n # dist = np.zeros((n, k))\n\n # for i in range(n):\n # for j in range(k):\n # dist[i, j] = np.linalg.norm(X[i, ...] - C[j, ...])\n # Note: squared distances can alternatively be inferred\n # directtly from the inner product of (X - C) with itself\n # dist[i, j] = np.inner(X[i,:]-C[j,:], X[i,:]-C[j,:])\n # print(\"dist:\", dist)\n # Squared distances from \"dist\":\n # print(\"dist ** 2:\", dist ** 2)\n\n # OPTION 2: VECTORIZATION\n\n # Convert X into an array suitable for vectorization\n Xv = np.repeat(X, k, axis=0)\n # print(\"Xv:\", Xv)\n # print(\"Xv.shape:\", Xv.shape)\n Xv = Xv.reshape(n, k, d)\n # print(\"Xv:\", Xv)\n # print(\"Xv.shape:\", Xv.shape)\n\n # Convert C into an array suitable for vectorization\n Cv = np.tile(C, (n, 1))\n # print(\"Cv:\", Cv)\n # print(\"Cv.shape:\", Cv.shape)\n Cv = Cv.reshape(n, k, d)\n # print(\"Cv:\", Cv)\n # print(\"Cv.shape:\", Cv.shape)\n\n # Compute the \"dist\" matrix of euclidean distances between\n # data points and centroids; shape (n, k)\n dist = np.linalg.norm(Xv - Cv, axis=2)\n\n # Assign each point of the dataset to a centroid:\n # Evaluate argmin(dist**2) for comparison with k\n # r(ij) = 1 if argmin(dist**2) == j\n # -> point i assigned to centroid k\n # otherwise r(ij) = 0 -> ignore point i wrt centroid k\n clss = np.argmin(dist ** 2, axis=1)\n # print(\"centroid indices:\", clss)\n # print(\"clss.shape:\", clss.shape)\n # Note: here, clss is a 1D array of the unique centroid index\n # to which each point in the dataset as been assigned (closest to);\n # the indices array is used in place of r(ij) in J evaluations\n\n # OPTION 1: EXIT CONDITION BASED ON J_prev == J\n\n # # Make a copy of the previous J value & reinitialize J\n # J_prev = J\n # # J = 0\n\n # # Update J (summing over the n data points),\n # # based on the (shortest) distances inferred from \"indices\"\n # # From \"for\" loop:\n # # for i in range(n):\n # # J += (dist[i, clss[i]] ** 2)\n # # From vectorization:\n # J = np.sum(dist[..., clss] ** 2)\n # # Normalize J to the number of data points to\n # # reduce the computational cost (optional)\n # J /= n\n # # print(\"J:\", J)\n\n # if J == J_prev:\n # # print(\"last iteration:\", iteration)\n # return C, clss\n\n # Move the cluster centroids to the center (mean) of\n # the refined cluster by updating C (centroid coordinates)\n for j in range(k):\n # Infer the array of data point indices that correspond\n # to each assigned cluster centroid\n indices = np.where(clss == j)[0]\n # print(\"indices:\", indices)\n if len(indices) == 0:\n C[j] = initialize(X, 1)\n else:\n C[j] = np.mean(X[indices], axis=0)\n\n # OPTION 2: EXIT CONDITION BASED ON C == C_prev\n\n if (C == C_prev).all():\n # print(\"last iteration:\", iteration)\n return C, clss\n\n # Update clss before returning C, clss\n Cv = np.tile(C, (n, 1))\n Cv = Cv.reshape(n, k, d)\n dist = np.linalg.norm(Xv - Cv, axis=2)\n clss = np.argmin(dist ** 2, axis=1)\n\n return C, clss", "def train_clustermodel_nonsparse(self):\n \n segtimes_df, nonsparse_matrix = 
self.create_nonsparse_matrix(self.data)\n segtimes_df['index']=segtimes_df.index\n nonsparse_matrix['index']=nonsparse_matrix.index\n data_to_scale = pd.merge(segtimes_df, nonsparse_matrix, on=['index'])\n data_scaled = self.scale_matrix(data_to_scale)\n data_to_cluster = data_scaled.drop(columns = ['segment_id','level_0','date','time'])\n \n print('Clustering using nonsparse segment/time matrix and: ' + self.algorithm)\n clusterer = self.clustering_algorithms[self.algorithm]\n self.clustering_model = clusterer.fit(data_to_cluster)\n \n clusters_df = pd.DataFrame(self.clustering_model.labels_, columns = ['cluster_nonsparse'])\n clusters_df['segtimekey'] = clusters_df.index\n segtimes_df['segtimekey'] = segtimes_df.index\n clusters_df = clusters_df.reset_index(drop=True)\n self.clusters_df_final = pd.merge(segtimes_df, clusters_df, on=['segtimekey'])\n self.clusters_df_final['cluster_nonsparse'].value_counts()\n \n today = datetime.date.today()\n filename = self.algorithm + '_nonsparse_cluster_model_' + today.strftime('%Y%m%d') + '.pkl'\n joblib.dump(self.clustering_model, filename)\n \n print('Stored ' + filename)\n \n return self.clustering_model, self.clusters_df_final[['segment_id','date','time','cluster_nonsparse']]", "def kMeans(d, k):\n #First get the random centroids from the data\n newCentroids = getRandomCentroids(d, k)\n #newCentroids = [[-2.0, 1.0], [-2.0, -2.0], [2.0, 2.0], [0.0, 0.0]]\n\n #Get the clusters from these random centroids\n clusters = initiateCentroid(d, newCentroids, k)\n oldCentroids = []\n\n counter = 0\n #While the old centroids are not equal to the new ones\n while oldCentroids != newCentroids:\n #old is equal to new\n oldCentroids = newCentroids\n #Calculate the new centroids\n k, newCentroids = calcCentroids(d, clusters)\n #Calculate the new clusters\n clusters = initiateCentroid(d, newCentroids, k)\n #Count how many iterations\n counter += 1\n\n return counter, clusters", "def shift_kernel(kernel, shape, centre):\n h, w = kernel.shape\n assert(h % 2 == 1)\n assert(w % 2 == 1)\n half_h = np.floor(h/2)\n half_w = np.floor(w/2)\n \n result = np.zeros((shape[0]+2*half_h, shape[1]+2*half_w)) #zero pad to simplify edge handling \n\n ind_h = centre[0] + np.arange(0, 2*half_h+1, dtype='int') \n ind_w = centre[1] + np.arange(0, 2*half_w+1, dtype='int')\n result[ind_h[:,np.newaxis], ind_w] = kernel\n result = result[half_h:-half_h,half_w:-half_w]\n return result", "def k_means_step(X, k, means):\n dists = np.array([np.sum((X - mean) * (X - mean), axis=1) for mean in means]) # k*m\n clusters = np.argmin(dists, axis=0)\n new_means = np.array([np.mean(X[clusters == i, :], axis=0) for i in range(k)])\n return new_means, clusters", "def _cmeans0_ori(data, u_old, c, m, *para):\n\t# Normalizing, then eliminating any potential zero values.\n\tu_old /= np.ones((c, 1)).dot(np.atleast_2d(u_old.sum(axis=0)))\n\tu_old = np.fmax(u_old, np.finfo(np.float64).eps)\n\n\tum = u_old ** m\n\n\t# Calculate cluster centers\n\t# data1:2861,2; um:30,2861\n\tdata = data.T\n\tcntr = um.dot(data) / (np.ones((data.shape[1],1)).dot(np.atleast_2d(um.sum(axis=1))).T)\n\td = cdistance.get_center_distance(data, cntr)\n\t#d = _distance(data, cntr) # euclidean distance\n\n\td = np.fmax(d, np.finfo(np.float64).eps)\n\tjm = (um * d ** 2).sum()\n\n\tu = d ** (- 2. 
/ (m - 1))\n\tu /= np.ones((c, 1)).dot(np.atleast_2d(u.sum(axis=0)))\n\treturn cntr, u, jm, d", "def ksc_toy(A, K):\n m = A.shape[0]\n mem = ceil(np.dot(K, rand(m, 1)))\n cent = np.zeros(shape=(K, A.shape[1]), dtype='float64')\n for iter_ in range(1, 101):\n prev_mem = mem\n for k in range(1, (K +1)):\n cent[(k -1), :] = ksc_center(mem, A, k, cent[(k -1), :])\n for i in range(1, (m +1)):\n x = A[(i -1), :]\n for k in range(1, (K +1)):\n y = cent[(k -1), :]\n dist = dhat_shift(x, y)\n D[(i -1), (k -1)] = dist\n val, mem = np.min(D, np.array([]), 2) # nargout=2\n if norm(prev_mem - mem) == 0:\n break\n return mem, cent", "def wca_mean(X, k, df):\n\t\n\n\t# Intializing the clusters\t\n\tC = dict()\n\tfor cluster in range(k):\n\t C[cluster] = pd.DataFrame()\n\n\t# Calculating the mean vector\n\tmean_vector = X.mean()\n\n\t# Choosing the seed points based on the minimum distance from the mean vector\n\tX['dist_mean'] = X.apply(lambda x: np.linalg.norm(np.asarray(x)- np.asarray(mean_vector)), axis=1)\n\tdist_means = X.sort_values(by='dist_mean')\n\t\n\t# Dropping the the datapoints which have already been assigned as seed\n\tidx_to_drop = dist_means.index[:k]\n\tdist_means.reset_index(drop=True,inplace=True)\n\tX.drop('dist_mean',axis=1,inplace=True)\n\tX.drop(idx_to_drop, inplace=True)\n\n\t# Assigning seed points to the clusters\n\tmu = list()\n\tfor cluster in range(k):\n\t C[cluster] = C[cluster].append(dist_means.iloc[cluster].drop('dist_mean'))\n\t mu.append(C[cluster].mean())\n\t\n\t# Running the algorithm\t\n\t\n\t# Initializing the p-value list which would be used for plotting\n\tpval = dict()\n\n\tfor cluster in range(k):\n\t pval[cluster] = dict()\n\t for i in C[0].columns:\n\t pval[cluster][i] = list()\n\n\t# Algorithm\n\tfor i in tqdm(range(int(len(X)/k)), desc='Iterations: '):\n\t for cluster in range(k):\n\n\t # Calculating the distances from the mean vector of eaimportch cluster (in Descending order)\n\t X['dist_mean'] = X.apply(lambda x: np.linalg.norm(np.asarray(x)- np.asarray(mu[cluster])), axis=1)\n\t dist_means = X.sort_values(by='dist_mean', ascending=False)\n\t idx_to_drop = dist_means.index[0]\n\t dist_means.reset_index(drop=True,inplace=True)\n\t X.drop('dist_mean',axis=1,inplace=True)\n\n\t # Assigning the top value to the cluster\n\t C[cluster] = C[cluster].append(dist_means.iloc[0].drop('dist_mean'))\n\t C[cluster] = C[cluster].reset_index(drop=True)\n\t \n\t # Updating means of each cluster\n\t mu[cluster] = C[cluster].mean()\n\n\t # Remove datapoint from X?\n\t X.drop(idx_to_drop,inplace=True)\n\t \n\t for i in C[0].columns:\n\t pval[cluster][i].append(sc.ks_2samp(C[cluster][i],df.drop('target',axis=1)[i])[1])\n\n\treturn(C,pval)", "def kmeans(data, initial=None, K=2, distfn_method='L2', centroidfn_method='mean',\n VERBOSE=True):\n def assignment(data, assigns, means, distfn):\n \"\"\" For each observation A in DATA, assign A to the closest\n mean in MEANS, by mutating ASSIGNS.\n \"\"\"\n for i in xrange(data.shape[0]):\n bestidx, mindist = None, None\n for idx, mean in enumerate(means):\n dist = distfn(data[i,:], mean)\n if bestidx == None or dist < mindist:\n bestidx = idx\n mindist = dist\n assigns[i] = bestidx\n return assigns\n def update_means(data, assigns, means, distfn, centfn):\n \"\"\" For the clustering specified by ASSGNS, compute new means\n by mutating MEANS.\n \"\"\"\n for i in xrange(len(means)):\n rows = data[np.where(assigns == i)]\n means[i] = centfn(rows)\n return means\n if distfn_method == 'L2':\n distfn = lambda a,b: np.linalg.norm(a-b)\n 
elif distfn_method == 'vardiff':\n distfn = vardiff\n else:\n distfn = lambda a,b: np.linalg.norm(a-b)\n if centroidfn_method == 'mean':\n centroidfn = np.mean\n elif centroidfn_method == 'median':\n centroidfn = np.median\n else:\n centroidfn = np.mean\n\n if initial == None:\n initial_idxs = []\n _len = range(len(data))\n for _ in xrange(K):\n _i = random.choice(_len)\n while _i in initial_idxs:\n _i = random.choice(_len)\n initial_idxs.append(_i)\n initial = data[initial_idxs]\n if VERBOSE:\n print \"...initial means:\", initial\n means = initial\n assigns = np.zeros(data.shape[0])\n done = False\n iters = 0\n while not done:\n if VERBOSE:\n print \"...kmeans iteration\", iters\n # 1.) Assignment of data to current means\n prev_assigns = assigns.copy()\n assigns = assignment(data, assigns, means, distfn)\n # 2.) Halt if assignments don't change\n if np.all(np.equal(prev_assigns, assigns)):\n done = True\n else:\n # 3.) Re-compute means from new clusters\n means = update_means(data, assigns, means, distfn, centroidfn)\n iters += 1\n return assigns", "def cluster_centroids(self,mydata, clusters, k=None):\n\t\tif k is None:\n\t\t\tk = np.max(clusters) + 1\n\t\tresult = np.empty(shape=(k,) + mydata.shape[1:])\n\t\tfor i in range(k):\n\t\t\tnp.mean(mydata[clusters == i], axis=0, out=result[i])\n\t\treturn result", "def k_means_segment(image_values: np.ndarray, k=3, initial_means=None):\n if initial_means is None:\n initial_means = get_initial_means(image_values.reshape(-1, 3), k)\n r, c, ch = image_values.shape\n new_values = image_values.copy().reshape(-1, 3)\n while True:\n next_means, clusters = k_means_step(new_values, k, initial_means)\n diff = np.sum(next_means - initial_means)\n if not diff:\n break\n initial_means = next_means\n for i, mean in enumerate(initial_means):\n new_values[clusters == i] = mean\n return new_values.reshape(r, c, ch)", "def recalculate_centers(data, k, clusters):\n centers = []\n for k_i in range(k):\n inds = [i for i, j in enumerate(clusters) if j == k_i]\n n = np.take(data, inds, axis=0)\n if len(inds) == 0:\n i = np.random.randint(len(data))\n centers.append((data[i,0], data[i,1]))\n\n elif len(inds) < 2: \n centers.append((n[0][0], n[0][1]))\n else:\n result = np.sum(n, axis=1)/len(inds)\n centers.append((result[0], result[0]))\n return centers", "def kmeans_2D(data, initial=None, K=2, distfn_method='L2',\n assigns=None,\n MAX_ITERS=200, VERBOSE=True):\n def assignment(data, assigns, means, distfn):\n \"\"\" For each observation A in DATA, assign A to the closest\n mean in MEANS, by mutating ASSIGNS.\n \"\"\"\n for i in xrange(data.shape[0]):\n bestidx, mindist = None, None\n for idx, mean in enumerate(means):\n I = data[i,:,:]\n try:\n dist = distfn(I, mean)\n except:\n traceback.print_exc()\n pdb.set_trace()\n if dist == np.nan:\n print \"Uhoh, nan dist.\"\n pdb.set_trace()\n if bestidx == None or dist < mindist:\n if dist == mindist:\n # To prevent cycles, always tie-break via smallest\n # index.\n bestidx = min(bestidx, idx)\n else:\n bestidx = idx\n mindist = dist\n assigns[i] = bestidx\n return assigns\n def update_means(data, assigns, means):\n \"\"\" For the clustering specified by ASSGNS, update MEANS. 
\"\"\"\n for i in xrange(len(means)):\n cluster_i = data[np.where(assigns == i)]\n if len(cluster_i) == 0:\n # Empty cluster - reinitialize with a random datapoint\n print \"...Empty cluster for mean {0}, reinitializing.\".format(i)\n means[i] = random.choice(data)\n continue\n mean_i = mean_nan(cluster_i)\n if len(mean_i[~np.isnan(mean_i)]) == 0:\n print \"Uhoh, only NaN's here.\"\n pdb.set_trace()\n means[i] = mean_i\n return means\n def init_means(data):\n initial_idxs = []\n _len = range(data.shape[0])\n for _ in xrange(K):\n _i = random.choice(_len)\n while _i in initial_idxs:\n _i = random.choice(_len)\n initial_idxs.append(_i)\n return initial_idxs\n\n distfn = _get_distfn(distfn_method)\n if initial == None:\n means = data[init_means(data)]\n else:\n means = initial\n # TODO: Why infinite loop?\n #initial_idxs = [np.array([16]), np.array([23])]\n if VERBOSE:\n print \"...initial means:\", means\n assigns = np.zeros(data.shape[0])\n done = False\n iters = 0\n prevprev_assigns = None\n while not done:\n if VERBOSE:\n print \"...kmeans iteration\", iters\n if iters >= MAX_ITERS:\n print \"...Exceeded MAX_ITERS:\", MAX_ITERS\n done = True\n # 1.) Assignment of data to current means\n prev_assigns = assigns.copy()\n assigns = assignment(data, assigns, means, distfn)\n # 2.) Halt if assignments don't change\n if np.all(np.equal(prev_assigns, assigns)):\n done = True\n elif prevprev_assigns != None and np.all(np.equal(prevprev_assigns, assigns)):\n print \"...len-2 Cycle detected, restarting...\"\n means = update_means(data, assigns, means)\n iters += 1\n #means = data[init_means(data)]\n #assigns = np.zeros(data.shape[0])\n #prevprev_assigns = None\n else:\n # 3.) Re-compute clusters from new clusters\n means = update_means(data, assigns, means)\n prevprev_assigns = prev_assigns\n iters += 1\n if np.all(assigns == assigns[0]):\n # Currently, this happens if all elements in DATA are 'too close',\n # i.e. 
distfn always outputs 0.0.\n print \"Degenerate clustering detected - splitting evenly.\"\n _chunk = int(len(assigns) / K)\n out = np.zeros(data.shape[0])\n for i in xrange(K-1):\n out[i*_chunk:(i+1)*_chunk] = i\n out[(K-1)*_chunk:] = (K-1)\n return out\n return assigns", "def ComputeKmeans(x, y, label, total_data, num_centroids):\n matrix = np.column_stack((x, y))\n data = np.zeros((total_data, 3))\n data[:, :2] = matrix\n s = False\n while (not s):\n try:\n c_test = num_centroids # number of centroids guessed\n centroidx = []\n centroidy = []\n flag = False\n centroidupx = np.zeros(c_test)\n centroidupy = np.zeros(c_test)\n comp = np.zeros(int(c_data))\n for i in xrange(c_test):\n if ((comp.sum() == c_data) and (i < c_test)):\n comp = np.zeros(int(c_data))\n while (not flag):\n j = np.random.randint(0, int(c_data))\n if comp[j] == 0:\n comp[j] = 1\n flag = True\n centroidx = np.append(centroidx, np.random.normal(np.random.randint(10, 60), np.random.randint(5, 10), 1))\n centroidy = np.append(centroidy, np.random.normal(np.random.randint(10, 60), np.random.randint(5, 10), 1))\n flag = False\n centroids = np.column_stack((centroidx, centroidy))\n new_label_new = np.zeros(matrix[:, 0].size)\n new_label_old = np.ones(matrix[:, 0].size)\n dist = np.zeros(centroids[:, 0].size)\n iter = 0\n # looping until no data point were reassingned\n while (iter < 50):\n a = np.zeros((c_test, 1))\n for i in xrange(matrix[:, 0].size):\n for j in xrange(centroids[:, 0].size):\n dist[j] = np.sqrt(sum((matrix[i, :] - centroids[j, :]) ** 2))\n new_label_new[i], = np.where(dist == dist.min())\n if (np.array_equal(new_label_new, new_label_old)):\n break;\n s = True\n else:\n new_label_old = np.copy(new_label_new)\n unique = np.unique(new_label_new)\n data[:, 2] = new_label_new\n for i in xrange(new_label_new.size):\n for j in xrange(unique.size):\n if (new_label_old[i] == unique[j]):\n a[j] = a[j] + 1\n\n centroidup = npi.GroupBy(data[:, 2]).sum(data)[1]\n centroidup = centroidup[:, :2]\n centroids = centroidup / a\n centroidupx = centroids[:, 0]\n centroidupy = centroids[:, 1]\n s = True\n\n plt.scatter(x, y, c=new_label_new, cmap='rainbow')\n plt.scatter(centroidx, centroidy, c='green', marker='8')\n plt.scatter(centroidupx, centroidupy, c='black')\n plt.grid(True)\n plt.show()\n iter = iter + 1\n\n except Exception as e:\n print (str(e))\n return matrix, centroids, data", "def train(self):\n\n print \"==> Running Kmeans on data set of shape: {}\".format(self.data.shape)\n km = KMeans(n_clusters = self.n_clusters)\n km.fit(self.data.values)\n self.labels = km.labels_\n self.inertia = km.inertia_", "def trainKMeans_getOutputs(top_level_model, cluster_num):\n top_level_model.train_kmeans()\n top_level_model.print_outputs()", "def kmeans(X, n_clust):\n\n X = scale(X)\n estimator = KMeans(init = 'k-means++', n_clusters = n_clust, n_init = 10, verbose = 2)\n \n estimator.fit(X)\n labels = estimator.predict(X)\n return labels", "def kmeans_007():\n n_centroids = 5000\n s = 50\n crop = 200\n # Originally, 1600 centroids for 400,000 patches, or 250 patches per centroid\n # 800000 / 5000 = will give us 160 patches per centroid\n n_patches = 800000\n rf_size = 20\n # 31 x 31 = 961 patches per image, which is 10x more patches than the original settings\n # If we set stride 2, then it's 16 x 16 patches = 256, only twice as many patches\n stride = 2\n train_x_crop_scale = CropScaleImageTransformer(training=True,\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n images = train_x_crop_scale.transform()\n 
patch_extractor = models.KMeansFeatures.PatchSampler(n_patches=n_patches,\n patch_size=rf_size,\n n_jobs=-1)\n patches = patch_extractor.transform(images)\n\n kmeans_generator = KMeansFeatureGenerator(n_centroids=n_centroids,\n rf_size=rf_size,\n result_path='data/mdl_kmeans_007'.format(n_centroids),\n n_iterations=20,\n n_jobs=-1,)\n kmeans_generator.fit(patches)\n\n del patches\n gc.collect()\n\n train_x = kmeans_generator.transform(images, save_to_file='data/data_kmeans_features_007.npy', stride_size=stride, memmap=True)\n train_y = classes.train_solutions.data\n # Unload some objects\n del images\n gc.collect()\n\n wrapper = ModelWrapper(models.Ridge.RidgeRFEstimator, {'alpha': 500, 'n_estimators': 250}, n_jobs=-1)\n wrapper.cross_validation(train_x, train_y, parallel_estimator=True)\n\n \"\"\"\n wrapper.fit(train_x, train_y)\n\n test_x_crop_scale = CropScaleImageTransformer(training=False,\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n\n test_images = test_x_crop_scale.transform()\n test_x = kmeans_generator.transform(test_images, save_to_file='data/data_test_kmeans_features_007.npy'.format(n_centroids), memmap=True)\n res = wrapper.predict(test_x)\n sub = classes.Submission(res)\n sub.to_file('sub_kmeans_006.csv')\n \"\"\"", "def update_centroids(X,idx,K):\n n = np.size(X,1)\n centroids = np.zeros((K,n))\n for i in range(0,K):\n ci = idx==i\n ci = ci.astype(int)\n total_number = sum(ci)\n ci.resize((np.size(X,0),1))\n total_matrix = np.matlib.repmat(ci,1,n)\n ci = np.transpose(ci)\n total = np.multiply(X,total_matrix)\n try:\n centroids[i] = (1/total_number)*np.sum(total,axis=0)\n except Exception:\n centroids[i] = 0 \n return centroids", "def segment(X, MU, k, r):\n cls = cluster(r)\n new_x = X.copy()\n for i in range(k):\n new_x[cls == i, :] = MU[i]\n return new_x", "def initialize_pp(img: np.ndarray):\n\n h, w, c = img.shape\n pixels = img.copy().reshape(h*w, c)\n\n # Choose one center uniformly at random \n # from among the data points\n r = np.random.randint(h*w)\n current_cluster_centers[0, 0, :] = pixels[r, :]\n\n # remove that point from the data set\n pixels = np.delete(pixels, r, axis=0)\n\n # For each data point x, compute D(x), \n # the distance between x and the nearest center \n # that has already been chosen.\n for k in range(1, numclusters):\n dist_sq = np.zeros(pixels.shape[0])\n for i in range(pixels.shape[0]): # over data points\n dist = []\n for j in range(k): # over current clusters\n # calculate distance to the cluster\n diff = pixels[i, :] - current_cluster_centers[j, 0, :]\n dist.append(np.inner(diff, diff))\n \n # choose the distance closest to the cluster\n dist_sq.itemset(i, min(dist))\n\n probs = dist_sq / dist_sq.sum()\n cumprobs = probs.cumsum()\n r = np.random.uniform()\n for i, prob in enumerate(cumprobs):\n if r <= prob:\n index = i\n break\n \n # add a new cluster\n current_cluster_centers[k, 0, :] = pixels[index, :]\n\n # remove that point from the data set\n pixels = np.delete(pixels, index, axis=0)\n\n\n print(\"Current clusters:\\n\", current_cluster_centers)", "def _kmeans_clustering_model_fn(features, labels, mode, params, config):\n assert labels is None, labels\n (all_scores, model_predictions, losses,\n is_initialized, init_op, training_op) = clustering_ops.KMeans(\n _parse_tensor_or_dict(features),\n params.get('num_clusters'),\n initial_clusters=params.get('training_initial_clusters'),\n distance_metric=params.get('distance_metric'),\n use_mini_batch=params.get('use_mini_batch'),\n 
mini_batch_steps_per_iteration=params.get(\n 'mini_batch_steps_per_iteration'),\n random_seed=params.get('random_seed'),\n kmeans_plus_plus_num_retries=params.get(\n 'kmeans_plus_plus_num_retries')).training_graph()\n incr_step = state_ops.assign_add(training_util.get_global_step(), 1)\n loss = math_ops.reduce_sum(losses, name=KMeansClustering.LOSS_OP_NAME)\n summary.scalar('loss/raw', loss)\n training_op = with_dependencies([training_op, incr_step], loss)\n predictions = {\n KMeansClustering.ALL_SCORES: all_scores[0],\n KMeansClustering.CLUSTER_IDX: model_predictions[0],\n }\n eval_metric_ops = {KMeansClustering.SCORES: loss}\n training_hooks = [_InitializeClustersHook(\n init_op, is_initialized, config.is_chief)]\n relative_tolerance = params.get('relative_tolerance')\n if relative_tolerance is not None:\n training_hooks.append(_LossRelativeChangeHook(relative_tolerance))\n return ModelFnOps(\n mode=mode,\n predictions=predictions,\n eval_metric_ops=eval_metric_ops,\n loss=loss,\n train_op=training_op,\n training_hooks=training_hooks)", "def segmentation_rgb(self, image, k=2):\n\n \n iterations = 5\n \n print(image.shape)\n imageW = image.shape[0]\n imageH = image.shape[1]\n\n\n dataVector = np.ndarray(shape=(imageW * imageH, 5), dtype=float)\n \n pixelClusterAppartenance = np.ndarray(shape=(imageW * imageH), dtype=int)\n\n \n for y in range(0, imageH):\n for x in range(0, imageW):\n xy = (x, y)\n \n rgb=image[x,y]\n print(rgb)\n #rgb = image.getpixel(xy)\n\n dataVector[x + y * imageW, 0] = rgb[0]\n dataVector[x + y * imageW, 1] = rgb[1]\n dataVector[x + y * imageW, 2] = rgb[2]\n dataVector[x + y * imageW, 3] = x\n dataVector[x + y * imageW, 4] = y\n print(\"data vector\")\n print(dataVector)\n \n dataVector_scaled = preprocessing.normalize(dataVector)\n minValue = np.amin(dataVector_scaled)\n maxValue = np.amax(dataVector_scaled)\n\n centers = np.ndarray(shape=(k,5))\n for index, center in enumerate(centers):\n centers[index] = np.random.uniform(minValue, maxValue, 5)\n print(\"center\")\n print(centers[index])\n\n for iteration in range(iterations):\n \n for idx, data in enumerate(dataVector_scaled):\n distanceToCenters = np.ndarray(shape=(k))\n for index, center in enumerate(centers):\n distanceToCenters[index] = euclidean_distances(data.reshape(1, -1), center.reshape(1, -1))\n pixelClusterAppartenance[idx] = np.argmin(distanceToCenters)\n\n \n clusterToCheck = np.arange(k) \n \n clustersEmpty = np.in1d(clusterToCheck, pixelClusterAppartenance)\n \n for index, item in enumerate(clustersEmpty):\n if item == False:\n pixelClusterAppartenance[np.random.randint(len(pixelClusterAppartenance))] = index\n \n\n for i in range(k):\n dataInCenter = []\n\n for index, item in enumerate(pixelClusterAppartenance):\n if item == i:\n dataInCenter.append(dataVector_scaled[index])\n dataInCenter = np.array(dataInCenter)\n centers[i] = np.mean(dataInCenter, axis=0)\n\n \n print(\"Centers Iteration num\", iteration, \": \\n\", centers)\n\n \n for index, item in enumerate(pixelClusterAppartenance):\n dataVector[index][0] = int(round(centers[item][0] * 255))\n dataVector[index][1] = int(round(centers[item][1] * 255))\n dataVector[index][2] = int(round(centers[item][2] * 255))\n\n \n image = Image.new(\"RGB\", (imageW, imageH))\n\n for y in range(imageH):\n for x in range(imageW):\n image.putpixel((x, y), (int(dataVector[y * imageW + x][0]),\n int(dataVector[y * imageW + x][1]),\n int(dataVector[y * imageW + x][2])))\n\n print(type(image))\n image = cv2.cvtColor(np.asarray(image), cv2.COLOR_BGR2GRAY)\n 
print(type(image))\n \n return image", "def K_Means(self, n_clusters: int=150):\n start_time = datetime.datetime.now()\n self.func_log(\"\\n\\tIn K-Measn()\")\n \n kmeans = KMeans(n_clusters = n_clusters)\n kmeans.fit(self.descriptor_list)\n self.visual_words = kmeans.cluster_centers_ \n \n end_time = datetime.datetime.now() \n self.func_log(\"\\n\\t\\tTime Cost: {}\\n\".format(end_time-start_time))", "def _compute_centroids(self):\n\n for i in range(0, self.k):\n cluster = np.argwhere(self.assigned_clusters == i)\n cluster_points = self.data[cluster].squeeze()\n self.centroids[i] = np.mean(cluster_points, axis=0)", "def k_means(prev_args, data_set_obj):\n parser = argparse.ArgumentParser(description='kmeans')\n parser.add_argument('--clusters', required=True,\n help='The number of clusters to use for kmeans.', type=int)\n parser.add_argument('--iterations', default=300,\n help='The maximum number of iterations for the algorithm.', type=int)\n parser.add_argument('--metric', default='euclidean',\n help='The distance metric to use.')\n args, unknown = parser.parse_known_args()\n kmeans = KMeans(prev_args.rank, args.clusters, args.iterations, args.metric)\n kmeans.fit_predict(data_set_obj.gallery_idx, data_set_obj)\n return kmeans.ranked_acc", "def kmeansClustering(data, x_scaled, clust, random_s):\n np.random.seed(random_s)\n #Performs clustering with the right number of clusters\n kmeans = KMeans(n_clusters=clust, random_state=random_s, n_jobs=-1).fit(x_scaled)\n kmeans = pd.DataFrame(kmeans.labels_, index=data.index, columns=[\"Clusters\"])\n #Merge on our main dataframe for better vizualisation of the clusters\n data_clust = pd.merge(data, kmeans, left_index=True, right_index=True, how='left')\n return data_clust", "def scalar_image_kmeans_image_filter(*args, **kwargs):\n import itk\n instance = itk.ScalarImageKmeansImageFilter.New(*args, **kwargs)\n return instance.__internal_call__()", "def main():\n data = Dummy(n_samples=500, n_dim=3)\n X = data.get_dummy()\n clustering = Kmeans(X, K=5, display=False)\n clustering.run()\n print(f\"Number of iterations: {clustering.num_iterations}\\n\")\n\n \"\"\" Test example of clustering_kmeans with unknown number of clusters K \"\"\"\n clustering = Kmeans(X,)\n clustering.silhouette_find_k()\n print(f\"Number of centroids found: {clustering.num_K}\")", "def gmm_clustering(X, K):\n\n # Initialization:\n pi = []\n mu = []\n cov = []\n for k in range(K):\n pi.append(1.0 / K)\n mu.append(list(np.random.normal(0, 0.5, 2)))\n temp_cov = np.random.normal(0, 0.5, (2, 2))\n temp_cov = np.matmul(temp_cov, np.transpose(temp_cov))\n cov.append(list(temp_cov.reshape(4)))\n #print(pi)\n ### you need to fill in your solution starting here ###\n X = np.array(X) \n num_data = len(X) #number of data points\n # Run 100 iterations of EM updates\n for t in range(100):\n like = np.zeros((num_data,1))\n post = np.zeros((K, num_data)) #stores posterior for all the classes - each row corresponding to a class k (k=1:K)\n for k in range(K):\n mu_k = np.array(mu[k]).reshape(1,2)\n #print(mu_k.shape)\n #print(X.shape)\n cov_k = np.array(cov[k]).reshape(2,2)\n #print(cov_k.shape)\n pi_k = pi[k]\n logpx_k = []\n for sample in X:\n logpx_samp = - 0.5*(np.dot(sample - mu_k, np.dot(np.linalg.inv(cov_k),np.transpose(sample - mu_k)))) - np.log(2*np.pi) - np.log(np.sqrt(np.linalg.det(cov_k))) + np.log(pi_k)\n #print(logpx_k)\n logpx_k.append(logpx_samp[0][0]) \n logpx_k = np.array(logpx_k)\n #print(logpx_k.shape)\n #print(logpx_k)\n explog_k = np.exp(logpx_k)\n 
#print(explog_k.shape)\n #print(post.shape)\n post[k] = explog_k\n like = np.sum(post, axis=0)\n #print(like.shape)\n #print(post.shape)\n post_nrm = post\n\n mu_new = []\n cov_new = []\n N = 0\n Nk_ls = []\n for k in range(K):\n post_nrm[:][k] = post[:][k] / like #posterior for all the classes\n \n #compute new parameters\n Nk = np.sum(post_nrm[:][k])\n #print(Nk.shape)\n N += Nk\n Nk_ls.append(Nk)\n mu_k_new = np.dot(post_nrm[:][k], X) / Nk\n mu_new.append(list(mu_k_new))\n #print(post_nrm[:][k].shape)\n cov_k_new = np.dot(np.multiply(np.transpose(X - mu_k_new), post_nrm[:][k]), X - mu_k_new) / Nk\n cov_new.append(list(cov_k_new.reshape(4)))\n\n pi_new = Nk_ls / N\n #update parameters for the next iteration \n pi = pi_new\n mu = mu_new\n cov = cov_new\n return mu, cov", "def mySpectralClustering(W, K, normalized):\n\n D = np.diag(np.sum(W,axis=0))\n L = D - W\n if normalized == 1:\n L = lin.inv(D) @ L \n vals, vecs = lin.eig(L)\n idx = vals.argsort()[::-1] \n vals = vals[idx]\n vecs = vecs[:,idx]\n N, _ = W.shape\n Y = np.zeros((K,N))\n for kk in range(K):\n Y[kk,:] = vecs[:,kk]\n kmeans = KMeans(n_clusters=K).fit(Y.T)\n estlabels = kmeans.labels_\n return estlabels, Y", "def byMeans(dataset, number_of_clusters, class_header=\"Class\", verbosity=0, return_clusters=False):\n if verbosity >= 2: # optionally print dataset shape and info\n print(dataset.shape)\n print(dataset)\n\n old_dataset = dataset.copy()\n dataset = dataset.drop(columns=class_header) # remove non-float class column\n\n # Assign centroids to random values which fit into dataset space.\n centroids = pandas.DataFrame(columns=dataset.columns,\n data=numpy.random.uniform(dataset.min(), dataset.max(),\n (number_of_clusters, dataset.shape[1])))\n if verbosity >= 1: # optionally print centroids and random dataset\n print(\"INITIAL CENTROIDS\")\n print(centroids)\n if verbosity >= 2:\n print(\"DATAFRAME DATASET\")\n print(dataset)\n\n for iterations in range(MAX_ITERATIONS): # Loop until MAX_ITERATIONS or settled\n if verbosity >= 1: # optionally print iteration count\n print(\"ITERATIONS\")\n print(iterations)\n\n # calculate clustering of data\n clusters = Cluster.calcClusters(dataset, centroids, number_of_clusters, verbosity=verbosity)\n\n old_centroids = centroids.copy() # copy centroid dataframe\n\n if verbosity >= 2: # optionally print cluster list\n print(\"DATAFRAME ARRAY CLUSTERS\")\n print(clusters)\n\n for cluster_index, cluster in enumerate(clusters): # Calculate new centroids\n cluster_mean = cluster.mean()\n if not cluster_mean.isnull().any(): # make sure we dont write null means to centroid list\n centroids.loc[cluster_index] = cluster_mean\n\n if verbosity >= 1:\n print(\"OLD CENTROIDS\")\n print(old_centroids)\n print(\"NEW CENTROIDS\")\n print(centroids)\n\n if old_centroids is not None: # Calculate sum of centroid movements.\n centroid_change = 0\n for centroid_index, centroid in centroids.iterrows():\n centroid_change += abs(Cluster.calcDistance(centroid, old_centroids.loc[centroid_index]))\n\n if verbosity >= 1:\n print(\"CENTROID DIFF\")\n print(centroid_change)\n\n if centroid_change < SETTLE_THRESHOLD: # break if centroid movement is below threshold.\n break\n\n # Final Cluster re-calculation\n clusters = Cluster.calcClusters(old_dataset, centroids, number_of_clusters,\n verbosity=verbosity, class_header=class_header)\n # Create new dataframe with class column of and row for each centroid\n centroids_class = pandas.DataFrame(data=[\"NOCLASS\"] * centroids.shape[0], columns=[class_header])\n if verbosity 
>= 2:\n print(centroids_class)\n print(centroids)\n for cluster_index, cluster in enumerate(clusters): # For each cluster\n if verbosity >= 2:\n print(cluster_index)\n print(cluster)\n if cluster.size > 0: # If cluster is not empty set centroid class to most common class in cluster\n centroids_class.iat[cluster_index, 0] = cluster.mode().loc[0][0]\n if old_dataset.columns[0] == class_header: # check if class column should be first or last.\n print(\"CLASS IS FIRST COL\")\n centroids = pandas.concat([centroids_class, centroids], axis=1) # merge class to centroids as first column\n else:\n print(\"CLASS IS NOT FIRST COL\")\n centroids = pandas.concat([centroids, centroids_class], axis=1) # merge class to centroids as last column\n for centroid in centroids.iterrows(): # For each centroid\n if centroid[1][class_header] is \"NOCLASS\": # Trim NOCLASS centroids (empty cluster)\n centroids = centroids.drop(centroid[0])\n centroids = centroids.reset_index(drop=True) # Reindex centroids\n\n if return_clusters is True: # optionally return cluster list\n return centroids, clusters\n pass\n else:\n return centroids # return centroids dataframe", "def k_means_cluster(datapoint, *args, n_clusters = 54):\n\n # run k-means\n km = KMeans(n_clusters = n_clusters, random_state = 1)\n # fit the data to the k-means model\n km.fit(datapoint)\n # obtain labels for the resulting clusters\n labels = km.labels_\n plt.figure(figsize=(9,5))\n # plot the data, coloring points based on the cluster\n #for label in np.unique(labels):\n #plt.scatter(datapoint[labels==label,0], datapoint[labels==label,1],c=label,label=label)\n plt.set_cmap('Set2')\n \n plt.scatter(datapoint[:,0], datapoint[:,1], label = labels, c = labels) \n try:\n plt.scatter(args[0][:,0],args[0][:,1],c=\"red\")\n plt.legend()\n \"\"\" for cat, (x,y) in zip(labels, args[0]):\n #plt.scatter(args[0][:,0],args[0][:,1],c=colors[cat])\n plt.text(x+0.1, y+0.1, cat)\"\"\"\n except IndexError:\n plt.title(\"K-means, {} clusters\".format(n_clusters))\n plt.legend(loc = \"lower right\")\n plt.show()", "def _cmeans0_kth_lfreq(data, u_old, c, m, *para):\n\t# parameters\n\tk = para[0]\n\tlocation_frequency = para[1]\n\n\t# Normalizing, then eliminating any potential zero values.\n\tu_old /= np.ones((c, 1)).dot(np.atleast_2d(u_old.sum(axis=0)))\n\tu_old = np.fmax(u_old, np.finfo(np.float64).eps)\n\n\tum = u_old ** m\n\n\t# calculating u_c\n\t#u_c = u_old / u_old.sum(axis=1)[:,None]\n\n\t# remain the belonging rate >= the k-th max location of each cluster in um_c\n\tfilter_k = lambda row:row < sorted(row, reverse=True)[k-1]\n\tfail_indices = np.apply_along_axis(filter_k, axis=1, arr=u_old)\n\tum[fail_indices] = 0\n\tum_f = um * (np.ones((um.shape[0], 1)).dot(np.atleast_2d(location_frequency)))\n\n\t# cluster freqeuncy\n\ttrue_indices = np.invert(fail_indices)\n\tcluster_frequency = true_indices.dot(np.atleast_2d(location_frequency).T)\n\tcluster_frequency = cluster_frequency / np.amax(cluster_frequency)\n\n\t# Calculate cluster centers\n\tdata = data.T\n\t# data1:2861,2; um:30,2861\n\tcntr = um_f.dot(data) / (np.ones((data.shape[1],1)).dot(np.atleast_2d(um_f.sum(axis=1))).T)\n\t#d = _distance(data, cntr) * 100 # euclidean distance\n\td = cdistance.get_center_distance(data, cntr)\n\n\tjm = (um * d ** 2).sum()\n\n\tu = (d ** (- 2. 
/ (m - 1))) * cluster_frequency.dot(np.ones((1, d.shape[1])))\n\tu /= np.ones((c, 1)).dot(np.atleast_2d(u.sum(axis=0)))\n\t#print(\" - d:\", d[0:5, 0:3], \"\\n - u:\", u[0:5, 0:3])\n\treturn cntr, u, jm, d", "def run_k_means(self):\r\n centroids = self.centroids\r\n\r\n for i in range(self.max_iters):\r\n self.closestcentroids()\r\n self.newcentroids()\r\n\r\n J = 0\r\n X = self.x\r\n m = len(X)\r\n idx = self.index\r\n K = self.K\r\n dim = X.shape[1]\r\n\r\n for num in range(K):\r\n # find the index of all entries where idx==n\r\n indexentries = np.nonzero(idx == num)[0]\r\n # the values in X that have the index in indesxentries\r\n values = X[indexentries]\r\n # using one of the K centroids to do the calculation. K<=2 doesn't\r\n # work here for some reason.\r\n centroid = centroids[num, 0]\r\n J += np.sum((values - centroid) ** 2)\r\n\r\n return [centroids.reshape((1, K, dim)), [X[idx == k].size for k in range(K)], J / m]", "def kmeans_002():\n train_mmap_path = 'data/train_cropped_150_scale_15.memmap'\n test_mmap_path = 'data/test_cropped_150_scale_15.memmap'\n\n if not os.path.exists('data/train_cropped_150.memmap'):\n classes.crop_to_memmap(150, training=True)\n if not os.path.exists('data/test_cropped_150.memmap'):\n classes.crop_to_memmap(150, training=False)\n\n if not os.path.exists(train_mmap_path):\n logger.info(\"Prepping training images\")\n pre_scale = np.memmap('data/train_cropped_150.memmap', mode='r', shape=(N_TRAIN, 150, 150, 3))\n trainX = classes.rescale_memmap(15, pre_scale, train_mmap_path)\n del pre_scale\n else:\n trainX = np.memmap(train_mmap_path, mode='r', shape=(N_TRAIN, 15, 15, 3))\n\n if not os.path.exists(test_mmap_path):\n logger.info(\"Prepping testing images\")\n pre_scale = np.memmap('data/test_cropped_150.memmap', mode='r', shape=(N_TEST, 150, 150, 3))\n testX = classes.rescale_memmap(15, pre_scale, test_mmap_path)\n del pre_scale\n else:\n testX = np.memmap(test_mmap_path, mode='r', shape=(N_TEST, 15, 15, 3))\n\n\n n_jobs = multiprocessing.cpu_count()\n\n if not os.path.exists('data/mdl_kmeans_002_centroids.npy'):\n logger.info(\"Pretraining KMeans feature encoder\")\n km = models.KMeansFeatures.KMeansFeatures(rf_size=5, num_centroids=1600, num_patches=400000)\n km.fit(trainX)\n km.save_to_file('mdl_kmeans_002')\n else:\n logger.info(\"Loading KMeans feature encoder from file\")\n km = models.KMeansFeatures.KMeansFeatures.load_from_file('mdl_kmeans_002', rf_size=5)\n\n # Takes waaaay too long to finish. At least an hour per tree. 
Clearly too\n # many dimensions\n\n # Instead ran with ridge rf manually\n mdl = models.RandomForest.KMeansRandomForest(km, trainX, testX, n_jobs=n_jobs, cv_sample=0.5)\n # mdl.run('cv')\n mdl.run('train')\n res = mdl.run('predict')\n np.save('submissions/sub_kmeans_rf_002.npy', res)\n output = classes.Submission(res)\n output.to_file('sub_kmeans_rf_002.csv')", "def cluster_segment(img, n_clusters, random_state=0):\n # Remove this line when you implement this function.\n # raise NotImplementedError()\n\n # Downsample img first using the mean to speed up K-means\n img_d = block_reduce(img, block_size=(2, 2, 1), func=np.mean)\n\n # TODO: Generate a clustered image using K-means\n\n # first convert our 3-dimensional img_d array to a 2-dimensional array\n # whose shape will be (length * width, number of channels) hint: use img_d.shape\n img_r = img_d.reshape((np.shape(img_d)[0]*np.shape(img_d)[1], np.shape(img_d)[2]))\n \n # fit the k-means algorithm on this reshaped array img_r using the\n # the scikit-learn k-means class and fit function\n # see https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html\n # the only parameters you have to worry about are n_clusters and random_state\n kmeans = KMeans(n_clusters, random_state=random_state).fit(img_r)\n\n # get the labeled cluster image using kmeans.labels_\n clusters = kmeans.labels_\n\n # reshape this clustered image to the original downsampled image (img_d) shape\n cluster_img = clusters.reshape((np.shape(img_d)[0], np.shape(img_d)[1]))\n\n # Upsample the image back to the original image (img) using nearest interpolation\n img_u = imresize(cluster_img, (img.shape[0], img.shape[1]), interp='nearest')\n\n return img_u.astype(np.uint8)", "def private_center_recovery(\n raw_data,\n cluster_assignment,\n norm,\n epsilon,\n delta,\n):\n\n cluster_ids = np.unique(cluster_assignment)\n _, d = raw_data.shape\n centers = []\n for _ in range(max(cluster_ids) + 1):\n centers.append(np.zeros(d))\n\n for c_id in cluster_ids:\n points = raw_data[c_id == cluster_assignment]\n if np.size(points) == 0:\n centers[c_id] = np.random.uniform(-norm, norm)\n centers[c_id] = private_centers(points, norm, epsilon, delta)\n return np.vstack(centers)", "def FE_kmeans_resampler(x_train, y_train, target, smote=\"\", verbose=0):\r\n x_train_c = copy.deepcopy(x_train)\r\n x_train_c[target] = y_train.values\r\n\r\n # Regression problem turned into Classification problem\r\n n_clusters = max(3, int(np.log10(len(y_train))) + 1)\r\n # Use KMeans to find natural clusters in your data\r\n km_model = KMeans(n_clusters=n_clusters,\r\n n_init=5,\r\n random_state=99)\r\n #### remember you must predict using only predictor variables!\r\n y_train_c = km_model.fit_predict(x_train)\r\n\r\n if verbose >= 1:\r\n print('Number of clusters created = %d' %n_clusters)\r\n\r\n #### Generate the over-sampled data\r\n #### ADASYN / SMOTE oversampling #####\r\n if isinstance(smote, str):\r\n x_train_ext, _ = oversample_SMOTE(x_train_c, y_train_c)\r\n else:\r\n x_train_ext, _ = smote.fit_resample(x_train_c, y_train_c)\r\n y_train_ext = x_train_ext[target].values\r\n x_train_ext.drop(target, axis=1, inplace=True)\r\n return (x_train_ext, y_train_ext)", "def cal_SSE(data,clu,group,k):\n SSE = []\n for i in range(k):\n idx = np.where(group==i)\n tmpclu = np.asarray([clu[i].tolist()])\n clu_dis = cal_dis(data[idx],tmpclu)\n SSE.append(sum([clu_dis[j][0] for j in range(len(clu_dis))]))\n\n return sum(SSE)", "def clusters(self,rng):\n #clusts = 
subclust(normalize(self.training_data),0.4,0.5)\n if self.extended:\n dat = self.training_data / rng\n else:\n dat = self.training_data[:,0:-1] / rng[0:-1]\n\n clusts = subclust(normalize(dat))\n\n print len(clusts),\"initial clusters for class\",self.name\n if self.extended:\n return np.array([self.training_data[i] for i in clusts])\n else:\n return np.array([self.training_data[i,0:-1] for i in clusts])", "def cluster_cal(self):\n self.Cluster = []\n for i in range(self.nodenum):\n neighborhood_node = self.neighbor_node(i)\n Node_num = len(neighborhood_node)\n Count = self.neighbor_edge(neighborhood_node)\n if(Node_num == 0 or Node_num == 1):\n self.Cluster.append(0.5)\n else:\n self.Cluster.append(Count/(Node_num*(Node_num - 1)))\n \n self.cluster_coeff = np.average(self.Cluster)", "def kmeans_clustering(self,k):\r\n \r\n print(colored(\"Performing K-means clustering with %d clusters\\n\"%k,color = 'yellow', attrs=['bold']))\r\n kmeans = KMeans(n_clusters=k, random_state=0, n_init=10, max_iter=100, n_jobs=-1, ).fit(self.X)\r\n self.labels = kmeans.labels_\r\n self.davies_bouldin_score()\r\n print()\r\n print(colored(\"The k-means inertia is %0.002f\\n\" %(kmeans.inertia_),color = 'red', attrs=['bold']))\r\n self.cluster_plot()\r\n return self.labels , kmeans.cluster_centers_,kmeans", "def spectral_clusterin(data, K, Random):\r\n # Create the Weighted Adjacency Matrix\r\n W = sp.create_weighted_matrix(data)\r\n\r\n # Create D^(-1/2). D is the Diagonal Degree Matrix\r\n D_neg_sqrt = sp.create_diagonal_matrix(W)\r\n\r\n # Create the Laplacian Matrix\r\n L = sp.create_laplacian_matrix(D_neg_sqrt, W)\r\n\r\n # Iterative QR to get A' and Q'\r\n Atag, Qtag = sp.qr_iterator(L)\r\n\r\n #detemin k from user\r\n k=K\r\n\r\n # if Random = True, use the Eigengap Heuristic to select k\r\n if Random:\r\n k = sp.eigengap(Atag)\r\n\r\n # Get the eigenvector matrix U\r\n U = sp.get_spectral_matrix(Atag, Qtag, k)\r\n\r\n # Get T by Normalizing U's rows to unit length\r\n T = sp.normalize_matrix(U)\r\n\r\n return T, k", "def k_means(data, K):\n\n # randomly choose k centroids from the data points\n centroids = data[np.random.choice(len(data), size=K, replace=False)]\n\n # assign each data point to closest centroid\n distances = euclidean_distances(data, centroids)\n labels = np.array([np.argmin(i) for i in distances])\n\n # track the largest centroid movements\n deltas = []\n\n for i in range(MAX_ITERATIONS):\n # keep track of the largest centroid movement for this iteration\n max_delta_mu = 0\n\n for k in range(K):\n # get all data points with label of this centroid k\n cluster_points = data[labels == k]\n if len(cluster_points) == 0:\n continue\n\n # get mean r, g, and b values of all points in this cluster\n # e.g. mu = [112.5, 95.6, 204.2]\n mu = cluster_points.mean(axis=0)\n\n # get the max difference in an r,g, or b value\n # abs(centroids[k] - mu) will return diff in RGB values\n # e.g. 
abs(centroids[k] - mu) = [15.2, 25.4, 4.7]\n max_delta_mu = max(max_delta_mu, abs(centroids[k] - mu).max())\n\n # update the kth centroid to the new mean value\n centroids[k] = mu\n\n deltas.append(max_delta_mu)\n\n # assign each data point to closest centroid\n distances = euclidean_distances(data, centroids)\n labels = np.array([np.argmin(i) for i in distances])\n\n # stop the iterations early if the largest change in an r, g, or b value is < MIN_DELTA_MU\n if max_delta_mu < MIN_DELTA_MU:\n print(\n f\"reached delta_mu {max_delta_mu:.2f} < {MIN_DELTA_MU} in {i} iterations for K={K}\")\n break\n\n return centroids, labels, deltas" ]
[ "0.6720604", "0.6584007", "0.6550382", "0.64145166", "0.6413286", "0.627287", "0.6238827", "0.61965424", "0.61758494", "0.6154797", "0.6143025", "0.6117063", "0.6048166", "0.6015218", "0.6009818", "0.6004305", "0.5915785", "0.59050244", "0.5881358", "0.58780664", "0.5875312", "0.58694065", "0.58676106", "0.585958", "0.58452076", "0.5841199", "0.58394164", "0.5790039", "0.5788512", "0.57677126", "0.5765566", "0.5755045", "0.575367", "0.57484823", "0.5741384", "0.57150805", "0.5711968", "0.5705073", "0.5702384", "0.56933403", "0.56703573", "0.5666577", "0.5664131", "0.5660411", "0.56579906", "0.5657908", "0.56296754", "0.5625136", "0.5613738", "0.561013", "0.5606345", "0.5596514", "0.5593805", "0.5580103", "0.5574531", "0.55684215", "0.5566427", "0.5548927", "0.5536584", "0.553617", "0.5519556", "0.55061454", "0.55056363", "0.5492029", "0.5486739", "0.5474218", "0.54722875", "0.54689544", "0.5455202", "0.54476935", "0.5444677", "0.54437065", "0.5442782", "0.54372746", "0.54322064", "0.5432034", "0.54231405", "0.54212904", "0.5414591", "0.54102343", "0.5407502", "0.5406541", "0.5403135", "0.5397547", "0.53890854", "0.53877425", "0.5386936", "0.53787196", "0.5367259", "0.53665197", "0.53641236", "0.5359608", "0.5358266", "0.53563285", "0.53509855", "0.53504443", "0.5336932", "0.53338814", "0.5310777", "0.53012425" ]
0.62292767
7
References 'Distributing many points on a sphere' by E.B. Saff and A.B.J. Kuijlaars, Mathematical Intelligencer, 19.1 (1997), pp. 5-11
def saff_kuijlaars(N):
    # Saff-Kuijlaars spiral: place N points on the unit sphere with
    # roughly uniform areal density.
    # Relies on numpy (np) and a spherical-to-Cartesian helper sph2car
    # from the surrounding module.
    k = np.arange(N)
    h = -1 + 2.0 * k / (N - 1)      # heights spaced evenly in [-1, 1]
    theta = np.arccos(h)            # polar angles
    phi = np.zeros_like(h)
    for i in range(1, N - 1):
        # advance the azimuth so neighbouring points keep ~constant spacing
        phi[i] = (phi[i - 1] + 3.6 / np.sqrt(N * (1 - h[i]**2))) % (2.0 * np.pi)
    return sph2car(np.ones_like(theta), theta, phi)
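The function above calls a helper sph2car that is not shown in this record. A minimal sketch of such a spherical-to-Cartesian conversion, assuming the physics convention (theta is the polar angle from +z, phi the azimuth) and a row-per-point return shape, might look like:

import numpy as np

def sph2car(r, theta, phi):
    # Convert spherical coordinates (radius r, polar angle theta, azimuth phi)
    # to Cartesian x, y, z; for 1-D inputs this returns an (N, 3) array.
    x = r * np.sin(theta) * np.cos(phi)
    y = r * np.sin(theta) * np.sin(phi)
    z = r * np.cos(theta)
    return np.stack([x, y, z], axis=-1)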
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sphere(indiv):\n return sum([ x ** 2 for x in indiv])", "def sphere(self, x):\r\n # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]\r\n return sum((x+0)**2)", "def partsphere(self, x):\r\n self.counter += 1\r\n # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]\r\n dim = len(x)\r\n x = array([x[i % dim] for i in range(2*dim)])\r\n N = 8\r\n i = self.counter % dim\r\n #f = sum(x[i:i + N]**2)\r\n f = sum(x[np.random.randint(dim, size=N)]**2)\r\n return f", "def generate_sphere_points(n):\r\n points = []\r\n inc = math.pi * (3 - math.sqrt(5))\r\n offset = 2 / float(n)\r\n for k in range(int(n)):\r\n y = k * offset - 1 + (offset / 2)\r\n r = math.sqrt(1 - y*y)\r\n phi = k * inc\r\n points.append([math.cos(phi)*r, y, math.sin(phi)*r])\r\n return points", "def on_sphere():\n vec = np.random.standard_normal(3)\n return vec / np.linalg.norm(vec)", "def pointsOn4Sphere(numPts):\n points = np.zeros((2*numPts, 4))\n N = 4\n surfaceArea = N * np.pi ** (N/2) / (N/2) # for even N\n delta = np.exp(np.log(surfaceArea / numPts) / 3)\n Iter = 0\n ind = 0\n maxIter = 1000\n while ind != numPts and Iter < maxIter:\n ind = 0\n deltaW1 = delta\n w1 = 0.5 * deltaW1\n while w1 < np.pi:\n q0 = np.cos(w1)\n deltaW2 = deltaW1 / np.sin(w1)\n w2 = 0.5 * deltaW2\n while w2 < np.pi:\n q1 = np.sin(w1) * np.cos(w2)\n deltaW3 = deltaW2 / np.sin(w2)\n w3 = 0.5 * deltaW3\n while w3 < 2 * np.pi:\n q2 = np.sin(w1) * np.sin(w2) * np.cos(w3)\n q3 = np.sin(w1) * np.sin(w2) * np.sin(w3)\n points[ind, :] = np.array([q0, q1, q2, q3])\n ind += 1\n w3 += deltaW3\n w2 += deltaW2\n w1 += deltaW1\n delta *= np.exp(np.log(float(ind) / numPts) / 3)\n Iter += 1\n return points[0:numPts, :]", "def xrndSphere(n):\n for i in xrange(n):\n yield rndSphere()", "def uniform_sphere(batch_size, dim, epsilon=1, ord=2):\r\n\r\n random = numpy.random.randn(batch_size, dim)\r\n random /= numpy.repeat(numpy.linalg.norm(random, ord=ord, axis=1).reshape(-1, 1), axis=1, repeats=dim)\r\n random *= epsilon\r\n\r\n return random", "def insphere(size=None):\n if size is None:\n size = ()\n else:\n try:\n size = tuple(size)\n except TypeError:\n size = (size,)\n n = int(prod(size))\n if n < 70:\n # For small n, interpreted overhead dominates. Using sin and cos\n # results in fewer interpreted instructions than rejection method.\n # Compiled code should never use this algorithm.\n mu, phi, z = random((3,) + size + (1,))\n mu = 2.*mu - 1.\n phi *= 2. * pi\n s = sqrt(1. - mu)\n return z**(1./3.) 
* concatenate((s*cos(phi), s*sin(phi), mu), axis=-1)\n # Beats this:\n # p = onsphere(size)\n # return p * random(p.shape[:-1] + (1,)) ** (1./3.)\n # For large n, higher intrinsic cost of sin and cos compared to\n # rejection method dominates, and it is worth taking a few more\n # interpreted instructions to benefit from the superior algorithm.\n nmore = n\n p = []\n fac = 6./pi # 1/prob random point in unit sphere\n while nmore > 0:\n m = int((nmore + 5.*sqrt(nmore))*fac) # 99.9+% chance of nmore\n q = 2.*random((m, 3)) - 1.\n q = q[(q * q).sum(axis=-1) < 1., :]\n nmore -= len(q)\n p.append(q)\n return concatenate(p)[:n].reshape(size + (3,))", "def spherefcn(x: np.ndarray) -> np.ndarray:\n if x.ndim == 1:\n x = x.reshape(-1, len(x))\n f = np.sum(x**2, axis=1)\n return f.reshape(-1, 1)", "def new_uniform_spherical_particle_distribution(number_of_particles, size, total_mass, **keyword_arguments):\n particles = Particles(number_of_particles)\n particles.mass = total_mass * 1.0 / number_of_particles\n x, y, z = UniformSphericalDistribution(number_of_particles, **keyword_arguments).result\n particles.x = size * x\n particles.y = size * y\n particles.z = size * z\n return particles", "def fix_sphere_m (center_x, center_y, center_z, radius, centers, radii, len_points):\n \n g_x = []\n g_y = []\n g_z = []\n points = [hydrogen_coord_gen(center_x, center_y, center_z, radius) for i in range(0, len_points)] \n x = [points[i][0] for i in range(0, len(points))] \n y = [points[i][1] for i in range(0, len(points))]\n z = [points[i][2] for i in range(0, len(points))]\n\n for i in range(0, len(points)):\n check = 0\n j = 0\n while (j <= (len(centers) - 1) and (check == 0)): \n if (calculate_3D_distance_2_centers(x[i], y[i], z[i], centers[j][0], centers[j][1], centers[j][2]) < radii[j]):\n check += 1\n j += 1\n if (check == 0):\n g_x.append(x[i])\n g_y.append(y[i])\n g_z.append(z[i])\n\n return g_x, g_y, g_z", "def sphere_sample(radius, num_pts=500):\n position_list = []\n for _ in range(int(num_pts)):\n # see https://stackoverflow.com/questions/33976911/\n # generate-a-random-sample-of-points-distributed-on-the-surface-of-a-unit-sphere/33977530#33977530\n # for discussion on this algorithm\n\n vec = np.random.normal(0, 1, 3) # select three random points (if normal dist no skip needed)\n vec /= np.linalg.norm(vec) # normalize vector\n vec *= radius # lengthen vector to desired radius\n position_list.append(list(vec))\n\n return position_list", "def arndSphere(N):\n sph = np.empty( (N,3), np.float64 )\n \n sph[:,2] = np.random.uniform(-1.0,1.0,N) # z-coordinates\n z2 = np.sqrt(1.0 - sph[:,2]**2)\n phi = (2.0 * math.pi) * np.random.random( N )\n sph[:,0] = z2 * np.cos(phi) # x \n sph[:,1] = z2 * np.sin(phi) # y\n \n return sph", "def generate_sphere_full():\n \n num_voxels = 31\n c = (15.0, 15.0, 15.0)\n\n data_x = []\n data_y = []\n data_z = []\n data_intensity = []\n\n volume = numpy.zeros((num_voxels, num_voxels, num_voxels))\n\n for x in range(num_voxels):\n for y in range(num_voxels):\n for z in range(num_voxels):\n\n if numpy.sqrt((x-c[0])**2 + (y-c[1])**2 + (z-c[2])**2) - 7.5 < 1.5:\n data_x.append(x)\n data_y.append(y)\n data_z.append(z)\n data_intensity.append(200.0)\n\n volume[x,y,z] = 200.0\n\n\n return data_x, data_y, data_z, data_intensity, volume", "def points_on_sphere(\n N,\n origin = numpy.zeros(3),\n radius = 1.):\n phi = (1 + numpy.sqrt(5)) / 2 # the golden ratio\n long_incr = 2*numpy.pi / phi # how much to increment the longitude\n\n dz = 2.0 / float(N) # a unit sphere has diameter 2\n 
bands = numpy.arange(N) # each band will have one point placed on it\n z = bands * dz - 1 + (dz/2) # the height z of each band/point\n r = numpy.sqrt(1 - z*z) # project onto xy-plane\n az = bands * long_incr # azimuthal angle of point modulo 2 pi\n x = r * numpy.cos(az)\n y = r * numpy.sin(az)\n ## get triangles\n points = numpy.array([x, y, z])\n tri = scipy.spatial.ConvexHull(points.T)\n points = origin[None, :] + points.T*radius\n return points, tri.simplices", "def onsphere(size=None):\n xy = oncircle(size)\n z = 2.*random(xy.shape[:-1] + (1,)) - 1.\n xy *= sqrt(1. - z*z)\n return concatenate((xy, z), axis=-1)", "def sphere_sre(solution):\n a = 0\n bias = 0.2\n x = solution.get_x()\n x1 = x[:10]\n x2 = x[10:]\n value1 = sum([(i-bias)*(i-bias) for i in x1])\n value2 = 1/len(x) * sum([(i-bias)*(i-bias) for i in x2])\n return value1 + value2", "def surfaceIntSphere(r: float) -> float:\n return 4.0 * np.pi * r * r", "def sample_sphere(n, truncate=True):\n point_dfs = []\n accumulated_samples = 0\n while accumulated_samples < n:\n # (2*r)^3 / (4/3 pi r^3) = 6/pi\n iter_npoints = min(int(np.round((n-accumulated_samples)*6/np.pi)),\n max_iter_npoints)\n # do 3-sigma more\n iter_npoints = iter_npoints + np.int(3*np.sqrt(iter_npoints))\n iter_npoints = max(iter_npoints, min_iter_npoints)\n\n x = np.random.uniform(-1, 1, iter_npoints)\n y = np.random.uniform(-1, 1, iter_npoints)\n z = np.random.uniform(-1, 1, iter_npoints)\n\n r = np.sqrt(x*x+y*y+z*z)\n in_sphere = r < 1.0\n\n r = r[in_sphere]\n x = x[in_sphere]/r\n y = y[in_sphere]/r\n z = z[in_sphere]/r\n\n theta = np.arccos(z)\n phi = np.arctan2(y, x)\n ra = (np.degrees(phi) + 360) % 360\n decl = 90.0-np.degrees(theta)\n\n new_df = pd.DataFrame({'ra': ra, 'decl': decl})\n new_df = new_df[['ra', 'decl']]\n\n point_dfs.append(new_df)\n new_samples = ra.shape[0]\n accumulated_samples += new_samples\n info('completed %d samples' % accumulated_samples)\n\n points = pd.concat(point_dfs)\n if truncate:\n points = points[:n]\n\n points.reset_index(drop=True, inplace=True)\n points.index.rename('sample_idx', inplace=True)\n\n return points", "def randomPointOnSphere(r):\n x = np.random.normal()\n y = np.random.normal()\n z = np.random.normal()\n point = np.array([x, y, z])\n point *= r/(x**2 + y**2 + z**2)**.5\n return point", "def sphere_cart()\ndef simulator(nparticles, ninteractions, vacradius, vesradius):\n for i in range(nparticles):\n #neutron = neutron_func(i)\n energy = 14E6\n phi = calc_phi()\n theta = calc_theta()\n xneut = 0\n yneut = 0\n zneut = 0\n d = collision_distance(phi, theta, xneut, zneut)\n r = -np.log(random.random(seed))/sigma_t(energy)\n j = 0\n while (j <= ninteractions)\n xneut = sphere_cart(scatter(energy, A)[0:2])", "def spherical(self, x, y):\n\t\twhile x >= self.planet.width or x < 0 or y >= self.planet.height or y < 0:\n\t\t\t#change x if x is out of boundary\n\t\t\tif x >= self.planet.width:\n\t\t\t\tx -= (self.planet.width)\n\t\t\telif x < 0:\n\t\t\t\tx += (self.planet.width)\n\t\t\t#change y if y is out of boundary\n\t\t\tif y >= self.planet.height:\n\t\t\t\ty -= (self.planet.height)\n\t\t\telif y < 0:\n\t\t\t\ty += (self.planet.height)\n\t\treturn x, y", "def fix_sphere_h (center_x, center_y, center_z, radius, centers, radii, len_points, list_of_a):\n g_x = []\n g_y = []\n g_z = []\n points = [hydrogen_coord_gen(center_x, center_y, center_z, radius) for i in range(0, len_points)] \n x = [points[i][0] for i in range(0, len(points))] \n y = [points[i][1] for i in range(0, len(points))]\n z = [points[i][2] for i in 
range(0, len(points))]\n for i in range(0, len(points)):\n check = 0\n check_b = 0\n j = 0\n while (j <= (len(centers) - 1) and (check == 0)): \n if (calculate_3D_distance_2_centers(x[i], y[i], z[i], centers[j][0], centers[j][1], centers[j][2]) < radii[j]):\n check += 1\n j += 1\n h = 0\n while ((check_b == 0) and (h <= len(list_of_a) -1)):\n if (calculate_3D_distance_2_centers(x[i], y[i], z[i], list_of_a[h].x, list_of_a[h].y, list_of_a[h].z) <= 1.50): \n check_b += 1\n h += 1\n if ((check == 0) and (check_b == 0)):\n g_x.append(x[i])\n g_y.append(y[i])\n g_z.append(z[i])\n return g_x, g_y, g_z", "def test_points_on_1sphere_8x():\n points = generate.points_on_1sphere(8, 'x')\n assert np.allclose(points[0], cst.quat1)\n assert np.allclose(points[2], cst.quatx90)\n assert np.allclose(points[4], cst.quatx)", "def _fast_sphere_pattern(n, radius):\n phi = (1 + np.sqrt(5)) / 2\n long_incr = 2*np.pi / phi\n dz = 2.0 / float(n)\n bands = np.arange(n)\n z = bands * dz - 1.0 + (dz/2.0)\n r = np.sqrt(1.0 - z*z)\n az = bands * long_incr\n x = r * np.cos(az)\n y = r * np.sin(az)\n points = np.column_stack((x, y, z)) * np.asarray([radius])\n\n return points", "def sphere_generator():\n\n sphericalRadius = np.sqrt(N / (4 * np.pi * pointDensity))\n sphericalThreshold = sphericalRadius * np.arccos(1 - 2 * thresholdFrac)\n\n data_sphere = []\n # np.random.seed(2020)\n for r in range(num_graphs):\n coords = sample_spherical(N, sphericalRadius, 3)\n # computes the adjacency matrix\n Adj_Matrix = np.zeros((N, N))\n for i in range(N):\n for j in range(N):\n a = coords[:, i]\n b = coords[:, j]\n dot_prod = np.dot(a, b)/sphericalRadius**2\n dot_prod = min(dot_prod, 1) # <-- sometimes np.dot returns 1.00000000002, messing up np.arccos()\n\n \"\"\" note that when np.arrcos gets 1, it returns a nan \"\"\"\n theta = np.arccos(dot_prod) # gets the angle between a and b (in radians)\n\n # ij_dist = np.linalg.norm(a-b) # calculate euclidean distance\n ij_dist = sphericalRadius * theta # arclength distance\n if ij_dist < sphericalThreshold:\n Adj_Matrix[i, j] = 1 # nodes that are connected are assigned a 1 in the matrix\n\n data_sphere.append(Adj_Matrix)\n\n return data_sphere", "def new_plummer_spatial_distribution(number_of_particles, \n total_mass = 1.0|nbody_system.mass, \n virial_radius = 1.0|nbody_system.length,\n mass_cutoff = 0.999,\n **keyword_arguments): # optional arguments for UniformSphericalDistribution\n particles = Particles(number_of_particles)\n particle_mass = total_mass * 1.0 / number_of_particles\n particles.mass = particle_mass\n x, y, z = UniformSphericalDistribution(\n number_of_particles, mass_cutoff=mass_cutoff, **keyword_arguments).result\n \n # Now scale the uniformly distributed particle positions to match the radial density profile\n r_old = numpy.sqrt(x*x + y*y + z*z)\n scale_factor = (0.1875 * numpy.pi * virial_radius.number) / numpy.sqrt(1.0 - r_old**2)\n particles.x = scale_factor * x | virial_radius.unit\n particles.y = scale_factor * y | virial_radius.unit\n particles.z = scale_factor * z | virial_radius.unit\n return particles", "def compute_h3_pn_spheres(S: List[Vec3], n: int) -> Generator[Tuple[Vec3, int], None, None]:\n # It just occurred to me that we could speed up the computation of P_n for\n # generating sets containing 0 by computing the nth dilate iteratively on S\n # \\ {(0, 0, 0)}.\n #\n # We could maybe generalize this to any set where there's some (0, 0, z)\n # vectors? Compute the... hm. I think we can do this. 
Let S' be the\n # generating set with all the z-vectors removed. Now... Compute the dilates\n # P_k(S') for k = 1, 2, ..., n. For k = 1, ..., n - 1, shift up P_k(S') by\n # z_1 + ... + z_(n - k) for some nonzero z values that you can get in S.\n #\n # This might not be that much faster if you don't have zero. Ugh. Also I\n # might have written it down wrong. Or thought wrong. Ugh.\n #\n # Instead of having to compute products for |S|^n things, you instead have\n # to compute products for (|S| - 1) + (|S| - 1)^2 + ... + (|S| - 1)^n\n # things. If you have c of the z vectors then... I think would be\n # (|S| - 1) + (|S| - 1)^2 + ... + (|S| - c)^n products\n assert n >= 0\n if n == 0:\n return set()\n \n yield from ((s, 1) for s in S)\n \n # Is it more efficient to compute sums over a fresh product every time,\n # or to do something else?\n #\n # If we compute the product sums every time, it should take on the order\n # of sum(i = 1 to k) |S|^k time.\n #\n #\n # If we instead take p_1 = S, then compute p_(i+1) as\n #\n # {v * s | v in P_i, s in S},\n #\n # then what's the number of things we have to process?\n #\n # p_2 will take the same number of operations. It's p_3 where things get\n # interesting. Maybe, anyway. Here we do |p_2| * |S| operations. Now...\n # ah! We have to have |p_2| <= |S|, so in fact this *should* be more\n # efficient, because it's reasonably likely that two words will map to\n # the same vector for any reasonably complex generating set. (So\n # |p_k| < |S^k| for k >= k_0.)\n #\n # ALSO, because we can reuse our previous work, we don't need to do this\n # triangular summing thing (where we start over fresh each time), which\n # should save time also.\n seen = set(tuple(s) for s in S)\n p_k = S\n for k in range(2, n+1):\n words = it.product(p_k, S)\n p_k = list(map(vectuple_h3_sum, words))\n\n for vec in p_k:\n tup = tuple(vec)\n if tup not in seen:\n seen.add(tup)\n yield (vec, k)", "def sphere(geometry,\n psd_name,psd_shape,psd_loc,psd_scale,\n pore_seed='pore.seed',\n psd_offset=0,\n **kwargs):\n import scipy.stats as spst\n prob_fn = getattr(spst,psd_name)\n P = prob_fn(psd_shape,loc=psd_loc,scale=psd_scale)\n value = P.ppf(geometry[pore_seed])+psd_offset\n return value", "def project_to_sphere(points):\n # for uv, the sphere: r=1, azimuth(phi): 2*pi*u, elevation(theta): 2*pi*v\n # theta is elevation, phi is azimuth\n r, theta, phi = cs.cart2sp(x=points[:, 0], y=points[:, 1], z=points[:, 2])\n # logger.info(f\"number of zero points in r: {np.sum(r==0)}\")\n assert np.sum(r == 0) == 0, \"points contains zeros\"\n points_sphere = points / r.reshape(-1, 1)\n return points_sphere, r, theta, phi\n\n # r, theta, phi = cs.cart2sp(x=1, y=1, z=1)\n\n # # spherical to cartesian\n # x, y, z = cs.sp2cart(r=1, theta=np.pi/4, phi=np.pi/4)\n\n # # cartesian to cylindrical\n # r, phi, z = cs.cart2cyl(x=1, y=1, z=1)", "def plane_sphere(p, s):\n\n p.normalize()\n\n d = dot(s.o-p.o, p.n)\n\n if d > s.r:\n return False\n else:\n return (s.o - d*p.n, sqrt(s.r*s.r - d*d))", "def rndSphere():\n sph = [0,0,0]\n \n sph[2] = random.uniform(-1.0,1.0)\n z2 = math.sqrt(1.0 - sph[2]*sph[2])\n phi = (2. 
* math.pi) * random.random()\n sph[0] = z2 * math.cos(phi)\n sph[1] = z2 * math.sin(phi)\n \n return sph", "def nsphere_volume(n, r):\n return math.pi ** (n / 2) * (r ** n) / gamma(n / 2 + 1)", "def rand_sphere(d0):\n p1 = np.random.randn(d0, 3)\n m = np.sqrt(np.sum(p1**2, axis=1))\n\n rad = pow(np.random.rand(d0), 1.0 / 3.0)\n return (p1.T * (rad / m)).T", "def exp_map(b, p):\n \"\"\"\n EXP_MAP The exponential map for n-spheres\n b is the base point (vector in R^n), norm(b)=1\n p is a point on the tangent plane to the hypersphere at b (also a vector in R^n)\n\n method can be 0 or 1:\n 0: hypersphere (e.g. quaternions)\n 1: dual quaternion\n \"\"\"\n if np.allclose(b, p):\n x = b\n else:\n theta = np.linalg.norm(b - p)\n dminusbx = np.sqrt(2 - 2. * np.cos(np.pi - theta))\n l = 2. * np.sin(theta / 2)\n alpha = np.arccos((4 + dminusbx ** 2 - l ** 2) / (4 * dminusbx))\n dpb = 2. * np.tan(alpha)\n v = b + ((p - b) / np.linalg.norm(p - b)) * dpb\n x = ((v + b) / np.linalg.norm(v + b)) * dminusbx - b\n\n return x", "def pointsOn1Sphere(numPts, rotationAxis):\n points = np.zeros((numPts, 4))\n incAng = 360. / numPts\n myAng = 0\n if rotationAxis == 'y':\n for i in range(numPts):\n points[i, :] = euler2quaternion(0, myAng * np.pi / 180, 0)\n myAng += incAng\n elif rotationAxis == 'z':\n for i in range(numPts):\n points[i, :] = euler2quaternion(0, 0, myAng * np.pi / 180)\n myAng += incAng\n return points", "def cartesian2spherical(coords):\n sphere = np.zeros(coords.shape)\n xy_sq = coords[:, 0]**2 + coords[:, 1]**2\n sphere[:, 0] = np.sqrt(xy_sq + coords[:, 2]**2)\n sphere[:, 1] = np.arctan2(coords[:, 1], coords[:, 0])\n sphere[:, 2] = np.arctan2(np.sqrt(xy_sq), coords[:, 2])\n return sphere", "def f(points):\n distances = np.zeros((points.shape[0],1))\n for i in range(len(points)):\n #print points[i,:], points[i,:]**2\n distances[i] = np.sqrt(np.sum(points[i,:]**2))\n return distances * np.sin(distances)", "def sphere(\n network,\n pore_diameter='pore.diameter'\n):\n return 4/3*_pi*(network[pore_diameter]/2)**3", "def _spherical_to_cartesian_fast(ra, dec, threads):\n import numexpr as ne\n\n #nthreads = ne.detect_number_of_cores()\n nthreads = threads\n ne.set_num_threads(nthreads)\n\n pi = math.pi\n rar = ne.evaluate('ra*pi/180.0')\n decr = ne.evaluate('dec*pi/180.0')\n\n hold1=ne.evaluate('cos(decr)') \n\n x = ne.evaluate('cos(rar) * hold1')\n y = ne.evaluate('sin(rar) * hold1')\n z = ne.evaluate('sin(decr)')\n \n return x, y, z", "def random_projection_cosine_split(data, indices, rng_state):\n dim = data.shape[1]\n\n # Select two random points, set the hyperplane between them\n left_index = tau_rand_int(rng_state) % indices.shape[0]\n right_index = tau_rand_int(rng_state) % indices.shape[0]\n right_index += left_index == right_index\n right_index = right_index % indices.shape[0]\n left = indices[left_index]\n right = indices[right_index]\n\n left_norm = norm(data[left])\n right_norm = norm(data[right])\n \n if left_norm == 0.0:\n left_norm = 1.0\n \n if right_norm == 0.0:\n right_norm = 1.0\n\n # Compute the normal vector to the hyperplane (the vector between\n # the two points)\n hyperplane_vector = np.empty(dim, dtype=np.float32)\n\n for d in range(dim):\n hyperplane_vector[d] = ((data[left, d] / left_norm) -\n (data[right, d] / right_norm))\n\n hyperplane_norm = norm(hyperplane_vector)\n if hyperplane_norm == 0.0:\n hyperplane_norm = 1.0\n \n for d in range(dim):\n hyperplane_vector[d] = hyperplane_vector[d] / hyperplane_norm\n\n # For each point compute the margin (project into normal 
vector)\n # If we are on lower side of the hyperplane put in one pile, otherwise\n # put it in the other pile (if we hit hyperplane on the nose, flip a coin)\n n_left = 0\n n_right = 0\n side = np.empty(indices.shape[0], np.int8)\n for i in range(indices.shape[0]):\n margin = 0.0\n for d in range(dim):\n margin += hyperplane_vector[d] * data[indices[i], d]\n\n if margin == 0:\n side[i] = tau_rand_int(rng_state) % 2\n if side[i] == 0:\n n_left += 1\n else:\n n_right += 1\n elif margin > 0:\n side[i] = 0\n n_left += 1\n else:\n side[i] = 1\n n_right += 1\n\n # Now that we have the counts allocate arrays\n indices_left = np.empty(n_left, dtype=np.int64)\n indices_right = np.empty(n_right, dtype=np.int64)\n\n # Populate the arrays with indices according to which side they fell on\n n_left = 0\n n_right = 0\n for i in range(side.shape[0]):\n if side[i] == 0:\n indices_left[n_left] = indices[i]\n n_left += 1\n else:\n indices_right[n_right] = indices[i]\n n_right += 1\n\n return indices_left, indices_right", "def random_spherical(R, N=10000, R0=0):\n\tu1 = numpy.random.random(size=N)\n\tr = u1 ** (1./3.) * R + R0\n\tu2 = numpy.random.random(size=N) * 2 -1\n\tphi = numpy.random.random(size=N) * 2 * math.pi\n\tx = numpy.sqrt(1-u2**2) * numpy.cos(phi) * r\n\ty = numpy.sqrt(1-u2**2) * numpy.sin(phi) * r\n\tz = u2 * r\n\treturn x, y, z", "def sphvol(r):\n return (4./3.)*np.pi*(r**3.)", "def mHollowSphere(a=3, b=6, N=250):\n a = float(a)\n b = float(b)\n N = int(N)\n rmin = 0\n rmax = 2*b\n dr = (rmax-rmin)/float(N)\n r = np.zeros((N))\n g = np.zeros((N))\n for i in range(N):\n r[i] = rmin+i*dr\n g[i] = 0\n if r[i] >= a and r[i] < b:\n g[i] = (r[i]-a)/(b-a)/np.power(r[i], 2)\n elif r[i] >= b:\n g[i] = 1/np.power(r[i], 2)\n return r, g", "def sectorsphere(self, x):\r\n return sum(x**2) + (1e6-1) * sum(x[x<0]**2)", "def spherical_distances(x, y):\n # Compute the norms of all points, we do NOT check they actually all lie on\n # the same sphere (that's the caller's responsibility).\n \n xn = np.sqrt((x**2).sum(axis=1))\n yn = np.sqrt((y**2).sum(axis=1))\n ang_cos = np.dot(x, y.T)/(xn[:, None]*yn[None, :])\n # Protect against numerical noise giving us cosine values outside the -1,1\n # range, where arccos would return nans.\n ang_cos = np.clip(ang_cos, -1, 1)\n\n return xn[:, None]*np.arccos(ang_cos)", "def sphere_volume(r):\n return (4/3) * 3.14159 * r**3", "def fibonacci_sphere(nr_points, R=1):\n assert nr_points % 2 == 1, \"The number of points must be odd\"\n points = []\n # The golden ratio\n phi = (1 + math.sqrt(5)) / 2.\n N = int((nr_points - 1)/2)\n for i in range(-N, N+1):\n lat = math.asin(2 * i / nr_points)\n lon = 2 * math.pi * i / phi\n x = R * math.cos(lat) * math.cos(lon)\n y = R * math.cos(lat) * math.sin(lon)\n z = R * math.sin(lat)\n points.append((x, y, z))\n return np.array(points, dtype = float)", "def generatePoints(N, k=2, scale=1, same_quadrant=False):\n if same_quadrant:\n rands = [[np.random.uniform(0, scale) * np.random.rand() for _ in range(k)] for i in range(N)]\n else:\n rands = [[np.random.uniform(-scale, scale) * np.random.rand() for _ in range(k)] for i in range(N)]\n point_list = []\n for rand in rands:\n # lastItem = math.sqrt(sum([1 + item**2 for item in rand]))\n lastItem = math.sqrt(1 + np.dot(rand, rand))\n rand.append(lastItem)\n point_list.append(rand)\n return np.array(point_list)", "def distance_from_sphere(self, points, params, sqrt=False):\n center, radius = params\n center = center.reshape((1, 3))\n distance = (torch.norm(points - center, p=2, dim=1) - radius) 
** 2\n if sqrt:\n distance = guard_sqrt(distance)\n\n if self.reduce:\n distance = torch.mean(distance)\n return distance", "def generate_base_points(num_points, domain_size, density_map=None,\n reflect=True):\n\n def _try_points(num_points, prob):\n prob = np.atleast_3d(prob)\n prob = np.array(prob)/np.amax(prob) # Ensure prob is normalized\n base_pts = []\n N = 0\n while N < num_points:\n pt = np.random.rand(3) # Generate a point\n # Test whether to keep it or not\n [indx, indy, indz] = np.floor(pt*np.shape(prob)).astype(int)\n if np.random.rand(1) <= prob[indx][indy][indz]:\n base_pts.append(pt)\n N += 1\n base_pts = np.array(base_pts)\n return base_pts\n\n if len(domain_size) == 1: # Spherical\n domain_size = np.array(domain_size)\n r = domain_size[0]\n if density_map is None:\n # Make an image of a sphere filled with ones and use _try_points\n density_map = np.ones([41, 41, 41])\n density_map[20, 20, 20] = 0\n density_map = spim.distance_transform_edt(density_map) < 20\n base_pts = _try_points(num_points, density_map)\n # Convert to spherical coordinates\n X, Y, Z = np.array(base_pts - [0.5, 0.5, 0.5]).T\n r = 2*np.sqrt(X**2 + Y**2 + Z**2)*domain_size[0]\n theta = 2*np.arctan(Y/X)\n phi = 2*np.arctan(np.sqrt(X**2 + Y**2)/Z)\n # Trim points outside the domain (from improper prob images)\n inds = r <= domain_size[0]\n [r, theta, phi] = [r[inds], theta[inds], phi[inds]]\n # Reflect base points across perimeter\n if reflect:\n r, theta, phi = reflect_base_points(np.vstack((r, theta, phi)),\n domain_size)\n # Convert to Cartesean coordinates\n X, Y, Z = from_spherical(r, theta, phi)\n base_pts = np.vstack([X, Y, Z]).T\n\n elif len(domain_size) == 2: # Cylindrical or Disk\n domain_size = np.array(domain_size)\n if density_map is None:\n density_map = np.ones([41, 41, 41])\n density_map[20, 20, :] = 0\n if domain_size[1] == 0: # Disk\n density_map = density_map[:, :, 0]\n density_map = spim.distance_transform_edt(density_map) < 20\n base_pts = _try_points(num_points, density_map)\n # Convert to cylindrical coordinates\n X, Y, Z = np.array(base_pts - [0.5, 0.5, 0]).T # Center on z-axis\n r = 2*np.sqrt(X**2 + Y**2)*domain_size[0]\n theta = 2*np.arctan(Y/X)\n z = Z*domain_size[1]\n # Trim points outside the domain (from improper prob images)\n inds = r <= domain_size[0]\n [r, theta, z] = [r[inds], theta[inds], z[inds]]\n inds = ~((z > domain_size[1]) + (z < 0))\n [r, theta, z] = [r[inds], theta[inds], z[inds]]\n if reflect:\n r, theta, z = reflect_base_points(np.vstack([r, theta, z]),\n domain_size)\n # Convert to Cartesean coordinates\n X, Y, Z = from_cylindrical(r, theta, z)\n base_pts = np.vstack([X, Y, Z]).T\n\n elif len(domain_size) == 3: # Cube or square\n if density_map is None:\n density_map = np.ones([41, 41, 41])\n if domain_size[2] == 0:\n density_map = density_map[:, :, 0]\n base_pts = _try_points(num_points, density_map)\n base_pts = base_pts*domain_size\n if reflect:\n base_pts = reflect_base_points(base_pts, domain_size)\n\n return base_pts", "def sphere_volume(r):\n\treturn 4/3. 
* math.pi * r ** 3", "def sample_hypersphere(n_samples, sample_shape, radius, l_norm=2, mode='sphere', sample_gen=None, seed=None):\n\n if sample_gen is not None:\n assert seed is None, \"Can't provide individual seeds if using the multi-threaded generator.\"\n assert sample_shape == sample_gen.shape\n\n # Get precalculated samples from the generator\n gauss = np.empty(shape=(n_samples, np.prod(sample_shape)), dtype=np.float64)\n for i in range(n_samples):\n gauss[i] = sample_gen.get_normal().reshape(-1)\n else:\n if seed is not None:\n np.random.seed(seed)\n gauss = np.random.normal(size=(n_samples, np.prod(sample_shape)))\n\n # Norm to\n norm = np.linalg.norm(gauss, ord=l_norm, axis=1)\n perturbation = (gauss / norm[:, np.newaxis])\n\n # Sphere: sample only the surface of the hypersphere.\n # Ball: sample inside the sphere. Note: this is probably not uniform.\n if mode == 'sphere':\n perturbation *= radius\n elif mode == 'ball':\n perturbation *= np.random.uniform(low=0.0, high=radius, size=(n_samples, 1))\n else:\n raise ValueError(\"Unknown sampling mode.\")\n\n perturbation = np.reshape(perturbation, (n_samples,) + sample_shape)\n\n return perturbation", "def random_spherepos(n):\n signs = np.sign(rand.uniform(-1,1,size=n))\n thetas = Angle(np.arccos(rand.uniform(size=n)*signs),unit=u.rad) #random b/w 0 and 180\n phis = Angle(rand.uniform(0,2*np.pi,size=n),unit=u.rad)\n c = SkyCoord(phis,thetas,1,representation='physicsspherical')\n return c", "def Sphere(self,radius=1.0, npoints=10):\n\n # RESET MESH\n self.__reset__()\n\n from math import pi, cos, sin\n from meshpy.tet import MeshInfo, build\n from meshpy.geometry import generate_surface_of_revolution, EXT_OPEN, GeometryBuilder\n\n r = radius\n\n points = npoints\n dphi = pi/points\n\n def truncate(r):\n if abs(r) < 1e-10:\n return 0\n else:\n return r\n\n rz = [(truncate(r*sin(i*dphi)), r*cos(i*dphi)) for i in range(points+1)]\n\n geob = GeometryBuilder()\n geob.add_geometry(*generate_surface_of_revolution(rz,\n closure=EXT_OPEN, radial_subdiv=10))\n\n mesh_info = MeshInfo()\n geob.set(mesh_info)\n\n mesh = build(mesh_info)\n\n self.points = np.asarray(mesh.points)\n self.elements = np.asarray(mesh.elements)\n # self.faces = np.asarray(mesh.faces)\n # self.edges = np.asarray(self.edges)\n self.nelem = self.elements.shape[0]\n self.element_type = \"tet\"\n\n\n # GET EDGES & FACES - NONE ASSIGNMENT IS NECESSARY OTHERWISE IF FACES/EDGES ALREADY EXIST\n # THEY WON'T GET UPDATED\n self.faces = None\n self.edges = None\n self.GetBoundaryFacesTet()\n self.GetBoundaryEdgesTet()\n\n # CHECK MESH\n points = self.points[np.unique(self.faces),:]\n if not np.isclose(np.linalg.norm(points,axis=1),radius).all():\n raise ValueError(\"MeshPy could not construct a valid linear mesh for sphere\")", "def calc_hypersphere_volume(r: float, n: int) -> float:\n return (math.pi ** (n / 2) * r ** n) / gamma((n / 2) + 1)", "def calculate_pore_shape(elements, coordinates, adjust=1, increment=0.1,\n **kwargs):\n # Copy the coordinates as will perform many opertaions on them\n coordinates = deepcopy(coordinates)\n # Center of our cartesian system is always at origin\n origin = np.array([0, 0, 0])\n # Initial center of mass to reverse translation at the end\n initial_com = center_of_mass(elements, coordinates)\n # We just shift the cage to the origin.\n coordinates = shift_com(elements, coordinates)\n # We create an array of vdw radii of elements.\n elements_vdw = np.array([[atomic_vdw_radius[x.upper()]] for x in elements])\n # We calculate maximum diameter of a 
molecule to determine the radius\n # of a sampling sphere neccessary to enclose the whole molecule.\n shpere_radius = max_dim(elements, coordinates)[2]/2\n sphere_surface_area = 4 * np.pi * shpere_radius**2\n # Here we determine the number of sampling points necessary for a fine\n # sampling. Smaller molecules require more finner density of sampling\n # points on the sampling sphere's surface, whereas largen require less.\n # This formula was created so that larger molecule do not take much longer\n # to analyse, as number_sampling_points*length_of_sampling_vectors\n # results in quadratic increase of sampling time. The 250 factor was\n # specificly determined to produce close to 1 sampling point /Angstrom^2\n # for a sphere of radius ~ 24 Angstrom. We can adjust how fine is the\n # sampling by changing the adjust factor.\n number_of_points = int(np.log10(sphere_surface_area) * 250 * adjust)\n # Here I use code by Alexandre Devert for spreading points on a sphere:\n # http://blog.marmakoide.org/?p=1\n golden_angle = np.pi * (3 - np.sqrt(5))\n theta = golden_angle * np.arange(number_of_points)\n z = np.linspace(1 - 1.0 / number_of_points, 1.0 / number_of_points - 1.0,\n number_of_points)\n radius = np.sqrt(1 - z * z)\n points = np.zeros((number_of_points, 3))\n points[:, 0] = radius * np.cos(theta) * shpere_radius\n points[:, 1] = radius * np.sin(theta) * shpere_radius\n points[:, 2] = z * shpere_radius\n # Here we will compute the eps parameter for the sklearn.cluster.DBSCAN\n # (3-dimensional spatial clustering algorithm) which is the mean distance\n # to the closest point of all points.\n values = []\n tree = KDTree(points)\n for i in points:\n dist, ind = tree.query(i.reshape(1, -1), k=10)\n values.extend(dist)\n mean_distance = np.mean(values)\n # The best eps is parametrized when adding the mean distance and it's root.\n eps = mean_distance + mean_distance**0.5\n # Here we either run the sampling points vectors analysis in serial\n # or parallel. The vectors that go through molecular voids return\n # as analysed list with the increment at vector's path with largest\n # included sphere, coordinates for this narrow channel point. vectors\n # that find molecule on theirs path are return as NoneType object.\n results = [\n vector_analysis_pore_shape(point, coordinates, elements_vdw)\n for point in points\n ]\n results_cleaned = [x for x in results if x is not None]\n ele = np.array(['X'] * len(results_cleaned))\n coor = np.array(results_cleaned)\n return coor", "def new_spherical_particle_distribution(number_of_particles, \n radial_density_func = None, # not yet supported, specify radii and densities tables:\n radii = None, densities = None, \n total_mass = None, size = None, # if total_mass is not given, it will be deduced from size or max(radii)\n **keyword_arguments): # optional arguments for UniformSphericalDistribution\n if (radii is None) or (densities is None):\n raise AmuseException(\"Using an arbitrary radial density function is not yet \"\n \"supported. 
Radius and density tables must be passed instead.\")\n \n interpolator = EnclosedMassInterpolator()\n interpolator.initialize(radii, densities)\n if total_mass is None:\n total_mass = interpolator.get_enclosed_mass(size or max(radii))\n particles = Particles(number_of_particles)\n particle_mass = total_mass * 1.0 / number_of_particles\n particles.mass = particle_mass\n x, y, z = UniformSphericalDistribution(number_of_particles, **keyword_arguments).result\n # Now scale the uniformly distributed particle positions to match the radial density profile\n r_old = numpy.sqrt(x*x + y*y + z*z)\n indices = numpy.argsort(r_old)\n if r_old[indices[0]] == 0.0:\n r_old[indices[0]] = 1.0\n f_scale = interpolator.get_radius_for_enclosed_mass(\n (numpy.arange(0.5, number_of_particles + 0.5) | units.none) * particle_mass) / r_old[indices]\n particles.x = (f_scale * x[indices]).as_quantity_in(radii.unit)\n particles.y = (f_scale * y[indices]).as_quantity_in(radii.unit)\n particles.z = (f_scale * z[indices]).as_quantity_in(radii.unit)\n return particles", "def spread(self, n=2):\n for point in self.points:\n point *= n", "def center_to_sphere(places, size, resolution=0.50, min_value=np.array([0., -50., -4.5]), scale=4, x=(0, 90), y=(-50, 50), z=(-4.5, 5.5)):\n x_logical = np.logical_and((places[:, 0] < x[1]), (places[:, 0] >= x[0]))\n y_logical = np.logical_and((places[:, 1] < y[1]), (places[:, 1] >= y[0]))\n z_logical = np.logical_and((places[:, 2] < z[1]), (places[:, 2] >= z[0]))\n xyz_logical = np.logical_and(x_logical, np.logical_and(y_logical, z_logical))\n center = places.copy()\n center[:, 2] = center[:, 2] + size[:, 0] / 2.\n sphere_center = ((center[xyz_logical] - min_value) / (resolution * scale)).astype(np.int32)\n return sphere_center", "def test_random_sphere_vector():\n\ttest_vector = o_gen_instance.generate_random_sphere_vector()\n\tassert isinstance(test_vector, np.ndarray)\n\tassert test_vector.shape == (3,)\n\tfor component in test_vector:\n\t\tassert component != 0.\n\tassert np.isclose(np.linalg.norm(test_vector), 1.0)", "def iterate_center_of_mass(sphere, inner_radius, stepsize=0.05,\n com_kwargs=None):\n\n if com_kwargs is None:\n com_kwargs = {}\n\n yield sphere\n while (sphere.radius > inner_radius):\n com = sphere.quantities.center_of_mass(**com_kwargs)\n try:\n sphere = sphere.ds.sphere(com, (1-stepsize) * sphere.radius)\n yield sphere\n except YTSphereTooSmall:\n yield None\n break", "def sphere_centers(r_x, r_y, r_z):\n a_ccs_p_trans_m = hom_translation_matrix(\n t_x=0.265, t_y=0, t_z=0.014)\n a_ccs_p_rot_m = hom_rotation(x_axis_rotation_matrix(r_x) @\n y_axis_rotation_matrix(r_y) @\n z_axis_rotation_matrix(r_z))\n a_p_sph_1_2 = hom_translation_matrix(\n t_x=0.015, t_y=0.029, t_z=-0.0965)\n a_p_sph_2_2 = hom_translation_matrix(\n t_x=0.015, t_y=-0.029, t_z=-0.0965)\n\n a_ccs_ = a_ccs_p_trans_m @ a_ccs_p_rot_m\n a_c1 = a_ccs_ @ a_p_sph_1_2\n a_c2 = a_ccs_ @ a_p_sph_2_2\n\n return get_translation(a_c1), get_translation(a_c2)", "def test_euclidean_scale(self):\n\n s = space(curvature=0)\n\n magic = 77773.333773777773733\n for mul in (2, 5, 1/3, 1/11, magic, 1/magic):\n for name, dim in (\n ('sphere_s1', 1),\n ('sphere_v2', 2),\n ('sphere_s2', 2),\n ('sphere_v3', 3)\n ):\n self.assertTrue(isclose(\n getattr(s, name)(1) * mul**dim,\n getattr(s, name)(mul)\n ))", "def sphere_generator(command_line_arguments):\n start_simulation(parse_command_line_arguments(command_line_arguments))", "def fit_hypersphere(data, method=\"Hyper\"):\n num_points = len(data)\n# print >>stderr, \"DEBUG: 
num_points=\", num_points\n \n if num_points==0:\n return (0,None)\n if num_points==1:\n return (0,data[0])\n dimen = len(data[0]) # dimensionality of hypersphere\n# print >>stderr, \"DEBUG: dimen=\", dimen\n \n if num_points<dimen+1:\n raise ValueError(\\\n \"Error: fit_hypersphere needs at least {} points to fit {}-dimensional sphere, but only given {}\".format(dimen+1,dimen,num_points))\n \n # central dimen columns of matrix (data - centroid)\n central = np.matrix(data, dtype=float) # copy the data\n centroid = np.mean(central, axis=0)\n for row in central:\n row -= centroid\n# print >>stderr, \"DEBUG: central=\", repr(central)\n\n # squared magnitude for each centered point, as a column vector\n square_mag= [sum(a*a for a in row.flat) for row in central] \n square_mag = np.matrix(square_mag).transpose()\n# print >>stderr, \"DEBUG: square_mag=\", square_mag\n \n if method==\"Taubin\":\n # matrix of normalized squared magnitudes, data\n mean_square = square_mag.mean()\n data_Z = np.bmat( [[(square_mag-mean_square)/(2*sqrt(mean_square)), central]])\n # print >> stderr, \"DEBUG: data_Z=\",data_Z\n u,s,v = linalg.svd(data_Z, full_matrices=False)\n param_vect= v[-1,:]\n params = [ x for x in np.asarray(param_vect)[0]] # convert from (dimen+1) x 1 matrix to list\n params[0] /= 2*sqrt(mean_square)\n params.append(-mean_square*params[0])\n params=np.array(params)\n \n else:\n # matrix of squared magnitudes, data, 1s\n data_Z = np.bmat( [[square_mag, central, np.ones((num_points,1))]])\n # print >> stderr, \"DEBUG: data_Z=\",data_Z\n\n # SVD of data_Z\n # Note: numpy's linalg.svd returns data_Z = u * s * v\n # not u*s*v.H as the Release 1.4.1 documentation claims.\n # Newer documentation is correct.\n u,s,v = linalg.svd(data_Z, full_matrices=False)\n # print >>stderr, \"DEBUG: u=\",repr(u)\n # print >>stderr, \"DEBUG: s=\",repr(s)\n # print >>stderr, \"DEBUG: v=\",repr(v)\n # print >>stderr, \"DEBUG: v.I=\",repr(v.I)\n\n if s[-1]/s[0] < 1e-12:\n # singular case\n # param_vect as (dimen+2) x 1 matrix\n param_vect = v[-1,:]\n # Note: I get last ROW of v, while Chernov claims last COLUMN,\n # because of difference in definition of SVD for MATLAB and numpy\n\n # print >> stderr, \"DEBUG: singular, param_vect=\", repr(param_vect)\n # print >> stderr, \"DEBUG: data_Z*V=\", repr(data_Z*v)\n # print >> stderr, \"DEBUG: data_Z*VI=\", repr(data_Z*v.I)\n # print >> stderr, \"DEBUG: data_Z*A=\", repr(data_Z*v[:,-1])\n else: \n Y = v.H*np.diag(s)*v\n Y_inv = v.H*np.diag([1./x for x in s])*v\n # print >>stderr, \"DEBUG: Y=\",repr(Y)\n # print >>stderr, \"DEBUG: Y.I=\",repr(Y.I), \"\\nY_inv=\",repr(Y_inv)\n #Ninv is the inverse of the constraint matrix, after centroid has been removed\n Ninv = np.asmatrix(np.identity(dimen+2, dtype=float))\n if method==\"Hyper\":\n Ninv[0,0] = 0\n Ninv[0,-1]=0.5\n Ninv[-1,0]=0.5\n Ninv[-1,-1] = -2*square_mag.mean()\n elif method==\"Pratt\":\n Ninv[0,0] = 0\n Ninv[0,-1]=-0.5\n Ninv[-1,0]=-0.5\n Ninv[-1,-1]=0\n else: \n raise ValueError(\"Error: unknown method: {} should be 'Hyper', 'Pratt', or 'Taubin'\")\n # print >> stderr, \"DEBUG: Ninv=\", repr(Ninv)\n\n # get the eigenvector for the smallest positive eigenvalue\n matrix_for_eigen = Y*Ninv*Y\n # print >> stderr, \"DEBUG: {} matrix_for_eigen=\\n{}\".format(method, repr(matrix_for_eigen))\n eigen_vals,eigen_vects = linalg.eigh(matrix_for_eigen)\n # print >> stderr, \"DEBUG: eigen_vals=\", repr(eigen_vals)\n # print >> stderr, \"DEBUG: eigen_vects=\", repr(eigen_vects)\n\n positives = [x for x in eigen_vals if x>0]\n if 
len(positives)+1 != len(eigen_vals):\n # raise ValueError(\"Error: for method {} exactly one eigenvalue should be negative: {}\".format(method,eigen_vals))\n print>>stderr, \"Warning: for method {} exactly one eigenvalue should be negative: {}\".format(method,eigen_vals)\n smallest_positive = min(positives)\n # print >> stderr, \"DEBUG: smallest_positive=\", smallest_positive\n # chosen eigenvector as 1 x (dimen+2) matrix\n A_colvect =eigen_vects[:,list(eigen_vals).index(smallest_positive)]\n # print >> stderr, \"DEBUG: A_colvect=\", repr(A_colvect)\n # now have to multiply by Y inverse\n param_vect = (Y_inv*A_colvect).transpose()\n # print >> stderr, \"DEBUG: nonsingular, param_vect=\", repr(param_vect) \n params = np.asarray(param_vect)[0] # convert from (dimen+2) x 1 matrix to array of (dimen+2)\n\n \n# print >> stderr, \"DEBUG: params=\", repr(params)\n radius = 0.5* sqrt( sum(a*a for a in params[1:-1])- 4*params[0]*params[-1])/abs(params[0])\n center = -0.5*params[1:-1]/params[0]\n#y print >> stderr, \"DEBUG: center=\", repr(center), \"centroid=\", repr(centroid)\n center += np.asarray(centroid)[0]\n return (radius,center)", "def two_sphere_system(\n omega: float,\n rot_axis: np.ndarray,\n size: int = 200,\n s1_center_rel: np.ndarray = np.array([0.2, 0.2, 0.2]),\n s1_radius_rel: float = 0.05,\n s2_center_rel: np.ndarray = np.array([-0.2, -0.2, -0.2]),\n s2_radius_rel: float = 0.06,\n) -> np.ndarray:\n # get the rotation object\n rot_axis = rot_axis / np.linalg.norm(rot_axis)\n rotation = R.from_rotvec(-omega * rot_axis)\n # calculate the rotated sphere centers\n # sphere 1\n s1_rel = rotation.apply(s1_center_rel)\n # sphere 2\n s2_rel = rotation.apply(s2_center_rel)\n # get the index grid\n # NOTE: extend the range to make sure the sphere is not rotated out of the volume\n # grid_x, grid_y, grid_z = np.mgrid[0:size, 0:size, 0:size]\n # remapping to compensate for the strange coordinate system in tomopy projector\n grid_y, grid_z, grid_x = np.mgrid[0:size, 0:size, 0:size]\n # rescale to [-0.5, 0.5]\n grid_x = grid_x / (size - 1) - 0.5\n grid_y = -(grid_y / (size - 1) - 0.5)\n grid_z = grid_z / (size - 1) - 0.5\n # init volume\n vol = np.zeros_like(grid_x)\n # mark the voxels of sphere 1 to be 1\n s1_dist_squared = (grid_x - s1_rel[0]) ** 2 + (grid_y - s1_rel[1]) ** 2 + (grid_z - s1_rel[2]) ** 2\n r1_squared = s1_radius_rel**2\n vol[s1_dist_squared < r1_squared] = 1.0\n # mark the voxels of sphere 2 to be 2\n s2_dist_squared = (grid_x - s2_rel[0]) ** 2 + (grid_y - s2_rel[1]) ** 2 + (grid_z - s2_rel[2]) ** 2\n r2_squared = s2_radius_rel**2\n vol[s2_dist_squared < r2_squared] = 1.0\n return vol", "def nearest_sphere_surface(x_input, y_input, z_input):\n\n vm = math.sqrt(sum([x_input**2, y_input**2, z_input**2]))\n return (x_input/vm, y_input/vm, z_input/vm)", "def run_lpme(self) -> np.array:\n q = self.sphere.n\n signs = []\n for i in range(q):\n a = np.ones(q)\n a = a / np.sqrt(q)\n a_prime = np.copy(a)\n a_prime[i] = -a_prime[i]\n\n z_a = a * self.sphere.radius + self.sphere.origin\n z_a_prime = a_prime * self.sphere.radius + self.sphere.origin\n\n if self.oracle.compare(z_a, z_a_prime):\n signs.append(1.0)\n else:\n signs.append(-1.0)\n\n orthants = initialize_orthants(signs)\n\n # number of cycles\n nc = 4\n theta_list = [(orth.start + orth.stop) / 2 for orth in orthants]\n for _ in range(0, nc):\n for j in range(0, q - 1):\n theta_a = orthants[j].start\n theta_b = orthants[j].stop\n while abs(theta_b - theta_a) > self.e:\n theta_c = (theta_a * 3 + theta_b) / 4\n theta_d = (theta_a + 
theta_b) / 2\n theta_e = (theta_a + theta_b * 3) / 4\n\n theta_list[j] = theta_a\n vec_a = compute_vector(self.sphere, theta_list)\n\n theta_list[j] = theta_b\n vec_b = compute_vector(self.sphere, theta_list)\n\n theta_list[j] = theta_c\n vec_c = compute_vector(self.sphere, theta_list)\n\n theta_list[j] = theta_d\n vec_d = compute_vector(self.sphere, theta_list)\n\n theta_list[j] = theta_e\n vec_e = compute_vector(self.sphere, theta_list)\n\n # compare ac\n cac = self.oracle.compare(vec_a, vec_c)\n ccd = self.oracle.compare(vec_c, vec_d)\n cde = self.oracle.compare(vec_d, vec_e)\n ceb = self.oracle.compare(vec_e, vec_b)\n self.num_queries += 4\n\n if self.check_i:\n context = {\n \"theta_list\": theta_list,\n \"j\": j,\n \"theta_a\": theta_a,\n \"theta_b\": theta_b,\n \"theta_c\": theta_c,\n \"theta_d\": theta_d,\n \"theta_e\": theta_e,\n }\n self.check_inconsistency(cac, ccd, cde, ceb, context)\n\n if cac:\n theta_b = theta_d\n elif ccd:\n theta_b = theta_d\n elif cde:\n theta_a = theta_c\n theta_b = theta_e\n elif ceb:\n theta_a = theta_d\n else:\n theta_a = theta_d\n\n # update theta list\n theta_list[j] = (theta_a + theta_b) / 2\n\n # save theta list\n self.theta_list = theta_list\n return normalize(compute_vector(self.sphere, theta_list) - self.sphere.origin)", "def hemisphere_point(radius, inclination):\n if radius <= 0:\n raise AssertionError('Radius mast be grater than 0')\n\n alpha = np.random.rand() * pi*2\n r_small = radius*sin(radians(inclination))\n r = np.random.rand() * r_small\n\n # Find points on the sphere\n x = r * cos(alpha)\n y = r * sin(alpha)\n z = sqrt(radius**2 - x**2 - y**2)\n\n return x, y, z", "def center_of_mass(points):\n # break into many triangles\n # each point is part of two triangles\n cor = [sum(points) / len(points)]\n mass_points = []\n area = 0\n for i in range(len(points) - 1):\n triangle = cor + points[i:i + 2]\n # print(triangle)\n mass_points.append(build_triangle_point_mass(triangle))\n area += shoelace_area(triangle)\n # print(triangle, area)\n mass_points.append(build_triangle_point_mass(cor + [points[-1], points[0]]))\n area += shoelace_area(cor + [points[-1], points[0]])\n return Vector2D(*find_com(*zip(*mass_points))), area", "def rgb_sphere(n=128):\n sphere = np.zeros((n, n, 3), dtype=np.uint8)\n\n for x in range(n):\n xx = (n // 2 - x) / (n // 2)\n for y in range(n):\n yy = (n // 2 - y) / (n // 2)\n if xx**2 + yy**2 > 1:\n continue\n zz = np.sqrt(1 - xx**2 - yy**2)\n\n sphere[x, y, :] = _vec_to_rgb(xx, yy, zz)\n\n return sphere", "def test_points_on_1sphere_4y():\n points = generate.points_on_1sphere(4, 'y')\n assert np.allclose(points[0], cst.quat1)\n assert np.allclose(points[1], cst.quaty90)\n assert np.allclose(points[2], cst.quaty)", "def generateClusterPoints(N, k=2, scale=1):\n rands = [[np.random.uniform(0, scale) * np.random.rand() for _ in range(k)] for i in range(N)]\n rands += [[np.random.uniform(-scale, 0) * np.random.rand() for _ in range(k)] for i in range(N)]\n point_list = []\n for rand in rands:\n # lastItem = math.sqrt(sum([1 + item**2 for item in rand]))\n lastItem = math.sqrt(1 + np.dot(rand, rand))\n rand.append(lastItem)\n point_list.append(rand)\n return np.array(point_list)", "def nearest_in_n_sphere(self, value, r):\n return self.nearest_in_bounding_box(value, r)\n \n # This seems right\n # return self.binary_search_find_nearest_neighbors_in_radius(value, r)\n \n # This seems wrong\n # return self.recur_find_nearest_n_neighbor(value, r)", "def Sphere_ExactSerendipityLagrangeQuad():\n\n mesh = 
Sphere_CubeToSerendipityLagrangeQuad(1)\n \n ################\n # Modifications for exact sphere\n ################\n # x=+1 side\n def posXvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.ones(xi1.shape);yb=np.array(-xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def posXnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.ones(xi1.shape);yb=np.array(-xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = -1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0 * np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0 * yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0 * zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0 * np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def posXJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.ones(xi1.shape);yb=np.array(-xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = -1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0 * np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0 * yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0 * zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0 * np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[0].vals = posXvals\n mesh.eList[0].normals = posXnormals\n mesh.eList[0].J = posXJ\n \n def posYvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def posYnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def posYJ(xi1,xi2):\n 
xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[1].vals = posYvals\n mesh.eList[1].normals = posYnormals\n mesh.eList[1].J = posYJ\n \n # x=-1 side\n def negXvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=-np.ones(xi1.shape);yb=np.array(xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def negXnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=-np.ones(xi1.shape);yb=np.array(xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def negXJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=-np.ones(xi1.shape);yb=np.array(xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[2].vals = negXvals\n mesh.eList[2].normals = negXnormals\n mesh.eList[2].J = negXJ\n\n # y=-1 side\n def negYvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(-xi1);yb=-np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def negYnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n 
xb=np.array(-xi1);yb=-np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2; \n dxdxi1 = -1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*0.5*xb*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5*(-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0*0.5*zb*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5*(-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0) \n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def negYJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(-xi1);yb=-np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2; \n dxdxi1 = -1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*0.5*xb*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5*(-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0*0.5*zb*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5*(-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0) \n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[3].vals = negYvals\n mesh.eList[3].normals = negYnormals\n mesh.eList[3].J = negYJ\n \n # z=+1 side\n def posZvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1)\n yb=np.array(-xi2)\n zb=np.ones(xi1.shape)\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def posZnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(-xi2);zb=np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = -1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = -1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = -1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def posZJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(-xi2);zb=np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = -1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = -1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = -1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = 
np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[4].vals = posZvals\n mesh.eList[4].normals = posZnormals\n mesh.eList[4].J = posZJ\n \n # z=-1 side\n def negZvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(xi2);zb=-np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def negZnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(xi2);zb=-np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def negZJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(xi2);zb=-np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[5].vals = negZvals\n mesh.eList[5].normals = negZnormals\n mesh.eList[5].J = negZJ\n \n for e in mesh.eList:\n e.ExactElement = True\n \n return mesh", "def calcBasis2(xpts,basis_size,R):\n out = np.zeros((len(xpts),basis_size))\n for n in range(1,basis_size+1):\n out[:,n-1] = n*spherical_jn(0,n*np.pi*xpts/R)\n # Alturnatively\n #out[:,n-1] = (R/xpts)*np.sin(n*np.pi*xpts/R)\n return out", "def sph(grlat, elong, ht):\n\n # Initialize Variables\n global cth, sth, clg, slg, dif, radn, gl # common/obs/\n gn = 9.798277692\n ae = 6378140.0\n f = 0.00335281\n rm = 0.00344978\n dr = 0.01745329252\n\n clong = np.cos(elong * dr)\n slong = np.sin(elong * dr)\n # latitude difference\n dvert = f * (1.0 + 0.5 * f) * np.sin(2.0 * grlat * dr) - 0.5 * f * f * np.sin(\n 4.0 * grlat * dr\n )\n gcclat = (3.1415926535898 / 2.0) - (grlat * dr - dvert)\n cthet = np.cos(gcclat)\n sthet = np.sin(gcclat)\n # geocentric radius\n radn = 1 - f * (cthet ** 2) * (1 + 1.5 * f * (sthet ** 2))\n # formulae for g are from jeffreys, 4.022 and 4.023\n g = gn * (\n 1\n + f\n - 1.5 * rm\n + f * (f - (27 / 14) * rm)\n + (2.5 * rm - f - f * (f - (39 / 14) * rm)) * (cthet ** 2)\n - (f / 2) * (7 * f - 15.0 * rm) * ((cthet * sthet) ** 2)\n )\n # free air correction\n g = g - g * (2.0 * ht * (1.0 
+ f + rm - 2.0 * f * (cthet ** 2)) / ae)\n\n # Conversion Here for Globals\n cth = cthet\n sth = sthet\n clg = clong\n slg = slong\n dif = dvert\n gl = g", "def test_sphere(self):\n fun = get_problem('sphere', self.dimension)\n self.assertEqual(fun(self.array), 0.0)", "def line_sphere(l, s):\n o = l.o\n l = l.d\n c = s.o\n r = s.r\n LO = dot(l,o)\n LC = dot(l,c)\n OC = dot(o,c)\n A = LO - LC\n AA = A*A\n\n LL = dot(l,l)\n OO = dot(o,o)\n CC = dot(c,c)\n RR = r*r\n\n B = OO + CC - RR - 2*OC\n\n C = LL\n\n tsqr = AA - C*B\n\n if tsqr < 0:\n return tuple()\n\n tsqr = sqrt(tsqr)\n k1 = (-A + tsqr)/LL\n k2 = (-A - tsqr)/LL\n\n return (l*k1+o, l*k2+o)", "def createSphere( position=(0,0,0), radius=1, colour=(0.6,0.6,0.6), samplesY = 20, samplesXZ = 20 ):\r\n return createEllipsoid( position, (radius,radius,radius), colour, samplesY, samplesXZ )", "def _integrate_sphere(self, rank: int) -> complex:\n assert isinstance(self.f, np.ndarray) # noqa: S101\n s_p = ssht.inverse(\n self.slepian.eigenvectors[rank],\n self.L,\n Method=sleplet._vars.SAMPLING_SCHEME,\n )\n weight = sleplet._integration_methods.calc_integration_weight(self.L)\n return sleplet._integration_methods.integrate_whole_sphere(\n weight,\n self.f,\n s_p.conj(),\n )", "def generate_point_inside_hypermoon(m_rel: int, c: Tuple[List[float], List[float]], r: Tuple[float, float]) \\\n -> List[float]:\n c_big, c_small = c\n r_big, r_small = r\n\n x = generate_point_inside_hypersphere(m_rel, c_big, r_big)\n\n while is_point_inside_hypersphere(x, c_small, r_small):\n x = generate_point_inside_hypersphere(m_rel, c_big, r_big)\n\n return x", "def calcul_v_sphere(r):\n volume = 4/3 * math.pi * (r ** 3)\n return volume", "def compute_s(\n traj,\n surface_normal_dim=2,\n pore_center = 0.0,\n max_distance = 1.0,\n bin_width=0.01\n ):\n # Make molecules whole first\n\n #remove the below task as molecules are not split\n #traj.make_molecules_whole(inplace=True)\n\n # Select ow and hw\n water_o = traj.top.select(\"name O1\")\n water_h = traj.top.select(\"name H1 H2\")\n traj_ow = traj.atom_slice(water_o)\n traj_hw = traj.atom_slice(water_h)\n\n # Compute angles between surface normal ([0,0,1]) and h-o-h bisector\n hw_midpoints = traj_hw.xyz.reshape(traj_hw.n_frames,-1,2,3).mean(axis=2)\n vectors = (traj_ow.xyz - hw_midpoints)\n vectors /= np.linalg.norm(vectors, axis=-1, keepdims=True)\n cos_angles = vectors[:,:,surface_normal_dim]\n\n # Compute distances -- center of pore already @ 0,0; use OW position\n distances = traj_ow.xyz[:,:,surface_normal_dim] - pore_center\n bin_centers = []\n s_values = []\n for bin_center in np.arange(-max_distance, max_distance, bin_width):\n mask = np.logical_and(\n distances > bin_center - 0.5 * bin_width,\n distances < bin_center + 0.5 * bin_width\n )\n s = (3.0 * np.mean(cos_angles[mask]**2) - 1.0) / 2.0\n bin_centers.append(bin_center)\n s_values.append(s)\n\n return bin_centers, s_values", "def generate_small_hypersphere(m_rel: int, max_r: float, min_r: float) -> Tuple[List[float], float]:\n c = [0] * m_rel\n r = (random.random() * (max_r - min_r)) + min_r\n\n for j in np.random.permutation(m_rel):\n bound = ((1 - r) ** 2) - np.sum(np.square(c))\n bound = math.sqrt(bound) if bound > 0 else 0\n max_c = bound\n min_c = -bound\n\n c[j] = (random.random() * (max_c - min_c)) + min_c\n\n return c, r", "def get_quad_points():\n points = np.array(\n [[0.333333333333333333333333333333, 0.333333333333333333333333333333],\n [0.950275662924105565450352089520, 0.024862168537947217274823955239],\n 
[0.024862168537947217274823955239, 0.950275662924105565450352089520],\n [0.024862168537947217274823955239, 0.024862168537947217274823955239],\n [0.171614914923835347556304795551, 0.414192542538082326221847602214],\n [0.414192542538082326221847602214, 0.171614914923835347556304795551],\n [0.414192542538082326221847602214, 0.414192542538082326221847602214],\n [0.539412243677190440263092985511, 0.230293878161404779868453507244],\n [0.230293878161404779868453507244, 0.539412243677190440263092985511],\n [0.230293878161404779868453507244, 0.230293878161404779868453507244],\n [0.772160036676532561750285570113, 0.113919981661733719124857214943],\n [0.113919981661733719124857214943, 0.772160036676532561750285570113],\n [0.113919981661733719124857214943, 0.113919981661733719124857214943],\n [0.009085399949835353883572964740, 0.495457300025082323058213517632],\n [0.495457300025082323058213517632, 0.009085399949835353883572964740],\n [0.495457300025082323058213517632, 0.495457300025082323058213517632],\n [0.062277290305886993497083640527, 0.468861354847056503251458179727],\n [0.468861354847056503251458179727, 0.062277290305886993497083640527],\n [0.468861354847056503251458179727, 0.468861354847056503251458179727],\n [0.022076289653624405142446876931, 0.851306504174348550389457672223],\n [0.022076289653624405142446876931, 0.126617206172027096933163647918],\n [0.851306504174348550389457672223, 0.022076289653624405142446876931],\n [0.851306504174348550389457672223, 0.126617206172027096933163647918],\n [0.126617206172027096933163647918, 0.022076289653624405142446876931],\n [0.126617206172027096933163647918, 0.851306504174348550389457672223],\n [0.018620522802520968955913511549, 0.689441970728591295496647976487],\n [0.018620522802520968955913511549, 0.291937506468887771754472382212],\n [0.689441970728591295496647976487, 0.018620522802520968955913511549],\n [0.689441970728591295496647976487, 0.291937506468887771754472382212],\n [0.291937506468887771754472382212, 0.018620522802520968955913511549],\n [0.291937506468887771754472382212, 0.689441970728591295496647976487],\n [0.096506481292159228736516560903, 0.635867859433872768286976979827],\n [0.096506481292159228736516560903, 0.267625659273967961282458816185],\n [0.635867859433872768286976979827, 0.096506481292159228736516560903],\n [0.635867859433872768286976979827, 0.267625659273967961282458816185],\n [0.267625659273967961282458816185, 0.096506481292159228736516560903],\n [0.267625659273967961282458816185, 0.635867859433872768286976979827]]);\n\n w = np.array(\n [0.051739766065744133555179145422,\n 0.008007799555564801597804123460,\n 0.008007799555564801597804123460,\n 0.008007799555564801597804123460,\n 0.046868898981821644823226732071,\n 0.046868898981821644823226732071,\n 0.046868898981821644823226732071,\n 0.046590940183976487960361770070,\n 0.046590940183976487960361770070,\n 0.046590940183976487960361770070,\n 0.031016943313796381407646220131,\n 0.031016943313796381407646220131,\n 0.031016943313796381407646220131,\n 0.010791612736631273623178240136,\n 0.010791612736631273623178240136,\n 0.010791612736631273623178240136,\n 0.032195534242431618819414482205,\n 0.032195534242431618819414482205,\n 0.032195534242431618819414482205,\n 0.015445834210701583817692900053,\n 0.015445834210701583817692900053,\n 0.015445834210701583817692900053,\n 0.015445834210701583817692900053,\n 0.015445834210701583817692900053,\n 0.015445834210701583817692900053,\n 0.017822989923178661888748319485,\n 0.017822989923178661888748319485,\n 0.017822989923178661888748319485,\n 
0.017822989923178661888748319485,\n 0.017822989923178661888748319485,\n 0.017822989923178661888748319485,\n 0.037038683681384627918546472190,\n 0.037038683681384627918546472190,\n 0.037038683681384627918546472190,\n 0.037038683681384627918546472190,\n 0.037038683681384627918546472190,\n 0.037038683681384627918546472190])*0.5;\n quad_x = np.copy(points[:,0])\n quad_y = np.copy(points[:,1])\n return (quad_x, quad_y, w)", "def spherical_project_array(x, y, cos_lat, sin_lat,\n celestial_pole_x, celestial_pole_y,\n celestial_cos_lat, celestial_sin_lat, native_pole_x\n ): # pragma: no cover\n x = np.atleast_1d(np.asarray(x))\n y = np.atleast_1d(np.asarray(y))\n cos_lat = np.atleast_1d(np.asarray(cos_lat))\n sin_lat = np.atleast_1d(np.asarray(sin_lat))\n celestial_pole_x = np.atleast_1d(np.asarray(celestial_pole_x))\n celestial_pole_y = np.atleast_1d(np.asarray(celestial_pole_y))\n celestial_cos_lat = np.atleast_1d(np.asarray(celestial_cos_lat))\n celestial_sin_lat = np.atleast_1d(np.asarray(celestial_sin_lat))\n native_pole_x = np.atleast_1d(np.asarray(native_pole_x))\n\n sizes = np.array([x.size, celestial_pole_x.size, native_pole_x.size])\n max_array = np.argmax(sizes)\n if max_array == 0:\n theta = np.empty_like(x, dtype=nb.float64)\n phi = np.empty_like(x, dtype=nb.float64)\n n = x.size\n else:\n theta = np.empty_like(celestial_pole_x, dtype=nb.float64)\n phi = np.empty_like(celestial_pole_x, dtype=nb.float64)\n n = celestial_pole_x.size\n\n singular_celestial = celestial_pole_x.size == 1\n singular_coordinate = x.size == 1\n singular_native = native_pole_x.size == 1\n\n for i in range(n):\n coord_i = 0 if singular_coordinate else i\n celes_i = 0 if singular_celestial else i\n nativ_i = 0 if singular_native else i\n\n theta[i], phi[i] = spherical_project(\n x=x[coord_i],\n y=y[coord_i],\n cos_lat=cos_lat[coord_i],\n sin_lat=sin_lat[coord_i],\n celestial_pole_x=celestial_pole_x[celes_i],\n celestial_pole_y=celestial_pole_y[celes_i],\n celestial_cos_lat=celestial_cos_lat[celes_i],\n celestial_sin_lat=celestial_sin_lat[celes_i],\n native_pole_x=native_pole_x[nativ_i])\n\n return theta, phi", "def fib_sphere_grid(npoints):\n\n phi = (1.0 + np.sqrt(5.0)) / 2.0\n\n i = np.arange(npoints, dtype=float)\n i2 = 2*i - (npoints-1)\n theta = (2.0*np.pi * i2/phi) % (2.*np.pi)\n sphi = i2/npoints\n phi = np.arccos(sphi)\n return theta, phi", "def tf(xp, yp, zp, spheres, inc, dec, pmag=None):\n if xp.shape != yp.shape != zp.shape:\n raise ValueError(\"Input arrays xp, yp, and zp must have same shape!\")\n tf = numpy.zeros_like(xp)\n # Calculate the 3 components of the unit vector in the direction of the\n # regional field\n fx, fy, fz = utils.dircos(inc, dec)\n if pmag is not None:\n if isinstance(pmag, float) or isinstance(pmag, int):\n pintensity = pmag\n pmx, pmy, pmz = fx, fy, fz\n else:\n pintensity = numpy.linalg.norm(pmag)\n pmx, pmy, pmz = numpy.array(pmag) / pintensity\n for sphere in spheres:\n if sphere is None or ('magnetization' not in sphere.props\n and pmag is None):\n continue\n radius = sphere.radius\n # Get the intensity and unit vector from the magnetization\n if pmag is None:\n mag = sphere.props['magnetization']\n if isinstance(mag, float) or isinstance(mag, int):\n intensity = mag\n mx, my, mz = fx, fy, fz\n else:\n intensity = numpy.linalg.norm(mag)\n mx, my, mz = numpy.array(mag) / intensity\n else:\n intensity = pintensity\n mx, my, mz = pmx, pmy, pmz\n # First thing to do is make the computation point P the origin of the\n # coordinate system\n x = sphere.x - xp\n y = sphere.y - 
yp\n z = sphere.z - zp\n # Calculate the 3 components of B\n dotprod = mx * x + my * y + mz * z\n r_sqr = x ** 2 + y ** 2 + z ** 2\n r5 = r_sqr ** (2.5)\n moment = intensity * (4. * numpy.pi * (radius ** 3) / 3.)\n bx = moment * (3 * dotprod * x - r_sqr * mx) / r5\n by = moment * (3 * dotprod * y - r_sqr * my) / r5\n bz = moment * (3 * dotprod * z - r_sqr * mz) / r5\n tf += (fx * bx + fy * by + fz * bz)\n tf *= CM * T2NT\n return tf", "def simpleSphere(precision):\n b = polyhedron([vertex(0, 0, 1)], [], [])\n rot1 = vector(math.pi / precision, 0, 0)\n for i in range(2 * precision + 1):\n b.rotateSweep(rot1, (0, 0, 0))\n rot2 = vector(0, 0, math.pi / precision)\n for j in range(precision):\n b.rotateSweep(rot2, (0, 0, 0))\n return b", "def spheres_and_cylinders(\n target,\n pore_diameter='pore.diameter',\n throat_diameter='throat.diameter'\n):\n from openpnm.models.geometry import conduit_lengths\n out = conduit_lengths.spheres_and_cylinders(\n target, pore_diameter=pore_diameter, throat_diameter=throat_diameter\n )\n return out[:, 1]", "def generate_point_inside_hypersphere(m_rel: int, c: List[float], r: float) -> List[float]:\n x = [0] * m_rel\n\n for j in np.random.permutation(m_rel):\n bound = r ** 2 - sum([(x_h - c_h) ** 2 if x_h != 0 else 0 for x_h, c_h in zip(x, c)])\n bound = math.sqrt(bound) if bound > 0 else 0\n\n max_x = c[j] + bound\n min_x = c[j] - bound\n\n x_i = (random.random() * (max_x - min_x)) + min_x\n\n if not (min_x <= x_i <= max_x):\n raise ValueError(\"This shouldn't happen!\")\n\n x[j] = x_i\n\n return x", "def create_spheric_poses(radius, n_poses=120):\n\n def spheric_pose(theta, phi, radius):\n trans_t = lambda t: np.array([\n [1, 0, 0, 0],\n [0, 1, 0, -0.9 * t],\n [0, 0, 1, t],\n [0, 0, 0, 1],\n ])\n\n rot_phi = lambda phi: np.array([\n [1, 0, 0, 0],\n [0, np.cos(phi), -np.sin(phi), 0],\n [0, np.sin(phi), np.cos(phi), 0],\n [0, 0, 0, 1],\n ])\n\n rot_theta = lambda th: np.array([\n [np.cos(th), 0, -np.sin(th), 0],\n [0, 1, 0, 0],\n [np.sin(th), 0, np.cos(th), 0],\n [0, 0, 0, 1],\n ])\n\n c2w = rot_theta(theta) @ rot_phi(phi) @ trans_t(radius)\n c2w = np.array([[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]) @ c2w\n return c2w[:3]\n\n spheric_poses = []\n for th in np.linspace(0, 2 * np.pi, n_poses + 1)[:-1]:\n spheric_poses += [spheric_pose(th, -np.pi / 5, radius)] # 36 degree view downwards\n return np.stack(spheric_poses, 0)", "def Calc_axe_spheroid(r,c):\n return np.sqrt((r**3)/c)", "def _cage_pts(xyz, neighbor_xyzs, sigma, neighbor_diameters, L, M, R):\n pts = rand_sphere(M) * R + xyz\n for nxyz, nsig in zip(neighbor_xyzs, neighbor_diameters):\n dpts = np.remainder(pts - nxyz + L / 2.0, L) - L / 2.0\n dists_sq = np.sum(dpts**2, axis=1)\n goodix = dists_sq >= ((nsig + sigma) / 2.0)**2\n pts = pts[goodix, :]\n return pts", "def __div__(self, i):\n s = Shape([])\n for p in self.pts:\n s.add_point(p.__div__(i))\n return s", "def spherical2cartesian(sphere):\n cart = np.zeros(sphere.shape, dtype=np.float64)\n sine_phi = np.sin(sphere[:, 2])\n\n cart[:, 0] = sphere[:, 0] * np.cos(sphere[:, 1]) * sine_phi\n cart[:, 1] = sphere[:, 0] * np.sin(sphere[:, 1]) * sine_phi\n cart[:, 2] = sphere[:, 0] * np.cos(sphere[:, 2])\n return cart", "def spherical_parallel_transport(p_from, p_to, v):\n assert p_from.shape == p_to.shape == v.shape\n axis = np.cross(p_from, p_to)\n axis = axis / (np.linalg.norm(axis, axis=-1, keepdims=True) + 1e-20)\n theta = np.arccos(np.sum(p_to * p_from, axis=1).clip(-1, 1))\n rot = so3_matrix_generator(axis, theta)\n v_transformed = 
np.einsum(\"nij,nj->ni\", rot, v)\n return v_transformed", "def _setup_grid_sphere(self, dk, kgrid, k0):\n kvec = defaultdict(list)\n kvec_centered = defaultdict(list)\n # With elongated box, we choose the smallest k0 component to setup the integer grid\n # This must be consistent with expo_grid() otherwise it wont find the vectors\n kmax = kgrid[-1] + dk[-1]\n kbin_max = 1 + int(kmax / min(k0))\n # TODO: it would be more elegant to define an iterator over ix, iy, iz for sphere, hemisphere, ... unless kmax is very high it might be more efficient to operate on a 3d grid to construct the vectors\n kmax_sq = kmax**2\n for ix in range(-kbin_max, kbin_max+1):\n for iy in range(-kbin_max, kbin_max+1):\n for iz in range(-kbin_max, kbin_max+1):\n # Slightly faster and more explicit than\n # ksq = sum([(x*y)**2 for x, y in zip(k0, [ix, iy, iz])])\n ksq = ((k0[0]*ix)**2 + (k0[1]*iy)**2 + (k0[2]*iz)**2)\n if ksq > kmax_sq:\n continue\n # beware: numpy.sqrt is x5 slower than math one!\n knorm = math.sqrt(ksq)\n # Look for a shell of vectors in which the vector could fit.\n # This expression is general and allows arbitrary k grids\n # However, searching for the shell like this is not fast\n # (it costs about as much as the above)\n for ki, dki in zip(kgrid, dk):\n if abs(knorm - ki) < dki:\n kvec[ki].append((ix+kbin_max, iy+kbin_max, iz+kbin_max))\n kvec_centered[ki].append((ix, iy, iz))\n break\n\n # if len(kvec.keys()) != len(kgrid):\n # _log.info('some k points could not be found')\n\n return kvec, kvec_centered" ]
[ "0.7038946", "0.6950157", "0.69140077", "0.68093324", "0.66389656", "0.6611303", "0.6572137", "0.648919", "0.6481571", "0.6474984", "0.64451575", "0.64319617", "0.6428179", "0.64160025", "0.640967", "0.6393354", "0.6356308", "0.633187", "0.62893623", "0.62585074", "0.6178017", "0.6119817", "0.6105972", "0.61018896", "0.60893345", "0.60574013", "0.6055691", "0.6050873", "0.6027095", "0.6026866", "0.59819955", "0.59625447", "0.59535307", "0.5945188", "0.5926182", "0.5909708", "0.58806854", "0.587988", "0.58792067", "0.58513534", "0.58505476", "0.58435005", "0.5821895", "0.58158916", "0.58101016", "0.58037543", "0.58000594", "0.5789639", "0.5769554", "0.5768311", "0.5755576", "0.5753241", "0.57443035", "0.5741403", "0.57173246", "0.5697288", "0.56951135", "0.569299", "0.56920534", "0.568567", "0.56855816", "0.56645817", "0.5658976", "0.56496143", "0.5648782", "0.5626796", "0.5616361", "0.56024534", "0.5601413", "0.56001806", "0.55949515", "0.55936855", "0.5589392", "0.55732846", "0.55617404", "0.5555588", "0.5528299", "0.552575", "0.5522737", "0.55189306", "0.5518045", "0.5516822", "0.5512334", "0.55053693", "0.5503061", "0.54989326", "0.5495886", "0.5488039", "0.54873496", "0.54804176", "0.54797804", "0.5478476", "0.5473312", "0.5459541", "0.54488266", "0.544684", "0.5446155", "0.5443352", "0.5439136", "0.54370147", "0.54337674" ]
0.0
-1
Convert spherical coordinates to Cartesian coordinates.
def sph2car(r, theta, phi): x = r * np.sin(theta) * np.cos(phi) y = r * np.sin(theta) * np.sin(phi) z = r * np.cos(theta) return x, y, z
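An illustrative round-trip check for the spherical-to-Cartesian conversion in the document field above (an aside, assuming the physics convention: theta is the polar angle from +z, phi the azimuth; the test values are arbitrary):

import numpy as np

# forward conversion at r=2, theta=pi/3, phi=pi/4
x, y, z = sph2car(2.0, np.pi / 3, np.pi / 4)

# invert and confirm the round trip
r = np.sqrt(x**2 + y**2 + z**2)   # radial distance
theta = np.arccos(z / r)          # polar angle measured from +z
phi = np.arctan2(y, x)            # azimuth in the x-y plane
assert np.allclose([r, theta, phi], [2.0, np.pi / 3, np.pi / 4])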
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SphericalToCartesian(Spherical):\n\n # r,theta,phi -> x,y,z\n r = Spherical[:,0]\n st = np.sin(Spherical[:,1])\n sp = np.sin(Spherical[:,2])\n ct = np.cos(Spherical[:,1])\n cp = np.cos(Spherical[:,2])\n x = r*st*cp\n y = r*st*sp\n z = r*ct\n\n if (len(Spherical[0,:])==3):\n Cartesian = np.column_stack((x,y,z))\n return Cartesian\n else:\n # vr,vtheta,vphi -> vx,vy,vz\n vr = Spherical[:,3]\n vt = Spherical[:,4]\n vp = Spherical[:,5]\n vx = vr*st*cp - vt*ct*cp - vp*sp\n vy = vr*st*sp + vt*ct*sp + vp*cp\n vz = vr*ct - vt*st\n Cartesian= np.column_stack((x,y,z,vx,vy,vz))\n return Cartesian", "def spherical_to_cartesian(r, lat, lon):\n import math\n\n if np.isscalar(r) and np.isscalar(lat) and np.isscalar(lon):\n x = r * math.cos(lat) * math.cos(lon)\n y = r * math.cos(lat) * math.sin(lon)\n z = r * math.sin(lat)\n else:\n x = r * np.cos(lat) * np.cos(lon)\n y = r * np.cos(lat) * np.sin(lon)\n z = r * np.sin(lat)\n\n return x, y, z", "def spherical2cartesian(spherical):\n spherical = np.array(spherical).squeeze()\n distance, azimuth, elevation = spherical\n x = distance * np.sin(azimuth) * np.cos(elevation)\n y = distance * np.sin(azimuth) * np.sin(elevation)\n z = distance * np.cos(azimuth)\n return np.array([x, y, z])", "def spherical2cartesian(sphere):\n cart = np.zeros(sphere.shape, dtype=np.float64)\n sine_phi = np.sin(sphere[:, 2])\n\n cart[:, 0] = sphere[:, 0] * np.cos(sphere[:, 1]) * sine_phi\n cart[:, 1] = sphere[:, 0] * np.sin(sphere[:, 1]) * sine_phi\n cart[:, 2] = sphere[:, 0] * np.cos(sphere[:, 2])\n return cart", "def CartesianToSpherical(Cartesian):\n\n # x,y,z -> r,theta,phi\n x = Cartesian[:,0]\n y = Cartesian[:,1]\n z = Cartesian[:,2]\n r = np.sqrt(x*x + y*y + z*z)\n projR = np.sqrt(x*x + y*y)\n theta = np.arccos(z/r)\n phi = np.arctan2(y,x)\n theta[theta<0.] +=2.*np.pi\n \n if (len(Cartesian[0,:])==3):\n Spherical = np.column_stack((r,theta,phi))\n return Spherical\n else:\n # vx,vy,vz -> vr,vtheta,vphi\n vx = Cartesian[:,3]\n vy = Cartesian[:,4]\n vz = Cartesian[:,5]\n vr = (x*vx + y*vy + z*vz)/r\n vt = (z*vr - r*vz)/projR\n vp = r*np.sin(theta)*(vy*x-y*vx)/(projR*projR) \n Spherical = np.column_stack((r,theta,phi,vr,vt,vp))\n return Spherical", "def cartesianToSpherical(x=0, y=0, z=0):\n\n hxy = np.hypot(x, y)\n radius = np.hypot(hxy, z)\n altitude = np.arctan2(z, hxy)\n azimuth = np.arctan2(y, x)\n return altitude, azimuth, radius", "def spherical_to_cartesian(self, r, phi, theta):\n x = r*cos(phi)*sin(theta)\n y = r*sin(phi)*sin(theta)\n z = r*cos(theta)\n \n return Vector(float(x), float(y), float(z))", "def _position_spherical2cartesian(pos):\n \n r=pos[:,0]\n theta=pos[:,1]\n phi=pos[:,2]\n\n if any(theta>np.pi) or any(theta<0): #sanity check. not necessary for phi.\n raise ValueError, \"Theta beyond [0,pi]. 
Exiting.\"\n\n\n x=r*np.sin(theta)*np.cos(phi)\n y=r*np.sin(theta)*np.sin(phi)\n z=r*np.cos(theta)\n\n return np.dstack((x,y,z))[0]", "def sphericalToCartesian(altitude=0, azimuth=0, radius=0):\n\n rcos_theta = radius * np.cos(altitude)\n x = rcos_theta * np.cos(azimuth)\n y = rcos_theta * np.sin(azimuth)\n z = radius * np.sin(altitude)\n return x, y, z", "def spherical2cartesian(phi, theta, depth):\n x = depth * np.sin(theta) * np.cos(phi)\n y = depth * np.cos(theta)\n z = depth * np.sin(theta) * np.sin(phi)\n\n return x, y, z", "def cartesian_to_spherical(x, y, z):\n import math\n\n xsq = x ** 2\n ysq = y ** 2\n zsq = z ** 2\n\n r = (xsq + ysq + zsq) ** 0.5\n s = (xsq + ysq) ** 0.5\n\n if np.isscalar(x) and np.isscalar(y) and np.isscalar(z):\n lon = math.atan2(y, x)\n lat = math.atan2(z, s)\n else:\n lon = np.arctan2(y, x)\n lat = np.arctan2(z, s)\n\n return r, lat, lon", "def to_cartesian(self):\n\n if self.cartesian is None:\n theta = math.radians(self.lat)\n phi = math.radians(self.long)\n x = R_EARTH * math.cos(theta) * math.cos(phi)\n y = R_EARTH * math.cos(theta) * math.sin(phi)\n z = R_EARTH * math.sin(theta)\n self.cartesian = CartesianPoint(x, y, z)\n return self.cartesian", "def spherical2cartesian(v):\n \n x = np.cos(v[0]) * np.cos(v[1]) \n y = np.cos(v[0]) * np.sin(v[1]) \n z = np.sin(v[0]) \n \n return [x,y,z]", "def sphericalToCartesian(magnitude, azimuthal, polar):\r\n azimuthal = azimuthal*math.pi/180.0\r\n polar = polar*math.pi/180.0\r\n xval = magnitude * math.sin(azimuthal) * math.cos(polar)\r\n yval = magnitude * math.sin(azimuthal) * math.sin(polar)\r\n zval = magnitude * math.cos(azimuthal)\r\n return [xval, yval, zval]", "def _spherical_to_cartesian(ra, dec):\n rar = np.radians(ra)\n decr = np.radians(dec)\n\n x = np.cos(rar) * np.cos(decr)\n y = np.sin(rar) * np.cos(decr)\n z = np.sin(decr)\n \n return x, y, z", "def cartesian2spherical(cartesian):\n cartesian = np.array(cartesian).squeeze()\n x, y, z = cartesian\n distance = np.linalg.norm(cartesian)\n azimuth = np.arccos(z / distance)\n elevation = np.arctan2(y, x) # Use arctan2 instead of arctan to get proper sign!\n return np.array([distance, azimuth, elevation])", "def __cartesian2spherical(x: float, y: float, z: float) -> Tuple[float, float]:\n if x == 0 and y == 0:\n return 0, np.degrees(np.pi * 0.5 * np.sign(z))\n lat = np.arctan2(z, np.sqrt(x * x + y * y))\n lon = np.arctan2(y, x)\n return np.degrees(lon), np.degrees(lat)", "def cartesian_to_spherical(self, v):\n x = Vector.x(v)\n y = Vector.y(v)\n z = Vector.z(v)\n r = Vector.length(v)\n phi = atan2(y, x)\n theta = acos(z / r)\n \n return [r, phi, theta]", "def cartesian2spherical(coords):\n sphere = np.zeros(coords.shape)\n xy_sq = coords[:, 0]**2 + coords[:, 1]**2\n sphere[:, 0] = np.sqrt(xy_sq + coords[:, 2]**2)\n sphere[:, 1] = np.arctan2(coords[:, 1], coords[:, 0])\n sphere[:, 2] = np.arctan2(np.sqrt(xy_sq), coords[:, 2])\n return sphere", "def to_cartes(self):\n if self.__coordsys in (Polar, PhySpherical, MathSpherical):\n self.__coordsys = Cartesian if self.__coordsys == Polar else Cartesian_3\n self.update_coord(vct.rec(self.list_repr()))", "def _position_cartesian2spherical(pos):\n\n #save cartesian position of each particle\n x=pos[:,0]\n y=pos[:,1]\n z=pos[:,2]\n\n r=np.sqrt(x**2+y**2+z**2) #radius position of each particle\n\n #define theta and take care of r=0 case\n theta=np.zeros(np.size(x))\n ind_zero=(r == 0.) 
#is there any point where radius is 0 ?\n theta= np.arccos(z/r) \n theta[ind_zero]=0.\n\n phi=np.arctan2(y,x)\n\n return np.dstack((r,theta,phi))[0]", "def convert_coords_cart_sphere(coords_cart):\n shape = coords_cart.shape\n coords = coords_cart.reshape(3,-1)\n\n lat, lon, alt = np.zeros_like(coords)\n for i in range(coords.shape[1]):\n p_rec = [coords[0, i], coords[1, i], coords[2, i]]\n p_lat = sp.spiceypy.reclat(p_rec)\n alt[i], lon[i], lat[i] = p_lat\n \n lat = lat*180/np.pi\n lon = lon*180/np.pi\n alt = alt - mars_r \n\n coords_sphere = np.array([lat, lon, alt]).reshape(shape)\n return coords_sphere", "def cart2spheric(x, y, z):\n # doesn't compute r because chosen egal to 1\n with np.errstate(all='ignore'):\n theta = np.arccos(z)\n phi = np.arctan2(y, x)\n\n return theta, phi", "def cartesian2spherical(v):\n theta = np.arcsin(v[2]) \n phi = np.arctan2(v[1], v[0])\n \n return [theta, phi]", "def polarToCartesian(theta=0, radius=0):\n\n x = radius * np.cos(theta)\n y = radius * np.sin(theta)\n return x, y", "def to_spherical(self):\n return cartesian_to_spherical(self.x, self.y, self.z)", "def cartesian2polar(cartesian):\n cartesian = np.array(cartesian).squeeze()\n x, y = cartesian\n r = np.linalg.norm([x, y])\n azimuth = np.arctan2(y, x)\n return np.array([r, azimuth])", "def _position_spherical2cylindrical(pos):\n \n\n r=pos[:,0]\n theta_spherical=pos[:,1]\n phi_spherical=pos[:,2]\n\n if any(theta_spherical>np.pi) or any(theta_spherical<0): #sanity check. not necessary for phi.\n raise ValueError, \"Theta beyond [0,pi]. Exiting.\"\n\n rho=r*np.sin(theta_spherical)\n theta_cylindrical=phi_spherical\n z=r*np.cos(theta_spherical)\n\n return np.dstack((rho,theta_cylindrical,z))[0]", "def GalacticToCartesian(Galactic,SolarPosition): \n \n # l,b,s->x,y,z\n cl = np.cos(Galactic[:,0])\n sl = np.sin(Galactic[:,0])\n cb = np.cos(Galactic[:,1])\n sb = np.sin(Galactic[:,1])\n x = SolarPosition[0]-Galactic[:,2]*cb*cl\n y = Galactic[:,2]*cb*sl\n z = Galactic[:,2]*sb+SolarPosition[1]\n\n if(len(Galactic[0,:])==3):\n Cartesian = np.column_stack((x,y,z))\n else:\n # vlos,mu_lcos(b),mu_b -> vx,vy,vz\n vl = pm2vel*Galactic[:,2]*Galactic[:,4]\n vb = pm2vel*Galactic[:,2]*Galactic[:,5]\n tmp2 = cb*Galactic[:,3]-sb*vb\n vx = cl*tmp2-sl*vl+SolarPosition[2]\n vy = sl*tmp2+cl*vl+SolarPosition[3]\n vz = sb*Galactic[:,3]+cb*vb+SolarPosition[4]\n Cartesian = np.column_stack((x,y,z,-vx,vy,vz))\n \n return Cartesian", "def spherical_project(x, y, cos_lat, sin_lat,\n celestial_pole_x, celestial_pole_y,\n celestial_cos_lat, celestial_sin_lat, native_pole_x\n ): # pragma: no cover\n right_angle = np.pi / 2\n\n d_lon = x - celestial_pole_x\n if equal_angles(np.abs(celestial_pole_y), right_angle):\n if celestial_pole_y > 0:\n phi = native_pole_x + d_lon + np.pi\n theta = y\n else:\n phi = native_pole_x - d_lon\n theta = -y\n else:\n cos_d_lon = np.cos(d_lon)\n\n phi = native_pole_x + np.arctan2(\n -cos_lat * np.sin(d_lon),\n (sin_lat * celestial_cos_lat)\n - (cos_lat * celestial_sin_lat * cos_d_lon))\n\n theta = asin(\n (sin_lat * celestial_sin_lat)\n + (cos_lat * celestial_cos_lat * cos_d_lon))\n\n phi = np.fmod(phi, two_pi)\n\n return theta, phi", "def cart2polar3d(cartesian):\n radius = np.linalg.norm(cartesian)\n theta = np.cos", "def _position_cylindrical2spherical(pos):\n\n rho=pos[:,0]\n theta_cylindrical=pos[:,1]\n z=pos[:,2]\n\n r=np.sqrt(rho**2+z**2)\n theta_spherical=np.arctan2(rho,z)\n phi=theta_cylindrical\n\n return np.dstack((r,theta_spherical,phi))[0]", "def spherical_2_cartesian(r, phi, theta, 
units='degrees'):\n phi = np.copy(phi)\n theta = np.copy(theta)\n if units == 'degrees':\n phi, theta = np.deg2rad(phi), np.deg2rad(theta)\n elif units == 'radians':\n pass\n else:\n raise AssertionError(\"Unexpected value entered for 'units', only supports either degrees or radians\", units)\n x = r * np.cos(phi) * np.sin(theta)\n y = r * np.sin(phi) * np.sin(theta)\n z = r * np.cos(theta)\n return x, y, z", "def cart2spher(x: np.ndarray, y: np.ndarray,\n z: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n indexes = np.where((x == 0) & (y == 0))[0]\n if indexes.size:\n x[indexes] = np.nan\n y[indexes] = np.nan\n lat = np.arctan2(z, np.sqrt(x * x + y * y))\n lon = np.arctan2(y, x)\n if indexes.size:\n lon[indexes] = 0\n lat[indexes] = np.pi * 0.5 * np.sign(z[indexes])\n return np.degrees(lon), np.degrees(lat)", "def get_cartesian_coords(self):\n r = 1\n dec = self.dec + 90\n x = r * math.sin(np.deg2rad(dec)) * math.cos(np.deg2rad(self.ra))\n y = r * math.sin(np.deg2rad(dec)) * math.sin(np.deg2rad(self.ra))\n z = r * math.cos(np.deg2rad(dec))\n\n return [x, y, z]", "def polar2cartesian(polar):\n polar = np.array(polar).squeeze()\n r, azimuth = polar\n x = r * np.cos(azimuth)\n y = r * np.sin(azimuth)\n return np.array([x, y])", "def getCartesian(self, phi, theta, radius):\n point_x = round(sin(theta) * cos(phi) * radius,4)\n point_y = round(sin(theta) * sin(phi) * radius,4)\n point_z = round(cos(theta) * radius,4)\n return [point_x, point_y, point_z]", "def spheric2cart(theta, phi):\n x = np.sin(theta) * np.cos(phi)\n y = np.sin(theta) * np.sin(phi)\n z = np.cos(theta)\n return x, y, z", "def _sphere2cart(xyz, axtheta=0, axphi=1, unit='rad'):\n # Get theta / phi :\n theta, phi = xyz[:, 0], xyz[:, 1]\n if unit is 'degree':\n np.deg2rad(theta, out=theta)\n np.deg2rad(phi, out=phi)\n # Get radius :\n r = np.sin(theta)\n # Get cartesian coordinates :\n np.multiply(np.cos(phi), r, out=xyz[:, 0])\n np.multiply(np.sin(phi), r, out=xyz[:, 1])\n np.cos(theta, xyz[:, 2])\n return xyz", "def cartesian_To_Center(self, x, y, z):\n\n if x > 0.0 and -self.L_cap <= y <= 0.0:\n s = self.L_cap + y\n xc = x - self.rb\n yc = z\n else:\n theta = full_arctan2(y, x)\n if theta <= self.ang:\n s = theta * self.rb + self.L_cap\n xc = np.sqrt(x ** 2 + y ** 2) - self.rb\n yc = z\n elif self.ang < theta <= 2 * np.pi: # i'm being lazy here and not limiting the real end\n x0, y0 = np.cos(self.ang) * self.rb, np.sin(self.ang) * self.rb\n thetaEndPerp = np.pi - np.arctan(-1 / np.tan(self.ang))\n x, y = x - x0, y - y0\n deltaS, xc = np.cos(thetaEndPerp) * x + np.sin(-thetaEndPerp) * y, np.sin(thetaEndPerp) * x + np.cos(\n thetaEndPerp) * y\n yc = z\n xc = -xc\n s = (self.ang * self.rb + self.L_cap) + deltaS\n else:\n raise ValueError\n return s, xc, yc", "def cartesian2spherical(vector: tuple[float, float, float]) -> tuple[float, float, float]:\n x, y, z = vector\n r = m.sqrt(x**2 + y**2 + z**2)\n # acos returns the angle in radians between 0 and pi\n theta = m.degrees(m.acos(z / r))\n # atan2 returns the angle in radians between -pi and pi\n phi = m.degrees(m.atan2(y, x))\n # lets ensure the angle in degrees is always between 0 and 360, as SHIELD-HIT12A requires\n if phi < 0.:\n phi += 360.\n return theta, phi, r", "def centers_cartesian(self):\n polar_centers, azimuthal_centers = self.centers()\n x_centers, y_centers, z_centers = \\\n starwinds_magnetogram.coordinate_transforms.rectangular_coordinates_from_spherical(\n np.ones(polar_centers.shape),\n polar_centers,\n azimuthal_centers)\n\n return x_centers, y_centers, 
z_centers", "def from_cartesian(cls, cartesian):\n z = cartesian.z\n y = cartesian.y\n x = cartesian.x\n theta = math.asin(z / R_EARTH)\n phi = math.atan2(y, x)\n lat = math.degrees(theta)\n lon = math.degrees(phi)\n if lon < 0:\n lon += 360\n return cls(lat, lon)", "def spherical2cylindrical(sph):\n cyl = np.zeros(sph.shape)\n cyl[:, 0] = sph[:, 0] * np.sin(sph[:, 2])\n cyl[:, 1] = sph[:, 1]\n cyl[:, 2] = sph[:, 0] * np.cos(sph[:, 2])\n return cyl", "def _position_cartesian2cylindrical(pos):\n\n \n #save cartesian position of each particle\n x=pos[:,0]\n y=pos[:,1]\n z=pos[:,2]\n\n rho= np.sqrt(x**2+y**2)\n theta=np.arctan2(y,x)\n\n\n return np.dstack((rho,theta,z))[0]", "def cartesian_coordinates(self):\n # extract RA items\n ra_hours, ra_minutes, ra_seconds = RA_RE.match(str(self.ra)).groups()\n # then cast\n ra_hours = int(ra_hours)\n ra_minutes = int(ra_minutes)\n ra_seconds = float(ra_seconds)\n\n # extract DEC items\n dec_sign, dec_degrees, dec_minutes, dec_seconds = DEC_RE.match(str(self.dec)).groups()\n # then cast\n dec_sign = -1 if dec_sign == '-' else 1\n dec_degrees = int(dec_degrees)\n dec_minutes = int(dec_minutes)\n dec_seconds = float(dec_seconds)\n\n # to degrees\n a = (ra_hours*15) + (ra_minutes*0.25) + (ra_seconds*0.004166)\n b = abs(dec_degrees + dec_minutes/60 + dec_seconds/3600) * dec_sign\n\n # to radians\n a = math.radians(a)\n b = math.radians(b)\n\n distance = float(self.distance)\n\n x = (distance * math.cos(b)) * math.cos(a)\n y = (distance * math.cos(b)) * math.sin(a)\n z = distance * math.sin(b)\n\n return x, y, z", "def polarCameraToCartesian(self):\n x = self.cameraPolar[0] * np.sin(self.cameraPolar[1] * np.pi / 180) * np.sin(self.cameraPolar[2] * np.pi / 180)\n y = self.cameraPolar[0] * np.cos(self.cameraPolar[2] * np.pi / 180)\n z = self.cameraPolar[0] * np.cos(self.cameraPolar[1] * np.pi / 180) * np.sin(self.cameraPolar[2] * np.pi / 180)\n self.cameraPosition = [x, y, z]", "def _polar_to_cartesian(self, radius: float, radians: float) -> None:\n self.x = round(radius * math.cos(radians), EPSILON_EXP_MINUS_1)\n self.y = round(radius * math.sin(radians), EPSILON_EXP_MINUS_1)", "def get_cartesian_coord(lat, lon, h):\n a = 6378137.0\n rf = 298.257223563\n lat_rad = radians(lat)\n lon_rad = radians(lon)\n N = sqrt(a / (1 - (1 - (1 - 1 / rf) ** 2) * (sin(lat_rad)) ** 2))\n X = (N + h) * cos(lat_rad) * cos(lon_rad)\n Y = (N + h) * cos(lat_rad) * sin(lon_rad)\n Z = ((1 - 1 / rf) ** 2 * N + h) * sin(lat_rad)\n return X, Y, Z", "def _position_cylindrical2cartesian(pos):\n \n rho=pos[:,0]\n theta=pos[:,1]\n z=pos[:,2]\n\n x=rho*np.cos(theta)\n y=rho*np.sin(theta)\n z=z\n\n return np.dstack((x,y,z))[0]", "def polar2cartesian(phi, r):\n phi_radians = radians(phi)\n x = r*cos(phi_radians)\n y = r*sin(phi_radians)\n return x, y", "def lon_lat_to_cartesian(lon, lat, R = 1):\n lon_r = np.radians(lon)\n lat_r = np.radians(lat)\n\n x = R * np.cos(lat_r) * np.cos(lon_r)\n y = R * np.cos(lat_r) * np.sin(lon_r)\n z = R * np.sin(lat_r)\n return x,y,z", "def to_cartesian(self):\n w = 1.73205 # sqrt(3)\n h = 2\n dx = 0.5 * w if self.y % 2 == 1 else 0\n x = 0.5 * w + self.x * w + dx\n y = 0.5 * h + 0.75 * self.y * h\n return (x, y)", "def cartesianToPolar(x,y):\n r = np.sqrt(x**2 + y**2)\n theta = np.arctan2(y,x)\n\n return r,theta", "def spher2cart(lon: np.ndarray,\n lat: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n rlon = np.radians(lon)\n rlat = np.radians(lat)\n x = np.cos(rlon) * np.cos(rlat)\n y = np.sin(rlon) * np.cos(rlat)\n z = np.sin(rlat)\n return x, y, 
z", "def _spherical_to_cartesian_fast(ra, dec, threads):\n import numexpr as ne\n\n #nthreads = ne.detect_number_of_cores()\n nthreads = threads\n ne.set_num_threads(nthreads)\n\n pi = math.pi\n rar = ne.evaluate('ra*pi/180.0')\n decr = ne.evaluate('dec*pi/180.0')\n\n hold1=ne.evaluate('cos(decr)') \n\n x = ne.evaluate('cos(rar) * hold1')\n y = ne.evaluate('sin(rar) * hold1')\n z = ne.evaluate('sin(decr)')\n \n return x, y, z", "def cartesian(position):\n return [position[0] * cos(position[1]), position[0] * sin(position[1])]", "def to_cartesian(r, phi):\n x = r*np.cos(phi)\n y = r*np.sin(phi)\n return x, y", "def polarToCartesian(r,theta):\n x = r * np.cos(theta)\n y = r * np.sin(theta)\n return x,y", "def cartesianToSpherical(xComp, yComp, zComp, negateMagnitude=False, \r\n tolerance=1E-10):\r\n ans = None\r\n mag = math.sqrt(xComp*xComp + yComp*yComp + zComp*zComp)\r\n if mag < tolerance:\r\n ans = [0.0, 0.0, 0.0]\r\n\r\n proj2 = xComp*xComp + yComp*yComp\r\n if ans is None and proj2 < tolerance:\r\n ans = [mag, 0.0, 0.0]\r\n elif abs(zComp) < tolerance:\r\n if abs(xComp) < tolerance:\r\n ans = [mag, 90.0, 90.0]\r\n if abs(yComp) < tolerance:\r\n ans = [mag, 90.0, 0.0]\r\n else:\r\n ans = [mag, 90.0, math.acos(xComp/mag)*_CONV]\r\n else:\r\n azimuth = math.acos(zComp/mag)\r\n ans = [mag, azimuth*_CONV, \r\n math.acos(xComp/(mag*math.sin(azimuth)))*_CONV]\r\n \r\n if negateMagnitude:\r\n ans = [-1*ans[0], 180+ans[1], ans[2]]\r\n return ans", "def cartesian2polar(x, y):\n r = (x**2+y**2)**.5\n phi = atan2(y, x)\n return phi, r", "def spherical_deproject(phi, theta,\n celestial_pole_x, celestial_pole_y,\n celestial_cos_lat, celestial_sin_lat,\n native_pole_x): # pragma: no cover\n\n d_phi = phi - native_pole_x\n right_angle = np.pi / 2\n\n if equal_angles(np.abs(celestial_pole_y), right_angle):\n if celestial_pole_y > 0:\n cx = celestial_pole_x + d_phi - np.pi\n cy = theta\n else:\n cx = celestial_pole_x - d_phi\n cy = -theta\n\n else:\n cos_theta = np.cos(theta)\n sin_theta = np.sin(theta)\n cos_d_phi = np.cos(d_phi)\n cx = celestial_pole_x + np.arctan2(\n -cos_theta * np.sin(d_phi),\n ((sin_theta * celestial_cos_lat)\n - (cos_theta * celestial_sin_lat * cos_d_phi)))\n cy = asin(\n (sin_theta * celestial_sin_lat)\n + (cos_theta * celestial_cos_lat * cos_d_phi))\n\n return cx, cy", "def spherical_to_cartesian(grid, vec=None):\n grid = np.atleast_2d(grid)\n\n if vec is None:\n return np.hstack([\n mkvc(grid[:, 0] * np.sin(grid[:, 2]) * np.cos(grid[:, 1]), 2),\n mkvc(grid[:, 0] * np.sin(grid[:, 2]) * np.sin(grid[:, 1]), 2),\n mkvc(grid[:, 0] * np.cos(grid[:, 2]), 2)\n ])\n\n if len(vec.shape) == 1 or vec.shape[1] == 1:\n vec = vec.reshape(grid.shape, order='F')\n\n x = (\n vec[:, 0] * np.sin(grid[:, 2]) * np.cos(grid[:, 1]) +\n vec[:, 2] * np.cos(grid[:, 2]) * np.cos(grid[:, 1]) -\n vec[:, 1] * np.sin(grid[:, 1])\n )\n y = (\n vec[:, 0] * np.sin(grid[:, 2]) * np.sin(grid[:, 1]) +\n vec[:, 2] * np.cos(grid[:, 2]) * np.sin(grid[:, 1]) -\n vec[:, 1] * np.cos(grid[:, 1])\n )\n z = (\n vec[:, 0] * np.cos(grid[:, 2]) -\n vec[:, 2] * np.sin(grid[:, 2])\n )\n\n newvec = [x, y, z]\n\n return np.vstack(newvec).T", "def spherical_2_cartesian(grid, vec=None):\n return spherical_to_cartesian(grid, vec)", "def polar_to_cartesian(dist, theta, phi):\n z = np.cos(phi)\n s = np.sin(phi)\n x = s * np.cos(theta)\n y = s * np.sin(theta)\n return np.stack((x, y, z), axis=-1) * np.expand_dims(dist, axis=-1)", "def spherical_distances(x, y):\n # Compute the norms of all points, we do NOT check they actually all lie 
on\n # the same sphere (that's the caller's responsibility).\n \n xn = np.sqrt((x**2).sum(axis=1))\n yn = np.sqrt((y**2).sum(axis=1))\n ang_cos = np.dot(x, y.T)/(xn[:, None]*yn[None, :])\n # Protect against numerical noise giving us cosine values outside the -1,1\n # range, where arccos would return nans.\n ang_cos = np.clip(ang_cos, -1, 1)\n\n return xn[:, None]*np.arccos(ang_cos)", "def cylindrical2spherical(cyl):\n sph = np.zeros(cyl.shape)\n sph[:, 0] = np.sqrt(cyl[:, 0]**2 + cyl[:, 2]**2)\n sph[:, 1] = cyl[:, 1]\n sph[:, 2] = np.arctan2(cyl[:, 0], cyl[:, 2])\n return sph", "def polar_to_cartesian(r, theta):\n\n x = r * cos(theta)\n y = r * sin(theta)\n\n return x, y", "def cartesianToPolar(x=0, y=0):\n\n radius = np.hypot(x, y)\n theta = np.arctan2(y, x)\n return theta, radius", "def project_to_sphere(points):\n # for uv, the sphere: r=1, azimuth(phi): 2*pi*u, elevation(theta): 2*pi*v\n # theta is elevation, phi is azimuth\n r, theta, phi = cs.cart2sp(x=points[:, 0], y=points[:, 1], z=points[:, 2])\n # logger.info(f\"number of zero points in r: {np.sum(r==0)}\")\n assert np.sum(r == 0) == 0, \"points contains zeros\"\n points_sphere = points / r.reshape(-1, 1)\n return points_sphere, r, theta, phi\n\n # r, theta, phi = cs.cart2sp(x=1, y=1, z=1)\n\n # # spherical to cartesian\n # x, y, z = cs.sp2cart(r=1, theta=np.pi/4, phi=np.pi/4)\n\n # # cartesian to cylindrical\n # r, phi, z = cs.cart2cyl(x=1, y=1, z=1)", "def spherical(self, x, y):\n\t\twhile x >= self.planet.width or x < 0 or y >= self.planet.height or y < 0:\n\t\t\t#change x if x is out of boundary\n\t\t\tif x >= self.planet.width:\n\t\t\t\tx -= (self.planet.width)\n\t\t\telif x < 0:\n\t\t\t\tx += (self.planet.width)\n\t\t\t#change y if y is out of boundary\n\t\t\tif y >= self.planet.height:\n\t\t\t\ty -= (self.planet.height)\n\t\t\telif y < 0:\n\t\t\t\ty += (self.planet.height)\n\t\treturn x, y", "def spherical_project_array(x, y, cos_lat, sin_lat,\n celestial_pole_x, celestial_pole_y,\n celestial_cos_lat, celestial_sin_lat, native_pole_x\n ): # pragma: no cover\n x = np.atleast_1d(np.asarray(x))\n y = np.atleast_1d(np.asarray(y))\n cos_lat = np.atleast_1d(np.asarray(cos_lat))\n sin_lat = np.atleast_1d(np.asarray(sin_lat))\n celestial_pole_x = np.atleast_1d(np.asarray(celestial_pole_x))\n celestial_pole_y = np.atleast_1d(np.asarray(celestial_pole_y))\n celestial_cos_lat = np.atleast_1d(np.asarray(celestial_cos_lat))\n celestial_sin_lat = np.atleast_1d(np.asarray(celestial_sin_lat))\n native_pole_x = np.atleast_1d(np.asarray(native_pole_x))\n\n sizes = np.array([x.size, celestial_pole_x.size, native_pole_x.size])\n max_array = np.argmax(sizes)\n if max_array == 0:\n theta = np.empty_like(x, dtype=nb.float64)\n phi = np.empty_like(x, dtype=nb.float64)\n n = x.size\n else:\n theta = np.empty_like(celestial_pole_x, dtype=nb.float64)\n phi = np.empty_like(celestial_pole_x, dtype=nb.float64)\n n = celestial_pole_x.size\n\n singular_celestial = celestial_pole_x.size == 1\n singular_coordinate = x.size == 1\n singular_native = native_pole_x.size == 1\n\n for i in range(n):\n coord_i = 0 if singular_coordinate else i\n celes_i = 0 if singular_celestial else i\n nativ_i = 0 if singular_native else i\n\n theta[i], phi[i] = spherical_project(\n x=x[coord_i],\n y=y[coord_i],\n cos_lat=cos_lat[coord_i],\n sin_lat=sin_lat[coord_i],\n celestial_pole_x=celestial_pole_x[celes_i],\n celestial_pole_y=celestial_pole_y[celes_i],\n celestial_cos_lat=celestial_cos_lat[celes_i],\n celestial_sin_lat=celestial_sin_lat[celes_i],\n 
native_pole_x=native_pole_x[nativ_i])\n\n return theta, phi", "def convert_to_cartesian(grid: List[Tuple[float, float]], radius: float = 1.0) -> List[Tuple[float, float, float]]:\n\n # conversion radians -> degrees\n r2d = 180.0 / np.pi\n\n # calculate x/y/z coordinates, assuming r=1\n return [\n (\n radius * np.cos(lat / r2d) * np.cos(lon / r2d),\n radius * np.cos(lat / r2d) * np.sin(lon / r2d),\n radius * np.sin(lat / r2d),\n )\n for lon, lat in grid\n ]", "def make_cartesian(r: float, phi: float):\n x = r*np.cos(phi)\n y = r*np.sin(phi)\n return x, y", "def polar_to_cartesian(self, r, theta):\n # x = rcos(theta), y = rsin(theta)\n x, y = r*math.cos(theta), r*math.sin(theta)\n x, y = self.add((x, y), self.pole)\n return x, y", "def cartesian2cylindrical(coords):\n cyl = np.zeros(coords.shape)\n cyl[:, 0] = np.sqrt(coords[:, 0] ** 2 + coords[:, 1] ** 2)\n cyl[:, 1] = np.arctan2(coords[:, 1], coords[:, 0])\n cyl[:, 2] = coords[:, 2]\n return cyl", "def _velocity_cartesian2spherical(pos,vel):\n\n \n #save cartesian position of each particle\n x=pos[:,0]\n y=pos[:,1]\n z=pos[:,2]\n\n #save cartesian velocities\n vx=vel[:,0]\n vy=vel[:,1]\n vz=vel[:,2]\n\n #convert to spherical coordinates\n pos_sph=_position_cartesian2spherical(pos) #spherical coordinates\n r=pos_sph[:,0]\n theta=pos_sph[:,1]\n phi=pos_sph[:,2]\n\n\n #compute spherical velocities\n vr = vx*np.sin(theta)*np.cos(phi) + vy*np.sin(theta)*np.sin(phi) + vz*np.cos(theta)\n vtheta = vx*np.cos(theta)*np.cos(phi) + vy*np.cos(theta)*np.sin(phi) - vz*np.sin(theta)\n vphi = -vx*np.sin(phi) + vy*np.cos(phi)\n\n if np.sum(r==0)!=0: #if some points are at the origin\n warnings.warn(\"Spherical velocity is not defined at origin. Returning 0.\")\n vr[r==0]=0\n vtheta[r==0]=0\n vphi[r==0]=0\n\n\n return np.dstack((vr,vtheta,vphi))[0]", "def _geodetic_to_cartesian(cls, lat, lon, alt):\n C = Earth.r / np.sqrt(1 - (Earth.e * np.sin(lat)) ** 2)\n S = Earth.r * (1 - Earth.e ** 2) / np.sqrt(1 - (Earth.e * np.sin(lat)) ** 2)\n r_d = (C + alt) * np.cos(lat)\n r_k = (S + alt) * np.sin(lat)\n\n norm = np.sqrt(r_d ** 2 + r_k ** 2)\n return norm * np.array(\n [np.cos(lat) * np.cos(lon), np.cos(lat) * np.sin(lon), np.sin(lat)]\n )", "def CartesianToGalactic(Cartesian,SolarPosition): \n\t \n # x,y,z->l,b,s\n tmp1 = SolarPosition[0]-Cartesian[:,0]\n tmp2 = Cartesian[:,1]\n tmp3 = Cartesian[:,2]-SolarPosition[1]\n s = np.sqrt(tmp1*tmp1+tmp2*tmp2+tmp3*tmp3)\n l = np.arctan2(tmp2,tmp1)\n b = np.arcsin(tmp3/s)\n l[l<0.] 
+= 2.*np.pi; \n\n if(len(Cartesian[0,:])==3):\n Galactic = np.column_stack((l,b,s))\n else:\n \t # vx,vy,vz -> vlos,mu_lcos(b),mu_b\n vx = -Cartesian[:,3]-SolarPosition[2]\n vy = Cartesian[:,4]-SolarPosition[3]\n vz = Cartesian[:,5]-SolarPosition[4]\n cl = np.cos(l)\n sl = np.sin(l)\n cb = np.cos(b)\n sb = np.sin(b)\n vlos = vx*cl*cb+vy*sl*cb+vz*sb;\n mul = (-vx*sl+vy*cl)/(pm2vel*s)\n mub = (-vx*cl*sb-vy*sl*sb+vz*cb)/(pm2vel*s)\n Galactic = np.column_stack((l,b,s,vlos,mul,mub))\n \n return Galactic", "def geo2Cartesian(lat, lon, h, julian_date):\n\n lat_rad = np.radians(lat)\n lon_rad = np.radians(lon)\n\n # Calculate ECEF coordinates\n ecef_x, ecef_y, ecef_z = latLonAlt2ECEF(lat_rad, lon_rad, h)\n\n\n # Get Local Sidreal Time\n LST_rad = math.radians(JD2LST(julian_date, np.degrees(lon_rad))[0])\n\n\n # Calculate the Earth radius at given latitude\n Rh = math.sqrt(ecef_x**2 + ecef_y**2 + ecef_z**2)\n\n # Calculate the geocentric latitude (latitude which considers the Earth as an elipsoid)\n lat_geocentric = math.atan2(ecef_z, math.sqrt(ecef_x**2 + ecef_y**2))\n\n # Calculate Cartesian ECI coordinates (in meters), in the epoch of date\n x = Rh*np.cos(lat_geocentric)*np.cos(LST_rad)\n y = Rh*np.cos(lat_geocentric)*np.sin(LST_rad)\n z = Rh*np.sin(lat_geocentric)\n\n return x, y, z", "def cylindrical2cartesian(cylinder):\n cart = np.zeros(cylinder.shape)\n cart[:, 0] = cylinder[:, 0] * np.cos(cylinder[:, 1])\n cart[:, 1] = cylinder[:, 0] * np.sin(cylinder[:, 1])\n cart[:, 2] = cylinder[:, 2]\n return cart", "def cartesian_to_lon_lat(x, y, z, R = 1):\n lon = np.degrees(np.arctan2(y,x))\n lat = np.degrees(np.pi/2-np.arctan2((x**2+y**2)**0.5,z))\n\n return lon,lat", "def sph2cart(az, el, r):\n \n rcos_theta = r * np.cos(el)\n x = rcos_theta * np.cos(az)\n y = rcos_theta * np.sin(az)\n z = r * np.sin(el)\n \n return (x, y, z)", "def cosines_to_global(self):\n r = Rotation.from_matrix(self.R2global())\n a, b, g = r.as_euler('xyz', degrees=False)\n return np.cos(a), np.cos(b), np.cos(g)", "def cartesian_to_polar(self, x, y):\n # r = (x^2+y^2)^2, theta = tan^-1(y/x)\n # pole is the reference point of the coordinate system\n x, y = self.get_rel_to_pole(x, y)\n r = math.sqrt(pow(x, 2)+pow(y, 2))\n # set specific code for edge cases\n if x == 0 and y != 0:\n sign = lambda x: (1, -1)[x < 0]\n return r, sign(y)*math.pi/2\n if x == 0 and y == 0:\n return 0, 0\n else:\n theta = math.atan(y/x)\n return r, theta", "def sph2cart(ra, dec):\n \n x = np.cos(ra) * np.cos(dec)\n y = np.sin(ra) * np.cos(dec)\n z = np.sin(dec)\n \n return (x, y, z)", "def cart2spher(vectors, axis_order=[0, 1, 2]):\n\n # print axis_order\n vectors = np.asarray(vectors)\n if vectors.shape[0] != 3:\n import ipdb\n\n ipdb.set_trace()\n raise ValueError(\n \"Expected vector shape is [3, N], actual shape is \" + str(vectors.shape)\n ) # , 'foo', 'bar', 'baz')\n # radius distance\n radius = np.linalg.norm(vectors, axis=0)\n normalized = vectors / radius\n\n # polar angle\n theta = np.arccos(normalized[axis_order[2]])\n # azimuth\n phi = np.arctan2(normalized[axis_order[1]], normalized[axis_order[0]])\n return np.asarray([radius, theta, phi])", "def polar_to_cartesian(radius, angle_deg):\n\n theta = np.deg2rad(angle_deg)\n x = radius * np.cos(theta)\n y = radius * np.sin(theta)\n return(x, y)", "def PolarToCartesian(Polar):\n\t \n # R,phi,z -> x,y,z\n cp = np.cos(Polar[:,1])\n sp = np.sin(Polar[:,1])\n x = Polar[:,0] * cp\n y = Polar[:,0] * sp\n z = Polar[:,2]\n\n if (len(Polar[0,:])==3):\n Cartesian = np.column_stack((x,y,z))\n else:\n # 
vR,vphi,vz -> vx,vy,vz\n vx = Polar[:,3]*cp-Polar[:,4]*sp\n vy = Polar[:,4]*cp+Polar[:,3]*sp\n vz = Polar[:,5]\n Cartesian = np.column_stack((x,y,z,vx,vy,vz))\n \n return Cartesian", "def spherical_to_xyz(self, angles: np.ndarray) -> np.ndarray:\n # https://en.wikipedia.org/wiki/Spherical_coordinate_system\n azimuth_iso = (np.pi / 2 - angles[:, 0] * np.pi / 180) % (2 * np.pi)\n altitude_iso = (np.pi / 2 - angles[:, 1] * np.pi / 180) % (2 * np.pi)\n xyz = np.column_stack(\n (\n np.sin(altitude_iso) * np.cos(azimuth_iso),\n np.sin(altitude_iso) * np.sin(azimuth_iso),\n np.cos(altitude_iso),\n )\n )\n if angles.shape[1] > 2:\n xyz *= angles[:, 2:3]\n xyz += self.xyz\n return xyz", "def cart2sph(x: float, y: float, z: float) -> typing.Tuple[float, float, float]:\n hxy = hypot(x, y)\n r = hypot(hxy, z)\n el = atan2(z, hxy)\n az = atan2(y, x)\n return az, el, r", "def cartesian(self):\n raise NotImplementedError(\"This is not implemented.\")\n return CartCoord()", "def cartesian_to_spherical(grid, vec=None):\n\n grid = np.atleast_2d(grid)\n\n if vec is None:\n return np.hstack([\n mkvc(np.sqrt(grid[:, 0]**2 + grid[:, 1]**2 + grid[:, 2]**2), 2),\n mkvc(np.arctan2(grid[:, 1], grid[:, 0]), 2),\n mkvc(\n np.arctan2(np.sqrt(grid[:, 0]**2 + grid[:, 1]**2), grid[:, 2]),\n 2\n ),\n ])\n\n if len(vec.shape) == 1 or vec.shape[1] == 1:\n vec = vec.reshape(grid.shape, order='F')\n\n theta = np.arctan2(grid[:, 1], grid[:, 0])\n phi = np.arctan2(np.sqrt(grid[:, 0]**2 + grid[:, 1]**2), grid[:, 2])\n\n r = (\n vec[:, 0] * np.sin(phi) * np.cos(theta) +\n vec[:, 1] * np.sin(phi) * np.sin(theta) +\n vec[:, 2] * np.cos(phi)\n )\n\n theta = - vec[:, 0] * np.sin(theta) + vec[:, 1] * np.cos(theta)\n\n phi = (\n vec[:, 0] * np.cos(phi) * np.cos(theta) +\n vec[:, 1] * np.cos(phi) * np.sin(theta) -\n vec[:, 2] * np.sin(phi)\n )\n\n newvec = [r, theta, phi]\n\n return np.vstack(newvec).T", "def CartesianToPolar(Cartesian):\n \n # x,y,z -> R,phi,z\n R = np.sqrt(Cartesian[:,0]*Cartesian[:,0]+Cartesian[:,1]*Cartesian[:,1])\n phi = np.arctan2(Cartesian[:,1],Cartesian[:,0])\n z = Cartesian[:,2]\n phi[phi<0.] 
+= 2.*np.pi\n if (len(Cartesian[0,:])==3):\n Polar = np.column_stack((R,phi,z))\n else:\n # vx,vy,vz -> vR,vphi,vz\n cp = np.cos(phi)\n sp = np.sin(phi)\n vR = Cartesian[:,3]*cp+Cartesian[:,4]*sp\n vphi = Cartesian[:,4]*cp-Cartesian[:,3]*sp\n vz = Cartesian[:,5]\n Polar = np.column_stack((R,phi,z,vR,vphi,vz))\n\t\t\n return Polar", "def _get_polar_sky_coords(self, x0, y0):\n x_sky, y_sky = self._get_cart_sky_coords(x0, y0)\n return np.hypot(y_sky, x_sky), np.arctan2(x_sky, y_sky)", "def cartesian_2_spherical(grid, vec=None):\n return cartesian_to_spherical(grid, vec)", "def get_spherical_coordinates(xyz: numpy.array) -> Tuple[float, float, float]:\n r = numpy.linalg.norm(xyz)\n if 0 == r:\n return (0, 0, 0)\n azimuth = _get_azimuth(xyz[0], xyz[1])\n polar_angle = numpy.arccos(xyz[2] / r)\n\n return (r, azimuth, polar_angle)", "def coords_on_spherical_earth(self):\n self.create_3d_coord_on_sphere(on_sphere=True)\n self.df_attributes['coord_x_earth'] = 6371.009 * self.df_attributes['coord_x']\n self.df_attributes['coord_y_earth'] = 6371.009 * self.df_attributes['coord_y']\n self.df_attributes['coord_z_earth'] = 6371.009 * self.df_attributes['coord_z']", "def sph2cart(az: float, el: float, r: float) -> typing.Tuple[float, float, float]:\n rcos_theta = r * cos(el)\n x = rcos_theta * cos(az)\n y = rcos_theta * sin(az)\n z = r * sin(el)\n return x, y, z", "def spherical_parameters(self):\n phi_mu_list = []\n theta_mu_list = []\n \n for mu in self.mu_list:\n r, phi, theta = T_cartesian_to_spherical(x=mu[0], y=mu[1], z=mu[2])\n phi_mu_list.append(phi)\n theta_mu_list.append(theta)\n \n return phi_mu_list, theta_mu_list" ]
[ "0.79381543", "0.77366406", "0.77325046", "0.76741", "0.7667967", "0.76263297", "0.7475714", "0.7456057", "0.7433543", "0.7418165", "0.7412523", "0.736814", "0.7343558", "0.73400825", "0.73142827", "0.7298736", "0.7206924", "0.7128369", "0.707744", "0.6962617", "0.6935745", "0.689513", "0.68795913", "0.6834517", "0.6778356", "0.6744523", "0.67366874", "0.6723968", "0.6720716", "0.67051744", "0.66944355", "0.669141", "0.6686916", "0.6675721", "0.66713834", "0.6642371", "0.6622078", "0.6616531", "0.6596915", "0.65914094", "0.653922", "0.6538302", "0.6527214", "0.6519616", "0.65153164", "0.65031636", "0.6482265", "0.6481671", "0.6466874", "0.64655656", "0.6462375", "0.64616406", "0.6459616", "0.64574593", "0.6435751", "0.6421486", "0.6411226", "0.63999903", "0.63874084", "0.63732755", "0.63729733", "0.63699794", "0.6362064", "0.6360433", "0.6356692", "0.63555074", "0.633757", "0.63107294", "0.6306226", "0.62675697", "0.6262779", "0.623818", "0.62378687", "0.6224496", "0.6202333", "0.61581826", "0.61276406", "0.6115715", "0.6105221", "0.6100913", "0.6090031", "0.6064157", "0.6042625", "0.60141927", "0.6010686", "0.60100675", "0.6007513", "0.6006759", "0.60029656", "0.60025704", "0.5977068", "0.5957461", "0.59543544", "0.5953885", "0.59485406", "0.5941644", "0.5915133", "0.5895352", "0.5889556", "0.5881314" ]
0.5963385
91
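For quick reference, a compact, self-contained sketch of the Cartesian/polar round trip that several of the snippets above implement; the (N, 3) column layouts [x, y, z] and [R, phi, z] are assumptions made for illustration rather than details taken from this record.

import numpy as np

def cartesian_to_polar(xyz):
    # xyz: (N, 3) array of [x, y, z] -> (N, 3) array of [R, phi, z], with phi in [0, 2*pi)
    R = np.hypot(xyz[:, 0], xyz[:, 1])
    phi = np.mod(np.arctan2(xyz[:, 1], xyz[:, 0]), 2.0 * np.pi)
    return np.column_stack((R, phi, xyz[:, 2]))

def polar_to_cartesian(rpz):
    # rpz: (N, 3) array of [R, phi, z] -> (N, 3) array of [x, y, z]
    x = rpz[:, 0] * np.cos(rpz[:, 1])
    y = rpz[:, 0] * np.sin(rpz[:, 1])
    return np.column_stack((x, y, rpz[:, 2]))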
Sample the pODF using a rejection technique.
def sample_pODF(nsamples, qpoints, coefs, N):
    points = np.zeros((nsamples, 4))
    # Maximum of pODF
    C = ((N + 1.0)**2 / (4.0 * np.pi)) * coefs.sum()
    number_of_samples = 0
    while number_of_samples < nsamples:
        # Random sample on the sphere
        rphi = np.random.uniform(0.0, 2.0 * np.pi)
        rmu = np.random.uniform(-1.0, 1.0)
        rsin_theta = np.sqrt(1.0 - rmu**2)
        x, y, z = rsin_theta * np.cos(rphi), rsin_theta * np.sin(rphi), rmu
        f = np.abs(even_pODF(np.array([x, y, z]), qpoints, coefs, N))
        # Uniform random used for rejection
        rho = np.random.uniform(0.0, 1.0)
        if C * rho < f:
            # Accept random point
            points[number_of_samples, :] = np.array([x, y, z, f / C])
            number_of_samples += 1
    return points
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_does_not_sample_twice_ppswor(self):\n with self.assertRaises(ValueError):\n s = private_sampling.ThresholdSample(\n 1.0, private_sampling.PpsworSamplingMethod)\n s.process(\"a\", math.log(FAILURE_PROBABILITY_INVERSE, math.e))\n s.process(\"a\", 1)", "def rejection_sample(self, trial_count):\n count = 0\n valid_trial_count = 1\n\n for i in xrange(trial_count):\n values = {}\n\n valid_sample = True\n\n for letter in self.letters:\n prob = self.variables[letter].get_prob(values)\n values[letter] = self.sample(prob)\n\n if letter in self.query.evidence:\n if (self.query.evidence[letter] != values[letter]):\n valid_sample = False\n break\n\n if valid_sample:\n valid_trial_count += 1\n\n if values[self.query.variable]:\n count += 1\n\n return float(count) / valid_trial_count", "def test_rejection_sampling():\n # Check that it works with a numpy array\n original_samples = np.random.uniform(0, 10, (n_samples, n_params))\n weights = np.random.uniform(0, 5, n_samples)\n new_samples = rejection_sampling(original_samples, weights)\n # new_samples should have less samples than what we started with originally\n assert len(new_samples) <= n_samples\n # Each sample should be in the original posterior table\n assert all(new_sample in original_samples for new_sample in new_samples)\n # Each sample should be unique\n unique = np.unique(new_samples, axis=0)\n assert len(unique) == len(new_samples)\n\n # Now check that it works as expected for the\n # pesummary.utils.samples_dict.SamplesDict object\n original_samples = SamplesDict(\n {param: np.random.uniform(0, 10, n_samples) for param in gw_parameters()}\n )\n weights = np.random.uniform(0, 5, n_samples)\n new_samples = rejection_sampling(original_samples, weights)\n assert new_samples.number_of_samples <= original_samples.number_of_samples\n assert new_samples.parameters == original_samples.parameters\n assert all(\n new_sample in original_samples.samples.T for new_sample in\n new_samples.samples.T\n )", "def test_does_not_sample_negligible_weight_ppswor(self):\n s = private_sampling.ThresholdSample(1.0,\n private_sampling.PpsworSamplingMethod)\n s.process(\n \"a\",\n math.log(\n FAILURE_PROBABILITY_INVERSE / (FAILURE_PROBABILITY_INVERSE - 1),\n math.e))\n self.assertEmpty(s.elements)", "def sample_response(self, slate_p):\n slate_p[slate_p >= 0.5] = 1.0\n slate_p[slate_p < 0.5] = 0.0\n# m = Bernoulli(slate_p)\n# return m.sample()\n return slate_p", "def rejection_sampling(data, weights):\n weights = np.asarray(weights)\n idx = weights > np.random.uniform(0, np.max(weights), len(weights))\n logger.info(\n \"Rejection sampling resulted in {} samples ({} input)\".format(\n idx.sum(), len(idx)\n )\n )\n return data[idx]", "def _sample_using_random(\n self,\n p: float = 0.1,\n ):\n return sa.func.random() < p", "def rejection_sampling(target_density, proposal_density,\n generate_proposal_samples, envelope_factor,\n num_vars, num_samples, verbose=False,\n batch_size=None):\n if batch_size is None:\n batch_size = num_samples\n\n cntr = 0\n num_proposal_samples = 0\n samples = np.empty((num_vars, num_samples), dtype=float)\n while cntr < num_samples:\n proposal_samples = generate_proposal_samples(batch_size)\n target_density_vals = target_density(proposal_samples)\n proposal_density_vals = proposal_density(proposal_samples)\n assert target_density_vals.shape[0] == batch_size\n assert proposal_density_vals.shape[0] == batch_size\n urand = np.random.uniform(0., 1., (batch_size))\n\n # ensure envelop_factor is large enough\n if np.any(target_density_vals 
> (envelope_factor*proposal_density_vals)):\n I = np.argmax(\n target_density_vals/(envelope_factor*proposal_density_vals))\n msg = 'proposal_density*envelop factor does not bound target '\n msg += 'density: %f,%f' % (\n target_density_vals[I],\n (envelope_factor*proposal_density_vals)[I])\n raise ValueError(msg)\n\n I = np.where(\n urand < target_density_vals/(envelope_factor*proposal_density_vals))[0]\n\n num_batch_samples_accepted = min(I.shape[0], num_samples-cntr)\n I = I[:num_batch_samples_accepted]\n samples[:, cntr:cntr+num_batch_samples_accepted] = proposal_samples[:, I]\n cntr += num_batch_samples_accepted\n num_proposal_samples += batch_size\n\n if verbose:\n print(('num accepted', num_samples))\n print(('num rejected', num_proposal_samples-num_samples))\n print(('inverse envelope factor', 1/envelope_factor))\n print(('acceptance probability', float(\n num_samples)/float(num_proposal_samples)))\n return samples", "def test_oss_sample_wt_fit():\n\n # Create the object\n oss = OneSidedSelection(random_state=RND_SEED)\n assert_raises(RuntimeError, oss.sample, X, Y)", "def test_reject_proposal_demand(self):\n pass", "def test_oss_sample_wrong_X():\n\n # Create the object\n oss = OneSidedSelection(random_state=RND_SEED)\n oss.fit(X, Y)\n assert_raises(RuntimeError, oss.sample, np.random.random((100, 40)),\n np.array([0] * 50 + [1] * 50))", "def sample(self, policy, condition, save=True, noisy=True, reset_cond=None, **kwargs):\n pass", "def posterior_sample(self):\n pass", "def accept_reject_sample(prob: Callable, n: int, limits: Space,\n sample_and_weights_factory: Callable = UniformSampleAndWeights,\n dtype=ztypes.float, prob_max: Union[None, int] = None,\n efficiency_estimation: float = 1.0) -> tf.Tensor:\n multiple_limits = limits.n_limits > 1\n\n # if limits.n_limits == 1:\n # lower, upper = limits.limits\n # lower = ztf.convert_to_tensor(lower[0], dtype=dtype)\n # upper = ztf.convert_to_tensor(upper[0], dtype=dtype)\n\n sample_and_weights = sample_and_weights_factory()\n\n n = tf.to_int64(n)\n\n def enough_produced(n, sample, n_total_drawn, eff):\n return tf.greater(n, tf.shape(sample, out_type=tf.int64)[0])\n\n def sample_body(n, sample, n_total_drawn=0, eff=1.0):\n if sample is None:\n n_to_produce = n\n else:\n n_to_produce = n - tf.shape(sample, out_type=tf.int64)[0]\n do_print = settings.get_verbosity() > 5\n if do_print:\n print_op = tf.print(\"Number of samples to produce:\", n_to_produce, \" with efficiency \", eff)\n with tf.control_dependencies([print_op] if do_print else []):\n n_to_produce = tf.to_int64(ztf.to_real(n_to_produce) / eff * 1.01) + 100 # just to make sure\n # TODO: adjustable efficiency cap for memory efficiency (prevent too many samples at once produced)\n n_to_produce = tf.minimum(n_to_produce, tf.to_int64(5e5)) # introduce a cap to force serial\n\n rnd_sample, thresholds_unscaled, weights, weights_max, n_drawn = sample_and_weights(n_to_produce=n_to_produce,\n limits=limits,\n dtype=dtype)\n\n # if n_produced is None:\n # raise ShapeIncompatibleError(\"`sample_and_weights` has to return thresholds with a defined shape.\"\n # \"Use `Tensor.set_shape()` if the automatic propagation of the shape \"\n # \"is not available.\")\n n_total_drawn += n_drawn\n n_total_drawn = tf.to_int64(n_total_drawn)\n\n probabilities = prob(rnd_sample)\n if prob_max is None: # TODO(performance): estimate prob_max, after enough estimations -> fix it?\n # TODO(Mayou36): This control dependency is needed because otherwise the max won't be determined\n # correctly. 
A bug report on will be filled (WIP).\n # The behavior is very odd: if we do not force a kind of copy, the `reduce_max` returns\n # a value smaller by a factor of 1e-14\n # with tf.control_dependencies([probabilities]):\n # UPDATE: this works now? Was it just a one-time bug?\n prob_max_inferred = tf.reduce_max(probabilities)\n else:\n prob_max_inferred = prob_max\n\n if weights_max is None:\n weights_max = tf.reduce_max(weights) * 0.99 # safety margin, also taking numericals into account\n\n weights_scaled = prob_max_inferred / weights_max * weights\n random_thresholds = thresholds_unscaled * weights_scaled\n if run.numeric_checks:\n assert_op = [tf.assert_greater_equal(x=weights_scaled, y=probabilities,\n message=\"Not all weights are >= probs so the sampling \"\n \"will be biased. If a custom `sample_and_weights` \"\n \"was used, make sure that either the shape of the \"\n \"custom sampler (resp. it's weights) overlap better \"\n \"or decrease the `max_weight`\")]\n else:\n assert_op = []\n with tf.control_dependencies(assert_op):\n take_or_not = probabilities > random_thresholds\n # rnd_sample = tf.expand_dims(rnd_sample, dim=0) if len(rnd_sample.shape) == 1 else rnd_sample\n take_or_not = take_or_not[0] if len(take_or_not.shape) == 2 else take_or_not\n filtered_sample = tf.boolean_mask(rnd_sample, mask=take_or_not, axis=0)\n\n if sample is None:\n sample = filtered_sample\n else:\n sample = tf.concat([sample, filtered_sample], axis=0)\n\n # efficiency (estimate) of how many samples we get\n eff = ztf.to_real(tf.shape(sample, out_type=tf.int64)[1]) / ztf.to_real(n_total_drawn)\n return n, sample, n_total_drawn, eff\n\n # TODO(Mayou36): refactor, remove initial call\n sample = tf.while_loop(cond=enough_produced, body=sample_body, # paraopt\n loop_vars=sample_body(n=n, sample=None, # run first once for initialization\n n_total_drawn=0, eff=efficiency_estimation),\n swap_memory=True,\n parallel_iterations=4,\n back_prop=False)[1] # backprop not needed here\n if multiple_limits:\n sample = tf.random.shuffle(sample) # to make sure, randomly remove and not biased.\n new_sample = sample[:n, :] # cutting away to many produced\n\n # TODO(Mayou36): uncomment below. Why was set_shape needed? 
leave away to catch failure over time\n # if no failure, uncomment both for improvement of shape inference\n # with suppress(AttributeError): # if n_samples_int is not a numpy object\n # new_sample.set_shape((n_samples_int, n_dims))\n return new_sample", "def _choose_sample(self):\n\n \t #periodically generate a new reconstruction for the purposes of sampling", "def take_one_sample(self, event, pw_intensity):\n phase = self.get_current_time(event) % self.time_period\n # Rejection sampling with self.random_state\n new_sample = 0\n num_segments = pw_intensity.shape[0]\n s_max = np.max(pw_intensity)\n while True:\n new_sample += self.random_state.exponential(scale=1.0 / s_max)\n current_piece_index = int(num_segments * ((new_sample + phase) % self.time_period) / self.time_period)\n if self.random_state.rand() < pw_intensity[current_piece_index] / s_max:\n # print('Sample chosen: ', new_sample)\n return new_sample", "def test_proof_model():\n m = build_proof_model()\n with m:\n trace = pm.sample(50)\n assert trace.report.ok", "def detectPatientFeedback(self):\n\n noise_prob = self.param[\"noise_probability\"]\n resp_with_noise = self.patient.feedback\n if noise_prob > 0:\n max_noise_val = 10 # todo hardcoded for the noise\n min_noise_val = -10\n max_eval = 10\n min_eval = 0\n for v in self.eval_var:\n max_eval = self.variables_universe[v][-1]\n min_eval = self.variables_universe[v][0]\n break # todo assuming that every eval var is the same\n\n actual_resp = self.patient.feedback\n prob = self.r.uniform(0, 1)\n if prob <= noise_prob:\n noise = 0.0\n if self.param[\"noise_type\"] == \"gaussian\":\n noise = self.r.gauss(0, 2)\n if noise > max_noise_val:\n noise = max_noise_val\n if noise < min_noise_val:\n noise = min_noise_val\n if self.param[\"noise_type\"] == \"inv_gaussian\":\n noise = self.r.gauss(0, 2)\n if noise > max_noise_val:\n noise = max_noise_val\n if noise < min_noise_val:\n noise = min_noise_val\n if noise < 0:\n noise = (min_noise_val - max_noise_val) - noise\n elif noise > 0:\n noise = max_noise_val - noise\n else:\n if self.r.uniform(0, 1) > 0.5:\n noise = max_noise_val\n else:\n noise = min_noise_val\n if self.param[\"noise_type\"] == \"reversed_feedback\":\n dist_from_max = max_eval - actual_resp\n noise = min_eval + dist_from_max - actual_resp # i remove actual_resp to obtain the noise, otherwise it's already the new response, I add it again later\n resp_with_noise = actual_resp + noise\n if resp_with_noise > max_eval:\n resp_with_noise = max_eval\n if resp_with_noise < min_eval:\n resp_with_noise = min_eval\n return resp_with_noise", "def sample(self):\r\n raise NotImplementedError", "def test_does_not_sample_twice_priority(self):\n with self.assertRaises(ValueError):\n s = private_sampling.ThresholdSample(\n 0.5, private_sampling.PrioritySamplingMethod)\n s.process(\"a\", 2.0)\n s.process(\"a\", 0.1)", "def random_sample(prob):\n def _random_sample_xducer(step):\n def _random_sample_step(r=Missing, x=Missing):\n if r is Missing: return step()\n if x is Missing:\n return step(r)\n return step(r, x) if random() < prob else r\n return _random_sample_step\n return _random_sample_xducer", "def sample(self):\n raise NotImplementedError", "def sample(self):\n raise NotImplementedError", "def sample_refine(self, **kwargs):", "def prob_choice(p):\n \n return np.random.random_sample() < p", "def varietySample(V, x, count_max, R2, eps_bound):\n \n #x = np.array(V.gens);\n N = len(x)\n t = sp.symbols(\"t\") #Parameter of line\n #count_max = 200\n count = 0\n #N = length(x)\n P = 
np.empty([N,0])\n while count < count_max:\n # Corrupt the variety with bounded noise \n epsilon = np.random.uniform(-eps_bound, eps_bound)\n Ve = V + epsilon \n \n # Get a line u + v t in space \n U = sp.Matrix(np.random.randn(2, N+1));\n Ur = np.array(U.rref()[0].transpose().tolist(), dtype=float)\n u = Ur[1:, 0]\n v = Ur[1:, 1]\n \n L = u + v * t \n \n #substitute in the line and find real roots\n VL = Ve.subs([i for i in zip(x, L)])\n cVL = sp.Poly(VL).coeffs()\n rVL = np.roots(cVL)\n r_real = np.real(rVL[np.isreal(rVL)])\n \n \n \n #recover points of intersection and append to array\n p = u[:, np.newaxis] + np.outer(v, r_real) \n pnorm = np.sum(p**2, 0)\n \n pcand= p[:, pnorm <= R2]\n\n # if pcand.shape[1] <= 1:\n # pcand0 = pcand\n # else:\n # #pcand0 = pcand[:, 0]\n # pcand0 = pcand[..., 0][:, np.newaxis] #this is dumb\n #\n # P = np.concatenate([P, pcand0], 1)\n P = np.concatenate([P, pcand], 1)\n \n #start new sampling iteration\n count = count + np.size(pcand, 1)\n \n return P", "def reject_test(self):\n self.__genes_test = None\n self.__fitness_test = None", "def sample_action(self, obs, explore_prob):\n raise NotImplementedError", "def test_samples_close_to_inclusion_probability_ppswor(self):\n # The range we allow around 0.5n\n distance_from_half = 0.01\n # The number of elements we use (computed using Chernoff bounds)\n n = int((6.0 / (distance_from_half**2)) *\n math.log(2 * FAILURE_PROBABILITY_INVERSE, math.e) + 1)\n s = private_sampling.ThresholdSample(1.0,\n private_sampling.PpsworSamplingMethod)\n for i in range(n):\n s.process(i, math.log(2.0, math.e))\n self.assertGreaterEqual(len(s.elements), (0.5 - distance_from_half) * n)\n self.assertLessEqual(len(s.elements), (0.5 + distance_from_half) * n)", "def accept_sample(self, proposal: np.array) -> bool:\n ratio = self.objective.p(proposal) / self.objective.p(self.theta)\n if np.random.uniform() < ratio:\n return True\n return False", "def test_does_not_sample_negligible_weight_priority(self):\n s = private_sampling.ThresholdSample(\n 1.0, private_sampling.PrioritySamplingMethod)\n s.process(\"a\", 1.0 / FAILURE_PROBABILITY_INVERSE)\n self.assertEmpty(s.elements)", "def sample(self, i_episode, action_values):\n sigma = max(self.max_sigma + (self.min_sigma - self.max_sigma)/self.end_episode * i_episode, self.min_sigma) \n return np.random.normal(action_values, sigma)", "def sample(self, policy_params, **kwargs):\n return self.head.sample(policy_params, **kwargs)", "def sample(self,p0=None,nsamp=None): \r\n raise NotImplementedError('Need to implement sample function')", "def samplePercNaN(df, specie = \"Caenorhabditis elegans OX=6239\", sample = \"S01\"):\n spec = rawSpecies(df = df, specie = specie)\n nonDecoy = spec[spec[\"EG.IsDecoy\"] == False] \n sample = sample\n sampleDat = nonDecoy[nonDecoy[\"R.Condition\"].str[-3:] == sample] \n perc_NaN = sampleDat[\"PG.Quantity\"].isna().sum() / (sampleDat[\"PG.Quantity\"].count() + sampleDat[\"PG.Quantity\"].isna().sum())\n return perc_NaN", "def _sample_neg(self, assign_result, num_expected, **kwargs):\n neg_inds = torch.nonzero(assign_result.gt_inds == 0)\n if neg_inds.numel() != 0:\n neg_inds = neg_inds.squeeze(1)\n if len(neg_inds) <= num_expected:\n repeat_ = num_expected // neg_inds.numel()\n return torch.cat((neg_inds.repeat(repeat_), self.random_choice(neg_inds, num_expected % neg_inds.numel())))\n else:\n return self.random_choice(neg_inds, num_expected)", "def test_bad_data(self):\r\n # LB180210_3_corrupted.PD0 has three records in it, the 2nd record was corrupted\r\n 
with open(os.path.join(RESOURCE_PATH, 'LB180210_3_corrupted.PD0'), 'rb') as stream_handle:\r\n\r\n parser = AdcpPd0Parser(self.config_recov, stream_handle, self.exception_callback)\r\n\r\n # try to get 3 particles, should only get 2 back\r\n # the second one should correspond to ensemble 3\r\n parser.get_records(3)\r\n\r\n log.debug('Exceptions : %s', self.exception_callback_value[0])\r\n\r\n self.assertEqual(len(self.exception_callback_value), 1)\r\n self.assert_(isinstance(self.exception_callback_value[0], RecoverableSampleException))", "def test_sample(self):\n dist = self.many_samples([0, 0, 0, 1])\n self.assertEquals(3, dist.argMax())\n\n dist = self.many_samples([1, 0, 0, 0, 0])\n self.assertEquals(0, dist.argMax())\n\n dist = self.many_samples([0.5, 0, 0, 0.25, 0.25])\n self.assertAlmostEquals(dist[0], 0.5, delta=0.01)\n self.assertAlmostEquals(dist[3], 0.25, delta=0.01)\n self.assertAlmostEquals(dist[4], 0.25, delta=0.01)\n self.assertEquals(dist[1], 0)\n self.assertEquals(dist[2], 0)\n\n with self.assertRaises(AssertionError):\n diffp.sample([0.5, 0.5, 0.01])", "def sample_r(self, method='rejection_numpy') -> None:\n if not self.inverse_transformation:\n sampler = getattr(SamplerMixin, method)\n samples = sampler(pdf=self.pdf, size=self.sample_size[0])\n else:\n samples: np.ndarray = getattr(np.random, self.pdf)(size=self.sample_size, **self.kwargs)\n self.r_sample = samples", "def _sample_neg(self, assign_result, num_expected, **kwargs):\n neg_inds = torch.nonzero(assign_result.gt_inds == 0)\n if neg_inds.numel() != 0:\n neg_inds = neg_inds.squeeze(1)\n if len(neg_inds) <= num_expected:\n return neg_inds\n else:\n return self.random_choice(neg_inds, num_expected)", "def get_negative_sample(context, num, prob, Gn):\n\tnegative_list = []\n\twhile len(negative_list) < Gn:\n\t\tnegative_sample = np.random.choice(num, p=prob.ravel())\n\t\tif negative_sample != context:\n\t\t\tnegative_list.append(negative_sample)\n\t\telse:\n\t\t\tpass\n\treturn np.array([negative_list])", "def sample(self):", "def get_posterior_sample(self):\n (a, b) = (self.prior_success + 1e-6 - 1, self.prior_failure + 1e-6 - 1)\n # The modes are not well defined unless alpha, beta > 1\n assert np.all(a > 0)\n assert np.all(b > 0)\n\n \"\"\"\n g(ฯ†) denote a log-concave probability density function\n ๅฏนไบŽไบŒ้กนๅˆ†ๅธƒ่€Œ่จ€, ๆฆ‚็އๅˆ†ๅธƒไธบ:P(x)=c(n,r)*(x^r)*(1-x)^(n-r), xไธบๅ˜้‡\n lnP(x) = lnc(n,r) + r*lnx+(n-r)*ln(1-x)\n dlnP(x)/dx = r/x + (n-r)/(1-x)*(-1) = a/x - b/(1-x)\n d2lnP(x)/d2x = -a/x^2 - b/(1-x)^2\n \n ๆญคๅค„xไธบไผ—ๆ•ฐ,ๅณ:x = np = a/(a+b)\n \n ไผ—ๆ•ฐ๏ผˆMode๏ผ‰ๆ˜ฏๆŒ‡ๅœจ็ปŸ่ฎกๅˆ†ๅธƒไธŠๅ…ทๆœ‰ๆ˜Žๆ˜พ้›†ไธญ่ถ‹ๅŠฟ็‚น็š„ๆ•ฐๅ€ผ๏ผŒไปฃ่กจๆ•ฐๆฎ็š„ไธ€่ˆฌๆฐดๅนณใ€‚ \n ไนŸๆ˜ฏไธ€็ป„ๆ•ฐๆฎไธญๅ‡บ็Žฐๆฌกๆ•ฐๆœ€ๅคš็š„ๆ•ฐๅ€ผ๏ผŒๆœ‰ๆ—ถไผ—ๆ•ฐๅœจไธ€็ป„ๆ•ฐไธญๆœ‰ๅฅฝๅ‡ ไธช\n ๅœจ้ซ˜ๆ–ฏๅˆ†ๅธƒไธญ๏ผŒไผ—ๆ•ฐไฝไบŽๅณฐๅ€ผใ€‚\n \"\"\"\n\n mode = a / (a + b) # ไผ—ๆ•ฐ(ๅฏนไบŽ่ฟž็ปญๅˆ†ๅธƒ), a:[arm,1], b: [arm, 1]\n hessian = a / mode + b / (1 - mode) # [arm, 1], TODO:ๆญคๅค„ๆ˜ฏๅฆ่ฎก็ฎ—ๆœ‰่ฏฏ?ๅบ”่ฏฅไธบ a/mode**2 + b/(1-mode)**2 ?\n \"\"\"\n ๅ‚่ง่ฎบๆ–‡5.2:\n An approximate posterior sample ฮธห† is then drawn\n from a Gaussian distribution with mean ฮธ and covariance matrix\n (โˆ’โˆ‡2 ln(ftโˆ’1(ฮธ)))โˆ’1\n \"\"\"\n laplace_sample = mode + np.sqrt(1 / hessian) * np.random.randn(self.n_arm) # ้‡‡ๆ ทarmไธชๆ ทๆœฌ\n return laplace_sample", "def sample_episode(env, policy):\n states = []\n actions = []\n rewards = []\n dones = []\n \n # YOUR CODE HERE\n done = False\n state = env.reset() # Could also use env._get_obs(), but codegrade seems to expect 
this\n while done == False:\n states.append(state)\n \n action = policy.sample_action(state)\n actions.append(action)\n \n state, reward, done, _ = env.step(action)\n \n rewards.append(reward)\n dones.append(done)\n\n return states, actions, rewards, dones", "def test_samples_high_weight_elements_ppswor(self):\n s = private_sampling.ThresholdSample(1.0,\n private_sampling.PpsworSamplingMethod)\n s.process(\"a\", math.log(FAILURE_PROBABILITY_INVERSE, math.e))\n self.assertCountEqual([\"a\"], s.elements.keys())", "def _create_sample(self, policy_output, next_state, reward, done, info,\n env_id):\n return {\n \"policy_output\": policy_output,\n \"next_state\": next_state,\n \"reward\": reward,\n \"done\": done,\n \"info\": info,\n \"env_id\": env_id\n }", "def test_sample(system_generator):\n\n name, test = system_generator()\n print(name)\n\n w_F, w_R, N_k = test.sample([10, 8], mode=\"wFwR\")\n w_F, w_R, N_k = test.sample([1, 1], mode=\"wFwR\")\n w_F, w_R, N_k = test.sample([10, 0], mode=\"wFwR\")\n w_F, w_R, N_k = test.sample([0, 5], mode=\"wFwR\")", "def _get_sample(self, p: float) -> np.ndarray:\n return np.where(self.rand_array >= p, 0, 1)", "def __call__(self, params):\r\n return self.sample(params)", "def sample_damaging(image):\r\n return crease_image(blotch_image(image, 100, True), 10, False)", "def sample(self):\n return gc.rand_state.choice(self.domain)", "def sample_posterior(self):\n \n# print (\"SAMPLING FROM LINEAR SIMILARITY VB\")\n if (self.posterior_mean == False):\n self.weight = Vil.sample_posterior(self.mu_weight, Vil.softplus(self.rho_weight))\n self.bias = Vil.sample_posterior(self.mu_bias, Vil.softplus(self.rho_bias))\n# print (self.bias)\n else:\n self.weight.data = self.mu_weight.data\n self.bias.data = self.mu_bias.data", "def sample_action(observation, sample_q = False, eps = 0):\n \n if sample_q: # sampling from the Q-function\n if np.random.random() <= eps:\n return env.action_space.sample()\n \n p = sess.run(q_reward, feed_dict = {states: [observation]})[0]\n # argmax for q-function\n return np.argmax(p)\n else: # sampling from the policy\n p = sess.run(logits_policy, feed_dict = {states: [observation]})[0]\n \n # choice for real policy\n return np.random.choice(range(2), p = p)", "def sample_survey(self, **kwargs):", "def _sample(self, X, y):\n self._validate_estimator()\n X_res, y_res = self.tomek_.fit_sample(X, y)\n\n return self.smote_.fit_sample(X_res, y_res)", "def sample(a, p):\n if (len(a) != len(p)):\n raise Exception('a != p')\n p = np.array(p)\n p = p / p.sum()\n r = random.random()\n n = len(a)\n total = 0 # range: [0,1]\n for i in xrange(n):\n total += p[i]\n if total > r:\n return a[i]\n return a[i]", "def _sample_noise(self) -> np.ndarray:\n return np.random.randn(self.actor_action_size)", "def _sample(self, rnn_output, temperature):\n pass", "def reset_rf_samples():\n forest._generate_sample_indices = (lambda rs, n_samples:\n forest.check_random_state(rs).randint(0, n_samples, n_samples))", "def perturb(infile,outfile,err,num,ptype,est,mod,lots):\n if lots:\n infiles = [infile.format(x) for x in range(lots)] \n else:\n infiles = [infile]\n surveys = []\n for in_i,infile in enumerate(infiles):\n num_acks = 0\n second_order_acks = 0\n num_errs = 0\n hubble_constant = 100\n galaxies = common.loadData(infile,'CF2')\n perturbed_vs = []\n delta_vs = []\n\n for galaxy in galaxies:\n #q_0 = -0.595\n #z = galaxy.cz/(3*10**8)\n #zmod = z*(1 + 0.5*(1-q_0)*z + (1/6)*(2-q_0-3q_0**2)*z**2)\n if abs(galaxy.v) > galaxy.cz/10:\n num_acks += 1\n\n if ptype == 
\"distance\":\n skewed_distance = np.random.normal(galaxy.d,abs(galaxy.d*err),num)\n elif ptype == \"modulus\":\n inmod = modulusify(galaxy.d, mod)\n pmod = np.random.normal(inmod,err,num)\n skewed_distance = unmodulusify(pmod, mod)\n elif ptype == \"relative\":\n inmod = modulusify(galaxy.d,mod)\n pmod = np.random.normal(inmod,np.abs(err*inmod),num)\n skewed_distance = unmodulusify(pmod,mod)\n \n if est == \"cz\":\n try:\n velocities = galaxy.cz - hubble_constant * skewed_distance\n dv = galaxy.d*err*hubble_constant\n except FloatingPointError: #I don't think it's possible to have a FP error here... Could be wrong?\n num_errs += 1\n print(\"I was wrong\")\n continue\n elif est == \"feldman\":\n try:\n velocities = galaxy.cz * np.log(galaxy.cz / (hubble_constant * skewed_distance) )\n dv = galaxy.cz*err#calculate_error(distance_modulus,galaxy.d,frac_error,args)\n for velocity in velocities:\n if abs(velocity) > galaxy.cz / 10:\n second_order_acks += 1\n except FloatingPointError:\n num_errs += 1\n continue\n perturbed_vs.append((velocities,dv,skewed_distance,galaxy))\n\n print(\"{} out of {} galaxies ({:.2f}) had true velocity NOT much less than redshift,\".format(num_acks,len(galaxies),num_acks/len(galaxies)))\n print(\"i.e. the condition on our estimator that v << cz was not satisfied.\")\n print(\"This happened to the random data {} times out of {}.\".format(second_order_acks,num*len(galaxies)))\n print(\"Also, {} FloatingPoint errors happened, even after taking out the close-by galaxies.\".format(num_errs))\n print()\n survey = []\n for v,dv,d,galaxy in perturbed_vs:\n np1 = np.array((galaxy.normx,\n galaxy.normy,\n galaxy.normz,\n galaxy.redx,\n galaxy.redy,\n galaxy.redz,\n dv\n ))\n \n survey.append(np.concatenate((np1,d,v)))\n surveys.append(survey)\n maxlength = max([len(survey) for survey in surveys])\n surveylength = len(surveys[0][0])\n for survey in surveys:\n for x in range(len(survey),maxlength):\n filler = np.empty(surveylength)\n filler[:] = np.NAN\n survey.append(filler)\n surveysnp = np.array(surveys)\n print(surveysnp.shape)\n np.save(outfile,surveysnp)", "def test_estimate_statistics_ppswor(self):\n s = private_sampling.ThresholdSample(1.0,\n private_sampling.PpsworSamplingMethod)\n element_weight = math.log(FAILURE_PROBABILITY_INVERSE, math.e)\n s.process(\"a\", element_weight)\n sampling_probability = (FAILURE_PROBABILITY_INVERSE -\n 1) / FAILURE_PROBABILITY_INVERSE\n self.assertEqual(s.estimate_statistics(),\n element_weight / sampling_probability)", "def sample(self):\n raise NotImplementedError(\"Override me!\")", "def default(N=500, p=5001, f0=(2.5,)):\n #N = N\n #f0=f0\n nsamples = p\n fsample = 400\n\n ripple_db = 40.0\n cutoff_hz = 10.0\n \n harmonics = [Harmonic(1, 0.5, 0), \n Harmonic(0.4, 2.5, pi/2 + 0.1),\n Harmonic(0.2, 15.3, pi/2),\n Harmonic(0.1, 23.45, pi/2 + 0.8)]\n \n t = np.arange(nsamples) / fsample\n signal = np.zeros((N, nsamples))\n for ampl, freq, phase in harmonics:\n ampl = ampl*np.ones(t.shape)\n ampl = np.tile(ampl, (N,1)) + np.outer(0.2*np.random.randint(2, size=(N,)), ampl)\n \n phase = phase*np.ones(t.shape)\n phase = np.tile(phase, (N,1)) + np.outer(0.5*np.random.randint(2, size=(N,)), phase)\n \n x = ampl * np.cos(2*pi*freq*t + phase)\n signal += np.random.normal(0, 0.8*ampl, (N, nsamples)) + x\n \n references = np.zeros((2*len(f0), nsamples))\n for i, fx in enumerate(f0):\n references[2*i,:] = np.cos(2*pi*fx*t)\n references[2*i+1,:] = np.sin(2*pi*fx*t)\n \n nyq_rate = fsample/2.\n width = 5. 
/ nyq_rate\n \n ntaps, beta = kaiserord(ripple_db, width)\n window = firwin(ntaps, cutoff_hz/nyq_rate, window=('kaiser', beta))\n\n return signal, references, window", "def sample_action(self, obs, explore_prob):\n dx, dy, dz, da, close = self._action_space.sample()\n if np.random.random() < self._height_hack_prob:\n dz = -1\n return [dx, dy, dz, da, 0]", "def sample_negative_answers(self, answer_list, batch_size):\n return np.random.choice(answer_list, batch_size)", "def test_sample_from_extra_bounds_bad(self):\n dim = Real(\"yolo\", \"norm\", 0, 2, low=-2, high=+2, shape=(4, 4))\n with pytest.raises(ValueError) as exc:\n dim.sample(8)\n assert \"Improbable bounds\" in str(exc.value)", "def sample(self, observation):\n raise NotImplementedError", "def read_evoked_lfp(probe,group,p,data):\n\n print('#### Low-pass filtering the data ####')\n \n\n nr_of_electrodes = p['nr_of_electrodes_per_group']\n save_file = p['path'] + '/probe_{:g}_group_{:g}/probe_{:g}_group_{:g}_evoked.pickle'.format(probe,group,probe,group)\n\n #Low pass filtering\n filt = lowpassFilter(rate = p['sample_rate'], high = p['cutoff_freq'], order = 3, axis = 1)\n filtered = filt(data)\n\n\n #Notch filtering\n #if p['notch_filt_freq'] != 0:\n # notchFilt = notchFilter(rate = p['sample_rate'], low = p['notch_filt_freq']-5, high = p['notch_filt_freq']+5, order = 3)\n #Reading the trigger timestamps (process varies depending on the file format\n\n if p['fileformat'] == 'dat':\n trigger_filepath = p['path'] + '/' + p['stim_file']\n with open(trigger_filepath, 'rb') as fid:\n trigger = np.fromfile(fid, np.int16)\n stim_timestamps = extract_stim_timestamps_der(trigger,p)\n\n elif p['fileformat'] == 'cont':\n\t\t#Reading the digital input from file\n trigger_filepath = p['path'] + '/all_channels.events'\n trigger_events = loadEvents(trigger_filepath)\n\n #Acquiring the timestamps of the ttl pulses\n timestamps = trigger_events['timestamps']\n eventId = trigger_events['eventId']\n eventType = trigger_events['eventType']\n channel = trigger_events['channel']\n\n timestamps_global = timestamps[eventType == 5]\n timestamps_ttl = []\n\n ttl_events = (eventType == 3)\n ttl_rise = (eventId == 1)\n\n for i in range(len(timestamps)):\n if (ttl_events[i]) and (ttl_rise[i]):\n timestamps_ttl = np.append(timestamps_ttl, timestamps[i])\n\n stim_timestamps = timestamps_ttl - timestamps_global[0]\n\n elif p['fileformat'] == 'rhd':\n trigger_all = []\n for file in range(len(p['rhd_file'])):\n data = read_data(p['path']+'/'+ p['rhd_file'][file])\n trigger = data['board_dig_in_data'][1]\n trigger_all = np.append(trigger_all, trigger)\n\n stim_timestamps = []\n for i in range(1,len(trigger_all)):\n if trigger_all[i-1] == 0 and trigger_all[i] == 1:\n stim_timestamps = np.append(stim_timestamps, i)\n\n evoked = read_evoked_lfp_from_stim_timestamps(filtered, stim_timestamps, p)\n\n #Save all evoked activity in a pickle file\n fid = open(save_file, 'wb')\n pickle.dump({'evoked':evoked, 'stim_timestamps':stim_timestamps}, fid, protocol=-1)\n fid.close()\n\n #Downsampling\n if(p['down_sample'] == True):\n down_sampled = signal.decimate(filtered, p['down_sample_rate'], zero_phase=True) #Downsampling signal\n small_sample_rate = p['sample_rate']/p['down_sample_rate']\n stim_downsampled = (stim_timestamps/p['down_sample_rate']).astype(int) #Shifting stim_timestamps\n\n evoked_downsampled = read_evoked_lfp_from_stim_timestamps_downsampled(down_sampled, stim_downsampled, p, small_sample_rate)\n\n #Save all evoked activity in a pickle file\n save_file = 
p['path'] + '/probe_{0}_group_{1}/probe_{2}_group_{3}_evoked_down_sampled.pickle'.format(probe,group,probe,group)\n fid = open(save_file, 'wb')\n pickle.dump({'evoked':evoked_downsampled, 'stim_timestamps':stim_downsampled}, fid, protocol=-1)\n fid.close()", "def testPluginUnexpectedError(self):\n self.config.plugins[self.algName].flux0 = 0.0 # this causes a divide by zero\n schema = self.dataset.makeMinimalSchema()\n task = lsst.meas.base.SingleFrameMeasurementTask(schema=schema, config=self.config)\n exposure, cat = self.dataset.realize(noise=100.0, schema=schema, randomSeed=1)\n task.log.setLevel(task.log.FATAL)\n task.run(cat, exposure)\n source = cat[0]\n self.assertTrue(source.get(self.algName + \"_flag\"))\n self.assertFalse(source.get(self.algName + \"_flag_containsNan\"))\n self.assertFalse(source.get(self.algName + \"_flag_edge\"))", "def touching_choice(self,p):\n choose = random.sample(part,2)\n\n return choose", "def corrupt_example(self, e):\n import random\n import copy\n e = copy.copy(e)\n last = e[-1]\n cnt = 0\n while e[-1] == last:\n e[-1] = random.randint(0, self.parameters.vocab_size-1)\n pr = 1./self.parameters.vocab_size\n cnt += 1\n # Backoff to 0gram smoothing if we fail 10 times to get noise.\n if cnt > 10: e[-1] = random.randint(0, self.parameters.vocab_size-1)\n weight = 1./pr\n return e, weight", "def _get_sample(self):\n p = self._get_mean()\n u = self.random.random_sample(p.shape)\n sample = u < p\n return sample", "def sampling_algorithm(self, X, y):\r\n\r\n n_to_sample = self.det_n_to_sample(self.proportion)\r\n\r\n if n_to_sample == 0:\r\n return self.return_copies(X, y, \"Sampling is not needed.\")\r\n\r\n # standardization is needed to make the range of the propensity scores\r\n # similar to that of the features\r\n mms = MinMaxScaler()\r\n X_trans = mms.fit_transform(X) # pylint: disable=invalid-name\r\n\r\n X_min = X_trans[y == self.min_label]\r\n\r\n # adding propensity scores as a new feature\r\n X_new = np.column_stack([X_trans, self.propensity_scores(X_trans, y)])\r\n X_min_new = X_new[y == self.min_label] # pylint: disable=invalid-name\r\n\r\n # finding nearest neighbors of minority samples\r\n n_neighbors = min([len(X_new), self.n_neighbors+1])\r\n\r\n ind = self.neighborhood_structure(X_new, y, n_neighbors, X_min_new)\r\n\r\n # noise removal\r\n t_hat = np.sum(y[ind[:, 1:]] == self.min_label, axis=1)\r\n to_remove = np.where(t_hat < self.t * n_neighbors)[0]\r\n\r\n if len(to_remove) >= len(X_min) - 1:\r\n return self.return_copies(X, y,\r\n \"most minority samples indentified as noise\")\r\n\r\n n_to_sample = n_to_sample + to_remove.shape[0]\r\n\r\n samples = self.generate_samples(X_min=X_min,\r\n to_remove=to_remove,\r\n X_trans=X_trans,\r\n y=y,\r\n ind=ind,\r\n n_to_sample=n_to_sample)\r\n\r\n X_min = np.delete(X_min, to_remove, axis=0)\r\n\r\n # do the sampling\r\n #samples = []\r\n #while len(samples) < n_to_sample:\r\n # idx = self.random_state.randint(len(X_min))\r\n # # finding the number of minority neighbors\r\n # t_hat = np.sum(y[ind[idx][1:]] == self.min_label)\r\n # if t_hat < self.t*n_neighbors:\r\n # # removing the minority point if the number of minority\r\n # # neighbors is less then the threshold\r\n # # to_remove indexes X_min\r\n # if idx not in to_remove:\r\n # to_remove.append(idx)\r\n # # compensating the removal of the minority point\r\n # n_to_sample = n_to_sample + 1\r\n #\r\n # if len(to_remove) == len(X_min):\r\n # _logger.warning(self.__class__.__name__ + \": \" +\r\n # \"all minority samples identified as 
noise\")\r\n # return X.copy(), y.copy()\r\n # else:\r\n # # otherwise do the sampling\r\n # X_b = X_trans[self.random_state.choice(ind[idx][1:])]\r\n # samples.append(self.sample_between_points(X_min[idx], X_b))\r\n\r\n return (mms.inverse_transform(np.vstack([X_trans[y == self.maj_label],\r\n X_min,\r\n samples])),\r\n np.hstack([np.repeat(self.maj_label,\r\n np.sum(y == self.maj_label)),\r\n np.repeat(self.min_label, len(X_min)),\r\n np.repeat(self.min_label, len(samples))]))", "def _sample_discrete_gaussian_helper(scale, shape, dtype):\n scale = tf.cast(scale, tf.int64)\n sq_scale = tf.square(scale)\n\n # Do rejection sampling by oversampling.\n oversample_factor = 2\n # Draw at least some samples in case we got unlucky with small input shape.\n min_n = tf.cast(1000, tf.int64)\n target_n = tf.reduce_prod(tf.cast(shape, tf.int64))\n draw_n = tf.maximum(min_n, oversample_factor * target_n)\n\n # Scale for discrete Laplace.\n t = tf.cast(scale, tf.int64) + 1\n\n def draw_samples(inp_samples, inp_accept):\n \"\"\"Sample with rejection.\"\"\"\n y = _sample_discrete_laplace(t, shape=(draw_n,))\n z_numer = tf.pow((tf.abs(y) * t - sq_scale), 2)\n z_denom = 2 * sq_scale * t * t\n bern_probs = tf.exp(-tf.cast(z_numer, tf.float64) /\n tf.cast(z_denom, tf.float64))\n accept = _sample_bernoulli(bern_probs)\n # Outputs from previous iterations are only used for restoring shapes.\n y.set_shape(inp_samples.get_shape())\n accept.set_shape(inp_accept.get_shape())\n return [y, accept]\n\n # Retry in the (extremely unlikely) case that oversampling doesn't suffice.\n samples = tf.zeros((draw_n,), dtype=tf.int64)\n accept = tf.zeros((draw_n,), dtype=tf.int64)\n samples, accept = tf.while_loop(\n cond=lambda _, accept: tf.reduce_sum(accept) < target_n,\n body=draw_samples,\n loop_vars=[samples, accept])\n\n accepted_samples = samples[tf.equal(accept, 1)][:target_n]\n return tf.cast(tf.reshape(accepted_samples, shape), dtype)", "def _sample_seed(self):\n raise Exception(\" not implemented in base model\")", "def sample_bool(p=.5):\n return bool(np.random.choice([True, False], p=[p, 1-p]))", "def __init__(self,outerPPRF):\n self.outerPPRF = outerPPRF\n\n self.g = random.randint(0,self.outerPPRF.N-1)", "def easy_sample(self, num, **kwargs):\n return self.preprocess(self.sample(num, **kwargs), **kwargs)", "def post(self, s):\n return np.random.choice(self.sample_list)", "def test_reject_agreement(self):\n pass", "def sample_gp(\n params: Dict[str, float],\n data: pd.DataFrame,\n model: ModelType,\n t_val: np.ndarray,\n n: int = 1,\n):\n\n if n < 1:\n raise ValueError(\"'n' must be equal or greater 1.\")\n\n resid = predict_resid(params, data, model)\n pos_resid = resid[[\"x\", \"x_err\", \"y\", \"y_err\", \"technique\"]].dropna()\n\n gpx, gpy = create_gp(params)\n\n sample = pd.DataFrame(index=t_val)\n for technique, df in pos_resid.groupby(\"technique\"):\n if technique == \"interferometry\":\n # unaffected by source confusion\n pass\n else:\n assert technique == \"imaging\"\n gpx.compute(df.index, df[\"x_err\"])\n gpy.compute(df.index, df[\"y_err\"])\n if n == 1:\n sample[\"x\"] = gpx.sample_conditional(df[\"x\"], t_val)\n sample[\"y\"] = gpy.sample_conditional(df[\"y\"], t_val)\n else:\n # average over multiple realizations\n for i in range(n):\n sample[f\"x-{i}\"] = gpx.sample_conditional(df[\"x\"], t_val)\n sample[f\"y-{i}\"] = gpy.sample_conditional(df[\"y\"], t_val)\n sample[\"x\"] = sample.filter(regex=\"x-\").mean(axis=1)\n sample[\"y\"] = sample.filter(regex=\"y-\").mean(axis=1)\n\n return sample", 
"def should_sample(self, span_context):\n raise NotImplementedError", "def sample_action(self, state, timestep, explore_prob):\r\n\r\n if np.random.random() < explore_prob:\r\n return np.random.uniform(*self.bounds, size=(self.action_size,))\r\n return self.action_select_eval(self.model, state, timestep)[0].detach()", "def signalroisample(filename,obs):\n from samplingdist import readworkspace,readfile\n #f,w,obsdict,modeldict,databkgdict,datasigdict = readworkspace(filename)\n f,obsdict,modeldict,databkgdict,datasigdict = readfile(filename)\n if not obsdict.has_key(obs):\n raise RuntimeError(\"Observable '%s' not defined\" % obs)\n sd = ObservableSamplingProb(obsdict[obs])\n sd.setupmodel('bkg','negative_binomial_pdf')\n sd.setupmodel('sig','negative_binomial_sum_pdf')\n\n datasig = datasigdict['dvsig_'+obs]\n databkg = databkgdict['dvbkg_'+obs]\n sd.fitTo(datasig,'sig')\n sd.fitTo(databkg,'bkg')\n\n samplename = filename.split('_')[1]\n sd.plot(samplename,datasig,'sig',sample=samplename+'_sig')\n sd.plot(samplename,databkg,'bkg',sample=samplename+'_bkg')\n\n nfile = filename.split('_')[1]+'_bkgsig_'+obs+'_ws.root'\n sd.update('w',nfile,[datasig,databkg])", "def sample(self, like_params):\n\t\traise NotImplementedError", "def test_gradient_exception_on_sample(self):\n dev = qml.device(\"default.qubit\", wires=2, shots=1000)\n\n @qml.qnode(dev, diff_method=\"parameter-shift\")\n def circuit(x):\n qml.RX(x, wires=[0])\n return qml.sample(qml.PauliZ(0)), qml.sample(qml.PauliX(1))\n\n with pytest.raises(\n qml.QuantumFunctionError,\n match=\"Circuits that include sampling can not be differentiated.\",\n ):\n grad_fn = autograd.jacobian(circuit)\n grad_fn(1.0)", "def test_rsp_bad_dataset(self):\n\n def handle(event):\n def test():\n pass\n\n return 0x0000, test\n\n self.ae = ae = AE()\n ae.add_requested_context(ModalityPerformedProcedureStep)\n ae.add_supported_context(ModalityPerformedProcedureStep)\n\n handlers = [(evt.EVT_N_CREATE, handle)]\n scp = ae.start_server((\"localhost\", 11112), evt_handlers=handlers, block=False)\n\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n assoc = ae.associate(\"localhost\", 11112)\n\n assert assoc.is_established\n\n # Event Information\n ds = Dataset()\n ds.PatientName = \"Test^test\"\n status, ds = assoc.send_n_create(\n ds, ModalityPerformedProcedureStep, \"1.2.840.10008.5.1.1.40.1\"\n )\n\n assert status.Status == 0x0110\n assert ds is None\n\n assoc.release()\n scp.shutdown()", "def prior_sample(self):\n pass", "def sample_from(self):\n raise RuntimeError(\"Needs to be implemented in base class\")", "def sample_action(policy, state):\n nS, nA = policy.shape\n all_actions = np.arange(nA)\n return np.random.choice(all_actions, p=policy[state])", "def __init__(self, p=0.5):\n assert 0. 
<= p <= 1.\n self.p = p\n self.rng = T.shared_randomstreams.RandomStreams(seed=123456)\n self.params = []", "def noise_generator(n, mean, std, fractindex):\n if fractindex not in VALID_FRACT:\n raise ValueError(\"results: status must be one of %r.\" % VALID_FRACT)\n \n stdev = std\n \n b = 2*fractindex-1\n print('beta: ', b)\n \n bdis = np.zeros(n)\n\n bdis[0] = 1\n for i in range(1,n):\n bdis[i] = bdis[i-1] * (0.5 * b + (i-1))/i # note that b is the shape parementer (b)\n\n plt.plot(bdis)\n plt.show\n\n wnt = np.random.normal(mean, stdev, size = n)\n print('WhiteNoise Stdev: ', np.std(wnt))\n plt.plot(wnt)\n plt.show()\n\n bdis_freq = np.fft.fft(bdis)\n wnt_freq = np.fft.fft(wnt)\n\n bdis_freq = bdis_freq[1:n+1]\n wnt_freq = wnt_freq[1:n+1]\n\n freq_total = bdis_freq * wnt_freq\n \n NumUniquePts = n/2 + 1\n NumUniquePts = int(NumUniquePts)\n j = np.arange(1, NumUniquePts)\n \n if fractindex > 1.0:\n j = j\n elif fractindex <= 1.0:\n j = j**0.5\n \n ft_half1 = freq_total[1:NumUniquePts]/j\n\n real = np.real(freq_total[1:NumUniquePts+1])\n real = np.flip(real, axis=0)\n\n imaginary = np.imag(freq_total[1:NumUniquePts+1])\n imaginary = np.flip(imaginary, axis=0)\n imaginary = 1j * imaginary\n\n ft_half2 = real - imaginary\n\n ft = np.hstack((ft_half1, ft_half2))\n \n x = np.fft.ifft(ft)\n x = np.real(x[:n])\n\n mean_diff = mean - np.mean(x)\n x = mean_diff + x\n print(np.mean(x))\n print(np.std(x))\n plt.plot(x)\n plt.show()\n \n return x", "def test_sampling(self):\n dim = Fidelity(\"epoch\", 1, 2)\n assert dim.sample() == [2]\n dim = Fidelity(\"epoch\", 1, 5)\n assert dim.sample() == [5]\n dim = Fidelity(\"epoch\", 1, 5)\n assert dim.sample(4) == [5] * 4", "def process_sample(self, value: PhyPropType) -> PhyPropType:\n pass", "def sample(self):\n return self._sample_func", "def reproducibility_from_fisher(disdf, samplesizes, qthresh):\n\n ## Turn disdf into tidy dataframe\n longpvals = copy.deepcopy(disdf)\n longpvals['otu'] = longpvals.index\n longpvals = pd.melt(longpvals, id_vars='otu',\n value_name='p', var_name='study')\n\n ## Convert two-tailed signed p-values into one-tailed pvalues\n longpvals = convert_to_one_tailed(longpvals).dropna()\n longpvals = pd.melt(longpvals, id_vars=['otu', 'study'],\n value_vars=['p-dis', 'p-h'], var_name='pval_direction')\n\n ## Add sample size for each study\n longpvals['sample_size'] = \\\n longpvals.apply(lambda row: samplesizes.loc[row['study'], 'total'],\n axis=1)\n\n ## Get the combined p-value using weighted stouffer's method\n metap = []\n for grp, subdf in longpvals.groupby(['otu', 'pval_direction']):\n # Only consider genera which are in more than one study\n if subdf.shape[0] > 1:\n # grp is the tuple that defines the group: (otu, direction)\n direction = grp[1]\n otu = grp[0]\n numstudies = subdf.shape[0]\n # Stouffer's weight z-score test\n z, p = combine_pvalues(subdf['value'].astype(float),\n method='stouffer',\n weights=subdf['sample_size'].apply(np.sqrt))\n metap.append([otu, direction, z, p, numstudies])\n metap = pd.DataFrame(metap, columns=['otu', 'direction', 'z', 'p', 'num_studies'])\n\n ## Count number of significant healthy and disease bugs\n # Note that from manual inspection, it doesn't look like any genera\n # are returned as significant in both directions from this method...\n sig_h = metap.query('direction == \"p-h\"').query('p < @qthresh')\n sig_dis = metap.query('direction == \"p-dis\"').query('p < @qthresh')\n\n return sig_h.shape[0] + sig_dis.shape[0]", "def sample_free(self):\n # type: () -> Pose\n assert self._world 
is not None\n return self._world.sample_free()", "def noise(self, freq: int, /) -> None:", "def generate_samples(self):\n self.analytic_probability()", "def dropout(X, p=0.):\n if p > 0:\n retain_prob = 1 - p\n X *= t_rng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)\n X /= retain_prob\n return X" ]
[ "0.6341869", "0.6320421", "0.6018086", "0.5842575", "0.58349407", "0.57898366", "0.57707685", "0.5673086", "0.5649213", "0.56451184", "0.562531", "0.5600256", "0.55618894", "0.55617553", "0.5559686", "0.5514322", "0.55063957", "0.5383353", "0.5362512", "0.53615665", "0.53103817", "0.5272219", "0.5272219", "0.52547824", "0.5251037", "0.5246682", "0.5194006", "0.51939374", "0.5187338", "0.5179035", "0.51616925", "0.51584667", "0.5149521", "0.51295793", "0.5126264", "0.511896", "0.5106176", "0.51009697", "0.5099804", "0.50858843", "0.5085319", "0.50657564", "0.50449544", "0.50277144", "0.5025824", "0.50111413", "0.500865", "0.500541", "0.500293", "0.5001119", "0.49871394", "0.49842575", "0.49683335", "0.49671194", "0.49635226", "0.49600285", "0.49593353", "0.49559906", "0.49365816", "0.4932512", "0.49267352", "0.49227327", "0.4921538", "0.49204695", "0.49122655", "0.49081847", "0.49022695", "0.4898905", "0.48952752", "0.4890253", "0.4888795", "0.48840263", "0.48779103", "0.4877855", "0.4869565", "0.48686603", "0.48625213", "0.4857608", "0.48554674", "0.48479506", "0.48456296", "0.4843631", "0.4841216", "0.48386684", "0.4834027", "0.4827786", "0.4825181", "0.48224422", "0.48224035", "0.4822073", "0.48213372", "0.48148853", "0.4813302", "0.48126543", "0.4806988", "0.48048973", "0.4801103", "0.4797461", "0.47951654", "0.47928658" ]
0.577311
6
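A minimal, runnable sketch of the same sphere-rejection pattern used in the sample_pODF record above; the target density f here is a hypothetical stand-in (even_pODF, qpoints and coefs are not defined in this file), and C must upper-bound f on the unit sphere for the acceptance test to be valid.

import numpy as np

def sample_sphere_rejection(nsamples, f, C, rng=None):
    # Draw points uniformly on the unit sphere and accept each with
    # probability f(point) / C, so accepted points follow a density proportional to f.
    rng = np.random.default_rng() if rng is None else rng
    out = np.zeros((nsamples, 3))
    n = 0
    while n < nsamples:
        phi = rng.uniform(0.0, 2.0 * np.pi)   # uniform azimuth
        mu = rng.uniform(-1.0, 1.0)           # uniform cos(polar angle)
        s = np.sqrt(1.0 - mu**2)
        p = np.array([s * np.cos(phi), s * np.sin(phi), mu])
        if C * rng.uniform() < f(p):
            out[n] = p
            n += 1
    return out

# Example: an axially symmetric target proportional to z**2, bounded above by C = 1.
samples = sample_sphere_rejection(1000, lambda p: p[2]**2, C=1.0)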
Compute a similarity matrix for a set of points. The points are assumed to lie on the surface of the same sphere.
def similarity_matrix(points, sigma):
    distances_squared = spherical_distances(points, points)**2
    return np.exp(-distances_squared / (2.0 * sigma))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def self_similarity_matrix(feature_vectors):\n norm_feature_vectors, mean, std = at.normalize_features([feature_vectors.T])\n norm_feature_vectors = norm_feature_vectors[0].T\n sim_matrix = 1.0 - distance.squareform(\n distance.pdist(norm_feature_vectors.T, 'cosine'))\n return sim_matrix", "def build_matrix(self):\n \n for p1 in self._properties: \n p1 = p1.get_vectorized_data()\n \n for p2 in self._properties:\n p2 = p2.get_vectorized_data()\n v1, v2 = self.prepare_vectors(p1, p2)\n self._similarity_matrix.append(cosine_similarity([v1],[v2]))", "def cosine_similarity_matrix(references: np.ndarray, queries: np.ndarray) -> np.ndarray:\n size1 = references.shape[0]\n size2 = queries.shape[0]\n scores = np.zeros((size1, size2))\n for i in range(size1):\n for j in range(size2):\n scores[i, j] = cosine_similarity(references[i, :], queries[j, :])\n return scores", "def compute_similarity_transform(source_points, target_points):\n assert target_points.shape[0] == source_points.shape[0]\n assert target_points.shape[1] == 3 and source_points.shape[1] == 3\n source_points = source_points.T\n target_points = target_points.T\n mu1 = source_points.mean(axis=1, keepdims=True)\n mu2 = target_points.mean(axis=1, keepdims=True)\n X1 = source_points - mu1\n X2 = target_points - mu2\n var1 = np.sum(X1 ** 2)\n K = X1.dot(X2.T)\n U, _, Vh = np.linalg.svd(K)\n V = Vh.T\n Z = np.eye(U.shape[0])\n Z[-1, -1] *= np.sign(np.linalg.det(U.dot(V.T)))\n R = V.dot(Z.dot(U.T))\n scale = np.trace(R.dot(K)) / var1\n t = mu2 - scale * R.dot(mu1)\n source_points_hat = scale * R.dot(source_points) + t\n source_points_hat = source_points_hat.T\n return source_points_hat", "def test_cosine_similarity_matrix():\n vectors1 = np.array([[1, 1, 0, 0],\n [1, 0, 1, 1]])\n vectors2 = np.array([[0, 1, 1, 0],\n [0, 0, 1, 1]])\n\n scores = cosine_similarity_matrix.py_func(vectors1, vectors2)\n expected_scores = np.array([[0.5, 0.],\n [0.40824829, 0.81649658]])\n assert scores == pytest.approx(expected_scores, 1e-7), \"Expected different scores.\"", "def test_cosine_similarity_matrix_compiled():\n vectors1 = np.array([[1, 1, 0, 0],\n [1, 0, 1, 1]])\n vectors2 = np.array([[0, 1, 1, 0],\n [0, 0, 1, 1]])\n\n scores = cosine_similarity_matrix(vectors1, vectors2)\n expected_scores = np.array([[0.5, 0.],\n [0.40824829, 0.81649658]])\n assert scores == pytest.approx(expected_scores, 1e-7), \"Expected different scores.\"", "def project_to_sphere(points):\n # for uv, the sphere: r=1, azimuth(phi): 2*pi*u, elevation(theta): 2*pi*v\n # theta is elevation, phi is azimuth\n r, theta, phi = cs.cart2sp(x=points[:, 0], y=points[:, 1], z=points[:, 2])\n # logger.info(f\"number of zero points in r: {np.sum(r==0)}\")\n assert np.sum(r == 0) == 0, \"points contains zeros\"\n points_sphere = points / r.reshape(-1, 1)\n return points_sphere, r, theta, phi\n\n # r, theta, phi = cs.cart2sp(x=1, y=1, z=1)\n\n # # spherical to cartesian\n # x, y, z = cs.sp2cart(r=1, theta=np.pi/4, phi=np.pi/4)\n\n # # cartesian to cylindrical\n # r, phi, z = cs.cart2cyl(x=1, y=1, z=1)", "def get_sims(centroids):\n\n sims = []\n length = len(centroids)\n \n for i in xrange(0, length):\n for j in xrange(i + 1, length):\n sims.append(similarity(centroids[i], centroids[j]))\n \n return sims", "def matrix(self, references: List[Spectrum], queries: List[Spectrum],\n array_type: str = \"numpy\",\n is_symmetric: bool = False) -> np.ndarray:\n reference_vectors = self.calculate_vectors(references)\n if is_symmetric:\n assert np.all(references == queries), \\\n \"Expected references to be 
equal to queries for is_symmetric=True\"\n query_vectors = reference_vectors\n else:\n query_vectors = self.calculate_vectors(queries)\n\n ms2ds_similarity = cosine_similarity_matrix(reference_vectors, query_vectors)\n return ms2ds_similarity", "def get_sim_matrix(centroids):\n\n matrix = {}\n length = len(centroids)\n\n for i in xrange(0, length):\n matrix[i] = {}\n\n for j in xrange(i + 1, length):\n matrix[i][j] = similarity(centroids[i], centroids[j])\n\n return matrix", "def similarity_matrix(P, similarity_measure, normalize=True, inverse=True):\n N = len(P) \n S = np.zeros((N, N))\n for i in range(N): \n for j in range(i): \n S[i][j] = similarity_measure(P[i], P[j])\n\n S = square(S)\n if normalize: \n S = S / np.max(S)\n if inverse:\n S = 1 - S # Higher value = more similar\n\n return S", "def cosine_similarity(X):\n matrix = X.dot(X.transpose()).todense()\n mat_len = len(matrix)\n norms = [0] * mat_len\n for i in range(0, mat_len):\n norms[i] = 1.0 / np.sqrt(matrix.item((i, i)))\n norm_mat = np.matrix(norms)\n return np.multiply(norm_mat.transpose().dot(norm_mat), matrix)", "def fix_sphere_m (center_x, center_y, center_z, radius, centers, radii, len_points):\n \n g_x = []\n g_y = []\n g_z = []\n points = [hydrogen_coord_gen(center_x, center_y, center_z, radius) for i in range(0, len_points)] \n x = [points[i][0] for i in range(0, len(points))] \n y = [points[i][1] for i in range(0, len(points))]\n z = [points[i][2] for i in range(0, len(points))]\n\n for i in range(0, len(points)):\n check = 0\n j = 0\n while (j <= (len(centers) - 1) and (check == 0)): \n if (calculate_3D_distance_2_centers(x[i], y[i], z[i], centers[j][0], centers[j][1], centers[j][2]) < radii[j]):\n check += 1\n j += 1\n if (check == 0):\n g_x.append(x[i])\n g_y.append(y[i])\n g_z.append(z[i])\n\n return g_x, g_y, g_z", "def spherical_distances(x, y):\n # Compute the norms of all points, we do NOT check they actually all lie on\n # the same sphere (that's the caller's responsibility).\n \n xn = np.sqrt((x**2).sum(axis=1))\n yn = np.sqrt((y**2).sum(axis=1))\n ang_cos = np.dot(x, y.T)/(xn[:, None]*yn[None, :])\n # Protect against numerical noise giving us cosine values outside the -1,1\n # range, where arccos would return nans.\n ang_cos = np.clip(ang_cos, -1, 1)\n\n return xn[:, None]*np.arccos(ang_cos)", "def dice_similarity_matrix(references: np.ndarray, queries: np.ndarray) -> np.ndarray:\n size1 = references.shape[0]\n size2 = queries.shape[0]\n scores = np.zeros((size1, size2))\n for i in range(size1):\n for j in range(size2):\n scores[i, j] = dice_similarity(references[i, :], queries[j, :])\n return scores", "def fit_hypersphere(data, method=\"Hyper\"):\n num_points = len(data)\n# print >>stderr, \"DEBUG: num_points=\", num_points\n \n if num_points==0:\n return (0,None)\n if num_points==1:\n return (0,data[0])\n dimen = len(data[0]) # dimensionality of hypersphere\n# print >>stderr, \"DEBUG: dimen=\", dimen\n \n if num_points<dimen+1:\n raise ValueError(\\\n \"Error: fit_hypersphere needs at least {} points to fit {}-dimensional sphere, but only given {}\".format(dimen+1,dimen,num_points))\n \n # central dimen columns of matrix (data - centroid)\n central = np.matrix(data, dtype=float) # copy the data\n centroid = np.mean(central, axis=0)\n for row in central:\n row -= centroid\n# print >>stderr, \"DEBUG: central=\", repr(central)\n\n # squared magnitude for each centered point, as a column vector\n square_mag= [sum(a*a for a in row.flat) for row in central] \n square_mag = 
np.matrix(square_mag).transpose()\n# print >>stderr, \"DEBUG: square_mag=\", square_mag\n \n if method==\"Taubin\":\n # matrix of normalized squared magnitudes, data\n mean_square = square_mag.mean()\n data_Z = np.bmat( [[(square_mag-mean_square)/(2*sqrt(mean_square)), central]])\n # print >> stderr, \"DEBUG: data_Z=\",data_Z\n u,s,v = linalg.svd(data_Z, full_matrices=False)\n param_vect= v[-1,:]\n params = [ x for x in np.asarray(param_vect)[0]] # convert from (dimen+1) x 1 matrix to list\n params[0] /= 2*sqrt(mean_square)\n params.append(-mean_square*params[0])\n params=np.array(params)\n \n else:\n # matrix of squared magnitudes, data, 1s\n data_Z = np.bmat( [[square_mag, central, np.ones((num_points,1))]])\n # print >> stderr, \"DEBUG: data_Z=\",data_Z\n\n # SVD of data_Z\n # Note: numpy's linalg.svd returns data_Z = u * s * v\n # not u*s*v.H as the Release 1.4.1 documentation claims.\n # Newer documentation is correct.\n u,s,v = linalg.svd(data_Z, full_matrices=False)\n # print >>stderr, \"DEBUG: u=\",repr(u)\n # print >>stderr, \"DEBUG: s=\",repr(s)\n # print >>stderr, \"DEBUG: v=\",repr(v)\n # print >>stderr, \"DEBUG: v.I=\",repr(v.I)\n\n if s[-1]/s[0] < 1e-12:\n # singular case\n # param_vect as (dimen+2) x 1 matrix\n param_vect = v[-1,:]\n # Note: I get last ROW of v, while Chernov claims last COLUMN,\n # because of difference in definition of SVD for MATLAB and numpy\n\n # print >> stderr, \"DEBUG: singular, param_vect=\", repr(param_vect)\n # print >> stderr, \"DEBUG: data_Z*V=\", repr(data_Z*v)\n # print >> stderr, \"DEBUG: data_Z*VI=\", repr(data_Z*v.I)\n # print >> stderr, \"DEBUG: data_Z*A=\", repr(data_Z*v[:,-1])\n else: \n Y = v.H*np.diag(s)*v\n Y_inv = v.H*np.diag([1./x for x in s])*v\n # print >>stderr, \"DEBUG: Y=\",repr(Y)\n # print >>stderr, \"DEBUG: Y.I=\",repr(Y.I), \"\\nY_inv=\",repr(Y_inv)\n #Ninv is the inverse of the constraint matrix, after centroid has been removed\n Ninv = np.asmatrix(np.identity(dimen+2, dtype=float))\n if method==\"Hyper\":\n Ninv[0,0] = 0\n Ninv[0,-1]=0.5\n Ninv[-1,0]=0.5\n Ninv[-1,-1] = -2*square_mag.mean()\n elif method==\"Pratt\":\n Ninv[0,0] = 0\n Ninv[0,-1]=-0.5\n Ninv[-1,0]=-0.5\n Ninv[-1,-1]=0\n else: \n raise ValueError(\"Error: unknown method: {} should be 'Hyper', 'Pratt', or 'Taubin'\")\n # print >> stderr, \"DEBUG: Ninv=\", repr(Ninv)\n\n # get the eigenvector for the smallest positive eigenvalue\n matrix_for_eigen = Y*Ninv*Y\n # print >> stderr, \"DEBUG: {} matrix_for_eigen=\\n{}\".format(method, repr(matrix_for_eigen))\n eigen_vals,eigen_vects = linalg.eigh(matrix_for_eigen)\n # print >> stderr, \"DEBUG: eigen_vals=\", repr(eigen_vals)\n # print >> stderr, \"DEBUG: eigen_vects=\", repr(eigen_vects)\n\n positives = [x for x in eigen_vals if x>0]\n if len(positives)+1 != len(eigen_vals):\n # raise ValueError(\"Error: for method {} exactly one eigenvalue should be negative: {}\".format(method,eigen_vals))\n print>>stderr, \"Warning: for method {} exactly one eigenvalue should be negative: {}\".format(method,eigen_vals)\n smallest_positive = min(positives)\n # print >> stderr, \"DEBUG: smallest_positive=\", smallest_positive\n # chosen eigenvector as 1 x (dimen+2) matrix\n A_colvect =eigen_vects[:,list(eigen_vals).index(smallest_positive)]\n # print >> stderr, \"DEBUG: A_colvect=\", repr(A_colvect)\n # now have to multiply by Y inverse\n param_vect = (Y_inv*A_colvect).transpose()\n # print >> stderr, \"DEBUG: nonsingular, param_vect=\", repr(param_vect) \n params = np.asarray(param_vect)[0] # convert from (dimen+2) x 1 matrix to 
array of (dimen+2)\n\n \n# print >> stderr, \"DEBUG: params=\", repr(params)\n radius = 0.5* sqrt( sum(a*a for a in params[1:-1])- 4*params[0]*params[-1])/abs(params[0])\n center = -0.5*params[1:-1]/params[0]\n#y print >> stderr, \"DEBUG: center=\", repr(center), \"centroid=\", repr(centroid)\n center += np.asarray(centroid)[0]\n return (radius,center)", "def get_sphere_info(points):\n rib = np.sum(points, axis=0)\n rib3d = proj3d(rib)\n pts3d = np.asarray([proj3d(p) for p in points])\n face_size = np.linalg.norm(pts3d[0] - rib3d)\n\n M = np.ones((4, 4), dtype=np.float)\n M[:3, :3] = pts3d[:3]\n M[3, :3] = rib3d\n b = [-sum(x*x) for x in M[:, :3]]\n # if this is a plane\n if abs(np.linalg.det(M)) < 1e-4:\n center = rib3d\n return True, center, None, face_size\n else:\n T = np.linalg.solve(M, b)\n D, E, F, G = T\n center = -0.5 * T[:3]\n radius = 0.5 * np.sqrt(D*D + E*E + F*F - 4*G)\n return False, center, radius, face_size", "def sfm(points):\n # Construct the required W/Rh/Sh matrices.\n\t\n # Get ih/jh from Rh and use them to find Q.\n\n # Use Q, Rh, and Sh to get R and S.\n\n # Extract the F 2x3 rotation matrices from R and form an (F,2,3) array of\n # rotation matrices.\n\n # Build an orthonormal matrix that rotates the first R matrix into an\n # identity matrix.\n\n # Apply the computed rotation matrix to the rotation matrices and the\n # points in S.\n\n # Return the R matrices and an ** Nx3 ** matrix containing the\n # reconstructed 3D points (note that S is 3xN).\n return None", "def best_fit(cls, points: array_like) -> Sphere:\n points = Points(points)\n\n if points.dimension != 3:\n raise ValueError(\"The points must be 3D.\")\n\n if points.shape[0] < 4:\n raise ValueError(\"There must be at least 4 points.\")\n\n if points.affine_rank() != 3:\n raise ValueError(\"The points must not be in a plane.\")\n\n n = points.shape[0]\n A = np.hstack((2 * points, np.ones((n, 1))))\n b = (points**2).sum(axis=1)\n\n c, _, _, _ = np.linalg.lstsq(A, b, rcond=None)\n\n center = c[:3]\n radius = float(np.sqrt(np.dot(center, center) + c[3]))\n\n return cls(center, radius)", "def get_similarity(self, ):\r\n customer_cos_similarity = cosine_similarity(self.rating_matrix, self.rating_matrix)\r\n customer_cos_similarity = pd.DataFrame(customer_cos_similarity,\r\n index=self.customer_vendor_matrix.index,\r\n columns=self.customer_vendor_matrix.index)\r\n # customer_pearson_similarity = np.corrcoef(self.rating_matrix,\r\n # self.rating_matrix,)\r\n # customer_pearson_similarity = pd.DataFrame(customer_pearson_similarity,\r\n # index=self.customer_vendor_matrix.index,\r\n # columns=self.customer_vendor_matrix.index)\r\n return customer_cos_similarity,\r\n # return customer_pearson_similarity run too slowly\r", "def pairwise_cosine(mat):\n def cosine_similarity(a, b):\n return (a * b).sum() / (np.linalg.norm(a) * np.linalg.norm(b))\n\n n = len(mat)\n dist_mat = np.zeros((n, n))\n for i in range(n):\n for j in range(i + 1, n):\n dist = cosine_similarity(mat[i], mat[j])\n dist_mat[i, j] = dist\n dist_mat[j, i] = dist\n\n dist_mat[np.isnan(dist_mat)] = 0\n\n return dist_mat", "def get_distance_matrices(points, bounds=None, one_point_ok=False):\n cPoints = len(points)\n if cPoints < 2 and not one_point_ok:\n raise ValueError(\"Distance mtx for one point is the point's dimensions. Perhaps you meant to provide more than one point to this function. 
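
The closing lines of this fit convert the algebraic parameters of A·(x² + y²) + B·x + C·y + D = 0 back into a geometric radius and center. As a sanity check (added here for illustration, not part of the retrieved record), the sketch below builds those parameters for a known 2-D circle and confirms that the same formulas recover it; the centroid re-shift from the snippet is omitted, i.e. the data is assumed not to have been centered.

```python
import numpy as np

true_center = np.array([1.5, -2.0])
true_radius = 3.0

# algebraic parameters of A*(x^2 + y^2) + B*x + C*y + D = 0 for the known circle
A = 2.0                                    # arbitrary nonzero leading coefficient
B, C = -2.0 * A * true_center              # B = -2*A*cx, C = -2*A*cy
D = A * (true_center @ true_center - true_radius ** 2)
params = np.array([A, B, C, D])

# same conversion as at the end of the fitting routine
radius = 0.5 * np.sqrt(np.sum(params[1:-1] ** 2) - 4.0 * params[0] * params[-1]) / abs(params[0])
center = -0.5 * params[1:-1] / params[0]

print(np.isclose(radius, true_radius))     # True
print(np.allclose(center, true_center))    # True
```
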
Maybe you need to unpack your list/tuple.\")\n # Ensure each point has the same dimension\n cDim = len(points[0]) # count of dimensions\n for p in points:\n assert len(p) == cDim\n aPoints = np.array(points)\n # Use an inner iteration function because it's more versatile and easier to code than appending to a list\n def _iter():\n for i in xrange(cDim):\n xs = aPoints[:, i]\n xdist = np.tile(xs, (cPoints, 1))\n xdist = xdist - xdist.T\n if bounds is not None:\n try:\n min_b, max_b = bounds[i]\n width = max_b - min_b\n except IndexError:\n raise Exception(\"There aren't enough boundaries for the number of dimensions in the points. Ensure that your bounds are of the same dimension as your points.\")\n # Can't use mod because the lower triangle is negative and it wraps around weird\n## xdist = xdist % (width / 2.0)\n xdist[xdist > width / 2.0] -= width\n assert not np.any(xdist > width/2.0)\n xdist[xdist < -width / 2.0] += width\n assert not np.any(xdist < -width/2.0)\n yield xdist\n linear_distances = list(_iter())\n radial_distance = np.zeros_like(linear_distances[0])\n for x in linear_distances:\n radial_distance += x**2\n radial_distance = np.sqrt(radial_distance)\n linear_distances.append(radial_distance) # too lazy to name a temp variable\n return linear_distances", "def distance_from_sphere(self, points, params, sqrt=False):\n center, radius = params\n center = center.reshape((1, 3))\n distance = (torch.norm(points - center, p=2, dim=1) - radius) ** 2\n if sqrt:\n distance = guard_sqrt(distance)\n\n if self.reduce:\n distance = torch.mean(distance)\n return distance", "def sphere_centers(r_x, r_y, r_z):\n a_ccs_p_trans_m = hom_translation_matrix(\n t_x=0.265, t_y=0, t_z=0.014)\n a_ccs_p_rot_m = hom_rotation(x_axis_rotation_matrix(r_x) @\n y_axis_rotation_matrix(r_y) @\n z_axis_rotation_matrix(r_z))\n a_p_sph_1_2 = hom_translation_matrix(\n t_x=0.015, t_y=0.029, t_z=-0.0965)\n a_p_sph_2_2 = hom_translation_matrix(\n t_x=0.015, t_y=-0.029, t_z=-0.0965)\n\n a_ccs_ = a_ccs_p_trans_m @ a_ccs_p_rot_m\n a_c1 = a_ccs_ @ a_p_sph_1_2\n a_c2 = a_ccs_ @ a_p_sph_2_2\n\n return get_translation(a_c1), get_translation(a_c2)", "def points_to_matrix(points):\n points_matrix = np.matrix(points, dtype=np.float64).transpose()\n omega = np.ones(len(points), dtype=np.float64)\n points_matrix = np.matrix(np.vstack((points_matrix, omega)))\n return points_matrix", "def test_similarity(self):\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'dog.n.01'), 1))\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'mammal.n.01'), 0.180901358))", "def f(points):\n distances = np.zeros((points.shape[0],1))\n for i in range(len(points)):\n #print points[i,:], points[i,:]**2\n distances[i] = np.sqrt(np.sum(points[i,:]**2))\n return distances * np.sin(distances)", "def similarity_matrix(feat_mat):\n sim_mat = cosine_similarity(feat_mat)\n np.fill_diagonal(sim_mat, 0)\n return sim_mat", "def distance_matrix(sunspots1, sunspots2):\n \n N1 = len(sunspots1)\n N2 = len(sunspots2)\n\n distance_matrix = np.zeros((N1, N2))\n\n for i in list(range(N1)):\n for j in list(range(N2)):\n\n distance_matrix[i, j] = euclidean_dist(sunspots1[i], sunspots2[j])\n\n return distance_matrix", "def pairwise_cosine_similarity(x, y):\n x = torch.div(x, torch.sqrt(torch.max(torch.sum(x ** 2), 1e-12)))\n y = torch.div(y, torch.sqrt(torch.max(torch.sum(y ** 2), 1e-12)))\n return torch.mm(x, torch.transpose(y, 1, 0))", "def pairwise_cosine_similarity(data, pairs, axis=0):\n lp, rp = pairs[:, 0], pairs[:, 1]\n if axis:\n 
vec_prod = data[:, lp].multiply(data[:, rp])\n else:\n vec_prod = data[lp].multiply(data[rp])\n axis = 1 - axis\n vec_norm = norm(data, axis=axis)\n vprod = np.squeeze(vec_prod.sum(axis=axis).getA())\n vnorm = vec_norm[lp] * vec_norm[rp]\n assert np.all(vnorm != 0)\n return vprod / vnorm", "def get_distance_matrix(self, points):\n return points[:, :, np.newaxis, :]-points[:, np.newaxis, :, :]", "def homomat(points_in_img1, points_in_img2):\n s = points_in_img1.shape[0]\n A = np.zeros((s * 2, 9)) # To\n for index in range(0, s):\n x, y = points_in_img1[index][0], points_in_img1[index][1]\n tx, ty = points_in_img2[index][0], points_in_img2[index][1]\n A[2 * index] = [x, y, 1, 0, 0, 0, -1 * tx * x, -1 * tx * y, -1 * tx]\n A[2 * index + 1] = [0, 0, 0, x, y, 1, -1 * ty * x, -1 * ty * y, -1 * ty]\n\n u, s, v = np.linalg.svd(A)\n H = v[-1].reshape(3, 3) # eigenvector with the least eigenvalue\n return H / H[2, 2]", "def calculate_similarity(self, tweets):\r\n if (len(tweets) == 1):\r\n return 0\r\n vectors = self.vectorizer.vectorize_data(tweets, False)\r\n\r\n temp = cosine_similarity(vectors[0:-1], vectors)\r\n temp = [item for sublist in temp for item in sublist]\r\n sim = sum(temp) / len(temp)\r\n return sim", "def from_points(cls, points):\n if len(points) < 3:\n raise ValueError(\"At least three points are required to construct a sphere.\")\n\n if len(points) == 3:\n return cls.from_three_points(*points)\n\n from compas.geometry import bestfit_sphere_numpy\n\n center, radius = bestfit_sphere_numpy(points)\n return cls(radius, frame=Frame(center, [1, 0, 0], [0, 1, 0]))", "def sphere_generator():\n\n sphericalRadius = np.sqrt(N / (4 * np.pi * pointDensity))\n sphericalThreshold = sphericalRadius * np.arccos(1 - 2 * thresholdFrac)\n\n data_sphere = []\n # np.random.seed(2020)\n for r in range(num_graphs):\n coords = sample_spherical(N, sphericalRadius, 3)\n # computes the adjacency matrix\n Adj_Matrix = np.zeros((N, N))\n for i in range(N):\n for j in range(N):\n a = coords[:, i]\n b = coords[:, j]\n dot_prod = np.dot(a, b)/sphericalRadius**2\n dot_prod = min(dot_prod, 1) # <-- sometimes np.dot returns 1.00000000002, messing up np.arccos()\n\n \"\"\" note that when np.arrcos gets 1, it returns a nan \"\"\"\n theta = np.arccos(dot_prod) # gets the angle between a and b (in radians)\n\n # ij_dist = np.linalg.norm(a-b) # calculate euclidean distance\n ij_dist = sphericalRadius * theta # arclength distance\n if ij_dist < sphericalThreshold:\n Adj_Matrix[i, j] = 1 # nodes that are connected are assigned a 1 in the matrix\n\n data_sphere.append(Adj_Matrix)\n\n return data_sphere", "def compute_cosine_similarity(self):\n cos_matrix = []\n for i in range(len(self.train_vec)):\n val = self.vec1 * self.train_vec[i]\n cos_matrix.append(val[0])\n out = np.argmax(cos_matrix)\n print(self.train_output[out])", "def cosine_sim(matrix):\n if type(matrix) is not csr_matrix:\n matrix = csr_matrix(matrix)\n\n return cosine_similarity(matrix, dense_output=False)", "def calculate_dist_mat(embeddings: np.ndarray, norm: int) -> np.ndarray:\n kwargs = {'p': norm}\n condensed_dist = pdist(embeddings, metric='minkowski', **kwargs)\n dist_mat = squareform(condensed_dist)\n return dist_mat", "def test_cosine_similarity_compiled():\n vector1 = np.array([1, 1, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = cosine_similarity(vector1, vector1)\n score12 = cosine_similarity(vector1, vector2)\n score22 = cosine_similarity(vector2, vector2)\n\n assert score12 == 2 / np.sqrt(2 * 4), \"Expected different score.\"\n assert 
score11 == score22 == 1.0, \"Expected different score.\"", "def laplacian(points, sigma):\n\n S = similarity_matrix(points, sigma)\n \n (npnts,npnts) = S.shape \n\n D = np.zeros_like(S)\n\n for i in range(npnts):\n #D[i,i] = 1.0 / np.sqrt(S[i,:].sum()) \n D[i,i] = S[i,:].sum()\n\n\n return (D - S) #(np.eye(npnts,npnts) - np.dot(D,np.dot(S,D)))", "def cosine_similarity(vec1, vec2) -> float:\n return np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))", "def compare_stability_matrices(ism1, ism2): \n \n import scipy as sp\n import sklearn as sk\n\n ism1=sk.preprocessing.normalize(ism1,norm='l2')\n ism2=sk.preprocessing.normalize(ism2,norm='l2')\n distance=sp.spatial.distance.correlation(ism1.ravel(), ism2.ravel())\n similarity= 1-distance\n return similarity", "def center_of_mass(points):\n # break into many triangles\n # each point is part of two triangles\n cor = [sum(points) / len(points)]\n mass_points = []\n area = 0\n for i in range(len(points) - 1):\n triangle = cor + points[i:i + 2]\n # print(triangle)\n mass_points.append(build_triangle_point_mass(triangle))\n area += shoelace_area(triangle)\n # print(triangle, area)\n mass_points.append(build_triangle_point_mass(cor + [points[-1], points[0]]))\n area += shoelace_area(cor + [points[-1], points[0]])\n return Vector2D(*find_com(*zip(*mass_points))), area", "def test_jaccard_similarity_matrix():\n vectors1 = np.array([[1, 1, 0, 0],\n [0, 0, 1, 1]])\n vectors2 = np.array([[0, 1, 1, 0],\n [1, 0, 1, 1]])\n\n scores = jaccard_similarity_matrix.py_func(vectors1, vectors2)\n expected_scores = np.array([[1/3, 1/4],\n [1/3, 2/3]])\n assert scores == pytest.approx(expected_scores, 1e-7), \"Expected different scores.\"", "def cosine_similarity(self, x, y):\n return np.dot(x, y) / (np.linalg.norm(x) * np.linalg.norm(y))", "def get_cosine_similarities(keywords: List[str],\n matrices: Matrices,\n word2onehot: Dict[str, int]\n ) -> None:\n for i in combinations(keywords, 2):\n print(i[0], i[1], cosine_sim(\n matrices.embedding[\n word2onehot[i[0]]], matrices.embedding[word2onehot[i[1]]\n ]\n ))", "def calculate_similarities(self) -> List[float]:\n sims = list()\n for i, r in self.sim_data.iterrows():\n if isinstance(self.model, FastTextWrapper):\n vecs = self.model.inference([r[\"Word 1\"], r[\"Word 2\"]])\n else:\n vecs = self.model.inference_batches([[r[\"Word 1\"]], [r[\"Word 2\"]]])\n vecs = [x[0] for x in vecs]\n if len(vecs) == 2:\n s = cosine_similarity([vecs[0]], [vecs[1]])[0][0]\n sims.append(s)\n else:\n sims.append(np.nan)\n self.sim_data[\"assigned_sim\"] = sims\n self.sim_data = self.sim_data.dropna()\n self.mean_error()\n self.correlation()\n return sims", "def cosine_similarity(vec_x, vec_y):\n sim_prod = 0.0\n len_x = 0\n len_y = 0\n\n for ngram in vec_x:\n len_x += vec_x[ngram] ** 2\n\n for ngram in vec_y:\n len_y += vec_y[ngram] ** 2\n\n len_x = math.sqrt(len_x)\n len_y = math.sqrt(len_y)\n\n for ngram in vec_x:\n if ngram in vec_y:\n sim_prod += vec_x[ngram] * vec_y[ngram]\n\n return sim_prod / (len_x * len_y)", "def test_cosine_similarity():\n vector1 = np.array([1, 1, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = cosine_similarity.py_func(vector1, vector1)\n score12 = cosine_similarity.py_func(vector1, vector2)\n score22 = cosine_similarity.py_func(vector2, vector2)\n\n assert score12 == 2 / np.sqrt(2 * 4), \"Expected different score.\"\n assert score11 == score22 == 1.0, \"Expected different score.\"", "def compute_cosine_sim(vec1, vec2):\n numer = np.dot(vec1.reshape((300,)), vec2.reshape((300,)))\n 
denom = np.sqrt(np.sum(np.square(vec1.reshape(300, )))) * np.sqrt(\n np.sum(np.square(vec2.reshape(300, ))))\n\n similarity = numer / denom\n\n return similarity", "def main():\n\n measures = Similarity()\n\n input1=sys.argv[1]\n vect1=np.loadtxt(fname = input1)\n \n input2=sys.argv[2]\n vect2=np.loadtxt(fname = input2)\n\n print measures.cosine_similarity(normBySum(vect1), normBySum(vect2))\n \n\n\n #print measures.cosine_similarity2(vect1, vect2)\n\n #print measures.jaccard_similarity([0,1,2,5,6],[0,2,3,5,7,9])", "def cosine_similarity(v1, v2):\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))", "def on_sphere():\n vec = np.random.standard_normal(3)\n return vec / np.linalg.norm(vec)", "def test_points_on_1sphere_8x():\n points = generate.points_on_1sphere(8, 'x')\n assert np.allclose(points[0], cst.quat1)\n assert np.allclose(points[2], cst.quatx90)\n assert np.allclose(points[4], cst.quatx)", "def _cosine_similarity_update(preds: Tensor, target: Tensor) ->Tuple[Tensor, Tensor]:\n _check_same_shape(preds, target)\n preds = preds.float()\n target = target.float()\n return preds, target", "def _compute_pairwise_distance(\n x: np.ndarray, y: np.ndarray, symmetric: bool, distance_callable: DistanceCallable\n) -> np.ndarray:\n _x = _make_3d_series(x)\n _y = _make_3d_series(y)\n x_size = _x.shape[0]\n y_size = _y.shape[0]\n\n pairwise_matrix = np.zeros((x_size, y_size))\n\n for i in range(x_size):\n curr_x = _x[i]\n for j in range(y_size):\n if symmetric and j < i:\n pairwise_matrix[i, j] = pairwise_matrix[j, i]\n else:\n pairwise_matrix[i, j] = distance_callable(curr_x, _y[j])\n return pairwise_matrix", "def point_distances(src_points, gt_points):\n distances = EuclideanDistances(np.matrix(src_points), np.matrix(gt_points))\n return np.array(distances)", "def apply(self, points):\n pshape = numpy.shape(points)\n homogeneous = 1\n if len(pshape) == 1:\n if pshape[0] == 3:\n points = numpy.array(numpy.concatenate((points, numpy.ones(1, 'f')), 1))\n homogeneous = 0\n elif len(pshape) == 2:\n if pshape[1] == 3:\n points = numpy.array(numpy.concatenate(\n (numpy.array(points), numpy.ones((pshape[0], 1), 'f')), 1))\n homogeneous = 0\n mtx = self.getMatrix((4, 4), transpose=1)\n newpoints = numpy.dot(points, mtx)\n if homogeneous:\n return newpoints\n else:\n # strip the final one off the coordinates\n if len(pshape) == 1:\n return newpoints[:3]\n else:\n newpoints = [x[:3] for x in newpoints]\n return newpoints", "def cosine_similarity(cls, vec_a, vec_b):\n return np.dot(vec_a, vec_b) / \\\n (np.linalg.norm(vec_a) * np.linalg.norm(vec_b))", "def cosine_similarity(x1, x2, dim=1, eps=1e-8):\n w12 = torch.sum(x1 * x2, dim)\n w1 = torch.norm(x1, 2, dim)\n w2 = torch.norm(x2, 2, dim)\n return (w12 / (w1 * w2).clamp(min=eps)).squeeze()", "def calculate_cosine_similarity(self):\n tfidf_matrix = self.calculate_tfidf()\n\n cosine_similarity = linear_kernel(tfidf_matrix, tfidf_matrix) # Cosine similarity matrix calculation\n\n return cosine_similarity", "def test_sym_sqrtm(self): \n # create random symmetric n x n matrix\n n = 5\n A = 5.0 * 2.0*(torch.rand(n,n) - 0.5)\n A = A + A.T\n\n # reference implementation of scipy\n sqA_scipy = sla.sqrtm(A.numpy())\n isqA_scipy = sla.inv(sla.sqrtm(A.numpy()))\n # my own implementation using pure torch functions\n sqA,isqA = (x.numpy() for x in _sym_sqrtm(A))\n \n self.assertTrue(np.isclose(sqA, sqA_scipy).all())\n self.assertTrue(np.isclose(isqA, isqA_scipy).all())", "def test_jaccard_similarity_matrix_compiled():\n vectors1 = np.array([[1, 1, 0, 
0],\n [0, 0, 1, 1]])\n vectors2 = np.array([[0, 1, 1, 0],\n [1, 0, 1, 1]])\n\n scores = jaccard_similarity_matrix(vectors1, vectors2)\n expected_scores = np.array([[1/3, 1/4],\n [1/3, 2/3]])\n assert scores == pytest.approx(expected_scores, 1e-7), \"Expected different scores.\"", "def rotation_from_sphere_points_torch(x, y):\n if x.dim() == 1:\n x = x.unsqueeze(-2)\n if y.dim() == 1:\n y = y.unsqueeze(-2)\n\n dim = x.shape[1]\n\n # Compute the inner product\n inner_product = torch.mm(x, y.T)\n # Clamp in case any value is not in the interval [-1,1]\n # A small number is added/substracted to the bounds to avoid NaNs during backward computation.\n inner_product = inner_product.clamp(-1. + 1e-15, 1. - 1e-15)\n\n # Compute intermediate vector\n c_vec = x - y * inner_product\n c_vec = c_vec / torch.norm(c_vec)\n\n R = torch.eye(dim, dim, dtype=inner_product.dtype) + \\\n torch.sin(torch.acos(inner_product)) * (torch.mm(y.T, c_vec) - torch.mm(c_vec.T, y)) + \\\n (inner_product - 1.) * (torch.mm(y.T, y) + torch.mm(c_vec.T, c_vec))\n\n return R", "def cosine_similarity(x1, x2, dim=1, eps=1e-8):\r\n w12 = torch.sum(x1 * x2, dim)\r\n w1 = torch.norm(x1, 2, dim)\r\n w2 = torch.norm(x2, 2, dim)\r\n return (w12 / (w1 * w2).clamp(min=eps)).squeeze()", "def test_cosine_similarity_all_zeros_compiled():\n vector1 = np.array([0, 0, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = cosine_similarity(vector1, vector1)\n score12 = cosine_similarity(vector1, vector2)\n score22 = cosine_similarity(vector2, vector2)\n\n assert score11 == score12 == 0.0, \"Expected different score.\"\n assert score22 == 1.0, \"Expected different score.\"", "def gram_schmidt(vects):\n\n res = [vects[:,0] / np.linalg.norm(vects[:,0])]\n for i in xrange(1, vects.shape[1]):\n curr = vects[:,i] - reduce(lambda x,y: x+y, map(lambda x: proj(x,vects[:,i]), res))\n res.append(curr / np.linalg.norm(curr))\n return np.stack(res, axis=1)", "def _generate_similarity_mat(labels):\n l_mat = np.repeat(labels, len(labels), axis=1)\n l_mat_t = l_mat.T\n\n sim_mat = np.equal(l_mat, l_mat_t).astype(int)\n return sim_mat", "def nearest_sphere_surface(x_input, y_input, z_input):\n\n vm = math.sqrt(sum([x_input**2, y_input**2, z_input**2]))\n return (x_input/vm, y_input/vm, z_input/vm)", "def trans_hellinger(m):\n m = asmatrix(m)\n row_sums = sum(m, axis=1)\n result = sqrt(m / row_sums)\n return result", "def cartesian2spherical(coords):\n sphere = np.zeros(coords.shape)\n xy_sq = coords[:, 0]**2 + coords[:, 1]**2\n sphere[:, 0] = np.sqrt(xy_sq + coords[:, 2]**2)\n sphere[:, 1] = np.arctan2(coords[:, 1], coords[:, 0])\n sphere[:, 2] = np.arctan2(np.sqrt(xy_sq), coords[:, 2])\n return sphere", "def test_surface_metric_matrices(self, faces, point):\n space = self.Space(faces=faces)\n result = space.surface_metric_matrices(point=point)\n assert result.shape == (\n space.n_faces,\n 2,\n 2,\n ), result.shape\n\n point = gs.array([point, point])\n result = space.surface_metric_matrices(point=point)\n assert result.shape == (2, space.n_faces, 2, 2)", "def getDistanceMatrix(self):\n v = self.getVectors()\n vLis = v.keys()\n N = len(v.keys())\n D = np.zeros([N, N], dtype=np.float32)\n print(N)\n for i in range(N):\n print(\"%d/%d\" %(i, N))\n D[i, i] = 1\n for j in range(i + 1, N):\n dist = self.cosin_sim_pairs(v[vLis[i]], v[vLis[j]])\n D[i, j] = dist\n D[j, i] = dist\n return D", "def jaccard_similarity_matrix(references: np.ndarray, queries: np.ndarray) -> np.ndarray:\n size1 = references.shape[0]\n size2 = queries.shape[0]\n scores = np.zeros((size1, 
size2))\n for i in range(size1):\n for j in range(size2):\n scores[i, j] = jaccard_index(references[i, :], queries[j, :])\n return scores", "def model_book_similarities(data):\n\n data = data.T\n U2 = distance.squareform(distance.pdist(data, metric='cosine'))\n sim_matrix = pd.DataFrame(U2)\n\n return sim_matrix", "def test_dice_similarity_matrix_compiled():\n vectors1 = np.array([[1, 1, 0, 0],\n [0, 0, 1, 1]])\n vectors2 = np.array([[0, 1, 1, 0],\n [1, 0, 1, 1]])\n\n scores = dice_similarity_matrix(vectors1, vectors2)\n expected_scores = np.array([[0.5, 0.4],\n [0.5, 0.8]])\n assert scores == pytest.approx(expected_scores, 1e-7), \"Expected different scores.\"", "def centroid_points(points):\n p = float(len(points))\n x, y, z = zip(*points)\n return sum(x) / p, sum(y) / p, sum(z) / p", "def cosine_sim_numpy(im, s):\n return im.dot(s.T)", "def test_dice_similarity_matrix():\n vectors1 = np.array([[1, 1, 0, 0],\n [0, 0, 1, 1]])\n vectors2 = np.array([[0, 1, 1, 0],\n [1, 0, 1, 1]])\n\n scores = dice_similarity_matrix.py_func(vectors1, vectors2)\n expected_scores = np.array([[0.5, 0.4],\n [0.5, 0.8]])\n assert scores == pytest.approx(expected_scores, 1e-7), \"Expected different scores.\"", "def get_cosine_sim(self):\r\n return CosineSimilarity().calculate_similarity(self.tweets)", "def plot_points_on_sphere(points_x, points_y, points_z, center_x, center_y, center_z, radius):\n from mayavi import mlab\n mlab.figure(1, bgcolor=(1,1,1), fgcolor=(0,0,0), size=(800,800))\n return mlab.points3d(points_x, points_y, points_z, scale_factor=0.05, color=(0.25, 0.75, 0.77))", "def cosine_similarity(self, v1: np.ndarray, v2: np.ndarray) -> float:\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))\n # return cosine_similarity(v1, v2)[0][0]", "def spherefcn(x: np.ndarray) -> np.ndarray:\n if x.ndim == 1:\n x = x.reshape(-1, len(x))\n f = np.sum(x**2, axis=1)\n return f.reshape(-1, 1)", "def support(self, *mass_functions):\n result = 0\n for mass_function in mass_functions:\n result += self.similarity(mass_function)\n return round(result, 6)", "def points_on_sphere(\n N,\n origin = numpy.zeros(3),\n radius = 1.):\n phi = (1 + numpy.sqrt(5)) / 2 # the golden ratio\n long_incr = 2*numpy.pi / phi # how much to increment the longitude\n\n dz = 2.0 / float(N) # a unit sphere has diameter 2\n bands = numpy.arange(N) # each band will have one point placed on it\n z = bands * dz - 1 + (dz/2) # the height z of each band/point\n r = numpy.sqrt(1 - z*z) # project onto xy-plane\n az = bands * long_incr # azimuthal angle of point modulo 2 pi\n x = r * numpy.cos(az)\n y = r * numpy.sin(az)\n ## get triangles\n points = numpy.array([x, y, z])\n tri = scipy.spatial.ConvexHull(points.T)\n points = origin[None, :] + points.T*radius\n return points, tri.simplices", "def cosine_similarity(v1, v2):\n sim = np.sum(v1*v2)/np.sqrt(np.sum(v1**2))/np.sqrt(np.sum(v2**2))\n return sim", "def _distance_matrix(self):\n\n # Log the type of metric being used in Sequencing\n logger.info('Using {} Distance'.format(self.measure))\n\n # Convert the nodal coordinate tuples to a np.array\n coords = np.vstack(map(np.array, self.coords.values()))\n \n if self.measure == 'haversine':\n # Partially applied haversine function that takes a coord and computes the vector distances for all coords\n haversine = lambda coord: get_hav_distance(coords[:, 0], coords[:, 1], *coord) \n # Map the partially applied function over all coordinates, and stack to a matrix\n return np.vstack(map(haversine, coords))\n\n # Partially applied haversine 
function that takes a coord and computes the vector distances for all coords\n euclidean = lambda coord: get_euclidean_dist(coords, coord)\n # Map the partially applied function over all coordinates, and stack to a matrix\n return np.vstack(map(euclidean, coords))", "def users_games_similarity(games_features_matrix: csr_matrix, users_features_matrix: csr_matrix) -> np.array:\n logging.getLogger(__name__).debug('Users games similarity calculating...')\n users_games_similarity_matrix = cosine_similarity(users_features_matrix, games_features_matrix)\n logging.getLogger(__name__).debug('users_games_similarity.shape: ' + str(users_games_similarity_matrix.shape))\n return users_games_similarity_matrix", "def compute_template_similarity(waveform_extractor, method='cosine_similarity'):\n import sklearn.metrics.pairwise\n\n templates = waveform_extractor.get_all_templates()\n s = templates.shape\n if method == 'cosine_similarity':\n templates_flat = templates.reshape(s[0], -1)\n similarity = sklearn.metrics.pairwise.cosine_similarity(templates_flat)\n # elif method == '':\n else:\n raise ValueError(f'compute_template_similarity(method{method}) not exists')\n\n return similarity", "def compute_pairwise_distances(input_vecs: types.Tensor) -> types.Tensor:\n r = tf.reduce_sum(input_vecs * input_vecs, axis=1, keepdims=True)\n pdistance_matrix = (\n r\n - 2 * tf.matmul(input_vecs, input_vecs, transpose_b=True)\n + tf.transpose(r)\n )\n return tf.cast(pdistance_matrix, dtype=tf.float32)", "def cosine_similarity(vector_x, vector_y):\n if(len(vector_x)!=len(vector_y)):\n raise Exception('Vectors must be the same dimensions')\n \n return 1-np.dot(vector_x,vector_y)/(np.linalg.norm(vector_x)*np.linalg.norm(vector_y))", "def similarities (self, listOfWords):\n \n # building the query dictionary\n queryDict = collections.defaultdict(int)\n for w in listOfWords:\n queryDict [w] += + 1.0\n \n # normalizing the query\n length = float (len (listOfWords))\n for k in queryDict:\n queryDict [k] /= length\n \n # computing the list of similarities\n sims = []\n for doc in self.documents:\n score = 0.0\n docDict = doc [1]\n for k in queryDict:\n if docDict.has_key (k):\n score += (queryDict [k] / self.corpusDict [k]) + (docDict [k] / self.corpusDict [k])\n sims.append ([doc [0], score])\n \n return sims", "def cosineDistanceMatrix():\n\n\tmatrix = movieMatrix()\n\tsimilarity = np.dot(matrix, matrix.T)\n\tsquareMag = np.diag(similarity)\n\tinvSquareMag = 1/squareMag\n\tinvSquareMag[np.isinf(invSquareMag)]=0\n\tinvMag = np.sqrt(invSquareMag)\n\tcosine = similarity * invMag\n\tcosine = cosine.T * invMag\n\treturn cosine", "def find_tools_cos_distance_matrix( self, document_token_matrix_sources, tools_list ):\n mat_size = len( tools_list )\n similarity_matrix_sources = dict()\n for source in document_token_matrix_sources:\n print \"Computing similarity scores for source %s...\" % source\n sim_mat = document_token_matrix_sources[ source ]\n sim_scores = np.zeros( ( mat_size, mat_size ) )\n for index_x, item_x in enumerate( sim_mat ):\n tool_scores = sim_scores[ index_x ]\n for index_y, item_y in enumerate( sim_mat ):\n # compute similarity scores between two vectors\n if source == \"input_output\":\n pair_score = utils._jaccard_score( item_x, item_y )\n else:\n pair_score = utils._cosine_angle_score( item_x, item_y )\n tool_scores[ index_y ] = pair_score\n similarity_matrix_sources[ source ] = sim_scores\n return similarity_matrix_sources", "def test_cosine_similarity_all_zeros():\n vector1 = np.array([0, 0, 0, 0])\n vector2 
= np.array([1, 1, 1, 1])\n score11 = cosine_similarity.py_func(vector1, vector1)\n score12 = cosine_similarity.py_func(vector1, vector2)\n score22 = cosine_similarity.py_func(vector2, vector2)\n\n assert score11 == score12 == 0.0, \"Expected different score.\"\n assert score22 == 1.0, \"Expected different score.\"", "def fix_sphere_h (center_x, center_y, center_z, radius, centers, radii, len_points, list_of_a):\n g_x = []\n g_y = []\n g_z = []\n points = [hydrogen_coord_gen(center_x, center_y, center_z, radius) for i in range(0, len_points)] \n x = [points[i][0] for i in range(0, len(points))] \n y = [points[i][1] for i in range(0, len(points))]\n z = [points[i][2] for i in range(0, len(points))]\n for i in range(0, len(points)):\n check = 0\n check_b = 0\n j = 0\n while (j <= (len(centers) - 1) and (check == 0)): \n if (calculate_3D_distance_2_centers(x[i], y[i], z[i], centers[j][0], centers[j][1], centers[j][2]) < radii[j]):\n check += 1\n j += 1\n h = 0\n while ((check_b == 0) and (h <= len(list_of_a) -1)):\n if (calculate_3D_distance_2_centers(x[i], y[i], z[i], list_of_a[h].x, list_of_a[h].y, list_of_a[h].z) <= 1.50): \n check_b += 1\n h += 1\n if ((check == 0) and (check_b == 0)):\n g_x.append(x[i])\n g_y.append(y[i])\n g_z.append(z[i])\n return g_x, g_y, g_z", "def all_dists(coords, spherical=True):\n\n # get all perm's of coords\n a = np.tile(coords, (len(coords), 1))\n b = np.repeat(coords, len(coords), axis=0)\n\n def sq_dist(a, b): return np.sum((a - b)**2, axis=1)\n\n dists = np.sqrt(sq_dist(a, b))\n if spherical:\n coord_pairs = np.concatenate((a[:,np.newaxis], b[:,np.newaxis]),\n axis=1)\n dists = spherical_distance_haversine(coord_pairs)\n\n zero_indices = np.array(range(len(coords))) * (len(coords) + 1)\n\n # so that mins are not the zero [i, i] vals\n dists[zero_indices] = np.inf\n full_dist_matrix = dists.reshape(len(coords), len(coords))\n return full_dist_matrix", "def cosine_sim_cf(matrix):\n if type(matrix) is not lil_matrix:\n matrix = lil_matrix(matrix)\n\n n = matrix.shape[0]\n rows, cols, data = [], [], []\n user_items = [sorted([(item, idx) for idx, item in enumerate(matrix.rows[i])]) for i in range(n)]\n\n for i in range(n):\n i_ratings, i_items = matrix.data[i], user_items[i]\n for j in range(i, n):\n j_ratings, j_items = matrix.data[j], user_items[j]\n sum_numerator, sum_denominator_i, sum_denominator_j = 0, 0, 0\n i_item_ctd, j_item_ctd = 0, 0\n while i_item_ctd < len(i_items) and j_item_ctd < len(j_items):\n if i_items[i_item_ctd][0] > j_items[j_item_ctd][0]:\n j_item_ctd += 1\n elif i_items[i_item_ctd][0] < j_items[j_item_ctd][0]:\n i_item_ctd += 1\n else:\n i_idx = i_items[i_item_ctd][1]\n j_idx = j_items[j_item_ctd][1]\n sum_numerator += i_ratings[i_idx] * j_ratings[j_idx]\n sum_denominator_i += i_ratings[i_idx] ** 2\n sum_denominator_j += j_ratings[j_idx] ** 2\n i_item_ctd += 1\n j_item_ctd += 1\n\n if sum_numerator == 0: continue\n s = sum_numerator / (math.sqrt(sum_denominator_i) * math.sqrt(sum_denominator_j))\n rows.append(i), cols.append(j), data.append(s)\n if i != j: rows.append(j), cols.append(i), data.append(s)\n\n return csr_matrix((data, (rows, cols)))", "def compute_all_similarities(self,A,a):\n pass" ]
[ "0.66129136", "0.63335097", "0.62122184", "0.6183862", "0.60915583", "0.60669845", "0.59568816", "0.5932109", "0.5899266", "0.588513", "0.5874931", "0.5850991", "0.5815165", "0.5814901", "0.58031887", "0.5772447", "0.5766222", "0.5751658", "0.57376456", "0.5730853", "0.57160264", "0.57133245", "0.56603676", "0.5656061", "0.56377786", "0.56176496", "0.55887806", "0.5571803", "0.5566374", "0.55611163", "0.5541445", "0.5538036", "0.5490301", "0.5453908", "0.5441878", "0.5437689", "0.54329723", "0.5430935", "0.54163915", "0.5411584", "0.53894264", "0.5384064", "0.5375676", "0.53721035", "0.5366834", "0.5362291", "0.5340874", "0.5335583", "0.53328043", "0.5307446", "0.53070265", "0.53048575", "0.529876", "0.5297316", "0.52959776", "0.52874666", "0.5283819", "0.5283071", "0.52826965", "0.52811825", "0.5274236", "0.52741396", "0.5271247", "0.52705073", "0.5259174", "0.5254571", "0.52367735", "0.52342063", "0.52328354", "0.52317756", "0.5229756", "0.5227549", "0.52257663", "0.52241856", "0.52114445", "0.5210019", "0.5209223", "0.520805", "0.52043676", "0.52021027", "0.5198791", "0.51985574", "0.51881135", "0.51856387", "0.5183343", "0.51827884", "0.51747054", "0.51656866", "0.5159795", "0.5159565", "0.51530087", "0.51528025", "0.5151932", "0.51492244", "0.5145568", "0.5141782", "0.5140327", "0.51400894", "0.5133175", "0.5130359" ]
0.7539285
0
Compute a graph Laplacian for a set of points. The points are assumed to lie on the surface of the same sphere.
def laplacian(points, sigma):
    S = similarity_matrix(points, sigma)

    (npnts, npnts) = S.shape

    D = np.zeros_like(S)

    for i in range(npnts):
        # D[i,i] = 1.0 / np.sqrt(S[i,:].sum())
        D[i, i] = S[i, :].sum()

    return (D - S)  # (np.eye(npnts,npnts) - np.dot(D,np.dot(S,D)))
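
A minimal usage sketch for the retrieved function (added for illustration, not part of the dataset): the snippet calls `similarity_matrix(points, sigma)` without defining it, so a Gaussian (heat-kernel) affinity is assumed below, and the `laplacian` definition simply mirrors the one above so the example runs standalone. The final check confirms that rows of the unnormalized Laplacian D − S sum to zero.

```python
import numpy as np

def similarity_matrix(points, sigma):
    # assumed helper: pairwise Gaussian affinities exp(-||p_i - p_j||^2 / (2 sigma^2))
    diff = points[:, None, :] - points[None, :, :]
    sq_dist = (diff ** 2).sum(axis=-1)
    return np.exp(-sq_dist / (2.0 * sigma ** 2))

def laplacian(points, sigma):
    S = similarity_matrix(points, sigma)
    D = np.diag(S.sum(axis=1))     # degree matrix
    return D - S                   # unnormalized graph Laplacian

# sample points on the unit sphere and build the Laplacian
rng = np.random.default_rng(0)
pts = rng.standard_normal((50, 3))
pts /= np.linalg.norm(pts, axis=1, keepdims=True)

L = laplacian(pts, sigma=0.5)
print(L.shape)                           # (50, 50)
print(np.allclose(L.sum(axis=1), 0.0))   # True: rows of D - S sum to zero
```
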
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def laplacian(self, point):\n n_vertices, n_faces = point.shape[-2], self.faces.shape[0]\n vertex_0, vertex_1, vertex_2 = self._vertices(point)\n len_edge_12 = gs.linalg.norm((vertex_1 - vertex_2), axis=-1)\n len_edge_02 = gs.linalg.norm((vertex_0 - vertex_2), axis=-1)\n len_edge_01 = gs.linalg.norm((vertex_0 - vertex_1), axis=-1)\n\n half_perimeter = 0.5 * (len_edge_12 + len_edge_02 + len_edge_01)\n area = gs.sqrt(\n (\n half_perimeter\n * (half_perimeter - len_edge_12)\n * (half_perimeter - len_edge_02)\n * (half_perimeter - len_edge_01)\n ).clip(min=1e-6)\n )\n sq_len_edge_12, sq_len_edge_02, sq_len_edge_01 = (\n len_edge_12 * len_edge_12,\n len_edge_02 * len_edge_02,\n len_edge_01 * len_edge_01,\n )\n cot_12 = (sq_len_edge_02 + sq_len_edge_01 - sq_len_edge_12) / area\n cot_02 = (sq_len_edge_12 + sq_len_edge_01 - sq_len_edge_02) / area\n cot_01 = (sq_len_edge_12 + sq_len_edge_02 - sq_len_edge_01) / area\n cot = gs.stack([cot_12, cot_02, cot_01], axis=1)\n cot /= 2.0\n id_vertices_120 = self.faces[:, [1, 2, 0]]\n id_vertices_201 = self.faces[:, [2, 0, 1]]\n id_vertices = gs.reshape(\n gs.stack([id_vertices_120, id_vertices_201], axis=0), (2, n_faces * 3)\n )\n\n def _laplacian(tangent_vec):\n \"\"\"Evaluate the mesh Laplacian operator.\n\n The operator is evaluated at a tangent vector at point to the\n manifold of DiscreteSurfaces. In other words, the operator is\n evaluated at a vector field defined on the surface point.\n\n Parameters\n ----------\n tangent_vec : array-like, shape=[..., n_vertices, 3]\n Tangent vector to the manifold at the base point that is the\n triangulated surface. This tangent vector is a vector field\n on the triangulated surface.\n\n Returns\n -------\n laplacian_at_tangent_vec: array-like, shape=[..., n_vertices, 3]\n Mesh Laplacian operator of the triangulated surface applied\n to one its tangent vector tangent_vec.\n \"\"\"\n to_squeeze = False\n if tangent_vec.ndim == 2:\n tangent_vec = gs.expand_dims(tangent_vec, axis=0)\n to_squeeze = True\n n_tangent_vecs = len(tangent_vec)\n tangent_vec_diff = (\n tangent_vec[:, id_vertices[0]] - tangent_vec[:, id_vertices[1]]\n )\n values = gs.einsum(\n \"bd,nbd->nbd\", gs.stack([gs.flatten(cot)] * 3, axis=1), tangent_vec_diff\n )\n\n laplacian_at_tangent_vec = gs.zeros((n_tangent_vecs, n_vertices, 3))\n\n id_vertices_201_repeated = gs.tile(id_vertices[1, :], (n_tangent_vecs, 1))\n\n for i_dim in range(3):\n laplacian_at_tangent_vec[:, :, i_dim] = gs.scatter_add(\n input=laplacian_at_tangent_vec[:, :, i_dim],\n dim=1,\n index=id_vertices_201_repeated,\n src=values[:, :, i_dim],\n )\n return (\n gs.squeeze(laplacian_at_tangent_vec, axis=0)\n if to_squeeze\n else laplacian_at_tangent_vec\n )\n\n return _laplacian", "def nodalLaplacian(self):\n if getattr(self, '_nodalLaplacian', None) is None:\n print('Warning: Laplacian has not been tested rigorously.')\n # The number of cell centers in each direction\n n = self.vnC\n # Compute divergence operator on faces\n if(self.dim == 1):\n D1 = sdiag(1./self.hx) * ddx(self.nCx)\n L = - D1.T*D1\n elif(self.dim == 2):\n D1 = sdiag(1./self.hx) * ddx(n[0])\n D2 = sdiag(1./self.hy) * ddx(n[1])\n L1 = sp.kron(speye(n[1]+1), - D1.T * D1)\n L2 = sp.kron(- D2.T * D2, speye(n[0]+1))\n L = L1 + L2\n elif(self.dim == 3):\n D1 = sdiag(1./self.hx) * ddx(n[0])\n D2 = sdiag(1./self.hy) * ddx(n[1])\n D3 = sdiag(1./self.hz) * ddx(n[2])\n L1 = kron3(speye(n[2]+1), speye(n[1]+1), - D1.T * D1)\n L2 = kron3(speye(n[2]+1), - D2.T * D2, speye(n[0]+1))\n L3 = kron3(- D3.T * D3, speye(n[1]+1), 
speye(n[0]+1))\n L = L1 + L2 + L3\n self._nodalLaplacian = L\n return self._nodalLaplacian", "def build_laplacian_nearest_neighbor_graph(\n input_vecs: types.Tensor, k: int = 1\n) -> types.Tensor:\n num_actions = tf.shape(input_vecs)[0]\n pdistance_matrix = compute_pairwise_distances(input_vecs)\n sorted_indices = tf.argsort(values=pdistance_matrix)\n selected_indices = tf.reshape(sorted_indices[:, 1 : k + 1], [-1, 1])\n rng = tf.tile(tf.expand_dims(tf.range(num_actions), axis=-1), [1, k])\n rng = tf.reshape(rng, [-1, 1])\n full_indices = tf.concat([rng, selected_indices], axis=1)\n adjacency_matrix = tf.zeros([num_actions, num_actions], dtype=tf.float32)\n adjacency_matrix = tf.tensor_scatter_nd_update(\n tensor=adjacency_matrix,\n indices=full_indices,\n updates=tf.ones([k * num_actions], dtype=tf.float32),\n )\n # Symmetrize it.\n adjacency_matrix = adjacency_matrix + tf.transpose(adjacency_matrix)\n adjacency_matrix = tf.minimum(\n adjacency_matrix, tf.ones_like(adjacency_matrix)\n )\n degree_matrix = tf.linalg.tensor_diag(tf.reduce_sum(adjacency_matrix, axis=1))\n laplacian_matrix = degree_matrix - adjacency_matrix\n return laplacian_matrix", "def generate_graph_laplacian(A):\r\n\r\n #Create symmetric matrix\r\n #A=0.5* (A+ A.T)\r\n \r\n #D is just the identity matrix (because sum(P)=1)\r\n Degree=np.sum(A,1)\r\n D=np.diag(Degree)\r\n \r\n #Laplacian matrix\r\n L=D-A\r\n return L", "def _laplacian(tangent_vec):\n to_squeeze = False\n if tangent_vec.ndim == 2:\n tangent_vec = gs.expand_dims(tangent_vec, axis=0)\n to_squeeze = True\n n_tangent_vecs = len(tangent_vec)\n tangent_vec_diff = (\n tangent_vec[:, id_vertices[0]] - tangent_vec[:, id_vertices[1]]\n )\n values = gs.einsum(\n \"bd,nbd->nbd\", gs.stack([gs.flatten(cot)] * 3, axis=1), tangent_vec_diff\n )\n\n laplacian_at_tangent_vec = gs.zeros((n_tangent_vecs, n_vertices, 3))\n\n id_vertices_201_repeated = gs.tile(id_vertices[1, :], (n_tangent_vecs, 1))\n\n for i_dim in range(3):\n laplacian_at_tangent_vec[:, :, i_dim] = gs.scatter_add(\n input=laplacian_at_tangent_vec[:, :, i_dim],\n dim=1,\n index=id_vertices_201_repeated,\n src=values[:, :, i_dim],\n )\n return (\n gs.squeeze(laplacian_at_tangent_vec, axis=0)\n if to_squeeze\n else laplacian_at_tangent_vec\n )", "def laplacian(G,nodelist=None,weight='weight'):\n try:\n import numpy as np\n except ImportError:\n raise ImportError(\n \"laplacian() requires numpy: http://scipy.org/ \")\n # this isn't the most efficient way to do this...\n if G.is_multigraph():\n A=np.asarray(nx.to_numpy_matrix(G,nodelist=nodelist,weight=weight))\n I=np.identity(A.shape[0])\n D=I*np.sum(A,axis=1)\n L=D-A\n return L\n # Graph or DiGraph, this is faster than above \n if nodelist is None:\n nodelist=G.nodes()\n n=len(nodelist)\n index=dict( (n,i) for i,n in enumerate(nodelist) )\n L = np.zeros((n,n))\n for ui,u in enumerate(nodelist):\n totalwt=0.0\n for v,d in G[u].items():\n try:\n vi=index[v]\n except KeyError:\n continue\n wt=d.get(weight,1)\n L[ui,vi]= -wt\n totalwt+=wt\n L[ui,ui]= totalwt\n return L", "def uniform_laplacian_smoothing(vertices, faces):\n dtype = vertices.dtype\n num_vertices = vertices.shape[1]\n\n laplacian_matrix = uniform_laplacian(num_vertices, faces).to(dtype)\n smoothed_vertices = torch.matmul(laplacian_matrix, vertices) + vertices\n\n return smoothed_vertices", "def l1(self, points):\n new_points = []\n sum = []\n for point in points:\n for i in range(len(point.coordinates)):\n if (i < len(sum)):\n sum[i] += abs(point.coordinates[i])\n else:\n 
sum.append(abs(point.coordinates[i]))\n for point in points:\n new_coordinates = point.coordinates\n new_coordinates = [(new_coordinates[i]/ sum[i]) for i in range(len(point.coordinates))]\n new_points.append(Point(point.name, new_coordinates, point.label))\n return new_points", "def lagrangePoints(mu):\n \n # define l = 1-mu\n l = 1 - mu\n \n # collinear points\n def eqL1(x):\n fval = x**5 + 2*(mu-l)*x**4 + (l**2-4*l*mu+mu**2)*x**3 + (2*mu*l*(l-mu)+mu-l)*x**2 + (mu**2*l**2+2*(l**2+mu**2))*x + mu**3-l**3\n #fval = gamma**5 - (3-mu)*gamma**4 + (3-2*mu)*gamma**3 - mu*gamma**2 + 2*mu*gamma - mu\n return fval\n sol_l1 = optimize.root(eqL1, 0.5, method='hybr')\n l1 = np.array([sol_l1.x[0] , 0, 0])\n \n def eqL2(x):\n fval = x**5 + 2*(mu-l)*x**4 + (l**2-4*l*mu+mu**2)*x**3 + (2*mu*l*(l-mu)-(mu+l))*x**2 + (mu**2*l**2+2*(l**2-mu**2))*x - (mu**3+l**3)\n #fval = gamma**5 + (3-mu)*gamma**4 + (3-2*mu)*gamma**3 - mu*gamma**2 - 2*mu*gamma - mu\n return fval\n sol_l2 = optimize.root(eqL2, 1.5, method='hybr')\n l2 = np.array([sol_l2.x[0] , 0, 0])\n \n def eqL3(x):\n fval = x**5 + 2*(mu-l)*x**4 + (l**2-4*mu*l+mu**2)*x**3 + (2*mu*l*(l-mu)+(l+mu))*x**2 + (mu**2*l**2+2*(mu**2-l**2))*x + l**3+mu**3\n return fval\n sol_l3 = optimize.root(eqL3, -1, method='hybr')\n l3 = np.array([sol_l3.x[0] , 0, 0])\n \n # equilateral points\n # L4\n l4 = np.array([np.cos(np.pi/3) - mu , np.sin(np.pi/3), 0])\n # L5\n l5 = np.array([np.cos(np.pi/3) - mu , -np.sin(np.pi/3), 0])\n \n return _lagrangePointsReturn(l1,l2,l3,l4,l5)", "def compute_mesh_laplacian(mesh, weights=None, fem_b=None, lap_type=\"conformal\"):\n print(\" Computing Laplacian\")\n if weights is None:\n (weights, fem_b) = compute_mesh_weights(mesh, weight_type=lap_type)\n\n if lap_type == \"fem\":\n weights.data = weights.data / 2\n\n N = weights.shape[0]\n sB = fem_b.sum(axis=0)\n diaB = sparse.dia_matrix((sB, 0), shape=(N, N))\n B = sparse.lil_matrix(diaB + fem_b)\n s = weights.sum(axis=0)\n dia = sparse.dia_matrix((s, 0), shape=(N, N))\n L = sparse.lil_matrix(dia - weights)\n\n # if symmetrize == 1 & & normalize == 0\n # L = diag(sum(W, 2)) - W;\n # elseif\n # symmetrize == 1 & & normalize == 1\n # L = speye(n) - diag(sum(W, 2). ^ (-1 / 2)) * W * diag(\n # sum(W, 2). ^ (-1 / 2));\n # elseif\n # symmetrize == 0 & & normalize == 1\n # L = speye(n) - diag(sum(W, 2). 
^ (-1)) * W;\n\n li = np.hstack(L.data)\n print(\" -nb Nan in Laplacian : \", len(np.where(np.isnan(li))[0]))\n print(\" -nb Inf in Laplacian : \", len(np.where(np.isinf(li))[0]))\n\n return L, B", "def laplacian(mesh):\n faces = np.array(mesh.triangles)\n N = np.array(mesh.vertices).shape[0]\n A = np.zeros((N, N))\n for i in range(3):\n for j in range(3):\n if i == j:\n continue\n A[(faces[:, i], faces[:, j])] = 1.0\n A = A + A.T\n diag = A.dot(np.ones(N))\n L = np.diag(diag) - A\n return L", "def normalized_laplacian(G,nodelist=None,weight='weight'):\n # FIXME: this isn't the most efficient way to do this...\n try:\n import numpy as np\n except ImportError:\n raise ImportError(\n \"normalized_laplacian() requires numpy: http://scipy.org/ \")\n if G.is_multigraph():\n A=np.asarray(nx.to_numpy_matrix(G,nodelist=nodelist,weight=weight))\n d=np.sum(A,axis=1)\n n=A.shape[0]\n I=np.identity(n)\n L=I*d-A\n osd=np.zeros(n)\n for i in range(n):\n if d[i]>0: osd[i]=np.sqrt(1.0/d[i])\n T=I*osd\n L=np.dot(T,np.dot(L,T))\n return L\n # Graph or DiGraph, this is faster than above \n if nodelist is None:\n nodelist = G.nodes()\n n=len(nodelist)\n L = np.zeros((n,n))\n deg = np.zeros((n,n))\n index=dict( (n,i) for i,n in enumerate(nodelist) )\n for ui,u in enumerate(nodelist):\n totalwt=0.0\n for v,data in G[u].items():\n try:\n vi=index[v]\n except KeyError:\n continue\n wt=data.get(weight,1)\n L[ui,vi]= -wt\n totalwt+=wt\n L[ui,ui]= totalwt\n if totalwt>0.0:\n deg[ui,ui]= np.sqrt(1.0/totalwt)\n L=np.dot(deg,np.dot(L,deg))\n return L", "def findstemsLiDAR(pointsXYZ):\n non_ground_points,ground = floor_remove(pointsXYZ)\n flatpoints = np.hstack([non_ground_points[:,0:2],np.zeros_like(non_ground_points)[:,0:1]])\n\n filtered_points = radius_outlier_removal(flatpoints)\n notgoodpoints = non_ground_points[np.isnan(filtered_points[:,0])]\n goodpoints = non_ground_points[np.bitwise_not(np.isnan(filtered_points[:,0]))]\n\n cluster_list = euclidean_cluster_extract(goodpoints)\n rg_clusters = []\n for i in cluster_list:\n rg_clusters.append(region_growing(i))\n\n models = []\n stem_clouds = []\n for i in rg_clusters:\n for p in i:\n indices, model = segment_normals(p)\n prop = len(p[indices])/len(p)\n if len(indices)>1 and prop>0. 
and np.arccos(np.dot([0,0,1],model[3:6]))<.6:\n points = p[indices]\n PC,_,_ = Plane.getPrincipalComponents(points)\n if PC[0]/PC[1]>10:\n stem_clouds.append(points)\n models.append(model)\n return stem_clouds,models", "def rebuild_the_laplacians():\n local_matrix = InteractomeInterface()\n local_matrix.full_rebuild()\n\n annot_matrix = AnnotomeInterface()\n annot_matrix.full_rebuild()", "def laplacian(src: torch.Tensor, kernel_size: int) -> torch.Tensor:\n return Laplacian(kernel_size)(src)", "def laplacian_(self, grid, i, j):\n l1 = grid[(i+1+self.N) % self.N][j] + grid[(i-1+self.N) % self.N][j]\n l2 = grid[i][(j+1+self.N) % self.N] + grid[i][(j-1+self.N) % self.N]\n l3 = -4*grid[i][j]\n return (l1 + l2 + l3)/self.dx**2", "def laplacian(self, array_in):\r\n\r\n # Call-through to Laplacian operator, already computed\r\n return self.laplace_op*array_in", "def fix_sphere_m (center_x, center_y, center_z, radius, centers, radii, len_points):\n \n g_x = []\n g_y = []\n g_z = []\n points = [hydrogen_coord_gen(center_x, center_y, center_z, radius) for i in range(0, len_points)] \n x = [points[i][0] for i in range(0, len(points))] \n y = [points[i][1] for i in range(0, len(points))]\n z = [points[i][2] for i in range(0, len(points))]\n\n for i in range(0, len(points)):\n check = 0\n j = 0\n while (j <= (len(centers) - 1) and (check == 0)): \n if (calculate_3D_distance_2_centers(x[i], y[i], z[i], centers[j][0], centers[j][1], centers[j][2]) < radii[j]):\n check += 1\n j += 1\n if (check == 0):\n g_x.append(x[i])\n g_y.append(y[i])\n g_z.append(z[i])\n\n return g_x, g_y, g_z", "def compute_mesh_laplacian(verts, tris, weight_type='cotangent',\n return_vertex_area=True, area_type='mixed',\n add_diagonal=True):\n if area_type not in ['mixed', 'lumped_mass']:\n raise ValueError('unknown area type: %s' % area_type)\n if weight_type not in ['cotangent', 'mean_value', 'uniform']:\n raise ValueError('unknown weight type: %s' % weight_type)\n\n n = len(verts)\n # we consider the triangle P, Q, R\n iP = tris[:, 0]\n iQ = tris[:, 1]\n iR = tris[:, 2]\n # edges forming the triangle\n PQ = verts[iP] - verts[iQ] # P--Q\n QR = verts[iQ] - verts[iR] # Q--R\n RP = verts[iR] - verts[iP] # R--P\n if weight_type == 'cotangent' or (return_vertex_area and area_type == 'mixed'):\n # compute cotangent at all 3 points in triangle PQR\n double_area = V.veclen(np.cross(PQ, RP))\n cotP = -1 * (PQ * RP).sum(axis=1) / double_area # angle at vertex P\n cotQ = -1 * (QR * PQ).sum(axis=1) / double_area # angle at vertex Q\n cotR = -1 * (RP * QR).sum(axis=1) / double_area # angle at vertex R\n\n # compute weights and indices\n if weight_type == 'cotangent':\n I = np.concatenate(( iP, iR, iP, iQ, iQ, iR))\n J = np.concatenate(( iR, iP, iQ, iP, iR, iQ))\n W = 0.5 * np.concatenate((cotQ, cotQ, cotR, cotR, cotP, cotP))\n\n elif weight_type == 'mean_value':\n # TODO: I didn't check this code yet\n PQlen = 1 / V.veclen(PQ)\n QRlen = 1 / V.veclen(QR)\n RPlen = 1 / V.veclen(RP)\n PQn = PQ * PQlen[:,np.newaxis] # normalized\n QRn = QR * QRlen[:,np.newaxis]\n RPn = RP * RPlen[:,np.newaxis]\n # TODO pretty sure there is a simpler solution to those 3 formulas\n tP = np.tan(0.5 * np.arccos((PQn * -RPn).sum(axis=1)))\n tQ = np.tan(0.5 * np.arccos((-PQn * QRn).sum(axis=1)))\n tR = np.tan(0.5 * np.arccos((RPn * -QRn).sum(axis=1)))\n I = np.concatenate(( iP, iP, iQ, iQ, iR, iR))\n J = np.concatenate(( iQ, iR, iP, iR, iP, iQ))\n W = np.concatenate((tP*PQlen, tP*RPlen, tQ*PQlen, tQ*QRlen, tR*RPlen, tR*QRlen))\n\n elif weight_type == 'uniform':\n # 
this might add an edge twice to the matrix\n # but prevents the problem of boundary edges going only in one direction\n # we fix this problem after the matrix L is constructed\n I = np.concatenate((iP, iQ, iQ, iR, iR, iP))\n J = np.concatenate((iQ, iP, iR, iQ, iP, iR))\n W = np.ones(len(tris) * 6)\n\n # construct sparse matrix\n # notice that this will also sum duplicate entries of (i,j), \n # which is explicitely assumed by the code above\n L = sparse.csr_matrix((W, (I, J)), shape=(n, n))\n if weight_type == 'uniform':\n # because we probably add weights in both directions of an edge earlier, \n # and the csr_matrix constructor sums them, some values in L might be 2 instead of 1\n # so reset them\n L.data[:] = 1\n # add diagonal entries as the sum across rows\n if add_diagonal:\n L = L - sparse.spdiags(L * np.ones(n), 0, n, n)\n\n if return_vertex_area:\n if area_type == 'mixed':\n # compute voronoi cell areas\n aP = 1/8. * (cotR * (PQ**2).sum(axis=1) + cotQ * (RP**2).sum(axis=1)) # area at point P\n aQ = 1/8. * (cotR * (PQ**2).sum(axis=1) + cotP * (QR**2).sum(axis=1)) # area at point Q\n aR = 1/8. * (cotQ * (RP**2).sum(axis=1) + cotP * (QR**2).sum(axis=1)) # area at point R\n # replace by barycentric areas for obtuse triangles\n # TODO area computed previously in cotangent formula, reuse it here?\n triangle_area = .5 * V.veclen(np.cross(PQ, RP))\n for i, c in enumerate([cotP, cotQ, cotR]):\n is_x_obtuse = c < 0 # obtuse at point?\n # TODO: the paper by Desbrun says that we should divide by 1/2 or 1/4,\n # but according to other code I found we should divide by 1 or 1/2\n # check which scheme is correct!\n aP[is_x_obtuse] = triangle_area[is_x_obtuse] * (1 if i == 0 else 1/2.)\n aQ[is_x_obtuse] = triangle_area[is_x_obtuse] * (1 if i == 1 else 1/2.)\n aR[is_x_obtuse] = triangle_area[is_x_obtuse] * (1 if i == 2 else 1/2.)\n area = np.bincount(iP, aP, minlength=n) + \\\n np.bincount(iQ, aQ, minlength=n) + np.bincount(iR, aR, minlength=n)\n\n elif area_type == 'lumped_mass':\n lump_area = V.veclen(np.cross(PQ, RP)) / 6.\n area = sum(np.bincount(tris[:,i], lump_area, minlength=n) for i in range(3))\n\n return L, area\n else:\n return L", "def run_lpme(self) -> np.array:\n q = self.sphere.n\n signs = []\n for i in range(q):\n a = np.ones(q)\n a = a / np.sqrt(q)\n a_prime = np.copy(a)\n a_prime[i] = -a_prime[i]\n\n z_a = a * self.sphere.radius + self.sphere.origin\n z_a_prime = a_prime * self.sphere.radius + self.sphere.origin\n\n if self.oracle.compare(z_a, z_a_prime):\n signs.append(1.0)\n else:\n signs.append(-1.0)\n\n orthants = initialize_orthants(signs)\n\n # number of cycles\n nc = 4\n theta_list = [(orth.start + orth.stop) / 2 for orth in orthants]\n for _ in range(0, nc):\n for j in range(0, q - 1):\n theta_a = orthants[j].start\n theta_b = orthants[j].stop\n while abs(theta_b - theta_a) > self.e:\n theta_c = (theta_a * 3 + theta_b) / 4\n theta_d = (theta_a + theta_b) / 2\n theta_e = (theta_a + theta_b * 3) / 4\n\n theta_list[j] = theta_a\n vec_a = compute_vector(self.sphere, theta_list)\n\n theta_list[j] = theta_b\n vec_b = compute_vector(self.sphere, theta_list)\n\n theta_list[j] = theta_c\n vec_c = compute_vector(self.sphere, theta_list)\n\n theta_list[j] = theta_d\n vec_d = compute_vector(self.sphere, theta_list)\n\n theta_list[j] = theta_e\n vec_e = compute_vector(self.sphere, theta_list)\n\n # compare ac\n cac = self.oracle.compare(vec_a, vec_c)\n ccd = self.oracle.compare(vec_c, vec_d)\n cde = self.oracle.compare(vec_d, vec_e)\n ceb = self.oracle.compare(vec_e, vec_b)\n 
self.num_queries += 4\n\n if self.check_i:\n context = {\n \"theta_list\": theta_list,\n \"j\": j,\n \"theta_a\": theta_a,\n \"theta_b\": theta_b,\n \"theta_c\": theta_c,\n \"theta_d\": theta_d,\n \"theta_e\": theta_e,\n }\n self.check_inconsistency(cac, ccd, cde, ceb, context)\n\n if cac:\n theta_b = theta_d\n elif ccd:\n theta_b = theta_d\n elif cde:\n theta_a = theta_c\n theta_b = theta_e\n elif ceb:\n theta_a = theta_d\n else:\n theta_a = theta_d\n\n # update theta list\n theta_list[j] = (theta_a + theta_b) / 2\n\n # save theta list\n self.theta_list = theta_list\n return normalize(compute_vector(self.sphere, theta_list) - self.sphere.origin)", "def catmullrom(P0, P1, P2, P3, a, nPoints=100):\n # Convert the points to numpy so that we can do array multiplication\n P0, P1, P2, P3 = map(np.array, [P0, P1, P2, P3])\n\n # Calculate t0 to t4\n alpha = a\n\n def tj(ti, Pi, Pj):\n xi, yi, zi = Pi\n xj, yj, zj = Pj\n\n # ( ( (xj-xi)**2 + (yj-yi)**2 )**0.5 )**alpha + ti\n a = (xj - xi) ** 2 + (yj - yi) ** 2 + (zj - zi) ** 2\n b = a ** 0.5\n c = b ** alpha\n return c + ti\n\n t0 = 0\n t1 = tj(t0, P0, P1)\n t2 = tj(t1, P1, P2)\n t3 = tj(t2, P2, P3)\n\n # Only calculate points between P1 and P2\n t = np.linspace(t1, t2, nPoints)\n\n # Reshape so that we can multiply by the points P0 to P3\n # and get a point for each value of t.\n t = t.reshape(len(t), 1)\n\n A1 = (t1 - t) / (t1 - t0) * P0 + (t - t0) / (t1 - t0) * P1\n A2 = (t2 - t) / (t2 - t1) * P1 + (t - t1) / (t2 - t1) * P2\n A3 = (t3 - t) / (t3 - t2) * P2 + (t - t2) / (t3 - t2) * P3\n\n B1 = (t2 - t) / (t2 - t0) * A1 + (t - t0) / (t2 - t0) * A2\n B2 = (t3 - t) / (t3 - t1) * A2 + (t - t1) / (t3 - t1) * A3\n\n C = (t2 - t) / (t2 - t1) * B1 + (t - t1) / (t2 - t1) * B2\n return C", "def laplacian(f,dx,dy,dz,x=[],y=[],z=[],param=[],dim=[]):\n if not param:\n param = read_param(quiet=True)\n if not dim:\n dim = read_dim()\n if len(x) < 1:\n gd = read_grid(quiet=True)\n x = gd.x\n y = gd.y\n z = gd.z\n\n laplacian = N.empty(f.shape)\n laplacian = xder2(f,dx,x=x,y=y,z=z,param=param,dim=dim) +\\\n yder2(f,dy,x=x,y=y,z=z,param=param,dim=dim) +\\\n zder2(f,dz,x=x,y=y,z=z,param=param,dim=dim)\n\n if param.coord_system == 'cylindric':\n laplacian += xder(f,dx,x=x,y=y,z=z,param=param,dim=dim)/x\n if param.coord_system == 'spherical':\n sin_y = N.sin(y)\n cos_y = N.cos(y)\n i_sin = N.where(N.abs(sin_y) < 1e-5)[0]\n if i_sin.size > 0:\n cos_y[i_sin] = 0.; sin_y[i_sin] = 1\n x_2, cotth = N.meshgrid(1./x**2, cos_y/sin_y)\n laplacian += 2*xder(f,dx,x=x,y=y,z=z,param=param,dim=dim)/x +\\\n yder(f,dy,x=x,y=y,z=z,param=param,dim=dim)*x_2*cotth\n\n return laplacian", "def find_all_nearest_neighbours(point_cloud:np.ndarray) -> np.ndarray:\n pass", "def lloyds_algorithm(X, k, T):\n n, d = X.shape\n\n # Initialize clusters random.\n clustering = np.random.randint(0, k, (n,))\n centroids = np.zeros((k, d))\n\n # Used to stop if cost isn't improving (decreasing)\n cost = 0\n oldcost = 0\n\n # Column names\n # print(\"Iterations\\tCost\")\n for i in range(T):\n\n # Update centroid\n centroids = np.zeros((k, d))\n # YOUR CODE HERE\n numberOfPointsInClusters = np.zeros((k,))\n for idx, point in enumerate(clustering):\n numberOfPointsInClusters[point] += 1\n centroids[point] += X[idx]\n for n in range(k):\n if numberOfPointsInClusters[n] == 0:\n numberOfPointsInClusters[n] = float('-inf')\n centroids = [centroid / numberOfPointsInClusters[idx] for idx, centroid in enumerate(centroids)]\n # END CODE\n\n # Update clustering\n\n # YOUR CODE HERE\n for idx, point in 
enumerate(X):\n clustering[idx] = np.argmin([np.linalg.norm(point - cluster) for cluster in centroids])\n # END CODE\n\n # Compute and print cost\n cost = 0\n for j in range(n):\n cost += np.linalg.norm(X[j] - centroids[clustering[j]]) ** 2\n # print(i + 1, \"\\t\\t\", cost)\n\n # Stop if cost didn't improve more than epislon (decrease)\n if np.isclose(cost, oldcost): break # TODO\n oldcost = cost\n\n return clustering, centroids, cost", "def laplacian(self, p):\n wf = self._wf(p)\n return - G * self.d * wf.sum()", "def light_source_directions():\n L = np.array([[-0.06059872, -0.44839055, 0.8917812],\n [-0.05939919, -0.33739538, 0.93948714],\n [-0.05710194, -0.21230722, 0.97553319],\n [-0.05360061, -0.07800089, 0.99551134],\n [-0.04919816, 0.05869781, 0.99706274],\n [-0.04399823, 0.19019233, 0.98076044],\n [-0.03839991, 0.31049925, 0.9497977],\n [-0.03280081, 0.41611025, 0.90872238],\n [-0.18449839, -0.43989616, 0.87889232],\n [-0.18870114, -0.32950199, 0.92510557],\n [-0.1901994, -0.20549935, 0.95999698],\n [-0.18849605, -0.07269848, 0.97937948],\n [-0.18329657, 0.06229884, 0.98108166],\n [-0.17500445, 0.19220488, 0.96562453],\n [-0.16449474, 0.31129005, 0.93597008],\n [-0.15270716, 0.4160195, 0.89644202],\n [-0.30139786, -0.42509698, 0.85349393],\n [-0.31020115, -0.31660118, 0.89640333],\n [-0.31489186, -0.19549495, 0.92877599],\n [-0.31450962, -0.06640203, 0.94692897],\n [-0.30880699, 0.06470146, 0.94892147],\n [-0.2981084, 0.19100538, 0.93522635],\n [-0.28359251, 0.30729189, 0.90837601],\n [-0.26670649, 0.41020998, 0.87212122],\n [-0.40709586, -0.40559588, 0.81839168],\n [-0.41919869, -0.29999906, 0.85689732],\n [-0.42618633, -0.18329412, 0.88587159],\n [-0.42691512, -0.05950211, 0.90233197],\n [-0.42090385, 0.0659006, 0.90470827],\n [-0.40860354, 0.18720162, 0.89330773],\n [-0.39141794, 0.29941372, 0.87013988],\n [-0.3707838, 0.39958255, 0.83836338],\n [-0.499596, -0.38319693, 0.77689378],\n [-0.51360334, -0.28130183, 0.81060526],\n [-0.52190667, -0.16990217, 0.83591069],\n [-0.52326874, -0.05249686, 0.85054918],\n [-0.51720021, 0.06620003, 0.85330035],\n [-0.50428312, 0.18139393, 0.84427174],\n [-0.48561334, 0.28870793, 0.82512267],\n [-0.46289771, 0.38549809, 0.79819605],\n [-0.57853599, -0.35932235, 0.73224555],\n [-0.59329349, -0.26189713, 0.76119165],\n [-0.60202327, -0.15630604, 0.78303027],\n [-0.6037003, -0.04570002, 0.7959004],\n [-0.59781529, 0.06590169, 0.79892043],\n [-0.58486953, 0.17439091, 0.79215873],\n [-0.56588359, 0.27639198, 0.77677747],\n [-0.54241965, 0.36921337, 0.75462733],\n [0.05220076, -0.43870637, 0.89711304],\n [0.05199786, -0.33138635, 0.9420612],\n [0.05109826, -0.20999284, 0.97636672],\n [0.04919919, -0.07869871, 0.99568366],\n [0.04640163, 0.05630197, 0.99733494],\n [0.04279892, 0.18779527, 0.98127529],\n [0.03870043, 0.30950341, 0.95011048],\n [0.03440055, 0.41730662, 0.90811441],\n [0.17290651, -0.43181626, 0.88523333],\n [0.17839998, -0.32509996, 0.92869988],\n [0.18160174, -0.20480196, 0.96180921],\n [0.18200745, -0.07490306, 0.98044012],\n [0.17919505, 0.05849838, 0.98207285],\n [0.17329685, 0.18839658, 0.96668244],\n [0.1649036, 0.30880674, 0.93672045],\n [0.1549931, 0.41578148, 0.89616009],\n [0.28720483, -0.41910705, 0.8613145],\n [0.29740177, -0.31410186, 0.90160535],\n [0.30420604, -0.1965039, 0.9321185],\n [0.30640529, -0.07010121, 0.94931639],\n [0.30361153, 0.05950226, 0.95093613],\n [0.29588748, 0.18589214, 0.93696036],\n [0.28409783, 0.30349768, 0.90949304],\n [0.26939905, 0.40849857, 0.87209694],\n [0.39120402, -0.40190413, 
0.8279085],\n [0.40481085, -0.29960803, 0.86392315],\n [0.41411685, -0.18590756, 0.89103626],\n [0.41769724, -0.06449957, 0.906294],\n [0.41498764, 0.05959822, 0.90787296],\n [0.40607977, 0.18089099, 0.89575537],\n [0.39179226, 0.29439419, 0.87168279],\n [0.37379609, 0.39649585, 0.83849122],\n [0.48278794, -0.38169046, 0.78818031],\n [0.49848546, -0.28279175, 0.8194761],\n [0.50918069, -0.1740934, 0.84286803],\n [0.51360856, -0.05870098, 0.85601427],\n [0.51097962, 0.05899765, 0.8575658],\n [0.50151639, 0.17420569, 0.84742769],\n [0.48600297, 0.28260173, 0.82700506],\n [0.46600106, 0.38110087, 0.79850181],\n [0.56150442, -0.35990283, 0.74510586],\n [0.57807114, -0.26498677, 0.77176147],\n [0.58933134, -0.1617086, 0.7915421],\n [0.59407609, -0.05289787, 0.80266769],\n [0.59157958, 0.057798, 0.80417224],\n [0.58198189, 0.16649482, 0.79597523],\n [0.56620006, 0.26940003, 0.77900008],\n [0.54551481, 0.36380988, 0.7550205]], dtype=float)\n return L", "def lap(self):\n\n gr = self.grid\n phi = gr.phi\n\n lapphi = gr.scratch_array()\n\n ib = gr.ilo\n ie = gr.ihi\n\n lapphi[ib:ie+1] = \\\n (phi[ib-1:ie] - 2.0*phi[ib:ie+1] + phi[ib+1:ie+2])/gr.dx**2\n\n return lapphi", "def laplace(arr, out=None):\n for j in nb.prange(0, dim_z): # iterate axial points\n # inner radial boundary condition\n i = 0\n arr_z_l, arr_c, arr_z_h = region_z(arr, (i, j))\n out[i, j] = (\n 2 * (arr[i + 1, j] - arr_c) * dr_2\n + (arr_z_l - 2 * arr_c + arr_z_h) * dz_2\n )\n\n if dim_r == 1:\n continue # deal with singular radial dimension\n\n for i in range(1, dim_r - 1): # iterate radial points\n arr_z_l, arr_c, arr_z_h = region_z(arr, (i, j))\n arr_r_l, arr_r_h = arr[i - 1, j], arr[i + 1, j]\n out[i, j] = (\n (arr_r_h - 2 * arr_c + arr_r_l) * dr_2\n + (arr_r_h - arr_r_l) / (2 * i + 1) * dr_2\n + (arr_z_l - 2 * arr_c + arr_z_h) * dz_2\n )\n\n # outer radial boundary condition\n i = dim_r - 1\n arr_z_l, arr_c, arr_z_h = region_z(arr, (i, j))\n arr_r_l, arr_r_h = arr[i - 1, j], value_outer(arr, (i, j))\n out[i, j] = (\n (arr_r_h - 2 * arr_c + arr_r_l) * dr_2\n + (arr_r_h - arr_r_l) / (2 * i + 1) * dr_2\n + (arr_z_l - 2 * arr_c + arr_z_h) * dz_2\n )\n return out", "def laplacian(images, n_down=4):\n lapls = []\n\n for i in range(n_down):\n n = F.interpolate(images, scale_factor=0.5, mode='bilinear',\n align_corners=True)\n lapls.append(images -\n F.interpolate(n, size=images.shape[-2:], mode='bilinear',\n align_corners=True))\n images = n\n\n lapls.append(images)\n return lapls", "def find_out_difference_perpendiculars(lap, ref_lap):\n\n distances = []\n\n for i in lap.index:\n point = lap.loc[i]\n\n closest_index = find_closest_point(point, ref_lap)\n closest_point = ref_lap.loc[closest_index]\n\n neighbor_i = len(ref_lap) - 1 if closest_index == 0 else closest_index - 1\n neighbor1 = ref_lap.loc[neighbor_i]\n neighbor_i = 0 if len(ref_lap) == closest_index + 1 else closest_index + 1\n neighbor2 = ref_lap.loc[neighbor_i]\n\n v1 = create_vector(closest_point, point)\n v2 = create_vector(closest_point, neighbor1)\n v3 = create_vector(closest_point, neighbor2)\n\n angle1 = find_angle_between_vectors(v1, v2)\n angle2 = find_angle_between_vectors(v1, v3)\n\n degrees90 = math.pi / 2\n min_dist = -1\n if angle1 > degrees90 and angle2 > degrees90:\n min_dist = line_length(point.LAT, point.LON, closest_point.LAT, closest_point.LON)\n elif angle1 < degrees90 and angle2 < degrees90:\n dist1 = find_shortest_distance(point, closest_point, neighbor1)\n dist2 = find_shortest_distance(point, closest_point, neighbor2)\n min_dist = dist1 if dist1 
<= dist2 else dist2\n elif angle1 <= degrees90:\n min_dist = find_shortest_distance(point, closest_point, neighbor1)\n elif angle2 <= degrees90:\n min_dist = find_shortest_distance(point, closest_point, neighbor2)\n\n if min_dist == -1:\n print('ERROR: Could not find distance')\n print(\"Indices: {} {}\\nAngles: {} {}\".format(i, closest_index, angle1, angle2))\n elif math.isnan(min_dist):\n print(\"NAN value!!!\\nIndices: {} {}\\nAngles: {} {}\".format(i, closest_index, angle1, angle2))\n elif min_dist < 0:\n print(\"Negative value!!!\\nIndices: {} {}\\nAngles: {} {}\".format(i, closest_index, angle1, angle2))\n else:\n min_dist = degrees2kilometers(min_dist) * 100000 # in centimeters\n distances.append(min_dist)\n\n return distances", "def ricomLocal2LL(points,lat0=0,long0=0,latoff=0,longoff=0): \n import math\n bigr = 6378136.0\n dlong = longoff-long0\n dlat = latoff-lat0\n clat0 = math.cos(lat0*math.pi/180.0)\n numPts = len(points)\n n = 0\n pointsLL=[]\n while n<numPts:\n x = points[n][0]/(bigr*(math.pi/180.0)*clat0) - dlong\n y = points[n][1]/(bigr*(math.pi/180.0)) - dlat\n pointsLL.append([x,y])\n n+=1\n \n return pointsLL", "def fix_sphere_h (center_x, center_y, center_z, radius, centers, radii, len_points, list_of_a):\n g_x = []\n g_y = []\n g_z = []\n points = [hydrogen_coord_gen(center_x, center_y, center_z, radius) for i in range(0, len_points)] \n x = [points[i][0] for i in range(0, len(points))] \n y = [points[i][1] for i in range(0, len(points))]\n z = [points[i][2] for i in range(0, len(points))]\n for i in range(0, len(points)):\n check = 0\n check_b = 0\n j = 0\n while (j <= (len(centers) - 1) and (check == 0)): \n if (calculate_3D_distance_2_centers(x[i], y[i], z[i], centers[j][0], centers[j][1], centers[j][2]) < radii[j]):\n check += 1\n j += 1\n h = 0\n while ((check_b == 0) and (h <= len(list_of_a) -1)):\n if (calculate_3D_distance_2_centers(x[i], y[i], z[i], list_of_a[h].x, list_of_a[h].y, list_of_a[h].z) <= 1.50): \n check_b += 1\n h += 1\n if ((check == 0) and (check_b == 0)):\n g_x.append(x[i])\n g_y.append(y[i])\n g_z.append(z[i])\n return g_x, g_y, g_z", "def LaplacianMatrix(adjmatrix):\n if adjmatrix.dtype in [np.uint, np.uint0, np.uint8, np.uint16, np.uint32, np.uint64]:\n adjmatrix = adjmatrix.astype(int)\n N = len(adjmatrix)\n\n laplacianmatrix = np.identity(N, dtype=adjmatrix.dtype) * adjmatrix.sum(axis=1)\n laplacianmatrix -= adjmatrix\n\n return laplacianmatrix", "def cal_L(self):\n # calculate the l matrix\n self.point_matrixs = self.point_matrix.reshape(\n self.point_matrix.shape[0], 1, self.point_matrix.shape[1])\n self.point_matrixs = np.tile(self.point_matrixs,\n (self.attach_points.shape[0], 1))\n self.attach_points_matrix = np.matlib.repmat(\n self.attach_points[:, 0:3], self.point_matrix.shape[0], 1)\n self.attach_points_matrix = self.attach_points_matrix.reshape(\n self.point_matrix.shape[0], self.attach_points.shape[0], 3)\n self.L = np.subtract(self.attach_points_matrix,\n self.point_matrixs)\n # self.L[:,self.attach_points[:,3]==1,:] = \\\n # - self.L[:,self.attach_points[:,3]==1,:]\n # print(self.L)", "def lats(self):\n lats = []\n quads = self.getQuadrilaterals()\n groups = self._getGroupIndex()\n u_groups = np.unique(groups)\n ng = len(u_groups)\n for i in range(ng):\n q_ind = np.where(groups == u_groups[i])[0]\n nq = len(q_ind)\n top_lats = []\n bot_lats = []\n for j in range(nq):\n if j == 0:\n top0 = [quads[q_ind[j]][0].latitude]\n bot0 = [quads[q_ind[j]][3].latitude]\n top_lats = top_lats + top0\n bot_lats = bot_lats + bot0\n top_lats = 
top_lats + [quads[q_ind[j]][1].latitude]\n bot_lats = bot_lats + [quads[q_ind[j]][2].latitude]\n lats = lats + top_lats + bot_lats[::-1] + top0 + [np.nan]\n\n return np.array(lats)", "def lap2D(self, lat):\n lap = np.roll(lat, 1, 0) + np.roll(lat, -1, 0) + \\\n np.roll(lat, 1, 1) + np.roll(lat, -1, 1) - \\\n 4. * lat\n lap = 1./self.dx**2. * lap\n # print(lap[50][50])\n return(lap)", "def uniform_laplacian(image, radius=1):\n height, width = image.shape[:2]\n window_size = 2 * radius + 1\n\n W = sparse_conv_matrix(width, height, np.ones((window_size, window_size)))\n\n return weights_to_laplacian(W)", "def basicGetPointsGeodesic(self):\n\n # more geodesic, distance=2 (complicated because random)\n data = numpy.array([[0, 1, 1, 1, 2, 2, 2, 0],\n [0, 1, 1, 1, 2, 2, 2, 0],\n [0, 1, 1, 1, 2, 2, 2, 0]])\n labels = Labels(data=data)\n result = labels.getPoints(ids=[1], mode='geodesic', distance=2, \n connectivity=1)\n result = result.tolist()\n if len(result) == 5:\n desired = [[0, 1], [0, 3], [1, 2], [2, 1], [2, 3]]\n elif len(result) == 4:\n desired = [[0, 2], [1, 1], [1, 3], [2, 2]]\n elif len(result) == 3:\n if [1, 2] in result:\n if [0, 1] in result:\n desired = [[0, 1], [1, 2], [2, 3]] \n elif [0, 3] in result:\n desired = [[0, 3], [1, 2], [2, 1]]\n elif [0, 1] in result:\n if [0, 3] in result:\n desired = [[0, 1], [0, 3], [2, 2]]\n elif [2, 1] in result:\n desired = [[0, 1], [2, 1], [1, 3]]\n else:\n desired = [[0, 1], [1, 3], [2, 2]]\n elif [2, 3] in result:\n if [0, 3] in result:\n desired = [[0, 3], [1, 1], [2, 3]]\n elif [2, 1] in result:\n desired = [[0, 2], [2, 1], [2, 3]]\n else:\n desired = [[2, 3], [1, 1], [0, 2]] \n elif [0, 3] in result:\n desired = [[0, 3], [1, 1], [2, 2]]\n elif [2, 1] in result:\n desired = [[2, 1], [1, 3], [0, 2]]\n for des in desired:\n np_test.assert_equal(des in result, True)\n for res in result:\n np_test.assert_equal(res in desired, True)\n\n # mode geodesic, distance=3, inset\n labels = Labels(data=data[1:3, 2:8])\n labels.setInset([slice(1, 3), slice(2, 8)])\n result = labels.getPoints(ids=[2], mode='geodesic', distance=3, \n connectivity=1)\n result = result.tolist()\n if len(result) == 1:\n np_test.assert_equal(result[0][1], 5)\n elif len(result) == 2:\n desired = []\n if [1, 4] in result:\n desired = [[1, 4], [2, 6]]\n elif [2, 4] in result:\n desired = [[2, 4], [1, 6]]\n for des in desired: \n np_test.assert_equal(des in result, True)\n for res in result:\n np_test.assert_equal(res in desired, True)", "def point(L, lam):\n lam = arg.getvector(lam, out='row')\n return L.pp.reshape((3,1)) + L.uw.reshape((3,1)) * lam", "def laplacian( graph : SpatialGraph, \n sparse : bool = False\n ) -> Union[np.ndarray, sp.spmatrix] :\n adj = adjacency(graph, sparse=sparse)\n dgr = sp.diags(np.array(adj.sum(1))) if sparse else np.diag(np.array(adj.sum(1)))\n return adj - dgr", "def insphere(network,\n geometry,\n **kwargs):\n import warnings\n try:\n import pulp as pu\n Np = geometry.num_pores()\n value = _sp.zeros(Np)\n pore_map = geometry.map_pores(geometry.pores(),geometry._net)\n for geom_pore,net_pore in pore_map:\n net_throats = geometry._net.find_neighbor_throats(net_pore)\n geom_throats = geometry._net.map_throats(net_throats,geometry)[:,1]\n verts = geometry['throat.offset_vertices'][geom_throats]\n if len(verts) > 1:\n try:\n pts = np.vstack((i for i in verts if len(i)>0))\n except ValueError:\n pts = []\n if len(pts) > 4:\n \"Work out central point to use as initial guess\"\n c0 = np.mean(pts,axis=0)\n \"Compute convex hull to find points lying on the 
hull in order\"\n hull = ConvexHull(pts, qhull_options='QJ Pp')\n \"For each simplex making up the hull collect the end points\"\n A = pts[hull.simplices[:,0]]\n B = pts[hull.simplices[:,1]]\n C = pts[hull.simplices[:,2]]\n #I = np.array([[0,1],[-1,0]])\n \"Normal of the simplices\"\n #N = np.dot((B-A),I)\n N = np.cross((B-A),(C-A),axis=1)\n #L = np.sqrt(np.sum(np.square(N),axis=1))\n \"Normalize the normal vector\"\n L = np.linalg.norm(N,axis=1)\n F = np.vstack((L,L,L)).T\n N /= F\n \"If normals point out of hull change sign to point in\"\n pointing_out = (np.sum((A-c0)*N,axis=1)>0)\n N[pointing_out]*= -1\n \"Define Linear Program Variables\"\n \"The centre of the incircle adjustment\"\n cx = pu.LpVariable(\"cx\",None,None,pu.LpContinuous)\n cy = pu.LpVariable(\"cy\",None,None,pu.LpContinuous)\n cz = pu.LpVariable(\"cz\",None,None,pu.LpContinuous)\n \"Radius of the incircle\"\n R = pu.LpVariable(\"R\",0,None,pu.LpContinuous)\n \"Slack variables for shortest distance between centre and simplices\" \n S = pu.LpVariable.dict(\"SlackVariable\",range(len(A)),0,None,pu.LpContinuous)\n \"Set up LP problem\"\n prob = pu.LpProblem(\"FindInRadius\",pu.LpMaximize)\n \"Objective Function\"\n prob += R\n for i in range(len(A)):\n \" Ni.(C-Ai)-Si = 0\"\n prob += N[i][0]*(c0[0]+cx) + N[i][1]*(c0[1]+cy) + N[i][2]*(c0[2]+cz)- N[i][0]*A[i][0] - N[i][1]*A[i][1] - N[i][2]*A[i][2]- S[i] == 0\n \"Si >= R\"\n prob += S[i] >= R\n \"Solve the LP\"\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n prob.solve()\n \"As the radius is the objective function we can get it from the objective or as R.value()\"\n rad = prob.objective.value()\n #cen = c0 + np.array([cx.value(),cy.value(),cz.value()])\n value[geom_pore]=rad*2\n \n \n return value\n except ImportError:\n print(\"Cannot use insphere method without installing pulp package\")", "def _3D_sphere_edges(G, radius):\n # TODO This can be parallelized.\n edges = []\n for (u, pu), (v, pv) in combinations(G.nodes(data=\"pos\"), 2):\n for a, b in zip(pu, pv)):\n if (haversine(a,b)) <= radius:\n edges.append((u, v))\n print(u,v)\n return edges", "def lons(self):\n lons = []\n quads = self.getQuadrilaterals()\n groups = self._getGroupIndex()\n u_groups = np.unique(groups)\n ng = len(u_groups)\n for i in range(ng):\n q_ind = np.where(groups == u_groups[i])[0]\n nq = len(q_ind)\n top_lons = []\n bot_lons = []\n for j in range(nq):\n if j == 0:\n top0 = [quads[q_ind[j]][0].longitude]\n bot0 = [quads[q_ind[j]][3].longitude]\n top_lons = top_lons + top0\n bot_lons = bot_lons + bot0\n top_lons = top_lons + [quads[q_ind[j]][1].longitude]\n bot_lons = bot_lons + [quads[q_ind[j]][2].longitude]\n lons = lons + top_lons + bot_lons[::-1] + top0 + [np.nan]\n return np.array(lons)", "def _holt__(x, xi, p, y, l, b, s, m, n, max_seen):\n alpha, beta, phi, alphac, betac, y_alpha = _holt_init(x, xi, p, y, l, b)\n for i in range(1, n):\n l[i] = (y_alpha[i - 1]) + (alphac * (l[i - 1]))\n return sqeuclidean(l, y)", "def laplace2d(get_A, get_rho, N=Mynum, Te=2):\n # Reduce the row and column of Laplacian matrix by 2 \n # Reduced row and column will be replace with embed in future\n # n = N - 2 for embed\n n = N\n # Solving for the PDE(1)\n h = 1.0/(n-1)\n A = get_A(n) * (1/(h**2))\n b = get_rho(n, Te)\n U = sp.linalg.solve(A, b)\n\n # Reshape the u vector into nxn matrix for heat map plotting\n T = U.reshape((n, n))\n print T\n \n # Embed the surrounding of U matrix into zeros\n Tfull = embed(T, Te)\n\n # Verify that dot function of A matrix and U vector\n # return the 
same rho value at midpoint\n CheckU = np.dot(A,U)\n\n # Filter very small value into zeros\n for i in range(0,len(CheckU)):\n if (abs(CheckU[i]) < 1e-12):\n CheckU[i] = 0\n\n # Validate that product of A and U matrix is the same as rho vector\n # Will give warning if it is not the same\n # assert np.all(CheckU == b) # working only mynum = 7 and 9 \n\n # Print value of the products at midpoint.\n mid = (n**2-1)/2\n print \"Q1: Value of the dot product A.u1 is %5.3f at (0.5,0.5).\" % (CheckU[mid])\n return Tfull", "def graham_scan(points: np.ndarray) -> np.ndarray:\n primary, remaining_points = extract_primary(points)\n sorted_points = sort_for_graham_scan(remaining_points, primary)\n hull = find_hull_vertices(sorted_points)\n return hull", "def to_laplace(graph, form=\"DAD\", regularizer=None):\n valid_inputs = [\"I-DAD\", \"DAD\", \"R-DAD\"]\n if form not in valid_inputs:\n raise TypeError(\"Unsuported Laplacian normalization\")\n\n A = graph\n\n in_degree = np.sum(A, axis=0)\n out_degree = np.sum(A, axis=1)\n\n # regularize laplacian with parameter\n # set to average degree\n if form == \"R-DAD\":\n if regularizer is None:\n regularizer = 1\n elif not isinstance(regularizer, (int, float)):\n raise TypeError(\n \"Regularizer must be a int or float, not {}\".format(type(regularizer))\n )\n elif regularizer < 0:\n raise ValueError(\"Regularizer must be greater than or equal to 0\")\n regularizer = regularizer * np.mean(out_degree)\n\n in_degree += regularizer\n out_degree += regularizer\n\n with np.errstate(divide=\"ignore\"):\n in_root = 1 / np.sqrt(in_degree) # this is 10x faster than ** -0.5\n out_root = 1 / np.sqrt(out_degree)\n\n in_root[np.isinf(in_root)] = 0\n out_root[np.isinf(out_root)] = 0\n\n in_root = np.diag(in_root) # just change to sparse diag for sparse support\n out_root = np.diag(out_root)\n\n if form == \"I-DAD\":\n L = np.diag(in_degree) - A\n L = in_root @ L @ in_root\n elif form == \"DAD\" or form == \"R-DAD\":\n L = out_root @ A @ in_root\n # return symmetrize(L, method=\"avg\") # sometimes machine prec. makes this necessary\n return L", "def to_laplace(graph, form=\"DAD\", regularizer=None):\n valid_inputs = [\"I-DAD\", \"DAD\", \"R-DAD\"]\n if form not in valid_inputs:\n raise TypeError(\"Unsuported Laplacian normalization\")\n\n A = graph\n\n in_degree = np.sum(A, axis=0)\n out_degree = np.sum(A, axis=1)\n\n # regularize laplacian with parameter\n # set to average degree\n if form == \"R-DAD\":\n if regularizer is None:\n regularizer = 1\n elif not isinstance(regularizer, (int, float)):\n raise TypeError(\n \"Regularizer must be a int or float, not {}\".format(type(regularizer))\n )\n elif regularizer < 0:\n raise ValueError(\"Regularizer must be greater than or equal to 0\")\n regularizer = regularizer * np.mean(out_degree)\n\n in_degree += regularizer\n out_degree += regularizer\n\n with np.errstate(divide=\"ignore\"):\n in_root = 1 / np.sqrt(in_degree) # this is 10x faster than ** -0.5\n out_root = 1 / np.sqrt(out_degree)\n\n in_root[np.isinf(in_root)] = 0\n out_root[np.isinf(out_root)] = 0\n\n in_root = np.diag(in_root) # just change to sparse diag for sparse support\n out_root = np.diag(out_root)\n\n if form == \"I-DAD\":\n L = np.diag(in_degree) - A\n L = in_root @ L @ in_root\n elif form == \"DAD\" or form == \"R-DAD\":\n L = out_root @ A @ in_root\n # return symmetrize(L, method=\"avg\") # sometimes machine prec. 
makes this necessary\n return L", "def laplacian(expr):\n\n delop = Del()\n if expr.is_Vector:\n return (gradient(divergence(expr)) - curl(curl(expr))).doit()\n return delop.dot(delop(expr)).doit()", "def make_laplace(bcs: Boundaries) -> OperatorType:\n assert isinstance(bcs.grid, CylindricalSymGrid)\n bcs.check_value_rank(0)\n boundary_r, boundary_z = bcs\n\n # calculate preliminary quantities\n dim_r, dim_z = bcs.grid.shape\n dr_2, dz_2 = 1 / bcs.grid.discretization ** 2\n\n value_outer = boundary_r.high.make_virtual_point_evaluator()\n region_z = boundary_z.make_region_evaluator()\n\n # use processing for large enough arrays\n parallel = dim_r * dim_z >= config[\"numba.parallel_threshold\"]\n\n @jit_allocate_out(parallel=parallel, out_shape=(dim_r, dim_z))\n def laplace(arr, out=None):\n \"\"\"apply laplace operator to array `arr`\"\"\"\n for j in nb.prange(0, dim_z): # iterate axial points\n # inner radial boundary condition\n i = 0\n arr_z_l, arr_c, arr_z_h = region_z(arr, (i, j))\n out[i, j] = (\n 2 * (arr[i + 1, j] - arr_c) * dr_2\n + (arr_z_l - 2 * arr_c + arr_z_h) * dz_2\n )\n\n if dim_r == 1:\n continue # deal with singular radial dimension\n\n for i in range(1, dim_r - 1): # iterate radial points\n arr_z_l, arr_c, arr_z_h = region_z(arr, (i, j))\n arr_r_l, arr_r_h = arr[i - 1, j], arr[i + 1, j]\n out[i, j] = (\n (arr_r_h - 2 * arr_c + arr_r_l) * dr_2\n + (arr_r_h - arr_r_l) / (2 * i + 1) * dr_2\n + (arr_z_l - 2 * arr_c + arr_z_h) * dz_2\n )\n\n # outer radial boundary condition\n i = dim_r - 1\n arr_z_l, arr_c, arr_z_h = region_z(arr, (i, j))\n arr_r_l, arr_r_h = arr[i - 1, j], value_outer(arr, (i, j))\n out[i, j] = (\n (arr_r_h - 2 * arr_c + arr_r_l) * dr_2\n + (arr_r_h - arr_r_l) / (2 * i + 1) * dr_2\n + (arr_z_l - 2 * arr_c + arr_z_h) * dz_2\n )\n return out\n\n return laplace # type: ignore", "def linear_LS_triangulation(u1, P1, u2, P2):\n A = np.zeros((4, 3))\n b = np.zeros((4, 1))\n\n # Create array of triangulated points\n x = np.zeros((3, len(u1)))\n\n # Initialize C matrices\n C1 = np.array(linear_LS_triangulation_C)\n C2 = np.array(linear_LS_triangulation_C)\n\n for i in range(len(u1)):\n # Derivation of matrices A and b:\n # for each camera following equations hold in case of perfect point matches:\n # u.x * (P[2,:] * x) = P[0,:] * x\n # u.y * (P[2,:] * x) = P[1,:] * x\n # and imposing the constraint:\n # x = [x.x, x.y, x.z, 1]^T\n # yields:\n # (u.x * P[2, 0:3] - P[0, 0:3]) * [x.x, x.y, x.z]^T + (u.x * P[2, 3] - P[0, 3]) * 1 = 0\n # (u.y * P[2, 0:3] - P[1, 0:3]) * [x.x, x.y, x.z]^T + (u.y * P[2, 3] - P[1, 3]) * 1 = 0\n # and since we have to do this for 2 cameras, and since we imposed the constraint,\n # we have to solve 4 equations in 3 unknowns (in LS sense).\n #\n # Build C matrices, to construct A and b in a concise way\n C1[:, 2] = u1[i, :]\n C2[:, 2] = u2[i, :]\n\n # Build A matrix:\n # [\n # [ u1.x * P1[2,0] - P1[0,0], u1.x * P1[2,1] - P1[0,1], u1.x * P1[2,2] - P1[0,2] ],\n # [ u1.y * P1[2,0] - P1[1,0], u1.y * P1[2,1] - P1[1,1], u1.y * P1[2,2] - P1[1,2] ],\n # [ u2.x * P2[2,0] - P2[0,0], u2.x * P2[2,1] - P2[0,1], u2.x * P2[2,2] - P2[0,2] ],\n # [ u2.y * P2[2,0] - P2[1,0], u2.y * P2[2,1] - P2[1,1], u2.y * P2[2,2] - P2[1,2] ]\n # ]\n A[0:2, :] = C1.dot(P1[0:3, 0:3]) # C1 * R1\n A[2:4, :] = C2.dot(P2[0:3, 0:3]) # C2 * R2\n\n # Build b vector:\n # [\n # [ -(u1.x * P1[2,3] - P1[0,3]) ],\n # [ -(u1.y * P1[2,3] - P1[1,3]) ],\n # [ -(u2.x * P2[2,3] - P2[0,3]) ],\n # [ -(u2.y * P2[2,3] - P2[1,3]) ]\n # ]\n b[0:2, :] = C1.dot(P1[0:3, 3:4]) # C1 * t1\n b[2:4, :] = 
C2.dot(P2[0:3, 3:4]) # C2 * t2\n b *= -1\n\n # Solve for x vector\n cv2.solve(A, b, x[:, i:i + 1], cv2.DECOMP_SVD)\n\n return np.transpose(x), np.ones(len(u1), dtype=bool)", "def _laplacian_to_image(lpyr, filter_vec, coeff):\n im = lpyr[-1]\n filter_vec = filter_vec.reshape(filter_vec.size, 1)\n for i in reversed(range(len(lpyr) - 1)):\n im = _expand(im, filter_vec) + coeff[i] * lpyr[i]\n\n return im", "def calculate_laplace_coeff(alpha, j, s):\n return integrate.quad(lambda psi, alpha, j, s: np.cos(j*psi)/(1-2*alpha*np.cos(psi)+alpha**2)**s,\n 0, 2*np.pi, args=(alpha, j, s,))[0]/np.pi", "def fixed_points(self, epsilon = 0.000001):\n # (a b) (z) = (Lz)\n # (c d) (1) ( L) \n\n # az + b = c z^2 + d z\n # 0 = c z^2 + (d-a)z - b\n # z = ((a-d) \\pm sqrt( (a-d)^2 + 4 bc )) / 2c\n\n a, b, c, d = self\n\n assert abs(c) > epsilon\n zp = ((a - d) + sqrt( (a - d)**2 + 4*b*c)) / 2*c\n zm = ((a - d) - sqrt( (a - d)**2 + 4*b*c)) / 2*c\n Lp = c*zp + d\n Lm = c*zm + d\n if abs(Lp) < abs(Lm):\n Ls = [Lp, Lm]\n out = [zp, zm]\n else:\n Ls = [Lm, Lp]\n out = [zm, zp] \n assert abs(Ls[0]) < 1 - epsilon and 1 + epsilon < abs(Ls[1])\n return [CP1((z,1)) for z in out]", "def graham_scan(points):\n if len(points) <= 3:\n return points\n pointList = ExtendedTupleList(points)\n complete_range = pointList.range_within(0, 1)\n first_point = (complete_range[1][\"min\"][1], complete_range[1][\"min\"][0])\n newPoints = ExtendedTupleList([])\n for point in pointList:\n square_dist, cosine = line_length_angle((first_point, point))\n new_point = (point[0], point[1], square_dist, cosine)\n newPoints.append(new_point)\n newPoints.double_sort(3, 2, reverse_outside = True, reverse_inside = True)\n hull = ExtendedTupleList([])\n hull.append(first_point)\n hull.append(newPoints[0])\n lastAngle = newPoints[0][3]\n for k in range(1, len(newPoints)):\n if newPoints[k][3] == lastAngle:\n continue\n lastAngle = newPoints[k][3]\n while (len(hull) >= 2 and direction(hull[-2], hull[-1], newPoints[k]) >= 0):\n hull.pop()\n hull.append(newPoints[k])\n real_hull = []\n for point in hull:\n real_hull.append((point[0], point[1]))\n real_hull.append(real_hull[0])\n return real_hull", "def compile_points_edges(sphere_pieces):\n def build_edge_list(tris, points):\n v_adj = np.zeros(2*[points.shape[0]], dtype=np.int32)\n v_adj[tris[:,0], tris[:,1]] = v_adj[tris[:,1], tris[:,0]] = 1\n v_adj[tris[:,1], tris[:,2]] = v_adj[tris[:,2], tris[:,1]] = 1\n v_adj[tris[:,2], tris[:,0]] = v_adj[tris[:,0], tris[:,2]] = 1\n return np.array(np.where(np.triu(v_adj) == 1), dtype=np.int32).T\n\n vcount = 0\n all_points = []\n all_edges = []\n for points, tris in [(p.points, p.tris) for p in sphere_pieces]:\n edges = build_edge_list(tris, points)\n edges += vcount\n vcount += len(points)\n all_points.append(points)\n all_edges.append(edges)\n return np.vstack(all_points), np.vstack(all_edges)", "def find_hull_vertices(points: np.ndarray) -> np.ndarray:\n M = 3\n N = points.shape[0]\n for i in range(4, N):\n while ccw(points[M], points[M - 1], points[i]) >= 0:\n M -= 1\n\n M += 1\n swap(points, M, i)\n\n return points[1:M + 1]", "def distances(points, l=2):\n distances = []\n while points:\n baseline = points.pop()\n distances.extend([distance(baseline, point, l) for point in points])\n return distances", "def main() -> None:\n region = sleplet.slepian.Region(mask_name=\"africa\")\n slepian = sleplet.slepian_methods.choose_slepian_method(L, region)\n africa = sleplet.functions.SlepianAfrica(L, region=region, smoothing=SMOOTHING)\n\n # perform reconstruction\n f = 
sleplet.slepian_methods.slepian_inverse(africa.coefficients, L, slepian)\n\n # plot\n name = f\"africa_slepian_reconstruction_L{L}\"\n sleplet.plotting.PlotSphere(\n f,\n L,\n name,\n normalise=NORMALISE,\n region=slepian.region,\n ).execute()", "def solve_L(centers_i,centers_r):\n\t# The first term is a column vector of size N. Each of its rows\n\t# multiplies with the respective row on the x_r with 2 columns and \n\t# N rows.\n\tLy = centers_i[:,1][np.newaxis,:].transpose() * centers_r\n\tLx = - centers_i[:,0][np.newaxis,:].transpose() * centers_r\n\tL = np.concatenate((Ly,centers_i[:,1][np.newaxis,:].transpose(),Lx),\n\t\taxis=1)\n\tb = centers_i[:,0]\n\tprint(\"solving for the rotation and translation coefficients...\")\n\trl,resids,rank,svals = np.linalg.lstsq(L,b)\n\tprint(\"residue:%0.4f\trank:%0.4f\"%(np.sum(resids),rank))\n\treturn rl", "def prepare_laplacian(laplacian):\n\n def estimate_lmax(laplacian, tol=5e-3):\n r\"\"\"Estimate the largest eigenvalue of an operator.\"\"\"\n lmax = sparse.linalg.eigsh(laplacian, k=1, tol=tol,\n ncv=min(laplacian.shape[0], 10),\n return_eigenvectors=False)\n lmax = lmax[0]\n lmax *= 1 + 2 * tol # Be robust to errors.\n return lmax\n\n def scale_operator(L, lmax, scale=1):\n r\"\"\"Scale the eigenvalues from [0, lmax] to [-scale, scale].\"\"\"\n I = sparse.identity(L.shape[0], format=L.format, dtype=L.dtype)\n L *= 2 * scale / lmax\n L -= I\n return L\n\n lmax = estimate_lmax(laplacian)\n laplacian = scale_operator(laplacian, lmax)\n\n laplacian = sparse.coo_matrix(laplacian)\n\n # PyTorch wants a LongTensor (int64) as indices (it'll otherwise convert).\n indices = np.empty((2, laplacian.nnz), dtype=np.int64)\n np.stack((laplacian.row, laplacian.col), axis=0, out=indices)\n indices = torch.from_numpy(indices)\n\n laplacian = torch.sparse_coo_tensor(indices, laplacian.data, laplacian.shape)\n laplacian = laplacian.coalesce() # More efficient subsequent operations.\n return laplacian", "def solve_laplace_equation(\n grid: GridBase, bc: \"BoundariesData\", label: str = \"Solution to Laplace's equation\"\n) -> ScalarField:\n rhs = ScalarField(grid, data=0)\n return solve_poisson_equation(rhs, bc=bc, label=label)", "def jarvis_convex_hull(points):\n start_index = np.argmax(points[:, 0]) # Point with the highest y-coordinate\n start_point = points[start_index]\n # result = [start_index[:]]\n result = [start_index]\n added_points = {start_index}\n while True:\n for ref_index, ref_point in enumerate(points):\n exit_ = True\n if ref_index == start_index or ref_index in added_points:\n continue\n\n signs = 0\n threshold = len(points) - 2\n for compare_index, compare_point in enumerate(points):\n if compare_index == ref_index or compare_index == start_index:\n continue\n check = compare(start_point, ref_point, compare_point)\n if abs(check) < 1e-2:\n dist_start_ref = distance(start_point, ref_point)\n dist_start_compare = distance(start_point, compare_point)\n if dist_start_compare > dist_start_ref:\n threshold = threshold + 1\n else:\n threshold = threshold - 1\n continue\n signs = signs + 1 if check > 0 else signs - 1\n\n if abs(signs) < threshold:\n continue\n\n exit_ = False\n result.append(ref_index[:])\n added_points.add(ref_index)\n start_index = ref_index\n break\n\n if exit_:\n return result", "def triangulate(points):\n # Remove duplicate xy points bc that would make delauney fail, and must remember z (if any) for retrieving originals from index results\n seen = set() \n uniqpoints = [ p for p in points if str( p[:2] ) not in seen and not 
seen.add( str( p[:2] ) )]\n classpoints = [_Point(*point[:2]) for point in uniqpoints]\n\n # Compute Delauney\n triangle_ids = tesselator.computeDelaunayTriangulation(classpoints)\n\n # Get vertices from result indexes\n triangles = [[uniqpoints[i] for i in triangle] for triangle in triangle_ids]\n \n return triangles", "def crea_falla( lats, lons, prof, dip, strike, latini, latfin, area_sf, profundidad, razon_aspecto ):\n \n # se pasa los arrays de lats y lons a arrays unidimensionales que contienen las coordenadas sin repeticion\n\n # longitudes\n vector_lon_input = lons[0,:] # primera fila de matriz de lons, columnas se repiten\n # se chequea si son crecientes monotonos, util para interpolacion \n if all( x < y for x, y in zip( vector_lon_input, vector_lon_input[1:] ) ):\n vector_lon_input = vector_lon_input\n else:\n vector_lon_input = vector_lon_input[::-1]\n\n # latitudes\n vector_lat_input = lats[:,0] # primera columna de matriz de lats, filas se repiten\n # se chequea si son crecientes monotonos, util para interpolacion \n if all( x < y for x, y in zip( vector_lat_input, vector_lat_input[1:] ) ):\n vector_lat_input = vector_lat_input\n else:\n vector_lat_input = vector_lat_input[::-1]\n\n\n lim_norte = latini # nuevo limite superior\n dif_lim_norte = np.abs( lats-lim_norte ) # diferencias entre array de latitudes y valor del limite superior\n idx_lim_norte = ( np.where( dif_lim_norte == dif_lim_norte.min() )[0][0], np.where( dif_lim_norte == dif_lim_norte.min() )[1][0] )# indice del valor de Slab2.0 que mas se aproxima \n\n lim_sur = latfin # nuevo limite inferior\n dif_lim_sur = np.abs( lats-lim_sur ) # diferencias entre array de latitudes y valor del limite inferior\n idx_lim_sur = ( np.where( dif_lim_sur == dif_lim_sur.min() )[0][0], np.where( dif_lim_sur == dif_lim_sur.min() )[1][0] )# indice del valor de Slab2.0 que mas se aproxima \n\n # se calcula la distancia entre los limites (largo de la falla) en metros\n largo_falla = Geodesic.WGS84.Inverse(lats[idx_lim_norte], lons[idx_lim_norte], lats[idx_lim_sur], lons[idx_lim_sur] )[ \"s12\" ]\n largo_subfalla = np.sqrt( area_sf ) # subfallas cuadradas\n n_fallas_filas = np.floor_divide( largo_falla, largo_subfalla ) # cantidad de fallas en sentido norte - sur \n # a partir del numero de fallas en el sentido norte sur (ctdad de latitudes) se crea un vector de latitudes equidistantes\n lats_fallas = np.reshape( np.linspace( lim_norte, lim_sur, int( n_fallas_filas ) ),( int( n_fallas_filas ),1 ) )\n \n # se busca la latitud del medio para referenciarla a la profundidad deseada\n if len(lats_fallas)%2 != 0:\n lat_mediana = lats_fallas[ np.floor_divide( len( lats_fallas ), 2) ]\n else:\n lat_mediana = lats_fallas[ np.floor_divide( len( lats_fallas ), 2) - 1 ]\n\n # busca indice de la latitud del medio\n dif_lat_mediana = np.abs( lats - lat_mediana )\n # primer indice, muestra la linea de profundidades para esta latitud\n idx_lat_mediana = np.where( dif_lat_mediana == dif_lat_mediana.min() )[0][0] \n # se busca indice de la profundidad en la linea de la latitud media\n dif_profundidad = np.abs( profundidad - prof[ idx_lat_mediana, ] )\n idx_profundidad = np.where( dif_profundidad == dif_profundidad.min() )[0][0]\n \n # indice elemento central de la falla creada, a partir de la latitud central y la profundidad\n idx_subfalla_central = ( idx_lat_mediana, idx_profundidad )\n\n # longitud de la subfalla central\n lon_subfalla_central = lons[ idx_subfalla_central ]#[0][0]\n # profundidad de la subfalla central (punto con la profundidad mas 
cercana a la ingresada)\n prof_subfalla_central = prof[ idx_subfalla_central ]#[0][0]\n\n # se busca los indices de los elementos mas cercanos a las latitudes de las fallas creadas por el linespace\n dif_lats = np.ones( (len( lats_fallas ), ) + np.shape( lats ) ) # inicializacion de array para diferencias de latitudes\n for i in range( len( lats_fallas ) ):\n dif_lats[i] = np.abs( lats - lats_fallas[i] )\n \n idx_fallas = np.ones( (len( lats_fallas ), ) + ( 1,2 ) ) # inicializacion de array con los indices de las latitudes \n for j in range( len( lats_fallas ) ):\n idx_fallas[j] = ( np.where( dif_lats[j] == dif_lats[j].min() )[0][0], np.where( dif_lats[j] == dif_lats[j].min() )[1][0] )\n \n # ancho de la falla\n ancho_falla = largo_falla/razon_aspecto\n n_fallas_columnas = np.floor_divide( ancho_falla, largo_subfalla ) # numero de subfallas en el sentido este-oeste\n # completar array de latitudes con el nuevo ancho\n #matriz_latitudes = np.reshape(np.tile(lats_fallas, int(n_fallas_columnas)),(int(n_fallas_columnas),(len(lats_fallas))))\n matriz_latitudes = np.tile( lats_fallas, int( n_fallas_columnas ) )\n # creacion de array con longitudes a usarse\n # calculo de longitudes de los centros de las subfallas a partir del ancho de la falla\n # es necesario saber si la cantidad es par o impar\n if n_fallas_columnas%2 != 0:\n mitad_ancho = ancho_falla / 2 # en metros\n n_fallas_xlado = int( n_fallas_columnas ) // 2 # cantidad de subfallas a ambos lados de falla central\n lon_limite_oeste = Geodesic.WGS84.Direct( lat_mediana, lon_subfalla_central, 270, mitad_ancho )[ \"lon2\" ]\n lon_limite_este = Geodesic.WGS84.Direct( lat_mediana, lon_subfalla_central, 90, mitad_ancho )[ \"lon2\" ]\n lons_subfallas_oeste = np.linspace( lon_limite_oeste, lon_subfalla_central, ( n_fallas_xlado + 1 ) )\n lons_subfallas_este = np.linspace( lon_subfalla_central, lon_limite_este, ( n_fallas_xlado + 1 ) )\n lons_subfallas = np.append( lons_subfallas_oeste[:-1], lons_subfallas_este ) # vector con las longitudes de las subfallas\n lons_subfallas = np.reshape( lons_subfallas, ( 1, int( n_fallas_columnas ) ) )\n else:\n mitad_ancho = ancho_falla / 2 \n n_fallas_oeste = int( n_fallas_columnas ) / 2 - 1 # -1 para no contar 2 veces la subfalla del medio\n n_fallas_este = int( n_fallas_columnas ) / 2\n lon_limite_oeste = Geodesic.WGS84.Direct( lat_mediana, lon_subfalla_central, 270, ( mitad_ancho - largo_subfalla ) )[ \"lon2\" ]\n lon_limite_este = Geodesic.WGS84.Direct( lat_mediana, lon_subfalla_central, 90, mitad_ancho )[ \"lon2\" ]\n lons_subfallas_oeste = np.linspace( lon_limite_oeste, lon_subfalla_central, ( int( n_fallas_oeste ) + 1 ) )\n lons_subfallas_este = np.linspace( lon_subfalla_central, lon_limite_este, ( int( n_fallas_este ) + 1 ) )\n lons_subfallas = np.append( lons_subfallas_oeste[:-1], lons_subfallas_este ) # vector con las longitudes de las subfallas\n lons_subfallas = np.reshape( lons_subfallas, ( 1, int( n_fallas_columnas ) ) )\n\n # creacion de matriz de longitudes\n matriz_longitudes = np.tile( lons_subfallas, ( int( n_fallas_filas ), 1 ) ) # matriz con longitudes de las subfallas\n\n # se debe encontrar las profundidades, dips y strikes correspondientes a estas latitudes y longitudes de cada subfalla\n # profundidades correspondientes a cada subfalla:\n # se interpolara para encontrar los valores de profundidad correspondientes a cada subfalla\n \n vec_lons_subfallas_todas = np.reshape( matriz_longitudes, \n ( int( n_fallas_filas * n_fallas_columnas ), ) ) # vector con todos los elementos de la 
matriz de longitudes de las subfallas creadas\n vec_lats_subfallas_todas = np.reshape( matriz_latitudes, \n ( int( n_fallas_filas * n_fallas_columnas ), ) ) # vector con todos los elementos de la matriz de latitudes de las subfallas creadas\n\n\n # objeto de interpolacion de profundidades\n profs_int = RegularGridInterpolator( ( vector_lat_input, vector_lon_input ), prof )\n # inicializacion array de valores interpolados de profundidades\n prof_subfallas = np.ones( ( int( n_fallas_columnas * n_fallas_filas ), 1) )\n for p in range( int( n_fallas_columnas*n_fallas_filas ) ):\n prof_subfallas[p] = profs_int( ( vec_lats_subfallas_todas[p], vec_lons_subfallas_todas[p] ) )\n prof_subfallas = np.reshape( prof_subfallas, ( int( n_fallas_filas ), int( n_fallas_columnas ) ) )\n \n # dips correspondientes a cada subfalla:\n # se interpolara para encontrar los valores de dip correspondientes a cada subfalla\n\n # objeto de interpolacion de dips\n dips_int = RegularGridInterpolator( ( vector_lat_input, vector_lon_input ), dip )\n # inicializacion array de valores interpolados de dip\n dip_subfallas = np.ones( ( int( n_fallas_columnas * n_fallas_filas ), 1) )\n for d in range( int( n_fallas_columnas * n_fallas_filas ) ):\n dip_subfallas[d] = dips_int( ( vec_lats_subfallas_todas[d], vec_lons_subfallas_todas[d] ) )\n dip_subfallas = np.reshape( dip_subfallas, (int( n_fallas_filas ), int( n_fallas_columnas ) ) )\n \n # strike correspondiente a cada subfalla:\n # se interpolara para encontrar los valores de strike correspondientes a cada subfalla\n\n # objeto de interpolacion de strikes\n strikes_int = RegularGridInterpolator( ( vector_lat_input, vector_lon_input ), strike )\n # inicializacion array de valores interpolados de strike\n strike_subfallas = np.ones( ( int( n_fallas_columnas*n_fallas_filas ), 1) )\n for s in range( int( n_fallas_columnas*n_fallas_filas ) ):\n strike_subfallas[s] = strikes_int( ( vec_lats_subfallas_todas[s], vec_lons_subfallas_todas[s] ) )\n strike_subfallas = np.reshape( strike_subfallas, ( int( n_fallas_filas ), int( n_fallas_columnas ) ) )\n # revisar, quiza sea necesario invertir los valores de la latitud\n\n\n\n\n return largo_falla, matriz_longitudes, matriz_latitudes, prof_subfallas, dip_subfallas, strike_subfallas", "def build_laplacian_pyramid(im, max_levels, filter_size):\n filter_vec = gaus_1d(filter_size).reshape(1, filter_size)\n g_pyr = build_gaussian_pyramid(im, max_levels, filter_size)[0]\n l_pyr = []\n for i in range(len(g_pyr) - 1):\n l_im = g_pyr[i] - expand_im(g_pyr[i + 1], filter_vec)\n l_pyr.append(l_im)\n\n l_pyr.append(g_pyr[-1])\n return [l_pyr, filter_vec]", "def calc_lampam_2(ss):\n if isinstance(ss, list):\n lampam = np.zeros((len(ss), 12), float)\n for index in range(len(ss)):\n lampam[index] = calc_lampam_2(ss[index])\n return lampam\n if ss.ndim == 2 and ss.shape[0] > 1:\n lampam = np.zeros((ss.shape[0], 12), float)\n for index in range(ss.shape[0]):\n lampam[index] = calc_lampam_2(ss[index])\n return lampam\n\n n_plies_in_panels = np.size(ss) # laminate ply count\n\n theta2 = np.deg2rad(2*ss.astype(float))\n theta4 = 2*theta2\n cos_sin = np.concatenate((\n np.cos(theta2),\n np.cos(theta4),\n np.sin(theta2),\n np.sin(theta4))).reshape((4, n_plies_in_panels))\n\n for_the_top = np.arange(n_plies_in_panels)\n z_0 = np.ones(n_plies_in_panels)\n z_2 = ((1-n_plies_in_panels/2)*z_0+for_the_top)**3 \\\n - ((1-n_plies_in_panels/2)*z_0+for_the_top - 1)**3\n z_1 = ((1-n_plies_in_panels/2)*z_0+for_the_top)**2 \\\n - ((1-n_plies_in_panels/2)*z_0+for_the_top - 
1)**2\n\n return np.array([\n (1/n_plies_in_panels)*np.matmul(cos_sin, z_0),\n (2/n_plies_in_panels**2)*np.matmul(cos_sin, z_1),\n (4/n_plies_in_panels**3)*np.matmul(cos_sin, z_2)]).reshape(12)", "def metoda_lagrange(X, Y, pointx):\n\n n = X.shape[0]\n aprox_value = 0\n\n for i in range(n):\n aprox_value += Y[i] * L_k(i, pointx, X)\n\n return aprox_value", "def parametrized_circle(point_a, point_b, point_c, theta):\n radius, center = shortest_line_to_point(point_a, point_b, point_c)\n # print'center, radius \\n', center, radius\n center_axis = np.subtract(point_a, point_b)\n # print 'center axis %s , radius %s, center %s' % (center_axis, radius, center)\n # center_axis dot <1,1,z> = 0 returns perp vector\n in_plane = norm_vect(np.subtract(point_c, center))\n perp_1 = np.cross(center_axis, in_plane)\n perp_2 = np.cross(center_axis, perp_1)\n # print 'perp dick', perp_1, perp_2\n # norm perpendicular vectors\n perp_1 = norm_vect(perp_1)\n perp_2 = norm_vect(perp_2)\n if -1e-6 > np.dot(perp_1, perp_2) > 1e-6 or -1e-6 > (np.dot(perp_1, center_axis)) > 1e-6 or \\\n -1e-6 > np.dot(perp_2, center_axis) > 1e-6:\n print 'not perpendicular'\n # print np.dot(perp_1, perp_2), np.dot(perp_1, center_axis), np.dot(perp_2, center_axis)\n x = center[0] + (radius * math.cos(theta) * perp_2[0]) + (radius * math.sin(theta) * perp_1[0])\n y = center[1] + (radius * math.cos(theta) * perp_2[1]) + (radius * math.sin(theta) * perp_1[1])\n z = center[2] + (radius * math.cos(theta) * perp_2[2]) + (radius * math.sin(theta) * perp_1[2])\n return [x, y, z]", "def convexHull(points):\n points = np.append(points, [[0, 0, 0]], axis=0) # All points plus origin\n hull = ConvexHull(points) # Visible points plus possible origin. Use its vertices property.\n\n return hull", "def graham_scan(points):\n\n # Find point with smallest y coordinate\n # If two points have equal y coordinates, select the one with the lower x-coordinate\n smallest = points[0]\n for p in points:\n if p[1] < smallest[1]:\n smallest = p\n elif p[1] == smallest[1]:\n if p[0] < smallest[0]:\n smallest = p\n\n # Sort points by angle over smallest to x-axis\n points.sort(key=lambda x: angle(x, smallest))\n\n # Our stack\n hull = [smallest, points[1]]\n i = 2\n while i < len(points):\n # If the last points and the new point form a counter-clockwise triangle,\n # we need the last point. 
Therefore, push the new point\n if ccw(hull[-2], hull[-1], points[i]) > 0 or len(hull) == 2:\n hull.append(points[i])\n i += 1\n # If the two last points and the new point don't form a counter-clockwise triangle,\n # the we don't need the last point\n else:\n hull.pop()\n return hull", "def _fit_plane_to_point_cloud(\n points_xyz: NDArrayFloat,\n) -> Tuple[float, float, float, float]:\n center_xyz: NDArrayFloat = np.mean(points_xyz, axis=0)\n out: Tuple[NDArrayFloat, NDArrayFloat, NDArrayFloat] = np.linalg.svd(\n points_xyz - center_xyz\n )\n vh = out[2]\n\n # Get the unitary normal vector\n a, b, c = float(vh[2, 0]), float(vh[2, 1]), float(vh[2, 2])\n d: float = -np.dot([a, b, c], center_xyz)\n return (a, b, c, d)", "def ldfe(n=3):\n\n # We will use the following coordinate system.\n #\n # | z, top\n # |\n # |\n # |\n # o------- x, right\n # /\n # /\n # /\n # / y, front\n\n # Cube inside the octant that touches the sphere at\n a = 1 / sqrt(3)\n\n # We have three important faces of the cube.\n # Start with the front face and refine it in N segments.\n x = linspace(0, a, n + 1)\n z = linspace(0, a, n + 1)\n\n # Then delta Omega_ij = [x_i,x_i+1] x [z_j,z_j+1]\n # Now go through every cell.\n points = zeros((1 * 1 * 4 * n * n, 3)) # 1/3 of the octants\n weights = zeros(1 * 1 * 4 * n * n)\n square = zeros(1 * 1 * 4 * n * n)\n counter = 0\n rhos0 = 0.1 * ones(4)\n for i in range(n):\n for j in range(n):\n x0, x1, z0, z1 = x[i], x[i + 1], z[j], z[j + 1]\n\n omegas = computeomegas(x0, x1, z0, z1)\n areas = computeareas(omegas, x0, x1, z0, z1)\n print(\"\\n\\nOptimiztation for:\")\n print(\"Domain:\")\n print([x0, x1, z0, z1])\n\n rhos = optimizeposition_leastsquares(areas, omegas, x0, x1, z0, z1,\n rhos0)\n rhos0 = rhos # take the optimal parameter of this cell as the starting value for the optimizer in the next cell\n dummy = rand()\n for k in range(4):\n points[counter, :] = project(omegas[k](rhos[k]))\n weights[counter] = areas[k]\n square[counter] = dummy\n counter += 1\n scatterplot(points, weights, square)\n return points, weights", "def delaunay_triangulate(P: np.ndarray):\n n = P.shape[0]\n if n < 3:\n A = np.ones((n, n)) - np.eye(n)\n else:\n try:\n d = Delaunay(P)\n A = np.zeros((n, n))\n for simplex in d.simplices:\n for pair in itertools.permutations(simplex, 2):\n A[pair] = 1\n except QhullError as err:\n print(\"Delaunay triangulation error detected. 
Return fully-connected graph.\")\n print(\"Traceback:\")\n print(err)\n A = np.ones((n, n)) - np.eye(n)\n return A", "def euclidean_proj_l1ball(v, s=1):\n assert s > 0, \"Radius s must be strictly positive (%d <= 0)\" % s\n n, = v.shape # will raise ValueError if v is not 1-D\n # compute the vector of absolute values\n u = np.abs(v)\n # check if v is already a solution #DB I commented this out since I want it to always be an equality ||v||_1 = 1\n #if u.sum() <= s:\n # L1-norm is <= s\n # return v\n # v is not already a solution: optimum lies on the boundary (norm == s)\n # project *u* on the simplex\n w = euclidean_proj_simplex(u, s=s)\n # compute the solution to the original problem on v\n w *= np.sign(v)\n return w", "def calc_lampam_sym(ss, constraints):\n if isinstance(ss, list):\n lampam = np.zeros((len(ss), 12), float)\n for index in range(len(ss)):\n lampam[index] = calc_lampam_sym(ss[index], constraints)\n return lampam\n if ss.ndim == 2 and ss.shape[0] > 1:\n lampam = np.zeros((ss.shape[0], 12), float)\n for index in range(ss.shape[0]):\n lampam[index] = calc_lampam_sym(ss[index], constraints)\n return lampam\n\n n_plies_in_panels = 2 * np.size(ss) # laminate ply count\n\n cos_sin = np.empty((4, n_plies_in_panels // 2), float)\n for ind in range(n_plies_in_panels // 2):\n cos_sin[:, ind] = constraints.cos_sin[\n constraints.ind_angles_dict[ss[ind]]].reshape((4, ))\n\n for_the_top = np.arange(n_plies_in_panels // 2)\n z_0 = np.ones(n_plies_in_panels // 2)\n z_2 = ((1 - n_plies_in_panels / 2) * z_0 + for_the_top) ** 3 \\\n - ((1 - n_plies_in_panels / 2) * z_0 + for_the_top - 1) ** 3\n lampam = np.array([\n (2 / n_plies_in_panels)*np.matmul(cos_sin, z_0),\n np.array([0, 0, 0, 0]),\n (8 / n_plies_in_panels**3)*np.matmul(cos_sin, z_2)]).reshape(12)\n return lampam", "def _get_Laplacian_matrix(self, X):\n self.laplacian_mat, self.laplacian_sym_mat, self.laplacian_weights = self.laplacian.compute_laplacian(\n self.get_Affinity_matrix(X)\n )", "def calc_lampam(ss, constraints=None):\n if constraints is None:\n return calc_lampam_2(ss)\n\n if isinstance(ss, list):\n lampam = np.zeros((len(ss), 12), float)\n for index in range(len(ss)):\n lampam[index] = calc_lampam(ss[index], constraints)\n return lampam\n if ss.ndim == 2 and ss.shape[0] > 1:\n lampam = np.zeros((ss.shape[0], 12), float)\n for index in range(ss.shape[0]):\n lampam[index] = calc_lampam(ss[index], constraints)\n return lampam\n n_plies_in_panels = np.size(ss) # laminate ply count\n\n if not constraints.sym:\n cos_sin = np.empty((4, n_plies_in_panels), float)\n for ind in range(n_plies_in_panels):\n cos_sin[:, ind] = np.copy(constraints.cos_sin[\n constraints.ind_angles_dict[ss[ind]]].reshape((4, )))\n\n for_the_top = np.arange(n_plies_in_panels)\n z_0 = np.ones(n_plies_in_panels)\n z_2 = ((1-n_plies_in_panels/2)*z_0+for_the_top)**3 \\\n - ((1-n_plies_in_panels/2)*z_0+for_the_top - 1)**3\n z_1 = ((1-n_plies_in_panels/2)*z_0+for_the_top)**2 \\\n - ((1-n_plies_in_panels/2)*z_0+for_the_top - 1)**2\n return np.array([\n (1/n_plies_in_panels)*np.matmul(cos_sin, z_0),\n (2/n_plies_in_panels**2)*np.matmul(cos_sin, z_1),\n (4/n_plies_in_panels**3)*np.matmul(cos_sin, z_2)]).reshape(12)\n\n cos_sin = np.empty((4, np.size(ss) // 2), float)\n for ind in range(np.size(ss) // 2):\n cos_sin[:, ind] = constraints.cos_sin[\n constraints.ind_angles_dict[ss[ind]]].reshape((4,))\n\n for_the_top = np.arange(np.size(ss) // 2)\n z_0 = np.ones(np.size(ss) // 2)\n z_2 = ((1 - n_plies_in_panels / 2) * z_0 + for_the_top) ** 3 \\\n - ((1 - 
n_plies_in_panels / 2) * z_0 + for_the_top - 1) ** 3\n lampam = np.array([\n (2/n_plies_in_panels)*np.matmul(cos_sin, z_0),\n np.array([0, 0, 0, 0]),\n (8/n_plies_in_panels**3)*np.matmul(cos_sin, z_2)]).reshape(12)\n\n if np.size(ss) % 2:\n cos_sin_mid = constraints.cos_sin[\n constraints.ind_angles_dict[ss[n_plies_in_panels // 2]]]\n lampam += np.array([\n (1/n_plies_in_panels)*cos_sin_mid,\n np.zeros((4,), dtype=float),\n (1/n_plies_in_panels**3)*cos_sin_mid]).reshape(12)\n return lampam", "def calculate_normalized_laplacian(adj):\n adj = sp.coo_matrix(adj)\n d = np.array(adj.sum(1))\n d_inv_sqrt = np.power(d, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n normalized_laplacian = sp.eye(adj.shape[0]) - adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()\n return normalized_laplacian", "def calculate_normalized_laplacian(adj):\n adj = sp.coo_matrix(adj)\n d = np.array(adj.sum(1))\n d_inv_sqrt = np.power(d, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n normalized_laplacian = sp.eye(adj.shape[0]) - adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()\n return normalized_laplacian", "def _get_surface_color_scalars(self, mol, solvent_radius, surface_points, smooth_input):\n grid = FutamuraHash(mol)\n T = grid.T\n radii = {'C':1.75,\n 'O':1.4,\n 'N':1.55,\n 'S':1.8,\n 'P':2.0,\n 'H':1.17,\n 'Z':3.0}\n default_distance = 1.8\n print 'locating nearest atoms'\n scalars = vtk.vtkIntArray()\n scalars.SetNumberOfComponents(1)\n # now locate the intersections\n number_index_map = {}\n for ind in range(len(mol.atoms)):\n number_index_map[mol.atoms[ind].atom_number] = ind\n \n last_atom = 'None'\n if smooth_input:\n new_points = []\n ptctr = 0\n for point in surface_points:\n x_val = y_val = z_val = 0\n # figure out which bin it goes in\n for x_ind in range(0, grid.volume_count_x):\n if point[0] < grid.volume_indices_x[x_ind]:\n break\n else:\n x_val = x_ind\n for y_ind in range(grid.volume_count_y):\n if point[1] < grid.volume_indices_y[y_ind]:\n break\n else:\n y_val = y_ind\n for z_ind in range(grid.volume_count_z):\n if point[2] < grid.volume_indices_z[z_ind]:\n break\n else:\n z_val = z_ind\n\n start_array = [0,0,0]\n end_array = [0,0,0]\n # figure out starts and ends\n counts = [grid.volume_count_x, grid.volume_count_y, grid.volume_count_z]\n keys = [x_val, y_val, z_val]\n for ind in [0,1,2]:\n if keys[ind] == 0:\n start_array[ind] = 0\n end_array[ind] = 2\n elif keys[ind] == counts[ind] - 1:\n start_array[ind] = keys[ind]-1\n end_array[ind] = keys[ind]+1\n else:\n start_array[ind] = keys[ind]-1\n end_array[ind] = keys[ind]+2\n min_dist = 1000.0\n sec_dist = 1000.0\n id2 = -1\n id = -1\n escape = 0 # turns 1 once the correct atom is found\n if smooth_input == 0:\n identification_distance = 0.1\n # figure out if its in range of the last atom chosen (arbitrary, but tends to speed up the calculations)\n if last_atom != 'None':\n dist = math.sqrt(pow(point[0]-last_atom.x,2) + pow(point[1]-last_atom.y,2) + pow(point[2]-last_atom.z,2))\n dif = abs(dist - radii.get(last_atom.atom_type[0], default_distance))\n if dif < identification_distance:\n id = last_atom.atom_number # assume this is it\n escape = 1\n \n if not escape:\n # now look for atoms in the same bin as the last atom\n ky = '%s %s %s'%(x_val,y_val,z_val)\n if ky in T.keys(): # first look in this atoms bin\n for atom in T[ky]:\n # do not retrieve if type H and protonation is turned off\n if self.hydrogens_on or ((not 
self.hydrogens_on) and atom.atom_type[0] != 'H'):\n dist = math.sqrt(pow(point[0]-atom.x,2) + pow(point[1]-atom.y,2) + pow(point[2]-atom.z,2))\n if abs(dist - radii.get(atom.atom_type[0], default_distance)) < identification_distance:\n id = atom.atom_number # assume this is it\n escape = 1\n break\n if not escape:\n for i in range(start_array[0], end_array[0]):\n for j in range(start_array[1], end_array[1]):\n for k in range(start_array[2], end_array[2]):\n key2 = '%s %s %s'%(i,j,k)\n #if key2 != ky:\n if key2 in T.keys():\n for atom in T[key2]:\n if self.hydrogens_on or ((not self.hydrogens_on) and atom.atom_type[0] != 'H'):\n dist = math.sqrt(pow(point[0]-atom.x,2) + pow(point[1]-atom.y,2) + pow(point[2]-atom.z,2))\n if not smooth_input:\n if abs(dist - radii.get(atom.atom_type[0], default_distance)) < identification_distance:\n id = atom.atom_number\n escape = 1\n break\n elif dist < min_dist:\n min_dist = dist\n id = atom.atom_number\n else:\n if dist < min_dist:\n sec_dist = min_dist\n id2 = id\n min_dist = dist\n id = atom.atom_number\n if escape:\n break\n if escape:\n break\n if escape:\n break\n # assign the index\n last_atom = mol.atoms[number_index_map[id]]\n scalars.InsertTuple1(ptctr, number_index_map[id])\n # smooth the data\n fitting_back_distance = 0.2\n if smooth_input:\n x2 = point[0]\n y2 = point[1]\n z2 = point[2]\n if id2 != -1: # more than one intersection is necessary\n sec_last_atom = mol.atoms[number_index_map[id2]]\n if abs(min_dist-radii.get(last_atom.atom_type[0], default_distance)) < fitting_back_distance: # if this atom is close enough\n if abs(sec_dist-radii.get(sec_last_atom.atom_type[0], default_distance)) > 0.4: # if second atom is far enough away\n r = radii.get(last_atom.atom_type[0], default_distance)\n d = min_dist\n x = last_atom.x\n y = last_atom.y\n z = last_atom.z\n x2 = ((r/d)*(point[0]-x)) + x\n y2 = ((r/d)*(point[1]-y)) + y\n z2 = ((r/d)*(point[2]-z)) + z\n new_points.append([x2,y2,z2])\n \n ptctr += 1\n if smooth_input:\n return scalars,new_points\n else:\n return scalars", "def lherzolite():\n\n rho = 3270.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 187.4; C[0,1] = 63.71; C[0,2] = 63.87; C[0,3] = 0.78; C[0,4] = 2.02; C[0,5] = -3.2\n C[1,0] = C[0,1]; C[1,1] = 211.25; C[1,2] = 64.5; C[1,3] = -3.07; C[1,4] = 0.87; C[1,5] = -5.78\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 190.; C[2,3] = 0.38; C[2,4] = 2.38; C[2,5] = -0.12\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 67.9; C[3,4] = -2.12; C[3,5] = 1.6\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 63.12; C[4,5] = -0.55\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 66.83\n\n return C, rho", "def lapserate(t, z, sigma, lat):\n import numpy as np\n dT = np.zeros((np.ma.size(sigma), np.ma.size(lat)))\n dz = np.zeros((np.ma.size(sigma), np.ma.size(lat)))\n for i in range(np.ma.size(sigma, axis=0)-1):\n dT[i, :] = t[i+1, :] - t[i, :]\n for i in range(np.ma.size(sigma, axis=0)-1):\n dz[i, :] = z[i+1, :] - z[i, :]\n lapse = -1000 * dT[0:-1] / dz[0:-1]\n # zonalplot(lapse, sigma[0:-1], lat, 'Lapse rate')\n return lapse", "def lapserate(t, z, sigma, lat):\n import numpy as np\n dT = np.zeros((np.ma.size(sigma), np.ma.size(lat)))\n dz = np.zeros((np.ma.size(sigma), np.ma.size(lat)))\n for i in range(np.ma.size(sigma, axis=0)-1):\n dT[i, :] = t[i+1, :] - t[i, :]\n for i in range(np.ma.size(sigma, axis=0)-1):\n dz[i, :] = z[i+1, :] - z[i, :]\n lapse = -1000 * dT[0:-1] / dz[0:-1]\n # zonalplot(lapse, 
sigma[0:-1], lat, 'Lapse rate')\n return lapse", "def euclidean_proj_l1ball(v, s=1):\n assert s > 0, \"Radius s must be strictly positive (%d <= 0)\" % s\n n, = v.shape # will raise ValueError if v is not 1-D\n # compute the vector of absolute values\n u = np.abs(v)\n # check if v is already a solution\n if u.sum() <= s:\n # L1-norm is <= s\n return v\n # v is not already a solution: optimum lies on the boundary (norm == s)\n # project *u* on the simplex\n w = euclidean_proj_simplex(u, s=s)\n # compute the solution to the original problem on v\n w *= np.sign(v)\n return w", "def convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull\n lower = []\n for p in points:\n cont = 1\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n print(\"antes \"), print(cont), print(lower)\n lower.pop()\n print(\"despues \"),print(lower)\n cont += 1\n lower.append(p)\n xlower ,ylower = getlists(lower)\n plt.plot(xlower,ylower,color=\"yellow\")\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n print(upper)\n print(\"hello2 \")\n print(cross((2,0),(2,4),(2.5,3)))\n\n xupper ,yupper = getlists(upper)\n plt.plot(xupper,yupper,color=\"blue\")\n\n\n return lower[:-1] + upper[:-1]", "def get_spline(points):\n import numpy\n import scipy.linalg\n\n # sort points by x value\n points = sorted(points, key=lambda point: point[\"x\"])\n\n n = len(points) - 1\n\n # Set up a system of equations of form Ax=b\n A = numpy.zeros(shape=(4*n, 4*n))\n b = numpy.zeros(shape=(4*n, 1))\n\n for i in range(0, n):\n # 2n equations from condtions (S2)\n A[i][4*i+0] = points[i][\"x\"]**3\n A[i][4*i+1] = points[i][\"x\"]**2\n A[i][4*i+2] = points[i][\"x\"]\n A[i][4*i+3] = 1\n b[i] = points[i][\"y\"]\n\n A[n+i][4*i+0] = points[i+1][\"x\"]**3\n A[n+i][4*i+1] = points[i+1][\"x\"]**2\n A[n+i][4*i+2] = points[i+1][\"x\"]\n A[n+i][4*i+3] = 1\n b[n+i] = points[i+1][\"y\"]\n\n # 2n-2 equations for (S3):\n if i == 0:\n continue\n # point i is an inner point\n A[2*n+(i-1)][4*(i-1)+0] = 3*points[i][\"x\"]**2\n A[2*n+(i-1)][4*(i-1)+1] = 2*points[i][\"x\"]\n A[2*n+(i-1)][4*(i-1)+2] = 1\n A[2*n+(i-1)][4*(i-1)+0+4] = -3*points[i][\"x\"]**2\n A[2*n+(i-1)][4*(i-1)+1+4] = -2*points[i][\"x\"]\n A[2*n+(i-1)][4*(i-1)+2+4] = -1\n b[2*n+(i-1)] = 0\n\n A[3*n+(i-1)][4*(i-1)+0] = 6*points[i][\"x\"]\n A[3*n+(i-1)][4*(i-1)+1] = 2\n A[3*n+(i-1)][4*(i-1)+0+4] = -6*points[i][\"x\"]\n A[3*n+(i-1)][4*(i-1)+1+4] = -2\n b[3*n+(i-1)] = 0\n # Natural spline:\n A[3*n-1+0][0+0] += 6*points[0][\"x\"]\n A[3*n-1+0][0+1] += 2\n b[3*n-1+0] += 0\n\n A[3*n+n-1][4*(n-1)+0] += 6*points[n][\"x\"]\n A[3*n+n-1][4*(n-1)+1] += 2\n b[3*n+n-1] += 0\n\n x = scipy.linalg.solve(A, b)\n spline = []\n for i in range(0, n):\n spline.append({\"u\": points[i][\"x\"], \"v\": points[i+1][\"x\"],\n \"a\": float(x[4*i+0]),\n \"b\": float(x[4*i+1]),\n \"c\": float(x[4*i+2]),\n 
\"d\": float(x[4*i+3])})\n return spline", "def local_action(self, *links, all_links):\n S = 0.0\n for link in links:\n site1 = link[:-1]\n u = link[-1]\n for v in range(self.dim):\n if v != u:\n site2 = np.array(site1) - self.bases[v]\n plaq1 = self.plaquette_operator(site1, u, v, all_links)\n plaq2 = self.plaquette_operator(site2, u, v, all_links)\n S += (plaq1 + plaq2)\n return S", "def lagrange(natural: int) -> tuple:\n natural_copy = natural\n x_val = int(natural_copy ** 0.5)\n while x_val > 0:\n natural_copy -= x_val * x_val\n y_val = int(natural_copy ** 0.5)\n natural_copy -= y_val * y_val\n z_val = int(natural_copy ** 0.5)\n natural_copy -= z_val * z_val\n t_val = int(natural_copy ** 0.5)\n if x_val * x_val + y_val * y_val + z_val * z_val + t_val * t_val == natural:\n return x_val, y_val, z_val, t_val\n else:\n natural_copy = natural\n x_val = x_val - 1", "def forward(self, x):\n x = torch.matmul(self.laplacian, x)\n dims = tuple(range(x.ndimension())[1:])\n x = x.pow(2).sum(dims)\n return x", "def d_dl(self, points):\n n_centres = self.n_points\n n_points = points.shape[0]\n\n # TPS kernel (nonlinear + affine)\n\n # for each input, evaluate the rbf\n # (n_points, n_centres)\n k_points = self.kernel.apply(points)\n\n # k_points with (1, x, y) appended to each point\n # (n_points, n_centres+3) - 3 is (1, x, y) for affine component\n k = np.hstack([k_points, np.ones([n_points, 1]), points])\n\n # (n_centres+3, n_centres+3)\n try:\n inv_L = np.linalg.inv(self.l)\n except np.linalg.LinAlgError:\n # If two points are coincident, or very close to being so, then the\n # matrix is rank deficient and thus not-invertible. Therefore,\n # only take the inverse on the full-rank set of indices.\n _u, _s, _v = np.linalg.svd(self.l)\n keep = _s.shape[0] - sum(_s < self.min_singular_val)\n inv_L = _u[:, :keep].dot(1.0 / _s[:keep, None] * _v[:keep, :])\n\n\n # Taking the derivative of L for changes in l must yield an x,y change\n # for each centre.\n # (n_centres+3, n_centres+3, n_centres, n_dims)\n dL_dl = np.zeros(self.l.shape + (n_centres, 2))\n\n # take the derivative of the kernel wrt centres at the centres\n # SHOULD be (n_centres, n_dims, n_centres, n_dims)\n # IS (n_centres, n_centres, n_dims\n dK_dl_at_tgt = self.kernel.d_dl(self.source.points)\n\n # we want to build a tensor where for each slice where\n # dK_dl[i, j, k, l] is the derivative wrt the l'th dimension of the\n # i'th centre for L[j, k] -> first axis is just looping over centres\n # and last looping over dims\n # (n_centres, n_centres, n_centres, n_dims)\n dK_dl = np.zeros((n_centres, ) + dK_dl_at_tgt.shape)\n\n # make a linear iterator over the centres\n iter = np.arange(n_centres)\n\n # efficiently build the repeated pattern for dK_dl\n # note that the repetition over centres happens over axis 0\n # and the dims axis is the last\n # so dK_dl[0, ..., 0] corresponds to dK/dx0 in Joan's paper\n # dK_dl[3, ..., 1] corresponds to dK_dy3 in Joan's paper\n dK_dl[iter, iter] = dK_dl_at_tgt[iter]\n dK_dl[iter, :, iter] = dK_dl_at_tgt[:, iter]\n\n # prepare memory for the answer\n # SHOULD be (n_points, n_dims, n_centres, n_dims)\n # IS (n_points, , n_centres, n_dims)\n dW_dl = np.zeros((n_points, n_centres, 2))\n\n # pretend the target is equal to the source\n # (n_dims, n_centres+3)\n pseudo_target = np.hstack([self.source.points.T, np.zeros([2, 3])])\n\n for i in np.arange(n_centres):\n # dP_dli (n_centres, n_points, n_dims, n_dims)\n dP_dli = np.zeros(self.p.shape + (2,))\n dP_dli[i, 1, 0] = -1\n dP_dli[i, 2, 1] = -1\n\n 
dL_dl[:n_centres, :n_centres, i] = dK_dl[i]\n dL_dl[:n_centres, n_centres:, i] = dP_dli\n dL_dl[n_centres:, :n_centres, i] = np.swapaxes(dP_dli, 0, 1)\n\n omega_x = -inv_L.dot(dL_dl[..., i, 0].dot(inv_L))\n omega_y = -inv_L.dot(dL_dl[..., i, 1].dot(inv_L))\n dW_dl[:, i, 0] = k.dot(omega_x).dot(pseudo_target[0])\n dW_dl[:, i, 1] = k.dot(omega_y).dot(pseudo_target[1])\n\n return dW_dl", "def square_bravais_lattice(self,R,lattice_multiplier=1):\n a = lattice_multiplier*self.a\n b = lattice_multiplier*self.b\n c = lattice_multiplier*self.c\n\n #Calculate the number of lattice points needed in each direction to cover a length of R\n #I use the ceiling function so that when I shift the origin by a one unit cell vector,\n #I still cover all lattive points within a distance of R\n Na = int(np.ceil(R/np.linalg.norm(a)))\n Nb = int(np.ceil(R/np.linalg.norm(b)))\n Nc = int(np.ceil(R/np.linalg.norm(c)))\n\n #calculate the number of vertices in a grid that covers the sphere\n #A sphere of radius R fits within a grid of size 2R x 2R x 2R\n #Adding one to account for origin\n number_vertices = (2*Na+1)*(2*Nb+1)*(2*Nc+1)\n vertices = np.empty((number_vertices,3))\n vertex_labels = np.empty(number_vertices ,dtype=int)\n \n # populate the vertices list with the positions of a lattice with single spacing\n n = 0\n for i in np.arange(-Na,Na+1):\n for j in np.arange(-Nb,Nb+1):\n for k in np.arange(-Nc,Nc+1):\n vertices[n]=np.dot([[i,j,k]],[[a[0],a[1],a[2]],[b[0],b[1],b[2]],[c[0],c[1],c[2]]])\n vertex_labels[n] = self.position_map_inverse[(i*lattice_multiplier)%2,(j*lattice_multiplier)%2,(k*lattice_multiplier)%2]\n n += 1\n return vertices, vertex_labels", "def main():\n points = np.array(\n [[1, 1], [2, 5], [3, 2], [4, 4], [5, 2], [6, 3], [2, 3], [3, 4], [5, 3]]\n )\n hull = graham_scan(points)\n hull = np.concatenate((hull, [hull[0]]))\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(points[:, 0], points[:, 1])\n ax.plot(hull[:, 0], hull[:, 1], 'r')\n ax.set_title('Convex Hull using Graham Scan')\n plt.show()", "def eight_point(points_lst):\r\n\r\n # get H for normalization and produce normalized points\r\n points_lst = np.array(points_lst)\r\n h_l = get_h(points_lst[:, 0])\r\n h_r = get_h(points_lst[:, 1])\r\n p_l_norm = [h_l @ np.array([p[0], p[1], 1]) for p in points_lst[:, 0]]\r\n p_r_norm = [h_r @ np.array([p[0], p[1], 1]) for p in points_lst[:, 1]]\r\n\r\n # create A using normalized points\r\n a = []\r\n for p_l, p_r in zip(p_l_norm, p_r_norm):\r\n x_l, y_l = p_l[0], p_l[1]\r\n x_r, y_r = p_r[0], p_r[1]\r\n a.append([x_r * x_l, x_r * y_l, x_r, y_r * x_l, y_r * y_l, y_r, x_l, y_l, 1])\r\n a = np.array(a)\r\n\r\n u, s, vh = np.linalg.svd(a)\r\n f_mat = np.reshape(vh[-1, :], (3, 3))\r\n\r\n # enforce singularity constraint\r\n u, s, vh = np.linalg.svd(f_mat)\r\n s[-1] = 0\r\n f_unscaled = (u * s) @ vh\r\n\r\n # rescale F\r\n return np.linalg.inv(h_r) @ f_unscaled @ np.linalg.inv(h_l)", "def laplacian(W, normalized=True):\n\n # Degree matrix.\n d = W.sum(dim=0)\n\n # Laplacian matrix.\n if not normalized:\n D = scipy.sparse.diags(d.A.squeeze(), 0)\n L = D - W\n else:\n # d += np.spacing(np.array(0, W.dtype))\n d = 1 / torch.sqrt(d)\n D = torch.diags(d.A.squeeze(), 0)\n I = scipy.sparse.identity(d.size, dtype=W.dtype)\n L = I - D * W * D\n\n # assert np.abs(L - L.T).mean() < 1e-9\n assert type(L) is scipy.sparse.csr.csr_matrix\n return L", "def Lorenz(s):\n x = s[0]\n y = s[1]\n z = s[2]\n \n # constants for the equations\n sigma = 10.0\n rho = 28.0\n beta = 8.0/3.0\n \n # Return the state 
derivatives.\n return [sigma * (y-x), (rho-z)*x -y, x*y - beta*z]", "def get_clusters(self,points):\n self.points = points\n self.__dabest = [self.__cmeans(points,i) for i in range(self.__start,self.__end)]\n ##self.hull = \n return self.__dabest", "def calc_Ls(self, x_surface, geom):\n\n return np.zeros((self.n_wl,))", "def laplacian(A):\n #calculate D by creating a diagonal matrix with the column sum of A\n D = np.diag(A.sum(axis=0))\n return D - A", "def delaunay(cls,\n points: Sequence[Point],\n context: Context) -> 'Triangulation':\n points = sorted(points)\n result = [cls._initialize_triangulation(points[start:stop],\n context)\n for start, stop in pairwise(accumulate(\n chain((0,), coin_change(len(points), _base_cases))))]\n for _ in repeat(None, ceil_log2(len(result))):\n parts_to_merge_count = len(result) // 2 * 2\n result = ([result[offset]._merge(result[offset + 1])\n for offset in range(0, parts_to_merge_count, 2)]\n + result[parts_to_merge_count:])\n return result[0]" ]
[ "0.6669824", "0.62479734", "0.6143647", "0.60507256", "0.60478675", "0.5883016", "0.58383894", "0.5776216", "0.5732057", "0.56500006", "0.5618606", "0.5549746", "0.5351798", "0.5340996", "0.5318419", "0.53017426", "0.52880955", "0.5279453", "0.5237156", "0.52357775", "0.5218471", "0.5176523", "0.5167271", "0.5147309", "0.5145872", "0.5142192", "0.5132436", "0.51234376", "0.5120541", "0.511803", "0.51098335", "0.51094586", "0.5081282", "0.5080635", "0.50721437", "0.50673336", "0.50475514", "0.504111", "0.5029194", "0.5028908", "0.50035805", "0.49971417", "0.49831086", "0.49775085", "0.4970657", "0.4963029", "0.49607387", "0.49607387", "0.49607253", "0.49554712", "0.49443048", "0.49439782", "0.49418676", "0.4933729", "0.49326852", "0.49232998", "0.49226636", "0.4922157", "0.4918421", "0.49174833", "0.49110395", "0.49078247", "0.49066362", "0.49062085", "0.4901193", "0.48935226", "0.48804", "0.4878334", "0.48705187", "0.48652557", "0.48636678", "0.48633483", "0.48623812", "0.48621604", "0.48588562", "0.48574814", "0.48520675", "0.48502833", "0.48481193", "0.48481193", "0.48476127", "0.48448953", "0.48347986", "0.48347986", "0.48327082", "0.48316127", "0.4818229", "0.48118985", "0.48093438", "0.48086038", "0.48071668", "0.48001227", "0.47998178", "0.4785649", "0.4783926", "0.47817206", "0.47760928", "0.47616085", "0.47607633", "0.47563282" ]
0.6151292
2
Decorator to help verify that a function was actually executed. Annotates a function with an attribute 'didrun', and only sets it to True if the function is actually called.
def checkrun(f): @functools.wraps(f) def wrapper(*args, **kwargs): wrapper.didrun = True return f(*args, **kwargs) wrapper.didrun = False return wrapper
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_called(self, func):\n self.called[func] = False\n def _check(*args, **kwargs):\n self.called[func] = True\n return func(*args, **kwargs)\n return _check", "def run_once(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n if not wrapper.has_run:\n result = func(*args, **kwargs)\n wrapper.has_run = True\n return result\n wrapper.has_run = False\n return wrapper", "def post_run_func_checked(driver: HammerDriver) -> None:\n if post_run_func is not None:\n post_run_func(driver)", "def assertion_passed(self, func):", "def check_before_executing(f):\n @functools.wraps(f)\n def wrapper(self, *args, **kwargs):\n if not self._checked:\n assert self.is_correct, (\n 'The MatchList is incorrectly constructed. '\n 'Run check_and_print_if_error() for details.')\n return f(self, *args, **kwargs)\n return wrapper", "def check_in_use(f):\n\n def wrapped(self, *args, **kwargs):\n if self.fired:\n raise InUse(_(\"Executor in use\"))\n return f(self, *args, **kwargs)\n return wrapped", "def test_func(self):\n def func():\n return 0\n self.assertEqual(type(decorators.timeit(func)), types.FunctionType)", "def does_it_run(func, args):\n \n if args is None:\n func()\n else:\n func(*args)", "def check_mocked_functions_called(*mocked_functions):\n for mocked_function in mocked_functions:\n assert_that(mocked_function.called, f\"The function was not called - {mocked_function}\")", "def _func_only(func):\n if inspect.isfunction(func):\n return\n else:\n raise Exception(\"Only functions can be tasks\")", "def test_process_invalid1(self):\n self.skill.logic = {}\n self.skill.valid.app_id = '12345'\n @self.skill.launch\n def sample_func():\n \"\"\"Decorated function.\"\"\"\n pass\n self.skill.logic['LaunchRequest']()\n self.assertFalse(self.skill.process(data.SAMPLE_LAUNCH_REQUEST))", "def run_once(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n if not wrapper.has_run:\n result = f(*args, **kwargs)\n wrapper.has_run = True\n wrapper.result = result\n \n return wrapper.result\n \n wrapper.has_run = False\n return wrapper", "def add_check_function(check_function: Callable):\n\n def decorator(func: Callable):\n @wraps(func)\n def wrapper(*args, **kwargs):\n check_function(*args, *kwargs.values())\n return func(*args, **kwargs)\n\n return wrapper\n\n name = getattr(check_function, '__name__', '`func`')\n decorator.__doc__ = f\"Check the function's arguments via `{name}` before calling it.\"\n return decorator", "def test_func_2(self):\n def func():\n return 0\n self.assertEqual(type(decorators.timeit_2(func)), types.FunctionType)", "def traced_function_wrong(function):\n logger.debug(\"started execution of %s\", function)\n start_time = time.time()\n\n @wraps(function)\n def wrapped(*args, **kwargs):\n result = function(*args, **kwargs)\n logger.info(\n \"function %s took %.2fs\", function, time.time() - start_time\n )\n return result\n\n return wrapped", "def final_check(self):\n for func in self.called.keys():\n self.assertTrue(self.called[func], \"%s was not called\" % (func,))", "def test_that_original_func_saved():\n\n assert callable(custom_sum.__original_func)", "def decorator(func):\n\n pass", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test 
at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILIED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILIED.\".format(linenum))\n print(msg)", "def _summary_wrapper(summary_func):\n\n @functools.wraps(summary_func)\n def wrapper(*args, **kwargs):\n from alf.utils.common import run_if\n return run_if(\n should_record_summaries(), lambda: summary_func(*args, **kwargs))\n\n return wrapper", "def is_being(f):\n\n def check_is_being(self, *args, **kwargs):\n # TODO\n return f(self, *args, **kwargs)\n\n return check_is_being", "def test_wrapped_function(self):\n decorator = self.decorator()\n\n def func():\n pass\n\n wrapper = decorator(func)\n self.assertIdentical(wrapper.__wrapped__, func)", "def log_dec(fn: \"Function\"):\n @wraps(fn)\n def inner(*args, **kwargs):\n run_dt = datetime.now()\n result = fn(*args, **kwargs)\n end_dt = datetime.now()\n print(f\"{run_dt}: called {fn.__name__}\")\n print(f\"Execution time: {end_dt - run_dt}\")\n print(f\"Function description:\\n{fn.__doc__}\")\n print(f\"Function returned something: {True if result else False}\")\n return 
result\n return inner", "def before_call(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> None:", "def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\r\n if did_pass:\r\n msg = \"Test at line {0} ok.\".format(linenum)\r\n else:\r\n msg = (\"Test at line {0} FAILED.\".format(linenum))\r\n print(msg)", "def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\r\n if did_pass:\r\n msg = \"Test at line {0} ok.\".format(linenum)\r\n else:\r\n msg = (\"Test at line {0} FAILED.\".format(linenum))\r\n print(msg)", "def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\r\n if did_pass:\r\n msg = \"Test at line {0} ok.\".format(linenum)\r\n else:\r\n msg = (\"Test at line {0} FAILED.\".format(linenum))\r\n print(msg)", "def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\r\n if did_pass:\r\n msg = \"Test at line {0} ok.\".format(linenum)\r\n else:\r\n msg = (\"Test at line {0} FAILED.\".format(linenum))\r\n print(msg)", "def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\r\n if did_pass:\r\n msg = \"Test at line {0} ok.\".format(linenum)\r\n else:\r\n msg = (\"Test at line {0} FAILED.\".format(linenum))\r\n print(msg)", "def assertion_started(self, func):", "def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number\r\n if did_pass:\r\n msg = \"Test at line {0} is ok\".format(linenum)\r\n else:\r\n msg = \"Test at line {0} is FAILED\".format(linenum)\r\n print(msg)", "def test_tolerate_decorated_function_return_value():\n def test_function():\n return \"foobar\"\n fn = tolerate()(test_function)\n eq_(fn(), \"foobar\")", "def test_tolerate_return_function_decorator():\n decorator = tolerate()\n ok_(inspect.isfunction(decorator))\n\n args, varargs, keywords, defaults = inspect.getargspec(decorator)\n eq_(len(args), 1, 'Return function should take one argument for function')", "def test_wraps():\n print('func')", "def __call__(self, func):\n LOG.debug(\"@defer %s\", func)\n\n @wraps(func)\n def decorated(itself, event, *args, **kwargs):\n \"\"\"the decorated function\"\"\"\n LOG.debug(\"decorated\")\n if event.defer(itself, delay=self.delay):\n # OK, let's handle it later\n return\n return func(itself, event, *args, **kwargs)\n return decorated", "def is_decorator(tree, fname):\n return ((isx(tree, fname)) or\n (type(tree) is Call and isx(tree.func, fname)))", "def profiler(func): # type: ignore\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs): # type: ignore\n if wrapper.exits == wrapper.calls:\n wrapper.exits = 0\n wrapper.calls = 0\n wrapper.begin = datetime.datetime.now()\n wrapper.calls += 1\n resulted_func = func(*args, **kwargs)\n wrapper.exits += 1\n wrapper.last_time_taken = (datetime.datetime.now() - wrapper.begin).total_seconds()\n return resulted_func\n\n wrapper.calls = 0\n wrapper.exits = 0\n return wrapper", "def important(func):\n\n def decorated(*args, **kwargs):\n \"\"\"Decorated method.\"\"\"\n runLog.important(func(*args, **kwargs))\n\n return decorated", "def test_decorator(f):\n return f", "def notify_decorator(name, fn):\n return fn", "def track_func(self, func, args):\n\n try:\n self.start_track()\n ret = func(*args)\n finally:\n self.end_track()\n \n return ret", "def testit(did_pass):\n\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg 
= (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def changed(func, name):\n @functools.wraps(func)\n def decorateit(*args, **kwargs):\n if func.__name__ not in changed_list:\n changed_list.append(func.__name__)\n logger.debug(\"function {} changed by {}! Check if it works as you think\".format(name))\n return func(*args, **kwargs)\n return decorateit", "def check_mocked_functions_not_called(*mocked_functions):\n for mocked_function in mocked_functions:\n assert_that(not mocked_function.called, f\"The function should not have been called - {mocked_function}\")", "def truth(message, expected=None):\n def decorator(func):\n return update_wrapper(Check(func, message, expected), func)\n return decorator", "def test_deferred(func=None, log_step=True):\n\n def _test_deferred(func, *args, **kwargs):\n \"\"\"Actual implementation\"\"\"\n # record the step\n info = inspect.getframeinfo(inspect.stack()[2].frame)\n code_context = \"\"\n lineno = info.lineno\n\n # loop to actual function call to grab all lines\n while func.__name__ not in code_context and lineno > 0:\n code_context = linecache.getline(info.filename,\n lineno) + code_context\n lineno -= 1\n code_context = textwrap.dedent(code_context.rstrip())\n defer_pipe_call = xtz.xtz._DeferredPipeCall(\n code_context, func, args, kwargs, log_step=log_step)\n return defer_pipe_call\n\n if func is not None:\n return decorator.decorate(func, _test_deferred)\n else:\n return decorator.decorator(_test_deferred)", "def test_dispatch_launch(self):\n @self.skill.launch\n def sample_func():\n \"\"\"Decorated function.\"\"\"\n self.skill.response.sessionAttributes['run'] = True\n self.skill.request.request.type = 'LaunchRequest'\n self.skill.dispatch()\n self.assertTrue(self.skill.response.sessionAttributes['run'])", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno\n if did_pass:\n msg = 'Test at line {0} ok.'.format(linenum)\n else:\n msg = 'Test at line {0} FAILED.'.format(linenum)\n print(msg)", "def isDecorated(self):", "def annotate(func:Callable, quiet:Optional[bool]):\n name = func.__name__\n @wraps(func)\n def inner(*args, **kwargs):\n _default_recorder.start(name)\n try:\n return func(*args, **kwargs)\n finally:\n _default_recorder.end(name, quiet)\n return inner", "def execute_if_enabled(f):\n @functools.wraps(f)\n def wrapper(self, *args, **kwargs):\n if not self._enabled:\n return\n return f(self, *args, **kwargs)\n return wrapper", "def istest(func):\n func.__test__ = True\n return func", "def test_ignore_lack_of_metadata():\n\n def original(something, dispatcher, intent):\n \"\"\"Original!\"\"\"\n pass\n\n new_func = partial(original, \"something\")\n original.attr = 1\n wrapped = do(new_func)\n assert wrapped.__name__ == \"do_wrapper\"", "def arg_wrapper(func):\n\n @functools.wraps(func)\n def wrapper(*args,\n **kwargs):\n\n \"\"\" Simple profiler for the function.\n\n :param args: Args for the function.\n :param kwargs: Kwargs for the function.\n :return: The result of the function.\n \"\"\"\n\n _profiler = profilers.start_if_active(profile_id)\n\n # Run the function\n result = func(*args,\n **kwargs)\n\n profilers.stop_if_active(func=func,\n profile_id=profile_id,\n profiler=_profiler,\n sort_by=sort_by)\n\n # Return the function result\n return result\n\n # Return the decorated function\n return wrapper", "def decorate_with_checker(func: CallableT) -> CallableT:\n assert not hasattr(func, \"__preconditions__\"), \\\n \"Expected func to have no list of preconditions (there should be only a single contract checker per 
function).\"\n\n assert not hasattr(func, \"__postconditions__\"), \\\n \"Expected func to have no list of postconditions (there should be only a single contract checker per function).\"\n\n assert not hasattr(func, \"__postcondition_snapshots__\"), \\\n \"Expected func to have no list of postcondition snapshots (there should be only a single contract checker \" \\\n \"per function).\"\n\n sign = inspect.signature(func)\n if '_ARGS' in sign.parameters:\n raise TypeError(\n 'The arguments of the function to be decorated with a contract checker include \"_ARGS\" which is '\n 'a reserved placeholder for positional arguments in the condition.')\n\n if '_KWARGS' in sign.parameters:\n raise TypeError(\n 'The arguments of the function to be decorated with a contract checker include \"_KWARGS\" which is '\n 'a reserved placeholder for keyword arguments in the condition.')\n\n param_names = list(sign.parameters.keys())\n\n # Determine the default argument values\n kwdefaults = resolve_kwdefaults(sign=sign)\n\n id_func = id(func)\n\n # (mristin, 2021-02-16)\n # Admittedly, this branching on sync/async is absolutely monstrous.\n # However, I couldn't find out an easier way to refactor the code so that it supports async.\n # Python expects us to explicitly colour functions as sync/async so we can not just put in an if-statement and\n # introduce an \"await\".\n #\n # The two wrappers need to be manually maintained in parallel.\n # Whenever you make a change, please inspect manually that both sync and async code exercises equivalent behavior.\n # For example, copy/paste the two blocks of code in separate files and perform a diff.\n\n if inspect.iscoroutinefunction(func):\n\n async def wrapper(*args, **kwargs): # type: ignore\n \"\"\"Wrap func by checking the preconditions and postconditions.\"\"\"\n kwargs_error = _assert_no_invalid_kwargs(kwargs)\n if kwargs_error:\n raise kwargs_error\n\n # We need to create a new in-progress set if it is None as the ``ContextVar`` does not accept\n # a factory function for the default argument. 
If we didn't do this, and simply set an empty\n # set as the default, ``ContextVar`` would always point to the same set by copying the default\n # by reference.\n in_progress = _IN_PROGRESS.get()\n if in_progress is None:\n in_progress = set()\n _IN_PROGRESS.set(in_progress)\n\n # Use try-finally instead of ExitStack for performance.\n try:\n # If the wrapper is already checking the contracts for the wrapped function, avoid a recursive loop\n # by skipping any subsequent contract checks for the same function.\n if id_func in in_progress:\n return await func(*args, **kwargs)\n\n in_progress.add(id_func)\n\n (preconditions, snapshots, postconditions) = _unpack_pre_snap_posts(wrapper)\n\n resolved_kwargs = kwargs_from_call(\n param_names=param_names, kwdefaults=kwdefaults, args=args, kwargs=kwargs)\n\n type_error = _assert_resolved_kwargs_valid(postconditions, resolved_kwargs)\n if type_error:\n raise type_error\n\n violation_error = await _assert_preconditions_async(\n preconditions=preconditions, resolved_kwargs=resolved_kwargs)\n if violation_error:\n raise violation_error\n\n # Capture the snapshots\n if postconditions and snapshots:\n resolved_kwargs['OLD'] = await _capture_old_async(\n snapshots=snapshots, resolved_kwargs=resolved_kwargs)\n\n # Ideally, we would catch any exception here and strip the checkers from the traceback.\n # Unfortunately, this can not be done in Python 3, see\n # https://stackoverflow.com/questions/44813333/how-can-i-elide-a-function-wrapper-from-the-traceback-in-python-3\n result = await func(*args, **kwargs)\n\n if postconditions:\n resolved_kwargs['result'] = result\n\n violation_error = await _assert_postconditions_async(\n postconditions=postconditions, resolved_kwargs=resolved_kwargs)\n if violation_error:\n raise violation_error\n\n return result\n finally:\n in_progress.discard(id_func)\n else:\n\n def wrapper(*args, **kwargs): # type: ignore\n \"\"\"Wrap func by checking the preconditions and postconditions.\"\"\"\n kwargs_error = _assert_no_invalid_kwargs(kwargs)\n if kwargs_error:\n raise kwargs_error\n\n # We need to create a new in-progress set if it is None as the ``ContextVar`` does not accept\n # a factory function for the default argument. 
If we didn't do this, and simply set an empty\n # set as the default, ``ContextVar`` would always point to the same set by copying the default\n # by reference.\n in_progress = _IN_PROGRESS.get()\n if in_progress is None:\n in_progress = set()\n _IN_PROGRESS.set(in_progress)\n\n # Use try-finally instead of ExitStack for performance.\n try:\n # If the wrapper is already checking the contracts for the wrapped function, avoid a recursive loop\n # by skipping any subsequent contract checks for the same function.\n if id_func in in_progress:\n return func(*args, **kwargs)\n\n in_progress.add(id_func)\n\n (preconditions, snapshots, postconditions) = _unpack_pre_snap_posts(wrapper)\n\n resolved_kwargs = kwargs_from_call(\n param_names=param_names, kwdefaults=kwdefaults, args=args, kwargs=kwargs)\n\n type_error = _assert_resolved_kwargs_valid(\n postconditions=postconditions, resolved_kwargs=resolved_kwargs)\n if type_error:\n raise type_error\n\n violation_error = _assert_preconditions(\n preconditions=preconditions, resolved_kwargs=resolved_kwargs, func=func)\n if violation_error:\n raise violation_error\n\n # Capture the snapshots\n if postconditions and snapshots:\n resolved_kwargs['OLD'] = _capture_old(\n snapshots=snapshots, resolved_kwargs=resolved_kwargs, func=func)\n\n # Ideally, we would catch any exception here and strip the checkers from the traceback.\n # Unfortunately, this can not be done in Python 3, see\n # https://stackoverflow.com/questions/44813333/how-can-i-elide-a-function-wrapper-from-the-traceback-in-python-3\n result = func(*args, **kwargs)\n\n if postconditions:\n resolved_kwargs['result'] = result\n\n violation_error = _assert_postconditions(\n postconditions=postconditions, resolved_kwargs=resolved_kwargs, func=func)\n if violation_error:\n raise violation_error\n\n return result\n finally:\n in_progress.discard(id_func)\n\n # Copy __doc__ and other properties so that doctests can run\n functools.update_wrapper(wrapper=wrapper, wrapped=func)\n\n assert not hasattr(wrapper, \"__preconditions__\"), \"Expected no preconditions set on a pristine contract checker.\"\n assert not hasattr(wrapper, \"__postcondition_snapshots__\"), \\\n \"Expected no postcondition snapshots set on a pristine contract checker.\"\n assert not hasattr(wrapper, \"__postconditions__\"), \"Expected no postconditions set on a pristine contract checker.\"\n\n # Precondition is a list of condition groups (i.e. disjunctive normal form):\n # each group consists of AND'ed preconditions, while the groups are OR'ed.\n #\n # This is necessary in order to implement \"require else\" logic when a class weakens the preconditions of\n # its base class.\n setattr(wrapper, \"__preconditions__\", [])\n setattr(wrapper, \"__postcondition_snapshots__\", [])\n setattr(wrapper, \"__postconditions__\", [])\n\n return wrapper # type: ignore", "def post_init_func(fn):\n fn.__has_run__ = False\n @functools.wraps(fn)\n def wrapper_fn(*args, **kwargs):\n if fn.__has_run__:\n cui.message('Warning: executing post_init_func %s more than once.' 
% fn)\n\n result = fn(*args, **kwargs)\n fn.__has_run__ = True\n return result\n\n Core.__post_init_functions__.append(wrapper_fn)\n return wrapper_fn", "def assert_if_truthy(fun):\n\n # NOTE: Not using ``wrapt`` because it forwards attributes\n # from the wrapping function to the wrapped function,\n # which will break the code in _make_asserter.\n @_wraps(fun)\n def wrapper(*args, **kwargs):\n result = fun(*args, **kwargs)\n assert not result, result\n\n return wrapper", "def before_test(self, func, *args, **kwargs):\n pass", "def _run_func(\n func: Callable, *args: Any, return_value: Any | None = None, **kwargs: Any\n) -> None:\n try:\n if try_bind(func, *args, return_value=return_value, **kwargs):\n func(*args, return_value=return_value, **kwargs)\n elif try_bind(func, *args, **kwargs):\n func(*args, **kwargs)\n else:\n raise SignatureMismatch(func)\n except SignatureMismatch:\n # always re-raise SignatureMismatch as this means we have been unable\n # to run the side-effect function at all.\n raise\n except Exception: # noqa: B902\n logger.exception(\"Error running side_effect function '%s'\", fname(func))\n if settings.ABORT_ON_ERROR or settings.TEST_MODE_FAIL:\n raise", "def is_lambda_decorator(tree, fname=None):\n return ((type(tree) is Call and len(tree.args) == 1) and\n (fname is None or is_decorator(tree.func, fname)))", "def func_custom(fname):\n def decorator(f):\n def decorated(*idp, **kwp):\n global tracer_data\n if hasattr(tracer_data, 'enabled') and tracer_data.enabled:\n try:\n call(fname)\n return f(*idp, **kwp)\n finally:\n ret()\n else:\n return f(*idp, **kwp)\n return decorated\n return decorator", "def benchmark(func):\n start = time.time()\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n rc = func(*args, **kwargs)\n print('Running time: {}'.format(time.time() - start))\n return rc\n return wrapper", "def check_chief(function_to_decorate):\r\n @wraps(function_to_decorate)\r\n def decorated_function(*args, **kwargs):\r\n \tif g.my['rank'] > 15:\r\n \t\tabort(401)\r\n \treturn function_to_decorate(*args, **kwargs)\r\n return decorated_function", "def _debug_wrap(func):\n\n def wrapper(*args, **kwargs):\n _debug_print(f\"{datetime.datetime.now()} - About to run: {func.__name__}\")\n ret_val = func(*args, **kwargs)\n _debug_print(f\"{datetime.datetime.now()} - Completed run: {func.__name__}\")\n return ret_val\n\n return wrapper", "def validated_hook(self) -> Callable[[bool], None]:\n return self._validated_hook", "def is_function(self):\n return False", "def decorator_func(func):\r\n @functools.wraps(func)\r\n def with_status_check(obj, *args, **kwargs):\r\n if obj.status not in valid_start_statuses:\r\n exception_msg = (\r\n u\"Error calling {} {}: status is '{}', must be one of: {}\"\r\n ).format(func, obj, obj.status, valid_start_statuses)\r\n raise VerificationException(exception_msg)\r\n return func(obj, *args, **kwargs)\r\n\r\n return with_status_check", "def test_require_enabled_call_method(self):\n method = MagicMock(return_value=True)\n decorated = require_enabled(method)\n self = MagicMock()\n self.enabled = True\n self.assertTrue(decorated(self))\n self.assertTrue(method.called)", "def test_tolerate_decorated_function_fail_silently():\n def test_function():\n raise Exception()\n fn = tolerate()(test_function)\n fn()", "def measure(func):\n if func not in measured_funcs:\n measured_funcs.add(func)\n if not hasattr(func, 'total_runtime'):\n func.total_runtime = 0.0\n if not hasattr(func, 'total_calls'):\n func.total_calls = 0\n\n def wrapper(*args, 
**kwargs):\n before_call = datetime.datetime.now()\n res = func(*args, **kwargs)\n elapsed = datetime.datetime.now() - before_call\n func.total_runtime += elapsed.total_seconds()\n func.total_calls += 1\n return res\n\n return wrapper", "def test_dispatch_intent(self):\n @self.skill.intent('test_intent')\n def sample_func():\n \"\"\"Decorated function.\"\"\"\n self.skill.response.sessionAttributes['run'] = True\n self.skill.request.request.type = 'IntentRequest'\n self.skill.request.request.intent = interface.Intent()\n self.skill.request.request.intent.name = 'test_intent'\n self.skill.dispatch()\n self.assertTrue(self.skill.response.sessionAttributes['run'])", "def NeverNeededExpectation(self, expectation: BaseExpectation) -> bool:\n return self.did_fully_pass", "def is_fixture_method(callable_):\n # ensure we don't pick up turtles/mocks as fixtures\n if not inspect.isroutine(callable_):\n return False\n\n # _fixture_id indicates this method was tagged by us as a fixture\n return callable_hasattr(callable_, '_fixture_type')", "def is_function(self):\n line = self.line.strip()\n if line.startswith('fu'):\n if line.startswith('function') is False:\n return True", "def decorator(func):\n\t\tif has_self(func):\n\t\t\t###################################################################\n\t\t\tdef evaluate(self):\n\t\t\t\t\"\"\" Function wrapper for lazily evaluating the aspect.\n\t\t\t\t\"\"\"\n\t\t\t\tif not hasattr(evaluate, 'value'):\n\t\t\t\t\tevaluate.value = func(self)\n\t\t\t\treturn evaluate.value\n\t\telse:\n\t\t\t###################################################################\n\t\t\tdef evaluate():\n\t\t\t\t\"\"\" Function wrapper for lazily evaluating the aspect.\n\t\t\t\t\"\"\"\n\t\t\t\tif not hasattr(evaluate, 'value'):\n\t\t\t\t\tevaluate.value = func()\n\t\t\t\treturn evaluate.value\n\t\treturn push_aspect(name or func.__name__, evaluate)", "def _fixup_func_caller(cmd, **kwargs):\n def wrapper():\n result = _run(cmd, **kwargs)\n if result.returncode not in (None, 0):\n return result.stdout\n return None\n return wrapper", "def identify(func):\n def identified(arg):\n func(arg)\n return arg\n return identified", "def aFunction():\n return True", "def test_tolerate_decorated_function_raise_if_disabled():\n def test_function():\n raise AttributeError()\n fn = tolerate()(test_function)\n # disable\n tolerate.disabled = True\n fn()", "def profiled(func):\n @functools.wraps(func)\n def inner(*args, **kwargs):\n inner.ncalls += 1\n return func(*args, **kwargs)\n\n inner.ncalls = 0\n return inner", "def __call__(self, func):\n\n # set logger if it was not set earlier\n if not self.logger:\n logging.basicConfig()\n self.logger = logging.getLogger(func.__module__)\n\n @functools.wraps(func)\n def wrapper(*args, **kwds):\n st = datetime.datetime.now()\n f_result = func(*args, **kwds)\n et = datetime.datetime.now()\n self.logger.debug(\"%s duration: %s\" % (func.__name__, et - st))\n return f_result\n\n return wrapper", "def function_timer(orig_func):\n import time\n\n @wraps(orig_func)\n def wrapper(*args, **kwargs):\n t1 = time.time()\n result = orig_func(*args, **kwargs)\n t2 = time.time()\n print('{} ran in: {} sec'.format(orig_func.__name__, t2))\n return result\n\n return wrapper", "def decorator(func):\n\t\treturn push_aspect(name or func.__name__, func)", "def login_required(view_function):\n\t@wraps(view_function) # Tells debuggers that is is a function wrapper\n\tdef decorator(*args, **kwargs):\n\t\tauth = current_app.auth\n\t\tallowed = False\n\t\t# User must be logged 
in\n\t\tif current_user.is_authenticated:\n\t\t\t# User must be verified (if required)\n\t\t\tif auth.AUTH_ENABLE_CONFIRM_ACCOUNT and current_user.verified:\n\t\t\t\tallowed=True\n\t\t\t# User can be not verified (if allowed)\n\t\t\telif not auth.AUTH_ENABLE_CONFIRM_ACCOUNT:\n\t\t\t\tallowed=True\n\t\tif not allowed:\n\t\t\t# Redirect to unauthenticated page\n\t\t\treturn auth.unauthenticated()\n\t\t# It's OK to call the view\n\t\treturn view_function(*args, **kwargs)\n\treturn decorator", "def after_test(self, func, *args, **kwargs):\n pass", "def run(*argv_r, **kwargs_r):\n def decorator(func):\n def wrapped(device, call_again=False):\n with device.mutex:\n if not device.flag_removing:\n try:\n status_perform = True\n time_start = time.time()\n try:\n func(device)\n except Exception as err:\n t = traceback.format_exc()\n log.error(u'Run function exception: {0}'.format(decode_string(t)))\n\n time_finish = time.time()\n time_spend = time_finish-time_start\n log.info('run function {0} of device {2} was executed for {1} seconds'.\n format(func.func_name, time_spend, device.id))\n\n period = getattr(device, kwargs_r.keys()[0]).val\n func.__dict__['mem_period'] = period\n\n except Exception as err:\n t = traceback.format_exc()\n log.error(u'system error in run decorator: {0}'.format(decode_string(t)))\n status_perform = False\n finally:\n if not status_perform:\n mem_period = func.__dict__['mem_period'] \\\n if 'mem_period' in func.__dict__ \\\n else kwargs_r.values()[0]\n else:\n mem_period = period\n\n if call_again:\n if time_spend < mem_period:\n device.manager.tasks_pool.add_task(time_finish + mem_period - time_spend,\n getattr(device, func.func_name))\n else:\n device.manager.tasks_pool.add_task(time_finish, getattr(device, func.func_name))\n\n wrapped.runnable = True\n wrapped.period_name = kwargs_r.keys()[0]\n wrapped.period_default_value = kwargs_r.values()[0]\n return wrapped\n return decorator", "def isValidFunction(self):\n for token in self.value:\n if token.type == 'defFunction' or token.type == 'callFunction':\n if token.value.split('(')[0] == self.name:\n return False\n return True", "def execute_logging_decorator(func):\n filename = filename_regexp.match(inspect.getmodule(inspect.stack()[1][0]).__file__).group(1)\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n logger.info('*** {}: START EXECUTING ***'.format(filename))\n result = func(*args, **kwargs)\n logger.info('{}: Finished executing'.format(filename))\n return result\n return wrapper" ]
[ "0.65756035", "0.6015735", "0.60046905", "0.5997352", "0.59535944", "0.5899848", "0.5813919", "0.57161885", "0.5611487", "0.55894804", "0.5569097", "0.556421", "0.55513084", "0.55510217", "0.55382", "0.5538129", "0.55316585", "0.5526899", "0.55099505", "0.5504471", "0.5504471", "0.5504471", "0.5504471", "0.5504471", "0.5504471", "0.5504471", "0.5504363", "0.5502706", "0.5502706", "0.5497879", "0.5497879", "0.54972124", "0.5445182", "0.5431307", "0.54309607", "0.5426045", "0.54168683", "0.54168683", "0.54168683", "0.54168683", "0.54168683", "0.54085773", "0.53872377", "0.5387162", "0.538158", "0.5366764", "0.5357345", "0.534718", "0.5341437", "0.53310674", "0.5330346", "0.5327428", "0.5323629", "0.5316015", "0.53118277", "0.5309878", "0.5309236", "0.52845204", "0.52773887", "0.52752775", "0.5267291", "0.5264588", "0.52609926", "0.5244075", "0.52409554", "0.5238793", "0.52337754", "0.5227366", "0.5222618", "0.5221218", "0.5219886", "0.52166325", "0.52131057", "0.5211811", "0.5202986", "0.5188604", "0.5181223", "0.5179946", "0.5177026", "0.5176242", "0.5175725", "0.5170986", "0.5152567", "0.5146794", "0.5146329", "0.51383364", "0.51285785", "0.5127388", "0.5126629", "0.5124947", "0.5122619", "0.5121288", "0.51154363", "0.5114032", "0.5112531", "0.51097983", "0.51056993", "0.5104983", "0.51013774", "0.5100827" ]
0.8174126
0
Developers can define their own workflow classes in external python packages, in which case the workflowname must be specified as a fullyqualified class name.
def test_workflow_class_discovery(): config = { "workflow-name": "tests.workflows.test_workflow.CustomWorkflow", "cluster-type": CLUSTER_TYPE } template_dir = tempfile.mkdtemp(suffix="test-workflow-discovery-template") with open(f"{template_dir}/workflow.yaml", 'w') as f: yaml.dump(config, f) _execution_dir, workflow = launch_flow(template_dir, 1) assert isinstance(workflow, CustomWorkflow) assert workflow.execute.didrun
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_workflow_class(workflow_name):\n try:\n module_name = \"workflow.\" + workflow_name\n importlib.import_module(module_name)\n return True\n except ImportError:\n return False", "def get_workflow_object(\n workflow_name, settings, logger, client, token, decision, maximum_page_size\n):\n module_name = \"workflow.\" + workflow_name\n module_object = importlib.import_module(module_name)\n workflow_class = getattr(module_object, workflow_name)\n # Create the object\n workflow_object = workflow_class(\n settings, logger, client, token, decision, maximum_page_size\n )\n return workflow_object", "def __init__(self, workflow):\n self.workflow = workflow", "def start_workflow(self, workflow_name, workflow_input, **params):\n raise NotImplementedError", "def generate_workflow_name(self) -> str:\n pass", "def get_workflow_name(workflow_type):\n return \"workflow_\" + workflow_type", "def test_deploy_workflow_definition(self):\n pass", "def create_inferelator_workflow(regression=RegressionWorkflow, workflow=WorkflowBase):\n\n # Decide which preprocessing/postprocessing workflow to use\n # String arguments are parsed for convenience in the run script\n if isinstance(workflow, basestring):\n if workflow == \"base\":\n workflow_class = WorkflowBase\n elif workflow == \"tfa\":\n from inferelator.tfa_workflow import TFAWorkFlow\n workflow_class = TFAWorkFlow\n elif workflow == \"amusr\":\n from inferelator.amusr_workflow import SingleCellMultiTask\n workflow_class = SingleCellMultiTask\n elif workflow == \"single-cell\":\n from inferelator.single_cell_workflow import SingleCellWorkflow\n workflow_class = SingleCellWorkflow\n else:\n raise ValueError(\"{val} is not a string that can be mapped to a workflow class\".format(val=workflow))\n # Or just use a workflow class directly\n elif inspect.isclass(workflow) and issubclass(workflow, WorkflowBase):\n workflow_class = workflow\n else:\n raise ValueError(\"Workflow must be a string that maps to a workflow class or an actual workflow class\")\n\n # Decide which regression workflow to use\n # Return just the workflow if regression is set to None\n if regression is None:\n return workflow_class\n # String arguments are parsed for convenience in the run script\n elif isinstance(regression, basestring):\n if regression == \"bbsr\":\n from inferelator.regression.bbsr_python import BBSRRegressionWorkflow\n regression_class = BBSRRegressionWorkflow\n elif regression == \"elasticnet\":\n from inferelator.regression.elasticnet_python import ElasticNetWorkflow\n regression_class = ElasticNetWorkflow\n elif regression == \"amusr\":\n from inferelator.regression.amusr_regression import AMUSRRegressionWorkflow\n regression_class = AMUSRRegressionWorkflow\n else:\n raise ValueError(\"{val} is not a string that can be mapped to a regression class\".format(val=regression))\n # Or just use a regression class directly\n elif inspect.isclass(regression) and issubclass(regression, RegressionWorkflow):\n regression_class = regression\n else:\n raise ValueError(\"Regression must be a string that maps to a regression class or an actual regression class\")\n\n class RegressWorkflow(regression_class, workflow_class):\n regression_type = regression_class\n\n return RegressWorkflow", "def createSingleModuleWorkflow(module,name):\n\n moduleType = module.getType()\n moduleName = name\n\n workflow = Workflow()\n step = StepDefinition(moduleType+'_step')\n step.addModule(module)\n moduleInstance = step.createModuleInstance(moduleType,moduleName)\n\n 
step.addParameter(moduleInstance.parameters.getInput())\n workflow.addParameter(moduleInstance.parameters.getInput())\n\n workflow.addStep(step)\n stepInstance = workflow.createStepInstance(moduleType+'_step',moduleName+'_step')\n\n # Propagate the module input parameters to the workflow level\n moduleInstance.linkParameterUp(moduleInstance.parameters.getInput())\n stepInstance.linkParameterUp(moduleInstance.parameters.getInput())\n\n workflow.setName(name)\n workflow.setDescription('Single module workflow from '+moduleType+' type module')\n workflow.setDescrShort(moduleType+' workflow')\n return workflow", "def workflow_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"workflow_name\")", "def init_workflow():\n pass", "def generate_workflow_name(self) -> str:\n return self._workflow_name", "def load_workflow_by_filename(self, workflow_filename,\n workflow_name=None, workflow_name_prefix=\"\", workflow_name_suffix=\"\"):\n if not self._galaxy_instance:\n raise RuntimeError(\"WorkflowLoader not initialized\")\n self._logger.debug(\"Loading workflow definition from file: %s\", workflow_filename)\n with open(workflow_filename) as f:\n wf_json = _json.load(f)\n self._logger.debug(\"Workflow definition loaded from file: done\")\n wf_json[\"name\"] = \"-\".join([workflow_name_prefix,\n (workflow_name if workflow_name else wf_json[\"name\"]).replace(\" \", \"\"),\n workflow_name_suffix])\n self._logger.debug(\"Uploading the Workflow to the Galaxy instance ...\")\n wf = self._galaxy_instance.workflows.import_new(wf_json)\n self._logger.debug(\"Uploading the Workflow to the Galaxy instance: done\")\n self._workflows[wf.id] = wf\n return wf", "def start_workflow(self, **params):\n raise NotImplementedError", "def test_change_workflow_definition(self):\n pass", "def update_workflow_name(self, old_playbook, old_workflow, new_playbook, new_workflow):\n old_key = _WorkflowKey(old_playbook, old_workflow)\n new_key = _WorkflowKey(new_playbook, new_workflow)\n self.workflows[new_key] = self.workflows.pop(old_key)\n self.workflows[new_key].name = new_workflow\n self.workflows[new_key].playbook_name = new_playbook\n logger.debug('updated workflow name {0} to {1}'.format(old_key, new_key))", "def get_name(name, class_name):\n if name:\n return name\n if not class_name:\n raise MLRunInvalidArgumentError(\"name or class_name must be provided\")\n if isinstance(class_name, type):\n return class_name.__name__\n return class_name", "def load_workflow(resource, workflow_name):\n try:\n playbook_file = open(resource, 'r')\n except (IOError, OSError) as e:\n logger.error('Could not load workflow from {0}. Reason: {1}'.format(resource, format_exception_message(e)))\n return None\n else:\n with playbook_file:\n workflow_loaded = playbook_file.read()\n try:\n playbook_json = json.loads(workflow_loaded)\n playbook_name = playbook_json['name']\n workflow_json = next(\n (workflow for workflow in playbook_json['workflows']\n if workflow['name'] == workflow_name), None)\n if workflow_json is None:\n logger.warning('Workflow {0} not found in playbook {0}. '\n 'Cannot load.'.format(workflow_name, playbook_name))\n return None\n workflow = Workflow.create(workflow_json)\n return playbook_name, workflow\n except ValueError as e:\n logger.error('Cannot parse {0}. Reason: {1}'.format(resource, format_exception_message(e)))\n except (InvalidInput, UnknownApp, UnknownAppAction, UnknownFilter, UnknownFlag) as e:\n logger.error('Error constructing workflow {0}. 
Reason: {1}'.format(workflow_name,\n format_exception_message(e)))\n return None\n except KeyError as e:\n logger.error('Invalid Playbook JSON format. Details: {}'.format(e))\n return None", "def test_get_workflow_definition(self):\n pass", "def generate_workflow_name(self) -> str:\n workflow_name = None\n if self._parsed_url.basename in WORKFLOW_SPEC_FILENAMES:\n # We omit the name of the specification file if it is standard\n # (e.g. `reana.yaml` or `reana.yml`)\n workflow_name = self._clean_workflow_name(self._parsed_url.dirname)\n if not workflow_name:\n workflow_name = self._clean_workflow_name(\n f\"{self._parsed_url.dirname}-{self._parsed_url.basename_without_extension}\"\n )\n return workflow_name", "def __init__(self):\n\n # Application handle\n self.application = None\n\n # Workflow name\n self.name = None\n\n # Workflow data\n self.data = None", "def getClass(strname):\n \n modulename, classname = strname.split('.')\n classname = classname.split('(')[0]\n if hasattr(Analysis,modulename):\n module_ = getattr(Analysis,modulename)\n class_ = getattr(module_,classname)\n else:\n module_ = getattr(Summary,modulename)\n class_ = getattr(module_,classname)\n \n return class_", "def test_find_workflow_definitions(self):\n pass", "def test_rename_to_mlflow(mlflow):\n atom = ATOMClassifier(X_bin, y_bin, experiment=\"test\", random_state=1)\n atom.run(\"GNB\")\n atom.scoring()\n assert mlflow.call_count == 10 # 9 from scoring + 1 from training", "def new_workflow(self, upload_file, name=\"\", description=\"\", submit=None):\n data = upload_file.file.read()\n if not name:\n name = upload_file.filename.replace(\".xml\", \"\")\n workflow = Workflow(name=name, description=description,\n data=data,\n created_by=identity.current.user.id)\n log.info(\"Saved new workflow %d\", workflow.id)\n raise redirect(\"/workflow/%d\" % workflow.id)", "def pkg(klass, name):\n raise NotImplementedError", "def register_workflow_type(\n domain, name, version, description=None,\n default_task_start_to_close_timeout=None,\n default_execution_start_to_close_timeout=None, default_task_list=None,\n default_task_priority=None, default_child_policy=\"TERMINATE\",\n default_lambda_role=None):\n kwargs = {}\n\n for aws_prop, value, conversion in (\n ('description', description, None),\n ('defaultTaskStartToCloseTimeout', default_task_start_to_close_timeout,\n None),\n ('defaultExecutionStartToCloseTimeout',\n default_execution_start_to_close_timeout, None),\n ('defaultTaskList', default_task_list, None),\n ('defaultTaskPriority', default_task_priority, str),\n ('defaultChildPolicy', default_child_policy, None),\n ('defaultLambdaRole', default_lambda_role, None)):\n\n kwargs = check_and_add_kwargs(aws_prop, value, conversion, kwargs)\n\n result = make_request(\n SWF.register_workflow_type,\n domain=domain,\n name=name,\n version=version,\n **kwargs)\n\n if result.success:\n return\n\n if result.result.code == WORKFLOW_TYPE_ALREADY_EXISTS:\n raise WorkflowTypeAlreadyExistsError(\"Workflow type already exists.\")\n\n raise RegisterWorkflowTypeError(result.result.message)", "def add_workflow(workflow):\n\n launchpad = LaunchPad.auto_load()\n launchpad.add_wf(workflow)", "def add_workflow(workflow):\n\n launchpad = LaunchPad.auto_load()\n launchpad.add_wf(workflow)", "def get_workflow(self, playbook_name, workflow_name):\n key = _WorkflowKey(playbook_name, workflow_name)\n if key in self.workflows:\n return self.workflows[key]\n return None", "def get(name, config):\n\n process = Process()\n\n # Build workflow\n with 
st.spinner(\"Building workflow....\"):\n process.build(name, config)\n\n return process", "def import_class(self, class_name):\n internal_class_name = class_name.split(\".\")[-1][:-2]\n class_path = class_name.split()[-1].split(\".\")[:-1]\n class_path[0] = class_path[0][1:]\n class_module_path = \".\".join(class_path)\n if internal_class_name in self._project.job_type.job_class_dict:\n module_path = self._project.job_type.job_class_dict[internal_class_name]\n if class_module_path != module_path:\n state.logger.info(\n f'Using registered module \"{module_path}\" instead of custom/old module \"{class_module_path}\" to'\n f' import job type \"{internal_class_name}\"!'\n )\n else:\n module_path = class_module_path\n return getattr(\n importlib.import_module(module_path),\n internal_class_name,\n )", "def load_workflow_from_file(self, path, workflow_name, name_override=None, playbook_override=None):\n with open(path, 'r') as playbook_file:\n playbook_loaded = playbook_file.read()\n try:\n json_in = json.loads(playbook_loaded)\n except json.JSONDecodeError:\n logger.error('Cannot parse {}'.format(path))\n else:\n playbook_name = playbook_override if playbook_override else json_in['name']\n for workflow in (workflow_ for workflow_ in json_in['workflows'] if workflow_['name'] == workflow_name):\n if workflow['name'] == workflow_name:\n workflow_name = name_override if name_override else workflow['name']\n workflow['name'] = workflow_name\n key = _WorkflowKey(playbook_name, workflow_name)\n self.__add_workflow(key, workflow_name, workflow)\n self.add_child_workflows()\n break\n else:\n logger.warning('Workflow {0} not found in playbook {0}. '\n 'Cannot load.'.format(workflow_name, playbook_name))\n return False\n return True", "def class_name(name: str) -> str:\n return text.pascal_case(utils.safe_snake(name, \"type\"))", "def class_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"class_name\")", "def default_runtime_class_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"default_runtime_class_name\")", "def default_runtime_class_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"default_runtime_class_name\")", "def str_to_class(referance_name):\n return getattr(sys.modules[__name__], referance_name)", "def current_workflow():\n try:\n return current_worker_pool.workflow\n except AttributeError:\n return None", "def generate_workflow_name(self) -> str:\n repository_name = self._parsed_url.basename_without_extension\n if self._git_ref:\n workflow_name = f\"{repository_name}-{self._git_ref}\"\n else:\n workflow_name = repository_name\n return self._clean_workflow_name(workflow_name)", "def copy_workflow(self, old_playbook_name, new_playbook_name, old_workflow_name, new_workflow_name):\n workflow = self.get_workflow(old_playbook_name, old_workflow_name)\n workflow_copy = deepcopy(workflow)\n workflow_copy.playbook_name = new_playbook_name\n workflow_copy.name = new_workflow_name\n workflow_copy.playbook_name = new_playbook_name\n\n key = _WorkflowKey(new_playbook_name, new_workflow_name)\n self.workflows[key] = workflow_copy\n logger.info('Workflow copied from {0}-{1} to {2}-{3}'.format(old_playbook_name, old_workflow_name,\n new_playbook_name, new_workflow_name))", "def add_workflow(self, workflow):\n self.workflow_manager.add_workflow(workflow)", "def __init__(self, name, class_type, config):\n # Call constructors of parent classes.\n Component.__init__(self, name, class_type, config)\n Module.__init__(self)\n\n # Flag indicating 
whether the model is frozen or not.\n self.frozen = False", "def __init__(\n self,\n parsed_url: ParsedUrl,\n output_dir: str,\n spec: Optional[str] = None,\n workflow_name: Optional[str] = None,\n ):\n super().__init__(parsed_url, output_dir, spec)\n self._archive_name = self._parsed_url.basename\n if workflow_name:\n self._workflow_name = self._clean_workflow_name(workflow_name)\n else:\n self._workflow_name = self._clean_workflow_name(\n self._parsed_url.basename_without_extension\n )", "def __init__(__self__, *,\n allowed_runtime_class_names: pulumi.Input[Sequence[pulumi.Input[str]]],\n default_runtime_class_name: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"allowed_runtime_class_names\", allowed_runtime_class_names)\n if default_runtime_class_name is not None:\n pulumi.set(__self__, \"default_runtime_class_name\", default_runtime_class_name)", "def __init__(self, name, process_step=None, **attributes):\n self.name = name\n self.process_step = process_step\n # Check the config file for the process metadata if process step not provided\n if process_step is None:\n cfg_def = cfg.read_procedures()[name]\n self.process_step = cfg_def['process_step']\n attributes = cfg_def.copy()\n \n\n #self.source = source\n self.attributes = attributes\n #self.attributes['standard_name'] = 'source'", "def test_validate_sub_process_class_plugins(ctx, common_relax_workchain):\n from aiida_common_workflows.plugins import get_entry_point_name_from_class\n assert eos.validate_sub_process_class(get_entry_point_name_from_class(common_relax_workchain).name, ctx) is None", "def test_get_workflow_definition_xml(self):\n pass", "def __init__(self, workflow, **kwds):\n self.kwds = kwds\n self.url = self.get('url', 'cmsweb.cern.ch')\n WorkflowManager.__init__(self, workflow, self.url)\n self.workload = getWorkload(self.url, workflow)\n self.cacheID = self.winfo.get('StepOneConfigCacheID', '')\n self.config = getConfig(self.url, self.cacheID)\n self.pileup_dataset = self._pileup_dataset()\n self.priority = self._priority()\n self.era = self.get('era', 'Summer12')\n self.lfn = self.get('lfn', '/store/mc')\n self.special_name = self.get('specialName', '')\n self.max_rss = self.get('maxRSS', 2300000)\n self.max_vsize = self.get('maxVSize', 4100000000)\n self.input_dataset = ''\n self.pileup_scenario = ''\n self.global_tag = self.get('globalTag', '')\n self.campaign = self.get('campaign', '')\n self.max_merge_events = self.get('maxMergeEvents', 50000)\n self.activity = self.get('activity', 'reprocessing')\n self.restrict = self.get('restrict', 'None')\n self.site_use = self.get('site', None)\n self.site_cust = self.get('site_cust', None)\n self.xrootd = self.get('xrootd', 0)\n self.ext_tag = self.get('ext', '')\n self.team = self.get('team', '')\n\n # perform various initialization\n self._init()\n\n # custom settings\n # Construct processed dataset version\n if self.pileup_scenario:\n self.pileup_scenario = self.pileup_scenario+'_' \n\n specialprocstring = kwds.get('specialName', '')\n if specialprocstring:\n self.special_name = specialprocstring + '_'\n\n # ProcessingString\n inprocstring = kwds.get('procstring', '')\n if inprocstring:\n self.procstring = inprocstring\n else:\n self.procstring = self.special_name + self.pileup_scenario +\\\n self.global_tag + self.ext_tag\n\n # ProcessingVersion\n inprocversion = kwds.get('procversion', '')\n if inprocversion:\n self.procversion = inprocversion\n else:\n self.procversion = self.dataset_version(self.era, self.procstring)", "def 
test_workflows_get(self):\n pass", "def test_workflows_get(self):\n pass", "def _create_Work(classname, dataclass):\n globals()[classname] = type(classname, (Work, dataclass), {})", "def process_workflow(\n workflow_type, decision, settings, logger, client, token, maximum_page_size\n):\n # for the workflowType attempt to do the work\n if workflow_type is not None:\n\n logger.info(\"workflowType: %s\", workflow_type)\n\n # Instantiate and object for the workflow using eval\n # Build a string for the object name\n workflow_name = get_workflow_name(workflow_type)\n\n # Attempt to import the module for the workflow\n if import_workflow_class(workflow_name):\n # Instantiate the workflow object\n workflow_object = get_workflow_object(\n workflow_name,\n settings,\n logger,\n client,\n token,\n decision,\n maximum_page_size,\n )\n # Process the workflow\n invoke_do_workflow(workflow_name, workflow_object, logger)\n else:\n logger.info(\"error: could not load object %s\\n\", workflow_name)", "def invoke_do_workflow(workflow_name, workflow_object, logger):\n try:\n success = workflow_object.do_workflow()\n except Exception:\n success = None\n logger.error(\"error processing workflow %s\", workflow_name, exc_info=True)\n\n # Print the result to the log\n if success:\n logger.info(\"%s success %s\" % (workflow_name, success))", "def add_wf(self,wfname):\n wf = Workflow()\n if not wf.is_tag_valid(wfname): \n raise pawstools.WfNameError(wf.tag_error_message(wfname))\n wf.message_callback = self.logmethod\n self.workflows[wfname] = wf", "def __init__(__self__, *,\n allowed_runtime_class_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n default_runtime_class_name: Optional[pulumi.Input[str]] = None):\n if allowed_runtime_class_names is not None:\n pulumi.set(__self__, \"allowed_runtime_class_names\", allowed_runtime_class_names)\n if default_runtime_class_name is not None:\n pulumi.set(__self__, \"default_runtime_class_name\", default_runtime_class_name)", "def test_remove_workflow_definitions_in_job(self):\n pass", "def test_cron_workflow_service_create_cron_workflow(self):\n pass", "def get_python_classname(raw_classname):\n class_name = raw_classname.replace(\" \",\"\")\n class_name = class_name.replace(\"-\",\"\")\n return class_name", "def find_class(self, class_name: str) -> Type:\n pass", "def __create_classname(self, fullname):\n return PACKAGE_NAME + \".\" + fullname", "def test_cron_workflow_service_get_cron_workflow(self):\n pass", "def get_cls_for(obj_type):\n return {\n \"workflow\": Workflow\n }[obj_type]", "def create_yaml_workflow_schema():\n reana_yaml_schema = \\\n '''\n version: 0.4.0\n inputs:\n files:\n - code/helloworld.py\n - inputs/names.txt\n parameters:\n sleeptime: 2\n inputfile: inputs/names.txt\n helloworld: code/helloworld.py\n outputfile: outputs/greetings.txt\n outputs:\n files:\n - outputs/greetings.txt\n workflow:\n type: serial\n specification:\n steps:\n - environment: 'python:2.7'\n commands:\n - python \"${helloworld}\" --sleeptime ${sleeptime} \\\n --inputfile \"${inputfile}\" --outputfile \"${outputfile}\"\n '''\n return reana_yaml_schema", "def __init__(self):\n self.label = \"Stream Network to RAPID\"\n self.description = (\"Processes stream network data into files for RAPID\")\n self.canRunInBackground = False\n self.category = \"Workflows\"", "def __tool_name__(cls):", "def import_classifier(name):\n classinput=open(name,'rb')\n main_class=load(classinput)\n classinput.close()\n return main_class", "def 
test_update_flow_classifier_name(self):\n resource = 'flow_classifier'\n cmd = fc.FlowClassifierUpdate(test_cli20.MyApp(sys.stdout), None)\n self._test_update_resource(resource, cmd, 'myid',\n ['myid', '--name', 'myname'],\n {'name': 'myname'})", "def get_class(self, name):\n raise NotImplementedError", "def do_workflow(self, arg=None):\n\n def add_steps_to_workflow(curr_flow):\n while True:\n cmd_call = simple_input('Please choose a command to add to the workflow.', cmds, True)\n if cmd_call not in ['DONE', 'EXIT']:\n if self.is_output_cmd(cmd_call):\n curr_flow.add_output(cmd_call)\n else:\n curr_flow.add_step(cmd_call)\n cmds.pop(cmds.index(cmd_call))\n\n _conf = simple_input('Do you want to configure this command?', ['Y','N'], True) if self.is_configureable(cmd) else None\n if _conf == 'Y':\n curr_flow.configure_step(cmd_call)\n\n elif cmd_call == 'DONE':\n break\n else:\n return\n return curr_flow.has_steps()\n\n def confirm_workflow(curr_flow):\n checks = [('START', 'Start workflow?'), ('ADD', 'Do you want to add more steps?'),\n ('RESTART', 'Do you want to start over?')]\n curr_flow.draw_steps()\n for check in checks:\n _continue = simple_input(check[1], ['Y', 'N', 'EXIT'])\n if _continue == 'Y':\n return check[0]\n if _continue == 'EXIT':\n return 'EXIT'\n return 'INVALID'\n\n print('Preparing Workflow Wizard...')\n options = sorted(self.cmds + self.output_cmds)\n from smores.workflow import Workflow\n workflow = Workflow(self)\n target, load_type = self.validate_args('', 'file')\n if target:\n _l = True if target in self.inputs['files'].keys() else False\n workflow.add_target(target, load_type, _l)\n print('Please choose the commands you would like to add to the workflow.'\n '\\nCommands will be executed in the order in which they are added.'\n '\\n\\nPlease note that some commands have dependencies that must be satisfied. 
An overview of '\n 'command dependencies is available on the main SMOREs wiki on Github')\n print('\\nAvailable Commands for WorkFlow')\n cmds = []\n for i, _o in enumerate(options):\n print('{1}'.format(i, _o))\n cmds.append(_o)\n cmds.append('DONE')\n steps_added = add_steps_to_workflow(workflow)\n while steps_added:\n _run = confirm_workflow(workflow)\n if _run == 'START':\n break\n elif _run == 'ADD':\n _ = add_steps_to_workflow(workflow)\n elif _run == 'RESTART':\n self.do_workflow('')\n else:\n return\n workflow.run()\n print('Workflow has completed.')\n return\n\n else:\n print('Workflows currently have to be setup without the file already being loaded.')\n return", "def __init__(self, name):\r\n super(Module, self).__init__()\r\n self.name = name", "def __init__(self, name, jar, main_class=None,\r\n action_on_failure='TERMINATE_JOB_FLOW', step_args=None):\r\n self.name = name\r\n self._jar = jar\r\n self._main_class = main_class\r\n self.action_on_failure = action_on_failure\r\n\r\n if isinstance(step_args, basestring):\r\n step_args = [step_args]\r\n\r\n self.step_args = step_args", "def test_get_workflow_definition_by_process_id(self):\n pass", "def test_workflows_list(self):\n pass", "def __init__(self, wf_ex):\n self.wf_ex = wf_ex\n self.wf_spec = spec_parser.get_workflow_spec(wf_ex.spec)", "def test_remove_workflow_definition(self):\n pass", "def __init__(self, name, has_python, skip_java, skip_scala, root_dir,\r\n checkstyle_suppression_files, debug_port, targets, transitive, workunit_factory):\r\n\r\n self.name = name\r\n self.root_dir = root_dir\r\n self.targets = OrderedSet(targets)\r\n self.transitive = transitive\r\n self.workunit_factory = workunit_factory\r\n\r\n self.sources = []\r\n self.py_sources = []\r\n self.py_libs = []\r\n self.resource_extensions = set()\r\n\r\n self.has_python = has_python\r\n self.skip_java = skip_java\r\n self.skip_scala = skip_scala\r\n self.has_scala = False\r\n self.has_tests = False\r\n\r\n self.checkstyle_suppression_files = checkstyle_suppression_files # Absolute paths.\r\n self.debug_port = debug_port\r\n\r\n self.internal_jars = OrderedSet()\r\n self.external_jars = OrderedSet()", "def __init__(self, name, can_perform, logic, required):\n\n self.name = name\n self.can_perform = can_perform\n self.logic = logic\n self.required = required", "def get_class_functional_name(name):\n name = _strip_class_name(name)\n return name", "def get_model(name):\n # Evil reflection\n model_name = name.lower()\n model_module = importlib.import_module('.'+model_name, cfg.model_pck)\n [(_, model_class)] = inspect.getmembers(\n model_module,\n lambda c: inspect.isclass(c) and sys.modules[c.__module__] == model_module)\n\n tf.logging.debug('Found class %s', model_class)\n return model_class", "def __init__(self, workflow_name, input_params, excutable_engines, *args, **kwargs): # NOQA\n super().__init__(*args, **kwargs)\n self.fields[\"run_name\"] = forms.CharField(max_length=256,\n required=True,\n initial=\"{} {}\".format(workflow_name, timezone.now().strftime(\"%Y-%m-%d %H:%M:%S\"))) # NOQA\n self.fields[\"execution_engine\"].choices = [[engine.token, engine.name] for engine in excutable_engines] # NOQA\n for input_param in input_params:\n if input_param[\"type\"] == \"boolean\":\n self.fields[input_param[\"label\"]] = forms.BooleanField()\n elif input_param[\"type\"] in [\"int\", \"long\", \"double\"]:\n self.fields[input_param[\"label\"]] = forms.IntegerField()\n elif input_param[\"type\"] == \"float\":\n self.fields[input_param[\"label\"]] = 
forms.FloatField()\n elif input_param[\"type\"] in [\"string\", \"File\", \"Directory\"]:\n self.fields[input_param[\"label\"]] = forms.CharField()\n self.fields[input_param[\"label\"]].required = True\n if input_param[\"default\"] is not None:\n self.fields[input_param[\"label\"]].initial = input_param[\"default\"] # NOQA\n if input_param[\"doc\"] is not None:\n self.fields[input_param[\"label\"]].help_text = input_param[\"doc\"] # NOQA\n for field in self.fields.keys():\n self.fields[field].widget.attrs[\"placeholder\"] = \"\"", "def make_model(name):\n module_path = '{0}.{1}'.format(matchers.__name__, name)\n module = __import__(module_path, fromlist=[''])\n classes = inspect.getmembers(module, inspect.isclass)\n classes = [c for c in classes if c[1].__module__ == module_path]\n classes = [c[1] for c in classes if c[0].lower() == name.lower()]\n assert len(classes) == 1\n return classes[0]", "def inferelator_workflow(regression=RegressionWorkflow, workflow=WorkflowBase):\n return create_inferelator_workflow(regression=regression, workflow=workflow)()", "def load_workflow_file(factory_registry, file_path):\n\n reader = WorkflowReader(factory_registry)\n\n workflow_model = reader.read(file_path)\n\n return workflow_model", "def python_name(self):\n return self.requirement.name", "def get_workflow_type(decision):\n try:\n return decision[\"workflowType\"][\"name\"]\n except KeyError:\n # No workflowType found\n return None", "def __init__(self, name: str, python_type: type):\n self.name = name\n self.python_type = python_type", "def test_component_class_and_module(self):\r\n\t\tself.assertTrue(self._configuration_[\"AddWordDefinitionTask\"].class_name() == \"AddWordDefinitionTask\" and\r\n\t\t self._configuration_[\"AddWordDefinitionTask\"].module_name() == \"TestPlugins\")", "def __init__(self, name: str = 'ret_val') -> None:\n self.name = name\n self.class_name = \"\"", "def setup_class(klass):", "def setup_class(klass):", "def __init__(self, name):\n super(Module, self).__init__()\n self.name = name", "def sax_workflow(self):\n # This code is bad mostly because the versioning system is\n # bad. 
It should be refactored when the versioning system will\n # be refactored.\n options = self.getOptions()\n if not options.include_workflow:\n return\n\n def sax_workflow_all_versions():\n only_viewable = options.only_viewable\n only_previewable = options.only_previewable\n # Previewable versions.\n if not only_viewable:\n version = self.context.get_unapproved_version_data()\n if version[0]:\n self.sax_workflow_version(version, 'unapproved')\n if only_previewable:\n return\n version = self.context.get_approved_version_data()\n if version[0]:\n self.sax_workflow_version(version, 'approved')\n if only_previewable:\n return\n\n # Public versions\n version = self.context.get_public_version_data()\n if version[0]:\n self.sax_workflow_version(version, 'public')\n if only_previewable:\n return\n\n # Old versions\n if only_previewable or not only_viewable:\n previous_versions = self.context.get_previous_versions_data()\n if only_previewable:\n if previous_versions and previous_versions[-1][0]:\n self.sax_workflow_version(\n previous_versions[-1], 'closed')\n return\n for version in previous_versions:\n if version[0]:\n self.sax_workflow_version(version, 'closed')\n\n self.startElement('workflow')\n sax_workflow_all_versions()\n self.endElement('workflow')", "def build(self, name, registry=None, message=None, *, workflow=...):\n registry = get_package_registry(registry)\n self._validate_with_workflow(registry=registry, workflow=workflow, name=name, message=message)\n return self._build(name=name, registry=registry, message=message)", "def create_workflow_file(self, workflow: Workflow, props: PropertySet):", "def create(*, db_session, workflow_in: WorkflowCreate) -> Workflow:\n project = project_service.get_by_name_or_raise(\n db_session=db_session, project_in=workflow_in.project\n )\n plugin_instance = plugin_service.get_instance(\n db_session=db_session, plugin_instance_id=workflow_in.plugin_instance.id\n )\n workflow = Workflow(\n **workflow_in.dict(exclude={\"plugin_instance\", \"project\"}),\n plugin_instance=plugin_instance,\n project=project,\n )\n\n db_session.add(workflow)\n db_session.commit()\n return workflow", "def get_SrcClass(args):\n return Reactome(args)", "def include(self, name, outDir=True, asWorkflow=\"\"):\n\n \"\"\" Set workdir\"\"\"\n if outDir:\n basename = extensionless(os.path.basename(name))\n if asWorkflow:\n self.workdir = os.path.join(self.workdir, asWorkflow)\n self.moduleDir = basename\n\n \"\"\" Include File \"\"\" \n self.workflow.include(name)", "def import_activity_class(activity_name, reload=True):\n try:\n module_name = \"activity.\" + activity_name\n importlib.import_module(module_name)\n return True\n except ImportError as e:\n return False", "def find_in_workflows(self, name):\n wflows = []\n for obj in self.__dict__.values():\n if is_instance(obj, Driver) and name in obj.workflow:\n wflows.append((obj.workflow, obj.workflow.index(name)))\n return wflows" ]
[ "0.7437935", "0.66066194", "0.6272784", "0.62531227", "0.5922669", "0.59157443", "0.58219445", "0.5800828", "0.57692146", "0.5733996", "0.5662112", "0.56386393", "0.55851597", "0.5581028", "0.5571103", "0.55317545", "0.55233943", "0.5494293", "0.5466347", "0.54425746", "0.5432957", "0.53871465", "0.5334043", "0.5327022", "0.5325657", "0.5277995", "0.526299", "0.5234133", "0.5234133", "0.5232919", "0.5208759", "0.5162786", "0.5125997", "0.5124574", "0.51102495", "0.51041716", "0.51041716", "0.509353", "0.506685", "0.5035595", "0.5025648", "0.50238776", "0.5020852", "0.50048494", "0.4989125", "0.4987386", "0.49861065", "0.49843976", "0.49789152", "0.49678278", "0.49678278", "0.49675405", "0.4964306", "0.4939436", "0.49350715", "0.49346226", "0.49241287", "0.49228832", "0.4916318", "0.49143994", "0.4913284", "0.4903156", "0.48981848", "0.48861516", "0.488544", "0.48776573", "0.487493", "0.48651257", "0.48612964", "0.48611364", "0.4854335", "0.48516148", "0.48515302", "0.48394814", "0.48246938", "0.4809834", "0.48081473", "0.48059583", "0.48041573", "0.48029387", "0.47967732", "0.47951326", "0.4783166", "0.47822937", "0.47757125", "0.4773478", "0.47708032", "0.47683713", "0.47670007", "0.4764428", "0.4764428", "0.47623608", "0.4759841", "0.47570768", "0.47542506", "0.4754145", "0.4749344", "0.47308153", "0.47307175", "0.4722429" ]
0.6346265
2
Users can specify environment variables in their config file which will be set in the driver and worker environments. Make sure those variables are set during the workflow, but not after.
def test_workflow_environment(): config = { "workflow-name": "workflow", "cluster-type": CLUSTER_TYPE, "environment-variables": { "FOO": "BAR", "FOO2": "BAR2" } } template_dir = tempfile.mkdtemp(suffix="test-workflow-environment-template") with open(f"{template_dir}/workflow.yaml", 'w') as f: yaml.dump(config, f) @checkrun def execute(workflow_inst): def _check(): assert os.environ['FOO'] == "BAR" assert os.environ["OMP_NUM_THREADS"] == '1' return True # driver env _check() # worker env assert all(workflow_inst.run_on_each_worker(_check).values()) os.environ['FOO'] = 'ORIGINAL_FOO' _execution_dir, _workflow = launch_flow(template_dir, 1, _custom_execute_fn=execute) assert execute.didrun # Environment is restored after execution is finished. assert os.environ['FOO'] == 'ORIGINAL_FOO' assert 'FOO2' not in os.environ
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def env_config():\n # setup\n env = {'ELB_GCP_PROJECT': 'expected-gcp-project',\n 'ELB_GCP_REGION': 'expected-gcp-region',\n 'ELB_GCP_ZONE': 'expected-gcp-zone',\n 'ELB_BATCH_LEN': '93',\n 'ELB_CLUSTER_NAME': 'expected-cluster-name',\n 'ELB_RESULTS': 'gs://expected-results',\n 'ELB_USE_PREEMPTIBLE': 'true',\n 'ELB_BID_PERCENTAGE': '91'}\n\n for var_name in env:\n os.environ[var_name] = str(env[var_name])\n\n yield env\n\n # cleanup\n for var_name in env:\n # os.unsetenv does not work on every system\n del os.environ[var_name]", "def test_local_env_pass_explicit(fileutils) -> None:\n exp_value = str(uuid.uuid4())\n env_key = \"test_local_env_pass_explicit\"\n\n assert env_key not in os.environ\n\n test_dir = fileutils.make_test_dir()\n script = fileutils.get_test_conf_path(\"check_env.py\")\n\n exp_dir = f\"{test_dir}/exp\"\n os.makedirs(exp_dir)\n exp = Experiment(\"LRZ\", exp_path=exp_dir, launcher=\"slurm\")\n\n exe_name = \"python\"\n exe_args = [script, env_key]\n\n # Create the RunSettings associated with the workload manager (WLM) run command\n run_args = {\"--nodes\": 1, \"--ntasks\": 1, \"--time\": \"00:01:00\"}\n env_vars = {env_key: exp_value} # <-- explicitly passing a new env var to task\n settings = RunSettings(\n exe_name, exe_args, run_command=\"srun\", run_args=run_args, env_vars=env_vars\n )\n app_name = \"echo_app\"\n app = exp.create_model(app_name, settings)\n\n # generate the experiment structure and start the model\n exp.generate(app, overwrite=True)\n exp.start(app, block=True, summary=False)\n\n assert env_key in settings.env_vars\n\n with open(f\"{exp_dir}/{app_name}/{app_name}.out\") as app_outfile:\n app_output = app_outfile.read()\n \n # verify application was able to access the env var\n assert f\"{env_key}=={exp_value}\" in app_output", "def set_envs(self):\n # pylint:disable=protected-access\n # Need to call sys.__getframe() to get the filename and method/func\n # for logging information.\n\n # Useful for logging\n # Logging output: TIME UTC |TYPE (DEBUG, INFO, WARNING, etc.) 
|\n # [File : function]| Message\n cur_filename = sys._getframe().f_code.co_filename\n cur_function = sys._getframe().f_code.co_name\n\n self.logger.info('Setting env variables from config file...')\n # Set all the environment variables that are needed by the\n # MET config file.\n\n tmp_amodel = self.c_dict['AMODEL']\n if tmp_amodel:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_amodel_str = str(tmp_amodel).replace(\"\\'\", \"\\\"\")\n tmp_amodel = ''.join(tmp_amodel_str.split())\n self.add_env_var('AMODEL', tmp_amodel)\n else:\n self.add_env_var('AMODEL', \"[]\")\n\n tmp_bmodel = self.c_dict['BMODEL']\n if tmp_bmodel:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_bmodel_str = str(tmp_bmodel).replace(\"\\'\", \"\\\"\")\n tmp_bmodel = ''.join(tmp_bmodel_str.split())\n self.add_env_var('BMODEL', tmp_bmodel)\n else:\n self.add_env_var('BMODEL', \"[]\")\n\n tmp_desc = self.c_dict['DESC']\n if tmp_desc:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_desc_str = str(tmp_desc).replace(\"\\'\", \"\\\"\")\n tmp_desc = ''.join(tmp_desc_str.split())\n self.add_env_var('DESC', tmp_desc)\n else:\n self.add_env_var('DESC', \"[]\")\n\n tmp_storm_id = self.c_dict['STORM_ID']\n if tmp_storm_id:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_storm_id_str = str(tmp_storm_id).replace(\"\\'\", \"\\\"\")\n tmp_storm_id = ''.join(tmp_storm_id_str.split())\n self.add_env_var('STORM_ID', tmp_storm_id)\n else:\n self.add_env_var('STORM_ID', \"[]\")\n\n tmp_basin = self.c_dict['BASIN']\n if tmp_basin:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_basin_str = str(tmp_basin).replace(\"\\'\", \"\\\"\")\n tmp_basin = ''.join(tmp_basin_str.split())\n self.add_env_var('BASIN', tmp_basin)\n else:\n self.add_env_var('BASIN', \"[]\")\n\n tmp_cyclone = self.c_dict['CYCLONE']\n if tmp_cyclone:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_cyclone_str = str(tmp_cyclone).replace(\"\\'\", \"\\\"\")\n tmp_cyclone = ''.join(tmp_cyclone_str.strip())\n self.add_env_var('CYCLONE', tmp_cyclone)\n else:\n self.add_env_var('CYCLONE', \"[]\")\n\n tmp_storm_name = self.c_dict['STORM_NAME']\n if tmp_storm_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_storm_name_str = str(tmp_storm_name).replace(\"\\'\", \"\\\"\")\n tmp_storm_name = ''.join(tmp_storm_name_str.strip())\n self.add_env_var('STORM_NAME', tmp_storm_name)\n else:\n self.add_env_var('STORM_NAME', \"[]\")\n\n if self.c_dict['INIT_BEG']:\n self.add_env_var('INIT_BEG', self.c_dict['INIT_BEG'])\n else:\n self.add_env_var('INIT_BEG', \"\")\n\n if self.c_dict['INIT_END']:\n self.add_env_var('INIT_END', self.c_dict['INIT_END'])\n else:\n self.add_env_var('INIT_END', \"\")\n\n tmp_init_include = self.c_dict['INIT_INCLUDE']\n if tmp_init_include:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_include_str = str(tmp_init_include).replace(\"\\'\", \"\\\"\")\n tmp_init_include = ''.join(tmp_init_include_str.strip())\n self.add_env_var('INIT_INCLUDE', tmp_init_include)\n else:\n self.add_env_var('INIT_INCLUDE', \"[]\")\n\n tmp_init_exclude = self.c_dict['INIT_EXCLUDE']\n if tmp_init_exclude:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_exclude_str = str(tmp_init_exclude).replace(\"\\'\", \"\\\"\")\n tmp_init_exclude = 
''.join(tmp_init_exclude_str.strip())\n self.add_env_var('INIT_EXCLUDE', tmp_init_exclude)\n else:\n self.add_env_var('INIT_EXCLUDE', \"[]\")\n\n tmp_init_hour = self.c_dict['INIT_HOUR']\n if tmp_init_hour:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_hour_str = str(tmp_init_hour).replace(\"\\'\", \"\\\"\")\n tmp_init_hour = ''.join(tmp_init_hour_str.split())\n self.add_env_var('INIT_HOUR', tmp_init_hour)\n else:\n self.add_env_var('INIT_HOUR', \"[]\")\n\n tmp_valid_begin = self.c_dict['VALID_BEG']\n if tmp_valid_begin:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_begin_str = str(tmp_valid_begin).replace(\"\\'\", \"\\\"\")\n tmp_valid_begin = ''.join(tmp_valid_begin_str.strip())\n self.add_env_var('VALID_BEG', tmp_valid_begin)\n else:\n self.add_env_var('VALID_BEG', '')\n\n tmp_valid_end = self.c_dict['VALID_END']\n if tmp_valid_end:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_end_str = str(tmp_valid_end).replace(\"\\'\", \"\\\"\")\n tmp_valid_end = ''.join(tmp_valid_end_str.strip())\n self.add_env_var('VALID_END', tmp_valid_end)\n else:\n self.add_env_var('VALID_END', \"\")\n\n tmp_valid_include = self.c_dict['VALID_INCLUDE']\n if tmp_valid_include:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_include_str = str(tmp_valid_include).replace(\"\\'\", \"\\\"\")\n tmp_valid_include = ''.join(tmp_valid_include_str.strip())\n self.add_env_var('VALID_INCLUDE', tmp_valid_include)\n else:\n self.add_env_var('VALID_INCLUDE', \"[]\")\n\n tmp_valid_exclude = self.c_dict['VALID_EXCLUDE']\n if tmp_valid_exclude:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_exclude_str = str(tmp_valid_exclude).replace(\"\\'\", \"\\\"\")\n tmp_valid_exclude = ''.join(tmp_valid_exclude_str.strip())\n self.add_env_var('VALID_EXCLUDE', tmp_valid_exclude)\n else:\n self.add_env_var('VALID_EXCLUDE', \"[]\")\n\n tmp_valid_hour = self.c_dict['VALID_HOUR']\n if tmp_valid_hour:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_hour_str = str(tmp_valid_hour).replace(\"\\'\", \"\\\"\")\n tmp_valid_hour = ''.join(tmp_valid_hour_str.strip())\n self.add_env_var('VALID_HOUR', tmp_valid_hour)\n else:\n self.add_env_var('VALID_HOUR', \"[]\")\n\n tmp_lead_req = self.c_dict['LEAD_REQ']\n if tmp_lead_req:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_lead_req_str = str(tmp_lead_req).replace(\"\\'\", \"\\\"\")\n tmp_lead_req = ''.join(tmp_lead_req_str.strip())\n self.add_env_var('LEAD_REQ', tmp_lead_req)\n else:\n self.add_env_var('LEAD_REQ', \"[]\")\n\n tmp_lead = self.c_dict['LEAD']\n if tmp_lead:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_lead_str = str(tmp_lead).replace(\"\\'\", \"\\\"\")\n tmp_lead = ''.join(tmp_lead_str.strip())\n self.add_env_var('LEAD', tmp_lead)\n else:\n self.add_env_var('LEAD', \"[]\")\n\n tmp_init_mask = self.c_dict['INIT_MASK']\n if tmp_init_mask:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_mask_str = str(tmp_init_mask).replace(\"\\'\", \"\\\"\")\n tmp_init_mask = ''.join(tmp_init_mask_str.strip())\n self.add_env_var('INIT_MASK', tmp_init_mask)\n else:\n self.add_env_var('INIT_MASK', \"[]\")\n\n tmp_valid_mask = self.c_dict['VALID_MASK']\n if tmp_valid_mask:\n # Replace any single quotes with double quotes and remove any\n 
# whitespace\n tmp_valid_mask_str = str(tmp_valid_mask).replace(\"\\'\", \"\\\"\")\n tmp_valid_mask = ''.join(tmp_valid_mask_str.strip())\n self.add_env_var('VALID_MASK', tmp_valid_mask)\n else:\n self.add_env_var('VALID_MASK', \"[]\")\n\n tmp_track_watch_warn = self.c_dict['TRACK_WATCH_WARN']\n if tmp_track_watch_warn:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_track_watch_warn_str = str(tmp_track_watch_warn).replace(\"\\'\",\n \"\\\"\")\n tmp_track_watch_warn = ''.join(tmp_track_watch_warn_str.strip())\n self.add_env_var('TRACK_WATCH_WARN', tmp_track_watch_warn)\n else:\n self.add_env_var('TRACK_WATCH_WARN', \"[]\")\n\n tmp_column_thresh_name = self.c_dict['COLUMN_THRESH_NAME']\n if tmp_column_thresh_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_thresh_name_str = str(tmp_column_thresh_name).replace(\n \"\\'\", \"\\\"\")\n tmp_column_thresh_name = ''.join(tmp_column_thresh_name_str.strip())\n self.add_env_var('COLUMN_THRESH_NAME', tmp_column_thresh_name)\n else:\n self.add_env_var('COLUMN_THRESH_NAME', \"[]\")\n\n tmp_column_thresh_val = self.c_dict['COLUMN_THRESH_VAL']\n if tmp_column_thresh_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_thresh_val_str = str(tmp_column_thresh_val).replace(\"\\'\",\n \"\\\"\")\n tmp_column_thresh_val = ''.join(tmp_column_thresh_val_str.strip())\n self.add_env_var('COLUMN_THRESH_VAL', tmp_column_thresh_val)\n else:\n self.add_env_var('COLUMN_THRESH_VAL', \"[]\")\n\n tmp_column_str_name = self.c_dict['COLUMN_STR_NAME']\n if tmp_column_str_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_str_name = str(tmp_column_str_name).replace(\"\\'\",\n \"\\\"\")\n tmp_column_str_name = ''.join(tmp_column_str_name.strip())\n self.add_env_var('COLUMN_STR_NAME', tmp_column_str_name)\n else:\n self.add_env_var('COLUMN_STR_NAME', \"[]\")\n\n tmp_column_str_val = self.c_dict['COLUMN_STR_VAL']\n if tmp_column_str_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_str_val_str = str(tmp_column_str_val).replace(\"\\'\", \"\\\"\")\n tmp_column_str_val = ''.join(tmp_column_str_val_str.strip())\n self.add_env_var('COLUMN_STR_VAL', tmp_column_str_val)\n else:\n self.add_env_var('COLUMN_STR_VAL', \"[]\")\n\n tmp_init_thresh_name = self.c_dict['INIT_THRESH_NAME']\n if tmp_init_thresh_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_thresh_name_str = str(tmp_init_thresh_name).replace(\"\\'\",\n \"\\\"\")\n tmp_init_thresh_name = ''.join(tmp_init_thresh_name_str.strip())\n\n self.add_env_var('INIT_THRESH_NAME', tmp_init_thresh_name)\n\n else:\n self.add_env_var('INIT_THRESH_NAME', \"[]\")\n\n tmp_init_thresh_val = self.c_dict['INIT_THRESH_VAL']\n if tmp_init_thresh_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_thresh_val_str = str(tmp_init_thresh_val).replace(\"\\'\",\n \"\\\"\")\n tmp_init_thresh_val = ''.join(tmp_init_thresh_val_str.strip())\n self.add_env_var('INIT_THRESH_VAL', tmp_init_thresh_val)\n else:\n self.add_env_var('INIT_THRESH_VAL', \"[]\")\n\n tmp_init_str_name = self.c_dict['INIT_STR_NAME']\n if tmp_init_str_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_str_name_str = str(tmp_init_str_name).replace(\"\\'\", \"\\\"\")\n tmp_init_str_name = ''.join(tmp_init_str_name_str.strip())\n 
self.add_env_var('INIT_STR_NAME', tmp_init_str_name)\n else:\n self.add_env_var('INIT_STR_NAME', \"[]\")\n\n tmp_init_str_val = self.c_dict['INIT_STR_VAL']\n if tmp_init_str_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_str_val_str = str(tmp_init_str_val).replace(\"\\'\", \"\\\"\")\n tmp_init_str_val = ''.join(tmp_init_str_val_str.strip())\n self.add_env_var('INIT_STR_VAL', tmp_init_str_val)\n else:\n self.add_env_var('INIT_STR_VAL', \"[]\")\n\n # boolean values for WATER_ONLY\n if self.c_dict['WATER_ONLY']:\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('WATER_ONLY', flag)\n\n # boolean value for LANDFALL\n if self.c_dict['LANDFALL']:\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('LANDFALL', flag)\n\n if self.c_dict['LANDFALL_BEG']:\n self.add_env_var('LANDFALL_BEG',\n self.c_dict['LANDFALL_BEG'])\n else:\n # Set to default\n self.add_env_var('LANDFALL_BEG', '-24')\n\n if self.c_dict['LANDFALL_END']:\n self.add_env_var('LANDFALL_END',\n self.c_dict['LANDFALL_END'])\n else:\n # Set to default\n self.add_env_var('LANDFALL_END', '00')\n\n # boolean value for MATCH_POINTS\n if self.c_dict['MATCH_POINTS'] == 'true':\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('MATCH_POINTS', flag)\n\n if self.c_dict['CONFIG_FILE']:\n self.add_env_var('CONFIG_FILE',\n self.c_dict['CONFIG_FILE'])\n else:\n self.log_error(\n cur_filename + '|' + cur_function +\n ': no MET TC-Stat config file found. Exiting')\n sys.exit(1)\n\n jobs_list_tmp = self.c_dict['JOBS_LIST']\n if jobs_list_tmp:\n # MET is expecting a string\n jobs_list_str = '\"' + jobs_list_tmp + '\"'\n self.add_env_var('JOBS', jobs_list_str)\n else:\n self.log_error('No jobs list defined. Please check your METplus'\n 'config file. 
Exiting...')\n sys.exit(1)\n return 0", "def _setup_env(self):\n\n os.environ['GIT_NAME'] = statiki.GIT_NAME\n os.environ['GIT_EMAIL'] = statiki.GIT_EMAIL\n os.environ['GH_TOKEN'] = 'this-is-a-bogus-token:password'\n os.environ['TRAVIS_REPO_SLUG'] = TEST_REPO\n\n return", "def environment_vars_set():\n os.environ[\"YESSSSMS_LOGIN\"] = \"03211234567\"\n os.environ[\"YESSSSMS_PASSWD\"] = \"MySecr3t\"\n os.environ[\"YESSSSMS_PROVIDER\"] = \"goood\"\n os.environ[\"YESSSSMS_RECIPIENT\"] = \"066356789789\"", "def _setup_environment_and_configs(args, appengine_path):\n clusterfuzz_dir = os.path.abspath(os.path.join(args.directory, 'clusterfuzz'))\n\n # Matches startup scripts.\n os.environ['PYTHONPATH'] = ':'.join([\n os.getenv('PYTHONPATH', ''),\n appengine_path,\n os.path.join(clusterfuzz_dir, 'src'),\n ])\n\n os.environ['ROOT_DIR'] = clusterfuzz_dir\n if not os.getenv('BOT_NAME'):\n os.environ['BOT_NAME'] = args.name\n\n os.environ['LD_LIBRARY_PATH'] = '{0}:{1}'.format(\n os.path.join(clusterfuzz_dir, 'src', 'clusterfuzz', '_internal',\n 'scripts'), os.getenv('LD_LIBRARY_PATH', ''))\n\n tmpdir = os.path.join(clusterfuzz_dir, 'bot_tmpdir')\n if not os.path.exists(tmpdir):\n os.mkdir(tmpdir)\n os.environ['TMPDIR'] = tmpdir\n os.environ['BOT_TMPDIR'] = tmpdir\n\n os.environ['KILL_STALE_INSTANCES'] = 'False'\n os.environ['LOCAL_DEVELOPMENT'] = 'True'\n os.environ['DATASTORE_EMULATOR_HOST'] = constants.DATASTORE_EMULATOR_HOST\n os.environ['PUBSUB_EMULATOR_HOST'] = constants.PUBSUB_EMULATOR_HOST\n os.environ['APPLICATION_ID'] = constants.TEST_APP_ID\n\n if not os.getenv('UNTRUSTED_WORKER'):\n local_gcs_buckets_path = os.path.abspath(\n os.path.join(args.server_storage_path, 'local_gcs'))\n assert os.path.exists(local_gcs_buckets_path), (\n 'Server storage path not found, make sure to start run_server with '\n 'the same storage path.')\n\n os.environ['LOCAL_GCS_BUCKETS_PATH'] = local_gcs_buckets_path\n\n if args.android_serial:\n if not os.getenv('OS_OVERRIDE'):\n os.environ['OS_OVERRIDE'] = 'ANDROID'\n\n os.environ['ANDROID_SERIAL'] = args.android_serial", "def test_local_env_pass_implicit(fileutils) -> None:\n exp_value = str(uuid.uuid4())\n env_key = \"test_local_env_pass_implicit\"\n os.environ[env_key] = exp_value\n\n test_dir = fileutils.make_test_dir()\n exp_dir = f\"{test_dir}/exp\"\n os.makedirs(exp_dir)\n script = fileutils.get_test_conf_path(\"check_env.py\")\n\n exp = Experiment(\"LRZ\", exp_path=exp_dir, launcher=\"slurm\")\n\n exe_name = \"python\"\n exe_args = [script, env_key]\n\n # Create the RunSettings associated with the workload manager (WLM) run command\n run_args = {\"--nodes\": 1, \"--ntasks\": 1, \"--time\": \"00:01:00\"}\n # NOTE: not passing env_args into run_settings here, relying on --export=ALL default\n settings = RunSettings(exe_name, exe_args, run_command=\"srun\", run_args=run_args)\n app_name = \"echo_app\"\n app = exp.create_model(app_name, settings)\n\n # generate the experiment structure and start the model\n exp.generate(app, overwrite=True)\n exp.start(app, block=True, summary=False)\n\n assert env_key not in settings.env_vars\n os.environ.pop(env_key)\n\n with open(f\"{exp_dir}/{app_name}/{app_name}.out\") as app_outfile:\n app_output = app_outfile.read()\n \n # verify application was able to access the env var\n assert f\"{env_key}=={exp_value}\" in app_output", "def setup_environment():\n os.environ['QUEUE_OVERRIDE'] = 'LINUX_UNTRUSTED'\n os.environ['WORKER_ROOT_DIR'] = os.path.join(MNT_DIR, 'clusterfuzz')\n os.environ['WORKER_BOT_TMPDIR'] = 
os.path.join(MNT_DIR, 'tmp')\n\n if not os.path.exists(BOT_BASEDIR):\n os.mkdir(BOT_BASEDIR)", "def _setup_environment_vars(self, opts):\n # Check that these directories actually exist\n assert os.path.isdir(opts.movie_advisor_home)\n\n #if not 'install-bento' in self.actions: assert os.path.isdir(opts.bento_home)\n\n self.movie_advisor_home = opts.movie_advisor_home\n self.bento_home = opts.bento_home\n self.bento_tgz = opts.bento_tgz\n self.kiji_uri = \"kiji://.env/tutorial\"\n\n # \"express job\" takes a jar file as an argument\n assert os.path.isfile(os.path.join(self.movie_advisor_home, self.express_jar))\n\n # Set the classpath for all of the commands that we'll run\n jarsFullPaths = [os.path.join(self.movie_advisor_home, j) for j in self.jars]\n for jar in jarsFullPaths: assert os.path.isfile(jar)\n\n classpath = \":\".join(jarsFullPaths)\n os.environ['KIJI_CLASSPATH'] = classpath\n\n if opts.show_classpath:\n print(\"export KIJI_CLASSPATH=%s\" % classpath)\n sys.exit(0)", "def env_config_no_cluster():\n # setup\n env = {'ELB_GCP_PROJECT': 'expected-gcp-project',\n 'ELB_RESULTS': 'gs://expected-results'}\n\n for var_name in env:\n os.environ[var_name] = env[var_name]\n # Test that the results parameter is passed correctly and that trailing slash is discarded\n os.environ['ELB_RESULTS'] = TEST_RESULTS_BUCKET + '/'\n\n yield env\n\n # cleanup\n for var_name in env:\n # os.unsetenv does not work on every system\n del os.environ[var_name]", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def setup_env(app_dir, app_id, version, module_id, remote_api=False):\n # GCS library behaves differently when running under remote_api. It uses\n # SERVER_SOFTWARE to figure this out. See cloudstorage/common.py, local_run().\n if remote_api:\n os.environ['SERVER_SOFTWARE'] = 'remote_api'\n else:\n os.environ['SERVER_SOFTWARE'] = 'Development yo dawg/1.0'\n if app_dir:\n app_id = app_id or Application(app_dir).app_id\n version = version or 'default-version'\n if app_id:\n os.environ['APPLICATION_ID'] = app_id\n if version:\n os.environ['CURRENT_VERSION_ID'] = '%s.%d' % (\n version, int(time.time()) << 28)\n if module_id:\n os.environ['CURRENT_MODULE_ID'] = module_id", "def set_env_var(self):\n\n list_env_vars = self.config.items('environment_variables')\n for env_var in list_env_vars:\n os.environ[env_var[0].upper()] = env_var[1]", "def _environment(self):\n\n self.spark_home = self._config_default(\"spark-home\",\n self._context(SparkSubmit.SPARK_HOME, default = os.environ.get(SparkSubmit.SPARK_HOME,None)))\n assert self.spark_home, \"unable to detect SPARK_HOME. 
set SPARK_HOME as directed in the task documentation\"\n assert os.path.exists(self.spark_home), \"provided SPARK_HOME doesn't exists\"\n\n spark_config = {'cluster-config': {}, 'other-config': {}}\n if 'config-file' in self._config_keys():\n spark_config.update(yaml.load(open(self._config('config-file')))['spark-config'])\n\n self.app_config = []\n\n spark_app = self._config('app-config')\n self.app_config.append(spark_app['application'])\n app_params = SparkSubmit._flat_node_to_cmd_line_args(spark_app['params']) if 'params' in spark_app else []\n self.app_config.extend(app_params)\n if 'resources' in spark_app:\n resources = [ ['--%s' % item] + (spark_app['resources'][item]) for item in spark_app['resources'].keys() ]\n self.resources = list(itertools.chain(*resources))\n else:\n self.resources = []\n\n\n cluster_config = self._config_default('cluster-config', {})\n cluster_config.update(spark_config['cluster-config'])\n self.cluster_options = list(itertools.chain(*[ ['--%s' % item, str(cluster_config[item]) ] for item in cluster_config.keys() ]))\n\n\n ##other options\n ## cluster options\n other_options = self._config_default('other-config',{})\n cluster_config.update(spark_config['other-config'])\n self.other_options = list(itertools.chain(*[ ['--%s' % item, str(other_options[item]) ] for item in other_options.keys() ]))", "def SetupEnvironment(self):\n pass", "def _setEnv(self):\n try:\n global_env_prfix = \"/GlobalEnv/\"\n if self.etcd_key_prefix is not None:\n global_env_prfix = self.etcd_key_prefix + \"/GlobalEnv/\"\n value = self.etcd.get(global_env_prfix)\n if value[0] is not None:\n jsonConfig = json.loads(value[0].decode('utf-8'))\n for key in jsonConfig.keys():\n os.environ[key] = jsonConfig[key]\n else:\n raise TypeError(\"config manager key {} must be set as \\\n a prerequisite ...\".format(global_env_prfix))\n except Exception as e:\n self.logger.error(\"Exception raised in _setEnv\\\n with error:{}\".format(e))\n raise e", "def test_env_var_settings_set(config, environment_vars_set_wowww):\n sms = YesssSMS.YesssSMS()\n assert sms._logindata[\"login_rufnummer\"] == \"03211234567\"\n assert sms._logindata[\"login_passwort\"] == \"MySecr3t\"\n assert sms._provider == \"wowww\"\n\n os.environ[\"YESSSSMS_PROVIDER\"] = \"goood\"\n sms = YesssSMS.YesssSMS(\"123456\", \"password\")\n assert sms._logindata[\"login_rufnummer\"] == \"03211234567\"\n assert sms._logindata[\"login_passwort\"] == \"MySecr3t\"\n assert sms._provider == \"goood\"\n\n del os.environ[\"YESSSSMS_PROVIDER\"]\n sms = YesssSMS.YesssSMS(\"123456\")\n assert sms._logindata[\"login_rufnummer\"] == \"03211234567\"\n assert sms._logindata[\"login_passwort\"] == \"MySecr3t\"\n assert sms._provider == \"yesss\"\n\n del os.environ[\"YESSSSMS_LOGIN\"]\n sms = YesssSMS.YesssSMS(\"123456\", \"password\")\n assert sms._logindata[\"login_rufnummer\"] == \"123456\"\n assert sms._logindata[\"login_passwort\"] == \"password\"\n assert sms._provider == \"yesss\"", "def get_env(self):\n self.airflow_cluster_name = conf.get('core', 'cluster')\n bicommon = BICommon()\n self.env_type = bicommon.env\n\n self.parameters.update({'airflow_cluster_name': self.airflow_cluster_name, 'env': self.env_type})", "def qa():\n env.config_file = 'config_production.py'\n env.hosts = ['ombu@d2.ombuweb.com:34165']\n env.host_type = 'qa'\n env.user = 'ombu'\n env.host_webserver_user = 'www-data'\n env.host_site_path = '/mnt/main/qa/qa2/public'", "def set_env():\n env.local_dotenv_path = os.path.join(\n os.path.dirname(__file__), 
'etc/base_image/.env')\n dotenv.load_dotenv(env.local_dotenv_path)\n env.project_name = os.environ.get('PROJECT_NAME', '')\n env.project_dir = posixpath.join('/srv/images/', env.project_name)\n env.use_ssh_config = True\n\n # Bug: when setting this inside a function. Using host_string as workaround\n env.hosts = [os.environ.get('HOST_NAME', ''), ]\n env.host_string = os.environ.get('HOST_NAME', '')\n\n env.base_image_name = os.environ.get('BASE_IMAGE_NAME', '')\n env.build_dir = '/srv/build'\n env.local_path = os.path.dirname(__file__)", "def _setup_environment(environment):\n env.environment = environment\n env.project = ENVS[environment]\n env.hosts = [env.project['host']]\n env.user = env.project.get('user', env.local_user)\n env.password = env.project.get('password', None)\n # Redundant, just to easy the interpolation later on\n env.project['environment'] = environment", "def load_evironment():\n environment = Utility.load_yaml(os.getenv(\"system_file\", \"./system.yaml\"))\n for key in environment:\n if key in os.environ:\n environment[key] = os.getenv(key)\n Utility.environment = environment", "def get_environment_configuration():\n\n try:\n time_limit = int(os.getenv('AUTOBOT_POST_TIMELIMIT'))\n except TypeError:\n time_limit = None\n\n # if we're using Redis Labs\n redis_cloud_url = os.getenv('REDISCLOUD_URL')\n\n if redis_cloud_url:\n url = urlparse.urlparse(redis_cloud_url)\n redis_host = url.hostname\n redis_port = url.port\n redis_password = url.password\n else:\n redis_host = os.getenv('AUTOBOT_REDIS_URL')\n redis_port = os.getenv('AUTOBOT_REDIS_PORT')\n redis_password = None\n\n override = {\n REDDIT_USERNAME: os.getenv('AUTOBOT_REDDIT_USERNAME'),\n REDDIT_PASSWORD: os.getenv('AUTOBOT_REDDIT_PASSWORD'),\n SUBREDDIT: os.getenv('AUTOBOT_SUBREDDIT'),\n CLIENT_ID: os.getenv('AUTOBOT_CLIENT_ID'),\n CLIENT_SECRET: os.getenv('AUTOBOT_CLIENT_SECRET'),\n POST_TIMELIMIT: time_limit,\n REDIS_BACKEND: os.getenv('AUTOBOT_REDIS_BACKEND'),\n REDIS_URL: redis_host,\n REDIS_PORT: redis_port,\n REDIS_PASSWORD: redis_password,\n ROLLBAR_ACCESS_TOKEN: os.getenv('ROLLBAR_ACCESS_TOKEN'),\n ROLLBAR_ENVIRONMENT: os.getenv('ROLLBAR_ENVIRONMENT')\n }\n\n # remove all the 'None' valued things\n return {k: v for k, v in override.items() if v is not None}", "def _setup_env():\n env.home_path = os.path.expanduser('~')\n env.env_path = os.getenv('WORKON_HOME')\n\n if not env.env_path:\n warn(\"You should set the WORKON_HOME environment variable to\" \\\n \" the root directory for your virtual environments.\")\n env.env_path = env.sites_path\n\n env.project_path = join(env.sites_path, env.project_name)\n env.ve_path = join(env.env_path, env.project_name)\n env.activate_path = join(env.ve_path, 'bin', 'activate')", "def build_env(self, job, private_data_dir, private_data_files=None):\n env = super(RunJob, self).build_env(job, private_data_dir, private_data_files=private_data_files)\n if private_data_files is None:\n private_data_files = {}\n # Set environment variables needed for inventory and job event\n # callbacks to work.\n env['JOB_ID'] = str(job.pk)\n env['INVENTORY_ID'] = str(job.inventory.pk)\n if job.project:\n env['PROJECT_REVISION'] = job.project.scm_revision\n env['ANSIBLE_RETRY_FILES_ENABLED'] = \"False\"\n env['MAX_EVENT_RES'] = str(settings.MAX_EVENT_RES_DATA)\n if hasattr(settings, 'AWX_ANSIBLE_CALLBACK_PLUGINS') and settings.AWX_ANSIBLE_CALLBACK_PLUGINS:\n env['ANSIBLE_CALLBACK_PLUGINS'] = ':'.join(settings.AWX_ANSIBLE_CALLBACK_PLUGINS)\n env['AWX_HOST'] = settings.TOWER_URL_BASE\n\n # 
Create a directory for ControlPath sockets that is unique to each job\n cp_dir = os.path.join(private_data_dir, 'cp')\n if not os.path.exists(cp_dir):\n os.mkdir(cp_dir, 0o700)\n # FIXME: more elegant way to manage this path in container\n env['ANSIBLE_SSH_CONTROL_PATH_DIR'] = '/runner/cp'\n\n # Set environment variables for cloud credentials.\n cred_files = private_data_files.get('credentials', {})\n for cloud_cred in job.cloud_credentials:\n if cloud_cred and cloud_cred.credential_type.namespace == 'openstack' and cred_files.get(cloud_cred, ''):\n env['OS_CLIENT_CONFIG_FILE'] = to_container_path(cred_files.get(cloud_cred, ''), private_data_dir)\n\n for network_cred in job.network_credentials:\n env['ANSIBLE_NET_USERNAME'] = network_cred.get_input('username', default='')\n env['ANSIBLE_NET_PASSWORD'] = network_cred.get_input('password', default='')\n\n ssh_keyfile = cred_files.get(network_cred, '')\n if ssh_keyfile:\n env['ANSIBLE_NET_SSH_KEYFILE'] = ssh_keyfile\n\n authorize = network_cred.get_input('authorize', default=False)\n env['ANSIBLE_NET_AUTHORIZE'] = str(int(authorize))\n if authorize:\n env['ANSIBLE_NET_AUTH_PASS'] = network_cred.get_input('authorize_password', default='')\n\n path_vars = (\n ('ANSIBLE_COLLECTIONS_PATHS', 'collections_paths', 'requirements_collections', '~/.ansible/collections:/usr/share/ansible/collections'),\n ('ANSIBLE_ROLES_PATH', 'roles_path', 'requirements_roles', '~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles'),\n ('ANSIBLE_COLLECTIONS_PATH', 'collections_path', 'requirements_collections', '~/.ansible/collections:/usr/share/ansible/collections'),\n )\n\n config_values = read_ansible_config(os.path.join(private_data_dir, 'project'), list(map(lambda x: x[1], path_vars)))\n\n for env_key, config_setting, folder, default in path_vars:\n paths = default.split(':')\n if env_key in env:\n for path in env[env_key].split(':'):\n if path not in paths:\n paths = [env[env_key]] + paths\n elif config_setting in config_values:\n for path in config_values[config_setting].split(':'):\n if path not in paths:\n paths = [config_values[config_setting]] + paths\n paths = [os.path.join(CONTAINER_ROOT, folder)] + paths\n env[env_key] = os.pathsep.join(paths)\n\n return env", "def __init__(self):\n\n self.config = load_config()\n self.set_env_var()", "def setup_app(command, conf, vars):\n load_environment(conf.global_conf, conf.local_conf)", "def set_global_vars():\n global_vars = {\"status\": False}\n try:\n global_vars[\"Owner\"] = \"Mystique\"\n global_vars[\"Environment\"] = \"Prod\"\n global_vars[\"aws_region\"] = \"us-east-1\"\n global_vars[\"tag_name\"] = \"serverless_cloudwatch_logs_exporter\"\n global_vars[\"retention_days\"] = 35\n global_vars[\"cw_logs_to_export\"] = [\"/aws/lambda/trending_news\"]\n #global_vars[\"cw_logs_to_export\"] = os.environ.get(\"cw_logs_to_export\").split(\",\")\n global_vars[\"log_dest_bkt\"] = \"cw-log-exports-01\"\n global_vars[\"time_out\"] = 300\n global_vars[\"tsk_back_off\"] = 2\n global_vars[\"status\"] = True\n except Exception as e:\n logger.error(\"Unable to set Global Environment variables. 
Exiting\")\n global_vars[\"error_message\"] = str(e)\n return global_vars", "def get_environmentals(self):\n for k, v in utils.slurm_envs(default.SBATCH_VARS_FOR_WORKFLOW).items():\n setattr(self, k, v)", "def SetEnvironmentVars(self):\n for name, value, section in self._marchConfig():\n fetch_name = self._get_param_name(name, section)\n self._set_env_prop(fetch_name, value)", "def set_env(self, propagated_env_vars={}):\n os.environ['BUILD_ROOT'] = self.build_root\n # This is how we tell run-test.sh what set of C++ binaries to use for mini-clusters in Java\n # tests.\n for env_var_name, env_var_value in propagated_env_vars.iteritems():\n os.environ[env_var_name] = env_var_value", "def env_vars(self, env_vars):\n\n self._env_vars = env_vars", "def _prepare_publish_environments():\n env = copy.deepcopy(os.environ)\n\n project_name = os.getenv(\"AVALON_PROJECT\")\n asset_name = os.getenv(\"AVALON_ASSET\")\n\n env[\"AVALON_PROJECT\"] = project_name\n env[\"AVALON_ASSET\"] = asset_name\n env[\"AVALON_TASK\"] = os.getenv(\"AVALON_TASK\")\n env[\"AVALON_WORKDIR\"] = os.getenv(\"AVALON_WORKDIR\")\n env[\"AVALON_APP\"] = f\"hosts.{publish_host}\"\n env[\"AVALON_APP_NAME\"] = \"celaction/local\"\n\n env[\"PYBLISH_HOSTS\"] = publish_host\n\n os.environ.update(env)", "def _set_credentials():\n # Override credentials here if necessary\n if env.user == 'ubuntu':\n env.key_filename = [\n os.path.expanduser('~/.ssh/ubuntu-id_dsa')]\n env.abort_on_prompts = True\n env.disable_known_hosts = True\n env.use_shell = False", "def _require_environment():\n require('environment', 'host', provided_by=ENVS.keys())", "def dist_env():\n trainer_id = int(os.getenv(\"PADDLE_TRAINER_ID\", \"0\"))\n num_trainers = 1\n training_role = os.getenv(\"PADDLE_TRAINING_ROLE\", \"TRAINER\")\n assert (training_role == \"PSERVER\" or training_role == \"TRAINER\")\n\n # - PADDLE_TRAINER_ENDPOINTS means nccl2 mode.\n # - PADDLE_PSERVER_ENDPOINTS means pserver mode.\n # - PADDLE_CURRENT_ENDPOINT means current process endpoint.\n trainer_endpoints = os.getenv(\"PADDLE_TRAINER_ENDPOINTS\")\n pserver_endpoints = os.getenv(\"PADDLE_PSERVER_ENDPOINTS\")\n current_endpoint = os.getenv(\"PADDLE_CURRENT_ENDPOINT\")\n if trainer_endpoints:\n trainer_endpoints = trainer_endpoints.split(\",\")\n num_trainers = len(trainer_endpoints)\n elif pserver_endpoints:\n num_trainers = int(os.getenv(\"PADDLE_TRAINERS_NUM\"))\n\n return {\n \"trainer_id\": trainer_id,\n \"num_trainers\": num_trainers,\n \"current_endpoint\": current_endpoint,\n \"training_role\": training_role,\n \"pserver_endpoints\": pserver_endpoints,\n \"trainer_endpoints\": trainer_endpoints\n }", "def test_read_env_config3(config, environment_vars_set):\n os.environ[\"YESSSSMS_PROVIDER\"] = \"goood\"\n sms = YesssSMS.YesssSMS()\n assert sms._provider == \"goood\"", "def _env_vars(self, cred_file=None, section='default'):\n if cred_file:\n parser = ConfigParser.SafeConfigParser()\n parser.optionxform = str\n parser.read(os.path.expanduser(cred_file))\n for name, value in parser.items(section):\n if name == 'OS_AUTH_URL':\n if not self.module.params.get('login_url'):\n self.module.params['login_url'] = value\n if name == 'OS_USERNAME':\n if not self.module.params.get('login_user'):\n self.module.params['login_user'] = value\n if name == 'OS_PASSWORD':\n if not self.module.params.get('login_password'):\n self.module.params['login_password'] = value\n if name == 'OS_TENANT_ID':\n if not self.module.params.get('login_tenant_name'):\n self.module.params['login_tenant_name'] = value\n 
else:\n if not self.module.params.get('login_url'):\n authurl = os.getenv('OS_AUTH_URL')\n self.module.params['login_url'] = authurl\n\n if not self.module.params.get('login_user'):\n username = os.getenv('OS_USERNAME')\n self.module.params['login_user'] = username\n\n if not self.module.params.get('login_password'):\n password = os.getenv('OS_PASSWORD')\n self.module.params['login_password'] = password\n\n if not self.module.params.get('login_tenant_name'):\n tenant = os.getenv('OS_TENANT_ID')\n self.module.params['login_tenant_name'] = tenant", "def environment_vars_set_wowww():\n os.environ[\"YESSSSMS_LOGIN\"] = \"03211234567\"\n os.environ[\"YESSSSMS_PASSWD\"] = \"MySecr3t\"\n os.environ[\"YESSSSMS_PROVIDER\"] = \"wowww\"\n os.environ[\"YESSSSMS_RECIPIENT\"] = \"066356789780\"", "def set_environment():\n # status\n\n logger.info('setting global environment variables')\n\n # set all env vars\n os.environ['DBUGMODE'] = 'False'\n os.environ['AWS_DEFAULT_REGION'] = set_default_region() or 'us-east-1'\n\n logger.info('AWS_DEFAULT_REGION determined as %s' % os.environ['AWS_DEFAULT_REGION'])", "def init_config_values():\n global HEADER, PROCESS_PROGRAM_NAME, METADATA_HANDLE_EVENT_NAME\n global APPINSIGHTS_INSTRUMENTATIONKEY\n global DATABRICKS_OUTPUT_STORAGE_ACCOUNT_URL, DATABRICKS_OUTPUT_STORAGE_SAS_TOKEN\n global ADX_INGEST_QUEUE_URL_LIST, ADX_INGEST_QUEUE_SAS_TOKEN\n global CONCURRENT_ENQUEUE_TASKS\n global MAX_COMPACT_FILE_RECORDS\n APPINSIGHTS_INSTRUMENTATIONKEY = os.getenv(\"APPINSIGHTS_INSTRUMENTATIONKEY\",\n APPINSIGHTS_INSTRUMENTATIONKEY)\n DATABRICKS_OUTPUT_STORAGE_ACCOUNT_URL = os.getenv(\"DATABRICKS_OUTPUT_STORAGE_ACCOUNT_URL\",\n DATABRICKS_OUTPUT_STORAGE_ACCOUNT_URL)\n DATABRICKS_OUTPUT_STORAGE_SAS_TOKEN = os.getenv(\"DATABRICKS_OUTPUT_STORAGE_SAS_TOKEN\",\n DATABRICKS_OUTPUT_STORAGE_SAS_TOKEN)\n ADX_INGEST_QUEUE_URL_LIST = os.getenv(\"ADX_INGEST_QUEUE_URL_LIST\", ADX_INGEST_QUEUE_URL_LIST)\n ADX_INGEST_QUEUE_SAS_TOKEN = os.getenv(\"ADX_INGEST_QUEUE_SAS_TOKEN\", ADX_INGEST_QUEUE_SAS_TOKEN)\n CONCURRENT_ENQUEUE_TASKS = int(os.getenv(\"CONCURRENT_ENQUEUE_TASKS\", CONCURRENT_ENQUEUE_TASKS))\n ADX_INGEST_QUEUE_URL_LIST = ADX_INGEST_QUEUE_URL_LIST.replace(' ', '').split(',')\n logging.info(f\"ADX_INGEST_QUEUE_URL_LIST: {ADX_INGEST_QUEUE_URL_LIST}\")\n\n\n HEADER = os.getenv(\"LOG_MESSAGE_HEADER\", HEADER)\n PROCESS_PROGRAM_NAME = os.getenv(\"PROCESS_PROGRAM_NAME\", PROCESS_PROGRAM_NAME)\n METADATA_HANDLE_EVENT_NAME = os.getenv(\"METADATA_HANDLE_EVENT_NAME\", METADATA_HANDLE_EVENT_NAME)\n MAX_COMPACT_FILE_RECORDS = int(os.getenv(\"MAX_COMPACT_FILE_RECORDS\", str(MAX_COMPACT_FILE_RECORDS)))", "def read_environment(self):\n # Setup credentials\n if os.getenv(\"DO_API_TOKEN\"):\n self.api_token = os.getenv(\"DO_API_TOKEN\")\n if os.getenv(\"DO_API_KEY\"):\n self.api_token = os.getenv(\"DO_API_KEY\")", "def set_env_config(self):\n self.env_config = {\n # ===== STANDARD ARGUMENTS ======\n \"n_agents\": 4, # Number of non-planner agents\n \"world_size\": [15, 15], # [Height, Width] of the env world\n \"episode_length\": 1000, # Number of time-steps per episode\n # In multi-action-mode, the policy selects an action for each action\n # subspace (defined in component code)\n # Otherwise, the policy selects only 1 action\n \"multi_action_mode_agents\": False,\n \"multi_action_mode_planner\": True,\n # When flattening observations, concatenate scalar & vector observations\n # before output\n # Otherwise, return observations with minimal processing\n \"flatten_observations\": False,\n # When 
Flattening masks, concatenate each action subspace mask\n # into a single array\n # Note: flatten_masks = True is recommended for masking action logits\n \"flatten_masks\": True,\n # ===== COMPONENTS =====\n # Which components to use\n \"components\": [\n # (1) Building houses\n {\"Build\": {}},\n # (2) Trading collectible resources\n {\"ContinuousDoubleAuction\": {\"max_num_orders\": 5}},\n # (3) Movement and resource collection\n {\"Gather\": {}},\n ],\n # ===== SCENARIO =====\n # Which scenario class to use\n \"scenario_name\": \"uniform/simple_wood_and_stone\",\n # (optional) kwargs of the chosen scenario class\n \"starting_agent_coin\": 10,\n \"starting_stone_coverage\": 0.10,\n \"starting_wood_coverage\": 0.10,\n }\n\n # Create an environment instance from the config\n self.env = foundation.make_env_instance(**self.env_config)", "def set_test_environment():\n import flask_monitoringdashboard\n\n flask_monitoringdashboard.config.database_name = 'sqlite:///test-database.db'", "def configure_environment(ctx, user):\n try:\n out = ctx.obj.configuration(\n user\n )\n print_message(out[\"path\"])\n except BaseException as e:\n print_error(e.message)", "def _setup_cpu_environment() -> None:\n inter_op_parallel_threads = os.getenv(ENV_CPU_INTER_OP_CONFIG)\n intra_op_parallel_threads = os.getenv(ENV_CPU_INTRA_OP_CONFIG)\n\n if not inter_op_parallel_threads and not intra_op_parallel_threads:\n return\n\n from tensorflow import config as tf_config\n\n if inter_op_parallel_threads:\n try:\n inter_op_parallel_threads_number = int(inter_op_parallel_threads.strip())\n except ValueError:\n raise ValueError(\n f\"Error parsing the environment variable '{ENV_CPU_INTER_OP_CONFIG}'. \"\n f\"Please cross-check the value.\"\n )\n\n tf_config.threading.set_inter_op_parallelism_threads(\n inter_op_parallel_threads_number\n )\n\n if intra_op_parallel_threads:\n try:\n intra_op_parallel_threads_number = int(intra_op_parallel_threads.strip())\n except ValueError:\n raise ValueError(\n f\"Error parsing the environment variable '{ENV_CPU_INTRA_OP_CONFIG}'. 
\"\n f\"Please cross-check the value.\"\n )\n\n tf_config.threading.set_intra_op_parallelism_threads(\n intra_op_parallel_threads_number\n )", "def dd_environment():\n\n # specify couchbase container name\n env = {\n 'GITLAB_TEST_TOKEN': GITLAB_TEST_TOKEN,\n 'GITLAB_LOCAL_MASTER_PORT': str(GITLAB_LOCAL_MASTER_PORT),\n 'GITLAB_LOCAL_RUNNER_PORT': str(GITLAB_LOCAL_RUNNER_PORT),\n }\n compose_file = os.path.join(HERE, 'compose', 'docker-compose.yml')\n with docker_run(\n compose_file=compose_file,\n env_vars=env,\n conditions=[\n CheckDockerLogs(\n compose_file, ['Gitlab is up!', 'Configuration loaded', 'Metrics server listening'], wait=5\n ),\n CheckEndpoints(GITLAB_RUNNER_URL, attempts=180),\n ],\n ):\n yield CONFIG, E2E_METADATA", "def setUp(self):\n test_env_setup()", "def test_env_vars():\n # Create a variable with the file system encoding and save it\n # in our PYTHONPATH\n env_var = to_fs_from_unicode(u'รฑรฑรฑ')\n CONF.set('main', 'spyder_pythonpath', [env_var])\n\n # Create a kernel spec\n kernel_spec = SpyderKernelSpec()\n\n # Assert PYTHONPATH is in env vars and it's not empty\n assert kernel_spec.env['PYTHONPATH'] != ''\n\n # Assert all env vars are binary strings\n assert all([is_binary_string(v) for v in kernel_spec.env.values()])\n\n # Remove our entry from PYTHONPATH\n CONF.set('main', 'spyder_pythonpath', [])", "def _generate_environment(self):\n envvars = {}\n for key in self.envvars:\n try:\n envvars[key] = os.environ[key]\n except KeyError:\n continue\n\n # Warn the user that we cannot support secrets\n if envvars:\n logger.warning(\"This API does not support environment secrets.\")\n return envvars", "def getEnvironmentVars(self):\n my_env = os.environ.copy()\n my_env[\"MODE\"] = self.model.mode\n my_env[\"ALGORITHM\"] = self.model.algorithm\n my_env[\"INPUT_FILE\"] = self.model.inputFile\n my_env[\"TARGET_FILE\"] = self.model.targetFile\n my_env[\"DEVICE\"] = self.model.device\n my_env[\"SIZE\"] = str(self.model.size)\n my_env[\"ROLE\"] = self.model.role\n my_env[\"WAIT_SIZE\"] = str(self.model.waitSize)\n my_env[\"STEP_SIZE\"] = str(self.model.stepSize)\n my_env[\"TUI_CONNECTION\"] = str(True)\n return my_env", "def user_env(self):\n\n # FIXME I think the JPY_ variables have been deprecated in JupyterHub\n # since 0.7.2, we should replace them. Can we figure this out?\n\n env = super(EC2Spawner, self).get_env()\n env.update(dict(\n JUPYTERHUB_PREFIX=self.hub.server.base_url,\n Name='Jupyter',\n PATH=self.path\n ))\n\n if self.notebook_dir:\n env['NOTEBOOK_DIR'] = self.notebook_dir\n\n hub_api_url = self.hub.api_url\n if self.hub_api_url != '':\n hub_api_url = self.hub_api_url\n\n env['JPY_HUB_API_URL'] = hub_api_url\n env['JUPYTERHUB_API_URL'] = hub_api_url\n\n self.log.debug(\"Env built: {}\".format(env))\n return env", "def setup_environment(self):\n self.run_command(\"cd {}\".format(quote(str(self.builddir))))\n env_vars = self._build_env_variables_string()\n if env_vars:\n env_vars = quote(env_vars)\n command = \"{} DISTRO={} MACHINE={} . 
{} build-{}\".format(\n env_vars,\n quote(self.distro),\n quote(self.machine),\n quote(self.init_env_file),\n quote(self.distro),\n )\n self.run_command(command)", "def setUp(self):\n os.environ[\"PADDLE_TRAINERS_NUM\"] = \"2\"\n os.environ[\n \"PADDLE_PSERVERS_IP_PORT_LIST\"\n ] = \"127.0.0.1:36001,127.0.0.2:36001\"", "def _build_env(self, args):\n env = os.environ.copy()\n env['ANSIBLE_CONFIG'] = os.path.join(ANSIBLE_DIR, 'ansible.cfg')\n if hasattr(args, 'stdout_callback'):\n env['ANSIBLE_STDOUT_CALLBACK'] = args.stdout_callback\n return env", "def test_read_env_config2(config, environment_vars_set_wowww):\n sms = YesssSMS.YesssSMS()\n assert sms._provider == \"wowww\"", "def dev():\n env.hosts = ['']\n env.user = ''\n env.virtualenv_dir = ''\n env.code_dir = ''\n env.var_dir = ''\n env.activate = 'source %s/bin/activate' % env.virtualenv_dir\n env.backup_on_deploy = False", "def setup_environment():\n global _ENV_SETUP_DONE\n if _ENV_SETUP_DONE:\n return\n _ENV_SETUP_DONE = True\n\n _configure_libraries()\n\n custom_module_path = os.environ.get(\"DETECTRON2_ENV_MODULE\")\n\n if custom_module_path:\n setup_custom_environment(custom_module_path)\n else:\n # The default setup is a no-op\n pass", "def dd_environment():\n\n # specify couchbase container name\n env = {\n 'GITLAB_TEST_PASSWORD': GITLAB_TEST_PASSWORD,\n 'GITLAB_LOCAL_PORT': str(GITLAB_LOCAL_PORT),\n 'GITLAB_LOCAL_PROMETHEUS_PORT': str(GITLAB_LOCAL_PROMETHEUS_PORT),\n }\n\n with docker_run(\n compose_file=os.path.join(HERE, 'compose', 'docker-compose.yml'),\n env_vars=env,\n conditions=[CheckEndpoints(GITLAB_URL, attempts=200), CheckEndpoints(PROMETHEUS_ENDPOINT)],\n ):\n # run pre-test commands\n for _ in range(100):\n requests.get(GITLAB_URL)\n sleep(2)\n\n yield CONFIG", "def get_config():\n\n return {\n 'ADMIN_USERNAME': env.get('ECSTEST_ADMIN_USERNAME', 'username'),\n 'ADMIN_PASSWORD': env.get('ECSTEST_ADMIN_PASSWORD', 'password'),\n 'TOKEN': env.get('ECSTEST_TOKEN', None),\n 'CONTROL_ENDPOINT': env.get(\n 'ECSTEST_CONTROL_ENDPOINT', 'https://127.0.0.1:4443'\n ),\n 'TOKEN_ENDPOINT': env.get(\n 'ECSTEST_CONTROL_TOKEN_ENDPOINT', 'https://127.0.0.1:4443/login'\n ),\n 'ALT_CONTROL_ENDPOINT': env.get(\n 'ECSTEST_ALT_CONTROL_ENDPOINT',\n env.get('ECSTEST_CONTROL_ENDPOINT',\n 'https://127.0.0.1:4443')),\n 'ALT_TOKEN_ENDPOINT': env.get(\n 'ECSTEST_ALT_CONTROL_TOKEN_ENDPOINT',\n env.get('ECSTEST_CONTROL_TOKEN_ENDPOINT',\n 'https://127.0.0.1:4443/login'),\n ),\n 'VERIFY_SSL': _env_to_bool('ECSTEST_VERIFY_SSL', 0),\n 'REQUEST_TIMEOUT': float(env.get('ECSTEST_REQUEST_TIMEOUT', 15.0)),\n 'TOKEN_FILENAME': env.get(\n 'ECSTEST_TOKEN_FILENAME', '/tmp/ecstest.token'\n ),\n 'CACHE_TOKEN': _env_to_bool('ECSTEST_CACHE_TOKEN', 1),\n 'AUTH_TOKEN_MIN_LENGTH': env.get('ECSTEST_AUTH_TOKEN_MIN_LENGTH', 1),\n 'AUTH_TOKEN_MAX_LENGTH': env.get('ECSTEST_AUTH_TOKEN_MAX_LENGTH', 512),\n 'NAMESPACE': env.get('ECSTEST_NAMESPACE', 'namespace1'),\n 'MAX_LOGIN_TIME': env.get('ECSTEST_MAX_LOGIN_TIME', 3),\n 'ACCESS_SSL': _env_to_bool('ECSTEST_ACCESS_SSL', 0),\n 'ACCESS_SERVER': env.get('ECSTEST_ACCESS_SERVER', 'localhost'),\n 'ALT_ACCESS_SERVER': env.get(\n 'ECSTEST_ALT_ACCESS_SERVER',\n env.get('ECSTEST_ACCESS_SERVER', 'localhost')\n ),\n 'ACCESS_PORT': int(env.get('ECSTEST_ACCESS_PORT', 3128)),\n 'ACCESS_KEY': env.get('ECSTEST_ACCESS_KEY', 'mykey'),\n 'ACCESS_SECRET': env.get('ECSTEST_ACCESS_SECRET', 'mysecret'),\n 'ALT_ACCESS_KEY': env.get(\n 'ECSTEST_ALT_ACCESS_KEY',\n env.get('ECSTEST_ACCESS_KEY', 'mykey')\n ),\n 'ALT_ACCESS_SECRET': env.get(\n 
'ECSTEST_ALT_ACCESS_SECRET',\n env.get('ECSTEST_ACCESS_SECRET', 'mysecret')\n ),\n 'VERBOSE_OUTPUT': _env_to_bool('ECSTEST_VERBOSE_OUTPUT', 0),\n 'TEST_TARGET': env.get('ECSTEST_TEST_TARGET', constants.TARGET_AWSS3),\n 'TEST_TYPE': env.get(\n 'ECSTEST_TEST_TYPE', constants.TYPE_COMPATIBILITY\n ),\n 'DNS_BUCKET_NAMING_CONVENTION': _env_to_bool(\n 'ECSTEST_DNS_BUCKET_NAMING_CONVENTION', 0\n ),\n 'NODES_PER_SITE': int(env.get('ECSTEST_NODES_PER_SITE', 1)),\n 'RUN_DISABLED': _env_to_bool('ECSTEST_RUN_DISABLED'),\n 'REUSE_BUCKET_NAME': env.get('ECSTEST_REUSE_BUCKET_NAME'),\n }", "def test_environ_vars_available(self) -> None:\n self.assertIsNotNone(os.environ.get('AWS_ACCESS_KEY_ID'))\n self.assertIsNotNone(os.environ.get('AWS_SECRET_KEY'))\n self.assertIsNotNone(os.environ.get('AWS_REGION_NAME'))\n self.assertIsNotNone(os.environ.get('S3_BUCKET'))", "def _set_environment_vars(self):\n os.environ[\"PATH\"] = os.path.join(self.source_folder, \"depot_tools\") + os.pathsep + os.environ[\"PATH\"]\n os.environ[\"DEPOT_TOOLS_PATH\"] = os.path.join(self.source_folder, \"depot_tools\")\n if tools.os_info.is_windows:\n os.environ[\"DEPOT_TOOLS_WIN_TOOLCHAIN\"] = \"0\"\n os.environ[\"GYP_MSVS_VERSION\"] = \"2017\" if str(self.settings.compiler.version) == \"15\" else \"2019\"", "def configure_airflow_variables():\n from airflow.bin.cli import import_helper\n for path in glob(os.path.join(airflow_variables_dir, '*.json')):\n import_helper(path)", "def getParamsFromEnv(self):\r\n self.port = os.getenv('PGPORT', self.port)\r\n self.host = os.getenv('PGHOST', self.host)\r\n self.database = os.getenv('PGDATABASE', self.database)\r\n self.user = os.getenv('PGUSER', self.user)\r\n self.password = os.getenv('PGPASSWORD', self.password)", "def set_envvars(self):\n # self.logger.trace(\"update os.environ with %s\", self.environ)\n for key in os.environ:\n current = self.environ.get(key)\n if current is None:\n del os.environ[key]\n for key, value in self.environ.items():\n if value is not None:\n os.environ[key] = str(value)", "def test_get_environment_string(self):\n pass", "def production():\n env.run = run\n env.cd = cd\n env.deployment = 'remote'", "def change_environment_variables():\n values = load('environment.yaml')\n\n for key in values.keys():\n os.environ[key] = values[key]\n\n info(f'Changed environment variables to {values}')", "def create_vars_dot_env(self):\n\n print(\"Creating vars.env in your Google Drive!\")\n\n with open(self.envpath, \"w\") as envfile:\n envfile.write(\"COLAB_ENV = Active\\n\")", "def testing_env_var(monkeypatch):\n monkeypatch.setenv(\"AWS_ACCESS_KEY_ID\", \"jqt\")\n monkeypatch.setenv(\"AWS_SECRET_ACCESS_KEY\", \"rde\")\n monkeypatch.delenv(\"AWS_PROFILE\", raising=False)\n monkeypatch.setenv(\"AWS_CONFIG_FILE\", \"/tmp/noconfigheere\")\n monkeypatch.setenv(\"AWS_SHARED_CREDENTIALS_FILE\", \"/tmp/noconfighereeither\")\n monkeypatch.setenv(\"GDAL_DISABLE_READDIR_ON_OPEN\", \"EMPTY_DIR\")", "def staging():\n env.settings = 'staging'\n env.hosts = []\n\n env.roledefs = {\n 'app': [],\n 'worker': [],\n 'admin': []\n }\n\n env.user = 'newsapps'\n\n env.s3_bucket = ''\n env.site_domain = ''\n\n env.db_root_user = ''\n env.db_root_pass = ''\n env.db_type = 'mysql'\n env.db_host = ''\n env.database_password = ''\n\n env.django_settings_module = '%(project_name)s.staging_settings' % env", "def _get_environment_variables(cluster):\n # Make environment variables for the shell commands\n shell_env = {}\n shell_env[\"OMP_NUM_THREADS\"] = cluster.num_vcores\n 
shell_env[\"GRAPHLAB_MEMORY_LIMIT_IN_MB\"] = cluster.container_size * .8\n shell_env[\"TURI_PORT_START\"] = cluster.start_port\n shell_env[\"TURI_PORT_END\"] = cluster.end_port\n # Overwrite environment for SFrame cache capacity. Being conservative, we use default 1/5 of the container size.\n for key in ['GRAPHLAB_FILEIO_MAXIMUM_CACHE_CAPACITY', 'GRAPHLAB_FILEIO_MAXIMUM_CACHE_CAPACITY_PER_FILE']:\n if key in os.environ:\n shell_env[key] = os.environ[key]\n else:\n shell_env[key] = cluster.container_size * 1024 * 1024 * .2\n\n # Overwrite environment for SFrame cache file location.\n if cluster.node_tmp_dir:\n shell_env['GRAPHLAB_CACHE_FILE_LOCATIONS'] = cluster.node_tmp_dir\n if cluster.hdfs_tmp_dir:\n shell_env['GRAPHLAB_CACHE_FILE_HDFS_LOCATION'] = cluster.hdfs_tmp_dir\n return shell_env", "def build_env(self, instance, private_data_dir, private_data_files=None):\n env = {}\n # Add ANSIBLE_* settings to the subprocess environment.\n for attr in dir(settings):\n if attr == attr.upper() and attr.startswith('ANSIBLE_'):\n env[attr] = str(getattr(settings, attr))\n # Also set environment variables configured in AWX_TASK_ENV setting.\n for key, value in settings.AWX_TASK_ENV.items():\n env[key] = str(value)\n\n env['AWX_PRIVATE_DATA_DIR'] = private_data_dir\n\n if self.instance.execution_environment is None:\n raise RuntimeError(f'The {self.model.__name__} could not run because there is no Execution Environment.')\n\n return env", "def update_environ():\n\n # Environment variables to set.\n BASE = os.getcwd()\n PLUGINS = os.path.join(BASE, 'lib')\n RESOURCES = os.path.join(BASE, 'res')\n MODELS = os.path.join(RESOURCES, 'models')\n\n # Set the vaue to '' to set the var to ''.\n # Anything else will be added to current var value.\n minimapper_env = {\n 'GAZEBO_RESOURCE_PATH': RESOURCES,\n 'GAZEBO_MODEL_PATH': MODELS,\n 'GAZEBO_PLUGIN_PATH': PLUGINS,\n 'GAZEBO_MODEL_DATABASE_URI': None\n }\n\n # Conditionally set environment variables.\n env = os.environ.copy()\n for key, val in minimapper_env.items():\n if val is None:\n env[key] = ''\n elif key not in env:\n env[key] = val\n elif key in env and val not in env[key]:\n env[key] = val + ':' + env[key]\n\n return env", "def aws_credentials():\n os.environ['AWS_ACCESS_KEY_ID'] = 'testing'\n os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'\n os.environ['AWS_SECURITY_TOKEN'] = 'testing'\n os.environ['AWS_SESSION_TOKEN'] = 'testing'\n os.environ['AWS_DEFAULT_REGION'] = 'us-east-1'", "def _set_ci_environment_variables(parent_shell):\n variables_to_set = {\n \"JOBSTAMPS_ALWAYS_USE_HASHES\": \"1\",\n \"CLINT_FORCE_COLOR\": \"1\",\n \"PYTHONDONTWRITEBYTECODE\": \"1\"\n }\n\n for key, value in variables_to_set.items():\n os.environ[key] = value\n parent_shell.overwrite_environment_variable(key, value)", "def __MakeEnvironment(self):\n environment= os.environ.copy()\n\n for key, value in self.__context.items():\n if type(value) is str:\n name = \"QMV_\" + key.replace(\".\", \"__\")\n environment[name]= value\n\n return environment", "def setup_environmentvars(self, path, mydbfilepath):\n\n if not FDB_AVAILABLE:\n print(BColors.WARNING + \"Warning: fdb_embedded couldn't be imported! 
\" \\\n + \"\\nMake sure you've installed fdb_embedded correctly.\" + BColors.ENDC)\n return False\n\n os.environ['FIREBIRD'] = path\n self.db_filepath = mydbfilepath\n return True", "def aws_credentials():\n os.environ['AWS_ACCESS_KEY_ID'] = 'testing'\n os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'\n os.environ['AWS_SECURITY_TOKEN'] = 'testing'\n os.environ['REGION'] = 'region'", "def live():\n env.hosts = ['']\n env.user = ''\n env.virtualenv_dir = ''\n env.code_dir = ''\n env.var_dir = ''\n env.activate = 'source %s/bin/activate' % env.virtualenv_dir\n env.backup_on_deploy = False", "def find_environ_config_vars():\n # only allow secret key and database uri for now\n envvars = [\"SQLALCHEMY_DATABASE_URI\", \"SECRET_KEY\"]\n results = {}\n for key in envvars:\n if key in os.environ:\n results[key] = os.environ[key]\n return results", "def register_envvars(self, *envvars):\n invalid_envvars = [\n envvar\n for envvar in envvars\n if re.match(r\"^\\w+$\", envvar, flags=re.ASCII) is None\n ]\n if invalid_envvars:\n raise WorkflowError(\n f\"Invalid environment variables requested: {', '.join(map(repr, invalid_envvars))}. \"\n \"Environment variable names may only contain alphanumeric characters and the underscore. \"\n )\n undefined = set(var for var in envvars if var not in os.environ)\n if self.check_envvars and undefined:\n raise WorkflowError(\n \"The following environment variables are requested by the workflow but undefined. \"\n \"Please make sure that they are correctly defined before running Snakemake:\\n\"\n \"{}\".format(\"\\n\".join(undefined))\n )\n self.envvars.update(envvars)", "def staging():\n env.settings = 'staging'\n env.hosts = ['db.beta.tribapps.com'] \n env.user = 'newsapps'\n env.s3_bucket = 'media-beta.tribapps.com'", "def _configure_logging(self):\n workflowProperty = '-Dapp.workflow={}'.format(self.appName)\n\n # self.sparkProperties[SparkProperties.SPARK_DRIVER_EXTRAJAVAOPTIONS] = workflowProperty\n # self.sparkProperties[SparkProperties.SPARK_EXECUTOR_EXTRAJAVAOPTIONS] = workflowProperty\n\n self._append_or_create_property(SparkProperties.SPARK_DRIVER_EXTRAJAVAOPTIONS, workflowProperty, ' ')\n self._append_or_create_property(SparkProperties.SPARK_EXECUTOR_EXTRAJAVAOPTIONS, workflowProperty, ' ')\n # Add environment variables to the executors\n self.extra_property('spark.executorEnv.APP_WORKFLOW', self.appName)\n # Add environment variables to the driver process. 
TODO: with executor_env ?\n self.executor_env('APP_WORKFLOW', self.appName)", "def aws_credentials():\n os.environ['AWS_ACCESS_KEY_ID'] = 'testing'\n os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'\n os.environ['AWS_SECURITY_TOKEN'] = 'testing'\n os.environ['AWS_SESSION_TOKEN'] = 'testing'\n os.environ['AWS_DEFAULT_REGION'] = 'us-west-2'", "def test_read_env_config4(config, environment_vars_set):\n del os.environ[\"YESSSSMS_PROVIDER\"]\n sms = YesssSMS.YesssSMS()\n assert sms._provider == \"yesss\"", "def set_environment(plugin_path):\n srcpath = os.path.join(plugin_path, \"scripts\")\n icnpath = os.path.join(plugin_path, \"icons\")\n melpath = os.path.join(plugin_path, \"mel\")\n modpath = os.path.join(plugin_path, \"modules\")\n tplpath = os.path.join(plugin_path, \"templates\")\n tolpath = os.path.join(plugin_path, \"scripts\", \"tools\")\n sys.path.append(modpath)\n sys.path.append(srcpath)\n sys.path.append(os.path.join(srcpath, \"ui\"))\n sys.path.append(tolpath)\n\n script_dirs = os.environ[\"MAYA_SCRIPT_PATH\"] + os.pathsep\n os.environ[\"AZUREBATCH_ICONS\"] = AzureBatchSetup.clean(icnpath)\n os.environ[\"AZUREBATCH_MODULES\"] = AzureBatchSetup.clean(modpath)\n os.environ[\"AZUREBATCH_TEMPLATES\"] = AzureBatchSetup.clean(tplpath)\n os.environ[\"AZUREBATCH_TOOLS\"] = AzureBatchSetup.clean(tolpath)\n os.environ[\"MAYA_SCRIPT_PATH\"] = script_dirs + \\\n AzureBatchSetup.clean(melpath)\n print(\"Attempting to create mod file under MAYA_MODULE_PATH\")\n mods = AzureBatchSetup.find_modules_locations(plugin_path)\n\n if not mods:\n print(\"Attempting to add custom module path to Maya.env\")\n mods = AzureBatchSetup.find_env_location(plugin_path)\n if not mods:\n print(\"Failed to setup AzureBatch mod file\")\n return os.environ[\"MAYA_MODULE_PATH\"] + os.pathsep", "def aws_credentials():\n os.environ['AWS_ACCESS_KEY_ID'] = 'testing'\n os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'\n os.environ['AWS_SECURITY_TOKEN'] = 'testing'\n os.environ['AWS_SESSION_TOKEN'] = 'testing'", "def aws_credentials():\n os.environ['AWS_ACCESS_KEY_ID'] = 'testing'\n os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'\n os.environ['AWS_SECURITY_TOKEN'] = 'testing'\n os.environ['AWS_SESSION_TOKEN'] = 'testing'", "def aws_credentials():\n os.environ['AWS_ACCESS_KEY_ID'] = 'testing'\n os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'\n os.environ['AWS_SECURITY_TOKEN'] = 'testing'\n os.environ['AWS_SESSION_TOKEN'] = 'testing'", "def setUp(self) -> None:\n self.ec2 = boto3.resource('ec2')\n self.ec2_client = boto3.client('ec2')\n self.sts = boto3.client('sts')\n self.iam = boto3.client('iam')\n self.autoscaling = boto3.client('autoscaling')\n\n self.prod_env = prod_env", "def setup_config():\n if CONFIG.get(\"environment\", \"server\") == 'production':\n return 'config.ProductionConfig'\n else:\n return 'config.TestingConfig'", "def _set_from_env(name, context, default):\n if default is _DEFAULT_ARG and name not in os.environ:\n return\n\n context[name] = os.environ.get(name, default)", "def environment_variables(self, alias):\n raise NotImplementedError", "def get_config():\n return ExperimentConfig(base_path=os.getenv(\"AICROWD_OUTPUT_PATH\", \"./scratch/shared\"),\n experiment_name=os.getenv(\"AICROWD_EVALUATION_NAME\", \"experiment_name\"),\n dataset_name=os.getenv(\"AICROWD_DATASET_NAME\", \"cars3d\"))" ]
[ "0.70271873", "0.7026732", "0.69392926", "0.69331855", "0.68138427", "0.6760369", "0.6758569", "0.6714527", "0.67041445", "0.66552407", "0.65845776", "0.65845776", "0.65845776", "0.65845776", "0.65845776", "0.65845776", "0.6584399", "0.65421516", "0.6530069", "0.6528909", "0.64960116", "0.64579433", "0.6447718", "0.6442308", "0.6439525", "0.6437336", "0.6370697", "0.6345632", "0.6339555", "0.6325007", "0.63221276", "0.63064796", "0.63016444", "0.6293589", "0.6274244", "0.6267764", "0.62675107", "0.62647486", "0.62388027", "0.62383616", "0.62349176", "0.622633", "0.6211716", "0.6210299", "0.6183221", "0.617913", "0.61725587", "0.6172187", "0.61706686", "0.6158634", "0.6113442", "0.61093014", "0.6101946", "0.60916483", "0.6087995", "0.6081235", "0.6065887", "0.6060052", "0.6043402", "0.60311955", "0.6026236", "0.60203844", "0.60177535", "0.59974295", "0.598437", "0.5969494", "0.59658223", "0.5960848", "0.59605336", "0.5955636", "0.59437793", "0.5943249", "0.59383184", "0.5936631", "0.5935923", "0.59358776", "0.5925824", "0.59148467", "0.59076506", "0.5904008", "0.590092", "0.58891183", "0.58808297", "0.5873499", "0.58678716", "0.5861577", "0.5853061", "0.5847486", "0.58474404", "0.58474296", "0.58456385", "0.5842651", "0.5840485", "0.5840485", "0.5840485", "0.58333695", "0.5822237", "0.58220094", "0.5812524", "0.5811031" ]
0.7265635
0
The config can specify a resource manager server address as "driver", which means the workflow should launch the resource manager on the scheduler machine. Make sure it launches, but is also shut down after the workflow exits.
def test_resource_manager_on_driver(): config = { "workflow-name": "workflow", "cluster-type": CLUSTER_TYPE, "resource-manager": { "server": "driver", "port": 4000, "config": { "read_reqs": 123, "read_data": 456, "write_reqs": 789, "write_data": 321 } } } template_dir = tempfile.mkdtemp(suffix="test-resource-manager-on-driver-template") with open(f"{template_dir}/workflow.yaml", 'w') as f: yaml.dump(config, f) @checkrun def execute(workflow_inst): client = ResourceManagerClient('127.0.0.1', 4000) mgr_config = client.read_config() assert mgr_config == config["resource-manager"]["config"], \ "Resource manager config does not match the one in the workflow config" _execution_dir, _workflow = launch_flow(template_dir, 1, _custom_execute_fn=execute) assert execute.didrun # FIXME: For mysterious reasons, the check below does not work on Travis-CI. # Somehow, read_config() succeeds despite the fact that # the resource manager server was already terminated?? if os.environ.get('TRAVIS', '') == 'true': pytest.skip("Skipping resource manager shutdown check on Travis-CI") # Server should not be running any more after workflow exits. with pytest.raises(TimeoutError): client2 = ResourceManagerClient('127.0.0.1', 4000) client2.read_config()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def launch(config):\n \n launch_with_configs([config])", "def run_xenon_simple(workflow, machine, worker_config):\n scheduler = Scheduler()\n\n return scheduler.run(\n xenon_interactive_worker(machine, worker_config),\n get_workflow(workflow)\n )", "def test_set_power_schedule_for_deployment_run(self):\n pass", "def test_relaunch_deployment_run(self):\n pass", "def launch_instance_manager():\n # Todo: Use name servers in the docker contexct (set up a docker compose?)\n # pyro4-ns\n parser = argparse.ArgumentParser(\"python3 launch_instance_manager.py\")\n parser.add_argument(\"--seeds\", type=str, default=None, \n help=\"The default seed for the environment.\")\n parser.add_argument(\"--seeding_type\", type=str, default=SeedType.CONSTANT, \n help=\"The seeding type for the environment. Defaults to 1 (CONSTANT)\"\n \"if a seed specified, otherwise 0 (NONE): \\n{}\".format(SeedType.__doc__))\n\n \n parser.add_argument(\"--max_instances\", type=int, default=None,\n help=\"The maximum number of instances the instance manager is able to spawn,\"\n \"before an exception is thrown. Defaults to Unlimited.\")\n opts = parser.parse_args()\n\n \n if opts.max_instances is not None:\n assert opts.max_instances > 0, \"Maximum instances must be more than zero!\"\n InstanceManager.MAXINSTANCES = opts.max_instances\n \n\n try:\n print(\"Removing the performance directory!\")\n try:\n shutil.rmtree(InstanceManager.STATUS_DIR)\n except:\n pass\n finally:\n if not os.path.exists(InstanceManager.STATUS_DIR):\n os.makedirs(InstanceManager.STATUS_DIR)\n print(\"autoproxy?\",Pyro4.config.AUTOPROXY)\n InstanceManager.REMOTE = True\n Pyro4.config.COMMTIMEOUT = InstanceManager.KEEP_ALIVE_PYRO_FREQUENCY \n\n # Initialize seeding.\n if opts.seeds is not None:\n InstanceManager._init_seeding(seeds=opts.seeds, seed_type=opts.seeding_type)\n else:\n InstanceManager._init_seeding(seed_type=SeedType.NONE)\n\n \n Pyro4.Daemon.serveSimple(\n {\n InstanceManager: INSTANCE_MANAGER_PYRO\n },\n ns = True)\n \n except Pyro4.errors.NamingError as e:\n print(e)\n print(\"Start the Pyro name server with pyro4-ns and re-run this script.\")", "def run(config):\n\tlog.debug('-- in example.py')\n#\tgetWLSMachineandandExecuteSecondary(config)\n#\t__createPegaConfigCommand(config)\n#\tcreateUsers(config)\n#\t__connectAdminServer(config)\n\tconnectAdminServerOverSSL(config)", "def test_launch_deployment(self):\n pass", "def init():\n\n @click.command()\n @click.option('--approot', type=click.Path(exists=True),\n envvar='TREADMILL_APPROOT', required=True)\n @click.option('--instance', help='Publisher instance.')\n def run(approot, instance):\n \"\"\"Starts discovery publisher process.\"\"\"\n tm_env = appenv.AppEnvironment(approot)\n publisher = endpoints.EndpointPublisher(tm_env.endpoints_dir,\n context.GLOBAL.zk.conn,\n instance=instance)\n publisher.run()\n\n return run", "def test_configurator(self):\n runner = Runner(YamlManifest(manifest))\n run1 = runner.run(JobOptions(resource=\"test1\"))\n assert not run1.unexpectedAbort, run1.unexpectedAbort.getStackTrace()\n assert len(run1.workDone) == 1, run1.workDone\n result = list(run1.workDone.values())[0].result\n self.assertEqual(result.outputs, {\"fact1\": \"test1\", \"fact2\": \"test\"})\n self.assertEqual(result.result.get(\"stdout\"), sys.executable)\n assert run1.status == Status.ok, run1.summary()", "def work(self):\n self.config_file = self.args.config\n self.init_config()\n self.init_db()\n\n self.kickoff()", "def _manageWorkersConfig(event):\n if event.info.get('key') != 
PluginSettings.SLICER_CLI_WEB_WORKER_CONFIG_ITEM:\n return\n if _loadWorkerConfig():\n _manageWorkers(None)", "def test_scheduler_runs():\n import mesos.native\n\n # Make sure fake_mysos_executor.pex is available to be fetched by Mesos slave.\n assert os.path.isfile('dist/fake_mysos_executor.pex')\n\n storage = FakeStorage(SequentialThreadingHandler())\n zk_client = FakeClient(storage=storage)\n zk_client.start()\n\n zk_url = \"zk://fake_host/home/mysos/clusters\"\n cluster_name = \"test_cluster\"\n num_nodes = 3\n\n state_provider = LocalStateProvider(safe_mkdtemp())\n\n framework_info = FrameworkInfo(\n user=getpass.getuser(),\n name=\"mysos\",\n checkpoint=False)\n\n state = Scheduler(framework_info)\n\n scheduler = MysosScheduler(\n state,\n state_provider,\n getpass.getuser(),\n os.path.abspath(\"dist/fake_mysos_executor.pex\"),\n \"./fake_mysos_executor.pex\",\n zk_client,\n zk_url,\n Amount(40, Time.SECONDS),\n \"/fakepath\",\n gen_encryption_key())\n\n RootMetrics().register_observable('scheduler', scheduler)\n\n scheduler_driver = mesos.native.MesosSchedulerDriver(\n scheduler,\n framework_info,\n \"local\")\n scheduler_driver.start()\n\n # Wait until the scheduler is connected and becomes available.\n assert scheduler.connected.wait(30)\n\n scheduler.create_cluster(cluster_name, \"mysql_user\", num_nodes, cluster_password=\"passwd\")\n\n # A slave is promoted to be the master.\n deadline(\n lambda: wait_for_master(\n get_cluster_path(posixpath.join(zk_url, 'discover'), cluster_name),\n zk_client),\n Amount(40, Time.SECONDS))\n\n scheduler.delete_cluster(cluster_name, password=\"passwd\")\n\n # The cluster is deleted from ZooKeeper.\n deadline(\n lambda: wait_for_termination(\n get_cluster_path(posixpath.join(zk_url, 'discover'), cluster_name),\n zk_client),\n Amount(40, Time.SECONDS))\n\n sample = RootMetrics().sample()\n assert sample['scheduler.tasks_killed'] == 1\n\n assert scheduler_driver.stop() == DRIVER_STOPPED", "def _configure_regular_job(config, job_exe, job_type, system_logging_level):\n config.create_tasks(['pull', 'pre', 'main', 'post'])\n config.add_to_task('pull', args=create_pull_command(job_exe.docker_image))\n config.add_to_task('pre', args=PRE_TASK_COMMAND_ARGS)\n config.add_to_task('post', args=POST_TASK_COMMAND_ARGS)\n\n # Configure input workspaces\n ro_input_workspaces = {}\n rw_input_workspaces = {}\n for input_workspace in config.get_input_workspace_names():\n ro_input_workspaces[input_workspace] = TaskWorkspace(input_workspace, MODE_RO)\n rw_input_workspaces[input_workspace] = TaskWorkspace(input_workspace, MODE_RW)\n config.add_to_task('pre', workspaces=ro_input_workspaces)\n config.add_to_task('main', workspaces=ro_input_workspaces)\n # Post tasks have access to input workspaces in case input files need moved as part of parse results\n config.add_to_task('post', workspaces=rw_input_workspaces)\n\n # Configure output workspaces\n output_workspaces = {}\n for output_workspace in config.get_output_workspace_names():\n output_workspaces[output_workspace] = TaskWorkspace(output_workspace, MODE_RW)\n config.add_to_task('post', workspaces=output_workspaces)\n\n # Configure input/output mounts\n input_mnt_name = 'scale_input_mount'\n output_mnt_name = 'scale_output_mount'\n input_vol_name = get_job_exe_input_vol_name(job_exe)\n output_vol_name = get_job_exe_output_vol_name(job_exe)\n input_vol_ro = Volume(input_vol_name, SCALE_JOB_EXE_INPUT_PATH, MODE_RO, is_host=False)\n input_vol_rw = Volume(input_vol_name, SCALE_JOB_EXE_INPUT_PATH, MODE_RW, is_host=False)\n 
output_vol_ro = Volume(output_vol_name, SCALE_JOB_EXE_OUTPUT_PATH, MODE_RO, is_host=False)\n output_vol_rw = Volume(output_vol_name, SCALE_JOB_EXE_OUTPUT_PATH, MODE_RW, is_host=False)\n\n config.add_to_task('pre', mount_volumes={input_mnt_name: input_vol_rw, output_mnt_name: output_vol_rw},\n env_vars={'SYSTEM_LOGGING_LEVEL': system_logging_level})\n config.add_to_task('main', mount_volumes={input_mnt_name: input_vol_ro, output_mnt_name: output_vol_rw})\n config.add_to_task('post', mount_volumes={output_mnt_name: output_vol_ro},\n env_vars={'SYSTEM_LOGGING_LEVEL': system_logging_level})\n\n\n # Configure output directory\n env_vars = {'OUTPUT_DIR': SCALE_JOB_EXE_OUTPUT_PATH, 'INPUT_METADATA': SCALE_INPUT_METADATA_PATH}\n args = config._get_task_dict('main')['args']\n\n args = environment_expansion(env_vars, args)\n\n config.add_to_task('main', args=args, env_vars=env_vars)\n\n # Configure task resources\n resources = job_exe.get_resources()\n # Pull-task and pre-task require full amount of resources\n config.add_to_task('pull', resources=resources)\n config.add_to_task('pre', resources=resources)\n # Main-task no longer requires the input file space\n resources.subtract(NodeResources([Disk(job_exe.input_file_size)]))\n config.add_to_task('main', resources=resources)\n # Post-task no longer requires any disk space\n resources.remove_resource('disk')\n config.add_to_task('post', resources=resources)", "def launch(config_list):\n p = PyRosLaunch(config_list)\n p.start()\n p.spin()", "def update_worker():\n from test import get_remote_runner\n runner = get_remote_runner()\n runner.run(\"python2.7 /vagrant/bootstrap_lxc_manager.py --update_only=True\")", "def test_get_server_runnable(self):\n global locator, config_paths\n locator.load_config(config_paths[2])\n\n self.assertIsNotNone(locator.get_server_runnable())", "def test_mgr_start_stop(self, runpath):\n driver = self.MyDriver(name=\"MyDriver\", runpath=runpath)\n\n assert not driver.pre_start_called\n assert not driver.post_start_called\n\n with driver:\n assert driver.pre_start_called\n assert driver.post_start_called\n assert not driver.pre_stop_called\n assert not driver.post_stop_called\n\n assert driver.pre_stop_called\n assert driver.post_stop_called", "def main(config):\n command = config.workflow_utils.command\n try:\n subprocess.run(command, shell=True, check=True)\n except AttributeError as exp:\n # add in some backward compatibility for py2.7\n subprocess.check_call(command, shell=True)", "def test_cron_workflow_service_create_cron_workflow(self):\n pass", "def test_workflows_restart(self):\n pass", "def setup_run(args, config): \n\n token = jwtfile.read() # read the JWT so we can send it in the header\n api = config['API']\n if not args.cwl: # beginning of process\n # request to get available options\n hdrs = {'begin-setup': 'True', 'token': token}\n r = Request(api['setup-run-start'], headers=hdrs)\n try:\n resp = urlopen(r)\n # if marked as unverified, we must login first to get a new token\n except HTTPError as e:\n # TODO deal with plain 400\n if e.code in [401, 406]:\n print('Your token is unverified. 
Please log in for another token.')\n login(args, config) # trigger login method\n return\n else:\n print('Was expecting a 401 or 406, got a {}'.format(e.code))\n return\n # print out options to command line\n jsn = json.loads(resp.read().decode()).get('opts', None)\n print('\\nPlease select a CWL and job (.yml) file and re-run this command'\\\n ' with the `--cwl <cwl>` option:\\n')\n print('Available Options\\n----------------')\n for k, v in jsn.items():\n print('{}: {}'.format(k, v))\n return\n cwl_file = args.cwl # get the .cwl\n # ask for a job title so the sevrer can store this\n title = None\n while not title: # can't skip\n title = input('Please enter a title for the job you are creating: ')\n hdrs = {'cwl-input': 'True', 'cwl': cwl_file, 'token': token}\n pld = {'cwl': cwl_file, 'job_title': title}\n r = Request(api['setup-run-select-wkflow'], data=urlencode(pld).encode(), headers=hdrs, method='POST')\n try:\n resp = urlopen(r)\n # we expect a response to ask us questions\n except HTTPError as e:\n if e.getcode() in [401, 406]:\n print('Uh oh, looks like your token has expired. Please re-login.')\n elif e.getcode() == 404: # notfound\n print('A template couldn\\'t be properly generated for that Workflow.')\n else:\n print('Expected 401, 404, 406, got {}'.format(e.getcode()))\n return\n # invoke the questions prompt; iterate through each CWL key\n job_input_dict = {} # initialize empty dict to be updated\n # send the inputs back as JSON\n print('You requested the following Workflow: \\n')\n jsn = json.loads(resp.read().decode()) # bytes to str to dict\n wkflow = jsn.get('workflow', None)\n print(wkflow)\n print('\\n')\n _req = jsn.get('required') # dict, but only because we're using requests lib...\n _opt = jsn.get('optional')\n job_input_dict.update(ask_wkflow(_req, _opt))\n job_inputs = json.dumps(job_input_dict)\n d = {\n 'cwl': cwl_file, \n 'job_inputs': job_inputs,\n 'job_title': title, \n }\n h = {'token': token}\n r = Request(api['setup-run-job-input'], data=urlencode(d).encode(), headers=h, method='POST')\n try:\n resp = urlopen(r)\n except HTTPError as e:\n if e.getcode() in [401, 406]:\n print('Token expired; please re-login')\n else:\n print('Huh?')\n return\n jsn = json.loads(resp.read().decode())\n if jsn.get('errors', {}) == {}: # empty dict means no errors!\n print('Your JOB sucessfully validated.')\n else: # print all errors and ask person to do it again\n #print(r.json.get('errors'))\n print(jsn.get('errors'))\n return", "def start(self):\n\n self.loadConf()\n self.loadDrivers()\n self.loadFeeds()\n self.runScheduler()\n self.scheduler.print_jobs()\n self.scheduler.start()\n self.printConf(\"test\")\n print(\"scheduler started\")", "def run_xenon(\n workflow, *, machine, worker_config, n_processes, deref=False,\n verbose=False):\n\n dynamic_pool = DynamicPool(machine)\n\n for i in range(n_processes):\n cfg = copy(worker_config)\n cfg.name = 'xenon-{0:02}'.format(i)\n dynamic_pool.add_xenon_worker(cfg)\n\n job_keeper = JobKeeper()\n S = Scheduler(job_keeper=job_keeper, verbose=verbose)\n\n result = S.run(\n dynamic_pool, get_workflow(workflow)\n )\n\n dynamic_pool.close_all()\n\n if deref:\n return worker_config.registry().dereference(result, host='scheduler')\n else:\n return result", "def runTestCase(self):\n \n #Login\n self.login() \n \n #Performing Configure Resources \n ResultCR, statusCR = self.test_configureResourec()\n \n if statusCR:\n self.succeed(\"Configure Resources Step Successfully Completed %s\"%ResultCR)\n \n else:\n self.failure(\"Failed to 
Configure Resources Step %s\"%ResultCR)\n \n time.sleep(120)", "def test_cron_workflow_service_terminate_cron_workflow(self):\n pass", "def init_workflow():\n pass", "def _do_bootstrap(self, configs=None):\n pass", "def test_config(self):\n\n # We start in uninitialized state.\n # In this state there is no driver process.\n state = self._ia_client.get_agent_state()\n self.assertEqual(state, ResourceAgentState.UNINITIALIZED)\n \n # Ping the agent.\n retval = self._ia_client.ping_agent()\n log.info(retval)\n\n # Initialize the agent.\n # The agent is spawned with a driver config, but you can pass one in\n # optinally with the initialize command. This validates the driver\n # config, launches a driver process and connects to it via messaging.\n # If successful, we switch to the inactive state.\n cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)\n retval = self._ia_client.execute_agent(cmd)\n state = self._ia_client.get_agent_state()\n self.assertEqual(state, ResourceAgentState.INACTIVE)\n\n # Ping the driver proc.\n retval = self._ia_client.ping_resource()\n log.info(retval)\n\n decoder = IonObjectDeserializer(obj_registry=get_obj_registry())\n\n # Grab the alarms defined in the config.\n retval = decoder.deserialize(self._ia_client.get_agent(['alarms'])['alarms'])\n\n \"\"\"\n {'status': None, 'stream_name': 'parsed', 'name': 'test_sim_warning',\n 'upper_bound': 5.0, 'expr': 'x<5.0', 'upper_rel_op': '<',\n 'lower_rel_op': None, 'type_': 'IntervalAlarmDef', 'value_id': 'temp',\n 'lower_bound': None, 'message': 'Temperature is above test range of 5.0.',\n 'current_val': None, 'type': 1}\n \"\"\"\n self.assertEqual(retval[0].type_, 'IntervalAlarmDef')\n self.assertEqual(retval[0].upper_bound, 5.0)\n self.assertEqual(retval[0].expr, 'x<5.0')\n \n # Reset the agent. This causes the driver messaging to be stopped,\n # the driver process to end and switches us back to uninitialized.\n cmd = AgentCommand(command=ResourceAgentEvent.RESET)\n retval = self._ia_client.execute_agent(cmd)\n state = self._ia_client.get_agent_state()\n self.assertEqual(state, ResourceAgentState.UNINITIALIZED)", "def run_init_new_resource(self,\n name,\n xd_resource_id,\n ppn,\n sshUserName,\n remoteAccessNode,\n localScratch,\n networkScratch,\n akrrData,\n appKerDir,\n batchScheduler,\n authMeth=None,\n sshPassword = None,\n sshPrivateKeyFile = None,\n sshPrivateKeyPassword = None\n ):\n #start bash shell\n bash = self.getBash(setAKRRenv=True,cdToAKRR_HOME=True)\n bash.output=\"\"\n bash.timeoutMessage='Unexpected behavior of init_new_resource.sh (premature EOF or TIMEOUT)'\n \n fasttimeout=3\n slowtimeout=30\n #start prep script\n bash.startcmd(\"$AKRR_HOME/setup/scripts/init_new_resource.sh\")\n \n bash.expectSendline(r'\\[.*INPUT.*]: Enter resource_id for import \\(enter 0 for no match\\):.*\\n',\n '0' if xd_resource_id==None else str(xd_resource_id),timeout=fasttimeout)\n \n bash.expectSendline(r'\\[.*INPUT.*]: Enter AKRR resource name, hit enter to use same name as in XDMoD Database \\[.*\\]:.*\\n',\n '' if name==None else name,timeout=fasttimeout)\n \n bash.expectSendline(r'\\[.*INPUT.*]: Enter queuing system on resource \\(slurm or pbs\\):.*\\n',\n '' if batchScheduler==None else batchScheduler,timeout=fasttimeout)\n \n bash.expectSendline(r'\\[.*INPUT.*]: Enter Resource head node \\(access node\\) full name \\(e.g. 
headnode.somewhere.org\\):.*\\n',\n '' if remoteAccessNode==None else remoteAccessNode,timeout=fasttimeout)\n \n bash.expectSendline(r'\\[.*INPUT.*]: Enter username for resource access:.*\\n',\n '' if sshUserName==None else sshUserName,timeout=fasttimeout)\n \n iMatch=bash.justExpect([r'\\[.*INFO.*\\]: Can access resource without password',\n r'\\[.*INFO.*\\]: Can not access resource without password'],\n timeout=fasttimeout)\n if iMatch==0:\n if authMeth!=None:\n #i.e. the test is to go throurg list\n raise Exception(\"Passwordless access is already set-up, but expectation is to set new access method\")\n elif iMatch==1:\n #Select authentication method:\n # 0 The private and public keys was generated manually, right now. Try again.\n # 1 Use existing private and public key.\n # 2 Generate new private and public key.\n # 3 Use password directly.\n #[INPUT]: Select option from list above:\n bash.expectSendline(r'\\[.*INPUT.*]: Select option from list above:.*\\n\\[.*\\]',\n '' if authMeth==None else str(authMeth),timeout=fasttimeout)\n \n if authMeth==None or authMeth==2:\n bash.expectSendline(r'\\[.*INPUT.*]: Enter password for.*\\n',\n '' if sshPassword==None else str(sshPassword),timeout=fasttimeout)\n bash.expectSendline(r'\\[.*INPUT.*]: Enter private key name:.*\\n\\[.*\\]',\n '' if sshPrivateKeyFile==None else str(sshPrivateKeyFile),timeout=fasttimeout)\n bash.expectSendline(r'\\[.*INPUT.*]: Enter passphrase for new key \\(leave empty for passwordless access\\):.*\\n',\n '' if sshPrivateKeyPassword==None else str(sshPrivateKeyPassword),timeout=fasttimeout)\n elif authMeth==3:\n bash.expectSendline(r'\\[.*INPUT.*]: Enter password for.*\\n',\n '' if sshPassword==None else str(sshPassword),timeout=fasttimeout)\n elif authMeth==1:\n output=bash.justExpect(r'\\[.*INPUT.*]: Select key number from list above:.*\\n',timeout=fasttimeout)\n if sshPrivateKeyFile!=None:\n pkeys={}\n for l in output.splitlines():\n m=re.match(r'^\\s*(\\d+) \\s*(\\S+)',l)\n if m:\n pkeys[m.group(2)]=m.group(1)\n if sshPrivateKeyFile not in pkeys:\n raise Exception(\"Unknown private key: \"+sshPrivateKeyFile)\n bash.startcmd(str(pkeys[sshPrivateKeyFile]))\n else:\n bash.startcmd('0')\n \n bash.expectSendline(r'\\[.*INPUT.*]: Enter password for.*\\n',\n '' if sshPassword==None else str(sshPassword),timeout=fasttimeout)\n #sshPrivateKeyPassword\n bash.expectSendline(r'\\[.*INPUT.*]: Enter processors \\(cores\\) per node count:.*\\n',\n '' if ppn==None else str(ppn),timeout=slowtimeout)\n \n bash.expectSendline(r'\\[.*INPUT.*]: Enter location of local scratch \\(visible only to single node\\):.*\\n\\[.*\\]',\n '' if localScratch==None else str(localScratch),timeout=fasttimeout)\n\n bash.expectSendline(r'\\[.*INPUT.*]: Enter location of network scratch \\(visible only to all nodes\\), used for temporary storage of app kernel input/output:.*\\n',\n '' if networkScratch==None else str(networkScratch),timeout=fasttimeout)\n bash.justExpect(r'\\[.*INFO.*\\]: Directory exist and accessible for read/write')\n\n bash.expectSendline(r'\\[.*INPUT.*]: Enter future location of app kernels input and executable files:.*\\n\\[.*\\]',\n '' if appKerDir==None else str(appKerDir),timeout=fasttimeout)\n bash.justExpect(r'\\[.*INFO.*\\]: Directory exist and accessible for read/write')\n\n bash.expectSendline(r'\\[.*INPUT.*\\]: Enter future locations for app kernels working directories \\(can or even should be on scratch space\\):.*\\n\\[.*\\]',\n '' if akrrData==None else str(akrrData),timeout=fasttimeout)\n 
bash.justExpect(r'\\[.*INFO.*\\]: Directory exist and accessible for read/write')\n \n #wait for prompt\n output=bash.justExpect(bash.prompt,timeout=slowtimeout)\n \n delattr(bash, 'timeoutMessage')\n return copy.deepcopy(bash.output)", "def api(self, config):\n\n # Generate workflow file\n workflow = os.path.join(tempfile.gettempdir(), \"workflow.yml\")\n with open(workflow, \"w\", encoding=\"utf-8\") as f:\n f.write(config)\n\n os.environ[\"CONFIG\"] = workflow\n txtai.api.application.start()\n server = Server(txtai.api.application.app)\n with server.service():\n uid = 0\n while True:\n stop = st.empty()\n click = stop.button(\"stop\", key=uid)\n if not click:\n time.sleep(5)\n uid += 1\n stop.empty()", "def launch(**kwargs):\n\n logger, loghost, logport, clients, guis, params = unpack_launcher(**kwargs)\n config = load_config(kwargs['config'], logger=logger)\n\n\n ao_client = find_client(logger, clients, 'nidaqmx')\n ai_client = find_client(logger, clients, 'nidaqmx_ai')\n\n # Instantiate Monitor script\n laser_stabilizer = LaserStabilizer(\n config=kwargs['config'],\n ao_client=ao_client,\n ai_client=ai_client\n )\n\n update_service = Service()\n update_service.assign_module(module=laser_stabilizer)\n update_service.assign_logger(logger=logger)\n update_server, update_port = create_server(update_service, logger, host=get_ip())\n logger.update_data(data={'port': update_port})\n laser_stabilizer.gui.set_network_info(port=update_port)\n update_server.start()\n\n # Run continuously\n # Note that the actual operation inside run() can be paused using the update server\n while True:\n\n laser_stabilizer.run()", "def run(cfg_dir):\n with pkio.save_chdir(cfg_dir):\n _run_srw()", "def launch(self):\n self.register_env_creator()\n\n # All worker nodes will block at this step during training\n ray_cluster_config = self.ray_init_config()\n if not self.is_master_node:\n return\n\n # Start the driver on master node\n ray.init(**ray_cluster_config)\n experiment_config = self.get_experiment_config()\n experiment_config = self.customize_experiment_config(experiment_config)\n print(\"Running experiment with config %s\" % json.dumps(experiment_config, indent=2))\n run_experiments(experiment_config)\n\n all_wokers_host_names = self.get_all_host_names()[1:]\n # If distributed job, send TERMINATION_SIGNAL to all workers.\n if len(all_wokers_host_names) > 0:\n self.sage_cluster_communicator.create_s3_signal(TERMINATION_SIGNAL)", "def cli():\n #try:\n manager = Actions()\n manager.get_schedulers()\n #except Exception as e:\n # click.echo(e)", "def run_scheduler(scheduler_port=default_scheduler_port):\n scheduler_proc = subprocess.Popen([\n \"dask-scheduler\",\n \"--host\", \n os.environ[\"CDSW_IP_ADDRESS\"], \n \"--port\", \n str(scheduler_port),\n \"--dashboard-address\",\n (\"127.0.0.1:%s\" % os.environ[\"CDSW_READONLY_PORT\"])\n\n ], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n \n # Wait for the scheduler to become ready\n print(\"Waiting for Dask scheduler to become ready...\")\n while True:\n try:\n with socket.create_connection((os.environ[\"CDSW_IP_ADDRESS\"], scheduler_port), timeout=1.0):\n break\n except OSError as ex:\n time.sleep(0.01)\n print(\"Dask scheduler is ready\")\n return scheduler_proc", "def __init__(self, config_path, setup_celery=True):\n _log.info(\"GNU MediaGoblin %s main server starting\", __version__)\n _log.debug(\"Using config file %s\", config_path)\n ##############\n # Setup config\n ##############\n\n # Open and setup the config\n global_config, app_config = 
setup_global_and_app_config(config_path)\n\n setup_crypto()\n\n ##########################################\n # Setup other connections / useful objects\n ##########################################\n\n # Setup Session Manager, not needed in celery\n self.session_manager = session.SessionManager()\n\n # load all available locales\n setup_locales()\n\n # Set up plugins -- need to do this early so that plugins can\n # affect startup.\n _log.info(\"Setting up plugins.\")\n setup_plugins()\n\n # Set up the database\n self.db = setup_database()\n\n # Register themes\n self.theme_registry, self.current_theme = register_themes(app_config)\n\n # Get the template environment\n self.template_loader = get_jinja_loader(\n app_config.get('local_templates'),\n self.current_theme,\n PluginManager().get_template_paths()\n )\n\n # Set up storage systems\n self.public_store, self.queue_store = setup_storage()\n\n # set up routing\n self.url_map = get_url_map()\n\n # set up staticdirector tool\n self.staticdirector = get_staticdirector(app_config)\n\n # Setup celery, if appropriate\n if setup_celery and not app_config.get('celery_setup_elsewhere'):\n if os.environ.get('CELERY_ALWAYS_EAGER', 'false').lower() == 'true':\n setup_celery_from_config(\n app_config, global_config,\n force_celery_always_eager=True)\n else:\n setup_celery_from_config(app_config, global_config)\n\n #######################################################\n # Insert appropriate things into mediagoblin.mg_globals\n #\n # certain properties need to be accessed globally eg from\n # validators, etc, which might not access to the request\n # object.\n #######################################################\n\n setup_globals(app=self)\n\n # Workbench *currently* only used by celery, so this only\n # matters in always eager mode :)\n setup_workbench()\n\n # instantiate application meddleware\n self.meddleware = [common.import_component(m)(self)\n for m in meddleware.ENABLED_MEDDLEWARE]", "def test_runtime_bake(scheduler, os, region, pcluster_config_reader, clusters_factory, test_datadir, architecture):\n # remarkable AMIs are not available for ARM yet\n ami_type = \"remarkable\" if architecture == \"x86_64\" else \"official\"\n cluster_config = pcluster_config_reader(\n custom_ami=retrieve_latest_ami(region, os, ami_type=ami_type, architecture=architecture)\n )\n cluster = clusters_factory(cluster_config)\n remote_command_executor = RemoteCommandExecutor(cluster)\n\n # Verify no chef.io endpoint is called in cloud-init-output log to download chef installer or chef packages\"\"\"\n # on head node\n remote_command_executor.run_remote_script(str(test_datadir / \"verify_chef_download.sh\"))\n # on compute\n scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)\n result = scheduler_commands.submit_script(str(test_datadir / \"verify_chef_download.sh\"))\n job_id = scheduler_commands.assert_job_submitted(result.stdout)\n scheduler_commands.wait_job_completed(job_id)\n scheduler_commands.assert_job_succeeded(job_id)", "def _test_cli_package(self):\n self.keystone_url = self.bootstrap_inputs['keystone_url']\n iaas_resolver = '{0} {1}' \\\n .format(*self._resolve_url_to_ip_and_netloc(self.keystone_url))\n iaas_resolver_cmd = 'echo {0} >> /etc/hosts'.format(iaas_resolver)\n\n # Make sure cli machine is up with a registered ssh key\n wait_for_vm_to_become_ssh_available(env, self._execute_command,\n self.logger)\n\n with self.dns():\n self.logger.info('Preparing CLI and downloading example')\n package_name = self._prepare_cli()\n 
blueprint_path = self.get_hello_world_blueprint()\n\n self._install_cli(package_name)\n self.logger.info('Preparing manager blueprint')\n self.prepare_manager_blueprint()\n self._update_hosts_file(iaas_resolver)\n\n # Getting the remote manager blueprint and preparing resources\n self.logger.info('Retrieving remote manager blueprint file...')\n manager_blueprint = StringIO()\n fab.get(self.test_manager_blueprint_path, manager_blueprint)\n manager_blueprint_yaml = yaml.load(manager_blueprint.getvalue())\n resources_to_download = self._get_resource_list(manager_blueprint_yaml)\n\n # each os should implement any vm-related function before this comment\n\n with FileServer(self._get_file_server_inputs(), resources_to_download,\n FILE_SERVER_PORT, self.logger) as fs:\n additional_inputs = fs.get_processed_inputs()\n self._update_inputs_file(additional_inputs)\n\n self.logger.info('Bootstrapping...')\n self.bootstrap_manager()\n\n # Adding iaas resolver for the manager machine.\n self.logger.info('adding {0} to /etc/hosts of the manager vm'\n .format(iaas_resolver))\n manager_fab_conf = {\n 'user': self.client_user,\n 'key_filename': self._get_manager_kp(),\n 'host_string': self.manager_ip,\n 'timeout': 30,\n 'connection_attempts': 10\n }\n wait_for_vm_to_become_ssh_available(manager_fab_conf,\n self._execute_command,\n self.logger)\n self._run_cmd_on_custom_machine(iaas_resolver_cmd,\n manager_fab_conf, sudo=True)\n\n # Uploading, deploying and testing hello_world blueprint\n # Any sleep is to allow the execution to complete\n # TODO: remove this line when the openstack sg description fix is applied # NOQA\n self._update_example_sg()\n\n self.logger.info('Testing the example deployment cycle...')\n blueprint_id = 'blueprint-{0}'.format(uuid.uuid4())\n\n self._upload_blueprint(blueprint_path, blueprint_id,\n self.app_blueprint_file)\n self.deployment_id = self.create_deployment(blueprint_id)\n self.addCleanup(self.uninstall_deployment)\n self.install_deployment(self.deployment_id)\n self.assert_deployment_working(\n self._get_app_property('http_endpoint'))", "def to_main():\n if env.is_staging:\n print \"Reverting back to PRODUCTION is not allowed for STAGING!\"\n return\n with cd(env.code_dir):\n run('ln -sf celeryconfig-production.py ./api/celeryconfig.py')\n restart_api()", "def _use_existing_schedule(self):\n sh = shelve.open(os.path.expanduser('~/.config/scheduler/schedule'))\n self.schedule = sh['schedule']\n sh.close()", "async def test_startup_schedule(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # Declare schedule startup, and execute\n startup_schedule = StartUpSchedule() # A scheduled process of the _scheduler\n startup_schedule.name = 'startup schedule'\n startup_schedule.process_name = 'sleep30'\n startup_schedule.repeat = datetime.timedelta(seconds=0) # set no repeat to startup\n\n await scheduler.save_schedule(startup_schedule)\n\n await asyncio.sleep(1)\n # Assert no tasks ar running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 0\n\n await scheduler.get_schedule(startup_schedule.schedule_id) # ID of the schedule startup\n\n await self.stop_scheduler(scheduler)\n\n scheduler = Scheduler()\n await scheduler.start()\n\n await asyncio.sleep(2)\n # Assert only 1 task is running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n scheduler.max_running_tasks = 0 # set that no tasks would run\n await 
scheduler.cancel_task(tasks[0].task_id)\n\n await asyncio.sleep(2)\n\n # Assert no tasks are running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 0\n\n scheduler.max_running_tasks = 1\n\n await asyncio.sleep(2)\n\n # Assert a single task is running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n await self.stop_scheduler(scheduler)", "def test_cron_workflow_service_update_cron_workflow(self):\n pass", "def start_scheduler():\n from security_monkey import scheduler\n scheduler.setup_scheduler()\n scheduler.scheduler.start()", "async def async_setup(hass: HomeAssistant, config: Config):\n if hass.data.get(DOMAIN) is None:\n hass.data.setdefault(DOMAIN, {})\n _LOGGER.info(STARTUP_MESSAGE)\n\n # Copy configuration values for later use.\n hass.data[DOMAIN][ATTR_POOL_TEMPERATURE_ENTITY_ID] = config[DOMAIN][\n ATTR_POOL_TEMPERATURE_ENTITY_ID\n ]\n hass.data[DOMAIN][ATTR_POOL_PUMP_MODE_ENTITY_ID] = config[DOMAIN][\n ATTR_POOL_PUMP_MODE_ENTITY_ID\n ]\n hass.data[DOMAIN][ATTR_SWITCH_ENTITY_ID] = config[DOMAIN][ATTR_SWITCH_ENTITY_ID]\n hass.data[DOMAIN][ATTR_WATER_LEVEL_CRITICAL_ENTITY_ID] = config[DOMAIN][\n ATTR_WATER_LEVEL_CRITICAL_ENTITY_ID\n ]\n hass.data[DOMAIN][ATTR_SCHEDULE_BREAK_DURATION_IN_HOURS] = config[DOMAIN][\n ATTR_SCHEDULE_BREAK_DURATION_IN_HOURS\n ]\n\n async def check(call):\n \"\"\"Service: Check if the pool pump should be running now.\"\"\"\n # Use a fixed time reference.\n now = dt_util.now()\n mode = hass.states.get(hass.data[DOMAIN][ATTR_POOL_PUMP_MODE_ENTITY_ID])\n _LOGGER.debug(\"Pool pump mode: %s\", mode.state)\n\n # Only check if pool pump is set to 'Auto'.\n if mode.state == POOL_PUMP_MODE_AUTO:\n manager = PoolPumpManager(hass, now)\n _LOGGER.debug(\"Manager initialised: %s\", manager)\n # schedule = \"Unknown\"\n if await manager.is_water_level_critical():\n schedule = \"Water Level Critical\"\n else:\n run = manager.next_run()\n _LOGGER.debug(\"Next run: %s\", run)\n if not run:\n # Try tomorrow\n tomorrow = now + timedelta(days=1)\n next_midnight = tomorrow.replace(hour=0, minute=0, second=0)\n _LOGGER.debug(\"Next midnight: %s\", next_midnight)\n manager_tomorrow = PoolPumpManager(hass, next_midnight)\n _LOGGER.debug(\"Manager initialised: %s\", manager_tomorrow)\n run = manager_tomorrow.next_run()\n _LOGGER.debug(\"Next run: %s\", run)\n schedule = run.pretty_print()\n # Set time range so that this can be displayed in the UI.\n hass.states.async_set(\n \"{}.{}\".format(DOMAIN, ATTR_NEXT_RUN_SCHEDULE), schedule\n )\n # And now check if the pool pump should be running.\n await manager.check()\n else:\n hass.states.async_set(\n \"{}.{}\".format(DOMAIN, ATTR_NEXT_RUN_SCHEDULE), \"Manual Mode\"\n )\n\n hass.services.async_register(DOMAIN, \"check\", check)\n\n # Return boolean to indicate that initialization was successfully.\n return True", "def init():\n\n @click.command(name='autoscale')\n @click.option(\n '--ipa-certs', required=False, envvar='TREADMILL_IPA_CERTS',\n callback=aws_cli.handle_context_opt,\n is_eager=True,\n default='/etc/ipa/ca.crt',\n expose_value=False\n )\n @click.option(\n '--timeout', required=False, default=_DEFAULT_TIMEOUT, type=int,\n help='Time interval to evaluate state (seconds).'\n )\n @click.option(\n '--max-count', required=True, type=int,\n help='Max server count.'\n )\n @click.option(\n '--min-count', required=False, type=int, default=0,\n help='Min server count.'\n )\n @click.option(\n '--batch-count', required=True, type=int,\n help='Max batch count for new servers.'\n )\n 
@click.option(\n '--app-srv-ratio', required=False, type=float,\n default=_DEFAULT_APP_SERVER_RATIO,\n help='Default app/server ratio.'\n )\n def autoscale_cmd(timeout, max_count, min_count, batch_count,\n app_srv_ratio):\n \"\"\"Autoscale Treadmill cell based on scheduler queue.\"\"\"\n while True:\n create_cnt, extra_servers = autoscale.scale(\n max_servers=max_count,\n min_servers=min_count,\n default_app_srv_ratio=app_srv_ratio,\n max_batch=batch_count)\n if create_cnt > 0:\n autoscale.create_n_servers(create_cnt, partition=None)\n\n if extra_servers:\n autoscale.delete_servers_by_name(extra_servers)\n\n time.sleep(timeout)\n\n return autoscale_cmd", "def run(self):\n config_path = get_exe_path('configuration.exe')\n if config_path:\n subprocess.Popen(config_path)", "def init(args):\n # Setup AWS connection\n aws_eu = connect_from_conf('aws_eu')\n aws_us = connect_from_conf('aws_us')\n ec2_conn['eu-west-1'] = aws_eu['ec2']\n elb_conn['eu-west-1'] = aws_eu['elb']\n ec2_conn['us-west-1'] = aws_us['ec2']\n elb_conn['us-west-1'] = aws_us['elb']\n global schedules\n schedules = get_schedules()", "def test_launch(self):\n\n\n username,userpass = self.testdata.find_account_for('toolsubmitter')\n\n self.utils.account.login_as(username,userpass)\n\n self.contribtool.launch(TOOLNAME,username,userpass)", "def launch_configtool():\r\n from PyQt4 import QtGui\r\n from freeseer.frontend.configtool.configtool import ConfigToolApp\r\n\r\n profile = settings.profile_manager.get()\r\n config = profile.get_config('freeseer.conf', settings.FreeseerConfig,\r\n storage_args=['Global'], read_only=False)\r\n\r\n app = QtGui.QApplication(sys.argv)\r\n main = ConfigToolApp(profile, config)\r\n main.show()\r\n sys.exit(app.exec_())", "def on_initial_bootstrap(self, process, config, **kwargs):\n\n # get default org_id\n # @TODO: single org assumed for now\n org_ids = process.container.resource_registry.find_resources(RT.Org, id_only=True)\n if not (len(org_ids) and len(org_ids[0]) == 1):\n raise StandardError(\"Could not determine org_id\")\n\n org_id = org_ids[0][0]\n\n ems_client = ExchangeManagementServiceProcessClient(process=process)\n\n #\n # Create XSs and XPs\n #\n for xsname, xsdict in config.get_safe('exchange_spaces', {}).iteritems():\n xso = ResExchangeSpace(name=xsname)\n xso_id = ems_client.create_exchange_space(xso, org_id)\n\n log.info(\"ExchangeSpace %s, id %s\", xsname, xso_id)\n\n for xpname, xpopts in xsdict.get('exchange_points', {}).iteritems():\n\n # @TODO: some translation for types CFG currentl has it as \"topic_tree\" and we've been using \"ttree\"\n ttype = xpopts.get('type', 'topic_tree')\n if ttype == \"topic_tree\":\n ttype = \"ttree\"\n\n xpo = ResExchangePoint(name=xpname, topology_type=ttype)\n xpo_id = ems_client.create_exchange_point(xpo, xso_id)\n\n log.info(\"\\tExchangePoint %s, id %s\", xpname, xpo_id)\n\n #\n # Create and associate brokers with XSs\n #\n for brokername in xsdict.get('brokers', []):\n xbo = ResExchangeBroker(name=brokername)\n xbo_id = ems_client.create_exchange_broker(xbo)\n\n log.info(\"\\tExchangeBroker %s, id %s\", brokername, xbo_id)\n\n # directly associate broker with XS\n # @TODO: should EMS provide this?\n # first find out if the assoc exists already\n assocs = process.container.resource_registry.find_associations(xso_id, PRED.hasExchangeBroker, id_only=True)\n if len(assocs) > 0:\n continue\n process.container.resource_registry.create_association(xso_id, PRED.hasExchangeBroker, xbo_id)", "def __init__(__self__,\n resource_name: str,\n args: 
WorkspaceSamlConfigurationArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: MonitorConfigPolicyArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def prelaunch_rsmc(conf,logger,cycle):\n rsmc_overrides=conf.getbool('prelaunch','rsmc_overrides')\n if not rsmc_overrides:\n logger.info('RSMC overrides are disabled.')\n return\n\n vit=conf.syndat\n rsmc=str(vit.center).upper()\n rfile=conf.strinterp('prelaunch','{rsmc_conf}',RSMC=rsmc)\n if not produtil.fileop.isnonempty(rfile):\n logger.warning('%s: RSMC override file is empty or non-existent'\n %(rfile,))\n conf.read(rfile)", "def local_celery():\n click.echo('Start Celery on Machine')\n ret = subprocess.call(\n ['celery', 'worker', '-A', 'celery_worker.celery', '--loglevel=info', '-P', 'eventlet'])\n sys.exit(ret)", "def _configure_remote_executor(ex, cardinalities, loop):\n if loop.is_running():\n asyncio.run_coroutine_threadsafe(ex.set_cardinalities(cardinalities), loop)\n else:\n loop.run_until_complete(ex.set_cardinalities(cardinalities))\n return", "def bootstrap_post():\n\timport os\n\tprint('status','running bootstrap_post from bootstrap.py')\n\tos.system('make config')\n\treturn", "def configure_scheduler(self, config: types.SimpleNamespace) -> SurveyTopology:\n\n if \"parameters\" in config.driver_configuration:\n for parameter in config.driver_configuration[\"parameters\"]:\n value = config.driver_configuration[\"parameters\"][parameter]\n self.log.debug(f\"Setting driver parameter: {parameter} = {value}\")\n setattr(\n self.parameters,\n parameter,\n value,\n )\n\n if not hasattr(config, \"driver_configuration\"):\n raise RuntimeError(\n \"No driver_configuration section defined in configuration.\"\n )\n\n return self.get_survey_topology(config)", "def runner_setup():\n concurrent_sessions = 5\n runner = VisualGridRunner(concurrent_sessions)\n yield runner", "def run_starter(self, expect_to_fail=False):", "def run(self):\n\n if self.nproc > 0:\n # get resources\n nodes = self.RM.get_allocation(self, self.nproc, self.mem_pproc, self.disk_pproc)\n\n # did we actually get nodes?????\n if nodes >= 0:\n #--------------------------------\n # update resource usage\n #--------------------------------\n self.using.nodes = nodes\n self.using.procs = self.nproc\n if self.start_waiting_time >= 0:\n self.total_waiting_time += self.fwk.fwk_global_time - self.start_waiting_time\n self.start_waiting_time = -1\n\n #--------------------------------\n # set curr_exec_time, start_exec_time, and state\n #--------------------------------\n self.get_curr_exec_time()\n\n #--------------------------------\n # log event\n #--------------------------------\n if self.retry == True:\n if self.sim.retry_limit > 0 and self.curr_retries < self.sim.retry_limit:\n self.num_retries += 1\n self.curr_retries += 1\n self.fwk.logEvent(self.sim.name, self.name, \"relaunch_task\", \"relaunched attempt %d on %d processes on %d nodes\" %(self.retry, self.using.procs, self.using.nodes))\n else:\n #print \"exceeded retry limit\"\n if self.fwk.debug:\n print('exceeded retry limit, killing sim from component.')\n self.sim.kill()\n else:\n self.fwk.logEvent(self.sim.name, self.name, \"start_task\", \"started running on %d processes on %d nodes\" % (self.using.procs, self.using.nodes))\n else:\n #-------------------------------------------\n # we did not get the resources we wanted\n #-------------------------------------------\n self.state = \"waiting_on_resources\"\n if self.start_waiting_time 
== -1:\n self.start_waiting_time = self.fwk.fwk_global_time\n self.num_waiting += 1\n #--------------------------------\n # log event\n #--------------------------------\n self.fwk.logEvent(self.sim.name, self.name, \"waiting_on_procs\", \"needs %d procs %d memory pproc %d disk pproc\" % (self.nproc, self.mem_pproc, self.disk_pproc))\n else:\n # non-resource consuming component\n self.get_curr_exec_time()\n if self.retry == True:\n self.fwk.logEvent(self.sim.name, self.name, \"relaunch_task\", \"relaunched, attempt %d\" %(self.num_retries))\n else:\n self.fwk.logEvent(self.sim.name, self.name, \"start_task\", \"started\")", "def execute_flow(self, server_config):\n\n with self._cli_handler.get_cli_service(self._cli_handler.enable_mode) as enable_session:\n config_actions = TRexServerConfigActions(enable_session)\n\n if server_config:\n config_actions.custom_trex_config(server_config_path=server_config)\n else:\n config_actions.default_trex_config()\n\n config_actions.start_trex_daemon()", "def test_cron_workflow_service_get_cron_workflow(self):\n pass", "def test_workflow_class_discovery():\n config = {\n \"workflow-name\": \"tests.workflows.test_workflow.CustomWorkflow\",\n \"cluster-type\": CLUSTER_TYPE\n }\n \n template_dir = tempfile.mkdtemp(suffix=\"test-workflow-discovery-template\")\n with open(f\"{template_dir}/workflow.yaml\", 'w') as f:\n yaml.dump(config, f)\n \n _execution_dir, workflow = launch_flow(template_dir, 1)\n assert isinstance(workflow, CustomWorkflow)\n assert workflow.execute.didrun", "def updateRunningConf(self, responsible):\n stop_command, start_command = map(\n self.gen_actionCommand, (\"stop\", \"start\")\n )\n\n #runCommandAsRootAndCheck raises in case of an error (perfect!)\n yield deferToThread(self.runCommandAsRootAndCheck, stop_command)\n\n ntpservers = self.CONFIG['ntpservers'].split(' ')\n responsible.feedback(\n tr(\"Reference servers: %(NTPSERVERS)s\"),\n NTPSERVERS=', '.join(ntpservers)\n )\n\n ha_status = None\n context = Context.fromComponent(self)\n if EDENWALL and self.core.hasComponent(context, 'ha'):\n try:\n ha_status = yield self.core.callService(context, 'ha', 'getHAMode')\n except Exception, err:\n self.error(exceptionAsUnicode(err))\n peers = _ntppeers(responsible, ha_status)\n\n template_variables = {\n 'ntpservers' : self.CONFIG['ntpservers'].split(' '),\n 'peers': peers,\n }\n self.generate_configfile(template_variables)\n\n yield deferToThread(self.runCommandAsRootAndCheck, start_command)", "def test_config_remove(self):\n server = self.start_server(\"hello world\", 200)\n try:\n self.setup_dynamic()\n\n cfg_file = \"test.yml\"\n\n self.write_dyn_config(\n cfg_file, self.http_cfg(\"myid\", \"http://localhost:{}\".format(server.server_port)))\n\n self.wait_until(lambda: self.output_has(lines=2))\n\n self.assert_last_status(\"up\")\n\n os.remove(self.monitors_dir() + cfg_file)\n\n # Ensure the job was removed from the schduler\n self.wait_until(lambda: self.log_contains(\"Remove scheduler job 'myid'\"))\n self.wait_until(lambda: self.log_contains(\"Job 'myid' returned\"))\n\n self.proc.check_kill_and_wait()\n finally:\n server.shutdown()", "def start_sml():\n launchfile = basepath + '/launch/teststarter.launch'\n\n uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)\n #print roslaunch.rlutil.check_roslaunch(launchfile)\n #roslaunch.configure_logging(uuid)\n launch = roslaunch.parent.ROSLaunchParent(uuid, [launchfile])\n launch.start()", "def run_resource_action(owner_id, action, name, resource_id):\n\n schedule = 
Schedule.objects.get(org=owner_id, name=name, deleted=None)\n resource_type = schedule.resource_model_name\n resource_cls = schedule.selector_resource_cls\n resource_id_key = f'{resource_type}_id'\n log_dict = {\n 'owner_id': owner_id,\n 'event_type': 'job',\n resource_id_key: resource_id,\n 'schedule_id': schedule.id,\n }\n\n external_id = ''\n cloud_id = ''\n owner = Owner.objects.get(id=owner_id)\n started_at = time()\n query = dict(id=resource_id)\n if resource_type == 'machine':\n query['state__ne'] = 'terminated'\n try:\n resource = resource_cls.objects.get(**query)\n except me.DoesNotExist:\n log_dict['error'] = \"Resource with that id does not exist.\"\n msg = action + ' failed'\n log_event(action=msg, **log_dict)\n except Exception as exc:\n log_dict['error'] = str(exc)\n msg = action + ' failed'\n log_event(action=msg, **log_dict)\n\n if not log_dict.get('error'):\n if resource_type == 'machine':\n machine = resource\n log_dict['cloud_id'] = cloud_id = machine.cloud.id\n log_dict['external_id'] = external_id = machine.external_id\n if action in ('start', 'stop', 'reboot', 'destroy', 'notify'):\n # call list machines here cause we don't have another way\n # to update machine state if user isn't logged in\n from mist.api.machines.methods import list_machines\n from mist.api.machines.methods import destroy_machine\n # TODO change this to compute.ctl.list_machines\n list_machines(owner, cloud_id)\n\n if action == 'start':\n log_event(action='Start', **log_dict)\n try:\n machine.ctl.start()\n except Exception as exc:\n log_dict['error'] = '%s Machine in %s state' % (\n exc, machine.state)\n log_event(action='Start failed', **log_dict)\n else:\n log_event(action='Start succeeded', **log_dict)\n elif action == 'stop':\n log_event(action='Stop', **log_dict)\n try:\n machine.ctl.stop()\n except Exception as exc:\n log_dict['error'] = '%s Machine in %s state' % (\n exc, machine.state)\n log_event(action='Stop failed', **log_dict)\n else:\n log_event(action='Stop succeeded', **log_dict)\n elif action == 'reboot':\n log_event(action='Reboot', **log_dict)\n try:\n machine.ctl.reboot()\n except Exception as exc:\n log_dict['error'] = '%s Machine in %s state' % (\n exc, machine.state)\n log_event(action='Reboot failed', **log_dict)\n else:\n log_event(action='Reboot succeeded', **log_dict)\n elif action == 'destroy':\n log_event(action='Destroy', **log_dict)\n try:\n destroy_machine(owner, cloud_id, external_id)\n except Exception as exc:\n log_dict['error'] = '%s Machine in %s state' % (\n exc, machine.state)\n log_event(action='Destroy failed', **log_dict)\n else:\n log_event(action='Destroy succeeded', **log_dict)\n elif action == 'notify':\n mails = []\n for _user in [machine.owned_by, machine.created_by]:\n if _user:\n mails.append(_user.email)\n for mail in list(set(mails)):\n if mail == machine.owned_by.email:\n user = machine.owned_by\n else:\n user = machine.created_by\n subject = \\\n config.MACHINE_EXPIRE_NOTIFY_EMAIL_SUBJECT.format(\n portal_name=config.PORTAL_NAME\n )\n if schedule.when.type == 'reminder' and \\\n schedule.when.message:\n custom_msg = '\\n%s\\n' % \\\n schedule.when.message\n else:\n custom_msg = ''\n machine_uri = config.PORTAL_URI + \\\n '/machines/%s' % machine.id\n main_body = config.MACHINE_EXPIRE_NOTIFY_EMAIL_BODY\n sch_entry = machine.expiration.when.entry\n body = main_body.format(\n fname=user.first_name,\n machine_name=machine.name,\n expiration=sch_entry,\n uri=machine_uri + '/expiration',\n custom_msg=custom_msg,\n portal_name=config.PORTAL_NAME)\n 
log.info('About to send email...')\n if not helper_send_email(subject, body, user.email):\n raise ServiceUnavailableError(\n \"Could not send notification email \"\n \"about machine that is about to expire.\")\n elif resource_type == 'cluster':\n cluster = resource\n log_dict.update({'cloud_id': cluster.cloud.id,\n 'external_id': cluster.external_id})\n if action == 'destroy':\n log_event(action='Destroy', **log_dict)\n try:\n cluster.ctl.destroy()\n except Exception as exc:\n log_dict['error'] = str(exc)\n log_event(action='Destroy failed', **log_dict)\n else:\n log_event(action='Destroy succeeded', **log_dict)\n elif resource_type == 'network':\n network = resource\n log_dict.update({'cloud_id': network.cloud.id,\n 'network_id': network.network_id})\n if action == 'delete':\n log_event(action='Delete', **log_dict)\n try:\n network.ctl.delete()\n except Exception as exc:\n log_dict['error'] = str(exc)\n log_event(action='Delete failed', **log_dict)\n else:\n log_event(action='Delete succeeded', **log_dict)\n elif resource_type == 'volume':\n volume = resource\n log_dict.update({'cloud_id': volume.cloud.id,\n 'external_id': volume.external_id})\n if action == 'delete':\n log_event(action='Delete', **log_dict)\n try:\n volume.ctl.delete()\n except Exception as exc:\n log_dict['error'] = str(exc)\n log_event(action='Delete failed', **log_dict)\n else:\n log_event(action='Delete succeeded', **log_dict)\n\n if action != 'notify' and log_dict.get('error'):\n # TODO markos asked this\n log_dict['started_at'] = started_at\n log_dict['finished_at'] = time()\n title = \"Execution of '%s' action \" % action\n title += \"failed\" if log_dict.get('error') else \"succeeded\"\n from mist.api.methods import notify_user\n notify_user(\n owner, title,\n cloud_id=cloud_id,\n external_id=external_id,\n duration=log_dict['finished_at'] - log_dict['started_at'],\n error=log_dict.get('error'),\n )", "def test_resources_exception(self):\n with self.assertRaises(ProcessorConfigError) as context:\n self.pl.resource.remove(\"onto_specs_path\")\n self.pl.resource.remove(\"onto_specs_dict\")\n self.pl.add(\n self._stave_processor,\n config={\"port\": self._port, \"server_thread_daemon\": True},\n )\n self.pl.run(self._dataset_dir)", "def main():\n ensure_not_root()\n config.setup()\n model.init_db()\n manager.run()", "def _load_config(self):\n\n options = dict()\n\n job_stores = self.app.config.get('SCHEDULER_JOBSTORES')\n if job_stores:\n options['jobstores'] = job_stores\n\n executors = self.app.config.get('SCHEDULER_EXECUTORS')\n if executors:\n options['executors'] = executors\n\n job_defaults = self.app.config.get('SCHEDULER_JOB_DEFAULTS')\n if job_defaults:\n options['job_defaults'] = job_defaults\n\n timezone = self.app.config.get('SCHEDULER_TIMEZONE')\n if timezone:\n options['timezone'] = timezone\n\n self._scheduler.configure(**options)\n\n\n self.jobconfig = self.app.config.get('SCHEDULER_JOBCONFIG', None) # Textual reference to the jobs dictionary.\n self.auth = self.app.config.get('SCHEDULER_AUTH', self.auth)\n self.api_enabled = self.app.config.get('SCHEDULER_VIEWS_ENABLED', self.api_enabled) # for compatibility reason\n self.api_enabled = self.app.config.get('SCHEDULER_API_ENABLED', self.api_enabled)\n self.allowed_hosts = self.app.config.get('SCHEDULER_ALLOWED_HOSTS', self.allowed_hosts)", "def configure_schedulers(self, **kwargs):\n if self.scheduler_type == 'CosineAnnealingWarmRestarts':\n self.scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(self.optimizer, 
**self.scheduler_params)\n elif self.scheduler_type == 'CosineAnnealingLR':\n self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer, **self.scheduler_params)\n elif self.scheduler_type == 'CyclicLR':\n self.scheduler = torch.optim.lr_scheduler.CyclicLR(self.optimizer, **self.scheduler_params)\n else:\n self.scheduler = None", "def t0_switch_config_helper(test_obj: 'T0TestBase'):\n configer = SwitchConfiger(test_obj)\n test_obj.dut.switch_id = configer.start_switch()", "def setup_module(module):\n config_manager.stop()", "def _launch(self,\n max_retry: Optional[int] = 3,\n raise_on_failure: bool = True) -> Optional[float]:\n # TODO(zhwu): handle the failure during `preparing sky runtime`.\n retry_cnt = 0\n backoff = common_utils.Backoff(self.RETRY_INIT_GAP_SECONDS)\n while True:\n retry_cnt += 1\n try:\n usage_lib.messages.usage.set_internal()\n # Detach setup, so that the setup failure can be detected\n # by the controller process (job_status -> FAILED_SETUP).\n sky.launch(self.dag,\n cluster_name=self.cluster_name,\n detach_setup=True,\n detach_run=True,\n _is_launched_by_spot_controller=True)\n logger.info('Spot cluster launched.')\n except (exceptions.InvalidClusterNameError,\n exceptions.NoCloudAccessError,\n exceptions.ResourcesMismatchError) as e:\n logger.error('Failure happened before provisioning. '\n f'{common_utils.format_exception(e)}')\n if raise_on_failure:\n raise exceptions.ProvisionPrechecksError(reasons=[e])\n return None\n except exceptions.ResourcesUnavailableError as e:\n # This is raised when the launch fails due to prechecks or\n # after failing over through all the candidates.\n # Please refer to the docstring of `sky.launch` for more\n # details of how the exception will be structured.\n if not any(\n isinstance(err, exceptions.ResourcesUnavailableError)\n for err in e.failover_history):\n # _launch() (this function) should fail/exit directly, if\n # none of the failover reasons were because of resource\n # unavailability or no failover was attempted (the optimizer\n # cannot find feasible resources for requested resources),\n # i.e., e.failover_history is empty.\n # Failing directly avoids the infinite loop of retrying\n # the launch when, e.g., an invalid cluster name is used\n # and --retry-until-up is specified.\n reasons = (e.failover_history\n if e.failover_history else [e])\n reasons_str = '; '.join(\n common_utils.format_exception(err) for err in reasons)\n logger.error(\n 'Failure happened before provisioning. Failover '\n f'reasons: {reasons_str}')\n if raise_on_failure:\n raise exceptions.ProvisionPrechecksError(\n reasons=reasons)\n return None\n logger.info('Failed to launch the spot cluster with error: '\n f'{common_utils.format_exception(e)})')\n except Exception as e: # pylint: disable=broad-except\n # If the launch fails, it will be recovered by the following\n # code.\n logger.info('Failed to launch the spot cluster with error: '\n f'{common_utils.format_exception(e)})')\n logger.info(f' Traceback: {traceback.format_exc()}')\n else: # No exception, the launch succeeds.\n # At this point, a sky.launch() has succeeded. 
Cluster may be\n # UP (no preemption since) or DOWN (newly preempted).\n job_submitted_at = self._wait_until_job_starts_on_cluster()\n if job_submitted_at is not None:\n return job_submitted_at\n # The job fails to start on the spot cluster, retry the launch.\n # TODO(zhwu): log the unexpected error to usage collection\n # for future debugging.\n logger.info(\n 'Failed to successfully submit the job to the '\n 'launched cluster, due to unexpected submission errors or '\n 'the cluster being preempted during job submission.')\n\n terminate_cluster(self.cluster_name)\n if max_retry is not None and retry_cnt >= max_retry:\n # Retry forever if max_retry is None.\n if raise_on_failure:\n with ux_utils.print_exception_no_traceback():\n raise exceptions.SpotJobReachedMaxRetriesError(\n 'Resources unavailable: failed to launch the spot '\n f'cluster after {max_retry} retries.')\n else:\n return None\n gap_seconds = backoff.current_backoff()\n logger.info('Retrying to launch the spot cluster in '\n f'{gap_seconds:.1f} seconds.')\n time.sleep(gap_seconds)", "def provision(args):\n cfg_file = os.path.join(xbow.XBOW_CONFIGDIR, \"settings.yml\")\n\n with open(cfg_file, 'r') as ymlfile:\n cfg = yaml.safe_load(ymlfile)\n\n scheduler = get_by_name(cfg['scheduler_name'])\n if len(scheduler) == 0:\n raise ValueError('Error - cannot find the scheduler')\n elif len(scheduler) > 1:\n raise ValueError('Error - more than one scheduler found')\n workers = get_by_name(cfg['worker_pool_name'])\n if len(workers) == 0:\n print('Warning: no workers found')\n all_nodes = scheduler + workers\n all_cis = [ConnectedInstance(i) for i in all_nodes]\n with open(args.script, 'r') as f:\n for line in f:\n if len(line) > 0 and line[0] == '#':\n print(line[:-1])\n elif len(line) > 0 :\n command = line[:-1]\n if command.split()[0] != 'sudo':\n command = 'sudo ' + command\n print(command + ' : ', end='', flush=True)\n result = exec_all(all_cis, command)\n status = np.all(np.array(result) == 0)\n if status:\n print('OK')\n else:\n print('FAILED')\n for i in range(len(result)):\n if result[i] != 0:\n if i == 0:\n print('Error on scheduler:')\n else:\n print('Error on worker {}'.format(i-1))\n print(all_cis[i].output)\n break\n else:\n status = False\n print(line[:-1], ' : ERROR')\n break\n\n return status", "def main():\n try:\n # parse argument\n s3_region = sys.argv[1]\n s3_bucket = sys.argv[2]\n s3_prefix = sys.argv[3]\n s3_yaml_name = sys.argv[4]\n launch_name = sys.argv[5]\n\n # create boto3 session/client and download yaml/json file\n session = boto3.session.Session()\n\n s3_endpoint_url = os.environ.get(\"S3_ENDPOINT_URL\", None)\n \n if s3_endpoint_url is not None:\n LOG.info('Endpoint URL {}'.format(s3_endpoint_url))\n rospy.set_param('S3_ENDPOINT_URL', s3_endpoint_url)\n else:\n # create boto3 session/client and download yaml/json file\n ec2_client = session.client('ec2', s3_region)\n LOG.info('Checking internet connection...')\n response = ec2_client.describe_vpcs()\n if not response['Vpcs']:\n log_and_exit(\"No VPC attached to instance\", SIMAPP_SIMULATION_WORKER_EXCEPTION,\n SIMAPP_EVENT_ERROR_CODE_500)\n LOG.info('Verified internet connection')\n\n s3_client = session.client('s3', region_name=s3_region, endpoint_url=s3_endpoint_url, config=get_boto_config())\n\n yaml_key = os.path.normpath(os.path.join(s3_prefix, s3_yaml_name))\n local_yaml_path = os.path.abspath(os.path.join(os.getcwd(), s3_yaml_name))\n s3_client.download_file(Bucket=s3_bucket, Key=yaml_key, Filename=local_yaml_path)\n # Get values passed in yaml 
files. Default values are for backward compatibility and for single racecar racing\n default_yaml_values = {RACE_TYPE_YAML_KEY: TIME_TRIAL_RACE_TYPE,\n MODEL_S3_BUCKET_YAML_KEY: s3_bucket,\n MODEL_S3_PREFIX_YAML_KEY: s3_prefix,\n CAR_COLOR_YAML_KEY: DEFAULT_COLOR,\n MODEL_METADATA_FILE_S3_YAML_KEY: None}\n yaml_dict = get_yaml_dict(local_yaml_path)\n yaml_values = get_yaml_values(yaml_dict, default_yaml_values)\n\n # Forcing the yaml parameter to list\n force_list_params = [MODEL_METADATA_FILE_S3_YAML_KEY, MODEL_S3_BUCKET_YAML_KEY, MODEL_S3_PREFIX_YAML_KEY,\n CAR_COLOR_YAML_KEY]\n\n for params in force_list_params:\n yaml_values[params] = force_list(yaml_values[params])\n\n # Populate the model_metadata_s3_key values to handle both training and evaluation for all race_formats\n if None in yaml_values[MODEL_METADATA_FILE_S3_YAML_KEY]:\n # MODEL_METADATA_FILE_S3_KEY not passed as part of yaml file ==> This happens during evaluation\n # Assume model_metadata.json is present in the s3_prefix/model/ folder\n yaml_values[MODEL_METADATA_FILE_S3_YAML_KEY] = list()\n for s3_prefix in yaml_values[MODEL_S3_PREFIX_YAML_KEY]:\n yaml_values[MODEL_METADATA_FILE_S3_YAML_KEY].append(os.path.join(s3_prefix, 'model/model_metadata.json'))\n\n # Set multicar value if its a head to model racetype\n multicar = yaml_values[RACE_TYPE_YAML_KEY] == HEAD_TO_MODEL_RACE_TYPE\n # Validate the yaml values\n validate_yaml_values(yaml_values, multicar)\n # List of racecar names that should include second camera while launching\n racecars_with_stereo_cameras = list()\n\n # List of racecar names that should include lidar while launching\n racecars_with_lidars = list()\n\n # List of SimApp versions\n simapp_versions = list()\n\n for agent_index, model_s3_bucket in enumerate(yaml_values[MODEL_S3_BUCKET_YAML_KEY]):\n\n racecar_name = 'racecar_'+str(agent_index) if len(yaml_values[MODEL_S3_BUCKET_YAML_KEY]) > 1 else 'racecar'\n # Make a local folder with the racecar name to download the model_metadata.json\n if not os.path.exists(os.path.join(os.getcwd(), racecar_name)):\n os.makedirs(os.path.join(os.getcwd(), racecar_name))\n local_model_metadata_path = os.path.abspath(os.path.join(os.path.join(os.getcwd(), racecar_name),\n 'model_metadata.json'))\n json_key = yaml_values[MODEL_METADATA_FILE_S3_YAML_KEY][agent_index]\n json_key = json_key.replace('s3://{}/'.format(model_s3_bucket), '')\n s3_client.download_file(Bucket=model_s3_bucket, Key=json_key, Filename=local_model_metadata_path)\n sensors, _, simapp_version = utils_parse_model_metadata.parse_model_metadata(local_model_metadata_path)\n simapp_versions.append(simapp_version)\n if Input.STEREO.value in sensors:\n racecars_with_stereo_cameras.append(racecar_name)\n if Input.LIDAR.value in sensors or Input.SECTOR_LIDAR.value in sensors:\n racecars_with_lidars.append(racecar_name)\n\n cmd = [''.join((\"roslaunch deepracer_simulation_environment {} \".format(launch_name),\n \"local_yaml_path:={} \".format(local_yaml_path),\n \"racecars_with_stereo_cameras:={} \".format(','.join(racecars_with_stereo_cameras)),\n \"racecars_with_lidars:={} multicar:={} \".format(','.join(racecars_with_lidars), multicar),\n \"car_colors:={} simapp_versions:={}\".format(','.join(yaml_values[CAR_COLOR_YAML_KEY]),\n ','.join(simapp_versions))))]\n Popen(cmd, shell=True, executable=\"/bin/bash\")\n \n except botocore.exceptions.ClientError as ex:\n log_and_exit(\"Download params and launch of agent node failed: s3_bucket: {}, yaml_key: {}, {}\"\n .format(s3_bucket, yaml_key, ex), \n 
SIMAPP_SIMULATION_WORKER_EXCEPTION,\n SIMAPP_EVENT_ERROR_CODE_400)\n except botocore.exceptions.EndpointConnectionError:\n log_and_exit(\"No Internet connection or s3 service unavailable\",\n SIMAPP_SIMULATION_WORKER_EXCEPTION,\n SIMAPP_EVENT_ERROR_CODE_500)\n except Exception as ex:\n log_and_exit(\"Download params and launch of agent node failed: s3_bucket: {}, yaml_key: {}, {}\"\n .format(s3_bucket, yaml_key, ex), \n SIMAPP_SIMULATION_WORKER_EXCEPTION,\n SIMAPP_EVENT_ERROR_CODE_500)", "def startScheduler():\n # print(scheduler.state)\n if scheduler.state == STATE_STOPPED:\n url = 'postgres://{}:{}@{}:{}/{}'.format(\n settings.DATABASES['default']['USER'],\n settings.DATABASES['default']['PASSWORD'],\n settings.DATABASES['default']['HOST'],\n settings.DATABASES['default']['PORT'],\n settings.DATABASES['default']['NAME']\n )\n scheduler.add_jobstore('sqlalchemy', url=url, tablename=\"taskstore\")\n scheduler.start()", "def test_by_config(self):\n # addon_executor = AddonExecutor(execute_order, stop_order)\n # self.assertEqual(expected, addon_executor.execute_with_config(addon))\n\n self.run_mgr.by_default(self.cli_inst)\n output = self._get_lines_as_list(sys.stdout)\n\n self.assertTrue(output[0].startswith('Start'))\n self.assertTrue(output[1].startswith('Execute'))\n self.assertTrue(output[2].startswith('Stop'))", "def run(tag, devmode, img_passwd_file, install_server_hostname,\n custom_cli_subnet, custom_db_subnet, clitests, builder):\n manager = Manager(\n 'run', tag, devmode=devmode, img_passwd_file=img_passwd_file,\n install_server_hostname=install_server_hostname,\n custom_cli_subnet=custom_cli_subnet, custom_db_subnet=custom_db_subnet,\n clitests=clitests, builder_hostname=builder)\n manager.run()", "def check_lr_schedulers(self) -> None:\n # set default scheduler\n if (\n \"LR_SCHEDULER\" not in self.config\n or self.config[\"LR_SCHEDULER\"] == \"Identity\"\n ):\n self.config[\"LR_SCHEDULER\"] = \"Identity\"\n self.config[\"LR_SCHEDULER_PARAMS\"] = dict()\n\n lr_scheduler_names = get_class_names_in_files(\n \"src\" + os.path.sep + \"lr_schedulers.py\"\n )\n lr_scheduler_names.remove(\"LrScheduler\")\n\n # Check config regularizer exists\n assert self.config[\"LR_SCHEDULER\"] in lr_scheduler_names\n assert \"LR_SCHEDULER_PARAMS\" in self.config\n assert isinstance(self.config[\"LR_SCHEDULER_PARAMS\"], dict)\n\n if self.config[\"LR_SCHEDULER\"] == \"MultiStepLR\":\n # milestones: list[int]\n assert \"milestones\" in self.config[\"LR_SCHEDULER_PARAMS\"]\n for n in self.config[\"LR_SCHEDULER_PARAMS\"][\"milestones\"]:\n assert isinstance(n, int)\n\n assert \"gamma\" in self.config[\"LR_SCHEDULER_PARAMS\"]\n assert 0 < self.config[\"LR_SCHEDULER_PARAMS\"][\"gamma\"] <= 1.0\n assert isinstance(self.config[\"LR_SCHEDULER_PARAMS\"][\"gamma\"], float)\n\n elif self.config[\"LR_SCHEDULER\"] == \"WarmupCosineLR\":\n # set epochs: int\n self.config[\"LR_SCHEDULER_PARAMS\"][\"epochs\"] = self.config[\"EPOCHS\"]\n\n # set target_lr: float\n self.config[\"LR_SCHEDULER_PARAMS\"][\"target_lr\"] = self.config[\"LR\"]\n\n # warmp_epochs\n assert \"warmup_epochs\" in self.config[\"LR_SCHEDULER_PARAMS\"]\n assert (\n 0\n <= self.config[\"LR_SCHEDULER_PARAMS\"][\"warmup_epochs\"]\n <= self.config[\"EPOCHS\"]\n )\n assert isinstance(self.config[\"LR_SCHEDULER_PARAMS\"][\"warmup_epochs\"], int)\n\n # start_lr\n if self.config[\"LR_SCHEDULER_PARAMS\"][\"warmup_epochs\"] != 0:\n assert \"start_lr\" in self.config[\"LR_SCHEDULER_PARAMS\"]\n assert (\n 0\n < 
self.config[\"LR_SCHEDULER_PARAMS\"][\"start_lr\"]\n <= self.config[\"LR\"]\n )\n assert isinstance(self.config[\"LR_SCHEDULER_PARAMS\"][\"start_lr\"], float)\n\n # n_rewinding\n if \"n_rewinding\" not in self.config[\"LR_SCHEDULER_PARAMS\"]:\n self.config[\"LR_SCHEDULER_PARAMS\"][\"n_rewinding\"] = 1\n else:\n assert type(self.config[\"LR_SCHEDULER_PARAMS\"][\"n_rewinding\"]) is int\n assert self.config[\"LR_SCHEDULER_PARAMS\"][\"n_rewinding\"] > 0\n assert (\n self.config[\"EPOCHS\"]\n % self.config[\"LR_SCHEDULER_PARAMS\"][\"n_rewinding\"]\n == 0\n )\n\n # Check zero division in lr scheduling\n assert (\n self.config[\"EPOCHS\"]\n // self.config[\"LR_SCHEDULER_PARAMS\"][\"n_rewinding\"]\n > self.config[\"LR_SCHEDULER_PARAMS\"][\"warmup_epochs\"]\n )\n\n # min_lr\n if \"min_lr\" not in self.config[\"LR_SCHEDULER_PARAMS\"]:\n self.config[\"LR_SCHEDULER_PARAMS\"][\"min_lr\"] = 0.0\n else:\n assert type(self.config[\"LR_SCHEDULER_PARAMS\"][\"min_lr\"]) is float\n assert self.config[\"LR_SCHEDULER_PARAMS\"][\"min_lr\"] >= 0.0\n\n # decay\n if \"decay\" not in self.config[\"LR_SCHEDULER_PARAMS\"]:\n self.config[\"LR_SCHEDULER_PARAMS\"][\"decay\"] = 0.0\n else:\n assert type(self.config[\"LR_SCHEDULER_PARAMS\"][\"decay\"]) is float\n assert 0.0 <= self.config[\"LR_SCHEDULER_PARAMS\"][\"decay\"] < 1.0", "def main(cmd_line=None):\n release_config = 'CentOS-8/master.yaml'\n logging.basicConfig(level=logging.DEBUG)\n log = logging.getLogger('dlrnapi_promoter')\n log.setLevel(logging.DEBUG)\n\n log.info(\"Checking for log directory\")\n log_file = os.path.expanduser(get_log_file('staging',\n release_config))\n log_dir = \"/\".join(log_file.split(\"/\")[:-1])\n if not os.path.exists(log_dir):\n log.info(\"Creating log directory : {}\".format(log_dir))\n os.makedirs(log_dir)\n config_builder = PromoterConfigFactory(config_class=StageConfig)\n\n logging.basicConfig(level=logging.DEBUG)\n log = logging.getLogger(\"promoter-staging\")\n log.setLevel(logging.DEBUG)\n\n args = parse_args(config_builder.global_defaults, cmd_line=cmd_line)\n\n if hasattr(args, \"release_config\"):\n release_config = args.release_config\n config_builder = PromoterConfigFactory(config_class=StageConfig,\n **{'log_file': log_file})\n\n config = config_builder(\"staging\", release_config,\n validate=None)\n # Export dlrn password\n os.environ['DLRNAPI_PASSWORD'] = config.dlrn['server']['password']\n staged_env = StageOrchestrator(config)\n args.handler(staged_env)\n\n if cmd_line is not None:\n return config", "def setup(self):\n self.log.debug('RFSwitch - in RFSwitch setup()')\n # Add resource setup code here\n print(\"Calling RFSwitch:setup\")", "def start(self, endpoint, discoverer, pool, distributed=True):\n kv_store = kvstore.get(options.kv_store)\n kv_store.write(f'/schedulers/{endpoint}', dir=True)\n\n if not isinstance(kv_store, kvstore.LocalKVStore):\n # set etcd as service discover\n logger.info('Mars Scheduler started with kv store %s.', options.kv_store)\n # create KVStoreActor when there is a distributed KV store\n self._kv_store_ref = pool.create_actor(KVStoreActor, uid=KVStoreActor.default_uid())\n else:\n # single scheduler\n logger.info('Mars Scheduler started in standalone mode.')\n\n # create ClusterInfoActor\n self._cluster_info_ref = pool.create_actor(\n SchedulerClusterInfoActor, discoverer, distributed=distributed,\n uid=SchedulerClusterInfoActor.default_uid())\n # create ChunkMetaActor\n self._chunk_meta_ref = pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())\n # create 
CustomLogMetaActor\n self._custom_log_meta_ref = pool.create_actor(\n CustomLogMetaActor, uid=CustomLogMetaActor.default_uid())\n # create SessionManagerActor\n self._session_manager_ref = pool.create_actor(\n SessionManagerActor, uid=SessionManagerActor.default_uid())\n # create ResourceActor\n self._resource_ref = pool.create_actor(ResourceActor, uid=ResourceActor.default_uid())\n # create NodeInfoActor\n self._node_info_ref = pool.create_actor(NodeInfoActor, uid=NodeInfoActor.default_uid())\n kv_store.write(f'/schedulers/{endpoint}/meta',\n json.dumps(self._resource_ref.get_workers_meta()))\n if options.vineyard.enabled:\n # create global VineyardKeyMapActor\n self._vineyard_key_map_ref = pool.create_actor(VineyardKeyMapActor, uid=VineyardKeyMapActor.default_uid())", "def test_launch_slurm_orc(fileutils, wlmutils):\n launcher = wlmutils.get_test_launcher()\n if launcher != \"slurm\":\n pytest.skip(\"Test only runs on systems with Slurm as WLM\")\n\n exp_name = \"test-launch-slurm-orc\"\n exp = Experiment(exp_name, launcher=launcher)\n test_dir = fileutils.make_test_dir(exp_name)\n\n # batch = False to launch on existing allocation\n orc = SlurmOrchestrator(6780, batch=False)\n orc.set_path(test_dir)\n\n exp.start(orc, block=True)\n status = exp.get_status(orc)\n\n # don't use assert so that orc we don't leave an orphan process\n if constants.STATUS_FAILED in status:\n exp.stop(orc)\n assert False\n\n exp.stop(orc)\n status = exp.get_status(orc)\n assert all([stat == constants.STATUS_CANCELLED for stat in status])", "def celeryd_after_setup(**_):\n riberry.model.conn.dispose_engine()", "def __init__(self,\n config: Optional[pipeline_config.PipelineConfig] = None):\n if config is None:\n config = pipeline_config.PipelineConfig(\n supported_launcher_classes=[\n in_process_component_launcher.InProcessComponentLauncher,\n docker_component_launcher.DockerComponentLauncher,\n ],\n )\n super().__init__(config)", "def _startup():\n from octoprint_dashboard.model import User, Config\n if Config.query.scalar() is None:\n print(\"No config, add config via command 'python -m flask config'\")\n shutdown_server()\n if User.query.filter_by(superadmin=True).count() == 0:\n print(\"No superadmin, add superadmin via command 'python -m flask add_superadmin <username>'\")\n shutdown_server()\n\n scheduler.start() # starts background task scheduler\n zeroconf_browser.start() # starts MDNS service discovery", "def test_workflow_environment():\n config = {\n \"workflow-name\": \"workflow\",\n \"cluster-type\": CLUSTER_TYPE,\n \n \"environment-variables\": {\n \"FOO\": \"BAR\",\n \"FOO2\": \"BAR2\"\n }\n }\n \n template_dir = tempfile.mkdtemp(suffix=\"test-workflow-environment-template\")\n with open(f\"{template_dir}/workflow.yaml\", 'w') as f:\n yaml.dump(config, f)\n \n @checkrun\n def execute(workflow_inst):\n def _check():\n assert os.environ['FOO'] == \"BAR\"\n assert os.environ[\"OMP_NUM_THREADS\"] == '1'\n return True\n \n # driver env\n _check()\n \n # worker env\n assert all(workflow_inst.run_on_each_worker(_check).values())\n \n os.environ['FOO'] = 'ORIGINAL_FOO'\n _execution_dir, _workflow = launch_flow(template_dir, 1, _custom_execute_fn=execute)\n assert execute.didrun\n \n # Environment is restored after execution is finished.\n assert os.environ['FOO'] == 'ORIGINAL_FOO'\n assert 'FOO2' not in os.environ", "def __launch__(self,config,command=None,**kwargs):\n if command is None:\n command = ['sleep 30;','qsub']\n return SampleQsubProcess.__launch__(self,config,command=command,**kwargs)", "def 
init():\n\n @click.command()\n @click.option('--cell', required=True,\n envvar='TREADMILL_CELL',\n callback=cli.handle_context_opt,\n expose_value=False)\n @click.option('--ssh', help='SSH client to use.',\n type=click.Path(exists=True, readable=True))\n @click.argument('app')\n @click.argument('command', nargs=-1)\n def ssh(ssh, app, command):\n \"\"\"SSH into Treadmill container.\"\"\"\n if ssh is None:\n ssh = _DEFAULT_SSH\n\n if app.find('#') == -1:\n # Instance is not specified, list matching and exit.\n raise click.BadParameter('Specify full instance name: xxx#nnn')\n\n app_discovery = discovery.Discovery(context.GLOBAL.zk.conn, app, 'ssh')\n app_discovery.sync()\n\n # Restore default signal mask disabled by python spawning new thread\n # for Zk connection.\n #\n # TODO: should this be done as part of zkutils.connect?\n for sig in range(1, signal.NSIG):\n try:\n signal.signal(sig, signal.SIG_DFL)\n except OSError:\n pass\n\n # TODO: not sure how to handle mutliple instances.\n for (app, hostport) in app_discovery.items():\n _LOGGER.info('%s :: %s', app, hostport)\n if hostport:\n host, port = hostport.split(b':')\n run_ssh(host, port, ssh, list(command))\n\n return ssh", "def createScheduler_(self):\n klass_name = 'Scheduler' + string.capitalize(self.scheduler_name)\n file_name = klass_name\n try:\n klass = importName(file_name, klass_name)\n except KeyError:\n msg = 'No `class '+klass_name+'` found in file `'+file_name+'.py`'\n raise SkimException(msg)\n except ImportError, e:\n msg = 'Cannot create scheduler '+self.scheduler_name\n msg += ' (file: '+file_name+', class '+klass_name+'):\\n'\n msg += str(e)\n raise SkimException(msg)\n\n common.scheduler = klass()\n common.scheduler.configure(self.cfg_params)\n return", "def test_regular_user_can_schedule(self):\n\n s_ref = self._create_compute_service(host='host1')\n instance_id = self._create_instance()\n ctxt = context.RequestContext('fake', 'fake', False)\n self.scheduler.driver.schedule_run_instance(ctxt, instance_id)\n db.instance_destroy(self.context, s_ref['id'])", "def main(config: str):\n application = Application(config_path=config)\n application.run()", "async def launch(config, session, context, connection_file):\n raise NotImplementedError(\"launch must be implemented\")", "def setup(self, manager):\n self._manager = manager\n self._configured = True", "async def test_manual_schedule(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # Declare manual interval schedule\n manual_schedule = ManualSchedule()\n manual_schedule.name = 'manual task'\n manual_schedule.process_name = 'sleep10'\n manual_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(manual_schedule)\n manual_schedule = await scheduler.get_schedule(manual_schedule.schedule_id)\n\n await scheduler.queue_task(manual_schedule.schedule_id) # Added a task to the _scheduler queue\n await asyncio.sleep(5)\n\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n await self.stop_scheduler(scheduler)", "def test_reconfigure_then_listen(sysmon_tester_agent):\n new_config = _test_config.copy()\n new_config['base_topic'] = 'test2/sysmon'\n sysmon_tester_agent.vip.rpc.call('platform.sysmon', 'reconfigure',\n **new_config)\n listen(sysmon_tester_agent, new_config)", "def initialize_scheduler():\n\n with SCHED_LOCK:\n\n # Check if scheduler should be started\n start_jobs = not len(SCHED.get_jobs())\n\n # Update 
check\n github_minutes = CONFIG.CHECK_GITHUB_INTERVAL if CONFIG.CHECK_GITHUB_INTERVAL and CONFIG.CHECK_GITHUB else 0\n\n schedule_job(versioncheck.checkGithub, 'Check GitHub for updates',\n hours=0, minutes=github_minutes, seconds=0)\n\n # Our interval should never be less than 30 seconds\n monitor_seconds = CONFIG.MONITORING_INTERVAL if CONFIG.MONITORING_INTERVAL >= 30 else 30\n\n if CONFIG.PMS_IP and CONFIG.PMS_TOKEN:\n schedule_job(plextv.get_real_pms_url, 'Refresh Plex server URLs',\n hours=12, minutes=0, seconds=0)\n schedule_job(pmsconnect.get_server_friendly_name, 'Refresh Plex server name',\n hours=12, minutes=0, seconds=0)\n\n schedule_job(activity_pinger.check_recently_added, 'Check for recently added items',\n hours=0, minutes=0, seconds=monitor_seconds * bool(CONFIG.NOTIFY_RECENTLY_ADDED))\n schedule_job(activity_pinger.check_server_response, 'Check for Plex remote access',\n hours=0, minutes=0, seconds=monitor_seconds * bool(CONFIG.MONITOR_REMOTE_ACCESS))\n schedule_job(activity_pinger.check_server_updates, 'Check for Plex updates',\n hours=12 * bool(CONFIG.MONITOR_PMS_UPDATES), minutes=0, seconds=0)\n\n # If we're not using websockets then fall back to polling\n if not CONFIG.MONITORING_USE_WEBSOCKET or POLLING_FAILOVER:\n schedule_job(activity_pinger.check_active_sessions, 'Check for active sessions',\n hours=0, minutes=0, seconds=monitor_seconds)\n\n # Refresh the users list and libraries list\n user_hours = CONFIG.REFRESH_USERS_INTERVAL if 1 <= CONFIG.REFRESH_USERS_INTERVAL <= 24 else 12\n library_hours = CONFIG.REFRESH_LIBRARIES_INTERVAL if 1 <= CONFIG.REFRESH_LIBRARIES_INTERVAL <= 24 else 12\n\n if CONFIG.PMS_TOKEN:\n schedule_job(plextv.refresh_users, 'Refresh users list',\n hours=user_hours, minutes=0, seconds=0)\n\n if CONFIG.PMS_IP and CONFIG.PMS_TOKEN:\n schedule_job(pmsconnect.refresh_libraries, 'Refresh libraries list',\n hours=library_hours, minutes=0, seconds=0)\n\n backup_hours = CONFIG.BACKUP_INTERVAL if 1 <= CONFIG.BACKUP_INTERVAL <= 24 else 6\n\n schedule_job(database.make_backup, 'Backup PlexPy database',\n hours=backup_hours, minutes=0, seconds=0, args=(True, True))\n schedule_job(config.make_backup, 'Backup PlexPy config',\n hours=backup_hours, minutes=0, seconds=0, args=(True, True))\n\n # Start scheduler\n if start_jobs and len(SCHED.get_jobs()):\n try:\n SCHED.start()\n except Exception as e:\n logger.info(e)\n\n # Debug\n #SCHED.print_jobs()", "def test_delayed_exec_configs(self):\n from fixtures.test_adapter import TestAdapter\n class Test(pyperry.Base):\n def _config(cls):\n cls.configure('read', adapter=TestAdapter, foo=lambda: 'barbarbar')\n\n adapter = Test.adapter('read', )\n self.assertEquals(adapter.config.foo, 'barbarbar')", "def setup_and_run(self, configuration_data):\n\n # Configure the home path.\n home_path = os.path.expanduser(\"~\") + \"/.pyBridge/\"\n home_exists = os.path.exists(home_path)\n if home_exists is False:\n os.mkdir(home_path)\n\n # Load the addons\n self.loaded_addons = []\n\n # Process each bridge and load the appropriate bridge code and assemble the broadcast domains.\n for domain in configuration_data.domains:\n domain_bridges = []\n for bridge in domain.bridges:\n bridge_name = bridge.bridge\n\n try:\n module = importlib.import_module(\"bridges.%s\" % bridge_name)\n addon_instance = module.Bridge(self, home_path, bridge, configuration_data)\n self.loaded_addons.append(addon_instance)\n\n domain_bridges.append(addon_instance)\n except ImportError as e:\n print(\"!!! 
Failed to initialize bridge '%s': \" % bridge_name)\n print(traceback.format_exc())\n return False\n\n # For all bridges, construct the domain\n for added_bridge in domain_bridges:\n for target_bridge in domain_bridges:\n if added_bridge is target_bridge:\n continue\n\n self.connection_bridges.setdefault(target_bridge, [])\n self.connection_bridges[target_bridge].append(added_bridge)\n\n # Once everything is mapped, start up all of the loaded addons.\n for loaded_addon in self.loaded_addons:\n loaded_addon.start()\n\n process_sleepms = datetime.timedelta(milliseconds=configuration_data.global_configuration.process_internal.sleep_ms)\n\n # Handle sigterm to tear everything down\n def termination_handler(signum, frame):\n self.should_run = False\n signal.signal(signal.SIGTERM, termination_handler)\n\n last_time = datetime.datetime.now()\n while self.should_run:\n current_time = datetime.datetime.now()\n delta_time = current_time - last_time\n\n for addon in self.loaded_addons:\n addon.update(delta_time)\n\n for connection in self.connections:\n connection.update(delta_time)\n\n if delta_time < process_sleepms:\n slept_time = process_sleepms - delta_time\n time.sleep(slept_time.total_seconds())\n\n last_time = current_time\n\n print(\"!!! Deinitializing Bot ....\")\n\n # Stop all running addons\n for addon in self.loaded_addons:\n addon.stop()\n\n # Stop all connections\n for connection in self.connections:\n connection.disconnect()\n return True" ]
[ "0.59481937", "0.5393874", "0.5334805", "0.53214973", "0.52877617", "0.5262214", "0.52576655", "0.52325445", "0.51960254", "0.5173219", "0.51681364", "0.5132155", "0.51199645", "0.5117904", "0.5104992", "0.5102506", "0.5085037", "0.5080357", "0.50710267", "0.5063733", "0.5049085", "0.5040776", "0.5025511", "0.50168973", "0.49915764", "0.49845466", "0.49802625", "0.49795437", "0.49559587", "0.49388114", "0.49380955", "0.49350774", "0.49328217", "0.49230444", "0.49108994", "0.48963356", "0.48938194", "0.48928958", "0.48854753", "0.4880746", "0.4869472", "0.48682362", "0.48613945", "0.48576695", "0.48541746", "0.48326007", "0.48275444", "0.482635", "0.48249787", "0.48111206", "0.47972348", "0.47931418", "0.4786867", "0.4778065", "0.47732136", "0.4772916", "0.47716862", "0.47666436", "0.476646", "0.47654265", "0.47592974", "0.47585562", "0.47552767", "0.4752635", "0.47446746", "0.47415605", "0.47410637", "0.47333443", "0.4730758", "0.472363", "0.4722264", "0.4719788", "0.4714543", "0.47064778", "0.47040534", "0.46889246", "0.46876794", "0.46826765", "0.46747708", "0.46745574", "0.46723595", "0.4667556", "0.46667674", "0.4664931", "0.46583426", "0.46576846", "0.46565378", "0.46471784", "0.46408245", "0.46275106", "0.46199346", "0.4617316", "0.46146485", "0.46105748", "0.46099013", "0.4605863", "0.4599043", "0.45837176", "0.4583101", "0.4582383" ]
0.67965436
0
Setup for test_worker_initialization(), below. Parameterized for the 'once-per-machine' case (and its opposite).
def setup_worker_initialization_template(request): once_per_machine = request.param template_dir = tempfile.mkdtemp(suffix="test-worker-initialization") worker_script = f"{template_dir}/do-nothing.sh" with open(worker_script, 'w') as f: f.write("#!/bin/bash\n") f.write("sleep 10") os.chmod(worker_script, 0o777) config = { "workflow-name": "workflow", "cluster-type": CLUSTER_TYPE, "worker-initialization": { "script-path": "do-nothing.sh", "only-once-per-machine": once_per_machine, "script-args": ["_TEST_SCRIPT_FAKE_ARG_"], # This is just here to make it easy to identify the process "launch-delay": 0 } } with open(f"{template_dir}/workflow.yaml", 'w') as f: yaml.dump(config, f) return template_dir, config, once_per_machine
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_worker_initialization(setup_worker_initialization_template):\n template_dir, _config, once_per_machine = setup_worker_initialization_template\n \n num_workers = 2\n if once_per_machine or CLUSTER_TYPE in (\"synchronous\", \"processes\"):\n expected_script_count = 1\n else:\n expected_script_count = num_workers\n \n @checkrun\n def execute(workflow_inst):\n script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent\n script_count = len(find_processes('_TEST_SCRIPT_FAKE_ARG_'))\n assert script_count > 0, f\"Worker script is not running. Check logs in:\\n{script_dir}\"\n assert script_count <= expected_script_count, f\"Worker script started too many times. Check logs in:\\n{script_dir}\"\n assert script_count == expected_script_count, f\"Worker script not started on all workers. Check logs in:\\n{script_dir}\"\n \n _execution_dir, workflow_inst = launch_flow(template_dir, num_workers, _custom_execute_fn=execute)\n script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent\n script_count = len(find_processes('_TEST_SCRIPT_FAKE_ARG_'))\n\n assert script_count == 0, \\\n (\"Worker script(s) remained running after the workflow exited.\"\\\n f\"Check logs in:\\n{script_dir}\")", "def setUp(self) :\n self.longMessage = True\n logger = corAna.makeLogger(isTestMode=True,isMaster=True,isViewer=True,isServer=True,rank=0)\n isFirstWorker = True\n self.numTimes = 5\n numDataPointsThisWorker = 1\n\n self.workerData = corAna.WorkerData(logger, isFirstWorker, self.numTimes,\n numDataPointsThisWorker, addRemoveCallbackObject = None)", "def init_worker(*shared_args_list):\n global SHARED_ARGS\n SHARED_ARGS = shared_args_list", "def test_setup_sync(self):\n worker_helper = WorkerHelper()\n self.assertEqual(worker_helper.setup(), None)", "def evaluate_system__initialize_workers(opts, dictionary, features, labels):\n global evaluate_system__worker_cache\n evaluate_system__worker_cache = {\"opts\": opts, \"dictionary\": dictionary, \"features\": features, \"labels\": labels}", "def worker_init_fn(worker_id: int) -> None:\n worker_info = torch.utils.data.get_worker_info()\n set_rnd(worker_info.dataset, seed=worker_info.seed) # type: ignore[union-attr]", "def worker_init_fn(worker_id):\n worker_info = torch.utils.data.get_worker_info() # type: ignore\n if hasattr(worker_info.dataset, \"transform\") and hasattr(worker_info.dataset.transform, \"set_random_state\"):\n worker_info.dataset.transform.set_random_state(worker_info.seed % (2 ** 32))", "def worker_init_fn(worker_id):\n np.random.seed(np.random.get_state()[1][0] + worker_id)", "def worker_init_fn(worker_id):\n np.random.seed(np.random.get_state()[1][0] + worker_id)", "def init_worker(self, worker_id) :\n\n # since this is called in a separate process,\n # we need to get a consistent view of the settings\n startup.main(self.mode, self.rank)\n\n # initialize the random seed for this process\n # we don't use just the worker_id but also the rank\n # so we truly get different random numbers in all workers,\n # not restricted to the current pool\n # note that we get some entropy from the time\n # so different epochs get different data augmentations\n np.random.seed((hash(time())\n + (settings.RANK * torch.utils.data.get_worker_info().num_workers\n + worker_id)) % 2**32)", "def init(number_of_workers=0):\n global _wq, _use_workers\n\n if number_of_workers:\n _use_workers = number_of_workers\n else:\n _use_workers = benchmark_workers()\n\n # if it is best to use zero workers, then use that.\n _wq = 
WorkerQueue(_use_workers)", "def recognition_system__initialize_workers(opts, dictionary):\n global recognition_system__worker_cache\n recognition_system__worker_cache = {\"opts\": opts, \"dictionary\": dictionary}", "def _get_executor_init(self, workers):\n raise NotImplementedError", "def test_workers(self):\n wr = WorkflowRuner(4)\n try:\n wr.init_workers()\n assert wr.workers_available() == 4\n wr.acquire_worker()\n assert wr.workers_available() == 3\n wr.acquire_worker()\n assert wr.active_workers()\n wr.acquire_worker()\n assert wr.active_workers()\n wr.acquire_worker()\n assert not wr.active_workers()\n wr.release_worker()\n assert wr.active_workers()\n wr.release_worker()\n assert wr.workers_available() == 2\n wr.terminate_workers_and_clean_subprocesses()\n except:\n wr.terminate_workers_and_clean_subprocesses()", "def worker_init_fn(worker_id):\r\n base_seed = torch.IntTensor(1).random_().item()\r\n #print(worker_id, base_seed)\r\n np.random.seed(base_seed + worker_id)", "def worker_init_fn(worker_id, num_workers, rank, seed):\n\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)", "def worker_init_fn(worker_id, num_workers, rank, seed):\n\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)", "def initialize(self,init):\n logger.info('*** initialize: worker id=%d',self._agent.wid)\n self.commands = {'initialize':None, 'before_do_work':None, 'after_do_work':None, 'finalize':None}\n self.commands.update(init.get(self._agent.wid,{}))\n exec_command(self.commands['initialize'])", "def setUpClass(cls):\n cls.maxDiff = None\n cls.servers = servers.keys()\n cls.job_types1 = {'conformers': True,\n 'opt': True,\n 'fine_grid': False,\n 'freq': True,\n 'sp': True,\n '1d_rotors': False,\n 'orbitals': False,\n 'lennard_jones': False,\n }", "def init_workers():\n party_queue = Queue()\n p = Producer(party_queue)\n p.daemon = True\n c = Consumer(party_queue)\n c.deamon= True\n m = MasterUpdater(db,application_name)\n m.deamon = True\n p.start()\n c.start()\n m.start()", "def setUp(self) :\n self.longMessage = True\n logger = corAna.makeLogger(isTestMode=True,isMaster=True,isViewer=True,isServer=True,rank=0)\n isFirstWorker = True\n self.numTimes = 5\n numDataPointsThisWorker = 1\n\n self.callbacks = {'remove':[],\n 'add':[],\n 'adjust':[]}\n\n class CallBack(object):\n def __init__(self, callbacks):\n self.callbacks = callbacks\n self.numpyArrayType = type(np.zeros(13))\n\n def workerBeforeDataRemove(self, tm, dataIdx, wd):\n assert isinstance(tm, int)\n assert isinstance(dataIdx, int)\n self.callbacks['remove'].append((tm,wd.X[dataIdx,0]))\n\n def workerAdjustData(self, data):\n assert isinstance(data, self.numpyArrayType)\n self.callbacks['adjust'].append(data[0])\n\n def workerAfterDataInsert(self, tm, dataIdx, wd):\n assert isinstance(tm, int)\n assert isinstance(dataIdx, int)\n self.callbacks['add'].append((tm,wd.X[dataIdx,0]))\n\n self.workerData = corAna.WorkerData(logger, isFirstWorker,\n self.numTimes,\n numDataPointsThisWorker,\n addRemoveCallbackObject = CallBack(self.callbacks))", "def worker_init_fn(self, worker_id: int) -> None:\n np.random.seed(np.random.get_state()[1][0] + worker_id + random.randint(1, 1000))\n\n worker_info = torch.utils.data.get_worker_info()\n worker_info.dataset.set_worker_id(worker_id)\n worker_info.dataset.examples, shard_stats = self.get_worker_shard(\n worker_info.dataset.examples, worker_info.num_workers, worker_id\n )\n 
worker_info.dataset.logger.info(\n f\"Stats for shard created for worker {worker_id}: \\n {shard_stats}\"\n )\n worker_info.dataset.create_language_index_mapping()", "async def _setup(self):\n\n Reporter.info('Setting up workers...')\n self.workers = [asyncio.Task(self._work(), loop=self.loop)\n for _ in range(self.MAX_WORKERS)]\n Reporter.info('Starting scan...')\n await self.q.join()", "def __init__(self, worker_device):\n self._worker_device = worker_device\n self._local_map = {}\n self._global_map = {}", "def setUp(self):\n print time.ctime(), \"enter setUp\"\n #pdb.set_trace()\n self.site1 = self.globalCfg['site1']\n self.site2 = self.globalCfg['site2']\n self.site3 = self.globalCfg['site3']\n\n # if the former case or teardown failed, re-exec the config, setup sys-rep\n if not testHaDR_AdvancedParameters_MultiTier.isFirstCase:\n if not testHaDR_AdvancedParameters_MultiTier.testRunSuccess or not testHaDR_AdvancedParameters_MultiTier.testTearDownSuccess:\n print time.ctime(), \"----> cleanup and re-setup system replication since last case/teardown failed...\"\n # in case the previous case's teardown failed, and the primary is happened to be host2(there is a failback before),\n # there's no chance to disable full_sync anymore, maybe lead to the next case(if failback) failed\n if self.globalCfg['sync_mode'] == 'sync' and self.site1.fullSync:\n self.site1.srDisableFullSync(self.site1.getHost(\"WORKER1\"))\n self.site1.fullSync = False\n t1 = threading.Thread(target = self.site1.cleanUp)\n t2 = threading.Thread(target = self.site2.cleanUp)\n t3 = threading.Thread(target = self.site3.cleanUp)\n t1.start()\n t2.start()\n t3.start()\n t1.join()\n t2.join()\n t3.join()\n self.site1.startDatabaseLandscapeAsWhole()\n self.site2.startDatabaseLandscapeAsWhole()\n self.site3.startDatabaseLandscapeAsWhole()\n self.waitForDatabaseLandscapeStartedByPY(self.site1)\n self.waitForDatabaseLandscapeStartedByPY(self.site2)\n self.waitForDatabaseLandscapeStartedByPY(self.site3)\n getattr(self, self.getCurCfg())()\n\n\n if self._testMethodName == 'test220INIParaReplication':\n self.site1.setConfigParameterPerLayer(self.site1.getHost(\"WORKER1\"), \"global.ini\", \"CUSTOMER\", \"persistence\", \"savepoint_interval_s\", \"200\")\n self.site2.setConfigParameterPerLayer(self.site2.getHost(\"WORKER1\"), \"global.ini\", \"CUSTOMER\", \"persistence\", \"savepoint_interval_s\", \"200\")\n self.site3.setConfigParameterPerLayer(self.site3.getHost(\"WORKER1\"), \"global.ini\", \"CUSTOMER\", \"persistence\", \"savepoint_interval_s\", \"200\")\n self.site1.setConfigParameterPerLayer(self.site1.getHost(\"WORKER2\"), \"indexserver.ini\", \"HOST\", \"authorization\", \"internal_support_user_limit\", \"2\")\n self.site2.setConfigParameterPerLayer(self.site2.getHost(\"WORKER2\"), \"indexserver.ini\", \"HOST\", \"authorization\", \"internal_support_user_limit\", \"2\")\n self.site3.setConfigParameterPerLayer(self.site3.getHost(\"WORKER2\"), \"indexserver.ini\", \"HOST\", \"authorization\", \"internal_support_user_limit\", \"2\")\n self.site1.setConfigParameterPerLayer(self.site1.getHost(\"WORKER2\"), \"preprocessor.ini\", \"CUSTOMER\", \"lexicon\", \"abort_time\", \"400\")\n self.site2.setConfigParameterPerLayer(self.site2.getHost(\"WORKER2\"), \"preprocessor.ini\", \"CUSTOMER\", \"lexicon\", \"abort_time\", \"400\")\n self.site3.setConfigParameterPerLayer(self.site3.getHost(\"WORKER2\"), \"preprocessor.ini\", \"CUSTOMER\", \"lexicon\", \"abort_time\", \"400\")\n 
self.site1.setConfigParameterPerLayer(self.site1.getHost(\"WORKER2\"), \"preprocessor.ini\", \"HOST\", \"lexicon\", \"abort_time\", \"200\")\n self.site2.setConfigParameterPerLayer(self.site2.getHost(\"WORKER2\"), \"preprocessor.ini\", \"HOST\", \"lexicon\", \"abort_time\", \"200\")\n self.site3.setConfigParameterPerLayer(self.site3.getHost(\"WORKER2\"), \"preprocessor.ini\", \"HOST\", \"lexicon\", \"abort_time\", \"200\")\n self.site1.setConfigParameterPerLayer(self.site1.getHost(\"STANDBY1\"), \"xsengine.ini\", \"HOST\", \"httpserver\", \"maxthreads\", \"300\")\n self.site2.setConfigParameterPerLayer(self.site2.getHost(\"STANDBY1\"), \"xsengine.ini\", \"HOST\", \"httpserver\", \"maxthreads\", \"300\")\n self.site3.setConfigParameterPerLayer(self.site3.getHost(\"STANDBY1\"), \"xsengine.ini\", \"HOST\", \"httpserver\", \"maxthreads\", \"300\")\n if self._multiDBInstance and not self.globalCfg['withInitTenant']:\n self.site1.setConfigParameterPerLayer(self.site1.getHost(\"WORKER1\"), \"scriptserver.ini\", \"CUSTOMER\", \"row_engine\", \"container_dop\", \"2\", self.globalCfg['dbname1'])\n self.site2.setConfigParameterPerLayer(self.site2.getHost(\"WORKER1\"), \"scriptserver.ini\", \"CUSTOMER\", \"row_engine\", \"container_dop\", \"2\", self.globalCfg['dbname1'])\n self.site3.setConfigParameterPerLayer(self.site3.getHost(\"WORKER1\"), \"scriptserver.ini\", \"CUSTOMER\", \"row_engine\", \"container_dop\", \"2\", self.globalCfg['dbname1'])\n self.site1.setConfigParameterPerLayer(self.site1.getHost(\"STANDBY1\"), \"scriptserver.ini\", \"CUSTOMER\", \"adapter_operation_cache\", \"geocode\", \"15\", self.globalCfg['dbname1'])\n self.site2.setConfigParameterPerLayer(self.site2.getHost(\"STANDBY1\"), \"scriptserver.ini\", \"CUSTOMER\", \"adapter_operation_cache\", \"geocode\", \"15\", self.globalCfg['dbname1'])\n self.site3.setConfigParameterPerLayer(self.site3.getHost(\"STANDBY1\"), \"scriptserver.ini\", \"CUSTOMER\", \"adapter_operation_cache\", \"geocode\", \"15\", self.globalCfg['dbname1'])\n self.site1.setConfigParameterPerLayer(self.site1.getHost(\"WORKER1\"), \"xsengine.ini\", \"CUSTOMER\", \"transaction\", \"table_lock_array_size\", \"2\", self.globalCfg['dbname1'])\n self.site2.setConfigParameterPerLayer(self.site2.getHost(\"WORKER1\"), \"xsengine.ini\", \"CUSTOMER\", \"transaction\", \"table_lock_array_size\", \"2\", self.globalCfg['dbname1'])\n self.site3.setConfigParameterPerLayer(self.site3.getHost(\"WORKER1\"), \"xsengine.ini\", \"CUSTOMER\", \"transaction\", \"table_lock_array_size\", \"2\", self.globalCfg['dbname1'])\n\n if testHaDR_AdvancedParameters_MultiTier.isFirstCase:\n testHaDR_AdvancedParameters_MultiTier.isFirstCase = False", "def __init__(self, worker):\n self._worker = worker\n self._jobs = Queue()\n self._results, self._errors = [], []\n self._jobfinished = Condition()", "def _initJobs(self):\n pass", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Always start the servers for each test variant\n self.start_agents_once = False\n self.start_servers_once = False\n\n # Whether to skip tearDown\n self.skip_teardown = False", "def __init__(self):\n self._event = multiprocessing.Event()\n self._queue = multiprocessing.JoinableQueue()\n self._results = multiprocessing.Queue()\n self._spawn_workers()\n self.population = self._seed_population()", "def __init__(self, num_workers, mb=None):\n self._state = SharedState(mb=mb)\n self._procs = self._state.make_procs(num_workers)", "def __init__(self, run, expname):\n logger.debug('Initializing worker 
{}.'.format(rank))\n self.run = int(run)\n self.expname = expname\n bcast_var = None\n dsname = comm.bcast(bcast_var, root=0)\n print(dsname)\n \n print('********** Start setup.')\n t0 = time.time()\n self.dsIdx = psana.DataSource(str(dsname))\n logger.info('********** Datasource on rank {}: {}s'.format(rank, time.time()-t0))\n self.dsIdxRun = next(self.dsIdx.runs())\n self.parse_detectors()\n logger.info('Rank {} has datasource and detectors.'.format(rank))\n print('********** Setup on rank {}: {}s'.format(rank, time.time()-t0))\n return", "def worker_init_reset_seed(worker_id: int):\n initial_seed = torch.initial_seed() % 2**31\n seed_all_rng(initial_seed + worker_id)", "def init_workers(dist_mode):\n if dist_mode == 'ddp-file':\n from distributed.torch import init_workers_file\n return init_workers_file()\n elif dist_mode == 'ddp-mpi':\n from distributed.torch import init_workers_mpi\n return init_workers_mpi()\n elif dist_mode == 'cray':\n from distributed.cray import init_workers_cray\n return init_workers_cray()\n return 0, 1", "def init_workers(dist_mode):\n if dist_mode == 'ddp-file':\n from distributed.torch import init_workers_file\n return init_workers_file()\n elif dist_mode == 'ddp-mpi':\n from distributed.torch import init_workers_mpi\n return init_workers_mpi()\n elif dist_mode == 'cray':\n from distributed.cray import init_workers_cray\n return init_workers_cray()\n return 0, 1", "def __init__(self, config, maxCores, maxMemory, maxDisk):\n self.config = config\n self.maxCores = maxCores\n self.maxMemory = maxMemory\n self.maxDisk = maxDisk\n self.environment = {}\n \"\"\"\n :type dict[str,str]\n \"\"\"\n self.workerCleanupInfo = WorkerCleanupInfo(workDir=self.config.workDir,\n workflowID=self.config.workflowID,\n cleanWorkDir=self.config.cleanWorkDir)", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.start_agents_once = False\n self.start_servers_once = False\n self.setup_start_agents = False\n self.setup_start_servers = False", "def setup(self):\n self.machine = Machine(['a', 'b', 'c', '_'])", "def __init__(self, worker_id=0, base_port=5005):", "def init_worker (self):\n print(\"initializing map worker in directory: \", os.getcwd ())\n\n context = zmq.Context()\n\n # Socket to receive messages on. Worker uses PULL from the master\n # To that end, we connect to the server. 
The map worker pulls info\n # from the base port of the master\n self.receiver = context.socket (zmq.PULL)\n self.receiver.setsockopt (zmq.RCVHWM, 0)\n connect_addr = \"tcp://\"+ self.master_ip + \":\" + str (self.master_port)\n print(\"Using PULL, map worker connecting to \", connect_addr)\n self.receiver.connect (connect_addr)\n \n # As part of the initialization, we tell the master that we are up.\n # This information is to be pushed to the master at a port which is\n # 2 more than the base of the master.\n self.init_sender = context.socket (zmq.PUSH)\n self.init_sender.setsockopt (zmq.LINGER, -1)\n connect_addr = \"tcp://\" + self.master_ip + \":\" + str (self.master_port+2)\n print(\"Using PUSH, map worker connecting to worker up barrier at \", connect_addr)\n self.init_sender.connect (connect_addr)\n #bind_addr = \"tcp://\" + self.master_ip + \":\" + str (self.master_port+2)\n #print \"Using PUSH, map worker binding to worker up barrier at \", bind_addr\n #self.init_sender.bind (bind_addr)\n\n # now send an ACK to the barrier to let it know that we are up\n self.init_sender.send (b'0')\n\n # close the socket\n # self.init_sender.close ()\n\n # To send the results, we need to initialize the send address to point\n # to the map results barrier\n #\n # Note that the port number of the maps result barrier is 3 more than\n # the port of the master. Initialize it so we can send results \n self.results_sender = context.socket (zmq.PUSH)\n self.results_sender.setsockopt (zmq.LINGER, -1)\n self.results_sender.setsockopt (zmq.SNDHWM, 0)\n connect_addr = \"tcp://\" + self.master_ip + \":\" + str (self.master_port+3)\n print(\"Using PUSH, map worker connecting to map results barrier at \", connect_addr)\n self.results_sender.connect (connect_addr)\n #bind_addr = \"tcp://\" + self.master_ip + \":\" + str (self.master_port+3)\n #print \"Using PUSH, map worker binding to map results barrier at \", bind_addr\n #self.results_sender.bind (bind_addr)", "def initialize_threading(self, worker_env=None):\n if not (os.path.exists(core.config.paths.zmq_public_keys_path) and\n os.path.exists(core.config.paths.zmq_private_keys_path)):\n logging.error(\"Certificates are missing - run generate_certificates.py script first.\")\n sys.exit(0)\n\n for i in range(NUM_PROCESSES):\n args = (i,)\n if worker_env:\n args = (i, worker_env,)\n\n pid = multiprocessing.Process(target=loadbalancer.Worker, args=args)\n pid.start()\n self.pids.append(pid)\n\n self.ctx = zmq.Context.instance()\n self.auth = ThreadAuthenticator(self.ctx)\n self.auth.start()\n self.auth.allow('127.0.0.1')\n self.auth.configure_curve(domain='*', location=core.config.paths.zmq_public_keys_path)\n\n self.load_balancer = loadbalancer.LoadBalancer(self.ctx)\n self.receiver = loadbalancer.Receiver(self.ctx)\n\n self.receiver_thread = threading.Thread(target=self.receiver.receive_results)\n self.receiver_thread.start()\n\n self.manager_thread = threading.Thread(target=self.load_balancer.manage_workflows)\n self.manager_thread.start()\n\n self.threading_is_initialized = True\n logger.debug('Controller threading initialized')\n gevent.sleep(0)", "def setUp(self):\n self.sc = init_orca_context(cores=4)", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.start_servers_once = False\n self.setup_start_agents = False\n\n # force test status !!\n # use mangling trick described at\n # https://stackoverflow.com/questions/3385317/private-variables-and-methods-in-python\n # conditionally set to FAIL in tearDown\n self._Test__status = 
'PASS' # pylint:disable=invalid-name", "def test_defaults(self):\n worker_helper = WorkerHelper()\n self.assertEqual(worker_helper._connector_name, None)\n self.assertIsInstance(worker_helper.broker, FakeAMQPBroker)", "def setUp(self):\n self.pcp = ControllerQueue(None, None)\n DummyWorkItem.results = {}", "def setup(env, clerks, cachier):\n global workers_arrived\n while True:\n timeout = env.timeout(random.randint(IAT_MIN, IAT_MAX))\n yield timeout\n env.process(worker(env, workers_arrived, clerks, cachier))\n workers_arrived += 1", "def worker_initializer():\n signal.signal(signal.SIGINT, signal.SIG_IGN)", "def setup(self):\n # Have to wait for a server connection before we\n # can run the test\n self.wait_for_server_connections(10)", "def _init_worker():\n signal.signal(signal.SIGINT, signal.SIG_IGN)", "def setUp(self):\n self.core_processor = core_processor.ProcessCores()", "def setUp(self):\r\n self.reactor = DummyProcessReactor()\r\n self.pm = ProcessMonitor(reactor=self.reactor)\r\n self.pm.minRestartDelay = 2\r\n self.pm.maxRestartDelay = 10\r\n self.pm.threshold = 10", "def _setup_workers(self, num_workers):\n self.pool = []\n\n for _ in range(num_workers):\n self.pool.append(Thread(target=self.threadloop))\n\n for a_thread in self.pool:\n a_thread.setDaemon(True)\n a_thread.start()", "def setDefaultWorker(self, worker):\n pass", "def initialize(self, setting):\n\n # record type mappings \n for worker in setting[\"workers\"]:\n wid = worker[\"id\"]\n flavor = worker[\"flavor\"]\n self.worker_flavor[wid] = flavor\n self.workers[wid] = Worker(wid, self.mode)\n\n self.workload = [0 for _ in range(len(self.workers))]\n\n # record neighboring nodes \n for u, v in setting[\"neighbor_map\"]:\n self.neighbors[u].add(v) \n self.neighbors[v].add(u)\n\n self.initialized = True", "def worker_workerready(self, node, workerinfo):\n node.workerinfo = workerinfo\n node.workerinfo[\"id\"] = node.gateway.id\n node.workerinfo[\"spec\"] = node.gateway.spec\n\n self.config.hook.pytest_testnodeready(node=node)\n if self.shuttingdown:\n node.shutdown()\n else:\n self.sched.add_node(node)", "def pytest_started_handling_group(session, worker):", "def initialize():\n manager.initialize()\n logs.exit_great_success()", "def setup_worker_threads(self):\n \n for thread_number in range(0, self.max_workers):\n worker = DeviceWorker(self, thread_number)\n self.worker_threads.append(worker)\n worker.start()", "def setUp(self):\n # Start the servers and agents\n super().setUp()\n\n self.obj_class = self.params.get(\"dfs_oclass\", '/run/ior/objectclass/*')\n self.ior_chu_trs_blk_size = self.params.get(\n \"chunk_block_transfer_sizes\", '/run/ior/*')\n # Fail IOR test in case of Warnings\n self.fail_on_warning = True\n self.server_count = len(self.hostlist_servers) * 2\n # Create the Pool\n self.create_pool_max_size()\n self.update_ior_cmd_with_pool()", "def _seed_npy_before_worker_init(worker_id, seed, worker_init_fn=None):\n try:\n import numpy as np\n np.random.seed(seed + worker_id)\n except ImportError:\n pass\n\n if worker_init_fn is not None:\n return worker_init_fn(worker_id)", "def _problem_run_experiments_initialise(self):\n pass", "def setup_class(cls):\n cls.cwd = os.getcwd()\n cls.t = tempfile.mkdtemp()\n os.chdir(cls.t)\n\n cls.log_files = []\n cls.multiplexers = []\n\n try:\n temp_dir_node_1 = os.path.join(cls.t, \"temp_dir_node_1\")\n os.mkdir(temp_dir_node_1)\n cls.connection_node_1 = _make_libp2p_connection(\n data_dir=temp_dir_node_1,\n port=DEFAULT_PORT + 1,\n delegate_port=DEFAULT_DELEGATE_PORT 
+ 1,\n delegate=True,\n )\n cls.multiplexer_node_1 = Multiplexer(\n [cls.connection_node_1], protocols=[MockDefaultMessageProtocol]\n )\n cls.log_files.append(cls.connection_node_1.node.log_file)\n cls.multiplexer_node_1.connect()\n cls.multiplexers.append(cls.multiplexer_node_1)\n\n entry_peer = cls.connection_node_1.node.multiaddrs[0]\n\n temp_dir_node_2 = os.path.join(cls.t, \"temp_dir_node_2\")\n os.mkdir(temp_dir_node_2)\n cls.connection_node_2 = _make_libp2p_connection(\n data_dir=temp_dir_node_2,\n port=DEFAULT_PORT + 2,\n delegate_port=DEFAULT_DELEGATE_PORT + 2,\n entry_peers=[entry_peer],\n delegate=True,\n )\n cls.multiplexer_node_2 = Multiplexer(\n [cls.connection_node_2], protocols=[MockDefaultMessageProtocol]\n )\n cls.log_files.append(cls.connection_node_2.node.log_file)\n cls.multiplexer_node_2.connect()\n\n cls.multiplexers.append(cls.multiplexer_node_2)\n\n wait_for_condition(lambda: cls.multiplexer_node_1.is_connected, 10)\n wait_for_condition(lambda: cls.multiplexer_node_2.is_connected, 10)\n wait_for_condition(lambda: cls.connection_node_1.is_connected, 10)\n wait_for_condition(lambda: cls.connection_node_2.is_connected, 10)\n cls.connections = [cls.connection_node_1, cls.connection_node_2]\n cls.addresses = [\n cls.connection_node_1.address,\n cls.connection_node_2.address,\n ]\n\n for j in range(DEFAULT_CLIENTS_PER_NODE):\n ports = [DEFAULT_DELEGATE_PORT + 1, DEFAULT_DELEGATE_PORT + 2]\n peers_public_keys = [\n cls.connection_node_1.node.pub,\n cls.connection_node_2.node.pub,\n ]\n for i in range(len(ports)):\n port = ports[i]\n peer_public_key = peers_public_keys[i]\n temp_dir_client = os.path.join(cls.t, f\"temp_dir_client__{j}_{i}\")\n os.mkdir(temp_dir_client)\n conn = _make_libp2p_client_connection(\n data_dir=temp_dir_client,\n peer_public_key=peer_public_key,\n node_port=port,\n )\n mux = Multiplexer([conn], protocols=[MockDefaultMessageProtocol])\n\n cls.connections.append(conn)\n cls.addresses.append(conn.address)\n\n mux.connect()\n wait_for_condition((lambda m: lambda: m.is_connected)(mux), 10)\n wait_for_condition((lambda c: lambda: c.is_connected)(conn), 10)\n cls.multiplexers.append(mux)\n break\n\n except Exception:\n cls.teardown_class()\n raise", "def on_worker_init(self):\n self.import_default_modules()\n\n self.close_database()\n self.close_cache()", "def _initialise_run(self) -> None:", "async def _setup(self):", "def test_create(self):\n assert self.worker.connection is None or self.worker.connection.is_alive()\n # TODO(orlade): Mock this stuff.\n # assert_queue_size({TEST_REQUEST_QUEUE: 0, TEST_RESULT_QUEUE: 0})", "def test_choosingPerformerWhenNoPeersAndNoWorkers(self):\n self.checkPerformer(LocalPerformer)", "def init_worker():\n signal.signal(signal.SIGINT, signal.SIG_IGN)", "def test_choosingPerformerWithLocalCapacity(self):\n # Give it some local capacity.\n wlf = self.pcp.workerListenerFactory()\n proto = wlf.buildProtocol(None)\n proto.makeConnection(StringTransport())\n # Sanity check.\n self.assertEqual(len(self.pcp.workerPool.workers), 1)\n self.assertEqual(self.pcp.workerPool.hasAvailableCapacity(), True)\n # Now it has some capacity.\n self.checkPerformer(WorkerConnectionPool)", "def Initialize(self):\n self.queue_workers = [\n gevent.spawn(self._EventQueueWorker) for _ in xrange(self.num_workers)]\n self.gc_worker = gevent.spawn(self._GarbageCollectorWorker)", "def create_worker(num_worker, server_ip, server_port):\n for i in range(int(num_worker)):\n print \"-- worker initializing --\"\n dask_server = 
Worker('tcp://'+server_ip+\":\"+str(server_port), loop=loop)\n dask_server.start()", "def setupClass(cls):\n cls._tmp_dir = tempfile.mkdtemp()\n cls.test_filepath = os.path.join( cls._tmp_dir, \"test_data.h5\" )\n cls._generate_testdata_h5(cls.test_filepath)\n cls.server_proc, cls.shutdown_event = cls._start_mockserver( cls.test_filepath, same_process=True )\n cls.client_connection = httplib.HTTPConnection( \"localhost:8000\" )", "def setup_class(self):\n self._tester = self.get_remote('tester').run_prox_with_config(\"01_handle_none-gen.cfg\", \"-e -t\")\n # TODO tester.run_prox_with_config should raise an exception on error, so the below if block can be removed\n if (self._tester == None):\n raise IOError(\"Could not connect to PROX on the tester system\")\n else:\n logging.debug(\"Connected to PROX on Tester\")\n\n self._sut = self.get_remote('sut').run_prox_with_config(\"01_handle_none-sut.cfg\", \"-t\")\n if (self._sut == None):\n raise IOError(\"Could not connect to PROX on the SUT\")\n else:\n logging.debug(\"Connected to PROX on SUT\")\n\n self._cores = [1, 2, 3, 4]", "def seed_worker(_worker_id):\n worker_seed = torch.initial_seed() % 2 ** 32\n np.random.seed(worker_seed)\n random.seed(worker_seed)", "def _create_jobs(self):\n try:\n self.request_master_socket.send_multipart([remote_constants.WORKER_CONNECT_TAG])\n _ = self.request_master_socket.recv_multipart()\n except zmq.error.Again as e:\n logger.error(\"Can not connect to the master, \" \"please check if master is started.\")\n self.master_is_alive = False\n return\n\n initialized_jobs = self._init_jobs(job_num=self.device_count)\n self.request_master_socket.setsockopt(zmq.RCVTIMEO, remote_constants.HEARTBEAT_TIMEOUT_S * 1000)\n\n def master_heartbeat_exit_callback_func():\n logger.warning(\"[Worker] lost connection with the master, will exit reply heartbeat for master.\")\n if self.worker_status is not None:\n self.worker_status.clear()\n self.log_server_proc.kill()\n self.log_server_proc.wait()\n # exit the worker\n self.exit()\n\n self.master_heartbeat_thread = HeartbeatServerThread(\n heartbeat_exit_callback_func=master_heartbeat_exit_callback_func)\n self.master_heartbeat_thread.setDaemon(True)\n self.master_heartbeat_thread.start()\n self.master_heartbeat_address = self.master_heartbeat_thread.get_address()\n\n logger.set_dir(\n os.path.expanduser('~/.parl_data/worker/{}'.format(self.master_heartbeat_address.replace(':', '_'))))\n if self.cpu_num:\n logger.info(\"[Worker] Connect to the master node successfully. \" \"({} CPUs)\".format(self.cpu_num))\n elif self.gpu_num:\n logger.info(\"[Worker] Connect to the master node successfully. 
\" \"({} GPUs)\".format(self.gpu_num))\n\n for job in initialized_jobs:\n job.worker_address = self.master_heartbeat_address\n\n allocated_cpu = AllocatedCpu(self.master_heartbeat_address, self.cpu_num)\n allocated_gpu = AllocatedGpu(self.master_heartbeat_address, self.gpu)\n initialized_worker = InitializedWorker(self.master_heartbeat_address, initialized_jobs, allocated_cpu,\n allocated_gpu, socket.gethostname())\n self.request_master_socket.send_multipart(\n [remote_constants.WORKER_INITIALIZED_TAG,\n cloudpickle.dumps(initialized_worker)])\n\n message = self.request_master_socket.recv_multipart()\n if message[0] == remote_constants.REJECT_CPU_WORKER_TAG:\n logger.error(\"GPU cluster rejects a CPU worker to join in\")\n self.worker_is_alive = False\n elif message[0] == remote_constants.REJECT_GPU_WORKER_TAG:\n logger.error(\"CPU cluster rejects a GPU worker to join in\")\n self.worker_is_alive = False\n else:\n self.worker_status = WorkerStatus(self.master_heartbeat_address, initialized_jobs, self.cpu_num,\n self.gpu_num)", "def setUp(self):\n\n self._hash_bins = 10\n self._hash_embedding_dim = 4\n self._embedding_dim = 2\n\n self._default_config = {\n \"hash_bins\": self._hash_bins,\n \"hash_embedding_dim\": self._hash_embedding_dim,\n \"embedding_dim\": self._embedding_dim\n }", "def __init__(self, worker, event_loop):\n self.weakref_worker = weakref.ref(worker)\n self.event_loop = event_loop\n self.asyncio_task = None", "def _get_executor_init(self, workers):\n def pool_fn(seqs):\n pool = get_pool_class(True)(\n workers, initializer=init_pool_generator,\n initargs=(seqs, self.random_seed, get_worker_id_queue()))\n _DATA_POOLS.add(pool)\n return pool\n return pool_fn", "def worker_process_init(*args, **kwargs):\n\n riberry.model.conn.dispose_engine()", "def seed_worker(worker_id):\n worker_seed = torch.initial_seed() % 2 ** 32\n np.random.seed(worker_seed)\n random.seed(worker_seed)", "def setUp(self):\n\n self.testInit = TestInit(__file__)\n self.testInit.setLogging()\n self.testInit.setDatabaseConnection()\n self.testInit.setSchema(customModules = [\"WMCore.WMBS\",'WMCore.MsgService',\n 'WMCore.ResourceControl', 'WMCore.ThreadPool',\n 'WMCore.Agent.Database'],\n useDefault = False)\n\n myThread = threading.currentThread()\n self.daoFactory = DAOFactory(package = \"WMCore.WMBS\",\n logger = myThread.logger,\n dbinterface = myThread.dbi)\n\n\n\n locationAction = self.daoFactory(classname = \"Locations.New\")\n pendingSlots = self.daoFactory(classname = \"Locations.SetPendingSlots\")\n\n\n for site in self.sites:\n locationAction.execute(siteName = site, pnn = 'se.%s' % (site), ceName = site)\n pendingSlots.execute(siteName = site, pendingSlots = 1000)\n\n\n #Create sites in resourceControl\n resourceControl = ResourceControl()\n for site in self.sites:\n resourceControl.insertSite(siteName = site, pnn = 'se.%s' % (site), ceName = site)\n resourceControl.insertThreshold(siteName = site, taskType = 'Processing', \\\n maxSlots = 10000, pendingSlots = 10000)\n\n\n self.testDir = self.testInit.generateWorkDir()\n\n\n # Set heartbeat\n for component in self.components:\n heartbeatAPI = HeartbeatAPI(component)\n heartbeatAPI.registerComponent()\n\n self.configFile = EmulatorSetup.setupWMAgentConfig()\n\n return", "def _state_machine_setup(self):\r\n self.fsm = StateMachine()\r\n self.fsm.add_state(\"start\", self.start_state)\r\n self.fsm.add_state(\"init_trial\", self.init_trial_state)\r\n self.fsm.add_state(\"pre_probe\", self.pre_probe_state)\r\n self.fsm.add_state(\"probe\", 
self.probe_state)\r\n self.fsm.add_state(\"response\", self.response_state)\r\n self.fsm.add_state(\"pause\", self.pause_state)\r\n # define end and start state\r\n self.fsm.add_state(\"end\", self.end_state, end_state=True)\r\n self.fsm.set_start(\"start\")\r\n self.fsm.add_logger(self.logger_main)\r\n self.go_next = False", "def setUp(self):\n\n self._hash_bins = 10\n self._embedding_dim = 2\n\n self._default_config = {\n \"hash_bins\": self._hash_bins,\n \"embedding_dim\": self._embedding_dim\n }", "def setUp(self):\n SimTimePublisher._sim_time_setup_requested = False", "def initialize_jobserver(*args, **kwargs):\n\n assert(_MakeJobServer._singleton is None)\n\n # Check if the jobserver is supported\n supported = _test_support()\n\n if not supported:\n _MakeJobServer._singleton = None\n log('@{yf}WARNING: Make job server not supported. The number of Make '\n 'jobs may exceed the number of CPU cores.@|')\n return\n\n # Create the jobserver singleton\n _MakeJobServer._singleton = _MakeJobServer(*args, **kwargs)", "def _initialize_runners_startup(self):\n if self.command_group.is_cmd0_runner():\n self._initialize_runner(self.command_group.cmd0)\n if self.command_group.is_cmd1_runner():\n self._initialize_runner(self.command_group.cmd1)\n if self.command_group.is_cmd2_runner():\n self._initialize_runner(self.command_group.cmd2)", "def __init__(self, *args, wick_parallel=0, **kwargs):\n super().__init__(*args, **kwargs)\n self._wick_parallel = wick_parallel", "def setUp(self):\n self.setup_start_servers = False\n super(ZeroConfigTest, self).setUp()", "def _setupPools(self):\n reactor = MemoryReactorWithClock()\n cph = SteppablePoolHelper(jobSchema + schemaText)\n then = datetime.datetime(2012, 12, 12, 12, 12, 12)\n reactor.advance(astimestamp(then))\n cph.setUp(self)\n qpool = ControllerQueue(reactor, cph.pool.connection, useWorkerPool=False)\n\n realChoosePerformer = qpool.choosePerformer\n performerChosen = []\n\n def catchPerformerChoice(onlyLocally=False):\n result = realChoosePerformer(onlyLocally=onlyLocally)\n performerChosen.append(True)\n return result\n\n qpool.choosePerformer = catchPerformerChoice\n reactor.callLater(0, qpool._workCheck)\n\n qpool.startService()\n cph.flushHolders()\n\n return cph, qpool, reactor, performerChosen", "def __init__( self, app, nworkers, **kwds ):\n super( LwrJobRunner, self ).__init__( app, nworkers, runner_param_specs=LWR_PARAM_SPECS, **kwds )\n self._init_worker_threads()\n galaxy_url = self.runner_params.galaxy_url\n if galaxy_url:\n galaxy_url = galaxy_url.rstrip(\"/\")\n self.galaxy_url = galaxy_url\n self.__init_client_manager()\n if self.runner_params.url:\n # This is a message queue driven runner, don't monitor\n # just setup required callback.\n self.client_manager.ensure_has_status_update_callback(self.__async_update)\n else:\n self._init_monitor_thread()", "def setup(self) -> None:\n self.running = True\n self.listen()\n self.start_workers()\n\n # Send server socket to workers.\n assert self.socket is not None\n for work_queue in self.work_queues:\n work_queue[0].send(self.family)\n send_handle(work_queue[0], self.socket.fileno(),\n self.workers[self.current_worker_id].pid)\n self.socket.close()", "def setUp(self):\n self.number_of_tests = 20 # number of pseudo-random seeds\n self.max_nq = 2 # maximal number of qubits to check", "def __init__(self,server_list):\n self.workers=[]\n self.worker_by_name={}\n worker_id = 1\n for host,port in server_list:\n # Add the uid here can help with port conflicts, but only works\n # on Unix clusters. 
We really need to work out a daemon service\n # model that makes the port mess transparent.\n port = port #+ os.getuid()\n new_worker = sync_cluster.standard_sync_client(host,port,worker_id)\n self.workers.append(new_worker)\n self.worker_by_name[host] = new_worker\n worker_id = worker_id + 1", "def __init__(self):\n self.setup_called = False", "def setup_class(cls):\n cls.cwd = os.getcwd()\n cls.t = tempfile.mkdtemp()\n os.chdir(cls.t)\n\n cls.log_files = []\n temp_dir = os.path.join(cls.t, \"temp_dir_node\")\n os.mkdir(temp_dir)\n cls.connection_node = _make_libp2p_connection(\n data_dir=temp_dir, port=DEFAULT_PORT + 1, delegate=True\n )\n cls.multiplexer_node = Multiplexer(\n [cls.connection_node], protocols=[MockDefaultMessageProtocol]\n )\n cls.log_files.append(cls.connection_node.node.log_file)\n cls.multiplexer_node.connect()\n\n try:\n temp_dir_client_1 = os.path.join(cls.t, \"temp_dir_client_1\")\n os.mkdir(temp_dir_client_1)\n cls.connection_client_1 = _make_libp2p_client_connection(\n data_dir=temp_dir_client_1,\n peer_public_key=cls.connection_node.node.pub,\n ledger_api_id=FetchAICrypto.identifier,\n )\n cls.multiplexer_client_1 = Multiplexer(\n [cls.connection_client_1], protocols=[MockDefaultMessageProtocol]\n )\n cls.multiplexer_client_1.connect()\n\n temp_dir_client_2 = os.path.join(cls.t, \"temp_dir_client_2\")\n os.mkdir(temp_dir_client_2)\n cls.connection_client_2 = _make_libp2p_client_connection(\n data_dir=temp_dir_client_2,\n peer_public_key=cls.connection_node.node.pub,\n ledger_api_id=EthereumCrypto.identifier,\n )\n cls.multiplexer_client_2 = Multiplexer(\n [cls.connection_client_2], protocols=[MockDefaultMessageProtocol]\n )\n cls.multiplexer_client_2.connect()\n\n wait_for_condition(lambda: cls.connection_client_1.is_connected is True, 10)\n wait_for_condition(lambda: cls.connection_client_2.is_connected is True, 10)\n except Exception:\n cls.multiplexer_node.disconnect()\n raise", "def __init__(self, simulator, simulation_params):\n self._simulator = simulator\n self._worker_procs = []\n self._workers_running = False\n self._task_queue = mp.JoinableQueue()\n self._result_queue = mp.Queue()\n self._shutdown_workers_event = mp.Event()\n self._log = logging.getLogger('MLProject.parallel.Master')\n\n simulator_params = {'movements': simulator.movements,\n 'max_steps': simulator.max_steps,\n 'render': simulation_params.render,\n 'log_level': self._log.getEffectiveLevel()}\n self._log.info('Creating worker processes')\n for wnum in range(1, simulation_params.num_workers + 1):\n worker = mp.Process(target=worker_proc,\n args=(wnum, self._task_queue, self._result_queue,\n self._shutdown_workers_event, simulator_params),\n name='Worker_{}'.format(wnum))\n worker.daemon = True\n self._worker_procs.append(worker)", "def _manually_initialize(self) -> None:\n # XXX: maybe refactor, this is actually part of the public interface\n pass", "def __init__(__self__, *,\n threads_per_core: int):\n pulumi.set(__self__, \"threads_per_core\", threads_per_core)", "def _get_executor_init(self, workers):\n def pool_fn(seqs):\n pool = get_pool_class(True)(\n workers, initializer=init_pool_generator,\n initargs=(seqs, None, get_worker_id_queue()))\n _DATA_POOLS.add(pool)\n return pool\n\n return pool_fn", "async def test_internet(coresys: CoreSys):\n coresys.core.state = CoreState.RUNNING\n\n class TestClass:\n \"\"\"Test class.\"\"\"\n\n def __init__(self, coresys: CoreSys):\n \"\"\"Initialize the test class.\"\"\"\n self.coresys = coresys\n\n 
@Job(conditions=[JobCondition.INTERNET_HOST])\n async def execute_host(self):\n \"\"\"Execute the class method.\"\"\"\n return True\n\n @Job(conditions=[JobCondition.INTERNET_SYSTEM])\n async def execute_system(self):\n \"\"\"Execute the class method.\"\"\"\n return True\n\n test = TestClass(coresys)\n\n coresys.host.network._connectivity = True\n coresys.supervisor._connectivity = True\n assert await test.execute_host()\n assert await test.execute_system()\n\n coresys.host.network._connectivity = True\n coresys.supervisor._connectivity = False\n assert await test.execute_host()\n assert not await test.execute_system()\n\n coresys.host.network._connectivity = None\n coresys.supervisor._connectivity = True\n assert await test.execute_host()\n assert await test.execute_system()\n\n coresys.host.network._connectivity = False\n coresys.supervisor._connectivity = True\n assert not await test.execute_host()\n assert await test.execute_system()", "def experiment_init(self):\n pass" ]
[ "0.7411761", "0.71275353", "0.6863804", "0.68591213", "0.68004584", "0.67204416", "0.66945666", "0.66871023", "0.66871023", "0.6637621", "0.6634074", "0.657743", "0.6510289", "0.6509755", "0.6507559", "0.6495748", "0.6495748", "0.64917386", "0.6455389", "0.64043945", "0.63973695", "0.63647604", "0.63285184", "0.62533015", "0.62119734", "0.6165571", "0.6146755", "0.61389947", "0.6114634", "0.6110396", "0.61083055", "0.60927564", "0.60899854", "0.60899854", "0.60894245", "0.6069655", "0.60564774", "0.6042136", "0.6038831", "0.597153", "0.59559566", "0.59435946", "0.5923074", "0.5920879", "0.59122074", "0.5910641", "0.58994615", "0.5856281", "0.5854643", "0.5852364", "0.58457565", "0.58342165", "0.5828054", "0.58277804", "0.58261067", "0.5814714", "0.5802014", "0.57851666", "0.5776964", "0.5775662", "0.5774218", "0.5752989", "0.57488984", "0.5731945", "0.5725298", "0.5711989", "0.5704363", "0.5703944", "0.5698017", "0.569473", "0.5693027", "0.56910324", "0.5681036", "0.56726265", "0.5670179", "0.5669108", "0.566558", "0.5663994", "0.5660941", "0.5654452", "0.5641853", "0.56379664", "0.5637483", "0.5633157", "0.5628979", "0.5626827", "0.56217676", "0.5616875", "0.56101924", "0.5595247", "0.55909306", "0.558072", "0.5576107", "0.5573181", "0.55728084", "0.55662596", "0.5563562", "0.5561751", "0.55597997", "0.55550086" ]
0.6802044
4
The config can specify a script to be run on each worker upon cluster initialization. This test verifies that it is launched and active while the workflow runs, and that it is launched on each worker, or just once per machine, depending on the config.
def test_worker_initialization(setup_worker_initialization_template):
    template_dir, _config, once_per_machine = setup_worker_initialization_template

    num_workers = 2
    if once_per_machine or CLUSTER_TYPE in ("synchronous", "processes"):
        expected_script_count = 1
    else:
        expected_script_count = num_workers

    @checkrun
    def execute(workflow_inst):
        script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent
        script_count = len(find_processes('_TEST_SCRIPT_FAKE_ARG_'))
        assert script_count > 0, f"Worker script is not running. Check logs in:\n{script_dir}"
        assert script_count <= expected_script_count, f"Worker script started too many times. Check logs in:\n{script_dir}"
        assert script_count == expected_script_count, f"Worker script not started on all workers. Check logs in:\n{script_dir}"

    _execution_dir, workflow_inst = launch_flow(template_dir, num_workers, _custom_execute_fn=execute)

    script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent
    script_count = len(find_processes('_TEST_SCRIPT_FAKE_ARG_'))
    assert script_count == 0, \
        ("Worker script(s) remained running after the workflow exited."\
         f"Check logs in:\n{script_dir}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_cluster_jobs_script(self):\r\n\r\n qiime_config = load_qiime_config()\r\n submit_script = qiime_config['cluster_jobs_fp']\r\n\r\n if (submit_script):\r\n full_path = which(submit_script)\r\n if full_path:\r\n submit_script = full_path\r\n self.assertTrue(exists(submit_script),\r\n \"cluster_jobs_fp is not set to a valid path in qiime config: %s\" % submit_script)\r\n # check if executable\r\n self.assertTrue(access(submit_script, X_OK),\r\n \"cluster_jobs_fp is not executable: %s\" % submit_script)\r\n else:\r\n # Can't run in parallel, but not a critical error\r\n pass", "def setup_worker_initialization_template(request):\n once_per_machine = request.param\n template_dir = tempfile.mkdtemp(suffix=\"test-worker-initialization\")\n\n worker_script = f\"{template_dir}/do-nothing.sh\"\n with open(worker_script, 'w') as f:\n f.write(\"#!/bin/bash\\n\")\n f.write(\"sleep 10\")\n os.chmod(worker_script, 0o777)\n \n config = {\n \"workflow-name\": \"workflow\",\n \"cluster-type\": CLUSTER_TYPE,\n \n \"worker-initialization\": {\n \"script-path\": \"do-nothing.sh\",\n \"only-once-per-machine\": once_per_machine,\n \"script-args\": [\"_TEST_SCRIPT_FAKE_ARG_\"], # This is just here to make it easy to identify the process\n \"launch-delay\": 0\n }\n }\n \n with open(f\"{template_dir}/workflow.yaml\", 'w') as f:\n yaml.dump(config, f)\n\n return template_dir, config, once_per_machine", "def test_configurator(self):\n runner = Runner(YamlManifest(manifest))\n run1 = runner.run(JobOptions(resource=\"test1\"))\n assert not run1.unexpectedAbort, run1.unexpectedAbort.getStackTrace()\n assert len(run1.workDone) == 1, run1.workDone\n result = list(run1.workDone.values())[0].result\n self.assertEqual(result.outputs, {\"fact1\": \"test1\", \"fact2\": \"test\"})\n self.assertEqual(result.result.get(\"stdout\"), sys.executable)\n assert run1.status == Status.ok, run1.summary()", "def test_worker_dvid_initialization():\n repo_dir = Path(flyemflows.__file__).parent.parent\n template_dir = tempfile.mkdtemp(suffix=\"test-worker-dvid\")\n \n # Copy worker script/config into the template\n shutil.copy(f'{repo_dir}/scripts/worker-dvid/dvid.toml',\n f'{template_dir}/dvid.toml')\n \n shutil.copy(f'{repo_dir}/scripts/worker-dvid/launch-worker-dvid.sh',\n f'{template_dir}/launch-worker-dvid.sh')\n \n config = {\n \"workflow-name\": \"workflow\",\n \"cluster-type\": CLUSTER_TYPE,\n \n \"worker-initialization\": {\n \"script-path\": \"launch-worker-dvid.sh\",\n \"only-once-per-machine\": True,\n \"script-args\": [\"_TEST_SCRIPT_FAKE_ARG_\"], # This is just here to make it easy to identify the process\n \"launch-delay\": 1.0\n }\n }\n \n with open(f\"{template_dir}/workflow.yaml\", 'w') as f:\n yaml.dump(config, f)\n\n def is_worker_dvid_running():\n return len(find_processes('_TEST_SCRIPT_FAKE_ARG_')) > 0\n \n @checkrun\n def execute(workflow_inst):\n script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent\n assert is_worker_dvid_running(), f\"Worker DVID is not running. 
Check logs in:\\n{script_dir}\"\n \n _execution_dir, workflow_inst = launch_flow(template_dir, 1, _custom_execute_fn=execute)\n script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent\n assert not is_worker_dvid_running(), \\\n (\"Worker DVID remained running after the workflow exited.\"\\\n f\"Check logs in:\\n{script_dir}\")", "def launch(self):\n self.register_env_creator()\n\n # All worker nodes will block at this step during training\n ray_cluster_config = self.ray_init_config()\n if not self.is_master_node:\n return\n\n # Start the driver on master node\n ray.init(**ray_cluster_config)\n experiment_config = self.get_experiment_config()\n experiment_config = self.customize_experiment_config(experiment_config)\n print(\"Running experiment with config %s\" % json.dumps(experiment_config, indent=2))\n run_experiments(experiment_config)\n\n all_wokers_host_names = self.get_all_host_names()[1:]\n # If distributed job, send TERMINATION_SIGNAL to all workers.\n if len(all_wokers_host_names) > 0:\n self.sage_cluster_communicator.create_s3_signal(TERMINATION_SIGNAL)", "def launch(config):\n \n launch_with_configs([config])", "def run_experiment(experiment: str):\n print_color(\"***************************************************************************************************\", bcolors.OKBLUE)\n print_color(f\"* {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} Experiment: {experiment}\", bcolors.OKBLUE)\n print_color(\"***************************************************************************************************\", bcolors.OKBLUE)\n\n experiment_file = experiment + \".yaml\"\n\n # Set namespace to check\n with open(f\"./litmus/{experiment_file}\") as f:\n spec = yaml.load(f, Loader=yaml.FullLoader)\n result_name = spec['metadata']['name']\n namespace = spec['metadata']['namespace']\n\n print_color(f\"Running Litmus ChaosEngine Experiment {experiment_file} in namespace {namespace}\")\n print_color(f\"Deploying {experiment_file}...\")\n run_shell(f\"kubectl delete chaosengine {result_name} -n {namespace}\")\n run_shell(f\"kubectl create -f ./litmus/{experiment_file} -n {namespace}\")\n\n # Check status of experiment execution\n startTime = datetime.now()\n print_color(f\"{startTime.strftime('%Y-%m-%d %H:%M:%S')} Running experiment...\")\n expStatusCmd = \"kubectl get chaosengine \" + result_name + \" -o jsonpath='{.status.experiments[0].status}' -n \" + namespace\n run_shell(expStatusCmd)\n logs_cmd = f\"kubectl logs --since=10s -l name={experiment} -n {namespace}\"\n print(f\"\\n{bcolors.OKGREEN}//** Experiment Logs ({logs_cmd}) **//\\n\\n\")\n try:\n while subprocess.check_output(expStatusCmd, shell=True).decode('unicode-escape') != \"Completed\":\n os.system(logs_cmd)\n os.system(\"sleep 10\")\n\n print(f\"\\n\\n//** End of Experiment Logs **//{bcolors.ENDC}\\n\")\n\n # View experiment results\n run_shell(f\"kubectl describe chaosresult {result_name}-{experiment} -n {namespace}\")\n\n except:\n print_color(\"User has cancelled script execution.\", bcolors.FAIL)\n sys.exit(2)\n\n # Store Experiment Result\n status = subprocess.check_output(\"kubectl get chaosresult \" + result_name + \"-\" + experiment + \" -n \" + namespace + \" -o jsonpath='{.status.experimentstatus.verdict}'\", shell=True).decode('unicode-escape')\n return ExperimentResult(experiment, status, startTime)", "def verify_runconfig(master_host, namespace, job_name, replica, num_ps,\n num_workers, num_evaluators):\n is_chief = True\n num_replicas = 1\n if replica == \"ps\":\n is_chief = False\n 
num_replicas = num_ps\n elif replica == \"worker\":\n is_chief = False\n num_replicas = num_workers\n elif replica == \"evaluator\":\n is_chief = False\n num_replicas = num_evaluators\n\n # Construct the expected cluster spec\n chief_list = [\n \"{name}-chief-0.{ns}.svc:2222\".format(name=job_name, ns=namespace)\n ]\n ps_list = []\n for i in range(num_ps):\n ps_list.append(\"{name}-ps-{index}.{ns}.svc:2222\".format(\n name=job_name, index=i, ns=namespace))\n worker_list = []\n for i in range(num_workers):\n worker_list.append(\"{name}-worker-{index}.{ns}.svc:2222\".format(\n name=job_name, index=i, ns=namespace))\n evaluator_list = []\n for i in range(num_evaluators):\n evaluator_list.append(\"{name}-evaluator-{index}.{ns}.svc:2222\".format(\n name=job_name, index=i, ns=namespace))\n cluster_spec = {\n \"chief\": chief_list,\n \"ps\": ps_list,\n \"worker\": worker_list,\n }\n if num_evaluators > 0:\n cluster_spec[\"evaluator\"] = evaluator_list\n\n for i in range(num_replicas):\n full_target = \"{name}-{replica}-{index}\".format(\n name=job_name, replica=replica.lower(), index=i)\n actual_config = get_runconfig(master_host, namespace, full_target)\n full_svc = \"{ft}.{ns}.svc\".format(ft=full_target, ns=namespace)\n expected_config = {\n \"task_type\": replica,\n \"task_id\": i,\n \"cluster_spec\": cluster_spec,\n \"is_chief\": is_chief,\n \"master\": \"grpc://{fs}:2222\".format(fs=full_svc),\n \"num_worker_replicas\": num_workers + 1, # Chief is also a worker\n \"num_ps_replicas\": num_ps,\n } if not replica == \"evaluator\" else {\n # Evaluator has special config.\n \"task_type\": replica,\n \"task_id\": 0,\n \"cluster_spec\": {},\n \"is_chief\": is_chief,\n \"master\": \"\",\n \"num_worker_replicas\": 0,\n \"num_ps_replicas\": 0,\n }\n\n # Compare expected and actual configs\n if actual_config != expected_config:\n msg = \"Actual runconfig differs from expected. Expected: {0} Actual: {1}\".format(\n str(expected_config), str(actual_config))\n logging.error(msg)\n raise RuntimeError(msg)", "def main():\n parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('--config', required=True, help='Configuration file for run. Must be in shared_dir')\n parser.add_argument('-c', '--cluster_size', required=True, help='Number of workers desired in the cluster.')\n parser.add_argument('-s', '--sample_size', required=True, type=float, help='Size of the sample deisred in TB.')\n parser.add_argument('-t', '--instance_type', default='c3.8xlarge', help='e.g. 
m4.large or c3.8xlarge.')\n parser.add_argument('-n', '--cluster_name', required=True, help='Name of cluster.')\n parser.add_argument('--namespace', default='jtvivian', help='CGCloud NameSpace')\n parser.add_argument('--spot_price', default=0.60, help='Change spot price of instances')\n parser.add_argument('-b', '--bucket', default='tcga-data-cgl-recompute', help='Bucket where data is.')\n parser.add_argument('-d', '--shared_dir', required=True,\n help='Full path to directory with: pipeline script, launch script, config, and master key.')\n params = parser.parse_args()\n\n # Run sequence\n start = time.time()\n # Get number of samples from config\n with open(params.config, 'r') as f:\n num_samples = len(f.readlines())\n # Launch cluster and pipeline\n uuid = fix_launch(params)\n launch_cluster(params)\n ids = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')\n launch_pipeline(params)\n # Blocks until all workers are idle\n stop = time.time()\n # Collect metrics from cluster\n collect_metrics(ids, list_of_metrics, start, stop, uuid=uuid)\n # Apply \"Insta-kill\" alarm to every worker\n map(apply_alarm_to_instance, ids)\n # Kill leader\n logging.info('Killing Leader')\n leader_id = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-leader')[0]\n apply_alarm_to_instance(leader_id, threshold=5)\n # Generate Run Report\n avail_zone = get_avail_zone(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')[0]\n total_cost, avg_hourly_cost = calculate_cost(params.instance_type, ids[0], avail_zone)\n # Report values\n output = ['UUID: {}'.format(uuid),\n 'Number of Samples: {}'.format(num_samples),\n 'Number of Nodes: {}'.format(params.cluster_size),\n 'Cluster Name: {}'.format(params.cluster_name),\n 'Source Bucket: {}'.format(params.bucket),\n 'Average Hourly Cost: ${}'.format(avg_hourly_cost),\n 'Cost per Instance: ${}'.format(total_cost),\n 'Availability Zone: {}'.format(avail_zone),\n 'Start Time: {}'.format(datetime.isoformat(datetime.utcfromtimestamp(start))),\n 'Stop Time: {}'.format(datetime.isoformat(datetime.utcfromtimestamp(stop))),\n 'Total Cost of Cluster: ${}'.format(float(total_cost) * int(params.cluster_size)),\n 'Cost Per Sample: ${}'.format((float(total_cost) * int(params.cluster_size) / int(num_samples)))]\n with open(os.path.join(str(uuid) + '_{}'.format(str(datetime.utcnow()).split()[0]), 'run_report.txt'), 'w') as f:\n f.write('\\n'.join(output))\n # You're done!\n logging.info('\\n\\nScaling Test Complete.')", "def run_experiment():\n pass", "def test_by_config(self):\n # addon_executor = AddonExecutor(execute_order, stop_order)\n # self.assertEqual(expected, addon_executor.execute_with_config(addon))\n\n self.run_mgr.by_default(self.cli_inst)\n output = self._get_lines_as_list(sys.stdout)\n\n self.assertTrue(output[0].startswith('Start'))\n self.assertTrue(output[1].startswith('Execute'))\n self.assertTrue(output[2].startswith('Stop'))", "def run(config):\n\tlog.debug('-- in example.py')\n#\tgetWLSMachineandandExecuteSecondary(config)\n#\t__createPegaConfigCommand(config)\n#\tcreateUsers(config)\n#\t__connectAdminServer(config)\n\tconnectAdminServerOverSSL(config)", "def run(config):\n locator = cea.inputlocator.InputLocator(config.scenario)\n print('Key in run')\n print(config.bigmacc.key)\n i = config.bigmacc.key\n print(i)\n # SCENARIO SETUP ---\n config.general.project = os.path.join(config.bigmacc.data, config.general.parent, i)\n 
print(config.general.project)\n cea.datamanagement.data_initializer.main(config)\n # use the scenario code to set the year for the lca and other operations that need the current year\n pathway_code = config.general.parent\n pathway_items = pathway_code.split('_')\n scenario_year = int(pathway_items[1])\n config.emissions.year_to_calculate = scenario_year\n\n bigmacc_outputs_path = os.path.join(config.bigmacc.data, config.general.parent, 'bigmacc_out', config.bigmacc.round)\n\n scen_check = pd.read_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'), index_col='Unnamed: 0')\n experiment_key = 'exp_{}'.format(i)\n print(experiment_key)\n keys = [int(x) for x in str(i)]\n if experiment_key in scen_check['Experiments'].values.tolist():\n print('Experiment was finished previously, moving to next.')\n pass\n else:\n print('START: experiment {}.'.format(i))\n\n # INITIALIZE TIMER ---\n t0 = time.perf_counter()\n if os.path.exists(os.path.join(config.bigmacc.data, config.general.parent, i)):\n print(' - Folder exists for experiment {}.'.format(i))\n else:\n os.mkdir(os.path.join(config.bigmacc.data, config.general.parent, i))\n print(' - Folder does not exist for experiment {}, creating now.'.format(i))\n\n # run the archetype mapper to leverage the newly loaded typology file and set parameters\n print(' - Running archetype mapper for experiment {} to remove changes made in the last experiment.'.format(i))\n cea.datamanagement.archetypes_mapper.main(config)\n\n # run the rule checker to set the scenario parameters\n print(' - Running rule checker for experiment {}.'.format(i))\n cea.bigmacc.bigmacc_rules.main(config)\n\n # SIMULATIONS ---\n\n print(' - Run radiation is {}.'.format(config.bigmacc.runrad))\n print(' - Write sensor data is {}.'.format(config.radiation.write_sensor_data))\n # checking on need for radiation simulation\n\n if config.bigmacc.runrad == True:\n # this nested statement is for when we rerun the simulations and no longer need to run the unique radiation\n if config.bigmacc.rerun != True:\n print(' - Running radiation simulation for experiment {}.'.format(i))\n if os.path.exists(locator.get_radiation_building('B000')):\n print(' - Radiation folder exists for experiment {}, copying.'.format(i))\n else:\n print(' - Radiation running for experiment {}.'.format(i))\n cea.resources.radiation_daysim.radiation_main.main(config)\n else:\n # print(' - Copying radiation simulation data from previous run for experiment {}.'.format(i))\n old_rad_files = os.path.join(config.bigmacc.data, config.general.parent, i,\n config.general.scenario_name, 'outputs', 'data', 'solar-radiation')\n # distutils.dir_util.copy_tree(old_rad_files, locator.get_solar_radiation_folder())\n else:\n radfiles = config.bigmacc.copyrad\n # print(' - Copying radiation results from {}.'.format(radfiles))\n # distutils.dir_util.copy_tree(radfiles, locator.get_solar_radiation_folder())\n print(' - Experiment {} does not require new radiation simulation.'.format(i))\n\n # running demand forecasting\n if os.path.exists(locator.get_schedule_model_file('B000')):\n print(' - Schedules exist for experiment {}.'.format(i))\n else:\n print(' - Schedule maker running for experiment {}.'.format(i))\n schedule_maker.main(config)\n\n # check to see if we need to rerun demand or if we can copy\n if config.bigmacc.rerun != True:\n print(' - Running demand simulation for experiment {}.'.format(i))\n cea.demand.demand_main.main(config)\n else:\n if keys[0] == 1:\n print(' - Running demand simulation for experiment {}.'.format(i))\n 
cea.demand.demand_main.main(config)\n elif keys[6] == 1:\n print(' - Running demand simulation for experiment {}.'.format(i))\n cea.demand.demand_main.main(config)\n else:\n cea.demand.demand_main.main(config)\n # print(' - Looking for demand results data from previous run for experiment {}.'.format(i))\n # old_demand_files = os.path.join(config.bigmacc.data, config.general.parent, i,\n # config.general.scenario_name, 'outputs', 'data', 'demand')\n # if os.path.exists(old_demand_files):\n # # print(' - Copy demand results files from previous run of experiment {}.'.format(i))\n # # distutils.dir_util.copy_tree(old_demand_files, locator.get_demand_results_folder())\n # pass\n # else:\n # print(' - No results found.')\n # print(' - Running demand simulation for experiment {}.'.format(i))\n # cea.demand.demand_main.main(config)\n\n if config.bigmacc.pv == True:\n print(' - Run PV is {}.'.format(config.bigmacc.pv))\n if config.bigmacc.rerun == True:\n print(' - Looking for radiation simulation data from previous run for experiment {}.'.format(i))\n old_pv_files = os.path.join(config.bigmacc.data, config.general.parent, i,\n config.general.scenario_name, 'outputs', 'data', 'potentials', 'solar')\n if os.path.exists(old_pv_files):\n # print(' - Copying PV files from previous run of experiment {}.'.format(i))\n # distutils.dir_util.copy_tree(old_pv_files, locator.solar_potential_folder())\n pass\n else:\n print(' - PV files do not exist for previous run of experiment {} at {}.'.format(i, old_pv_files))\n print(' - Running PV simulation for experiment {}.'.format(i))\n photovoltaic.main(config)\n else:\n # if PV simulation is needed, run it.\n print(' - Running PV simulation for experiment {}.'.format(i))\n photovoltaic.main(config)\n\n print('Run water-body exchange is {}.'.format(config.bigmacc.water))\n # if water-body simulation is needed, run it.\n if config.bigmacc.water == True:\n print(' - Running water body simulation for experiment {}.'.format(i))\n water.main(config)\n\n # recalculating the supply split between grid and ng in the websrook DH\n if keys[4] == 1:\n print(' - Do not run district heat recalculation.')\n else:\n print(' - Run district heat recalculation.')\n cea.bigmacc.wesbrook_DH.main(config)\n\n if keys[7] == 1:\n print(' - PV use detected. 
Adding PV generation to demand files.')\n util.write_pv_to_demand(config)\n else:\n print(' - No PV use detected.')\n\n # running the emissions and costing calculations\n print(' - Run cost and emissions scripts.')\n cea.analysis.costs.system_costs.main(config)\n cea.analysis.lca.main.main(config)\n\n # clone out the simulation inputs and outputs directory\n print(' - Transferring results directory for experiment {}.'.format(i))\n\n new_inputs_path = os.path.join(config.bigmacc.data, config.general.parent, i,\n config.general.scenario_name, 'inputs')\n new_outputs_path = os.path.join(config.bigmacc.data, config.general.parent, i,\n config.general.scenario_name, 'outputs', 'data')\n\n if config.bigmacc.rerun != True:\n distutils.dir_util.copy_tree(locator.get_data_results_folder(), new_outputs_path)\n distutils.dir_util.copy_tree(locator.get_input_folder(), new_inputs_path)\n\n time_elapsed = time.perf_counter() - t0\n\n # save log information\n log_df = pd.read_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'),\n index_col='Unnamed: 0')\n log_df = log_df.append(pd.DataFrame({'Experiments': 'exp_{}'.format(i),\n 'Completed': 'True',\n 'Experiment Time': '%d.2 seconds' % time_elapsed,\n 'Unique Radiation': config.bigmacc.runrad}, index=[0]), ignore_index=True)\n log_df.to_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'))\n log_df.to_csv(r\"C:\\Users\\justi\\Desktop\\126logger_backup.csv\", )\n\n # write netcdf of hourly_results\n netcdf_writer.main(config, time='hourly')\n\n if config.bigmacc.rerun != True:\n shutil.rmtree(locator.get_costs_folder())\n shutil.rmtree(locator.get_demand_results_folder())\n shutil.rmtree(locator.get_lca_emissions_results_folder())\n shutil.rmtree(locator.get_solar_radiation_folder())\n shutil.rmtree(locator.get_potentials_folder())\n else:\n print(' - Rerun does not require purging of the files.')\n\n # when the setpoint is changed it is in a deeper database than the archetypes mapper can reach so reset it here\n if keys[0] == 1:\n cea.datamanagement.data_initializer.main(config)\n else:\n pass\n print('END: experiment {}. 
\\n'.format(i))", "def test_resource_manager_on_driver():\n config = {\n \"workflow-name\": \"workflow\",\n \"cluster-type\": CLUSTER_TYPE,\n \n \"resource-manager\": {\n \"server\": \"driver\",\n \"port\": 4000,\n \"config\": {\n \"read_reqs\": 123,\n \"read_data\": 456,\n \"write_reqs\": 789,\n \"write_data\": 321\n }\n }\n }\n \n template_dir = tempfile.mkdtemp(suffix=\"test-resource-manager-on-driver-template\")\n with open(f\"{template_dir}/workflow.yaml\", 'w') as f:\n yaml.dump(config, f)\n \n @checkrun\n def execute(workflow_inst):\n client = ResourceManagerClient('127.0.0.1', 4000)\n mgr_config = client.read_config()\n assert mgr_config == config[\"resource-manager\"][\"config\"], \\\n \"Resource manager config does not match the one in the workflow config\"\n \n _execution_dir, _workflow = launch_flow(template_dir, 1, _custom_execute_fn=execute)\n assert execute.didrun\n \n # FIXME: For mysterious reasons, the check below does not work on Travis-CI.\n # Somehow, read_config() succeeds despite the fact that\n # the resource manager server was already terminated??\n if os.environ.get('TRAVIS', '') == 'true':\n pytest.skip(\"Skipping resource manager shutdown check on Travis-CI\")\n\n # Server should not be running any more after workflow exits.\n with pytest.raises(TimeoutError):\n client2 = ResourceManagerClient('127.0.0.1', 4000)\n client2.read_config()", "def provision(args):\n cfg_file = os.path.join(xbow.XBOW_CONFIGDIR, \"settings.yml\")\n\n with open(cfg_file, 'r') as ymlfile:\n cfg = yaml.safe_load(ymlfile)\n\n scheduler = get_by_name(cfg['scheduler_name'])\n if len(scheduler) == 0:\n raise ValueError('Error - cannot find the scheduler')\n elif len(scheduler) > 1:\n raise ValueError('Error - more than one scheduler found')\n workers = get_by_name(cfg['worker_pool_name'])\n if len(workers) == 0:\n print('Warning: no workers found')\n all_nodes = scheduler + workers\n all_cis = [ConnectedInstance(i) for i in all_nodes]\n with open(args.script, 'r') as f:\n for line in f:\n if len(line) > 0 and line[0] == '#':\n print(line[:-1])\n elif len(line) > 0 :\n command = line[:-1]\n if command.split()[0] != 'sudo':\n command = 'sudo ' + command\n print(command + ' : ', end='', flush=True)\n result = exec_all(all_cis, command)\n status = np.all(np.array(result) == 0)\n if status:\n print('OK')\n else:\n print('FAILED')\n for i in range(len(result)):\n if result[i] != 0:\n if i == 0:\n print('Error on scheduler:')\n else:\n print('Error on worker {}'.format(i-1))\n print(all_cis[i].output)\n break\n else:\n status = False\n print(line[:-1], ' : ERROR')\n break\n\n return status", "def setUp(self):\n self.spark, self.log, self.config = start_spark(app_name = \"test_etl_job\",\n files='configs/etl_config.json')", "def main():\n tester = Tester()\n # parse args, load configuration and create all required objects.\n tester.setup_experiment()\n # GO!\n tester.run_experiment()", "def test_run_experiment_locally(self) -> None:\n\n experiment = Experiment(\n name=\"torchx_booth_sequential_demo\",\n search_space=SearchSpace(parameters=self._parameters),\n optimization_config=OptimizationConfig(objective=self._objective),\n runner=self._runner,\n is_test=True,\n properties={Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF: True},\n )\n\n scheduler = Scheduler(\n experiment=experiment,\n generation_strategy=(\n choose_generation_strategy(\n search_space=experiment.search_space,\n )\n ),\n options=SchedulerOptions(),\n )\n\n try:\n for _ in range(3):\n scheduler.run_n_trials(max_trials=2)\n\n # TorchXMetric 
always returns trial index; hence the best experiment\n # for min objective will be the params for trial 0.\n scheduler.report_results()\n except FailureRateExceededError:\n pass # TODO(ehotaj): Figure out why this test fails in OSS.\n # Nothing to assert, just make sure experiment runs.", "def test_run_started(self):", "def setup_run(args, config): \n\n token = jwtfile.read() # read the JWT so we can send it in the header\n api = config['API']\n if not args.cwl: # beginning of process\n # request to get available options\n hdrs = {'begin-setup': 'True', 'token': token}\n r = Request(api['setup-run-start'], headers=hdrs)\n try:\n resp = urlopen(r)\n # if marked as unverified, we must login first to get a new token\n except HTTPError as e:\n # TODO deal with plain 400\n if e.code in [401, 406]:\n print('Your token is unverified. Please log in for another token.')\n login(args, config) # trigger login method\n return\n else:\n print('Was expecting a 401 or 406, got a {}'.format(e.code))\n return\n # print out options to command line\n jsn = json.loads(resp.read().decode()).get('opts', None)\n print('\\nPlease select a CWL and job (.yml) file and re-run this command'\\\n ' with the `--cwl <cwl>` option:\\n')\n print('Available Options\\n----------------')\n for k, v in jsn.items():\n print('{}: {}'.format(k, v))\n return\n cwl_file = args.cwl # get the .cwl\n # ask for a job title so the sevrer can store this\n title = None\n while not title: # can't skip\n title = input('Please enter a title for the job you are creating: ')\n hdrs = {'cwl-input': 'True', 'cwl': cwl_file, 'token': token}\n pld = {'cwl': cwl_file, 'job_title': title}\n r = Request(api['setup-run-select-wkflow'], data=urlencode(pld).encode(), headers=hdrs, method='POST')\n try:\n resp = urlopen(r)\n # we expect a response to ask us questions\n except HTTPError as e:\n if e.getcode() in [401, 406]:\n print('Uh oh, looks like your token has expired. 
Please re-login.')\n elif e.getcode() == 404: # notfound\n print('A template couldn\\'t be properly generated for that Workflow.')\n else:\n print('Expected 401, 404, 406, got {}'.format(e.getcode()))\n return\n # invoke the questions prompt; iterate through each CWL key\n job_input_dict = {} # initialize empty dict to be updated\n # send the inputs back as JSON\n print('You requested the following Workflow: \\n')\n jsn = json.loads(resp.read().decode()) # bytes to str to dict\n wkflow = jsn.get('workflow', None)\n print(wkflow)\n print('\\n')\n _req = jsn.get('required') # dict, but only because we're using requests lib...\n _opt = jsn.get('optional')\n job_input_dict.update(ask_wkflow(_req, _opt))\n job_inputs = json.dumps(job_input_dict)\n d = {\n 'cwl': cwl_file, \n 'job_inputs': job_inputs,\n 'job_title': title, \n }\n h = {'token': token}\n r = Request(api['setup-run-job-input'], data=urlencode(d).encode(), headers=h, method='POST')\n try:\n resp = urlopen(r)\n except HTTPError as e:\n if e.getcode() in [401, 406]:\n print('Token expired; please re-login')\n else:\n print('Huh?')\n return\n jsn = json.loads(resp.read().decode())\n if jsn.get('errors', {}) == {}: # empty dict means no errors!\n print('Your JOB sucessfully validated.')\n else: # print all errors and ask person to do it again\n #print(r.json.get('errors'))\n print(jsn.get('errors'))\n return", "def run(ceph_cluster, **kwargs) -> int:\n log.info(\"Running RBD Sanity tests.\")\n\n config = kwargs[\"config\"]\n script_dir = config[\"script_path\"]\n script = config[\"script\"]\n\n nodes = config.get(\"nodes\", [])\n rhbuild = config.get(\"rhbuild\")\n\n if nodes:\n nodes = get_nodes_by_ids(ceph_cluster, nodes)\n else:\n # By default, tests would be executed on a single client node\n nodes = [ceph_cluster.get_nodes(role=\"client\")[0]]\n\n os_ver = rhbuild.split(\"-\")[-1]\n if \"4.\" in rhbuild and os_ver == \"8\":\n nodes[0].exec_command(\n cmd=\"sudo /usr/sbin/alternatives --set python /usr/bin/python3\"\n )\n\n if rhbuild[0] > \"4\":\n out, err = nodes[0].exec_command(\n sudo=True, cmd=\"ceph config get mon mon_allow_pool_delete --format json\"\n )\n\n if not json.loads(out):\n nodes[0].exec_command(\n sudo=True, cmd=\"ceph config set mon mon_allow_pool_delete true\"\n )\n\n for node in nodes:\n branch = config.get(\"branch\", get_tag(node))\n one_time_setup(node, rhbuild, branch=branch)\n\n cmd = f\"cd ceph/{script_dir}; sudo bash {script}\"\n if script == \"*\":\n cmd = f\"cd ceph/{script_dir}; for test in $(ls); do sudo bash $test; done\"\n\n node.exec_command(cmd=cmd, check_ec=True, timeout=1200)\n\n return 0", "def run_starter(self, expect_to_fail=False):", "def test_launch_deployment(self):\n pass", "def run_worker(self):\n # TODO(xiejw): To allow execution framework to add train hooks.\n return self._start_distributed_training()", "def main():\n rclpy.init()\n\n worker_id = int(sys.argv[1])\n policy_type = sys.argv[2]\n node = WorkerSync(worker_id, 'worker_node', policy_type)\n\n try:\n executor = MultiThreadedExecutor()\n steps = 0\n\n while rclpy.ok():\n if node.flag.pull:\n node.pull(executor)\n\n elif node.flag.collect:\n steps = node.collect()\n\n elif node.flag.compute:\n node.compute(steps)\n\n elif node.flag.push:\n experiment_complete = node.push(executor)\n node.upkeep()\n\n # End experiment if passed number of max episodes.\n if experiment_complete:\n node.test(100)\n break\n\n except KeyboardInterrupt:\n pass\n\n # Destroy the node explicitly\n node.destroy_node()\n rclpy.shutdown()", "def 
runFunc(runType):\n logger.info('Running test locally with development environment')\n runProcess('local', ['invoke', '-v', '--config-file', 'run_config.yaml'])", "def uq_ensemble(config=\"dummy_test\", script=\"ERROR: PARAMETER script SHOULD BE DEFINED FOR TASK UQ_ENSEMBLE\",**args):\n \n path_to_config = find_config_file_path(config)\n sweep_dir = path_to_config + \"/SWEEP\"\n env.script = script\n\n run_ensemble(config, sweep_dir, **args)", "def execute_experiment(self):\n protocol_name = self.protocol_config['protocol']\n number_of_repetitions = self.protocol_config['numOfRepetitions']\n configurations = self.protocol_config['configurations']\n working_directory = self.protocol_config['workingDirectory']\n executables = self.protocol_config['executableName']\n for i in range(number_of_repetitions):\n for idx2 in range(len(configurations)):\n for idx in range(len(executables)):\n os.system(f'fab -f Execution/fabfile.py run_protocol:{self.protocol_config_path},'\n f'{configurations[idx2]},{executables[idx]},{working_directory[idx]} --parallel | '\n f' tee WebApp/ExecutionLogs/{protocol_name}.log')", "def launch(\n key_name: str,\n size: int,\n master_type: str,\n worker_type: str,\n image_id: str,\n owner: str,\n bucket_name: str,\n worker_command: str,\n config: str,\n cluster_name: Optional[str],\n workers_per_machine: int\n):\n\n if cluster_name is None:\n # credit for the words_alpha.txt file https://github.com/dwyl/english-words\n cluster_name = random.choice([word for word in open(\"words_alpha.txt\")])[:-1]\n storage_name = cluster_name + '_' + datetime.now().strftime('%Y%m%d%H%M%S') # name of the file storage on s3\n head_tags, worker_tags = get_tags(owner, cluster_name, storage_name) # tags for head and workers\n\n print(f'Launching cluster named ------------ {cluster_name} --------------------- (storage_name: {storage_name})')\n print(f'---------------------------------------------------------------------------------------------------')\n\n ec2 = boto3.resource(\"ec2\")\n as_client = boto3.client('autoscaling')\n\n # compress and upload the source code to the s3\n repo_name = _compress_folder()\n filename = str(pathlib.Path.cwd().parent / TAR_NAME)\n print(f'Uploading {filename} to {storage_name}')\n up(bucket_name, storage_name, filename)\n # down(bucket_name, storage_name, filename) # just to check file available\n print(f'Upload finished')\n\n download_untar = f'rm -f /home/ubuntu/{TAR_NAME} && ' \\\n f'aws s3 cp s3://{bucket_name}/{storage_name} /home/ubuntu/{TAR_NAME} && ' + \\\n f'rm -rf /home/ubuntu/{repo_name} && ' + \\\n f'mkdir /home/ubuntu/{repo_name} && ' + \\\n f'tar -xvf /home/ubuntu/{TAR_NAME} -C /home/ubuntu/'\n\n head_command = 'python -u es/experiment.py with ' + config + ' local=False'\n master_script = make_master_script(download_untar, make_master_run_script(head_command), repo_name)\n\n print(f'master will run this: -------\\n{master_script}\\n--------------')\n\n master_instance = ec2.create_instances(\n ImageId=image_id,\n KeyName=key_name,\n InstanceType=master_type,\n MinCount=1,\n MaxCount=1,\n SecurityGroupIds=[DEFAULT_SECURITY_GROUP],\n UserData=master_script,\n # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html\n TagSpecifications=[{'ResourceType': 'instance', 'Tags': head_tags}],\n IamInstanceProfile={'Name': 'redis_cluster_code_access'},\n # EbsOptimized=True,\n # Tags=head_tags\n )[0]\n\n master_ip = master_instance.private_ip_address\n\n print(f'Master launched, IP is: {master_ip}')\n scaling_client = 
boto3.client(\"autoscaling\")\n\n # try deleting the auto-scaling group and launch configuration of given name (should be done in the manage/kill)\n try:\n _ = scaling_client.delete_auto_scaling_group(\n AutoScalingGroupName=cluster_name,\n ForceDelete=True,\n )\n print(f'Auto scaling group named {cluster_name} deleted')\n # time.sleep(1)\n except:\n print(f'auto scaling group not found, skipping deletion')\n try:\n _ = scaling_client.delete_launch_configuration(\n LaunchConfigurationName=cluster_name\n )\n # time.sleep(1)\n print(f'Launch fonfig named {cluster_name} deleted')\n except:\n print(f'launch config not found, not deleting')\n\n worker_command = worker_command + f' --num_workers={workers_per_machine}'\n worker_script = make_worker_script(download_untar, make_worker_run_script(master_ip, worker_command), repo_name)\n print(f'Worker will run this: -------\\n{worker_script}\\n--------------')\n print(f'Creating launch configuration..')\n\n config_resp = as_client.create_launch_configuration(\n ImageId=image_id,\n KeyName=key_name,\n InstanceType=worker_type,\n LaunchConfigurationName=cluster_name,\n SecurityGroups=[DEFAULT_SECURITY_GROUP],\n UserData=worker_script,\n IamInstanceProfile=REDIS_CLUSTER_CODE_ACCESS,\n # EbsOptimized=True,\n )\n assert config_resp[\"ResponseMetadata\"][\"HTTPStatusCode\"] == 200\n\n print(f'Creating auto scaling group..')\n\n asg_resp = as_client.create_auto_scaling_group(\n AutoScalingGroupName=cluster_name,\n LaunchConfigurationName=cluster_name,\n MinSize=size,\n MaxSize=size,\n DesiredCapacity=size,\n AvailabilityZones=AVAILABILITY_ZONES,\n Tags=worker_tags,\n )\n assert asg_resp[\"ResponseMetadata\"][\"HTTPStatusCode\"] == 200\n\n print(f'\\nCluster created, name: {cluster_name}\\n')", "def main() -> bool:\n global logger\n logger = setup_logger(\"nitpycker\")\n plugin_manager = Manager()\n plugin_manager.load_plugins()\n args = parse_args(plugin_manager)\n if plugin_manager.enable_plugins(args.plugins, args):\n exit(2)\n\n plugin_manager.pre_test_discovery()\n tests = unittest.defaultTestLoader.discover(args.start_directory, pattern=args.pattern)\n plugin_manager.post_test_discovery()\n tests = plugin_manager.filter_tests(tests)\n report = ParallelRunner(plugin_manager, process_number=args.process_number, verbosity=args.verbosity).run(tests)\n return not report.wasSuccessful()", "def run_xenon_simple(workflow, machine, worker_config):\n scheduler = Scheduler()\n\n return scheduler.run(\n xenon_interactive_worker(machine, worker_config),\n get_workflow(workflow)\n )", "def cluster_start(args: Namespace, configuration: BareConfig):\n logging.basicConfig(level=logging.DEBUG,\n datefmt='%m-%d %H:%M')\n launch_orchestrator(args=args, conf=configuration)", "def test_script(self) -> None:\n main()", "def test_workflow_environment():\n config = {\n \"workflow-name\": \"workflow\",\n \"cluster-type\": CLUSTER_TYPE,\n \n \"environment-variables\": {\n \"FOO\": \"BAR\",\n \"FOO2\": \"BAR2\"\n }\n }\n \n template_dir = tempfile.mkdtemp(suffix=\"test-workflow-environment-template\")\n with open(f\"{template_dir}/workflow.yaml\", 'w') as f:\n yaml.dump(config, f)\n \n @checkrun\n def execute(workflow_inst):\n def _check():\n assert os.environ['FOO'] == \"BAR\"\n assert os.environ[\"OMP_NUM_THREADS\"] == '1'\n return True\n \n # driver env\n _check()\n \n # worker env\n assert all(workflow_inst.run_on_each_worker(_check).values())\n \n os.environ['FOO'] = 'ORIGINAL_FOO'\n _execution_dir, _workflow = launch_flow(template_dir, 1, _custom_execute_fn=execute)\n 
assert execute.didrun\n \n # Environment is restored after execution is finished.\n assert os.environ['FOO'] == 'ORIGINAL_FOO'\n assert 'FOO2' not in os.environ", "def _setup_test_infra(world_rank, world_size):\n os.environ['RANK'] = str(world_rank)\n os.environ['WORLD_SIZE'] = str(world_size)\n os.environ['MASTER_ADDR'] = '127.0.0.1'\n os.environ['MASTER_PORT'] = '29500'\n\n set_cuda_device_id(world_rank)\n\n dist.init_process_group(backend='nccl', world_size=world_size, rank=world_rank)", "def test_minimium_cluster_definition(self, monkeypatch):\n import yaml\n\n def test_read_file(*args, **kwargs):\n config_string = open('./tests/test_data/minimum.yaml', 'r').read()\n return config_string\n \n def test_clustername(*args, **kwargs):\n return 'test-clustername'\n\n mock_dataproc_client = mock.create_autospec(dataproc_v1beta2.ClusterControllerClient())\n mock_gcs_client = mock.create_autospec(storage.Client())\n spawner = DataprocSpawner(hub=Hub(), dataproc=mock_dataproc_client, gcs=mock_gcs_client, user=MockUser(), _mock=True, gcs_notebooks=self.gcs_notebooks)\n \n # Prevents a call to GCS. We return the local file instead.\n monkeypatch.setattr(spawner, \"read_gcs_file\", test_read_file)\n monkeypatch.setattr(spawner, \"clustername\", test_clustername)\n\n spawner.project = \"test-project\"\n spawner.zone = \"test-self1-b\"\n spawner.env_str = \"test-env-str\"\n spawner.args_str = \"test-args-str\"\n\n config_built = spawner._build_cluster_config()\n\n assert 'project_id' in config_built\n assert 'cluster_name' in config_built\n\n assert config_built['project_id'] == 'test-project'\n assert config_built['cluster_name'] == 'test-clustername'\n \n assert config_built['config']['gce_cluster_config']['zone_uri'].split('/')[-1] == 'test-self1-b'\n\n assert 'ANACONDA' in config_built['config']['software_config']['optional_components']\n assert 'JUPYTER' in config_built['config']['software_config']['optional_components']\n\n assert 'dataproc:jupyter.hub.args' in config_built['config']['software_config']['properties']\n assert 'dataproc:jupyter.hub.enabled' in config_built['config']['software_config']['properties']\n # assert 'dataproc:jupyter.notebook.gcs.dir' in config_built['config']['software_config']['properties']\n assert 'dataproc:jupyter.hub.env' in config_built['config']['software_config']['properties']", "def simple_behavior_test(test_files):\n parse_config_file_and_execute_run(test_files, overwrite=True)", "def setup(self, args={}):\n\n return Status.RUN", "def run_experiment(experiment_name: str = None,\n output_path: str = '/tmp/sweep',\n start_count: int = 0,\n end_count: int = int(1e6),\n ignore_errors: bool = False,\n agent_module: str = None,\n config: Dict[str, Any] = None):\n if not agent_module and not config:\n agent_module, config = load_experiment(experiment_name)\n if isinstance(agent_module, str):\n agent_module = importlib.import_module(agent_module)\n\n prefix_keys = config_utils.list_keys_to_expand(config)\n for c, p in zip(config, prefix_keys):\n c.update(dict(prefix_keys=p))\n config_count = config_utils.count_configuration(config)\n start_count = max(start_count, 0)\n end_count = min(end_count, sum(config_count))\n print(f'Loaded experiment_name={experiment_name}')\n print(f'Loaded {sum(config_count)}({config_count}) experiment configurations')\n print(f'Set start_count={start_count}, end_count={end_count}')\n print(f'Set prefix_keys={prefix_keys}')\n print(f'Set output_dir={output_path}')\n\n # @title Launch experiments\n for i in range(start_count, end_count):\n 
c, _ = config_utils.index_configuration(config, index=i, count=config_count)\n task_name = config_utils.get_compressed_name_from_keys(\n c, agent_module.TASK_KEYS)\n experiment_name = config_utils.get_compressed_name_from_keys(\n c, c.pop('prefix_keys'))\n output_dir = f'{output_path}/{task_name}/{experiment_name}'\n print(f'[{i+1}/{sum(config_count)}] Starting experiment...')\n print(f'\\t config: {pprint.pformat(c, indent=2)}')\n print(f'\\t output_dir={output_dir}')\n return_dict = {}\n if ignore_errors:\n try:\n agent_module.train(c, output_dir=output_dir, return_dict=return_dict)\n except Exception as e:\n print(\n f'[{i+1}/{sum(config_count)}] FAILED experiment {e.__class__.__name__}: {e.message}'\n )\n else:\n agent_module.train(c, output_dir=output_dir, return_dict=return_dict)\n print(f'\\t time_to_jit={return_dict.get(\"time_to_train\", None)}')\n print(f'\\t time_to_train={return_dict.get(\"time_to_jit\", None)}')", "def start(config, machines):\n logging.info(\"Start Kubernetes cluster on VMs\")\n commands = []\n\n # Setup cloud controller\n commands.append(\n [\n \"ansible-playbook\",\n \"-i\",\n os.path.join(config[\"infrastructure\"][\"base_path\"], \".continuum/inventory_vms\"),\n os.path.join(\n config[\"infrastructure\"][\"base_path\"],\n \".continuum/cloud/control_install.yml\",\n ),\n ]\n )\n\n # Setup worker\n commands.append(\n [\n \"ansible-playbook\",\n \"-i\",\n os.path.join(config[\"infrastructure\"][\"base_path\"], \".continuum/inventory_vms\"),\n os.path.join(\n config[\"infrastructure\"][\"base_path\"],\n \".continuum/%s/install.yml\" % (config[\"mode\"]),\n ),\n ]\n )\n\n results = machines[0].process(config, commands)\n\n # Check playbooks\n for command, (output, error) in zip(commands, results):\n logging.debug(\"Check output for Ansible command [%s]\", \" \".join(command))\n ansible.check_output((output, error))\n\n # Install observability packages (Prometheus, Grafana) if configured by the user\n if config[\"benchmark\"][\"observability\"]:\n command = [\n \"ansible-playbook\",\n \"-i\",\n os.path.join(config[\"infrastructure\"][\"base_path\"], \".continuum/inventory_vms\"),\n os.path.join(\n config[\"infrastructure\"][\"base_path\"],\n \".continuum/cloud/observability.yml\",\n ),\n ]\n\n output, error = machines[0].process(config, command)[0]\n\n logging.debug(\"Check output for Ansible command [%s]\", \" \".join(command))\n ansible.check_output((output, error))", "def test_get_server_runnable(self):\n global locator, config_paths\n locator.load_config(config_paths[2])\n\n self.assertIsNotNone(locator.get_server_runnable())", "def test_workers(self):\n wr = WorkflowRuner(4)\n try:\n wr.init_workers()\n assert wr.workers_available() == 4\n wr.acquire_worker()\n assert wr.workers_available() == 3\n wr.acquire_worker()\n assert wr.active_workers()\n wr.acquire_worker()\n assert wr.active_workers()\n wr.acquire_worker()\n assert not wr.active_workers()\n wr.release_worker()\n assert wr.active_workers()\n wr.release_worker()\n assert wr.workers_available() == 2\n wr.terminate_workers_and_clean_subprocesses()\n except:\n wr.terminate_workers_and_clean_subprocesses()", "def main(config: DictConfig) -> None:\n\n if config.test:\n # TODO: clean up current working directory with test=true\n experiment_path = os.getcwd().replace(\"test=true,\", \"\").replace(\"test=True,\", \"\")\n if config.unsupervised:\n trainer = UnsupervisedTrainer(config, experiment_path)\n else:\n trainer = Trainer(config, experiment_path)\n summary, report = trainer.test()\n print(summary)\n 
print(report)\n else:\n experiment_path = os.getcwd()\n if config.unsupervised:\n trainer = UnsupervisedTrainer(config, experiment_path)\n else:\n trainer = Trainer(config, experiment_path)\n trainer.run()\n print(\"Launched training. Press CTRL+C to stop.\")\n print(f\"Logs available at {os.getcwd()}\")", "def LaunchWorker(config, num_processes = None):\n if num_processes != None:\n num_processes = int(num_processes)\n pool = MulticorePool(num_processes)\n worker = Worker(zmq.Context(), config,\n receiver_timeout = None, # wait indefinitely for task requests\n pool = pool)\n worker.Setup() # connect/bind sockets and prepare for work\n worker.Run() # run the request/reply loop until termination\n sys.exit(worker.exit_status)", "def simple_worker_loop() -> None:\n print('\\nSimple worker loop tutorial', flush=True)\n\n # the first thing to do at the start of any experiment is to initialize a few global parameters\n # these parameters are shared across the entire repo\n ps.init_globals(\n seed=0, # if None, the experiment is not seeded and would initialized differently each time\n registry=None, # if None, a registry is created and used\n # a registry does bookkeeping of all people and locations used in the experiment\n )\n\n # init locations\n home = ps.env.Home()\n work = ps.env.Office() # any subclass of BusinessLocation can be a workplace, e.g. Bar, Restaurant, Hospital, etc.\n\n # init a worker\n person = ps.env.Worker(\n person_id=ps.env.PersonID('worker', age=35), # person_id is a unique id for this person\n home=home.id, # specify the home_id that person is assigned to\n work=work.id, # specify the id of the person's workplace\n )\n\n # Init simulator\n sim = ps.env.PandemicSim(\n locations=[work, home], # a list of all locations\n persons=[person] # a list of all persons\n )\n # PandemicSim by default creates and uses randomized testing and an SEIR infection model\n\n # Iterate through steps in the simulator, where each step advances an hour\n for _ in trange(24, desc='Simulating hour'):\n sim.step()\n\n # Or iterate by advancing in days by calling step_day in the simulator\n for _ in trange(10, desc='Simulating day'):\n sim.step_day()\n\n # The above loop iterates the simulator with no movement restrictions\n # To impose restrictions, for example, Stage-2 of austin_regulations\n sim.impose_regulation(ps.sh.austin_regulations[2])\n\n # Calling step_day now will run the simulator under Stage-2 regulation\n for _ in trange(10, desc='Simulating day (Under Stage-2)'):\n sim.step_day()", "def run_test_suite():\n local('. 
fabric_factory/ve/bin/activate; fabric_factory/src/project/manage.py test')", "def main():\n args = parse_command_line()\n expt_config = load_config(args.experiment_config_path)\n run_cli(RunOptions.from_dict(expt_config))", "def main():\n grid_tester_cpu = GridTesterCPU()\n\n # parse args, load configuration and create all required objects.\n grid_tester_cpu.setup_grid_experiment()\n\n # GO!\n grid_tester_cpu.run_grid_experiment()", "def test_local_env_pass_explicit(fileutils) -> None:\n exp_value = str(uuid.uuid4())\n env_key = \"test_local_env_pass_explicit\"\n\n assert env_key not in os.environ\n\n test_dir = fileutils.make_test_dir()\n script = fileutils.get_test_conf_path(\"check_env.py\")\n\n exp_dir = f\"{test_dir}/exp\"\n os.makedirs(exp_dir)\n exp = Experiment(\"LRZ\", exp_path=exp_dir, launcher=\"slurm\")\n\n exe_name = \"python\"\n exe_args = [script, env_key]\n\n # Create the RunSettings associated with the workload manager (WLM) run command\n run_args = {\"--nodes\": 1, \"--ntasks\": 1, \"--time\": \"00:01:00\"}\n env_vars = {env_key: exp_value} # <-- explicitly passing a new env var to task\n settings = RunSettings(\n exe_name, exe_args, run_command=\"srun\", run_args=run_args, env_vars=env_vars\n )\n app_name = \"echo_app\"\n app = exp.create_model(app_name, settings)\n\n # generate the experiment structure and start the model\n exp.generate(app, overwrite=True)\n exp.start(app, block=True, summary=False)\n\n assert env_key in settings.env_vars\n\n with open(f\"{exp_dir}/{app_name}/{app_name}.out\") as app_outfile:\n app_output = app_outfile.read()\n \n # verify application was able to access the env var\n assert f\"{env_key}=={exp_value}\" in app_output", "def run_script(self, params, config_no):\n raise NotImplementedError()", "def setup_amq_cluster_operator(self):\n\n # self.amq_dir = constants.TEMPLATE_DEPLOYMENT_AMQ_CP\n run(f'oc apply -f {self.amq_dir} -n {self.namespace}', shell=True, check=True, cwd=self.dir)\n time.sleep(5)\n # Wait for strimzi-cluster-operator pod to be created\n if self.is_amq_pod_running(pod_pattern=\"cluster-operator\"):\n log.info(\"strimzi-cluster-operator pod is in running state\")\n else:\n raise ResourceWrongStatusException(\"strimzi-cluster-operator pod is not getting to running state\")\n\n run(f'oc apply -f {self.amq_dir_examples} -n {self.namespace}', shell=True, check=True, cwd=self.dir)\n # checking pod status one more time\n if self.is_amq_pod_running(pod_pattern=\"cluster-operator\"):\n log.info(\"strimzi-cluster-operator pod is in running state\")\n else:\n raise ResourceWrongStatusException(\"strimzi-cluster-operator pod is not getting to running state\")", "def pytest_started_handling_group(session, worker):", "def setup(self, cluster):\n raise NotImplementedError()", "def main(argv):\n parser = argparse.ArgumentParser(description=\"\"\"Bootstrap CI Scripts\"\"\")\n parser.add_argument(\"-d\", \"--directory\",\n type=str,\n required=True,\n help=(\"\"\"Directory to store language runtimes, \"\"\"\n \"\"\"scripts and other script details in\"\"\"))\n parser.add_argument(\"-s\", \"--script\",\n type=str,\n help=\"\"\"Script to pass control to\"\"\")\n parser.add_argument(\"-e\", \"--eval-output\",\n type=str,\n choices=[\n \"bash\",\n \"powershell\"\n ],\n help=\"\"\"Evaluate output in shell\"\"\")\n parser.add_argument(\"-p\", \"--print-to\",\n type=str,\n help=\"\"\"Where to print output script to\"\"\")\n parser.add_argument(\"-r\", \"--scripts-directory\",\n type=str,\n help=(\"\"\"Directory where scripts are already \"\"\"\n 
\"\"\"stored in\"\"\"))\n parser.add_argument(\"--keep-scripts\",\n action=\"store_true\",\n help=\"\"\"Don't remove stale scripts.\"\"\")\n args, remainder = parser.parse_known_args(argv)\n\n print_script_to, print_messages_to = _determine_outputs(args.print_to)\n\n with closing(print_script_to):\n parent_shell = construct_parent_shell(args.eval_output,\n print_script_to)\n container = ContainerDir(parent_shell,\n stale_check=_stale_check_url(args),\n **(vars(args)))\n util = container.fetch_and_import(\"util.py\")\n # suppress(unused-attribute)\n util.PRINT_MESSAGES_TO = print_messages_to\n bootstrap_script = container.script_path(\"bootstrap.py\").fs_path\n bootstrap_script_components = bootstrap_script.split(os.path.sep)\n scripts_path = os.path.sep.join(bootstrap_script_components[:-2])\n\n # Overwrite CONTAINER_DIR in the output script, but not\n # for our own invocation, we'll need the parent instance\n # if we're actually in a test\n parent_shell.overwrite_environment_variable(\"CONTAINER_DIR\",\n container.path())\n _set_ci_environment_variables(parent_shell)\n\n _define_script_command(\"polysquare_run\",\n parent_shell,\n bootstrap_script,\n container.path(),\n scripts_path,\n None)\n _define_script_command(\"polysquare_cleanup\",\n parent_shell,\n bootstrap_script,\n container.path(),\n scripts_path,\n \"clean.py\")\n\n # Done, pass control to the script we're to run\n container.fetch_and_import(args.script).run(container,\n util,\n parent_shell,\n argv=remainder)\n\n # Print a final new line so that active messages don't get\n # truncated.\n util.print_message(\"\\n\")\n\n if container.return_code() != 0:\n parent_shell.exit(container.return_code())\n\n return container.return_code()", "def run_custom_training_tests():\n test_custom_training()\n test_custom_distributed_training()\n test_custom_multimodel_training()\n test_custom_distributed_multimodel_training()", "def test_run():\n # Only few steps for test\n timesteps = 128\n\n # Compute all sub testing conf\n envs = ['CartPole-v0']\n ml_platforms = ['torch', 'tf']\n agents = ['dqn', 'a2c']\n\n test_combinations = list(it.product(\n envs,\n ml_platforms,\n agents\n )\n )\n\n # Finally test them all\n for conf in test_combinations:\n env_str, ml_platform_str, agent_str = conf\n run(\n agent_str,\n ml_platform_str,\n env_str,\n 'dense',\n timesteps,\n './target/')", "def test_scheduler_runs():\n import mesos.native\n\n # Make sure fake_mysos_executor.pex is available to be fetched by Mesos slave.\n assert os.path.isfile('dist/fake_mysos_executor.pex')\n\n storage = FakeStorage(SequentialThreadingHandler())\n zk_client = FakeClient(storage=storage)\n zk_client.start()\n\n zk_url = \"zk://fake_host/home/mysos/clusters\"\n cluster_name = \"test_cluster\"\n num_nodes = 3\n\n state_provider = LocalStateProvider(safe_mkdtemp())\n\n framework_info = FrameworkInfo(\n user=getpass.getuser(),\n name=\"mysos\",\n checkpoint=False)\n\n state = Scheduler(framework_info)\n\n scheduler = MysosScheduler(\n state,\n state_provider,\n getpass.getuser(),\n os.path.abspath(\"dist/fake_mysos_executor.pex\"),\n \"./fake_mysos_executor.pex\",\n zk_client,\n zk_url,\n Amount(40, Time.SECONDS),\n \"/fakepath\",\n gen_encryption_key())\n\n RootMetrics().register_observable('scheduler', scheduler)\n\n scheduler_driver = mesos.native.MesosSchedulerDriver(\n scheduler,\n framework_info,\n \"local\")\n scheduler_driver.start()\n\n # Wait until the scheduler is connected and becomes available.\n assert scheduler.connected.wait(30)\n\n 
scheduler.create_cluster(cluster_name, \"mysql_user\", num_nodes, cluster_password=\"passwd\")\n\n # A slave is promoted to be the master.\n deadline(\n lambda: wait_for_master(\n get_cluster_path(posixpath.join(zk_url, 'discover'), cluster_name),\n zk_client),\n Amount(40, Time.SECONDS))\n\n scheduler.delete_cluster(cluster_name, password=\"passwd\")\n\n # The cluster is deleted from ZooKeeper.\n deadline(\n lambda: wait_for_termination(\n get_cluster_path(posixpath.join(zk_url, 'discover'), cluster_name),\n zk_client),\n Amount(40, Time.SECONDS))\n\n sample = RootMetrics().sample()\n assert sample['scheduler.tasks_killed'] == 1\n\n assert scheduler_driver.stop() == DRIVER_STOPPED", "def _manageWorkersConfig(event):\n if event.info.get('key') != PluginSettings.SLICER_CLI_WEB_WORKER_CONFIG_ITEM:\n return\n if _loadWorkerConfig():\n _manageWorkers(None)", "def test_run_experiment():\n with mmtools.utils.temporary_directory() as tmp_dir:\n yaml_content = \"\"\"\n ---\n options:\n resume_setup: no\n resume_simulation: no\n default_number_of_iterations: 0\n output_dir: {}\n setup_dir: ''\n experiments_dir: ''\n minimize: no\n annihilate_sterics: yes\n molecules:\n T4lysozyme:\n filepath: {}\n leap: {{parameters: oldff/leaprc.ff14SB}}\n select: 0\n p-xylene:\n filepath: {}\n antechamber: {{charge_method: bcc}}\n leap: {{parameters: leaprc.gaff}}\n solvents:\n vacuum:\n nonbonded_method: NoCutoff\n GBSA-OBC2:\n nonbonded_method: NoCutoff\n implicit_solvent: OBC2\n protocols:{}\n systems:\n system:\n receptor: T4lysozyme\n ligand: p-xylene\n solvent: !Combinatorial [vacuum, GBSA-OBC2]\n experiments:\n system: system\n protocol: absolute-binding\n restraint:\n type: FlatBottom\n spring_constant: 0.6*kilocalorie_per_mole/angstroms**2\n well_radius: 5.2*nanometers\n restrained_receptor_atoms: 1644\n restrained_ligand_atoms: 2609\n options:\n temperature: 302.0*kelvin\n \"\"\".format(tmp_dir, examples_paths()['lysozyme'], examples_paths()['p-xylene'],\n indent(standard_protocol))\n\n exp_builder = ExperimentBuilder(textwrap.dedent(yaml_content))\n\n # Now check_setup_resume should not raise exceptions\n exp_builder._check_resume()\n\n # We setup a molecule and with resume_setup: now we can't do the experiment\n err_msg = ''\n exp_builder._options['resume_setup'] = False\n exp_builder._db._setup_molecules('p-xylene')\n try:\n exp_builder.run_experiments()\n except YamlParseError as e:\n err_msg = str(e)\n assert 'molecule' in err_msg\n\n # Same thing with a system\n err_msg = ''\n system_dir = os.path.dirname(\n exp_builder._db.get_system('system_GBSAOBC2')[0].position_path)\n try:\n exp_builder.run_experiments()\n except YamlParseError as e:\n err_msg = str(e)\n assert 'system' in err_msg\n\n # Now we set resume_setup to True and things work\n exp_builder._options['resume_setup'] = True\n ligand_dir = exp_builder._db.get_molecule_dir('p-xylene')\n frcmod_file = os.path.join(ligand_dir, 'p-xylene.frcmod')\n prmtop_file = os.path.join(system_dir, 'complex.prmtop')\n molecule_last_touched = os.stat(frcmod_file).st_mtime\n system_last_touched = os.stat(prmtop_file).st_mtime\n exp_builder.run_experiments()\n\n # Neither the system nor the molecule has been processed again\n assert molecule_last_touched == os.stat(frcmod_file).st_mtime\n assert system_last_touched == os.stat(prmtop_file).st_mtime\n\n # The experiments folders are correctly named and positioned\n for exp_name in ['systemvacuum', 'systemGBSAOBC2']:\n # The output directory must be the one in the experiment section\n output_dir = 
os.path.join(tmp_dir, exp_name)\n assert os.path.isdir(output_dir)\n assert os.path.isfile(os.path.join(output_dir, 'complex.nc'))\n assert os.path.isfile(os.path.join(output_dir, 'solvent.nc'))\n assert os.path.isfile(os.path.join(output_dir, exp_name + '.yaml'))\n assert os.path.isfile(os.path.join(output_dir, exp_name + '.log'))\n\n # Analysis script is correct\n analysis_script_path = os.path.join(output_dir, 'analysis.yaml')\n with open(analysis_script_path, 'r') as f:\n assert yaml.load(f, Loader=yaml.FullLoader) == [['complex', 1], ['solvent', -1]]\n\n # Now we can't run the experiment again with resume_simulation: no\n exp_builder._options['resume_simulation'] = False\n try:\n exp_builder.run_experiments()\n except YamlParseError as e:\n err_msg = str(e)\n assert 'experiment' in err_msg\n\n # We set resume_simulation: yes and now things work\n exp_builder._options['resume_simulation'] = True\n exp_builder.run_experiments()", "def launch_test(\n code,\n max_num_machines,\n num_mpiprocs_per_machine,\n with_mpi,\n daemon,\n):\n import os\n from aiida_shengbte import helpers\n from aiida import orm\n from aiida.plugins import DataFactory, CalculationFactory\n\n if not code:\n # get code\n computer = helpers.get_computer()\n code = helpers.get_code(entry_point=\"shengbte\", computer=computer)\n fc_dir = os.path.normcase(\n os.path.join(os.path.abspath(__file__), \"../input_files\")\n )\n SinglefileData = DataFactory(\"singlefile\")\n FORCE_CONSTANTS_2ND = SinglefileData(\n file=fc_dir + \"/FORCE_CONSTANTS_2ND\",\n filename=\"FORCE_CONSTANTS_2ND\",\n )\n FORCE_CONSTANTS_3RD = SinglefileData(\n file=fc_dir + \"/FORCE_CONSTANTS_3RD\",\n filename=\"FORCE_CONSTANTS_3RD\",\n )\n\n inputs = {\n \"code\": code,\n \"control\": orm.Dict(\n dict={\n \"allocations\": {\n \"nelements\": 2,\n \"natoms\": 2,\n \"ngrid\": [3, 3, 3],\n \"norientations\": 3,\n },\n \"crystal\": {\n \"lattvec\": [[0, 0.5, 0.5], [0.5, 0, 0.5], [0.5, 0.5, 0]],\n \"elements\": [\"In\", \"As\"],\n \"types\": [1, 2],\n \"positions\": [[0, 0, 0], [0.25, 0.25, 0.25]],\n \"scell\": [5, 5, 5],\n \"born\": [\n [[2.67810, 0, 0], [0, 2.67810, 0], [0, 0, 2.67810]],\n [[-2.67810, 0, 0], [0, -2.67810, 0], [0, 0, -2.67810]],\n ],\n \"orientations\": [[1, 0, 0], [1, 1, 0], [1, 1, 1]],\n },\n \"parameters\": {\n \"T\": 300,\n },\n \"flags\": {\n \"espresso\": False,\n \"nonanalytic\": True,\n \"nanowires\": True,\n },\n }\n ),\n \"FORCE_CONSTANTS_2ND\": FORCE_CONSTANTS_2ND,\n \"FORCE_CONSTANTS_3RD\": FORCE_CONSTANTS_3RD,\n # 'clean_workdir': orm.Bool(True),\n \"metadata\": {\n \"description\": \"Test job submission with the aiida_shengbte plugin\",\n \"options\": {\n \"resources\": {\n \"num_machines\": int(max_num_machines),\n \"num_mpiprocs_per_machine\": int(num_mpiprocs_per_machine),\n },\n \"withmpi\": with_mpi,\n }\n # 'dry_run': True,\n # 'store_provenance': False,\n },\n }\n\n launch.launch_process(\n CalculationFactory(\"shengbte.shengbte\"), daemon, **inputs\n )", "def test_base(self):\n self.render_config_template(\n path=os.path.abspath(self.working_dir) + \"/log/*\",\n local=True,\n )\n\n functionbeat_proc = self.start_beat()\n self.wait_until(lambda: self.log_contains(\"functionbeat is running\"))\n exit_code = functionbeat_proc.kill_and_wait()\n assert exit_code == 0", "def test_by_default(self):\n # addon_executor = AddonExecutor(execute_order, stop_order)\n # self.assertEqual(expected, addon_executor.execute_with_default(addon))\n self.run_mgr.by_config(self.fileio_inst)\n\n output = self._get_lines_as_list(sys.stdout)\n\n 
self.assertTrue(output[0].startswith('Starting'))\n self.assertTrue(output[1].startswith('Executing'))\n self.assertTrue(output[2].startswith('Stopping'))", "def startTestRun(self):", "def init_worker(self, worker_id) :\n\n # since this is called in a separate process,\n # we need to get a consistent view of the settings\n startup.main(self.mode, self.rank)\n\n # initialize the random seed for this process\n # we don't use just the worker_id but also the rank\n # so we truly get different random numbers in all workers,\n # not restricted to the current pool\n # note that we get some entropy from the time\n # so different epochs get different data augmentations\n np.random.seed((hash(time())\n + (settings.RANK * torch.utils.data.get_worker_info().num_workers\n + worker_id)) % 2**32)", "def test_slurm_sample_resource_3(self):\n\n # Set environment variables\n os.environ['SLURM_NODELIST'] = 'nodes[1-2]'\n os.environ['SLURM_NPROCS'] = '24'\n os.environ['SLURM_NNODES'] = '2'\n os.environ['SLURM_CPUS_ON_NODE'] = '24'\n\n # Run component with desired configuration\n self.component._cfg = self.cfg_sample_3\n self.component._configure()\n\n # Verify configured correctly\n self.assertEqual(self.component.cores_per_node, 9003)\n self.assertEqual(self.component.gpus_per_node, 0)\n self.assertEqual(self.component.lfs_per_node['path'], \"/not_comet\")\n self.assertEqual(self.component.lfs_per_node['size'], 1002)\n self.assertEqual(self.component.lm_info['cores_per_node'], 9003)\n\n return", "def main():\n args = get_args()\n config, config_fn = get_config(arg_config_file=args.config_file,\n arg_config_name=args.config_name)\n setup_logs(args.log_dir, args.log_level or config['housekeeping']['log_level'])\n logger.info('program starting')\n logger.info('config_file used: %s' % config_fn)\n\n jobcheck = exit_if_already_running()\n exit_if_suppressed()\n\n for dir_sect_name in config:\n if dir_sect_name == 'housekeeping':\n continue\n logger.info('section starting for %s' % dir_sect_name)\n process_dir = ProcessDir(config[dir_sect_name],\n args.test_run)\n process_dir.walk()\n logger.info('section terminating for %s' % dir_sect_name)\n\n jobcheck.close()\n logger.info('program terminating successfully')\n return 0", "async def test_script_main(config, mocker, monkeypatch):\n for key in config.keys():\n monkeypatch.setenv(key, config[key])\n mock_event_loop = mocker.patch(\"asyncio.get_event_loop\")\n mock_root_logger = mocker.patch(\"logging.getLogger\")\n mock_status_loop = mocker.patch(\"lta.deleter.status_loop\")\n mock_work_loop = mocker.patch(\"lta.deleter.work_loop\")\n main()\n mock_event_loop.assert_called()\n mock_root_logger.assert_called()\n mock_status_loop.assert_called()\n mock_work_loop.assert_called()", "def run_job(in_args=sys.argv[1:]):\n print '>>>> condor_worker.py logging:'\n proc = Popen(['hostname', '-f'], stdout=PIPE, stderr=PIPE)\n out, err = proc.communicate()\n if err == '':\n print 'Running on', out\n else:\n raise RuntimeError(err)\n\n parser = WorkerArgParser(description=__doc__)\n args = parser.parse_args(in_args)\n print 'Args:'\n print args\n\n # Make sandbox area to avoid names clashing, and stop auto transfer\n # back to submission node\n # -------------------------------------------------------------------------\n tmp_dir = 'scratch'\n os.mkdir(tmp_dir)\n os.chdir(tmp_dir)\n try:\n # Copy files to worker node area from /users, /hdfs, /storage, etc.\n # ---------------------------------------------------------------------\n if args.copyToLocal:\n print 'PRE EXECUTION: 
Copy to local:'\n for (source, dest) in args.copyToLocal:\n print source, dest\n if source.startswith('/hdfs'):\n source = source.replace('/hdfs', '')\n check_call(['hadoop', 'fs', '-copyToLocal', source, dest])\n else:\n if os.path.isfile(source):\n shutil.copy2(source, dest)\n elif os.path.isdir(source):\n shutil.copytree(source, dest)\n\n print 'In current dir:'\n print os.listdir(os.getcwd())\n\n # Do setup of programs & libs, and run the program\n # We have to do this in one step to avoid different-shell-weirdness,\n # since env vars don't necessarily get carried over.\n # ---------------------------------------------------------------------\n print 'SETUP AND EXECUTION'\n setup_cmd = ''\n if args.setup:\n os.chmod(args.setup, 0555)\n setup_cmd = 'source ./' + args.setup + ' && '\n\n if os.path.isfile(os.path.basename(args.exe)):\n os.chmod(os.path.basename(args.exe), 0555)\n\n # run_cmd = args.exe\n\n # If it's a local file, we need to do ./ for some reason...\n # But we must determine this AFTER running setup script,\n # can't do it beforehand\n run_cmd = \"if [[ -e {exe} ]];then ./{exe} {args};else {exe} {args};fi\"\n run_args = ' '.join(args.args) if args.args else ''\n run_cmd = run_cmd.format(exe=args.exe, args=run_args)\n print 'Contents of dir before running:'\n print os.listdir(os.getcwd())\n print \"Running:\", setup_cmd + run_cmd\n check_call(setup_cmd + run_cmd, shell=True)\n\n print 'In current dir:'\n print os.listdir(os.getcwd())\n\n # Copy files from worker node area to /hdfs or /storage\n # ---------------------------------------------------------------------\n if args.copyFromLocal:\n print 'POST EXECUTION: Copy to HDFS:'\n for (source, dest) in args.copyFromLocal:\n print source, dest\n if dest.startswith('/hdfs'):\n source = os.path.realpath(source)\n dest = dest.replace('/hdfs', '')\n check_call(['hadoop', 'fs', '-copyFromLocal', '-f', source, dest])\n else:\n if os.path.isfile(source):\n shutil.copy2(source, dest)\n elif os.path.isdir(source):\n shutil.copytree(source, dest)\n finally:\n # Cleanup\n # ---------------------------------------------------------------------\n print 'CLEANUP'\n os.chdir('..')\n shutil.rmtree(tmp_dir)", "def main():\n rs = redshift(config_file=CONFIG_FILENAME)\n \n # check if cluster already available\n try:\n clust_avail = check_available(rs)\n except rs_client.exceptions.ClusterNotFoundFault:\n clust_avail = False\n\n # if cluster not available, create it\n if not clust_avail:\n create_cluster(rs) \n \n print(f'Cluster is available. 
Cluster information: \\n{rs.get_cluster_info()}')", "def test_default_user():\n\n # launch unique-sleep\n application_json = get_resource(\"{}/unique-sleep.json\".format(fixture_dir()))\n client = marathon.create_client()\n client.add_app(application_json)\n app = client.get_app(application_json['id'])\n assert app['user'] is None\n\n # wait for deployment to finish\n tasks = client.get_tasks(\"unique-sleep\")\n host = tasks[0]['host']\n\n assert run_command_on_agent(host, \"ps aux | grep '[s]leep ' | awk '{if ($1 !=\\\"root\\\") exit 1;}'\")\n\n client = marathon.create_client()\n client.remove_app(\"/unique-sleep\")", "def main():\n\n run_manual_session()\n # run_automated_session()", "def run(self, config_file=None, partic_list=None):\n\n from time import strftime\n from qap_utils import raise_smart_exception, \\\n check_config_settings\n\n # in case we are overloading\n if config_file:\n from qap.script_utils import read_yml_file\n self._config = read_yml_file(config_file)\n self.validate_config_dict()\n self._config[\"pipeline_config_yaml\"] = config_file\n \n if not self._config:\n raise Exception(\"config not found!\")\n\n if partic_list:\n self._config[\"subject_list\"] = partic_list\n\n # Get configurations and settings\n check_config_settings(self._config, \"num_processors\")\n check_config_settings(self._config, \"num_sessions_at_once\")\n check_config_settings(self._config, \"available_memory\")\n check_config_settings(self._config, \"output_directory\")\n check_config_settings(self._config, \"working_directory\")\n\n self._num_bundles_at_once = 1\n write_report = self._config.get('write_report', False)\n\n if \"cluster_system\" in self._config.keys() and not self._bundle_idx:\n res_mngr = self._config[\"cluster_system\"]\n if (res_mngr == None) or (\"None\" in res_mngr) or \\\n (\"none\" in res_mngr):\n self._platform = None\n else:\n platforms = [\"SGE\", \"PBS\", \"SLURM\"]\n self._platform = str(res_mngr).upper()\n if self._platform not in platforms:\n msg = \"The resource manager %s provided in the pipeline \"\\\n \"configuration file is not one of the valid \" \\\n \"choices. It must be one of the following:\\n%s\" \\\n % (self._platform, str(platforms))\n raise_smart_exception(locals(), msg)\n else:\n self._platform = None\n\n # Create output directory\n try:\n os.makedirs(self._config[\"output_directory\"])\n except:\n if not op.isdir(self._config[\"output_directory\"]):\n err = \"[!] Output directory unable to be created.\\n\" \\\n \"Path: %s\\n\\n\" % self._config[\"output_directory\"]\n raise Exception(err)\n else:\n pass\n\n # Create working directory\n try:\n os.makedirs(self._config[\"working_directory\"])\n except:\n if not op.isdir(self._config[\"working_directory\"]):\n err = \"[!] 
Output directory unable to be created.\\n\" \\\n \"Path: %s\\n\\n\" % self._config[\"working_directory\"]\n raise Exception(err)\n else:\n pass\n\n results = []\n\n # set up callback logging\n import logging\n from nipype.pipeline.plugins.callback_log import log_nodes_cb\n\n cb_log_filename = os.path.join(self._config[\"output_directory\"],\n \"callback.log\")\n # Add handler to callback log file\n cb_logger = logging.getLogger('callback')\n cb_logger.setLevel(logging.DEBUG)\n handler = logging.FileHandler(cb_log_filename)\n cb_logger.addHandler(handler)\n\n # settle run arguments (plugins)\n self.runargs = {}\n self.runargs['plugin'] = 'MultiProc'\n self.runargs['plugin_args'] = \\\n {'memory_gb': int(self._config[\"available_memory\"]),\n 'status_callback': log_nodes_cb}\n n_procs = {'n_procs': self._config[\"num_processors\"]}\n self.runargs['plugin_args'].update(n_procs)\n\n # load the participant list file into dictionary\n subdict = self.load_sublist()\n\n # flatten the participant dictionary\n self._sub_dict = self.create_session_dict(subdict)\n\n # create the list of bundles\n self._bundles_list = self.create_bundles()\n num_bundles = len(self._bundles_list)\n\n if not self._bundle_idx:\n # want to initialize the run-level log directory (not the bundle-\n # level) only the first time we run the script, due to the\n # timestamp. if sub-nodes are being kicked off by a batch file on\n # a cluster, we don't want a new timestamp for every new node run\n self._run_log_dir = op.join(self._config['output_directory'],\n '_'.join([self._run_name, \"logs\"]),\n '_'.join([strftime(\"%Y%m%d_%H_%M_%S\"),\n \"%dbundles\" % num_bundles]))\n\n if self._run_log_dir:\n if not os.path.isdir(self._run_log_dir):\n try:\n os.makedirs(self._run_log_dir)\n except:\n if not op.isdir(self._run_log_dir):\n err = \"[!] 
Log directory unable to be created.\\n\" \\\n \"Path: %s\\n\\n\" % self._run_log_dir\n raise Exception(err)\n else:\n pass\n\n if num_bundles == 1:\n self._config[\"num_sessions_at_once\"] = \\\n len(self._bundles_list[0])\n\n # Start the magic\n if not self._platform and not self._bundle_idx:\n # not a cluster/grid run\n for idx in range(1, num_bundles+1):\n results.append(self.run_one_bundle(idx))\n\n elif not self._bundle_idx:\n # there is a self._bundle_idx only if the pipeline runner is run\n # with bundle_idx as a parameter - only happening either manually,\n # or when running on a cluster\n self.submit_cluster_batch_file(num_bundles)\n\n else:\n # if there is a bundle_idx supplied to the runner\n results = self.run_one_bundle(self._bundle_idx)", "def test_lama_job_runner():\n\n configs = registration_root.glob('*.toml')\n\n for cfg in configs:\n delete_previous_files()\n\n print(f\"\\n{'#'*8} Doing config {cfg.name} {'#'*8}\")\n\n lama_job_runner.lama_job_runner(cfg, wt_registration_dir, make_job_file=True, log_level=logging.ERROR)\n lama_job_runner.lama_job_runner(cfg, wt_registration_dir, log_level=logging.ERROR)\n\n lama_job_runner.lama_job_runner(cfg, mut_registration_dir, make_job_file=True, log_level=logging.ERROR)\n lama_job_runner.lama_job_runner(cfg, mut_registration_dir, log_level=logging.ERROR)\n # return # Just do the first", "def start_master_worker():\n print(\"Starting master worker\")\n r = req.patch(f\"{MASTER_API_URL}/formation/worker\", json=API_PAYLOAD_1, headers=MASTER_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to start the worker dyno on master\")\n print(r.text)\n return False\n #wait a bit for the worker process to start\n print(\"Waiting a bit\")\n time.sleep(10)\n return True", "def test_training():\n assert init_engine('train', [\"config=first_run_test/default.yaml\"]).run() is None", "def test_runtime_bake(scheduler, os, region, pcluster_config_reader, clusters_factory, test_datadir, architecture):\n # remarkable AMIs are not available for ARM yet\n ami_type = \"remarkable\" if architecture == \"x86_64\" else \"official\"\n cluster_config = pcluster_config_reader(\n custom_ami=retrieve_latest_ami(region, os, ami_type=ami_type, architecture=architecture)\n )\n cluster = clusters_factory(cluster_config)\n remote_command_executor = RemoteCommandExecutor(cluster)\n\n # Verify no chef.io endpoint is called in cloud-init-output log to download chef installer or chef packages\"\"\"\n # on head node\n remote_command_executor.run_remote_script(str(test_datadir / \"verify_chef_download.sh\"))\n # on compute\n scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)\n result = scheduler_commands.submit_script(str(test_datadir / \"verify_chef_download.sh\"))\n job_id = scheduler_commands.assert_job_submitted(result.stdout)\n scheduler_commands.wait_job_completed(job_id)\n scheduler_commands.assert_job_succeeded(job_id)", "def test_base(self):\n self.render_config_template(\n )\n\n proc = self.start_beat()\n self.wait_until(lambda: self.log_contains(\"mockbeat start running.\"))\n proc.check_kill_and_wait()\n assert self.log_contains(\"mockbeat stopped.\")", "def worker_init_fn(worker_id):\n worker_info = torch.utils.data.get_worker_info() # type: ignore\n if hasattr(worker_info.dataset, \"transform\") and hasattr(worker_info.dataset.transform, \"set_random_state\"):\n worker_info.dataset.transform.set_random_state(worker_info.seed % (2 ** 32))", "def work(self):\n self.config_file = self.args.config\n self.init_config()\n 
self.init_db()\n\n self.kickoff()", "def test_slurm_sample_resource_1(self):\n\n # Set environment variables\n os.environ['SLURM_NODELIST'] = 'nodes[1-2]'\n os.environ['SLURM_NPROCS'] = '24'\n os.environ['SLURM_NNODES'] = '2'\n os.environ['SLURM_CPUS_ON_NODE'] = '24'\n\n # Run component with desired configuration\n self.component._cfg = self.cfg_sample_1\n self.component._configure()\n\n # Verify configured correctly\n self.assertEqual(self.component.cores_per_node, 9001)\n self.assertEqual(self.component.gpus_per_node, 99)\n self.assertEqual(self.component.lfs_per_node['path'], \"not_comet/\")\n self.assertEqual(self.component.lfs_per_node['size'], 1000)\n self.assertEqual(self.component.lm_info['cores_per_node'], 9001)\n\n return", "def _run(args, base_dir, workflows_dir, config_path):\n if not os.path.exists(config_path):\n sys.stdout.write(\n f\"The config file: {config_path} does not exist.\\nProvide a path to the config file with \"\n f\"--configfile or if you do not have a config file run:\\n\"\n f\"seq2science init {args.workflow}\\n\"\n )\n os._exit(1) # noqa\n\n # parse the args\n parsed_args = {\n \"snakefile\": os.path.join(workflows_dir, args.workflow.replace(\"-\", \"_\"), \"Snakefile\"),\n \"use_conda\": True,\n \"conda_cleanup_pkgs\": \"cache\",\n \"conda_frontend\": \"mamba\",\n \"conda_prefix\": os.path.join(base_dir, \".snakemake\"),\n \"dryrun\": args.dryrun,\n \"printreason\": args.reason,\n \"keepgoing\": args.keep_going,\n \"unlock\": args.unlock,\n \"cleanup_metadata\": args.cleanup_metadata,\n \"force_incomplete\": args.rerun_incomplete,\n \"rerun_triggers\": [\"mtime\", \"input\", \"software-env\"] if not args.skip_rerun else [],\n }\n\n # get the additional snakemake options\n snakemake_options = args.snakemakeOptions if args.snakemakeOptions is not None else dict()\n snakemake_options.setdefault(\"config\", {}).update({\"rule_dir\": os.path.join(base_dir, \"rules\")})\n snakemake_options = snakemake_options | {\"scheduler\": \"greedy\"}\n snakemake_options[\"configfiles\"] = [config_path]\n for key, value in snakemake_options.items():\n if not isinstance(value, str):\n continue\n if value.lower() == \"true\":\n snakemake_options[key] = True\n if value.lower() == \"false\":\n snakemake_options[key] = False\n\n parsed_args.update(snakemake_options)\n\n # parse the profile\n if args.profile is not None:\n profile_file = snakemake.get_profile_file(args.profile, \"config.yaml\")\n if profile_file is None:\n subjectively_prettier_error(profile_arg, \"profile given but no config.yaml found.\")\n add_profile_args(profile_file, parsed_args)\n\n # cores\n if args.cores: # command-line interface\n parsed_args[\"cores\"] = args.cores\n elif parsed_args.get(\"cores\"): # profile\n parsed_args[\"cores\"] = int(parsed_args[\"cores\"])\n elif parsed_args[\"dryrun\"]:\n parsed_args[\"cores\"] = 999\n else:\n parsed_args[\"cores\"] = 0\n\n if parsed_args[\"cores\"] < 2 and not any(\n [parsed_args[\"unlock\"], parsed_args[\"cleanup_metadata\"], parsed_args[\"dryrun\"]]\n ):\n subjectively_prettier_error(core_arg, \"specify at least two cores.\")\n\n # when running on a cluster assume cores == nodes (just like snakemake does)\n if \"cluster\" in parsed_args and not \"nodes\" in parsed_args:\n parsed_args[\"nodes\"] = parsed_args[\"cores\"]\n\n # store how seq2science was called\n parsed_args[\"config\"][\"cli_call\"] = sys.argv\n\n parsed_args[\"config\"].update({\"cores\": parsed_args[\"cores\"]})\n resource_parser(parsed_args)\n\n # run snakemake/seq2science\n # 1. 
pretty welcome message\n setup_seq2science_logger(parsed_args, args.debug)\n log_welcome(logger, args.workflow)\n\n if args.debug:\n # dump the parsed args as readable json\n import json\n logger.debug(json.dumps(parsed_args, sort_keys=True, indent=2))\n\n if not args.skip_rerun or args.unlock or args.cleanup_metadata is not None:\n # 2. start a dryrun checking which files need to be created, and check if\n # any params changed, which means we have to remove those files and\n # continue from there\n logger.info(\n \"Checking if seq2science was run already, if something in the configuration was changed, and if so, if \"\n \"seq2science needs to re-run any jobs.\"\n )\n\n with seq2science.util.CaptureStdout() as targets, seq2science.util.CaptureStderr() as errors:\n exit_code = run_snakemake(\n args.workflow.replace(\"-\", \"_\"),\n **{\n **parsed_args,\n **{\n \"list_params_changes\": True,\n \"quiet\": False,\n \"log_handler\": lambda x: None, # don't show any of the logs\n \"keep_logger\": True,\n },\n }\n )\n if args.debug:\n nl = \"\\n\"\n logger.info(f\"\"\"Targets:\\n{nl.join(sorted(targets))}\\n\\n\"\"\")\n logger.info(f\"\"\"Errors:\\n{nl.join(sorted(errors))}\\n\\n\"\"\")\n\n if not exit_code:\n os._exit(1) # noqa\n\n # 3. check which files would need a rerun, and exclude files we do\n # not want to consider:\n # - genome files, since provider will change to local\n regex_patterns = [\n \"(\\/.+){2}.*\\.(fa(\\.fai|.sizes)?|gaps\\.bed)$\", # match genome files\n \"(\\/.+){2}.*\\.annotation\\.(bed|gtf)$\", # match gene annotations\n ]\n targets = [target for target in targets if not any(re.match(pattern, target) for pattern in regex_patterns)]\n\n # 4. if there are any targets left, force to recreate those targets plus the final results (rule seq2science)\n if len(targets):\n targets += [\"seq2science\"]\n parsed_args[\"forcerun\"] = targets\n parsed_args[\"targets\"] = targets\n parsed_args[\"forcetargets\"] = True\n parsed_args[\"keep_logger\"] = True\n logger.info(\"Done. Now starting the real run.\")\n\n logger.printreason = parsed_args[\"printreason\"]\n logger.stream_handler.setStream(sys.stdout)\n parsed_args[\"config\"][\"no_config_log\"] = True\n\n # 5. start the \"real\" run where jobs actually get started\n exit_code = run_snakemake(args.workflow.replace(\"-\", \"_\"), **parsed_args)\n\n # 6. 
output exit code 0 for success and 1 for failure\n os._exit(0) if exit_code else os._exit(1) # noqa", "def test_ale_workflow_cli_smoke(tmp_path_factory):\n tmpdir = tmp_path_factory.mktemp(\"test_ale_workflow_cli_smoke\")\n sleuth_file = op.join(get_test_data_path(), \"test_sleuth_file.txt\")\n prefix = \"test\"\n\n cli._main(\n [\n \"ale\",\n \"--output_dir\",\n str(tmpdir),\n \"--prefix\",\n prefix,\n \"--n_iters\",\n \"10\",\n \"--n_cores\",\n \"1\",\n sleuth_file,\n ]\n )\n assert op.isfile(op.join(tmpdir, f\"{prefix}_input_coordinates.txt\"))", "def main(config):\n command = config.workflow_utils.command\n try:\n subprocess.run(command, shell=True, check=True)\n except AttributeError as exp:\n # add in some backward compatibility for py2.7\n subprocess.check_call(command, shell=True)", "def workflow(base_dir, # base tool path\n use_cache=1, # whether to skip already executed runs (in cache) or not (1/0)\n ignore_git=0): # whether to ignore git version or not (1/0)\n\n # get some needed variables from config file\n runs = int(config['general']['runs'])\n workers = int(config['general']['workers'])\n\n batch_size = int(config['mtje']['batch_size'])\n epochs = int(config['mtje']['epochs'])\n use_malicious_labels = int(config['mtje']['use_malicious_labels'])\n use_count_labels = int(config['mtje']['use_count_labels'])\n gen_type = config['mtje']['gen_type']\n similarity_measure = config['mtje']['similarity_measure'].lower()\n net_type = 'mtje'\n\n training_n_samples = int(config['sorel20mDataset']['training_n_samples'])\n validation_n_samples = int(config['sorel20mDataset']['validation_n_samples'])\n test_n_samples = int(config['sorel20mDataset']['test_n_samples'])\n\n min_n_anchor_samples = int(config['freshDataset']['min_n_anchor_samples'])\n max_n_anchor_samples = int(config['freshDataset']['max_n_anchor_samples'])\n fresh_n_queries = int(config['freshDataset']['n_queries'])\n n_evaluations = int(config['freshDataset']['n_evaluations'])\n\n f_c_epochs = int(config['familyClassifier']['epochs'])\n f_c_train_split_proportion = int(config['familyClassifier']['train_split_proportion'])\n f_c_valid_split_proportion = int(config['familyClassifier']['valid_split_proportion'])\n f_c_test_split_proportion = int(config['familyClassifier']['test_split_proportion'])\n f_c_batch_size = int(config['familyClassifier']['batch_size'])\n\n c_l_epochs = int(config['contrastiveLearning']['epochs'])\n c_l_train_split_proportion = int(config['contrastiveLearning']['train_split_proportion'])\n c_l_valid_split_proportion = int(config['contrastiveLearning']['valid_split_proportion'])\n c_l_test_split_proportion = int(config['contrastiveLearning']['test_split_proportion'])\n c_l_batch_size = int(config['contrastiveLearning']['batch_size'])\n c_l_rank_size = int(config['contrastiveLearning']['rank_size'])\n c_l_knn_k_min = int(config['contrastiveLearning']['knn_k_min'])\n c_l_knn_k_max = int(config['contrastiveLearning']['knn_k_max'])\n\n # initialize Hash object\n ch = Hash()\n\n # update hash with the content of the config file (for the current net type)\n ch.update(json.dumps(dict(config.items('sorel20mDataset'))))\n # get config file sha256 digest\n dataset_config_sha = ch.get_b64()\n\n # update hash with the content of the config file (for the current net type)\n ch.update(json.dumps(dict(config.items(net_type))))\n # get config file sha256 digest\n config_sha = ch.get_b64()\n\n # update hash with the content of the config file (for the freshDataset)\n 
ch.update(json.dumps(dict(config.items('freshDataset'))))\n # get config file sha256 digest\n fresh_dataset_config_sha = ch.get_b64()\n\n # create copy of the current config hash digest\n ch_copy = ch.copy()\n\n # update hash with the content of the config file (for the freshDataset)\n ch.update(json.dumps(dict(config.items('familyClassifier'))))\n # get config file sha256 digest\n family_class_config_sha = ch.get_b64()\n\n # update hash with the content of the config file (for the freshDataset)\n ch_copy.update(json.dumps(dict(config.items('contrastiveLearning'))))\n # get config file sha256 digest\n contr_learn_config_sha = ch_copy.get_b64()\n\n # instantiate key-n_samples dict\n n_samples_dict = {'train': training_n_samples,\n 'validation': validation_n_samples,\n 'test': test_n_samples}\n\n # Note: The entrypoint names are defined in MLproject. The artifact directories\n # are documented by each step's .py file.\n\n # start mlflow run\n with mlflow.start_run() as active_run:\n # get code git commit version\n git_commit = active_run.data.tags.get(mlflow_tags.MLFLOW_GIT_COMMIT)\n\n # log config file\n mlflow.log_text(json.dumps({s: dict(config.items(s)) for s in config.sections()}), 'config.txt')\n\n # set dataset destination dir\n dataset_dir = os.path.join(base_dir, 'dataset')\n # set dataset base path (directory containing 'meta.db')\n dataset_base_path = os.path.join(dataset_dir, '09-DEC-2020', 'processed-data')\n # set pre-processed dataset base path (directory containing .dat files)\n pre_processed_dataset_dir = os.path.join(dataset_dir, '09-DEC-2020', 'pre-processed_dataset')\n # set fresh dataset base path (directory containing .dat files)\n fresh_dataset_dir = os.path.join(dataset_dir, 'fresh_dataset')\n\n # if pre-processed dataset files for this run parameters are not present, generate them\n if not preproc_check_files(destination_dir=pre_processed_dataset_dir,\n n_samples_dict=n_samples_dict):\n logger.info(\"Pre-processed dataset not found.\")\n\n # if the original Sorel20M dataset is not present, download it\n if not download_check_files(dataset_dir):\n logger.info(\"Dataset not found.\")\n\n # run dataset downloader\n download_dataset_run = run(\"download_dataset\", {\n 'destination_dir': dataset_dir\n }, config_sha=dataset_config_sha)\n\n # pre-process dataset\n preprocess_dataset_run = run(\"preprocess_dataset\", {\n 'ds_path': dataset_base_path,\n 'destination_dir': pre_processed_dataset_dir,\n 'training_n_samples': training_n_samples,\n 'validation_n_samples': validation_n_samples,\n 'test_n_samples': test_n_samples,\n 'batch_size': batch_size,\n 'remove_missing_features': str(os.path.join(dataset_base_path, \"shas_missing_ember_features.json\"))\n }, config_sha=dataset_config_sha)\n\n # if the fresh dataset is not present, generate it\n if not fresh_check_files(fresh_dataset_dir):\n logger.info(\"Fresh dataset not found.\")\n\n # generate fresh dataset\n build_fresh_dataset_run = run(\"build_fresh_dataset\", {\n 'dataset_dest_dir': fresh_dataset_dir\n }, config_sha=fresh_dataset_config_sha)\n\n # initialize results files dicts\n results_files = {}\n c_l_results_files = {}\n\n # instantiate common (between consecutive training runs) training parameters\n common_training_params = {\n 'ds_path': pre_processed_dataset_dir,\n 'net_type': net_type if similarity_measure == 'dot' else net_type + '_{}'.format(similarity_measure),\n 'gen_type': gen_type,\n 'batch_size': batch_size,\n 'epochs': epochs,\n 'training_n_samples': training_n_samples,\n 'validation_n_samples': 
validation_n_samples,\n 'use_malicious_labels': use_malicious_labels,\n 'use_count_labels': use_count_labels,\n 'workers': workers\n }\n\n # instantiate common (between consecutive training runs) evaluation parameters\n common_evaluation_params = {\n 'ds_path': pre_processed_dataset_dir,\n 'net_type': net_type if similarity_measure == 'dot' else net_type + '_{}'.format(similarity_measure),\n 'gen_type': gen_type,\n 'batch_size': batch_size,\n 'test_n_samples': test_n_samples,\n 'evaluate_malware': use_malicious_labels,\n 'evaluate_count': use_count_labels\n }\n\n # for each training run\n for training_run_id in range(runs):\n logger.info(\"initiating training run n. {}\".format(str(training_run_id)))\n\n # -- Model Training and Evaluation Steps -------------------------------------------------------------------\n # set training parameters\n training_params = common_training_params\n training_params.update({'training_run': training_run_id})\n\n # train network (get or run) on Sorel20M dataset\n training_run = get_or_run(\"train_network\",\n training_params,\n git_commit,\n ignore_git=bool(ignore_git),\n use_cache=bool(use_cache),\n resume=True,\n config_sha=config_sha)\n\n # get model checkpoints path\n checkpoint_path = parse.unquote(parse.urlparse(os.path.join(training_run.info.artifact_uri,\n \"model_checkpoints\")).path)\n\n # set model checkpoint filename\n checkpoint_file = os.path.join(checkpoint_path, \"epoch_{}.pt\".format(epochs))\n\n # set evaluation parameters\n evaluation_params = common_evaluation_params\n evaluation_params.update({'checkpoint_file': checkpoint_file})\n\n # evaluate model against Sorel20M dataset\n evaluation_run = get_or_run(\"evaluate_network\",\n evaluation_params,\n git_commit,\n ignore_git=bool(ignore_git),\n use_cache=bool(use_cache),\n config_sha=config_sha)\n\n # get model evaluation results path\n results_path = parse.unquote(parse.urlparse(os.path.join(evaluation_run.info.artifact_uri,\n \"model_results\")).path)\n\n # set model evaluation results filename\n results_file = os.path.join(results_path, \"results.csv\")\n\n # add file path to results_files dictionary (used for plotting mean results)\n results_files[\"run_id_\" + str(training_run_id)] = results_file\n\n # compute (and plot) all tagging results\n all_tagging_results_run = get_or_run(\"compute_all_run_results\", {\n 'results_file': results_file,\n 'use_malicious_labels': use_malicious_labels,\n 'use_tag_labels': 1\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=config_sha)\n # ----------------------------------------------------------------------------------------------------------\n\n # -- Model Evaluation using Fresh Dataset Steps ------------------------------------------------------------\n # evaluate model against fresh dataset\n fresh_evaluation_run = get_or_run(\"evaluate_fresh\", {\n 'fresh_ds_path': fresh_dataset_dir,\n 'checkpoint_path': checkpoint_file,\n 'net_type': net_type if similarity_measure == 'dot' else net_type + '_{}'.format(similarity_measure),\n 'min_n_anchor_samples': min_n_anchor_samples,\n 'max_n_anchor_samples': max_n_anchor_samples,\n 'n_query_samples': fresh_n_queries,\n 'n_evaluations': n_evaluations\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=fresh_dataset_config_sha)\n\n # get model evaluation results path\n fresh_results_path = parse.unquote(parse.urlparse(os.path.join(fresh_evaluation_run.info.artifact_uri,\n \"fresh_prediction_results\")).path)\n\n # set model evaluation results 
filename\n fresh_results_file = os.path.join(fresh_results_path, \"fresh_prediction_results.json\")\n\n # compute (and plot) all family prediction results (on fresh dataset)\n all_tagging_results_run = get_or_run(\"compute_all_run_fresh_results\", {\n 'results_file': fresh_results_file\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=fresh_dataset_config_sha)\n # ----------------------------------------------------------------------------------------------------------\n\n # -- Family Classifier Steps -------------------------------------------------------------------------------\n # create family classifier from previously trained network and train it on fresh dataset\n f_c_train_run = get_or_run(\"train_family_classifier\", {\n 'fresh_ds_path': fresh_dataset_dir,\n 'checkpoint_path': checkpoint_file,\n 'epochs': f_c_epochs,\n 'training_run': training_run_id,\n 'train_split_proportion': f_c_train_split_proportion,\n 'valid_split_proportion': f_c_valid_split_proportion,\n 'test_split_proportion': f_c_test_split_proportion,\n 'batch_size': f_c_batch_size\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=family_class_config_sha)\n\n # get model checkpoints path\n f_c_checkpoint_path = parse.unquote(parse.urlparse(os.path.join(f_c_train_run.info.artifact_uri,\n \"model_checkpoints\")).path)\n\n # set model checkpoint filename\n f_c_checkpoint_file = os.path.join(f_c_checkpoint_path, \"epoch_{}.pt\".format(f_c_epochs))\n\n # evaluate model against fresh dataset\n f_c_eval_run = get_or_run(\"evaluate_family_classifier\", {\n 'fresh_ds_path': fresh_dataset_dir,\n 'checkpoint_path': f_c_checkpoint_file,\n 'training_run': training_run_id,\n 'train_split_proportion': f_c_train_split_proportion,\n 'valid_split_proportion': f_c_valid_split_proportion,\n 'test_split_proportion': f_c_test_split_proportion,\n 'batch_size': f_c_batch_size\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=family_class_config_sha)\n\n # get model evaluation results path\n f_c_results_path = parse.unquote(parse.urlparse(os.path.join(f_c_eval_run.info.artifact_uri,\n \"family_class_results\")).path)\n\n # set model evaluation results filename\n f_c_results_file = os.path.join(f_c_results_path, \"results.csv\")\n\n # compute (and plot) all tagging results\n f_c_compute_results_run = get_or_run(\"compute_all_family_class_results\", {\n 'results_file': f_c_results_file,\n 'fresh_ds_path': fresh_dataset_dir\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=family_class_config_sha)\n # ----------------------------------------------------------------------------------------------------------\n\n # -- Contrastive Learning Steps ----------------------------------------------------------------------------\n # create family classifier from previously trained network and train it on fresh dataset\n c_l_train_run = get_or_run(\"train_contrastive_model\", {\n 'fresh_ds_path': fresh_dataset_dir,\n 'checkpoint_path': checkpoint_file,\n 'epochs': c_l_epochs,\n 'training_run': training_run_id,\n 'train_split_proportion': c_l_train_split_proportion,\n 'valid_split_proportion': c_l_valid_split_proportion,\n 'test_split_proportion': c_l_test_split_proportion,\n 'batch_size': c_l_batch_size\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=contr_learn_config_sha)\n\n # get model checkpoints path\n c_l_checkpoint_path = 
parse.unquote(parse.urlparse(os.path.join(c_l_train_run.info.artifact_uri,\n \"model_checkpoints\")).path)\n\n # set model checkpoint filename\n c_l_checkpoint_file = os.path.join(c_l_checkpoint_path, \"epoch_{}.pt\".format(c_l_epochs))\n\n # evaluate model against fresh dataset\n c_l_eval_run = get_or_run(\"evaluate_contrastive_model\", {\n 'fresh_ds_path': fresh_dataset_dir,\n 'checkpoint_path': c_l_checkpoint_file,\n 'training_run': training_run_id,\n 'train_split_proportion': c_l_train_split_proportion,\n 'valid_split_proportion': c_l_valid_split_proportion,\n 'test_split_proportion': c_l_test_split_proportion,\n 'batch_size': c_l_batch_size,\n 'rank_size': c_l_rank_size,\n 'knn_k_min': c_l_knn_k_min,\n 'knn_k_max': c_l_knn_k_max\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=contr_learn_config_sha)\n\n # get model evaluation results path\n c_l_results_path = parse.unquote(parse.urlparse(os.path.join(c_l_eval_run.info.artifact_uri,\n \"contrastive_learning_results\")).path)\n\n # set model evaluation results filename\n c_l_results_file = os.path.join(c_l_results_path, \"results.csv\")\n\n # compute (and plot) all tagging results\n c_l_compute_results_run = get_or_run(\"compute_contrastive_learning_results\", {\n 'results_file': c_l_results_file,\n 'fresh_ds_path': fresh_dataset_dir,\n 'knn_k_min': c_l_knn_k_min,\n 'knn_k_max': c_l_knn_k_max\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=contr_learn_config_sha)\n\n # get model evaluation results path\n c_l_scores_dir_path = parse.unquote(parse.urlparse(os.path.join(c_l_compute_results_run.info.artifact_uri,\n \"contrastive_learning_scores\")).path)\n\n # add dir path to c_l_results_files dictionary (used for plotting mean score trends)\n c_l_results_files[\"run_id_\" + str(training_run_id)] = c_l_scores_dir_path\n # ----------------------------------------------------------------------------------------------------------\n\n # create temp dir name using the value from config_sha (sha of some parts of the config file).\n # -> This is done in order to have a different (but predictable) run_to_filename at each set of runs with\n # different parameters. This allows mlflow to know when it is needed to run 'per_tag_plot_runs'. If, on the\n # other hand a simple tempfile.TemporaryDirectory() was used then mlflow would run 'per_tag_plot_runs' every\n # time, even if a precedent run was available (because the parameter 'run_to_filename_json' would be different)\n tempdir = os.path.join(base_dir, 'tmp_{}'.format(config_sha))\n # create temp dir\n os.makedirs(tempdir, exist_ok=True)\n\n # create contrastive learning temp dir name using the value from config_sha (sha of some parts of the config\n # file). -> This is done in order to have a different (but predictable) run_to_filename at each set of runs with\n # different parameters. This allows mlflow to know when it is needed to run 'per_tag_plot_runs'. 
If, on the\n # other hand a simple tempfile.TemporaryDirectory() was used then mlflow would run 'per_tag_plot_runs' every\n # time, even if a precedent run was available (because the parameter 'run_to_filename_json' would be different)\n c_l_tempdir = os.path.join(base_dir, 'tmp_{}'.format(contr_learn_config_sha))\n # create temp dir\n os.makedirs(c_l_tempdir, exist_ok=True)\n\n # set run-to-filename file path\n run_to_filename = os.path.join(tempdir, \"results.json\")\n\n # create and open the results.json file in write mode\n with open(run_to_filename, \"w\") as output_file:\n # save results_files dictionary as a json file\n json.dump(results_files, output_file)\n\n mlflow.log_artifact(run_to_filename, \"run_to_filename\")\n\n # set run-to-filename file path\n c_l_run_to_filename = os.path.join(c_l_tempdir, \"c_l_results.json\")\n\n # create and open the c_l_results.json file in write mode\n with open(c_l_run_to_filename, \"w\") as output_file:\n # save c_l_results_files dictionary as a json file\n json.dump(c_l_results_files, output_file)\n\n mlflow.log_artifact(c_l_run_to_filename, \"run_to_filename\")\n\n # if there is more than 1 run, compute also per-tag mean results\n if runs > 1:\n # plot all roc distributions\n per_tag_plot_runs = get_or_run(\"plot_all_roc_distributions\", {\n 'run_to_filename_json': run_to_filename,\n 'use_malicious_labels': use_malicious_labels,\n 'use_tag_labels': 1\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=config_sha)\n\n # plot all model mean scores trends\n plot_all_scores_trends = get_or_run(\"plot_all_contrastive_scores_trends\", {\n 'run_to_filename_json': c_l_run_to_filename,\n 'knn_k_min': c_l_knn_k_min,\n 'knn_k_max': c_l_knn_k_max\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=contr_learn_config_sha)\n\n # remove temp files and temporary directory\n os.remove(run_to_filename)\n # os.remove(fresh_run_to_filename)\n os.rmdir(tempdir)\n\n # remove contrastive learning temp files and temporary directory\n os.remove(c_l_run_to_filename)\n # os.remove(fresh_run_to_filename)\n os.rmdir(c_l_tempdir)", "def run_scenario(settings, params, curr_exp, n_exp):\n try:\n start_time = time.time()\n proc_name = mp.current_process().name\n logger = logging.getLogger('runner-%s' % proc_name)\n\n # Get list of metrics required\n metrics = settings.DATA_COLLECTORS\n\n # Copy parameters so that they can be manipulated\n tree = copy.deepcopy(params)\n\n # Set topology\n topology_spec = tree['topology']\n topology_name = topology_spec.pop('name')\n if topology_name not in TOPOLOGY_FACTORY:\n logger.error('No topology factory implementation for %s was found.'\n % topology_name)\n return None\n topology = TOPOLOGY_FACTORY[topology_name](**topology_spec)\n\n workload_spec = tree['workload']\n workload_name = workload_spec.pop('name')\n if workload_name not in WORKLOAD:\n logger.error('No workload implementation named %s was found.'\n % workload_name)\n return None\n workload = WORKLOAD[workload_name](topology, **workload_spec)\n\n # Assign computation to nodes\n if 'computation_placement' in tree:\n computationpl_spec = tree['computation_placement']\n computationpl_name = computationpl_spec.pop('name')\n if computationpl_name not in COMPUTATION_PLACEMENT:\n logger.error('No computation placement named %s was found.'\n % computationpl_name)\n return None\n COMPUTATION_PLACEMENT[computationpl_name](topology, **computationpl_spec)\n\n # Assign caches to nodes\n if 'cache_placement' in tree:\n 
cachepl_spec = tree['cache_placement']\n cachepl_name = cachepl_spec.pop('name')\n if cachepl_name not in CACHE_PLACEMENT:\n logger.error('No cache placement named %s was found.'\n % cachepl_name)\n return None\n network_cache = cachepl_spec.pop('network_cache')\n # Cache budget is the cumulative number of cache entries across\n # the whole network\n cachepl_spec['cache_budget'] = workload.n_contents * network_cache\n \n # Onur: need the full budget to assign to receivers for SIT cache placement\n cachepl_spec['n_contents'] = workload.n_contents\n # NOTE: cache placement is now done together with comp. spot placement!\n #CACHE_PLACEMENT[cachepl_name](topology, **cachepl_spec)\n\n # Assign contents to sources\n # If there are many contents, after doing this, performing operations\n # requiring a topology deep copy, i.e. to_directed/undirected, will\n # take long.\n contpl_spec = tree['content_placement']\n contpl_name = contpl_spec.pop('name')\n if contpl_name not in CONTENT_PLACEMENT:\n logger.error('No content placement implementation named %s was found.'\n % contpl_name)\n return None\n CONTENT_PLACEMENT[contpl_name](topology, workload.contents, **contpl_spec)\n\n # caching and routing strategy definition\n strategy = tree['strategy']\n warmup_strategy = tree['warmup_strategy']\n if strategy['name'] not in STRATEGY:\n logger.error('No implementation of strategy %s was found.' % strategy['name'])\n return None\n if warmup_strategy['name'] not in STRATEGY:\n logger.error('No implementation of warm-up strategy %s was found.' % warmup_strategy['name'])\n return None \n\n # cache eviction policy definition\n cache_policy = tree['cache_policy']\n if cache_policy['name'] not in CACHE_POLICY:\n logger.error('No implementation of cache policy %s was found.' % cache_policy['name'])\n return None\n\n # task scheduling policy at the computation spots\n sched_policy = tree['sched_policy']\n\n # Configuration parameters of network model\n netconf = tree['netconf']\n\n # Text description of the scenario run to print on screen\n scenario = tree['desc'] if 'desc' in tree else \"Description N/A\"\n\n logger.info('Experiment %d/%d | Preparing scenario: %s', curr_exp, n_exp, scenario)\n\n if any(m not in DATA_COLLECTOR for m in metrics):\n logger.error('There are no implementations for at least one data collector specified')\n return None\n\n collectors = {m: {} for m in metrics}\n\n logger.info('Experiment %d/%d | Start simulation', curr_exp, n_exp)\n results = exec_experiment(topology, workload, netconf, strategy, cache_policy, collectors, warmup_strategy, sched_policy)\n\n duration = time.time() - start_time\n logger.info('Experiment %d/%d | End simulation | Duration %s.',\n curr_exp, n_exp, timestr(duration, True))\n return (params, results, duration)\n except KeyboardInterrupt:\n logger.error('Received keyboard interrupt. 
Terminating')\n sys.exit(-signal.SIGINT)\n except Exception as e:\n err_type = str(type(e)).split(\"'\")[1].split(\".\")[1]\n err_message = e.message\n logger.error('Experiment %d/%d | Failed | %s: %s\\n%s',\n curr_exp, n_exp, err_type, err_message,\n traceback.format_exc())", "def main():\n run_simulation(spectral=False, ml=False, num_procs=1)\n run_simulation(spectral=True, ml=False, num_procs=1)\n run_simulation(spectral=False, ml=True, num_procs=1)\n run_simulation(spectral=True, ml=True, num_procs=1)\n run_simulation(spectral=False, ml=True, num_procs=10)\n run_simulation(spectral=True, ml=True, num_procs=10)", "def worker(lconf):\n try:\n prepare_and_run(lconf)\n with OUTPUT_LOCK:\n print '[SUCCEEDED]', lconf\n except Exception as e:\n exc_buffer = StringIO()\n traceback.print_exc(file=exc_buffer)\n logging.error('Uncaught exception in worker process:\\n%s',\n exc_buffer.getvalue())\n raise e", "def worker_init_fn(worker_id):\n np.random.seed(np.random.get_state()[1][0] + worker_id)", "def worker_init_fn(worker_id):\n np.random.seed(np.random.get_state()[1][0] + worker_id)", "def test_relaunch_deployment_run(self):\n pass", "def test_local_env_pass_implicit(fileutils) -> None:\n exp_value = str(uuid.uuid4())\n env_key = \"test_local_env_pass_implicit\"\n os.environ[env_key] = exp_value\n\n test_dir = fileutils.make_test_dir()\n exp_dir = f\"{test_dir}/exp\"\n os.makedirs(exp_dir)\n script = fileutils.get_test_conf_path(\"check_env.py\")\n\n exp = Experiment(\"LRZ\", exp_path=exp_dir, launcher=\"slurm\")\n\n exe_name = \"python\"\n exe_args = [script, env_key]\n\n # Create the RunSettings associated with the workload manager (WLM) run command\n run_args = {\"--nodes\": 1, \"--ntasks\": 1, \"--time\": \"00:01:00\"}\n # NOTE: not passing env_args into run_settings here, relying on --export=ALL default\n settings = RunSettings(exe_name, exe_args, run_command=\"srun\", run_args=run_args)\n app_name = \"echo_app\"\n app = exp.create_model(app_name, settings)\n\n # generate the experiment structure and start the model\n exp.generate(app, overwrite=True)\n exp.start(app, block=True, summary=False)\n\n assert env_key not in settings.env_vars\n os.environ.pop(env_key)\n\n with open(f\"{exp_dir}/{app_name}/{app_name}.out\") as app_outfile:\n app_output = app_outfile.read()\n \n # verify application was able to access the env var\n assert f\"{env_key}=={exp_value}\" in app_output", "def experiment(**config):\n from ..training.train import training\n \n training(config)", "def _environment(self):\n\n self.spark_home = self._config_default(\"spark-home\",\n self._context(SparkSubmit.SPARK_HOME, default = os.environ.get(SparkSubmit.SPARK_HOME,None)))\n assert self.spark_home, \"unable to detect SPARK_HOME. 
set SPARK_HOME as directed in the task documentation\"\n assert os.path.exists(self.spark_home), \"provided SPARK_HOME doesn't exists\"\n\n spark_config = {'cluster-config': {}, 'other-config': {}}\n if 'config-file' in self._config_keys():\n spark_config.update(yaml.load(open(self._config('config-file')))['spark-config'])\n\n self.app_config = []\n\n spark_app = self._config('app-config')\n self.app_config.append(spark_app['application'])\n app_params = SparkSubmit._flat_node_to_cmd_line_args(spark_app['params']) if 'params' in spark_app else []\n self.app_config.extend(app_params)\n if 'resources' in spark_app:\n resources = [ ['--%s' % item] + (spark_app['resources'][item]) for item in spark_app['resources'].keys() ]\n self.resources = list(itertools.chain(*resources))\n else:\n self.resources = []\n\n\n cluster_config = self._config_default('cluster-config', {})\n cluster_config.update(spark_config['cluster-config'])\n self.cluster_options = list(itertools.chain(*[ ['--%s' % item, str(cluster_config[item]) ] for item in cluster_config.keys() ]))\n\n\n ##other options\n ## cluster options\n other_options = self._config_default('other-config',{})\n cluster_config.update(spark_config['other-config'])\n self.other_options = list(itertools.chain(*[ ['--%s' % item, str(other_options[item]) ] for item in other_options.keys() ]))", "def runner_setup():\n concurrent_sessions = 5\n runner = VisualGridRunner(concurrent_sessions)\n yield runner", "def test_base(self):\n self.render_config_template(\n redis=True,\n redis_host=os.getenv('REDIS_HOST')\n )\n\n proc = self.start_beat()\n self.wait_until(\n lambda: self.output_has(lines=1)\n )\n\n exit_code = proc.kill_and_wait()\n assert exit_code == 0", "async def test_script_main(config, mocker, monkeypatch, path_map_mock):\n for key in config.keys():\n monkeypatch.setenv(key, config[key])\n mock_event_loop = mocker.patch(\"asyncio.get_event_loop\")\n mock_root_logger = mocker.patch(\"logging.getLogger\")\n mock_status_loop = mocker.patch(\"lta.unpacker.status_loop\")\n mock_work_loop = mocker.patch(\"lta.unpacker.work_loop\")\n main()\n mock_event_loop.assert_called()\n mock_root_logger.assert_called()\n mock_status_loop.assert_called()\n mock_work_loop.assert_called()", "def setup(self) -> None:\n mlflow.set_tracking_uri('file://' + hutils.get_original_cwd() + '/mlruns')\n if self.log_mlflow:\n mlflow.set_experiment(self.config.runner.exp_name)\n \n if self.log_mlflow:\n self.log_parameters(self.config)\n mlflow.log_param('node', os.uname()[1])", "def _setup_environment_and_configs(args, appengine_path):\n clusterfuzz_dir = os.path.abspath(os.path.join(args.directory, 'clusterfuzz'))\n\n # Matches startup scripts.\n os.environ['PYTHONPATH'] = ':'.join([\n os.getenv('PYTHONPATH', ''),\n appengine_path,\n os.path.join(clusterfuzz_dir, 'src'),\n ])\n\n os.environ['ROOT_DIR'] = clusterfuzz_dir\n if not os.getenv('BOT_NAME'):\n os.environ['BOT_NAME'] = args.name\n\n os.environ['LD_LIBRARY_PATH'] = '{0}:{1}'.format(\n os.path.join(clusterfuzz_dir, 'src', 'clusterfuzz', '_internal',\n 'scripts'), os.getenv('LD_LIBRARY_PATH', ''))\n\n tmpdir = os.path.join(clusterfuzz_dir, 'bot_tmpdir')\n if not os.path.exists(tmpdir):\n os.mkdir(tmpdir)\n os.environ['TMPDIR'] = tmpdir\n os.environ['BOT_TMPDIR'] = tmpdir\n\n os.environ['KILL_STALE_INSTANCES'] = 'False'\n os.environ['LOCAL_DEVELOPMENT'] = 'True'\n os.environ['DATASTORE_EMULATOR_HOST'] = constants.DATASTORE_EMULATOR_HOST\n os.environ['PUBSUB_EMULATOR_HOST'] = constants.PUBSUB_EMULATOR_HOST\n 
os.environ['APPLICATION_ID'] = constants.TEST_APP_ID\n\n if not os.getenv('UNTRUSTED_WORKER'):\n local_gcs_buckets_path = os.path.abspath(\n os.path.join(args.server_storage_path, 'local_gcs'))\n assert os.path.exists(local_gcs_buckets_path), (\n 'Server storage path not found, make sure to start run_server with '\n 'the same storage path.')\n\n os.environ['LOCAL_GCS_BUCKETS_PATH'] = local_gcs_buckets_path\n\n if args.android_serial:\n if not os.getenv('OS_OVERRIDE'):\n os.environ['OS_OVERRIDE'] = 'ANDROID'\n\n os.environ['ANDROID_SERIAL'] = args.android_serial", "def test_execute_deployment(self):\n pass", "def update_worker():\n from test import get_remote_runner\n runner = get_remote_runner()\n runner.run(\"python2.7 /vagrant/bootstrap_lxc_manager.py --update_only=True\")" ]
[ "0.6706494", "0.6451912", "0.62901366", "0.6208054", "0.6111138", "0.59998226", "0.59990007", "0.59590906", "0.5952131", "0.5922806", "0.5853597", "0.579472", "0.57890517", "0.57299614", "0.5724651", "0.5720135", "0.57154197", "0.57125825", "0.5700395", "0.5678336", "0.5675108", "0.5670209", "0.566518", "0.5664975", "0.5656063", "0.5654081", "0.5638076", "0.56345034", "0.56298196", "0.5617696", "0.560728", "0.55967987", "0.55782944", "0.5539412", "0.5529398", "0.5518406", "0.5505211", "0.549633", "0.54794437", "0.54769874", "0.54723054", "0.5471996", "0.54676116", "0.54638535", "0.5461949", "0.5460987", "0.5460051", "0.5459727", "0.5459203", "0.54504925", "0.5444799", "0.5436641", "0.54340464", "0.54112566", "0.54111105", "0.541038", "0.5399934", "0.53743774", "0.53704834", "0.5367358", "0.5365268", "0.5361397", "0.53570133", "0.5352458", "0.5345613", "0.53416157", "0.53347856", "0.5334358", "0.5333161", "0.5332061", "0.5330198", "0.5329122", "0.532567", "0.53247577", "0.53216255", "0.53209835", "0.5317115", "0.5313542", "0.53123593", "0.5311396", "0.5305324", "0.53027934", "0.5298557", "0.52898216", "0.52879965", "0.5286808", "0.5282901", "0.5275211", "0.5275211", "0.527297", "0.5269263", "0.5266645", "0.5265484", "0.52515286", "0.52509874", "0.52440995", "0.523997", "0.5239501", "0.5237325", "0.523688" ]
0.70054567
0
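Taken together, the negative_scores, document_score, and document_rank fields above describe how the paired document fared against its negatives. A minimal sketch, under the assumption that document_rank simply counts the negatives whose score beats the document's (the field semantics are an interpretation, not documented here):

# Hypothetical reading of the score fields above; the semantics are assumed.
document_score = 0.70054567
negative_scores = [0.6706494, 0.6451912, 0.62901366]  # first few entries only

document_rank = sum(1 for s in negative_scores if s > document_score)
print(document_rank)  # -> 0: the paired document outscores every listed negative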
You can provide an initialization script for each worker to call before the workflow starts. The most common use case for such a script is to launch a local dvid server on each worker (for posting in parallel to the cloud). We provide the necessary script for local dvid workers out-of-the-box, in scripts/worker-dvid. This test verifies that it works.
def test_worker_dvid_initialization():
    repo_dir = Path(flyemflows.__file__).parent.parent
    template_dir = tempfile.mkdtemp(suffix="test-worker-dvid")

    # Copy worker script/config into the template
    shutil.copy(f'{repo_dir}/scripts/worker-dvid/dvid.toml',
                f'{template_dir}/dvid.toml')
    shutil.copy(f'{repo_dir}/scripts/worker-dvid/launch-worker-dvid.sh',
                f'{template_dir}/launch-worker-dvid.sh')

    config = {
        "workflow-name": "workflow",
        "cluster-type": CLUSTER_TYPE,

        "worker-initialization": {
            "script-path": "launch-worker-dvid.sh",
            "only-once-per-machine": True,
            "script-args": ["_TEST_SCRIPT_FAKE_ARG_"],  # This is just here to make it easy to identify the process
            "launch-delay": 1.0
        }
    }

    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        yaml.dump(config, f)

    def is_worker_dvid_running():
        return len(find_processes('_TEST_SCRIPT_FAKE_ARG_')) > 0

    @checkrun
    def execute(workflow_inst):
        script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent
        assert is_worker_dvid_running(), f"Worker DVID is not running. Check logs in:\n{script_dir}"

    _execution_dir, workflow_inst = launch_flow(template_dir, 1, _custom_execute_fn=execute)
    script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent

    assert not is_worker_dvid_running(), \
        ("Worker DVID remained running after the workflow exited."
         f"Check logs in:\n{script_dir}")
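The key piece of the test above is the worker-initialization block it serializes to workflow.yaml. As a rough sketch of what that file holds on disk (with the string "local-cluster" standing in for CLUSTER_TYPE, whose real value is supplied elsewhere in the test suite):

# Sketch of the workflow.yaml written by the test; "local-cluster" is only a
# stand-in for CLUSTER_TYPE.
import yaml

config = {
    "workflow-name": "workflow",
    "cluster-type": "local-cluster",
    "worker-initialization": {
        "script-path": "launch-worker-dvid.sh",
        "only-once-per-machine": True,
        "script-args": ["_TEST_SCRIPT_FAKE_ARG_"],
        "launch-delay": 1.0,
    },
}

print(yaml.dump(config, default_flow_style=False))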
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_worker_initialization(setup_worker_initialization_template):\n template_dir, _config, once_per_machine = setup_worker_initialization_template\n \n num_workers = 2\n if once_per_machine or CLUSTER_TYPE in (\"synchronous\", \"processes\"):\n expected_script_count = 1\n else:\n expected_script_count = num_workers\n \n @checkrun\n def execute(workflow_inst):\n script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent\n script_count = len(find_processes('_TEST_SCRIPT_FAKE_ARG_'))\n assert script_count > 0, f\"Worker script is not running. Check logs in:\\n{script_dir}\"\n assert script_count <= expected_script_count, f\"Worker script started too many times. Check logs in:\\n{script_dir}\"\n assert script_count == expected_script_count, f\"Worker script not started on all workers. Check logs in:\\n{script_dir}\"\n \n _execution_dir, workflow_inst = launch_flow(template_dir, num_workers, _custom_execute_fn=execute)\n script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent\n script_count = len(find_processes('_TEST_SCRIPT_FAKE_ARG_'))\n\n assert script_count == 0, \\\n (\"Worker script(s) remained running after the workflow exited.\"\\\n f\"Check logs in:\\n{script_dir}\")", "def setup_worker_initialization_template(request):\n once_per_machine = request.param\n template_dir = tempfile.mkdtemp(suffix=\"test-worker-initialization\")\n\n worker_script = f\"{template_dir}/do-nothing.sh\"\n with open(worker_script, 'w') as f:\n f.write(\"#!/bin/bash\\n\")\n f.write(\"sleep 10\")\n os.chmod(worker_script, 0o777)\n \n config = {\n \"workflow-name\": \"workflow\",\n \"cluster-type\": CLUSTER_TYPE,\n \n \"worker-initialization\": {\n \"script-path\": \"do-nothing.sh\",\n \"only-once-per-machine\": once_per_machine,\n \"script-args\": [\"_TEST_SCRIPT_FAKE_ARG_\"], # This is just here to make it easy to identify the process\n \"launch-delay\": 0\n }\n }\n \n with open(f\"{template_dir}/workflow.yaml\", 'w') as f:\n yaml.dump(config, f)\n\n return template_dir, config, once_per_machine", "def init_worker(self, worker_id) :\n\n # since this is called in a separate process,\n # we need to get a consistent view of the settings\n startup.main(self.mode, self.rank)\n\n # initialize the random seed for this process\n # we don't use just the worker_id but also the rank\n # so we truly get different random numbers in all workers,\n # not restricted to the current pool\n # note that we get some entropy from the time\n # so different epochs get different data augmentations\n np.random.seed((hash(time())\n + (settings.RANK * torch.utils.data.get_worker_info().num_workers\n + worker_id)) % 2**32)", "def init_worker(*shared_args_list):\n global SHARED_ARGS\n SHARED_ARGS = shared_args_list", "def initialize(self,init):\n logger.info('*** initialize: worker id=%d',self._agent.wid)\n self.commands = {'initialize':None, 'before_do_work':None, 'after_do_work':None, 'finalize':None}\n self.commands.update(init.get(self._agent.wid,{}))\n exec_command(self.commands['initialize'])", "def worker_init_fn(worker_id: int) -> None:\n worker_info = torch.utils.data.get_worker_info()\n set_rnd(worker_info.dataset, seed=worker_info.seed) # type: ignore[union-attr]", "def worker_init_fn(worker_id):\n worker_info = torch.utils.data.get_worker_info() # type: ignore\n if hasattr(worker_info.dataset, \"transform\") and hasattr(worker_info.dataset.transform, \"set_random_state\"):\n worker_info.dataset.transform.set_random_state(worker_info.seed % (2 ** 32))", "def 
worker_init_fn(self, worker_id: int) -> None:\n np.random.seed(np.random.get_state()[1][0] + worker_id + random.randint(1, 1000))\n\n worker_info = torch.utils.data.get_worker_info()\n worker_info.dataset.set_worker_id(worker_id)\n worker_info.dataset.examples, shard_stats = self.get_worker_shard(\n worker_info.dataset.examples, worker_info.num_workers, worker_id\n )\n worker_info.dataset.logger.info(\n f\"Stats for shard created for worker {worker_id}: \\n {shard_stats}\"\n )\n worker_info.dataset.create_language_index_mapping()", "def _init_workloads(self):\n php_cgi = '/usr/bin/php-cgi'\n workloads = []\n\n # Workloads served with node.js\n workloads.append(nodejs.NodeJS('yahvp', 'Yet Another HTML5 Video Player',\n [Arg('--port', '7000')]))\n\n return workloads", "def worker_init_fn(worker_id):\r\n base_seed = torch.IntTensor(1).random_().item()\r\n #print(worker_id, base_seed)\r\n np.random.seed(base_seed + worker_id)", "def init_workers():\n party_queue = Queue()\n p = Producer(party_queue)\n p.daemon = True\n c = Consumer(party_queue)\n c.deamon= True\n m = MasterUpdater(db,application_name)\n m.deamon = True\n p.start()\n c.start()\n m.start()", "def worker_init_fn(worker_id):\n np.random.seed(np.random.get_state()[1][0] + worker_id)", "def worker_init_fn(worker_id):\n np.random.seed(np.random.get_state()[1][0] + worker_id)", "def setup_run(args, config): \n\n token = jwtfile.read() # read the JWT so we can send it in the header\n api = config['API']\n if not args.cwl: # beginning of process\n # request to get available options\n hdrs = {'begin-setup': 'True', 'token': token}\n r = Request(api['setup-run-start'], headers=hdrs)\n try:\n resp = urlopen(r)\n # if marked as unverified, we must login first to get a new token\n except HTTPError as e:\n # TODO deal with plain 400\n if e.code in [401, 406]:\n print('Your token is unverified. Please log in for another token.')\n login(args, config) # trigger login method\n return\n else:\n print('Was expecting a 401 or 406, got a {}'.format(e.code))\n return\n # print out options to command line\n jsn = json.loads(resp.read().decode()).get('opts', None)\n print('\\nPlease select a CWL and job (.yml) file and re-run this command'\\\n ' with the `--cwl <cwl>` option:\\n')\n print('Available Options\\n----------------')\n for k, v in jsn.items():\n print('{}: {}'.format(k, v))\n return\n cwl_file = args.cwl # get the .cwl\n # ask for a job title so the sevrer can store this\n title = None\n while not title: # can't skip\n title = input('Please enter a title for the job you are creating: ')\n hdrs = {'cwl-input': 'True', 'cwl': cwl_file, 'token': token}\n pld = {'cwl': cwl_file, 'job_title': title}\n r = Request(api['setup-run-select-wkflow'], data=urlencode(pld).encode(), headers=hdrs, method='POST')\n try:\n resp = urlopen(r)\n # we expect a response to ask us questions\n except HTTPError as e:\n if e.getcode() in [401, 406]:\n print('Uh oh, looks like your token has expired. 
Please re-login.')\n elif e.getcode() == 404: # notfound\n print('A template couldn\\'t be properly generated for that Workflow.')\n else:\n print('Expected 401, 404, 406, got {}'.format(e.getcode()))\n return\n # invoke the questions prompt; iterate through each CWL key\n job_input_dict = {} # initialize empty dict to be updated\n # send the inputs back as JSON\n print('You requested the following Workflow: \\n')\n jsn = json.loads(resp.read().decode()) # bytes to str to dict\n wkflow = jsn.get('workflow', None)\n print(wkflow)\n print('\\n')\n _req = jsn.get('required') # dict, but only because we're using requests lib...\n _opt = jsn.get('optional')\n job_input_dict.update(ask_wkflow(_req, _opt))\n job_inputs = json.dumps(job_input_dict)\n d = {\n 'cwl': cwl_file, \n 'job_inputs': job_inputs,\n 'job_title': title, \n }\n h = {'token': token}\n r = Request(api['setup-run-job-input'], data=urlencode(d).encode(), headers=h, method='POST')\n try:\n resp = urlopen(r)\n except HTTPError as e:\n if e.getcode() in [401, 406]:\n print('Token expired; please re-login')\n else:\n print('Huh?')\n return\n jsn = json.loads(resp.read().decode())\n if jsn.get('errors', {}) == {}: # empty dict means no errors!\n print('Your JOB sucessfully validated.')\n else: # print all errors and ask person to do it again\n #print(r.json.get('errors'))\n print(jsn.get('errors'))\n return", "def test_setup_sync(self):\n worker_helper = WorkerHelper()\n self.assertEqual(worker_helper.setup(), None)", "def worker_init_fn(worker_id, num_workers, rank, seed):\n\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)", "def worker_init_fn(worker_id, num_workers, rank, seed):\n\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)", "def __init__(self, run, expname):\n logger.debug('Initializing worker {}.'.format(rank))\n self.run = int(run)\n self.expname = expname\n bcast_var = None\n dsname = comm.bcast(bcast_var, root=0)\n print(dsname)\n \n print('********** Start setup.')\n t0 = time.time()\n self.dsIdx = psana.DataSource(str(dsname))\n logger.info('********** Datasource on rank {}: {}s'.format(rank, time.time()-t0))\n self.dsIdxRun = next(self.dsIdx.runs())\n self.parse_detectors()\n logger.info('Rank {} has datasource and detectors.'.format(rank))\n print('********** Setup on rank {}: {}s'.format(rank, time.time()-t0))\n return", "def init_workers(dist_mode):\n if dist_mode == 'ddp-file':\n from distributed.torch import init_workers_file\n return init_workers_file()\n elif dist_mode == 'ddp-mpi':\n from distributed.torch import init_workers_mpi\n return init_workers_mpi()\n elif dist_mode == 'cray':\n from distributed.cray import init_workers_cray\n return init_workers_cray()\n return 0, 1", "def init_workers(dist_mode):\n if dist_mode == 'ddp-file':\n from distributed.torch import init_workers_file\n return init_workers_file()\n elif dist_mode == 'ddp-mpi':\n from distributed.torch import init_workers_mpi\n return init_workers_mpi()\n elif dist_mode == 'cray':\n from distributed.cray import init_workers_cray\n return init_workers_cray()\n return 0, 1", "def setUp(self) :\n self.longMessage = True\n logger = corAna.makeLogger(isTestMode=True,isMaster=True,isViewer=True,isServer=True,rank=0)\n isFirstWorker = True\n self.numTimes = 5\n numDataPointsThisWorker = 1\n\n self.workerData = corAna.WorkerData(logger, isFirstWorker, self.numTimes,\n numDataPointsThisWorker, addRemoveCallbackObject = None)", "def 
main():\n rclpy.init()\n\n worker_id = int(sys.argv[1])\n policy_type = sys.argv[2]\n node = WorkerSync(worker_id, 'worker_node', policy_type)\n\n try:\n executor = MultiThreadedExecutor()\n steps = 0\n\n while rclpy.ok():\n if node.flag.pull:\n node.pull(executor)\n\n elif node.flag.collect:\n steps = node.collect()\n\n elif node.flag.compute:\n node.compute(steps)\n\n elif node.flag.push:\n experiment_complete = node.push(executor)\n node.upkeep()\n\n # End experiment if passed number of max episodes.\n if experiment_complete:\n node.test(100)\n break\n\n except KeyboardInterrupt:\n pass\n\n # Destroy the node explicitly\n node.destroy_node()\n rclpy.shutdown()", "def main():\n tester = Tester()\n # parse args, load configuration and create all required objects.\n tester.setup_experiment()\n # GO!\n tester.run_experiment()", "def init_processes(rank, run_id, hosts, backend='gloo'):\n hosts = hosts.split(',')\n os.environ['MASTER_ADDR'] = hosts[0] # first worker is the master worker\n os.environ['MASTER_PORT'] = '29500'\n world_size = len(hosts)\n os.environ['WORLD_SIZE'] = str(world_size)\n os.environ['RANK'] = str(rank)\n dist.init_process_group(backend, rank=rank, world_size=world_size)\n run(rank, world_size, run_id)", "def create_worker(num_worker, server_ip, server_port):\n for i in range(int(num_worker)):\n print \"-- worker initializing --\"\n dask_server = Worker('tcp://'+server_ip+\":\"+str(server_port), loop=loop)\n dask_server.start()", "def run_worker(self):\n # TODO(xiejw): To allow execution framework to add train hooks.\n return self._start_distributed_training()", "def init_distributed(backend, world_size, rank, checkpoint_dir):\n # multi-gpu initial\n logger.debug(f'Initializing {world_size} workers')\n # Remove the init file from previous version\n init_dir = checkpoint_dir / 'shared_distributed'\n if init_dir.is_file():\n rm_file(init_dir)\n\n init_dir.mkdir(parents=True, exist_ok=True)\n init_file = init_dir / f'slurm-{slurm.job_id}'\n init_method = init_file.resolve().as_uri()\n dist.init_process_group(backend, world_size=world_size, rank=rank, init_method=init_method)\n logger.debug('Init finished')", "def _problem_run_experiments_initialise(self):\n pass", "def SetUp(self):\n parser = argparse.ArgumentParser()\n parser.add_argument('--task-hash')\n args, _ = parser.parse_known_args()\n\n self.task = self.CreateNewTask(\n isolated_hash=args.task_hash,\n dimensions={'os': 'Ubuntu-14.04'},\n idle_timeout_secs=90, connection_timeout_secs=90,\n verbosity=logging.DEBUG)\n self.task.Create()\n self.task.WaitForConnection()", "def test_training():\n assert init_engine('train', [\"config=first_run_test/default.yaml\"]).run() is None", "def main():\n parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('--config', required=True, help='Configuration file for run. Must be in shared_dir')\n parser.add_argument('-c', '--cluster_size', required=True, help='Number of workers desired in the cluster.')\n parser.add_argument('-s', '--sample_size', required=True, type=float, help='Size of the sample deisred in TB.')\n parser.add_argument('-t', '--instance_type', default='c3.8xlarge', help='e.g. 
m4.large or c3.8xlarge.')\n parser.add_argument('-n', '--cluster_name', required=True, help='Name of cluster.')\n parser.add_argument('--namespace', default='jtvivian', help='CGCloud NameSpace')\n parser.add_argument('--spot_price', default=0.60, help='Change spot price of instances')\n parser.add_argument('-b', '--bucket', default='tcga-data-cgl-recompute', help='Bucket where data is.')\n parser.add_argument('-d', '--shared_dir', required=True,\n help='Full path to directory with: pipeline script, launch script, config, and master key.')\n params = parser.parse_args()\n\n # Run sequence\n start = time.time()\n # Get number of samples from config\n with open(params.config, 'r') as f:\n num_samples = len(f.readlines())\n # Launch cluster and pipeline\n uuid = fix_launch(params)\n launch_cluster(params)\n ids = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')\n launch_pipeline(params)\n # Blocks until all workers are idle\n stop = time.time()\n # Collect metrics from cluster\n collect_metrics(ids, list_of_metrics, start, stop, uuid=uuid)\n # Apply \"Insta-kill\" alarm to every worker\n map(apply_alarm_to_instance, ids)\n # Kill leader\n logging.info('Killing Leader')\n leader_id = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-leader')[0]\n apply_alarm_to_instance(leader_id, threshold=5)\n # Generate Run Report\n avail_zone = get_avail_zone(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')[0]\n total_cost, avg_hourly_cost = calculate_cost(params.instance_type, ids[0], avail_zone)\n # Report values\n output = ['UUID: {}'.format(uuid),\n 'Number of Samples: {}'.format(num_samples),\n 'Number of Nodes: {}'.format(params.cluster_size),\n 'Cluster Name: {}'.format(params.cluster_name),\n 'Source Bucket: {}'.format(params.bucket),\n 'Average Hourly Cost: ${}'.format(avg_hourly_cost),\n 'Cost per Instance: ${}'.format(total_cost),\n 'Availability Zone: {}'.format(avail_zone),\n 'Start Time: {}'.format(datetime.isoformat(datetime.utcfromtimestamp(start))),\n 'Stop Time: {}'.format(datetime.isoformat(datetime.utcfromtimestamp(stop))),\n 'Total Cost of Cluster: ${}'.format(float(total_cost) * int(params.cluster_size)),\n 'Cost Per Sample: ${}'.format((float(total_cost) * int(params.cluster_size) / int(num_samples)))]\n with open(os.path.join(str(uuid) + '_{}'.format(str(datetime.utcnow()).split()[0]), 'run_report.txt'), 'w') as f:\n f.write('\\n'.join(output))\n # You're done!\n logging.info('\\n\\nScaling Test Complete.')", "def main() -> None:\n worker = Worker()\n worker.do_work()", "def main(args=sys.argv):\n try:\n # Set up logging.\n logging.basicConfig(level=logging.WARN)\n work_dir = args[1]\n assert os.path.exists(work_dir), \"First argument to lsf_runner.py must be a directory that exists\"\n do_work_on_compute_node(work_dir)\n except Exception as exc:\n # Dump encoded data that we will try to fetch using mechanize\n print(exc)\n raise", "def setUp(self):\n # obtain separate logs\n self.update_log_file_names()\n # Start the servers and agents\n super(MdtestBase, self).setUp()\n\n # Get the parameters for Mdtest\n self.mdtest_cmd = MdtestCommand()\n self.mdtest_cmd.get_params(self)\n self.processes = self.params.get(\"np\", '/run/mdtest/client_processes/*')\n self.manager = self.params.get(\"manager\", '/run/mdtest/*', \"MPICH\")", "def setup():\n global server, app\n \n galaxy_test_host = os.environ.get( 'GALAXY_TEST_HOST', default_galaxy_test_host )\n 
galaxy_test_port = os.environ.get( 'GALAXY_TEST_PORT', default_galaxy_test_port )\n \n start_server = 'GALAXY_TEST_EXTERNAL' not in os.environ \n \n if start_server:\n \n tempdir = tempfile.mkdtemp()\n file_path = os.path.join( tempdir, 'database', 'files' )\n os.makedirs( file_path )\n if 'GALAXY_TEST_DBURI' in os.environ:\n database_connection = os.environ['GALAXY_TEST_DBURI']\n else:\n database_connection = 'sqlite:///' + os.path.join( tempdir, 'database', 'universe.sqlite' )\n \n app = UniverseApplication( job_queue_workers = 5,\n template_path = \"templates\",\n database_connection = database_connection,\n file_path = file_path,\n tool_config_file = \"tool_conf.xml\",\n tool_path = \"tools\",\n test_conf = \"test.conf\",\n log_destination = \"stdout\",\n use_heartbeat=True )\n \n log.info( \"Embedded Universe application started\" )\n\n webapp = universe_wsgi.app_factory( dict(),\n use_translogger = False,\n app=app )\n\n server = galaxy.web.server.serve( webapp, dict(), \n host=galaxy_test_host, \n port=galaxy_test_port, \n start_loop=False )\n \n atexit.register( teardown )\n \n import threading\n t = threading.Thread( target=server.serve_forever )\n t.start()\n\n time.sleep( 2 )\n \n log.info( \"Embedded web server started\" )\n \n if app:\n # TODO: provisions for loading toolbox from file when using external server\n import test_toolbox\n test_toolbox.toolbox = app.toolbox\n else:\n from galaxy import tools\n import test_toolbox\n test_toolbox.toolbox = tools.ToolBox( 'tool_conf.xml', 'tools' )\n \n # Test if the server is up\n import httplib\n conn = httplib.HTTPConnection( galaxy_test_host, galaxy_test_port )\n conn.request( \"GET\", \"/\" )\n assert conn.getresponse().status == 200, \"Test HTTP server did not return '200 OK'\"\n \n os.environ['GALAXY_TEST_HOST'] = galaxy_test_host\n os.environ['GALAXY_TEST_PORT'] = galaxy_test_port\n os.environ['GALAXY_TEST_FILE_DIR'] = galaxy_test_file_dir", "def _init_remote():\r\n require('path', provided_by = [staging])\r\n\r\n create_project_dir()\r\n deploy_nosyncdb()\r\n create_virtualenv()\r\n install_requirements()\r\n create_db()\r\n create_secret_settings()\r\n syncdb()\r\n createsuperuser()\r\n install_site()\r\n reload()", "def simple_worker_loop() -> None:\n print('\\nSimple worker loop tutorial', flush=True)\n\n # the first thing to do at the start of any experiment is to initialize a few global parameters\n # these parameters are shared across the entire repo\n ps.init_globals(\n seed=0, # if None, the experiment is not seeded and would initialized differently each time\n registry=None, # if None, a registry is created and used\n # a registry does bookkeeping of all people and locations used in the experiment\n )\n\n # init locations\n home = ps.env.Home()\n work = ps.env.Office() # any subclass of BusinessLocation can be a workplace, e.g. 
Bar, Restaurant, Hospital, etc.\n\n # init a worker\n person = ps.env.Worker(\n person_id=ps.env.PersonID('worker', age=35), # person_id is a unique id for this person\n home=home.id, # specify the home_id that person is assigned to\n work=work.id, # specify the id of the person's workplace\n )\n\n # Init simulator\n sim = ps.env.PandemicSim(\n locations=[work, home], # a list of all locations\n persons=[person] # a list of all persons\n )\n # PandemicSim by default creates and uses randomized testing and an SEIR infection model\n\n # Iterate through steps in the simulator, where each step advances an hour\n for _ in trange(24, desc='Simulating hour'):\n sim.step()\n\n # Or iterate by advancing in days by calling step_day in the simulator\n for _ in trange(10, desc='Simulating day'):\n sim.step_day()\n\n # The above loop iterates the simulator with no movement restrictions\n # To impose restrictions, for example, Stage-2 of austin_regulations\n sim.impose_regulation(ps.sh.austin_regulations[2])\n\n # Calling step_day now will run the simulator under Stage-2 regulation\n for _ in trange(10, desc='Simulating day (Under Stage-2)'):\n sim.step_day()", "def run_experiment():\n pass", "def init(): \n\tset_verbosity()\n\t_set_threads()\n\t_set_heartbeat()\n\t#_set_storage()\n\t\n\tinit_targets()\n\t\n\tsend_heartbeat(start=True)\n\t\n\tinfo_msg = \"init plugin script\"\n\tlogger.info(info_msg)\n\n\tinit_plugin()\n\n\tinfo_msg = \"loaded %s plugin(s)\" %(len(kb.plugins.handle))\n\tlogger.info(info_msg)", "def setUp(self):\n self.spark, self.log, self.config = start_spark(app_name = \"test_etl_job\",\n files='configs/etl_config.json')", "def init_default_workload(self, pkg, args_file=None, tests_dir=None):\n self.workload = WorkLoad()\n config = self.__load_config_file()\n max_tests_per_app = self.get_config(\"tests_per_app\", 20)\n for i in range(0, max_tests_per_app):\n wk = DroidBotWorkUnit(self.executable_prefix)\n wk.config(id=None, **config)\n self.workload.add_unit(wk)", "def test_pre_cli_init(run):\n out, err = run(dork.cli.the_predork_cli, [], *(\"\", \"-i\", \"test\"))\n assert \"test\" in out, \\\n \"Failed run the dork.cli.the_predork_cli method: {err}\"\\\n .format(err=err)\n out, err = run(dork.cli.the_predork_cli, [], *(\"\", \"-i\", \":test\"))\n assert \"does not exist\" in out, \\\n \"Failed run the dork.cli.the_predork_cli method: {err}\"\\\n .format(err=err)", "def main_tester():\n create_tester_paths()\n _logger.info(' -- tester init done setting up paths and db file.')", "def seed_worker(worker_id):\n worker_seed = torch.initial_seed() % 2 ** 32\n np.random.seed(worker_seed)\n random.seed(worker_seed)", "def init(number_of_workers=0):\n global _wq, _use_workers\n\n if number_of_workers:\n _use_workers = number_of_workers\n else:\n _use_workers = benchmark_workers()\n\n # if it is best to use zero workers, then use that.\n _wq = WorkerQueue(_use_workers)", "def init_workflow():\n pass", "def _setup_test_infra(world_rank, world_size):\n os.environ['RANK'] = str(world_rank)\n os.environ['WORLD_SIZE'] = str(world_size)\n os.environ['MASTER_ADDR'] = '127.0.0.1'\n os.environ['MASTER_PORT'] = '29500'\n\n set_cuda_device_id(world_rank)\n\n dist.init_process_group(backend='nccl', world_size=world_size, rank=world_rank)", "def test_workers(self):\n wr = WorkflowRuner(4)\n try:\n wr.init_workers()\n assert wr.workers_available() == 4\n wr.acquire_worker()\n assert wr.workers_available() == 3\n wr.acquire_worker()\n assert wr.active_workers()\n wr.acquire_worker()\n assert 
wr.active_workers()\n wr.acquire_worker()\n assert not wr.active_workers()\n wr.release_worker()\n assert wr.active_workers()\n wr.release_worker()\n assert wr.workers_available() == 2\n wr.terminate_workers_and_clean_subprocesses()\n except:\n wr.terminate_workers_and_clean_subprocesses()", "def setUp(self):\n _, instance_path, shared_inputs = sys.argv\n app = lnt.server.ui.app.App.create_standalone(instance_path)\n app.testing = True\n self.client = app.test_client()\n self.shared_inputs = shared_inputs", "def setup(self, args={}):\n\n return Status.RUN", "def launch(self):\n self.register_env_creator()\n\n # All worker nodes will block at this step during training\n ray_cluster_config = self.ray_init_config()\n if not self.is_master_node:\n return\n\n # Start the driver on master node\n ray.init(**ray_cluster_config)\n experiment_config = self.get_experiment_config()\n experiment_config = self.customize_experiment_config(experiment_config)\n print(\"Running experiment with config %s\" % json.dumps(experiment_config, indent=2))\n run_experiments(experiment_config)\n\n all_wokers_host_names = self.get_all_host_names()[1:]\n # If distributed job, send TERMINATION_SIGNAL to all workers.\n if len(all_wokers_host_names) > 0:\n self.sage_cluster_communicator.create_s3_signal(TERMINATION_SIGNAL)", "def setUp(self):\n self.wes_server_process = subprocess.Popen(\n 'python {}'.format(os.path.abspath('wes_service/wes_service_main.py')),\n shell=True)\n time.sleep(5)", "def recognition_system__initialize_workers(opts, dictionary):\n global recognition_system__worker_cache\n recognition_system__worker_cache = {\"opts\": opts, \"dictionary\": dictionary}", "def _initialise_run(self) -> None:", "def initialize():\n\n with settings(prompts={'Password: ': 'test', 'Password (again): ': 'test'}):\n for user, group in USER_GROUPS:\n sudo(\"useradd %s -G %s,minv -g minv -N || true\" % (user, group))\n sudo(\"chmod g+rwx /home/%s\" % user)\n sudo('minv_ createuser %s -g %s' % (user, group), user=\"minv\")\n\n # upload script to create collections\n put(\n join(env.testdata_path, \"scripts/initial_collections.sh\"),\n \"\", mode=0755\n )\n sudo(\"cp initial_collections.sh /home/minv-app-administrator/\")\n\n # upload collection configs\n for conf in glob(join(env.testdata_path, \"configurations/*.conf\")):\n put(conf, \"\", mode=0444, use_sudo=True)\n sudo(\"cp %s /home/minv-app-administrator/\" % basename(conf))\n\n with cd(\"/home/minv-app-administrator/\"):\n sudo(\"chmod a+rx . 
*\")\n sudo(\n \"sh -l ./initial_collections.sh\",\n user=\"minv-app-administrator\"\n )", "def setup_test_wf(s3_prefix, paths_list, test_name, workdirs_to_keep=None):\n\n import os\n import shutil\n from CPAC.pipeline import nipype_pipeline_engine as pe\n from CPAC.utils.datasource import check_for_s3\n from CPAC.utils.interfaces.datasink import DataSink\n\n test_dir = os.path.join(os.getcwd(), test_name)\n work_dir = os.path.join(test_dir, \"workdir\")\n out_dir = os.path.join(test_dir, \"output\")\n\n if os.path.exists(out_dir):\n try:\n shutil.rmtree(out_dir)\n except:\n pass\n\n if os.path.exists(work_dir):\n for dirname in os.listdir(work_dir):\n if workdirs_to_keep:\n for keepdir in workdirs_to_keep:\n print(\"{0} --- {1}\\n\".format(dirname, keepdir))\n if keepdir in dirname:\n continue\n try:\n shutil.rmtree(os.path.join(work_dir, dirname))\n except:\n pass\n\n local_paths = {}\n for subpath in paths_list:\n s3_path = os.path.join(s3_prefix, subpath)\n local_path = check_for_s3(s3_path, dl_dir=test_dir)\n local_paths[subpath] = local_path\n\n wf = pe.Workflow(name=test_name)\n wf.base_dir = os.path.join(work_dir)\n wf.config['execution'] = {\n 'hash_method': 'timestamp',\n 'crashdump_dir': os.path.abspath(test_dir)\n }\n\n ds = pe.Node(DataSink(), name='sinker_{0}'.format(test_name))\n ds.inputs.base_directory = out_dir\n ds.inputs.parameterization = True\n\n return (wf, ds, local_paths)", "def init():\r\n if not env.hosts:\r\n _init_local()\r\n else:\r\n _init_remote()", "def launch(\n key_name: str,\n size: int,\n master_type: str,\n worker_type: str,\n image_id: str,\n owner: str,\n bucket_name: str,\n worker_command: str,\n config: str,\n cluster_name: Optional[str],\n workers_per_machine: int\n):\n\n if cluster_name is None:\n # credit for the words_alpha.txt file https://github.com/dwyl/english-words\n cluster_name = random.choice([word for word in open(\"words_alpha.txt\")])[:-1]\n storage_name = cluster_name + '_' + datetime.now().strftime('%Y%m%d%H%M%S') # name of the file storage on s3\n head_tags, worker_tags = get_tags(owner, cluster_name, storage_name) # tags for head and workers\n\n print(f'Launching cluster named ------------ {cluster_name} --------------------- (storage_name: {storage_name})')\n print(f'---------------------------------------------------------------------------------------------------')\n\n ec2 = boto3.resource(\"ec2\")\n as_client = boto3.client('autoscaling')\n\n # compress and upload the source code to the s3\n repo_name = _compress_folder()\n filename = str(pathlib.Path.cwd().parent / TAR_NAME)\n print(f'Uploading {filename} to {storage_name}')\n up(bucket_name, storage_name, filename)\n # down(bucket_name, storage_name, filename) # just to check file available\n print(f'Upload finished')\n\n download_untar = f'rm -f /home/ubuntu/{TAR_NAME} && ' \\\n f'aws s3 cp s3://{bucket_name}/{storage_name} /home/ubuntu/{TAR_NAME} && ' + \\\n f'rm -rf /home/ubuntu/{repo_name} && ' + \\\n f'mkdir /home/ubuntu/{repo_name} && ' + \\\n f'tar -xvf /home/ubuntu/{TAR_NAME} -C /home/ubuntu/'\n\n head_command = 'python -u es/experiment.py with ' + config + ' local=False'\n master_script = make_master_script(download_untar, make_master_run_script(head_command), repo_name)\n\n print(f'master will run this: -------\\n{master_script}\\n--------------')\n\n master_instance = ec2.create_instances(\n ImageId=image_id,\n KeyName=key_name,\n InstanceType=master_type,\n MinCount=1,\n MaxCount=1,\n SecurityGroupIds=[DEFAULT_SECURITY_GROUP],\n UserData=master_script,\n # 
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html\n TagSpecifications=[{'ResourceType': 'instance', 'Tags': head_tags}],\n IamInstanceProfile={'Name': 'redis_cluster_code_access'},\n # EbsOptimized=True,\n # Tags=head_tags\n )[0]\n\n master_ip = master_instance.private_ip_address\n\n print(f'Master launched, IP is: {master_ip}')\n scaling_client = boto3.client(\"autoscaling\")\n\n # try deleting the auto-scaling group and launch configuration of given name (should be done in the manage/kill)\n try:\n _ = scaling_client.delete_auto_scaling_group(\n AutoScalingGroupName=cluster_name,\n ForceDelete=True,\n )\n print(f'Auto scaling group named {cluster_name} deleted')\n # time.sleep(1)\n except:\n print(f'auto scaling group not found, skipping deletion')\n try:\n _ = scaling_client.delete_launch_configuration(\n LaunchConfigurationName=cluster_name\n )\n # time.sleep(1)\n print(f'Launch fonfig named {cluster_name} deleted')\n except:\n print(f'launch config not found, not deleting')\n\n worker_command = worker_command + f' --num_workers={workers_per_machine}'\n worker_script = make_worker_script(download_untar, make_worker_run_script(master_ip, worker_command), repo_name)\n print(f'Worker will run this: -------\\n{worker_script}\\n--------------')\n print(f'Creating launch configuration..')\n\n config_resp = as_client.create_launch_configuration(\n ImageId=image_id,\n KeyName=key_name,\n InstanceType=worker_type,\n LaunchConfigurationName=cluster_name,\n SecurityGroups=[DEFAULT_SECURITY_GROUP],\n UserData=worker_script,\n IamInstanceProfile=REDIS_CLUSTER_CODE_ACCESS,\n # EbsOptimized=True,\n )\n assert config_resp[\"ResponseMetadata\"][\"HTTPStatusCode\"] == 200\n\n print(f'Creating auto scaling group..')\n\n asg_resp = as_client.create_auto_scaling_group(\n AutoScalingGroupName=cluster_name,\n LaunchConfigurationName=cluster_name,\n MinSize=size,\n MaxSize=size,\n DesiredCapacity=size,\n AvailabilityZones=AVAILABILITY_ZONES,\n Tags=worker_tags,\n )\n assert asg_resp[\"ResponseMetadata\"][\"HTTPStatusCode\"] == 200\n\n print(f'\\nCluster created, name: {cluster_name}\\n')", "def setUp(self):\n self.p = Process(target = start_server)\n self.p.start()\n time.sleep(0.5)\n #init_fakeDB()\n time.sleep(0.5)", "def setUp(self):\n self.p = Process(target = start_server)\n self.p.start()\n time.sleep(0.5)\n #init_fakeDB()\n time.sleep(0.5)", "def setUp(self):\n self.p = Process(target = start_server)\n self.p.start()\n time.sleep(0.5)\n #init_fakeDB()\n time.sleep(0.5)", "def worker_init_reset_seed(worker_id: int):\n initial_seed = torch.initial_seed() % 2**31\n seed_all_rng(initial_seed + worker_id)", "def main():\n\n # Chdir into script directory so to properly resolve relative paths in configuration\n os.chdir(os.path.dirname(os.path.realpath(__file__)) + \"/\")\n\n # Disable proxy as we access localhost, both to avoid overhead and issues with proxy misconfiguration\n os.environ['NO_PROXY'] = '*'\n\n # Stop any GraphDB server that we previously started and is possibly still around due to script interruption/crash\n shell(f\"{cmd_graphdb} stopall\")\n\n # Generate synthetic traces, both for populating the repositories and for the {sf, sp, pf, pp} tests\n prepare_traces()\n \n # Generate central repositories (if needed)\n for size, approach in itertools.product(sizes, approaches):\n prepare_repository(size, approach)\n \n # Run experiments (if needed)\n for size, approach in itertools.product(sizes, approaches):\n run_experiments(size, approach)", "def _get_executor_init(self, 
workers):\n raise NotImplementedError", "def __init__( self, app, nworkers, **kwds ):\n super( LwrJobRunner, self ).__init__( app, nworkers, runner_param_specs=LWR_PARAM_SPECS, **kwds )\n self._init_worker_threads()\n galaxy_url = self.runner_params.galaxy_url\n if galaxy_url:\n galaxy_url = galaxy_url.rstrip(\"/\")\n self.galaxy_url = galaxy_url\n self.__init_client_manager()\n if self.runner_params.url:\n # This is a message queue driven runner, don't monitor\n # just setup required callback.\n self.client_manager.ensure_has_status_update_callback(self.__async_update)\n else:\n self._init_monitor_thread()", "def main():\n ensure_not_root()\n config.setup()\n model.init_db()\n manager.run()", "def run_dask_workers(n, cpu, memory, nvidia_gpu=0, scheduler_port=default_scheduler_port):\n worker_code = \"\"\"\nimport cdsw_dask_utils\nworker_proc = cdsw_dask_utils._run_dask_worker_in_worker(scheduler_port=%d)\n# Keep the CDSW worker alive until the Dask worker exits.\nprint(worker_proc.wait())\n\"\"\" % scheduler_port\n workers = cdsw.launch_workers(\n n=n, \\\n cpu=cpu, \\\n memory=memory, \\\n nvidia_gpu=nvidia_gpu, \\\n kernel=\"python3\", \\\n code=worker_code\n )\n \n try:\n ids = [worker['id'] for worker in workers]\n \n except KeyError as key : \n errors = [[worker['k8sMessage'],worker['engineId']] for worker in workers ]\n for error in errors : \n print('''worker {} failed to launch with err message : \n {}'''.format(error[1],error[0]))\n raise RuntimeError(\"failed to launch workers with err : \"+error[0])\n \n print(\"IDs\", ids)\n # Wait for the workers to start running, but don't wait for them to exit - \n # we want them to stay up for use as daemons.\n cdsw_await_workers.await_workers(ids, wait_for_completion=False)\n return workers", "def init_distributed(args: dict):\n\n if is_distributed(args):\n dist.init_process_group(backend=\"nccl\")\n torch.cuda.set_device(args.local_rank)", "def evaluate_system__initialize_workers(opts, dictionary, features, labels):\n global evaluate_system__worker_cache\n evaluate_system__worker_cache = {\"opts\": opts, \"dictionary\": dictionary, \"features\": features, \"labels\": labels}", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-w\", \"--workflow_path\", help='Path to workflow file')\n parser.add_argument(\"-g\", \"--galaxy\",\n dest=\"galaxy_url\",\n help=\"Target Galaxy instance URL/IP address (required \"\n \"if not defined in the tools list file)\",)\n parser.add_argument(\"-a\", \"--apikey\",\n dest=\"api_key\",\n help=\"Galaxy admin user API key (required if not \"\n \"defined in the tools list file)\",)\n args = parser.parse_args()\n\n gi = galaxy.GalaxyInstance(url=args.galaxy_url, key=args.api_key)\n\n with open(args.workflow_path, 'r') as wf_file:\n import_uuid = json.load(wf_file).get('uuid')\n existing_uuids = [d.get('latest_workflow_uuid') for d in gi.workflows.get_workflows()]\n if import_uuid not in existing_uuids:\n gi.workflows.import_workflow_from_local_path(args.workflow_path)", "def train_distributed():\n # Distributed stuff learnt from this repo: https://github.com/GoogleCloudPlatform/cloudml-dist-\n # mnist-example/blob/master/trainer/task.py\n\n # For Distributed TensorFlow\n env = json.loads(os.environ.get('TF_CONFIG', '{}'))\n cluster_info = env.get('cluster')\n cluster_spec = tf.train.ClusterSpec(cluster_info)\n task_info = env.get('task')\n job_name, task_index = task_info['type'], task_info['index']\n\n device_fn = tf.train.replica_device_setter(\n cluster=cluster_spec,\n 
worker_device='/job:%s/task:%d' % (job_name, task_index))\n\n print(\"Start job:%s, index:%d\" % (job_name, task_index))\n\n server = tf.train.Server(cluster_spec,\n job_name=job_name, task_index=task_index)\n\n # Start a parameter server node\n if job_name == 'ps':\n server.join()\n\n # Start a master/worker node\n if job_name == 'master' or job_name == 'worker':\n is_chief = (job_name == 'master')\n\n with tf.Graph().as_default() as graph: # TODO necessary?\n with tf.device(device_fn):\n # Prepare the data\n train_data, test_data, embeddings_file = prepare_data()\n\n # Create the model\n print(\"(%s,%d) Creating %d layers of %d units.\" %\n (job_name, task_index, FLAGS.num_layers, FLAGS.size))\n model = create_model(False)\n\n # Create train_dir\n if is_chief:\n if not tf.gfile.Exists(FLAGS.train_dir):\n tf.gfile.MkDir(FLAGS.train_dir)\n\n # TensorBoard summaries\n (test_loss, test_perplexity, bucket_loss_placeholders,\n bucket_perplexity_placeholders, summary, summary_writer) = create_summary_objects(graph)\n\n # Create supervisor\n init_op = tf.global_variables_initializer()\n\n # Create Supervisor. Disabling checkpoints and summaries, because we do that manually\n sv = tf.train.Supervisor(is_chief=is_chief, logdir=FLAGS.train_dir, init_op=init_op,\n init_fn=lambda session: after_init(session, model, embeddings_file),\n saver=model.saver, global_step=model.global_step,\n save_model_secs=0, save_summaries_secs=0, summary_op=None,\n summary_writer=None)\n\n with sv.managed_session(server.target) as sess:\n train(sess, model, train_data, test_data, summary, summary_writer, test_loss,\n test_perplexity, bucket_loss_placeholders, bucket_perplexity_placeholders,\n is_chief, job_name, task_index, sv.should_stop)\n sv.stop()", "def experiments_init(self):\n pass", "def run_job(in_args=sys.argv[1:]):\n print '>>>> condor_worker.py logging:'\n proc = Popen(['hostname', '-f'], stdout=PIPE, stderr=PIPE)\n out, err = proc.communicate()\n if err == '':\n print 'Running on', out\n else:\n raise RuntimeError(err)\n\n parser = WorkerArgParser(description=__doc__)\n args = parser.parse_args(in_args)\n print 'Args:'\n print args\n\n # Make sandbox area to avoid names clashing, and stop auto transfer\n # back to submission node\n # -------------------------------------------------------------------------\n tmp_dir = 'scratch'\n os.mkdir(tmp_dir)\n os.chdir(tmp_dir)\n try:\n # Copy files to worker node area from /users, /hdfs, /storage, etc.\n # ---------------------------------------------------------------------\n if args.copyToLocal:\n print 'PRE EXECUTION: Copy to local:'\n for (source, dest) in args.copyToLocal:\n print source, dest\n if source.startswith('/hdfs'):\n source = source.replace('/hdfs', '')\n check_call(['hadoop', 'fs', '-copyToLocal', source, dest])\n else:\n if os.path.isfile(source):\n shutil.copy2(source, dest)\n elif os.path.isdir(source):\n shutil.copytree(source, dest)\n\n print 'In current dir:'\n print os.listdir(os.getcwd())\n\n # Do setup of programs & libs, and run the program\n # We have to do this in one step to avoid different-shell-weirdness,\n # since env vars don't necessarily get carried over.\n # ---------------------------------------------------------------------\n print 'SETUP AND EXECUTION'\n setup_cmd = ''\n if args.setup:\n os.chmod(args.setup, 0555)\n setup_cmd = 'source ./' + args.setup + ' && '\n\n if os.path.isfile(os.path.basename(args.exe)):\n os.chmod(os.path.basename(args.exe), 0555)\n\n # run_cmd = args.exe\n\n # If it's a local file, we need to do ./ 
for some reason...\n # But we must determine this AFTER running setup script,\n # can't do it beforehand\n run_cmd = \"if [[ -e {exe} ]];then ./{exe} {args};else {exe} {args};fi\"\n run_args = ' '.join(args.args) if args.args else ''\n run_cmd = run_cmd.format(exe=args.exe, args=run_args)\n print 'Contents of dir before running:'\n print os.listdir(os.getcwd())\n print \"Running:\", setup_cmd + run_cmd\n check_call(setup_cmd + run_cmd, shell=True)\n\n print 'In current dir:'\n print os.listdir(os.getcwd())\n\n # Copy files from worker node area to /hdfs or /storage\n # ---------------------------------------------------------------------\n if args.copyFromLocal:\n print 'POST EXECUTION: Copy to HDFS:'\n for (source, dest) in args.copyFromLocal:\n print source, dest\n if dest.startswith('/hdfs'):\n source = os.path.realpath(source)\n dest = dest.replace('/hdfs', '')\n check_call(['hadoop', 'fs', '-copyFromLocal', '-f', source, dest])\n else:\n if os.path.isfile(source):\n shutil.copy2(source, dest)\n elif os.path.isdir(source):\n shutil.copytree(source, dest)\n finally:\n # Cleanup\n # ---------------------------------------------------------------------\n print 'CLEANUP'\n os.chdir('..')\n shutil.rmtree(tmp_dir)", "def main():\n grid_tester_cpu = GridTesterCPU()\n\n # parse args, load configuration and create all required objects.\n grid_tester_cpu.setup_grid_experiment()\n\n # GO!\n grid_tester_cpu.run_grid_experiment()", "def run_starter(self, expect_to_fail=False):", "def __initializeDistributed(self):\n self.raiseADebug(\"Initializing parallel InternalParallel: {0} Nodes: {1}\".format(self.runInfoDict['internalParallel'],len(self.runInfoDict['Nodes'])))\n if self._parallelLib != ParallelLibEnum.shared:\n # dashboard?\n db = self.runInfoDict['includeDashboard']\n # Check if the list of unique nodes is present and, in case, initialize the\n servers = None\n sys.path.append(self.runInfoDict['WorkingDir'])\n if 'UPDATE_PYTHONPATH' in self.runInfoDict:\n sys.path.extend([p.strip() for p in self.runInfoDict['UPDATE_PYTHONPATH'].split(\":\")])\n\n if _rayAvail:\n # update the python path and working dir\n olderPath = os.environ[\"PYTHONPATH\"].split(os.pathsep) if \"PYTHONPATH\" in os.environ else []\n os.environ[\"PYTHONPATH\"] = os.pathsep.join(set(olderPath+sys.path))\n\n # is ray instanciated outside?\n self.rayInstanciatedOutside = 'headNode' in self.runInfoDict\n self.daskInstanciatedOutside = 'schedulerFile' in self.runInfoDict\n if len(self.runInfoDict['Nodes']) > 0 or self.rayInstanciatedOutside or self.daskInstanciatedOutside:\n availableNodes = [nodeId.strip() for nodeId in self.runInfoDict['Nodes']]\n uniqueN = list(set(availableNodes))\n # identify the local host name and get the number of local processors\n localHostName = self.__getLocalHost()\n self.raiseADebug(\"Head host name is : \", localHostName)\n # number of processors\n nProcsHead = availableNodes.count(localHostName)\n if not nProcsHead:\n self.raiseAWarning(\"# of local procs are 0. 
Only remote procs are avalable\")\n self.raiseAWarning(f'Head host name \"{localHostName}\" /= Avail Nodes \"'+', '.join(uniqueN)+'\"!')\n self.raiseADebug(\"# of local procs : \", str(nProcsHead))\n self.raiseADebug(\"# of total procs : \", str(len(availableNodes)))\n if nProcsHead != len(availableNodes) or self.rayInstanciatedOutside or self.daskInstanciatedOutside:\n if self.rayInstanciatedOutside:\n address = self.runInfoDict['headNode']\n elif self.daskInstanciatedOutside:\n self.daskSchedulerFile = self.runInfoDict['schedulerFile']\n else:\n # create head node cluster\n # port 0 lets ray choose an available port\n address = self.__runHeadNode(nProcsHead, 0)\n if self._parallelLib == ParallelLibEnum.ray:\n # add names in runInfo\n self.runInfoDict['headNode'] = address\n self.raiseADebug(\"Head host IP :\", address)\n if self._parallelLib == ParallelLibEnum.dask:\n # add file in runInfo\n self.runInfoDict['schedulerFile'] = self.daskSchedulerFile\n self.raiseADebug('scheduler file :', self.daskSchedulerFile)\n ## Get servers and run ray or dask remote listener\n if self.rayInstanciatedOutside or self.daskInstanciatedOutside:\n servers = self.runInfoDict['remoteNodes']\n else:\n servers = self.__runRemoteListeningSockets(address, localHostName)\n # add names in runInfo\n self.runInfoDict['remoteNodes'] = servers\n if self._parallelLib == ParallelLibEnum.ray:\n ## initialize ray server with nProcs\n self._server = ray.init(address=address,log_to_driver=False,include_dashboard=db)\n elif self._parallelLib == ParallelLibEnum.dask:\n if self.daskSchedulerFile is not None:\n #handle multinode and prestarted configurations\n self._server = dask.distributed.Client(scheduler_file=self.daskSchedulerFile)\n else:\n #Start locally\n cluster = dask.distributed.LocalCluster()\n self._server = dask.distributed.Client(cluster)\n else:\n self.raiseAWarning(\"No supported server\")\n if self._parallelLib == ParallelLibEnum.ray:\n self.raiseADebug(\"NODES IN THE CLUSTER : \", str(ray.nodes()))\n else:\n if self._parallelLib == ParallelLibEnum.ray:\n self.raiseADebug(\"Executing RAY in the cluster but with a single node configuration\")\n self._server = ray.init(num_cpus=nProcsHead,log_to_driver=False,include_dashboard=db)\n elif self._parallelLib == ParallelLibEnum.dask:\n self.raiseADebug(\"Executing DASK in the cluster but with a single node configuration\")\n #Start locally\n cluster = dask.distributed.LocalCluster()\n self._server = dask.distributed.Client(cluster)\n else:\n self.raiseADebug(\"Initializing\", str(self._parallelLib), \"locally with num_cpus: \", self.runInfoDict['totalNumCoresUsed'])\n if self._parallelLib == ParallelLibEnum.ray:\n self._server = ray.init(num_cpus=int(self.runInfoDict['totalNumCoresUsed']),include_dashboard=db)\n elif self._parallelLib == ParallelLibEnum.dask:\n #handle local method\n cluster = dask.distributed.LocalCluster(n_workers=int(self.runInfoDict['totalNumCoresUsed']))\n self._server = dask.distributed.Client(cluster)\n else:\n self.raiseAWarning(\"parallellib creation not handled\")\n if self._parallelLib == ParallelLibEnum.ray:\n self.raiseADebug(\"Head node IP address: \", self._server.address_info['node_ip_address'])\n self.raiseADebug(\"Redis address : \", self._server.address_info['redis_address'])\n self.raiseADebug(\"Object store address: \", self._server.address_info['object_store_address'])\n self.raiseADebug(\"Raylet socket name : \", self._server.address_info['raylet_socket_name'])\n self.raiseADebug(\"Session directory : \", 
self._server.address_info['session_dir'])\n self.raiseADebug(\"GCS Address : \", self._server.address_info['gcs_address'])\n if servers:\n self.raiseADebug(\"# of remote servers : \", str(len(servers)))\n self.raiseADebug(\"Remote servers : \", \" , \".join(servers))\n else:\n self.raiseADebug(\"JobHandler initialized without ray\")\n else:\n ## We are just using threading\n self._server = None\n self.raiseADebug(\"JobHandler initialized with threading\")\n # ray or dask is initialized\n self.__isDistributedInitialized = True", "def seed_worker(_worker_id):\n worker_seed = torch.initial_seed() % 2 ** 32\n np.random.seed(worker_seed)\n random.seed(worker_seed)", "def setUpClass(cls):\n cls.run_mgr = runner(['start', 'execute'], ['stop'])\n cls.load_mgr = loader(verbose=False, recursive=True)\n cls.load_mgr.set_addon_dirs(['./data'])\n cls.load_mgr.load_addons()\n cls.cli_inst = cls.load_mgr.get_instance('CommandLineAddon')\n cls.fileio_inst = cls.load_mgr.get_instance('FileIOAddon')", "def run(args):\n pub_command = []\n sub_command = []\n\n script_dir = os.path.dirname(os.path.realpath(__file__))\n\n if not os.path.isfile(args.pub):\n print(f'Publisher executable file does not exists: {args.pub}')\n sys.exit(1)\n\n if not os.access(args.pub, os.X_OK):\n print(\n 'Publisher executable does not have execution permissions:'\n f'{args.pub}')\n\n pub_command.append(args.pub)\n\n if not os.path.isfile(args.sub):\n print(f'Subscriber executable file does not exists: {args.sub}')\n sys.exit(1)\n\n if not os.access(args.sub, os.X_OK):\n print(\n 'Subscriber executable does not have execution permissions:'\n f'{args.sub}')\n sys.exit(1)\n\n sub_command.append(args.sub)\n\n if args.xml_pub and args.xml_sub:\n if args.xml_pub:\n xml_file_pub = os.path.join(script_dir, args.xml_pub)\n if args.xml_sub:\n xml_file_sub = os.path.join(script_dir, args.xml_sub)\n else:\n print('Not provided xml configuration files.')\n sys.exit(1)\n\n pub_command.extend(['--xmlfile', xml_file_pub])\n sub_command.extend(['--xmlfile', xml_file_sub])\n\n pub_command.extend(['--seed', str(os.getpid())])\n sub_command.extend(['--seed', str(os.getpid())])\n\n if args.wait:\n pub_command.extend(['--wait', str(args.wait)])\n\n if args.samples:\n pub_command.extend(['--samples', str(args.samples)])\n sub_command.extend(['--samples', str(args.samples)])\n\n if len(args.servers) != len(args.xml_servers):\n print(\n 'Number of servers arguments should be equal to the number of xmls provided.')\n sys.exit(1)\n\n ds_procs = []\n for i in range(0, len(args.servers)):\n server_cmd = []\n\n if not os.path.isfile(args.servers[i]):\n print(f'Discovery server executable file does not exists: {args.servers[i]}')\n sys.exit(1)\n\n if not os.access(args.servers[i], os.X_OK):\n print(\n 'Discovery server executable does not have execution permissions:'\n f'{args.servers[i]}')\n sys.exit(1)\n\n server_cmd.append(args.servers[i])\n server_cmd.extend(['--xml-file', args.xml_servers[i]])\n server_cmd.extend(['--server-id', str(i)])\n\n ds_proc = subprocess.Popen(server_cmd)\n print(\n 'Running Discovery Server - commmand: ',\n ' '.join(map(str, server_cmd)))\n\n ds_procs.append(ds_proc)\n\n sub_proc = subprocess.Popen(sub_command)\n print(\n f'Running Subscriber - commmand: ',\n ' '.join(map(str, sub_command)))\n\n pub_proc = subprocess.Popen(pub_command)\n print(\n 'Running Publisher - commmand: ',\n ' '.join(map(str, pub_command)))\n\n try:\n outs, errs = sub_proc.communicate(timeout=15)\n except subprocess.TimeoutExpired:\n print('Subscriber 
process timed out, terminating...')\n sub_proc.kill()\n pub_proc.kill()\n [ds_proc.kill() for ds_proc in ds_procs]\n try:\n sys.exit(os.EX_SOFTWARE)\n except AttributeError:\n sys.exit(1)\n\n\n pub_proc.kill()\n ds_proc.kill()\n [ds_proc.kill() for ds_proc in ds_procs]\n try:\n sys.exit(os.EX_OK)\n except AttributeError:\n sys.exit(0)", "def start_master_worker():\n print(\"Starting master worker\")\n r = req.patch(f\"{MASTER_API_URL}/formation/worker\", json=API_PAYLOAD_1, headers=MASTER_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to start the worker dyno on master\")\n print(r.text)\n return False\n #wait a bit for the worker process to start\n print(\"Waiting a bit\")\n time.sleep(10)\n return True", "def start():\n events.bind('jobs.cancel', 'slicer_cli_web_worker', _manageWorkers)\n events.bind('jobs.schedule', 'slicer_cli_web_worker', _manageWorkers)\n events.bind('jobs.job.update.after', 'slicer_cli_web_worker', _manageWorkers)\n events.bind('model.job.save.after', 'slicer_cli_web_worker', _manageWorkers)\n\n events.bind('model.setting.save.after', 'slicer_cli_web_worker', _manageWorkersConfig)\n events.bind('model.file.save.after', 'slicer_cli_web_worker', _manageWorkersConfigFile)\n _manageWorkers(None)", "def test_server_functionality():\n worker_ids = []\n added_workers = []\n worker_updates = {}\n global_model_version = \"1\"\n worker_global_model_version = \"0\"\n os.environ[ADMIN_USERNAME] = 'admin'\n os.environ[ADMIN_PASSWORD] = 'str0ng_s3cr3t'\n admin_auth = ('admin', 'str0ng_s3cr3t')\n\n public_keys = []\n private_keys = []\n num_workers = 3\n worker_key_file_prefix = 'worker_key_file'\n for n in range(num_workers):\n private_key, public_key = gen_pair(worker_key_file_prefix + f'_{n}')\n private_keys.append(private_key.encode(encoder=HexEncoder))\n public_keys.append(public_key.encode(encoder=HexEncoder))\n\n def begin_server(server, server_adapter):\n server.start_server(server_adapter)\n\n def test_register_func_cb(id):\n worker_ids.append(id)\n\n def test_unregister_func_cb(id):\n worker_ids.remove(id)\n\n def test_ret_global_model_cb():\n return create_model_dict(\n msgpack.packb(\"Pickle dump of a string\"),\n global_model_version)\n\n def is_global_model_most_recent(version):\n return int(version) == global_model_version\n\n def test_rec_server_update_cb(worker_id, update):\n if worker_id in worker_ids:\n worker_updates[worker_id] = update\n return f\"Update received for worker {worker_id[0:WID_LEN]}.\"\n else:\n return f\"Unregistered worker {worker_id[0:WID_LEN]} tried to send an update.\"\n\n dcf_server_safe = DCFServer(\n register_worker_callback=test_register_func_cb,\n unregister_worker_callback=test_unregister_func_cb,\n return_global_model_callback=test_ret_global_model_cb,\n is_global_model_most_recent=is_global_model_most_recent,\n receive_worker_update_callback=test_rec_server_update_cb,\n server_mode_safe=True,\n key_list_file=None,\n load_last_session_workers=False\n )\n\n dcf_server_unsafe = DCFServer(\n register_worker_callback=test_register_func_cb,\n unregister_worker_callback=test_unregister_func_cb,\n return_global_model_callback=test_ret_global_model_cb,\n is_global_model_most_recent=is_global_model_most_recent,\n receive_worker_update_callback=test_rec_server_update_cb,\n server_mode_safe=False,\n key_list_file=None,\n load_last_session_workers=False\n )\n\n def get_worker_key(mode, i):\n if mode == 'safe': return public_keys[i].decode('utf-8')\n else: return 'dummy_public_key'\n\n def get_signed_phrase(mode, i, 
phrase=b'test phrase'):\n if mode == 'safe':\n return SigningKey(private_keys[i], encoder=HexEncoder).sign(phrase).hex()\n else: return 'dummy_signed_phrase'\n\n for server, mode in zip([dcf_server_unsafe, dcf_server_safe], ['unsafe', 'safe']):\n worker_ids = []\n added_workers = []\n worker_updates = {}\n\n stoppable_server = StoppableServer(host=get_host_ip(), port=8080)\n server_gl = Greenlet.spawn(begin_server, server, stoppable_server)\n sleep(2)\n\n returned_ids = []\n # Phase 1: register a set of workers using the admin API and test registration\n for i in range(num_workers):\n\n admin_registered_worker = {\n PUBLIC_KEY_STR: get_worker_key(mode, i),\n REGISTRATION_STATUS_KEY: True\n }\n response = requests.post(\n f\"http://{server.server_host_ip}:{server.server_port}/{WORKERS_ROUTE}\",\n json=admin_registered_worker, auth=admin_auth)\n\n added_worker_dict = json.loads(response.content.decode('utf-8'))\n assert len(worker_ids) == i + 1\n assert worker_ids[i] == added_worker_dict[WORKER_ID_KEY]\n added_workers.append(added_worker_dict[WORKER_ID_KEY])\n\n # Phase 2: Send updates and receive global updates for the registered workers\n # This should succeed\n worker_updates = {}\n for i in range(num_workers):\n # send updates\n signed_phrase = get_signed_phrase(mode, i, hashlib.sha256(msgpack.packb(\"Model update!!\")).digest())\n response = requests.post(\n f\"http://{server.server_host_ip}:{server.server_port}/\"\n f\"{RECEIVE_WORKER_UPDATE_ROUTE}/{added_workers[i]}\",\n files={WORKER_MODEL_UPDATE_KEY: zlib.compress(msgpack.packb(\"Model update!!\")),\n SIGNED_PHRASE: signed_phrase\n }\n ).content\n print(response)\n assert msgpack.unpackb(worker_updates[worker_ids[i]]) == \"Model update!!\"\n assert response.decode(\n \"UTF-8\") == f\"Update received for worker {added_workers[i][0:WID_LEN]}.\"\n\n # receive updates\n challenge_phrase = requests.get(f\"http://{server.server_host_ip}:{server.server_port}/\"\n f\"{CHALLENGE_PHRASE_ROUTE}/{added_workers[i]}\").content\n model_return_binary = requests.post(\n f\"http://{server.server_host_ip}:{server.server_port}/{RETURN_GLOBAL_MODEL_ROUTE}\",\n json={WORKER_ID_KEY: added_workers[i],\n SIGNED_PHRASE: get_signed_phrase(mode, i, challenge_phrase),\n LAST_WORKER_MODEL_VERSION: \"0\"}\n ).content\n model_return = msgpack.unpackb(zlib.decompress(model_return_binary))\n assert isinstance(model_return, dict)\n assert model_return[GLOBAL_MODEL_VERSION] == global_model_version\n assert msgpack.unpackb(model_return[GLOBAL_MODEL]) == \"Pickle dump of a string\"\n\n # Phase 3: Unregister workers.\n for i in range(num_workers):\n admin_registered_worker = {\n PUBLIC_KEY_STR: get_worker_key(mode, i),\n REGISTRATION_STATUS_KEY: False\n }\n response = requests.put(\n f\"http://{server.server_host_ip}:{server.server_port}/{WORKERS_ROUTE}\"\n f\"/{added_workers[i]}\",\n json=admin_registered_worker, auth=admin_auth)\n unreg_worker_dict = json.loads(response.content.decode('utf-8'))\n assert not unreg_worker_dict[REGISTRATION_STATUS_KEY]\n assert len(worker_ids) == 0\n\n # Phase 4: Try to send updates from the unregistered workers - this should fail\n worker_updates = {}\n for i in range(num_workers):\n # send updates\n signed_phrase = get_signed_phrase(mode, i, hashlib.sha256(msgpack.packb(\"Model update!!\")).digest())\n response = requests.post(\n f\"http://{server.server_host_ip}:{server.server_port}/\"\n f\"{RECEIVE_WORKER_UPDATE_ROUTE}/{added_workers[i]}\",\n files={WORKER_MODEL_UPDATE_KEY: zlib.compress(msgpack.packb(\"Model update!!\")),\n 
SIGNED_PHRASE: signed_phrase\n }\n ).content\n assert added_workers[i] not in worker_updates\n assert response.decode('UTF-8') == UNREGISTERED_WORKER\n\n # receive updates\n model_return_binary = requests.post(\n f\"http://{server.server_host_ip}:{server.server_port}/{RETURN_GLOBAL_MODEL_ROUTE}\",\n json={WORKER_ID_KEY: added_workers[i],\n LAST_WORKER_MODEL_VERSION: \"0\"}\n ).content\n assert response.decode('UTF-8') == UNREGISTERED_WORKER\n\n # Phase 5: Re-register existing workers.\n for i in range(num_workers):\n admin_registered_worker = {\n PUBLIC_KEY_STR: get_worker_key(mode, i),\n REGISTRATION_STATUS_KEY: True\n }\n response = requests.put(\n f\"http://{server.server_host_ip}:{server.server_port}/{WORKERS_ROUTE}\"\n f\"/{added_workers[i]}\",\n json=admin_registered_worker, auth=admin_auth)\n unreg_worker_dict = json.loads(response.content.decode('utf-8'))\n assert unreg_worker_dict[REGISTRATION_STATUS_KEY]\n\n # Phase 6: Send updates and receive global updates for the registered workers\n # This should succeed\n worker_updates = {}\n for i in range(num_workers):\n # send updates\n signed_phrase = get_signed_phrase(mode, i, hashlib.sha256(msgpack.packb(\"Model update!!\")).digest())\n response = requests.post(\n f\"http://{server.server_host_ip}:{server.server_port}/\"\n f\"{RECEIVE_WORKER_UPDATE_ROUTE}/{added_workers[i]}\",\n files={WORKER_MODEL_UPDATE_KEY: zlib.compress(msgpack.packb(\"Model update!!\")),\n SIGNED_PHRASE: signed_phrase\n }\n ).content\n assert msgpack.unpackb(worker_updates[worker_ids[i]]) == \"Model update!!\"\n assert response.decode(\n \"UTF-8\") == f\"Update received for worker {added_workers[i][0:WID_LEN]}.\"\n\n # receive updates\n challenge_phrase = requests.get(f\"http://{server.server_host_ip}:{server.server_port}/\"\n f\"{CHALLENGE_PHRASE_ROUTE}/{added_workers[i]}\").content\n model_return_binary = requests.post(\n f\"http://{server.server_host_ip}:{server.server_port}/{RETURN_GLOBAL_MODEL_ROUTE}\",\n json={WORKER_ID_KEY: added_workers[i],\n SIGNED_PHRASE: get_signed_phrase(mode, i, challenge_phrase),\n LAST_WORKER_MODEL_VERSION: \"0\"}\n ).content\n model_return = msgpack.unpackb(zlib.decompress(model_return_binary))\n assert isinstance(model_return, dict)\n assert model_return[GLOBAL_MODEL_VERSION] == global_model_version\n assert msgpack.unpackb(model_return[GLOBAL_MODEL]) == \"Pickle dump of a string\"\n\n # Phase 7: Delete existing workers.\n for i in range(num_workers):\n response = requests.delete(\n f\"http://{server.server_host_ip}:{server.server_port}/{WORKERS_ROUTE}\"\n f\"/{added_workers[i]}\", auth=admin_auth)\n message_dict = json.loads(response.content.decode('utf-8'))\n assert SUCCESS_MESSAGE_KEY in message_dict\n assert len(worker_ids) == 0\n\n # Phase 8: Try to send updates to the deleted workers - this should fail\n worker_updates = {}\n for i in range(num_workers):\n # send updates\n signed_phrase = get_signed_phrase(mode, i, hashlib.sha256(msgpack.packb(\"Model update!!\")).digest())\n response = requests.post(\n f\"http://{server.server_host_ip}:{server.server_port}/\"\n f\"{RECEIVE_WORKER_UPDATE_ROUTE}/{added_workers[i]}\",\n files={WORKER_MODEL_UPDATE_KEY: zlib.compress(msgpack.packb(\"Model update!!\")),\n SIGNED_PHRASE: signed_phrase\n }\n ).content\n assert added_workers[i] not in worker_updates\n assert response.decode('UTF-8') == INVALID_WORKER\n\n # receive updates\n challenge_phrase = requests.get(f\"http://{server.server_host_ip}:{server.server_port}/\"\n f\"{CHALLENGE_PHRASE_ROUTE}/{added_workers[i]}\").content\n 
model_return_binary = requests.post(\n f\"http://{server.server_host_ip}:{server.server_port}/{RETURN_GLOBAL_MODEL_ROUTE}\",\n json={WORKER_ID_KEY: added_workers[i],\n SIGNED_PHRASE: get_signed_phrase(mode, i, challenge_phrase),\n LAST_WORKER_MODEL_VERSION: \"0\"}\n ).content\n assert response.decode('UTF-8') == INVALID_WORKER\n\n # Phase 9: Try to register non-existent workers using the public API\n # - this should fail in the safe mode and succeed in the unsafe mode.\n for i in range(num_workers):\n registration_data = {\n PUBLIC_KEY_STR: get_worker_key(mode, i),\n SIGNED_PHRASE: get_signed_phrase(mode, i)\n }\n response = requests.post(\n f\"http://{server.server_host_ip}:{server.server_port}/{REGISTER_WORKER_ROUTE}\",\n json=registration_data)\n if mode == 'safe':\n assert response.content.decode('utf-8') == INVALID_WORKER\n else:\n assert 'unauthenticated' in response.content.decode('utf-8')\n\n # Phase 10 - for the safe mode try registering with the public and admin API\n # with invalid public keys - these should both fail\n if mode == 'safe':\n for i in range(num_workers):\n registration_data = {\n PUBLIC_KEY_STR: \"dummy public key\",\n SIGNED_PHRASE: get_signed_phrase(mode, i)\n }\n response = requests.post(\n f\"http://{server.server_host_ip}:{server.server_port}/{REGISTER_WORKER_ROUTE}\",\n json=registration_data)\n assert response.content.decode('utf-8') == INVALID_WORKER\n\n registration_data = {\n PUBLIC_KEY_STR: get_worker_key(mode, i),\n SIGNED_PHRASE: \"dummy signed phrase key\"\n }\n response = requests.post(\n f\"http://{server.server_host_ip}:{server.server_port}/{REGISTER_WORKER_ROUTE}\",\n json=registration_data)\n assert response.content.decode('utf-8') == INVALID_WORKER\n\n admin_registered_worker = {\n PUBLIC_KEY_STR: \"dummy public key\",\n REGISTRATION_STATUS_KEY: True\n }\n response = requests.post(\n f\"http://{server.server_host_ip}:{server.server_port}/{WORKERS_ROUTE}\",\n json=admin_registered_worker, auth=admin_auth)\n message = json.loads(response.content.decode('utf-8'))\n assert ERROR_MESSAGE_KEY in message\n key_short = \"dummy public key\"[0:WID_LEN]\n assert message[ERROR_MESSAGE_KEY] == \\\n f\"Unable to validate public key (short) {key_short} \" \\\n \"- worker not added.\"\n\n stoppable_server.shutdown()", "def _setup_environment_and_configs(args, appengine_path):\n clusterfuzz_dir = os.path.abspath(os.path.join(args.directory, 'clusterfuzz'))\n\n # Matches startup scripts.\n os.environ['PYTHONPATH'] = ':'.join([\n os.getenv('PYTHONPATH', ''),\n appengine_path,\n os.path.join(clusterfuzz_dir, 'src'),\n ])\n\n os.environ['ROOT_DIR'] = clusterfuzz_dir\n if not os.getenv('BOT_NAME'):\n os.environ['BOT_NAME'] = args.name\n\n os.environ['LD_LIBRARY_PATH'] = '{0}:{1}'.format(\n os.path.join(clusterfuzz_dir, 'src', 'clusterfuzz', '_internal',\n 'scripts'), os.getenv('LD_LIBRARY_PATH', ''))\n\n tmpdir = os.path.join(clusterfuzz_dir, 'bot_tmpdir')\n if not os.path.exists(tmpdir):\n os.mkdir(tmpdir)\n os.environ['TMPDIR'] = tmpdir\n os.environ['BOT_TMPDIR'] = tmpdir\n\n os.environ['KILL_STALE_INSTANCES'] = 'False'\n os.environ['LOCAL_DEVELOPMENT'] = 'True'\n os.environ['DATASTORE_EMULATOR_HOST'] = constants.DATASTORE_EMULATOR_HOST\n os.environ['PUBSUB_EMULATOR_HOST'] = constants.PUBSUB_EMULATOR_HOST\n os.environ['APPLICATION_ID'] = constants.TEST_APP_ID\n\n if not os.getenv('UNTRUSTED_WORKER'):\n local_gcs_buckets_path = os.path.abspath(\n os.path.join(args.server_storage_path, 'local_gcs'))\n assert os.path.exists(local_gcs_buckets_path), (\n 'Server 
storage path not found, make sure to start run_server with '\n 'the same storage path.')\n\n os.environ['LOCAL_GCS_BUCKETS_PATH'] = local_gcs_buckets_path\n\n if args.android_serial:\n if not os.getenv('OS_OVERRIDE'):\n os.environ['OS_OVERRIDE'] = 'ANDROID'\n\n os.environ['ANDROID_SERIAL'] = args.android_serial", "def run(config):\n\tlog.debug('-- in example.py')\n#\tgetWLSMachineandandExecuteSecondary(config)\n#\t__createPegaConfigCommand(config)\n#\tcreateUsers(config)\n#\t__connectAdminServer(config)\n\tconnectAdminServerOverSSL(config)", "def __init__(self,server_list):\n self.workers=[]\n self.worker_by_name={}\n worker_id = 1\n for host,port in server_list:\n # Add the uid here can help with port conflicts, but only works\n # on Unix clusters. We really need to work out a daemon service\n # model that makes the port mess transparent.\n port = port #+ os.getuid()\n new_worker = sync_cluster.standard_sync_client(host,port,worker_id)\n self.workers.append(new_worker)\n self.worker_by_name[host] = new_worker\n worker_id = worker_id + 1", "def test_init_remote(isolated_runner, project_init):\n data, commands = project_init\n\n # create the project\n new_project = Path(data[\"test_project\"])\n assert not new_project.exists()\n result = isolated_runner.invoke(\n cli, commands[\"init_test\"] + commands[\"id\"] + commands[\"force\"], commands[\"confirm\"]\n )\n assert 0 == result.exit_code, format_result_exception(result)\n assert new_project.exists()\n assert (new_project / \".renku\").exists()\n assert (new_project / \".renku\" / \"renku.ini\").exists()\n assert (new_project / \".renku\" / \"metadata\").exists()", "def run_custom_training_tests():\n test_custom_training()\n test_custom_distributed_training()\n test_custom_multimodel_training()\n test_custom_distributed_multimodel_training()", "def _seed_npy_before_worker_init(worker_id, seed, worker_init_fn=None):\n try:\n import numpy as np\n np.random.seed(seed + worker_id)\n except ImportError:\n pass\n\n if worker_init_fn is not None:\n return worker_init_fn(worker_id)", "def worker(ctx_obj):\n execute(start_worker_command(settings=ctx_obj['settings']))", "def setup_package():\n\n global TEST_WORKSPACE\n TEST_WORKSPACE = env.get_workspace('authentication')\n\n os.environ['TEST_WORKSPACE'] = TEST_WORKSPACE\n\n test_config = {}\n\n # Setup environment variables for the test cases.\n host_port_cfg = {'viewer_host': 'localhost',\n 'viewer_port': env.get_free_port(),\n 'viewer_product': 'authentication'}\n\n test_env = env.test_env(TEST_WORKSPACE)\n\n codechecker_cfg = {\n 'check_env': test_env,\n 'workspace': TEST_WORKSPACE,\n 'checkers': []\n }\n\n codechecker_cfg.update(host_port_cfg)\n\n codechecker_cfg['run_names'] = []\n\n test_config['codechecker_cfg'] = codechecker_cfg\n\n # Export configuration for the tests.\n env.export_test_cfg(TEST_WORKSPACE, test_config)\n\n # Enable authentication and start the CodeChecker server.\n env.enable_auth(TEST_WORKSPACE)\n print(\"Starting server to get results\")\n _start_server(codechecker_cfg, test_config, False)", "def train_setup(additional_arg_parser=None, args=None):\n if args is None:\n args = parse_input_arguments(additional_arg_parser)\n if args.do_eval or args.do_test:\n args.load_pretrained = True\n if args.load_pretrained and args.pretrained_checkpoint == '':\n raise ValueError('Must provide --pretrained_checkpoint when using --load_pretrained')\n if args.eval_batch_size == 0:\n args.eval_batch_size = args.train_batch_size\n if args.load_pretrained:\n args.save_dir = 
\"/\".join(args.pretrained_checkpoint.split('/')[:-1])\n else:\n args.save_dir = get_save_dir(args.save_dir, args.run_name)\n if not os.path.exists(args.save_dir):\n os.makedirs(args.save_dir)\n args.start_epoch = 0\n args.start_step = 0\n\n split_name = 'train' if args.do_train else 'validation' if args.do_eval else 'test'\n logger = get_logger(args.save_dir, 'log_train')\n\n logger.info(\"local_rank: %d, node_index: %d, gpu_per_node: %d\"%(args.local_rank, args.node_index, args.gpu_per_node))\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend='nccl')\n args.local_rank += args.node_index * args.gpu_per_node\n args.n_gpu = 1\n args.device = device\n\n logger.info(\"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s, world size: %s\",\n args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16,\n torch.distributed.get_world_size() if args.local_rank != -1 else 1)\n\n set_seed(args)\n\n return args, logger", "def setup(self, run, run_id):\n\n raise NotImplementedError", "def init_nibetaseries_participant_wf(\n estimator, atlas_img, atlas_lut, bids_dir,\n derivatives_pipeline_dir, exclude_description_label, fir_delays,\n hrf_model, high_pass,\n output_dir, run_label, selected_confounds, session_label, smoothing_kernel,\n space_label, subject_list, task_label, description_label, work_dir,\n ):\n # setup workflow\n nibetaseries_participant_wf = pe.Workflow(name='nibetaseries_participant_wf')\n nibetaseries_participant_wf.base_dir = os.path.join(work_dir, 'NiBetaSeries_work')\n os.makedirs(nibetaseries_participant_wf.base_dir, exist_ok=True)\n\n # reading in derivatives and bids inputs as queryable database like objects\n layout = BIDSLayout(bids_dir, derivatives=derivatives_pipeline_dir)\n\n for subject_label in subject_list:\n\n # collect the necessary inputs for both collect data\n subject_data = collect_data(layout,\n subject_label,\n task=task_label,\n run=run_label,\n ses=session_label,\n space=space_label,\n description=description_label)\n # collect files to be associated with each preproc\n brainmask_list = [d['brainmask'] for d in subject_data]\n confound_tsv_list = [d['confounds'] for d in subject_data]\n events_tsv_list = [d['events'] for d in subject_data]\n preproc_img_list = [d['preproc'] for d in subject_data]\n bold_metadata_list = [d['metadata'] for d in subject_data]\n\n single_subject_wf = init_single_subject_wf(\n estimator=estimator,\n atlas_img=atlas_img,\n atlas_lut=atlas_lut,\n bold_metadata_list=bold_metadata_list,\n brainmask_list=brainmask_list,\n confound_tsv_list=confound_tsv_list,\n events_tsv_list=events_tsv_list,\n fir_delays=fir_delays,\n hrf_model=hrf_model,\n high_pass=high_pass,\n name='single_subject' + subject_label + '_wf',\n output_dir=output_dir,\n preproc_img_list=preproc_img_list,\n selected_confounds=selected_confounds,\n smoothing_kernel=smoothing_kernel,\n )\n\n single_subject_wf.config['execution']['crashdump_dir'] = (\n os.path.join(output_dir, \"sub-\" + subject_label, 'log')\n )\n\n for node in single_subject_wf._get_all_nodes():\n node.config = deepcopy(single_subject_wf.config)\n\n 
nibetaseries_participant_wf.add_nodes([single_subject_wf])\n\n return nibetaseries_participant_wf", "def setup_script(self, *args, **kwargs):\n pass", "def run_experiment(experiment: str):\n print_color(\"***************************************************************************************************\", bcolors.OKBLUE)\n print_color(f\"* {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} Experiment: {experiment}\", bcolors.OKBLUE)\n print_color(\"***************************************************************************************************\", bcolors.OKBLUE)\n\n experiment_file = experiment + \".yaml\"\n\n # Set namespace to check\n with open(f\"./litmus/{experiment_file}\") as f:\n spec = yaml.load(f, Loader=yaml.FullLoader)\n result_name = spec['metadata']['name']\n namespace = spec['metadata']['namespace']\n\n print_color(f\"Running Litmus ChaosEngine Experiment {experiment_file} in namespace {namespace}\")\n print_color(f\"Deploying {experiment_file}...\")\n run_shell(f\"kubectl delete chaosengine {result_name} -n {namespace}\")\n run_shell(f\"kubectl create -f ./litmus/{experiment_file} -n {namespace}\")\n\n # Check status of experiment execution\n startTime = datetime.now()\n print_color(f\"{startTime.strftime('%Y-%m-%d %H:%M:%S')} Running experiment...\")\n expStatusCmd = \"kubectl get chaosengine \" + result_name + \" -o jsonpath='{.status.experiments[0].status}' -n \" + namespace\n run_shell(expStatusCmd)\n logs_cmd = f\"kubectl logs --since=10s -l name={experiment} -n {namespace}\"\n print(f\"\\n{bcolors.OKGREEN}//** Experiment Logs ({logs_cmd}) **//\\n\\n\")\n try:\n while subprocess.check_output(expStatusCmd, shell=True).decode('unicode-escape') != \"Completed\":\n os.system(logs_cmd)\n os.system(\"sleep 10\")\n\n print(f\"\\n\\n//** End of Experiment Logs **//{bcolors.ENDC}\\n\")\n\n # View experiment results\n run_shell(f\"kubectl describe chaosresult {result_name}-{experiment} -n {namespace}\")\n\n except:\n print_color(\"User has cancelled script execution.\", bcolors.FAIL)\n sys.exit(2)\n\n # Store Experiment Result\n status = subprocess.check_output(\"kubectl get chaosresult \" + result_name + \"-\" + experiment + \" -n \" + namespace + \" -o jsonpath='{.status.experimentstatus.verdict}'\", shell=True).decode('unicode-escape')\n return ExperimentResult(experiment, status, startTime)", "def update_worker():\n from test import get_remote_runner\n runner = get_remote_runner()\n runner.run(\"python2.7 /vagrant/bootstrap_lxc_manager.py --update_only=True\")", "def setUp(self):\n self.p = Process(target = start_server)\n self.p.start()\n time.sleep(0.5)", "def setup_params():\n Script.fullname = os.path.splitext(os.path.abspath(__file__))[0]\n Script.basename = os.path.basename(__file__)\n Script.name = os.path.splitext(Script.basename)[0]\n Script.service = modUtils.check_service(Script.name)", "def start_test_run(self, request):\n request.worker.initialize_test_run(request.message.tests,\n request.message.run_data)\n\n return SuccessReply()", "def main(argv):\n parser = argparse.ArgumentParser(description=\"\"\"Bootstrap CI Scripts\"\"\")\n parser.add_argument(\"-d\", \"--directory\",\n type=str,\n required=True,\n help=(\"\"\"Directory to store language runtimes, \"\"\"\n \"\"\"scripts and other script details in\"\"\"))\n parser.add_argument(\"-s\", \"--script\",\n type=str,\n help=\"\"\"Script to pass control to\"\"\")\n parser.add_argument(\"-e\", \"--eval-output\",\n type=str,\n choices=[\n \"bash\",\n \"powershell\"\n ],\n help=\"\"\"Evaluate output in 
shell\"\"\")\n parser.add_argument(\"-p\", \"--print-to\",\n type=str,\n help=\"\"\"Where to print output script to\"\"\")\n parser.add_argument(\"-r\", \"--scripts-directory\",\n type=str,\n help=(\"\"\"Directory where scripts are already \"\"\"\n \"\"\"stored in\"\"\"))\n parser.add_argument(\"--keep-scripts\",\n action=\"store_true\",\n help=\"\"\"Don't remove stale scripts.\"\"\")\n args, remainder = parser.parse_known_args(argv)\n\n print_script_to, print_messages_to = _determine_outputs(args.print_to)\n\n with closing(print_script_to):\n parent_shell = construct_parent_shell(args.eval_output,\n print_script_to)\n container = ContainerDir(parent_shell,\n stale_check=_stale_check_url(args),\n **(vars(args)))\n util = container.fetch_and_import(\"util.py\")\n # suppress(unused-attribute)\n util.PRINT_MESSAGES_TO = print_messages_to\n bootstrap_script = container.script_path(\"bootstrap.py\").fs_path\n bootstrap_script_components = bootstrap_script.split(os.path.sep)\n scripts_path = os.path.sep.join(bootstrap_script_components[:-2])\n\n # Overwrite CONTAINER_DIR in the output script, but not\n # for our own invocation, we'll need the parent instance\n # if we're actually in a test\n parent_shell.overwrite_environment_variable(\"CONTAINER_DIR\",\n container.path())\n _set_ci_environment_variables(parent_shell)\n\n _define_script_command(\"polysquare_run\",\n parent_shell,\n bootstrap_script,\n container.path(),\n scripts_path,\n None)\n _define_script_command(\"polysquare_cleanup\",\n parent_shell,\n bootstrap_script,\n container.path(),\n scripts_path,\n \"clean.py\")\n\n # Done, pass control to the script we're to run\n container.fetch_and_import(args.script).run(container,\n util,\n parent_shell,\n argv=remainder)\n\n # Print a final new line so that active messages don't get\n # truncated.\n util.print_message(\"\\n\")\n\n if container.return_code() != 0:\n parent_shell.exit(container.return_code())\n\n return container.return_code()" ]
[ "0.7326153", "0.70049655", "0.6345922", "0.6308314", "0.6258606", "0.6253948", "0.6138363", "0.6117165", "0.61046404", "0.60405153", "0.6030067", "0.6017148", "0.6017148", "0.60138994", "0.5929832", "0.5922357", "0.5922357", "0.5841495", "0.5836372", "0.5836372", "0.5832595", "0.581463", "0.5772954", "0.5771031", "0.575313", "0.56777894", "0.56747687", "0.5659891", "0.5640645", "0.5577385", "0.55719376", "0.5561494", "0.55428886", "0.5523706", "0.55147123", "0.5478336", "0.54707277", "0.5460361", "0.54588115", "0.5446822", "0.54451716", "0.5440989", "0.54398894", "0.5438688", "0.5432235", "0.5431733", "0.54297745", "0.5427318", "0.54260164", "0.5424549", "0.54223794", "0.54190284", "0.54179436", "0.5395773", "0.53948766", "0.5390663", "0.5376367", "0.5371886", "0.53663445", "0.53663445", "0.53663445", "0.5358736", "0.535767", "0.53541", "0.534588", "0.5337961", "0.53338754", "0.53290564", "0.53285676", "0.5328094", "0.532746", "0.53268576", "0.5322337", "0.53158826", "0.5309442", "0.5306513", "0.530635", "0.5299333", "0.5293483", "0.5288506", "0.5288397", "0.5285047", "0.52832067", "0.5282402", "0.52789384", "0.52752477", "0.5266647", "0.5259705", "0.5257654", "0.52531", "0.52521384", "0.52446413", "0.52445954", "0.5237309", "0.52364147", "0.5223839", "0.52212805", "0.5216481", "0.52128303", "0.52115643" ]
0.7815065
0
Return the next power of 10
def nextpow10(n):
    if n == 0:
        return 0
    else:
        return math.ceil(math.log10(abs(n)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_pow_two(n):\n i = 1\n while i < n:\n i = i << 1\n return i", "def _next_power_of_two(self, n):\n if n == 0:\n return 1\n return int(2 ** math.ceil(math.log2(n)))", "def next_power2(num):\n return 2 ** int(np.ceil(np.log2(num)))", "def next_power_2(x: int) -> int:\n return 0 if x < 1 else shift_left_bit_length(x)", "def nextpow2(i):\n n = 1\n while n < i:\n n *= 2\n return n", "def _next_power_of_2(x):\n return 1 if x == 0 else 2**(x - 1).bit_length()", "def non_recursive_power(base, power):\n result = 1\n i = 0\n while i < power:\n result = result * base\n i = i+1\n\n return result", "def nextpow2(x):\n return int(numpy.ceil(numpy.log2(numpy.abs(x))))", "def nextPowerOf2(n):\n count = 0; \n \n # First n in the below \n # condition is for the \n # case where n is 0 \n if (n and not(n & (n - 1))): \n return n \n \n while( n != 0): \n n >>= 1\n count += 1\n \n return 1 << count;", "def recursive_power(base, power):\n if power == 0:\n return 1\n else:\n return base*recursive_power(base, power-1)", "def self_powers():\n return sum([i ** i for i in range(1, 1001)]) % (10 ** 10)", "def nextpow2(x):\n log2_n = math.ceil(math.log2(x))\n n = 2 ** log2_n\n return n", "def next_p2(num):\n rval = 1\n while rval < num:\n rval <<= 1\n return rval", "def _pow_(self, n):\n assert n > 0\n return generic_power(self, n)", "def next_p2 (num):\n rval = 1\n while rval<num:\n rval <<= 1\n return rval", "def nextpow2(n):\n m_f = np.log2(n)\n m_i = np.ceil(m_f)\n return np.int(2 ** m_i)", "def power(x, n):\n power = 1\n for i in range(abs(n)):\n power = multiply(power, x) \n return power", "def power(base, exp):\n\tans = [1]\n\twhile exp > 0:\n\t\tcarry = 0\n\t\tfor i in xrange(len(ans)):\n\t\t\tmult = ans[i] * base + carry\n\t\t\tans[i] = mult % 10\n\t\t\tcarry = mult / 10\n\t\twhile carry > 0:\n\t\t\tans.append(carry % 10)\n\t\t\tcarry /= 10\n\t\texp -= 1\n\treturn ans", "def power(number, power):\n return math.pow(number, power)", "def __pow__(self,n):\r\n\t\t\r\n\t\t# take power\r\n\t\tp = self.power(n)\r\n\t\t\r\n\t\treturn p", "def _find_nearest_power_of_two(x):\n\n return 1 << (x - 1).bit_length()", "def nextpow2(longitud_malla):\r\n n = 1\r\n while n < longitud_malla: n *= 2\r\n return n", "def improve_power(x):\r\n for i in range(2,base(x)//2+1):\r\n if(base(x)%i==0):\r\n temp=base(x)\r\n n=0\r\n flag=True\r\n while(temp>1):\r\n if(temp%i!=0):\r\n flag=False\r\n break\r\n else:\r\n temp=temp/i\r\n n=n+1\r\n if (flag):\r\n return(make_power(i,n*power(x)))\r\n return (make_power(x(0), x(1)))", "def nextpow2(x):\n return np.ceil(np.log2(np.abs(x)))", "def next_po2(n) -> int:\n if not n:\n return 1\n if is_po2(n):\n # n is a power of 2\n return n\n return 1 << (n - 1).bit_length()", "def problem_48():\n\n return int(str(sum(x**x for x in range(1, 1001)))[-10:])", "def power(x, n):\n value = 1\n for i in range(n):\n value = multiply(value, x)\n return value", "def power(x, n):\n if n == 0:\n return 1\n result = power(x, math.floor(n / 2))\n if n % 2 > 0:\n return x * result * result\n else:\n return result * result", "def prevpow2(i):\n n = 1\n while 2*n <= i: n *= 2\n return n", "def power(base, exponent):\n return base ** exponent", "def _pow_10_round(n, up=True):\n if up:\n return 10 ** math.ceil(math.log(n, 10))\n else:\n return 10 ** math.floor(math.log(n, 10))", "def power(x): \r\n return x(1)", "def mod_power(x, a, m):\n r = 1\n x = x % m\n while a > 0:\n if a & 1:\n r = (r * x) % m\n a >>= 1\n x = (x * x) % m\n return r", "def power_of_ten(term):\n if term is None:\n return True\n _, factor 
= term\n return round_to_ten(factor) == factor", "def pow2(limit):\n i = 0\n bin_num = 1\n while bin_num <= limit:\n yield bin_num\n i += 1\n bin_num = 2 ** i", "def modifier(base):\n return int(math.floor((base - 10) / 2))", "def power(x, m, n):\n a = 1\n while m > 0:\n if m % 2 == 1:\n a=(a*x)%n\n x=(x*x)%n\n m//=2\n return a", "def power(num,pwr):\n if pwr is 0:\n return 1\n\n if pwr < 0 :\n return \"not supported by this function.\"\n\n if num != 0 and pwr >= 0:\n return num * power(num,pwr-1)", "def power(num, exponent):\n return num ** exponent", "def power_str(num, powr):\r\n \r\n orig_num = num\r\n \r\n for i in range(powr-1):\r\n \r\n num = multiply_str(num, orig_num)\r\n\r\n return num", "def last_n_digits(num, n):\n return num%(10**n)", "def power(num, exponent):\n power = num ** exponent\n return power", "def getrandbits(k: int) -> int:\n ...", "def power1(x, n):\n if n == 0:\n return 1\n return x * power1(x, n-1)", "def rand10():\n res = 40\n while res >= 40:\n res = 7 * (rand7() - 1) + (rand7() - 1)\n return res % 10 + 1", "def fast_power(a, n, m): # (a ^ n) % m\n result = 1\n value = a\n power = n\n while power > 0:\n if power % 2 == 1:\n result = result * value\n result %= m\n value = value * value\n value %= m\n power = power//2\n return result", "def zeropad_to_power_of_2(self):\n # https://stackoverflow.com/questions/14267555/find-the-smallest-power-of-2-greater-than-n-in-python\n n = 2 ** (self.nt - 1).bit_length()\n return self.zeropad(0, n - self.nt)", "def _power(self, a, n, m):\n res = 1\n while n != 0:\n if n % 2 != 0:\n res *= a\n res %= m\n n -= 1\n else:\n a *= a\n a %= m\n n //= 2\n return res", "def nextpow2(x):\n res = np.ceil(np.log2(x))\n return res.astype('int') #we want integer values only but ceil gives float ", "def powerize(n, p):\n return sum(int(d)**p for d in str(n))", "def pow_mod_p(self, base, power, mod):\n if power == 0:\n assert(base == 0)\n return 1\n res = 1\n base = base % mod\n while power != 0:\n if power % 2 == 1:\n res = res * base % mod\n base = (base * base) % mod\n power //= 2\n return res", "def power1(x, n):\n if n == 0:\n return 1\n else:\n return x * power1(x, n - 1)", "def _nearest_bigger_power_of_two(x: int) -> int:\n y = 2\n while y < x:\n y *= 2\n return y", "def round_down_to_power_of_two(n):\n\n\tfor i in range(30, 0, -1):\n\t\tp = 1 << i\n\t\tif p <= n:\n\t\t\treturn p\n\n\treturn -1", "def modExponent(self, base, power):\n result = 1\n power = int(power)\n base = base % self.mod\n while power > 0:\n if power & 1:\n # self.modReduce(result * base)\n result = result * base % self.mod\n base = base * base % self.mod # self.modReduce(base * base)\n power = power >> 1\n return result", "def __ipow__(self,n):\r\n\t\t\r\n\t\treturn self.power(n)", "def modulo_power(x, b):\n \n r = x % b\n ct = 0\n pows = {}\n while r not in pows:\n pows[r] = ct\n ct += 1\n r = x * r % b\n return ct - pows[r]", "def power(base, exponent, cache=None):\n if cache is None:\n cache = {}\n # Any negative exponent will be a fraction 0 < x < 1, so round down to 0\n if exponent < BigInteger(\"0\"):\n return BigInteger(\"0\")\n if exponent == BigInteger(\"0\"):\n return BigInteger(\"1\")\n if exponent == BigInteger(\"1\"):\n return base\n print \"Printing\"\n print exponent.__hash__()\n if exponent in cache:\n print \"Accessing cache: \", exponent\n return cache[exponent]\n half_exponent = divide(exponent, BigInteger(\"2\"))\n half_result = power(base, half_exponent, cache)\n # a**n = a**(n/2) * 2 if n is even\n result = multiply(half_result, 
half_result)\n # Divide doesn't support mod or remainder, so check for an odd number\n # If exponent is odd, multiply by base one more time\n if exponent.digits[-1] in (1, 3, 5, 7, 9):\n result = multiply(result, base)\n cache[exponent] = result\n return result", "def Incrpower(self, increment):\n self.power += increment", "def ToBase(b, n):\r\n d = []\r\n while n:\r\n d.append(n % b)\r\n n //= b\r\n d.reverse() \r\n return int(''.join(map(str, d)))", "def pow2(x: int, p: int) -> int:\n while p > 0:\n x = x * x % q\n p -= 1\n return x", "def main():\n result = 0\n for n in range(1, 1001):\n result += n**n\n\n result = str(result)\n answer = result[len(result)-10::]\n\n print \"answer: \" + answer", "def power(a, b):\n \n return a**b", "def myExp(base,exponent,modulus):\n result = 1\n while exponent > 0:\n if exponent & 1 == 1:\n result = (result * base) % modulus\n exponent = exponent >> 1\n base = (base * base) % modulus\n return result", "def powmod(b,e,n):\r\n\treturn power_mod(b,e,n)", "def get_min_run(n):\n r = 0\n while n >= 64:\n r |= n & 1\n n >>= 1\n return n + r", "def powmod(self, a, c):\r\n a %= self.base\r\n res = 1\r\n\r\n for _ in range(c):\r\n res = (res * a) % self.base\r\n \r\n return res", "def power(a, n):\n result = 1\n exponent_is_negative = n < 0\n\n n = abs(n)\n while n > 0:\n result *= a\n n -= 1\n\n if exponent_is_negative is True:\n result = 1 / result\n\n return result", "def exponent(num,power=2):\n return num ** power", "def r1(x, n, max_size=32):\n return (x << n) % (2 << (max_size - 1)) + (x >> (max_size - n))", "def rand_int(n):\n\n while True:\n res, power, remain = 0, 0, n\n while remain:\n res |= coin() << power\n power += 1\n remain >>= 1\n if res <= n:\n return res", "def ipow(base, exponent):\n result = 1\n \n while exponent > 0:\n if exponent % 2 == 1:\n result *= base\n\n exponent //= 2\n base *= base\n \n return result", "def digit(number: int, n: int) -> int:\n return number // 10 ** n % 10", "def next_integer(n):\n if n % 2 == 0:\n return n // 2\n else:\n return 3 * n + 1", "def powmod(b,e,n):\n\treturn power_mod(b,e,n)", "def get_generator(modulus, k, m):\n\ttries = 0\n\twhile tries <= ((m-k) + 1):\n\t\tgen = get_prime(k, m)\n\t\tprint(gen)\n\t\tpows = [gen]\n\t\tfor x in range(modulus):\n\t\t\ta = pows[0]\n\t\t\tpows.insert(0, a*gen)\n\t\tremainders = set([(x % modulus) for x in pows])\n\t\tif len(remainders) == modulus - 1:\n\t\t\treturn gen\n\t\ttries += 1\n\tprint(\"No generator found. 
Increasing generator range by 10 on each side.\")\n\tl = k - 10\n\tn = m + 10\n\tif l <= 0:\n\t\tl = 1\n\treturn get_generator(modulus, l, n)", "def pow(a, b):\n\n result = 1\n\n for _ in range(b):\n result *= a\n\n return result", "def stream(_) -> int:\n return 1 << 9", "def stream(_) -> int:\n return 1 << 9", "def mod_pow(a: int, b: int, m: int) -> int:\n\tres = 1\n\twhile b > 0:\n\t\tif b % 2 != 0:\n\t\t\tres = (res * a) % m\n\t\ta = (a * a) % m\n\t\tb //= 2\n\treturn res", "def powerRemainder(w, d, n):\n\n b = bin(d).lstrip('0b') # get binary representation of d\n r = 1\n\n for i in b: # for each digit in binary\n r = r ** 2 # square r\n if i == '1': # if digit is 1\n r = r * w # times r by w\n r %= n # mod r by n\n\n return r # return answer", "def _down_to_power_of_two(n):\n if n < 2:\n raise ValueError(\"N should be >= 2: %d\" % n)\n log_n = math.log(n, 2)\n p = int(log_n)\n # If n is exactly power of 2 then 2**p would be n, decrease p by 1.\n if p == log_n:\n p -= 1\n return 2**p", "def nd_pow_of_two(number):\n nd_pow = 1\n while number % nd_pow == 0:\n nd_pow <<= 1\n return nd_pow >> 1", "def power(x, n):\n # Negative and fractional powers are not allowed\n if n < 0:\n raise ValueError('n cannot be negative')\n elif 0 < n < 1.0:\n raise ValueError('n cannot be fractional')\n\n result = 1\n for _ in range(n):\n result = multiply(result, x)\n return result", "def __pow__(self, exponent):\n return self.runtime.pow(self, exponent)", "def modpow(a, n, p):\n res = 1\n a = a % p\n while n > 0:\n # if n is odd\n if n & 1:\n res = (res * a) % p\n n = n >> 1 # n = n / 2\n a = (a*a) % p\n\n return res", "def pow(a: float, n: int):\n if n == 0:\n return 1\n elif n % 2 == 0: # power n - even\n return pow(a**2, n//2)\n else: # power n - odd\n return pow(a, n-1)*a", "def mod_pow(x,e,p):\n x = x % p\n R = 1\n while e > 0 :\n if (e%2) == 1 :\n R = (R*x) % p\n e = e//2\n x = (x*x) % p \n return(R)", "def print_powers_of():\n base = int(input(\"Please enter a positive integer to serve as the base: \"))\n power = int(input(\"Please enter a positive integer to serve as the highest power: \"))\n if base >= 0 and power >= 0:\n for x in range(0, power + 1, 1):\n result = base ** x\n print(str(base) + \" ^ \" + str(x) + \" = \" + str(result))\n else:\n print(\"ERROR: Both values must be POSITIVE INTEGERS.\")", "def dig_pow(n, p):\n t = sum(pow(int(j), p+i) for i, j in enumerate(str(n)))\n return t/n if t % n == 0 else -1", "def first_n_digits(num, n):\n return num // 10 ** (int(math.log(num, 10)) - n + 1)", "def power(x, y):\n\n res = 1\n while y:\n if y & 1:\n res *= x\n x *= x\n y >>= 1\n return res", "def power2(x, n):\n if n == 0:\n return 1\n else:\n partial = power2(x, n // 2)\n result = partial * partial\n if n % 2 == 1:\n result *= x\n return result", "def power2(x, n):\n if n == 0:\n return 1\n else:\n partial = power2(x, n // 2)\n result = partial * partial\n if n % 2 == 1:\n result *= x\n return result", "def log_10_product(x, pos):\n return '%1i' % (x)", "def log_10_product(x, pos):\n return '%1i' % (x)", "def pow(a, n, depth=1):\n if n == 1:\n print(\"depth=\", depth)\n return a\n if n % 2 == 1:\n return pow(a, n-1, depth + 1) * a\n else: # n % 2 = 0\n return pow(a * a, n // 2, depth + 1)", "def solution(max_base: int = 5) -> int:\n freqs = defaultdict(list)\n num = 0\n\n while True:\n digits = get_digits(num)\n freqs[digits].append(num)\n\n if len(freqs[digits]) == max_base:\n base = freqs[digits][0] ** 3\n return base\n\n num += 1", "def numbits(n):\n return int(math.ceil(math.log(n, 
2)))", "def _round_bits(n: int, radix_bits: int) -> int:\n return (n + radix_bits - 1) // radix_bits" ]
[ "0.7424253", "0.73961514", "0.7280163", "0.7175434", "0.713948", "0.71352744", "0.69893503", "0.6905554", "0.68357366", "0.67452294", "0.6701021", "0.6685439", "0.6657409", "0.6649841", "0.66056854", "0.658991", "0.6583599", "0.656067", "0.6541835", "0.6518142", "0.6472119", "0.6443989", "0.6443932", "0.64288723", "0.638601", "0.6369569", "0.6360769", "0.63479626", "0.63443714", "0.63408923", "0.629543", "0.6294267", "0.6291711", "0.62853295", "0.62730473", "0.6273026", "0.6268443", "0.6264864", "0.6257421", "0.6257157", "0.6252666", "0.6240737", "0.6234147", "0.6223954", "0.62220496", "0.62024593", "0.61960226", "0.61897606", "0.6187666", "0.6182989", "0.6171605", "0.6148809", "0.6147326", "0.6130181", "0.6108148", "0.60974866", "0.60852295", "0.60769707", "0.60593647", "0.60436654", "0.6026347", "0.6014222", "0.6013646", "0.60073763", "0.600172", "0.59953356", "0.5991785", "0.5987212", "0.59554017", "0.5949175", "0.594464", "0.5943377", "0.5938866", "0.5926433", "0.5922779", "0.59208393", "0.59128344", "0.5910713", "0.5910713", "0.58963203", "0.58962", "0.5891474", "0.5891053", "0.5890406", "0.58881676", "0.5886844", "0.5885176", "0.58803195", "0.5873278", "0.5870054", "0.5867297", "0.5863202", "0.583579", "0.583579", "0.5830809", "0.5830809", "0.58294696", "0.5822304", "0.5821187", "0.58156526" ]
0.8007733
0
Return a number that looks 'nice', with a maximum error
def magicnr(value, error): magics = [ (10 ** (nextpow10(error))), (10 ** (nextpow10(error))) / 2.0, (10 ** (nextpow10(error))) / 4.0, (10 ** (nextpow10(error))) / 10.0, (10 ** (nextpow10(error))) / 20.0, (10 ** (nextpow10(error))) / 40.0, (10 ** (nextpow10(error))) / 100.0, ] magics.sort() magics.reverse() magic = magics[-1] for n in magics: if n < abs(value): magic = n break return fround(value, magic)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_precision(err):\n return max(0, int(-math.log10(2 * err)) + 1)", "def computeGoodMax(totalTimes, noerrs):\n # Could allow a small amount of space above the top, but it's annnoying for percentages!\n # return None\n factor = 1.00\n maxReading = factor * max(\n [max([v for v in l if v != None]) for l in list(totalTimes.values())]\n )\n if maxReading == 0:\n maxReading = 0.1\n decade = math.floor(math.log10(maxReading))\n scaledValue = maxReading * 10 ** (-decade)\n # print (\"maxReading: \",maxReading,\"decade: \",decade,\" scaledValue: \",scaledValue)\n for v in (\n 1.0,\n 1.1,\n 1.2,\n 1.25,\n 1.3,\n 1.4,\n 1.5,\n 1.6,\n 1.7,\n 1.75,\n 1.8,\n 1.9,\n 2.0,\n 2.5,\n 3.0,\n 4.0,\n 5.0,\n 6.0,\n 7.0,\n 7.5,\n 8.0,\n 9.0,\n ):\n if scaledValue <= v:\n # print (\"computeGoodMax: \", v * (10**decade))\n return v * (10 ** decade)\n # print (\"computeGoodMax: \", 10**(decade+1))\n return 10 ** (decade + 1)", "def enlarge(n):\r\n return n * 100", "def safe_calc(exponent):\n\n if exponent > 700:\n return sys.float_info.max\n else:\n return math.exp(exponent)", "def ensure_size(value):\n return int(round(value * 1.0 / base)) * base", "def ghmult_plain(x: int) -> str:\n mult = x / 10000\n if int(mult) == mult:\n mult = int(mult)\n return '{}'.format(mult)", "def ghmult(x: int) -> str:\n mult = x / 10000\n if int(mult) == mult:\n mult = int(mult)\n return '%sx' % mult", "def get_m(self, n, err):\n m = (n * -log2(err))/log(2)\n return int(m)", "def native_max_value(self) -> float:\n return 9", "def enlarge(n):\n\n return n* 100", "def enlarge(n):\n return n*100", "def pickNarrow(length):\n return(int(np.ceil(np.log10(length))))", "def fail_max(self) -> int:\n return self._fail_max", "def MakeHumanReadable(num):\n i = 0\n while i+1 < len(EXP_STRINGS) and num >= (2 ** EXP_STRINGS[i+1][0]):\n i += 1\n rounded_val = round(float(num) / 2 ** EXP_STRINGS[i][0], 2)\n return '%s %s' % (rounded_val, EXP_STRINGS[i][1])", "def _nice(x, round=False):\n if x <= 0:\n import warnings\n warnings.warn(\"Invalid (negative) range passed to tick interval calculation\")\n x = abs(x)\n expv = floor(log10(x))\n f = x / pow(10, expv)\n if round:\n if f < 1.75:\n nf = 1.0\n elif f < 3.75:\n nf = 2.5\n elif f < 7.0:\n nf = 5.0\n else:\n nf = 10.0\n else:\n if f <= 1.0:\n nf = 1.0\n elif f <= 2.5:\n nf = 2.5\n elif f <= 5.0:\n nf = 5.0\n else:\n nf = 10.0\n return nf * pow(10, expv)", "def max_error(self) -> float:\n return float(np.max(np.abs(self._flattened_errors())))", "def max_temp(self):\n return 99", "def problem_48():\n\n return int(str(sum(x**x for x in range(1, 1001)))[-10:])", "def err_func(x,rv,valore,specn,lcrop,models='da2014'):\n tmp = tmp_func(x[0], x[1], rv, specn, lcrop, models)\n if tmp != 1: return abs(tmp[3]-(valore+1.)) #this is quantity that gets minimized \n else: return 1E30", "def mamajek08_logRpHK_max():\n return -3.8918287373004357", "def normexponent(val):\n n = np.log10(val)\n if n < 0:\n n = int(n) - 1\n else:\n n = int(n)\n return n", "def maxim(self) -> (int, float('inf')):\n\t\treturn 2", "def test_to_knx_max_exceeded(self):\n with self.assertRaises(ConversionError):\n DPTValue1Ucount().to_knx(DPTValue1Ucount.value_max + 1)", "def humanvalue(self, value):\n if value > 1024 * 1024 * 1024:\n return \"%d\" % (value / 1024 / 1024 / 1024 / 1024)\n if value > 1024 * 1024:\n return \"%d\" % (value / 1024 / 1024)\n if value > 1024:\n return \"%d\" % (value / 1024 / 1024)", "def time_to_failure():\n return int(random.expovariate(BREAK_MEAN))\n #return MTBF", "def max_pp(level):\n base_pp = 6\n 
level_pp = 2 * level\n return base_pp + (level_pp - 2)", "def MINIMUM_BET() -> int:\n return 10", "def rand_uni_val() -> float:\n return random.uniform(0, 1)", "def native_max_value(self) -> float:\n return TEMP_MAXIMUM", "def calc_max_level(num_point):\n return int(numpy.ceil(numpy.log2(num_point)))", "def _ValueMismatch(how_much):\n return 'Values mismatch, %s' % how_much", "def calculateErrorRate(numCorrect, numWrong):\n return np.round((numWrong)/(numCorrect+numWrong),3)", "def get_max_praises(self):\n char = self.caller.char_ob\n clout = char.social_clout\n s_rank = char.item_data.social_rank\n return clout + ((8 - s_rank) // 2)", "def max_value(self) -> float:\n return DEFAULT_MAX_VALUE", "def test_to_knx_max_exceeded(self):\n with pytest.raises(ConversionError):\n DPTSceneNumber.to_knx(DPTSceneNumber.value_max + 1)", "def time_to_failure():\n if not useWeibull:\n nextFailure = int(random.expovariate(BREAK_MEAN))\n else:\n # The Weibull distr. generates many errors.\n nextFailure = int(np.random.weibull(WEIBULL_K)*10.0) + MTBF\n return nextFailure\n #return MTBF", "def _ms_err(self):\n return self._ss_err / self._df_err", "def generate_limit_and_ticks_K(max_value):\n print \"max value:\",max_value\n order = math.log(max_value, 1024)\n multiple, limit_power = math.modf(order)\n multiple = math.pow(1024, multiple)\n\n print \"base power and extension:\", limit_power, multiple\n\n # here, we have a few more complications, because instead of decades we're looking at SI prefix jumps (millenniums)\n if multiple > 512:\n # again, 5s get pushed up to the next order\n limit_power += 1\n num = 1\n extension = 0\n elif multiple > 256:\n # twos get pushed to 512 (4*128)\n num = 4\n extension = 0\n else:\n\n # and the rest get sifted again, in numbers of 128\n extension, num = math.modf(multiple/128)\n print \"multiple num and extension:\", num, extension\n\n num = 8\n if extension > 0.5:\n num += 8\n extension = 0\n elif extension > 0.2:\n extension = 0.5\n elif extension > 0.1:\n extension = 0.2\n elif extension > .001:\n extension = 0.1\n else:\n extension = 0\n\n print \"new power, number, extension:\", limit_power, num, extension\n\n max_value = math.pow(10, limit_power)* (num+extension)\n print \"new max: (number+extension)*10^power:\", max_value\n\n if 2 <= num < 10:\n steps = num\n else:\n steps = max_value / math.pow(10, limit_power-1)\n\n print \"steps:\",steps\n\n ticks = np.linspace(0, max_value, steps+1, endpoint=True)\n print \"steps:\", ticks\n\n return max_value, ticks", "def like_amount(xx):\n x = int(xx)\n if x < 0:\n return 0\n\n if x == 0:\n # this models a spike at 0\n return 0.022\n\n if x > 4000:\n # make it constant but small at very high values rather\n # than evaluating the log-normal there. 
There is really no\n # great way to check the tails all that well.\n # any points greater than this max-value will get\n # assigned customer-id with almost certainty\n return 1e-20\n\n # otherwise, a regular log-normal distribution\n pi = 3.14159\n mu = 5.3\n sig = 1.85\n norm = 1.0 / (np.sqrt(2 * pi) * sig * x)\n u = ((np.log(x) - mu) / sig) ** 2\n u = min(u, 100)\n result = norm * np.exp(-0.5 * u)\n return result", "def headbut_miss(num):\r\n\tglobal php\r\n\tif num == 0:\r\n\t\tphp -= 10\r\n\t\treturn 0\r\n\telse:\r\n\t\treturn num", "def float(self, max_=None):\n max_ = self.max_float if max_ is None else max_\n return max_ * (self.rng.random() - 0.5)", "def nextpow10(n):\n\t\tif n == 0:\n\t\t\treturn 0\n\t\telse:\n\t\t\treturn math.ceil(math.log10(abs(n)))", "def get_number(maxValue):\r\n return random.randint(1, maxValue)", "def max_abs_error(self) -> float:\n return np.max(np.abs([self.error]))", "def ghchance_plain(x: int) -> str:\n assert x % 100 == 0\n return '%d%' % (x // 100)", "def fmt_int(value):\n if value is None:\n return -999999999999999\n return int(value)", "def calculate_voting_power(error_rate):\n if error_rate == 0:\n return INF\n if error_rate == 1:\n return -INF\n return 0.5*ln(make_fraction(1-error_rate, error_rate))", "def circulation_default_extension_max_count(loan):\n return float(\"inf\")", "def _get_money_earned(tier):\n return int(((tier**2) * 10) + 10)", "def n_sanity_check(number):\n #number = min(99,number)\n #number = max(1,number)\n #return number\n if number > 99: # This is alot clearer no?\n return 99\n elif number < 1:\n return 1\n else:\n return number", "def rand_val(max):\n order = math.ceil(math.log10(max)) #Determine the num of digits in size\n index = math.floor(random.SystemRandom().random() * (10 ** order))\n\n # Yea, this is quite inefficient\n while (index >= max):\n index = math.floor(random.SystemRandom().random() * (10 ** order))\n\n return index", "def max_value(gameState):\n if terminal_test(gameState): return -1", "def iceil(f: SupportsFloat) -> int:\n\t\t# noinspection PyTypeChecker\n\t\treturn int(np.ceil(f))", "def test_assert_max_exceeded(self):\n with self.assertRaises(ConversionError):\n DPTSignedRelativeValue.to_knx(128)", "def level_to_value(level, max_value):\n return (level / _MAX_LEVEL) * max_value", "def _compute_noise_level(self, data):\n noise = max(data)\n noise_min = 2600\n noise_max = 4095\n ratio = (noise - noise_min)/(noise_max - noise_min)\n return int(ratio*100)", "def worst_score(self):\r\n pass", "def get_max_iters():\n return 2000", "def test_to_knx_min_exceeded(self):\n with self.assertRaises(ConversionError):\n DPTValue1Ucount().to_knx(DPTValue1Ucount.value_min - 1)", "def largest_factor(n): \n max_factor = 1\n for i in range(2,floor(sqrt(n))+1):\n if n % i == 0:\n return max(max_factor, n // i)\n return max_factor", "def safe_exp(value):\n try:\n ans = math.exp(value)\n except OverflowError:\n ans = float(\"inf\")\n return ans", "def Ethanol(self) -> int:\n return self.raw_measure()[1]", "def generate_limit_and_ticks(max_value):\n print \"max value:\",max_value\n order = math.log10(max_value)\n multiple, limit_power = math.modf(order)\n multiple = math.pow(10, abs(multiple))\n\n print \"base power and extension:\", limit_power, multiple\n\n # within an order of magnitude, there are three ranges that are useful to look at:\n if multiple > 5:\n # over 5.0x10^x, and we care about that decade as a whole\n limit_power += 1\n num = 1\n extension = 0\n elif multiple > 2:\n # over 2.0*10^X, and we care about the 
first half of the decade (0-5.0)*10^x\n num = 5\n extension = 0\n else:\n # below 2.0*10^X, we really care about 20*10^(x-1)\n\n extension, num = math.modf(multiple)\n print \"multiple num and extension:\", num, extension\n\n # And the same 5/2/10 breakout repeats at this level, but adds to 10 to give more buckets over (10+n)*10^(x-1)\n num = 1\n if extension > 0.5:\n num += 1\n extension = 0\n elif extension > 0.2:\n extension = 0.5\n elif extension > 0.1:\n extension = 0.2\n elif extension > .001:\n extension = 0.1\n else:\n extension = 0\n\n print \"new power, number, extension:\", limit_power, num, extension\n\n max_value = math.pow(10, limit_power)* (num+extension)\n print \"new max: (number+extension)*10^power:\", max_value\n\n if 3 <= num < 10:\n steps = num\n else:\n steps = max_value / math.pow(10, limit_power-1)\n\n print \"steps:\",steps\n\n ticks = np.linspace(0, max_value, steps+1, endpoint=True)\n print \"steps:\", ticks\n\n return max_value, ticks", "def correct_sci(self, val):\n try:\n dividend = float(val.split('E-')[0])\n divisor = float('1' + int(val.split('E-')[1]) * '0')\n return str('%f' % (dividend / divisor))\n except:\n return val", "def safe_rand(self):\n rand_n = np.random.rand()\n if rand_n == float(1):\n rand_n -= 1e-10\n return rand_n", "def _random_error(self):\n return self._random_magnitude_error()[1]", "def STAND_LIMIT() -> int:\n return 15", "def fix_teen(n):\n if 13<=n<=14 or 17<=n<=19:\n return 0\n else:\n return n", "def micros() -> int:", "def cps_err(self):\n return np.sqrt(self.totalcounts) / self.exptime", "def badness(self):\n # Page 97 of TeXbook.\n # \"The badness of a line is an integer that is approximately 100 times\n # the cube of the ratio by which the glue inside the line must stretch\n # or shrink to make an hbox of the required size. For example, if the\n # line has a total shrinkability of 10 points, and if the glue is being\n # compressed by a total of 9 points, the badness is computed to be 73\n # (since 100 * (9/10)^3 = 72.9); similarly, a line that stretches by\n # twice its total stretchability has a badness of 800. But if the\n # badness obtained by this method turns out to be more than 10000, the\n # value 10000 is used. (See the discussion of glue set ratio and glue\n # set order in Chapter 12; if i != 0, there is infinite stretchability\n # or shrinkability, so the badness is zero, otherwise the badness is\n # approximately min(100r^3, 10000).) 
Overfull boxes are considered to\n # be infinitely bad; they are avoided whenever possible.\"\n # Page 111 of TeXbook.\n # \"Vertical badness is computed by the same rules as horizontal\n # badness; it is an integer between 0 and 10000, inclusive, except when\n # the box is overfull, when it is infinity.\"\n if self.is_over_full:\n return math.inf\n line_state, glue_ratio, glue_order = self.glue_set_ratio()\n if glue_order > 0:\n return 0\n # I can't find this stated anywhere, but it seems intuitively correct:\n # a single word on a line has no flexibility, but it is probably bad.\n elif glue_ratio in (GlueRatio.no_stretchability,\n GlueRatio.no_shrinkability):\n return 10000\n else:\n return min(round(100 * glue_ratio ** 3), 10000)", "def error(self): \n if not self.terminal:\n err = sum([v**2 for v in self.state + self.q[:-1]])\n else:\n err = sum([v**2 for v in LIMITS[:9]] + [1.0 - LIMITS[9]**2])\n err *= (self.max_steps - self.steps)\n return err", "def dangerouslySmall(c, e):\n return e < -limit and e < (-integerLog10(abs(c))) - 1", "def prescaler(self) -> int:", "def consistancy_test(a, b, aErr, bErr = 0):#TODO: fully test this aproach\n return int(np.ceil(np.abs(b - a) / np.sqrt(bErr**2 + aErr**2)))", "def time_to_failure():\r\n return random.expovariate(BREAK_MEAN)", "def eV(E):\n if np.max(E) < 100:\n return E * 1000\n else:\n return E", "def like_age(xx):\n x = int(xx)\n if x < 18:\n return 0\n\n if x > 120:\n return 0\n\n x = int(xx)\n\n pi = 3.14159\n mu = 18.0\n sig = 18.5\n norm = 2.0 / (np.sqrt(2 * pi) * sig)\n u = ((x - mu) / sig) ** 2\n u = min(u, 100)\n result = norm * np.exp(-0.5 * u)\n return result", "def __human_limit(limit, used):\n if limit == 0: return \"none\"\n\n percentage = str(int(used*100/limit)) + \"%\"\n percentage = \" \"*(3-len(percentage)) + percentage\n return __human_size(limit, False) + \" (\" + percentage + \")\"", "def get_improved_score_factor(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)", "def round_based_alien_limitation(self):\n if self.num_round < 5:\n return 1\n elif self.num_round < 10:\n return 2\n else:\n return 3", "def wr(nr):\n return (1 - nr) if nr < 1.0 else 0.0", "def standardization(a, p):\r\n return a * 10 / 100 * p * p", "def HighPrecisionE(number):\n\n return \"%.22e\" % number", "def sanitise_p_value(n_iter, p_val):\n if p_val == 0:\n p_val = \"< {}\".format(1 / n_iter)\n else:\n p_val = \"{:.4f}\".format(p_val)\n return p_val", "def _max_factor(number, factors):\n return max(n for n in factors if n <= number)", "def calculate_large_constant(self, bound, real_reduction_iterations):#factor):\n minimum_exponent = round(90/(real_reduction_iterations-1))#math.ceil(math.log(bound, 10) * factor)\n \n return ZZ(10 ** minimum_exponent)", "def item_tres(n):\n if n <= 0.167:\n return 0\n elif n > 0.167 and n <= 0.333:\n return 1\n elif n > 0.333 and n <= 0.500:\n return 2\n elif n > 0.500 and n <= 0.667:\n return 3\n elif n > 0.667 and n <= 0.834:\n return 4\n elif n > 0.834 and n <= 1.000:\n return 5", "def nze(self) -> int:", "def nze(self) -> int:", "def max_staleness(self) -> str:\n return pulumi.get(self, \"max_staleness\")", "def e(i):\n if i==0:\n return 0\n else:\n return gc(2*int(math.floor((i-1)//2)))", "def sgn(x) -> int:\n if x > 0:\n return 1\n if x < 0:\n return -1\n return 0", "def 
ir(some_value):\r\n return int(round(some_value))", "def nice_number( num, mode=1, digits=1 ):\n # extract mantissa and exponent:\n mant, exp = num_to_mant_exp( num )\n # select the working mode and do the truncation:\n if ( mode==0 ):\n mant = np.ceil( mant*10**(digits-1) )/10**(digits-1)\n elif ( mode==1 ):\n mant = np.round( mant, digits-1)\n elif ( mode==2 ):\n mant = np.floor( mant*10**(digits-1) )/10**(digits-1)\n else:\n raise ValueError( 'Wrong worging mode for Fisher_utilities.nice_number' )\n\n return mant_exp_to_num( ( mant, exp ) )", "def updf(t,tmax):\n return 1./tmax if t<tmax else 0.", "def mix_number(n):\n if n == \"NaN\" or n == \"nan\" or n == \"Nan\" or n == 0:\n return n\n if -10 <= n <= 10:\n return percentify(n)\n return millify(n)", "def _get_n(self):#@Needs to fix it for general case\n n_60 = 0.55 * 1 * 1 * 0.75 * self._data[SoilProperty.SPT_N] /0.6\n if not self.is_clayey() and n_60>15: #apply dilitracy correction\n n_60 = 15 + 0.5 * (n_60 - 15)\n return n_60", "def random_int(max=1000):\r\n return randint(0, max)", "def free_bacon(opponent_score):\n # BEGIN PROBLEM 2\n a, b = opponent_score % 10, opponent_score // 10 # separation into digits\n return (max(a, b) + 1)\n # END PROBLEM 2" ]
[ "0.684441", "0.6396268", "0.620864", "0.6083352", "0.6061179", "0.6019383", "0.601921", "0.5970972", "0.5961703", "0.59382796", "0.5932441", "0.592237", "0.5899084", "0.5895729", "0.58700436", "0.58562726", "0.5854558", "0.58429986", "0.58234286", "0.5815658", "0.58116823", "0.58086294", "0.5805074", "0.57641387", "0.5745738", "0.5738901", "0.57242614", "0.57122165", "0.57103026", "0.5708048", "0.5705618", "0.5705574", "0.56876504", "0.5678054", "0.5665499", "0.56625295", "0.5653884", "0.5645745", "0.5644851", "0.5637176", "0.5636738", "0.56222355", "0.5616919", "0.5605304", "0.56028706", "0.55892396", "0.5588851", "0.55755424", "0.55539095", "0.5547363", "0.5539501", "0.55364937", "0.5535298", "0.5525478", "0.5522679", "0.55192584", "0.5519061", "0.5514809", "0.55136406", "0.55134135", "0.5512063", "0.5510672", "0.549604", "0.549551", "0.5489743", "0.5489587", "0.54879934", "0.54844123", "0.54733604", "0.5467905", "0.5461615", "0.5455043", "0.54544425", "0.54531527", "0.5448421", "0.54463077", "0.54429644", "0.54424196", "0.5436384", "0.5435881", "0.54313564", "0.54293346", "0.542853", "0.5427747", "0.5421048", "0.5420194", "0.54196894", "0.54185206", "0.54112154", "0.54112154", "0.5410388", "0.5407572", "0.54060066", "0.53949755", "0.539288", "0.53923756", "0.5391251", "0.5390852", "0.538906", "0.5376036" ]
0.66190135
1
Get the path to a CSV by name.
def _get_csv_path(name): return os.path.join(cwd, 'output/app_info', name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def csv_path(name):\n return \"./data/%s\" % name", "def csv_dir(self):\n return op.join(self.root_dir, 'csv')", "def get_cached_csv(self, category: str) -> str:\n csv_path = f\"{self.csv_dir}/{category.lower()}.csv\"\n if path.exists(csv_path):\n return csv_path\n raise FileNotFoundError(f\"There is no {category.lower()} CSV written yet.\")", "def get_csv_path(url, destination):\n datafile_path = get_datafile_path(url, destination)\n return os.path.splitext(datafile_path)[0] + '-processed.csv'", "def get_csv(\n self,\n csv_name: str,\n csv_directory: Optional[str] = None,\n csv_output_name: Optional[str] = None,\n graph_type: Optional[str] = \"instance\",\n graph_id: Optional[str] = \"main\",\n ):\n self._check_connection()\n options = {}\n if csv_directory is None:\n csv_directory = os.getcwd()\n if csv_output_name is None:\n csv_output_name = csv_name\n options[\"name\"] = csv_name\n\n result = self._dispatch(\n \"get\",\n self._csv_url(graph_type, graph_id),\n options,\n )\n\n stream = open(f\"{csv_directory}/{csv_output_name}\", \"w\")\n stream.write(result)\n stream.close()", "def csvPathname(self, scenario, baseline=None, outputDir='.', type=RESULT_TYPE_SCENARIO):\n # Output files are stored in the output dir with same name as query file but with 'csv' extension.\n basename = os.path.basename(self.queryFile)\n mainPart, extension = os.path.splitext(basename)\n middle = scenario if type == RESULT_TYPE_SCENARIO else (\"%s-%s\" % (scenario, baseline))\n csvFile = \"%s-%s.csv\" % (mainPart, middle)\n csvPath = os.path.abspath(os.path.join(outputDir, csvFile))\n return csvPath", "def get_loc_year_csv(csv_name):\n fname = (csv_name.split('.'))[0].split('-')\n return fname[0], fname[1]", "def get_curve_path(curves_dir, star_id):\n curve_file = \"%s.csv\" % star_id\n curve_path = path.join(curves_dir, curve_file)\n\n return curve_path", "def sample_data_path(name):\n import os.path as op\n data_dir = op.join(op.dirname(__file__), \"data\")\n data_path = op.join(data_dir, name + \".csv\")\n return op.abspath(data_path)", "def data_characterization_path(experiment_name: str, iteration: int) -> Path: # pragma: no cover\n iteration_csv: str = f\"{iteration_name(iteration)}.csv\"\n return data_characterization_dir(experiment_name) / iteration_csv", "def symbol_to_path(symbol, base_dir=None):\n if base_dir is None:\n base_dir = os.path.dirname(os.path.realpath(__file__))\n return os.path.join(base_dir, \"{}.csv\".format(str(symbol)))", "def get_path(name: str) -> str:\n return _pooch.fetch(name)", "def get_path(name: str) -> str:\n return _pooch.fetch(name)", "def symbol_to_path(symbol, base_dir=\"data\"):\n return os.path.join(base_dir,\"{}.csv\".format(str(symbol)))", "def get_csv_in_path(self, path):\n files = os.listdir((path))\n return files", "def symbol_to_path(symbol, base_dir=\"../data\"):\n return os.path.join(base_dir, \"{}.csv\".format(str(symbol)))", "def symbol_to_path(symbol, base_dir=\"../data\"):\n return os.path.join(base_dir, \"{}.csv\".format(str(symbol)))", "def symbol_to_path(symbol, base_dir=\"../data\"):\n return os.path.join(base_dir, \"{}.csv\".format(str(symbol)))", "def get_csv_file_name(output_dir, file_prefix, file_suffix):\n\tcsv_filename = \"\".join([file_prefix, '_', file_suffix, '.csv'])\n\treturn os.path.join(output_dir, csv_filename)", "def get_items_path() -> Path:\n return Path(os.path.join(Path(os.path.realpath(__file__)).parent, \"items.csv\"))", "def symbol_to_path(symbol, base_dir=\"data\"):\n return os.path.join(base_dir, 
\"{}.csv\".format(str(symbol)))", "def symbol_to_path(symbol, base_dir=\"data\"):\n return os.path.join(base_dir, \"{}.csv\".format(str(symbol)))", "def symbol_to_path(symbol, base_dir= proj_path + '/data/'): \n return os.path.join(base_dir, \"{}.csv\".format(str(symbol)))", "def pathfinder(Input):\n while True:\n if Input[-4::] == '.csv':\n return Input\n else:\n Input = input('Please enter a valid csv file: ')", "def symbol_to_path(symbol, base_dir=\"data\"):\r\n return os.path.join(base_dir, \"{}.csv\".format(str(symbol)))", "def data_abex_input_path(experiment_name: str, iteration: int) -> Path: # pragma: no cover\n iteration_csv: str = f\"{iteration_name(iteration)}.csv\"\n return data_abex_input_dir(experiment_name) / iteration_csv", "def csv_file(input_file):\n\n current_dir = os.getcwd()\n directory_name = current_dir + '\\\\' + 'data' + '\\\\'\n csv_out = directory_name + input_file\n return csv_out", "def get_descendant_file_path(parent_path):\n csv_relative_path = []\n for root, dirs, files in os.walk(parent_path):\n for file in files:\n words = file.split(r'.')\n if words[-1] == 'csv':\n file_path = os.path.join(parent_path, file)\n csv_relative_path.append(file_path)\n return csv_relative_path", "def upload_csv_to_drive(csv_path: str, csv_name: str, folder_id: Optional[str] = None) -> str:\n if folder_id:\n csv_metadata = {'name': csv_name,\n 'parents': [folder_id]}\n else:\n csv_metadata = {'name': csv_name}\n\n csv_file = Path(f\"{csv_path}/{csv_name}\")\n media = MediaFileUpload(csv_file,\n mimetype='text/csv')\n file = drive_service().files().create(body=csv_metadata,\n media_body=media,\n fields='id').execute()\n\n return file.get('id')", "def load_csv(csv_path):\n\n try:\n # Tries to read .csv file into a dataframe\n csv = pd.read_csv(csv_path, header=None)\n\n except FileNotFoundError as e:\n # If file is not found, handle the exception and exit\n logger.error(e)\n raise\n\n return csv", "def symbol_to_path(symbol, base_dir=\"files/input\"):\n print('base_dir=', base_dir)\n return os.path.join(base_dir, \"{}.csv\".format(str(symbol)))", "def symbol_to_path(symbol, base_dir=\"files/input\"):\n print ('base_dir=',base_dir)\n return os.path.join(base_dir, \"{}.csv\".format(str(symbol)))", "def get_csv_row(file_name):\r\n\tcsv_rdr = csv.reader(open(file_name,'rb'))\r\n\treturn [row for row in csv_rdr]", "def fetch_csv(filename):\n variable = pd.read_csv(filename+'.csv', index_col=0)\n return variable", "def getCsvFileByRegion(self, region):\n result = \"\"\n if region == \"PHA\":\n result = \"00.csv\"\n elif region == \"STC\":\n result = \"01.csv\"\n elif region == \"JHC\":\n result = \"02.csv\"\n elif region == \"PLK\":\n result = \"03.csv\"\n elif region == \"ULK\":\n result = \"04.csv\"\n elif region == \"HKK\":\n result = \"05.csv\"\n elif region == \"JHM\":\n result = \"06.csv\"\n elif region == \"MSK\":\n result = \"07.csv\"\n elif region == \"OLK\":\n result = \"14.csv\"\n elif region == \"ZLK\":\n result = \"15.csv\"\n elif region == \"VYS\":\n result = \"16.csv\"\n elif region == \"PAK\":\n result = \"17.csv\"\n elif region == \"LBK\":\n result = \"18.csv\"\n elif region == \"KVK\":\n result = \"19.csv\"\n else:\n return None\n return result", "def getPathfromCSV(flist, full_csv_list, outfile=None):\n # Get the file list\n fils, csvfils = [], []\n with open(flist, 'r') as fIn:\n for line in fIn:\n if line:\n fils.append(line.split('.')[0].strip())\n with open(full_csv_list, 'r') as fIn:\n for line in fIn:\n if line:\n 
csvfils.append([line.split('/')[-1].split('_')[0].strip(), # Filename only\n line.strip()]) # File path only\n \n # replace it with the path list\n paths = []\n for f in fils:\n if f in [o[0] for o in csvfils]:\n idx = [o[0] for o in csvfils].index(f)\n paths.append(csvfils[idx][1])\n else:\n print('Could not find: %s' %f)\n \n print('Retrieved %i paths (of %i)' %(len(paths), len(fils)))\n if outfile is not None:\n with open(outfile, 'w') as fOut:\n for p in paths:\n fOut.write(p)\n fOut.write('\\n')\n \n return paths", "def get_input_file():\n\n filename = input('Input the file name to save data to: ') + '.csv'\n return filename", "def load_template_path_from_csv(csv_file_path, col_name=None):\n try:\n data = pd.read_excel(csv_file_path)\n data[col_name].dropna(inplace=True)\n return data[col_name].tolist()\n except Exception as e:\n sys.stderr.write('Template Loading from csv Failed: %s\\n' % e.message)\n exit(1)", "def get_field_mapping_filename(field_name: str, config_location: str) -> str:\n return os.path.join(config_location, field_name + \".csv\")", "def get_csv(self):\n all_csvs = [each for each in listdir(self.cur_dir) if each.endswith('.csv')]\n return all_csvs", "def make_table_path(keys, value, version=None):\n return _make_path(keys, value, '.csv', version)", "def read_csv(csv_path, fieldnames=None, restkey=None,\n restval=None, dialect='excel', *args, **kwds):\n with CSVFile(os.path.expanduser(csv_path), fieldnames=fieldnames, restkey=restkey, restval=restval,\n dialect=dialect, *args, **kwds) as csvfile:\n return csvfile", "def getNamedPath(name):\n def findNamed(path):\n pp = os.path.split(path)\n if pp[-1] == '':\n return None\n if pp[-1] != name:\n path = findNamed(pp[0])\n return path\n return findNamed(os.getcwd())", "def read_from_csv(path):\n if not os.path.exists(path):\n return None\n if not path.endswith('.csv'):\n return None\n\n with open(path, 'r') as file:\n data = pd.read_csv(file, header=0)\n\n return data", "def get_score_path(cfg):\n return os.path.join(\n get_score_dir(cfg),\n \"ener.csv\")", "def get_filepath(self, name):\r\n return os.path.join(self._folder, name)", "def open_csv_as_df(account_name):\n try:\n base_dir = os.path.dirname(os.path.abspath(__file__))\n file_dir = 'data_collection/match_datasets'\n data_file = os.path.join(base_dir, file_dir, account_name + '.csv')\n data = pd.read_csv(data_file)\n return data\n except FileNotFoundError as e:\n print(e)\n print('Could not find', account_name + '.csv')\n return None", "def test_findCSV(self,\n filename=\"page-views.csv\",\n input_folder='../../input/raw-data/'):\n\n csv_file = retrive_csv_file(filename, input_folder)\n expected_output = input_folder + filename\n self.assertEqual(csv_file, expected_output)", "def filepath(day, ind):\n if ind!=\"TradeReport\" and ind!=\"OrderDetail\" and ind!=\"OrderHistory\":\n raise NameError(' ind must be either TradeReport or OrderDetail')\n \n elif day<1 or day>31 or type(day)!=int:\n raise TypeError('day must be an integer between 1 and 31')\n \n if day<10:\n day=\"0\"+str(day)\n else:\n day=str(day)\n \n path=\"/data/LSE_DATA/raw/T_\" + ind + \"_\"+ day +\"012008.csv/\" + \"t_\" + ind +\".csv\"\n\n return path", "def write_csv(arr, product, file_path):\n os.chdir(file_path)\n keys = arr[0].keys()\n now = datetime.now()\n file_name = product + now.strftime(\"%m%d%y_%H%M\") + '.csv'\n try:\n with open(file_name, \"w\", newline='', encoding='utf-8') as a_file:\n dict_writer = csv.DictWriter(a_file, keys)\n dict_writer.writeheader()\n 
 dict_writer.writerows(arr)\n        a_file.close()\n    except OSError:\n        # file not found\n        print(f\"File: ${file_name} not found\")\n    return file_name", "def read_csv(path):\n    return pd.read_csv(path)", "def path(self):\n\t\treturn os.path.join(*self._string_values(limit=4))", "def _path(name: str):\n    return os.path.join(ASSET_PATH, name)", "def _open_for_csv(path):\n    if sys.version_info[0] < 3:\n        return open(path, 'rb')\n    else:\n        return open(path, 'r', newline='')", "def saveCSV(name, ra, dec, ang):\n    r = res(ra,dec,ang)\n    return r.write('{}.csv'.format(name), overwrite = True)", "def get_csv_string(self, **kwargs):\n        ...", "def get_season_csv_file(season):\n    with requests.Session() as s:\n        download = s.get(CSV_URL.format(season))\n        return download.content.decode('utf-8')", "def get_season_csv_file(season):\n    with requests.Session() as s:\n        download = s.get(CSV_URL.format(season))\n        return download.content.decode('utf-8')", "def read_csv(folder):\n    csv_paths = [(f, os.path.join(folder, f)) for f in os.listdir(folder) if f.endswith('.csv') and '刑事' in f and '司法院－刑事補償_刑事' not in f and '最高' not in f]\n    return csv_paths", "def __init__(self, path=None):\n        super().__init__(path=path)\n        self.path += '{}.csv'", "def get_default_path(name):\n    name_ = name\n    if isinstance(name, (DottedName, Symbol)):\n        name_ = str(name)\n    if name_ in pyccel_external_lib.keys():\n        name = pyccel_external_lib[name_].split('.')\n        if len(name)>1:\n            return DottedName(*name)\n        else:\n            return name[0]\n    return name", "def str_to_path(name):\n    import os;\n    return(os.path.abspath(name));", "def get_csv(self):\n        if self._scanned:\n            return self._scanner.csv()", "def filepath(filename, data, root='/home/cyneo/Work/Scans/Processed Data/',\r\n             filetype='.csv'):\r\n    path = os.path.abspath(root + data + '/' + filename +\r\n                           ' ' + data + filetype)\r\n    return path", "def get_oc_path(cfg):\n    return os.path.join(\n        BASE_DATA_DIR,\n        \"castp\",\n        \"pH\" + str(cfg.pH),\n        str(cfg.mut),\n        \"oc\" + str(cfg.probe) + \".csv\")", "def getPath(project):\n    if project == '.sourglass':\n        path = project\n    else:\n        path = os.path.join(basepath, 'logs', project + '.csv')\n    try:\n        open(path)\n    except IOError:\n        f = open(path, 'w')\n        f.close()\n        print(\"Started new project.\")\n        return path\n    else:\n        return path", "def get_key_data_filepath():\n    global key_filepath, directory\n    filename = 'key.csv'\n    key_filepath = os.path.join(directory, filename)", "def burstersFromCSV(csvfile, whichCol=1, includes=['B'], outfile=None):\n    df = pd.read_csv(csvfile)\n    includes = [o.lower() for o in includes]\n    paths = []\n    for u in range(df.shape[0]):\n        for i in includes:\n            try:\n                this = df.ix[u,whichCol].lower()\n            except: # Like for NaN\n                this = ''\n            if i in this and df.ix[u,0] not in paths:\n                paths.append(df.ix[u,0])\n    \n    if outfile is not None:\n        with open(outfile, 'w') as fOut:\n            for p in paths:\n                fOut.write('%s\\n' %p)\n        return\n    return paths", "def convert_tracefilename(self, filepath):\n        filename, extension = os.path.splitext(os.path.basename(filepath))\n        return filename + '.csv'", "def file_path(name):\n    base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n    return os.path.join(base_dir, 'data', name.lower())", "def symbol_to_path(symbol, base_dir=\"../data/\"):\n    if not os.path.exists(os.path.join(base_dir, \"{}.csv\".format(str(symbol)))):\n        start = dt.datetime(2010, 1, 1)\n        end = dt.datetime(2016, 12, 31)\n        df = web.DataReader(symbol, 'google', start, end)\n        df.to_csv(os.path.join(base_dir, 
\"{}.csv\".format(str(symbol))))\n return os.path.join(base_dir, \"{}.csv\".format(str(symbol)))", "def get_header_csv(csv_file, cols_delimiter):\n\n with open(csv_file, \"r\") as f:\n reader = csv.reader(f, delimiter=cols_delimiter)\n header_csv = next(reader)\n \n return header_csv", "def csvp(startingPath, csv_ext='.csv'):\n print 'walking up path=', startingPath\n csvfn = [os.path.join(root, filename)\n for root, dirnames, filenames in os.walk(startingPath)\n for filename in filenames if filename.endswith(csv_ext)]\n print 'list is ', len(csvfn), ' images long'\n print 'starting with', csvfn[0]\n print 'ending with', csvfn[-1]\n return csvfn", "def get_filename(output_dir, accountname):\n f_name = 'twitter_data_' + accountname + str(datetime.datetime.utcnow()) + '.csv'# start_time + '_' + end_time\n full_path = output_dir + '/' + f_name\n\n return full_path", "def load_file(name):\n return pd.read_csv(join(path_here, \"syserol/data/\" + name + \".csv\"), delimiter=\",\", comment=\"#\")", "def get_file_names(self):\n return glob.glob(os.path.join(self.path, '*.csv'))", "def load_csv_file(file_name):\n return pandas.read_csv(path_dataset + file_name)", "def askopenfilename():\r\n file_opt = options = {}\r\n options['defaultextension'] = '.csv'\r\n options['filetypes'] = [('all files', '.*'), ('csv files', '.csv')]\r\n options['initialdir'] = os.getcwd()\r\n options['initialfile'] = 'profile.csv'\r\n options['title'] = 'choose file'\r\n\r\n # get filename\r\n filename = tkFileDialog.askopenfilename(**file_opt)\r\n\r\n # open file on your own\r\n return filename", "def get_file_path(name: str) -> str:\n return os.path.join(DIR, 'scores', f'{name}.json')", "def get_path(self, name):\n for col in [self.specs, self.resources]:\n if name in col:\n return force_absolute(col['BASE'], col[name])\n raise MissingFileException(name)", "def tsv_name():\n\n if PAR['level'] == 1:\n \n return 'col.tsv'\n \n else:\n\n return 'myc.tsv'", "def get_filename(name: str, fold: int, type: str):\n if type != TYPE_TRAIN and type != TYPE_TEST:\n raise \"Invalid dataset type: \" + type\n directory = os.path.dirname(os.path.realpath(__file__))\n if directory.endswith(os.sep):\n directory.rstrip(os.sep)\n return directory + os.sep + type + os.sep + name + str(fold) + \".csv\"", "def path_for_import(name):\n return os.path.dirname(os.path.abspath(import_module(name).__file__))", "def load(name):\n if name in datasets:\n\n return pd.read_csv(os.path.join(datasets_path, \"%s.csv\" % name))\n else:\n raise ValueError(\"Dataset not found!\")", "def load_multiple_csv(self, path, column):\n df = pd.concat([pd.read_csv(f\"{path}/{f}\") for f in tqdm(os.listdir(f\"{path}/\"))], ignore_index=True)\n return df[column]", "def download_mp3_by_csv(s, username, passwd, csv_path, download_dir=None):\n\n s = login(s, username, passwd)\n refs = pd.read_csv(csv_path, sep=';').Name\n length = len(refs)\n for i, ref in enumerate(refs):\n sys.stdout.write('\\r')\n sys.stdout.write('downloading: %s/%s' % (i+1, length))\n sys.stdout.flush()\n s = search_by_ref(s, ref)\n mp3_path = None\n if download_dir != None:\n file_name = '%s.mp3' % ref\n mp3_path = os.path.join(download_dir, file_name)\n result = download_mp3(s, mp3_path, ref)\n if result == 1:\n return 1\n sys.stdout.write('\\n')\n sys.stdout.flush()\n s.driver.close()", "def download_from_csv(\r\n year, save_dir, time_step_in_seconds=5, total_paper_number=None,\r\n csv_filename=None, downloader='IDM'):\r\n postfix = f'AAAI_{year}'\r\n project_root_folder = os.path.abspath(\r\n 
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\r\n csv_file_path = os.path.join(\r\n project_root_folder, 'csv',\r\n f'AAAI_{year}.csv' if csv_filename is None else csv_filename)\r\n csv_process.download_from_csv(\r\n postfix=postfix,\r\n save_dir=save_dir,\r\n csv_file_path=csv_file_path,\r\n is_download_supplement=False,\r\n time_step_in_seconds=time_step_in_seconds,\r\n total_paper_number=total_paper_number,\r\n downloader=downloader\r\n )", "def get_file_name(self):\n dialog = gtk.FileChooserDialog(\"Open..\",\n None,\n gtk.FILE_CHOOSER_ACTION_OPEN,\n (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,\n gtk.STOCK_OPEN, gtk.RESPONSE_OK))\n dialog.set_default_response(gtk.RESPONSE_OK)\n response = dialog.run()\n\n if response == gtk.RESPONSE_OK:\n self.file_name = dialog.get_filename()\n self.tot_rows = len(open(self.file_name).readlines())\n self.ifile = open(self.file_name)\n self.reader = csv.reader(self.ifile)\n self.row = self.reader.next()\n dialog.destroy()\n elif response == gtk.RESPONSE_CANCEL:\n print 'Closed, no file selected.'\n dialog.destroy()", "def read_csv():", "def list_csv_files():\n # See README.txt Ref#2.\n return [filename for filename in glob.glob(\"*.csv\")]", "def samplesheet_path_fixture(fixtures_dir: Path) -> Path:\n _file_path = fixtures_dir / \"samplesheet.csv\"\n return _file_path", "def read_files(path, file_name):\n\n if os.path.exists(\n r'{}\\{}_dynamic.csv'.format(path, file_name)) and os.path.exists(\n r'{}\\{}_static.csv'.format(path, file_name)) and os.path.exists(\n r'{}\\{}_ego.csv'.format(path, file_name)):\n with open(r'{}\\{}_dynamic.csv'.format(path, file_name)) as tmp_dynamic:\n dynamic_csv = pd.read_csv(tmp_dynamic)\n print('Dynamic csv file found')\n with open(r'{}\\{}_static.csv'.format(path, file_name)) as tmp_static:\n static_csv = pd.read_csv(tmp_static)\n print('Static csv file found')\n with open(r'{}\\{}_ego.csv'.format(path, file_name)) as tmp_ego:\n ego_csv = pd.read_csv(tmp_ego)\n print('Ego csv file found')\n return ego_csv, dynamic_csv, static_csv\n\n else:\n print('No available data')\n sys.exit(0)", "def GetFlatFile(path_to_file, file_name):\n return pd.read_csv(os.path.join(path_to_file, file_name))", "def browseforcsv(self, entry):\r\n filename = filedialog.askopenfilename(title='Select CSV')\r\n if filename != '': # Doesn't change if no file name entered\r\n entry.delete(0, tk.END)\r\n entry.insert(tk.END, filename)", "def getPath(self, date, sep = '/'):\n\n return sep.join( [self.getDirName(date), self.getFileName(date)] )", "def get(name, filename):\n\tlogging.info(\"Reading {} from {}\".format(name, filename))\n\tlogging.debug(\"Opening file\")\n\twith open(filename, \"r+\") as f:\n\t\treader = csv.reader(f)\n\t\tlogging.debug(\"Reading name/snippet from file\")\n\t\tin_file = False\n\t\tfor row in reader:\n\t\t\tif str(row[0]) == name:\n\t\t\t\tin_file = True\n\t\t\t\tprint row\n\t\tif in_file == False:\n\t\t\tprint \"That's not in this file\"\n\tlogging.debug(\"Read successful\")\n\treturn name, filename", "def getCSV(filename, reference_column=\"time\", windows_separator=\"\"):\n reader = csv.DictReader(open(filename))\n csv_data = {}\n for row in reader:\n for col, value in row.iteritems():\n csv_data.setdefault(col, []).append(value)\n\n if reference_column not in csv_data.keys():\n raise ValueError('Reference column name {} not in the {} csv file. 
Aborting.'.format(reference_column, filename))\n\n # get windows separation line numbers.\n reference_points = [i for i, v in enumerate(csv_data[reference_column]) if v == windows_separator]\n\n if not reference_points:\n raise ValueError('Windows separator has not been found in {} csv file. Aborting.'.format(reference_column, filename))\n\n result = defaultdict(list)\n for i, r in enumerate(reference_points):\n for col in csv_data.keys():\n if i == 0:\n result[col].append([csv_data[col][v] for v in range(r)])\n else:\n result[col].append([csv_data[col][v] for v in range(reference_points[i - 1] + 1, r)])\n\n return result", "def GetDataFromCsvFile(file_name):\n f_path = os.path.join(LIB_HOME, 'data', file_name)\n rows = []\n for row in csv.reader(ReadFile(f_path).split('\\n')):\n if row:\n rows.append(row)\n\n return rows[1:]", "def get_fileName(path):\n fileName = input('Select data file from ' + ','.join(os.listdir(path)) + ' ')\n return fileName", "def read_csv(self, path):\n for file in os.listdir(path):\n if file[-4:] == \".csv\":\n name = file[:-4]\n table_index_header = cfg.get_list(\"table_index_header\", name)\n filename = os.path.join(path, file)\n self.input_data[name] = pd.read_csv(\n filename,\n index_col=list(range(int(table_index_header[0]))),\n header=list(range(int(table_index_header[1]))),\n squeeze=(\"series\" not in name),\n )\n self.check_input_data(warning=False)\n self.add_meta_data()\n return self" ]
[ "0.82250005", "0.64448506", "0.6438859", "0.638522", "0.635775", "0.62658775", "0.6196743", "0.60490125", "0.60278124", "0.5936994", "0.59059805", "0.58804125", "0.58804125", "0.58078647", "0.5763088", "0.5756751", "0.5756751", "0.5756751", "0.5755957", "0.57534754", "0.5739853", "0.5739853", "0.5702425", "0.56916463", "0.5674672", "0.55689496", "0.55672044", "0.5558292", "0.54959595", "0.5495428", "0.54942226", "0.5487731", "0.54369605", "0.54339933", "0.5421498", "0.5414352", "0.5413477", "0.5404914", "0.53892916", "0.53525424", "0.5322537", "0.53173774", "0.5314697", "0.527982", "0.5272149", "0.5260367", "0.5240882", "0.52301556", "0.522591", "0.52058375", "0.52058035", "0.51938224", "0.51838976", "0.5179638", "0.51745564", "0.5168145", "0.5150949", "0.5150949", "0.5147741", "0.51249504", "0.510854", "0.51023006", "0.5087305", "0.50843275", "0.507984", "0.5078831", "0.50755584", "0.50755066", "0.5074594", "0.50725037", "0.5070831", "0.5060197", "0.5053098", "0.50491303", "0.5037488", "0.5029772", "0.502311", "0.50048167", "0.5001686", "0.50012314", "0.49877167", "0.4986978", "0.4985742", "0.4981695", "0.49804568", "0.49785313", "0.49778783", "0.49771065", "0.49732172", "0.49701604", "0.49615008", "0.49562114", "0.49523705", "0.49511877", "0.495024", "0.49375266", "0.49372843", "0.49349436", "0.49282384", "0.49151888" ]
0.7574934
1
Turns the CSV into a workable dictionary.
def _csv_to_dict(name): csv_path = _get_csv_path(name) result = [] with open(csv_path) as csvfile: reader = csv.DictReader(csvfile) for row in reader: result.append(row) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def csv_to_dict(self):\n log = logger.configure(\"default\")\n try: \n df = pd.read_csv(self.__csv_path)\n except IOError as e:\n # file not found\n log.error('Could not import {}. Got error {}'.format(self.__csv_path, e))\n raise \n else:\n cols = list(df.columns)\n metafield_cols = [col for col in cols if 'metafields' in col]\n if metafield_cols == [] or 'Handle' not in cols:\n # relevant columns don't exist\n log.error('{} does not contain `Handle` or `metafields` named columns'.format(self.__csv_path))\n raise\n else:\n new_cols = ['Handle'] + metafield_cols\n df = df[new_cols].set_index('Handle')\n df = df[~df.index.duplicated(keep='first')]\n return df.to_dict('index')", "def creating_dict_from_csv(self) -> dict:\n dictionary = {}\n for row in self.__read_csv():\n if dictionary.get(row[0]):\n dictionary[row[0]].append((row[1], row[2]))\n else:\n dictionary[row[0]] = [(row[1], row[2])]\n\n for key, value in dictionary.items():\n dictionary[key] = sorted(value, key=lambda x: x[1], reverse=True)\n\n return dictionary", "def csv_to_dict(filename):\n\twith open(filename, 'r') as in_hndl:\n\t\tindict = [i for i in csv.DictReader(in_hndl)]\n\treturn indict[0]", "def _convert_csv_column_to_dict(csv_data, column):\n results = dict()\n\n for row in csv_data:\n key = row[0]\n data = row[1:][column]\n\n if data:\n if key not in results:\n results[key] = data.strip() if data else \"\"\n else:\n # append multiple choice questions\n results[key] += \"|{0}\".format(data.strip())\n\n return results", "def read_csv_as_dicts(csv_input_file_name):\n input_table = read_csv_as_table(csv_input_file_name, skip_first_line=False)\n\n # first line should contain headers\n header = input_table[0]\n # rest lines would contain actual data\n data = input_table[1:]\n\n output = []\n # process all lines with data\n for input_line in data:\n record = {}\n for i in range(len(header)):\n record[header[i]] = input_line[i]\n output.append(record)\n return output", "def csv2dict(filename):\n dis_dict = {}\n with open(filename) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n el_a = row[\"Element Name\"]\n dis_dict[el_a] = {}\n for entry in row:\n if entry != \"Element Name\":\n dis_dict[el_a][entry] = float(row[entry])\n csvfile.close()\n return dis_dict", "def csv_to_dict(fp):\n import pandas as pd\n df = pd.read_csv(fp, index_col=0, header=None)\n d = df.to_dict(orient='index')\n d = {k: v.values() for k, v in d.iteritems()}\n return d", "def save_csv_into_dictionary(csv_file):\n\n dictionary = OrderedDict()\n with open(csv_file, newline='') as file:\n reader = csv.reader(file)\n for row in reader:\n dictionary[row[0]] = row[1]\n return dictionary", "def make_dict():\n\n problems = {}\n\n with open('problems.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n cc_name = row['cc_name']\n url_link = row['url_link']\n problems[cc_name] = url_link\n\n return problems", "def csv2dicts(csvfile, names=None):\n data = []\n for row_index, row in enumerate(csvfile):\n if row_index == 0:\n if names:\n keys = names\n else:\n keys = row\n print(keys)\n continue\n data.append({key: value for key, value in zip(keys, row)})\n return data", "def csv_to_dict(csvfile, delimiter=\",\", quotechar='\"'):\n reader = csv.DictReader(csvfile, delimiter=delimiter, quotechar=quotechar)\n\n data = {each: [] for each in reader.fieldnames}\n for i, row in enumerate(reader):\n for key, value in row.items():\n data[key].append(value)\n return data", "def parse_trick_ascii(csv_file):\n data_file 
= csv.DictReader(open(csv_file))\n single_run_data_dict = {'altitude' : [0.0],\n 'latitude' : [0.0],\n 'longitude' : [0.0]}\n # Your code here\n # ...\n # return the dict\n return single_run_data_dict", "def gen_dict():\n lines = [line for line in csv.reader(open(__ppath__ + \"/data/occupations.csv\"))] # uses a csv.reader to parse the file, converts the generic iterable to a list\n lines = [(line[0],float(line[1])) for line in lines[1:-2]]# removes the column names and \"Total\" row, re-expresses as a list of tuples to enable dictionary conversion\n lines.append((\"Unemployed\",0.2)) # accounts for missing 0.2% of jobs\n return dict(lines) # converts to dictionary", "def csv_to_dict(filename):\n data_list = []\n \n with open(filename, 'rb') as datafile:\n data_reader = csv.DictReader(datafile, delimiter = ',')\n for row in data_reader:\n data_list.append(row)\n\n return data_list", "def parse_csv_input_file(input_file):\n with open(input_file) as csvfile:\n reader = csv.DictReader(csvfile)\n for item in reader:\n dict = {i: x for i, x in item.items()}\n yield(dict)", "def csvToDict(filepath):\n data = []\n with open(getcwd() + filepath, 'r') as dataset:\n assert csv.Sniffer().has_header(dataset.read(9999)), 'No headers'\n dataset.seek(0)\n dialect = csv.Sniffer().sniff(dataset.read(99999))\n dataset.seek(0)\n reader = csv.DictReader(dataset, dialect=dialect)\n headers = reader.fieldnames\n for row in reader:\n data.append(row)\n\n data = assert_data_format(data)[0]\n\n return data, headers", "def load_csv(input):\n with open(input['csv'], 'r', encoding=input['encoding']) as f:\n invoice_dict = dict()\n reader = csv.reader(f, delimiter=';')\n\n for row in reader:\n invoice_id = row[0]\n\n if invoice_id in invoice_dict:\n invoice_dict[invoice_id].add_entry(row[1:])\n else:\n invoice_dict[invoice_id] = Invoice(row)\n\n return invoice_dict", "def dictionary_formation():\r\n sales_data = {}\r\n with open('beer_data.csv', \"r\") as data_file:\r\n file_contents = csv.reader(data_file, delimiter=',')\r\n #Value of lines_read used as key value for each dictionary\r\n #in sales_data\r\n lines_read = 1\r\n for line in file_contents:\r\n if lines_read == 1:\r\n lines_read = lines_read + 1\r\n else:\r\n #Stores each column in row as key value in dictionary\r\n sales_data[str(lines_read)] = {\r\n \"invoice_number\": line[0],\r\n \"customer\": line[1],\r\n \"date_required\": line[2],\r\n \"recipe\": line[3],\r\n \"gyle_number\": line[4],\r\n \"quantity_ordered\": int(line[5])\r\n }\r\n lines_read = lines_read + 1\r\n data_file.close()\r\n return sales_data", "def dictparse(csvfilename, keyfield, separator, quote, quotestrategy):\n table = {}\n with open(csvfilename, \"rt\", newline='') as csvfile:\n csvreader = csv.DictReader(csvfile,\n skipinitialspace=True,\n delimiter=separator,\n quotechar=quote,\n quoting=quotestrategy)\n for row in csvreader:\n table[row[keyfield]] = row\n return table, csvreader.fieldnames", "def dictparse(csvfilename, keyfield, separator, quote, quotestrategy):\n table = {}\n with open(csvfilename, \"rt\", newline='') as csvfile:\n csvreader = csv.DictReader(csvfile,\n skipinitialspace=True,\n delimiter=separator,\n quotechar=quote,\n quoting=quotestrategy)\n for row in csvreader:\n table[row[keyfield]] = row\n return table, csvreader.fieldnames", "def load_csv_to_dict(filename):\n row_len = list()\n result = dict()\n with open(filename, 'r') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n key = row[0].strip()\n values = [v.strip() for v in row[1:]]\n 
result[key] = values\n row_len.append(len(values))\n return result, max(row_len)", "def read_into_dictionary(input_file):\n logger.debug(\"%s %s (%s)...\" % (LOG_INDENT, inspect.stack()[0][3], input_file))\n\n input_file_suffix = (pathlib.Path(input_file).suffix)\n ret_dict = {}\n if input_file_suffix == '.csv':\n logger.debug(\"%s opening file [%s]\" % (LOG_INDENT,input_file))\n reader = csv.reader(open(input_file, 'r'))\n for row in reader:\n # read in and strip of comments / blank lines etc..\n variable_name = row[0].strip()\n variable_value = row[1].strip()\n if not variable_name:\n continue\n if variable_name.startswith('#') or variable_value.startswith('#'):\n continue\n logger.debug(\"%s %s=%s\" % (LOG_INDENT,variable_name,variable_value))\n # save in dictionary\n ret_dict[variable_name] = variable_value\n return ret_dict", "def chunk_to_dict(chunk):\n csv_cols = chunk.keys()\n return [dict(zip(csv_cols, v)) for v in chunk.values]", "def dictparse(csvfilename, keyfield):\n table = {}\n with open(csvfilename, \"rt\", newline='') as csvfile:\n csvreader = csv.DictReader(csvfile,\n skipinitialspace=True)\n for row in csvreader:\n table[row[keyfield]] = row\n return table", "def read_sailor_data(filename):\n\td=OrderedDict()\n\twith open(filename) as csvfile:\n\t\trdr = csv.reader(csvfile)\t\n\t\tfor i in rdr:\n\t\t\t#This except is so that if the line trying to be inputted into the dictionary is a string\n\t\t\t#It will ignore it and go to the next line\n\t\t\ttry: d[i[0]]=(float(i[1]),float(i[2]))\n\t\t\texcept: None\n\treturn d", "def _properties_from_csv_row(row, header, ignored_columns):\n props = {}\n for h, prop in enumerate(header):\n # Handle a strange edge case where the length of the row is longer than the length of the header.\n # We do this to prevent an out of range error.\n x = h\n if x > len(row) - 1:\n x = len(row) - 1\n if row[x] == '' or prop in ignored_columns:\n continue\n else:\n try:\n # We use literal_eval() here to de-stringify numbers, lists and objects in the CSV data\n p = literal_eval(row[x])\n props[prop] = p\n except (SyntaxError, ValueError) as e:\n props[prop] = row[x]\n return props", "def create_waves_dict(csv_file):\n with open(csv_file) as file:\n reader = csv.DictReader(file)\n waves_dict = {row[\"Date\"]: row[\"Wave Height\"] for row in reader}\n return waves_dict", "def DictData(self):\n reader = csv.DictReader( open( self.file, \"rU\" ), dialect = \"excel\" )\n return reader", "def loadData(self, aircraftCSV='aircraft.csv'):\n aircraftDict = {}\n \n with open(aircraftCSV, 'r') as f:\n reader = csv.reader(f, delimiter=',')\n for line in reader:\n #if imperial convert to metric\n if line[2] == 'imperial':\n range = float(line[4]) * 8 / 5\n else:\n range = float(line[4])\n aircraftDict[line[0]] = [line[1], line[3], range]\n self.aircraftDict = aircraftDict", "def get_rows(csv):\n\n labels = csv[0][2:].split(',')\n\n # Convert each row into a hash of label: value\n return [dict(zip(labels, row.split(','))) for row in csv[1:]]", "def read_strong_csv(strong_meta_csv_path):\n with open(strong_meta_csv_path, 'r') as fr:\n reader = csv.reader(fr, delimiter='\\t')\n lines = list(reader)\n \n meta_dict = {}\n for line in lines:\n [audio_name, begin_time, end_time, label] = line\n meta = {'begin_time': begin_time, 'end_time': end_time, 'label': label}\n if audio_name in meta_dict:\n meta_dict[audio_name].append(meta)\n else:\n meta_dict[audio_name] = [meta]\n \n return meta_dict", "def tsvRowToDict(row):\n return {col: getattr(row, col) for col in 
row._columns_}", "def read_risposte_from_csv(csv_risposte):\n import os\n try:\n csv_in = open(os.path.join(os.path.dirname(__file__), csv_risposte), 'rb')\n r_reader = csv.DictReader(csv_in, delimiter=',')\n except IOError:\n print \"It was impossible to open file %s\" % csv_risposte\n exit(1)\n\n risposte_partiti = {}\n for row in r_reader:\n partito_id = int(row['partito_id'])\n domanda_id = int(row['domanda_id'])\n risposta = int(row['risposta_int'])\n if not partito_id in risposte_partiti:\n risposte_partiti[partito_id] = {}\n risposte_partiti[partito_id][domanda_id] = risposta\n\n return risposte_partiti", "def extract(input):\n reader = csv.DictReader(input)\n return reader", "def read_dictionary(filename):\r\n dictionaryoutput = {}\r\n with open(filename) as file:\r\n entries = csv.reader(file)\r\n for item in entries:\r\n dictionaryoutput[item[0]] = item[1]\r\n return dictionaryoutput", "def parse_csv_input(input_file): # {{{\n parsed_infile = []\n try:\n with open(input_file) as infile:\n for line in csv.reader(infile):\n parsed_infile.append(line)\n\n temp_object_storage = []\n\n for line_index, line in enumerate(parsed_infile[1:]):\n temp_object_storage.append({})\n for category_index, category in enumerate(parsed_infile[0]):\n if category_index == 0:\n category = category[3:]\n temp_object_storage[line_index][category] = line[category_index]\n\n return temp_object_storage\n except FileNotFoundError as excep:\n LOGGER.info(\"error parsing csv file: %s\", excep) # }}}", "def load_data(self):\n df = pandas.read_csv(self.path)\n self.data_dict = df.to_dict(orient=\"list\")\n return self.data_dict", "def get_data():\n data = {}\n with open(app.config['DATA_CSV'], 'r') as csvfile:\n presence_reader = csv.reader(csvfile, delimiter=',')\n for i, row in enumerate(presence_reader):\n if len(row) != 4:\n # ignore header and footer lines\n continue\n\n try:\n user_id = int(row[0])\n date = datetime.strptime(row[1], '%Y-%m-%d').date()\n start = datetime.strptime(row[2], '%H:%M:%S').time()\n end = datetime.strptime(row[3], '%H:%M:%S').time()\n except (ValueError, TypeError):\n log.debug('Problem with line %d: ', i, exc_info=True)\n\n data.setdefault(user_id, {})[date] = {'start': start, 'end': end}\n return data", "def open_client_file_to_dict():\n clients_dict = []\n file = open(r'../clientmailerproj/client.csv', encoding='utf-8-sig')\n client_ordered_dict = csv.DictReader(file)\n for row in client_ordered_dict:\n clients_dict.append({\n 'First': row['First Name'],\n 'Last': row['Last Name'],\n 'Company': row['Account Name'],\n 'Email': row['Email'],\n 'Job': row['Job']\n })\n return clients_dict", "def read_names_into_dict():\n d = dict()\n with open(\"SP_500_firms.csv\") as csvfile:\n input_file = csv.DictReader(csvfile)\n for row in input_file:\n #print(row)\n d[row['Symbol']] = [row['Name'],row['Sector']]\n return d", "def readCSV(filename, separator):\n \n filetoread = open(filename, \"r\")\n lines = []\n for line in filetoread:\n line = line.replace(\"\\n\", \"\").split(separator)\n lines.append(line)\n keys, values = lines[0], lines[1]\n dictionary = {}\n for i in range(0,len(keys)):\n try:\n dictionary[keys[i]] = int(values[i])\n except:\n dictionary[keys[i]] = values[i]\n return dictionary", "def csv_to_dict_list(file_path, char_sep=\"|\"):\n with open(file_path, mode='r') as f:\n d = [{k: v for k, v in row.items()} for row in csv.DictReader(f, skipinitialspace=True, delimiter=char_sep)]\n return d", "def csvObj():\n CSV_URL = 
\"http://unitedstates.sunlightfoundation.com/legislators/legislators.csv\"\n s = requests.get(CSV_URL) # Download the csv using requests.\n reader = csv.DictReader(s.text.splitlines(), lineterminator=\"\\n\") # Use the dictreader to make a dictionary with the attribute name paired with the rows value for that attribute.\n name2twitter_id = {}\n for row in reader:\n if (row['in_office'] == \"1\" and row['twitter_id'] != \"\"):\n name = row['firstname'] + \" \" # Construct the name.\n if (row['middlename'] != \"\"): # Not all names have middle names.\n name += row['middlename'] + \" \"\n name += row['lastname']\n name2twitter_id[name] = row['twitter_id'] # Assign the name to their handle.\n del name2twitter_id[\"Tim Murphy\"] # This representative does not have an active twitter handle. \n name2twitter_id[\"Gregory W. Meeks\"] = \"RepGregoryMeeks\" # Insert this representatives twitter handle manually.\n return name2twitter_id", "def parse_csv(data):\n\n # scan for CSRs first, so it's easier to resolve CSR-related constants\n # in the second pass\n for _type, _name, _address, _, __ in data:\n if _type == 'csr_base':\n peripherals[_name] = {'name': _name,\n 'address': _address,\n 'constants': {}}\n\n for _type, _name, _val, _val2, _ in data:\n if _type == 'csr_base':\n # CSRs have already been parsed\n pass\n elif _type == 'csr_register':\n # we are currently not interested in this\n pass\n elif _type == 'constant':\n found = False\n for _csr_name in peripherals:\n if _name.startswith(_csr_name):\n local_name = _name[len(_csr_name)+1:]\n peripherals[_csr_name]['constants'][local_name] = _val\n found = True\n break\n if not found:\n # if it's not a CSR-related constant, it must be a global one\n constants[_name] = {'name': _name, 'value': _val}\n elif _type == 'memory_region':\n mem_regions[_name] = {'name': _name,\n 'address': _val,\n 'size': _val2}\n else:\n print('Skipping unexpected CSV entry: {} {}'.format(_type, _name))", "def getCityCodeDict():\n \n dictionary = {}\n for input in open(filename1,'r'):\n if input:\n input = input.rstrip() # remove the newline\n input = input.replace('\"','') # replace double quotes with null\n input = input.split(',') # split at the comma \n airport = airlineClasses.Airport() # create new object\n airport.cityCode = input[0] # assign into new object\n airport.city = input[1]\n dictionary[airport.cityCode] = airport # store in dictionary\n return dictionary", "def load_csv(input_filename_state):\n dataset = {}\n with open(input_filename_state) as f:\n reader = csv.reader(f)\n header = next(reader, None)\n\n location_col = -1\n week_ahead_col = -1\n quantile_col = -1\n value_col = -1 \n\n\n for i in range(len(header)):\n if header[i] == \"place\":\n location_col = i\n elif header[i] == \"week_ahead\":\n week_ahead_col = i\n elif header[i] == \"quantile\":\n quantile_col = i \n elif header[i] == \"value\":\n value_col = i\n \n for row in reader:\n state = row[location_col]\n\n # Skip the state if it is not listed in reichlab's state list.\n if state not in STATE_ID_MAPPING:\n continue\n state_id = STATE_ID_MAPPING[state]\n week_ahead = int(row[week_ahead_col])\n quantile = row[quantile_col]\n val = max(float(row[value_col]), 0)\n if week_ahead not in dataset:\n dataset[week_ahead] = {}\n if state_id not in dataset[week_ahead]:\n dataset[week_ahead][state_id] = {}\n dataset[week_ahead][state_id][quantile] = val\n return dataset", "def jobsAsDict (string, nbTrucks):\n \n dictio = {'date': string[7:18]} \n \n # Configuration for CSV reading\n with 
open('/Users/Louis/Documents/Research/Code/cleanedData/'+string) as csvfile:\n # Dictionary containing the info\n reader = csv.DictReader(csvfile,delimiter = ',')\n \n # Keep track of numbers\n clusterNb = 0 # sequences\n deliveryNb = 1 \n pickUpNb = 1\n \n dictSeq = {'Init' : [None,None,None]}\n \n for row in reader:\n \n if row['StopOrder'] == '1' : # start of a new sequence\n \n \n dictio['cluster_'+str(clusterNb)] = dictSeq.copy()\n clusterNb += 1\n if clusterNb == nbTrucks+1:\n break\n # Initialization\n\n dictSeq.clear()\n deliveryNb = 1\n pickUpNb = 1 \n \n if row['ReadyTimePickup'] == 'N/A': # type: delivery\n \n dictSeq['delivery_'+str(deliveryNb)] = [row['Address'],\\\n row['Longitude'],row['Latitude']]\n deliveryNb += 1\n else:\n \n dictSeq['pickUp_'+str(pickUpNb)] = [row['Address'],\\\n row['Longitude'],row['Latitude'],row['ReadyTimePickup'],row['CloseTimePickup']]\n pickUpNb += 1\n else:\n \n if row['ReadyTimePickup'] == 'N/A': # type: delivery\n \n dictSeq['delivery_'+str(deliveryNb)] = [row['Address'],\\\n row['Longitude'],row['Latitude']]\n deliveryNb += 1\n else:\n \n dictSeq['pickUp_'+str(pickUpNb)] = [row['Address'],\\\n row['Longitude'],row['Latitude'],row['ReadyTimePickup'],row['CloseTimePickup']]\n pickUpNb += 1\n\n \n return dictio", "def load(filename):\n with open(filename,'r') as fd:\n csv_in = csv.reader(fd, delimiter=',', quotechar='\"')\n keys = csv_in.next()\n data = {k:[] for k in keys}\n for row in csv_in:\n for k,v in zip(keys,row):\n data[k].append(float(v))\n return data", "def load_from_file_csv(cls):\n fields = []\n rows = []\n new_dict = {}\n new_list = []\n key = \"\"\n filename = cls.__name__ + \".csv\"\n with open(filename) as fp:\n reader = csv.reader(fp)\n fields = next(reader)\n for row in reader:\n rows.append(row)\n for row in rows:\n i = 0\n new_dict = new_dict.fromkeys(fields)\n for attr in fields:\n key = fields[i]\n value = row[i]\n new_dict[key] = value\n i += 1\n new_list.append(cls.create(**new_dict))\n return new_list", "def get_numbers_dict(input_file):\n numbers_dict = {}\n with open(input_file) as f:\n reader = csv.reader(f)\n\n for name, number in reader:\n # check phone number is valid\n assert len(number) == 13\n assert number.startswith('+44')\n\n # add to dict\n numbers_dict[name] = number\n\n return numbers_dict", "def load_csv(file):\n with open(file) as csvfile:\n reader = csv.DictReader(csvfile)\n return [dict(row) for row in reader]", "def csv_dict_reader(file_obj):\n #import re\n #file = open(file_obj)\n\n # reader = csv.DictReader(file_obj)\n # for line in reader:\n # print(line[\"Name\"])", "def read_2tuple_dictionary(filename):\r\n dictionaryoutput = {}\r\n with open(filename) as file:\r\n entries = csv.reader(file)\r\n for item in entries:\r\n # use tuple of company (i.e., VEST01, etc) and item\r\n # companies have different prices\r\n dictionaryoutput[(item[0], item[1])] = item[2]\r\n return dictionaryoutput", "def load_data(filename):\n data = dict()\n with open(filename) as f:\n reader = csv.DictReader(f)\n for row in reader:\n name = row[\"name\"]\n data[name] = {\n \"name\": name,\n \"mother\": row[\"mother\"] or None,\n \"father\": row[\"father\"] or None,\n \"trait\": (True if row[\"trait\"] == \"1\" else\n False if row[\"trait\"] == \"0\" else None)\n }\n return data", "def make_dict(row):\n return dict((key[0], value) for key, value in zip(colnames, row))", "def read_file(file):\n \n dictionary = {}\n csv_fp = csv.reader(file)\n #L[46] = manufacturer, L[63] = year\n #L[4]= city mileage, L[34]=highway mileage\n 
for line in csv_fp:\n #Skip the headings and the year 2017\n if (not (line[46] == 'make')) and (not (line[63] == '2017')):\n if line[46] in dictionary:\n #Add the city and highway mileage if the year has been made\n if line[63] in dictionary[line[46]]:\n dictionary[line[46]][line[63]][0] += [int(line[4])]\n dictionary[line[46]][line[63]][1] += [int(line[34])]\n #Add the year and data if it was not made previously\n else:\n dictionary[line[46]][line[63]] = [[int(line[4])],\\\n [int(line[34])]]\n #Adds a new manufacturer\n else:\n dictionary[line[46]] = {line[63]:[[int(line[4])],\\\n [int(line[34])]]}\n return dictionary", "def load_data(filename):\n data = dict()\n with open(filename) as f:\n reader = csv.DictReader(f)\n for row in reader:\n name = row[\"name\"]\n data[name] = {\n \"name\": name,\n \"mother\": row[\"mother\"],\n \"father\": row[\"father\"],\n \"trait\": (True if row[\"trait\"] == \"1\" else\n False if row[\"trait\"] == \"0\" else None)\n }\n return data", "def _process_csv_data(csv_file, user_data_map):\n with open(csv_file, 'r') as csvfile:\n rows = csv.reader(csvfile)\n for row in rows:\n if len(row) < 2:\n print('The CSV file is not in expected format.')\n raise Exception\n user_data_map[row[1].lower()] = row[0]", "def csv_dict_reader(file_path):\r\n with open(file_path, 'r') as file_obj:\r\n\r\n reader = csv.DictReader(file_obj, delimiter=',')\r\n for line in reader:\r\n #print(line[\"variable_name \"]),\r\n print(line[\"dataset\"])", "def read_dic():\n # should return the original format\n dic = OrdDic()\n dic.update(json.load(open(\"resources/files/serials.csv\", \"r\")))\n\n\n # OLD CODE\n # logging.log(logging.INFO, \"File path: \"+os.path.realpath(__file__))\n # r = reader(open(\"resources/files/serials.csv\", \"r\", newline=\"\\n\"))\n # i = 0\n # for row in r:\n # if i:\n # inner_dic = OrdDic()\n # for serial in row[1].split(';;,,,;;'):\n # serial = serial.split(';;:::;;')\n # sub_dic = OrdDic()\n # for sub_serial in serial[1].split(';;!!!;;'):\n # sub_serial = sub_serial.split(\";;@@;;\")\n # if sub_serial[0] == 'options':\n # options = sub_serial[1].split(\";;##;;\")\n # sub_dic.update({sub_serial[0]: options})\n # else:\n # sub_dic.update(\n # {sub_serial[0]: sub_serial[1]})\n # inner_dic.update({serial[0]: sub_dic})\n # # lst = row[1].split('\\\\')\n # dic.update({row[0]: inner_dic})\n # else:\n # i += 1\n # # print(\" * Read Dictionary\")\n return dic", "def import_and_clean():\n \n with open(\"inventory.csv\", newline=\"\") as csvfile:\n inventory = csv.DictReader(csvfile)\n rows = list(inventory)\n\n for row in rows:\n row[\"product_price\"] = row[\"product_price\"].replace(\"$\", \"\")\n row[\"product_price\"] = row[\"product_price\"].replace(\".\", \"\")\n row[\"product_price\"] = int(float(row[\"product_price\"]))\n row[\"date_updated\"] = datetime.datetime.strptime(row[\"date_updated\"], \"%m/%d/%Y\")\n row[\"product_quantity\"]= int(row[\"product_quantity\"])\n \n return rows", "def load_and_transpose_csv(metrics_csv):\n models = []\n with open(metrics_csv,'r') as f:\n reader = csv.reader(f)\n fields = next(reader)\n fields = fields[1:] # remove \"Dataset\"\n out_dict = dict.fromkeys(fields,{})\n for row in reader:\n model_name = row[0]\n models.append(model_name)\n for i,item in enumerate(row[1:]):\n tmp = out_dict[fields[i]].copy()\n tmp[model_name] = item\n out_dict[fields[i]] = tmp.copy()\n return out_dict", "def getRiverIDs(lookupCsv):\n\n d = {}\n with open(lookupCsv, \"rb\") as f:\n reader = csv.reader(f)\n\n # Discard header row\n 
reader.next()\n\n for row in reader:\n d[row[0]] = row[1]\n\n return d", "def _read_classes(csv_reader):\n result = {}\n for line, row in enumerate(csv_reader):\n line += 1\n\n try:\n class_name, class_id = row\n except ValueError:\n raise_from(ValueError('line {}: format should be \\'class_name,class_id\\''.format(line)), None)\n class_id = _parse(class_id, int, 'line {}: malformed class ID: {{}}'.format(line))\n\n if class_name in result:\n raise ValueError('line {}: duplicate class name: \\'{}\\''.format(line, class_name))\n result[class_name] = class_id\n return result", "def read_relevance_from_csv(corpus):\n filename = config.CORPUS[corpus]['relevance_file']\n relevance_dict = dict()\n if os.path.exists(filename):\n print('reading from relevance csv')\n with open(filename, 'r') as data_file:\n reader = csv.reader(data_file)\n for row in reader:\n relevance_dict[row[0]] = (ast.literal_eval(row[1]), ast.literal_eval(row[2]))\n return relevance_dict\n\n return {}", "def fields_to_dict(lines, delim='\\t', strip_f=strip):\r\n result = {}\r\n for line in lines:\r\n # skip empty lines\r\n if strip_f:\r\n fields = map(strip_f, line.split(delim))\r\n else:\r\n fields = line.split(delim)\r\n if not fields[0]: # empty string in first field implies problem\r\n continue\r\n result[fields[0]] = fields[1:]\r\n return result", "def readGmfCsv(gmfCsv):\n\n d = collections.defaultdict(dict)\n with open(gmfCsv, \"rb\") as f:\n reader = csv.reader(f)\n for row in reader:\n if row[0] in (\"file\", \"database\", \"station\", \"dataType\", \"data\"):\n d[row[0]].update({row[1]: row[2]})\n else:\n d[\"gmf\"].update({row[0]: row[1]})\n\n return d", "def _row_to_dict(row, fields):\n dict_row = {}\n for i, value in enumerate(row):\n key = fields[i]\n if value and str(value).lower() == 'nan':\n value = None\n dict_row[key] = value\n return dict_row", "def dict_from_file(path, key='id', dialect='excel-tab'):\n if not os.path.exists(path):\n raise ValueError(\"File not found: {}\".format(path))\n reader = csv.DictReader(open(path), dialect=dialect)\n return dict([(x[key], x) for x in reader])", "def read_csv(product_name=str, directory=DIRS['EOIR_DATA_DIR']):\n filename = ('%s.csv' % product_name)\n path = get_dir(os.path.join(directory, filename))\n with io.open(path, mode='r', encoding='utf-8-sig') as f:\n spec_dict = {}\n filtered = (line.replace(\"\\n\", '') for line in f) # Removes \\n from the created as a byproduct of encoding\n for line in filtered:\n field, value = line.split(',')\n if has_number(value) and value.find('\"') == -1:\n if value.find('x') != -1:\n if value.find('.') != -1:\n value = [float(i) for i in value.split('x')]\n else:\n value = [int(i) for i in value.split('x')]\n else:\n value = float(value)\n else:\n value = value.replace('\"', '')\n if value.find('/') != -1:\n value = [str(i) for i in value.split('/')]\n elif (value.lower()).find('true') != -1:\n value = True\n elif (value.lower()).find('false') != -1:\n value = False\n else:\n value = str(value)\n spec_dict['%s' % str(field)] = value\n f.close()\n return spec_dict", "def parse_csv_row(self, row):\n\n for key in self.field_map:\n if self.field_map[key] is not None:\n if key == 'marking':\n self.obstacle_data[key] = self.get_marking_value(row[self.field_map[key]].strip())\n elif key == 'lighting':\n self.obstacle_data[key] = self.get_lighting_value(row[self.field_map[key]].strip())\n elif key == 'obst_type':\n self.obstacle_data['obst_type_id'] = self.get_obstacle_type_id(row[self.field_map[key]].strip())\n else:\n 
self.obstacle_data[key] = row[self.field_map[key]].strip()", "def _read_csv_to_dictionary_list(file_name):\n catalog_list = []\n with open(file_name) as csvfile:\n reader = csv.DictReader(csvfile)\n for item in reader:\n catalog_list.append(item)\n return catalog_list", "def file_to_dictionary():\n\n return;", "def read_reverse_dictionary(filename):\r\n dictionaryoutput = {}\r\n with open(filename) as file:\r\n entries = csv.reader(file)\r\n for item in entries:\r\n dictionaryoutput[item[1]] = item[0]\r\n return dictionaryoutput", "def csv_dict_reader(file_obj, data = [], cost = []):\n reader = csv.DictReader(file_obj, delimiter=',')\n for line in reader:\n data.append(line[\"ะ”ะฐั‚ะฐ\"]),\n cost.append(line[\"ะ ะฐัั…ะพะด\"])", "def read_file_convert_dict(file: str) -> dict:\n states_code = pd.read_csv(file)\n states_code = states_code.set_index('abbreviation')\n dict_y = states_code['state'].to_dict()\n return dict_y", "def readDB():\n if not os.path.exists(filenameDB):\n return { }\n \n with open(filenameDB, \"r\") as csvfile:\n rows = csv.reader(csvfile)\n if rows:\n db = { }\n for r in rows:\n if len(r)==2 and isinstance(r[0],str) and isinstance(r[1],str):\n db[r[1]] = r[0]\n return db\n return { }", "def cart_from_csv(csv_file_path):\n prices = {}\n with open(csv_file_path) as csvfile:\n for i, row in enumerate(csv.reader(csvfile, delimiter=',')):\n if len(row) != 2:\n raise MalformedCSV('Each CSV row should contain exactly 2'\n ' rows, not %s. -> name,price')\n prices[utf8(row[0])] = float(row[1])\n return Cart(prices)", "def getCouponDict(coupon_file):\n file_handle = open(coupon_file,'rb')\n file_reader = csv.DictReader(file_handle)\n\n counter = 0\n coupon_dict = {}\n for row in file_reader:\n coupon_dict[row['COUPON_ID_hash']] = row\n counter += 1\n assert len(coupon_dict.keys()) == counter\n\n file_handle.close()\n return coupon_dict", "def _read_input(self, in_file):\n result = {}\n with open(in_file, \"r\") as f:\n reader = csv.DictReader(f, delimiter=str(\"\\t\"))\n for row in reader:\n result[row[\"accession\"]] = {\n \"transcript_sequence\": row[\"transcript_sequence\"],\n \"cds_start_i\": int(row[\"cds_start_i\"]),\n \"cds_end_i\": int(row[\"cds_end_i\"]),\n }\n\n return result", "def _raw_misc_to_dict(raw):\n ret = {}\n for elem in raw:\n key, _, val = elem.partition(',')\n key = key.lstrip(\"(\").strip()\n val = val[:-1].strip()\n ret[key] = val\n return ret", "def read_name_map( name_map_path) :\n with open( name_map_path, newline=\"\") as csvfile:\n table = { }\n reader = csv.reader(csvfile)\n for row in reader:\n if len(row) < 2:\n continue\n if row[key_col] == key_header:\n continue\n key = row[key_col]\n val = row[val_col]\n table[key] = val\n return table", "def read_csv_file(file_name):\n csv_file = open(file_name, 'rb')\n rd = csv.reader(csv_file, delimiter=';',quoting=csv.QUOTE_ALL)\n\n nb_rd_rows = 0\n current_wine = {}\n columns_name = [\"name\",\"vintage\",\"appellation\",\"color\",\"wine_id\",\"item_id\",\"price\",\"degustation\",\"food_pairing\",\"food_pairing_french\",\"gws\"]\n for row in rd:\n current_object = {}\n nb_column = 0\n nb_rd_rows = nb_rd_rows + 1\n if nb_rd_rows != 1:\n for current_element in row:\n #current_element = current_element.replace(\"e\",\"e\")\n #current_element = current_element.replace(\"a\",\"a\")\n current_element = current_element.lower()\n to_add = []\n # Case of float\n if current_element[0] >= \"0\" and current_element[0] <= \"9\":\n to_add.append(adapt_type_number(current_element))\n else:\n to_add = 
current_element.split(',')\n for nb in range(len(to_add)):\n if '(' in to_add[nb]:\n if nb >= 1:\n special_values = to_add[nb].split(\"(\")\n to_add[nb] = special_values[0]\n to_add[nb].append(special_values[1])\n else:\n to_add[nb] = to_add[nb].replace(\"(\",\"\")\n elif ')' in to_add[nb]:\n to_add[nb] = to_add[nb].replace(\")\",\"\")\n add[nb] = add[nb].strip()\n if len(to_add) > 1:\n current_object[columns_name[nb_column]] = to_add\n elif len(to_add) == 1:\n current_object[columns_name[nb_column]] = to_add[0]\n nb_column = nb_column + 1\n if nb_rd_rows == 2:\n #current_request = re.post(API_BASE_URL, json=current_object)\n print(\"DEBUG LINE \" + str(nb_rd_rows) +\"\\n\")\n print(json.dumps(current_object))\n print(\"\\n\\n Current request:\\n\")\n #print(current_request.reason)\n print(\"\\n\\n\")", "def parsecsv(game):\n filepath = os.path.join(\n os.path.dirname(__file__),\n 'sources',\n 'csv',\n '%s.csv' % (game,)\n )\n\n with open(filepath) as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n chips = {}\n\n for row in reader:\n chip = {\n 'indice': row[0],\n 'indice_game': row[1],\n 'game': game,\n 'name': row[2],\n 'name_jp': row[3],\n 'codes': set(list(row[4])),\n 'damage': row[5],\n 'element': row[6],\n 'rarity': row[7],\n 'size': row[8],\n 'classification': row[9].lower()\n }\n\n if chip['indice_game'] == '?' or chip['classification'] == 'pa':\n continue\n\n try:\n chip['version'] = row[10].lower()\n\n if chip['version'] == 'both':\n chip['version'] = ''\n except IndexError:\n # No versions pre-BN3\n chip['version'] = ''\n\n # Instead of adding symbols or other non-cohesive data, we'll\n # either fill in the fields or leave them blank.\n if chip['size'] == '-':\n chip['size'] = ''\n\n if chip['rarity'] == '?':\n chip['rarity'] = 5\n\n if chip['size'] == '?':\n chip['size'] = 99\n\n if chip['indice_game'] in set(['??', '???']):\n chip['indice_game'] = ''\n\n if game in set(['bn1', 'bn2']):\n if chip['classification'] == 'oss':\n # OSS chips are excluded.\n continue\n\n chip['classification'] = 'standard'\n\n chip['indice'] = _create_indice(chip)\n\n chip_key = _create_key(chip)\n chips[chip_key] = chip\n\n return chips", "def load_csv(file):\n import csv\n reader = csv.reader(open(file, 'r'))\n columns = reader.next()\n c2i = dict((columns[i], i) for i in range(len(columns)))\n data = {}\n excluded = set([REP_CSV_HED_TIM, REP_CSV_HED_HER])\n for row in reader:\n \n # get relevant info from the line\n time = float(row[c2i[REP_CSV_HED_TIM]])\n hero = row[c2i[REP_CSV_HED_HER]]\n other = dict((c, REP_CSV_HANDLERS.get(c, REP_CSV_DEFHANDLER)(row[c2i[c]])) for c in columns if c not in excluded)\n \n # add to the data dictionary\n if hero not in data: data[hero] = []\n data[hero].append([time] + [other])\n \n return data", "def read_server_csv_file(fname):\n data = {}\n with open(fname) as csv_data:\n csv_reader = csv.reader(csv_data)\n row_num = 0\n for row in csv_reader:\n row_num += 1\n if row[0] == 'hostname' and row_num == 1:\n continue # ignore first line if first field looks like header\n # no leading/trailing spaces in hostnames\n row[0] = row[0].strip()\n data[row[1]] = {'hostname': row[0],\n 'serial': row[1],\n 'ip': row[2],\n 'netmask': row[3],\n 'gateway': row[4]}\n return data", "def read_csv(path: str) -> list[dict[str, str]]:\n with open(path, 'r') as f:\n return list(csv.DictReader(f))", "def _load_dict(infile):\n\n # read the data into a list\n data = []\n\n # open the file\n f = open(infile)\n\n for line in f:\n # ignore hashed lines\n if not 
line.startswith('#') and not line.startswith('@'):\n\n # mind to strip newlines\n data.append(line.strip('\\n\\r').split('\\t'))\n \n # create the dictionary in which the data will be stored\n d = {}\n\n # check for first line, if a local ID is given in the header (or simply\n # \"ID\"), take this line as the ID, otherwise create it\n if data[0][0].lower() in ['local_id','localid']:\n local_id = True\n else:\n local_id = False\n\n # iterate over data and fill the dictionary (a bit inefficient, but enough\n # for the moment)\n i = 1\n for line in data[1:]:\n if local_id:\n d[int(line[0])] = line[1:]\n else:\n d[i] = line\n i += 1\n\n # assign the header to d[0]\n if local_id:\n d[0] = [x.lower() for x in data[0][1:]]\n else:\n d[0] = [x.lower() for x in data[0]]\n\n # return the stuff\n return d", "def parse_csvfile(self, csvfile):\n\n logging.info(\"Parseing csvfile: %s\" % basename(csvfile))\n fields = []\n data = {}\n try:\n with open(csvfile) as f:\n for line in f:\n line = line.strip()\n # Skip empty or commented line\n if not line or line[0] == \"#\":\n continue\n if not fields:\n # The first valid line defines fields.\n fields = [x.strip() for x in line.split(\",\")]\n for f in self.REQUIRED_FIELDS:\n if f not in fields:\n logging.error(\"Failed to find %s field. \"\n \"Aborted.\" % f)\n sys.exit(1)\n else:\n # The rest lines are data\n values = [x.strip() for x in line.split(\",\")]\n record = {}\n for k, v in zip(fields, values):\n record[k] = v\n # Convert date time string to epoch seconds\n record[\"time_h\"] = self.parse_timestr(record[\"time_h\"])\n node = record[\"name\"]\n if data.get(node, None):\n data[node].append(record)\n else:\n data[node] = [record]\n except Exception as e:\n logging.exception(\"Failed to parsing the csvfile. \"\n \"See stack trace below:\")\n sys.exit(1)\n\n # While it didn't occur often, I observed that data in CSV files\n # generated by cbtool monextrac command were not in time order.\n # So sort them.\n logging.debug(\"Sorting the data\")\n for node in data.keys():\n data[node].sort(lambda x, y: cmp(int(x[\"time\"]), int(y[\"time\"])))\n\n return data, fields", "def read_partition_csv(source):\n partition = {}\n # Load in the Partitions from the CSV\n with open(source, mode='r') as partitions_csv:\n csv_reader = csv.DictReader(partitions_csv)\n for row in csv_reader:\n dataset_as_string = row[\"Dataset\"] # Returns Row as a String\n partition[row[\"Partition\"]] = dataset_as_string[2:-2].split(\"', '\")\n \n return partition", "def read_file(filepath: str) -> dict:\n if not filepath.endswith(\".csv\"):\n raise RuntimeError(\"File extension must be .csv\")\n\n people = {}\n with open(filepath) as csv:\n for line in csv:\n email, person = Parser.parse_line(line.rstrip(\"\\n\"))\n if email not in people:\n people[email] = person\n else:\n print(\"Ignoring person with duplicate email {}\".format(email))\n return people", "def read_table_to_dict(fname, typedict=None, row_processor=None, default_type=None, **kwargs):\n if isinstance(fname, basestring):\n data = read_table(fname, **kwargs)\n else:\n data = fname\n colkeys = data[0]\n datadict = collections.OrderedDict()\n for i in xrange(1, len(data)):\n row = data[i]\n if row_processor is not None:\n row = row_processor(row)\n if not row:\n continue\n rowkey = row[0]\n valdict = {}\n for j in xrange(1, len(colkeys)):\n key = colkeys[j]\n x = row[j]\n if typedict is not None and key in typedict:\n try:\n x = typedict[key](x)\n except:\n print >>sys.stderr, \"col key:\", key\n print >>sys.stderr, \"field 
value:\", x\n print >>sys.stderr, \"type / conversion function:\", typedict[key]\n raise\n elif default_type is not None:\n x = default_type(x)\n valdict[key] = x\n datadict[rowkey] = valdict\n return datadict", "def import_data(fname, rowsToRead):\n with open(filepath, 'r') as f:\n reader = csv.reader(f, delimiter=\",\")\n headers = next(reader)[1:]\n data_dict = defaultdict(list)\n for row in islice(reader, rowsToRead):\n for index, key in enumerate(headers):\n data_dict[key].append(row[index + 1])\n return data_dict", "def read_csv():", "def getUserDict(user_file):\n file_handle = open(user_file,'rb')\n file_reader = csv.DictReader(file_handle)\n\n user_dict = {}\n counter = 0\n for row in file_reader:\n user_dict[row['USER_ID_hash']] = row\n counter += 1\n assert len(user_dict.keys()) == counter\n\n file_handle.close()\n return user_dict", "def create_json_example(row, header_csv, jstruct, delimiter, keep, dic_types):\n\n for key in header_csv:\n key_struct = key.split(delimiter)\n if key in dic_types.keys():\n # if no value indicated set to default\n if row[key] == '' and 'default' in dic_types[key].keys():\n row[key] = dic_types[key]['default']\n else:\n try:\n # Cast to indicated type\n row[key] = dic_types[key]['type'](row[key]) \n except:\n print(\" [WARN] Can not parse \", row[key] , \"to type\", dic_types[key]['type'])\n jstruct.update(update_jstruct(jstruct, key_struct, row[key], keep))\n \n return jstruct", "def csvfileUsage(self):\n with open(self.csv_path, \"rb+\") as file_obj:\n reader = csv.DictReader(file_obj, delimiter=',') # CSV DictReader object\n \"\"\" reader.fieldnames returns header , slicing intial 'Month' and\n 'Year' header from list\n \"\"\"\n for com_names in reader.fieldnames[2:]:\n self.company_data[com_names] = {}\n # iterating each row\n for row in reader:\n month, year = self.parse_my(row) # parsing the year and month from row\n # pop the `Month` and `Year` Key to minimize iteration below\n row.pop('Month'), row.pop('Year')\n \"\"\" saving and updating the data at same point of time\n each iteration time, checking the max value and updating \n `Month` `Year` and `Value`\n \"\"\"\n self.prepare_company_data(month, year, row, self.company_data)\n file_obj.close() # close file\n return self.company_data", "def parse_data(data):\n parsed_data = {}\n for i, chunk in enumerate(re.split(r'\\n{2,}', data)):\n if i == 0:\n match = re.search(r'^(.*?) interest: (.*)\\n(.*?); (.*?)$', chunk)\n if match:\n source, query, geo, period = match.groups()\n parsed_data['info'] = {'source': source, 'query': query,\n 'geo': geo, 'period': period}\n else:\n chunk = _clean_subtable(chunk)\n rows = [row for row in csv.reader(StringIO(chunk)) if row]\n if not rows:\n continue\n label, parsed_rows = _parse_rows(rows)\n if label in parsed_data:\n parsed_data[label+'_1'] = parsed_data.pop(label)\n parsed_data[label+'_2'] = parsed_rows\n else:\n parsed_data[label] = parsed_rows\n\n return parsed_data", "def make_to_dict(item, include_timestamp):\n return {\n '%s:%s' % (cell.family, cell.qualifier): (cell.value, cell.timestamp) if include_timestamp else cell.value\n for cell in item\n }", "def read_csv(gw):\n\n data = {}\n\n with open(csv_file_name.format(gw=gw)) as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n\n for i, r in enumerate(reader):\n if i < 3:\n continue\n data[r[2]] = {'points': int(r[7]), 'rank': int(r[0])}\n return data" ]
[ "0.773511", "0.75966036", "0.7308401", "0.7227723", "0.7156224", "0.7100157", "0.70313346", "0.6998957", "0.69969165", "0.69475853", "0.69401574", "0.6889729", "0.6880265", "0.68693936", "0.6866036", "0.68612945", "0.68607634", "0.6772458", "0.6684127", "0.6684127", "0.6604024", "0.6572032", "0.6507097", "0.64873576", "0.64841723", "0.648317", "0.63876694", "0.6372877", "0.63485634", "0.63356614", "0.63103634", "0.6263532", "0.6255506", "0.624636", "0.62348473", "0.62318206", "0.6142925", "0.61387247", "0.6129267", "0.61039835", "0.6082351", "0.60800594", "0.60743254", "0.6051925", "0.60492086", "0.6035155", "0.60263824", "0.6022511", "0.60128236", "0.6010225", "0.6009302", "0.600015", "0.599197", "0.59819037", "0.59714496", "0.5970404", "0.5958493", "0.5950356", "0.58978987", "0.58759964", "0.5849937", "0.5849633", "0.58329505", "0.58232176", "0.582106", "0.5819525", "0.5804391", "0.5799662", "0.578622", "0.57834846", "0.5776693", "0.5774535", "0.57653767", "0.5761141", "0.57451355", "0.573525", "0.5722072", "0.57045966", "0.5700185", "0.56882757", "0.5680005", "0.5678152", "0.56691307", "0.56520885", "0.56506175", "0.56464124", "0.5645565", "0.56416", "0.564065", "0.5629882", "0.5628731", "0.5622954", "0.5611992", "0.5606153", "0.5605751", "0.56019366", "0.55995", "0.55943596", "0.5594341", "0.55865353" ]
0.7150343
5
Get the app's name.
def _get_app_name(app):
    return app[APP_NAME_KEY]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_name():\n return config.APP_NAME", "def app_name(self) -> str:\n return self._app_name", "def app_name(self):\n return self._app_name", "def app_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"app_name\")", "def get_app_name(self):\n return getattr(self, '_app_name', None)", "def get_name(self, name):\n return self.apps[name]['name']", "def app_name(self): # pylint:disable=function-redefined\n return self._app_name", "def app_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_name\")", "def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_name\")", "def getApplicationName(self) -> unicode:\n ...", "def application_name(self) -> Optional[str]:\n return pulumi.get(self, \"application_name\")", "def _get_app_name(self):\n # TODO move app name into pyglet.app (also useful for OS X menu bar?).\n return sys.argv[0]", "def app_name(self):\n module_filepath = inspect.getfile(type(self))\n parent_dir = os.path.dirname\n app_dirpath = parent_dir(parent_dir(parent_dir(module_filepath)))\n app_name = os.path.basename(app_dirpath)\n return app_name", "def name(self):\n return self.application_tree['name']", "def get_application_name(self, feed_id):\r\n return self._handler.get_application_name(feed_id)", "def name(self):\r\n if self._name is not None:\r\n return self._name\r\n else:\r\n try:\r\n return Inspection.find_application_name()\r\n # TODO(wickman) Be more specific\r\n except Exception:\r\n return 'unknown'", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_name\")", "def _app(self) -> str:\n return self.charm.app.name", "def module_name(self) -> str | None:\n try:\n return self._app_name.replace(\"-\", \"_\")\n except AttributeError:\n # If the app was created from an interactive prompt,\n # there won't be a module name.\n return None", "def app(self) -> str:\n return pulumi.get(self, \"app\")", "def get_app_name(i):\n return app_id + '-' + str(i)", "def app_name(self):\n return self._chromecast.app_display_name if self._chromecast else None", "def fallback_application_name() -> str:\n # Import here instead of at the top to avoid an ImportError caused by an\n # import cycle. This can be removed once the import graph of id3c.cli is\n # less tangled.\n from ..cli.utils import running_command_name\n\n # \"The application_name can be any string of less than NAMEDATALEN\n # characters (64 characters in a standard build).\"ยน\n #\n # psycopg2 / libpq will truncate for us, but they will issue a NOTICE log\n # message if they do. 
Avoid the cluttery notice by truncating ourselves.\n #\n # ยน https://www.postgresql.org/docs/current/runtime-config-logging.html#GUC-APPLICATION-NAME\n max_len = 64\n appname = running_command_name()\n\n return shorten(appname, max_len, \"...\")", "def current_app(self) -> str:\n app_id = self.app.get_current() # Returns the application ID (string) of the\n foreground_app = [x for x in self.app.list_apps() if app_id == x[\"id\"]][0]\n return foreground_app['title']", "def get_app_label(app_module):\n return app_module.__name__.split('.')[-1]", "def name(self):\n\n return self.manifest[\"name\"]", "def get_name(app):\n from uuid import uuid4 as uuid\n return (f'accelpy_{app[\"application\"][\"product_id\"]}'\n f'_{str(uuid()).replace(\"-\", \"\")[:8]}')", "def app_name(self, value):\n self._app_name = value", "def name(self):\n return self._env_name", "def get_name(self):\n return self.settings.get(\"name\", None)", "def get_app_hostname():\n if not is_running_on_app_engine() or is_running_on_localhost():\n return None\n\n version = modules.get_current_version_name()\n app_id = app_identity.get_application_id()\n\n suffix = 'appspot.com'\n\n if ':' in app_id:\n tokens = app_id.split(':')\n api_name = tokens[1]\n if tokens[0] == 'google.com':\n suffix = 'googleplex.com'\n else:\n api_name = app_id\n\n # Check if this is the default version\n default_version = modules.get_default_version()\n if version == default_version:\n return '{0}.{1}'.format(app_id, suffix)\n else:\n return '{0}-dot-{1}.{2}'.format(version, api_name, suffix)", "def _app_id(self):\n return '{}-{}'.format(self.config['app']['name'],\n self.config['app']['version'])", "def get_name() -> str:\n pass", "def _extract_appname(self, log):\n appname = \"\"\n if \"appLaunch\" in log:\n appname = log[\"appLaunch\"][\"appName\"]\n else:\n self.logger.info(\"no applaunch field\")\n self.logger.info(log[\"event\"])\n pass \n \n return appname", "def _generateApplicationName(self, obj, **args):\n result = []\n try:\n result.append(obj.getApplication().name)\n except:\n pass\n return result", "def get_name():\n return __name__", "def programName(self):\n return self._parser.prog", "def get_name(self) -> str:\n return os.path.split(os.getcwd())[-1]", "def app_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"app_id\")", "def app_label(cls):\n return cls.model_meta.app_label", "def get_name() -> str:", "def _get_base_app_name(value):\n value = os.path.basename(value)\n if (\n value.endswith(\".exe\")\n or value.endswith(\".dll\")\n or value.endswith(\".so\")\n ):\n value = os.path.splitext(value)[0]\n\n return value", "def get_name(self) -> str:\n return self._name", "def get_name(self) -> str:\n return self._name", "def get_name(self) -> str:\n pass", "def getApplicationReleaseName(self) -> unicode:\n ...", "def app_label(obj):\n try:\n return lower(obj._meta.object_name)\n except AttributeError:\n return ''", "def get_name(self) -> str:\n return self.dbname", "def package_name(self) -> str:\n return pulumi.get(self, \"package_name\")", "def get_name(self) -> str:\n return self.name", "def get_name(self) -> str:\n return self.name", "def get_name(self) -> str:\n return self.name", "def name(self):\n return get_env_name(self.tool_name,\n self._python,\n self._requirements,\n self._tagged_env_vars)", "def get_name(self):\n return self.__name", "def get_name(self):\n return self.__name", "def get_name(self):\n return self.__name", "def get_name(self):\n\n\t\treturn self.__name", "def get_package_name(self):\n return self.name + 
'-' + self.version", "def call_name(self):\n return str(self.executable.name)", "def _getGameName(self):\n className = self.__class__.__name__\n gameName = className[0].lower() + className[1:]\n return gameName", "def get_name(self):\n return self._name", "def get_name(self):\n return self._name", "def get_name(self):\n return self._name", "def get_name(self):\n return self._name", "def get_name(self):\n return self._name", "def get_name(self):\n return self._name", "def get_name(self):\n return self._name", "def get_name(self):\n return self._name", "def get_name(self):\n return self._name", "def get_name(self):\n return self._name", "def get_package_name():\n return try_get_project_property('packageName')", "def get_name(self) -> str:\n\n return self.name_", "def app_id(self) -> str:\n return self._app_id", "def app_id(self):\n return self._app_id or self._modules['default'].data.get('application')", "def getname(self):\n return self.__name", "def get_package_name(self):\n return self.name + '-' + self.version + '-' + self.release", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")" ]
[ "0.9113221", "0.88275534", "0.8791967", "0.87901825", "0.8730592", "0.8585392", "0.85836774", "0.84330225", "0.83433735", "0.83433735", "0.8274585", "0.81731343", "0.8169378", "0.8116642", "0.80174756", "0.7825664", "0.7809752", "0.7760691", "0.7760691", "0.7760691", "0.7760691", "0.7739239", "0.7708164", "0.7652033", "0.7561729", "0.75582975", "0.7517115", "0.738871", "0.73668987", "0.7339383", "0.72954565", "0.71106136", "0.71018374", "0.70959985", "0.70943534", "0.7073327", "0.7010724", "0.7003402", "0.69961256", "0.69774455", "0.6964431", "0.68624574", "0.6852483", "0.6845369", "0.68161535", "0.67916226", "0.6696113", "0.6687613", "0.6687613", "0.66864234", "0.66632354", "0.6651988", "0.6643386", "0.6606793", "0.6598334", "0.6598334", "0.6598334", "0.6592547", "0.65907615", "0.65907615", "0.65907615", "0.6590573", "0.65901726", "0.6585608", "0.65801996", "0.65674305", "0.65674305", "0.65674305", "0.65674305", "0.65674305", "0.65674305", "0.65674305", "0.65674305", "0.65674305", "0.65674305", "0.6556982", "0.6556794", "0.654664", "0.65447015", "0.6536014", "0.6530622", "0.6521966", "0.6521966", "0.6521966", "0.6521966", "0.6521966", "0.6521966", "0.6521966", "0.6521966", "0.6521966", "0.6521966", "0.6521966", "0.6521966", "0.6521966", "0.6521966", "0.6521966", "0.6521966", "0.6521966", "0.6521966", "0.6521966" ]
0.88942343
1
Get the contact's first name.
def _get_contact_first_name(app):
    name = app.get(CONTACT_NAME_KEY)
    if name:
        return ' {}'.format(name.split(' ')[0])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_first_name(self):\n return self._first_name", "def get_first_name(self) -> str:\n return self.first_name", "def first_name(self):\n return self._first_name", "def first_name(self):\n return self._first_name", "def first_name(self):\n return self._first_name", "def first_name(self) -> str:\n return self._first_name", "def first_name(self):\n\n return self._first_name", "def firstname(self):\n return self._firstname", "def firstname(self):\n return self._firstname", "def first_name(self, instance):\r\n return instance.user.first_name", "def getFirstName(self):\n\t\treturn self.FirstName", "def first_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"first_name\")", "def first_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"first_name\")", "def FirstName(self, reg_first_name = VALUE_NOT_SET):\n if reg_first_name != VALUE_NOT_SET:\n self.__reg_first_name = reg_first_name\n try:\n self.__contact.AdditionalInfo().FirstName(self.__reg_first_name)\n except:\n pass\n else:\n if not self.__reg_first_name:\n self.__reg_first_name = None\n return self.__reg_first_name", "def get_user_firstname():\n if not is_authenticated() or 'samlUserdata' not in session:\n return None\n\n first_name = session.get('samlUserdata', {}).get(SAML_ATTRIBUTES.get('first_name', None), False)\n\n return first_name[0] if first_name else not_found('first_name')\n return None", "def get_first_name(self):\n element = self.driver.find_element(*self.firstname_textbox_selector)\n return element.get_attribute(\"value\")", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\r\n return self.first_name", "def ldap_get_firstname(self, user):\n result = super(Auth42, self)._search_not_empty(user)\n if result is not None:\n firstname = result.get(\"first-name\")[0]\n return firstname\n\n return None", "def get_short_name(self):\n\n return self.first_name", "def get_short_name(self) -> str:\n return self.first_name", "def getFirstName(self):\r\n return self.firstName", "def contact_name(self) -> str:\n return pulumi.get(self, \"contact_name\")", "def pref_first_name(self):\n return self.known_as if self.known_as else self.first_name", "def contact_full_name(self):\n first = self.contact_first_name\n last = self.contact_last_name\n if first and last:\n return f'{first} {last}'\n return first or last", "def get_short_name(self):\n last_name = self.last_name\n first_name = self.first_name\n if (not (last_name and not last_name.isspace())):\n \"\"\" If last name is empty or none then return first name\"\"\"\n return first_name\n else:\n return last_name", "def 
get_short_name(self):\n # The user is identified by their email address\n return self.first_name", "def get_full_name(self):\n return self.last_name + self.first_name", "def get_given_name(self):\n return self.given_name", "def get_full_name(self):\n return self.first_name + ' ' + self.last_name", "def get_full_name(self):\n\t\treturn self.email", "def get_full_name(self):\n return self.name+self.last_name", "def first_name_and_initial(self):\n return u\"{} {}\".format(self.pref_first_name(), self.last_name[0])", "def get_full_name(self):\n\t\tfull_name = '%s %s' % (self.first_name, self.last_name)\n\t\treturn full_name.strip()", "def get_full_name(self):\n full_name = '{} {}'.format(self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\r\n full_name = '%s %s' % (self.first_name, self.last_name)\r\n return full_name.strip()", "def get_full_name(self):\r\n full_name = '%s %s' % (self.first_name, self.last_name)\r\n return full_name.strip()", "def get_full_name(self):\n return \"{0} {1}\".format(self.first_name, self.last_surname)", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n return self.name + \" \" + self.email", "def get_short_name(self):\n return f\"{self.first_name} {self.last_name[:1]}\" if self.first_name else self.username", "def first_name(self, name):\n self._first_name = name", "def get_full_name(self):\n full_name = \"%s %s\" % (self.firstname, self.lastname)\n return full_name.strip()", "def get_full_name(self):\n # The user is identified by their email address\n return self.first_name+' '+self.last_name", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, 
self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def full_name(self):\n return self.first_name + \" \" + self.last_name", "def get_full_name(self):\n full_name = f'{self.first_name} {self.last_name}' if self.first_name and self.last_name else self.username\n return full_name.strip()", "def given_name(self):\n profile = self._json['author-profile']\n return profile.get('preferred-name', {}).get('given-name')", "def resolve_first_name(obj, _):\n return obj.first_name.decode()", "def get_full_name(self):\n\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def full_name(self):\n \tif self.first_name and self.last_name:\n \t\treturn \"{} {}\".format(self.first_name, self.last_name)", "def get_full_name(self):\n return self.first_name+\" \"+self.last_name", "def get_user_firstname_lastname(self, record):\n lower_first_name, lower_last_name = self.clean_user_names(record)\n\n #No first name and last name check email\n if lower_first_name is None and lower_last_name is None:\n\n lower_first_name, lower_last_name = \\\n self.extract_name_from_email(record)\n\n return lower_first_name, lower_last_name", "def get_full_name(self):\n full_name = '{0} {1} {2}'.format(self.last_name, self.first_name, self.patronymic)\n return full_name.strip()", "def extract_first_name(s):\n clean_name = re.sub(r'\\s+', r' ', s).split()\n\n for name in clean_name:\n if len(name) > 1:\n return name.title()\n else:\n pass\n\n return None", "def GetName(self):\n if self.compound.preferred_name:\n return self.compound.preferred_name\n if self._name:\n return self._name\n return str(self.compound.FirstName())" ]
[ "0.86260074", "0.8617128", "0.8315871", "0.8315871", "0.8315871", "0.8312541", "0.81962097", "0.8183392", "0.8183392", "0.80284345", "0.7993891", "0.7896602", "0.7896602", "0.78449786", "0.7739931", "0.77090037", "0.7706754", "0.7706754", "0.7706754", "0.7706754", "0.7706754", "0.7706754", "0.7706754", "0.7706754", "0.7706754", "0.7706754", "0.7706754", "0.7706754", "0.7706754", "0.7706754", "0.7706754", "0.7706754", "0.7706754", "0.7706754", "0.7694359", "0.7667651", "0.7661622", "0.7633172", "0.7561199", "0.75170195", "0.74610925", "0.7401613", "0.7390832", "0.73192936", "0.72780764", "0.7224353", "0.718242", "0.71661806", "0.7128669", "0.71247137", "0.71212107", "0.7092154", "0.70683134", "0.70683134", "0.7065586", "0.7064168", "0.70573837", "0.7056638", "0.70458466", "0.7033188", "0.70153797", "0.70107454", "0.70107454", "0.70107454", "0.70107454", "0.70107454", "0.70107454", "0.70107454", "0.70107454", "0.70107454", "0.70107454", "0.70107454", "0.70107454", "0.70107454", "0.70107454", "0.70107454", "0.70107454", "0.70107454", "0.70107454", "0.70107454", "0.70107454", "0.70107454", "0.70107454", "0.70107454", "0.70107454", "0.70107454", "0.70107454", "0.70107454", "0.7005392", "0.69528836", "0.6948455", "0.6948361", "0.69449687", "0.69449687", "0.692941", "0.6900735", "0.6900294", "0.68693346", "0.6856045", "0.6852444" ]
0.8650525
0
Get the contact's email address.
def _get_contact_email(app):
    return app[CONTACT_EMAIL_KEY]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_email(self):\n return self.reference[REF_EMAIL_ADDRESS][REF_VALUE]", "def contact_email(self) -> str:\n return pulumi.get(self, \"contact_email\")", "def get_contact_email():\n from shotglass2.shotglass import get_site_config\n \n site_config = get_site_config()\n \n to = None\n to_name = None\n to_addr = None\n \n \n rec = Pref(g.db).get(\"Contact Name\",user_name=site_config.get(\"HOST_NAME\"),default=site_config.get(\"CONTACT_NAME\",site_config.get(\"MAIL_DEFAULT_SENDER\",\"Site Contact\")))\n if rec:\n to_name = rec.value\n \n if site_config['TESTING']:\n rec = Pref(g.db).select_one(where=\"name='Contact Email Address' and user_name='test'\")\n else:\n rec = Pref(g.db).get(\"Contact Email Address\",user_name=site_config.get(\"HOST_NAME\"),\n default=site_config.get(\"CONTACT_EMAIL_ADDR\",\n site_config.get(\"MAIL_DEFAULT_ADDR\",\"info@{}\".format(site_config.get(\"HOST_NAME\",\"example.com\")))))\n if rec:\n to_addr = rec.value\n # split the addresses into a list if there are commas\n temp_addr_list = to_addr.split(',')\n if len(temp_addr_list) > 1:\n to = []\n for index, val in enumerate(temp_addr_list):\n if index == 0:\n to.append((to_name,val,))\n else:\n to.append((None,val,)) \n else:\n to = (to_name,to_addr,)\n \n return to", "def email_address(self) -> str:\n return self._email_address", "def email_address(self) -> \"str\":\n return self._attrs.get(\"emailAddress\")", "def get_email(self):\n return self._email", "def get_email(self):\n return self.email", "def email(self):\n billing_contact = self.owner.organization_user.user\n return billing_contact.email", "def email(self):\n return self._dict.get('email')", "def email(self) -> str:\n return pulumi.get(self, \"email\")", "def email(self) -> str:\n return pulumi.get(self, \"email\")", "def email(self) -> str:\n return pulumi.get(self, \"email\")", "def email(self):\n return self._email", "def email(self):\n return self._email", "def email(self):\n return self._email", "def email(self):\n return self._email", "def getEmail(self):\n return self.__email", "def getEmail(self):\n return self.email", "def getEmail(self):\n\t\treturn self.Email", "def account_email(self) -> str:\n return pulumi.get(self, \"account_email\")", "def email(self) -> str:\n return self._email", "def get_user_email(self):\n member = self.get_user()\n if member:\n return member.getProperty('email')", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self):\n return \"{}.{}@company.com\".format(self.first, self.last)", "def service_account_email_address(self) -> str:\n return pulumi.get(self, \"service_account_email_address\")", "def service_account_email_address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_account_email_address\")", "def email(self):\n # Look for a primary address\n useremail = UserEmail.query.filter_by(user_id=self.id, primary=True).first()\n if useremail:\n return useremail\n # No primary? Maybe there's one that's not set as primary?\n useremail = UserEmail.query.filter_by(user_id=self.id).first()\n if useremail:\n # XXX: Mark at primary. This may or may not be saved depending on\n # whether the request ended in a database commit.\n useremail.primary = True\n return useremail\n # This user has no email address. 
Return a blank string instead of None\n # to support the common use case, where the caller will use unicode(user.email)\n # to get the email address as a string.\n return u''", "def get_default_email(self):\n email_address = None\n sql = u'SELECT detail ' \\\n u'FROM communication_TBL ' \\\n u'WHERE client_company_ID = %s ' \\\n u'AND communication_type = \"email\" ' \\\n u'AND main = 1'\n\n data = (self.id,)\n\n c, conn = connection(self.schema)\n\n try:\n c.execute(sql, data)\n\n address = c.fetchone()\n if address is not None:\n email_address = address[0]\n\n finally:\n conn_close(c, conn)\n\n return email_address", "def email(self):\n return '{}.{}@email.com'.format(self.fname,self.lname)", "def service_account_email_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_account_email_address\")", "def email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"email\")", "def get_email(self):\n # Scraping the Email Address from Contact Info (email)\n\n # > click on 'Contact info' link on the page\n # self.browser.execute_script(\n # \"(function(){try{for(i in document.getElementsByTagName('a')){let el = document.getElementsByTagName('a')[i]; \"\n # \"if(el.innerHTML.includes('Contact info')){el.click();}}}catch(e){}})()\")\n # time.sleep(loading_pause_time)\n #\n # # > gets email from the 'Contact info' popup\n # try:\n # email = self.browser.execute_script(\n # \"return (function(){try{for (i in document.getElementsByClassName('pv-contact-info__contact-type')){ let \"\n # \"el = \"\n # \"document.getElementsByClassName('pv-contact-info__contact-type')[i]; if(el.className.includes(\"\n # \"'ci-email')){ \"\n # \"return el.children[2].children[0].innerText; } }} catch(e){return '';}})()\")\n #\n # self.browser.execute_script(\"document.getElementsByClassName('artdeco-modal__dismiss')[0].click()\")\n # except:\n # email = 'N/A'", "def email(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def from_email_address(self):\n return self._from_email", "def Email(self, default=None):\n return self.data.get('email', default)", "def customer_email(customer):\n return customer.get(\"email\")", "def email(self):\n return self.__email", "def email(self, instance):\r\n return instance.user.email", "def get_email(self, company_code):\n return self.__get(\"export/table=name&search=\" + urllib.quote_plus(\"code=`\" + company_code + \"`\") + \"&format=[email]\").text", "def customer_email(self):\n return self._customer_email", "def get_email(obj):\r\n return obj.user.email", "def getEmail(self):\n return _libsbml.ModelCreator_getEmail(self)", "def generate_email_address(self):\n return \"%s.%s@%s\" % (uuid.uuid4(), self.mailbox, \"mailosaur.io\")", "def elastic_cloud_email_address(self) -> str:\n return pulumi.get(self, \"elastic_cloud_email_address\")", "def 
to_email_address(self):\n return self._to_recipients", "def get_email(self, token):\n resp = requests.get(self.emails_url, params={\"access_token\": token.token})\n emails = resp.json().get(\"values\", [])\n email = \"\"\n try:\n email = emails[0].get(\"email\")\n primary_emails = [e for e in emails if e.get(\"is_primary\", False)]\n email = primary_emails[0].get(\"email\")\n except (IndexError, TypeError, KeyError):\n return \"\"\n finally:\n return email", "def user_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_email\")", "def get_email(self, token, uid):\n\n email_info_resp = get_remote(get_config('login.weibo.email_info_url') + token)\n email_info_resp_json = json.loads(email_info_resp)\n\n if email_info_resp_json.get(\"error\") is not None:\n raise Exception(email_info_resp_json)\n\n return email_info_resp_json['email']", "def business_email(self):\n return self._business_email", "def get_default_email(self):\n email = 'error@error.error'\n sql = u'SELECT detail ' \\\n u'FROM communication_TBL ' \\\n u'WHERE person_ID = %s ' \\\n u'AND main = 1 ' \\\n u'AND communication_type = \"email\"'\n data = (self.login_details['person_ID'])\n\n if verify_user_company_schema(self.login_details):\n c, conn = connection(self.login_details['company_schema'])\n\n try:\n c.execute(sql, data)\n value = c.fetchone()\n\n if value is not None:\n email = value[0]\n finally:\n conn_close(c, conn)\n return email", "def email_address():\n hostname = socket.gethostname()\n if hostname == \"warpy\":\n email = \"sschwarzer@sschwarzer.net\"\n else:\n dummy_address = \"anonymous@example.com\"\n email = os.environ.get(\"EMAIL\", dummy_address)\n if not email:\n # Environment variable exists but content is an empty string\n email = dummy_address\n return email", "def _get_user_email_address(self, request):\n return request.session.get(SESSION_VAR_EMAIL_ADDRESS, not request.user.is_anonymous() and request.user.email)", "def GetEmailAddress(user_id):\n user_id = user_id.strip()\n if '@' in user_id:\n email = user_id\n else:\n email = user_id + '@' + os.environ['AUTH_DOMAIN']\n\n if IsEmailValid(email):\n return email\n else:\n return None", "def get_email():\n headers = request.headers\n token = headers['Authorization'].split()[1]\n return Token.objects(access_token=token).first().email", "def _extract_email_address(self, from_email):\n res = email.utils.parseaddr(from_email)\n if len(res[1]) != 0:\n return res[1].lower()\n else:\n print(res, from_email)\n return \"\"", "def get(self):\n user_id = request.args.get('user_id')\n return get_email(user_id)", "def get_email_for_nickname(cls, nickname):\n account = cls.get_account_for_nickname(nickname)\n if account is None:\n return None\n return account.email", "def cc_email_address(self):\n return self._cc_recipients", "def get_author_email(author, email):\n return encode_email(email, author, 'nav')", "def clean_email_address(self):\n c_d = self.cleaned_data\n if User.objects.exclude(id=c_d['id']).filter(\n email=c_d['email_address']):\n raise forms.ValidationError(u'The email is already registered.')\n return c_d['email_address']", "def get_email(self, id_):\n\n query = self._db.User.select(self._db.User.c.id_ == id_)\n query = query.with_only_columns([self._db.User.c.email, ])\n\n record = query.execute().fetchone()\n return record[0]", "def get_email_address(user_id: UserID) -> str:\n email_address = db.session \\\n .query(DbUser.email_address) \\\n .filter_by(id=user_id) \\\n .scalar()\n\n if email_address is None:\n raise ValueError(\n 
f\"Unknown user ID '{user_id}' or user has no email address\"\n )\n\n return email_address", "def envelope_sender(self):\n envelope_sender = None\n # TODO: Make this check better as soon as SMTP from and sender are \n # Addresses, not AddressLists anymore.\n if self.smtp_from != None and len(self.smtp_from) > 0:\n envelope_sender = self.smtp_from\n elif self.sender != None and len(self.sender) > 0:\n envelope_sender = self.sender\n else:\n envelope_sender = self.author\n return Address(envelope_sender)", "def business_owner_email(self):\n return self._business_owner_email", "def reply_to_email_address(self):\n return self._reply_to", "def mail(self):\n if \"mail\" in self._prop_dict:\n return self._prop_dict[\"mail\"]\n else:\n return None", "def mail(self):\n if \"mail\" in self._prop_dict:\n return self._prop_dict[\"mail\"]\n else:\n return None", "def get_user_email():\n if not is_authenticated() or not is_authenticated_CSC_user() or 'samlUserdata' not in session:\n return None\n\n csc_email = session.get('samlUserdata', {}).get(SAML_ATTRIBUTES.get('email', None), False)\n\n return csc_email[0] if csc_email else not_found('csc_email')\n return None", "def get_primary_email(self):\n return self.associated_emails.get(is_primary_email=True)", "async def view_email_address(self, ctx):\n author = ctx.message.author\n\n if not self.email_list:\n with open(\"data/email/emails.json\", \"r\", encoding='utf-8') as file:\n self.email_list = json.load(file)\n\n if str(author.id) in self.email_list:\n await ctx.send(\n \"currently configured email address:{}\".format(self.email_list[str(author.id)]))\n else:\n await ctx.send(\"There is no email address configured..!\")\n return", "def email_address() -> str:\n\n return os.environ.get(\"EMAIL_NOTIFICATION\", \"\")", "def get_or_create_contact_email(email: str, user) -> EmailAddress:\n email_clean = clean_email(email)\n ea = EmailAddress.objects.filter(email=email_clean, contact__user=user).first()\n if not ea:\n # email does not exist\n # -> create contact (dummy) and email\n contact = Contact.objects.create(\n user=user, name=email, frequency_in_days=CONTACT_FREQUENCY_DEFAULT\n )\n ea = EmailAddress.objects.create(email=email_clean, contact=contact)\n return ea", "def recent_email_sent(self):\n recent_contact_activity = self.activity().filter(verb='Contacted complainant:', description__contains='Email sent').first()\n if recent_contact_activity:\n try:\n email = recent_contact_activity.description.split(\"'\")[1]\n except IndexError:\n email = None\n return email\n return None", "def ldap_get_email(self, user):\n result = super(Auth42, self)._search_not_empty(user)\n if result is not None:\n alias = result.get(\"alias\")[1]\n return alias\n\n return None", "def log_useremail(self):\n return self.user.email", "def get_primary_email(lookup_value, lookup_type=\"id\"):\n lookup_type = _validate_lookup_type(lookup_type, 'email')\n user_data = core.get_data('people', lookup_value, lookup_type, return_json=True)\n primary_email = user_data['emails'][0]['value']\n return primary_email", "def mailing_address(self):\n registered_office = db.session.query(Office).filter(Office.business_id == self.id).\\\n filter(Office.office_type == 'registeredOffice').one_or_none()\n if registered_office:\n return registered_office.addresses.filter(Address.address_type == 'mailing')\n\n return db.session.query(Address).filter(Address.business_id == self.id). 
\\\n filter(Address.address_type == Address.MAILING)", "def get_contact_info(self):\n return f\"Contact {self} at {self.email}\"", "def management_account_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"management_account_email\")", "def get_user_email():\n email = input(\"Email address: \")\n menu.option_to_exit(email)\n try:\n if not is_valid_email(email):\n raise ValueError\n except ValueError:\n print(\"\\nOoops! That doesn't look like an email address.\\n\"\n \"Please try again.\\n\")\n return get_user_email()\n else:\n return email", "def get_full_name(self):\n\t\treturn self.email", "def get_domain(self, email):\r\n try:\r\n return str(email).split('r@')[1]\r\n except:\r\n return None", "def clean_email(self):\n return self.cleaned_data[\"email\"]", "def gcp_service_account_email(self) -> Optional[str]:\n return pulumi.get(self, \"gcp_service_account_email\")", "def gcp_service_account_email(self) -> Optional[str]:\n return pulumi.get(self, \"gcp_service_account_email\")", "def get_email(khoros_object, user_settings=None, user_id=None, login=None, first_name=None, last_name=None,\n allow_multiple=False, display_warnings=True):\n user_settings = process_user_settings(user_settings, user_id=user_id, login=login,\n first_name=first_name, last_name=last_name)\n where_clause = _get_where_clause_for_email(user_settings)\n return _get_user_identifier(khoros_object, 'email', where_clause, allow_multiple, display_warnings)", "def get_email_info_from_one_addressbooks(self, id, email):\n logger.info(\"Function call: get_email_info_from_one_addressbooks from: '{}'\".format(id, ))\n if not id or not email:\n self.__handle_error(\"Empty addressbook id or email\")\n return self.__handle_result(self.__send_request('addressbooks/{}/emails/{}'.format(id, email)))", "def ___str__(self):\n return self.email", "def clean_email(self):\n if getattr(self.instance, 'email', None):\n raise ValidationError(self.registered_error)\n return self.cleaned_data['email']" ]
[ "0.83159286", "0.8266356", "0.7886328", "0.7697728", "0.75914663", "0.7493357", "0.7450327", "0.73740244", "0.73260653", "0.7248775", "0.7248775", "0.7248775", "0.72211725", "0.72211725", "0.72211725", "0.72211725", "0.71998024", "0.71859884", "0.7072096", "0.70538586", "0.70324713", "0.70106006", "0.6985493", "0.6985493", "0.6985493", "0.6985493", "0.69829917", "0.69636005", "0.6956282", "0.6942977", "0.6923558", "0.6915174", "0.6877161", "0.68741506", "0.6834144", "0.681625", "0.6786287", "0.67677116", "0.67677116", "0.67677116", "0.67677116", "0.67677116", "0.67677116", "0.67677116", "0.67285305", "0.67104465", "0.6608043", "0.66036814", "0.6595441", "0.6588091", "0.6575363", "0.65730107", "0.65632355", "0.6550933", "0.6543369", "0.65000474", "0.647186", "0.64538485", "0.6446374", "0.6441488", "0.6418398", "0.64024866", "0.64016324", "0.63926727", "0.6386338", "0.6371464", "0.6362431", "0.6342857", "0.6319012", "0.6261963", "0.6236772", "0.61921465", "0.61874896", "0.6184654", "0.61795574", "0.61519575", "0.614172", "0.614172", "0.61090475", "0.6093558", "0.60864985", "0.60834944", "0.6047423", "0.60367274", "0.60342425", "0.60332906", "0.60260856", "0.60255754", "0.60205346", "0.6020285", "0.6014161", "0.5997495", "0.59833395", "0.59661955", "0.59531164", "0.59531164", "0.5940369", "0.5924892", "0.5916045", "0.58796215" ]
0.7802634
3
Get the subject to send with the email.
def _get_email_subject(app_name):
    return '{} <==> Tote'.format(app_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subject(self):\n return self.mail.get('Subject')", "def getSubject(self):\r\n return self.msg[\"Subject\"]", "def subject(self):\n return self.properties.get(\"subject\", None)", "def get_subject(self):\n return self._subject", "def subject(self):\n return self.get(\"subject\")", "def subject(self) -> \"str\":\n return self._attrs.get(\"subject\")", "def subject(self) -> \"str\":\n return self._attrs.get(\"subject\")", "def subject(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subject\")", "def subject(self) -> str:\n return self[\"Sns\"][\"Subject\"]", "def subject(self):\n if \"subject\" in self._prop_dict:\n return self._prop_dict[\"subject\"]\n else:\n return None", "def get_subject(self):\n ri = self.get_request_info()\n if ri['subject'] is None:\n ri['subject'] = None\n # setup first RDN sequence\n ri['subject'][0] = None\n\n subject = ri['subject'][0]\n return name.X509Name(subject)", "def custom_email_subject(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"custom_email_subject\")", "def subject(self):\n subject = re.sub(RE_PATTERNS, '', self.header('Subject', ''))\n subject = re.sub(FW_PATTERNS, '', subject)\n return subject.strip()", "def subject(self):\n return self._subject", "def subject(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subject\")", "def subject(self):\n return self.__subject", "def subject(self):\n subject = loader.render_to_string(self.subject_template_name,\n self.get_context())\n return ''.join(subject.splitlines())", "def subject(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"subject\")", "def getSubject(self):\n\n return X501DN.from_POW(self.get_POW().getSubject())", "def getSubject(self):\n\n return X501DN.from_POW(self.get_POW().getSubject())", "def getSubject(self, record):\n base_subject = super(CustomSMTPHandler, self).getSubject(record)\n try:\n hostname = platform.node()\n # pylint: disable=broad-except\n except Exception:\n hostname = 'Unknown'\n\n return base_subject.format(hostname)", "def subject_property_name(self):\n subject_property_name = 'subject'\n if 'participant' in self.schemas.keys():\n subject_property_name = 'participant'\n return subject_property_name", "def get_name(self):\n return self.load_name(self.subject)", "def set_subject(self):\n\t\tfrom email.errors import HeaderParseError\n\t\ttry:\n\t\t\t_subject = decode_header(self.mail.get(\"Subject\", \"No Subject\"))\n\t\t\tself.subject = _subject[0][0] or \"\"\n\t\t\n\t\t\tif _subject[0][1]:\n\t\t\t\tself.subject = self.subject.decode(_subject[0][1])\n\t\t\telse:\n\t\t\t\t# assume that the encoding is utf-8\n\t\t\t\tself.subject = self.subject.decode(\"utf-8\")[:140]\n\t\texcept (UnicodeDecodeError, HeaderParseError):\n\t\t\t#try:\n\t\t\t#\tself.subject = self.subject.decode(\"gb18030\")\n\t\t\t#except UnicodeDecodeError:\n\t\t\tself.subject = u'Error Decoding Subject'\n\t\t#if self.subject and len(self.subject)>140:\n\t\t#\tself.subject = self.subject[:135]\n\t\timport re\n\n\t\temoji_pattern = re.compile(\"[\"\n u\"\\U0001F600-\\U0001F64F\" # emoticons\n u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\n u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols\n u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\n \"]+\", flags=re.UNICODE)\n\t\tself.subject = emoji_pattern.sub(r'', self.subject)\n\n\t\tif not self.subject:\n\t\t\tself.subject = \"No Subject\"", "def get_pretty_subject(cert):\n subject = 'subject=' + _get_pretty_name(cert.get_subject())\n issuer = 'issuer=' + _get_pretty_name(cert.get_issuer())\n return 
subject + '\\n' + issuer + '\\n'", "def get_subject_type(self):\n\n return self.subject_type", "def subject(template_name):\n subject = loader.render_to_string(template_name,\n self.get_context())\n return ''.join(subject.splitlines())", "def subject_alt_emails(self):\n\n return self._get_subject_alt('rfc822_name')", "def _get_user_provided_subject_identifier(self):\n if self.get_user_provided_subject_identifier_attrname() in dir(self):\n return getattr(self, self.get_user_provided_subject_identifier_attrname())\n else:\n return None", "def get_subject(self, web_registry):\n return self.subject_builder.build_subject(web_registry=web_registry)", "def get_subject(self, idattr):\n return self.get_node('//Subject[@id=\"%s\"]' % idattr)", "def get_user_provided_subject_identifier_attrname(self):\n return None", "def get_from_subject(mesid, mailbox):\n res, data = mailbox.fetch(mesid, 'BODY.PEEK[HEADER.FIELDS (SUBJECT FROM)]')\n if res != 'OK':\n raise RuntimeError('error in fetch call for {}'.format(mesid))\n # Apparently default character set for IMAP is UTF7\n myheads = data[0][1].decode('utf-7')\n name = get_from(myheads)\n\n subject = findall(r'Subject:\\s+(.*)\\r\\n', myheads)[0] # Assume match\n return ' '.join((name, ':', subject))", "def current_subject(self):\n return \"%s: %s\" % (self.name, self.phase)", "def email(self):\n # mapping = ssis_synctree_settings[STUDENT_PSIDUSERNAME_MAPPINGS].get(self.idnumber)\n # handle = (self.name + self._year_of_graduation).lower().replace(' ', '') if not mapping else mapping\n return self.idnumber + '@mail.ssis-suzhou.net'", "def email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"email\")", "def publisher_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"publisher_email\")", "def subject(self, subject: \"str\"):\n self._attrs[\"subject\"] = subject", "def subject(self, subject: \"str\"):\n self._attrs[\"subject\"] = subject", "def email(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> str:\n return pulumi.get(self, \"email\")", "def email(self) -> str:\n return pulumi.get(self, \"email\")", "def email(self) -> str:\n return pulumi.get(self, \"email\")", "def get_subject_set(self):\n return self.get_label_set(SUBJECT_NODE_TAG)", "def send_mail(self, subject):\r\n pass", "def get_subject(subject_content, data_key=\"b\"):\n (header, data) = get_data(subject_content)\n data = get_events(data[data_key])\n\n return header, data", "def setSubject(self,value): \n self.PDFreactorConfiguration.in1[\"subject\"] = value", "def notification_sender_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"notification_sender_email\")", "def mail(self):\n if \"mail\" in self._prop_dict:\n return self._prop_dict[\"mail\"]\n else:\n return None", "def mail(self):\n if \"mail\" in self._prop_dict:\n return self._prop_dict[\"mail\"]\n else:\n return None", "def format_subject(self, notice):\n return \"PDR Notice: {0}: {1}\".format(notice.type, notice.title)", "def email(self) -> str:\n return self._email", "def subject(self, val: str):\n self._subject = val", "def contact_email(self) -> str:\n return pulumi.get(self, \"contact_email\")", "def email(self):\n return \"{}.{}@company.com\".format(self.first, self.last)", "def get_sender_email(mail: Message) -> str:\n sender_pattern = re.compile(\"^(?P<name>.*)\\s<(?P<email>.*)>$\")\n from_header = mail['From'] # type: str\n\n sender = sender_pattern.match(from_header)\n if not sender:\n raise KeyError(\"Invalid From header on email\")\n\n 
return sender.group('email')", "def get_email(self):\n return self._email", "def subject_schema(self):\n return self.schemas.get(self.subject_property_name, None)", "def get_subject_cn(self):\n subject = self.get_subject()\n cns = subject.get_entries_by_oid(name.OID_commonName)\n return [cn.get_value() for cn in cns]", "def get_subject(self, sub_id):\n sub_html = self._get_html_for_subject_main(sub_id)\n ep_html = self._get_html_for_subject_eps(sub_id)\n return BangumiSubjectFactory.from_html(sub_html, ep_html)", "def publisher_email(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"publisher_email\")", "def get_email(self):\n return self.email", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self):\n return self._email", "def email(self):\n return self._email", "def email(self):\n return self._email", "def email(self):\n return self._email", "def subject_begins_with(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subject_begins_with\")", "def getEmail(self):\n return _libsbml.ModelCreator_getEmail(self)", "def getEmail(self):\n return self.__email", "def get_pubkey(self):\n return self._csr['certificationRequestInfo']['subjectPublicKeyInfo']", "def getEmail(self):\n\t\treturn self.Email", "def get_email(self):\n return self.reference[REF_EMAIL_ADDRESS][REF_VALUE]", "def email(self):\n return '{}.{}@email.com'.format(self.fname,self.lname)", "def subject(self, value):\n self.set_property(\"subject\", value)", "def get_message(self, email):\n\n message = MIMEText(self.message, 'html')\n\n message['Subject'] = self.subject\n message['From'] = self.from_\n message['To'] = email\n\n return message", "def get_subject(text_file):\n path_name, sf = os.path.splitext(text_file)\n fname = os.path.basename(path_name)\n fname = fname.replace(\"-Left_Handed\", \"\")\n all_hyphens = [m.start() for m in re.finditer('-', fname)]\n if len(all_hyphens) == 1:\n beg = fname[:len(fname)-2].rindex('_')\n else:\n beg = all_hyphens[-2]\n\n end = all_hyphens[-1]\n subj = fname[beg+1:end]\n subj = subj.lower()\n\n return subj", "def email(self):\n return self._dict.get('email')", "def getEmail(self):\n return self.email", "def publisher_email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"publisher_email\")", "def mail(self):\n\n return self._mail", "def attachment_name(self):\n if self.__attachment_name is not None:\n return self.__attachment_name\n if self.__attachment_name_property:\n return getattr(self, self.__attachment_name_property, '')\n else:\n # property order resolution:\n # 1) try property 'subject'\n # 2) try property 'name'\n try:\n attachment_name = getattr(self, 'subject')\n except AttributeError:\n attachment_name = getattr(self, 'name', '')\n return attachment_name", "def email(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"email\")", "def get_eval_subject_name(self, position):\n \"\"\"\n Arguments:\n position: position of the name in the list\n Returns:\n eval subject's name\n \"\"\"\n assert position < self.n_eval,\\\n \"The total number of evaluation samples is: %d\" % self.n_eval\n return self.evaluation_subjects[position]", "def get_mail( traceback ):\n msg = MIMEText( traceback )\n msg[ 'Subject' ] = Header( 'FX daily cron error' )\n msg[ 'From' ] = 'FX daily cron'\n msg[ 'To' ] = 
'tamakoshihiroki@gmail.com'\n msg[ 'Date' ] = formatdate( localtime = 9 )\n msg[ 'Content-Type' ] = ''.join(\n [ 'text/plain; charset=\"', BODY_ENCODING, '\"', ] )\n return msg", "def send_mail(subject):\r\n obj = EmailNotification().emailobj()\r\n obj.send_mail(subject)", "def notification_sender_email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"notification_sender_email\")", "def notification_sender_email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"notification_sender_email\")", "def get_subject_arguments():\n subject = {}\n common_name = input(\"Subject's common name:\")\n if common_name != \"\":\n subject[\"CN\"] = common_name\n\n email = input(\"Subject's e-mail:\")\n if email != \"\":\n subject[\"emailAddress\"] = email\n\n country = input(\"Subject's country:\")\n if country != \"\":\n subject[\"C\"] = country\n\n state = input(\"Subject's state:\")\n if state != \"\":\n subject[\"ST\"] = state\n\n city = input(\"Subject's city:\")\n if city != \"\":\n subject[\"L\"] = city\n\n organization = input(\"Subject's organization:\")\n if organization != \"\":\n subject[\"O\"] = organization\n\n organization_unit = input(\"Subject's organization unit:\")\n if organization_unit != \"\":\n subject[\"OU\"] = organization_unit\n\n return subject", "def keep_header_subject(text, keep_subject=False):\n _before, _blankline, after = text.partition('\\n\\n')\n\n sub = [l for l in _before.split(\"\\n\") if \"Subject:\" in l]\n if keep_subject:\n final = sub[0] + \"\\n\" + after\n else:\n final = after\n return final", "def _get_first_contact_email_template_name(app):\n return app[FIRST_CONTACT_EMAIL_TEMPLATE_NAME_KEY]", "def get_default_email(self):\n email = 'error@error.error'\n sql = u'SELECT detail ' \\\n u'FROM communication_TBL ' \\\n u'WHERE person_ID = %s ' \\\n u'AND main = 1 ' \\\n u'AND communication_type = \"email\"'\n data = (self.login_details['person_ID'])\n\n if verify_user_company_schema(self.login_details):\n c, conn = connection(self.login_details['company_schema'])\n\n try:\n c.execute(sql, data)\n value = c.fetchone()\n\n if value is not None:\n email = value[0]\n finally:\n conn_close(c, conn)\n return email", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")" ]
[ "0.85039127", "0.84494317", "0.79497236", "0.7938282", "0.7885958", "0.7774435", "0.7774435", "0.7769633", "0.77543914", "0.77393216", "0.7721028", "0.76066816", "0.74656695", "0.74626553", "0.7323837", "0.7316676", "0.7280974", "0.7280175", "0.72613007", "0.72613007", "0.7129592", "0.6761609", "0.65292525", "0.63866013", "0.63220835", "0.62425077", "0.6225023", "0.62243396", "0.618509", "0.6179474", "0.6130085", "0.6127075", "0.6096884", "0.6083161", "0.60702443", "0.60627306", "0.59950155", "0.59393156", "0.59393156", "0.5904189", "0.5877959", "0.5877959", "0.5877959", "0.5867092", "0.5830675", "0.582139", "0.5753722", "0.574737", "0.5725269", "0.5725269", "0.5689801", "0.5685007", "0.5678964", "0.5663373", "0.56471825", "0.5637251", "0.5629961", "0.5618799", "0.56143194", "0.55983895", "0.5575424", "0.554896", "0.5533855", "0.5533855", "0.5533855", "0.5533855", "0.5513799", "0.5513799", "0.5513799", "0.5513799", "0.5488308", "0.54850405", "0.5477045", "0.5475021", "0.54701257", "0.54542726", "0.5443502", "0.5441707", "0.54264915", "0.5401347", "0.5394315", "0.5388096", "0.53812927", "0.5346332", "0.5333781", "0.53093916", "0.5307104", "0.5304786", "0.5292433", "0.528481", "0.528481", "0.52728254", "0.5243684", "0.52309984", "0.5223078", "0.5221573", "0.5221573", "0.5221573", "0.5221573", "0.5221573" ]
0.6934014
21
Get the email template name for the first contact email.
def _get_first_contact_email_template_name(app):
    return app[FIRST_CONTACT_EMAIL_TEMPLATE_NAME_KEY]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_template_name(self):\n template = None\n if self.template:\n template = self.template\n if not template:\n for p in self.get_ancestors(ascending=True):\n if p.template:\n template = p.template\n break\n if not template:\n template = settings.CMS_TEMPLATES[0][0]\n for t in settings.CMS_TEMPLATES:\n if t[0] == template:\n return t[1] \n return _(\"default\")", "def _get_contact_first_name(app):\n name = app.get(CONTACT_NAME_KEY)\n if name:\n return ' {}'.format(name.split(' ')[0])", "def template_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"template_name\")", "def template(self):\n template_names = self.get_template_names()\n if template_names:\n return template_names[0]\n return None", "def template_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"template_name\")", "def get_template(self, template):\n\n template_path = aj.config.data['email']['templates'].get(template, 'default')\n\n if template_path == 'default' or not os.path.isfile(template_path):\n template_path = DEFAULT_TEMPLATES[template]\n\n return template_path", "def contact_name(self) -> str:\n return pulumi.get(self, \"contact_name\")", "def template_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"template_name\")", "def get_name_from_email(email):\r\n individual_name = email.split('@')[0]\r\n parts = individual_name.split('.')\r\n name = \" \".join(parts).title()\r\n return name", "def get_email_template_id(self):\n return self.email_template_id", "def get_full_name(self):\n\t\treturn self.email", "def get_name_from_email(email: str) -> str:\n before_at_symbol = email.split(\"@\")[0]\n name_parts = before_at_symbol.split(\".\")\n name = \" \".join(name_parts).title()\n return name", "def _get_template_fname(self):\n template_fname = self._context.get('template_fname', False)\n return template_fname", "def get_template():\r\n try:\r\n return CourseEmailTemplate.objects.get()\r\n except CourseEmailTemplate.DoesNotExist:\r\n log.exception(\"Attempting to fetch a non-existent course email template\")\r\n raise", "def find_template_name(self, regex, template_env=None):\n # Select template_env\n if not template_env:\n template_env = self._template_env\n\n # Find templates matching the regex\n template_list = template_env.list_templates(\n filter_func=lambda template_name: re.match(regex, template_name))\n\n # Select the first match\n if template_list:\n return template_list[0]\n else:\n return ''", "def displayname(self):\n return self.email", "def template_name(self, template_type: Union[TemplateType, str]) -> str:\n return self.options.get(\"templates\", {}).get(template_type, template_type)", "def contact_email(self) -> str:\n return pulumi.get(self, \"contact_email\")", "def get_short_name(self):\n\t\treturn self.email", "def launch_template_name(self) -> Optional[str]:\n return pulumi.get(self, \"launch_template_name\")", "def launch_template_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"launch_template_name\")", "def getNameTemplate(self):\n\n return self.nameTemplate", "def launch_template_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"launch_template_name\")", "def _get_cfn_template_file_name(self, cfn_template_path: str) -> str:\n base_name = os.path.basename(cfn_template_path)\n (file_name, ext) = os.path.splitext(base_name)\n return file_name", "def inspect_template_name(self) -> str:\n return pulumi.get(self, \"inspect_template_name\")", "def get_short_name(self):\n\n return self.email", "def 
template_name(self):\n\t\traise NotImplementedError('template_name must be defined')", "def get_template_name(self):\n if self.template_name:\n return '%s' % self.template_name\n\n if self.template_name_prefix:\n return '%s%s.html' % (self.template_name_prefix, self.mode)\n\n for piece_name in reversed(list(self.pieces.keys())):\n piece = getattr(self, piece_name)\n result = piece.get_template_name()\n if result:\n return '%s.html' % result\n\n return None", "def get_first_name(self) -> str:\n return self.first_name", "def _get_contact_email(app):\n return app[CONTACT_EMAIL_KEY]", "def get_default_email(self):\n email_address = None\n sql = u'SELECT detail ' \\\n u'FROM communication_TBL ' \\\n u'WHERE client_company_ID = %s ' \\\n u'AND communication_type = \"email\" ' \\\n u'AND main = 1'\n\n data = (self.id,)\n\n c, conn = connection(self.schema)\n\n try:\n c.execute(sql, data)\n\n address = c.fetchone()\n if address is not None:\n email_address = address[0]\n\n finally:\n conn_close(c, conn)\n\n return email_address", "def get_first_name(self):\n return self._first_name", "def _get_email_subject(app_name):\n return '{} <==> Tote'.format(app_name)", "def subject(template_name):\n subject = loader.render_to_string(template_name,\n self.get_context())\n return ''.join(subject.splitlines())", "def get_default_email(self):\n email = 'error@error.error'\n sql = u'SELECT detail ' \\\n u'FROM communication_TBL ' \\\n u'WHERE person_ID = %s ' \\\n u'AND main = 1 ' \\\n u'AND communication_type = \"email\"'\n data = (self.login_details['person_ID'])\n\n if verify_user_company_schema(self.login_details):\n c, conn = connection(self.login_details['company_schema'])\n\n try:\n c.execute(sql, data)\n value = c.fetchone()\n\n if value is not None:\n email = value[0]\n finally:\n conn_close(c, conn)\n return email", "def get_salutation(email):\n return email.split(\"@\")[0].replace(\".\", \" \").title()", "def get_context_template_name(self):\n return getattr(self, 'context_template_name', None)", "def get_notification_name(self):\n return f\"{self.first_name[0].lower()}.{self.last_name.lower()}\"", "def get_nickname_for_email(cls, email, default=None):\n account = cls.get_account_for_email(email)\n if account is not None and account.nickname:\n return account.nickname\n if default is not None:\n return default\n return email.replace('@', '_')", "def get_template_name(self):\n if self.template_name:\n return self.template_name\n\n if Path('_templates/global/WaitPage.html').exists():\n return 'global/WaitPage.html'\n return 'otree/WaitPage.html'", "def get_template_name(self):\n if self.template_name is not None:\n return self.template_name\n model_opts = self.queryset.model._meta\n return f\"{model_opts.app_label}/{model_opts.model_name}.html\"", "def get_template_name(self):\n if self.template_name is not None:\n return self.template_name\n model_opts = self.queryset.model._meta\n return f\"{model_opts.app_label}/{model_opts.model_name}.html\"", "def get_template_filename(template):\n config = read_config(SETTINGS_PATH)\n #String templates\n if (template in STRING_TEMPLATES):\n options = config.options(STRING_TEMPLATES_SECTION) \n for option in options:\n if (option==template):\n #Get root path for the templates\n root_path = config.get(TEMPLATES_SECTION,TEMPLATES_ROOT_PATH)\n #Get the strings path templates\n strings_path = config.get(STRING_TEMPLATES_SECTION,STRING_TEMPLATES_PATH)\n return join(root_path,strings_path),config.get(STRING_TEMPLATES_SECTION,option)", "def 
getFirstName(self):\n\t\treturn self.FirstName", "def get_name(self):\n return self.load_name(self.subject)", "def extract_name_from_email(record):\n\n email = record['email']\n email = email.split('@')[0].lower()\n lower_first_name = None\n lower_last_name = None\n #Assume there is first name and last name in email\n #if there is a separator\n separator_list = ['.', '_', '-']\n for sep in separator_list:\n if sep in email:\n mail = email.split(sep)\n lower_first_name = mail[0]\n lower_last_name = mail[1]\n break\n\n #Otherwise just take the part before the @ as the\n #lower_first_name and lower_last_name\n if lower_first_name is None:\n lower_first_name = email\n lower_last_name = email\n\n return lower_first_name, lower_last_name", "def recent_email_sent(self):\n recent_contact_activity = self.activity().filter(verb='Contacted complainant:', description__contains='Email sent').first()\n if recent_contact_activity:\n try:\n email = recent_contact_activity.description.split(\"'\")[1]\n except IndexError:\n email = None\n return email\n return None", "def first_name(self) -> str:\n return self._first_name", "def get_success_template_name(self):\n if hasattr(self, \"success_template_name\"):\n return self.success_template_name\n\n template_name = self.get_partial_template_name()\n if template_name:\n before, separator, after = template_name.rpartition(\".\")\n return f\"{before}_success{separator}{after}\"\n\n return None", "def _get_template(self):\n # Get templates and put them in the order of importance:\n # 1. template specified in \"modules.yaml\"\n # 2. template specified in a package directly\n # 3. default template (must be defined, check in __init__)\n module_system_name = str(self.module.__name__).split(\".\")[-1]\n package_attribute = \"{0}_template\".format(module_system_name)\n choices = [\n self.conf.template,\n getattr(self.spec.package, package_attribute, None),\n self.default_template, # This is always defined at this point\n ]\n # Filter out false-ish values\n choices = list(filter(lambda x: bool(x), choices))\n # ... and return the first match\n return choices.pop(0)", "def get_template(self):\n model = self.get_object()\n template_name = self.model_template_name or 'template'\n try:\n template_string = getattr(model, template_name)\n except AttributeError as e:\n raise ImproperlyConfigured(\n \"%(model)s is missing a template. 
Define \"\n \"%(model)s.template, %(cls)s.model_template_name \"\n \"or override %(cls)s.get_template().\" % {\n 'model': model.__class__.__name__,\n 'cls': self.__class__.__name__\n }\n )\n return template_string", "def _get_template_filename(self):\n file_name = ReportMeta.reports[self._report_key]['fileName']\n return '{}.html'.format(file_name)", "def contact_full_name(self):\n first = self.contact_first_name\n last = self.contact_last_name\n if first and last:\n return f'{first} {last}'\n return first or last", "def first_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"first_name\")", "def first_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"first_name\")", "def get_contact_info(self):\n return f\"Contact {self} at {self.email}\"", "def get_template(self):\n if self.get_website:\n return self.get_website.get_template()\n else:\n return default_entity.get_website.get_template()", "def get_contact_email():\n from shotglass2.shotglass import get_site_config\n \n site_config = get_site_config()\n \n to = None\n to_name = None\n to_addr = None\n \n \n rec = Pref(g.db).get(\"Contact Name\",user_name=site_config.get(\"HOST_NAME\"),default=site_config.get(\"CONTACT_NAME\",site_config.get(\"MAIL_DEFAULT_SENDER\",\"Site Contact\")))\n if rec:\n to_name = rec.value\n \n if site_config['TESTING']:\n rec = Pref(g.db).select_one(where=\"name='Contact Email Address' and user_name='test'\")\n else:\n rec = Pref(g.db).get(\"Contact Email Address\",user_name=site_config.get(\"HOST_NAME\"),\n default=site_config.get(\"CONTACT_EMAIL_ADDR\",\n site_config.get(\"MAIL_DEFAULT_ADDR\",\"info@{}\".format(site_config.get(\"HOST_NAME\",\"example.com\")))))\n if rec:\n to_addr = rec.value\n # split the addresses into a list if there are commas\n temp_addr_list = to_addr.split(',')\n if len(temp_addr_list) > 1:\n to = []\n for index, val in enumerate(temp_addr_list):\n if index == 0:\n to.append((to_name,val,))\n else:\n to.append((None,val,)) \n else:\n to = (to_name,to_addr,)\n \n return to", "def custom_email_subject(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"custom_email_subject\")", "def _get_template():\n r = get('http://metadata.google.internal/'\n 'computeMetadata/v1/instance/attributes/instance-template',\n headers={'Metadata-Flavor': 'Google'})\n if r.status_code == 200:\n return sub(r'.+instanceTemplates/(.+)', r'\\1', r.text)\n else:\n return ''", "def get_html_template(template_name):\n template_dir = DIRS['email.templates.html']\n template_location = template_dir + template_name\n file = codecs.open(template_location, 'r')\n f = file.read()\n file.close()\n return f", "def arm_template_display_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"arm_template_display_name\")", "def getSubject(self, record):\n base_subject = super(CustomSMTPHandler, self).getSubject(record)\n try:\n hostname = platform.node()\n # pylint: disable=broad-except\n except Exception:\n hostname = 'Unknown'\n\n return base_subject.format(hostname)", "def get_sender_email(mail: Message) -> str:\n sender_pattern = re.compile(\"^(?P<name>.*)\\s<(?P<email>.*)>$\")\n from_header = mail['From'] # type: str\n\n sender = sender_pattern.match(from_header)\n if not sender:\n raise KeyError(\"Invalid From header on email\")\n\n return sender.group('email')", "def determine_preferred_contact(user_data):\n try:\n user_data['personal']['email']\n except KeyError:\n preferred_contact = 'mail'\n else:\n preferred_contact = 'email'\n return 
preferred_contact", "def pref_first_name(self):\n return self.known_as if self.known_as else self.first_name", "def get_mail(\n cls, type: str, context: dict, to_email: str, connection=None,\n ) -> mail.EmailMessage:\n templates = cls.objects.filter(type=type)\n if not templates:\n return None\n\n return templates[0].render(\n context=context, to_email=to_email, connection=connection\n )", "def _get_mail_template(request, issue, full_diff=False):\n context = {}\n template = 'mails/comment.txt'\n if request.user == issue.owner:\n query = models.Message.query(\n models.Message.sender == request.user.email(), ancestor=issue.key)\n if query.count(1) == 0:\n template = 'mails/review.txt'\n files, patch = _get_affected_files(issue, full_diff)\n context.update({'files': files, 'patch': patch, 'base': issue.base})\n return template, context", "def get_user_name_from_email(email):\n\tu = db(db.auth_user.email == email).select().first()\n\tif u is None:\n\t\treturn 'None'\n\telse:\n\t\treturn ' '.join([u.first_name, u.last_name])", "def get_user_firstname():\n if not is_authenticated() or 'samlUserdata' not in session:\n return None\n\n first_name = session.get('samlUserdata', {}).get(SAML_ATTRIBUTES.get('first_name', None), False)\n\n return first_name[0] if first_name else not_found('first_name')\n return None", "def firstname(self):\n return self._firstname", "def firstname(self):\n return self._firstname", "def get_user_name_from_email(email):\n u = db(db.auth_user.email == email).select().first()\n if u is None:\n return 'None'\n else:\n return ' '.join([u.first_name, u.last_name])", "def get_short_name(self):\n # The user is identified by their email address\n return self.first_name", "def find_template_filename(self, template_name):\n\n def next_file():\n filename = self.path / template_name\n yield filename\n try:\n exts = self.default_file_extensions\n except AttributeError:\n return\n\n strfilename = str(filename)\n for ext in exts:\n yield Path(strfilename + ext)\n\n for filename in next_file():\n if filename.is_file():\n return filename", "def first_name(self):\n return self._first_name", "def first_name(self):\n return self._first_name", "def first_name(self):\n return self._first_name", "def get_first_name(self):\n element = self.driver.find_element(*self.firstname_textbox_selector)\n return element.get_attribute(\"value\")", "def get_email(self):\n return self.reference[REF_EMAIL_ADDRESS][REF_VALUE]", "def display_name(self):\n if self.email is None:\n if self.first_name is None and self.last_name is None:\n return \"\"\n\n if self.first_name is None and self.last_name is None:\n return self.email\n\n if self.last_name is None:\n return self.first_name\n\n if self.first_name is None:\n return self.last_name\n\n return \"{} {}\".format(self.first_name, self.last_name)", "def email(self):\n return \"{}.{}@company.com\".format(self.first, self.last)", "def email(self):\n # mapping = ssis_synctree_settings[STUDENT_PSIDUSERNAME_MAPPINGS].get(self.idnumber)\n # handle = (self.name + self._year_of_graduation).lower().replace(' ', '') if not mapping else mapping\n return self.idnumber + '@mail.ssis-suzhou.net'", "def get_primary_email(self):\n return self.associated_emails.get(is_primary_email=True)", "def get_primary_email(lookup_value, lookup_type=\"id\"):\n lookup_type = _validate_lookup_type(lookup_type, 'email')\n user_data = core.get_data('people', lookup_value, lookup_type, return_json=True)\n primary_email = user_data['emails'][0]['value']\n return primary_email", "def 
template_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"template_id\")", "def get_template(self ,template_name):\n\n found = False\n for template in self.templates:\n if template['name'] == template_name:\n found = True\n return template\n if not found:\n return None", "def _get_template(self, template_name):\n if template_name not in self.chached_templates:\n self.chached_templates[template_name] = self.env.get_template(template_name)\n return self.chached_templates[template_name]", "def first_name(self):\n\n return self._first_name", "def email_to_name(email):\n n = email.split(\"@\")[0].title()\n return n.replace(\".\", \" \")", "def GetName(self):\n if self.compound.preferred_name:\n return self.compound.preferred_name\n if self._name:\n return self._name\n return str(self.compound.FirstName())", "def arm_template_display_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"arm_template_display_name\")", "def get_full_name(self):\n return self.name + \" \" + self.email", "def template_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"template_id\")", "def template_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"template_id\")", "def test_email_name(self):\n key = api.portal.get_registry_record(\n 'plone.email_from_name'\n )\n self.assertEqual(u'Briefy CMS', key)", "def _get_message_template(search_results: SearchResults) -> Text:\n msg_template = ''\n if search_results.checked_post.post_type == 'image':\n if len(search_results.matches) == 0:\n msg_template = DEFAULT_COMMENT_OC\n elif len(search_results.matches) == 1:\n msg_template = DEFAULT_REPOST_IMAGE_COMMENT_ONE_MATCH\n else:\n msg_template = DEFAULT_REPOST_IMAGE_COMMENT\n\n if search_results.checked_post.post_type == 'link':\n if len(search_results.matches) == 0:\n msg_template = LINK_OC\n else:\n msg_template = LINK_REPOST\n\n return msg_template", "def template(self) -> str:\n manifest = self._get_manifest()\n\n return manifest[\"template\"]", "def mail_nickname(self):\n if \"mailNickname\" in self._prop_dict:\n return self._prop_dict[\"mailNickname\"]\n else:\n return None", "def mail_nickname(self):\n if \"mailNickname\" in self._prop_dict:\n return self._prop_dict[\"mailNickname\"]\n else:\n return None" ]
[ "0.68836665", "0.6812669", "0.66186845", "0.66120255", "0.6575655", "0.6516548", "0.64882386", "0.64112824", "0.63238996", "0.6301571", "0.6288311", "0.6213783", "0.6116907", "0.60900533", "0.6080471", "0.6079054", "0.60654145", "0.6055386", "0.60473263", "0.60457283", "0.60357887", "0.6032748", "0.60245496", "0.5977315", "0.5976874", "0.5962834", "0.5917714", "0.5909153", "0.5900827", "0.5865399", "0.58497834", "0.5818909", "0.57971895", "0.5766234", "0.57282066", "0.5709668", "0.57005703", "0.56995", "0.56881773", "0.5687285", "0.56835574", "0.56835574", "0.5678245", "0.56777304", "0.56732154", "0.5646834", "0.5640002", "0.5639781", "0.56390196", "0.56389654", "0.5605514", "0.5604985", "0.5595997", "0.5593393", "0.5593393", "0.558616", "0.5584017", "0.5579393", "0.5574546", "0.5573731", "0.55670774", "0.55626255", "0.5557779", "0.55575186", "0.55538684", "0.55418634", "0.5530539", "0.5530448", "0.5529028", "0.5525701", "0.55102026", "0.55102026", "0.54970217", "0.5494777", "0.5489237", "0.54857844", "0.54857844", "0.54857844", "0.54777384", "0.5473247", "0.54637164", "0.54627925", "0.5459935", "0.54587114", "0.5452537", "0.5437325", "0.54281294", "0.54233766", "0.54177374", "0.54140323", "0.5399981", "0.5399489", "0.5385161", "0.5384695", "0.5384695", "0.53739077", "0.5373343", "0.53716147", "0.53693503", "0.53693503" ]
0.87440306
0
Gets the tote store url for this app.
def _get_app_tote_store_url(app):
    return app[APP_TOTE_STORE_URL]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getNoteStoreUrl(self, authenticationToken):\r\n pass", "def getNoteStoreUrl(self, authenticationToken):\r\n self.send_getNoteStoreUrl(authenticationToken)\r\n return self.recv_getNoteStoreUrl()", "def get_store_path(cls):\n user_data_dir = cls.user_data_dir()\n store_path = os.path.join(user_data_dir, 'store.json')\n return store_path", "def store_path(self):\n return path.join(env.store_home, self._store_path)", "def get_url(self):\n return self.db_url", "def url(self):\n return self.storage.url(self.name)", "def helper_get_alt_task_store_name(self):\n return self.helper_retrieve_last_request_get_dict_key_val_index_zero_or_return_none(\"alt_task_store_name\")", "def get_uri(self):\n return self.url", "def log_store(self) -> str:\n return pulumi.get(self, \"log_store\")", "def get_store(self, store_name: str) -> Any:\n pass", "def _get_store(self):\n return self._store", "def get_url(self):\n\n return self.url", "def get_url(self):\n\n return self.url", "def url(self) -> str:\n return pulumi.get(self, \"url\")", "def url(self) -> str:\n return pulumi.get(self, \"url\")", "def getRootURL(self):\n return self.appRootURL", "def get_url(self):\n return self._url", "def get_url(self):\n return self.url", "def get_url(self):\n return self.url", "def store_endpoint(self):\n # Kind of cache for logging purposes (avoids repeated calls)\n self._store_endpoint = self.keystone_client.ceilometer_uri\n return self._store_endpoint", "def application_url(self) -> Optional[str]:\n return pulumi.get(self, \"application_url\")", "def tracking_url(self) -> str:\n return pulumi.get(self, \"tracking_url\")", "def log_store(self) -> Optional[str]:\n return pulumi.get(self, \"log_store\")", "def get_url(self):\n return self.base_driver.current_url", "def get_track_url(self) -> Optional[str]:\n return self.track_url", "def get_store(store_name: str):\n return store_handler.get_store(store_name)", "def get_store(hass: HomeAssistant) -> dict[str, Any] | None:\n return hass.data.get(DATA_STORE)", "def geturl(self):\n return self.__url", "def url(self) -> str:\n return self._url", "def url(self) -> str:\n return self._url", "def url(self) -> str:\n return self._url", "def url(self):\n return self._client.url", "def getURI(self):\n return _libsbml.XMLTriple_getURI(self)", "def get_t_shirt_url():\n return _T_SHIRT", "def url(self) -> Optional[str]:\n return pulumi.get(self, \"url\")", "def url(self):\n return url_for_item(self.key)", "def url(self):\n return url_for_item(self.key)", "def uri(cls):\n return f'{cls.app_label}.{cls.name}'", "def get_url(self):\n url = self.driver.current_url\n return url", "def getUrl(self):\n return self.__get('url')", "def get_url(self):\n return self.metadata['thisRecordUrl']", "def tos_uri(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"tos_uri\")", "def url(self) -> str:\n return self.HTTP.url if self.HTTP else self._url", "def url(self):\n\n return self._url", "def url(self):\n\n return self._url", "def url(self):\n\n return self._url", "def get_url(self):\n return self.driver.current_url", "def url(self):\n url = self.url\n return url", "def url(self):\n return self._url", "def url(self):\n return self._url", "def url(self):\n return self._url", "def url(self):\n return self._url", "def url(self):\n return self._url", "def url(self):\n return self._url", "def url(self):\n return self._url", "def url(self):\n return self._url", "def url(self):\n return self._url", "def url(self):\n return self._url", "def server_url(self):\n return self._session.server_url", 
"def geo_url(self):\n from geoid.acs import AcsGeoid\n\n us = tiger_url(self.year, self.summary_level, AcsGeoid.parse(self.geoid).stusab)\n\n return parse_app_url(us)", "def web_url(self) -> str:\n return pulumi.get(self, \"web_url\")", "def url(self):\n return self._session_coords.url", "def get_tracker_uri(self):\r\n return self.tracker_uri", "def server_url(self):\n return self._session_coords.server_url", "def api_url(self):\n return self.get_api_url()", "def get_url(self) -> str:\n\n return self.__page_url", "def getUrl(self):\n return self.url", "def uri(self) -> Optional[str]:\n return pulumi.get(self, \"uri\")", "def server_url(self):\n\n\t\treturn self._server_url", "def url(self):\n # type: () -> string_types\n return self._url", "def url(self):\n if not self._is_served:\n raise RuntimeError('Cannot determine app url if app is not yet \"served\".')\n elif not (_current_server and _current_server.serving):\n raise RuntimeError('Cannot determine app url if the server is not '\n 'yet running.')\n else:\n host, port = _current_server.serving\n return 'http://%s:%i/%s/' % (host, port, self._path)", "def getUrl(self):\n cmdId = self.executeCommand(Command.GET_CURRENT_URL)\n return cmdId", "def server_url(self):\n\n return self._server_url", "def get_mixed_stores(mixed_setting):\n return mixed_setting[\"default\"][\"OPTIONS\"][\"stores\"]", "def server(self) -> str:\n return self._server_url", "def URL(self):\r\n return self._URL", "def getParentDeviceUrl(self):\n url = \"\"\n dev = self.device()\n if dev: url = dev.absolute_url_path()\n return url", "def get_overpass_uri() -> str:\n Config.__get()\n assert Config.__config is not None\n return Config.__config.get(\"wsgi\", \"overpass_uri\", fallback=\"https://overpass-api.de\").strip()", "def url(self):\n return self.hs.hostname if self.active else None", "def url(self):\n return app.settings.cherrypy.url()", "def tos_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tos_uri\")", "def tos_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tos_uri\")", "def _get_uri(plex_server):\n return plex_server.url(\n \"/:/websockets/notifications\", includeToken=True\n ).replace(\"http\", \"ws\")", "def app(self) -> str:\n return pulumi.get(self, \"app\")", "def url(self) -> str:\n return getattr(\n self.auth_accounts[-1], \"url\" # pylint: disable=unsubscriptable-object\n )", "def workspace_url(self):\n return os.environ.get('TEAMRAUM_URL', '').strip('/')", "def get_url(self):\n return self.resource.url", "def etlWorkflowUrl(self):\n return self.sdaUrl + \"/workflows/_etl\"", "def get_info_url(self):\n return self.get_info(\"URL\")", "def _order_url(self):\n if self.settings[\"LIVE\"]:\n base = \"https://checkout.google.com/api/checkout/v2/request/Merchant/%s\"\n else:\n base = \"https://sandbox.google.com/checkout/api/checkout/v2/request/Merchant/%s\"\n return base % self.settings[\"MERCHANT_ID\"]", "def tails_public_uri(self) -> str:\n return self._tails_public_uri", "def url_path(cls):\n return os.path.join(\n cls.app_label.replace('_', '-'), cls.name.replace('_', '-')\n )", "def url(self):\n return self.__values['url']", "def get_uri(self):\n return self.__uri", "def get_store_info(store_name: str):\n return store_handler.get_store_info(store_name)", "def url(self) -> str:\n return self.url_as()", "def app_protocol(self):\n if settings.INAPP_REQUIRE_HTTPS:\n return 'https'\n else:\n return 'https' if self.is_https else 'http'", "def store_url(self, store_url):\n\n self._store_url = store_url", "def 
getAPI(self):\n return self.api_url", "def get_service_url():\n return get_config_handler().get_service_url()" ]
[ "0.6696655", "0.64120066", "0.6146364", "0.5828228", "0.57313335", "0.5705295", "0.56562585", "0.5626252", "0.5613728", "0.5612579", "0.5582851", "0.555923", "0.555923", "0.5541939", "0.5541939", "0.5510885", "0.55108297", "0.5492612", "0.5492612", "0.5468174", "0.54611695", "0.54573894", "0.544008", "0.54178166", "0.54149383", "0.54030055", "0.5387014", "0.53735423", "0.5370176", "0.5370176", "0.5370176", "0.5363042", "0.53580505", "0.5356113", "0.5348549", "0.53478795", "0.53478795", "0.5346409", "0.5317711", "0.5301193", "0.5287688", "0.5272914", "0.52718705", "0.5262901", "0.5262901", "0.5262901", "0.5253643", "0.5242", "0.52367187", "0.52367187", "0.52367187", "0.52367187", "0.52367187", "0.52367187", "0.52367187", "0.52367187", "0.52367187", "0.52367187", "0.5234128", "0.5228037", "0.5220718", "0.5219494", "0.52104557", "0.5192979", "0.51802623", "0.5173161", "0.5169662", "0.51677763", "0.5165194", "0.51565874", "0.51262873", "0.5119804", "0.5116866", "0.51113135", "0.510234", "0.509783", "0.50866115", "0.5081766", "0.5074739", "0.50715035", "0.5070952", "0.5070952", "0.50697136", "0.50659156", "0.5050919", "0.50434214", "0.50406605", "0.50396085", "0.5039044", "0.50344163", "0.5029868", "0.5026468", "0.5019444", "0.5015615", "0.5009059", "0.5004911", "0.50027806", "0.49984008", "0.49966377", "0.4988576" ]
0.84141535
0
Check if we already sent the first contact email.
def _did_send_first_contact_email(app):
    first_contact = app[FIRST_CONTACT_EMAIL_SENT_KEY]
    if first_contact and first_contact.lower() == 'y':
        return True
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def IsfirstAddContact(self):\n if search_text(contact.get_value('accounts'), isScrollable = 0, searchFlag = TEXT_CONTAINS):\n click_in_list_by_index(0)\n return True\n else:\n return False", "def recent_email_sent(self):\n recent_contact_activity = self.activity().filter(verb='Contacted complainant:', description__contains='Email sent').first()\n if recent_contact_activity:\n try:\n email = recent_contact_activity.description.split(\"'\")[1]\n except IndexError:\n email = None\n return email\n return None", "def is_replied_to(thread):\r\n messages = thread['messages']\r\n if len(messages) < 2:\r\n return False\r\n user_email = get_sender_email(messages[0])\r\n for i in range(1, len(messages)):\r\n sender_email = get_sender_email(messages[i])\r\n if user_email != sender_email:\r\n return True\r\n return False", "def test_skip_blank_emails(self):\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n blank_contact = self.create_contact(data={'email': ''})\n self.group.contacts.add(blank_contact)\n\n # run email job\n from aremind.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertEqual(len(message.to), 1)", "def has_validated_email(self):\n return self.receipt_diploma_uploaded_at is not None", "def test_previously_sent_message_not_sent_twice(self):\n thread = self.create_thread()\n message = thread.first_message\n message.sent = True\n message.save()\n\n send_message(message.pk)\n\n self.assertFalse(self.groupnotify_mock.called)", "def testMailSent(self):\n self.sendEmail()\n messages = self.mail_stub.get_sent_messages(to='trigger@ifttt.com')\n self.assertEqual(1, len(messages))\n self.assertEqual('trigger@ifttt.com', messages[0].to)", "def check_mail(self, update=False):\r\n return self.check_mail_dir(update=update)", "def test_send_subscribe_email(self):\n #Verifica se foi enviado 1 e-mail, o este nรฃo envia e-mail\n self.assertEqual(1, len(mail.outbox))", "def has_sender(self):\n return self.balance > 0", "def isEmailUsed(self, email):\n\n\t\ttestq = {\"email\": email};\n\t\ttest_result = self.db.request(\"getOne\", testq);\n\n\t\tif test_result:\n\t\t\treturn True;\n\t\telse:\n\t\t\treturn False;", "def test_skip_blank_emails(self):\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n reminders.Patient.objects.filter(\n pk__in=[self.test_patient.pk, self.other_patient.pk]\n ).update(next_visit=appt_date)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n blank_contact = self.create_contact(data={'email': ''})\n null_contact = self.create_contact(data={'email': None})\n self.group.contacts.add(blank_contact)\n self.group.contacts.add(null_contact)\n\n self.startRouter()\n self.router.logger.setLevel(logging.DEBUG)\n # run email job\n from afrims.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertEqual(len(message.to), 1)\n self.stopRouter()", "def check_notify(self):\n # no stage or no notify\n if not self.stage_id or not self.stage_id.notify:\n return False\n # mail already sent and don't send multiple times\n if self.stage_id in self.notified_stage_ids:\n if not self.stage_id.notify_multiple:\n return False\n # no mail template\n if not self.stage_id.notify_template_id:\n raise 
except_orm(\n _(u'Warning !'),\n _(u\"No email template selected \"\n u\"in the '%s' stage of the '%s' method\"\n ) % (self.stage_id.name, self.method_id.name))\n return True", "def testEmailAlreadyThere(self):\r\n res = self.app.post(\r\n '/signup_process',\r\n params={\r\n 'email': 'testing@dummy.com'\r\n }\r\n )\r\n self.assertIn('already signed up', res.body)", "def only_once(self) -> bool:\n return self.times == 1", "def is_once(today, last_send):\n if isinstance(today, datetime):\n if last_send is not None:\n if today.date() != last_send.date():\n return True\n return False\n return True\n else:\n raise Exception(\"{} is not a datetime instance\".format(today))", "def is_empty(self):\n if len(self.messages) < 1:\n return True\n else:\n return False", "def has_receipt_address(self):\n return self.receipt_address_uploaded_at is not None", "def has_validated_email(self):\n return self.user.email_user is not None", "def include_contact(self, contact_num: int):\n if self._unique_contacts is not None:\n return contact_num in self._unique_contacts\n else:\n return True", "def unfilled_contact(entry: ContactEntry) -> bool:\n if entry.email is not None:\n if len(entry.email) >= 1:\n if entry.email[0].address is not None:\n return False\n if entry.name is not None:\n if entry.name.given_name is not None:\n return False\n if entry.name.family_name is not None:\n return False\n if entry.organization is not None:\n if entry.organization.name is not None:\n if entry.organization.name.text is not None:\n return False\n if entry.organization.department is not None:\n if entry.organization.department.text is not None:\n return False\n return True", "def checkEmail():\n\tpop_conn = poplib.POP3_SSL('pop.gmail.com')\n\tpop_conn.user('')\n\tpop_conn.pass_('')\n\t#Get messages from server:\n\tmessages = [pop_conn.retr(i) for i in range(1, len(pop_conn.list()[1]) + 1)]\n\t# Concat message pieces:\n\tmessages = [\"\\n\".join(mssg[1]) for mssg in messages]\n\t#Parse message intom an email object:\n\tmessages = [parser.Parser().parsestr(mssg) for mssg in messages]\n\tflag = 0\n\tsweep = None\n\tfor message in messages:\n\t\tsubject = message['subject']\n\t\tif subject is None:\n\t\t\tcontinue\n\t\telif \"CommenceSweep:\" in subject:\n\t\t\tstart = subject.find(\":\")\n\t\t\tcommand = subject[start+1:]\n\t\t\tprint command\n\t\t\tif \"Comp\"+sys.argv[1] in command:\n\t\t\t\tstart = command.find(\"-\")\n\t\t\t\tsweep = command[start+1:]\n\t\t\t\tprint sweep\n\t\t\t\tpoplist = pop_conn.list()\n\t\t\t\tmsglist = poplist[1]\n\t\t\t\tfor msgspec in msglist:\n\t\t\t\t\tdelete = int(msgspec.split(' ')[0])\n\t\t\t\t\tpop_conn.dele(delete)\n\t\t\t\tflag = 1\n\tpop_conn.quit()\n\treturn flag, sweep", "def isSetEmail(self):\n return _libsbml.ModelCreator_isSetEmail(self)", "def check_duplicate_email(self, email):\r\n request = self.req_factory.post('unused_url', data={\r\n 'new_email': email,\r\n 'password': 'test',\r\n })\r\n request.user = self.user\r\n self.assertFailedRequest(self.run_request(request), 'An account with this e-mail already exists.')", "def action_my_payslip_sent(self):\n self.ensure_one()\n template = self.env.ref('payroll_email.email_template_for_my_payroll')\n if template:\n self.env['mail.template'].browse(template.id).send_mail(self.id,force_send=True)\n self.flag = True", "def get_receive_mail_str(self):\n ret = False\n if self.__mail:\n ret = True\n return ret", "def test_sending_mail(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n confirmed = 
self.create_confirmed_notification(self.test_patient, appt_date)\n\n # run email job\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertTrue(self.test_contact.email in message.to)", "def check_for_duplicate_subject_identifier(self):\n pass", "def is_first_synced(self):\n return True", "def check_duplicate_message(\n cls,\n recipient_id: str,\n email_subject: str,\n email_body: str\n ) -> bool:\n\n email_hash = cls._generate_hash(\n recipient_id, email_subject, email_body)\n\n datetime_now = datetime.datetime.utcnow()\n time_interval = datetime.timedelta(\n minutes=feconf.DUPLICATE_EMAIL_INTERVAL_MINS)\n\n sent_datetime_lower_bound = datetime_now - time_interval\n\n messages = cls.get_by_hash(\n email_hash, sent_datetime_lower_bound=sent_datetime_lower_bound)\n\n for message in messages:\n if (message.recipient_id == recipient_id and\n message.subject == email_subject and\n message.html_body == email_body):\n return True\n\n return False", "def stay_in_email(self):\n maxtime = 0\n while True:\n if self._device(packageName='com.tct.email', description='Open navigation drawer').exists \\\n or self._device(resourceId='com.tct.email:id/avatar').exists:\n break\n else:\n self._device.press.back()\n maxtime += 1\n if maxtime > 3:\n self._logger.debug(\"Can't back email\")\n break\n if maxtime < 4:\n return True\n else:\n self._device.press.home()\n self._device.delay(2)\n self._logger.debug(\"Launch email.\")\n if self.enter_app('Email'):\n self._device.delay(2)\n if self.get_current_packagename() == self.get_app_package_from_file('Email'):\n self._logger.debug('Launch eamil successfully.')\n maxtime = 0\n while True:\n if self._device(description='Open navigation drawer').exists \\\n or self._device(resourceId='com.tct.email:id/avatar').exists:\n self._logger.debug('Launch eamil main page successfully.')\n return True\n else:\n self._device.press.back()\n maxtime += 1\n if maxtime > 3:\n self._logger.debug('Launch eamil main page fail.')\n return False\n else:\n self._logger.debug('Launch eamil fail.')\n return False\n else:\n return False", "def _do_request(self):\n\n if time.time() < self._next_request:\n return False\n else:\n return True", "def checkSender(f, ip):\n\n tooManyPosts = False\n\n # Check if this IP is blocked - if so return False.\n logger = logEmail.EmailLogger(logPath)\n isBlocked = logger.getBlock(ip)\n if isBlocked==True:\n return False\n\n # If there is a logger file path given - check the log.\n if logPath != \"null\":\n #try:\n secondsBack = 20\n maxEmails = 4\n tooManyPosts, nrLoggedEmails, lastLog = checkLog(ip,\\\n secondsBack, maxEmails)\n #except:\n #return True # OK\n # Alert admin that this IP might be a spammer!\n if nrLoggedEmails!=None and tooManyPosts==True:\n emailAdmin(ip, nrLoggedEmails, lastLog)\n\n ok = (tooManyPosts==False)\n return ok", "def is_first_challenge_completed(self):\n participants = Participant.objects.filter(user_id=self.user.id)\n\n total_completions = 0\n\n total_completions += Entry.objects.filter(participant__in=participants).count()\n #total_completions += ParticipantPicture.objects.filter(participant__in=participants).count()\n #total_completions += ParticipantFreeText.objects.filter(participant__in=participants).count()\n\n if total_completions == 0:\n return False\n\n return True", "def replied(self):\n return bool(self.replied_at is not None)", "def validate(self):\n rv = Form.validate(self)\n if not rv:\n return False\n\n cur = get_cursor()\n if email_exists(cur, 
self.email.data):\n self.email.errors.append('This email already exists!')\n return False\n\n return True", "def is_no_email(self):\n return self._tag == 'no_email'", "def test_resend_activation_email_nonexistent_user(self):\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n self.assertEqual(len(mail.outbox), 0)", "def always_send(self):\n\n return self._always_send", "def test_duplicate_email(self):\n self.signup('Bo', 'Theo', 'Bo_theo5@example.com', 'Bo1995', 'Bo1995')\n rv = self.signup('Bo', 'Theo', 'Bo_theo5@example.com', 'Bo1995', 'Bo1995')\n self.assertIn(b'Sorry email already exist', rv.data)", "def _auto_email_send(self):\n records = self.search([('send_by', '=', 'mail')])\n\n for supplier in records:\n send_at = datetime.combine(fields.Date.today(),\n float_to_time(supplier.automatic_email_time, supplier.moment, supplier.tz)).astimezone(pytz.UTC).replace(tzinfo=None)\n if supplier.available_today and fields.Datetime.now() > send_at:\n lines = self.env['lunch.order'].search([('supplier_id', '=', supplier.id),\n ('state', '=', 'ordered'), ('date', '=', fields.Date.today())])\n\n if lines:\n order = {\n 'company_name': lines[0].company_id.name,\n 'currency_id': lines[0].currency_id.id,\n 'supplier_id': supplier.partner_id.id,\n 'supplier_name': supplier.name,\n 'email_from': supplier.responsible_id.email_formatted,\n }\n\n _lines = [{\n 'product': line.product_id.name,\n 'note': line.note,\n 'quantity': line.quantity,\n 'price': line.price,\n 'toppings': line.display_toppings,\n 'username': line.user_id.name,\n } for line in lines]\n\n order['amount_total'] = sum(line.price for line in lines)\n\n self.env.ref('lunch.lunch_order_mail_supplier').with_context(order=order, lines=_lines).send_mail(supplier.id)\n\n lines.action_confirm()", "def test_sending_mail(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n reminders.Patient.objects.filter(\n pk__in=[self.test_patient.pk, self.other_patient.pk]\n ).update(next_visit=appt_date)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n self.startRouter()\n self.router.logger.setLevel(logging.DEBUG)\n\n # run email job\n from afrims.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertTrue(self.test_contact.email in message.to)\n self.stopRouter()", "def i_check_that_the_form_has_been_subimtted():\n driver.find_element_by_id(\"submit_message\").click()\n assert \"Contact Confirmation\" in driver.title", "def share_contact(self, name, sender_email):\n contact = self.pull_one_contact(name)[0]\n \n from_email = \"share.contact326@gmail.com\"\n from_password = \"INST326Final\" \n the_name = contact[0]\n number = contact[1]\n email = contact[2]\n zipcode = contact[3]\n \n message = f\"\"\"Subject:New shared contact! 
\\n\n Name: {the_name},\\n \n Number: {number},\\n\n Email: {email},\\n\n Zip Code: {zipcode} \n \"\"\" \n \n context = ssl.create_default_context()\n with smtplib.SMTP_SSL(\"smtp.gmail.com\", 465, context=context) as server:\n server.login(from_email, from_password)\n server.sendmail(from_email, sender_email, message)\n print(f\"\"\"The contact for {name} has been sent to {sender_email}.\\n\n They may have to check their junk folder.\"\"\")", "def sendNextMessage(self):\n if len(self.new_data) == 0:\n return False\n\n donation = self.new_data.pop(0)\n self.seen_keys.add(donation['pk'])\n\n if donation['name'] == \"\":\n if donation['game'] == \"\":\n self.sendMessage(self.newAnonymous % (donation['amount'],\n donation['total']))\n else:\n self.sendMessage(self.newAnonymousGame % (donation['amount'],\n donation['game'],\n donation['total']))\n else:\n if donation['game'] == \"\":\n self.sendMessage(self.newNonymous % (donation['name'],\n donation['amount'],\n donation['total']))\n else:\n self.sendMessage(self.newNonymousGame % (donation['name'],\n donation['amount'],\n donation['game'],\n donation['total']))\n return True", "def run_mailcheck (self):\n\t\t# TODO: add function in backend to check if all needed things are set\n\t\t# like server/pass/user/... - if not, show error\n\t\t# if it is not currently refreshing\n\t\tif not self.__mailbackend.refreshing:\n\t\t\tself.__status = mail.MailCheckStatus.REFRESH \n\t\t\tself.redraw_canvas()\n\t\t\tself.__mailbackend.start()\n\t\treturn False\t# in case we are run as a timeout", "def emailform():\n if request.method == 'POST':\n email = request.form['email1']\n confirmemail = request.form['email2']\n if email == confirmemail:\n #EMAIL CODE HERE\n return True", "def check_not_empty(self, wait_time):\n for i in range(3):\n self.refresh_emailbox(wait_time)\n if not self._device(resourceId='com.tct.email:id/empty_view').exists:\n self._logger.debug('The box is not empty')\n return True\n self._logger.debug('The box is empty')\n return False\n\n # def test(self, address):\n # self._device(resourceId='com.android.email:id/forward').click()\n # self._device.delay(2)\n # if self._device(resourceId='android:id/button1').exists:\n # self._device(resourceId='android:id/button1').click()\n # self._device.delay(3)\n # self._device(className='android.widget.MultiAutoCompleteTextView',description='To').set_text(address)\n # self._device.delay(2)\n # self._device(description='Send').click()\n # self._device.delay(2)\n # self._logger.debug('email sending...')\n # if self._device(resourceId='com.android.email:id/forward').exists:\n # self._device.delay(10)", "def is_previously_approved_author(self, email):\n\n # if the user has not entered email, email is None, in which case we can't check if they have previous comments\n if email is not None:\n # search for any activated comments within the last 6 months by email\n # this SQL should be one of the fastest ways of doing this check\n # https://stackoverflow.com/questions/18114458/fastest-way-to-determine-if-record-exists\n rv = self.db.fetchone([\n 'SELECT CASE WHEN EXISTS(',\n ' select * from comments where email=%s and mode=1 and ',\n ' created > DATE_SUB(CURDATE(), INTERVAL 6 MONTH)',\n ') THEN 1 ELSE 0 END;'], (email,))\n return rv[0] == 1\n else:\n return False", "def get_please_contact(self):\n if self.please_contact:\n return self.please_contact.get_please_contact()\n else:\n return self", "async def check_once(self) -> None:\n attached_tool_request_message = message_definitions.AttachedToolsRequest()\n 
await self._messenger.send(\n node_id=NodeId.head, message=attached_tool_request_message\n )", "def verify_mail(self):\n raise NotImplementedError", "def wasSent(self):\n\t\tself.sent = True\n\t\tself.save()", "def get_already_contacted(self, seller_id):\n return self.contactcampaignstatus_set.filter(\n seller_id=seller_id, status=2\n )", "def test_generate_confirmational_email_for_student_that_already_received_email(self):\n self.student2.has_received_email = True\n self.student2.save()\n\n template = self.email_confirmation_template.name\n confirm_interview_url = \"confirm-interview-test-url\"\n choose_interview_url = \"choose-interview-test-url\"\n\n confirm_email_generator = GenerateConfirmEmails(\n template, confirm_interview_url, choose_interview_url)\n\n confirm_email_generator.generate_confirmation_emails()\n\n client = Client()\n client.login(\n email=self.teacher_admin.email,\n password='123'\n )\n\n url = reverse('admin:post_office_email_changelist')\n response = client.get(url, follow=True)\n\n result_list = response.context_data['cl'].result_list\n\n # Two students dont have interviews and the third already received email\n # There should be no emails generated in Email change list\n self.assertEqual(len(result_list), 0)", "def report(self):\n self.last_contacted = time.time()", "def is_valid_email_address(self, addr):\n\t\t# the call is blocking, so only syntactic analysis performed\n\t\t# To check if the SMTP server exists change check_mx to True\n\t\t# to check if email address exists change verify to true\n\t\treturn addr is not None and validate_email(addr, verify=False, check_mx=False)", "def verify_player_pending(self, player_email):\n try:\n self.pending_players.index(player_email)\n return True\n except ValueError:\n return False", "def getincomingmail(self):\n self.socket.send(\"fuglu scanner ready - please pipe your message\\r\\n\")\n try:\n (handle, tempfilename) = tempfile.mkstemp(\n prefix='fuglu', dir=self.config.get('main', 'tempdir'))\n self.tempfilename = tempfilename\n self.tempfile = os.fdopen(handle, 'w+b')\n except Exception as e:\n self.endsession('could not write to tempfile')\n\n while True:\n data = self.socket.recv(1024)\n if len(data) < 1:\n break\n self.tempfile.write(data)\n self.tempfile.close()\n self.logger.debug('Incoming message received')\n return True", "def is_caller_for_call_campaign(call_profile):\n caller_campaigns = find_campaigns_as_caller(call_profile)\n is_caller = len(caller_campaigns) > 0\n return is_caller", "def test_handle_sending_email(self, mock_email):\n mock_email.return_value = True\n\n send_email_notification(self.email_body)\n self.assertTrue(EmailMultiAlternatives.send.has_been_called)", "def test_send_notification(self):\n management.call_command('send_first_report_notification', [], {})\n eq_(len(mail.outbox), 4)", "def is_smtp_over(self):\n\t\tif self._h > self._beta*self._size:\n\t\t\treturn True\n\t\treturn False", "def _send(self, email_message):\n if not email_message.to:\n return False\n try:\n if (isinstance(email_message,gmail.EmailMessage)):\n e = message\n elif (isinstance(email_message,mail.EmailMessage)):\n e = gmail.EmailMessage(sender=email_message.from_email,\n to=email_message.to,\n subject=email_message.subject,\n body=email_message.body)\n if email_message.extra_headers.get('Reply-To', None):\n e.reply_to = email_message.extra_headers['Reply-To']\n if email_message.bcc:\n e.bcc = list(email_message.bcc)\n #TODO - add support for html messages and attachments...\n e.send()\n except:\n if not 
self.fail_silently:\n raise\n return False\n return True", "def hasStartSyncReceived(self):\r\n\r\n return self.receiver.hasStartSyncReceived()", "def clean_email(self):\n if self.data.get(\"selected_item\") != self.AGENT_ID:\n # resume normal invite flow\n return super().clean_email()\n\n email = self.cleaned_data[\"email\"]\n email = get_invitations_adapter().clean_email(email)\n try:\n self._agent_user = User.objects.get(email__iexact=email)\n except User.DoesNotExist:\n return super().clean_email()\n\n if self._agent_user.account_type != AccountType.agent_user.value:\n raise forms.ValidationError(\n _(\"An active non-agent user is using this e-mail address\")\n )\n if self._agent_user.organisations.filter(\n id=self.instance.organisation.id\n ).exists():\n raise forms.ValidationError(\n _(\"This agent is already active for this organisation\")\n )\n\n return email", "def check_for_incoming_info(self):\n\n if self.test_message_response:\n self.parse_incoming_message(self.test_message_response)\n return True\n\n POLL_ONLY_TIMEOUT_VALUE = 0\n got_at_least_one = False\n while (True):\n readables, writables, errors = select.select([self.socket_datastream], [], [], POLL_ONLY_TIMEOUT_VALUE)\n if not self.socket_datastream in readables:\n return got_at_least_one\n got_at_least_one = True\n data, remote_ip_port = self.socket_datastream.recvfrom(MAX_EXPECTED_MSG_SIZE)\n if remote_ip_port != self.ip_port_arduino_datastream:\n errorhandler.loginfo(\"Msg from unexpected source {}\".format(remote_ip_port))\n else:\n errorhandler.logdebug(\"msg received:{}\".format(data.hex()))\n self.parse_incoming_message(data)", "def is_waiting_for_deliveries(self):\n if self.is_corrected and self.cached_data.last_feedbackset_deadline_datetime < timezone.now():\n return False\n return self.cached_data.last_feedbackset_deadline_datetime >= timezone.now()", "def create_sent_email(self, *args, **kwargs):\n receiver = kwargs['receiver']\n sender = kwargs['sender']\n user = kwargs['user']\n body = kwargs['body']\n subject = kwargs['subject']\n if receiver and sender and subject and body:\n sent_email = SentEmail()\n sent_email.receiver = receiver\n sent_email.subject = subject\n sent_email.sender = sender\n sent_email.status = 'sent'\n sent_email.user = user\n sent_email.body = body\n sent_email.save()\n return True\n else:\n return False", "def isFirst(self):\n index = self.parentNode.idevices.index(self)\n return index == 0", "def message_already_processed(msg):\n\n is_already_member = redis.sismember(redis_sqs_message_set, msg.message_id)\n if not is_already_member:\n redis.sadd(redis_sqs_message_set, msg.message_id)\n\n return is_already_member", "def check_smtp_server_connection(self):\n try:\n connected = True\n\n while not self.config:\n time.sleep(1)\n\n # Create SMTP server and handshake\n server = smtplib.SMTP(self.config.smtp_host + ':' + self.config.smtp_port)\n server.connect(self.config.smtp_host + ':' + self.config.smtp_port)\n\n self.logger.info(MODULE_NAME + '::check_smtp_server_connection::Successfully '\n 'connected to the configured SMTP server and port at: ' + self.config.smtp_host + ':' + self.config.smtp_port)\n\n server.quit()\n\n return connected\n\n except Exception as e:\n self.logger.error(MODULE_NAME + '::check_smtp_server_connection()::The following '\n 'unhandled exception occurred: ' + e.message)\n connected = False\n return connected", "def has_message_available(self):\n return not self.feedback_log.empty()", "def test_skip_if_no_patients(self):\n\n appt_date = datetime.date.today() + 
datetime.timedelta(days=5)\n reminders.Patient.objects.filter(\n pk__in=[self.test_patient.pk, self.other_patient.pk]\n ).update(next_visit=appt_date)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n self.startRouter()\n self.router.logger.setLevel(logging.DEBUG)\n # run email job\n from afrims.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 0)\n self.stopRouter()", "def send_sender_activation_email(self, email):\n logger.info(\"Function call: send_sender_activation_email for '{}'\".format(email, ))\n return self.__handle_error('Empty sender email') if not email else self.__handle_result(self.__send_request('senders/{}/code'.format(email, )))", "def _result_already_returned(self):\n return self.deferred.called", "def is_duplicate_email(email):\n users = User.objects.filter(email=email).values()\n if len(users):\n return True\n return False", "def is_new_contact(self):\n return self.value.lower() == self.NEW_CONTACT", "def available(self):\n existing_url = self.context.get_personal_fundraising_campaign_url()\n same = existing_url == self.context.absolute_url()\n creating = 'create-personal-campaign-page' in self.request.URL\n return not same and not creating", "def is_email_address_already_assigned(email_address: str) -> bool:\n return _do_users_matching_filter_exist(DbUser.email_address, email_address)", "def test_send_mass_html_mail_to_send_no_email(self, send_mass_html_mail__mock: Mock):\n self.family.guests.add(\n Guest(name=\"Pierre\", email=None, phone=\"0123456789\", female=False, family=self.family),\n bulk=False\n )\n events = Event.objects.filter(pk=self.event.pk)\n\n admin.EventAdmin.send_mail(Mock(), None, events)\n\n recipient = list(send_mass_html_mail__mock.call_args[0][0])[0][4]\n self.assertListEqual(list(recipient),\n [\"Franรงoise <valid@example.com>\", \"Jean <valid@example.com>\"])", "def test_send_email(self):\n\t\trecipient = \"\"\n\t\tself.email.send_email(self.subject, recipient, self.content)", "def valid_in_request(self):\n return self._repeatable[0] is not None", "def test_only_send_one_email_to_studio(self, mock_tz):\n mock_tz.now.return_value = datetime(2015, 2, 10, 10, tzinfo=dt_timezone.utc)\n for i in range(5):\n baker.make_recipe(\n 'booking.booking', event=self.event,\n status='OPEN', paid=False,\n payment_confirmed=False,\n user__email=\"unpaid_user{}@test.com\".format(i),\n date_booked= datetime(2015, 2, 9, tzinfo=dt_timezone.utc),\n warning_sent=True,\n date_warning_sent= datetime(2015, 2, 9, 2, tzinfo=dt_timezone.utc),\n )\n\n management.call_command('cancel_unpaid_bookings')\n # emails are sent to user per cancelled booking (6) and studio once\n # for all cancelled bookings\n unpaid_booking = Booking.objects.get(id=self.unpaid.id)\n self.assertEqual(len(mail.outbox), 7)\n self.assertEqual(\n unpaid_booking.status, 'CANCELLED', unpaid_booking.status\n )\n self.assertEqual(\n Booking.objects.filter(status='CANCELLED').count(), 6\n )\n cancelled_booking_emails = [\n [booking.user.email] for booking\n in Booking.objects.filter(status='CANCELLED')\n ]\n all_emails = cancelled_booking_emails + [[settings.DEFAULT_STUDIO_EMAIL]]\n self.assertEqual(\n sorted(all_emails),\n sorted([email.to for email in mail.outbox])\n )", "def has_bad_headers(self):\n\n headers = [self.sender, self.reply_to] + self.recipients\n for header in headers:\n if _has_newline(header):\n return True\n\n if self.subject:\n if _has_newline(self.subject):\n for linenum, line in 
enumerate(self.subject.split('\\r\\n')):\n if not line:\n return True\n if linenum > 0 and line[0] not in '\\t ':\n return True\n if _has_newline(line):\n return True\n if len(line.strip()) == 0:\n return True\n return False", "def test_only_send_one_email_to_studio(self, mock_tz):\n mock_tz.now.return_value = datetime(2015, 2, 11, 10, tzinfo=dt_timezone.utc)\n for i in range(5):\n baker.make(\n TicketBooking, ticketed_event=self.ticketed_event,\n cancelled=False, paid=False,\n user__email=\"unpaid_user{}@test.com\".format(i),\n date_booked= datetime(2015, 2, 9, tzinfo=dt_timezone.utc),\n warning_sent=True,\n date_warning_sent=datetime(2015, 2, 9, 2, tzinfo=dt_timezone.utc),\n )\n for booking in TicketBooking.objects.all():\n baker.make(Ticket, ticket_booking=booking)\n\n management.call_command('cancel_unpaid_ticket_bookings')\n # emails are sent to user per cancelled booking (6) (these 5 plus\n # self.unpaid) and studio once for all cancelled bookings\n self.unpaid.refresh_from_db()\n self.assertEqual(len(mail.outbox), 7)\n self.assertTrue(self.unpaid.cancelled)\n self.assertEqual(\n TicketBooking.objects.filter(cancelled=True).count(), 6\n )\n cancelled_booking_emails = [\n booking.user.email for booking\n in TicketBooking.objects.filter(cancelled=True)\n ]\n all_emails = cancelled_booking_emails + [settings.DEFAULT_STUDIO_EMAIL]\n\n self.assertEqual(\n sorted(all_emails),\n sorted([email.to[0] for email in mail.outbox])\n )", "def has_pending_packets_to_be_sent(self):\n return self.num_packets != 0", "def is_forwarded(self):\n return bool(re.match(FW_PATTERNS, self.header('Subject', '')))", "def test_user_is_sender(self):\n sender = self.create_user()\n thread = self.create_thread(sender=sender, status='pending')\n self.assertTrue(thread.first_message.visible_to_user(sender))", "def test_skip_if_no_patients(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=5)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n # run email job\n from aremind.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 0)", "def _email_allowed(self, tool):\n if 'emails' not in self.watchdb[tool]:\n self.watchdb[tool]['emails'] = []\n\n sent = self.watchdb[tool]['emails']\n now = time.time()\n limit_minute = now - 300\n if sum(e > limit_minute for e in sent) >= 1:\n return False\n\n limit_max = now - 3600\n if sum(e > limit_max for e in sent) >= 5:\n return False\n\n self.watchdb[tool]['emails'] = [e for e in sent if e > limit_max]\n self.watchdb[tool]['emails'].append(now)\n return True", "def test_sms_campaign_spool_contact(self):\n result = sms_campaign_spool_contact.delay()\n self.assertEqual(result.successful(), True)", "def is_ready(self, want_send_index, latest_index):\n return latest_index - want_send_index >= self.p-1", "def clean_email(self):\n email = self.cleaned_data.get(\"email\")\n qs = JOSReservation.objects.exclude(id=self.instance.id).filter(email=email)\n if len(qs) == 0:\n return email\n raise forms.ValidationError(\n ugettext(\"This email is already registered\"))", "def validate(self):\n\t\tlogger = frappe.logger()\n\n\t\tif self.email_id:\n\t\t\tvalidate_email_address(self.email_id, True)\n\n\t\tif frappe.local.flags.in_patch or frappe.local.flags.in_test:\n\t\t\treturn\n\n\t\tif not frappe.local.flags.in_install and not frappe.local.flags.in_patch:\n\t\t\ttry:\n\t\t\t\tif self.use_imap:\n\t\t\t\t\tlogger.info('Checking incoming IMAP email server {host}:{port} 
ssl={ssl}...'.format(\n\t\t\t\t\t\thost=self.email_server, port=get_port(self), ssl=self.use_ssl))\n\t\t\t\t\tif self.use_ssl:\n\t\t\t\t\t\ttest = imaplib.IMAP4_SSL(self.email_server, port=get_port(self))\n\t\t\t\t\telse:\n\t\t\t\t\t\ttest = imaplib.IMAP4(self.email_server, port=get_port(self))\n\n\t\t\t\telse:\n\t\t\t\t\tlogger.info('Checking incoming POP3 email server {host}:{port} ssl={ssl}...'.format(\n\t\t\t\t\t\thost=self.email_server, port=get_port(self), ssl=self.use_ssl))\n\t\t\t\t\tif self.use_ssl:\n\t\t\t\t\t\ttest = poplib.POP3_SSL(self.email_server, port=get_port(self))\n\t\t\t\t\telse:\n\t\t\t\t\t\ttest = poplib.POP3(self.email_server, port=get_port(self))\n\n\t\t\texcept Exception as e:\n\t\t\t\tlogger.warn('Incoming email account \"{host}\" not correct'.format(host=self.email_server), exc_info=e)\n\t\t\t\tfrappe.throw(title=_(\"Incoming email account not correct\"),\n\t\t\t\t\tmsg='Error connecting IMAP/POP3 \"{host}\": {e}'.format(host=self.email_server, e=e))\n\n\t\t\tfinally:\n\t\t\t\ttry:\n\t\t\t\t\tif self.use_imap:\n\t\t\t\t\t\ttest.logout()\n\t\t\t\t\telse:\n\t\t\t\t\t\ttest.quit()\n\t\t\t\texcept Exception:\n\t\t\t\t\tpass\n\n\t\t\ttry:\n\t\t\t\tif self.get('use_ssl_for_outgoing'):\n\t\t\t\t\tif not self.get('smtp_port'):\n\t\t\t\t\t\tself.smtp_port = 465\n\n\t\t\t\t\tlogger.info('Checking outgoing SMTPS email server {host}:{port}...'.format(\n\t\t\t\t\t\thost=self.smtp_server, port=self.smtp_port))\n\t\t\t\t\tsess = smtplib.SMTP_SSL((self.smtp_server or \"\").encode('utf-8'),\n\t\t\t\t\t\t\tcint(self.smtp_port) or None)\n\t\t\t\telse:\n\t\t\t\t\tif self.use_tls and not self.smtp_port:\n\t\t\t\t\t\tself.smtp_port = 587\n\t\t\t\t\tlogger.info('Checking outgoing SMTP email server {host}:{port} STARTTLS={tls}...'.format(\n\t\t\t\t\t\thost=self.smtp_server, port=self.get('smtp_port'), tls=self.use_tls))\n\t\t\t\t\tsess = smtplib.SMTP(cstr(self.smtp_server or \"\"), cint(self.smtp_port) or None)\n\t\t\t\tsess.quit()\n\t\t\texcept Exception as e:\n\t\t\t\tlogger.warn('Outgoing email account \"{host}\" not correct'.format(host=self.smtp_server), exc_info=e)\n\t\t\t\tfrappe.throw(title=_(\"Outgoing email account not correct\"),\n\t\t\t\t\tmsg='Error connecting SMTP \"{host}\": {e}'.format(host=self.smtp_server, e=e))", "def is_request_sent(self, request, relations):\n states = self.get_request_states(request, relations)\n for rid in states.keys():\n if not states[rid]['sent']:\n return False\n\n return True", "def save(self, commit=False):\n mail_result = self.send_email()\n if mail_result:\n self.instance.is_admin_notified = True\n\n contact = super().save(commit=commit)\n\n return contact", "def would_retransmit(self):\n return not self.my_pending_requests.is_empty()", "def is_sender(self):\n return pn_link_is_sender(self._impl)", "def check_sync(self):\r\n if not self.awaiting_sync:\r\n return True\r\n self.check_ack_queue()\r\n return not self.awaiting_sync" ]
[ "0.6324304", "0.6179195", "0.610429", "0.59007627", "0.58576906", "0.5768399", "0.5731466", "0.57095504", "0.5704638", "0.56970215", "0.5689795", "0.566889", "0.56208533", "0.5612742", "0.5604829", "0.5577359", "0.55763453", "0.55086017", "0.55008966", "0.5475762", "0.5462582", "0.5454686", "0.5432835", "0.5419143", "0.541212", "0.53917944", "0.53865623", "0.5386411", "0.53730184", "0.5365943", "0.5342636", "0.53093755", "0.5295068", "0.527253", "0.5270876", "0.5269584", "0.526855", "0.52530515", "0.5253049", "0.5246491", "0.5246416", "0.5243332", "0.5241488", "0.52360547", "0.5232553", "0.52211183", "0.5217706", "0.5207819", "0.5207643", "0.5193887", "0.5184819", "0.5183461", "0.5179155", "0.51786864", "0.51723135", "0.5168672", "0.5167601", "0.51636815", "0.5162523", "0.5155569", "0.5148729", "0.5144945", "0.51436365", "0.51422304", "0.5141959", "0.51401436", "0.5137617", "0.5116845", "0.5114546", "0.51095665", "0.5105998", "0.51035756", "0.5094115", "0.50863063", "0.5073811", "0.5068527", "0.5065309", "0.50652224", "0.5065057", "0.50597477", "0.50591743", "0.5056193", "0.50497335", "0.50492895", "0.5043069", "0.5011223", "0.50073963", "0.4997545", "0.49954438", "0.49933878", "0.49929735", "0.49917108", "0.49876645", "0.4979641", "0.4969338", "0.49664629", "0.49649572", "0.49542493", "0.4953771", "0.4952808" ]
0.8393486
0
Prints a summary of the results.
def _print_summary(results):
    if not len(results) > 0:
        print 'No results to show in summary.'
        return

    table = {}
    for res in results:
        for k, v in res.iteritems():
            table.setdefault(k, []).append(v)

    print tabulate(table, headers='keys', tablefmt="simple")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def printSummary(self):\n pass", "def print_summary(self):\n #outcomes = self.get_outcomes()\n #passes = 'Passes: %i' % sum(1 for outcome in outcomes if outcome == Result.PASS)\n #untested = 'Untested: %i' % sum(1 for outcome in outcomes if outcome == Result.UNTESTED)\n #errors = 'Errors: %i' % sum(1 for outcome in outcomes if outcome == Result.ERROR)\n #fails = 'Fails: %i' % sum(1 for outcome in outcomes if outcome == Result.FAIL)\n print('')\n print ('Passes: %i' % self.get_pass_count())\n print ('Fails: %i' % self.get_fail_count())\n print ('Errors: %i' % self.get_error_count())\n print ('Untested: %i' % self.get_untested_count())\n print ('Skipped: %i' % self.get_skipped_count())", "def display_results(summary):\n print ('Total running time %.2f secs (includes DB checks)'\n % summary.total_time)\n\n print 'OK:', summary.ok\n print 'Errors:', summary.errors\n\n # Display stats\n print 'Changes stats:'\n for var, s in summary.stats.iteritems():\n print '\\t%s:' % var,\n for x in s.iteritems():\n print '%s=%.2f' % x,\n print\n\n # Display profiling data\n print 'Profiling data:'\n for name, data in summary.profile.iteritems():\n print '\\t%s: %d calls, %.2fms' % (name, data['callcount'],\n data['time'] * 1000)", "def _show_summary(self):\n print 'Summary:'\n print ' Reports downloaded successfully: %d' % self.counts\n print ' Reports not downloaded: %d\\n' % self.failed", "def print_summary(self):\n self.model.summary()", "def print_summary(self):\n self.network.print_summary()", "def summary(self, printed=True):\n raise NotImplementedError", "def results_summary(self, num_models=10, sort_metric=None):\n if self.state.dry_run:\n info(\"Dry-Run - no results to report.\")\n return\n\n # FIXME API documentation\n _results_summary(input_dir=self.state.host.results_dir,\n project=self.state.project,\n architecture=self.state.architecture,\n num_models=num_models,\n sort_metric=sort_metric)", "def print_summary_stats(self) -> None:\n print(\"Number of Users: {}\".format(len(self.all_users)))\n print(\"Number of Utterances: {}\".format(len(self.utterances)))\n print(\"Number of Conversations: {}\".format(len(self.conversations)))", "def test_rr_summary(results):\n # pylint: disable=unidiomatic-typecheck\n test_result = results.summary()\n assert type(test_result).__name__ == \"Summary\"\n assert type(test_result.tables) == list\n assert len(test_result.tables) == 3\n assert len(test_result.extra_txt) > 0", "def _print_results_header(self):\n print(\"\\033[94m\"+\"Summary\\n\"+\"-\"*32+\"\\033[0m\")\n print(\"Subroutine: {}\".format(self.mc_sample.__name__))\n print(\"Num Runs: {:2.1e}\".format(self.num_runs))\n print(\"-\"*32+'\\n')", "def print_results(self):\n pass", "def summary(self) -> str:\n pass", "def summary(app):\n click.echo(get_summary(app))", "def printsummary(text):\r\n\r\n print('Summary:')\r\n print('--------')\r\n print(text)", "def print_summary(self):\n\t\t\n\t\tif not self.objects:\n\t\t\tsys.stderr.write(\"No objects.\\n\")\n\t\t\treturn\n\t\t\n\t\t# Summary header data\n\t\theader = (\"ok\", \"error\", \"zdata\", \"xdata\", \"odata\", \"ratio\")\n\t\t\n\t\t# Summary header format\n\t\tfield = \" %11s\"\n\t\tfmt = field * len(header)\n\t\twidth = len(field % \"\") * len(header)\n\t\ts_line = \"-\" * width\n\t\td_line = \"=\" * width\n\t\t\n\t\t# Verbose header data\n\t\tvheader = (\"ok?\", \"type\", \"id\", \"zdata\", \"xdata\", \"odata\", \"ratio\")\n\t\t\n\t\t# Verbose header format\n\t\tvfmt = \" %3s %7s\" + field * 5\n\t\t\n\t\t# Summary data\n\t\tc_ratio = 
None\n\t\to_ok = o_error = 0\n\t\tz_data_size = x_data_size = o_data_size = 0\n\t\t\n\t\tif self.verbose:\n\t\t\tprint vfmt % vheader\n\t\t\tprint s_line\n\t\t\n\t\t# Gather data from objects\n\t\tfor obj in self.objects:\n\t\t\tif obj.v_all:\n\t\t\t\to_ok += 1\n\t\t\t\tif obj.z_data_size: z_data_size += obj.z_data_size\n\t\t\t\tif obj.x_data_size: x_data_size += obj.x_data_size\n\t\t\t\tif obj.o_data_size: o_data_size += obj.o_data_size\n\t\t\telse:\n\t\t\t\to_error += 1\n\t\t\t\n\t\t\tif self.verbose:\n\t\t\t\tv_c_ratio = None\n\t\t\t\t\n\t\t\t\t# Calculate compression if possible\n\t\t\t\tif obj.z_data_size and obj.x_data_size:\n\t\t\t\t\tv_c_ratio = str(100 * obj.z_data_size / obj.x_data_size) + \"%\"\n\t\t\t\t\n\t\t\t\t# Build verbose data\n\t\t\t\tv_data = (\n\t\t\t\t\t\"[Y]\" if obj.v_all else \"[N]\",\n\t\t\t\t\tobj.o_data_type or \"N/A\",\n\t\t\t\t\tobj.id[:10],\n\t\t\t\t\tobj.z_data_size or \"N/A\",\n\t\t\t\t\tobj.x_data_size or \"N/A\",\n\t\t\t\t\tobj.o_data_size or \"N/A\",\n\t\t\t\t\tv_c_ratio or \"N/A\"\n\t\t\t\t)\n\t\t\t\t\n\t\t\t\t# Print verbose data\n\t\t\t\tprint vfmt % v_data\n\t\t\n\t\tif self.verbose:\n\t\t\tprint d_line\n\t\t\n\t\t# Calculate compression ratio\n\t\tif z_data_size and x_data_size:\n\t\t\tc_ratio = str(100 * z_data_size / x_data_size) + \"%\"\n\t\t\n\t\t# Print summary\n\t\tprint fmt % header\n\t\tprint s_line\n\t\tprint fmt % (o_ok, o_error, z_data_size, x_data_size, o_data_size, c_ratio)", "def print_brief_summary(self):\n print (\"Model {}\".format(self.modelName))\n print (\"Precision {}\".format(self.precision))\n print (\"Recall {}\".format(self.recall))\n print (\"f1 score {}\".format(self.f1))\n \n # work here\n print (\"\\nGold NER label counts:\")\n for ner in self.gold_cts.keys():\n print (\"{} : {} (tag{})\".format(self.gold_cts[ner], self.nerTags.ids_to_words([ner]), ner))\n print (\"\\nPredicted NER label counts:\")\n for ner in self.pred_cts.keys():\n print (\"{} : {} (tag{})\".format(self.pred_cts[ner], self.nerTags.ids_to_words([ner]), ner))", "def summary(self, summary: str):\n return self.swag({\n 'summary': summary\n })", "def _print_summary(case, summary):\n for dof, data in summary.items():\n b4b = data[\"Bit for Bit\"]\n conf = data[\"Configurations\"]\n stdout = data[\"Std. Out Files\"]\n print(\" \" + case + \" \" + str(dof))\n print(\" --------------------\")\n print(\" Bit for bit matches : \" + str(b4b[0]) + \" of \" + str(b4b[1]))\n print(\" Configuration matches : \" + str(conf[0]) + \" of \" + str(conf[1]))\n print(\" Std. Out files parsed : \" + str(stdout))\n print(\"\")", "def summary(self):\n print(self.model.summary())", "def print_results(self) -> None:\n print(\"=\" * 70, file=sys.stderr)\n total = 0.0\n max_points = 0.0\n for problem in self.problems:\n total += problem.run_tests()\n max_points += problem.max_grade\n print(f\"Total Grade: {total}/{max_points}\", file=sys.stderr)", "def print_summary(self):\n print(\"Word Level\")\n self.model_word.summary()\n \n print(\"Sent Level\")\n self.model_sent.summary()\n\n print(\"Doc Level\")\n self.model.summary()", "def summary(self):\n\n self.model.summary(print_fn=lambda x: logging.info(x))", "def _printSummary(self):\n\t\t### COP OUT\n\t\tif self.params['background'] is True:\n\t\t\tself.stats['count'] += 1\n\t\t\treturn\n\n\t\t### THIS NEEDS TO BECOME MUCH MORE GENERAL, e.g. 
Peaks\n\t\ttdiff = time.time()-self.stats['startseries']\n\t\tif not self.params['continue'] or tdiff > 0.1:\n\t\t\tcount = self.stats['count']\n\t\t\t#if(count != self.stats['lastcount']):\n\t\t\tsys.stderr.write(\"\\n\\tSUMMARY: \"+self.functionname+\"\\n\")\n\t\t\tself._printLine()\n\t\t\tsys.stderr.write(\"\\tTIME: \\t\"+apDisplay.timeString(tdiff)+\"\\n\")\n\t\t\tself.stats['timesum'] = self.stats['timesum'] + tdiff\n\t\t\tself.stats['timesumsq'] = self.stats['timesumsq'] + (tdiff**2)\n\t\t\ttimesum = self.stats['timesum']\n\t\t\ttimesumsq = self.stats['timesumsq']\n\t\t\tif(count > 1):\n\t\t\t\ttimeavg = float(timesum)/float(count)\n\t\t\t\ttimestdev = math.sqrt(float(count*timesumsq - timesum**2) / float(count*(count-1)))\n\t\t\t\ttimeremain = (float(timeavg)+float(timestdev))*self.stats['seriesleft']\n\t\t\t\tsys.stderr.write(\"\\tAVG TIME: \\t\"+apDisplay.timeString(timeavg,timestdev)+\"\\n\")\n\t\t\t\t#print \"\\t(- TOTAL:\",apDisplay.timeString(timesum),\" -)\"\n\t\t\t\tif(self.stats['seriesleft'] > 0):\n\t\t\t\t\tsys.stderr.write(\"\\t(- REMAINING TIME: \"+apDisplay.timeString(timeremain)+\" for \"\n\t\t\t\t\t\t+str(self.stats['seriesleft'])+\" series -)\\n\")\n\t\t\t#print \"\\tMEM: \",(mem.active()-startmem)/1024,\"M (\",(mem.active()-startmem)/(1024*count),\"M)\"\n\t\t\tself.stats['count'] += 1\n\t\t\tself._printLine()", "def summary(self):\r\n print(self.model.summary())", "def show_summary(self, lang):\n return self.summary % self.vars", "def print_summary(self):\n #exec(\"print(storyline.{}_clause+', '+storyline.{}_clause.lower()+', '+storyline.{}_clause.lower())\".format(\"A\", \"B\", \"C\"))\n #exec(\"print(self.{}_clause+', '+self.{}_clause.lower()+', '+self.{}_clause.lower())\".format(\"A\", \"B\", \"C\"))\n lwr = \".lower()\"\n exec(\"print(\"+str(3*(\"self.{}_clause{}+',', \")).format(\"A\",\"\",\"B\",lwr,\"C\",lwr)+\"'\\b\\b')\")", "def print_acts_summary(master_results_data,\n master_results_pass,\n master_results_fail,\n master_results_unknown,\n pass_counter,\n fail_counter,\n unknown_counter,\n split_results=False,\n ):\n widths = [max(map(len, col)) for col in zip(*master_results_data)]\n if not split_results:\n for row in master_results_data:\n print(' '.join((val.ljust(width) for val, width in zip(row,\n widths))))\n print('')\n print('Pass: %s '\n 'Fail: %s '\n 'Unknown: %s '\n 'Total: %s' % (pass_counter,\n fail_counter,\n unknown_counter,\n pass_counter+fail_counter+unknown_counter))\n else:\n print('')\n for row in master_results_pass:\n print(' '.join((val.ljust(width) for val, width in zip(row,\n widths))))\n print('Pass: %s' % pass_counter)\n\n print('')\n for row in master_results_fail:\n print(' '.join((val.ljust(width) for val, width in zip(row,\n widths))))\n print('Fail: %s' % fail_counter)\n if unknown_counter is not 0:\n print('')\n for row in master_results_unknown:\n print(' '.join((val.ljust(width)\n for val, width in zip(row, widths))))\n print('Unknown: %s' % unknown_counter)", "def print_results(self):\n self.accuracy = round(accuracy_score(self.y_val, self.y_pred, 'weighted'), 4)\n self.f1 = round(f1_score(self.y_val, self.y_pred, average='weighted'), 4)\n self.precision = round(precision_score(self.y_val, self.y_pred, average='weighted'), 4)\n\n print(f'Results for {self.title}:')\n print(f'{self.title} accuracy: {self.accuracy}')\n print(f'{self.title} f-score: {self.f1}')\n print(f'{self.title} precision: {self.precision}')", "def summary(self):\n raise NotImplementedError", "def display_results():\n pass", "def summary(self, 
**kwargs):\n raise ValueError(\"This function is not available in lazy results evaluation as it would \"\n \"require all pairwise tests to be performed.\")", "def dump_total_results(statistic_entries):\n individual_tests = sum([entry['correct answers'] + entry['wrong answers']\n for entry in statistic_entries])\n average_per_test = sum([entry['total time (s)'] for entry in statistic_entries]) \\\n / float(individual_tests)\n average_per_run = sum([entry['total time (s)'] for entry in statistic_entries]) \\\n / float(len(statistic_entries))\n\n best_time = min([entry['best time (s)'] for entry in statistic_entries])\n worst_time = max([entry['worst time (s)'] for entry in statistic_entries])\n\n print(\"\\nSummary for all done tests:\")\n print(\" %5d total test runs\" % len(statistic_entries))\n print(\" %5d individual tests\" % individual_tests)\n print(\" %5.1f individual tests per run\" % (individual_tests / float(len(statistic_entries))))\n print(\" %5.2f seconds per answer (average)\" % average_per_test)\n print(\" %5.2f seconds per run (average)\" % average_per_run)\n print(\" %5.2f seconds was best time.\" % best_time)\n print(\" %5.2f seconds was worst time.\" % worst_time)", "def summary(self) -> str:\n return pulumi.get(self, \"summary\")", "def summarize(self):\n \n print self._num_tests, \"tests ran with\", len(self._failed_tests), \"failures:\", sorted(list(self._failed_tests))\n\n self._num_tests = 0\n self._failed_tests = set()", "def print_summary(metrics_list, labels_list):\n for metric, name in zip(metrics_list, labels_list):\n print('*' * 108)\n print(name)\n mean_inc_acc = []\n for i in range(metric.shape[0]):\n print('\\t', end='')\n for j in range(metric.shape[1]):\n print('{:5.2f}% '.format(100 * metric[i, j]), end='')\n if np.trace(metric) == 0.0:\n if i > 0:\n avg = 100 * metric[i, :i].mean()\n mean_inc_acc += [avg]\n print('\\tAvg.:{:5.2f}% '.format(avg), end='')\n else:\n avg = 100 * metric[i, :i + 1].mean()\n mean_inc_acc += [avg]\n print('\\tAvg.:{:5.2f}% '.format(avg), end='')\n print()\n print()\n\n # Computing AIA across all incremental states (thus excluding the first non-incremental state)\n print('\\tMean Incremental Acc.: {:5.2f}%'.format(np.mean(mean_inc_acc[1:])))\n print('*' * 108)", "def summarise(self):\n self.summary = az.summary(self.trace, var_names=[\"~chol\"], round_to=2)\n print(self.summary)\n return self.summary", "def summary(self):\n return ''", "def summary(self):\n return self._summary", "def summary(self):\n return self._summary", "def summary(self):\n return self._summary", "def summary(self):\n return self._summary", "def summary(self):\n return self._summary", "def summary(self):\n return self._summary", "def summary(self):\n return self._summary", "def summary(self):\n if _have_ipython:\n IPython.display.display(IPython.display.HTML(self._repr_html_()))\n else:\n print(self)", "def summary(self):\n self.model.summary()", "def summary(self):\n summary = defaultdict(int)\n\n for r in self.results:\n summary[r.result] += 1\n\n return summary", "def _render_results_scan_summary(self):\n\n core.add_text(\n 'Scan Summary',\n color=self._control_text_color,\n parent=self._window_name)\n\n core.add_text(\n 'Number of images scanned: ',\n parent=self._window_name)\n\n core.add_same_line(parent=self._window_name)\n\n core.add_text(\n name='number_of_scanned_images_text',\n source=NUMBER_OF_SCANNED_IMAGES,\n parent=self._window_name)\n\n core.add_text(\n 'Number duplicate image sets: ',\n parent=self._window_name)\n\n 
core.add_same_line(parent=self._window_name)\n\n core.add_text(\n str(len(self._duplicates_list)),\n parent=self._window_name)\n\n core.add_text('', parent=self._window_name)", "def show_summary(self) -> None:\n all_averages = []\n\n for i in self.album_statistics.values():\n try:\n all_averages.append(i['avg'])\n except (TypeError, ValueError):\n pass\n # print(all_averages)\n try:\n final_average = math.ceil(np.mean(all_averages))\n except ValueError:\n click.echo(\n 'Oops! https://lyrics.ovh couldn\\'t find any lyrics across any'\n ' album. This is caused by inconsistent Artist names from'\n ' Musicbrainz and lyrics.ovh. Try another artist.'\n )\n raise (SystemExit)\n output = BeautifulTable(max_width=200)\n output.set_style(BeautifulTable.STYLE_BOX_ROUNDED)\n output.column_headers = [\n 'Average number of words in tracks across all albums\\n'\n f'for {self.artist}'\n ]\n output.append_row([final_average])\n click.echo(output)\n\n return self", "def pytest_terminal_summary(self, terminalreporter, exitstatus):\n # pylint: disable=unused-argument\n terminalreporter.section(\"Test Information\")\n for test, info in self._info.items():\n for datum in info:\n terminalreporter.write(\"{}: {}\\n\".format(test, datum))", "def print_tests_results(self):\n\n for test in self.test_report:\n for detail in test:\n print detail + ': ', test[detail]", "def test_summary_with_results(self):\n add_constituency_result_line('X, 10, C')\n r = self.client.get('/summary')\n self.assertEqual(r.status_code, 200)\n soup = BeautifulSoup(r.data, 'html.parser')\n self.assertIs(soup.find(id='no-results'), None)\n self.assertIsNot(soup.find(id='results-table'), None)", "def print_results_summary(self,remove_zeros=False,trim_end_zeros=False):\n if remove_zeros:\n if trim_end_zeros:\n raise Warning('remove_zeros = False overrides trim_end_zeros=True. 
Removing all values with mean=zero')\n nz_ind = np.nonzero(self.xhat)\n xhats = self.xhat[nz_ind]\n sigmas = self.mean_stddev[nz_ind]\n elif trim_end_zeros:\n xhats = np.trim_zeros(self.xhat,trim='b')\n sigmas = self.mean_stddev[np.arange(xhats.size)]\n else:\n xhats = self.xhat\n sigmas = self.mean_stddev\n\n self._print_results_header()\n print('{: >5} {: >8} {: >10} {: >4}'.format('n','mean','error','pct_error'))\n for i in range(xhats.size):\n print('{0: >5} {1: >8.4g} +/- {2: >10.4g} ({3: >4.1%})'.format(i,xhats[i],sigmas[i],sigmas[i] / xhats[i]))", "def print_summary_metrics(lst):\n print('*' * 50)\n print(' ' * 16 + 'Summary statistics')\n print('*' * 50)\n print('mean: {} | median: {} | mode: {}'.format(get_mean(lst),\n get_median(lst),\n get_mode(lst)))\n print('range: {} | IQR: {}'.format(get_range(list_nums),\n get_IQR(list_nums)))\n print('\\n')\n print('original list: \\n {}'.format(lst))\n print('sorted list: \\n {}'.format(sorted(lst)))\n print('List without outliers: \\n {}'.format(\n remove_outliers(list_nums)))", "def summary_str(self):\n if not self.results:\n return self.summary.empty() or ''\n elif self.state == Ok:\n return self.summary.ok(self.results) or ''\n return self.summary.problem(self.results) or ''", "def print_results(self):\n print(\"Total sec: \", self.total_sec)\n print(\"Total min: \", self.total_min)\n print(\"Total hours: \", self.total_hours)", "def summary(self, summary):\n\n self._summary = summary", "def summary(self, summary):\n\n self._summary = summary", "def summary(self, summary):\n\n self._summary = summary", "def display_summary(self, *args):\n logger.debug(u\"{} Summary\".format(self.joueur))\n yield(self.remote.callRemote(\n \"display_summary\", self.currentperiod.todict()))\n self.joueur.info(\"Ok\")\n self.joueur.remove_waitmode()", "def print_summary(self, **kwargs):\r\n compile_time = sum([ps.compile_time for ps\r\n in self.profile_stats.values()])\r\n\r\n fct_call = dict([(fn, ps.fct_callcount)\r\n for (fn, ps) in self.profile_stats.items()])\r\n\r\n fct_call_time = dict([(fn, ps.fct_call_time)\r\n for (fn, ps) in self.profile_stats.items()])\r\n\r\n apply_time = {}\r\n for fn, ps in self.profile_stats.items():\r\n for (i, node) in enumerate(fn.maker.fgraph.toposort()):\r\n apply_time[(i, node)] = ps.apply_time[node]\r\n for (i, n), t in apply_time.items():\r\n if t == 0:\r\n print i, n\r\n\r\n apply_cimpl = {}\r\n for fn, ps in self.profile_stats.items():\r\n apply_cimpl.update(ps.apply_cimpl)\r\n\r\n message = self.message\r\n\r\n variable_shape = {}\r\n for fn, ps in self.profile_stats.items():\r\n variable_shape.update(ps.variable_shape)\r\n\r\n other_time = dict(\r\n linker_time=sum(\r\n [ps.linker_time for ps in self.profile_stats.values()]),\r\n optimizer_time=sum(\r\n [ps.optimizer_time for ps in self.profile_stats.values()]))\r\n\r\n self.print_summary_(\"print_summary\",\r\n compile_time, fct_call_time, fct_call,\r\n apply_time, apply_cimpl, message, variable_shape,\r\n self.local_time, other_time,\r\n **kwargs)", "def get_summary(self):\n \n text = \"word: {}, total_score: {} \\n\".format(self.clue, self.total_score)\n for card, score in self.sorted_card_score_pairs:\n card_text = \"\\t card.name:{} (team:{}), similarity: {} \\n\".format(card.name, card.color, score)\n text += card_text\n return text", "def print_results(results):\n print(f\"Intial Entries: {results[0]}\")\n print(f\"Added Entries: {results[1]}\")\n print(f\"Final Entries: {results[2]}\")\n print(f\"Total Run Time: {results[3]}\")\n print(\"\\n\")", "def 
summary(self):\n response = self._get(self.uri_for(\"summary\"))\n return json_to_py(response)", "def _display_results(self):\n self._display_summary()\n self._display_domain_record()\n self._display_ip_record()\n self._display_cert_details()\n self._display_ti_data()\n self._display_screenshot()\n self._display_related_alerts()\n self._display_bookmarks()\n self._display_dns_results()\n self._display_hosts()\n self._display_flows()", "def print_results(self):\n for test_cases in self._tests:\n for test_case in test_cases:\n print('{} ...ok'.format(test_case.get_name()))\n return 0", "def test_summaries(self):\n try:\n ans = str(self.model)\n except:\n assert False, \"Model __repr__ failed.\"\n\n try:\n print(self.model)\n except:\n assert False, \"Model print failed.\"\n\n try:\n self.model.summary()\n except:\n assert False, \"Model summary failed.\"", "def print_summaries(summaries):\n\n for method, summary in summaries:\n print(method)\n print('')\n print(summary)\n print('')", "def _print_results(results, title=''):\n pstr = '[' + title + ']: ' if title else ''\n for k, v in results.items():\n pstr += '\\t{}: {}'.format(k, v)\n print(pstr)", "def _summary(obj):\n return obj.summary", "def show_results(results, n=10, print_results=True):\n # Print headline\n s = \"\"\n if len(results) == 0:\n s += \"-- No results --\"\n else:\n s += \"{0:18s} {1:7s}\\n\".format(\"Class\", \"Prob\")\n s += \"#\"*50 + \"\\n\"\n for entry in results:\n if n == 0:\n break\n else:\n n -= 1\n s += \"{0:18s} {1:>7.4f}%\\n\".format(entry['semantics'],\n entry['probability']*100)\n s += \"#\"*50\n if print_results:\n print(s)\n return s", "def summary(self, verbosity=0, file=None):\n\n if type(file) == type(\"\"):\n f=open(file, \"w\")\n else: f= sys.stdout\n\n f.write(_(\"The number of vertices is %d. 
\") % self.number_of_vertices)\n f.write(_(\"The largest %s is %d.\\n\") % (self.degree_type, self.max_deg))\n f.write(\"\\nDegree distribution:\\n\")\n f.write(_(\" 0:%7.4f%%\\n\") % \\\n (self.n_0/self.number_of_vertices*100))\n\n column=1\n for degree, probability in self.dd:\n f.write(\" %5d:%7.4f%%\" % (degree, probability*100))\n if column == 5:\n f.write(\"\\n\")\n column=1\n else: column += 1\n f.write(\"\\n\")", "def print_results():\n now_time = time.time()\n diff_time_in_sec = now_time - start_time\n generated_per_second = total / diff_time_in_sec\n generated_per_hour = 3600 * generated_per_second\n saved_per_second = success / diff_time_in_sec\n saved_per_hour = 3600 * saved_per_second\n\n os.system('cls' if os.name == 'nt' else 'clear')\n print(f\"{'Generated:' : <16}{total : <12}\")\n print(f\"{'New graphs:' : <16}{success : <12}\")\n print(f\"{'Success rate:' : <16}{round((success / total) * 100, 3) : <7} %\")\n print(f\"{'Speed:' : <16}{round(generated_per_hour) : <7} graphs/h\")\n print(f\"{'Save speed:' : <16}{round(saved_per_hour) : <7} graphs/h\")", "def summary(self):\n name = 'name : ' + self.get_name()\n description = 'description : ' + self.get_description()\n agility = 'agility : ' + str(self.get_agility())\n strength = 'strength : ' + str(self.get_strength())\n health_points = 'health_points : ' + str(self.get_health_points())\n summary = '\\n'.join([name, description, agility, strength, health_points])\n if self.take_weapon():\n summary += self.take_weapon().summary()\n return summary", "def summary(\n self, parameters_to_show=4, show_parameters=True, show_nsamples=True\n ):\n string = \"\"\n if self.path_to_results_file is not None:\n string += \"file: {}\\n\".format(self.path_to_results_file)\n string += \"cls: {}.{}\\n\".format(\n self.__class__.__module__, self.__class__.__name__\n )\n if show_nsamples:\n string += \"nsamples: {}\\n\".format(len(self.samples))\n if show_parameters:\n string += \"parameters: {}\".format(\n self._parameter_summary(\n self.parameters, parameters_to_show=parameters_to_show\n )\n )\n return string", "def Display(self, unused_args, result):\n util.PrettyPrint(result)", "def Display(self, unused_args, result):\n util.PrettyPrint(result)", "def summaryText(self):\n\n print('\\nReport Summary:\\n')\n for author in self.lowQuality.keys():\n if len(self.lowQuality[author]) > 0:\n print('Author: ' + author)\n print('---------------------')\n # do some sorting for readability\n files = []\n file2rating = {}\n for fileRating in self.lowQuality[author]:\n files.append(fileRating[1])\n file2rating[fileRating[1]] = fileRating[0]\n files.sort()\n for fileRating in files:\n print(file2rating[fileRating] + ' :: ' + fileRating)\n print('\\n\\n')", "def summary_string(self) -> str:", "def __printResults(files, expected, actual, similarity):\n if (showIndividualResults):\n for i in range(len(files)):\n print \"\\nExpected = %s\\nActual = %s \\nSimilarity = %f\" % (expected[i], actual[i], similarity[i])\n print \"\\nMean Similarity = %f\" % np.mean(similarity)", "def _print_summary(data, metric):\n\n print(u'Cortical thickness {}: {:.2f} \\u00B1 {:.2f} [{:.2f}--{:.2f}]'\n .format(metric, data[:, 0].mean(), data[:, 0].std(ddof=1),\n data[:, 0].min(), data[:, 0].max()))\n print('Other modalities {}: {:.2f} \\u00B1 {:.2f} [{:.2f}--{:.2f}]'\n .format(metric, data[:, 1:].mean(), data[:, 1:].std(ddof=1),\n data[:, 1:].min(), data[:, 1:].max()))\n print('Overall {}: {:.2f} \\u00B1 {:.2f} [{:.2f}--{:.2f}]'\n .format(metric, data.mean(), data.std(ddof=1),\n 
data.min(), data.max()))", "def show_summary(self, out = None, debug = False):\n if (out is None) : out = sys.stdout\n results = self.matching_candidates\n if (len(results) > 0):\n self.atom_props.show_properties(identity = \"HOH\", out = out)\n if (self.nuc_phosphate_site):\n print(\" appears to be nucleotide coordination site\", file=out)\n if (self.no_final):\n print(\" Found potential ion%s outside of specified set:\" % \\\n (\"s\" if len(results) > 1 else \"\"), file=out)\n if (self.final_choice is not None):\n # We have one result that we are reasonably certain of\n elem_params, score = results[0]\n if elem_params.element not in mmtbx.ions.HALIDES:\n self.atom_props.show_ion_results(\n identity = str(self.final_choice),\n out = out,\n valence_used = self.valence_used,\n confirmed = True)\n else:\n print(\" Probable anion:\", str(elem_params), file=out)\n print(\"\", file=out)\n elif (len(results) > 1):\n # We have a couple possible identities for the atom\n below_cutoff = [ elem_params for elem_params, score in results\n if score < self.ambiguous_valence_cutoff]\n if len(below_cutoff) == 1:\n elem_params = below_cutoff[0]\n print(\" ambigous results, best valence from %s\" % \\\n str(elem_params), file=out)\n self.atom_props.show_ion_results(\n identity = str(elem_params),\n out = out,\n valence_used = True)\n print(\"\", file=out)\n else:\n ions = [str(i[0]) for i in sorted(results, key = lambda x: x[1])]\n print(\" ambiguous results, could be %s\" % \", \".join(ions), file=out)\n for elem_params, score in results :\n self.atom_props.show_ion_results(identity = str(elem_params),\n out = out)\n print(\"\", file=out)\n else:\n if (self.atom_type != WATER) or (self.nuc_phosphate_site):\n self.atom_props.show_properties(identity = \"HOH\", out = out)\n if (self.nuc_phosphate_site):\n print(\" appears to be nucleotide coordination site\", file=out)\n # try anions now\n if (self.looks_like_halide):\n print(\" Probable cation: %s\" % str(self.final_choice), file=out)\n print(\"\", file=out)\n else:\n # atom is definitely not water, but no reasonable candidates found\n # print out why all the metals we tried failed\n if (debug) and (len(self.filtered_candidates) > 0):\n print(\" insufficient data to identify atom\", file=out)\n possible = True\n for params in self.filtered_candidates:\n if (self.atom_props.has_compatible_ligands(str(params))):\n if possible:\n print(\" possible candidates:\", file=out)\n possible = False\n self.atom_props.show_ion_results(identity = str(params),\n out = out)\n else :\n print(\" incompatible ligands for %s\" % str(params), file=out)\n #print >> out, \" rejected as unsuitable:\"\n #for params in self.rejected_candidates:\n # if (self.atom_props.has_compatible_ligands(str(params))):\n # self.atom_props.show_ion_results(identity = str(params),\n # out = out)\n # else :\n # print >> out, \" incompatible ligands for %s\" % str(params)\n print(\"\", file=out)", "def print_summary(stim_table):\n print(\n '{:<20}{:>15}{:>15}\\n'.format('Colname', 'No. 
conditions', 'Mean N/cond')\n )\n for colname in stim_table.columns:\n conditions, occurrences = np.unique(\n np.nan_to_num(stim_table[colname]), return_counts = True\n )\n print(\n '{:<20}{:>15}{:>15.1f}'.format(\n colname, len(conditions), np.mean(occurrences)\n )\n )", "def print_report(self):\n print '=' * 20 + ' %s ' % self.label + '=' * 20\n print '%-20s%5s\\t%4s\\t%4s\\t%4s\\t%4s' % (\n 'Hand' + '=' * 16, '#', 'Frac', 'W', 'Tie', 'L')\n for hand, result_dict in self.counts.iteritems():\n total_for_hand = sum(result_dict.itervalues())\n if total_for_hand == 0:\n win_frac = 0.0\n tie_frac = 0.0\n loss_frac = 0.0\n else:\n win_frac = float(result_dict[WIN_RESULT])/total_for_hand\n tie_frac = float(result_dict[TIE_RESULT])/total_for_hand\n loss_frac = float(\n result_dict[LOSS_RESULT])/total_for_hand\n print '%-20s%5d\\t%0.3f\\t%0.3f\\t%0.3f\\t%0.3f' % (\n hand, total_for_hand, float(total_for_hand)/self.total_items,\n win_frac, tie_frac, loss_frac)", "def print_results(self, f1_score, precision_score, recall_score):\n print(\"Algorithm: %s\" % self.name)\n self.print_score(f1_score, \"F1 Score\")\n self.print_score(precision_score, \"Precision Score\")\n self.print_score(recall_score, \"Recall Score\")", "def summary(self, test_type='t-test'):\n summary = f'Results for running {self.cv_method} evaluation for {self.method} '\n summary += f'on {self.n_model} models:\\n\\n'\n name_length = max([max(len(m.name) for m in self.models) + 1, 6])\n means = self.get_means()\n sems = self.get_sem()\n if means is None:\n means = np.nan * np.ones(self.n_model)\n if sems is None:\n sems = np.nan * np.ones(self.n_model)\n try:\n p_zero = self.test_zero(test_type=test_type)\n p_noise = self.test_noise(test_type=test_type)\n except ValueError:\n p_zero = np.nan * np.ones(self.n_model)\n p_noise = np.nan * np.ones(self.n_model)\n # header of the results table\n summary += 'Model' + (' ' * (name_length - 5))\n summary += '| Eval \\u00B1 SEM |'\n summary += ' p (against 0) |'\n summary += ' p (against NC) |\\n'\n summary += '-' * (name_length + 51)\n summary += '\\n'\n for i, m in enumerate(self.models):\n summary += m.name + (' ' * (name_length - len(m.name)))\n summary += f'| {means[i]: 5.3f} \\u00B1 {sems[i]:4.3f} |'\n if p_zero[i] < 0.001:\n summary += ' < 0.001 |'\n else:\n summary += f'{p_zero[i]:>13.3f} |'\n if p_noise[i] < 0.001:\n summary += ' < 0.001 |'\n else:\n summary += f'{p_noise[i]:>14.3f} |'\n summary += '\\n'\n summary += '\\n'\n if self.cv_method == 'crossvalidation':\n summary += 'No p-values available as crossvalidation provides no variance estimate'\n elif test_type == 't-test':\n summary += 'p-values are based on uncorrected t-tests'\n elif test_type == 'bootstrap':\n summary += 'p-values are based on percentiles of the bootstrap samples'\n elif test_type == 'ranksum':\n summary += 'p-values are based on ranksum tests'\n return summary", "def printResults(self, stream=sys.stdout):\n # Only master writes.\n if MPICommons.isMaster():\n stream.write(\"%15s %15s %15s %12s\\n\"%(\" time (t)\", \" count (n)\", \"(dn/dt) \", \"stdErr\"))\n n_tot = 0\n\t actualTot = 0\n t = 0.0\n for i,n in enumerate(self.__data):\n # Calculate the values to present.\n t = i * self.__time_interval\n actualTot += n\n dt = self.__time_interval\n n_tot += n\n dn = n\n rateEst = self.__floatAnalInterval*dn/dt\n stdErr = self.__floatAnalInterval*math.sqrt(dn)/dt\n # Only for times != zero.\n if (i > 0):\n stream.write(\"%15.5f %15i\"%(t, n_tot) +\" \"+ \"{:.6E}\".format(rateEst) +\" \"+\"{:.3E}\".format(stdErr) 
+\"\\n\")\n eqTime = self.__finalTime - self.__initialTime\n stream.write(\"\\nOverall we counted the following number of counts in the following amount of time: \" + \"%6i\"%(actualTot) + \" \" + \"{:.6E}\".format(eqTime))", "def printSummary(result):\n inputCount = result['inputCount']\n print('Kept %d of %d (%.2f%%) candidate substring%s seen on input.' %\n (len(result['substrings']), inputCount,\n len(result['substrings']) / inputCount * 100.0,\n '' if inputCount == 1 else 's'), file=sys.stderr)\n\n notEnoughTruePositives = result['notEnoughTruePositives']\n print('%d substring%s did not meet the minimum true positive '\n 'requirement (%d).' %\n (notEnoughTruePositives, '' if notEnoughTruePositives == 1 else 's',\n args.minTruePositives),\n file=sys.stderr)\n\n fractionTooLow = result['fractionTooLow']\n print('%d substring%s did not have a high enough true positive '\n 'fraction (%f).' %\n (fractionTooLow, '' if fractionTooLow == 1 else 's',\n args.minTruePositiveFraction),\n file=sys.stderr)\n\n inferior = result['inferior']\n if inferior == 1:\n print('1 substring was inferior to (at least) one of its own '\n 'substrings.', file=sys.stderr)\n else:\n print('%d substrings were inferior to (at least) one of their own '\n 'substrings.' % inferior, file=sys.stderr)", "def _print_aggregate_results(\n task: Task, task_results: Dict[Task, List[List[Dict[str, Any]]]]\n) -> None:\n aggregate_task_result = aggregate_nvs_results(task_results[task])\n print(\"\")\n print(f\"Aggregate results for task={task}:\")\n pretty_print_nvs_metrics(aggregate_task_result)\n print(\"\")", "def print_summary(self,\r\n n_apply_to_print=config.ProfileMode.n_apply_to_print,\r\n n_ops_to_print=config.ProfileMode.n_ops_to_print):\r\n fct_call_time = self.mode.fct_call_time\r\n fct_call = self.mode.fct_call\r\n apply_time = self.apply_time\r\n op_cimpl = self.op_cimpl\r\n message = self.message\r\n outputs_size = self.outputs_size\r\n\r\n self.print_summary_(\"print_summary\",\r\n None,\r\n None,\r\n None,\r\n apply_time,\r\n op_cimpl,\r\n message,\r\n outputs_size,\r\n n_apply_to_print,\r\n n_ops_to_print)", "def result_summary(self):\r\n summary = ['Ran %d commands to test %d scripts. %d of these commands '\r\n 'failed and %d scripts could not be tested due to errors.' 
%\r\n (self.total_commands, self.total_scripts,\r\n self._num_failures(), self._num_script_errors())]\r\n\r\n if self._num_failures() > 0:\r\n summary.append('Failed scripts were: %s' %\r\n ' '.join(self._failed_scripts()))\r\n\r\n for error_info in self.script_errors.values():\r\n if len(error_info[0]) > 0:\r\n summary.append(self._format_script_error_summary(\r\n error_info[0], error_info[1]))\r\n\r\n if self.warnings:\r\n summary.append('Warnings:')\r\n for warning in self.warnings:\r\n summary.append(' ' + warning)\r\n\r\n return '\\n'.join(summary)", "def print_statistics(self):\n print 'Ran %s iterations in %0.3f seconds\\n' % (\n self.iterations, self.elapsed_time)\n\n print 'Overall Equity'\n for index in range(len(self.holdem_ranges)):\n range_short_form = '%r' % self.holdem_ranges[index]\n print 'P%s) %-15s %0.3f' % (\n index,\n range_short_form,\n float(self.win_stats.get(index, 0))/self.iterations)\n print '\\n'\n print 'Hand distribution for each player'\n for stats in self.player_stats:\n stats.print_report()", "def summary():\r\n\r\n average_age, counted = _find_average_age()\r\n male, female = _find_male_female_percentage()\r\n headings = [\"Total Number of Patients\", \"Average Age\",\r\n \"Patients Involved In Average Age\", \"Percentage of Male\",\r\n \"Percentage of Female\"]\r\n data = [len(resources), average_age, counted, male, female]\r\n return render_template(\"summary.html\", headings=headings, data=data)", "def show_model_summary(self):\n\t\treturn self.model.summary()", "def summarize(self):\n info(\"Running \" + self.title + \" generator\")", "def present_summary(services, methods, count, backup):\n print_heading(\"Summary\")\n if backup is not None:\n writer(f\"Backup: {backup}\")\n writer(f\"Showing {count[0]}/{len(services)} Services\")\n writer(f\"Showing {count[1]}/{len(methods)} Methods\\n\")", "def show_results(bill, tip, pct):\n \n total = tip + bill\n\n print(\"Bill amount: $\" + str(bill))\n print(\"Tip percentage: \" + str(pct) + \"%\")\n print(\"Tip amount due: $\" + str(tip))\n print(\"Total with tip: $\" + str(total))\n\n print(\"\"\"\n-----------------------------------\n GOOD BYE \n-----------------------------------\n\"\"\")", "def summary(self, yname=None, xname=None, title=None, alpha=.05):\n #TODO: add a summary text for options that have been used\n\n jvalue, jpvalue, jdf = self.jtest()\n\n top_left = [('Dep. Variable:', None),\n ('Model:', None),\n ('Method:', ['GMM']),\n ('Date:', None),\n ('Time:', None),\n ('No. Observations:', None),\n #('Df Residuals:', None), #[self.df_resid]), #TODO: spelling\n #('Df Model:', None), #[self.df_model])\n ]\n\n top_right = [#('R-squared:', [\"%#8.3f\" % self.rsquared]),\n #('Adj. 
R-squared:', [\"%#8.3f\" % self.rsquared_adj]),\n ('Hansen J:', [\"%#8.4g\" % jvalue] ),\n ('Prob (Hansen J):', [\"%#6.3g\" % jpvalue]),\n #('F-statistic:', [\"%#8.4g\" % self.fvalue] ),\n #('Prob (F-statistic):', [\"%#6.3g\" % self.f_pvalue]),\n #('Log-Likelihood:', None), #[\"%#6.4g\" % self.llf]),\n #('AIC:', [\"%#8.4g\" % self.aic]),\n #('BIC:', [\"%#8.4g\" % self.bic])\n ]\n\n if title is None:\n title = self.model.__class__.__name__ + ' ' + \"Results\"\n\n # create summary table instance\n from statsmodels.iolib.summary import Summary\n smry = Summary()\n smry.add_table_2cols(self, gleft=top_left, gright=top_right,\n yname=yname, xname=xname, title=title)\n smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,\n use_t=self.use_t)\n\n return smry", "def print_summary(self, **kwargs):\n compile_time = sum([ps.compile_time for ps\n in self.profile_stats.values()])\n\n fct_call = dict([(fn, ps.fct_callcount)\n for (fn, ps) in iteritems(self.profile_stats)])\n\n fct_call_time = dict([(fn, ps.fct_call_time)\n for (fn, ps) in iteritems(self.profile_stats)])\n\n apply_time = {}\n for fn, ps in iteritems(self.profile_stats):\n for (i, node) in enumerate(fn.maker.fgraph.toposort()):\n apply_time[(i, node)] = ps.apply_time[node]\n for (i, n), t in iteritems(apply_time):\n if t == 0:\n print(i, n)\n\n apply_cimpl = {}\n for ps in itervalues(self.profile_stats):\n apply_cimpl.update(ps.apply_cimpl)\n\n message = self.message\n\n variable_shape = {}\n for ps in itervalues(self.profile_stats):\n variable_shape.update(ps.variable_shape)\n\n other_time = dict(\n linker_time=sum(\n [ps.linker_time for ps in self.profile_stats.values()]),\n optimizer_time=sum(\n [ps.optimizer_time for ps in self.profile_stats.values()]))\n\n self.print_summary_(\"print_summary\",\n compile_time, fct_call_time, fct_call,\n apply_time, apply_cimpl, message, variable_shape,\n self.local_time, other_time,\n **kwargs)" ]
[ "0.82503015", "0.8232667", "0.79443955", "0.76772296", "0.7640588", "0.74202365", "0.7303483", "0.72779363", "0.7266571", "0.7234515", "0.7221001", "0.7205003", "0.7152706", "0.7136578", "0.7104214", "0.709339", "0.7073432", "0.70705724", "0.70628256", "0.70268404", "0.7017214", "0.7010544", "0.6970262", "0.696602", "0.696164", "0.6919556", "0.6897107", "0.68615025", "0.68459874", "0.68369794", "0.6807929", "0.68025094", "0.6801055", "0.6770942", "0.6769907", "0.67698175", "0.6765093", "0.6763754", "0.6761602", "0.6761602", "0.6761602", "0.6761602", "0.6761602", "0.6761602", "0.6761602", "0.67561185", "0.67448646", "0.67442065", "0.67416453", "0.6738218", "0.6732415", "0.6712361", "0.6704764", "0.6701137", "0.66964436", "0.6691677", "0.66840637", "0.6674501", "0.6674501", "0.6674501", "0.6671788", "0.66218054", "0.66174346", "0.6602291", "0.6573928", "0.6573571", "0.6570618", "0.65699595", "0.65695024", "0.65613914", "0.6554774", "0.65504587", "0.6549898", "0.65484357", "0.65248144", "0.6521019", "0.65196496", "0.65196496", "0.6518978", "0.6512291", "0.6509253", "0.65053177", "0.65039986", "0.64911085", "0.64837444", "0.6479132", "0.6473374", "0.6471507", "0.6471268", "0.64594907", "0.64562833", "0.6454562", "0.6450285", "0.6443177", "0.6442758", "0.6434183", "0.6434068", "0.6432104", "0.64305276", "0.64243895" ]
0.8227009
2
Sends out emails to the apps in the provided csv.
def send(app_csv='apps.csv', verbose=True, dry_run=True):
    results = []
    app_info = _csv_to_dict(app_csv)
    for app in app_info:
        # Get all the app info needed for this request.
        app_name = _get_app_name(app)
        contact_first_name = _get_contact_first_name(app)
        email_address = _get_contact_email(app)
        app_tote_store_url = _get_app_tote_store_url(app)
        subject = _get_email_subject(app_name)

        # If we already sent the first contact email, continue.
        if _did_send_first_contact_email(app):
            result = dict(
                app_name=app_name,
                contact_first_name=contact_first_name,
                email_address=email_address,
                app_tote_store_url=app_tote_store_url,
                subject=subject,
                status='skipped',
                error=None,
            )
            logger.info(result)
            results.append(result)
            continue

        try:
            # Get the appropriate template to send.
            email_template = _get_first_contact_email_template_name(app)
            template = env.get_template(email_template)

            # Render the template with app info.
            content = template.render(
                app_name=app_name,
                contact_first_name=contact_first_name,
                app_tote_store_url=app_tote_store_url,
            )

            send_email(to=email_address, subject=subject, html=content, dry_run=dry_run)

            result = dict(
                app_name=app_name,
                contact_first_name=contact_first_name,
                email_address=email_address,
                app_tote_store_url=app_tote_store_url,
                subject=subject,
                status='success',
                error=None,
            )
        except Exception as e:
            result = dict(
                app_name=app_name,
                contact_first_name=contact_first_name,
                email_address=email_address,
                app_tote_store_url=app_tote_store_url,
                subject=subject,
                status='failure',
                error=str(e),
            )

        logger.info(result)
        results.append(result)

        # Sleep momentarily to avoid dos'ing the server.
        if not dry_run:
            time.sleep(0.1)

    if verbose:
        _print_summary(results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_email_csv(csv_input):\n # Get a pandas dataframe column with all of the relevant duns numbers\n\n df = pd.read_csv(csv_input)\n duns_numbers = df.dunsnumber.tolist()\n\n # Gets the file number for the current file by taking the max of all of the other numbers in the lists directory and adding one to the hightest number\n\n non_decimal = re.compile(r'[^\\d]+')\n file_number_list = [int(non_decimal.sub('', file)) for file in listdir('mail/lists')]\n file_number = max(file_number_list)+1 if file_number_list else 1\n\n file_name = 'mail/lists/email_{0}.csv'.format(file_number)\n\n # Actually get the emails\n\n sam_qs = SamRecord.objects.all().filter(duns__in=duns_numbers)[:100]\n\n results = set([])\n\n pudb.set_trace()\n\n for sam in sam_qs:\n email = sam.email_address\n if email:\n results.add(email)\n\n with open(file_name, 'w') as f:\n for email in results:\n f.write(email+\"\\n\")", "def exec(self): \r\n emails = self.args[0].split(',')\r\n for email in emails:\r\n send_mail(self.args[1], self.args[2], email)\r\n return_text = \"Sent Mail To :: \" + self.args[0] +\"\\n\" + self.args[1] + \":\\n\" + self.args[2]\r\n return return_text", "def main(arguments, emailer):\n emailer.read_config()\n print(\"Config read.\")\n emailer.setup_config(pages=arguments.pages,\n email_list=arguments.email_list,\n items_range=arguments.range,\n config=arguments.config,\n database=arguments.database,\n file=arguments.file,\n email_address=arguments.email_address,\n email_password=arguments.email_password,\n send_time=arguments.time,\n frequency=arguments.frequency)\n emailer.write_config()\n \n emailer.setup_database()\n if emailer.pull_items_search() != 'bot':\n print(\"Items retrieved\")\n else:\n return\n \n emailer.items_to_xls()\n print(\"xls file created.\")\n emailer.items_to_csv()\n print(\"csv file created\")\n\n print(\"Sending emails.\")\n emailer.send_email()", "def send_emails(self):\n\n with open(self.emails_file) as fp:\n emails = fp.readlines()\n logging.debug('%s e-mail addresses are loaded from %s' % (len(emails), self.emails_file))\n\n emails = map(lambda email: email.strip(), emails)\n\n for i, email in enumerate(emails):\n try:\n self.send_email(email)\n except Exception as e:\n logging.exception('Can\\'t send e-mail to %s (number %s)!' 
% (email, i))\n else:\n logging.debug('E-mail was sent to %s (number %s)' % (email, i))\n\n sleep_time = self.timeout * (0.5 + random.random())\n time.sleep(sleep_time) # timeout\n\n logging.debug('Done!')", "def send_email(settings, excel):\n Email._set_email(settings, excel)\n Email._send_email_helper(settings, excel)", "def emailJobs(\n df, \n retainedCompany, \n senderName, \n defaultSenderEmail, \n emailPassword, \n senderTitle, \n senderCompany, \n senderCompanyHomePage, \n senderPhone, \n noContactCompanyListPickleFileName, \n port=465, \n returnHTML=True\n ):\n try:\n with open(noContactCompanyListPickleFileName, 'rb') as inputFile:\n noContactCompanyList = pickle.load(inputFile) \n except:\n noContactCompanyList = []\n\n for i in range(len(df)):\n companyName = df['Organization Name'][i]\n if companyName.lower() in noContactCompanyList:\n pass\n try:\n domainName = df['Domain'][i]\n jobsEmails = [prefix + '@' + domainName for prefix in ['jobs', 'careers']]\n # email all the jobs pages for that copmany\n sendEmails( \n 'guys', # addressing general company, so use 'guys' instead of individual name\n retainedCompany,\n companyName,\n jobsEmails,\n senderName,\n defaultSenderEmail,\n emailPassword,\n senderTitle,\n senderCompany,\n senderCompanyHomePage,\n senderPhone,\n port=port,\n returnHTML = returnHTML \n ) \n except:\n pass", "def write_emails_to_file(result_emails, category):\r\n\tf = open('emails.csv', 'wb')\r\n\tcsvWriter = csv.writer(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\r\n\tfor email in result_emails:\r\n\t\tcsvWriter.writerow([email, category])\t\r\n\tf.close()", "def _auto_email_send(self):\n records = self.search([('send_by', '=', 'mail')])\n\n for supplier in records:\n send_at = datetime.combine(fields.Date.today(),\n float_to_time(supplier.automatic_email_time, supplier.moment, supplier.tz)).astimezone(pytz.UTC).replace(tzinfo=None)\n if supplier.available_today and fields.Datetime.now() > send_at:\n lines = self.env['lunch.order'].search([('supplier_id', '=', supplier.id),\n ('state', '=', 'ordered'), ('date', '=', fields.Date.today())])\n\n if lines:\n order = {\n 'company_name': lines[0].company_id.name,\n 'currency_id': lines[0].currency_id.id,\n 'supplier_id': supplier.partner_id.id,\n 'supplier_name': supplier.name,\n 'email_from': supplier.responsible_id.email_formatted,\n }\n\n _lines = [{\n 'product': line.product_id.name,\n 'note': line.note,\n 'quantity': line.quantity,\n 'price': line.price,\n 'toppings': line.display_toppings,\n 'username': line.user_id.name,\n } for line in lines]\n\n order['amount_total'] = sum(line.price for line in lines)\n\n self.env.ref('lunch.lunch_order_mail_supplier').with_context(order=order, lines=_lines).send_mail(supplier.id)\n\n lines.action_confirm()", "def _send_bulk_mail(\n recipient_ids, sender_id, intent, email_subject, email_html_body,\n sender_email, sender_name, instance_id=None):\n _require_sender_id_is_valid(intent, sender_id)\n\n recipients_settings = user_services.get_users_settings(recipient_ids)\n recipient_emails = [user.email for user in recipients_settings]\n\n cleaned_html_body = html_cleaner.clean(email_html_body)\n if cleaned_html_body != email_html_body:\n log_new_error(\n 'Original email HTML body does not match cleaned HTML body:\\n'\n 'Original:\\n%s\\n\\nCleaned:\\n%s\\n' %\n (email_html_body, cleaned_html_body))\n return\n\n raw_plaintext_body = cleaned_html_body.replace('<br/>', '\\n').replace(\n '<br>', '\\n').replace('<li>', '<li>- ').replace('</p><p>', '</p>\\n<p>')\n 
cleaned_plaintext_body = html_cleaner.strip_html_tags(raw_plaintext_body)\n\n def _send_bulk_mail_in_transaction(instance_id=None):\n \"\"\"Sends the emails in bulk to the recipients.\"\"\"\n sender_name_email = '%s <%s>' % (sender_name, sender_email)\n\n email_services.send_bulk_mail(\n sender_name_email, recipient_emails, email_subject,\n cleaned_plaintext_body, cleaned_html_body)\n\n if instance_id is None:\n instance_id = email_models.BulkEmailModel.get_new_id('')\n email_models.BulkEmailModel.create(\n instance_id, recipient_ids, sender_id, sender_name_email, intent,\n email_subject, cleaned_html_body, datetime.datetime.utcnow())\n\n transaction_services.run_in_transaction(\n _send_bulk_mail_in_transaction, instance_id)", "def execute(self):\n return LOGGER.info(f\"{datetime.datetime.now()} - Sending EMail to the configured email list\")", "def write_to_csv(list_of_emails):\n import csv\n # use newline='' to prevent double-spaced rows\n with open('emails.csv', 'w', newline='') as outFile:\n outWriter = csv.writer(outFile)\n charNum = outWriter.writerow(['email'])\n for i in list_of_emails:\n charNum = outWriter.writerow([i])\n outFile.close()", "def send_email_users():\n\n # Get users emails\n users_emails = User.objects.exclude(\n Q(email='') |\n Q(email=None)\n ).values_list(\n 'email',\n flat=True\n )\n\n # Send email to each user\n # for email_user in users_emails:\n\n title = 'Se han calculado nuevos Hard Flag'\n msg = 'Actualmente se han agregado nuevos hard flag '\n msg += ' a la base de datos'\n\n email = EmailMessage(\n title,\n msg,\n to=users_emails\n )\n email.send()", "def send_bulk_course_email(entry_id, _xmodule_instance_args):\r\n # Translators: This is a past-tense verb that is inserted into task progress messages as {action}.\r\n action_name = ugettext_noop('emailed')\r\n visit_fcn = perform_delegate_email_batches\r\n return run_main_task(entry_id, visit_fcn, action_name)", "def send_emails():\n\n cmd = \"sendmail -f git@dev.rtsoft.ru\"\n for msg in EMAIL_MESSAGES:\n for rec in RECIPIENTS:\n call(\"echo '%s' | %s %s\" % (msg, cmd, rec), None, True)", "def sendMail(listEmailsToSend, title, data):\n if isinstance(listEmailsToSend, str):\n listEmailsToSend = [listEmailsToSend]\n send_mail(\n f'{title}',\n f'{data}',\n settings.EMAIL_HOST_USER,\n listEmailsToSend,\n fail_silently=False,\n )", "def send_email_to_admins(self, template_name, subject, **kw):\n \n mailer = self.app.module_map['mail']\n barcamp = self.barcamp\n new_user = self.user # active user\n for admin in self.barcamp.admin_users:\n print admin\n send_tos = [admin.email]\n kwargs = dict(\n new_user = new_user,\n user = admin,\n barcamp = barcamp,\n url = self.handler.url_for(\"barcamps.index\", slug = self.barcamp.slug, _full = True),\n notification_url = self.handler.url_for(\"barcamps.edit\", slug = self.barcamp.slug, _full = True)\n )\n kwargs.update(kw)\n payload = self.handler.render_lang(\"emails/%s.txt\" %template_name, **kwargs)\n mailer.mail(admin.email, subject, payload)", "def send_mail(month: str, data: list):\n\n V2RayLogger.debug('SMTP server: {0}:{1}.'.format(Config.get('mail_host'), Config.get('mail_port')))\n smtp = smtplib.SMTP_SSL(Config.get('mail_host'), Config.get('mail_port'))\n V2RayLogger.debug('SMTP login with: {0}:{1}.'.format(Config.get('mail_user'), Config.get('mail_pass')))\n smtp.login(Config.get('mail_user'), Config.get('mail_pass'))\n V2RayLogger.debug('SMTP login successful.')\n\n for row in data:\n V2RayLogger.debug('Send email: {0}:{1}.'.format(row[0], row[1]))\n message 
= '<tr align=left><th align=\"left\">{0:30s}</th><th align=\"left\">{1:9s}</th></tr>\\n'.format(\n row[0], row[1])\n message = MIMEText(message, 'html')\n message['Subject'] = Header(Config.get('mail_subject') + ': {0}'.format(month))\n message['From'] = Config.get('mail_user')\n message['To'] = row[0]\n\n smtp.sendmail(Config.get('mail_user'), row[0], message.as_string())\n V2RayLogger.info('Send traffic to: {0}.'.format(row[0]))", "def send_test_email_for_bulk_emails(tester_id, email_subject, email_body):\n tester_name = user_services.get_username(tester_id)\n tester_email = user_services.get_email_from_user_id(tester_id)\n _send_email(\n tester_id, tester_id, feconf.BULK_EMAIL_INTENT_TEST,\n email_subject, email_body, tester_email, sender_name=tester_name)", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n\n messageIds = []\n i = 0\n nextPageToken = None\n while (i <= 15):\n try:\n response = service.users().messages().list(userId='me', q='after:2016/09/01', maxResults=10000, pageToken=nextPageToken).execute()\n messages = response.get('messages')\n nextPageToken = response['nextPageToken']\n\n for m in messages:\n messageIds.append(m['id'])\n\n i+=1 \n except KeyError:\n break\n\n senders = []\n counter = 0\n for i in messageIds:\n data = service.users().messages().get(userId='me', id=i).execute()\n for d in data['payload']['headers']:\n if d['name'] == 'Received':\n print(d['value'][d['value'].find('; ')+1:d['value'].find('(PST)')])\n if d['name'] == 'From' and 'bounce' not in d['value']:\n senders.append(d['value'])\n print(counter, ' ', d['value'])\n counter += 1\n break\n\n emails = []\n with open('out.csv', 'wb') as f:\n writer = csv.writer(f, delimiter=',')\n for person in set(senders):\n cleaned = clean_data(person)\n name = cleaned[0]\n email = cleaned[1]\n if email not in emails:\n emails.append(email)\n if name != None and email != None:\n writer.writerow([name, email])", "def readInCSV(csvFile):\n\tprint \"Checking if helper app is installed...\"\n\tandroidCheckAndInstallHelper()\n\ttry:\n\t\tprint \"Will read in the files from %s\" % csvFile\n\t\tstatus = subprocess.call([\"adb\",\"shell\",\"am\",\"startservice\",\n\t\t\t\t\t\t\t\t \"-a\", \"com.synchronoss.androidDev.contactcreaterapp.action.IMPORT\",\n\t\t\t\t\t\t\t\t \"-e\", \"CSV\", csvFile,\n\t\t\t\t\t\t\t\t \"com.synchronoss.androidDev.contactcreaterapp/.CreateAndAddContacts\"],\n\t\t\t\t\t\t\t\t stdout=stdout,stderr=stderr)\n\t\tif (status == 1):\n\t\t\tprint \"Contacts successfully copied from csv on target device.\"\n\t\tif (status != 0):\n\t\t\tprint >>sys.stderr, \"Unable to launch contact adder app\"\n\t\t\tsys.exit()\n\texcept OSError as e:\n\t\tprint >>sys.stderr, \"Execution failed: \", e\n\t\tsys.exit()\n\twaitForHelperApp()", "def handle(self, *args, **options):\n\n candidates_with_email = [candidate for candidate in Candidate.objects.all()\n if candidate.contact_address and candidate.participating]\n\n\n print 'sending e-mails'\n conn = get_connection()\n for c in candidates_with_email:\n if c.should_send_reminder():\n\n print 'emailing', c\n # store timestamp for reminder email so that they don't get another one for <REMINDER_TIME_PERIOD> days\n c.last_reminder_sent = timezone.now()\n c.save()\n msg = make_email(c)\n conn.send_messages([msg])\n conn.close()", "def email_import(ctx, user_csv, group_size, group_name, section_name):\n usersChunked = dict()\n\n config_options = 
lazyTools.TOMLConfigCTXImport(ctx)\n\n debug = lazyTools.parentSetting(ctx, \"debug\")\n verbose = lazyTools.parentSetting(ctx, \"verbose\")\n\n if section_name.lower() in config_options[\"gophish\"]:\n\n # Debug print statement to check if the section name was properly found\n if debug:\n click.secho(\"[*] Section name found in config file.\", fg=\"green\")\n\n # Check if we need to be on the VPN\n if config_options[\"gophish\"][section_name.lower()][\"VPN_Required\"]:\n # Skip VPN check if debug is True\n if debug:\n click.secho(\"[*] Skipping VPN check \")\n else:\n if lazyTools.ConnectedToVPN(ctx.parent.parent.params[\"config_path\"]):\n # Connected to VPN\n if debug:\n click.secho(\"[*] Connected to VPN\", fg=\"green\")\n else:\n raise click.Abort(\n \"The VPN does not appear to be connected. Try again after connecting to the VPN. \"\n )\n\n # Connect to GoPhish server\n if debug:\n click.echo(\n \"[*] Using hostname: https://{hostname}:{port}\".format(\n hostname=config_options[\"gophish\"][section_name.lower()][\n \"Hostname\"\n ],\n port=config_options[\"gophish\"][section_name.lower()][\"Port\"],\n )\n )\n if config_options[\"gophish\"][section_name.lower()][\"Verify_SSL\"]:\n click.echo(\"[*] SSL connections will be verified.\")\n else:\n click.secho(\"[*] SSL connections will not be verified.\", bold=True)\n\n api = Gophish(\n config_options[\"gophish\"][section_name.lower()][\"api_key\"],\n host=\"https://{hostname}:{port}\".format(\n hostname=config_options[\"gophish\"][section_name.lower()][\"Hostname\"],\n port=config_options[\"gophish\"][section_name.lower()][\"Port\"],\n ),\n verify=config_options[\"gophish\"][section_name.lower()][\"Verify_SSL\"],\n )\n\n # Try to get list of existing groups\n try:\n groups = api.groups.get()\n except requests.exceptions.ConnectionError as e:\n click.secho(\n \"Connection to the GoPhish server failed because {e}. Check the host and try again.\".format(\n e=e\n ),\n fg=\"red\",\n )\n raise click.Abort()\n\n # Check if something went wrong. Error parsing on the part of GoPhish library needs some love.\n if isinstance(groups, Error):\n click.secho(\n \"[!] {message}. Remediate the issue and try again.\".format(\n message=groups.message\n ),\n fg=\"red\",\n bold=True,\n )\n raise click.Abort()\n\n # groups isn't an Error object, so we *should* be good to go.\n if debug:\n click.secho(\"A list of groups was successfully acquired.\", fg=\"green\")\n\n # List all users in existing groups\n for group in groups:\n # print(group.targets)\n for user in group.targets:\n pass # print(vars(user))\n # printUsersInGroup(group)\n\n # Read the CSV file with the users in it.\n with open(user_csv, \"r\", encoding=\"utf-8\") as user_csv_file:\n # dialect = csv.Sniffer().sniff(user_csv_file.read(1024))\n # print(vars(dialect))\n userReader = csv.DictReader(user_csv_file, delimiter=\",\")\n rowList = list()\n for row in userReader:\n rowList.append(row)\n\n # click.echo(tabulate(rowList, headers='keys', tablefmt=\"grid\"))\n\n # Divide the list of users into groups by group name\n # Template: <First>_<Second>_<Number\n # i.e. 
Phishing_Campaign_Remote_4\n\n group_name = group_name.replace(\" \", \"_\")\n group_name = group_name + \"_{}\"\n\n if group_size == 0:\n # Do not divide list of group_size is 0\n usersChunked = {group_name.format(1): rowList}\n\n else:\n chunks = [\n rowList[x : x + group_size] for x in range(0, len(rowList), group_size)\n ]\n\n for count, userListChunk in enumerate(chunks, start=1):\n usersChunked.update({group_name.format(count): userListChunk})\n\n # For each group in usersChunked, upload\n with click.progressbar(\n usersChunked,\n length=len(usersChunked),\n label=\"Groups Added\",\n show_eta=False,\n show_pos=True,\n ) as bar:\n for chunkName in bar:\n targetList = list()\n for user in usersChunked[chunkName]:\n targetList.append(\n User(\n first_name=user[\"First Name\"],\n last_name=user[\"Last Name\"],\n email=user[\"Email\"],\n position=user[\"Position\"],\n )\n )\n group = Group(name=chunkName, targets=targetList)\n\n group = api.groups.post(group)\n\n if isinstance(group, Error):\n click.secho(\n \"[!] {message}. Remediate the issue and try again.\".format(\n message=group.message\n ),\n fg=\"red\",\n bold=True,\n )\n raise click.Abort()\n\n if debug:\n click.echo(\"Group {} was successfully added.\".format(group.name))\n\n else:\n raise click.BadParameter(\n \"The section name '{}' doesn't appear to exist. Check the config file and try again.\".format(\n ctx.params[\"section_name\"]\n )\n )", "def report_mailer(accounts, days):\n account_names = _parse_accounts(accounts)\n sm_report_mailer(account_names, days)", "def send_main_email(self):\n\n print \"Sending main email\"\n \n # Make an html table to be body of email\n html_table = '<table style=\"font-size:12px\">'\n html_table += self.make_nfs_changed_rows(\"sprint\") # New features only\n html_table += self.make_nfs_changed_rows(\"status\") # New features only\n html_table += self.make_time_in_status_rows(self.stalled_nf_issues) \n html_table += self.make_time_in_status_rows(self.stalled_st_issues) # Sub-tasks\n html_table += '</table>' # Closing table tag\n\n recipients = self.config.get(\"recipients\", \"emails\").split(\"\\n\") # [recipients] section in .ini file\n \n# emails = self.config.items('recipients')\n# for key, email in emails:\n# recipients = ', '.join(self.config.items('recipients'))\n \n print recipients\n# sys.exit()\n self.send_email(recipients, html_table)", "def shops_procurement_email_csv(request):\n\n Order.objects.all().delete()\n Product.objects.all().delete()\n\n procurements = Procurement.objects.all()\n\n if procurements:\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=procurement_%s.csv' % procurement_id\n\n for procurement in procurements:\n\n writer = csv.writer(response)\n\n writer.writerow([\n 'Vendor',\n 'Product',\n 'Variant',\n 'Quantity',\n 'Grams'])\n\n order_by_args = [\n 'product_variant__product__vendor',\n 'product_variant', ]\n procurement_items = procurement.procurementitem_set.all().order_by(*order_by_args)\n\n for procurement_item in procurement_items:\n writer.writerow([\n procurement_item.product_variant.product.vendor,\n str(procurement_item.product_variant.product),\n str(procurement_item.product_variant.option1),\n str((procurement_item.order_units) or ''),\n str((procurement_item.order_weight) or '')])\n\n return response", "def _send_course_email(entry_id, email_id, to_list, global_email_context, subtask_status):\r\n # Get information from current task's request:\r\n task_id = subtask_status.task_id\r\n\r\n try:\r\n 
course_email = CourseEmail.objects.get(id=email_id)\r\n except CourseEmail.DoesNotExist as exc:\r\n log.exception(\"Task %s: could not find email id:%s to send.\", task_id, email_id)\r\n raise\r\n\r\n # Exclude optouts (if not a retry):\r\n # Note that we don't have to do the optout logic at all if this is a retry,\r\n # because we have presumably already performed the optout logic on the first\r\n # attempt. Anyone on the to_list on a retry has already passed the filter\r\n # that existed at that time, and we don't need to keep checking for changes\r\n # in the Optout list.\r\n if subtask_status.get_retry_count() == 0:\r\n to_list, num_optout = _filter_optouts_from_recipients(to_list, course_email.course_id)\r\n subtask_status.increment(skipped=num_optout)\r\n\r\n course_title = global_email_context['course_title']\r\n subject = \"[\" + course_title + \"] \" + course_email.subject\r\n from_addr = _get_source_address(course_email.course_id, course_title)\r\n\r\n course_email_template = CourseEmailTemplate.get_template()\r\n try:\r\n connection = get_connection()\r\n connection.open()\r\n\r\n # Define context values to use in all course emails:\r\n email_context = {'name': '', 'email': ''}\r\n email_context.update(global_email_context)\r\n\r\n while to_list:\r\n # Update context with user-specific values from the user at the end of the list.\r\n # At the end of processing this user, they will be popped off of the to_list.\r\n # That way, the to_list will always contain the recipients remaining to be emailed.\r\n # This is convenient for retries, which will need to send to those who haven't\r\n # yet been emailed, but not send to those who have already been sent to.\r\n current_recipient = to_list[-1]\r\n email = current_recipient['email']\r\n email_context['email'] = email\r\n email_context['name'] = current_recipient['profile__name']\r\n\r\n # Construct message content using templates and context:\r\n plaintext_msg = course_email_template.render_plaintext(course_email.text_message, email_context)\r\n html_msg = course_email_template.render_htmltext(course_email.html_message, email_context)\r\n\r\n # Create email:\r\n email_msg = EmailMultiAlternatives(\r\n subject,\r\n plaintext_msg,\r\n from_addr,\r\n [email],\r\n connection=connection\r\n )\r\n email_msg.attach_alternative(html_msg, 'text/html')\r\n\r\n # Throttle if we have gotten the rate limiter. This is not very high-tech,\r\n # but if a task has been retried for rate-limiting reasons, then we sleep\r\n # for a period of time between all emails within this task. Choice of\r\n # the value depends on the number of workers that might be sending email in\r\n # parallel, and what the SES throttle rate is.\r\n if subtask_status.retried_nomax > 0:\r\n sleep(settings.BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS)\r\n\r\n try:\r\n log.debug('Email with id %s to be sent to %s', email_id, email)\r\n\r\n with dog_stats_api.timer('course_email.single_send.time.overall', tags=[_statsd_tag(course_title)]):\r\n connection.send_messages([email_msg])\r\n\r\n except SMTPDataError as exc:\r\n # According to SMTP spec, we'll retry error codes in the 4xx range. 
5xx range indicates hard failure.\r\n if exc.smtp_code >= 400 and exc.smtp_code < 500:\r\n # This will cause the outer handler to catch the exception and retry the entire task.\r\n raise exc\r\n else:\r\n # This will fall through and not retry the message.\r\n log.warning('Task %s: email with id %s not delivered to %s due to error %s', task_id, email_id, email, exc.smtp_error)\r\n dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])\r\n subtask_status.increment(failed=1)\r\n\r\n except SINGLE_EMAIL_FAILURE_ERRORS as exc:\r\n # This will fall through and not retry the message.\r\n log.warning('Task %s: email with id %s not delivered to %s due to error %s', task_id, email_id, email, exc)\r\n dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])\r\n subtask_status.increment(failed=1)\r\n\r\n else:\r\n dog_stats_api.increment('course_email.sent', tags=[_statsd_tag(course_title)])\r\n if settings.BULK_EMAIL_LOG_SENT_EMAILS:\r\n log.info('Email with id %s sent to %s', email_id, email)\r\n else:\r\n log.debug('Email with id %s sent to %s', email_id, email)\r\n subtask_status.increment(succeeded=1)\r\n\r\n # Pop the user that was emailed off the end of the list only once they have\r\n # successfully been processed. (That way, if there were a failure that\r\n # needed to be retried, the user is still on the list.)\r\n to_list.pop()\r\n\r\n except INFINITE_RETRY_ERRORS as exc:\r\n dog_stats_api.increment('course_email.infinite_retry', tags=[_statsd_tag(course_title)])\r\n # Increment the \"retried_nomax\" counter, update other counters with progress to date,\r\n # and set the state to RETRY:\r\n subtask_status.increment(retried_nomax=1, state=RETRY)\r\n return _submit_for_retry(\r\n entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=True\r\n )\r\n\r\n except LIMITED_RETRY_ERRORS as exc:\r\n # Errors caught here cause the email to be retried. The entire task is actually retried\r\n # without popping the current recipient off of the existing list.\r\n # Errors caught are those that indicate a temporary condition that might succeed on retry.\r\n dog_stats_api.increment('course_email.limited_retry', tags=[_statsd_tag(course_title)])\r\n # Increment the \"retried_withmax\" counter, update other counters with progress to date,\r\n # and set the state to RETRY:\r\n subtask_status.increment(retried_withmax=1, state=RETRY)\r\n return _submit_for_retry(\r\n entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=False\r\n )\r\n\r\n except BULK_EMAIL_FAILURE_ERRORS as exc:\r\n dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])\r\n num_pending = len(to_list)\r\n log.exception('Task %s: email with id %d caused send_course_email task to fail with \"fatal\" exception. %d emails unsent.',\r\n task_id, email_id, num_pending)\r\n # Update counters with progress to date, counting unsent emails as failures,\r\n # and set the state to FAILURE:\r\n subtask_status.increment(failed=num_pending, state=FAILURE)\r\n return subtask_status, exc\r\n\r\n except Exception as exc:\r\n # Errors caught here cause the email to be retried. The entire task is actually retried\r\n # without popping the current recipient off of the existing list.\r\n # These are unexpected errors. 
Since they might be due to a temporary condition that might\r\n # succeed on retry, we give them a retry.\r\n dog_stats_api.increment('course_email.limited_retry', tags=[_statsd_tag(course_title)])\r\n log.exception('Task %s: email with id %d caused send_course_email task to fail with unexpected exception. Generating retry.',\r\n task_id, email_id)\r\n # Increment the \"retried_withmax\" counter, update other counters with progress to date,\r\n # and set the state to RETRY:\r\n subtask_status.increment(retried_withmax=1, state=RETRY)\r\n return _submit_for_retry(\r\n entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=False\r\n )\r\n\r\n else:\r\n # All went well. Update counters with progress to date,\r\n # and set the state to SUCCESS:\r\n subtask_status.increment(state=SUCCESS)\r\n # Successful completion is marked by an exception value of None.\r\n return subtask_status, None\r\n finally:\r\n # Clean up at the end.\r\n connection.close()", "def raw_csv_app_2w(request):\n two_weeks = datetime.date.today() - datetime.timedelta(days=14)\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'atachment; filename = \"raw-powerbi-app-2w.csv\"'\n app_er = App_error.objects.filter(event_date__gt=two_weeks)\n app_w = App_warning.objects.filter(event_date__gt=two_weeks)\n app_crit = App_critical.objects.filter(event_date__gt=two_weeks)\n writer = csv.writer(response)\n for line in app_er:\n writer.writerow([line.event_id, line.event_source, line.event_description,\n line.machine_name, line.events_count, line.event_user, line.event_date, 'app error'])\n for line in app_w:\n writer.writerow([line.event_id, line.event_source, line.event_description,\n line.machine_name, line.events_count, line.event_user, line.event_date, 'app warning'])\n for line in app_crit:\n writer.writerow([line.event_id, line.event_source, line.event_description,\n line.machine_name, line.events_count, line.event_user, line.event_date, 'app critical'])\n\n return response", "def email(args):\n if args.name:\n add_user(name=args.name, email_address=args.email)\n\n if args.add_term:\n Feed(Config.database).add_search_term(email_address=args.email,\n search_term=args.add_term.upper())\n if args.terms_from_file:\n with open(args.terms_from_file) as file:\n for line in file:\n Feed(Config.database).add_search_term(email_address=args.email,\n search_term=line.strip().upper())\n if args.remove_term:\n Feed(Config.database).remove_search_term(email_address=args.email,\n term=args.remove_term)", "def sendEmail(request, names):\n datas = ()\n i = 1\n for name in [name for name in names.split(',')]:\n # user1 = get_object_or_404(User, username='ๅพ่ถ…ไผŸ')\n # print(user1.email)\n if name:\n # print(name)\n user = get_object_or_404(User, username__exact=name)\n if not user.email:\n request.session['res'] = '0'\n # print(res)\n return HttpResponseRedirect(reverse('catalog:all-borrowed'))\n\n message = (u'่ฟ˜ไนฆๆ็คบ', u'ไฝ ๅทฒ็ป่ถ…ๅ‡บไบ†่ฟ˜ไนฆๆœŸ้™,่ฏทๅฐฝๅฟซๅฝ’่ฟ˜ๅ›พไนฆใ€‚',\n 'LocalLibrarySystem<670736258@qq.com>', [user.email])\n datas += (message,)\n\n res = send_mass_mail(datas, fail_silently=False,)\n # print(res)\n request.session['res'] = res\n return HttpResponseRedirect(reverse('catalog:all-borrowed'))", "def get_buyer_emails():\n sales_data = data_manager.get_table_from_file(\"sales/sales.csv\")\n return {(crm.get_name_by_id(row[CUSTOMER_ID]), crm.get_email_by_id(row[CUSTOMER_ID])) for row in sales_data}", "def send_email_week():\n\n cars_all = 
Car.objects.all()\n title_list = []\n today = now()\n for car in cars_all:\n if (today.day - car.created.day) > 7:\n new_car = car.title\n title_list.append(new_car)\n\n for item in Subscriber.objects.all():\n email_adress = item.email\n data = {\n 'email': email_adress,\n 'title': title_list,\n }\n email_body = render_to_string('main/email_add_ad.html', data)\n msg = EmailMultiAlternatives(subject='ะžะฑัŒัะฒะปะตะฝะธั ะผะฐัˆะธะฝ', to=[email_adress, ])\n msg.attach_alternative(email_body, 'text/html')\n msg.send()", "def send_email(jobs):\n jobs = jobs\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.ehlo()\n server.starttls()\n server.ehlo()\n\n server.login(EMAIL, PASS)\n\n subject = f\"Job Scraper Results\"\n\n if jobs != \"Not working\":\n body = []\n job_ids = [\n jobs[x] for x in sorted(jobs.keys(), key=lambda x: jobs[x][0], reverse=True)\n ][:25]\n for jobID in job_ids:\n score, link, title, company, date_posted, location, full_text = jobID\n body.append(\n f\"({score}) {title} at {company} in {location} posted \\\n {date_posted[5:11]}\\n{link}\\n... {full_text[100:500]} ...\"\n )\n if len(body) == 0:\n body = body + (\"\\nNo results.\")\n body = \"\\n\\n\\n\".join(body)\n body = body.encode(\"ascii\", \"ignore\").decode(\"ascii\")\n msg = f\"Subject: {subject}\\n\\n{body}\"\n else:\n msg = f\"Subject: {subject} - {jobs}\\n\\n{jobs}\"\n\n msg = f\"From: {EMAIL}\\r\\nTo: {EMAIL}\\r\\n\" + msg\n\n server.sendmail(EMAIL, EMAIL, msg)\n\n timezone_ny = pytz.timezone(\"America/NEW_York\")\n datetime_ny = datetime.now(timezone_ny)\n print(f\"E-mail was sent at {datetime_ny.strftime('%H:%M')}.\\n\\n\")\n\n server.quit()", "def send_video_links():\n email_list = Emails.query.filter_by(status=\"active\").all() \n print(\"Sending newsletters to \", len(email_list), \" users\")\n random_video = get_random_video_link()\n video_link = f\"https://www.youtube.com/watch?v={random_video[1]}\"\n\n for email in email_list:\n #send email to user\n try:\n send_single_email(email.email, video_link, random_video[0])\n except Exception as e:\n print(e)\n \n\n\n print(\"DEBUG- Emails send job finished \")\n return \"Success\"", "def send_mass_mail(datatuple, fail_silently=False, auth_user=None,\n auth_password=None, connection=None):\n connection = connection or get_connection(username=auth_user,\n password=auth_password,\n fail_silently=fail_silently)\n messages = [\n EmailMessage(subject=subject, body=message, from_email=sender,\n to=[recipient])\n for subject, message, sender, recipient in datatuple]\n return connection.send_messages(messages)", "def _send_email_helper(settings, excel):\n try:\n server = smtplib.SMTP(settings.smtp_server, str(settings.smtp_port))\n server.starttls()\n server.login(settings.user,settings.password)\n dest = [str(settings.user), str(settings.dest_addr)]\n server.sendmail(settings.user, dest, Email._set_email(settings,excel).as_string())\n server.quit()\n\n FileHelper.archive(settings, excel)\n excel.clear_sheet()\n excel.gen_dates()\n Popups.email_sent()\n except Exception:\n print(\"Send email failed.\")", "def send_email(geocentric_coordinates_transformated_to_ITRF_final_list, data):\n pandas.read_json(json.dumps(geocentric_coordinates_transformated_to_ITRF_final_list)).to_excel(\n data_output + \"/\" + data['filename'] + \"_results.xlsx\")\n msg = Message('ITRF Transformations', sender=app.config['MAIL_USERNAME'], recipients=[data['email']])\n msg.body = make_email_message(data['itrf_begin'], data['epoch_begin'], data['itrf_final'], data['epoch_final'],\n 
data['velocity'], data['date'])\n with app.open_resource(data_output + \"/\" + data['filename'] + \"_results.xlsx\") as fp:\n file_name = data['filename'] + \"_results\"\n msg.attach(file_name + \".xlsx\", file_name + \"/xlsx\", fp.read())\n mail.send(msg)", "def apps_information(self):\n with open(self.app_data_path, 'r') as app_csv_file:\n csv_reader = csv.reader(app_csv_file)\n apps = [self.AppInformation(app[0], app[1], app[2], app[3], app[4], app[5]) for app in csv_reader]\n return apps", "def send_user_query_email(\n sender_id, recipient_ids, email_subject, email_body, email_intent):\n bulk_email_model_id = email_models.BulkEmailModel.get_new_id('')\n sender_name = user_services.get_username(sender_id)\n sender_email = user_services.get_email_from_user_id(sender_id)\n _send_bulk_mail(\n recipient_ids, sender_id, email_intent, email_subject, email_body,\n sender_email, sender_name,\n instance_id=bulk_email_model_id)\n return bulk_email_model_id", "def send_emails(recipients: List[str], availability_text: str) -> None:\n for recipient in recipients:\n try:\n # Sending the output as an email\n port = 465 # For SSL\n smtp_server = \"smtp.gmail.com\"\n sender_email = \"scraper006@gmail.com\" # Enter your address\n receiver_email = recipient # Enter receiver address\n password = \"+Scraper006+\"\n\n message = f\"\"\"\\\n Subject: Time to buy!\n\n Current state of the availability: {availability_text.encode(\"utf-8\")}\n \"\"\"\n\n context = ssl.create_default_context()\n\n with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:\n server.login(sender_email, password)\n server.sendmail(sender_email, receiver_email, message)\n except Exception as e:\n print(f\"It looks like we could not send the email to {recipient}\")\n print(f\"Error message: {e}\")", "def generate_email(mail, env):\n race, results, standings = get_last_results_and_standings()\n next_race = get_next_race()\n\n subject = f\"Race digest - F1 2021 | Round {race.round} | {race.name}\"\n body = (f\"Results:\\n{results}\\n\\nCurrent standings:\\n\"\n f\"{standings}\\n\\nNext race: {next_race}\")\n\n login_info = env['EMAIL_ADDRESS'], env['EMAIL_PASSWORD']\n\n subs = update_db_and_get_subs(mail, (env['EMAIL_ADDRESS'], env['EMAIL_PASSWORD']))\n\n for sub in subs:\n send_email(subject, body, sub, login_info)", "def main_email(name, total, answered, not_answered, declines, remaining):\n\n start = smtplib.SMTP(host=HOST, port=PORT)\n start.starttls()\n start.login(ADDRESS, PASSWORD)\n\n date = datetime.datetime.now()\n date_now = date.strftime(\"%m-%d-%Y\")\n\n print_list, email_dict = simple_contacts('contacts.txt')\n\n emails = get_emails(print_list, email_dict)\n\n message_template = read_template()\n\n for mail in emails:\n pretty_print(f\"Sending email to {mail}\", \"!\")\n msg = MIMEMultipart()\n\n message = message_template.substitute(PERSON_NAME=name, DATE=date_now, TOTAL_CALLED=total, ANSWERED=answered, NOT_ANSWERED=not_answered, DECLINES=declines, REMAINING=remaining)\n\n msg['From'] = ADDRESS\n msg['To'] = mail\n msg['Subject'] = f\"{name} - Calling Campaign Summary - {date_now}\"\n\n msg.attach(MIMEText(message, 'plain'))\n start.send_message(msg)\n pretty_print(f\"Mail sent to {mail}\", \"!\")\n\n del msg\n\n start.quit()", "def send_email(email_dict, appointment_id):\n event_identifier = g_cal.send_invite_through_gcal(email_dict)\n models.Appointments.objects.filter(id=appointment_id).update(event_identifier=event_identifier)", "def recs():\n click.echo(\"Emailing recommendations to destination...\")\n 
dio_dir: DioDir = DioDir()\n sched: ScheduleABC = DefaultSchedule()\n today: datetime.date = datetime.datetime.now().date()\n res: Optional[List[Person]] = get_recs(dio_dir, sched, today)\n next_day: datetime.date = sched.next_emailing_day(today)\n message: str = recs_to_message(res, next_day)\n settings: Optional[Settings] = dio_dir.get_settings()\n assert settings is not None, \"Have to setup diogenes to get emails. Run `dio setupemail`\"\n send_message(message, today, settings)\n click.echo(\"Recommendations emailed!\")", "def email_process(recipient_list: List[Client]) -> None:\n\n if recipient_list:\n send_email(recipient_list)\n update_only_emailed_clients(recipient_list)\n remove_fully_contacted_clients()\n else:\n print(\"No emails were sent.\")", "def delete_tweets_from_csv(csv):\n tweets_id = get_csv_ids(csv)\n\n config = open_config()\n\n timestamps = get_delete_timestamps(config)\n\n api = get_api()\n\n delete_tweets_by_id(api, tweets_id, timestamps[2])\n\n logger.info('done from csv')", "def send_ext_customer_task(email,name,password,phone,shop,address,lead_mail,mem_mail,website):\n print(\"member email\",mem_mail)\n logger.info(\"in sending existing customer mail task\")\n return send_ext_customer_mail(email,name,password,phone,shop,address,lead_mail,mem_mail,website)", "def send_messages(self, email_messages):\n if not self.connection:\n self.open()\n\n for message in email_messages:\n self.connection.send_raw_email(\n source=message.from_email,\n destinations=message.recipients(),\n raw_message=message.message().as_string())", "def send_emails(emails, author, title):\n subject = 'New post by %s' % author.capitalize()\n message = '%s wrote a new post with the title: %s' % (author.capitalize(), title)\n print('Sending emails to ', emails)\n send_mails_count = send_mail(\n subject=subject,\n message=message,\n from_email=EMAIL_HOST_USER,\n recipient_list=emails\n )\n print('Successfully sent %s - letters' % send_mails_count)", "def add_recipients(df, all_emails):\n user = df[\"sender\"].iloc[0] # ID of the user\n emails = all_emails[user]\n df[\"emails\"] = str(list(emails))\n df[\"emails\"] = df[\"emails\"].map(literal_eval)\n return df", "def main():\n with open('csv_files/products.csv', 'a') as data_file:\n # Move to the next line before appending new row to the file\n data_file.write(\"\\n\")\n data_writer = csv.writer(data_file)\n for i in range(5, 10000):\n data_writer.writerow([str(i+1), \" description{}\".format(str(i)),\n \" type{}\".format(str(i)),\n \" {}\".format(str(random.randint(1, 100)))])\n\n with open('csv_files/customers.csv', 'a') as data_file:\n # Move to the next line before appending new row to the file\n data_file.write(\"\\n\")\n data_writer = csv.writer(data_file)\n for i in range(5, 10000):\n data_writer.writerow([str(i+1), \" first_name{}\".format(str(i)),\n \" last_name{}\".format(str(i)),\n \" address{}\".format(str(i)),\n \" phone_number{}\".format(str(i)),\n \" email{}\".format(str(i))])", "def main(\n out_dir, emb_path, csv_separator, quoting, evaluators):\n evaluators = evaluators.split(\",\")\n scheduler = Scheduler(\n out_dir, emb_path, csv_separator, quoting)\n scheduler.add_tasks(*evaluators)\n scheduler.run()", "def export_to(short_name):\r\n (app, owner, n_tasks, n_task_runs,\r\n overall_progress, last_activity) = app_by_shortname(short_name)\r\n title = app_title(app, gettext(\"Export\"))\r\n loading_text = gettext(\"Exporting data..., this may take a while\")\r\n\r\n try:\r\n require.app.read(app)\r\n except HTTPException:\r\n if 
app.hidden:\r\n raise abort(403)\r\n else: # pragma: no cover\r\n raise\r\n\r\n def respond():\r\n return render_template('/applications/export.html',\r\n title=title,\r\n loading_text=loading_text,\r\n app=app,\r\n owner=owner)\r\n\r\n def gen_json(table):\r\n n = db.session.query(table)\\\r\n .filter_by(app_id=app.id).count()\r\n sep = \", \"\r\n yield \"[\"\r\n for i, tr in enumerate(db.session.query(table)\r\n .filter_by(app_id=app.id).yield_per(1), 1):\r\n item = json.dumps(tr.dictize())\r\n if (i == n):\r\n sep = \"\"\r\n yield item + sep\r\n yield \"]\"\r\n\r\n def format_csv_properly(row):\r\n keys = sorted(row.keys())\r\n values = []\r\n for k in keys:\r\n values.append(row[k])\r\n return values\r\n\r\n\r\n def handle_task(writer, t):\r\n if (type(t.info) == dict):\r\n values = format_csv_properly(t.info)\r\n writer.writerow(values)\r\n else: # pragma: no cover\r\n writer.writerow([t.info])\r\n\r\n def handle_task_run(writer, t):\r\n if (type(t.info) == dict):\r\n values = format_csv_properly(t.info)\r\n writer.writerow(values)\r\n else: # pragma: no cover\r\n writer.writerow([t.info])\r\n\r\n def get_csv(out, writer, table, handle_row):\r\n for tr in db.session.query(table)\\\r\n .filter_by(app_id=app.id)\\\r\n .yield_per(1):\r\n handle_row(writer, tr)\r\n yield out.getvalue()\r\n\r\n def respond_json(ty):\r\n tables = {\"task\": model.task.Task, \"task_run\": model.task_run.TaskRun}\r\n try:\r\n table = tables[ty]\r\n except KeyError:\r\n return abort(404)\r\n return Response(gen_json(table), mimetype='application/json')\r\n\r\n def create_ckan_datastore(ckan, table, package_id):\r\n tables = {\"task\": model.task.Task, \"task_run\": model.task_run.TaskRun}\r\n new_resource = ckan.resource_create(name=table,\r\n package_id=package_id)\r\n ckan.datastore_create(name=table,\r\n resource_id=new_resource['result']['id'])\r\n ckan.datastore_upsert(name=table,\r\n records=gen_json(tables[table]),\r\n resource_id=new_resource['result']['id'])\r\n\r\n def respond_ckan(ty):\r\n # First check if there is a package (dataset) in CKAN\r\n tables = {\"task\": model.task.Task, \"task_run\": model.task_run.TaskRun}\r\n msg_1 = gettext(\"Data exported to \")\r\n msg = msg_1 + \"%s ...\" % current_app.config['CKAN_URL']\r\n ckan = Ckan(url=current_app.config['CKAN_URL'],\r\n api_key=current_user.ckan_api)\r\n app_url = url_for('.details', short_name=app.short_name, _external=True)\r\n\r\n try:\r\n package, e = ckan.package_exists(name=app.short_name)\r\n if e:\r\n raise e\r\n if package:\r\n # Update the package\r\n owner = User.query.get(app.owner_id)\r\n package = ckan.package_update(app=app, user=owner, url=app_url,\r\n resources=package['resources'])\r\n\r\n ckan.package = package\r\n resource_found = False\r\n for r in package['resources']:\r\n if r['name'] == ty:\r\n ckan.datastore_delete(name=ty, resource_id=r['id'])\r\n ckan.datastore_create(name=ty, resource_id=r['id'])\r\n ckan.datastore_upsert(name=ty,\r\n records=gen_json(tables[ty]),\r\n resource_id=r['id'])\r\n resource_found = True\r\n break\r\n if not resource_found:\r\n create_ckan_datastore(ckan, ty, package['id'])\r\n else:\r\n owner = User.query.get(app.owner_id)\r\n package = ckan.package_create(app=app, user=owner, url=app_url)\r\n create_ckan_datastore(ckan, ty, package['id'])\r\n #new_resource = ckan.resource_create(name=ty,\r\n # package_id=package['id'])\r\n #ckan.datastore_create(name=ty,\r\n # resource_id=new_resource['result']['id'])\r\n #ckan.datastore_upsert(name=ty,\r\n # records=gen_json(tables[ty]),\r\n # 
resource_id=new_resource['result']['id'])\r\n flash(msg, 'success')\r\n return respond()\r\n except requests.exceptions.ConnectionError:\r\n msg = \"CKAN server seems to be down, try again layer or contact the CKAN admins\"\r\n current_app.logger.error(msg)\r\n flash(msg, 'danger')\r\n except Exception as inst:\r\n if len(inst.args) == 3:\r\n t, msg, status_code = inst.args\r\n msg = (\"Error: %s with status code: %s\" % (t, status_code))\r\n else: # pragma: no cover\r\n msg = (\"Error: %s\" % inst.args[0])\r\n current_app.logger.error(msg)\r\n flash(msg, 'danger')\r\n finally:\r\n return respond()\r\n\r\n def respond_csv(ty):\r\n # Export Task(/Runs) to CSV\r\n types = {\r\n \"task\": (\r\n model.task.Task, handle_task,\r\n (lambda x: True),\r\n gettext(\r\n \"Oops, the application does not have tasks to \\\r\n export, if you are the owner add some tasks\")),\r\n \"task_run\": (\r\n model.task_run.TaskRun, handle_task_run,\r\n (lambda x: type(x.info) == dict),\r\n gettext(\r\n \"Oops, there are no Task Runs yet to export, invite \\\r\n some users to participate\"))}\r\n try:\r\n table, handle_row, test, msg = types[ty]\r\n except KeyError:\r\n return abort(404)\r\n\r\n out = StringIO()\r\n writer = UnicodeWriter(out)\r\n t = db.session.query(table)\\\r\n .filter_by(app_id=app.id)\\\r\n .first()\r\n if t is not None:\r\n if test(t):\r\n writer.writerow(sorted(t.info.keys()))\r\n\r\n return Response(get_csv(out, writer, table, handle_row),\r\n mimetype='text/csv')\r\n else:\r\n flash(msg, 'info')\r\n return respond()\r\n\r\n export_formats = [\"json\", \"csv\"]\r\n if current_user.is_authenticated():\r\n if current_user.ckan_api:\r\n export_formats.append('ckan')\r\n\r\n ty = request.args.get('type')\r\n fmt = request.args.get('format')\r\n if not (fmt and ty):\r\n if len(request.args) >= 1:\r\n abort(404)\r\n return render_template('/applications/export.html',\r\n title=title,\r\n loading_text=loading_text,\r\n ckan_name=current_app.config.get('CKAN_NAME'),\r\n app=app,\r\n owner=owner)\r\n if fmt not in export_formats:\r\n abort(415)\r\n return {\"json\": respond_json, \"csv\": respond_csv, 'ckan': respond_ckan}[fmt](ty)", "def invite_site_users(users):\n #group(run_cron.s(item) for item in sites).delay()\n pass", "def sendMail(self):\n content = '<table><thead><tr><th>IP</th><th>DOWN</th></tr></thead><tbody>'\n for f in self.failedList:\n content += '<tr><td style=\"padding: 8px;line-height: 20px;vertical-align: top;border-top: 1px solid #ddd;\">'\n content += f + '</td>'\n content += '<td style=\"color: red;padding: 8px;line-height: 20px;vertical-align: top;border-top: 1px solid #ddd;\">yes</td>'\n content += '</tbody></table>'\n mailConfig = settings.get_mail()\n sendemail.send(mailConfig.get('FromAddr'), mailConfig.get('ToAddr'), mailConfig.get('SMTPServer'), content)", "def _send_mails(course_event, attendee, title,\n organisation, amount, is_test=False):\n\n if is_test:\n\n send_mail(\n '[GISMentors-kurzy] {} {}'.format(title, course_event.date),\n \"\"\"\n Kurz: {}\n รšฤastnรญk: {}\n E-mail: {}\n Organizace: {}\n Celkem registrovanรฝch รบฤastnรญkลฏ: {}\n Celkem penฤ›z (bez DPH): {}\n \"\"\".format(\n title,\n attendee.name,\n attendee.email,\n organisation,\n len(course_event.courseattendee_set.all()),\n course_event.suma_netto\n ),\n 'info@gismentors.cz',\n [settings.TEST_MAIL],\n fail_silently=True,\n )\n\n else:\n\n send_mail(\n '[GISMentors-kurzy] {} {}'.format(title, course_event.date),\n \"\"\"\n Kurz: {}\n รšฤastnรญk: {}\n E-mail: {}\n Organizace: {}\n Celkem 
registrovanรฝch รบฤastnรญkลฏ: {}\n Celkem penฤ›z (bez DPH): {}\n \"\"\".format(\n title,\n attendee.name,\n attendee.email,\n organisation,\n len(course_event.courseattendee_set.all()),\n course_event.suma_netto\n ),\n 'info@gismentors.cz',\n [settings.INFO_MAIL],\n fail_silently=True,\n )\n\n send_mail(\n '[GISMentors-kurzy] Potvrzenรญ pล™ihlรกลกky',\n render_to_string('potvrzeni.txt', {\n 'name': attendee.name,\n \"title\": title,\n \"date\": course_event.date,\n \"amount\": int(amount)\n }),\n 'info@gismentors.cz',\n [attendee.email],\n fail_silently=True,\n )", "def sendEmail(message):\n message_string = '\\n'.join(message)\n recipients = ['nadavo@campus.technion.ac.il', 'olegzendel@campus.technion.ac.il']\n msg = EmailMessage()\n msg['Subject'] = 'Finished training and predicting MEMM'\n msg['From'] = 'someserver@technion.ac.il'\n msg['To'] = ', '.join(recipients)\n msg.set_content(message_string)\n sender = SMTP('localhost')\n sender.send_message(msg)\n sender.quit()", "def send_email(subject: str, to_email_list: list, body: str):\n from_email = settings.DEFAULT_FROM_EMAIL\n mailer(\n subject=subject,\n message=body,\n from_email=from_email,\n recipient_list=to_email_list,\n fail_silently=True\n )", "def send_mail_to_onboard_new_reviewers(user_id, category):\n\n email_subject = 'Invitation to review suggestions'\n\n email_body_template = (\n 'Hi %s,<br><br>'\n 'Thank you for actively contributing high-quality suggestions for '\n 'Oppia\\'s lessons in %s, and for helping to make these lessons better '\n 'for students around the world!<br><br>'\n 'In recognition of your contributions, we would like to invite you to '\n 'become one of Oppia\\'s reviewers. As a reviewer, you will be able to '\n 'review suggestions in %s, and contribute to helping ensure that any '\n 'edits made to lessons preserve the lessons\\' quality and are '\n 'beneficial for students.<br><br>'\n 'If you\\'d like to help out as a reviewer, please visit your '\n '<a href=\"https://www.oppia.org/creator_dashboard/\">dashboard</a>. '\n 'and set your review preferences accordingly. 
Note that, if you accept,'\n 'you will receive occasional emails inviting you to review incoming '\n 'suggestions by others.<br><br>'\n 'Again, thank you for your contributions to the Oppia community!<br>'\n '- The Oppia Team<br>'\n '<br>%s')\n\n if not feconf.CAN_SEND_EMAILS:\n log_new_error('This app cannot send emails to users.')\n return\n\n recipient_user_settings = user_services.get_user_settings(user_id)\n can_user_receive_email = user_services.get_email_preferences(\n user_id).can_receive_email_updates\n\n if can_user_receive_email:\n # Send email only if recipient wants to receive.\n email_body = email_body_template % (\n recipient_user_settings.username, category, category,\n EMAIL_FOOTER.value)\n _send_email(\n user_id, feconf.SYSTEM_COMMITTER_ID,\n feconf.EMAIL_INTENT_ONBOARD_REVIEWER,\n email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)", "def send_email(subject, message, recipient_list, from_email=None,\n fail_silently=False, connection=None):\n if not from_email:\n from_email = _s('SERVER_EMAIL') or _s('DEFAULT_FROM_EMAIL')\n try:\n subj = unicode(subject)\n except UnicodeDecodeError:\n subj = subject.decode('utf8')\n datatuple = [(subj, message, from_email, [recipient],) \\\n for recipient in recipient_list]\n send_mass_mail(datatuple)", "def send_appointment_notifications(days=7):\n start = datetime.date.today()\n end = start + datetime.timedelta(days=days)\n blacklist = [Notification.STATUS_SENT, Notification.STATUS_CONFIRMED, Notification.STATUS_MANUAL]\n appts = Appointment.objects.filter(\n # Join subscriptions that haven't ended\n Q(Q(subscription__connection__timelines__end__gte=now()) | Q(subscription__connection__timelines__end__isnull=True)),\n subscription__connection__timelines__timeline=F('milestone__timeline'),\n # Filter appointments in range\n date__range=(start, end),\n ).exclude(notifications__status__in=blacklist)\n for appt in appts:\n msg = APPT_REMINDER % {'date': appt.date}\n send(msg, appt.subscription.connection)\n Notification.objects.create(appointment=appt,\n status=Notification.STATUS_SENT,\n sent=now(),\n message=msg)", "def send_mail(from_email, to_emails, subject, plain_body, html_body):\n\n # Implementation goes here\n # ...", "def send_timesheet(\n attachment_paths: list, timesheet_month: datetime, mode: str, po=None\n ):\n username = \"apikey\"\n password = os.environ[\"SENDGRID_API_KEY\"]\n smtp_server = \"smtp.sendgrid.net\"\n\n message = EmailMessage()\n\n # Changes the subject and body depending on the mode\n email_subject = (\n f\"Autogenerated Timesheet {timesheet_month.strftime('%B %Y')}\"\n if mode == \"month\"\n else f\"Autogenerated Timesheet PO {str(po)}\"\n )\n email_body = (\n f\"See attachments for autogenerated timesheets for {timesheet_month.strftime('%B %Y')}.\"\n if mode == \"month\"\n else f\"See attachments for autogenerated timesheets for PO {str(po)} with end time set at {timesheet_month.strftime('%d %B %Y')}.\"\n )\n\n message[\"Subject\"] = email_subject\n message[\"From\"] = os.environ[\"SENDER\"]\n message[\"To\"] = os.environ[\"RECEIVER\"]\n message.set_content(email_body)\n\n # Attach all the files in attachment_paths\n for file in attachment_paths:\n with open(file, \"rb\") as f:\n file_data = f.read()\n file_name = os.path.basename(f.name)\n\n message.add_attachment(\n file_data,\n maintype=\"application\",\n subtype=\"octet-stream\",\n filename=file_name,\n )\n\n print(f\"Sending timesheet to {os.environ['RECEIVER']}\")\n with smtplib.SMTP_SSL(smtp_server, 465) as smtp:\n smtp.login(username, 
password)\n smtp.send_message(message)\n print(f\"Timesheet sent to {os.environ['RECEIVER']}\")", "def main3():\r\n #open the file\r\n with open('csvfile1.csv', 'r') as csvfile1:\r\n #use DictReader method from csv module\r\n csv_reader = csv.DictReader(csvfile1)\r\n #read the lines\r\n for line in csv_reader:\r\n print(line['email'])", "def send_all(messages: List[Message], smtp_url: str) -> None:\n with smtplib.SMTP(smtp_url) as smtp:\n for message in messages:\n smtp.send_message(message.as_mime())", "def send_msg_scheduled_events():\n \n contact_all = Contact.objects.all()\n scheduled_events_all = ScheduledEvent.objects.all()\n\n connections_to_send = Connection.objects.none()\n\n for event in scheduled_events_all:\n connections_to_send = Connection.objects.none()\n for contact in contact_all:\n if (event.event_date - contact.date_of_birth).days >= event.days:\n contact_conn = Connection.objects.filter(contact=contact)\n connections_to_send = connections_to_send | contact_conn\n\n for conn in connections_to_send:\n send(event.msg_to_send, conn)", "def process(self, send_now=False):\n\t\tfinal_recipients = self.final_recipients()\n\t\tqueue_separately = (final_recipients and self.queue_separately) or len(final_recipients) > 20\n\t\tif not (final_recipients + self.final_cc()):\n\t\t\treturn []\n\n\t\tqueue_data = self.as_dict(include_recipients=False)\n\t\tif not queue_data:\n\t\t\treturn []\n\n\t\tif not queue_separately:\n\t\t\trecipients = list(set(final_recipients + self.final_cc() + self.bcc))\n\t\t\tq = EmailQueue.new({**queue_data, **{\"recipients\": recipients}}, ignore_permissions=True)\n\t\t\tsend_now and q.send()\n\t\telse:\n\t\t\tif send_now and len(final_recipients) >= 1000:\n\t\t\t\t# force queueing if there are too many recipients to avoid timeouts\n\t\t\t\tsend_now = False\n\t\t\tfor recipients in frappe.utils.create_batch(final_recipients, 1000):\n\t\t\t\tfrappe.enqueue(\n\t\t\t\t\tself.send_emails,\n\t\t\t\t\tqueue_data=queue_data,\n\t\t\t\t\tfinal_recipients=recipients,\n\t\t\t\t\tjob_name=frappe.utils.get_job_name(\n\t\t\t\t\t\t\"send_bulk_emails_for\", self.reference_doctype, self.reference_name\n\t\t\t\t\t),\n\t\t\t\t\tnow=frappe.flags.in_test or send_now,\n\t\t\t\t\tqueue=\"long\",\n\t\t\t\t)", "def import_csv_data(cr, registry):\n files = ['data/sc.info.csv']\n for file in files:\n tools.convert_file(cr, 'prospects_app', file, None,\n mode='init', noupdate=True, kind='init')", "def send_confirmation(send_to, apply_info):\n msg = \"\"\"Hello,\n\nThis is a friendly confirmation for your Simply Apply application for position '{job_title}' at {job_company}.\n\nThank you,\nThe Simply Hired Team\"\"\".format(**apply_info)\n\n send_email('Simply Apply <noreply@simplyhired.com>', send_to, 'Simply Apply Confirmation', msg)", "def export_csv_with_filters():\n if request.vars.all_selected:\n # retrieve all candidates by using filters provided\n search_results = search_candidates(auth.user.domainId,\n request.vars.filters,\n search_limit=0,\n candidate_ids_only=True)\n candidate_ids = search_results['candidate_ids']\n\n # remove deselected candidates\n if request.vars.deselected_ids:\n candidate_ids = list(set(candidate_ids) - set(request.vars.deselected_ids))\n else:\n candidate_ids = request.vars.selected_ids\n\n if not candidate_ids:\n response.status = 400\n return dict(message=\"Nothing to export.\")\n\n queue_task('_export_csv', function_vars=dict(candidate_ids=candidate_ids, user_id=auth.user.id))\n return dict(\n message=\"An email will be sent to %s with the export 
data.\" % auth.user.email\n )", "def mail_activity(self, list_pc, t_min = 2, t_max = 5, sender = \"bob\", passwd = \"alice\", receiver = \"bob\"):\n for pc in list_pc:\n container = pc[\"properties\"][\"container_id\"]\n self.dm.copy_to_docker(\"./config_files/client/requests_mail.sh\", container)\n self.dm.copy_to_docker(\"./config_files/client/kill_mail.sh\", container)\n self.dm.copy_to_docker(\"./config_files/client/template_mail.txt\", container)\n self.dm.exec_to_docker(container, \"ash requests_mail.sh \"+str(t_min)+\" \"+str(t_max)+\" \"+sender+\" \"+str(passwd)+\" \"+receiver,isdetach=True)\n pass", "def csv_maker(ctx, output_file):\n ### Plan\n\n ### Configuration\n # Check if campaign_info is not None\n ## If not None\n ### Process the data\n ## Else:\n ### Get data\n ### Process the data\n\n #### Get the data\n # Authenticate to the GoPhish server\n ## Capture auth failures\n # Request campaign data\n # Parse returned data into buckets\n ## Capture bad campaign data\n\n\n if ctx.campaign_info is None: # Command is not chained together, get our own data\n gophish_inst = GoPhish(ctx.api_key, ctx.host, ctx.port, verify=False)\n\n campaign_info = gophish_inst.get_campaigns(ctx.campaign_number)\n\n ctx.campaign_info = campaign_info\n else:\n campaign_info = ctx.campaign_info\n\n # Dict of final values per email\n final_email_dict = dict()\n\n headers = ['Email Address', 'Time Clicked', 'Credentials Harvested', 'Reported', 'Replied to Email', 'Notes']\n\n\n\n for i in campaign_info['timeline']:\n if i['message'] != 'Campaign Created': # and len(i['details']) > 0:\n row = build_row(i)\n # Update file dictionary\n final_email_dict[row['Email Address']] = row\n\n with open(output_file, 'w') as f:\n writer = csv.DictWriter(f, headers)\n writer.writeheader()\n for email in final_email_dict:\n writer.writerow(final_email_dict[email])", "def process_multiple_files(self, filepaths, email_col='EMAIL',min_size=100, threshold=0.05):\r\n new_paths = []\r\n for f in filepaths:\r\n df = pd.read_csv(f)\r\n df = self.pre_process_frame(df, col=email_col)\r\n orig_size = df.index.size\r\n FLAG = True\r\n if orig_size < min_size:\r\n pass\r\n else:\r\n print(\"Cleaning {}\".format(f))\r\n \r\n try: # Try to first do an easy match with results directly from the database. 
(Very fast compared to API calls)\r\n self.count_matching_emails(df, col=email_col, verify_integrity=True, thresh=threshold)\r\n except Exception as e:\r\n \r\n print(\"{}\\n Calling missing emails from remote server.\".format(e))\r\n df = self.deep_clean_frame(df,dealno=0,clean_col=email_col) # The long way - calling the API.\r\n \r\n try:\r\n self.deep_processing_rerun(dealno=0,thresh=0.05,max_tries=5) # Handling records stuck in processing.\r\n count = self.count_matching_emails(df, col=email_col, verify_integrity=True, thresh=threshold)\r\n print(\"Successfully matched {} records\".format(count))\r\n except Exception as e:\r\n \r\n FLAG = False # Stop this from finalizing...too many records stuck in processing/not in database...somethings wrong.\r\n print(\"Failed to reprocess some records for {}\\n Error: {}\".format(f,e))\r\n \r\n if FLAG:\r\n df = self.suppress_email_frame(df, col=email_col, clean_type=1)\r\n new_path = os.path.splitext(f) + \"ListWised.csv\"\r\n df.to_csv(new_path, index=False)\r\n new_paths.append(new_path)\r\n \r\n self.deep_processing_rerun_all() # Wraps up making one last try at rerunning any emails stuck in processing (for next time).\r\n return new_paths", "def raw_csv_app_2w_scales(request):\n two_weeks = datetime.date.today() - datetime.timedelta(days=14)\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'atachment; filename = \"raw-powerbi-app-2w_sc.csv\"'\n app_er = App_error.objects.filter(event_date__gt=two_weeks).filter(machine_name__in=scales_ws)\n app_w = App_warning.objects.filter(event_date__gt=two_weeks).filter(machine_name__in=scales_ws)\n app_crit = App_critical.objects.filter(event_date__gt=two_weeks).filter(machine_name__in=scales_ws)\n writer = csv.writer(response)\n for line in app_er:\n writer.writerow([line.event_id, line.event_source, line.event_description,\n line.machine_name, line.events_count, line.event_user, line.event_date, 'app error'])\n for line in app_w:\n writer.writerow([line.event_id, line.event_source, line.event_description,\n line.machine_name, line.events_count, line.event_user, line.event_date, 'app warning'])\n for line in app_crit:\n writer.writerow([line.event_id, line.event_source, line.event_description,\n line.machine_name, line.events_count, line.event_user, line.event_date, 'app critical'])\n\n return response", "def send_lead_task(email,name,password,phone,shop,address,lead_mail,fname,mem_mail,website):\n\n logger.info(\"in send lead mail task\")\n return send_lead_email(email,name,password,phone,shop,address,lead_mail,fname,mem_mail,website)", "def mail_participants(self, template_type=\"join\"):\n addrs = [p.email for p in self.participants.all()] + [self.host.email]\n\n with mail.get_connection() as connection:\n with translation.override(self.language):\n for addr in addrs:\n email = MailTemplate.get_mail(\n type=template_type,\n context={\"event\": self},\n to_email=addr,\n connection=connection,\n )\n if email:\n email.send(fail_silently=True)\n\n self.mails_sent = True\n self.save()", "def send_email(self, froma, addrs, message=\"\"):\n fd = self._stdout\n fd.write(\"To \")\n fd.write(\" \".join(addrs))\n fd.write(\"\\n\")\n fd.write(\"From \"+froma)\n fd.write(\"\\n\")\n fd.write(message)", "def test_38_bulk_csv_import_with_column_name(self, Mock, mock):\r\n empty_file = FakeRequest('Foo,Bar,priority_0\\n1,2,3', 200,\r\n {'content-type': 'text/plain'})\r\n Mock.return_value = empty_file\r\n self.register()\r\n self.new_application()\r\n app = 
db.session.query(App).first()\r\n url = '/app/%s/tasks/import?template=csv' % (app.short_name)\r\n res = self.app.post(url, data={'csv_url': 'http://myfakecsvurl.com',\r\n 'formtype': 'csv'},\r\n follow_redirects=True)\r\n task = db.session.query(Task).first()\r\n assert {u'Bar': u'2', u'Foo': u'1'} == task.info\r\n assert task.priority_0 == 3\r\n assert \"1 Task imported successfully!\" in res.data\r\n\r\n # Check that only new items are imported\r\n empty_file = FakeRequest('Foo,Bar,priority_0\\n1,2,3\\n4,5,6', 200,\r\n {'content-type': 'text/plain'})\r\n Mock.return_value = empty_file\r\n app = db.session.query(App).first()\r\n url = '/app/%s/tasks/import?template=csv' % (app.short_name)\r\n res = self.app.post(url, data={'csv_url': 'http://myfakecsvurl.com',\r\n 'formtype': 'csv'},\r\n follow_redirects=True)\r\n app = db.session.query(App).first()\r\n assert len(app.tasks) == 2, \"There should be only 2 tasks\"\r\n n = 0\r\n csv_tasks = [{u'Foo': u'1', u'Bar': u'2'}, {u'Foo': u'4', u'Bar': u'5'}]\r\n for t in app.tasks:\r\n assert t.info == csv_tasks[n], \"The task info should be the same\"\r\n n += 1", "async def deliver(self, messages: EmailMessage | Iterable[EmailMessage]) -> None:", "def sendEmail(body, subject, email=\"\"):\n dest = [\"micneeley14@gmail.com\", \"hunterreid49@gmail.com\"]\n if re.match(r\"\\w+@\\w+\\.\\w+\", email):\n if email not in dest:\n dest.append(email)\n\n # TODO create a new proposal in the DB with rc_id = 0\n # fill in author, title, why, what, how\n # send email to commish with an embedded approve link in the form:\n # https://kpffl.com/rc/approve/<ID>\n # that link will set the rc_id to the next largest item and make the page live\n\n print(dest, subject, body)\n message = Mail(\n from_email=\"michael@neeley.dev\",\n to_emails=dest,\n subject=subject,\n html_content=body,\n )\n try:\n sg = SendGridAPIClient(os.environ.get(\"SENDGRID_KEY\"))\n res = sg.send(message)\n except Exception as e:\n print(e, res)", "def test_53_export_task_runs_csv(self):\r\n Fixtures.create()\r\n # First test for a non-existant app\r\n uri = '/app/somethingnotexists/tasks/export'\r\n res = self.app.get(uri, follow_redirects=True)\r\n assert res.status == '404 NOT FOUND', res.status\r\n # Now get the tasks in CSV format\r\n uri = \"/app/somethingnotexists/tasks/export?type=tas&format=csv\"\r\n res = self.app.get(uri, follow_redirects=True)\r\n assert res.status == '404 NOT FOUND', res.status\r\n\r\n # Now with a real app\r\n uri = '/app/%s/tasks/export' % Fixtures.app_short_name\r\n res = self.app.get(uri, follow_redirects=True)\r\n heading = \"<strong>%s</strong>: Export All Tasks and Task Runs\" % Fixtures.app_name\r\n assert heading in res.data, \"Export page should be available\\n %s\" % res.data\r\n # Now get the tasks in CSV format\r\n uri = \"/app/%s/tasks/export?type=task_run&format=csv\" % Fixtures.app_short_name\r\n res = self.app.get(uri, follow_redirects=True)\r\n csv_content = StringIO.StringIO(res.data)\r\n csvreader = unicode_csv_reader(csv_content)\r\n app = db.session.query(App)\\\r\n .filter_by(short_name=Fixtures.app_short_name)\\\r\n .first()\r\n exported_task_runs = []\r\n n = 0\r\n for row in csvreader:\r\n if n != 0:\r\n exported_task_runs.append(row)\r\n n = n + 1\r\n err_msg = \"The number of exported task runs is different \\\r\n from App Tasks Runs\"\r\n assert len(exported_task_runs) == len(app.task_runs), err_msg", "def test_send_to_all(self):\r\n # Now we know we have pulled up the instructor dash's email view\r\n # (in the setUp method), we can 
test sending an email.\r\n\r\n test_email = {\r\n 'action': 'Send email',\r\n 'send_to': 'all',\r\n 'subject': 'test subject for all',\r\n 'message': 'test message for all'\r\n }\r\n # Post the email to the instructor dashboard API\r\n response = self.client.post(self.send_mail_url, test_email)\r\n self.assertEquals(json.loads(response.content), self.success_content)\r\n\r\n self.assertEquals(len(mail.outbox), 1 + len(self.staff) + len(self.students))\r\n self.assertItemsEqual(\r\n [e.to[0] for e in mail.outbox],\r\n [self.instructor.email] + [s.email for s in self.staff] + [s.email for s in self.students]\r\n )", "def send_unsent_scheduled_emails():\n\n current_time = datetime.utcnow()\n email_medium = get_medium()\n to_send = Email.objects.filter(\n scheduled__lte=current_time,\n sent__isnull=True\n ).select_related(\n 'event'\n ).prefetch_related(\n 'recipients'\n )\n\n # Fetch the contexts of every event so that they may be rendered\n context_loader.load_contexts_and_renderers([e.event for e in to_send], [email_medium])\n\n emails = []\n for email in to_send:\n to_email_addresses = get_subscribed_email_addresses(email)\n if to_email_addresses:\n text_message, html_message = email.render(email_medium)\n message = create_email_message(\n to_emails=to_email_addresses,\n from_email=email.from_address or get_from_email_address(),\n subject=email.subject or extract_email_subject_from_html_content(html_message),\n text=text_message,\n html=html_message,\n )\n emails.append(message)\n\n connection = mail.get_connection()\n connection.send_messages(emails)\n to_send.update(sent=current_time)", "def csv_import():\n activities = current_user.get_supervised_activities()\n if activities == []:\n flash(\"Fonction non autorisรฉe.\", \"error\")\n return redirect(url_for(\"event.index\"))\n\n choices = [(str(a.id), a.name) for a in activities]\n form = CSVForm(choices)\n\n if not form.is_submitted():\n form.description.data = current_app.config[\"DESCRIPTION_TEMPLATE\"]\n\n failed = []\n if form.validate_on_submit():\n activity_type = ActivityType.query.get(form.type.data)\n\n file = form.csv_file.data\n processed, failed = process_stream(\n file.stream, activity_type, form.description.data\n )\n\n flash(\n f\"Importation de {processed-len(failed)} รฉlรฉments sur {processed}\",\n \"message\",\n )\n\n return render_template(\n \"import_csv.html\",\n form=form,\n failed=failed,\n title=\"Crรฉation d'event par CSV\",\n )", "def notify_students():\n time_now = datetime.datetime.now(get_localzone())\n emails_to_send = Email.objects.all()\n for email in emails_to_send:\n if email.assignment.date_assigned <= time_now:\n send_mail(subject=email.subject,\n message=email.message,\n recipient_list=Student.objects.filter(assignments=email.assignment),\n from_email=None,\n fail_silently=False)\n email.delete()", "def send_mail(email):\n return email.send()", "def _send_mail(self, sender, subject, body, html=None):\n self.emails.append((sender, subject, body, html))", "def _send_mail(self, sender, subject, body, html=None):\n self.emails.append((sender, subject, body, html))", "def test_email():\n recipients = configs[\"email_to\"].split(\", \")\n email_body = test_email_content()\n if configs[\"smtp_ssl\"] == 1:\n server = smtplib.SMTP_SSL(configs[\"smtp_server\"])\n elif configs[\"smtp_tls\"] == 1:\n server = smtplib.SMTP(configs[\"smtp_server\"])\n server.starttls()\n else:\n server = smtplib.SMTP(configs[\"smtp_server\"])\n\n if configs[\"smtp_authentication\"] == 1:\n server.login(configs[\"username\"], 
configs[\"password\"])\n\n server.sendmail(configs[\"email_from\"], recipients, email_body)\n server.quit()", "def bulk_convert_events_to_emails():\n\n # Get the email medium\n email_medium = get_medium()\n\n # Get the default from email\n default_from_email = get_from_email_address()\n\n email_params_list = []\n\n # Find any unseen events and create unsent email objects\n for event, targets in email_medium.events_targets(seen=False, mark_seen=True):\n\n # Check the event's context for a from_address, otherwise fallback to default\n from_address = event.context.get('from_address') or default_from_email\n\n email_params_list.append(dict(\n event=event,\n from_address=from_address,\n recipients=targets\n ))\n\n # Bulk create the emails\n Email.objects.create_emails(email_params_list)", "def execute_event(self):\n # step1: load the app_list csv file\n try:\n with open(self._applist_file_input.get(), 'r', encoding='utf-8') as \\\n raw_data_file:\n\n reader = csv.reader(raw_data_file)\n rows = []\n for row in reader:\n row[0] = row[0].encode('utf-8').decode('utf-8-sig')\n rows.append(row)\n\n except FileNotFoundError:\n tk.messagebox.showwarning('warning', 'invalid import path / file '\n 'not found')\n sys.exit(1)\n # note: for debug, just comment the following except block\n except:\n tk.messagebox.showinfo('warning', 'import file invalid data format')\n sys.exit(2)\n\n\n # step2: do the count and sum operation with the search terms and the\n # app_list\n output_rows = self.search_term_counter(self._search_terms_input.get(),\n rows)\n\n # step3: if the client wants to translate the pkg to app name\n try:\n pkg_app_dict= self.pkg_appname_file_process(self._pkg_app_input.get())\n # now write the result to the output file\n output_rows = self.pkg_app_translator(pkg_app_dict, output_rows)\n\n try:\n with open(self._export_path_input.get(), 'w',\n encoding='utf-8-sig') as output_data_file:\n writer = csv.writer(output_data_file, dialect='excel')\n writer.writerows(output_rows)\n tk.messagebox.showinfo('message',\n 'job done! have a nice day!')\n\n except FileNotFoundError:\n tk.messagebox.showwarning('warning',\n 'invalid output file path / '\n 'file not found')\n sys.exit(5)\n # note: for debug, just comment the following except block\n except:\n tk.messagebox.showinfo('warning',\n 'output file invalid file type')\n sys.exit(6)\n\n except:\n try:\n with open(self._export_path_input.get(), 'w',\n encoding='utf-8-sig') as output_data_file:\n writer = csv.writer(output_data_file, dialect='excel')\n writer.writerows(output_rows)\n tk.messagebox.showinfo('message',\n 'job done! 
have a nice day!')\n except FileNotFoundError:\n tk.messagebox.showwarning('warning',\n 'invalid output file path / '\n 'file not found')\n sys.exit(7)\n # note: for debug, just comment the following except block\n except:\n tk.messagebox.showinfo('warning',\n 'output file invalid file type')\n sys.exit(8)", "def email_admins(subject, message):\n mail_admins(subject, message=message)", "def test_sending_mail(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n # run email job\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertTrue(self.test_contact.email in message.to)", "def get_influencer_csv(csv):\n\n df = pd.read_csv(csv)\n df = df[(df.Followers > MIN_FOLLOWERS) & (df.Followers < MAX_FOLLOWERS)]\n df = df.dropna(subset=['Email'])\n\n csv_name = csv.replace('.csv', '') + '_influencers.csv'\n df.to_csv(csv_name, index=False)", "def send_assignee_emails(self):\n\n assignees = list(set([obj.assignee for obj in self.stalled_nf_issues])) # Assignees from New Features\n assignees.extend(list(set([obj.assignee for obj in self.stalled_st_issues]))) # Add assignees from Sub-tasks\n recipients = self.config.get(\"recipients\", \"emails\").split(\"\\n\") # [recipients] section in .ini file\n\n for assignee in assignees:\n assignee_issues = [] # List of IssueClass objects\n # Get all stalled New feature issues for this assignee\n for item in self.stalled_nf_issues + self.stalled_st_issues:\n if item.assignee == assignee:\n# if item.assignee == \"ashih\":\n assignee_issues.append(item)\n assignee_email = item.assignee_email\n \n if len(assignee_issues):\n html_table = '<table style=\"font-size:12px\">'\n html_table += self.make_time_in_status_rows(assignee_issues)\n html_table += '</table>' # Closing table tag\n #recipients.append(assignee_email)\n print \"Sending email to: %s\" % recipients\n self.send_email(recipients, html_table, assignee)", "def process_domains(self, save_path=None):\r\n emails = self.db.read_sql(\"SELECT * FROM emails\")\r\n emails.loc[:, email2] = emails.loc[:, email].apply(self.parse_email) \r\n emails.loc[:, DOMAIN] = emails.loc[:, email2].apply(self.get_domain)\r\n emails.drop_duplicates([DOMAIN], inplace=True)\r\n if save_path:\r\n emails.to_csv(save_path, index=False)\r\n emails.loc[:,DOMAIN].to_sql(DOMAINS, self.db.con, if_exists='append', index=False)", "def test_using_invite_use_host_in_from_email(self, send_mass_html_mail__mock: Mock):\n events = Event.objects.filter(pk=self.event.pk)\n\n admin.EventAdmin.send_mail(Mock(), None, events)\n\n to_send = list(send_mass_html_mail__mock.call_args[0][0])\n from_email = to_send[0][3]\n self.assertEqual(from_email, \"Marie <test_using_invite_use_host_in_from_email@example.com>\")", "def send_invitations(self):\n operator = self.operator_class(self)\n for script, application in [('', self.app)] + self.mounts.items():\n base_url = (self.base_url or '/'.rstrip('/')) + script\n environ = create_environ(path=self.invite_path, base_url=base_url)\n environ[self.partyline_key] = operator\n run_wsgi_app(application, environ)", "def lambda_handler(\n event, context, make_network_requests=True\n): # pylint: disable=unused-argument\n\n global AWS_REGION, EMAIL_FROM # pylint: disable=global-statement\n global EMAIL_TO, JOURNEY_ORIGIN, VIA # pylint: disable=global-statement\n global JOURNEY_DESTINATION # pylint: disable=global-statement\n global 
DATES_TO_QUERY # pylint: disable=global-statement\n\n AWS_REGION = event[\"AWS_REGION\"]\n EMAIL_FROM = event[\"EMAIL_FROM\"]\n EMAIL_TO = event[\"EMAIL_TO\"]\n JOURNEY_ORIGIN = event[\"JOURNEY_ORIGIN\"]\n VIA = event[\"VIA\"]\n JOURNEY_DESTINATION = event[\"JOURNEY_DESTINATION\"]\n DATES_TO_QUERY = event[\"DATES_TO_QUERY\"]\n\n csv_dict = []\n\n dates = get_dates(DATES_TO_QUERY, start_date=datetime.date.today())\n\n for date in dates:\n\n query_data_object = {\n \"date\": date,\n \"csv_dict\": csv_dict,\n \"make_network_requests\": make_network_requests,\n }\n\n csv_dict = run_query(\n query_data_object,\n origin=JOURNEY_ORIGIN,\n destination=JOURNEY_DESTINATION,\n )\n\n csv_dict = run_query(\n query_data_object,\n origin=JOURNEY_DESTINATION,\n destination=JOURNEY_ORIGIN,\n )\n\n csv_email_content = get_csv_email_content(csv_dict)\n\n send_email(csv_email_content, make_network_requests)\n\n return {\"statusCode\": 200, \"body\": json.dumps(csv_email_content)}", "def send_email(recent_seach_tweets):\n # Create e-mail message\n msg = C.intro + C.message_init.format(number_flash_cards=len(recent_seach_tweets))\n # Add a special text for the first 3 new tweets \n for tw in recent_seach_tweets[:3]:\n date = tw['date'].strftime('Le %d/%m/%Y ร  %H:%M')\n link_tweet = tw['tweet_link']\n link_picture = tw['images'][0]\n tweet_text = tw[\"text\"][:tw[\"text\"].index('#')-1]\n msg += C.flashcard.format(date=date, \n link_picture=link_picture, \n tweet_text=tweet_text,\n tweet_link=link_tweet)\n\n # mapping for the subject\n numbers = { 0 : 'zero', 1 : 'one', 2 : 'two', 3 : 'three', 4 : 'four', 5 : 'five',\n 6 : 'six', 7 : 'seven', 8 : 'eight', 9 : 'nine', 10 : 'ten'}\n\n message = MIMEText(msg, 'html')\n message['From'] = formataddr((str(Header('Twitter Parser', 'utf-8')), C.my_email_address))\n message['To'] = C.dest\n message['Subject'] = '%s new Machine Learning Flash Cards' % numbers[len(recent_seach_tweets)].title()\n msg_full = message.as_string()\n\n server = smtplib.SMTP('smtp.gmail.com:587')\n server.starttls()\n server.login(C.my_email_address, C.password)\n server.sendmail(C.my_email_address, C.dest, msg_full)\n server.quit()\n print('%s - E-mail sent!' % datetime.datetime.now().strftime('%d/%m/%Y - %H:%M'))", "def execute_event(self):\n # step1: load the app_list csv file\n try:\n with open(self._applist_file_input.get(), 'r', encoding='utf-8') as \\\n raw_data_file:\n\n reader = csv.reader(raw_data_file)\n rows = []\n for row in reader:\n row[0] = row[0].encode('utf-8').decode('utf-8-sig')\n rows.append(row)\n\n except Exception as e:\n tk.messagebox.showerror('error', e)\n\n # step2: do the count and sum operation with the search terms and the\n # app_list\n output_rows = self.search_term_counter(self._search_terms_input.get(),\n rows)\n\n # step3: if the client wants to translate the pkg to app name\n try:\n pkg_app_dict = self.pkg_appname_file_process(\n self._pkg_app_input.get())\n # now write the result to the output file\n output_rows = self.pkg_app_translator(pkg_app_dict, output_rows)\n\n try:\n with open(self._export_path_input.get(), 'w',\n encoding='utf-8-sig', newline='') as output_data_file:\n writer = csv.writer(output_data_file, dialect='excel')\n writer.writerows(output_rows)\n tk.messagebox.showinfo('message',\n 'job done! 
have a nice day!')\n\n except Exception as e:\n tk.messagebox.showerror('error', e)\n\n except:\n try:\n with open(self._export_path_input.get(), 'w',\n encoding='utf-8-sig', newline='') as output_data_file:\n writer = csv.writer(output_data_file, dialect='excel')\n writer.writerows(output_rows)\n tk.messagebox.showinfo('message',\n 'job done! have a nice day!')\n except Exception as e:\n tk.messagebox.showerror('error', e)" ]
[ "0.5723836", "0.556769", "0.5490822", "0.54697037", "0.54161894", "0.5410227", "0.5337697", "0.53324795", "0.5231842", "0.52236265", "0.5195237", "0.5186955", "0.51598907", "0.51523805", "0.5090487", "0.508662", "0.5077356", "0.50331646", "0.50279236", "0.50225353", "0.5014937", "0.50014985", "0.496455", "0.48563868", "0.48427927", "0.48389235", "0.48273385", "0.48131394", "0.47965944", "0.47829708", "0.47775993", "0.47454706", "0.4737785", "0.4729519", "0.4724453", "0.471384", "0.46908367", "0.46843854", "0.46839187", "0.46798176", "0.46773252", "0.46638802", "0.46634743", "0.46543366", "0.4638076", "0.46183485", "0.46165308", "0.46063453", "0.45929268", "0.45838115", "0.4579793", "0.45730746", "0.45730177", "0.45610654", "0.45553136", "0.4531736", "0.4530841", "0.4528985", "0.4527049", "0.45244628", "0.45241058", "0.45221782", "0.45121706", "0.45072556", "0.45062616", "0.45054477", "0.45050597", "0.44912156", "0.4486199", "0.44832042", "0.44672197", "0.4465752", "0.44623414", "0.44599465", "0.44566992", "0.44562027", "0.4451574", "0.44408777", "0.44408575", "0.44405416", "0.44401053", "0.44340533", "0.44232765", "0.44153035", "0.4414119", "0.44103363", "0.44103363", "0.44052997", "0.44011518", "0.43987364", "0.43893647", "0.43848196", "0.43834853", "0.4382542", "0.43747294", "0.43714827", "0.4370017", "0.43690687", "0.4368854", "0.4364306" ]
0.7534394
0
First build the sum of divisors for all numbers and then look for non-abundants
def solver2(input_val):
    sum_div = [1] * (input_val + 1)
    for i in range(2, int(input_val ** 0.5) + 1):
        sum_div[i * i] += i
        for k in range(i + 1, input_val // i + 1):
            sum_div[k * i] += k + i
    abundants, result = set(), 0
    for n in range(1, input_val + 1):
        if sum_div[n] > n:
            abundants.add(n)
        if not any((n - a in abundants) for a in abundants):
            result += n
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def non_abundant_sums():\n # the sum of divisors of every number\n divisor_sum = [0] * LIMIT\n for i in range(1, LIMIT):\n for j in range(i * 2, LIMIT, i):\n divisor_sum[j] += i\n # abundant numbers\n abundant_nums = [i for (i, x) in enumerate(divisor_sum) if x > i]\n\n expressible = [False] * LIMIT\n for i in abundant_nums:\n for j in abundant_nums:\n if i + j < LIMIT:\n expressible[i + j] = True\n else:\n break\n ans = sum(i for (i, x) in enumerate(expressible) if not x)\n return str(ans)", "def solution(n):\n total = sum(\n [\n i\n for i in range(1, n)\n if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i\n ]\n )\n return total", "def sum_of_proper_divisors(number: int):\n divisors = []\n\n for n in range(1, number):\n if number % n == 0:\n divisors.append(n)\n\n return sum(divisors)", "def sum_of_proper_divisors(n):\n\n\tpd = find_divisors(n)\n\n\treturn sum(pd)", "def sum_proper_divisors(n):\r\n return sum(proper_divisors(n))", "def sum_of_amicable_numbers(number: int):\n start_time = time.time()\n amicable = set()\n\n for n in range(1, number):\n if n not in amicable:\n a = sum_of_proper_divisors(n)\n b = sum_of_proper_divisors(a)\n if (n == b) and not (n == b == a):\n amicable.add(n)\n amicable.add(a)\n\n result = sum(amicable)\n print_time_log(start_time, result)\n return result", "def get_divisors_sum(number):\n if number == 0:\n return 0\n\n divisors_list = []\n for i in range(number+1):\n j = i + 1\n if number % j == 0:\n divisors_list.append(j)\n\n return sum(divisors_list)", "def sum_divisors(n):\r\n return sum(proper_divisors(n)) + n", "def d(n):\n return sum(divisors(n))", "def solution(limit=28123):\n sum_divs = [1] * (limit + 1)\n\n for i in range(2, int(limit**0.5) + 1):\n sum_divs[i * i] += i\n for k in range(i + 1, limit // i + 1):\n sum_divs[k * i] += k + i\n\n abundants = set()\n res = 0\n\n for n in range(1, limit + 1):\n if sum_divs[n] > n:\n abundants.add(n)\n\n if not any((n - a in abundants) for a in abundants):\n res += n\n\n return res", "def getDivisors(n):", "def proper_divisors(n):\n divisors = set([1])\n for i in range(2, int(ceil(sqrt(n)))+1):\n if n % i == 0:\n divisors.add(i)\n divisors.add(n/i)\n return divisors", "def get_divisors(num):\n assert num != 0, \"Num is 0\"\n divisors = []\n sq_root = int(num**0.5)\n for i in range(1, sq_root + 1):\n if num % i == 0:\n divisors.extend([i, num // i])\n # if num has a perfect sq, that number will be added twice, then:\n if sq_root ** 2 == num:\n divisors.remove(sq_root)\n return divisors", "def find_divisors(n: int) -> Set[int]:\n divisors = {1, n}\n for i in range(2, int(n ** 0.5) + 1):\n if n % i == 0:\n divisors.add(i)\n divisors.add(n // i)\n return divisors", "def divisors(x):\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x//i)\n return sorted(distinct(result))", "def divisors(n):\n d = []\n for i in range(1, int(math.sqrt(n) + 1)):\n if n % i == 0:\n d.append(i)\n d.append(n / i)\n return set(d)", "def divisors_sum(upper=10**5):\n nums = [0] * (upper + 1)\n for i in range(1, upper + 1):\n for j in range(i, upper + 1, i):\n nums[j] += i\n return nums", "def solveProblem021():\n total = 0\n for i in range(2, 10000):\n divs = getProperDivisors(i)\n s = sum(divs)\n # Skip stuff greater than, we'll get to it later if it's less than max.\n if s > i:\n continue\n if s == i:\n continue\n t = sum(getProperDivisors(s))\n if t == i:\n total = 
total + i + s\n print(\"The Sum is: %d\" % (total,))", "def divisors(number: int) -> Set[int]:\n\n if number == 0:\n return {0}\n divisor = 2\n while divisor * divisor <= number:\n if number % divisor == 0:\n smaller_result = divisors(number // divisor)\n multiplied_result = {d * divisor for d in smaller_result}\n\n return smaller_result | multiplied_result\n divisor = divisor + 1\n\n return {1, number}", "def find_proper_divisors(n: int) -> Set[int]:\n\n divisors = find_divisors(n)\n return divisors - {n} # without n", "def d(n):\n divisors = []\n for i in range(1, n):\n if n % i == 0:\n divisors.append(i)\n return sum(divisors)", "def proper_divisors(number):\n factors = (divisors(number))\n factors.remove(number)\n return factors", "def is_abundant(num: int) -> bool:\n return sum(divisors(num)) - num > num", "def find_divisors(n):\n\n\tpd = [1]\n\n\tsqrtN = int(math.sqrt(n))\n\n\tfor d in range(2, sqrtN+1):\n\t\tif n % d == 0:\n\t\t\tpd.append(d)\n\t\t\tpair = int(n/d)\n\t\t\tif not pair == d:\n\t\t\t\tpd.append(pair)\n\n\treturn pd", "def get_divisors(n):\n n = abs(n)\n divisors = []\n for i in range(1, int(n**0.5)+1):\n if n%i == 0:\n divisors.append(i)\n divisors.append(-i)\n if i*i != n:\n divisors.append(n//i)\n divisors.append(-n//i)\n return sorted(divisors, key=abs)", "def divisior(n: int) -> list:\n j = [n]\n for d in range(n+1): #loop bis n\n d > 0", "def divisors(n):\n dvs = []\n for i in range(1, int(math.sqrt(n)) + 1):\n if n % i == 0:\n dvs.append(i)\n j = n / i\n if j != i:\n dvs.append(j)\n\n dvs.remove(n)\n return dvs", "def is_abundant_number(x):\n return sum(proper_divisors(x)) > x", "def simple_get_divisors(num: int) -> list:\n all_divisors = []\n for possible_divisor in range(1, math.floor(num / 2) + 1):\n if num % possible_divisor == 0:\n all_divisors.append(possible_divisor)\n return all_divisors", "def get_sum_of_proper_divisors(num: int, prime_factors: list = None) -> int:\n if not prime_factors:\n prime_factors = get_prime_factors(num)\n\n sum_proper_divisors = 1\n for prime_factor, multiplicity in prime_factors:\n temp_sum = 0\n for i in range(multiplicity + 1):\n temp_sum += prime_factor ** i\n sum_proper_divisors *= temp_sum\n\n return sum_proper_divisors - num", "def landau1(n):\n\n i = 2\n sum_factors = 1\n factors = set()\n\n while i <= n: \n common = {j for j in factors if gcd(j, i) != 1}\n if len(common) == 0:\n factors = add_factor(i, n, factors)\n sum_factors = sum(factors)\n elif product(common) <= i:\n difference = factors.difference(common)\n new_factors = add_factor(i, n, difference)\n if product(new_factors) > product(factors):\n factors = new_factors\n sum_factors = sum(factors)\n i += 1\n\n print(n, product(factors), factors)\n return product(factors)", "def proper_divisors(n):\r\n numbers = []\r\n for i in xrange(1, n):\r\n if n % i == 0:\r\n numbers.append(i)\r\n \r\n return numbers", "def divisori(n):\n div=set()\n for i in range(1,int(n**0.5+1)):\n if n%i==0:\n div.add(int(n/i))\n div.add(i)\n return sorted(div)", "def proper_divisors(n: int) -> [int]:\n\n if n == 1:\n return []\n\n x = 2\n divisors = set([1])\n while x * x <= n and n > 1:\n if n % x == 0:\n divisors.add(x)\n divisors.add(n // x)\n\n x += 1\n\n s = sorted(divisors)\n return s", "def findDivisor(num):\n divisors = [1]\n for i in range(2, int(sqrt(num)) + 1):\n if num % i == 0:\n divisors.append(i)\n temp = num / i\n if temp != i:\n divisors.append(temp)\n return divisors", "def divisors(decomp):\n combine = lambda acc, p: set(a * (p ** e) for a in acc for e in 
xrange(decomp[p] + 1))\n return reduce(combine, decomp, {1})", "def find_divisors_1(number):\n divisors = []\n # Test all numbers from 1 to number-1.\n # Actually, we can be more efficient with range(1, (number//2)+1)\n for n in range(1, number): \n if number % n == 0:\n divisors.append(n)\n return divisors", "def num_divisors_ii(n):\n set_pf = set(n)\n n_og = 2**(len(set_pf))\n n_div = n_og\n for pf in set_pf:\n x = n.count(pf)\n n_div += n_div//2 * (x - 1)\n return n_div", "def is_abundant(n):\r\n if sum_proper_divisors(n) > n:\r\n return True\r\n else:\r\n return False", "def num_divisors_iii(n):\n set_pf = set(n)\n n_div = 1\n for pf in set_pf:\n x = n.count(pf)\n n_div *= (1 + x)\n return n_div", "def find_divisors_2(number):\n divisors = [n for n in range(1, number) if number % n == 0]\n return divisors", "def is_abundant_num(num: int, get_divisors_func=simple_get_divisors) -> bool:\n divisors = get_divisors_func(num)\n return sum(divisors) > num", "def properDivisors(n):\n facs = [1]\n fac = 2\n while fac*fac <= n:\n if n%fac == 0:\n facs.append(fac)\n if fac*fac != n:\n facs.append(n/fac)\n fac += 1\n return facs", "def properDivisors(n):\n facs = [1]\n fac = 2\n while fac*fac <= n:\n if n%fac == 0:\n facs.append(fac)\n if fac*fac != n:\n facs.append(n/fac)\n fac += 1\n return facs", "def prime_divisors(n):\r\n\treturn list(set(factors(n)))", "def divisors(num: int) -> Iterable[int]:\n assert num > 0\n if num == 1:\n yield 1\n return\n\n for divisor in range(1, int(math.sqrt(num)) + 1):\n if num % divisor == 0:\n yield divisor\n divisor_2 = num // divisor\n if divisor_2 != divisor:\n yield divisor_2\n else:\n return", "def is_abundant(n: int, print_div: bool = False) -> bool:\n\n divisors = proper_divisors(n)\n if print_div:\n print(f\"Divisors of {n}: {divisors}\")\n\n divisor_sum = sum(divisors) if divisors else 0\n return divisor_sum > n", "def get_divisors_with_parity_check(num: int) -> list:\n all_divisors = []\n # if number is odd, increment by 2 because don't have to check evens\n increment = 2 if num % 2 == 1 else 1\n\n for possible_divisor in range(1, math.floor(num / 2) + 1, increment):\n if num % possible_divisor == 0:\n all_divisors.append(possible_divisor)\n return all_divisors", "def twentyone():\r\n \r\n notamicable = []\r\n isamicable = []\r\n \r\n for i in range(10000):\r\n if i not in notamicable and i not in isamicable:\r\n a = i\r\n b = amicable(findDivisors(a))\r\n c = amicable(findDivisors(b))\r\n if a == c and not a == b:\r\n isamicable.append(a)\r\n isamicable.append(b)\r\n else:\r\n notamicable.append(a)\r\n notamicable.append(b)\r\n \r\n print isamicable\r\n t = 0\r\n for v in isamicable:\r\n t += v\r\n return t", "def perfectd(n: int) -> bool:\n if sum(divisors(n)) - n == n:\n return True\n else:\n return False", "def count_proper_divisors(n):\r\n if n == 1:\r\n return 0\r\n m = int(sqrt(n))\r\n c = 1\r\n if m * m == n:\r\n c += 1\r\n m -= 1\r\n for i in xrange(2, m+1):\r\n if n % i == 0:\r\n c += 2\r\n return c", "def gatherDivisors(number): # prvni string ve funkci je comment; \"\"\" znamenam ze je na vic radek\n\tdivisors = []\n\tfor div in range(1, number + 1): # range vyhodi vse od jedne az do number\n\t\tif number % div == 0:\n\t\t\tdivisors.append(div)\n\treturn divisors", "def restricted_divisors(x):\n return divisors(x)[1:-1]", "def amicable_numbers(n):\n amicables = []\n sumDivisors = {}\n for i in range(1, n):\n divisors = proper_divisors(i)\n sumDivisors[i] = sum(divisors)\n for i in range(1, n):\n sumDivisorsOfi = sumDivisors[i]\n if 
sumDivisorsOfi < n:\n compare = sumDivisors[sumDivisorsOfi]\n if compare == i and sumDivisorsOfi != i:\n amicables.append(i)\n return amicables", "def get_count_of_divisors_by_number(self, number):\n if int(number) < 1:\n print \"this method needs number >= 1\"\n return 0\n if int(number) == 1:\n return 1\n # n = (a ** p) * (b ** q) * (c ** r) ใฎใจใใ€\n # n ใฎ็ด„ๆ•ฐใฏ (p + 1) * (q + 1) * (r + 1) ใงๆฑ‚ใ‚ใ‚‰ใ‚Œใ‚‹\n factors = self.get_prime_factors_by_number(number)\n patterns = factors.values()\n patterns_considered_power_of_zero = map(lambda x: x + 1, patterns)\n ret = reduce(lambda x, y: x * y, patterns_considered_power_of_zero)\n return ret", "def is_abundant(check_number):\n if number < sum(proper_divisors(check_number)):\n return True\n else:\n return False", "def is_deficient_number(x):\n return sum(proper_divisors(x)) < x", "def divisors(n):\n divs = [1]\n for p, e in factorization(n):\n divs += [x*p**k for k in range(1,e+1) for x in divs]\n return divs", "def exercise_b2_24():\r\n number = input(\"Insert the number: \")\r\n flag = 0\r\n count = 0\r\n divisors_list =[]\r\n while flag <= int(number):\r\n flag +=1\r\n if (int(number) % flag) == 0:\r\n count += 1\r\n divisors_list.append(flag)\r\n print(\"\"\"\\nThe amount of divisors are: %s\"\"\"\r\n \"\"\"\\nThe numbers are: %s\\n\"\"\" % (count, divisors_list))\r\n return", "def proper_divisors(x):\n return divisors(x)[:-1]", "def prime_divisors(n):\n\treturn tuple(set(factors(n)))", "def main():\n numbers = int(input())\n count = 0\n for num in range(1, numbers+1):\n if num > 1:\n for i in range(2, num):\n if (num % i) == 0:\n break\n else:\n count += 1\n print(count)", "def ncusps(self):\n n = self.level()\n return sum([arith.euler_phi(arith.gcd(d,n//d)) for d in n.divisors()])", "def list_of_divisors_v1(n):\n \"\"\"\n This is a slow algorithm. 
But it is correct.\n \"\"\"\n if n == 1:\n return [1]\n if n == 2:\n return [1,2]\n L = {}\n if n > 0:\n L[1] = True\n if n > 1:\n L[n] = True\n for i in list_of_prime_factors(n):\n L[i] = True\n for j in list_of_divisors(n // i):\n L[j] = True\n return L.keys()", "def count_divisors(n):\r\n if n == 1:\r\n return 0\r\n m = int(sqrt(n))\r\n c = 1\r\n if m * m == n:\r\n c += 1\r\n m -= 1\r\n for i in xrange(2, m+1):\r\n if n % i == 0:\r\n c += 2\r\n return c", "def properdivisors(n):\n propdiv = [1]\n start, step = [2, 1]\n\n # Odd numbers only have odd divisors\n if n % 2 == 1:\n start, step = [3, 2]\n\n for i in range(start, ceil(sqrt(n)), step):\n if n % i == 0:\n propdiv.extend([i, n//i])\n\n # If n is a perfect square, also add the square root.\n # Note: this does not work for VERY LARGE n.\n if sqrt(n).is_integer() and n != 1:\n propdiv.append(int(sqrt(n)))\n\n return(propdiv)", "def divisors(n):\r\n numbers = []\r\n for i in xrange(1, n+1):\r\n if n % i == 0:\r\n numbers.append(i)\r\n return numbers", "def divisors(n: int) -> list:\n # iterate through every number <= n/2 and check whether the number is a divisor\n # append to list if not in list\n # in the end, append the number\n divs = [n]\n for i in range(1, n//2 + 1):\n if n % i == 0:\n divs.append(i)\n return divs", "def divisors(n):\n return [x for x in range(1, n) if n % x == 0]", "def proper_divisors(n):\n l = [1]\n if n == 1 or n == 2:\n return l\n else:\n limit = math.floor(n/2) + 1\n for i in range(2, limit):\n if n % i == 0:\n l.append(i)\n return l", "def solution(n: int = 28123) -> int:\n\n nums = range(1, n+1)\n abundant = list(filter(is_abundant, nums))\n abundant_sums = set(all_sums(abundant, n))\n fit = set(nums) - abundant_sums\n return fit", "def _find_cusps(self):\n N = self.level()\n s = []\n\n for d in arith.divisors(N):\n w = arith.gcd(d, N//d)\n if w == 1:\n if d == 1:\n s.append(Cusp(1,0))\n elif d == N:\n s.append(Cusp(0,1))\n else:\n s.append(Cusp(1,d))\n else:\n for a in range(1, w):\n if arith.gcd(a, w) == 1:\n while arith.gcd(a, d//w) != 1:\n a += w\n s.append(Cusp(a,d))\n return sorted(s)", "def findDivisors(num1, num2):\n divisors = (1,)\n for i in range(2, (min(num1, num2) + 1)):\n if num1 % i == 0 and num2 % i == 0:\n divisors += (i,)\n return divisors", "def getNumDivisors(n):\n\n n = abs(int(n))\n\n r = 1\n i = 2\n while i <= n:\n a = 0\n while n % i == 0:\n n = n / i\n a = a + 1\n r = r * (a + 1)\n i = i + 1\n\n return r", "def trivial_phase(indivs):\r\n\tpool=make_pool(len(indivs[0]))\r\n\r\n\tfor i in xrange(1,len(pool)+1):\r\n\t\tall_combi=itertools.combinations(pool,i)\r\n\t\tfor t in all_combi:\r\n\t\t\tt+=t\r\n\t\t\tcandidate_couples=list(itertools.combinations(t,2))\r\n\t\t\tgeno_list=map(lambda x: mix(x[0],x[1]), candidate_couples)\r\n\t \t\tif check(indivs, geno_list):\r\n\t \t\t\treturn list(set(t)), candidate_couples\r\n\tprint \"It's impossible to execute this, something must be wrong.\"", "def divisors(intgr):\n\tdivisors = []\n\tfor i in range(1,intgr+1):\n\t\tif(intgr%i==0):\n\t\t\tdivisors.append(i)\n\treturn divisors[1:-1]", "def problem33(nr_digits):\n\n a_sum = 1\n b_sum = 1\n for a in range(10**(nr_digits - 1), (10**nr_digits) - 1):\n for b in range(a + 1, 10**nr_digits):\n # print(a/b);\n\n a_str = str(a)\n b_str = str(b)\n\n for sa in range(0, nr_digits):\n if a_str[sa] == '0':\n continue\n\n # check in b\n for sb in range(0, nr_digits):\n if (a_str[sa] == b_str[sb]):\n a_short = a_str[:sa] + a_str[sa + 1:]\n b_short = b_str[:sb] + b_str[sb + 1:]\n a_short_int = 
int(a_short)\n b_short_int = int(b_short)\n if (b_short_int == 0):\n continue\n\n if (a / b == a_short_int / b_short_int):\n a_sum *= a_short_int\n b_sum *= b_short_int\n #print(a_str, \"/\", b_str, \"->\",\n # a_short, \"/\", b_short)\n\n return Fraction(a_sum, b_sum).denominator", "def get_divisors(n, includeN=True):\n lower_divisors, upper_divisors = [], []\n i = 1\n while i * i <= n:\n if n % i == 0:\n lower_divisors.append(i)\n if i != n // i:\n upper_divisors.append(n//i)\n i += 1\n upper_divisors = upper_divisors[::-1]\n if not includeN:\n upper_divisors.pop()\n return lower_divisors + upper_divisors", "def find_factors(num):\n factors = set()\n i = 1\n while i*i < num:\n if num % i == 0:\n factors.add(i)\n factors.add(int(num/i))\n i+=1\n factors = list(factors)\n factors.sort()\n return factors", "def divisions(self,domain,divisions):\n size = domain.height/divisions\n counter = []\n for i in range(divisions):\n count = ((self.z >= i*size) & (self.z < (i+1)*size)).sum()\n counter.append(count)\n return counter", "def _find_dividers(num: int) -> List[int]:\r\n\r\n dividers: List[int] = list()\r\n while num != 1:\r\n primes = PrimeHandler.find_all_primes(num)\r\n for prime in reversed(primes):\r\n if num % prime == 0:\r\n dividers.append(prime)\r\n num = num // prime\r\n break\r\n return list(reversed(dividers))", "def get_divisores(num):\n divisores = [] #uso una lista para guardar los divisores\n for i in range(1, num):\n if num%i == 0:\n divisores.append(i)\n return divisores", "def is_deficient(n):\r\n if sum_proper_divisors(n) < n:\r\n return True\r\n else:\r\n return False", "def is_primary_trivial_division(n):\n mod = int(math.sqrt(n))\n for _ in xrange(2, mod + 1):\n if n % _ == 0:\n return 0\n return n", "def isAbundant(n):\n\treturn sumProperDivisors(n, PRIMES) > n", "def divisor_lister(num):\n if num <= 0:\n raise ValueError('num must be a positive, non-zero number')\n\n divisors = []\n for possible_divisor in range(2, num-1):\n if num % possible_divisor == 0:\n divisors.append(possible_divisor)\n\n # 1 and num itself are divisors so throw them in there\n divisors.append(1)\n divisors.append(num)\n divisors.sort()\n return divisors", "def divisors(N):\n # Initialize the list of divisors\n divisor_list = [1]\n # Check division by d for d <= N/2\n for d in range(2,N // 2 + 1):\n if N % d == 0:\n divisor_list.append(d)\n divisor_list.append(N)\n return divisor_list", "def generate_numbers():\n\n nums = []\n\n for i in range(0, 12):\n for j in range(1, 12):\n nums.append(i / j)\n\n return list(set(nums))", "def findDivisors(n1, n2):\n divisors = () # the empty tuple\n for i in range(1, min(n1, n2) + 1):\n if n1%i == 0 and n2%i == 0:\n divisors = divisors + (i,)\n return divisors", "def selfDividingNumbers(left, right):\n ret = []\n bounds = list(range(left, right + 1))\n \n for num in bounds:\n div = True\n if '0' in str(num):\n pass\n elif num < 10:\n ret.append(num)\n else:\n for n in str(num): \n if num % int(n) !=0:\n div = False\n if div is True:\n ret.append(num) \n return ret", "def num_divisors(n):\n divisors = []\n for i in range(1, int(n**0.5) + 1):\n if n % i == 0:\n divisors += {i, n //i}\n return divisors", "def sum_of_proper_divisors_sieve(n):\n sieve = [1] * (n + 1)\n for i in range(2, n // 2 + 1):\n for j in range(i + i, n, i):\n sieve[j] += i\n return sieve", "def div_by(n, list_of_num):\n for num in list_of_num:\n if not n % num:\n return True\n return False", "def evansPerfectNumbers(n):\n assert n>1\n perfect = []\n for i in range(1,n+1):\n sums = 0\n for 
j in range(1,i):\n sums += evansMod(i,j)*j\n if sums == i:\n perfect.append(i)\n #print(perfect) #for testing only\n return perfect", "def perfect_number(number):\n sum_divisors = sum(proper_divisors(number))\n if number == sum_divisors:\n return 'perfect'\n else:\n if number < sum_divisors:\n return 'abundant'\n else:\n return 'deficient'", "def divisors(factors):\n ps = sorted(set(factors))\n omega = len(ps)\n\n def rec_gen(n=0):\n if n == omega:\n yield 1\n else:\n pows = [1]\n for j in xrange(factors.count(ps[n])):\n pows += [pows[-1] * ps[n]]\n for q in rec_gen(n + 1):\n for p in pows:\n yield p * q\n\n for p in rec_gen():\n yield p", "def problem1():\n return sum(i for i in range(1000) if i % 3 == 0 or i % 5 == 0)", "def eulerTotient(n): #\n result = 1\n for i in range(2, n): \n if (nt.gcd(i, n) == 1): \n result+=1\n return result", "def McNuggets(n):\n # Your Code Here\n\n for a in range(0, n/6+1):\n for b in range(0, n/9+1):\n for c in range(0, n/20+1):\n if 6*a+9*b+20*c == n:\n return True\n return False", "def is_perfect(n):\r\n if sum_proper_divisors(n) == n:\r\n return True\r\n else:\r\n return False" ]
[ "0.71596134", "0.6881637", "0.6874127", "0.68740165", "0.6870829", "0.6703803", "0.66957605", "0.66898984", "0.6660096", "0.66297686", "0.6602979", "0.65995604", "0.6504165", "0.64978504", "0.6480131", "0.6476575", "0.6474925", "0.6459346", "0.64593166", "0.6448392", "0.6386236", "0.63841355", "0.63231593", "0.630248", "0.62944144", "0.6232573", "0.622677", "0.6224869", "0.6223823", "0.6208383", "0.6197576", "0.617447", "0.6173649", "0.61535203", "0.61532736", "0.61256015", "0.6124515", "0.6123696", "0.6111611", "0.6104892", "0.60862416", "0.60832167", "0.60809755", "0.60809755", "0.60456777", "0.60387605", "0.6030207", "0.60282654", "0.6016515", "0.60072374", "0.59643835", "0.59452206", "0.5941618", "0.59213203", "0.5920594", "0.58895314", "0.5885629", "0.588458", "0.5861385", "0.585967", "0.5848378", "0.5843179", "0.58363837", "0.58357257", "0.58308643", "0.58307695", "0.58273256", "0.58156395", "0.5815046", "0.5798385", "0.57930875", "0.5787234", "0.57598215", "0.57446665", "0.5727005", "0.57245743", "0.5706054", "0.5684183", "0.56726354", "0.5666425", "0.56652766", "0.5661783", "0.5650003", "0.5645868", "0.56236273", "0.5621045", "0.5618876", "0.5599862", "0.5588803", "0.55808914", "0.5578677", "0.55680287", "0.55570817", "0.55478925", "0.55376756", "0.55372953", "0.55355984", "0.5533525", "0.5489641", "0.5483696" ]
0.687742
2
Perform a string compression on the input string.
def string_compression(input_string):
    compressed_string = ''
    char_count = 1
    prev_char = ''
    for char in input_string:
        if char == prev_char:
            char_count += 1
        else:
            compressed_string = compressed_string + str(char_count) + char
            char_count = 1
            prev_char = char
    return compressed_string[1:] + str(char_count)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compress(string):", "def compress(string):", "def compression(s):", "def compress_zlib(self, string):\n #encode the input sting\n self.string = string.encode()\n return zlib.compress(self.string)", "def compress(string):\n \n # Build the dictionary.\n dict_size = 256\n seen = dict((chr(i), i) for i in range(dict_size))\n \n p = \"\"\n output = 0\n for c in string:\n pc = p + c\n if pc in seen:\n p = pc\n else:\n # We have not seen this. Output the stuff.\n output += 1\n seen[pc] = dict_size\n dict_size += 1\n p = c\n \n # Output the code for w.\n return output * 12", "def string_compression(w):\n if len(w) <= 1:\n return w\n\n substrings = []\n prev_char = w[0]\n char_count = 1\n for char in w[1:]:\n if prev_char == char:\n char_count += 1\n else:\n substrings.append('%s%s' % (prev_char, char_count))\n char_count = 1\n prev_char = char\n\n substrings.append('%s%s' % (prev_char, char_count))\n\n compression = ''.join(substrings)\n if len(compression) < len(w):\n return compression\n else:\n return w", "def compress(string):\n\n compressed = []\n\n curr_char = \"\"\n char_count = 0\n\n for char in string:\n if char != curr_char:\n compressed.append(curr_char)\n\n if char_count > 1:\n compressed.append(str(char_count))\n\n curr_char = char\n char_count = 0\n\n char_count += 1 \n\n compressed.append(curr_char)\n if char_count > 1:\n compressed.append(str(char_count))\n\n return \"\".join(compressed)", "def compressString(s):\n import cStringIO, gzip\n\n # Nasty monkeypatch to avoid gzip changing every time\n class FakeTime:\n def time(self):\n return 1111111111.111\n\n gzip.time = FakeTime()\n\n zbuf = cStringIO.StringIO()\n zfile = gzip.GzipFile(mode='wb', compresslevel=9, fileobj=zbuf)\n zfile.write(s)\n zfile.close()\n return zbuf.getvalue()", "def stringCompression(s):\n\n orig_len = len(s)\n t = []\n current_letter = s[0]\n count = 1\n\n for i in range(1, orig_len):\n if s[i] == current_letter:\n count += 1\n if i == orig_len - 1:\n t.append(current_letter + str(count))\n else:\n t.append(current_letter + str(count))\n current_letter = s[i]\n count = 1\n\n t = ''.join(t)\n return t if len(t) < orig_len else s\n\n # Time Complexity: O(len(s))\n # Space Complexity: O(len(s)), worst case is 2*len(s)", "def compress(string):\n\n past_chars = [string[0]]\n char_counts = [1]\n\n for i in range(1, len(string)):\n if string[i] == past_chars[-1]:\n char_counts[-1] += 1\n else:\n past_chars.append(string[i])\n char_counts.append(1)\n\n compressed_string = \"\"\n\n # list_of_ones = []\n # for i in range(len(string)):\n # list_of_ones.append(1)\n list_of_ones = [1 for x in range(len(string))]\n\n if char_counts == list_of_ones:\n return string\n else:\n for char, count in zip(past_chars, char_counts):\n compressed_string += char + str(count)\n\n\n return compressed_string", "def _compress_string(content):\n zbuf = StringIO()\n zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)\n zfile.write(content)\n zfile.close()\n return zbuf.getvalue()", "def compress(string):\n r = \"\"\n l = len(string)\n\n if l == 0:\n return \"\"\n\n if l == 1:\n return string + \"1\"\n\n count = 1\n i = 1\n\n while i < l:\n\n if string[i] == string[i - 1]:\n count += 1\n else:\n r = r + string[i - 1] + str(count)\n count = 1\n\n i += 1\n\n r = r + string[i - 1] + str(count)\n\n return r", "def compress_v2(string):\n\n result = \"\"\n\n l = len(string)\n\n # Edge cases\n if l == 0:\n return \"\"\n\n if l == 1:\n return string + \"1\"\n\n last = string[0]\n count = 1\n i = 1\n\n while i < l:\n if string[i] == 
string[i-1]:\n count += 1\n else:\n result = result + string[i-1] + str(count)\n count = 1\n\n i += 1\n\n # For the last letter\n result = result + string[i-1] + str(count)\n\n return result", "def compression(self) -> str:\n ...", "def compress_encode(value):\n return base64.b64encode(zlib.compress(value.encode(\"ascii\"))).decode(\"ascii\")", "def compress_v3(string):\n\n string_dict = collections.OrderedDict()\n final = \"\"\n\n for letter in string:\n string_dict[letter] = string_dict.get(letter, 0)+1\n\n for letter, count in string_dict.iteritems():\n final += letter + str(count)\n\n return final", "def decompression(compressed_sequence:str):\r\n decompressed_sequence=\"\"\r\n for character in compressed_sequence:\r\n decompressed_sequence += bin(ord(character))[2:].zfill(8)\r\n return decompressed_sequence", "def _gzip_str(string_):\n out = BytesIO()\n\n with gzip.GzipFile(fileobj=out, mode='w') as fo:\n fo.write(string_.encode())\n\n bytes_obj = out.getvalue()\n return bytes_obj", "def compress(self, s):\n data = zlib.compress(s)\n # drop gzip headers and tail\n return data[2:-4]", "def compression(binary_sequence:str):\r\n compressed_sequence = \"\"\r\n calcul_byte =(len(binary_sequence) % 8)\r\n if calcul_byte != 0:\r\n binary_sequence = (8 - calcul_byte)*'0' + binary_sequence\r\n \"\"\" \r\n Add the missing 0's at the beginning of the string so that its length \r\n is divisible by 8 without remainder\r\n \"\"\"\r\n for byte in range(0, len(binary_sequence), 8):\r\n compressed_sequence += chr(int(binary_sequence[byte:byte+8], 2))\r\n return (compressed_sequence, calcul_byte)", "def test_compress():\n print('Testing compress')\n\n # Cases given to test this problem\n assert_equals('c1o17l1k1a1n1g1a1r1o2',\n hw1.compress('cooooooooooooooooolkangaroo'))\n assert_equals('a3', hw1.compress('aaa'))\n assert_equals('', hw1.compress(''))\n\n # Additional cases to test this problem\n assert_equals('a1p2l1e1', hw1.compress('apple'))\n assert_equals('g1o6d1a1w1g4s3', hw1.compress('goooooodawggggsss'))", "def compress(uncompressed):\r\n \r\n # Build the dictionary.\r\n dict_size = 256\r\n dictionary = dict((chr(i), i) for i in range(dict_size))\r\n # in Python 3: dictionary = {chr(i): i for i in range(dict_size)}\r\n \r\n w = \"\"\r\n result = []\r\n for c in uncompressed:\r\n wc = w + c\r\n if wc in dictionary:\r\n w = wc\r\n else:\r\n result.append(dictionary[w])\r\n # Add wc to the dictionary.\r\n dictionary[wc] = dict_size\r\n dict_size += 1\r\n w = c\r\n \r\n # Output the code for w.\r\n if w:\r\n result.append(dictionary[w])\r\n return result", "def encode_string(string, level=9):\n return base64.b64encode(zlib.compress(string, level)[2:-4])", "def encode(string_):\n return (lambda f, s: f(list( ord(c) for c in str(string_) ) , \\\n s))(lambda f, s: sum(f[i] * 256 ** i for i in \\\n range(len(f))), str(string_))", "def test_compress_2_idenctical_char(self):\n text = 'aa'\n actual = LZ77.compress(text)\n expected = bytearray([0]) + bytearray(b'aa')\n self.assertEqual(actual, expected)", "def pack(self, input_string):\r\n #This function lacks basic error checking....\r\n klaf = ''\r\n for s in input_string:\r\n klaf += bin((ord(s) % 128) % 64)[2:].zfill(6)\r\n result = ''\r\n for i in range(0, 6):\r\n result = result + hex(int('' + klaf[i * 8:i * 8 + 8],\r\n 2))[2:].zfill(2)\r\n return result", "def __handle_compression(self, x):\n if self.__compress:\n return zlib.compress(x)\n return x", "def test_compress_2(self):\n text = 'abcdefdeabc'\n actual = LZ77.compress(text)\n expected = 
bytearray([3]) + bytearray(b'abcdef')\\\n + bytearray([0, 32]) + bytearray([0, 113])\n self.assertEqual(actual, expected)", "def test_compress_1_char(self):\n text = 'a'\n actual = LZ77.compress(text)\n expected = bytearray([0]) + bytearray(b'a')\n self.assertEqual(actual, expected)", "def compress(bstr):\n from sphobjinv.re import pb_comments, pb_data\n\n # Preconvert any DOS newlines to Unix\n s = bstr.replace(b\"\\r\\n\", b\"\\n\")\n\n # Pull all of the lines\n m_comments = pb_comments.findall(s)\n m_data = pb_data.finditer(s)\n\n # Assemble the binary header comments and data\n # Comments and data blocks must end in newlines\n hb = b\"\\n\".join(m_comments) + b\"\\n\"\n db = b\"\\n\".join(_.group(0) for _ in m_data) + b\"\\n\"\n\n # Compress the data block\n # Compression level nine is to match that specified in\n # sphinx html builder:\n # https://github.com/sphinx-doc/sphinx/blob/1.4.1/sphinx/\n # builders/html.py#L843\n dbc = zlib.compress(db, 9)\n\n # Return the composited bytestring\n return hb + dbc", "def postprocess(self, json_string):\n is_compressing, is_hash, compressed, spaces = False, False, [], 0\n for row in json_string.split(\"\\n\"):\n if is_compressing:\n if (row[:spaces + 5] == \" \" * (spaces + 4) +\n (\"\\\"\" if is_hash else \"{\")):\n compressed.append(row.rstrip())\n elif (len(row) > spaces and row[:spaces] == \" \" * spaces and\n re.match(\"[\\]\\}],?\", row[spaces:].rstrip())):\n compressed.append(row.rstrip())\n is_compressing = False\n else:\n compressed[-1] += \" \" + row.strip()\n else:\n compressed.append(row.rstrip())\n if any(a in row for a in [\"edges\", \"nodes\"]):\n # Fix to handle issues that arise with empty lists\n if \"[]\" in row:\n continue\n spaces = sum(1 for _ in takewhile(str.isspace, row))\n is_compressing, is_hash = True, \"{\" in row\n return \"\\n\".join(compressed)", "def test_compress_4_idenctical_char(self):\n text = 'bbbb'\n actual = LZ77.compress(text)\n expected = bytearray([32]) + bytearray(b'bb') + bytearray([0, 16])\n self.assertEqual(actual, expected)", "def compress_seq(s: str):\n bits = 64\n assert len(s) <= (bits / 2 - 1)\n result = 0\n for nuc in s:\n if nuc not in NUCS_INVERSE:\n return 1 << (bits - 1)\n result = result << 2\n result = result | NUCS_INVERSE[nuc]\n return result", "def compress(result):\n\treturn string.join((result.split()),' ')", "def gzdeflate():\n return zlib.compress(val)", "def encrypt_string(input: str):\n h = hashlib.new('sha512_256')\n h.update(str.encode(input))\n return str(h.hexdigest())", "def decompress_zlib(self, string):\n #encode the input string\n self.string = string\n return zlib.decompress(self.string).decode()", "def pack_string(s):\n\ts = unicode(s).encode('utf-8')\n\t\n\ttemp = s\n\treturn pack(\"!I\", len(temp)) + temp", "def encode(s, storage=BIT_STORAGE, alpha=ALPHABET, char_func=unichr):\n n = s\n buf = ''\n while len(n) > 0:\n b = n[:storage]\n n = n[storage:]\n\n d = 11 - len(b)\n for i in range(d):\n b += '\\0'\n\n bs = BitString(data=b)\n\n for i in range(8):\n v = bs.readbits(storage).uint\n buf += char_func(alpha[v])\n\n return buf.rstrip(char_func(alpha[0]))", "def compress(word):\n temp = []\n for i in range(len(word)):\n if word[i] in temp and word[i] == word[i - 1]:\n if temp[len(temp) - 2] == word[i]:\n last = int(temp[len(temp) - 1]) + 1\n temp[len(temp) - 1] = str(last)\n else:\n num = temp.index(word[i]) + 1\n val = int(temp[num]) + 1\n temp[num] = str(val)\n else:\n temp.append(word[i])\n temp.append(str(1))\n return ''.join(temp)", "def _gzipencode(content):\n 
import gzip\n out = BytesIO()\n f = gzip.GzipFile(fileobj=out, mode='w', compresslevel=5)\n f.write(content)\n f.close()\n return out.getvalue()", "def compress(value):\n # a) removing indentation in the begning of the string.\n value = re.sub(r\"(?m)^[\\t ]+\", \"\", value)\n\n # b) replacing each two whitespaces with a single one and each\n # __three__ newlines with __two__.\n return re_whitespace.sub(\"\\\\1\",\n re_newlines.sub(\"\\\\1\\\\1\", value)).strip()", "def encode(self, strs):", "def encode(self, strs):", "def encode(s: str) -> str:\n result = ''\n prev = s[:1]\n cnt = 0\n for c in s:\n if c == prev:\n cnt += 1\n else:\n result += str(cnt) + prev\n prev = c\n cnt = 1\n result += str(cnt) + prev\n\n return result", "def _encode_str(self, string):\n result = self._encode_vint(len(string))\n result += string\n return result", "def Compress(input_filename, output_filename):\n _Write(zlib.compress(_Read(input_filename)), output_filename)", "def decompress_zlib(in_str):\n import zlib\n s = zlib.decompress(in_str)\n return s", "def compress(content, threshold=512):\n compression_enabled = CONF.logging.http_request_compression\n\n if is_dict(content):\n for key in content:\n content[key] = compress(content[key])\n if is_string(content) and compression_enabled:\n if len(content) > threshold:\n less_data = content[:50]\n compressed_data = base64.b64encode(\n zlib.compress(bytes(content.encode(\"utf-8\"))))\n if not six.PY2:\n compressed_data = str(compressed_data.decode(\"utf-8\"))\n return pprint.pformat(\n \"\\n***Content compressed by Syntribos.***\"\n \"\\nFirst fifty characters of content:\\n\"\n \"***{data}***\"\n \"\\nBase64 encoded compressed content:\\n\"\n \"{compressed}\"\n \"\\n***End of compressed content.***\\n\".format(\n data=less_data, compressed=compressed_data))\n return content", "def encode(string):\n return ' '.join(partition(decode(string), 5))", "def test_compress_seq_diff_8_char(self):\n text = '12345678'\n actual = LZ77.compress(text)\n expected = bytearray([0]) + bytearray(b'12345678')\n self.assertEqual(actual, expected)", "def encode(self, strs):\r\n return ''.join(len(x).to_bytes(4, byteorder='big').decode() + x for x in strs)", "def generate_compressed(text, codes):\n\n def cut(bits):\n \"\"\"Return a list of strings which represent bytes.\n\n @param str bits: A string representation of bits\n @rtype: list\n\n >>> cut(\"00000000\")\n ['00000000']\n >>> cut(\"101110011\")\n ['10111001', '1']\n \"\"\"\n\n if len(bits) <= 8:\n return [bits]\n else:\n list_ = [bits[:8]]\n list_.extend(cut(bits[8:]))\n return list_\n\n string = \"\"\n comp_byte = bytes([])\n for by in text:\n string += codes[by]\n list_ = cut(string)\n for i in list_:\n comp_byte += bytes([bits_to_byte(i)])\n return comp_byte", "def gzip_compress(data):\n s = BytesIO()\n g = gzip.GzipFile(fileobj=s, mode='wb')\n g.write(data)\n g.close()\n return s.getvalue()", "def compress(value):\n pickled = pickle_util.dump(value)\n return zlib.compress(pickled)", "def encode(self, compress=0):\n raw = bytes(self._encode())\n return gzip.compress(raw, compress) if compress else raw", "def decompress_gzip(in_str):\n import gzip\n # gzip can only handle file object therefore using StringIO\n copmressed_stream = StringIO.StringIO(in_str)\n gzipper = gzip.GzipFile(fileobj=copmressed_stream)\n s = gzipper.read()\n gzipper.close()\n return s", "def compress_bytes(text: bytes, codes: Dict[int, str]) -> bytes:\n\n if not text:\n return bytes([])\n else:\n bit = \"\"\n lst = []\n for symbols in text:\n bit += 
codes[symbols]\n\n if len(bit) == 8:\n lst.append(bits_to_byte(bit))\n bit = \"\"\n\n elif len(bit) > 8:\n lst.append(bits_to_byte(bit[:8]))\n bit = bit[8:]\n\n if 0 < len(bit) < 8:\n byte = bits_to_byte(bit)\n lst.append(byte)\n\n return bytes(lst)", "def encode(self, string):\n bitstring = ''\n\n for symbol in string.split(' '):\n symbol = symbol + ' '\n encoding = self.encode_symbol(symbol)\n if encoding == None:\n for s in symbol:\n encoding = self.encode_symbol(s)\n if encoding == None:\n print \"Skipping symbol: \" + s\n else:\n bitstring += encoding\n else:\n bitstring += encoding\n\n l = len(self.encode_symbol(' '))\n return bitstring[:-l]", "def decompress(compressed):\r\n \r\n \r\n # Build the dictionary.\r\n dict_size = 256\r\n dictionary = dict((i, chr(i)) for i in range(dict_size))\r\n # in Python 3: dictionary = {i: chr(i) for i in range(dict_size)}\r\n \r\n # use StringIO, otherwise this becomes O(N^2)\r\n # due to string concatenation in a loop\r\n result = StringIO()\r\n w = chr(compressed.pop(0))\r\n result.write(w)\r\n for k in compressed:\r\n if k in dictionary:\r\n entry = dictionary[k]\r\n elif k == dict_size:\r\n entry = w + w[0]\r\n else:\r\n raise ValueError('Bad compressed k: %s' % k)\r\n result.write(entry)\r\n \r\n # Add w+entry[0] to the dictionary.\r\n dictionary[dict_size] = w + entry[0]\r\n dict_size += 1\r\n \r\n w = entry\r\n return result.getvalue()", "def compress(value):\n\n process = Popen([\"xz\", \"--compress\", \"--force\"], stdin=PIPE, stdout=PIPE)\n return process.communicate(value)[0]", "def compress(value):\n\n process = Popen([\"xz\", \"--compress\", \"--force\"], stdin=PIPE, stdout=PIPE)\n return process.communicate(value)[0]", "def compress(self, data):\r\n return self.add_chunk(data)", "def parse_string_decompress(\n input: bytes,\n parse_string: bool = False,\n) -> bytes:\n global _pipeline\n if _pipeline is None:\n _pipeline = Pipeline(file_resources('itkwasm_compress_stringify_wasi').joinpath(Path('wasm_modules') / Path('parse-string-decompress.wasi.wasm')))\n\n pipeline_outputs: List[PipelineOutput] = [\n PipelineOutput(InterfaceTypes.BinaryStream),\n ]\n\n pipeline_inputs: List[PipelineInput] = [\n PipelineInput(InterfaceTypes.BinaryStream, BinaryStream(input)),\n ]\n\n args: List[str] = ['--memory-io',]\n # Inputs\n args.append('0')\n # Outputs\n args.append('0')\n # Options\n if parse_string:\n args.append('--parse-string')\n\n\n outputs = _pipeline.run(args, pipeline_outputs, pipeline_inputs)\n\n result = outputs[0].data.data\n return result", "def decompress_gzip(in_str):\n # gzip can only handle file object therefore using StringIO\n copmressed_stream = StringIO.StringIO(in_str)\n gzipper = gzip.GzipFile(fileobj=copmressed_stream)\n s = gzipper.read()\n gzipper.close()\n return s", "def compression_huffman(sequence:str):\r\n dico_frequency = frequency(sequence)\r\n dico_sort = sort_dico(dico_frequency)\r\n list_leaf = creation_list_node(dico_sort)\r\n tree = Tree(list_leaf)\r\n dict_seq_binary = tree.convertion_binaire_arbre()\r\n seq_trans_binary = binary_transformation(sequence, dict_seq_binary)\r\n compress_seq, add_binary = compression(seq_trans_binary)\r\n return sequence, dict_seq_binary, compress_seq, add_binary, seq_trans_binary", "def compressBuffer(self, buffer):\r\n # http://jython.xhaus.com/http-compression-in-python-and-jython/\r\n zbuf = cStringIO.StringIO()\r\n zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=9)\r\n zfile.write(buffer)\r\n zfile.close()\r\n return zbuf.getvalue()", "def 
test_compress_seq_diff_9_char(self):\n text = '123456789'\n actual = LZ77.compress(text)\n expected = bytearray([0]) + bytearray(b'12345678') \\\n + bytearray([0]) + bytearray(b'9')\n self.assertEqual(actual, expected)", "def sql_encode(data: str) -> str:\n return base64.urlsafe_b64encode(gzip.compress(data.encode())).decode()", "def compress_image(filename,k):", "def compress_file(compression, pretty, src, dst):\n str_tail = \"sed 1d\"\n str_cleanup = \";exit\"\n if pretty:\n str_tail = \"tail -n+2\"\n str_cleanup = \";rm ~;exit\"\n if \"lzma\" == compression:\n command = [\"xz\", \"--format=lzma\", \"--lzma1=preset=9e,lc=1,lp=0,pb=0\", \"--stdout\"]\n header = \"HOME=/tmp/i;%s $0|lzcat>~;chmod +x ~;~%s\" % (str_tail, str_cleanup)\n elif \"raw\" == compression:\n command = [\"xz\", \"-9\", \"--extreme\", \"--format=raw\", \"--stdout\"]\n header = \"HOME=/tmp/i;%s $0|xzcat -F raw>~;chmod +x ~;~%s\" % (str_tail, str_cleanup)\n elif \"xz\" == compression:\n command = [\"xz\", \"--format=xz\", \"--lzma2=preset=9e,lc=1,pb=0\", \"--stdout\"]\n header = \"HOME=/tmp/i;%s $0|xzcat>~;chmod +x ~;~%s\" % (str_tail, str_cleanup)\n else:\n raise RuntimeError(\"unknown compression format '%s'\" % compression)\n (compressed, se) = run_command(command + [src], False)\n wfd = open(dst, \"wb\")\n wfd.write((header + \"\\n\").encode())\n wfd.write(compressed)\n wfd.close()\n make_executable(dst)\n print(\"Wrote '%s': %i bytes\" % (dst, os.path.getsize(dst)))", "def compress_file(in_file: str, out_file: str) -> None:\n with open(in_file, \"rb\") as f1:\n text = f1.read()\n freq = build_frequency_dict(text)\n tree = build_huffman_tree(freq)\n codes = get_codes(tree)\n number_nodes(tree)\n print(\"Bits per symbol:\", avg_length(tree, freq))\n result = (tree.num_nodes_to_bytes() + tree_to_bytes(tree) +\n int32_to_bytes(len(text)))\n result += compress_bytes(text, codes)\n with open(out_file, \"wb\") as f2:\n f2.write(result)", "def get_encoded_minhash(string: str) -> str:\n return encode_minhash(compute_minhash(string))", "def encode(src):\n if not src:\n return None\n\n output = scramble(src)\n\n return ' '.join(output[i:i+5] for i in xrange(0, len(output), 5))", "def __encrypt(string: str) -> str:\n key = 171\n result = b\"\\0\\0\\0\" + chr(len(string)).encode('latin-1')\n for i in string.encode('latin-1'):\n a = key ^ i\n key = a\n result += chr(a).encode('latin-1')\n return result", "def compress(cls, img, as_string=False):\n h0, w0 = img.shape\n w = binary_cast([w0], 'H', 'BB')\n h = binary_cast([h0], 'H', 'BB')\n cp = np.concatenate((w, h, img.astype('uint8').flatten()))\n # VLR.cmp: more 2x compression\n scp = VariableLength.compress(cp)\n if as_string:\n return scp\n # translate string into unit8 for storage\n vcp = np.array([ord(d) for d in scp]).astype('uint8')\n return vcp", "def compress(self, output_file):\n # create a ternary search trie and fill it with single ASCII characters\n st = TernarySt()\n for i in xrange(self.radix):\n st[chr(i)] = i\n code = self.radix + 1\n # read all the data from the input file (not optimal, but easy to code)\n data = self.file.read()\n bw = BitWriter(output_file)\n while len(data) > 0:\n lp = st.longest_prefix(data)\n # write the value of the prefix to output\n bw.writebits(st[lp], self.codeword_width)\n if len(lp) < len(data) and code < self.codeword_limit:\n # add new prefix to the symbol table\n st[data[:len(lp) + 1]] = code\n code += 1\n data = data[len(lp):]\n bw.writebits(self.radix, self.codeword_width)\n # output_file.close()", "def set_encode(string, 
password):\r\n binary_array = []\r\n # Combine the password and string arrays into one, then loop it\r\n for character in password + string:\r\n binary_array.append(get_binary(character))\r\n\r\n # Create one long binary from those values\r\n binary = \"\".join(binary_array)\r\n\r\n # This loops through the binary string, reducing it by\r\n # one (in length) with each pass\r\n # Stops once the binary length returns back to the\r\n # pre-defined STRING_LENGTH\r\n while len(binary) > (8 * STRING_LENGTH):\r\n binary = binary_reduction(binary)\r\n\r\n # Turn those binary values back into a string\r\n return get_string(binary)", "def compress(in_file, out_file):\n with open(in_file, \"rb\") as f1:\n text = f1.read()\n freq = make_freq_dict(text)\n tree = huffman_tree(freq)\n codes = get_codes(tree)\n number_nodes(tree)\n print(\"Bits per symbol:\", avg_length(tree, freq))\n result = (num_nodes_to_bytes(tree) + tree_to_bytes(tree) +\n size_to_bytes(len(text)))\n result += generate_compressed(text, codes)\n with open(out_file, \"wb\") as f2:\n f2.write(result)", "def test_compress_max_1_seq_len(self):\n a_int = ord('a')\n seq = ''.join(map(chr, range(a_int, a_int + LZ77.max_seq + 1)))\n text = seq + '12' + seq + '1234'\n actual = LZ77.compress(text)\n expected = bytearray([0]) + bytearray(text[:8], 'utf-8')\\\n + bytearray([0]) + bytearray(text[8: 16], 'utf-8')\\\n + bytearray([12]) + bytearray(text[16: 20], 'utf-8')\\\n + bytearray([1, 63]) + bytearray([1, 49])\\\n + bytearray('34', 'utf-8')\n self.assertEqual(actual, expected)", "def encodeString(*args, **kwargs)->AnyStr:\n pass", "def test_compress_offset_less_len1(self):\n text = 'ababab'\n actual = LZ77.compress(text)\n expected = bytearray([32]) + bytearray(b'ab') + bytearray([0, 18])\n self.assertEqual(actual, expected)", "def encode(self, obj):\n s = super(CustomEncoder, self).encode(obj)\n # If uncompressed, postprocess for formatting\n if len(s.splitlines()) > 1:\n s = self.postprocess(s)\n return s", "def zip_compress(self, destination):\n\n if destination is not None and isinstance(destination, str):\n with ZipFile(destination, \"w\") as thezip:\n thezip.write(self.file)", "def generate_uncompressed(tree, text, size):\n\n def find_key(value, diction):\n \"\"\" Return the key which stores the value.\n\n Precondiction: All the values that are stored in the diction are unique\n values, and str value must be in the diction's values.\n\n @param str value:\n @param dict diction:\n @rtpe: str | int\n\n >>> diction = {1 : '0', 2: '1'}\n >>> find_key('0', diction)\n 1\n \"\"\"\n\n tuples = diction.items()\n for j, k in tuples:\n if value == k:\n return j\n\n process_bits = []\n for byte in text:\n process_bits.append(byte_to_bits(int(byte)))\n final_bits = \"\"\n for bit in process_bits:\n final_bits += bit\n codes_dict = get_codes(tree)\n got = 0\n re = bytes([])\n index = 0\n while got < size:\n t = \"\"\n while t not in codes_dict.values():\n t += final_bits[index]\n index += 1\n key = find_key(t, codes_dict)\n re += bytes([key])\n got += 1\n return re", "def encode(string, password):\r\n\r\n print(\"Calling encode...\")\r\n\r\n password = [p for p in password] # Set the password as an array\r\n encrypted_string = \"\" # To hold encrypted text.\r\n string_array = [] # To store the string as an array\r\n skipped_loop = 0 # For handling the \\\\x00 character\r\n\r\n # Scrambled hash array to use\r\n hash_array = set_hash_array(set_base_hash_array(), password)\r\n # Next array to use once the first is empty\r\n hash_array_next = 
set_hash_array(hash_array, password)\r\n\r\n for index, step in enumerate(string):\r\n # If a NULL_STRING is found, skip over the individual characters\r\n # until the next valid character is reached\r\n if skipped_loop > 0:\r\n skipped_loop -= 1\r\n continue\r\n\r\n if step == chr(92): # The \\ character\r\n # Check if the \\ symbol is part of the NULL_STRING\r\n if string[index : (index + 4)] == NULL_STRING:\r\n string_array.append(NULL_STRING)\r\n # Sets number of loops to skip based on NULL_STRING length\r\n skipped_loop = len(NULL_STRING) - 1\r\n else:\r\n string_array.append(step) # Only the \\ character was found\r\n\r\n else:\r\n # Otherwise, just append the found character\r\n string_array.append(step)\r\n\r\n # Once the string array is filled, move into the encoding phase\r\n if len(string_array) == STRING_LENGTH:\r\n\r\n # Checks if hash_array has enough characters to meet STRING_LENGTH\r\n if len(hash_array) < STRING_LENGTH:\r\n # If not, find how many character from the hash_array_next to use\r\n next_array_len = STRING_LENGTH - len(hash_array)\r\n # Set password_array\r\n password_array = hash_array + hash_array_next[:next_array_len]\r\n # Reset hash_array from the next one\r\n hash_array = hash_array_next\r\n # Create a new hash_array_next\r\n hash_array_next = set_hash_array(hash_array, password)\r\n # Finally, now that hash_array is the old hash_array_next and a\r\n # few characters were used, remove those values\r\n hash_array = hash_array[next_array_len:]\r\n else:\r\n # Create the password array\r\n password_array = hash_array[:STRING_LENGTH]\r\n # Remove those used values from the hash_array\r\n hash_array = hash_array[STRING_LENGTH:]\r\n\r\n # Do the encrypt functions\r\n encrypted_string += set_encode(string_array, password_array)\r\n # Reset string_array to empty for the next pass\r\n string_array = []\r\n\r\n # This is for catching the last chunk of string that may not meet STRING_LENGTH requirements\r\n if string_array != []:\r\n # Calls function to pad out the last array to match STRING_LENGTH\r\n # Pass hash_array and hash_array_next as one big array to avoice\r\n # an array being too small\r\n string_array = set_string(string_array, (hash_array + hash_array_next))\r\n\r\n if len(hash_array) < STRING_LENGTH:\r\n # If not, find how many character from the hash_array_next to use\r\n next_array_len = STRING_LENGTH - len(hash_array)\r\n # Set password_array\r\n password_array = hash_array + hash_array_next[:next_array_len]\r\n else:\r\n # Create the password array padded with the hash_array\r\n # password_array = password + hash_array[len(password) : STRING_LENGTH]\r\n password_array = hash_array[:STRING_LENGTH]\r\n\r\n # Build out the encrypted_string\r\n encrypted_string += set_encode(string_array, password_array)\r\n\r\n return encrypted_string", "def tar_gz_compress(self, destination):\n\n if destination is not None and isinstance(destination, str):\n with tarfile_open(destination, \"w:gz\") as tar:\n tar.add(self.file)", "def prepareMealFromString(string=\"\"):\n binstring = \"\"\n for char in string:\n binstring += bin(ord(char))\n \n binstring = binstring.replace(\"b\",\"10\")\n \n stringSuffix = \"10\"*128 # filler string of length 256.\n # Adds enough filler string to be multiple of 256:\n binstring += stringSuffix[:((len(stringSuffix)-len(binstring))%len(stringSuffix))]\n \n return binstring", "def encodeLZ(fileIn, fileOut, dictionarySize = 1114112):\n try:\n fileContent = FileWork.getFileContent(fileIn)\n\n dictionary = {}\n for i in range(0, 
dictionarySize):\n dictionary[chr(i)] = i\n\n currentCode = dictionarySize\n\n encodedFileContent = \"\"\n buffer = fileContent[0]\n for pos in range(1, len(fileContent)):\n character = fileContent[pos]\n if (buffer + character) in dictionary:\n buffer += character\n else:\n dictionary[buffer + character] = currentCode\n currentCode += 1\n encodedFileContent += str(dictionary[buffer]) + \" \"\n buffer = character\n\n encodedFileContent += str(dictionary[buffer])\n\n FileWork.writeToFile(fileOut, encodedFileContent)\n return True\n except Exception:\n return False", "def compress(self, file):\n\t\t\n\t\ttext = file.read() \n\t\ttext = text.rstrip() #elimina los espacios en blanco del final\n\n\t\t\n\t\tfrequency = self.make_frequency_dict(text)#obtenemos la frencuencia de cada numero en el texto\n\t\tself.make_heap(frequency)\n\t\tself.merge_nodes()\n\t\tself.make_codes()\n\t\tencoded_text = self.get_encoded_text(text)\n\t\tpadded_encoded_text = self.pad_encoded_text(encoded_text)\n\n\t\tb = self.get_byte_array(padded_encoded_text)\n\n\t\treturn b", "def apply_hash (self, s):\r\n m = md5()\r\n m.update (s)\r\n d = m.digest()\r\n # base64.encodestring tacks on an extra linefeed.\r\n return encodestring (d)[:-1]", "def encode(self, strs):\n encoded_str = \"\"\n for s in strs:\n encoded_str += \"%0*x\" % (8, len(s)) + s\n return encoded_str", "def _from_string(s):\n b = buffer()\n for c in s:\n insert(b, c)\n return b", "def sha512(s: str) -> str:\n return hashlib.sha512(s.encode()).hexdigest()", "def encripto(string_in):\n \n string_out=\"\"\n for char in string_in:\n index_letra=letras.index(char)\n string_out+= letras[index_letra-1]\n \n return string_out", "def retransformation(decompressed_sequence:str, binary_dict:dict, calcul_byte:int):\r\n if calcul_byte != 0:\r\n calcul_byte = (8-calcul_byte)\r\n decompressed_sequence = decompressed_sequence[calcul_byte:]\r\n \"\"\"\r\n Allows you to remove the zeros added at the beginning of a binary \r\n string to make a multiple of 8\r\n \"\"\"\r\n decompress_sequence = \"\"\r\n counter = 0\r\n for position in range(0, len(decompressed_sequence)+1, 1):\r\n if decompressed_sequence[counter:position] in binary_dict.keys():\r\n decompress_sequence += binary_dict[decompressed_sequence[counter:position]]\r\n counter = position\r\n return decompressed_sequence, decompress_sequence", "def _wadifyString(s):\n\n if len(s) < 8:\n s += \"\\x00\" * (8 - len(s))\n return s", "def encode_fn(s_in):\r\n s_out = s_in.split()\r\n return s_out", "def test_compress_offset_less_len2(self):\n text = 'abcdabcdab'\n actual = LZ77.compress(text)\n expected = bytearray([8]) + bytearray(b'abcd') + bytearray([0, 52])\n self.assertEqual(actual, expected)", "def encode(string,bits=7,multipleblocks=1):\n B=1<<bits\n i = 0\n ans = \"\"\n for c in list(string):\n ## read a single character\n if( c == \"1\" ) :\n if (multipleblocks) : ans = ans + \"1\" ; pass ## to say 'there is another 1 in this block'\n\t ans = ans + dec_to_bin(i,bits) ## send the details of the bit\n i += 1\n pass\n elif( c == \"0\"):\n i += 1\n\tif (multipleblocks and (i>=B) ) :\n\t ans = ans + \"0\" ## to say 'a block has ended'\n\t i=0\n pass\n ## finish the file.\n if (multipleblocks) : \n ans = ans + \"0\"\n pass\n return ans\n pass" ]
[ "0.88319397", "0.88319397", "0.8700479", "0.79835236", "0.7687754", "0.7666799", "0.7238186", "0.7213857", "0.7115728", "0.7022821", "0.6945289", "0.69063985", "0.6815695", "0.66225106", "0.65985954", "0.65551895", "0.65420026", "0.649725", "0.6487858", "0.6423921", "0.64124584", "0.6400865", "0.6356928", "0.6304041", "0.6298446", "0.6270617", "0.6193901", "0.61922455", "0.6185782", "0.611441", "0.6087565", "0.6076138", "0.6025523", "0.59752226", "0.5883659", "0.5859078", "0.5836721", "0.5797344", "0.5781003", "0.5771012", "0.57314867", "0.5724058", "0.5692865", "0.5692865", "0.56890225", "0.5681527", "0.5669439", "0.56200856", "0.56058437", "0.5595452", "0.558687", "0.5576183", "0.556907", "0.55577874", "0.55115205", "0.5498114", "0.5496502", "0.5493172", "0.5490826", "0.54830897", "0.5482482", "0.5482482", "0.54435855", "0.5429777", "0.54142094", "0.53679657", "0.5365249", "0.53476065", "0.5343486", "0.5331983", "0.53309757", "0.5300286", "0.52894044", "0.5276254", "0.52447456", "0.523906", "0.5232773", "0.52316356", "0.5226828", "0.52173716", "0.52030164", "0.5196997", "0.51767683", "0.5176494", "0.5173913", "0.51733613", "0.5166061", "0.5134238", "0.51297385", "0.51221466", "0.5098808", "0.50947666", "0.5093862", "0.509081", "0.50892997", "0.5083797", "0.5074367", "0.5066543", "0.5065878", "0.5062325" ]
0.7696003
4
Writes data from instream into additional allocated clusters of the given file. Metadata of this file will be stored in the Metadata object
def write(self, instream: typ.BinaryIO, filepath: str, filename: str = None) -> None:
    if filename is not None:
        filename = path.basename(filename)
    if self.fs_type == 'FAT':
        allocator_metadata = self.fs.write(instream, filepath)
        self.metadata.add_file(filename, allocator_metadata)
    elif self.fs_type == 'NTFS':
        allocator_metadata = self.fs.write(instream, filepath)
        self.metadata.add_file(filename, allocator_metadata)
    else:
        raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __write(self, storage):\n\n positions = storage.get_positions()\n if len(positions) == 0: return\n\n X = storage.get_X()\n Y = storage.get_Y()\n\n if Y: assert len(positions) == len(X) == len(Y)\n else: assert len(positions) == len(X)\n\n start, end = positions[0][0][0], positions[-1][-1][0]\n\n group = self.f.create_group(f'{storage.name}_{start}-{end}')\n group['positions'] = positions\n\n if Y: group['labels'] = Y\n\n group.attrs['contig'] = storage.name\n group.attrs['size'] = len(positions)\n\n group.create_dataset('examples', data=X, chunks=(1, 200, 90))", "def WriteClustersToImage(self):\n # Use the array we built earlier\n print(f\"Writing the following list of clusters to FAT structure: {self.cluster_list}\")\n padding = 3\n with open(self.output_file, \"r+b\") as fh:\n # The first cluster goes into offset 26 (2 Bytes) in root directory\n seeker = (self.root_directory_offset*self.sector_size)+((self.index_number-1)*self.directory_index_size)+(self.starting_cluster_offset)\n # Convert first item in list to two bytes\n first_address = (self.cluster_list[0]).to_bytes(2, byteorder='little')\n print(f\"If I were me, I'd write {first_address} to {seeker}\")\n fh.seek(seeker)\n fh.write(first_address)\n # Now, the rest are written to FAT area\n for i, item in enumerate(self.cluster_list):\n # If Entry 1 then the byte calculation returned a whole number\n # If Entry 2 then the byte calculation returned a half number\n # This item determines where we write the data\n entry1, entry2, seeker = self.IsEntryHighOrLow(item)\n # The data we are writing is the next item\n if i+1 >= len(self.cluster_list):\n next_item = 4095\n else:\n next_item = self.cluster_list[i+1]\n # If we're at the end of the list then write 0xfff\n print(f\"Ready to perform calculations on {next_item} (hex:{hex(next_item)}) [entry1={entry1}; entry2={entry2}, seeker={seeker}]\")\n fh.seek(seeker)\n my_bytes = b'\\x00'+fh.read(3)\n if self.debug:\n print(f\"bytes from disk image: {my_bytes}\")\n unpacked_bytes, = struct.unpack('>I', bytes(my_bytes))\n if self.debug:\n print(type(unpacked_bytes), unpacked_bytes)\n nstr = str(hex(unpacked_bytes)).replace('0x', '').zfill(6)\n le_three_bytes = \"\".join(map(str.__add__, nstr[-2::-2] ,nstr[-1::-2]))\n if self.debug:\n print(f\"Existing values: unpacked_bytes:{hex(unpacked_bytes)}|nstr:{nstr}|(le)three_bytes:{le_three_bytes}|Entry1={le_three_bytes[-3:]}|Entry2={le_three_bytes[:3]}\")\n if entry1:\n # We need to deal with entry1 (see page 7 of scan24 paper)\n if self.debug:\n print(\"Updating entry1\")\n entry1_bytes = hex(next_item)[2:].zfill(3)\n entry2_bytes = le_three_bytes[:3]\n else:\n if self.debug:\n print(\"Updating entry2\")\n entry1_bytes = le_three_bytes[-3:]\n entry2_bytes = hex(next_item)[2:].zfill(3)\n new_entry = f\"{entry2_bytes}{entry1_bytes}\"\n if self.debug:\n print(f\"new_entry: {new_entry}\")\n packed_bytes = struct.pack('<I', int(new_entry, 16))\n if self.debug:\n print(f\"Writing packed_bytes ({packed_bytes[:-1]}) to {seeker}\")\n fh.seek(seeker)\n fh.write(packed_bytes[:-1])\n print(f\"{self.filename}.{self.extension} written to root directory index #{self.index_number}\")\n return True", "def __write_matlab_clusters(tel, filename):\n # type: (TelescopeAnalysis, str) -> None\n centre_x = np.array([])\n centre_y = np.array([])\n points_x = np.array([])\n points_y = np.array([])\n for name in tel.layouts:\n if name == 'ska1_v5':\n continue\n layout = tel.layouts[name]\n centre_x = np.hstack((centre_x, layout['cx']))\n centre_y = np.hstack((centre_y, 
layout['cy']))\n if points_x.size == 0:\n points_x = layout['x']\n points_y = layout['y']\n else:\n points_x = np.vstack((points_x, layout['x']))\n points_y = np.vstack((points_y, layout['y']))\n savemat(filename, dict(centre_x=centre_x, centre_y=centre_y,\n antennas_x=points_x, antennas_y=points_y))", "def insert_bicluster_info( self, db, db_file, run2id, row2id, col2id ):\n\t\t# Get all biclusters from cmonkey run\n\t\tconn = sqlite3.connect(db_file)\n\t \tc = conn.cursor()\n\t \tc.execute(\"SELECT max(iteration) FROM cluster_stats;\")\n\t \tlast_run = c.fetchone()[0] # i think there is an indexing problem in cMonkey python!! \n\t \tw = (last_run,)\n\t \tc.execute(\"SELECT cluster FROM cluster_stats WHERE iteration = ?;\",w)\n\t\tbiclusters = [self.assemble_bicluster_info_single( db, db_file, c, last_run, i[0], run2id, row2id, col2id ) for i in c.fetchall()]\n\t\tbicluster_info_collection = self.db.bicluster_info\n\n\t\t# Check whether documents are already present in the collection before insertion\n\t\tif bicluster_info_collection.count() > 0:\n\t\t\td_f = filter( None, [ self.check4existence( bicluster_info_collection, i, \"run_id\", i[\"run_id\"], \"cluster\", i[\"cluster\"] ) for i in biclusters ] )\n\t\telse:\n\t\t\td_f = biclusters\n\t\t\n\n\t\tprint \"%s new records to write\" % len( d_f )\n\n\t\tif len(d_f) > 0:\n\t\t\tbicluster_info_collection.insert( d_f )\n\n\t\treturn bicluster_info_collection", "def writePointwiseData(self, writeTo):\n rlz = self._writeSegmentsRealization(writeTo)\n # add some cluster stuff\n # cluster features\n ## both scaled and unscaled\n featureNames = sorted(list(self._clusterInfo['features']['unscaled'].keys()))\n for scaling in ['unscaled','scaled']:\n for name in featureNames:\n varName = 'ClusterFeature|{}|{}'.format(name, scaling)\n writeTo.addVariable(varName, np.array([]), classify='meta', indices=['segment_number'])\n rlz[varName] = np.asarray(self._clusterInfo['features'][scaling][name])\n varName = 'ClusterLabels'\n writeTo.addVariable(varName, np.array([]), classify='meta', indices=['segment_number'])\n rlz[varName] = np.asarray(self._clusterInfo['labels'])\n writeTo.addRealization(rlz)", "def clustering(self): \n clusterOfFiles=self.getClusters()\n \n #group files based on the hash of their contents\n self.keyingMethod=md5Hash\n [self.addFile(afile) for acluster in clusterOfFiles for afile in acluster]\n clusterOfFiles=self.getClusters()\n self.showClusters(clusterOfFiles)", "def ior_write_dataset(self):\n for oclass in self.obj_class:\n for sizes in self.ior_chu_trs_blk_size:\n # Skip the object type if server count does not meet the minimum\n # EC object server count\n if oclass[1] > self.server_count:\n continue\n self.ior_param_update(oclass, sizes)\n\n # Create the new container with correct redundancy factor\n # for EC object type\n self.ec_contaier_create(oclass[0])\n self.update_ior_cmd_with_pool(oclass=oclass[0],\n create_cont=False)\n # Start IOR Write\n self.container.uuid = self.ec_container.uuid\n self.start_ior_load(operation=\"WriteRead\", percent=1,\n create_cont=False)\n self.cont_uuid.append(self.ior_cmd.dfs_cont.value)", "def add_file(self, letter, block_size):\n cluster = 1\n i = 0\n j = 0\n\n continuous = True\n while(i<self.size and j<block_size):\n if(self.disk_mem[i]==\".\"):\n self.disk_mem[i] = letter\n if not continuous:\n continuous = True\n cluster += 1\n j+=1\n else:\n continuous = False\n i+=1\n return cluster", "def seek_to_cluster(self, cluster):\n 
self.infile.seek(self.cluster_to_physical_offset(cluster))", "def write_file(self,f=None):\n nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper\n # Open file for writing\n if f is None:\n f = open(self.fn_path, 'w')\n # First line: heading\n f.write('{}\\n'.format(self.heading))\n # write dataset 1\n f.write('{} {} {} {} {} {} {}\\n'.format(self.ipakcb, self.iswtoc,\n self.nsystm, self.ithk,\n self.ivoid, self.istpcs,\n self.icrcc))\n # write dataset 2\n t = self.lnwt.array\n for tt in t:\n f.write('{} '.format(tt + 1))\n f.write('\\n')\n\n # write dataset 3\n f.write(\n '{} {} {} {} {} {} {} {} {} {}\\n'.format(self.izcfl, self.izcfm,\n self.iglfl, self.iglfm,\n self.iestfl, self.iestfm,\n self.ipcsfl, self.ipcsfm,\n self.istfl, self.istfm))\n\n # write dataset 4\n f.write(self.gl0.get_file_entry())\n\n # write dataset 5\n f.write(self.sgm.get_file_entry())\n\n # write dataset 6\n f.write(self.sgs.get_file_entry())\n\n # write datasets 7 to 13\n for k in range(self.nsystm):\n f.write(self.thick[k].get_file_entry())\n if self.icrcc != 0:\n f.write(self.sse[k].get_file_entry())\n f.write(self.ssv[k].get_file_entry())\n else:\n f.write(self.cr[k].get_file_entry())\n f.write(self.cc[k].get_file_entry())\n f.write(self.void[k].get_file_entry())\n f.write(self.sub[k].get_file_entry())\n\n # write datasets 14 and 15\n for k in range(nlay):\n if self.istpcs != 0:\n f.write(self.pcsoff[k].get_file_entry())\n else:\n f.write(self.pcs[k].get_file_entry())\n\n # write dataset 16 and 17\n if self.iswtoc > 0:\n # dataset 16\n for i in self.ids16:\n f.write('{} '.format(i))\n f.write(' #dataset 16\\n')\n\n # dataset 17\n for k in range(self.iswtoc):\n t = self.ids17[k, :].copy()\n t[0:4] += 1\n for i in t:\n f.write('{} '.format(i))\n f.write(' #dataset 17 iswtoc {}\\n'.format(k + 1))\n\n # close swt file\n f.close()", "def write(self, file):\n pos = file.tell()\n pickle.dump((self.index, self.meta, self.info), file)\n file.seek(0)\n\n # update the header with the position of the content index.\n file.write(struct.pack('<Q', pos))", "def cluster_data(data_loc, num_clusters, base_destination, vectorizer):\n cluster_df = __title_cluster_df(data_loc, num_clusters, vectorizer)\n if not os.path.isdir(base_destination):\n os.mkdir(base_destination)\n vec_path = os.path.join(base_destination, 'vectorizer.pkl')\n with open(vec_path, 'wb') as f:\n pickle.dump(vectorizer, f)\n cluster_stats = {}\n for i in range(num_clusters):\n titles = cluster_df[cluster_df['cluster']==i]['title']\n cluster_stats[i] = titles.shape[0]\n cluster_data = __get_data_with_titles(data_loc, titles)\n dest = os.path.join(base_destination, 'cluster_{}.json'.format(i))\n with open(dest, 'w') as f:\n json.dump(cluster_data, f)\n stats_path = os.path.join(base_destination, 'cluster_statistics.txt')\n with open(stats_path, 'w') as f:\n for cluster in cluster_stats.keys():\n f.write('cluster {}: '.format(cluster))\n f.write(str(cluster_stats[cluster]) + '\\n')", "def store_clusters(mapping, sff_fp, outdir=\"/tmp/\", store_members=False):\r\n\r\n # get mapping read to cluster\r\n invert_map = invert_mapping(mapping)\r\n (flowgrams, header) = lazy_parse_sff_handle(open(sff_fp))\r\n\r\n leftover_fasta_fh = open(outdir + \"/singletons.fasta\", \"w\")\r\n centroids = []\r\n for f in flowgrams:\r\n try:\r\n key = invert_map[f.Name]\r\n except KeyError:\r\n # this flowgram has not been clustered\r\n continue\r\n if (len(mapping[key]) == 0):\r\n # do not store singletons in a separate cluster\r\n leftover_fasta_fh.write(f.toFasta() + 
\"\\n\")\r\n continue\r\n elif(f.Name in mapping):\r\n # save as a centroid\r\n centroids.append((len(mapping[f.Name]) + 1, f.Name, f.toSeq()))\r\n\r\n if (store_members):\r\n flows_fh = open(outdir + key + \".flows\", \"a\")\r\n fasta_fh = open(outdir + key + \".fasta\", \"a\")\r\n flows_fh.write(\"%s\\n\" % f)\r\n fasta_fh.write(f.toFasta() + \"\\n\")\r\n fasta_fh.close()\r\n flows_fh.close()\r\n\r\n leftover_fasta_fh.close()\r\n\r\n # sort and store ordered by cluster_size\r\n centroids.sort(reverse=True)\r\n centroid_fh = open(outdir + \"/centroids.fasta\", \"w\")\r\n for size, name, seq in centroids:\r\n centroid_fh.write(\">%s | cluster size: %d \\n%s\\n\" %\r\n (name, size, seq))\r\n centroid_fh.close()", "def exportFlatClusterData(filename, new_row_header,new_column_header,xt,ind1,ind2):\n\n filename = string.replace(filename,'.pdf','.txt')\n export_text = open(filename,'w')\n column_header = string.join(['UID','row_clusters-flat']+new_column_header,'\\t')+'\\n' ### format column-names for export\n export_text.write(column_header)\n column_clusters = string.join(['column_clusters-flat','-']+ map(str, ind2),'\\t')+'\\n' ### format column-flat-clusters for export\n export_text.write(column_clusters)\n\n ### The clusters, dendrogram and flat clusters are drawn bottom-up, so we need to reverse the order to match\n new_row_header = new_row_header[::-1]\n xt = xt[::-1]\n\n ### Export each row in the clustered data matrix xt\n i=0\n for row in xt:\n export_text.write(string.join([new_row_header[i],str(ind1[i])]+map(str, row),'\\t')+'\\n')\n i+=1\n export_text.close()\n\n ### Transpose text file for easier reading!\n oldfile_h = open(filename, 'rb')\n\n elements = [ line.split() for line in oldfile_h ]\n oldfile_h.close()\n\n biglist = []\n for splitline in elements:\n #print len(splitline)\n #print splitline\n biglist.append(splitline)\n newarray = numpy.array(biglist)\n #print numpy.shape(newarray)\n t_array = newarray.transpose()\n #print numpy.shape(t_array)\n #print newarray[:,0]\n\n newfile_h = open(filename[:-4] + \"_transposed.txt\" , 'w')\n for row in t_array:\n #print \"The row is currently: %r\" % row\n newfile_h.write(\"\\t\".join(row) + \"\\n\")\n newfile_h.close()\n\n\n ### Export as CDT file\n filename = string.replace(filename,'.txt','.cdt')\n export_cdt = open(filename,'w')\n column_header = string.join(['UNIQID','NAME','GWEIGHT']+new_column_header,'\\t')+'\\n' ### format column-names for export\n export_cdt.write(column_header)\n eweight = string.join(['EWEIGHT','','']+ ['1']*len(new_column_header),'\\t')+'\\n' ### format column-flat-clusters for export\n export_cdt.write(eweight)\n\n ### Export each row in the clustered data matrix xt\n i=0\n for row in xt:\n export_cdt.write(string.join([new_row_header[i]]*2+['1']+map(str, row),'\\t')+'\\n')\n i+=1\n export_cdt.close()", "def proc_dataset_v2(write=False):\n \n path = load_config()\n M = pd.read_csv(path['metadata_file'])\n T = pd.read_csv(path['rna_file'])\n mCH = pd.read_csv(path['mCH_file'])\n CH = pd.read_csv(path['CH_file'])\n\n def format_df(df):\n \"\"\"The inputs are genes x cells. 
Transpose data and rename columns\"\"\"\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df\n\n T = format_df(T)\n mCH = format_df(mCH)\n CH = format_df(CH)\n\n #Update metadata\n M = pd.read_csv(path['metadata_file'])\n M.rename(columns={'Unnamed: 0': 'sample_id'}, inplace=True)\n M.set_index(keys='sample_id', drop=True, inplace=True)\n\n #Sort cells by metadata\n sorted_index = M.sort_values(by='SubClusterAnno').index\n M = M.loc[sorted_index]\n T = T.loc[sorted_index]\n mCH = mCH.loc[sorted_index]\n CH = CH.loc[sorted_index]\n\n assert np.array_equal(CH.columns, mCH.columns), \"Genes are not in the same order\"\n assert np.array_equal(T.columns, mCH.columns), \"Genes are not in the same order\"\n assert M.index.equals(T.index), \"Cells are not in the same order\"\n assert M.index.equals(CH.index), \"Cells are not in the same order\"\n assert M.index.equals(mCH.index), \"Cells are not in the same order\"\n\n # CPM-normalize counts\n X = T.values.astype(np.float32)\n X = (X/np.sum(X, axis=1, keepdims=True))*1e6\n X = np.log1p(X)\n T = pd.DataFrame(data=X, index=T.index, columns=T.columns)\n print('Completed T processing')\n\n # For methylation data\n X = CH.values.astype(float)\n Y = mCH.values.astype(float)\n Z = np.divide(Y,X+1e-10)\n E = pd.DataFrame(data=Z, index=mCH.index, columns=mCH.columns)\n print('Completed E processing')\n\n # select genes based on variance in log normalized CPM values in T\n def calc_highvar_genes(df):\n vars = np.var(df.values, axis=0)\n order_vars = np.argsort(-vars) # descending order\n sorted_highvar_genes = df.columns.values[order_vars]\n return sorted_highvar_genes\n\n data = {'sorted_highvar_T_genes': calc_highvar_genes(T),\n 'sorted_highvar_E_genes': calc_highvar_genes(E)}\n print('Completed finding high variance features')\n\n if write:\n feather.write_dataframe(T, path['data_dir'] / 'T_dat_v2.feather')\n feather.write_dataframe(E, path['data_dir'] / 'E_dat_v2.feather')\n feather.write_dataframe(M, path['data_dir'] / 'Meta_v2.feather')\n sio.savemat(path['data_dir'] / 'highvar_genes_v2.mat', data)\n return T, E, M, data", "def save_cluster_metrics_on_check_point(self) -> None:\n pd.read_csv(f'{self.path_to_cluster_metrics}/{self.file_name}.csv')\\\n .append(pd.DataFrame(self.cluster_metrics,\n columns=['stream_index', 'timestamp', 'check point', 'cluster id',\n 'x', 'y', 'radius', 'weight', 'cluster type']))\\\n .to_csv(f'{self.path_to_cluster_metrics}/{self.file_name}.csv', index=False)\n self.cluster_metrics = []", "def write_metadata(self, data_set, io=None, location=None):\n if not hasattr(data_set, '_h5_base_group'):\n # added here because loop writes metadata before data itself\n data_set._h5_base_group = self._create_data_object(data_set)\n if 'metadata' in data_set._h5_base_group.keys():\n del data_set._h5_base_group['metadata']\n metadata_group = data_set._h5_base_group.create_group('metadata')\n self.write_dict_to_hdf5(data_set.metadata, metadata_group)", "def proc_dataset_v1(write=False):\n \n path = load_config()\n M = pd.read_csv(path['metadata_file'])\n T = pd.read_csv(path['rna_file'])\n mCH = pd.read_csv(path['mCH_file'])\n CH = pd.read_csv(path['CH_file'])\n\n def format_df(df):\n \"\"\"The inputs are genes x cells. 
Transpose data and rename columns\"\"\"\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df\n\n T = format_df(T)\n mCH = format_df(mCH)\n CH = format_df(CH)\n\n #Update metadata\n M = pd.read_csv(path['metadata_file'])\n M.rename(columns={'Unnamed: 0': 'sample_id'}, inplace=True)\n M.set_index(keys='sample_id', drop=True, inplace=True)\n\n #Sort cells by metadata\n sorted_index = M.sort_values(by='SubClusterAnno').index\n M = M.loc[sorted_index]\n T = T.loc[sorted_index]\n mCH = mCH.loc[sorted_index]\n CH = CH.loc[sorted_index]\n\n assert np.array_equal(CH.columns, mCH.columns), \"Genes are not in the same order\"\n assert np.array_equal(T.columns, mCH.columns), \"Genes are not in the same order\"\n assert M.index.equals(T.index), \"Cells are not in the same order\"\n assert M.index.equals(CH.index), \"Cells are not in the same order\"\n assert M.index.equals(mCH.index), \"Cells are not in the same order\"\n\n # CPM-normalize counts\n X = T.values.astype(np.float32)\n X = (X/np.sum(X, axis=1, keepdims=True))*1e6\n X = np.log1p(X)\n T = pd.DataFrame(data=X, index=T.index, columns=T.columns)\n print('Completed T processing')\n\n # For methylation data\n X = CH.values.astype(float)\n Y = mCH.values.astype(float)\n Z = np.log1p(Y) - np.log1p(X)\n E = pd.DataFrame(data=Z, index=mCH.index, columns=mCH.columns)\n print('Completed E processing')\n\n # select genes based on variance in log normalized CPM values in T\n def calc_highvar_genes(df):\n vars = np.var(df.values, axis=0)\n order_vars = np.argsort(-vars) # descending order\n sorted_highvar_genes = df.columns.values[order_vars]\n return sorted_highvar_genes\n\n data = {'sorted_highvar_T_genes': calc_highvar_genes(T),\n 'sorted_highvar_E_genes': calc_highvar_genes(E)}\n print('Completed finding high variance features')\n\n if write:\n feather.write_dataframe(T, path['data_dir'] / 'T_dat.feather')\n feather.write_dataframe(E, path['data_dir'] / 'E_dat.feather')\n feather.write_dataframe(M, path['data_dir'] / 'Meta.feather')\n sio.savemat(path['data_dir'] / 'highvar_genes.mat', data)\n return T, E, M, data", "def import_and_save(ADCthres=0, s=False):\n df = import_data(ADCthres, s)\n bus_vec = np.array(range(0,3))\n for bus in bus_vec:\n df_clu = cluster_data(df, bus) \n save_clusters(df_clu, bus)", "def _make_data_file(cls, filename):\n cls.cluster.fs.setuser(cls.cluster.superuser)\n f = cls.cluster.fs.open(filename, \"w\")\n for x in range(256):\n f.write(\"%d\\t0x%x\\n\" % (x, x))\n f.close()", "def _put(self, name, chunk, row_offset):\n grp = self.grp[name]\n lo = row_offset\n if isinstance(chunk, pd.Series):\n chunk = chunk.to_frame()\n n_rows = len(chunk)\n else:\n n_rows = len(chunk[next(iter(chunk.keys()))])\n hi = lo + n_rows\n\n for name in chunk.keys():\n\n x = np.asarray(chunk[name])\n\n data, dtype, fillvalue = self._normalize_column(x, x.dtype)\n\n if name in grp.keys():\n dset = grp[name]\n if hi > len(dset):\n dset.resize((hi,))\n dset[lo:hi] = data\n else:\n try:\n enum_dict = h5py.check_dtype(enum=dtype)\n except AttributeError:\n enum_dict = None\n dset = grp.create_dataset(\n name,\n shape=(hi,),\n dtype=dtype,\n data=data,\n fillvalue=fillvalue,\n **self.storage_options\n )\n if enum_dict is not None:\n # store enum dictionary as attribute\n dset.attrs[\"categories\"] = sorted(\n enum_dict, key=enum_dict.__getitem__\n )", "def writeData( self, file, bAddBeginOfDataChunk = True ):\n self.writeSpecificData( file, 
self.data, bAddBeginOfDataChunk = bAddBeginOfDataChunk )", "def write_star_files(self, star_input, outpath):\n \n with open(star_input, 'r') as f:\n table = parse_star(f)\n\n cluster_star = {}\n\n for cluster, nodes in clusters.items():\n if nodes:\n #convert to str to match df\n #add 1 to match RELION indexing\n avgs = [str(node+1) for node in nodes]\n subset = table[table['ClassNumber'].isin(avgs)]\n cluster_star[cluster] = subset\n\n for cluster, table in cluster_star.items():\n with open(outpath+'/slicem_cluster_{0}.star'.format(cluster), 'w') as f:\n #write the star file\n print('data_', file=f)\n print('loop_', file=f)\n for i, name in enumerate(table.columns):\n print('_rln' + name + ' #' + str(i+1), file=f)\n table.to_csv(f, sep='\\t', index=False, header=False)\n\n with open(outpath+'/slicem_clusters.txt', 'w') as f:\n for cluster, averages in clusters.items():\n f.write(str(cluster) + '\\t' + str(averages) + '\\n')\n \n print('star files written!')", "def test_store_cluster(self):\r\n\r\n self.tmpdir = mkdtemp(dir=\"./\", suffix=\"_store_clusters/\")\r\n\r\n self.files_to_remove.append(self.tmpdir + \"singletons.fasta\")\r\n self.files_to_remove.append(self.tmpdir + \"centroids.fasta\")\r\n\r\n # empty map results in empty files\r\n store_clusters({}, self.tiny_test, self.tmpdir)\r\n actual_centroids = list(\r\n parse_fasta(open(self.tmpdir + \"centroids.fasta\")))\r\n self.assertEqual(actual_centroids, [])\r\n actual_singletons = list(\r\n parse_fasta(open(self.tmpdir + \"singletons.fasta\")))\r\n self.assertEqual(actual_singletons, [])\r\n\r\n # non-empty map creates non-empty files, centroids sorted by size\r\n mapping = {'FZTHQMS01B8T1H': [],\r\n 'FZTHQMS01DE1KN': ['FZTHQMS01EHAJG'],\r\n 'FZTHQMS01EHAJG': [1, 2, 3]} # content doesn't really matter\r\n\r\n centroids = [(\r\n 'FZTHQMS01EHAJG | cluster size: 4', 'CATGCTGCCTCCCGTAGGAGTTTGGACCGTGTCTCAGTTCCAATGTGGGGGACCTTCCTCTCAGAACCCCTATCCATCGAAGGTTTGGTGAGCCGTTACCTCACCAACTGCCTAATGGAACGCATCCCCATCGATAACCGAAATTCTTTAATAACAAGACCATGCGGTCTGATTATACCATCGGGTATTAATCTTTCTTTCGAAAGGCTATCCCCGAGTTATCGGCAGGTTGGATACGTGTTACTCACCCGTGCGCCGGTCGCCA'),\r\n ('FZTHQMS01DE1KN | cluster size: 2', 'CATGCTGCCTCCCGTAGGAGTTTGGACCGTGTCTCAGTTCCAATGTGGGGGACCTTCCTCTCAGAACCCCTATCCATCGAAGGTTTGGTGAGCCGTTACCTCACCAACTGCCTAATGGAACGCATCCCCATCGATAACCGAAATTCTTTAATAACAAGACCATGCGGTCTGATTATACCATCGGGTATTAATCTTTCTTTCGAAAGGCTATCCCCGAGTTATCGGCAGGTTGGATACGTGTTACTCACCCGTGCGCCGGTCGCCA')]\r\n\r\n singletons = [(\r\n 'FZTHQMS01B8T1H',\r\n 'CATGCTGCCTCCCGTAGGAGTTTGGACCGTGTCTCAGTTCCAATGTGGGGGACCTTCCTCTCAGAACCCCTATCCATCGAAGGTTTGGTGAGCCGTTACCTCACCAACTGCCTAATGGAACGCATCCCCATCGATAACCGAAATTCTTTAATAATTAAACCATGCGGTTTTATTATACCATCGGGTATTAATCTTTCTTTCGAAAGGCTATCCCCGAGTTATCGGCAGGTTGGATACGTGTTACTCACCCGTGCGCCGGTCGCCATCACTTA')]\r\n\r\n store_clusters(mapping, self.tiny_test, self.tmpdir)\r\n actual_centroids = list(\r\n parse_fasta(open(self.tmpdir + \"centroids.fasta\")))\r\n self.assertEqual(actual_centroids, centroids)\r\n actual_singletons = list(\r\n parse_fasta(open(self.tmpdir + \"singletons.fasta\")))\r\n self.assertEqual(actual_singletons, singletons)", "def write(self, file):\n #write header\n self.ID.write(file)\n if (self.write_size): \n self.size.write(file)\n for variable in self.variables:\n variable.write(file)\n for subchunk in self.subchunks:\n subchunk.write(file)", "def sent_or_doc_cluster(file_in, file_out, feature, method, n_cluster, show_or_write):\n\n original_file = file_in[0]\n original_words_file = file_in[1]\n file_vec = file_in[2]\n\n if feature.lower() == 'onehot':\n with 
open(file_vec, 'rb') as f_in:\n content_id = pickle.load(f_in)\n id_vec = pickle.load(f_in)\n id_onehot = pickle.load(f_in)\n x = []\n for i, onehot in id_onehot.items():\n x.append(onehot.tolist())\n\n X = np.array(x)\n\n if method.lower() == 'ap':\n instance = AffinityPropagation(affinity='cosine').fit(X)\n elif method.lower() == 'kmeans':\n instance = KMeans(n_cluster=n_cluster).fit(X)\n\n labels = instance.labels_.tolist()\n id_cluster = {}\n cluster_ids = {}\n for i in range(len(labels)):\n id_cluster[i] = labels[i]\n\n for i, cluster in id_cluster.items():\n if cluster not in cluster_ids:\n cluster_ids[cluster] = []\n cluster_ids[cluster].append(i)\n else:\n cluster_ids[cluster].append(i)\n pass\n if show_or_write == 'show':\n show(original_file, cluster_ids)\n else:\n keycontent_cluster_write_to_file(\n file_in=[original_file, original_words_file],\n file_out=file_out[0],\n id_cluster\n )\n keycontent_cluster_digest(\n file_in=[original_file, original_words_file],\n file_out=file_out[1],\n cluster_ids=cluster_ids\n )\n pass\n\n elif feature.lower() == 'vec':\n with open(file_vec, 'rb') as f_in:\n content_id = pickle.load(f_in)\n id_vec = pickle.load(f_in)\n id_onehot = pickle.load(f_in)\n x = []\n for i, vec in id_vec.items():\n x.append(vec.tolist()) # int object jas nor attribute 'tolist'\n\n X = np.array(x)\n\n if method.lower() == 'ap':\n instance = AffinityPropagation(affinity='cosine').fit(X)\n elif method.lower() == 'kmeans':\n instance = KMeans(n_clusters=n_cluster).fit(X)\n else:\n raise ValueError(\"Method must be 'ap' or \"\n \"'kmeans'. Got %s instead\"\n % method)\n\n labels = instance.labels_.tolist()\n id_cluster = {}\n cluster_ids = {}\n for i in range(len(labels)):\n id_cluster[i] = labels[i]\n\n for i, cluster in id_cluster.items():\n if cluster not in cluster_ids:\n cluster_ids[cluster] = []\n cluster_ids[cluster].append(i)\n else:\n cluster_ids[cluster].append(i)\n if show_or_write == 'show':\n show(original_file, cluster_ids)\n else:\n keycontent_cluster_write_to_file(\n file_in=[original_file, original_words_file],\n file_out=file_out[0],\n id_cluster\n )\n keycontent_cluster_digest(\n file_in=[original_file, original_words_file],\n file_out=file_out[1],\n cluster_ids=cluster_ids\n )\n pass\n elif feature.lower() == 'doc2vec':\n # word2vec.doc2vec\n pass\n else:\n raise ValueError(\n \"Feature must be 'onehot' or 'vec' or 'doc2vec'. 
Got %s instead\" % feature)\n pass\n\n pass", "def save(self, file: Union[str, BinaryIO]=None) -> bytes:\n # Store all the chunks data as zlib compressed nbt data\n chunks_data = []\n for chunk in self.chunks:\n if chunk is None:\n chunks_data.append(None)\n continue\n chunk_data = BytesIO()\n if isinstance(chunk, Chunk):\n nbt_data = nbt.NBTFile()\n nbt_data.tags.append(nbt.TAG_Int(name='DataVersion', value=chunk.version))\n nbt_data.tags.append(chunk.data)\n else:\n nbt_data = chunk.save()\n nbt_data.write_file(buffer=chunk_data)\n chunk_data.seek(0)\n chunk_data = zlib.compress(chunk_data.read())\n chunks_data.append(chunk_data)\n\n # This is what is added after the location and timestamp header\n chunks_bytes = bytes()\n offsets = []\n for chunk in chunks_data:\n if chunk is None:\n offsets.append(None)\n continue\n # 4 bytes are for length, b'\\x02' is the compression type which is 2 since its using zlib\n to_add = (len(chunk)+1).to_bytes(4, 'big') + b'\\x02' + chunk\n\n # offset in 4KiB sectors\n sector_offset = len(chunks_bytes) // 4096\n sector_count = math.ceil(len(to_add) / 4096)\n offsets.append((sector_offset, sector_count))\n\n # Padding to be a multiple of 4KiB long\n to_add += bytes(4096 - (len(to_add) % 4096))\n chunks_bytes += to_add\n\n locations_header = bytes()\n for offset in offsets:\n # None means the chunk is not an actual chunk in the region\n # and will be 4 null bytes, which represents non-generated chunks to minecraft\n if offset is None:\n locations_header += bytes(4)\n else:\n # offset is (sector offset, sector count)\n locations_header += (offset[0] + 2).to_bytes(3, 'big') + offset[1].to_bytes(1, 'big')\n\n # Set them all as 0\n timestamps_header = bytes(4096)\n\n final = locations_header + timestamps_header + chunks_bytes\n\n # Pad file to be a multiple of 4KiB in size\n # as Minecraft only accepts region files that are like that\n final += bytes(4096 - (len(final) % 4096))\n assert len(final) % 4096 == 0 # just in case\n\n # Save to a file if it was given\n if file:\n if isinstance(file, str):\n with open(file, 'wb') as f:\n f.write(final)\n else:\n file.write(final)\n return final", "def _distribute_data_to_cluster(self):\n\n for data in self.data:\n _distances = self._calculate_distances(data)\n _cluster = self._get_closest_cluster(_distances)\n self.clusters[_cluster].append(data)", "def updateAnnotations(self):\n self.backupDatafiles()\n print(\"Updating annotation files \", self.field(\"trainDir\"))\n listOfDataFiles = QDir(self.field(\"trainDir\")).entryList(['*.data'])\n for file in listOfDataFiles:\n # Read the annotation\n segments = Segment.SegmentList()\n newsegments = Segment.SegmentList()\n segments.parseJSON(os.path.join(self.field(\"trainDir\"), file))\n allSpSegs = np.arange(len(segments)).tolist()\n newsegments.metadata = segments.metadata\n for segix in allSpSegs:\n seg = segments[segix]\n if self.field(\"species\") not in [fil[\"species\"] for fil in seg[4]]:\n newsegments.addSegment(seg) # leave non-target segments unchanged\n else:\n for seg2 in self.segments:\n if seg2[1] == seg:\n # find the index of target sp and update call type\n seg[4][[fil[\"species\"] for fil in seg[4]].index(self.field(\"species\"))][\"calltype\"] = self.clusters[seg2[-1]]\n newsegments.addSegment(seg)\n newsegments.saveJSON(os.path.join(self.field(\"trainDir\"), file))", "def _processing( infile, rchr, dist, outf ):\n\n coords, sizes = build_dict(infile)\n qry_chrs = list(coords.keys())\n\n 
print(\"Primary\\tHaplotig\\tPrimary_Start\\tPrimary_end\\tHaplotig_Start\\tHaplotig_End\\tHaplotig_Length\", file=outf)\n for qchr in qry_chrs:\n refcoords = coords[qchr][0]\n qrycoords = coords[qchr][1]\n refst, refend, qryst, qryend = \\\n clustering( refcoords, sorted(qrycoords), sizes[qchr], dist )\n\n print(\"%s\\t%s\\t%d\\t%d\\t%d\\t%d\\t%d\" % \\\n (rchr, qchr, refst, refend, qryst, qryend, sizes[qchr]), file=outf)", "def ilastik_classify_mpi():\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n size = MPI.COMM_WORLD.Get_size()\n name = MPI.Get_processor_name()\n start_time = int(time.time())\n # Allow Ilatisk to use all available threads of the server/compute node.\n threads = int(no_of_threads/1)\n# threads = 1\n # Allow Ilastik to use available memory of the server/compute node.\n ram = ram_size\n# ram = int(ram_size/12)\n if rank == 0:\n print(\"*** size is %d, No of thread is %d, ram size is %d\" % (size, threads, ram))\n # assumes sub-volume image file extension is .hdf5\n input_files = sorted(glob(hdf_subvol_files_location + '/*.hdf5'))\n if not input_files:\n print(\"*** Did not find any file ending with .hdf5 extension ***\")\n return\n # Delete existing files created by ilastik (*.h5 files).\n if rank == 0:\n print(\"Ilastik input files/hdf_files_location\", hdf_subvol_files_location)\n oldoutput_files = sorted(glob(hdf_subvol_files_location + '/*.h5'))\n for file in oldoutput_files:\n print(\"*** Removing old Ilastik created file %s ***\" % file)\n os.remove(file)\n \n comm.Barrier()\n \n data_sets = []\n indices_ds = []\n rightoverlap_ds = []\n leftoverlap_ds = []\n # Get the dataset name in each sub-volume file. Dataset name is the same as file name.\n # Convert from unicode to ASCII since Ilastik does not like unicode\n for file in input_files:\n f = h5py.File(file, 'r')\n name, ext = os.path.splitext(os.path.basename(file))\n data_sets.append((file + '/' + name).encode('ascii'))\n indices_ds.append(f['orig_indices'][...])\n rightoverlap_ds.append(f['right_overlap'][...])\n leftoverlap_ds.append(f['left_overlap'][...])\n f.close()\n \n if rank == 0:\n print(\"Number of input/HDF5 files is %d, and Number of processes is %d\" % ((len(data_sets)), size))\n \n # Figure out how many sub-volume files each rank should handle.\n iterations = int(len(data_sets) / size) + (len(data_sets) % size > 0)\n # Divide pixel classification of sub-volume files among processes/ranks. \n for idx in range(iterations):\n if (rank + (size * idx)) >= len(data_sets):\n print(\"\\nBREAKING out, this rank is done with its processing, my rank is %d, number of files is %d, size is %d and idx is %d\" %\n (rank, len(data_sets), size, idx))\n break\n start_loop_time = time.time()\n data_set_name = data_sets[(rank + size * idx)]\n start_classify_time = time.time()\n hdf_dataset_path = classify_pixel_hdf(data_set_name, classifier, threads, ram)\n end_classify_time = time.time()\n classify_time = end_classify_time - start_classify_time\n print(\"Exec time for classification is %d Sec, rank is %d, hdf_dataset_path is %s\" % \n (classify_time, rank, hdf_dataset_path))\n # Create a dataset and save indices of the sub-volume into the whole volume.\n filename, dataset = os.path.split(hdf_dataset_path[0])\n file = h5py.File(filename, 'r+')\n subvol_indx = file.create_dataset('orig_indices', (6,), dtype='uint64')\n subvol_indx[...] = indices_ds[(rank + size * idx)]\n \n # Save the overlap sizes.\n subvol_rightoverlap = file.create_dataset('right_overlap', (3,), dtype='uint8')\n subvol_rightoverlap[...] 
= rightoverlap_ds[(rank + size * idx)]\n \n subvol_leftoverlap = file.create_dataset('left_overlap', (3,), dtype='uint8')\n subvol_leftoverlap[...] = leftoverlap_ds[(rank + size * idx)]\n file.close()\n end_loop_time = time.time()\n file_classify_time = end_loop_time - start_loop_time\n print(\"Exec Time per classifying one file is %d Sec, read/write time is %d Sec and rank is %d\" % \n (file_classify_time, (file_classify_time - classify_time), rank))\n \n end_time = int(time.time())\n exec_time = end_time - start_time\n print(\"*** My Rank is %d, exec time is %d sec - Done with classifying pixels in sub-volume files ***\" % (rank, exec_time))", "def write(self):\n f, ds = self.opendset()\n #\n # Now add the images\n #\n start_time = time.clock() # time this\n nframes = 0 # number completed\n print_every = 1; marker = \" .\";\n print('Frames written (of %s):' % self.ntowrite, end=\"\")\n for i in range(self.nfiles):\n if nframes >= self.ntowrite: break\n\n logging.debug('processing file %d of %d' % (i+1, self.nfiles))\n img_i = fabio.open(self.files[i])\n nfi = img_i.nframes\n for j in range(nfi):\n msg = '... file %d/image %d' % (i, j)\n logging.debug(msg)\n if j < self.nempty:\n logging.debug('... empty frame ... skipping')\n else:\n ds[nframes, :, :] = img_i.data\n nframes += 1\n if numpy.mod(nframes, print_every) == 0:\n print(marker, nframes, end=\"\")\n print_every *= 2\n sys.stdout.flush()\n logging.debug('... wrote image %s of %s' %\\\n (nframes, self.ntowrite))\n if nframes >= self.ntowrite:\n logging.debug('wrote last frame: stopping')\n break\n if j < nfi - 1:\n # on last frame in file, fabio will look for next file\n img_i = img_i.next()\n\n f.close()\n print(\"\\nTime to write: %f seconds \" %(time.clock()-start_time))", "def add_data(self, file_name: str, fabricated_count: dict) -> None:\n\n assert file_name not in self._meta_data_dict, \"Error, filename has already been used.\"\n\n self._meta_data_dict[file_name] = fabricated_count", "def addContainer(self, nwbfile):\n nwbfile.add_acquisition(self.clustering)\n nwbfile.add_acquisition(self.container)", "def loadMetaChunkToServerMap (fileName):\n if not os.path.exists(fileName):\n print \"File \", fileName, \" does not exists\"\n sys.exit(1)\n\n infile = open (fileName, \"r\")\n count = 0\n while infile:\n count = count + 1\n line = infile.readline()\n if not line:\n break\n print \"DEBUGME : processing line %s, %d\" % (line, count)\n lineParts = line.split(' ')\n gChunkMap[lineParts[0]] = ChunkInfo(lineParts[0], lineParts[1], lineParts[2])\n # Add a ChunkHostInfo\n numServers = int(lineParts[2])\n for i in range(numServers):\n i = i * 3\n gChunkMap[lineParts[0]].addChunkHostInfo(ChunkHostInfo(lineParts[i+3], lineParts[i+4], lineParts[i+5]))", "def _load_cluster(self):", "def add_coda_to_offsets(vr_part_file):\n\n num_name = {'Haloes': 'NumberOfBoundParticles_Total',\n 'Unbound': 'NumberOfUnboundParticles_Total',\n 'Groups': 'NumberOfSOParticles_Total'}\n\n for grp in ['Haloes', 'Unbound', 'Groups']:\n offsets = read_data(vr_part_file, f'{grp}/Offsets')\n num_ids = read_attribute(vr_part_file, 'Header', num_name[grp])\n\n offsets = np.concatenate((offsets, [num_ids]))\n write_data(vr_part_file, f'{grp}/Offsets', offsets)", "def append_output(metadata, filename):\n size_pre = os.path.getsize(filename) / 1.0e+6\n metadata.to_csv(filename, header=False,\n mode='a', compression='gzip')\n size_post = os.path.getsize(filename) / 1.0e+6\n size_appended = size_post - size_pre\n\n print(\"Appended {:.2F} MB of metadata to {} ({:.2F} 
MB)\"\n .format(size_appended, filename, size_post))", "def mergeAndSaveFile(dumpMetaFile, chunkSizeFile, outFile):\n dump = open (dumpMetaFile, \"r\")\n chunk = open (chunkSizeFile, \"r\")\n out = open (outFile, \"w\")\n \n cline = \"\"\n cline = chunk.readline()\n cline = cline.rstrip(\"\\n\")\n\n while dump:\n dline = dump.readline()\n if not dline:\n break\n dline = dline.rstrip(\"\\n\")\n \n # Split line parts \n dlineParts = dline.split(' ')\n \n # Read lines from chunkSize\n numEntries = int(dlineParts[2])\n \n entries = []\n for i in range(numEntries):\n entries.append([dlineParts[i*3 + 3], dlineParts[i*3 + 4], dlineParts[i*3 + 5], 0])\n #entries[i][0] = dlineParts[i*3 + 3]\n #entries[i][1] = dlineParts[i*3 + 4]\n #entries[i][2] = dlineParts[i*3 + 5]\n #entries[i][3] = 0\n\n while True:\n clineParts = cline.split(' ')\n if ((dlineParts[0] == clineParts[0]) and (dlineParts[1] == clineParts[1])):\n for i in range(numEntries):\n if ((entries[i][0] == clineParts[3]) and (entries[i][1] == clineParts[4])):\n entries[i][3] = clineParts[2]\n else:\n break\n cline = chunk.readline()\n cline = cline.rstrip(\"\\n\")\n if not cline:\n break\n\n # Print output\n out.write(dlineParts[0]+\" \"+dlineParts[1]+\" \"+dlineParts[2]+\" \")\n for i in range(numEntries):\n out.write(str(entries[i][3])+\" \"+entries[i][0]+\" \"+entries[i][1]+\" \"+entries[i][2]+\" \")\n out.write(\"\\n\")\n out.close()", "def write_compressed(self, filename):\n\n # Define which molecules to use \n # (counting indices of processed data set)\n indices = np.arange(len(self))\n # All charges and position arrays have the same size\n # (the one of the biggest molecule)\n size = np.max( self.num_atoms )\n # Initialize arrays\n num_atoms = np.zeros(len(indices))\n labels = np.zeros(len(indices))\n charges = np.zeros([len(indices),size])\n positions = np.zeros([len(indices),size,3])\n # For each molecule ...\n for j,idx in enumerate(indices):\n # load the data\n sample = self[idx]\n # assign per-molecule data\n labels[j] = sample['data']\n num_atoms[j] = sample['num_atoms']\n # ... 
and for each atom:\n for ia in range(sample['num_atoms']):\n charges[j,ia] = sample['charges'][ia]\n positions[j,ia,0] = sample['positions'][ia][0] \n positions[j,ia,1] = sample['positions'][ia][1] \n positions[j,ia,2] = sample['positions'][ia][2]\n\n # Merge pairs\n print(labels.shape,charges.shape,positions.shape)\n labels = labels[0::2]\n charges = np.array([np.concatenate((charges[i],charges[i+1])) for i in indices[0::2]])\n positions = np.array([np.concatenate((positions[i],positions[i+1])) for i in indices[0::2]])\n print(labels.shape,charges.shape,positions.shape)\n \n # Create a dictionary with all the values to save\n save_dict = {}\n save_dict['label'] = labels\n save_dict['charges'] = charges\n save_dict['positions'] = positions\n\n # Save as a compressed array \n np.savez_compressed(filename,**save_dict)\n \n return", "def parse_and_map(self, local_inet_path):\n for file_name in tqdm(self.filenames):\n # TODO: Add some log while processing data\n # Reads file name from full file path\n sliced_list = file_name.split(sep='/t')[-1].split(sep='_')\n self.data_dict['path'].append(file_name)\n self.data_dict['dataset'].append(sliced_list[1])\n self.data_dict['device'].append(sliced_list[2])\n self.data_dict['wn_id'].append(sliced_list[3])\n self.data_dict['im_id'].append(sliced_list[4])\n self.data_dict['eeg_session'].append(sliced_list[5])\n self.data_dict['global_session'].append(sliced_list[6].split(sep='.')[0])\n # File name: /MindBigData_Imagenet_Insight_n00007846_6247_1_785\n # Imagenet file path: /n00007846/n00007846_6247.JPEG\n file_name = str(sliced_list[3] + '_' + sliced_list[4] + '.JPEG')\n inet_path = os.path.join(local_inet_path, sliced_list[3], file_name)\n # If copy is true, data related local ImageNet images will be copied to separate folder\n if self.copy:\n try:\n # New file paths\n new_dir_path = os.path.join(self.copy_path, sliced_list[3])\n new_inet_path = os.path.join(new_dir_path, file_name)\n # Creates recursive folders in disk\n os.makedirs(new_dir_path, exist_ok=True, mode=0o771)\n # Copies file to destination\n shutil.copy(inet_path, new_inet_path)\n # Appends new file path to list\n self.data_dict['inet_path'].append(new_inet_path)\n except Exception as e:\n # TODO: More useful exception\n print(e)\n else:\n # Append local ImageNet path to list\n self.data_dict['inet_path'].append(inet_path)", "def cluster_shrinkage_clustering(from_file):\n points = read_points(from_file)\n shuffle(points)\n S = similarity_matrix(points, similarity_measure=euclidean_distance)\n A = cluster(S, k=10, max_iter=1000)\n labels = [np.argmax(p) for p in A]\n xs, ys = zip(*points)\n \n return xs, ys, labels", "def save_people_files(self):\r\n\r\n # Check existence of clustering results\r\n if len(self.recognized_faces) == 0:\r\n\r\n # Try to load YAML files\r\n if os.path.exists(self.cluster_files_path):\r\n\r\n print 'Loading YAML files with clustering results'\r\n logger.debug('Loading YAML file with clustering results')\r\n\r\n self.recognized_faces = []\r\n for yaml_file in os.listdir(self.cluster_files_path):\r\n yaml_file_path = os.path.join(\r\n self.cluster_files_path, yaml_file)\r\n with open(yaml_file_path) as f:\r\n self.recognized_faces.append(yaml.load(f))\r\n\r\n print 'YAML file with clustering results loaded'\r\n logger.debug('YAML file with clustering results loaded')\r\n\r\n else:\r\n\r\n print 'Warning! 
No clustering results found!'\r\n logger.warning('No clustering results found!')\r\n\r\n return\r\n\r\n # Delete already saved files\r\n if os.path.exists(self.compl_ann_path):\r\n\r\n ann_files = os.listdir(self.compl_ann_path)\r\n\r\n for ann_file in ann_files:\r\n ann_file_path = os.path.join(self.compl_ann_path, ann_file)\r\n os.remove(ann_file_path)\r\n\r\n else:\r\n\r\n os.makedirs(self.compl_ann_path)\r\n\r\n # Delete already saved files\r\n if os.path.exists(self.simple_ann_path):\r\n\r\n ann_files = os.listdir(self.simple_ann_path)\r\n\r\n for ann_file in ann_files:\r\n ann_file_path = os.path.join(self.simple_ann_path, ann_file)\r\n os.remove(ann_file_path)\r\n\r\n else:\r\n\r\n os.makedirs(self.simple_ann_path)\r\n\r\n # Get minimum segment duration\r\n min_duration = c.MIN_SEGMENT_DURATION\r\n\r\n if ((self.params is not None) and\r\n (c.MIN_SEGMENT_DURATION_KEY in self.params)):\r\n min_duration = self.params[c.MIN_SEGMENT_DURATION_KEY]\r\n\r\n # Save unique tags\r\n tags = []\r\n\r\n for person_dict in self.recognized_faces:\r\n\r\n ann_tag = person_dict[c.ASSIGNED_TAG_KEY]\r\n\r\n if (ann_tag != c.UNDEFINED_TAG) and (ann_tag not in tags):\r\n tags.append(ann_tag)\r\n\r\n for tag in tags:\r\n\r\n # Create complete annotations\r\n person_dict = {}\r\n\r\n # Create simple annotations\r\n simple_dict = {c.ANN_TAG_KEY: tag}\r\n\r\n person_dict[c.ANN_TAG_KEY] = tag\r\n\r\n segment_list = []\r\n\r\n simple_segment_list = []\r\n\r\n tot_dur = 0\r\n\r\n # Iterate through all recognized people in video\r\n for temp_person_dict in self.recognized_faces:\r\n\r\n ann_tag = temp_person_dict[c.ASSIGNED_TAG_KEY]\r\n\r\n if ann_tag == tag:\r\n\r\n temp_segment_list = temp_person_dict[c.SEGMENTS_KEY]\r\n\r\n for segment_dict in temp_segment_list:\r\n segment_list.append(segment_dict)\r\n\r\n simple_seg_dict = {}\r\n\r\n start = segment_dict[c.SEGMENT_START_KEY]\r\n\r\n simple_seg_dict[c.SEGMENT_START_KEY] = start\r\n\r\n dur = segment_dict[c.SEGMENT_DURATION_KEY]\r\n\r\n tot_dur = tot_dur + dur\r\n\r\n simple_seg_dict[c.SEGMENT_DURATION_KEY] = dur\r\n\r\n simple_segment_list.append(simple_seg_dict)\r\n\r\n person_dict[c.SEGMENTS_KEY] = segment_list\r\n\r\n # (simple_segment_list, tot_dur) = utils.merge_consecutive_segments(\r\n # simple_segment_list, min_duration)\r\n\r\n simple_dict[c.SEGMENTS_KEY] = simple_segment_list\r\n\r\n person_dict[c.TOT_SEGMENT_DURATION_KEY] = tot_dur\r\n\r\n simple_dict[c.TOT_SEGMENT_DURATION_KEY] = tot_dur\r\n\r\n file_name = tag + '.YAML'\r\n\r\n # Save complete annotations\r\n\r\n file_path = os.path.join(self.compl_ann_path, file_name)\r\n\r\n utils.save_YAML_file(file_path, person_dict)\r\n\r\n # Save simple annotations\r\n\r\n file_path = os.path.join(self.simple_ann_path, file_name)\r\n\r\n utils.save_YAML_file(file_path, simple_dict)", "def buildClusters(self):\n oldLatFile = 'needed_files/lat.in'\n oldFile = open(oldLatFile, 'r')\n oldLines = [line for line in oldFile]\n oldFile.close()\n \n newFile = open('enum/lat.in','w')\n for i in xrange(len(oldLines)):\n if 'Number pairs' in oldLines[i-1] and i>=1: #bch use label on previous line\n for num in self.clusterNums:\n newFile.write(str(num) + \" \")\n newFile.write(\"\\n\")\n else:\n newFile.write(oldLines[i])\n newFile.close()\n \n lastDir = os.getcwd()\n os.chdir(lastDir + '/enum')\n if sum(self.clusterNums)<=1500: #the 1500 assumes you are running Main with 16G. 
\n subprocess.call([self.uncleExec, '10'], stdout=self.uncleOut)\n else:\n subprocess.call(['echo','Warning: BLOCKING CLUSTER JOB to save time'])\n# clustersjob = ClustersBuild.clustersjob()\n# clustersjob.clustBuild()\n# \n os.chdir(lastDir)", "def readInstance(self):\n file = open(self.fName, 'r')\n self.genSize = int(file.readline())\n self.data = {}\n for line in file:\n (id, x, y) = line.split()\n self.data[int(id)] = (int(x), int(y))\n file.close()", "def writeOrganisms( self ):\n\n self.logger.info( 'writeOrganisms: START' )\n\n # Generate inserts for meabolic pathways.\n self.importerOrganism.writeOrganisms()\n \n\n self.logger.info( 'writeOrganisms: keggreader.getAllOrganismEcs() : START' )\n\n # Get all organism ecs relations.\n organismEcs = self.reader.getAllOrganismEcs()\n\n self.logger.info( 'writeOrganisms: keggreader.getAllOrganismEcs() : DONE' )\n\n\n self.logger.info( 'writeOrganisms: keggreader.getAllOrganismMaps() : START' )\n\n # Get all organism maps relations.\n organismMaps = self.reader.getAllOrganismMaps()\n\n self.logger.info( 'writeOrganisms: keggreader.getAllOrganismMaps() : DONE' )\n\n\n self.logger.info( 'writeOrganisms: organismEcFile is organismEcsInsert.psql' )\n\n # Open protein_ecs insert file.\n organismEcFile = self.openInsertFile( 'organismEcsInsert.psql' )\n\n\n self.logger.info( 'writeOrganisms: organismMapFile is organismMapsInsert.psql' )\n\n # Open organism_maps insert file.\n organismMapFile = self.openInsertFile( 'organismMapsInsert.psql' )\n\n\n # Now we have to write organism_ecs table.\n for organism,relationalDatabaseId in self.importerOrganism.organismsInserted.iteritems():\n\n\n organismId = relationalDatabaseId\n\n if len( organismEcs[ organism ] ) > 0:\n \n self.logger.info( 'writeOrganisms: the organism: ' + organism + ' : FOUND ' + str(len(organismEcs[organism])) + ' EC numbers.' )\n for ec in organismEcs[ organism ]:\n ecId = self.importerEc.ecsInserted[ ec ]\n\n #self.writeOrganismEcsFile( organismEcFile, organismId , ecId )\n self.writeFile( organismEcFile, 'organism_ecs', [ str(organismId) , str(ecId) ] )\n else:\n self.logger.info( 'writeOrganisms: the organism: ' + organism + ' : doesnt have EC numbers associated.' )\n\n\n if len( organismMaps[ organism ] ) > 0:\n \n self.logger.info( 'writeOrganisms: the organism: ' + organism + ' : FOUND ' + str(len(organismMaps[organism])) + ' MAP numbers.' )\n for mapNumber in organismMaps[ organism ]:\n\n # We don't need maps that is not metabolic maps.\n if mapNumber in self.importerPathway.pathwayMapsInserted:\n mapId = self.importerPathway.pathwayMapsInserted[ mapNumber ]\n\n #self.writeOrganismMapsFile( organismMapFile, organismId , mapId )\n self.writeFile( organismMapFile, 'organism_maps', [ str(organismId) , str(mapId) ] )\n else:\n self.logger.info( 'writeOrganisms: the organism: ' + organism + ' : doesnt have MAP numbers associated.' )\n\n\n self.logger.info( 'writeOrganisms: DONE' )", "def put_metadata(self, metadata, tombstone=False):\n if tombstone:\n # We don't write tombstone files. 
So do nothing.\n return\n assert self.data_file is not None, \\\n \"put_metadata: no file to put metadata into\"\n metadata = _adjust_metadata(metadata)\n self.threadpool.run_in_thread(write_metadata, self.data_file, metadata)\n self.metadata = metadata\n self._filter_metadata()", "def writeEcMaps( self ):\n\n self.logger.info( 'writeEcMaps: START' )\n\n self.logger.info( 'writeEcMaps: insert file will be ecMapsInsert.psql' )\n\n ecMapsFile = self.openInsertFile( 'ecMapsInsert.psql' )\n\n self.logger.info( 'writeEcMaps: keggreader.getEcMaps(): START' )\n\n ecMaps = self.reader.getEcMaps()\n\n self.logger.info( 'writeEcMaps: keggreader.getEcMaps(): START' )\n\n for ec,mapNumbers in ecMaps.iteritems():\n ecId = self.importerEc.ecsInserted[ ec ]\n \n for mapNumber in mapNumbers:\n\n if mapNumber in self.importerPathway.pathwayMapsInserted:\n\n mapId = self.importerPathway.pathwayMapsInserted[ mapNumber ]\n\n #self.writeEcMapsFile( ecMapsFile, ecId, mapId )\n self.writeFile( ecMapsFile, 'ec_maps', [ str(ecId), str(mapId) ] )\n\n self.logger.info( 'writeEcMaps: DONE' )", "def writeToMetadata(self, context):\n fqId = self.type + GenericMetadata.COMPOUND_KEY_SEP + self.id\n fqId = fqId.lower()\n\n climatePoints = GenericMetadata.readClimatePointEntries(context)\n try:\n stations = climatePoints['stations'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n stations = []\n # Write station metadata (overwrite if already present)\n keys = []\n values = []\n if fqId not in stations:\n stations.append(fqId)\n stationsStr = GenericMetadata.VALUE_DELIM.join(stations)\n keys.append('stations'); values.append(stationsStr)\n # Write attributes for station\n keyProto = 'station' + GenericMetadata.COMPOUND_KEY_SEP + fqId + GenericMetadata.COMPOUND_KEY_SEP \n longitude = keyProto + 'longitude'\n keys.append(longitude); values.append(self.longitude)\n latitude = keyProto + 'latitude'\n keys.append(latitude); values.append(self.latitude)\n elevation = keyProto + 'elevation'\n keys.append(elevation); values.append(self.elevation)\n name = keyProto + 'name'\n keys.append(name); values.append(self.name)\n if self.startDate:\n startDate = keyProto + 'startdate'\n keys.append(startDate); values.append(self.startDate.strftime(ClimatePointStation.FMT_DATE))\n if self.endDate:\n endDate = keyProto + 'enddate'\n keys.append(endDate); values.append(self.endDate.strftime(ClimatePointStation.FMT_DATE))\n if self.variables:\n variablesKey = keyProto + 'variables'\n variablesValue = GenericMetadata.VALUE_DELIM.join(self.variables)\n keys.append(variablesKey); values.append(variablesValue)\n if self.data != None:\n data = keyProto + 'data'\n keys.append(data); values.append(self.data)\n elif self.variablesData:\n # Try to write data entries for each variable separately\n vars = self.variablesData.keys()\n for var in vars:\n varKey = keyProto + var + GenericMetadata.COMPOUND_KEY_SEP + 'data'\n keys.append(varKey); values.append(self.variablesData[var])\n GenericMetadata.writeClimatePointEntries(context, keys, values)", "def __setitem__(self, filenr, data_arr):\n cvcfile = self.filenames[filenr]\n cvcpath = os.path.join(self.filefolder, cvcfile)\n data_arr.tofile(cvcpath)", "def read_data(self, filename, chunksize = 2048):\n pdt_tipsy = np.dtype([('mass', 'f4'),('pos', 'f4', 3),('vel', 'f4', 3), ('eps', 'f4'), ('phi', 'f4')])\n\n # helper functions\n def convert_to_fof_particle_partition(index, iterator): \n for s in iterator: \n p_arr = np.frombuffer(s, pdt_tipsy)\n new_arr = np.zeros(len(p_arr), dtype=pdt)\n 
new_arr['pos'] = p_arr['pos'] \n if count: \n npart_acc.add({index: len(new_arr)})\n yield new_arr\n\n def set_particle_IDs_partition(index, iterator): \n p_counts = partition_counts.value\n local_index = 0\n start_index = sum([p_counts[i] for i in range(index)])\n for arr in iterator:\n arr['iOrder'] = range(start_index + local_index, start_index + local_index + len(arr))\n local_index += len(arr)\n yield arr\n \n sc = self.sc\n\n rec_rdd = sc.binaryRecords(filename, pdt_tipsy.itemsize*chunksize)\n nPartitions = rec_rdd.getNumPartitions()\n # set the partition count accumulator\n npart_acc = sc.accumulator({i:0 for i in range(nPartitions)}, dictAdd())\n count=True\n # read the data and count the particles per partition\n rec_rdd = rec_rdd.mapPartitionsWithIndex(convert_to_fof_particle_partition)\n rec_rdd.count()\n count=False\n\n partition_counts = sc.broadcast(npart_acc.value)\n\n rec_rdd = rec_rdd.mapPartitionsWithIndex(set_particle_IDs_partition)\n rec_rdd = (self._partition_rdd(rec_rdd, partition_array).partitionBy(self.nPartitions) \n .map(lambda (_,v): v, preservesPartitioning=True)) \n return rec_rdd", "def Construct3DMolToFileMultiprocess2(fileName,writeFile):\r\n # Writing sets of molecules\r\n\r\n suppl = Chem.SDMolSupplier(fileName)\r\n w = Chem.SDWriter(writeFile)\r\n\r\n mols = [x for x in suppl]\r\n p = Pool(processes=2)\r\n result = p.map(Get3DMolFromMol,mols)\r\n p.close()\r\n for i in result:\r\n \tw.write(i)\r\n\r\n\r\n w.close()", "def write_embeddings_to_file(self):\n modes = [self.generator, self.discriminator]\n for i in range(2):\n embedding_matrix = modes[i].embedding_matrix\n embedding_matrix = embedding_matrix.detach().to('cpu').numpy()\n index = np.array(range(self.n_node)).reshape(-1, 1)\n embedding_matrix = np.hstack([index, embedding_matrix])\n embedding_list = embedding_matrix.tolist()\n embedding_str = [str(int(emb[0])) + \"\\t\" + \"\\t\".join([str(x) for x in emb[1:]]) + \"\\n\" \n for emb in embedding_list]\n with open(config.emb_filenames[i], \"w+\") as f:\n lines = [str(self.n_node) + \"\\t\" + str(config.n_emb) + \"\\n\"] + embedding_str\n f.writelines(lines)", "def __init__(self, entries):\r\n if isinstance(entries, str): # filename\r\n self.contigs = readstatsFile(entries)\r\n else:\r\n self.contigs = OrderedDict()\r\n for entry in entries:\r\n self.contigs[entry.contigName] = entry", "def write(self, file):\n #write header\n for variable in self.variables:\n variable.write(file)\n for subchunk in self.subchunks:\n subchunk.write(file)", "def onestatfile():\n with hp.File('StatsFile.h5', 'w') as onefile:\n alldata = np.empty((600, 4, 3, 500), dtype=np.float32)\n for j in range(600):\n for i in range(3):\n msd, vol, rms, asp = getstats(i, j+1)\n alldata[j, 0, i, :] = msd\n alldata[j, 1, i, :] = vol\n alldata[j, 2, i, :] = rms\n alldata[j, 3, i, :] = asp\n onefile.create_dataset('Stats', data=alldata, chunks=(1, 4, 3, 500),\n compression='gzip', compression_opts=9)", "def test_assign_clusters_sparse_long(self, new_data, filename):\n\n sqlalchemy_conn_str = open('../conf/sqlalchemy_conn_str.txt', 'r').read()\n engine = create_engine(sqlalchemy_conn_str)\n \n print('creating test sparse matrix...')\n if self.split_type == 'random':\n averages_seg = pd.read_sql('SELECT * FROM clust_sparse_long_avebysegment_random',con=engine)\n if self.split_type == 'date':\n averages_seg = pd.read_sql('SELECT * FROM clust_sparse_long_avebysegment_date',con=engine)\n \n test_matrix = pd.merge(new_data['segment_id'].to_frame(), averages_seg, how='inner', 
on=['segment_id'])\n test_sparse_matrix = test_matrix.drop(columns = ['segment_id','segmentskey','index'])\n \n print('clustering new data...')\n cluster_model = joblib.load(filename)\n cluster_predictions = cluster_model.predict(test_sparse_matrix)\n \n clusterdf = pd.DataFrame(cluster_predictions,columns = ['cluster_sparse_long'])\n clusterdf['index'] = clusterdf.index\n segmentdf = test_matrix['segment_id'].to_frame()\n segmentdf['index'] = segmentdf.index\n test_cluster_df_sparse = pd.merge(clusterdf, segmentdf, on=['index'])\n test_cluster_df_sparse = test_cluster_df_sparse[['segment_id', 'cluster_sparse_long']].groupby(['segment_id', 'cluster_sparse_long']).count()\n \n return test_cluster_df_sparse.reset_index()", "def readFile(self, files):\n files = np.atleast_1d(files) # allow scalar input\n\n events = list()\n groups = list()\n flashes = list()\n one_sec = list()\n\n ev_id_ctr = 0\n gr_id_ctr = 0\n fl_id_ctr = 0\n\n for _file in files:\n # todo: with...open\n nc = Dataset(_file)\n\n this_ev = _extract_events(nc)\n this_grp = _extract_groups(nc)\n this_fl = _extract_flashes(nc)\n this_one_sec = _extract_one_second(nc, background=False)\n\n nc.close()\n\n # TODO: do we need check for \"empty\" files like w/GLM?\n\n # IDs are not necessarily unique. We'll modify them so they are.\n # Similar to what is done with GLM data (glm.py in this package)\n # See there for details, but the gist is get unique values and map\n # TODO: refactor?\n\n this_ev.sort_values('id', inplace=True)\n this_grp.sort_values('id', inplace=True)\n this_fl.sort_values('id', inplace=True)\n\n new_flash_id = np.arange(len(this_fl))\n this_fl.id = new_flash_id\n flash_id_map = dict(zip(this_fl._orig_id.values, new_flash_id))\n\n # Update group parent\n new_id = this_grp.parent_id.map(flash_id_map.get)\n this_grp.parent_id = new_id\n\n # New id for the group:\n new_group_id = np.arange(len(this_grp))\n this_grp.id = new_group_id\n group_id_map = dict(zip(this_grp._orig_id.values, new_group_id))\n\n # Update event parent\n this_ev.parent_id = this_ev.parent_id.map(group_id_map.get)\n\n # New event ID (although I don't think is really necessary)\n new_event_id = np.arange(len(this_ev))\n this_ev.id = new_event_id\n\n # Add in an offset to get unique values across files\n this_ev['id'] += ev_id_ctr\n this_grp['id'] += gr_id_ctr\n this_fl['id'] += fl_id_ctr\n\n # Offset the parent IDs for the children too:\n this_ev['parent_id'] += gr_id_ctr\n this_grp['parent_id'] += fl_id_ctr\n\n # Next, update the counters\n ev_id_ctr = this_ev['id'].iloc[-1]+1\n gr_id_ctr = this_grp['id'].iloc[-1]+1\n fl_id_ctr = this_fl['id'].iloc[-1]+1\n\n # Modify the times to UTC:\n for val in [this_ev, this_grp, this_fl]: # one seconds already converted\n val.time = tai93_to_utc(val.time)\n\n # todo: add option to not sort by time\n # this_event.sort_values('time', inplace=True)\n # this_group.sort_values('time', inplace=True)\n # this_flash.sort_values('time', inplace=True)\n\n # Finally, add \"this\" data\n events.append(this_ev)\n groups.append(this_grp)\n flashes.append(this_fl)\n one_sec.append(this_one_sec)\n\n # Put these as attributes of the class\n self.events = Ltg(pd.concat(events))\n self.groups = Ltg(pd.concat(groups))\n self.flashes = Ltg(pd.concat(flashes))\n self.one_second = Ltg(pd.concat(one_sec))", "def save_clusters(db, savepath, time_req):\n # try:\n # all_id = np.unique(db['pred_labels']) # Includes noise tag\n # except:\n print('fallo de unique')\n import pdb\n pdb.set_trace()\n all_id = []\n num_id = [0 for _ in 
range(50)]\n for line in db['pred_labels']:\n if all_id.count(line)==0:\n all_id.append(line)\n num_id[line+1]+=1\n pdb.set_trace()\n\n\n for iddty in all_id:\n data = db.loc[db['pred_labels'] == iddty]\n if len(data) >= time_req and iddty != -1:\n id_path = join(savepath, 'id_' + str(iddty))\n os.makedirs(id_path, exist_ok=True)\n\n data_vector, size = load_embeddings(data)\n centroid = np.mean(data_vector, axis=0)\n std = np.std(data_vector, axis=0)\n cov = np.cov(np.array(data_vector).T)\n\n print(all_id)\n print(cov)\n pdb.set_trace()\n\n #inv_cov = np.linalg.inv(cov)\n export(path=join(id_path, 'centroid'),\n data=centroid)\n export(path=join(id_path, 'std'),\n data=std)\n export(path=join(id_path, 'covmat'),\n data=cov)\n #export(path=join(id_path, 'inv_covmat'), data=inv_cov)\n\n imgs = data['img'].values\n for img_path in imgs:\n img = PIL.Image.open(img_path)\n img_name = img_path.split('/')[-1]\n img.save(join(id_path, img_name))", "def _write_attributes_(self):\n #Open the Netcdf GRID file output from PreMOD\n try: dataset = Dataset(self.netcdf_file,'r+',format='NETCDF4')\n except Exception, e:\n print \"ERROR: %s\" % e\n sys.exit()\n\n dataset.title = self.title \n dataset.description = self.description\n dataset.ngh_file = self.ngh_file\n dataset.rtr_file = self.rtr_file\n dataset.netcdf_file = self.netcdf_file\n dataset.epsg = 4326\n dataset.close()", "def add(self, filename, n_pages):\n with open(filename, 'rb') as f:\n content = f.read()\n size = len(content)\n\n # Before storing the filename I strip first part of output path which is the parent\n # directory of all of these files. We don't want that info in here because it will become\n # wrong if these files are ever moved, and we want them to be relocatable without\n # breaking anything.\n filename = os.path.relpath(filename, self.output_path)\n\n self.fileinfo[filename] = {'n_pages': n_pages,\n 'size': size,\n 'hash': hashlib.sha256(content).hexdigest(),\n }\n\n self.n_total_pages += n_pages\n self.n_total_bytes += size", "def test_assign_clusters_nonsparse(self, new_data, filename):\n\n sqlalchemy_conn_str = open('../conf/sqlalchemy_conn_str.txt', 'r').read()\n engine = create_engine(sqlalchemy_conn_str)\n if self.split_type == 'random':\n averages_seg = pd.read_sql('SELECT * FROM clust_nonsparse_avebysegment_random',con=engine)\n averages_rt = pd.read_sql('SELECT * FROM clust_nonsparse_avebyrt_random',con=engine)\n elif self.split_type == 'date':\n averages_seg = pd.read_sql('SELECT * FROM clust_nonsparse_avebysegment_date',con=engine)\n averages_rt = pd.read_sql('SELECT * FROM clust_nonsparse_avebyrt_date',con=engine)\n \n averages_seg['exists'] = 1\n test_data_exists = pd.merge(new_data, averages_seg[['segment_id', 'day_of_week', 'time_idx', 'exists']], on=['segment_id', 'day_of_week', 'time_idx'])\n test_exists = test_data_exists[test_data_exists['exists']==1]\n test_notexists = test_data_exists[test_data_exists['exists']!=1]\n \n test_exists_tmp = test_exists[['date','time','date_idx', 'time_idx', 'day_of_week', 'segment_id', 'road_type', 'lat1', 'lat2', 'lon1', 'lon2']]\n test_notexists_tmp = test_notexists[['date','time','date_idx', 'time_idx', 'day_of_week', 'segment_id', 'road_type', 'lat1', 'lat2', 'lon1', 'lon2']]\n test_matrix_exists = pd.merge(test_exists_tmp, averages_seg, how='left', on=['segment_id', 'day_of_week', 'time_idx'])\n test_matrix_notexists = pd.merge(test_notexists_tmp, averages_rt, how='left', on=['road_type', 'day_of_week', 'time_idx'])\n test_matrix = pd.concat([test_matrix_exists, 
test_matrix_notexists])\n test_matrix = test_matrix.fillna(0)\n \n test_nonsparse_matrix = test_matrix[['segment_id','date','time','date_idx', 'time_idx', 'day_of_week', 'road_type', 'lat1', 'lat2', 'lon1', 'lon2', 'level_binary', 'level_min', 'level_max', 'level_mean', 'level_count']]\n test_nonsparse_matrix = self.scale_matrix(test_nonsparse_matrix)\n\n print('clustering new data...')\n cluster_model = joblib.load(filename)\n cluster_predictions = cluster_model.predict(test_nonsparse_matrix.drop(columns = ['segment_id','date','time']))\n \n clusterdf = pd.DataFrame(cluster_predictions,columns = ['cluster_nonsparse']).reset_index()\n keydf = test_matrix[['segment_id','date','time']].reset_index()\n test_cluster_df_sparse = pd.merge(clusterdf, keydf, on=['index'])\n \n return test_cluster_df_sparse[['segment_id','date','time','cluster_nonsparse']]", "def write_stewicombo_metadata(file_name, metadata_dict, category=''):\n meta = set_stewicombo_meta(file_name, category=category)\n meta.tool_meta = metadata_dict\n write_metadata_to_file(paths, meta)", "def process_file(filename):\n \n # create a new file to write to\n nfn = \"padded-\" + filename\n new_file = open(nfn, \"w\")\n data = False\n\n # track the number of real cells and the number of cells including padding\n num_real_cells = 0\n num_cells_with_padding = 0\n num_special_initial = 0\n num_special_with_padding = 0\n with open(filename) as f:\n for line in f:\n line = line.strip()\n\n # if the line contains instance data, pad it\n if data == True and \",\" in line:\n padded_instance, initial_cells, padded_cells, general = \\\n pad_instance(line)\n num_real_cells += initial_cells\n num_cells_with_padding += padded_cells\n\n # track how much overhead is added to special circuits\n if not general:\n num_special_initial += initial_cells\n num_special_with_padding += padded_cells\n new_file.write(padded_instance + \"\\n\")\n\n # otherwise, write line to file (i.e. 
attributes)\n else:\n new_file.write(line + \"\\n\")\n if line == \"@DATA\":\n data = True\n new_file.close()\n \n # print overhead details about only special circuits\n print(\"number of unpadded cells (only special circuits):\", \\\n num_special_initial)\n print(\"number of cells including padding (only special circuits):\", \\\n num_special_with_padding)\n special_padding_cells = num_special_with_padding - num_special_initial\n special_overhead = special_padding_cells/num_special_initial * 100\n print(\"added special circuit overhead from padding cells:\", \\\n round(special_overhead, 2), \"percent\\n\")\n \n # print overhead details about all circuits\n print(\"number of unpadded cells (all circuit types):\", num_real_cells)\n print(\"number of cells including padding (all circuit types):\", \\\n num_cells_with_padding)\n padding_cells = num_cells_with_padding - num_real_cells\n network_overhead = padding_cells/num_real_cells * 100\n print(\"added network overhead from padding cells:\", \\\n round(network_overhead, 2), \"percent\")", "def __init__(self, path, head=None, update_interval=0, ofile=sys.stdout):\n self.all = None\n self.input = None\n self.target = None\n self.weight = None\n self.extra = None\n\n self.header = False\n self.header_size = None\n self.header_rows = None\n self.header_cols = None\n self.header_sizes = None\n self.header_col_names = []\n\n data_started = False\n data = array.array('d')\n\n f = open(path)\n n_data_lines = 0\n len_float_line = None\n\n for i,line in enumerate(f):\n if n_data_lines == head:\n #we've read enough data,\n # break even if there's more in the file\n break\n if len(line) == 0 or line == '\\n':\n continue\n if line[0] == '#':\n if not data_started:\n #the condition means that the file has a header, and we're on\n # some header line\n self.header = True\n if line.startswith(AMat.marker_size):\n info = line[len(AMat.marker_size):]\n self.header_size = [int(s) for s in info.split()]\n self.header_rows, self.header_cols = self.header_size\n if line.startswith(AMat.marker_col_names):\n info = line[len(AMat.marker_col_names):]\n self.header_col_names = info.split()\n elif line.startswith(AMat.marker_sizes):\n info = line[len(AMat.marker_sizes):]\n self.header_sizes = [int(s) for s in info.split()]\n else:\n #the first non-commented line tells us that the header is done\n data_started = True\n float_line = [float(s) for s in line.split()]\n if len_float_line is None:\n len_float_line = len(float_line)\n if (self.header_cols is not None) \\\n and self.header_cols != len_float_line:\n print >> sys.stderr, \\\n 'WARNING: header declared %i cols but first line has %i, using %i',\\\n self.header_cols, len_float_line, len_float_line\n else:\n if len_float_line != len(float_line):\n raise IOError('wrong line length', i, line)\n data.extend(float_line)\n n_data_lines += 1\n\n if update_interval > 0 and (ofile is not None) \\\n and n_data_lines % update_interval == 0:\n ofile.write('.')\n ofile.flush()\n\n if update_interval > 0:\n ofile.write('\\n')\n f.close()\n\n # convert from array.array to numpy.ndarray\n nshape = (len(data) / len_float_line, len_float_line)\n self.all = numpy.frombuffer(data).reshape(nshape)\n self.n_examples = self.all.shape[0]\n\n # assign\n if self.header_sizes is not None:\n if len(self.header_sizes) > 4:\n print >> sys.stderr, 'WARNING: ignoring sizes after 4th in %s' % path\n leftmost = 0\n #here we make use of the fact that if header_sizes has len < 4\n # the loop will exit before 4 iterations\n attrlist = ['input', 
'target', 'weight', 'extra']\n for attr, ncols in zip(attrlist, self.header_sizes):\n setattr(self, attr, self.all[:, leftmost:leftmost+ncols])\n leftmost += ncols", "def PadSparseImage(self, out_file):\n raise NotImplementedError", "def create_index_molecules(self):\n logger.info(\"\\n\\n Processing data set:\")\n\n self.index_complexes = []\n\n # Training dataset\n desc = '{:25s}'.format(' Train dataset')\n if self.tqdm:\n data_tqdm = tqdm(self.train_database,\n desc=desc, file=sys.stdout)\n else:\n logger.info(' Train dataset')\n data_tqdm = self.train_database\n sys.stdout.flush()\n\n for fdata in data_tqdm:\n if self.tqdm:\n data_tqdm.set_postfix(mol=os.path.basename(fdata))\n try:\n fh5 = h5py.File(fdata, 'r')\n mol_names = list(fh5.keys())\n mol_names = self._select_pdb(mol_names)\n # to speed up in case of no filtering:\n if not self.dict_filter:\n self.index_complexes = [[fdata, k, None, None] for k in mol_names]\n else:\n for k in mol_names: \n if self.filter(fh5[k]):\n self.index_complexes += [(fdata,\n k, None, None)]\n for irot in range(self.data_augmentation):\n axis, angle = pdb2sql.transform.get_rot_axis_angle(\n self.rotation_seed)\n self.index_complexes += [\n (fdata, k, angle, axis)]\n fh5.close()\n except Exception:\n logger.exception(f'Ignore file: {fdata}')\n\n self.ntrain = len(self.index_complexes)\n self.index_train = list(range(self.ntrain))\n\n if self.ntrain == 0:\n raise ValueError(\n 'No avaiable training data after filtering')\n\n # Validation dataset\n if self.valid_database:\n\n desc = '{:25s}'.format(' Validation dataset')\n if self.tqdm:\n data_tqdm = tqdm(self.valid_database,\n desc=desc, file=sys.stdout)\n else:\n data_tqdm = self.valid_database\n logger.info(' Validation dataset')\n sys.stdout.flush()\n\n for fdata in data_tqdm:\n if self.tqdm:\n data_tqdm.set_postfix(mol=os.path.basename(fdata))\n try:\n fh5 = h5py.File(fdata, 'r')\n mol_names = list(fh5.keys())\n mol_names = self._select_pdb(mol_names)\n self.index_complexes += [(fdata, k, None, None)\n for k in mol_names]\n fh5.close()\n except Exception:\n logger.exception(f'Ignore file: {fdata}')\n\n self.ntot = len(self.index_complexes)\n self.index_valid = list(range(self.ntrain, self.ntot))\n self.nvalid = self.ntot - self.ntrain\n\n # Test dataset\n if self.test_database:\n\n desc = '{:25s}'.format(' Test dataset')\n if self.tqdm:\n data_tqdm = tqdm(self.test_database,\n desc=desc, file=sys.stdout)\n else:\n data_tqdm = self.test_database\n logger.info(' Test dataset')\n sys.stdout.flush()\n\n for fdata in data_tqdm:\n if self.tqdm:\n data_tqdm.set_postfix(mol=os.path.basename(fdata))\n try:\n fh5 = h5py.File(fdata, 'r')\n mol_names = list(fh5.keys())\n mol_names = self._select_pdb(mol_names)\n self.index_complexes += [(fdata, k, None, None)\n for k in mol_names]\n fh5.close()\n except Exception:\n logger.exception(f'Ignore file: {fdata}')\n\n self.ntot = len(self.index_complexes)\n self.index_test = list(\n range(self.ntrain + self.nvalid, self.ntot))\n self.ntest = self.ntot - self.ntrain - self.nvalid", "def _write_outputs(self):\n\n #########################\n # Create necessary variables for generic metadata file, as well as\n # generate and fill metadata file, if user wants it\n record_start = pd.to_datetime(self.dt_array[0]).date()\n record_end = pd.to_datetime(self.dt_array[-1]).date()\n\n if self.metadata_mode == 1 and self.script_mode == 1:\n # user wants to fill metadata and it is the correct mode\n\n # First check to see if metadata file already exists\n if not 
os.path.isfile('correction_metadata.xlsx'):\n # file does not exist, create new one\n metadata_info = pd.DataFrame({'station_name': self.station_name, 'station_lat': self.station_lat,\n 'station_lon': self.station_lon, 'station_elev_m': self.station_elev,\n 'record_start': record_start, 'record_end': record_end,\n 'anemom_height_m': self.ws_anemometer_height,\n 'output_file_path': self.output_file_path}, index=np.array([1]))\n\n with pd.ExcelWriter('correction_metadata.xlsx', date_format='YYYY-MM-DD',\n datetime_format='YYYY-MM-DD HH:MM:SS', engine='openpyxl', mode='w') as writer:\n metadata_info.to_excel(writer, header=True, index=False, sheet_name='Sheet1')\n else:\n # file is already created, so we need to read it in, append our new information to the bottom of it\n # and then save the info\n metadata_info = pd.read_excel('correction_metadata.xlsx', sheet_name=0, index_col=None, engine='xlrd',\n keep_default_na=False, verbose=True, skip_blank_lines=True)\n\n new_meta_info = pd.DataFrame({'station_name': self.station_name, 'station_lat': self.station_lat,\n 'station_lon': self.station_lon, 'station_elev_m': self.station_elev,\n 'record_start': record_start, 'record_end': record_end,\n 'anemom_height_m': self.ws_anemometer_height,\n 'output_file_path': self.output_file_path}, index=np.array([1]))\n\n output_metadata = pd.concat([metadata_info, new_meta_info], ignore_index=True)\n\n with pd.ExcelWriter('correction_metadata.xlsx', date_format='YYYY-MM-DD',\n datetime_format='YYYY-MM-DD HH:MM:SS', engine='openpyxl', mode='w') as writer:\n output_metadata.to_excel(writer, header=True, index=False, sheet_name='Sheet1')\n\n else:\n # do nothing\n pass\n\n # if we are using a network-specific metadata file, we need to update the run count to pass it on\n if self.metadata_path is not None:\n current_row = self.metadata_df.run_count.ne(2).idxmax() - 1\n current_run = self.metadata_df.run_count.iloc[current_row] + 1\n\n self.metadata_df.run_count.iloc[current_row] = current_run\n self.metadata_df.record_start.iloc[current_row] = record_start\n self.metadata_df.record_end.iloc[current_row] = record_end\n self.metadata_df.output_path.iloc[current_row] = self.output_file_path\n\n with pd.ExcelWriter(self.metadata_path, date_format='YYYY-MM-DD',\n datetime_format='YYYY-MM-DD', engine='openpyxl', mode='w') as writer:\n self.metadata_df.to_excel(writer, header=True, index=True, sheet_name='Sheet1')\n\n #########################\n # Generate output file\n # Create any final variables, then create panda dataframes to save all the data\n # Includes the following sheets:\n # Corrected Data : Actual corrected values\n # Delta : Magnitude of difference between original data and corrected data\n # Filled Data : Tracks which data points have been filled by script generated values instead of provided\n # Data that is provided and subsequently corrected by the script do not count as filled values.\n print(\"\\nSystem: Saving corrected data to .xslx file.\")\n\n # Create any individually-requested output data\n ws_2m = _wind_height_adjust(uz=self.data_ws, zw=self.ws_anemometer_height)\n\n # Create corrected-original delta numpy arrays\n diff_tavg = np.array(self.data_tavg - self.original_df.tavg)\n diff_tmax = np.array(self.data_tmax - self.original_df.tmax)\n diff_tmin = np.array(self.data_tmin - self.original_df.tmin)\n diff_tdew = np.array(self.data_tdew - self.original_df.tdew)\n diff_ea = np.array(self.data_ea - self.original_df.ea)\n diff_rhavg = np.array(self.data_rhavg - self.original_df.rhavg)\n diff_rhmax 
= np.array(self.data_rhmax - self.original_df.rhmax)\n diff_rhmin = np.array(self.data_rhmin - self.original_df.rhmin)\n diff_rs = np.array(self.data_rs - self.original_df.rs)\n diff_rs_tr = np.array(self.opt_rs_tr - self.orig_rs_tr)\n diff_rso = np.array(self.rso - self.original_df.rso)\n diff_ws = np.array(self.data_ws - self.original_df.ws)\n diff_precip = np.array(self.data_precip - self.original_df.precip)\n diff_etr = np.array(self.etr - self.original_df.etr)\n diff_eto = np.array(self.eto - self.original_df.eto)\n\n # Create datetime for output dataframe\n datetime_df = pd.DataFrame({'year': self.data_year, 'month': self.data_month, 'day': self.data_day})\n datetime_df = pd.to_datetime(datetime_df[['month', 'day', 'year']])\n\n # Create output dataframe\n output_df = pd.DataFrame({'date': datetime_df, 'year': self.data_year, 'month': self.data_month,\n 'day': self.data_day, 'TAvg (C)': self.data_tavg, 'TMax (C)': self.data_tmax,\n 'TMin (C)': self.data_tmin, 'TDew (C)': self.data_tdew,\n 'Vapor Pres (kPa)': self.data_ea, 'RHAvg (%)': self.data_rhavg,\n 'RHMax (%)': self.data_rhmax, 'RHMin (%)': self.data_rhmin, 'Rs (w/m2)': self.data_rs,\n 'Opt_Rs_TR (w/m2)': self.opt_rs_tr, 'Rso (w/m2)': self.rso,\n 'Windspeed (m/s)': self.data_ws, 'Precip (mm)': self.data_precip,\n 'ETr (mm)': self.etr, 'ETo (mm)': self.eto, 'ws_2m (m/s)': ws_2m},\n index=datetime_df)\n\n # Creating difference dataframe to track amount of correction\n delta_df = pd.DataFrame({'date': datetime_df, 'year': self.data_year, 'month': self.data_month,\n 'day': self.data_day, 'TAvg (C)': diff_tavg, 'TMax (C)': diff_tmax,\n 'TMin (C)': diff_tmin, 'TDew (C)': diff_tdew,\n 'Vapor Pres (kPa)': diff_ea, 'RHAvg (%)': diff_rhavg, 'RHMax (%)': diff_rhmax,\n 'RHMin (%)': diff_rhmin, 'Rs (w/m2)': diff_rs, 'Opt - Orig Rs_TR (w/m2)': diff_rs_tr,\n 'Rso (w/m2)': diff_rso, 'Windspeed (m/s)': diff_ws, 'Precip (mm)': diff_precip,\n 'ETr (mm)': diff_etr, 'ETo (mm)': diff_eto}, index=datetime_df)\n\n # Creating a fill dataframe that tracks where missing data was filled in\n fill_df = pd.DataFrame({'date': datetime_df, 'year': self.data_year, 'month': self.data_month,\n 'day': self.data_day, 'TMax (C)': self.fill_tmax, 'TMin (C)': self.fill_tmin,\n 'TDew (C)': self.fill_tdew, 'Vapor Pres (kPa)': self.fill_ea, 'Rs (w/m2)': self.fill_rs,\n 'Complete Record Rso (w/m2)': self.fill_rso},\n index=datetime_df)\n\n # Open up pandas excel writer\n output_writer = pd.ExcelWriter(self.output_file_path, engine='xlsxwriter')\n # Convert data frames to xlsxwriter excel objects\n output_df.to_excel(output_writer, sheet_name='Corrected Data', na_rep=self.missing_fill_value)\n delta_df.to_excel(output_writer, sheet_name='Delta (Corr - Orig)', na_rep=self.missing_fill_value)\n fill_df.to_excel(output_writer, sheet_name='Filled Data', na_rep=self.missing_fill_value)\n # Save output file\n output_writer.save()\n\n logger = open(self.log_file, 'a')\n if self.script_mode == 1 and self.fill_mode == 1:\n if np.isnan(self.eto).any() or np.isnan(self.etr).any():\n print(\"\\nSystem: After finishing corrections and filling data, \"\n \"ETr and ETo still had missing observations.\")\n logger.write('After finishing corrections and filling data, '\n 'ETr and ETo still had missing observations. \\n')\n else:\n logger.write('The output file for this station has a complete record of ETo and ETr observations. \\n')\n else:\n pass\n logger.write('\\nThe file has been successfully processed and output files saved at %s.' 
%\n dt.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n logger.close()", "def __enter__(self, content):\n self.filename = open(self.filename, self.mode)\n self.filename.write(content)", "def test_assign_clusters_sparse(self, new_data, filename):\n\n sqlalchemy_conn_str = open('../conf/sqlalchemy_conn_str.txt', 'r').read()\n engine = create_engine(sqlalchemy_conn_str)\n \n print('creating test sparse matrix...')\n if self.split_type == 'random':\n averages_seg = pd.read_sql('SELECT * FROM clust_sparse_avebysegment_random',con=engine)\n averages_rt = pd.read_sql('SELECT * FROM clust_sparse_avebyrt_random',con=engine)\n if self.split_type == 'date':\n averages_seg = pd.read_sql('SELECT * FROM clust_sparse_avebysegment_date',con=engine)\n averages_rt = pd.read_sql('SELECT * FROM clust_sparse_avebyrt_date',con=engine)\n\n averages_seg['exists'] = 1\n test_data_exists = pd.merge(new_data, averages_seg[['segment_id', 'exists']], on=['segment_id'])\n test_exists = test_data_exists[test_data_exists['exists']==1]\n test_notexists = test_data_exists[test_data_exists['exists']!=1] \n \n test_matrix_exists = pd.merge(test_exists[['segment_id', 'road_type']], averages_seg, how='left', on=['segment_id'])\n test_matrix_notexists = pd.merge(test_notexists[['segment_id', 'road_type']], averages_rt, how='left', on=['road_type'])\n test_matrix = pd.concat([test_matrix_exists, test_matrix_notexists])\n test_matrix = test_matrix.fillna(0) \n \n test_sparse_matrix = test_matrix.drop(columns = ['segment_id', 'road_type', 'exists', 'index', 'roadtypekey', 'segmentskey'])\n num = list(range(len(list(averages_seg))-4))\n columns = [str(item) for item in num]\n test_sparse_matrix = test_sparse_matrix[columns] \n \n print('clustering new data...')\n cluster_model = joblib.load(filename)\n cluster_predictions = cluster_model.predict(test_sparse_matrix)\n \n clusterdf = pd.DataFrame(cluster_predictions,columns = ['cluster_sparse'])\n clusterdf['index'] = clusterdf.index\n segmentdf = test_matrix['segment_id'].to_frame()\n segmentdf['index'] = segmentdf.index\n test_cluster_df_sparse = pd.merge(clusterdf, segmentdf, on=['index'])\n test_cluster_df_sparse = test_cluster_df_sparse[['segment_id','cluster_sparse']].groupby(['segment_id','cluster_sparse']).count()\n \n return test_cluster_df_sparse.reset_index()", "def add_entries(self, entries):\n write_p = self._pointer\n for entry in entries:\n write_header(write_p, entry._entry_p)\n for block in entry.get_blocks():\n write_data(write_p, block, len(block))\n write_finish_entry(write_p)", "def read_input_files(self):\r\n\r\n for input_file in self.list_of_input_files:\r\n input_file.read_header_of_file()\r\n self.list_of_header_objects.extend(input_file.list_of_header_objects)\r\n self.list_of_header_objects_without_ID.extend(input_file.list_of_header_objects_without_ID)\r\n self.list_of_contigs.extend(input_file.list_of_contigs)\r\n\r\n self.list_of_header_objects = list(toolz.unique(self.list_of_header_objects, key=lambda x: x.tag_and_ID))\r\n self.list_of_header_objects_without_ID = list(\r\n toolz.unique(self.list_of_header_objects_without_ID, key=lambda x: x.line))\r\n self.list_of_contigs = list(toolz.unique(self.list_of_contigs, key=lambda x: x.line))\r\n self.list_of_header_objects.extend(self.list_of_header_objects_without_ID)\r\n self.list_of_header_objects.sort(key=lambda x: x.line)\r\n self.list_of_header_objects.extend(self.list_of_contigs)\r\n self.list_of_header_objects.sort(key=lambda x: x.tag, reverse=False)\r\n 
self.create_body_header_line_for_output()\r\n self.write_header_in_output_file()\r\n\r\n list_of_chrom = list(self.indices.keys())\r\n list_of_chrom.sort(key=lambda x: self.alphanum_key(x))\r\n for chrom in list_of_chrom:\r\n self.list_of_body_objects.clear()\r\n for input_file in self.list_of_input_files:\r\n input_file.read_specific_chrom_body_of_file(chrom)\r\n self.list_of_body_objects.extend(input_file.list_of_body_objects)\r\n\r\n self.adjust_body_records_to_samples()\r\n self.list_of_body_objects = list(toolz.unique(self.list_of_body_objects, key=lambda x: x.line))\r\n self.list_of_body_objects.sort(key=lambda x: self.alphanum_key(x.line))\r\n self.verify_and_merge_body_records()\r\n self.write_specific_chrom_in_output_file()", "def WriteFileSize(self):\n # Simply a calculation of the number of clusters (e.g. sectors) * 512\n total_size = 0\n for cluster_range in self.cluster_ranges:\n clusters = cluster_range.split(\"-\")\n difference = int(clusters[1]) - int(clusters[0]) + 1\n self.cluster_list.extend(self.CreateList(int(clusters[0]), int(clusters[1])))\n print(f\"Cluster difference between {clusters[1]} and {clusters[0]} is {difference}\")\n total_size += difference*512\n print(f\"Total size has been calculated as {total_size}\")\n with open(self.output_file, \"r+b\") as fh:\n seeker = (self.root_directory_offset*self.sector_size)+((self.index_number-1)*self.directory_index_size)+(self.file_size_offset)\n #s_array = bytearray()\n print(f\"Reversing {total_size}\")\n ba_size = (total_size).to_bytes(4, byteorder='little')\n print(f\"Preparing to write {ba_size} to {seeker}\")\n fh.seek(seeker)\n fh.write(ba_size)\n print(\"File size written to root directory\")\n return True", "def store_individual_means(src_file: H5File) -> None:\n paths = rawnav.all_signal_dataset_paths(src_file)\n for path in paths:\n points = len(src_file[path])\n data = np.empty(points, dtype=np.float64)\n src_file[path].read_direct(data)\n src_file[path].attrs['mean'] = data.mean()\n return", "def updateClusterInfo(self):\n self.nPoints = len(self.labels)\n self.n = len(np.unique(self.labels))\n self.centers = [ [0.0 for j in range(3)] for i in range(self.n)]", "def write_chunk(self, outfile, tag, data):\n outfile.write(struct.pack(\"!i\", len(data)))\n outfile.write(tag)\n outfile.write(data)\n checksum = zlib.crc32(tag)\n checksum = zlib.crc32(data, checksum)\n outfile.write(struct.pack(\"!i\", checksum))", "def write_map_file(new_path, flat_seq, chr_id, cname, map, chr_no):\n\n f_flat = open('%s/genome/Contig%d.flat' % (new_path, chr_id[0]+1), 'w') ## create new contig file in .flat format in target directory\n f_flat.write(flat_seq)\n f_flat.close()\n\n i = 0\n tc = len(cname)\n for ele in cname: ## writing a mapping file old contig to new contig information.\n if i == 0:\n start = 1\n stop = map[i]\n else:\n start = p_stop\n stop = start + map[i] - 1\n print 'Contig%d\\t%s\\t%d\\t%d\\t%d' %(chr_id[0]+1, ele, start, stop, map[i])\n if i==(tc-1): \n break\n print 'Contig%d\\tNSPACER\\t%d\\t%d\\t%d' % (chr_id[0]+1, stop+1, stop+25000, 25000) \n p_stop = stop + 25001 # default spacer nts \n i += 1 \n \n (flat_seq, cname, map) = ('', [], [])\n chr_no.append(chr_id[0]+1)\n chr_id = chr_id[1:]\n\n return flat_seq, cname, map, chr_no, chr_id", "def prepare_statistics(self):\n\n # statistics of clustering files\n len0 = len(self.cluster_lists[0])\n len1 = len(self.cluster_lists[1])\n longer_index = 0 if len0 >= len1 else 1\n shorter_index = 1 if len1 <= len0 else 0\n\n percentage_stars = \"%.2f\" % (100.0 * 
float(self.shared_spec_num)/float(self.cluster_spectra_num[shorter_index]))\n percentage_starlets = \"%.2f\" % (100.0 * float(self.shared_spec_num)/float(self.cluster_spectra_num[longer_index]))\n\n head = \"{0:<25}{1:<20}{2:<20}\\n\".format(\"name\", \"number\", \"description\")\n rows = \"\"\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"stars No.\", self.stars_length, \"in file with less(or equal) clusters: file\" + str(shorter_index))\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"starlets No.\", self.starlets_length, \"in file with more(or equal) clusters: file\" + str(longer_index))\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"identical cluster No.\", self.similarity_dist[10], \"between them\")\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"spectrum No\", self.cluster_spectra_num[shorter_index], \"in stars\")\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"spectrum No\", self.cluster_spectra_num[longer_index], \"in starlets \")\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"shared spectrum No\", self.shared_spec_num, \"between them\")\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"shared spectrum percent\", percentage_stars, \"in stars\")\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"shared spectrum percent\", percentage_starlets, \"in starlets\")\n self.tables.append(('statistics of files', head, rows))\n\n # distribution of cluster size in stars\n head = '{0:<20}{1:<20}{2:<20}{3:<20}\\n'.format(\"cluster size\",\"No.\", \"percentage\", \"accumulate pecentage\")\n rows = \"\"\n rows += \"{0:<20}{1:<20}\\n\".format(\"%.2f\" % (self.ave_star_size), \"average\")\n accumulate_num = 0\n for key in sorted(self.cluster_size_dist[shorter_index].keys()):\n value = self.cluster_size_dist[shorter_index][key]\n accumulate_num += value\n percent = \"%.2f\" % (100 * value/self.stars_length)\n accum_percent = \"%.2f\" % (100 * accumulate_num/self.stars_length)\n rows += '{0:<20}{1:<20}{2:<20}{3:<20}\\n'.format(key, value, percent, accum_percent)\n self.tables.append(('distribution of cluster size in stars', head, rows))\n \n head = '{0:<20}{1:<20}{2:<20}{3:<20}\\n'.format(\"cluster size\",\"No.\", \"percentage\", \"accumulate pecentage\")\n rows = \"\"\n rows += \"{0:<20}{1:<20}\\n\".format(\"%.2f\" % (self.ave_starlet_size), \"average\")\n accumulate_num = 0\n for key in sorted(self.cluster_size_dist[longer_index].keys()):\n value = self.cluster_size_dist[longer_index][key]\n accumulate_num += value\n percent = \"%.2f\" % (100 * value/self.starlets_length)\n accum_percent = \"%.2f\" % (100 * accumulate_num/self.starlets_length)\n rows += '{0:<20}{1:<20}{2:<20}{3:<20}\\n'.format(key, value, percent, accum_percent)\n self.tables.append(('distribution of cluster size in starlets', head, rows))\n\n # distribution of similarity\n head = \"{0:<20}{1:<20}{2:<20}{3:<20}\\n\".format(\"similarity score\", \"pairs of clusters\", \"percentage(stars)\", \"percentage(starlets)\")\n rows = \"\"\n for key in reversed(sorted(self.similarity_dist.keys())):\n value = self.similarity_dist[key]\n percent_star = \"%.2f\" % (100.0*value/self.stars_length)\n percent_starlet = \"%.2f\" % (100.0*value/self.starlets_length)\n rows += '{0:<20}{1:<20}{2:<20}{3:<20}\\n'.format(key, value, percent_star, percent_starlet)\n self.tables.append(('distribution of similarity (identical = 10)', head, rows))\n\n # distribution of star divide factors\n head = '{0:<20}{1:<20}{2:<20}\\n'.format(\"divide factor\",\"No.\",\"percentage\")\n rows = \"\"\n rows += \"{0:<20}{1:<20}\\n\".format(\"%.2f\" % 
(self.ave_divide_factor_star), \"average\")\n for key in sorted(self.star_divide_factor_dist.keys()):\n value = self.star_divide_factor_dist[key]\n percent_star = \"%.2f\" % (100.0*value/self.stars_length)\n rows += '{0:<20}{1:<20}{2:<20}\\n'.format(key, value, percent_star)\n self.tables.append(('distribution of star divide factors', head, rows))\n\n # distribution of starlet divide factors\n head = '{0:<20}{1:<20}{2:<20}\\n'.format(\"divide factor\",\"No.\",\"percentage\")\n rows = \"\"\n rows += \"{0:<20}{1:<20}\\n\".format(\"%.2f\" % (self.ave_divide_factor_starlet), \"average\")\n for key in sorted(self.starlet_divide_factor_dist.keys()):\n value = self.starlet_divide_factor_dist[key]\n percent_starlet = \"%.2f\" % (100.0*value/self.starlets_length)\n rows += '{0:<20}{1:<20}{2:<20}\\n'.format(key, value, percent_starlet)\n self.tables.append(('distribution of starlet divide factors', head, rows))", "def associate_files(self):\n # Open starinfo file and define structured array\n starinfo_file = self.starinfo_file\n nstar = sum(1 for line in open(starinfo_file))\n infoname = ['obj', 'std', 'caldir', 'altname']\n infofmt = ['|S25', '|S25', '|S25', '|S25']\n starinfo = np.zeros(nstar, dtype={\n 'names': infoname, 'formats': infofmt})\n with open(starinfo_file, 'r') as arq:\n for i in range(nstar):\n linelist = arq.readline().split()\n for j in range(len(infoname)):\n starinfo[i][j] = linelist[j]\n\n if self.stored_sens:\n self.load_storedsens()\n\n os.chdir(self.raw_dir)\n\n l = glob.glob('*.fits')\n l.sort()\n\n headers = []\n headers_ext1 = []\n for i in l:\n try:\n headers.append(fits.getheader(i, ext=0))\n headers_ext1.append(fits.getheader(i, ext=1))\n except IOError:\n print('IOError reading file {:s}.'.format(i))\n raise SystemExit(0)\n\n oversc = np.array(\n [('overscan') in i for i in headers_ext1], dtype='bool')\n\n mjds = np.array([i['mjd-obs'] for i in headers_ext1], dtype='float32')\n idx = np.arange(len(l))\n\n images = np.array([\n l[i] for i in idx if (\n (headers[i]['obstype'] == 'OBJECT') &\n (headers[i]['object'] != 'Twilight') &\n (headers[i]['obsclass'] != 'acq'))])\n\n field_names = [\n 'filename', 'observatory', 'instrument', 'detector',\n 'grating', 'filter1', 'obsclass', 'object', 'obstype',\n 'grating_wl', 'overscan', 'mjd', 'ccdsum']\n types = [\n 'S120', 'S60', 'S60', 'S60', 'S60', 'S60', 'S60', 'S60', 'S60',\n 'float32', 'bool', 'float32', 'S60']\n hdrkeys = [\n 'observat', 'instrume', 'detector', 'grating', 'filter1',\n 'obsclass', 'object', 'obstype', 'grwlen']\n\n hdrpars_type = [\n (field_names[i], types[i]) for i in range(len(field_names))]\n\n hdrpars = np.array([\n ((l[i],) + tuple([headers[i][j] for j in hdrkeys]) +\n (oversc[i],) + (mjds[i],) + (headers_ext1[i]['ccdsum'],))\n for i in idx], dtype=hdrpars_type)\n\n associated = []\n\n for i, j in enumerate(images):\n\n # Take great care when changing this.\n hdr = fits.getheader(j, ext=0)\n hdr_ext1 = fits.getheader(j, ext=1)\n mjd = hdr_ext1['mjd-obs']\n\n element = {\n 'image': j, 'observatory': hdr['observat'],\n 'instrument': hdr['instrume'],\n 'detector': hdr['detector'], 'grating_wl': hdr['grwlen'],\n 'mjd': mjd, 'grating': hdr['grating'],\n 'filter1': hdr['filter1'], 'obsclass': hdr['obsclass'],\n 'object': hdr['object']}\n\n if self.stored_sens:\n ssf = self.stored_sensfunc\n element['standard_star'] = ssf['filename'][\n (ssf['observatory'] == hdr['observat']) &\n (ssf['detector'] == hdr['detector']) &\n (ssf['grating'] == hdr['grating']) &\n (ssf['instrument'] == hdr['instrume']) &\n 
(ssf['filter1'] == hdr['filter1']) &\n (ssf['maskname'] == hdr['maskname'])]\n else:\n element['standard_star'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'OBJECT') &\n (np.array([k in ['partnerCal', 'progCal']\n for k in hdrpars['obsclass']], dtype='bool')) &\n (hdrpars['object'] != 'Twilight') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (hdrpars['grating'] == hdr['grating']) &\n (hdrpars['filter1'] == hdr['filter1']) &\n (abs(hdrpars['grating_wl'] - hdr['grwlen']) <=\n self.cfg.getfloat('associations', 'stdstar_wltol')) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'stdstar_ttol'))]\n\n element['flat'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'FLAT') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['grating'] == hdr['grating']) &\n (hdrpars['grating_wl'] == hdr['grwlen']) &\n (hdrpars['detector'] == hdr['detector']) &\n (abs(mjds - mjd) <= self.cfg.getfloat('associations',\n 'flat_ttol'))]\n\n element['twilight'] = hdrpars['filename'][\n (hdrpars['object'] == 'Twilight') &\n (hdrpars['obstype'] == 'OBJECT') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (hdrpars['grating'] == hdr['grating']) &\n (abs(hdrpars['grating_wl'] - hdr['grwlen']) <=\n self.cfg.getfloat('associations', 'twilight_wltol')) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'twilight_ttol'))]\n\n c = 'twilight'\n if len(element[c]) > 1:\n element[c] = closest_in_time(element[c], j)\n elif len(element[c]) == 1:\n element[c] = element[c][0]\n elif len(element[c]) == 0:\n element[c] = ''\n\n # A flat close to the twilight observation for a better\n # response function.\n if element['twilight']:\n twipars = hdrpars[hdrpars['filename'] == element['twilight']]\n element['twilight_flat'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'FLAT') &\n (hdrpars['observatory'] == twipars['observatory']) &\n (hdrpars['detector'] == twipars['detector']) &\n (hdrpars['grating'] == twipars['grating']) &\n (hdrpars['grating_wl'] == twipars['grating_wl']) &\n (abs(mjds - twipars['mjd']) <= self.cfg.getfloat(\n 'associations', 'twilight_ttol'))]\n else:\n element['twilight_flat'] = np.array([], dtype='S60')\n\n element['arc'] = hdrpars['filename'][\n # (hdrpars['object'] == 'CuAr') &\n (hdrpars['obstype'] == 'ARC') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (hdrpars['grating'] == hdr['grating']) &\n (hdrpars['grating_wl'] == hdr['grwlen']) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'arc_ttol'))]\n\n element['bias'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'BIAS') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'bias_ttol')) &\n (\n (hdrpars['overscan'] & (self.fl_over == 'yes')) |\n (~hdrpars['overscan'] & (self.fl_over == 'no'))\n )]\n\n im = fits.open(element['image'])\n ishape = np.array(im[1].data.shape, dtype='float32')\n im.close()\n del(im)\n\n validBiases = np.ones(len(element['bias']), dtype='bool')\n k = 0\n\n for biasImage in element['bias']:\n\n bias = fits.open(biasImage)\n bshape = np.array(bias[1].data.shape, dtype='float32')\n bias.close()\n del(bias)\n\n #\n # Elinates biases if they differ in array size from\n # the science image. 
Small differences are normal due to\n # the overscan subtraction in processed bias frames.\n #\n if np.any(np.abs(bshape / ishape - 1.0) > 0.10):\n validBiases[k] = False\n\n k += 1\n\n element['bias'] = element['bias'][validBiases]\n del(k)\n\n element['bpm'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'BPM') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (hdrpars['ccdsum'] == hdr_ext1['ccdsum'])]\n\n categories = ['flat', 'bias', 'arc', 'standard_star',\n 'bpm', 'twilight_flat']\n\n for c in categories:\n if len(element[c]) > 1:\n element[c] = closest_in_time(element[c], j)\n elif len(element[c]) == 0:\n element[c] = ''\n elif len(element[c]) == 1:\n element[c] = (element[c])[0]\n\n associated.append(element)\n\n # Define mdf filename\n # Based in gprepare.cl\n # Did not account for observation in Nod-and-Shuffle\n for i in associated:\n header_flat = [\n k for j, k in enumerate(headers) if l[j] == i['flat']\n ]\n if len(header_flat):\n header_flat = header_flat[0]\n MaskName = header_flat['maskname']\n if MaskName == \"IFU-2\":\n slits = 'both'\n elif MaskName == \"IFU-B\":\n slits = 'blue'\n elif MaskName == \"IFU-R\":\n slits = 'red'\n i['slits'] = slits\n\n if self.object_filter:\n objs = self.object_filter.split(',')\n sci_ims = [\n i for i in associated if (\n (i['obsclass'] == 'science') &\n (i['object'] in objs))]\n else:\n sci_ims = [i for i in associated if i['obsclass'] == 'science']\n\n if self.all_stars:\n std_ims = [\n i for i in associated if i['obsclass'] in ['partnerCal',\n 'progCal']]\n else:\n used_stds = [i['standard_star'] for i in sci_ims]\n std_ims = [i for i in associated if i['image'] in used_stds]\n\n # Get star info from starinfo.dat\n possible_names = np.concatenate((starinfo['obj'], starinfo['std'],\n starinfo['altname']))\n n_names = len(possible_names)\n\n for i, j in enumerate(possible_names):\n possible_names[i] = (j.lower()).replace(' ', '')\n\n for i in std_ims:\n # Removes the 'standard_star' key if the dictionary\n # element in question refers to a standard star.\n del i['standard_star']\n starname = (i['object'].lower()).replace(' ', '')\n\n try:\n stdstar_idx = (\n np.arange(n_names)[possible_names == starname] %\n (n_names / 3))[0]\n except:\n raise Exception(\n 'Standard star named {:s} not found in file {:s}'.\n format(starname, starinfo_file))\n\n i['stdstar'] = starinfo[stdstar_idx]['std']\n\n if starinfo[stdstar_idx]['caldir'] == 'gireds_data':\n i['caldir'] = pkg_resources.resource_filename(\n 'gireds', 'data/')\n else:\n i['caldir'] = starinfo[stdstar_idx]['caldir']\n\n self.sci = sci_ims\n self.std = std_ims\n\n # Writes the file association dictionary to an ASCII file\n # in the run directory.\n\n if not self.dry_run:\n try:\n os.mkdir(self.products_dir)\n except OSError as err:\n if err.errno == 17:\n pass\n else:\n raise err\n try:\n os.mkdir(self.run_dir)\n except OSError as err:\n if err.errno == 17:\n pass\n else:\n raise err\n\n if not self.dry_run:\n os.chdir(self.run_dir)\n json.dump(\n sci_ims, open('file_associations_sci.dat', 'w'),\n sort_keys=True, indent=4)\n json.dump(\n std_ims, open('file_associations_std.dat', 'w'),\n sort_keys=True, indent=4)", "def parse_cluster(\n fasta_path, file_dict=None, file_writer=None, neighbor_joining=False\n):\n cluster_id = fasta_path.name[:-3]\n outdir = fasta_path.parent\n clusters = parse_cluster_fasta(fasta_path)\n if len(clusters) < 2:\n # fasta_path.unlink()\n logger.error(f\"Singleton Cluster {cluster_id} is size {len(clusters)}\")\n 
cluster_dict = {\n \"size\": len(clusters),\n \"n_memb\": None,\n \"n_members\": None,\n \"n_adj\": None,\n \"adj_groups\": None,\n }\n return int(cluster_id)\n # calculate MSA and return guide tree\n muscle_args = [\n \"-in\",\n f\"{outdir}/{cluster_id}.fa\",\n \"-out\",\n f\"{outdir}/{cluster_id}.faa\",\n \"-diags\",\n \"-sv\",\n \"-maxiters\",\n \"2\",\n \"-quiet\",\n \"-distance1\",\n \"kmer20_4\",\n ]\n if len(clusters) >= 4:\n muscle_args += [\n \"-tree2\",\n f\"{outdir}/{cluster_id}.nwk\",\n ]\n if neighbor_joining:\n muscle_args += [\"-cluster2\", \"neighborjoining\"] # adds 20%\n try:\n muscle = sh.Command(\"muscle\", search_paths=SEARCH_PATHS)\n except sh.CommandNotFound:\n logger.error(\"muscle must be installed first.\")\n sys.exit(1)\n muscle(muscle_args)\n # fasta_path.unlink()\n clusters[\"prot.idx\"] = clusters[\"path\"].map(file_dict)\n clusters.sort_values(by=[\"prot.idx\", \"frag.id\", \"frag.pos\"], inplace=True)\n n_adj, adj_gr_count, unused_adj_group = calculate_adjacency_group(\n clusters[\"frag.pos\"], clusters[\"frag.idx\"]\n )\n idx_values = clusters[\"prot.idx\"].value_counts()\n idx_list = list(idx_values.index)\n idx_list.sort()\n write_tsv_or_parquet(clusters, outdir / f\"{cluster_id}.{CLUSTER_FILETYPE}\")\n cluster_dict = {\n \"size\": len(clusters),\n \"n_memb\": len(idx_values),\n \"n_members\": str(idx_list),\n \"n_adj\": n_adj,\n \"adj_groups\": adj_gr_count,\n }\n for group_id, subframe in clusters.groupby(by=[\"prot.idx\"]):\n proteome_frame = subframe.copy()\n proteome_frame[\"hom.cluster\"] = cluster_id\n proteome_frame[\"hom.cl_size\"] = len(idx_values)\n proteome_frame.drop(\n proteome_frame.columns.drop(HOMOLOGY_COLS), # drop EXCEPT these\n axis=1,\n inplace=True,\n )\n with file_writer(group_id) as file_handle:\n proteome_frame.to_csv(file_handle, header=False, sep=\"\\t\")\n return int(cluster_id), cluster_dict", "def writeSpecificData( self, file, data, bAddBeginOfDataChunk = True ):\n if( not isinstance( data, np.ndarray ) ):\n data = np.array( data, dtype = self.dataType )\n if( bAddBeginOfDataChunk ):\n file.write( \"data\" )\n file.write( struct.pack( \"I\", len(data)*self.nNbrBitsPerSample/8 ) )\n data.tofile( file )", "def write_hdf5( self, iteration ) :\n # Before opening the file, select the particles that\n # need to be written for each species\n # (This allows to know the number of particles to be written,\n # which is needed when setting up the file)\n select_array_dict = {}\n selected_nlocals_dict = {}\n selected_nglobal_dict = {}\n # Loop over the different species, select the particles and fill\n # select_array_dict, selected_nlocals_dict, selected_nglobal_dict\n for species_name in sorted(self.species_dict.keys()):\n # Select the particles that will be written\n species = self.species_dict[species_name]\n select_array_dict[species_name] = self.apply_selection( species )\n # Get their total number\n n = select_array_dict[species_name].sum()\n if self.comm_world is not None :\n # In MPI mode: gather and broadcast an array containing\n # the number of particles on each process\n selected_nlocals_dict[species_name] = mpiallgather( n )\n selected_nglobal_dict[species_name] = \\\n sum(selected_nlocals_dict[species_name])\n else:\n # Single-proc output\n selected_nlocals_dict[species_name] = None\n selected_nglobal_dict[species_name] = n\n\n # Find the file name\n filename = \"data%08d.h5\" %iteration\n fullpath = os.path.join( self.write_dir, \"hdf5\", filename )\n\n # Create the file and setup its attributes\n # (can be done by one 
proc or in parallel)\n self.create_file_empty_particles( fullpath, self.top.it,\n self.top.time, self.top.dt, selected_nglobal_dict )\n\n # Open the file again (possibly in parallel)\n f = self.open_file( fullpath, parallel_open=self.lparallel_output )\n # (f is None if this processor does not participate in writing data)\n\n # Loop over the different species and write the requested quantities\n for species_name in sorted(self.species_dict.keys()) :\n\n # Get the HDF5 species group\n if f is not None:\n species_path = \"/data/%d/particles/%s\"%(iteration,species_name)\n species_grp = f[species_path]\n else:\n species_grp = None\n\n # Get the relevant species object and selection array\n species = self.species_dict[species_name]\n select_array = select_array_dict[species_name]\n n_rank = selected_nlocals_dict[species_name]\n\n # Write the datasets for each particle datatype\n self.write_particles( species_grp, species, n_rank, select_array )\n\n # Close the file\n if f is not None:\n f.close()", "def write(self, fname, group=None, write_mode='w'):\n with h5py.File(fname, write_mode) as f:\n # write to group if group is given\n if group is not None:\n fobj = f.create_group(group)\n else:\n fobj = f\n for chan, ts in zip(self.channels, self.data):\n dset = fobj.create_dataset(chan, data=ts, compression='gzip')\n dset.attrs['sample_rate'] = self.fs\n dset.attrs['t0'] = self.t0\n dset.attrs['channel'] = str(chan)\n dset.attrs['name'] = str(chan)", "def update(self, i):\n data = next(self.stream)\n # Set x and y data...\n self.scat.set_offsets(np.concatenate([data[0], data[1]]).reshape((11, 2)))\n return self.scat,", "def writefile(name, instream, start=None, end=None, append=False):", "def process_metadata(self, metadata_file, show_progress=False):\n vcf = cyvcf2.VCF(self.data_file)\n individual_names = list(vcf.samples)\n vcf.close()\n self.num_samples = len(individual_names) * 2\n populations = list()\n sample_metadata = list()\n with open(metadata_file, \"r\") as max_planck_metadata:\n # Parse the individual metadata out of the file.\n lines = max_planck_metadata.read().splitlines()\n for line in lines[1:]:\n metadata = {}\n row = line.split(\" \")\n name = row[0]\n metadata[\"name\"] = name\n metadata[\"age\"] = int(row[2]) / GENERATION_TIME\n populations.append(row[1])\n sample_metadata.append(metadata)\n for population in populations:\n pop_id = self.samples.add_population(\n {\"name\": population, \"super_population\": \"Max Planck\"}\n )\n # Assumes two samples per population\n for pop_id, metadata in enumerate(sample_metadata):\n self.samples.add_individual(\n time=metadata[\"age\"], metadata=metadata, population=pop_id, ploidy=2\n )", "def load_back_from_disk(data_dir, istrain=True):\n \"\"\"load back metadata_df\"\"\"\n meta_data = pickle.load(open(os.path.join(data_dir, 'meta.pkl'), 'rb'))\n metadata_rows = meta_data[0]\n max_node = meta_data[1]\n\n \"\"\"itershard by loading from disk\"\"\"\n all_X, all_y, all_size, all_L, all_names, all_node_img = [], [], [], [], [], []\n\n for _, row in enumerate(metadata_rows):\n X = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['X'])))\n L = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['L'])))\n y = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['y'])))\n size = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['size'])))\n names = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['name'])))\n node_img = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['node_img'])))\n\n 
\"\"\" stack to list\"\"\"\n all_X.append(X)\n all_y.append(y)\n all_L.append(L)\n all_size.append(size)\n all_names.append(names)\n all_node_img.append(node_img)\n\n \"\"\" return a Dataset contains all X, y, w, ids\"\"\"\n all_X = np.squeeze(np.vstack(all_X))\n all_L = np.squeeze(np.vstack(all_L))\n all_y = np.squeeze(np.concatenate(all_y))\n all_size = np.squeeze(np.concatenate(all_size))\n all_names = np.squeeze(np.concatenate(all_names))\n all_node_img = np.squeeze(np.concatenate(all_node_img))\n\n # create output dataset\n dataset = dict()\n if istrain:\n dataset['X'] = all_X[:TRAIN_NUM]\n dataset['y'] = all_y[:TRAIN_NUM]\n dataset['size'] = all_size[:TRAIN_NUM]\n dataset['L'] = all_L[:TRAIN_NUM]\n dataset['name'] = all_names[:TRAIN_NUM]\n dataset['node_img'] = all_node_img[:TRAIN_NUM]\n else:\n dataset['X'] = all_X[:TEST_NUM]\n dataset['y'] = all_y[:TEST_NUM]\n dataset['size'] = all_size[:TEST_NUM]\n dataset['L'] = all_L[:TEST_NUM]\n dataset['name'] = all_names[:TEST_NUM]\n dataset['node_img'] = all_node_img[:TEST_NUM]\n\n return dataset, max_node", "def exposed_write_data(self, chunk_id, data):\n local_filename = self.chunk_filename(chunk_id)\n with open(local_filename, \"w\") as file:\n file.write(data)\n # self.handle_table[chunk_id] = local_filename", "def write_data():", "def process_set_metadata(self, data, set_name):\n hdf5_handler = self.hdf5_manager.get_group(set_name)\n image_dir = os.path.join(self.data_path, self.image_dir_path[set_name])\n if 'test' in set_name:\n is_test = True\n data_ = data[0]\n filename_ids = data[1]\n annotations = data[2]\n category = data[3]\n supercategory = data[4]\n category_id = data[5]\n else:\n is_test = False\n data_ = data[0]\n annotations = data[1]\n annotation_id_dict = data[2]\n category = data[3]\n supercategory = data[4]\n category_id = data[5]\n filename_ids = data[6]\n images_fname_by_id = data[7]\n skeleton = data[8]\n keypoints = data[9]\n\n keypoints_ = str2ascii(keypoints)\n skeleton_ = np.array(pad_list(skeleton, -1), dtype=np.uint8)\n\n category_ = str2ascii(category)\n supercategory_ = str2ascii(supercategory)\n\n image_filenames = []\n coco_urls = []\n width = []\n height = []\n image_id = []\n\n annotation_id = []\n area = []\n iscrowd = [0, 1]\n segmentation = []\n num_keypoints = list(range(0, 17 + 1))\n keypoints_list = []\n bbox = []\n object_id = []\n\n # coco id lists\n # These are order by entry like in the annotation files.\n # I.e., coco_images_ids[0] has the object_id with the file_name, id, height, etc.\n # as coco_annotation_file[set_name][\"images\"][0]\n coco_images_ids = []\n coco_categories_ids = []\n coco_annotations_ids = []\n\n if is_test:\n object_fields = [\"image_filenames\", \"coco_urls\", \"width\", \"height\"]\n else:\n object_fields = [\"image_filenames\", \"coco_urls\", \"width\", \"height\",\n \"category\", \"supercategory\", \"boxes\", \"area\",\n \"iscrowd\", \"segmentation\",\n \"image_id\", \"category_id\", \"annotation_id\",\n \"num_keypoints\", \"keypoints\"]\n\n list_boxes_per_image = []\n list_keypoints_per_image = []\n list_object_ids_per_image = []\n list_image_filenames_per_num_keypoints = []\n list_object_ids_per_keypoint = [] # body part\n\n if self.verbose:\n print('> Adding data to default group:')\n prgbar = progressbar.ProgressBar(max_value=len(data_))\n\n counter = 0\n tmp_coco_annotations_ids = {}\n\n for i, key in enumerate(data_):\n annotation = data_[key]\n image_filenames.append(annotation[\"file_name\"])\n width.append(annotation[\"width\"])\n 
height.append(annotation[\"height\"])\n coco_urls.append(annotation[\"coco_url\"])\n image_id.append(annotation[\"id\"])\n\n if is_test:\n # *** object_id ***\n # [filename, coco_url, width, height]\n object_id.append([i, i, i, i])\n list_object_ids_per_image.append([i])\n else:\n boxes_per_image = []\n\n if \"object\" in annotation:\n for j, obj_idx in enumerate(annotation[\"object\"]):\n obj = annotation[\"object\"][obj_idx]\n area.append(obj[\"area\"])\n bbox.append(obj[\"bbox\"])\n annotation_id.append(obj[\"id\"])\n segmentation.append(obj[\"segmentation\"])\n keypoints_list.append(obj[\"keypoints\"])\n\n # *** object_id ***\n # [filename, coco_url, width, height,\n # category, supercategory,\n # bbox, area, iscrowd, segmentation,\n # \"image_id\", \"category_id\", \"annotation_id\"\n # \"num_keypoints\", \"keypoints\"]\n object_id.append([i, i, i, i,\n category.index(obj[\"category\"]), supercategory.index(\n obj[\"supercategory\"]),\n counter, counter, obj[\"iscrowd\"], counter,\n i, category.index(obj[\"category\"]), counter,\n obj[\"num_keypoints\"], counter])\n\n boxes_per_image.append(counter)\n\n # temporary var\n tmp_coco_annotations_ids[obj[\"id\"]] = counter\n\n # update counter\n counter += 1\n\n list_boxes_per_image.append(boxes_per_image)\n list_keypoints_per_image.append(boxes_per_image)\n list_object_ids_per_image.append(boxes_per_image)\n\n # update progressbar\n if self.verbose:\n prgbar.update(i)\n\n # update progressbar\n if self.verbose:\n prgbar.finish()\n\n if self.verbose:\n print('> Processing coco lists:')\n prgbar = progressbar.ProgressBar(max_value=len(annotations['images']))\n\n # set coco id lists\n for i, annot in enumerate(annotations['images']):\n fname_id = image_filenames.index(os.path.join(image_dir, annot['file_name']))\n coco_images_ids.append(fname_id)\n\n # update progressbar\n if self.verbose:\n prgbar.update(i)\n\n # update progressbar\n if self.verbose:\n prgbar.finish()\n\n coco_categories_ids = list(range(len(category)))\n\n if not is_test:\n if self.verbose:\n prgbar = progressbar.ProgressBar(max_value=len(annotations['annotations']))\n for i, annot in enumerate(annotations['annotations']):\n annot_id = tmp_coco_annotations_ids[annot['id']]\n coco_annotations_ids.append(annot_id)\n\n # update progressbar\n if self.verbose:\n prgbar.update(i)\n\n # update progressbar\n if self.verbose:\n prgbar.finish()\n\n # process lists\n if not is_test:\n if self.verbose:\n print('> Processing lists...')\n\n for i in range(len(keypoints)):\n imgs_per_num = [val[0] for _, val in enumerate(object_id) if val[8] == i]\n imgs_per_num = list(set(imgs_per_num)) # get unique values\n imgs_per_num.sort()\n list_image_filenames_per_num_keypoints.append(imgs_per_num)\n\n for i in range(len(keypoints)):\n objs_per_keypoint = [j for j, val in enumerate(\n keypoints_list) if val[i * 3] > 0 or val[i * 3 + 1] > 0]\n objs_per_keypoint = list(set(objs_per_keypoint)) # get unique values\n objs_per_keypoint.sort()\n list_object_ids_per_keypoint.append(objs_per_keypoint)\n\n hdf5_write_data(hdf5_handler, 'image_filenames',\n str2ascii(image_filenames), dtype=np.uint8,\n fillvalue=0)\n hdf5_write_data(hdf5_handler, 'coco_urls',\n str2ascii(coco_urls), dtype=np.uint8,\n fillvalue=0)\n hdf5_write_data(hdf5_handler, 'width',\n np.array(width, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'height',\n np.array(height, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'category',\n category_, dtype=np.uint8,\n fillvalue=0)\n 
hdf5_write_data(hdf5_handler, 'supercategory',\n supercategory_, dtype=np.uint8,\n fillvalue=0)\n hdf5_write_data(hdf5_handler, 'image_id',\n np.array(image_id, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'category_id',\n np.array(category_id, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'object_ids',\n np.array(object_id, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'object_fields',\n str2ascii(object_fields), dtype=np.uint8,\n fillvalue=0)\n hdf5_write_data(hdf5_handler, 'coco_images_ids',\n np.array(coco_images_ids, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'coco_categories_ids',\n np.array(coco_categories_ids, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'list_object_ids_per_image',\n np.array(pad_list(list_object_ids_per_image, -1), dtype=np.int32),\n fillvalue=-1)\n\n if not is_test:\n hdf5_write_data(hdf5_handler, 'annotation_id',\n np.array(annotation_id, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'keypoint_names',\n keypoints_, dtype=np.uint8,\n fillvalue=0)\n hdf5_write_data(hdf5_handler, 'skeleton',\n skeleton_, dtype=np.uint8,\n fillvalue=0)\n hdf5_write_data(hdf5_handler, 'boxes',\n np.array(bbox, dtype=np.float),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'iscrowd',\n np.array(iscrowd, dtype=np.uint8),\n fillvalue=-1)\n\n nrows = len(segmentation)\n ncols = max([len(elem) for elem in segmentation])\n dset = hdf5_handler.create_dataset('segmentation',\n (nrows, ncols),\n dtype=np.float,\n chunks=True,\n compression=\"gzip\",\n compression_opts=4,\n fillvalue=-1)\n\n if self.verbose:\n print(' -- Saving segmentation masks to disk (this will take some time)')\n prgbar = progressbar.ProgressBar(max_value=nrows)\n for i in range(nrows):\n dset[i, :len(segmentation[i])] = np.array(segmentation[i], dtype=np.float)\n if self.verbose:\n prgbar.update(i)\n\n if self.verbose:\n prgbar.finish()\n\n hdf5_write_data(hdf5_handler, 'area',\n np.array(area, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'num_keypoints',\n np.array(num_keypoints, dtype=np.uint8),\n fillvalue=0)\n hdf5_write_data(hdf5_handler, 'keypoints',\n np.array(keypoints_list, dtype=np.int32),\n fillvalue=0)\n hdf5_write_data(hdf5_handler, 'coco_annotations_ids',\n np.array(coco_annotations_ids, dtype=np.int32),\n fillvalue=-1)\n\n pad_value = -1\n hdf5_write_data(hdf5_handler, 'list_boxes_per_image',\n np.array(pad_list(list_boxes_per_image, pad_value), dtype=np.int32),\n fillvalue=pad_value)\n hdf5_write_data(hdf5_handler, 'list_keypoints_per_image',\n np.array(pad_list(list_keypoints_per_image, pad_value), dtype=np.int32),\n fillvalue=pad_value)\n hdf5_write_data(hdf5_handler, 'list_image_filenames_per_num_keypoints',\n np.array(pad_list(list_image_filenames_per_num_keypoints,\n pad_value), dtype=np.int32),\n fillvalue=pad_value)\n hdf5_write_data(hdf5_handler, 'list_object_ids_per_keypoint',\n np.array(pad_list(list_object_ids_per_keypoint,\n pad_value), dtype=np.int32),\n fillvalue=pad_value)", "def data_process(self):\n logging.info('Processing the data and split files')\n lines = Utility.file_len(self.fname)\n self.lines_to_be, self.split_files = Utility.split_files(self.fname, lines,\n cpu_count().real)", "def write_collected(self, names_file, kb_file, cat_file):\n with open(names_file, 'w') as fp:\n for kb_id, name in self.collected_names.items():\n fp.write('\\t'.join(['name', kb_id, name]) + '\\n')\n with open(kb_file, 'w') as fp:\n for kb_id, tail_set in 
self.collected_edges.items():\n for (rel, tail_id) in tail_set:\n fp.write('\\t'.join([rel, kb_id, tail_id]) + '\\n')\n with open(cat_file, 'w') as fp:\n for c, ms in self.collected_cat_mems.items():\n fp.write(c + '\\t' + self.kb[c].name + '\\t')\n fp.write('|'.join(ms) + '\\n')", "def writeNew(self,masters=[],mtime=0):\n tes3 = Tes3()\n tes3.hedr = Tes3_Hedr('HEDR',0)\n if self.isEsp(): tes3.hedr.fileType = 0\n elif self.isEsm(): tes3.hedr.fileType = 1\n elif self.isEss(): tes3.hedr.fileType = 32\n for master in masters:\n tes3.masters.append((master,modInfos[master].size))\n tes3.hedr.setChanged()\n tes3.setChanged()\n #--Write it\n path = os.path.join(self.dir,self.name)\n out = file(path,'wb')\n tes3.getSize()\n tes3.dump(out)\n out.close()\n self.setMTime(mtime)", "def write(self, filename, data, hdr):\n pass", "def publish_metadata_in_ipfs(self):\n self._printout(self._publish_metadata_in_ipfs(self.args.metadata_file))", "def cluster_by_addressLocality(input_file, output_file=None):\n pass", "def writeto(self, fileout):\n \n dump_pkl(self.data, fileout)", "def _read(self):\n\t\tself._infoMuscles = []\n\t\tself._infoCommonCellsInMuscles = []\n\t\tself._infoSpecialCells = []\n\t\tself._infoCommonMuscleConnections = []\n\t\tself._infoInterMuscSensorimotorConnections = {}\n\t\tself._infoSpecialConnections = []\n\t\tif rank==0:\n\t\t\tsection = None\n\t\t\tsensorimotorConnections = None\n\t\t\tsensorimotorMatrix = None\n\t\t\tfor line in open(\"../nnStructures/\"+self._inputFile,\"r\"):\n\t\t\t\tif line[0] == \"#\" or line[0] == \"\\n\": continue\n\t\t\t\telif line[0] == \"@\": section = float(line[1])\n\t\t\t\telif section == 1: self._infoMuscles.append(line.strip(\"\\n\").split())\n\t\t\t\telif section == 2: self._infoCommonCellsInMuscles.append(line.strip(\"\\n\").split())\n\t\t\t\telif section == 3: self._infoSpecialCells.append(line.strip(\"\\n\").split())\n\t\t\t\telif section == 4: self._infoCommonMuscleConnections.append(line.strip(\"\\n\").split())\n\t\t\t\telif section == 5:\n\t\t\t\t\tif line[0] == \"+\":\n\t\t\t\t\t\tdictName = line[1:].strip(\"\\n\")\n\t\t\t\t\t\tself._infoInterMuscSensorimotorConnections[dictName] = {}\n\t\t\t\t\t\tsensorimotorConnections = False\n\t\t\t\t\t\tsensorimotorMatrix = False\n\t\t\t\t\telif \"Connections\" in line:\n\t\t\t\t\t\t sensorimotorConnections = True\n\t\t\t\t\t\t self._infoInterMuscSensorimotorConnections[dictName][\"connections\"]=[]\n\t\t\t\t\telif \"WeightsMatrix\" in line:\n\t\t\t\t\t\t sensorimotorConnections = False\n\t\t\t\t\t\t sensorimotorMatrix = True\n\t\t\t\t\t\t self._infoInterMuscSensorimotorConnections[dictName][\"matrix\"]=[]\n\t\t\t\t\telif sensorimotorConnections:\n\t\t\t\t\t\tself._infoInterMuscSensorimotorConnections[dictName][\"connections\"].append(line.strip(\"\\n\").split())\n\t\t\t\t\telif sensorimotorMatrix:\n\t\t\t\t\t\tself._infoInterMuscSensorimotorConnections[dictName][\"matrix\"].append(line.strip(\"\\n\").split())\n\t\t\t\telif section == 6: self._infoSpecialConnections.append(line.strip(\"\\n\").split())\n\n\t\tself._infoMuscles = comm.bcast(self._infoMuscles,root=0)\n\t\tself._infoCommonCellsInMuscles = comm.bcast(self._infoCommonCellsInMuscles,root=0)\n\t\tself._infoSpecialCells = comm.bcast(self._infoSpecialCells,root=0)\n\t\tself._infoCommonMuscleConnections = comm.bcast(self._infoCommonMuscleConnections,root=0)\n\t\tself._infoInterMuscSensorimotorConnections = comm.bcast(self._infoInterMuscSensorimotorConnections,root=0)\n\t\tself._infoSpecialConnections = 
comm.bcast(self._infoSpecialConnections,root=0)", "def write_to(self, filename):\n with open(filename, 'w') as f:\n for xx, yy, zz, ww in zip(self.x, self.y, self.field, self.weight):\n f.write(\"%s %s %s %s\\n\" % (xx, yy, zz, ww))\n logger.info(\"Written data into file {0}\".format(filename))", "def write_data(infbfile,begin_N,dur_N,outfbfile):\n infbfile.seek_to_sample(begin_N)\n for i in range(begin_N,(begin_N+dur_N)):\n data = infbfile.read_sample()\n data.tofile(outfbfile)" ]
[ "0.56617075", "0.56513876", "0.561701", "0.55935436", "0.5525497", "0.5482356", "0.54683065", "0.54394776", "0.5432194", "0.5414132", "0.5392", "0.5390164", "0.53824586", "0.53770757", "0.5337196", "0.5310381", "0.5272025", "0.5262886", "0.5257946", "0.52343404", "0.52302665", "0.51557195", "0.5148239", "0.5117391", "0.51146483", "0.51024026", "0.5072759", "0.50514144", "0.50434846", "0.50263464", "0.50075895", "0.5003112", "0.5002889", "0.5001853", "0.49996492", "0.49911726", "0.496616", "0.49519604", "0.4950578", "0.49497914", "0.49481633", "0.49428162", "0.4936052", "0.49309793", "0.4909892", "0.49067438", "0.49060807", "0.48967275", "0.48928133", "0.4887533", "0.48864132", "0.4879015", "0.4876483", "0.4873808", "0.48727992", "0.4871278", "0.48706216", "0.48695087", "0.48627606", "0.48586673", "0.48584795", "0.48584732", "0.48570085", "0.48558074", "0.4852154", "0.48431015", "0.48398736", "0.4838828", "0.48347884", "0.48216334", "0.4819898", "0.48167446", "0.4816474", "0.48160616", "0.4815368", "0.48128214", "0.48122722", "0.48110574", "0.48088476", "0.47957045", "0.47922105", "0.47866854", "0.47853887", "0.47825685", "0.47799036", "0.4778337", "0.4774954", "0.4765721", "0.47641447", "0.47607678", "0.47551477", "0.47518814", "0.47372755", "0.47352648", "0.47330853", "0.47330108", "0.4727707", "0.4723569", "0.47230563", "0.47170973" ]
0.60996723
0
Writes hidden data from slack space into a stream. The examined slack space information is taken from the metadata.
def read(self, outstream: typ.BinaryIO): file_metadata = self.metadata.get_file("0")['metadata'] if self.fs_type == 'FAT': allocator_metadata = FATAllocatorMeta(file_metadata) self.fs.read(outstream, allocator_metadata) elif self.fs_type == 'NTFS': allocator_metadata = NTFSAllocatorMeta(file_metadata) self.fs.read(outstream, allocator_metadata) else: raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_hidden(file_name, data):\n # For *nix add a '.' prefix.\n prefix = '.' if os.name != 'nt' else ''\n file_name = prefix + file_name\n\n # Write file.\n with open(file_name, 'w') as f:\n f.write(data)\n\n # For windows set file attribute.\n if os.name == 'nt':\n ret = ctypes.windll.kernel32.SetFileAttributesW(file_name,\n FILE_ATTRIBUTE_HIDDEN)\n if not ret: # There was an error.\n raise ctypes.WinError()", "def write_data(self, blacklist=('normE', 'normEsquared', 'genRate')):\n\n start = time.time()\n self.data.write_data(blacklist=blacklist)\n end = time.time()\n self.log.info('Write time: %.2f seconds', end - start)", "def _stop_streaming_ram_to_host(self):\n self.regs.SDRAM_HOST_READ_GO = 0\n self.regs.CSTREAM_CFG = 0", "def _OpenWrite(self):\n if self._last_stream_numbers['event'] == 1:\n self._WriteStorageMetadata()", "def save(datastream):", "def Write(self):\n if not self._mem: return\n\n logger.info(\"Writing %s\" % self)\n if self.data is None:\n self.hostmemmgr.zero(self._mem, self.size)\n else:\n logger.info(\"=\" * 30, \"WRITING BUFFER\", \"=\" * 30)\n scapyfactory.Parse(self.data).Show()\n self.hostmemmgr.write(self._mem, bytes(self.data))\n logger.info(\"=\" * 30, \"END WRITING BUFFER\", \"=\" * 30)", "def write_max_violated_soft_tw(io_stream):\n io_stream.write('value max_violated_soft_tw\\n0\\n')", "def write_data():", "def collect_data(self):\n chatoutput = list()\n blacklist = set()\n join_groups = self.read_leftout_groups()\n metadata = list()\n for channel in self.groups:\n if channel.active:\n self.blacklist = self.blacklist.union(channel.groups_blocked)\n join_groups = join_groups.union(channel.groups)\n chatoutput.append(channel.output)\n metadata.append(channel.metadata)\n else:\n if self.leave:\n self.leavechannel(channel.dialog)\n self.join_groups(join_groups, blacklist)\n self.write_data(self.blacklist, \"blocked_groups\")\n self.write_data(metadata, \"groups.meta\")\n block_number = self.get_highest_chatblock()\n self.write_data(chatoutput, \"chat_block-{}\".format(block_number))\n self.write_leftout_groups()", "def _write_stream(self):\n enrich_df = self._process_stream()\n df_writer = enrich_df \\\n .writeStream \\\n .queryName(\"Agro Data Writer\") \\\n .foreachBatch(db_utils.foreach_batch_function) \\\n .option(\"checkpointLocation\", \"chk-point-dir\") \\\n .trigger(processingTime=\"1 minute\") \\\n .start()\n\n df_writer.awaitTermination()", "def remove_streaming(self):\n self.streaming = None", "def for_streaming_only(self):\n self.token['type'] = 'stream'\n\n return self", "def ignorableWhitespace(self, data):\n pass", "def render_sheet_to_stream(self, file_stream, sheet, **keywords):\n raise NotImplementedError(\"We are not writing to file\")", "def export(self, stream):\n pass", "def __exit__(self, *_):\n with self._info_yaml_file_path.open(\"w\") as info:\n self._yml.dump(self._info, info)", "def find_tracelogging_meta(bv, start, end) -> Stream:\n entries = bv.read(start, end - start)\n result = entries.find(b\"ETW0\")\n if result == -1:\n raise ETWBreakerTLNotFound()\n\n return Stream(entries[result:])", "def write(data):", "def dump(self, output_stream):\n raise NotImplementedError", "def _data_move_out_mc_on_h():\n\n pass", "def write( data ):", "def dumpData(self,out):\n #--Get sizes\n for record in self.records:\n #--Text\n if record.name == 'NAME':\n #--Truncate text?\n if len(self.text) > 511:\n self.text = self.text[:511]\n record.data = self.text\n record.size = len(self.text)\n #--Speaker\n elif record.name == 'ONAM':\n 
record.data = self.spId+'\\x00'\n record.size = len(self.spId) + 1\n record.getSize()\n record.dump(out)", "def peek_write(self):\n ...", "def savenet(self, name):\n file_p = self._newstream(name)\n # (const net_bn* net, stream_ns* file)\n cnetica.WriteNet_bn.argtypes = [c_void_p, c_void_p]\n cnetica.WriteNet_bn.restype = None\n cnetica.WriteNet_bn(self.net, file_p)", "def apply(self,d,object_history=False,instance=None):\n if not (isinstance(d,TimeSeries) or isinstance(d,Seismogram)):\n raise MsPASSError(\"TopMute.apply: usage error. Input data must be a TimeSeries or Seismogram object\",\n ErrorSeverity.Invalid)\n if d.dead():\n return\n if d.t0>self.t0:\n d.elog.log_error(\"TopMute.apply\",\"Data start time is later than time of mute zero zone\\n\"\n + \"Datum killed as this would produce a null signal\",ErrorSeverity.Invalid)\n d.kill()\n else:\n self.processor.apply(d)\n if(object_history):\n if(instance==None):\n d.elog(\"TopMute.apply\",\n \"Undefined instance argument - cannot save history data\",\n ErrorSeverity.Complaint)\n elif(d.is_empty()):\n d.elog(\"TopMute.apply\",\n \"Error log is empty. Cannot be extended without a top level entry\",\n ErrorSeverity.Complaint)\n else:\n if isinstance(d,Seismogram):\n d.new_map(\"TopMute\",instance,AtomicType.SEISMOGRAM,\n ProcessingStatus.VOLATILE)\n else:\n d.new_map(\"TopMute\",instance,AtomicType.TIMESERIES,\n ProcessingStatus.VOLATILE)", "def filter_stream(self, req, method, filename, stream, data):\n if self.sql_read_only and filename == 'admin_users.html':\n stream |= Transformer(\".//input[@name='remove']\").attr('value', 'Remove session and permissions data for selected accounts')\n return stream", "def print_data(self):\n dataset = open('present_data.txt', 'w')\n header = 'Messages '\n i = 0\n for _ in self.words:\n header += \"word\" + str(i) + ' '\n i += 1\n dataset.write(header)\n n = 0\n for _ in self.messages:\n i_d = self.ids[n]\n line = i_d + '\\t\\t'\n for count in self.wordcounts[i_d]:\n line += str(count) + '\\t\\t'\n for j in range(len(self.words) - len(self.wordcounts[i_d])):\n self.wordcounts[i_d].append(0)\n line += str(0) + '\\t\\t'\n n += 1\n dataset.write(line)\n dataset.close()", "def write_to(self, stream: StreamWrapper):\n stream.write_int(len(self.moves))\n for element in self.moves:\n element.write_to(stream)\n stream.write_int(len(self.buildings))\n for element in self.buildings:\n element.write_to(stream)\n if self.choose_specialty is None:\n stream.write_bool(False)\n else:\n stream.write_bool(True)\n stream.write_int(self.choose_specialty)", "def write_infodata(self, data):\n if not self._wrt_defined:\n print \"Please, call set_write_cycle_time() first\"\n return False\n if len(data) != 4:\n print \"Infodata block is 4 byte\"\n return False\n self.SPItrans([0xac, 0x00, data[0], data[1]])\n self.SPItrans([0xac, 0x10, data[2], data[3]])\n return True", "def process(self, stream, mask):\n\n # flag = mask[:, :, np.newaxis, :]\n\n # Create a slice that will expand the mask to\n # the same dimensions as the weight array\n waxis = stream.weight.attrs[\"axis\"]\n slc = [slice(None)] * len(waxis)\n for ww, name in enumerate(waxis):\n if name not in mask.mask.attrs[\"axis\"]:\n slc[ww] = None\n\n # Extract mask, transform to regular numpy array\n # TODO: Changes needed when distributed reading work for HFBData\n flag = mask.mask[:].local_array\n\n # Expand mask to same dimension as weight array\n flag = flag[tuple(slc)]\n\n # Log how much data we're masking\n self.log.info(\n \"%0.2f percent of data will be 
masked.\"\n % (100.0 * np.sum(flag) / float(flag.size),)\n )\n\n # Apply the mask\n if np.any(flag):\n # Apply the mask to the weights\n stream.weight[:] *= 1.0 - flag\n\n # If requested, apply the mask to the data\n if self.zero_data:\n stream.hfb[:] *= 1.0 - flag\n\n return stream", "def write(self, stream):\n # write the data\n pyffi.object_models.xml.struct_.StructBase.write(\n self, stream, self)", "def unknown(self, w):\n # WORK HERE!!", "def hide_data(infile: str, outfile: str, datafile: str, lsb=None):\n if not infile.endswith('.wav'):\n return 4\n\n if not outfile.endswith('.wav'):\n return 4\n\n try:\n lsb = int(lsb)\n except TypeError:\n lsb = None\n\n steganographer = Stego(infile, lsb)\n lsb, datasize = steganographer.hide(datafile, outfile)\n\n if not datasize:\n cleanup(outfile)\n return 3\n\n return (lsb, datasize)", "def _start_streaming_ram_to_host(self):\n self.regs.SDRAM_HOST_READ_GO = 1\n self.regs.CSTREAM_CFG = 1", "def write(self, data, target):\n fobj = open(target, \"w\")\n fobj.write(self.freezeDry(data))\n fobj.close()", "def stream_tweets(bearer_token):\n print(\"Streaming tweets...\")\n\n oauth2 = osometweet.OAuth2(\n bearer_token=bearer_token,\n manage_rate_limits=False\n )\n ot = osometweet.OsomeTweet(oauth2)\n\n # Add all tweet fields\n all_tweet_fields = osometweet.TweetFields(everything=True)\n\n # Add streaming rules\n rules = [{\"value\": \"coronavirus\", \"tag\": \"all coronavirus tweets\"},\n {\"value\": \"indiana\", \"tag\": \"all indiana tweets\"}]\n add_rules = {\"add\": rules}\n response = ot.set_filtered_stream_rule(rules=add_rules)\n print(f\"API response from adding two rules:\\n{response}\\n\")\n\n # Retrieve active streaming rules\n current_rules = ot.get_filtered_stream_rule()\n print(f'The current filtered stream rules are:\\n{current_rules}\\n')\n\n # Remove a streaming rule by using it's tag\n indiana_rule = [\n rule[\"id\"] for rule in current_rules[\"data\"]\n if 'all indiana tweets' in rule[\"tag\"]\n ]\n delete_rule = {'delete': {'ids': indiana_rule}}\n response = ot.set_filtered_stream_rule(rules=delete_rule)\n print(f\"API response from deleting one rule:\\n{response}\\n\")\n\n # Get today's date\n today = dt.strftime(dt.today(), \"%Y-%m-%d_%H-%M\")\n\n # Open two files. 
One for good data, the other for tweet errors.\n with open(f\"tweet_stream--{today}.json\", \"a\") as data_file:\n # stream is a Generator\n stream = ot.filtered_stream(fields=all_tweet_fields)\n # We have to iterate over the stream to fetch streamed tweets\n for tweet in stream.iter_lines():\n # Get data and errors\n try:\n data = json.loads(tweet).get(\"data\")\n\n # When data is found, we write it to the open file\n if data:\n json.dump(data, data_file)\n data_file.write(\"\\n\")\n except json.JSONDecodeError:\n pass", "def save_well(self):\n try:\n storage = WellStorage(self.hdf_file)\n storage.update_well(self.well_name, self.data_frame)\n except Exception as inst:\n print(inst)", "def _WriteStorageMetadata(self):\n stream_name = 'metadata.txt'\n if self._HasStream(stream_name):\n return\n\n stream_data = (\n '[plaso_storage_file]\\n'\n 'format_version: {0:d}\\n'\n 'serialization_format: {1:s}\\n'\n 'storage_type: {2:s}\\n'\n '\\n').format(\n self._FORMAT_VERSION, self.serialization_format, self.storage_type)\n\n stream_data = stream_data.encode('utf-8')\n self._WriteStream(stream_name, stream_data)", "def write_no_seek(meta_file, data_block):\n position = meta_file.tell()\n # seek 0 bytes from the end of file (2)\n meta_file.seek(0, 2)\n meta_file.write(data_block)\n meta_file.seek(position)", "def write(self, data, meta):\n raise NotImplementedError", "def _dw(self):\n if not self.no_cache:\n dw(self.title, self.stream_url, self.URL)\n else:\n logger.info(\"Caching is disabled\")", "def write(self, data):\n if not data.endswith('\\n'):\n data += '\\n'\n self.rpc.call(MsfRpcMethod.SessionMeterpreterWrite, [self.sid, data])", "def store_stream(data: DataStream):\n if data:\n try:\n c1 = datetime.now()\n CC.save_datastream(data,\"datastream\")\n e1 = datetime.now()\n CC.save_datastream_to_influxdb(data)\n i1 = datetime.now()\n print(\"Cassandra Time: \", e1-c1, \" Influx Time: \",i1-e1, \" Batch size: \",len(data.data))\n except:\n cc_log()", "def _shellntube_streams(ci, hi, co, ho, inside_heating) -> 's_tube, s_shell':\n # Mean temperatures\n Tci, Thi, Tco, Tho = ci.T, hi.T, co.T, ho.T\n Tc_ave = (Tci + Tco)/2\n Th_ave = (Thi + Tho)/2\n \n # Choose which fluid goes in tube side to minimize heat losses (configuration with smallest temperature difference between shell and surroundings).\n s_shell = Stream('s_shell')\n s_tube = Stream('s_tube')\n \n if inside_heating:\n # Cold stream goes in tube side\n s_tube.copylike(ci); s_tube.T = Tc_ave\n s_shell.copylike(hi); s_shell.T = Th_ave\n else:\n # Hot stream goes in tube side\n s_tube.copylike(hi); s_tube.T = Th_ave\n s_shell.copylike(ci); s_shell.T = Tc_ave\n return s_tube, s_shell", "def get_sharable_data(self):\n raise NotImplementedError", "def last_write_out(self):\n end_name = self.comp_name\n self.write_out_part_counter += 1\n end_name += \"/streaming/p\" + str(self.write_out_part_counter)\n end_streaming_content_object = Content(end_name, \"sdo:endstreaming\")\n self.cs.add_content_object(end_streaming_content_object)\n print(\"[last_write_out] Last entry in content store:\", self.cs.get_container()[-1].content.name,\n self.cs.get_container()[-1].content.content)", "def dump(self, stream):\n log.error('Cannot dump: %s', self.file_name)", "def stop_stream(self):\n pass", "async def soil_water_content(message, nats_handler, shared_storage, logger):\n\n time = message.data[\"time\"]\n for i in range(shared_storage[\"data_rate\"]):\n time_struct = datetime.strptime(time, \"%Y-%m-%dT%H:%M:%S.%f\").timetuple()\n total_seconds = 
time_struct.tm_hour*3600 + time_struct.tm_min*60 + time_struct.tm_sec\n data_value = 10 * math.cos(2 * math.pi * total_seconds * (1 / 86400)) + 40\n message = nats_handler.create_message(data_value, MessageSchemas.IOT_DATA_MESSAGE)\n message.message_type = \"soil_water_content\"\n await nats_handler.send_data(\"data.out\", message)\n await asyncio.sleep(0.5/shared_storage[\"data_rate\"])", "def hide_messages():\n\n print(\"Keep uncertainty data?\")\n print(\"NewDatabase(..., keep_uncertainty_data=True)\")\n print(\"\")\n print(\"Hide these messages?\")\n print(\"NewDatabase(..., quiet=True)\")", "def _WriteStream(self, stream_name, stream_data):\n # TODO: this can raise an IOError e.g. \"Stale NFS file handle\".\n # Determine if this be handled more error resiliently.\n\n # Prevent zipfile from generating \"UserWarning: Duplicate name:\".\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n self._zipfile.writestr(stream_name, stream_data)", "def stream(self):\n d = self.dictionary()\n # binary data comes after dict\n self.maybe_spaces_or_comments()\n return self._stream(d)", "def write(self):", "def write(self):", "def server_streaming(self) -> global___Snippet.ServerStreaming:", "def dumpData(self,out):\n #--Get sizes and dump into dataIO\n self.hedr.getSize()\n self.hedr.dump(out)\n for (name,size) in self.masters:\n out.packSub0('MAST',name)\n out.packSub('DATA','Q',size)\n if self.gmdt: \n self.gmdt.getSize()\n self.gmdt.dump(out)\n for other in self.others:\n other.getSize()\n other.dump(out)", "async def _clear(self, ctx):\n try:\n a = discord.Streaming\n p = ctx.bot.config[\"prefix\"]\n g = a(\n name=f\"{p}help | v{ctx.bot.version}\", url=\"https://twitch.tv/monstercat\"\n )\n await self.bot.change_presence(activity=g)\n except Exception:\n await ctx.send(f\"```\\n{traceback.format_exc()}```\")\n else:\n await ctx.send(\":white_check_mark: Cleared.\")", "def filter_ms2fits(stack, fit_data, channel=1, peakiness=4.5):\n \n fit_data = fit_data.copy()\n for t in range(0, len(fit_data)):\n frame_data = fit_data[t]\n frame_med = np.median(stack[channel, t])\n xy_width_means = np.mean(frame_data[:,5:7], axis=1)\n peak_heights = frame_data[:,3]\n spot_peakiness = np.log(peak_heights / xy_width_means)\n frame_data_filtered = frame_data[(peak_heights > frame_med) & (spot_peakiness > peakiness),:]\n fit_data[t] = frame_data_filtered\n return fit_data", "def test_delete_private_stream(self) -> None:\n stream = self.set_up_stream_for_archiving(\"newstream\", invite_only=True)\n self.archive_stream(stream)", "def _applyToStream(self):\n cs = caseSettings.Settings()\n reader = settingsIO.SettingsReader(cs)\n reader.readFromStream(self.stream)\n\n if reader.invalidSettings:\n runLog.info(\n \"The following deprecated settings will be deleted:\\n * {}\"\n \"\".format(\"\\n * \".join(list(reader.invalidSettings)))\n )\n\n _modify_settings(cs)\n writer = settingsIO.SettingsWriter(cs)\n newStream = io.StringIO()\n writer.writeYaml(newStream)\n newStream.seek(0)\n return newStream", "def writetipsy(self, outfile=None, hubble=None):\n from . import analysis\n from . 
import tipsy\n from .analysis import cosmology\n from snapshot import _new as new\n import math\n s = self._base()\n if outfile is None: outfile = s.filename+'.gtp'\n print \"write tipsy file to \", outfile\n sout = new(star=self._nhalos) # create new tipsy snapshot written as halos.\n sout.properties['a'] = s.properties['a']\n sout.properties['z'] = s.properties['z']\n sout.properties['boxsize'] = s.properties['boxsize']\n if hubble is None: hubble = s.properties['h']\n sout.properties['h'] = hubble\n ### ! dangerous -- rho_crit function and unit conversions needs simplifying\n rhocrithhco = cosmology.rho_crit(s, z=0, unit=\"Msol Mpc^-3 h^2\")\n lboxkpc = sout.properties['boxsize'].ratio(\"kpc a\")\n lboxkpch = lboxkpc*sout.properties['h']\n lboxmpch = lboxkpc*sout.properties['h']/1000.\n tipsyvunitkms = lboxmpch * 100. / (math.pi * 8./3.)**.5\n tipsymunitmsun = rhocrithhco * lboxmpch**3 / sout.properties['h']\n\n print \"transforming \", self._nhalos, \" halos into tipsy star particles\"\n for ii in xrange(self._nhalos):\n h = self[ii+1].properties\n sout.star[ii]['mass'] = h['m']/hubble / tipsymunitmsun\n ## tipsy units: box centered at 0. (assume 0<=x<=1)\n sout.star[ii]['x'] = h['pos'][0][0]/lboxmpch - 0.5\n sout.star[ii]['y'] = h['pos'][0][1]/lboxmpch - 0.5\n sout.star[ii]['z'] = h['pos'][0][2]/lboxmpch - 0.5\n sout.star[ii]['vx'] = h['vel'][0][0]/tipsyvunitkms\n sout.star[ii]['vy'] = h['vel'][0][1]/tipsyvunitkms\n sout.star[ii]['vz'] = h['vel'][0][2]/tipsyvunitkms\n sout.star[ii]['eps'] = h['r']/lboxkpch\n sout.star[ii]['metals'] = 0.\n sout.star[ii]['phi'] = 0.\n sout.star[ii]['tform'] = 0.\n print \"writing tipsy outfile %s\"%outfile\n sout.write(fmt=tipsy.TipsySnap, filename=outfile)\n return sout", "def write_stewicombo_metadata(file_name, metadata_dict, category=''):\n meta = set_stewicombo_meta(file_name, category=category)\n meta.tool_meta = metadata_dict\n write_metadata_to_file(paths, meta)", "def write(self, data, x=0, y=0, z=0,logic_box=None, time=None, field=None, access=None):\r\n\t\t\r\n\t\tpdim=self.getPointDim()\r\n\t\t\r\n\t\tfield=self.getField(field)\r\n\t\t\r\n\t\tif time is None:\r\n\t\t\ttime = self.getTime()\r\n\r\n\r\n\t\tdims=list(data.shape)\r\n\t\t\r\n\t\t# remove last components\r\n\t\tif field.dtype.ncomponents()>1:\r\n\t\t\tdims=dims[:-1]\r\n\t\t\r\n\t\t\t# could be I'm writing a slice, I need to increment the \"dimension\"\r\n\t\twhile len(dims)<pdim: \r\n\t\t\tdims=[1] + dims\t\r\n\t\t\r\n\t\tdims=list(reversed(dims))\t\r\n\r\n\t\tif logic_box is None:\r\n\t\t\tp1=PointNi([x,y,z][0:pdim])\r\n\t\t\tlogic_box=BoxNi(p1,p1+PointNi(dims))\r\n\r\n\t\tif isinstance(logic_box,(tuple,list)):\r\n\t\t\tlogic_box=BoxNi(PointNi(logic_box[0]),PointNi(logic_box[1]))\r\n\r\n\t\tquery = self.db.createBoxQuery(logic_box, field , time , ord('w'))\r\n\t\tquery.end_resolutions.push_back(self.getMaxResolution())\r\n\t\t\r\n\t\tself.db.beginBoxQuery(query)\r\n\t\t\r\n\t\tif not query.isRunning():\r\n\t\t\traise Exception(\"begin query failed {0}\".format(query.errormsg))\r\n\t\t\t\r\n\t\tif not access:\r\n\t\t\taccess=IdxDiskAccess.create(self.db)\r\n\t\t\taccess.disableAsync()\r\n\t\t\taccess.disableWriteLock()\r\n\t\t\r\n\t\t# I need to change the shape of the buffer, since the last component is the channel (like RGB for example)\r\n\t\tbuffer=Array.fromNumPy(data,bShareMem=True)\r\n\t\tAssert(buffer.c_size()==data.nbytes)\r\n\t\tbuffer.resize(PointNi(dims),query.field.dtype,__file__,0)\r\n\t\t\r\n\t\tquery.buffer=buffer\r\n\t\t\r\n\t\tif not 
self.db.executeBoxQuery(access, query):\r\n\t\t\traise Exception(\"query error {0}\".format(query.errormsg))", "def test_1020(self, gmn_client_v2):\n str_buf = io.StringIO()\n d1_gmn.app.sysmeta_extract.extract_values(out_stream=str_buf)\n self.sample.assert_equals(str_buf.getvalue(), \"all_stream\")", "def test_visible_data(state):\n st_data = state.to_player_data(0)\n\n assert st_data, \"Expect that we would have some data!\"\n assert \"deck\" not in st_data, \"We should not see the deck\"\n assert len(st_data[\"discarded\"]) == 0, \"We should see discarded\"\n\n # Should see all data of the player self\n assert len(st_data[\"self\"][\"hand\"]) == 0\n\n # Should not see other player's data\n other_hand = st_data[\"others\"][0]\n assert \"hand\" not in other_hand\n assert len(other_hand[\"open_hand\"]) == 0", "def _save_metadata(self, search_name):\r\n with open_(self.output_path / \"metadata\", \"a\") as f:\r\n f.write(\r\n f\"\"\"name={self.name}\r\n non_linear_search={search_name}\r\n \"\"\"\r\n )", "def _write_valence(\n top_file: IO, mol_name: str, mol_data: Dict, openff_sys: \"System\", typemap: Dict\n):\n _write_bonds(top_file, openff_sys, mol_data[\"reference_molecule\"])\n _write_angles(top_file, openff_sys, mol_data[\"reference_molecule\"])\n _write_dihedrals(top_file, openff_sys, mol_data[\"reference_molecule\"])", "def export_wells(self, w, title):\r\n self._check_out(title)\r\n np.savez_compressed(os.path.join(self.out_dir, title, title), w)", "def rw_pifo_sm(self):\r\n data_words = random.sample(range(0, 20), 20)\r\n\r\n # push all data\r\n for word in data_words:\r\n print ('@ {:04d} - pushed data word {}'.format(self.env.now, word))\r\n self.pifo_w_in_pipe.put(word)\r\n ((done, popped_data, popped_data_valid)) = yield self.pifo_w_out_pipe.get() # tuple\r\n if popped_data_valid:\r\n print ('@ {:04d} - popped data word {}'.format(self.env.now, popped_data))\r\n \r\n\r\n # pop all items\r\n for i in range(min(self.pifo_maxsize,len(data_words))):\r\n # submit pop request (value in put is a don't care)\r\n self.pifo_r_in_pipe.put(1) \r\n word = yield self.pifo_r_out_pipe.get()\r\n print ('@ {:04d} - popped data word {}'.format(self.env.now, word))", "def flush_analysis_data(self):\n self.writer.write_bulk(zip(self.analyzed_types, self.analyzed))\n self.analyzed_types = []\n self.analyzed = []", "def test_hiddenpart(self):\n testfile='hiddenpart.eml'\n try:\n tmpfile = tempfile.NamedTemporaryFile(\n suffix='hidden', prefix='fuglu-unittest', dir='/tmp')\n shutil.copy(\"%s/%s\" % (TESTDATADIR, testfile), tmpfile.name)\n\n user = 'recipient-hiddenpart@unittests.fuglu.org'\n conffile = self.tempdir + \"/%s-filetypes.conf\" % user\n # the largefile in the test message is just a bunch of zeroes\n open(conffile, 'w').write(\n \"deny application\\/zip no zips allowed\")\n self.rulescache._loadrules()\n suspect = Suspect(\n 'sender@unittests.fuglu.org', user, tmpfile.name)\n\n result = self.candidate.examine(suspect)\n if type(result) is tuple:\n result, message = result\n self.assertEqual(\n result, DELETE, 'hidden message part was not detected')\n\n finally:\n tmpfile.close()\n os.remove(conffile)", "def WriteFLASH_old(self, data):\n # print('program flash start')\n start_addr = 9 << 18\n cmd = self.board_def.CMD_WRITE_MEM\n pad = 0xFFFFFF\n #I need to pack bank into 4 bytes and then only use the 3\n packedPad = struct.pack(\"L\", pad)\n unpackedPad = struct.unpack('4b', packedPad)\n length = len(data)\n packet = struct.pack(\"4bLL\", cmd, unpackedPad[0], unpackedPad[1], 
unpackedPad[2], start_addr, length)\n #Next I need to send the command\n self.send_data(packet)\n #next read from the socket\n recv_stat, recv_data = self.receive_data()\n if recv_stat != 0x0:\n print ('Ram Write cmd Error stat={}!!!'.format(recv_stat))\n return self.board_def.STAT_ERROR\n\n self.send_data(data)\n #next read from the socket to ensure no errors occur\n self.sockfd.settimeout(1000);\n stat, data = self.receive_data()\n self.sockfd.settimeout(5)\n # print(packet)\n\n # print('program flash end')\n if stat != 0x0:\n print ('Ram Write Error stat={}!!!'.format(stat))\n return self.board_def.STAT_ERROR", "def _write(self, data):\n self._writer.write(data)", "def writetipsy(self, snapshot, halos, tipsyoutfile, hubble=None):\n from . import analysis\n from . import tipsy\n from .analysis import cosmology\n from snapshot import _new as new\n import math\n s = snapshot\n outfile = tipsyoutfile\n nhalos = halos._nhalos\n nstar = nhalos\n sout = new(star=nstar) # create new tipsy snapshot written as halos.\n sout.properties['a'] = s.properties['a']\n sout.properties['z'] = s.properties['z']\n sout.properties['boxsize'] = s.properties['boxsize']\n if hubble is None:\n hubble = s.properties['h']\n sout.properties['h'] = hubble\n # ! dangerous -- rho_crit function and unit conversions needs simplifying\n rhocrithhco = cosmology.rho_crit(s, z=0, unit=\"Msol Mpc^-3 h^2\")\n lboxkpc = sout.properties['boxsize'].ratio(\"kpc a\")\n lboxkpch = lboxkpc * sout.properties['h']\n lboxmpch = lboxkpc * sout.properties['h'] / 1000.\n tipsyvunitkms = lboxmpch * 100. / (math.pi * 8. / 3.) ** .5\n tipsymunitmsun = rhocrithhco * lboxmpch ** 3 / sout.properties['h']\n\n for ii in xrange(nhalos):\n h = halos[ii + 1].properties\n sout.star[ii]['mass'] = h['mass'] / hubble / tipsymunitmsun\n # tipsy units: box centered at 0. 
(assume 0<=x<=1)\n sout.star[ii]['x'] = h['Xc'] / lboxkpch - 0.5\n sout.star[ii]['y'] = h['Yc'] / lboxkpch - 0.5\n sout.star[ii]['z'] = h['Zc'] / lboxkpch - 0.5\n sout.star[ii]['vx'] = h['VXc'] / tipsyvunitkms\n sout.star[ii]['vy'] = h['VYc'] / tipsyvunitkms\n sout.star[ii]['vz'] = h['VZc'] / tipsyvunitkms\n sout.star[ii]['eps'] = h['Rvir'] / lboxkpch\n sout.star[ii]['metals'] = 0.\n sout.star[ii]['phi'] = 0.\n sout.star[ii]['tform'] = 0.\n\n sout.write(fmt=tipsy.TipsySnap, filename=outfile)\n return sout", "def decode_faceshift_datastream(self, data):\n \n #block_id = struct.unpack_from('H', data)\n #print(\"Received block id \" + str(block_id)) ;\n\n offset = 0\n block_id, version, block_size = struct.unpack_from('HHI', data, offset)\n \n #print(\"ID, v, size = \" + str(block_id) + \",\" + str(version) + \",\" + str(block_size) )\n \n offset += 8\n\n if(block_id == BLOCK_ID_TRACKING_STATE):\n n_blocks, = struct.unpack_from('H', data, offset)\n #print(\"n_blocks = \" + str(n_blocks))\n offset += 2\n\n track_ok = 0 # Will be a byte: 1 if tracking ok, 0 otherwise.\n head_rotation_quat = None # Will be filled with the rotation using mathutils.Quaternion\n blend_shape_values = [] # Will be a list of float in the range 0-1\n #eyes_values = None # Will be a sequence of 4 angle values\n markers_position = [] # Will be a list of mathutils.Vector\n \n curr_block = 0\n while(curr_block < n_blocks):\n block_id, version, block_size = struct.unpack_from('HHI', data, offset)\n #print(\"ID, v, size = \" + str(block_id) + \",\" + str(version) + \",\" + str(block_size) )\n \n # put the offset at the beginning of the block\n offset += 8\n \n if(block_id == 101): # Frame Information blobk (timestamp and tracking status)\n ts, track_ok = struct.unpack_from('dB', data, offset)\n #print(\"timestamp, track_ok \" + str(ts) + \", \" + str(track_ok) )\n #offset += 9\n elif(block_id == 102): # Pose block (head rotation and position)\n x,y,z,w = struct.unpack_from('ffff', data, offset)\n #head_rotation_quat = mathutils.Quaternion((w,x,y,z))\n elif(block_id == 103): # Blendshapes block (blendshape values)\n n_coefficients, = struct.unpack_from('I', data, offset)\n #print(\"Blend shapes count=\"+ str(n_coefficients) )\n i = 0\n coeff_list = \"\"\n while(i < n_coefficients):\n # Offset of the block, plus the 4 bytes for int n_coefficients, plus 4 bytes per float\n val, = struct.unpack_from('f', data, offset + 4 + (i*4))\n blend_shape_values.append(val)\n coeff_list += repr(val) + \" \"\n i += 1\n print(\"Values: \" + coeff_list)\n elif(block_id == 104): # Eyes block (eyes gaze)\n leye_theta, leye_phi, reye_theta, reye_phi = struct.unpack_from('ffff', data, offset)\n elif(block_id == 105): # Markers block (absolute position of mark points)\n n_markers, = struct.unpack_from('H', data, offset)\n #print(\"n markers=\"+str(n_markers))\n i = 0\n while(i < n_markers):\n # Offset of the block, plus the 2 bytes for int n_markers, plus 4 bytes for each x,y,z floats\n x, y, z = struct.unpack_from('fff', data, offset + 2 + (i*4*3))\n #print(\"m\" + str(i) + \" \" + str(x) + \"\\t\" + str(y) + \"\\t\" + str(z))\n markers_position.append(mathutils.Vector((x,y,z)))\n i += 1\n \n curr_block += 1\n offset += block_size\n \n msg = fsMsgTrackingState()\n\n msg.m_timestamp = ts\n\n self.pub.publish(msg)\n\n # end -- while on blocks. 
Track State scan complete", "def off(self):\n self._current_stream = self._devnull", "def writestat(self, outfile=None, hubble=None):\n s = self._base()\n mindarkmass = min(s.dark['mass'])\n\n if hubble is None:\n hubble = s.properties['h']\n\n if outfile is None: outfile = self._base().filename+'.stat'\n print \"write stat file to \", outfile\n fpout = open(outfile, \"w\")\n header = \"#Grp N_tot N_gas N_star N_dark Mvir(M_sol) Rvir(kpc) GasMass(M_sol) StarMass(M_sol) DarkMass(M_sol) V_max R@V_max VelDisp Xc Yc Zc VXc VYc VZc Contam Satellite? False? ID_A\"\n print >> fpout, header\n for ii in np.arange(self._nhalos)+1:\n print '%d '%ii,\n sys.stdout.flush()\n h = self[ii].properties # halo index starts with 1 not 0\n## 'Contaminated'? means multiple dark matter particle masses in halo)\"\n icontam = np.where(self[ii].dark['mass'] > mindarkmass)\n if (len(icontam[0]) > 0):\n contam = \"contam\"\n else:\n contam = \"clean\"\n## may want to add implement satellite test and false central breakup test.\n ss = \" \" # can adjust column spacing\n outstring = str(ii)+ss\n outstring += str(len(self[ii]))+ss+str(len(self[ii].g))+ss\n outstring += str(len(self[ii].s)) + ss+str(len(self[ii].dark))+ss\n outstring += str(h['m']/hubble)+ss+str(h['r']/hubble)+ss\n outstring += str(self[ii].g['mass'].in_units('Msol').sum())+ss\n outstring += str(self[ii].s['mass'].in_units('Msol').sum())+ss\n outstring += str(self[ii].d['mass'].in_units('Msol').sum())+ss\n outstring += str(h['vmax'])+ss+str(h['vmax_r']/hubble)+ss\n outstring += str(h['vrms'])+ss\n ## pos: convert kpc/h to mpc (no h).\n outstring += str(h['pos'][0][0]/hubble)+ss\n outstring += str(h['pos'][0][1]/hubble)+ss\n outstring += str(h['pos'][0][2]/hubble)+ss\n outstring += str(h['vel'][0][0])+ss+str(h['vel'][0][1])+ss\n outstring += str(h['vel'][0][2])+ss\n outstring += contam+ss\n outstring += \"unknown\" + \\\n ss # unknown means sat. test not implemented.\n outstring += \"unknown\"+ss # false central breakup.\n print >> fpout, outstring\n fpout.close()", "def writetrack2(self, dmbin, tbin, tshift=0, bgwindow=0, show=0, pol=0):\n\n # create bgsub data\n datadiffarr = self.tracksub(dmbin, tbin, bgwindow=bgwindow)\n if n.shape(datadiffarr) == n.shape([0]): # if track doesn't cross band, ignore this iteration\n return 0\n\n data = n.zeros(self.nchan, dtype='complex64') # default data array. gets overwritten.\n data0 = n.zeros(self.nchan, dtype='complex64') # zero data array for flagged bls\n flags = n.zeros(self.nchan, dtype='bool')\n\n # define output visibility file names\n outname = string.join(self.file.split('.')[:-1], '.') + '.' 
+ str(self.nskip/self.nbl) + '-' + 'dm' + str(dmbin) + 't' + str(tbin) + '.mir'\n print outname\n vis = miriad.VisData(self.file,)\n\n int0 = int((tbin + tshift) * self.nbl)\n flags0 = []\n i = 0\n for inp, preamble, data, flags in vis.readLowlevel ('dsl3', False, nocal=True, nopass=True):\n if i == 0:\n # prep for temp output vis file\n shutil.rmtree(outname, ignore_errors=True)\n out = miriad.VisData(outname)\n dOut = out.open ('c')\n\n # set variables\n dOut.setPreambleType ('uvw', 'time', 'baseline')\n dOut.writeVarInt ('nants', self.nants0)\n dOut.writeVarFloat ('inttime', self.inttime0)\n dOut.writeVarInt ('nspect', self.nspect0)\n dOut.writeVarDouble ('sdf', self.sdf0)\n dOut.writeVarInt ('nwide', self.nwide0)\n dOut.writeVarInt ('nschan', self.nschan0)\n dOut.writeVarInt ('ischan', self.ischan0)\n dOut.writeVarDouble ('sfreq', self.sfreq0)\n dOut.writeVarDouble ('restfreq', self.restfreq0)\n dOut.writeVarInt ('pol', self.pol0)\n# inp.copyHeader (dOut, 'history')\n inp.initVarsAsInput (' ') # ???\n inp.copyLineVars (dOut)\n if i < self.nbl:\n flags0.append(flags.copy())\n i = i+1\n else:\n break\n\n l = 0\n for i in xrange(len(flags0)): # iterate over baselines\n # write out track, if not flagged\n if n.any(flags0[i]):\n k = 0\n for j in xrange(self.nchan):\n if j in self.chans:\n data[j] = datadiffarr[pol, l, k]\n# flags[j] = flags0[i][j]\n k = k+1\n else:\n data[j] = 0 + 0j\n# flags[j] = False\n l = l+1\n else:\n data = data0\n# flags = n.zeros(self.nchan, dtype='bool')\n\n dOut.write (self.preamble[int0 + i], data, flags0[i])\n\n dOut.close ()\n return 1", "def feed(self):\n # or intelligence discard\n pass", "def dumpDmesg(self):\n pass", "def streams():\r\n section = document.add_section()\r\n new_width, new_height = section.page_height, section.page_width\r\n section.orientation = WD_ORIENT.LANDSCAPE\r\n section.page_width = 10058400\r\n section.page_height = 7772400\r\n document.add_heading('Streams', level=1)\r\n streams = get_qlik_sense.get_streams()\r\n num_of_streams = len(streams)\r\n table = document.add_table(rows=num_of_streams+1, cols=1)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'Stream name'\r\n for stream in range(num_of_streams):\r\n row = table.rows[stream+1]\r\n row.cells[0].text = str(streams[stream]['name'])\r\n document.add_page_break()", "def sanitize(self):\n self.clear_streams()\n self.stop_sniff()\n if self.init_lhost:\n self._lhost.ui.disconnect()", "def _write_results_chunk(self):\n if self._is_guess:\n targ_dset = self._h5_guess\n source_dset = self._guess\n else:\n targ_dset = self._h5_fit\n source_dset = self._fit\n\n curr_pixels = self._get_pixels_in_current_batch()\n\n if self.verbose and self.mpi_rank == 0:\n print('Writing data of shape: {} and dtype: {} to position range: '\n '{} in HDF5 dataset:{}'.format(source_dset.shape,\n source_dset.dtype,\n [curr_pixels[0],curr_pixels[-1]],\n targ_dset))\n targ_dset[curr_pixels, :] = source_dset", "def flush(self, data):", "def debug_file(self, pkt_count, attack_count, data_list, ds_calc_time, ds_vals, metric_means, distances):\n # Current frame no. 
//\n # Current frame metric data //\n # Current sliding window data\n # Distances for each metric\n # DS probabilities, BPA's, time to calculate\n # Fusion results for each metric\n # Averages for each metric\n # Final result for frame\n # Current number of malicious frames detected\n metric_list = ['RSSI', 'Rate', 'NAV', 'Seq', 'TTL']\n x = [1, 2, 3, 4, 5]\n with open('debug.txt', 'a') as debug_file:\n debug_file.write('\\nFrame number: %d\\n' % pkt_count)\n debug_file.write('Current frame data. \\n')\n debug_file.writelines('%s : %d \\n ' % (metric, value) for metric, value in zip(self._features_to_analyse,\n data_list))\n debug_file.write('\\nCurrent sliding window data: \\n')\n debug_file.writelines('\\n%s:\\n %s \\nMean value = %f \\n' % (str(metric_array[0]), str(metric_array[1]), mean) for metric_array, mean in zip(self._sw_dict.items(), metric_means))\n debug_file.write('\\nDempster Shafer calculation times: \\n')\n\n if self._ds_timer is True:\n debug_file.writelines('Iteration %d time (s) = %f\\n' % (count, ds_time) for count, ds_time in zip(x, ds_calc_time))\n debug_file.write('Total time to calculate DS = %f (s)\\n' % sum(ds_calc_time))\n\n debug_file.write('Number of malicious frames detected: %d \\n' % attack_count)\n\n\n debug_file.close()", "def write(self, unknown_category):\n\n self.unknown_category = unknown_category\n rules, lexicon = self.generate_rules_and_lexicon()\n self.rules_generated = u' '.join(map(u''.join, rules))\n cPickle.dump(lexicon, open(self.get_file_path('lexicon'), 'wb'))\n if not self.rich_upper:\n dictionary = self.generate_dictionary(lexicon)\n cPickle.dump(dictionary, open(self.get_file_path('dictionary'), 'wb'))\n script_path = self.get_file_path('script')\n binary_path = self.get_file_path('binary')\n compiler_path = self.get_file_path('compiler')\n with open(compiler_path, 'w') as f:\n if self.script_type == 'lexc':\n f.write('#!/bin/sh\\nfoma -e \"read lexc %s\" -e \"save stack %s\" -e \"quit\"' % (\n script_path, binary_path))\n else:\n f.write('#!/bin/sh\\nfoma -e \"source %s\" -e \"regex morphology;\" '\n '-e \"save stack %s\" -e \"quit\"' % (script_path, binary_path))\n os.chmod(compiler_path, 0744)\n morphology_generator = self.get_morphology_generator(rules, lexicon)\n with codecs.open(script_path, 'w', 'utf8') as f:\n for line in morphology_generator:\n f.write(line)", "def detach_hidden(self, zero=False):\n if zero:\n self.hidden = self._make_hidden(self.batch_size)\n else:\n self.hidden = self.hidden.detach()", "def _stream(self):\n logger.info('getting meta-data')\n while not self.handle.has_metadata():\n time.sleep(0.1)\n\n #self.handle.rename_file(0, 'test.mp4')\n\n while not self.handle.is_seed():\n stat = self.handle.status()\n\n print 'downloading %.2f%%'%(stat.progress * 100)\n sys.stdout.flush()\n\n time.sleep(1)", "def write_out(self, content_content: str):\n print(\"[write_out] Computation name: \", self.comp_name)\n # meta_title_content object creation to return as a first part\n if self.write_out_part_counter < 0:\n metatitle_content = Content(self.comp_name, \"sdo:\\n\" + str(self.comp_name) + \"/streaming/p*\")\n self.queue_to_lower.put((self.packetid, metatitle_content))\n # self.cs.add_content_object(metatitle_content) TODO not needed? 
\n\n # actual content_object for streaming\n self.write_out_part_counter += 1\n content_name = self.comp_name\n content_name += \"/streaming/p\" + str(self.write_out_part_counter)\n content_object = Content(content_name, content_content)\n self.cs.add_content_object(content_object)\n print(\"[write_out] Last entry in content store:\", self.cs.get_container()[-1].content.name,\n self.cs.get_container()[-1].content.content)", "def stream(self, index, version, streamdata, dictionary=\"<< /Length %d >>\"):\n self.appendString(\"\\n\")\n self.indirectObjects[index] = self.filesize()\n self.appendString((\"%d %d obj\\n\" + dictionary + \"\\nstream\\n\") % (index, version, len(streamdata)))\n position = self.filesize()\n self.appendBinary(streamdata)\n self.appendString(\"\\nendstream\\nendobj\\n\")\n\n return position", "def write_stats(self, filestream):\n if not self.summary:\n self.summarize()\n\n print(self.scores, file=filestream)", "def stream(self, write, request):\n raise NotImplementedError(\"%s.stream\" % reflect.qual(self.__class__))", "def render_book_to_stream(self, file_stream, book, **keywords):\n raise NotImplementedError(\"We are not writing to file\")", "def _write_psf_cutouts_hst(self):\n\n print('writing psf cutouts')\n\n obj_data=self.obj_data\n psf_data=self.psf_data\n\n nfile=self.image_info.size\n nobj=obj_data.size\n\n cutout_hdu = self.fits['psf']\n\n for iobj in range(nobj):\n if (iobj+1) % 100 == 0:\n print(' %d/%d' % (iobj+1,obj_data.size))\n\n # HST psf is same for every cutout, in fact ncut should always\n # be 1\n try:\n psf_im = self.psf_data.get_psf(iobj)\n except AttributeError:\n psf_im = None\n\n ncut=obj_data['ncutout'][iobj]\n\n for icut in range(ncut):\n\n if psf_im is None:\n row = obj_data['orig_row'][iobj, icut]\n col = obj_data['orig_col'][iobj, icut]\n file_id = obj_data['file_id'][iobj,icut]\n\n p = self.psf_data[file_id]\n\n psf_im = p.get_rec(row,col)\n\n expected_psf_shape = (\n obj_data['psf_row_size'][iobj,icut],\n obj_data['psf_col_size'][iobj,icut],\n )\n\n file_id = obj_data['file_id'][iobj, icut]\n\n row = obj_data['orig_row'][iobj, icut]\n col = obj_data['orig_col'][iobj, icut]\n start_row = obj_data['psf_start_row'][iobj, icut]\n\n if psf_im.shape != expected_psf_shape:\n raise ValueError(\"psf size mismatch, expected %s \"\n \"got %s\" % (expected_psf_shape, psf_im.shape))\n\n cutout_hdu.write(psf_im, start=start_row)", "def write(self, data):\n with self.writing:\n raise NotImplementedError()", "def writeNoise(self):\n\n if (self.noise_file == None or self.noise_file == \"\"):\n return\n ofname = self.noise_file\n ofh = open(ofname,'w')\n\n # these have to be there as long as we've read the FAST file already\n ## not true: we don't store these in the dict.\n have_data = False\n if (\"TipRad\" in self.fstDict and 'TowerHt' in self.fstDict and 'Twr2Shft' in self.fstDict):\n tiprad = self.fstDict['TipRad']\n towerht = self.fstDict['TowerHt']\n twr2shft = self.fstDict['Twr2Shft']\n have_data = True\n\n for line in self.lines_noise:\n if (have_data and line.find('Observer location') >= 0):\n xdist = -1.0 * (tiprad + (towerht + twr2shft))\n ofh.write('{:.1f} 0.0 0.0'.format(xdist))\n ofh.write(' (x,y,z) Observer location in tower-base coordinate system. Use -(RotRad+HubHt)\\n')\n else:\n ofh.write(line)\n ofh.close()", "def write(self):\n # # Sometimes file is not written properly. 
So delete and rewrite it\n # os.system('rm {}'.format(snip_dir + '/' + self.name))\n # if 'NUM_TIME_STEPS' not in self.define.keys():\n # warnings.warn('NUM_TIME_STEPS missing in header. Execution may hang!')\n with open(snip_dir + '/' + self.name, 'w') as f:\n f.write('/* Temporary generated file for snip process definitions before compilation */\\n')\n f.write(self.__str__())\n\n # os.system('ls {}'.format(snip_dir + '/' + self.name))", "def detach_hidden(self, zero=False):\n if zero:\n self.hidden = self._make_hidden(self.batch_size)\n else:\n self.hidden[0].detach()", "def test_delete_public_stream(self) -> None:\n stream = self.set_up_stream_for_archiving(\"newstream\")\n self.archive_stream(stream)", "def toggle_hidden(self):\n AbstractChild.toggle_hidden(self)\n self.accFrame.update_values()\n self.botFrame.update_values()\n # On toggle hidden\n self.on_toggle_hidden()", "def writetrack(self, dmbin, tbin, tshift=0, bgwindow=0, show=0, pol=0):\n\n # create bgsub data\n datadiffarr = self.tracksub(dmbin, tbin, bgwindow=bgwindow)\n if n.shape(datadiffarr) == n.shape([0]): # if track doesn't cross band, ignore this iteration\n return 0\n\n data = n.zeros(self.nchan, dtype='complex64') # default data array. gets overwritten.\n data0 = n.zeros(self.nchan, dtype='complex64') # zero data array for flagged bls\n flags = n.zeros(self.nchan, dtype='bool')\n\n # define output visibility file names\n outname = string.join(self.file.split('.')[:-1], '.') + '.' + str(self.nskip/self.nbl) + '-' + 'dm' + str(dmbin) + 't' + str(tbin) + '.mir'\n print outname\n vis = miriad.VisData(self.file,)\n\n int0 = int((tbin + tshift) * self.nbl)\n flags0 = []\n i = 0\n for inp, preamble, data, flags in vis.readLowlevel ('dsl3', False, nocal=True, nopass=True):\n if i == 0:\n # prep for temp output vis file\n shutil.rmtree(outname, ignore_errors=True)\n out = miriad.VisData(outname)\n dOut = out.open ('c')\n\n # set variables\n dOut.setPreambleType ('uvw', 'time', 'baseline')\n dOut.writeVarInt ('nants', self.nants0)\n dOut.writeVarFloat ('inttime', self.inttime0)\n dOut.writeVarInt ('nspect', self.nspect0)\n dOut.writeVarDouble ('sdf', self.sdf0)\n dOut.writeVarInt ('nwide', self.nwide0)\n dOut.writeVarInt ('nschan', self.nschan0)\n dOut.writeVarInt ('ischan', self.ischan0)\n dOut.writeVarDouble ('sfreq', self.sfreq0)\n dOut.writeVarDouble ('restfreq', self.restfreq0)\n dOut.writeVarInt ('pol', self.pol0)\n# inp.copyHeader (dOut, 'history')\n inp.initVarsAsInput (' ') # ???\n inp.copyLineVars (dOut)\n if i < self.nbl:\n flags0.append(flags.copy())\n i = i+1\n else:\n break\n\n l = 0\n for i in xrange(len(flags0)): # iterate over baselines\n # write out track, if not flagged\n if n.any(flags0[i]):\n k = 0\n for j in xrange(self.nchan):\n if j in self.chans:\n data[j] = datadiffarr[pol, l, k]\n# flags[j] = flags0[i][j]\n k = k+1\n else:\n data[j] = 0 + 0j\n# flags[j] = False\n l = l+1\n else:\n data = data0\n# flags = n.zeros(self.nchan, dtype='bool')\n\n dOut.write (self.preamble[int0 + i], data, flags0[i])\n\n dOut.close ()\n return 1" ]
[ "0.51169866", "0.50111604", "0.4976669", "0.4859597", "0.4805777", "0.4788097", "0.46907136", "0.4596308", "0.45624852", "0.4542059", "0.45340312", "0.45145792", "0.45093474", "0.45045894", "0.4497126", "0.4490438", "0.44690353", "0.44639584", "0.44553003", "0.44552484", "0.4449577", "0.4439231", "0.44239864", "0.44111276", "0.44062975", "0.44051445", "0.43969536", "0.4385505", "0.43828416", "0.4360129", "0.4347852", "0.43466616", "0.4346603", "0.43387857", "0.43250433", "0.43240562", "0.43076906", "0.42883483", "0.42836812", "0.42798156", "0.4277899", "0.42739648", "0.42708117", "0.42632776", "0.4258193", "0.42567888", "0.42546704", "0.42488033", "0.42368177", "0.4236517", "0.42326653", "0.42307514", "0.4222311", "0.4222311", "0.4211021", "0.41991156", "0.41974473", "0.41948625", "0.4167728", "0.41663945", "0.41582096", "0.41559044", "0.4155313", "0.41540658", "0.41536942", "0.41403878", "0.41335368", "0.413301", "0.41293663", "0.4124469", "0.41235948", "0.41198555", "0.41172716", "0.41140372", "0.41079682", "0.4106139", "0.4104222", "0.41040504", "0.41037992", "0.41024828", "0.40989563", "0.4095009", "0.40911454", "0.40890867", "0.40853658", "0.40808207", "0.40796053", "0.4079516", "0.40769336", "0.40760198", "0.40743887", "0.407419", "0.4073048", "0.4072159", "0.4069824", "0.40690058", "0.40627968", "0.40623644", "0.40622795", "0.406164", "0.40595773" ]
0.0
-1
reads hidden data from slack into files
def read_into_file(self, outfilepath: str):
    if self.fs_type == 'FAT':
        with open(outfilepath, 'wb+') as outfile:
            self.read(outfile)
    elif self.fs_type == 'NTFS':
        with open(outfilepath, 'wb+') as outfile:
            self.read(outfile)
    else:
        raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_parsed_data():\n\n echonest_data_files = [f for f in os.listdir('.') if re.match(\"^echonest_[\\w]+.txt$\", f)]\n\n # Setting up header with user id and attributes\n header = ['user_id']\n header.extend(ATTRIBUTES)\n\n # printing header to standard out\n print \",\".join(header) \n\n # Processing each file to obtain parsed data\n for data_file in echonest_data_files:\n user_id = data_file[9:-4] # strip file prefix/suffix to get username/id\n parse_echonest_data_file(data_file, user_id)", "def read(self):\n global filepath\n\n ham_file_cnt, spam_file_cnt = 0, 0\n\n for (dirpath, dirnames, filenames) in os.walk(filepath):\n\n for file in filenames:\n with open(dirpath + '/' + file, \"r\", encoding=\"latin1\") as f:\n line = f.readline()\n\n if dirpath.split(\"/\")[-1] == \"ham\":\n self.tokens_ham += str(line).lower().rstrip('\\n') + \" \"\n ham_file_cnt += 1\n\n if dirpath.split(\"/\")[-1] == \"spam\":\n self.tokens_spam += str(line).lower().rstrip('\\n') + \" \"\n spam_file_cnt += 1\n\n # /Users/adityagupta/Documents/00\\ USC\\ Courses/06\\ CSCI\\ 544\\ -\\ NLP/03\\ Assignments/01\\ -\\ HW/01\\ Code/train\n\n # print(filepath)\n # print(self.filename_ham[-1])\n # print(self.filename_spam[-1])\n\n # Split tokens by whitespace, and store as list\n self.tokens_ham = self.tokens_ham.split(\" \")\n self.tokens_spam = self.tokens_spam.split(\" \")\n\n # Probability of it being a Spam File or Ham File\n self.prob_ham = ham_file_cnt/(ham_file_cnt + spam_file_cnt)\n self.prob_spam = spam_file_cnt / (ham_file_cnt + spam_file_cnt)\n\n # print(self.prob_ham, self.prob_spam)\n\n # Smoothing for tokens which are only in either spam or in ham\n self.smoothing()", "def _read_data(self):", "def backfill_files(slack_token, save_folder):\n def get_history_files(save_folder):\n \"\"\"Returns a mapping from channel IDs to absolute file paths of their history entries\"\"\"\n for dirpath, _, filenames in os.walk(save_folder):\n result = {}\n for history_file in filenames:\n channel_id, extension = os.path.splitext(os.path.basename(history_file))\n if extension != \".json\": continue\n result[channel_id] = os.path.join(dirpath, history_file)\n return result\n return {}\n for channel_id, history_file in get_history_files(save_folder).items():\n with open(history_file, \"r\") as f:\n for entry in f:\n message = json.loads(entry)\n if \"file\" not in message: continue\n file_entry = message[\"file\"]\n file_id = file_entry[\"id\"]\n file_url = file_entry.get(\"url_private\") or file_entry.get(\"url_private_download\") or file_entry.get(\"url_download\")\n if file_url is None: continue\n file_slack_name = re.search(r\"/([^/]+)$\", file_url).group(1)\n download_file(slack_token, save_folder, \"{}-{}\".format(file_id, file_slack_name), file_url)", "def get_data(args, load_extracted=True):\n path = args.data_path1\n tokenizer_en = tokener()\n table = str.maketrans(\"\", \"\", '\"#$%&\\'()*+-/:;<=>@[\\\\]^_`{|}~')\n if load_extracted:\n df = load_pickle(\"df_unencoded.pkl\")\n else:\n logger.info(\"Extracting CNN stories...\")\n df = pd.DataFrame(index=[i for i in range(len(os.listdir(path)))], columns=[\"body\", \"highlights\"])\n for idx, file in tqdm(enumerate(os.listdir(path)), total=len(os.listdir(path))):\n with open(os.path.join(path, file), encoding=\"utf8\") as csv_file:\n csv_reader = csv.reader(csv_file)\n text = \"\"\n for row in csv_reader:\n text += \"\".join(t for t in row)\n highlights = re.search(\"@highlight(.*)\", text).group(1)\n highlights = highlights.replace(\"@highlight\", \". 
\")\n body = text[:re.search(\"@highlight\", text).span(0)[0]]\n df.iloc[idx][\"body\"] = body\n df.iloc[idx][\"highlights\"] = highlights\n \n if len(args.data_path2) > 2:\n path = args.data_path2\n logger.info(\"Extracting dailymail stories...\")\n df1 = pd.DataFrame(index=[i for i in range(len(os.listdir(path)))], columns=[\"body\", \"highlights\"])\n for idx, file in tqdm(enumerate(os.listdir(path)), total=len(os.listdir(path))):\n with open(os.path.join(path, file), encoding=\"utf8\") as csv_file:\n csv_reader = csv.reader(csv_file)\n text = \"\"\n for row in csv_reader:\n text += \"\".join(t for t in row)\n highlights = re.search(\"@highlight(.*)\", text).group(1)\n highlights = highlights.replace(\"@highlight\", \". \")\n body = text[:re.search(\"@highlight\", text).span(0)[0]]\n df1.iloc[idx][\"body\"] = body\n df1.iloc[idx][\"highlights\"] = highlights\n df = pd.concat([df, df1], ignore_index=True)\n del df1\n \n save_as_pickle(\"df_unencoded.pkl\", df)\n logger.info(\"Dataset length: %d\" % len(df)) \n \n if (args.level == \"word\") or (args.level == \"char\"):\n logger.info(\"Tokenizing and cleaning extracted text...\")\n df.loc[:, \"body\"] = df.apply(lambda x: clean_and_tokenize_text(x[\"body\"], table, tokenizer_en), axis=1)\n df.loc[:, \"highlights\"] = df.apply(lambda x: clean_and_tokenize_text(x[\"highlights\"], table, tokenizer_en), \\\n axis=1)\n df.loc[:, \"body_length\"] = df.apply(lambda x: len(x['body']), axis=1)\n df.loc[:, \"highlights_length\"] = df.apply(lambda x: len(x['highlights']), axis=1)\n df = df[(df[\"body_length\"] > 0) & (df[\"highlights_length\"] > 0)]\n \n logger.info(\"Limiting to max features length, building vocab and converting to id tokens...\")\n df = df[df[\"body_length\"] <= args.max_features_length]\n v = vocab(level=args.level)\n v.build_vocab(df[\"body\"])\n v.build_vocab(df[\"highlights\"])\n df.loc[:, \"body\"] = df.apply(lambda x: v.convert_w2idx(x[\"body\"]), axis=1)\n df.loc[:, \"highlights\"] = df.apply(lambda x: v.convert_w2idx(x[\"highlights\"]), axis=1)\n df.loc[:, \"highlights\"] = df.apply(lambda x: pad_sos_eos(x[\"highlights\"], 0, 2), axis=1)\n save_as_pickle(\"df_encoded.pkl\", df)\n save_as_pickle(\"vocab.pkl\", v)\n \n elif args.level == \"bpe\":\n encoder = Encoder(vocab_size=args.bpe_vocab_size, pct_bpe=args.bpe_word_ratio, word_tokenizer=tokenizer_en.tokenize)\n df.loc[:, \"body\"] = df.apply(lambda x: clean_and_tokenize_text(x[\"body\"], table, tokenizer_en, clean_only=True), axis=1)\n df.loc[:, \"highlights\"] = df.apply(lambda x: clean_and_tokenize_text(x[\"highlights\"], table, tokenizer_en, clean_only=True), \\\n axis=1)\n logger.info(\"Training bpe, this might take a while...\")\n text_list = list(df[\"body\"])\n text_list.extend(list(df[\"highlights\"]))\n encoder.fit(text_list); del text_list\n \n logger.info(\"Tokenizing to ids and limiting to max features length...\")\n df.loc[:, \"body\"] = df.apply(lambda x: next(encoder.transform([x[\"body\"]])), axis=1)\n df.loc[:, \"highlights\"] = df.apply(lambda x: next(encoder.transform([x[\"highlights\"]])), axis=1)\n df.loc[:, \"body_length\"] = df.apply(lambda x: len(x['body']), axis=1)\n df.loc[:, \"highlights_length\"] = df.apply(lambda x: len(x['highlights']), axis=1)\n df = df[(df[\"body_length\"] > 0) & (df[\"highlights_length\"] > 0)]\n df = df[df[\"body_length\"] <= args.max_features_length]\n \n '''\n logger.info(\"Converting tokens to ids...\")\n df.loc[:, \"body\"] = df.apply(lambda x: next(encoder.transform(list(\" \".join(t for t in x[\"body\"])))),\\\n 
axis=1)\n df.loc[:, \"highlights\"] = df.apply(lambda x: next(encoder.transform(list(\" \".join(t for t in x[\"highlights\"])))),\\\n axis=1)\n '''\n df.loc[:, \"highlights\"] = df.apply(lambda x: pad_sos_eos(x[\"highlights\"], encoder.word_vocab[\"__sos\"], encoder.word_vocab[\"__eos\"]),\\\n axis=1)\n \n save_as_pickle(\"df_encoded.pkl\", df)\n encoder.save(\"./data/vocab.pkl\")\n return df", "def get_data(path=\"/content/drive/My Drive/colab_data/WhatsApp Chat with YJHD ๐Ÿ˜‚.txt\"):\n ls_rows = []\n try:\n with open(path) as f:\n for line in tqdm(f):\n message_from = None\n message_text = None\n media = False\n emojis = []\n clean_text = \"\"\n mention = None\n list_to_exclude = [\"https\",\n \"This message was deleted\",\n \"<Media omitted>\"]\n split_line = line.split(\" - \")\n try:\n date = datetime.strptime(split_line[0], \"%d/%m/%y, %H:%M\")\n except ValueError as e:\n logging.debug(\"Not a Date: \" + split_line[0] + \" Exception: \" + str(e))\n continue\n message_split = split_line[1].split(\":\")\n if len(message_split) > 1:\n message_from = message_split[0]\n message_text = message_split[1].strip()\n if \"<Media omitted>\" in message_text:\n media = True\n if any(exclude in message_text for exclude in list_to_exclude):\n message_text = None\n else:\n if \"@\" in message_text:\n new_message = \"\"\n for word in message_text.split():\n if word.startswith(\"@\"):\n mention = word\n continue\n new_message += word\n message_text = new_message\n for character in message_text:\n if character in UNICODE_EMOJI:\n emojis.append(character)\n else:\n clean_text += character\n clean_text = None if clean_text.strip() == \"\" else clean_text\n emojis = None if len(emojis) < 1 else ','.join(emojis)\n POS = __get_relevant_words(clean_text)\n ls_rows.append((date, message_from, message_text, media, emojis, clean_text, mention, POS))\n df = pd.DataFrame(ls_rows, columns=[\"time\", \"from\", \"text\", \"media\", \"emojis\", \"clean_text\", \"mention\", \"POS\"])\n df.dropna(subset=['text'], inplace=True)\n return df\n except Exception as e:\n print(\"Critical Exception \" + str(e))\n return", "def findFiles(self):\n\n with open('analysis_result/firmwalkerOutput.txt', 'r') as firmwalker:\n for line in firmwalker:\n if line.startswith('##################################### ssh'):\n self.ssh = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### dropbear'):\n self.dropbear = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### busybox'):\n self.busyBox = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### telnet'):\n self.telnet = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### openssl'):\n self.openssl = next(firmwalker).strip('d/').strip('\\n')", "async def _get_data(self, ctx: Context):\n\n data = await self.config.user(ctx.author).all()\n\n data_json = json.dumps(data)\n\n file = text_to_file(data_json, filename=f\"{ctx.author.name.lower()}_brawlcord_data.json\")\n\n try:\n await ctx.author.send(file=file)\n except discord.Forbidden:\n await ctx.send(\"Unable to DM you.\")\n\n await ctx.send(\"Sent you the file!\")", "def read(self):", "def read():\n # TODO", "def findReadData(day,scope,chan,shot):\n return readData(conf.dataDir + \"%d_01_2013_osc%d/C%dosc%d-%05d.txt\" % (day, scope, chan, scope, shot),\n conf.timeDelay[scope,chan],\n conf.ampMult[scope,chan])", "def 
get_data_from_storage(data_file):\n print(f\"{CR}Yipes, I don't know how to pull data from dvc yet{C0}\")", "def fetch_lovecraft():\n data_path = check_fetch_lovecraft()\n all_data = []\n all_words = Counter()\n with zipfile.ZipFile(data_path, \"r\") as f:\n for name in f.namelist():\n if \".txt\" not in name:\n # Skip README\n continue\n data = f.read(name)\n data = data.split(\"\\n\")\n data = [l.strip() for l in data if l != \"\"]\n words = [w for l in data for w in regex.sub('', l.lower()).split(\n \" \") if w != \"\"]\n all_data.extend(data)\n all_words.update(words)\n return {\"data\": all_data,\n \"words\": all_words.keys()}", "def load_data(self):\r\n if not os.path.exists(self.origin_dir):\r\n raise ValueError(f\"Folder {self.origin_dir} not exists!\")\r\n\r\n # loop folders\r\n listglobs = glob.glob(os.path.join(self.origin_dir)+r\"[0-9]*\")\r\n count = 0\r\n temp = []\r\n for x in listglobs:\r\n\r\n # step1, get speaker id md5\r\n user_id = x.rsplit(\"\\\\\")[-1]\r\n speaker_id = hashlib.md5(user_id.encode(\"utf-8\")).hexdigest()\r\n self.wav_desc[\"speaker_id\"] = speaker_id\r\n print(\"1=>\", x)\r\n\r\n for k in [\"ไฝ ๅฅฝๅฐ้กบ\", \"ๅฐ้กบๅฐ้กบ\"]:\r\n paths = os.path.join(x, k)\r\n print(\"2=>\", paths)\r\n # step2, parse speaker info\r\n with open(os.path.join(paths, \"spearker_info.txt\"), 'r', encoding=\"utf-8\") as f:\r\n line = f.readline()\r\n arrs = line.strip().split(\"\\\\t\")\r\n if len(arrs) != 3:\r\n raise ValueError(\"Required three field in speaker_info<id>\\t<gender>\\t<age>\")\r\n self.wav_desc[\"gender\"] = arrs[1].strip(\"<\").rstrip(\">\")\r\n self.wav_desc[\"age\"] = arrs[-1].strip(\"<\").rstrip(\">\")\r\n\r\n # step3, parse wav detailed information\r\n # key: wav_id, value: info_list, [keyword, noise_type, distance, speed,user_id, equipment]\r\n wav_infos_dict = {}\r\n with open(os.path.join(paths, \"wav_desc.txt\"), \"r\", encoding=\"utf-8\") as f:\r\n for line in f.readlines():\r\n arrs = line.strip().split(\"\\\\t\")\r\n wav_infos_dict[arrs[0].strip(\"<\").rstrip(\">\")] = [x.strip(\"<\").rstrip(\">\") for\r\n x in arrs[1:]]\r\n\r\n print(f\"Parse wav info finished find {len(wav_infos_dict)} infos.\")\r\n\r\n # Step4, audio with background noise and without nose, which was back_wav and wav_data folder\r\n for wav_folder in [\"back_wav\", \"wav_data\"]:\r\n audio_lists = glob.glob(os.path.join(paths + f\"\\\\{wav_folder}\", \"*.wav\"))\r\n for xa in audio_lists:\r\n # copy data to\r\n wav_id, user_id = get_wav_name(xa)\r\n # print(wav_id, user_id)\r\n # create md5 id\r\n utt_id = hashlib.md5(xa.encode(\"utf-8\")).hexdigest()\r\n # speaker_id = hashlib.md5(user_id.encode(\"utf-8\")).hexdigest()\r\n # print(utt_id, speaker_id)\r\n # collect all info for an audio\r\n self.wav_desc[\"utt_id\"] = utt_id\r\n infos = wav_infos_dict[wav_id]\r\n if len(infos) != 6:\r\n print(\"==>\", infos)\r\n self.wav_desc[\"keyword_id\"] = self.keywords_dict[infos[0]]\r\n self.wav_desc[\"noise_type\"] = infos[1]\r\n self.wav_desc[\"distance\"] = infos[2]\r\n self.wav_desc[\"record_speed\"] = infos[3]\r\n self.wav_desc[\"speaker_id\"] = speaker_id\r\n self.wav_desc[\"record_equipment\"] = infos[5]\r\n\r\n # record wav information\r\n t_infos = copy.deepcopy(self.wav_desc)\r\n self.all_wavs.append(t_infos)\r\n count += 1\r\n temp.append(utt_id)\r\n\r\n # copy data to resource folder\r\n dest = shutil.copy2(xa, os.path.join(self.dest_dir, f\"audios/{utt_id}.wav\"))\r\n set_index = which_set(dest, 20, 30)\r\n self.data_index[set_index].append(t_infos)\r\n\r\n # write wav 
information into json file\r\n with open(os.path.join(self.dest_dir, \"resources/wav_desc.json\"), \"w\", encoding=\"utf-8\") as f:\r\n json.dump(self.all_wavs, f, ensure_ascii=False, indent=True)\r\n print(f\"total wavs:{count}, total ids:{len(temp)}\")\r\n for set_index in self.data_index.keys():\r\n with open(os.path.join(self.dest_dir, f\"resources/p_{set_index}.json\"), \"w\", encoding=\"utf-8\") as f:\r\n json.dump(self.data_index[set_index], f, ensure_ascii=False, indent=True)\r\n print(f\"Collect {set_index} data total {len(self.data_index[set_index])} samples.\")", "def read_file(self, currentIndex):\n handle = open(\"Program Files\\\\TvInfo\\\\\" + str(currentIndex) + \".tvInfo\", \"r\")\n data = handle.read() #reading description\n handle.close()\n return data", "def read_file(self, currentIndex):\n handle = open(\"Program Files\\\\\" + str(currentIndex) + \".tvInfo\", \"r\")\n data = handle.read() #reading description\n handle.close()\n return data", "def get_testing_data(self):\n\n print 'Loading testing data ', self.test_folder , '...'\n test_text = []\n cnt = 0\n\n for f in listdir(self.test_folder):\n file_path = join(self.test_folder, f)\n if isfile(file_path):\n cnt += 1\n if cnt % 10000 == 0:\n print 'finished:', cnt # line counter\n self.test_index.append(f[:-4])\n with open(file_path, 'rb') as f:\n test_text.append( f.read() )\n\n return test_text", "def dummy_check_metadata():\n files = collect_files('silence_removed')\n dummy_dir = 'dummy_files_check'\n if not os.path.isdir(dummy_dir):\n os.mkdir(dummy_dir)\n for i, file in tqdm(enumerate(files)):\n metadata = get_file_metadata(file)\n target_path = os.path.join(dummy_dir,\n metadata['speaker'] + '_' + metadata['word'] + '_' + str(i) + '.wav')\n shutil.copy(file, target_path)", "def main():\n LESSONS_PATH = os.path.join(LESSON_LOCATOR_DATA, LESSON_SETS[0])\n ORIGINAL_LESSONS_PATH = os.path.join(LESSONS_PATH, \"original\")\n ANNOTATED_LESSONS_PATH = os.path.join(LESSONS_PATH, \"annotated\")\n\n if not os.path.exists(ANNOTATED_LESSONS_PATH):\n os.mkdir(ANNOTATED_LESSONS_PATH)\n\n print(\"Scanning original lessons in %s...\" % ORIGINAL_LESSONS_PATH)\n\n for item in os.listdir(ORIGINAL_LESSONS_PATH):\n if item == \".DS_Store\": continue\n\n print(\" found: %s\" % item)\n\n item_path = os.path.join(ORIGINAL_LESSONS_PATH, item)\n\n lesson_number = None\n lesson_description = None\n mobj = re.search(r'^AY\\s+(\\d+)\\s*-\\s*(.+)\\.txt$', item)\n if mobj:\n lesson_number = mobj.group(1)\n lesson_description = mobj.group(2)\n\n print(\" number: %s\" % lesson_number)\n print(\" description: %s\" % lesson_description)\n\n lesson = dict()\n lesson['number'] = lesson_number\n lesson['description'] = lesson_description\n\n fh = open(item_path)\n lesson_raw_text = fh.read()\n fh.close()\n lesson_text = re.split(r'\\n', lesson_raw_text)\n# lesson_raw_text_reencoded = lesson_raw_text.decode('mac-roman').encode('utf-8')\n# lesson_text = re.split(r'\\n', lesson_raw_text_reencoded)\n\n lesson['text'] = lesson_text\n lesson['parsed'] = parseLesson(lesson_text)\n\n if lesson['parsed']['end_of_lesson'] is None:\n print(\" lesson has no 'end of lesson' marker\")\n\n lesson_json = json.dumps(lesson, indent=4)\n annotated_lesson_path = os.path.join(ANNOTATED_LESSONS_PATH, \"ay_%04d.json\" % int(lesson_number))\n fh = open(annotated_lesson_path, \"w\")\n fh.write(lesson_json)\n fh.close()\n\n else:\n print(\"ERROR: File name not understood: %s\" % item)\n\n return 0", "def test_hiddenpart(self):\n testfile='hiddenpart.eml'\n try:\n tmpfile = 
tempfile.NamedTemporaryFile(\n suffix='hidden', prefix='fuglu-unittest', dir='/tmp')\n shutil.copy(\"%s/%s\" % (TESTDATADIR, testfile), tmpfile.name)\n\n user = 'recipient-hiddenpart@unittests.fuglu.org'\n conffile = self.tempdir + \"/%s-filetypes.conf\" % user\n # the largefile in the test message is just a bunch of zeroes\n open(conffile, 'w').write(\n \"deny application\\/zip no zips allowed\")\n self.rulescache._loadrules()\n suspect = Suspect(\n 'sender@unittests.fuglu.org', user, tmpfile.name)\n\n result = self.candidate.examine(suspect)\n if type(result) is tuple:\n result, message = result\n self.assertEqual(\n result, DELETE, 'hidden message part was not detected')\n\n finally:\n tmpfile.close()\n os.remove(conffile)", "def main():\n try:\n init_file = open('keywords.json', 'r')\n init_file.close()\n except IOError:\n copy2('keywords.base', 'keywords.json')\n try:\n init_file = open('rsslist.json', 'r')\n init_file.close()\n except IOError:\n copy2('rsslist.base', 'rsslist.json')\n \n\n config_file = 'config.ini'\n config_section = 'dev'\n slack_token = load_config(config_file, config_section)\n slack_client = SlackClient(slack_token)\n feed_count = len(feed_db)\n feed_counter = feed_count\n while feed_counter > 0:\n url = feed_db.get(doc_id = feed_counter)['url']\n last_update_obj = feed_db.get(doc_id = feed_counter)['lastupdate']\n post_list, published_date = getfeed(url, last_update_obj)\n feed_counter = feed_counter - 1\n print(post_list)\n post_lastUpdate(url, published_date)\n post_to_slack(slack_client, post_list)", "def test3_advanced_info(self):\n\t\tprint \"\\nTEST 3: Extracting detailed entities info from each ontology in %s folder.\\n=================\" % DATA_FOLDER\n\n\t\tfor f in os.listdir(DATA_FOLDER):\n\t\t\tif not f.startswith('.'):\n\t\t\t\tprint \"Loading... 
>\", f\n\n\t\t\t\t# divert output to a file temporarily \n\t\t\t\tsaveout = sys.stdout \n\t\t\t\tfsock = open('out.log', 'w') \n\t\t\t\tsys.stdout = fsock \n\t\t\t\t\n\t\t\t\to = ontospy.Ontology(DATA_FOLDER + f)\n\t\t\t\tprintEntitiesInformation(o)\t\t\t\t\n\t\t\t\t\n\t\t\t\tsys.stdout = saveout\n\t\t\t\tfsock.close()\n\t\t\t\tprint \"Success.\"", "def read(path):", "def read_unlabeled(data_loc, dname):\n data = []\n fnames = []\n raw_fnames = os.listdir(data_loc + dname)\n for raw_fname in raw_fnames:\n fname = dname + '/' + raw_fname\n content = read_instance(data_loc, fname)\n data.append(content)\n fnames.append(fname)\n return data, fnames", "def read_data_nmt():\n data_dir = download_extract('fra-eng')\n with open(os.path.join(data_dir, 'fra.txt'), 'r') as f:\n return f.read()", "def return_file_read(_):\n return [\"scorevideo LOG\", \"File: log.mat\"]", "def setup_datafiles(shell,params_info):\n\n parameters_text_items = []\n for key,value in params_info.items():\n shell.write_file(value['path'], value['text'])\n parameters_text_items.append(\"%s:%s\" % (value['type'],value['path']))\n\n # generate the parameters file to feed into the url\n parameters_text = '\\n'.join(parameters_text_items)\n\n return parameters_text", "def open(self):", "def open_h5meta(filepath):\n data = dict()\n h5meta_content = read_h5meta(filepath)\n for file in h5meta_content[\"filelist\"]:\n data[file] = read_detector_data(file)\n\n return data", "def read_data(self):\n\n try:\n self.data_instance.read_data(self.directory + self.fName)\n except FileNotFoundError as file_error:\n print(\n \"# The file {} belonging to {} do not exist.\".format(\n file_error.filename, self.fName))", "def get_files_io():\n if GC.conf['general']['training']:\n files_zip = {\n 'raw': os.path.join(COOKED_DATA, 'train.txt'),\n 'new': os.path.join(COOKED_DATA, 'train_new.txt'),\n 'norm': os.path.join(COOKED_DATA, 'train_norm.txt'),\n 'manu': os.path.join(RAW_DATA, 'others', 'temp_updt_manu.txt'),\n 'labels': os.path.join(TRAIN_DATA, 'train_norm.txt_labels.pkl'),\n 'segll': os.path.join(TRAIN_DATA, 'train_norm.txt_seginf_loglab.pkl'),\n 'segdl': os.path.join(TRAIN_DATA, 'train_norm.txt_seginf_deeplog.pkl'),\n 'struct': os.path.join(TRAIN_DATA, 'train_norm.txt_structured.csv'),\n 'output': TRAIN_DATA\n }\n else:\n files_zip = {\n 'raw': os.path.join(COOKED_DATA, 'test.txt'),\n 'new': os.path.join(COOKED_DATA, 'test_new.txt'),\n 'norm': os.path.join(COOKED_DATA, 'test_norm.txt'),\n 'labels': os.path.join(TEST_DATA, 'test_norm.txt_labels.pkl'),\n 'segll': os.path.join(TEST_DATA, 'test_norm.txt_seginf_loglab.pkl'),\n 'segdl': os.path.join(TEST_DATA, 'test_norm.txt_seginf_deeplog.pkl'),\n 'map_norm_raw': os.path.join(TEST_DATA, 'map_norm_raw.pkl'),\n 'map_norm_rcv': os.path.join(TEST_DATA, 'map_norm_rcv.pkl'),\n 'norm_rcv': os.path.join(TEST_DATA, 'test_norm_rcv.txt'),\n 'struct': os.path.join(TEST_DATA, 'test_norm.txt_structured.csv'),\n 'struct_rcv': os.path.join(TEST_DATA, 'test_norm_rcv.txt_structured.csv'),\n 'top': os.path.join(TEST_DATA, 'analysis_summary_top.txt'),\n 'sum': os.path.join(TEST_DATA, 'analysis_summary.csv'),\n 'rst_llab': os.path.join(TEST_DATA, 'results_loglab.csv'),\n 'rst_dlog': os.path.join(TEST_DATA, 'results_deeplog.txt'),\n 'rst_llzr': os.path.join(TEST_DATA, 'results_loglizer.csv'),\n 'dbg': os.path.join(TEST_DATA, 'debug.csv'),\n 'output': TEST_DATA\n }\n return files_zip", "def fetch_lingspam(data_home=PATH_DATA):\n if not os.path.exists(data_home + '/lingspam_public.tar.gz'):\n urlretrieve(URL_LINGSPAM, 
data_home + '/lingspam_public.tar.gz')\n df = pd.DataFrame(columns=['text', 'spam?'])\n with tarfile.open(mode=\"r:gz\", name=data_home+'/lingspam_public.tar.gz') as f:\n # We load only the raw texts. \n folder = 'lingspam_public/bare/'\n #cada subcarpeta de lingspam_public/bare/ tiene muchos txt con mensajes clasificados por el nombre del archivos\n files = [name for name in f.getnames() if name.startswith(folder) and name.endswith('.txt')]\n for name in files:\n m = f.extractfile(name)\n df = df.append({'text':str(m.read(), 'utf-8'), \n 'spam?':1 if 'spmsg' in name else 0}, \n ignore_index=True)\n return df", "def test2_basic_info(self):\n\t\tprint \"\\nTEST 2: Extracting basic info from each ontology in %s folder.\\n=================\" % DATA_FOLDER\n\n\t\tfor f in os.listdir(DATA_FOLDER):\n\t\t\tif not f.startswith('.'):\n\t\t\t\tprint \"Loading... >\", f\n\t\t\t\t\n\t\t\t\t# divert output to a file temporarily \n\t\t\t\tsaveout = sys.stdout \n\t\t\t\tfsock = open('out.log', 'w') \n\t\t\t\tsys.stdout = fsock \n\t\t\t\t\n\t\t\t\to = ontospy.Ontology(DATA_FOLDER + f)\n\t\t\t\tprintBasicInfo(o)\t\t\t\t\n\t\t\t\t\n\t\t\t\tsys.stdout = saveout\n\t\t\t\tfsock.close()\n\t\t\t\tprint \"Success.\"", "def getFileData(self, context, manifest, filedata):\n files = context.source.listFiles()\n for fn in files:\n if 'dat' == fn.split('.')[-1]:\n data = context.source.readFile(fn)\n doc = parseDoc(data)\n root = doc.getRootElement()\n if 'CONTENT' == root.name:\n id = fn.split('.')[0]\n docfn = id + '.html'\n data = context.performTransform(data, ['Blackboard Content', 'Blackboard_content_import_xform.xsl'])\n data = data.replace('@X@EmbeddedFile.location@X@', '%s/embedded/' %id)\n filedata[docfn] = data.replace('@X@LOCALFOLDERLOCATION@X@', '%s/' %id)", "def getFileListLocal(dataset,blacklist=[ ],tag=\"\"):\n if '/pnfs/' in dataset:\n tag += \"_pnfs\"\n dataset = '__'.join(dataset.split('/')[-3:])\n filename = \"filelist/filelist_%s%s.txt\"%(dataset.lstrip('/').replace('/','__'),tag)\n filelist = [ ]\n if os.path.exists(filename):\n with open(filename,'r') as file:\n for line in file:\n line = line.rstrip('\\n')\n if line and '#' not in line and line not in blacklist:\n filelist.append(line.rstrip('\\n'))\n return filelist", "def get_file_data(filename):", "def read_file_unlabeled(filename):\n\n sentences = open(filename).read().strip().split(\"\\n\\n\") #separate tweets\n ret = []\n for sent in sentences:\n lines = sent.split(\"\\n\") #each word in the tweet\n ret.append( (lines) )\n return ret", "def load_raw_text():\n if not os.path.exists( os.path.join( DATA_HOME, RAW_TEXT_FILE ) ) or \\\n not os.path.exists( os.path.join( DATA_HOME, LABELS_FILE ) ):\n print( 'no prior files found. staring from scratch' )\n rev, rat = parse_json( os.path.join( DATA_HOME, JSON_FILE ) )\n y = np.array( rat )\n print( 'saving data to files' )\n pickle.dump( rev , open( os.path.join( DATA_HOME, RAW_TEXT_FILE ), 'wb' ) )\n pickle.dump( y , open( os.path.join( DATA_HOME, LABELS_FILE ), 'wb' ) )\n else:\n print( 'found raw text and labes. loading...' 
)\n rev = pickle.load( open( os.path.join( DATA_HOME, RAW_TEXT_FILE ), 'rb' ) )\n y = pickle.load( open( os.path.join( DATA_HOME, LABELS_FILE ), 'rb' ) )\n print( 'done' )\n \n return rev, y", "def download_all(): #@save\n for name in DATA_HUB:\n download(name)", "def read_faq_from_disk():\n return json.load(open(\"./faq.json\"))", "def load_rentedout():", "def _read_files(self):\n \n for langname in self.langnames:\n filename = f'data/word_lists/{langname}.txt'\n with open(filename) as f:\n index = self.langnames.index(langname)\n lang_list = getattr(self, f'word_list{index}')\n words = f.readlines()\n for word in words:\n fword = ''.join(char for char in word if char is not '\\n')\n lang_list.append(fword)\n f.close()\n return", "def read_in_files():\n\n num_files = len([name for name in os.listdir(DATA_SOURCE) if name.endswith(\".txt\")])\n loading_section_size = num_files / 30\n count = 0\n\n sentences_as_lists = []\n for filename in os.listdir(DATA_SOURCE):\n if filename.endswith(\".txt\"):\n\n # Pretty loading bar\n print(\"Processing Files: [\", end=\"\")\n for i in range(31, -1, -1):\n if count > i * loading_section_size:\n for j in range(0, i):\n print(\"-\", end=\"\")\n sys.stdout.flush()\n for j in range(i, 30):\n print(\" \", end=\"\")\n sys.stdout.flush()\n break;\n if count == num_files:\n print(\"] \", count, end=\"\\n\")\n else:\n print(\"] \", count, end=\"\\r\")\n sys.stdout.flush()\n\n # Open the paper\n paper_to_open = DATA_SOURCE + filename\n paper = Reader().open_file_single_string(paper_to_open)\n udata = paper.decode(\"utf-8\")\n paper = udata.encode(\"ascii\", \"ignore\")\n\n # Split the data into a list of sentences, where each sentence is a list of words\n sentences = sent_tokenize(paper)\n\n for sentence in sentences:\n words = word_tokenize(sentence)\n sentences_as_lists.append(words)\n\n if DEBUG:\n print(sentences_as_lists)\n wait()\n\n count += 1\n\n return sentences_as_lists", "def get_additional_data_from_files(df, file_description): # file description one of [\"video\", \"eaf\", \"seg\", \"gentle\"]\n if file_description == \"gentle\":\n file_folder = FILE_BASE + \"/gentle/\"\n is_gentle_file = True\n else:\n file_folder = FILE_BASE + \"/original/\"\n is_gentle_file = False\n\n file_df = None\n\n if file_description not in list(FILE_DESCRIPTIONS_TO_EXT.keys()):\n print(\"Unknown file description! 
Don't know what to do with %s files...\" % file_description)\n return None\n\n else:\n print(\"Load and extract information from %s files...\" % file_description)\n #pbar = tqdm.tqdm(total = len(np.unique(df[\"source_file\"])),desc='Files', position=0,leave=True,file=sys.stdout)\n #file_log = tqdm.tqdm(total=0, position=1, bar_format='{desc}',leave=True,file=sys.stdout)\n print(\"Total files to laod and preprocess: \", len(np.unique(df[\"source_file\"])))\n \n for i,file in enumerate(np.unique(df[\"source_file\"])):\n if i%100 == 0:\n print(\"File: \",i)\n \n filepath = file_folder + get_file_path(file,is_gentle_file=is_gentle_file) + FILE_DESCRIPTIONS_TO_EXT[file_description]\n\n if file_description == \"video\":\n file_i_df = mp4_file_processing.get_word_video_snippet_size(df, filepath)\n elif file_description == \"eaf\":\n speech_annotation_eaf_data, gesture_eaf_data = eaf_file_processing.read_eaf(filepath)\n file_i_df = eaf_file_processing.map_gestures_to_annotation(speech_annotation_eaf_data, gesture_eaf_data, remove_pauses=False)\n file_i_df = eaf_file_processing.binary_encode_gestures(file_i_df, gesture_column=\"gesture\")\n\n elif file_description == \"seg\":\n file_i_df = seg_file_processing.get_seg_file_pos_info(filepath)\n\n elif file_description == \"gentle\":\n file_i_df = gentle_file_processing.get_gentle_file_transcripts(filepath)\n \n else:\n print(\"Unknown file format!!!\")\n return \n\n if file_df is None:\n file_df = file_i_df\n else:\n file_df = pd.concat([file_df, file_i_df], ignore_index=True)\n\n #file_log.set_description_str(f'Processed file: {file}')\n #pbar.update(1)\n #sleep(0.02)\n #file_log.close()\n #pbar.close()\n return file_df", "def get_data(stage=0):\n return get_files(stage)[1]", "def open_raw(self, name):\n self._canOperate = False\n self._txt = \"\"\n try:\n with open(name, mode=\"r\", encoding=\"utf-8\") as f:\n for line in f:\n l = line.strip(\"\\n\")\n if l != \"\":\n self._txt += l + \" \"\n else:\n # paragraphing\n self._txt += \"\\n\"\n\n # cut the source into words\n self._words = re.findall(\"[\\w\\dร€รร‚รƒร„ร…ร รกรขรฃรครฅร’ร“ร”ร•ร–ร˜รฒรณรดรตรถรธรˆร‰รŠร‹รจรฉรชรซร‡รงรŒรรŽรรฌรญรฎรฏร™รšร›รœรนรบรปรผรฟร‘รฑ]+\", self._txt)\n self._length = len(self._words)\n except:\n raise FileNotFound(name)", "def save_file():\n generic = pull_list()\n result = list()\n i = 0\n while True:\n try:\n if generic[i].startswith('CVE'):\n cve_pattern = \"^CVE-\\d+-\\d+|^CVE-\\d+-[X]+\"\n header = re.findall(cve_pattern, generic[i])[0]\n i += 1\n notes = list()\n while not generic[i].startswith('CVE'):\n commit_pattern = \"http[s]?:\\/\\/.+commit\\/[\\S]+\"\n if re.search(commit_pattern, generic[i]):\n link = re.findall(commit_pattern, generic[i])\n notes.append(link[0])\n i += 1\n if notes != list():\n result.append(Data(header, notes))\n except IndexError:\n print('Finished')\n break\n return result", "def open_text(name):\n\t# Load data for each from from a file (will be part of your data processing script)\n\tinput_file = open(name+ '.pickle','r')\n\ttext = pickle.load(input_file)\n\treturn text", "async def info(self, ctx):\r\n openfile = open(\"info.txt\", \"r\")\r\n embed = discord.Embed(title='Aristobot', description='This is a bot made by Aristoza that uses the TrueSkill '\r\n 'python package (http://trueskill.org/) which is based on '\r\n 'the '\r\n 'TrueSkill rating system developed by Microsoft.',\r\n color=33023)\r\n embed.add_field(name='How it works', value=openfile.read(), inline=False)\r\n await ctx.send(embed=embed)", "def analyze_data(self):\n\n 
self.truth = self.analyze_folder(\"Truth\")\n self.truth.to_csv(self.folder + \"/truth.csv\")\n self.false = self.analyze_folder(\"False\")\n self.flase.to_csv(self.folder + \"/false.csv\")", "def read_data(self):\n self.days = [0, 2, 3, 5, 6, 8, 9, 11, 13, 14]\n path = '../data/'\n data = []\n for day in self.days:\n filename = path + 'spectrum_day{}.txt'.format(day)\n data.append(read_file(filename))\n return data", "def grab_game_data(self):\n\n # The files are - most of the time - stored in a temporary folder.\n # The common location is /tmp/proton_UNIX-USERNAME .\n # One log file is enough for the mod manager : /tmp/proton_UNIX-USERNAME/run\n\n # Get the file location\n home_directory = os.path.expanduser(\"~\")\n username = os.path.basename(home_directory)\n # So, the file we want is...\n path_to_run_exe = '/tmp/proton_' + username + '/run'\n # We want to copy it somewhere. ~/v2mm/run might be a good place.\n try:\n file_source = open(path_to_run_exe, 'r')\n if not os.path.exists(self.manager_data_directory): # Creates the directory if it does not exist.\n os.mkdir(home_directory + '/.v2mm/')\n self.add_new_logs('Creation of a new directory for the manager : ' + home_directory + '/.v2mm/')\n file_destination = open(home_directory + '/.v2mm/run', 'w')\n file_destination.write(file_source.read())\n file_source.close()\n file_destination.close()\n except Exception as e:\n self.add_new_logs(e.__str__())\n\n # Update the mod listbox once the data are loaded.\n self.mod_list.delete(0, END)\n for mod in self.get_list_of_mods(self.manager_data_directory + 'run'):\n self.mod_list.insert(END, mod)", "def read_data(osint_url, file_name):\n\n # Read in the file from https://osint.bambenekconsulting.com/feeds/\n osint_feed_url = osint_url + file_name\n\n logs_feed = urllib.request.urlopen(url=osint_feed_url).read().decode('utf-8')\n logs_feed = logs_feed.split('\\n')\n return logs_feed", "def read_files(self):\n for f in self.filenames:\n self.games.extend(pgn.loads(open(f).read()))", "def main(data_path, target_path, skip, start_index=None):\n\n ### TODO: rethink this (because it is used in another files)\n extension = \".txt\"\n\n sindex = start_index\n for root, dirs, files in os.walk(data_path):\n for sfile in files:\n if sfile.endswith(\".json\"):\n filename = sfile[:(sfile.rfind('-') if '-' in sfile else sfile.rfind('.'))]\n if skip and os.path.isfile(os.path.join(target_path, filename + extension)):\n continue\n # get file name (track id) to compare with start_index\n if sindex is not None:\n if sindex == filename:\n sindex = None\n else:\n continue\n\n path = os.path.join(root, sfile)\n # get data from .json file\n track_data = None\n try:\n with open(path) as fp_in:\n track_data = json.load(fp_in)\n print('...')\n except json.decoder.JSONDecodeError:\n print('JSON Decoding problem [', path, ']')\n\n track_id = None\n lyrics_url = None\n if track_data and 'lyrics_url' in track_data.keys():\n track_id = track_data['track_id']\n lyrics_url = track_data['lyrics_url']\n else:\n print('No lyrics data')\n continue\n\n if lyrics_url:\n print(lyrics_url)\n lyrics_text = get_lyrics_text(lyrics_url)\n if lyrics_text:\n store_lyrics_text(target_path, track_id, lyrics_text, extension)\n else:\n print('No lyrics data')\n continue\n else:\n print('No lyrics data')\n continue\n\n return", "def build_extra_vars_file(self, instance, private_data_dir):", "def read_clean_files(subjects='all', activities='all', model='all'):", "def text_only(feedback_folder_path):\n elems = os.listdir(feedback_folder_path)\n 
global mos_sim\n global mos_nat\n # ignore instruction text files\n for junk in [\"Anleitung.txt\", \"instructions.txt\"]:\n if junk in elems: elems.remove(junk)\n # iterate score text files and update MOS dictionaries\n for file in elems:\n filepath = os.path.join(feedback_folder_path, file)\n code, nat_score, sim_score = score_filepath_to_scores(filepath)\n update_dicts(code, nat_score, sim_score)", "def load_slack_users(message):\n\n users = hf.get_users()\n\n for user in users:\n if user[\"id\"] == message._get_user_id():\n if user[\"approval_level\"] != \"admin\":\n message.reply(\"Insufficient privileges.\")\n return\n\n with open(user_path) as outfile:\n users = json.load(outfile)\n\n existing_users = []\n\n for user in users:\n if (user[\"metadata\"] != \"\" or user[\"approval_level\"] != \"unapproved\"):\n existing_users.append(user)\n\n #print (existing_users)\n\n user_list = []\n for userid, user in iteritems(message._client.users):\n user_info = {}\n user_info[\"name\"] = user[\"name\"]\n user_info[\"id\"] = user[\"id\"]\n user_info[\"approval_level\"] = \"unapproved\" # By default, not approved or denied\n user_info[\"metadata\"] = \"\" # Metadata to be edited later on\n\n user_list.append(user_info)\n\n if existing_users:\n for user in existing_users:\n for listed_user in user_list:\n if user[\"id\"] == listed_user[\"id\"]:\n user_list[user_list.index(listed_user)] = user\n\n with open(user_path, 'w') as outfile:\n json.dump(user_list, outfile)\n\n message.reply(\"Successfully loaded users into json file.\")", "def _get_bids_readme(self):\n readme = []\n # Grab all readme files, loop through\n for README_fname in [\n file for file in Path(self.dataset.path).glob(\"[Rr][Ee][Aa][Dd][Mm][Ee]*\")\n ]:\n # datalad get content if annexed\n self.dataset.get(README_fname)\n # read text from file\n try:\n file_text = ensure_unicode(README_fname.read_text()).strip()\n except:\n file_text = \"\"\n # Append dict with file text + extension to list\n readme.append({\"extension\": README_fname.suffix, \"text\": file_text})\n return readme if readme else None", "def run(self):\n\n self.log.debug('Reading wordlist %s', self.wordlists)\n\n # Test\n data = self.symfony.profiler.open(self.symfony.root)\n if data is None:\n self.log.info('The target does not support file preview')\n return\n\n # Enqueue\n with open(self.wordlists) as file:\n i = 0\n for i, line in enumerate(file, 1):\n url = self.symfony.profiler.url('open')\n params = {'line': 1, 'file': line.strip()}\n self.engine.queue.put(Request(method='GET', url=url, params=params))\n self.log.debug('Enqueued %d entries', i)\n\n self.engine.join()\n found = [response for response in self.engine.results if response.status_code != 404]\n files = [resp.request.params['file'] for resp in found]\n\n # Composer lookup\n composer = [file for file in files if file.endswith('composer.lock')]\n if composer:\n self.log.info(\"Found: %s, run 'symfony security:check' or submit it at %s\", composer[0], self.security_url)\n\n if not found:\n self.log.warning('Did not find any file')\n return\n\n # Save results\n for response in found:\n data = self.symfony.profiler.parse_file_preview(response.text)\n self.symfony.files[response.request.params['file']] = data\n\n self.log.warning('Found the following files:')\n for file in files:\n self.log.warning(' %s', file)", "def gather_documents(self):\n self.document_gatherer.gather_and_save_everything(Constants.path_cord, \n Constants.path_metadata, \n Constants.path_linked_documents,\n 
Constants.path_unlinked_documents,\n Constants.path_parsed_documents,\n Constants.path_all_documents)\n \n print(\"Done gathering documents.\")", "def list_contents(self, show_hidden: bool = False):\n directories = []\n files = []\n if show_hidden:\n directories.extend([\".\", \"..\"])\n for entry in self.get_entries():\n if not entry.is_hidden() or show_hidden:\n if isinstance(entry, SaveFile):\n files.append(entry.get_bytes())\n elif isinstance(entry, Directory):\n directories.append(entry.get_name())\n elif isinstance(entry, NormalFile):\n files.append(entry.get_name())\n return \"\\n\".join(directories + files)", "def hack_text_file(self):\r\n\t\t#Ensures that the file has something that can be hacked.\r\n\t\tfile_contains_message = True\r\n\t\twhile file_contains_message:\r\n\t\t\tfile_exists = True\r\n\t\t\t#Checks to see if the file exists.\r\n\t\t\twhile file_exists:\r\n\t\t\t\tself.text_file_name = input(\"Please enter the name of the text file you wish to decrypt in format |file_name.txt|.--> \")\r\n\t\t\t\tif \".txt\" in self.text_file_name:\r\n\t\t\t\t\tfile_exists = Doc_Control().check_for_file(self.text_file_name)\r\n\t\t\t\telse: \r\n\t\t\t\t\tcontinue\r\n\t\t\t#Verifys file contains a message before before running through hack and giving user their list of decryption hits.\r\n\t\t\twhile True: \r\n\t\t\t\tself.message = Doc_Control().open_file(self.text_file_name)\r\n\t\t\t\tif self.message != \"\" and len(self.message) > 4:\r\n\t\t\t\t\tfile_contains_message = False\r\n\t\t\t\t\tbreak\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint(\"Your file does not contain a hackable message.\")\r\n\t\t\t\t\tbreak\t\t\t\r\n\t\tmax_key = len(self.message)\r\n\t\tself.i = 1\r\n\t\tpotential_hits = []\r\n\t\t#Runs through all potential keys. \r\n\t\tfor self.i in range(1, max_key):\r\n\t\t\tprint(f\"Trying key #{self.i} of {max_key} possible keys\")\t\t\t\r\n\t\t\tself.my_code = Decryptor(self.message, self.i).transfer_decrypt()\r\n\t\t\tself.hack_plausible = False\r\n\t\t\tself.verify_hack_key()\r\n\t\t\tif self.hack_plausible:\r\n\t\t\t\tpotential_hits.append(f\"Key #{self.i} yeilded {self.percent_english}% english words after decryption.\\n\" + \"\\t\" + self.my_code[:50])\r\n\t\tprint(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\")\r\n\t\tprint(\"Hacking results:\\n\")\r\n\t\tfor hit in potential_hits:\r\n\t\t\tprint(\"\\t\" + hit + \"|\\n\")", "def _file_data(self, message, data, is_external=True):\n message.file = o.File(data)\n if data.get('is_starred'):\n message.is_starred = True\n\n if is_external:\n logging.debug(\"Found external file `%s'\", data['url_private'])\n message.file.url = data['url_private']\n else:\n logging.debug(\"Found internal file `%s'\",\n data['url_private_download'])\n priv_url = data['url_private_download']\n message.file.filepath = self.downloader.download(priv_url, 'file')\n self.session.add(message.file)", "def load_data(self):", "def test_file_shizz(topic_path):\n\tL = [[1,2,3],[4,5,6]]\n\tprint(open(topic_path+'/D/'+str(L[1][0])+'.txt'))", "def __init__(self, wfile):\n self.wfile = wfile\n self.contents = []", "def create_file(self):\n for data_element in self.data:\n title = data_element['title']\n anchor = data_element['href']\n example = data_element['example']\n content = data_element['content']\n if example:\n abstract = '<section class=\"prog__container\">{}<br>{}</section>'.format(content, example)\n\n list_of_data = [\n title, # api title\n 'A', # type is article\n '', # no redirect data\n '', # ignore\n 
'', # no categories\n '', # ignore\n '', # no related topics\n '', # ignore\n '', # no external link\n '', # no disambiguation\n '', # images\n abstract, # abstract\n anchor # url to doc\n ]\n self.output_file.write('{}\\n'.format('\\t'.join(list_of_data)))", "def open(self) -> None:", "def open(self) -> None:", "def open(self) -> None:", "def smartmeter_data():\n path = '/datc/opschaler/smartmeter_data'\n file_paths = np.array(glob.glob(path + \"/*.csv\"))\n\n print('Detected %s smartmeter_data files.' % len(file_paths))\n dwelling_ids = np.array(list((map(lambda x: x[-15:-4], file_paths))))\n\n return file_paths, dwelling_ids", "def get_training_data(self):\n labels = self.get_labels()\n\n print 'Loading training data from ', self.train_folder , '...'\n train_index = []\n #train_ans = []\n train_text = []\n cnt = 0\n\n for f in listdir(self.train_folder):\n file_path = join(self.train_folder, f)\n if isfile(file_path):\n cnt += 1\n if cnt % 10000 == 0:\n print 'finished:', cnt # line counter\n #train_index.append(f[:-4])\n self.train_ans.append(labels[f[:-4]])\n with open(file_path, 'rb') as f:\n train_text.append( f.read() )\n\n return train_text", "def do_stuff_with_data_file():\n import os\n script_path = os.path.dirname(os.path.abspath(__file__))\n file_path = os.path.join(script_path, \"data/exampledata.txt\")\n print(\"reading the data file that was added to the installation:\")\n with open(file_path, \"r\") as f:\n print(f.read())", "def get_vuln_data(self):\n # Will be True if we need the entire feed to run. It will recreate vuln file.\n if self.helper.is_complete_vuln_mode():\n with open('data/default_vulnerability.json', encoding='utf-8') as f:\n data = json.load(f)\n else:\n # in normal mode will add delta info into existing file.\n data = self.helper.read_data_from_s3(\"vulnerability-data\", \"snyk-feed/\")\n return data", "def main():\n\n open_read_write()", "def write_data(tech_id, tech_name, sentence, source, date_crawled):\n with open('PDF_data.txt', 'a') as f:\n # text = match[\"tid\"] + '\\n' + match[\"name\"] + '\\n' + sent + '\\n' + source + '\\n' + date_crawled + '\\n\\n'\n text = tech_id + '\\n' + tech_name + '\\n' + sentence + '\\n' + source + '\\n' + date_crawled + '\\n\\n'\n f.write(text)", "def get_bodyparts(project_dir):\n print(f\"\\n\\n\\nLoading data\")\n df_paths = sorted(glob.glob(os.path.join(project_dir, '*.h5')))\n points_2d_df = utils.create_dlc_points_2d_file(df_paths)\n arr = points_2d_df[points_2d_df[\"frame\"]==0][[\"marker\"]][points_2d_df[\"camera\"]==0].values\n final_arr = arr.flatten().tolist()\n return(final_arr)", "def collect_data(self):\n chatoutput = list()\n blacklist = set()\n join_groups = self.read_leftout_groups()\n metadata = list()\n for channel in self.groups:\n if channel.active:\n self.blacklist = self.blacklist.union(channel.groups_blocked)\n join_groups = join_groups.union(channel.groups)\n chatoutput.append(channel.output)\n metadata.append(channel.metadata)\n else:\n if self.leave:\n self.leavechannel(channel.dialog)\n self.join_groups(join_groups, blacklist)\n self.write_data(self.blacklist, \"blocked_groups\")\n self.write_data(metadata, \"groups.meta\")\n block_number = self.get_highest_chatblock()\n self.write_data(chatoutput, \"chat_block-{}\".format(block_number))\n self.write_leftout_groups()", "def read(self, filename):\n pass", "def read(self, filename):\n pass", "def load_data():\n directories=[\"./track1/\",\n \"./track1_recovery/\",\n \"./track2/\",\n \"./track1_reverse/\",\n \"./track2_reverse/\",#Additional 
data for model built on top of lenet.h5\n \"./track2_recovery/\",#Additions data for model built on top of lenet.h5\n ]\n lines=[]\n for directory in directories:\n with open(directory+\"driving_log.csv\") as csvfile:\n reader=csv.reader(csvfile)\n for line in reader:\n lines.append(line)\n train_samples, validation_samples = train_test_split(lines, test_size=0.2)\n return train_samples, validation_samples", "def read(self):\n print(f'Metadata version {self.version_num}')\n print(f'Saved at: {self.datetime}')\n print(f'Self chat: {self.self_chat}')\n print(f'Speakers: {self.speakers}')\n print('Opt:')\n for k, v in self.opt.items():\n print(f'\\t{k}: {v}')\n for k, v in self.extra_data.items():\n print(f'{k}: {v}')", "def get_data_not_yet_ready_file(self):\n pass", "def notest_file(text):\n if debug == 2:\n print(text)\n with open(\"info_file.txt\", \"a\", encoding=\"utf-8\", ) as f:\n f.write(text + \"\\n\")\n elif debug == 1:\n with open(\"info_file.txt\", \"a\", encoding=\"utf-8\", ) as f:\n f.write(text + \"\\n\")", "def read_data(self):\n if not self.header['data included']:\n pass\n elif self.header['file type'] in (21, 26):\n self._isotope_data()\n if os.path.exists(self.filename + '_txt'):\n self._isotope_txt_data()\n elif self.header['file type'] == 22:\n # line scan types, no ImageHeader\n warnings.warn('No data read for line scan, fix')\n pass\n elif self.header['file type'] in (31, 35):\n self._beamstability_data()\n else:\n self._image_data()", "def get_movie_data(files: list) -> list:\n pass", "def read_data(input_file):\n\n def process_line(labels, words):\n l = ' '.join([label for label in labels if len(label) > 0])\n w = ' '.join([word for word in words if len(word) > 0])\n lines.append((l, w))\n words = []\n labels = []\n return words, labels, lines\n\n rf = open(input_file, 'r')\n lines = [];\n words = [];\n labels = []\n for line in rf:\n word = line.strip().split(' ')[0]\n label = line.strip().split(' ')[-1]\n # here we dont do \"DOCSTART\" check\n\n if len(line.strip()) == 0: # and words[-1] == '.'\n words, labels, lines = process_line(labels, words)\n words.append(word)\n labels.append(label)\n rf.close()\n return lines", "def clean_data(self, path, exclude_msgtypes=None):", "def build_rawdata(user_id):\n path = 'wattbikesessions/'\n files = os.listdir(path+user_id+'/')\n rawdata = []\n for file in files:\n try:\n rawdata.append(pd.read_pickle(path + file))\n except Exception:\n print('Could not load:',file)\n continue\n return preprocess.load_session_data(rawdata)", "def main():\n with open('./monitor_out.data', 'rb') as fin:\n data_in = pickle.load(fin)\n crew_data = data_in[\"crew_list\"]\n\n crew_list = []\n for crew in crew_data:\n crew_list.append({'iotid': crew})\n\n\n \"\"\"\n Broken bots status data from monitor_out2.data\n \"\"\"\n with open('./monitor_out2.data', 'rb') as fin:\n bot_list = pickle.load(fin)\n\n bot_list = [bot for bot in bot_list if bot['status'] != 'Healthy']\n\n\n \"\"\"\n Obtain coordinates of crew and broken robots\n Obtain crew description\n \"\"\"\n crew_error = []\n\n for index in crew_list:\n try:\n crew_loc = requests.get(url = \"http://routescout.sensorup.com/v1.0/Things(\"+ str(index['iotid']) + \")/Locations\", headers = headers)\n if (crew_loc.status_code >= 200) and (crew_loc.status_code < 300):\n responseJSON = crew_loc.json()\n if responseJSON['@iot.count'] == 0:\n crew_error.append(index['iotid'])\n else:\n index['coord'] = responseJSON['value'][0]['location']['coordinates']\n index['route'] = []\n else:\n print(\"there was 
an error getting crew coordinate\")\n except:\n logging.exception(\"Failed getting list of crew coordinates\")\n\n crew_list = [crew for crew in crew_list if crew['iotid'] not in crew_error]\n\n\n for index in crew_list:\n try:\n crew_loc = requests.get(url = \"http://routescout.sensorup.com/v1.0/Things(\"+ str(index['iotid']) + \")\", headers = headers)\n if (crew_loc.status_code >= 200) and (crew_loc.status_code < 300):\n responseJSON = crew_loc.json()\n index['desc'] = responseJSON['description']\n else:\n print(\"there was an error getting crew description\")\n except:\n logging.exception(\"Failed getting list of crew description\")\n\n\n for index in bot_list:\n try:\n broken_bot_loc = requests.get(url = \"http://routescout.sensorup.com/v1.0/Things(\"+ str(index['iotid']) + \")/Locations\", headers = headers)\n if (broken_bot_loc.status_code >= 200) and (broken_bot_loc.status_code < 300):\n responseJSON = broken_bot_loc.json()\n index['coord'] = responseJSON['value'][0]['location']['coordinates']\n else:\n print(\"there was an error\")\n except:\n logging.exception(\"Failed getting list of broken bots coordinates\")\n\n\n \"\"\"\n Swap coordinate format to [lat,long]\n \"\"\"\n for crew in crew_list:\n lon, lat = crew['coord'][0], crew['coord'][1]\n crew['coord'] = lat, lon\n\n for bot in bot_list:\n lon, lat = bot['coord'][0], bot['coord'][1]\n bot['coord'] = lat, lon\n\n\n \"\"\"\n If crew description is valid and not empty, add to crew route\n \"\"\"\n for crew in crew_list:\n if not crew['desc']:\n continue\n else:\n crew_desc = crew['desc'][1:-1]\n crew_desc = crew_desc.split(',')\n try:\n crew_desc = list(map(int, crew_desc))\n crew['route'] = crew_desc\n for bot in bot_list:\n if crew_desc[-1] == bot['iotid']:\n crew['coord'] = bot['coord']\n bot_list = [bot for bot in bot_list if bot['iotid'] not in crew_desc]\n except:\n crew['desc'] = [];\n\n\n \"\"\"\n Specify hyper-parameters for genomes\n \"\"\"\n broken_bots = {}\n for bot in bot_list:\n broken_bots.update({bot['iotid']: bot['coord']})\n\n print(broken_bots)\n\n genome_parameters = {\n \"path\": {\"type\": \"[int]\", \"possible_values\": broken_bots.keys(), \"mutation_function\": \"swap\"}\n }\n\n\n \"\"\"\n Define a fitness function\n A pythonic way to find the length of a round trip\n \"\"\"\n def distance(p1, p2):\n dist = geopy.distance.distance(p1,p2).m\n return dist\n\n def sum_of_distances(individual):\n broken_bots = [position[bot] for bot in individual[\"path\"]]\n return sum(\n [distance(bot_1, bot_2) for (bot_1, bot_2) in zip(broken_bots, broken_bots[1:] + [broken_bots[0]])]\n )\n my_population = evolve(\n genome_parameters,\n fitness_function=my_fitness_function,\n anneal_mutation_rate=True,\n show_fitness_plot=True,\n num_generations=100,\n )\n\n print(my_population)\n\n\n\n \"\"\"\n Uploading routes to server\n\n for crew in crew_list:\n try:\n data = {\"description\": crew['desc'], \"properties\": {\"route\": crew['route']}}\n r = requests.patch(url = \"http://routescout.sensorup.com/v1.0/Things(%d)\" % crew['iotid'], json = data, headers = headers)\n if (r.status_code >= 200) and (r.status_code < 300):\n logging.debug(\"Adding route to crew %d\" % crew_list[index])\n else:\n logging.warning(\"HTTP Error code while adding route for crew %d\" % crew_list[index])\n except:\n logging.exception(\"Exception thrown generating route\")\n\n \"\"\"", "def test_Telegramfd (self):\n\t\t# This file is delivered openned while this test.\n\t\ttestfilepath = os.path.join (self.hotfolder, \"fileinuse.txt\")\n\t\tf = 
open(testfilepath,\"a\") #opens file with name of \"test.txt\"\n\t\tf.write(\"This file is now opened and i'm writting on it \\n\")\n\n\n\t\tknown_values = set ([\n\t\t\t('TESTS/Test3/Telegram Desktop/Printer output.pdf', '.file'),\n\t\t\t('TESTS/Test3/Telegram Desktop/This is a Zero avi file.avi', '.file'),\n\t\t\t('TESTS/Test3/Telegram Desktop/File not in use.CBR', '.file'),\n\t\t\t('TESTS/Test3/Telegram Desktop/This is a dummy rar file.rar', '.rar'),\n\t\t\t])\n\n\t\tEntries = MD.Telegramfd (self.hotfolder)\n\t\tself.assertEqual (known_values, set(Entries))\n\n\t\tf.close ()", "def get_parsed_data(path):\n # make sure file does exist\n if not os.path.exists(path):\n print (f\"Error: path: {path} does not exist!\")\n return None\n # fetch the extension of the file\n ext = path.split('.')[-1]\n\n # read the file\n if ext == \"pdf\": text = extract_text_from_pdf(path)\n elif ext == \"docx\": text = extract_text_from_docx(path)\n else: \n print (f\"Error: extension {ext} is not supported yet! File will be removed.\")\n os.remove(path)\n return None\n\n # get tokens\n tokens = remove_noisy_words(text)\n \n # make sure we have data\n if tokens == None or len(tokens) == 0:\n print (f\"Error: no tokens are extracted! from file: {path}\")\n return None\n\n # else? every thing is fine\n return tokens", "def saveFileListLocal(dataset,filelist,blacklist=[ ],tag=\"\"):\n if '/pnfs/' in dataset:\n tag += \"_pnfs\"\n dataset = '__'.join(dataset.split('/')[-3:])\n filename = \"filelist/filelist_%s%s.txt\"%(dataset.replace('/','__'),tag)\n with open(filename,'w+') as file:\n for line in filelist:\n if line not in blacklist:\n file.write(line+'\\n')\n return filename", "def extract_meta_data(video_file_name, output_file=meta.txt, *args, **kwargs):", "def load_sotu_data():\n sotu_files = glob.glob(\"sotu-data/*.txt\")\n path_desc = re.compile(r\"sotu-data/([A-Za-z]+)_([0-9]{4})\\.txt\")\n for filepath in sotu_files:\n with open(filepath, \"r\") as f:\n raw_text = f.read()\n pres, year = path_desc.search(filepath).groups()\n yield {\"president\": pres, \"year\": year, \"speech\": raw_text}", "def read_reference_data():\n return {f:read_local_file(f) for f in os.listdir(DATA_DIR)}", "def readFiles(self):\n #return a list of traces\n alltraces = []\n for dfile in self._datafiles:\n traces,headers = readgeonet(dfile)\n alltraces += traces\n return alltraces", "def after_make_ww3_wind_file(msg, config, checklist):\n return []", "def read_all(self):\r\n pass" ]
[ "0.5406376", "0.5285635", "0.5277233", "0.5269591", "0.5216304", "0.52033526", "0.5198252", "0.51780796", "0.5176623", "0.516422", "0.5149858", "0.51421016", "0.5128155", "0.5119425", "0.510754", "0.5094407", "0.5070182", "0.5065792", "0.5031172", "0.50137323", "0.49910444", "0.4985972", "0.49823797", "0.4981101", "0.49797884", "0.49677342", "0.49670312", "0.4965001", "0.49591258", "0.49576238", "0.49572504", "0.49544576", "0.49505028", "0.49480513", "0.49319834", "0.4915855", "0.49141094", "0.49118033", "0.49041232", "0.49017602", "0.48928806", "0.48924267", "0.48920703", "0.48850247", "0.48788723", "0.48729354", "0.4870762", "0.4870013", "0.4859124", "0.48507857", "0.48397356", "0.48366177", "0.4832531", "0.48190767", "0.48154512", "0.48138273", "0.48079473", "0.4807708", "0.48011786", "0.47881025", "0.47878656", "0.4784887", "0.4781131", "0.47808072", "0.4778229", "0.47781786", "0.47759816", "0.47648957", "0.47619927", "0.47606954", "0.47606954", "0.47606954", "0.47514662", "0.47301707", "0.47234648", "0.4721189", "0.47176424", "0.47120488", "0.47101328", "0.47059596", "0.4699716", "0.4699716", "0.46925947", "0.46913615", "0.46880287", "0.46875143", "0.4683947", "0.46781066", "0.4673254", "0.46718812", "0.46710405", "0.46707043", "0.46604988", "0.46596524", "0.4659552", "0.4659235", "0.46591854", "0.46572348", "0.46559682", "0.46480787", "0.46439546" ]
0.0
-1
Clears the slackspace of files. Information about them is stored in metadata.
def clear(self):
    if self.fs_type == 'FAT':
        for file_entry in self.metadata.get_files():
            file_metadata = file_entry['metadata']
            file_metadata = FATAllocatorMeta(file_metadata)
            self.fs.clear(file_metadata)
    elif self.fs_type == 'NTFS':
        for file_entry in self.metadata.get_files():
            file_metadata = file_entry['metadata']
            file_metadata = NTFSAllocatorMeta(file_metadata)
            self.fs.clear(file_metadata)
    else:
        raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean():\n clean_files()", "def clean_files(self):\n self.filenames.clear()", "def clear(self):\n\n Console.info(\"Cleaning sprite files...\")\n Console.indent()\n \n for dirPath, dirNames, fileNames in os.walk(self.base):\n for fileName in fileNames:\n if fileName.startswith(\"jasysprite\"):\n filePath = os.path.join(dirPath, fileName)\n Console.debug(\"Removing file: %s\", filePath)\n os.remove(filePath)\n \n Console.outdent()", "def clean(self):\r\n\r\n for _, data in self.composition.items():\r\n index_file = Path(data['file'] + '.fxi')\r\n if index_file.exists():\r\n index_file.unlink()", "def clear_data():\n for i in range(_MAX_NUM_TESTS):\n rand, ref = filename(i)\n if os.path.exists(rand):\n os.remove(rand)\n if os.path.exists(ref):\n os.remove(ref)", "def cleanup(self):\r\n for f in [i for d in self.data.values() for i in d[\"filenames\"]]:\r\n try:\r\n os.unlink(f)\r\n except Exception: pass\r\n self.Destroy()", "def clean_filesystem(files=[]):\n remove_files(files + find_cache_files())", "def clear_lists(self): \n self.fp_config_files = []\n self.txt_files = []\n self.fr_config_files = []", "def __del__(self):\n for filename in self.files:\n unlink(filename)", "def cleanup_files(self):\n\n self.backup_files()\n self.delete_files()", "def clear():", "def clean(self):\n\t\tself.archiver.closeFile()", "def _clear_audio_files(self):\n try:\n shutil.rmtree(self.audio_file_folder)\n except:\n print('Failure to clear audio files in {self.audio_file_folder}')", "def clear_all(self):\n self.clear_files_paths()\n self.clear_programs()", "def clear_files_paths(self):\n del self.__files_paths[:]", "def reset_memory(self, path):\n files_to_delete = os.listdir(path)\n for file in files_to_delete:\n os.remove(path + \"/\" + str(file))\n self.term_dictionary.clear()", "def erase_files(self):\n self.ofile_handle()\n self.efile_handle()\n\n os.remove(self.ofile_name())\n os.remove(self.efile_name())\n return None", "def _clear_variables( self ):\r\n self.navigation = None\r\n self.resPath = None\r\n self.resolutions = None\r\n self.currentResolution = None\r\n self.resolution = None\r\n for doc in self.include_doc:\r\n try: doc.unlink()\r\n except: pass", "def clear_data():\n dir_list = [\"generated/*\", \"pub/static/*\", \"var/cache/*\", \"var/page_cache/*\", \"var/view_preprocessed/*\", \"var/tmp/*\"]\n\n for item in dir_list:\n print(\"[ - ] Removing\", item, \"\\n\")\n subprocess.run([\"rm\", \"-rf\", item])", "def clear(self, cacheDir):", "def clear_old_files(self):\n self.logger.logMsg(\"Clearing Old Files.....\")\n try:\n for files in os.listdir(self.download_path):\n path = os.path.join(self.download_path, files)\n os.remove(path)\n for files in os.listdir(self.outpath):\n path = os.path.join(self.outpath, files)\n os.remove(path)\n except Exception as e:\n self.logger.logError(\"Error Creating Old Files {}.....\".format(str(e)))\n raise Exception('Error in Clearing Old Files')\n\n self.logger.logMsg(\"Done Clearing Old Files.....\")", "def delFiles(self):\r\n \r\n logStr = \"{0:s}.{1:s}: \".format(self.__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n \r\n try: \r\n if os.path.exists(self.h5File): \r\n os.remove(self.h5File) \r\n logger.debug(\"{0:s} File {1:s} deleted.\".format(logStr,self.h5File)) \r\n except XmError:\r\n raise \r\n except Exception as e:\r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n logger.error(logStrFinal) \r\n raise 
XmError(logStrFinal) \r\n finally:\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'_Done.'))", "def _clean_files(self):\n if self.delfiles & 1:\n ProcUtils.remove(self.okm)\n if self.delfiles & 2:\n ProcUtils.remove(self.hkm)\n if self.delfiles & 4:\n ProcUtils.remove(self.qkm)\n if self.delfiles & 8:\n ProcUtils.remove(self.obc)\n\n if self.log is False:\n ProcUtils.remove(self.pcf_file)\n base = os.path.basename(self.okm)\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogReport', base])))\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogStatus', base])))\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogUser', base])))", "def _cleanup(self):\n os.system(\"rm -r %s/*\" %(self._snippet_index_dir))\n os.system(\"rm %s/*\" %(self._para_dir))\n os.system(\"rm %s/*\" %(self._temp_dir))\n os.system(\"rm %s/*\" %(self._snippet_result_dir))", "def clean(self):\n\n for metric in self.metricList:\n listf = glob.glob(\n '{}/*_{}_{}*'.format(self.outDir, metric.name, self.num))\n if len(listf) > 0:\n for val in listf:\n os.system('rm {}'.format(val))", "def _clean_workdir(self):\n\t\ttoremove = [self._get_config_filepath(), self._get_params_filepath(), self._get_conv_filepath(), self._get_psf_filepath()]\n\t\tfor filepath in toremove:\n\t\t\tif os.path.exists(filepath):\t\n\t\t\t\tlogger.debug(\"Removing existing file %s...\" % (filepath))\n\t\t\t\tos.remove(filepath)", "def __del__(self):\n for f in self._files:\n f.close()", "def clean(self):\n files = ['CHG', 'CHGCAR', 'POSCAR', 'INCAR', 'CONTCAR',\n 'DOSCAR', 'EIGENVAL', 'IBZKPT', 'KPOINTS', 'OSZICAR',\n 'OUTCAR', 'PCDAT', 'POTCAR', 'vasprun.xml',\n 'WAVECAR', 'XDATCAR', 'PROCAR', 'ase-sort.dat',\n 'LOCPOT', 'AECCAR0', 'AECCAR1', 'AECCAR2',\n 'WAVECAR.GTO', 'vasp.out', 'vasp.err']\n for f in files:\n try:\n os.remove(f)\n except OSError:\n pass", "def teardown():\n for filename in files_to_delete:\n delete_file(filename)", "def withdraw(self):\n files = self._file_list\n for f in files:\n remove(str(f))\n self._file_list = []\n self._filename = \"\"", "def clear_images(self):\r\n\r\n # audio = self.MutagenType(self['filename'])\r\n self.audio.pop(\"metadata_block_picture\", None)\r\n self.audio.pop(\"coverart\", None)\r\n self.audio.pop(\"coverartmime\", None)\r\n self.audio.save()", "def clear_all(self):\n\n self.general_file = None\n self.general_parser = None\n\n self.specific_file = None\n self.specific_parser = None\n\n self.audio_file = None\n self.audio_parser = None\n\n self.video_file = None\n self.video_parser = None\n\n\n self.top_unique_num = None\n\n self.general_box.delete(0, END)\n self.specific_box.delete(0, END)\n self.audio_box.delete(0, END)\n self.video_box.delete(0, END)\n self.top_unique_audio_box.delete(0, END)\n self.top_unique_video_box.delete(0, END)\n\n self.top_unique_audio_entry.delete(0, END)\n self.top_unique_video_entry.delete(0, END)\n\n if self.missing_files_label is not None:\n self.missing_files_label.grid_remove()\n if self.no_month_selected_label is not None:\n self.no_month_selected_label.grid_remove()\n if self.top_n_too_large_label is not None:\n self.top_n_too_large_label.grid_remove()\n if self.cant_export_label is not None:\n self.cant_export_label.grid_remove()", "def handleCleanMetadataKeep(self):\n logging.debug(\"Removing all metadata found...\")\n filePath = self.filesList.selectedItems()[0].text(2)\n self.filesList.removeAllMeta(filePath)", "def clear(self):\n\n for a in self.formats + self.other_clear:\n setattr(self, a, None)\n self.filename = 
None\n self.timestamp = None\n self.lastfail = None", "def reset(self):\n q.system.fs.removeDirTree(self.metadataPath)\n self.__init__(self.metadataPath,self.root)", "def clean_data():\n for clean_file in clean_files:\n file_list = [f for f in os.listdir(\".\") if f.endswith(clean_file)]\n for f in file_list:\n os.remove(f)", "def clear_tmp_folder(self):\r\n for file in os.listdir(self.temp_dir):\r\n if file.endswith('.png') or file.endswith('.jpg'):\r\n path = os.path.join(self.temp_dir, file)\r\n print ('Cleaned up {}'.format(path))\r\n os.remove(path)", "def clean(files):\n\tfor file in files:\n\t\ttry:\n\t\t\tos.remove(file)\n\t\texcept Exception as e:\n\t\t\tprint(e)", "def clearMetaFiles(self, meta_id,fpath):\n\n try:\n con = self.getMetadataDatabaseConnection()\n con.cursor().callproc('clear_meta_analysis_files', [meta_id,fpath])\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n raise Exception(str(e))", "def reset(self):\n self.files = []\n self.regions = []\n self.headers = {}\n self.radial_data = []\n self.histogram_data = []\n self.p2p_data = []\n self.ptable = None", "def clean():\n try:\n os.unlink(options.coords + 'mirza_mrna_input' + '.fa')\n os.unlink(options.coords + 'mirza_mirna_input' + '.fa')\n os.unlink(options.coords + 'mirza_mirna_expressions' + '.fa')\n except:\n pass", "def cleanup(self):\n files = self.nlst()\n latest = self.latest_filename\n for filename in files:\n if filename != latest:\n result = self.delete(filename)\n logger.info(f\"Deleted old export from FTP: {result}\")", "def clean(self):\n os.remove(\"temp.py\") # Delete the file \"temp.py\", to free up disk space", "def __cleanup(self):\n self.display = None\n self.parent = None\n self.name = None\n self.files = None\n return self", "def cleanUp(self):\n self.dirMonitor.stop()\n self.filesList.cleanUp()", "def cleanup(self): \n if os.path.exists(self.inpms):\n shutil.rmtree(self.inpms)", "def clean_up(self):\n self.fname = None\n self.failed_files = []\n self.custom_failed = []\n self.results = None", "def rmGt(self):\n gtfiles = [self.outselect, self.outmktime, self.outltcube,\n self.outbincub, self.outbinmap, self.outbinexp, \n self.outexpmap, self.outsrcmap, \n os.path.join(self.workpath, 'SrcList_cntspec'+self.suffix+'.fits'),\n os.path.join(self.workpath, 'SrcList_cntspec'+self.suffix+'.log')]\n for f in gtfiles:\n if os.path.isfile(f):\n os.remove(f)\n return", "def cleanUp(self):\r\n remove_files(self._db_files_to_remove, error_on_missing=False)", "def clear_specific(self):\n self.specific_file = None\n self.specific_parser = None\n\n self.specific_box.delete(0, END)", "def __del__(self):\n for file in list(self.mFiles.values()):\n file.close()", "def __del__(self):\n for component_name, file in self._file_list.items():\n file.close()", "def cleanUp(self, f):\n os.system('rm ' + f)", "def clearRecentFiles(self):\n self.recentFiles.clear()\n for n in range(RECENTFILEMAX):\n self.setSection(CFG_RECENT, str(n), None)", "def truncate(self):\n for file_name in os.listdir(self.path):\n if file_name[0:4] == 'data':\n os.remove(self.path + '/' + file_name)\n self.current_row = 0", "def remove(self):\n self.remove_file()", "def clear_base_files(self):\r\n compilelock.get_lock()\r\n try:\r\n for base_dir in ('cuda_ndarray', 'cutils_ext', 'lazylinker_ext',\r\n 'scan_perform'):\r\n to_delete = os.path.join(self.dirname, base_dir + '.delete.me')\r\n if os.path.isdir(to_delete):\r\n try:\r\n shutil.rmtree(to_delete)\r\n _logger.debug('Deleted: %s', to_delete)\r\n 
except Exception:\r\n _logger.warning('Could not delete %s', to_delete)\r\n continue\r\n to_rename = os.path.join(self.dirname, base_dir)\r\n if os.path.isdir(to_rename):\r\n try:\r\n shutil.move(to_rename, to_delete)\r\n except Exception:\r\n _logger.warning('Could not move %s to %s',\r\n to_rename, to_delete)\r\n finally:\r\n compilelock.release_lock()", "def tearDown(self):\n\n for fname in self.fnames:\n FileSystem.unlink(fname)", "def teardown(self):\n self.file_comm.remove_file()\n super(TestCisAsciiFileOutput, self).teardown()", "def reset(self):\n if self._key:\n self._lib.StObjectReset(self._key)\n os.chdir(self._cwd)\n self._layers.clear() # layer: index\n self._substrate = None\n self._experiments.clear() # analyzed experiments\n self._tmpstandards.clear()", "def clean_outputs(self) -> None:\n\n def _delete_if_not_none(fn: Optional[str]) -> None:\n if fn is not None:\n Path(fn).unlink()\n\n _delete_if_not_none(self.config[\"LOG_FILE\"])\n\n for file_ in self.exporter.get_all_files():\n file_.unlink()", "def clean_cwd():\n\n # Generator of the files generated for each runs\n del_files = (file for file in os.listdir() if file.endswith('.vtk')\n or file.endswith('.dat')\n or file.startswith('eeldata')\n or file.endswith('.log'))\n\n for file in del_files:\n try:\n os.remove(file)\n print(\"\\rRemoved {:s} succesfully!\".format(file), end=' '*15)\n except:\n print(\"\\rFailed to remove {:s}\".format(file))\n raise\n\n print('')", "def __del__(self):\n self.close_files()", "def tearDown(self):\n\n for fname in self.fnames:\n os.remove(fname)", "def clean(self):\n if self.image:\n self.glance.images.delete(self.image['id'])\n\n if self.image_file:\n shutil.rmtree(self.download_path)", "def clear_figures() -> None:\n \n for filename in os.listdir(FIGURE_DIR):\n filepath = os.path.join(FIGURE_DIR, filename)\n try:\n shutil.rmtree(filepath)\n except OSError:\n os.remove(filepath)", "def clean(self) -> None:\n # remove all *.py and *.pyi files in the folder\n for wc in [\"*.py\", \"*.pyi\", \"modules.json\"]:\n for f in (self.package_path).rglob(wc):\n f.unlink()", "def clean(self):\n if os.path.exists(self.initial):\n if os.path.exists(self.path) and os.stat(self.path).st_size == os.stat(\n self.initial).st_size:\n os.remove(self.initial)\n else:\n # if it doesn't match, something probably crashed; rename the temporary file and\n # it'll get uploaded at some point\n self.auto_filename()\n self.rename()\n self.connect()\n os.remove(self.initial)\n if os.path.exists(self.path):\n os.remove(self.path)\n self.filename_set = False", "def wipe(self):", "def wipe(self):", "def cleanup(self):\n self.all_wav_to_mp3()\n self.past_songs_db.close()\n self.move_tracks_to_music_folder( )\n self.delete_leftovers()\n print \"Cleanup finished\"", "def tearDown(self):\r\n remove_files(self.files_to_remove, False)\r\n if self.tmpdir:\r\n rmtree(self.tmpdir)\r\n\r\n # clean up the file from init_flowgram_file\r\n if (hasattr(self, \"tmp_filename\") and exists(self.tmp_filename)):\r\n remove(self.tmp_filename)", "def cleanup(self):\n\t\tfor filename in self.cfg_files:\n\t\t\tif os.path.isfile(filename):\n\t\t\t\tsize = os.stat(filename)[6]\n\t\t\t\tif size == 0:\n\t\t\t\t\tos.remove(filename)\n\n\t\treturn True", "def clear(self):\n cols = list(self.info.columns.keys())\n for col_name in cols:\n if col_name == DEFAULT_COLUMN_NAME:\n continue\n self.clear_column(col_name)\n\n self.info.clear_files()", "def clear_file(filename):\n with open(filename, 'w'):\n pass", "def clear_model_checkpoints(self):\n if 
self.file_prefix is None:\n return\n\n with os.scandir() as path_list:\n for entry in path_list:\n if entry.is_file() and entry.name.startswith(self.file_prefix) and entry.name.endswith(\".h5\"):\n print(\"{}: Removing {}\".format(self.MODEL_NAME, entry.path))\n os.remove(entry.path)", "def clean(self):\n print(\"Cleaning outputs in %s\" % self.args.output)\n files = glob.glob(self.args.output + \"*.pkl\")\n for f in files:\n if os.path.exists(f):\n os.remove(f)", "def cleanup(self):\n if os.path.exists(self.tgzfile):\n os.remove(self.tgzfile)\n\n if os.path.exists(self.dirname):\n shutil.rmtree(self.dirname)", "def clear(self) -> None:", "def _clean_input_dir():\n for existing_file in os.listdir(join(input_dir, 'analysis')):\n if existing_file != '.hold':\n os.remove(join(input_dir, 'analysis', existing_file))", "def purge():\n all_hashes = read_all()\n used_hashes = read_used()\n\n for kind, hashes in used_hashes.items():\n to_remove = all_hashes[kind].difference(hashes)\n if kind == 'evs':\n delete_from_directory_by_hashes(EV_DIRECTORY, to_remove)\n elif kind == 'cache':\n delete_from_directory_by_hashes(CACHE_DIRECTORY, to_remove)\n elif kind == 'seeds':\n delete_from_directory_by_hashes(SEED_DIRECTORY, to_remove)\n\n reset_used()", "def clear_cache(self):\n for fle in self.cache_location.glob(\"*.pickle\"):\n fle.unlink()", "def clean(self):\n if self.options.format != 'svg':\n for svgfile in self.svgouts.itervalues():\n os.remove(svgfile)\n os.rmdir(self.tmpdir)", "def delete_leftovers(self):\n for each_file, artist in self.past_songs_db_data:\n if os.path.isfile(each_file): \n os.remove(each_file)\n print \"Deleted \" + each_file\n\n for each_file in os.listdir(\".\"):\n if each_file.endswith(\".jpg\"):\n os.remove(each_file)", "def reset(self):\n # FIXME: this state does not make sense\n self.reset_creation_info()\n self.reset_document()\n self.reset_package()\n self.reset_file_stat()\n self.reset_reviews()\n self.reset_annotations()\n self.reset_extr_lics()", "def clear_brain():\n\n if os.path.exists(os.path.abspath(\"papaya_data\")):\n shutil.rmtree(os.path.abspath(\"papaya_data\"))", "def clear(self, unversioned_min_age=None, clear_base_files=False,\r\n delete_if_problem=False):\r\n compilelock.get_lock()\r\n try:\r\n self.clear_old(\r\n age_thresh_del=-1.0,\r\n delete_if_problem=delete_if_problem)\r\n self.clear_unversioned(min_age=unversioned_min_age)\r\n if clear_base_files:\r\n self.clear_base_files()\r\n finally:\r\n compilelock.release_lock()", "def reset(self):\n # FIXME: this state does not make sense\n self.reset_creation_info()\n self.reset_document()\n self.reset_package()\n self.reset_file_stat()\n self.reset_reviews()\n self.reset_annotations()\n self.reset_extr_lics()\n self.reset_snippet()", "def delEvery():\n delMain()\n delFile()\n delPuls()\n delSat()\n delFreq()\n delTemp()\n delGly()\n delDlr()\n label['text'] = \"All json files have been deleted !\"", "def remove_all():\n storage = FileStorage()\n objects = storage.all()\n objects = list(objects.values())\n\n for element in objects:\n storage.delete(element)\n objects = storage.all()", "def cleanUp(self):\n import evoware.fileutil as F\n F.tryRemove(self.f_project, verbose=(self.VERBOSITY>1), tree=1)", "def cleanUp(self):\n print(\" cleaning up\",self.folderSave)\n for fname in glob.glob(self.folderSave+\"/*.*\"):\n if not fname.endswith(\".npy\") and not fname.endswith(\".csv\"):\n print(\" deleting\",os.path.basename(fname))\n os.remove(fname)", "def cleanup_intermediate_files(self):\n self.cmd(\"rm -f 
{local_temp_dir}/*rg_dict* \\\n {local_temp_dir}/*aln* \\\n {local_temp_dir}/snappy*\".\n format(\n local_temp_dir=self.local_temp_dir\n ),\n shell=True)", "def clear(self) -> None:\n self._tiles.clear()\n self._chunks.clear()", "def clear_tempfiles(self, remove=True):\n while self._tempfiles:\n self.pop(remove)\n self.push()", "def cleanUp(self):\n\n tapeList = sorted(glob.glob('TAPE?'))\n tapeList = ['TAPE%d' % num for num in [1, 2, 5, 6, 7, 10]]\n for tape in tapeList:\n if os.path.isfile(tape): os.remove(tape)\n # end TAPE loop", "def cleanup() -> None:\n\n for fname in glob(os.path.join(tdir, 'alexandria.*')):\n if os.path.splitext(fname)[1] not in {'.c', '.h'}:\n os.unlink(fname)", "async def clear_all(self) -> None:", "def clearImageFolder():\n filelist = listImageFolder()\n for f in filelist:\n os.remove('{}/{}'.format(imageFolder, f))", "def clean():\n for dirpath, dirnames, filenames in os.walk('.'):\n for filename in filenames:\n if filename.endswith('.pyc') or filename.endswith('.pyo'):\n full_pathname = os.path.join(dirpath, filename)\n click.echo('Removing {}'.format(full_pathname))\n os.remove(full_pathname)" ]
[ "0.74671215", "0.7415675", "0.6979348", "0.6971708", "0.6971565", "0.689763", "0.68770945", "0.6817018", "0.6782567", "0.67809623", "0.6693809", "0.666803", "0.666092", "0.6653605", "0.6635497", "0.66262114", "0.6615348", "0.66098607", "0.6608693", "0.65980065", "0.6585141", "0.6583147", "0.6582249", "0.658113", "0.6569132", "0.65410054", "0.6535095", "0.6534438", "0.6533088", "0.6526979", "0.6524327", "0.651266", "0.6494354", "0.64921427", "0.64891994", "0.64817417", "0.64670014", "0.6457787", "0.6448601", "0.6443691", "0.64342654", "0.64339244", "0.64329106", "0.64325935", "0.6423586", "0.6419193", "0.6417615", "0.6411612", "0.6410564", "0.63939506", "0.63884026", "0.63812804", "0.636938", "0.6367002", "0.6329673", "0.63279", "0.63222736", "0.6319783", "0.63166136", "0.63155746", "0.6311929", "0.62974894", "0.629731", "0.6296615", "0.6293398", "0.62828666", "0.6281185", "0.6266799", "0.62643987", "0.62643987", "0.6263217", "0.62621564", "0.6260927", "0.6256105", "0.6254578", "0.6252688", "0.625165", "0.6250273", "0.6242732", "0.624114", "0.6240974", "0.62364817", "0.6236304", "0.62234724", "0.6220622", "0.62204486", "0.62151784", "0.62099326", "0.6207238", "0.6206015", "0.61908805", "0.6187781", "0.6184907", "0.61836565", "0.6181067", "0.61778665", "0.6171138", "0.61710244", "0.6164132", "0.6159375" ]
0.74563277
1
Returns the unit vector of the vector.
def unit_vector(vector): vector = np.array(vector) if np.linalg.norm(vector) <= 0.00010: normv = 1.0 else: normv = np.linalg.norm(vector) return vector / normv
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unit_vector(vector):\n #print 'unit_vector'\n #print vector\n #print type(vector)\n #npvector = np.array(vector)\n return vector / np.linalg.norm(vector)", "def _get_unit_vector(self, v):\n return v / np.linalg.norm(v)", "def get_unit_vector(self, vector):\n return vector / la.norm(vector)", "def unit(self):\r\n return Vector(self.x/self.length(), self.y/self.length())", "def unit_vector(self,vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit():\n return Vec2d(0, 1)", "def getUnitVector(self):\n return Vector.createFromPolar(1, self.angle)", "def unit_vector(self, vector):\n return vector / np.linalg.norm(vector)", "def _unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\r\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\r\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\r\n return vector / np.linalg.norm(vector)", "def cal_unit_vec(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def to_unit(self):\n if self.is_zero():\n return Vector(0,0,0)\n else:\n magnitude = self.l2_norm()\n return Vector(self.x/magnitude, self.y/magnitude, self.z/magnitude)", "def unit_vector(vector):\n assert(vector != [0,0])\n return vector / np.linalg.norm(vector)", "def vec_unit( vec ):\r\n return np.divide( vec , np.linalg.norm( vec ) )", "def unit(vector: np.array) -> np.array:\n return np.array([*vector]) / np.sqrt((vector * vector).sum(axis=0))", "def unit_vec(v):\n vlen = np.linalg.norm(v)\n if np.isclose(vlen, 0):\n raise ValueError('Cannot make unit vector from zero vector.')\n else:\n return v / vlen", "def unit_vector(vector):\n return vector / max(np.linalg.norm(vector), 1e-10)", "def as_unit(self):\n new_vec = self.copy()\n new_vec.normalize()\n return new_vec", "def getNormalVector(self):\n vector = self.unit_vector\n vector.rotate(math.pi / 2)\n return vector", "def unit_vector(vector):\n unit_vector = np.zeros((len(vector), vector.shape[1]))\n norm = np.linalg.norm(vector, axis=1)\n ndim = vector.ndim\n\n if ndim == 1: # Handling of 1-dimensional array\n unit_vector = vector / norm\n elif ndim == 2: # Handling of 2-dimensional array\n for i in range(0, vector.shape[1]):\n unit_vector[:, i] = vector[:, i] / norm\n else:\n log.fatal(f\"Dimension of vector should be either 1- or 2-dimensional and not {ndim}-dimensional.\")\n\n return unit_vector", "def unit_vector(v):\n h = ((v[0]**2)+(v[1]**2))**0.5\n if h == 0:\n h = 0.000000000000001\n ua = v[0] / h\n ub = v[1] / h\n return (ua, ub)", "def unit_vector(vector):\n return 0 if vector[0] == 0 else vector[0]/abs(vector[0]), 0 if vector[1] == 0 else vector[1]/abs(vector[1])", "def unit_vector(vector):\n if not np.all((vector == 0)):\n return 
vector / np.linalg.norm(vector)\n else:\n return vector", "def _unit_vector(pt0, pt1):\n dis_0_to_1 = sqrt((pt0[0] - pt1[0])**2 + (pt0[1] - pt1[1])**2)\n return (pt1[0] - pt0[0]) / dis_0_to_1, \\\n (pt1[1] - pt0[1]) / dis_0_to_1", "def tangeant_unit_vector(self, t):\n a = self.a0 + t * self.da\n ca = cos(a)\n sa = sin(a)\n v = Vector((sa, -ca))\n if self.da > 0:\n v = -v\n return v", "def uv(vec):\n return vec / sqrt(dot(vec, vec))", "def uw(self):\n return sm.unitvec(self.w)", "def getNormalizedVector(self):\n return self.scalarMultiplication(self.norm() ** -1.0)", "def unit_vector(vec_in):\n if vec_in.ndim == 1:\n out = _unit_vector_single(vec_in)\n elif vec_in.ndim == 2:\n out = _unit_vector_multi(vec_in)\n else:\n raise ValueError(\n \"incorrect arg shape; must be 1-d or 2-d, yours is %d-d\"\n % (vec_in.ndim)\n )\n return out", "def unit_vector(i, j):\n magnitude = np.sqrt(i ** 2 + j ** 2)\n unit_i = i / magnitude\n unit_j = j / magnitude\n\n return unit_i, unit_j", "def unit_vector(self,vector):\n\t\tunit_vector_query=0;\n\t\tfor word in vector:\n\t\t\tunit_vector_query += vector[word]*vector[word];\n\t\tunit_vector_query = math.sqrt(unit_vector_query);\n\t\treturn unit_vector_query", "def unit(vector):\r\n result = [[0] for row in range(len(vector))]\r\n # creates the initial value for result of this function, which is a vector full of 0s with the same lenght of a given vector \r\n for z in range(len(vector)):\r\n # for loop which continues as long as there are more elements in the vector \r\n result[z] = vector[z]/norm(vector)\r\n # the new result being each element in the list being divided by the norm \r\n return result", "def _unitVector(self, data: numpy.array, axis: Optional[int] = None, out: Optional[numpy.array] = None) -> numpy.array:\n if out is None:\n data = numpy.array(data, dtype = numpy.float64, copy = True)\n if data.ndim == 1:\n data /= math.sqrt(numpy.dot(data, data))\n return data\n else:\n if out is not data:\n out[:] = numpy.array(data, copy = False)\n data = out\n length = numpy.atleast_1d(numpy.sum(data*data, axis))\n numpy.sqrt(length, length)\n if axis is not None:\n length = numpy.expand_dims(length, axis)\n data /= length\n if out is None:\n return data", "def vector(self) -> Vector:\n return self._normal * self._distance_from_origin", "def unit_vector(data, axis=None, out=None):\r\n if out is None:\r\n data = numpy.array(data, dtype=numpy.float64, copy=True)\r\n if data.ndim == 1:\r\n data /= math.sqrt(numpy.dot(data, data))\r\n return data\r\n else:\r\n if out is not data:\r\n out[:] = numpy.array(data, copy=False)\r\n data = out\r\n length = numpy.atleast_1d(numpy.sum(data*data, axis))\r\n numpy.sqrt(length, length)\r\n if axis is not None:\r\n length = numpy.expand_dims(length, axis)\r\n data /= length\r\n if out is None:\r\n return data", "def unit_vector(data, axis=None, out=None):\r\n if out is None:\r\n data = np.array(data, dtype=np.float64, copy=True)\r\n if data.ndim == 1:\r\n data /= math.sqrt(np.dot(data, data))\r\n return data\r\n else:\r\n if out is not data:\r\n out[:] = np.array(data, copy=False)\r\n data = out\r\n length = np.atleast_1d(np.sum(data*data, axis))\r\n np.sqrt(length, length)\r\n if axis is not None:\r\n length = np.expand_dims(length, axis)\r\n data /= length\r\n if out is None:\r\n return data", "def unit_vector(data, axis=None, out=None):\n if out is None:\n data = np.array(data, dtype=np.float64, copy=True)\n if data.ndim == 1:\n data /= math.sqrt(np.dot(data, data))\n return data\n else:\n if out is not data:\n out[:] = 
np.array(data, copy=False)\n data = out\n length = np.atleast_1d(np.sum(data*data, axis))\n np.sqrt(length, length)\n if axis is not None:\n length = np.expand_dims(length, axis)\n data /= length\n if out is None:\n return data", "def normalized(self):\n try:\n m = abs(self)\n return self / m\n except ZeroDivisionError as e:\n raise Exception(\"Attempted to normalize a zero vector, return a unit vector at zero degrees\") from e\n # return Vector(1, 0)", "def unit(direction):\r\n return Vector(0, -1).rotate(direction)", "def vector(self):\n return self.__vector", "def unitize_vector(vector):\n # Section 1: Ensure that a vector was given\n if len(vector) > 1 and len(vector[0]) > 1:\n raise ArithmeticError(\n 'Vector must be a row or column vector.')\n\n # Section 2: Determine vector magnitude\n rows = len(vector); cols = len(vector[0])\n mag = 0\n for row in vector:\n for value in row:\n mag += value ** 2\n mag = mag ** 0.5\n\n # Section 3: Make a copy of vector\n new = copy_matrix(vector)\n\n # Section 4: Unitize the copied vector\n for i in range(rows):\n for j in range(cols):\n new[i][j] = new[i][j] / mag\n\n return new", "def Normal(self):\n return Vector(self.normal)", "def normalized(self):\n len = self.length\n return Vector(self.x / len, self.y / len)", "def random_vector_in_unit_ball():\n x = np.random.normal(loc=0.0, scale=1.0, size=(numSamples, self.dim))\n z = np.random.exponential(scale=1.0, size=(numSamples,))\n d = (np.sum(np.square(x), axis=1) + z) ** 0.5\n d = d[:, np.newaxis]\n return x / d", "def unit_vector(a, b):\n tmp = _np.zeros(b)\n tmp[a] = 1\n return tmp", "def unit_vectors(x):\n xnew = x.copy()\n for v in range(x.shape[-1]):\n xnew[:, v] = x[:, v] / np.linalg.norm(x[:, v])\n return xnew", "def sphere_to_unit(v):\n sin_theta = math.sin(v[0])\n cos_theta = math.cos(v[0])\n return (sin_theta * math.cos(v[1]),\n sin_theta * math.sin(v[1]),\n cos_theta)", "def get_normalized_vector(vector):\n # WARN: Zero length may cause problems!\n vector_lenght = get_vector_length(vector)\n if vector_lenght != 0:\n return np.divide(vector, get_vector_length(vector))\n else:\n return [0, 0]", "def vector(self):\n \n v_list = Householder.triangle_operation(self)[1]\n \n return(v_list)", "def vector(self):\n return self.q[1:4]", "def __call__(self):\n return self._vector", "def v(self):\n return Vector2(self.position)", "def direction(self):\n len = self.length()\n if len == 0.0:\n uvec = pos.Pos(np.transpose(np.array([0, 0, 0])))\n else:\n uvec = pos.Pos(np.transpose(np.array([(self.end.x - self.start.x) / len,\n (self.end.y - self.start.y) / len,\n (self.end.z - self.start.z) / len])))\n return uvec", "def normal(self) -> Vector:\n return self._normal", "def normal_at(self, u, v, world=True):\n u = u * pi\n v = v * PI2\n x = cos(u) * sin(v)\n y = sin(u) * sin(v)\n z = cos(v)\n normal = Vector(x, y, z)\n if world:\n normal.transform(self.transformation)\n return normal", "def normalized(self):\n length = self.length\n if length != 0:\n return self/length\n return Vec2d(self)", "def normalize(self):\n return Vector(self.args + []) / self.magnitude()", "def unit_sun_r(sun_pos):\n return sun_pos / vector_magnitude(sun_pos[0], sun_pos[1], sun_pos[2])", "def __truediv__(self, factor):\n if type(factor) == Vector:\n raise NotImplementedError\n else:\n return Vector([c / factor for c in self.components])", "def magni(vector):\n return(np.linalg.norm(vector))", "def __rmul__(self, el2):\n if type(el2) is float or type(el2) is int:\n return vector(el2 * self.x, el2 * self.y, el2 * self.z)\n 
elif type(el2) is vector:\n return vector(el2.y * self.z - el2.z * self.y,\n el2.z * self.x - el2.x * self.z,\n el2.x * self.y - el2.y * self.x)\n else:\n raise TypeError('Cannot multiply a vector with something'\n 'that is neither a vector, a float or an int')", "def norm(vec):\n vel = numpy.sqrt(numpy.dot(vec,vec))\n return vel", "def normalized(first):\n if isinstance(first,FreeCAD.Vector):\n l=length(first)\n return FreeCAD.Vector(first.x/l, first.y/l, first.z/l)", "def vector(self, base_ring=None):\n if (base_ring is None) or (base_ring is self._base_ring):\n return self._vector\n else:\n return vector(base_ring, self._vector)", "def unit(self):\n # type: () -> PositionUnit\n return self._unit", "def get_channel_v_unit(self)->float:\n return self.__channel_v_unit", "def AsVector(self) -> ngsolve.la.BaseVector:", "def _get_unit_factor(self, unit: str) -> np.ndarray:\n\n unit_factors = {\n 'vx': np.array((1, 1, 1)),\n 'nm': np.array(self.parameters.scale),\n 'um': np.array(self.parameters.scale)/1000\n }\n assert unit in unit_factors.keys(), 'Invalid unit'\n unit_factor = unit_factors[unit]\n\n return unit_factor", "def _normal_vector(o, p0_3d, p1_3d):\n # The vector between middle point of v1-v2 and object center location\n # is the normal vector I'm looking for\n vn = p0_3d.lerp(p1_3d, 0.5) - o.matrix_world.translation\n # normalize so I can to length computation on it\n vn.normalize()\n return vn", "def normalize(self): # Function is fucked TODO\n l = self.length()\n for i in range(0, len(self.coords)):\n self.coords[i] /= l\n return self\n # return Vector(list([0 for i in range(len(v.coords))]))\n\n # if round(self.length() == 0):\n # s = 1 / self.length()\n # return self * s\n # else:\n # return Vector(list([0 for i in range(len(v.coords))]))", "def as_vector(self):\n return self.pdm.as_vector()", "def unit(x):\n\tl = sum([i**2 for i in x])**0.5\n\treturn [xi/l for xi in x]", "def __abs__(self):\n return Vector.createFromPoint(self).norm", "def vector_perp(v):\n assert len(v) == 2\n x, y = v\n return Vector(-y, x)", "def getVector(self):\n return Vector.createFromTwoPoints(self.p1, self.p2)", "def normal(self) -> Vec:\n return abs(self.up_axis.cross(self.forward()))", "def random_vector():\n\n import numpy as np\n\n zeta = np.random.rand(2) # Two uniformly sampled random numbers in range (0,1)\n c = 2.0*zeta[0] - 1.0 # Random cos(theta) uniformly sampled in range (-1,+1)\n if c >= 1.0: # Guard against very small chance of roundoff error\n s = 0.0 # Set sin(theta) to zero\n else:\n s = np.sqrt(1.0-c**2) # Calculate sin(theta) from cos(theta), always positive\n\n phi = zeta[1] * 2.0*np.pi # Random angle uniformly sampled in range (0,2*pi)\n\n return np.array ( ( s*np.cos(phi), s*np.sin(phi), c ), dtype=np.float_ ) # Random unit vector", "def vector_component(u, v):\n x = dot_vectors(u, v) / length_vector_sqrd(v)\n return scale_vector(v, x)", "def AsVector(self) -> BaseVector:", "def vector(x, y, z):\n return point_or_vector(x,y,z,0.0)", "def get_ucm_vec(p0=None, p1=None):\n if p0 is None:\n p0 = np.array([25, 100])\n if p1 is None:\n p1 = np.array([100, 25])\n parallel = p1 - p0\n parallel = parallel / np.linalg.norm(parallel) # Normalize.\n return parallel", "def magnitude(self): # @todo @caution check: something wrong?\n\n return (math.sqrt(reduce(lambda x, y: x+y,\n [x**2 for x in self.vector])))", "def normalize(self, vec):\n length = math.sqrt( vec[0,0]*vec[0,0] + vec[0,1]*vec[0,1] + vec[0,2]*vec[0,2] )\n vnorm = vec / length\n return vnorm", "def __rmul__(self,a):\n return 
Vector(self.x*a,self.y*a)\n pass", "def dirVector(self,p1,p2):\n v=p2-p1\n l=v.Length\n return self.toMatrix(v)/l", "def unit_vector_stream(stream):\n return stream.map(lambda x: x / np.linalg.norm(x, axis=-1)[...,None])" ]
[ "0.8355314", "0.8344175", "0.83402044", "0.83041203", "0.8288785", "0.82710177", "0.8263843", "0.8237869", "0.8219826", "0.8203201", "0.8180198", "0.8180198", "0.8180198", "0.8157419", "0.8116172", "0.8116172", "0.8116172", "0.8116172", "0.8116172", "0.8116172", "0.8116172", "0.8116172", "0.8116172", "0.8116172", "0.8116172", "0.80363405", "0.8024346", "0.7914348", "0.7900733", "0.78349787", "0.7770651", "0.77661633", "0.77234215", "0.77207124", "0.76531506", "0.7631576", "0.7595943", "0.7572932", "0.7451418", "0.7346349", "0.7336356", "0.72942096", "0.7267235", "0.7236896", "0.7228997", "0.7191048", "0.718393", "0.7158418", "0.71065384", "0.7056104", "0.7035316", "0.7014858", "0.69491696", "0.6938983", "0.685306", "0.68301356", "0.67822766", "0.6776589", "0.66083777", "0.65908873", "0.6557686", "0.6555904", "0.653139", "0.6499029", "0.64957076", "0.64857525", "0.6430134", "0.64204764", "0.6407004", "0.6403999", "0.6399956", "0.63776267", "0.6363924", "0.63528776", "0.6321036", "0.62962717", "0.62949187", "0.6268904", "0.6238375", "0.6219153", "0.6213419", "0.6211758", "0.6211182", "0.6210066", "0.61896944", "0.618916", "0.61679196", "0.6164343", "0.6158671", "0.61572766", "0.6149799", "0.61477876", "0.6145891", "0.61415035", "0.6137127", "0.6131029", "0.61207145", "0.611959", "0.6116146", "0.61125845" ]
0.7595422
37
ClairpbVulnerability - a model defined in OpenAPI
def __init__(self, name=None, namespace_name=None, description=None, link=None, severity=None, metadata=None, fixed_by=None, affected_versions=None): # noqa: E501 # noqa: E501 self._name = None self._namespace_name = None self._description = None self._link = None self._severity = None self._metadata = None self._fixed_by = None self._affected_versions = None self.discriminator = None if name is not None: self.name = name if namespace_name is not None: self.namespace_name = namespace_name if description is not None: self.description = description if link is not None: self.link = link if severity is not None: self.severity = severity if metadata is not None: self.metadata = metadata if fixed_by is not None: self.fixed_by = fixed_by if affected_versions is not None: self.affected_versions = affected_versions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vulnerabilities(self) -> api.Vulnerabilities:\n return self._get_model(model=api.Vulnerabilities)", "def opt_model_create_rest_api():\n request_json = request.get_json()\n OptimModelRequestAPI(request_json).validate()\n return create_model_data(request_json)", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'uses_git': 'bool',\n 'git_remote_url': 'str',\n 'git_username': 'str',\n 'git_password': 'str',\n 'git_username_user_attribute': 'str',\n 'git_password_user_attribute': 'str',\n 'git_service_name': 'str',\n 'deploy_secret': 'str',\n 'unset_deploy_secret': 'bool',\n 'pull_request_mode': 'str',\n 'validation_required': 'bool',\n 'allow_warnings': 'bool',\n 'is_example': 'bool',\n 'can': 'dict(str, bool)'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'uses_git': 'uses_git',\n 'git_remote_url': 'git_remote_url',\n 'git_username': 'git_username',\n 'git_password': 'git_password',\n 'git_username_user_attribute': 'git_username_user_attribute',\n 'git_password_user_attribute': 'git_password_user_attribute',\n 'git_service_name': 'git_service_name',\n 'deploy_secret': 'deploy_secret',\n 'unset_deploy_secret': 'unset_deploy_secret',\n 'pull_request_mode': 'pull_request_mode',\n 'validation_required': 'validation_required',\n 'allow_warnings': 'allow_warnings',\n 'is_example': 'is_example',\n 'can': 'can'\n }\n\n self._id = None\n self._name = None\n self._uses_git = None\n self._git_remote_url = None\n self._git_username = None\n self._git_password = None\n self._git_username_user_attribute = None\n self._git_password_user_attribute = None\n self._git_service_name = None\n self._deploy_secret = None\n self._unset_deploy_secret = None\n self._pull_request_mode = None\n self._validation_required = None\n self._allow_warnings = None\n self._is_example = None\n self._can = None", "def __init__(self):\n self.swagger_types = {\n 'id_conta': 'int',\n 'id_pessoa': 'int',\n 'id_cartao': 'int',\n 'id_bandeira': 'int',\n 'id_tipo_cartao': 'int',\n 'numero_cartao': 'str',\n 'nome_plastico': 'str',\n 'cvv2': 'str',\n 'data_geracao': 'str',\n 'data_validade': 'str',\n 'cpf': 'str',\n 'tipo_portador': 'str',\n 'trilha1': 'str',\n 'trilha2': 'str',\n 'trilha_cvv1': 'str',\n 'trilha_cvv2': 'str',\n 'flag_virtual': 'int',\n 'nome_bandeira': 'str',\n 'flag_titular': 'int',\n 'sequencial_cartao': 'int',\n 'id_status': 'int',\n 'descricao_status_cartao': 'str',\n 'data_status': 'str',\n 'id_estagio': 'int',\n 'descricao_estagio': 'str',\n 'data_estagio': 'str',\n 'numero_bin': 'str',\n 'id_produto': 'int',\n 'descricao_produto': 'str',\n 'id_status_conta': 'int',\n 'descricao_status_conta': 'int',\n 'data_embossing': 'str',\n 'codigo_desbloqueio': 'str',\n 'nome_pessoa': 'str',\n 'tipo_pessoa': 'str',\n 'data_nascimento': 'str',\n 'id_endereco': 'int',\n 'id_tipo_endereco': 'int',\n 'descricao_tipo_endereco': 'str',\n 'cep': 'str',\n 'logradouro': 'str',\n 'numero_endereco': 'str',\n 'complemento_endereco': 'str',\n 'bairro': 'str',\n 'cidade': 'str',\n 'uf': 'str',\n 'pais': 'str',\n 'senha_criptografada': 'str',\n 'icvv': 'str',\n 'id_status_impressao': 'int'\n }\n\n self.attribute_map = {\n 'id_conta': 'idConta',\n 'id_pessoa': 'idPessoa',\n 'id_cartao': 'idCartao',\n 'id_bandeira': 'idBandeira',\n 'id_tipo_cartao': 'idTipoCartao',\n 'numero_cartao': 'numeroCartao',\n 'nome_plastico': 'nomePlastico',\n 'cvv2': 'cvv2',\n 'data_geracao': 'dataGeracao',\n 'data_validade': 'dataValidade',\n 'cpf': 'cpf',\n 'tipo_portador': 'tipoPortador',\n 'trilha1': 
'trilha1',\n 'trilha2': 'trilha2',\n 'trilha_cvv1': 'trilhaCVV1',\n 'trilha_cvv2': 'trilhaCVV2',\n 'flag_virtual': 'flagVirtual',\n 'nome_bandeira': 'nomeBandeira',\n 'flag_titular': 'flagTitular',\n 'sequencial_cartao': 'sequencialCartao',\n 'id_status': 'idStatus',\n 'descricao_status_cartao': 'descricaoStatusCartao',\n 'data_status': 'dataStatus',\n 'id_estagio': 'idEstagio',\n 'descricao_estagio': 'descricaoEstagio',\n 'data_estagio': 'dataEstagio',\n 'numero_bin': 'numeroBin',\n 'id_produto': 'idProduto',\n 'descricao_produto': 'descricaoProduto',\n 'id_status_conta': 'idStatusConta',\n 'descricao_status_conta': 'descricaoStatusConta',\n 'data_embossing': 'dataEmbossing',\n 'codigo_desbloqueio': 'codigoDesbloqueio',\n 'nome_pessoa': 'nomePessoa',\n 'tipo_pessoa': 'tipoPessoa',\n 'data_nascimento': 'dataNascimento',\n 'id_endereco': 'idEndereco',\n 'id_tipo_endereco': 'idTipoEndereco',\n 'descricao_tipo_endereco': 'descricaoTipoEndereco',\n 'cep': 'cep',\n 'logradouro': 'logradouro',\n 'numero_endereco': 'numeroEndereco',\n 'complemento_endereco': 'complementoEndereco',\n 'bairro': 'bairro',\n 'cidade': 'cidade',\n 'uf': 'uf',\n 'pais': 'pais',\n 'senha_criptografada': 'senhaCriptografada',\n 'icvv': 'icvv',\n 'id_status_impressao': 'idStatusImpressao'\n }\n\n self._id_conta = None\n self._id_pessoa = None\n self._id_cartao = None\n self._id_bandeira = None\n self._id_tipo_cartao = None\n self._numero_cartao = None\n self._nome_plastico = None\n self._cvv2 = None\n self._data_geracao = None\n self._data_validade = None\n self._cpf = None\n self._tipo_portador = None\n self._trilha1 = None\n self._trilha2 = None\n self._trilha_cvv1 = None\n self._trilha_cvv2 = None\n self._flag_virtual = None\n self._nome_bandeira = None\n self._flag_titular = None\n self._sequencial_cartao = None\n self._id_status = None\n self._descricao_status_cartao = None\n self._data_status = None\n self._id_estagio = None\n self._descricao_estagio = None\n self._data_estagio = None\n self._numero_bin = None\n self._id_produto = None\n self._descricao_produto = None\n self._id_status_conta = None\n self._descricao_status_conta = None\n self._data_embossing = None\n self._codigo_desbloqueio = None\n self._nome_pessoa = None\n self._tipo_pessoa = None\n self._data_nascimento = None\n self._id_endereco = None\n self._id_tipo_endereco = None\n self._descricao_tipo_endereco = None\n self._cep = None\n self._logradouro = None\n self._numero_endereco = None\n self._complemento_endereco = None\n self._bairro = None\n self._cidade = None\n self._uf = None\n self._pais = None\n self._senha_criptografada = None\n self._icvv = None\n self._id_status_impressao = None", "def __init__(self): # noqa: E501\n self.openapi_types = {\n }\n\n self.attribute_map = {\n }", "def __init__(self):\n self.swagger_types = {\n 'owner_id': 'str',\n 'created_at': 'datetime',\n 'identifier': 'str',\n 'identifier_type': 'str',\n 'default_language': 'str',\n 'optional_identifier': 'str',\n 'id': 'str',\n 'v': 'float',\n 'id': 'str',\n 'case_records': 'list[str]'\n }\n\n self.attribute_map = {\n 'owner_id': '_ownerId',\n 'created_at': '_createdAt',\n 'identifier': 'identifier',\n 'identifier_type': 'identifierType',\n 'default_language': 'defaultLanguage',\n 'optional_identifier': 'optionalIdentifier',\n 'id': '_id',\n 'v': '__v',\n 'case_records': 'caseRecords'\n }\n\n self._owner_id = None\n self._created_at = None\n self._identifier = None\n self._identifier_type = None\n self._default_language = None\n self._optional_identifier = None\n self._id 
= None\n self._v = None\n self._id = None\n self._case_records = None", "def __init__(self):\n self.swagger_types = {\n 'id_conta': 'int',\n 'id_produto': 'int',\n 'id_pessoa': 'int',\n 'id_parentesco': 'int',\n 'tipo_portador': 'str',\n 'nome_impresso': 'str',\n 'id_tipo_cartao': 'int',\n 'flag_ativo': 'int',\n 'data_cadastro_portador': 'str',\n 'data_cancelamento_portador': 'str'\n }\n\n self.attribute_map = {\n 'id_conta': 'idConta',\n 'id_produto': 'idProduto',\n 'id_pessoa': 'idPessoa',\n 'id_parentesco': 'idParentesco',\n 'tipo_portador': 'tipoPortador',\n 'nome_impresso': 'nomeImpresso',\n 'id_tipo_cartao': 'idTipoCartao',\n 'flag_ativo': 'flagAtivo',\n 'data_cadastro_portador': 'dataCadastroPortador',\n 'data_cancelamento_portador': 'dataCancelamentoPortador'\n }\n\n self._id_conta = None\n self._id_produto = None\n self._id_pessoa = None\n self._id_parentesco = None\n self._tipo_portador = None\n self._nome_impresso = None\n self._id_tipo_cartao = None\n self._flag_ativo = None\n self._data_cadastro_portador = None\n self._data_cancelamento_portador = None", "def GetModel(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'domain': 'str',\n 'custom_domain': 'str',\n 'customer_email': 'str',\n 'customer_name': 'str',\n 'company': 'str',\n 'date_created': 'datetime',\n 'date_validity': 'datetime',\n 'status': 'str',\n 'account_id': 'str',\n 'cluster_id': 'str',\n 'task_id': 'str',\n 'version': 'str',\n 'is_latest': 'bool',\n 'product_id': 'str',\n 'variation_id': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'domain': 'domain',\n 'custom_domain': 'custom_domain',\n 'customer_email': 'customer_email',\n 'customer_name': 'customer_name',\n 'company': 'company',\n 'date_created': 'date_created',\n 'date_validity': 'date_validity',\n 'status': 'status',\n 'account_id': 'account_id',\n 'cluster_id': 'cluster_id',\n 'task_id': 'task_id',\n 'version': 'version',\n 'is_latest': 'is_latest',\n 'product_id': 'product_id',\n 'variation_id': 'variation_id'\n }\n\n self._id = None\n self._domain = None\n self._custom_domain = None\n self._customer_email = None\n self._customer_name = None\n self._company = None\n self._date_created = None\n self._date_validity = None\n self._status = None\n self._account_id = None\n self._cluster_id = None\n self._task_id = None\n self._version = None\n self._is_latest = None\n self._product_id = None\n self._variation_id = None", "def __init__(self, id=None, name=None, identifier=None, description=None, created_at=None, updated_at=None, position=None, custom_compliance_standard=None, custom_compliance_standard_id=None, custom_compliance_domain=None, custom_compliance_domain_id=None, signatures=None, custom_signatures=None):\n self.swagger_types = {\n 'id': 'int',\n 'name': 'str',\n 'identifier': 'str',\n 'description': 'str',\n 'created_at': 'datetime',\n 'updated_at': 'datetime',\n 'position': 'int',\n 'custom_compliance_standard': 'CustomComplianceStandard',\n 'custom_compliance_standard_id': 'int',\n 'custom_compliance_domain': 'CustomComplianceDomain',\n 'custom_compliance_domain_id': 'int',\n 'signatures': 'list[Signature]',\n 'custom_signatures': 'list[CustomSignature]'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'identifier': 'identifier',\n 'description': 'description',\n 'created_at': 'created_at',\n 'updated_at': 'updated_at',\n 'position': 'position',\n 'custom_compliance_standard': 'custom_compliance_standard',\n 
'custom_compliance_standard_id': 'custom_compliance_standard_id',\n 'custom_compliance_domain': 'custom_compliance_domain',\n 'custom_compliance_domain_id': 'custom_compliance_domain_id',\n 'signatures': 'signatures',\n 'custom_signatures': 'custom_signatures'\n }\n\n self._id = id\n self._name = name\n self._identifier = identifier\n self._description = description\n self._created_at = created_at\n self._updated_at = updated_at\n self._position = position\n self._custom_compliance_standard = custom_compliance_standard\n self._custom_compliance_standard_id = custom_compliance_standard_id\n self._custom_compliance_domain = custom_compliance_domain\n self._custom_compliance_domain_id = custom_compliance_domain_id\n self._signatures = signatures\n self._custom_signatures = custom_signatures", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def get_vulnerabilities(self, **kwargs):\n ...", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'date_created': 'datetime',\n 'date_modified': 'datetime',\n 'version': 'int',\n 'division': 'DomainEntityRef',\n 'campaign_status': 'str',\n 'callable_time_set': 'DomainEntityRef',\n 'contact_list': 'DomainEntityRef',\n 'dnc_lists': 'list[DomainEntityRef]',\n 'always_running': 'bool',\n 'contact_sorts': 'list[ContactSort]',\n 'messages_per_minute': 'int',\n 'errors': 'list[RestErrorDetail]',\n 'sms_config': 'SmsConfig',\n 'self_uri': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'date_created': 'dateCreated',\n 'date_modified': 'dateModified',\n 'version': 'version',\n 'division': 'division',\n 'campaign_status': 'campaignStatus',\n 'callable_time_set': 'callableTimeSet',\n 'contact_list': 'contactList',\n 'dnc_lists': 'dncLists',\n 'always_running': 'alwaysRunning',\n 'contact_sorts': 'contactSorts',\n 'messages_per_minute': 'messagesPerMinute',\n 'errors': 'errors',\n 'sms_config': 'smsConfig',\n 'self_uri': 'selfUri'\n }\n\n self._id = None\n self._name = None\n self._date_created = None\n self._date_modified = None\n self._version = None\n self._division = None\n self._campaign_status = None\n self._callable_time_set = None\n self._contact_list = None\n self._dnc_lists = None\n self._always_running = None\n self._contact_sorts = None\n self._messages_per_minute = None\n self._errors = None\n self._sms_config = None\n self._self_uri = None", "def CreateModel(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def __init__(self, **kwargs):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'version': 'str',\n 'tagline': 'str',\n 'keywords': 'str',\n 'short_description': 'str',\n 'usage_information': 'str',\n 'long_description': 'str',\n 'license_model_description': 'str',\n 'system_requirements': 'str',\n 'time_released': 'datetime',\n 'release_notes': 'str',\n 'categories': 'list[str]',\n 'publisher': 'Publisher',\n 'languages': 'list[Item]',\n 'screenshots': 'list[Screenshot]',\n 'videos': 'list[NamedLink]',\n 'support_contacts': 'list[SupportContact]',\n 'support_links': 'list[NamedLink]',\n 'documentation_links': 'list[DocumentationLink]',\n 'icon': 'UploadData',\n 'banner': 'UploadData',\n 'regions': 'list[Region]',\n 'package_type': 'str',\n 'default_package_version': 'str',\n 'links': 'list[Link]',\n 'is_featured': 'bool'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'version': 'version',\n 'tagline': 'tagline',\n 'keywords': 'keywords',\n 'short_description': 'shortDescription',\n 
'usage_information': 'usageInformation',\n 'long_description': 'longDescription',\n 'license_model_description': 'licenseModelDescription',\n 'system_requirements': 'systemRequirements',\n 'time_released': 'timeReleased',\n 'release_notes': 'releaseNotes',\n 'categories': 'categories',\n 'publisher': 'publisher',\n 'languages': 'languages',\n 'screenshots': 'screenshots',\n 'videos': 'videos',\n 'support_contacts': 'supportContacts',\n 'support_links': 'supportLinks',\n 'documentation_links': 'documentationLinks',\n 'icon': 'icon',\n 'banner': 'banner',\n 'regions': 'regions',\n 'package_type': 'packageType',\n 'default_package_version': 'defaultPackageVersion',\n 'links': 'links',\n 'is_featured': 'isFeatured'\n }\n\n self._id = None\n self._name = None\n self._version = None\n self._tagline = None\n self._keywords = None\n self._short_description = None\n self._usage_information = None\n self._long_description = None\n self._license_model_description = None\n self._system_requirements = None\n self._time_released = None\n self._release_notes = None\n self._categories = None\n self._publisher = None\n self._languages = None\n self._screenshots = None\n self._videos = None\n self._support_contacts = None\n self._support_links = None\n self._documentation_links = None\n self._icon = None\n self._banner = None\n self._regions = None\n self._package_type = None\n self._default_package_version = None\n self._links = None\n self._is_featured = None", "def __init__(self, **kwargs):\n APIBaseModel.__init__(self, **kwargs)\n self.flavor = Flavor(**self.flavor)", "def validate(self, apiobj, method, api, param, safe):", "def __init__(self, client):\n self.client = client\n self.definitions = client.swagger_spec.definitions", "def create_model(self, ApiId: str, Name: str, Schema: str, ContentType: str = None, Description: str = None) -> Dict:\n pass", "def check_vulnerability_in_result(context):\n json_data = context.response.json()\n\n if \"component_analyses\" in json_data:\n vulnerabilities = json_data['component_analyses']['vulnerability']\n for vulnerability in vulnerabilities:\n assert \"cvss\" in vulnerability\n assert \"is_private\" in vulnerability\n assert \"vendor_cve_ids\" in vulnerability", "def __init__(self):\n self.swagger_types = {\n 'autocreate': 'bool',\n 'autodelete': 'bool',\n 'global_visible_accessible': 'bool',\n 'local_root_accessible': 'bool',\n 'local_root_visible': 'bool',\n 'local_subdir_accessible': 'bool',\n 'nfs_root_accessible': 'bool',\n 'nfs_root_visible': 'bool',\n 'nfs_subdir_accessible': 'bool',\n 'reserve': 'float',\n 'service': 'bool',\n 'smb_root_accessible': 'bool',\n 'smb_root_visible': 'bool',\n 'smb_subdir_accessible': 'bool'\n }\n\n self.attribute_map = {\n 'autocreate': 'autocreate',\n 'autodelete': 'autodelete',\n 'global_visible_accessible': 'global_visible_accessible',\n 'local_root_accessible': 'local_root_accessible',\n 'local_root_visible': 'local_root_visible',\n 'local_subdir_accessible': 'local_subdir_accessible',\n 'nfs_root_accessible': 'nfs_root_accessible',\n 'nfs_root_visible': 'nfs_root_visible',\n 'nfs_subdir_accessible': 'nfs_subdir_accessible',\n 'reserve': 'reserve',\n 'service': 'service',\n 'smb_root_accessible': 'smb_root_accessible',\n 'smb_root_visible': 'smb_root_visible',\n 'smb_subdir_accessible': 'smb_subdir_accessible'\n }\n\n self._autocreate = None\n self._autodelete = None\n self._global_visible_accessible = None\n self._local_root_accessible = None\n self._local_root_visible = None\n self._local_subdir_accessible = None\n 
self._nfs_root_accessible = None\n self._nfs_root_visible = None\n self._nfs_subdir_accessible = None\n self._reserve = None\n self._service = None\n self._smb_root_accessible = None\n self._smb_root_visible = None\n self._smb_subdir_accessible = None", "def __init__(self, cod_id: int=None, nombre: str=None, tipo: str=None, capacidad: int=None, precio_hora: int=None, facultad: str=None):\n self.swagger_types = {\n 'cod_id': int,\n 'nombre': str,\n 'tipo': str,\n 'capacidad': int,\n 'precio_hora': int,\n 'facultad': str\n }\n\n self.attribute_map = {\n 'cod_id': 'codId',\n 'nombre': 'nombre',\n 'tipo': 'tipo',\n 'capacidad': 'capacidad',\n 'precio_hora': 'precioHora',\n 'facultad': 'facultad'\n }\n\n self._cod_id = cod_id\n self._nombre = nombre\n self._tipo = tipo\n self._capacidad = capacidad\n self._precio_hora = precio_hora\n self._facultad = facultad", "def custom_openapi() -> Dict:\n if app.openapi_schema:\n return app.openapi_schema\n openapi_schema = get_openapi(\n title=\"The GenomicMedLab Cool Seq Tool\",\n version=__version__,\n description=\"Common Operations On Lots-of Sequences Tool.\",\n routes=app.routes\n )\n\n openapi_schema[\"info\"][\"contact\"] = {\n \"name\": \"Alex H. Wagner\",\n \"email\": \"Alex.Wagner@nationwidechildrens.org\",\n \"url\": \"https://www.nationwidechildrens.org/specialties/institute-for-genomic-medicine/research-labs/wagner-lab\" # noqa: E501\n }\n app.openapi_schema = openapi_schema\n return app.openapi_schema", "def GetModel(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def modelClass(self):\n raise NotImplementedError", "def P_GetModel(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def schema(self):", "def __init__(self):\n self.swagger_types = {\n 'discovery': 'Discovery',\n 'groups': 'list[str]',\n 'labels': 'object'\n }\n\n self.attribute_map = {\n 'discovery': 'discovery',\n 'groups': 'groups',\n 'labels': 'labels'\n }\n\n self._discovery = None\n self._groups = None\n self._labels = None", "def test_civic_pb():\n harvested_evidence = json_format.Parse(\n CIVIC_TEST_MESSAGE,\n evidence.Evidence(),\n ignore_unknown_fields=True)\n assert harvested_evidence.gene == \"CDKN2A\"\n assert harvested_evidence.source == \"civic\"\n assert harvested_evidence.feature.end == 21974865\n assert 'expression' in harvested_evidence.association.description\n assert harvested_evidence.civic\n harvested_evidence.civic['evidence_items'][0][\"clinical_significance\"] == \\\n \"Better Outcome\"", "def GetModel(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def __init__(self, service_area: object=None, funder: object=None, area_served: object=None, member_of: object=None, events: object=None, sub_organization: object=None, has_offer_catalog: object=None, global_location_number: str=None, reviews: object=None, members: object=None, aggregate_rating: object=None, duns: str=None, tax_id: str=None, award: str=None, makes_offer: object=None, contact_points: object=None, awards: str=None, seeks: object=None, member: object=None, founders: object=None, alumni: object=None, dissolution_date: datetime=None, address: object=None, logo: str=None, employees: object=None, telephone: 
str=None, email: str=None, department: object=None, contact_point: object=None, parent_organization: object=None, legal_name: str=None, founding_date: datetime=None, employee: object=None, number_of_employees: object=None, naics: str=None, has_pos: object=None, review: object=None, founding_location: object=None, owns: object=None, event: object=None, founder: object=None, publishing_principles: object=None, sponsor: object=None, isic_v4: str=None, location: object=None, brand: object=None, vat_id: str=None, lei_code: str=None, fax_number: str=None, same_as: str=None, url: str=None, image: object=None, additional_type: str=None, name: str=None, identifier: str=None, potential_action: object=None, main_entity_of_page: str=None, description: str=None, disambiguating_description: str=None, alternate_name: str=None): # noqa: E501\n self.swagger_types = {\n 'service_area': object,\n 'funder': object,\n 'area_served': object,\n 'member_of': object,\n 'events': object,\n 'sub_organization': object,\n 'has_offer_catalog': object,\n 'global_location_number': str,\n 'reviews': object,\n 'members': object,\n 'aggregate_rating': object,\n 'duns': str,\n 'tax_id': str,\n 'award': str,\n 'makes_offer': object,\n 'contact_points': object,\n 'awards': str,\n 'seeks': object,\n 'member': object,\n 'founders': object,\n 'alumni': object,\n 'dissolution_date': datetime,\n 'address': object,\n 'logo': str,\n 'employees': object,\n 'telephone': str,\n 'email': str,\n 'department': object,\n 'contact_point': object,\n 'parent_organization': object,\n 'legal_name': str,\n 'founding_date': datetime,\n 'employee': object,\n 'number_of_employees': object,\n 'naics': str,\n 'has_pos': object,\n 'review': object,\n 'founding_location': object,\n 'owns': object,\n 'event': object,\n 'founder': object,\n 'publishing_principles': object,\n 'sponsor': object,\n 'isic_v4': str,\n 'location': object,\n 'brand': object,\n 'vat_id': str,\n 'lei_code': str,\n 'fax_number': str,\n 'same_as': str,\n 'url': str,\n 'image': object,\n 'additional_type': str,\n 'name': str,\n 'identifier': str,\n 'potential_action': object,\n 'main_entity_of_page': str,\n 'description': str,\n 'disambiguating_description': str,\n 'alternate_name': str\n }\n\n self.attribute_map = {\n 'service_area': 'serviceArea',\n 'funder': 'funder',\n 'area_served': 'areaServed',\n 'member_of': 'memberOf',\n 'events': 'events',\n 'sub_organization': 'subOrganization',\n 'has_offer_catalog': 'hasOfferCatalog',\n 'global_location_number': 'globalLocationNumber',\n 'reviews': 'reviews',\n 'members': 'members',\n 'aggregate_rating': 'aggregateRating',\n 'duns': 'duns',\n 'tax_id': 'taxID',\n 'award': 'award',\n 'makes_offer': 'makesOffer',\n 'contact_points': 'contactPoints',\n 'awards': 'awards',\n 'seeks': 'seeks',\n 'member': 'member',\n 'founders': 'founders',\n 'alumni': 'alumni',\n 'dissolution_date': 'dissolutionDate',\n 'address': 'address',\n 'logo': 'logo',\n 'employees': 'employees',\n 'telephone': 'telephone',\n 'email': 'email',\n 'department': 'department',\n 'contact_point': 'contactPoint',\n 'parent_organization': 'parentOrganization',\n 'legal_name': 'legalName',\n 'founding_date': 'foundingDate',\n 'employee': 'employee',\n 'number_of_employees': 'numberOfEmployees',\n 'naics': 'naics',\n 'has_pos': 'hasPOS',\n 'review': 'review',\n 'founding_location': 'foundingLocation',\n 'owns': 'owns',\n 'event': 'event',\n 'founder': 'founder',\n 'publishing_principles': 'publishingPrinciples',\n 'sponsor': 'sponsor',\n 'isic_v4': 'isicV4',\n 'location': 
'location',\n 'brand': 'brand',\n 'vat_id': 'vatID',\n 'lei_code': 'leiCode',\n 'fax_number': 'faxNumber',\n 'same_as': 'sameAs',\n 'url': 'url',\n 'image': 'image',\n 'additional_type': 'additionalType',\n 'name': 'name',\n 'identifier': 'identifier',\n 'potential_action': 'potentialAction',\n 'main_entity_of_page': 'mainEntityOfPage',\n 'description': 'description',\n 'disambiguating_description': 'disambiguatingDescription',\n 'alternate_name': 'alternateName'\n }\n\n self._service_area = service_area\n self._funder = funder\n self._area_served = area_served\n self._member_of = member_of\n self._events = events\n self._sub_organization = sub_organization\n self._has_offer_catalog = has_offer_catalog\n self._global_location_number = global_location_number\n self._reviews = reviews\n self._members = members\n self._aggregate_rating = aggregate_rating\n self._duns = duns\n self._tax_id = tax_id\n self._award = award\n self._makes_offer = makes_offer\n self._contact_points = contact_points\n self._awards = awards\n self._seeks = seeks\n self._member = member\n self._founders = founders\n self._alumni = alumni\n self._dissolution_date = dissolution_date\n self._address = address\n self._logo = logo\n self._employees = employees\n self._telephone = telephone\n self._email = email\n self._department = department\n self._contact_point = contact_point\n self._parent_organization = parent_organization\n self._legal_name = legal_name\n self._founding_date = founding_date\n self._employee = employee\n self._number_of_employees = number_of_employees\n self._naics = naics\n self._has_pos = has_pos\n self._review = review\n self._founding_location = founding_location\n self._owns = owns\n self._event = event\n self._founder = founder\n self._publishing_principles = publishing_principles\n self._sponsor = sponsor\n self._isic_v4 = isic_v4\n self._location = location\n self._brand = brand\n self._vat_id = vat_id\n self._lei_code = lei_code\n self._fax_number = fax_number\n self._same_as = same_as\n self._url = url\n self._image = image\n self._additional_type = additional_type\n self._name = name\n self._identifier = identifier\n self._potential_action = potential_action\n self._main_entity_of_page = main_entity_of_page\n self._description = description\n self._disambiguating_description = disambiguating_description\n self._alternate_name = alternate_name", "def __init__(self, api, coordinator, name, dev_id, model):\n super().__init__(api, coordinator, name, dev_id)\n\n self._model = model\n\n self._is_on = False\n\n self._unique_id = f\"{dev_id}-plug\"", "def __init__(self):\n self.swagger_types = {\n 'enabled': 'bool',\n 'auto_review': 'bool',\n 'allow_direct_trades': 'bool',\n 'min_hours_in_future': 'int',\n 'unequal_paid': 'str',\n 'one_sided': 'str',\n 'weekly_min_paid_violations': 'str',\n 'weekly_max_paid_violations': 'str',\n 'requires_matching_queues': 'bool',\n 'requires_matching_languages': 'bool',\n 'requires_matching_skills': 'bool',\n 'requires_matching_planning_groups': 'bool',\n 'activity_category_rules': 'list[ShiftTradeActivityRule]'\n }\n\n self.attribute_map = {\n 'enabled': 'enabled',\n 'auto_review': 'autoReview',\n 'allow_direct_trades': 'allowDirectTrades',\n 'min_hours_in_future': 'minHoursInFuture',\n 'unequal_paid': 'unequalPaid',\n 'one_sided': 'oneSided',\n 'weekly_min_paid_violations': 'weeklyMinPaidViolations',\n 'weekly_max_paid_violations': 'weeklyMaxPaidViolations',\n 'requires_matching_queues': 'requiresMatchingQueues',\n 'requires_matching_languages': 
'requiresMatchingLanguages',\n 'requires_matching_skills': 'requiresMatchingSkills',\n 'requires_matching_planning_groups': 'requiresMatchingPlanningGroups',\n 'activity_category_rules': 'activityCategoryRules'\n }\n\n self._enabled = None\n self._auto_review = None\n self._allow_direct_trades = None\n self._min_hours_in_future = None\n self._unequal_paid = None\n self._one_sided = None\n self._weekly_min_paid_violations = None\n self._weekly_max_paid_violations = None\n self._requires_matching_queues = None\n self._requires_matching_languages = None\n self._requires_matching_skills = None\n self._requires_matching_planning_groups = None\n self._activity_category_rules = None", "def __init__(self):\n self.incidents_models = {}\n self.risks = []\n self.incidents_models = None", "def GetModelVersion(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_update_control_with_cads(self):\n ext_user_email = \"external@example.com\"\n factories.CustomAttributeDefinitionFactory(\n id=444,\n attribute_type=\"Text\",\n definition_type=\"control\"\n )\n external_user = factories.PersonFactory(email=ext_user_email)\n control = factories.ControlFactory(id=123, modified_by=external_user)\n response = self.api.get(control, control.id)\n response_json = response.json\n cad_body = self.prepare_external_cad_body(\"Text\", \"Control\")\n cav_body = self.prepare_external_cav_body(123, \"Control\")\n response_json[\"control\"].update({\n \"custom_attribute_definitions\": [cad_body],\n \"custom_attribute_values\": [cav_body],\n })\n\n response = self.api.put(\n control,\n control.id,\n response_json\n )\n\n self.assertEqual(response.status_code, 200)\n cav = all_models.CustomAttributeValue.query.one()\n self.assert_cav_fields(cav, cav_body)", "def __init__(self):\n self.swagger_types = {\n 'app_id': 'int',\n 'app_sw_rev': 'str',\n 'avg_hops': 'float',\n 'avg_latency': 'int',\n 'charge': 'int',\n 'estimated_latency_to_mote': 'int',\n 'hw_model': 'int',\n 'hw_rev': 'int',\n 'id': 'int',\n 'join_sys_time': 'datetime',\n 'last_voltage': 'int',\n 'lost_packet_count': 'int',\n 'mac_address': 'str',\n 'max_current': 'int',\n 'max_num_links': 'int',\n 'max_num_neighbors': 'int',\n 'need_neighbor': 'bool',\n 'num_good_neighbors': 'int',\n 'num_joins': 'int',\n 'num_links': 'int',\n 'num_neighbors': 'int',\n 'num_parents': 'int',\n 'power_cost_rx_link': 'int',\n 'power_cost_tx_link': 'int',\n 'reliability': 'float',\n 'rx_packet_count': 'int',\n 'stack_sw_rev': 'str',\n 'state': 'str',\n 'state_reason': 'str',\n 'state_sys_time': 'datetime',\n 'used_current': 'int'\n }\n\n self.attribute_map = {\n 'app_id': 'appId',\n 'app_sw_rev': 'appSwRev',\n 'avg_hops': 'avgHops',\n 'avg_latency': 'avgLatency',\n 'charge': 'charge',\n 'estimated_latency_to_mote': 'estimatedLatencyToMote',\n 'hw_model': 'hwModel',\n 'hw_rev': 'hwRev',\n 'id': 'id',\n 'join_sys_time': 'joinSysTime',\n 'last_voltage': 'lastVoltage',\n 'lost_packet_count': 'lostPacketCount',\n 'mac_address': 'macAddress',\n 'max_current': 'maxCurrent',\n 'max_num_links': 'maxNumLinks',\n 'max_num_neighbors': 'maxNumNeighbors',\n 'need_neighbor': 'needNeighbor',\n 'num_good_neighbors': 'numGoodNeighbors',\n 'num_joins': 'numJoins',\n 'num_links': 'numLinks',\n 'num_neighbors': 'numNeighbors',\n 'num_parents': 'numParents',\n 'power_cost_rx_link': 'powerCostRxLink',\n 'power_cost_tx_link': 'powerCostTxLink',\n 'reliability': 
'reliability',\n 'rx_packet_count': 'rxPacketCount',\n 'stack_sw_rev': 'stackSwRev',\n 'state': 'state',\n 'state_reason': 'stateReason',\n 'state_sys_time': 'stateSysTime',\n 'used_current': 'usedCurrent'\n }\n\n self._app_id = None\n self._app_sw_rev = None\n self._avg_hops = None\n self._avg_latency = None\n self._charge = None\n self._estimated_latency_to_mote = None\n self._hw_model = None\n self._hw_rev = None\n self._id = None\n self._join_sys_time = None\n self._last_voltage = None\n self._lost_packet_count = None\n self._mac_address = None\n self._max_current = None\n self._max_num_links = None\n self._max_num_neighbors = None\n self._need_neighbor = None\n self._num_good_neighbors = None\n self._num_joins = None\n self._num_links = None\n self._num_neighbors = None\n self._num_parents = None\n self._power_cost_rx_link = None\n self._power_cost_tx_link = None\n self._reliability = None\n self._rx_packet_count = None\n self._stack_sw_rev = None\n self._state = None\n self._state_reason = None\n self._state_sys_time = None\n self._used_current = None", "def __init__(self, avg_inference_time=None, avg_latency=None, avg_training_time=None, data_alerts=None, enable_health=None, ion_health_info=None, ion_instance=None, is_ml_healthy=None, is_system_healthy=None, ml_alerts=None, operation_mode=None, pending_models=None, status=None, total_data_processed=None, total_models_produced=None, total_models_updated=None, total_predictions=None, total_samples_processed=None): # noqa: E501\n self.openapi_types = {\n 'avg_inference_time': 'int',\n 'avg_latency': 'int',\n 'avg_training_time': 'int',\n 'data_alerts': 'int',\n 'enable_health': 'bool',\n 'ion_health_info': 'List[DashboardIonHealthInfo]',\n 'ion_instance': 'object',\n 'is_ml_healthy': 'bool',\n 'is_system_healthy': 'bool',\n 'ml_alerts': 'int',\n 'operation_mode': 'str',\n 'pending_models': 'int',\n 'status': 'bool',\n 'total_data_processed': 'int',\n 'total_models_produced': 'int',\n 'total_models_updated': 'int',\n 'total_predictions': 'int',\n 'total_samples_processed': 'int'\n }\n\n self.attribute_map = {\n 'avg_inference_time': 'avgInferenceTime',\n 'avg_latency': 'avgLatency',\n 'avg_training_time': 'avgTrainingTime',\n 'data_alerts': 'dataAlerts',\n 'enable_health': 'enableHealth',\n 'ion_health_info': 'ionHealthInfo',\n 'ion_instance': 'ionInstance',\n 'is_ml_healthy': 'isMLHealthy',\n 'is_system_healthy': 'isSystemHealthy',\n 'ml_alerts': 'mlAlerts',\n 'operation_mode': 'operationMode',\n 'pending_models': 'pendingModels',\n 'status': 'status',\n 'total_data_processed': 'totalDataProcessed',\n 'total_models_produced': 'totalModelsProduced',\n 'total_models_updated': 'totalModelsUpdated',\n 'total_predictions': 'totalPredictions',\n 'total_samples_processed': 'totalSamplesProcessed'\n }\n\n self._avg_inference_time = avg_inference_time\n self._avg_latency = avg_latency\n self._avg_training_time = avg_training_time\n self._data_alerts = data_alerts\n self._enable_health = enable_health\n self._ion_health_info = ion_health_info\n self._ion_instance = ion_instance\n self._is_ml_healthy = is_ml_healthy\n self._is_system_healthy = is_system_healthy\n self._ml_alerts = ml_alerts\n self._operation_mode = operation_mode\n self._pending_models = pending_models\n self._status = status\n self._total_data_processed = total_data_processed\n self._total_models_produced = total_models_produced\n self._total_models_updated = total_models_updated\n self._total_predictions = total_predictions\n self._total_samples_processed = 
total_samples_processed", "def model(self) -> Type[Model]:", "def __init__(self, model: object):\n self.model = model", "def __init__(self):\n self.swagger_types = {\n 'ids': 'list[str]',\n 'consumer': 'str',\n 'entity_type': 'str',\n 'start_date': 'datetime',\n 'end_date': 'datetime',\n 'created_date': 'datetime',\n 'updated_date': 'datetime',\n 'scope': 'str',\n 'disabled': 'bool',\n 'id': 'str'\n }\n\n self.attribute_map = {\n 'ids': 'ids',\n 'consumer': 'consumer',\n 'entity_type': 'entityType',\n 'start_date': 'startDate',\n 'end_date': 'endDate',\n 'created_date': 'createdDate',\n 'updated_date': 'updatedDate',\n 'scope': 'scope',\n 'disabled': 'disabled',\n 'id': 'id'\n }\n\n self._ids = None\n self._consumer = None\n self._entity_type = None\n self._start_date = None\n self._end_date = None\n self._created_date = None\n self._updated_date = None\n self._scope = None\n self._disabled = None\n self._id = None", "def __init__(self, attck_obj = None, **kwargs):\n\n self.attck_obj = attck_obj\n\n self.id = super(AttckMalware, self)._set_id(kwargs)\n self.created_by_ref = super(AttckMalware, self)._set_attribute(kwargs, 'created_by_ref')\n self.name = super(AttckMalware, self)._set_attribute(kwargs, 'name')\n self.aliases = super(AttckMalware, self)._set_list_items(kwargs, 'x_mitre_aliases')\n self.platforms = super(AttckMalware, self)._set_list_items(kwargs, 'x_mitre_platforms')\n self.labels = super(AttckMalware, self)._set_list_items(kwargs, 'labels')\n self.description = super(AttckMalware, self)._set_attribute(kwargs, 'description')\n self.external_references = super(AttckMalware, self)._set_reference(kwargs)\n self.created = super(AttckMalware, self)._set_attribute(kwargs, 'created')\n self.modified = super(AttckMalware, self)._set_attribute(kwargs, 'modified')\n self.stix = super(AttckMalware, self)._set_attribute(kwargs, 'id')\n self.type = super(AttckMalware, self)._set_attribute(kwargs, 'type')\n self.wiki = super(AttckMalware, self)._set_wiki(kwargs)\n self.contributor = super(AttckMalware, self)._set_list_items(kwargs, 'x_mitre_contributors')\n self.revoked = super(AttckMalware, self)._set_attribute(kwargs, 'revoked')", "def __init__(self, carrier_service_code: CarrierServiceCode=None, carrier_voyage_number: Object=None, transport_call_sequence_number: TransportCallSequenceNumber=None, un_location_code: UNLocationCode1=None, facility_code: FacilityCode=None, facility_code_list_provider: FacilityCodeListProvider=None, facility_type_code: FacilityTypeCodeTRN=None, other_facility: OtherFacility=None, mode_of_transport: ModeOfTransport=None, location: Object=None, vessel: Vessel=None): # noqa: E501\n self.swagger_types = {\n 'carrier_service_code': CarrierServiceCode,\n 'carrier_voyage_number': Object,\n 'transport_call_sequence_number': TransportCallSequenceNumber,\n 'un_location_code': UNLocationCode1,\n 'facility_code': FacilityCode,\n 'facility_code_list_provider': FacilityCodeListProvider,\n 'facility_type_code': FacilityTypeCodeTRN,\n 'other_facility': OtherFacility,\n 'mode_of_transport': ModeOfTransport,\n 'location': Object,\n 'vessel': Vessel\n }\n\n self.attribute_map = {\n 'carrier_service_code': 'carrierServiceCode',\n 'carrier_voyage_number': 'carrierVoyageNumber',\n 'transport_call_sequence_number': 'transportCallSequenceNumber',\n 'un_location_code': 'UNLocationCode',\n 'facility_code': 'facilityCode',\n 'facility_code_list_provider': 'facilityCodeListProvider',\n 'facility_type_code': 'facilityTypeCode',\n 'other_facility': 'otherFacility',\n 'mode_of_transport': 
'modeOfTransport',\n 'location': 'location',\n 'vessel': 'vessel'\n }\n self._carrier_service_code = carrier_service_code\n self._carrier_voyage_number = carrier_voyage_number\n self._transport_call_sequence_number = transport_call_sequence_number\n self._un_location_code = un_location_code\n self._facility_code = facility_code\n self._facility_code_list_provider = facility_code_list_provider\n self._facility_type_code = facility_type_code\n self._other_facility = other_facility\n self._mode_of_transport = mode_of_transport\n self._location = location\n self._vessel = vessel", "def check_vulnerability(self):\n\t\tpass", "def __init__(self, accepting_change_of_payor_patients=None, accepting_medicaid_patients=None, accepting_medicare_patients=None, accepting_private_patients=None, accepting_referral_patients=None, city=None, email=None, gender=None, first_name=None, hios_ids=None, id=None, last_name=None, latitude=None, longitude=None, middle_name=None, network_ids=None, personal_phone=None, phone=None, presentation_name=None, specialty=None, state=None, state_id=None, street_line_1=None, street_line_2=None, suffix=None, title=None, type=None, zip_code=None):\n self.swagger_types = {\n 'accepting_change_of_payor_patients': 'bool',\n 'accepting_medicaid_patients': 'bool',\n 'accepting_medicare_patients': 'bool',\n 'accepting_private_patients': 'bool',\n 'accepting_referral_patients': 'bool',\n 'city': 'str',\n 'email': 'str',\n 'gender': 'str',\n 'first_name': 'str',\n 'hios_ids': 'list[str]',\n 'id': 'int',\n 'last_name': 'str',\n 'latitude': 'float',\n 'longitude': 'float',\n 'middle_name': 'str',\n 'network_ids': 'list[int]',\n 'personal_phone': 'str',\n 'phone': 'str',\n 'presentation_name': 'str',\n 'specialty': 'str',\n 'state': 'str',\n 'state_id': 'int',\n 'street_line_1': 'str',\n 'street_line_2': 'str',\n 'suffix': 'str',\n 'title': 'str',\n 'type': 'str',\n 'zip_code': 'str'\n }\n\n self.attribute_map = {\n 'accepting_change_of_payor_patients': 'accepting_change_of_payor_patients',\n 'accepting_medicaid_patients': 'accepting_medicaid_patients',\n 'accepting_medicare_patients': 'accepting_medicare_patients',\n 'accepting_private_patients': 'accepting_private_patients',\n 'accepting_referral_patients': 'accepting_referral_patients',\n 'city': 'city',\n 'email': 'email',\n 'gender': 'gender',\n 'first_name': 'first_name',\n 'hios_ids': 'hios_ids',\n 'id': 'id',\n 'last_name': 'last_name',\n 'latitude': 'latitude',\n 'longitude': 'longitude',\n 'middle_name': 'middle_name',\n 'network_ids': 'network_ids',\n 'personal_phone': 'personal_phone',\n 'phone': 'phone',\n 'presentation_name': 'presentation_name',\n 'specialty': 'specialty',\n 'state': 'state',\n 'state_id': 'state_id',\n 'street_line_1': 'street_line_1',\n 'street_line_2': 'street_line_2',\n 'suffix': 'suffix',\n 'title': 'title',\n 'type': 'type',\n 'zip_code': 'zip_code'\n }\n\n self._accepting_change_of_payor_patients = accepting_change_of_payor_patients\n self._accepting_medicaid_patients = accepting_medicaid_patients\n self._accepting_medicare_patients = accepting_medicare_patients\n self._accepting_private_patients = accepting_private_patients\n self._accepting_referral_patients = accepting_referral_patients\n self._city = city\n self._email = email\n self._gender = gender\n self._first_name = first_name\n self._hios_ids = hios_ids\n self._id = id\n self._last_name = last_name\n self._latitude = latitude\n self._longitude = longitude\n self._middle_name = middle_name\n self._network_ids = network_ids\n 
self._personal_phone = personal_phone\n self._phone = phone\n self._presentation_name = presentation_name\n self._specialty = specialty\n self._state = state\n self._state_id = state_id\n self._street_line_1 = street_line_1\n self._street_line_2 = street_line_2\n self._suffix = suffix\n self._title = title\n self._type = type\n self._zip_code = zip_code", "def swagger():\n return jsonify(current_app.spec.to_dict())", "def __init__(self, model):\n self.model = model", "def __init__(self, model):\n self.model = model", "def __init__(self, model):\n self.model = model", "def __init__(self, model):\n self.model = model", "def __init__(self):\n self.swagger_types = {\n 'is_waiting': 'bool',\n 'is_active': 'bool',\n 'is_acd': 'bool',\n 'is_preferred': 'bool',\n 'is_screenshare': 'bool',\n 'is_cobrowse': 'bool',\n 'is_voicemail': 'bool',\n 'is_flagged': 'bool',\n 'is_monitored': 'bool',\n 'filter_wrap_up_notes': 'bool',\n 'match_all': 'bool'\n }\n\n self.attribute_map = {\n 'is_waiting': 'isWaiting',\n 'is_active': 'isActive',\n 'is_acd': 'isAcd',\n 'is_preferred': 'isPreferred',\n 'is_screenshare': 'isScreenshare',\n 'is_cobrowse': 'isCobrowse',\n 'is_voicemail': 'isVoicemail',\n 'is_flagged': 'isFlagged',\n 'is_monitored': 'isMonitored',\n 'filter_wrap_up_notes': 'filterWrapUpNotes',\n 'match_all': 'matchAll'\n }\n\n self._is_waiting = None\n self._is_active = None\n self._is_acd = None\n self._is_preferred = None\n self._is_screenshare = None\n self._is_cobrowse = None\n self._is_voicemail = None\n self._is_flagged = None\n self._is_monitored = None\n self._filter_wrap_up_notes = None\n self._match_all = None", "def __init__(self, dry_cargo=None, rocket=None, decay=None, type=None, port1=None, introduction_date=None, port2=None, free_flight_time=None, diameter=None, design_company=None, discharge=None, port2_docked_time=None, id=None, rebuilder=None, port1_docking_date=None, height=None, model_end_date=None, number_of_launches=None, port1_undocking_date=None, model_end_year=None, cargo_water=None, port2_undocking_date=None, port1_docked_time=None, weight=None, version=None, apoapsis=None, discharge_average=None, total_cargo=None, number_of_crew=None, power_type=None, cargo_fuel=None, cargo_gas=None, mass=None, description=None, engine_type=None, total_mass=None, crew=None, docked_time=None, assembly=None, _class=None, model_start_date=None, inclination=None, periapsis=None, regime=None, port2_docking_date=None, length=None, launch=None, label=None, number_of_seats=None, model_start_year=None, width=None, related_mean_of_transportation=None, target_space_station=None): # noqa: E501\n\n\n self.openapi_types = {\n 'dry_cargo': List[object],\n 'rocket': List[object],\n 'decay': List[str],\n 'type': List[str],\n 'port1': List[object],\n 'introduction_date': List[str],\n 'port2': List[object],\n 'free_flight_time': List[object],\n 'diameter': List[object],\n 'design_company': List[object],\n 'discharge': List[float],\n 'port2_docked_time': List[object],\n 'id': str,\n 'rebuilder': List[object],\n 'port1_docking_date': List[str],\n 'height': List[object],\n 'model_end_date': List[str],\n 'number_of_launches': List[int],\n 'port1_undocking_date': List[str],\n 'model_end_year': List[str],\n 'cargo_water': List[object],\n 'port2_undocking_date': List[str],\n 'port1_docked_time': List[object],\n 'weight': List[object],\n 'version': List[object],\n 'apoapsis': List[object],\n 'discharge_average': List[float],\n 'total_cargo': List[object],\n 'number_of_crew': List[int],\n 'power_type': List[object],\n 
'cargo_fuel': List[object],\n 'cargo_gas': List[object],\n 'mass': List[object],\n 'description': List[str],\n 'engine_type': List[object],\n 'total_mass': List[object],\n 'crew': List[object],\n 'docked_time': List[object],\n 'assembly': List[object],\n '_class': List[object],\n 'model_start_date': List[str],\n 'inclination': List[float],\n 'periapsis': List[object],\n 'regime': List[str],\n 'port2_docking_date': List[str],\n 'length': List[object],\n 'launch': List[str],\n 'label': List[str],\n 'number_of_seats': List[int],\n 'model_start_year': List[str],\n 'width': List[object],\n 'related_mean_of_transportation': List[object],\n 'target_space_station': List[object]\n }\n\n self.attribute_map = {\n 'dry_cargo': 'dryCargo',\n 'rocket': 'rocket',\n 'decay': 'decay',\n 'type': 'type',\n 'port1': 'port1',\n 'introduction_date': 'introductionDate',\n 'port2': 'port2',\n 'free_flight_time': 'freeFlightTime',\n 'diameter': 'diameter',\n 'design_company': 'designCompany',\n 'discharge': 'discharge',\n 'port2_docked_time': 'port2DockedTime',\n 'id': 'id',\n 'rebuilder': 'rebuilder',\n 'port1_docking_date': 'port1DockingDate',\n 'height': 'height',\n 'model_end_date': 'modelEndDate',\n 'number_of_launches': 'numberOfLaunches',\n 'port1_undocking_date': 'port1UndockingDate',\n 'model_end_year': 'modelEndYear',\n 'cargo_water': 'cargoWater',\n 'port2_undocking_date': 'port2UndockingDate',\n 'port1_docked_time': 'port1DockedTime',\n 'weight': 'weight',\n 'version': 'version',\n 'apoapsis': 'apoapsis',\n 'discharge_average': 'dischargeAverage',\n 'total_cargo': 'totalCargo',\n 'number_of_crew': 'numberOfCrew',\n 'power_type': 'powerType',\n 'cargo_fuel': 'cargoFuel',\n 'cargo_gas': 'cargoGas',\n 'mass': 'mass',\n 'description': 'description',\n 'engine_type': 'engineType',\n 'total_mass': 'totalMass',\n 'crew': 'crew',\n 'docked_time': 'dockedTime',\n 'assembly': 'assembly',\n '_class': 'class',\n 'model_start_date': 'modelStartDate',\n 'inclination': 'inclination',\n 'periapsis': 'periapsis',\n 'regime': 'regime',\n 'port2_docking_date': 'port2DockingDate',\n 'length': 'length',\n 'launch': 'launch',\n 'label': 'label',\n 'number_of_seats': 'numberOfSeats',\n 'model_start_year': 'modelStartYear',\n 'width': 'width',\n 'related_mean_of_transportation': 'relatedMeanOfTransportation',\n 'target_space_station': 'targetSpaceStation'\n }\n\n self._dry_cargo = dry_cargo\n self._rocket = rocket\n self._decay = decay\n self._type = type\n self._port1 = port1\n self._introduction_date = introduction_date\n self._port2 = port2\n self._free_flight_time = free_flight_time\n self._diameter = diameter\n self._design_company = design_company\n self._discharge = discharge\n self._port2_docked_time = port2_docked_time\n self._id = id\n self._rebuilder = rebuilder\n self._port1_docking_date = port1_docking_date\n self._height = height\n self._model_end_date = model_end_date\n self._number_of_launches = number_of_launches\n self._port1_undocking_date = port1_undocking_date\n self._model_end_year = model_end_year\n self._cargo_water = cargo_water\n self._port2_undocking_date = port2_undocking_date\n self._port1_docked_time = port1_docked_time\n self._weight = weight\n self._version = version\n self._apoapsis = apoapsis\n self._discharge_average = discharge_average\n self._total_cargo = total_cargo\n self._number_of_crew = number_of_crew\n self._power_type = power_type\n self._cargo_fuel = cargo_fuel\n self._cargo_gas = cargo_gas\n self._mass = mass\n self._description = description\n self._engine_type = engine_type\n 
self._total_mass = total_mass\n self._crew = crew\n self._docked_time = docked_time\n self._assembly = assembly\n self.__class = _class\n self._model_start_date = model_start_date\n self._inclination = inclination\n self._periapsis = periapsis\n self._regime = regime\n self._port2_docking_date = port2_docking_date\n self._length = length\n self._launch = launch\n self._label = label\n self._number_of_seats = number_of_seats\n self._model_start_year = model_start_year\n self._width = width\n self._related_mean_of_transportation = related_mean_of_transportation\n self._target_space_station = target_space_station", "def model_info():\n pass", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'channel_id': 'str',\n 'channel_secret': 'str',\n 'switcher_secret': 'str',\n 'service_code': 'str',\n 'self_uri': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'channel_id': 'channelId',\n 'channel_secret': 'channelSecret',\n 'switcher_secret': 'switcherSecret',\n 'service_code': 'serviceCode',\n 'self_uri': 'selfUri'\n }\n\n self._id = None\n self._name = None\n self._channel_id = None\n self._channel_secret = None\n self._switcher_secret = None\n self._service_code = None\n self._self_uri = None", "def __init__(self, id=None, user_id=None, title=None, description=None, readonly=None, hidden=None, refresh_interval=None, refresh_interval_to_i=None, space=None, model=None, content_favorite_id=None, scheduled_plan=None, content_metadata_id=None, query_timezone=None, can=None):\n self.swagger_types = {\n 'id': 'str',\n 'user_id': 'int',\n 'title': 'str',\n 'description': 'str',\n 'readonly': 'bool',\n 'hidden': 'bool',\n 'refresh_interval': 'str',\n 'refresh_interval_to_i': 'int',\n 'space': 'SpaceBase',\n 'model': 'LookModel',\n 'content_favorite_id': 'int',\n 'scheduled_plan': 'ScheduledPlan',\n 'content_metadata_id': 'int',\n 'query_timezone': 'str',\n 'can': 'dict(str, bool)'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'user_id': 'user_id',\n 'title': 'title',\n 'description': 'description',\n 'readonly': 'readonly',\n 'hidden': 'hidden',\n 'refresh_interval': 'refresh_interval',\n 'refresh_interval_to_i': 'refresh_interval_to_i',\n 'space': 'space',\n 'model': 'model',\n 'content_favorite_id': 'content_favorite_id',\n 'scheduled_plan': 'scheduled_plan',\n 'content_metadata_id': 'content_metadata_id',\n 'query_timezone': 'query_timezone',\n 'can': 'can'\n }\n\n self._id = id\n self._user_id = user_id\n self._title = title\n self._description = description\n self._readonly = readonly\n self._hidden = hidden\n self._refresh_interval = refresh_interval\n self._refresh_interval_to_i = refresh_interval_to_i\n self._space = space\n self._model = model\n self._content_favorite_id = content_favorite_id\n self._scheduled_plan = scheduled_plan\n self._content_metadata_id = content_metadata_id\n self._query_timezone = query_timezone\n self._can = can", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'ticket_id': 'str',\n 'type': 'str',\n 'from_number': 'str',\n 'from_name': 'str',\n 'to_number': 'str',\n 'to_name': 'str',\n 'via_number': 'str',\n 'date_created': 'datetime',\n 'date_answered': 'datetime',\n 'date_finished': 'datetime'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'ticket_id': 'ticketId',\n 'type': 'type',\n 'from_number': 'fromNumber',\n 'from_name': 'fromName',\n 'to_number': 'toNumber',\n 'to_name': 'toName',\n 'via_number': 'viaNumber',\n 'date_created': 'dateCreated',\n 'date_answered': 'dateAnswered',\n 'date_finished': 
'dateFinished'\n }\n\n self._id = None\n self._ticket_id = None\n self._type = None\n self._from_number = None\n self._from_name = None\n self._to_number = None\n self._to_name = None\n self._via_number = None\n self._date_created = None\n self._date_answered = None\n self._date_finished = None", "def __init__(self, model):\n aaa\n self.model = model\n\n #: the list of CROD cards\n self._crod = []\n\n #: the list of CONROD cards\n self._conrod = []\n\n self._crod_comment = []\n self._conrod_comment = []\n\n self.crod = CROD(self.model)\n self.conrod = CONROD(self.model)", "def CreateModel(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def __init__(self, api_key: str):\n self.Bonuses = wrap_class(_Bonuses, api_key)\n self.EligibilityRequirements = wrap_class(_EligibilityRequirements, api_key)\n self.Invitations = wrap_class(_Invitations, api_key)\n self.Messages = wrap_class(_Messages, api_key)\n self.ParticipantGroups = wrap_class(_ParticipantGroups, api_key)\n self.Projects = wrap_class(_Projects, api_key)\n self.Studies = wrap_class(_Studies, api_key)\n self.Submissions = wrap_class(_Submissions, api_key)\n self.Users = wrap_class(_Users, api_key)\n self.Workspaces = wrap_class(_Workspaces, api_key)", "def __init__(self, version_name=None, revision_notes=None, version_oid=None, fields=None, revision_date=None, primary_flag=None, disp_sequence=None, enabled=None, screening_expression=None, screening_flag=None, custom_enroll_flag=None, used_at_enroll=None, randomization_expression=None, irb_approval_date=None, valid_from=None, valid_to=None, screening_e_consent_flag=None, econsent=None, crfform_definition=None, crfform_section=None, local_vars_configuration=None): # noqa: E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._version_name = None\n self._revision_notes = None\n self._version_oid = None\n self._fields = None\n self._revision_date = None\n self._primary_flag = None\n self._disp_sequence = None\n self._enabled = None\n self._screening_expression = None\n self._screening_flag = None\n self._custom_enroll_flag = None\n self._used_at_enroll = None\n self._randomization_expression = None\n self._irb_approval_date = None\n self._valid_from = None\n self._valid_to = None\n self._screening_e_consent_flag = None\n self._econsent = None\n self._crfform_definition = None\n self._crfform_section = None\n self.discriminator = None\n\n if version_name is not None:\n self.version_name = version_name\n if revision_notes is not None:\n self.revision_notes = revision_notes\n if version_oid is not None:\n self.version_oid = version_oid\n if fields is not None:\n self.fields = fields\n if revision_date is not None:\n self.revision_date = revision_date\n if primary_flag is not None:\n self.primary_flag = primary_flag\n if disp_sequence is not None:\n self.disp_sequence = disp_sequence\n if enabled is not None:\n self.enabled = enabled\n if screening_expression is not None:\n self.screening_expression = screening_expression\n if screening_flag is not None:\n self.screening_flag = screening_flag\n if custom_enroll_flag is not None:\n self.custom_enroll_flag = custom_enroll_flag\n if used_at_enroll is not None:\n self.used_at_enroll = used_at_enroll\n if randomization_expression is not None:\n self.randomization_expression = randomization_expression\n 
if irb_approval_date is not None:\n self.irb_approval_date = irb_approval_date\n if valid_from is not None:\n self.valid_from = valid_from\n if valid_to is not None:\n self.valid_to = valid_to\n if screening_e_consent_flag is not None:\n self.screening_e_consent_flag = screening_e_consent_flag\n if econsent is not None:\n self.econsent = econsent\n if crfform_definition is not None:\n self.crfform_definition = crfform_definition\n if crfform_section is not None:\n self.crfform_section = crfform_section", "def __init__(self, jsondict=None, strict=True):\n \n self.action = None\n \"\"\" What mitigation?.\n Type `CodeableConcept` (represented as `dict` in JSON). \"\"\"\n \n self.author = None\n \"\"\" Who is committing?.\n Type `FHIRReference` referencing `Practitioner` (represented as `dict` in JSON). \"\"\"\n \n self.date = None\n \"\"\" Date committed.\n Type `FHIRDate` (represented as `str` in JSON). \"\"\"\n \n super(DetectedIssueMitigation, self).__init__(jsondict=jsondict, strict=strict)", "def __init__(self):\n self.swagger_types = {\n 'detail_type': 'str',\n 'name': 'str',\n 'store_data': 'object',\n 'discovered': 'datetime',\n 'extraction_failure': 'bool',\n 'in_trash': 'bool',\n 'is_extracted': 'bool',\n 'meta_available': 'bool',\n 'size': 'int',\n 'start_time': 'datetime',\n 'end_time': 'datetime',\n 'duration': 'float',\n 'messages': 'int',\n 'tags': 'list[Tag]'\n }\n\n self.attribute_map = {\n 'detail_type': 'detail_type',\n 'name': 'name',\n 'store_data': 'store_data',\n 'discovered': 'discovered',\n 'extraction_failure': 'extraction_failure',\n 'in_trash': 'in_trash',\n 'is_extracted': 'is_extracted',\n 'meta_available': 'meta_available',\n 'size': 'size',\n 'start_time': 'start_time',\n 'end_time': 'end_time',\n 'duration': 'duration',\n 'messages': 'messages',\n 'tags': 'tags'\n }\n\n self._detail_type = None\n self._name = None\n self._store_data = None\n self._discovered = None\n self._extraction_failure = None\n self._in_trash = None\n self._is_extracted = None\n self._meta_available = None\n self._size = None\n self._start_time = None\n self._end_time = None\n self._duration = None\n self._messages = None\n self._tags = None", "def __init__(self, mean_radius=None, cluster=None, orbital_period=None, surface_area=None, orbital_eccentricity=None, mass=None, description=None, type=None, max_absolute_magnitude=None, mean_temperature=None, constellation=None, von_klitzing_constant=None, maximum_temperature=None, temperature=None, definition=None, id=None, periapsis=None, absolute_magnitude=None, density=None, notable_features=None, average_speed=None, label=None, apoapsis=None, volume=None, messier_name=None, max_apparent_magnitude=None, explorer=None, minimum_temperature=None, ngc_name=None): # noqa: E501\n\n\n self.openapi_types = {\n 'mean_radius': List[object],\n 'cluster': List[object],\n 'orbital_period': List[object],\n 'surface_area': List[object],\n 'orbital_eccentricity': List[float],\n 'mass': List[object],\n 'description': List[str],\n 'type': List[str],\n 'max_absolute_magnitude': List[float],\n 'mean_temperature': List[object],\n 'constellation': List[object],\n 'von_klitzing_constant': List[float],\n 'maximum_temperature': List[object],\n 'temperature': List[object],\n 'definition': List[str],\n 'id': str,\n 'periapsis': List[object],\n 'absolute_magnitude': List[float],\n 'density': List[object],\n 'notable_features': List[str],\n 'average_speed': List[object],\n 'label': List[str],\n 'apoapsis': List[object],\n 'volume': List[object],\n 'messier_name': 
List[str],\n 'max_apparent_magnitude': List[float],\n 'explorer': List[object],\n 'minimum_temperature': List[object],\n 'ngc_name': List[str]\n }\n\n self.attribute_map = {\n 'mean_radius': 'meanRadius',\n 'cluster': 'cluster',\n 'orbital_period': 'orbitalPeriod',\n 'surface_area': 'surfaceArea',\n 'orbital_eccentricity': 'orbitalEccentricity',\n 'mass': 'mass',\n 'description': 'description',\n 'type': 'type',\n 'max_absolute_magnitude': 'maxAbsoluteMagnitude',\n 'mean_temperature': 'meanTemperature',\n 'constellation': 'constellation',\n 'von_klitzing_constant': 'vonKlitzingConstant',\n 'maximum_temperature': 'maximumTemperature',\n 'temperature': 'temperature',\n 'definition': 'definition',\n 'id': 'id',\n 'periapsis': 'periapsis',\n 'absolute_magnitude': 'absoluteMagnitude',\n 'density': 'density',\n 'notable_features': 'notableFeatures',\n 'average_speed': 'averageSpeed',\n 'label': 'label',\n 'apoapsis': 'apoapsis',\n 'volume': 'volume',\n 'messier_name': 'messierName',\n 'max_apparent_magnitude': 'maxApparentMagnitude',\n 'explorer': 'explorer',\n 'minimum_temperature': 'minimumTemperature',\n 'ngc_name': 'ngcName'\n }\n\n self._mean_radius = mean_radius\n self._cluster = cluster\n self._orbital_period = orbital_period\n self._surface_area = surface_area\n self._orbital_eccentricity = orbital_eccentricity\n self._mass = mass\n self._description = description\n self._type = type\n self._max_absolute_magnitude = max_absolute_magnitude\n self._mean_temperature = mean_temperature\n self._constellation = constellation\n self._von_klitzing_constant = von_klitzing_constant\n self._maximum_temperature = maximum_temperature\n self._temperature = temperature\n self._definition = definition\n self._id = id\n self._periapsis = periapsis\n self._absolute_magnitude = absolute_magnitude\n self._density = density\n self._notable_features = notable_features\n self._average_speed = average_speed\n self._label = label\n self._apoapsis = apoapsis\n self._volume = volume\n self._messier_name = messier_name\n self._max_apparent_magnitude = max_apparent_magnitude\n self._explorer = explorer\n self._minimum_temperature = minimum_temperature\n self._ngc_name = ngc_name", "def __init__(self, jsondict=None, strict=True):\n \n self.author = None\n \"\"\" The provider or device that identified the issue.\n Type `FHIRReference` referencing `Practitioner, Device` (represented as `dict` in JSON). \"\"\"\n \n self.category = None\n \"\"\" Issue Category, e.g. drug-drug, duplicate therapy, etc..\n Type `CodeableConcept` (represented as `dict` in JSON). \"\"\"\n \n self.date = None\n \"\"\" When identified.\n Type `FHIRDate` (represented as `str` in JSON). \"\"\"\n \n self.detail = None\n \"\"\" Description and context.\n Type `str`. \"\"\"\n \n self.identifier = None\n \"\"\" Unique id for the detected issue.\n Type `Identifier` (represented as `dict` in JSON). \"\"\"\n \n self.implicated = None\n \"\"\" Problem resource.\n List of `FHIRReference` items referencing `Resource` (represented as `dict` in JSON). \"\"\"\n \n self.mitigation = None\n \"\"\" Step taken to address.\n List of `DetectedIssueMitigation` items (represented as `dict` in JSON). \"\"\"\n \n self.patient = None\n \"\"\" Associated patient.\n Type `FHIRReference` referencing `Patient` (represented as `dict` in JSON). \"\"\"\n \n self.reference = None\n \"\"\" Authority for issue.\n Type `str`. \"\"\"\n \n self.severity = None\n \"\"\" high | moderate | low.\n Type `str`. 
\"\"\"\n \n super(DetectedIssue, self).__init__(jsondict=jsondict, strict=strict)", "def __init__(self, model):\n self._model = model", "def __init__(__self__, *,\n api_version: Optional[pulumi.Input[str]] = None,\n kind: Optional[pulumi.Input[str]] = None,\n metadata: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']] = None,\n spec: Optional[pulumi.Input['PodDisruptionBudgetSpecArgs']] = None,\n status: Optional[pulumi.Input['PodDisruptionBudgetStatusArgs']] = None):\n if api_version is not None:\n pulumi.set(__self__, \"api_version\", 'policy/v1beta1')\n if kind is not None:\n pulumi.set(__self__, \"kind\", 'PodDisruptionBudget')\n if metadata is not None:\n pulumi.set(__self__, \"metadata\", metadata)\n if spec is not None:\n pulumi.set(__self__, \"spec\", spec)\n if status is not None:\n pulumi.set(__self__, \"status\", status)", "def resolve(self, spec: \"ModelSpec\"):", "def __init__(self, **kwargs):\n self.swagger_types = {\n 'key': 'str',\n 'display_name': 'str',\n 'description': 'str',\n 'glossary_key': 'str',\n 'parent_term_key': 'str',\n 'is_allowed_to_have_child_terms': 'bool',\n 'path': 'str',\n 'lifecycle_state': 'str',\n 'time_created': 'datetime',\n 'time_updated': 'datetime',\n 'created_by_id': 'str',\n 'updated_by_id': 'str',\n 'owner': 'str',\n 'workflow_status': 'str',\n 'uri': 'str',\n 'associated_object_count': 'int',\n 'associated_objects': 'list[TermAssociatedObject]'\n }\n\n self.attribute_map = {\n 'key': 'key',\n 'display_name': 'displayName',\n 'description': 'description',\n 'glossary_key': 'glossaryKey',\n 'parent_term_key': 'parentTermKey',\n 'is_allowed_to_have_child_terms': 'isAllowedToHaveChildTerms',\n 'path': 'path',\n 'lifecycle_state': 'lifecycleState',\n 'time_created': 'timeCreated',\n 'time_updated': 'timeUpdated',\n 'created_by_id': 'createdById',\n 'updated_by_id': 'updatedById',\n 'owner': 'owner',\n 'workflow_status': 'workflowStatus',\n 'uri': 'uri',\n 'associated_object_count': 'associatedObjectCount',\n 'associated_objects': 'associatedObjects'\n }\n\n self._key = None\n self._display_name = None\n self._description = None\n self._glossary_key = None\n self._parent_term_key = None\n self._is_allowed_to_have_child_terms = None\n self._path = None\n self._lifecycle_state = None\n self._time_created = None\n self._time_updated = None\n self._created_by_id = None\n self._updated_by_id = None\n self._owner = None\n self._workflow_status = None\n self._uri = None\n self._associated_object_count = None\n self._associated_objects = None", "def test_kyc_get_validation_legal(self):\n pass", "def __init__(self):\r\n\r\n super(Bypass, self).__init__()\r\n\r\n # Initialize public scalar attributes.\r\n self.specification = 0 # MIL-C-25 or MIL-C-12889.\r\n self.spec_sheet = 0 #\r\n if self.hazard_rate_type < 3: # MIL-HDBK-217\r\n self.reference_temperature = 358.0\r\n\r\n# def set_attributes(self, values):\r\n \"\"\"\r\n Method to set the Capacitor data model attributes.\r\n\r\n :param tuple values: tuple of values to assign to the instance\r\n attributes.\r\n :return: (_code, _msg); the error code and error message.\r\n :rtype: tuple\r\n \"\"\"", "def openapi(self) -> api.OpenAPISpec:\n return self._get_model(model=api.OpenAPISpec)", "def __init__(self, model: str, **kwargs):\n super().__init__(model=model)", "def ApplicationEvidence(self) -> Evidence:", "def __init__(\n self,\n model: types.BaseChannel,\n serving_spec: infra_validator_pb2.ServingSpec,\n examples: Optional[types.BaseChannel] = None,\n request_spec: 
Optional[infra_validator_pb2.RequestSpec] = None,\n validation_spec: Optional[infra_validator_pb2.ValidationSpec] = None):\n blessing = types.Channel(type=standard_artifacts.InfraBlessing)\n spec = standard_component_specs.InfraValidatorSpec(\n model=model,\n examples=examples,\n blessing=blessing,\n serving_spec=serving_spec,\n validation_spec=validation_spec,\n request_spec=request_spec)\n super().__init__(spec=spec)", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'error_entity': 'DomainEntityRef',\n 'related_entity': 'DomainEntityRef',\n 'timestamp': 'datetime',\n 'level': 'str',\n 'category': 'str',\n 'correlation_id': 'str',\n 'event_message': 'EventMessage',\n 'self_uri': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'error_entity': 'errorEntity',\n 'related_entity': 'relatedEntity',\n 'timestamp': 'timestamp',\n 'level': 'level',\n 'category': 'category',\n 'correlation_id': 'correlationId',\n 'event_message': 'eventMessage',\n 'self_uri': 'selfUri'\n }\n\n self._id = None\n self._name = None\n self._error_entity = None\n self._related_entity = None\n self._timestamp = None\n self._level = None\n self._category = None\n self._correlation_id = None\n self._event_message = None\n self._self_uri = None", "def test_Vulnerability(self):\n self.assertEqual(3, Vulnerability.objects.count())\n\n self.assertTrue(Vulnerability.objects.filter(cve_id='CVE-2009-1382'))\n self.assertTrue(Vulnerability.objects.filter(cve_id='CVE-2009-2459'))\n self.assertTrue(Vulnerability.objects.filter(cve_id='CVE-2014-8242'))", "def CreateModel(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def test_create_control_with_cads(self):\n factories.CustomAttributeDefinitionFactory(\n id=444,\n attribute_type=\"Text\",\n definition_type=\"control\"\n )\n control_body = self.prepare_control_request_body()\n cad_body = self.prepare_external_cad_body(\"Text\", \"Control\")\n cav_body = self.prepare_external_cav_body(123, \"Control\")\n control_body.update({\n \"custom_attribute_definitions\": [cad_body],\n \"custom_attribute_values\": [cav_body],\n })\n\n response = self.api.post(all_models.Control, data={\n \"control\": control_body,\n })\n\n self.assertEqual(response.status_code, 201)\n cav = all_models.CustomAttributeValue.query.one()\n self.assert_cav_fields(cav, cav_body)", "def __init__(self, project_id, dataset_id, sandbox_dataset_id):\n desc = f'suppress all records associated with a PPI vocabulary explicit identifier associated PPI vocabulary'\n super().__init__(issue_numbers=ISSUE_NUMBERS,\n description=desc,\n affected_datasets=[\n cdr_consts.CONTROLLED_TIER_DEID,\n cdr_consts.COMBINED\n ],\n affected_tables=OBSERVATION,\n project_id=project_id,\n dataset_id=dataset_id,\n sandbox_dataset_id=sandbox_dataset_id)", "def vulnerability_management_post():\n if not request.is_json:\n raise InvalidUsage(\"Missing JSON in request\")\n req_json = request.json\n action = req_json.get(\"action\", \"get\")\n filters = req_json.get(\"filters\", {})\n if not filters:\n filters = {\"type\": \"cve\"}\n\n # if \"vulnerability_id\" in filters:\n # filters[\"doc_id\"] = filters.pop(\"vulnerability_id\")\n if filters.get('masked') is None:\n filters[\"masked\"] = \"false\"\n if action == \"get\":\n es_resp = ESConn.search_by_and_clause(CVE_INDEX, filters, req_json.get(\"start_index\", 0),\n req_json.get(\"sort_order\", \"desc\"), 
size=req_json.get(\"size\", 10),\n _source=source_detailed)\n es_resp = format_es_resp_for_api(es_resp[\"hits\"], \"vulnerability_id\")\n group_by = req_json.get(\"group_by\", None)\n if group_by and group_by in source_detailed:\n group_by_resp = defaultdict(list)\n for es_doc in es_resp:\n group_by_resp[es_doc.get(group_by, \"\")].append(es_doc)\n es_resp = dict(group_by_resp)\n return set_response(data=es_resp)\n elif action == \"delete\":\n es_resp = ESConn.search_by_and_clause(CVE_INDEX, filters, req_json.get(\"start_index\", 0),\n req_json.get(\"sort_order\", \"desc\"), size=req_json.get(\"size\", 10),\n _source=[\"_id\"])\n no_of_docs_to_be_masked = mask_vulnerabilities(es_resp[\"hits\"])\n return set_response(data={\"message\": \"deleted {0} vulnerabilities\".format(no_of_docs_to_be_masked)})\n else:\n raise InvalidUsage(\"Unsupported action: {0}\".format(action))", "def __createCovidModelInstance(self, *args, **kwargs):\n try:\n if 'MODEL_TYPE' in kwargs:\n if kwargs['MODEL_TYPE'] == CovidModel.AGGREGATE_CASES_DECEASED:\n covidModel = CovidAggregateTotals() \n self.CovidData = covidModel.getData(*args,**kwargs)\n self.DataAvailable=self.__isDataAvailable(self.CovidData)\n return \n \n if kwargs['MODEL_TYPE'] == CovidModel.MONTHLY_CASES_DECEASED:\n covidModel = CovidMonthlyTotals() \n self.CovidData = covidModel.getData(*args,**kwargs)\n self.DataAvailable=self.__isDataAvailable(self.CovidData)\n return \n\n if kwargs['MODEL_TYPE'] == CovidModel.PAST_30_DAYS:\n covidModel = CovidDailyTotals() \n self.CovidData = covidModel.getData(*args,**kwargs)\n self.DataAvailable=self.__isDataAvailable(self.CovidData)\n return \n\n if kwargs['MODEL_TYPE'] == CovidModel.MESSAGES:\n covidModel = CovidMessages() \n self.CovidData = covidModel.getData(*args,**kwargs)\n self.DataAvailable=self.__isDataAvailable(self.CovidData)\n return\n\n if kwargs['MODEL_TYPE'] == CovidModel.LOCATIONS:\n covidModel = CovidLocationInfo() \n self.CovidData = covidModel.getData(*args,**kwargs)\n self.DataAvailable=self.__isDataAvailable(self.CovidData)\n return\n\n print (\"CovidMessages.__createCovidModelInstance() - did not receive a recognizable model type - no model object instantiated. 
Args received = \",kwargs)\n return None\n except:\n print (\"CovidMessages.__createCovidModelInstance() - unexpected error: \",sys.exc_info()[0])\n return None", "def load_model(self) -> Any:", "def build_model():", "def __init__(self):\n self.swagger_types = {\n 'maximum_over_capacity': 'float',\n 'minimum_health_capacity': 'float'\n }\n\n self.attribute_map = {\n 'maximum_over_capacity': 'maximumOverCapacity',\n 'minimum_health_capacity': 'minimumHealthCapacity'\n }\n\n self._maximum_over_capacity = None\n self._minimum_health_capacity = None", "def __init__(self):\n self.swagger_types = {\n 'detail_type': 'str',\n 'identifier': 'int',\n 'success': 'bool',\n 'description': 'str',\n 'duration': 'float',\n 'bag_name': 'str',\n 'bag_store_name': 'str',\n 'results': 'object',\n 'bag': 'BagSummary'\n }\n\n self.attribute_map = {\n 'detail_type': 'detail_type',\n 'identifier': 'identifier',\n 'success': 'success',\n 'description': 'description',\n 'duration': 'duration',\n 'bag_name': 'bag_name',\n 'bag_store_name': 'bag_store_name',\n 'results': 'results',\n 'bag': 'bag'\n }\n\n self._detail_type = None\n self._identifier = None\n self._success = None\n self._description = None\n self._duration = None\n self._bag_name = None\n self._bag_store_name = None\n self._results = None\n self._bag = None", "def model_definition(self):\n pass", "def __init__(self, model):\n\t\tself.model = model", "def __init__(self):\n self.model_description: Dict[str, Any] = get_model_description()\n self.model_name: str = self.model_description['name']\n self.model_version: str = self.model_description['version']\n\n # Make sure we do not have a trailing slash to muck up processing later.\n self.event_dir: Optional[str] = None\n self.zone_name: Optional[str] = None\n self.fault_time: Optional[str] = None\n\n self.example: Example = None\n self.validator: ExampleValidator = ExampleValidator()\n self.common_features_df: pd.DataFrame = None\n\n self.cavity_onnx_session: rt.InferenceSession = rt.InferenceSession(os.path.join(os.path.dirname(__file__),\n 'model_files',\n 'cavity_model.onnx'))\n self.fault_onnx_session: rt.InferenceSession = rt.InferenceSession(os.path.join(os.path.dirname(__file__),\n 'model_files',\n 'fault_model.onnx'))", "def __init__(__self__, *,\n api: Optional[pulumi.Input['ApplicationApiArgs']] = None,\n app_role_ids: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n app_roles: Optional[pulumi.Input[Sequence[pulumi.Input['ApplicationAppRoleArgs']]]] = None,\n application_id: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n device_only_auth_enabled: Optional[pulumi.Input[bool]] = None,\n disabled_by_microsoft: Optional[pulumi.Input[str]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n fallback_public_client_enabled: Optional[pulumi.Input[bool]] = None,\n feature_tags: Optional[pulumi.Input[Sequence[pulumi.Input['ApplicationFeatureTagArgs']]]] = None,\n group_membership_claims: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n identifier_uris: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n logo_image: Optional[pulumi.Input[str]] = None,\n logo_url: Optional[pulumi.Input[str]] = None,\n marketing_url: Optional[pulumi.Input[str]] = None,\n notes: Optional[pulumi.Input[str]] = None,\n oauth2_permission_scope_ids: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n oauth2_post_response_required: Optional[pulumi.Input[bool]] = None,\n object_id: Optional[pulumi.Input[str]] = None,\n 
optional_claims: Optional[pulumi.Input['ApplicationOptionalClaimsArgs']] = None,\n owners: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n prevent_duplicate_names: Optional[pulumi.Input[bool]] = None,\n privacy_statement_url: Optional[pulumi.Input[str]] = None,\n public_client: Optional[pulumi.Input['ApplicationPublicClientArgs']] = None,\n publisher_domain: Optional[pulumi.Input[str]] = None,\n required_resource_accesses: Optional[pulumi.Input[Sequence[pulumi.Input['ApplicationRequiredResourceAccessArgs']]]] = None,\n service_management_reference: Optional[pulumi.Input[str]] = None,\n sign_in_audience: Optional[pulumi.Input[str]] = None,\n single_page_application: Optional[pulumi.Input['ApplicationSinglePageApplicationArgs']] = None,\n support_url: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n template_id: Optional[pulumi.Input[str]] = None,\n terms_of_service_url: Optional[pulumi.Input[str]] = None,\n web: Optional[pulumi.Input['ApplicationWebArgs']] = None):\n if api is not None:\n pulumi.set(__self__, \"api\", api)\n if app_role_ids is not None:\n pulumi.set(__self__, \"app_role_ids\", app_role_ids)\n if app_roles is not None:\n pulumi.set(__self__, \"app_roles\", app_roles)\n if application_id is not None:\n pulumi.set(__self__, \"application_id\", application_id)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if device_only_auth_enabled is not None:\n pulumi.set(__self__, \"device_only_auth_enabled\", device_only_auth_enabled)\n if disabled_by_microsoft is not None:\n pulumi.set(__self__, \"disabled_by_microsoft\", disabled_by_microsoft)\n if display_name is not None:\n pulumi.set(__self__, \"display_name\", display_name)\n if fallback_public_client_enabled is not None:\n pulumi.set(__self__, \"fallback_public_client_enabled\", fallback_public_client_enabled)\n if feature_tags is not None:\n pulumi.set(__self__, \"feature_tags\", feature_tags)\n if group_membership_claims is not None:\n pulumi.set(__self__, \"group_membership_claims\", group_membership_claims)\n if identifier_uris is not None:\n pulumi.set(__self__, \"identifier_uris\", identifier_uris)\n if logo_image is not None:\n pulumi.set(__self__, \"logo_image\", logo_image)\n if logo_url is not None:\n pulumi.set(__self__, \"logo_url\", logo_url)\n if marketing_url is not None:\n pulumi.set(__self__, \"marketing_url\", marketing_url)\n if notes is not None:\n pulumi.set(__self__, \"notes\", notes)\n if oauth2_permission_scope_ids is not None:\n pulumi.set(__self__, \"oauth2_permission_scope_ids\", oauth2_permission_scope_ids)\n if oauth2_post_response_required is not None:\n pulumi.set(__self__, \"oauth2_post_response_required\", oauth2_post_response_required)\n if object_id is not None:\n pulumi.set(__self__, \"object_id\", object_id)\n if optional_claims is not None:\n pulumi.set(__self__, \"optional_claims\", optional_claims)\n if owners is not None:\n pulumi.set(__self__, \"owners\", owners)\n if prevent_duplicate_names is not None:\n pulumi.set(__self__, \"prevent_duplicate_names\", prevent_duplicate_names)\n if privacy_statement_url is not None:\n pulumi.set(__self__, \"privacy_statement_url\", privacy_statement_url)\n if public_client is not None:\n pulumi.set(__self__, \"public_client\", public_client)\n if publisher_domain is not None:\n pulumi.set(__self__, \"publisher_domain\", publisher_domain)\n if required_resource_accesses is not None:\n pulumi.set(__self__, \"required_resource_accesses\", 
required_resource_accesses)\n if service_management_reference is not None:\n pulumi.set(__self__, \"service_management_reference\", service_management_reference)\n if sign_in_audience is not None:\n pulumi.set(__self__, \"sign_in_audience\", sign_in_audience)\n if single_page_application is not None:\n pulumi.set(__self__, \"single_page_application\", single_page_application)\n if support_url is not None:\n pulumi.set(__self__, \"support_url\", support_url)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if template_id is not None:\n pulumi.set(__self__, \"template_id\", template_id)\n if terms_of_service_url is not None:\n pulumi.set(__self__, \"terms_of_service_url\", terms_of_service_url)\n if web is not None:\n pulumi.set(__self__, \"web\", web)", "def model(self) -> str:\n ...", "def api(self) -> str:", "def __init__(__self__, *,\n attributes: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n description: Optional[pulumi.Input[str]] = None,\n disable_status_check: Optional[pulumi.Input[bool]] = None,\n email: Optional[pulumi.Input[str]] = None,\n masters: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project_id: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n ttl: Optional[pulumi.Input[int]] = None,\n type: Optional[pulumi.Input[str]] = None,\n value_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None):\n if attributes is not None:\n pulumi.set(__self__, \"attributes\", attributes)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if disable_status_check is not None:\n pulumi.set(__self__, \"disable_status_check\", disable_status_check)\n if email is not None:\n pulumi.set(__self__, \"email\", email)\n if masters is not None:\n pulumi.set(__self__, \"masters\", masters)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if project_id is not None:\n pulumi.set(__self__, \"project_id\", project_id)\n if region is not None:\n pulumi.set(__self__, \"region\", region)\n if ttl is not None:\n pulumi.set(__self__, \"ttl\", ttl)\n if type is not None:\n pulumi.set(__self__, \"type\", type)\n if value_specs is not None:\n pulumi.set(__self__, \"value_specs\", value_specs)", "def __init__(__self__, *,\n attributes: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n description: Optional[pulumi.Input[str]] = None,\n disable_status_check: Optional[pulumi.Input[bool]] = None,\n email: Optional[pulumi.Input[str]] = None,\n masters: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project_id: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n ttl: Optional[pulumi.Input[int]] = None,\n type: Optional[pulumi.Input[str]] = None,\n value_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None):\n if attributes is not None:\n pulumi.set(__self__, \"attributes\", attributes)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if disable_status_check is not None:\n pulumi.set(__self__, \"disable_status_check\", disable_status_check)\n if email is not None:\n pulumi.set(__self__, \"email\", email)\n if masters is not None:\n pulumi.set(__self__, \"masters\", masters)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if project_id is not None:\n pulumi.set(__self__, \"project_id\", project_id)\n if region is not None:\n pulumi.set(__self__, \"region\", region)\n if ttl is not None:\n pulumi.set(__self__, 
\"ttl\", ttl)\n if type is not None:\n pulumi.set(__self__, \"type\", type)\n if value_specs is not None:\n pulumi.set(__self__, \"value_specs\", value_specs)", "def model() -> Model:\n return Model()", "def __init__(\n self,\n spacy_pipeline,\n lowercase=False,\n format: str ='gem',\n ):\n\n self.lowercase = lowercase\n self.format = format\n self.spacy_pipeline = spacy_pipeline", "def predict_api():\n pass", "def make_instance(self, include_optional):\n # model = openapi_client.models.shared_runtime_audit.SharedRuntimeAudit() # noqa: E501\n if include_optional :\n return SharedRuntimeAudit(\n id = '', \n account_id = '', \n app = '', \n app_id = '', \n attack_techniques = [\n '[\\\"exploitationForPrivilegeEscalation\\\",\\\"exploitPublicFacingApplication\\\",\\\"applicationExploitRCE\\\",\\\"networkServiceScanning\\\",\\\"endpointDenialOfService\\\",\\\"exfiltrationGeneral\\\",\\\"systemNetworkConfigurationDiscovery\\\",\\\"unsecuredCredentials\\\",\\\"credentialDumping\\\",\\\"systemInformationDiscovery\\\",\\\"systemNetworkConnectionDiscovery\\\",\\\"systemUserDiscovery\\\",\\\"accountDiscovery\\\",\\\"cloudInstanceMetadataAPI\\\",\\\"accessKubeletMainAPI\\\",\\\"queryKubeletReadonlyAPI\\\",\\\"accessKubernetesAPIServer\\\",\\\"softwareDeploymentTools\\\",\\\"ingressToolTransfer\\\",\\\"lateralToolTransfer\\\",\\\"commandAndControlGeneral\\\",\\\"resourceHijacking\\\",\\\"manInTheMiddle\\\",\\\"nativeBinaryExecution\\\",\\\"foreignBinaryExecution\\\",\\\"createAccount\\\",\\\"accountManipulation\\\",\\\"abuseElevationControlMechanisms\\\",\\\"supplyChainCompromise\\\",\\\"obfuscatedFiles\\\",\\\"hijackExecutionFlow\\\",\\\"impairDefences\\\",\\\"scheduledTaskJob\\\",\\\"exploitationOfRemoteServices\\\",\\\"eventTriggeredExecution\\\",\\\"accountAccessRemoval\\\",\\\"privilegedContainer\\\",\\\"writableVolumes\\\",\\\"execIntoContainer\\\",\\\"softwareDiscovery\\\",\\\"createContainer\\\",\\\"kubernetesSecrets\\\",\\\"fileAndDirectoryDiscovery\\\",\\\"masquerading\\\",\\\"webShell\\\",\\\"compileAfterDelivery\\\"]'\n ], \n attack_type = '[\\\"\\\",\\\"cloudMetadataProbing\\\",\\\"kubeletAPIAccess\\\",\\\"kubeletReadonlyAccess\\\",\\\"kubectlSpawned\\\",\\\"kubectlDownloaded\\\",\\\"horizontalPortScanning\\\",\\\"verticalPortScanning\\\",\\\"explicitlyDeniedIP\\\",\\\"customFeedIP\\\",\\\"feedIP\\\",\\\"unexpectedOutboundPort\\\",\\\"suspiciousNetworkActivity\\\",\\\"unexpectedListeningPort\\\",\\\"explicitlyDeniedListeningPort\\\",\\\"explicitlyDeniedOutboundPort\\\",\\\"listeningPortModifiedProcess\\\",\\\"outboundPortModifiedProcess\\\",\\\"feedDNS\\\",\\\"explicitlyDeniedDNS\\\",\\\"dnsQuery\\\",\\\"unexpectedProcess\\\",\\\"portScanProcess\\\",\\\"malwareProcessCustom\\\",\\\"malwareProcessFeed\\\",\\\"explicitlyDeniedProcess\\\",\\\"modifiedProcess\\\",\\\"cryptoMinerProcess\\\",\\\"lateralMovementProcess\\\",\\\"tmpfsProcess\\\",\\\"policyHijacked\\\",\\\"reverseShell\\\",\\\"suidBinaries\\\",\\\"unknownOriginBinary\\\",\\\"webShell\\\",\\\"administrativeAccount\\\",\\\"encryptedBinary\\\",\\\"sshAccess\\\",\\\"explicitlyDeniedFile\\\",\\\"malwareFileCustom\\\",\\\"malwareFileFeed\\\",\\\"execFileAccess\\\",\\\"elfFileAccess\\\",\\\"secretFileAccess\\\",\\\"regFileAccess\\\",\\\"wildfireMalware\\\",\\\"unknownOriginBinary\\\",\\\"webShell\\\",\\\"fileIntegrity\\\",\\\"alteredBinary\\\",\\\"malwareDownloaded\\\",\\\"suspiciousELFHeader\\\",\\\"executionFlowHijackAttempt\\\",\\\"customRule\\\"]', \n cluster = '', \n collections = [\n ''\n ], \n container = True, \n container_id = 
'', \n container_name = '', \n count = 56, \n country = '', \n effect = '[\\\"block\\\",\\\"prevent\\\",\\\"alert\\\",\\\"disable\\\"]', \n err = '', \n fqdn = '', \n function = '', \n function_id = '', \n hostname = '', \n image_id = '', \n image_name = '', \n interactive = True, \n label = '', \n labels = {\n 'key' : ''\n }, \n msg = '', \n namespace = '', \n os = '', \n pid = 56, \n process_path = '', \n profile_id = '', \n raw_event = '', \n region = '', \n request_id = '', \n rule_name = '', \n runtime = '[\\\"python\\\",\\\"python2.7\\\",\\\"python3.6\\\",\\\"python3.7\\\",\\\"python3.8\\\",\\\"nodejs10.x\\\",\\\"dotnetcore2.1\\\",\\\"java8\\\"]', \n severity = '[\\\"low\\\",\\\"medium\\\",\\\"high\\\"]', \n time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), \n type = '[\\\"processes\\\",\\\"network\\\",\\\"kubernetes\\\",\\\"filesystem\\\"]', \n user = '', \n version = ''\n )\n else :\n return SharedRuntimeAudit(\n )" ]
[ "0.54777217", "0.539541", "0.538349", "0.53488857", "0.53337604", "0.53252006", "0.5288814", "0.5252521", "0.5168391", "0.516711", "0.51452863", "0.51452863", "0.51452863", "0.51452863", "0.51452863", "0.5060443", "0.5005577", "0.5000991", "0.50000215", "0.49836135", "0.4980472", "0.4960993", "0.49419108", "0.49212033", "0.48951513", "0.48726276", "0.48694834", "0.48633763", "0.48119855", "0.48084188", "0.47916713", "0.4788368", "0.47771505", "0.47754344", "0.47724685", "0.4772215", "0.47592464", "0.47574008", "0.47497958", "0.47437185", "0.4740548", "0.4738949", "0.47380126", "0.47357664", "0.47161782", "0.4711486", "0.47077677", "0.47066775", "0.47009522", "0.47004735", "0.46974817", "0.46974817", "0.46974817", "0.46974817", "0.46940988", "0.46900523", "0.46819213", "0.4671498", "0.4665107", "0.46650743", "0.4661295", "0.4655539", "0.4646536", "0.46390557", "0.46373034", "0.46367607", "0.46345574", "0.4623872", "0.46229362", "0.4619619", "0.46179095", "0.46096197", "0.4606334", "0.45955524", "0.45920485", "0.45911363", "0.45896986", "0.4589574", "0.45812744", "0.45748365", "0.45744655", "0.45739645", "0.45736885", "0.4572547", "0.45694387", "0.45641816", "0.45639983", "0.45621273", "0.45605132", "0.45527205", "0.45483327", "0.45428738", "0.45428252", "0.45403862", "0.4537545", "0.45320973", "0.45320973", "0.45202738", "0.45190814", "0.4516418", "0.4510548" ]
0.0
-1
Sets the name of this ClairpbVulnerability.
def name(self, name):
    self._name = name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_name(self, name):\n self.name = name # overwrite the existing name with the input name", "def set_name(self, name):\n self.name = name # overwrite the existing name with the input name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n\t\tself.name_ = name", "def set_name(self, name):\n self._name = name", "def set_name(self, name):\n self.__name = name", "def set_name(self, name):\n self.__name = name", "def set_name(self, name):\r\n self.__name = name", "def set_name(self, _name):\n self.name = _name", "def set_name(self, name: str):\n self._name = name", "def SetName(self, name):\n self.name = name", "def setname(self, name):\n self.__name = name", "def set_name(self, name):\n assert isinstance(name, str), 'Name must be string'\n self._name = name", "def setName(self, name):\n self._name = name", "def setName(self, name):\n self._name = name", "def set_name(self,name):\r\n self._name = __name", "def setName(self, name):\n self.name = str(name)", "def setName(self, name):\n self.name = name", "def setName(self, name):\n self.name = name", "def setName(self, name):\n self.name = name", "def setName(self, name):\n self.name = name", "def setName(self, name): \n\n self._name = name", "def set_name(self, a_name):\n self.set_parameter('name', a_name)\n return self", "def set_name(self,name):\n if not isinstance(name,(str)):\n raise TypeError('name must be string')\n else:\n self._name = name", "def set_name(self, newname=\"\"):\n self.name = newname", "def name(self, name: str):\r\n self._name = name", "def name(self, name: str):\r\n self._name = name", "def name(self, name: str):\r\n self._name = name", "def name(self, name: str):\r\n self._name = name", "def name(self, name: str) -> None:\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n \n self._name = name", "def name(self, name):\n \n self._name = name", "def name(self, name):\n \n self._name = name", "def name(self, name):\n \n self._name = name", "def set_name(self, name):\n self.options['name'] = name", "def set_name(self, _name):\n self.name = _name\n return self.name", "def set_name(self, name):\n\n\t\tif name is not None and not isinstance(name, str):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: name EXPECTED TYPE: str', None, None)\n\t\t\n\t\tself.__name = name\n\t\tself.__key_modified['name'] = 1", "def set_name(self, name):\n self.name = name\n self.labels.change_name(name)", "def name(self, name):\n self.__name = name", "def name(self, name: str):\n\n self._name = name", "def name(self, name: str):\n\n self._name = name", "def name(self, name: str):\n\n self._name = name", "def name(self, name: str):\n\n self._name = name", "def name(self, name: str):\n\n self._name = name", "def name(self, name: str):\n\n self._name = name", "def name(self, name: str):\n\n 
self._name = name", "def setName(self, name):\n self.name = name\n return self" ]
[ "0.7476369", "0.7476369", "0.74305564", "0.74305564", "0.74305564", "0.74305564", "0.74305564", "0.7423173", "0.73924625", "0.73535186", "0.73535186", "0.73509634", "0.7336383", "0.7328558", "0.7323258", "0.73170054", "0.71923685", "0.71672845", "0.71672845", "0.71484566", "0.71279055", "0.71053517", "0.71053517", "0.71053517", "0.71053517", "0.70980537", "0.7075148", "0.70527834", "0.70414805", "0.7032685", "0.7032685", "0.7032685", "0.7032685", "0.70260674", "0.70113415", "0.70113415", "0.70113415", "0.70113415", "0.70113415", "0.70113415", "0.70113415", "0.70113415", "0.70113415", "0.70113415", "0.70113415", "0.70113415", "0.6990382", "0.6990382", "0.6990382", "0.6990382", "0.69893354", "0.6979326", "0.6965529", "0.6956253", "0.69308233", "0.69295615", "0.69295615", "0.69295615", "0.69295615", "0.69295615", "0.69295615", "0.69295615", "0.6918882" ]
0.0
-1
Sets the namespace_name of this ClairpbVulnerability.
def namespace_name(self, namespace_name):
    self._namespace_name = namespace_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_namespace(self, namespace: str) -> None:\n self._namespace = namespace", "def namespace(self, namespace: str):\n\n self._namespace = namespace", "def namespace(self, namespace):\n\n self._namespace = namespace", "def namespace(self, namespace):\n\n self._namespace = namespace", "def set_target_namespace(self, namespace):\n # do shit\n self.target_namespace = namespace.strip(\":\")", "def set_ns_prefix(self, ns_for_name: Dict[str, Tuple[str, str]]) -> None:\n self.c_prefix, self.f_prefix = ns_for_name[self.name]", "def set_test_namespace_value(namespace_name=None):\r\n global namespace_value\r\n namespace_value = namespace_name", "def set_ns_prefix(\n self, ns_for_name: Dict[str, Tuple[str, str]], c_ns: str, f_ns: str\n ) -> None:\n self.c_prefix = c_ns\n self.f_prefix = f_ns", "def conventionalize_namespace(self, namespace: str) -> str:\n return _conventionalize(self.options, \"namespace\", namespace)", "def nvmf_namespace_num(self, nvmf_namespace_num):\n\n self._nvmf_namespace_num = nvmf_namespace_num", "def __setattr__(self, name, value):\n if not isinstance(name, str):\n raise ValueError('Namespace label must be a string')\n if name.startswith('_'):\n raise ValueError('Namespace cannot start with an underscore')\n\n if name in self._namespaces:\n raise ValueError('Namespaces cannot be redefined')\n\n self._namespaces[name] = Namespace(name, label=value)", "def set_ns_prefix(self, ns_for_name: Dict[str, Tuple[str, str]]) -> None:\n self.c_prefix, self.f_prefix = ns_for_name[self.class_name]\n self.ret_type.set_ns_prefix(ns_for_name, self.c_prefix, self.f_prefix)\n for param in self.params:\n param.set_ns_prefix(ns_for_name, self.c_prefix, self.f_prefix)", "def setScope(self, fileBasename):\n self.fileBasename = fileBasename\n scopeNamespace = self.defaultNamespacePrefix + fileBasename + '/'\n \n # Annotations go to a different namespace\n annotationScopeNamespace = self.annotationsNamespacePrefix + fileBasename + '/'\n \n self.log.debug('Adding namespace for {0}: {1}'.format(fileBasename, scopeNamespace))\n \n self.namespaces['scope'] = Namespace(scopeNamespace)\n self.annotationNamespaces['scope'] = Namespace(annotationScopeNamespace)\n self.graph.namespace_manager.bind('', self.namespaces['scope'])\n self.annotationGraph.namespace_manager.bind('', self.annotationNamespaces['scope'])", "def setElementNamespace(self, *args):\n return _libsbml.ASTBasePlugin_setElementNamespace(self, *args)", "def setNs(self, ns):\n if ns is None: ns__o = None\n else: ns__o = ns._o\n libxml2mod.xmlSetNs(self._o, ns__o)", "def set_ns_prefix(self, ns_for_name: Dict[str, Tuple[str, str]]) -> None:\n for member in self.members:\n member.set_ns_prefix(ns_for_name)", "def __init__(self, name: str, namespace: str):\n self.name = name\n self.namespace = namespace", "def set_ns_prefix(self, ns_for_name: Dict[str, Tuple[str, str]]) -> None:\n for instance in self.instances:\n instance.set_ns_prefix(ns_for_name)", "def as_namespace_name(name, version):\n return name + ':' + version", "def setElementNamespace(self, *args):\n return _libsbml.SBasePlugin_setElementNamespace(self, *args)", "def set_namespace(self, namespace):\n if not isinstance(namespace, NamespaceModel):\n raise ConfigException(\"given an object that is not \"\n \"a kind of NamespaceModel: %s\" % str(namespace))\n self.namespace_model_instance = namespace", "def test_replace_net_namespace(self):\n pass", "def set_doc_namespace(self, doc, namespace):\n if not self.doc_namespace_set:\n self.doc_namespace_set = True\n if 
validations.validate_doc_namespace(namespace):\n doc.namespace = namespace\n return True\n else:\n raise SPDXValueError('Document::Namespace')\n else:\n raise CardinalityError('Document::Comment')", "def set_name(self, name):\r\n self.stream.set_node_name(self.node, name)", "def namespace_group_num(self, namespace_group_num):\n\n self._namespace_group_num = namespace_group_num", "def setname(self, name):\n self.__name = name", "def replace_namespaced_net_namespace(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method replace_namespaced_net_namespace\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `replace_namespaced_net_namespace`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `replace_namespaced_net_namespace`\")\n\n resource_path = '/oapi/v1/netnamespaces/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1NetNamespace',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def setNamespaces(self, *args):\n return _libsbml.SBase_setNamespaces(self, *args)", "def set_qname(self, qname):\n self._qname = qname", "def setOpenIDNamespace(self, openid_ns_uri, implicit):\n if isinstance(openid_ns_uri, bytes):\n openid_ns_uri = str(openid_ns_uri, encoding=\"utf-8\")\n if openid_ns_uri not in self.allowed_openid_namespaces:\n raise InvalidOpenIDNamespace(openid_ns_uri)\n\n self.namespaces.addAlias(openid_ns_uri, NULL_NAMESPACE, implicit)\n self._openid_ns_uri = openid_ns_uri", "def namespace(self) -> str:\n return pulumi.get(self, \"namespace\")", "def setVarName(self, theName):\n self._name = theName\n return self", "def replace_namespaced_namespace(self, body, name, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.replace_namespaced_namespace_with_http_info(body, name, **kwargs)\n else:\n (data) = self.replace_namespaced_namespace_with_http_info(body, name, **kwargs)\n return data", "def set_policyname(self, policyname):\n self.options['policyname'] = policyname", "def setNamespaces(self, *args):\n return _libsbml.XMLToken_setNamespaces(self, *args)", "def set_policyname(self, policyname):\n 
self.options[\"policyname\"] = policyname", "def namespace(self) -> str:\n return self._namespace", "def _namespace_mangle(self, namespace):\n return namespace.replace(\".\", \"__\")", "def namespace(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"namespace\")", "def set_name(self, name):\n self.name = name\n self.labels.change_name(name)", "def ReplaceNamespace(self, request, global_params=None):\n config = self.GetMethodConfig('ReplaceNamespace')\n return self._RunMethod(\n config, request, global_params=global_params)", "def ReplaceNamespace(self, request, global_params=None):\n config = self.GetMethodConfig('ReplaceNamespace')\n return self._RunMethod(\n config, request, global_params=global_params)", "def set_name(self, name):\n self.__name = name", "def set_name(self, name):\n self.__name = name", "def setName(self, name):\n self._name = name", "def setName(self, name):\n self._name = name", "def set_name(self, name):\n self._name = name", "def setNs(self, node):\n if node is None: node__o = None\n else: node__o = node._o\n libxml2mod.xmlSetNs(node__o, self._o)", "def set_name(self, name):\r\n self.__name = name", "def set_name(self, name):\n\t\tself.name_ = name", "def setName(self, newName):\n self.__username = newName", "def __init__(__self__, *,\n namespace: Optional[pulumi.Input[str]] = None):\n if namespace is not None:\n pulumi.set(__self__, \"namespace\", namespace)", "def set_cname(self, cname):\n self.__cname = cname", "def namespace(self) -> Optional[str]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> Optional[str]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> Optional[str]:\n return pulumi.get(self, \"namespace\")", "def set_BucketName(self, value):\n super(PutBucketWebsiteRedirectInputSet, self)._set_input('BucketName', value)", "def scope_name(self, name):\n self._scope_name = name", "def setName(self, name): \n\n self._name = name", "def _fixNS(self, namespace):\n if isinstance(namespace, bytes):\n namespace = str(namespace, encoding=\"utf-8\")\n\n if namespace == OPENID_NS:\n if self._openid_ns_uri is None:\n raise UndefinedOpenIDNamespace('OpenID namespace not set')\n else:\n namespace = self._openid_ns_uri\n\n if namespace != BARE_NS and not isinstance(namespace, str):\n raise TypeError(\n \"Namespace must be BARE_NS, OPENID_NS or a string. got %r\" %\n (namespace, ))\n\n if namespace != BARE_NS and ':' not in namespace:\n fmt = 'OpenID 2.0 namespace identifiers SHOULD be URIs. 
Got %r'\n warnings.warn(fmt % (namespace, ), DeprecationWarning)\n\n if namespace == 'sreg':\n fmt = 'Using %r instead of \"sreg\" as namespace'\n warnings.warn(\n fmt % (SREG_URI, ),\n DeprecationWarning, )\n return SREG_URI\n\n return namespace", "def set_name(self,name):\r\n self._name = __name", "def set_name(self, _name):\n self.name = _name", "def set_name(self, name):\n self.name = name # overwrite the existing name with the input name", "def set_name(self, name):\n self.name = name # overwrite the existing name with the input name", "def setName(self, *args):\n return _libsbml.Port_setName(self, *args)", "def setDefaultNS(self, ns):\n self.default_ns = ns", "def document_name(self, document_name):\n\n self._document_name = document_name", "def document_name(self, document_name):\n\n self._document_name = document_name", "def document_name(self, document_name):\n\n self._document_name = document_name", "def SetName(self, name):\n self.name = name", "def namespace(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"namespace\")", "def setSBMLNamespaces(self, *args):\n return _libsbml.XMLInputStream_setSBMLNamespaces(self, *args)", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name" ]
[ "0.7218368", "0.6848428", "0.6652187", "0.6652187", "0.6394273", "0.61235774", "0.60623956", "0.59424466", "0.5783313", "0.57451713", "0.57072073", "0.56936276", "0.56725025", "0.5653841", "0.5579495", "0.55766636", "0.55388397", "0.5538237", "0.5520687", "0.55004734", "0.5398711", "0.5376789", "0.53735054", "0.5372944", "0.5346059", "0.5335326", "0.5334406", "0.5331527", "0.53207177", "0.5314207", "0.52942", "0.52840334", "0.5283935", "0.5269259", "0.52678025", "0.5267166", "0.5264099", "0.52607965", "0.5259071", "0.5259071", "0.5259071", "0.5259071", "0.5259071", "0.5259071", "0.5259071", "0.5259071", "0.5259071", "0.5259071", "0.52472216", "0.5209319", "0.5209319", "0.51920694", "0.51920694", "0.51919144", "0.51919144", "0.51840967", "0.5163697", "0.5160205", "0.5158777", "0.5157397", "0.51422113", "0.51410145", "0.5126784", "0.5126784", "0.5126784", "0.51259524", "0.51256585", "0.5125032", "0.5121315", "0.5116579", "0.510707", "0.5092831", "0.5092831", "0.5067823", "0.50634176", "0.50576866", "0.50576866", "0.50576866", "0.50572944", "0.50519377", "0.50519377", "0.50519377", "0.50519377", "0.50519377", "0.50519377", "0.50519377", "0.50519377", "0.50519377", "0.50519377", "0.50519377", "0.50519377", "0.50519377", "0.50519377", "0.50519377", "0.50519377", "0.5050325", "0.5047664", "0.5047664", "0.5047664", "0.5047664" ]
0.7917395
0
Sets the description of this ClairpbVulnerability.
def description(self, description): self._description = description
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_description(self, description):\r\n self.__description = description", "def set_description(self, description):\n self.description = description", "def set_description(self, description):\n self.__description = description", "def description(self, description):\n self._description = description", "def description(self, description):\n self._description = description", "def description(self, description):\n self._description = description", "def description(self, description):\n self._description = description", "def set_description(self, data):\n self._description = self._uni(data)", "def set_description(self, description):\n self._description = description", "def SetDescription(self, description):\n self.description = str(description)", "def description(self, description: str):\n\n self._description = description", "def description(self, description: str):\n\n self._description = description", "def description(self, description: str):\n\n self._description = description", "def description(self, description: str):\n\n self._description = description", "def description(self, description) :\n\t\ttry :\n\t\t\tself._description = description\n\t\texcept Exception as e:\n\t\t\traise e", "def set_description(self, desc: str) -> None:\n self.metadata.data[\"description\"] = desc", "def description(self, value):\n self.definition.description = value", "def description(self, description):\n\n self._set_field(\"description\", description)", "def description(self, description):\n if description is not None and len(description) > 255:\n raise ValueError(\"Invalid value for `description`, length must be less than or equal to `255`\")\n\n self._description = description", "def set_desc(self, item_desc):\r\n self.description = item_desc", "def description(self, value):\n self._update_values('description', value)", "def challenge_description(self, challenge_description):\n\n self._challenge_description = challenge_description", "def set_desc(self, desc: str):\n self._desc = desc", "def description(self, description):\n \n if description is not None and len(description) > 128: \n raise ValueError(\"Invalid value for `description`, length must be less than `128`\")\n\n self._description = description", "def set_description(self, sDescription):\n\t\tcall_sdk_function('PrlVirtNet_SetDescription', self.handle, sDescription)", "def add_description(self, desc):\n self.description = desc", "def description(self, description: ConfigNodePropertyString):\n\n self._description = description", "def description(self, description):\n if description is None:\n raise ValueError(\"Invalid value for `description`, must not be `None`\")\n\n self._description = description", "def description(self, newDescription=None):\n pass", "def description(self, description):\n if (self.local_vars_configuration.client_side_validation and\n description is not None and len(description) > 200):\n raise ValueError(\"Invalid value for `description`, length must be less than or equal to `200`\") # noqa: E501\n\n self._description = description", "def set_description(self, room_description):\n self.description = room_description", "def set_description(self, descr):\n self._current_test_descr = descr", "def description(self, description):\n if self.local_vars_configuration.client_side_validation and description is None: # noqa: E501\n raise ValueError(\"Invalid value for `description`, must not be `None`\") # noqa: E501\n\n self._description = description", "def description(self, description):\n if 
self.local_vars_configuration.client_side_validation and description is None: # noqa: E501\n raise ValueError(\"Invalid value for `description`, must not be `None`\") # noqa: E501\n\n self._description = description", "def set_description(self, sNewDescription):\n\t\tcall_sdk_function('PrlVmDev_SetDescription', self.handle, sNewDescription)", "def with_description(self, description):\r\n self.description = description\r\n return self", "def set_description(self, desc):\n super().set_description(desc, refresh=True)\n if self._pbar:\n self._pbar._set_description(self.desc)", "def description(self, new_description):\n self.set_description(new_description, self._xml)\n self._description = self._read_description(self._xml)", "def descricao(self, descricao: str):\n\n self._descricao = descricao", "def _set_desc(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'0 .. 64']}), default=unicode(\"\"), is_leaf=True, yang_name=\"desc\", rest_name=\"desc\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u\"Description of the user (default='')\", u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='string', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"desc must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'0 .. 64']}), default=unicode(\"\"), is_leaf=True, yang_name=\"desc\", rest_name=\"desc\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u\"Description of the user (default='')\", u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='string', is_config=True)\"\"\",\n })\n\n self.__desc = t\n if hasattr(self, '_set'):\n self._set()", "def description(self, new_description):\r\n self.set({\"description\": new_description})", "def set_description(self, description):\n self.description = description\n if not self.record:\n return\n self.mdb.results.update({'_id':self.result_id}, \n {'$set':{'test_case':description}})", "def set_desc(self, annonce):\n d = annonce.find_element_by_class_name('prdtBILDesc.jsPrdtBILLink')\n self.desc = d.text.splitlines()", "def set_description(\n self, path: Union[bytes, str], description: Optional[Union[bytes, str]] = None\n ) -> None:\n path = _to_bytes_or_null(path)\n description = _to_bytes_or_null(description)\n ret = lib.Fapi_SetDescription(self._ctx, path, description)\n _chkrc(ret)", "def set_longdescription(self, longdesc):\n self.longdescription(longdesc)", "def set_description(self, sNewVmDescription):\n\t\tcall_sdk_function('PrlVmCfg_SetDescription', self.handle, sNewVmDescription)", "def _set_description(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(\n v,\n base=six.text_type,\n is_leaf=True,\n yang_name=\"description\",\n parent=self,\n path_helper=self._path_helper,\n extmethods=self._extmethods,\n register_paths=True,\n namespace=\"http://openconfig.net/yang/network-instance\",\n defining_module=\"openconfig-network-instance\",\n yang_type=\"string\",\n is_config=True,\n )\n except (TypeError, ValueError):\n raise 
ValueError(\n {\n \"error-string\": \"\"\"description must be of a type compatible with string\"\"\",\n \"defined-type\": \"string\",\n \"generated-type\": \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"description\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=True)\"\"\",\n }\n )\n\n self.__description = t\n if hasattr(self, \"_set\"):\n self._set()", "def _set_description(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(\n v,\n base=six.text_type,\n is_leaf=True,\n yang_name=\"description\",\n parent=self,\n path_helper=self._path_helper,\n extmethods=self._extmethods,\n register_paths=True,\n namespace=\"http://openconfig.net/yang/network-instance\",\n defining_module=\"openconfig-network-instance\",\n yang_type=\"string\",\n is_config=True,\n )\n except (TypeError, ValueError):\n raise ValueError(\n {\n \"error-string\": \"\"\"description must be of a type compatible with string\"\"\",\n \"defined-type\": \"string\",\n \"generated-type\": \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"description\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=True)\"\"\",\n }\n )\n\n self.__description = t\n if hasattr(self, \"_set\"):\n self._set()", "def setDescription(self, value):\n return self.getDbRecord().setColumnValue(DESCRIPTION_COLUMN, value)", "def long_description(self, long_description):\n self._long_description = long_description", "async def set_profile_description(self, ctx, *, description: str):\n max_words = self.plugin.data.profile.max_description_length\n if len(description) > max_words:\n res = f\"{ctx.emotes.web_emotion.xx} Sorry but profile description cannot exceed {max_words} word limit.\"\n return await ctx.send_line(res)\n profile = await self.cache.get_profile(ctx.author.id)\n await profile.set_description(description)\n embed = self.bot.theme.embeds.primary(title=\"โœ… Your Profile Description has been updated to:\")\n embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url)\n embed.description = profile.description\n await ctx.send(\"\", embed=embed)", "def description(self, value):\n if len(value):\n self._description = value\n self._description = self._wrap_line(value, self._width)\n\n # Add a blank line\n self._description.append('')", "def change_description(self, description):\n if type(description) == str:\n self.description = description\n elif description is None:\n self.description = None\n else:\n raise TypeError('str expect, not {}'.format(type(description)))", "def set_description(desc):\n global last_description\n last_description = desc", "def description(self, description):\n if description is not None and len(description) > 30:\n raise ValueError(\"Invalid value for `description`, length must be less than or equal to `30`\") # noqa: E501\n\n self._description = description", "def update_description(self, host, baseUrl, description):\n self._host = host\n self._urlBase = baseUrl\n self._description = description\n return", "def description(self, description: str):\n if description is None:\n raise ValueError(\"Invalid value for `description`, must not be `None`\") # noqa: E501\n\n self._description = 
description", "def description(self, description: str):\n if description is None:\n raise ValueError(\"Invalid value for `description`, must not be `None`\") # noqa: E501\n\n self._description = description", "def description(self, description):\n if description is None:\n raise ValueError(\"Invalid value for `description`, must not be `None`\") # noqa: E501\n\n self._description = description", "def description(self, description):\n if description is None:\n raise ValueError(\"Invalid value for `description`, must not be `None`\") # noqa: E501\n\n self._description = description", "def set_description(self):\n if 'description' not in self.data:\n if self.verbose:\n click.echo('Adding empty descriptions to root')\n self.data['description'] = ''" ]
[ "0.72097325", "0.7180765", "0.7136288", "0.7065354", "0.7065354", "0.7065354", "0.7065354", "0.7059732", "0.70556676", "0.7041413", "0.6891786", "0.6891786", "0.6891786", "0.6891786", "0.68883944", "0.68043464", "0.670095", "0.66983426", "0.6676779", "0.6659177", "0.6595515", "0.65730536", "0.6567598", "0.6560873", "0.65507966", "0.6550098", "0.65411705", "0.65307146", "0.6482089", "0.6473754", "0.6461542", "0.64558893", "0.64346236", "0.64346236", "0.64244264", "0.6423395", "0.6272999", "0.623771", "0.6230686", "0.6228276", "0.6209324", "0.61496055", "0.6146941", "0.6142617", "0.61374986", "0.6137048", "0.6096297", "0.6096297", "0.60710746", "0.6043911", "0.59943855", "0.59893525", "0.5987464", "0.5953085", "0.5944254", "0.59375685", "0.59263843", "0.59263843", "0.59171456", "0.59171456", "0.5914581" ]
0.69659215
38
Sets the link of this ClairpbVulnerability.
def link(self, link): self._link = link
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def link(self, link):\n\n self._set_field(\"link\", link)", "def link(self, link):\n\n self.container['link'] = link", "def href(self, href):\n\n self._href = href", "def href(self, href):\n\n self._href = href", "def href(self, href):\n\n self._href = href", "def href(self, href):\n\n self._href = href", "def review_link(self, review_link):\n\n self._review_link = review_link", "def link_id(self, link_id):\n\n self._link_id = link_id", "def setLink(self, link):\n if type(link)==Member or link==None:\n self.link = link\n return True\n else:\n return False", "def link_data(self, link_data):\n\n self._link_data = link_data", "def set_card_link(self):\n self.response.card.type = 'LinkAccount'", "def add_link(self, link):\n raise NotImplementedError", "def __init__(self, link):\n self.__link = link", "def update_links(self, new_link):\r\n self.__links = new_link", "def set_url(self, url):\n super(Cabling, self).set_url(url)", "def set_linked_data(\n self,\n val=None\n ):\n if val != None:\n self.linked_data = val", "def set_url(self, url):\n self.url = url", "def price_link(self, price_link):\n\n self._price_link = price_link", "def set_url(self, url):\n self.url = url", "def _set_url(self): \n self.url = self.geturl()", "def repo_link_set(self, repo_id, link_type, target):\n self.send(repo_id, 'repo_link_set', link_type, target)", "def setAddLinks(self,value):\n self.PDFreactorConfiguration.in1[\"addLinks\"] = value", "def _set_link(self, value, handler):\n self._mapping[value] = handler", "def set_url(self, url):\n self.data['url'] = url", "def url(self, url):\n\n self._url = url", "def url(self, url):\n\n self._url = url", "def url(self, url):\n\n self._url = url", "def url(self, url):\n\n self._url = url", "def url(self, url):\n\n self._url = url", "def url(self, url):\n\n self._url = url", "def url(self, url):\n\n self._url = url", "def link(self, link):\n if link is None:\n raise ValueError(\"Invalid value for `link`, must not be `None`\") # noqa: E501\n\n self._link = link", "def add_link_setting(self, key, link, default):\n\n setting = self.settings().new_link(key, link, default)\n self._add_to_list_field(\"settings\", setting)", "def set_url(self, url):\n if url is not None:\n self.url = url", "def item_href(self, item_href):\n\n self._item_href = item_href", "def links(self, links):\n self._links = links", "def affiliate_link_oid(self, affiliate_link_oid):\n\n self._affiliate_link_oid = affiliate_link_oid", "def set_lien(self, annonce):\n h = annonce.find_element_by_class_name('prdtBILDetails')\n l= h.find_element_by_tag_name('a')\n self.lien = l.get_attribute(\"href\")", "def setUrl( self, url ):\n self._urlEdit.setText(str(url))", "def url(self, url: str):\n self._url = url", "def link(self, link):\r\n return links.Link(self, link)", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def relink(self, link_id):", "def set_dbgap_link(self):\n return self.VARIABLE_URL.format(\n self.source_dataset.source_study_version.full_accession, self.i_dbgap_variable_accession)", "def 
new_link(self, key, link, default):\n\n s = self._new_link()\n s.key = key\n s.link = link\n s.default = default\n return s", "def url(self, url: str):\n\n self._url = url", "def url(self, url: str):\n\n self._url = url", "def url(self, url: str):\n\n self._url = url", "def linkedin(self, linkedin):\n\n self._linkedin = linkedin", "def setRemoteUrl(self, value, **kwargs):\n if value:\n value = urlparse.urlunparse(urlparse.urlparse(value))\n self.getField('remoteUrl').set(self, value, **kwargs)", "def set_dbgap_link(self):\n return self.STUDY_VERSION_URL.format(self.full_accession)", "def setAnchor(self,a):\n self.anchor = a", "def _set_link(\n meta: Dict,\n link: Optional[Union[type(None), str, bool, KEChainPages]] = None,\n link_value: Optional[CardWidgetLinkValue] = None,\n link_target: Optional[Union[str, LinkTargets]] = LinkTargets.SAME_TAB,\n **kwargs,\n) -> Dict:\n meta[\"linkTarget\"] = check_enum(link_target, LinkTargets, \"link_target\")\n\n from pykechain.models import Activity\n\n if isinstance(link, Activity):\n if link.activity_type == ActivityType.TASK:\n default_link_value = CardWidgetLinkValue.TASK_LINK\n else:\n default_link_value = CardWidgetLinkValue.TREE_VIEW\n\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: link.id,\n MetaWidget.SHOW_LINK_VALUE: default_link_value,\n }\n )\n elif isinstance(link, str) and is_uuid(link):\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: link,\n MetaWidget.SHOW_LINK_VALUE: CardWidgetLinkValue.TASK_LINK,\n }\n )\n elif link is None or link is False:\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: None,\n MetaWidget.SHOW_LINK_VALUE: CardWidgetLinkValue.NO_LINK,\n }\n )\n elif link in KEChainPages.values():\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: \"\",\n MetaWidget.SHOW_LINK_VALUE: CardWidgetKEChainPageLink[link],\n }\n )\n else:\n meta.update(\n {\n MetaWidget.CUSTOM_LINK: link,\n MetaWidget.SHOW_LINK_VALUE: CardWidgetLinkValue.EXTERNAL_LINK,\n }\n )\n\n if link_value is not None:\n meta.update(\n {\n MetaWidget.SHOW_LINK_VALUE: check_enum(\n link_value, CardWidgetLinkValue, \"link_value\"\n ),\n }\n )\n\n return meta", "def bank_link_id(self, bank_link_id):\n\n self._bank_link_id = bank_link_id", "def __init__(self, dataset_page_link):\n self.dataset_page_link = dataset_page_link", "def linked_node(self, value):\n self._linked_node = value", "def edit_link(parser, token):\n return EditLinkTag(parser, token)", "def save(self, *args, **kwargs):\n domain = urlsplit(self.url).netloc\n\n try:\n self.icon = LinkBrand.objects.get(domain=domain)\n except ObjectDoesNotExist:\n pass\n\n super(UserLink, self).save(*args, **kwargs)", "def links(self, links):\n\n self.container['links'] = links", "def set_access_point(self, value: str) -> None:\n\n self.__requester.set_base_url(value)", "def link_information_source(self, link_information_source):\n\n self._link_information_source = link_information_source", "def wiki_page_link(self, wiki_page_link):\n if wiki_page_link is None:\n raise ValueError(\"Invalid value for `wiki_page_link`, must not be `None`\")\n\n self._wiki_page_link = wiki_page_link", "def get_link(self, conf, link_id):\n\t\tpass", "def set_url(self, name, url):\n err = C.git_remote_set_url(self._repo._repo, to_bytes(name), to_bytes(url))\n check_error(err)", "def addLinkToResource(link):\n\n\tif link not in variables.resources:\n\t\tvariables.resources.append(link)", "def __init__(self,\n href: str) -> None:\n self.href = href", "def add_link(self, **kwgs):\n self.links.append(kwgs)", "def setMergeURL(self,value):\n 
self.PDFreactorConfiguration.in1[\"mergeURL\"] = value", "def user_url(self, user_url):\n\n self._user_url = user_url", "def html_url(self, html_url):\n\n self._html_url = html_url", "def pdf_url(self, pdf_url):\n\n self._pdf_url = pdf_url", "def pdf_url(self, pdf_url):\n\n self._pdf_url = pdf_url", "def is_link_verified(self, is_link_verified):\n\n self._is_link_verified = is_link_verified", "def __init__(self,\n *,\n href: str = None) -> None:\n self.href = href", "def __init__(self,\n *,\n href: str = None) -> None:\n self.href = href", "def set_dbgap_link(self):\n return self.DATASET_URL.format(self.source_study_version.full_accession, self.i_accession)", "def put(self):\n if jwthandler.authorize_action(self, 1) == False:\n return None\n\n userdata = jwthandler.decode_userdata(self.request.headers[\"Authorization\"])\n\n body_categories = {\"link_id\": 1, \"view_id\": 0, \"node_id_1\": 0, \"node_id_2\": 0, \"relationship_id\":0}\n link_dict = errorutil.check_fields(self.request.body.decode(), body_categories, self)\n\n print(link_dict)\n\n if link_dict == False:\n return None\n\n link_id = link_dict[\"link_id\"]\n del link_dict[\"link_id\"]\n\n if linkutil.change_link(link_id, link_dict, self) == False:\n return None\n\n formatted_message = loggerhandler.form_message_dictionary(userdata, \n \"links\", \n link_id,\n link_dict)\n\n\n loggerhandler.log_message(\"change\", formatted_message)\n\n self.write({\"message\":\"Success\"})", "def symlink(self, filen, link):\n src = os.path.abspath(filen)\n cwd = self.getWorkingDirectory()\n dest = os.path.join(cwd, link)\n os.symlink(os.path.relpath(src, cwd), dest)", "def symlink(self, filen, link):\n src = os.path.abspath(filen)\n cwd = self.getWorkingDirectory()\n dest = os.path.join(cwd, link)\n os.symlink(os.path.relpath(src, cwd), dest)", "def create_link(self, key, link, default):\n\n setting = self.new_link(key, link, default)\n setting.create()\n return setting", "def onCaptureWebLinkClicked(self, linkId=None):\n self.LinkWebMacro.emit()", "def clan(self, clan):\n\n self._clan = clan", "def link(self):\n if self.resource is None:\n self.resource = self.client.get_resource(self.href)\n self.client.post_linked_resource(\n self.resource, RelationType.LINK_TO_TEMPLATE,\n EntityType.ROLE.value, None)", "def repo_url(self, repo_url):\n\n self._repo_url = repo_url" ]
[ "0.7018405", "0.6961583", "0.6805548", "0.6805548", "0.6805548", "0.6805548", "0.6280648", "0.62231255", "0.6152556", "0.61381596", "0.6075746", "0.597047", "0.59602845", "0.5926421", "0.59064543", "0.58775926", "0.5876989", "0.5859482", "0.58415675", "0.5837184", "0.58210844", "0.5791032", "0.57669955", "0.5765423", "0.5745972", "0.5745972", "0.5745972", "0.5745972", "0.5745972", "0.5745972", "0.5745972", "0.5734386", "0.57072073", "0.5669863", "0.56256115", "0.56140995", "0.5534794", "0.55240095", "0.5523112", "0.5502806", "0.5501265", "0.5483828", "0.5483828", "0.5483828", "0.5483828", "0.5483828", "0.5483828", "0.5483828", "0.5483828", "0.5483828", "0.5483828", "0.5483828", "0.5457298", "0.54441434", "0.5436123", "0.5434301", "0.5434301", "0.5434301", "0.5417114", "0.539394", "0.5389994", "0.5364369", "0.53189534", "0.5311945", "0.53119075", "0.5304947", "0.5303202", "0.52991295", "0.5267128", "0.5252732", "0.5246246", "0.524533", "0.5226678", "0.52162766", "0.5188389", "0.5188093", "0.5182057", "0.5158927", "0.51430875", "0.5132071", "0.51160926", "0.51160926", "0.51077914", "0.50881416", "0.50881416", "0.5070199", "0.506583", "0.5061993", "0.5061993", "0.5059364", "0.5054869", "0.5049193", "0.5046729", "0.5045436" ]
0.71737444
6
Sets the severity of this ClairpbVulnerability.
def severity(self, severity): self._severity = severity
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def severity(self, severity):\n self._severity = severity", "def severity(self, severity):\n if severity is None:\n raise ValueError(\"Invalid value for `severity`, must not be `None`\") # noqa: E501\n\n self._severity = severity", "def severity(self, severity):\n if severity is None:\n raise ValueError(\"Invalid value for `severity`, must not be `None`\") # noqa: E501\n if severity is not None and len(severity) > 200:\n raise ValueError(\"Invalid value for `severity`, length must be less than or equal to `200`\") # noqa: E501\n if severity is not None and len(severity) < 1:\n raise ValueError(\"Invalid value for `severity`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._severity = severity", "def severity(self, severity):\n if severity is None:\n raise ValueError(\"Invalid value for `severity`, must not be `None`\") # noqa: E501\n if severity is not None and len(severity) < 1:\n raise ValueError(\"Invalid value for `severity`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._severity = severity", "def severity_name(self, severity_name):\n\n self._severity_name = severity_name", "def severity(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"severity\")", "def severity(self) -> str:\n return pulumi.get(self, \"severity\")", "def severity(self) -> str:\n return pulumi.get(self, \"severity\")", "def severity(self) -> str:\n return pulumi.get(self, \"severity\")", "def severity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"severity\")", "def severity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"severity\")", "def severity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"severity\")", "def setSeverityOverride(self, *args):\n return _libsbml.XMLErrorLog_setSeverityOverride(self, *args)", "def severity(self):\n return self._severity", "def severity(self):\n return self._severity", "def changeErrorSeverity(self, *args):\n return _libsbml.XMLErrorLog_changeErrorSeverity(self, *args)", "def severity(self) -> Optional[pulumi.Input['TestIssueSeverity']]:\n return pulumi.get(self, \"severity\")", "def set_verbosity(self,verbosity):\n type_name = type(verbosity).__name__\n if re.search('int',type_name) != None:\n \n # It is an integer, tes bounds\n if verbosity < 4 and verbosity > -1:\n self.verbosity = verbosity\n else:\n raise KINSOL_Exception(\"The variable sent to 'set_verbosity' must be either 0, 1, 2 or 3.\")\n else:\n raise KINSOL_Exception(\"The variable sent to 'set_verbosity' must be an integer.\")", "def severity(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"severity\")", "def severity(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"severity\")", "def severity(self) -> pulumi.Input['EndpointSeverity']:\n return pulumi.get(self, \"severity\")", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def normalise_severity(self, severity):\n return \"Info\" if severity == \"Unknown\" else severity", "def set_verbosity(self, verbosity):\n if verbosity == 0:\n self.__logger.setLevel(logging.CRITICAL)\n if verbosity == 1:\n self.__logger.setLevel(logging.ERROR)\n if verbosity == 2:\n self.__logger.setLevel(logging.WARNING)\n if verbosity == 3:\n 
self.__logger.setLevel(logging.INFO)\n if verbosity >= 4:\n self.__logger.setLevel(logging.DEBUG)", "def severity_justification(self, severity_justification):\n\n self._severity_justification = severity_justification", "def setThresholdLevel(self, *args):\n return _libsbml.Input_setThresholdLevel(self, *args)", "def numerical_severity(self, numerical_severity):\n if numerical_severity is None:\n raise ValueError(\"Invalid value for `numerical_severity`, must not be `None`\") # noqa: E501\n if numerical_severity is not None and len(numerical_severity) > 4:\n raise ValueError(\"Invalid value for `numerical_severity`, length must be less than or equal to `4`\") # noqa: E501\n if numerical_severity is not None and len(numerical_severity) < 1:\n raise ValueError(\"Invalid value for `numerical_severity`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._numerical_severity = numerical_severity", "async def test_minimum_severity(self):\n self.set_source_parameter(\"severities\", [\"medium\", \"high\"])\n response = await self.collect(get_request_json_return_value=self.vulnerabilities_json)\n self.assert_measurement(response, value=\"0\", entities=[])", "def getSeverity(self, *args):\n return _libsbml.SBMLExtension_getSeverity(self, *args)", "def set_level(self, level: LogLevel):\n pass", "def level_cap(self, level_cap):\n\n self._level_cap = level_cap", "def set_risk(self, event):\n if not self.caller.check_permstring(\"builders\"):\n raise self.CalCmdError(\"Only GMs can set the risk of an event.\")\n try:\n risk = int(self.lhs)\n if risk > 10 or risk < 0:\n raise ValueError\n except (TypeError, ValueError):\n raise self.CalCmdError(\"Risk must be between 0 and 10.\")\n self.set_form_or_event_attribute(\"risk\", risk, event)\n self.msg(\"Risk is now set to: %s\" % risk)", "def severity_for_platform_setting_syslog_config(self, severity_for_platform_setting_syslog_config):\n allowed_values = [\"ALERT\", \"CRIT\", \"DEBUG\", \"EMERG\", \"ERR\", \"INFO\", \"NOTICE\", \"WARNING\"] # noqa: E501\n if severity_for_platform_setting_syslog_config not in allowed_values:\n raise ValueError(\n \"Invalid value for `severity_for_platform_setting_syslog_config` ({0}), must be one of {1}\" # noqa: E501\n .format(severity_for_platform_setting_syslog_config, allowed_values)\n )\n\n self._severity_for_platform_setting_syslog_config = severity_for_platform_setting_syslog_config", "def level(self, level):\n allowed_values = [\"INFO\", \"WARNING\", \"SEVERE\", \"FINE\", \"FINER\", \"FINEST\"]\n if level not in allowed_values:\n raise ValueError(\n \"Invalid value for `level` ({0}), must be one of {1}\"\n .format(level, allowed_values)\n )\n\n self._level = level", "def set_verbosity(verbose_level=3):\n if not type(verbose_level) == int:\n raise TypeError(\"verbose_level must be an int\")\n\n if verbose_level < 0 or verbose_level > 4:\n raise ValueError(\"verbose_level must be between 0 and 4\")\n\n verbosity = [\n logging.CRITICAL,\n logging.ERROR,\n logging.WARNING,\n logging.INFO,\n logging.DEBUG]\n\n logging.basicConfig(\n format='%(asctime)s:\\t %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p',\n level=verbosity[verbose_level])", "def get_severity(chid):\n return get_timevars(chid).get('severity', 0)", "def _impact_to_severity(impact):\n if impact >= 0.9:\n return Finding.Severity.CRITICAL\n elif impact >= 0.7:\n return Finding.Severity.HIGH\n elif impact >= 0.4:\n return Finding.Severity.MEDIUM\n elif impact >= 0.01:\n return Finding.Severity.LOW\n else:\n return 
Finding.Severity.SEVERITY_UNSPECIFIED", "def set_verbosity(self, value):\n for source in self._sources.itervalues():\n source.verbosity = value", "def setLevel(newLevel):\n Verbose.__level = max(-1, newLevel)", "def setLogLevel(self,value):\n self.PDFreactorConfiguration.in1[\"logLevel\"] = value", "def _set_isis_level(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"isis-level\", rest_name=\"isis-level\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"isis_level must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"isis-level\", rest_name=\"isis-level\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__isis_level = t\n if hasattr(self, '_set'):\n self._set()", "def setThreshold(self, threshold): # real signature unknown; restored from __doc__\n pass", "def file_and_malware_syslog_severity(self, file_and_malware_syslog_severity):\n allowed_values = [\"ALERT\", \"CRIT\", \"DEBUG\", \"EMERG\", \"ERR\", \"INFO\", \"NOTICE\", \"WARNING\"] # noqa: E501\n if file_and_malware_syslog_severity not in allowed_values:\n raise ValueError(\n \"Invalid value for `file_and_malware_syslog_severity` ({0}), must be one of {1}\" # noqa: E501\n .format(file_and_malware_syslog_severity, allowed_values)\n )\n\n self._file_and_malware_syslog_severity = file_and_malware_syslog_severity", "def setThreshold(self, value):\n return self._set(threshold=value)", "def set_log_level(params, logger):\n level = params.get(\"Level\")\n\n levels = {\n \"CRITICAL\": logging.CRITICAL,\n \"ERROR\" : logging.ERROR,\n \"WARNING\" : logging.WARNING,\n \"INFO\" : logging.INFO,\n \"DEBUG\" : logging.DEBUG,\n \"NOTSET\" : logging.NOTSET\n }\n\n if level:\n logger.setLevel(levels.get(level, logging.NOTSET))", "def setAmbiguityThreshold(self, value):\n return self._set(ambiguityThreshold=value)", "def level(self, level=ERROR):\n try:\n self._level = level_dict[level]\n except KeyError:\n raise ValueError(f\"Input level is invalid.\")\n self.cnsl_handler.setLevel(level=self._level)\n self.file_handler.setLevel(level=self._level)\n self.logger.setLevel(level=self._level)", "def set_log_level_package(\n self,\n value: t.Union[str, int] = LOG_LEVEL_PACKAGE,\n ) -> None:\n logs.set_log_level(obj=self.LOG_LOGGER, level=value)", "def set_logging_level(self, level):\n if str(level) == '1':\n self.logging_level = logging.DEBUG\n elif str(level) == '2':\n self.logging_level = logging.INFO\n elif str(level) == '3':\n self.logging_level = logging.WARNING\n elif str(level) == '4':\n self.logging_level = logging.ERROR\n elif str(level) == '5':\n self.logging_level = logging.CRITICAL", "def compatibility_level(self, compatibility_level):\n\n self._compatibility_level = compatibility_level", "def level(self, level):\n 
allowed_values = [\"INFO\", \"WARNING\", \"ERROR\"]\n if level.lower() not in map(str.lower, allowed_values):\n # print(\"Invalid value for level -> \" + level)\n self._level = \"outdated_sdk_version\"\n else:\n self._level = level", "def vulnerabilities(self, vulnerabilities):\n\n self._vulnerabilities = vulnerabilities", "def vulnerabilities(self, vulnerabilities):\n\n self._vulnerabilities = vulnerabilities", "def test_severity_set_successful(self):\n # We create an instance of the panel so we can check existing values\n panel = SeverityAdminPanel(self.env)\n\n # Check the environment initially contains the default values.\n self.assertItemsEqual(panel.get_enum_list(), self.default['severity'])\n\n # create the section, option, and values in configuration\n self.env.config.set('ticket-field-config', 'severity',\n ','.join(self.new['severity']))\n\n admin_command = TicketFieldConfigCommand(self.env)\n\n # run our plugin\n admin_command.set_fields_from_config()\n\n self.assertItemsEqual(panel.get_enum_list(), self.new['severity'])", "def testStandarizeNumericVulnSeverity(self):\n\n vuln = ModelObjectVuln(name='VulnTest', desc='TestDescription',\n severity=0)\n\n self.assertEquals(vuln.severity, 'info',\n 'Vulnerability severity not transformed correctly')\n\n vuln = ModelObjectVuln(name='VulnTest', desc='TestDescription',\n severity=1)\n\n self.assertEquals(vuln.severity, 'low',\n 'Vulnerability severity not transformed correctly')\n\n vuln = ModelObjectVuln(name='VulnTest', desc='TestDescription',\n severity=2)\n\n self.assertEquals(vuln.severity, 'med',\n 'Vulnerability severity not transformed correctly')\n\n vuln = ModelObjectVuln(name='VulnTest', desc='TestDescription',\n severity=3)\n\n self.assertEquals(vuln.severity, 'high',\n 'Vulnerability severity not transformed correctly')\n\n vuln = ModelObjectVuln(name='VulnTest', desc='TestDescription',\n severity=4)\n\n self.assertEquals(vuln.severity, 'critical', \n 'Vulnerability severity not transformed correctly')\n\n\n vuln = ModelObjectVuln(name='VulnTest', desc='TestDescription',\n severity=5)\n\n self.assertEquals(vuln.severity, 'unclassified', \n 'Vulnerability severity not transformed correctly')\n\n vuln = ModelObjectVuln(name='VulnTest', desc='TestDescription',\n severity=-1)\n\n self.assertEquals(vuln.severity, 'unclassified', \n 'Vulnerability severity not transformed correctly')", "def __change_level(self, level):\n self.level = level", "def level(self, level):\n\n self._level = level", "def level(self, level):\n\n self._level = level", "def level(self, level):\n\n self._level = level", "def set_verbosity():\n\n\tif conf.verbose is None:\n\t\tconf.verbose = 1\n\n\tconf.verbose = int(conf.verbose)\n\n\tif conf.verbose == 0:\n\t\tlogger.setLevel(logging.ERROR)\n\telif conf.verbose == 1:\n\t\tlogger.setLevel(logging.INFO)\n\telif conf.verbose == 2:\n\t\tlogger.setLevel(logging.DEBUG)\n\telif conf.verbose == 3:\n\t\tlogger.setLevel(CUSTOM_LOGGING.PAYLOAD)\n\telif conf.verbose == 4:\n\t\tlogger.setLevel(CUSTOM_LOGGING.TRAFFIC_OUT)\n\telif conf.verbose >= 5:\n\t\tlogger.setLevel(CUSTOM_LOGGING.TRAFFIC_IN)", "def error_severity(self) -> Union[MqexsErrorSeverity, str]:\n return self.__error_severity", "def _set_lsp_config_isis_level(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"lsp-config-isis-level\", rest_name=\"lsp-config-isis-level\", parent=self, 
path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_isis_level must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"lsp-config-isis-level\", rest_name=\"lsp-config-isis-level\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)\"\"\",\n })\n\n self.__lsp_config_isis_level = t\n if hasattr(self, '_set'):\n self._set()", "def setLevel(self, level):\n self.level = level", "def __set_scalertype(self, scalertype):\n\n if not scalertype.lower() in self.types:\n raise ValueError(\"Scalertype '{}' not available. Use 'Standard' or 'MinMax'.\".format(scalertype))\n else:\n self.scalertype = scalertype.lower()", "def set_threshold(self, threshold):\n self._threshold = check_value_positive('threshold', threshold)", "def civilstatus(self, civilstatus):\n\n self._civilstatus = civilstatus", "def change_level(level):\n if 'debug' in level: LOG.setLevel(logging.DEBUG)\n elif 'info' in level: LOG.setLevel(logging.INFO)\n elif 'warning' in level: LOG.setLevel(logging.WARNING)\n elif 'error' in level: LOG.setLevel(logging.ERROR)\n elif 'critical' in level: LOG.setLevel(logging.CRITICAL)\n Logger.log('info', 'This logger changed the messages priority level to ', level)", "def SetLevelSetValue(self, _arg: 'double const') -> \"void\":\n return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_SetLevelSetValue(self, _arg)", "def level(self, value):\n self._level = mdraid.RAID_levels.raidLevel(value) # pylint: disable=attribute-defined-outside-init", "def verbosity(v):\n assert v in [0,1,2] # debug, warn, info\n GLOBAL['VERBOSITY'] = v", "def setLevel(self, level):\n self.lvl = level", "async def setIncident_priority(\n self,\n eventID: str,\n incidentNumber: int,\n priority: IncidentPriority,\n author: str,\n ) -> None:", "def machado(self, color: 'Color', severity: float) -> None:\n\n machado(color, severity, self.MACHADO)", "def level(self, log_level):\n self.logger.setLevel(log_level)", "def setIntensity397( self, c, voltage):\n dev = devChannel = '397intensity'\n self.validateDevChannel( dev, devChannel )\n self.validateInput( dev, voltage )\n channel = self.dcDict[dev]['devChannels'][devChannel]['channel']\n self.tryToSend( channel, voltage )", "def unsetSeverityOverride(self):\n return _libsbml.XMLErrorLog_unsetSeverityOverride(self)", "def _set_priority(self, v, load=False):\n try:\n t = YANGDynClass(v,base=np.uint8, is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True)\n except (TypeError, ValueError):\n raise ValueError(\"\"\"priority must be of a type compatible with base=np.uint8, is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True\"\"\")\n self.__priority = t\n if hasattr(self, '_set'):\n self._set()", "def set_contrast(level):\n send_command(0x81)\n send_command(level)", "def set_threshold_levels(self, event_name, val):\n if 
self.validate_supply_name(event_name, \"events/\") and val:\n self.console.runcmd(f\"echo {val} > events/{event_name}\")\n else:\n assert (\n False\n ), \"A valid event name or the value, is not given while setting levels\"", "async def test_one_low_severity_warning(self):\n response = await self.collect(get_request_json_return_value=self.vulnerabilities_json)\n self.assert_measurement(response, value=\"1\", entities=[self.expected_entity])", "def level(self, level: int):\n if level is None:\n raise ValueError(\"Invalid value for `level`, must not be `None`\")\n\n self._level = level", "def getSeverityOverride(self):\n return _libsbml.XMLErrorLog_getSeverityOverride(self)", "def priority(self, priority):\n self._priority = priority", "def setLevel(level='info'):\n\n mapper = {\n 'critical' : logging.CRITICAL, \n 'error' : logging.ERROR,\n 'warning' : logging.WARNING,\n 'info' : logging.INFO,\n 'debug' : logging.DEBUG,\n }\n if level not in mapper:\n raise ValueError('level must be one of these: {}'.format(list(mapper.keys())))\n else:\n logger.setLevel(mapper[level])", "async def loglevel(self, ctx, level):\n level = level.lower()\n assert level in LEVELS\n await self.bot.log.change_level(LEVELS[level], ctx.author.name)\n await ctx.send(f\"Set log level to {level.upper()}\")", "def SetLevelSetValue(self, _arg: 'double const') -> \"void\":\n return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_SetLevelSetValue(self, _arg)", "def logging(self, value: LogLevel) -> None:\n self._data[ATTR_LOGGING] = value\n self.modify_log_level()", "def set_log_level_console(\n self,\n value: t.Union[str, int] = LOG_LEVEL_CONSOLE,\n ) -> None:\n if isinstance(self.ARGS_HANDLER_CON, dict):\n self.ARGS_HANDLER_CON[\"level\"] = logs.str_level(value)\n if self.HANDLER_CON:\n logs.set_log_level(obj=self.HANDLER_CON, level=value)", "def priority(self, priority):\n\n self._priority = priority", "def priority(self, priority):\n\n self._priority = priority", "def priority(self, priority):\n\n self._priority = priority", "def set_level(self, level: str):\n self._logger.setLevel(getattr(logging, level))", "def set_health(self, new_health):\n if new_health < 0:\n \"\"\"Health points can't be below zero.\"\"\"\n self.health = 0\n elif new_health > 100:\n \"\"\"Health points can't be above 100.\"\"\"\n self.health = 100\n else:\n self.health = new_health", "def set_priority(self, priority):\n self._priority = priority", "def impact(self, impact):\n if impact is None:\n raise ValueError(\"Invalid value for `impact`, must not be `None`\") # noqa: E501\n if impact is not None and len(impact) < 1:\n raise ValueError(\"Invalid value for `impact`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._impact = impact", "def set_level(self, level):\n\n self.sh.setLevel(level)\n\n if self.fh:\n self.fh.setLevel(level)" ]
[ "0.77666795", "0.738541", "0.73160356", "0.7266142", "0.62618464", "0.6216128", "0.59445876", "0.59445876", "0.59445876", "0.5885588", "0.5885588", "0.5885588", "0.58778495", "0.58508825", "0.58508825", "0.562879", "0.5625883", "0.55735195", "0.55235606", "0.55169725", "0.5506324", "0.542118", "0.542118", "0.542118", "0.542118", "0.542118", "0.53888", "0.5325664", "0.5312567", "0.52744985", "0.5222821", "0.5106243", "0.5052009", "0.50204176", "0.5018328", "0.501144", "0.49999622", "0.4970983", "0.4967644", "0.49475253", "0.49408743", "0.49355212", "0.4931807", "0.4917311", "0.4901244", "0.4868685", "0.48395875", "0.48241192", "0.47626376", "0.47614822", "0.4760201", "0.47457933", "0.4707124", "0.47030142", "0.46853724", "0.4684164", "0.4684164", "0.46699998", "0.4663117", "0.46441746", "0.46378198", "0.46378198", "0.46378198", "0.46239", "0.46091864", "0.4593276", "0.45721647", "0.4564752", "0.4550172", "0.45424214", "0.4531294", "0.45311156", "0.45107782", "0.4506991", "0.44999146", "0.44879687", "0.44637004", "0.4462535", "0.44599968", "0.44587228", "0.44564864", "0.4453855", "0.4446411", "0.44421867", "0.44357905", "0.442657", "0.4421326", "0.44156492", "0.44121814", "0.44094184", "0.4409275", "0.44065106", "0.44046572", "0.44046572", "0.44046572", "0.4399687", "0.43970993", "0.43968925", "0.43961486", "0.43953055" ]
0.776778
0
Sets the metadata of this ClairpbVulnerability.
def metadata(self, metadata): self._metadata = metadata
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_metadata(self, data):\r\n pass", "def set_metadata(self, attribute, value):\n self.metadata[attribute] = value", "def set_metadata(self, metadata):\n self.metadata = metadata\n return self", "def set_metadata(self, loadbalancer, metadata):\n return loadbalancer.set_metadata(metadata)", "def set_metadata(self, metadata):\n return self.manager.set_metadata(self, metadata)", "def set_metadata(self, metadata):\n return self.manager.set_metadata(self, metadata)", "def set_metadata(self, key, val):\n \n self.metadata[key] = val", "def metadata(self, metadata: Mapping[str, str]):\r\n self._metadata = metadata", "def metadata(self, metadata: Mapping[str, str]):\r\n self._metadata = metadata", "def dataset_meta(self, dataset_meta: dict) -> None:\n self._dataset_meta = dataset_meta", "def set_metadata(self, metadata):\n return self.parent.set_metadata_for_node(self, metadata)", "def SetMetadata(self, driverName, machineIdent, thumbType, thumbWidth, thumbHeight, thumbData): # real signature unknown; restored from __doc__\n pass", "def set_metadata(self, metadata):\n if self.num_features != metadata.num_features:\n raise ValueError(\"Invalid metadata for feature list\")\n self.metadata = metadata", "def meta_data(self, meta_data):\n\n self._meta_data = meta_data", "def set_metadata(self, metadata):\n return self.client._perform_json(\n \"PUT\", \"/projects/%s/recipes/%s/metadata\" % (self.project_key, self.recipe_name),\n body=metadata)", "def _volume_metadata_set(self, volume_path, data):\n data['compat_version'] = 1\n data['version'] = self.version\n return self._metadata_set(self._volume_metadata_path(volume_path), data)", "def set_metadata(self, chunk, coords, value):\n\n chunk.set_metadata(coords, value)", "def meta(self, meta):\n if not isinstance(meta, GiftiMetaData):\n raise TypeError(\"Not a valid GiftiMetaData instance\")\n self._meta = meta", "def add_metadata(self, key, value):\n self._h5.attrs[key] = value", "def set_server_metadata(self, name, **metadata):\n # VERBOSE(metadata)\n # leght of metadata is resricted, so e limit\n data = {}\n if metadata is not None and isinstance(metadata, dict) and 'cm' in metadata:\n if isinstance(metadata['cm'], str):\n import json\n data.update(json.loads(metadata['cm'].replace('\\'', '\\\"')))\n else:\n data.update(metadata['cm'])\n\n _data = {}\n for key in data.keys():\n _data[f\"cm_{key}\"] = data[key]\n # VERBOSE(_data)\n\n server = self.cloudman.get_server(name)\n\n self.cloudman.set_server_metadata(server, _data)", "def metadata(self, new_metadata: dict):\n if not isinstance(new_metadata, dict):\n raise ValueError(\"figure metadata must be a dictionary\")\n self._metadata = new_metadata", "def set_interface_metadata(cls, md):\n metadata.set_metadata(cls, METADATA_KEY, md)", "def set_server_metadata(self, name, **metadata):\n raise NotImplementedError", "def set_metadata(self, val, entry=None):\n \n if entry is None:\n self.metadata = val\n else:\n self.metadata[entry] = val", "def _auth_metadata_set(self, auth_id, data):\n data['compat_version'] = 1\n data['version'] = self.version\n return self._metadata_set(self._auth_metadata_path(auth_id), data)", "def meta(self, meta):\n\n self._meta = meta", "def meta(self, meta):\n\n self._meta = meta", "def update_metadata(self, key, value):\n sp.verify(self.is_administrator(sp.sender), FA12_Error.NotAdmin)\n self.data.metadata[key] = value", "def define_metadata(cls, pcm):\n raise NotImplementedError()", "def set_dataset_metadata(metadata):\n set_to_db(key='metadata', 
str_value=json.dumps(metadata))", "def setMetadata(self, metadata):\n document_properties = self.document_loaded.getDocumentProperties()\n user_defined_properties = document_properties.getUserDefinedProperties()\n new_properties = []\n for prop, value in metadata.items():\n for container in [document_properties, user_defined_properties]:\n current_value = getattr(container, prop, None)\n if current_value is not None:\n if isinstance(current_value, tuple):\n if isinstance(value, list):\n value = tuple(value)\n elif isinstance(value, basestring):\n # BBB: old ERP5 code sends Keywords as a string\n # separated by a whitespace.\n value = tuple(value.split(' '))\n if isinstance(value, type(current_value)):\n setattr(container, prop, value)\n break\n else:\n new_properties.append([prop, value])\n for prop, value in new_properties:\n if isinstance(value, basestring):\n user_defined_properties.addProperty(prop, 0, '')\n user_defined_properties.setPropertyValue(prop, value)\n self.document_loaded.store()\n self.document_loaded.dispose()", "def add_metadata(self, metadata: dict) -> None:", "def set_metadata_for_node(self, loadbalancer, node, metadata):\n return loadbalancer.set_metadata_for_node(node, metadata)", "def add_metadata (self, name, value):\n self.metadata[name] = value\n return self", "def set_resource_data(self, resource, meta):", "def metadata(self, metadata):\n if metadata is None:\n raise ValueError(\"Invalid value for `metadata`, must not be `None`\") # noqa: E501\n\n self._metadata = metadata", "def set_metadata(self, key, value):\n if '::' not in key:\n raise ValueError('Invalid key %s; must be prefixed with \"appname::\"' % key)\n\n self._db_query('DELETE FROM meta WHERE attr=?', (key,))\n self._db_query('INSERT INTO meta VALUES (?, ?)', (key, value))\n self._set_dirty()", "def set_metadata(self, snapshot_id, metadata, **kwargs):\n body = {'metadata': metadata}\n return self._post(\"/snapshots/%s/metadata\" % snapshot_id,\n body, **kwargs)", "def set_metadata(self, loadbalancer, metadata, node=None):\n # Delete any existing metadata\n self.delete_metadata(loadbalancer, node=node)\n # Convert the metadata dict into the list format\n metadata_list = [{\"key\": key, \"value\": val}\n for key, val in metadata.items()]\n if node:\n uri = \"/loadbalancers/%s/nodes/%s/metadata\" % (\n utils.get_id(loadbalancer), utils.get_id(node))\n else:\n uri = \"/loadbalancers/%s/metadata\" % utils.get_id(loadbalancer)\n req_body = {\"metadata\": metadata_list}\n resp, body = self.api.method_post(uri, body=req_body)\n return body", "def set_token_metadata(self, metadata):\n self.update_initial_storage(\n token_metadata = sp.big_map(\n {\n 0: sp.record(token_id = 0, token_info = self.normalize_metadata(metadata))\n },\n tkey = sp.TNat,\n tvalue = sp.TRecord(token_id = sp.TNat, token_info = sp.TMap(sp.TString, sp.TBytes))\n )\n )", "def attachment_metadata(self, value: dict):\n self._properties[\"attachmentMetadata\"] = value", "def storage_metadata(self, storage_metadata):\n self._storage_metadata = storage_metadata", "def SetMetadata(IMAGE,METADATA):\n IMAGE.SetSpacing(METADATA[0])\n IMAGE.SetOrigin(METADATA[1])\n IMAGE.SetDirection(METADATA[2])", "def set_meta(self, meta):\n self._meta['user_meta'] = meta", "def meta_version(self, meta_version):\n\n self._meta_version = meta_version", "def load_metadata_i(self, metadata):\n self.p2_frame_metadata.configure(borderwidth=2, relief=\"groove\")\n self.p2_label_metadata_code.config(text=self.lang.VP_CODE + metadata[\"metadata\"][\"code\"])\n 
self.p2_label_metadata_grade.config(text=self.lang.VP_GRADE + str(metadata[\"metadata\"][\"grade\"]))\n self.p2_label_metadata_cm.config(text=self.lang.VP_DATE + metadata[\"metadata\"][\"date\"])", "def set_meta(self, name, value):\n # note sometimes during .view, we won't have this var available\n check_meta = not hasattr(self, '_init_arg_check') or self._init_arg_check\n if check_meta and name in self._init_args:\n # note this is largely a failsafe, we shouldn't get to this\n # point via setattr since it'll match the hasattr(self.pobj, name)\n raise Exception('Cannot have member variables that clash with pandas constructor args')\n object.__setattr__(self, name, value)", "def sync_set_metadata(self, chunk, coords, value):\n\n chunk.set_metadata(coords, value)", "def update_metadata(self, metadata: t.Mapping[str, str]) -> None:\n self._metadata.update(metadata)", "def set_meta(self, meta):\n self._meta['user_meta'] = meta\n return self", "def metadata(self, metadata: Metadata):\n if metadata is None:\n raise ValueError(\"Invalid value for `metadata`, must not be `None`\") # noqa: E501\n\n self._metadata = metadata", "def update_metadata(self, file_id, metadata):\n pass", "def update_metadata(self, loadbalancer, metadata):\n return loadbalancer.update_metadata(metadata)", "def set_metadata(self, snapshot, metadata):\n body = {'metadata': metadata}\n return self._create(\"/snapshots/%s/metadata\" % base.getid(snapshot),\n body, \"metadata\")", "def metadata(self): # -> None:\n ...", "def SetMetaData(self, fieldName, value):\n return _gmat_py.CCSDSEMSegment_SetMetaData(self, fieldName, value)", "def set_input_metadata(self, input_metadata):\n self.input_metadata = input_metadata\n if input_metadata is not None:\n with contextlib.suppress(AttributeError):\n self.data_loader.input_metadata = input_metadata", "def WriteMetadata(self, metadata, overwrite=True):\n if not overwrite and 'meta' in metadata:\n raise errors.KeyczarError('\"meta\" attribute already exists')\n self.dict['meta'] = str(metadata)", "def metadata_update(self, new_metadata=None):\n if new_metadata is None:\n self.metadata_set(self.t.metadata())", "def update_metadata(self):\n self.data[\"keywords\"] = self.repo.topics(self.data.get(\"keywords\", []))\n self.data[\"description\"] = self.data.get(\"description\") or self.repo.description\n self.data[\"codeRepository\"] = (\n self.data.get(\"codeRepository\") or self.repo.html_url\n )\n self.data[\"name\"] = self.data.get(\"name\") or self.repo.name\n self.data[\"issueTracker\"] = (\n self.data.get(\"issueTracker\") or self.repo.issues_url\n )\n self.data[\"license\"] = self.data.get(\"license\") or self.repo.license", "def set_metadata_for_node(self, node, metadata):\n return self.manager.set_metadata(self, metadata, node=node)", "def __metadata__(self):\n raise NotImplementedError", "def setIncludeMetadata(self, value):\n return self._set(includeMetadata=value)", "def initial_metadata(self):\n raise NotImplementedError()", "def set_metadata_about_dataset(self):\n date=QDateTime(QDate.currentDate(),QTime.currentTime())\n self.dataset_attributes.child('dataset_info','date_time').setValue(date)\n res = self.show_file_attributes('dataset')\n return res", "def metadata_set(self, endpoint_name=None, key=None, value=None):\n if key is None:\n raise Exception(\"Key required!\")\n if endpoint_name is None:\n self.request('/v1.1/endpoint/metadata/%s' % key, 'POST', body=value)\n else:\n self.request('/v1.1/endpoints/%s/metadata/%s' %\n (endpoint_name, key), 'POST', body=value)", "def 
_load_meta(self, db, metadata, source_name) -> None:\n db.metadata.put_item(Item={\n 'src_name': source_name,\n 'data_license': metadata.data_license,\n 'data_license_url': metadata.data_license_url,\n 'version': metadata.version,\n 'data_url': metadata.data_url,\n 'rdp_url': metadata.rdp_url,\n 'data_license_attributes': metadata.data_license_attributes,\n 'genome_assemblies': metadata.genome_assemblies\n })", "def set_attr(self, aid, value, custom=False):\n if aid not in self.attributes and not custom:\n # print \"** Warning: non-declaired attribute %s['%s'] set to:\\n'%s'\" % (\n # self.name, aid, value)\n self.remember_custom_attribute(self.name, aid, value)\n self.attributes[aid] = {}\n else:\n # TODO: validate data_type\n pass\n self.attributes[aid]['nv'] = value\n # self.h5node.attrs[aid] = value\n #- self.file.file_pointer[self.full_path].attrs[aid] = value\n self.file.set_attribute(self.full_path, aid, value)", "def set_attribute(self, name, value):\n\n pass", "def set(self, property_dict):\r\n self.metadata = self.db.update(self.path, property_dict).json()", "def save(self, metadata):\n pass", "def set_attribute(self, attr, value):\n super().set_attribute(attr, value) # Keep this line, it triggers the parent class method.\n setattr(self, attr, value)", "def set_task_metadata(self, task, metadata):\n self._gdb_interface.set_task_metadata(task, metadata)", "def metadata(self, value: typing.Union[\"ObjectMeta\", dict]):\n if isinstance(value, dict):\n value = typing.cast(\n ObjectMeta,\n ObjectMeta().from_dict(value),\n )\n self._properties[\"metadata\"] = value", "def declare_metadata(self, metadata=None):\n if self.hub.is_connected and self._private_key is not None:\n if metadata is not None:\n self._metadata.update(metadata)\n self.hub.declare_metadata(self._private_key, self._metadata)\n else:\n raise SAMPClientError(\n \"Unable to declare metadata. 
Hub \"\n \"unreachable or not connected or client \"\n \"not registered.\"\n )", "def save(self):\n if self.uuid is None:\n logger.info('Saving \"{}\" metadata: {}'.format(self.name, self.request_body))\n result = self._agave.meta.addMetadata(body=self.request_body)\n else:\n logger.info('Updating \"{}\" metadata {}: {}'.format(self.name, self.uuid,\n self.request_body))\n result = self._agave.meta.updateMetadata(uuid=self.uuid,\n body=self.request_body)\n self._wrapped.update(**result)\n return self", "def writeMetadata(self, dataRef):\n pass", "def metadata(self, metadata):\n return Metadata(metadata)", "def metadata_file(self, metadata_file):\n if metadata_file is None:\n raise ValueError(\"Invalid value for `metadata_file`, must not be `None`\")\n\n self._metadata_file = metadata_file", "def change_metadata(self, **kwargs):\n metadata = self.state.get_player_state(PLAYER_IDENTIFIER)\n\n # Update saved metadata\n for key, value in kwargs.items():\n setattr(metadata, key, value)\n\n # Create a temporary metadata instance with requested parameters\n change = PlayingState(**kwargs)\n self.state.item_update(change, PLAYER_IDENTIFIER)", "def metadata(self, df):\n raise NotImplementedError(\"missing metadata() method\")", "def set_invocation_metadata(self, items: Tuple[Tuple[str, str]]):\n self._invocation_metadata = items", "def set_metadata_about_current_scan(self):\n date=QDateTime(QDate.currentDate(),QTime.currentTime())\n self.scan_attributes.child('scan_info','date_time').setValue(date)\n self.scan_attributes.child('scan_info','author').setValue(self.dataset_attributes.child('dataset_info','author').value())\n res = self.show_file_attributes('scan')\n return res", "def set(self, path=None, meta=None):\n if path is not None:\n self.physical_key = PhysicalKey.from_url(fix_url(path))\n self.size = None\n self.hash = None\n elif meta is not None:\n self.set_meta(meta)\n else:\n raise PackageException('Must specify either path or meta')", "def initMetadata(self):\n\n if not 'flags' in self.metadata:\n\n self.metadata['flags'] = {}\n\n if not 'uidvalidity' in self.metadata:\n\n\n self.metadata['uidvalidity'] = random.randint(1000000, 9999999)\n\n if not 'uids' in self.metadata:\n\n self.metadata['uids'] = {}\n\n if not 'uidnext' in self.metadata:\n\n self.metadata['uidnext'] = 1", "def __setattr__(self, key, value):\n if (\n key != \"_pb\"\n and self._pb is not None\n and key in self._pb.__class__.DESCRIPTOR.fields_by_name.keys()\n ):\n setattr(self._pb, key, value)\n else:\n self.__dict__[key] = value", "def setInfo(label: str, value: str):\r\n\r\n if not self.isClosed:\r\n if label in self.__identity_info.keys():\r\n self.__identity_info[label] = value\r\n else:\r\n raise HDDOPermissionException('Tried to set non-existing identity information in a HealthDominoDataObject.')\r\n else:\r\n raise HDDOPermissionException('Tried to set identity information in a closed HealthDominoDataObject.')", "def set_task_metadata(self, task, metadata):\n self._write_transaction(tx.set_task_metadata, task=task, metadata=metadata)", "def _set(self, thumbnail_name, thumbnail):\n raise NotImplementedError", "def _store_package_metadata(self):\n\n context = self._config.context\n log.debug('processing chef_json file {0} for package metadata'.format(self._get_chef_json_full_path()))\n with open(self._get_chef_json_full_path()) as chef_json_file:\n chef_json = json.load(chef_json_file)\n log.debug(chef_json.dump)\n\n context.package.attributes = {}\n for x in self._config.pkg_attributes:\n 
context.package.attributes[x] = chef_json.get(x, None)", "def add_metadata(self, flags, dataset=None):\n # Must use variable here, not constant, in order for the value to be\n # saved in the model.\n self.telluride_metadata = tf.Variable(json.dumps(flags))\n\n if not dataset:\n return\n if not isinstance(dataset, tf.data.Dataset):\n raise TypeError('dataset parameter must be tf.data.Dataset type.')\n\n for data in dataset.take(1):\n inputs, output = data\n for k in inputs:\n inputs[k] = list(inputs[k].shape)\n output = list(output.shape)\n self.telluride_inputs = tf.Variable(json.dumps(inputs))\n self.telluride_output = tf.Variable(json.dumps(output))", "def set_attr(self, asset_key, attr, value=True):\r\n self.set_attrs(asset_key, {attr: value})", "def tag_challenge(self, tag_challenge):\n\n self._tag_challenge = tag_challenge", "def save_meta(self):\n meta = self.serializer.dumps(self.meta)\n self.connection.hset(self.key, 'meta', meta)" ]
[ "0.7049014", "0.6655038", "0.65871096", "0.63758785", "0.63146836", "0.63146836", "0.62831694", "0.6266832", "0.6266832", "0.6024687", "0.59310377", "0.59303904", "0.59018284", "0.58468807", "0.5824315", "0.58013225", "0.57796216", "0.5759306", "0.5752463", "0.57415015", "0.5740437", "0.5739767", "0.56744635", "0.5671606", "0.5655041", "0.5654701", "0.5654701", "0.5647498", "0.56393206", "0.56150085", "0.56125665", "0.56075406", "0.56049967", "0.5561799", "0.55559474", "0.5516114", "0.5503141", "0.5423397", "0.5423273", "0.5389254", "0.5383597", "0.53821325", "0.5343781", "0.5331308", "0.53013307", "0.5297108", "0.5280842", "0.52547294", "0.52476263", "0.52438706", "0.52409714", "0.5237832", "0.5225305", "0.522488", "0.52199763", "0.52104837", "0.51926315", "0.518745", "0.5172268", "0.5166044", "0.5159576", "0.5148419", "0.51400936", "0.5130902", "0.5109307", "0.5090417", "0.5081144", "0.50774646", "0.50277215", "0.5009069", "0.50082606", "0.49947095", "0.49881583", "0.49880338", "0.49792084", "0.49712774", "0.49676475", "0.49447915", "0.49426395", "0.49384972", "0.49145782", "0.49135208", "0.488396", "0.48711696", "0.48635694", "0.48623076", "0.48473924", "0.48264438", "0.4819506", "0.48181078", "0.48114404", "0.4810717", "0.48020777", "0.48014134" ]
0.6576822
7
Sets the fixed_by of this ClairpbVulnerability.
def fixed_by(self, fixed_by): self._fixed_by = fixed_by
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fixed_amount(self, fixed_amount):\n\n self._fixed_amount = fixed_amount", "def fixed_amount(self, fixed_amount):\n\n self._fixed_amount = fixed_amount", "def issued_by(self, issued_by):\n\n self._issued_by = issued_by", "def mitigated_by(self, mitigated_by):\n\n self._mitigated_by = mitigated_by", "def found_by(self, found_by):\n\n self._found_by = found_by", "def changed_by(self, changed_by):\n\n self._changed_by = changed_by", "def updated_by(self, updated_by):\n\n self._updated_by = updated_by", "def assigned_by_user(self, assigned_by_user):\n\n self._assigned_by_user = assigned_by_user", "def fixed_location(self, fixed_location):\n\n self._fixed_location = fixed_location", "def created_by(self, created_by):\n\n self._created_by = created_by", "def created_by(self, created_by):\n\n self._created_by = created_by", "def created_by(self, created_by):\n\n self._created_by = created_by", "def created_by(self, created_by):\n\n self._created_by = created_by", "def created_by(self, created_by):\n\n self._created_by = created_by", "def created_by(self, created_by):\n\n self._created_by = created_by", "def regulatory_body_approved_by(self, regulatory_body_approved_by):\n\n self._regulatory_body_approved_by = regulatory_body_approved_by", "def last_reviewed_by(self, last_reviewed_by):\n\n self._last_reviewed_by = last_reviewed_by", "def defect_review_requested_by(self, defect_review_requested_by):\n\n self._defect_review_requested_by = defect_review_requested_by", "def _setbeneficiary_customer_59F(self, val):\n self.swift_obj.BeneficiaryCustomer_F = val\n self.swift_obj.BeneficiaryCustomer_F.swiftTag = '59F'", "def allowed_by_team_id(self, allowed_by_team_id):\n\n self._allowed_by_team_id = allowed_by_team_id", "def last_modified_by(self, last_modified_by):\n\n self._last_modified_by = last_modified_by", "def last_modified_by(self, last_modified_by):\n\n self._last_modified_by = last_modified_by", "def updated_by(self, updated_by: \"str\"):\n self._attrs[\"updatedBy\"] = updated_by", "def updated_by(self, updated_by: \"str\"):\n self._attrs[\"updatedBy\"] = updated_by", "def updated_by(self, updated_by: \"str\"):\n self._attrs[\"updatedBy\"] = updated_by", "def review_requested_by(self, review_requested_by):\n\n self._review_requested_by = review_requested_by", "def created_by_id(self, created_by_id):\n\n self._created_by_id = created_by_id", "def created_by_security_user_id(self, created_by_security_user_id):\n\n self._created_by_security_user_id = created_by_security_user_id", "def _determine_uploader_by_changedby_field(self):\n maintainer_string = self.changes.get('Changed-By')\n log.debug(\"Determining user from 'Changed-By:' field: %s\" % maintainer_string)\n maintainer_realname, maintainer_email_address = email.utils.parseaddr(maintainer_string)\n log.debug(\"Changed-By's email address is: %s\", maintainer_email_address)\n self._find_user_by_email_address(maintainer_email_address)", "def amended_by(self, amended_by):\n\n self._amended_by = amended_by", "def last_modified_by(self, last_modified_by):\n if last_modified_by is not None and len(last_modified_by) > 100:\n raise ValueError(\"Invalid value for `last_modified_by`, length must be less than or equal to `100`\")\n\n self._last_modified_by = last_modified_by", "def created_by_id(self, created_by_id):\n self._created_by_id = created_by_id", "def setReplacedBy(self, *args):\n return _libsbml.CompSBasePlugin_setReplacedBy(self, *args)", "def founder(self, founder: object):\n\n self._founder = founder", "def effective_from(self, 
effective_from):\n\n self._effective_from = effective_from", "def SetFixedParams(self, fixedParameters=None):\n # Set first the fixParameter to true and\n # pass the parameters to fix\n \n #if not self.fixParameter:\n # self.fixParameter = True\n \n # Then adjust the Names and the Values\n \n if fixedParameters is not None:\n self.fixParameter = True\n pNames, pValues = \\\n Utils.reduceParameters(self.parameterNames0, \\\n self.initialParameterValues0, \\\n fixedParameters)\n self.parameterNames = pNames\n self.parameterNameList = pNames.split(\",\")\n self.initialParameterValues = pValues\n self.fixedParameters = fixedParameters\n self.fixParameter = True\n self.SetFixedParamsPass = True\n else:\n if self.SetFixedParamsPass:\n self.parameterNames=self.parameterNames0\n self.parameterNameList = self.parameterNameList0\n self.initialParameterValues = self.initialParameterValues0\n self.fixParameter = False\n self.fixedParameters = None", "def sent_by_user_id(self, sent_by_user_id):\n\n self._sent_by_user_id = sent_by_user_id", "def fixed_legend_filter_field(self, fixed_legend_filter_field):\n allowed_values = [\"CURRENT\", \"MEAN\", \"MEDIAN\", \"SUM\", \"MIN\", \"MAX\", \"COUNT\"] # noqa: E501\n if (self._configuration.client_side_validation and\n fixed_legend_filter_field not in allowed_values):\n raise ValueError(\n \"Invalid value for `fixed_legend_filter_field` ({0}), must be one of {1}\" # noqa: E501\n .format(fixed_legend_filter_field, allowed_values)\n )\n\n self._fixed_legend_filter_field = fixed_legend_filter_field", "def owned_by(self, owned_by):\n allowed_values = [\"All\", \"System\", \"Customer\"] # noqa: E501\n if owned_by not in allowed_values:\n raise ValueError(\n \"Invalid value for `owned_by` ({0}), must be one of {1}\" # noqa: E501\n .format(owned_by, allowed_values)\n )\n\n self._owned_by = owned_by", "def firewall_protection(self, firewall_protection):\n\n self._firewall_protection = firewall_protection", "def set_sort_by(self, sort_by):\n\n\t\tif sort_by is not None and not isinstance(sort_by, str):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: sort_by EXPECTED TYPE: str', None, None)\n\t\t\n\t\tself.__sort_by = sort_by\n\t\tself.__key_modified['sort_by'] = 1", "def set_feudal_bond(self, lord: Nobleman, vassal: Nobleman):\n if lord > vassal:\n lord.vassals.add(vassal)\n if (liege := vassal.liege) is not None:\n self.break_feudal_bond(liege, vassal)\n vassal.liege = lord", "def set_userId(self, userId):\n self.authentication.userId = userId", "def age_of_fluid_diffusion(self, age_of_fluid_diffusion):\n\n self._age_of_fluid_diffusion = age_of_fluid_diffusion", "def set_ip(self, party_ip) -> None:\n\n self._ip = party_ip", "async def softban(self, ctx, member: discord.Member, days_to_clean: int=1, reason: str=None):\n if not 0 <= days_to_clean <= 7:\n await ctx.send('Invalid clean value. Use a number from 0 to 7.')\n return\n\n _reason = 'Initiated by {}'.format(ctx.author)\n if reason is not None:\n _reason += ', for reason \"{}\"'.format(reason)\n\n try:\n await member.ban(delete_message_days=days_to_clean, reason=_reason)\n await member.unban(reason=_reason)\n await ctx.send('Done. 
Good riddance.')\n except discord.Forbidden:\n await ctx.send('Sorry, I don\\'t have permission to ban that person here.')", "def setCharge(self, *args):\n return _libsbml.FbcSpeciesPlugin_setCharge(self, *args)", "def SetFixedParams(self, fixedParameters):\n th = self.theory\n if fixedParameters:\n pNames, pValues = Utils.reduceParameters(th.parameterNames0,\\\n th.initialParameterValues0,\\\n fixedParameters)\n th.parameterNames = pNames\n th.parameterNameList = pNames.split(\",\")\n th.initialParameterValues = pValues\n for currentModel in self.Models.values():\n currentModel.theory.fixParameter = True\n currentModel.theory.SetFixedParams(fixedParameters)\n self.SetFixedParamsPass = True\n else:\n if self.SetFixedParamsPass:\n th.parameterNames = th.parameterNames0\n th.parameterNameList = th.parameterNameList0\n th.initialParameterValues = th.initialParameterValues0\n for currentModel in self.Models.values():\n currentModel.theory.parameterNames=th.parameterNames0\n currentModel.theory.parameterNameList=th.parameterNameList0\n currentModel.theory.initialParameterValues=th.initialParameterValues0\n currentModel.theory.fixParameter = False\n currentModel.theory.fixedParameters = None", "def _setbeneficiary_customer_59A(self, val):\n self.swift_obj.BeneficiaryCustomer_A = val\n self.swift_obj.BeneficiaryCustomer_A.swiftTag = '59A'", "def created_by(self, created_by: \"str\"):\n self._attrs[\"createdBy\"] = created_by", "def created_by(self, created_by: \"str\"):\n self._attrs[\"createdBy\"] = created_by", "def created_by(self, created_by: \"str\"):\n self._attrs[\"createdBy\"] = created_by", "def created_by(self, created_by: \"str\"):\n self._attrs[\"createdBy\"] = created_by", "def fixed_legend_filter_limit(self, fixed_legend_filter_limit):\n\n self._fixed_legend_filter_limit = fixed_legend_filter_limit", "def sent_by_full_name(self, sent_by_full_name):\n\n self._sent_by_full_name = sent_by_full_name", "def shipper_reference(self, shipper_reference):\n\n self._shipper_reference = shipper_reference", "def fixed_legend_display_stats(self, fixed_legend_display_stats):\n\n self._fixed_legend_display_stats = fixed_legend_display_stats", "def scored_by_player_id(self, scored_by_player_id):\n\n self._scored_by_player_id = scored_by_player_id", "def chf(self, chf):\n\n self.logger.debug(\"In 'chf' setter.\")\n\n self._chf = chf", "def accepting_change_of_payor_patients(self, accepting_change_of_payor_patients):\n\n self._accepting_change_of_payor_patients = accepting_change_of_payor_patients", "def set_user(self, user):\n self._user = user", "def set_as_walker(self, user_id):\n user = self.user_repository.read(user_id)\n user_dict = asdict(user)\n user_dict[\"override_id\"] = user_dict[\"id\"]\n del user_dict[\"id\"]\n user_dict[\"is_available\"] = False\n self.create(user_dict)", "def set_maintainer(self, maintainer):\n self.paragraphs[0][\"Maintainer\"] = maintainer", "def sortby(self, sortby):\n self._sortby = sortby", "def tag_challenge(self, tag_challenge):\n\n self._tag_challenge = tag_challenge", "def resolved_by(self):\n return User(None, self.get_data(\"resolved_by\"), **self._new_session_args)", "def id_user(self, id_user):\n\n self._id_user = id_user", "def set_descriptor(\n self,\n arbitrary_descriptor_val=None,\n fingerprint_type=None,\n fingerprint_params=None,\n ):\n if arbitrary_descriptor_val is not None:\n self.descriptor.set_manually(arbitrary_descriptor_val)\n elif fingerprint_type is not None:\n if self.mol_graph is None:\n raise ValueError(\n \"Molecular graph not present. 
\"\n \"Fingerprint cannot be calculated.\"\n )\n self.descriptor.make_fingerprint(\n self.mol_graph,\n fingerprint_type=fingerprint_type,\n fingerprint_params=fingerprint_params,\n )\n else:\n raise ValueError(f\"No descriptor vector were passed.\")", "def fix_seq(self, fixed_seq):\n assert len(fixed_seq) == self.length, \\\n \"Length of fixed sequence (%d) does not match length of %s (%d)\" \\\n % (len(fixed_seq), self.full_name, self.length)\n i = 0\n for seq in self.seqs:\n seq.fix_seq( fixed_seq[i:i+seq.length] )\n i += seq.length", "def sent_by_email(self, sent_by_email):\n\n self._sent_by_email = sent_by_email", "def verifiable_credential(self, verifiable_credential):\n\n self._verifiable_credential = verifiable_credential", "def possessed_by(self, other):\r\n self.owner = other", "def set_user(self, user):\r\n self.user = user", "def fixed_width(self, fixed_width: bool):\n\n self._fixed_width = fixed_width", "def challenge(self, challenge):\n\n self._challenge = challenge", "def set_fixed_transfer_rate_flow(self, from_label, to_label, param_label):\n\n assert type(from_label) is str, 'Origin compartment label not string for setting fixed transfer rate'\n assert type(to_label) is str, 'Destination compartment label not string for setting fixed transfer rate'\n add_unique_tuple_to_list(\n self.fixed_transfer_rate_flows,\n (from_label,\n to_label,\n self.params[param_label]))", "def ddos_protection(self, ddos_protection):\n\n self._ddos_protection = ddos_protection", "def setChan(\n self,\n u,\n chan,\n fval,\n ):\n\n self.DMX[u].set_chan_float(chan, fval)", "def set_firerate(self, firerate):\n self._firerate = firerate", "def set_fk_act_i(self, fk):\n self.fkai = fk[:]", "def fix_bug(self,**kwargs):\n\n params={}\n\n if \"bug_id\" in kwargs:\n params[\"ROWID\"]=kwargs[\"bug_id\"]\n if self.NAME_COLUMN in kwargs:\n params[self.NAME_COLUMN]=kwargs[self.NAME_COLUMN]\n \n\n q=\"\"\"UPDATE {} SET {} WHERE {}\"\"\".format(\n self.BUG_TABLE,\n \",\".join([\"{}=1\".format(self.FIXED_COLUMN),\"{}=:datefixed\".format(self.DATE_FIXED_COLUMN)]),\n \" AND \".join([\"{}=:{}\".format(k,k) for k in params])\n )\n params[\"datefixed\"]=datetime.datetime.now()\n\n cur=self.cxn.cursor()\n cur.execute(q,params)\n self.cxn.commit()", "def assigned_user(self, assigned_user):\n self._assigned_user = assigned_user", "def _setbeneficiary_customer_no_option_59(self, val):\n self.swift_obj.BeneficiaryCustomer = val\n self.swift_obj.BeneficiaryCustomer.swiftTag = '59'", "def referred_by_name(self, referred_by_name: str):\n self._referred_by_name = referred_by_name", "def funder(self, funder: object):\n\n self._funder = funder", "def block(self, item, blocked_by):\n item_meta = self._blocked_items.get(item)\n if item_meta is None:\n item_meta = RefCount(item)\n self._blocked_items[item] = item_meta\n\n blocked_items = self._blockers.get(blocked_by, set())\n if item_meta in blocked_items:\n raise ValueError(\"'{}' is already blocked by '{}'\".format(\n str(item_meta.data), str(blocked_by)))\n\n blocked_items.add(item_meta)\n self._blockers[blocked_by] = blocked_items\n item_meta.add_ref()", "def setBuzzerPin(Pin):\n global buzzerPin\n buzzerPin = Pin\n # Replaces old pin value with the new Pin argument.", "async def update_user_is_hacked(self, user_id: int, hacked: int) -> None:\n\n mycursor, db = await the_database()\n await mycursor.execute(\"UPDATE UserCurrency SET hacked = %s WHERE user_id = %s\", (hacked, user_id))\n await db.commit()\n await mycursor.close()", "async def update_hacks_content(self, 
attacker_id: int) -> None:\n\n mycursor, db = await the_database()\n await mycursor.execute(\"UPDATE SlothSkills SET content = 'virus' WHERE user_id = %s\", (attacker_id,))\n await db.commit()\n await mycursor.close()", "def updated_by_id(self, updated_by_id):\n self._updated_by_id = updated_by_id", "def constrain_fixed(self, value=None, warning=True, trigger_parent=True):\n if value is not None:\n self[:] = value\n\n index = self.unconstrain()\n index = self._add_to_index_operations(self.constraints, index, __fixed__, warning)\n self._highest_parent_._set_fixed(self, index)\n self.notify_observers(self, None if trigger_parent else -np.inf)\n return index", "def user(self, user):\n\n self._user = user", "def user(self, user):\n\n self._user = user", "def user(self, user):\n\n self._user = user", "def user(self, user):\n\n self._user = user", "def user(self, user):\n\n self._user = user", "def user(self, user):\n\n self._user = user", "def user(self, user):\n\n self._user = user", "def user(self, user):\n\n self._user = user", "def user(self, user):\n\n self._user = user" ]
[ "0.6088768", "0.6088768", "0.60354835", "0.594974", "0.56890184", "0.5542011", "0.5290293", "0.5184325", "0.5179588", "0.51558876", "0.51558876", "0.51558876", "0.51558876", "0.51558876", "0.51558876", "0.5038125", "0.4941069", "0.49050403", "0.479892", "0.47826055", "0.47764128", "0.47764128", "0.47333765", "0.47333765", "0.47333765", "0.4714392", "0.47048336", "0.4695162", "0.46468732", "0.46423575", "0.4639786", "0.46303132", "0.459471", "0.4590534", "0.45756802", "0.45549393", "0.45549345", "0.45444348", "0.45081806", "0.45073515", "0.4455264", "0.444681", "0.44413465", "0.4430586", "0.44207937", "0.44049272", "0.44038025", "0.4402204", "0.44004324", "0.43993825", "0.43993825", "0.43993825", "0.43993825", "0.43733925", "0.43636134", "0.4350441", "0.43492278", "0.4319487", "0.43161434", "0.43122423", "0.4309571", "0.43095404", "0.43044558", "0.43029043", "0.4299276", "0.4293929", "0.4276036", "0.42604715", "0.4247234", "0.4237475", "0.4235136", "0.4234165", "0.4230108", "0.4225432", "0.4218475", "0.4207433", "0.41982403", "0.4187802", "0.41747642", "0.41712382", "0.41708326", "0.41692844", "0.41605148", "0.41447046", "0.41415903", "0.4134371", "0.41327992", "0.4126384", "0.4126114", "0.41222605", "0.41093093", "0.41059673", "0.41059673", "0.41059673", "0.41059673", "0.41059673", "0.41059673", "0.41059673", "0.41059673", "0.41059673" ]
0.8097117
0
Sets the affected_versions of this ClairpbVulnerability.
def affected_versions(self, affected_versions): self._affected_versions = affected_versions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vulnerabilities(self, vulnerabilities):\n\n self._vulnerabilities = vulnerabilities", "def vulnerabilities(self, vulnerabilities):\n\n self._vulnerabilities = vulnerabilities", "def versions(self, versions):\n\n self._versions = versions", "def set_versions(self, consumer, versions):\n for resource_type, resource_version in versions.items():\n self._set_version(consumer, resource_type,\n resource_version)\n\n if versions:\n self._cleanup_removed_versions(consumer, versions)\n else:\n self._handle_no_set_versions(consumer)", "def vulnerable_versions(self):\n raise NotImplementedError()", "def update_versions(consumer, resource_versions):\n _get_cached_tracker().update_versions(consumer, resource_versions)", "def update_versions(self, reference_resolution):\n raise NotImplementedError(\"update_versions is not implemented\")", "def max_affected_version(self, max_affected_version):\n\n self._max_affected_version = max_affected_version", "def pipeline_versions(self, pipeline_versions):\n if (self.local_vars_configuration.client_side_validation and\n pipeline_versions is not None and not isinstance(pipeline_versions, int)):\n raise ValueError(\"Parameter `pipeline_versions` must be an integer\") # noqa: E501\n\n self._pipeline_versions = pipeline_versions", "def test_changeVersions(self):\n self._testVersionChanging(8, 2, 3)", "def set_vectors(self, vecs):\n self.vecs = vecs[:]", "def update_versions(self, new_versions_list):\n to_stop = [version for version in self if version not in new_versions_list]\n for version_id in to_stop:\n del self[version_id]\n\n for version_id in new_versions_list:\n if version_id not in self:\n self[version_id] = VersionManager(\n self.zk_client, self.project_id, self.service_id, version_id,\n self.callback)\n\n self[version_id].ensure_watch()", "def _get_affected_versions(self, rules, versions):\n affected = []\n for ver in versions:\n for rule in rules:\n # If there is a singular rule Ex >=2.1.1\n if len(rule) == 1:\n if self._is_relation_applicable(rule[0]['key'], ver, rule[0]['val']):\n affected.append(ver)\n # If there are 2 rules Ex >=2.1.1 & <2.1.5\n elif len(rule) == 2:\n key0 = rule[0]['key']\n key1 = rule[1]['key']\n first = self._is_relation_applicable(key0, ver, rule[0]['val'])\n second = self._is_relation_applicable(key1, ver, rule[1]['val'])\n if first and second:\n affected.append(ver)\n else:\n if '=' in key0:\n if self._is_relation_applicable(\"=\", ver, rule[0]['val']):\n affected.append(ver)\n elif '=' in key1:\n if self._is_relation_applicable(\"=\", ver, rule[1]['val']):\n affected.append(ver)\n return list(set(affected))", "def affected_portfolios(self, affected_portfolios):\n if self.local_vars_configuration.client_side_validation and affected_portfolios is None: # noqa: E501\n raise ValueError(\"Invalid value for `affected_portfolios`, must not be `None`\") # noqa: E501\n\n self._affected_portfolios = affected_portfolios", "def get_affected_versions(self, rules, versions):\n affected = []\n for ver in versions:\n for rule in rules:\n # If there is a singular rule Ex >=2.1.1\n if len(rule) == 1:\n if self._is_relation_applicable(rule[0]['key'], ver, rule[0]['val']):\n affected.append(ver)\n # If there are 2 rules Ex >=2.1.1 & <2.1.5\n elif len(rule) == 2:\n key0 = rule[0]['key']\n key1 = rule[1]['key']\n first = self._is_relation_applicable(key0, ver, rule[0]['val'])\n second = self._is_relation_applicable(key1, ver, rule[1]['val'])\n if first and second:\n affected.append(ver)\n else:\n if '=' in key0:\n if 
self._is_relation_applicable(\"=\", ver, rule[0]['val']):\n affected.append(ver)\n elif '=' in key1:\n if self._is_relation_applicable(\"=\", ver, rule[1]['val']):\n affected.append(ver)\n return list(set(affected))", "def min_affected_version(self, min_affected_version):\n\n self._min_affected_version = min_affected_version", "def versions(self):\n raise Exception(\"mcapi.Datafile.versions is not implemented\")", "def set_affected_nodes(self, affected_vertices_file):\n self.affected_nodes = pd.read_csv(affected_vertices_file,\n delimiter=self.delimiter,\n dtype='int32',\n header=None,\n engine='python').values\n self.affected_nodes += self.force_offset", "def cpe_vulnerabilities(self, _nvd_cls, _cpe_cls):\n db = get_thread_scoped_session()\n if not _nvd_cls or not _cpe_cls:\n _nvd_cls, _cpe_cls = select_nvd_classes(db)\n cpe_vulnerabilities = db.query(ImageCpe, _cpe_cls).filter(\n ImageCpe.image_id == self.id,\n ImageCpe.image_user_id == self.user_id,\n func.lower(ImageCpe.name) == _cpe_cls.name,\n ImageCpe.version == _cpe_cls.version\n ).options(joinedload(_cpe_cls.parent, innerjoin=True)).all()\n\n # vulndb is similar to nvd cpes, add them here\n cpe_vulnerabilities.extend(\n db.query(ImageCpe, VulnDBCpe).filter(\n ImageCpe.image_id == self.id, ImageCpe.image_user_id == self.user_id,\n func.lower(ImageCpe.name) == VulnDBCpe.name,\n ImageCpe.version == VulnDBCpe.version,\n VulnDBCpe.is_affected.is_(True)\n ).options(joinedload(VulnDBCpe.parent, innerjoin=True)).all())\n\n return cpe_vulnerabilities", "def affected_orders(self, affected_orders):\n if self.local_vars_configuration.client_side_validation and affected_orders is None: # noqa: E501\n raise ValueError(\"Invalid value for `affected_orders`, must not be `None`\") # noqa: E501\n\n self._affected_orders = affected_orders", "def _update_versions_watch(self, new_versions_list):\n if self._stopped:\n return False\n\n persistent_update_versions = retry_children_watch_coroutine(\n self.versions_node, self.update_versions\n )\n main_io_loop = IOLoop.instance()\n main_io_loop.add_callback(persistent_update_versions, new_versions_list)", "def vcpus(self, vcpus):\n self._vcpus = vcpus", "def set_verbosity(self, value):\n for source in self._sources.itervalues():\n source.verbosity = value", "def update_probabilities(self):\n self.probabilities = self.pheromones**self.EXP_PH * self.mcv**self.EXP_MCV", "def setConsistencyChecks(self, *args):\n return _libsbml.SBMLDocument_setConsistencyChecks(self, *args)", "def set_lives(self, new_number_of_lives):\n self.__lives = new_number_of_lives", "def updateVersions(self):\r\n f = open('../versions.pckl', 'wb')\r\n pickle.dump(self.versions, f)\r\n f.close()", "def get_vulnerabilities(self, **kwargs):\n ...", "def setVersion(self, *args):\n\n self._version = '.'.join( [str(arg) for arg in args] )", "def _recalculate_versions(self):\n versions = self._get_local_resource_versions()\n for versions_dict in self._versions_by_consumer.values():\n for res_type, res_version in versions_dict.items():\n versions[res_type].add(res_version)\n self._versions = versions", "def change_sides(self, sides):\n self.sides = list(sides)", "def set_dark_counts_auto_iv(self, dark_counts):\n if self.NUMBER_OF_DETECTORS != len(dark_counts):\n raise ValueError('Dark counts not the same lenght as number of detectors')\n else:\n msg =json.dumps(dict(command=\"DarkCountsAutoIV\", label=\"DarkCountsAutoIV\", value=dark_counts))\n self.talk.send(msg)", "def setWeights(self, weights):\n self._call_java('setWeights', weights)", "def 
test_defaultChangeVersionsVersionChanger(self):\n versionChanger = ChangeVersionsScript()\n self.assertEquals(versionChanger.changeAllProjectVersions,\n changeAllProjectVersions)", "def setLevelAndVersion(self, *args):\n return _libsbml.SBMLDocument_setLevelAndVersion(self, *args)", "def update(self, values: List[int]) -> None:\n ...", "def update(self, values: List[int]) -> None:\n ...", "def bulk_threat_update(self, threat_ids, remediation=None, comment=None):\n return self._bulk_threat_update_status(threat_ids, \"OPEN\", remediation, comment)", "def outcomes(self, outcomes):\n\n self._outcomes = outcomes", "def svn_client_commit_item2_t_wcprop_changes_set(svn_client_commit_item2_t_self, apr_array_header_t_wcprop_changes): # real signature unknown; restored from __doc__\n pass", "def get_version_rules(self, vuln_versions):\n rules = []\n regex_op = \"[0-9a-zA-Z\\\\_\\\\.\\\\-]+\"\n regex_vr = \"[<>=*]+\"\n \"\"\"For all the vulnerable versions information that we get, we need to create\n comparable version object so that we can apply these rules on top of all the available\n versions of a pkg in the market.\"\"\"\n for version in vuln_versions:\n version = version.replace(\" \", \"\")\n version = version.replace(\"+incompatible\", \"\")\n sub_vers = version.split('||')\n for sub_ver in sub_vers:\n tmp = []\n vr_relations = re.split(regex_vr, sub_ver)\n op_relations = re.split(regex_op, sub_ver)\n # Single affected version.\n if len(vr_relations) == 1:\n tmp.append({\n 'key': \"=\",\n 'val': ComparableVersion(vr_relations[0])\n })\n # All versions affected.\n elif len(op_relations) == 1 and op_relations[0] == '*':\n tmp.append({\n 'key': \"*\",\n 'val': \"\"\n })\n else:\n for i in range(len(op_relations) - 1):\n tmp.append({\n 'key': op_relations[i],\n 'val': ComparableVersion(vr_relations[i + 1])\n })\n rules.append(tmp)\n\n return rules", "def _setVals(self, outcome=0):\n self.outcome = outcome", "def change(self, ids, **kwargs):\n args = {}\n for key, value in kwargs.iteritems():\n argument = make_rpc_name(key)\n (arg, val) = argument_value_convert('torrent-set'\n , argument, value, self.rpc_version)\n args[arg] = val\n\n if len(args) > 0:\n self._request('torrent-set', args, ids, True)\n else:\n ValueError(\"No arguments to set\")", "def update(self, vts):\r\n for vt in vts.versioned_targets:\r\n self._invalidator.update(vt.cache_key)\r\n vt.valid = True\r\n self._invalidator.update(vts.cache_key)\r\n vts.valid = True", "def _setVals(self, *args, **kwargs):\n pass", "def _get_version_rules(self, vuln_versions):\n rules = []\n regex_op = \"[0-9a-zA-Z\\\\_\\\\.\\\\-]+\"\n regex_vr = \"[<>=*]+\"\n \"\"\"For all the vulnerable versions information that we get, we need to create\n comparable version object so that we can apply these rules on top of all the available\n versions of a pkg in the market.\"\"\"\n for version in vuln_versions:\n version = version.replace(\" \", \"\")\n sub_vers = version.split('||')\n for sub_ver in sub_vers:\n tmp = []\n vr_relations = re.split(regex_vr, sub_ver)\n op_relations = re.split(regex_op, sub_ver)\n # Single affected version.\n if len(vr_relations) == 1:\n tmp.append({\n 'key': \"=\",\n 'val': ComparableVersion(vr_relations[0])\n })\n # All versions affected.\n elif len(op_relations) == 1 and op_relations[0] == '*':\n tmp.append({\n 'key': \"*\",\n 'val': \"\"\n })\n else:\n for i in range(len(op_relations) - 1):\n tmp.append({\n 'key': op_relations[i],\n 'val': ComparableVersion(vr_relations[i + 1])\n })\n rules.append(tmp)\n\n return rules", "def 
ExecuteBeforeSolutionLoop(self):\n super().ExecuteBeforeSolutionLoop()\n num_of_vaviables = len(self.variables) + len(self.nonhistorical_variables)\n self.values = [[-1e6] * num_of_vaviables for _ in self.found_positions]", "def setReplacedBy(self, *args):\n return _libsbml.CompSBasePlugin_setReplacedBy(self, *args)", "def versions(self):\n return self._versions", "def setVals(self, *args, **kwargs):\n self._setVals(*args, **kwargs)\n self._check_vals()", "def setVersion(self, version) :\n if version is not None :\n try :\n self.version = [int(p) for p in version.split(\".\")]\n except AttributeError :\n if len(version) == 2 : # 2-tuple\n self.version = version\n else :\n try :\n self.version = [int(p) for p in str(float(version)).split(\".\")]\n except :\n self.version = [int(p) for p in IPP_VERSION.split(\".\")]", "def ListVersions(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def setFitnesses(self, chromosomes: ChromList) -> ChromList:\n raise NotImplementedError", "def set_values(self, new_values):\n for name, value in new_values.items():\n self.nodes_db.loc[name][\"node\"].set_value(value)", "def setvoltages(self):\n pass", "def _order_changelog_versions(self, versions):\n\n return sorted(versions, key=LooseVersion)", "def executions(self, executions):\n\n self._executions = executions", "def setVectors(self, vectors):\n l = len(self.points)\n for point, vector in zip(self.points, vectors):\n point.set(vector.components)", "def load_referenced_versions(self):\n raise NotImplementedError(\"load_referenced_versions is not implemented\")", "def allowed_vehicles(self, allowed_vehicles):\n\n self._allowed_vehicles = allowed_vehicles", "def allowed_vehicles(self, allowed_vehicles):\n\n self._allowed_vehicles = allowed_vehicles", "def _send_changes(self, users):\n\n if self.changelog_notifications:\n changes = changelog.load()\n new_version = V(__version__)\n old_version = V(self.state['version'])\n\n if old_version < new_version:\n post = [\n 'Bot was upgraded to a new version %s.' 
% __version__,\n 'Compared with the previous version it has following changes:'\n ]\n for version, version_string, messages in changes:\n if version <= old_version:\n break\n\n post.append('\\nVersion %s:' % version_string)\n for line in messages:\n post.append(' * ' + line)\n\n for user in users:\n self.send_message(\n user.jid,\n '\\n'.join(post),\n mfrom = self.jid,\n mtype = 'chat'\n )\n self.state['version'] = __version__", "def ip_version(self, ip_version):\n\n self._ip_version = ip_version", "def categoria_svs(self, categoria_svs):\n\n self._categoria_svs = categoria_svs", "def vulnerabilities(self) -> api.Vulnerabilities:\n return self._get_model(model=api.Vulnerabilities)", "def svn_client_commit_item_t_wcprop_changes_set(svn_client_commit_item_t_self, apr_array_header_t_wcprop_changes): # real signature unknown; restored from __doc__\n pass", "def replace_version(self, source_version, target_version):\n raise NotImplementedError(\"replace_version is not implemented\")", "def change_priorities(self,idxs,errors):\n #print(\"Indecies \",idxs)\n for i,idx in enumerate(idxs):\n self.update(idx, errors[i])", "def health_indications(self, health_indications):\n\n self._health_indications = health_indications", "def replicas(self, replicas):\n\n self._replicas = replicas", "def vitamins(self, vitamins: List[RecipeObjectNutrientsCalories]):\n\n self._vitamins = vitamins", "def set_num_selections(self, integrity):\n #p = 1-self.integrity\n p = integrity\n numerator = 1\n denominator = 1+(0.29/p)\n num_selections = numerator/denominator\n self.num_selections = int(num_selections*self.limit)", "def test_valid_versions(self):\n instance = ClassWithVersion()\n versions = [\"1.2.3\", \"1.2.*\", \"1.*\", \"*\", \"1.1.1\", \"1.0.1rc1\"]\n for version in versions:\n instance.version = version\n self.assertEqual(instance.version(), version)", "def _set_cirq_version(core_reqs: List[str], relative_cirq_version: str) -> List[str]:\n cirq_version = CIRQ_VERSIONS[relative_cirq_version]\n to_change = 'cirq', 'cirq-google', 'cirq-core'\n\n new_reqs = []\n for req in core_reqs:\n without_spec = _remove_version_spec(req)\n if without_spec in to_change:\n new_reqs.append(f'{without_spec}{cirq_version}')\n else:\n new_reqs.append(req)\n\n return new_reqs", "def _sanityCheckProtocolVersions(other):\n if other.minVersion > other.maxVersion:\n raise ValueError(\"Versions set incorrectly\")\n if other.minVersion not in KNOWN_VERSIONS:\n raise ValueError(\"minVersion set incorrectly\")\n if other.maxVersion not in KNOWN_VERSIONS:\n raise ValueError(\"maxVersion set incorrectly\")\n\n if other.maxVersion < (3, 4):\n other.versions = [i for i in other.versions if i < (3, 4)]", "def update(self, values):\n pass", "def set(self, v):\n self.components = v.components", "def fuel_percents(self, fuel_percents):\n\n self._fuel_percents = fuel_percents", "def update(self, values, weights):\n super().update(values)", "def update(self, values, weights):\n super().update(values)", "def update(self, values, weights):\n super().update(values)", "def update(self, values, weights):\n super().update(values)", "def set_primals(self, primals: BlockVector):\n for ndx, nlp in self._nlps.items():\n nlp.set_primals(primals.get_block(ndx))\n self._primals.set_block(ndx, primals.get_block(ndx))\n self._primals.set_block(self._num_scenarios, primals.get_block(self._num_scenarios))", "def check_versions(context, num=0, versions='', ecosystem='', package=''):\n versions = split_comma_separated_list(versions)\n vrsns = 
context.response.json()['items']\n assert len(vrsns) == num\n for v in vrsns:\n assert v['ecosystem'] == ecosystem\n assert v['package'] == package\n assert v['version'] in versions", "def SetGuardRailVersion(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\n \"setGuardRailVersion\", payload=payload, response_object=None\n )", "def set_infected_nodes(self, list_or_dataframe):\n\n infected_dataframe = list_or_dataframe\n\n # Convert list to dataframe\n if type(list_or_dataframe) == list:\n rdd_list = self.sc.parallelize(list_or_dataframe)\n row_rdd_list = rdd_list.map(lambda x: Row(x))\n field_list = [StructField(\"id\", LongType(), True)]\n schema_list = StructType(field_list)\n infected_dataframe = self.sqlContext.createDataFrame(row_rdd_list, schema_list)\n\n # Create column for influence attribute containing 1's\n infected_dataframe = infected_dataframe.withColumn(self.attribute, lit(1.0))\n infected = infected_dataframe\n\n self.infected_nodes = infected_dataframe\n\n # Merge to original vertices of graph\n orig_vertices = self.graph.vertices.selectExpr(\"id as id\")\n\n # Update graph\n orig_edges = self.graph.edges\n new_vertices = orig_vertices.join(infected, \"id\", \"left_outer\").na.fill(0)\n self.graph = GraphFrame(new_vertices, orig_edges)", "def challenge_objective_hashes(self, challenge_objective_hashes):\n\n self._challenge_objective_hashes = challenge_objective_hashes", "def __set__(self, instance, value):\n # make sure value follows \"major,minor,build\" convention\n if not is_version_valid(value):\n raise InvalidVersionFormat(\"Version: {0} is invalid\".format(value))\n\n super().__set__(instance, value)", "def deployment_versions(self, deployment_versions):\n if (self.local_vars_configuration.client_side_validation and\n deployment_versions is not None and not isinstance(deployment_versions, int)):\n raise ValueError(\"Parameter `deployment_versions` must be an integer\") # noqa: E501\n\n self._deployment_versions = deployment_versions", "def change_sides(self, sides):\n self.sides = sides", "def progressions(self, progressions):\n\n self._progressions = progressions", "def progressions(self, progressions):\n\n self._progressions = progressions", "def set_values(self, value):\n for i in range(len(self)):\n self._elements[i] = value", "def setValues(self):\n pass", "def setValues(self):\n pass", "def setValues(self):\n pass", "def setValues(self):\n pass", "def setValues(self):\n pass", "def setValues(self):\n pass", "def setSeverityOverride(self, *args):\n return _libsbml.XMLErrorLog_setSeverityOverride(self, *args)" ]
[ "0.609255", "0.609255", "0.59750617", "0.5626721", "0.54159355", "0.51944435", "0.51533484", "0.5120908", "0.50574327", "0.50420326", "0.50411284", "0.50248915", "0.48979875", "0.4828377", "0.48191965", "0.4787847", "0.46811602", "0.4634885", "0.46162087", "0.4607605", "0.46066567", "0.45801947", "0.44978493", "0.4497593", "0.44804046", "0.44680262", "0.44612357", "0.445444", "0.44413474", "0.44246194", "0.44103485", "0.43877602", "0.43433803", "0.43363687", "0.43052402", "0.4304578", "0.4304578", "0.42978758", "0.42974544", "0.428404", "0.42831883", "0.42680123", "0.42577046", "0.4256368", "0.42500457", "0.42408726", "0.4238848", "0.42306906", "0.42208424", "0.42136294", "0.420846", "0.41978195", "0.4196744", "0.41936585", "0.41891617", "0.41854253", "0.4183001", "0.4181467", "0.41806698", "0.41736963", "0.41736963", "0.41705212", "0.4160465", "0.41489303", "0.41483647", "0.41392052", "0.41362733", "0.41332874", "0.41199678", "0.41195378", "0.41193813", "0.41169307", "0.41121042", "0.41112965", "0.4108494", "0.4104683", "0.40946433", "0.4088631", "0.4084849", "0.4084849", "0.4084849", "0.4084849", "0.40835077", "0.40816772", "0.40810886", "0.40749675", "0.40742067", "0.40695745", "0.40636045", "0.40580055", "0.40570462", "0.40570462", "0.40558076", "0.40539825", "0.40539825", "0.40539825", "0.40539825", "0.40539825", "0.40539825", "0.40521833" ]
0.8137943
0
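The two preceding rows pair autogenerated docstrings ("Sets the fixed_by / affected_versions of this ClairpbVulnerability.") with one-line property setters, a pattern typical of Swagger/OpenAPI-generated API clients. Below is a minimal sketch of how such a generated model class fits together; the class layout, the `openapi_types` mapping, and the simplified `to_dict` are assumptions for illustration, not the actual generated Clair source.

```python
# Minimal reconstruction of the generated-model pattern seen in the rows above.
# Attribute names (fixed_by, affected_versions) come from the row documents;
# everything else is an assumed, simplified illustration.


class ClairpbVulnerability(object):
    # Assumed type mapping, in the style of swagger-codegen output.
    openapi_types = {
        "fixed_by": "str",
        "affected_versions": "list[str]",
    }

    def __init__(self, fixed_by=None, affected_versions=None):
        self._fixed_by = None
        self._affected_versions = None
        if fixed_by is not None:
            self.fixed_by = fixed_by
        if affected_versions is not None:
            self.affected_versions = affected_versions

    @property
    def fixed_by(self):
        """Gets the fixed_by of this ClairpbVulnerability."""
        return self._fixed_by

    @fixed_by.setter
    def fixed_by(self, fixed_by):
        """Sets the fixed_by of this ClairpbVulnerability."""
        self._fixed_by = fixed_by

    @property
    def affected_versions(self):
        """Gets the affected_versions of this ClairpbVulnerability."""
        return self._affected_versions

    @affected_versions.setter
    def affected_versions(self, affected_versions):
        """Sets the affected_versions of this ClairpbVulnerability."""
        self._affected_versions = affected_versions

    def to_dict(self):
        """Returns the model properties as a dict (simplified)."""
        return {attr: getattr(self, attr) for attr in self.openapi_types}


# Usage example (values are illustrative):
vuln = ClairpbVulnerability(fixed_by="2.24-11+deb9u4", affected_versions=["2.24-11"])
print(vuln.to_dict())
# -> {'fixed_by': '2.24-11+deb9u4', 'affected_versions': ['2.24-11']}
```

The setter bodies mirror the documents in the two rows above, and the next row's query/document ("Returns the model properties as a dict") shows the fuller `to_dict` that such generated clients actually emit.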
Returns the model properties as a dict
def to_dict(self): result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_dict(self):\n return self.properties", "def to_dict(self):\n return self.properties", "def get_properties(self):\n return self.properties", "def asdict(self):\n return self._prop_dict", "def json(self):\n rv = {\n prop: getattr(self, prop)\n for prop in self.__properties__\n if prop in vars(self)\n }\n rv.update(self._props)\n return rv", "def get_properties(self):\n return self.properties", "def get_properties():", "def getProperties():", "def properties(self):\r\n if self._properties is None:\r\n res = self._con.get(self._url, {'f':'json'})\r\n self._properties = PropertyMap(res)\r\n return self._properties", "def properties(self):\r\n if self._properties is None:\r\n res = self._con.get(self._url, {'f':'json'})\r\n self._properties = PropertyMap(res)\r\n return self._properties", "def getProperties(self):\n return self.properties", "def __properties__(self) -> dict:\r\n parameters = [\r\n d for d in dir(self) if (d[0] != \"_\") and (d.count(\"set\") == 0)\r\n and (d.count(\"_c\") == 0) and (d.count(\"_f\") == 0)\r\n ]\r\n\r\n return self.__as_json__(parameters)", "def json_properties(self):\n attributes = []\n all = vars(self)\n for var in all:\n if var[:1] != '_':\n attributes.append(var)\n if isinstance(self, db.Model):\n properties = self.properties().keys()\n for property in properties:\n if property[:1] != '_':\n attributes.append(property)\n return attributes", "def properties(self) -> Any:\n return pulumi.get(self, \"properties\")", "def _properties(self) -> dict[str, dict[str, str]]:\n schema = self.schema(by_alias=False)\n if schema.get('properties') is not None:\n return schema.get('properties', {})\n return schema.get('definitions', {}).get(self.__class__.__name__, {}).get('properties', {})", "def get_model_properties(self):\n properties = {}\n\n filename = self._get_data_filename(\"modelargs.json\")\n with open(filename, \"r\") as f:\n results = json.loads(f.read())\n properties[\"image_size\"] = results.get(\"image_size\")\n properties[\"num_classes\"] = results.get(\"num_classes\")\n properties[\"model\"] = results.get(\"model\")\n properties[\"name\"] = results.get(\"name\")\n properties[\"filter_size\"] = results.get(\"filter_size\", 3)\n properties[\"increase_factor\"] = results.get(\"increase_factor\", 0)\n self.model = properties[\"name\"] # regardless of the name of the folder, this will get the proper model name (i.e. 
<modelname>.cntk)\n\n # optional property\n properties[\"trainer\"] = results.get(\"trainer\", \"CNTK 2.2\")\n\n self._ensure_model_file()\n properties[\"size_mb\"] = round(os.path.getsize(self.model_file) / (1000 * 1000))\n\n return properties", "def as_dict(self):\n result = {}\n for attr in self.__attr:\n result[attr] = getattr(self, attr)\n return result", "def to_dict_model(self) -> dict:\n return dict((key, getattr(self, key)) for key in self.__mapper__.c.keys())", "def get_properties():\n properties = dict()\n properties['size'] = list()\n properties['color'] = list()\n properties['quality'] = list()\n u = models.Size.query.all()\n for i in u:\n properties['size'].append(i.size_name)\n u = models.Color.query.all()\n for i in u:\n properties['color'].append(i.color_name)\n u = models.Quality.query.all()\n for i in u:\n properties['quality'].append(i.quality_name)\n return make_response(jsonify(properties))", "def get_modelDict(self):\n return self.__modelDict", "def attributes(self):\n return dict(self.__attributes)", "def properties(self):\n return self._properties", "def properties(self):\n return self._properties", "def to_dict(self):\n result = {}\n for p in self.json_properties():\n value = getattr(self, p)\n if isinstance(value, datetime.datetime):\n value = value.strftime('%s%f')[:-3]\n result[Jsonifiable.transform_to_camelcase(p)] = value\n return result", "def properties(self):\n return self._props", "def properties(self):\n pass", "def to_dict(self):\n d = {}\n for attr in self.__class__.attributes:\n d[attr] = getattr(self, attr)\n return d", "def properties_get(self):\n return self._get('properties')", "def _collect_properties(self):\n properties = {\n 'userid': self.user_id,\n 'title': self.get_fullname()\n }\n if not self.ogds_user:\n return properties\n\n for attribute_name in self.ogds_user_attributes:\n value = getattr(self.ogds_user, attribute_name)\n properties[attribute_name] = value\n return properties", "def getPropertyDict(self):\n \n d = self.getChild('__properties')\n if d:\n return d.getDict()\n else:\n return {}", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def to_dict(self, include=None):\n _MODEL = type(self)\n repr_dict = {}\n if include is None:\n include = []\n for name, prop in _MODEL._properties.iteritems():\n if hasattr(prop, 'public') and getattr(prop, 'public', False):\n include.append(name)\n\n for name in include:\n # check if this property is even allowed to be public\n # or has a value set\n if not hasattr(self, name):\n continue\n\n value = getattr(self, name)\n if type(getattr(_MODEL, name)) == ndb.StructuredProperty:\n if isinstance(value, list):\n items = []\n for item in value:\n items.append(item.to_dict(include=None))\n repr_dict[name] = items\n else:\n repr_dict[name] = value.to_dict(include=None)\n elif isinstance(value, date):\n repr_dict[name] = value.isoformat()\n elif isinstance(value, ndb.Key):\n repr_dict[name] = value.urlsafe()\n else:\n repr_dict[name] = value\n\n if self._key:\n repr_dict['key'] = self.get_key_urlsafe()\n return repr_dict", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k == 'POSSIBLE_METHODS':\n continue\n if k == 'keysamplers':\n properties[k] = [i.to_dict() for i in self.__dict__[k] if hasattr(i,'to_dict')]\n elif k in {'pooler'}:\n properties[k] = self.__dict__[k].to_dict()\n 
else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def get_all_properties(cls):\n return ['key', 'id'] + _.keys(cls._properties)", "def get_properties(self):\n return self.name, self.author, self.description, self.fmu_type, self.version, self.guid, self.tool, self.numStates", "def properties(self):\n\n return self._properties", "def ToDict(self):\n atributes_dictionary = {}\n for key, value in self.__dict__.iteritems():\n atributes_dictionary[key] = value\n return atributes_dictionary", "def properties(self):", "def properties(self):", "def properties(self):", "def modelPropertiesDictionary(sql_row_list):\n \n properties_dictionary = \\\n {\n \"id\": sql_row_list[0],\n \"name\": sql_row_list[1],\n \"last_deploy_timestamp\": sql_row_list[2],\n \"active_version\": sql_row_list[3],\n \"build_id\": sql_row_list[4]\n };\n\n return properties_dictionary;", "def as_dict(self):\n data = dict()\n for name in self.fields:\n val = getattr(self, name)\n if isinstance(val, Model):\n val = val.as_dict()\n elif isinstance(val, list) and val and isinstance(val[0], Model):\n val = [sub.as_dict() for sub in val]\n data[name] = val\n return data", "def to_dict(self):\n if self._dict is not None:\n return self._dict\n\n result = {}\n for key in self.ATTRIBUTES:\n value = getattr(self, key)\n if value:\n result[key] = value\n self._dict = result\n return result", "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "def to_dict(self):\n _dict = {}\n for f in self._meta.fields:\n if f.name == 'created':\n _dict[f.name] = str(f.value_from_object(self))\n else:\n _dict[f.name] = f.value_from_object(self)\n\n return _dict", "def to_dict(self):\r\n return self.__dict__", "def properties(self):\n return None", "def properties(self):\n return None", "def to_dict(self):\n return attr.asdict(self)", "def as_dict(self):\n return self.__dict__", "def _get_model_state(self) -> dict:\n return dict(model=self.model, kwargs=self._model_kwargs)", "def dictify(self):\n return {\n \"name\" : self.name,\n \"lastname\" : self.lastname,\n \"phone\" : self.phone,\n \"email\" : self.email\n }", "def to_dict(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n # \"created_by\": self.created_by,\n # \"created_on\": self.created_on,\n # \"modified_by\": self.modified_by,\n # \"modified_on\": self.modified_on\n }", "def properties(self):\r\n return resources.Properties(self)", "def attributes(self):\n params = self.model.param_array\n return {'parameters': params}", "def properties(self, pk):\n return JsonResponse(self._get_properties(pk))", "def to_dict(self):\n return vars(self)", "def to_dict(self):\n\n # Check if is the right instance.\n if isinstance(self, db.Model):\n # construct a dictionary from column names and values.\n dict_representation = {c.name: getattr(self, c.name) for c in self.__table__.columns}\n return dict_representation\n else:\n raise AttributeError(type(self).__name__ + \" is not instance of \" + db.Model.__name__)", "def bson_properties(self):\n return []", "def to_dict(self):\n return {\n \"id\": self.id,\n \"name\": self.name\n }", "def get_dict(self):\n return", "def to_dict(self):\n return to_dict(self.__dict__)", "def to_json(self):\n properties = self.to_dict()\n if isinstance(self, db.Model):\n properties['id'] = unicode(self.key().id())\n return json.dumps(properties)", "def to_dict(self):", "def to_dict(self):\n return 
self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def get_attributes(self) -> Dict[str, str]:\n pass", "def config(self) -> ModelConfigDict:\n return self.config_obj.to_dict()", "def properties(self):\n return self.properties_with_uid[1:]", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k in {'idsSoFar'}:\n continue\n else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k in {'idsSoFar'}:\n continue\n else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k in {'idsSoFar'}:\n continue\n else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def serialise(self):\n return {\n 'id': self.id,\n 'category_id': self.category_id,\n 'name': self.name,\n 'description': self.description,\n 'quantity': self.quantity,\n 'price': self.price,\n 'user_id': self.user_id\n }", "def getPropertiesAll():", "def get_all_properties(self) -> dict:\n return self._request(\n \"post\",\n URL,\n json=attr.asdict(\n Body(\"getAllProperties\", API_VERSION),\n filter=attr.filters.exclude(attr.fields(Body).params),\n ),\n )", "def model_info(self):\n if not self._model_info:\n self._load_model_info()\n try:\n data = json.loads(self._model_info)\n except (TypeError, ValueError):\n data = {}\n return data", "def to_dict(self):\n return {\n 'name': self.get_name(),\n 'description': self.get_description()\n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n }", "def as_dict(self):\n return self.__dict__", "def to_dict(self):\r\n\r\n return {\r\n 'product_id': self.product_id,\r\n 'product_name': self.product_name\r\n }", "def serialize(self):\n return {\n 'name' : self.name,\n 'id' : self.id,\n }", "def asdict(self):\n return attr.asdict(self)", "def to_dict(self) -> dict:", "def getDict(self):\n res = {}\n for attr, value in self.__dict__.iteritems():\n if type(attr) is IntType or type(attr) is StringType or type(attr) is LongType or type(attr) is UnicodeType:\n res[attr] = value\n elif isinstance(attr, datetime.datetime):\n res[attr] = value.isoformat('-')\n \n return res", "def attributes(self):\n return self.__dict.keys()", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def as_dict(self):\n return {c.key: getattr(self, c.key)\n for c in inspect(self).mapper.column_attrs}" ]
[ "0.7751993", "0.7751993", "0.73391134", "0.7334895", "0.7297356", "0.727818", "0.7159078", "0.71578115", "0.71494967", "0.71494967", "0.71283495", "0.71275014", "0.7122587", "0.71079814", "0.7060394", "0.7043251", "0.7034103", "0.70233124", "0.69635814", "0.69586295", "0.690053", "0.6881568", "0.6881568", "0.6857664", "0.68415916", "0.68122137", "0.680096", "0.67914945", "0.6757063", "0.6753585", "0.6741746", "0.6741746", "0.6741746", "0.6735291", "0.67126125", "0.6697801", "0.6695801", "0.6689893", "0.6680752", "0.66802895", "0.66802895", "0.66802895", "0.66547817", "0.66495687", "0.6633999", "0.6619567", "0.6619567", "0.66156983", "0.66049474", "0.6590706", "0.6590706", "0.6590206", "0.6587873", "0.65861845", "0.65822417", "0.65794736", "0.65792733", "0.657747", "0.6571183", "0.65662557", "0.65637356", "0.6539919", "0.65396816", "0.65283066", "0.65252614", "0.6513477", "0.65098846", "0.65077883", "0.65077883", "0.65077883", "0.65077883", "0.65077883", "0.65077883", "0.6507418", "0.6505772", "0.65015876", "0.64951885", "0.64951885", "0.64951885", "0.64857763", "0.6474329", "0.6469453", "0.64684683", "0.6453606", "0.6453024", "0.6453024", "0.6430734", "0.6429058", "0.6426903", "0.64215595", "0.64201874", "0.6417152", "0.6414739", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.64035517" ]
0.0
-1
Returns the string representation of the model
def to_str(self): return pprint.pformat(self.to_dict())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n return super().__str__() + self.model.__str__()", "def __str__(self) -> str:\n # noinspection PyUnresolvedReferences\n opts = self._meta\n if self.name_field:\n result = str(opts.get_field(self.name_field).value_from_object(self))\n else:\n model_fields = get_model_fields(\n opts.model,\n foreign=False,\n m2m=False,\n exclude=self.exclude_from_str\n )\n # TODO: replace the above with the below to remove the get_model_fields call:\n # model_fields = [\n # f for f in opts.get_fields()\n # if f.concrete\n # and not (f.primary_key or f.is_relation or f.name in self.exclude_from_str)\n # ]\n result = \" \".join(\n [\n str(fld.value_from_object(self))\n for fld in model_fields\n if fld.value_from_object(self)\n ]\n )\n return result.strip() or super().__str__()", "def __str__(self):\n return '%s%s' % (self.name, ' - %s' % self.model if self.model else '')", "def __str__(self):\n model = self._meta.verbose_name.title()\n return f\"{model:s}: {self.name:s}\"", "def __str__(self):\n model = self._meta.verbose_name.title()\n return f\"{model:s}: {self.name:s}\"", "def __repr__(self):\n\n mod = f\"{self.__class__.__name__} Model\"\n try:\n mod += f': {self.filename}'\n except AttributeError:\n pass\n s = [mod]\n for name, v in self.metadata.items():\n s += [f\"{name:16} : {v}\"]\n return '\\n'.join(s)", "def __str__(self):\n \n res = ['>>> Model %(model_name)s <<<']\n res.append('')\n res.append('Independent parameters:')\n res.append('-----------------------')\n res.append('')", "def __str__(self):\n return \"DataModel(name={},attributes={},description={})\".format(\n self.name, {a.name: str(a) for a in self.attributes}, self.description\n )", "def model_info(self) -> str:\n return self._model_info(self.model).decode(\"utf-8\")", "def __str__(self):\n return str(self.serialize())", "def __str__ (self) :\n\n return self.as_string()", "def __str__(self):\n\n return self.toString()", "def __str__(self):\n msg = [\n f'{self.model=}',\n f'{self.field=}',\n f'{self.fxx=}',\n f'{self.date=}',\n f'{self.priority=}',\n ]\n return '\\n'.join(msg)", "def __str__(self):\n model = self._meta.verbose_name.title()\n title = self.extended_object.get_title()\n return f\"{model:s}: {title:s}\"", "def __repr__(self):\n return grid_search_to_str(self.model)", "def __str__(self):\n return self.toString()", "def __str__(self):\n return str(self.__dict__)", "def __str__(self):\n return str(self.__dict__)", "def to_representation(self) -> str:\n raise NotImplementedError()", "def __str__(self):\n return str(self.obj)", "def __str__(self):\n return self.make_flat()", "def dump_model(self):", "def __str__(self):\n return str(self.__dict__['_obj'])", "def __str__(self) -> str:\n model_str = [\"\\nModel info:\\n\", \" Unimodal encoder:\\n\"]\n\n for modality in range(self.num_modalities):\n model_str.append(f\" ({modality + 1}) {self.unimodal_encoder[modality]}\")\n\n model_str.append(\"\\n\\n Unimodal decoder:\\n\")\n for modality in range(self.num_modalities):\n model_str.append(f\" ({modality + 1}) {self.unimodal_decoder[modality]}\")\n\n if self.multimodal_decoder is not None:\n model_str.append(\"\\n\\n Multimodal decoder:\\n\")\n model_str.append(f\" {self.multimodal_decoder}\")\n\n return \"\".join(model_str)", "def __repr__(self):\n s = 'text model name: ' + self.name + '\\n'\n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of sentence 
lengths: ' + str(len(self.sentence_lengths)) + '\\n'\n s += ' most common words: ' + str(self.common_word) + '\\n'\n\n return s", "def to_string(self):\r\n return self.__str__()", "def __repr__(self):\n return '<ModelSignature(model_name=%r)>' % self.model_name", "def __repr__(self):\n return '<ModelSignature(model_name=%r)>' % self.model_name", "def __str__(self):\n return str(self.get_data())", "def __str__(self):\n return f\"model {self._name}\"", "def __str__(self):\n\n return self.raw_field", "def __repr__(self):\n \n s = 'text model name: ' + self.name + '\\n' \n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of sentence lengths: ' + str(len(self.sentence_lengths)) + '\\n'\n s += ' number of word stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of commas counts: ' + str(len(self.commas_per_sentence)) + '\\n'\n return s", "def serialize(self):\n\n\t\treturn str(self)", "def __str__(self):\n return self.get_str()", "def serialize(self):\n\n return str(self)", "def __str__(self) -> str:\n if self.name_field:\n return str(getattr(self, self.name_field))\n # noinspection PyUnresolvedReferences\n data = [\n # Collect the string representations of related objects.\n # getattr(self, fk_field.attname) and\n # fk_field.value_from_object(self) would only return the primary\n # key of the related object.\n str(getattr(self, fk_field.name))\n for fk_field in get_model_fields(\n self._meta.model, base=False, foreign=True, m2m=False\n )\n if not fk_field.null\n ]\n if len(data) < 2:\n # Cannot build a more meaningful representation than the default.\n return super().__str__()\n else:\n template = \"{}\" + \" ({})\" * (len(data) - 1)\n return template.format(*data)", "def __str__(self):\n return self.s", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __repr__(self):\n\n # info string\n info = self.model.__repr__()\n info += \"\\n=========================\\n\"\n info += f\"Train data length:\\t\\t{ len(self.train_dataset) }\\n\"\n info += f\"Eval sata length:\\t\\t{ len(self.eval_dataset) }\\n\"\n info += f\"Optimizer:\\t\\t\\t\\t{ str(self.optimizer).split('(')[0] }\\n\"\n info += f\"Criterion:\\t\\t\\t\\t{ str(self.criterion).split('(')[0] }\\n\"\n info += f\"Training Environment:\\t{ self.device.type }\\n\"\n info += f\"Show information:\\t\\t{ 'True' if self.info else 'False' }\\n\"\n info += \"=========================\\n\"\n\n return info", "def __repr__(self):\n s = 'text model name: ' + self.name + '\\n'\n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of sentence lengths: ' + str(len(self.sentence_lengths))\\\n + '\\n'\n s += ' number of punctuation types: ' + str(len(self.punctuation))\n return s", "def dumps(self, indent=0):\n outstr = \" \"*indent + \"MewloDbModel object '{0}' attribute values:\\n\".format(self.__class__.__name__)\n public_props = (name for name in dir(object) if not name.startswith('_'))\n for name in public_props:\n outstr += \" \"*indent + \"{0}: {1}\\n\".format(name, str(getattr(self,name)))\n return outstr", "def __str__(self):\n model = self._meta.verbose_name.title()\n title = self.title or str(_(\"Empty 
title\"))\n\n return f\"{model:s}: {title:s}\"", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()" ]
[ "0.85856134", "0.7814518", "0.77898884", "0.7751367", "0.7751367", "0.7712228", "0.76981676", "0.76700574", "0.7651133", "0.7597206", "0.75800353", "0.7568254", "0.7538184", "0.75228703", "0.7515832", "0.7498764", "0.74850684", "0.74850684", "0.7467648", "0.74488163", "0.7442643", "0.74416703", "0.7433768", "0.7411771", "0.7405439", "0.7379557", "0.7361716", "0.7361716", "0.732774", "0.7325511", "0.732528", "0.73097324", "0.73078936", "0.73001266", "0.7296789", "0.7292791", "0.7289445", "0.7287187", "0.7287187", "0.7287187", "0.7287187", "0.7287187", "0.7279803", "0.7261615", "0.7250399", "0.7244789", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068" ]
0.0
-1
For `print` and `pprint`
def __repr__(self): return self.to_str()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pprint(*args, **kwargs):\n if PRINTING:\n print(*args, **kwargs)", "def print_out():\n pass", "def custom_print(*objects):\n print(*objects, sep=OFS, end=ORS)", "def _print(self, *args):\n return _ida_hexrays.vd_printer_t__print(self, *args)", "def _printable(self):\n pass", "def _print_custom(self):\n pass", "def pypprint(*args, **kwargs): # type: ignore\n from typing import Iterable\n\n if len(args) != 1:\n print(*args, **kwargs)\n return\n x = args[0]\n if isinstance(x, dict):\n for k, v in x.items():\n print(f\"{k}:\", v, **kwargs)\n elif isinstance(x, Iterable) and not isinstance(x, str):\n for i in x:\n print(i, **kwargs)\n else:\n print(x, **kwargs)", "def pprint(self):\n # just here for defining the interface; work is done in subclasses\n pass", "def print(*args, **kwargs):\n with P_LOCK:\n __builtins__.print(*args, **kwargs)", "def print(self):\n # Your implementation here", "def p(value):\n pp.pprint(value)", "def static_print(*args, __p=print, **kwargs):\n __p(*args, **kwargs)", "def print(self, *args, **kwargs):\n print(*args, **kwargs)", "def pprint(self):\n print(self.pprint_str())", "def pprint(obj):\n for argname in sorted([x for x in dir(obj) if not x.startswith('__')]):\n # Skip callables\n if hasattr(getattr(obj, argname), '__call__'):\n continue\n print(\"{} : {}\".format(argname, getattr(obj, argname)))", "def print_(self, s: str) -> None:", "def my_pprint(obj, intend = 0):\n if isinstance(obj, dict):\n for key, value in obj.items():\n print(intend*\" \"+str(key)+\" : \")\n my_pprint(value, intend = intend + 4)\n print()\n elif isinstance(obj, list):\n for value in obj:\n my_pprint(value, intend = intend + 4)\n print()\n elif isinstance(obj, bytes):\n print(\"<binary data>\")\n \n else:\n try:\n print(intend*\" \"+str(obj))\n except UnicodeDecodeError:\n print(intend*\" \"\"<?>\")", "def test_print(chikin):\n chikin.print()", "def _print(self, text):\n\t\tif self.verbose:\n\t\t\tprint text", "def out(*args):\r\n print(*args)", "def __pprint(object, stream=None, indent=1, width=80, depth=None):\n printer = PrettyPrinterExt(\n stream=stream, indent=indent, width=width, depth=depth)\n printer.pprint(object)", "def DumpPprint(data):\n #NOTE(g): Import is done here, instead of the top of the file, to not require this module if it is not used\n import pprint\n \n text = pprint.pformat(data)\n \n return text", "def repl_print_statements():\n pass", "def test_03_pass_print(self):\n print('Hello World!')", "def p(self):\n self.printstdout = True", "def print(*args, **kwargs):\n new_args = []\n for arg in args:\n if builtins.isinstance(arg, models.Point):\n new_args.append(\"({0}, {1})\".format(arg.x, arg.y))\n else:\n new_args.append(arg)\n\n builtins.print(*new_args, **kwargs)", "def real_print(*args, **kwargs):\n\n kwargs.setdefault('file', real_stdout)\n _python_print_function(*args, **kwargs)", "def to_print_out(self):\n self.error_throw('output')\n\n if self.rank_method == methods_of_ranking[3]: #'diversified_ranking'\n self.output_div('print')\n else:\n self.output('print')", "def debug_print(debug_data):\n if DEBUG_MODE == \"true\":\n pp.pprint(debug_data)", "def print(self):\r\n self.print_avec_separateur()", "def pprint(self):\r\n for i in self.items():\r\n print '%s => %r'%i", "def pprint(self, parameter_s=''):\n ptformatter = self.shell.display_formatter.formatters['text/plain']\n ptformatter.pprint = bool(1 - ptformatter.pprint)\n print('Pretty printing has been turned',\n ['OFF','ON'][ptformatter.pprint])", "def print(self):\n print(self.pretty_str())", "def 
test_print4(self):\n writer = StringIO()\n collatz_print(writer, 1, 1, 1)\n self.assertEqual(writer.getvalue(), \"1 1 1\\n\")", "def eprint(*args, **kwargs):\n\tprint(*args, file=sys.stderr, **kwargs)", "def _print(self, *args, **kwargs) -> None:\n # Only print in verbose mode\n if self._verbose:\n arglist = list(args)\n arglist[0] = f\"[buddy-{self._experiment_name}] {args[0]}\"\n print(*arglist, **kwargs)", "def use_pypprint_for_implicit_print(self) -> None:\n if self.implicit_print is not None:\n self.implicit_print.func.id = \"pypprint\" # type: ignore\n # Make sure we import it later\n self.undefined.add(\"pypprint\")", "def test_print(self):\n writer = StringIO()\n collatz_print(writer, 1, 10, 20)\n self.assertEqual(writer.getvalue(), \"1 10 20\\n\")", "def pprint(self):\n return pformat(repr(self))", "def printer(message):\n if VERBOSITY:\n pprint(message)", "def rec_print(p):\n if len(p) == 0:\n return\n t = p.pop(0)\n print t\n rec_print(p)", "def pformat(object):\r\n return PrettyPrinter().pformat(object)", "def printc(*a, **kw):\n print(*a, **kw)", "def pr(x):\n Card.print_pretty_cards(x)", "def debug_print(self, *content):\n if self.debug:\n print(*content)", "def pprint(object, stream=None):\r\n printer = PrettyPrinter(stream=stream)\r\n printer.pprint(object)", "def safe_print(*objs, errors=\"replace\"):\n\tprint(*(to_stdout(str(o), errors) for o in objs))", "def magic_Pprint(self, parameter_s=''):\n \n self.shell.outputcache.Pprint = 1 - self.shell.outputcache.Pprint\n print 'Pretty printing has been turned', \\\n ['OFF','ON'][self.shell.outputcache.Pprint]", "def print_output(tree):\n print_value(tree)\n print_tree(tree)", "def _Print(self, t):\n self.RaiseError(t, \"Print not supported\")", "def vprint(*args, **kwargs ):\n\n forceprint = False\n for key in kwargs:\n if key == \"forceprint\":\n forceprint =kwargs[key]\n \n line = ''\n if debug or forceprint : \n for arg in args:\n line += str(arg) +\" \"\n log = open(exepath + 'pyframe.log', 'a') \n log.write(line + \"\\n\")\n log.close() \n print line", "def eprint(*pargs, **kargs):\n print('\\u001b[31m', end='', file=sys.stderr)\n print(*pargs, file=sys.stderr, **kargs)\n print('\\u001b[0m', end='', file=sys.stderr)", "def printer(obj, ident=''):\n import inspect\n print ident + obj.__class__.__name__.upper()\n ident += ' '\n lists = []\n for name in dir(obj):\n elem = getattr(obj, name)\n if isinstance(elem, list) and name != u'decoded_content':\n lists.append(elem)\n elif not inspect.ismethod(elem):\n if not name.startswith('__'):\n if name == u'data' and elem:\n print ident + u'data = '\n printer(elem, ident + ' ')\n else:\n print ident + u'%s\\t= %s' % (name, getattr(obj, name))\n for l in lists:\n for i in l:\n printer(i, ident + ' ')", "def printer(obj, ident=''):\n import inspect\n print ident + obj.__class__.__name__.upper()\n ident += ' '\n lists = []\n for name in dir(obj):\n elem = getattr(obj, name)\n if isinstance(elem, list) and name != u'decoded_content':\n lists.append(elem)\n elif not inspect.ismethod(elem):\n if not name.startswith('__'):\n if name == u'data' and elem:\n print ident + u'data = '\n printer(elem, ident + ' ')\n else:\n print ident + u'%s\\t= %s' % (name, getattr(obj, name))\n for l in lists:\n for i in l:\n printer(i, ident + ' ')", "def _print(self, *args):\n return _ida_hexrays.qstring_printer_t__print(self, *args)", "def pprint(self):\n def pprintStr(node):\n s = \"(\" + str(node.value) \n for action in node.children:\n s = s + \", \" + pprintStr(node.children[action])\n s = s + \")\"\n 
return s\n\n print pprintStr(self)", "def hook_print():\n sys.stdout = PrintHook()", "def cmdPrint( self, *args):\n return self.cmd( *args, **{ 'verbose': True } )", "def print_list(self):\r\n pass", "def debugprint(obj, depth=-1, print_type=False,\r\n file=None, ids='CHAR', stop_on_name=False):\r\n if file == 'str':\r\n _file = StringIO()\r\n elif file is None:\r\n _file = sys.stdout\r\n else:\r\n _file = file\r\n done = dict()\r\n results_to_print = []\r\n order = []\r\n if isinstance(obj, gof.Variable):\r\n results_to_print.append(obj)\r\n elif isinstance(obj, gof.Apply):\r\n results_to_print.extend(obj.outputs)\r\n elif isinstance(obj, Function):\r\n results_to_print.extend(obj.maker.fgraph.outputs)\r\n order = obj.maker.fgraph.toposort()\r\n elif isinstance(obj, (list, tuple)):\r\n results_to_print.extend(obj)\r\n elif isinstance(obj, gof.FunctionGraph):\r\n results_to_print.extend(obj.outputs)\r\n order = obj.toposort()\r\n elif isinstance(obj, (int, long, float, numpy.ndarray)):\r\n print obj\r\n else:\r\n raise TypeError(\"debugprint cannot print an object of this type\", obj)\r\n for r in results_to_print:\r\n debugmode.debugprint(r, depth=depth, done=done, print_type=print_type,\r\n file=_file, order=order, ids=ids,\r\n stop_on_name=stop_on_name)\r\n if file is _file:\r\n return file\r\n elif file == 'str':\r\n return _file.getvalue()\r\n else:\r\n _file.flush()", "def _get_print_fn(file=sys.stdout):\n def _print_fn(op, xin,):\n for attr in op.attrs:\n temp = getattr(xin, attr)\n if callable(temp):\n pmsg = temp()\n else:\n pmsg = temp\n print(op.message, attr, '=', pmsg, file=file)\n return _print_fn", "def test_print1(self):\n writer = StringIO()\n collatz_print(writer, 100, 200, 125)\n self.assertEqual(writer.getvalue(), \"100 200 125\\n\")", "def printOutput(self):\n pass", "def _print(self, *args):\n return _ida_hexrays.cnumber_t__print(self, *args)", "def setPrint():\n (e,d,sr,sw) = codecs.lookup('utf-8')\n unicode_to_utf8 = sw(sys.stdout)\n sys.stdout = unicode_to_utf8", "def pr(string, verbose):\n if(verbose):\n print(string)", "def print(*args, sep=\" \"):\n pass", "def printv(self, *arg):\n if self.verbose:\n print(*arg)", "def print(self):\n\n print(self)", "def _p(self, *args, level=2, **kwargs):\n if self._verbosity >= level:\n print(*args, **kwargs)", "def test_print2(self):\n writer = StringIO()\n collatz_print(writer, 201, 210, 89)\n self.assertEqual(writer.getvalue(), \"201 210 89\\n\")", "def print_pointers(self):\n\n ### FILL IN ###", "def foo_printer(self):\n print(\"\\nHi I'm {}\".format(self.foo))", "def printed(method):\n\t\tdef wrapper(cls, *args):\n\t\t\tif cls.verbose:\n\t\t\t\treturn method(cls, *args)\n\t\treturn wrapper", "def print_me(self, tabs=0, tab=' '):\n pre = tab*tabs\n print(pre+'Producer:')\n print(pre+' produces:', self._produces)\n print(pre+' consumes:', self._consumes)\n print(pre+' transfer:', self._transfer)\n print(pre+' capacity:', self._capacity)", "def _print(cls, quad):\n\t\tprint(\"\\nLIGHT OUTPUT:\\n<<<<{}>>>>\".format(ast.literal_eval(str(cls.get_address_value(quad.result)))))\n\t\tprint(\"END\")\n\n\t\tvar = cls.get_address_value(quad.result)\n\t\tif isinstance(var, collections.Iterable):\n\t\t\tprint(\"DEEP COPY\")\n\t\t\tcls.print_queue.enqueue(copy.deepcopy(var))\n\t\telse:\n\t\t\tcls.print_queue.enqueue(var)", "def printout(*args, **kwargs):\n console_print(sys.stdout, *args, **kwargs)", "def pprint(x):\n if is_theano_object(x):\n return _gettheano().printing.pprint(x)\n else:\n return str(x)", "def PrettyPrint(self):\r\n 
print(self.data)\r\n return", "def print(self):\n self.print_avec_separateur(\" \")", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def pprint_helper(self, angle, indent):\n # just here for defining the interface; work is done in subclasses\n pass", "def _pprint(params, offset=0, printer=repr):\n # Do a multi-line justified repr:\n param_names = [p for p in params.keys() if p is not \"cost\"]\n param_names.sort()\n\n params_list = list()\n this_line_length = offset\n line_sep = ',\\n' + (1 + offset // 2) * ' '\n for i, name in enumerate(param_names):\n value = params[name]\n if isinstance(value, float):\n this_repr = '%s=%s' % (name, str(value))\n else:\n this_repr = '%s=%s' % (name, printer(value))\n if len(this_repr) > 500:\n this_repr = this_repr[:300] + '...' + this_repr[-100:]\n if i > 0:\n if (this_line_length + len(this_repr) >= 75 or '\\n' in this_repr):\n params_list.append(line_sep)\n this_line_length = len(line_sep)\n else:\n params_list.append(', ')\n this_line_length += 2\n params_list.append(this_repr)\n this_line_length += len(this_repr)\n # options = np.get_printoptions()\n # np.set_printoptions(**options)\n lines = ''.join(params_list)\n # Strip trailing space to avoid nightmare in doctests\n lines = '\\n'.join(l.rstrip(' ') for l in lines.split('\\n'))\n return lines", "def init_printing(pretty_print=True, order=None, use_unicode=None):\n if pretty_print:\n stringify_func = lambda arg: pretty(arg, order=order, use_unicode=use_unicode)\n else:\n stringify_func = sstrrepr\n\n try:\n import IPython\n\n ip = IPython.ipapi.get()\n\n if ip is not None:\n def result_display(self, arg):\n \"\"\"IPython's pretty-printer display hook.\n\n This function was adapted from:\n\n ipython/IPython/hooks.py:155\n\n \"\"\"\n if self.rc.pprint:\n out = stringify_func(arg)\n\n if '\\n' in out:\n print\n\n print out\n else:\n print repr(arg)\n\n ip.set_hook('result_display', result_display)\n return\n except ImportError:\n pass\n\n import __builtin__, sys\n\n def displayhook(arg):\n \"\"\"Python's pretty-printer display hook.\n\n This function was adapted from:\n\n http://www.python.org/dev/peps/pep-0217/\n\n \"\"\"\n if arg is not None:\n __builtin__._ = None\n print stringify_func(arg)\n __builtin__._ = arg\n\n sys.displayhook = displayhook", "def print_verbose(self) -> None:\n print(self)\n if self.meta is not None:\n print(self.meta.__repr__())", "def _print(self, *args):\n return _ida_hexrays.cinsn_t__print(self, *args)", "def my_print(self):\n if self.__size == 0:\n print(\"\")\n return\n [print(\"\") for x in range(0, self.__position[1])]\n for i in range(0, self.__size):\n [print(\" \", end=\"\") for i in range(0, self.__position[0])]\n [print(\"#\", end=\"\") for j in range(0, self.__size)]\n print(\"\")", "def sequential_print_statements():\n pass", "def print_post():\n print('| | |'),", "def print_(*args, **kwargs):\r\n fp = kwargs.pop(\"file\", sys.stdout)\r\n if fp is None:\r\n return\r\n def write(data):\r\n if not isinstance(data, basestring):\r\n data = str(data)\r\n fp.write(data)\r\n want_unicode = False\r\n sep = kwargs.pop(\"sep\", None)\r\n if sep is not None:\r\n if isinstance(sep, 
unicode):\r\n want_unicode = True\r\n elif not isinstance(sep, str):\r\n raise TypeError(\"sep must be None or a string\")\r\n end = kwargs.pop(\"end\", None)\r\n if end is not None:\r\n if isinstance(end, unicode):\r\n want_unicode = True\r\n elif not isinstance(end, str):\r\n raise TypeError(\"end must be None or a string\")\r\n if kwargs:\r\n raise TypeError(\"invalid keyword arguments to print()\")\r\n if not want_unicode:\r\n for arg in args:\r\n if isinstance(arg, unicode):\r\n want_unicode = True\r\n break\r\n if want_unicode:\r\n newline = unicode(\"\\n\")\r\n space = unicode(\" \")\r\n else:\r\n newline = \"\\n\"\r\n space = \" \"\r\n if sep is None:\r\n sep = space\r\n if end is None:\r\n end = newline\r\n for i, arg in enumerate(args):\r\n if i:\r\n write(sep)\r\n write(arg)\r\n write(end)", "def print_(*args, **kwargs):\r\n fp = kwargs.pop(\"file\", sys.stdout)\r\n if fp is None:\r\n return\r\n def write(data):\r\n if not isinstance(data, basestring):\r\n data = str(data)\r\n fp.write(data)\r\n want_unicode = False\r\n sep = kwargs.pop(\"sep\", None)\r\n if sep is not None:\r\n if isinstance(sep, unicode):\r\n want_unicode = True\r\n elif not isinstance(sep, str):\r\n raise TypeError(\"sep must be None or a string\")\r\n end = kwargs.pop(\"end\", None)\r\n if end is not None:\r\n if isinstance(end, unicode):\r\n want_unicode = True\r\n elif not isinstance(end, str):\r\n raise TypeError(\"end must be None or a string\")\r\n if kwargs:\r\n raise TypeError(\"invalid keyword arguments to print()\")\r\n if not want_unicode:\r\n for arg in args:\r\n if isinstance(arg, unicode):\r\n want_unicode = True\r\n break\r\n if want_unicode:\r\n newline = unicode(\"\\n\")\r\n space = unicode(\" \")\r\n else:\r\n newline = \"\\n\"\r\n space = \" \"\r\n if sep is None:\r\n sep = space\r\n if end is None:\r\n end = newline\r\n for i, arg in enumerate(args):\r\n if i:\r\n write(sep)\r\n write(arg)\r\n write(end)", "def debugprint(r, prefix='', depth=-1, done=None, print_type=False,\r\n file=sys.stdout, print_destroy_map=False,\r\n print_view_map=False, order=None, ids='CHAR',\r\n stop_on_name=False, prefix_child=None):\r\n if depth == 0:\r\n return\r\n\r\n if order is None:\r\n order = []\r\n\r\n if done is None:\r\n done = dict()\r\n\r\n if print_type:\r\n type_str = ' <%s>' % r.type\r\n else:\r\n type_str = ''\r\n\r\n if prefix_child is None:\r\n prefix_child = prefix\r\n\r\n def get_id_str(obj):\r\n if obj in done:\r\n id_str = done[obj]\r\n elif ids == \"id\":\r\n id_str = \"[@%s]\" % str(id(r))\r\n elif ids == \"int\":\r\n id_str = \"[@%s]\" % str(len(done))\r\n elif ids == \"CHAR\":\r\n id_str = \"[@%s]\" % char_from_number(len(done))\r\n elif ids == \"\":\r\n id_str = \"\"\r\n done[obj] = id_str\r\n return id_str\r\n\r\n if hasattr(r.owner, 'op'):\r\n # this variable is the output of computation,\r\n # so just print out the apply\r\n a = r.owner\r\n\r\n r_name = getattr(r, 'name', '')\r\n # normally if the name isn't set, it'll be None, so\r\n # r_name is None here\r\n if r_name is None:\r\n r_name = ''\r\n\r\n if print_destroy_map:\r\n destroy_map_str = str(getattr(r.owner.op, 'destroy_map', ''))\r\n else:\r\n destroy_map_str = ''\r\n\r\n if print_view_map:\r\n view_map_str = str(getattr(r.owner.op, 'view_map', ''))\r\n else:\r\n view_map_str = ''\r\n if destroy_map_str and destroy_map_str != '{}':\r\n destroy_map_str = 'd=' + destroy_map_str\r\n if view_map_str and view_map_str != '{}':\r\n view_map_str = 'v=' + view_map_str\r\n\r\n o = ''\r\n if order:\r\n o = 
str(order.index(r.owner))\r\n already_printed = a in done # get_id_str put it in the dict\r\n id_str = get_id_str(a)\r\n\r\n if len(a.outputs) == 1:\r\n print >> file, '%s%s %s%s \\'%s\\' %s %s %s' % (prefix, a.op,\r\n id_str,\r\n type_str, r_name,\r\n destroy_map_str,\r\n view_map_str,\r\n o)\r\n else:\r\n print >> file, '%s%s.%i %s%s \\'%s\\' %s %s %s' % (prefix, a.op,\r\n a.outputs.index(r),\r\n id_str, type_str,\r\n r_name,\r\n destroy_map_str,\r\n view_map_str,\r\n o)\r\n if not already_printed:\r\n if (not stop_on_name or\r\n not (hasattr(r, 'name') and r.name is not None)):\r\n new_prefix = prefix_child + ' |'\r\n new_prefix_child = prefix_child + ' |'\r\n for idx, i in enumerate(a.inputs):\r\n if idx == len(a.inputs) - 1:\r\n new_prefix_child = prefix_child + ' '\r\n\r\n debugprint(i, new_prefix, depth=depth - 1, done=done,\r\n print_type=print_type, file=file, order=order,\r\n ids=ids, stop_on_name=stop_on_name,\r\n prefix_child=new_prefix_child)\r\n else:\r\n #this is an input variable\r\n id_str = get_id_str(r)\r\n print >> file, '%s%s %s%s' % (prefix, r, id_str, type_str)\r\n\r\n return file", "def bpprint(self, out=None):\n if out is None:\n out = sys.stdout\n print(self.bpformat(), file=out)", "def vprint(expr, **settings):\n\n outstr = vsprint(expr, **settings)\n\n import builtins\n if (outstr != 'None'):\n builtins._ = outstr\n print(outstr)", "def _pprint(params, offset=0, printer=repr):\n # Do a multi-line justified repr:\n options = numpy.get_printoptions()\n numpy.set_printoptions(precision=5, threshold=64, edgeitems=2)\n params_list = list()\n this_line_length = offset\n line_sep = ',\\n' + (1 + offset // 2) * ' '\n for i, (k, v) in enumerate(sorted(params.items())):\n if isinstance(v, float):\n # use str for representing floating point numbers\n # this way we get consistent representation across\n # architectures and versions.\n this_repr = '%s=%s' % (k, str(v))\n else:\n # use repr of the rest\n this_repr = '%s=%s' % (k, printer(v))\n if len(this_repr) > 500:\n this_repr = this_repr[:300] + '...' 
+ this_repr[-100:]\n if i > 0:\n if this_line_length + len(this_repr) >= 75 or '\\n' in this_repr:\n params_list.append(line_sep)\n this_line_length = len(line_sep)\n else:\n params_list.append(', ')\n this_line_length += 2\n params_list.append(this_repr)\n this_line_length += len(this_repr)\n\n numpy.set_printoptions(**options)\n lines = ''.join(params_list)\n # Strip trailing space to avoid nightmare in doctests\n lines = '\\n'.join(l.rstrip(' ') for l in lines.split('\\n'))\n return lines", "def print_(*args, **kwargs):\n fp = kwargs.pop(\"file\", sys.stdout)\n if fp is None:\n return\n\n def write(data):\n if not isinstance(data, basestring):\n data = str(data)\n fp.write(data)\n want_unicode = False\n sep = kwargs.pop(\"sep\", None)\n if sep is not None:\n if isinstance(sep, unicode):\n want_unicode = True\n elif not isinstance(sep, str):\n raise TypeError(\"sep must be None or a string\")\n end = kwargs.pop(\"end\", None)\n if end is not None:\n if isinstance(end, unicode):\n want_unicode = True\n elif not isinstance(end, str):\n raise TypeError(\"end must be None or a string\")\n if kwargs:\n raise TypeError(\"invalid keyword arguments to print()\")\n if not want_unicode:\n for arg in args:\n if isinstance(arg, unicode):\n want_unicode = True\n break\n if want_unicode:\n newline = unicode(\"\\n\")\n space = unicode(\" \")\n else:\n newline = \"\\n\"\n space = \" \"\n if sep is None:\n sep = space\n if end is None:\n end = newline\n for i, arg in enumerate(args):\n if i:\n write(sep)\n write(arg)\n write(end)" ]
[ "0.75577617", "0.73375154", "0.6986672", "0.698475", "0.6944995", "0.692333", "0.6899106", "0.6898902", "0.68146646", "0.6806209", "0.6753795", "0.67497987", "0.6744008", "0.6700308", "0.6691256", "0.6674591", "0.6658083", "0.66091245", "0.6606931", "0.6601862", "0.6563738", "0.6561717", "0.65549695", "0.6494838", "0.6473391", "0.64491546", "0.6411177", "0.6340302", "0.6339321", "0.6335031", "0.6332035", "0.6315847", "0.631272", "0.6297732", "0.62969106", "0.6283717", "0.6279154", "0.6271603", "0.62673396", "0.6265511", "0.62629336", "0.6258366", "0.6258278", "0.62501305", "0.6248315", "0.62459755", "0.6244254", "0.6242083", "0.62393075", "0.62156516", "0.6208198", "0.62068796", "0.62062824", "0.62062824", "0.6194123", "0.6189738", "0.6183852", "0.6183035", "0.61697906", "0.61614454", "0.6160741", "0.61544997", "0.61528033", "0.6150831", "0.6147288", "0.61380607", "0.613793", "0.61300766", "0.61278135", "0.6125416", "0.6114217", "0.61126333", "0.6100682", "0.60998785", "0.6096818", "0.6081694", "0.6076982", "0.6072701", "0.6060028", "0.60581726", "0.6055044", "0.6055044", "0.6055044", "0.6055044", "0.6055044", "0.6055044", "0.6043662", "0.6037599", "0.60336643", "0.6030174", "0.60290223", "0.60242903", "0.6016989", "0.6004274", "0.60005474", "0.60005474", "0.60003483", "0.599558", "0.59923434", "0.5979316", "0.59777945" ]
0.0
-1
Returns true if both objects are equal
def __eq__(self, other): if not isinstance(other, ClairpbVulnerability): return False return self.__dict__ == other.__dict__
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self,other):\n try: return self.object==other.object and isinstance(self,type(other))\n except: return False", "def __eq__(self, other):\n if isinstance(self, other.__class__):\n return self.__dict__ == other.__dict__\n return False", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def 
__eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n return False", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n return False", "def __eq__(self, other):\r\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n # Ensure same class and values match\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n else:\n return False", "def is_equal(self, a, b):\n return a is b", "def is_equal(self, a, b):\n return a == b", "def __eq__(self, other):\n return self is other", "def __eq__(self, other):\n return self is other", "def __eq__(self, other):\r\n if isinstance(other, self.__class__):\r\n return self.__dict__ == other.__dict__\r\n else:\r\n return False", "def is_equal(o1: object, o2: object) -> bool:\n if o1 is None and o2 is None:\n return True\n if o1 is None:\n return False\n return o1 == o2", "def __eq__(self,other):\n return self is other", "def is_equal(self, a, b):\n return a.X[0] == b.X[0]", "def __eq__(self, other):\n return type(self) == type(other) and self.id == other.id", "def __eq__(self, other) -> bool:\n if json.dumps(self.data,sort_keys=True) == json.dumps(other.data,sort_keys=True):\n return True\n else:\n return False", "def __eq__(self, other):\n if not isinstance(other, Single2HaObject):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__hash__() == other.__hash__()\n return False", "def __eq__(self, other):\n if self.__class__ != other.__class__:\n return False\n if self.primary != other.primary:\n return False\n return True", "def __eq__(self, other) -> bool:\n if other is None:\n return False\n return self.__hash__() == other.__hash__()", "def __eq__(self, other):\n if not isinstance(other, ObjectInfo):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self: _TT, other: object) -> bool:\n return self.eq(other) # type: ignore", "def __eq__(self, other):\n return id(self) == id(other)", "def __eq__(self, other) -> bool:\n return type(self) == type(other) and \\\n self._id == other.id and \\\n self.code == other.code and \\\n self.name == other.name and \\\n self.gender == other.gender and \\\n self.date_of_birth == other.date_of_birth", "def equals(self, other): # -> bool:\n ...", "def equals(self, obj: object) -> bool:\n ...", "def __eq__(self, other):\n for attr in self._attrs_to_save:\n try:\n if getattr(self, attr) != getattr(other, attr):\n return False\n except AttributeError:\n return False\n return True", "def __eq__(self, other):\n if type(other) is type(self):\n return (self.x == other.x and self.y == other.y and self.z == other.z)\n return False", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.x == other.x and self.y == other.y\n return False", "def __eq__(self, other: object) -> bool:\n if not isinstance(other, self.__class__):\n return NotImplemented\n\n return (\n self.name,\n self.submit_at,\n self.subreddit,\n self.title,\n self.body_template,\n ) == (\n other.name,\n other.submit_at,\n other.subreddit,\n other.title,\n other.body_template,\n )", "def __eq__(self, other):\n # Check that we share 
the same class as this object\n if not isinstance(other, type(self)):\n return False\n\n return hash(self) == hash(other)", "def __eq__(self, other):\n if not isinstance(other, PreviewObjectAutofill):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return equal(self, other)", "def __eq__(self, other: Any) -> bool:\n return self.__class__ is other.__class__ and self.identifier == other.identifier", "def __eq__(self, other):\n return self.__id == other.get_id()", "def __eq__ (self, other):\n if type(self) == type(other):\n return self._m == other._m\n else:\n return False", "def __eq__(self, other):\n if not isinstance(other, Referent):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.properties == other.properties", "def __eq__(self, other):\n return self.items() == other.items()", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return self.x == other.x and self.y == other.y", "def __eq__(self, other):\n\n if self is other:\n return True\n return hash(self) == hash(other)", "def __eq__(self, other):\n if other._field1 == self._field1:\n return True\n return False", "def same_as(self, other):\n return super().__eq__(other)", "def __eq__(self, other):\n try:\n return other and \\\n self.id == other.id\n\n except AttributeError:\n return False", "def __eq__(self, other):\r\n\t\treturn self._to_pylist() == other._to_pylist()", "def __eq__(self, other):\n if not isinstance(other, Fiddle):\n return False\n\n return self.__dict__ == other.__dict__" ]
[ "0.8088132", "0.8088132", "0.8054589", "0.7982687", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.7961088", "0.7961088", "0.79433626", "0.79303336", "0.7926563", "0.7897525", "0.78826123", "0.78826123", "0.78806067", "0.7872423", "0.7868354", "0.78668815", "0.7825702", "0.7819993", "0.78162885", "0.78078854", "0.78068274", "0.7796298", "0.7794721", "0.7784825", "0.77790844", "0.7769397", "0.77534705", "0.7746211", "0.7741107", "0.77282816", "0.7725766", "0.7719537", "0.770273", "0.7685999", "0.7677552", "0.76739407", "0.7664857", "0.76557016", "0.7655046", "0.76282835", "0.7625795", "0.76242626", "0.76237214", "0.76237214", "0.76237214", "0.7617347", "0.7600536", "0.7599156", "0.7595863", "0.75945824", "0.7594092", "0.75899327" ]
0.0
-1
Returns true if both objects are not equal
def __ne__(self, other): return not self == other
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __ne__(self, other: object) -> bool:\n if self.__eq__(other):\n return False\n return True", "def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)", "def __ne__(self, other) -> bool:\n return not self.__eq__(other)", "def __eq__(self, other):\n return not self.__ne__(other)", "def __ne__(self, other):\n if self.__eq__(other):\n return False\n return True", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\r\n return not self == other", "def __ne__(self, other):\r\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__ (self, other):\n return not self == other" ]
[ "0.845611", "0.8391477", "0.8144138", "0.81410587", "0.8132492", "0.8093973", "0.80920255", "0.80920255", "0.80920255", "0.8085325", "0.8085325", "0.8076365", "0.8076365", "0.8065748" ]
0.0
-1
Calculates the output size of the last conv layer.
def _get_conv_out(self, shape) -> int: conv_out = self.conv(torch.zeros(1, *shape)) return int(np.prod(conv_out.size()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def output_size(self) -> int:\n return self.output_dim", "def get_output_shape(self):\n weights = self.W.get_shape().as_list()\n input_size = np.asarray(self.incoming_shape[-3:-1])\n strides = np.asarray(self.strides[-3:-1])\n kernels = np.asarray(weights[0:2])\n num_output = weights[-1]\n dilations = np.asarray(self.dilation_rate)\n if (isinstance(self.padding, list) or isinstance(self.padding, tuple)) and len(self.padding) == 2:\n output_size = np.asarray(\n np.ceil((input_size + 2 * np.asarray(self.padding) - kernels - (kernels - 1) * (\n dilations - 1)) / strides + 1),\n dtype=np.int)\n else:\n output_size = np.asarray(\n np.ceil(input_size / strides) if self.padding == \"SAME\" or self.padding == \"ZEROPAD\" else np.ceil(\n (input_size - (kernels - 1) * dilations) / strides), dtype=np.int)\n \n output_shape = self.incoming_shape[:]\n output_shape[-3:-1] = output_size.tolist()\n output_shape[-1] = num_output\n return output_shape", "def get_final_emb_size(self):\n size = self.n_layers * 1 * 2 * self.hidden_size\n return size", "def output_shape(self) ->torch.Size:\n return self._computed_output_shape()", "def __len__(self):\n num_x, num_y = self.conv_dims()\n return num_x * num_y", "def _output_size_conv2d(conv, size):\n o_size = np.array(size) + 2 * np.array(conv.padding)\n o_size -= np.array(conv.dilation) * (np.array(conv.kernel_size) - 1)\n o_size -= 1\n o_size = o_size / np.array(conv.stride) + 1\n return np.floor(o_size)", "def upperLayersSize(self):\n return sys.getsizeof(self.segment)", "def output_shape(self) ->torch.Size:\n input_shape = self.input_shape\n if self._reduce_mode in {None, 'none', 'None'}:\n return input_shape\n elif self._reduce_mode == 'concat':\n if len(input_shape) > 1:\n return input_shape[:-2] + (input_shape[-1] * input_shape[-2],)\n return input_shape\n else:\n return input_shape[1:]", "def output_size(self) -> int:\n return self.out_sz", "def get_model_output_size(self) -> int:\n pass", "def output_size(self):\n return self._output_size", "def output_size(self):\n return self._output_size", "def get_conv1d_output_size(input_size, kernel_size, stride):\n return ((input_size - kernel_size)//stride) + 1", "def get_conv1d_output_size(input_size, kernel_size, stride):\n return ((input_size - kernel_size)//stride) + 1", "def get_output_dim(self) -> int:\n raise NotImplementedError", "def output_dim(self) -> int:\n return 2 * self._hidden_dim", "def output_dim(self):\n return self._output_dim", "def output_dim(self) -> int:\n return (\n self.mlp_hidden_dims[-1]\n if self.mlp_hidden_dims is not None\n else self.blocks_dims[-1]\n )", "def _total_chunk_size_left(self):\n if self.streaming_type == 'reshape':\n return self.N_l // self.conv_factor\n elif self.streaming_type == 'mask':\n return self.N_l // self.conv_factor * self.n_layers\n elif self.unidir:\n return 10000 // self.conv_factor\n else:\n return 10000 // self.conv_factor", "def get_layer_shape(self,layer_id):\n return self.net.blobs[layer_id].data.shape[1:] # Chop off batch size", "def get_output_dim(self) -> int:\n raise NotImplementedError", "def output_size(self) -> int:\n return self.win_length", "def batch_size(self):\n return self._first_rgb.shape[0]", "def get_output_tensor_size(self, index):\n return self._engine.get_output_tensor_size(index)", "def batch_size(self):\n self.validate_shape_and_dtype()\n return self.rgb.shape[0]", "def outputSize(in_size, kernel_size, stride, padding):\n output = int((in_size - kernel_size + 2 * padding) / stride) + 1\n return output", "def num_layers(self): # 
-> int:\n ...", "def getOutputLength(self):\n return len(self.Y[0])", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n if self.incoming_shape == self.scale_size:\n self.out = incoming\n else:\n self.out = resize2d(incoming, size=self.scale_size, method=self.method,\n align_corners=self.align_corners)\n if self.method_name == 'AREA':\n self.out = tf.stop_gradient(self.out)\n \n return self.out", "def output_shape(self):\r\n return self.detector.output_shape", "def num_layers(self):\n\n return 2 + self.num_hidden_layers", "def layer_size(self, layer_id): # -> int:\n ...", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def output_width(self):\n\t\treturn self.output_shape_param('W')", "def l_out_conv(layer_num, kernel_size, pool=False):\n l_out_list = []\n l_in = constants.SHAPE_OF_ONE_DATA_POINT[1]\n for i in range(layer_num):\n l_out = l_out_conv1d(l_in, kernel_size, stride=2)\n l_out = l_out_conv1d(l_out, kernel_size, stride=2)\n\n l_out_list.append(l_out)\n\n if pool:\n pool_size = 3\n l_out = l_out_pool(l_out, pool_size)\n l_out_list.append(l_out)\n l_in = l_out\n\n # make a copy and reverse for decoder size def\n\n l_out_list_copy = copy.deepcopy(l_out_list)\n l_out_list.append(32)\n encoder_sizes = l_out_list\n l_out_list_copy.reverse()\n l_out_list_copy.append(constants.SHAPE_OF_ONE_DATA_POINT[1])\n decoder_sizes = l_out_list_copy\n return encoder_sizes, decoder_sizes", "def get_hidden_layer_size(self):\r\n return self.hidden_layer_size", "def get_output_shape(self):\n return self.shape", "def get_out_dim(self) -> int:\n return self.out_dim", "def _input_size(self):\n return self.embedding_size + self.hidden_size", "def size_out(self):\n if isinstance(self.ensemble.neuron_type, Direct):\n # This will prevent users from connecting/probing Direct neurons\n # (since there aren't actually any neurons being simulated).\n return 0\n return self.ensemble.n_neurons", "def outdim(self):\n return len(self.getSensors())", "def set_output_shape(self):\n self.output_shape = ((self.input_shape[0] // self.stride[0],\n self.input_shape[1] // self.stride[1],\n self.input_shape[2]\n ))", "def __len__(self):\n return math.ceil(self.number_of_images / self.batch_size)", "def output_length(self,\n inp_len: Optional[th.Tensor]) -> Optional[th.Tensor]:\n if self.last_choice is None:\n return inp_len\n if inp_len is None:\n return None\n return th.div(inp_len,\n self.src_sr[self.last_choice],\n rounding_mode=\"trunc\") * self.dst_sr[self.last_choice]", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, n_units]\n return [s if isinstance(s, int) and s >= 0 else -1 for s in self.incoming_shape[0:2] + [self.n_units]]", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, n_units]\n return [s if isinstance(s, int) and s >= 0 else -1 for s in self.incoming_shape[0:2] + [self.n_units]]", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, n_units]\n return [s if isinstance(s, int) and s 
>= 0 else -1 for s in self.incoming_shape[0:2] + [self.n_units]]", "def output_height(self):\n\t\treturn self.output_shape_param('H')", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, x, y, n_units]\n return [s if isinstance(s, int) and s >= 0 else -1\n for s in self.incoming_shape[0:2] + self.incoming_shape[2:-1] + [self.n_units]]", "def conv_to_fc_size(\n input_shape, conv_depth, pools,\n stride=[2, 2, 2], padding='SAME',\n dropout_keep_prob=1.0):\n h, w, d = input_shape\n if padding == 'SAME':\n for i in range(pools):\n h = math.ceil(float(h) / float(stride[0]))\n w = math.ceil(float(w) / float(stride[1]))\n d = math.ceil(float(d) / float(stride[2])) \n else:\n # 'VALID' padding\n pass\n \n return conv_depth * h * w * d", "def __len__(self):\n return int(np.ceil(self.total_frame_count / self.batch_size))", "def __len__(self):\n _, timesteps, height, width = self.data.shape\n height //= self.size\n width //= self.size\n\n if self.subset == 'train':\n out = self.length\n elif self.subset == 'all':\n out = height * width\n else:\n out = (height // 2) * (width // 2)\n\n if not self.time:\n out *= timesteps\n\n return out", "def _n_features_out(self):\n return self.components_.shape[0]", "def layers_compressed_size(self):\n # don't have this information at this point\n return None", "def layers_compressed_size(self):\n # don't have this information at this point\n return None", "def num_layers(self):\n return self._num_layers", "def output_size(self):\n raise NotImplementedError('This is an interface class, please use a derived instance')", "def calculate_shape_decreases_3D_Net(self, input_crop_size):\n cropsize_x, cropsize_y, cropsize_z = input_crop_size\n input_crop = torch.ones((1, cropsize_z, cropsize_x, cropsize_y))\n net_output, _ = self.forward_net(input_crop)\n _, outsize_z, outsize_y, outsize_x = net_output.size()\n\n return cropsize_x-outsize_x, cropsize_y-outsize_y, cropsize_z-outsize_z", "def output_dims(self) -> Optional[Tuple[int]]:\n return None", "def size(self):\n return self.num_inputs, self.num_outputs", "def get_layer_size(self, layer_ind):\n assert(layer_ind < self.num_layers)\n return self._layer_sizes[layer_ind]", "def compute_output_shape(self, input_shape):\n output_shape = [0] * self.rank\n for d in range(self.rank):\n output_shape[d] = sum(self.paddings[d]) + input_shape[d]\n return tf.TensorShape(output_shape)", "def output_mb(self):\n total_output_size = sum([t.shuffle_mb_written for t in self.tasks])\n return total_output_size", "def get_num_of_output_tensors(self):\n return self._engine.get_num_of_output_tensors()", "def compute_output_shape(self, input_shape):\n batch_size = input_shape[0]\n sequence_length = input_shape[1]\n return (batch_size, sequence_length)", "def size(self):\n\n frame = self.get_frame()\n\n # Unpack array dimensions\n height, width, layers = np.array(frame).shape\n\n return width, height", "def get_output_shape(self):\n return self.out.shape.as_list()", "def compute_output_shape(self, input_shape):\n if tf.keras.backend.image_data_format() == 'channels_first':\n return (input_shape[0][0], input_shape[0][1]) + input_shape[1][2:4]\n\n return (input_shape[0][0],) + input_shape[1][1:3] + (input_shape[0][-1],)", "def get_model_output_dimension(self):\r\n raise NotImplementedError()", "def __len__(self):\n # print(\"len: \" + str(math.floor(len([name for name in os.listdir(self.imgs_dir) if os.path.isfile(self.imgs_dir+'//'+name)])/self.batch_size)-1)\n return 
math.floor(len([name for name in os.listdir(self.imgs_dir) if\n os.path.isfile(self.imgs_dir + '//' + name)]) / self.batch_size)", "def __len__(self):\n return self.flat_image.size", "def get_frame_size(self):\n return self._frames.shape[-1]", "def size_out(self):\n return self.dimensions", "def size(self) -> tf.Tensor:", "def __len__(self):\n\n return math.ceil(len(self.img_files) * self.gen_count / self.batch_size)", "def compute_output_shape(self, input_shape):\r\n return input_shape", "def get_num_hidden(self) -> int:\n return self.output_dim", "def __len__(self):\n return int(np.ceil(len(self.image_filenames) / (self.batch_size)))", "def __find_net_dims(self):\n\n input_params = INPUT_CHANNELS * INPUT_SIZE ** 2\n net_dims = [input_params]\n for w in self._conv_weights + self._lin_weights:\n net_dims.append(w.shape[0])", "def get_size(self):\n return self.get_params().shape[0]", "def target_size(self) -> pulumi.Output[float]:\n return pulumi.get(self, \"target_size\")", "def set_output_shape(self):\n self.output_shape = (reduce(mul, self.input_shape),)", "def get_height(self):\n height = 0\n for layer, ldata in self.conf['Layers'].items():\n layer_t = ldata['params']['thickness']\n height += layer_t\n return height", "def estimate_cudnn_parameter_size(input_size, hidden_size, direction):\n single_rnn_size = 8 * hidden_size + 4 * (hidden_size * input_size) + 4 * (hidden_size * hidden_size)\n return direction * single_rnn_size", "def compute_output_shape(self, input_shape):\n \n assert input_shape and len(input_shape) == 2\n return input_shape[0], self.n_clusters", "def num_channels_per_output(cls) -> list[tuple[int, ...]]:\n return [\n (16, 24, 40, 112, 320),\n (16, 24, 40, 112, 320),\n (16, 24, 48, 120, 352),\n (24, 32, 48, 136, 384),\n (24, 32, 56, 160, 448),\n (24, 40, 64, 176, 512),\n (32, 40, 72, 200, 576),\n (32, 48, 80, 224, 640),\n (32, 56, 88, 248, 704),\n (72, 104, 176, 480, 1376),\n ]", "def layers_sizes(self):\n return iter([self.delta_h*l for l in range(int(self.h/self.delta_h)-1)])", "def get_output_shape(self):\n return self.incoming_shape", "def get_output_shape(self):\n return self.incoming_shape", "def get_output_shape(self):\n return self.incoming_shape", "def convert_size(g, op, block):\n\n input_x = g.get_node(op.input(\"Input\")[0])\n out = _op.ndarray_size(input_x, dtype=\"int64\")\n out = _op.expand_dims(out, axis=0)\n g.add_node(op.output(\"Out\")[0], out)", "def compute_level_output_shape(n_filters, depth, pool_size, image_shape):\n output_image_shape = np.asarray(np.divide(image_shape, np.power(pool_size, depth)), dtype=np.int32).tolist()\n return tuple([None, n_filters] + output_image_shape)", "def compute_level_output_shape(n_filters, depth, pool_size, image_shape):\n output_image_shape = np.asarray(np.divide(image_shape, np.power(pool_size, depth)), dtype=np.int32).tolist()\n return tuple([None, n_filters] + output_image_shape)", "def get_total_input_dimension(self, layers):\n self._validate_layer_names(layers)\n total = 0\n for layer in self.layers:\n if layer.layer_name in layers:\n total += layer.get_input_space().get_total_dimension()\n return total" ]
[ "0.72758055", "0.7067759", "0.70510364", "0.7000888", "0.68895096", "0.6863038", "0.6814944", "0.68022835", "0.67512447", "0.67106795", "0.6696877", "0.6696877", "0.66832334", "0.66832334", "0.6663041", "0.6635598", "0.6611119", "0.6571467", "0.6549242", "0.65476173", "0.6491185", "0.64886093", "0.64730805", "0.64347905", "0.6408419", "0.6395363", "0.63848263", "0.6374435", "0.6337225", "0.6329526", "0.63214976", "0.6321218", "0.6317406", "0.6317406", "0.6317406", "0.6317406", "0.6317406", "0.6313596", "0.63118535", "0.6292788", "0.6285853", "0.6261559", "0.62520146", "0.6235888", "0.62108845", "0.6203142", "0.61921227", "0.61860853", "0.6169535", "0.6169535", "0.6169535", "0.6117509", "0.6089173", "0.6081267", "0.6044872", "0.6041074", "0.6037495", "0.60354316", "0.60354316", "0.60304433", "0.6029954", "0.60257196", "0.6022036", "0.6017297", "0.6010937", "0.6010702", "0.6006798", "0.59927523", "0.5988746", "0.59523195", "0.59461147", "0.5939062", "0.59378123", "0.5931183", "0.5929887", "0.5929149", "0.5925468", "0.59234643", "0.590295", "0.5899945", "0.5894715", "0.58903706", "0.5888755", "0.58834404", "0.5874345", "0.5871627", "0.5868094", "0.5853292", "0.5848595", "0.5848207", "0.58449274", "0.58373946", "0.58373946", "0.58373946", "0.5822839", "0.581487", "0.581487", "0.5811419" ]
0.6925207
4
Forward pass through network.
def forward(self, input_x) -> Tensor: conv_out = self.conv(input_x).view(input_x.size()[0], -1) return self.head(conv_out)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self):\n pass", "def forward(self):\n pass", "def forward(self, input):\n\n return self.network(input)", "def forward(self, input):\n\n return self.network(input)", "def forward(self, input):\n\n return self.network(input)", "def forward_pass(self):", "def forward(self, forward):\n\n self._forward = forward", "def fastforward(self):\n self.run_command('fastforward')", "def forward(self, x):\n self.save_net()\n self.perturb_tensors()\n out = self.net.forward(x)\n return out", "def _forward(self, z):\n raise NotImplementedError(\"Forward shouldn't be called!\")", "def forward(self, x):\n return self.net(x)", "def forward( self ):\n self._has_change = True\n print( \"Forward\" )", "def forward(self):\n raise NotImplemented", "def forward(self):\n raise NotImplemented", "def forward(self):\n raise NotImplemented", "def forward(self, x):\n pass", "def forward(self, *args, **kwargs):\n pass", "def forward(self, x_in):\r\n # x_out = torch.zeros_like(x_in)\r\n\r\n for layer in self.layers: #Call forward function of each layer in order\r\n x_out = layer.forward(x_in)\r\n # print(\"Forward pass Seq: \", layer, x_in, x_out)\r\n x_in = x_out # output of the layer is passed as input to the next layer\r\n self.temp = x_in\r\n return x_out", "def forward(self, x):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\n\t\toutput = self._layers[0].forward(x)\n\t\tfor i in range(1, len(self._layers)):\n\t\t\toutput = self._layers[i].forward(output)\n\t\treturn output\n\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def forward(p):\n try:\n if IP in p and p[IP].dst == RD_ADRRESS and p[Ether].src != GW_MAC_ADRRESS and p[Ether].dst == GW_MAC_ADRRESS:\n if p[IP].src not in black_list:\n send(p[1::], iface=IFACE, verbose=0)\n except:\n print(\"error in forward\")\n finally:\n sys.exit()", "def forward(self, x):\r\n out = x + self.conv_block(x) # add skip connections\r\n return out", "def forward(self, inputs):\n\n down0 = self.layer_0(inputs=inputs)\n down1 = self.layer_1(inputs=down0)\n down2 = self.layer_2(inputs=down1)\n down3 = self.layer_3(inputs=down2)\n down4 = self.layer_4(inputs=down3)\n\n up1 = self.layer_7(down4, down3)\n\n up2 = self.layer_8(up1, down2)\n\n up3 = self.layer_9(up2, down1)\n\n up4 = self.layer_10(up3, down0)\n\n up5 = self.layer_11(up4)\n return up5", "def forward(self, distance):\n self.logger.debug(\"forward \" + str(distance))", "def forward(self, x):\n out = x + self.conv_block(x) # add skip connections\n return out", "def forward(self, x):\n out = x + self.conv_block(x) # add skip connections\n return out", "def forward(self, x):\n out = x + self.conv_block(x) # add skip connections\n return out", "def base_forward(self, x):\r\n pass", "def _forward(self, X, **kwargs):\n raise NotImplementedError()", "def go_forward(self):\n command = _build_robovac_command(RobovacModes.GO_FORWARD, RobovacCommands.MOVE)\n message = self._build_command_user_data_message(command)\n\n self._send_packet(message, False)", "def forward(self) -> None:\n self.system.notify(\"Jarvis::Forward\")\n self.media.fast_forward()", "def forward(self)->None:", "def forward(self, x, **kwargs):\n pass", "def forward_graph(self):\n raise NotImplementedError", "def forward(self, output, target):\n raise NotImplementedError", "def 
forward(self, input):\n raise NotImplementedError", "def forward(self, input):\n raise NotImplementedError", "def forward(self):\n self.position += 1", "def forward(self, *args):\n raise NotImplementedError", "def forward(self, *args):\n raise NotImplementedError", "def network_forward(self, X):\n \n #############################################################################\n # TODO: Perform a forward pass on the network and store the caches of #\n # each layer inside the cache_list #\n #############################################################################\n ActivationFunction = None\n if self.hidden_activation_fn == \"sigmoid\":\n ActivationFunction = lambda x: self.sigmoid_forward(x)\n elif self.hidden_activation_fn == \"tanh\":\n ActivationFunction = lambda x: self.tanh_forward(x)\n elif self.hidden_activation_fn == \"relu\":\n ActivationFunction = lambda x: self.relu_forward(x)\n\n Layer1Value, cacheL1 = self.fully_connected_forward(X, self.params[\"W1\"], self.params[\"b1\"])\n Layer1ValueActivation, cacheL1A = ActivationFunction(Layer1Value)\n scores, cacheL2 = self.fully_connected_forward(Layer1ValueActivation, self.params[\"W2\"], self.params[\"b2\"])\n\n # Cache\n cache_list =[cacheL1, cacheL1A, cacheL2]\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return scores, cache_list", "def forward(self, states):\n raise NotImplementedError()", "def move_forward(power):\n message = \"FORWARD:\" + str(power) + '\\n'\n sock.sendall(message)\n return", "def forward(network, X):\r\n activations = []\r\n input = X\r\n for i in range(len(network)):\r\n activations.append(network[i].forward(X))\r\n X = network[i].forward(X)\r\n \r\n assert len(activations) == len(network)\r\n return activations", "def forward(self, *args, **kwargs):\n raise NotImplementedError", "def forward(self, input_tensor: torch.Tensor):\n self.network_output = self.network.forward(input_tensor.type(self.data_type))\n return self.network_output", "def forward_once(self, x):\n\t\t#x = F.normalize(self.network(x), p=2)\n\t\tx = self.network(x)\n\t\treturn x", "def forward(self):\n self.img_gen, self.loss_reg, self.parsav = self.net_G(self.input_P1, self.input_P2, self.input_BP1, self.input_BP2, self.input_SPL1, self.input_SPL2)", "def _forward(self, x):\n global global_epoch\n global_epoch += 1\n bias = -np.ones((x.shape[0], 1))\n tail = np.zeros((x.shape[0], self.dim_hid+self.dim_out))\n nodes = np.concatenate((bias, x, tail), axis=1)\n weight = self.weight * self.connectivity\n for i in range(self.dim_in, self.dim_in+self.dim_hid+self.dim_out):\n net = nodes.dot(weight[i])\n nodes[:,i] = self.__sigmoid(net)\n nodes[:,self.dim_in:self.dim_in+self.dim_hid] *= self.hidden\n return nodes", "def fastforward_all():\n\twhile _running:\n\t\t_running[0].fastforward(noerror=True)", "def forward(self, input):\n raise NotImplementedError()", "def move_forward(self, dist):\r\n self.send_command_without_response(f'forward {dist}')", "def move_forward():\n twister = Twist(linear=Vector3(x=0.5,y=0,z=0),angular=Vector3(x=0,y=0,z=0))\n pub.publish(twister)", "def forward(self, srcip, packet): #gets entire packet and srcip of that packet\n # get route to send packet\n best_route = self.get_route(srcip, packet[DEST]) #is a socket\n\n sock = best_route\n\n\n jsonpack = json.dumps(packet)\n sock.sendall(jsonpack.encode())\n # TODO fix src and dest\n return True", "def forward(self, x):\n out = 
self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.maxpool(out)\n out = self.avgpool(out)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = self.avgpool(out)\n out = torch.flatten(out, 1)\n out = self.fc(out)\n return out", "def forward(self, x):\r\n x = self.conv1(x)\r\n x = self.conv1_BN(x)\r\n x = F.relu(x)\r\n x = self.conv1_dp(x)\r\n x = self.Block2_1(x)\r\n x = self.Block2_2(x)\r\n x = self.Block3_1(x)\r\n x = self.Block3_2(x)\r\n x = self.Block3_3(x)\r\n x = self.Block3_4(x)\r\n x = self.Block4_1(x)\r\n x = self.Block4_2(x)\r\n x = self.Block4_3(x)\r\n x = self.Block4_4(x)\r\n x = self.Block5_1(x)\r\n x = self.Block5_2(x)\r\n x = self.MP(x)\r\n x = x.view(x.size(0),-1)\r\n x = self.fc(x)\r\n \r\n return x", "def forward(self, x):\n raise NotImplementedError", "def forward(self, x):\n raise NotImplementedError", "def test_forward(self):\n validate_forward()", "def forward(self, *args, **kwargs):\n\n raise NotImplementedError()", "def forward(self, srcif, packet):\n # packet is already decoded\n def send_no_route():\n send_src = srcif[:-1]\n send_src += '1'\n self.sockets[srcif].send(json.dumps({\n SRCE: send_src,\n DEST: packet[SRCE],\n TYPE: NRTE,\n MESG: {}\n }).encode())\n # GEt correct route.\n sock_addr = self.get_route(srcif, packet[DEST])\n\n # If no route available, send no route message back\n if sock_addr == None:\n send_no_route()\n else:\n sock = self.sockets[sock_addr]\n # If socket is available, send to proper neighbor.\n sock.send(json.dumps(packet).encode())\n return False", "def forward(self, speed):\n self.controller.forward(speed)", "def forward(self, srcif, packet) -> bool:\n chosen_route = self.get_route(srcif, packet[DEST])\n if chosen_route is None:\n return False\n self.sockets[chosen_route[PEER]].sendall(json.dumps(packet).encode())\n return True", "def forward(self, inputs, outputs):\n super(copy, self).adjoint(inputs, outputs)", "def forward(self, s):", "def forward(self, srcif, packet): \n # TODO: will need to massively update this \n #print(\"PACKET FROM DATA: {0}\".format(packet))\n #print(\"ROUTING TABLE IS: {0}\".format(self.routes))\n dest = packet[\"dst\"]\n chosen_router = self.get_route(srcif, dest)\n if chosen_router is None:\n return False\n\n #TODO implement most specific route and business routes\n outroutes = []\n #print(\"CHOSEN ROUTER ISSSSSSSSSSSSSSSSSS\", chosen_router) \n #print(\"THIS IS FOR FORWARD:\", json.dumps(packet).encode(\"ascii\"))\n chosen_router.send(json.dumps(packet).encode(\"ascii\"))\n #return may need to be changed \n return True", "def forward(self, batch):\n raise NotImplementedError", "def forward(self, batch):\n raise NotImplementedError", "def forward(self, input, context, state):\n raise NotImplementedError", "def forward(self, x):\n x = self.efficient_net(x)\n return x", "def forward(self):\n print('forward')\n self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def forward(self, x):\n x = self.conv1(x)\n if self.use_bn:\n x = self.batchnorm(x)\n if self.use_dropout:\n x = self.dropout(x)\n x = self.activation(x)\n x = self.conv2(x)\n if self.use_bn:\n x = self.batchnorm(x)\n if self.use_dropout:\n x = self.dropout(x)\n x = self.activation(x) \n x = self.maxpool(x) \n return x", "def forward_train(self, *args, **kwargs):\n pass", "def step_forward(self):\n if self.state_num < len(self.steps):\n print(\"\\nStepping forward to state %d.\" % int(self.state_num + 1))\n self.state_string[0] 
= \"Stepping forward to state \" + str(self.state_num + 1) + \".\"\n # Get process and resource involved.\n process = self.steps[self.state_num][0]\n resource = self.steps[self.state_num][2]\n # Is this a request?\n if self.steps[self.state_num][1]:\n print(\"Process %d requests resource %d.\" % (process, resource))\n self.state_string[1] = \"Process \" + str(process) + \" requests resource \" + str(resource) + \".\"\n # Is the resource not being used by a process?\n if self.available[resource] > 0:\n # Mark in hold matrix the relationship between resource and process.\n self.hold_edges[resource][process] += 1\n # Make resource unavailabe.\n self.available[resource] -= 1\n # Store the process ID that holds the resource.\n self.connected_v[resource] = process\n else:\n # Mark in request matrix the relationship between resource and process.\n self.request_edges[resource][process] += 1\n # Add our process to the graph and make a directed edge.\n if process not in self.graph:\n self.graph.add_vertex(process)\n if self.connected_v[resource] not in self.graph:\n self.graph.add_vertex(self.connected_v[resource])\n if not self.graph.does_edge_exist(process, self.connected_v[resource]):\n self.graph.add_edge(process, self.connected_v[resource])\n print(\"p{:d} --> p{:d}\".format(process, self.connected_v[resource]))\n else:\n print(\"Process %d releases resource %d.\" % (process, resource))\n self.state_string[0] = \"Process \" + str(process) + \" releases resource \" + str(resource) + \".\"\n # Remove connection in hold matrix.\n self.hold_edges[resource][process] -= 1\n # Does another process want this resource?\n if np.count_nonzero(self.request_edges[resource]) > 0:\n # Get next process that wants the resource.\n new_process = self.request_edges[resource].index(1)\n # Mark in hold matrix the relationship between resource and process.\n self.hold_edges[resource][new_process] += 1\n # Store the process ID that holds the resource.\n self.connected_v[resource] = new_process\n # Remove connection in request matrix.\n self.request_edges[resource][new_process] -= 1\n # Delete edge if it exists.\n if self.graph.does_edge_exist(new_process, self.connected_v[resource]):\n self.graph.delete_edge(new_process, self.connected_v[resource])\n print(\"Process %d now has resource %d.\" % (new_process, resource))\n self.state_string[1] = \"Process \" + str(new_process) + \" now has resource \" + str(resource) + \".\"\n else:\n print(\"Resource %d is now available.\" % resource)\n self.state_string[1] = \"Resource \" + str(resource) + \" is now available.\"\n # Mark resource as unowned by a process.\n self.available[resource] += 1\n # Empty process that owned the resource previously.\n self.connected_v[resource] = None\n # Advance the state.\n self.state_num += 1", "def forward(self, adj, z, n_nodes):\n x = z.repeat(n_nodes, 1)\n sequence = self.gcn(x, adj)\n\n return sequence", "def forward(self, x, mask):\n \"Follow Figure 1 for connections.\"\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))\n return self.sublayer[1](x, self.feed_forward)", "def test_propagate_forward(self):\n nn = NeuralNet(0, 0, '', '', blank=True)\n nn.create_net(2, 1, 2, 2)\n\n # Override weights to static value for reproducibility\n for node in nn.layers[2].nodes:\n node.weights = [0.6, 0.6]\n\n for node in nn.layers[3].nodes:\n node.weights = [1.0, 1.0]\n\n nn.propagate_forward([2, 3], test=True)\n model_output = nn.layers[-1].nodes[0].value\n\n self.assertEqual(round(model_output, 3), 0.823)", "def forward(self, x):\n 
residues = []\n # Downward Pass\n x = self.layers[0](x.unsqueeze(1))\n for layer in self.layers[1:self.half]:\n x = layer(x)\n residues.insert(0, x)\n\n # Upward Pass\n for idx, layer in enumerate(self.layers[self.half:(len(self.layers)-1)]):\n x = layer(x, residues[idx])\n x = self.layers[-1](x)\n\n return(x)", "def forward(self):\n #print('forward\\r')\n self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def forward(self, *inputs):\n raise NotImplementedError", "def move_forward():\n pass", "def step_forward(self):", "def forward(self, x):\n x = self._activation(self.fully_connected_1(x))\n x = self._activation(self.fully_connected_2(x))\n x = self.dropout(x)\n x = self._activation(self.fully_connected_3(x))\n x = self._activation(self.fully_connected_4(x))\n x = self.dropout(x)\n x = self._activation(self.fully_connected_5(x))\n return self.fully_connected_out(x)", "def forward_train(self, *args, **kwargs):\n raise NotImplementedError('This interface should not be used in current training schedule. Please use `train_step` for training.')", "def feed_forward(self):\n pre = self.pre_layer.o\n self.post_layer.i = torch.matmul(pre, self.weight)", "def move_forward(self):\n self.x, self.y = self.compute_positions()", "def _forward(self, x, X, upto=None):\n if upto is not None: # cannot use 'if upto' here since it is 0-indexed\n # and layer0 is the first layer\n assert 0<=upto<=self._layer_counter\n counter = upto + 1\n else: counter = self._layer_counter\n\n y_previous, Y_previous = x, X\n # TODO: because we always need to compute F_i(X) at each layer i, this\n # is a huge overhead\n # feedforward\n for i in range(counter):\n layer = getattr(self, 'layer'+str(i))\n y, Y = layer(y_previous, Y_previous), layer(Y_previous, Y_previous)\n y_previous, Y_previous = y, Y\n\n return y", "def forward(source, destination):\n string = b' '\n while string:\n string = source.recv(10240)\n if string:\n destination.sendall(string)\n else:\n destination.shutdown(socket.SHUT_WR)\n try:\n source.shutdown(socket.SHUT_RD)\n except socket.error as ex:\n if ex.errno not in (57, 107): # pragma: no cover\n # socket.error: [Errno 57] Socket is not connected\n # error: [Errno 107] Transport endpoint is not connected\n raise", "def forward(self, inputs):\r\n #print (len(inputs))\r\n out = self.fc1(inputs)\r\n out = self.fc2(out)\r\n self.out = out\r\n return out\r\n #raise NotImplementedError('Implement the forward method of the model')\r", "def __call__(self, *args, **kwargs):\n return self.forward(*args, **kwargs)", "def go_forward(net):\n global w, back_loss, loss, l2_loss\n start_forward_time = time.time()\n\n # feed in data\n P = net(w).t()\n\n # calculate loss\n Y = P.mv(X)\n Ybar = Y.mean()\n back_loss = (Y - Ybar).norm(1) / (J)\n loss = back_loss / Ybar\n l2_loss = ((Y - Ybar).norm(2) ** 2) / (J * Ybar)\n\n return time.time() - start_forward_time", "def forward_pass(self, inputs):\n self._rbf_forward(inputs)\n self._slp_forward()\n return self.slp_outputs", "def forward(self, inputs):\n raise NotImplementedError", "def forward_batch(self,batcher, phase=0):\n pass", "def forward(self, x):\n # define feedforward behavior, applying activations as necessary\n out = self.leaky_relu(self.conv1(x))\n out = self.leaky_relu(self.conv2(out))\n out = self.leaky_relu(self.conv3(out))\n out = self.leaky_relu(self.conv4(out))\n\n out = self.res_blocks(out)\n\n out = self.leaky_relu(self.deconv1(out))\n out = self.leaky_relu(self.deconv2(out))\n out = 
self.leaky_relu(self.deconv3(out))\n\n # tanh applied to last layer\n out = F.tanh(self.out_layer(out))\n out = torch.clamp(out, min=-0.5, max=0.5)\n\n return out", "def __feed_forward(self, X):\n # go over all layers\n for layer in self.__layers:\n X = layer.compute_act(X)\n\n return X", "def forward(self, x):\n c_out = self.conv_net.forward(x)\n\n c_out_flat = c_out.flatten(start_dim=1)\n \n \n return self.linear.forward(c_out_flat)", "def adjoint(self, inputs, outputs):\n super(copy, self).forward(inputs, outputs)", "def feedForward(self, inputs):\n\n\t\tinputs = np.atleast_1d(inputs)\n\n\t\tif not len(inputs) == self.nInputs:\n\n\t\t\traise ValueError(\"The input vector is the wrong length for this network\")\n\n\t\t#don't forget we have a bias unit in here too\n\t\tfor i in range(1,self.nInputs+1):\n\t\t\tself.inputLayer[i].activation = inputs[i-1]\n\t\t\tself.inputLayer[i].output = inputs[i-1]\t\t\t\n\n\t\tfor layer in self.hiddenLayers:\n\n\t\t\tfor unit in layer:\n\n\t\t\t\tunit.forwardValue()\n\n\t\tfor unit in self.outputLayer:\n\t\n\t\t\tunit.forwardValue()", "def forward(self, X, training=False):\n pass", "def forward(self, state, action):\n x = torch.cat((state, action), dim=1)\n return self.net(x)", "def forward(self, x):\n x = self.input(x)\n x = self.in0(x)\n x = self.block0(x) + x\n x = self.block1(x) + x\n x = self.block2(x) + x\n x = self.block3(x) + x\n x = self.block4(x) + x\n x = self.in0(x)\n\n out = self.out(x)\n\n return out" ]
[ "0.7486405", "0.7486405", "0.72931826", "0.72931826", "0.72931826", "0.72568643", "0.71754724", "0.70931304", "0.70689535", "0.7054133", "0.69913656", "0.6969786", "0.69356275", "0.69356275", "0.69356275", "0.6921335", "0.6920985", "0.6747466", "0.6711534", "0.67010707", "0.66525286", "0.6641545", "0.6623176", "0.6617483", "0.6617483", "0.6617483", "0.66074246", "0.6604059", "0.6589724", "0.6578781", "0.6560978", "0.6535647", "0.65293694", "0.65143543", "0.65041804", "0.65041804", "0.64726806", "0.6471294", "0.6471294", "0.6459609", "0.6454982", "0.6453049", "0.6440824", "0.6428473", "0.6410913", "0.6408422", "0.64007837", "0.63997585", "0.63960415", "0.63766634", "0.63728476", "0.6368438", "0.636231", "0.63491744", "0.63488925", "0.6317315", "0.6317315", "0.63163906", "0.63152647", "0.6309884", "0.6307033", "0.62857485", "0.6271954", "0.62711734", "0.62630504", "0.6253357", "0.6253357", "0.62506914", "0.6249723", "0.6248265", "0.62186116", "0.620725", "0.6206563", "0.619249", "0.61901283", "0.61656743", "0.61607224", "0.6154688", "0.6152447", "0.6138764", "0.6131553", "0.61183935", "0.61165875", "0.6115646", "0.61136824", "0.61108863", "0.61102486", "0.6110234", "0.6106236", "0.6099183", "0.6097814", "0.6097745", "0.6095042", "0.6089835", "0.6052115", "0.6044877", "0.60431135", "0.60409063", "0.6040325", "0.6039296", "0.603644" ]
0.0
-1
Forward pass through network.
def forward(self, input_x): return self.net(input_x.float())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self):\n pass", "def forward(self):\n pass", "def forward(self, input):\n\n return self.network(input)", "def forward(self, input):\n\n return self.network(input)", "def forward(self, input):\n\n return self.network(input)", "def forward_pass(self):", "def forward(self, forward):\n\n self._forward = forward", "def fastforward(self):\n self.run_command('fastforward')", "def forward(self, x):\n self.save_net()\n self.perturb_tensors()\n out = self.net.forward(x)\n return out", "def _forward(self, z):\n raise NotImplementedError(\"Forward shouldn't be called!\")", "def forward(self, x):\n return self.net(x)", "def forward( self ):\n self._has_change = True\n print( \"Forward\" )", "def forward(self):\n raise NotImplemented", "def forward(self):\n raise NotImplemented", "def forward(self):\n raise NotImplemented", "def forward(self, x):\n pass", "def forward(self, *args, **kwargs):\n pass", "def forward(self, x_in):\r\n # x_out = torch.zeros_like(x_in)\r\n\r\n for layer in self.layers: #Call forward function of each layer in order\r\n x_out = layer.forward(x_in)\r\n # print(\"Forward pass Seq: \", layer, x_in, x_out)\r\n x_in = x_out # output of the layer is passed as input to the next layer\r\n self.temp = x_in\r\n return x_out", "def forward(self, x):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\n\t\toutput = self._layers[0].forward(x)\n\t\tfor i in range(1, len(self._layers)):\n\t\t\toutput = self._layers[i].forward(output)\n\t\treturn output\n\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def forward(p):\n try:\n if IP in p and p[IP].dst == RD_ADRRESS and p[Ether].src != GW_MAC_ADRRESS and p[Ether].dst == GW_MAC_ADRRESS:\n if p[IP].src not in black_list:\n send(p[1::], iface=IFACE, verbose=0)\n except:\n print(\"error in forward\")\n finally:\n sys.exit()", "def forward(self, x):\r\n out = x + self.conv_block(x) # add skip connections\r\n return out", "def forward(self, inputs):\n\n down0 = self.layer_0(inputs=inputs)\n down1 = self.layer_1(inputs=down0)\n down2 = self.layer_2(inputs=down1)\n down3 = self.layer_3(inputs=down2)\n down4 = self.layer_4(inputs=down3)\n\n up1 = self.layer_7(down4, down3)\n\n up2 = self.layer_8(up1, down2)\n\n up3 = self.layer_9(up2, down1)\n\n up4 = self.layer_10(up3, down0)\n\n up5 = self.layer_11(up4)\n return up5", "def forward(self, distance):\n self.logger.debug(\"forward \" + str(distance))", "def forward(self, x):\n out = x + self.conv_block(x) # add skip connections\n return out", "def forward(self, x):\n out = x + self.conv_block(x) # add skip connections\n return out", "def forward(self, x):\n out = x + self.conv_block(x) # add skip connections\n return out", "def base_forward(self, x):\r\n pass", "def _forward(self, X, **kwargs):\n raise NotImplementedError()", "def go_forward(self):\n command = _build_robovac_command(RobovacModes.GO_FORWARD, RobovacCommands.MOVE)\n message = self._build_command_user_data_message(command)\n\n self._send_packet(message, False)", "def forward(self) -> None:\n self.system.notify(\"Jarvis::Forward\")\n self.media.fast_forward()", "def forward(self)->None:", "def forward(self, x, **kwargs):\n pass", "def forward_graph(self):\n raise NotImplementedError", "def forward(self, output, target):\n raise NotImplementedError", "def 
forward(self, input):\n raise NotImplementedError", "def forward(self, input):\n raise NotImplementedError", "def forward(self):\n self.position += 1", "def forward(self, *args):\n raise NotImplementedError", "def forward(self, *args):\n raise NotImplementedError", "def network_forward(self, X):\n \n #############################################################################\n # TODO: Perform a forward pass on the network and store the caches of #\n # each layer inside the cache_list #\n #############################################################################\n ActivationFunction = None\n if self.hidden_activation_fn == \"sigmoid\":\n ActivationFunction = lambda x: self.sigmoid_forward(x)\n elif self.hidden_activation_fn == \"tanh\":\n ActivationFunction = lambda x: self.tanh_forward(x)\n elif self.hidden_activation_fn == \"relu\":\n ActivationFunction = lambda x: self.relu_forward(x)\n\n Layer1Value, cacheL1 = self.fully_connected_forward(X, self.params[\"W1\"], self.params[\"b1\"])\n Layer1ValueActivation, cacheL1A = ActivationFunction(Layer1Value)\n scores, cacheL2 = self.fully_connected_forward(Layer1ValueActivation, self.params[\"W2\"], self.params[\"b2\"])\n\n # Cache\n cache_list =[cacheL1, cacheL1A, cacheL2]\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return scores, cache_list", "def forward(self, states):\n raise NotImplementedError()", "def move_forward(power):\n message = \"FORWARD:\" + str(power) + '\\n'\n sock.sendall(message)\n return", "def forward(network, X):\r\n activations = []\r\n input = X\r\n for i in range(len(network)):\r\n activations.append(network[i].forward(X))\r\n X = network[i].forward(X)\r\n \r\n assert len(activations) == len(network)\r\n return activations", "def forward(self, *args, **kwargs):\n raise NotImplementedError", "def forward(self, input_tensor: torch.Tensor):\n self.network_output = self.network.forward(input_tensor.type(self.data_type))\n return self.network_output", "def forward_once(self, x):\n\t\t#x = F.normalize(self.network(x), p=2)\n\t\tx = self.network(x)\n\t\treturn x", "def forward(self):\n self.img_gen, self.loss_reg, self.parsav = self.net_G(self.input_P1, self.input_P2, self.input_BP1, self.input_BP2, self.input_SPL1, self.input_SPL2)", "def _forward(self, x):\n global global_epoch\n global_epoch += 1\n bias = -np.ones((x.shape[0], 1))\n tail = np.zeros((x.shape[0], self.dim_hid+self.dim_out))\n nodes = np.concatenate((bias, x, tail), axis=1)\n weight = self.weight * self.connectivity\n for i in range(self.dim_in, self.dim_in+self.dim_hid+self.dim_out):\n net = nodes.dot(weight[i])\n nodes[:,i] = self.__sigmoid(net)\n nodes[:,self.dim_in:self.dim_in+self.dim_hid] *= self.hidden\n return nodes", "def fastforward_all():\n\twhile _running:\n\t\t_running[0].fastforward(noerror=True)", "def forward(self, input):\n raise NotImplementedError()", "def move_forward(self, dist):\r\n self.send_command_without_response(f'forward {dist}')", "def move_forward():\n twister = Twist(linear=Vector3(x=0.5,y=0,z=0),angular=Vector3(x=0,y=0,z=0))\n pub.publish(twister)", "def forward(self, srcip, packet): #gets entire packet and srcip of that packet\n # get route to send packet\n best_route = self.get_route(srcip, packet[DEST]) #is a socket\n\n sock = best_route\n\n\n jsonpack = json.dumps(packet)\n sock.sendall(jsonpack.encode())\n # TODO fix src and dest\n return True", "def forward(self, x):\n out = 
self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.maxpool(out)\n out = self.avgpool(out)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = self.avgpool(out)\n out = torch.flatten(out, 1)\n out = self.fc(out)\n return out", "def forward(self, x):\r\n x = self.conv1(x)\r\n x = self.conv1_BN(x)\r\n x = F.relu(x)\r\n x = self.conv1_dp(x)\r\n x = self.Block2_1(x)\r\n x = self.Block2_2(x)\r\n x = self.Block3_1(x)\r\n x = self.Block3_2(x)\r\n x = self.Block3_3(x)\r\n x = self.Block3_4(x)\r\n x = self.Block4_1(x)\r\n x = self.Block4_2(x)\r\n x = self.Block4_3(x)\r\n x = self.Block4_4(x)\r\n x = self.Block5_1(x)\r\n x = self.Block5_2(x)\r\n x = self.MP(x)\r\n x = x.view(x.size(0),-1)\r\n x = self.fc(x)\r\n \r\n return x", "def forward(self, x):\n raise NotImplementedError", "def forward(self, x):\n raise NotImplementedError", "def test_forward(self):\n validate_forward()", "def forward(self, *args, **kwargs):\n\n raise NotImplementedError()", "def forward(self, srcif, packet):\n # packet is already decoded\n def send_no_route():\n send_src = srcif[:-1]\n send_src += '1'\n self.sockets[srcif].send(json.dumps({\n SRCE: send_src,\n DEST: packet[SRCE],\n TYPE: NRTE,\n MESG: {}\n }).encode())\n # GEt correct route.\n sock_addr = self.get_route(srcif, packet[DEST])\n\n # If no route available, send no route message back\n if sock_addr == None:\n send_no_route()\n else:\n sock = self.sockets[sock_addr]\n # If socket is available, send to proper neighbor.\n sock.send(json.dumps(packet).encode())\n return False", "def forward(self, speed):\n self.controller.forward(speed)", "def forward(self, srcif, packet) -> bool:\n chosen_route = self.get_route(srcif, packet[DEST])\n if chosen_route is None:\n return False\n self.sockets[chosen_route[PEER]].sendall(json.dumps(packet).encode())\n return True", "def forward(self, inputs, outputs):\n super(copy, self).adjoint(inputs, outputs)", "def forward(self, s):", "def forward(self, srcif, packet): \n # TODO: will need to massively update this \n #print(\"PACKET FROM DATA: {0}\".format(packet))\n #print(\"ROUTING TABLE IS: {0}\".format(self.routes))\n dest = packet[\"dst\"]\n chosen_router = self.get_route(srcif, dest)\n if chosen_router is None:\n return False\n\n #TODO implement most specific route and business routes\n outroutes = []\n #print(\"CHOSEN ROUTER ISSSSSSSSSSSSSSSSSS\", chosen_router) \n #print(\"THIS IS FOR FORWARD:\", json.dumps(packet).encode(\"ascii\"))\n chosen_router.send(json.dumps(packet).encode(\"ascii\"))\n #return may need to be changed \n return True", "def forward(self, batch):\n raise NotImplementedError", "def forward(self, batch):\n raise NotImplementedError", "def forward(self, input, context, state):\n raise NotImplementedError", "def forward(self, x):\n x = self.efficient_net(x)\n return x", "def forward(self):\n print('forward')\n self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def forward(self, x):\n x = self.conv1(x)\n if self.use_bn:\n x = self.batchnorm(x)\n if self.use_dropout:\n x = self.dropout(x)\n x = self.activation(x)\n x = self.conv2(x)\n if self.use_bn:\n x = self.batchnorm(x)\n if self.use_dropout:\n x = self.dropout(x)\n x = self.activation(x) \n x = self.maxpool(x) \n return x", "def forward_train(self, *args, **kwargs):\n pass", "def step_forward(self):\n if self.state_num < len(self.steps):\n print(\"\\nStepping forward to state %d.\" % int(self.state_num + 1))\n self.state_string[0] 
= \"Stepping forward to state \" + str(self.state_num + 1) + \".\"\n # Get process and resource involved.\n process = self.steps[self.state_num][0]\n resource = self.steps[self.state_num][2]\n # Is this a request?\n if self.steps[self.state_num][1]:\n print(\"Process %d requests resource %d.\" % (process, resource))\n self.state_string[1] = \"Process \" + str(process) + \" requests resource \" + str(resource) + \".\"\n # Is the resource not being used by a process?\n if self.available[resource] > 0:\n # Mark in hold matrix the relationship between resource and process.\n self.hold_edges[resource][process] += 1\n # Make resource unavailabe.\n self.available[resource] -= 1\n # Store the process ID that holds the resource.\n self.connected_v[resource] = process\n else:\n # Mark in request matrix the relationship between resource and process.\n self.request_edges[resource][process] += 1\n # Add our process to the graph and make a directed edge.\n if process not in self.graph:\n self.graph.add_vertex(process)\n if self.connected_v[resource] not in self.graph:\n self.graph.add_vertex(self.connected_v[resource])\n if not self.graph.does_edge_exist(process, self.connected_v[resource]):\n self.graph.add_edge(process, self.connected_v[resource])\n print(\"p{:d} --> p{:d}\".format(process, self.connected_v[resource]))\n else:\n print(\"Process %d releases resource %d.\" % (process, resource))\n self.state_string[0] = \"Process \" + str(process) + \" releases resource \" + str(resource) + \".\"\n # Remove connection in hold matrix.\n self.hold_edges[resource][process] -= 1\n # Does another process want this resource?\n if np.count_nonzero(self.request_edges[resource]) > 0:\n # Get next process that wants the resource.\n new_process = self.request_edges[resource].index(1)\n # Mark in hold matrix the relationship between resource and process.\n self.hold_edges[resource][new_process] += 1\n # Store the process ID that holds the resource.\n self.connected_v[resource] = new_process\n # Remove connection in request matrix.\n self.request_edges[resource][new_process] -= 1\n # Delete edge if it exists.\n if self.graph.does_edge_exist(new_process, self.connected_v[resource]):\n self.graph.delete_edge(new_process, self.connected_v[resource])\n print(\"Process %d now has resource %d.\" % (new_process, resource))\n self.state_string[1] = \"Process \" + str(new_process) + \" now has resource \" + str(resource) + \".\"\n else:\n print(\"Resource %d is now available.\" % resource)\n self.state_string[1] = \"Resource \" + str(resource) + \" is now available.\"\n # Mark resource as unowned by a process.\n self.available[resource] += 1\n # Empty process that owned the resource previously.\n self.connected_v[resource] = None\n # Advance the state.\n self.state_num += 1", "def forward(self, adj, z, n_nodes):\n x = z.repeat(n_nodes, 1)\n sequence = self.gcn(x, adj)\n\n return sequence", "def forward(self, x, mask):\n \"Follow Figure 1 for connections.\"\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))\n return self.sublayer[1](x, self.feed_forward)", "def test_propagate_forward(self):\n nn = NeuralNet(0, 0, '', '', blank=True)\n nn.create_net(2, 1, 2, 2)\n\n # Override weights to static value for reproducibility\n for node in nn.layers[2].nodes:\n node.weights = [0.6, 0.6]\n\n for node in nn.layers[3].nodes:\n node.weights = [1.0, 1.0]\n\n nn.propagate_forward([2, 3], test=True)\n model_output = nn.layers[-1].nodes[0].value\n\n self.assertEqual(round(model_output, 3), 0.823)", "def forward(self, x):\n 
residues = []\n # Downward Pass\n x = self.layers[0](x.unsqueeze(1))\n for layer in self.layers[1:self.half]:\n x = layer(x)\n residues.insert(0, x)\n\n # Upward Pass\n for idx, layer in enumerate(self.layers[self.half:(len(self.layers)-1)]):\n x = layer(x, residues[idx])\n x = self.layers[-1](x)\n\n return(x)", "def forward(self):\n #print('forward\\r')\n self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def forward(self, *inputs):\n raise NotImplementedError", "def move_forward():\n pass", "def step_forward(self):", "def forward(self, x):\n x = self._activation(self.fully_connected_1(x))\n x = self._activation(self.fully_connected_2(x))\n x = self.dropout(x)\n x = self._activation(self.fully_connected_3(x))\n x = self._activation(self.fully_connected_4(x))\n x = self.dropout(x)\n x = self._activation(self.fully_connected_5(x))\n return self.fully_connected_out(x)", "def forward_train(self, *args, **kwargs):\n raise NotImplementedError('This interface should not be used in current training schedule. Please use `train_step` for training.')", "def feed_forward(self):\n pre = self.pre_layer.o\n self.post_layer.i = torch.matmul(pre, self.weight)", "def move_forward(self):\n self.x, self.y = self.compute_positions()", "def _forward(self, x, X, upto=None):\n if upto is not None: # cannot use 'if upto' here since it is 0-indexed\n # and layer0 is the first layer\n assert 0<=upto<=self._layer_counter\n counter = upto + 1\n else: counter = self._layer_counter\n\n y_previous, Y_previous = x, X\n # TODO: because we always need to compute F_i(X) at each layer i, this\n # is a huge overhead\n # feedforward\n for i in range(counter):\n layer = getattr(self, 'layer'+str(i))\n y, Y = layer(y_previous, Y_previous), layer(Y_previous, Y_previous)\n y_previous, Y_previous = y, Y\n\n return y", "def forward(source, destination):\n string = b' '\n while string:\n string = source.recv(10240)\n if string:\n destination.sendall(string)\n else:\n destination.shutdown(socket.SHUT_WR)\n try:\n source.shutdown(socket.SHUT_RD)\n except socket.error as ex:\n if ex.errno not in (57, 107): # pragma: no cover\n # socket.error: [Errno 57] Socket is not connected\n # error: [Errno 107] Transport endpoint is not connected\n raise", "def forward(self, inputs):\r\n #print (len(inputs))\r\n out = self.fc1(inputs)\r\n out = self.fc2(out)\r\n self.out = out\r\n return out\r\n #raise NotImplementedError('Implement the forward method of the model')\r", "def __call__(self, *args, **kwargs):\n return self.forward(*args, **kwargs)", "def go_forward(net):\n global w, back_loss, loss, l2_loss\n start_forward_time = time.time()\n\n # feed in data\n P = net(w).t()\n\n # calculate loss\n Y = P.mv(X)\n Ybar = Y.mean()\n back_loss = (Y - Ybar).norm(1) / (J)\n loss = back_loss / Ybar\n l2_loss = ((Y - Ybar).norm(2) ** 2) / (J * Ybar)\n\n return time.time() - start_forward_time", "def forward_pass(self, inputs):\n self._rbf_forward(inputs)\n self._slp_forward()\n return self.slp_outputs", "def forward(self, inputs):\n raise NotImplementedError", "def forward_batch(self,batcher, phase=0):\n pass", "def forward(self, x):\n # define feedforward behavior, applying activations as necessary\n out = self.leaky_relu(self.conv1(x))\n out = self.leaky_relu(self.conv2(out))\n out = self.leaky_relu(self.conv3(out))\n out = self.leaky_relu(self.conv4(out))\n\n out = self.res_blocks(out)\n\n out = self.leaky_relu(self.deconv1(out))\n out = self.leaky_relu(self.deconv2(out))\n out = 
self.leaky_relu(self.deconv3(out))\n\n # tanh applied to last layer\n out = F.tanh(self.out_layer(out))\n out = torch.clamp(out, min=-0.5, max=0.5)\n\n return out", "def __feed_forward(self, X):\n # go over all layers\n for layer in self.__layers:\n X = layer.compute_act(X)\n\n return X", "def forward(self, x):\n c_out = self.conv_net.forward(x)\n\n c_out_flat = c_out.flatten(start_dim=1)\n \n \n return self.linear.forward(c_out_flat)", "def adjoint(self, inputs, outputs):\n super(copy, self).forward(inputs, outputs)", "def feedForward(self, inputs):\n\n\t\tinputs = np.atleast_1d(inputs)\n\n\t\tif not len(inputs) == self.nInputs:\n\n\t\t\traise ValueError(\"The input vector is the wrong length for this network\")\n\n\t\t#don't forget we have a bias unit in here too\n\t\tfor i in range(1,self.nInputs+1):\n\t\t\tself.inputLayer[i].activation = inputs[i-1]\n\t\t\tself.inputLayer[i].output = inputs[i-1]\t\t\t\n\n\t\tfor layer in self.hiddenLayers:\n\n\t\t\tfor unit in layer:\n\n\t\t\t\tunit.forwardValue()\n\n\t\tfor unit in self.outputLayer:\n\t\n\t\t\tunit.forwardValue()", "def forward(self, X, training=False):\n pass", "def forward(self, state, action):\n x = torch.cat((state, action), dim=1)\n return self.net(x)", "def forward(self, x):\n x = self.input(x)\n x = self.in0(x)\n x = self.block0(x) + x\n x = self.block1(x) + x\n x = self.block2(x) + x\n x = self.block3(x) + x\n x = self.block4(x) + x\n x = self.in0(x)\n\n out = self.out(x)\n\n return out" ]
[ "0.7486405", "0.7486405", "0.72931826", "0.72931826", "0.72931826", "0.72568643", "0.71754724", "0.70931304", "0.70689535", "0.7054133", "0.69913656", "0.6969786", "0.69356275", "0.69356275", "0.69356275", "0.6921335", "0.6920985", "0.6747466", "0.6711534", "0.67010707", "0.66525286", "0.6641545", "0.6623176", "0.6617483", "0.6617483", "0.6617483", "0.66074246", "0.6604059", "0.6589724", "0.6578781", "0.6560978", "0.6535647", "0.65293694", "0.65143543", "0.65041804", "0.65041804", "0.64726806", "0.6471294", "0.6471294", "0.6459609", "0.6454982", "0.6453049", "0.6440824", "0.6428473", "0.6410913", "0.6408422", "0.64007837", "0.63997585", "0.63960415", "0.63766634", "0.63728476", "0.6368438", "0.636231", "0.63491744", "0.63488925", "0.6317315", "0.6317315", "0.63163906", "0.63152647", "0.6309884", "0.6307033", "0.62857485", "0.6271954", "0.62711734", "0.62630504", "0.6253357", "0.6253357", "0.62506914", "0.6249723", "0.6248265", "0.62186116", "0.620725", "0.6206563", "0.619249", "0.61901283", "0.61656743", "0.61607224", "0.6154688", "0.6152447", "0.6138764", "0.6131553", "0.61183935", "0.61165875", "0.6115646", "0.61136824", "0.61108863", "0.61102486", "0.6110234", "0.6106236", "0.6099183", "0.6097814", "0.6097745", "0.6095042", "0.6089835", "0.6052115", "0.6044877", "0.60431135", "0.60409063", "0.6040325", "0.6039296", "0.603644" ]
0.0
-1
Forward pass through network. Calculates the action distribution.
def forward(self, x: FloatTensor) -> TanhMultivariateNormal: x = self.shared_net(x.float()) batch_mean = self.mean_layer(x) logstd = torch.clamp(self.logstd_layer(x), -20, 2) batch_scale_tril = torch.diag_embed(torch.exp(logstd)) return TanhMultivariateNormal( action_bias=self.action_bias, action_scale=self.action_scale, loc=batch_mean, scale_tril=batch_scale_tril )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self, x):\n x = self.fc0(x.view(-1, x.size(-1))).view(x.size(0), x.size(1), -1)\n x = self.pe(x)\n\n x = self.inner_layers(x) # FF, FF, FF, finalFF\n\n state_value = self.fc_s(x) # double-dqn : state\n\n advantage_values = self.fc_a(x) # double-dqn : advantage\n advantage_values = advantage_values.view(\n advantage_values.size()[:-1] + (self.action_size, self.n_atoms))\n\n dist_weights = state_value.unsqueeze(\n dim=-2) + advantage_values - advantage_values.mean(dim=-2, keepdim=True)\n\n return dist_weights", "def forward(self, x):\n # action\n act = self.act_fc1(x)\n act = torch.tanh(act)\n act = self.act_fc2(act)\n act = torch.tanh(act)\n mean = self.mu(act) # N, num_actions\n logstd = self.logstd.expand_as(mean)\n std = torch.exp(logstd)\n action = torch.normal(mean, std)\n\n # value\n v = self.value_fc1(x)\n v = torch.tanh(v)\n v = self.value_fc2(v)\n v = torch.tanh(v)\n v = self.value_fc3(v)\n\n # action prob on log scale\n logprob = log_normal_density(action, mean, std=std, log_std=logstd)\n return v, action, logprob, mean", "def forward(self, state, action):\n x = torch.cat((state, action), dim=1)\n return self.net(x)", "def forward(self, x):\n # action\n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.relu(x)\n x = self.conv3(x)\n x = F.relu(x)\n x = x.view(-1, 32 * 7 * 7)\n x = self.linear1(x)\n x = F.relu(x)\n\n mean = self.mu(x) # N, num_actions\n logstd = self.logstd.expand_as(mean)\n std = torch.exp(logstd)\n action = torch.normal(mean, std)\n\n # value\n v = self.critic_linear(x)\n\n # action prob on log scale\n logprob = log_normal_density(action, mean, std=std, log_std=logstd)\n return v, action, logprob, mean", "def forward(self, actor_core_output):\n action_distribution_params = self.distribution_linear(actor_core_output)\n action_distribution = get_action_distribution(self.action_space, raw_logits=action_distribution_params)\n return action_distribution_params, action_distribution", "def forward(self, state, action):\n state = torch.cat(state, dim=1)\n \n for i in range(len(action)):\n action[i] /= self.max_action\n\n # Concatenate the action vector \n action = torch.cat(action, dim=1)\n x = torch.cat([state, action], dim=1)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n q_value = self.q_out(x)\n\n return q_value", "def forward(self, state, action): \n ##x = F.relu(self.fc1(state)) \n x = F.relu(self.bn1(self.fc1(state))) \n x = torch.cat([x, action], dim=1)\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x", "def forward(self, state, action):\n xs = self.fc1(state)\n x = torch.cat((xs, action), dim=1)\n return self.fc2(x)", "def forward(self, state, action) -> float:\n xs = F.relu(self.fcs1_layer(state))\n x = torch.cat((xs, action), dim=1)\n x = F.relu(self.fc2_layer(x))\n x = self.dropout(x)\n x = self.fc3_layer(x)\n\n return x", "def forward(self,state,action):\n action_ = torch.zeros(action.shape[0],self.OHE_size) # 2024,OHE\n indices = torch.stack( (torch.arange(action.shape[0]), action.squeeze().long()), dim=0)\n indices = indices.tolist()\n action_[indices] = 1.\n x = torch.cat( (state,action_) ,dim=1)\n return self.forwardM(x)", "def forward(self, state, action):\n x = torch.cat((state, action), dim=1)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n\n return x", "def forward(self, x):\n x = F.relu(self.affine1(x))\n x = F.relu(self.affine2(x))\n x = F.relu(self.affine3(x))\n x = F.relu(self.affine4(x))\n x = F.relu(self.affine5(x))\n\n # actor: choses action to take from 
state s_t \n # by returning probability of each action\n action_prob = F.softplus(self.action_head(x)).reshape(-1) + 1e-20\n\n # critic: evaluates being in the state s_t\n state_values = self.value_head(x)\n\n # return values for both actor and critic as a tuple of 2 values:\n # 1. a list with the probability of each action over the action space\n # 2. the value from state s_t \n return action_prob, state_values", "def __feed_forward(self, X):\n # go over all layers\n for layer in self.__layers:\n X = layer.compute_act(X)\n\n return X", "def forward(self, x):\n self.save_net()\n self.perturb_tensors()\n out = self.net.forward(x)\n return out", "def forward(self, state, action):\n\t\tx = F.relu(self.fc1(state))\n\t\tx = F.relu(self.fc2(torch.cat([x, action], dim=1))) # add action too for the mapping\n\t\tx = F.relu(self.fc3(x))\n\n\t\treturn x", "def forward(self, state, action):\n sa = torch.cat((state, action), dim=1)\n x = F.relu(self.fc1(sa))\n x = F.relu(self.fc2(x))\n return self.fc3(x)", "def forward(self, state, action):\n sa = torch.cat((state, action), dim=1)\n x = F.relu(self.fc1(sa))\n x = F.relu(self.fc2(x))\n return self.fc3(x)", "def forward(self, state, action):\n s = self.state_encoder(state)\n x = torch.cat((s,action),dim=1)\n x = self.act(self.fc2(x))\n x = self.fc_out(x)*10\n return x", "def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n action = self.max_action * torch.tanh(self.action_out(x))\n\n return action", "def forward(self, state):\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n action = self.tanh(x)\n\n action = action.cpu().data.numpy() * self.action_lim\n action = torch.FloatTensor(action)\n\n return action", "def forward(self, state, action): #concatenate the action -value\n\n xu = torch.cat([state, action], 1)\n\n x1 = F.relu(self.fc1(xu))\n x1 = F.relu(self.fc2(x1))\n x1 = self.fc3(x1)\n\n x2 = F.relu(self.fc4(xu))\n x2 = F.relu(self.fc5(x2))\n x2 = self.fc6(x2)\n \n return x1, x2", "def forward(self, state, action):\n xs = f.relu(self.fcs1(state))\n x = torch.cat((xs, action), dim=1)\n x = f.relu(self.fc2(x))\n return self.fc3(x)", "def _step(self, action):\n\n reward = 0.0\n x, y = action\n\n if not Creator.add_edge(self.nxgraph, x+1, y+1):\n reward = 0.0\n # TODO: do we return here?\n raise NotImplementedError\n else:\n reward = 1.0\n new_state = EnvTools.get_state(self.nxgraph)\n EnvTools.calculate_reward(self.state, self.previous_state)\n raise NotImplementedError\n\n\n\n pass", "def _forward(self):\n\n tf.summary.image(\"image\", tensor=tf.reshape(self.x, (self.batch_size, 28, 28, 1)), max_outputs=10)\n x = self.x\n\n # x = layers.dropout(self.x, keep_prob=0.7)\n # with tf.variable_scope(\"layer1\") as scope:\n h = tf.nn.relu(layers.fully_connected(x, num_outputs=self.input_size // 2, activation_fn=None))\n # tf.summary.histogram(\"moving_mean1\", tf.get_variable(scope + \"moving_mean\"))\n # with tf.variable_scope(\"layer2\") as scope:\n # h = tf.nn.relu(layers.fully_connected(h, num_outputs=32, activation_fn=None))\n # tf.summary.histogram(\"moving_mean2\", tf.get_variable(\"moving_mean\"))\n # with tf.variable_scope(\"layer3\") as scope:\n self.logits = layers.fully_connected(h, num_outputs=10, activation_fn=None)\n # tf.summary.histogram(\"moving_mean3\", tf.get_variable(\"moving_mean\"))\n\n self.probability = tf.nn.softmax(self.logits)\n self.prediction = tf.argmax(self.probability, axis=1)", "def forward(self, state, action):\n # Pass the states into the first 
layer\n # Pass the input through all the layers apllying ReLU activation except for the output layer\n x = F.relu(self.fc1(state))\n # Batch Normalization of the first layer\n x = self.bn(x)\n # Concatenate the first layer output with the action\n x = torch.cat((x, action), dim=1)\n x = F.relu(self.fc2(x))\n # Pass the input through all the layers apllying ReLU activation, but the last\n x = torch.sigmoid(self.fc3(x))\n # Return the Q-Value for the input state-action\n return x", "def forward(self, state, action):\n xs = F.relu(self.fcs1(state))\n x = torch.cat((xs, action), dim=1)\n x = F.relu(self.fc2(x))\n return self.fc3(x)", "def forward(self, state, action):\r\n \r\n x = torch.cat([state, action], 1) ##### fixed dim รคndern\r\n x = F.relu(self.linear1(x))\r\n x = F.relu(self.linear2(x))\r\n x = self.linear3(x)\r\n\r\n return x", "def forward(self, state, action):\n\n # Prepare the embeddings\n state_embedding = self.state_embedding(state.float())\n state_embedding = state_embedding.repeat(1, action.shape[1], 1)\n action_embedding = self.action_embedding(action.float())\n state_action_embedding = torch.cat((state_embedding, action_embedding),dim=2)\n\n # Attention\n query = self.q_projection(state_action_embedding).permute(1,0,2)\n key = self.k_projection(state_action_embedding).permute(1,0,2)\n value = self.v_projection(state_action_embedding).permute(1,0,2)\n \n x = self.attention(query, key, value)[0].permute(1,0,2)[:,0,:]\n\n # Predict the next state\n x = self.predict(x)\n \n return x", "def forward(self, state, action):\n q_in = torch.cat([state, action], 1)\n return self.ffn(q_in).view(-1)", "def forward(self, state):\n x = F.relu(self.fc1(state))\n x = self.fc2(x)\n action = self.tanh(x)\n\n action = action.cpu().data.numpy() * self.action_lim\n action = torch.FloatTensor(action)\n\n return action", "def forward(self, state):\n x = self.fc1(state)\n action = self.tanh(x)\n\n action = action.cpu().data.numpy() * self.action_lim\n action = torch.FloatTensor(action)\n\n return action", "def feedforward(self, inputs):\n processsum = 0\n for i in range(len(inputs)):\n processsum = processsum + inputs[i] * self.weights[i]\n return self.activate(processsum)", "def forward(self, state, action):\n x = torch.cat([state, action], 1)\n x = F.relu(self.linear1(x))\n x = F.relu(self.linear2(x))\n x = self.linear3(x)\n return x", "def forward(self, state, action):\n s1 = F.relu(self.fcs1(state))\n s2 = F.relu(self.fcs2(s1))\n\n a1 = F.relu(self.fca1(action))\n\n x = torch.cat((s2, a1), dim=1)\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n\n return x", "def action_distribution(self, state):\n means, stds = self.__call__(state)\n dist = Normal(means, torch.exp(stds))\n\n return dist", "def forward(self, state, action):\n s1 = F.relu(self.fcs1(state))\n a1 = F.relu(self.fca1(action))\n x = torch.cat((s1, a1), dim=1)\n\n x = self.fc3(x)\n\n return x", "def forward(self, state, action):\n # Pass the states into the first layer\n x = self.fc_layers[0](state)\n x = self.bn(x)\n x = F.relu(x)\n # Concatenate the first layer output with the action\n x = torch.cat((x, action), dim=1)\n # Pass the input through all the layers apllying ReLU activation, but the last\n for layer in self.fc_layers[1:-1]:\n x = F.relu(layer(x))\n # Pass the result through the output layer apllying sigmoid activation\n x = torch.sigmoid(self.fc_layers[-1](x))\n # Return the Q-Value for the input state-action\n return x", "def feed_forward(self):\n pre = self.pre_layer.o\n self.post_layer.i = torch.matmul(pre, self.weight)", 
"def forward(self, x):\n return self.net(x)", "def _forward(self, x):\n\n batch_size = x.size()[0]\n x = self._data_augment(x)\n\n path_prob = self.inner_nodes(x)\n path_prob = torch.unsqueeze(path_prob, dim=2)\n path_prob = torch.cat((path_prob, 1 - path_prob), dim=2)\n\n mu = x.data.new(batch_size, 1, 1).fill_(1.0)\n penalty_ = torch.tensor(0.0).to(self.device)\n\n # Iterate through internal odes in each layer to compute the final path\n # probabilities and the regularization term.\n begin_idx = 0\n end_idx = 1\n\n for layer_idx in range(0, self.depth):\n path_prob_ = path_prob[:, begin_idx:end_idx, :]\n\n # Extract internal nodes in the current layer to compute the\n # regularization term\n penalty_ = penalty_ + self._cal_penalty(layer_idx, mu, path_prob_)\n mu = mu.view(batch_size, -1, 1).repeat(1, 1, 2)\n\n mu = mu * path_prob_ # update path probabilities\n\n begin_idx = end_idx\n end_idx = begin_idx + 2 ** (layer_idx + 1)\n\n mu = mu.view(batch_size, self.leaf_node_num_)\n\n return mu, penalty_", "def forward(self, X):\n self._X = X # For backprop later on.\n self._z = np.dot(X, self._W) + self._b\n a = self._act.a(self._z)\n return a", "def forward(self, state):\n x = F.relu(self.input(state))\n for layer in self.layers:\n x = F.relu(layer(x))\n if self.duel:\n # Value function estimator\n val = F.relu(self.val_fc_input(x))\n val = self.val_fc_output(val)\n # Advantage function estimator\n adv = F.relu(self.adv_fc_input(x))\n adv = self.adv_fc_output(adv)\n # Subtract mean so that V and A are uniquely identifiable for a given Q\n return val + adv - adv.mean(1).unsqueeze(1).expand(state.size(0), self.action_size)\n else:\n return self.output(x)", "def forward_propagate(self):\n for i in range(0, len(self.output_layer)):\n output = 0\n\n # Loop through each Neuron in the hidden layer\n for neuron in self.hidden_layer:\n output += neuron.weights[i] * neuron.output\n\n # Update summation for output classifier\n self.output_layer[i] = output", "def forward_graph(self):\n raise NotImplementedError", "def model_forward_pass(self, data):\n for key, value in data.items():\n data[key] = value.to(self.device)\n \n if self.fp16:\n with torch.cuda.amp.autocast():\n output, loss = self.model(**data)\n loss = loss / self.accumulate_grad_steps\n else:\n output, loss = self.model(**data)\n loss = loss / self.accumulate_grad_steps\n\n return output, loss", "def forward(self, x):\n # Compute the mean norm of activations per channel.\n nu2 = x.pow(2).mean(dim=[2, 3], keepdim=True)\n\n # Perform FRN.\n x = x * torch.rsqrt(nu2 + self.eps.abs())\n\n # Scale and Bias\n if self.is_scale:\n x = self.weight * x\n if self.is_bias:\n x = x + self.bias\n return x", "def step_forward(self):\n if self.state_num < len(self.steps):\n print(\"\\nStepping forward to state %d.\" % int(self.state_num + 1))\n self.state_string[0] = \"Stepping forward to state \" + str(self.state_num + 1) + \".\"\n # Get process and resource involved.\n process = self.steps[self.state_num][0]\n resource = self.steps[self.state_num][2]\n # Is this a request?\n if self.steps[self.state_num][1]:\n print(\"Process %d requests resource %d.\" % (process, resource))\n self.state_string[1] = \"Process \" + str(process) + \" requests resource \" + str(resource) + \".\"\n # Is the resource not being used by a process?\n if self.available[resource] > 0:\n # Mark in hold matrix the relationship between resource and process.\n self.hold_edges[resource][process] += 1\n # Make resource unavailabe.\n self.available[resource] -= 1\n # Store the process ID 
that holds the resource.\n self.connected_v[resource] = process\n else:\n # Mark in request matrix the relationship between resource and process.\n self.request_edges[resource][process] += 1\n # Add our process to the graph and make a directed edge.\n if process not in self.graph:\n self.graph.add_vertex(process)\n if self.connected_v[resource] not in self.graph:\n self.graph.add_vertex(self.connected_v[resource])\n if not self.graph.does_edge_exist(process, self.connected_v[resource]):\n self.graph.add_edge(process, self.connected_v[resource])\n print(\"p{:d} --> p{:d}\".format(process, self.connected_v[resource]))\n else:\n print(\"Process %d releases resource %d.\" % (process, resource))\n self.state_string[0] = \"Process \" + str(process) + \" releases resource \" + str(resource) + \".\"\n # Remove connection in hold matrix.\n self.hold_edges[resource][process] -= 1\n # Does another process want this resource?\n if np.count_nonzero(self.request_edges[resource]) > 0:\n # Get next process that wants the resource.\n new_process = self.request_edges[resource].index(1)\n # Mark in hold matrix the relationship between resource and process.\n self.hold_edges[resource][new_process] += 1\n # Store the process ID that holds the resource.\n self.connected_v[resource] = new_process\n # Remove connection in request matrix.\n self.request_edges[resource][new_process] -= 1\n # Delete edge if it exists.\n if self.graph.does_edge_exist(new_process, self.connected_v[resource]):\n self.graph.delete_edge(new_process, self.connected_v[resource])\n print(\"Process %d now has resource %d.\" % (new_process, resource))\n self.state_string[1] = \"Process \" + str(new_process) + \" now has resource \" + str(resource) + \".\"\n else:\n print(\"Resource %d is now available.\" % resource)\n self.state_string[1] = \"Resource \" + str(resource) + \" is now available.\"\n # Mark resource as unowned by a process.\n self.available[resource] += 1\n # Empty process that owned the resource previously.\n self.connected_v[resource] = None\n # Advance the state.\n self.state_num += 1", "def forward(self, states, actions_previous=None): \n x = states.view(states.shape[0], self.frames_n * self.state_size)\n \n # ACTOR\n x_actor_alphas = F.relu(self.actor_alphas_layer_1(x))\n x_actor_alphas = F.relu(self.actor_alphas_layer_2(x_actor_alphas))\n x_actor_alphas = F.softplus(self.actor_alphas_layer_3(x_actor_alphas)) + 1. # To get to the interval [1; Inf).\n\n x_actor_betas = F.relu(self.actor_betas_layer_1(x))\n x_actor_betas = F.relu(self.actor_betas_layer_2(x_actor_betas))\n x_actor_betas = F.softplus(self.actor_betas_layer_3(x_actor_betas)) + 1. 
# To get to the interval [1; Inf).\n \n distribution = torch.distributions.beta.Beta(concentration1=x_actor_alphas, concentration0=x_actor_betas)\n raw_actions = actions_previous * 0.5 + 0.5 if actions_previous is not None else distribution.sample() # To return to the Beta interval, [0, 1], for now.\n densities = torch.exp(distribution.log_prob(raw_actions))\n actions = (raw_actions - 0.5) * 2 # Finally back to the action interval, [-1, -1].\n entropies = distribution.entropy()\n \n # CRITIC\n x_critic = F.relu(self.critic_layer_1(x))\n x_critic = F.relu(self.critic_layer_2(x_critic))\n values = self.critic_layer_3(x_critic)\n \n return {\n 'actions': actions,\n 'densities': densities,\n 'entropies': entropies, \n 'values': values\n }", "def forward(self, input_dict):\n if not self.flag:\n print_total_parameters(self._net)\n self.flag = True\n\n agent_num = len(input_dict[\"agents\"])\n if \"update_target_net\" in input_dict and input_dict[\"update_target_net\"]:\n self._update_target_net()\n\n for i in range(agent_num):\n p = input_dict[\"agents\"][i]\n entities = p[\"obs\"][\"entities\"]\n localmap = p[\"obs\"][\"localmap\"]\n globalmap = p[\"obs\"][\"globalmap\"]\n p[\"atoms\"] = self.atoms.repeat(entities.shape[0], 1)\n p[\"q_dist\"] = self.policy_net(entities, localmap, globalmap)\n p[\"qs\"] = self._cal_q(p[\"q_dist\"])\n\n if \"train\" in input_dict and input_dict[\"train\"]:\n if not self.n_step:\n next_entities = p[\"next_obs\"][\"entities\"]\n next_localmap = p[\"next_obs\"][\"localmap\"]\n next_globalmap = p[\"next_obs\"][\"globalmap\"]\n with torch.no_grad():\n p[\"next_target_q_dist\"] = self.target_net(\n next_entities, next_localmap, next_globalmap\n )\n p[\"next_target_qs\"] = self._cal_q(p[\"next_target_q_dist\"])\n if self.double:\n p[\"next_q_dist\"] = self.policy_net(\n next_entities, next_localmap, next_globalmap\n )\n p[\"next_qs\"] = self._cal_q(p[\"next_q_dist\"])\n else:\n n_step_entities = p[\"n_step_obs\"][\"entities\"]\n n_step_localmap = p[\"n_step_obs\"][\"localmap\"]\n n_step_globalmap = p[\"n_step_obs\"][\"globalmap\"]\n with torch.no_grad():\n p[\"n_step_target_q_dist\"] = self.target_net(\n n_step_entities, n_step_localmap, n_step_globalmap\n )\n p[\"n_step_target_qs\"] = self._cal_q(p[\"n_step_target_q_dist\"])\n if self.double:\n p[\"n_step_q_dist\"] = self.policy_net(\n n_step_entities, n_step_localmap, n_step_globalmap\n )\n p[\"n_step_qs\"] = self._cal_q(p[\"n_step_q_dist\"])\n\n return input_dict", "def forward(self, state):\n '''\n state = F.relu(self.conv1(state))\n state = F.relu(self.conv2(state))\n state = F.relu(self.conv3(state))\n state = F.relu(self.fc1(state))\n \n action = F.relu(self.fc2(state))\n \n return action\n '''\n \n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n \n return x", "def forward(self, state):\n x = state\n feature = self.feature_layer(x)\n action_value = self.value_layer(feature)\n advantage = self.advantage_layer(feature)\n \n q_value = action_value + (advantage - advantage.mean(dim=1, keepdim=True))\n return q_value", "def forward(self, inputs):\n atoms, distance_matrices, molecular_sizes = inputs\n atoms = torch.cat(atoms)\n distance_matrix = self.pad(distance_matrices, 1e6)\n\n \"\"\"GNN layer (update the atom vectors).\"\"\"\n atom_vectors = self.embed_atom(atoms)\n for l in range(layer_hidden):\n gammas = torch.squeeze(self.gamma[l](atoms))\n M = torch.exp(-gammas*distance_matrix**2)\n atom_vectors = self.update(M, atom_vectors, l)\n atom_vectors = F.normalize(atom_vectors, 2, 1) # 
normalize.\n\n \"\"\"Output layer.\"\"\"\n for l in range(layer_output):\n atom_vectors = torch.relu(self.W_output[l](atom_vectors))\n\n \"\"\"Molecular vector by sum of the atom vectors.\"\"\"\n molecular_vectors = self.sum(atom_vectors, molecular_sizes)\n\n \"\"\"Molecular property.\"\"\"\n properties = self.W_property(molecular_vectors)\n\n return properties", "def forward(self, x, weights, drop_path_prob=0, discrete=False):\n # ind = torch.nonzero(weights)\n # if discrete:\n # assert len(ind) == 1\n # if len(ind) == 1:\n # return self.drop_path_op(self._ops[ind[0][0]], x, drop_path_prob)\n # apply each operation to x\n self._fs = [op(x) for op in self._ops]\n # print(weights)\n # sum the weighted operations\n return sum(w * op for w, op in zip(weights, self._fs) if w > 0)", "def forward(self, x):\n x = F.relu(self.affine1(x))\n x = F.relu(self.affine2(x))\n\n # actor: choses action to take from state s_t\n # by returning probability of each action\n action_prob = F.softmax(self.action_head(x), dim=-1)\n\n # critic: evaluates being in the state s_t\n state_values = self.value_head(x)\n\n # return values for both actor and critic as a tupel of 2 values:\n # 1. a list with the probability of each action over the action space\n # 2. the value from state s_t\n return action_prob, state_values", "def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n distribution = Categorical(F.softmax(x, dim=-1))\n return distribution", "def forward(self, input):\n\n return self.network(input)", "def forward(self, input):\n\n return self.network(input)", "def forward(self, input):\n\n return self.network(input)", "def forward(self, observation):\r\n # Apply relu activation and feed forward\r\n observation = F.relu(self.fc1(observation))\r\n actions = self.fc2(observation)\r\n\r\n return actions", "def forward(self, x):\n x = x.float()\n n, c, t, v, m = x.size()\n x = x.permute(0, 4, 3, 1, 2).contiguous()\n x = x.view(n * m, v * c, t)\n x = self.data_bn(x)\n x = x.view(n, m, v, c, t)\n x = x.permute(0, 1, 3, 4, 2).contiguous()\n x = x.view(n * m, c, t, v)\n for gcn in self.agcn_networks:\n x = gcn(x)\n return x", "def forward(self, inputs):\r\n #print (len(inputs))\r\n out = self.fc1(inputs)\r\n out = self.fc2(out)\r\n self.out = out\r\n return out\r\n #raise NotImplementedError('Implement the forward method of the model')\r", "def forward(self):\n self.value = np.dot(self.x_node.value, self.w_node.value) + self.b_node.value", "def forward(network, X):\r\n activations = []\r\n input = X\r\n for i in range(len(network)):\r\n activations.append(network[i].forward(X))\r\n X = network[i].forward(X)\r\n \r\n assert len(activations) == len(network)\r\n return activations", "def forward(self, w):\n # Repeat the rewards as they were repeated at the end of F in\n # Identification problem (see build_input() in find_weights.py script\n repeatedR = self.R.repeat(J, 1).unsqueeze(dim=2) # shape is J x J x 1\n\n # multiply w1 with r and add the resulting tensor with the already\n # calculated F_DIST_w1. 
Drastically improves performance.\n\n # If you've trouble understanding why multiply and add, draw these\n # tensors on a paper and work out how the additions and multiplications\n # affect elements, i.e., which operations affect which sections of the\n # tensors\n\n # res is J x J x num_features after\n # batch multiply repeatedR and w for r, then add F_DIST_w1\n res = torch.baddbmm(F_DIST_w1, repeatedR, w['first_for_r'])\n\n # forward propagation done, multiply remaining tensors (no tensors are\n # mutable after this point except res)\n # last w doesn't need relu\n if MORE_THAN_1_W:\n res = batch_norm_tensor(torchfun.relu(res)) # relu for weight 1\n # bmm -> relu for all but last weight set\n res = reduce(forward_step_layer, w['except_first'][:-1], res)\n res = res.bmm(w['except_first'][-1])\n res = res.view(-1, J) # res is J x J\n res += eta_matrix\n\n return torchfun.softmax(res, dim=1)", "def network_forward(self, X):\n \n #############################################################################\n # TODO: Perform a forward pass on the network and store the caches of #\n # each layer inside the cache_list #\n #############################################################################\n ActivationFunction = None\n if self.hidden_activation_fn == \"sigmoid\":\n ActivationFunction = lambda x: self.sigmoid_forward(x)\n elif self.hidden_activation_fn == \"tanh\":\n ActivationFunction = lambda x: self.tanh_forward(x)\n elif self.hidden_activation_fn == \"relu\":\n ActivationFunction = lambda x: self.relu_forward(x)\n\n Layer1Value, cacheL1 = self.fully_connected_forward(X, self.params[\"W1\"], self.params[\"b1\"])\n Layer1ValueActivation, cacheL1A = ActivationFunction(Layer1Value)\n scores, cacheL2 = self.fully_connected_forward(Layer1ValueActivation, self.params[\"W2\"], self.params[\"b2\"])\n\n # Cache\n cache_list =[cacheL1, cacheL1A, cacheL2]\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return scores, cache_list", "def forward(self, adj, z, n_nodes):\n x = z.repeat(n_nodes, 1)\n sequence = self.gcn(x, adj)\n\n return sequence", "def forward(self, states, actions_previous=None):\n x = states.view(states.shape[0], self.frames_n * self.state_size)\n \n # ACTOR\n x_actor_mus = F.relu(self.actor_layer_1(x))\n x_actor_mus = F.relu(self.actor_layer_2(x_actor_mus))\n x_actor_mus = torch.tanh(self.actor_layer_3(x_actor_mus))\n\n distribution = torch.distributions.normal.Normal(loc=x_actor_mus, scale=self.actor_sigmas_parameters)\n actions = actions_previous if actions_previous is not None else distribution.sample() \n # actions = torch.clamp(actions, -1, 1) # Note: This is one approach, if necessary. 
Another is to use a Beta distribution\n # instead of a Normal distribution (see below).\n densities = torch.exp(distribution.log_prob(actions))\n entropies = distribution.entropy()\n \n # CRITIC\n x_critic = F.relu(self.critic_layer_1(x))\n x_critic = F.relu(self.critic_layer_2(x_critic))\n values = self.critic_layer_3(x_critic)\n \n return {\n 'actions': actions,\n 'densities': densities,\n 'entropies': entropies, \n 'values': values\n }", "def forward(self, x):\n x = self.conv1(x)\n if self.use_bn:\n x = self.batchnorm(x)\n if self.use_dropout:\n x = self.dropout(x)\n x = self.activation(x)\n x = self.conv2(x)\n if self.use_bn:\n x = self.batchnorm(x)\n if self.use_dropout:\n x = self.dropout(x)\n x = self.activation(x) \n x = self.maxpool(x) \n return x", "def forward(self, inputs):\n\n down0 = self.layer_0(inputs=inputs)\n down1 = self.layer_1(inputs=down0)\n down2 = self.layer_2(inputs=down1)\n down3 = self.layer_3(inputs=down2)\n down4 = self.layer_4(inputs=down3)\n\n up1 = self.layer_7(down4, down3)\n\n up2 = self.layer_8(up1, down2)\n\n up3 = self.layer_9(up2, down1)\n\n up4 = self.layer_10(up3, down0)\n\n up5 = self.layer_11(up4)\n return up5", "def test_propagate_forward(self):\n nn = NeuralNet(0, 0, '', '', blank=True)\n nn.create_net(2, 1, 2, 2)\n\n # Override weights to static value for reproducibility\n for node in nn.layers[2].nodes:\n node.weights = [0.6, 0.6]\n\n for node in nn.layers[3].nodes:\n node.weights = [1.0, 1.0]\n\n nn.propagate_forward([2, 3], test=True)\n model_output = nn.layers[-1].nodes[0].value\n\n self.assertEqual(round(model_output, 3), 0.823)", "def _forward(self, a):\n a = np.array(a)\n self.weighted_layer, self.activations = [], [a]\n for w, b in zip(self.weights, self.biases):\n z = w.dot(a) + b\n a = sigmoid(z)\n self.weighted_layer.append(z)\n self.activations.append(a)\n\n return a", "def forward_pass(self, x):\r\n self.a = (self.w.T @ x.T).T + self.b # Weighted sum of x with weight matrix(augmented with bias)\r\n self.x = x\r\n return self.a", "def adjoint(self, inputs, outputs):\n super(copy, self).forward(inputs, outputs)", "def forward(self, input):\n\n x = self.conv(input)\n x = self.bn(x)\n out = self.act(x)\n return out", "def forward(self, inputs):\n action_pred_input, self.actions = self.sample_tsteps(inputs.states, inputs.action_targets)\n a_pred = self.s_encoder.forward(action_pred_input)\n return AttrDict(a_pred=a_pred)", "def generate_actions(self):\n \n # For all state nodes\n action = 0\n \n for k in range(self.u0_n):\n \n u = np.array([ self.ud[0][k] ])\n \n # State and grid index based on node #\n self.actions_input[action,:] = u\n self.actions_index[action,:] = k\n\n # Increment node number\n action = action + 1", "def _forward(self, x):\n global global_epoch\n global_epoch += 1\n bias = -np.ones((x.shape[0], 1))\n tail = np.zeros((x.shape[0], self.dim_hid+self.dim_out))\n nodes = np.concatenate((bias, x, tail), axis=1)\n weight = self.weight * self.connectivity\n for i in range(self.dim_in, self.dim_in+self.dim_hid+self.dim_out):\n net = nodes.dot(weight[i])\n nodes[:,i] = self.__sigmoid(net)\n nodes[:,self.dim_in:self.dim_in+self.dim_hid] *= self.hidden\n return nodes", "def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.act(x)\n return x", "def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.act(x)\n return x", "def forwardPropagate (self, x):\n\t\tif type(x) is not list:\n\t\t\tx = [x]\n\t\tx = np.concatenate((np.array([1]),np.array(x)),axis=0) # absorb w_0\n\n\t\t# No transformation, but 
needed later\n\t\tfor i in range(self.inn):\n\t\t\tself.z_in[i] = x[i]\n\t\t\n\t\t# For every hidden neuron (1 hidden layer only!)\n\t\tfor j in range(self.hidden):\n\t\t\tsumIn = 0\n\t\t\tfor i in range(self.inn):\n\t\t\t\tsumIn += self.w_hd[j][i]*self.z_in[i]\n\t\t\tself.a_hd[j] = sumIn # Needed for backprop (5.56)\n\t\t\tself.z_hd[j] = self.act(sumIn)\n\n\t\t# For every output neuron\n\t\tfor k in range(self.out):\n\t\t\tsumHdn = 0\n\t\t\tfor j in range(self.hidden):\n\t\t\t\tsumHdn += self.w_out[k][j]*self.z_hd[j]\n\t\t\tself.z_out[k] = sumHdn\n\t\treturn self.z_out", "def _forward_prop(self,W,X,transfer_func=sigmoid):\n \n # Hidden layer DxHLS\n weights_L1,bias_L1,weights_L2,bias_L2 = self._extract_weights(W) \n \n # Output layer HLSxOUT\n \n # A_2 = N x HLS\n A_2 = transfer_func(np.dot(X,weights_L1) + bias_L1 )\n \n # A_3 = N x Outputs - softmax\n A_3 = self.softmax(weights_L2,A_2,bias_L2)\n \n # output layer\n return [A_2,A_3]", "def forward(self, X):\n node_feats = self.act(X) - self.shift\n return node_feats", "def forward(self, x):\n pass", "def forward(self, screen, minimap, flat, available_actions):\n # push each input through the network\n screen = self.screen_features(screen)\n minimap = self.minimap_features(minimap)\n flat = self.flat_features(flat)\n\n flattened_screen = screen.view(1, -1)\n flattened_mm = minimap.view(1, -1)\n\n latent_vector = torch.cat([flat, flattened_screen, flattened_mm], 1)\n features = self.combined_features(latent_vector)\n\n value = self.value_predictor(features)\n action = self.policy_action(features)\n\n policy_args = dict()\n for arg in actions.TYPES:\n for dim, size in enumerate(arg.sizes):\n module_name = self.get_argument_module_name(arg, dim)\n operator = getattr(self, module_name)\n policy_args[module_name] = operator(features)\n\n return action, policy_args, value", "def forward(self, x):\n x = self.efficient_net(x)\n return x", "def forward(self, adj, features):\n\n # adj = torch.where(adj > 0.5, 1, 0)\n\n # Perform convolutional layers with Relu as the activation function\n h = F.relu(self.conv_1(adj, features))\n h = self.conv_dropout_1(h)\n h = F.relu(self.conv_2(adj, h))\n h = self.conv_dropout_2(h)\n\n # Find the sum of node embeddings to use as the graph embedding\n hg = sum(h, dim=0)\n\n # Perform the linear layers\n h = F.relu(self.fc_1(hg))\n h = self.fc_dropout(h)\n out = self.fc_2(h)\n\n # Perform the output activation function\n out = self.output_func(out)\n\n return out", "def forward(self, x: torch.Tensor, dim: int = 0, p: int = 1):\n raise NotImplementedError", "def forward(self, observation, action):\n # Observation, action embedding\n # Uses attention to determine whether another agent's observation/action pair is necessary to pay attention to\n\n # Prepare the embeddings\n observation_embedding = self.observation_embedding(observation.float()) \n action_embedding = self.action_embedding(action.float())\n observation_action_embedding = torch.cat((observation_embedding, action_embedding),dim=2)\n\n # Attention\n query = self.q_projection(observation_action_embedding).permute(1,0,2)\n key = self.k_projection(observation_action_embedding).permute(1,0,2)\n value = self.v_projection(observation_action_embedding).permute(1,0,2)\n\n x = self.attention(query, key, value)[0].permute(1,0,2)\n\n x = self.predict(x)\n \n return x", "def forward(self, state):\n x = self.fc(state)\n return x", "def forward(self, inputs, outputs):\n if len(inputs) > 1:\n np.copyto(outputs[0], np.sum(inputs, 0))\n else:\n np.copyto(outputs[0], 
inputs[0])", "def act(self, state, epsilon, env):\n if random.random() > epsilon:\n state = Variable(torch.FloatTensor(state)).unsqueeze(0) # adds extra dim when single input\n state = self.vari_gpu(state)\n _, u_opt = self.forward(state)\n action = (u_opt.cpu().detach().numpy()) # compute the u*[0] \n #print('act:q_value ',q_value)\n #print('act:model action ',action)\n else:\n rand = np.random.rand(int(np.array(env.action_space.shape)))\n high = env.action_space.high\n low = env.action_space.low\n action = low + rand*(high-low)\n #print('act: ',action)\n return action", "def forward_propagate(self, input):\n self.input_matrix[0] = input\n act_matrix = input\n for i in range(1, len(self.dimens)):\n\n #update our layer of activations\n act_matrix = self.activation_forward(act_matrix, i)\n\n #save our input values for use in back propogating\n self.input_matrix[i] = act_matrix\n\n #send the logistic function of all the inputs to the next layer\n act_matrix = [logistic(act) for act in act_matrix]\n\n return act_matrix", "def forward_propagation(self, X):\n W1, W2, W3, B1, B2, B3 = self.get_weights_from_dna()\n\n Z1 = np.dot(W1, X) + B1\n A1 = np.maximum(Z1, 0)\n Z2 = np.dot(W2, A1) + B2\n A2 = np.maximum(Z2, 0)\n Z3 = np.dot(W3, A2) + B3\n A3 = Z3\n return A3", "def forward(self, x, edge_index, edge_attr, size=None):\r\n # Initial x=[#,head*node_channels]\r\n x = torch.matmul(x, self.weight_node) #matmlu=matrix product\r\n # Final x=[#nodes,node_channels]\r\n # Initial edge_attr=[#,#edge_features]\r\n edge_attr = torch.matmul(edge_attr, self.weight_edge)\r\n # Final edge_attr = [#,head*node_channels]\r\n edge_attr = edge_attr.unsqueeze(-1) if edge_attr.dim() == 1 else edge_attr\r\n return self.propagate(edge_index, x=x, edge_attr=edge_attr, size=size) # [#,node_channels]\r", "def forward(self, input_tensor):\n rv = torch.randn(input_tensor.size(), device=self.device) * 0.02\n intermediate = input_tensor + rv\n for module in self.down:\n intermediate = module(intermediate)\n rv = torch.randn(intermediate.size(), device=self.device) * 0.02 + 1\n intermediate *= rv\n\n intermediate = intermediate.view(-1, self.width)\n\n for module in self.classifier:\n intermediate = module(intermediate)\n\n return intermediate", "def forward(self, state):#forward pass\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n return torch.tanh(self.fc3(x))", "def makeFastFeedForwardFunction(self):\n\n\t\toutWeightMatrix = []\n\t\tfor unit in self.outputLayer:\n\n\t\t\trow = []\n\t\t\tfor b in unit.branchesIn:\n\t\t\t\tprint b.weight\n\t\t\t\trow.append(b.weight)\n\t\t\t\n\t\t\toutWeightMatrix.append(row)\n\t\toutWeightMatrix = np.array(outWeightMatrix).squeeze()\n\n\t\thiddenMatrices = []\n\t\tfor layer in self.hiddenLayers:\n\t\t\tmatrix = []\n\t\t\t#ignore the bias unit, since it has no branches in\n\t\t\tfor unit in layer[1:]:\n\t\t\t\trow = []\n\t\t\t\tfor b in unit.branchesIn:\n\t\t\t\t\trow.append(b.weight)\n\n\t\t\t\tmatrix.append(row)\n\t\t\tmatrix = np.array(matrix)\n\n\t\t\thiddenMatrices.append(matrix)\n\n\t\thidActFunc = (self.hiddenLayers[0])[1].activationFunction\n\t\toutActFunc = self.outputLayer[0].activationFunction\n\n\t\tdef ffFunc(inp):\n\t\n\t\t\tforward = np.insert(inp.T,0,1.0,axis=0)\n\t\t\tfor matrix in hiddenMatrices:\n\t\t\t\tnext = np.dot(matrix,forward)\n\t\t\t\tnext = hidActFunc(next)\n\t\t\t\tforward = np.insert(next,0,1.0,axis=0)\n\n\t\t\tout = np.dot(outWeightMatrix,forward)\n\n\t\t\treturn outActFunc(out)\n\n\t\treturn ffFunc", "def forward_propagate(self, inputs):\n\n 
activations = inputs\n self.activations[0] = inputs\n\n for i, w in enumerate(self.weights):\n # Calculate the net inputs\n net_inputs = np.dot(activations, w)\n\n # Calculate the activations\n activations = self._sigmoid(net_inputs)\n self.activations[i+1] = activations\n\n return activations", "def forward(self, distance):\n self.logger.debug(\"forward \" + str(distance))", "def forward(self, inputs):\n raise NotImplementedError", "def generate_actions(self):\n \n # For all state nodes\n action = 0\n \n for l in range(self.u0_n):\n for m in range(self.u1_n):\n \n u = np.array([ self.ud[0][l] , self.ud[1][m] ])\n \n # State and grid index based on node #\n self.actions_input[action,:] = u\n self.actions_index[action,:] = np.array([l,m])\n \n # Increment node number\n action = action + 1" ]
[ "0.6780671", "0.6720904", "0.66573745", "0.65337664", "0.6500491", "0.6433926", "0.6398824", "0.63217956", "0.6288894", "0.62833714", "0.62578505", "0.62466264", "0.6242794", "0.62398285", "0.6232702", "0.6224309", "0.6224309", "0.6190171", "0.61818534", "0.61756337", "0.6167651", "0.61394876", "0.6114766", "0.61021924", "0.60888857", "0.6086229", "0.6053521", "0.6052369", "0.60431147", "0.6040431", "0.6028421", "0.60271865", "0.6012014", "0.6001007", "0.59863156", "0.5979859", "0.5975842", "0.59690803", "0.596572", "0.5961669", "0.59562445", "0.59539026", "0.593796", "0.5915679", "0.59116226", "0.58999735", "0.5898619", "0.58928126", "0.5875865", "0.58692616", "0.5866065", "0.5863536", "0.5860726", "0.58592045", "0.5851383", "0.5847564", "0.5847564", "0.5847564", "0.5847165", "0.5835642", "0.58250916", "0.5823843", "0.5815822", "0.5815576", "0.58061504", "0.5798952", "0.57928634", "0.57830393", "0.578005", "0.57742083", "0.57542354", "0.57388365", "0.570166", "0.5698358", "0.569624", "0.56926566", "0.5690256", "0.5682984", "0.5682984", "0.5679849", "0.5679291", "0.567691", "0.5674383", "0.5671704", "0.5668285", "0.56664693", "0.5664935", "0.5664342", "0.5663497", "0.5663406", "0.5658626", "0.56515384", "0.56512934", "0.5648378", "0.56479645", "0.5645666", "0.56396264", "0.5638754", "0.5627209", "0.56205165", "0.5609082" ]
0.0
-1
Get the action greedily (without sampling)
def get_action(self, x: FloatTensor) -> Tensor:
    x = self.shared_net(x.float())
    batch_mean = self.mean_layer(x)
    return self.action_scale * torch.tanh(batch_mean) + self.action_bias
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_action(self, state):\n return self.env.action_space.sample()", "def choose_random_action(env):\n return env.action_space.sample()", "def select_action(self, state):\n\t\treturn sample(range(0, self.action_space), 1)[0]", "def get_action(self, s, eval=False):\n if eval:\n with torch.no_grad():\n action = self.actor.get_best_action(s[None, ...].to(self.device))\n else:\n if self.step_count < 20000:\n action = self.action_space[np.random.randint(self.action_space_len)]\n else:\n with torch.no_grad():\n action, _, _ = self.actor.sample_action(s[None, ...].to(self.device))\n action = action.item()\n return action", "def explore_action():\n # def get_action(o, noise_scale):\n # a = ac.act(torch.as_tensor(o, dtype=torch.float32))\n # a += noise_scale * np.random.randn(act_dim)\n # return np.clip(a, -act_limit, act_limit)\n raise NotImplementedError", "def get_action(self, state):\n time.sleep(2.0)\n return random.choice(state.get_legal_actions(self.index))", "def obtain_action(self, timestep):\r\n\t\treturn random.randint(0, self.num_actions-1)", "def action(self, gstate, actions):\n self.log.debug(\"Picking among actions %s\" % actions)\n return actions[0]", "def act(self, observation):\n return self.action_space.sample()", "def get_action(self, state):\r\n if len (state.actions()) == 1:\r\n # dbstate = DebugState.from_state(state)\r\n # print (dbstate)\r\n self.queue.put(state.actions()[0])\r\n return\r\n if state.ply_count < 2:\r\n action = random.choice(state.actions())\r\n else:\r\n action = self.uct_search(state).action\r\n # dbstate = DebugState.from_state(state)\r\n # print (dbstate)\r\n if action is None:\r\n print(\"Incorrect action\")\r\n action = random.choice(state.actions())\r\n self.queue.put(action)", "def get_action(self, state):\n depth_limit = 20\n\n if state.ply_count < 4 and self.data is not None:\n if state in self.data:\n self.queue.put(self.data[state])\n else:\n self.queue.put(random.choice(state.actions()))\n else:\n for depth in range(1, depth_limit+1):\n best_move = self.alpha_beta_search(state, depth)\n if best_move is not None:\n self.queue.put(best_move)", "def sample_action(self, state, timestep, explore_prob):\r\n\r\n if np.random.random() < explore_prob:\r\n return np.random.uniform(*self.bounds, size=(self.action_size,))\r\n return self.action_select_eval(self.model, state, timestep)[0].detach()", "def takeAction(self, state):\n # go greedy or not?\n if random.uniform(0, 1) < self.epsilon:\n # greedy selection\n # find best action\n allActions = torch.stack(\n tuple(torch.cat((state.strengths, state.focus, changes)) for changes in self.actionSet))\n evaluation = self.q.evaluateBunch(allActions)\n action = Action(state, self.actionSet[evaluation.argmax()])\n return action\n else:\n # random selection\n return Action(state, random.choice(self.actionSet))", "def sample(self):\n return self._action_out(self._env.action_space.sample())", "def get_action(self, x):\n ber = self.random_state.binomial(1, self._get_mu())\n if ber == 1:\n self.random = True\n K = x.get_K()\n act = self.random_state.choice(K)\n return [act]\n else:\n self.random = False\n if self.learner_type == 'minimonster':\n self.base_num_unif = self.base_learner.num_unif\n return self.base_learner.get_action(truncate_context(x, self.d))", "def getAction(self, state):\n # Pick Action\n \"*** YOUR CODE HERE ***\"\n # Epsilon greedy\n if util.flipCoin(self.epsilon) is True:\n self.lastAction = random.choice(self.legalActions)\n else:\n self.lastAction = self.computeActionFromQValues(state)\n 
return self.lastAction", "def getAction(self, state):\n # Pick Action\n \"*** YOUR CODE HERE ***\"\n # Epsilon greedy\n if util.flipCoin(self.epsilon) is True:\n self.lastAction = random.choice(self.legalActions)\n else:\n self.lastAction = self.computeActionFromQValues(state)\n return self.lastAction", "def obtain_action(self, timestep):\r\n\t\t# Obtain the action probabiltiy weights\r\n\t\tself.curr_prob_gradient = softmax(self.H_gradient[timestep])\r\n\r\n\t\t# Randomly selects an action, weighted by the action probability weights\r\n\t\treturn np.random.choice(self.num_actions, p=self.curr_prob_gradient)", "def get_action(self, state):\n # TODO: Replace the example implementation below with your own search\n # method by combining techniques from lecture\n #\n # EXAMPLE: choose a random move without any search--this function MUST\n # call self.queue.put(ACTION) at least once before time expires\n # (the timer is automatically managed for you)\n\n import random\n if state.ply_count < 2:\n self.queue.put(random.choice(state.actions()))\n else:\n #my_timer = time.time() + float(0.1499)\n best_move = None\n depth_limit = 3\n for depth in range(1, depth_limit + 1):\n best_move = self.alpha_beta_search(state, depth)\n if best_move is None:\n best_move = random.choice(state.actions())\n self.queue.put(best_move)\n \n # Alpha beta pruning\n # Iterative deepening to set bounds\n # Evaluation function: other than (#my_moves - #opponent_moves), partition, symmetry ", "def get_action(self, state, available_actions, play_mode=False):\n epsilon = self.current_epsilon()\n will_explore = np.random.random_sample() < epsilon\n\n if not play_mode and will_explore:\n return np.random.choice(available_actions)\n else:\n query_state = np.reshape(state, tuple([1,1] + self.state_shape)).astype(np.float32)\n query_actions_filter = np.zeros((1, self.actions_count), dtype=np.float32)\n query_actions_filter[0][available_actions] = 1.0\n\n chosen_action = self.session.run(self.query_action, {\n self.states: query_state,\n self.query_actions_filter: query_actions_filter\n })\n\n return chosen_action[0]", "def get_action(self, state):\n action_probs = self.forward(state)\n action = torch.distributions.Categorical(probs=action_probs).sample()\n action = action.detach().cpu().numpy()\n return action", "def getAction(self, state):\n # Pick Action\n legalActions = self.getLegalActions(state)\n action = None\n \"*** YOUR CODE HERE ***\"\n # util.raiseNotDefined()\n if random.random() < self.epsilon:\n action = random.choice(legalActions)\n else:\n action = self.getPolicy(state)\n return action", "def bestAction(self):\n get_q = self.getQFunction()\n maxq = -5000\n best_actions = []\n for (state, action), q in get_q.items():\n if q > maxq:\n maxq = q\n best_actions = [action]\n elif q == maxq:\n best_actions.append(action)\n return self.tuple_to_dictionary(random.choice(best_actions))", "def get_action(self, obs):\n q = self.sess.run(self._qvals, feed_dict={self._observation: obs})\n max_a = np.argmax(q, axis=1)\n\n # Epsilon greedy method.\n global_step = self.sess.run(tf.train.get_global_step())\n batch_size = obs.shape[0]\n actions = np.random.randint(self._dim_act, size=batch_size)\n idx = np.random.uniform(size=batch_size) > self._epsilon_schedule(global_step)\n actions[idx] = max_a[idx]\n return actions", "def _select_action(self):\n if self.eval_mode:\n self._log_values()\n epsilon = self.epsilon_eval\n else:\n epsilon = self.epsilon_fn(\n self.epsilon_decay_period,\n self.training_steps,\n 
self.min_replay_history,\n self.epsilon_train)\n if random.random() <= epsilon:\n # Choose a random action with probability epsilon.\n return random.randint(0, self.num_actions - 1)\n else:\n # Choose the action with highest Q-value at the current state according\n # to the current head.\n return self._compute_q_argmax()", "def random_action(self, observation):\n return self.env.action_space.sample()", "def action_space_sample(self):\n return np.random.choice(self.possible_actions)", "def obtain_action(self, timestep):\r\n\t\t# Generates a random number for deciding between performing a random or\r\n\t\t# deterministic action.\r\n\t\trandom_num = random.random()\r\n\r\n\t\tif random_num < self.epsilon:\r\n\t\t\t# Random action taken.\r\n\t\t\treturn random.randint(0, self.num_actions-1)\r\n\r\n\t\telse:\r\n\t\t\t# Deterministic action taken, where a random action index is\r\n\t\t\t# selected among the action indexes with the maximum Q value\r\n\t\t\t# estimate.\r\n\t\t\treturn np.random.choice(np.argwhere(self.player_Q == self.player_Q.max()).flatten()).item()", "def get_random_action(self) -> ActionType:\n return self.action_space.sample()", "def randomAction():\n return np.random.randint(0, POSSIBLE_ACTIONS)", "def chooseAction(self, gameState):\n\n ####print \"chooseAction Called\"\n\n #self.lastEatenFood = None\n\n\n actions = gameState.getLegalActions(self.index)\n\n ##print \"\\nNEW ACTION\\n--------\"\n\n # You can profile your evaluation time by uncommenting these lines\n # start = time.time()\n values = [self.evaluate(gameState, a) for a in actions]\n # ###print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n\n \n\n return random.choice(bestActions)", "def get_action(self,state):\n \n q_values = self.__network.predict(state[None])[0]\n \n ###YOUR CODE\n if np.random.rand()<self.epsilon:\n return np.random.choice(self.n_actions)\n return np.argmax(q_values)", "def act(self):\n action = self.best_action()\n return action", "def get_action(history, epsilon, step, model):\n if np.random.rand() <= epsilon or step <= FLAGS.observe_step_num:\n return random.randrange(ACTION_SIZE)\n else:\n q_value = model.predict([history, np.ones(ACTION_SIZE).reshape(1, ACTION_SIZE)])\n return np.argmax(q_value[0])", "def _select_action(self, state):\n if random.random() < self.epsilon:\n action = random.randrange(self.num_actions)\n return torch.tensor([[action]], device=device, dtype=torch.long)\n else:\n with torch.no_grad():\n return self.policy_net(state).max(1)[1].view(1, 1)", "def sample(self, action):\n selector = random.random()\n return 1 if selector <= self.pay_offs[action] else 0", "def _choose_action(self):\n return random.randint(0,self.num_bandits-1)", "def _get_action(self, state, episode):\n if (episode < self.num_explore_episodes):\n action = [random.choice(list(range(self.num_actions)))\n for _ in range(len(state))]\n return action\n\n action = []\n self.model.q_1.eval()\n with torch.no_grad():\n state = torch.tensor(state, device=self.device).float()\n action = torch.argmin(self.model.q_1(state), dim=-1).tolist()\n self.model.q_1.train()\n return action", "def best_action(self, state):\n return random.choice(self.possible_actions)", "def chooseAction(self,state):\r\n #generate float btwn 0-1\r\n choice = random.random()\r\n feat = self.feat_funct(state)\r\n #choose according to that number\r\n if choice > self.epsilon:\r\n return(self.maxQ(feat)[1])\r\n else:\r\n 
#choose randomly\r\n return(self.actions[random.randrange(0,len(self.actions))])", "def select_action(self, state: str) -> Action:\n rnd_num = self._random.random()\n p = 1.0 - self.epsilon\n if rnd_num > p:\n action = self._random.random_choice() \n else:\n action = max(self.Qs[state], key=lambda x: self.Qs[state][x])\n if self.epsilon_decay == True:\n self.turns += 1\n if self.turns < self.end_epsilon_decay:\n self.epsilon -= self.decay_value \n return action", "def choose_action(self, state):\n pure_action = self.actor_local.model.predict(state)[0]\n # add gaussian noise for exploration\n # noise = np.random.normal(self.noise_mean, self.noise_stddev, self.action_size)\n \n # add OU noise for exploration\n noise = self.noise.sample()\n\n # action = np.clip(pure_action + noise, self.action_low, self.action_high)\n # print(\"pure\", pure_action)\n # print(\"noise\", noise)\n # action = self.action_high * (pure_action + noise)\n # action = pure_action + noise\n action = np.clip(pure_action + noise, self.action_low, self.action_high)\n # print(\"action\", action)\n return action.tolist()", "def sample_action(self, state):\n sample = random.random()\n eps_threshold = self.eps_min + (self.eps - self.eps_min) * \\\n math.exp(-1. * self.n_steps / self.eps_decay)\n self.n_steps += 1\n state = torch.from_numpy(state).float().view(1,-1)\n self.working_q.eval()\n if sample > eps_threshold:\n with torch.no_grad():\n action = self.working_q(state).max(1)[1].view(1, 1).to(device=device)\n else:\n action = torch.tensor([[random.randrange(2)]], device=device, dtype=torch.long)\n self.working_q.train()\n return action", "def getAction(trainer):\n\tpolicy = trainer.get_policy()\n\tstate = randState()\n\tprint(\"state:\")\n\tprint(state)\n\taction = policy.compute_single_action(state)\n\treturn action", "def getAction(self, state):\n # Pick Action\n legalActions = self.getLegalActions(state)\n action = None\n\n \"\"\"Description:\n Use util.flipCoin, if return true then randomly choice from legalAction\n if flase, then sue getPolicy to get best policy action\n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n if len(legalActions) == 0:\n return action # None\n \n if util.flipCoin(self.epsilon):\n ''' exploration function (not work well)''' \n# posPol = util.Counter()\n# for a in legalActions:\n# if self.getQValue(state,a) >= 0:\n# posPol[a] = -1*self.getQValue(state, a) + (1000/(self.vitCount[(state,a)]+0.0001))\n# #print \"posPol[\", a, \"]= \",posPol[a]\n# #posPol[a] = (self.getQValue(state, a) * self.epsilon** self.vitCount[(state,a)]) + ( self.epsilon/(self.vitCount[(state,a)]+0.1) )\n# if len(posPol) == 0:\n# action = random.choice(legalActions)\n# else:\n# action = posPol.argMax() # random.choice(posPol.keys())\n ''' Random exploration '''\n action = random.choice(legalActions)\n else:\n action = self.getPolicy(state)\n \n \"\"\" END CODE \"\"\"\n\n return action", "def chooseAction(self, gameState):\n actions = gameState.getLegalActions(self.index)\n\n ''' \n You should change this in your own agent.\n '''\n\n return random.choice(actions)", "def obtain_action(self, timestep):\r\n\t\t# Finds all actions which have not been selected before.\r\n\t\tzero_action = np.argwhere(self.player_selected_actions == 0).flatten()\r\n\r\n\t\t# Checks if there are any actions which have not been selected before.\r\n\t\tif zero_action.size:\r\n\t\t\t# Returns a random action index that has not been selected before.\r\n\t\t\treturn np.random.choice(zero_action).item()\r\n\r\n\t\telse:\r\n\t\t\t# Calculates the sum of the Q 
value estimate and the upper\r\n\t\t\t# confidence bound term\r\n\t\t\tvalue_list = self.player_Q + self.confidence_level * (np.log(timestep+1) / self.player_selected_actions) ** 0.5\r\n\r\n\t\t\t# Returns a random action index selected among the action indexes\r\n\t\t\t# with the maximum sum.\r\n\t\t\treturn np.random.choice(np.argwhere(value_list == value_list.max()).flatten()).item()", "def sample_action(self, state):\n # YOUR CODE HERE\n \n action = np.random.choice(1)\n \n \n return action", "def take_action(self, observation):\r\n if (np.random.rand() <= self.epsilon):\r\n action = random.randrange(self.action_size)\r\n return action\r\n act_values = self.model.predict(observation) # Forward Propagation\r\n action = np.argmax(act_values[0])\r\n return action", "def __call__(self, state, q_values):\n\n if self.policy_type == \"greedy\":\n is_greedy = True\n else:\n is_greedy = random.uniform(0, 1) > self.epsilon\n\n if is_greedy :\n # choose greedy action\n index_action = np.argmax(q_values[state])\n else:\n # get a random action\n index_action = random.randint(0,3)\n\n return actions_dict[index_action]", "def get_action(self, state):\n return random.choice(state.get_legal_actions(self.index))", "def get_random_action():\n # Define an array containing the available actions for the UAV\n # in the final work, takeoff and land must be added\n action_list = ['left', 'right', 'forward', 'backward', 'stop', 'descend']\n # Choose a random action within the array\n #action_index = STDrandom.randint(0, len(action_list) - 1)\n # forward,backward,left,right, stop and land\n probability_descend = 0.25\n probability = (1 - probability_descend)/ (len(action_list) -1)\n action_probability = [probability, probability, probability, probability, probability, probability_descend]\n action = np.random.choice(action_list, 1, p=action_probability)[0]\n #action_index = STDrandom.randint(0, 10)\n #action = action_list[action_index]\n\n return action", "def chooseAction(self, gameState):\n actions = gameState.getLegalActions(self.index)\n\n # You can profile your evaluation time by uncommenting these lines\n # start = time.time()\n values = [self.evaluate(gameState, a) for a in actions]\n # print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n\n return random.choice(bestActions)", "def chooseAction(self, gameState):\n\n actions = gameState.getLegalActions(self.index)\n obs = gameState.getAgentDistances()\n for o in self.opponents:\n self.observe(o, obs[o], gameState)\n self.displayDistributionsOverPositions(self.distributions)\n\n # You can profile your evaluation time by uncommenting these lines\n start = time.time()\n values = [self.evaluate(gameState, a) for a in actions]\n print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n\n #self.elapseTime(gameState)\n\n return random.choice(bestActions)", "def choose_action(self, state):\n\n return self.policy(state).sample()", "def get_action(self, state):\n\n # Pick Action\n possible_actions = self.get_legal_actions(state)\n\n # If there are no legal actions, return None\n if len(possible_actions) == 0:\n return None\n\n # agent parameters:\n epsilon = self.epsilon\n\n #\n # INSERT CODE HERE to get action in a given state (according to epsilon greedy algorithm)\n #\n\n best_action = self.get_best_action(state)\n chosen_action = 
best_action\n\n if random.uniform(0, 1) < epsilon:\n random_actions = possible_actions.copy()\n random_actions.remove(best_action)\n chosen_action = random.choice(random_actions if random_actions else [best_action])\n\n return chosen_action", "def select_action(self, state):\n # print(\"agent.select_action() - state: {}\".format(state))\n\n self.step_counter += 1\n # self.epsilon = max(0.1, 1.0-self.step_counter/self.epsilon_decay_steps)\n epsilon_min = .01\n epsilon_max = .8\n epsilon_step = epsilon_max - (epsilon_max - epsilon_min) * self.step_counter / self.epsilon_decay_steps\n self.epsilon = max(epsilon_min, epsilon_step)\n # self.epsilon = max(0.1, 1.0/self.step_counter)\n\n rand = random.uniform(0, 1)\n if rand < self.epsilon:\n # choose random action\n return np.random.choice(self.nA)\n else:\n # choose greedy action\n return np.argmax(self.Q[state])", "def epsilonGreedyChooser(normalAction, state, stepsDone):\n epsThreshold = EPS_END + (EPS_START - EPS_END) * math.exp(-1. * stepsDone / EPS_DECAY)\n randomSample = random.random()\n if randomSample > epsThreshold:\n action = normalAction(state).max(1)[1].view(1, 1)[0].item()\n #print(action)\n return action\n else:\n return ENVIRONMENT.action_space.sample()", "def chooseAction(self, gameState):\n\n actions = gameState.getLegalActions(self.index)\n # actions.remove(Directions.STOP)\n # You can profile your evaluation time by uncommenting these lines\n # start = time.time()\n values = [self.evaluate(gameState, a) for a in actions]\n for idx,a in enumerate(actions):\n baby = self.getSuccessor(gameState, a)\n qsum = [self.evaluate(baby, action) for action in baby.getLegalActions(self.index)]\n values[idx] += min(qsum) \n\n # print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n\n foodLeft = len(self.getFood(gameState).asList())\n if foodLeft <= 2:\n bestDist = 9999\n for action in actions:\n successor = self.getSuccessor(gameState, action)\n pos2 = successor.getAgentPosition(self.index)\n dist = self.getMazeDistance(self.start,pos2)\n if dist < bestDist:\n bestAction = action\n bestDist = dist\n return bestAction\n\n return random.choice(bestActions)", "def pick_action(self, observation):\n if np.random.rand() < self.epsilon:\n action = np.random.randint(self.n_arm) # ไปŽnไธชarmไธญ้šๆœบ้€‰ๆ‹ฉไธ€ไธช\n else: # 1-epsilon greedy\n # ๆ‰€่ฐ“reward, ๅฐฑๆ˜ฏsuccessๅนณๅ‡ๅ€ผ\n posterior_means = self.get_posterior_mean() # shape:[arm, 1], ไปŽไธญ้€‰ๆ‹ฉไธ€ไธชrewardๆœ€ๅคง็š„arm\n action = random_argmax(posterior_means)\n\n return action", "def sample_action(self):\n raise NotImplementedError", "def select_action(engine, observation):\n with torch.no_grad():\n dqn.eval()\n if torch.rand(1).item() < epsilon:\n return random_action(observation)\n else:\n return dqn(observation).greedy()", "def take_action(self):\n iterative_search = Thread(target=self._iterative_deepening)\n iterative_search.start()\n sleep(9)\n action = action_string(self.pieces[self.best_action[0]], self.best_action[1])\n self.pieces[self.best_action[0]] = self.best_action[1]\n self.playing = 1 if self.color == 0 else 0\n self.turn += 1\n return action", "def _select_action(self):\n if self.eval_mode:\n epsilon = self.epsilon_eval\n else:\n epsilon = self.epsilon_fn(\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_train)\n if random.random() <= epsilon:\n # Choose a random action with probability epsilon.\n return 
random.randint(0, self.num_actions - 1)\n else:\n # Choose the action with highest Q-value at the current state.\n if self._interact == 'stochastic':\n selected_action = self._stochastic_action\n elif self._interact == 'greedy':\n selected_action = self._q_argmax\n else:\n raise ValueError('Undefined interaction')\n return self._sess.run(selected_action,\n {self.state_ph: self.state})", "def choose_action( self):\n \"\"\"greedy, random, e-greedy, boltzmann, bayesian\"\"\"\n\tif self.exploration == \"greedy\":\n #Choose an action with the maximum expected value.\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:1.0})\n a = a[0]\n return a\n if self.exploration == \"random\":\n #Choose an action randomly.\n a = env.action_space.sample()\n if self.exploration == \"e-greedy\":\n #Choose an action by greedily (with e chance of random action) from the Q-network\n if np.random.rand(1) < e or total_steps < pre_train_steps:\n a = env.action_space.sample()\n else:\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:1.0})\n a = a[0]\n return a\n if self.exploration == \"boltzmann\":\n #Choose an action probabilistically, with weights relative to the Q-values.\n Q_d,allQ = sess.run([q_net.Q_dist,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.Temp:e,q_net.keep_per:1.0})\n a = np.random.choice(Q_d[0],p=Q_d[0])\n a = np.argmax(Q_d[0] == a)\n return a\n if self.exploration == \"bayesian\":\n #Choose an action using a sample from a dropout approximation of a bayesian q-network.\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:(1-e)+0.1})\n a = a[0]\n return a", "def sample_actions(self, state, stochastic=True) -> int:\n state = state.unsqueeze(0) # to get 1 batch size\n if not stochastic or random.random() < self.epsilon: # 1 means no exploration, 0 means lots of exploration\n _, indxs = torch.max(self.q_net(state), dim=1)\n action = self.id2action[indxs[0].item()]\n del indxs\n return action\n\n # random actions\n act_id = random.randint(0,4)\n action = self.id2action[act_id]\n return action", "def pick_action(self, available_actions, epsilon=.05):\n if np.random.uniform(0, 1) < epsilon:\n action = available_actions[np.random.randint(\n 0, len(available_actions))]\n else:\n q_values_of_state = self.q_table[self.environment.current_location]\n maxValue = max(q_values_of_state.values())\n action = np.random.choice(\n [k for k, v in q_values_of_state.items() if v == maxValue]\n )\n\n return action", "def choose_action(self, state):\n if random.random() < self.explore:\n action = random.choice(list(self.Q[state].keys()))\n else:\n action = self._best_action(state)\n\n # learn from the previous action, if there was one\n self._learn(state)\n\n # remember this state and action\n self.prev = (state, action)\n\n return action", "def available_action(self):\n return range(self.actions)", "def getAction(self, state):\n # Pick Action\n legalActions = self.getLegalActions(state)\n action = None\n \"*** YOUR CODE HERE ***\"\n if not self.getLegalActions(state): return None # Terminal State, return None\n\n if self.epsilon > random.random():\n action = random.choice(legalActions) # Explore\n else:\n action = self.computeActionFromQValues(state) # Exploit\n\n return action", "def choose_action(env, Q, observation, epsilon):\n if np.random.uniform(0, 1) < epsilon:\n action = env.action_space.sample()\n else:\n action = np.argmax(Q[observation, :])\n return action", "def chooseAction(self, 
gameState):\n actions = gameState.getLegalActions(self.index)\n\n # You can profile your evaluation time by uncommenting these lines\n # start = time.time()\n opIndices = self.getOpponents(gameState)\n opStates = [gameState.getAgentState(i) for i in opIndices]\n opCarry = [x.numCarrying for x in opStates]\n \n if max(opCarry) >= 5:\n self.isOffensive = False\n else:\n self.isOffensive = True\n\n values = [self.evaluate(gameState, a) for a in actions]\n # print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n\n\n\n # print if get eaten\n myPos = gameState.getAgentPosition(self.index)\n prevGameState = self.getPreviousObservation()\n if prevGameState is not None:\n\n previousPos = prevGameState.getAgentPosition(self.index)\n if self.getMazeDistance(myPos, previousPos) > 1:\n print(\"prePostion\",previousPos)\n print()\n previousLegalAction = prevGameState.getLegalActions(self.index)\n print([(self.evaluate(prevGameState, a), a) for a in previousLegalAction])\n print()\n print(self.getNonScaredGhostPos(prevGameState))\n print()\n print()\n\n\n return random.choice(bestActions)", "def chooseAction(self,state):\r\n #generate float btwn 0-1\r\n choice = random.random()\r\n \r\n #choose according to that number\r\n if choice > self.epsilon:\r\n return(self.maxQ(state)[1])\r\n else:\r\n #choose randomly\r\n return(self.actions[random.randrange(0,len(self.actions))])", "def chooseAction(self,state):\r\n #generate float btwn 0-1\r\n choice = random.random()\r\n \r\n #choose according to that number\r\n if choice > self.epsilon:\r\n return(self.maxQ(state)[1])\r\n else:\r\n #choose randomly\r\n return(self.actions[random.randrange(0,len(self.actions))])", "def choose_action(self, state):\n if random.random() < self.epsilon:\n self.epsilon -= self.epsilon_annealing_rate\n return random.choice(self.valid_actions)\n \n #initialize search variables\n opt_action = self.valid_actions[0]\n opt_value = 0\n\n #performs a search across all valid actions for highest q-value.\n for action in self.valid_actions:\n cur_value = self.q_value(state, action)\n if cur_value > opt_value:\n opt_action = action\n opt_value = cur_value\n elif cur_value == opt_value:\n opt_action = random.choice([opt_action, action])\n return opt_action", "def act(self, observation):\n self.epsilon *= self.epsilon_decay\n\n if np.random.rand() < self.epsilon:\n return self.action_space.sample()\n else:\n return self.get_best_action(observation)", "def make_action(self, observation, test=True):\n self.s_cur = self.prepro(observation)\n s = self.s_cur - self.s_prev\n self.s_prev = self.s_cur\n\n action_dist = self.output.eval(feed_dict={self.state_in: [s]})\n action = np.random.choice(self.action_size, p=action_dist[0])\n #action = np.argmax(action_dist[0])\n #print(action) \n return action+1#self.env.get_random_action()", "def select_action(images, n_actions, device, eps_threshold=-1):\n actions = []\n\n for i in images:\n if eps_threshold == -1:\n actions.append(torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long))\n else:\n sample = random.random()\n if sample > eps_threshold:\n with torch.no_grad():\n # t.min(1) will return smallest column value of each row.\n # second column on min result is index of where min element was\n # found, so we pick action with the lower expected reward.\n actions.append(policy_net(i.unsqueeze(0)).min(1)[1].view(1, 1))\n else:\n 
actions.append(torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long))\n\n return torch.tensor(actions, device=device)", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n def expHelper(gameState, deepness, agent):\n if agent >= gameState.getNumAgents():\n agent = 0\n deepness += 1\n if (deepness==self.depth or gameState.isWin() or gameState.isLose()):\n return self.evaluationFunction(gameState)\n elif (agent == 0):\n return maxFinder(gameState, deepness, agent)\n else:\n return expFinder(gameState, deepness, agent)\n \n def maxFinder(gameState, deepness, agent):\n output = [\"meow\", -float(\"inf\")]\n pacActions = gameState.getLegalActions(agent)\n if not pacActions:\n return self.evaluationFunction(gameState)\n for action in pacActions:\n currState = gameState.generateSuccessor(agent, action)\n currValue = expHelper(currState, deepness, agent+1)\n if type(currValue) is list:\n testVal = currValue[1]\n else:\n testVal = currValue\n if testVal > output[1]:\n output = [action, testVal] \n return output\n \n def expFinder(gameState, deepness, agent):\n output = [\"meow\", 0]\n ghostActions = gameState.getLegalActions(agent)\n if not ghostActions:\n return self.evaluationFunction(gameState)\n probability = 1.0/len(ghostActions)\n for action in ghostActions:\n currState = gameState.generateSuccessor(agent, action)\n currValue = expHelper(currState, deepness, agent+1)\n if type(currValue) is list:\n val = currValue[1]\n else:\n val = currValue\n output[0] = action\n output[1] += val * probability\n return output\n \n outputList = expHelper(gameState, 0, 0)\n return outputList[0]", "def act(self, state):\r\n self.state_info, actions= self.env.generatePossibleAction(state)\r\n # import pdb; pdb.set_trace()\r\n # print(actions)\r\n if self.eps > 0. 
and np.random.rand() < self.eps:\r\n # select the action randomly\r\n return random.choice(actions)\r\n # import pdb; pdb.set_trace()\r\n qvals = {action: self.Q_value[self.state_info, action] for action in actions}\r\n max_q = max(qvals.values())\r\n\r\n # in case of multiple actions having the same Q values\r\n actions_with_max_q = [a for a,q in qvals.items() if q == max_q]\r\n return random.choice(actions_with_max_q)", "def select_action(self, state):\n \n ##create lists and string to save relative action information\n actions = []\n action = ''\n all_actions = []\n \n ##get the action with the maximum value\n temp = {}\n for (s, a), value in self.Q.iteritems():\n if s == state:\n temp[(s, a)] = value\n all_actions.append(a) \n max_value = max(temp.values())\n for (s, a) , value in temp.iteritems():\n if value == max_value:\n actions.append(a)\n\n ##if we have more than one action with max_values, random return one\n if len(actions) > 1:\n index = random.randint(0,len(actions) - 1)\n action = str(actions[index])\n else:\n for item in actions:\n action = item\n \n ##when the random number less than epsilon, then return one action randomly \n if random.random() < self.epsilon:\n index = random.randint(0, len(all_actions) - 1)\n action = str(all_actions[index])\n \n ##if the random number not less than epsilon, then return the action with max value\n return action", "def get_action(self, state):\n state = torch.from_numpy(state).float().to(self.training_device)\n action_dist, _ = self.old_policy.forward(state)\n action = action_dist.sample()\n\n return action", "def select_action(policy_net, state, eps, n_actions, device, steps_done):\n sample = random.random()\n if sample > eps:\n with torch.no_grad():\n # t.max(1) will return largest column value of each row\n # second column on max result is index of where max element was\n # found, so we pick action with the larger expected reward\n return torch.tensor([[policy_net.forward(state.float()).argmax()]], device=device,\n dtype=torch.long)\n else:\n return torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long)", "def choose_action(self, state):\n prob = [] # Probability distribution\n for i in range(len(ACTIONS)):\n prob.append(self.epsilon/4)\n Q_func = self.policy.predict(process_state(state))\n Q_vals = Q_func[0]\n max_index = []\n Qmax = np.amax(Q_vals)\n for i in range(len(prob)):\n if Q_vals[i] == Qmax:\n # max_index.append(i)\n prob[i] = 1 - self.epsilon + self.epsilon/4\n break\n # ind = np.random.choice(max_index)\n # prob[ind] = 1 - self.epsilon + self.epsilon/4\n action = np.random.choice(ACTIONS, p = prob)\n return action", "def select_action(self, state):\n \n ##create lists and string to save relative action information\n actions = []\n action = ''\n all_actions = []\n \n ##get the action with the maximum value\n temp = {}\n for (s, a), value in self.Q.iteritems():\n if s == state:\n temp[(s, a)] = value\n all_actions.append(a)\n \n max_value = max(temp.values())\n for (s, a) , value in temp.iteritems():\n if value == max_value:\n actions.append(a)\n\n ##if we have more than one action with max_values, random return one\n if len(actions) > 1:\n index = random.randint(0,len(actions) - 1)\n action = str(actions[index])\n else:\n for item in actions:\n action = item\n \n ##when the random number less than epsilon, then return one action randomly \n if random.random() < self.epsilon:\n index = random.randint(0, len(all_actions) - 1)\n action = str(all_actions[index])\n \n ##if the random number not less than 
epsilon, then return the action with max value\n return action", "def act(self,observation):\n maximum_actions = np.argwhere(self.q_table[observation] == np.amax(self.q_table[observation])).flatten()\n return(np.random.choice(maximum_actions))", "def sample(self):\n return self._action_out(\n [self.action_space.sample() for _ in range(self.batch_size)]\n )", "def action(self, state, mode='train'):\n self.step += 1\n # reduce gradually epsilon to its minimum value\n self.epsilon = self.epsilon_min + (\n self.epsilon_max - self.epsilon_min)*np.exp(-self.epsilon_decay*self.step)\n if np.random.rand() > self.epsilon or mode.lower() == \"test\":\n return self._take_action(state)\n else:\n return random.randrange(self.action_size)", "def get_action(self, state):\n if np.random.rand() <= self.epsilon:\n action_idx = random.randrange(self.action_size)\n else:\n \n # Use all traces for RNN\n #q = self.model.predict(state) # 1x8x3\n #action_idx = np.argmax(q[0][-1])\n\n # Only use last trace for RNN\n q = self.model.predict(state) # 1x3\n action_idx = np.argmax(q)\n return action_idx", "def sample(self, observation):\n if not isinstance(observation, self.state_space):\n raise KeyError\n actions = self.valid_actions()\n qs = self.q.Qs(observation, actions)\n return max(actions, key=qs.get)", "def get_greedy_actions(self, state):\n state_action_values = self.get_action_values(state) # What are the value that we could get from current state\n\n max_action_value = max(state_action_values) # What is the higher value\n max_value_indices = [i for i, value in enumerate(state_action_values) if\n value == max_action_value] # Gets their indices\n\n # Prepares action probabilites for the ones with the higher value\n action_probs = np.zeros((4,))\n action_probs[max_value_indices] = 1 / (len(max_value_indices) if type(max_value_indices) is list else 1)\n\n return action_probs", "def _get_reward(self, action):\n HIRE_COST = 1 # TODO 7/29/20 - Determine significance of this value\n\n # Lookup the state representation using the cur_state index. 
Then we\n # can get the candidate productivity score.\n obs = self.observation_function[self.cur_state]\n prod_score = obs[1]\n r = action*(prod_score - HIRE_COST)\n return r", "def _make_get_action(self, problem: 'fpg.SingleProblem'):\n get_action = problem.policy.get_action\n\n # each get_action has an ephemeral cache (lasts only as long as the\n # closure does)\n cache = {}\n\n def inner(obs):\n obs = to_local(obs)\n try:\n # if this times out then something really screwy is going on\n self._get_act_lock.acquire(timeout=60 * 30)\n # each thread needs to have this call somewhere, per\n # https://www.tensorflow.org/versions/r0.12/api_docs/python/client/session_management\n with self.sess.as_default():\n # make sure it's 1D (need different strategy for batch\n # cache)\n assert obs.ndim == 1\n obs_bytes = obs.tostring()\n if obs_bytes not in cache:\n cache[obs_bytes] = get_action(obs)\n return cache[obs_bytes]\n return cache[obs_bytes]\n finally:\n self._get_act_lock.release()\n\n return inner", "def choose_action(self, state):\n if random.random() < self.e_greedy_prob:\n # randomly select action from state\n action = np.random.choice(len(self.q_val_table[state]))\n else:\n # greedily select action from state\n action = np.argmax(self.q_val_table[state])\n return action", "def pick_action(self, observation):\n # ๆณจๆ„: ๅชๆœ‰ๆญคๅค„ไธไธ€ๆ ท, ๅณTS้‡Œๆ˜ฏไปŽๅŽ้ชŒๅˆ†ๅธƒไธญ้‡‡ๆ ท,่€Œepsilon-greedyๆ˜ฏ่ฎก็ฎ—ๆœŸๆœ›\n sampled_means = self.get_posterior_sample() # ๆฏไธชarm้ƒฝ้‡‡ๆ ทไธ€ไธชrewardๅ‡ๅ€ผ, [arm, 1]\n action = random_argmax(sampled_means) # ้€‰ๆ‹ฉไบง็”Ÿๆœ€ๅคง็š„ๅ‡ๅ€ผ็š„action\n return action", "def act(observation):\n current_policy = sess.run(policy, {observation_: [observation]})\n action = np.random.choice(action_size, p=current_policy[0])\n return action", "def act(self, state, epsilon):\n if random.random() > epsilon:\n state = torch.FloatTensor(state)\n q_values = self.dqn(state)\n action = q_values.argmax().item()\n else:\n action = self.env.action_space.sample()\n return action", "def __call__(self, num_actions):\n return np.random.choice(num_actions)", "def select_action(self, state):\r\n policy_s = self.epsilon_greedy_probs(self.nA, self.Q[state], self.count, self.epsilon)\r\n return np.random.choice(np.arange(self.nA), p=policy_s)", "def get_action(self, model, observation, goal, batch=False):\n # generate random sequence\n batch_size = goal.goal_obs.shape[0] # requires goal_obs to be a key\n device = goal.goal_obs.device\n\n if not batch:\n observation = observation.leaf_apply(lambda arr: arr.unsqueeze(0).repeat_interleave(self._pop_size, dim=0))\n goal = goal.leaf_apply(lambda arr: arr.unsqueeze(0).repeat_interleave(self._pop_size, dim=0))\n else:\n observation = observation.leaf_apply(lambda arr: arr.repeat_interleave(self._pop_size, dim=0))\n goal = goal.leaf_apply(lambda arr: arr.repeat_interleave(self._pop_size, dim=0))\n\n action_sequence = self._env_spec.get_uniform(self._action_names,\n batch_size=batch_size * self._pop_size * self._horizon)\n action_sequence.leaf_modify(lambda x: split_dim(torch.from_numpy(x).to(device), dim=0,\n new_shape=[batch_size * self._pop_size, self._horizon]))\n\n def resample_and_flatten(vs):\n old_acseq = vs[0]\n mean, std = vs[1], vs[2]\n sample = torch.randn_like(old_acseq) * std + mean\n d = AttrDict(act=sample)\n self._env_spec.clip(d, ['act'])\n return d.act.view([-1] + list(old_acseq.shape[2:]))\n\n best_initial_act = None\n results = None\n for i in range(self._max_iters):\n # run the model\n results = model(action_sequence, 
observation, goal)\n\n # view as (B, Pop, ...)\n action_sequence.leaf_modify(lambda x: split_dim(x, 0, [batch_size, self._pop_size]))\n results.leaf_modify(lambda x: split_dim(x, 0, [batch_size, self._pop_size]))\n\n results.order = torch.argsort(results.costs, dim=1) # lowest to highest\n\n best = results.order[:, :self._num_elites]\n best = best.unsqueeze(-1).unsqueeze(-1).expand((-1, -1, self._horizon, self._act_dim))\n best_act_seq = action_sequence.leaf_apply(lambda x: torch.gather(x, 1, best))\n best_initial_act = best_act_seq.leaf_apply(lambda x: x[:, 0, 0]) # where x is (B, Pop, H ...)\n means = best_act_seq.leaf_apply(lambda x: x.mean(1, keepdim=True))\n stds = best_act_seq.leaf_apply(lambda x: x.std(1, keepdim=True))\n\n if i < self._max_iters - 1:\n # resampling\n action_sequence = AttrDict.leaf_combine_and_apply([action_sequence, means, stds], resample_and_flatten)\n\n # act is (B, actdim)\n best_initial_act.action_sequence = action_sequence # (B*Pop, horizon, actdim)\n best_initial_act.results = results\n\n return best_initial_act", "def getAction(self, gameState):" ]
[ "0.7114393", "0.70404077", "0.70395607", "0.6984486", "0.6971175", "0.6954252", "0.6913596", "0.6889728", "0.68695825", "0.6852474", "0.68420124", "0.6805876", "0.6799169", "0.67951995", "0.6754626", "0.6733447", "0.6733447", "0.6664213", "0.664351", "0.663247", "0.66250086", "0.65928024", "0.65790397", "0.6576512", "0.65714735", "0.6570036", "0.65674555", "0.6566418", "0.6558029", "0.65337455", "0.652031", "0.65187854", "0.65167016", "0.6510606", "0.64885575", "0.6486581", "0.64731497", "0.64682215", "0.645454", "0.64540565", "0.6453395", "0.64291394", "0.64186287", "0.6412171", "0.6401513", "0.6387699", "0.6379398", "0.6378967", "0.6372944", "0.6371561", "0.63669246", "0.63592905", "0.63478345", "0.6347083", "0.6344869", "0.63309747", "0.6325364", "0.6315496", "0.631524", "0.6309151", "0.6304801", "0.62890613", "0.62736934", "0.6259838", "0.6259817", "0.6254364", "0.62491053", "0.62476057", "0.62453413", "0.62405914", "0.6233125", "0.623309", "0.6223401", "0.6223401", "0.6223096", "0.62145275", "0.6208463", "0.6208143", "0.62055147", "0.62010247", "0.6200445", "0.62001383", "0.6194195", "0.61935365", "0.6190268", "0.618404", "0.6174593", "0.61712664", "0.6160271", "0.61556554", "0.6154515", "0.6145972", "0.6145655", "0.61395085", "0.61394227", "0.6136225", "0.6135717", "0.6131052", "0.6130737", "0.6128948", "0.6127997" ]
0.0
-1
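For orientation, the negative entries in the record above repeatedly implement epsilon-greedy action selection over a table or network of Q-values; the following is a minimal, self-contained sketch of that pattern, not taken from any single entry (NumPy assumed; the Q-vector and epsilon value are illustrative assumptions):

import numpy as np

def epsilon_greedy(q_values, epsilon=0.1):
    # With probability epsilon pick a uniformly random action (exploration);
    # otherwise pick a greedy action, breaking ties uniformly at random,
    # as several of the snippets above do with argwhere / random_argmax.
    if np.random.rand() < epsilon:
        return int(np.random.randint(len(q_values)))
    best = np.flatnonzero(q_values == q_values.max())
    return int(np.random.choice(best))

# Usage with an illustrative 4-action value vector:
action = epsilon_greedy(np.array([0.1, 0.5, 0.5, -0.2]), epsilon=0.05)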
Forward pass through network. Calculates the action logits and the value.
def forward(self, x) -> Tuple[Tensor, Tensor]:
    x = F.relu(self.fc1(x.float()))
    a = F.log_softmax(self.actor_head(x), dim=-1)
    c = self.critic_head(x)
    return a, c
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self, x):\n # action\n act = self.act_fc1(x)\n act = torch.tanh(act)\n act = self.act_fc2(act)\n act = torch.tanh(act)\n mean = self.mu(act) # N, num_actions\n logstd = self.logstd.expand_as(mean)\n std = torch.exp(logstd)\n action = torch.normal(mean, std)\n\n # value\n v = self.value_fc1(x)\n v = torch.tanh(v)\n v = self.value_fc2(v)\n v = torch.tanh(v)\n v = self.value_fc3(v)\n\n # action prob on log scale\n logprob = log_normal_density(action, mean, std=std, log_std=logstd)\n return v, action, logprob, mean", "def forward(self, x):\n # action\n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.relu(x)\n x = self.conv3(x)\n x = F.relu(x)\n x = x.view(-1, 32 * 7 * 7)\n x = self.linear1(x)\n x = F.relu(x)\n\n mean = self.mu(x) # N, num_actions\n logstd = self.logstd.expand_as(mean)\n std = torch.exp(logstd)\n action = torch.normal(mean, std)\n\n # value\n v = self.critic_linear(x)\n\n # action prob on log scale\n logprob = log_normal_density(action, mean, std=std, log_std=logstd)\n return v, action, logprob, mean", "def forward(self, state, action): #concatenate the action -value\n\n xu = torch.cat([state, action], 1)\n\n x1 = F.relu(self.fc1(xu))\n x1 = F.relu(self.fc2(x1))\n x1 = self.fc3(x1)\n\n x2 = F.relu(self.fc4(xu))\n x2 = F.relu(self.fc5(x2))\n x2 = self.fc6(x2)\n \n return x1, x2", "def forward(self, state, action):\n state = torch.cat(state, dim=1)\n \n for i in range(len(action)):\n action[i] /= self.max_action\n\n # Concatenate the action vector \n action = torch.cat(action, dim=1)\n x = torch.cat([state, action], dim=1)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n q_value = self.q_out(x)\n\n return q_value", "def forward(self, x):\n x = F.relu(self.affine1(x))\n x = F.relu(self.affine2(x))\n x = F.relu(self.affine3(x))\n x = F.relu(self.affine4(x))\n x = F.relu(self.affine5(x))\n\n # actor: choses action to take from state s_t \n # by returning probability of each action\n action_prob = F.softplus(self.action_head(x)).reshape(-1) + 1e-20\n\n # critic: evaluates being in the state s_t\n state_values = self.value_head(x)\n\n # return values for both actor and critic as a tuple of 2 values:\n # 1. a list with the probability of each action over the action space\n # 2. 
the value from state s_t \n return action_prob, state_values", "def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n action = self.max_action * torch.tanh(self.action_out(x))\n\n return action", "def forward(self, x):\n x = self.fc0(x.view(-1, x.size(-1))).view(x.size(0), x.size(1), -1)\n x = self.pe(x)\n\n x = self.inner_layers(x) # FF, FF, FF, finalFF\n\n state_value = self.fc_s(x) # double-dqn : state\n\n advantage_values = self.fc_a(x) # double-dqn : advantage\n advantage_values = advantage_values.view(\n advantage_values.size()[:-1] + (self.action_size, self.n_atoms))\n\n dist_weights = state_value.unsqueeze(\n dim=-2) + advantage_values - advantage_values.mean(dim=-2, keepdim=True)\n\n return dist_weights", "def forward(self, state, action): \n ##x = F.relu(self.fc1(state)) \n x = F.relu(self.bn1(self.fc1(state))) \n x = torch.cat([x, action], dim=1)\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x", "def forward(self, state, action) -> float:\n xs = F.relu(self.fcs1_layer(state))\n x = torch.cat((xs, action), dim=1)\n x = F.relu(self.fc2_layer(x))\n x = self.dropout(x)\n x = self.fc3_layer(x)\n\n return x", "def forward(self, state, action):\n x = torch.cat((state, action), dim=1)\n return self.net(x)", "def forward(self, state):\n x = state\n feature = self.feature_layer(x)\n action_value = self.value_layer(feature)\n advantage = self.advantage_layer(feature)\n \n q_value = action_value + (advantage - advantage.mean(dim=1, keepdim=True))\n return q_value", "def forward(self, x):\n x = F.relu(self.affine1(x))\n x = F.relu(self.affine2(x))\n\n # actor: choses action to take from state s_t\n # by returning probability of each action\n action_prob = F.softmax(self.action_head(x), dim=-1)\n\n # critic: evaluates being in the state s_t\n state_values = self.value_head(x)\n\n # return values for both actor and critic as a tupel of 2 values:\n # 1. a list with the probability of each action over the action space\n # 2. 
the value from state s_t\n return action_prob, state_values", "def forward(self, state, action):\n # Pass the states into the first layer\n x = self.fc_layers[0](state)\n x = self.bn(x)\n x = F.relu(x)\n # Concatenate the first layer output with the action\n x = torch.cat((x, action), dim=1)\n # Pass the input through all the layers apllying ReLU activation, but the last\n for layer in self.fc_layers[1:-1]:\n x = F.relu(layer(x))\n # Pass the result through the output layer apllying sigmoid activation\n x = torch.sigmoid(self.fc_layers[-1](x))\n # Return the Q-Value for the input state-action\n return x", "def forward(self, state, action):\n x = torch.cat((state, action), dim=1)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n\n return x", "def forward(self, state, action):\n x = torch.cat([state, action], 1)\n x = F.relu(self.linear1(x))\n x = F.relu(self.linear2(x))\n x = self.linear3(x)\n return x", "def forward(self, x):\n self.save_net()\n self.perturb_tensors()\n out = self.net.forward(x)\n return out", "def forward(self, state, action):\n\t\tx = F.relu(self.fc1(state))\n\t\tx = F.relu(self.fc2(torch.cat([x, action], dim=1))) # add action too for the mapping\n\t\tx = F.relu(self.fc3(x))\n\n\t\treturn x", "def forward(self, state, action):\n s = self.state_encoder(state)\n x = torch.cat((s,action),dim=1)\n x = self.act(self.fc2(x))\n x = self.fc_out(x)*10\n return x", "def forward(self, state, action):\n sa = torch.cat((state, action), dim=1)\n x = F.relu(self.fc1(sa))\n x = F.relu(self.fc2(x))\n return self.fc3(x)", "def forward(self, state, action):\n sa = torch.cat((state, action), dim=1)\n x = F.relu(self.fc1(sa))\n x = F.relu(self.fc2(x))\n return self.fc3(x)", "def forward(self, state, action):\n # Pass the states into the first layer\n # Pass the input through all the layers apllying ReLU activation except for the output layer\n x = F.relu(self.fc1(state))\n # Batch Normalization of the first layer\n x = self.bn(x)\n # Concatenate the first layer output with the action\n x = torch.cat((x, action), dim=1)\n x = F.relu(self.fc2(x))\n # Pass the input through all the layers apllying ReLU activation, but the last\n x = torch.sigmoid(self.fc3(x))\n # Return the Q-Value for the input state-action\n return x", "def forward(self, x):\n pass", "def forward(self):\n self.value = np.dot(self.x_node.value, self.w_node.value) + self.b_node.value", "def forward(self, state, action):\n xs = self.fc1(state)\n x = torch.cat((xs, action), dim=1)\n return self.fc2(x)", "def forward(self, inputs):\n\n down0 = self.layer_0(inputs=inputs)\n down1 = self.layer_1(inputs=down0)\n down2 = self.layer_2(inputs=down1)\n down3 = self.layer_3(inputs=down2)\n down4 = self.layer_4(inputs=down3)\n\n up1 = self.layer_7(down4, down3)\n\n up2 = self.layer_8(up1, down2)\n\n up3 = self.layer_9(up2, down1)\n\n up4 = self.layer_10(up3, down0)\n\n up5 = self.layer_11(up4)\n return up5", "def forward(self, state, action):\r\n \r\n x = torch.cat([state, action], 1) ##### fixed dim รคndern\r\n x = F.relu(self.linear1(x))\r\n x = F.relu(self.linear2(x))\r\n x = self.linear3(x)\r\n\r\n return x", "def forward(self, state, action):\n xs = f.relu(self.fcs1(state))\n x = torch.cat((xs, action), dim=1)\n x = f.relu(self.fc2(x))\n return self.fc3(x)", "def forward(self, state):\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n action = self.tanh(x)\n\n action = action.cpu().data.numpy() * self.action_lim\n action = torch.FloatTensor(action)\n\n return action", "def 
__feed_forward(self, X):\n # go over all layers\n for layer in self.__layers:\n X = layer.compute_act(X)\n\n return X", "def forward(self, x):\n return self.net(x)", "def forward(self, state):\n x = F.relu(self.input(state))\n for layer in self.layers:\n x = F.relu(layer(x))\n if self.duel:\n # Value function estimator\n val = F.relu(self.val_fc_input(x))\n val = self.val_fc_output(val)\n # Advantage function estimator\n adv = F.relu(self.adv_fc_input(x))\n adv = self.adv_fc_output(adv)\n # Subtract mean so that V and A are uniquely identifiable for a given Q\n return val + adv - adv.mean(1).unsqueeze(1).expand(state.size(0), self.action_size)\n else:\n return self.output(x)", "def forward(self, state, action):\n xs = F.relu(self.fcs1(state))\n x = torch.cat((xs, action), dim=1)\n x = F.relu(self.fc2(x))\n return self.fc3(x)", "def forward(self, state, action):\n s1 = F.relu(self.fcs1(state))\n s2 = F.relu(self.fcs2(s1))\n\n a1 = F.relu(self.fca1(action))\n\n x = torch.cat((s2, a1), dim=1)\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n\n return x", "def _forward(self):\n\n tf.summary.image(\"image\", tensor=tf.reshape(self.x, (self.batch_size, 28, 28, 1)), max_outputs=10)\n x = self.x\n\n # x = layers.dropout(self.x, keep_prob=0.7)\n # with tf.variable_scope(\"layer1\") as scope:\n h = tf.nn.relu(layers.fully_connected(x, num_outputs=self.input_size // 2, activation_fn=None))\n # tf.summary.histogram(\"moving_mean1\", tf.get_variable(scope + \"moving_mean\"))\n # with tf.variable_scope(\"layer2\") as scope:\n # h = tf.nn.relu(layers.fully_connected(h, num_outputs=32, activation_fn=None))\n # tf.summary.histogram(\"moving_mean2\", tf.get_variable(\"moving_mean\"))\n # with tf.variable_scope(\"layer3\") as scope:\n self.logits = layers.fully_connected(h, num_outputs=10, activation_fn=None)\n # tf.summary.histogram(\"moving_mean3\", tf.get_variable(\"moving_mean\"))\n\n self.probability = tf.nn.softmax(self.logits)\n self.prediction = tf.argmax(self.probability, axis=1)", "def forward(self,state,action):\n action_ = torch.zeros(action.shape[0],self.OHE_size) # 2024,OHE\n indices = torch.stack( (torch.arange(action.shape[0]), action.squeeze().long()), dim=0)\n indices = indices.tolist()\n action_[indices] = 1.\n x = torch.cat( (state,action_) ,dim=1)\n return self.forwardM(x)", "def forward(self, state, action):\n s1 = F.relu(self.fcs1(state))\n a1 = F.relu(self.fca1(action))\n x = torch.cat((s1, a1), dim=1)\n\n x = self.fc3(x)\n\n return x", "def forward(self, X):\n node_feats = self.act(X) - self.shift\n return node_feats", "def forward(self, x):\n x = self.efficient_net(x)\n return x", "def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n for l in range(len(self.layers)):\n if l == 0:\n z = self.layers[l].forward(x)\n else:\n z = self.layers[l].forward(a)\n a = self.activations[l].forward(z)\n\n # output from softmax layer\n out = a\n\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "def network_forward(self, X):\n \n #############################################################################\n # TODO: Perform a forward pass on the network and store the caches of #\n # each layer inside the cache_list #\n #############################################################################\n ActivationFunction = None\n if self.hidden_activation_fn == \"sigmoid\":\n ActivationFunction = lambda x: self.sigmoid_forward(x)\n elif self.hidden_activation_fn == \"tanh\":\n ActivationFunction = lambda x: 
self.tanh_forward(x)\n elif self.hidden_activation_fn == \"relu\":\n ActivationFunction = lambda x: self.relu_forward(x)\n\n Layer1Value, cacheL1 = self.fully_connected_forward(X, self.params[\"W1\"], self.params[\"b1\"])\n Layer1ValueActivation, cacheL1A = ActivationFunction(Layer1Value)\n scores, cacheL2 = self.fully_connected_forward(Layer1ValueActivation, self.params[\"W2\"], self.params[\"b2\"])\n\n # Cache\n cache_list =[cacheL1, cacheL1A, cacheL2]\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return scores, cache_list", "def forward(self, states, actions_previous=None): \n x = states.view(states.shape[0], self.frames_n * self.state_size)\n \n # ACTOR\n x_actor_alphas = F.relu(self.actor_alphas_layer_1(x))\n x_actor_alphas = F.relu(self.actor_alphas_layer_2(x_actor_alphas))\n x_actor_alphas = F.softplus(self.actor_alphas_layer_3(x_actor_alphas)) + 1. # To get to the interval [1; Inf).\n\n x_actor_betas = F.relu(self.actor_betas_layer_1(x))\n x_actor_betas = F.relu(self.actor_betas_layer_2(x_actor_betas))\n x_actor_betas = F.softplus(self.actor_betas_layer_3(x_actor_betas)) + 1. # To get to the interval [1; Inf).\n \n distribution = torch.distributions.beta.Beta(concentration1=x_actor_alphas, concentration0=x_actor_betas)\n raw_actions = actions_previous * 0.5 + 0.5 if actions_previous is not None else distribution.sample() # To return to the Beta interval, [0, 1], for now.\n densities = torch.exp(distribution.log_prob(raw_actions))\n actions = (raw_actions - 0.5) * 2 # Finally back to the action interval, [-1, -1].\n entropies = distribution.entropy()\n \n # CRITIC\n x_critic = F.relu(self.critic_layer_1(x))\n x_critic = F.relu(self.critic_layer_2(x_critic))\n values = self.critic_layer_3(x_critic)\n \n return {\n 'actions': actions,\n 'densities': densities,\n 'entropies': entropies, \n 'values': values\n }", "def forward(self, x):\n x=T.div(x,255.0)\n \n #print(state[20:,20:,0])\n #print(state[:,0,:,:])\n conv1 = F.relu(self.conv1(x))\n conv2 = F.relu(self.conv2(conv1))\n conv3 = F.relu(self.conv3(conv2))\n ###\n conv_state = conv3.view(conv3.size()[0], -1)\n flat1 = F.relu(self.fc1(conv_state))\n flat2 = F.relu(self.fc2(flat1))\n\n V = self.V(flat2)\n A = self.A(flat2)\n\n return V, A\n return x", "def _step(self, action):\n\n reward = 0.0\n x, y = action\n\n if not Creator.add_edge(self.nxgraph, x+1, y+1):\n reward = 0.0\n # TODO: do we return here?\n raise NotImplementedError\n else:\n reward = 1.0\n new_state = EnvTools.get_state(self.nxgraph)\n EnvTools.calculate_reward(self.state, self.previous_state)\n raise NotImplementedError\n\n\n\n pass", "def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.act(x)\n return x", "def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.act(x)\n return x", "def model_forward_pass(self, data):\n for key, value in data.items():\n data[key] = value.to(self.device)\n \n if self.fp16:\n with torch.cuda.amp.autocast():\n output, loss = self.model(**data)\n loss = loss / self.accumulate_grad_steps\n else:\n output, loss = self.model(**data)\n loss = loss / self.accumulate_grad_steps\n\n return output, loss", "def _forwardImplementation(self, inbuf, outbuf):\n assert self.module\n \n values = self.module.getActionValues(self.state) \n n_values = self.n_values.getActionValues(self.state)\n values = map(lambda x, y: x + self.exploration * (sqrt(2 * log(self.experiment.stepid, 2) / y) 
if y > 0 else 1000), values, n_values);\n \n actions = []\n for i in range(self.shield_options):\n new_action = where(values == max(values))[0]\n new_action = choice(new_action) \n values[new_action] = -10000\n actions.append(new_action)\n \n while len(actions) < self.outdim:\n actions.append(-1)\n \n outbuf[:] = actions", "def forward(self, X):\n self._X = X # For backprop later on.\n self._z = np.dot(X, self._W) + self._b\n a = self._act.a(self._z)\n return a", "def forward(self, inputs):\r\n #print (len(inputs))\r\n out = self.fc1(inputs)\r\n out = self.fc2(out)\r\n self.out = out\r\n return out\r\n #raise NotImplementedError('Implement the forward method of the model')\r", "def forward(self, state, action):\n q_in = torch.cat([state, action], 1)\n return self.ffn(q_in).view(-1)", "def forward(self, state):\n x = F.relu(self.fc1(state))\n x = self.fc2(x)\n action = self.tanh(x)\n\n action = action.cpu().data.numpy() * self.action_lim\n action = torch.FloatTensor(action)\n\n return action", "def forward(self, x, **kwargs):\n pass", "def forward(self, X):\n # Our input are 4 RGB frames of shape 160, 160 each\n X = self.conv1(X)\n X = self.conv2(X)\n X = self.conv3(X)\n\n X_flatten = X.view(X.size(0), -1)\n value = self.value_fc(X_flatten)\n value = self.value(value)\n\n advantage = self.advantage_fc(X_flatten)\n advantage = self.advantage(advantage)\n\n state_value = value + (advantage - torch.mean(advantage, dim=1, keepdim=True))\n\n return state_value", "def forward(self, x):\n x, self.hidden = self.gru(x, self.hidden)\n self.detach_hidden()\n x = self.dropout(x)\n x = self.out(x)\n return x", "def forward(self, state):\n x = self.fc1(state)\n action = self.tanh(x)\n\n action = action.cpu().data.numpy() * self.action_lim\n action = torch.FloatTensor(action)\n\n return action", "def forward(self, state):\n '''\n state = F.relu(self.conv1(state))\n state = F.relu(self.conv2(state))\n state = F.relu(self.conv3(state))\n state = F.relu(self.fc1(state))\n \n action = F.relu(self.fc2(state))\n \n return action\n '''\n \n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n \n return x", "def forward(self, x):\n # Pass the input through all the layers apllying ReLU activation, but the last\n for layer in self.fc_layers[:-1]:\n x = F.relu(layer(x))\n # Pass the result through the output layer apllying hyperbolic tangent function\n x = torch.tanh(self.fc_layers[-1](x))\n # Return the better action for the input state\n return x", "def forward(self, state):\n x = self.fc(state)\n return x", "def forward(self, inputs):\n _, state = self.core(inputs)\n return state", "def forward(self, state, action):\n\n # Prepare the embeddings\n state_embedding = self.state_embedding(state.float())\n state_embedding = state_embedding.repeat(1, action.shape[1], 1)\n action_embedding = self.action_embedding(action.float())\n state_action_embedding = torch.cat((state_embedding, action_embedding),dim=2)\n\n # Attention\n query = self.q_projection(state_action_embedding).permute(1,0,2)\n key = self.k_projection(state_action_embedding).permute(1,0,2)\n value = self.v_projection(state_action_embedding).permute(1,0,2)\n \n x = self.attention(query, key, value)[0].permute(1,0,2)[:,0,:]\n\n # Predict the next state\n x = self.predict(x)\n \n return x", "def forward(self, observation):\r\n # Apply relu activation and feed forward\r\n observation = F.relu(self.fc1(observation))\r\n actions = self.fc2(observation)\r\n\r\n return actions", "def forward(self, x):\n return x", "def forward(self, 
x):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\n\t\toutput = self._layers[0].forward(x)\n\t\tfor i in range(1, len(self._layers)):\n\t\t\toutput = self._layers[i].forward(output)\n\t\treturn output\n\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def forward(self, observation: Tensor) -> Tensor:\n pass", "def forward(self, state):\n\n # connect layers to each other and put relu activations between them\n for layer in self.hidden_layers:\n state = layer(state)\n state = F.relu(state)\n value = self.value_layer(state)\n return value", "def base_forward(self, x):\r\n pass", "def step(self, action):\n actions = action.reshape(2, 2)\n action = {'GoalieBrain': actions[0], 'StrikerBrain': actions[1]}\n #action = {'GoalieBrain': [5, 5], 'StrikerBrain': [2, 2]}\n #print(action)\n info = self.env.step(action)\n\n state0 = info[self.brain_names[0]].vector_observations\n state1 = info[self.brain_names[1]].vector_observations\n state = np.vstack((state0, state1))\n reward0 = info[self.brain_names[0]].rewards\n reward1 = info[self.brain_names[1]].rewards\n reward = reward0 + reward1\n done0 = info[self.brain_names[0]].local_done\n done1 = info[self.brain_names[1]].local_done\n done = done0 + done1\n return state, reward, done", "def forward(self, input=None):\n if (input is not None) and (self.result is None):\n self.result = self.act(input)\n\n # Pull the input from previous network layers\n elif self.result is None:\n in_result = []\n\n # Apply a separate activation to each resulting input if applicable\n if self.G.in_activation:\n for i, n in enumerate(self.input):\n in_result.append( self.G.in_activation[i](n()).type(_tensor(\"FloatTensor\")) )\n\n else:\n for n in self.input:\n in_result.append( n() )\n\n # Concatenate input along the lat dim\n self.result = self.act(torch.cat(in_result, in_result[0].dim() - 1))\n\n return self.result.view(*self.G.d_out)", "def forward(self, x):\n self.activations[0] = np.dot(x,self.weights[0]) + self.biases[0]\n self.zetas[0] = self.activation_f(self.activations[0])\n for i in range(1, self.n_layers-1):\n self.activations[i] = np.dot(self.zetas[i-1],self.weights[i]) \\\n + self.biases[i]\n self.zetas[i] = self.activation_f(self.activations[i])\n self.activations[-1] = np.dot(self.zetas[-2],self.weights[-1]) \\\n + self.biases[-1]\n self.zetas[-1] = self.activation_out_f(self.activations[-1])\n if self.activation_out_function == 'softmax':\n z = np.sum(self.zetas[-1], axis=1)\n z = np.reshape(z,(-1,1))\n self.zetas[-1] = np.divide(self.zetas[-1],z)\n return self.zetas[-1]", "def forward(self, x):\n raise NotImplementedError", "def forward(self, x):\n raise NotImplementedError", "def forward(self, x):\n\t\tx = x.view(-1, self.input_size)\n\t\tout = x\n\t\t\n\t\t# Save the model inputs, which are considered the activations of the 0'th layer.\n\t\tactivations = [out]\n\t\tlinearCombs = []\n\n\t\tfor layer in self.hidden_layers:\n\t\t\tlinearComb = layer(out)\n\t\t\tout = self.act(linearComb)\n\n\t\t\t# Save the activations and linear combinations from this layer.\n\t\t\tactivations.append(out)\n\t\t\tlinearComb.retain_grad()\n\t\t\tlinearComb.requires_grad_(True)\n\t\t\tlinearCombs.append(linearComb)\n\n\t\tlogits = 
self.output_layer(out)\n\t\t\t\n\t\tlogits.retain_grad()\n\t\tlogits.requires_grad_(True)\n\t\tlinearCombs.append(logits)\n\t\t\n\t\treturn (logits, activations, linearCombs)", "def forward(self, x_in):\r\n # x_out = torch.zeros_like(x_in)\r\n\r\n for layer in self.layers: #Call forward function of each layer in order\r\n x_out = layer.forward(x_in)\r\n # print(\"Forward pass Seq: \", layer, x_in, x_out)\r\n x_in = x_out # output of the layer is passed as input to the next layer\r\n self.temp = x_in\r\n return x_out", "def forward_pass(self, inputs):\n self._rbf_forward(inputs)\n self._slp_forward()\n return self.slp_outputs", "def forward_propagate(self, input):\n self.input_matrix[0] = input\n act_matrix = input\n for i in range(1, len(self.dimens)):\n\n #update our layer of activations\n act_matrix = self.activation_forward(act_matrix, i)\n\n #save our input values for use in back propogating\n self.input_matrix[i] = act_matrix\n\n #send the logistic function of all the inputs to the next layer\n act_matrix = [logistic(act) for act in act_matrix]\n\n return act_matrix", "def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x", "def forward(self, x):\r\n x = self.conv1(x)\r\n x = self.conv1_BN(x)\r\n x = F.relu(x)\r\n x = self.conv1_dp(x)\r\n x = self.Block2_1(x)\r\n x = self.Block2_2(x)\r\n x = self.Block3_1(x)\r\n x = self.Block3_2(x)\r\n x = self.Block3_3(x)\r\n x = self.Block3_4(x)\r\n x = self.Block4_1(x)\r\n x = self.Block4_2(x)\r\n x = self.Block4_3(x)\r\n x = self.Block4_4(x)\r\n x = self.Block5_1(x)\r\n x = self.Block5_2(x)\r\n x = self.MP(x)\r\n x = x.view(x.size(0),-1)\r\n x = self.fc(x)\r\n \r\n return x", "def forward(self, input):\n\n x = self.conv(input)\n x = self.bn(x)\n out = self.act(x)\n return out", "def forward(self, x):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\n\t\tself._cache_current = x\n\t\treturn x @ self._W + self._b\n\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def forward(self, input_x):\n adv, val = self.adv_val(input_x)\n return val + (adv - adv.mean(dim=1, keepdim=True))", "def forward(self, input_x):\n adv, val = self.adv_val(input_x)\n return val + (adv - adv.mean(dim=1, keepdim=True))", "def forward(self):\n self.iteration_number += 1\n x = self.x\n self.x = self.alpha * self.x + self.betta\n t = x - self.x\n\n return (t * t).sum()", "def forward_propagation(self):\n pred_y = argmax(self.model.predict(train_x), axis=1)\n\n accuracy_func = Accuracy()\n accuracy_func.update_state(pred_y, train_y)\n self.accuracy = accuracy_func.result().numpy()", "def forward_propagate(self, x):\n self.z_h = np.dot( x, self.w_ih ) + self.b_h\n #Activations of hidden layer\n self.a_h = self.sigmoid( self.z_h )\n self.z_o = np.dot( self.a_h, self.w_ho ) + self.b_o\n #yEst = activations of output layer\n yEst = self.sigmoid( self.z_o )\n return yEst", "def forward_propagate(self):\n for i in range(0, len(self.output_layer)):\n output = 0\n\n # Loop through each Neuron in the hidden layer\n for neuron in self.hidden_layer:\n output += neuron.weights[i] * neuron.output\n\n # Update summation for output classifier\n self.output_layer[i] = output", "def forward(self, inputs):\n raise NotImplementedError", "def forward(self, X):\n # Propaga a 
entrada pela rede\n self.z = np.dot(X, self.W1) # Produto escalar da entrada com a primeira matrix de pesos\n self.z2 = self.sigmoid(self.z) # Funรงรฃo de ativaรงรฃo\n self.z3 = np.dot(self.z2, self.W2) # Produto escalar da hidden layer com a segunda matrix de pesos\n return self.sigmoid(self.z3) # Funรงรฃo de ativaรงรฃo na saรญda ", "def feedForward(self):\n # Calculate the current values of the first layer\n self.layer1 = sigmoid(np.dot(self.input, self.weights1))\n\n # Calculate the sigmoid of the second layer which is the output\n self.output = sigmoid(np.dot(self.layer1, self.weights2))", "def forward(self, x):\n \n x = F.relu(self.conv1_bn(self.conv1(self.conv0_bn(x))))\n x = F.relu(self.conv2_bn(self.conv2(x)))\n x = F.relu(self.conv3_bn(self.conv3( self.maxpool2(x))))\n x = F.relu(self.conv4_bn(self.conv4( self.maxpool3(x))))\n x = self.maxpool4(x) \n x = x.view(-1, 1184)\n x = F.relu(self.fc1(x))\n x = self.dense1_bn(x)\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x)", "def propagate_forward(self, X):\n\n A = X\n L = self.L - 1\n\n for i in range(0, L - 1):\n Z = np.matmul(self.W[i], A) + self.b[i]\n cache = ((A, self.W[i], self.b[i]), Z)\n A = relu(Z)\n self.caches[i] = cache\n Z = np.matmul(self.W[L - 1], A) + self.b[L - 1]\n cache = ((A, self.W[L - 1], self.b[L - 1]), Z)\n AL = sigmoid(Z)\n self.caches[L - 1] = cache\n return AL", "def go_forward(net):\n global w, back_loss, loss, l2_loss\n start_forward_time = time.time()\n\n # feed in data\n P = net(w).t()\n\n # calculate loss\n Y = P.mv(X)\n Ybar = Y.mean()\n back_loss = (Y - Ybar).norm(1) / (J)\n loss = back_loss / Ybar\n l2_loss = ((Y - Ybar).norm(2) ** 2) / (J * Ybar)\n\n return time.time() - start_forward_time", "def forward(self, states, actions_previous=None):\n x = states.view(states.shape[0], self.frames_n * self.state_size)\n \n # ACTOR\n x_actor_mus = F.relu(self.actor_layer_1(x))\n x_actor_mus = F.relu(self.actor_layer_2(x_actor_mus))\n x_actor_mus = torch.tanh(self.actor_layer_3(x_actor_mus))\n\n distribution = torch.distributions.normal.Normal(loc=x_actor_mus, scale=self.actor_sigmas_parameters)\n actions = actions_previous if actions_previous is not None else distribution.sample() \n # actions = torch.clamp(actions, -1, 1) # Note: This is one approach, if necessary. 
Another is to use a Beta distribution\n # instead of a Normal distribution (see below).\n densities = torch.exp(distribution.log_prob(actions))\n entropies = distribution.entropy()\n \n # CRITIC\n x_critic = F.relu(self.critic_layer_1(x))\n x_critic = F.relu(self.critic_layer_2(x_critic))\n values = self.critic_layer_3(x_critic)\n \n return {\n 'actions': actions,\n 'densities': densities,\n 'entropies': entropies, \n 'values': values\n }", "def forward(self, x): # pylint: disable=invalid-name\n x = self.layer4(self.layer3(self.layer2(self.layer1(x))))\n return x.mean((-2, -1))", "def forward(self, state, action, next_state=None):\n dim_action, dim_state = self._true_dim_action[0], self.dim_state[0]\n control_action = action[..., :dim_action]\n\n if self.model_kind == \"dynamics\":\n optimism_vars = action[..., dim_action : dim_action + dim_state]\n elif self.model_kind == \"rewards\":\n optimism_vars = action[..., -1:]\n else:\n raise NotImplementedError(\n \"Hallucinated Models can only be of dynamics or rewards.\"\n )\n optimism_vars = torch.clamp(optimism_vars, -1.0, 1.0)\n\n mean, tril = self.predict(state, control_action)\n if torch.all(tril == 0.0):\n return mean\n\n if optimism_vars.shape[-1] == 0:\n return mean, tril\n\n self.nst_unc_scale = tril\n return (\n mean + self.beta * (tril @ optimism_vars.unsqueeze(-1)).squeeze(-1),\n torch.zeros_like(tril),\n )", "def forward(self, states):\n raise NotImplementedError()", "def forward(self, x):\n # x = state\n \n x = F.relu(self.input(x))\n x = self.output(x)\n \n return x", "def forward(self, x, contextState):\r\n \r\n #concatenate input and context state\r\n #x = x.t()\r\n xAndContext = torch.cat((x, contextState), 1)\r\n\r\n #calculate next context state (hidden output for current t) with tanh(xAndContext * W1)\r\n contextState = torch.tanh(xAndContext.mm(self.W1))\r\n \r\n # Calculates final output\r\n output = contextState.mm(self.V)\r\n\r\n return (output, contextState)", "def forward(self, x):\n n, c, t, v = x.size()\n x1 = x.view(n, c * t, v)\n y = None\n for i in range(self.num_subset):\n A1 = self.PA[i]\n z = self.conv_d[i](torch.matmul(x1, A1).view(n, c, t, v))\n y = z + y if y is not None else z\n A2 = self.cen(x)\n z2 = torch.matmul(x1, A2).view(n, c, t, v)\n z2 = self.conv_cen(z2)\n y += self.lamb * z2\n y = self.bn(y)\n y += self.down(x)\n y = self.relu(y)\n y = self.attention(y)\n return y", "def _forward(self, a):\n a = np.array(a)\n self.weighted_layer, self.activations = [], [a]\n for w, b in zip(self.weights, self.biases):\n z = w.dot(a) + b\n a = sigmoid(z)\n self.weighted_layer.append(z)\n self.activations.append(a)\n\n return a", "def forward(self, x, act_hidden, crt_hidden):\n in_dim = x.shape[1]\n lidar = x[:, :in_dim-4].view(x.shape[0], self.frames, -1) # TODO hard coded\n others = x[:, in_dim-4:]\n # action\n a = F.relu(self.act_fea_cv1(lidar))\n a = F.relu(self.act_fea_cv2(a))\n a = a.view(a.shape[0], -1)\n a = F.relu(self.act_fc1(a))\n a = torch.cat((a, others), dim=-1)\n a = F.relu(self.act_fc2(a))\n act_hidden[0], act_hidden[1] = self.act_lstm(a, act_hidden)\n mean = torch.tanh(self.actor(act_hidden[0]))\n\n logstd = self.logstd.expand_as(mean)\n std = torch.exp(logstd)\n action = torch.normal(mean, std)\n\n # action prob on log scale\n logprob = self.log_normal_density(action, mean, std=std, log_std=logstd)\n\n # value\n v = F.relu(self.crt_fea_cv1(lidar))\n v = F.relu(self.crt_fea_cv2(v))\n v = v.view(v.shape[0], -1)\n v = F.relu(self.crt_fc1(v))\n v = torch.cat((v, others), dim=-1)\n v = 
F.relu(self.crt_fc2(v))\n crt_hidden[0], crt_hidden[1] = self.crt_lstm(v, crt_hidden)\n v = self.critic(v)\n\n return v, action, logprob, mean" ]
[ "0.7648056", "0.7441679", "0.7235342", "0.7074458", "0.7050156", "0.69769365", "0.6912717", "0.6907993", "0.69072765", "0.6904189", "0.6899755", "0.6880525", "0.6867396", "0.6824801", "0.6819598", "0.67802507", "0.67745304", "0.6761813", "0.6747654", "0.6747654", "0.67306876", "0.67249095", "0.67134327", "0.6688794", "0.66485", "0.6641784", "0.65956205", "0.658203", "0.6575238", "0.65729403", "0.6566938", "0.6540798", "0.6538677", "0.65245116", "0.65022165", "0.649793", "0.6492027", "0.6485671", "0.64804494", "0.64701587", "0.6468531", "0.6460872", "0.6459993", "0.64512163", "0.64512163", "0.64433086", "0.6427071", "0.64178354", "0.64098865", "0.63957405", "0.639286", "0.6385737", "0.63758796", "0.6375529", "0.6365553", "0.6362663", "0.63518214", "0.6346253", "0.6342021", "0.63417387", "0.6334324", "0.6323531", "0.6317098", "0.6296778", "0.62949264", "0.62911624", "0.62871635", "0.6277045", "0.6272419", "0.626762", "0.626762", "0.62590796", "0.6255834", "0.6246275", "0.6234197", "0.62300086", "0.6225553", "0.620383", "0.6199399", "0.61991", "0.61991", "0.6189103", "0.6184866", "0.61766875", "0.6166288", "0.61463654", "0.6143733", "0.61426806", "0.6134586", "0.6132924", "0.61157095", "0.61146396", "0.6113235", "0.6113077", "0.61127657", "0.6107415", "0.61069703", "0.61059296", "0.6105154", "0.61042464" ]
0.6286952
67
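The record above pairs the query "Forward pass through network. Calculates the action logits and the value." with a two-headed actor-critic forward pass; a hedged sketch of how such a module might be defined and used end-to-end follows (PyTorch assumed; obs_dim, hidden, and n_actions are illustrative assumptions, not values from the dataset):

import torch
import torch.nn as nn
import torch.nn.functional as F

class ActorCritic(nn.Module):
    def __init__(self, obs_dim=4, hidden=128, n_actions=2):
        super().__init__()
        self.fc1 = nn.Linear(obs_dim, hidden)
        self.actor_head = nn.Linear(hidden, n_actions)
        self.critic_head = nn.Linear(hidden, 1)

    def forward(self, x):
        # Shared body, then one head for action log-probabilities (actor)
        # and one head for the scalar state-value estimate (critic).
        x = F.relu(self.fc1(x.float()))
        a = F.log_softmax(self.actor_head(x), dim=-1)
        c = self.critic_head(x)
        return a, c

# Usage: sample an action and keep its log-probability plus the value estimate.
net = ActorCritic()
log_probs, value = net(torch.randn(1, 4))
action = torch.distributions.Categorical(logits=log_probs).sample()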
Forward pass through network. Calculates the Q using the value and advantage.
def forward(self, input_x):
    adv, val = self.adv_val(input_x)
    return val + (adv - adv.mean(dim=1, keepdim=True))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self, state):\n x = state\n feature = self.feature_layer(x)\n action_value = self.value_layer(feature)\n advantage = self.advantage_layer(feature)\n \n q_value = action_value + (advantage - advantage.mean(dim=1, keepdim=True))\n return q_value", "def advantage(self, state, Q: torch.Tensor = None):\n return Q - Q.max()\n # return Q - torch.matmul(self.ฯ€.pmf(state, action_values=Q), Q)", "def forward(self, x1, x2):\n return x1 * self.Q + (1 - self.Q) * x2", "def Q_net(self, state):\n\t\tif not self._prediction_made: \n\t\t\tQ = tf.matmul(tf.nn.relu( tf.matmul(state, self.weights_hidden) + self.bias_hidden ), self.weights_out) + self.bias_out \n\t\t\tself._Qval = Q\t\n\t\t\tself._prediction_made = True\n\t\treturn self._Qval", "def forward(self, state, action):\n state = torch.cat(state, dim=1)\n \n for i in range(len(action)):\n action[i] /= self.max_action\n\n # Concatenate the action vector \n action = torch.cat(action, dim=1)\n x = torch.cat([state, action], dim=1)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n q_value = self.q_out(x)\n\n return q_value", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones = experiences\n\n #simple implementation of a python noob to implement DDQN\n bla = torch.from_numpy(np.zeros(64)).float().to(device)\n for i in range(64):\n bla[i] = self.qnetwork_target(next_states[i]).detach()[self.qnetwork_local(next_states).detach().argmax(1)[i]]\n Q_targets_next = bla.unsqueeze(1)\n #this was my first try of ddqn in python style, but as i said i'm a noob and didn't get it working\n #Q_targets_next = [self.qnetwork_target(next_states).detach()[i] for i in self.qnetwork_local(next_states).detach().argmax(1).unsqueeze(1)]\n \n # Compute Q targets for current states \n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.qnetwork_local(states).gather(1, actions)\n\n # Compute loss\n loss = F.mse_loss(Q_expected, Q_targets)\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)", "def forward(self, x):\n q_denom = (x.unsqueeze(1) - self.clusters)**2\n q_denom = q_denom.sum(dim=2)\n q_denom /= self.alpha\n q_denom += 1.0\n q = 1.0 / q_denom\n q = q ** ((self.alpha + 1.0) / 2.0)\n q = q.t() / q.sum(dim=1) # Div shapes [20, 1024] / [1024]\n q = q.t()\n return q", "def forward(self, x):\n x = self.fc0(x.view(-1, x.size(-1))).view(x.size(0), x.size(1), -1)\n x = self.pe(x)\n\n x = self.inner_layers(x) # FF, FF, FF, finalFF\n\n state_value = self.fc_s(x) # double-dqn : state\n\n advantage_values = self.fc_a(x) # double-dqn : advantage\n advantage_values = advantage_values.view(\n advantage_values.size()[:-1] + (self.action_size, self.n_atoms))\n\n dist_weights = state_value.unsqueeze(\n dim=-2) + advantage_values - advantage_values.mean(dim=-2, keepdim=True)\n\n return dist_weights", "def q1_forward(self, state: torch.Tensor) -> torch.Tensor:\n return self.q_networks[0](state)", "def Q(self, states, neural_net_to_use, no_grad = False):\r\n\r\n states = torch.from_numpy(states)\r\n states = states.float()\r\n\r\n if no_grad:\r\n with torch.no_grad():\r\n output = neural_net_to_use(states)\r\n return output\r\n\r\n output = neural_net_to_use(states)\r\n return output", "def learn(self):\n ## obtain sample batch using priority based sampling.\n states, 
actions, rewards, next_states, dones, weights, sample_inds = self.buffer.sample_batch(BETA)\n \n ## obtain the discounted sum of rewards from reward list\n ## also obtain final gamma multiplier\n reduced_rewards, gamma_multipliers = self.reduce_rewards(rewards)\n \n ## convert to tensors\n states = np_to_tensor(states)\n actions = np_to_tensor(actions)\n reduced_rewards = np_to_tensor(reduced_rewards)\n gamma_multipliers = np_to_tensor(gamma_multipliers)\n next_states = np_to_tensor(next_states)\n dones = np_to_tensor(dones)\n weights = np_to_tensor(np.array(weights))\n \n #### Updating Qnet\n \n ## actions from the target actor network\n greedy_actions = self.actor_target(next_states)\n ## compute temporal difference\n targets = reduced_rewards + torch.mul( torch.mul(gamma_multipliers , self.QNetwork_target(next_states, greedy_actions)) , (1-dones).unsqueeze(1))\n Q_sa = self.QNetwork_local(states, actions)\n \n td_error = targets - Q_sa\n \n ## update the priorities using temporal differences\n self.buffer.update_priority(sample_inds,\n (td_error).detach().abs().squeeze().cpu().data.numpy()+REPLAY_EPS)\n \n ## compute the loss, importance sampling weights are used\n loss = ((td_error).pow(2)*weights).mean()\n \n self.QNet_optim.zero_grad()\n loss.backward()\n self.QNet_optim.step()\n \n ### Updating Actor\n pred_actions = self.actor_local(states)\n actor_loss = - self.QNetwork_local(states, pred_actions).mean()\n \n self.actor_optim.zero_grad()\n actor_loss.backward()\n self.actor_optim.step()\n \n #### Polyak Updates\n self.soft_update(self.QNetwork_local, self.QNetwork_target, TAU)\n self.soft_update(self.actor_local, self.actor_target, TAU)", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n Q_Value = 0 #initializing q value\n\n feat_Extractor = self.featExtractor\n\n weight = self.weights #To get the weight to control exploration and exploitation\n\n features = feat_Extractor.getFeatures(state,action) #to get all the features associated with (state,action) pair\n\n for each_feature in features:\n #refer to README_Reinforcement.txt for the formula at line 11\n temp_Qvalue = weight[each_feature] * features[each_feature] #Q(state,action) = w * featureVector where * is the dotProduct operator\n Q_Value = Q_Value + temp_Qvalue\n\n return Q_Value #Returns final qvalue\n #util.raiseNotDefined()", "def test_propagate(self):\n # Get network components\n data = array([[0], [1]])\n cdata = LabeledCData(data, labels=array([0, 1]))\n encoder = BinaryEncoding(cdata)\n unitary = ProductAnsatz(1)\n measure = Measurement(1, [0])\n qnn = Network([encoder, unitary, measure], \"1q-qvm\")\n\n # Propagate the zeroth data point\n out = qnn.propagate(0, shots=10)\n\n print(out)", "def updateQ_value(self, value):\n self.Q_value = (self.Q_value * self.nVisits + value) / (self.nVisits + 1)", "def forward(self, x):\n dims = list(range(1, len(x.shape)))\n mean = x.mean(dim=dims, keepdim=True)\n var = torch.pow(x - mean, 2).mean(dim=dims, keepdim=True)\n return self.apply_gain_and_bias((x - mean) / (var + EPS).sqrt())", "def forward(self, state):\n x = F.relu(self.input(state))\n for layer in self.layers:\n x = F.relu(layer(x))\n if self.duel:\n # Value function estimator\n val = F.relu(self.val_fc_input(x))\n val = self.val_fc_output(val)\n # Advantage function estimator\n adv = F.relu(self.adv_fc_input(x))\n adv = self.adv_fc_output(adv)\n # Subtract mean so that V and A are uniquely identifiable for a given Q\n return val + adv - adv.mean(1).unsqueeze(1).expand(state.size(0), 
self.action_size)\n else:\n return self.output(x)", "def forwardPolicyNet(self, state):\n with torch.no_grad():\n q_values = self.policy_net(state)\n return q_values", "def getQValue(self, state, action):\n #print \"getQValue in ApproximateQAgent\"\n\n \"*** YOUR CODE HERE ***\"\n weights = self.getWeights()\n features = self.featExtractor.getFeatures(state, action, self)\n\n value = 0\n\n #print \"FEATURES: \", features\n #print \"WEIGHTS: \", weights\n\n for feature in features:\n value += features[feature]*weights[feature]\n return value\n #util.raiseNotDefined()", "def getQValue(self, state, action):\n \"\"\"Description:\n [Enter a description of what you did here.]\n Use first equation in slide 71 of MDP to compute q-value depond on weights and current features.\n \n !! But I think what I did is not work for IdentityExtractor. Because feature of IdentityExtrator always return 1,\n it did not change even a ghost is closing.\n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n # if weight is empty, then weight will need to initial to 1 for all features\n # According to which Extractor user choose, weight counter will have equal number of keys.\n if len(self.weight) == 0:\n feat = self.featExtractor.getFeatures(state, action)\n self.weight.incrementAll(feat.keys(), 1)\n \n qValue = self.weight * self.featExtractor.getFeatures(state,action)\n return qValue\n \"\"\" END CODE \"\"\"", "def preval_forward(self, data_shot, label_shot, data_query):\n embedding_query = self.encoder(data_query)\n embedding_shot = self.encoder(data_shot)\n logits = self.base_learner(embedding_shot)\n #loss = self.FL(logits, label_shot) + self.CD(logits,label_shot) + self.LS(logits,label_shot)\n loss = self.CD(logits,label_shot)\n grad = torch.autograd.grad(loss, self.base_learner.parameters())\n fast_weights = list(map(lambda p: p[1] - 0.01 * p[0], zip(grad, self.base_learner.parameters())))\n logits_q = self.base_learner(embedding_query, fast_weights)\n\n for _ in range(1, 100):\n logits = self.base_learner(embedding_shot, fast_weights)\n #loss = self.FL(logits, label_shot) + self.CD(logits,label_shot) + self.LS(logits,label_shot)\n loss = self.CD(logits,label_shot)\n grad = torch.autograd.grad(loss, fast_weights)\n fast_weights = list(map(lambda p: p[1] - 0.01 * p[0], zip(grad, fast_weights)))\n logits_q = self.base_learner(embedding_query, fast_weights) \n return logits_q", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones = experiences\n\n # Double DQN. 
Uses local network for action selection and target network for value estimation\n # see: https://arxiv.org/pdf/1509.06461.pdf\n Q_actions_next = self.dqn_local(next_states).detach().argmax(1).unsqueeze(1)\n Q_targets_next = self.dqn_target(next_states).gather(1, Q_actions_next)\n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n # Standard DQN\n # Get max predicted Q values (for next states) from target model\n # Q_targets_next = self.dqn_target(next_states).detach().max(1)[0].unsqueeze(1)\n # Compute Q targets for current states\n # Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.dqn_local(states).gather(1, actions)\n\n # Compute loss\n loss = F.mse_loss(Q_expected, Q_targets)\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.dqn_local, self.dqn_target, TAU)", "def forward(self, state_t):\n # Use your network to compute qvalues for given state\n conved_features = self.conv_layers(state_t)\n A = self.head_A(conved_features)\n V = self.head_V(conved_features).repeat(1, self.n_actions)\n mean_A = torch.mean(A, dim=-1, keepdim=True).repeat(1, self.n_actions)\n qvalues = A + V - mean_A\n\n assert qvalues.requires_grad, \"qvalues must be a torch tensor with grad\"\n assert len(\n qvalues.shape) == 2 and qvalues.shape[0] == state_t.shape[0] and qvalues.shape[1] == self.n_actions\n\n return qvalues", "def forward(self, value, query, lens):\n relevant_scores = self.relevant_score(value, query, lens)\n e_relevant_scores = torch.exp(relevant_scores)\n weights = e_relevant_scores / e_relevant_scores.sum(-1, keepdim=True)\n attention = (weights.unsqueeze(-1) * value).sum(1)\n return attention", "def forward(self):\n R = self.LP.cost.R\n A = self.LP.dyn.A\n B = self.LP.dyn.B\n\n x = self.LP.x0\n self.x[0] = x\n for i in range(self.LP.N):\n u = - np.linalg.inv(R+B.T.dot(self.V[i+1]).dot(B)).dot(.5*B.T.dot(self.W[i+1]) \\\n + B.T.dot(self.V[i+1]).dot(A).dot(x))\n if self.LP.dyn.u_dim == 1:\n self.u[i] = float(u)\n else:\n self.u[i] = u\n self.J_star[i] = float(x.T.dot(self.V[i]).dot(x) + self.W[i].T.dot(x)) #up to constant\n\n if i == 0:\n self.J[i] = self.LP.cost.loss(x, u, i)\n else:\n self.J[i] = self.J[i-1] + self.LP.cost.loss(x, u, i)\n x = self.LP.dyn.next_state(x, u)\n self.x[i+1] = x\n\n self.J[self.LP.N] = self.J[self.LP.N-1] + self.LP.cost.loss(x, 0, self.LP.N)\n\n self.J_star[self.LP.N] = float(x.T.dot(self.V[self.LP.N]).dot(x) \\\n + self.W[self.LP.N].T.dot(x)) #up to constant", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones = experiences\n\n # Get max predicted Q values (for next states) from target model\n Q_targets_next = self.qnetwork_target(\n next_states).detach().max(1)[0].unsqueeze(1)\n \n # Compute Q targets for current states \n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.qnetwork_local(states).gather(1, actions)\n\n # Compute loss\n loss = F.mse_loss(Q_expected, Q_targets)\n\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, self.tau) \n\n return", "def learn(self, experiences, gamma):\n self.optimizer.zero_grad()\n\n states, actions, rewards, next_states, dones = 
experiences\n\n best_actions = self.qnetwork_local(next_states).detach().argmax(1).unsqueeze(1)\n q_values_target = self.qnetwork_target(next_states).detach()\n q_expected = rewards + (gamma * q_values_target.gather(1, best_actions)) * (1 - dones)\n q_current = self.qnetwork_local(states).gather(1, actions)\n\n loss = F.mse_loss(q_expected, q_current)\n\n loss.backward()\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, dqn_agent.TAU)", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n qvalue = 0.0\n for feature_name, value in self.featExtractor.getFeatures(state, action).iteritems():\n qvalue += value * self.weights[feature_name]\n return qvalue", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n qvalue = 0.0\n for feature_name, value in self.featExtractor.getFeatures(state, action).iteritems():\n qvalue += value * self.weights[feature_name]\n return qvalue", "def ipdTft(length,gamma,epsilon,alpha = .8):\r\n #possible previous states (what each did in the last iteration)\r\n states = [(\"*\",\"*\"),(\"C\",\"D\"), (\"C\",\"C\"), (\"D\",\"C\"), (\"D\",\"D\")]\r\n #actions: Defect or Cooperate\r\n actions = [\"D\",\"C\"]\r\n #payoff matrix (as dict)\r\n payoff = {(\"C\",\"D\"): (-3,0), (\"C\",\"C\"): (-1,-1), \r\n (\"D\",\"C\"): (0,-3), (\"D\",\"D\"): (-2,-2)}\r\n #initialize learners \r\n\r\n #q1 = qLearn(states,actions,gamma,alpha,epsilon)\r\n #q1 = QLearnCont(ipd_feats,10,actions,gamma,alpha,epsilon,kernel = 'linear')\r\n #q1 = DQN(ipd_feats,10,actions,.99,.5,.1,learn_type = 'linear')\r\n q1 = DQN(ipd_feats,10,actions,.99,.5,.1,shape = (10,10,1))\r\n #initialize list of rewards\r\n rewards = []\r\n #iterate through length states and run the game\r\n prevState = (\"*\",\"*\")\r\n for i in range(length):\r\n #get actions\r\n print(\"Iteration %i:\" %i)\r\n print(\"Previous State:\", prevState)\r\n qa1 = q1.chooseAction(prevState)\r\n qa2 = tft(prevState[0])\r\n print(\"Player 1 Action:\",qa1)\r\n print(\"Player 2 Action:\",qa2)\r\n \r\n #find payoff\r\n newState = (qa1,qa2)\r\n reward = payoff[newState]\r\n rewards.append(reward[0])\r\n print(\"Player 1 Reward:\", reward[0])\r\n print(\"Player 2 Rewards:\", reward[1])\r\n print(\"Current average reward for Player 1:\",np.mean(rewards))\r\n #assign reward and update Q params\r\n q1.qUpdate(prevState,qa1,reward[0],newState)\r\n \r\n prevState = newState\r\n #print(q1.Q)\r\n return(rewards,q1)", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones, weights, indexes = experiences\n\n q_expected, q_targets = self.get_target_and_expected(states, \n actions, \n rewards, \n next_states, \n dones, \n gamma)\n\n #print('q_expected.shape', q_expected.shape)\n #print('q_targets.shape', q_targets.shape)\n \n # Compute loss\n ##### deltas = F.mse_loss(q_expected, q_targets)\n deltas = q_expected - q_targets\n #print('loss.shape', loss.data.cpu().numpy().shape)\n #print('loss', loss)\n \n _sampling_weights = (torch.Tensor(weights)\n .view((-1, 1)))\n \n # mean square error\n loss = torch.mean((deltas * _sampling_weights)**2)\n\n # importance sampling weights used to correct bias introduced \n # by prioritisation experience replay\n # See Annealing the bias https://arxiv.org/abs/1511.05952\n #with torch.no_grad():\n # weight = sum(np.multiply(weights, loss.data.cpu().numpy()))\n # print('weight', weight)\n # loss *= weight\n # print('weights.shape', weights.shape)\n # print('loss 
type', type(loss))\n # print('loss shape', loss.size())\n # loss *= weights\n # Minimize the loss\n # call zero_grad before calling backward() \n # o.w. gradients are accumulated from multiple passes\n self.optimizer.zero_grad()\n # backward computes dloss/dx for every parameter x\n loss.backward()\n # updates parameters\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU) \n \n # ------------------- update priorities ------------------- # \n priorities = abs(deltas.detach()).numpy()\n #priorities = abs(q_expected.detach() - q_targets.detach()).numpy()\n self.memory.update_priorities(priorities, indexes)", "def update(self, state, action, nextState, reward):\n \"\"\"Description:\n Use Q-Learning algoritm in slide 58 of MDP\n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n maxQns = self.getValue(nextState) # get max q-value of next state\n if maxQns == None:\n maxQns = 0\n Qsa = self.getQValue(state, action) #self.qValues[(state, action)]\n difference = reward + self.discountRate * maxQns - Qsa\n self.qValues[(state, action)] += self.alpha * difference\n \n self.vitCount[(state, action)] += 1\n \"\"\" END CODE \"\"\"", "def learn(self):\n \n # target parameter update\n # target parameter update\n if self.learn_step_counter % self.nu_iter == 0:\n self.target_net.load_state_dict(self.eval_net.state_dict())\n #testing the preformace of the network\n if self.learn_step_counter == 0:\n print('As referece this first test on dev data. Is maded with the Q networks, initialized randomly : ' )\n else:\n print(\"\\n Lets copy the Q-value Net in to Q-target net!. And test the performace on the dev data: \")\n \n current_bleu = self.dev_network()\n print(\"Current Bleu score is: \", current_bleu)\n \n self.learn_step_counter += 1\n\n \n long_Batch = self.sample_size*3\n # Sampling the higgest rewards values\n b_memory_big = self.memory[np.argsort(-self.memory[:-self.max_output_length, self.state_size+1])][:long_Batch]\n \n sample_index = np.random.choice(long_Batch, self.sample_size)\n b_memory = b_memory_big[sample_index, :]\n\n b_s = torch.FloatTensor(b_memory[:, :self.state_size])\n b_a = torch.LongTensor(b_memory[:, self.state_size:self.state_size+1].astype(int))\n b_r = torch.FloatTensor(b_memory[:, self.state_size+1:self.state_size+2])\n b_s_ = torch.FloatTensor(b_memory[:, self.state_size+2: self.state_size+2 + self.state_size])\n\n b_is_eos = torch.FloatTensor(b_memory[:, self.size_memory1-1:]).view(self.sample_size, 1)\n #print(b_a, b_a.size)\n #print(b_is_eos)\n #Activate the eval_net\n unfreeze_model(self.eval_net)\n \n # q_eval w.r.t the action in experience\n q_eval = self.eval_net(b_s).gather(1, b_a) # shape (batch, 1)\n q_next = self.target_net(b_s_).detach() # detach from graph, don't backpropagate\n #taking the most likely action.\n b_a_ = torch.LongTensor(q_next.max(1)[1].view(self.sample_size, 1).long())\n #b_a_ = q_next.max(1)[0].view(self.sample_size, 1).long() # shape (batch, 1)\n q_eval_next = self.eval_net(b_s_).gather(1, b_a_) # shape (batch, 1)\n \n #If eos q_target = reward. 
\n q_target = b_r + self.gamma * b_is_eos* q_eval_next.view(self.sample_size, 1) # shape (batch, 1)\n #version 0\n #q_target = b_r + self.gamma * q_next.max(1)[0].view(self.sample_size, 1) # shape (batch, 1)\n \n loss = self.loss_func(q_eval, q_target)\n \n self.tb_writer.add_scalar(\"learn/learn_batch_loss\",\n loss.data, self.learn_step_counter)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n #desctivate the eval_net\n freeze_model(self.eval_net)", "def forward(self, x):\n # tracking shapes\n B, C, H, W = x.size()\n K = self.K\n HW_prime = H * W\n\n # get qkv's\n f = self.f(x).view(B, C // K, H * W) # B x (C/K) x (HW)\n g = self.g(x) # B x (C/K) x H x W\n h = self.h(x) # B x (C/2) x H x W\n if self.down_sample:\n g = F.max_pool2d(g, [2, 2]) # B x (C/K) x (H/2) x (W/2)\n h = F.max_pool2d(h, [2, 2]) # B x (C/2) x (H/2) x (W/2)\n HW_prime = HW_prime // 4 # update (HW)'<-(HW) // 4\n\n g = g.view(B, C // K, HW_prime) # B x (C/K) x (HW)'\n h = h.view(B, C // 2, HW_prime) # B x (C/2) x (HW)'\n\n beta = self._dot_product_softmax(f, g) # B x (HW) x (HW)'\n s = torch.einsum('ijk,ilk->ijl', h, beta).view(B, C // 2, H, W) # B x (C/2) x H x W\n return self.gamma * self.v(s) + x # B x C x H x W", "def learn(self, experiences, gamma):\n \n states, actions, rewards, next_states, dones = experiences\n\n # Get max predicted Q values (for next states) from target model\n Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)\n # Compute Q targets for current states \n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.qnetwork_local(states).gather(1, actions)\n\n # Compute loss\n loss = F.mse_loss(Q_expected, Q_targets)\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)", "def test_forward_values():\n rnn = GeneratorRNN(1)\n inputs = Variable(torch.zeros(1,1,3))\n hidden = rnn.init_hidden()\n e,pi,mu,sigma,rho,_ = rnn(inputs, hidden)\n\n assert (e >= 0).all()\n assert (e <= 1).all()\n\n diff = torch.abs(1-torch.sum(pi))\n assert (diff < 0.00001).all()\n\n assert (sigma > 0).all()\n\n assert (rho > -1).all()\n assert (rho < 1).all()\n\n rnn = GeneratorRNN(3)\n inputs = Variable(torch.zeros(10,1,3))\n hidden = rnn.init_hidden()\n e,pi,mu,sigma,rho,_ = rnn(inputs, hidden)\n\n pi_sum = torch.sum(pi,dim=2)\n diff = torch.abs(1-pi_sum)\n assert (diff < 0.00001).all()", "def computeQValueFromValues(self, state, action):\r\n #\r\n weightedVfvsSum = 0\r\n reward = 0\r\n # to get possible next state(s)\r\n for nextState, prob in self.mdp.getTransitionStatesAndProbs(state, action):\r\n reward += self.mdp.getReward(state, action, nextState) * prob\r\n #print \":computeQValueFromValues: nextState is: \", nextState, \" | self.values[nextState] is: \", self.values[nextState]\r\n weightedVfvsSum += prob * self.getValue(nextState)\r\n #\r\n return ( reward + ( self.discount * weightedVfvsSum) ) # making the actual qvalue\r", "def learn(self):\n if self.learn_step_counter % self.target_q_update_step == 0:\n self.target_net.load_state_dict(self.eval_net.state_dict()) #update target_net's parameters\n logging.info(\"updtate target q\")\n self.learn_step_counter += 1\n\n rgbs,depths, rgbs_1, depths_1,questions,actions,rewards,terminals = self.memory.sample()\n\n rgbs_var = Variable(torch.FloatTensor(rgbs).cuda())\n 
depths_var = Variable(torch.FloatTensor(depths).cuda())\n rgbs_1_var = Variable(torch.FloatTensor(rgbs_1).cuda())\n depths_1_var = Variable(torch.FloatTensor(depths_1).cuda())\n questions_var = Variable(torch.LongTensor(questions).cuda())\n actions_var = Variable(torch.LongTensor(actions).cuda())\n rewards_var = Variable(torch.FloatTensor(rewards).cuda())\n terminals_var = Variable(torch.FloatTensor(terminals).cuda())\n\n q_eval_matrix = self.eval_net(rgbs_var,depths_var,questions_var)\n q_eval_matrix = q_eval_matrix.view(-1,9*28*28)\n actions_var = actions_var.view(-1,1)\n q_eval = torch.gather(q_eval_matrix, 1, actions_var) \n q_eval = q_eval.squeeze(1)\n\n q_next_matrix = self.target_net(rgbs_1_var,depths_1_var,questions_var).detach() #don't backward\n q_next_matrix = q_next_matrix.view(-1,9*28*28)\n q_next = torch.max(q_next_matrix,1)[0]\n\n one_var = Variable(torch.ones_like(terminals_var))\n\n q_target = rewards_var + (one_var- terminals_var)*self.discount * q_next\n \n loss = self.loss_func(q_eval, q_target)\n\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n self.task_total_loss += loss.item()\n self.task_total_q += q_target.mean()\n self.update_count += 1", "def preval_forward(self, data_shot, label_shot, data_query):\n embedding_query = self.encoder(data_query)\n embedding_shot = self.encoder(data_shot)\n logits = self.base_learner(embedding_shot)\n loss = F.cross_entropy(logits, label_shot)\n grad = torch.autograd.grad(loss, self.base_learner.parameters())\n fast_weights = list(map(lambda p: p[1] - 0.01 * p[0], zip(grad, self.base_learner.parameters())))\n logits_q = self.base_learner(embedding_query, fast_weights)\n\n for _ in range(1, 100):\n logits = self.base_learner(embedding_shot, fast_weights)\n loss = F.cross_entropy(logits, label_shot)\n grad = torch.autograd.grad(loss, fast_weights)\n fast_weights = list(map(lambda p: p[1] - 0.01 * p[0], zip(grad, fast_weights)))\n logits_q = self.base_learner(embedding_query, fast_weights) \n return logits_q", "def getQvalue(self, state, action):\n featureVector = self.getFeatures(state, action)\n qValue = 0\n for k in featureVector.keys():\n qValue = qValue + self.weights[k] * featureVector[k]\n\n return qValue", "def update(self, state, action, nextState, reward):\n \"\"\"Description:\n Use second equation in slide 71 of MDP\n Adjest weight of active features depend on tranistion \n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n feat = self.featExtractor.getFeatures(state, action)\n\n # if weight is empty, then weight will need to initial to 1 for all features\n # According to which Extractor user choose, weight counter will have equal number of keys.\n if len(self.weight) == 0:\n feat = self.featExtractor.getFeatures(state, action)\n self.weight.incrementAll(feat.keys(), 1)\n \n maxQns = self.getValue(nextState)\n if maxQns == None:\n maxQns = 0\n Qsa = self.getQValue(state, action)\n difference = ( reward + self.discountRate * maxQns ) - Qsa\n \n for key in self.weight.keys():\n self.weight[key] += (self.alpha * difference * feat[key])\n \n \n \"\"\" END CODE \"\"\"", "def computeQValueFromValues(self, state, action):\n #get the Transition function and nextStates\n state_prob_pair=self.mdp.getTransitionStatesAndProbs(state,action)\n #initialize the value to zero\n actual_value=0\n #iterate over probabilities (transition functions) and next states\n for pair in state_prob_pair:\n #compute qvalue\n actual_value+=pair[1]*(self.mdp.getReward(state,action,pair[0])+self.discount*self.values[pair[0]])\n #print \"The Q 
value is \",actual_value\n return actual_value", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones, idxs, weights = experiences\n \n\n # Get max predicted Q values (for next states) from target model\n Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)\n # Compute Q targets for current states \n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.qnetwork_local(states).gather(1, actions)\n\n # Compute loss MSE\n loss = (Q_expected - Q_targets.detach()).pow(2)\n # Add weights to loss\n loss = loss * weights\n # Add noise to loss to arrive at prior weights\n prios = loss + 1e-6\n # Take mean\n loss = loss.mean()\n\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # Update buffer priorities\n self.memory.update_priorities(zip(idxs, prios.data.cpu().numpy()))\n\n\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)", "def learn(self, state, action, reward, next_state):\r\n\r\n \"\"\"Please Fill Your Code Here.\r\n \"\"\"\r\n self.Q[state][action] = self.Q[state][action] + self.alpha * (reward + self.gamma * max(self.Q[next_state]) - self.Q[state][action])\r\n\r\n return 0", "def learn(self):\n batch = self.agent.replay_buffer.sample(self.batch_size)\n states = torch.tensor([x.state for x in batch], dtype=torch.float32).to(self.agent.device) # shape == (batch_size, 3, 6, 7)\n actions = [x.action for x in batch]\n rewards = torch.tensor([x.reward for x in batch], dtype=torch.float32).to(self.agent.device)\n next_states = torch.tensor([x.next_state for x in batch], dtype=torch.float32).to(self.agent.device)\n dones = [x.done for x in batch]\n\n self.optimizer.zero_grad()\n\n\n q_vals = self.agent.policy_net(states)[range(len(actions)), actions] # Q vals for actions taken\n q_next_vals = self.agent.target_net(next_states).detach() # we don't care about grad wrt target net\n q_next_vals[dones] = 0.0 # terminal states have no future expected value\n q_targets = rewards + self.gamma * torch.max(q_next_vals, dim=1)[0]\n\n # all_q_vals = self.agent.policy_net(states)\n # print()\n # print('actions')\n # print(actions)\n # print()\n # print('original all q vals')\n # print(self.agent.policy_net(states)) \n # print(self.agent.policy_net(states).shape)\n # print()\n # print('QVALS:', q_vals)\n # print(q_vals.shape)\n # print('\\n\\n')\n # print('QTARGETS:', q_targets)\n # print(q_targets.shape)\n\n # breakpoint()\n\n loss = self.loss_fn(q_targets, q_vals).to(self.agent.device)\n loss.backward()\n \n # for layer in self.agent.policy_net.named_parameters():\n \n # # print(f'layer: {layer[0]}')\n # # print(f'grad:', layer[1].grad)\n\n # # print('loss', loss)\n # # print('q_vals grad:', q_vals.grad)\n # # print('states:', )\n\n self.optimizer.step()\n\n self.agent.learning_iters += 1\n if self.agent.learning_iters % self.target_update_freq == 0:\n self.agent.update_target_net()\n # logger.info('Updated target net')", "def forward(self):\n self.value = np.dot(self.x_node.value, self.w_node.value) + self.b_node.value", "def forward(self,\n x: Tensor) \\\n -> Tensor:\n\n x = self.convs[0](x)\n res = x\n for i in range(self.num_rates):\n x = torch.tanh(self.filter_convs[i](\n x)) * torch.sigmoid(self.gate_convs[i](x))\n x = self.convs[i+1](x)\n res = res + x\n return res", "def forward(self, x):\n x = self.efficient_net(x)\n return x", "def 
computeQValueFromValues(self, state, action):\n        \"*** YOUR CODE HERE ***\"\n        qValue = 0\n        transitions = self.mdp.getTransitionStatesAndProbs(state, action)\n        #print('Transitions: ' + str(transitions))\n        for t in transitions:\n            nextState, prob = t\n            reward = self.mdp.getReward(state, action, nextState)\n            #print('Reward: ' + str(reward))\n            oneTransition = prob * (reward + self.discount * self.values[nextState])\n            qValue = qValue + oneTransition\n        return qValue", "def learn(self, experiences, gamma):\n\n        states, actions, rewards, next_states, dones = experiences\n\n        Q_targets = self.get_dqg_target(next_states, rewards, gamma, dones)\n\n        # Get expected Q values\n        q_exp = self.qnetwork_local(states)\n\n        # gets the q values along dimention 1 according to the actions, which is used as index\n        # >>> t = torch.tensor([[1,2],[3,4]])\n        # >>> torch.gather(t, 1, torch.tensor([[0],[1]]))\n        # tensor([[ 1],\n        #         [ 4]])\n        q_exp = q_exp.gather(1, actions)\n\n        # compute loss\n        loss = F.mse_loss(q_exp, Q_targets)\n\n        # reset optimizer gradient\n        self.optimizer.zero_grad()\n        # do backpropagation\n        loss.backward()\n        # do optimize step\n        self.optimizer.step()\n\n        # ------------------- update target network ------------------- #\n        self.soft_update(self.qnetwork_local, self.qnetwork_target, PARAM.TAU)", "def forward(self, inp):\n        return inp.dot(self.W) + self.b", "def learn(self, memory: ReplayMemory, batch_size: int) -> float:\n        # Sample from the replay buffer: draw batch_size samples from the experience replay set and compute the current target Q values\n        indices, (state_batch, next_batch, action_batch, reward_batch, done_batch), is_weights = \\\n            memory.sample(batch_size)\n        # Use the behavior (policy) network to compute the value function Q_j\n        values = self.__policy(state_batch).gather(1, action_batch)\n        \n        expected = []\n        policy_Q_batch = self.__policy(next_batch).cpu().data.numpy()\n        max_action_next = np.argmax(policy_Q_batch, axis=1)\n        target_Q_batch = self.__target(next_batch)\n        \n        for i in range(batch_size):\n            if done_batch[i]:\n                expected.append(reward_batch[i])\n            else:\n                target_Q_value = target_Q_batch[i, max_action_next[i]]\n                expected.append(reward_batch[i] + self.__gamma * target_Q_value)\n        \n        expected = torch.stack(expected)\n        TD_error = torch.abs(expected - values)\n        memory.update(indices, TD_error)\n        \n        # Gradient descent according to the objective (Q_j - expected)^2\n        loss = (torch.FloatTensor(is_weights).to(self.__device) * F.mse_loss(values, expected)).mean()\n\n        self.__optimizer.zero_grad()\n        loss.backward()\n        for param in self.__policy.parameters():\n            param.grad.data.clamp_(-1, 1)\n        self.__optimizer.step()\n\n        return loss.item()", "def forward(self, prev_state, obs_t):\r\n        # Use your network to compute qvalues for given state\r\n        #print(state_t.shape)\r\n        h = self.conv(obs_t)\r\n\r\n        h = h.view(h.size(0), -1)\r\n\r\n        new_state = h_new, c_new = self.lstm(h, prev_state)\r\n        advantage = self.adv(h_new)\r\n        value = self.val(h_new)\r\n\r\n\r\n        adv_mean = torch.mean(advantage, dim=1, keepdim=True)\r\n        qvalues = value + advantage - adv_mean\r\n\r\n        return new_state, qvalues", "def getQValue(self, state, action):\n        \"*** YOUR CODE HERE ***\"\n        feature_dictionary = self.featExtractor.getFeatures(state, action)\n\n        QValue = 0\n        for feature in feature_dictionary:\n            QValue += self.weights[feature] * feature_dictionary[feature]\n        return QValue", "def getQValue(self, state, action):\n        \"*** YOUR CODE HERE ***\"\n        feature_dictionary = self.featExtractor.getFeatures(state, action)\n\n        QValue = 0\n        for feature in feature_dictionary:\n            QValue += self.weights[feature] * 
feature_dictionary[feature]\n return QValue", "def update_Q(self, reward):\n old_estimate = self.q_estimates[self.prev_action]\n self.q_estimates[self.prev_action] = old_estimate + 1/self.N[self.prev_action] * (reward - old_estimate)", "def test_forward(self):\n # test single input\n self.model.w = np.array([[0.5, 0.25]])\n self.model.b = 0.5\n x = np.array([[0.2, 0.1]])\n out = self.model.forward(x)\n self.assertTrue(np.abs(out[0] - 0.6514) < 0.01)\n\n # test multiple inputs\n self.model.w = np.array([[0.1, 0.2]])\n self.model.b = 0.2\n x = np.array([[0.3, 0.4],\n [0.5, 0.6]])\n out = self.model.forward(x)\n should_be = np.array([0.5769,0.5915])\n self.assertTrue(np.allclose(out, should_be, atol=0.01))", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones, indices = experiences\n\n # Get max predicted Q values (for next states) from target model\n if self.dbl_dqn:\n local_best_actions = self.qnetwork_local(next_states).detach().argmax(1)\n Q_next_states = self.qnetwork_target(next_states)\n Q_targets_next = Q_next_states.gather(1, local_best_actions.unsqueeze(1))\n else:\n Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)\n\n # Compute Q targets for current states\n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.qnetwork_local(states).gather(1, actions)\n\n # Compute loss\n if self.priority_rpl:\n errors = abs(Q_expected - Q_targets)\n self.memory.update_priorities(indices, errors)\n importance = self.memory.get_importance(indices, self.a, self.b)\n importance = np.array(importance)\n loss = torch.mean(torch.mul(errors.float(), torch.from_numpy(importance).float().to(device)))\n else:\n loss = F.mse_loss(Q_expected, Q_targets)\n\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()", "def back_propagate(self, reward, maxQ):\n\n error = self.alpha * (reward + self.gamma*maxQ - self.value)\n #logging.debug(\"error is now %s\" % (error))\n\n # sigmoid derivate is sigmoid(x) * (1 - sigmoid(x) )\n dsig = self.value * (1 - self.value)\n\n gradient = error * dsig\n #logging.debug(\"gradient is now: %s\" % (gradient))\n\n self.weigths = np.add( self.weights, np.multiply(gradient, self.weights) )\n # self.weights = [gradient * w + w for w in self.weights]", "def train(self, Q_pred: torch.FloatTensor, Q_true: torch.FloatTensor) -> float:\n self.dqn.train(mode=True)\n self.optim.zero_grad()\n loss = self.loss_fn(Q_pred, Q_true)\n loss.backward()\n self.optim.step()\n\n return loss", "def forward(self, state_t):\n # Use your network to compute qvalues for given state\n qvalues = <YOUR CODE>\n\n assert qvalues.requires_grad, \"qvalues must be a torch tensor with grad\"\n assert (\n len(qvalues.shape) == 2 and \n qvalues.shape[0] == state_t.shape[0] and \n qvalues.shape[1] == n_actions\n )\n\n return qvalues", "def forward(self, x, u0=torch.Tensor()):\n x =self.vari_gpu(x)\n u0 =self.vari_gpu(u0)\n \n # input x0 and batch size \n num_batch = x.size(0)\n x0 = x.view(num_batch, -1)\n \n A_hat = self.build_A_block()\n B_hat = self.build_B_block()\n \n # Q_sqrt in QP\n Q = self.Q_sqrt.mm(self.Q_sqrt.t())\n R = self.R_sqrt.mm(self.R_sqrt.t())\n R_diag = self.build_Rdiagnol_block(R)\n Q_hat, Q_diag = self.build_Q_block(Q, Q, R, B_hat)\n Q_sqrt_hat = sqrtm(Q_hat) # computs sqrt of Q\n Q_sqrt_hat = Q_sqrt_hat.repeat(num_batch,1,1) # builds batch\n \n # p in QP p = 2 * (Q_diag*B_hat)^T * (A_hat*x0)\n A_x0 = A_hat.mm(x0.t()).t() # 
presents[x1;x2;...;xN] size: batch * dim(x1;x2;...;xN)\n p = 2*A_x0.mm(Q_diag.mm(B_hat))\n \n # G in QP\n G1,G2 = self.build_G_block(B_hat)\n G1 = G1.repeat(num_batch,1,1) # builds batch\n G2 = G2.repeat(num_batch,1,1) # builds batch\n \n # h in QP\n h1 = self.h1.repeat(num_batch,1) # builds batch\n h21 = self.h21.repeat(num_batch,1) # builds batch\n h21 -= A_x0 \n h22 = self.h22.repeat(num_batch,1) # builds batch\n h22 += A_x0\n h2 = torch.cat((h21,h22),1)\n \n zero = self.zero.repeat(num_batch,1)\n \n # E in QP\n E = self.E_sqrt.mm(self.E_sqrt.t())\n E_sqrt = self.E_sqrt.repeat(num_batch,1,1)\n \n # for Q(x0,u0), add equality constraint: u(0) = u0 \n if u0.nelement() != 0:\n u0 = u0.view(num_batch, -1)\n # F*z = f\n F = self.F\n f = u0*self.f\n F = F.repeat(num_batch,1,1) # builds batch\n #f = f.repeat(num_batch,1) # builds batch\n# print(Q_sqrt_hat.size(), p.size(), G1.size(),\n# h1.size(), G2.size(),h2.size(),\n# E_sqrt.size(),F.size(),f.size())\n\n self.para = [Q_sqrt_hat, p, G1, h1, G2,\n h2, E_sqrt, F, f]\n u_opt,e_opt, = self.layer(Q_sqrt_hat, p, G1, h1, G2,\n h2, E_sqrt, F, f,zero) # u_opt: batch*dim(u)\n # for V(x0), defines the QP layer without equality \n # constraints \n else:\n layer = QP_layer_no_eq(nz=self.num_u, nineq_u=\n self.num_ineq_u, nineq_x=self.num_ineq_x)\n self.para = [Q_sqrt_hat, p, G1, h1, G2,\n h2, E_sqrt] \n # gets the solution of the basic optimization problem\n u_opt,e_opt, = layer(Q_sqrt_hat, p, G1, h1, G2, h2, \n E_sqrt,zero) # u_opt: batch*dim(u)\n\n # get the optimal cost\n # a+b: sum(i:1 to N): xi^T*Q*xi + u(i-1)^T*R*u(i-1)\n # c: x0^T*Q*x0\n # d:(i:1 to N):ei^T*E*ei\n a = (u_opt.mm(Q_hat)*u_opt + p*u_opt).sum(1)\n b = (A_x0.mm(Q_diag)*A_x0).sum(1)\n c = (x0.mm(Q)*x0).sum(1)\n d = (e_opt.mm(E)*e_opt).sum(1)\n cost_opt = (a+b+c+d).unsqueeze(1) # size: batch*1\n u0_opt = u_opt.mv(self.weight) # only the fisrt action\n #print(u0,u0_opt)\n return cost_opt, u0_opt", "def forward(self, x: torch.Tensor) -> torch.Tensor:\n return torch.nn.functional.linear(\n x,\n self.weight_mu + self.weight_sigma * self.weight_epsilon,\n self.bias_mu + self.bias_sigma * self.bias_epsilon,\n )", "def forward(self, X):\n self._X = X # For backprop later on.\n self._z = np.dot(X, self._W) + self._b\n a = self._act.a(self._z)\n return a", "def computeQValueFromValues(self, state, action):\n \"*** YOUR CODE HERE ***\"\n transition_state_probs = self.mdp.getTransitionStatesAndProbs(state, action)\n # Add each state and probability to q_value\n q_value = 0\n for state_, probability in transition_state_probs:\n state_reward = self.mdp.getReward(state, state_, action)\n q_value += probability * (state_reward + self.discount * self.values[state_])\n return q_value", "def update_q_values(self, state, value):\n if self.prev_state is not None and self.learning:\n reward = self.reward(Game.game_state(state))\n self.q_values[self.represent_state(self.prev_state), self.prev_action] += self.alpha * (\n reward + self.gamma * value - self.prev_q_val)", "def forward(self, state_t):\n # Use your network to compute qvalues for given state\n qvalues = self.network(state_t)\n\n assert qvalues.requires_grad, \"qvalues must be a torch tensor with grad\"\n assert len(\n qvalues.shape) == 2 and qvalues.shape[0] == state_t.shape[0] and qvalues.shape[1] == self.n_actions\n\n return qvalues", "def forward(self, w_value, x_value, b_value):\n self.inputs = [w_value, x_value, b_value]\n # return np.matmul(x_value, w_value) + b_value # [Note] Matmul Order\n return x_value.dot(w_value) + b_value # [Note] 
Matmul Order", "def forward(self, state_t):\n # Use your network to compute qvalues for given state\n conved_1 = self.conv1(state_t)\n conved_2 = self.conv2(conved_1)\n conved_3 = self.conv3(conved_2)\n qvalues = self.dense(conved_3)\n\n assert qvalues.requires_grad, \"qvalues must be a torch tensor with grad\"\n assert len(\n qvalues.shape) == 2 and qvalues.shape[0] == state_t.shape[0] and qvalues.shape[1] == n_actions\n\n return qvalues", "def forward(self, state, action):\n # Pass the states into the first layer\n x = self.fc_layers[0](state)\n x = self.bn(x)\n x = F.relu(x)\n # Concatenate the first layer output with the action\n x = torch.cat((x, action), dim=1)\n # Pass the input through all the layers apllying ReLU activation, but the last\n for layer in self.fc_layers[1:-1]:\n x = F.relu(layer(x))\n # Pass the result through the output layer apllying sigmoid activation\n x = torch.sigmoid(self.fc_layers[-1](x))\n # Return the Q-Value for the input state-action\n return x", "def computeQValueFromValues(self, state, action):\n\n ##util.raiseNotDefined()\n #\"*** YOUR CODE STARTS HERE ***\"\n # Code to remove --- from here\n transitions = self.mdp.getTransitionStatesAndProbabilities(state, action)\n qvalue = 0\n for (nextState, probability) in transitions:\n reward = self.mdp.getReward(state, action, nextState)\n qvalue += probability *(reward + self.discount*self.values[nextState])\n # Code to remove --- to here\n #\"*** YOUR CODE FINISHES HERE ***\"\n \n return qvalue", "def training_step(self):\n self.iteration += 1\n # if not enough replay memories\n if self.iteration < self.params.min_replays:\n # skip training\n return\n # sample memories\n states_val, action_val, rewards, next_state_val, continues \\\n = (rl.tools.sample_memories(self.memory, self.params.batch_size))\n # evaluate the target q\n target_q = self.sess.run(self.graph.target_q_values, feed_dict={self.graph.states: next_state_val})\n # if using double q\n if self.params.double_q:\n online_q = self.sess.run(self.graph.online_q_values, feed_dict={self.graph.states: next_state_val})\n actions = np.argmax(online_q, axis=1)\n max_next_q_values = target_q[np.arange(actions.shape[0]), actions].reshape(-1, 1)\n else:\n max_next_q_values = np.max(target_q, axis=1, keepdims=True)\n # train the online DQN\n td_target = rewards + continues * self.params.discount_factor * max_next_q_values\n _, self.loss_val = self.sess.run([self.graph.training_op, self.graph.loss],\n feed_dict={self.graph.states: states_val, self.graph.actions: action_val,\n self.graph.td_target: td_target})\n # copy to target\n if self.params.copy_interval is None or (\n self.params.copy_interval and (self.iteration % self.params.copy_interval == 0)):\n self.sess.run(self.graph.copy_online_to_target)", "def train_step(self, experiences, gamma):\n states = experiences['states']\n rewards = experiences['rewards']\n actions = experiences['actions']\n next_states = experiences['next_states']\n dones = experiences['dones']\n q_values = self.main_network(states).gather(1, actions.view(-1, 1)).squeeze()\n\n # Get max predicted Q values (for next states) from target model\n next_q_values = self.target_network(next_states).detach().max(1)[0]\n\n # Compute Q targets for current states\n expected_q_value = rewards + (gamma * next_q_values * (1 - dones))\n\n # Compute loss\n loss = F.mse_loss(q_values, expected_q_value)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # Update the target network\n self.soft_update(self.main_network, 
self.target_network, TAU)", "def forward(self, state):\n if self.noisy:\n lower, upper = self.x[0], self.x[-1]\n test_x = lower + torch.rand(len(self.x)) * (upper - lower)\n else:\n test_x = self.x\n\n with torch.no_grad(), gpytorch.settings.fast_pred_var():\n pred = self.gp(test_x)\n ucb = pred.mean + self.beta() * pred.stddev\n\n max_id = torch.argmax(ucb)\n next_point = test_x[[[max_id]]]\n return next_point, torch.zeros(1)", "def Q(self, value):\n assert value > 0, \"Q needs to be positive and above zero (we divide by Q)\"\n self._Q = value\n self._update()", "def update_policy(self, minibatch_size):\n \n steps = self.rewards.shape[0]\n batch_size = self.rewards.shape[0] * self.rewards.shape[1]\n #steps = 500\n #batch_size = 500\n #print(steps)\n #print(batch_size)\n \n # Compute advantages\n '''\n with torch.no_grad():\n if self.gae:\n advantages = torch.zeros_like(self.rewards).to(self.training_device)\n lastgaelam = 0\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n nextvalues = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t + 1]\n nextvalues = self.state_values[t + 1]\n delta = self.rewards[t] + self.gamma * nextvalues * nextnonterminal - self.state_values[t]\n advantages[t] = lastgaelam = delta + self.gamma * self.gae_lambda * nextnonterminal * lastgaelam\n returns = advantages + self.state_values\n else:\n returns = torch.zeros_like(self.rewards).to(self.training_device)\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n next_return = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t+1]\n next_return = returns[t+1]\n returns[t] = self.rewards[t] + self.gamma * nextnonterminal * next_return\n advantages = returns - self.state_values\n ''' \n returns = torch.zeros_like(self.rewards).to(self.training_device)\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n next_return = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t+1]\n next_return = returns[t+1]\n returns[t] = self.rewards[t] + self.gamma * nextnonterminal * next_return\n advantages = returns - self.state_values\n \n\n # flatten the batch\n #b_obs = self.states.reshape((-1,) + self.state_space)\n #print(self.states.shape)\n b_obs = self.states.reshape((-1,4)).detach()\n b_logprobs = self.action_probs.reshape(-1,1).detach()\n b_actions = self.actions.reshape((-1,)).detach()\n b_advantages = advantages.reshape(-1,1)\n b_returns = returns.reshape(-1,1)\n b_values = self.state_values.reshape(-1,1)\n \n # Optimize policy and value network for K epochs, run optimization in minibatches\n \n inds = np.arange(batch_size)\n for i_epoch_pi in range(self.epochs):\n np.random.shuffle(inds)\n for start in range(0, batch_size, minibatch_size):\n end = start + minibatch_size\n minibatch_ind = inds[start:end]\n mb_advantages = b_advantages[minibatch_ind]\n if self.norm_adv:\n mb_advantages = (mb_advantages - mb_advantages.mean()) / (mb_advantages.std() + 1e-8)\n \n #_, newlogproba, entropy = self.get_action(b_obs[minibatch_ind], b_actions[minibatch_ind])\n newlogproba, entropy = self.evaluate(b_obs[minibatch_ind], b_actions[minibatch_ind])\n #ratio = (newlogproba - b_logprobs[minibatch_ind]).exp()\n ratio = torch.exp((newlogproba - b_logprobs[minibatch_ind].detach()))\n \n # Stats\n approx_kl = (b_logprobs[minibatch_ind] - newlogproba).mean()\n\n # Policy loss\n pg_loss1 = -mb_advantages * ratio\n pg_loss2 = -mb_advantages * torch.clamp(ratio, 1 
- self.clip_epsilon, 1 + self.clip_epsilon)\n pg_loss = torch.max(pg_loss1, pg_loss2).mean()\n entropy_loss = entropy.mean()\n\n # Value loss\n _, new_values = self.policy.forward(b_obs[minibatch_ind])\n if self.clip_vloss:\n \n v_loss_unclipped = self.MseLoss(new_values,b_returns[minibatch_ind])\n #v_loss_unclipped = ((new_values - b_returns[minibatch_ind]) ** 2)\n v_clipped = b_values[minibatch_ind] + torch.clamp(new_values - b_values[minibatch_ind],\n -self.clip_epsilon, self.clip_epsilon)\n #v_loss_clipped = (v_clipped - b_returns[minibatch_ind]) ** 2\n v_loss_clipped = self.MseLoss(v_clipped,b_returns[minibatch_ind])\n v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)\n #v_loss = 0.5 * v_loss_max.mean()\n v_loss = 0.5 * v_loss_max\n else:\n #v_loss = 0.5 * ((new_values - b_returns[minibatch_ind]) ** 2).mean()\n v_loss = self.MseLoss(new_values,b_returns[minibatch_ind])\n\n loss = pg_loss + v_loss * self.vf_coeff - self.ent_coeff * entropy_loss\n\n self.optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)\n self.optimizer.step()\n # Copy new weights into old policy:\n self.old_policy.load_state_dict(self.policy.state_dict())", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n # OUR CODE HERE\n #get the value of the state\n qVal = self.values[state]\n #iterate through the MDP transition states from the current state\n for transitionState, probability in self.mdp.getTransitionStatesAndProbs(state, action):\n #q value = discount * expected value of reward of state\n qVal += self.discount * probability * self.values[transitionState]\n return qVal\n # END OUR CODE", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n Q_Value = self.Q #calling constructor\n\n learning_rate = self.alpha #gives us the learning rate\n\n temporary_QValue = self.getQValue(state,action) #to get the Q value of the state\n\n nextState_QValue = self.getValue(nextState) #to get the Q value of the landing state when taken action a and state s\n\n discount_factor = self.discount #to get the gamma/ discount factor\n\n\n Q_Value[(state,action)] = ((1-learning_rate) * temporary_QValue) + (learning_rate * (reward + discount_factor * nextState_QValue)) #for formula go to README_Reinforcement.txt at line 8\n\n #util.raiseNotDefined()", "def train_step(self):\n # Sample training batch from replay\n training_batch = self.replay.sample(self.batch_size)\n\n # Calculate target Q values for each example:\n # For non-terminal states, targetQ is estimated according to\n # targetQ = r + gamma*Q'(s',max_a Q(s',a))\n # where Q' denotes the target network.\n # For terminating states the target is computed as\n # targetQ = r\n updates = []\n for exp in training_batch:\n start,_,reward,end = exp\n if(self.dampen_states):\n # To dampen states (usually done after major patches or when the meta shifts)\n # we replace winning rewards with 0.\n reward = 0.\n state_code = end.evaluate()\n if(state_code==DraftState.DRAFT_COMPLETE or state_code in DraftState.invalid_states):\n # Action moves to terminal state\n updates.append(reward)\n else:\n # Follwing double DQN paper (https://arxiv.org/abs/1509.06461).\n # Action is chosen by online network, but the target network is used to evaluate this policy.\n # Each row in predicted_Q gives estimated Q(s',a) values for all possible actions for the input state s'.\n feed_dict = {self.ddq_net.online_ops[\"input\"]:[end.format_state()],\n 
self.ddq_net.online_ops[\"valid_actions\"]:[end.get_valid_actions()]}\n predicted_action = self.ddq_net.sess.run(self.ddq_net.online_ops[\"prediction\"], feed_dict=feed_dict)[0]\n\n feed_dict = {self.ddq_net.target_ops[\"input\"]:[end.format_state()]}\n predicted_Q = self.ddq_net.sess.run(self.ddq_net.target_ops[\"outQ\"], feed_dict=feed_dict)\n\n updates.append(reward + self.ddq_net.discount_factor*predicted_Q[0,predicted_action])\n\n # Update online net using target Q\n # Experience replay stores action = (champion_id, position) pairs\n # these need to be converted into the corresponding index of the input vector to the Qnet\n actions = np.array([start.get_action(*exp[1]) for exp in training_batch])\n targetQ = np.array(updates)\n feed_dict = {self.ddq_net.online_ops[\"input\"]:np.stack([exp[0].format_state() for exp in training_batch],axis=0),\n self.ddq_net.online_ops[\"actions\"]:actions,\n self.ddq_net.online_ops[\"target\"]:targetQ,\n self.ddq_net.online_ops[\"dropout_keep_prob\"]:0.5}\n _ = self.ddq_net.sess.run(self.ddq_net.online_ops[\"update\"],feed_dict=feed_dict)", "def learn(self, experiences, gamma):\n states_and_prev_recurrents, actions, recurrents, rewards, next_states, dones = experiences\n\n # Get max predicted Q values (for next states) from target model\n next_states_and_recurrents = torch.cat([next_states, recurrents], dim=1)\n Q_targets_next = self.qnetwork_target(next_states_and_recurrents).detach().max(1)[0].unsqueeze(1)\n # Compute Q targets for current states\n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.qnetwork_local(states_and_prev_recurrents).gather(1, actions)\n\n # Compute loss\n loss_rl = F.mse_loss(Q_expected, Q_targets)\n\n states = states_and_prev_recurrents[:, :8]\n target_recurrents = map_observation_to_recurrent_state(states)\n recurrent_pred = self.qnetwork_local(states_and_prev_recurrents)[:, -5:]\n\n loss_internal_states = F.multilabel_soft_margin_loss(recurrent_pred, target_recurrents)\n\n loss = loss_rl + loss_internal_states\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states = experiences\n\n # Get max predicted Q values (for next states) from target model\n next_action_targets = self.target_model(next_states)\n next_action = next_action_targets.max(1)[0].unsqueeze(-1)\n targets = rewards + (gamma * torch.Tensor(next_action))\n#\n # Get expected Q values from policy model\n action_policy = self.policy_model(states)\n policy = action_policy.gather(1, actions)\n # Compute loss\n loss = F.mse_loss(policy, targets)\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.policy_model, self.target_model, self.tau)", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n qvalue = 0\n features = self.featExtractor.getFeatures(state, action)\n #Each feature is in the form of dictionary {((3, 3), 'east'): 1.0}. Each key is a combination of coordinate and direction. 
Each value represents the old qvalue.\n for feature in features.keys():\n qvalue += features[feature] * self.weights[feature]\n return qvalue", "def learn(self, state, action, reward, state_):\n self.Q.optimizer.zero_grad()\n states = T.tensor(state, dtype=T.float).to(self.Q.device)\n action = T.tensor(action).to(self.Q.device)\n reward = T.tensor(reward).to(self.Q.device)\n states_ = T.tensor(state_, dtype=T.float).to(self.Q.device)\n\n q_pred = self.Q.forward(states)[action]\n\n q_next = self.Q.forward(states_).max()\n\n q_target = reward + self.gamma*q_next\n\n loss = self.Q.loss(q_target, q_pred).to(self.Q.device)\n loss.backward()\n self.Q.optimizer.step()\n self.decrement_epsilon()", "def getQValue(self, gameState, action):\n features = self.getFeatures(gameState, action)\n return features * self.weights", "def computeQValueFromValues(self, state, action):\n\n # Find expected utility of making this move\n x = 0\n for t in self.mdp.getTransitionStatesAndProbs(state,action):\n x += t[1] * self.getValue(t[0])\n\n\n # Return Reward + discounted expected utility\n return self.mdp.getReward(state,None,None) + self.discount*x", "def act(self, state_and_prev_recurrent, eps=0.):\n state_and_prev_recurrent = torch.from_numpy(state_and_prev_recurrent).float().unsqueeze(0).to(device)\n self.qnetwork_local.eval()\n with torch.no_grad():\n action_values = self.qnetwork_local(state_and_prev_recurrent)[:, :4]\n self.qnetwork_local.train()\n\n # Epsilon-greedy action selection\n if random.random() > eps:\n return np.argmax(action_values.cpu().data.numpy())\n else:\n return random.choice(np.arange(self.action_size))", "def q_update(self):\n\n # exit if the experience buffer is not yet large enough\n if self.experience_buffer.size < self.batch_size:\n return\n \n # get the random batch\n states, action_indices, rewards, not_terminals, succ_states, succ_players, succ_legal_moves = self.experience_buffer.random_batch(self.batch_size)\n states = states.to(Globals.device)\n action_indices = action_indices.to(Globals.device)\n rewards = rewards.to(Globals.device)\n not_terminals = not_terminals.to(Globals.device)\n succ_states = succ_states.to(Globals.device)\n succ_players = succ_players.to(Globals.device)\n\n # prepare the training data\n q_values = self.target_network(succ_states)\n target = torch.empty(1, self.batch_size)\n for i in range(self.batch_size):\n if not_terminals[i] == 0:\n target[0, i] = rewards[i]\n continue\n\n if succ_players[i] == CONST.WHITE_MOVE:\n legal_q_values = q_values[0, 0:9][succ_legal_moves[i]]\n q_value, _ = legal_q_values.max(0)\n else:\n legal_q_values = q_values[0, 9:18][succ_legal_moves[i]]\n q_value, _ = legal_q_values.min(0)\n\n target[0, i] = rewards[i] + self.disc*not_terminals[i]*q_value\n\n # execute the training step of the network\n self.training_network.train_step(states, target, action_indices) # the eligibility trace is used as td target", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones = experiences\n # print('learn states.shape', states.shape)\n # print('learn next_states.shape', next_states.shape)\n \n q_expected, q_targets = self.get_target_and_expected(states, \n actions, \n rewards, \n next_states, \n dones, \n gamma)\n\n\n # Compute loss\n loss = F.mse_loss(q_expected, q_targets)\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)", "def 
forward(self, x):\n # Compute the mean norm of activations per channel.\n nu2 = x.pow(2).mean(dim=[2, 3], keepdim=True)\n\n # Perform FRN.\n x = x * torch.rsqrt(nu2 + self.eps.abs())\n\n # Scale and Bias\n if self.is_scale:\n x = self.weight * x\n if self.is_bias:\n x = x + self.bias\n return x", "def forward(self, x):\n n, c, t, v = x.size()\n x1 = x.view(n, c * t, v)\n y = None\n for i in range(self.num_subset):\n A1 = self.PA[i]\n z = self.conv_d[i](torch.matmul(x1, A1).view(n, c, t, v))\n y = z + y if y is not None else z\n A2 = self.cen(x)\n z2 = torch.matmul(x1, A2).view(n, c, t, v)\n z2 = self.conv_cen(z2)\n y += self.lamb * z2\n y = self.bn(y)\n y += self.down(x)\n y = self.relu(y)\n y = self.attention(y)\n return y", "def update1(self, state, action, nextState, reward):\n #print \"update1 in ApproximateQAgent\"\n \"*** YOUR CODE HERE ***\"\n ##################################################################################################################################Eric Did Stuff\n actionList = nextState.getLegalActions(self.index)\n\n\n #print \"Action List\", actionList\n\n\n\n\n weights = self.getWeights()\n\n features = self.featExtractor.getFeatures(state, action, self)\n #self.myFeats = features\n if self.index == 0:\n print \"FEATURES: \",features\n value = self.computeValueFromQValues(nextState)\n qValue = self.getQValue(state,action)\n #print \"value\", value, \"qValue\", qValue\n for feature in features:\n if len(actionList) != 0:\n weights[feature] = weights[feature] + self.alpha * (reward + self.discount * value - qValue) * features[feature]\n else:\n weights[feature] = weights[feature] + self.alpha * (reward - qValue) * features[feature]\n #print \"feature\", feature, \"weights\", weights[feature]\n #print \"weights\", weights\n\n #util.raiseNotDefined()", "def update_value(self, reward):\n\t\tval = self.value\n\t\tval = val + ((reward - val)/self.visited)\n\t\tself.value = val", "def qlearning(env, iterations=1000, gamma=0.9, alpha=0.1):\n nS = env.nS # number of states\n nA = env.nA # number of actions\n Q_value = np.zeros((nS, nA))\n policy = np.ones((env.nS,env.nA))/env.nA\n epsilon = 1\n s_t1 = env.reset() # reset the environment and place the agent in the start square\n ############################\n # YOUR IMPLEMENTATION HERE #\n # HINT: Don't forget to decay epsilon according to GLIE\n\n curr_state = s_t1\n \n start = time.time() # to time how long convergence takes\n print(\"---Q Learning---\\nTraining Started.\")\n \n for k in range (1, iterations):\n # if (k%10000) == 0:\n # print(\"Now playing iteration: \", k)\n epsilon = 1/k\n curr_action, reward, new_state, done = take_one_step(env, policy, curr_state)\n new_action = sample_action(policy, new_state)\n Q_value[curr_state, curr_action] = Q_value[curr_state, curr_action] + alpha * (reward + gamma * (Q_value[new_state, np.argmax(Q_value[new_state])]) - Q_value[curr_state, curr_action])\n \n # epsilon-greedy policy update\n Q_list = np.argwhere(Q_value[curr_state] == np.amax(Q_value[curr_state])).flatten() # get a list of all indices where Q is maximum, (argmax(Q))\n max_Q = np.random.choice(Q_list.flatten()) # randomly pick from those indices. 
Picking each index is equally likely.\n for a in range (nA):\n if a == max_Q:\n policy[curr_state][a] = epsilon/nA + (1 - epsilon) # for the chosen maximal index of Q, set the policy to epsilon/m + 1 - epsilon\n else:\n policy[curr_state][a] = epsilon/nA \n \n # print(\"Q_value = {0}\".format(Q_value))\n # print(\"policy = {0}\".format(policy))\n \n if done:\n curr_state = env.reset() # reset the environment and place the agent in the start square\n curr_action = sample_action(policy, curr_state)\n else:\n curr_state = new_state\n curr_action = new_action\n \n stop = time.time()\n print(\"Training Completed.\")\n print(\"It took: {0} iterations and {1} minutes\".format(k,(stop-start)/60))\n \n ############################\n det_policy = np.argmax(Q_value, axis=1)\n return Q_value, det_policy", "def computeQValueFromValues(self, state, action):\n \n \n next_states_probs = self.mdp.getTransitionStatesAndProbs(state, action)\n # liste des recompenses R(s,a,s')\n rewards = []\n # liste des probas de transitions P(s'|a,s)\n probs = []\n # liste des Vk(s')\n previous_values = []\n # occurence[0] = les next_state\n # occurence[1] = les proba de transi\n for occurence in next_states_probs:\n rewards.append(self.mdp.getReward(state, action, occurence[0]))\n probs.append(occurence[1])\n previous_values.append(self.getValue(occurence[0]))\n Q_value = 0\n # boucle qui calcule somme des ( P(s'|a,s) * [R(s,a,s') + gamma * Vk(s')] ) sur les s'\n for i in range(len(probs)):\n Q_value += probs[i] * (rewards[i] + self.discount * previous_values[i])\n \n return Q_value\n util.raiseNotDefined()", "def forward(self, input_x):\n return self.net(input_x.float())", "def prediccion(self):\n # Project the state ahead\n self.X = self.F @ self.X + self.B @ self.M\n self.P = self.F @ self.P @ self.F.T + self.Q\n\n return self.X", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n # print \"getQValue\"\n features = self.featExtractor.getFeatures(state, self.index)#.values()\n #weights = self.weights.values()\n #dotProduct = reduce( (lambda x, y: x*y), map( (lambda x, y: x+y), self.weights, features))\n #return dotProduct\n score = 0\n for key in features.keys():\n score += features[key]*self.weights[key]\n return score", "def advantage(self, state):\n Q = self.predict(state)\n return Q - np.dot(self.ฯ€.pmf(state, Q), Q)", "def quantum_net(self, q_input_features, q_weights_flat):\n\n # Reshape weights\n q_weights = q_weights_flat.reshape(self.args.q_depth, self.args.n_qubits, 3)\n\n # Start from state |+> , unbiased w.r.t. |0> and |1>\n # Amplitude encoding\n qml.QubitStateVector(q_input_features, wires=list(range(self.args.n_qubits)))\n \n # Sequence of trainable variational layers\n for k in range(self.args.q_depth):\n self.entangling_layer(self.args.n_qubits)\n self.Rot_layer(q_weights[k])\n\n # Expectation values in the Z basis\n exp_vals = [qml.expval(qml.PauliZ(position)) for position in range(self.args.target_class)]\n return tuple(exp_vals)", "def getQValue(self, state, action):\n features = self.featExtractor.getFeatures(state, action)\n total = 0\n for feat in features:\n total += self.getWeights()[feat] * features[feat]\n return total" ]
[ "0.67463326", "0.6215198", "0.6126189", "0.60463625", "0.6016947", "0.60107875", "0.600079", "0.598151", "0.5964633", "0.595709", "0.5950874", "0.59488696", "0.5943859", "0.5934017", "0.59093964", "0.5896094", "0.58890605", "0.5857793", "0.58310497", "0.58174837", "0.579977", "0.5798591", "0.57938", "0.57903445", "0.57851666", "0.57814026", "0.5770534", "0.5770534", "0.57601297", "0.5748083", "0.57397264", "0.57367706", "0.57330817", "0.5727399", "0.5727159", "0.57165533", "0.570699", "0.5697758", "0.5696968", "0.5696486", "0.56620806", "0.56576616", "0.5647243", "0.56453985", "0.56414396", "0.5634715", "0.56321317", "0.5631141", "0.5624357", "0.5620299", "0.5619738", "0.56118256", "0.5603188", "0.5603188", "0.5594863", "0.5581778", "0.5568276", "0.55674976", "0.5561115", "0.5560528", "0.55534476", "0.55493754", "0.554388", "0.5543701", "0.5540772", "0.55377597", "0.5531491", "0.55273855", "0.551119", "0.5510595", "0.550562", "0.5503964", "0.55021906", "0.5496681", "0.549383", "0.5492189", "0.54765266", "0.5469934", "0.54694253", "0.5463629", "0.5462993", "0.5447157", "0.5446816", "0.54462945", "0.5442648", "0.5439768", "0.54388124", "0.5434087", "0.5416447", "0.54121596", "0.54042614", "0.54040635", "0.5403024", "0.54019374", "0.539096", "0.53899777", "0.53753227", "0.536964", "0.5366321" ]
0.5647406
42
Gets the advantage and value by passing out of the base network through the value and advantage heads.
def adv_val(self, input_x) -> Tuple[Tensor, Tensor]:\n    float_x = input_x.float()\n    base_out = self.net(float_x)\n    return self.fc_adv(base_out), self.fc_val(base_out)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bias(self):", "def __get_net_probs(self):\n return np.array([node.value for node in self.net]).reshape(5,5)", "def net_output(self):\n result = self.gives()\n for k, v in self.needs().items():\n result[k] = result.get(k, 0) - v\n\n return result", "def forward(self, obs):\n res = self.hidden_layers(obs)\n logits = self.logits(res)\n value = self.value_branch(res)\n return logits, value", "def get_value(self):\n if not self.visited:\n # first visit at node\n self.visited = True\n\n # value calculation\n for node, weight in self.predecessors:\n self.value += (node.get_value() * weight)\n\n # applying activation function\n if self.activation is not None:\n self.activation()\n\n self.calculated = True\n\n return self.value\n else:\n # visited node\n if self.calculated:\n # calculated in this computation\n return self.value\n else:\n # recurrent connection\n return self.past_value", "def test_find_highest_value_node_first(self):\n nn = NeuralNet(0, 0, '', '', blank=True)\n nn.create_net(2, 2, 2, 2)\n nn.eta = 0.1\n\n # Override weights to static value for reproducibility\n for node in nn.layers[2].nodes:\n node.weights = [0.6, 0.6]\n\n nn.layers[3].nodes[0].weights = [1.0, 1.0]\n nn.layers[3].nodes[1].weights = [0.0, 0.0]\n\n val = nn.assign_output([2, 3], test=True)\n self.assertEqual(val, '10')", "def forward(self, input):\n mean, std = self.mean_net(input), self.std_net(input)\n return mean, std", "def adv_val(self, input_x):\n float_x = input_x.float()\n base_out = self.conv(input_x).view(float_x.size()[0], -1)\n return self.head_adv(base_out), self.head_val(base_out)", "def test_forward_values():\n rnn = GeneratorRNN(1)\n inputs = Variable(torch.zeros(1,1,3))\n hidden = rnn.init_hidden()\n e,pi,mu,sigma,rho,_ = rnn(inputs, hidden)\n\n assert (e >= 0).all()\n assert (e <= 1).all()\n\n diff = torch.abs(1-torch.sum(pi))\n assert (diff < 0.00001).all()\n\n assert (sigma > 0).all()\n\n assert (rho > -1).all()\n assert (rho < 1).all()\n\n rnn = GeneratorRNN(3)\n inputs = Variable(torch.zeros(10,1,3))\n hidden = rnn.init_hidden()\n e,pi,mu,sigma,rho,_ = rnn(inputs, hidden)\n\n pi_sum = torch.sum(pi,dim=2)\n diff = torch.abs(1-pi_sum)\n assert (diff < 0.00001).all()", "def get_weights(self):", "def getWeight(self) -> float:\n ...", "def bias_prior(self):", "def get_hidden_values(self, data):\n return T.nnet.sigmoid(T.dot(data, self.w1) + self.b1)", "def weight(self):", "def get_q_values(self, state, network):\n out = None\n state = state.permute(0, 3, 1, 2)\n #pdb.set_trace()\n ##############################################################\n ################ YOUR CODE HERE - 4-5 lines lines ################\n if network == 'q_network':\n out = self.q_network(state)\n else:\n out = self.target_network(state)\n ##############################################################\n ######################## END YOUR CODE #######################\n return out", "def analyze_belief_strength_with_bias(self, G):\r\n n = []\r\n nbs_list = []\r\n for node in G.nodes: #cycles through the nodes of the graph to mine the attributes\r\n n.append(node) #appends each node to a list that will be put into a dictionary\r\n pbs_list = []\r\n og_bs = G.nodes[node]['belief_strength'] #mines the numerical value for a nodes belief strength, from a pre-set node attribute\r\n unc = G.nodes[node]['uncertainty'] #mines the numerical value for a nodes belief uncertainty, from a pre-set node attribute\r\n prob = G.nodes[node]['probability']\r\n for pre in G.predecessors(node):\r\n ew = G.edges[pre, node]['weight'] #mines 
the numerical value of an edge's weight, from a pre-set edge attribute\r\n pre_bs = G.nodes[pre]['belief_strength'] #mines the numerical value for a predecessors belief strength, from a pre-set node attribute\r\n x = ew * pre_bs #determines how much a node values its neighbor's opinion.\r\n pbs_list.append(x) #puts all values for predecessor belief strangths in a list\r\n if len(pbs_list) == 0:\r\n nbs = og_bs\r\n nbs = int(nbs)\r\n else:\r\n apbs = sum(pbs_list)/len(pbs_list) #calculates the average predecessor belief strength value for a node\r\n if apbs*og_bs > 0:\r\n if apbs > 0:\r\n nbs = min(og_bs + (0.1*prob*unc*apbs), 100)\r\n else:\r\n nbs = max(og_bs + (0.1*prob*unc*apbs), -100)\r\n nbs = int(nbs)\r\n else:\r\n nbs = og_bs\r\n nbs = int(nbs)\r\n nbs_list.append(nbs) #the new belief strengths are appended to a list that will be put into adictionary\r\n change = dict(zip(n, nbs_list)) #creates a dictionary from two lists which stores the nodes as keys and their new belief strengths as values\r\n print(change)\r\n return change #this will be used to update the list in a different function\r", "def value(self, observation, prev_action, prev_reward):\n agent_inputs = buffer_to((observation, prev_action, prev_reward),\n device=self.device)\n _mu, _log_std, value, _rnn_state = self.model(*agent_inputs, self.prev_rnn_state)\n return value.to(\"cpu\")", "def forward(self, value, query, lens):\n relevant_scores = self.relevant_score(value, query, lens)\n e_relevant_scores = torch.exp(relevant_scores)\n weights = e_relevant_scores / e_relevant_scores.sum(-1, keepdim=True)\n attention = (weights.unsqueeze(-1) * value).sum(1)\n return attention", "def res_get(hp2res, hp_dict, hp_labels):\n logg = logging.getLogger(f\"c.{__name__}.res_get\")\n logg.setLevel(\"INFO\")\n logg.debug(f\"Start res_get\")\n\n # build the hp_set for the corresponding bar\n hp_set = []\n for label in hp_labels:\n hp_set.append(hp_dict[label])\n hp_set = tuple(hp_set)\n # get the corresponding loss value\n hp_val = hp2res[hp_set]\n\n logg.debug(f\"hp_set {hp_set} hp_val {hp_val}\")\n return hp_val", "def _learn_node_parameter_var(outputs, weights, inputs):\n var = 0.\n\n \"\"\" YOUR CODE HERE \"\"\"\n temp = 0\n N_observe = outputs.shape[0]\n if inputs is None:\n temp = np.sum((outputs-weights[0])**2)\n else:\n for i in range(N_observe):\n temp += (outputs[i] - (np.sum(weights[1:] * inputs[i]) +weights[0]))**2\n var = temp/N_observe\n\n\n\n \"\"\" END YOUR CODE HERE \"\"\"\n\n return var", "def getQValue(self, state, action):\n #print \"getQValue in ApproximateQAgent\"\n\n \"*** YOUR CODE HERE ***\"\n weights = self.getWeights()\n features = self.featExtractor.getFeatures(state, action, self)\n\n value = 0\n\n #print \"FEATURES: \", features\n #print \"WEIGHTS: \", weights\n\n for feature in features:\n value += features[feature]*weights[feature]\n return value\n #util.raiseNotDefined()", "def get_reward(self):\n # Ver list\n self.Verlist = {\n '1': False,\n '2': False,\n '3': False,\n '4': False,\n '5': True,\n }\n # --------------------------------- NEW ----\n r = 0\n if self.ENVGetSIReset:\n V = {\n 'CoolRateTemp': self.DRateFun(self.mem['KCNTOMS']['Val']),\n 'CurrentTemp': self.mem['UAVLEG2']['Val'],\n 'CurrentPres': self.mem['ZINST65']['Val'],\n 'Dis': abs(self.DRateFun(self.mem['KCNTOMS']['Val']) - self.mem['UAVLEG2']['Val']),\n 'PZRLevel': self.mem['ZINST63']['Val'],\n 'SG1Nar': self.mem['ZINST78']['Val'], 'SG2Nar': self.mem['ZINST77']['Val'],\n 'SG3Nar': self.mem['ZINST76']['Val'],\n 'SG1Wid': 
self.mem['ZINST72']['Val'], 'SG2Wid': self.mem['ZINST71']['Val'],\n 'SG3Wid': self.mem['ZINST70']['Val'],\n 'SG1Pres': self.mem['ZINST75']['Val'], 'SG2Pres': self.mem['ZINST74']['Val'],\n 'SG3Pres': self.mem['ZINST73']['Val'],\n }\n if self.Verlist['1']:\n # Cooling rate์— ๋”ฐ๋ผ์„œ ์˜จ๋„ ๊ฐ์†Œ\n r -= V['Dis'] / 100\n # ๊ฐ€์••๊ธฐ ์ˆ˜์œ„ 10 ์•„๋ž˜ ์ข…๋ฃŒ\n # if V['PZRLevel'] <= 10: r -= 100\n if self.Verlist['2']:\n # ๋ชฉํ‘œ์น˜๊นŒ์ง€ ๋„๋‹ฌ\n r += (29.5 - V['CurrentPres']) / 100\n r += (170 - V['CurrentTemp']) / 100\n if self.Verlist['3']:\n # Cooling rate์— ๋”ฐ๋ผ์„œ ์˜จ๋„ ๊ฐ์†Œ\n dis_reward = - V['Dis'] / 100 # [0.0 ~ -0.2] ๋™ํ–ฅ์„ ๋ณด์ž„\n # Pressure and Temp Dis\n curp = 29.5 if V['CurrentPres'] <= 29.5 else V['CurrentPres']\n curt = 170 if V['CurrentTemp'] <= 170 else V['CurrentTemp']\n dis_pres = (29.5 - V['CurrentPres']) / 100\n dis_temp = (170 - V['CurrentTemp']) / 100\n\n # r += (dis_pres * 0.1) + (dis_temp * 0.1) + (dis_reward * 10) # ๊ฐ์•• X\n r += (dis_pres * 0.1) + (dis_reward * 5)\n if self.Verlist['4']:\n # Cooling rate์— ๋”ฐ๋ผ์„œ ์˜จ๋„ ๊ฐ์†Œ\n dis_reward = - V['Dis'] / 100 # [0.0 ~ -0.2] ๋™ํ–ฅ์„ ๋ณด์ž„\n # Pressure and Temp Dis\n curp = 29.5 if V['CurrentPres'] <= 29.5 else V['CurrentPres']\n dis_pres = (29.5 - V['CurrentPres']) / 100\n PT_reward = - PTCureve().Check(Temp=V['CurrentTemp'], Pres=V['CurrentPres'])\n r += (dis_pres * 0.1) + (dis_reward * 5) + (PT_reward * 0.1)\n if self.Verlist['5']:\n r = 0\n # 1] Cooling rate์— ๋”ฐ๋ผ์„œ ์˜จ๋„ ๊ฐ์†Œ\n coolrate_r = - V['Dis']\n # 2] ๊ฐ€์••๊ธฐ ์ˆ˜์œ„ 20~76% ๊ตฌ๊ฐ„ ์ดˆ๊ณผ์‹œ ํŒจ๋„ํ‹ฐ\n pzrlevel_r = 0\n if 20 <= V['PZRLevel'] <= 76:\n pass\n else:\n if 20 > V['PZRLevel']:\n pzrlevel_r -= (20 - V['PZRLevel'])\n else:\n pzrlevel_r -= (V['PZRLevel'] - 76)\n # 3] ์ฆ๊ธฐ ๋ฐœ์ƒ๊ธฐ 6% ~ 50% ์ด์ƒ ์ดˆ๊ณผ ์‹œ ํŒจ๋„ํ‹ฐ\n sg_r = 0\n for _ in range(1, 4):\n if 6 <= V[f'SG{_}Nar'] <= 50:\n pass\n else:\n if 6 > V[f'SG{_}Nar']:\n sg_r -= (6 - V[f'SG{_}Nar'])\n else:\n sg_r -= (V[f'SG{_}Nar'] - 50)\n # 4] PT ์ปค๋ธŒ์—์„œ ๋ฒ—์–ด๋‚˜๋ฉด ๊ฑฐ๋ฆฌ๋งŒํผ ํŒจ๋„ํ‹ฐ\n PT_reward = - PTCureve().Check_Dis(Temp=V['CurrentTemp'], Pres=V['CurrentPres'])\n # 5] ๋ชฉํ‘œ์น˜์™€ ๊ฐ€๊นŒ์›Œ ์งˆ ์ˆ˜๋ก +\n pres_r, temp_r = 0, 0\n pres_r = (29.5 - V['CurrentPres'])\n temp_r = (170 - V['CurrentTemp'])\n # 6] S/G ์••๋ ฅ\n Avg_pres = (V['SG1Pres'] + V['SG2Pres'] + V['SG3Pres'])/3\n SGpres_r = 9 - Avg_pres if Avg_pres > 9 else 0\n # --------------------------------------------------------------\n w = {\n 'coolrate_r': [coolrate_r, 2],\n 'pzrlevel_r': [pzrlevel_r, 1],\n 'sg_r': [sg_r, 1.5],\n 'PT_reward': [PT_reward, 3],\n 'pres_r': [pres_r, 1],\n 'temp_r': [temp_r, 0.5],\n 'SGpres_r': [SGpres_r, 0.5]\n }\n\n log_txt_temp = ''\n for key in w.keys():\n r += w[key][0] * w[key][1]\n log_txt_temp += f'[{round(w[key][0]*w[key][1], 1)}:{w[key][0]}*{w[key][1]}]_'\n log_txt_temp = f'R:{r} = ' + log_txt_temp\n\n self.Loger_txt += log_txt_temp\n\n # self.Loger_txt += f\"R:{r} = {dis_pres * 0.1}+{dis_temp * 0.1}+({dis_reward * 10})\\t\"\n # self.Loger_txt += f\"R:{r} = {dis_pres * 0.1}+({dis_reward * 5})\\t\" #Verlist['3']\n # self.Loger_txt += f\"R:{r} = {dis_pres * 0.1}+({dis_reward * 5})+({PT_reward * 0.1})\\t\"\n\n # --------------------------------- Send R ----\n self.AcumulatedReward += r\n # self.Loger_txt += f'{r}\\t'\n self.DIS_CSF_Info += f'[R: {r}]\\t'\n return r", "def forward(self, state):\n x = state\n feature = self.feature_layer(x)\n action_value = self.value_layer(feature)\n advantage = self.advantage_layer(feature)\n \n q_value = action_value + 
(advantage - advantage.mean(dim=1, keepdim=True))\n return q_value", "def forward(self, state):\n\n # connect layers to each other and put relu activations between them\n for layer in self.hidden_layers:\n state = layer(state)\n state = F.relu(state)\n value = self.value_layer(state)\n return value", "def forwardPolicyNet(self, state):\n with torch.no_grad():\n q_values = self.policy_net(state)\n return q_values", "def _get_weight(self):\n return self.__weight", "def _get_weight(self):\n return self.__weight", "def _get_weight(self):\n return self.__weight", "def _get_weight(self):\n return self.__weight", "def _get_weight(self):\n return self.__weight", "def _get_weight(self):\n return self.__weight", "def _get_weight(self):\n return self.__weight", "def _get_weight(self):\n return self.__weight", "def value(self):\n #import pdb; pdb.set_trace()\n return ((self.team1.get_cur_hp() / self.team1.get_total_hp()) - \n (self.team2.get_cur_hp() / self.team2.get_total_hp()))", "def _policy_nn(self):\n with tf.variable_scope(\"reward_params\") as scope:\n \n self.h1 = tf.layers.dense(self.input_ph, self.hidden_dim, tf.nn.tanh,\n kernel_initializer=tf.random_normal_initializer(\n stddev=np.sqrt(1 / self.params_dim)), name=\"h1\")\n self.h2 = tf.layers.dense(self.h1, self.hidden_dim, tf.nn.tanh,\n kernel_initializer=tf.random_normal_initializer(\n stddev=np.sqrt(1 / self.params_dim)), name=\"h2\")\n self.rewards = tf.layers.dense(self.h2, 1,\n kernel_initializer=tf.random_normal_initializer(\n stddev=np.sqrt(1 / self.hidden_dim)), name=\"rewards\")\n self.rewards_sum = tf.reduce_sum(self.rewards)", "def get_hidden_values(self, input):\n return T.nnet.sigmoid(T.dot(input, self.W) + self.bh)", "def get_thrust_value(self, command):\n return self._gain * numpy.abs(command) * command", "def get(self) -> float:\n ...", "def get_weight(self, val1, val2):\n\n\t\tnode1 = self.get_node(val1)\n\t\tnode2 = self.get_node(val2)\n\n\t\treturn node1.get_weight(node2)", "def learn(self):\n # Calculate prior probabilities.\n self.priorpos = len(self.posdata) / (len(self.posdata) + len(self.negdata))\n self.priorneg = len(self.negdata) / (len(self.posdata) + len(self.negdata))\n print(\"Prior probability positive: \")\n print(self.priorpos)\n print(\"Prior probability negative: \")\n print(self.priorneg)\n\n # Calculate negative likelihood/conditional probability.\n occurpos = self.occurence(self.posvec)\n self.condpos = self.condprob(occurpos)\n occurneg = self.occurence(self.negvec)\n self.condneg = self.condprob(occurneg)", "async def get_change(sochain_url, value_out, network, address):\n try:\n balance = await sochain_api.get_balance(sochain_url, network, address)\n balance = round(balance[0].amount * 10 ** 8)\n change = 0\n if balance - value_out > DUST_THRESHOLD:\n change = balance - value_out\n return change\n except Exception as err:\n raise Exception(str(err))", "def net(input_lst, weight_lst, bias):\r\n net_total = bias\r\n\r\n for node in range(len(input_lst)):\r\n net_total += input_lst[node] * weight_lst[node]\r\n\r\n return net_total", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def 
getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def brain_weight_oz(self):\r\n return Heart.heart_weight_oz(self) # Used method from Heart Class\r", "def forward(self, prev_state, obs_t):\r\n # Use your network to compute qvalues for given state\r\n #print(state_t.shape)\r\n h = self.conv(obs_t)\r\n\r\n h = h.view(h.size(0), -1)\r\n\r\n new_state = h_new, c_new = self.lstm(h, prev_state)\r\n advantage = self.adv(h_new)\r\n value = self.val(h_new)\r\n\r\n\r\n adv_mean = torch.mean(advantage, dim=1, keepdim=True)\r\n qvalues = value + advantage - adv_mean\r\n\r\n return new_state, qvalues", "def LHopital(n, top, bot, accuracy=0.0001, scaledown=1.25):\n vtop = top(n)\n vbot = bot(n)\n i = 1\n while vtop == 0 and vbot == 0:\n vtop = deriv(top, n, i, accuracy, scaledown)\n vbot = deriv(bot, n, i, accuracy, scaledown)\n i += 1\n return vtop/vbot", "def get_hidden_values(self, input):\r\n return T.nnet.sigmoid(T.dot(input, self.W) + self.b)", "def get_hidden_values(self, input):\r\n return T.nnet.sigmoid(T.dot(input, self.W) + self.b)", "def compute_loss(\n action_probs: tf.Tensor, values: tf.Tensor, returns: tf.Tensor\n) -> tf.Tensor:\n\n advantage = returns - values\n td = tf.subtract(returns, values)\n\n # actor\n # action_log_probs = tf.math.log(action_probs)\n # actor_loss = -tf.math.reduce_sum(action_log_probs * advantage)\n action_log_probs = tf.math.log(action_probs)\n actor_loss = -tf.math.reduce_mean(action_log_probs * td)\n\n # critic\n # td = tf.subtract(returns, values)\n # critic_loss = tf.reduce_mean(tf.square(td))\n critic_loss = huber_loss(values, returns)\n\n tf.print(\"a_loss:\", actor_loss, \"c_loss:\", critic_loss)\n\n return actor_loss + critic_loss", "def gain(self):\n return self[1]", "def get_expected_cost(self):", "def value(self, observation, prev_action, prev_reward):\n model_inputs = buffer_to((observation, prev_action, prev_reward),\n device=self.device)\n _mu, _log_std, value = self.model(*model_inputs)\n return value.to(\"cpu\")", "def baseEvaluate(self, gameState, action):\n features = self.getFeatures(gameState, action)\n weights = self.getWeights(gameState, action)\n return features * weights", "def get_hyperparams(self):", "def forward(self, pred, gt, weight=None):\n num_pos = torch.relu(torch.sum(gt) - 1) + 1\n num_neg = torch.relu(torch.sum(1 - gt) - 1) + 1\n if weight is not None:\n loss = nn.BCEWithLogitsLoss(reduction='none')(pred, gt.float())\n loss = torch.mean(loss * weight)\n elif self.balanced is False:\n loss = nn.BCEWithLogitsLoss(reduction='mean')(pred, gt.float())\n else:\n loss = nn.BCEWithLogitsLoss(pos_weight=num_neg * 1.0 / num_pos, reduction='mean')(pred, gt.float())\n\n # compute precision, recall, f1\n pred_labels = pred > 0\n gt, pred_labels, pred = gt.detach().cpu().numpy(), pred_labels.detach().cpu().numpy(), pred.detach().cpu().numpy()\n precision = precision_score(gt[0], pred_labels[0])\n recall = recall_score(gt[0], pred_labels[0])\n f1 = f1_score(gt[0], pred_labels[0])\n mean_logit_true = np.sum(pred * gt) / max(1, np.sum(gt))\n mean_logit_false = np.sum(pred * (1 - gt)) / max(1, np.sum(1 - gt))\n\n eval_stats = {\n \"loss\": loss,\n \"precision\": float(precision),\n \"recall\": float(recall),\n \"f1\": float(f1),\n \"logit_true\": float(mean_logit_true),\n \"logit_false\": float(mean_logit_false)\n }\n return eval_stats", "def getWeights(self, gameState, actton):\n\t\treturn {'successorScore': 1.0}", "def value(self) -> float:", "def prediction_b(self):\r\n return self._prediction_b", "def 
test_get_hyperflex_node_profile_by_moid(self):\n pass", "def get_value_loss(flat_params):\n set_flat_params_to(value_net, tensor(flat_params))\n for param in value_net.parameters():\n if param.grad is not None:\n param.grad.data.fill_(0)\n values_pred = value_net(states)\n value_loss = (values_pred - returns).pow(2).mean() # MeanSquaredError\n\n # weight decay\n for param in value_net.parameters():\n value_loss += param.pow(2).sum() * l2_reg\n value_loss.backward()\n return value_loss.item(), get_flat_grad_from(value_net.parameters()).cpu().numpy()", "def getWeight(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _val_step(self, inputs, targets, extra_params):\n\n inputs = inputs.to(self.device)\n targets = targets.to(self.device, non_blocking=True)\n\n recons = self.generator(inputs)\n\n # Discriminator part.\n # pred_fake = self.discriminator(recons) # Generated fake image going through discriminator.\n # pred_real = self.discriminator(targets) # Real image going through discriminator.\n # gradient_penalty = compute_gradient_penalty(self.discriminator, targets, recons)\n # disc_loss = pred_fake.mean() - pred_real.mean() + self.lambda_gp * gradient_penalty\n\n # Generator part.\n # gen_loss = -pred_fake.mean()\n recon_loss = self.loss_funcs['recon_loss_func'](recons, targets)\n # total_gen_loss = gen_loss + self.recon_lambda * recon_loss\n\n step_loss = recon_loss\n step_loss_components = {'recon_loss': recon_loss}\n\n return recons, step_loss, step_loss_components", "def value(self):\n return self.head", "def evaluate(self, gameState, action):\n features = self.getFeatures(gameState, action)\n ####print \"features \", features\n weights = self.getWeights(gameState, action)\n ####print \"weights \", weights\n return features * weights", "def get_net(netsnt, netrcv, tempsnt, temprcv):\n return {\n 'NET-PKT-SNT': (netsnt - tempsnt),\n 'NET-PKT-RCV': (netrcv - temprcv),\n }", "def get_hidden_values(self, input):\n return T.nnet.sigmoid(T.dot(input, self.W) + self.b)", "def get_hidden_values(self, input):\n return T.nnet.sigmoid(T.dot(input, self.W) + self.b)", "def forward(self, input):\n\n common = self.common_tower(input)\n wdl = self.wdl_head(common)\n policy = self.policy_head(common)\n\n return wdl, policy", "def call(self, obs):\n\t\tx = tf.convert_to_tensor(obs)\n\t\thidden_logs = self.hidden1(x)\n\t\thidden_vals = self.hidden2(x)\n\t\treturn self.logits(hidden_logs), self.value(hidden_vals)", "def dealer_probs():\n # Pdf of any current hand (value, hard) and final value; p(v_f | v_c) where v_f = final value, v_c = current value\n probabilities = {}\n\n # End nodes: (value, True) for value >= 17 and (value, False) for value > 17\n # Dependencies (in order of increasing requirements):\n # Hard values, value >= 11, possiblity of bust, no possibility of going soft with an ace (value, True) depends on (value', True) for 17 > value' > value\n # Soft values, 17 >= value >= 11 (value, False) depends on (value', False) for 17 >= value' > value, (value', True) for 17 > value' > 11\n # Hard values, 11 > value >= 2 , no possibility of bust, possibility of going soft with an ace (value, True) depends on (value', True) for 17 > value' > value and (value', False) for 17 >= value' > 13\n\n\n # End nodes\n for value in xrange(17, 22):\n probabilities[(value, True)] = {value: 1.0}\n if value == 17: continue # on soft 17, dealer will still hit\n probabilities[(value, 
False)] = {value: 1.0}\n\n # Hard values, 17 > value >= 11, possibility of bust, no possibility of going soft with an ace\n for value in xrange(16, 10, -1):\n probabilities[(value, True)] = {}\n current_prob = probabilities[(value, True)]\n for next_card in xrange(1, min(10, 21-value)+1):\n next_prob = probabilities[(value + next_card, True)]\n for end_val in next_prob:\n current_prob[end_val] = current_prob.get(end_val, 0) + next_prob[end_val] * PROBABILITIES[next_card]\n\n # Soft values, 17 >= value >= 11\n for value in xrange(17, 10, -1):\n probabilities[(value, False)] = {}\n current_prob = probabilities[(value, False)]\n for next_card in xrange(1, 11):\n next_value = value + next_card\n hard = False\n if next_value > 21:\n next_value -= 10\n hard = True\n next_prob = probabilities[(next_value, hard)]\n for end_val in next_prob:\n current_prob[end_val] = current_prob.get(end_val, 0) + next_prob[end_val] * PROBABILITIES[next_card]\n\n # Hard values, 11 > value >= 2, no possibility of bust, possibility of going soft with an ace\n for value in xrange(10, 1, -1):\n probabilities[(value, True)] = {}\n current_prob = probabilities[(value, True)]\n for next_card in xrange(2, 12):\n next_value = value + next_card\n hard = (next_card != 11)\n next_prob = probabilities[(next_value, hard)]\n for end_val in next_prob:\n current_prob[end_val] = current_prob.get(end_val, 0) + next_prob[end_val] * PROBABILITIES[next_card]\n\n return probabilities", "def test_find_highest_value_node_last(self):\n nn = NeuralNet(0, 0, '', '', blank=True)\n nn.create_net(2, 2, 2, 2)\n nn.eta = 0.1\n\n # Override weights to static value for reproducibility\n for node in nn.layers[2].nodes:\n node.weights = [0.6, 0.6]\n\n nn.layers[3].nodes[0].weights = [0.0, 0.0]\n nn.layers[3].nodes[1].weights = [1.0, 1.0]\n\n val = nn.assign_output([2, 3], test=True)\n self.assertEqual(val, '01')", "def gibbs_ask_traffic(self, X, e, Z, bn, N):\n\n #makes copies\n X = e\n e = e\n\n #probability\n probability = [0,0]\n numerator = 0\n\n\n #True, False\n\n for x in range(N):\n # second joint\n if Z == True: # if non evidence variable\n random_choice = np.random.choice([0,1], 1, True, [0.5, 0.5])[0] #Rain or No Rain\n X[1] = bn[1][random_choice][0]\n else:\n random_choice = np.random.choice([0, 1], 1, True, [0.5, 0.5])[0] #Rain or No Rain\n X[1] = bn[1][random_choice][1]\n\n # first joint\n if X[1] == 0.8 or X[1] == 0.2: # Rain is true\n X[0] = bn[0][0]\n else: # Rain is False\n X[0] = bn[0][1]\n\n # third joint\n if X[1] == 0.8 or X[1] == 0.1: # traffic\n random_late = np.random.choice([0,1], 1, True, [0.5,0.5])[0]\n X[2] = bn[2][0][random_late]\n else: # no traffic\n random_late = np.random.choice([0, 1], 1, True, [0.5, 0.5])[0]\n X[2] = bn[2][1][random_late]\n\n # print(X)\n if X[0] == 0.1:\n probability[0] += 1\n else:\n probability[1] += 1\n\n\n probability[0] = probability[0] / N\n probability[1] = probability[1] / N\n # print(probability)\n return probability", "def forward(self, x):\n dims = list(range(1, len(x.shape)))\n mean = x.mean(dim=dims, keepdim=True)\n var = torch.pow(x - mean, 2).mean(dim=dims, keepdim=True)\n return self.apply_gain_and_bias((x - mean) / (var + EPS).sqrt())", "def getWeights(self, gameState, action):\n # return {'successorScore': 1.0}\n if self.isOffensive:\n return self.getOffensiveWeights(gameState, action)\n else:\n return self.getDefensiveWeights(gameState, action)", "def test_get_damage(self):\n self.veh.health = 2.2\n for op in self.veh.operators:\n op.health = 0.5\n self.veh.get_damage(0.5)\n 
self.assertEqual(self.veh.health, 1.9)\n self.assertEqual(self.veh.operators[0].health, 0.4)\n self.assertEqual(self.veh.operators[1].health, 0.45)\n self.assertEqual(self.veh.operators[2].health, 0.45)", "def getMyValue(self):\n valueBV = 0.0\n valueCR = 0.0\n valueAL = 0.0\n valueEC = 0.0\n valueIA = 0.0\n factorAL = globals.cityCRGen/globals.cityALGen\n factorEC = globals.cityCRGen/globals.cityECGen\n factorIA = globals.cityCRGen/globals.cityIAGen\n ratio = self.strength/100.0\n valueCR += self.myDesign.costCR*ratio\n valueAL += self.myDesign.costAL*ratio\n valueEC += self.myDesign.costEC*ratio\n valueIA += self.myDesign.costIA*ratio\n valueBV += (valueCR +\n valueAL*factorAL +\n valueEC*factorEC +\n valueIA*factorIA) / 1000.0\n return (valueBV, valueCR, valueAL, valueEC, valueIA)", "def get_aff_net(sta):\n pass", "def getQValue(self, gameState, action):\n features = self.getFeatures(gameState, action)\n return features * self.weights", "def get_node_value(succs, preds):\n ret = 1\n if succs == 0:\n ret *= NODE_ENTRY\n\n if preds == 0:\n ret *= NODE_EXIT\n\n ret *= NODE_NORMAL\n return ret", "def forward(self, x):\n x = F.relu(self.affine1(x))\n x = F.relu(self.affine2(x))\n\n # actor: choses action to take from state s_t\n # by returning probability of each action\n action_prob = F.softmax(self.action_head(x), dim=-1)\n\n # critic: evaluates being in the state s_t\n state_values = self.value_head(x)\n\n # return values for both actor and critic as a tupel of 2 values:\n # 1. a list with the probability of each action over the action space\n # 2. the value from state s_t\n return action_prob, state_values", "def value_net(self):\n return functools.partial(self.value_net_fn, self.value_net_params)", "def test_regression_relative_attention_bidirectional_values(self):\n outputs, unused_params = self.relative_attention.init_with_output(\n random.PRNGKey(0), self.query_len, self.key_len, bidirectional=True)\n self.assertEqual(outputs.shape,\n (1, self.num_heads, self.query_len, self.key_len))\n self.assertAlmostEqual(outputs[0, 0, 0, 0], 0.55764728, places=5)\n self.assertAlmostEqual(outputs[0, 1, 2, 1], -0.10935841, places=5)\n self.assertAlmostEqual(outputs[0, 1, 4, 6], 0.14510104, places=5)\n self.assertAlmostEqual(outputs[0, 2, 4, 6], -0.36783996, places=5)", "def learn(self):\n \n # target parameter update\n # target parameter update\n if self.learn_step_counter % self.nu_iter == 0:\n self.target_net.load_state_dict(self.eval_net.state_dict())\n #testing the preformace of the network\n if self.learn_step_counter == 0:\n print('As referece this first test on dev data. Is maded with the Q networks, initialized randomly : ' )\n else:\n print(\"\\n Lets copy the Q-value Net in to Q-target net!. 
And test the performace on the dev data: \")\n \n current_bleu = self.dev_network()\n print(\"Current Bleu score is: \", current_bleu)\n \n self.learn_step_counter += 1\n\n \n long_Batch = self.sample_size*3\n # Sampling the higgest rewards values\n b_memory_big = self.memory[np.argsort(-self.memory[:-self.max_output_length, self.state_size+1])][:long_Batch]\n \n sample_index = np.random.choice(long_Batch, self.sample_size)\n b_memory = b_memory_big[sample_index, :]\n\n b_s = torch.FloatTensor(b_memory[:, :self.state_size])\n b_a = torch.LongTensor(b_memory[:, self.state_size:self.state_size+1].astype(int))\n b_r = torch.FloatTensor(b_memory[:, self.state_size+1:self.state_size+2])\n b_s_ = torch.FloatTensor(b_memory[:, self.state_size+2: self.state_size+2 + self.state_size])\n\n b_is_eos = torch.FloatTensor(b_memory[:, self.size_memory1-1:]).view(self.sample_size, 1)\n #print(b_a, b_a.size)\n #print(b_is_eos)\n #Activate the eval_net\n unfreeze_model(self.eval_net)\n \n # q_eval w.r.t the action in experience\n q_eval = self.eval_net(b_s).gather(1, b_a) # shape (batch, 1)\n q_next = self.target_net(b_s_).detach() # detach from graph, don't backpropagate\n #taking the most likely action.\n b_a_ = torch.LongTensor(q_next.max(1)[1].view(self.sample_size, 1).long())\n #b_a_ = q_next.max(1)[0].view(self.sample_size, 1).long() # shape (batch, 1)\n q_eval_next = self.eval_net(b_s_).gather(1, b_a_) # shape (batch, 1)\n \n #If eos q_target = reward. \n q_target = b_r + self.gamma * b_is_eos* q_eval_next.view(self.sample_size, 1) # shape (batch, 1)\n #version 0\n #q_target = b_r + self.gamma * q_next.max(1)[0].view(self.sample_size, 1) # shape (batch, 1)\n \n loss = self.loss_func(q_eval, q_target)\n \n self.tb_writer.add_scalar(\"learn/learn_batch_loss\",\n loss.data, self.learn_step_counter)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n #desctivate the eval_net\n freeze_model(self.eval_net)", "def __getstate__(self):\n return (self.layers, self.best_loss)", "def _get_hop(self):\n return self.__hop", "def GetWeight(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def receive_blocks(self, content: Tuple[int, ndarray]) -> Union[Iterable[netEncapsulation], netEncapsulation, None]:\n # get last state of working node\n last_state = self.Bak_Weights_Node.get(content[0], 0)\n # update global current state\n self.Current_Weights = self.Current_Weights + content[1]\n # get difference\n grad_diff = self.Current_Weights - last_state\n # update last state of working node\n self.Bak_Weights_Node[content[0]] = self.Current_Weights\n\n return netEncapsulation(content[0], (-1, grad_diff))", "def coefficient(self) -> float:\n ...", "def predict(self, model_input):\n # Should return a dictionary of move-prior pairs and the value from\n # the network's value head\n pass", "def neural_result(self, input):\n n_output = self.network.activate(input)\n if n_output >= 0.5:\n return 2\n else:\n return 1", "def analyze_belief_strength_without_bias(self, G):\r\n n = []\r\n nbs_list = []\r\n for node in G.nodes: #cycles through the nodes of the graph to mine the attributes\r\n n.append(node) #appends each node to a list that will be put into a dictionary\r\n pbs_list = []\r\n og_bs = G.nodes[node]['belief_strength'] #mines the numerical value for a nodes belief strength, from a pre-set node attribute\r\n unc = G.nodes[node]['uncertainty'] #mines the numerical value 
for a nodes belief uncertainty, from a pre-set node attribute\r\n prob = G.nodes[node]['probability']\r\n for pre in G.predecessors(node):\r\n ew = G.edges[pre, node]['weight'] #mines the numerical value of an edge's weight, from a pre-set edge attribute\r\n pre_bs = G.nodes[pre]['belief_strength'] #mines the numerical value for a predecessors belief strength, from a pre-set node attribute\r\n x = ew * pre_bs #determines how much a node values its neighbor's opinion.\r\n pbs_list.append(x) #puts all values for predecessor belief strangths in a list\r\n if len(pbs_list) == 0:\r\n nbs = og_bs\r\n nbs = int(nbs)\r\n else:\r\n apbs = sum(pbs_list)/len(pbs_list) #calculates the average predecessor belief strength value for a node\r\n nbs = min(og_bs + (0.1*prob*unc*apbs), 100) # average predecessor's belief strength is added to the original belief strength.\r\n nbs = max(nbs, -100)\r\n nbs = int(nbs)\r\n nbs_list.append(nbs) #the new belief strengths are appended to a list that will be put into adictionary\r\n change = dict(zip(n, nbs_list)) #creates a dictionary from two lists which stores the nodes as keys and their new belief strengths as values\r\n print(change)\r\n return change #this will be used to update the list in a different function\r", "def invalid_values(net):\n\n check_results = {}\n\n # Contains all element attributes that are necessary to initiate a power flow calculation.\n # There's a tuple with the structure (attribute_name, input type restriction)\n # for each attribute according to pandapower data structure documantation\n # (see also type_checks function)\n\n important_values = {'bus': [('vn_kv', '>0'), ('in_service', 'boolean')],\n 'line': [('from_bus', 'positive_integer'),\n ('to_bus', 'positive_integer'),\n ('length_km', '>0'), ('r_ohm_per_km', '>=0'),\n ('x_ohm_per_km', '>=0'), ('c_nf_per_km', '>=0'),\n ('max_i_ka', '>0'), ('df', '0<x<=1'), ('in_service', 'boolean')],\n 'trafo': [('hv_bus', 'positive_integer'), ('lv_bus', 'positive_integer'),\n ('sn_mva', '>0'), ('vn_hv_kv', '>0'), ('vn_lv_kv', '>0'),\n ('vkr_percent', '>=0'),\n ('vk_percent', '>0'), ('pfe_kw', '>=0'), ('i0_percent', '>=0'),\n ('in_service', 'boolean')],\n 'trafo3w': [('hv_bus', 'positive_integer'), ('mv_bus', 'positive_integer'),\n ('lv_bus', 'positive_integer'),\n ('sn_hv_mva', '>0'), ('sn_mv_mva', '>0'), ('sn_lv_mva', '>0'),\n ('vn_hv_kv', '>0'), ('vn_mv_kv', '>0'), ('vn_lv_kv', '>0'),\n ('vkr_hv_percent', '>=0'), ('vkr_mv_percent', '>=0'),\n ('vkr_lv_percent', '>=0'), ('vk_hv_percent', '>0'),\n ('vk_mv_percent', '>0'), ('vk_lv_percent', '>0'),\n ('pfe_kw', '>=0'), ('i0_percent', '>=0'),\n ('in_service', 'boolean')],\n 'load': [('bus', 'positive_integer'), ('p_mw', 'number'),\n ('q_mvar', 'number'),\n ('scaling', '>=0'), ('in_service', 'boolean')],\n 'sgen': [('bus', 'positive_integer'), ('p_mw', 'number'),\n ('q_mvar', 'number'),\n ('scaling', '>=0'), ('in_service', 'boolean')],\n 'gen': [('bus', 'positive_integer'), ('p_mw', 'number'),\n ('scaling', '>=0'), ('in_service', 'boolean')],\n 'ext_grid': [('bus', 'positive_integer'), ('vm_pu', '>0'),\n ('va_degree', 'number')],\n 'switch': [('bus', 'positive_integer'), ('element', 'positive_integer'),\n ('et', 'switch_type'), ('closed', 'boolean')]}\n\n # matches a check function to each single input type restriction\n type_checks = {'>0': check_greater_zero,\n '>=0': check_greater_equal_zero,\n '<0': check_less_zero,\n '<=0': check_less_equal_zero,\n 'boolean': check_boolean,\n 'positive_integer': check_pos_int,\n 'number': check_number,\n '0<x<=1': 
check_greater_zero_less_equal_one,\n 'switch_type': check_switch_type\n }\n\n for key in important_values:\n if len(net[key]) > 0:\n for value in important_values[key]:\n for i, element in net[key].iterrows():\n check_result = type_checks[value[1]](element, i, value[0])\n if check_result is not None:\n if key not in check_results:\n check_results[key] = []\n # converts np.nan to str for easier usage of assert in pytest\n nan_check = pd.isnull(net[key][value[0]].at[i])\n if nan_check:\n check_results[key].append((i, value[0],\n str(net[key][value[0]].at[i]), value[1]))\n else:\n check_results[key].append((i, value[0],\n net[key][value[0]].at[i], value[1]))\n if check_results:\n return check_results", "def estimate_advantage(self, obs, q_values):\n\n # TODO: Estimate the advantage when nn_baseline is True\n # HINT1: pass obs into the neural network that you're using to learn the baseline\n # extra hint if you're stuck: see your actor's run_baseline_prediction\n # HINT2: advantage should be [Q-b]\n if self.nn_baseline:\n b_n_unnormalized = self.baseline_model(obs)\n b_n = b_n_unnormalized * np.std(q_values) + np.mean(q_values)\n adv_n = (q_values - tf.squeeze(b_n)).numpy()\n # Else, just set the advantage to [Q]\n else:\n adv_n = q_values.copy()\n\n # Normalize the resulting advantages\n if self.standardize_advantages:\n adv_n = (adv_n - np.mean(adv_n)) / (np.std(adv_n) + 1e-8)\n\n return adv_n.astype(np.float32)", "def cost_b_v(self):\n return self._cost_b_v" ]
[ "0.5776241", "0.5750878", "0.5623462", "0.54842955", "0.5386926", "0.52231693", "0.52097505", "0.5197751", "0.5113644", "0.5086685", "0.5073465", "0.50708735", "0.5056254", "0.5033763", "0.5021825", "0.5015476", "0.50106674", "0.5005514", "0.4995753", "0.49826834", "0.4966393", "0.49619266", "0.4957525", "0.49503857", "0.4940665", "0.49244165", "0.49244165", "0.49244165", "0.49244165", "0.49244165", "0.49244165", "0.49244165", "0.49244165", "0.49227518", "0.4919729", "0.4911313", "0.4905583", "0.4896395", "0.48959696", "0.4890918", "0.48896733", "0.48894134", "0.48862803", "0.48862803", "0.48862803", "0.48862803", "0.48862803", "0.48862803", "0.48862803", "0.48852536", "0.48801818", "0.48775497", "0.48774955", "0.48774955", "0.48716152", "0.4870855", "0.48702246", "0.48555374", "0.4854864", "0.4831371", "0.48311788", "0.48306584", "0.48277628", "0.482472", "0.48242116", "0.4822445", "0.48213378", "0.48205054", "0.4819617", "0.48105913", "0.4803089", "0.47956207", "0.47956207", "0.47949058", "0.47926322", "0.47876275", "0.4785001", "0.47837597", "0.47819042", "0.47760442", "0.47744524", "0.47734678", "0.47693202", "0.47660184", "0.4765424", "0.47653174", "0.47619143", "0.47607484", "0.47589195", "0.4757779", "0.47575587", "0.4756708", "0.47563773", "0.4753214", "0.4749549", "0.47460026", "0.47404528", "0.47342733", "0.47323495", "0.47280627" ]
0.5109669
9